Diffstat (limited to 'patches/linux-3.2.16/0000-arago-beaglebone.patch')
 patches/linux-3.2.16/0000-arago-beaglebone.patch | 79756 ++++++++++++++++++++
 1 file changed, 79756 insertions(+), 0 deletions(-)
diff --git a/patches/linux-3.2.16/0000-arago-beaglebone.patch b/patches/linux-3.2.16/0000-arago-beaglebone.patch
new file mode 100644
index 0000000..3c5ac3c
--- /dev/null
+++ b/patches/linux-3.2.16/0000-arago-beaglebone.patch
@@ -0,0 +1,79756 @@
+diff --git a/Documentation/arm/memory.txt b/Documentation/arm/memory.txt
+index 771d48d..208a2d4 100644
+--- a/Documentation/arm/memory.txt
++++ b/Documentation/arm/memory.txt
+@@ -51,15 +51,14 @@ ffc00000 ffefffff DMA memory mapping region. Memory returned
+ ff000000 ffbfffff Reserved for future expansion of DMA
+ mapping region.
+
+-VMALLOC_END feffffff Free for platform use, recommended.
+- VMALLOC_END must be aligned to a 2MB
+- boundary.
+-
+ VMALLOC_START VMALLOC_END-1 vmalloc() / ioremap() space.
+ Memory returned by vmalloc/ioremap will
+ be dynamically placed in this region.
+- VMALLOC_START may be based upon the value
+- of the high_memory variable.
++ Machine specific static mappings are also
++ located here through iotable_init().
++ VMALLOC_START is based upon the value
++ of the high_memory variable, and VMALLOC_END
++ is equal to 0xff000000.
+
+ PAGE_OFFSET high_memory-1 Kernel direct-mapped RAM region.
+ This maps the platforms RAM, and typically
+diff --git a/Documentation/devicetree/bindings/arm/gic.txt b/Documentation/devicetree/bindings/arm/gic.txt
+index 52916b4..9b4b82a 100644
+--- a/Documentation/devicetree/bindings/arm/gic.txt
++++ b/Documentation/devicetree/bindings/arm/gic.txt
+@@ -42,6 +42,10 @@ Optional
+ - interrupts : Interrupt source of the parent interrupt controller. Only
+ present on secondary GICs.
+
++- cpu-offset : per-cpu offset within the distributor and cpu interface
++ regions, used when the GIC doesn't have banked registers. The offset is
++ cpu-offset * cpu-nr.
++
+ Example:
+
+ intc: interrupt-controller@fff11000 {
+diff --git a/Documentation/devicetree/bindings/arm/vic.txt b/Documentation/devicetree/bindings/arm/vic.txt
+new file mode 100644
+index 0000000..266716b
+--- /dev/null
++++ b/Documentation/devicetree/bindings/arm/vic.txt
+@@ -0,0 +1,29 @@
++* ARM Vectored Interrupt Controller
++
++One or more Vectored Interrupt Controllers (VIC's) can be connected in an ARM
++system for interrupt routing. For multiple controllers they can either be
++nested or have the outputs wire-OR'd together.
++
++Required properties:
++
++- compatible : should be one of
++ "arm,pl190-vic"
++ "arm,pl192-vic"
++- interrupt-controller : Identifies the node as an interrupt controller
++- #interrupt-cells : The number of cells to define the interrupts. Must be 1 as
++ the VIC has no configuration options for interrupt sources. The cell is a u32
++ and defines the interrupt number.
++- reg : The register bank for the VIC.
++
++Optional properties:
++
++- interrupts : Interrupt source for parent controllers if the VIC is nested.
++
++Example:
++
++ vic0: interrupt-controller@60000 {
++ compatible = "arm,pl192-vic";
++ interrupt-controller;
++ #interrupt-cells = <1>;
++ reg = <0x60000 0x1000>;
++ };
+diff --git a/Documentation/devicetree/bindings/serial/omap_serial.txt b/Documentation/devicetree/bindings/serial/omap_serial.txt
+new file mode 100644
+index 0000000..342eedd
+--- /dev/null
++++ b/Documentation/devicetree/bindings/serial/omap_serial.txt
+@@ -0,0 +1,10 @@
++OMAP UART controller
++
++Required properties:
++- compatible : should be "ti,omap2-uart" for OMAP2 controllers
++- compatible : should be "ti,omap3-uart" for OMAP3 controllers
++- compatible : should be "ti,omap4-uart" for OMAP4 controllers
++- ti,hwmods : Must be "uart<n>", n being the instance number (1-based)
++
++Optional properties:
++- clock-frequency : frequency of the clock input to the UART
+diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
+index 3d84912..a7e4ac1 100644
+--- a/Documentation/feature-removal-schedule.txt
++++ b/Documentation/feature-removal-schedule.txt
+@@ -535,6 +535,20 @@ Why: In 3.0, we can now autodetect internal 3G device and already have
+ information log when acer-wmi initial.
+ Who: Lee, Chun-Yi <jlee@novell.com>
+
++---------------------------
++
++What: /sys/devices/platform/_UDC_/udc/_UDC_/is_dualspeed file and
++ is_dualspeed line in /sys/devices/platform/ci13xxx_*/udc/device file.
++When: 3.8
++Why: The is_dualspeed file is superseded by maximum_speed in the same
++ directory and is_dualspeed line in device file is superseded by
++ max_speed line in the same file.
++
++ The maximum_speed/max_speed specifies maximum speed supported by UDC.
++	To check if dualspeed is supported, check if the value is >= 3.
++ Various possible speeds are defined in <linux/usb/ch9.h>.
++Who: Michal Nazarewicz <mina86@mina86.com>
++
+ ----------------------------
+
+ What: The XFS nodelaylog mount option
+diff --git a/Documentation/pwm.txt b/Documentation/pwm.txt
+new file mode 100644
+index 0000000..7112e66
+--- /dev/null
++++ b/Documentation/pwm.txt
+@@ -0,0 +1,259 @@
++ Generic PWM Device API
++
++ February 1, 2010
++ Bill Gatliff
++ <bgat@billgatliff.com>
++
++
++
++The code in drivers/pwm and include/linux/pwm/ implements an API for
++applications involving pulse-width-modulation signals. This document
++describes how the API implementation facilitates both PWM-generating
++devices, and users of those devices.
++
++
++
++Motivation
++
++The primary goals for implementing the "generic PWM API" are to
++consolidate the various PWM implementations within a consistent and
++redundancy-reducing framework, and to facilitate the use of
++hotpluggable PWM devices.
++
++Previous PWM-related implementations within the Linux kernel achieved
++their consistency via cut-and-paste, but did not need to (and didn't)
++facilitate more than one PWM-generating device within the system---
++hotplug or otherwise. The Generic PWM Device API might be most
++appropriately viewed as an update to those implementations, rather
++than a complete rewrite.
++
++
++
++Challenges
++
++One of the difficulties in implementing a generic PWM framework is the
++fact that pulse-width-modulation applications involve real-world
++signals, which often must be carefully managed to prevent destruction
++of hardware that is linked to those signals. A DC motor that
++experiences a brief interruption in the PWM signal controlling it
++might destructively overheat; it could suddenly change speed, losing
++synchronization with a sensor; it could even suddenly change direction
++or torque, breaking the mechanical device connected to it.
++
++(A generic PWM device framework is not directly responsible for
++preventing the above scenarios: that responsibility lies with the
++hardware designer, and the application and driver authors. But it
++must, to the greatest extent possible, make it easy to avoid such
++problems).
++
++A generic PWM device framework must accommodate the substantial
++differences between available PWM-generating hardware devices, without
++becoming sub-optimal for any of them.
++
++Finally, a generic PWM device framework must be relatively
++lightweight, computationally speaking. Some PWM users demand
++high-speed outputs, plus the ability to regulate those outputs
++quickly. A device framework must be able to "keep up" with such
++hardware, while still leaving time to do real work.
++
++The Generic PWM Device API is an attempt to meet all of the above
++requirements. At its initial publication, the API was already in use
++managing small DC motors, sensors and solenoids through a
++custom-designed, optically-isolated H-bridge driver.
++
++
++
++Functional Overview
++
++The Generic PWM Device API framework is implemented in
++include/linux/pwm/pwm.h and drivers/pwm/pwm.c. The functions therein
++use information from pwm_device, pwm_channel and pwm_channel_config
++structures to invoke services in PWM peripheral device drivers.
++Consult drivers/pwm/atmel-pwm.c for an example driver.
++
++There are two classes of adopters of the PWM framework:
++
++ "Users" -- those wishing to employ the API merely to produce PWM
++ signals; once they have identified the appropriate physical output
++ on the platform in question, they don't care about the details of
++ the underlying hardware
++
++ "Driver authors" -- those wishing to bind devices that can generate
++ PWM signals to the Generic PWM Device API, so that the services of
++ those devices become available to users. Assuming the hardware can
++ support the needs of a user, driver authors don't care about the
++ details of the user's application
++
++Generally speaking, users will first invoke pwm_request() to obtain a
++handle to a PWM device. They will then pass that handle to functions
++like pwm_duty_ns() and pwm_period_ns() to set the duty cycle and
++period of the PWM signal, respectively. They will also invoke
++pwm_start() and pwm_stop() to turn the signal on and off.
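++
++As a minimal sketch of that flow (the exact pwm_request() argument list
++is not spelled out in this document, so the name-plus-label form below,
++as well as the error check, is an assumption for illustration):
++
++	struct pwm_channel *pwm;
++
++	/* request channel 2 of the device registered as "atmel_pwmc.0" */
++	pwm = pwm_request("atmel_pwmc.0:2", "my-motor");
++	if (!pwm)
++		return -ENODEV;
++
++	pwm_period_ns(pwm, 50000);	/* 20 kHz period */
++	pwm_duty_ns(pwm, 25000);	/* 50% duty cycle */
++	pwm_start(pwm);
++
++	/* ... and later ... */
++	pwm_stop(pwm);
++	pwm_free(pwm);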
++
++The Generic PWM API framework also provides a sysfs interface to PWM
++devices, which is adequate for basic application needs and testing.
++
++Driver authors fill out a pwm_device structure, which describes the
++capabilities of the PWM hardware being constructed--- including the
++number of distinct output "channels" the peripheral offers. They then
++invoke pwm_register() (usually from within their device's probe()
++handler) to make the PWM API aware of their device. The framework
++will call back to the methods described in the pwm_device structure as
++users begin to configure and utilize the hardware.
++
++Note that PWM signals can be produced by a variety of peripherals,
++beyond the true "PWM hardware" offered by many system-on-chip devices.
++Other possibilities include timer/counters with compare-match
++capabilities, carefully-programmed synchronous serial ports
++(e.g. SPI), and GPIO pins driven by kernel interval timers. With a
++proper pwm_device structure, these devices and pseudo-devices can all
++be accommodated by the Generic PWM Device API framework.
++
++
++
++Using the API to Generate PWM Signals -- Basic Functions for Users
++
++
++pwm_request() -- Returns a pwm_channel pointer, which is subsequently
++passed to the other user-related PWM functions. Once requested, a PWM
++channel is marked as in-use and subsequent requests prior to
++pwm_free() will fail.
++
++The names used to refer to PWM devices are defined by driver authors.
++Typically they are platform device bus identifiers, and this
++convention is encouraged for consistency.
++
++
++pwm_free() -- Marks a PWM channel as no longer in use. The PWM device
++is stopped before it is released by the API.
++
++
++pwm_period_ns() -- Specifies the PWM signal's period, in nanoseconds.
++
++
++pwm_duty_ns() -- Specifies the PWM signal's active duration, in nanoseconds.
++
++
++pwm_duty_percent() -- Specifies the PWM signal's active duration, as a
++percentage of the current period of the signal. NOTE: this value is
++not recalculated if the period of the signal is subsequently changed.
++
++
++pwm_start(), pwm_stop() -- Turns the PWM signal on and off. Except
++where stated otherwise by a driver author, signals are stopped at the
++end of the current period, at which time the output is set to its
++inactive state.
++
++
++pwm_polarity() -- Defines whether the PWM signal output's active
++region is "1" or "0". A 10% duty-cycle, polarity=1 signal will
++conventionally be at 5V (or 3.3V, or 1000V, or whatever the platform
++hardware does) for 10% of the period. The same configuration of a
++polarity=0 signal will be at 5V (or 3.3V, or ...) for 90% of the
++period.
++
++
++
++Using the API to Generate PWM Signals -- Advanced Functions
++
++
++pwm_config() -- Passes a pwm_channel_config structure to the
++associated device driver. This function is invoked by pwm_start(),
++pwm_duty_ns(), etc. and is one of two main entry points to the PWM
++driver for the hardware being used. The configuration change is
++guaranteed atomic if multiple configuration changes are specified.
++This function might sleep, depending on what the device driver has to
++do to satisfy the request. All PWM device drivers must support this
++entry point.
++
++
++pwm_config_nosleep() -- Passes a pwm_channel_config structure to the
++associated device driver. If the driver must sleep in order to
++implement the requested configuration change, -EWOULDBLOCK is
++returned. Users may call this function from interrupt handlers, for
++example. This is the other main entry point into the PWM hardware
++driver, but not all device drivers support this entry point.
++
++
++pwm_synchronize(), pwm_unsynchronize() -- "Synchronizes" two or more
++PWM channels, if the underlying hardware permits. (If it doesn't, the
++framework facilitates emulating this capability but it is not yet
++implemented). Synchronized channels will start and stop
++simultaneously when any single channel in the group is started or
++stopped. Use pwm_unsynchronize(..., NULL) to completely detach a
++channel from any other synchronized channels. By default, all PWM
++channels are unsynchronized.
++
++
++pwm_set_handler() -- Defines an end-of-period callback. The indicated
++function will be invoked in a worker thread at the end of each PWM
++period, and can subsequently invoke pwm_config(), etc. Must be used
++with extreme care for high-speed PWM outputs. Set the handler
++function to NULL to un-set the handler.
++
++
++
++Implementing a PWM Device API Driver -- Functions for Driver Authors
++
++
++Fill out the appropriate fields in a pwm_device structure, and submit
++to pwm_register():
++
++
++bus_id -- the plain-text name of the device. Users will bind to a
++channel on the device using this name plus the channel number. For
++example, the Atmel PWMC's bus_id is "atmel_pwmc", the same as used by
++the platform device driver (recommended). The first device registered
++thereby receives bus_id "atmel_pwmc.0", which is what you put in
++pwm_device.bus_id. Channels are then named "atmel_pwmc.0:[0-3]".
++(Hint: just use pdev->dev.bus_id in your probe() method).
++
++
++nchan -- the number of distinct output channels provided by the device.
++
++
++request -- (optional) Invoked each time a user requests a channel.
++Use to turn on clocks, clean up register states, etc. The framework
++takes care of device locking/unlocking; you will see only successful
++requests.
++
++
++free -- (optional) Callback for each time a user relinquishes a
++channel. The framework will have already stopped, unsynchronized and
++un-handled the channel. Use to turn off clocks, etc. as necessary.
++
++
++synchronize, unsynchronize -- (optional) Callbacks to
++synchronize/unsynchronize channels. Some devices provide this
++capability in hardware; for others, it can be emulated (see
++atmel_pwmc.c's sync_mask for an example).
++
++
++set_callback -- (optional) Invoked when a user requests a handler. If
++the hardware supports an end-of-period interrupt, invoke the function
++indicated during your interrupt handler. The callback function itself
++is always internal to the API, and does not map directly to the user's
++callback function.
++
++
++config -- Invoked to change the device configuration, always from a
++sleep-capable context. All the changes indicated must be performed
++atomically, ideally synchronized to an end-of-period event (so that
++you avoid short or long output pulses). You may sleep, etc. as
++necessary within this function.
++
++
++config_nosleep -- (optional) Invoked to change device configuration
++from within a context that is not allowed to sleep. If you cannot
++perform the requested configuration changes without sleeping, return
++-EWOULDBLOCK.
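++
++Putting the above together, a minimal registration sketch (the callback
++prototypes and the exact pwm_register() arguments are not defined in this
++document, so those details are assumptions for illustration):
++
++	static int foo_pwm_config(struct pwm_channel *ch,
++				  struct pwm_channel_config *cfg)
++	{
++		/* apply the requested period/duty/polarity atomically */
++		return 0;
++	}
++
++	static struct pwm_device foo_pwm = {
++		.bus_id	= "foo_pwm.0",	/* or pdev->dev.bus_id in probe() */
++		.nchan	= 4,
++		.config	= foo_pwm_config,
++		/* .request, .free, .config_nosleep, etc. are optional */
++	};
++
++	/* typically called from the platform driver's probe() */
++	ret = pwm_register(&foo_pwm);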
++
++
++
++Acknowledgements
++
++
++The author expresses his gratitude to the countless developers who
++have reviewed and submitted feedback on the various versions of the
++Generic PWM Device API code, and those who have submitted drivers and
++applications that use the framework. You know who you are. ;)
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index ab3740e..6cd71ec 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -938,6 +938,7 @@ config ARCH_OMAP
+ select ARCH_REQUIRE_GPIOLIB
+ select ARCH_HAS_CPUFREQ
+ select CLKSRC_MMIO
++ select GENERIC_ALLOCATOR
+ select GENERIC_CLOCKEVENTS
+ select HAVE_SCHED_CLOCK
+ select ARCH_HAS_HOLES_MEMORYMODEL
+diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
+index c5213e7..0a9ac1b 100644
+--- a/arch/arm/Kconfig.debug
++++ b/arch/arm/Kconfig.debug
+@@ -271,4 +271,10 @@ config ARM_KPROBES_TEST
+ help
+ Perform tests of kprobes API and instruction set simulation.
+
++config DEBUG_JTAG_ENABLE
++ bool "Enable JTAG clock for debugger connectivity"
++ help
++	  Say Y here if you want to enable the JTAG clock so that
++	  a debugger can be connected.
++
+ endmenu
+diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi
+new file mode 100644
+index 0000000..f2ab4ea
+--- /dev/null
++++ b/arch/arm/boot/dts/omap2.dtsi
+@@ -0,0 +1,67 @@
++/*
++ * Device Tree Source for OMAP2 SoC
++ *
++ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * This file is licensed under the terms of the GNU General Public License
++ * version 2. This program is licensed "as is" without any warranty of any
++ * kind, whether express or implied.
++ */
++
++/include/ "skeleton.dtsi"
++
++/ {
++ compatible = "ti,omap2430", "ti,omap2420", "ti,omap2";
++
++ aliases {
++ serial0 = &uart1;
++ serial1 = &uart2;
++ serial2 = &uart3;
++ };
++
++ cpus {
++ cpu@0 {
++ compatible = "arm,arm1136jf-s";
++ };
++ };
++
++ soc {
++ compatible = "ti,omap-infra";
++ mpu {
++ compatible = "ti,omap2-mpu";
++ ti,hwmods = "mpu";
++ };
++ };
++
++ ocp {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ ranges;
++ ti,hwmods = "l3_main";
++
++ intc: interrupt-controller@1 {
++ compatible = "ti,omap2-intc";
++ interrupt-controller;
++ #interrupt-cells = <1>;
++ };
++
++ uart1: serial@4806a000 {
++ compatible = "ti,omap2-uart";
++ ti,hwmods = "uart1";
++ clock-frequency = <48000000>;
++ };
++
++ uart2: serial@4806c000 {
++ compatible = "ti,omap2-uart";
++ ti,hwmods = "uart2";
++ clock-frequency = <48000000>;
++ };
++
++ uart3: serial@4806e000 {
++ compatible = "ti,omap2-uart";
++ ti,hwmods = "uart3";
++ clock-frequency = <48000000>;
++ };
++ };
++};
+diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
+index 9486be6..9f72cd4 100644
+--- a/arch/arm/boot/dts/omap3-beagle.dts
++++ b/arch/arm/boot/dts/omap3-beagle.dts
+@@ -13,15 +13,6 @@
+ model = "TI OMAP3 BeagleBoard";
+ compatible = "ti,omap3-beagle", "ti,omap3";
+
+- /*
+- * Since the initial device tree board file does not create any
+- * devices (MMC, network...), the only way to boot is to provide a
+- * ramdisk.
+- */
+- chosen {
+- bootargs = "root=/dev/ram0 rw console=ttyO2,115200n8 initrd=0x81600000,20M ramdisk_size=20480 no_console_suspend debug earlyprintk";
+- };
+-
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x20000000>; /* 512 MB */
+diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
+index d202bb5..216c331 100644
+--- a/arch/arm/boot/dts/omap3.dtsi
++++ b/arch/arm/boot/dts/omap3.dtsi
+@@ -13,6 +13,13 @@
+ / {
+ compatible = "ti,omap3430", "ti,omap3";
+
++ aliases {
++ serial0 = &uart1;
++ serial1 = &uart2;
++ serial2 = &uart3;
++ serial3 = &uart4;
++ };
++
+ cpus {
+ cpu@0 {
+ compatible = "arm,cortex-a8";
+@@ -59,5 +66,29 @@
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
++
++ uart1: serial@0x4806a000 {
++ compatible = "ti,omap3-uart";
++ ti,hwmods = "uart1";
++ clock-frequency = <48000000>;
++ };
++
++ uart2: serial@0x4806c000 {
++ compatible = "ti,omap3-uart";
++ ti,hwmods = "uart2";
++ clock-frequency = <48000000>;
++ };
++
++ uart3: serial@0x49020000 {
++ compatible = "ti,omap3-uart";
++ ti,hwmods = "uart3";
++ clock-frequency = <48000000>;
++ };
++
++ uart4: serial@0x49042000 {
++ compatible = "ti,omap3-uart";
++ ti,hwmods = "uart4";
++ clock-frequency = <48000000>;
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/omap4-panda.dts b/arch/arm/boot/dts/omap4-panda.dts
+index c702657..9755ad5 100644
+--- a/arch/arm/boot/dts/omap4-panda.dts
++++ b/arch/arm/boot/dts/omap4-panda.dts
+@@ -13,15 +13,6 @@
+ model = "TI OMAP4 PandaBoard";
+ compatible = "ti,omap4-panda", "ti,omap4430", "ti,omap4";
+
+- /*
+- * Since the initial device tree board file does not create any
+- * devices (MMC, network...), the only way to boot is to provide a
+- * ramdisk.
+- */
+- chosen {
+- bootargs = "root=/dev/ram0 rw console=ttyO2,115200n8 initrd=0x81600000,20M ramdisk_size=20480 no_console_suspend debug";
+- };
+-
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x40000000>; /* 1 GB */
+diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
+index 066e28c..63c6b2b 100644
+--- a/arch/arm/boot/dts/omap4-sdp.dts
++++ b/arch/arm/boot/dts/omap4-sdp.dts
+@@ -13,15 +13,6 @@
+ model = "TI OMAP4 SDP board";
+ compatible = "ti,omap4-sdp", "ti,omap4430", "ti,omap4";
+
+- /*
+- * Since the initial device tree board file does not create any
+- * devices (MMC, network...), the only way to boot is to provide a
+- * ramdisk.
+- */
+- chosen {
+- bootargs = "root=/dev/ram0 rw console=ttyO2,115200n8 initrd=0x81600000,20M ramdisk_size=20480 no_console_suspend debug";
+- };
+-
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x40000000>; /* 1 GB */
+diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
+index 4c61c82..e8fe75f 100644
+--- a/arch/arm/boot/dts/omap4.dtsi
++++ b/arch/arm/boot/dts/omap4.dtsi
+@@ -21,6 +21,10 @@
+ interrupt-parent = <&gic>;
+
+ aliases {
++ serial0 = &uart1;
++ serial1 = &uart2;
++ serial2 = &uart3;
++ serial3 = &uart4;
+ };
+
+ cpus {
+@@ -99,5 +103,29 @@
+ reg = <0x48241000 0x1000>,
+ <0x48240100 0x0100>;
+ };
++
++ uart1: serial@0x4806a000 {
++ compatible = "ti,omap4-uart";
++ ti,hwmods = "uart1";
++ clock-frequency = <48000000>;
++ };
++
++ uart2: serial@0x4806c000 {
++ compatible = "ti,omap4-uart";
++ ti,hwmods = "uart2";
++ clock-frequency = <48000000>;
++ };
++
++ uart3: serial@0x48020000 {
++ compatible = "ti,omap4-uart";
++ ti,hwmods = "uart3";
++ clock-frequency = <48000000>;
++ };
++
++ uart4: serial@0x4806e000 {
++ compatible = "ti,omap4-uart";
++ ti,hwmods = "uart4";
++ clock-frequency = <48000000>;
++ };
+ };
+ };
+diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
+index 74df9ca..81a933e 100644
+--- a/arch/arm/common/Kconfig
++++ b/arch/arm/common/Kconfig
+@@ -1,8 +1,14 @@
+ config ARM_GIC
+ select IRQ_DOMAIN
++ select MULTI_IRQ_HANDLER
++ bool
++
++config GIC_NON_BANKED
+ bool
+
+ config ARM_VIC
++ select IRQ_DOMAIN
++ select MULTI_IRQ_HANDLER
+ bool
+
+ config ARM_VIC_NR
+diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
+index 6ea9b6f..40bc575 100644
+--- a/arch/arm/common/Makefile
++++ b/arch/arm/common/Makefile
+@@ -17,3 +17,4 @@ obj-$(CONFIG_ARCH_IXP2000) += uengine.o
+ obj-$(CONFIG_ARCH_IXP23XX) += uengine.o
+ obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o
+ obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o
++obj-$(CONFIG_OMAP3_EDMA) += edma.o
+diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
+new file mode 100644
+index 0000000..fe00c92
+--- /dev/null
++++ b/arch/arm/common/edma.c
+@@ -0,0 +1,1740 @@
++/*
++ * EDMA3 Driver
++ *
++ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/io.h>
++#include <linux/err.h>
++#include <linux/pm_runtime.h>
++
++#include <mach/edma.h>
++
++/* Offsets matching "struct edmacc_param" */
++#define PARM_OPT 0x00
++#define PARM_SRC 0x04
++#define PARM_A_B_CNT 0x08
++#define PARM_DST 0x0c
++#define PARM_SRC_DST_BIDX 0x10
++#define PARM_LINK_BCNTRLD 0x14
++#define PARM_SRC_DST_CIDX 0x18
++#define PARM_CCNT 0x1c
++
++#define PARM_SIZE 0x20
++
++/* Offsets for EDMA CC global channel registers and their shadows */
++#define SH_ER 0x00 /* 64 bits */
++#define SH_ECR 0x08 /* 64 bits */
++#define SH_ESR 0x10 /* 64 bits */
++#define SH_CER 0x18 /* 64 bits */
++#define SH_EER 0x20 /* 64 bits */
++#define SH_EECR 0x28 /* 64 bits */
++#define SH_EESR 0x30 /* 64 bits */
++#define SH_SER 0x38 /* 64 bits */
++#define SH_SECR 0x40 /* 64 bits */
++#define SH_IER 0x50 /* 64 bits */
++#define SH_IECR 0x58 /* 64 bits */
++#define SH_IESR 0x60 /* 64 bits */
++#define SH_IPR 0x68 /* 64 bits */
++#define SH_ICR 0x70 /* 64 bits */
++#define SH_IEVAL 0x78
++#define SH_QER 0x80
++#define SH_QEER 0x84
++#define SH_QEECR 0x88
++#define SH_QEESR 0x8c
++#define SH_QSER 0x90
++#define SH_QSECR 0x94
++#define SH_SIZE 0x200
++
++/* Offsets for EDMA CC global registers */
++#define EDMA_REV 0x0000
++#define EDMA_CCCFG 0x0004
++#define EDMA_QCHMAP 0x0200 /* 8 registers */
++#define EDMA_DMAQNUM 0x0240 /* 8 registers (4 on OMAP-L1xx) */
++#define EDMA_QDMAQNUM 0x0260
++#define EDMA_QUETCMAP 0x0280
++#define EDMA_QUEPRI 0x0284
++#define EDMA_EMR 0x0300 /* 64 bits */
++#define EDMA_EMCR 0x0308 /* 64 bits */
++#define EDMA_QEMR 0x0310
++#define EDMA_QEMCR 0x0314
++#define EDMA_CCERR 0x0318
++#define EDMA_CCERRCLR 0x031c
++#define EDMA_EEVAL 0x0320
++#define EDMA_DRAE 0x0340 /* 4 x 64 bits*/
++#define EDMA_QRAE 0x0380 /* 4 registers */
++#define EDMA_QUEEVTENTRY 0x0400 /* 2 x 16 registers */
++#define EDMA_QSTAT 0x0600 /* 2 registers */
++#define EDMA_QWMTHRA 0x0620
++#define EDMA_QWMTHRB 0x0624
++#define EDMA_CCSTAT 0x0640
++
++#define EDMA_M 0x1000 /* global channel registers */
++#define EDMA_ECR 0x1008
++#define EDMA_ECRH 0x100C
++#define EDMA_SHADOW0 0x2000 /* 4 regions shadowing global channels */
++#define EDMA_PARM 0x4000 /* 128 param entries */
++
++#define PARM_OFFSET(param_no) (EDMA_PARM + ((param_no) << 5))
++
++#define EDMA_DCHMAP 0x0100 /* 64 registers */
++#define CHMAP_EXIST BIT(24)
++
++
++/*function that maps the cross bar events to channels */
++int (*xbar_event_to_channel_map)(unsigned event, unsigned *channel,
++ struct event_to_channel_map *xbar_event_map) = NULL;
++
++/*****************************************************************************/
++
++static void __iomem *edmacc_regs_base[EDMA_MAX_CC];
++
++static inline unsigned int edma_read(unsigned ctlr, int offset)
++{
++ return (unsigned int)__raw_readl(edmacc_regs_base[ctlr] + offset);
++}
++
++static inline void edma_write(unsigned ctlr, int offset, int val)
++{
++ __raw_writel(val, edmacc_regs_base[ctlr] + offset);
++}
++static inline void edma_modify(unsigned ctlr, int offset, unsigned and,
++ unsigned or)
++{
++ unsigned val = edma_read(ctlr, offset);
++ val &= and;
++ val |= or;
++ edma_write(ctlr, offset, val);
++}
++static inline void edma_and(unsigned ctlr, int offset, unsigned and)
++{
++ unsigned val = edma_read(ctlr, offset);
++ val &= and;
++ edma_write(ctlr, offset, val);
++}
++static inline void edma_or(unsigned ctlr, int offset, unsigned or)
++{
++ unsigned val = edma_read(ctlr, offset);
++ val |= or;
++ edma_write(ctlr, offset, val);
++}
++static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i)
++{
++ return edma_read(ctlr, offset + (i << 2));
++}
++static inline unsigned int edma_read_array2(unsigned ctlr, int offset, int i,
++ int j)
++{
++ return edma_read(ctlr, offset + ((i*2 + j) << 2));
++}
++static inline void edma_write_array(unsigned ctlr, int offset, int i,
++ unsigned val)
++{
++ edma_write(ctlr, offset + (i << 2), val);
++}
++static inline void edma_modify_array(unsigned ctlr, int offset, int i,
++ unsigned and, unsigned or)
++{
++ edma_modify(ctlr, offset + (i << 2), and, or);
++}
++static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or)
++{
++ edma_or(ctlr, offset + (i << 2), or);
++}
++static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j,
++ unsigned or)
++{
++ edma_or(ctlr, offset + ((i*2 + j) << 2), or);
++}
++static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j,
++ unsigned val)
++{
++ edma_write(ctlr, offset + ((i*2 + j) << 2), val);
++}
++static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset)
++{
++ return edma_read(ctlr, EDMA_SHADOW0 + offset);
++}
++static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset,
++ int i)
++{
++ return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2));
++}
++static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val)
++{
++ edma_write(ctlr, EDMA_SHADOW0 + offset, val);
++}
++static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i,
++ unsigned val)
++{
++ edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val);
++}
++static inline unsigned int edma_parm_read(unsigned ctlr, int offset,
++ int param_no)
++{
++ return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5));
++}
++static inline void edma_parm_write(unsigned ctlr, int offset, int param_no,
++ unsigned val)
++{
++ edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val);
++}
++static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no,
++ unsigned and, unsigned or)
++{
++ edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or);
++}
++static inline void edma_parm_and(unsigned ctlr, int offset, int param_no,
++ unsigned and)
++{
++ edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and);
++}
++static inline void edma_parm_or(unsigned ctlr, int offset, int param_no,
++ unsigned or)
++{
++ edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or);
++}
++
++static inline void set_bits(int offset, int len, unsigned long *p)
++{
++ for (; len > 0; len--)
++ set_bit(offset + (len - 1), p);
++}
++
++static inline void clear_bits(int offset, int len, unsigned long *p)
++{
++ for (; len > 0; len--)
++ clear_bit(offset + (len - 1), p);
++}
++
++/*****************************************************************************/
++
++struct edma *edma_cc[EDMA_MAX_CC];
++static int arch_num_cc;
++
++/* dummy param set used to (re)initialize parameter RAM slots */
++static const struct edmacc_param dummy_paramset = {
++ .link_bcntrld = 0xffff,
++ .ccnt = 1,
++};
++
++/*****************************************************************************/
++
++static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
++ enum dma_event_q queue_no)
++{
++ int bit = (ch_no & 0x7) * 4;
++
++ /* default to low priority queue */
++ if (queue_no == EVENTQ_DEFAULT)
++ queue_no = edma_cc[ctlr]->default_queue;
++
++ queue_no &= 7;
++ edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
++ ~(0x7 << bit), queue_no << bit);
++}
++
++static void __init map_queue_tc(unsigned ctlr, int queue_no, int tc_no)
++{
++ int bit = queue_no * 4;
++ edma_modify(ctlr, EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit));
++}
++
++static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
++ int priority)
++{
++ int bit = queue_no * 4;
++ edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit),
++ ((priority & 0x7) << bit));
++}
++
++/**
++ * map_dmach_param - Maps channel number to param entry number
++ *
++ * This maps the DMA channel number to the param entry number. In
++ * other words, using the DMA channel mapping registers, a param entry
++ * can be mapped to any channel.
++ *
++ * Callers are responsible for ensuring the channel mapping logic is
++ * included in that particular EDMA variant (e.g. dm646x).
++ *
++ */
++static void __init map_dmach_param(unsigned ctlr)
++{
++ int i;
++ for (i = 0; i < EDMA_MAX_DMACH; i++)
++		edma_write_array(ctlr, EDMA_DCHMAP, i, (i << 5));
++}
++
++static inline void
++setup_dma_interrupt(unsigned lch,
++ void (*callback)(unsigned channel, u16 ch_status, void *data),
++ void *data)
++{
++ unsigned ctlr;
++
++ ctlr = EDMA_CTLR(lch);
++ lch = EDMA_CHAN_SLOT(lch);
++
++ if (!callback)
++ edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
++ BIT(lch & 0x1f));
++
++ edma_cc[ctlr]->intr_data[lch].callback = callback;
++ edma_cc[ctlr]->intr_data[lch].data = data;
++
++ if (callback) {
++ edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
++ BIT(lch & 0x1f));
++ edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5,
++ BIT(lch & 0x1f));
++ }
++}
++
++static int irq2ctlr(int irq)
++{
++ if (irq >= edma_cc[0]->irq_res_start && irq <= edma_cc[0]->irq_res_end)
++ return 0;
++ else if (irq >= edma_cc[1]->irq_res_start &&
++ irq <= edma_cc[1]->irq_res_end)
++ return 1;
++
++ return -1;
++}
++
++/******************************************************************************
++ *
++ * DMA interrupt handler
++ *
++ *****************************************************************************/
++static irqreturn_t dma_irq_handler(int irq, void *data)
++{
++ int i;
++ int ctlr;
++ unsigned int cnt = 0;
++
++ ctlr = irq2ctlr(irq);
++ if (ctlr < 0)
++ return IRQ_NONE;
++
++ dev_dbg(data, "dma_irq_handler\n");
++
++ if ((edma_shadow0_read_array(ctlr, SH_IPR, 0) == 0) &&
++ (edma_shadow0_read_array(ctlr, SH_IPR, 1) == 0))
++ return IRQ_NONE;
++
++ while (1) {
++ int j;
++ if (edma_shadow0_read_array(ctlr, SH_IPR, 0) &
++ edma_shadow0_read_array(ctlr, SH_IER, 0))
++ j = 0;
++ else if (edma_shadow0_read_array(ctlr, SH_IPR, 1) &
++ edma_shadow0_read_array(ctlr, SH_IER, 1))
++ j = 1;
++ else
++ break;
++ dev_dbg(data, "IPR%d %08x\n", j,
++ edma_shadow0_read_array(ctlr, SH_IPR, j));
++ for (i = 0; i < 32; i++) {
++ int k = (j << 5) + i;
++ if ((edma_shadow0_read_array(ctlr, SH_IPR, j) & BIT(i))
++ && (edma_shadow0_read_array(ctlr,
++ SH_IER, j) & BIT(i))) {
++ /* Clear the corresponding IPR bits */
++ edma_shadow0_write_array(ctlr, SH_ICR, j,
++ BIT(i));
++ if (edma_cc[ctlr]->intr_data[k].callback)
++ edma_cc[ctlr]->intr_data[k].callback(
++ k, DMA_COMPLETE,
++ edma_cc[ctlr]->intr_data[k].
++ data);
++ }
++ }
++ cnt++;
++ if (cnt > 10)
++ break;
++ }
++ edma_shadow0_write(ctlr, SH_IEVAL, 1);
++ return IRQ_HANDLED;
++}
++
++/******************************************************************************
++ *
++ * DMA error interrupt handler
++ *
++ *****************************************************************************/
++static irqreturn_t dma_ccerr_handler(int irq, void *data)
++{
++ int i;
++ int ctlr;
++ unsigned int cnt = 0;
++
++ ctlr = irq2ctlr(irq);
++ if (ctlr < 0)
++ return IRQ_NONE;
++
++ dev_dbg(data, "dma_ccerr_handler\n");
++
++ if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
++ (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
++ (edma_read(ctlr, EDMA_QEMR) == 0) &&
++ (edma_read(ctlr, EDMA_CCERR) == 0))
++ return IRQ_NONE;
++
++ while (1) {
++ int j = -1;
++ if (edma_read_array(ctlr, EDMA_EMR, 0))
++ j = 0;
++ else if (edma_read_array(ctlr, EDMA_EMR, 1))
++ j = 1;
++ if (j >= 0) {
++ dev_dbg(data, "EMR%d %08x\n", j,
++ edma_read_array(ctlr, EDMA_EMR, j));
++ for (i = 0; i < 32; i++) {
++ int k = (j << 5) + i;
++ if (edma_read_array(ctlr, EDMA_EMR, j) &
++ BIT(i)) {
++ /* Clear the corresponding EMR bits */
++ edma_write_array(ctlr, EDMA_EMCR, j,
++ BIT(i));
++ /* Clear any SER */
++ edma_shadow0_write_array(ctlr, SH_SECR,
++ j, BIT(i));
++ if (edma_cc[ctlr]->intr_data[k].
++ callback) {
++ edma_cc[ctlr]->intr_data[k].
++ callback(k,
++ DMA_CC_ERROR,
++ edma_cc[ctlr]->intr_data
++ [k].data);
++ }
++ }
++ }
++ } else if (edma_read(ctlr, EDMA_QEMR)) {
++ dev_dbg(data, "QEMR %02x\n",
++ edma_read(ctlr, EDMA_QEMR));
++ for (i = 0; i < 8; i++) {
++ if (edma_read(ctlr, EDMA_QEMR) & BIT(i)) {
++ /* Clear the corresponding IPR bits */
++ edma_write(ctlr, EDMA_QEMCR, BIT(i));
++ edma_shadow0_write(ctlr, SH_QSECR,
++ BIT(i));
++
++ /* NOTE: not reported!! */
++ }
++ }
++ } else if (edma_read(ctlr, EDMA_CCERR)) {
++ dev_dbg(data, "CCERR %08x\n",
++ edma_read(ctlr, EDMA_CCERR));
++ /* FIXME: CCERR.BIT(16) ignored! much better
++ * to just write CCERRCLR with CCERR value...
++ */
++ for (i = 0; i < 8; i++) {
++ if (edma_read(ctlr, EDMA_CCERR) & BIT(i)) {
++ /* Clear the corresponding IPR bits */
++ edma_write(ctlr, EDMA_CCERRCLR, BIT(i));
++
++ /* NOTE: not reported!! */
++ }
++ }
++ }
++ if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
++ (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
++ (edma_read(ctlr, EDMA_QEMR) == 0) &&
++ (edma_read(ctlr, EDMA_CCERR) == 0))
++ break;
++ cnt++;
++ if (cnt > 10)
++ break;
++ }
++ edma_write(ctlr, EDMA_EEVAL, 1);
++ return IRQ_HANDLED;
++}
++
++/*-----------------------------------------------------------------------*/
++
++static int reserve_contiguous_slots(int ctlr, unsigned int id,
++ unsigned int num_slots,
++ unsigned int start_slot)
++{
++ int i, j;
++ unsigned int count = num_slots;
++ int stop_slot = start_slot;
++ DECLARE_BITMAP(tmp_inuse, EDMA_MAX_PARAMENTRY);
++
++ for (i = start_slot; i < edma_cc[ctlr]->num_slots; ++i) {
++ j = EDMA_CHAN_SLOT(i);
++ if (!test_and_set_bit(j, edma_cc[ctlr]->edma_inuse)) {
++ /* Record our current beginning slot */
++ if (count == num_slots)
++ stop_slot = i;
++
++ count--;
++ set_bit(j, tmp_inuse);
++
++ if (count == 0)
++ break;
++ } else {
++ clear_bit(j, tmp_inuse);
++
++ if (id == EDMA_CONT_PARAMS_FIXED_EXACT) {
++ stop_slot = i;
++ break;
++ } else {
++ count = num_slots;
++ }
++ }
++ }
++
++ /*
++ * We have to clear any bits that we set
++ * if we run out of parameter RAM slots, i.e. we do find a set
++ * of contiguous parameter RAM slots but do not find the exact number
++ * requested, as we may reach the total number of parameter RAM slots
++ */
++ if (i == edma_cc[ctlr]->num_slots)
++ stop_slot = i;
++
++ for (j = start_slot; j < stop_slot; j++)
++ if (test_bit(j, tmp_inuse))
++ clear_bit(j, edma_cc[ctlr]->edma_inuse);
++
++ if (count)
++ return -EBUSY;
++
++ for (j = i - num_slots + 1; j <= i; ++j)
++ memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(j),
++ &dummy_paramset, PARM_SIZE);
++
++ return EDMA_CTLR_CHAN(ctlr, i - num_slots + 1);
++}
++
++static int prepare_unused_channel_list(struct device *dev, void *data)
++{
++ struct platform_device *pdev = to_platform_device(dev);
++ int i, ctlr;
++
++ for (i = 0; i < pdev->num_resources; i++) {
++ if ((pdev->resource[i].flags & IORESOURCE_DMA) &&
++ (int)pdev->resource[i].start >= 0) {
++ ctlr = EDMA_CTLR(pdev->resource[i].start);
++ /* confirm the range */
++			if (EDMA_CHAN_SLOT(pdev->resource[i].start) <
++					EDMA_MAX_DMACH)
++ clear_bit(
++ EDMA_CHAN_SLOT(pdev->resource[i].start),
++ edma_cc[ctlr]->edma_unused);
++ }
++ }
++
++ return 0;
++}
++
++/*-----------------------------------------------------------------------*/
++
++static bool unused_chan_list_done;
++
++/* Resource alloc/free: dma channels, parameter RAM slots */
++
++/**
++ * edma_alloc_channel - allocate DMA channel and paired parameter RAM
++ * @channel: specific channel to allocate; negative for "any unmapped channel"
++ * @callback: optional; to be issued on DMA completion or errors
++ * @data: passed to callback
++ * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
++ * Controller (TC) executes requests using this channel. Use
++ * EVENTQ_DEFAULT unless you really need a high priority queue.
++ *
++ * This allocates a DMA channel and its associated parameter RAM slot.
++ * The parameter RAM is initialized to hold a dummy transfer.
++ *
++ * Normal use is to pass a specific channel number as @channel, to make
++ * use of hardware events mapped to that channel. When the channel will
++ * be used only for software triggering or event chaining, channels not
++ * mapped to hardware events (or mapped to unused events) are preferable.
++ *
++ * DMA transfers start from a channel using edma_start(), or by
++ * chaining. When the transfer described in that channel's parameter RAM
++ * slot completes, that slot's data may be reloaded through a link.
++ *
++ * DMA errors are only reported to the @callback associated with the
++ * channel driving that transfer, but transfer completion callbacks can
++ * be sent to another channel under control of the TCC field in
++ * the option word of the transfer's parameter RAM set. Drivers must not
++ * use DMA transfer completion callbacks for channels they did not allocate.
++ * (The same applies to TCC codes used in transfer chaining.)
++ *
++ * Returns the number of the channel, else negative errno.
++ */
++int edma_alloc_channel(int channel,
++ void (*callback)(unsigned channel, u16 ch_status, void *data),
++ void *data,
++ enum dma_event_q eventq_no)
++{
++ unsigned i, done = 0, ctlr = 0;
++ int ret = 0;
++
++ if (!unused_chan_list_done) {
++ /*
++ * Scan all the platform devices to find out the EDMA channels
++ * used and clear them in the unused list, making the rest
++ * available for ARM usage.
++ */
++ ret = bus_for_each_dev(&platform_bus_type, NULL, NULL,
++ prepare_unused_channel_list);
++ if (ret < 0)
++ return ret;
++
++ unused_chan_list_done = true;
++ }
++
++ if (channel >= 0) {
++ ctlr = EDMA_CTLR(channel);
++ channel = EDMA_CHAN_SLOT(channel);
++ if (xbar_event_to_channel_map) {
++ ret = xbar_event_to_channel_map(channel,
++ &channel, edma_cc[ctlr]->
++ xbar_event_mapping);
++ if (ret != 0)
++ return ret;
++ }
++ }
++
++ if (channel < 0) {
++ for (i = 0; i < arch_num_cc; i++) {
++ channel = 0;
++ for (;;) {
++ channel = find_next_bit(edma_cc[i]->edma_unused,
++ edma_cc[i]->num_channels,
++ channel);
++ if (channel == edma_cc[i]->num_channels)
++ break;
++ if (!test_and_set_bit(channel,
++ edma_cc[i]->edma_inuse)) {
++ done = 1;
++ ctlr = i;
++ break;
++ }
++ channel++;
++ }
++ if (done)
++ break;
++ }
++ if (!done)
++ return -ENOMEM;
++ } else if (channel >= edma_cc[ctlr]->num_channels) {
++ return -EINVAL;
++ } else if (test_and_set_bit(channel, edma_cc[ctlr]->edma_inuse)) {
++ return -EBUSY;
++ }
++
++ /* ensure access through shadow region 0 */
++ edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
++
++ /* ensure no events are pending */
++ edma_stop(EDMA_CTLR_CHAN(ctlr, channel));
++ memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
++ &dummy_paramset, PARM_SIZE);
++
++ if (callback)
++ setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel),
++ callback, data);
++
++ map_dmach_queue(ctlr, channel, eventq_no);
++
++ return EDMA_CTLR_CHAN(ctlr, channel);
++}
++EXPORT_SYMBOL(edma_alloc_channel);
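++
++/*
++ * A minimal memory-to-memory usage sketch for the channel API above.
++ * Illustrative only: src_phys, dst_phys and nbytes are placeholders,
++ * error handling is omitted, and edma_start() is assumed to be the
++ * manual-trigger helper declared in <mach/edma.h>.
++ *
++ *	static void xfer_cb(unsigned ch, u16 status, void *data)
++ *	{
++ *		if (status == DMA_COMPLETE)
++ *			pr_debug("EDMA channel %u done\n", ch);
++ *	}
++ *
++ *	int ch = edma_alloc_channel(-1, xfer_cb, NULL, EVENTQ_DEFAULT);
++ *
++ *	edma_set_src(ch, src_phys, INCR, W8BIT);
++ *	edma_set_dest(ch, dst_phys, INCR, W8BIT);
++ *	edma_set_transfer_params(ch, nbytes, 1, 1, 1, ASYNC);
++ *	edma_start(ch);		followed by xfer_cb() on completion
++ */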
++
++
++/**
++ * edma_free_channel - deallocate DMA channel
++ * @channel: dma channel returned from edma_alloc_channel()
++ *
++ * This deallocates the DMA channel and associated parameter RAM slot
++ * allocated by edma_alloc_channel().
++ *
++ * Callers are responsible for ensuring the channel is inactive, and
++ * will not be reactivated by linking, chaining, or software calls to
++ * edma_start().
++ */
++void edma_free_channel(unsigned channel)
++{
++ unsigned ctlr;
++
++ ctlr = EDMA_CTLR(channel);
++ channel = EDMA_CHAN_SLOT(channel);
++
++ if (channel >= edma_cc[ctlr]->num_channels)
++ return;
++
++ setup_dma_interrupt(channel, NULL, NULL);
++ /* REVISIT should probably take out of shadow region 0 */
++
++ memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
++ &dummy_paramset, PARM_SIZE);
++ clear_bit(channel, edma_cc[ctlr]->edma_inuse);
++}
++EXPORT_SYMBOL(edma_free_channel);
++
++/**
++ * edma_alloc_slot - allocate DMA parameter RAM
++ * @slot: specific slot to allocate; negative for "any unused slot"
++ *
++ * This allocates a parameter RAM slot, initializing it to hold a
++ * dummy transfer. Slots allocated using this routine have not been
++ * mapped to a hardware DMA channel, and will normally be used by
++ * linking to them from a slot associated with a DMA channel.
++ *
++ * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
++ * slots may be allocated on behalf of DSP firmware.
++ *
++ * Returns the number of the slot, else negative errno.
++ */
++int edma_alloc_slot(unsigned ctlr, int slot)
++{
++ if (slot >= 0)
++ slot = EDMA_CHAN_SLOT(slot);
++
++ if (slot < 0) {
++ slot = edma_cc[ctlr]->num_channels;
++ for (;;) {
++ slot = find_next_zero_bit(edma_cc[ctlr]->edma_inuse,
++ edma_cc[ctlr]->num_slots, slot);
++ if (slot == edma_cc[ctlr]->num_slots)
++ return -ENOMEM;
++ if (!test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse))
++ break;
++ }
++ } else if (slot < edma_cc[ctlr]->num_channels ||
++ slot >= edma_cc[ctlr]->num_slots) {
++ return -EINVAL;
++ } else if (test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse)) {
++ return -EBUSY;
++ }
++
++ memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
++ &dummy_paramset, PARM_SIZE);
++
++ return EDMA_CTLR_CHAN(ctlr, slot);
++}
++EXPORT_SYMBOL(edma_alloc_slot);
++
++/**
++ * edma_free_slot - deallocate DMA parameter RAM
++ * @slot: parameter RAM slot returned from edma_alloc_slot()
++ *
++ * This deallocates the parameter RAM slot allocated by edma_alloc_slot().
++ * Callers are responsible for ensuring the slot is inactive, and will
++ * not be activated.
++ */
++void edma_free_slot(unsigned slot)
++{
++ unsigned ctlr;
++
++ ctlr = EDMA_CTLR(slot);
++ slot = EDMA_CHAN_SLOT(slot);
++
++ if (slot < edma_cc[ctlr]->num_channels ||
++ slot >= edma_cc[ctlr]->num_slots)
++ return;
++
++ memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
++ &dummy_paramset, PARM_SIZE);
++ clear_bit(slot, edma_cc[ctlr]->edma_inuse);
++}
++EXPORT_SYMBOL(edma_free_slot);
++
++
++/**
++ * edma_alloc_cont_slots- alloc contiguous parameter RAM slots
++ * The API will return the starting point of a set of
++ * contiguous parameter RAM slots that have been requested
++ *
++ * @id: can only be EDMA_CONT_PARAMS_ANY or EDMA_CONT_PARAMS_FIXED_EXACT
++ * or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
++ * @count: number of contiguous Parameter RAM slots
++ * @slot: the start value of Parameter RAM slot that should be passed if id
++ * is EDMA_CONT_PARAMS_FIXED_EXACT or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
++ *
++ * If id is EDMA_CONT_PARAMS_ANY then the API starts looking for a set of
++ * contiguous Parameter RAM slots from parameter RAM 64 in the case of
++ * DaVinci SOCs and 32 in the case of DA8xx SOCs.
++ *
++ * If id is EDMA_CONT_PARAMS_FIXED_EXACT then the API starts looking for a
++ * set of contiguous parameter RAM slots from the "slot" that is passed as an
++ * argument to the API.
++ *
++ * If id is EDMA_CONT_PARAMS_FIXED_NOT_EXACT then the API initially
++ * starts looking for a set of contiguous parameter RAM slots from the "slot"
++ * that is passed as an argument to the API. On failure the API will try to
++ * find a set of contiguous Parameter RAM slots from the remaining Parameter
++ * RAM slots
++ */
++int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
++{
++ /*
++ * The start slot requested should be greater than
++ * the number of channels and lesser than the total number
++ * of slots
++ */
++ if ((id != EDMA_CONT_PARAMS_ANY) &&
++ (slot < edma_cc[ctlr]->num_channels ||
++ slot >= edma_cc[ctlr]->num_slots))
++ return -EINVAL;
++
++ /*
++ * The number of parameter RAM slots requested cannot be less than 1
++ * and cannot be more than the number of slots minus the number of
++ * channels
++ */
++ if (count < 1 || count >
++ (edma_cc[ctlr]->num_slots - edma_cc[ctlr]->num_channels))
++ return -EINVAL;
++
++ switch (id) {
++ case EDMA_CONT_PARAMS_ANY:
++ return reserve_contiguous_slots(ctlr, id, count,
++ edma_cc[ctlr]->num_channels);
++ case EDMA_CONT_PARAMS_FIXED_EXACT:
++ case EDMA_CONT_PARAMS_FIXED_NOT_EXACT:
++ return reserve_contiguous_slots(ctlr, id, count, slot);
++ default:
++ return -EINVAL;
++ }
++
++}
++EXPORT_SYMBOL(edma_alloc_cont_slots);
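++
++/*
++ * Sketch (illustrative): reserve any four contiguous PaRAM slots on CC0,
++ * then release them again with edma_free_cont_slots().
++ *
++ *	int slot = edma_alloc_cont_slots(0, EDMA_CONT_PARAMS_ANY, 0, 4);
++ *
++ *	if (slot >= 0)
++ *		edma_free_cont_slots(slot, 4);
++ */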
++
++/**
++ * edma_free_cont_slots - deallocate DMA parameter RAM slots
++ * @slot: first parameter RAM of a set of parameter RAM slots to be freed
++ * @count: the number of contiguous parameter RAM slots to be freed
++ *
++ * This deallocates the parameter RAM slots allocated by
++ * edma_alloc_cont_slots.
++ * Callers/applications need to keep track of sets of contiguous
++ * parameter RAM slots that have been allocated using the edma_alloc_cont_slots
++ * API.
++ * Callers are responsible for ensuring the slots are inactive, and will
++ * not be activated.
++ */
++int edma_free_cont_slots(unsigned slot, int count)
++{
++ unsigned ctlr, slot_to_free;
++ int i;
++
++ ctlr = EDMA_CTLR(slot);
++ slot = EDMA_CHAN_SLOT(slot);
++
++ if (slot < edma_cc[ctlr]->num_channels ||
++ slot >= edma_cc[ctlr]->num_slots ||
++ count < 1)
++ return -EINVAL;
++
++ for (i = slot; i < slot + count; ++i) {
++ ctlr = EDMA_CTLR(i);
++ slot_to_free = EDMA_CHAN_SLOT(i);
++
++ memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot_to_free),
++ &dummy_paramset, PARM_SIZE);
++ clear_bit(slot_to_free, edma_cc[ctlr]->edma_inuse);
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(edma_free_cont_slots);
++
++/*-----------------------------------------------------------------------*/
++
++/* Parameter RAM operations (i) -- read/write partial slots */
++
++/**
++ * edma_set_src - set initial DMA source address in parameter RAM slot
++ * @slot: parameter RAM slot being configured
++ * @src_port: physical address of source (memory, controller FIFO, etc)
++ * @mode: INCR, except in very rare cases
++ * @width: ignored unless @mode is FIFO, else specifies the
++ * width to use when addressing the fifo (e.g. W8BIT, W32BIT)
++ *
++ * Note that the source address is modified during the DMA transfer
++ * according to edma_set_src_index().
++ */
++void edma_set_src(unsigned slot, dma_addr_t src_port,
++ enum address_mode mode, enum fifo_width width)
++{
++ unsigned ctlr;
++
++ ctlr = EDMA_CTLR(slot);
++ slot = EDMA_CHAN_SLOT(slot);
++
++ if (slot < edma_cc[ctlr]->num_slots) {
++ unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
++
++ if (mode) {
++ /* set SAM and program FWID */
++ i = (i & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8));
++ } else {
++ /* clear SAM */
++ i &= ~SAM;
++ }
++ edma_parm_write(ctlr, PARM_OPT, slot, i);
++
++ /* set the source port address
++ in source register of param structure */
++ edma_parm_write(ctlr, PARM_SRC, slot, src_port);
++ }
++}
++EXPORT_SYMBOL(edma_set_src);
++
++/**
++ * edma_set_dest - set initial DMA destination address in parameter RAM slot
++ * @slot: parameter RAM slot being configured
++ * @dest_port: physical address of destination (memory, controller FIFO, etc)
++ * @mode: INCR, except in very rare cases
++ * @width: ignored unless @mode is FIFO, else specifies the
++ * width to use when addressing the fifo (e.g. W8BIT, W32BIT)
++ *
++ * Note that the destination address is modified during the DMA transfer
++ * according to edma_set_dest_index().
++ */
++void edma_set_dest(unsigned slot, dma_addr_t dest_port,
++ enum address_mode mode, enum fifo_width width)
++{
++ unsigned ctlr;
++
++ ctlr = EDMA_CTLR(slot);
++ slot = EDMA_CHAN_SLOT(slot);
++
++ if (slot < edma_cc[ctlr]->num_slots) {
++ unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
++
++ if (mode) {
++ /* set DAM and program FWID */
++ i = (i & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8));
++ } else {
++ /* clear DAM */
++ i &= ~DAM;
++ }
++ edma_parm_write(ctlr, PARM_OPT, slot, i);
++ /* set the destination port address
++ in dest register of param structure */
++ edma_parm_write(ctlr, PARM_DST, slot, dest_port);
++ }
++}
++EXPORT_SYMBOL(edma_set_dest);
++
++/**
++ * edma_get_position - returns the current transfer points
++ * @slot: parameter RAM slot being examined
++ * @src: pointer to source port position
++ * @dst: pointer to destination port position
++ *
++ * Returns current source and destination addresses for a particular
++ * parameter RAM slot. Its channel should not be active when this is called.
++ */
++void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst)
++{
++ struct edmacc_param temp;
++ unsigned ctlr;
++
++ ctlr = EDMA_CTLR(slot);
++ slot = EDMA_CHAN_SLOT(slot);
++
++ edma_read_slot(EDMA_CTLR_CHAN(ctlr, slot), &temp);
++ if (src != NULL)
++ *src = temp.src;
++ if (dst != NULL)
++ *dst = temp.dst;
++}
++EXPORT_SYMBOL(edma_get_position);
++
++/**
++ * edma_set_src_index - configure DMA source address indexing
++ * @slot: parameter RAM slot being configured
++ * @src_bidx: byte offset between source arrays in a frame
++ * @src_cidx: byte offset between source frames in a block
++ *
++ * Offsets are specified to support either contiguous or discontiguous
++ * memory transfers, or repeated access to a hardware register, as needed.
++ * When accessing hardware registers, both offsets are normally zero.
++ */
++void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx)
++{
++ unsigned ctlr;
++
++ ctlr = EDMA_CTLR(slot);
++ slot = EDMA_CHAN_SLOT(slot);
++
++ if (slot < edma_cc[ctlr]->num_slots) {
++ edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
++ 0xffff0000, src_bidx);
++ edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
++ 0xffff0000, src_cidx);
++ }
++}
++EXPORT_SYMBOL(edma_set_src_index);
++
++/**
++ * edma_set_dest_index - configure DMA destination address indexing
++ * @slot: parameter RAM slot being configured
++ * @dest_bidx: byte offset between destination arrays in a frame
++ * @dest_cidx: byte offset between destination frames in a block
++ *
++ * Offsets are specified to support either contiguous or discontiguous
++ * memory transfers, or repeated access to a hardware register, as needed.
++ * When accessing hardware registers, both offsets are normally zero.
++ */
++void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx)
++{
++ unsigned ctlr;
++
++ ctlr = EDMA_CTLR(slot);
++ slot = EDMA_CHAN_SLOT(slot);
++
++ if (slot < edma_cc[ctlr]->num_slots) {
++ edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
++ 0x0000ffff, dest_bidx << 16);
++ edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
++ 0x0000ffff, dest_cidx << 16);
++ }
++}
++EXPORT_SYMBOL(edma_set_dest_index);
++
++/**
++ * edma_set_transfer_params - configure DMA transfer parameters
++ * @slot: parameter RAM slot being configured
++ * @acnt: how many bytes per array (at least one)
++ * @bcnt: how many arrays per frame (at least one)
++ * @ccnt: how many frames per block (at least one)
++ * @bcnt_rld: used only for A-Synchronized transfers; this specifies
++ * the value to reload into bcnt when it decrements to zero
++ * @sync_mode: ASYNC or ABSYNC
++ *
++ * See the EDMA3 documentation to understand how to configure and link
++ * transfers using the fields in PaRAM slots. If you are not doing it
++ * all at once with edma_write_slot(), you will use this routine
++ * plus two calls each for source and destination, setting the initial
++ * address and saying how to index that address.
++ *
++ * An example of an A-Synchronized transfer is a serial link using a
++ * single word shift register. In that case, @acnt would be equal to
++ * that word size; the serial controller issues a DMA synchronization
++ * event to transfer each word, and memory access by the DMA transfer
++ * controller will be word-at-a-time.
++ *
++ * An example of an AB-Synchronized transfer is a device using a FIFO.
++ * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
++ * The controller with the FIFO issues DMA synchronization events when
++ * the FIFO threshold is reached, and the DMA transfer controller will
++ * transfer one frame to (or from) the FIFO. It will probably use
++ * efficient burst modes to access memory.
++ */
++void edma_set_transfer_params(unsigned slot,
++ u16 acnt, u16 bcnt, u16 ccnt,
++ u16 bcnt_rld, enum sync_dimension sync_mode)
++{
++ unsigned ctlr;
++
++ ctlr = EDMA_CTLR(slot);
++ slot = EDMA_CHAN_SLOT(slot);
++
++ if (slot < edma_cc[ctlr]->num_slots) {
++ edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot,
++ 0x0000ffff, bcnt_rld << 16);
++ if (sync_mode == ASYNC)
++ edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM);
++ else
++ edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM);
++ /* Set the acount, bcount, ccount registers */
++ edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt);
++ edma_parm_write(ctlr, PARM_CCNT, slot, ccnt);
++ }
++}
++EXPORT_SYMBOL(edma_set_transfer_params);
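++
++/*
++ * Example parameterization (illustrative; fifo_phys, buf_phys and the
++ * FIFO geometry are assumptions): an AB-synchronized read from a
++ * peripheral FIFO that is 4 bytes wide and 16 entries deep, collecting
++ * 64 frames per block into a contiguous buffer:
++ *
++ *	edma_set_src(slot, fifo_phys, FIFO, W32BIT);
++ *	edma_set_dest(slot, buf_phys, INCR, W8BIT);
++ *	edma_set_src_index(slot, 0, 0);
++ *	edma_set_dest_index(slot, 4, 64);
++ *	edma_set_transfer_params(slot, 4, 16, 64, 0, ABSYNC);
++ */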
++
++/**
++ * edma_link - link one parameter RAM slot to another
++ * @from: parameter RAM slot originating the link
++ * @to: parameter RAM slot which is the link target
++ *
++ * The originating slot should not be part of any active DMA transfer.
++ */
++void edma_link(unsigned from, unsigned to)
++{
++ unsigned ctlr_from, ctlr_to;
++
++ ctlr_from = EDMA_CTLR(from);
++ from = EDMA_CHAN_SLOT(from);
++ ctlr_to = EDMA_CTLR(to);
++ to = EDMA_CHAN_SLOT(to);
++
++ if (from >= edma_cc[ctlr_from]->num_slots)
++ return;
++ if (to >= edma_cc[ctlr_to]->num_slots)
++ return;
++ edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000,
++ PARM_OFFSET(to));
++}
++EXPORT_SYMBOL(edma_link);
++
++/**
++ * edma_unlink - cut link from one parameter RAM slot
++ * @from: parameter RAM slot originating the link
++ *
++ * The originating slot should not be part of any active DMA transfer.
++ * Its link is set to 0xffff.
++ */
++void edma_unlink(unsigned from)
++{
++ unsigned ctlr;
++
++ ctlr = EDMA_CTLR(from);
++ from = EDMA_CHAN_SLOT(from);
++
++ if (from >= edma_cc[ctlr]->num_slots)
++ return;
++ edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff);
++}
++EXPORT_SYMBOL(edma_unlink);
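++
++/*
++ * A minimal self-reloading ("ping-pong") sketch, assuming edma_alloc_slot()
++ * and EDMA_SLOT_ANY from this API, and that every slot involved has already
++ * been programmed via edma_write_slot() or the edma_set_*() helpers (chan
++ * here names the channel's own parameter RAM slot):
++ *
++ *    slot_a = edma_alloc_slot(EDMA_CTLR(chan), EDMA_SLOT_ANY);
++ *    slot_b = edma_alloc_slot(EDMA_CTLR(chan), EDMA_SLOT_ANY);
++ *    edma_link(chan, slot_a);
++ *    edma_link(slot_a, slot_b);
++ *    edma_link(slot_b, slot_a);
++ *
++ * The channel then reloads A, B, A, ... indefinitely; calling
++ * edma_unlink(slot_b) breaks the ring so the transfer finishes once
++ * slot B's parameter set has been consumed.
++ */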
++
++/*-----------------------------------------------------------------------*/
++
++/* Parameter RAM operations (ii) -- read/write whole parameter sets */
++
++/**
++ * edma_write_slot - write parameter RAM data for slot
++ * @slot: number of parameter RAM slot being modified
++ * @param: data to be written into parameter RAM slot
++ *
++ * Use this to assign all parameters of a transfer at once. This
++ * allows more efficient setup of transfers than issuing multiple
++ * calls to set up those parameters in small pieces, and provides
++ * complete control over all transfer options.
++ */
++void edma_write_slot(unsigned slot, const struct edmacc_param *param)
++{
++ unsigned ctlr;
++
++ ctlr = EDMA_CTLR(slot);
++ slot = EDMA_CHAN_SLOT(slot);
++
++ if (slot >= edma_cc[ctlr]->num_slots)
++ return;
++ memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param,
++ PARM_SIZE);
++}
++EXPORT_SYMBOL(edma_write_slot);
++
++/**
++ * edma_read_slot - read parameter RAM data from slot
++ * @slot: number of parameter RAM slot being copied
++ * @param: where to store copy of parameter RAM data
++ *
++ * Use this to read data from a parameter RAM slot, perhaps to
++ * save them as a template for later reuse.
++ */
++void edma_read_slot(unsigned slot, struct edmacc_param *param)
++{
++ unsigned ctlr;
++
++ ctlr = EDMA_CTLR(slot);
++ slot = EDMA_CHAN_SLOT(slot);
++
++ if (slot >= edma_cc[ctlr]->num_slots)
++ return;
++ memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
++ PARM_SIZE);
++}
++EXPORT_SYMBOL(edma_read_slot);
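++
++/*
++ * Example (illustrative; the dst member follows struct edmacc_param as
++ * declared in the EDMA header): capture one configured slot as a template
++ * and reuse it for a second buffer, patching only the destination address:
++ *
++ *    struct edmacc_param tmpl;
++ *
++ *    edma_read_slot(slot_a, &tmpl);
++ *    tmpl.dst = second_buf_dma;
++ *    edma_write_slot(slot_b, &tmpl);
++ */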
++
++/*-----------------------------------------------------------------------*/
++
++/* Various EDMA channel control operations */
++
++/**
++ * edma_pause - pause dma on a channel
++ * @channel: on which edma_start() has been called
++ *
++ * This temporarily disables EDMA hardware events on the specified channel,
++ * preventing them from triggering new transfers on its behalf.
++ */
++void edma_pause(unsigned channel)
++{
++ unsigned ctlr;
++
++ ctlr = EDMA_CTLR(channel);
++ channel = EDMA_CHAN_SLOT(channel);
++
++ if (channel < edma_cc[ctlr]->num_channels) {
++ unsigned int mask = BIT(channel & 0x1f);
++
++ edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask);
++ }
++}
++EXPORT_SYMBOL(edma_pause);
++
++/**
++ * edma_resume - resumes dma on a paused channel
++ * @channel: on which edma_pause() has been called
++ *
++ * This re-enables EDMA hardware events on the specified channel.
++ */
++void edma_resume(unsigned channel)
++{
++ unsigned ctlr;
++
++ ctlr = EDMA_CTLR(channel);
++ channel = EDMA_CHAN_SLOT(channel);
++
++ if (channel < edma_cc[ctlr]->num_channels) {
++ unsigned int mask = BIT(channel & 0x1f);
++
++ edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask);
++ }
++}
++EXPORT_SYMBOL(edma_resume);
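++
++/*
++ * A typical (illustrative) use of the pause/resume pair is to gate events
++ * briefly while retargeting a live channel at its next buffer, with
++ * edma_set_dest() and the INCR/W32BIT constants assumed from this API:
++ *
++ *    edma_pause(chan);
++ *    edma_set_dest(chan, next_buf_dma, INCR, W32BIT);
++ *    edma_resume(chan);
++ */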
++
++/**
++ * edma_start - start dma on a channel
++ * @channel: channel being activated
++ *
++ * Channels with event associations will be triggered by their hardware
++ * events, and channels without such associations will be triggered by
++ * software. (At this writing there is no interface for using software
++ * triggers except with channels that don't support hardware triggers.)
++ *
++ * Returns zero on success, else negative errno.
++ */
++int edma_start(unsigned channel)
++{
++ unsigned ctlr;
++
++ ctlr = EDMA_CTLR(channel);
++ channel = EDMA_CHAN_SLOT(channel);
++
++ if (channel < edma_cc[ctlr]->num_channels) {
++ int j = channel >> 5;
++ unsigned int mask = BIT(channel & 0x1f);
++
++ /* EDMA channels without event association */
++ if (test_bit(channel, edma_cc[ctlr]->edma_unused)) {
++ pr_debug("EDMA: ESR%d %08x\n", j,
++ edma_shadow0_read_array(ctlr, SH_ESR, j));
++ edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
++ return 0;
++ }
++
++ /* EDMA channel with event association */
++ pr_debug("EDMA: ER%d %08x\n", j,
++ edma_shadow0_read_array(ctlr, SH_ER, j));
++ /* Clear any pending event or error */
++ edma_write_array(ctlr, EDMA_ECR, j, mask);
++ edma_write_array(ctlr, EDMA_EMCR, j, mask);
++ /* Clear any SER */
++ edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
++ edma_shadow0_write_array(ctlr, SH_EESR, j, mask);
++ pr_debug("EDMA: EER%d %08x\n", j,
++ edma_shadow0_read_array(ctlr, SH_EER, j));
++ return 0;
++ }
++
++ return -EINVAL;
++}
++EXPORT_SYMBOL(edma_start);
++
++/**
++ * edma_stop - stops dma on the channel passed
++ * @channel: channel being deactivated
++ *
++ * Any active transfer on @channel is paused and all pending
++ * hardware events are cleared. The current transfer
++ * may not be resumed, and the channel's Parameter RAM should be
++ * reinitialized before being reused.
++ */
++void edma_stop(unsigned channel)
++{
++ unsigned ctlr;
++
++ ctlr = EDMA_CTLR(channel);
++ channel = EDMA_CHAN_SLOT(channel);
++
++ if (channel < edma_cc[ctlr]->num_channels) {
++ int j = channel >> 5;
++ unsigned int mask = BIT(channel & 0x1f);
++
++ edma_shadow0_write_array(ctlr, SH_EECR, j, mask);
++ edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
++ edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
++ edma_write_array(ctlr, EDMA_EMCR, j, mask);
++
++ pr_debug("EDMA: EER%d %08x\n", j,
++ edma_shadow0_read_array(ctlr, SH_EER, j));
++
++ /* REVISIT: consider guarding against inappropriate event
++ * chaining by overwriting with dummy_paramset.
++ */
++ }
++}
++EXPORT_SYMBOL(edma_stop);
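++
++/*
++ * A hedged sketch of a complete channel lifecycle.  edma_alloc_channel(),
++ * edma_free_channel(), EDMA_CHANNEL_ANY and the DMA_COMPLETE callback
++ * status are assumed to come from the EDMA header alongside the calls
++ * shown above:
++ *
++ *    static void xfer_done(unsigned chan, u16 status, void *data)
++ *    {
++ *        if (status == DMA_COMPLETE)
++ *            complete(data);
++ *    }
++ *
++ *    chan = edma_alloc_channel(EDMA_CHANNEL_ANY, xfer_done, &done, EVENTQ_1);
++ *    ... program the slot with the edma_set_*() calls above ...
++ *    edma_start(chan);
++ *    wait_for_completion(&done);
++ *    edma_stop(chan);
++ *    edma_free_channel(chan);
++ *
++ * where "done" is a struct completion owned by the caller.
++ */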
++
++/******************************************************************************
++ *
++ * edma_clean_channel - clean the PaRAM entry and bring EDMA back to its
++ * initial state if the media was removed before EDMA finished.  It is
++ * useful for removable media.
++ * Arguments:
++ * channel - channel number
++ *
++ * FIXME this should not be needed ... edma_stop() should suffice.
++ *
++ *****************************************************************************/
++
++void edma_clean_channel(unsigned channel)
++{
++ unsigned ctlr;
++
++ ctlr = EDMA_CTLR(channel);
++ channel = EDMA_CHAN_SLOT(channel);
++
++ if (channel < edma_cc[ctlr]->num_channels) {
++ int j = (channel >> 5);
++ unsigned int mask = BIT(channel & 0x1f);
++
++ pr_debug("EDMA: EMR%d %08x\n", j,
++ edma_read_array(ctlr, EDMA_EMR, j));
++ edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
++ /* Clear the corresponding EMR bits */
++ edma_write_array(ctlr, EDMA_EMCR, j, mask);
++ /* Clear any SER */
++ edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
++ edma_write(ctlr, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
++ }
++}
++EXPORT_SYMBOL(edma_clean_channel);
++
++/*
++ * edma_clear_event - clear an outstanding event on the DMA channel
++ * Arguments:
++ * channel - channel number
++ */
++void edma_clear_event(unsigned channel)
++{
++ unsigned ctlr;
++
++ ctlr = EDMA_CTLR(channel);
++ channel = EDMA_CHAN_SLOT(channel);
++
++ if (channel >= edma_cc[ctlr]->num_channels)
++ return;
++ if (channel < 32)
++ edma_write(ctlr, EDMA_ECR, BIT(channel));
++ else
++ edma_write(ctlr, EDMA_ECRH, BIT(channel - 32));
++}
++EXPORT_SYMBOL(edma_clear_event);
++
++/*-----------------------------------------------------------------------*/
++
++static int __init edma_probe(struct platform_device *pdev)
++{
++ struct edma_soc_info *info = pdev->dev.platform_data;
++ const s8 (*queue_priority_mapping)[2];
++ const s8 (*queue_tc_mapping)[2];
++ int i, j, off, ln, found = 0;
++ int status = -1;
++ const s16 (*rsv_chans)[2];
++ const s16 (*rsv_slots)[2];
++ int irq[EDMA_MAX_CC] = {0, 0};
++ int err_irq[EDMA_MAX_CC] = {0, 0};
++ struct resource *r[EDMA_MAX_CC] = {NULL};
++ resource_size_t len[EDMA_MAX_CC];
++ char res_name[10];
++ char irq_name[10];
++
++ if (!info)
++ return -ENODEV;
++
++ pm_runtime_enable(&pdev->dev);
++ pm_runtime_get_sync(&pdev->dev);
++
++ for (j = 0; j < EDMA_MAX_CC; j++) {
++ sprintf(res_name, "edma_cc%d", j);
++ r[j] = platform_get_resource_byname(pdev, IORESOURCE_MEM,
++ res_name);
++ if (!r[j]) {
++ if (found)
++ break;
++ else
++ return -ENODEV;
++ } else {
++ found = 1;
++ }
++
++ len[j] = resource_size(r[j]);
++
++ r[j] = request_mem_region(r[j]->start, len[j],
++ dev_name(&pdev->dev));
++ if (!r[j]) {
++ status = -EBUSY;
++ goto fail1;
++ }
++
++ edmacc_regs_base[j] = ioremap(r[j]->start, len[j]);
++ if (!edmacc_regs_base[j]) {
++ status = -EBUSY;
++ goto fail1;
++ }
++
++ edma_cc[j] = kzalloc(sizeof(struct edma), GFP_KERNEL);
++ if (!edma_cc[j]) {
++ status = -ENOMEM;
++ goto fail1;
++ }
++
++ edma_cc[j]->num_channels = min_t(unsigned, info[j].n_channel,
++ EDMA_MAX_DMACH);
++ edma_cc[j]->num_slots = min_t(unsigned, info[j].n_slot,
++ EDMA_MAX_PARAMENTRY);
++ edma_cc[j]->num_cc = min_t(unsigned, info[j].n_cc, EDMA_MAX_CC);
++ edma_cc[j]->num_region = min_t(unsigned, info[j].n_region,
++ EDMA_MAX_REGION);
++
++ edma_cc[j]->bkp_prm_set = kzalloc((sizeof(struct edmacc_param) *
++ edma_cc[j]->num_slots),
++ GFP_KERNEL);
++ if (!edma_cc[j]->bkp_prm_set) {
++ status = -ENOMEM;
++ dev_err(&pdev->dev, "err: mem alloc bkp_prm_set\n");
++ goto fail1;
++ }
++
++ edma_cc[j]->bkp_ch_map = kzalloc((sizeof(unsigned int) *
++ edma_cc[j]->num_channels),
++ GFP_KERNEL);
++ if (!edma_cc[j]->bkp_ch_map) {
++ status = -ENOMEM;
++ dev_err(&pdev->dev, "err: mem alloc bkp_ch_map\n");
++ goto fail1;
++ }
++
++ edma_cc[j]->bkp_que_num = kzalloc((sizeof(unsigned int) * 8),
++ GFP_KERNEL);
++ if (!edma_cc[j]->bkp_que_num) {
++ status = -ENOMEM;
++ dev_err(&pdev->dev, "err: mem alloc bkp_que_num\n");
++ goto fail1;
++ }
++
++ edma_cc[j]->bkp_drae = kzalloc((sizeof(unsigned int) *
++ edma_cc[j]->num_region),
++ GFP_KERNEL);
++ if (!edma_cc[j]->bkp_drae) {
++ status = -ENOMEM;
++ dev_err(&pdev->dev, "err: mem alloc bkp_drae\n");
++ goto fail1;
++ }
++
++ edma_cc[j]->bkp_draeh = kzalloc((sizeof(unsigned int) *
++ edma_cc[j]->num_region),
++ GFP_KERNEL);
++ if (!edma_cc[j]->bkp_draeh) {
++ status = -ENOMEM;
++ dev_err(&pdev->dev, "err: mem alloc bkp_draeh\n");
++ goto fail1;
++ }
++
++ edma_cc[j]->bkp_qrae = kzalloc((sizeof(unsigned int) *
++ edma_cc[j]->num_region),
++ GFP_KERNEL);
++ if (!edma_cc[j]->bkp_qrae) {
++ status = -ENOMEM;
++ dev_err(&pdev->dev, "err: mem alloc bkp_qrae\n");
++ goto fail1;
++ }
++
++ edma_cc[j]->default_queue = info[j].default_queue;
++ if (!edma_cc[j]->default_queue)
++ edma_cc[j]->default_queue = EVENTQ_1;
++
++ dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n",
++ edmacc_regs_base[j]);
++
++ for (i = 0; i < edma_cc[j]->num_slots; i++)
++ memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i),
++ &dummy_paramset, PARM_SIZE);
++
++ /* Mark all channels as unused */
++ memset(edma_cc[j]->edma_unused, 0xff,
++ sizeof(edma_cc[j]->edma_unused));
++
++ /* Clear the reserved channels in unused list */
++ rsv_chans = info[j].rsv_chans;
++ if (rsv_chans) {
++ for (i = 0; rsv_chans[i][0] != -1; i++) {
++ off = rsv_chans[i][0];
++ ln = rsv_chans[i][1];
++ /* confirm the range */
++ if ((off+ln) < EDMA_MAX_DMACH)
++ clear_bits(off, ln,
++ edma_cc[j]->edma_unused);
++ }
++ }
++
++ /* Set the reserved channels/slots in inuse list */
++ rsv_slots = info[j].rsv_slots;
++ if (rsv_slots) {
++ for (i = 0; rsv_slots[i][0] != -1; i++) {
++ off = rsv_slots[i][0];
++ ln = rsv_slots[i][1];
++ set_bits(off, ln, edma_cc[j]->edma_inuse);
++ }
++ }
++
++ sprintf(irq_name, "edma%d", j);
++ irq[j] = platform_get_irq_byname(pdev, irq_name);
++ edma_cc[j]->irq_res_start = irq[j];
++ status = request_irq(irq[j], dma_irq_handler, 0, "edma",
++ &pdev->dev);
++ if (status < 0) {
++ dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
++ irq[j], status);
++ goto fail;
++ }
++
++ sprintf(irq_name, "edma%d_err", j);
++ err_irq[j] = platform_get_irq_byname(pdev, irq_name);
++ edma_cc[j]->irq_res_end = err_irq[j];
++ status = request_irq(err_irq[j], dma_ccerr_handler, 0,
++ "edma_error", &pdev->dev);
++ if (status < 0) {
++ dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
++ err_irq[j], status);
++ goto fail;
++ }
++
++ /* Everything lives on transfer controller 1 until otherwise
++ * specified. This way, long transfers on the low priority queue
++ * started by the codec engine will not cause audio defects.
++ */
++ for (i = 0; i < edma_cc[j]->num_channels; i++)
++ map_dmach_queue(j, i, EVENTQ_1);
++
++ queue_tc_mapping = info[j].queue_tc_mapping;
++ queue_priority_mapping = info[j].queue_priority_mapping;
++
++ /* Event queue to TC mapping */
++ for (i = 0; queue_tc_mapping[i][0] != -1; i++)
++ map_queue_tc(j, queue_tc_mapping[i][0],
++ queue_tc_mapping[i][1]);
++
++ /* Event queue priority mapping */
++ for (i = 0; queue_priority_mapping[i][0] != -1; i++)
++ assign_priority_to_queue(j,
++ queue_priority_mapping[i][0],
++ queue_priority_mapping[i][1]);
++
++ /* Map the channel to param entry if channel mapping logic
++ * exists
++ */
++ if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
++ map_dmach_param(j);
++
++ for (i = 0; i < info[j].n_region; i++) {
++ edma_write_array2(j, EDMA_DRAE, i, 0, 0x0);
++ edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
++ edma_write_array(j, EDMA_QRAE, i, 0x0);
++ }
++
++ edma_cc[j]->is_xbar = info[j].is_xbar;
++
++ if (edma_cc[j]->is_xbar) {
++ edma_cc[j]->num_events = info[j].n_events;
++ edma_cc[j]->xbar_event_mapping =
++ info[j].xbar_event_mapping;
++ xbar_event_to_channel_map = info[j].map_xbar_channel;
++ }
++
++ arch_num_cc++;
++ }
++
++ return 0;
++
++fail:
++ for (i = 0; i < EDMA_MAX_CC; i++) {
++ if (err_irq[i])
++ free_irq(err_irq[i], &pdev->dev);
++ if (irq[i])
++ free_irq(irq[i], &pdev->dev);
++ }
++fail1:
++ for (i = 0; i < EDMA_MAX_CC; i++) {
++ if (r[i])
++ release_mem_region(r[i]->start, len[i]);
++ if (edmacc_regs_base[i])
++ iounmap(edmacc_regs_base[i]);
++ kfree(edma_cc[i]);
++ }
++ pm_runtime_put_sync(&pdev->dev);
++ pm_runtime_disable(&pdev->dev);
++ return status;
++}
++
++#ifdef CONFIG_PM
++static int edma3_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ int i, j;
++
++ for (i = 0; i < arch_num_cc; i++) {
++ /* backup channel data */
++ for (j = 0; j < edma_cc[i]->num_channels; j++) {
++ edma_cc[i]->bkp_ch_map[j] = edma_read_array(i,
++ EDMA_DCHMAP, j);
++ }
++
++ /* backup DMA Queue Number */
++ for (j = 0; j < 8; j++) {
++ edma_cc[i]->bkp_que_num[j] = edma_read_array(i,
++ EDMA_DMAQNUM, j);
++ }
++
++ for (j = 0; j < edma_cc[i]->num_region; j++) {
++ /* backup DMA Region Access Enable data */
++ edma_cc[i]->bkp_drae[j] = edma_read_array2(i,
++ EDMA_DRAE, j, 0);
++ edma_cc[i]->bkp_draeh[j] = edma_read_array2(i,
++ EDMA_DRAE, j, 1);
++
++ /* backup DMA QDMA Region Access Enable data */
++ edma_cc[i]->bkp_qrae[j] = edma_read_array(i,
++ EDMA_QRAE, j);
++ }
++
++ /* backup DMA shadow Event Set data */
++ edma_cc[i]->bkp_sh_esr = edma_shadow0_read_array(i, SH_ESR, 0);
++ edma_cc[i]->bkp_sh_esrh = edma_shadow0_read_array(i, SH_ESR, 1);
++
++ /* backup DMA Shadow Event Enable Set data */
++ edma_cc[i]->bkp_sh_eesr = edma_shadow0_read_array(i,
++ SH_EER, 0);
++ edma_cc[i]->bkp_sh_eesrh = edma_shadow0_read_array(i,
++ SH_EER, 1);
++
++ /* backup DMA Shadow Interrupt Enable Set data */
++ edma_cc[i]->bkp_sh_iesr = edma_shadow0_read_array(i,
++ SH_IER, 0);
++ edma_cc[i]->bkp_sh_iesrh = edma_shadow0_read_array(i,
++ SH_IER, 1);
++
++ edma_cc[i]->bkp_que_tc_map = edma_read(i, EDMA_QUETCMAP);
++
++ /* backup DMA Queue Priority data */
++ edma_cc[i]->bkp_que_pri = edma_read(i, EDMA_QUEPRI);
++
++ /* backup paramset */
++ for (j = 0; j < edma_cc[i]->num_slots; j++) {
++ memcpy_fromio(&edma_cc[i]->bkp_prm_set[j],
++ edmacc_regs_base[i] + PARM_OFFSET(j),
++ PARM_SIZE);
++ }
++ }
++
++ pm_runtime_put_sync(&pdev->dev);
++
++ return 0;
++}
++
++static int edma3_resume(struct platform_device *pdev)
++{
++ int i, j;
++
++ pm_runtime_get_sync(&pdev->dev);
++
++ for (i = 0; i < arch_num_cc; i++) {
++
++ /* restore channel data */
++ for (j = 0; j < edma_cc[i]->num_channels; j++) {
++ edma_write_array(i, EDMA_DCHMAP, j,
++ edma_cc[i]->bkp_ch_map[j]);
++ }
++
++ /* restore DMA Queue Number */
++ for (j = 0; j < 8; j++) {
++ edma_write_array(i, EDMA_DMAQNUM, j,
++ edma_cc[i]->bkp_que_num[j]);
++ }
++
++ for (j = 0; j < edma_cc[i]->num_region; j++) {
++ /* restore DMA Region Access Enable data */
++ edma_write_array2(i, EDMA_DRAE, j, 0,
++ edma_cc[i]->bkp_drae[j]);
++ edma_write_array2(i, EDMA_DRAE, j, 1,
++ edma_cc[i]->bkp_draeh[j]);
++
++ /* restore DMA QDMA Region Access Enable data */
++ edma_write_array(i, EDMA_QRAE, j,
++ edma_cc[i]->bkp_qrae[j]);
++ }
++
++ /* restore DMA shadow Event Set data */
++ edma_shadow0_write_array(i, SH_ESR, 0, edma_cc[i]->bkp_sh_esr);
++ edma_shadow0_write_array(i, SH_ESR, 1, edma_cc[i]->bkp_sh_esrh);
++
++ /* restore DMA Shadow Event Enable Set data */
++ edma_shadow0_write_array(i, SH_EESR, 0,
++ edma_cc[i]->bkp_sh_eesr);
++ edma_shadow0_write_array(i, SH_EESR, 1,
++ edma_cc[i]->bkp_sh_eesrh);
++
++ /* restore DMA Shadow Interrupt Enable Set data */
++ edma_shadow0_write_array(i, SH_IESR, 0,
++ edma_cc[i]->bkp_sh_iesr);
++ edma_shadow0_write_array(i, SH_IESR, 1,
++ edma_cc[i]->bkp_sh_iesrh);
++
++ edma_write(i, EDMA_QUETCMAP, edma_cc[i]->bkp_que_tc_map);
++
++ /* restore DMA Queue Priority data */
++ edma_write(i, EDMA_QUEPRI, edma_cc[i]->bkp_que_pri);
++
++ /* restore paramset */
++ for (j = 0; j < edma_cc[i]->num_slots; j++) {
++ memcpy_toio(edmacc_regs_base[i] + PARM_OFFSET(j),
++ &edma_cc[i]->bkp_prm_set[j], PARM_SIZE);
++ }
++ }
++
++ return 0;
++}
++
++#else
++#define edma3_suspend NULL
++#define edma3_resume NULL
++#endif
++
++static struct platform_driver edma_driver = {
++ .driver.name = "edma",
++ .suspend = edma3_suspend,
++ .resume = edma3_resume,
++};
++
++static int __init edma_init(void)
++{
++ return platform_driver_probe(&edma_driver, edma_probe);
++}
++subsys_initcall(edma_init);
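++
++/*
++ * The probe above is fed by per-controller platform data; a board file
++ * might, for instance, register something like the following (the field
++ * names are the ones the probe reads, the values are illustrative and not
++ * taken from any particular SoC):
++ *
++ *    static struct edma_soc_info edma_cc0_info = {
++ *        .n_channel              = 64,
++ *        .n_region               = 4,
++ *        .n_slot                 = 256,
++ *        .n_cc                   = 1,
++ *        .default_queue          = EVENTQ_1,
++ *        .queue_tc_mapping       = cc0_queue_tc_mapping,
++ *        .queue_priority_mapping = cc0_queue_priority_mapping,
++ *    };
++ *
++ * together with an "edma_cc0" memory resource and "edma0"/"edma0_err" IRQ
++ * resources on the "edma" platform device.
++ */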
+diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
+index 410a546..b2dc2dd 100644
+--- a/arch/arm/common/gic.c
++++ b/arch/arm/common/gic.c
+@@ -40,13 +40,36 @@
+ #include <linux/slab.h>
+
+ #include <asm/irq.h>
++#include <asm/exception.h>
+ #include <asm/mach/irq.h>
+ #include <asm/hardware/gic.h>
+
+-static DEFINE_RAW_SPINLOCK(irq_controller_lock);
++union gic_base {
++ void __iomem *common_base;
++ void __percpu __iomem **percpu_base;
++};
+
+-/* Address of GIC 0 CPU interface */
+-void __iomem *gic_cpu_base_addr __read_mostly;
++struct gic_chip_data {
++ unsigned int irq_offset;
++ union gic_base dist_base;
++ union gic_base cpu_base;
++#ifdef CONFIG_CPU_PM
++ u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
++ u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
++ u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
++ u32 __percpu *saved_ppi_enable;
++ u32 __percpu *saved_ppi_conf;
++#endif
++#ifdef CONFIG_IRQ_DOMAIN
++ struct irq_domain domain;
++#endif
++ unsigned int gic_irqs;
++#ifdef CONFIG_GIC_NON_BANKED
++ void __iomem *(*get_base)(union gic_base *);
++#endif
++};
++
++static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+
+ /*
+ * Supported arch specific GIC irq extension.
+@@ -67,16 +90,48 @@ struct irq_chip gic_arch_extn = {
+
+ static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
+
++#ifdef CONFIG_GIC_NON_BANKED
++static void __iomem *gic_get_percpu_base(union gic_base *base)
++{
++ return *__this_cpu_ptr(base->percpu_base);
++}
++
++static void __iomem *gic_get_common_base(union gic_base *base)
++{
++ return base->common_base;
++}
++
++static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
++{
++ return data->get_base(&data->dist_base);
++}
++
++static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
++{
++ return data->get_base(&data->cpu_base);
++}
++
++static inline void gic_set_base_accessor(struct gic_chip_data *data,
++ void __iomem *(*f)(union gic_base *))
++{
++ data->get_base = f;
++}
++#else
++#define gic_data_dist_base(d) ((d)->dist_base.common_base)
++#define gic_data_cpu_base(d) ((d)->cpu_base.common_base)
++#define gic_set_base_accessor(d,f)
++#endif
++
+ static inline void __iomem *gic_dist_base(struct irq_data *d)
+ {
+ struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
+- return gic_data->dist_base;
++ return gic_data_dist_base(gic_data);
+ }
+
+ static inline void __iomem *gic_cpu_base(struct irq_data *d)
+ {
+ struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
+- return gic_data->cpu_base;
++ return gic_data_cpu_base(gic_data);
+ }
+
+ static inline unsigned int gic_irq(struct irq_data *d)
+@@ -215,6 +270,32 @@ static int gic_set_wake(struct irq_data *d, unsigned int on)
+ #define gic_set_wake NULL
+ #endif
+
++asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
++{
++ u32 irqstat, irqnr;
++ struct gic_chip_data *gic = &gic_data[0];
++ void __iomem *cpu_base = gic_data_cpu_base(gic);
++
++ do {
++ irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
++ irqnr = irqstat & ~0x1c00;
++
++ if (likely(irqnr > 15 && irqnr < 1021)) {
++ irqnr = irq_domain_to_irq(&gic->domain, irqnr);
++ handle_IRQ(irqnr, regs);
++ continue;
++ }
++ if (irqnr < 16) {
++ writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
++#ifdef CONFIG_SMP
++ handle_IPI(irqnr, regs);
++#endif
++ continue;
++ }
++ break;
++ } while (1);
++}
++
+ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
+ {
+ struct gic_chip_data *chip_data = irq_get_handler_data(irq);
+@@ -225,7 +306,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
+ chained_irq_enter(chip, desc);
+
+ raw_spin_lock(&irq_controller_lock);
+- status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK);
++ status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
+ raw_spin_unlock(&irq_controller_lock);
+
+ gic_irq = (status & 0x3ff);
+@@ -270,7 +351,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
+ u32 cpumask;
+ unsigned int gic_irqs = gic->gic_irqs;
+ struct irq_domain *domain = &gic->domain;
+- void __iomem *base = gic->dist_base;
++ void __iomem *base = gic_data_dist_base(gic);
+ u32 cpu = 0;
+
+ #ifdef CONFIG_SMP
+@@ -330,8 +411,8 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
+
+ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
+ {
+- void __iomem *dist_base = gic->dist_base;
+- void __iomem *base = gic->cpu_base;
++ void __iomem *dist_base = gic_data_dist_base(gic);
++ void __iomem *base = gic_data_cpu_base(gic);
+ int i;
+
+ /*
+@@ -368,7 +449,7 @@ static void gic_dist_save(unsigned int gic_nr)
+ BUG();
+
+ gic_irqs = gic_data[gic_nr].gic_irqs;
+- dist_base = gic_data[gic_nr].dist_base;
++ dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+
+ if (!dist_base)
+ return;
+@@ -403,7 +484,7 @@ static void gic_dist_restore(unsigned int gic_nr)
+ BUG();
+
+ gic_irqs = gic_data[gic_nr].gic_irqs;
+- dist_base = gic_data[gic_nr].dist_base;
++ dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+
+ if (!dist_base)
+ return;
+@@ -439,8 +520,8 @@ static void gic_cpu_save(unsigned int gic_nr)
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+- dist_base = gic_data[gic_nr].dist_base;
+- cpu_base = gic_data[gic_nr].cpu_base;
++ dist_base = gic_data_dist_base(&gic_data[gic_nr]);
++ cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
+
+ if (!dist_base || !cpu_base)
+ return;
+@@ -465,8 +546,8 @@ static void gic_cpu_restore(unsigned int gic_nr)
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+- dist_base = gic_data[gic_nr].dist_base;
+- cpu_base = gic_data[gic_nr].cpu_base;
++ dist_base = gic_data_dist_base(&gic_data[gic_nr]);
++ cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
+
+ if (!dist_base || !cpu_base)
+ return;
+@@ -491,6 +572,11 @@ static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
+ int i;
+
+ for (i = 0; i < MAX_GIC_NR; i++) {
++#ifdef CONFIG_GIC_NON_BANKED
++ /* Skip over unused GICs */
++ if (!gic_data[i].get_base)
++ continue;
++#endif
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ gic_cpu_save(i);
+@@ -564,8 +650,9 @@ const struct irq_domain_ops gic_irq_domain_ops = {
+ #endif
+ };
+
+-void __init gic_init(unsigned int gic_nr, int irq_start,
+- void __iomem *dist_base, void __iomem *cpu_base)
++void __init gic_init_bases(unsigned int gic_nr, int irq_start,
++ void __iomem *dist_base, void __iomem *cpu_base,
++ u32 percpu_offset)
+ {
+ struct gic_chip_data *gic;
+ struct irq_domain *domain;
+@@ -575,8 +662,36 @@ void __init gic_init(unsigned int gic_nr, int irq_start,
+
+ gic = &gic_data[gic_nr];
+ domain = &gic->domain;
+- gic->dist_base = dist_base;
+- gic->cpu_base = cpu_base;
++#ifdef CONFIG_GIC_NON_BANKED
++ if (percpu_offset) { /* Franken-GIC without banked registers... */
++ unsigned int cpu;
++
++ gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
++ gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
++ if (WARN_ON(!gic->dist_base.percpu_base ||
++ !gic->cpu_base.percpu_base)) {
++ free_percpu(gic->dist_base.percpu_base);
++ free_percpu(gic->cpu_base.percpu_base);
++ return;
++ }
++
++ for_each_possible_cpu(cpu) {
++ unsigned long offset = percpu_offset * cpu_logical_map(cpu);
++ *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
++ *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
++ }
++
++ gic_set_base_accessor(gic, gic_get_percpu_base);
++ } else
++#endif
++ { /* Normal, sane GIC... */
++ WARN(percpu_offset,
++ "GIC_NON_BANKED not enabled, ignoring %08x offset!",
++ percpu_offset);
++ gic->dist_base.common_base = dist_base;
++ gic->cpu_base.common_base = cpu_base;
++ gic_set_base_accessor(gic, gic_get_common_base);
++ }
+
+ /*
+ * For primary GICs, skip over SGIs.
+@@ -584,8 +699,6 @@ void __init gic_init(unsigned int gic_nr, int irq_start,
+ */
+ domain->hwirq_base = 32;
+ if (gic_nr == 0) {
+- gic_cpu_base_addr = cpu_base;
+-
+ if ((irq_start & 31) > 0) {
+ domain->hwirq_base = 16;
+ if (irq_start != -1)
+@@ -597,7 +710,7 @@ void __init gic_init(unsigned int gic_nr, int irq_start,
+ * Find out how many interrupts are supported.
+ * The GIC only supports up to 1020 interrupt sources.
+ */
+- gic_irqs = readl_relaxed(dist_base + GIC_DIST_CTR) & 0x1f;
++ gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
+ gic_irqs = (gic_irqs + 1) * 32;
+ if (gic_irqs > 1020)
+ gic_irqs = 1020;
+@@ -645,7 +758,7 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
+ dsb();
+
+ /* this always happens on GIC0 */
+- writel_relaxed(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
++ writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+ }
+ #endif
+
+@@ -656,6 +769,7 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
+ {
+ void __iomem *cpu_base;
+ void __iomem *dist_base;
++ u32 percpu_offset;
+ int irq;
+ struct irq_domain *domain = &gic_data[gic_cnt].domain;
+
+@@ -668,9 +782,12 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
+ cpu_base = of_iomap(node, 1);
+ WARN(!cpu_base, "unable to map gic cpu registers\n");
+
++ if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
++ percpu_offset = 0;
++
+ domain->of_node = of_node_get(node);
+
+- gic_init(gic_cnt, -1, dist_base, cpu_base);
++ gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset);
+
+ if (parent) {
+ irq = irq_of_parse_and_map(node, 0);
+diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
+index 01f18a4..6ed41ec 100644
+--- a/arch/arm/common/vic.c
++++ b/arch/arm/common/vic.c
+@@ -19,17 +19,22 @@
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
++#include <linux/export.h>
+ #include <linux/init.h>
+ #include <linux/list.h>
+ #include <linux/io.h>
++#include <linux/irqdomain.h>
++#include <linux/of.h>
++#include <linux/of_address.h>
++#include <linux/of_irq.h>
+ #include <linux/syscore_ops.h>
+ #include <linux/device.h>
+ #include <linux/amba/bus.h>
+
++#include <asm/exception.h>
+ #include <asm/mach/irq.h>
+ #include <asm/hardware/vic.h>
+
+-#ifdef CONFIG_PM
+ /**
+ * struct vic_device - VIC PM device
+ * @irq: The IRQ number for the base of the VIC.
+@@ -40,6 +45,7 @@
+ * @int_enable: Save for VIC_INT_ENABLE.
+ * @soft_int: Save for VIC_INT_SOFT.
+ * @protect: Save for VIC_PROTECT.
++ * @domain: The IRQ domain for the VIC.
+ */
+ struct vic_device {
+ void __iomem *base;
+@@ -50,13 +56,13 @@ struct vic_device {
+ u32 int_enable;
+ u32 soft_int;
+ u32 protect;
++ struct irq_domain domain;
+ };
+
+ /* we cannot allocate memory when VICs are initially registered */
+ static struct vic_device vic_devices[CONFIG_ARM_VIC_NR];
+
+ static int vic_id;
+-#endif /* CONFIG_PM */
+
+ /**
+ * vic_init2 - common initialisation code
+@@ -156,39 +162,50 @@ static int __init vic_pm_init(void)
+ return 0;
+ }
+ late_initcall(vic_pm_init);
++#endif /* CONFIG_PM */
+
+ /**
+- * vic_pm_register - Register a VIC for later power management control
++ * vic_register() - Register a VIC.
+ * @base: The base address of the VIC.
+ * @irq: The base IRQ for the VIC.
+ * @resume_sources: bitmask of interrupts allowed for resume sources.
++ * @node: The device tree node associated with the VIC.
+ *
+ * Register the VIC with the system device tree so that it can be notified
+ * of suspend and resume requests and ensure that the correct actions are
+ * taken to re-instate the settings on resume.
++ *
++ * This also configures the IRQ domain for the VIC.
+ */
+-static void __init vic_pm_register(void __iomem *base, unsigned int irq, u32 resume_sources)
++static void __init vic_register(void __iomem *base, unsigned int irq,
++ u32 resume_sources, struct device_node *node)
+ {
+ struct vic_device *v;
+
+- if (vic_id >= ARRAY_SIZE(vic_devices))
++ if (vic_id >= ARRAY_SIZE(vic_devices)) {
+ printk(KERN_ERR "%s: too few VICs, increase CONFIG_ARM_VIC_NR\n", __func__);
+- else {
+- v = &vic_devices[vic_id];
+- v->base = base;
+- v->resume_sources = resume_sources;
+- v->irq = irq;
+- vic_id++;
++ return;
+ }
++
++ v = &vic_devices[vic_id];
++ v->base = base;
++ v->resume_sources = resume_sources;
++ v->irq = irq;
++ vic_id++;
++
++ v->domain.irq_base = irq;
++ v->domain.nr_irq = 32;
++#ifdef CONFIG_OF_IRQ
++ v->domain.of_node = of_node_get(node);
++ v->domain.ops = &irq_domain_simple_ops;
++#endif /* CONFIG_OF_IRQ */
++ irq_domain_add(&v->domain);
+ }
+-#else
+-static inline void vic_pm_register(void __iomem *base, unsigned int irq, u32 arg1) { }
+-#endif /* CONFIG_PM */
+
+ static void vic_ack_irq(struct irq_data *d)
+ {
+ void __iomem *base = irq_data_get_irq_chip_data(d);
+- unsigned int irq = d->irq & 31;
++ unsigned int irq = d->hwirq;
+ writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
+ /* moreover, clear the soft-triggered, in case it was the reason */
+ writel(1 << irq, base + VIC_INT_SOFT_CLEAR);
+@@ -197,14 +214,14 @@ static void vic_ack_irq(struct irq_data *d)
+ static void vic_mask_irq(struct irq_data *d)
+ {
+ void __iomem *base = irq_data_get_irq_chip_data(d);
+- unsigned int irq = d->irq & 31;
++ unsigned int irq = d->hwirq;
+ writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
+ }
+
+ static void vic_unmask_irq(struct irq_data *d)
+ {
+ void __iomem *base = irq_data_get_irq_chip_data(d);
+- unsigned int irq = d->irq & 31;
++ unsigned int irq = d->hwirq;
+ writel(1 << irq, base + VIC_INT_ENABLE);
+ }
+
+@@ -226,7 +243,7 @@ static struct vic_device *vic_from_irq(unsigned int irq)
+ static int vic_set_wake(struct irq_data *d, unsigned int on)
+ {
+ struct vic_device *v = vic_from_irq(d->irq);
+- unsigned int off = d->irq & 31;
++ unsigned int off = d->hwirq;
+ u32 bit = 1 << off;
+
+ if (!v)
+@@ -330,15 +347,9 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
+ vic_set_irq_sources(base, irq_start, vic_sources);
+ }
+
+-/**
+- * vic_init - initialise a vectored interrupt controller
+- * @base: iomem base address
+- * @irq_start: starting interrupt number, must be muliple of 32
+- * @vic_sources: bitmask of interrupt sources to allow
+- * @resume_sources: bitmask of interrupt sources to allow for resume
+- */
+-void __init vic_init(void __iomem *base, unsigned int irq_start,
+- u32 vic_sources, u32 resume_sources)
++static void __init __vic_init(void __iomem *base, unsigned int irq_start,
++ u32 vic_sources, u32 resume_sources,
++ struct device_node *node)
+ {
+ unsigned int i;
+ u32 cellid = 0;
+@@ -375,5 +386,81 @@ void __init vic_init(void __iomem *base, unsigned int irq_start,
+
+ vic_set_irq_sources(base, irq_start, vic_sources);
+
+- vic_pm_register(base, irq_start, resume_sources);
++ vic_register(base, irq_start, resume_sources, node);
++}
++
++/**
++ * vic_init() - initialise a vectored interrupt controller
++ * @base: iomem base address
++ * @irq_start: starting interrupt number, must be a multiple of 32
++ * @vic_sources: bitmask of interrupt sources to allow
++ * @resume_sources: bitmask of interrupt sources to allow for resume
++ */
++void __init vic_init(void __iomem *base, unsigned int irq_start,
++ u32 vic_sources, u32 resume_sources)
++{
++ __vic_init(base, irq_start, vic_sources, resume_sources, NULL);
++}
++
++#ifdef CONFIG_OF
++int __init vic_of_init(struct device_node *node, struct device_node *parent)
++{
++ void __iomem *regs;
++ int irq_base;
++
++ if (WARN(parent, "non-root VICs are not supported"))
++ return -EINVAL;
++
++ regs = of_iomap(node, 0);
++ if (WARN_ON(!regs))
++ return -EIO;
++
++ irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id());
++ if (WARN_ON(irq_base < 0))
++ goto out_unmap;
++
++ __vic_init(regs, irq_base, ~0, ~0, node);
++
++ return 0;
++
++ out_unmap:
++ iounmap(regs);
++
++ return -EIO;
++}
++#endif /* CONFIG_OF */
++
++/*
++ * Handle each interrupt in a single VIC. Returns non-zero if we've
++ * handled at least one interrupt. This does a single read of the
++ * status register and handles all interrupts in order from LSB first.
++ */
++static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
++{
++ u32 stat, irq;
++ int handled = 0;
++
++ stat = readl_relaxed(vic->base + VIC_IRQ_STATUS);
++ while (stat) {
++ irq = ffs(stat) - 1;
++ handle_IRQ(irq_domain_to_irq(&vic->domain, irq), regs);
++ stat &= ~(1 << irq);
++ handled = 1;
++ }
++
++ return handled;
++}
++
++/*
++ * Keep iterating over all registered VIC's until there are no pending
++ * interrupts.
++ */
++asmlinkage void __exception_irq_entry vic_handle_irq(struct pt_regs *regs)
++{
++ int i, handled;
++
++ do {
++ for (i = 0, handled = 0; i < vic_id; ++i)
++ handled |= handle_one_vic(&vic_devices[i], regs);
++ } while (handled);
+ }
+diff --git a/arch/arm/configs/am335x_evm_defconfig b/arch/arm/configs/am335x_evm_defconfig
+new file mode 100644
+index 0000000..56d8b10
+--- /dev/null
++++ b/arch/arm/configs/am335x_evm_defconfig
+@@ -0,0 +1,2602 @@
++#
++# Automatically generated file; DO NOT EDIT.
++# Linux/arm 3.2.0 Kernel Configuration
++#
++CONFIG_ARM=y
++CONFIG_HAVE_PWM=y
++CONFIG_SYS_SUPPORTS_APM_EMULATION=y
++CONFIG_HAVE_SCHED_CLOCK=y
++CONFIG_GENERIC_GPIO=y
++# CONFIG_ARCH_USES_GETTIMEOFFSET is not set
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_KTIME_SCALAR=y
++CONFIG_HAVE_PROC_CPU=y
++CONFIG_STACKTRACE_SUPPORT=y
++CONFIG_HAVE_LATENCYTOP_SUPPORT=y
++CONFIG_LOCKDEP_SUPPORT=y
++CONFIG_TRACE_IRQFLAGS_SUPPORT=y
++CONFIG_HARDIRQS_SW_RESEND=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_RWSEM_GENERIC_SPINLOCK=y
++CONFIG_ARCH_HAS_CPUFREQ=y
++CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_NEED_DMA_MAP_STATE=y
++CONFIG_VECTORS_BASE=0xffff0000
++CONFIG_ARM_PATCH_PHYS_VIRT=y
++CONFIG_GENERIC_BUG=y
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++CONFIG_HAVE_IRQ_WORK=y
++
++#
++# General setup
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_CROSS_COMPILE=""
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_HAVE_KERNEL_GZIP=y
++CONFIG_HAVE_KERNEL_LZMA=y
++CONFIG_HAVE_KERNEL_LZO=y
++CONFIG_KERNEL_GZIP=y
++# CONFIG_KERNEL_LZMA is not set
++# CONFIG_KERNEL_LZO is not set
++CONFIG_DEFAULT_HOSTNAME="(none)"
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_POSIX_MQUEUE_SYSCTL=y
++CONFIG_BSD_PROCESS_ACCT=y
++# CONFIG_BSD_PROCESS_ACCT_V3 is not set
++# CONFIG_FHANDLE is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_AUDIT is not set
++CONFIG_HAVE_GENERIC_HARDIRQS=y
++
++#
++# IRQ subsystem
++#
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_HAVE_SPARSE_IRQ=y
++CONFIG_GENERIC_IRQ_SHOW=y
++CONFIG_GENERIC_IRQ_CHIP=y
++CONFIG_IRQ_DOMAIN=y
++# CONFIG_SPARSE_IRQ is not set
++
++#
++# RCU Subsystem
++#
++CONFIG_TINY_RCU=y
++# CONFIG_PREEMPT_RCU is not set
++# CONFIG_RCU_TRACE is not set
++# CONFIG_TREE_RCU_TRACE is not set
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++CONFIG_LOG_BUF_SHIFT=16
++# CONFIG_CGROUPS is not set
++CONFIG_NAMESPACES=y
++CONFIG_UTS_NS=y
++CONFIG_IPC_NS=y
++CONFIG_USER_NS=y
++CONFIG_PID_NS=y
++CONFIG_NET_NS=y
++# CONFIG_SCHED_AUTOGROUP is not set
++# CONFIG_SYSFS_DEPRECATED is not set
++# CONFIG_RELAY is not set
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++CONFIG_RD_GZIP=y
++CONFIG_RD_BZIP2=y
++CONFIG_RD_LZMA=y
++CONFIG_RD_XZ=y
++CONFIG_RD_LZO=y
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_ANON_INODES=y
++# CONFIG_EXPERT is not set
++CONFIG_UID16=y
++# CONFIG_SYSCTL_SYSCALL is not set
++CONFIG_KALLSYMS=y
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SIGNALFD=y
++CONFIG_TIMERFD=y
++CONFIG_EVENTFD=y
++CONFIG_SHMEM=y
++CONFIG_AIO=y
++# CONFIG_EMBEDDED is not set
++CONFIG_HAVE_PERF_EVENTS=y
++CONFIG_PERF_USE_VMALLOC=y
++
++#
++# Kernel Performance Events And Counters
++#
++# CONFIG_PERF_EVENTS is not set
++# CONFIG_PERF_COUNTERS is not set
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_COMPAT_BRK=y
++CONFIG_SLAB=y
++# CONFIG_SLUB is not set
++CONFIG_PROFILING=y
++CONFIG_OPROFILE=y
++CONFIG_HAVE_OPROFILE=y
++# CONFIG_KPROBES is not set
++CONFIG_HAVE_KPROBES=y
++CONFIG_HAVE_KRETPROBES=y
++CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
++CONFIG_HAVE_CLK=y
++CONFIG_HAVE_DMA_API_DEBUG=y
++
++#
++# GCOV-based kernel profiling
++#
++CONFIG_HAVE_GENERIC_DMA_COHERENT=y
++CONFIG_SLABINFO=y
++CONFIG_RT_MUTEXES=y
++CONFIG_BASE_SMALL=0
++CONFIG_MODULES=y
++CONFIG_MODULE_FORCE_LOAD=y
++CONFIG_MODULE_UNLOAD=y
++CONFIG_MODULE_FORCE_UNLOAD=y
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++CONFIG_BLOCK=y
++CONFIG_LBDAF=y
++# CONFIG_BLK_DEV_BSG is not set
++# CONFIG_BLK_DEV_BSGLIB is not set
++# CONFIG_BLK_DEV_INTEGRITY is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++# CONFIG_DEFAULT_DEADLINE is not set
++CONFIG_DEFAULT_CFQ=y
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="cfq"
++# CONFIG_INLINE_SPIN_TRYLOCK is not set
++# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
++# CONFIG_INLINE_SPIN_LOCK is not set
++# CONFIG_INLINE_SPIN_LOCK_BH is not set
++# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
++# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
++CONFIG_INLINE_SPIN_UNLOCK=y
++# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
++CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
++# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
++# CONFIG_INLINE_READ_TRYLOCK is not set
++# CONFIG_INLINE_READ_LOCK is not set
++# CONFIG_INLINE_READ_LOCK_BH is not set
++# CONFIG_INLINE_READ_LOCK_IRQ is not set
++# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
++CONFIG_INLINE_READ_UNLOCK=y
++# CONFIG_INLINE_READ_UNLOCK_BH is not set
++CONFIG_INLINE_READ_UNLOCK_IRQ=y
++# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
++# CONFIG_INLINE_WRITE_TRYLOCK is not set
++# CONFIG_INLINE_WRITE_LOCK is not set
++# CONFIG_INLINE_WRITE_LOCK_BH is not set
++# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
++# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
++CONFIG_INLINE_WRITE_UNLOCK=y
++# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
++CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
++# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
++# CONFIG_MUTEX_SPIN_ON_OWNER is not set
++CONFIG_FREEZER=y
++
++#
++# System Type
++#
++CONFIG_MMU=y
++# CONFIG_ARCH_INTEGRATOR is not set
++# CONFIG_ARCH_REALVIEW is not set
++# CONFIG_ARCH_VERSATILE is not set
++# CONFIG_ARCH_VEXPRESS is not set
++# CONFIG_ARCH_AT91 is not set
++# CONFIG_ARCH_BCMRING is not set
++# CONFIG_ARCH_HIGHBANK is not set
++# CONFIG_ARCH_CLPS711X is not set
++# CONFIG_ARCH_CNS3XXX is not set
++# CONFIG_ARCH_GEMINI is not set
++# CONFIG_ARCH_PRIMA2 is not set
++# CONFIG_ARCH_EBSA110 is not set
++# CONFIG_ARCH_EP93XX is not set
++# CONFIG_ARCH_FOOTBRIDGE is not set
++# CONFIG_ARCH_MXC is not set
++# CONFIG_ARCH_MXS is not set
++# CONFIG_ARCH_NETX is not set
++# CONFIG_ARCH_H720X is not set
++# CONFIG_ARCH_IOP13XX is not set
++# CONFIG_ARCH_IOP32X is not set
++# CONFIG_ARCH_IOP33X is not set
++# CONFIG_ARCH_IXP23XX is not set
++# CONFIG_ARCH_IXP2000 is not set
++# CONFIG_ARCH_IXP4XX is not set
++# CONFIG_ARCH_DOVE is not set
++# CONFIG_ARCH_KIRKWOOD is not set
++# CONFIG_ARCH_LPC32XX is not set
++# CONFIG_ARCH_MV78XX0 is not set
++# CONFIG_ARCH_ORION5X is not set
++# CONFIG_ARCH_MMP is not set
++# CONFIG_ARCH_KS8695 is not set
++# CONFIG_ARCH_W90X900 is not set
++# CONFIG_ARCH_TEGRA is not set
++# CONFIG_ARCH_PICOXCELL is not set
++# CONFIG_ARCH_PNX4008 is not set
++# CONFIG_ARCH_PXA is not set
++# CONFIG_ARCH_MSM is not set
++# CONFIG_ARCH_SHMOBILE is not set
++# CONFIG_ARCH_RPC is not set
++# CONFIG_ARCH_SA1100 is not set
++# CONFIG_ARCH_S3C2410 is not set
++# CONFIG_ARCH_S3C64XX is not set
++# CONFIG_ARCH_S5P64X0 is not set
++# CONFIG_ARCH_S5PC100 is not set
++# CONFIG_ARCH_S5PV210 is not set
++# CONFIG_ARCH_EXYNOS is not set
++# CONFIG_ARCH_SHARK is not set
++# CONFIG_ARCH_TCC_926 is not set
++# CONFIG_ARCH_U300 is not set
++# CONFIG_ARCH_U8500 is not set
++# CONFIG_ARCH_NOMADIK is not set
++# CONFIG_ARCH_DAVINCI is not set
++CONFIG_ARCH_OMAP=y
++# CONFIG_PLAT_SPEAR is not set
++# CONFIG_ARCH_VT8500 is not set
++# CONFIG_ARCH_ZYNQ is not set
++# CONFIG_GPIO_PCA953X is not set
++# CONFIG_KEYBOARD_GPIO_POLLED is not set
++
++#
++# TI OMAP Common Features
++#
++# CONFIG_ARCH_OMAP1 is not set
++CONFIG_ARCH_OMAP2PLUS=y
++
++#
++# OMAP Feature Selections
++#
++# CONFIG_OMAP_SMARTREFLEX is not set
++CONFIG_OMAP_RESET_CLOCKS=y
++CONFIG_OMAP_MUX=y
++CONFIG_OMAP_MUX_DEBUG=y
++CONFIG_OMAP_MUX_WARNINGS=y
++# CONFIG_OMAP_MCBSP is not set
++CONFIG_OMAP_MBOX_FWK=y
++CONFIG_OMAP_MBOX_KFIFO_SIZE=256
++# CONFIG_OMAP_32K_TIMER is not set
++# CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE is not set
++CONFIG_OMAP_DM_TIMER=y
++CONFIG_OMAP_PM_NOOP=y
++CONFIG_MACH_OMAP_GENERIC=y
++
++#
++# TI OMAP2/3/4 Specific Features
++#
++CONFIG_ARCH_OMAP2PLUS_TYPICAL=y
++# CONFIG_ARCH_OMAP2 is not set
++CONFIG_ARCH_OMAP3=y
++# CONFIG_ARCH_OMAP4 is not set
++# CONFIG_SOC_OMAP3430 is not set
++CONFIG_SOC_OMAPTI81XX=y
++CONFIG_SOC_OMAPAM33XX=y
++CONFIG_OMAP_PACKAGE_CBB=y
++
++#
++# OMAP Board Type
++#
++CONFIG_MACH_OMAP3_BEAGLE=y
++# CONFIG_MACH_DEVKIT8000 is not set
++# CONFIG_MACH_OMAP_LDP is not set
++# CONFIG_MACH_OMAP3530_LV_SOM is not set
++# CONFIG_MACH_OMAP3_TORPEDO is not set
++# CONFIG_MACH_ENCORE is not set
++# CONFIG_MACH_OVERO is not set
++# CONFIG_MACH_OMAP3EVM is not set
++# CONFIG_MACH_OMAP3517EVM is not set
++# CONFIG_MACH_CRANEBOARD is not set
++# CONFIG_MACH_OMAP3_PANDORA is not set
++# CONFIG_MACH_OMAP3_TOUCHBOOK is not set
++# CONFIG_MACH_OMAP_3430SDP is not set
++# CONFIG_MACH_NOKIA_RM680 is not set
++# CONFIG_MACH_NOKIA_RX51 is not set
++# CONFIG_MACH_OMAP_ZOOM2 is not set
++# CONFIG_MACH_OMAP_ZOOM3 is not set
++# CONFIG_MACH_CM_T35 is not set
++# CONFIG_MACH_CM_T3517 is not set
++# CONFIG_MACH_IGEP0020 is not set
++# CONFIG_MACH_IGEP0030 is not set
++# CONFIG_MACH_SBC3530 is not set
++# CONFIG_MACH_OMAP_3630SDP is not set
++CONFIG_MACH_TI8168EVM=y
++CONFIG_MACH_TI8148EVM=y
++CONFIG_MACH_AM335XEVM=y
++CONFIG_MACH_AM335XIAEVM=y
++# CONFIG_OMAP3_EMU is not set
++# CONFIG_OMAP3_SDRC_AC_TIMING is not set
++CONFIG_OMAP3_EDMA=y
++
++#
++# System MMU
++#
++
++#
++# Processor Type
++#
++CONFIG_CPU_V7=y
++CONFIG_CPU_32v6K=y
++CONFIG_CPU_32v7=y
++CONFIG_CPU_ABRT_EV7=y
++CONFIG_CPU_PABRT_V7=y
++CONFIG_CPU_CACHE_V7=y
++CONFIG_CPU_CACHE_VIPT=y
++CONFIG_CPU_COPY_V6=y
++CONFIG_CPU_TLB_V7=y
++CONFIG_CPU_HAS_ASID=y
++CONFIG_CPU_CP15=y
++CONFIG_CPU_CP15_MMU=y
++
++#
++# Processor Features
++#
++CONFIG_ARM_THUMB=y
++CONFIG_ARM_THUMBEE=y
++# CONFIG_SWP_EMULATE is not set
++# CONFIG_CPU_ICACHE_DISABLE is not set
++# CONFIG_CPU_DCACHE_DISABLE is not set
++# CONFIG_CPU_BPREDICT_DISABLE is not set
++CONFIG_ARM_L1_CACHE_SHIFT_6=y
++CONFIG_ARM_L1_CACHE_SHIFT=6
++CONFIG_ARM_DMA_MEM_BUFFERABLE=y
++CONFIG_MULTI_IRQ_HANDLER=y
++# CONFIG_ARM_ERRATA_430973 is not set
++# CONFIG_ARM_ERRATA_458693 is not set
++# CONFIG_ARM_ERRATA_460075 is not set
++# CONFIG_ARM_ERRATA_720789 is not set
++# CONFIG_ARM_ERRATA_743622 is not set
++# CONFIG_ARM_ERRATA_751472 is not set
++# CONFIG_ARM_ERRATA_754322 is not set
++
++#
++# Bus support
++#
++# CONFIG_PCI_SYSCALL is not set
++# CONFIG_ARCH_SUPPORTS_MSI is not set
++# CONFIG_PCCARD is not set
++
++#
++# Kernel Features
++#
++CONFIG_TICK_ONESHOT=y
++CONFIG_NO_HZ=y
++CONFIG_HIGH_RES_TIMERS=y
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++CONFIG_VMSPLIT_3G=y
++# CONFIG_VMSPLIT_2G is not set
++# CONFIG_VMSPLIT_1G is not set
++CONFIG_PAGE_OFFSET=0xC0000000
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_HZ=100
++# CONFIG_THUMB2_KERNEL is not set
++CONFIG_AEABI=y
++CONFIG_OABI_COMPAT=y
++CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y
++# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
++# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
++CONFIG_HAVE_ARCH_PFN_VALID=y
++# CONFIG_HIGHMEM is not set
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++CONFIG_HAVE_MEMBLOCK=y
++CONFIG_PAGEFLAGS_EXTENDED=y
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_COMPACTION is not set
++# CONFIG_PHYS_ADDR_T_64BIT is not set
++CONFIG_ZONE_DMA_FLAG=0
++CONFIG_VIRT_TO_BUS=y
++# CONFIG_KSM is not set
++CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
++CONFIG_NEED_PER_CPU_KM=y
++# CONFIG_CLEANCACHE is not set
++CONFIG_FORCE_MAX_ZONEORDER=11
++# CONFIG_LEDS is not set
++CONFIG_ALIGNMENT_TRAP=y
++# CONFIG_UACCESS_WITH_MEMCPY is not set
++# CONFIG_SECCOMP is not set
++# CONFIG_CC_STACKPROTECTOR is not set
++# CONFIG_DEPRECATED_PARAM_STRUCT is not set
++
++#
++# Boot options
++#
++CONFIG_USE_OF=y
++CONFIG_ZBOOT_ROM_TEXT=0x0
++CONFIG_ZBOOT_ROM_BSS=0x0
++# CONFIG_ARM_APPENDED_DTB is not set
++CONFIG_CMDLINE="root=/dev/mmcblk0p2 rootwait console=ttyO0,115200"
++CONFIG_CMDLINE_FROM_BOOTLOADER=y
++# CONFIG_CMDLINE_EXTEND is not set
++# CONFIG_CMDLINE_FORCE is not set
++# CONFIG_XIP_KERNEL is not set
++# CONFIG_KEXEC is not set
++# CONFIG_CRASH_DUMP is not set
++# CONFIG_AUTO_ZRELADDR is not set
++
++#
++# CPU Power Management
++#
++
++#
++# CPU Frequency scaling
++#
++CONFIG_CPU_FREQ=y
++CONFIG_CPU_FREQ_TABLE=y
++CONFIG_CPU_FREQ_STAT=y
++CONFIG_CPU_FREQ_STAT_DETAILS=y
++# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
++CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
++# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
++# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
++CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
++CONFIG_CPU_FREQ_GOV_POWERSAVE=y
++CONFIG_CPU_FREQ_GOV_USERSPACE=y
++CONFIG_CPU_FREQ_GOV_ONDEMAND=y
++CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
++
++#
++# ARM CPU frequency scaling drivers
++#
++CONFIG_CPU_IDLE=y
++CONFIG_CPU_IDLE_GOV_LADDER=y
++CONFIG_CPU_IDLE_GOV_MENU=y
++
++#
++# Floating point emulation
++#
++
++#
++# At least one emulation must be selected
++#
++CONFIG_FPE_NWFPE=y
++# CONFIG_FPE_NWFPE_XP is not set
++# CONFIG_FPE_FASTFPE is not set
++CONFIG_VFP=y
++CONFIG_VFPv3=y
++CONFIG_NEON=y
++
++#
++# Userspace binary formats
++#
++CONFIG_BINFMT_ELF=y
++CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
++CONFIG_HAVE_AOUT=y
++# CONFIG_BINFMT_AOUT is not set
++CONFIG_BINFMT_MISC=y
++
++#
++# Power management options
++#
++CONFIG_SUSPEND=y
++CONFIG_SUSPEND_FREEZER=y
++CONFIG_PM_SLEEP=y
++CONFIG_PM_RUNTIME=y
++CONFIG_PM=y
++CONFIG_PM_DEBUG=y
++CONFIG_PM_ADVANCED_DEBUG=y
++# CONFIG_PM_TEST_SUSPEND is not set
++CONFIG_CAN_PM_TRACE=y
++# CONFIG_APM_EMULATION is not set
++CONFIG_ARCH_HAS_OPP=y
++CONFIG_PM_OPP=y
++CONFIG_PM_CLK=y
++CONFIG_CPU_PM=y
++CONFIG_ARCH_SUSPEND_POSSIBLE=y
++CONFIG_ARM_CPU_SUSPEND=y
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++CONFIG_UNIX=y
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++CONFIG_IP_PNP_RARP=y
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE_DEMUX is not set
++# CONFIG_IP_MROUTE is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_BEET is not set
++# CONFIG_INET_LRO is not set
++# CONFIG_INET_DIAG is not set
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++# CONFIG_IPV6 is not set
++# CONFIG_NETLABEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_NETFILTER_ADVANCED=y
++
++#
++# Core Netfilter Configuration
++#
++# CONFIG_NETFILTER_NETLINK_QUEUE is not set
++# CONFIG_NETFILTER_NETLINK_LOG is not set
++CONFIG_NF_CONNTRACK=y
++# CONFIG_NF_CONNTRACK_MARK is not set
++# CONFIG_NF_CONNTRACK_EVENTS is not set
++# CONFIG_NF_CONNTRACK_TIMESTAMP is not set
++# CONFIG_NF_CT_PROTO_DCCP is not set
++# CONFIG_NF_CT_PROTO_SCTP is not set
++# CONFIG_NF_CT_PROTO_UDPLITE is not set
++# CONFIG_NF_CONNTRACK_AMANDA is not set
++# CONFIG_NF_CONNTRACK_FTP is not set
++# CONFIG_NF_CONNTRACK_H323 is not set
++# CONFIG_NF_CONNTRACK_IRC is not set
++# CONFIG_NF_CONNTRACK_NETBIOS_NS is not set
++# CONFIG_NF_CONNTRACK_SNMP is not set
++# CONFIG_NF_CONNTRACK_PPTP is not set
++# CONFIG_NF_CONNTRACK_SANE is not set
++# CONFIG_NF_CONNTRACK_SIP is not set
++# CONFIG_NF_CONNTRACK_TFTP is not set
++# CONFIG_NF_CT_NETLINK is not set
++CONFIG_NETFILTER_XTABLES=y
++
++#
++# Xtables combined modules
++#
++# CONFIG_NETFILTER_XT_MARK is not set
++# CONFIG_NETFILTER_XT_CONNMARK is not set
++
++#
++# Xtables targets
++#
++# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set
++# CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set
++# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set
++# CONFIG_NETFILTER_XT_TARGET_MARK is not set
++# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
++# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set
++# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
++# CONFIG_NETFILTER_XT_TARGET_TEE is not set
++# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
++
++#
++# Xtables matches
++#
++# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set
++# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set
++# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
++# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set
++# CONFIG_NETFILTER_XT_MATCH_CONNLIMIT is not set
++# CONFIG_NETFILTER_XT_MATCH_CONNMARK is not set
++# CONFIG_NETFILTER_XT_MATCH_CONNTRACK is not set
++# CONFIG_NETFILTER_XT_MATCH_CPU is not set
++# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
++# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set
++# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
++# CONFIG_NETFILTER_XT_MATCH_ESP is not set
++# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
++# CONFIG_NETFILTER_XT_MATCH_HELPER is not set
++# CONFIG_NETFILTER_XT_MATCH_HL is not set
++# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
++# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set
++# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set
++# CONFIG_NETFILTER_XT_MATCH_MAC is not set
++# CONFIG_NETFILTER_XT_MATCH_MARK is not set
++# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set
++# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
++# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set
++# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
++# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
++# CONFIG_NETFILTER_XT_MATCH_REALM is not set
++# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
++# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
++# CONFIG_NETFILTER_XT_MATCH_STATE is not set
++# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
++# CONFIG_NETFILTER_XT_MATCH_STRING is not set
++# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
++# CONFIG_NETFILTER_XT_MATCH_TIME is not set
++# CONFIG_NETFILTER_XT_MATCH_U32 is not set
++# CONFIG_IP_VS is not set
++
++#
++# IP: Netfilter Configuration
++#
++CONFIG_NF_DEFRAG_IPV4=y
++CONFIG_NF_CONNTRACK_IPV4=y
++CONFIG_NF_CONNTRACK_PROC_COMPAT=y
++# CONFIG_IP_NF_QUEUE is not set
++CONFIG_IP_NF_IPTABLES=y
++# CONFIG_IP_NF_MATCH_AH is not set
++# CONFIG_IP_NF_MATCH_ECN is not set
++# CONFIG_IP_NF_MATCH_TTL is not set
++CONFIG_IP_NF_FILTER=y
++# CONFIG_IP_NF_TARGET_REJECT is not set
++CONFIG_IP_NF_TARGET_LOG=y
++# CONFIG_IP_NF_TARGET_ULOG is not set
++CONFIG_NF_NAT=y
++CONFIG_NF_NAT_NEEDED=y
++CONFIG_IP_NF_TARGET_MASQUERADE=y
++# CONFIG_IP_NF_TARGET_NETMAP is not set
++# CONFIG_IP_NF_TARGET_REDIRECT is not set
++# CONFIG_NF_NAT_FTP is not set
++# CONFIG_NF_NAT_IRC is not set
++# CONFIG_NF_NAT_TFTP is not set
++# CONFIG_NF_NAT_AMANDA is not set
++# CONFIG_NF_NAT_PPTP is not set
++# CONFIG_NF_NAT_H323 is not set
++# CONFIG_NF_NAT_SIP is not set
++# CONFIG_IP_NF_MANGLE is not set
++# CONFIG_IP_NF_RAW is not set
++# CONFIG_IP_NF_SECURITY is not set
++# CONFIG_IP_NF_ARPTABLES is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_RDS is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_L2TP is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_NET_DSA is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++# CONFIG_PHONET is not set
++# CONFIG_IEEE802154 is not set
++# CONFIG_NET_SCHED is not set
++# CONFIG_DCB is not set
++CONFIG_DNS_RESOLVER=y
++# CONFIG_BATMAN_ADV is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++CONFIG_CAN=y
++CONFIG_CAN_RAW=y
++CONFIG_CAN_BCM=y
++# CONFIG_CAN_GW is not set
++
++#
++# CAN Device Drivers
++#
++# CONFIG_CAN_VCAN is not set
++# CONFIG_CAN_SLCAN is not set
++CONFIG_CAN_DEV=y
++CONFIG_CAN_CALC_BITTIMING=y
++# CONFIG_CAN_TI_HECC is not set
++# CONFIG_CAN_MCP251X is not set
++# CONFIG_CAN_SJA1000 is not set
++# CONFIG_CAN_C_CAN is not set
++CONFIG_CAN_D_CAN=y
++CONFIG_CAN_D_CAN_PLATFORM=y
++
++#
++# CAN USB interfaces
++#
++# CONFIG_CAN_EMS_USB is not set
++# CONFIG_CAN_ESD_USB2 is not set
++# CONFIG_CAN_SOFTING is not set
++# CONFIG_CAN_DEBUG_DEVICES is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++CONFIG_WIRELESS=y
++CONFIG_WIRELESS_EXT=y
++CONFIG_WEXT_CORE=y
++CONFIG_WEXT_PROC=y
++CONFIG_WEXT_PRIV=y
++# CONFIG_CFG80211 is not set
++CONFIG_WIRELESS_EXT_SYSFS=y
++# CONFIG_LIB80211 is not set
++
++#
++# CFG80211 needs to be enabled for MAC80211
++#
++# CONFIG_WIMAX is not set
++CONFIG_RFKILL=y
++CONFIG_RFKILL_INPUT=y
++# CONFIG_RFKILL_REGULATOR is not set
++# CONFIG_RFKILL_GPIO is not set
++# CONFIG_NET_9P is not set
++# CONFIG_CAIF is not set
++# CONFIG_CEPH_LIB is not set
++# CONFIG_NFC is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++# CONFIG_DEVTMPFS is not set
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=y
++CONFIG_FIRMWARE_IN_KERNEL=y
++CONFIG_EXTRA_FIRMWARE="am335x-pm-firmware.bin"
++CONFIG_EXTRA_FIRMWARE_DIR="firmware"
++# CONFIG_SYS_HYPERVISOR is not set
++CONFIG_REGMAP=y
++CONFIG_REGMAP_I2C=y
++CONFIG_REGMAP_SPI=y
++
++#
++# CBUS support
++#
++# CONFIG_CBUS is not set
++# CONFIG_CONNECTOR is not set
++CONFIG_MTD=y
++# CONFIG_MTD_TESTS is not set
++# CONFIG_MTD_REDBOOT_PARTS is not set
++CONFIG_MTD_CMDLINE_PARTS=y
++# CONFIG_MTD_AFS_PARTS is not set
++# CONFIG_MTD_OF_PARTS is not set
++# CONFIG_MTD_AR7_PARTS is not set
++
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_CHAR=y
++CONFIG_MTD_BLKDEVS=y
++CONFIG_MTD_BLOCK=y
++# CONFIG_FTL is not set
++# CONFIG_NFTL is not set
++# CONFIG_INFTL is not set
++# CONFIG_RFD_FTL is not set
++# CONFIG_SSFDC is not set
++# CONFIG_SM_FTL is not set
++CONFIG_MTD_OOPS=y
++# CONFIG_MTD_SWAP is not set
++
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=y
++# CONFIG_MTD_JEDECPROBE is not set
++CONFIG_MTD_GEN_PROBE=y
++# CONFIG_MTD_CFI_ADV_OPTIONS is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++# CONFIG_MTD_CFI_INTELEXT is not set
++# CONFIG_MTD_CFI_AMDSTD is not set
++# CONFIG_MTD_CFI_STAA is not set
++CONFIG_MTD_CFI_UTIL=y
++# CONFIG_MTD_RAM is not set
++# CONFIG_MTD_ROM is not set
++# CONFIG_MTD_ABSENT is not set
++
++#
++# Mapping drivers for chip access
++#
++# CONFIG_MTD_COMPLEX_MAPPINGS is not set
++# CONFIG_MTD_PHYSMAP is not set
++# CONFIG_MTD_PHYSMAP_OF is not set
++# CONFIG_MTD_PLATRAM is not set
++
++#
++# Self-contained MTD device drivers
++#
++# CONFIG_MTD_DATAFLASH is not set
++CONFIG_MTD_M25P80=y
++CONFIG_M25PXX_USE_FAST_READ=y
++# CONFIG_MTD_SST25L is not set
++# CONFIG_MTD_SLRAM is not set
++# CONFIG_MTD_PHRAM is not set
++# CONFIG_MTD_MTDRAM is not set
++# CONFIG_MTD_BLOCK2MTD is not set
++
++#
++# Disk-On-Chip Device Drivers
++#
++# CONFIG_MTD_DOC2000 is not set
++# CONFIG_MTD_DOC2001 is not set
++# CONFIG_MTD_DOC2001PLUS is not set
++# CONFIG_MTD_DOCG3 is not set
++CONFIG_MTD_NAND_ECC=y
++# CONFIG_MTD_NAND_ECC_SMC is not set
++CONFIG_MTD_NAND=y
++# CONFIG_MTD_NAND_VERIFY_WRITE is not set
++# CONFIG_MTD_NAND_ECC_BCH is not set
++# CONFIG_MTD_SM_COMMON is not set
++# CONFIG_MTD_NAND_MUSEUM_IDS is not set
++# CONFIG_MTD_NAND_GPIO is not set
++CONFIG_MTD_NAND_OMAP2=y
++CONFIG_MTD_NAND_IDS=y
++# CONFIG_MTD_NAND_DISKONCHIP is not set
++# CONFIG_MTD_NAND_NANDSIM is not set
++# CONFIG_MTD_NAND_PLATFORM is not set
++# CONFIG_MTD_ALAUDA is not set
++CONFIG_MTD_ONENAND=y
++# CONFIG_MTD_ONENAND_VERIFY_WRITE is not set
++# CONFIG_MTD_ONENAND_GENERIC is not set
++CONFIG_MTD_ONENAND_OMAP2=y
++# CONFIG_MTD_ONENAND_OTP is not set
++# CONFIG_MTD_ONENAND_2X_PROGRAM is not set
++# CONFIG_MTD_ONENAND_SIM is not set
++
++#
++# LPDDR flash memory drivers
++#
++# CONFIG_MTD_LPDDR is not set
++CONFIG_MTD_UBI=y
++CONFIG_MTD_UBI_WL_THRESHOLD=4096
++CONFIG_MTD_UBI_BEB_RESERVE=1
++# CONFIG_MTD_UBI_GLUEBI is not set
++# CONFIG_MTD_UBI_DEBUG is not set
++CONFIG_DTC=y
++CONFIG_OF=y
++
++#
++# Device Tree and Open Firmware support
++#
++CONFIG_PROC_DEVICETREE=y
++CONFIG_OF_FLATTREE=y
++CONFIG_OF_EARLY_FLATTREE=y
++CONFIG_OF_ADDRESS=y
++CONFIG_OF_IRQ=y
++CONFIG_OF_DEVICE=y
++CONFIG_OF_GPIO=y
++CONFIG_OF_I2C=y
++CONFIG_OF_NET=y
++CONFIG_OF_SPI=y
++CONFIG_OF_MDIO=y
++# CONFIG_PARPORT is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++
++#
++# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
++#
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=16384
++# CONFIG_BLK_DEV_XIP is not set
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++# CONFIG_MG_DISK is not set
++# CONFIG_BLK_DEV_RBD is not set
++CONFIG_SENSORS_LIS3LV02D=y
++CONFIG_MISC_DEVICES=y
++# CONFIG_AD525X_DPOT is not set
++# CONFIG_ATMEL_PWM is not set
++# CONFIG_ICS932S401 is not set
++# CONFIG_ENCLOSURE_SERVICES is not set
++# CONFIG_APDS9802ALS is not set
++# CONFIG_ISL29003 is not set
++# CONFIG_ISL29020 is not set
++CONFIG_SENSORS_TSL2550=y
++# CONFIG_SENSORS_BH1780 is not set
++# CONFIG_SENSORS_BH1770 is not set
++# CONFIG_SENSORS_APDS990X is not set
++# CONFIG_HMC6352 is not set
++# CONFIG_DS1682 is not set
++# CONFIG_TI_DAC7512 is not set
++# CONFIG_BMP085 is not set
++# CONFIG_USB_SWITCH_FSA9480 is not set
++# CONFIG_C2PORT is not set
++
++#
++# EEPROM support
++#
++CONFIG_EEPROM_AT24=y
++# CONFIG_EEPROM_AT25 is not set
++# CONFIG_EEPROM_LEGACY is not set
++# CONFIG_EEPROM_MAX6875 is not set
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_EEPROM_93XX46 is not set
++# CONFIG_IWMC3200TOP is not set
++
++#
++# Texas Instruments shared transport line discipline
++#
++# CONFIG_TI_ST is not set
++# CONFIG_SENSORS_LIS3_SPI is not set
++CONFIG_SENSORS_LIS3_I2C=y
++
++#
++# Altera FPGA firmware download module
++#
++# CONFIG_ALTERA_STAPL is not set
++
++#
++# SCSI device support
++#
++CONFIG_SCSI_MOD=y
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++CONFIG_SCSI_DMA=y
++# CONFIG_SCSI_TGT is not set
++# CONFIG_SCSI_NETLINK is not set
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++# CONFIG_BLK_DEV_SR is not set
++# CONFIG_CHR_DEV_SG is not set
++# CONFIG_CHR_DEV_SCH is not set
++CONFIG_SCSI_MULTI_LUN=y
++# CONFIG_SCSI_CONSTANTS is not set
++# CONFIG_SCSI_LOGGING is not set
++CONFIG_SCSI_SCAN_ASYNC=y
++CONFIG_SCSI_WAIT_SCAN=m
++
++#
++# SCSI Transports
++#
++# CONFIG_SCSI_SPI_ATTRS is not set
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++# CONFIG_SCSI_SAS_ATTRS is not set
++# CONFIG_SCSI_SAS_LIBSAS is not set
++# CONFIG_SCSI_SRP_ATTRS is not set
++CONFIG_SCSI_LOWLEVEL=y
++# CONFIG_ISCSI_TCP is not set
++# CONFIG_ISCSI_BOOT_SYSFS is not set
++# CONFIG_LIBFC is not set
++# CONFIG_LIBFCOE is not set
++# CONFIG_SCSI_DEBUG is not set
++# CONFIG_SCSI_DH is not set
++# CONFIG_SCSI_OSD_INITIATOR is not set
++# CONFIG_ATA is not set
++# CONFIG_MD is not set
++# CONFIG_TARGET_CORE is not set
++CONFIG_NETDEVICES=y
++CONFIG_NET_CORE=y
++# CONFIG_BONDING is not set
++# CONFIG_DUMMY is not set
++# CONFIG_EQUALIZER is not set
++CONFIG_MII=y
++# CONFIG_MACVLAN is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_TUN is not set
++# CONFIG_VETH is not set
++
++#
++# CAIF transport drivers
++#
++CONFIG_ETHERNET=y
++CONFIG_NET_VENDOR_BROADCOM=y
++# CONFIG_B44 is not set
++CONFIG_NET_VENDOR_CHELSIO=y
++# CONFIG_DM9000 is not set
++# CONFIG_DNET is not set
++CONFIG_NET_VENDOR_FARADAY=y
++# CONFIG_FTMAC100 is not set
++# CONFIG_FTGMAC100 is not set
++CONFIG_NET_VENDOR_INTEL=y
++CONFIG_NET_VENDOR_I825XX=y
++CONFIG_NET_VENDOR_MARVELL=y
++CONFIG_NET_VENDOR_MICREL=y
++# CONFIG_KS8851 is not set
++# CONFIG_KS8851_MLL is not set
++CONFIG_NET_VENDOR_MICROCHIP=y
++# CONFIG_ENC28J60 is not set
++CONFIG_NET_VENDOR_NATSEMI=y
++CONFIG_NET_VENDOR_8390=y
++# CONFIG_AX88796 is not set
++# CONFIG_ETHOC is not set
++CONFIG_NET_VENDOR_SEEQ=y
++# CONFIG_SEEQ8005 is not set
++CONFIG_NET_VENDOR_SMSC=y
++CONFIG_SMC91X=y
++# CONFIG_SMC911X is not set
++CONFIG_SMSC911X=y
++# CONFIG_SMSC911X_ARCH_HOOKS is not set
++CONFIG_NET_VENDOR_STMICRO=y
++# CONFIG_STMMAC_ETH is not set
++CONFIG_NET_VENDOR_TI=y
++# CONFIG_TI_DAVINCI_EMAC is not set
++CONFIG_TI_DAVINCI_MDIO=y
++CONFIG_TI_DAVINCI_CPDMA=y
++CONFIG_TI_CPSW=y
++CONFIG_TLK110_WORKAROUND=y
++CONFIG_PHYLIB=y
++
++#
++# MII PHY device drivers
++#
++# CONFIG_MARVELL_PHY is not set
++# CONFIG_DAVICOM_PHY is not set
++# CONFIG_QSEMI_PHY is not set
++# CONFIG_LXT_PHY is not set
++# CONFIG_CICADA_PHY is not set
++# CONFIG_VITESSE_PHY is not set
++CONFIG_SMSC_PHY=y
++# CONFIG_BROADCOM_PHY is not set
++# CONFIG_ICPLUS_PHY is not set
++# CONFIG_REALTEK_PHY is not set
++# CONFIG_NATIONAL_PHY is not set
++# CONFIG_STE10XP is not set
++# CONFIG_LSI_ET1011C_PHY is not set
++# CONFIG_MICREL_PHY is not set
++# CONFIG_FIXED_PHY is not set
++# CONFIG_MDIO_BITBANG is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++
++#
++# USB Network Adapters
++#
++# CONFIG_USB_CATC is not set
++# CONFIG_USB_KAWETH is not set
++# CONFIG_USB_PEGASUS is not set
++# CONFIG_USB_RTL8150 is not set
++CONFIG_USB_USBNET=y
++# CONFIG_USB_NET_AX8817X is not set
++CONFIG_USB_NET_CDCETHER=y
++CONFIG_USB_NET_CDC_EEM=y
++CONFIG_USB_NET_CDC_NCM=y
++CONFIG_USB_NET_DM9601=y
++# CONFIG_USB_NET_SMSC75XX is not set
++# CONFIG_USB_NET_SMSC95XX is not set
++# CONFIG_USB_NET_GL620A is not set
++# CONFIG_USB_NET_NET1080 is not set
++# CONFIG_USB_NET_PLUSB is not set
++# CONFIG_USB_NET_MCS7830 is not set
++# CONFIG_USB_NET_RNDIS_HOST is not set
++# CONFIG_USB_NET_CDC_SUBSET is not set
++# CONFIG_USB_NET_ZAURUS is not set
++# CONFIG_USB_NET_CX82310_ETH is not set
++# CONFIG_USB_NET_KALMIA is not set
++# CONFIG_USB_HSO is not set
++# CONFIG_USB_NET_INT51X1 is not set
++# CONFIG_USB_IPHETH is not set
++# CONFIG_USB_SIERRA_NET is not set
++# CONFIG_USB_VL600 is not set
++CONFIG_WLAN=y
++CONFIG_USB_ZD1201=y
++# CONFIG_HOSTAP is not set
++CONFIG_WL12XX_PLATFORM_DATA=y
++
++#
++# Enable WiMAX (Networking options) to see the WiMAX drivers
++#
++# CONFIG_WAN is not set
++# CONFIG_ISDN is not set
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++# CONFIG_INPUT_FF_MEMLESS is not set
++CONFIG_INPUT_POLLDEV=y
++# CONFIG_INPUT_SPARSEKMAP is not set
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++# CONFIG_INPUT_JOYDEV is not set
++CONFIG_INPUT_EVDEV=y
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++# CONFIG_KEYBOARD_ADP5588 is not set
++# CONFIG_KEYBOARD_ADP5589 is not set
++# CONFIG_KEYBOARD_ATKBD is not set
++# CONFIG_KEYBOARD_QT1070 is not set
++# CONFIG_KEYBOARD_QT2160 is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++CONFIG_KEYBOARD_GPIO=y
++# CONFIG_KEYBOARD_TCA6416 is not set
++CONFIG_KEYBOARD_MATRIX=y
++# CONFIG_KEYBOARD_MAX7359 is not set
++# CONFIG_KEYBOARD_MCS is not set
++# CONFIG_KEYBOARD_MPR121 is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++# CONFIG_KEYBOARD_OPENCORES is not set
++# CONFIG_KEYBOARD_STOWAWAY is not set
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_TWL4030 is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++CONFIG_MOUSE_PS2_ALPS=y
++CONFIG_MOUSE_PS2_LOGIPS2PP=y
++CONFIG_MOUSE_PS2_SYNAPTICS=y
++CONFIG_MOUSE_PS2_TRACKPOINT=y
++# CONFIG_MOUSE_PS2_ELANTECH is not set
++# CONFIG_MOUSE_PS2_SENTELIC is not set
++# CONFIG_MOUSE_PS2_TOUCHKIT is not set
++# CONFIG_MOUSE_SERIAL is not set
++# CONFIG_MOUSE_APPLETOUCH is not set
++# CONFIG_MOUSE_BCM5974 is not set
++# CONFIG_MOUSE_VSXXXAA is not set
++# CONFIG_MOUSE_GPIO is not set
++# CONFIG_MOUSE_SYNAPTICS_I2C is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TABLET is not set
++CONFIG_INPUT_TOUCHSCREEN=y
++# CONFIG_TOUCHSCREEN_ADS7846 is not set
++# CONFIG_TOUCHSCREEN_AD7877 is not set
++# CONFIG_TOUCHSCREEN_AD7879 is not set
++# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set
++# CONFIG_TOUCHSCREEN_BU21013 is not set
++# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set
++# CONFIG_TOUCHSCREEN_DYNAPRO is not set
++# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
++# CONFIG_TOUCHSCREEN_EETI is not set
++# CONFIG_TOUCHSCREEN_FUJITSU is not set
++# CONFIG_TOUCHSCREEN_GUNZE is not set
++# CONFIG_TOUCHSCREEN_ELO is not set
++# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
++# CONFIG_TOUCHSCREEN_MAX11801 is not set
++# CONFIG_TOUCHSCREEN_MCS5000 is not set
++# CONFIG_TOUCHSCREEN_MTOUCH is not set
++# CONFIG_TOUCHSCREEN_INEXIO is not set
++# CONFIG_TOUCHSCREEN_MK712 is not set
++# CONFIG_TOUCHSCREEN_PENMOUNT is not set
++# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
++# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
++CONFIG_TOUCHSCREEN_TI_TSCADC=y
++# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
++# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
++# CONFIG_TOUCHSCREEN_TSC_SERIO is not set
++# CONFIG_TOUCHSCREEN_TSC2005 is not set
++# CONFIG_TOUCHSCREEN_TSC2007 is not set
++# CONFIG_TOUCHSCREEN_W90X900 is not set
++# CONFIG_TOUCHSCREEN_ST1232 is not set
++# CONFIG_TOUCHSCREEN_TPS6507X is not set
++CONFIG_INPUT_MISC=y
++# CONFIG_INPUT_AD714X is not set
++# CONFIG_INPUT_BMA150 is not set
++# CONFIG_INPUT_MMA8450 is not set
++# CONFIG_INPUT_MPU3050 is not set
++# CONFIG_INPUT_ATI_REMOTE2 is not set
++# CONFIG_INPUT_KEYSPAN_REMOTE is not set
++# CONFIG_INPUT_KXTJ9 is not set
++# CONFIG_INPUT_POWERMATE is not set
++# CONFIG_INPUT_YEALINK is not set
++# CONFIG_INPUT_CM109 is not set
++# CONFIG_INPUT_TWL4030_PWRBUTTON is not set
++# CONFIG_INPUT_TWL4030_VIBRA is not set
++# CONFIG_INPUT_TWL6040_VIBRA is not set
++# CONFIG_INPUT_UINPUT is not set
++# CONFIG_INPUT_PCF8574 is not set
++# CONFIG_INPUT_PWM_BEEPER is not set
++# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
++# CONFIG_INPUT_ADXL34X is not set
++# CONFIG_INPUT_CMA3000 is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++# CONFIG_SERIO_SERPORT is not set
++CONFIG_SERIO_LIBPS2=y
++# CONFIG_SERIO_RAW is not set
++# CONFIG_SERIO_ALTERA_PS2 is not set
++# CONFIG_SERIO_PS2MULT is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_CONSOLE_TRANSLATIONS=y
++CONFIG_VT_CONSOLE=y
++CONFIG_VT_CONSOLE_SLEEP=y
++CONFIG_HW_CONSOLE=y
++CONFIG_VT_HW_CONSOLE_BINDING=y
++CONFIG_UNIX98_PTYS=y
++# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
++# CONFIG_LEGACY_PTYS is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++# CONFIG_N_GSM is not set
++# CONFIG_TRACE_SINK is not set
++CONFIG_DEVKMEM=y
++
++#
++# Serial drivers
++#
++# CONFIG_SERIAL_8250 is not set
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_MAX3100 is not set
++# CONFIG_SERIAL_MAX3107 is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++CONFIG_SERIAL_OMAP=y
++CONFIG_SERIAL_OMAP_CONSOLE=y
++# CONFIG_SERIAL_TIMBERDALE is not set
++# CONFIG_SERIAL_ALTERA_JTAGUART is not set
++# CONFIG_SERIAL_ALTERA_UART is not set
++# CONFIG_SERIAL_IFX6X60 is not set
++# CONFIG_SERIAL_XILINX_PS_UART is not set
++# CONFIG_HVC_DCC is not set
++# CONFIG_IPMI_HANDLER is not set
++# CONFIG_HW_RANDOM is not set
++# CONFIG_R3964 is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++# CONFIG_RAMOOPS is not set
++CONFIG_I2C=y
++CONFIG_I2C_BOARDINFO=y
++CONFIG_I2C_COMPAT=y
++CONFIG_I2C_CHARDEV=y
++# CONFIG_I2C_MUX is not set
++CONFIG_I2C_HELPER_AUTO=y
++
++#
++# I2C Hardware Bus support
++#
++
++#
++# I2C system bus drivers (mostly embedded / system-on-chip)
++#
++# CONFIG_I2C_DESIGNWARE_PLATFORM is not set
++# CONFIG_I2C_GPIO is not set
++# CONFIG_I2C_OCORES is not set
++CONFIG_I2C_OMAP=y
++# CONFIG_I2C_PCA_PLATFORM is not set
++# CONFIG_I2C_PXA_PCI is not set
++# CONFIG_I2C_SIMTEC is not set
++# CONFIG_I2C_XILINX is not set
++
++#
++# External I2C/SMBus adapter drivers
++#
++# CONFIG_I2C_DIOLAN_U2C is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_TAOS_EVM is not set
++# CONFIG_I2C_TINY_USB is not set
++
++#
++# Other I2C/SMBus bus drivers
++#
++# CONFIG_I2C_STUB is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++CONFIG_SPI=y
++CONFIG_SPI_MASTER=y
++
++#
++# SPI Master Controller Drivers
++#
++# CONFIG_SPI_ALTERA is not set
++# CONFIG_SPI_BITBANG is not set
++# CONFIG_SPI_GPIO is not set
++# CONFIG_SPI_OC_TINY is not set
++CONFIG_SPI_OMAP24XX=y
++# CONFIG_SPI_PXA2XX_PCI is not set
++# CONFIG_SPI_XILINX is not set
++# CONFIG_SPI_DESIGNWARE is not set
++
++#
++# SPI Protocol Masters
++#
++# CONFIG_SPI_SPIDEV is not set
++# CONFIG_SPI_TLE62X0 is not set
++
++#
++# PPS support
++#
++# CONFIG_PPS is not set
++
++#
++# PPS generators support
++#
++
++#
++# PTP clock support
++#
++
++#
++# Enable Device Drivers -> PPS to see the PTP clock options.
++#
++CONFIG_ARCH_REQUIRE_GPIOLIB=y
++CONFIG_GPIOLIB=y
++CONFIG_GPIO_SYSFS=y
++
++#
++# Memory mapped GPIO drivers:
++#
++# CONFIG_GPIO_GENERIC_PLATFORM is not set
++# CONFIG_GPIO_IT8761E is not set
++
++#
++# I2C GPIO expanders:
++#
++# CONFIG_GPIO_MAX7300 is not set
++# CONFIG_GPIO_MAX732X is not set
++# CONFIG_GPIO_PCF857X is not set
++# CONFIG_GPIO_SX150X is not set
++# CONFIG_GPIO_TWL4030 is not set
++# CONFIG_GPIO_ADP5588 is not set
++
++#
++# PCI GPIO expanders:
++#
++
++#
++# SPI GPIO expanders:
++#
++# CONFIG_GPIO_MAX7301 is not set
++# CONFIG_GPIO_MCP23S08 is not set
++# CONFIG_GPIO_MC33880 is not set
++# CONFIG_GPIO_74X164 is not set
++
++#
++# AC97 GPIO expanders:
++#
++
++#
++# MODULbus GPIO expanders:
++#
++CONFIG_GPIO_TPS65910=y
++CONFIG_GENERIC_PWM=y
++CONFIG_DAVINCI_EHRPWM=y
++CONFIG_ECAP_PWM=y
++# CONFIG_W1 is not set
++# CONFIG_POWER_SUPPLY is not set
++CONFIG_HWMON=y
++# CONFIG_HWMON_VID is not set
++# CONFIG_HWMON_DEBUG_CHIP is not set
++
++#
++# Native drivers
++#
++# CONFIG_SENSORS_AD7314 is not set
++# CONFIG_SENSORS_AD7414 is not set
++# CONFIG_SENSORS_AD7418 is not set
++# CONFIG_SENSORS_ADCXX is not set
++# CONFIG_SENSORS_ADM1021 is not set
++# CONFIG_SENSORS_ADM1025 is not set
++# CONFIG_SENSORS_ADM1026 is not set
++# CONFIG_SENSORS_ADM1029 is not set
++# CONFIG_SENSORS_ADM1031 is not set
++# CONFIG_SENSORS_ADM9240 is not set
++# CONFIG_SENSORS_ADT7411 is not set
++# CONFIG_SENSORS_ADT7462 is not set
++# CONFIG_SENSORS_ADT7470 is not set
++# CONFIG_SENSORS_ADT7475 is not set
++# CONFIG_SENSORS_ASC7621 is not set
++# CONFIG_SENSORS_ATXP1 is not set
++# CONFIG_SENSORS_DS620 is not set
++# CONFIG_SENSORS_DS1621 is not set
++# CONFIG_SENSORS_F71805F is not set
++# CONFIG_SENSORS_F71882FG is not set
++# CONFIG_SENSORS_F75375S is not set
++# CONFIG_SENSORS_G760A is not set
++# CONFIG_SENSORS_GL518SM is not set
++# CONFIG_SENSORS_GL520SM is not set
++# CONFIG_SENSORS_GPIO_FAN is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_JC42 is not set
++# CONFIG_SENSORS_LINEAGE is not set
++# CONFIG_SENSORS_LM63 is not set
++# CONFIG_SENSORS_LM70 is not set
++# CONFIG_SENSORS_LM73 is not set
++CONFIG_SENSORS_LM75=y
++# CONFIG_SENSORS_LM77 is not set
++# CONFIG_SENSORS_LM78 is not set
++# CONFIG_SENSORS_LM80 is not set
++# CONFIG_SENSORS_LM83 is not set
++# CONFIG_SENSORS_LM85 is not set
++# CONFIG_SENSORS_LM87 is not set
++# CONFIG_SENSORS_LM90 is not set
++# CONFIG_SENSORS_LM92 is not set
++# CONFIG_SENSORS_LM93 is not set
++# CONFIG_SENSORS_LTC4151 is not set
++# CONFIG_SENSORS_LTC4215 is not set
++# CONFIG_SENSORS_LTC4245 is not set
++# CONFIG_SENSORS_LTC4261 is not set
++# CONFIG_SENSORS_LM95241 is not set
++# CONFIG_SENSORS_LM95245 is not set
++# CONFIG_SENSORS_MAX1111 is not set
++# CONFIG_SENSORS_MAX16065 is not set
++# CONFIG_SENSORS_MAX1619 is not set
++# CONFIG_SENSORS_MAX1668 is not set
++# CONFIG_SENSORS_MAX6639 is not set
++# CONFIG_SENSORS_MAX6642 is not set
++# CONFIG_SENSORS_MAX6650 is not set
++# CONFIG_SENSORS_NTC_THERMISTOR is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_PC87427 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_PMBUS is not set
++# CONFIG_SENSORS_SHT15 is not set
++# CONFIG_SENSORS_SHT21 is not set
++# CONFIG_SENSORS_SMM665 is not set
++# CONFIG_SENSORS_DME1737 is not set
++# CONFIG_SENSORS_EMC1403 is not set
++# CONFIG_SENSORS_EMC2103 is not set
++# CONFIG_SENSORS_EMC6W201 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_SMSC47M192 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_SCH56XX_COMMON is not set
++# CONFIG_SENSORS_SCH5627 is not set
++# CONFIG_SENSORS_SCH5636 is not set
++# CONFIG_SENSORS_ADS1015 is not set
++# CONFIG_SENSORS_ADS7828 is not set
++# CONFIG_SENSORS_ADS7871 is not set
++# CONFIG_SENSORS_AMC6821 is not set
++# CONFIG_SENSORS_THMC50 is not set
++# CONFIG_SENSORS_TMP102 is not set
++# CONFIG_SENSORS_TMP401 is not set
++# CONFIG_SENSORS_TMP421 is not set
++# CONFIG_SENSORS_VT1211 is not set
++# CONFIG_SENSORS_W83781D is not set
++# CONFIG_SENSORS_W83791D is not set
++# CONFIG_SENSORS_W83792D is not set
++# CONFIG_SENSORS_W83793 is not set
++# CONFIG_SENSORS_W83795 is not set
++# CONFIG_SENSORS_W83L785TS is not set
++# CONFIG_SENSORS_W83L786NG is not set
++# CONFIG_SENSORS_W83627HF is not set
++# CONFIG_SENSORS_W83627EHF is not set
++# CONFIG_THERMAL is not set
++CONFIG_WATCHDOG=y
++# CONFIG_WATCHDOG_CORE is not set
++# CONFIG_WATCHDOG_NOWAYOUT is not set
++
++#
++# Watchdog Device Drivers
++#
++# CONFIG_SOFT_WATCHDOG is not set
++# CONFIG_DW_WATCHDOG is not set
++CONFIG_OMAP_WATCHDOG=y
++# CONFIG_TWL4030_WATCHDOG is not set
++# CONFIG_MAX63XX_WATCHDOG is not set
++
++#
++# USB-based Watchdog Cards
++#
++# CONFIG_USBPCWATCHDOG is not set
++CONFIG_SSB_POSSIBLE=y
++
++#
++# Sonics Silicon Backplane
++#
++# CONFIG_SSB is not set
++CONFIG_BCMA_POSSIBLE=y
++
++#
++# Broadcom specific AMBA
++#
++# CONFIG_BCMA is not set
++
++#
++# Multifunction device drivers
++#
++CONFIG_MFD_CORE=y
++# CONFIG_MFD_88PM860X is not set
++# CONFIG_MFD_SM501 is not set
++# CONFIG_MFD_ASIC3 is not set
++# CONFIG_HTC_EGPIO is not set
++# CONFIG_HTC_PASIC3 is not set
++# CONFIG_HTC_I2CPLD is not set
++# CONFIG_TPS6105X is not set
++# CONFIG_TPS65010 is not set
++# CONFIG_TPS6507X is not set
++CONFIG_MFD_TPS65217=y
++# CONFIG_MFD_TPS6586X is not set
++CONFIG_MFD_TPS65910=y
++# CONFIG_MFD_TPS65912_I2C is not set
++# CONFIG_MFD_TPS65912_SPI is not set
++CONFIG_TWL4030_CORE=y
++# CONFIG_TWL4030_MADC is not set
++CONFIG_TWL4030_POWER=y
++# CONFIG_MFD_TWL4030_AUDIO is not set
++# CONFIG_TWL6030_PWM is not set
++# CONFIG_TWL6040_CORE is not set
++# CONFIG_MFD_STMPE is not set
++# CONFIG_MFD_TC3589X is not set
++# CONFIG_MFD_TMIO is not set
++# CONFIG_MFD_T7L66XB is not set
++# CONFIG_MFD_TC6387XB is not set
++# CONFIG_MFD_TC6393XB is not set
++# CONFIG_PMIC_DA903X is not set
++# CONFIG_PMIC_ADP5520 is not set
++# CONFIG_MFD_MAX8925 is not set
++# CONFIG_MFD_MAX8997 is not set
++# CONFIG_MFD_MAX8998 is not set
++# CONFIG_MFD_WM8400 is not set
++# CONFIG_MFD_WM831X_I2C is not set
++# CONFIG_MFD_WM831X_SPI is not set
++# CONFIG_MFD_WM8350_I2C is not set
++# CONFIG_MFD_WM8994 is not set
++# CONFIG_MFD_PCF50633 is not set
++# CONFIG_MFD_MC13XXX is not set
++# CONFIG_ABX500_CORE is not set
++# CONFIG_EZX_PCAP is not set
++# CONFIG_MFD_WL1273_CORE is not set
++# CONFIG_MFD_AAT2870_CORE is not set
++CONFIG_REGULATOR=y
++# CONFIG_REGULATOR_DEBUG is not set
++CONFIG_REGULATOR_DUMMY=y
++CONFIG_REGULATOR_FIXED_VOLTAGE=y
++# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
++# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
++# CONFIG_REGULATOR_GPIO is not set
++# CONFIG_REGULATOR_BQ24022 is not set
++# CONFIG_REGULATOR_MAX1586 is not set
++# CONFIG_REGULATOR_MAX8649 is not set
++# CONFIG_REGULATOR_MAX8660 is not set
++# CONFIG_REGULATOR_MAX8952 is not set
++# CONFIG_REGULATOR_TWL4030 is not set
++# CONFIG_REGULATOR_LP3971 is not set
++# CONFIG_REGULATOR_LP3972 is not set
++# CONFIG_REGULATOR_TPS65023 is not set
++# CONFIG_REGULATOR_TPS6507X is not set
++CONFIG_REGULATOR_TPS65217=y
++# CONFIG_REGULATOR_ISL6271A is not set
++# CONFIG_REGULATOR_AD5398 is not set
++# CONFIG_REGULATOR_TPS6524X is not set
++CONFIG_REGULATOR_TPS65910=y
++CONFIG_MEDIA_SUPPORT=y
++
++#
++# Multimedia core support
++#
++# CONFIG_MEDIA_CONTROLLER is not set
++CONFIG_VIDEO_DEV=y
++CONFIG_VIDEO_V4L2_COMMON=y
++# CONFIG_DVB_CORE is not set
++CONFIG_VIDEO_MEDIA=y
++
++#
++# Multimedia drivers
++#
++# CONFIG_RC_CORE is not set
++# CONFIG_MEDIA_ATTACH is not set
++CONFIG_MEDIA_TUNER=y
++# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
++CONFIG_MEDIA_TUNER_SIMPLE=y
++CONFIG_MEDIA_TUNER_TDA8290=y
++CONFIG_MEDIA_TUNER_TDA827X=y
++CONFIG_MEDIA_TUNER_TDA18271=y
++CONFIG_MEDIA_TUNER_TDA9887=y
++CONFIG_MEDIA_TUNER_TEA5761=y
++CONFIG_MEDIA_TUNER_TEA5767=y
++CONFIG_MEDIA_TUNER_MT20XX=y
++CONFIG_MEDIA_TUNER_XC2028=y
++CONFIG_MEDIA_TUNER_XC5000=y
++CONFIG_MEDIA_TUNER_XC4000=y
++CONFIG_MEDIA_TUNER_MC44S803=y
++CONFIG_VIDEO_V4L2=y
++CONFIG_VIDEO_CAPTURE_DRIVERS=y
++# CONFIG_VIDEO_ADV_DEBUG is not set
++# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
++# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set
++
++#
++# Encoders, decoders, sensors and other helper chips
++#
++
++#
++# Audio decoders, processors and mixers
++#
++# CONFIG_VIDEO_TVAUDIO is not set
++# CONFIG_VIDEO_TDA7432 is not set
++# CONFIG_VIDEO_TDA9840 is not set
++# CONFIG_VIDEO_TEA6415C is not set
++# CONFIG_VIDEO_TEA6420 is not set
++# CONFIG_VIDEO_MSP3400 is not set
++# CONFIG_VIDEO_CS5345 is not set
++# CONFIG_VIDEO_CS53L32A is not set
++# CONFIG_VIDEO_TLV320AIC23B is not set
++# CONFIG_VIDEO_WM8775 is not set
++# CONFIG_VIDEO_WM8739 is not set
++# CONFIG_VIDEO_VP27SMPX is not set
++
++#
++# RDS decoders
++#
++# CONFIG_VIDEO_SAA6588 is not set
++
++#
++# Video decoders
++#
++# CONFIG_VIDEO_ADV7180 is not set
++# CONFIG_VIDEO_BT819 is not set
++# CONFIG_VIDEO_BT856 is not set
++# CONFIG_VIDEO_BT866 is not set
++# CONFIG_VIDEO_KS0127 is not set
++# CONFIG_VIDEO_SAA7110 is not set
++# CONFIG_VIDEO_SAA711X is not set
++# CONFIG_VIDEO_SAA7191 is not set
++# CONFIG_VIDEO_TVP514X is not set
++# CONFIG_VIDEO_TVP5150 is not set
++# CONFIG_VIDEO_TVP7002 is not set
++# CONFIG_VIDEO_VPX3220 is not set
++
++#
++# Video and audio decoders
++#
++# CONFIG_VIDEO_SAA717X is not set
++# CONFIG_VIDEO_CX25840 is not set
++
++#
++# MPEG video encoders
++#
++# CONFIG_VIDEO_CX2341X is not set
++
++#
++# Video encoders
++#
++# CONFIG_VIDEO_SAA7127 is not set
++# CONFIG_VIDEO_SAA7185 is not set
++# CONFIG_VIDEO_ADV7170 is not set
++# CONFIG_VIDEO_ADV7175 is not set
++# CONFIG_VIDEO_ADV7343 is not set
++# CONFIG_VIDEO_AK881X is not set
++
++#
++# Camera sensor devices
++#
++# CONFIG_VIDEO_OV7670 is not set
++# CONFIG_VIDEO_MT9V011 is not set
++# CONFIG_VIDEO_TCM825X is not set
++# CONFIG_VIDEO_SR030PC30 is not set
++
++#
++# Flash devices
++#
++
++#
++# Video improvement chips
++#
++# CONFIG_VIDEO_UPD64031A is not set
++# CONFIG_VIDEO_UPD64083 is not set
++
++#
++# Miscelaneous helper chips
++#
++# CONFIG_VIDEO_THS7303 is not set
++# CONFIG_VIDEO_M52790 is not set
++# CONFIG_VIDEO_VIVI is not set
++# CONFIG_VIDEO_VPFE_CAPTURE is not set
++# CONFIG_VIDEO_OMAP2_VOUT is not set
++# CONFIG_VIDEO_CPIA2 is not set
++# CONFIG_SOC_CAMERA is not set
++CONFIG_V4L_USB_DRIVERS=y
++CONFIG_USB_VIDEO_CLASS=y
++CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
++# CONFIG_USB_GSPCA is not set
++# CONFIG_VIDEO_PVRUSB2 is not set
++# CONFIG_VIDEO_HDPVR is not set
++# CONFIG_VIDEO_EM28XX is not set
++# CONFIG_VIDEO_USBVISION is not set
++# CONFIG_USB_ET61X251 is not set
++# CONFIG_USB_SN9C102 is not set
++# CONFIG_USB_PWC is not set
++# CONFIG_USB_ZR364XX is not set
++# CONFIG_USB_STKWEBCAM is not set
++# CONFIG_USB_S2255 is not set
++# CONFIG_V4L_MEM2MEM_DRIVERS is not set
++# CONFIG_RADIO_ADAPTERS is not set
++
++#
++# Graphics support
++#
++# CONFIG_DRM is not set
++# CONFIG_VGASTATE is not set
++# CONFIG_VIDEO_OUTPUT_CONTROL is not set
++CONFIG_FB=y
++# CONFIG_FIRMWARE_EDID is not set
++# CONFIG_FB_DDC is not set
++# CONFIG_FB_BOOT_VESA_SUPPORT is not set
++CONFIG_FB_CFB_FILLRECT=y
++CONFIG_FB_CFB_COPYAREA=y
++CONFIG_FB_CFB_IMAGEBLIT=y
++# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
++# CONFIG_FB_SYS_FILLRECT is not set
++# CONFIG_FB_SYS_COPYAREA is not set
++# CONFIG_FB_SYS_IMAGEBLIT is not set
++# CONFIG_FB_FOREIGN_ENDIAN is not set
++# CONFIG_FB_SYS_FOPS is not set
++# CONFIG_FB_WMT_GE_ROPS is not set
++# CONFIG_FB_SVGALIB is not set
++# CONFIG_FB_MACMODES is not set
++# CONFIG_FB_BACKLIGHT is not set
++# CONFIG_FB_MODE_HELPERS is not set
++# CONFIG_FB_TILEBLITTING is not set
++
++#
++# Frame buffer hardware drivers
++#
++# CONFIG_FB_S1D13XXX is not set
++# CONFIG_FB_TMIO is not set
++# CONFIG_FB_SMSCUFX is not set
++# CONFIG_FB_UDL is not set
++CONFIG_FB_DA8XX=y
++CONFIG_FB_DA8XX_CONSISTENT_DMA_SIZE=5
++# CONFIG_FB_VIRTUAL is not set
++# CONFIG_FB_METRONOME is not set
++# CONFIG_FB_BROADSHEET is not set
++# CONFIG_FB_OMAP is not set
++# CONFIG_OMAP2_DSS is not set
++CONFIG_BACKLIGHT_LCD_SUPPORT=y
++CONFIG_LCD_CLASS_DEVICE=y
++# CONFIG_LCD_L4F00242T03 is not set
++# CONFIG_LCD_LMS283GF05 is not set
++# CONFIG_LCD_LTV350QV is not set
++# CONFIG_LCD_TDO24M is not set
++# CONFIG_LCD_VGG2432A4 is not set
++# CONFIG_LCD_PLATFORM is not set
++# CONFIG_LCD_S6E63M0 is not set
++# CONFIG_LCD_LD9040 is not set
++# CONFIG_LCD_AMS369FG06 is not set
++CONFIG_BACKLIGHT_CLASS_DEVICE=y
++# CONFIG_BACKLIGHT_GENERIC is not set
++CONFIG_BACKLIGHT_PWM=y
++# CONFIG_BACKLIGHT_ADP8860 is not set
++# CONFIG_BACKLIGHT_ADP8870 is not set
++CONFIG_BACKLIGHT_TLC59108=y
++
++#
++# Display device support
++#
++CONFIG_DISPLAY_SUPPORT=y
++
++#
++# Display hardware drivers
++#
++
++#
++# Console display driver support
++#
++CONFIG_DUMMY_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
++# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
++# CONFIG_FONTS is not set
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++CONFIG_LOGO=y
++CONFIG_LOGO_LINUX_MONO=y
++CONFIG_LOGO_LINUX_VGA16=y
++CONFIG_LOGO_LINUX_CLUT224=y
++CONFIG_SOUND=y
++# CONFIG_SOUND_OSS_CORE is not set
++CONFIG_SND=y
++CONFIG_SND_TIMER=y
++CONFIG_SND_PCM=y
++CONFIG_SND_HWDEP=y
++CONFIG_SND_RAWMIDI=y
++CONFIG_SND_JACK=y
++# CONFIG_SND_SEQUENCER is not set
++# CONFIG_SND_MIXER_OSS is not set
++# CONFIG_SND_PCM_OSS is not set
++# CONFIG_SND_HRTIMER is not set
++# CONFIG_SND_DYNAMIC_MINORS is not set
++CONFIG_SND_SUPPORT_OLD_API=y
++CONFIG_SND_VERBOSE_PROCFS=y
++# CONFIG_SND_VERBOSE_PRINTK is not set
++# CONFIG_SND_DEBUG is not set
++# CONFIG_SND_RAWMIDI_SEQ is not set
++# CONFIG_SND_OPL3_LIB_SEQ is not set
++# CONFIG_SND_OPL4_LIB_SEQ is not set
++# CONFIG_SND_SBAWE_SEQ is not set
++# CONFIG_SND_EMU10K1_SEQ is not set
++CONFIG_SND_DRIVERS=y
++# CONFIG_SND_DUMMY is not set
++# CONFIG_SND_ALOOP is not set
++# CONFIG_SND_MTPAV is not set
++# CONFIG_SND_SERIAL_U16550 is not set
++# CONFIG_SND_MPU401 is not set
++CONFIG_SND_ARM=y
++CONFIG_SND_SPI=y
++CONFIG_SND_USB=y
++CONFIG_SND_USB_AUDIO=y
++# CONFIG_SND_USB_UA101 is not set
++# CONFIG_SND_USB_CAIAQ is not set
++# CONFIG_SND_USB_6FIRE is not set
++CONFIG_SND_SOC=y
++# CONFIG_SND_SOC_CACHE_LZO is not set
++CONFIG_SND_AM33XX_SOC=y
++CONFIG_SND_DAVINCI_SOC_MCASP=y
++CONFIG_SND_AM335X_SOC_EVM=y
++# CONFIG_SND_OMAP_SOC is not set
++CONFIG_SND_SOC_I2C_AND_SPI=y
++# CONFIG_SND_SOC_ALL_CODECS is not set
++CONFIG_SND_SOC_TLV320AIC3X=y
++# CONFIG_SOUND_PRIME is not set
++CONFIG_HID_SUPPORT=y
++CONFIG_HID=y
++# CONFIG_HIDRAW is not set
++
++#
++# USB Input Devices
++#
++CONFIG_USB_HID=y
++# CONFIG_HID_PID is not set
++# CONFIG_USB_HIDDEV is not set
++
++#
++# Special HID drivers
++#
++CONFIG_HID_A4TECH=y
++# CONFIG_HID_ACRUX is not set
++CONFIG_HID_APPLE=y
++CONFIG_HID_BELKIN=y
++CONFIG_HID_CHERRY=y
++CONFIG_HID_CHICONY=y
++# CONFIG_HID_PRODIKEYS is not set
++CONFIG_HID_CYPRESS=y
++# CONFIG_HID_DRAGONRISE is not set
++# CONFIG_HID_EMS_FF is not set
++CONFIG_HID_EZKEY=y
++# CONFIG_HID_HOLTEK is not set
++# CONFIG_HID_KEYTOUCH is not set
++CONFIG_HID_KYE=y
++# CONFIG_HID_UCLOGIC is not set
++# CONFIG_HID_WALTOP is not set
++# CONFIG_HID_GYRATION is not set
++# CONFIG_HID_TWINHAN is not set
++CONFIG_HID_KENSINGTON=y
++# CONFIG_HID_LCPOWER is not set
++CONFIG_HID_LOGITECH=y
++CONFIG_HID_LOGITECH_DJ=m
++# CONFIG_LOGITECH_FF is not set
++# CONFIG_LOGIRUMBLEPAD2_FF is not set
++# CONFIG_LOGIG940_FF is not set
++# CONFIG_LOGIWHEELS_FF is not set
++CONFIG_HID_MICROSOFT=y
++CONFIG_HID_MONTEREY=y
++# CONFIG_HID_MULTITOUCH is not set
++# CONFIG_HID_NTRIG is not set
++# CONFIG_HID_ORTEK is not set
++# CONFIG_HID_PANTHERLORD is not set
++# CONFIG_HID_PETALYNX is not set
++# CONFIG_HID_PICOLCD is not set
++# CONFIG_HID_PRIMAX is not set
++# CONFIG_HID_QUANTA is not set
++# CONFIG_HID_ROCCAT is not set
++# CONFIG_HID_SAMSUNG is not set
++# CONFIG_HID_SONY is not set
++# CONFIG_HID_SPEEDLINK is not set
++# CONFIG_HID_SUNPLUS is not set
++# CONFIG_HID_GREENASIA is not set
++# CONFIG_HID_SMARTJOYPLUS is not set
++# CONFIG_HID_TOPSEED is not set
++# CONFIG_HID_THRUSTMASTER is not set
++# CONFIG_HID_ZEROPLUS is not set
++# CONFIG_HID_ZYDACRON is not set
++CONFIG_USB_SUPPORT=y
++CONFIG_USB_COMMON=y
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++# CONFIG_USB_ARCH_HAS_XHCI is not set
++CONFIG_USB=y
++# CONFIG_USB_DEBUG is not set
++CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
++
++#
++# Miscellaneous USB options
++#
++CONFIG_USB_DEVICEFS=y
++CONFIG_USB_DEVICE_CLASS=y
++# CONFIG_USB_DYNAMIC_MINORS is not set
++CONFIG_USB_SUSPEND=y
++CONFIG_USB_OTG=y
++# CONFIG_USB_OTG_WHITELIST is not set
++# CONFIG_USB_OTG_BLACKLIST_HUB is not set
++# CONFIG_USB_DWC3 is not set
++# CONFIG_USB_MON is not set
++# CONFIG_USB_WUSB is not set
++# CONFIG_USB_WUSB_CBAF is not set
++
++#
++# USB Host Controller Drivers
++#
++# CONFIG_USB_C67X00_HCD is not set
++# CONFIG_USB_EHCI_HCD is not set
++# CONFIG_USB_OXU210HP_HCD is not set
++# CONFIG_USB_ISP116X_HCD is not set
++# CONFIG_USB_ISP1760_HCD is not set
++# CONFIG_USB_ISP1362_HCD is not set
++# CONFIG_USB_OHCI_HCD is not set
++# CONFIG_USB_SL811_HCD is not set
++# CONFIG_USB_R8A66597_HCD is not set
++# CONFIG_USB_HWA_HCD is not set
++CONFIG_USB_MUSB_HDRC=y
++
++#
++# Platform Glue Layer
++#
++# CONFIG_USB_MUSB_TUSB6010_GLUE is not set
++# CONFIG_USB_MUSB_OMAP2PLUS_GLUE is not set
++# CONFIG_USB_MUSB_AM35X_GLUE is not set
++CONFIG_USB_MUSB_TI81XX_GLUE=y
++# CONFIG_USB_MUSB_DAVINCI is not set
++# CONFIG_USB_MUSB_DA8XX is not set
++# CONFIG_USB_MUSB_TUSB6010 is not set
++# CONFIG_USB_MUSB_OMAP2PLUS is not set
++# CONFIG_USB_MUSB_AM35X is not set
++CONFIG_USB_MUSB_TI81XX=y
++# CONFIG_USB_MUSB_BLACKFIN is not set
++# CONFIG_USB_MUSB_UX500 is not set
++CONFIG_USB_TI_CPPI41_DMA_HW=y
++# CONFIG_MUSB_PIO_ONLY is not set
++# CONFIG_USB_INVENTRA_DMA is not set
++# CONFIG_USB_TI_CPPI_DMA is not set
++CONFIG_USB_TI_CPPI41_DMA=y
++# CONFIG_USB_TUSB_OMAP_DMA is not set
++# CONFIG_USB_UX500_DMA is not set
++# CONFIG_USB_RENESAS_USBHS is not set
++
++#
++# USB Device Class drivers
++#
++# CONFIG_USB_ACM is not set
++# CONFIG_USB_PRINTER is not set
++# CONFIG_USB_WDM is not set
++# CONFIG_USB_TMC is not set
++
++#
++# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
++#
++
++#
++# also be needed; see USB_STORAGE Help for more info
++#
++CONFIG_USB_STORAGE=y
++# CONFIG_USB_STORAGE_DEBUG is not set
++# CONFIG_USB_STORAGE_REALTEK is not set
++# CONFIG_USB_STORAGE_DATAFAB is not set
++# CONFIG_USB_STORAGE_FREECOM is not set
++# CONFIG_USB_STORAGE_ISD200 is not set
++# CONFIG_USB_STORAGE_USBAT is not set
++# CONFIG_USB_STORAGE_SDDR09 is not set
++# CONFIG_USB_STORAGE_SDDR55 is not set
++# CONFIG_USB_STORAGE_JUMPSHOT is not set
++# CONFIG_USB_STORAGE_ALAUDA is not set
++# CONFIG_USB_STORAGE_ONETOUCH is not set
++# CONFIG_USB_STORAGE_KARMA is not set
++# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
++# CONFIG_USB_STORAGE_ENE_UB6250 is not set
++# CONFIG_USB_UAS is not set
++# CONFIG_USB_LIBUSUAL is not set
++
++#
++# USB Imaging devices
++#
++# CONFIG_USB_MDC800 is not set
++# CONFIG_USB_MICROTEK is not set
++
++#
++# USB port drivers
++#
++# CONFIG_USB_SERIAL is not set
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++# CONFIG_USB_ADUTUX is not set
++# CONFIG_USB_SEVSEG is not set
++# CONFIG_USB_RIO500 is not set
++# CONFIG_USB_LEGOTOWER is not set
++# CONFIG_USB_LCD is not set
++# CONFIG_USB_LED is not set
++# CONFIG_USB_CYPRESS_CY7C63 is not set
++# CONFIG_USB_CYTHERM is not set
++# CONFIG_USB_IDMOUSE is not set
++# CONFIG_USB_FTDI_ELAN is not set
++# CONFIG_USB_APPLEDISPLAY is not set
++# CONFIG_USB_SISUSBVGA is not set
++# CONFIG_USB_LD is not set
++# CONFIG_USB_TRANCEVIBRATOR is not set
++# CONFIG_USB_IOWARRIOR is not set
++# CONFIG_USB_TEST is not set
++# CONFIG_USB_ISIGHTFW is not set
++# CONFIG_USB_YUREX is not set
++CONFIG_USB_GADGET=y
++# CONFIG_USB_GADGET_DEBUG_FILES is not set
++CONFIG_USB_GADGET_VBUS_DRAW=2
++CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
++# CONFIG_USB_FUSB300 is not set
++# CONFIG_USB_OMAP is not set
++# CONFIG_USB_R8A66597 is not set
++CONFIG_USB_GADGET_MUSB_HDRC=y
++# CONFIG_USB_M66592 is not set
++# CONFIG_USB_NET2272 is not set
++# CONFIG_USB_DUMMY_HCD is not set
++CONFIG_USB_GADGET_DUALSPEED=y
++# CONFIG_USB_ZERO is not set
++# CONFIG_USB_AUDIO is not set
++CONFIG_USB_ETH=m
++CONFIG_USB_ETH_RNDIS=y
++# CONFIG_USB_ETH_EEM is not set
++# CONFIG_USB_G_NCM is not set
++# CONFIG_USB_GADGETFS is not set
++# CONFIG_USB_FUNCTIONFS is not set
++CONFIG_USB_FILE_STORAGE=m
++# CONFIG_USB_FILE_STORAGE_TEST is not set
++CONFIG_USB_MASS_STORAGE=m
++# CONFIG_USB_G_SERIAL is not set
++# CONFIG_USB_MIDI_GADGET is not set
++# CONFIG_USB_G_PRINTER is not set
++# CONFIG_USB_CDC_COMPOSITE is not set
++# CONFIG_USB_G_ACM_MS is not set
++# CONFIG_USB_G_MULTI is not set
++# CONFIG_USB_G_HID is not set
++# CONFIG_USB_G_DBGP is not set
++# CONFIG_USB_G_WEBCAM is not set
++
++#
++# OTG and related infrastructure
++#
++CONFIG_USB_OTG_UTILS=y
++# CONFIG_USB_GPIO_VBUS is not set
++# CONFIG_USB_ULPI is not set
++# CONFIG_TWL6030_USB is not set
++CONFIG_NOP_USB_XCEIV=y
++CONFIG_MMC=y
++# CONFIG_MMC_DEBUG is not set
++CONFIG_MMC_UNSAFE_RESUME=y
++# CONFIG_MMC_CLKGATE is not set
++
++#
++# MMC/SD/SDIO Card Drivers
++#
++CONFIG_MMC_BLOCK=y
++CONFIG_MMC_BLOCK_MINORS=8
++CONFIG_MMC_BLOCK_BOUNCE=y
++# CONFIG_SDIO_UART is not set
++# CONFIG_MMC_TEST is not set
++
++#
++# MMC/SD/SDIO Host Controller Drivers
++#
++# CONFIG_MMC_SDHCI is not set
++# CONFIG_MMC_SDHCI_PXAV3 is not set
++# CONFIG_MMC_SDHCI_PXAV2 is not set
++# CONFIG_MMC_OMAP is not set
++CONFIG_MMC_OMAP_HS=y
++# CONFIG_MMC_SPI is not set
++# CONFIG_MMC_DW is not set
++# CONFIG_MMC_VUB300 is not set
++# CONFIG_MMC_USHC is not set
++# CONFIG_MEMSTICK is not set
++# CONFIG_NEW_LEDS is not set
++# CONFIG_ACCESSIBILITY is not set
++CONFIG_RTC_LIB=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_HCTOSYS=y
++CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
++# CONFIG_RTC_DEBUG is not set
++
++#
++# RTC interfaces
++#
++CONFIG_RTC_INTF_SYSFS=y
++CONFIG_RTC_INTF_PROC=y
++CONFIG_RTC_INTF_DEV=y
++# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
++# CONFIG_RTC_DRV_TEST is not set
++
++#
++# I2C RTC drivers
++#
++# CONFIG_RTC_DRV_DS1307 is not set
++# CONFIG_RTC_DRV_DS1374 is not set
++# CONFIG_RTC_DRV_DS1672 is not set
++# CONFIG_RTC_DRV_DS3232 is not set
++# CONFIG_RTC_DRV_MAX6900 is not set
++# CONFIG_RTC_DRV_RS5C372 is not set
++# CONFIG_RTC_DRV_ISL1208 is not set
++# CONFIG_RTC_DRV_ISL12022 is not set
++# CONFIG_RTC_DRV_X1205 is not set
++# CONFIG_RTC_DRV_PCF8563 is not set
++# CONFIG_RTC_DRV_PCF8583 is not set
++# CONFIG_RTC_DRV_M41T80 is not set
++# CONFIG_RTC_DRV_BQ32K is not set
++# CONFIG_RTC_DRV_TWL4030 is not set
++# CONFIG_RTC_DRV_S35390A is not set
++# CONFIG_RTC_DRV_FM3130 is not set
++# CONFIG_RTC_DRV_RX8581 is not set
++# CONFIG_RTC_DRV_RX8025 is not set
++# CONFIG_RTC_DRV_EM3027 is not set
++# CONFIG_RTC_DRV_RV3029C2 is not set
++
++#
++# SPI RTC drivers
++#
++# CONFIG_RTC_DRV_M41T93 is not set
++# CONFIG_RTC_DRV_M41T94 is not set
++# CONFIG_RTC_DRV_DS1305 is not set
++# CONFIG_RTC_DRV_DS1390 is not set
++# CONFIG_RTC_DRV_MAX6902 is not set
++# CONFIG_RTC_DRV_R9701 is not set
++# CONFIG_RTC_DRV_RS5C348 is not set
++# CONFIG_RTC_DRV_DS3234 is not set
++# CONFIG_RTC_DRV_PCF2123 is not set
++
++#
++# Platform RTC drivers
++#
++# CONFIG_RTC_DRV_CMOS is not set
++# CONFIG_RTC_DRV_DS1286 is not set
++# CONFIG_RTC_DRV_DS1511 is not set
++# CONFIG_RTC_DRV_DS1553 is not set
++# CONFIG_RTC_DRV_DS1742 is not set
++# CONFIG_RTC_DRV_STK17TA8 is not set
++# CONFIG_RTC_DRV_M48T86 is not set
++# CONFIG_RTC_DRV_M48T35 is not set
++# CONFIG_RTC_DRV_M48T59 is not set
++# CONFIG_RTC_DRV_MSM6242 is not set
++# CONFIG_RTC_DRV_BQ4802 is not set
++# CONFIG_RTC_DRV_RP5C01 is not set
++# CONFIG_RTC_DRV_V3020 is not set
++
++#
++# on-CPU RTC drivers
++#
++CONFIG_RTC_DRV_OMAP=y
++# CONFIG_DMADEVICES is not set
++# CONFIG_AUXDISPLAY is not set
++# CONFIG_UIO is not set
++
++#
++# Virtio drivers
++#
++# CONFIG_VIRTIO_BALLOON is not set
++# CONFIG_VIRTIO_MMIO is not set
++# CONFIG_STAGING is not set
++CONFIG_CLKDEV_LOOKUP=y
++
++#
++# Hardware Spinlock drivers
++#
++CONFIG_CLKSRC_MMIO=y
++# CONFIG_IOMMU_SUPPORT is not set
++# CONFIG_VIRT_DRIVERS is not set
++# CONFIG_PM_DEVFREQ is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_DEFAULTS_TO_ORDERED=y
++# CONFIG_EXT3_FS_XATTR is not set
++# CONFIG_EXT4_FS is not set
++CONFIG_JBD=y
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_BTRFS_FS is not set
++# CONFIG_NILFS2_FS is not set
++CONFIG_FS_POSIX_ACL=y
++CONFIG_FILE_LOCKING=y
++CONFIG_FSNOTIFY=y
++CONFIG_DNOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_FANOTIFY is not set
++CONFIG_QUOTA=y
++# CONFIG_QUOTA_NETLINK_INTERFACE is not set
++CONFIG_PRINT_QUOTA_WARNING=y
++# CONFIG_QUOTA_DEBUG is not set
++CONFIG_QUOTA_TREE=y
++# CONFIG_QFMT_V1 is not set
++CONFIG_QFMT_V2=y
++CONFIG_QUOTACTL=y
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# Caches
++#
++# CONFIG_FSCACHE is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=y
++CONFIG_MSDOS_FS=y
++CONFIG_VFAT_FS=y
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_SYSCTL=y
++CONFIG_PROC_PAGE_MONITOR=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_POSIX_ACL is not set
++# CONFIG_TMPFS_XATTR is not set
++# CONFIG_HUGETLB_PAGE is not set
++# CONFIG_CONFIGFS_FS is not set
++CONFIG_MISC_FILESYSTEMS=y
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_ECRYPT_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++# CONFIG_JFFS2_FS is not set
++CONFIG_UBIFS_FS=y
++# CONFIG_UBIFS_FS_XATTR is not set
++# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
++CONFIG_UBIFS_FS_LZO=y
++CONFIG_UBIFS_FS_ZLIB=y
++# CONFIG_UBIFS_FS_DEBUG is not set
++# CONFIG_LOGFS is not set
++CONFIG_CRAMFS=y
++# CONFIG_SQUASHFS is not set
++# CONFIG_VXFS_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_OMFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_ROMFS_FS is not set
++# CONFIG_PSTORE is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=y
++# CONFIG_NFS_V4_1 is not set
++CONFIG_ROOT_NFS=y
++# CONFIG_NFS_USE_LEGACY_DNS is not set
++CONFIG_NFS_USE_KERNEL_DNS=y
++# CONFIG_NFS_USE_NEW_IDMAPPER is not set
++# CONFIG_NFSD is not set
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_ACL_SUPPORT=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++CONFIG_SUNRPC_GSS=y
++# CONFIG_CEPH_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++# CONFIG_MAC_PARTITION is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_BSD_DISKLABEL is not set
++# CONFIG_MINIX_SUBPARTITION is not set
++# CONFIG_SOLARIS_X86_PARTITION is not set
++# CONFIG_UNIXWARE_DISKLABEL is not set
++# CONFIG_LDM_PARTITION is not set
++# CONFIG_SGI_PARTITION is not set
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++# CONFIG_EFI_PARTITION is not set
++# CONFIG_SYSV68_PARTITION is not set
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++# CONFIG_NLS_CODEPAGE_737 is not set
++# CONFIG_NLS_CODEPAGE_775 is not set
++# CONFIG_NLS_CODEPAGE_850 is not set
++# CONFIG_NLS_CODEPAGE_852 is not set
++# CONFIG_NLS_CODEPAGE_855 is not set
++# CONFIG_NLS_CODEPAGE_857 is not set
++# CONFIG_NLS_CODEPAGE_860 is not set
++# CONFIG_NLS_CODEPAGE_861 is not set
++# CONFIG_NLS_CODEPAGE_862 is not set
++# CONFIG_NLS_CODEPAGE_863 is not set
++# CONFIG_NLS_CODEPAGE_864 is not set
++# CONFIG_NLS_CODEPAGE_865 is not set
++# CONFIG_NLS_CODEPAGE_866 is not set
++# CONFIG_NLS_CODEPAGE_869 is not set
++# CONFIG_NLS_CODEPAGE_936 is not set
++# CONFIG_NLS_CODEPAGE_950 is not set
++# CONFIG_NLS_CODEPAGE_932 is not set
++# CONFIG_NLS_CODEPAGE_949 is not set
++# CONFIG_NLS_CODEPAGE_874 is not set
++# CONFIG_NLS_ISO8859_8 is not set
++# CONFIG_NLS_CODEPAGE_1250 is not set
++# CONFIG_NLS_CODEPAGE_1251 is not set
++# CONFIG_NLS_ASCII is not set
++CONFIG_NLS_ISO8859_1=y
++# CONFIG_NLS_ISO8859_2 is not set
++# CONFIG_NLS_ISO8859_3 is not set
++# CONFIG_NLS_ISO8859_4 is not set
++# CONFIG_NLS_ISO8859_5 is not set
++# CONFIG_NLS_ISO8859_6 is not set
++# CONFIG_NLS_ISO8859_7 is not set
++# CONFIG_NLS_ISO8859_9 is not set
++# CONFIG_NLS_ISO8859_13 is not set
++# CONFIG_NLS_ISO8859_14 is not set
++# CONFIG_NLS_ISO8859_15 is not set
++# CONFIG_NLS_KOI8_R is not set
++# CONFIG_NLS_KOI8_U is not set
++# CONFIG_NLS_UTF8 is not set
++
++#
++# Kernel hacking
++#
++CONFIG_PRINTK_TIME=y
++CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
++CONFIG_ENABLE_WARN_DEPRECATED=y
++CONFIG_ENABLE_MUST_CHECK=y
++CONFIG_FRAME_WARN=1024
++CONFIG_MAGIC_SYSRQ=y
++# CONFIG_STRIP_ASM_SYMS is not set
++# CONFIG_UNUSED_SYMBOLS is not set
++CONFIG_DEBUG_FS=y
++# CONFIG_HEADERS_CHECK is not set
++# CONFIG_DEBUG_SECTION_MISMATCH is not set
++# CONFIG_DEBUG_KERNEL is not set
++# CONFIG_HARDLOCKUP_DETECTOR is not set
++# CONFIG_SPARSE_RCU_POINTER is not set
++CONFIG_DEBUG_BUGVERBOSE=y
++CONFIG_DEBUG_MEMORY_INIT=y
++CONFIG_FRAME_POINTER=y
++# CONFIG_SYSCTL_SYSCALL_CHECK is not set
++CONFIG_HAVE_FUNCTION_TRACER=y
++CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
++CONFIG_HAVE_DYNAMIC_FTRACE=y
++CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
++CONFIG_HAVE_C_RECORDMCOUNT=y
++CONFIG_RING_BUFFER=y
++CONFIG_RING_BUFFER_ALLOW_SWAP=y
++CONFIG_TRACING_SUPPORT=y
++# CONFIG_FTRACE is not set
++CONFIG_DYNAMIC_DEBUG=y
++# CONFIG_DMA_API_DEBUG is not set
++# CONFIG_ATOMIC64_SELFTEST is not set
++# CONFIG_SAMPLES is not set
++CONFIG_HAVE_ARCH_KGDB=y
++# CONFIG_TEST_KSTRTOX is not set
++# CONFIG_STRICT_DEVMEM is not set
++# CONFIG_ARM_UNWIND is not set
++# CONFIG_DEBUG_USER is not set
++CONFIG_DEBUG_JTAG_ENABLE=y
++
++#
++# Security options
++#
++CONFIG_KEYS=y
++# CONFIG_ENCRYPTED_KEYS is not set
++# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
++# CONFIG_SECURITY_DMESG_RESTRICT is not set
++CONFIG_SECURITY=y
++# CONFIG_SECURITYFS is not set
++# CONFIG_SECURITY_NETWORK is not set
++# CONFIG_SECURITY_PATH is not set
++# CONFIG_SECURITY_TOMOYO is not set
++# CONFIG_SECURITY_APPARMOR is not set
++# CONFIG_IMA is not set
++# CONFIG_EVM is not set
++CONFIG_DEFAULT_SECURITY_DAC=y
++CONFIG_DEFAULT_SECURITY=""
++CONFIG_CRYPTO=y
++
++#
++# Crypto core or helper
++#
++CONFIG_CRYPTO_ALGAPI=y
++CONFIG_CRYPTO_ALGAPI2=y
++CONFIG_CRYPTO_AEAD2=y
++CONFIG_CRYPTO_BLKCIPHER=y
++CONFIG_CRYPTO_BLKCIPHER2=y
++CONFIG_CRYPTO_HASH=y
++CONFIG_CRYPTO_HASH2=y
++CONFIG_CRYPTO_RNG2=y
++CONFIG_CRYPTO_PCOMP2=y
++CONFIG_CRYPTO_MANAGER=y
++CONFIG_CRYPTO_MANAGER2=y
++# CONFIG_CRYPTO_USER is not set
++CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
++# CONFIG_CRYPTO_GF128MUL is not set
++# CONFIG_CRYPTO_NULL is not set
++CONFIG_CRYPTO_WORKQUEUE=y
++# CONFIG_CRYPTO_CRYPTD is not set
++# CONFIG_CRYPTO_AUTHENC is not set
++# CONFIG_CRYPTO_TEST is not set
++
++#
++# Authenticated Encryption with Associated Data
++#
++# CONFIG_CRYPTO_CCM is not set
++# CONFIG_CRYPTO_GCM is not set
++# CONFIG_CRYPTO_SEQIV is not set
++
++#
++# Block modes
++#
++# CONFIG_CRYPTO_CBC is not set
++# CONFIG_CRYPTO_CTR is not set
++# CONFIG_CRYPTO_CTS is not set
++CONFIG_CRYPTO_ECB=y
++# CONFIG_CRYPTO_LRW is not set
++# CONFIG_CRYPTO_PCBC is not set
++# CONFIG_CRYPTO_XTS is not set
++
++#
++# Hash modes
++#
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_XCBC is not set
++# CONFIG_CRYPTO_VMAC is not set
++
++#
++# Digest
++#
++CONFIG_CRYPTO_CRC32C=y
++# CONFIG_CRYPTO_GHASH is not set
++# CONFIG_CRYPTO_MD4 is not set
++# CONFIG_CRYPTO_MD5 is not set
++CONFIG_CRYPTO_MICHAEL_MIC=y
++# CONFIG_CRYPTO_RMD128 is not set
++# CONFIG_CRYPTO_RMD160 is not set
++# CONFIG_CRYPTO_RMD256 is not set
++# CONFIG_CRYPTO_RMD320 is not set
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++# CONFIG_CRYPTO_WP512 is not set
++
++#
++# Ciphers
++#
++CONFIG_CRYPTO_AES=y
++# CONFIG_CRYPTO_ANUBIS is not set
++CONFIG_CRYPTO_ARC4=y
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_CAMELLIA is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_DES is not set
++# CONFIG_CRYPTO_FCRYPT is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_SALSA20 is not set
++# CONFIG_CRYPTO_SEED is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++
++#
++# Compression
++#
++CONFIG_CRYPTO_DEFLATE=y
++# CONFIG_CRYPTO_ZLIB is not set
++CONFIG_CRYPTO_LZO=y
++
++#
++# Random Number Generation
++#
++# CONFIG_CRYPTO_ANSI_CPRNG is not set
++# CONFIG_CRYPTO_USER_API_HASH is not set
++# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
++# CONFIG_CRYPTO_HW is not set
++# CONFIG_BINARY_PRINTF is not set
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++CONFIG_CRC_CCITT=y
++CONFIG_CRC16=y
++CONFIG_CRC_T10DIF=y
++CONFIG_CRC_ITU_T=y
++CONFIG_CRC32=y
++CONFIG_CRC7=y
++CONFIG_LIBCRC32C=y
++# CONFIG_CRC8 is not set
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=y
++CONFIG_LZO_COMPRESS=y
++CONFIG_LZO_DECOMPRESS=y
++CONFIG_XZ_DEC=y
++CONFIG_XZ_DEC_X86=y
++CONFIG_XZ_DEC_POWERPC=y
++CONFIG_XZ_DEC_IA64=y
++CONFIG_XZ_DEC_ARM=y
++CONFIG_XZ_DEC_ARMTHUMB=y
++CONFIG_XZ_DEC_SPARC=y
++CONFIG_XZ_DEC_BCJ=y
++# CONFIG_XZ_DEC_TEST is not set
++CONFIG_DECOMPRESS_GZIP=y
++CONFIG_DECOMPRESS_BZIP2=y
++CONFIG_DECOMPRESS_LZMA=y
++CONFIG_DECOMPRESS_XZ=y
++CONFIG_DECOMPRESS_LZO=y
++CONFIG_GENERIC_ALLOCATOR=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT=y
++CONFIG_HAS_DMA=y
++CONFIG_NLATTR=y
++CONFIG_AVERAGE=y
++# CONFIG_CORDIC is not set
+diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
+index 945a34f..dde2a1a 100644
+--- a/arch/arm/configs/omap1_defconfig
++++ b/arch/arm/configs/omap1_defconfig
+@@ -48,7 +48,6 @@ CONFIG_MACH_SX1=y
+ CONFIG_MACH_NOKIA770=y
+ CONFIG_MACH_AMS_DELTA=y
+ CONFIG_MACH_OMAP_GENERIC=y
+-CONFIG_OMAP_ARM_182MHZ=y
+ # CONFIG_ARM_THUMB is not set
+ CONFIG_PCCARD=y
+ CONFIG_OMAP_CF=y
+diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
+index d5f00d7..6c716aa 100644
+--- a/arch/arm/configs/omap2plus_defconfig
++++ b/arch/arm/configs/omap2plus_defconfig
+@@ -21,7 +21,8 @@ CONFIG_MODVERSIONS=y
+ CONFIG_MODULE_SRCVERSION_ALL=y
+ # CONFIG_BLK_DEV_BSG is not set
+ CONFIG_ARCH_OMAP=y
+-CONFIG_OMAP_RESET_CLOCKS=y
++# CONFIG_OMAP_RESET_CLOCKS is not set
++# CONFIG_OMAP_32K_TIMER is not set
+ CONFIG_OMAP_MUX_DEBUG=y
+ CONFIG_ARM_THUMBEE=y
+ CONFIG_ARM_ERRATA_411920=y
+@@ -81,6 +82,8 @@ CONFIG_MTD_UBI=y
+ CONFIG_BLK_DEV_LOOP=y
+ CONFIG_BLK_DEV_RAM=y
+ CONFIG_BLK_DEV_RAM_SIZE=16384
++CONFIG_MISC_DEVICES=y
++CONFIG_EEPROM_AT24=y
+ CONFIG_SCSI=y
+ CONFIG_BLK_DEV_SD=y
+ CONFIG_SCSI_MULTI_LUN=y
+@@ -89,6 +92,9 @@ CONFIG_MD=y
+ CONFIG_NETDEVICES=y
+ CONFIG_SMSC_PHY=y
+ CONFIG_NET_ETHERNET=y
++CONFIG_TI_DAVINCI_MDIO=y
++CONFIG_TI_DAVINCI_CPDMA=y
++CONFIG_TI_CPSW=y
+ CONFIG_SMC91X=y
+ CONFIG_SMSC911X=y
+ CONFIG_KS8851=y
+@@ -108,6 +114,7 @@ CONFIG_KEYBOARD_GPIO=y
+ CONFIG_KEYBOARD_TWL4030=y
+ CONFIG_INPUT_TOUCHSCREEN=y
+ CONFIG_TOUCHSCREEN_ADS7846=y
++CONFIG_TOUCHSCREEN_TI_TSCADC=y
+ CONFIG_INPUT_MISC=y
+ CONFIG_INPUT_TWL4030_PWRBUTTON=y
+ CONFIG_VT_HW_CONSOLE_BINDING=y
+@@ -151,14 +158,23 @@ CONFIG_PANEL_TPO_TD043MTEA1=m
+ CONFIG_PANEL_ACX565AKM=m
+ CONFIG_BACKLIGHT_LCD_SUPPORT=y
+ CONFIG_LCD_CLASS_DEVICE=y
++CONFIG_BACKLIGHT_CLASS_DEVICE=y
++# CONFIG_BACKLIGHT_GENERIC is not set
++# CONFIG_BACKLIGHT_ADP8860 is not set
++CONFIG_BACKLIGHT_TLC59108=y
+ CONFIG_LCD_PLATFORM=y
+ CONFIG_DISPLAY_SUPPORT=y
++CONFIG_FB_DA8XX=y
++CONFIG_FB_DA8XX_CONSISTENT_DMA_SIZE=4
+ CONFIG_FRAMEBUFFER_CONSOLE=y
+ CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+ CONFIG_FONTS=y
+ CONFIG_FONT_8x8=y
+ CONFIG_FONT_8x16=y
+ CONFIG_LOGO=y
++CONFIG_LOGO_LINUX_MONO=y
++CONFIG_LOGO_LINUX_VGA16=y
++CONFIG_LOGO_LINUX_CLUT224=y
+ CONFIG_SOUND=m
+ CONFIG_SND=m
+ CONFIG_SND_MIXER_OSS=m
+@@ -225,6 +241,9 @@ CONFIG_PROVE_LOCKING=y
+ CONFIG_DEBUG_SPINLOCK_SLEEP=y
+ # CONFIG_DEBUG_BUGVERBOSE is not set
+ CONFIG_DEBUG_INFO=y
++CONFIG_DEBUG_LL=y
++CONFIG_DEBUG_LL_UART_NONE=y
++CONFIG_EARLY_PRINTK=y
+ # CONFIG_RCU_CPU_STALL_DETECTOR is not set
+ CONFIG_SECURITY=y
+ CONFIG_CRYPTO_MICHAEL_MIC=y
+diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h
+new file mode 100644
+index 0000000..a0ada3e
+--- /dev/null
++++ b/arch/arm/include/asm/cti.h
+@@ -0,0 +1,179 @@
++#ifndef __ASMARM_CTI_H
++#define __ASMARM_CTI_H
++
++#include <asm/io.h>
++
++/* The registers' definition is from section 3.2 of
++ * Embedded Cross Trigger Revision: r0p0
++ */
++#define CTICONTROL 0x000
++#define CTISTATUS 0x004
++#define CTILOCK 0x008
++#define CTIPROTECTION 0x00C
++#define CTIINTACK 0x010
++#define CTIAPPSET 0x014
++#define CTIAPPCLEAR 0x018
++#define CTIAPPPULSE 0x01c
++#define CTIINEN 0x020
++#define CTIOUTEN 0x0A0
++#define CTITRIGINSTATUS 0x130
++#define CTITRIGOUTSTATUS 0x134
++#define CTICHINSTATUS 0x138
++#define CTICHOUTSTATUS 0x13c
++#define CTIPERIPHID0 0xFE0
++#define CTIPERIPHID1 0xFE4
++#define CTIPERIPHID2 0xFE8
++#define CTIPERIPHID3 0xFEC
++#define CTIPCELLID0 0xFF0
++#define CTIPCELLID1 0xFF4
++#define CTIPCELLID2 0xFF8
++#define CTIPCELLID3 0xFFC
++
++/* The below are from section 3.6.4 of
++ * CoreSight v1.0 Architecture Specification
++ */
++#define LOCKACCESS 0xFB0
++#define LOCKSTATUS 0xFB4
++
++/* writing this value to LOCKACCESS will unlock the module;
++ * any other value will lock the module
++ */
++#define LOCKCODE 0xC5ACCE55
++
++/**
++ * struct cti - cross trigger interface struct
++ * @base: mapped virtual address for the cti base
++ * @irq: irq number for the cti
++ * @trig_out_for_irq: trigger out number which will cause
++ * the @irq to happen
++ *
++ * cti struct used to operate cti registers.
++ */
++struct cti {
++ void __iomem *base;
++ int irq;
++ int trig_out_for_irq;
++};
++
++/**
++ * cti_init - initialize the cti instance
++ * @cti: cti instance
++ * @base: mapped virtual address for the cti base
++ * @irq: irq number for the cti
++ * @trig_out: trigger out number which will cause
++ * the @irq to happen
++ *
++ * called by machine code to pass the board dependent
++ * @base, @irq and @trig_out to cti.
++ */
++static inline void cti_init(struct cti *cti,
++ void __iomem *base, int irq, int trig_out)
++{
++ cti->base = base;
++ cti->irq = irq;
++ cti->trig_out_for_irq = trig_out;
++}
++
++/**
++ * cti_map_trigger - use the @chan to map @trig_in to @trig_out
++ * @cti: cti instance
++ * @trig_in: trigger in number
++ * @trig_out: trigger out number
++ * @chan: channel number
++ *
++ * This function maps one trigger in of @trig_in to one trigger
++ * out of @trig_out using the channel @chan.
++ */
++static inline void cti_map_trigger(struct cti *cti,
++ int trig_in, int trig_out, int chan)
++{
++ void __iomem *base = cti->base;
++ unsigned long val;
++
++ val = __raw_readl(base + CTIINEN + trig_in * 4);
++ val |= BIT(chan);
++ __raw_writel(val, base + CTIINEN + trig_in * 4);
++
++ val = __raw_readl(base + CTIOUTEN + trig_out * 4);
++ val |= BIT(chan);
++ __raw_writel(val, base + CTIOUTEN + trig_out * 4);
++}
++
++/**
++ * cti_enable - enable the cti module
++ * @cti: cti instance
++ *
++ * enable the cti module
++ */
++static inline void cti_enable(struct cti *cti)
++{
++ __raw_writel(0x1, cti->base + CTICONTROL);
++}
++
++/**
++ * cti_disable - disable the cti module
++ * @cti: cti instance
++ *
++ * disable the cti module
++ */
++static inline void cti_disable(struct cti *cti)
++{
++ __raw_writel(0, cti->base + CTICONTROL);
++}
++
++/**
++ * cti_irq_ack - clear the cti irq
++ * @cti: cti instance
++ *
++ * clear the cti irq
++ */
++static inline void cti_irq_ack(struct cti *cti)
++{
++ void __iomem *base = cti->base;
++ unsigned long val;
++
++ val = __raw_readl(base + CTIINTACK);
++ val |= BIT(cti->trig_out_for_irq);
++ __raw_writel(val, base + CTIINTACK);
++}
++
++/**
++ * cti_unlock - unlock cti module
++ * @cti: cti instance
++ *
++ * unlock the cti module, or else any writes to the cti
++ * module are not allowed.
++ */
++static inline void cti_unlock(struct cti *cti)
++{
++ void __iomem *base = cti->base;
++ unsigned long val;
++
++ val = __raw_readl(base + LOCKSTATUS);
++
++ if (val & 1) {
++ val = LOCKCODE;
++ __raw_writel(val, base + LOCKACCESS);
++ }
++}
++
++/**
++ * cti_lock - lock cti module
++ * @cti: cti instance
++ *
++ * lock the cti module, so that any writes to the cti
++ * module will not be allowed.
++ */
++static inline void cti_lock(struct cti *cti)
++{
++ void __iomem *base = cti->base;
++ unsigned long val;
++
++ val = __raw_readl(base + LOCKSTATUS);
++
++ if (!(val & 1)) {
++ val = ~LOCKCODE;
++ __raw_writel(val, base + LOCKACCESS);
++ }
++}
++#endif
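
(Illustrative usage sketch, not part of the patch. It shows how machine code
might drive the CTI helpers defined in the header above; the IRQ number,
trigger output 6, trigger input 1 and channel 2 are made-up placeholders, and
the header is assumed to be reachable as <asm/cti.h>.)

    #include <linux/interrupt.h>
    #include <asm/cti.h>

    static struct cti example_cti;

    /* Board code: record the base/IRQ, unlock, route a trigger, enable. */
    static void example_cti_setup(void __iomem *cti_base, int irq)
    {
            /* trigger output 6 is assumed to be the one raising @irq */
            cti_init(&example_cti, cti_base, irq, 6);

            /* writes are ignored while the module is locked */
            cti_unlock(&example_cti);

            /* route trigger input 1 to trigger output 6 via channel 2 */
            cti_map_trigger(&example_cti, 1, 6, 2);

            cti_enable(&example_cti);
    }

    /* Interrupt handler: acknowledge the trigger output that fired. */
    static irqreturn_t example_cti_isr(int irq, void *dev_id)
    {
            cti_irq_ack(&example_cti);
            return IRQ_HANDLED;
    }
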
+diff --git a/arch/arm/include/asm/entry-macro-vic2.S b/arch/arm/include/asm/entry-macro-vic2.S
+deleted file mode 100644
+index 3ceb85e..0000000
+--- a/arch/arm/include/asm/entry-macro-vic2.S
++++ /dev/null
+@@ -1,57 +0,0 @@
+-/* arch/arm/include/asm/entry-macro-vic2.S
+- *
+- * Originally arch/arm/mach-s3c6400/include/mach/entry-macro.S
+- *
+- * Copyright 2008 Openmoko, Inc.
+- * Copyright 2008 Simtec Electronics
+- * http://armlinux.simtec.co.uk/
+- * Ben Dooks <ben@simtec.co.uk>
+- *
+- * Low-level IRQ helper macros for a device with two VICs
+- *
+- * This file is licensed under the terms of the GNU General Public
+- * License version 2. This program is licensed "as is" without any
+- * warranty of any kind, whether express or implied.
+-*/
+-
+-/* This should be included from <mach/entry-macro.S> with the necessary
+- * defines for virtual addresses and IRQ bases for the two vics.
+- *
+- * The code needs the following defined:
+- * IRQ_VIC0_BASE IRQ number of VIC0's first IRQ
+- * IRQ_VIC1_BASE IRQ number of VIC1's first IRQ
+- * VA_VIC0 Virtual address of VIC0
+- * VA_VIC1 Virtual address of VIC1
+- *
+- * Note, code assumes VIC0's virtual address is an ARM immediate constant
+- * away from VIC1.
+-*/
+-
+-#include <asm/hardware/vic.h>
+-
+- .macro disable_fiq
+- .endm
+-
+- .macro get_irqnr_preamble, base, tmp
+- ldr \base, =VA_VIC0
+- .endm
+-
+- .macro arch_ret_to_user, tmp1, tmp2
+- .endm
+-
+- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+-
+- @ check the vic0
+- mov \irqnr, #IRQ_VIC0_BASE + 31
+- ldr \irqstat, [ \base, # VIC_IRQ_STATUS ]
+- teq \irqstat, #0
+-
+- @ otherwise try vic1
+- addeq \tmp, \base, #(VA_VIC1 - VA_VIC0)
+- addeq \irqnr, \irqnr, #(IRQ_VIC1_BASE - IRQ_VIC0_BASE)
+- ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ]
+- teqeq \irqstat, #0
+-
+- clzne \irqstat, \irqstat
+- subne \irqnr, \irqnr, \irqstat
+- .endm
+diff --git a/arch/arm/include/asm/hardware/asp.h b/arch/arm/include/asm/hardware/asp.h
+new file mode 100644
+index 0000000..0d5c0bf
+--- /dev/null
++++ b/arch/arm/include/asm/hardware/asp.h
+@@ -0,0 +1,143 @@
++/*
++ * <asm/hardware/asp.h> - DaVinci Audio Serial Port support
++ */
++#ifndef __ASM_HARDWARE_MCASP_H
++#define __ASM_HARDWARE_MCASP_H
++
++#include <mach/irqs.h>
++#include <mach/edma.h>
++
++/* Bases of dm644x and dm355 register banks */
++#define DAVINCI_ASP0_BASE 0x01E02000
++#define DAVINCI_ASP1_BASE 0x01E04000
++
++/* Bases of dm365 register banks */
++#define DAVINCI_DM365_ASP0_BASE 0x01D02000
++
++/* Bases of dm646x register banks */
++#define DAVINCI_DM646X_MCASP0_REG_BASE 0x01D01000
++#define DAVINCI_DM646X_MCASP1_REG_BASE 0x01D01800
++
++/* Bases of da850/da830 McASP0 register banks */
++#define DAVINCI_DA8XX_MCASP0_REG_BASE 0x01D00000
++
++/* Bases of da830 McASP1 register banks */
++#define DAVINCI_DA830_MCASP1_REG_BASE 0x01D04000
++
++/* EDMA channels of dm644x and dm355 */
++#define DAVINCI_DMA_ASP0_TX 2
++#define DAVINCI_DMA_ASP0_RX 3
++#define DAVINCI_DMA_ASP1_TX 8
++#define DAVINCI_DMA_ASP1_RX 9
++
++/* EDMA channels of dm646x */
++#define DAVINCI_DM646X_DMA_MCASP0_AXEVT0 6
++#define DAVINCI_DM646X_DMA_MCASP0_AREVT0 9
++#define DAVINCI_DM646X_DMA_MCASP1_AXEVT1 12
++
++/* EDMA channels of da850/da830 McASP0 */
++#define DAVINCI_DA8XX_DMA_MCASP0_AREVT 0
++#define DAVINCI_DA8XX_DMA_MCASP0_AXEVT 1
++
++/* EDMA channels of da830 McASP1 */
++#define DAVINCI_DA830_DMA_MCASP1_AREVT 2
++#define DAVINCI_DA830_DMA_MCASP1_AXEVT 3
++
++/* Interrupts */
++#define DAVINCI_ASP0_RX_INT IRQ_MBRINT
++#define DAVINCI_ASP0_TX_INT IRQ_MBXINT
++#define DAVINCI_ASP1_RX_INT IRQ_MBRINT
++#define DAVINCI_ASP1_TX_INT IRQ_MBXINT
++
++struct snd_platform_data {
++ u32 tx_dma_offset;
++ u32 rx_dma_offset;
++ enum dma_event_q asp_chan_q; /* event queue number for ASP channel */
++ enum dma_event_q ram_chan_q; /* event queue number for RAM channel */
++ unsigned int codec_fmt;
++ /*
++ * Allowing this is more efficient and eliminates left and right swaps
++ * caused by underruns, but will swap the left and right channels
++ * when compared to previous behavior.
++ */
++ unsigned enable_channel_combine:1;
++ unsigned sram_size_playback;
++ unsigned sram_size_capture;
++
++ /*
++ * If the McBSP peripheral gets its clock from an external pin,
++ * there are three choices: MCBSP_CLKX, MCBSP_CLKR
++ * and MCBSP_CLKS.
++ * Depending on the hardware connections it is possible
++ * to use this setting to change the behaviour of the McBSP
++ * driver. The dm365_clk_input_pin enum is available for dm365.
++ */
++ int clk_input_pin;
++
++ /*
++ * This flag works when both the clock and FS are outputs for the cpu
++ * and makes the clock more accurate (FS is not symmetrical and the
++ * clock is very fast).
++ * The resulting faster clock is named the
++ * i2s continuous serial clock (I2S_SCK) and it is an externally
++ * visible bit clock.
++ *
++ * first line : WordSelect
++ * second line : ContinuousSerialClock
++ * third line: SerialData
++ *
++ * SYMMETRICAL APPROACH:
++ * _______________________ LEFT
++ * _| RIGHT |______________________|
++ * _ _ _ _ _ _ _ _
++ * _| |_| |_ x16 _| |_| |_| |_| |_ x16 _| |_| |_
++ * _ _ _ _ _ _ _ _
++ * _/ \_/ \_ ... _/ \_/ \_/ \_/ \_ ... _/ \_/ \_
++ * \_/ \_/ \_/ \_/ \_/ \_/ \_/ \_/
++ *
++ * ACCURATE CLOCK APPROACH:
++ * ______________ LEFT
++ * _| RIGHT |_______________________________|
++ * _ _ _ _ _ _ _ _ _
++ * _| |_ x16 _| |_| |_ x16 _| |_| |_| |_| |_| |_| |
++ * _ _ _ _ dummy cycles
++ * _/ \_ ... _/ \_/ \_ ... _/ \__________________
++ * \_/ \_/ \_/ \_/
++ *
++ */
++ bool i2s_accurate_sck;
++
++ /* McASP specific fields */
++ int tdm_slots;
++ u8 op_mode;
++ u8 num_serializer;
++ u8 *serial_dir;
++ u8 version;
++ u8 txnumevt;
++ u8 rxnumevt;
++};
++
++enum {
++ MCASP_VERSION_1 = 0, /* DM646x */
++ MCASP_VERSION_2, /* DA8xx/OMAPL1x */
++ MCASP_VERSION_3, /* AM33xx */
++};
++
++enum dm365_clk_input_pin {
++ MCBSP_CLKR = 0, /* DM365 */
++ MCBSP_CLKS,
++};
++
++#define INACTIVE_MODE 0
++#define TX_MODE 1
++#define RX_MODE 2
++
++#define DAVINCI_MCASP_IIS_MODE 0
++#define DAVINCI_MCASP_DIT_MODE 1
++
++#if (defined(CONFIG_SOC_OMAPAM33XX) && (defined(CONFIG_SND_AM33XX_SOC) \
++ || (defined(CONFIG_SND_AM33XX_SOC_MODULE))))
++#define davinci_gen_pool omap_gen_pool
++#endif
++
++#endif /* __ASM_HARDWARE_MCASP_H */
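
(Illustrative usage sketch, not part of the patch. It shows how a board file
might fill in the snd_platform_data defined above for a McASP instance; the
data-port offset, serializer layout and event queue are made-up placeholders,
and the header is assumed to be reachable as <asm/hardware/asp.h>.)

    #include <linux/kernel.h>
    #include <asm/hardware/asp.h>

    static u8 example_serializer_dir[] = {
            INACTIVE_MODE, INACTIVE_MODE, TX_MODE, RX_MODE,
    };

    static struct snd_platform_data example_asp_pdata = {
            .tx_dma_offset  = 0x46000000,   /* assumed McASP data port */
            .rx_dma_offset  = 0x46000000,
            .asp_chan_q     = EVENTQ_2,     /* assumed free event queue */
            .op_mode        = DAVINCI_MCASP_IIS_MODE,
            .tdm_slots      = 2,
            .serial_dir     = example_serializer_dir,
            .num_serializer = ARRAY_SIZE(example_serializer_dir),
            .version        = MCASP_VERSION_3,     /* AM33xx */
            .txnumevt       = 1,
            .rxnumevt       = 1,
    };
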
+diff --git a/arch/arm/include/asm/hardware/entry-macro-gic.S b/arch/arm/include/asm/hardware/entry-macro-gic.S
+deleted file mode 100644
+index 74ebc80..0000000
+--- a/arch/arm/include/asm/hardware/entry-macro-gic.S
++++ /dev/null
+@@ -1,60 +0,0 @@
+-/*
+- * arch/arm/include/asm/hardware/entry-macro-gic.S
+- *
+- * Low-level IRQ helper macros for GIC
+- *
+- * This file is licensed under the terms of the GNU General Public
+- * License version 2. This program is licensed "as is" without any
+- * warranty of any kind, whether express or implied.
+- */
+-
+-#include <asm/hardware/gic.h>
+-
+-#ifndef HAVE_GET_IRQNR_PREAMBLE
+- .macro get_irqnr_preamble, base, tmp
+- ldr \base, =gic_cpu_base_addr
+- ldr \base, [\base]
+- .endm
+-#endif
+-
+-/*
+- * The interrupt numbering scheme is defined in the
+- * interrupt controller spec. To wit:
+- *
+- * Interrupts 0-15 are IPI
+- * 16-31 are local. We allow 30 to be used for the watchdog.
+- * 32-1020 are global
+- * 1021-1022 are reserved
+- * 1023 is "spurious" (no interrupt)
+- *
+- * A simple read from the controller will tell us the number of the highest
+- * priority enabled interrupt. We then just need to check whether it is in the
+- * valid range for an IRQ (30-1020 inclusive).
+- */
+-
+- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+-
+- ldr \irqstat, [\base, #GIC_CPU_INTACK]
+- /* bits 12-10 = src CPU, 9-0 = int # */
+-
+- ldr \tmp, =1021
+- bic \irqnr, \irqstat, #0x1c00
+- cmp \irqnr, #15
+- cmpcc \irqnr, \irqnr
+- cmpne \irqnr, \tmp
+- cmpcs \irqnr, \irqnr
+- .endm
+-
+-/* We assume that irqstat (the raw value of the IRQ acknowledge
+- * register) is preserved from the macro above.
+- * If there is an IPI, we immediately signal end of interrupt on the
+- * controller, since this requires the original irqstat value which
+- * we won't easily be able to recreate later.
+- */
+-
+- .macro test_for_ipi, irqnr, irqstat, base, tmp
+- bic \irqnr, \irqstat, #0x1c00
+- cmp \irqnr, #16
+- strcc \irqstat, [\base, #GIC_CPU_EOI]
+- cmpcs \irqnr, \irqnr
+- .endm
+diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
+index 3e91f22..4bdfe00 100644
+--- a/arch/arm/include/asm/hardware/gic.h
++++ b/arch/arm/include/asm/hardware/gic.h
+@@ -36,30 +36,22 @@
+ #include <linux/irqdomain.h>
+ struct device_node;
+
+-extern void __iomem *gic_cpu_base_addr;
+ extern struct irq_chip gic_arch_extn;
+
+-void gic_init(unsigned int, int, void __iomem *, void __iomem *);
++void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
++ u32 offset);
+ int gic_of_init(struct device_node *node, struct device_node *parent);
+ void gic_secondary_init(unsigned int);
++void gic_handle_irq(struct pt_regs *regs);
+ void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
+ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
+
+-struct gic_chip_data {
+- void __iomem *dist_base;
+- void __iomem *cpu_base;
+-#ifdef CONFIG_CPU_PM
+- u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
+- u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
+- u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
+- u32 __percpu *saved_ppi_enable;
+- u32 __percpu *saved_ppi_conf;
+-#endif
+-#ifdef CONFIG_IRQ_DOMAIN
+- struct irq_domain domain;
+-#endif
+- unsigned int gic_irqs;
+-};
++static inline void gic_init(unsigned int nr, int start,
++ void __iomem *dist , void __iomem *cpu)
++{
++ gic_init_bases(nr, start, dist, cpu, 0);
++}
++
+ #endif
+
+ #endif
+diff --git a/arch/arm/include/asm/hardware/vic.h b/arch/arm/include/asm/hardware/vic.h
+index 5d72550..f42ebd6 100644
+--- a/arch/arm/include/asm/hardware/vic.h
++++ b/arch/arm/include/asm/hardware/vic.h
+@@ -41,7 +41,15 @@
+ #define VIC_PL192_VECT_ADDR 0xF00
+
+ #ifndef __ASSEMBLY__
++#include <linux/compiler.h>
++#include <linux/types.h>
++
++struct device_node;
++struct pt_regs;
++
+ void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources);
+-#endif
++int vic_of_init(struct device_node *node, struct device_node *parent);
++void vic_handle_irq(struct pt_regs *regs);
+
++#endif /* __ASSEMBLY__ */
+ #endif
+diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
+index 2b0efc3..bcb0c88 100644
+--- a/arch/arm/include/asm/mach/arch.h
++++ b/arch/arm/include/asm/mach/arch.h
+@@ -31,10 +31,10 @@ struct machine_desc {
+ unsigned int video_start; /* start of video RAM */
+ unsigned int video_end; /* end of video RAM */
+
+- unsigned int reserve_lp0 :1; /* never has lp0 */
+- unsigned int reserve_lp1 :1; /* never has lp1 */
+- unsigned int reserve_lp2 :1; /* never has lp2 */
+- unsigned int soft_reboot :1; /* soft reboot */
++ unsigned char reserve_lp0 :1; /* never has lp0 */
++ unsigned char reserve_lp1 :1; /* never has lp1 */
++ unsigned char reserve_lp2 :1; /* never has lp2 */
++ char restart_mode; /* default restart mode */
+ void (*fixup)(struct tag *, char **,
+ struct meminfo *);
+ void (*reserve)(void);/* reserve mem blocks */
+@@ -46,6 +46,7 @@ struct machine_desc {
+ #ifdef CONFIG_MULTI_IRQ_HANDLER
+ void (*handle_irq)(struct pt_regs *);
+ #endif
++ void (*restart)(char, const char *);
+ };
+
+ /*
+diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
+index 0f8e382..99cfe36 100644
+--- a/arch/arm/include/asm/perf_event.h
++++ b/arch/arm/include/asm/perf_event.h
+@@ -32,7 +32,4 @@ enum arm_perf_pmu_ids {
+ extern enum arm_perf_pmu_ids
+ armpmu_get_pmu_id(void);
+
+-extern int
+-armpmu_get_max_events(void);
+-
+ #endif /* __ARM_PERF_EVENT_H__ */
+diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
+index 9451dce..bcae9b8 100644
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -21,7 +21,6 @@
+ #else
+
+ #include <asm/memory.h>
+-#include <mach/vmalloc.h>
+ #include <asm/pgtable-hwdef.h>
+
+ #include <asm/pgtable-2level.h>
+@@ -33,14 +32,16 @@
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leaves a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+- *
+- * Note that platforms may override VMALLOC_START, but they must provide
+- * VMALLOC_END. VMALLOC_END defines the (exclusive) limit of this space,
+- * which may not overlap IO space.
+ */
+-#ifndef VMALLOC_START
+ #define VMALLOC_OFFSET (8*1024*1024)
+ #define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
++#define VMALLOC_END 0xff000000UL
++
++/* This is a temporary hack until shmobile's DMA area size is sorted out */
++#ifdef CONFIG_ARCH_SHMOBILE
++#warning "SH-Mobile's consistent DMA size conflicts with VMALLOC_END by 144MB"
++#undef VMALLOC_END
++#define VMALLOC_END 0xF6000000UL
+ #endif
+
+ #define LIBRARY_TEXT_START 0x0c000000
+diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
+index 665ef2c..90114fa 100644
+--- a/arch/arm/include/asm/pmu.h
++++ b/arch/arm/include/asm/pmu.h
+@@ -27,13 +27,22 @@ enum arm_pmu_type {
+ /*
+ * struct arm_pmu_platdata - ARM PMU platform data
+ *
+- * @handle_irq: an optional handler which will be called from the interrupt and
+- * passed the address of the low level handler, and can be used to implement
+- * any platform specific handling before or after calling it.
++ * @handle_irq: an optional handler which will be called from the
++ * interrupt and passed the address of the low level handler,
++ * and can be used to implement any platform specific handling
++ * before or after calling it.
++ * @enable_irq: an optional handler which will be called after
++ * request_irq and can be used to perform any platform specific
++ * irq enablement
++ * @disable_irq: an optional handler which will be called before
++ * free_irq and can be used to perform any platform specific
++ * irq disablement
+ */
+ struct arm_pmu_platdata {
+ irqreturn_t (*handle_irq)(int irq, void *dev,
+ irq_handler_t pmu_handler);
++ void (*enable_irq)(int irq);
++ void (*disable_irq)(int irq);
+ };
+
+ #ifdef CONFIG_CPU_HAS_PMU
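
(Illustrative usage sketch, not part of the patch. It shows platform data using
the new enable_irq/disable_irq hooks; the hook bodies are made-up placeholders
standing in for whatever platform specific setup a board needs around the PMU
interrupt, and the header is assumed to be reachable as <asm/pmu.h>.)

    #include <asm/pmu.h>

    static void example_pmu_enable_irq(int irq)
    {
            /* e.g. route a cross trigger or unmask a wakeup source for irq */
    }

    static void example_pmu_disable_irq(int irq)
    {
            /* undo whatever example_pmu_enable_irq() set up */
    }

    static struct arm_pmu_platdata example_pmu_platdata = {
            .enable_irq     = example_pmu_enable_irq,
            .disable_irq    = example_pmu_disable_irq,
    };
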
+diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
+index 984014b..fe7de75 100644
+--- a/arch/arm/include/asm/system.h
++++ b/arch/arm/include/asm/system.h
+@@ -101,6 +101,7 @@ extern int __pure cpu_architecture(void);
+ extern void cpu_init(void);
+
+ void arm_machine_restart(char mode, const char *cmd);
++void soft_restart(unsigned long);
+ extern void (*arm_pm_restart)(char str, const char *cmd);
+
+ #define UDBG_UNDEFINED (1 << 0)
+diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
+index b145f16..3a456c6 100644
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -36,12 +36,11 @@
+ #ifdef CONFIG_MULTI_IRQ_HANDLER
+ ldr r1, =handle_arch_irq
+ mov r0, sp
+- ldr r1, [r1]
+ adr lr, BSYM(9997f)
+- teq r1, #0
+- movne pc, r1
+-#endif
++ ldr pc, [r1]
++#else
+ arch_irq_handler_default
++#endif
+ 9997:
+ .endm
+
+diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
+index e59bbd4..29620b6 100644
+--- a/arch/arm/kernel/machine_kexec.c
++++ b/arch/arm/kernel/machine_kexec.c
+@@ -16,7 +16,7 @@
+ extern const unsigned char relocate_new_kernel[];
+ extern const unsigned int relocate_new_kernel_size;
+
+-extern void setup_mm_for_reboot(char mode);
++extern void setup_mm_for_reboot(void);
+
+ extern unsigned long kexec_start_address;
+ extern unsigned long kexec_indirection_page;
+@@ -113,7 +113,7 @@ void machine_kexec(struct kimage *image)
+ kexec_reinit();
+ local_irq_disable();
+ local_fiq_disable();
+- setup_mm_for_reboot(0); /* mode is not used, so just pass 0*/
++ setup_mm_for_reboot();
+ flush_cache_all();
+ outer_flush_all();
+ outer_disable();
+diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
+index ecebb89..56173ae 100644
+--- a/arch/arm/kernel/perf_event.c
++++ b/arch/arm/kernel/perf_event.c
+@@ -59,8 +59,7 @@ armpmu_get_pmu_id(void)
+ }
+ EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);
+
+-int
+-armpmu_get_max_events(void)
++int perf_num_counters(void)
+ {
+ int max_events = 0;
+
+@@ -69,12 +68,6 @@ armpmu_get_max_events(void)
+
+ return max_events;
+ }
+-EXPORT_SYMBOL_GPL(armpmu_get_max_events);
+-
+-int perf_num_counters(void)
+-{
+- return armpmu_get_max_events();
+-}
+ EXPORT_SYMBOL_GPL(perf_num_counters);
+
+ #define HW_OP_UNSUPPORTED 0xFFFF
+@@ -374,6 +367,8 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
+ {
+ int i, irq, irqs;
+ struct platform_device *pmu_device = armpmu->plat_device;
++ struct arm_pmu_platdata *plat =
++ dev_get_platdata(&pmu_device->dev);
+
+ irqs = min(pmu_device->num_resources, num_possible_cpus());
+
+@@ -381,8 +376,11 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
+ if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
+ continue;
+ irq = platform_get_irq(pmu_device, i);
+- if (irq >= 0)
++ if (irq >= 0) {
++ if (plat && plat->disable_irq)
++ plat->disable_irq(irq);
+ free_irq(irq, armpmu);
++ }
+ }
+
+ release_pmu(armpmu->type);
+@@ -442,7 +440,8 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
+ irq);
+ armpmu_release_hardware(armpmu);
+ return err;
+- }
++ } else if (plat && plat->enable_irq)
++ plat->enable_irq(irq);
+
+ cpumask_set_cpu(i, &armpmu->active_irqs);
+ }
+diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
+index 0ad3c6f..b78af0c 100644
+--- a/arch/arm/kernel/perf_event_v6.c
++++ b/arch/arm/kernel/perf_event_v6.c
+@@ -65,13 +65,15 @@ enum armv6_counters {
+ * accesses/misses in hardware.
+ */
+ static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
+- [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES,
+- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC,
+- [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
+- [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
+- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
+- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT,
+- [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
++ [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES,
++ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC,
++ [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
++ [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
++ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
++ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT,
++ [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
++ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6_PERFCTR_IBUF_STALL,
++ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV6_PERFCTR_LSU_FULL_STALL,
+ };
+
+ static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+@@ -218,13 +220,15 @@ enum armv6mpcore_perf_types {
+ * accesses/misses in hardware.
+ */
+ static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
+- [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
+- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
+- [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
+- [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
+- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
+- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
+- [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
++ [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
++ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
++ [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
++ [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
++ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
++ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
++ [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
++ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6MPCORE_PERFCTR_IBUF_STALL,
++ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV6MPCORE_PERFCTR_LSU_FULL_STALL,
+ };
+
+ static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
+index 1049319..2127ca3 100644
+--- a/arch/arm/kernel/perf_event_v7.c
++++ b/arch/arm/kernel/perf_event_v7.c
+@@ -28,165 +28,87 @@ static struct arm_pmu armv7pmu;
+ * they are not available.
+ */
+ enum armv7_perf_types {
+- ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
+- ARMV7_PERFCTR_IFETCH_MISS = 0x01,
+- ARMV7_PERFCTR_ITLB_MISS = 0x02,
+- ARMV7_PERFCTR_DCACHE_REFILL = 0x03, /* L1 */
+- ARMV7_PERFCTR_DCACHE_ACCESS = 0x04, /* L1 */
+- ARMV7_PERFCTR_DTLB_REFILL = 0x05,
+- ARMV7_PERFCTR_DREAD = 0x06,
+- ARMV7_PERFCTR_DWRITE = 0x07,
+- ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
+- ARMV7_PERFCTR_EXC_TAKEN = 0x09,
+- ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
+- ARMV7_PERFCTR_CID_WRITE = 0x0B,
+- /* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
++ ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
++ ARMV7_PERFCTR_L1_ICACHE_REFILL = 0x01,
++ ARMV7_PERFCTR_ITLB_REFILL = 0x02,
++ ARMV7_PERFCTR_L1_DCACHE_REFILL = 0x03,
++ ARMV7_PERFCTR_L1_DCACHE_ACCESS = 0x04,
++ ARMV7_PERFCTR_DTLB_REFILL = 0x05,
++ ARMV7_PERFCTR_MEM_READ = 0x06,
++ ARMV7_PERFCTR_MEM_WRITE = 0x07,
++ ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
++ ARMV7_PERFCTR_EXC_TAKEN = 0x09,
++ ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
++ ARMV7_PERFCTR_CID_WRITE = 0x0B,
++
++ /*
++ * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
+ * It counts:
+- * - all branch instructions,
++ * - all (taken) branch instructions,
+ * - instructions that explicitly write the PC,
+ * - exception generating instructions.
+ */
+- ARMV7_PERFCTR_PC_WRITE = 0x0C,
+- ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
+- ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
+- ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F,
++ ARMV7_PERFCTR_PC_WRITE = 0x0C,
++ ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
++ ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
++ ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
++ ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
++ ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
++ ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12,
+
+ /* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
+- ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
+- ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
+- ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12,
+- ARMV7_PERFCTR_MEM_ACCESS = 0x13,
+- ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14,
+- ARMV7_PERFCTR_L1_DCACHE_WB = 0x15,
+- ARMV7_PERFCTR_L2_DCACHE_ACCESS = 0x16,
+- ARMV7_PERFCTR_L2_DCACHE_REFILL = 0x17,
+- ARMV7_PERFCTR_L2_DCACHE_WB = 0x18,
+- ARMV7_PERFCTR_BUS_ACCESS = 0x19,
+- ARMV7_PERFCTR_MEMORY_ERROR = 0x1A,
+- ARMV7_PERFCTR_INSTR_SPEC = 0x1B,
+- ARMV7_PERFCTR_TTBR_WRITE = 0x1C,
+- ARMV7_PERFCTR_BUS_CYCLES = 0x1D,
+-
+- ARMV7_PERFCTR_CPU_CYCLES = 0xFF
++ ARMV7_PERFCTR_MEM_ACCESS = 0x13,
++ ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14,
++ ARMV7_PERFCTR_L1_DCACHE_WB = 0x15,
++ ARMV7_PERFCTR_L2_CACHE_ACCESS = 0x16,
++ ARMV7_PERFCTR_L2_CACHE_REFILL = 0x17,
++ ARMV7_PERFCTR_L2_CACHE_WB = 0x18,
++ ARMV7_PERFCTR_BUS_ACCESS = 0x19,
++ ARMV7_PERFCTR_MEM_ERROR = 0x1A,
++ ARMV7_PERFCTR_INSTR_SPEC = 0x1B,
++ ARMV7_PERFCTR_TTBR_WRITE = 0x1C,
++ ARMV7_PERFCTR_BUS_CYCLES = 0x1D,
++
++ ARMV7_PERFCTR_CPU_CYCLES = 0xFF
+ };
+
+ /* ARMv7 Cortex-A8 specific event types */
+ enum armv7_a8_perf_types {
+- ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40,
+- ARMV7_PERFCTR_L2_STORE_MERGED = 0x41,
+- ARMV7_PERFCTR_L2_STORE_BUFF = 0x42,
+- ARMV7_PERFCTR_L2_ACCESS = 0x43,
+- ARMV7_PERFCTR_L2_CACH_MISS = 0x44,
+- ARMV7_PERFCTR_AXI_READ_CYCLES = 0x45,
+- ARMV7_PERFCTR_AXI_WRITE_CYCLES = 0x46,
+- ARMV7_PERFCTR_MEMORY_REPLAY = 0x47,
+- ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY = 0x48,
+- ARMV7_PERFCTR_L1_DATA_MISS = 0x49,
+- ARMV7_PERFCTR_L1_INST_MISS = 0x4A,
+- ARMV7_PERFCTR_L1_DATA_COLORING = 0x4B,
+- ARMV7_PERFCTR_L1_NEON_DATA = 0x4C,
+- ARMV7_PERFCTR_L1_NEON_CACH_DATA = 0x4D,
+- ARMV7_PERFCTR_L2_NEON = 0x4E,
+- ARMV7_PERFCTR_L2_NEON_HIT = 0x4F,
+- ARMV7_PERFCTR_L1_INST = 0x50,
+- ARMV7_PERFCTR_PC_RETURN_MIS_PRED = 0x51,
+- ARMV7_PERFCTR_PC_BRANCH_FAILED = 0x52,
+- ARMV7_PERFCTR_PC_BRANCH_TAKEN = 0x53,
+- ARMV7_PERFCTR_PC_BRANCH_EXECUTED = 0x54,
+- ARMV7_PERFCTR_OP_EXECUTED = 0x55,
+- ARMV7_PERFCTR_CYCLES_INST_STALL = 0x56,
+- ARMV7_PERFCTR_CYCLES_INST = 0x57,
+- ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL = 0x58,
+- ARMV7_PERFCTR_CYCLES_NEON_INST_STALL = 0x59,
+- ARMV7_PERFCTR_NEON_CYCLES = 0x5A,
+-
+- ARMV7_PERFCTR_PMU0_EVENTS = 0x70,
+- ARMV7_PERFCTR_PMU1_EVENTS = 0x71,
+- ARMV7_PERFCTR_PMU_EVENTS = 0x72,
++ ARMV7_A8_PERFCTR_L2_CACHE_ACCESS = 0x43,
++ ARMV7_A8_PERFCTR_L2_CACHE_REFILL = 0x44,
++ ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS = 0x50,
++ ARMV7_A8_PERFCTR_STALL_ISIDE = 0x56,
+ };
+
+ /* ARMv7 Cortex-A9 specific event types */
+ enum armv7_a9_perf_types {
+- ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC = 0x40,
+- ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC = 0x41,
+- ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC = 0x42,
+-
+- ARMV7_PERFCTR_COHERENT_LINE_MISS = 0x50,
+- ARMV7_PERFCTR_COHERENT_LINE_HIT = 0x51,
+-
+- ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES = 0x60,
+- ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES = 0x61,
+- ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62,
+- ARMV7_PERFCTR_STREX_EXECUTED_PASSED = 0x63,
+- ARMV7_PERFCTR_STREX_EXECUTED_FAILED = 0x64,
+- ARMV7_PERFCTR_DATA_EVICTION = 0x65,
+- ARMV7_PERFCTR_ISSUE_STAGE_NO_INST = 0x66,
+- ARMV7_PERFCTR_ISSUE_STAGE_EMPTY = 0x67,
+- ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE = 0x68,
+-
+- ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E,
+-
+- ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST = 0x70,
+- ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71,
+- ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST = 0x72,
+- ARMV7_PERFCTR_FP_EXECUTED_INST = 0x73,
+- ARMV7_PERFCTR_NEON_EXECUTED_INST = 0x74,
+-
+- ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80,
+- ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES = 0x81,
+- ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES = 0x82,
+- ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES = 0x83,
+- ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES = 0x84,
+- ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES = 0x85,
+- ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES = 0x86,
+-
+- ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES = 0x8A,
+- ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B,
+-
+- ARMV7_PERFCTR_ISB_INST = 0x90,
+- ARMV7_PERFCTR_DSB_INST = 0x91,
+- ARMV7_PERFCTR_DMB_INST = 0x92,
+- ARMV7_PERFCTR_EXT_INTERRUPTS = 0x93,
+-
+- ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED = 0xA0,
+- ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED = 0xA1,
+- ARMV7_PERFCTR_PLE_FIFO_FLUSH = 0xA2,
+- ARMV7_PERFCTR_PLE_RQST_COMPLETED = 0xA3,
+- ARMV7_PERFCTR_PLE_FIFO_OVERFLOW = 0xA4,
+- ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5
++ ARMV7_A9_PERFCTR_INSTR_CORE_RENAME = 0x68,
++ ARMV7_A9_PERFCTR_STALL_ICACHE = 0x60,
++ ARMV7_A9_PERFCTR_STALL_DISPATCH = 0x66,
+ };
+
+ /* ARMv7 Cortex-A5 specific event types */
+ enum armv7_a5_perf_types {
+- ARMV7_PERFCTR_IRQ_TAKEN = 0x86,
+- ARMV7_PERFCTR_FIQ_TAKEN = 0x87,
+-
+- ARMV7_PERFCTR_EXT_MEM_RQST = 0xc0,
+- ARMV7_PERFCTR_NC_EXT_MEM_RQST = 0xc1,
+- ARMV7_PERFCTR_PREFETCH_LINEFILL = 0xc2,
+- ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3,
+- ARMV7_PERFCTR_ENTER_READ_ALLOC = 0xc4,
+- ARMV7_PERFCTR_READ_ALLOC = 0xc5,
+-
+- ARMV7_PERFCTR_STALL_SB_FULL = 0xc9,
++ ARMV7_A5_PERFCTR_PREFETCH_LINEFILL = 0xc2,
++ ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3,
+ };
+
+ /* ARMv7 Cortex-A15 specific event types */
+ enum armv7_a15_perf_types {
+- ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS = 0x40,
+- ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS = 0x41,
+- ARMV7_PERFCTR_L1_DCACHE_READ_REFILL = 0x42,
+- ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL = 0x43,
++ ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ = 0x40,
++ ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41,
++ ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ = 0x42,
++ ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE = 0x43,
+
+- ARMV7_PERFCTR_L1_DTLB_READ_REFILL = 0x4C,
+- ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL = 0x4D,
++ ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ = 0x4C,
++ ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE = 0x4D,
+
+- ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS = 0x50,
+- ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS = 0x51,
+- ARMV7_PERFCTR_L2_DCACHE_READ_REFILL = 0x52,
+- ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL = 0x53,
++ ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ = 0x50,
++ ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE = 0x51,
++ ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ = 0x52,
++ ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE = 0x53,
+
+- ARMV7_PERFCTR_SPEC_PC_WRITE = 0x76,
++ ARMV7_A15_PERFCTR_PC_WRITE_SPEC = 0x76,
+ };
+
+ /*
+@@ -197,13 +119,15 @@ enum armv7_a15_perf_types {
+ * accesses/misses in hardware.
+ */
+ static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
+- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+- [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
+- [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
+- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+- [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
++ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
++ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
++ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
++ [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
++ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
++ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
++ [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
++ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE,
++ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
+ };
+
+ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+@@ -217,12 +141,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ * combined.
+ */
+ [C(OP_READ)] = {
+- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
+- [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
++ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ },
+ [C(OP_WRITE)] = {
+- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
+- [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
++ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+@@ -231,12 +155,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
+- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
++ [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+ },
+ [C(OP_WRITE)] = {
+- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
+- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
++ [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+@@ -245,12 +169,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
+- [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
++ [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
++ [C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
+ },
+ [C(OP_WRITE)] = {
+- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
+- [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
++ [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
++ [C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+@@ -274,11 +198,11 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+@@ -287,14 +211,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
+- [C(RESULT_MISS)]
+- = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
++ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_WRITE)] = {
+- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
+- [C(RESULT_MISS)]
+- = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
++ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+@@ -321,14 +243,15 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ * Cortex-A9 HW events mapping
+ */
+ static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
+- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+- [PERF_COUNT_HW_INSTRUCTIONS] =
+- ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
+- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_DCACHE_ACCESS,
+- [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_DCACHE_REFILL,
+- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+- [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
++ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
++ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
++ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
++ [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
++ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
++ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
++ [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
++ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE,
++ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV7_A9_PERFCTR_STALL_DISPATCH,
+ };
+
+ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+@@ -342,12 +265,12 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ * combined.
+ */
+ [C(OP_READ)] = {
+- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
+- [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
++ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ },
+ [C(OP_WRITE)] = {
+- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
+- [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
++ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+@@ -357,11 +280,11 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+- [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+- [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+@@ -399,11 +322,11 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+@@ -412,14 +335,12 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
+- [C(RESULT_MISS)]
+- = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
++ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_WRITE)] = {
+- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
+- [C(RESULT_MISS)]
+- = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
++ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+@@ -446,13 +367,15 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ * Cortex-A5 HW events mapping
+ */
+ static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
+- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+- [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
+- [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
+- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+- [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
++ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
++ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
++ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
++ [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
++ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
++ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
++ [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
++ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
++ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
+ };
+
+ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+@@ -460,42 +383,34 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(L1D)] = {
+ [C(OP_READ)] = {
+- [C(RESULT_ACCESS)]
+- = ARMV7_PERFCTR_DCACHE_ACCESS,
+- [C(RESULT_MISS)]
+- = ARMV7_PERFCTR_DCACHE_REFILL,
++ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ },
+ [C(OP_WRITE)] = {
+- [C(RESULT_ACCESS)]
+- = ARMV7_PERFCTR_DCACHE_ACCESS,
+- [C(RESULT_MISS)]
+- = ARMV7_PERFCTR_DCACHE_REFILL,
++ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+- [C(RESULT_ACCESS)]
+- = ARMV7_PERFCTR_PREFETCH_LINEFILL,
+- [C(RESULT_MISS)]
+- = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
++ [C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
++ [C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+- [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+- [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+ },
+ /*
+ * The prefetch counters don't differentiate between the I
+ * side and the D side.
+ */
+ [C(OP_PREFETCH)] = {
+- [C(RESULT_ACCESS)]
+- = ARMV7_PERFCTR_PREFETCH_LINEFILL,
+- [C(RESULT_MISS)]
+- = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
++ [C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
++ [C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
+ },
+ },
+ [C(LL)] = {
+@@ -529,11 +444,11 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+@@ -543,13 +458,11 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+- [C(RESULT_MISS)]
+- = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+- [C(RESULT_MISS)]
+- = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+@@ -562,13 +475,15 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ * Cortex-A15 HW events mapping
+ */
+ static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
+- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+- [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
+- [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
+- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_SPEC_PC_WRITE,
+- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+- [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
++ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
++ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
++ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
++ [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
++ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
++ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
++ [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
++ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
++ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
+ };
+
+ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+@@ -576,16 +491,12 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(L1D)] = {
+ [C(OP_READ)] = {
+- [C(RESULT_ACCESS)]
+- = ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS,
+- [C(RESULT_MISS)]
+- = ARMV7_PERFCTR_L1_DCACHE_READ_REFILL,
++ [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
++ [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
+ },
+ [C(OP_WRITE)] = {
+- [C(RESULT_ACCESS)]
+- = ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS,
+- [C(RESULT_MISS)]
+- = ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL,
++ [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
++ [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+@@ -601,11 +512,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+- [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+- [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+@@ -614,16 +525,12 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+- [C(RESULT_ACCESS)]
+- = ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS,
+- [C(RESULT_MISS)]
+- = ARMV7_PERFCTR_L2_DCACHE_READ_REFILL,
++ [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
++ [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
+ },
+ [C(OP_WRITE)] = {
+- [C(RESULT_ACCESS)]
+- = ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS,
+- [C(RESULT_MISS)]
+- = ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL,
++ [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
++ [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+@@ -633,13 +540,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+- [C(RESULT_MISS)]
+- = ARMV7_PERFCTR_L1_DTLB_READ_REFILL,
++ [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+- [C(RESULT_MISS)]
+- = ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL,
++ [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+@@ -649,11 +554,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+@@ -663,13 +568,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+- [C(RESULT_MISS)]
+- = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+- [C(RESULT_MISS)]
+- = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
++ [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
+index 9fc2c95..71a21e6 100644
+--- a/arch/arm/kernel/perf_event_xscale.c
++++ b/arch/arm/kernel/perf_event_xscale.c
+@@ -48,13 +48,15 @@ enum xscale_counters {
+ };
+
+ static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
+- [PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT,
+- [PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION,
+- [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
+- [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
+- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
+- [PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS,
+- [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
++ [PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT,
++ [PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION,
++ [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
++ [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
++ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
++ [PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS,
++ [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
++ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = XSCALE_PERFCTR_ICACHE_NO_DELIVER,
++ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
+ };
+
+ static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index 3d0c6fb..eeb3e16 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -57,7 +57,7 @@ static const char *isa_modes[] = {
+ "ARM" , "Thumb" , "Jazelle", "ThumbEE"
+ };
+
+-extern void setup_mm_for_reboot(char mode);
++extern void setup_mm_for_reboot(void);
+
+ static volatile int hlt_counter;
+
+@@ -92,7 +92,7 @@ static int __init hlt_setup(char *__unused)
+ __setup("nohlt", nohlt_setup);
+ __setup("hlt", hlt_setup);
+
+-void arm_machine_restart(char mode, const char *cmd)
++void soft_restart(unsigned long addr)
+ {
+ /* Disable interrupts first */
+ local_irq_disable();
+@@ -103,7 +103,7 @@ void arm_machine_restart(char mode, const char *cmd)
+ * we may need it to insert some 1:1 mappings so that
+ * soft boot works.
+ */
+- setup_mm_for_reboot(mode);
++ setup_mm_for_reboot();
+
+ /* Clean and invalidate caches */
+ flush_cache_all();
+@@ -114,18 +114,17 @@ void arm_machine_restart(char mode, const char *cmd)
+ /* Push out any further dirty data, and ensure cache is empty */
+ flush_cache_all();
+
+- /*
+- * Now call the architecture specific reboot code.
+- */
+- arch_reset(mode, cmd);
++ cpu_reset(addr);
++}
+
+- /*
+- * Whoops - the architecture was unable to reboot.
+- * Tell the user!
+- */
+- mdelay(1000);
+- printk("Reboot failed -- System halted\n");
+- while (1);
++void arm_machine_restart(char mode, const char *cmd)
++{
++ /* Disable interrupts first */
++ local_irq_disable();
++ local_fiq_disable();
++
++ /* Call the architecture specific reboot code. */
++ arch_reset(mode, cmd);
+ }
+
+ /*
+@@ -253,7 +252,15 @@ void machine_power_off(void)
+ void machine_restart(char *cmd)
+ {
+ machine_shutdown();
++
+ arm_pm_restart(reboot_mode, cmd);
++
++ /* Give a grace period for failure to restart of 1s */
++ mdelay(1000);
++
++ /* Whoops - the platform was unable to reboot. Tell the user! */
++ printk("Reboot failed -- System halted\n");
++ while (1);
+ }
+
+ void __show_regs(struct pt_regs *regs)
+diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
+index 8fc2c8f..cd18aa9 100644
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -31,6 +31,7 @@
+ #include <linux/memblock.h>
+ #include <linux/bug.h>
+ #include <linux/compiler.h>
++#include <linux/sort.h>
+
+ #include <asm/unified.h>
+ #include <asm/cpu.h>
+@@ -890,6 +891,12 @@ static struct machine_desc * __init setup_machine_tags(unsigned int nr)
+ return mdesc;
+ }
+
++static int __init meminfo_cmp(const void *_a, const void *_b)
++{
++ const struct membank *a = _a, *b = _b;
++ long cmp = bank_pfn_start(a) - bank_pfn_start(b);
++ return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
++}
+
+ void __init setup_arch(char **cmdline_p)
+ {
+@@ -902,14 +909,8 @@ void __init setup_arch(char **cmdline_p)
+ machine_desc = mdesc;
+ machine_name = mdesc->name;
+
+-#ifdef CONFIG_ZONE_DMA
+- if (mdesc->dma_zone_size) {
+- extern unsigned long arm_dma_zone_size;
+- arm_dma_zone_size = mdesc->dma_zone_size;
+- }
+-#endif
+- if (mdesc->soft_reboot)
+- reboot_setup("s");
++ if (mdesc->restart_mode)
++ reboot_setup(&mdesc->restart_mode);
+
+ init_mm.start_code = (unsigned long) _text;
+ init_mm.end_code = (unsigned long) _etext;
+@@ -922,12 +923,16 @@ void __init setup_arch(char **cmdline_p)
+
+ parse_early_param();
+
++ sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+ sanity_check_meminfo();
+ arm_memblock_init(&meminfo, mdesc);
+
+ paging_init(mdesc);
+ request_standard_resources(mdesc);
+
++ if (mdesc->restart)
++ arm_pm_restart = mdesc->restart;
++
+ unflatten_device_tree();
+
+ #ifdef CONFIG_SMP
+diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
+index 8c57dd3..3abe5da 100644
+--- a/arch/arm/kernel/time.c
++++ b/arch/arm/kernel/time.c
+@@ -112,7 +112,7 @@ void timer_tick(void)
+ }
+ #endif
+
+-#if defined(CONFIG_PM) && !defined(CONFIG_GENERIC_CLOCKEVENTS)
++#if defined(CONFIG_PM)
+ static int timer_suspend(void)
+ {
+ if (system_timer->suspend)
+diff --git a/arch/arm/mach-at91/include/mach/io.h b/arch/arm/mach-at91/include/mach/io.h
+index 4298e78..4ca09ef 100644
+--- a/arch/arm/mach-at91/include/mach/io.h
++++ b/arch/arm/mach-at91/include/mach/io.h
+@@ -30,14 +30,6 @@
+
+ #ifndef __ASSEMBLY__
+
+-#ifndef CONFIG_ARCH_AT91X40
+-#define __arch_ioremap at91_ioremap
+-#define __arch_iounmap at91_iounmap
+-#endif
+-
+-void __iomem *at91_ioremap(unsigned long phys, size_t size, unsigned int type);
+-void at91_iounmap(volatile void __iomem *addr);
+-
+ static inline unsigned int at91_sys_read(unsigned int reg_offset)
+ {
+ void __iomem *addr = (void __iomem *)AT91_VA_BASE_SYS;
+diff --git a/arch/arm/mach-at91/include/mach/vmalloc.h b/arch/arm/mach-at91/include/mach/vmalloc.h
+deleted file mode 100644
+index 8e4a1bd..0000000
+--- a/arch/arm/mach-at91/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,28 +0,0 @@
+-/*
+- * arch/arm/mach-at91/include/mach/vmalloc.h
+- *
+- * Copyright (C) 2003 SAN People
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+- */
+-
+-#ifndef __ASM_ARCH_VMALLOC_H
+-#define __ASM_ARCH_VMALLOC_H
+-
+-#include <mach/hardware.h>
+-
+-#define VMALLOC_END (AT91_VIRT_BASE & PGDIR_MASK)
+-
+-#endif
+diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
+index f5bbe0ef..39d8ea0 100644
+--- a/arch/arm/mach-at91/setup.c
++++ b/arch/arm/mach-at91/setup.c
+@@ -76,24 +76,6 @@ static struct map_desc at91_io_desc __initdata = {
+ .type = MT_DEVICE,
+ };
+
+-void __iomem *at91_ioremap(unsigned long p, size_t size, unsigned int type)
+-{
+- if (p >= AT91_BASE_SYS && p <= (AT91_BASE_SYS + SZ_16K - 1))
+- return (void __iomem *)AT91_IO_P2V(p);
+-
+- return __arm_ioremap_caller(p, size, type, __builtin_return_address(0));
+-}
+-EXPORT_SYMBOL(at91_ioremap);
+-
+-void at91_iounmap(volatile void __iomem *addr)
+-{
+- unsigned long virt = (unsigned long)addr;
+-
+- if (virt >= VMALLOC_START && virt < VMALLOC_END)
+- __iounmap(addr);
+-}
+-EXPORT_SYMBOL(at91_iounmap);
+-
+ #define AT91_DBGU0 0xfffff200
+ #define AT91_DBGU1 0xffffee00
+
+diff --git a/arch/arm/mach-bcmring/dma.c b/arch/arm/mach-bcmring/dma.c
+index f4d4d6d..1a1a27d 100644
+--- a/arch/arm/mach-bcmring/dma.c
++++ b/arch/arm/mach-bcmring/dma.c
+@@ -1615,7 +1615,7 @@ DMA_MemType_t dma_mem_type(void *addr)
+ {
+ unsigned long addrVal = (unsigned long)addr;
+
+- if (addrVal >= VMALLOC_END) {
++ if (addrVal >= CONSISTENT_BASE) {
+ /* NOTE: DMA virtual memory space starts at 0xFFxxxxxx */
+
+ /* dma_alloc_xxx pages are physically and virtually contiguous */
+diff --git a/arch/arm/mach-bcmring/include/mach/vmalloc.h b/arch/arm/mach-bcmring/include/mach/vmalloc.h
+deleted file mode 100644
+index 7397bd7..0000000
+--- a/arch/arm/mach-bcmring/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,25 +0,0 @@
+-/*
+- *
+- * Copyright (C) 2000 Russell King.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+- */
+-
+-/*
+- * Move VMALLOC_END to 0xf0000000 so that the vm space can range from
+- * 0xe0000000 to 0xefffffff. This gives us 256 MB of vm space and handles
+- * larger physical memory designs better.
+- */
+-#define VMALLOC_END 0xf0000000UL
+diff --git a/arch/arm/mach-clps711x/Makefile b/arch/arm/mach-clps711x/Makefile
+index 4a19731..f2f0256 100644
+--- a/arch/arm/mach-clps711x/Makefile
++++ b/arch/arm/mach-clps711x/Makefile
+@@ -4,7 +4,7 @@
+
+ # Object file lists.
+
+-obj-y := irq.o mm.o time.o
++obj-y := common.o
+ obj-m :=
+ obj-n :=
+ obj- :=
+diff --git a/arch/arm/mach-clps711x/common.c b/arch/arm/mach-clps711x/common.c
+new file mode 100644
+index 0000000..ced2a4e
+--- /dev/null
++++ b/arch/arm/mach-clps711x/common.c
+@@ -0,0 +1,222 @@
++/*
++ * linux/arch/arm/mach-clps711x/core.c
++ *
++ * Core support for the CLPS711x-based machines.
++ *
++ * Copyright (C) 2001,2011 Deep Blue Solutions Ltd
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/irq.h>
++#include <linux/sched.h>
++#include <linux/timex.h>
++
++#include <asm/sizes.h>
++#include <mach/hardware.h>
++#include <asm/irq.h>
++#include <asm/leds.h>
++#include <asm/pgtable.h>
++#include <asm/page.h>
++#include <asm/mach/map.h>
++#include <asm/mach/time.h>
++#include <asm/hardware/clps7111.h>
++
++/*
++ * This maps the generic CLPS711x registers
++ */
++static struct map_desc clps711x_io_desc[] __initdata = {
++ {
++ .virtual = CLPS7111_VIRT_BASE,
++ .pfn = __phys_to_pfn(CLPS7111_PHYS_BASE),
++ .length = SZ_1M,
++ .type = MT_DEVICE
++ }
++};
++
++void __init clps711x_map_io(void)
++{
++ iotable_init(clps711x_io_desc, ARRAY_SIZE(clps711x_io_desc));
++}
++
++static void int1_mask(struct irq_data *d)
++{
++ u32 intmr1;
++
++ intmr1 = clps_readl(INTMR1);
++ intmr1 &= ~(1 << d->irq);
++ clps_writel(intmr1, INTMR1);
++}
++
++static void int1_ack(struct irq_data *d)
++{
++ u32 intmr1;
++
++ intmr1 = clps_readl(INTMR1);
++ intmr1 &= ~(1 << d->irq);
++ clps_writel(intmr1, INTMR1);
++
++ switch (d->irq) {
++ case IRQ_CSINT: clps_writel(0, COEOI); break;
++ case IRQ_TC1OI: clps_writel(0, TC1EOI); break;
++ case IRQ_TC2OI: clps_writel(0, TC2EOI); break;
++ case IRQ_RTCMI: clps_writel(0, RTCEOI); break;
++ case IRQ_TINT: clps_writel(0, TEOI); break;
++ case IRQ_UMSINT: clps_writel(0, UMSEOI); break;
++ }
++}
++
++static void int1_unmask(struct irq_data *d)
++{
++ u32 intmr1;
++
++ intmr1 = clps_readl(INTMR1);
++ intmr1 |= 1 << d->irq;
++ clps_writel(intmr1, INTMR1);
++}
++
++static struct irq_chip int1_chip = {
++ .irq_ack = int1_ack,
++ .irq_mask = int1_mask,
++ .irq_unmask = int1_unmask,
++};
++
++static void int2_mask(struct irq_data *d)
++{
++ u32 intmr2;
++
++ intmr2 = clps_readl(INTMR2);
++ intmr2 &= ~(1 << (d->irq - 16));
++ clps_writel(intmr2, INTMR2);
++}
++
++static void int2_ack(struct irq_data *d)
++{
++ u32 intmr2;
++
++ intmr2 = clps_readl(INTMR2);
++ intmr2 &= ~(1 << (d->irq - 16));
++ clps_writel(intmr2, INTMR2);
++
++ switch (d->irq) {
++ case IRQ_KBDINT: clps_writel(0, KBDEOI); break;
++ }
++}
++
++static void int2_unmask(struct irq_data *d)
++{
++ u32 intmr2;
++
++ intmr2 = clps_readl(INTMR2);
++ intmr2 |= 1 << (d->irq - 16);
++ clps_writel(intmr2, INTMR2);
++}
++
++static struct irq_chip int2_chip = {
++ .irq_ack = int2_ack,
++ .irq_mask = int2_mask,
++ .irq_unmask = int2_unmask,
++};
++
++void __init clps711x_init_irq(void)
++{
++ unsigned int i;
++
++ for (i = 0; i < NR_IRQS; i++) {
++ if (INT1_IRQS & (1 << i)) {
++ irq_set_chip_and_handler(i, &int1_chip,
++ handle_level_irq);
++ set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
++ }
++ if (INT2_IRQS & (1 << i)) {
++ irq_set_chip_and_handler(i, &int2_chip,
++ handle_level_irq);
++ set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
++ }
++ }
++
++ /*
++ * Disable interrupts
++ */
++ clps_writel(0, INTMR1);
++ clps_writel(0, INTMR2);
++
++ /*
++ * Clear down any pending interrupts
++ */
++ clps_writel(0, COEOI);
++ clps_writel(0, TC1EOI);
++ clps_writel(0, TC2EOI);
++ clps_writel(0, RTCEOI);
++ clps_writel(0, TEOI);
++ clps_writel(0, UMSEOI);
++ clps_writel(0, SYNCIO);
++ clps_writel(0, KBDEOI);
++}
++
++/*
++ * gettimeoffset() returns time since last timer tick, in usecs.
++ *
++ * 'LATCH' is hwclock ticks (see CLOCK_TICK_RATE in timex.h) per jiffy.
++ * 'tick' is usecs per jiffy.
++ */
++static unsigned long clps711x_gettimeoffset(void)
++{
++ unsigned long hwticks;
++ hwticks = LATCH - (clps_readl(TC2D) & 0xffff); /* since last underflow */
++ return (hwticks * (tick_nsec / 1000)) / LATCH;
++}
++
++/*
++ * IRQ handler for the timer
++ */
++static irqreturn_t p720t_timer_interrupt(int irq, void *dev_id)
++{
++ timer_tick();
++ return IRQ_HANDLED;
++}
++
++static struct irqaction clps711x_timer_irq = {
++ .name = "CLPS711x Timer Tick",
++ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
++ .handler = p720t_timer_interrupt,
++};
++
++static void __init clps711x_timer_init(void)
++{
++ struct timespec tv;
++ unsigned int syscon;
++
++ syscon = clps_readl(SYSCON1);
++ syscon |= SYSCON1_TC2S | SYSCON1_TC2M;
++ clps_writel(syscon, SYSCON1);
++
++ clps_writel(LATCH-1, TC2D); /* 512kHz / 100Hz - 1 */
++
++ setup_irq(IRQ_TC2OI, &clps711x_timer_irq);
++
++ tv.tv_nsec = 0;
++ tv.tv_sec = clps_readl(RTCDR);
++ do_settimeofday(&tv);
++}
++
++struct sys_timer clps711x_timer = {
++ .init = clps711x_timer_init,
++ .offset = clps711x_gettimeoffset,
++};
+diff --git a/arch/arm/mach-clps711x/include/mach/system.h b/arch/arm/mach-clps711x/include/mach/system.h
+index f916cd7..6c11993 100644
+--- a/arch/arm/mach-clps711x/include/mach/system.h
++++ b/arch/arm/mach-clps711x/include/mach/system.h
+@@ -34,7 +34,7 @@ static inline void arch_idle(void)
+
+ static inline void arch_reset(char mode, const char *cmd)
+ {
+- cpu_reset(0);
++ soft_restart(0);
+ }
+
+ #endif
+diff --git a/arch/arm/mach-clps711x/include/mach/vmalloc.h b/arch/arm/mach-clps711x/include/mach/vmalloc.h
+deleted file mode 100644
+index 467b961..0000000
+--- a/arch/arm/mach-clps711x/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,20 +0,0 @@
+-/*
+- * arch/arm/mach-clps711x/include/mach/vmalloc.h
+- *
+- * Copyright (C) 2000 Deep Blue Solutions Ltd.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+- */
+-#define VMALLOC_END 0xd0000000UL
+diff --git a/arch/arm/mach-clps711x/irq.c b/arch/arm/mach-clps711x/irq.c
+deleted file mode 100644
+index c2eceee..0000000
+--- a/arch/arm/mach-clps711x/irq.c
++++ /dev/null
+@@ -1,143 +0,0 @@
+-/*
+- * linux/arch/arm/mach-clps711x/irq.c
+- *
+- * Copyright (C) 2000 Deep Blue Solutions Ltd.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+- */
+-#include <linux/init.h>
+-#include <linux/list.h>
+-#include <linux/io.h>
+-
+-#include <asm/mach/irq.h>
+-#include <mach/hardware.h>
+-#include <asm/irq.h>
+-
+-#include <asm/hardware/clps7111.h>
+-
+-static void int1_mask(struct irq_data *d)
+-{
+- u32 intmr1;
+-
+- intmr1 = clps_readl(INTMR1);
+- intmr1 &= ~(1 << d->irq);
+- clps_writel(intmr1, INTMR1);
+-}
+-
+-static void int1_ack(struct irq_data *d)
+-{
+- u32 intmr1;
+-
+- intmr1 = clps_readl(INTMR1);
+- intmr1 &= ~(1 << d->irq);
+- clps_writel(intmr1, INTMR1);
+-
+- switch (d->irq) {
+- case IRQ_CSINT: clps_writel(0, COEOI); break;
+- case IRQ_TC1OI: clps_writel(0, TC1EOI); break;
+- case IRQ_TC2OI: clps_writel(0, TC2EOI); break;
+- case IRQ_RTCMI: clps_writel(0, RTCEOI); break;
+- case IRQ_TINT: clps_writel(0, TEOI); break;
+- case IRQ_UMSINT: clps_writel(0, UMSEOI); break;
+- }
+-}
+-
+-static void int1_unmask(struct irq_data *d)
+-{
+- u32 intmr1;
+-
+- intmr1 = clps_readl(INTMR1);
+- intmr1 |= 1 << d->irq;
+- clps_writel(intmr1, INTMR1);
+-}
+-
+-static struct irq_chip int1_chip = {
+- .irq_ack = int1_ack,
+- .irq_mask = int1_mask,
+- .irq_unmask = int1_unmask,
+-};
+-
+-static void int2_mask(struct irq_data *d)
+-{
+- u32 intmr2;
+-
+- intmr2 = clps_readl(INTMR2);
+- intmr2 &= ~(1 << (d->irq - 16));
+- clps_writel(intmr2, INTMR2);
+-}
+-
+-static void int2_ack(struct irq_data *d)
+-{
+- u32 intmr2;
+-
+- intmr2 = clps_readl(INTMR2);
+- intmr2 &= ~(1 << (d->irq - 16));
+- clps_writel(intmr2, INTMR2);
+-
+- switch (d->irq) {
+- case IRQ_KBDINT: clps_writel(0, KBDEOI); break;
+- }
+-}
+-
+-static void int2_unmask(struct irq_data *d)
+-{
+- u32 intmr2;
+-
+- intmr2 = clps_readl(INTMR2);
+- intmr2 |= 1 << (d->irq - 16);
+- clps_writel(intmr2, INTMR2);
+-}
+-
+-static struct irq_chip int2_chip = {
+- .irq_ack = int2_ack,
+- .irq_mask = int2_mask,
+- .irq_unmask = int2_unmask,
+-};
+-
+-void __init clps711x_init_irq(void)
+-{
+- unsigned int i;
+-
+- for (i = 0; i < NR_IRQS; i++) {
+- if (INT1_IRQS & (1 << i)) {
+- irq_set_chip_and_handler(i, &int1_chip,
+- handle_level_irq);
+- set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
+- }
+- if (INT2_IRQS & (1 << i)) {
+- irq_set_chip_and_handler(i, &int2_chip,
+- handle_level_irq);
+- set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
+- }
+- }
+-
+- /*
+- * Disable interrupts
+- */
+- clps_writel(0, INTMR1);
+- clps_writel(0, INTMR2);
+-
+- /*
+- * Clear down any pending interrupts
+- */
+- clps_writel(0, COEOI);
+- clps_writel(0, TC1EOI);
+- clps_writel(0, TC2EOI);
+- clps_writel(0, RTCEOI);
+- clps_writel(0, TEOI);
+- clps_writel(0, UMSEOI);
+- clps_writel(0, SYNCIO);
+- clps_writel(0, KBDEOI);
+-}
+diff --git a/arch/arm/mach-clps711x/mm.c b/arch/arm/mach-clps711x/mm.c
+deleted file mode 100644
+index 9865921..0000000
+--- a/arch/arm/mach-clps711x/mm.c
++++ /dev/null
+@@ -1,48 +0,0 @@
+-/*
+- * linux/arch/arm/mach-clps711x/mm.c
+- *
+- * Generic MM setup for the CLPS711x-based machines.
+- *
+- * Copyright (C) 2001 Deep Blue Solutions Ltd
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+- */
+-#include <linux/kernel.h>
+-#include <linux/mm.h>
+-#include <linux/init.h>
+-
+-#include <asm/sizes.h>
+-#include <mach/hardware.h>
+-#include <asm/pgtable.h>
+-#include <asm/page.h>
+-#include <asm/mach/map.h>
+-#include <asm/hardware/clps7111.h>
+-
+-/*
+- * This maps the generic CLPS711x registers
+- */
+-static struct map_desc clps711x_io_desc[] __initdata = {
+- {
+- .virtual = CLPS7111_VIRT_BASE,
+- .pfn = __phys_to_pfn(CLPS7111_PHYS_BASE),
+- .length = SZ_1M,
+- .type = MT_DEVICE
+- }
+-};
+-
+-void __init clps711x_map_io(void)
+-{
+- iotable_init(clps711x_io_desc, ARRAY_SIZE(clps711x_io_desc));
+-}
+diff --git a/arch/arm/mach-clps711x/time.c b/arch/arm/mach-clps711x/time.c
+deleted file mode 100644
+index d581ef0..0000000
+--- a/arch/arm/mach-clps711x/time.c
++++ /dev/null
+@@ -1,84 +0,0 @@
+-/*
+- * linux/arch/arm/mach-clps711x/time.c
+- *
+- * Copyright (C) 2001 Deep Blue Solutions Ltd.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+- */
+-#include <linux/timex.h>
+-#include <linux/init.h>
+-#include <linux/interrupt.h>
+-#include <linux/irq.h>
+-#include <linux/sched.h>
+-#include <linux/io.h>
+-
+-#include <mach/hardware.h>
+-#include <asm/irq.h>
+-#include <asm/leds.h>
+-#include <asm/hardware/clps7111.h>
+-
+-#include <asm/mach/time.h>
+-
+-
+-/*
+- * gettimeoffset() returns time since last timer tick, in usecs.
+- *
+- * 'LATCH' is hwclock ticks (see CLOCK_TICK_RATE in timex.h) per jiffy.
+- * 'tick' is usecs per jiffy.
+- */
+-static unsigned long clps711x_gettimeoffset(void)
+-{
+- unsigned long hwticks;
+- hwticks = LATCH - (clps_readl(TC2D) & 0xffff); /* since last underflow */
+- return (hwticks * (tick_nsec / 1000)) / LATCH;
+-}
+-
+-/*
+- * IRQ handler for the timer
+- */
+-static irqreturn_t
+-p720t_timer_interrupt(int irq, void *dev_id)
+-{
+- timer_tick();
+- return IRQ_HANDLED;
+-}
+-
+-static struct irqaction clps711x_timer_irq = {
+- .name = "CLPS711x Timer Tick",
+- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+- .handler = p720t_timer_interrupt,
+-};
+-
+-static void __init clps711x_timer_init(void)
+-{
+- struct timespec tv;
+- unsigned int syscon;
+-
+- syscon = clps_readl(SYSCON1);
+- syscon |= SYSCON1_TC2S | SYSCON1_TC2M;
+- clps_writel(syscon, SYSCON1);
+-
+- clps_writel(LATCH-1, TC2D); /* 512kHz / 100Hz - 1 */
+-
+- setup_irq(IRQ_TC2OI, &clps711x_timer_irq);
+-
+- tv.tv_nsec = 0;
+- tv.tv_sec = clps_readl(RTCDR);
+- do_settimeofday(&tv);
+-}
+-
+-struct sys_timer clps711x_timer = {
+- .init = clps711x_timer_init,
+- .offset = clps711x_gettimeoffset,
+-};
+diff --git a/arch/arm/mach-cns3xxx/cns3420vb.c b/arch/arm/mach-cns3xxx/cns3420vb.c
+index 55f7b4b..594852f 100644
+--- a/arch/arm/mach-cns3xxx/cns3420vb.c
++++ b/arch/arm/mach-cns3xxx/cns3420vb.c
+@@ -26,6 +26,7 @@
+ #include <linux/mtd/partitions.h>
+ #include <asm/setup.h>
+ #include <asm/mach-types.h>
++#include <asm/hardware/gic.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach/map.h>
+ #include <asm/mach/time.h>
+@@ -201,5 +202,6 @@ MACHINE_START(CNS3420VB, "Cavium Networks CNS3420 Validation Board")
+ .map_io = cns3420_map_io,
+ .init_irq = cns3xxx_init_irq,
+ .timer = &cns3xxx_timer,
++ .handle_irq = gic_handle_irq,
+ .init_machine = cns3420_init,
+ MACHINE_END
+diff --git a/arch/arm/mach-cns3xxx/include/mach/entry-macro.S b/arch/arm/mach-cns3xxx/include/mach/entry-macro.S
+index d87bfc3..01c57df 100644
+--- a/arch/arm/mach-cns3xxx/include/mach/entry-macro.S
++++ b/arch/arm/mach-cns3xxx/include/mach/entry-macro.S
+@@ -8,8 +8,6 @@
+ * published by the Free Software Foundation.
+ */
+
+-#include <asm/hardware/entry-macro-gic.S>
+-
+ .macro disable_fiq
+ .endm
+
+diff --git a/arch/arm/mach-cns3xxx/include/mach/vmalloc.h b/arch/arm/mach-cns3xxx/include/mach/vmalloc.h
+deleted file mode 100644
+index 1dd231d..0000000
+--- a/arch/arm/mach-cns3xxx/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,11 +0,0 @@
+-/*
+- * Copyright 2000 Russell King.
+- * Copyright 2003 ARM Limited
+- * Copyright 2008 Cavium Networks
+- *
+- * This file is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License, Version 2, as
+- * published by the Free Software Foundation.
+- */
+-
+-#define VMALLOC_END 0xd8000000UL
+diff --git a/arch/arm/mach-davinci/Makefile b/arch/arm/mach-davinci/Makefile
+index 495e313..2db78bd 100644
+--- a/arch/arm/mach-davinci/Makefile
++++ b/arch/arm/mach-davinci/Makefile
+@@ -4,7 +4,7 @@
+ #
+
+ # Common objects
+-obj-y := time.o clock.o serial.o io.o psc.o \
++obj-y := time.o clock.o serial.o psc.o \
+ dma.o usb.o common.o sram.o aemif.o
+
+ obj-$(CONFIG_DAVINCI_MUX) += mux.o
+diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
+index 6659a90..f044671 100644
+--- a/arch/arm/mach-davinci/board-da850-evm.c
++++ b/arch/arm/mach-davinci/board-da850-evm.c
+@@ -32,7 +32,6 @@
+ #include <linux/spi/spi.h>
+ #include <linux/spi/flash.h>
+ #include <linux/delay.h>
+-#include <linux/wl12xx.h>
+
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
+@@ -794,11 +793,13 @@ static const short da850_evm_mmcsd0_pins[] __initconst = {
+
+ static void da850_panel_power_ctrl(int val)
+ {
+- /* lcd backlight */
+- gpio_set_value(DA850_LCD_BL_PIN, val);
+-
+ /* lcd power */
+ gpio_set_value(DA850_LCD_PWR_PIN, val);
++
++ mdelay(200);
++
++ /* lcd backlight */
++ gpio_set_value(DA850_LCD_BL_PIN, val);
+ }
+
+ static int da850_lcd_hw_init(void)
+@@ -818,12 +819,6 @@ static int da850_lcd_hw_init(void)
+ gpio_direction_output(DA850_LCD_BL_PIN, 0);
+ gpio_direction_output(DA850_LCD_PWR_PIN, 0);
+
+- /* Switch off panel power and backlight */
+- da850_panel_power_ctrl(0);
+-
+- /* Switch on panel power and backlight */
+- da850_panel_power_ctrl(1);
+-
+ return 0;
+ }
+
+@@ -1254,6 +1249,19 @@
+
+ #define DA850EVM_SATA_REFCLKPN_RATE (100 * 1000 * 1000)
+
++#ifdef CONFIG_UIO_PRUSS
++struct uio_pruss_pdata da8xx_pruss_uio_pdata = {
++ .pintc_base = 0x4000,
++};
++#endif
++
+ static __init void da850_evm_init(void)
+ {
+ int ret;
++
++#ifdef CONFIG_UIO_PRUSS
++ ret = da8xx_register_pruss_uio(&da8xx_pruss_uio_pdata);
++ if (ret)
++ pr_warning("%s: pruss_uio initialization failed: %d\n",
++ __func__, ret);
++#endif
+diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
+index a30c7c5..8e5c7b3 100644
+--- a/arch/arm/mach-davinci/cpuidle.c
++++ b/arch/arm/mach-davinci/cpuidle.c
+@@ -174,4 +174,3 @@ static int __init davinci_cpuidle_init(void)
+ davinci_cpuidle_probe);
+ }
+ device_initcall(davinci_cpuidle_init);
+-
+diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
+index b047f87..c7e9839 100644
+--- a/arch/arm/mach-davinci/da850.c
++++ b/arch/arm/mach-davinci/da850.c
+@@ -240,6 +240,12 @@ static struct clk tptc2_clk = {
+ .flags = ALWAYS_ENABLED,
+ };
+
++static struct clk pruss_clk = {
++ .name = "pruss",
++ .parent = &pll0_sysclk2,
++ .lpsc = DA8XX_LPSC0_PRUSS,
++};
++
+ static struct clk uart0_clk = {
+ .name = "uart0",
+ .parent = &pll0_sysclk2,
+@@ -411,6 +417,7 @@ static struct clk_lookup da850_clks[] = {
+ CLK(NULL, "tpcc1", &tpcc1_clk),
+ CLK(NULL, "tptc2", &tptc2_clk),
+ CLK(NULL, "uart0", &uart0_clk),
++ CLK(NULL, "pruss", &pruss_clk),
+ CLK(NULL, "uart1", &uart1_clk),
+ CLK(NULL, "uart2", &uart2_clk),
+ CLK(NULL, "aintc", &aintc_clk),
+@@ -747,7 +754,7 @@ static struct map_desc da850_io_desc[] = {
+ },
+ {
+ .virtual = SRAM_VIRT,
+- .pfn = __phys_to_pfn(DA8XX_ARM_RAM_BASE),
++ .pfn = __phys_to_pfn(DA8XX_SHARED_RAM_BASE),
+ .length = SZ_8K,
+ .type = MT_DEVICE
+ },
+@@ -1119,8 +1126,9 @@ static struct davinci_soc_info davinci_soc_info_da850 = {
+ .gpio_irq = IRQ_DA8XX_GPIO0,
+ .serial_dev = &da8xx_serial_device,
+ .emac_pdata = &da8xx_emac_pdata,
+- .sram_dma = DA8XX_ARM_RAM_BASE,
+- .sram_len = SZ_8K,
++ .sram_phys = DA8XX_ARM_RAM_BASE,
++ .sram_dma = DA8XX_SHARED_RAM_BASE,
++ .sram_len = SZ_128K,
+ .reset_device = &da8xx_wdt_device,
+ };
+
+diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
+index 68def71..b899238 100644
+--- a/arch/arm/mach-davinci/devices-da8xx.c
++++ b/arch/arm/mach-davinci/devices-da8xx.c
+@@ -519,6 +519,71 @@ void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata)
+ }
+ }
+
++#define DA8XX_PRUSS_MEM_BASE 0x01C30000
++
++static struct resource da8xx_pruss_resources[] = {
++ {
++ .start = DA8XX_PRUSS_MEM_BASE,
++ .end = DA8XX_PRUSS_MEM_BASE + 0xFFFF,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_DA8XX_EVTOUT0,
++ .end = IRQ_DA8XX_EVTOUT0,
++ .flags = IORESOURCE_IRQ,
++ },
++ {
++ .start = IRQ_DA8XX_EVTOUT1,
++ .end = IRQ_DA8XX_EVTOUT1,
++ .flags = IORESOURCE_IRQ,
++ },
++ {
++ .start = IRQ_DA8XX_EVTOUT2,
++ .end = IRQ_DA8XX_EVTOUT2,
++ .flags = IORESOURCE_IRQ,
++ },
++ {
++ .start = IRQ_DA8XX_EVTOUT3,
++ .end = IRQ_DA8XX_EVTOUT3,
++ .flags = IORESOURCE_IRQ,
++ },
++ {
++ .start = IRQ_DA8XX_EVTOUT4,
++ .end = IRQ_DA8XX_EVTOUT4,
++ .flags = IORESOURCE_IRQ,
++ },
++ {
++ .start = IRQ_DA8XX_EVTOUT5,
++ .end = IRQ_DA8XX_EVTOUT5,
++ .flags = IORESOURCE_IRQ,
++ },
++ {
++ .start = IRQ_DA8XX_EVTOUT6,
++ .end = IRQ_DA8XX_EVTOUT6,
++ .flags = IORESOURCE_IRQ,
++ },
++ {
++ .start = IRQ_DA8XX_EVTOUT7,
++ .end = IRQ_DA8XX_EVTOUT7,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static struct platform_device da8xx_pruss_uio_dev = {
++ .name = "pruss_uio",
++ .id = -1,
++ .num_resources = ARRAY_SIZE(da8xx_pruss_resources),
++ .resource = da8xx_pruss_resources,
++ .dev = {
++ .coherent_dma_mask = 0xffffffff,
++ }
++};
++
++int __init da8xx_register_pruss_uio(struct uio_pruss_pdata *config)
++{
++ da8xx_pruss_uio_dev.dev.platform_data = config;
++ return platform_device_register(&da8xx_pruss_uio_dev);
++}
+ static const struct display_panel disp_panel = {
+ QVGA,
+ 16,
+@@ -541,6 +606,7 @@ static struct lcd_ctrl_config lcd_cfg = {
+ .sync_edge = 0,
+ .sync_ctrl = 1,
+ .raster_order = 0,
++ .fifo_th = 6,
+ };
+
+ struct da8xx_lcdc_platform_data sharp_lcd035q3dg01_pdata = {
+diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
+index fe520d4..07427f0 100644
+--- a/arch/arm/mach-davinci/dm355.c
++++ b/arch/arm/mach-davinci/dm355.c
+@@ -27,7 +27,7 @@
+ #include <mach/time.h>
+ #include <mach/serial.h>
+ #include <mach/common.h>
+-#include <mach/asp.h>
++#include <asm/hardware/asp.h>
+ #include <mach/spi.h>
+ #include <mach/gpio-davinci.h>
+
+@@ -851,7 +851,7 @@ static struct davinci_soc_info davinci_soc_info_dm355 = {
+ .gpio_num = 104,
+ .gpio_irq = IRQ_DM355_GPIOBNK0,
+ .serial_dev = &dm355_serial_device,
+- .sram_dma = 0x00010000,
++ .sram_phys = 0x00010000,
+ .sram_len = SZ_32K,
+ .reset_device = &davinci_wdt_device,
+ };
+diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
+index 679e168..ed9753d 100644
+--- a/arch/arm/mach-davinci/dm365.c
++++ b/arch/arm/mach-davinci/dm365.c
+@@ -30,7 +30,7 @@
+ #include <mach/time.h>
+ #include <mach/serial.h>
+ #include <mach/common.h>
+-#include <mach/asp.h>
++#include <asm/hardware/asp.h>
+ #include <mach/keyscan.h>
+ #include <mach/spi.h>
+ #include <mach/gpio-davinci.h>
+@@ -1081,7 +1081,7 @@ static struct davinci_soc_info davinci_soc_info_dm365 = {
+ .gpio_unbanked = 8, /* really 16 ... skip muxed GPIOs */
+ .serial_dev = &dm365_serial_device,
+ .emac_pdata = &dm365_emac_pdata,
+- .sram_dma = 0x00010000,
++ .sram_phys = 0x00010000,
+ .sram_len = SZ_32K,
+ .reset_device = &davinci_wdt_device,
+ };
+diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
+index 3470983..df72a4e 100644
+--- a/arch/arm/mach-davinci/dm644x.c
++++ b/arch/arm/mach-davinci/dm644x.c
+@@ -765,7 +765,7 @@ static struct davinci_soc_info davinci_soc_info_dm644x = {
+ .gpio_irq = IRQ_GPIOBNK0,
+ .serial_dev = &dm644x_serial_device,
+ .emac_pdata = &dm644x_emac_pdata,
+- .sram_dma = 0x00008000,
++ .sram_phys = 0x00008000,
+ .sram_len = SZ_16K,
+ .reset_device = &davinci_wdt_device,
+ };
+diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
+index af27c13..4ab327a 100644
+--- a/arch/arm/mach-davinci/dm646x.c
++++ b/arch/arm/mach-davinci/dm646x.c
+@@ -852,7 +852,7 @@ static struct davinci_soc_info davinci_soc_info_dm646x = {
+ .gpio_irq = IRQ_DM646X_GPIOBNK0,
+ .serial_dev = &dm646x_serial_device,
+ .emac_pdata = &dm646x_emac_pdata,
+- .sram_dma = 0x10010000,
++ .sram_phys = 0x10010000,
+ .sram_len = SZ_32K,
+ .reset_device = &davinci_wdt_device,
+ };
+diff --git a/arch/arm/mach-davinci/include/mach/asp.h b/arch/arm/mach-davinci/include/mach/asp.h
+deleted file mode 100644
+index 9aa2409..0000000
+--- a/arch/arm/mach-davinci/include/mach/asp.h
++++ /dev/null
+@@ -1,137 +0,0 @@
+-/*
+- * <mach/asp.h> - DaVinci Audio Serial Port support
+- */
+-#ifndef __ASM_ARCH_DAVINCI_ASP_H
+-#define __ASM_ARCH_DAVINCI_ASP_H
+-
+-#include <mach/irqs.h>
+-#include <mach/edma.h>
+-
+-/* Bases of dm644x and dm355 register banks */
+-#define DAVINCI_ASP0_BASE 0x01E02000
+-#define DAVINCI_ASP1_BASE 0x01E04000
+-
+-/* Bases of dm365 register banks */
+-#define DAVINCI_DM365_ASP0_BASE 0x01D02000
+-
+-/* Bases of dm646x register banks */
+-#define DAVINCI_DM646X_MCASP0_REG_BASE 0x01D01000
+-#define DAVINCI_DM646X_MCASP1_REG_BASE 0x01D01800
+-
+-/* Bases of da850/da830 McASP0 register banks */
+-#define DAVINCI_DA8XX_MCASP0_REG_BASE 0x01D00000
+-
+-/* Bases of da830 McASP1 register banks */
+-#define DAVINCI_DA830_MCASP1_REG_BASE 0x01D04000
+-
+-/* EDMA channels of dm644x and dm355 */
+-#define DAVINCI_DMA_ASP0_TX 2
+-#define DAVINCI_DMA_ASP0_RX 3
+-#define DAVINCI_DMA_ASP1_TX 8
+-#define DAVINCI_DMA_ASP1_RX 9
+-
+-/* EDMA channels of dm646x */
+-#define DAVINCI_DM646X_DMA_MCASP0_AXEVT0 6
+-#define DAVINCI_DM646X_DMA_MCASP0_AREVT0 9
+-#define DAVINCI_DM646X_DMA_MCASP1_AXEVT1 12
+-
+-/* EDMA channels of da850/da830 McASP0 */
+-#define DAVINCI_DA8XX_DMA_MCASP0_AREVT 0
+-#define DAVINCI_DA8XX_DMA_MCASP0_AXEVT 1
+-
+-/* EDMA channels of da830 McASP1 */
+-#define DAVINCI_DA830_DMA_MCASP1_AREVT 2
+-#define DAVINCI_DA830_DMA_MCASP1_AXEVT 3
+-
+-/* Interrupts */
+-#define DAVINCI_ASP0_RX_INT IRQ_MBRINT
+-#define DAVINCI_ASP0_TX_INT IRQ_MBXINT
+-#define DAVINCI_ASP1_RX_INT IRQ_MBRINT
+-#define DAVINCI_ASP1_TX_INT IRQ_MBXINT
+-
+-struct snd_platform_data {
+- u32 tx_dma_offset;
+- u32 rx_dma_offset;
+- enum dma_event_q asp_chan_q; /* event queue number for ASP channel */
+- enum dma_event_q ram_chan_q; /* event queue number for RAM channel */
+- unsigned int codec_fmt;
+- /*
+- * Allowing this is more efficient and eliminates left and right swaps
+- * caused by underruns, but will swap the left and right channels
+- * when compared to previous behavior.
+- */
+- unsigned enable_channel_combine:1;
+- unsigned sram_size_playback;
+- unsigned sram_size_capture;
+-
+- /*
+- * If McBSP peripheral gets the clock from an external pin,
+- * there are three chooses, that are MCBSP_CLKX, MCBSP_CLKR
+- * and MCBSP_CLKS.
+- * Depending on different hardware connections it is possible
+- * to use this setting to change the behaviour of McBSP
+- * driver. The dm365_clk_input_pin enum is available for dm365
+- */
+- int clk_input_pin;
+-
+- /*
+- * This flag works when both clock and FS are outputs for the cpu
+- * and makes clock more accurate (FS is not symmetrical and the
+- * clock is very fast.
+- * The clock becoming faster is named
+- * i2s continuous serial clock (I2S_SCK) and it is an externally
+- * visible bit clock.
+- *
+- * first line : WordSelect
+- * second line : ContinuousSerialClock
+- * third line: SerialData
+- *
+- * SYMMETRICAL APPROACH:
+- * _______________________ LEFT
+- * _| RIGHT |______________________|
+- * _ _ _ _ _ _ _ _
+- * _| |_| |_ x16 _| |_| |_| |_| |_ x16 _| |_| |_
+- * _ _ _ _ _ _ _ _
+- * _/ \_/ \_ ... _/ \_/ \_/ \_/ \_ ... _/ \_/ \_
+- * \_/ \_/ \_/ \_/ \_/ \_/ \_/ \_/
+- *
+- * ACCURATE CLOCK APPROACH:
+- * ______________ LEFT
+- * _| RIGHT |_______________________________|
+- * _ _ _ _ _ _ _ _ _
+- * _| |_ x16 _| |_| |_ x16 _| |_| |_| |_| |_| |_| |
+- * _ _ _ _ dummy cycles
+- * _/ \_ ... _/ \_/ \_ ... _/ \__________________
+- * \_/ \_/ \_/ \_/
+- *
+- */
+- bool i2s_accurate_sck;
+-
+- /* McASP specific fields */
+- int tdm_slots;
+- u8 op_mode;
+- u8 num_serializer;
+- u8 *serial_dir;
+- u8 version;
+- u8 txnumevt;
+- u8 rxnumevt;
+-};
+-
+-enum {
+- MCASP_VERSION_1 = 0, /* DM646x */
+- MCASP_VERSION_2, /* DA8xx/OMAPL1x */
+-};
+-
+-enum dm365_clk_input_pin {
+- MCBSP_CLKR = 0, /* DM365 */
+- MCBSP_CLKS,
+-};
+-
+-#define INACTIVE_MODE 0
+-#define TX_MODE 1
+-#define RX_MODE 2
+-
+-#define DAVINCI_MCASP_IIS_MODE 0
+-#define DAVINCI_MCASP_DIT_MODE 1
+-
+-#endif /* __ASM_ARCH_DAVINCI_ASP_H */
+diff --git a/arch/arm/mach-davinci/include/mach/common.h b/arch/arm/mach-davinci/include/mach/common.h
+index a57cba2..665d049 100644
+--- a/arch/arm/mach-davinci/include/mach/common.h
++++ b/arch/arm/mach-davinci/include/mach/common.h
+@@ -75,7 +75,7 @@ struct davinci_soc_info {
+ int gpio_ctlrs_num;
+ struct platform_device *serial_dev;
+ struct emac_platform_data *emac_pdata;
+- dma_addr_t sram_dma;
++ phys_addr_t sram_phys;
+ unsigned sram_len;
+ struct platform_device *reset_device;
+ void (*reset)(struct platform_device *);
+diff --git a/arch/arm/mach-davinci/include/mach/da8xx.h b/arch/arm/mach-davinci/include/mach/da8xx.h
+index eaca7d8..f9a577d 100644
+--- a/arch/arm/mach-davinci/include/mach/da8xx.h
++++ b/arch/arm/mach-davinci/include/mach/da8xx.h
+@@ -16,11 +16,12 @@
+ #include <linux/platform_device.h>
+ #include <linux/davinci_emac.h>
+ #include <linux/spi/spi.h>
++#include <linux/platform_data/uio_pruss.h>
+
+ #include <mach/serial.h>
+ #include <mach/edma.h>
+ #include <mach/i2c.h>
+-#include <mach/asp.h>
++#include <asm/hardware/asp.h>
+ #include <mach/mmc.h>
+ #include <mach/usb.h>
+ #include <mach/pm.h>
+@@ -69,6 +70,7 @@ extern unsigned int da850_max_speed;
+ #define DA8XX_AEMIF_CS3_BASE 0x62000000
+ #define DA8XX_AEMIF_CTL_BASE 0x68000000
+ #define DA8XX_ARM_RAM_BASE 0xffff0000
++#define DA8XX_SHARED_RAM_BASE 0x80000000
+
+ void __init da830_init(void);
+ void __init da850_init(void);
+@@ -81,6 +83,7 @@ int da8xx_register_watchdog(void);
+ int da8xx_register_usb20(unsigned mA, unsigned potpgt);
+ int da8xx_register_usb11(struct da8xx_ohci_root_hub *pdata);
+ int da8xx_register_emac(void);
++int da8xx_register_pruss_uio(struct uio_pruss_pdata *config);
+ int da8xx_register_lcdc(struct da8xx_lcdc_platform_data *pdata);
+ int da8xx_register_mmcsd0(struct davinci_mmc_config *config);
+ int da850_register_mmcsd1(struct davinci_mmc_config *config);
+diff --git a/arch/arm/mach-davinci/include/mach/dm355.h b/arch/arm/mach-davinci/include/mach/dm355.h
+index 36dff4a..a5829ea 100644
+--- a/arch/arm/mach-davinci/include/mach/dm355.h
++++ b/arch/arm/mach-davinci/include/mach/dm355.h
+@@ -12,7 +12,7 @@
+ #define __ASM_ARCH_DM355_H
+
+ #include <mach/hardware.h>
+-#include <mach/asp.h>
++#include <asm/hardware/asp.h>
+ #include <media/davinci/vpfe_capture.h>
+
+ #define DM355_ASYNC_EMIF_CONTROL_BASE 0x01E10000
+diff --git a/arch/arm/mach-davinci/include/mach/dm365.h b/arch/arm/mach-davinci/include/mach/dm365.h
+index 2563bf4..92d8db2 100644
+--- a/arch/arm/mach-davinci/include/mach/dm365.h
++++ b/arch/arm/mach-davinci/include/mach/dm365.h
+@@ -16,7 +16,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/davinci_emac.h>
+ #include <mach/hardware.h>
+-#include <mach/asp.h>
++#include <asm/hardware/asp.h>
+ #include <mach/keyscan.h>
+ #include <media/davinci/vpfe_capture.h>
+
+diff --git a/arch/arm/mach-davinci/include/mach/dm644x.h b/arch/arm/mach-davinci/include/mach/dm644x.h
+index 5a1b26d..dd1bfa3 100644
+--- a/arch/arm/mach-davinci/include/mach/dm644x.h
++++ b/arch/arm/mach-davinci/include/mach/dm644x.h
+@@ -24,7 +24,7 @@
+
+ #include <linux/davinci_emac.h>
+ #include <mach/hardware.h>
+-#include <mach/asp.h>
++#include <asm/hardware/asp.h>
+ #include <media/davinci/vpfe_capture.h>
+
+ #define DM644X_EMAC_BASE (0x01C80000)
+diff --git a/arch/arm/mach-davinci/include/mach/dm646x.h b/arch/arm/mach-davinci/include/mach/dm646x.h
+index 2a00fe5..8294bc6 100644
+--- a/arch/arm/mach-davinci/include/mach/dm646x.h
++++ b/arch/arm/mach-davinci/include/mach/dm646x.h
+@@ -12,7 +12,7 @@
+ #define __ASM_ARCH_DM646X_H
+
+ #include <mach/hardware.h>
+-#include <mach/asp.h>
++#include <asm/hardware/asp.h>
+ #include <linux/i2c.h>
+ #include <linux/videodev2.h>
+ #include <linux/davinci_emac.h>
+diff --git a/arch/arm/mach-davinci/include/mach/io.h b/arch/arm/mach-davinci/include/mach/io.h
+index d1b9549..b2267d1 100644
+--- a/arch/arm/mach-davinci/include/mach/io.h
++++ b/arch/arm/mach-davinci/include/mach/io.h
+@@ -21,12 +21,4 @@
+ #define __mem_pci(a) (a)
+ #define __mem_isa(a) (a)
+
+-#ifndef __ASSEMBLER__
+-#define __arch_ioremap davinci_ioremap
+-#define __arch_iounmap davinci_iounmap
+-
+-void __iomem *davinci_ioremap(unsigned long phys, size_t size,
+- unsigned int type);
+-void davinci_iounmap(volatile void __iomem *addr);
+-#endif
+ #endif /* __ASM_ARCH_IO_H */
+diff --git a/arch/arm/mach-davinci/include/mach/sram.h b/arch/arm/mach-davinci/include/mach/sram.h
+index 111f7cc..aa52009 100644
+--- a/arch/arm/mach-davinci/include/mach/sram.h
++++ b/arch/arm/mach-davinci/include/mach/sram.h
+@@ -10,18 +10,11 @@
+ #ifndef __MACH_SRAM_H
+ #define __MACH_SRAM_H
+
++#include <linux/genalloc.h>
++
+ /* ARBITRARY: SRAM allocations are multiples of this 2^N size */
+ #define SRAM_GRANULARITY 512
+
+-/*
+- * SRAM allocations return a CPU virtual address, or NULL on error.
+- * If a DMA address is requested and the SRAM supports DMA, its
+- * mapped address is also returned.
+- *
+- * Errors include SRAM memory not being available, and requesting
+- * DMA mapped SRAM on systems which don't allow that.
+- */
+-extern void *sram_alloc(size_t len, dma_addr_t *dma);
+-extern void sram_free(void *addr, size_t len);
++extern struct gen_pool *davinci_gen_pool;
+
+ #endif /* __MACH_SRAM_H */
+diff --git a/arch/arm/mach-davinci/include/mach/vmalloc.h b/arch/arm/mach-davinci/include/mach/vmalloc.h
+deleted file mode 100644
+index d49646a..0000000
+--- a/arch/arm/mach-davinci/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,14 +0,0 @@
+-/*
+- * DaVinci vmalloc definitions
+- *
+- * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
+- *
+- * 2007 (c) MontaVista Software, Inc. This file is licensed under
+- * the terms of the GNU General Public License version 2. This program
+- * is licensed "as is" without any warranty of any kind, whether express
+- * or implied.
+- */
+-#include <mach/hardware.h>
+-
+-/* Allow vmalloc range until the IO virtual range minus a 2M "hole" */
+-#define VMALLOC_END (IO_VIRT - (2<<20))
+diff --git a/arch/arm/mach-davinci/io.c b/arch/arm/mach-davinci/io.c
+deleted file mode 100644
+index 8ea60a8..0000000
+--- a/arch/arm/mach-davinci/io.c
++++ /dev/null
+@@ -1,48 +0,0 @@
+-/*
+- * DaVinci I/O mapping code
+- *
+- * Copyright (C) 2005-2006 Texas Instruments
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-
+-#include <linux/module.h>
+-#include <linux/io.h>
+-
+-#include <asm/tlb.h>
+-#include <asm/mach/map.h>
+-
+-#include <mach/common.h>
+-
+-/*
+- * Intercept ioremap() requests for addresses in our fixed mapping regions.
+- */
+-void __iomem *davinci_ioremap(unsigned long p, size_t size, unsigned int type)
+-{
+- struct map_desc *desc = davinci_soc_info.io_desc;
+- int desc_num = davinci_soc_info.io_desc_num;
+- int i;
+-
+- for (i = 0; i < desc_num; i++, desc++) {
+- unsigned long iophys = __pfn_to_phys(desc->pfn);
+- unsigned long iosize = desc->length;
+-
+- if (p >= iophys && (p + size) <= (iophys + iosize))
+- return __io(desc->virtual + p - iophys);
+- }
+-
+- return __arm_ioremap_caller(p, size, type,
+- __builtin_return_address(0));
+-}
+-EXPORT_SYMBOL(davinci_ioremap);
+-
+-void davinci_iounmap(volatile void __iomem *addr)
+-{
+- unsigned long virt = (unsigned long)addr;
+-
+- if (virt >= VMALLOC_START && virt < VMALLOC_END)
+- __iounmap(addr);
+-}
+-EXPORT_SYMBOL(davinci_iounmap);
+diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
+index 04c49f7..25b1d2a 100644
+--- a/arch/arm/mach-davinci/pm.c
++++ b/arch/arm/mach-davinci/pm.c
+@@ -18,6 +18,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/delay.h>
+ #include <asm/io.h>
++#include <asm/fncpy.h>
+
+ #include <mach/da8xx.h>
+ #include <mach/sram.h>
+@@ -28,14 +29,9 @@
+ #define DEEPSLEEP_SLEEPCOUNT_MASK 0xFFFF
+
+ static void (*davinci_sram_suspend) (struct davinci_pm_config *);
++static void *davinci_sram_suspend_mem;
+ static struct davinci_pm_config *pdata;
+
+-static void davinci_sram_push(void *dest, void *src, unsigned int size)
+-{
+- memcpy(dest, src, size);
+- flush_icache_range((unsigned long)dest, (unsigned long)(dest + size));
+-}
+-
+ static void davinci_pm_suspend(void)
+ {
+ unsigned val;
+@@ -124,14 +120,14 @@ static int __init davinci_pm_probe(struct platform_device *pdev)
+ return -ENOENT;
+ }
+
+- davinci_sram_suspend = sram_alloc(davinci_cpu_suspend_sz, NULL);
+- if (!davinci_sram_suspend) {
++ davinci_sram_suspend_mem = (void *)gen_pool_alloc(davinci_gen_pool,
++ davinci_cpu_suspend_sz);
++ if (!davinci_sram_suspend_mem) {
+ dev_err(&pdev->dev, "cannot allocate SRAM memory\n");
+ return -ENOMEM;
+ }
+-
+- davinci_sram_push(davinci_sram_suspend, davinci_cpu_suspend,
+- davinci_cpu_suspend_sz);
++ davinci_sram_suspend = fncpy(davinci_sram_suspend_mem,
++ &davinci_cpu_suspend, davinci_cpu_suspend_sz);
+
+ suspend_set_ops(&davinci_pm_ops);
+
+@@ -140,7 +136,8 @@ static int __init davinci_pm_probe(struct platform_device *pdev)
+
+ static int __exit davinci_pm_remove(struct platform_device *pdev)
+ {
+- sram_free(davinci_sram_suspend, davinci_cpu_suspend_sz);
++ gen_pool_free(davinci_gen_pool, (unsigned long)davinci_sram_suspend_mem,
++ davinci_cpu_suspend_sz);
+ return 0;
+ }
+
+diff --git a/arch/arm/mach-davinci/sram.c b/arch/arm/mach-davinci/sram.c
+index db0f778..2c53db2 100644
+--- a/arch/arm/mach-davinci/sram.c
++++ b/arch/arm/mach-davinci/sram.c
+@@ -10,40 +10,12 @@
+ */
+ #include <linux/module.h>
+ #include <linux/init.h>
+-#include <linux/genalloc.h>
+
+ #include <mach/common.h>
+ #include <mach/sram.h>
+
+-static struct gen_pool *sram_pool;
+-
+-void *sram_alloc(size_t len, dma_addr_t *dma)
+-{
+- unsigned long vaddr;
+- dma_addr_t dma_base = davinci_soc_info.sram_dma;
+-
+- if (dma)
+- *dma = 0;
+- if (!sram_pool || (dma && !dma_base))
+- return NULL;
+-
+- vaddr = gen_pool_alloc(sram_pool, len);
+- if (!vaddr)
+- return NULL;
+-
+- if (dma)
+- *dma = dma_base + (vaddr - SRAM_VIRT);
+- return (void *)vaddr;
+-
+-}
+-EXPORT_SYMBOL(sram_alloc);
+-
+-void sram_free(void *addr, size_t len)
+-{
+- gen_pool_free(sram_pool, (unsigned long) addr, len);
+-}
+-EXPORT_SYMBOL(sram_free);
+-
++struct gen_pool *davinci_gen_pool;
++EXPORT_SYMBOL_GPL(davinci_gen_pool);
+
+ /*
+ * REVISIT This supports CPU and DMA access to/from SRAM, but it
+@@ -54,18 +26,19 @@ EXPORT_SYMBOL(sram_free);
+ static int __init sram_init(void)
+ {
+ unsigned len = davinci_soc_info.sram_len;
+- int status = 0;
+
+- if (len) {
+- len = min_t(unsigned, len, SRAM_SIZE);
+- sram_pool = gen_pool_create(ilog2(SRAM_GRANULARITY), -1);
+- if (!sram_pool)
+- status = -ENOMEM;
+- }
+- if (sram_pool)
+- status = gen_pool_add(sram_pool, SRAM_VIRT, len, -1);
+- WARN_ON(status < 0);
+- return status;
++ if (!len)
++ return 0;
++
++ len = min_t(unsigned, len, SRAM_SIZE);
++ davinci_gen_pool = gen_pool_create(ilog2(SRAM_GRANULARITY), -1);
++
++ if (!davinci_gen_pool)
++ return -ENOMEM;
++
++ WARN_ON(gen_pool_add_virt(davinci_gen_pool, SRAM_VIRT,
++ davinci_soc_info.sram_phys, len, -1));
++
++ return 0;
+ }
+ core_initcall(sram_init);
+-
+diff --git a/arch/arm/mach-dove/include/mach/dove.h b/arch/arm/mach-dove/include/mach/dove.h
+index b20ec9a..ad1165d 100644
+--- a/arch/arm/mach-dove/include/mach/dove.h
++++ b/arch/arm/mach-dove/include/mach/dove.h
+@@ -11,8 +11,6 @@
+ #ifndef __ASM_ARCH_DOVE_H
+ #define __ASM_ARCH_DOVE_H
+
+-#include <mach/vmalloc.h>
+-
+ /*
+ * Marvell Dove address maps.
+ *
+diff --git a/arch/arm/mach-dove/include/mach/vmalloc.h b/arch/arm/mach-dove/include/mach/vmalloc.h
+deleted file mode 100644
+index a28792c..0000000
+--- a/arch/arm/mach-dove/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,5 +0,0 @@
+-/*
+- * arch/arm/mach-dove/include/mach/vmalloc.h
+- */
+-
+-#define VMALLOC_END 0xfd800000UL
+diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c
+index d0ce8ab..ce3ed24 100644
+--- a/arch/arm/mach-ebsa110/core.c
++++ b/arch/arm/mach-ebsa110/core.c
+@@ -283,7 +283,7 @@ MACHINE_START(EBSA110, "EBSA110")
+ .atag_offset = 0x400,
+ .reserve_lp0 = 1,
+ .reserve_lp2 = 1,
+- .soft_reboot = 1,
++ .restart_mode = 's',
+ .map_io = ebsa110_map_io,
+ .init_irq = ebsa110_init_irq,
+ .timer = &ebsa110_timer,
+diff --git a/arch/arm/mach-ebsa110/include/mach/system.h b/arch/arm/mach-ebsa110/include/mach/system.h
+index 9a26245..0d5df72 100644
+--- a/arch/arm/mach-ebsa110/include/mach/system.h
++++ b/arch/arm/mach-ebsa110/include/mach/system.h
+@@ -34,6 +34,6 @@ static inline void arch_idle(void)
+ asm volatile ("mcr p15, 0, ip, c15, c1, 2" : : : "cc");
+ }
+
+-#define arch_reset(mode, cmd) cpu_reset(0x80000000)
++#define arch_reset(mode, cmd) soft_restart(0x80000000)
+
+ #endif
+diff --git a/arch/arm/mach-ebsa110/include/mach/vmalloc.h b/arch/arm/mach-ebsa110/include/mach/vmalloc.h
+deleted file mode 100644
+index ea141b7..0000000
+--- a/arch/arm/mach-ebsa110/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,10 +0,0 @@
+-/*
+- * arch/arm/mach-ebsa110/include/mach/vmalloc.h
+- *
+- * Copyright (C) 1998 Russell King
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-#define VMALLOC_END 0xdf000000UL
+diff --git a/arch/arm/mach-ep93xx/adssphere.c b/arch/arm/mach-ep93xx/adssphere.c
+index 0713448..d9b0ea2 100644
+--- a/arch/arm/mach-ep93xx/adssphere.c
++++ b/arch/arm/mach-ep93xx/adssphere.c
+@@ -16,6 +16,7 @@
+
+ #include <mach/hardware.h>
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
+
+@@ -36,6 +37,7 @@ MACHINE_START(ADSSPHERE, "ADS Sphere board")
+ .atag_offset = 0x100,
+ .map_io = ep93xx_map_io,
+ .init_irq = ep93xx_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &ep93xx_timer,
+ .init_machine = adssphere_init_machine,
+ MACHINE_END
+diff --git a/arch/arm/mach-ep93xx/edb93xx.c b/arch/arm/mach-ep93xx/edb93xx.c
+index 70ef8c5..9bbae08 100644
+--- a/arch/arm/mach-ep93xx/edb93xx.c
++++ b/arch/arm/mach-ep93xx/edb93xx.c
+@@ -39,6 +39,7 @@
+ #include <mach/ep93xx_spi.h>
+ #include <mach/gpio-ep93xx.h>
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
+
+@@ -250,6 +251,7 @@ MACHINE_START(EDB9301, "Cirrus Logic EDB9301 Evaluation Board")
+ .atag_offset = 0x100,
+ .map_io = ep93xx_map_io,
+ .init_irq = ep93xx_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &ep93xx_timer,
+ .init_machine = edb93xx_init_machine,
+ MACHINE_END
+@@ -261,6 +263,7 @@ MACHINE_START(EDB9302, "Cirrus Logic EDB9302 Evaluation Board")
+ .atag_offset = 0x100,
+ .map_io = ep93xx_map_io,
+ .init_irq = ep93xx_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &ep93xx_timer,
+ .init_machine = edb93xx_init_machine,
+ MACHINE_END
+@@ -272,6 +275,7 @@ MACHINE_START(EDB9302A, "Cirrus Logic EDB9302A Evaluation Board")
+ .atag_offset = 0x100,
+ .map_io = ep93xx_map_io,
+ .init_irq = ep93xx_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &ep93xx_timer,
+ .init_machine = edb93xx_init_machine,
+ MACHINE_END
+@@ -283,6 +287,7 @@ MACHINE_START(EDB9307, "Cirrus Logic EDB9307 Evaluation Board")
+ .atag_offset = 0x100,
+ .map_io = ep93xx_map_io,
+ .init_irq = ep93xx_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &ep93xx_timer,
+ .init_machine = edb93xx_init_machine,
+ MACHINE_END
+@@ -294,6 +299,7 @@ MACHINE_START(EDB9307A, "Cirrus Logic EDB9307A Evaluation Board")
+ .atag_offset = 0x100,
+ .map_io = ep93xx_map_io,
+ .init_irq = ep93xx_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &ep93xx_timer,
+ .init_machine = edb93xx_init_machine,
+ MACHINE_END
+@@ -305,6 +311,7 @@ MACHINE_START(EDB9312, "Cirrus Logic EDB9312 Evaluation Board")
+ .atag_offset = 0x100,
+ .map_io = ep93xx_map_io,
+ .init_irq = ep93xx_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &ep93xx_timer,
+ .init_machine = edb93xx_init_machine,
+ MACHINE_END
+@@ -316,6 +323,7 @@ MACHINE_START(EDB9315, "Cirrus Logic EDB9315 Evaluation Board")
+ .atag_offset = 0x100,
+ .map_io = ep93xx_map_io,
+ .init_irq = ep93xx_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &ep93xx_timer,
+ .init_machine = edb93xx_init_machine,
+ MACHINE_END
+@@ -327,6 +335,7 @@ MACHINE_START(EDB9315A, "Cirrus Logic EDB9315A Evaluation Board")
+ .atag_offset = 0x100,
+ .map_io = ep93xx_map_io,
+ .init_irq = ep93xx_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &ep93xx_timer,
+ .init_machine = edb93xx_init_machine,
+ MACHINE_END
+diff --git a/arch/arm/mach-ep93xx/gesbc9312.c b/arch/arm/mach-ep93xx/gesbc9312.c
+index 45ee205..1dd32a7 100644
+--- a/arch/arm/mach-ep93xx/gesbc9312.c
++++ b/arch/arm/mach-ep93xx/gesbc9312.c
+@@ -16,6 +16,7 @@
+
+ #include <mach/hardware.h>
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
+
+@@ -36,6 +37,7 @@ MACHINE_START(GESBC9312, "Glomation GESBC-9312-sx")
+ .atag_offset = 0x100,
+ .map_io = ep93xx_map_io,
+ .init_irq = ep93xx_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &ep93xx_timer,
+ .init_machine = gesbc9312_init_machine,
+ MACHINE_END
+diff --git a/arch/arm/mach-ep93xx/include/mach/entry-macro.S b/arch/arm/mach-ep93xx/include/mach/entry-macro.S
+index 96b85e2..9be6edc 100644
+--- a/arch/arm/mach-ep93xx/include/mach/entry-macro.S
++++ b/arch/arm/mach-ep93xx/include/mach/entry-macro.S
+@@ -9,51 +9,9 @@
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+-#include <mach/ep93xx-regs.h>
+
+ .macro disable_fiq
+ .endm
+
+- .macro get_irqnr_preamble, base, tmp
+- .endm
+-
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+-
+- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+- ldr \base, =(EP93XX_AHB_VIRT_BASE)
+- orr \base, \base, #0x000b0000
+- mov \irqnr, #0
+- ldr \irqstat, [\base] @ lower 32 interrupts
+- cmp \irqstat, #0
+- bne 1001f
+-
+- eor \base, \base, #0x00070000
+- ldr \irqstat, [\base] @ upper 32 interrupts
+- cmp \irqstat, #0
+- beq 1002f
+- mov \irqnr, #0x20
+-
+-1001:
+- movs \tmp, \irqstat, lsl #16
+- movne \irqstat, \tmp
+- addeq \irqnr, \irqnr, #16
+-
+- movs \tmp, \irqstat, lsl #8
+- movne \irqstat, \tmp
+- addeq \irqnr, \irqnr, #8
+-
+- movs \tmp, \irqstat, lsl #4
+- movne \irqstat, \tmp
+- addeq \irqnr, \irqnr, #4
+-
+- movs \tmp, \irqstat, lsl #2
+- movne \irqstat, \tmp
+- addeq \irqnr, \irqnr, #2
+-
+- movs \tmp, \irqstat, lsl #1
+- addeq \irqnr, \irqnr, #1
+- orrs \base, \base, #1
+-
+-1002:
+- .endm
+diff --git a/arch/arm/mach-ep93xx/include/mach/system.h b/arch/arm/mach-ep93xx/include/mach/system.h
+index 6d661fe..bdf6c4f 100644
+--- a/arch/arm/mach-ep93xx/include/mach/system.h
++++ b/arch/arm/mach-ep93xx/include/mach/system.h
+@@ -11,8 +11,6 @@ static inline void arch_idle(void)
+
+ static inline void arch_reset(char mode, const char *cmd)
+ {
+- local_irq_disable();
+-
+ /*
+ * Set then clear the SWRST bit to initiate a software reset
+ */
+diff --git a/arch/arm/mach-ep93xx/include/mach/vmalloc.h b/arch/arm/mach-ep93xx/include/mach/vmalloc.h
+deleted file mode 100644
+index 1b3f25d..0000000
+--- a/arch/arm/mach-ep93xx/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,5 +0,0 @@
+-/*
+- * arch/arm/mach-ep93xx/include/mach/vmalloc.h
+- */
+-
+-#define VMALLOC_END 0xfe800000UL
+diff --git a/arch/arm/mach-ep93xx/micro9.c b/arch/arm/mach-ep93xx/micro9.c
+index e72f736..a6dae6c 100644
+--- a/arch/arm/mach-ep93xx/micro9.c
++++ b/arch/arm/mach-ep93xx/micro9.c
+@@ -18,6 +18,7 @@
+
+ #include <mach/hardware.h>
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
+
+@@ -80,6 +81,7 @@ MACHINE_START(MICRO9, "Contec Micro9-High")
+ .atag_offset = 0x100,
+ .map_io = ep93xx_map_io,
+ .init_irq = ep93xx_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &ep93xx_timer,
+ .init_machine = micro9_init_machine,
+ MACHINE_END
+@@ -91,6 +93,7 @@ MACHINE_START(MICRO9M, "Contec Micro9-Mid")
+ .atag_offset = 0x100,
+ .map_io = ep93xx_map_io,
+ .init_irq = ep93xx_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &ep93xx_timer,
+ .init_machine = micro9_init_machine,
+ MACHINE_END
+@@ -102,6 +105,7 @@ MACHINE_START(MICRO9L, "Contec Micro9-Lite")
+ .atag_offset = 0x100,
+ .map_io = ep93xx_map_io,
+ .init_irq = ep93xx_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &ep93xx_timer,
+ .init_machine = micro9_init_machine,
+ MACHINE_END
+@@ -113,6 +117,7 @@ MACHINE_START(MICRO9S, "Contec Micro9-Slim")
+ .atag_offset = 0x100,
+ .map_io = ep93xx_map_io,
+ .init_irq = ep93xx_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &ep93xx_timer,
+ .init_machine = micro9_init_machine,
+ MACHINE_END
+diff --git a/arch/arm/mach-ep93xx/simone.c b/arch/arm/mach-ep93xx/simone.c
+index 52e090d..40121ba 100644
+--- a/arch/arm/mach-ep93xx/simone.c
++++ b/arch/arm/mach-ep93xx/simone.c
+@@ -25,6 +25,7 @@
+ #include <mach/fb.h>
+ #include <mach/gpio-ep93xx.h>
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
+
+@@ -80,6 +81,7 @@ MACHINE_START(SIM_ONE, "Simplemachines Sim.One Board")
+ .atag_offset = 0x100,
+ .map_io = ep93xx_map_io,
+ .init_irq = ep93xx_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &ep93xx_timer,
+ .init_machine = simone_init_machine,
+ MACHINE_END
+diff --git a/arch/arm/mach-ep93xx/snappercl15.c b/arch/arm/mach-ep93xx/snappercl15.c
+index 8121e3a..ec7c63f 100644
+--- a/arch/arm/mach-ep93xx/snappercl15.c
++++ b/arch/arm/mach-ep93xx/snappercl15.c
+@@ -31,6 +31,7 @@
+ #include <mach/fb.h>
+ #include <mach/gpio-ep93xx.h>
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
+
+@@ -177,6 +178,7 @@ MACHINE_START(SNAPPER_CL15, "Bluewater Systems Snapper CL15")
+ .atag_offset = 0x100,
+ .map_io = ep93xx_map_io,
+ .init_irq = ep93xx_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &ep93xx_timer,
+ .init_machine = snappercl15_init_machine,
+ MACHINE_END
+diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
+index 8b2f143..760384e 100644
+--- a/arch/arm/mach-ep93xx/ts72xx.c
++++ b/arch/arm/mach-ep93xx/ts72xx.c
+@@ -23,6 +23,7 @@
+ #include <mach/hardware.h>
+ #include <mach/ts72xx.h>
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach-types.h>
+ #include <asm/mach/map.h>
+ #include <asm/mach/arch.h>
+@@ -247,6 +248,7 @@ MACHINE_START(TS72XX, "Technologic Systems TS-72xx SBC")
+ .atag_offset = 0x100,
+ .map_io = ts72xx_map_io,
+ .init_irq = ep93xx_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &ep93xx_timer,
+ .init_machine = ts72xx_init_machine,
+ MACHINE_END
+diff --git a/arch/arm/mach-exynos/cpu.c b/arch/arm/mach-exynos/cpu.c
+index cc8d4bd..699774c 100644
+--- a/arch/arm/mach-exynos/cpu.c
++++ b/arch/arm/mach-exynos/cpu.c
+@@ -15,6 +15,7 @@
+ #include <asm/mach/irq.h>
+
+ #include <asm/proc-fns.h>
++#include <asm/exception.h>
+ #include <asm/hardware/cache-l2x0.h>
+ #include <asm/hardware/gic.h>
+
+@@ -33,8 +34,6 @@
+ #include <mach/regs-irq.h>
+ #include <mach/regs-pmu.h>
+
+-unsigned int gic_bank_offset __read_mostly;
+-
+ extern int combiner_init(unsigned int combiner_nr, void __iomem *base,
+ unsigned int irq_start);
+ extern void combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq);
+@@ -202,27 +201,14 @@ void __init exynos4_init_clocks(int xtal)
+ exynos4_setup_clocks();
+ }
+
+-static void exynos4_gic_irq_fix_base(struct irq_data *d)
+-{
+- struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
+-
+- gic_data->cpu_base = S5P_VA_GIC_CPU +
+- (gic_bank_offset * smp_processor_id());
+-
+- gic_data->dist_base = S5P_VA_GIC_DIST +
+- (gic_bank_offset * smp_processor_id());
+-}
+-
+ void __init exynos4_init_irq(void)
+ {
+ int irq;
++ unsigned int gic_bank_offset;
+
+ gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;
+
+- gic_init(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU);
+- gic_arch_extn.irq_eoi = exynos4_gic_irq_fix_base;
+- gic_arch_extn.irq_unmask = exynos4_gic_irq_fix_base;
+- gic_arch_extn.irq_mask = exynos4_gic_irq_fix_base;
++ gic_init_bases(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU, gic_bank_offset);
+
+ for (irq = 0; irq < MAX_COMBINER_NR; irq++) {
+
+diff --git a/arch/arm/mach-exynos/include/mach/entry-macro.S b/arch/arm/mach-exynos/include/mach/entry-macro.S
+index f5e9fd8..3ba4f54 100644
+--- a/arch/arm/mach-exynos/include/mach/entry-macro.S
++++ b/arch/arm/mach-exynos/include/mach/entry-macro.S
+@@ -9,83 +9,8 @@
+ * warranty of any kind, whether express or implied.
+ */
+
+-#include <mach/hardware.h>
+-#include <mach/map.h>
+-#include <asm/hardware/gic.h>
+-
+ .macro disable_fiq
+ .endm
+
+- .macro get_irqnr_preamble, base, tmp
+- mov \tmp, #0
+-
+- mrc p15, 0, \base, c0, c0, 5
+- and \base, \base, #3
+- cmp \base, #0
+- beq 1f
+-
+- ldr \tmp, =gic_bank_offset
+- ldr \tmp, [\tmp]
+- cmp \base, #1
+- beq 1f
+-
+- cmp \base, #2
+- addeq \tmp, \tmp, \tmp
+- addne \tmp, \tmp, \tmp, LSL #1
+-
+-1: ldr \base, =gic_cpu_base_addr
+- ldr \base, [\base]
+- add \base, \base, \tmp
+- .endm
+-
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+-
+- /*
+- * The interrupt numbering scheme is defined in the
+- * interrupt controller spec. To wit:
+- *
+- * Interrupts 0-15 are IPI
+- * 16-28 are reserved
+- * 29-31 are local. We allow 30 to be used for the watchdog.
+- * 32-1020 are global
+- * 1021-1022 are reserved
+- * 1023 is "spurious" (no interrupt)
+- *
+- * For now, we ignore all local interrupts so only return an interrupt if it's
+- * between 30 and 1020. The test_for_ipi routine below will pick up on IPIs.
+- *
+- * A simple read from the controller will tell us the number of the highest
+- * priority enabled interrupt. We then just need to check whether it is in the
+- * valid range for an IRQ (30-1020 inclusive).
+- */
+-
+- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+-
+- ldr \irqstat, [\base, #GIC_CPU_INTACK] /* bits 12-10 = src CPU, 9-0 = int # */
+-
+- ldr \tmp, =1021
+-
+- bic \irqnr, \irqstat, #0x1c00
+-
+- cmp \irqnr, #15
+- cmpcc \irqnr, \irqnr
+- cmpne \irqnr, \tmp
+- cmpcs \irqnr, \irqnr
+- addne \irqnr, \irqnr, #32
+-
+- .endm
+-
+- /* We assume that irqstat (the raw value of the IRQ acknowledge
+- * register) is preserved from the macro above.
+- * If there is an IPI, we immediately signal end of interrupt on the
+- * controller, since this requires the original irqstat value which
+- * we won't easily be able to recreate later.
+- */
+-
+- .macro test_for_ipi, irqnr, irqstat, base, tmp
+- bic \irqnr, \irqstat, #0x1c00
+- cmp \irqnr, #16
+- strcc \irqstat, [\base, #GIC_CPU_EOI]
+- cmpcs \irqnr, \irqnr
+- .endm
+diff --git a/arch/arm/mach-exynos/include/mach/vmalloc.h b/arch/arm/mach-exynos/include/mach/vmalloc.h
+deleted file mode 100644
+index 284330e..0000000
+--- a/arch/arm/mach-exynos/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,22 +0,0 @@
+-/* linux/arch/arm/mach-exynos4/include/mach/vmalloc.h
+- *
+- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+- * http://www.samsung.com
+- *
+- * Copyright 2010 Ben Dooks <ben-linux@fluff.org>
+- *
+- * Based on arch/arm/mach-s5p6440/include/mach/vmalloc.h
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- *
+- * EXYNOS4 vmalloc definition
+-*/
+-
+-#ifndef __ASM_ARCH_VMALLOC_H
+-#define __ASM_ARCH_VMALLOC_H __FILE__
+-
+-#define VMALLOC_END 0xF6000000UL
+-
+-#endif /* __ASM_ARCH_VMALLOC_H */
+diff --git a/arch/arm/mach-exynos/mach-armlex4210.c b/arch/arm/mach-exynos/mach-armlex4210.c
+index f0ca6c1..49da308 100644
+--- a/arch/arm/mach-exynos/mach-armlex4210.c
++++ b/arch/arm/mach-exynos/mach-armlex4210.c
+@@ -16,6 +16,7 @@
+ #include <linux/smsc911x.h>
+
+ #include <asm/mach/arch.h>
++#include <asm/hardware/gic.h>
+ #include <asm/mach-types.h>
+
+ #include <plat/cpu.h>
+@@ -210,6 +211,7 @@ MACHINE_START(ARMLEX4210, "ARMLEX4210")
+ .atag_offset = 0x100,
+ .init_irq = exynos4_init_irq,
+ .map_io = armlex4210_map_io,
++ .handle_irq = gic_handle_irq,
+ .init_machine = armlex4210_machine_init,
+ .timer = &exynos4_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-exynos/mach-nuri.c b/arch/arm/mach-exynos/mach-nuri.c
+index 236bbe1..5acec11 100644
+--- a/arch/arm/mach-exynos/mach-nuri.c
++++ b/arch/arm/mach-exynos/mach-nuri.c
+@@ -32,6 +32,7 @@
+ #include <media/v4l2-mediabus.h>
+
+ #include <asm/mach/arch.h>
++#include <asm/hardware/gic.h>
+ #include <asm/mach-types.h>
+
+ #include <plat/adc.h>
+@@ -1333,6 +1334,7 @@ MACHINE_START(NURI, "NURI")
+ .atag_offset = 0x100,
+ .init_irq = exynos4_init_irq,
+ .map_io = nuri_map_io,
++ .handle_irq = gic_handle_irq,
+ .init_machine = nuri_machine_init,
+ .timer = &exynos4_timer,
+ .reserve = &nuri_reserve,
+diff --git a/arch/arm/mach-exynos/mach-origen.c b/arch/arm/mach-exynos/mach-origen.c
+index f80b563..5561b06 100644
+--- a/arch/arm/mach-exynos/mach-origen.c
++++ b/arch/arm/mach-exynos/mach-origen.c
+@@ -22,6 +22,7 @@
+ #include <linux/lcd.h>
+
+ #include <asm/mach/arch.h>
++#include <asm/hardware/gic.h>
+ #include <asm/mach-types.h>
+
+ #include <video/platform_lcd.h>
+@@ -694,6 +695,7 @@ MACHINE_START(ORIGEN, "ORIGEN")
+ .atag_offset = 0x100,
+ .init_irq = exynos4_init_irq,
+ .map_io = origen_map_io,
++ .handle_irq = gic_handle_irq,
+ .init_machine = origen_machine_init,
+ .timer = &exynos4_timer,
+ .reserve = &origen_reserve,
+diff --git a/arch/arm/mach-exynos/mach-smdk4x12.c b/arch/arm/mach-exynos/mach-smdk4x12.c
+index fcf2e0e..722d82d 100644
+--- a/arch/arm/mach-exynos/mach-smdk4x12.c
++++ b/arch/arm/mach-exynos/mach-smdk4x12.c
+@@ -21,6 +21,7 @@
+ #include <linux/serial_core.h>
+
+ #include <asm/mach/arch.h>
++#include <asm/hardware/gic.h>
+ #include <asm/mach-types.h>
+
+ #include <plat/backlight.h>
+@@ -287,6 +288,7 @@ MACHINE_START(SMDK4212, "SMDK4212")
+ .atag_offset = 0x100,
+ .init_irq = exynos4_init_irq,
+ .map_io = smdk4x12_map_io,
++ .handle_irq = gic_handle_irq,
+ .init_machine = smdk4x12_machine_init,
+ .timer = &exynos4_timer,
+ MACHINE_END
+@@ -297,6 +299,7 @@ MACHINE_START(SMDK4412, "SMDK4412")
+ .atag_offset = 0x100,
+ .init_irq = exynos4_init_irq,
+ .map_io = smdk4x12_map_io,
++ .handle_irq = gic_handle_irq,
+ .init_machine = smdk4x12_machine_init,
+ .timer = &exynos4_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-exynos/mach-smdkv310.c b/arch/arm/mach-exynos/mach-smdkv310.c
+index cec2afa..edc60b6 100644
+--- a/arch/arm/mach-exynos/mach-smdkv310.c
++++ b/arch/arm/mach-exynos/mach-smdkv310.c
+@@ -21,6 +21,7 @@
+ #include <linux/pwm_backlight.h>
+
+ #include <asm/mach/arch.h>
++#include <asm/hardware/gic.h>
+ #include <asm/mach-types.h>
+
+ #include <video/platform_lcd.h>
+@@ -375,6 +376,7 @@ MACHINE_START(SMDKV310, "SMDKV310")
+ .atag_offset = 0x100,
+ .init_irq = exynos4_init_irq,
+ .map_io = smdkv310_map_io,
++ .handle_irq = gic_handle_irq,
+ .init_machine = smdkv310_machine_init,
+ .timer = &exynos4_timer,
+ .reserve = &smdkv310_reserve,
+@@ -385,6 +387,7 @@ MACHINE_START(SMDKC210, "SMDKC210")
+ .atag_offset = 0x100,
+ .init_irq = exynos4_init_irq,
+ .map_io = smdkv310_map_io,
++ .handle_irq = gic_handle_irq,
+ .init_machine = smdkv310_machine_init,
+ .timer = &exynos4_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c
+index a2a177f..cfc7d50 100644
+--- a/arch/arm/mach-exynos/mach-universal_c210.c
++++ b/arch/arm/mach-exynos/mach-universal_c210.c
+@@ -24,6 +24,7 @@
+ #include <linux/i2c/atmel_mxt_ts.h>
+
+ #include <asm/mach/arch.h>
++#include <asm/hardware/gic.h>
+ #include <asm/mach-types.h>
+
+ #include <plat/regs-serial.h>
+@@ -1058,6 +1059,7 @@ MACHINE_START(UNIVERSAL_C210, "UNIVERSAL_C210")
+ .atag_offset = 0x100,
+ .init_irq = exynos4_init_irq,
+ .map_io = universal_map_io,
++ .handle_irq = gic_handle_irq,
+ .init_machine = universal_machine_init,
+ .timer = &exynos4_timer,
+ .reserve = &universal_reserve,
+diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
+index 69ffb2f..60bc45e 100644
+--- a/arch/arm/mach-exynos/platsmp.c
++++ b/arch/arm/mach-exynos/platsmp.c
+@@ -32,7 +32,6 @@
+
+ #include <plat/cpu.h>
+
+-extern unsigned int gic_bank_offset;
+ extern void exynos4_secondary_startup(void);
+
+ #define CPU1_BOOT_REG (samsung_rev() == EXYNOS4210_REV_1_1 ? \
+@@ -65,31 +64,6 @@ static void __iomem *scu_base_addr(void)
+
+ static DEFINE_SPINLOCK(boot_lock);
+
+-static void __cpuinit exynos4_gic_secondary_init(void)
+-{
+- void __iomem *dist_base = S5P_VA_GIC_DIST +
+- (gic_bank_offset * smp_processor_id());
+- void __iomem *cpu_base = S5P_VA_GIC_CPU +
+- (gic_bank_offset * smp_processor_id());
+- int i;
+-
+- /*
+- * Deal with the banked PPI and SGI interrupts - disable all
+- * PPI interrupts, ensure all SGI interrupts are enabled.
+- */
+- __raw_writel(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
+- __raw_writel(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
+-
+- /*
+- * Set priority on PPI and SGI interrupts
+- */
+- for (i = 0; i < 32; i += 4)
+- __raw_writel(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
+-
+- __raw_writel(0xf0, cpu_base + GIC_CPU_PRIMASK);
+- __raw_writel(1, cpu_base + GIC_CPU_CTRL);
+-}
+-
+ void __cpuinit platform_secondary_init(unsigned int cpu)
+ {
+ /*
+@@ -97,7 +71,7 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
+ * core (e.g. timer irq), then they will not have been enabled
+ * for us: do so
+ */
+- exynos4_gic_secondary_init();
++ gic_secondary_init(0);
+
+ /*
+ * let the primary processor know we're out of the
+diff --git a/arch/arm/mach-footbridge/cats-hw.c b/arch/arm/mach-footbridge/cats-hw.c
+index d5f1785..60b6774 100644
+--- a/arch/arm/mach-footbridge/cats-hw.c
++++ b/arch/arm/mach-footbridge/cats-hw.c
+@@ -86,7 +86,7 @@ fixup_cats(struct tag *tags, char **cmdline, struct meminfo *mi)
+ MACHINE_START(CATS, "Chalice-CATS")
+ /* Maintainer: Philip Blundell */
+ .atag_offset = 0x100,
+- .soft_reboot = 1,
++ .restart_mode = 's',
+ .fixup = fixup_cats,
+ .map_io = footbridge_map_io,
+ .init_irq = footbridge_init_irq,
+diff --git a/arch/arm/mach-footbridge/include/mach/system.h b/arch/arm/mach-footbridge/include/mach/system.h
+index 0b29315..249f895 100644
+--- a/arch/arm/mach-footbridge/include/mach/system.h
++++ b/arch/arm/mach-footbridge/include/mach/system.h
+@@ -24,7 +24,7 @@ static inline void arch_reset(char mode, const char *cmd)
+ /*
+ * Jump into the ROM
+ */
+- cpu_reset(0x41000000);
++ soft_restart(0x41000000);
+ } else {
+ if (machine_is_netwinder()) {
+ /* open up the SuperIO chip
+diff --git a/arch/arm/mach-footbridge/include/mach/vmalloc.h b/arch/arm/mach-footbridge/include/mach/vmalloc.h
+deleted file mode 100644
+index 40ba78e..0000000
+--- a/arch/arm/mach-footbridge/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,10 +0,0 @@
+-/*
+- * arch/arm/mach-footbridge/include/mach/vmalloc.h
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-
+-
+-#define VMALLOC_END 0xf0000000UL
+diff --git a/arch/arm/mach-gemini/include/mach/vmalloc.h b/arch/arm/mach-gemini/include/mach/vmalloc.h
+deleted file mode 100644
+index 45371eb..0000000
+--- a/arch/arm/mach-gemini/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,10 +0,0 @@
+-/*
+- * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- */
+-
+-#define VMALLOC_END 0xf0000000UL
+diff --git a/arch/arm/mach-h720x/include/mach/vmalloc.h b/arch/arm/mach-h720x/include/mach/vmalloc.h
+deleted file mode 100644
+index 8520b4a..0000000
+--- a/arch/arm/mach-h720x/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,10 +0,0 @@
+-/*
+- * arch/arm/mach-h720x/include/mach/vmalloc.h
+- */
+-
+-#ifndef __ARCH_ARM_VMALLOC_H
+-#define __ARCH_ARM_VMALLOC_H
+-
+-#define VMALLOC_END 0xd0000000UL
+-
+-#endif
+diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
+index 88660d5..7266dd5 100644
+--- a/arch/arm/mach-highbank/highbank.c
++++ b/arch/arm/mach-highbank/highbank.c
+@@ -144,6 +144,7 @@ DT_MACHINE_START(HIGHBANK, "Highbank")
+ .map_io = highbank_map_io,
+ .init_irq = highbank_init_irq,
+ .timer = &highbank_timer,
++ .handle_irq = gic_handle_irq,
+ .init_machine = highbank_init,
+ .dt_compat = highbank_match,
+ MACHINE_END
+diff --git a/arch/arm/mach-highbank/include/mach/entry-macro.S b/arch/arm/mach-highbank/include/mach/entry-macro.S
+index 73c1129..a14f9e6 100644
+--- a/arch/arm/mach-highbank/include/mach/entry-macro.S
++++ b/arch/arm/mach-highbank/include/mach/entry-macro.S
+@@ -1,5 +1,3 @@
+-#include <asm/hardware/entry-macro-gic.S>
+-
+ .macro disable_fiq
+ .endm
+
+diff --git a/arch/arm/mach-highbank/include/mach/vmalloc.h b/arch/arm/mach-highbank/include/mach/vmalloc.h
+deleted file mode 100644
+index 1969e95..0000000
+--- a/arch/arm/mach-highbank/include/mach/vmalloc.h
++++ /dev/null
+@@ -1 +0,0 @@
+-#define VMALLOC_END 0xFEE00000UL
+diff --git a/arch/arm/mach-integrator/include/mach/vmalloc.h b/arch/arm/mach-integrator/include/mach/vmalloc.h
+deleted file mode 100644
+index 2f5a2ba..0000000
+--- a/arch/arm/mach-integrator/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,20 +0,0 @@
+-/*
+- * arch/arm/mach-integrator/include/mach/vmalloc.h
+- *
+- * Copyright (C) 2000 Russell King.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+- */
+-#define VMALLOC_END 0xd0000000UL
+diff --git a/arch/arm/mach-iop13xx/include/mach/vmalloc.h b/arch/arm/mach-iop13xx/include/mach/vmalloc.h
+deleted file mode 100644
+index c534567..0000000
+--- a/arch/arm/mach-iop13xx/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,4 +0,0 @@
+-#ifndef _VMALLOC_H_
+-#define _VMALLOC_H_
+-#define VMALLOC_END 0xfa000000UL
+-#endif
+diff --git a/arch/arm/mach-iop32x/include/mach/io.h b/arch/arm/mach-iop32x/include/mach/io.h
+index 059c783..2d88264 100644
+--- a/arch/arm/mach-iop32x/include/mach/io.h
++++ b/arch/arm/mach-iop32x/include/mach/io.h
+@@ -13,15 +13,8 @@
+
+ #include <asm/hardware/iop3xx.h>
+
+-extern void __iomem *__iop3xx_ioremap(unsigned long cookie, size_t size,
+- unsigned int mtype);
+-extern void __iop3xx_iounmap(void __iomem *addr);
+-
+ #define IO_SPACE_LIMIT 0xffffffff
+ #define __io(p) ((void __iomem *)IOP3XX_PCI_IO_PHYS_TO_VIRT(p))
+ #define __mem_pci(a) (a)
+
+-#define __arch_ioremap __iop3xx_ioremap
+-#define __arch_iounmap __iop3xx_iounmap
+-
+ #endif
+diff --git a/arch/arm/mach-iop32x/include/mach/system.h b/arch/arm/mach-iop32x/include/mach/system.h
+index a4b808f..b4f83e5 100644
+--- a/arch/arm/mach-iop32x/include/mach/system.h
++++ b/arch/arm/mach-iop32x/include/mach/system.h
+@@ -18,8 +18,6 @@ static inline void arch_idle(void)
+
+ static inline void arch_reset(char mode, const char *cmd)
+ {
+- local_irq_disable();
+-
+ if (machine_is_n2100()) {
+ gpio_line_set(N2100_HARDWARE_RESET, GPIO_LOW);
+ gpio_line_config(N2100_HARDWARE_RESET, GPIO_OUT);
+@@ -30,5 +28,5 @@ static inline void arch_reset(char mode, const char *cmd)
+ *IOP3XX_PCSR = 0x30;
+
+ /* Jump into ROM at address 0 */
+- cpu_reset(0);
++ soft_restart(0);
+ }
+diff --git a/arch/arm/mach-iop32x/include/mach/vmalloc.h b/arch/arm/mach-iop32x/include/mach/vmalloc.h
+deleted file mode 100644
+index c4862d4..0000000
+--- a/arch/arm/mach-iop32x/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,5 +0,0 @@
+-/*
+- * arch/arm/mach-iop32x/include/mach/vmalloc.h
+- */
+-
+-#define VMALLOC_END 0xfe000000UL
+diff --git a/arch/arm/mach-iop33x/include/mach/io.h b/arch/arm/mach-iop33x/include/mach/io.h
+index 39e893e..a8a66fc 100644
+--- a/arch/arm/mach-iop33x/include/mach/io.h
++++ b/arch/arm/mach-iop33x/include/mach/io.h
+@@ -13,15 +13,8 @@
+
+ #include <asm/hardware/iop3xx.h>
+
+-extern void __iomem *__iop3xx_ioremap(unsigned long cookie, size_t size,
+- unsigned int mtype);
+-extern void __iop3xx_iounmap(void __iomem *addr);
+-
+ #define IO_SPACE_LIMIT 0xffffffff
+ #define __io(p) ((void __iomem *)IOP3XX_PCI_IO_PHYS_TO_VIRT(p))
+ #define __mem_pci(a) (a)
+
+-#define __arch_ioremap __iop3xx_ioremap
+-#define __arch_iounmap __iop3xx_iounmap
+-
+ #endif
+diff --git a/arch/arm/mach-iop33x/include/mach/system.h b/arch/arm/mach-iop33x/include/mach/system.h
+index f192a34..86d1b20 100644
+--- a/arch/arm/mach-iop33x/include/mach/system.h
++++ b/arch/arm/mach-iop33x/include/mach/system.h
+@@ -19,5 +19,5 @@ static inline void arch_reset(char mode, const char *cmd)
+ *IOP3XX_PCSR = 0x30;
+
+ /* Jump into ROM at address 0 */
+- cpu_reset(0);
++ soft_restart(0);
+ }
+diff --git a/arch/arm/mach-iop33x/include/mach/vmalloc.h b/arch/arm/mach-iop33x/include/mach/vmalloc.h
+deleted file mode 100644
+index 48331dc..0000000
+--- a/arch/arm/mach-iop33x/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,5 +0,0 @@
+-/*
+- * arch/arm/mach-iop33x/include/mach/vmalloc.h
+- */
+-
+-#define VMALLOC_END 0xfe000000UL
+diff --git a/arch/arm/mach-ixp2000/include/mach/system.h b/arch/arm/mach-ixp2000/include/mach/system.h
+index de37099..810df7b 100644
+--- a/arch/arm/mach-ixp2000/include/mach/system.h
++++ b/arch/arm/mach-ixp2000/include/mach/system.h
+@@ -19,8 +19,6 @@ static inline void arch_idle(void)
+
+ static inline void arch_reset(char mode, const char *cmd)
+ {
+- local_irq_disable();
+-
+ /*
+ * Reset flash banking register so that we are pointing at
+ * RedBoot bank.
+diff --git a/arch/arm/mach-ixp2000/include/mach/vmalloc.h b/arch/arm/mach-ixp2000/include/mach/vmalloc.h
+deleted file mode 100644
+index 61c8dae..0000000
+--- a/arch/arm/mach-ixp2000/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,20 +0,0 @@
+-/*
+- * arch/arm/mach-ixp2000/include/mach/vmalloc.h
+- *
+- * Author: Naeem Afzal <naeem.m.afzal@intel.com>
+- *
+- * Copyright 2002 Intel Corp.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms of the GNU General Public License as published by the
+- * Free Software Foundation; either version 2 of the License, or (at your
+- * option) any later version.
+- *
+- * Just any arbitrary offset to the start of the vmalloc VM area: the
+- * current 8MB value just means that there will be a 8MB "hole" after the
+- * physical memory until the kernel virtual memory starts. That means that
+- * any out-of-bounds memory accesses will hopefully be caught.
+- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
+- * area for the same reason. ;)
+- */
+-#define VMALLOC_END 0xfb000000UL
+diff --git a/arch/arm/mach-ixp23xx/include/mach/io.h b/arch/arm/mach-ixp23xx/include/mach/io.h
+index a1749d0..4ce4353 100644
+--- a/arch/arm/mach-ixp23xx/include/mach/io.h
++++ b/arch/arm/mach-ixp23xx/include/mach/io.h
+@@ -20,33 +20,4 @@
+ #define __io(p) ((void __iomem*)((p) + IXP23XX_PCI_IO_VIRT))
+ #define __mem_pci(a) (a)
+
+-static inline void __iomem *
+-ixp23xx_ioremap(unsigned long addr, unsigned long size, unsigned int mtype)
+-{
+- if (addr >= IXP23XX_PCI_MEM_START &&
+- addr <= IXP23XX_PCI_MEM_START + IXP23XX_PCI_MEM_SIZE) {
+- if (addr + size > IXP23XX_PCI_MEM_START + IXP23XX_PCI_MEM_SIZE)
+- return NULL;
+-
+- return (void __iomem *)
+- ((addr - IXP23XX_PCI_MEM_START) + IXP23XX_PCI_MEM_VIRT);
+- }
+-
+- return __arm_ioremap(addr, size, mtype);
+-}
+-
+-static inline void
+-ixp23xx_iounmap(void __iomem *addr)
+-{
+- if ((((u32)addr) >= IXP23XX_PCI_MEM_VIRT) &&
+- (((u32)addr) < IXP23XX_PCI_MEM_VIRT + IXP23XX_PCI_MEM_SIZE))
+- return;
+-
+- __iounmap(addr);
+-}
+-
+-#define __arch_ioremap ixp23xx_ioremap
+-#define __arch_iounmap ixp23xx_iounmap
+-
+-
+ #endif
+diff --git a/arch/arm/mach-ixp23xx/include/mach/vmalloc.h b/arch/arm/mach-ixp23xx/include/mach/vmalloc.h
+deleted file mode 100644
+index 896c56a..0000000
+--- a/arch/arm/mach-ixp23xx/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,10 +0,0 @@
+-/*
+- * arch/arm/mach-ixp23xx/include/mach/vmalloc.h
+- *
+- * Copyright (c) 2005 MontaVista Software, Inc.
+- *
+- * NPU mappings end at 0xf0000000 and we allocate 64MB for board
+- * specific static I/O.
+- */
+-
+-#define VMALLOC_END (0xec000000UL)
+diff --git a/arch/arm/mach-ixp4xx/include/mach/system.h b/arch/arm/mach-ixp4xx/include/mach/system.h
+index 54c0af7..24337d9 100644
+--- a/arch/arm/mach-ixp4xx/include/mach/system.h
++++ b/arch/arm/mach-ixp4xx/include/mach/system.h
+@@ -26,7 +26,7 @@ static inline void arch_reset(char mode, const char *cmd)
+ {
+ if ( 1 && mode == 's') {
+ /* Jump into ROM at address 0 */
+- cpu_reset(0);
++ soft_restart(0);
+ } else {
+ /* Use on-chip reset capability */
+
+diff --git a/arch/arm/mach-ixp4xx/include/mach/vmalloc.h b/arch/arm/mach-ixp4xx/include/mach/vmalloc.h
+deleted file mode 100644
+index 9bcd64d..0000000
+--- a/arch/arm/mach-ixp4xx/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,5 +0,0 @@
+-/*
+- * arch/arm/mach-ixp4xx/include/mach/vmalloc.h
+- */
+-#define VMALLOC_END (0xff000000UL)
+-
+diff --git a/arch/arm/mach-kirkwood/include/mach/io.h b/arch/arm/mach-kirkwood/include/mach/io.h
+index 1aaddc3..49dd0cb 100644
+--- a/arch/arm/mach-kirkwood/include/mach/io.h
++++ b/arch/arm/mach-kirkwood/include/mach/io.h
+@@ -19,31 +19,6 @@ static inline void __iomem *__io(unsigned long addr)
+ + KIRKWOOD_PCIE_IO_VIRT_BASE);
+ }
+
+-static inline void __iomem *
+-__arch_ioremap(unsigned long paddr, size_t size, unsigned int mtype)
+-{
+- void __iomem *retval;
+- unsigned long offs = paddr - KIRKWOOD_REGS_PHYS_BASE;
+- if (mtype == MT_DEVICE && size && offs < KIRKWOOD_REGS_SIZE &&
+- size <= KIRKWOOD_REGS_SIZE && offs + size <= KIRKWOOD_REGS_SIZE) {
+- retval = (void __iomem *)KIRKWOOD_REGS_VIRT_BASE + offs;
+- } else {
+- retval = __arm_ioremap(paddr, size, mtype);
+- }
+-
+- return retval;
+-}
+-
+-static inline void
+-__arch_iounmap(void __iomem *addr)
+-{
+- if (addr < (void __iomem *)KIRKWOOD_REGS_VIRT_BASE ||
+- addr >= (void __iomem *)(KIRKWOOD_REGS_VIRT_BASE + KIRKWOOD_REGS_SIZE))
+- __iounmap(addr);
+-}
+-
+-#define __arch_ioremap __arch_ioremap
+-#define __arch_iounmap __arch_iounmap
+ #define __io(a) __io(a)
+ #define __mem_pci(a) (a)
+
+diff --git a/arch/arm/mach-kirkwood/include/mach/vmalloc.h b/arch/arm/mach-kirkwood/include/mach/vmalloc.h
+deleted file mode 100644
+index bf162ca..0000000
+--- a/arch/arm/mach-kirkwood/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,5 +0,0 @@
+-/*
+- * arch/arm/mach-kirkwood/include/mach/vmalloc.h
+- */
+-
+-#define VMALLOC_END 0xfe800000UL
+diff --git a/arch/arm/mach-ks8695/include/mach/system.h b/arch/arm/mach-ks8695/include/mach/system.h
+index fb1dda9..ceb19c9 100644
+--- a/arch/arm/mach-ks8695/include/mach/system.h
++++ b/arch/arm/mach-ks8695/include/mach/system.h
+@@ -32,7 +32,7 @@ static void arch_reset(char mode, const char *cmd)
+ unsigned int reg;
+
+ if (mode == 's')
+- cpu_reset(0);
++ soft_restart(0);
+
+ /* disable timer0 */
+ reg = __raw_readl(KS8695_TMR_VA + KS8695_TMCON);
+diff --git a/arch/arm/mach-ks8695/include/mach/vmalloc.h b/arch/arm/mach-ks8695/include/mach/vmalloc.h
+deleted file mode 100644
+index 744ac66..0000000
+--- a/arch/arm/mach-ks8695/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,19 +0,0 @@
+-/*
+- * arch/arm/mach-ks8695/include/mach/vmalloc.h
+- *
+- * Copyright (C) 2006 Ben Dooks
+- * Copyright (C) 2006 Simtec Electronics <linux@simtec.co.uk>
+- *
+- * KS8695 vmalloc definition
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-
+-#ifndef __ASM_ARCH_VMALLOC_H
+-#define __ASM_ARCH_VMALLOC_H
+-
+-#define VMALLOC_END (KS8695_IO_VA & PGDIR_MASK)
+-
+-#endif
+diff --git a/arch/arm/mach-lpc32xx/include/mach/system.h b/arch/arm/mach-lpc32xx/include/mach/system.h
+index df3b0de..d47f3b1 100644
+--- a/arch/arm/mach-lpc32xx/include/mach/system.h
++++ b/arch/arm/mach-lpc32xx/include/mach/system.h
+@@ -33,9 +33,6 @@ static inline void arch_reset(char mode, const char *cmd)
+ case 'h':
+ printk(KERN_CRIT "RESET: Rebooting system\n");
+
+- /* Disable interrupts */
+- local_irq_disable();
+-
+ lpc32xx_watchdog_reset();
+ break;
+
+diff --git a/arch/arm/mach-lpc32xx/include/mach/vmalloc.h b/arch/arm/mach-lpc32xx/include/mach/vmalloc.h
+deleted file mode 100644
+index 720fa43..0000000
+--- a/arch/arm/mach-lpc32xx/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,24 +0,0 @@
+-/*
+- * arch/arm/mach-lpc32xx/include/mach/vmalloc.h
+- *
+- * Author: Kevin Wells <kevin.wells@nxp.com>
+- *
+- * Copyright (C) 2010 NXP Semiconductors
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- */
+-
+-#ifndef __ASM_ARCH_VMALLOC_H
+-#define __ASM_ARCH_VMALLOC_H
+-
+-#define VMALLOC_END 0xF0000000UL
+-
+-#endif
+diff --git a/arch/arm/mach-mmp/include/mach/system.h b/arch/arm/mach-mmp/include/mach/system.h
+index 1a8a25e..cb06379 100644
+--- a/arch/arm/mach-mmp/include/mach/system.h
++++ b/arch/arm/mach-mmp/include/mach/system.h
+@@ -19,8 +19,8 @@ static inline void arch_idle(void)
+ static inline void arch_reset(char mode, const char *cmd)
+ {
+ if (cpu_is_pxa168())
+- cpu_reset(0xffff0000);
++ soft_restart(0xffff0000);
+ else
+- cpu_reset(0);
++ soft_restart(0);
+ }
+ #endif /* __ASM_MACH_SYSTEM_H */
+diff --git a/arch/arm/mach-mmp/include/mach/vmalloc.h b/arch/arm/mach-mmp/include/mach/vmalloc.h
+deleted file mode 100644
+index 1d0bac0..0000000
+--- a/arch/arm/mach-mmp/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,5 +0,0 @@
+-/*
+- * linux/arch/arm/mach-mmp/include/mach/vmalloc.h
+- */
+-
+-#define VMALLOC_END 0xfe000000UL
+diff --git a/arch/arm/mach-msm/board-msm8960.c b/arch/arm/mach-msm/board-msm8960.c
+index 6dc1cbd..ed35981 100644
+--- a/arch/arm/mach-msm/board-msm8960.c
++++ b/arch/arm/mach-msm/board-msm8960.c
+@@ -99,6 +99,7 @@ MACHINE_START(MSM8960_SIM, "QCT MSM8960 SIMULATOR")
+ .map_io = msm8960_map_io,
+ .init_irq = msm8960_init_irq,
+ .timer = &msm_timer,
++ .handle_irq = gic_handle_irq,
+ .init_machine = msm8960_sim_init,
+ MACHINE_END
+
+@@ -108,6 +109,7 @@ MACHINE_START(MSM8960_RUMI3, "QCT MSM8960 RUMI3")
+ .map_io = msm8960_map_io,
+ .init_irq = msm8960_init_irq,
+ .timer = &msm_timer,
++ .handle_irq = gic_handle_irq,
+ .init_machine = msm8960_rumi3_init,
+ MACHINE_END
+
+diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
+index 44bf716..0a11342 100644
+--- a/arch/arm/mach-msm/board-msm8x60.c
++++ b/arch/arm/mach-msm/board-msm8x60.c
+@@ -108,6 +108,7 @@ MACHINE_START(MSM8X60_RUMI3, "QCT MSM8X60 RUMI3")
+ .reserve = msm8x60_reserve,
+ .map_io = msm8x60_map_io,
+ .init_irq = msm8x60_init_irq,
++ .handle_irq = gic_handle_irq,
+ .init_machine = msm8x60_init,
+ .timer = &msm_timer,
+ MACHINE_END
+@@ -117,6 +118,7 @@ MACHINE_START(MSM8X60_SURF, "QCT MSM8X60 SURF")
+ .reserve = msm8x60_reserve,
+ .map_io = msm8x60_map_io,
+ .init_irq = msm8x60_init_irq,
++ .handle_irq = gic_handle_irq,
+ .init_machine = msm8x60_init,
+ .timer = &msm_timer,
+ MACHINE_END
+@@ -126,6 +128,7 @@ MACHINE_START(MSM8X60_SIM, "QCT MSM8X60 SIMULATOR")
+ .reserve = msm8x60_reserve,
+ .map_io = msm8x60_map_io,
+ .init_irq = msm8x60_init_irq,
++ .handle_irq = gic_handle_irq,
+ .init_machine = msm8x60_init,
+ .timer = &msm_timer,
+ MACHINE_END
+@@ -135,6 +138,7 @@ MACHINE_START(MSM8X60_FFA, "QCT MSM8X60 FFA")
+ .reserve = msm8x60_reserve,
+ .map_io = msm8x60_map_io,
+ .init_irq = msm8x60_init_irq,
++ .handle_irq = gic_handle_irq,
+ .init_machine = msm8x60_init,
+ .timer = &msm_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-msm/include/mach/entry-macro-qgic.S b/arch/arm/mach-msm/include/mach/entry-macro-qgic.S
+deleted file mode 100644
+index 717076f..0000000
+--- a/arch/arm/mach-msm/include/mach/entry-macro-qgic.S
++++ /dev/null
+@@ -1,17 +0,0 @@
+-/*
+- * Low-level IRQ helper macros
+- *
+- * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+- *
+- * This file is licensed under the terms of the GNU General Public
+- * License version 2. This program is licensed "as is" without any
+- * warranty of any kind, whether express or implied.
+- */
+-
+-#include <asm/hardware/entry-macro-gic.S>
+-
+- .macro disable_fiq
+- .endm
+-
+- .macro arch_ret_to_user, tmp1, tmp2
+- .endm
+diff --git a/arch/arm/mach-msm/include/mach/entry-macro-vic.S b/arch/arm/mach-msm/include/mach/entry-macro-vic.S
+deleted file mode 100644
+index 70563ed..0000000
+--- a/arch/arm/mach-msm/include/mach/entry-macro-vic.S
++++ /dev/null
+@@ -1,37 +0,0 @@
+-/*
+- * Copyright (C) 2007 Google, Inc.
+- * Author: Brian Swetland <swetland@google.com>
+- *
+- * This software is licensed under the terms of the GNU General Public
+- * License version 2, as published by the Free Software Foundation, and
+- * may be copied, distributed, and modified under those terms.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- */
+-
+-#include <mach/msm_iomap.h>
+-
+- .macro disable_fiq
+- .endm
+-
+- .macro get_irqnr_preamble, base, tmp
+- @ enable imprecise aborts
+- cpsie a
+- mov \base, #MSM_VIC_BASE
+- .endm
+-
+- .macro arch_ret_to_user, tmp1, tmp2
+- .endm
+-
+- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+- @ 0xD0 has irq# or old irq# if the irq has been handled
+- @ 0xD4 has irq# or -1 if none pending *but* if you just
+- @ read 0xD4 you never get the first irq for some reason
+- ldr \irqnr, [\base, #0xD0]
+- ldr \irqnr, [\base, #0xD4]
+- cmp \irqnr, #0xffffffff
+- .endm
+diff --git a/arch/arm/mach-msm/include/mach/entry-macro.S b/arch/arm/mach-msm/include/mach/entry-macro.S
+index b16f082..41f7003 100644
+--- a/arch/arm/mach-msm/include/mach/entry-macro.S
++++ b/arch/arm/mach-msm/include/mach/entry-macro.S
+@@ -16,8 +16,27 @@
+ *
+ */
+
+-#if defined(CONFIG_ARM_GIC)
+-#include <mach/entry-macro-qgic.S>
+-#else
+-#include <mach/entry-macro-vic.S>
++ .macro disable_fiq
++ .endm
++
++ .macro arch_ret_to_user, tmp1, tmp2
++ .endm
++
++#if !defined(CONFIG_ARM_GIC)
++#include <mach/msm_iomap.h>
++
++ .macro get_irqnr_preamble, base, tmp
++ @ enable imprecise aborts
++ cpsie a
++ mov \base, #MSM_VIC_BASE
++ .endm
++
++ .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
++ @ 0xD0 has irq# or old irq# if the irq has been handled
++ @ 0xD4 has irq# or -1 if none pending *but* if you just
++ @ read 0xD4 you never get the first irq for some reason
++ ldr \irqnr, [\base, #0xD0]
++ ldr \irqnr, [\base, #0xD4]
++ cmp \irqnr, #0xffffffff
++ .endm
+ #endif
+diff --git a/arch/arm/mach-msm/include/mach/vmalloc.h b/arch/arm/mach-msm/include/mach/vmalloc.h
+deleted file mode 100644
+index d138448..0000000
+--- a/arch/arm/mach-msm/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,22 +0,0 @@
+-/* arch/arm/mach-msm/include/mach/vmalloc.h
+- *
+- * Copyright (C) 2007 Google, Inc.
+- *
+- * This software is licensed under the terms of the GNU General Public
+- * License version 2, as published by the Free Software Foundation, and
+- * may be copied, distributed, and modified under those terms.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- */
+-
+-#ifndef __ASM_ARCH_MSM_VMALLOC_H
+-#define __ASM_ARCH_MSM_VMALLOC_H
+-
+-#define VMALLOC_END 0xd0000000UL
+-
+-#endif
+-
+diff --git a/arch/arm/mach-mv78xx0/include/mach/vmalloc.h b/arch/arm/mach-mv78xx0/include/mach/vmalloc.h
+deleted file mode 100644
+index ba26fe9..0000000
+--- a/arch/arm/mach-mv78xx0/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,5 +0,0 @@
+-/*
+- * arch/arm/mach-mv78xx0/include/mach/vmalloc.h
+- */
+-
+-#define VMALLOC_END 0xfe000000UL
+diff --git a/arch/arm/mach-mxs/include/mach/vmalloc.h b/arch/arm/mach-mxs/include/mach/vmalloc.h
+deleted file mode 100644
+index 103b016..0000000
+--- a/arch/arm/mach-mxs/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,22 +0,0 @@
+-/*
+- * Copyright (C) 2000 Russell King.
+- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- */
+-
+-#ifndef __MACH_MXS_VMALLOC_H__
+-#define __MACH_MXS_VMALLOC_H__
+-
+-/* vmalloc ending address */
+-#define VMALLOC_END 0xf4000000UL
+-
+-#endif /* __MACH_MXS_VMALLOC_H__ */
+diff --git a/arch/arm/mach-mxs/system.c b/arch/arm/mach-mxs/system.c
+index 20ec3bd..cab8836 100644
+--- a/arch/arm/mach-mxs/system.c
++++ b/arch/arm/mach-mxs/system.c
+@@ -53,7 +53,7 @@ void arch_reset(char mode, const char *cmd)
+ mdelay(50);
+
+ /* We'll take a jump through zero as a poor second */
+- cpu_reset(0);
++ soft_restart(0);
+ }
+
+ static int __init mxs_arch_reset_init(void)
+diff --git a/arch/arm/mach-netx/include/mach/entry-macro.S b/arch/arm/mach-netx/include/mach/entry-macro.S
+index 844f1f9..6e9f1cb 100644
+--- a/arch/arm/mach-netx/include/mach/entry-macro.S
++++ b/arch/arm/mach-netx/include/mach/entry-macro.S
+@@ -18,22 +18,9 @@
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+-#include <mach/hardware.h>
+
+ .macro disable_fiq
+ .endm
+
+- .macro get_irqnr_preamble, base, tmp
+- ldr \base, =io_p2v(0x001ff000)
+- .endm
+-
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+-
+- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+- ldr \irqstat, [\base, #0]
+- clz \irqnr, \irqstat
+- rsb \irqnr, \irqnr, #31
+- cmp \irqstat, #0
+- .endm
+-
+diff --git a/arch/arm/mach-netx/include/mach/vmalloc.h b/arch/arm/mach-netx/include/mach/vmalloc.h
+deleted file mode 100644
+index 871f1ef..0000000
+--- a/arch/arm/mach-netx/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,19 +0,0 @@
+-/*
+- * arch/arm/mach-netx/include/mach/vmalloc.h
+- *
+- * Copyright (C) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2
+- * as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+- */
+-#define VMALLOC_END 0xd0000000UL
+diff --git a/arch/arm/mach-netx/nxdb500.c b/arch/arm/mach-netx/nxdb500.c
+index 90903dd..ef8cf35 100644
+--- a/arch/arm/mach-netx/nxdb500.c
++++ b/arch/arm/mach-netx/nxdb500.c
+@@ -28,6 +28,7 @@
+ #include <mach/hardware.h>
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
++#include <asm/hardware/vic.h>
+ #include <mach/netx-regs.h>
+ #include <mach/eth.h>
+
+@@ -203,6 +204,7 @@ MACHINE_START(NXDB500, "Hilscher nxdb500")
+ .atag_offset = 0x100,
+ .map_io = netx_map_io,
+ .init_irq = netx_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &netx_timer,
+ .init_machine = nxdb500_init,
+ MACHINE_END
+diff --git a/arch/arm/mach-netx/nxdkn.c b/arch/arm/mach-netx/nxdkn.c
+index c63384a..588558b 100644
+--- a/arch/arm/mach-netx/nxdkn.c
++++ b/arch/arm/mach-netx/nxdkn.c
+@@ -28,6 +28,7 @@
+ #include <mach/hardware.h>
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
++#include <asm/hardware/vic.h>
+ #include <mach/netx-regs.h>
+ #include <mach/eth.h>
+
+@@ -96,6 +97,7 @@ MACHINE_START(NXDKN, "Hilscher nxdkn")
+ .atag_offset = 0x100,
+ .map_io = netx_map_io,
+ .init_irq = netx_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &netx_timer,
+ .init_machine = nxdkn_init,
+ MACHINE_END
+diff --git a/arch/arm/mach-netx/nxeb500hmi.c b/arch/arm/mach-netx/nxeb500hmi.c
+index 8f548ec..cfcbb50 100644
+--- a/arch/arm/mach-netx/nxeb500hmi.c
++++ b/arch/arm/mach-netx/nxeb500hmi.c
+@@ -28,6 +28,7 @@
+ #include <mach/hardware.h>
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
++#include <asm/hardware/vic.h>
+ #include <mach/netx-regs.h>
+ #include <mach/eth.h>
+
+@@ -180,6 +181,7 @@ MACHINE_START(NXEB500HMI, "Hilscher nxeb500hmi")
+ .atag_offset = 0x100,
+ .map_io = netx_map_io,
+ .init_irq = netx_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &netx_timer,
+ .init_machine = nxeb500hmi_init,
+ MACHINE_END
+diff --git a/arch/arm/mach-nomadik/board-nhk8815.c b/arch/arm/mach-nomadik/board-nhk8815.c
+index 0cbb74c..f98259c 100644
+--- a/arch/arm/mach-nomadik/board-nhk8815.c
++++ b/arch/arm/mach-nomadik/board-nhk8815.c
+@@ -21,6 +21,7 @@
+ #include <linux/mtd/onenand.h>
+ #include <linux/mtd/partitions.h>
+ #include <linux/io.h>
++#include <asm/hardware/vic.h>
+ #include <asm/sizes.h>
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
+@@ -280,6 +281,7 @@ MACHINE_START(NOMADIK, "NHK8815")
+ .atag_offset = 0x100,
+ .map_io = cpu8815_map_io,
+ .init_irq = cpu8815_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &nomadik_timer,
+ .init_machine = nhk8815_platform_init,
+ MACHINE_END
+diff --git a/arch/arm/mach-nomadik/include/mach/entry-macro.S b/arch/arm/mach-nomadik/include/mach/entry-macro.S
+index 49f1aa3..98ea1c1 100644
+--- a/arch/arm/mach-nomadik/include/mach/entry-macro.S
++++ b/arch/arm/mach-nomadik/include/mach/entry-macro.S
+@@ -6,38 +6,8 @@
+ * warranty of any kind, whether express or implied.
+ */
+
+-#include <mach/hardware.h>
+-#include <mach/irqs.h>
+-
+ .macro disable_fiq
+ .endm
+
+- .macro get_irqnr_preamble, base, tmp
+- ldr \base, =IO_ADDRESS(NOMADIK_IC_BASE)
+- .endm
+-
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+-
+- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+-
+- /* This stanza gets the irq mask from one of two status registers */
+- mov \irqnr, #0
+- ldr \irqstat, [\base, #VIC_REG_IRQSR0] @ get masked status
+- cmp \irqstat, #0
+- bne 1001f
+- add \irqnr, \irqnr, #32
+- ldr \irqstat, [\base, #VIC_REG_IRQSR1] @ get masked status
+-
+-1001: tst \irqstat, #15
+- bne 1002f
+- add \irqnr, \irqnr, #4
+- movs \irqstat, \irqstat, lsr #4
+- bne 1001b
+-1002: tst \irqstat, #1
+- bne 1003f
+- add \irqnr, \irqnr, #1
+- movs \irqstat, \irqstat, lsr #1
+- bne 1002b
+-1003: /* EQ will be set if no irqs pending */
+- .endm
+diff --git a/arch/arm/mach-nomadik/include/mach/vmalloc.h b/arch/arm/mach-nomadik/include/mach/vmalloc.h
+deleted file mode 100644
+index f83d574..0000000
+--- a/arch/arm/mach-nomadik/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,2 +0,0 @@
+-
+-#define VMALLOC_END 0xe8000000UL
+diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
+index 73f287d..4f8d66f 100644
+--- a/arch/arm/mach-omap1/Kconfig
++++ b/arch/arm/mach-omap1/Kconfig
+@@ -168,70 +168,6 @@ config MACH_OMAP_GENERIC
+ custom OMAP boards. Say Y here if you have a custom
+ board.
+
+-comment "OMAP CPU Speed"
+- depends on ARCH_OMAP1
+-
+-config OMAP_ARM_216MHZ
+- bool "OMAP ARM 216 MHz CPU (1710 only)"
+- depends on ARCH_OMAP1 && ARCH_OMAP16XX
+- help
+- Enable 216 MHz clock for OMAP1710 CPU. If unsure, say N.
+-
+-config OMAP_ARM_195MHZ
+- bool "OMAP ARM 195 MHz CPU"
+- depends on ARCH_OMAP1 && (ARCH_OMAP730 || ARCH_OMAP850)
+- help
+- Enable 195MHz clock for OMAP CPU. If unsure, say N.
+-
+-config OMAP_ARM_192MHZ
+- bool "OMAP ARM 192 MHz CPU"
+- depends on ARCH_OMAP1 && ARCH_OMAP16XX
+- help
+- Enable 192MHz clock for OMAP CPU. If unsure, say N.
+-
+-config OMAP_ARM_182MHZ
+- bool "OMAP ARM 182 MHz CPU"
+- depends on ARCH_OMAP1 && (ARCH_OMAP730 || ARCH_OMAP850)
+- help
+- Enable 182MHz clock for OMAP CPU. If unsure, say N.
+-
+-config OMAP_ARM_168MHZ
+- bool "OMAP ARM 168 MHz CPU"
+- depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_OMAP850)
+- help
+- Enable 168MHz clock for OMAP CPU. If unsure, say N.
+-
+-config OMAP_ARM_150MHZ
+- bool "OMAP ARM 150 MHz CPU"
+- depends on ARCH_OMAP1 && ARCH_OMAP15XX
+- help
+- Enable 150MHz clock for OMAP CPU. If unsure, say N.
+-
+-config OMAP_ARM_120MHZ
+- bool "OMAP ARM 120 MHz CPU"
+- depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_OMAP850)
+- help
+- Enable 120MHz clock for OMAP CPU. If unsure, say N.
+-
+-config OMAP_ARM_96MHZ
+- bool "OMAP ARM 96 MHz CPU"
+- depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_OMAP850)
+- help
+- Enable 96MHz clock for OMAP CPU. If unsure, say N.
+-
+-config OMAP_ARM_60MHZ
+- bool "OMAP ARM 60 MHz CPU"
+- depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_OMAP850)
+- default y
+- help
+- Enable 60MHz clock for OMAP CPU. If unsure, say Y.
+-
+-config OMAP_ARM_30MHZ
+- bool "OMAP ARM 30 MHz CPU"
+- depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_OMAP850)
+- help
+- Enable 30MHz clock for OMAP CPU. If unsure, say N.
+-
+ endmenu
+
+ endif
+diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
+index b0f15d2..af791196 100644
+--- a/arch/arm/mach-omap1/board-ams-delta.c
++++ b/arch/arm/mach-omap1/board-ams-delta.c
+@@ -35,7 +35,7 @@
+ #include <plat/mux.h>
+ #include <plat/usb.h>
+ #include <plat/board.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <mach/camera.h>
+
+ #include <mach/ams-delta-fiq.h>
+diff --git a/arch/arm/mach-omap1/board-fsample.c b/arch/arm/mach-omap1/board-fsample.c
+index 2317827..b9c4c0f 100644
+--- a/arch/arm/mach-omap1/board-fsample.c
++++ b/arch/arm/mach-omap1/board-fsample.c
+@@ -32,7 +32,7 @@
+ #include <plat/flash.h>
+ #include <plat/fpga.h>
+ #include <plat/keypad.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/board.h>
+
+ /* fsample is pretty close to p2-sample */
+diff --git a/arch/arm/mach-omap1/board-generic.c b/arch/arm/mach-omap1/board-generic.c
+index dc5b75d..7f41d7a 100644
+--- a/arch/arm/mach-omap1/board-generic.c
++++ b/arch/arm/mach-omap1/board-generic.c
+@@ -25,7 +25,7 @@
+ #include <plat/mux.h>
+ #include <plat/usb.h>
+ #include <plat/board.h>
+-#include <plat/common.h>
++#include "common.h"
+
+ /* assume no Mini-AB port */
+
+diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
+index b334b14..7933b97 100644
+--- a/arch/arm/mach-omap1/board-h2.c
++++ b/arch/arm/mach-omap1/board-h2.c
+@@ -43,7 +43,7 @@
+ #include <plat/irda.h>
+ #include <plat/usb.h>
+ #include <plat/keypad.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/flash.h>
+
+ #include "board-h2.h"
+diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
+index 74ebe72..04be2f8 100644
+--- a/arch/arm/mach-omap1/board-h3.c
++++ b/arch/arm/mach-omap1/board-h3.c
+@@ -45,7 +45,7 @@
+ #include <plat/usb.h>
+ #include <plat/keypad.h>
+ #include <plat/dma.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/flash.h>
+
+ #include "board-h3.h"
+diff --git a/arch/arm/mach-omap1/board-htcherald.c b/arch/arm/mach-omap1/board-htcherald.c
+index 3e91baa..46fcfeb 100644
+--- a/arch/arm/mach-omap1/board-htcherald.c
++++ b/arch/arm/mach-omap1/board-htcherald.c
+@@ -41,7 +41,7 @@
+ #include <asm/mach/arch.h>
+
+ #include <plat/omap7xx.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/board.h>
+ #include <plat/keypad.h>
+ #include <plat/usb.h>
+diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
+index 273153d..f99d11d 100644
+--- a/arch/arm/mach-omap1/board-innovator.c
++++ b/arch/arm/mach-omap1/board-innovator.c
+@@ -37,7 +37,7 @@
+ #include <plat/tc.h>
+ #include <plat/usb.h>
+ #include <plat/keypad.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/mmc.h>
+
+ /* At OMAP1610 Innovator the Ethernet is directly connected to CS1 */
+diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
+index 6798b84..f0dfc51 100644
+--- a/arch/arm/mach-omap1/board-nokia770.c
++++ b/arch/arm/mach-omap1/board-nokia770.c
+@@ -12,6 +12,8 @@
+ #include <linux/init.h>
+ #include <linux/mutex.h>
+ #include <linux/platform_device.h>
++#include <linux/platform_data/cbus.h>
++#include <linux/irq.h>
+ #include <linux/input.h>
+ #include <linux/clk.h>
+ #include <linux/omapfb.h>
+@@ -30,7 +32,7 @@
+ #include <plat/usb.h>
+ #include <plat/board.h>
+ #include <plat/keypad.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/hwa742.h>
+ #include <plat/lcd_mipid.h>
+ #include <plat/mmc.h>
+@@ -82,6 +84,104 @@ static struct platform_device nokia770_kp_device = {
+ .resource = nokia770_kp_resources,
+ };
+
++#if defined(CONFIG_CBUS) || defined(CONFIG_CBUS_MODULE)
++
++static struct cbus_host_platform_data nokia770_cbus_data = {
++ .clk_gpio = OMAP_MPUIO(11),
++ .dat_gpio = OMAP_MPUIO(10),
++ .sel_gpio = OMAP_MPUIO(9),
++};
++
++static struct platform_device nokia770_cbus_device = {
++ .name = "cbus",
++ .id = -1,
++ .dev = {
++ .platform_data = &nokia770_cbus_data,
++ },
++};
++
++static struct resource retu_resource[] = {
++ {
++ .start = -EINVAL, /* set later */
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static struct platform_device retu_device = {
++ .name = "retu",
++ .id = -1,
++ .resource = retu_resource,
++ .num_resources = ARRAY_SIZE(retu_resource),
++ .dev = {
++ .parent = &nokia770_cbus_device.dev,
++ },
++};
++
++static struct resource tahvo_resource[] = {
++ {
++ .start = -EINVAL, /* set later */
++ .flags = IORESOURCE_IRQ,
++ }
++};
++
++static struct platform_device tahvo_device = {
++ .name = "tahvo",
++ .id = -1,
++ .resource = tahvo_resource,
++ .num_resources = ARRAY_SIZE(tahvo_resource),
++ .dev = {
++ .parent = &nokia770_cbus_device.dev,
++ },
++};
++
++static void __init nokia770_cbus_init(void)
++{
++ int ret;
++
++ platform_device_register(&nokia770_cbus_device);
++
++ ret = gpio_request(62, "RETU irq");
++ if (ret < 0) {
++ pr_err("retu: Unable to reserve IRQ GPIO\n");
++ return;
++ }
++
++ ret = gpio_direction_input(62);
++ if (ret < 0) {
++ pr_err("retu: Unable to change gpio direction\n");
++ gpio_free(62);
++ return;
++ }
++
++ irq_set_irq_type(gpio_to_irq(62), IRQ_TYPE_EDGE_RISING);
++ retu_resource[0].start = gpio_to_irq(62);
++ platform_device_register(&retu_device);
++
++ ret = gpio_request(40, "TAHVO irq");
++ if (ret) {
++ pr_err("tahvo: Unable to reserve IRQ GPIO\n");
++ gpio_free(62);
++ return;
++ }
++
++ ret = gpio_direction_input(40);
++ if (ret) {
++ pr_err("tahvo: Unable to change direction\n");
++ gpio_free(62);
++ gpio_free(40);
++ return;
++ }
++
++ tahvo_resource[0].start = gpio_to_irq(40);
++ platform_device_register(&tahvo_device);
++}
++
++#else
++static inline void __init nokia770_cbus_init(void)
++{
++}
++#endif
++
+ static struct platform_device *nokia770_devices[] __initdata = {
+ &nokia770_kp_device,
+ };
+@@ -239,6 +339,7 @@ static void __init omap_nokia770_init(void)
+ /* Unmask SleepX signal */
+ omap_writew((omap_readw(0xfffb5004) & ~2), 0xfffb5004);
+
++ nokia770_cbus_init();
+ platform_add_devices(nokia770_devices, ARRAY_SIZE(nokia770_devices));
+ spi_register_board_info(nokia770_spi_board_info,
+ ARRAY_SIZE(nokia770_spi_board_info));
+diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
+index c385927..a409dfc 100644
+--- a/arch/arm/mach-omap1/board-osk.c
++++ b/arch/arm/mach-omap1/board-osk.c
+@@ -51,7 +51,7 @@
+ #include <plat/usb.h>
+ #include <plat/mux.h>
+ #include <plat/tc.h>
+-#include <plat/common.h>
++#include "common.h"
+
+ /* At OMAP5912 OSK the Ethernet is directly connected to CS1 */
+ #define OMAP_OSK_ETHR_START 0x04800300
+diff --git a/arch/arm/mach-omap1/board-palmte.c b/arch/arm/mach-omap1/board-palmte.c
+index f9c44cb..105292d 100644
+--- a/arch/arm/mach-omap1/board-palmte.c
++++ b/arch/arm/mach-omap1/board-palmte.c
+@@ -41,7 +41,7 @@
+ #include <plat/board.h>
+ #include <plat/irda.h>
+ #include <plat/keypad.h>
+-#include <plat/common.h>
++#include "common.h"
+
+ #define PALMTE_USBDETECT_GPIO 0
+ #define PALMTE_USB_OR_DC_GPIO 1
+diff --git a/arch/arm/mach-omap1/board-palmtt.c b/arch/arm/mach-omap1/board-palmtt.c
+index 11a9853..387a900 100644
+--- a/arch/arm/mach-omap1/board-palmtt.c
++++ b/arch/arm/mach-omap1/board-palmtt.c
+@@ -39,7 +39,7 @@
+ #include <plat/board.h>
+ #include <plat/irda.h>
+ #include <plat/keypad.h>
+-#include <plat/common.h>
++#include "common.h"
+
+ #include <linux/spi/spi.h>
+ #include <linux/spi/ads7846.h>
+diff --git a/arch/arm/mach-omap1/board-palmz71.c b/arch/arm/mach-omap1/board-palmz71.c
+index 4206157..df6d15e 100644
+--- a/arch/arm/mach-omap1/board-palmz71.c
++++ b/arch/arm/mach-omap1/board-palmz71.c
+@@ -41,7 +41,7 @@
+ #include <plat/board.h>
+ #include <plat/irda.h>
+ #include <plat/keypad.h>
+-#include <plat/common.h>
++#include "common.h"
+
+ #include <linux/spi/spi.h>
+ #include <linux/spi/ads7846.h>
+diff --git a/arch/arm/mach-omap1/board-perseus2.c b/arch/arm/mach-omap1/board-perseus2.c
+index 203ae07..57ecd7e 100644
+--- a/arch/arm/mach-omap1/board-perseus2.c
++++ b/arch/arm/mach-omap1/board-perseus2.c
+@@ -32,7 +32,7 @@
+ #include <plat/fpga.h>
+ #include <plat/flash.h>
+ #include <plat/keypad.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/board.h>
+
+ static const unsigned int p2_keymap[] = {
+diff --git a/arch/arm/mach-omap1/board-sx1.c b/arch/arm/mach-omap1/board-sx1.c
+index 092a4c0..774ae39 100644
+--- a/arch/arm/mach-omap1/board-sx1.c
++++ b/arch/arm/mach-omap1/board-sx1.c
+@@ -40,7 +40,7 @@
+ #include <plat/usb.h>
+ #include <plat/tc.h>
+ #include <plat/board.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/keypad.h>
+ #include <plat/board-sx1.h>
+
+diff --git a/arch/arm/mach-omap1/board-voiceblue.c b/arch/arm/mach-omap1/board-voiceblue.c
+index 61ed6cd..7721c14 100644
+--- a/arch/arm/mach-omap1/board-voiceblue.c
++++ b/arch/arm/mach-omap1/board-voiceblue.c
+@@ -34,7 +34,7 @@
+ #include <asm/mach/map.h>
+
+ #include <plat/board-voiceblue.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/flash.h>
+ #include <plat/mux.h>
+ #include <plat/tc.h>
+diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c
+index 84ef704..0c50df0 100644
+--- a/arch/arm/mach-omap1/clock.c
++++ b/arch/arm/mach-omap1/clock.c
+@@ -197,11 +197,10 @@ int omap1_select_table_rate(struct clk *clk, unsigned long rate)
+ ref_rate = ck_ref_p->rate;
+
+ for (ptr = omap1_rate_table; ptr->rate; ptr++) {
+- if (ptr->xtal != ref_rate)
++ if (!(ptr->flags & cpu_mask))
+ continue;
+
+- /* DPLL1 cannot be reprogrammed without risking system crash */
+- if (likely(dpll1_rate != 0) && ptr->pll_rate != dpll1_rate)
++ if (ptr->xtal != ref_rate)
+ continue;
+
+ /* Can check only after xtal frequency check */
+@@ -215,12 +214,8 @@ int omap1_select_table_rate(struct clk *clk, unsigned long rate)
+ /*
+ * In most cases we should not need to reprogram DPLL.
+ * Reprogramming the DPLL is tricky, it must be done from SRAM.
+- * (on 730, bit 13 must always be 1)
+ */
+- if (cpu_is_omap7xx())
+- omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val | 0x2000);
+- else
+- omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);
++ omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);
+
+ /* XXX Do we need to recalculate the tree below DPLL1 at this point? */
+ ck_dpll1_p->rate = ptr->pll_rate;
+@@ -290,6 +285,9 @@ long omap1_round_to_table_rate(struct clk *clk, unsigned long rate)
+ highest_rate = -EINVAL;
+
+ for (ptr = omap1_rate_table; ptr->rate; ptr++) {
++ if (!(ptr->flags & cpu_mask))
++ continue;
++
+ if (ptr->xtal != ref_rate)
+ continue;
+
+diff --git a/arch/arm/mach-omap1/clock.h b/arch/arm/mach-omap1/clock.h
+index 16b1423..3d04f4f 100644
+--- a/arch/arm/mach-omap1/clock.h
++++ b/arch/arm/mach-omap1/clock.h
+@@ -111,4 +111,7 @@ extern const struct clkops clkops_dummy;
+ extern const struct clkops clkops_uart_16xx;
+ extern const struct clkops clkops_generic;
+
++/* used for passing SoC type to omap1_{select,round_to}_table_rate() */
++extern u32 cpu_mask;
++
+ #endif
+diff --git a/arch/arm/mach-omap1/clock_data.c b/arch/arm/mach-omap1/clock_data.c
+index 9ff90a7..94699a8 100644
+--- a/arch/arm/mach-omap1/clock_data.c
++++ b/arch/arm/mach-omap1/clock_data.c
+@@ -25,6 +25,7 @@
+ #include <plat/clock.h>
+ #include <plat/cpu.h>
+ #include <plat/clkdev_omap.h>
++#include <plat/sram.h> /* for omap_sram_reprogram_clock() */
+ #include <plat/usb.h> /* for OTG_BASE */
+
+ #include "clock.h"
+@@ -778,12 +779,14 @@ static void __init omap1_show_rates(void)
+ arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
+ }
+
++u32 cpu_mask;
++
+ int __init omap1_clk_init(void)
+ {
+ struct omap_clk *c;
+ const struct omap_clock_config *info;
+ int crystal_type = 0; /* Default 12 MHz */
+- u32 reg, cpu_mask;
++ u32 reg;
+
+ #ifdef CONFIG_DEBUG_LL
+ /*
+@@ -808,6 +811,8 @@ int __init omap1_clk_init(void)
+ clk_preinit(c->lk.clk);
+
+ cpu_mask = 0;
++ if (cpu_is_omap1710())
++ cpu_mask |= CK_1710;
+ if (cpu_is_omap16xx())
+ cpu_mask |= CK_16XX;
+ if (cpu_is_omap1510())
+@@ -931,17 +936,13 @@ void __init omap1_clk_late_init(void)
+ {
+ unsigned long rate = ck_dpll1.rate;
+
+- if (rate >= OMAP1_DPLL1_SANE_VALUE)
+- return;
+-
+- /* System booting at unusable rate, force reprogramming of DPLL1 */
+- ck_dpll1_p->rate = 0;
+-
+ /* Find the highest supported frequency and enable it */
+ if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
+ pr_err("System frequencies not set, using default. Check your config.\n");
+- omap_writew(0x2290, DPLL_CTL);
+- omap_writew(cpu_is_omap7xx() ? 0x2005 : 0x0005, ARM_CKCTL);
++ /*
++ * Reprogramming the DPLL is tricky, it must be done from SRAM.
++ */
++ omap_sram_reprogram_clock(0x2290, 0x0005);
+ ck_dpll1.rate = OMAP1_DPLL1_SANE_VALUE;
+ }
+ propagate_rate(&ck_dpll1);
+diff --git a/arch/arm/mach-omap1/common.h b/arch/arm/mach-omap1/common.h
+new file mode 100644
+index 0000000..52c4eda
+--- /dev/null
++++ b/arch/arm/mach-omap1/common.h
+@@ -0,0 +1,61 @@
++/*
++ *
++ * Header for code common to all OMAP1 machines.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
++ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
++ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
++ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
++ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#ifndef __ARCH_ARM_MACH_OMAP1_COMMON_H
++#define __ARCH_ARM_MACH_OMAP1_COMMON_H
++
++#include <plat/common.h>
++
++#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
++void omap7xx_map_io(void);
++#else
++static inline void omap7xx_map_io(void)
++{
++}
++#endif
++
++#ifdef CONFIG_ARCH_OMAP15XX
++void omap15xx_map_io(void);
++#else
++static inline void omap15xx_map_io(void)
++{
++}
++#endif
++
++#ifdef CONFIG_ARCH_OMAP16XX
++void omap16xx_map_io(void);
++#else
++static inline void omap16xx_map_io(void)
++{
++}
++#endif
++
++void omap1_init_early(void);
++void omap1_init_irq(void);
++
++extern struct sys_timer omap1_timer;
++extern bool omap_32k_timer_init(void);
++
++#endif /* __ARCH_ARM_MACH_OMAP1_COMMON_H */
+diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c
+index 475cb2f..1d76a63 100644
+--- a/arch/arm/mach-omap1/devices.c
++++ b/arch/arm/mach-omap1/devices.c
+@@ -22,7 +22,7 @@
+ #include <mach/hardware.h>
+ #include <asm/mach/map.h>
+
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/tc.h>
+ #include <plat/board.h>
+ #include <plat/mux.h>
+diff --git a/arch/arm/mach-omap1/include/mach/vmalloc.h b/arch/arm/mach-omap1/include/mach/vmalloc.h
+deleted file mode 100644
+index 22ec4a4..0000000
+--- a/arch/arm/mach-omap1/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,20 +0,0 @@
+-/*
+- * arch/arm/mach-omap1/include/mach/vmalloc.h
+- *
+- * Copyright (C) 2000 Russell King.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+- */
+-#define VMALLOC_END 0xd8000000UL
+diff --git a/arch/arm/mach-omap1/io.c b/arch/arm/mach-omap1/io.c
+index 7969cfd..8e55b6f 100644
+--- a/arch/arm/mach-omap1/io.c
++++ b/arch/arm/mach-omap1/io.c
+@@ -121,7 +121,6 @@ void __init omap16xx_map_io(void)
+ void omap1_init_early(void)
+ {
+ omap_check_revision();
+- omap_ioremap_init();
+
+ /* REVISIT: Refer to OMAP5910 Errata, Advisory SYS_1: "Timeout Abort
+ * on a Posted Write in the TIPB Bridge".
+diff --git a/arch/arm/mach-omap1/opp.h b/arch/arm/mach-omap1/opp.h
+index 07074d7..79a6838 100644
+--- a/arch/arm/mach-omap1/opp.h
++++ b/arch/arm/mach-omap1/opp.h
+@@ -21,6 +21,7 @@ struct mpu_rate {
+ unsigned long pll_rate;
+ __u16 ckctl_val;
+ __u16 dpllctl_val;
++ u32 flags;
+ };
+
+ extern struct mpu_rate omap1_rate_table[];
+diff --git a/arch/arm/mach-omap1/opp_data.c b/arch/arm/mach-omap1/opp_data.c
+index 75a5465..9cd4ddb 100644
+--- a/arch/arm/mach-omap1/opp_data.c
++++ b/arch/arm/mach-omap1/opp_data.c
+@@ -10,6 +10,7 @@
+ * published by the Free Software Foundation.
+ */
+
++#include <plat/clkdev_omap.h>
+ #include "opp.h"
+
+ /*-------------------------------------------------------------------------
+@@ -20,40 +21,34 @@ struct mpu_rate omap1_rate_table[] = {
+ * NOTE: Comment order here is different from bits in CKCTL value:
+ * armdiv, dspdiv, dspmmu, tcdiv, perdiv, lcddiv
+ */
+-#if defined(CONFIG_OMAP_ARM_216MHZ)
+- { 216000000, 12000000, 216000000, 0x050d, 0x2910 }, /* 1/1/2/2/2/8 */
+-#endif
+-#if defined(CONFIG_OMAP_ARM_195MHZ)
+- { 195000000, 13000000, 195000000, 0x050e, 0x2790 }, /* 1/1/2/2/4/8 */
+-#endif
+-#if defined(CONFIG_OMAP_ARM_192MHZ)
+- { 192000000, 19200000, 192000000, 0x050f, 0x2510 }, /* 1/1/2/2/8/8 */
+- { 192000000, 12000000, 192000000, 0x050f, 0x2810 }, /* 1/1/2/2/8/8 */
+- { 96000000, 12000000, 192000000, 0x055f, 0x2810 }, /* 2/2/2/2/8/8 */
+- { 48000000, 12000000, 192000000, 0x0baf, 0x2810 }, /* 4/4/4/8/8/8 */
+- { 24000000, 12000000, 192000000, 0x0fff, 0x2810 }, /* 8/8/8/8/8/8 */
+-#endif
+-#if defined(CONFIG_OMAP_ARM_182MHZ)
+- { 182000000, 13000000, 182000000, 0x050e, 0x2710 }, /* 1/1/2/2/4/8 */
+-#endif
+-#if defined(CONFIG_OMAP_ARM_168MHZ)
+- { 168000000, 12000000, 168000000, 0x010f, 0x2710 }, /* 1/1/1/2/8/8 */
+-#endif
+-#if defined(CONFIG_OMAP_ARM_150MHZ)
+- { 150000000, 12000000, 150000000, 0x010a, 0x2cb0 }, /* 1/1/1/2/4/4 */
+-#endif
+-#if defined(CONFIG_OMAP_ARM_120MHZ)
+- { 120000000, 12000000, 120000000, 0x010a, 0x2510 }, /* 1/1/1/2/4/4 */
+-#endif
+-#if defined(CONFIG_OMAP_ARM_96MHZ)
+- { 96000000, 12000000, 96000000, 0x0005, 0x2410 }, /* 1/1/1/1/2/2 */
+-#endif
+-#if defined(CONFIG_OMAP_ARM_60MHZ)
+- { 60000000, 12000000, 60000000, 0x0005, 0x2290 }, /* 1/1/1/1/2/2 */
+-#endif
+-#if defined(CONFIG_OMAP_ARM_30MHZ)
+- { 30000000, 12000000, 60000000, 0x0555, 0x2290 }, /* 2/2/2/2/2/2 */
+-#endif
++ { 216000000, 12000000, 216000000, 0x050d, 0x2910, /* 1/1/2/2/2/8 */
++ CK_1710 },
++ { 195000000, 13000000, 195000000, 0x050e, 0x2790, /* 1/1/2/2/4/8 */
++ CK_7XX },
++ { 192000000, 19200000, 192000000, 0x050f, 0x2510, /* 1/1/2/2/8/8 */
++ CK_16XX },
++ { 192000000, 12000000, 192000000, 0x050f, 0x2810, /* 1/1/2/2/8/8 */
++ CK_16XX },
++ { 96000000, 12000000, 192000000, 0x055f, 0x2810, /* 2/2/2/2/8/8 */
++ CK_16XX },
++ { 48000000, 12000000, 192000000, 0x0baf, 0x2810, /* 4/4/4/8/8/8 */
++ CK_16XX },
++ { 24000000, 12000000, 192000000, 0x0fff, 0x2810, /* 8/8/8/8/8/8 */
++ CK_16XX },
++ { 182000000, 13000000, 182000000, 0x050e, 0x2710, /* 1/1/2/2/4/8 */
++ CK_7XX },
++ { 168000000, 12000000, 168000000, 0x010f, 0x2710, /* 1/1/1/2/8/8 */
++ CK_16XX|CK_7XX },
++ { 150000000, 12000000, 150000000, 0x010a, 0x2cb0, /* 1/1/1/2/4/4 */
++ CK_1510 },
++ { 120000000, 12000000, 120000000, 0x010a, 0x2510, /* 1/1/1/2/4/4 */
++ CK_16XX|CK_1510|CK_310|CK_7XX },
++ { 96000000, 12000000, 96000000, 0x0005, 0x2410, /* 1/1/1/1/2/2 */
++ CK_16XX|CK_1510|CK_310|CK_7XX },
++ { 60000000, 12000000, 60000000, 0x0005, 0x2290, /* 1/1/1/1/2/2 */
++ CK_16XX|CK_1510|CK_310|CK_7XX },
++ { 30000000, 12000000, 60000000, 0x0555, 0x2290, /* 2/2/2/2/2/2 */
++ CK_16XX|CK_1510|CK_310|CK_7XX },
+ { 0, 0, 0, 0, 0 },
+ };
+
+diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
+index 89ea20c..b551a62 100644
+--- a/arch/arm/mach-omap1/pm.c
++++ b/arch/arm/mach-omap1/pm.c
+@@ -584,6 +584,9 @@ static void omap_pm_init_proc(void)
+ #endif /* DEBUG && CONFIG_PROC_FS */
+
+ static void (*saved_idle)(void) = NULL;
++static void omap1_dummy_idle(void)
++{
++}
+
+ /*
+ * omap_pm_prepare - Do preliminary suspend work.
+@@ -593,7 +596,7 @@ static int omap_pm_prepare(void)
+ {
+ /* We cannot sleep in idle until we have resumed */
+ saved_idle = pm_idle;
+- pm_idle = NULL;
++ pm_idle = omap1_dummy_idle;
+
+ return 0;
+ }
+diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
+index a183777..485a21d 100644
+--- a/arch/arm/mach-omap1/time.c
++++ b/arch/arm/mach-omap1/time.c
+@@ -54,7 +54,7 @@
+ #include <asm/mach/irq.h>
+ #include <asm/mach/time.h>
+
+-#include <plat/common.h>
++#include "common.h"
+
+ #ifdef CONFIG_OMAP_MPU_TIMER
+
+diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
+index 96604a5..9a54ef4 100644
+--- a/arch/arm/mach-omap1/timer32k.c
++++ b/arch/arm/mach-omap1/timer32k.c
+@@ -52,7 +52,7 @@
+ #include <asm/irq.h>
+ #include <asm/mach/irq.h>
+ #include <asm/mach/time.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/dmtimer.h>
+
+ /*
+diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
+index e1293aa..e44e942 100644
+--- a/arch/arm/mach-omap2/Kconfig
++++ b/arch/arm/mach-omap2/Kconfig
+@@ -25,6 +25,7 @@ config ARCH_OMAP2
+ depends on ARCH_OMAP2PLUS
+ default y
+ select CPU_V6
++ select MULTI_IRQ_HANDLER
+
+ config ARCH_OMAP3
+ bool "TI OMAP3"
+@@ -36,6 +37,7 @@ config ARCH_OMAP3
+ select ARCH_HAS_OPP
+ select PM_OPP if PM
+ select ARM_CPU_SUSPEND if PM
++ select MULTI_IRQ_HANDLER
+
+ config ARCH_OMAP4
+ bool "TI OMAP4"
+@@ -74,8 +76,13 @@ config SOC_OMAP3430
+ default y
+ select ARCH_OMAP_OTG
+
+-config SOC_OMAPTI816X
+- bool "TI816X support"
++config SOC_OMAPTI81XX
++ bool "TI81XX support"
++ depends on ARCH_OMAP3
++ default y
++
++config SOC_OMAPAM33XX
++ bool "AM33XX support"
+ depends on ARCH_OMAP3
+ default y
+
+@@ -109,7 +116,6 @@ comment "OMAP Board Type"
+ config MACH_OMAP_GENERIC
+ bool "Generic OMAP2+ board"
+ depends on ARCH_OMAP2PLUS
+- select USE_OF
+ default y
+ help
+ Support for generic TI OMAP2+ boards using Flattened Device Tree.
+@@ -177,6 +183,12 @@ config MACH_OMAP3_TORPEDO
+ for full description please see the products webpage at
+ http://www.logicpd.com/products/development-kits/zoom-omap35x-torpedo-development-kit
+
++config MACH_ENCORE
++ bool "Barnes & Noble Encore (Nook Color)"
++ depends on ARCH_OMAP3
++ default y
++ select OMAP_PACKAGE_CBP
++
+ config MACH_OVERO
+ bool "Gumstix Overo board"
+ depends on ARCH_OMAP3
+@@ -312,7 +324,22 @@ config MACH_OMAP_3630SDP
+
+ config MACH_TI8168EVM
+ bool "TI8168 Evaluation Module"
+- depends on SOC_OMAPTI816X
++ depends on SOC_OMAPTI81XX
++ default y
++
++config MACH_TI8148EVM
++ bool "TI8148 Evaluation Module"
++ depends on SOC_OMAPTI81XX
++ default y
++
++config MACH_AM335XEVM
++ bool "AM335X Evaluation Module"
++ depends on SOC_OMAPAM33XX
++ default y
++
++config MACH_AM335XIAEVM
++ bool "AM335X IA Evaluation Module"
++ depends on SOC_OMAPAM33XX
+ default y
+
+ config MACH_OMAP_4430SDP
+@@ -331,6 +358,12 @@ config MACH_OMAP4_PANDA
+ select OMAP_PACKAGE_CBS
+ select REGULATOR_FIXED_VOLTAGE
+
++config MACH_PCM049
++ bool "OMAP4 based phyCORE OMAP4"
++ depends on ARCH_OMAP4
++ default y
++ select OMAP_PACKAGE_CBS
++
+ config OMAP3_EMU
+ bool "OMAP3 debugging peripherals"
+ depends on ARCH_OMAP3
+@@ -351,6 +384,35 @@ config OMAP3_SDRC_AC_TIMING
+ wish to say no. Selecting yes without understanding what is
+ going on could result in system crashes;
+
++config OMAP4_ERRATA_I688
++ bool "OMAP4 errata: Async Bridge Corruption"
++ depends on ARCH_OMAP4
++ select ARCH_HAS_BARRIERS
++ help
++ If data is stalled inside an asynchronous bridge because of back
++ pressure, it may be accepted multiple times, creating a pointer
++ misalignment that will corrupt subsequent transfers on that data path
++ until the next reset of the system (there is no recovery procedure
++ once the issue is hit; the path remains consistently broken). Async
++ bridges can be found on the paths from the MPU to the EMIF and from
++ the MPU to the L3 interconnect.
++ This situation can happen only when the idle is initiated by a
++ Master Request Disconnection (which is triggered by software when
++ executing WFI on the CPU).
++ The work-around for this erratum requires that every initiator
++ connected through an async bridge ensure the data path is properly
++ drained before issuing WFI. This condition is met if one strongly
++ ordered access is performed to the target right before executing
++ WFI. In the MPU case, the L3 T2ASYNC FIFO and the DDR T2ASYNC FIFO
++ need to be drained. An IO barrier ensures that there is no
++ synchronisation loss when initiators operate on both interconnect
++ ports simultaneously.
++
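++# The work-around described above boils down to: perform one strongly
++# ordered access to the affected target, make sure it has completed, then
++# execute WFI. A minimal, illustrative C sketch of that sequence (not the
++# kernel's actual implementation; 'so_target' is an assumed ioremapped
++# strongly-ordered dummy register):
++#
++#	writel_relaxed(0x0, so_target);	/* drain the async bridge FIFO */
++#	dsb();				/* wait for the SO access to complete */
++#	asm volatile("wfi");		/* now safe to enter WFI */
++#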
++config OMAP3_EDMA
++ bool "OMAP3 EDMA support"
++ default n
++ depends on ARCH_OMAP3
++ help
++ Select this option to enable support for the EDMA DMA controller
++
+ endmenu
+
+ endif
+diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
+index b009f17..f275e74 100644
+--- a/arch/arm/mach-omap2/Makefile
++++ b/arch/arm/mach-omap2/Makefile
+@@ -11,10 +11,11 @@ hwmod-common = omap_hwmod.o \
+ omap_hwmod_common_data.o
+ clock-common = clock.o clock_common_data.o \
+ clkt_dpll.o clkt_clksel.o
++secure-common = omap-smc.o omap-secure.o
+
+-obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common)
+-obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common)
+-obj-$(CONFIG_ARCH_OMAP4) += prm44xx.o $(hwmod-common)
++obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common) $(secure-common)
++obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common) $(secure-common)
++obj-$(CONFIG_ARCH_OMAP4) += prm44xx.o $(hwmod-common) $(secure-common)
+
+ obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
+
+@@ -24,11 +25,13 @@ obj-$(CONFIG_TWL4030_CORE) += omap_twl.o
+ obj-$(CONFIG_SMP) += omap-smp.o omap-headsmp.o
+ obj-$(CONFIG_LOCAL_TIMERS) += timer-mpu.o
+ obj-$(CONFIG_HOTPLUG_CPU) += omap-hotplug.o
+-obj-$(CONFIG_ARCH_OMAP4) += omap44xx-smc.o omap4-common.o
++obj-$(CONFIG_ARCH_OMAP4) += omap4-common.o omap-wakeupgen.o \
++ sleep44xx.o
+
+ plus_sec := $(call as-instr,.arch_extension sec,+sec)
+ AFLAGS_omap-headsmp.o :=-Wa,-march=armv7-a$(plus_sec)
+-AFLAGS_omap44xx-smc.o :=-Wa,-march=armv7-a$(plus_sec)
++AFLAGS_omap-smc.o :=-Wa,-march=armv7-a$(plus_sec)
++AFLAGS_sleep44xx.o :=-Wa,-march=armv7-a$(plus_sec)
+
+ # Functions loaded to SRAM
+ obj-$(CONFIG_SOC_OMAP2420) += sram242x.o
+@@ -44,6 +47,7 @@ obj-$(CONFIG_SOC_OMAP2420) += mux2420.o
+ obj-$(CONFIG_SOC_OMAP2430) += mux2430.o
+ obj-$(CONFIG_ARCH_OMAP3) += mux34xx.o
+ obj-$(CONFIG_ARCH_OMAP4) += mux44xx.o
++obj-$(CONFIG_SOC_OMAPAM33XX) += mux33xx.o
+
+ # SMS/SDRC
+ obj-$(CONFIG_ARCH_OMAP2) += sdrc2xxx.o
+@@ -62,13 +66,17 @@ obj-$(CONFIG_ARCH_OMAP2) += pm24xx.o
+ obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o
+ obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o \
+ cpuidle34xx.o
+-obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o
++obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o omap-mpuss-lowpower.o \
++ cpuidle44xx.o
++obj-$(CONFIG_SOC_OMAPAM33XX) += cpuidle33xx.o pm33xx.o \
++ sleep33xx.o
+ obj-$(CONFIG_PM_DEBUG) += pm-debug.o
+ obj-$(CONFIG_OMAP_SMARTREFLEX) += sr_device.o smartreflex.o
+ obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3) += smartreflex-class3.o
+
+ AFLAGS_sleep24xx.o :=-Wa,-march=armv6
+ AFLAGS_sleep34xx.o :=-Wa,-march=armv7-a$(plus_sec)
++AFLAGS_sleep33xx.o :=-Wa,-march=armv7-a$(plus_sec)
+
+ ifeq ($(CONFIG_PM_VERBOSE),y)
+ CFLAGS_pm_bus.o += -DDEBUG
+@@ -77,16 +85,19 @@ endif
+ endif
+
+ # PRCM
++obj-y += prm_common.o
+ obj-$(CONFIG_ARCH_OMAP2) += prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o
+ obj-$(CONFIG_ARCH_OMAP3) += prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o \
+ vc3xxx_data.o vp3xxx_data.o
++obj-$(CONFIG_SOC_OMAPAM33XX) += cminst44xx.o
++
+ # XXX The presence of cm2xxx_3xxx.o on the line below is temporary and
+ # will be removed once the OMAP4 part of the codebase is converted to
+ # use OMAP4-specific PRCM functions.
+ obj-$(CONFIG_ARCH_OMAP4) += prcm.o cm2xxx_3xxx.o cminst44xx.o \
+ cm44xx.o prcm_mpu44xx.o \
+ prminst44xx.o vc44xx_data.o \
+- vp44xx_data.o
++ vp44xx_data.o prm44xx.o
+
+ # OMAP voltage domains
+ voltagedomain-common := voltage.o vc.o vp.o
+@@ -94,6 +105,8 @@ obj-$(CONFIG_ARCH_OMAP2) += $(voltagedomain-common) \
+ voltagedomains2xxx_data.o
+ obj-$(CONFIG_ARCH_OMAP3) += $(voltagedomain-common) \
+ voltagedomains3xxx_data.o
++obj-$(CONFIG_SOC_OMAPAM33XX) += $(voltagedomain-common) \
++ voltagedomains33xx_data.o
+ obj-$(CONFIG_ARCH_OMAP4) += $(voltagedomain-common) \
+ voltagedomains44xx_data.o
+
+@@ -107,6 +120,9 @@ obj-$(CONFIG_ARCH_OMAP3) += $(powerdomain-common) \
+ powerdomain2xxx_3xxx.o \
+ powerdomains3xxx_data.o \
+ powerdomains2xxx_3xxx_data.o
++obj-$(CONFIG_SOC_OMAPAM33XX) += prminst44xx.o \
++ powerdomain44xx.o \
++ powerdomains33xx_data.o
+ obj-$(CONFIG_ARCH_OMAP4) += $(powerdomain-common) \
+ powerdomain44xx.o \
+ powerdomains44xx_data.o
+@@ -121,6 +137,8 @@ obj-$(CONFIG_ARCH_OMAP3) += clockdomain.o \
+ clockdomain2xxx_3xxx.o \
+ clockdomains2xxx_3xxx_data.o \
+ clockdomains3xxx_data.o
++obj-$(CONFIG_SOC_OMAPAM33XX) += clockdomain44xx.o \
++ clockdomains33xx_data.o
+ obj-$(CONFIG_ARCH_OMAP4) += clockdomain.o \
+ clockdomain44xx.o \
+ clockdomains44xx_data.o
+@@ -139,6 +157,7 @@ obj-$(CONFIG_ARCH_OMAP3) += $(clock-common) clock3xxx.o \
+ clock3517.o clock36xx.o \
+ dpll3xxx.o clock3xxx_data.o \
+ clkt_iclk.o
++obj-$(CONFIG_SOC_OMAPAM33XX) += clock33xx_data.o
+ obj-$(CONFIG_ARCH_OMAP4) += $(clock-common) clock44xx_data.o \
+ dpll3xxx.o dpll44xx.o
+
+@@ -160,6 +179,7 @@ obj-$(CONFIG_SOC_OMAP2430) += omap_hwmod_2xxx_ipblock_data.o \
+ obj-$(CONFIG_ARCH_OMAP3) += omap_hwmod_2xxx_3xxx_ipblock_data.o \
+ omap_hwmod_2xxx_3xxx_interconnect_data.o \
+ omap_hwmod_3xxx_data.o
++obj-$(CONFIG_SOC_OMAPAM33XX) += omap_hwmod_33xx_data.o
+ obj-$(CONFIG_ARCH_OMAP4) += omap_hwmod_44xx_data.o
+
+ # EMU peripherals
+@@ -232,6 +252,9 @@ obj-$(CONFIG_MACH_CRANEBOARD) += board-am3517crane.o
+
+ obj-$(CONFIG_MACH_SBC3530) += board-omap3stalker.o
+ obj-$(CONFIG_MACH_TI8168EVM) += board-ti8168evm.o
++obj-$(CONFIG_MACH_TI8148EVM) += board-ti8168evm.o
++obj-$(CONFIG_MACH_AM335XEVM) += board-am335xevm.o
++obj-$(CONFIG_MACH_AM335XIAEVM) += board-am335xevm.o
+
+ # Platform specific device init code
+
+diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
+index d704f0a..d88143f 100644
+--- a/arch/arm/mach-omap2/board-2430sdp.c
++++ b/arch/arm/mach-omap2/board-2430sdp.c
+@@ -34,7 +34,7 @@
+ #include <asm/mach/map.h>
+
+ #include <plat/board.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/gpmc.h>
+ #include <plat/usb.h>
+ #include <plat/gpmc-smc91x.h>
+@@ -301,6 +301,7 @@ MACHINE_START(OMAP_2430SDP, "OMAP2430 sdp2430 board")
+ .map_io = omap243x_map_io,
+ .init_early = omap2430_init_early,
+ .init_irq = omap2_init_irq,
++ .handle_irq = omap2_intc_handle_irq,
+ .init_machine = omap_2430sdp_init,
+ .timer = &omap2_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
+index 77142c1..109b434 100644
+--- a/arch/arm/mach-omap2/board-3430sdp.c
++++ b/arch/arm/mach-omap2/board-3430sdp.c
+@@ -33,7 +33,7 @@
+ #include <plat/mcspi.h>
+ #include <plat/board.h>
+ #include <plat/usb.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/dma.h>
+ #include <plat/gpmc.h>
+ #include <video/omapdss.h>
+@@ -475,106 +475,8 @@ static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
+ static struct omap_board_mux board_mux[] __initdata = {
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+ };
+-
+-static struct omap_device_pad serial1_pads[] __initdata = {
+- /*
+- * Note that off output enable is an active low
+- * signal. So setting this means pin is a
+- * input enabled in off mode
+- */
+- OMAP_MUX_STATIC("uart1_cts.uart1_cts",
+- OMAP_PIN_INPUT |
+- OMAP_PIN_OFF_INPUT_PULLDOWN |
+- OMAP_OFFOUT_EN |
+- OMAP_MUX_MODE0),
+- OMAP_MUX_STATIC("uart1_rts.uart1_rts",
+- OMAP_PIN_OUTPUT |
+- OMAP_OFF_EN |
+- OMAP_MUX_MODE0),
+- OMAP_MUX_STATIC("uart1_rx.uart1_rx",
+- OMAP_PIN_INPUT |
+- OMAP_PIN_OFF_INPUT_PULLDOWN |
+- OMAP_OFFOUT_EN |
+- OMAP_MUX_MODE0),
+- OMAP_MUX_STATIC("uart1_tx.uart1_tx",
+- OMAP_PIN_OUTPUT |
+- OMAP_OFF_EN |
+- OMAP_MUX_MODE0),
+-};
+-
+-static struct omap_device_pad serial2_pads[] __initdata = {
+- OMAP_MUX_STATIC("uart2_cts.uart2_cts",
+- OMAP_PIN_INPUT_PULLUP |
+- OMAP_PIN_OFF_INPUT_PULLDOWN |
+- OMAP_OFFOUT_EN |
+- OMAP_MUX_MODE0),
+- OMAP_MUX_STATIC("uart2_rts.uart2_rts",
+- OMAP_PIN_OUTPUT |
+- OMAP_OFF_EN |
+- OMAP_MUX_MODE0),
+- OMAP_MUX_STATIC("uart2_rx.uart2_rx",
+- OMAP_PIN_INPUT |
+- OMAP_PIN_OFF_INPUT_PULLDOWN |
+- OMAP_OFFOUT_EN |
+- OMAP_MUX_MODE0),
+- OMAP_MUX_STATIC("uart2_tx.uart2_tx",
+- OMAP_PIN_OUTPUT |
+- OMAP_OFF_EN |
+- OMAP_MUX_MODE0),
+-};
+-
+-static struct omap_device_pad serial3_pads[] __initdata = {
+- OMAP_MUX_STATIC("uart3_cts_rctx.uart3_cts_rctx",
+- OMAP_PIN_INPUT_PULLDOWN |
+- OMAP_PIN_OFF_INPUT_PULLDOWN |
+- OMAP_OFFOUT_EN |
+- OMAP_MUX_MODE0),
+- OMAP_MUX_STATIC("uart3_rts_sd.uart3_rts_sd",
+- OMAP_PIN_OUTPUT |
+- OMAP_OFF_EN |
+- OMAP_MUX_MODE0),
+- OMAP_MUX_STATIC("uart3_rx_irrx.uart3_rx_irrx",
+- OMAP_PIN_INPUT |
+- OMAP_PIN_OFF_INPUT_PULLDOWN |
+- OMAP_OFFOUT_EN |
+- OMAP_MUX_MODE0),
+- OMAP_MUX_STATIC("uart3_tx_irtx.uart3_tx_irtx",
+- OMAP_PIN_OUTPUT |
+- OMAP_OFF_EN |
+- OMAP_MUX_MODE0),
+-};
+-
+-static struct omap_board_data serial1_data __initdata = {
+- .id = 0,
+- .pads = serial1_pads,
+- .pads_cnt = ARRAY_SIZE(serial1_pads),
+-};
+-
+-static struct omap_board_data serial2_data __initdata = {
+- .id = 1,
+- .pads = serial2_pads,
+- .pads_cnt = ARRAY_SIZE(serial2_pads),
+-};
+-
+-static struct omap_board_data serial3_data __initdata = {
+- .id = 2,
+- .pads = serial3_pads,
+- .pads_cnt = ARRAY_SIZE(serial3_pads),
+-};
+-
+-static inline void board_serial_init(void)
+-{
+- omap_serial_init_port(&serial1_data);
+- omap_serial_init_port(&serial2_data);
+- omap_serial_init_port(&serial3_data);
+-}
+ #else
+ #define board_mux NULL
+-
+-static inline void board_serial_init(void)
+-{
+- omap_serial_init();
+-}
+ #endif
+
+ /*
+@@ -711,7 +613,7 @@ static void __init omap_3430sdp_init(void)
+ else
+ gpio_pendown = SDP3430_TS_GPIO_IRQ_SDPV1;
+ omap_ads7846_init(1, gpio_pendown, 310, NULL);
+- board_serial_init();
++ omap_serial_init();
+ omap_sdrc_init(hyb18m512160af6_sdrc_params, NULL);
+ usb_musb_init(NULL);
+ board_smc91x_init();
+@@ -728,6 +630,7 @@ MACHINE_START(OMAP_3430SDP, "OMAP3430 3430SDP board")
+ .map_io = omap3_map_io,
+ .init_early = omap3430_init_early,
+ .init_irq = omap3_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = omap_3430sdp_init,
+ .timer = &omap3_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c
+index f552305..7969dd9 100644
+--- a/arch/arm/mach-omap2/board-3630sdp.c
++++ b/arch/arm/mach-omap2/board-3630sdp.c
+@@ -16,7 +16,7 @@
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
+
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/board.h>
+ #include <plat/gpmc-smc91x.h>
+ #include <plat/usb.h>
+@@ -215,6 +215,7 @@ MACHINE_START(OMAP_3630SDP, "OMAP 3630SDP board")
+ .map_io = omap3_map_io,
+ .init_early = omap3630_init_early,
+ .init_irq = omap3_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = omap_sdp_init,
+ .timer = &omap3_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
+index 02cd29a..44610d8 100644
+--- a/arch/arm/mach-omap2/board-4430sdp.c
++++ b/arch/arm/mach-omap2/board-4430sdp.c
+@@ -27,13 +27,13 @@
+ #include <linux/leds_pwm.h>
+
+ #include <mach/hardware.h>
+-#include <mach/omap4-common.h>
++#include <asm/hardware/gic.h>
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach/map.h>
+
+ #include <plat/board.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/usb.h>
+ #include <plat/mmc.h>
+ #include <plat/omap4-keypad.h>
+@@ -373,11 +373,17 @@ static struct platform_device sdp4430_vbat = {
+ },
+ };
+
++static struct platform_device sdp4430_dmic_codec = {
++ .name = "dmic-codec",
++ .id = -1,
++};
++
+ static struct platform_device *sdp4430_devices[] __initdata = {
+ &sdp4430_gpio_keys_device,
+ &sdp4430_leds_gpio,
+ &sdp4430_leds_pwm,
+ &sdp4430_vbat,
++ &sdp4430_dmic_codec,
+ };
+
+ static struct omap_musb_board_data musb_board_data = {
+@@ -405,6 +411,7 @@ static struct omap2_hsmmc_info mmc[] = {
+ {
+ .mmc = 5,
+ .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD,
++ .pm_caps = MMC_PM_KEEP_POWER,
+ .gpio_cd = -EINVAL,
+ .gpio_wp = -EINVAL,
+ .ocr_mask = MMC_VDD_165_195,
+@@ -843,74 +850,8 @@ static struct omap_board_mux board_mux[] __initdata = {
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+ };
+
+-static struct omap_device_pad serial2_pads[] __initdata = {
+- OMAP_MUX_STATIC("uart2_cts.uart2_cts",
+- OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
+- OMAP_MUX_STATIC("uart2_rts.uart2_rts",
+- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
+- OMAP_MUX_STATIC("uart2_rx.uart2_rx",
+- OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
+- OMAP_MUX_STATIC("uart2_tx.uart2_tx",
+- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
+-};
+-
+-static struct omap_device_pad serial3_pads[] __initdata = {
+- OMAP_MUX_STATIC("uart3_cts_rctx.uart3_cts_rctx",
+- OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
+- OMAP_MUX_STATIC("uart3_rts_sd.uart3_rts_sd",
+- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
+- OMAP_MUX_STATIC("uart3_rx_irrx.uart3_rx_irrx",
+- OMAP_PIN_INPUT | OMAP_MUX_MODE0),
+- OMAP_MUX_STATIC("uart3_tx_irtx.uart3_tx_irtx",
+- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
+-};
+-
+-static struct omap_device_pad serial4_pads[] __initdata = {
+- OMAP_MUX_STATIC("uart4_rx.uart4_rx",
+- OMAP_PIN_INPUT | OMAP_MUX_MODE0),
+- OMAP_MUX_STATIC("uart4_tx.uart4_tx",
+- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
+-};
+-
+-static struct omap_board_data serial2_data __initdata = {
+- .id = 1,
+- .pads = serial2_pads,
+- .pads_cnt = ARRAY_SIZE(serial2_pads),
+-};
+-
+-static struct omap_board_data serial3_data __initdata = {
+- .id = 2,
+- .pads = serial3_pads,
+- .pads_cnt = ARRAY_SIZE(serial3_pads),
+-};
+-
+-static struct omap_board_data serial4_data __initdata = {
+- .id = 3,
+- .pads = serial4_pads,
+- .pads_cnt = ARRAY_SIZE(serial4_pads),
+-};
+-
+-static inline void board_serial_init(void)
+-{
+- struct omap_board_data bdata;
+- bdata.flags = 0;
+- bdata.pads = NULL;
+- bdata.pads_cnt = 0;
+- bdata.id = 0;
+- /* pass dummy data for UART1 */
+- omap_serial_init_port(&bdata);
+-
+- omap_serial_init_port(&serial2_data);
+- omap_serial_init_port(&serial3_data);
+- omap_serial_init_port(&serial4_data);
+-}
+ #else
+ #define board_mux NULL
+-
+-static inline void board_serial_init(void)
+-{
+- omap_serial_init();
+-}
+ #endif
+
+ static void omap4_sdp4430_wifi_mux_init(void)
+@@ -960,7 +901,7 @@ static void __init omap_4430sdp_init(void)
+ omap4_i2c_init();
+ omap_sfh7741prox_init();
+ platform_add_devices(sdp4430_devices, ARRAY_SIZE(sdp4430_devices));
+- board_serial_init();
++ omap_serial_init();
+ omap_sdrc_init(NULL, NULL);
+ omap4_sdp4430_wifi_init();
+ omap4_twl6030_hsmmc_init(mmc);
+@@ -990,6 +931,7 @@ MACHINE_START(OMAP_4430SDP, "OMAP4430 4430SDP board")
+ .map_io = omap4_map_io,
+ .init_early = omap4430_init_early,
+ .init_irq = gic_init_irq,
++ .handle_irq = gic_handle_irq,
+ .init_machine = omap_4430sdp_init,
+ .timer = &omap4_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-am335xevm.c b/arch/arm/mach-omap2/board-am335xevm.c
+new file mode 100644
+index 0000000..08a0425
+--- /dev/null
++++ b/arch/arm/mach-omap2/board-am335xevm.c
+@@ -0,0 +1,3158 @@
++/*
++ * Code for AM335X EVM.
++ *
++ * Copyright (C) 2011 Texas Instruments, Inc. - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/i2c.h>
++#include <linux/module.h>
++#include <linux/i2c/at24.h>
++#include <linux/phy.h>
++#include <linux/gpio.h>
++#include <linux/leds.h>
++#include <linux/spi/spi.h>
++#include <linux/spi/flash.h>
++#include <linux/gpio_keys.h>
++#include <linux/input.h>
++#include <linux/input/matrix_keypad.h>
++#include <linux/mtd/mtd.h>
++#include <linux/mtd/nand.h>
++#include <linux/mtd/partitions.h>
++#include <linux/platform_device.h>
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/wl12xx.h>
++#include <linux/ethtool.h>
++#include <linux/mfd/tps65910.h>
++#include <linux/mfd/tps65217.h>
++#include <linux/pwm_backlight.h>
++#include <linux/reboot.h>
++#include <linux/pwm/pwm.h>
++#include <linux/w1-gpio.h>
++#include <linux/can/platform/mcp251x.h>
++
++/* LCD controller is similar to DA850 */
++#include <video/da8xx-fb.h>
++
++#include <mach/hardware.h>
++#include <mach/board-am335xevm.h>
++
++#include <asm/mach-types.h>
++#include <asm/mach/arch.h>
++#include <asm/mach/map.h>
++#include <asm/hardware/asp.h>
++
++#include <plat/irqs.h>
++#include <plat/board.h>
++#include <plat/common.h>
++#include <plat/lcdc.h>
++#include <plat/usb.h>
++#include <plat/mmc.h>
++#include <plat/emif.h>
++#include <plat/nand.h>
++
++#include "board-flash.h"
++#include "cpuidle33xx.h"
++#include "mux.h"
++#include "devices.h"
++#include "hsmmc.h"
++
++/* Convert GPIO signal to GPIO pin number */
++#define GPIO_TO_PIN(bank, gpio) (32 * (bank) + (gpio))
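++/*
++ * Worked example: AM335x GPIO banks are 32 lines wide, so
++ * GPIO_TO_PIN(0, 6) == 6 and GPIO_TO_PIN(3, 18) == 32 * 3 + 18 == 114,
++ * which are the MMC0 card-detect and write-protect pins used further down.
++ */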
++
++/* TLK PHY IDs */
++#define TLK110_PHY_ID 0x2000A201
++#define TLK110_PHY_MASK 0xfffffff0
++
++/* BBB PHY IDs */
++#define BBB_PHY_ID 0x7c0f1
++#define BBB_PHY_MASK 0xfffffffe
++
++/* TLK110 PHY register offsets */
++#define TLK110_COARSEGAIN_REG 0x00A3
++#define TLK110_LPFHPF_REG 0x00AC
++#define TLK110_SPAREANALOG_REG 0x00B9
++#define TLK110_VRCR_REG 0x00D0
++#define TLK110_SETFFE_REG 0x0107
++#define TLK110_FTSP_REG 0x0154
++#define TLK110_ALFATPIDL_REG 0x002A
++#define TLK110_PSCOEF21_REG 0x0096
++#define TLK110_PSCOEF3_REG 0x0097
++#define TLK110_ALFAFACTOR1_REG 0x002C
++#define TLK110_ALFAFACTOR2_REG 0x0023
++#define TLK110_CFGPS_REG 0x0095
++#define TLK110_FTSPTXGAIN_REG 0x0150
++#define TLK110_SWSCR3_REG 0x000B
++#define TLK110_SCFALLBACK_REG 0x0040
++#define TLK110_PHYRCR_REG 0x001F
++
++/* TLK110 register writes values */
++#define TLK110_COARSEGAIN_VAL 0x0000
++#define TLK110_LPFHPF_VAL 0x8000
++#define TLK110_SPANALOG_VAL 0x0000
++#define TLK110_VRCR_VAL 0x0008
++#define TLK110_SETFFE_VAL 0x0605
++#define TLK110_FTSP_VAL 0x0255
++#define TLK110_ALFATPIDL_VAL 0x7998
++#define TLK110_PSCOEF21_VAL 0x3A20
++#define TLK110_PSCOEF3_VAL 0x003F
++#define TLK110_ALFACTOR1_VAL 0xFF80
++#define TLK110_ALFACTOR2_VAL 0x021C
++#define TLK110_CFGPS_VAL 0x0000
++#define TLK110_FTSPTXGAIN_VAL 0x6A88
++#define TLK110_SWSCR3_VAL 0x0000
++#define TLK110_SCFALLBACK_VAL 0xC11D
++#define TLK110_PHYRCR_VAL 0x4000
++
++#if defined(CONFIG_TLK110_WORKAROUND) || \
++ defined(CONFIG_TLK110_WORKAROUND_MODULE)
++#define am335x_tlk110_phy_init()\
++ do { \
++ phy_register_fixup_for_uid(TLK110_PHY_ID,\
++ TLK110_PHY_MASK,\
++ am335x_tlk110_phy_fixup);\
++ } while (0);
++#else
++#define am335x_tlk110_phy_init() do { } while (0);
++#endif
++
++static const struct display_panel disp_panel = {
++ WVGA,
++ 32,
++ 32,
++ COLOR_ACTIVE,
++};
++
++/* LCD backlight platform Data */
++#define AM335X_BACKLIGHT_MAX_BRIGHTNESS 100
++#define AM335X_BACKLIGHT_DEFAULT_BRIGHTNESS 50
++#define AM335X_PWM_PERIOD_NANO_SECONDS (1000000 * 5)
++
++#define PWM_DEVICE_ID "ecap.0"
++
++static struct platform_pwm_backlight_data am335x_backlight_data = {
++ .pwm_id = PWM_DEVICE_ID,
++ .ch = -1,
++ .lth_brightness = 21,
++ .max_brightness = AM335X_BACKLIGHT_MAX_BRIGHTNESS,
++ .dft_brightness = AM335X_BACKLIGHT_DEFAULT_BRIGHTNESS,
++ .pwm_period_ns = AM335X_PWM_PERIOD_NANO_SECONDS,
++};
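++/*
++ * Worked example of the numbers above: the PWM period is (1000000 * 5) ns
++ * = 5,000,000 ns = 5 ms, i.e. a 200 Hz backlight PWM. Brightness is
++ * requested on a 0..100 scale, defaulting to 50 at boot, with 21 as the
++ * lower brightness threshold (lth_brightness) handed to the pwm-backlight
++ * driver.
++ */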
++
++static struct lcd_ctrl_config lcd_cfg = {
++ &disp_panel,
++ .ac_bias = 255,
++ .ac_bias_intrpt = 0,
++ .dma_burst_sz = 16,
++ .bpp = 32,
++ .fdd = 0x80,
++ .tft_alt_mode = 0,
++ .stn_565_mode = 0,
++ .mono_8bit_mode = 0,
++ .invert_line_clock = 1,
++ .invert_frm_clock = 1,
++ .sync_edge = 0,
++ .sync_ctrl = 1,
++ .raster_order = 0,
++};
++
++struct da8xx_lcdc_platform_data TFC_S9700RTWV35TR_01B_pdata = {
++ .manu_name = "ThreeFive",
++ .controller_data = &lcd_cfg,
++ .type = "TFC_S9700RTWV35TR_01B",
++};
++
++#include "common.h"
++
++static const struct display_panel bbtoys7_panel = {
++ WVGA,
++ 16,
++ 16,
++ COLOR_ACTIVE,
++};
++
++#define BBTOYS7LCD_PWM_DEVICE_ID "ehrpwm.1:0"
++
++static struct platform_pwm_backlight_data bbtoys7lcd_backlight_data = {
++ .pwm_id = BBTOYS7LCD_PWM_DEVICE_ID,
++ .ch = -1,
++ .max_brightness = AM335X_BACKLIGHT_MAX_BRIGHTNESS,
++ .dft_brightness = AM335X_BACKLIGHT_DEFAULT_BRIGHTNESS,
++ .pwm_period_ns = AM335X_PWM_PERIOD_NANO_SECONDS,
++};
++
++static struct lcd_ctrl_config bbtoys7_cfg = {
++ &bbtoys7_panel,
++ .ac_bias = 255,
++ .ac_bias_intrpt = 0,
++ .dma_burst_sz = 16,
++ .bpp = 16,
++ .fdd = 0x80,
++ .tft_alt_mode = 0,
++ .stn_565_mode = 0,
++ .mono_8bit_mode = 0,
++ .invert_line_clock = 1,
++ .invert_frm_clock = 1,
++ .sync_edge = 0,
++ .sync_ctrl = 1,
++ .raster_order = 0,
++};
++
++struct da8xx_lcdc_platform_data bbtoys7_pdata = {
++ .manu_name = "ThreeFive",
++ .controller_data = &bbtoys7_cfg,
++ .type = "TFC_S9700RTWV35TR_01B",
++};
++
++static struct lcd_ctrl_config bbtoys35_cfg = {
++ &bbtoys7_panel,
++ .ac_bias = 255,
++ .ac_bias_intrpt = 0,
++ .dma_burst_sz = 16,
++ .bpp = 16,
++ .fdd = 0x80,
++ .tft_alt_mode = 0,
++ .stn_565_mode = 0,
++ .mono_8bit_mode = 0,
++ .invert_line_clock = 1,
++ .invert_frm_clock = 1,
++ .sync_edge = 0,
++ .sync_ctrl = 1,
++ .raster_order = 0,
++};
++
++struct da8xx_lcdc_platform_data bbtoys35_pdata = {
++ .manu_name = "BBToys",
++ .controller_data = &bbtoys35_cfg,
++ .type = "CDTech_S035Q01",
++};
++
++static const struct display_panel dvi_panel = {
++ WVGA,
++ 16,
++ 16,
++ COLOR_ACTIVE,
++};
++
++static struct lcd_ctrl_config dvi_cfg = {
++ &dvi_panel,
++ .ac_bias = 255,
++ .ac_bias_intrpt = 0,
++ .dma_burst_sz = 16,
++ .bpp = 16,
++ .fdd = 0x80,
++ .tft_alt_mode = 0,
++ .stn_565_mode = 0,
++ .mono_8bit_mode = 0,
++ .invert_line_clock = 1,
++ .invert_frm_clock = 1,
++ .sync_edge = 0,
++ .sync_ctrl = 1,
++ .raster_order = 0,
++};
++
++struct da8xx_lcdc_platform_data dvi_pdata = {
++ .manu_name = "BBToys",
++ .controller_data = &dvi_cfg,
++ .type = "1024x768@60",
++};
++
++/* TSc controller */
++#include <linux/input/ti_tscadc.h>
++#include <linux/lis3lv02d.h>
++
++/* TSc controller */
++static struct tsc_data am335x_touchscreen_data = {
++ .wires = 4,
++ .x_plate_resistance = 200,
++ .mode = TI_TSCADC_TSCMODE,
++};
++
++static struct tsc_data bone_touchscreen_data = {
++ .mode = TI_TSCADC_GENMODE,
++};
++
++static u8 am335x_iis_serializer_direction1[] = {
++ INACTIVE_MODE, INACTIVE_MODE, TX_MODE, RX_MODE,
++ INACTIVE_MODE, INACTIVE_MODE, INACTIVE_MODE, INACTIVE_MODE,
++ INACTIVE_MODE, INACTIVE_MODE, INACTIVE_MODE, INACTIVE_MODE,
++ INACTIVE_MODE, INACTIVE_MODE, INACTIVE_MODE, INACTIVE_MODE,
++};
++
++static struct snd_platform_data am335x_evm_snd_data1 = {
++ .tx_dma_offset = 0x46400000, /* McASP1 */
++ .rx_dma_offset = 0x46400000,
++ .op_mode = DAVINCI_MCASP_IIS_MODE,
++ .num_serializer = ARRAY_SIZE(am335x_iis_serializer_direction1),
++ .tdm_slots = 2,
++ .serial_dir = am335x_iis_serializer_direction1,
++ .asp_chan_q = EVENTQ_2,
++ .version = MCASP_VERSION_3,
++ .txnumevt = 1,
++ .rxnumevt = 1,
++};
++
++static struct omap2_hsmmc_info am335x_mmc[] __initdata = {
++ {
++ .mmc = 1,
++ .caps = MMC_CAP_4_BIT_DATA,
++ .gpio_cd = GPIO_TO_PIN(0, 6),
++ .gpio_wp = GPIO_TO_PIN(3, 18),
++ .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, /* 3V3 */
++ },
++ {
++ .mmc = 0, /* will be set at runtime */
++ },
++ {
++ .mmc = 0, /* will be set at runtime */
++ },
++ {} /* Terminator */
++};
++
++
++#ifdef CONFIG_OMAP_MUX
++static struct omap_board_mux board_mux[] __initdata = {
++ AM33XX_MUX(I2C0_SDA, OMAP_MUX_MODE0 | AM33XX_SLEWCTRL_SLOW |
++ AM33XX_INPUT_EN | AM33XX_PIN_OUTPUT),
++ AM33XX_MUX(I2C0_SCL, OMAP_MUX_MODE0 | AM33XX_SLEWCTRL_SLOW |
++ AM33XX_INPUT_EN | AM33XX_PIN_OUTPUT),
++ { .reg_offset = OMAP_MUX_TERMINATOR },
++};
++#else
++#define board_mux NULL
++#endif
++
++/* module pin mux structure */
++struct pinmux_config {
++ const char *string_name; /* signal name format */
++ int val; /* Options for the mux register value */
++};
++
++struct evm_dev_cfg {
++ void (*device_init)(int evm_id, int profile);
++
++/*
++* If the device is required on both baseboard & daughter board (e.g. I2C),
++* specify DEV_ON_BASEBOARD
++*/
++#define DEV_ON_BASEBOARD 0
++#define DEV_ON_DGHTR_BRD 1
++ u32 device_on;
++
++ u32 profile; /* Profiles (0-7) in which the module is present */
++};
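++/*
++ * Illustrative (hypothetical) use of the structure above: a board is
++ * described by a table that pairs an init routine with where the device
++ * lives and the profiles it applies to. mmc0_init, PROFILE_0 and
++ * PROFILE_ALL are placeholders here, used only to show the shape of an
++ * entry; they are not the actual tables defined later in this file:
++ *
++ *	static struct evm_dev_cfg example_evm_cfg[] = {
++ *		{matrix_keypad_init, DEV_ON_DGHTR_BRD, PROFILE_0},
++ *		{mmc0_init, DEV_ON_BASEBOARD, PROFILE_ALL},
++ *		{NULL, 0, 0},
++ *	};
++ */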
++
++/* AM335X - CPLD Register Offsets */
++#define CPLD_DEVICE_HDR 0x00 /* CPLD Header */
++#define CPLD_DEVICE_ID 0x04 /* CPLD identification */
++#define CPLD_DEVICE_REV 0x0C /* Revision of the CPLD code */
++#define CPLD_CFG_REG 0x10 /* Configuration Register */
++
++static struct i2c_client *cpld_client;
++static u32 am335x_evm_id;
++static struct omap_board_config_kernel am335x_evm_config[] __initdata = {
++};
++
++/*
++* EVM Config held in On-Board eeprom device.
++*
++* Header Format
++*
++* Name Size Contents
++* (Bytes)
++*-------------------------------------------------------------
++* Header 4 0xAA, 0x55, 0x33, 0xEE
++*
++* Board Name 8 Name for board in ASCII.
++* example "A33515BB" = "AM335X Low Cost EVM board"
++*
++* Version 4 Hardware version code for board
++* in ASCII. "1.0A" = rev.01.0A
++*
++* Serial Number 12 Serial number of the board. This is a 12
++* character string which is WWYY4P16nnnn, where
++* WW = 2 digit week of the year of production
++* YY = 2 digit year of production
++* nnnn = incrementing board number
++*
++* Configuration option 32 Codes(TBD) to show the configuration
++* setup on this board.
++*
++* Available 32720 Available space for other non-volatile
++* data.
++*/
++struct am335x_evm_eeprom_config {
++ u32 header;
++ u8 name[8];
++ char version[4];
++ u8 serial[12];
++ u8 opt[32];
++};
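++/*
++ * Minimal sketch (illustrative only, not the board code's actual parser)
++ * of validating a header read into 'config' against the layout documented
++ * above. AM335X_EEPROM_HEADER, defined further down, encodes the
++ * 0xAA 0x55 0x33 0xEE signature:
++ *
++ *	if (config.header != AM335X_EEPROM_HEADER) {
++ *		pr_err("AM335X: wrong EEPROM header 0x%08x\n", config.header);
++ *		return -EINVAL;
++ *	}
++ *	if (!memcmp(config.name, "A33515BB", 8))
++ *		pr_info("AM335X: low cost EVM detected\n");
++ */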
++
++/*
++* EVM Config held in daughter board eeprom device.
++*
++* Header Format
++*
++* Name Size Contents
++* (Bytes)
++*-------------------------------------------------------------
++* Header 4 0xAA, 0x55, 0x33, 0xEE
++*
++* Board Name 8 Name for board in ASCII.
++* example "A335GPBD" = "AM335x
++* General Purpose Daughterboard"
++*
++* Version 4 Hardware version code for board
++* in ASCII. "1.0A" = rev.01.0A
++* Serial Number 12 Serial number of the board. This is a 12
++* character string which is: WWYY4P13nnnn, where
++* WW = 2 digit week of the year of production
++* YY = 2 digit year of production
++* nnnn = incrementing board number
++* Configuration Option 32 Codes to show the configuration
++* setup on this board.
++* CPLD Version 8 CPLD code version for board in ASCII
++* "CPLD1.0A" = rev. 01.0A of the CPLD
++* Available 32700 Available space for other non-volatile
++* codes/data
++*/
++
++struct am335x_eeprom_config1 {
++ u32 header;
++ u8 name[8];
++ char version[4];
++ u8 serial[12];
++ u8 opt[32];
++ u8 cpld_ver[8];
++};
++
++static struct am335x_evm_eeprom_config config;
++static struct am335x_eeprom_config1 config1;
++static bool daughter_brd_detected;
++
++struct beaglebone_cape_eeprom_config {
++ u32 header;
++ char format_revision[2];
++ char name[32];
++ char version[4];
++ char manufacturer[16];
++ char partnumber[16];
++ u16 numpins;
++ char serial[12];
++ u8 muxdata[170];
++ u16 current_3v3;
++ u16 current_vdd5v;
++ u16 current_sys5v;
++ u16 dc;
++};
++
++static struct beaglebone_cape_eeprom_config cape_config;
++static bool beaglebone_cape_detected;
++
++/* keep track of ADC pin usage */
++static int capecount = 0;
++static bool beaglebone_tsadcpins_free = 1;
++
++
++#define GP_EVM_REV_IS_1_0 0x1
++#define GP_EVM_REV_IS_1_0A 0x1
++#define GP_EVM_REV_IS_1_1A 0x2
++#define GP_EVM_REV_IS_UNKNOWN 0xFF
++#define GP_EVM_ACTUALLY_BEAGLEBONE 0xBB
++static unsigned int gp_evm_revision = GP_EVM_REV_IS_UNKNOWN;
++
++unsigned int gigabit_enable = 1;
++
++#define EEPROM_MAC_ADDRESS_OFFSET 60 /* 4+8+4+12+32 */
++#define EEPROM_NO_OF_MAC_ADDR 3
++static char am335x_mac_addr[EEPROM_NO_OF_MAC_ADDR][ETH_ALEN];
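++/*
++ * The offset of 60 above is simply the sum of the documented header fields
++ * (4 + 8 + 4 + 12 + 32), i.e. the three MAC addresses are expected to sit
++ * immediately after the fields of struct am335x_evm_eeprom_config.
++ */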
++
++#define AM335X_EEPROM_HEADER 0xEE3355AA
++
++/* Return the current profile if one exists, else PROFILE_0 on error */
++static u32 am335x_get_profile_selection(void)
++{
++ int val = 0;
++
++ if (!cpld_client)
++ /* error checking is not done in the functions calling this
++ routine, so return profile 0 on error */
++ return 0;
++
++ val = i2c_smbus_read_word_data(cpld_client, CPLD_CFG_REG);
++ if (val < 0)
++ return 0; /* default to Profile 0 on Error */
++ else
++ return val & 0x7;
++}
++
++static struct pinmux_config haptics_pin_mux[] = {
++ {"gpmc_ad9.ehrpwm2B", OMAP_MUX_MODE4 |
++ AM33XX_PIN_OUTPUT},
++ {NULL, 0},
++};
++
++/* Module pin mux for LCDC */
++static struct pinmux_config lcdc_pin_mux[] = {
++ {"lcd_data0.lcd_data0", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data1.lcd_data1", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data2.lcd_data2", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data3.lcd_data3", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data4.lcd_data4", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data5.lcd_data5", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data6.lcd_data6", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data7.lcd_data7", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data8.lcd_data8", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data9.lcd_data9", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data10.lcd_data10", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data11.lcd_data11", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data12.lcd_data12", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data13.lcd_data13", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data14.lcd_data14", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data15.lcd_data15", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"gpmc_ad8.lcd_data16", OMAP_MUX_MODE1 | AM33XX_PIN_OUTPUT},
++ {"gpmc_ad9.lcd_data17", OMAP_MUX_MODE1 | AM33XX_PIN_OUTPUT},
++ {"gpmc_ad10.lcd_data18", OMAP_MUX_MODE1 | AM33XX_PIN_OUTPUT},
++ {"gpmc_ad11.lcd_data19", OMAP_MUX_MODE1 | AM33XX_PIN_OUTPUT},
++ {"gpmc_ad12.lcd_data20", OMAP_MUX_MODE1 | AM33XX_PIN_OUTPUT},
++ {"gpmc_ad13.lcd_data21", OMAP_MUX_MODE1 | AM33XX_PIN_OUTPUT},
++ {"gpmc_ad14.lcd_data22", OMAP_MUX_MODE1 | AM33XX_PIN_OUTPUT},
++ {"gpmc_ad15.lcd_data23", OMAP_MUX_MODE1 | AM33XX_PIN_OUTPUT},
++ {"lcd_vsync.lcd_vsync", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT},
++ {"lcd_hsync.lcd_hsync", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT},
++ {"lcd_pclk.lcd_pclk", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT},
++ {"lcd_ac_bias_en.lcd_ac_bias_en", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT},
++ {NULL, 0},
++};
++
++/* Module pin mux for Beagleboardtoys DVI cape */
++static struct pinmux_config dvi_pin_mux[] = {
++ {"lcd_data0.lcd_data0", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data1.lcd_data1", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data2.lcd_data2", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data3.lcd_data3", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data4.lcd_data4", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data5.lcd_data5", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data6.lcd_data6", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data7.lcd_data7", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data8.lcd_data8", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data9.lcd_data9", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data10.lcd_data10", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data11.lcd_data11", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data12.lcd_data12", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data13.lcd_data13", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data14.lcd_data14", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data15.lcd_data15", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_vsync.lcd_vsync", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT},
++ {"lcd_hsync.lcd_hsync", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT},
++ {"lcd_pclk.lcd_pclk", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT},
++ {"lcd_ac_bias_en.lcd_ac_bias_en", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT},
++ {"gpmc_a2.rgmii2_td3", OMAP_MUX_MODE7 | AM33XX_PIN_OUTPUT}, // USR0 LED
++ {"gpmc_a3.rgmii2_td2", OMAP_MUX_MODE7 | AM33XX_PIN_OUTPUT}, // USR1 LED
++ {"gpmc_ad7.gpmc_ad7", OMAP_MUX_MODE7 | AM33XX_PIN_OUTPUT}, // DVI PDn
++ {NULL, 0},
++};
++
++/* Module pin mux for Beagleboardtoys 7" LCD cape */
++static struct pinmux_config bbtoys7_pin_mux[] = {
++ {"lcd_data0.lcd_data0", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data1.lcd_data1", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data2.lcd_data2", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data3.lcd_data3", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data4.lcd_data4", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data5.lcd_data5", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data6.lcd_data6", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data7.lcd_data7", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data8.lcd_data8", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data9.lcd_data9", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data10.lcd_data10", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data11.lcd_data11", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data12.lcd_data12", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data13.lcd_data13", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data14.lcd_data14", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_data15.lcd_data15", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT
++ | AM33XX_PULL_DISA},
++ {"lcd_vsync.lcd_vsync", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT},
++ {"lcd_hsync.lcd_hsync", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT},
++ {"lcd_pclk.lcd_pclk", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT},
++ {"lcd_ac_bias_en.lcd_ac_bias_en", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT},
++ {"ecap0_in_pwm0_out.gpio0_7", OMAP_MUX_MODE7 | AM33XX_PIN_OUTPUT}, // AVDD_EN
++ {"gpmc_a2.ehrpwm1A", OMAP_MUX_MODE6 | AM33XX_PIN_OUTPUT}, // Backlight
++ {NULL, 0},
++};
++
++static struct pinmux_config w1_gpio_pin_mux[] = {
++ {"gpmc_ad3.gpio1_3", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT_PULLUP},
++ {NULL, 0},
++};
++
++static struct pinmux_config tsc_pin_mux[] = {
++ {"ain0.ain0", OMAP_MUX_MODE0 | AM33XX_INPUT_EN},
++ {"ain1.ain1", OMAP_MUX_MODE0 | AM33XX_INPUT_EN},
++ {"ain2.ain2", OMAP_MUX_MODE0 | AM33XX_INPUT_EN},
++ {"ain3.ain3", OMAP_MUX_MODE0 | AM33XX_INPUT_EN},
++ {"ain4.ain4", OMAP_MUX_MODE0 | AM33XX_INPUT_EN},
++ {"ain5.ain5", OMAP_MUX_MODE0 | AM33XX_INPUT_EN},
++ {"ain6.ain6", OMAP_MUX_MODE0 | AM33XX_INPUT_EN},
++ {"ain7.ain7", OMAP_MUX_MODE0 | AM33XX_INPUT_EN},
++ {"vrefp.vrefp", OMAP_MUX_MODE0 | AM33XX_INPUT_EN},
++ {"vrefn.vrefn", OMAP_MUX_MODE0 | AM33XX_INPUT_EN},
++ {NULL, 0},
++};
++
++/* Pin mux for nand flash module */
++static struct pinmux_config nand_pin_mux[] = {
++ {"gpmc_ad0.gpmc_ad0", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_ad1.gpmc_ad1", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_ad2.gpmc_ad2", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_ad3.gpmc_ad3", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_ad4.gpmc_ad4", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_ad5.gpmc_ad5", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_ad6.gpmc_ad6", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_ad7.gpmc_ad7", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_wait0.gpmc_wait0", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_wpn.gpmc_wpn", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_csn0.gpmc_csn0", OMAP_MUX_MODE0 | AM33XX_PULL_DISA},
++ {"gpmc_advn_ale.gpmc_advn_ale", OMAP_MUX_MODE0 | AM33XX_PULL_DISA},
++ {"gpmc_oen_ren.gpmc_oen_ren", OMAP_MUX_MODE0 | AM33XX_PULL_DISA},
++ {"gpmc_wen.gpmc_wen", OMAP_MUX_MODE0 | AM33XX_PULL_DISA},
++ {"gpmc_ben0_cle.gpmc_ben0_cle", OMAP_MUX_MODE0 | AM33XX_PULL_DISA},
++ {NULL, 0},
++};
++
++/* Module pin mux for SPI flash */
++static struct pinmux_config spi0_pin_mux[] = {
++ {"spi0_sclk.spi0_sclk", OMAP_MUX_MODE0 | AM33XX_PULL_ENBL
++ | AM33XX_INPUT_EN},
++ {"spi0_d0.spi0_d0", OMAP_MUX_MODE0 | AM33XX_PULL_ENBL | AM33XX_PULL_UP
++ | AM33XX_INPUT_EN},
++ {"spi0_d1.spi0_d1", OMAP_MUX_MODE0 | AM33XX_PULL_ENBL
++ | AM33XX_INPUT_EN},
++ {"spi0_cs0.spi0_cs0", OMAP_MUX_MODE0 | AM33XX_PULL_ENBL | AM33XX_PULL_UP
++ | AM33XX_INPUT_EN},
++ {NULL, 0},
++};
++
++/* Module pin mux for SPI flash */
++static struct pinmux_config spi1_pin_mux[] = {
++ {"mcasp0_aclkx.spi1_sclk", OMAP_MUX_MODE3 | AM33XX_PULL_ENBL
++ | AM33XX_INPUT_EN},
++ {"mcasp0_fsx.spi1_d0", OMAP_MUX_MODE3 | AM33XX_PULL_ENBL
++ | AM33XX_PULL_UP | AM33XX_INPUT_EN},
++ {"mcasp0_axr0.spi1_d1", OMAP_MUX_MODE3 | AM33XX_PULL_ENBL
++ | AM33XX_INPUT_EN},
++ {"mcasp0_ahclkr.spi1_cs0", OMAP_MUX_MODE3 | AM33XX_PULL_ENBL
++ | AM33XX_PULL_UP | AM33XX_INPUT_EN},
++ {NULL, 0},
++};
++
++/* Module pin mux for rgmii1 */
++static struct pinmux_config rgmii1_pin_mux[] = {
++ {"mii1_txen.rgmii1_tctl", OMAP_MUX_MODE2 | AM33XX_PIN_OUTPUT},
++ {"mii1_rxdv.rgmii1_rctl", OMAP_MUX_MODE2 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"mii1_txd3.rgmii1_td3", OMAP_MUX_MODE2 | AM33XX_PIN_OUTPUT},
++ {"mii1_txd2.rgmii1_td2", OMAP_MUX_MODE2 | AM33XX_PIN_OUTPUT},
++ {"mii1_txd1.rgmii1_td1", OMAP_MUX_MODE2 | AM33XX_PIN_OUTPUT},
++ {"mii1_txd0.rgmii1_td0", OMAP_MUX_MODE2 | AM33XX_PIN_OUTPUT},
++ {"mii1_txclk.rgmii1_tclk", OMAP_MUX_MODE2 | AM33XX_PIN_OUTPUT},
++ {"mii1_rxclk.rgmii1_rclk", OMAP_MUX_MODE2 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"mii1_rxd3.rgmii1_rd3", OMAP_MUX_MODE2 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"mii1_rxd2.rgmii1_rd2", OMAP_MUX_MODE2 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"mii1_rxd1.rgmii1_rd1", OMAP_MUX_MODE2 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"mii1_rxd0.rgmii1_rd0", OMAP_MUX_MODE2 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"mdio_data.mdio_data", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"mdio_clk.mdio_clk", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT_PULLUP},
++ {NULL, 0},
++};
++
++/* Module pin mux for rgmii2 */
++static struct pinmux_config rgmii2_pin_mux[] = {
++ {"gpmc_a0.rgmii2_tctl", OMAP_MUX_MODE2 | AM33XX_PIN_OUTPUT},
++ {"gpmc_a1.rgmii2_rctl", OMAP_MUX_MODE2 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"gpmc_a2.rgmii2_td3", OMAP_MUX_MODE2 | AM33XX_PIN_OUTPUT},
++ {"gpmc_a3.rgmii2_td2", OMAP_MUX_MODE2 | AM33XX_PIN_OUTPUT},
++ {"gpmc_a4.rgmii2_td1", OMAP_MUX_MODE2 | AM33XX_PIN_OUTPUT},
++ {"gpmc_a5.rgmii2_td0", OMAP_MUX_MODE2 | AM33XX_PIN_OUTPUT},
++ {"gpmc_a6.rgmii2_tclk", OMAP_MUX_MODE2 | AM33XX_PIN_OUTPUT},
++ {"gpmc_a7.rgmii2_rclk", OMAP_MUX_MODE2 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"gpmc_a8.rgmii2_rd3", OMAP_MUX_MODE2 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"gpmc_a9.rgmii2_rd2", OMAP_MUX_MODE2 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"gpmc_a10.rgmii2_rd1", OMAP_MUX_MODE2 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"gpmc_a11.rgmii2_rd0", OMAP_MUX_MODE2 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"mdio_data.mdio_data", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"mdio_clk.mdio_clk", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT_PULLUP},
++ {NULL, 0},
++};
++
++/* Module pin mux for mii1 */
++static struct pinmux_config mii1_pin_mux[] = {
++ {"mii1_rxerr.mii1_rxerr", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"mii1_txen.mii1_txen", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT},
++ {"mii1_rxdv.mii1_rxdv", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"mii1_txd3.mii1_txd3", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT},
++ {"mii1_txd2.mii1_txd2", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT},
++ {"mii1_txd1.mii1_txd1", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT},
++ {"mii1_txd0.mii1_txd0", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT},
++ {"mii1_txclk.mii1_txclk", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"mii1_rxclk.mii1_rxclk", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"mii1_rxd3.mii1_rxd3", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"mii1_rxd2.mii1_rxd2", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"mii1_rxd1.mii1_rxd1", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"mii1_rxd0.mii1_rxd0", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"mdio_data.mdio_data", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"mdio_clk.mdio_clk", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT_PULLUP},
++ {NULL, 0},
++};
++
++/* Module pin mux for rmii1 */
++static struct pinmux_config rmii1_pin_mux[] = {
++ {"mii1_crs.rmii1_crs_dv", OMAP_MUX_MODE1 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"mii1_rxerr.mii1_rxerr", OMAP_MUX_MODE1 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"mii1_txen.mii1_txen", OMAP_MUX_MODE1 | AM33XX_PIN_OUTPUT},
++ {"mii1_txd1.mii1_txd1", OMAP_MUX_MODE1 | AM33XX_PIN_OUTPUT},
++ {"mii1_txd0.mii1_txd0", OMAP_MUX_MODE1 | AM33XX_PIN_OUTPUT},
++ {"mii1_rxd1.mii1_rxd1", OMAP_MUX_MODE1 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"mii1_rxd0.mii1_rxd0", OMAP_MUX_MODE1 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"rmii1_refclk.rmii1_refclk", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"mdio_data.mdio_data", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"mdio_clk.mdio_clk", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT_PULLUP},
++ {NULL, 0},
++};
++
++static struct pinmux_config i2c1_pin_mux[] = {
++ {"spi0_d1.i2c1_sda", OMAP_MUX_MODE2 | AM33XX_SLEWCTRL_SLOW |
++ AM33XX_PULL_ENBL | AM33XX_INPUT_EN},
++ {"spi0_cs0.i2c1_scl", OMAP_MUX_MODE2 | AM33XX_SLEWCTRL_SLOW |
++ AM33XX_PULL_ENBL | AM33XX_INPUT_EN},
++ {NULL, 0},
++};
++
++static struct pinmux_config i2c2_pin_mux[] = {
++ {"uart1_ctsn.i2c2_sda", OMAP_MUX_MODE3 | AM33XX_SLEWCTRL_SLOW |
++ AM33XX_PIN_INPUT_PULLUP},
++ {"uart1_rtsn.i2c2_scl", OMAP_MUX_MODE3 | AM33XX_SLEWCTRL_SLOW |
++ AM33XX_PIN_INPUT_PULLUP},
++ {NULL, 0},
++};
++
++/* Module pin mux for mcasp1 */
++static struct pinmux_config mcasp1_pin_mux[] = {
++ {"mii1_crs.mcasp1_aclkx", OMAP_MUX_MODE4 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"mii1_rxerr.mcasp1_fsx", OMAP_MUX_MODE4 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"mii1_col.mcasp1_axr2", OMAP_MUX_MODE4 | AM33XX_PIN_INPUT_PULLDOWN},
++ {"rmii1_refclk.mcasp1_axr3", OMAP_MUX_MODE4 |
++ AM33XX_PIN_INPUT_PULLDOWN},
++ {NULL, 0},
++};
++
++
++/* Module pin mux for mmc0 */
++static struct pinmux_config mmc0_pin_mux[] = {
++ {"mmc0_dat3.mmc0_dat3", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"mmc0_dat2.mmc0_dat2", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"mmc0_dat1.mmc0_dat1", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"mmc0_dat0.mmc0_dat0", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"mmc0_clk.mmc0_clk", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"mmc0_cmd.mmc0_cmd", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"mcasp0_aclkr.mmc0_sdwp", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT_PULLUP},
++ {"spi0_cs1.mmc0_sdcd", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT_PULLUP},
++ {NULL, 0},
++};
++
++static struct pinmux_config mmc0_no_cd_pin_mux[] = {
++ {"mmc0_dat3.mmc0_dat3", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"mmc0_dat2.mmc0_dat2", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"mmc0_dat1.mmc0_dat1", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"mmc0_dat0.mmc0_dat0", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"mmc0_clk.mmc0_clk", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"mmc0_cmd.mmc0_cmd", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"mcasp0_aclkr.mmc0_sdwp", OMAP_MUX_MODE4 | AM33XX_PIN_INPUT_PULLDOWN},
++ {NULL, 0},
++};
++
++/* Module pin mux for mmc1 */
++static struct pinmux_config mmc1_pin_mux[] = {
++ {"gpmc_ad7.mmc1_dat7", OMAP_MUX_MODE1 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_ad6.mmc1_dat6", OMAP_MUX_MODE1 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_ad5.mmc1_dat5", OMAP_MUX_MODE1 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_ad4.mmc1_dat4", OMAP_MUX_MODE1 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_ad3.mmc1_dat3", OMAP_MUX_MODE1 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_ad2.mmc1_dat2", OMAP_MUX_MODE1 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_ad1.mmc1_dat1", OMAP_MUX_MODE1 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_ad0.mmc1_dat0", OMAP_MUX_MODE1 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_csn1.mmc1_clk", OMAP_MUX_MODE2 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_csn2.mmc1_cmd", OMAP_MUX_MODE2 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_csn0.gpio1_29", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_advn_ale.mmc1_sdcd", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT_PULLUP},
++ {NULL, 0},
++};
++
++/* Module pin mux for uart3 */
++static struct pinmux_config uart3_pin_mux[] = {
++ {"spi0_cs1.uart3_rxd", AM33XX_PIN_INPUT_PULLUP},
++ {"ecap0_in_pwm0_out.uart3_txd", AM33XX_PULL_ENBL},
++ {NULL, 0},
++};
++
++static struct pinmux_config d_can_gp_pin_mux[] = {
++ {"uart0_ctsn.d_can1_tx", OMAP_MUX_MODE2 | AM33XX_PULL_ENBL},
++ {"uart0_rtsn.d_can1_rx", OMAP_MUX_MODE2 | AM33XX_PIN_INPUT_PULLUP},
++ {NULL, 0},
++};
++
++static struct pinmux_config d_can_ia_pin_mux[] = {
++ {"uart0_rxd.d_can0_tx", OMAP_MUX_MODE2 | AM33XX_PULL_ENBL},
++ {"uart0_txd.d_can0_rx", OMAP_MUX_MODE2 | AM33XX_PIN_INPUT_PULLUP},
++ {NULL, 0},
++};
++
++static struct pinmux_config tt3201_pin_mux[] = {
++ {"uart1_rxd.d_can1_tx", OMAP_MUX_MODE2 | AM33XX_PIN_OUTPUT },
++ {"uart1_txd.d_can1_rx", OMAP_MUX_MODE2 | AM33XX_PIN_INPUT_PULLUP },
++ {"mcasp0_fsr.gpio3_19", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT_PULLUP },
++ {"mcasp0_ahclkx.gpio3_21", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT_PULLUP },
++ {"ecap0_in_pwm0_out.spi1_cs1", OMAP_MUX_MODE2 | AM33XX_PIN_OUTPUT_PULLUP },
++ {NULL, 0},
++};
++
++/* Module pin mux for uart2 */
++static struct pinmux_config uart2_pin_mux[] = {
++ {"spi0_sclk.uart2_rxd", OMAP_MUX_MODE1 | AM33XX_SLEWCTRL_SLOW |
++ AM33XX_PIN_INPUT_PULLUP},
++ {"spi0_d0.uart2_txd", OMAP_MUX_MODE1 | AM33XX_PULL_UP |
++ AM33XX_PULL_DISA |
++ AM33XX_SLEWCTRL_SLOW},
++ {NULL, 0},
++};
++
++
++/*
++* @pin_mux - single module pin-mux structure which defines pin-mux
++* details for all its pins.
++*/
++static void setup_pin_mux(struct pinmux_config *pin_mux)
++{
++ int i;
++
++ for (i = 0; pin_mux->string_name != NULL; pin_mux++)
++ omap_mux_init_signal(pin_mux->string_name, pin_mux->val);
++
++}
++
++/* Matrix GPIO Keypad Support for profile-0 only: TODO */
++
++/* pinmux for keypad device */
++static struct pinmux_config matrix_keypad_pin_mux[] = {
++ {"gpmc_a5.gpio1_21", OMAP_MUX_MODE7 | AM33XX_PIN_OUTPUT},
++ {"gpmc_a6.gpio1_22", OMAP_MUX_MODE7 | AM33XX_PIN_OUTPUT},
++ {"gpmc_a9.gpio1_25", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT},
++ {"gpmc_a10.gpio1_26", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT},
++ {"gpmc_a11.gpio1_27", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT},
++ {NULL, 0},
++};
++
++/* Keys mapping */
++static const uint32_t am335x_evm_matrix_keys[] = {
++ KEY(0, 0, KEY_MENU),
++ KEY(1, 0, KEY_BACK),
++ KEY(2, 0, KEY_LEFT),
++
++ KEY(0, 1, KEY_RIGHT),
++ KEY(1, 1, KEY_ENTER),
++ KEY(2, 1, KEY_DOWN),
++};
++
++const struct matrix_keymap_data am335x_evm_keymap_data = {
++ .keymap = am335x_evm_matrix_keys,
++ .keymap_size = ARRAY_SIZE(am335x_evm_matrix_keys),
++};
++
++static const unsigned int am335x_evm_keypad_row_gpios[] = {
++ GPIO_TO_PIN(1, 25), GPIO_TO_PIN(1, 26), GPIO_TO_PIN(1, 27)
++};
++
++static const unsigned int am335x_evm_keypad_col_gpios[] = {
++ GPIO_TO_PIN(1, 21), GPIO_TO_PIN(1, 22)
++};
++
++static struct matrix_keypad_platform_data am335x_evm_keypad_platform_data = {
++ .keymap_data = &am335x_evm_keymap_data,
++ .row_gpios = am335x_evm_keypad_row_gpios,
++ .num_row_gpios = ARRAY_SIZE(am335x_evm_keypad_row_gpios),
++ .col_gpios = am335x_evm_keypad_col_gpios,
++ .num_col_gpios = ARRAY_SIZE(am335x_evm_keypad_col_gpios),
++ .active_low = false,
++ .debounce_ms = 5,
++ .col_scan_delay_us = 2,
++};
++
++static struct platform_device am335x_evm_keyboard = {
++ .name = "matrix-keypad",
++ .id = -1,
++ .dev = {
++ .platform_data = &am335x_evm_keypad_platform_data,
++ },
++};
++
++static void matrix_keypad_init(int evm_id, int profile)
++{
++ int err;
++
++ setup_pin_mux(matrix_keypad_pin_mux);
++ err = platform_device_register(&am335x_evm_keyboard);
++ if (err) {
++ pr_err("failed to register matrix keypad (2x3) device\n");
++ }
++}
++
++
++/* pinmux for keypad device */
++static struct pinmux_config volume_keys_pin_mux[] = {
++ {"spi0_sclk.gpio0_2", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT},
++ {"spi0_d0.gpio0_3", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT},
++ {NULL, 0},
++};
++
++/* Configure GPIOs for Volume Keys */
++static struct gpio_keys_button am335x_evm_volume_gpio_buttons[] = {
++ {
++ .code = KEY_VOLUMEUP,
++ .gpio = GPIO_TO_PIN(0, 2),
++ .active_low = true,
++ .desc = "volume-up",
++ .type = EV_KEY,
++ .wakeup = 1,
++ },
++ {
++ .code = KEY_VOLUMEDOWN,
++ .gpio = GPIO_TO_PIN(0, 3),
++ .active_low = true,
++ .desc = "volume-down",
++ .type = EV_KEY,
++ .wakeup = 1,
++ },
++};
++
++static struct gpio_keys_platform_data am335x_evm_volume_gpio_key_info = {
++ .buttons = am335x_evm_volume_gpio_buttons,
++ .nbuttons = ARRAY_SIZE(am335x_evm_volume_gpio_buttons),
++};
++
++static struct platform_device am335x_evm_volume_keys = {
++ .name = "gpio-keys",
++ .id = -1,
++ .dev = {
++ .platform_data = &am335x_evm_volume_gpio_key_info,
++ },
++};
++
++static void volume_keys_init(int evm_id, int profile)
++{
++ int err;
++
++ setup_pin_mux(volume_keys_pin_mux);
++ err = platform_device_register(&am335x_evm_volume_keys);
++ if (err)
++ pr_err("failed to register matrix keypad (2x3) device\n");
++}
++
++/* pinmux for lcd7 keys */
++static struct pinmux_config lcd7_keys_pin_mux[] = {
++ {"gpmc_a0.gpio1_16", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT},
++ {"gpmc_a1.gpio1_17", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT},
++ {"gpmc_a3.gpio1_19", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT},
++ {"mcasp0_axr0.gpio3_16", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT},
++ {"mcasp0_fsr.gpio3_19", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT},
++ {NULL, 0},
++};
++
++/* Configure GPIOs for lcd7 keys */
++static struct gpio_keys_button beaglebone_lcd7_gpio_keys[] = {
++ {
++ .code = KEY_LEFT,
++ .gpio = GPIO_TO_PIN(1, 16),
++ .active_low = true,
++ .desc = "left",
++ .type = EV_KEY,
++ .wakeup = 1,
++ },
++ {
++ .code = KEY_RIGHT,
++ .gpio = GPIO_TO_PIN(1, 17),
++ .active_low = true,
++ .desc = "right",
++ .type = EV_KEY,
++ .wakeup = 1,
++ },
++ {
++ .code = KEY_UP,
++ .gpio = GPIO_TO_PIN(1, 19),
++ .active_low = true,
++ .desc = "up",
++ .type = EV_KEY,
++ .wakeup = 1,
++ },
++ {
++ .code = KEY_DOWN,
++ .gpio = GPIO_TO_PIN(3, 16),
++ .active_low = true,
++ .desc = "down",
++ .type = EV_KEY,
++ .wakeup = 1,
++ },
++ {
++ .code = KEY_ENTER,
++ .gpio = GPIO_TO_PIN(3, 19),
++ .active_low = true,
++ .desc = "enter",
++ .type = EV_KEY,
++ .wakeup = 1,
++ },
++};
++
++static struct gpio_keys_platform_data beaglebone_lcd7_gpio_key_info = {
++ .buttons = beaglebone_lcd7_gpio_keys,
++ .nbuttons = ARRAY_SIZE(beaglebone_lcd7_gpio_keys),
++};
++
++static struct platform_device beaglebone_lcd7_keys = {
++ .name = "gpio-keys",
++ .id = -1,
++ .dev = {
++ .platform_data = &beaglebone_lcd7_gpio_key_info,
++ },
++};
++
++static void beaglebone_lcd7_keys_init(int evm_id, int profile)
++{
++ int err;
++ setup_pin_mux(lcd7_keys_pin_mux);
++ err = platform_device_register(&beaglebone_lcd7_keys);
++ if (err)
++ pr_err("failed to register gpio keys for LCD7 cape\n");
++}
++
++/* pinmux for lcd3 keys */
++static struct pinmux_config lcd3_keys_pin_mux[] = {
++ {"gpmc_a0.gpio1_16", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT},
++ {"gpmc_a1.gpio1_17", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT},
++ {"mcasp0_fsr.gpio3_19", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT},
++ {"gpmc_ben1.gpio1_28", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT},
++ {"ecap0_in_pwm0_out.gpio0_7", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT},
++ {NULL, 0},
++};
++
++/* Configure GPIOs for lcd3 keys */
++static struct gpio_keys_button beaglebone_lcd3_gpio_keys[] = {
++ {
++ .code = KEY_LEFT,
++ .gpio = GPIO_TO_PIN(1, 16),
++ .active_low = true,
++ .desc = "left",
++ .type = EV_KEY,
++ .wakeup = 1,
++ },
++ {
++ .code = KEY_RIGHT,
++ .gpio = GPIO_TO_PIN(1, 17),
++ .active_low = true,
++ .desc = "right",
++ .type = EV_KEY,
++ .wakeup = 1,
++ },
++ {
++ .code = KEY_UP,
++ .gpio = GPIO_TO_PIN(3, 19),
++ .active_low = true,
++ .desc = "up",
++ .type = EV_KEY,
++ .wakeup = 1,
++ },
++ {
++ .code = KEY_DOWN,
++ .gpio = GPIO_TO_PIN(1, 28),
++ .active_low = true,
++ .desc = "down",
++ .type = EV_KEY,
++ .wakeup = 1,
++ },
++ {
++ .code = KEY_ENTER,
++ .gpio = GPIO_TO_PIN(0, 7),
++ .active_low = true,
++		.desc = "enter",
++ .type = EV_KEY,
++ .wakeup = 1,
++ },
++};
++
++static struct gpio_keys_platform_data beaglebone_lcd3_gpio_key_info = {
++ .buttons = beaglebone_lcd3_gpio_keys,
++ .nbuttons = ARRAY_SIZE(beaglebone_lcd3_gpio_keys),
++};
++
++static struct platform_device beaglebone_lcd3_keys = {
++ .name = "gpio-keys",
++ .id = -1,
++ .dev = {
++ .platform_data = &beaglebone_lcd3_gpio_key_info,
++ },
++};
++
++static void beaglebone_lcd3_keys_init(int evm_id, int profile)
++{
++ int err;
++ setup_pin_mux(lcd3_keys_pin_mux);
++ err = platform_device_register(&beaglebone_lcd3_keys);
++ if (err)
++ pr_err("failed to register gpio keys for LCD3 cape\n");
++}
++
++/*
++ * @evm_id  - EVM id which needs to be configured
++ * @dev_cfg - single EVM structure which includes all module inits and
++ *            pin-mux definitions
++ * @profile - profile mask if the EVM has profiles, else PROFILE_NONE
++ */
++static void _configure_device(int evm_id, struct evm_dev_cfg *dev_cfg,
++ int profile)
++{
++ int i;
++
++ /*
++	 * Only the General Purpose & Industrial Auto Motor Control
++	 * EVMs have profiles, so check if this EVM has a profile.
++	 * If not, ignore the profile comparison.
++ */
++
++ /*
++ * If the device is on baseboard, directly configure it. Else (device on
++ * Daughter board), check if the daughter card is detected.
++ */
++ if (profile == PROFILE_NONE) {
++ for (i = 0; dev_cfg->device_init != NULL; dev_cfg++) {
++ if (dev_cfg->device_on == DEV_ON_BASEBOARD)
++ dev_cfg->device_init(evm_id, profile);
++ else if (daughter_brd_detected == true)
++ dev_cfg->device_init(evm_id, profile);
++ }
++ } else {
++ for (i = 0; dev_cfg->device_init != NULL; dev_cfg++) {
++ if (dev_cfg->profile & profile) {
++ if (dev_cfg->device_on == DEV_ON_BASEBOARD)
++ dev_cfg->device_init(evm_id, profile);
++ else if (daughter_brd_detected == true)
++ dev_cfg->device_init(evm_id, profile);
++ }
++ }
++ }
++}
++
++
++/* pinmux for usb0 drvvbus */
++static struct pinmux_config usb0_pin_mux[] = {
++ {"usb0_drvvbus.usb0_drvvbus", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT},
++ {NULL, 0},
++};
++
++/* pinmux for usb1 drvvbus */
++static struct pinmux_config usb1_pin_mux[] = {
++ {"usb1_drvvbus.usb1_drvvbus", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT},
++ {NULL, 0},
++};
++
++/* pinmux for profibus */
++static struct pinmux_config profibus_pin_mux[] = {
++ {"uart1_rxd.pr1_uart0_rxd_mux1", OMAP_MUX_MODE5 | AM33XX_PIN_INPUT},
++ {"uart1_txd.pr1_uart0_txd_mux1", OMAP_MUX_MODE5 | AM33XX_PIN_OUTPUT},
++ {"mcasp0_fsr.pr1_pru0_pru_r30_5", OMAP_MUX_MODE5 | AM33XX_PIN_OUTPUT},
++ {NULL, 0},
++};
++
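++/*
++ * The BeagleBone 1-wire bus master uses gpio1_3, which is routed to
++ * expansion header P8 pin 6 (see bonew1_gpio_init() below).
++ */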
++#define BEAGLEBONE_W1_GPIO GPIO_TO_PIN(1, 3)
++
++static struct w1_gpio_platform_data bone_w1_gpio_pdata = {
++ .pin = BEAGLEBONE_W1_GPIO,
++ .is_open_drain = 0,
++};
++
++static struct platform_device bone_w1_device = {
++ .name = "w1-gpio",
++ .id = -1,
++ .dev.platform_data = &bone_w1_gpio_pdata,
++};
++
++/* LEDS - gpio1_21 -> gpio1_24 */
++
++#define BEAGLEBONE_USR1_LED GPIO_TO_PIN(1, 21)
++#define BEAGLEBONE_USR2_LED GPIO_TO_PIN(1, 22)
++#define BEAGLEBONE_USR3_LED GPIO_TO_PIN(1, 23)
++#define BEAGLEBONE_USR4_LED GPIO_TO_PIN(1, 24)
++
++static struct gpio_led bone_gpio_leds[] = {
++ {
++ .name = "beaglebone::usr0",
++ .default_trigger = "heartbeat",
++ .gpio = BEAGLEBONE_USR1_LED,
++ },
++ {
++ .name = "beaglebone::usr1",
++ .default_trigger = "mmc0",
++ .gpio = BEAGLEBONE_USR2_LED,
++ },
++ {
++ .name = "beaglebone::usr2",
++ .gpio = BEAGLEBONE_USR3_LED,
++ },
++ {
++ .name = "beaglebone::usr3",
++ .gpio = BEAGLEBONE_USR4_LED,
++ },
++};
++
++static struct gpio_led_platform_data bone_gpio_led_info = {
++ .leds = bone_gpio_leds,
++ .num_leds = ARRAY_SIZE(bone_gpio_leds),
++};
++
++static struct platform_device bone_leds_gpio = {
++ .name = "leds-gpio",
++ .id = -1,
++ .dev = {
++ .platform_data = &bone_gpio_led_info,
++ },
++};
++
++
++#define BEAGLEBONEDVI_USR0_LED GPIO_TO_PIN(1, 18)
++#define BEAGLEBONEDVI_USR1_LED GPIO_TO_PIN(1, 19)
++
++static struct gpio_led dvi_gpio_leds[] = {
++ {
++ .name = "beaglebone::usr0",
++ .default_trigger = "heartbeat",
++ .gpio = BEAGLEBONE_USR1_LED,
++ },
++ {
++ .name = "beaglebone::usr1",
++ .default_trigger = "mmc0",
++ .gpio = BEAGLEBONE_USR2_LED,
++ },
++ {
++ .name = "beaglebone::usr2",
++ .gpio = BEAGLEBONE_USR3_LED,
++ },
++ {
++ .name = "beaglebone::usr3",
++ .gpio = BEAGLEBONE_USR4_LED,
++ },
++ {
++ .name = "dvi::usr0",
++ .default_trigger = "heartbeat",
++ .gpio = BEAGLEBONEDVI_USR0_LED,
++ },
++ {
++ .name = "dvi::usr1",
++ .default_trigger = "mmc0",
++ .gpio = BEAGLEBONEDVI_USR1_LED,
++ },
++};
++
++static struct gpio_led_platform_data dvi_gpio_led_info = {
++ .leds = dvi_gpio_leds,
++ .num_leds = ARRAY_SIZE(dvi_gpio_leds),
++};
++
++static struct platform_device dvi_leds_gpio = {
++ .name = "leds-gpio",
++ .id = -1,
++ .dev = {
++ .platform_data = &dvi_gpio_led_info,
++ },
++};
++
++static struct pinmux_config bone_pin_mux[] = {
++ /* User LED gpios (gpio1_21 to gpio1_24) */
++ {"gpmc_a5.rgmii2_td0", OMAP_MUX_MODE7 | AM33XX_PIN_OUTPUT},
++ {"gpmc_a6.rgmii2_tclk", OMAP_MUX_MODE7 | AM33XX_PIN_OUTPUT},
++ {"gpmc_a7.rgmii2_rclk", OMAP_MUX_MODE7 | AM33XX_PIN_OUTPUT},
++ {"gpmc_a8.rgmii2_rd3", OMAP_MUX_MODE7 | AM33XX_PIN_OUTPUT},
++ /* Grounding gpio1_6 (pin 3 Conn A) signals bone tester to start diag tests */
++ {"gpmc_ad6.gpio1_6", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT_PULLUP},
++	{NULL, 0},
++};
++
++/* Module pin mux for eCAP0 */
++static struct pinmux_config ecap0_pin_mux[] = {
++ {"ecap0_in_pwm0_out.ecap0_in_pwm0_out",
++ OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT},
++ {NULL, 0},
++};
++
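++/*
++ * Set by enable_ecap0()/enable_ehrpwm1() during board configuration and
++ * checked in the late_initcalls below, so the PWM backlight devices are
++ * only registered when a profile or cape has actually enabled them.
++ */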
++static int ehrpwm_backlight_enable;
++static int backlight_enable;
++
++#define AM335XEVM_WLAN_PMENA_GPIO GPIO_TO_PIN(1, 30)
++#define AM335XEVM_WLAN_IRQ_GPIO GPIO_TO_PIN(3, 17)
++
++struct wl12xx_platform_data am335xevm_wlan_data = {
++ .irq = OMAP_GPIO_IRQ(AM335XEVM_WLAN_IRQ_GPIO),
++	.board_ref_clock = WL12XX_REFCLOCK_38_XTAL, /* 38.4 MHz */
++};
++
++/* Module pin mux for wlan and bluetooth */
++static struct pinmux_config mmc2_wl12xx_pin_mux[] = {
++ {"gpmc_a1.mmc2_dat0", OMAP_MUX_MODE3 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_a2.mmc2_dat1", OMAP_MUX_MODE3 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_a3.mmc2_dat2", OMAP_MUX_MODE3 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_ben1.mmc2_dat3", OMAP_MUX_MODE3 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_csn3.mmc2_cmd", OMAP_MUX_MODE3 | AM33XX_PIN_INPUT_PULLUP},
++ {"gpmc_clk.mmc2_clk", OMAP_MUX_MODE3 | AM33XX_PIN_INPUT_PULLUP},
++ {NULL, 0},
++};
++
++static struct pinmux_config uart1_wl12xx_pin_mux[] = {
++ {"uart1_ctsn.uart1_ctsn", OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT},
++ {"uart1_rtsn.uart1_rtsn", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT},
++ {"uart1_rxd.uart1_rxd", OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP},
++ {"uart1_txd.uart1_txd", OMAP_MUX_MODE0 | AM33XX_PULL_ENBL},
++ {NULL, 0},
++};
++
++static struct pinmux_config wl12xx_pin_mux_evm_rev1_1a[] = {
++ {"gpmc_a0.gpio1_16", OMAP_MUX_MODE7 | AM33XX_PIN_OUTPUT},
++ {"mcasp0_ahclkr.gpio3_17", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT},
++ {"mcasp0_ahclkx.gpio3_21", OMAP_MUX_MODE7 | AM33XX_PIN_OUTPUT},
++ {NULL, 0},
++};
++
++static struct pinmux_config wl12xx_pin_mux_evm_rev1_0[] = {
++ {"gpmc_csn1.gpio1_30", OMAP_MUX_MODE7 | AM33XX_PIN_OUTPUT},
++ {"mcasp0_ahclkr.gpio3_17", OMAP_MUX_MODE7 | AM33XX_PIN_INPUT},
++ {"gpmc_csn2.gpio1_31", OMAP_MUX_MODE7 | AM33XX_PIN_OUTPUT},
++ {NULL, 0},
++};
++
++static void enable_ecap0(int evm_id, int profile)
++{
++ backlight_enable = true;
++ setup_pin_mux(ecap0_pin_mux);
++}
++
++/* Setup pwm-backlight */
++static struct platform_device am335x_backlight = {
++ .name = "pwm-backlight",
++ .id = -1,
++ .dev = {
++ .platform_data = &am335x_backlight_data,
++ }
++};
++
++static struct pwmss_platform_data pwm_pdata[3] = {
++ {
++ .version = PWM_VERSION_1,
++ },
++ {
++ .version = PWM_VERSION_1,
++ },
++ {
++ .version = PWM_VERSION_1,
++ },
++};
++
++static int __init ecap0_init(void)
++{
++ int status = 0;
++
++ if (backlight_enable) {
++ am33xx_register_ecap(0, &pwm_pdata[0]);
++ platform_device_register(&am335x_backlight);
++ }
++ return status;
++}
++late_initcall(ecap0_init);
++
++static void enable_ehrpwm1(int evm_id, int profile)
++{
++ ehrpwm_backlight_enable = true;
++ am33xx_register_ehrpwm(1, &pwm_pdata[1]);
++}
++
++/* Setup pwm-backlight for bbtoys7lcd */
++static struct platform_device bbtoys7lcd_backlight = {
++ .name = "pwm-backlight",
++ .id = -1,
++ .dev = {
++ .platform_data = &bbtoys7lcd_backlight_data,
++ }
++};
++
++static int __init ehrpwm1_init(void)
++{
++ int status = 0;
++ if (ehrpwm_backlight_enable) {
++ platform_device_register(&bbtoys7lcd_backlight);
++ }
++ return status;
++}
++late_initcall(ehrpwm1_init);
++
++static int __init conf_disp_pll(int rate)
++{
++ struct clk *disp_pll;
++ int ret = -EINVAL;
++
++ disp_pll = clk_get(NULL, "dpll_disp_ck");
++ if (IS_ERR(disp_pll)) {
++ pr_err("Cannot clk_get disp_pll\n");
++ goto out;
++ }
++
++ ret = clk_set_rate(disp_pll, rate);
++ clk_put(disp_pll);
++out:
++ return ret;
++}
++
++static void lcdc_init(int evm_id, int profile)
++{
++
++ setup_pin_mux(lcdc_pin_mux);
++
++ if (conf_disp_pll(300000000)) {
++		pr_info("Failed to configure display PLL, not attempting to"
++				" register LCDC\n");
++ return;
++ }
++
++ if (am33xx_register_lcdc(&TFC_S9700RTWV35TR_01B_pdata))
++ pr_info("Failed to register LCDC device\n");
++ return;
++}
++
++#define BEAGLEBONE_LCD_AVDD_EN GPIO_TO_PIN(0, 7)
++
++static void bbtoys7lcd_init(int evm_id, int profile)
++{
++ setup_pin_mux(bbtoys7_pin_mux);
++ gpio_request(BEAGLEBONE_LCD_AVDD_EN, "BONE_LCD_AVDD_EN");
++ gpio_direction_output(BEAGLEBONE_LCD_AVDD_EN, 1);
++
++	/* FIXME: set the pixel clock here for now instead of in da8xx-fb.c */
++	if (conf_disp_pll(300000000)) {
++		pr_info("Failed to set pixclock to 300000000, not attempting to"
++				" register LCD cape\n");
++ return;
++ }
++
++ if (am33xx_register_lcdc(&bbtoys7_pdata))
++ pr_info("Failed to register Beagleboardtoys 7\" LCD cape device\n");
++
++ return;
++}
++
++static void bbtoys35lcd_init(int evm_id, int profile)
++{
++ setup_pin_mux(bbtoys7_pin_mux);
++
++	/* FIXME: set the pixel clock here for now instead of in da8xx-fb.c */
++	if (conf_disp_pll(16000000)) {
++		pr_info("Failed to set pixclock to 16000000, not attempting to"
++				" register LCD cape\n");
++ return;
++ }
++
++ if (am33xx_register_lcdc(&bbtoys35_pdata))
++ pr_info("Failed to register Beagleboardtoys 3.5\" LCD cape device\n");
++
++ return;
++}
++
++#define BEAGLEBONEDVI_PDn GPIO_TO_PIN(1, 7)
++
++static void dvi_init(int evm_id, int profile)
++{
++ setup_pin_mux(dvi_pin_mux);
++ gpio_request(BEAGLEBONEDVI_PDn, "DVI_PDn");
++ gpio_direction_output(BEAGLEBONEDVI_PDn, 1);
++
++	/* FIXME: set the pixel clock here for now instead of in da8xx-fb.c */
++	if (conf_disp_pll(560000000)) {
++		pr_info("Failed to set pixclock to 560000000, not attempting to"
++				" register DVI adapter\n");
++ return;
++ }
++
++ if (am33xx_register_lcdc(&dvi_pdata))
++ pr_info("Failed to register BeagleBoardToys DVI cape\n");
++ return;
++}
++
++static void tsc_init(int evm_id, int profile)
++{
++ int err;
++
++ if (gp_evm_revision == GP_EVM_REV_IS_1_1A) {
++ am335x_touchscreen_data.analog_input = 1;
++ pr_info("TSC connected to beta GP EVM\n");
++ }
++	if (gp_evm_revision == GP_EVM_REV_IS_1_0) {
++		am335x_touchscreen_data.analog_input = 0;
++		pr_info("TSC connected to alpha GP EVM\n");
++	}
++	if (gp_evm_revision == GP_EVM_ACTUALLY_BEAGLEBONE) {
++		am335x_touchscreen_data.analog_input = 1;
++		pr_info("TSC connected to BeagleBone\n");
++	}
++ setup_pin_mux(tsc_pin_mux);
++
++ err = am33xx_register_tsc(&am335x_touchscreen_data);
++ if (err)
++ pr_err("failed to register touchscreen device\n");
++}
++
++static void bone_tsc_init(int evm_id, int profile)
++{
++ int err;
++ setup_pin_mux(tsc_pin_mux);
++ err = am33xx_register_tsc(&bone_touchscreen_data);
++ if (err)
++ pr_err("failed to register touchscreen device\n");
++}
++
++
++static void boneleds_init(int evm_id, int profile )
++{
++ int err;
++ setup_pin_mux(bone_pin_mux);
++ err = platform_device_register(&bone_leds_gpio);
++ if (err)
++ pr_err("failed to register BeagleBone LEDS\n");
++}
++
++static void dvileds_init(int evm_id, int profile )
++{
++ int err;
++ err = platform_device_register(&dvi_leds_gpio);
++ if (err)
++ pr_err("failed to register BeagleBone DVI cape LEDS\n");
++}
++
++static void bonew1_gpio_init(int evm_id, int profile )
++{
++ int err;
++ setup_pin_mux(w1_gpio_pin_mux);
++ err = platform_device_register(&bone_w1_device);
++ if (err)
++ pr_err("failed to register w1-gpio\n");
++ else
++ pr_info("w1-gpio connected to P8_6\n");
++}
++
++static void rgmii1_init(int evm_id, int profile)
++{
++ setup_pin_mux(rgmii1_pin_mux);
++ return;
++}
++
++static void rgmii2_init(int evm_id, int profile)
++{
++ setup_pin_mux(rgmii2_pin_mux);
++ return;
++}
++
++static void mii1_init(int evm_id, int profile)
++{
++ setup_pin_mux(mii1_pin_mux);
++ return;
++}
++
++static void rmii1_init(int evm_id, int profile)
++{
++ setup_pin_mux(rmii1_pin_mux);
++ return;
++}
++
++static void usb0_init(int evm_id, int profile)
++{
++ setup_pin_mux(usb0_pin_mux);
++ return;
++}
++
++static void usb1_init(int evm_id, int profile)
++{
++ setup_pin_mux(usb1_pin_mux);
++ return;
++}
++
++/* setup uart3 */
++static void uart3_init(int evm_id, int profile)
++{
++ setup_pin_mux(uart3_pin_mux);
++ return;
++}
++
++/* setup uart2 */
++static void uart2_init(int evm_id, int profile)
++{
++ setup_pin_mux(uart2_pin_mux);
++ return;
++}
++
++/* setup haptics */
++#define HAPTICS_MAX_FREQ 250
++static void haptics_init(int evm_id, int profile)
++{
++ setup_pin_mux(haptics_pin_mux);
++ pwm_pdata[2].chan_attrib[1].max_freq = HAPTICS_MAX_FREQ;
++ am33xx_register_ehrpwm(2, &pwm_pdata[2]);
++}
++
++/* NAND partition information */
++static struct mtd_partition am335x_nand_partitions[] = {
++/* All the partition sizes are listed in terms of NAND block size */
++ {
++ .name = "SPL",
++ .offset = 0, /* Offset = 0x0 */
++ .size = SZ_128K,
++ },
++ {
++ .name = "SPL.backup1",
++ .offset = MTDPART_OFS_APPEND, /* Offset = 0x20000 */
++ .size = SZ_128K,
++ },
++ {
++ .name = "SPL.backup2",
++ .offset = MTDPART_OFS_APPEND, /* Offset = 0x40000 */
++ .size = SZ_128K,
++ },
++ {
++ .name = "SPL.backup3",
++ .offset = MTDPART_OFS_APPEND, /* Offset = 0x60000 */
++ .size = SZ_128K,
++ },
++ {
++ .name = "U-Boot",
++ .offset = MTDPART_OFS_APPEND, /* Offset = 0x80000 */
++ .size = 15 * SZ_128K,
++ },
++ {
++ .name = "U-Boot Env",
++ .offset = MTDPART_OFS_APPEND, /* Offset = 0x260000 */
++ .size = 1 * SZ_128K,
++ },
++ {
++ .name = "Kernel",
++ .offset = MTDPART_OFS_APPEND, /* Offset = 0x280000 */
++ .size = 40 * SZ_128K,
++ },
++ {
++ .name = "File System",
++ .offset = MTDPART_OFS_APPEND, /* Offset = 0x780000 */
++ .size = MTDPART_SIZ_FULL,
++ },
++};
++
++/* SPI 0/1 Platform Data */
++/* SPI flash information */
++static struct mtd_partition am335x_spi_partitions[] = {
++ /* All the partition sizes are listed in terms of erase size */
++ {
++ .name = "SPL",
++ .offset = 0, /* Offset = 0x0 */
++ .size = SZ_128K,
++ },
++ {
++ .name = "U-Boot",
++ .offset = MTDPART_OFS_APPEND, /* Offset = 0x20000 */
++ .size = 2 * SZ_128K,
++ },
++ {
++ .name = "U-Boot Env",
++ .offset = MTDPART_OFS_APPEND, /* Offset = 0x60000 */
++ .size = 2 * SZ_4K,
++ },
++ {
++ .name = "Kernel",
++ .offset = MTDPART_OFS_APPEND, /* Offset = 0x62000 */
++ .size = 28 * SZ_128K,
++ },
++ {
++ .name = "File System",
++ .offset = MTDPART_OFS_APPEND, /* Offset = 0x3E2000 */
++ .size = MTDPART_SIZ_FULL, /* size ~= 4.1 MiB */
++ }
++};
++
++static const struct flash_platform_data am335x_spi_flash = {
++ .type = "w25q64",
++ .name = "spi_flash",
++ .parts = am335x_spi_partitions,
++ .nr_parts = ARRAY_SIZE(am335x_spi_partitions),
++};
++
++/*
++ * The SPI flash works at 80 MHz, but the SPI controller works at
++ * 48 MHz, so set the max speed to be less than the controller speed.
++ */
++static struct spi_board_info am335x_spi0_slave_info[] = {
++ {
++ .modalias = "m25p80",
++ .platform_data = &am335x_spi_flash,
++ .irq = -1,
++ .max_speed_hz = 24000000,
++ .bus_num = 1,
++ .chip_select = 0,
++ },
++};
++
++static struct spi_board_info am335x_spi1_slave_info[] = {
++ {
++ .modalias = "m25p80",
++ .platform_data = &am335x_spi_flash,
++ .irq = -1,
++ .max_speed_hz = 12000000,
++ .bus_num = 2,
++ .chip_select = 0,
++ },
++};
++
++static struct gpmc_timings am335x_nand_timings = {
++ .sync_clk = 0,
++
++ .cs_on = 0,
++ .cs_rd_off = 44,
++ .cs_wr_off = 44,
++
++ .adv_on = 6,
++ .adv_rd_off = 34,
++ .adv_wr_off = 44,
++ .we_off = 40,
++ .oe_off = 54,
++
++ .access = 64,
++ .rd_cycle = 82,
++ .wr_cycle = 82,
++
++ .wr_access = 40,
++ .wr_data_mux_bus = 0,
++};
++
++static void evm_nand_init(int evm_id, int profile)
++{
++ struct omap_nand_platform_data *pdata;
++ struct gpmc_devices_info gpmc_device[2] = {
++ { NULL, 0 },
++ { NULL, 0 },
++ };
++
++ setup_pin_mux(nand_pin_mux);
++ pdata = omap_nand_init(am335x_nand_partitions,
++ ARRAY_SIZE(am335x_nand_partitions), 0, 0,
++ &am335x_nand_timings);
++ if (!pdata)
++ return;
++	pdata->ecc_opt = OMAP_ECC_BCH8_CODE_HW;
++ pdata->elm_used = true;
++ gpmc_device[0].pdata = pdata;
++ gpmc_device[0].flag = GPMC_DEVICE_NAND;
++
++ omap_init_gpmc(gpmc_device, sizeof(gpmc_device));
++ omap_init_elm();
++}
++
++/* TPS65217 voltage regulator support */
++
++/* 1.8V */
++static struct regulator_consumer_supply tps65217_dcdc1_consumers[] = {
++ {
++ .supply = "vdds_osc",
++ },
++ {
++ .supply = "vdds_pll_ddr",
++ },
++ {
++ .supply = "vdds_pll_mpu",
++ },
++ {
++ .supply = "vdds_pll_core_lcd",
++ },
++ {
++ .supply = "vdds_sram_mpu_bb",
++ },
++ {
++ .supply = "vdds_sram_core_bg",
++ },
++ {
++ .supply = "vdda_usb0_1p8v",
++ },
++ {
++ .supply = "vdds_ddr",
++ },
++ {
++ .supply = "vdds",
++ },
++ {
++ .supply = "vdds_hvx_1p8v",
++ },
++ {
++ .supply = "vdda_adc",
++ },
++ {
++ .supply = "ddr2",
++ },
++};
++
++/* 1.1V */
++static struct regulator_consumer_supply tps65217_dcdc2_consumers[] = {
++ {
++ .supply = "vdd_mpu",
++ },
++};
++
++/* 1.1V */
++static struct regulator_consumer_supply tps65217_dcdc3_consumers[] = {
++ {
++ .supply = "vdd_core",
++ },
++};
++
++/* 1.8V LDO */
++static struct regulator_consumer_supply tps65217_ldo1_consumers[] = {
++ {
++ .supply = "vdds_rtc",
++ },
++};
++
++/* 3.3V LDO */
++static struct regulator_consumer_supply tps65217_ldo2_consumers[] = {
++ {
++ .supply = "vdds_any_pn",
++ },
++};
++
++/* 3.3V LDO */
++static struct regulator_consumer_supply tps65217_ldo3_consumers[] = {
++ {
++ .supply = "vdds_hvx_ldo3_3p3v",
++ },
++ {
++ .supply = "vdda_usb0_3p3v",
++ },
++};
++
++/* 3.3V LDO */
++static struct regulator_consumer_supply tps65217_ldo4_consumers[] = {
++ {
++ .supply = "vdds_hvx_ldo4_3p3v",
++ },
++};
++
++static struct regulator_init_data tps65217_regulator_data[] = {
++ /* dcdc1 */
++ {
++ .constraints = {
++ .min_uV = 900000,
++ .max_uV = 1800000,
++ .boot_on = 1,
++ .always_on = 1,
++ },
++ .num_consumer_supplies = ARRAY_SIZE(tps65217_dcdc1_consumers),
++ .consumer_supplies = tps65217_dcdc1_consumers,
++ },
++
++ /* dcdc2 */
++ {
++ .constraints = {
++ .min_uV = 900000,
++ .max_uV = 3300000,
++ .valid_ops_mask = (REGULATOR_CHANGE_VOLTAGE |
++ REGULATOR_CHANGE_STATUS),
++ .boot_on = 1,
++ .always_on = 1,
++ },
++ .num_consumer_supplies = ARRAY_SIZE(tps65217_dcdc2_consumers),
++ .consumer_supplies = tps65217_dcdc2_consumers,
++ },
++
++ /* dcdc3 */
++ {
++ .constraints = {
++ .min_uV = 900000,
++ .max_uV = 1500000,
++ .valid_ops_mask = (REGULATOR_CHANGE_VOLTAGE |
++ REGULATOR_CHANGE_STATUS),
++ .boot_on = 1,
++ .always_on = 1,
++ },
++ .num_consumer_supplies = ARRAY_SIZE(tps65217_dcdc3_consumers),
++ .consumer_supplies = tps65217_dcdc3_consumers,
++ },
++
++ /* ldo1 */
++ {
++ .constraints = {
++ .min_uV = 1000000,
++ .max_uV = 3300000,
++ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
++ .boot_on = 1,
++ .always_on = 1,
++ },
++ .num_consumer_supplies = ARRAY_SIZE(tps65217_ldo1_consumers),
++ .consumer_supplies = tps65217_ldo1_consumers,
++ },
++
++ /* ldo2 */
++ {
++ .constraints = {
++ .min_uV = 900000,
++ .max_uV = 3300000,
++ .valid_ops_mask = (REGULATOR_CHANGE_VOLTAGE |
++ REGULATOR_CHANGE_STATUS),
++ .boot_on = 1,
++ .always_on = 1,
++ },
++ .num_consumer_supplies = ARRAY_SIZE(tps65217_ldo2_consumers),
++ .consumer_supplies = tps65217_ldo2_consumers,
++ },
++
++ /* ldo3 */
++ {
++ .constraints = {
++ .min_uV = 1800000,
++ .max_uV = 3300000,
++ .valid_ops_mask = (REGULATOR_CHANGE_VOLTAGE |
++ REGULATOR_CHANGE_STATUS),
++ .boot_on = 1,
++ .always_on = 1,
++ },
++ .num_consumer_supplies = ARRAY_SIZE(tps65217_ldo3_consumers),
++ .consumer_supplies = tps65217_ldo3_consumers,
++ },
++
++ /* ldo4 */
++ {
++ .constraints = {
++ .min_uV = 1800000,
++ .max_uV = 3300000,
++ .valid_ops_mask = (REGULATOR_CHANGE_VOLTAGE |
++ REGULATOR_CHANGE_STATUS),
++ .boot_on = 1,
++ .always_on = 1,
++ },
++ .num_consumer_supplies = ARRAY_SIZE(tps65217_ldo4_consumers),
++ .consumer_supplies = tps65217_ldo4_consumers,
++ },
++};
++
++static struct tps65217_board beaglebone_tps65217_info = {
++ .tps65217_init_data = &tps65217_regulator_data[0],
++};
++
++static struct lis3lv02d_platform_data lis331dlh_pdata = {
++ .click_flags = LIS3_CLICK_SINGLE_X |
++ LIS3_CLICK_SINGLE_Y |
++ LIS3_CLICK_SINGLE_Z,
++ .wakeup_flags = LIS3_WAKEUP_X_LO | LIS3_WAKEUP_X_HI |
++ LIS3_WAKEUP_Y_LO | LIS3_WAKEUP_Y_HI |
++ LIS3_WAKEUP_Z_LO | LIS3_WAKEUP_Z_HI,
++ .irq_cfg = LIS3_IRQ1_CLICK | LIS3_IRQ2_CLICK,
++ .wakeup_thresh = 10,
++ .click_thresh_x = 10,
++ .click_thresh_y = 10,
++ .click_thresh_z = 10,
++ .g_range = 2,
++ .st_min_limits[0] = 120,
++ .st_min_limits[1] = 120,
++ .st_min_limits[2] = 140,
++ .st_max_limits[0] = 550,
++ .st_max_limits[1] = 550,
++ .st_max_limits[2] = 750,
++};
++
++static struct i2c_board_info am335x_i2c_boardinfo1[] = {
++ {
++ I2C_BOARD_INFO("tlv320aic3x", 0x1b),
++ },
++ {
++ I2C_BOARD_INFO("lis331dlh", 0x18),
++ .platform_data = &lis331dlh_pdata,
++ },
++ {
++ I2C_BOARD_INFO("tsl2550", 0x39),
++ },
++ {
++ I2C_BOARD_INFO("tmp275", 0x48),
++ },
++};
++
++static void i2c1_init(int evm_id, int profile)
++{
++ setup_pin_mux(i2c1_pin_mux);
++ omap_register_i2c_bus(2, 100, am335x_i2c_boardinfo1,
++ ARRAY_SIZE(am335x_i2c_boardinfo1));
++ return;
++}
++
++static struct mcp251x_platform_data mcp251x_info = {
++ .oscillator_frequency = 16000000,
++};
++
++static struct spi_board_info tt3201_spi_info[] = {
++ {
++ .modalias = "mcp2515",
++ .max_speed_hz = 10000000,
++ .bus_num = 2,
++ .chip_select = 0,
++ .mode = SPI_MODE_0,
++ .platform_data = &mcp251x_info,
++ },
++ {
++ .modalias = "mcp2515",
++ .max_speed_hz = 10000000,
++ .bus_num = 2,
++ .chip_select = 1,
++ .mode = SPI_MODE_0,
++ .platform_data = &mcp251x_info,
++ },
++};
++
++static void tt3201_init(int evm_id, int profile)
++{
++ pr_info("TowerTech TT3201 CAN Cape\n");
++
++ setup_pin_mux(spi1_pin_mux);
++ setup_pin_mux(tt3201_pin_mux);
++
++ tt3201_spi_info[0].irq = gpio_to_irq(GPIO_TO_PIN(3, 19));
++ tt3201_spi_info[1].irq = gpio_to_irq(GPIO_TO_PIN(3, 21));
++
++ spi_register_board_info(tt3201_spi_info,
++ ARRAY_SIZE(tt3201_spi_info));
++
++ am33xx_d_can_init(1);
++}
++static void beaglebone_cape_setup(struct memory_accessor *mem_acc, void *context)
++{
++	int ret;
++	char tmp[32];
++	char name[32];
++	char version[4];
++	char manufacturer[32];
++	capecount++;
++
++ /* get cape specific data */
++ ret = mem_acc->read(mem_acc, (char *)&cape_config, 0, sizeof(cape_config));
++ if (ret != sizeof(cape_config)) {
++ pr_warning("BeagleBone cape EEPROM: could not read eeprom at address 0x%x\n", capecount + 0x53);
++ if ((capecount > 3) && (beaglebone_tsadcpins_free == 1)) {
++ pr_info("BeagleBone cape: exporting ADC pins to sysfs\n");
++ bone_tsc_init(0,0);
++ beaglebone_tsadcpins_free = 0;
++ }
++ return;
++ }
++
++ if (cape_config.header != AM335X_EEPROM_HEADER) {
++ pr_warning("BeagleBone Cape EEPROM: wrong header 0x%x, expected 0x%x\n",
++ cape_config.header, AM335X_EEPROM_HEADER);
++ goto out;
++ }
++
++ pr_info("BeagleBone cape EEPROM: found eeprom at address 0x%x\n", capecount + 0x53);
++ snprintf(name, sizeof(cape_config.name) + 1, "%s", cape_config.name);
++ snprintf(version, sizeof(cape_config.version) + 1, "%s", cape_config.version);
++ snprintf(manufacturer, sizeof(cape_config.manufacturer) + 1, "%s", cape_config.manufacturer);
++ pr_info("BeagleBone cape: %s %s, revision %s\n", manufacturer, name, version);
++ snprintf(tmp, sizeof(cape_config.partnumber) + 1, "%s", cape_config.partnumber);
++ pr_info("BeagleBone cape partnumber: %s\n", tmp);
++
++ if (!strncmp("BB-BONE-DVID-01", cape_config.partnumber, 15)) {
++ pr_info("BeagleBone cape: initializing DVI cape\n");
++ dvi_init(0,0);
++ }
++ if (!strncmp("BB-BONE-LCD7-01", cape_config.partnumber, 15)) {
++ pr_info("BeagleBone cape: initializing LCD cape\n");
++ bbtoys7lcd_init(0,0);
++ pr_info("BeagleBone cape: initializing LCD cape touchscreen\n");
++ tsc_init(0,0);
++ pr_info("BeagleBone cape: Registering PWM backlight for LCD cape\n");
++ enable_ehrpwm1(0,0);
++ beaglebone_tsadcpins_free = 0;
++ pr_info("BeagleBone cape: Registering gpio-keys for LCD cape\n");
++ beaglebone_lcd7_keys_init(0,0);
++ }
++
++ if (!strncmp("BB-BONE-LCD3-01", cape_config.partnumber, 15)) {
++ pr_info("BeagleBone cape: initializing LCD cape\n");
++ bbtoys35lcd_init(0,0);
++ pr_info("BeagleBone cape: initializing LCD cape touchscreen\n");
++ tsc_init(0,0);
++ beaglebone_tsadcpins_free = 0;
++ pr_info("BeagleBone cape: Registering gpio-keys for LCD cape\n");
++ beaglebone_lcd3_keys_init(0,0);
++ }
++
++ if (!strncmp("BB-BONE-VGA-01", cape_config.partnumber, 14)) {
++ pr_info("BeagleBone cape: initializing VGA cape\n");
++ dvi_init(0,0);
++ }
++
++ if (!strncmp("BB-BONE-BATT-01", cape_config.partnumber, 15)) {
++ pr_info("BeagleBone cape: initializing battery cape\n");
++ // gpio1_6, P9_15 lowbat output
++ // AIN4, P9_33 vbat
++ //foo_init(0,0);
++ }
++
++ if (!strncmp("BB-BONE-SERL", cape_config.partnumber, 12)) {
++ pr_info("BeagleBone cape: initializing serial cape\n");
++ // 01 -> CAN
++ // 02 -> Profibus
++ // 03 -> RS232
++ // 04 -> RS485
++ //foo_init(0,0);
++ }
++
++ if (!strncmp("TT3201-001", cape_config.partnumber, 10)) {
++ pr_info("BeagleBone cape: initializing CAN cape\n");
++ tt3201_init(0,0);
++ }
++
++ if ((capecount > 3) && (beaglebone_tsadcpins_free == 1)) {
++ pr_info("BeagleBone cape: exporting ADC pins to sysfs\n");
++ bone_tsc_init(0,0);
++ beaglebone_tsadcpins_free = 0;
++ }
++
++ return;
++out:
++ /*
++	 * If the cape EEPROM hasn't been programmed or an incorrect
++	 * header is read, the cape cannot be identified, so flag it
++	 * as not detected.
++ */
++ pr_err("Could not detect BeagleBone cape properly\n");
++ beaglebone_cape_detected = false;
++
++}
++
++static struct at24_platform_data cape_eeprom_info = {
++ .byte_len = (256*1024) / 8,
++ .page_size = 64,
++ .flags = AT24_FLAG_ADDR16,
++ .context = (void *)NULL,
++ .setup = beaglebone_cape_setup,
++};
++
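++/*
++ * Cape EEPROMs may appear at I2C addresses 0x54-0x57 on bus 3.
++ * beaglebone_cape_setup() counts its invocations via capecount and, once
++ * all four addresses have been tried without identifying a cape, falls
++ * back to exporting the ADC pins via bone_tsc_init().
++ */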
++static struct i2c_board_info __initdata cape_i2c_boardinfo[] = {
++ {
++ I2C_BOARD_INFO("24c256", 0x54),
++ .platform_data = &cape_eeprom_info,
++ },
++ {
++ I2C_BOARD_INFO("24c256", 0x55),
++ .platform_data = &cape_eeprom_info,
++ },
++ {
++ I2C_BOARD_INFO("24c256", 0x56),
++ .platform_data = &cape_eeprom_info,
++ },
++ {
++ I2C_BOARD_INFO("24c256", 0x57),
++ .platform_data = &cape_eeprom_info,
++ },
++};
++
++static void i2c2_init(int evm_id, int profile)
++{
++ setup_pin_mux(i2c2_pin_mux);
++ omap_register_i2c_bus(3, 100, cape_i2c_boardinfo,
++ ARRAY_SIZE(cape_i2c_boardinfo));
++ return;
++}
++
++
++/* Setup McASP 1 */
++static void mcasp1_init(int evm_id, int profile)
++{
++ /* Configure McASP */
++ setup_pin_mux(mcasp1_pin_mux);
++ am335x_register_mcasp(&am335x_evm_snd_data1, 1);
++ return;
++}
++
++static void mmc1_init(int evm_id, int profile)
++{
++ setup_pin_mux(mmc1_pin_mux);
++
++ am335x_mmc[1].mmc = 2;
++ am335x_mmc[1].caps = MMC_CAP_4_BIT_DATA;
++ am335x_mmc[1].gpio_cd = GPIO_TO_PIN(2, 2);
++ am335x_mmc[1].gpio_wp = GPIO_TO_PIN(1, 29);
++ am335x_mmc[1].ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34; /* 3V3 */
++
++ /* mmc will be initialized when mmc0_init is called */
++ return;
++}
++
++static void mmc2_wl12xx_init(int evm_id, int profile)
++{
++ setup_pin_mux(mmc2_wl12xx_pin_mux);
++
++ am335x_mmc[1].mmc = 3;
++ am335x_mmc[1].name = "wl1271";
++ am335x_mmc[1].caps = MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD
++ | MMC_PM_KEEP_POWER;
++ am335x_mmc[1].nonremovable = true;
++ am335x_mmc[1].gpio_cd = -EINVAL;
++ am335x_mmc[1].gpio_wp = -EINVAL;
++ am335x_mmc[1].ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34; /* 3V3 */
++
++ /* mmc will be initialized when mmc0_init is called */
++ return;
++}
++
++static void uart1_wl12xx_init(int evm_id, int profile)
++{
++ setup_pin_mux(uart1_wl12xx_pin_mux);
++}
++
++static void wl12xx_bluetooth_enable(void)
++{
++ int status = gpio_request(am335xevm_wlan_data.bt_enable_gpio,
++		"bt_en");
++	if (status < 0)
++		pr_err("Failed to request gpio for bt_enable\n");
++
++ pr_info("Configure Bluetooth Enable pin...\n");
++ gpio_direction_output(am335xevm_wlan_data.bt_enable_gpio, 0);
++}
++
++static int wl12xx_set_power(struct device *dev, int slot, int on, int vdd)
++{
++ if (on) {
++ gpio_set_value(am335xevm_wlan_data.wlan_enable_gpio, 1);
++ mdelay(70);
++ }
++ else
++ gpio_set_value(am335xevm_wlan_data.wlan_enable_gpio, 0);
++
++ return 0;
++}
++
++static void wl12xx_init(int evm_id, int profile)
++{
++ struct device *dev;
++ struct omap_mmc_platform_data *pdata;
++ int ret;
++
++ /* Register WLAN and BT enable pins based on the evm board revision */
++ if (gp_evm_revision == GP_EVM_REV_IS_1_1A) {
++ am335xevm_wlan_data.wlan_enable_gpio = GPIO_TO_PIN(1, 16);
++ am335xevm_wlan_data.bt_enable_gpio = GPIO_TO_PIN(3, 21);
++ }
++ else {
++ am335xevm_wlan_data.wlan_enable_gpio = GPIO_TO_PIN(1, 30);
++ am335xevm_wlan_data.bt_enable_gpio = GPIO_TO_PIN(1, 31);
++ }
++
++ wl12xx_bluetooth_enable();
++
++ if (wl12xx_set_platform_data(&am335xevm_wlan_data))
++ pr_err("error setting wl12xx data\n");
++
++ dev = am335x_mmc[1].dev;
++ if (!dev) {
++ pr_err("wl12xx mmc device initialization failed\n");
++ goto out;
++ }
++
++ pdata = dev->platform_data;
++ if (!pdata) {
++		pr_err("Platform data of wl12xx device not set\n");
++ goto out;
++ }
++
++ ret = gpio_request_one(am335xevm_wlan_data.wlan_enable_gpio,
++ GPIOF_OUT_INIT_LOW, "wlan_en");
++ if (ret) {
++ pr_err("Error requesting wlan enable gpio: %d\n", ret);
++ goto out;
++ }
++
++ if (gp_evm_revision == GP_EVM_REV_IS_1_1A)
++ setup_pin_mux(wl12xx_pin_mux_evm_rev1_1a);
++ else
++ setup_pin_mux(wl12xx_pin_mux_evm_rev1_0);
++
++ pdata->slots[0].set_power = wl12xx_set_power;
++out:
++ return;
++}
++
++static void d_can_init(int evm_id, int profile)
++{
++ switch (evm_id) {
++ case IND_AUT_MTR_EVM:
++ if ((profile == PROFILE_0) || (profile == PROFILE_1)) {
++ setup_pin_mux(d_can_ia_pin_mux);
++ /* Instance Zero */
++ am33xx_d_can_init(0);
++ }
++ break;
++ case GEN_PURP_EVM:
++ if (profile == PROFILE_1) {
++ setup_pin_mux(d_can_gp_pin_mux);
++ /* Instance One */
++ am33xx_d_can_init(1);
++ }
++ break;
++ default:
++ break;
++ }
++}
++
++static void mmc0_init(int evm_id, int profile)
++{
++ setup_pin_mux(mmc0_pin_mux);
++
++ omap2_hsmmc_init(am335x_mmc);
++ return;
++}
++
++static struct i2c_board_info tps65217_i2c_boardinfo[] = {
++ {
++ I2C_BOARD_INFO("tps65217", TPS65217_I2C_ID),
++ .platform_data = &beaglebone_tps65217_info,
++ },
++};
++
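++/*
++ * The PMIC is instantiated at run time on I2C bus 1 via i2c_new_device()
++ * rather than through the static am335x_i2c_boardinfo table; it is only
++ * registered for BeagleBone Rev A3 and later (see beaglebone_dev_cfg).
++ */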
++static void tps65217_init(int evm_id, int profile)
++{
++ struct i2c_adapter *adapter;
++ struct i2c_client *client;
++
++ /* I2C1 adapter request */
++ adapter = i2c_get_adapter(1);
++ if (!adapter) {
++ pr_err("failed to get adapter i2c1\n");
++ return;
++ }
++
++ client = i2c_new_device(adapter, tps65217_i2c_boardinfo);
++ if (!client)
++ pr_err("failed to register tps65217 to i2c1\n");
++
++ i2c_put_adapter(adapter);
++}
++
++static void mmc0_no_cd_init(int evm_id, int profile)
++{
++ setup_pin_mux(mmc0_no_cd_pin_mux);
++
++ omap2_hsmmc_init(am335x_mmc);
++ return;
++}
++
++
++/* setup spi0 */
++static void spi0_init(int evm_id, int profile)
++{
++ setup_pin_mux(spi0_pin_mux);
++ spi_register_board_info(am335x_spi0_slave_info,
++ ARRAY_SIZE(am335x_spi0_slave_info));
++ return;
++}
++
++/* setup spi1 */
++static void spi1_init(int evm_id, int profile)
++{
++ setup_pin_mux(spi1_pin_mux);
++ spi_register_board_info(am335x_spi1_slave_info,
++ ARRAY_SIZE(am335x_spi1_slave_info));
++ return;
++}
++
++
++static int beaglebone_phy_fixup(struct phy_device *phydev)
++{
++ phydev->supported &= ~(SUPPORTED_100baseT_Half |
++ SUPPORTED_100baseT_Full);
++
++ return 0;
++}
++
++#if defined(CONFIG_TLK110_WORKAROUND) || \
++ defined(CONFIG_TLK110_WORKAROUND_MODULE)
++static int am335x_tlk110_phy_fixup(struct phy_device *phydev)
++{
++ unsigned int val;
++
++ /* This is done as a workaround to support TLK110 rev1.0 phy */
++ val = phy_read(phydev, TLK110_COARSEGAIN_REG);
++ phy_write(phydev, TLK110_COARSEGAIN_REG, (val | TLK110_COARSEGAIN_VAL));
++
++ val = phy_read(phydev, TLK110_LPFHPF_REG);
++ phy_write(phydev, TLK110_LPFHPF_REG, (val | TLK110_LPFHPF_VAL));
++
++ val = phy_read(phydev, TLK110_SPAREANALOG_REG);
++ phy_write(phydev, TLK110_SPAREANALOG_REG, (val | TLK110_SPANALOG_VAL));
++
++ val = phy_read(phydev, TLK110_VRCR_REG);
++ phy_write(phydev, TLK110_VRCR_REG, (val | TLK110_VRCR_VAL));
++
++ val = phy_read(phydev, TLK110_SETFFE_REG);
++ phy_write(phydev, TLK110_SETFFE_REG, (val | TLK110_SETFFE_VAL));
++
++ val = phy_read(phydev, TLK110_FTSP_REG);
++ phy_write(phydev, TLK110_FTSP_REG, (val | TLK110_FTSP_VAL));
++
++ val = phy_read(phydev, TLK110_ALFATPIDL_REG);
++ phy_write(phydev, TLK110_ALFATPIDL_REG, (val | TLK110_ALFATPIDL_VAL));
++
++ val = phy_read(phydev, TLK110_PSCOEF21_REG);
++ phy_write(phydev, TLK110_PSCOEF21_REG, (val | TLK110_PSCOEF21_VAL));
++
++ val = phy_read(phydev, TLK110_PSCOEF3_REG);
++ phy_write(phydev, TLK110_PSCOEF3_REG, (val | TLK110_PSCOEF3_VAL));
++
++ val = phy_read(phydev, TLK110_ALFAFACTOR1_REG);
++ phy_write(phydev, TLK110_ALFAFACTOR1_REG, (val | TLK110_ALFACTOR1_VAL));
++
++ val = phy_read(phydev, TLK110_ALFAFACTOR2_REG);
++ phy_write(phydev, TLK110_ALFAFACTOR2_REG, (val | TLK110_ALFACTOR2_VAL));
++
++ val = phy_read(phydev, TLK110_CFGPS_REG);
++ phy_write(phydev, TLK110_CFGPS_REG, (val | TLK110_CFGPS_VAL));
++
++ val = phy_read(phydev, TLK110_FTSPTXGAIN_REG);
++ phy_write(phydev, TLK110_FTSPTXGAIN_REG, (val | TLK110_FTSPTXGAIN_VAL));
++
++ val = phy_read(phydev, TLK110_SWSCR3_REG);
++ phy_write(phydev, TLK110_SWSCR3_REG, (val | TLK110_SWSCR3_VAL));
++
++ val = phy_read(phydev, TLK110_SCFALLBACK_REG);
++ phy_write(phydev, TLK110_SCFALLBACK_REG, (val | TLK110_SCFALLBACK_VAL));
++
++ val = phy_read(phydev, TLK110_PHYRCR_REG);
++ phy_write(phydev, TLK110_PHYRCR_REG, (val | TLK110_PHYRCR_VAL));
++
++ return 0;
++}
++#endif
++
++static void profibus_init(int evm_id, int profile)
++{
++ setup_pin_mux(profibus_pin_mux);
++ return;
++}
++
++/* Low-Cost EVM */
++static struct evm_dev_cfg low_cost_evm_dev_cfg[] = {
++ {rgmii1_init, DEV_ON_BASEBOARD, PROFILE_NONE},
++ {usb0_init, DEV_ON_BASEBOARD, PROFILE_NONE},
++ {usb1_init, DEV_ON_BASEBOARD, PROFILE_NONE},
++ {evm_nand_init, DEV_ON_BASEBOARD, PROFILE_NONE},
++ {NULL, 0, 0},
++};
++
++/* General Purpose EVM */
++static struct evm_dev_cfg gen_purp_evm_dev_cfg[] = {
++ {enable_ecap0, DEV_ON_DGHTR_BRD, (PROFILE_0 | PROFILE_1 |
++ PROFILE_2 | PROFILE_7) },
++ {lcdc_init, DEV_ON_DGHTR_BRD, (PROFILE_0 | PROFILE_1 |
++ PROFILE_2 | PROFILE_7) },
++ {tsc_init, DEV_ON_DGHTR_BRD, (PROFILE_0 | PROFILE_1 |
++ PROFILE_2 | PROFILE_7) },
++ {rgmii1_init, DEV_ON_BASEBOARD, PROFILE_ALL},
++ {rgmii2_init, DEV_ON_DGHTR_BRD, (PROFILE_1 | PROFILE_2 |
++ PROFILE_4 | PROFILE_6) },
++ {usb0_init, DEV_ON_BASEBOARD, PROFILE_ALL},
++ {usb1_init, DEV_ON_BASEBOARD, PROFILE_ALL},
++ {evm_nand_init, DEV_ON_DGHTR_BRD,
++ (PROFILE_ALL & ~PROFILE_2 & ~PROFILE_3)},
++ {i2c1_init, DEV_ON_DGHTR_BRD, (PROFILE_ALL & ~PROFILE_2)},
++ {mcasp1_init, DEV_ON_DGHTR_BRD, (PROFILE_0 | PROFILE_3 | PROFILE_7)},
++ {mmc1_init, DEV_ON_DGHTR_BRD, PROFILE_2},
++ {mmc2_wl12xx_init, DEV_ON_BASEBOARD, (PROFILE_0 | PROFILE_3 |
++ PROFILE_5)},
++ {mmc0_init, DEV_ON_BASEBOARD, (PROFILE_ALL & ~PROFILE_5)},
++ {mmc0_no_cd_init, DEV_ON_BASEBOARD, PROFILE_5},
++ {spi0_init, DEV_ON_DGHTR_BRD, PROFILE_2},
++ {uart1_wl12xx_init, DEV_ON_BASEBOARD, (PROFILE_0 | PROFILE_3 |
++ PROFILE_5)},
++ {wl12xx_init, DEV_ON_BASEBOARD, (PROFILE_0 | PROFILE_3 | PROFILE_5)},
++ {d_can_init, DEV_ON_DGHTR_BRD, PROFILE_1},
++ {matrix_keypad_init, DEV_ON_DGHTR_BRD, PROFILE_0},
++ {volume_keys_init, DEV_ON_DGHTR_BRD, PROFILE_0},
++ {uart2_init, DEV_ON_DGHTR_BRD, PROFILE_3},
++ {haptics_init, DEV_ON_DGHTR_BRD, (PROFILE_4)},
++ {NULL, 0, 0},
++};
++
++/* Industrial Auto Motor Control EVM */
++static struct evm_dev_cfg ind_auto_mtrl_evm_dev_cfg[] = {
++ {mii1_init, DEV_ON_DGHTR_BRD, PROFILE_ALL},
++ {usb0_init, DEV_ON_BASEBOARD, PROFILE_ALL},
++ {usb1_init, DEV_ON_BASEBOARD, PROFILE_ALL},
++ {profibus_init, DEV_ON_DGHTR_BRD, PROFILE_ALL},
++ {evm_nand_init, DEV_ON_DGHTR_BRD, PROFILE_ALL},
++ {spi1_init, DEV_ON_DGHTR_BRD, PROFILE_ALL},
++ {uart3_init, DEV_ON_DGHTR_BRD, PROFILE_ALL},
++ {i2c1_init, DEV_ON_BASEBOARD, PROFILE_ALL},
++ {mmc0_no_cd_init, DEV_ON_BASEBOARD, PROFILE_ALL},
++ {NULL, 0, 0},
++};
++
++/* IP-Phone EVM */
++static struct evm_dev_cfg ip_phn_evm_dev_cfg[] = {
++ {enable_ecap0, DEV_ON_DGHTR_BRD, PROFILE_NONE},
++ {lcdc_init, DEV_ON_DGHTR_BRD, PROFILE_NONE},
++ {tsc_init, DEV_ON_DGHTR_BRD, PROFILE_NONE},
++ {rgmii1_init, DEV_ON_BASEBOARD, PROFILE_NONE},
++ {rgmii2_init, DEV_ON_DGHTR_BRD, PROFILE_NONE},
++ {usb0_init, DEV_ON_BASEBOARD, PROFILE_NONE},
++ {usb1_init, DEV_ON_BASEBOARD, PROFILE_NONE},
++ {evm_nand_init, DEV_ON_DGHTR_BRD, PROFILE_NONE},
++ {i2c1_init, DEV_ON_BASEBOARD, PROFILE_NONE},
++ {mcasp1_init, DEV_ON_DGHTR_BRD, PROFILE_NONE},
++ {mmc0_init, DEV_ON_BASEBOARD, PROFILE_NONE},
++ {NULL, 0, 0},
++};
++
++/* Beaglebone < Rev A3 */
++static struct evm_dev_cfg beaglebone_old_dev_cfg[] = {
++ {rmii1_init, DEV_ON_BASEBOARD, PROFILE_NONE},
++ {usb0_init, DEV_ON_BASEBOARD, PROFILE_NONE},
++ {usb1_init, DEV_ON_BASEBOARD, PROFILE_NONE},
++ {i2c2_init, DEV_ON_BASEBOARD, PROFILE_NONE},
++ {mmc0_init, DEV_ON_BASEBOARD, PROFILE_NONE},
++ {boneleds_init, DEV_ON_BASEBOARD, PROFILE_ALL},
++ {NULL, 0, 0},
++};
++
++/* Beaglebone Rev A3 and after */
++static struct evm_dev_cfg beaglebone_dev_cfg[] = {
++ {tps65217_init, DEV_ON_BASEBOARD, PROFILE_NONE},
++ {mii1_init, DEV_ON_BASEBOARD, PROFILE_NONE},
++ {usb0_init, DEV_ON_BASEBOARD, PROFILE_NONE},
++ {usb1_init, DEV_ON_BASEBOARD, PROFILE_NONE},
++ {i2c2_init, DEV_ON_BASEBOARD, PROFILE_NONE},
++ {mmc0_init, DEV_ON_BASEBOARD, PROFILE_NONE},
++ {boneleds_init, DEV_ON_BASEBOARD, PROFILE_ALL},
++ {bonew1_gpio_init, DEV_ON_BASEBOARD, PROFILE_ALL},
++ {NULL, 0, 0},
++};
++
++static void setup_low_cost_evm(void)
++{
++	pr_info("The board is an AM335x Low Cost EVM.\n");
++
++ _configure_device(LOW_COST_EVM, low_cost_evm_dev_cfg, PROFILE_NONE);
++}
++
++static void setup_general_purpose_evm(void)
++{
++ u32 prof_sel = am335x_get_profile_selection();
++ pr_info("The board is general purpose EVM in profile %d\n", prof_sel);
++
++ if (!strncmp("1.1A", config.version, 4)) {
++ pr_info("EVM version is %s\n", config.version);
++ gp_evm_revision = GP_EVM_REV_IS_1_1A;
++ } else if (!strncmp("1.0", config.version, 3)) {
++ gp_evm_revision = GP_EVM_REV_IS_1_0;
++ } else {
++		pr_err("Found invalid GP EVM revision, falling back to Rev1.1A\n");
++ gp_evm_revision = GP_EVM_REV_IS_1_1A;
++ }
++
++ if (gp_evm_revision == GP_EVM_REV_IS_1_0)
++ gigabit_enable = 0;
++ else if (gp_evm_revision == GP_EVM_REV_IS_1_1A)
++ gigabit_enable = 1;
++
++ _configure_device(GEN_PURP_EVM, gen_purp_evm_dev_cfg, (1L << prof_sel));
++}
++
++static void setup_ind_auto_motor_ctrl_evm(void)
++{
++ u32 prof_sel = am335x_get_profile_selection();
++
++ pr_info("The board is an industrial automation EVM in profile %d\n",
++ prof_sel);
++
++ /* Only Profile 0 is supported */
++ if ((1L << prof_sel) != PROFILE_0) {
++ pr_err("AM335X: Only Profile 0 is supported\n");
++ pr_err("Assuming profile 0 & continuing\n");
++ prof_sel = PROFILE_0;
++ }
++
++ _configure_device(IND_AUT_MTR_EVM, ind_auto_mtrl_evm_dev_cfg,
++ PROFILE_0);
++
++ /* Fillup global evmid */
++ am33xx_evmid_fillup(IND_AUT_MTR_EVM);
++
++ /* Initialize TLK110 PHY registers for phy version 1.0 */
++ am335x_tlk110_phy_init();
++
++
++}
++
++static void setup_ip_phone_evm(void)
++{
++ pr_info("The board is an IP phone EVM\n");
++
++ _configure_device(IP_PHN_EVM, ip_phn_evm_dev_cfg, PROFILE_NONE);
++}
++
++/* BeagleBone < Rev A3 */
++static void setup_beaglebone_old(void)
++{
++	pr_info("The board is an AM335x Beaglebone < Rev A3.\n");
++
++ /* Beagle Bone has Micro-SD slot which doesn't have Write Protect pin */
++ am335x_mmc[0].gpio_wp = -EINVAL;
++
++ _configure_device(LOW_COST_EVM, beaglebone_old_dev_cfg, PROFILE_NONE);
++
++ phy_register_fixup_for_uid(BBB_PHY_ID, BBB_PHY_MASK,
++ beaglebone_phy_fixup);
++
++ /* Fill up global evmid */
++ am33xx_evmid_fillup(BEAGLE_BONE_OLD);
++}
++
++/* BeagleBone after Rev A3 */
++static void setup_beaglebone(void)
++{
++	pr_info("The board is an AM335x Beaglebone.\n");
++ gp_evm_revision = GP_EVM_ACTUALLY_BEAGLEBONE;
++
++ /* Beagle Bone has Micro-SD slot which doesn't have Write Protect pin */
++ am335x_mmc[0].gpio_wp = -EINVAL;
++
++ _configure_device(LOW_COST_EVM, beaglebone_dev_cfg, PROFILE_NONE);
++
++ /* TPS65217 regulator has full constraints */
++ regulator_has_full_constraints();
++
++ /* Fill up global evmid */
++ am33xx_evmid_fillup(BEAGLE_BONE_A3);
++}
++
++
++static void am335x_setup_daughter_board(struct memory_accessor *m, void *c)
++{
++ int ret;
++
++ /*
++ * Read from the EEPROM to see the presence of daughter board.
++ * If present, print the cpld version.
++ */
++
++ ret = m->read(m, (char *)&config1, 0, sizeof(config1));
++ if (ret == sizeof(config1)) {
++		pr_info("Detected a daughter card on AM335x EVM\n");
++ daughter_brd_detected = true;
++ }
++ else {
++ pr_info("No daughter card found\n");
++ daughter_brd_detected = false;
++ return;
++ }
++
++ if (!strncmp("CPLD", config1.cpld_ver, 4))
++ pr_info("CPLD version: %s\n", config1.cpld_ver);
++ else
++ pr_err("Unknown CPLD version found\n");
++}
++
++static void am335x_evm_setup(struct memory_accessor *mem_acc, void *context)
++{
++ int ret;
++ char tmp[10];
++
++ /* 1st get the MAC address from EEPROM */
++ ret = mem_acc->read(mem_acc, (char *)&am335x_mac_addr,
++ EEPROM_MAC_ADDRESS_OFFSET, sizeof(am335x_mac_addr));
++
++ if (ret != sizeof(am335x_mac_addr)) {
++ pr_warning("AM335X: EVM Config read fail: %d\n", ret);
++ return;
++ }
++
++ /* Fillup global mac id */
++ am33xx_cpsw_macidfillup(&am335x_mac_addr[0][0],
++ &am335x_mac_addr[1][0]);
++
++ /* get board specific data */
++ ret = mem_acc->read(mem_acc, (char *)&config, 0, sizeof(config));
++ if (ret != sizeof(config)) {
++ pr_err("AM335X EVM config read fail, read %d bytes\n", ret);
++		pr_err("This likely means that the EEPROM is missing or has failed\n");
++ goto out;
++ }
++
++ if (config.header != AM335X_EEPROM_HEADER) {
++ pr_err("AM335X: wrong header 0x%x, expected 0x%x\n",
++ config.header, AM335X_EEPROM_HEADER);
++ goto out;
++ }
++
++ if (strncmp("A335", config.name, 4)) {
++ pr_err("Board %s\ndoesn't look like an AM335x board\n",
++ config.name);
++ goto out;
++ }
++
++ snprintf(tmp, sizeof(config.name) + 1, "%s", config.name);
++ pr_info("Board name: %s\n", tmp);
++ snprintf(tmp, sizeof(config.version) + 1, "%s", config.version);
++ pr_info("Board version: %s\n", tmp);
++
++ if (!strncmp("A335BONE", config.name, 8)) {
++ daughter_brd_detected = false;
++ if(!strncmp("00A1", config.version, 4) ||
++ !strncmp("00A2", config.version, 4))
++ setup_beaglebone_old();
++ else
++ setup_beaglebone();
++ } else {
++ /* only 6 characters of options string used for now */
++ snprintf(tmp, 7, "%s", config.opt);
++ pr_info("SKU: %s\n", tmp);
++
++ if (!strncmp("SKU#00", config.opt, 6))
++ setup_low_cost_evm();
++ else if (!strncmp("SKU#01", config.opt, 6))
++ setup_general_purpose_evm();
++ else if (!strncmp("SKU#02", config.opt, 6))
++ setup_ind_auto_motor_ctrl_evm();
++ else if (!strncmp("SKU#03", config.opt, 6))
++ setup_ip_phone_evm();
++ else
++ goto out;
++ }
++	/* Initialize cpsw after board detection is completed, as board
++	 * information is required for configuring the phy address and
++	 * hence it must be called only after board detection.
++ */
++ am33xx_cpsw_init(gigabit_enable);
++
++ return;
++
++out:
++ /*
++ * If the EEPROM hasn't been programed or an incorrect header
++ * or board name are read then the hardware details are unknown.
++ * Notify the user and call machine_halt to stop the boot process.
++ */
++ pr_err("The error message above indicates that there is an issue with\n"
++ "the EEPROM or the EEPROM contents. After verifying the EEPROM\n"
++ "contents, if any, refer to the %s function in the\n"
++ "%s file to modify the board\n"
++ "initialization code to match the hardware configuration\n",
++ __func__ , __FILE__);
++ machine_halt();
++}
++
++static struct at24_platform_data am335x_daughter_board_eeprom_info = {
++ .byte_len = (256*1024) / 8,
++ .page_size = 64,
++ .flags = AT24_FLAG_ADDR16,
++ .setup = am335x_setup_daughter_board,
++ .context = (void *)NULL,
++};
++
++static struct at24_platform_data am335x_baseboard_eeprom_info = {
++ .byte_len = (256*1024) / 8,
++ .page_size = 64,
++ .flags = AT24_FLAG_ADDR16,
++ .setup = am335x_evm_setup,
++ .context = (void *)NULL,
++};
++
++static struct regulator_init_data am335x_dummy = {
++ .constraints.always_on = true,
++};
++
++static struct regulator_consumer_supply am335x_vdd1_supply[] = {
++ REGULATOR_SUPPLY("vdd_mpu", NULL),
++};
++
++static struct regulator_init_data am335x_vdd1 = {
++ .constraints = {
++ .min_uV = 600000,
++ .max_uV = 1500000,
++ .valid_modes_mask = REGULATOR_MODE_NORMAL,
++ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
++ .always_on = 1,
++ },
++ .num_consumer_supplies = ARRAY_SIZE(am335x_vdd1_supply),
++ .consumer_supplies = am335x_vdd1_supply,
++};
++
++static struct regulator_consumer_supply am335x_vdd2_supply[] = {
++ REGULATOR_SUPPLY("vdd_core", NULL),
++};
++
++static struct regulator_init_data am335x_vdd2 = {
++ .constraints = {
++ .min_uV = 600000,
++ .max_uV = 1500000,
++ .valid_modes_mask = REGULATOR_MODE_NORMAL,
++ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
++ .always_on = 1,
++ },
++ .num_consumer_supplies = ARRAY_SIZE(am335x_vdd2_supply),
++ .consumer_supplies = am335x_vdd2_supply,
++};
++
++static struct tps65910_board am335x_tps65910_info = {
++ .tps65910_pmic_init_data[TPS65910_REG_VRTC] = &am335x_dummy,
++ .tps65910_pmic_init_data[TPS65910_REG_VIO] = &am335x_dummy,
++ .tps65910_pmic_init_data[TPS65910_REG_VDD1] = &am335x_vdd1,
++ .tps65910_pmic_init_data[TPS65910_REG_VDD2] = &am335x_vdd2,
++ .tps65910_pmic_init_data[TPS65910_REG_VDD3] = &am335x_dummy,
++ .tps65910_pmic_init_data[TPS65910_REG_VDIG1] = &am335x_dummy,
++ .tps65910_pmic_init_data[TPS65910_REG_VDIG2] = &am335x_dummy,
++ .tps65910_pmic_init_data[TPS65910_REG_VPLL] = &am335x_dummy,
++ .tps65910_pmic_init_data[TPS65910_REG_VDAC] = &am335x_dummy,
++ .tps65910_pmic_init_data[TPS65910_REG_VAUX1] = &am335x_dummy,
++ .tps65910_pmic_init_data[TPS65910_REG_VAUX2] = &am335x_dummy,
++ .tps65910_pmic_init_data[TPS65910_REG_VAUX33] = &am335x_dummy,
++ .tps65910_pmic_init_data[TPS65910_REG_VMMC] = &am335x_dummy,
++};
++
++/*
++ * Daughter board detection.
++ * Every board has an ID memory (EEPROM) on board. We probe these
++ * devices at machine init, starting with the daughter board and
++ * ending with the baseboard.
++ * Assumption:
++ * 1. The I2C devices are probed in the order they are listed in the
++ *    struct below: daughter board EEPROMs first, baseboard EEPROM last.
++ */
++static struct i2c_board_info __initdata am335x_i2c_boardinfo[] = {
++ {
++ /* Daughter Board EEPROM */
++ I2C_BOARD_INFO("24c256", DAUG_BOARD_I2C_ADDR),
++ .platform_data = &am335x_daughter_board_eeprom_info,
++ },
++ {
++ /* Baseboard board EEPROM */
++ I2C_BOARD_INFO("24c256", BASEBOARD_I2C_ADDR),
++ .platform_data = &am335x_baseboard_eeprom_info,
++ },
++ {
++ I2C_BOARD_INFO("cpld_reg", 0x35),
++ },
++ {
++ I2C_BOARD_INFO("tlc59108", 0x40),
++ },
++ {
++ I2C_BOARD_INFO("tps65910", TPS65910_I2C_ID1),
++ .platform_data = &am335x_tps65910_info,
++ },
++};
++
++static struct omap_musb_board_data musb_board_data = {
++ .interface_type = MUSB_INTERFACE_ULPI,
++ /*
++ * mode[0:3] = USB0PORT's mode
++ * mode[4:7] = USB1PORT's mode
++ * AM335X beta EVM has USB0 in OTG mode and USB1 in host mode.
++ */
++ .mode = (MUSB_HOST << 4) | MUSB_OTG,
++ .power = 500,
++ .instances = 1,
++};
++
++static int cpld_reg_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ cpld_client = client;
++ return 0;
++}
++
++static int __devexit cpld_reg_remove(struct i2c_client *client)
++{
++ cpld_client = NULL;
++ return 0;
++}
++
++static const struct i2c_device_id cpld_reg_id[] = {
++ { "cpld_reg", 0 },
++ { }
++};
++
++static struct i2c_driver cpld_reg_driver = {
++ .driver = {
++ .name = "cpld_reg",
++ },
++ .probe = cpld_reg_probe,
++ .remove = cpld_reg_remove,
++ .id_table = cpld_reg_id,
++};
++
++static void evm_init_cpld(void)
++{
++ i2c_add_driver(&cpld_reg_driver);
++}
++
++static void __init am335x_evm_i2c_init(void)
++{
++ /* Initially assume Low Cost EVM Config */
++ am335x_evm_id = LOW_COST_EVM;
++
++ evm_init_cpld();
++
++ omap_register_i2c_bus(1, 100, am335x_i2c_boardinfo,
++ ARRAY_SIZE(am335x_i2c_boardinfo));
++}
++
++static struct resource am335x_rtc_resources[] = {
++ {
++ .start = AM33XX_RTC_BASE,
++ .end = AM33XX_RTC_BASE + SZ_4K - 1,
++ .flags = IORESOURCE_MEM,
++ },
++ { /* timer irq */
++ .start = AM33XX_IRQ_RTC_TIMER,
++ .end = AM33XX_IRQ_RTC_TIMER,
++ .flags = IORESOURCE_IRQ,
++ },
++ { /* alarm irq */
++ .start = AM33XX_IRQ_RTC_ALARM,
++ .end = AM33XX_IRQ_RTC_ALARM,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static struct platform_device am335x_rtc_device = {
++ .name = "omap_rtc",
++ .id = -1,
++ .num_resources = ARRAY_SIZE(am335x_rtc_resources),
++ .resource = am335x_rtc_resources,
++};
++
++static int am335x_rtc_init(void)
++{
++ void __iomem *base;
++ struct clk *clk;
++
++ clk = clk_get(NULL, "rtc_fck");
++ if (IS_ERR(clk)) {
++ pr_err("rtc : Failed to get RTC clock\n");
++ return -1;
++ }
++
++ if (clk_enable(clk)) {
++ pr_err("rtc: Clock Enable Failed\n");
++ return -1;
++ }
++
++ base = ioremap(AM33XX_RTC_BASE, SZ_4K);
++
++ if (WARN_ON(!base))
++ return -ENOMEM;
++
++ /* Unlock the rtc's registers */
++ writel(0x83e70b13, base + 0x6c);
++ writel(0x95a4f1e0, base + 0x70);
++
++ /*
++ * Enable the 32K OSc
++ * TODO: Need a better way to handle this
++ * Since we want the clock to be running before mmc init
++ * we need to do it before the rtc probe happens
++ */
++ writel(0x48, base + 0x54);
++
++ iounmap(base);
++
++ return platform_device_register(&am335x_rtc_device);
++}
++
++/* Enable clkout2 */
++static struct pinmux_config clkout2_pin_mux[] = {
++ {"xdma_event_intr1.clkout2", OMAP_MUX_MODE3 | AM33XX_PIN_OUTPUT},
++ {NULL, 0},
++};
++
++static void __init clkout2_enable(void)
++{
++ struct clk *ck_32;
++
++ ck_32 = clk_get(NULL, "clkout2_ck");
++ if (IS_ERR(ck_32)) {
++ pr_err("Cannot clk_get ck_32\n");
++ return;
++ }
++
++ clk_enable(ck_32);
++
++ setup_pin_mux(clkout2_pin_mux);
++}
++
++void __iomem *am33xx_emif_base;
++
++void __iomem * __init am33xx_get_mem_ctlr(void)
++{
++
++ am33xx_emif_base = ioremap(AM33XX_EMIF0_BASE, SZ_32K);
++
++ if (!am33xx_emif_base)
++		pr_warning("%s: Unable to map DDR2 controller\n", __func__);
++
++ return am33xx_emif_base;
++}
++
++void __iomem *am33xx_get_ram_base(void)
++{
++ return am33xx_emif_base;
++}
++
++static struct resource am33xx_cpuidle_resources[] = {
++ {
++ .start = AM33XX_EMIF0_BASE,
++ .end = AM33XX_EMIF0_BASE + SZ_32K - 1,
++ .flags = IORESOURCE_MEM,
++ },
++};
++
++/* AM33XX devices support DDR2 power down */
++static struct am33xx_cpuidle_config am33xx_cpuidle_pdata = {
++ .ddr2_pdown = 1,
++};
++
++static struct platform_device am33xx_cpuidle_device = {
++ .name = "cpuidle-am33xx",
++ .num_resources = ARRAY_SIZE(am33xx_cpuidle_resources),
++ .resource = am33xx_cpuidle_resources,
++ .dev = {
++ .platform_data = &am33xx_cpuidle_pdata,
++ },
++};
++
++static void __init am33xx_cpuidle_init(void)
++{
++ int ret;
++
++ am33xx_cpuidle_pdata.emif_base = am33xx_get_mem_ctlr();
++
++ ret = platform_device_register(&am33xx_cpuidle_device);
++
++ if (ret)
++ pr_warning("AM33XX cpuidle registration failed\n");
++
++}
++
++static void __init am335x_evm_init(void)
++{
++ am33xx_cpuidle_init();
++ am33xx_mux_init(board_mux);
++ omap_serial_init();
++ am335x_rtc_init();
++ clkout2_enable();
++ am335x_evm_i2c_init();
++ omap_sdrc_init(NULL, NULL);
++ usb_musb_init(&musb_board_data);
++ omap_board_config = am335x_evm_config;
++ omap_board_config_size = ARRAY_SIZE(am335x_evm_config);
++ /* Create an alias for icss clock */
++ if (clk_add_alias("pruss", NULL, "pruss_uart_gclk", NULL))
++		pr_warn("failed to create an alias: pruss_uart_gclk --> pruss\n");
++ /* Create an alias for gfx/sgx clock */
++ if (clk_add_alias("sgx_ck", NULL, "gfx_fclk", NULL))
++ pr_warn("failed to create an alias: gfx_fclk --> sgx_ck\n");
++}
++
++static void __init am335x_evm_map_io(void)
++{
++ omap2_set_globals_am33xx();
++ omapam33xx_map_common_io();
++}
++
++MACHINE_START(AM335XEVM, "am335xevm")
++ /* Maintainer: Texas Instruments */
++ .atag_offset = 0x100,
++ .map_io = am335x_evm_map_io,
++ .init_early = am33xx_init_early,
++ .init_irq = ti81xx_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
++ .timer = &omap3_am33xx_timer,
++ .init_machine = am335x_evm_init,
++MACHINE_END
++
++MACHINE_START(AM335XIAEVM, "am335xiaevm")
++ /* Maintainer: Texas Instruments */
++ .atag_offset = 0x100,
++ .map_io = am335x_evm_map_io,
++	.init_early	= am33xx_init_early,
++	.init_irq	= ti81xx_init_irq,
++	.handle_irq	= omap3_intc_handle_irq,
++	.timer		= &omap3_am33xx_timer,
++ .init_machine = am335x_evm_init,
++MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-am3517crane.c b/arch/arm/mach-omap2/board-am3517crane.c
+index 7834536..7e90f93 100644
+--- a/arch/arm/mach-omap2/board-am3517crane.c
++++ b/arch/arm/mach-omap2/board-am3517crane.c
+@@ -27,7 +27,7 @@
+ #include <asm/mach/map.h>
+
+ #include <plat/board.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/usb.h>
+
+ #include "mux.h"
+@@ -98,6 +98,7 @@ MACHINE_START(CRANEBOARD, "AM3517/05 CRANEBOARD")
+ .map_io = omap3_map_io,
+ .init_early = am35xx_init_early,
+ .init_irq = omap3_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = am3517_crane_init,
+ .timer = &omap3_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
+index d314f03..3a44f07 100644
+--- a/arch/arm/mach-omap2/board-am3517evm.c
++++ b/arch/arm/mach-omap2/board-am3517evm.c
+@@ -24,6 +24,7 @@
+ #include <linux/i2c/pca953x.h>
+ #include <linux/can/platform/ti_hecc.h>
+ #include <linux/davinci_emac.h>
++#include <linux/mmc/host.h>
+
+ #include <mach/hardware.h>
+ #include <mach/am35xx.h>
+@@ -32,7 +33,7 @@
+ #include <asm/mach/map.h>
+
+ #include <plat/board.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/usb.h>
+ #include <video/omapdss.h>
+ #include <video/omap-panel-generic-dpi.h>
+@@ -40,6 +41,7 @@
+
+ #include "mux.h"
+ #include "control.h"
++#include "hsmmc.h"
+
+ #define AM35XX_EVM_MDIO_FREQUENCY (1000000)
+
+@@ -455,6 +457,23 @@ static void am3517_evm_hecc_init(struct ti_hecc_platform_data *pdata)
+ static struct omap_board_config_kernel am3517_evm_config[] __initdata = {
+ };
+
++static struct omap2_hsmmc_info mmc[] = {
++ {
++ .mmc = 1,
++ .caps = MMC_CAP_4_BIT_DATA,
++ .gpio_cd = 127,
++ .gpio_wp = 126,
++ },
++ {
++ .mmc = 2,
++ .caps = MMC_CAP_4_BIT_DATA,
++ .gpio_cd = 128,
++ .gpio_wp = 129,
++ },
++ {} /* Terminator */
++};
++
++
+ static void __init am3517_evm_init(void)
+ {
+ omap_board_config = am3517_evm_config;
+@@ -483,6 +502,9 @@ static void __init am3517_evm_init(void)
+
+ /* MUSB */
+ am3517_evm_musb_init();
++
++ /* MMC init function */
++ omap2_hsmmc_init(mmc);
+ }
+
+ MACHINE_START(OMAP3517EVM, "OMAP3517/AM3517 EVM")
+@@ -491,6 +513,7 @@ MACHINE_START(OMAP3517EVM, "OMAP3517/AM3517 EVM")
+ .map_io = omap3_map_io,
+ .init_early = am35xx_init_early,
+ .init_irq = omap3_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = am3517_evm_init,
+ .timer = &omap3_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-apollon.c b/arch/arm/mach-omap2/board-apollon.c
+index de8134b..5a66480 100644
+--- a/arch/arm/mach-omap2/board-apollon.c
++++ b/arch/arm/mach-omap2/board-apollon.c
+@@ -37,7 +37,7 @@
+ #include <plat/led.h>
+ #include <plat/usb.h>
+ #include <plat/board.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/gpmc.h>
+
+ #include <video/omapdss.h>
+@@ -354,6 +354,7 @@ MACHINE_START(OMAP_APOLLON, "OMAP24xx Apollon")
+ .map_io = omap242x_map_io,
+ .init_early = omap2420_init_early,
+ .init_irq = omap2_init_irq,
++ .handle_irq = omap2_intc_handle_irq,
+ .init_machine = omap_apollon_init,
+ .timer = &omap2_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
+index bd1bcac..4cc2f04 100644
+--- a/arch/arm/mach-omap2/board-cm-t35.c
++++ b/arch/arm/mach-omap2/board-cm-t35.c
+@@ -37,7 +37,7 @@
+ #include <asm/mach/map.h>
+
+ #include <plat/board.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/nand.h>
+ #include <plat/gpmc.h>
+ #include <plat/usb.h>
+@@ -53,7 +53,8 @@
+ #include "hsmmc.h"
+ #include "common-board-devices.h"
+
+-#define CM_T35_GPIO_PENDOWN 57
++#define CM_T35_GPIO_PENDOWN 57
++#define SB_T35_USB_HUB_RESET_GPIO 167
+
+ #define CM_T35_SMSC911X_CS 5
+ #define CM_T35_SMSC911X_GPIO 163
+@@ -279,7 +280,6 @@ static struct omap_dss_board_info cm_t35_dss_data = {
+
+ static struct omap2_mcspi_device_config tdo24m_mcspi_config = {
+ .turbo_mode = 0,
+- .single_channel = 1, /* 0: slave, 1: master */
+ };
+
+ static struct tdo24m_platform_data tdo24m_config = {
+@@ -339,8 +339,10 @@ static struct regulator_consumer_supply cm_t35_vsim_supply[] = {
+ REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.0"),
+ };
+
+-static struct regulator_consumer_supply cm_t35_vdvi_supply[] = {
+- REGULATOR_SUPPLY("vdvi", "omapdss"),
++static struct regulator_consumer_supply cm_t35_vio_supplies[] = {
++ REGULATOR_SUPPLY("vcc", "spi1.0"),
++ REGULATOR_SUPPLY("vdds_dsi", "omapdss"),
++ REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi1"),
+ };
+
+ /* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
+@@ -373,6 +375,19 @@ static struct regulator_init_data cm_t35_vsim = {
+ .consumer_supplies = cm_t35_vsim_supply,
+ };
+
++static struct regulator_init_data cm_t35_vio = {
++ .constraints = {
++ .min_uV = 1800000,
++ .max_uV = 1800000,
++ .apply_uV = true,
++ .valid_modes_mask = REGULATOR_MODE_NORMAL
++ | REGULATOR_MODE_STANDBY,
++ .valid_ops_mask = REGULATOR_CHANGE_MODE,
++ },
++ .num_consumer_supplies = ARRAY_SIZE(cm_t35_vio_supplies),
++ .consumer_supplies = cm_t35_vio_supplies,
++};
++
+ static uint32_t cm_t35_keymap[] = {
+ KEY(0, 0, KEY_A), KEY(0, 1, KEY_B), KEY(0, 2, KEY_LEFT),
+ KEY(1, 0, KEY_UP), KEY(1, 1, KEY_ENTER), KEY(1, 2, KEY_DOWN),
+@@ -421,6 +436,23 @@ static struct usbhs_omap_board_data usbhs_bdata __initdata = {
+ .reset_gpio_port[2] = -EINVAL
+ };
+
++static void cm_t35_init_usbh(void)
++{
++ int err;
++
++ err = gpio_request_one(SB_T35_USB_HUB_RESET_GPIO,
++ GPIOF_OUT_INIT_LOW, "usb hub rst");
++ if (err) {
++ pr_err("SB-T35: usb hub rst gpio request failed: %d\n", err);
++ } else {
++ udelay(10);
++ gpio_set_value(SB_T35_USB_HUB_RESET_GPIO, 1);
++ msleep(1);
++ }
++
++ usbhs_init(&usbhs_bdata);
++}
++
+ static int cm_t35_twl_gpio_setup(struct device *dev, unsigned gpio,
+ unsigned ngpio)
+ {
+@@ -456,17 +488,14 @@ static struct twl4030_platform_data cm_t35_twldata = {
+ .gpio = &cm_t35_gpio_data,
+ .vmmc1 = &cm_t35_vmmc1,
+ .vsim = &cm_t35_vsim,
++ .vio = &cm_t35_vio,
+ };
+
+ static void __init cm_t35_init_i2c(void)
+ {
+ omap3_pmic_get_config(&cm_t35_twldata, TWL_COMMON_PDATA_USB,
+- TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
+-
+- cm_t35_twldata.vpll2->constraints.name = "VDVI";
+- cm_t35_twldata.vpll2->num_consumer_supplies =
+- ARRAY_SIZE(cm_t35_vdvi_supply);
+- cm_t35_twldata.vpll2->consumer_supplies = cm_t35_vdvi_supply;
++ TWL_COMMON_REGULATOR_VDAC |
++ TWL_COMMON_PDATA_AUDIO);
+
+ omap3_pmic_init("tps65930", &cm_t35_twldata);
+ }
+@@ -570,24 +599,28 @@ static void __init cm_t3x_common_dss_mux_init(int mux_mode)
+
+ static void __init cm_t35_init_mux(void)
+ {
+- omap_mux_init_signal("gpio_70", OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT);
+- omap_mux_init_signal("gpio_71", OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT);
+- omap_mux_init_signal("gpio_72", OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT);
+- omap_mux_init_signal("gpio_73", OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT);
+- omap_mux_init_signal("gpio_74", OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT);
+- omap_mux_init_signal("gpio_75", OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT);
+- cm_t3x_common_dss_mux_init(OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT);
++ int mux_mode = OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT;
++
++ omap_mux_init_signal("dss_data0.dss_data0", mux_mode);
++ omap_mux_init_signal("dss_data1.dss_data1", mux_mode);
++ omap_mux_init_signal("dss_data2.dss_data2", mux_mode);
++ omap_mux_init_signal("dss_data3.dss_data3", mux_mode);
++ omap_mux_init_signal("dss_data4.dss_data4", mux_mode);
++ omap_mux_init_signal("dss_data5.dss_data5", mux_mode);
++ cm_t3x_common_dss_mux_init(mux_mode);
+ }
+
+ static void __init cm_t3730_init_mux(void)
+ {
+- omap_mux_init_signal("sys_boot0", OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT);
+- omap_mux_init_signal("sys_boot1", OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT);
+- omap_mux_init_signal("sys_boot3", OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT);
+- omap_mux_init_signal("sys_boot4", OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT);
+- omap_mux_init_signal("sys_boot5", OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT);
+- omap_mux_init_signal("sys_boot6", OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT);
+- cm_t3x_common_dss_mux_init(OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT);
++ int mux_mode = OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT;
++
++ omap_mux_init_signal("sys_boot0", mux_mode);
++ omap_mux_init_signal("sys_boot1", mux_mode);
++ omap_mux_init_signal("sys_boot3", mux_mode);
++ omap_mux_init_signal("sys_boot4", mux_mode);
++ omap_mux_init_signal("sys_boot5", mux_mode);
++ omap_mux_init_signal("sys_boot6", mux_mode);
++ cm_t3x_common_dss_mux_init(mux_mode);
+ }
+ #else
+ static inline void cm_t35_init_mux(void) {}
+@@ -612,7 +645,7 @@ static void __init cm_t3x_common_init(void)
+ cm_t35_init_display();
+
+ usb_musb_init(NULL);
+- usbhs_init(&usbhs_bdata);
++ cm_t35_init_usbh();
+ }
+
+ static void __init cm_t35_init(void)
+@@ -634,6 +667,7 @@ MACHINE_START(CM_T35, "Compulab CM-T35")
+ .map_io = omap3_map_io,
+ .init_early = omap35xx_init_early,
+ .init_irq = omap3_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = cm_t35_init,
+ .timer = &omap3_timer,
+ MACHINE_END
+@@ -644,6 +678,7 @@ MACHINE_START(CM_T3730, "Compulab CM-T3730")
+ .map_io = omap3_map_io,
+ .init_early = omap3630_init_early,
+ .init_irq = omap3_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = cm_t3730_init,
+ .timer = &omap3_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-cm-t3517.c b/arch/arm/mach-omap2/board-cm-t3517.c
+index 3f4dc66..efc5ced 100644
+--- a/arch/arm/mach-omap2/board-cm-t3517.c
++++ b/arch/arm/mach-omap2/board-cm-t3517.c
+@@ -39,7 +39,7 @@
+ #include <asm/mach/map.h>
+
+ #include <plat/board.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/usb.h>
+ #include <plat/nand.h>
+ #include <plat/gpmc.h>
+@@ -299,6 +299,7 @@ MACHINE_START(CM_T3517, "Compulab CM-T3517")
+ .map_io = omap3_map_io,
+ .init_early = am35xx_init_early,
+ .init_irq = omap3_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = cm_t3517_init,
+ .timer = &omap3_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
+index 90154e4..0a00ce6 100644
+--- a/arch/arm/mach-omap2/board-devkit8000.c
++++ b/arch/arm/mach-omap2/board-devkit8000.c
+@@ -41,7 +41,7 @@
+ #include <asm/mach/flash.h>
+
+ #include <plat/board.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/gpmc.h>
+ #include <plat/nand.h>
+ #include <plat/usb.h>
+@@ -59,6 +59,7 @@
+
+ #include "mux.h"
+ #include "hsmmc.h"
++#include "board-flash.h"
+ #include "common-board-devices.h"
+
+ #define OMAP_DM9000_GPIO_IRQ 25
+@@ -646,8 +647,9 @@ static void __init devkit8000_init(void)
+
+ usb_musb_init(NULL);
+ usbhs_init(&usbhs_bdata);
+- omap_nand_flash_init(NAND_BUSWIDTH_16, devkit8000_nand_partitions,
+- ARRAY_SIZE(devkit8000_nand_partitions));
++ omap_nand_init(devkit8000_nand_partitions,
++ ARRAY_SIZE(devkit8000_nand_partitions), GPMC_CS_NUM + 1,
++ NAND_BUSWIDTH_16, NULL);
+
+ /* Ensure SDRC pins are mux'd for self-refresh */
+ omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
+@@ -660,6 +662,7 @@ MACHINE_START(DEVKIT8000, "OMAP3 Devkit8000")
+ .map_io = omap3_map_io,
+ .init_early = omap35xx_init_early,
+ .init_irq = omap3_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = devkit8000_init,
+ .timer = &omap3_secure_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-flash.c b/arch/arm/mach-omap2/board-flash.c
+index 30a6f52..915ec79 100644
+--- a/arch/arm/mach-omap2/board-flash.c
++++ b/arch/arm/mach-omap2/board-flash.c
+@@ -104,11 +104,8 @@ __init board_onenand_init(struct mtd_partition *nor_parts, u8 nr_parts, u8 cs)
+ }
+ #endif /* CONFIG_MTD_ONENAND_OMAP2 || CONFIG_MTD_ONENAND_OMAP2_MODULE */
+
+-#if defined(CONFIG_MTD_NAND_OMAP2) || \
+- defined(CONFIG_MTD_NAND_OMAP2_MODULE)
+-
+ /* Note that all values in this struct are in nanoseconds */
+-static struct gpmc_timings nand_timings = {
++struct gpmc_timings nand_default_timings = {
+
+ .sync_clk = 0,
+
+@@ -131,22 +128,24 @@ static struct gpmc_timings nand_timings = {
+ .wr_data_mux_bus = 0,
+ };
+
+-static struct omap_nand_platform_data board_nand_data = {
+- .gpmc_t = &nand_timings,
++#if defined(CONFIG_MTD_NAND_OMAP2) || \
++ defined(CONFIG_MTD_NAND_OMAP2_MODULE)
++
++static struct omap_nand_platform_data omap_nand_data = {
++ .gpmc_t = &nand_default_timings,
+ };
+
+-void
+-__init board_nand_init(struct mtd_partition *nand_parts,
+- u8 nr_parts, u8 cs, int nand_type)
++struct omap_nand_platform_data *
++__init omap_nand_init(struct mtd_partition *nand_parts, u8 nr_parts, u8 cs,
++ int nand_type, struct gpmc_timings *gpmc_t)
+ {
+- board_nand_data.cs = cs;
+- board_nand_data.parts = nand_parts;
+- board_nand_data.nr_parts = nr_parts;
+- board_nand_data.devsize = nand_type;
+-
+- board_nand_data.ecc_opt = OMAP_ECC_HAMMING_CODE_DEFAULT;
+- board_nand_data.gpmc_irq = OMAP_GPMC_IRQ_BASE + cs;
+- gpmc_nand_init(&board_nand_data);
++ omap_nand_data.cs = cs;
++ omap_nand_data.parts = nand_parts;
++ omap_nand_data.nr_parts = nr_parts;
++ omap_nand_data.devsize = nand_type;
++ omap_nand_data.gpmc_t = gpmc_t;
++
++ return &omap_nand_data;
+ }
+ #endif /* CONFIG_MTD_NAND_OMAP2 || CONFIG_MTD_NAND_OMAP2_MODULE */
+
+@@ -242,6 +241,7 @@ void board_flash_init(struct flash_partitions partition_info[],
+ if (nandcs > GPMC_CS_NUM)
+ pr_err("NAND: Unable to find configuration in GPMC\n");
+ else
+- board_nand_init(partition_info[2].parts,
+- partition_info[2].nr_parts, nandcs, nand_type);
++ omap_nand_init(partition_info[2].parts,
++ partition_info[2].nr_parts, nandcs,
++ nand_type, &nand_default_timings);
+ }
+diff --git a/arch/arm/mach-omap2/board-flash.h b/arch/arm/mach-omap2/board-flash.h
+index d25503a..93c02cc 100644
+--- a/arch/arm/mach-omap2/board-flash.h
++++ b/arch/arm/mach-omap2/board-flash.h
+@@ -39,11 +39,15 @@ static inline void board_flash_init(struct flash_partitions part[],
+
+ #if defined(CONFIG_MTD_NAND_OMAP2) || \
+ defined(CONFIG_MTD_NAND_OMAP2_MODULE)
+-extern void board_nand_init(struct mtd_partition *nand_parts,
+- u8 nr_parts, u8 cs, int nand_type);
++extern struct gpmc_timings nand_default_timings;
++extern struct omap_nand_platform_data *
++__init omap_nand_init(struct mtd_partition *nand_parts, u8 nr_parts, u8 cs,
++ int nand_type, struct gpmc_timings *gpmc_t);
+ #else
+-static inline void board_nand_init(struct mtd_partition *nand_parts,
+- u8 nr_parts, u8 cs, int nand_type)
++static inline struct omap_nand_platform_data *
++omap_nand_init(struct mtd_partition *nand_parts,
++ u8 nr_parts, u8 cs, int nand_type, struct gpmc_timings *gpmc_t)
+ {
++ return NULL;
+ }
+ #endif
+diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
+index fb55fa3d..f2389e4 100644
+--- a/arch/arm/mach-omap2/board-generic.c
++++ b/arch/arm/mach-omap2/board-generic.c
+@@ -17,11 +17,11 @@
+ #include <linux/i2c/twl.h>
+
+ #include <mach/hardware.h>
++#include <asm/hardware/gic.h>
+ #include <asm/mach/arch.h>
+
+ #include <plat/board.h>
+-#include <plat/common.h>
+-#include <mach/omap4-common.h>
++#include "common.h"
+ #include "common-board-devices.h"
+
+ /*
+@@ -70,7 +70,6 @@ static void __init omap_generic_init(void)
+ if (node)
+ irq_domain_add_simple(node, 0);
+
+- omap_serial_init();
+ omap_sdrc_init(NULL, NULL);
+
+ of_platform_populate(NULL, omap_dt_match_table, NULL, NULL);
+@@ -104,6 +103,7 @@ DT_MACHINE_START(OMAP242X_DT, "Generic OMAP2420 (Flattened Device Tree)")
+ .map_io = omap242x_map_io,
+ .init_early = omap2420_init_early,
+ .init_irq = omap2_init_irq,
++ .handle_irq = omap2_intc_handle_irq,
+ .init_machine = omap_generic_init,
+ .timer = &omap2_timer,
+ .dt_compat = omap242x_boards_compat,
+@@ -122,6 +122,7 @@ DT_MACHINE_START(OMAP243X_DT, "Generic OMAP2430 (Flattened Device Tree)")
+ .map_io = omap243x_map_io,
+ .init_early = omap2430_init_early,
+ .init_irq = omap2_init_irq,
++ .handle_irq = omap2_intc_handle_irq,
+ .init_machine = omap_generic_init,
+ .timer = &omap2_timer,
+ .dt_compat = omap243x_boards_compat,
+@@ -140,6 +141,7 @@ DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)")
+ .map_io = omap3_map_io,
+ .init_early = omap3430_init_early,
+ .init_irq = omap3_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = omap3_init,
+ .timer = &omap3_timer,
+ .dt_compat = omap3_boards_compat,
+@@ -158,6 +160,7 @@ DT_MACHINE_START(OMAP4_DT, "Generic OMAP4 (Flattened Device Tree)")
+ .map_io = omap4_map_io,
+ .init_early = omap4430_init_early,
+ .init_irq = gic_init_irq,
++ .handle_irq = gic_handle_irq,
+ .init_machine = omap4_init,
+ .timer = &omap4_timer,
+ .dt_compat = omap4_boards_compat,
+diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
+index 8b351d9..ec40183 100644
+--- a/arch/arm/mach-omap2/board-h4.c
++++ b/arch/arm/mach-omap2/board-h4.c
+@@ -34,7 +34,7 @@
+
+ #include <plat/usb.h>
+ #include <plat/board.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/menelaus.h>
+ #include <plat/dma.h>
+ #include <plat/gpmc.h>
+@@ -396,6 +396,7 @@ MACHINE_START(OMAP_H4, "OMAP2420 H4 board")
+ .map_io = omap242x_map_io,
+ .init_early = omap2420_init_early,
+ .init_irq = omap2_init_irq,
++ .handle_irq = omap2_intc_handle_irq,
+ .init_machine = omap_h4_init,
+ .timer = &omap2_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
+index d0a3f78..5949f6a 100644
+--- a/arch/arm/mach-omap2/board-igep0020.c
++++ b/arch/arm/mach-omap2/board-igep0020.c
+@@ -28,7 +28,7 @@
+ #include <asm/mach/arch.h>
+
+ #include <plat/board.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/gpmc.h>
+ #include <plat/usb.h>
+ #include <video/omapdss.h>
+@@ -672,6 +672,7 @@ MACHINE_START(IGEP0020, "IGEP v2 board")
+ .map_io = omap3_map_io,
+ .init_early = omap35xx_init_early,
+ .init_irq = omap3_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = igep_init,
+ .timer = &omap3_timer,
+ MACHINE_END
+@@ -682,6 +683,7 @@ MACHINE_START(IGEP0030, "IGEP OMAP3 module")
+ .map_io = omap3_map_io,
+ .init_early = omap35xx_init_early,
+ .init_irq = omap3_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = igep_init,
+ .timer = &omap3_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
+index e179da0..09e9235 100644
+--- a/arch/arm/mach-omap2/board-ldp.c
++++ b/arch/arm/mach-omap2/board-ldp.c
+@@ -36,7 +36,7 @@
+
+ #include <plat/mcspi.h>
+ #include <plat/board.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/gpmc.h>
+ #include <mach/board-zoom.h>
+
+@@ -421,8 +421,8 @@ static void __init omap_ldp_init(void)
+ omap_serial_init();
+ omap_sdrc_init(NULL, NULL);
+ usb_musb_init(NULL);
+- board_nand_init(ldp_nand_partitions,
+- ARRAY_SIZE(ldp_nand_partitions), ZOOM_NAND_CS, 0);
++ omap_nand_init(ldp_nand_partitions, ARRAY_SIZE(ldp_nand_partitions),
++ ZOOM_NAND_CS, 0, &nand_default_timings);
+
+ omap2_hsmmc_init(mmc);
+ ldp_display_init();
+@@ -434,6 +434,7 @@ MACHINE_START(OMAP_LDP, "OMAP LDP board")
+ .map_io = omap3_map_io,
+ .init_early = omap3430_init_early,
+ .init_irq = omap3_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = omap_ldp_init,
+ .timer = &omap3_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
+index e9d5f4a..b521198 100644
+--- a/arch/arm/mach-omap2/board-n8x0.c
++++ b/arch/arm/mach-omap2/board-n8x0.c
+@@ -15,8 +15,11 @@
+ #include <linux/delay.h>
+ #include <linux/gpio.h>
+ #include <linux/init.h>
++#include <linux/irq.h>
+ #include <linux/io.h>
+ #include <linux/stddef.h>
++#include <linux/platform_device.h>
++#include <linux/platform_data/cbus.h>
+ #include <linux/i2c.h>
+ #include <linux/spi/spi.h>
+ #include <linux/usb/musb.h>
+@@ -26,7 +29,7 @@
+ #include <asm/mach-types.h>
+
+ #include <plat/board.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/menelaus.h>
+ #include <mach/irqs.h>
+ #include <plat/mcspi.h>
+@@ -46,7 +49,7 @@ static struct device *mmc_device;
+ #define TUSB6010_GPIO_ENABLE 0
+ #define TUSB6010_DMACHAN 0x3f
+
+-#ifdef CONFIG_USB_MUSB_TUSB6010
++#if defined(CONFIG_USB_MUSB_TUSB6010) || defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
+ /*
+ * Enable or disable power to TUSB6010. When enabling, turn on 3.3 V and
+ * 1.5 V voltage regulators of PM companion chip. Companion chip will then
+@@ -137,7 +140,6 @@ static void __init n8x0_usb_init(void) {}
+
+ static struct omap2_mcspi_device_config p54spi_mcspi_config = {
+ .turbo_mode = 0,
+- .single_channel = 1,
+ };
+
+ static struct spi_board_info n800_spi_board_info[] __initdata = {
+@@ -193,6 +195,105 @@ static struct omap_onenand_platform_data board_onenand_data[] = {
+ };
+ #endif
+
++#if defined(CONFIG_CBUS) || defined(CONFIG_CBUS_MODULE)
++
++static struct cbus_host_platform_data n8x0_cbus_data = {
++ .clk_gpio = 66,
++ .dat_gpio = 65,
++ .sel_gpio = 64,
++};
++
++static struct platform_device n8x0_cbus_device = {
++ .name = "cbus",
++ .id = -1,
++ .dev = {
++ .platform_data = &n8x0_cbus_data,
++ },
++};
++
++static struct resource retu_resource[] = {
++ {
++ .start = -EINVAL, /* set later */
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static struct platform_device retu_device = {
++ .name = "retu",
++ .id = -1,
++ .resource = retu_resource,
++ .num_resources = ARRAY_SIZE(retu_resource),
++ .dev = {
++ .parent = &n8x0_cbus_device.dev,
++ },
++};
++
++static struct resource tahvo_resource[] = {
++ {
++ .start = -EINVAL, /* set later */
++ .flags = IORESOURCE_IRQ,
++ }
++};
++
++static struct platform_device tahvo_device = {
++ .name = "tahvo",
++ .id = -1,
++ .resource = tahvo_resource,
++ .num_resources = ARRAY_SIZE(tahvo_resource),
++ .dev = {
++ .parent = &n8x0_cbus_device.dev,
++ },
++};
++
++static void __init n8x0_cbus_init(void)
++{
++ int ret;
++
++ platform_device_register(&n8x0_cbus_device);
++
++ ret = gpio_request(108, "RETU irq");
++ if (ret < 0) {
++ pr_err("retu: Unable to reserve IRQ GPIO\n");
++ return;
++ }
++
++ ret = gpio_direction_input(108);
++ if (ret < 0) {
++ pr_err("retu: Unable to change gpio direction\n");
++ gpio_free(108);
++ return;
++ }
++
++ irq_set_irq_type(gpio_to_irq(108), IRQ_TYPE_EDGE_RISING);
++ retu_resource[0].start = gpio_to_irq(108);
++ platform_device_register(&retu_device);
++
++ ret = gpio_request(111, "TAHVO irq");
++ if (ret) {
++ pr_err("tahvo: Unable to reserve IRQ GPIO\n");
++ gpio_free(108);
++ return;
++ }
++
++ /* Set the pin as input */
++ ret = gpio_direction_input(111);
++ if (ret) {
++ pr_err("tahvo: Unable to change direction\n");
++ gpio_free(108);
++ gpio_free(111);
++ return;
++ }
++
++ tahvo_resource[0].start = gpio_to_irq(111);
++ platform_device_register(&tahvo_device);
++}
++
++#else
++static inline void __init n8x0_cbus_init(void)
++{
++}
++#endif
++
+ #if defined(CONFIG_MENELAUS) && \
+ (defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE))
+
+@@ -644,15 +745,15 @@ static inline void board_serial_init(void)
+ bdata.pads_cnt = 0;
+
+ bdata.id = 0;
+- omap_serial_init_port(&bdata);
++ omap_serial_init_port(&bdata, NULL);
+
+ bdata.id = 1;
+- omap_serial_init_port(&bdata);
++ omap_serial_init_port(&bdata, NULL);
+
+ bdata.id = 2;
+ bdata.pads = serial2_pads;
+ bdata.pads_cnt = ARRAY_SIZE(serial2_pads);
+- omap_serial_init_port(&bdata);
++ omap_serial_init_port(&bdata, NULL);
+ }
+
+ #else
+@@ -667,6 +768,8 @@ static inline void board_serial_init(void)
+ static void __init n8x0_init_machine(void)
+ {
+ omap2420_mux_init(board_mux, OMAP_PACKAGE_ZAC);
++ n8x0_cbus_init();
++
+ /* FIXME: add n810 spi devices */
+ spi_register_board_info(n800_spi_board_info,
+ ARRAY_SIZE(n800_spi_board_info));
+@@ -689,6 +792,7 @@ MACHINE_START(NOKIA_N800, "Nokia N800")
+ .map_io = omap242x_map_io,
+ .init_early = omap2420_init_early,
+ .init_irq = omap2_init_irq,
++ .handle_irq = omap2_intc_handle_irq,
+ .init_machine = n8x0_init_machine,
+ .timer = &omap2_timer,
+ MACHINE_END
+@@ -699,6 +803,7 @@ MACHINE_START(NOKIA_N810, "Nokia N810")
+ .map_io = omap242x_map_io,
+ .init_early = omap2420_init_early,
+ .init_irq = omap2_init_irq,
++ .handle_irq = omap2_intc_handle_irq,
+ .init_machine = n8x0_init_machine,
+ .timer = &omap2_timer,
+ MACHINE_END
+@@ -709,6 +814,7 @@ MACHINE_START(NOKIA_N810_WIMAX, "Nokia N810 WiMAX")
+ .map_io = omap242x_map_io,
+ .init_early = omap2420_init_early,
+ .init_irq = omap2_init_irq,
++ .handle_irq = omap2_intc_handle_irq,
+ .init_machine = n8x0_init_machine,
+ .timer = &omap2_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
+index 4a71cb7..f17ae3d 100644
+--- a/arch/arm/mach-omap2/board-omap3beagle.c
++++ b/arch/arm/mach-omap2/board-omap3beagle.c
+@@ -40,7 +40,7 @@
+ #include <asm/mach/flash.h>
+
+ #include <plat/board.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <video/omapdss.h>
+ #include <video/omap-panel-dvi.h>
+ #include <plat/gpmc.h>
+@@ -51,6 +51,7 @@
+ #include "mux.h"
+ #include "hsmmc.h"
+ #include "pm.h"
++#include "board-flash.h"
+ #include "common-board-devices.h"
+
+ /*
+@@ -538,8 +539,9 @@ static void __init omap3_beagle_init(void)
+
+ usb_musb_init(NULL);
+ usbhs_init(&usbhs_bdata);
+- omap_nand_flash_init(NAND_BUSWIDTH_16, omap3beagle_nand_partitions,
+- ARRAY_SIZE(omap3beagle_nand_partitions));
++ omap_nand_init(omap3beagle_nand_partitions,
++ ARRAY_SIZE(omap3beagle_nand_partitions), GPMC_CS_NUM + 1,
++ NAND_BUSWIDTH_16, NULL);
+
+ /* Ensure msecure is mux'd to be able to set the RTC. */
+ omap_mux_init_signal("sys_drm_msecure", OMAP_PIN_OFF_OUTPUT_HIGH);
+@@ -559,6 +561,7 @@ MACHINE_START(OMAP3_BEAGLE, "OMAP3 Beagle Board")
+ .map_io = omap3_map_io,
+ .init_early = omap3_init_early,
+ .init_irq = omap3_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = omap3_beagle_init,
+ .timer = &omap3_secure_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-omap3encore.c b/arch/arm/mach-omap2/board-omap3encore.c
+new file mode 100644
+index 0000000..ab60e3e
+--- /dev/null
++++ b/arch/arm/mach-omap2/board-omap3encore.c
+@@ -0,0 +1,344 @@
++/*
++ * Support for Barnes & Noble Nook Color
++ *
++ * Loosely based on mach-omap2/board-zoom.c
++ * Copyright (C) 2008-2010 Texas Instruments Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * June 2011 Oleg Drokin <green@linuxhacker.ru> - Port to mainline
++ *
++ */
++
++#include <linux/gpio.h>
++#include <linux/gpio_keys.h>
++#include <linux/i2c/twl.h>
++#include <linux/regulator/machine.h>
++
++#include <asm/mach-types.h>
++#include <asm/mach/arch.h>
++
++#include <plat/usb.h>
++#include <plat/mux.h>
++#include <plat/mmc.h>
++
++#include "common.h"
++#include "mux.h"
++#include "hsmmc.h"
++#include "sdram-hynix-h8mbx00u0mer-0em.h"
++
++/* Encore-specific device-info and i2c addresses. */
++/* Battery, bus 1 */
++#define MAX17042_I2C_SLAVE_ADDRESS 0x36
++#define MAX17042_GPIO_FOR_IRQ 100
++
++/* Addition of MAX8903/TI GPIO mapping per the schematics */
++#define MAX8903_UOK_GPIO_FOR_IRQ 115
++#define MAX8903_DOK_GPIO_FOR_IRQ 114
++#define MAX8903_GPIO_CHG_EN 110
++#define MAX8903_GPIO_CHG_STATUS 111
++#define MAX8903_GPIO_CHG_FLT 101
++#define MAX8903_GPIO_CHG_IUSB 102
++#define MAX8903_GPIO_CHG_USUS 104
++#define MAX8903_GPIO_CHG_ILM 61
++
++/* TI WLAN */
++#define ENCORE_WIFI_PMENA_GPIO 22
++#define ENCORE_WIFI_IRQ_GPIO 15
++#define ENCORE_WIFI_EN_POW 16
++
++/* Accelerometer i2c bus 1*/
++#define KXTF9_I2C_SLAVE_ADDRESS 0x0F
++#define KXTF9_GPIO_FOR_PWR 34
++#define KXTF9_GPIO_FOR_IRQ 113
++
++/* Touch screen i2c bus 2*/
++#define CYTTSP_I2C_SLAVEADDRESS 34
++#define ENCORE_CYTTSP_GPIO 99
++#define ENCORE_CYTTSP_RESET_GPIO 46
++
++/* Audio codec, i2c bus 2 */
++#define AUDIO_CODEC_POWER_ENABLE_GPIO 103
++#define AUDIO_CODEC_RESET_GPIO 37
++#define AUDIO_CODEC_IRQ_GPIO 59
++#define AIC3100_I2CSLAVEADDRESS 0x18
++
++
++/* Different HW revisions */
++#define BOARD_ENCORE_REV_EVT1A 0x1
++#define BOARD_ENCORE_REV_EVT1B 0x2
++#define BOARD_ENCORE_REV_EVT2 0x3
++#define BOARD_ENCORE_REV_DVT 0x4
++#define BOARD_ENCORE_REV_PVT 0x5
++#define BOARD_ENCORE_REV_UNKNOWN 0x6
++
++static inline int is_encore_board_evt2(void)
++{
++ return system_rev >= BOARD_ENCORE_REV_EVT2;
++}
++
++static inline int is_encore_board_evt1b(void)
++{
++ return system_rev == BOARD_ENCORE_REV_EVT1B;
++}
++
++static int encore_twl4030_keymap[] = {
++ KEY(1, 0, KEY_VOLUMEUP),
++ KEY(2, 0, KEY_VOLUMEDOWN),
++};
++
++static struct matrix_keymap_data encore_twl4030_keymap_data = {
++ .keymap = encore_twl4030_keymap,
++ .keymap_size = ARRAY_SIZE(encore_twl4030_keymap),
++};
++
++static struct twl4030_keypad_data encore_kp_twl4030_data = {
++ .rows = 8,
++ .cols = 8,
++ .keymap_data = &encore_twl4030_keymap_data,
++ .rep = 1,
++};
++
++/* HOME key code for HW > EVT2A */
++static struct gpio_keys_button encore_gpio_buttons[] = {
++ {
++ .code = KEY_POWER,
++ .gpio = 14,
++ .desc = "POWER",
++ .active_low = 0,
++ .wakeup = 1,
++ },
++ {
++ .code = KEY_HOME,
++ .gpio = 48,
++ .desc = "HOME",
++ .active_low = 1,
++ .wakeup = 1,
++ },
++};
++
++static struct gpio_keys_platform_data encore_gpio_key_info = {
++ .buttons = encore_gpio_buttons,
++ .nbuttons = ARRAY_SIZE(encore_gpio_buttons),
++};
++
++static struct platform_device encore_keys_gpio = {
++ .name = "gpio-keys",
++ .id = -1,
++ .dev = {
++ .platform_data = &encore_gpio_key_info,
++ },
++};
++
++static struct platform_device *encore_devices[] __initdata = {
++ &encore_keys_gpio,
++};
++
++static struct twl4030_usb_data encore_usb_data = {
++ .usb_mode = T2_USB_MODE_ULPI,
++};
++
++static struct regulator_consumer_supply encore_vmmc1_supply[] = {
++ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
++};
++
++static struct regulator_consumer_supply encore_vdda_dac_supply[] = {
++ REGULATOR_SUPPLY("vdda_dac", "omapdss_venc"),
++};
++
++/* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */
++static struct regulator_init_data encore_vmmc1 = {
++ .constraints = {
++ .min_uV = 1850000,
++ .max_uV = 3150000,
++ .valid_modes_mask = REGULATOR_MODE_NORMAL
++ | REGULATOR_MODE_STANDBY,
++ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
++ | REGULATOR_CHANGE_MODE
++ | REGULATOR_CHANGE_STATUS,
++ },
++ .num_consumer_supplies = ARRAY_SIZE(encore_vmmc1_supply),
++ .consumer_supplies = encore_vmmc1_supply,
++};
++
++static struct regulator_init_data encore_vdac = {
++ .constraints = {
++ .min_uV = 1800000,
++ .max_uV = 1800000,
++ .valid_modes_mask = REGULATOR_MODE_NORMAL
++ | REGULATOR_MODE_STANDBY,
++ .valid_ops_mask = REGULATOR_CHANGE_MODE
++ | REGULATOR_CHANGE_STATUS,
++ },
++ .num_consumer_supplies = ARRAY_SIZE(encore_vdda_dac_supply),
++ .consumer_supplies = encore_vdda_dac_supply,
++};
++
++/*
++ * The order is reversed in this table so that the internal eMMC is
++ * presented as the first MMC card, for compatibility with existing
++ * installations and for common sense reasons.
++ */
++static struct omap2_hsmmc_info mmc[] __initdata = {
++ {
++ .name = "internal",
++ .mmc = 2,
++ .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
++ .gpio_cd = -EINVAL,
++ .gpio_wp = -EINVAL,
++ .nonremovable = true,
++ .power_saving = true,
++ .ocr_mask = MMC_VDD_165_195, /* 1.85V */
++ },
++ {
++ .name = "external",
++ .mmc = 1,
++ .caps = MMC_CAP_4_BIT_DATA,
++ .gpio_cd = -EINVAL,
++ .gpio_wp = -EINVAL,
++ .power_saving = true,
++ },
++ {
++ .name = "internal",
++ .mmc = 3,
++ .caps = MMC_CAP_4_BIT_DATA,
++ .gpio_cd = -EINVAL,
++ .gpio_wp = -EINVAL,
++ .nonremovable = true,
++ .power_saving = true,
++ },
++ {} /* Terminator */
++};
++
++static int encore_hsmmc_card_detect(struct device *dev, int slot)
++{
++ struct omap_mmc_platform_data *mmc = dev->platform_data;
++
++	/* Encore boards EVT2 and later drive this pin high when a card is present */
++ return gpio_get_value_cansleep(mmc->slots[0].switch_pin);
++}
++
++static int encore_twl4030_hsmmc_late_init(struct device *dev)
++{
++ int ret = 0;
++ struct platform_device *pdev = container_of(dev,
++ struct platform_device, dev);
++ struct omap_mmc_platform_data *pdata = dev->platform_data;
++
++ if (is_encore_board_evt2()) {
++ /* Setting MMC1 (external) Card detect */
++ if (pdev->id == 0)
++ pdata->slots[0].card_detect = encore_hsmmc_card_detect;
++ }
++
++ return ret;
++}
++
++static __init void encore_hsmmc_set_late_init(struct device *dev)
++{
++ struct omap_mmc_platform_data *pdata;
++
++ /* dev can be null if CONFIG_MMC_OMAP_HS is not set */
++ if (!dev)
++ return;
++
++ pdata = dev->platform_data;
++ pdata->init = encore_twl4030_hsmmc_late_init;
++}
++
++static int __ref encore_twl_gpio_setup(struct device *dev,
++ unsigned gpio, unsigned ngpio)
++{
++ struct omap2_hsmmc_info *c;
++ /*
++ * gpio + 0 is "mmc0_cd" (input/IRQ),
++ * gpio + 1 is "mmc1_cd" (input/IRQ)
++ */
++ mmc[1].gpio_cd = gpio + 0;
++ mmc[0].gpio_cd = gpio + 1;
++ omap2_hsmmc_init(mmc);
++ for (c = mmc; c->mmc; c++)
++ encore_hsmmc_set_late_init(c->dev);
++
++ return 0;
++}
++
++static struct twl4030_gpio_platform_data encore_gpio_data = {
++ .gpio_base = OMAP_MAX_GPIO_LINES,
++ .irq_base = TWL4030_GPIO_IRQ_BASE,
++ .irq_end = TWL4030_GPIO_IRQ_END,
++ .setup = encore_twl_gpio_setup,
++};
++
++static struct twl4030_madc_platform_data encore_madc_data = {
++ .irq_line = 1,
++};
++
++static struct twl4030_platform_data __refdata encore_twldata = {
++ .irq_base = TWL4030_IRQ_BASE,
++ .irq_end = TWL4030_IRQ_END,
++
++ .madc = &encore_madc_data,
++ .usb = &encore_usb_data,
++ .gpio = &encore_gpio_data,
++ .keypad = &encore_kp_twl4030_data,
++ .vmmc1 = &encore_vmmc1,
++ .vdac = &encore_vdac,
++};
++
++static struct i2c_board_info __initdata encore_i2c_bus1_info[] = {
++ {
++ I2C_BOARD_INFO("tps65921", 0x48),
++ .flags = I2C_CLIENT_WAKE,
++ .irq = INT_34XX_SYS_NIRQ,
++ .platform_data = &encore_twldata,
++ },
++};
++
++static struct i2c_board_info __initdata encore_i2c_bus2_info[] = {
++};
++
++#ifdef CONFIG_OMAP_MUX
++static struct omap_board_mux board_mux[] __initdata = {
++ { .reg_offset = OMAP_MUX_TERMINATOR },
++};
++#endif
++
++static struct omap_board_config_kernel encore_config[] __initdata = {
++};
++
++static void __init omap_i2c_init(void)
++{
++ omap_register_i2c_bus(1, 100, encore_i2c_bus1_info,
++ ARRAY_SIZE(encore_i2c_bus1_info));
++ omap_register_i2c_bus(2, 400, encore_i2c_bus2_info,
++ ARRAY_SIZE(encore_i2c_bus2_info));
++}
++
++static void __init omap_encore_init(void)
++{
++ omap3_mux_init(board_mux, OMAP_PACKAGE_CBP);
++ omap_i2c_init();
++ omap_serial_init();
++ omap_sdrc_init(h8mbx00u0mer0em_sdrc_params,
++ h8mbx00u0mer0em_sdrc_params);
++ usb_musb_init(NULL);
++
++ omap_board_config = encore_config;
++ omap_board_config_size = ARRAY_SIZE(encore_config);
++
++ platform_add_devices(encore_devices, ARRAY_SIZE(encore_devices));
++}
++
++MACHINE_START(ENCORE, "encore")
++ .atag_offset = 0x100,
++ .reserve = omap_reserve,
++ .map_io = omap3_map_io,
++ .init_early = omap3630_init_early,
++ .init_irq = omap3_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
++ .init_machine = omap_encore_init,
++ .timer = &omap3_timer,
++MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
+index ec00b2e..620056f 100644
+--- a/arch/arm/mach-omap2/board-omap3evm.c
++++ b/arch/arm/mach-omap2/board-omap3evm.c
+@@ -43,7 +43,7 @@
+
+ #include <plat/board.h>
+ #include <plat/usb.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/mcspi.h>
+ #include <video/omapdss.h>
+ #include <video/omap-panel-dvi.h>
+@@ -637,7 +637,7 @@ static void __init omap3_evm_init(void)
+ omap_sdrc_init(mt46h32m32lf6_sdrc_params, NULL);
+
+ /* OMAP3EVM uses ISP1504 phy and so register nop transceiver */
+- usb_nop_xceiv_register();
++ usb_nop_xceiv_register(0);
+
+ if (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2) {
+ /* enable EHCI VBUS using GPIO22 */
+@@ -672,6 +672,12 @@ static void __init omap3_evm_init(void)
+ pr_err("error setting wl12xx data\n");
+ platform_device_register(&omap3evm_wlan_regulator);
+ #endif
++ /* NAND */
++ omap_nand_init(omap3_evm_nand_partitions,
++ ARRAY_SIZE(omap3_evm_nand_partitions),
++ 0, NAND_BUSWIDTH_16, &nand_default_timings);
++ board_onenand_init(omap3_evm_onenand_partitions,
++ ARRAY_SIZE(omap3_evm_onenand_partitions), 0);
+ }
+
+ MACHINE_START(OMAP3EVM, "OMAP3 EVM")
+@@ -681,6 +687,7 @@ MACHINE_START(OMAP3EVM, "OMAP3 EVM")
+ .map_io = omap3_map_io,
+ .init_early = omap35xx_init_early,
+ .init_irq = omap3_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = omap3_evm_init,
+ .timer = &omap3_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-omap3logic.c b/arch/arm/mach-omap2/board-omap3logic.c
+index 7c0f193..5fa6bad 100644
+--- a/arch/arm/mach-omap2/board-omap3logic.c
++++ b/arch/arm/mach-omap2/board-omap3logic.c
+@@ -40,7 +40,7 @@
+
+ #include <plat/mux.h>
+ #include <plat/board.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/gpmc-smsc911x.h>
+ #include <plat/gpmc.h>
+ #include <plat/sdrc.h>
+@@ -208,6 +208,7 @@ MACHINE_START(OMAP3_TORPEDO, "Logic OMAP3 Torpedo board")
+ .map_io = omap3_map_io,
+ .init_early = omap35xx_init_early,
+ .init_irq = omap3_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = omap3logic_init,
+ .timer = &omap3_timer,
+ MACHINE_END
+@@ -217,6 +218,7 @@ MACHINE_START(OMAP3530_LV_SOM, "OMAP Logic 3530 LV SOM board")
+ .map_io = omap3_map_io,
+ .init_early = omap35xx_init_early,
+ .init_irq = omap3_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = omap3logic_init,
+ .timer = &omap3_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
+index f7811f4..ef315c5 100644
+--- a/arch/arm/mach-omap2/board-omap3pandora.c
++++ b/arch/arm/mach-omap2/board-omap3pandora.c
+@@ -41,7 +41,7 @@
+ #include <asm/mach/map.h>
+
+ #include <plat/board.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <mach/hardware.h>
+ #include <plat/mcspi.h>
+ #include <plat/usb.h>
+@@ -606,6 +606,7 @@ MACHINE_START(OMAP3_PANDORA, "Pandora Handheld Console")
+ .map_io = omap3_map_io,
+ .init_early = omap35xx_init_early,
+ .init_irq = omap3_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = omap3pandora_init,
+ .timer = &omap3_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
+index ddb7d66..b21d70a 100644
+--- a/arch/arm/mach-omap2/board-omap3stalker.c
++++ b/arch/arm/mach-omap2/board-omap3stalker.c
+@@ -35,7 +35,7 @@
+ #include <asm/mach/flash.h>
+
+ #include <plat/board.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/gpmc.h>
+ #include <plat/nand.h>
+ #include <plat/usb.h>
+@@ -454,6 +454,7 @@ MACHINE_START(SBC3530, "OMAP3 STALKER")
+ .map_io = omap3_map_io,
+ .init_early = omap35xx_init_early,
+ .init_irq = omap3_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = omap3_stalker_init,
+ .timer = &omap3_secure_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
+index a2d0d19..f55922e 100644
+--- a/arch/arm/mach-omap2/board-omap3touchbook.c
++++ b/arch/arm/mach-omap2/board-omap3touchbook.c
+@@ -44,13 +44,14 @@
+ #include <asm/mach/flash.h>
+
+ #include <plat/board.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/gpmc.h>
+ #include <plat/nand.h>
+ #include <plat/usb.h>
+
+ #include "mux.h"
+ #include "hsmmc.h"
++#include "board-flash.h"
+ #include "common-board-devices.h"
+
+ #include <asm/setup.h>
+@@ -366,8 +367,9 @@ static void __init omap3_touchbook_init(void)
+ omap_ads7846_init(4, OMAP3_TS_GPIO, 310, &ads7846_pdata);
+ usb_musb_init(NULL);
+ usbhs_init(&usbhs_bdata);
+- omap_nand_flash_init(NAND_BUSWIDTH_16, omap3touchbook_nand_partitions,
+- ARRAY_SIZE(omap3touchbook_nand_partitions));
++ omap_nand_init(omap3touchbook_nand_partitions,
++ ARRAY_SIZE(omap3touchbook_nand_partitions), GPMC_CS_NUM + 1,
++ NAND_BUSWIDTH_16, NULL);
+
+ /* Ensure SDRC pins are mux'd for self-refresh */
+ omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
+@@ -381,6 +383,7 @@ MACHINE_START(TOUCHBOOK, "OMAP3 touchbook Board")
+ .map_io = omap3_map_io,
+ .init_early = omap3430_init_early,
+ .init_irq = omap3_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = omap3_touchbook_init,
+ .timer = &omap3_secure_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
+index 51b1c93..9cc9564 100644
+--- a/arch/arm/mach-omap2/board-omap4panda.c
++++ b/arch/arm/mach-omap2/board-omap4panda.c
+@@ -30,14 +30,14 @@
+ #include <linux/wl12xx.h>
+
+ #include <mach/hardware.h>
+-#include <mach/omap4-common.h>
++#include <asm/hardware/gic.h>
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach/map.h>
+ #include <video/omapdss.h>
+
+ #include <plat/board.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/usb.h>
+ #include <plat/mmc.h>
+ #include <video/omap-panel-dvi.h>
+@@ -365,74 +365,8 @@ static struct omap_board_mux board_mux[] __initdata = {
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+ };
+
+-static struct omap_device_pad serial2_pads[] __initdata = {
+- OMAP_MUX_STATIC("uart2_cts.uart2_cts",
+- OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
+- OMAP_MUX_STATIC("uart2_rts.uart2_rts",
+- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
+- OMAP_MUX_STATIC("uart2_rx.uart2_rx",
+- OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
+- OMAP_MUX_STATIC("uart2_tx.uart2_tx",
+- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
+-};
+-
+-static struct omap_device_pad serial3_pads[] __initdata = {
+- OMAP_MUX_STATIC("uart3_cts_rctx.uart3_cts_rctx",
+- OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
+- OMAP_MUX_STATIC("uart3_rts_sd.uart3_rts_sd",
+- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
+- OMAP_MUX_STATIC("uart3_rx_irrx.uart3_rx_irrx",
+- OMAP_PIN_INPUT | OMAP_MUX_MODE0),
+- OMAP_MUX_STATIC("uart3_tx_irtx.uart3_tx_irtx",
+- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
+-};
+-
+-static struct omap_device_pad serial4_pads[] __initdata = {
+- OMAP_MUX_STATIC("uart4_rx.uart4_rx",
+- OMAP_PIN_INPUT | OMAP_MUX_MODE0),
+- OMAP_MUX_STATIC("uart4_tx.uart4_tx",
+- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
+-};
+-
+-static struct omap_board_data serial2_data __initdata = {
+- .id = 1,
+- .pads = serial2_pads,
+- .pads_cnt = ARRAY_SIZE(serial2_pads),
+-};
+-
+-static struct omap_board_data serial3_data __initdata = {
+- .id = 2,
+- .pads = serial3_pads,
+- .pads_cnt = ARRAY_SIZE(serial3_pads),
+-};
+-
+-static struct omap_board_data serial4_data __initdata = {
+- .id = 3,
+- .pads = serial4_pads,
+- .pads_cnt = ARRAY_SIZE(serial4_pads),
+-};
+-
+-static inline void board_serial_init(void)
+-{
+- struct omap_board_data bdata;
+- bdata.flags = 0;
+- bdata.pads = NULL;
+- bdata.pads_cnt = 0;
+- bdata.id = 0;
+- /* pass dummy data for UART1 */
+- omap_serial_init_port(&bdata);
+-
+- omap_serial_init_port(&serial2_data);
+- omap_serial_init_port(&serial3_data);
+- omap_serial_init_port(&serial4_data);
+-}
+ #else
+ #define board_mux NULL
+-
+-static inline void board_serial_init(void)
+-{
+- omap_serial_init();
+-}
+ #endif
+
+ /* Display DVI */
+@@ -568,7 +502,7 @@ static void __init omap4_panda_init(void)
+ omap4_panda_i2c_init();
+ platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices));
+ platform_device_register(&omap_vwlan_device);
+- board_serial_init();
++ omap_serial_init();
+ omap_sdrc_init(NULL, NULL);
+ omap4_twl6030_hsmmc_init(mmc);
+ omap4_ehci_init();
+@@ -583,6 +517,7 @@ MACHINE_START(OMAP4_PANDA, "OMAP4 Panda board")
+ .map_io = omap4_map_io,
+ .init_early = omap4430_init_early,
+ .init_irq = gic_init_irq,
++ .handle_irq = gic_handle_irq,
+ .init_machine = omap4_panda_init,
+ .timer = &omap4_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-omap4pcm049.c b/arch/arm/mach-omap2/board-omap4pcm049.c
+new file mode 100644
+index 0000000..3b39464b
+--- /dev/null
++++ b/arch/arm/mach-omap2/board-omap4pcm049.c
+@@ -0,0 +1,584 @@
++/*
++ * Board support file for Phytec phyCORE-OMAP4 Board.
++ *
++ * Copyright (C) 2011 Phytec Messtechnik GmbH
++ *
++ * Author: Jan Weitzel <armlinux@phytec.de>
++ *
++ * Based on mach-omap2/board-omap4panda.c
++ *
++ * Author: David Anders <x0132446@ti.com>
++ *
++ * Author: Santosh Shilimkar <santosh.shilimkar@ti.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/platform_device.h>
++#include <linux/clk.h>
++#include <linux/io.h>
++#include <linux/leds.h>
++#include <linux/gpio.h>
++#include <linux/usb/otg.h>
++#include <linux/i2c/twl.h>
++#include <linux/i2c/at24.h>
++#include <linux/mfd/stmpe.h>
++#include <linux/leds-pca9532.h>
++#include <linux/regulator/machine.h>
++#include <linux/regulator/fixed.h>
++#include <linux/smsc911x.h>
++
++#include <mach/hardware.h>
++#include <asm/hardware/gic.h>
++#include <asm/mach-types.h>
++#include <asm/mach/arch.h>
++#include <asm/mach/map.h>
++#include <video/omapdss.h>
++
++#include <plat/board.h>
++#include <plat/usb.h>
++#include <plat/gpmc.h>
++#include <plat/gpmc-smsc911x.h>
++#include <plat/mmc.h>
++#include <video/omap-panel-generic-dpi.h>
++
++#include "common.h"
++#include "hsmmc.h"
++#include "control.h"
++#include "mux.h"
++#include "common-board-devices.h"
++
++#define OMAP4_PCM049_ETH_GPIO_IRQ 121
++#define OMAP4_PCM049_ETH_CS 5
++#define OMAP4_PCM049_STMPE811_GPIO_IRQ 117
++#define OMAP4_PCM049_LCD_ENABLE 118
++
++static struct gpio_led gpio_leds[] = {
++ {
++ .name = "modul:red:status1",
++ .default_trigger = "heartbeat",
++ .gpio = 152,
++ },
++ {
++ .name = "modul:green:status2",
++ .default_trigger = "mmc0",
++ .gpio = 153,
++ },
++};
++
++static struct gpio_led_platform_data gpio_led_info = {
++ .leds = gpio_leds,
++ .num_leds = ARRAY_SIZE(gpio_leds),
++};
++
++static struct platform_device leds_gpio = {
++ .name = "leds-gpio",
++ .id = -1,
++ .dev = {
++ .platform_data = &gpio_led_info,
++ },
++};
++
++static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
++ .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
++ .port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED,
++ .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED,
++ .phy_reset = false,
++ .reset_gpio_port[0] = -EINVAL,
++ .reset_gpio_port[1] = -EINVAL,
++ .reset_gpio_port[2] = -EINVAL
++};
++
++static void __init omap4_ehci_init(void)
++{
++ struct clk *phy_ref_clk;
++ /* FREF_CLK3 provides the 19.2 MHz reference clock to the PHY */
++ phy_ref_clk = clk_get(NULL, "auxclk3_ck");
++ if (IS_ERR(phy_ref_clk)) {
++ pr_err("Cannot request auxclk3\n");
++ return;
++ }
++ clk_set_rate(phy_ref_clk, 19200000);
++ clk_enable(phy_ref_clk);
++
++ usbhs_init(&usbhs_bdata);
++ return;
++}
++
++static struct omap_musb_board_data musb_board_data = {
++ .interface_type = MUSB_INTERFACE_UTMI,
++ .mode = MUSB_OTG,
++ .power = 100,
++};
++
++static struct omap2_hsmmc_info mmc[] = {
++ {
++ .mmc = 1,
++ .caps = MMC_CAP_4_BIT_DATA,
++ .gpio_wp = -EINVAL,
++ .gpio_cd = -EINVAL,
++ }, {
++ .mmc = 5,
++ .caps = MMC_CAP_4_BIT_DATA,
++ .gpio_wp = -EINVAL,
++ .gpio_cd = 30, /* wk30 */
++ .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, /* 3V3 */
++ }, {} /* Terminator */
++};
++
++#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
++static struct omap_smsc911x_platform_data __initdata board_smsc911x_data = {
++ .cs = OMAP4_PCM049_ETH_CS,
++ .gpio_irq = OMAP4_PCM049_ETH_GPIO_IRQ,
++ .gpio_reset = -EINVAL,
++ .flags = SMSC911X_USE_16BIT,
++};
++
++static inline void __init pcm049_init_smsc911x(void)
++{
++ omap_mux_init_gpio(OMAP4_PCM049_ETH_GPIO_IRQ, OMAP_PIN_INPUT);
++ gpmc_smsc911x_init(&board_smsc911x_data);
++}
++#else
++static inline void __init pcm049_init_smsc911x(void) { return; }
++#endif
++
++static int omap4_twl6030_hsmmc_late_init(struct device *dev)
++{
++ int ret = 0;
++ struct platform_device *pdev = container_of(dev,
++ struct platform_device, dev);
++ struct omap_mmc_platform_data *pdata = dev->platform_data;
++
++ /* Setting MMC1 Card detect Irq */
++ if (pdev->id == 0) {
++ ret = twl6030_mmc_card_detect_config();
++ if (ret)
++ dev_err(dev, "%s: Error card detect config(%d)\n",
++ __func__, ret);
++ pdata->slots[0].card_detect_irq = TWL6030_IRQ_BASE +
++ MMCDETECT_INTR_OFFSET;
++ pdata->slots[0].card_detect = twl6030_mmc_card_detect;
++ }
++ return ret;
++}
++
++static __init void omap4_twl6030_hsmmc_set_late_init(struct device *dev)
++{
++ struct omap_mmc_platform_data *pdata;
++
++ /* dev can be null if CONFIG_MMC_OMAP_HS is not set */
++ if (!dev) {
++ pr_err("Failed omap4_twl6030_hsmmc_set_late_init\n");
++ return;
++ }
++ pdata = dev->platform_data;
++
++ pdata->init = omap4_twl6030_hsmmc_late_init;
++}
++
++static int __init omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers)
++{
++ struct omap2_hsmmc_info *c;
++
++ omap2_hsmmc_init(controllers);
++ for (c = controllers; c->mmc; c++)
++ omap4_twl6030_hsmmc_set_late_init(c->dev);
++
++ return 0;
++}
++
++/* Fixed regulator for max1027 */
++static struct regulator_consumer_supply pcm049_vcc_3v3_consumer_supply[] = {
++ REGULATOR_SUPPLY("vcc", "4-0064"),
++};
++
++struct regulator_init_data pcm049_vcc_3v3_initdata = {
++ .consumer_supplies = pcm049_vcc_3v3_consumer_supply,
++ .num_consumer_supplies = ARRAY_SIZE(pcm049_vcc_3v3_consumer_supply),
++ .constraints = {
++ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
++ },
++};
++
++static struct fixed_voltage_config pcm049_vcc_3v3_config = {
++ .supply_name = "pcm049_vcc_3v3",
++ .microvolts = 3300000,
++ .gpio = -EINVAL,
++ .enabled_at_boot = 1,
++ .init_data = &pcm049_vcc_3v3_initdata,
++};
++
++static struct platform_device pcm049_vcc_3v3_device = {
++ .name = "reg-fixed-voltage",
++ .id = 0,
++ .dev = {
++ .platform_data = &pcm049_vcc_3v3_config,
++ },
++};
++
++static struct at24_platform_data board_eeprom = {
++ .byte_len = 4096,
++ .page_size = 32,
++ .flags = AT24_FLAG_ADDR16,
++};
++
++static struct stmpe_gpio_platform_data pba_gpio_stm_data = {
++ .gpio_base = -1,
++ .norequest_mask = STMPE_GPIO_NOREQ_811_TOUCH,
++};
++
++static struct stmpe_ts_platform_data pba_ts_stm_pdata = {
++ .sample_time = 4,
++ .mod_12b = 1,
++ .ref_sel = 0,
++ .adc_freq = 1,
++ .ave_ctrl = 3,
++ .touch_det_delay = 3,
++ .settling = 3,
++ .fraction_z = 7,
++ .i_drive = 0,
++};
++
++static struct stmpe_platform_data pba_stm_pdata = {
++ .blocks = STMPE_BLOCK_GPIO | STMPE_BLOCK_TOUCHSCREEN,
++ .irq_base = TWL6030_IRQ_END,
++ .irq_trigger = IRQF_TRIGGER_RISING,
++ .irq_invert_polarity = true,
++ .gpio = &pba_gpio_stm_data,
++ .ts = &pba_ts_stm_pdata,
++};
++
++static struct pca9532_platform_data pba_pca9532 = {
++ .leds = {
++ {
++ .name = "board:red:free_use1",
++ .state = PCA9532_OFF,
++ .type = PCA9532_TYPE_LED,
++ }, {
++ .name = "board:yellow:free_use2",
++ .state = PCA9532_OFF,
++ .type = PCA9532_TYPE_LED,
++ }, {
++ .name = "board:yellow:free_use3",
++ .state = PCA9532_OFF,
++ .type = PCA9532_TYPE_LED,
++ }, {
++ .name = "board:green:free_use4",
++ .state = PCA9532_OFF,
++ .type = PCA9532_TYPE_LED,
++ },
++ },
++ .psc = { 1, 1 },
++ .pwm = { 1, 1 },
++};
++
++static struct i2c_board_info __initdata pcm049_i2c_1_boardinfo[] = {
++ {
++ I2C_BOARD_INFO("at24", 0x57), /* E0=1, E1=1, E2=1 */
++ .platform_data = &board_eeprom,
++ },
++};
++
++static struct i2c_board_info __initdata pcm049_i2c_3_boardinfo[] = {
++};
++
++static struct i2c_board_info __initdata pcm049_i2c_4_boardinfo[] = {
++ {
++ I2C_BOARD_INFO("stmpe811", 0x41), /* Touch controller */
++ .irq = OMAP_GPIO_IRQ(OMAP4_PCM049_STMPE811_GPIO_IRQ),
++ .platform_data = &pba_stm_pdata,
++ }, {
++ I2C_BOARD_INFO("max1037", 0x64), /* A/D converter */
++ }, {
++ I2C_BOARD_INFO("pca9533", 0x62), /* Leds pca9533 */
++ .platform_data = &pba_pca9532,
++ }
++};
++
++static struct twl4030_platform_data pcm049_twldata;
++
++static int __init pcm049_i2c_init(void)
++{
++ /* I2C1 */
++ omap4_pmic_get_config(&pcm049_twldata, TWL_COMMON_PDATA_USB,
++ TWL_COMMON_REGULATOR_VDAC |
++ TWL_COMMON_REGULATOR_VAUX2 |
++ TWL_COMMON_REGULATOR_VAUX3 |
++ TWL_COMMON_REGULATOR_VMMC |
++ TWL_COMMON_REGULATOR_VPP |
++ TWL_COMMON_REGULATOR_VANA |
++ TWL_COMMON_REGULATOR_VCXIO |
++ TWL_COMMON_REGULATOR_VUSB |
++ TWL_COMMON_REGULATOR_CLK32KG);
++ omap4_pmic_init("twl6030", &pcm049_twldata);
++ i2c_register_board_info(1, pcm049_i2c_1_boardinfo,
++ ARRAY_SIZE(pcm049_i2c_1_boardinfo));
++
++ /* I2C3 */
++ omap_register_i2c_bus(3, 400, pcm049_i2c_3_boardinfo,
++ ARRAY_SIZE(pcm049_i2c_3_boardinfo));
++
++ /* I2C4 */
++ if (omap_mux_init_gpio(OMAP4_PCM049_STMPE811_GPIO_IRQ, OMAP_PIN_INPUT))
++ printk(KERN_ERR "Failed to mux GPIO%d for STMPE811 IRQ\n",
++ OMAP4_PCM049_STMPE811_GPIO_IRQ);
++ else if (gpio_request(OMAP4_PCM049_STMPE811_GPIO_IRQ, "STMPE811 irq"))
++ printk(KERN_ERR "Failed to request GPIO%d for STMPE811 IRQ\n",
++ OMAP4_PCM049_STMPE811_GPIO_IRQ);
++ else
++ gpio_direction_input(OMAP4_PCM049_STMPE811_GPIO_IRQ);
++
++ omap_register_i2c_bus(4, 400, pcm049_i2c_4_boardinfo,
++ ARRAY_SIZE(pcm049_i2c_4_boardinfo));
++ return 0;
++}
++
++#ifdef CONFIG_OMAP_MUX
++static struct omap_board_mux board_mux[] __initdata = {
++ OMAP4_MUX(SDMMC5_DAT0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
++ OMAP4_MUX(SDMMC5_DAT1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
++ OMAP4_MUX(SDMMC5_DAT2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
++ OMAP4_MUX(SDMMC5_DAT3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
++
++ /* dispc2_data23 */
++ OMAP4_MUX(USBB2_ULPITLL_STP, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_data22 */
++ OMAP4_MUX(USBB2_ULPITLL_DIR, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_data21 */
++ OMAP4_MUX(USBB2_ULPITLL_NXT, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_data20 */
++ OMAP4_MUX(USBB2_ULPITLL_DAT0, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_data19 */
++ OMAP4_MUX(USBB2_ULPITLL_DAT1, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_data18 */
++ OMAP4_MUX(USBB2_ULPITLL_DAT2, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_data15 */
++ OMAP4_MUX(USBB2_ULPITLL_DAT3, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_data14 */
++ OMAP4_MUX(USBB2_ULPITLL_DAT4, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_data13 */
++ OMAP4_MUX(USBB2_ULPITLL_DAT5, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_data12 */
++ OMAP4_MUX(USBB2_ULPITLL_DAT6, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_data11 */
++ OMAP4_MUX(USBB2_ULPITLL_DAT7, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_data10 */
++ OMAP4_MUX(DPM_EMU3, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_data9 */
++ OMAP4_MUX(DPM_EMU4, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_data16 */
++ OMAP4_MUX(DPM_EMU5, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_data17 */
++ OMAP4_MUX(DPM_EMU6, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_hsync */
++ OMAP4_MUX(DPM_EMU7, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_pclk */
++ OMAP4_MUX(DPM_EMU8, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_vsync */
++ OMAP4_MUX(DPM_EMU9, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_de */
++ OMAP4_MUX(DPM_EMU10, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_data8 */
++ OMAP4_MUX(DPM_EMU11, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_data7 */
++ OMAP4_MUX(DPM_EMU12, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_data6 */
++ OMAP4_MUX(DPM_EMU13, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_data5 */
++ OMAP4_MUX(DPM_EMU14, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_data4 */
++ OMAP4_MUX(DPM_EMU15, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_data3 */
++ OMAP4_MUX(DPM_EMU16, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_data2 */
++ OMAP4_MUX(DPM_EMU17, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_data1 */
++ OMAP4_MUX(DPM_EMU18, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++ /* dispc2_data0 */
++ OMAP4_MUX(DPM_EMU19, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
++
++ { .reg_offset = OMAP_MUX_TERMINATOR },
++};
++
++static struct omap_device_pad serial2_pads[] __initdata = {
++ OMAP_MUX_STATIC("uart2_cts.uart2_cts",
++ OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
++ OMAP_MUX_STATIC("uart2_rts.uart2_rts",
++ OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
++ OMAP_MUX_STATIC("uart2_rx.uart2_rx",
++ OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
++ OMAP_MUX_STATIC("uart2_tx.uart2_tx",
++ OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
++};
++
++static struct omap_device_pad serial3_pads[] __initdata = {
++ OMAP_MUX_STATIC("uart3_cts_rctx.uart3_cts_rctx",
++ OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
++ OMAP_MUX_STATIC("uart3_rts_sd.uart3_rts_sd",
++ OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
++ OMAP_MUX_STATIC("uart3_rx_irrx.uart3_rx_irrx",
++ OMAP_PIN_INPUT | OMAP_MUX_MODE0),
++ OMAP_MUX_STATIC("uart3_tx_irtx.uart3_tx_irtx",
++ OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
++};
++
++static struct omap_board_data serial2_data __initdata = {
++ .id = 1,
++ .pads = serial2_pads,
++ .pads_cnt = ARRAY_SIZE(serial2_pads),
++};
++
++static struct omap_board_data serial3_data __initdata = {
++ .id = 2,
++ .pads = serial3_pads,
++ .pads_cnt = ARRAY_SIZE(serial3_pads),
++};
++
++static inline void board_serial_init(void)
++{
++ omap_serial_init_port(&serial2_data, NULL);
++ omap_serial_init_port(&serial3_data, NULL);
++}
++#else
++#define board_mux NULL
++
++static inline void board_serial_init(void)
++{
++ omap_serial_init();
++}
++#endif
++
++/* Display */
++static int pcm049_panel_enable_lcd(struct omap_dss_device *dssdev)
++{
++ return gpio_direction_output(OMAP4_PCM049_LCD_ENABLE, 1);
++}
++
++static void pcm049_panel_disable_lcd(struct omap_dss_device *dssdev)
++{
++ gpio_direction_output(OMAP4_PCM049_LCD_ENABLE, 0);
++ return;
++}
++
++/* Using generic display panel */
++static struct panel_generic_dpi_data omap4_dpi_panel = {
++ .name = "pd050vl1",
++ .platform_enable = pcm049_panel_enable_lcd,
++ .platform_disable = pcm049_panel_disable_lcd,
++};
++
++struct omap_dss_device pcm049_dpi_device = {
++ .type = OMAP_DISPLAY_TYPE_DPI,
++ .name = "dpi",
++ .driver_name = "generic_dpi_panel",
++ .data = &omap4_dpi_panel,
++ .phy.dpi.data_lines = 24,
++ .channel = OMAP_DSS_CHANNEL_LCD2,
++};
++
++static void pcm049_dvi_mux_init(void)
++{
++ /* PAD0_HDMI_HPD_PAD1_HDMI_CEC */
++ omap_mux_init_signal("hdmi_hpd",
++ OMAP_PIN_INPUT_PULLUP);
++ omap_mux_init_signal("hdmi_cec",
++ OMAP_PIN_INPUT_PULLUP);
++ /* PAD0_HDMI_DDC_SCL_PAD1_HDMI_DDC_SDA */
++ omap_mux_init_signal("hdmi_ddc_scl",
++ OMAP_PIN_INPUT_PULLUP);
++ omap_mux_init_signal("hdmi_ddc_sda",
++ OMAP_PIN_INPUT_PULLUP);
++}
++
++static struct omap_dss_device pcm049_dvi_device = {
++ .name = "dvi",
++ .driver_name = "hdmi_panel",
++ .type = OMAP_DISPLAY_TYPE_HDMI,
++ .clocks = {
++ .dispc = {
++ .dispc_fclk_src = OMAP_DSS_CLK_SRC_FCK,
++ },
++ .hdmi = {
++ .regn = 15,
++ .regm2 = 1,
++ },
++ },
++ .channel = OMAP_DSS_CHANNEL_DIGIT,
++};
++
++static struct omap_dss_device *pcm049_dss_devices[] = {
++ &pcm049_dpi_device,
++ &pcm049_dvi_device,
++};
++
++static struct omap_dss_board_info pcm049_dss_data = {
++ .num_devices = ARRAY_SIZE(pcm049_dss_devices),
++ .devices = pcm049_dss_devices,
++ .default_device = &pcm049_dpi_device,
++};
++
++void pcm049_display_init(void)
++{
++ omap_mux_init_gpio(OMAP4_PCM049_LCD_ENABLE, OMAP_PIN_OUTPUT);
++
++ if ((gpio_request(OMAP4_PCM049_LCD_ENABLE, "DISP_ENA") == 0) &&
++ (gpio_direction_output(OMAP4_PCM049_LCD_ENABLE, 1) == 0)) {
++ gpio_export(OMAP4_PCM049_LCD_ENABLE, 0);
++ gpio_set_value(OMAP4_PCM049_LCD_ENABLE, 0);
++ } else
++ printk(KERN_ERR "could not obtain gpio for DISP_ENA\n");
++ pcm049_dvi_mux_init();
++ omap_display_init(&pcm049_dss_data);
++}
++
++static struct platform_device *pcm049_devices[] __initdata = {
++ &pcm049_vcc_3v3_device,
++ &leds_gpio,
++};
++
++#define TWL_PHOENIX_DEV_ON 0x25
++
++static void pcm049_power_off(void)
++{
++ printk(KERN_INFO "Goodbye phyCORE OMAP4!\n");
++ twl_i2c_write_u8(TWL6030_MODULE_ID0, 0x7, TWL_PHOENIX_DEV_ON);
++}
++
++
++static void __init pcm049_init(void)
++{
++ pm_power_off = pcm049_power_off;
++ omap4_mux_init(board_mux, NULL, OMAP_PACKAGE_CBS);
++ pcm049_init_smsc911x();
++ pcm049_i2c_init();
++ platform_add_devices(pcm049_devices, ARRAY_SIZE(pcm049_devices));
++ board_serial_init();
++ omap_sdrc_init(NULL, NULL);
++ omap4_twl6030_hsmmc_init(mmc);
++ omap4_ehci_init();
++ usb_musb_init(&musb_board_data);
++ pcm049_display_init();
++}
++
++static void __init pcm049_map_io(void)
++{
++ omap2_set_globals_443x();
++ omap44xx_map_common_io();
++}
++
++MACHINE_START(PCM049, "phyCORE OMAP4")
++ /* Maintainer: Jan Weitzel - Phytec Messtechnik GmbH */
++ .atag_offset = 0x100,
++ .reserve = omap_reserve,
++ .map_io = pcm049_map_io,
++ .init_early = omap4430_init_early,
++ .init_irq = gic_init_irq,
++ .handle_irq = gic_handle_irq,
++ .init_machine = pcm049_init,
++ .timer = &omap4_timer,
++MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
+index 4cf7aea..d5619c5 100644
+--- a/arch/arm/mach-omap2/board-overo.c
++++ b/arch/arm/mach-omap2/board-overo.c
+@@ -43,7 +43,7 @@
+ #include <asm/mach/map.h>
+
+ #include <plat/board.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <video/omapdss.h>
+ #include <video/omap-panel-generic-dpi.h>
+ #include <video/omap-panel-dvi.h>
+@@ -57,6 +57,7 @@
+ #include "mux.h"
+ #include "sdram-micron-mt46h32m32lf-6.h"
+ #include "hsmmc.h"
++#include "board-flash.h"
+ #include "common-board-devices.h"
+
+ #define OVERO_GPIO_BT_XGATE 15
+@@ -510,8 +511,8 @@ static void __init overo_init(void)
+ omap_serial_init();
+ omap_sdrc_init(mt46h32m32lf6_sdrc_params,
+ mt46h32m32lf6_sdrc_params);
+- omap_nand_flash_init(0, overo_nand_partitions,
+- ARRAY_SIZE(overo_nand_partitions));
++ omap_nand_init(overo_nand_partitions,
++ ARRAY_SIZE(overo_nand_partitions), GPMC_CS_NUM + 1, 0, NULL);
+ usb_musb_init(NULL);
+ usbhs_init(&usbhs_bdata);
+ overo_spi_init();
+@@ -562,6 +563,7 @@ MACHINE_START(OVERO, "Gumstix Overo")
+ .map_io = omap3_map_io,
+ .init_early = omap35xx_init_early,
+ .init_irq = omap3_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = overo_init,
+ .timer = &omap3_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-rm680.c b/arch/arm/mach-omap2/board-rm680.c
+index 616fb39..a79d49e 100644
+--- a/arch/arm/mach-omap2/board-rm680.c
++++ b/arch/arm/mach-omap2/board-rm680.c
+@@ -25,7 +25,7 @@
+ #include <plat/mmc.h>
+ #include <plat/usb.h>
+ #include <plat/gpmc.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/onenand.h>
+
+ #include "mux.h"
+@@ -149,6 +149,7 @@ MACHINE_START(NOKIA_RM680, "Nokia RM-680 board")
+ .map_io = omap3_map_io,
+ .init_early = omap3630_init_early,
+ .init_irq = omap3_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = rm680_init,
+ .timer = &omap3_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
+index c15c5c9..c142ad7 100644
+--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
++++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
+@@ -15,6 +15,7 @@
+ #include <linux/input/matrix_keypad.h>
+ #include <linux/spi/spi.h>
+ #include <linux/wl12xx.h>
++#include <linux/spi/tsc2005.h>
+ #include <linux/i2c.h>
+ #include <linux/i2c/twl.h>
+ #include <linux/clk.h>
+@@ -27,7 +28,7 @@
+
+ #include <plat/mcspi.h>
+ #include <plat/board.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/dma.h>
+ #include <plat/gpmc.h>
+ #include <plat/onenand.h>
+@@ -58,6 +59,9 @@
+
+ #define RX51_USB_TRANSCEIVER_RST_GPIO 67
+
++#define RX51_TSC2005_RESET_GPIO 104
++#define RX51_TSC2005_IRQ_GPIO 100
++
+ /* list all spi devices here */
+ enum {
+ RX51_SPI_WL1251,
+@@ -66,6 +70,7 @@ enum {
+ };
+
+ static struct wl12xx_platform_data wl1251_pdata;
++static struct tsc2005_platform_data tsc2005_pdata;
+
+ #if defined(CONFIG_SENSORS_TSL2563) || defined(CONFIG_SENSORS_TSL2563_MODULE)
+ static struct tsl2563_platform_data rx51_tsl2563_platform_data = {
+@@ -133,17 +138,14 @@ static struct lp5523_platform_data rx51_lp5523_platform_data = {
+
+ static struct omap2_mcspi_device_config wl1251_mcspi_config = {
+ .turbo_mode = 0,
+- .single_channel = 1,
+ };
+
+ static struct omap2_mcspi_device_config mipid_mcspi_config = {
+ .turbo_mode = 0,
+- .single_channel = 1,
+ };
+
+ static struct omap2_mcspi_device_config tsc2005_mcspi_config = {
+ .turbo_mode = 0,
+- .single_channel = 1,
+ };
+
+ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
+@@ -167,10 +169,10 @@ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
+ .modalias = "tsc2005",
+ .bus_num = 1,
+ .chip_select = 0,
+- /* .irq = OMAP_GPIO_IRQ(RX51_TSC2005_IRQ_GPIO),*/
++ .irq = OMAP_GPIO_IRQ(RX51_TSC2005_IRQ_GPIO),
+ .max_speed_hz = 6000000,
+ .controller_data = &tsc2005_mcspi_config,
+- /* .platform_data = &tsc2005_config,*/
++ .platform_data = &tsc2005_pdata,
+ },
+ };
+
+@@ -1086,6 +1088,42 @@ error:
+ */
+ }
+
++static struct tsc2005_platform_data tsc2005_pdata = {
++ .ts_pressure_max = 2048,
++ .ts_pressure_fudge = 2,
++ .ts_x_max = 4096,
++ .ts_x_fudge = 4,
++ .ts_y_max = 4096,
++ .ts_y_fudge = 7,
++ .ts_x_plate_ohm = 280,
++ .esd_timeout_ms = 8000,
++};
++
++static void rx51_tsc2005_set_reset(bool enable)
++{
++ gpio_set_value(RX51_TSC2005_RESET_GPIO, enable);
++}
++
++static void __init rx51_init_tsc2005(void)
++{
++ int r;
++
++ r = gpio_request_one(RX51_TSC2005_IRQ_GPIO, GPIOF_IN, "tsc2005 IRQ");
++ if (r < 0) {
++ printk(KERN_ERR "unable to get %s GPIO\n", "tsc2005 IRQ");
++ rx51_peripherals_spi_board_info[RX51_SPI_TSC2005].irq = 0;
++ }
++
++ r = gpio_request_one(RX51_TSC2005_RESET_GPIO, GPIOF_OUT_INIT_HIGH,
++ "tsc2005 reset");
++ if (r >= 0) {
++ tsc2005_pdata.set_reset = rx51_tsc2005_set_reset;
++ } else {
++ printk(KERN_ERR "unable to get %s GPIO\n", "tsc2005 reset");
++ tsc2005_pdata.esd_timeout_ms = 0;
++ }
++}
++
+ void __init rx51_peripherals_init(void)
+ {
+ rx51_i2c_init();
+@@ -1094,6 +1132,7 @@ void __init rx51_peripherals_init(void)
+ board_smc91x_init();
+ rx51_add_gpio_keys();
+ rx51_init_wl1251();
++ rx51_init_tsc2005();
+ rx51_init_si4713();
+ spi_register_board_info(rx51_peripherals_spi_board_info,
+ ARRAY_SIZE(rx51_peripherals_spi_board_info));
+diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
+index 4af7c4b..4e3c096 100644
+--- a/arch/arm/mach-omap2/board-rx51.c
++++ b/arch/arm/mach-omap2/board-rx51.c
+@@ -25,7 +25,7 @@
+
+ #include <plat/mcspi.h>
+ #include <plat/board.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/dma.h>
+ #include <plat/gpmc.h>
+ #include <plat/usb.h>
+@@ -127,6 +127,7 @@ MACHINE_START(NOKIA_RX51, "Nokia RX-51 board")
+ .map_io = omap3_map_io,
+ .init_early = omap3430_init_early,
+ .init_irq = omap3_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = rx51_init,
+ .timer = &omap3_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-ti8168evm.c b/arch/arm/mach-omap2/board-ti8168evm.c
+index e6ee884..5b6ad6e 100644
+--- a/arch/arm/mach-omap2/board-ti8168evm.c
++++ b/arch/arm/mach-omap2/board-ti8168evm.c
+@@ -1,5 +1,5 @@
+ /*
+- * Code for TI8168 EVM.
++ * Code for TI8168/TI8148 EVM.
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc. - http://www.ti.com/
+ *
+@@ -22,30 +22,44 @@
+
+ #include <plat/irqs.h>
+ #include <plat/board.h>
+-#include <plat/common.h>
++#include "common.h"
++#include <plat/usb.h>
+
+-static struct omap_board_config_kernel ti8168_evm_config[] __initdata = {
++static struct omap_musb_board_data musb_board_data = {
++ .set_phy_power = ti81xx_musb_phy_power,
++ .interface_type = MUSB_INTERFACE_ULPI,
++ .mode = MUSB_OTG,
++ .power = 500,
+ };
+
+-static void __init ti8168_evm_init(void)
++static struct omap_board_config_kernel ti81xx_evm_config[] __initdata = {
++};
++
++static void __init ti81xx_evm_init(void)
+ {
+ omap_serial_init();
+ omap_sdrc_init(NULL, NULL);
+- omap_board_config = ti8168_evm_config;
+- omap_board_config_size = ARRAY_SIZE(ti8168_evm_config);
+-}
+-
+-static void __init ti8168_evm_map_io(void)
+-{
+- omapti816x_map_common_io();
++ omap_board_config = ti81xx_evm_config;
++ omap_board_config_size = ARRAY_SIZE(ti81xx_evm_config);
++ usb_musb_init(&musb_board_data);
+ }
+
+ MACHINE_START(TI8168EVM, "ti8168evm")
+ /* Maintainer: Texas Instruments */
+ .atag_offset = 0x100,
+- .map_io = ti8168_evm_map_io,
+- .init_early = ti816x_init_early,
+- .init_irq = ti816x_init_irq,
++ .map_io = ti81xx_map_io,
++ .init_early = ti81xx_init_early,
++ .init_irq = ti81xx_init_irq,
++ .timer = &omap3_timer,
++ .init_machine = ti81xx_evm_init,
++MACHINE_END
++
++MACHINE_START(TI8148EVM, "ti8148evm")
++ /* Maintainer: Texas Instruments */
++ .atag_offset = 0x100,
++ .map_io = ti81xx_map_io,
++ .init_early = ti81xx_init_early,
++ .init_irq = ti81xx_init_irq,
+ .timer = &omap3_timer,
+- .init_machine = ti8168_evm_init,
++ .init_machine = ti81xx_evm_init,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/board-zoom-display.c b/arch/arm/mach-omap2/board-zoom-display.c
+index d4683ba..2818290 100644
+--- a/arch/arm/mach-omap2/board-zoom-display.c
++++ b/arch/arm/mach-omap2/board-zoom-display.c
+@@ -117,7 +117,6 @@ static struct omap_dss_board_info zoom_dss_data = {
+
+ static struct omap2_mcspi_device_config dss_lcd_mcspi_config = {
+ .turbo_mode = 1,
+- .single_channel = 1, /* 0: slave, 1: master */
+ };
+
+ static struct spi_board_info nec_8048_spi_board_info[] __initdata = {
+diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c
+index 6d0aa4f..8d7ce11 100644
+--- a/arch/arm/mach-omap2/board-zoom-peripherals.c
++++ b/arch/arm/mach-omap2/board-zoom-peripherals.c
+@@ -24,7 +24,7 @@
+ #include <asm/mach/arch.h>
+ #include <asm/mach/map.h>
+
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/usb.h>
+
+ #include <mach/board-zoom.h>
+diff --git a/arch/arm/mach-omap2/board-zoom.c b/arch/arm/mach-omap2/board-zoom.c
+index be6684d..fb6d606 100644
+--- a/arch/arm/mach-omap2/board-zoom.c
++++ b/arch/arm/mach-omap2/board-zoom.c
+@@ -21,7 +21,7 @@
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
+
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/board.h>
+ #include <plat/usb.h>
+
+@@ -114,8 +114,9 @@ static void __init omap_zoom_init(void)
+ usbhs_init(&usbhs_bdata);
+ }
+
+- board_nand_init(zoom_nand_partitions, ARRAY_SIZE(zoom_nand_partitions),
+- ZOOM_NAND_CS, NAND_BUSWIDTH_16);
++ omap_nand_init(zoom_nand_partitions,
++ ARRAY_SIZE(zoom_nand_partitions), ZOOM_NAND_CS,
++ NAND_BUSWIDTH_16, &nand_default_timings);
+ zoom_debugboard_init();
+ zoom_peripherals_init();
+
+@@ -135,6 +136,7 @@ MACHINE_START(OMAP_ZOOM2, "OMAP Zoom2 board")
+ .map_io = omap3_map_io,
+ .init_early = omap3430_init_early,
+ .init_irq = omap3_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = omap_zoom_init,
+ .timer = &omap3_timer,
+ MACHINE_END
+@@ -145,6 +147,7 @@ MACHINE_START(OMAP_ZOOM3, "OMAP Zoom3 board")
+ .map_io = omap3_map_io,
+ .init_early = omap3630_init_early,
+ .init_irq = omap3_init_irq,
++ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = omap_zoom_init,
+ .timer = &omap3_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-omap2/clkt_dpll.c b/arch/arm/mach-omap2/clkt_dpll.c
+index e069a9b..a81abea 100644
+--- a/arch/arm/mach-omap2/clkt_dpll.c
++++ b/arch/arm/mach-omap2/clkt_dpll.c
+@@ -206,13 +206,9 @@ void omap2_init_dpll_parent(struct clk *clk)
+ if (v == OMAP2XXX_EN_DPLL_LPBYPASS ||
+ v == OMAP2XXX_EN_DPLL_FRBYPASS)
+ clk_reparent(clk, dd->clk_bypass);
+- } else if (cpu_is_omap34xx()) {
++ } else if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
+ if (v == OMAP3XXX_EN_DPLL_LPBYPASS ||
+- v == OMAP3XXX_EN_DPLL_FRBYPASS)
+- clk_reparent(clk, dd->clk_bypass);
+- } else if (cpu_is_omap44xx()) {
+- if (v == OMAP4XXX_EN_DPLL_LPBYPASS ||
+- v == OMAP4XXX_EN_DPLL_FRBYPASS ||
++ v == OMAP3XXX_EN_DPLL_FRBYPASS ||
+ v == OMAP4XXX_EN_DPLL_MNBYPASS)
+ clk_reparent(clk, dd->clk_bypass);
+ }
+@@ -252,13 +248,9 @@ u32 omap2_get_dpll_rate(struct clk *clk)
+ if (v == OMAP2XXX_EN_DPLL_LPBYPASS ||
+ v == OMAP2XXX_EN_DPLL_FRBYPASS)
+ return dd->clk_bypass->rate;
+- } else if (cpu_is_omap34xx()) {
++ } else if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
+ if (v == OMAP3XXX_EN_DPLL_LPBYPASS ||
+- v == OMAP3XXX_EN_DPLL_FRBYPASS)
+- return dd->clk_bypass->rate;
+- } else if (cpu_is_omap44xx()) {
+- if (v == OMAP4XXX_EN_DPLL_LPBYPASS ||
+- v == OMAP4XXX_EN_DPLL_FRBYPASS ||
++ v == OMAP3XXX_EN_DPLL_FRBYPASS ||
+ v == OMAP4XXX_EN_DPLL_MNBYPASS)
+ return dd->clk_bypass->rate;
+ }
+diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
+index 1f3481f..f57ed5b 100644
+--- a/arch/arm/mach-omap2/clock.c
++++ b/arch/arm/mach-omap2/clock.c
+@@ -35,7 +35,7 @@
+ #include "cm-regbits-24xx.h"
+ #include "cm-regbits-34xx.h"
+
+-u8 cpu_mask;
++u16 cpu_mask;
+
+ /*
+ * clkdm_control: if true, then when a clock is enabled in the
+diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
+index 2311bc2..3f818ba 100644
+--- a/arch/arm/mach-omap2/clock.h
++++ b/arch/arm/mach-omap2/clock.h
+@@ -37,9 +37,6 @@
+
+ /* OMAP4xxx CM_CLKMODE_DPLL*.EN_*_DPLL bits - for omap2_get_dpll_rate() */
+ #define OMAP4XXX_EN_DPLL_MNBYPASS 0x4
+-#define OMAP4XXX_EN_DPLL_LPBYPASS 0x5
+-#define OMAP4XXX_EN_DPLL_FRBYPASS 0x6
+-#define OMAP4XXX_EN_DPLL_LOCKED 0x7
+
+ /* CM_CLKEN_PLL*.EN* bit values - not all are available for every DPLL */
+ #define DPLL_LOW_POWER_STOP 0x1
+@@ -132,7 +129,7 @@ void omap2_clk_print_new_rates(const char *hfclkin_ck_name,
+ const char *core_ck_name,
+ const char *mpu_ck_name);
+
+-extern u8 cpu_mask;
++extern u16 cpu_mask;
+
+ extern const struct clkops clkops_omap2_dflt_wait;
+ extern const struct clkops clkops_dummy;
+diff --git a/arch/arm/mach-omap2/clock33xx.h b/arch/arm/mach-omap2/clock33xx.h
+new file mode 100644
+index 0000000..8409288
+--- /dev/null
++++ b/arch/arm/mach-omap2/clock33xx.h
+@@ -0,0 +1,37 @@
++/*
++ * AM33XX clock function prototypes and macros.
++ *
++ * Copyright (C) 2011 Texas Instruments, Inc. - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __ARCH_ARM_MACH_OMAP2_CLOCK33XX_H
++#define __ARCH_ARM_MACH_OMAP2_CLOCK33XX_H
++
++#define AM33XX_MAX_DPLL_MULT 2047
++#define AM33XX_MAX_DPLL_DIV 128
++
++
++int am33xx_clk_init(void);
++
++/* TRM ERRATA: The default parent (TCLKIN) of timers 3 & 6 may not
++ always be physically present; in that case enabling the
++ clock through HWMOD fails with the default parent, while
++ the timer probe assumes the clock is already enabled and
++ crashes when accessing the timer 3 & 6 registers.
++ Fix this by setting the parent of both timers to the
++ master oscillator clock.
++ */
++static inline void am33xx_init_timer_parent(struct clk *clk)
++{
++ omap2_clksel_set_parent(clk, clk->parent);
++}
++#endif
+diff --git a/arch/arm/mach-omap2/clock33xx_data.c b/arch/arm/mach-omap2/clock33xx_data.c
+new file mode 100644
+index 0000000..a3d7776
+--- /dev/null
++++ b/arch/arm/mach-omap2/clock33xx_data.c
+@@ -0,0 +1,2233 @@
++/*
++ * AM33XX Clock data
++ *
++ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/kernel.h>
++#include <linux/list.h>
++#include <linux/clk.h>
++#include <plat/clkdev_omap.h>
++
++#include "control.h"
++#include "clock.h"
++#include "clock33xx.h"
++#include "cm.h"
++#include "cm33xx.h"
++#include "cm-regbits-33xx.h"
++#include "prm.h"
++
++/* Modulemode control */
++#define AM33XX_MODULEMODE_HWCTRL 0
++#define AM33XX_MODULEMODE_SWCTRL 1
++
++/* Root clocks */
++static struct clk clk_32768_ck = {
++ .name = "clk_32768_ck",
++ .rate = 32768,
++ .ops = &clkops_null,
++};
++
++/* On-Chip 32KHz RC OSC */
++static struct clk clk_rc32k_ck = {
++ .name = "clk_rc32k_ck",
++ .rate = 32000,
++ .ops = &clkops_null,
++};
++
++/* Crystal input clks */
++static struct clk virt_19_2m_ck = {
++ .name = "virt_19_2m_ck",
++ .rate = 19200000,
++ .ops = &clkops_null,
++};
++
++static struct clk virt_24m_ck = {
++ .name = "virt_24m_ck",
++ .rate = 24000000,
++ .ops = &clkops_null,
++};
++
++static struct clk virt_25m_ck = {
++ .name = "virt_25m_ck",
++ .rate = 25000000,
++ .ops = &clkops_null,
++};
++
++static struct clk virt_26m_ck = {
++ .name = "virt_26m_ck",
++ .rate = 26000000,
++ .ops = &clkops_null,
++};
++
++static struct clk tclkin_ck = {
++ .name = "tclkin_ck",
++ .rate = 12000000,
++ .ops = &clkops_null,
++};
++
++static const struct clksel_rate div_1_0_rates[] = {
++ { .div = 1, .val = 0, .flags = RATE_IN_AM33XX },
++ { .div = 0 },
++};
++
++static const struct clksel_rate div_1_1_rates[] = {
++ { .div = 1, .val = 1, .flags = RATE_IN_AM33XX },
++ { .div = 0 },
++};
++
++static const struct clksel_rate div_1_2_rates[] = {
++ { .div = 1, .val = 2, .flags = RATE_IN_AM33XX },
++ { .div = 0 },
++};
++
++static const struct clksel_rate div_1_3_rates[] = {
++ { .div = 1, .val = 3, .flags = RATE_IN_AM33XX },
++ { .div = 0 },
++};
++
++static const struct clksel_rate div_1_4_rates[] = {
++ { .div = 1, .val = 4, .flags = RATE_IN_AM33XX },
++ { .div = 0 },
++};
++
++static const struct clksel_rate div31_1to31_rates[] = {
++ { .div = 1, .val = 1, .flags = RATE_IN_AM33XX },
++ { .div = 2, .val = 2, .flags = RATE_IN_AM33XX },
++ { .div = 3, .val = 3, .flags = RATE_IN_AM33XX },
++ { .div = 4, .val = 4, .flags = RATE_IN_AM33XX },
++ { .div = 5, .val = 5, .flags = RATE_IN_AM33XX },
++ { .div = 6, .val = 6, .flags = RATE_IN_AM33XX },
++ { .div = 7, .val = 7, .flags = RATE_IN_AM33XX },
++ { .div = 8, .val = 8, .flags = RATE_IN_AM33XX },
++ { .div = 9, .val = 9, .flags = RATE_IN_AM33XX },
++ { .div = 10, .val = 10, .flags = RATE_IN_AM33XX },
++ { .div = 11, .val = 11, .flags = RATE_IN_AM33XX },
++ { .div = 12, .val = 12, .flags = RATE_IN_AM33XX },
++ { .div = 13, .val = 13, .flags = RATE_IN_AM33XX },
++ { .div = 14, .val = 14, .flags = RATE_IN_AM33XX },
++ { .div = 15, .val = 15, .flags = RATE_IN_AM33XX },
++ { .div = 16, .val = 16, .flags = RATE_IN_AM33XX },
++ { .div = 17, .val = 17, .flags = RATE_IN_AM33XX },
++ { .div = 18, .val = 18, .flags = RATE_IN_AM33XX },
++ { .div = 19, .val = 19, .flags = RATE_IN_AM33XX },
++ { .div = 20, .val = 20, .flags = RATE_IN_AM33XX },
++ { .div = 21, .val = 21, .flags = RATE_IN_AM33XX },
++ { .div = 22, .val = 22, .flags = RATE_IN_AM33XX },
++ { .div = 23, .val = 23, .flags = RATE_IN_AM33XX },
++ { .div = 24, .val = 24, .flags = RATE_IN_AM33XX },
++ { .div = 25, .val = 25, .flags = RATE_IN_AM33XX },
++ { .div = 26, .val = 26, .flags = RATE_IN_AM33XX },
++ { .div = 27, .val = 27, .flags = RATE_IN_AM33XX },
++ { .div = 28, .val = 28, .flags = RATE_IN_AM33XX },
++ { .div = 29, .val = 29, .flags = RATE_IN_AM33XX },
++ { .div = 30, .val = 30, .flags = RATE_IN_AM33XX },
++ { .div = 31, .val = 31, .flags = RATE_IN_AM33XX },
++ { .div = 0 },
++};
++
++/* Oscillator clock */
++/* 19.2, 24, 25 or 26 MHz */
++static const struct clksel sys_clkin_sel[] = {
++ { .parent = &virt_19_2m_ck, .rates = div_1_0_rates },
++ { .parent = &virt_24m_ck, .rates = div_1_1_rates },
++ { .parent = &virt_25m_ck, .rates = div_1_2_rates },
++ { .parent = &virt_26m_ck, .rates = div_1_3_rates },
++ { .parent = NULL },
++};
++
++/* sys_clk_in */
++static struct clk sys_clkin_ck = {
++ .name = "sys_clkin_ck",
++ .parent = &virt_24m_ck,
++ .init = &omap2_init_clksel_parent,
++ .clksel_reg = AM33XX_CTRL_REGADDR(0x40), /* CONTROL_STATUS */
++ .clksel_mask = (0x3 << 22),
++ .clksel = sys_clkin_sel,
++ .ops = &clkops_null,
++ .recalc = &omap2_clksel_recalc,
++};
++
++/* DPLL_CORE */
++static struct dpll_data dpll_core_dd = {
++ .mult_div1_reg = AM33XX_CM_CLKSEL_DPLL_CORE,
++ .clk_bypass = &sys_clkin_ck,
++ .clk_ref = &sys_clkin_ck,
++ .control_reg = AM33XX_CM_CLKMODE_DPLL_CORE,
++ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
++ .idlest_reg = AM33XX_CM_IDLEST_DPLL_CORE,
++ .mult_mask = AM33XX_DPLL_MULT_MASK,
++ .div1_mask = AM33XX_DPLL_DIV_MASK,
++ .enable_mask = AM33XX_DPLL_EN_MASK,
++ .idlest_mask = AM33XX_ST_DPLL_CLK_MASK,
++ .max_multiplier = AM33XX_MAX_DPLL_MULT,
++ .max_divider = AM33XX_MAX_DPLL_DIV,
++ .min_divider = 1,
++};
++
++/* CLKDCOLDO output */
++static struct clk dpll_core_ck = {
++ .name = "dpll_core_ck",
++ .parent = &sys_clkin_ck,
++ .dpll_data = &dpll_core_dd,
++ .init = &omap2_init_dpll_parent,
++ .ops = &clkops_omap3_core_dpll_ops,
++ .recalc = &omap3_dpll_recalc,
++};
++
++static struct clk dpll_core_x2_ck = {
++ .name = "dpll_core_x2_ck",
++ .parent = &dpll_core_ck,
++ .flags = CLOCK_CLKOUTX2,
++ .ops = &clkops_null,
++ .recalc = &omap3_clkoutx2_recalc,
++};
++
++
++static const struct clksel dpll_core_m4_div[] = {
++ { .parent = &dpll_core_x2_ck, .rates = div31_1to31_rates },
++ { .parent = NULL },
++};
++
++static struct clk dpll_core_m4_ck = {
++ .name = "dpll_core_m4_ck",
++ .parent = &dpll_core_x2_ck,
++ .clksel = dpll_core_m4_div,
++ .clksel_reg = AM33XX_CM_DIV_M4_DPLL_CORE,
++ .clksel_mask = AM33XX_HSDIVIDER_CLKOUT1_DIV_MASK,
++ .ops = &clkops_null,
++ .recalc = &omap2_clksel_recalc,
++ .round_rate = &omap2_clksel_round_rate,
++ .set_rate = &omap2_clksel_set_rate,
++};
++
++static const struct clksel dpll_core_m5_div[] = {
++ { .parent = &dpll_core_x2_ck, .rates = div31_1to31_rates },
++ { .parent = NULL },
++};
++
++static struct clk dpll_core_m5_ck = {
++ .name = "dpll_core_m5_ck",
++ .parent = &dpll_core_x2_ck,
++ .clksel = dpll_core_m5_div,
++ .clksel_reg = AM33XX_CM_DIV_M5_DPLL_CORE,
++ .clksel_mask = AM33XX_HSDIVIDER_CLKOUT2_DIV_MASK,
++ .ops = &clkops_null,
++ .recalc = &omap2_clksel_recalc,
++ .round_rate = &omap2_clksel_round_rate,
++ .set_rate = &omap2_clksel_set_rate,
++};
++
++static const struct clksel dpll_core_m6_div[] = {
++ { .parent = &dpll_core_x2_ck, .rates = div31_1to31_rates },
++ { .parent = NULL },
++};
++
++static struct clk dpll_core_m6_ck = {
++ .name = "dpll_core_m6_ck",
++ .parent = &dpll_core_x2_ck,
++ .clksel = dpll_core_m6_div,
++ .clksel_reg = AM33XX_CM_DIV_M6_DPLL_CORE,
++ .clksel_mask = AM33XX_HSDIVIDER_CLKOUT3_DIV_MASK,
++ .ops = &clkops_null,
++ .recalc = &omap2_clksel_recalc,
++ .round_rate = &omap2_clksel_round_rate,
++ .set_rate = &omap2_clksel_set_rate,
++};
++
++static struct clk sysclk1_ck = {
++ .name = "sysclk1_ck",
++ .parent = &dpll_core_m4_ck,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk sysclk2_ck = {
++ .name = "sysclk2_ck",
++ .parent = &dpll_core_m5_ck,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk core_clk_out = {
++ .name = "core_clk_out",
++ .parent = &dpll_core_m4_ck,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++/* DPLL_MPU */
++static struct dpll_data dpll_mpu_dd = {
++ .mult_div1_reg = AM33XX_CM_CLKSEL_DPLL_MPU,
++ .clk_bypass = &sys_clkin_ck,
++ .clk_ref = &sys_clkin_ck,
++ .control_reg = AM33XX_CM_CLKMODE_DPLL_MPU,
++ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
++ .idlest_reg = AM33XX_CM_IDLEST_DPLL_MPU,
++ .mult_mask = AM33XX_DPLL_MULT_MASK,
++ .div1_mask = AM33XX_DPLL_DIV_MASK,
++ .enable_mask = AM33XX_DPLL_EN_MASK,
++ .idlest_mask = AM33XX_ST_DPLL_CLK_MASK,
++ .max_multiplier = AM33XX_MAX_DPLL_MULT,
++ .max_divider = AM33XX_MAX_DPLL_DIV,
++ .min_divider = 1,
++};
++
++/* CLKOUT: fdpll/M2 */
++static struct clk dpll_mpu_ck = {
++ .name = "dpll_mpu_ck",
++ .parent = &sys_clkin_ck,
++ .dpll_data = &dpll_mpu_dd,
++ .init = &omap2_init_dpll_parent,
++ .ops = &clkops_omap3_noncore_dpll_ops,
++ .recalc = &omap3_dpll_recalc,
++ .round_rate = &omap2_dpll_round_rate,
++ .set_rate = &omap3_noncore_dpll_set_rate,
++};
++
++/*
++ * TODO: Add clksel here (sys_clkin, CORE_CLKOUTM6, PER_CLKOUTM2
++ * and ALT_CLK1/2)
++ */
++static const struct clksel dpll_mpu_m2_div[] = {
++ { .parent = &dpll_mpu_ck, .rates = div31_1to31_rates },
++ { .parent = NULL },
++};
++
++static struct clk dpll_mpu_m2_ck = {
++ .name = "dpll_mpu_m2_ck",
++ .parent = &dpll_mpu_ck,
++ .clksel = dpll_mpu_m2_div,
++ .clksel_reg = AM33XX_CM_DIV_M2_DPLL_MPU,
++ .clksel_mask = AM33XX_DPLL_CLKOUT_DIV_MASK,
++ .ops = &clkops_null,
++ .recalc = &omap2_clksel_recalc,
++ .round_rate = &omap2_clksel_round_rate,
++ .set_rate = &omap2_clksel_set_rate,
++};
++
++static struct clk mpu_fck = {
++ .name = "mpu_fck",
++ .clkdm_name = "mpu_clkdm",
++ .parent = &dpll_mpu_m2_ck,
++ .ops = &clkops_omap2_dflt,
++ .enable_reg = AM33XX_CM_MPU_MPU_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .recalc = &followparent_recalc,
++ .flags = ENABLE_ON_INIT,
++};
++
++/* DPLL_DDR */
++static struct dpll_data dpll_ddr_dd = {
++ .mult_div1_reg = AM33XX_CM_CLKSEL_DPLL_DDR,
++ .clk_bypass = &sys_clkin_ck,
++ .clk_ref = &sys_clkin_ck,
++ .control_reg = AM33XX_CM_CLKMODE_DPLL_DDR,
++ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
++ .idlest_reg = AM33XX_CM_IDLEST_DPLL_DDR,
++ .mult_mask = AM33XX_DPLL_MULT_MASK,
++ .div1_mask = AM33XX_DPLL_DIV_MASK,
++ .enable_mask = AM33XX_DPLL_EN_MASK,
++ .idlest_mask = AM33XX_ST_DPLL_CLK_MASK,
++ .max_multiplier = AM33XX_MAX_DPLL_MULT,
++ .max_divider = AM33XX_MAX_DPLL_DIV,
++ .min_divider = 1,
++};
++
++/* CLKOUT: fdpll/M2 */
++static struct clk dpll_ddr_ck = {
++ .name = "dpll_ddr_ck",
++ .parent = &sys_clkin_ck,
++ .dpll_data = &dpll_ddr_dd,
++ .init = &omap2_init_dpll_parent,
++ .ops = &clkops_null,
++ .recalc = &omap3_dpll_recalc,
++};
++
++/*
++ * TODO: Add clksel here (sys_clkin, CORE_CLKOUTM6, PER_CLKOUTM2
++ * and ALT_CLK1/2)
++ */
++static const struct clksel dpll_ddr_m2_div[] = {
++ { .parent = &dpll_ddr_ck, .rates = div31_1to31_rates },
++ { .parent = NULL },
++};
++
++static struct clk dpll_ddr_m2_ck = {
++ .name = "dpll_ddr_m2_ck",
++ .parent = &dpll_ddr_ck,
++ .clksel = dpll_ddr_m2_div,
++ .clksel_reg = AM33XX_CM_DIV_M2_DPLL_DDR,
++ .clksel_mask = AM33XX_DPLL_CLKOUT_DIV_MASK,
++ .ops = &clkops_null,
++ .recalc = &omap2_clksel_recalc,
++ .round_rate = &omap2_clksel_round_rate,
++ .set_rate = &omap2_clksel_set_rate,
++};
++
++static struct clk ddr_pll_clk = {
++ .name = "ddr_pll_clk",
++ .parent = &dpll_ddr_m2_ck,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk emif_fck = {
++ .name = "emif_fck",
++ .clkdm_name = "l3_clkdm",
++ .parent = &ddr_pll_clk,
++ .ops = &clkops_omap2_dflt,
++ .enable_reg = AM33XX_CM_PER_EMIF_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .fixed_div = 2,
++ .recalc = &omap_fixed_divisor_recalc,
++ .flags = ENABLE_ON_INIT,
++};
++
++/* DPLL_DISP */
++static struct dpll_data dpll_disp_dd = {
++ .mult_div1_reg = AM33XX_CM_CLKSEL_DPLL_DISP,
++ .clk_bypass = &sys_clkin_ck,
++ .clk_ref = &sys_clkin_ck,
++ .control_reg = AM33XX_CM_CLKMODE_DPLL_DISP,
++ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
++ .idlest_reg = AM33XX_CM_IDLEST_DPLL_DISP,
++ .mult_mask = AM33XX_DPLL_MULT_MASK,
++ .div1_mask = AM33XX_DPLL_DIV_MASK,
++ .enable_mask = AM33XX_DPLL_EN_MASK,
++ .idlest_mask = AM33XX_ST_DPLL_CLK_MASK,
++ .max_multiplier = AM33XX_MAX_DPLL_MULT,
++ .max_divider = AM33XX_MAX_DPLL_DIV,
++ .min_divider = 1,
++};
++
++/* CLKOUT: fdpll/M2 */
++static struct clk dpll_disp_ck = {
++ .name = "dpll_disp_ck",
++ .parent = &sys_clkin_ck,
++ .dpll_data = &dpll_disp_dd,
++ .init = &omap2_init_dpll_parent,
++ .ops = &clkops_null,
++ .recalc = &omap3_dpll_recalc,
++ .round_rate = &omap2_dpll_round_rate,
++ .set_rate = &omap3_noncore_dpll_set_rate,
++};
++
++/*
++ * TODO: Add clksel here (sys_clkin, CORE_CLKOUTM6, PER_CLKOUTM2
++ * and ALT_CLK1/2)
++ */
++static const struct clksel dpll_disp_m2_div[] = {
++ { .parent = &dpll_disp_ck, .rates = div31_1to31_rates },
++ { .parent = NULL },
++};
++
++static struct clk dpll_disp_m2_ck = {
++ .name = "dpll_disp_m2_ck",
++ .parent = &dpll_disp_ck,
++ .clksel = dpll_disp_m2_div,
++ .clksel_reg = AM33XX_CM_DIV_M2_DPLL_DISP,
++ .clksel_mask = AM33XX_DPLL_CLKOUT_DIV_MASK,
++ .ops = &clkops_null,
++ .recalc = &omap2_clksel_recalc,
++ .round_rate = &omap2_clksel_round_rate,
++ .set_rate = &omap2_clksel_set_rate,
++};
++
++static struct clk disp_pll_clk = {
++ .name = "disp_pll_clk",
++ .parent = &dpll_disp_m2_ck,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++/* DPLL_PER */
++static struct dpll_data dpll_per_dd = {
++ .mult_div1_reg = AM33XX_CM_CLKSEL_DPLL_PERIPH,
++ .clk_bypass = &sys_clkin_ck,
++ .clk_ref = &sys_clkin_ck,
++ .control_reg = AM33XX_CM_CLKMODE_DPLL_PER,
++ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
++ .idlest_reg = AM33XX_CM_IDLEST_DPLL_PER,
++ .mult_mask = AM33XX_DPLL_MULT_PERIPH_MASK,
++ .div1_mask = AM33XX_DPLL_PER_DIV_MASK,
++ .enable_mask = AM33XX_DPLL_EN_MASK,
++ .idlest_mask = AM33XX_ST_DPLL_CLK_MASK,
++ .max_multiplier = AM33XX_MAX_DPLL_MULT,
++ .max_divider = AM33XX_MAX_DPLL_DIV,
++ .min_divider = 1,
++ .flags = DPLL_J_TYPE,
++};
++
++/* CLKDCOLDO */
++static struct clk dpll_per_ck = {
++ .name = "dpll_per_ck",
++ .parent = &sys_clkin_ck,
++ .dpll_data = &dpll_per_dd,
++ .init = &omap2_init_dpll_parent,
++ .ops = &clkops_null,
++ .recalc = &omap3_dpll_recalc,
++ .round_rate = &omap2_dpll_round_rate,
++ .set_rate = &omap3_noncore_dpll_set_rate,
++};
++
++/* CLKOUT: fdpll/M2 */
++static const struct clksel dpll_per_m2_div[] = {
++ { .parent = &dpll_per_ck, .rates = div31_1to31_rates },
++ { .parent = NULL },
++};
++
++static struct clk dpll_per_m2_ck = {
++ .name = "dpll_per_m2_ck",
++ .parent = &dpll_per_ck,
++ .clksel = dpll_per_m2_div,
++ .clksel_reg = AM33XX_CM_DIV_M2_DPLL_PER,
++ .clksel_mask = AM33XX_DPLL_CLKOUT_DIV_MASK,
++ .ops = &clkops_null,
++ .recalc = &omap2_clksel_recalc,
++ .round_rate = &omap2_clksel_round_rate,
++ .set_rate = &omap2_clksel_set_rate,
++};
++
++static struct clk per_192mhz_clk = {
++ .name = "per_192mhz_clk",
++ .parent = &dpll_per_m2_ck,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk usb_pll_clk = {
++ .name = "usb_pll_clk",
++ .parent = &dpll_per_ck,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk core_100mhz_ck = {
++ .name = "core_100mhz_ck",
++ .parent = &sysclk1_ck,
++ .ops = &clkops_null,
++ .fixed_div = 2,
++ .recalc = &omap_fixed_divisor_recalc,
++};
++
++static struct clk l3_aon_gclk = {
++ .name = "l3_aon_gclk",
++ .parent = &sysclk1_ck,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk l4_wkup_aon_gclk = {
++ .name = "l4_wkup_aon_gclk",
++ .clkdm_name = "l4_wkup_aon_clkdm",
++ .parent = &sysclk1_ck,
++ .enable_reg = AM33XX_CM_L4_WKUP_AON_CLKSTCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_null,
++ .fixed_div = 2,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk l3_gclk = {
++ .name = "l3_gclk",
++ .parent = &sysclk1_ck,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk l3_ick = {
++ .name = "l3_ick",
++ .clkdm_name = "l3_clkdm",
++ .parent = &l3_gclk,
++ .enable_reg = AM33XX_CM_PER_L3_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .flags = ENABLE_ON_INIT,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk l3_instr_ick = {
++ .name = "l3_instr_ick",
++ .clkdm_name = "l3_clkdm",
++ .parent = &l3_gclk,
++ .enable_reg = AM33XX_CM_PER_L3_INSTR_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .flags = ENABLE_ON_INIT,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk l4_wkup_gclk = {
++ .name = "l4_wkup_gclk",
++ .parent = &sysclk1_ck,
++ .ops = &clkops_null,
++ .fixed_div = 2,
++ .recalc = &omap_fixed_divisor_recalc,
++};
++
++static struct clk l4hs_gclk = {
++ .name = "l4hs_gclk",
++ .parent = &sysclk1_ck,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk gfx_l3_gclk = {
++ .name = "gfx_l3_gclk",
++ .clkdm_name = "gfx_l3_clkdm",
++ .parent = &sysclk1_ck,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk debug_clka_gclk = {
++ .name = "debug_clka_gclk",
++ .parent = &sysclk1_ck,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk l4_rtc_gclk = {
++ .name = "l4_rtc_gclk",
++ .parent = &sysclk1_ck,
++ .ops = &clkops_null,
++ .fixed_div = 2,
++ .recalc = &omap_fixed_divisor_recalc,
++};
++
++static struct clk rtc_ick = {
++ .name = "rtc_ick",
++ .parent = &l4_rtc_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk l3s_gclk = {
++ .name = "l3s_gclk",
++ .parent = &core_100mhz_ck,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk l4fw_gclk = {
++ .name = "l4fw_gclk",
++ .parent = &core_100mhz_ck,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk l4ls_gclk = {
++ .name = "l4ls_gclk",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &core_100mhz_ck,
++ .enable_reg = AM33XX_CM_PER_L4LS_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk clk_24mhz = {
++ .name = "clk_24mhz",
++ .parent = &per_192mhz_clk,
++ .fixed_div = 8,
++ .ops = &clkops_null,
++ .recalc = &omap_fixed_divisor_recalc,
++};
++
++static struct clk l4_cefuse_gclk = {
++ .name = "l4_cefuse_gclk",
++ .parent = &core_100mhz_ck,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk cefuse_iclk = {
++ .name = "cefuse_iclk",
++ .clkdm_name = "l4_cefuse_clkdm",
++ .parent = &l4_cefuse_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk cefuse_fck = {
++ .name = "cefuse_fck",
++ .clkdm_name = "l4_cefuse_clkdm",
++ .parent = &sys_clkin_ck,
++ .enable_reg = AM33XX_CM_CEFUSE_CEFUSE_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk sysclk_div_ck = {
++ .name = "sysclk_div_ck",
++ .parent = &dpll_core_m4_ck,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk adc_tsc_fck = {
++ .name = "adc_tsc_fck",
++ .clkdm_name = "l4_wkup_clkdm",
++ .parent = &sys_clkin_ck,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk adc_tsc_ick = {
++ .name = "adc_tsc_ick",
++ .clkdm_name = "l4_wkup_clkdm",
++ .parent = &l4_wkup_gclk,
++ .enable_reg = AM33XX_CM_WKUP_ADC_TSC_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk aes0_fck = {
++ .name = "aes0_fck",
++ .clkdm_name = "l3_clkdm",
++ .parent = &l3_gclk,
++ .enable_reg = AM33XX_CM_PER_AES0_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++/*
++ * clkdiv32k is generated from clk_24mhz by a fixed division of 732.4219
++ */
++static struct clk clkdiv32k_ick = {
++ .name = "clkdiv32k_ick",
++ .clkdm_name = "clk_24mhz_clkdm",
++ .rate = 32768,
++ .parent = &clk_24mhz,
++ .enable_reg = AM33XX_CM_PER_CLKDIV32K_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++};
++
++static struct clk clk_32khz_ck = {
++ .name = "clk_32khz_ck",
++ .clkdm_name = "clk_24mhz_clkdm",
++ .parent = &clkdiv32k_ick,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk control_fck = {
++ .name = "control_fck",
++ .clkdm_name = "l4_wkup_clkdm",
++ .parent = &l4_wkup_gclk,
++ .enable_reg = AM33XX_CM_WKUP_CONTROL_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk dcan0_ick = {
++ .name = "dcan0_ick",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &l4ls_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk dcan0_fck = {
++ .name = "dcan0_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &sys_clkin_ck,
++ .enable_reg = AM33XX_CM_PER_DCAN0_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk dcan1_ick = {
++ .name = "dcan1_ick",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &l4ls_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk dcan1_fck = {
++ .name = "dcan1_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &sys_clkin_ck,
++ .enable_reg = AM33XX_CM_PER_DCAN1_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk debugss_ick = {
++ .name = "debugss_ick",
++ .clkdm_name = "l3_aon_clkdm",
++ .parent = &l3_aon_gclk,
++ .ops = &clkops_omap2_dflt,
++ .enable_reg = AM33XX_CM_WKUP_DEBUGSS_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk elm_fck = {
++ .name = "elm_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &l4ls_gclk,
++ .enable_reg = AM33XX_CM_PER_ELM_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk emif_fw_fck = {
++ .name = "emif_fw_fck",
++ .clkdm_name = "l4fw_clkdm",
++ .parent = &l4fw_gclk,
++ .enable_reg = AM33XX_CM_PER_EMIF_FW_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk epwmss0_fck = {
++ .name = "epwmss0_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &l4ls_gclk,
++ .enable_reg = AM33XX_CM_PER_EPWMSS0_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk epwmss1_fck = {
++ .name = "epwmss1_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &l4ls_gclk,
++ .enable_reg = AM33XX_CM_PER_EPWMSS1_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk epwmss2_fck = {
++ .name = "epwmss2_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &l4ls_gclk,
++ .enable_reg = AM33XX_CM_PER_EPWMSS2_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk gpmc_fck = {
++ .name = "gpmc_fck",
++ .clkdm_name = "l3s_clkdm",
++ .parent = &l3s_gclk,
++ .enable_reg = AM33XX_CM_PER_GPMC_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk i2c1_ick = {
++ .name = "i2c1_ick",
++ .clkdm_name = "l4_wkup_clkdm",
++ .parent = &l4_wkup_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk i2c1_fck = {
++ .name = "i2c1_fck",
++ .clkdm_name = "l4_wkup_clkdm",
++ .parent = &per_192mhz_clk,
++ .enable_reg = AM33XX_CM_WKUP_I2C0_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .fixed_div = 4,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &omap_fixed_divisor_recalc,
++};
++
++static struct clk i2c2_ick = {
++ .name = "i2c2_ick",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &l4ls_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk i2c2_fck = {
++ .name = "i2c2_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &per_192mhz_clk,
++ .enable_reg = AM33XX_CM_PER_I2C1_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .fixed_div = 4,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &omap_fixed_divisor_recalc,
++};
++
++static struct clk i2c3_ick = {
++ .name = "i2c3_ick",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &l4ls_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk i2c3_fck = {
++ .name = "i2c3_fck",
++ .parent = &per_192mhz_clk,
++ .enable_reg = AM33XX_CM_PER_I2C2_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .clkdm_name = "l4ls_clkdm",
++ .fixed_div = 4,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &omap_fixed_divisor_recalc,
++};
++
++static struct clk ieee5000_fck = {
++ .name = "ieee5000_fck",
++ .clkdm_name = "l3s_clkdm",
++ .parent = &l3s_gclk,
++ .enable_reg = AM33XX_CM_PER_IEEE5000_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk l4hs_ick = {
++ .name = "l4hs_ick",
++ .clkdm_name = "l4hs_clkdm",
++ .parent = &l4hs_gclk,
++ .enable_reg = AM33XX_CM_PER_L4HS_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .flags = ENABLE_ON_INIT,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk l4wkup_ick = {
++ .name = "l4wkup_ick",
++ .clkdm_name = "l4_wkup_aon_clkdm",
++ .parent = &l4_wkup_aon_gclk,
++ .enable_reg = AM33XX_CM_L4_WKUP_AON_CLKSTCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .flags = ENABLE_ON_INIT,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk l4fw_ick = {
++ .name = "l4fw_ick",
++ .clkdm_name = "l4fw_clkdm",
++ .parent = &core_100mhz_ck,
++ .enable_reg = AM33XX_CM_PER_L4FW_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .flags = ENABLE_ON_INIT,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk l4ls_ick = {
++ .name = "l4ls_ick",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &l4ls_gclk,
++ .enable_reg = AM33XX_CM_PER_L4LS_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .flags = ENABLE_ON_INIT,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk mailbox0_fck = {
++ .name = "mailbox0_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &l4ls_gclk,
++ .enable_reg = AM33XX_CM_PER_MAILBOX0_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk mcasp0_ick = {
++ .name = "mcasp0_ick",
++ .parent = &l3s_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk mcasp0_fck = {
++ .name = "mcasp0_fck",
++ .clkdm_name = "l3s_clkdm",
++ .parent = &sys_clkin_ck,
++ .enable_reg = AM33XX_CM_PER_MCASP0_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk mcasp1_ick = {
++ .name = "mcasp1_ick",
++ .parent = &l3s_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk mcasp1_fck = {
++ .name = "mcasp1_fck",
++ .clkdm_name = "l3s_clkdm",
++ .parent = &sys_clkin_ck,
++ .enable_reg = AM33XX_CM_PER_MCASP1_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk mlb_fck = {
++ .name = "mlb_fck",
++ .ops = &clkops_omap2_dflt,
++ .enable_reg = AM33XX_CM_PER_MLB_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .clkdm_name = "l3_clkdm",
++ .parent = &sysclk_div_ck,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk mmu_fck = {
++ .name = "mmu_fck",
++ .clkdm_name = "gfx_l3_clkdm",
++ .parent = &gfx_l3_gclk,
++ .ops = &clkops_omap2_dflt,
++ .enable_reg = AM33XX_CM_GFX_MMUDATA_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk ocmcram_ick = {
++ .name = "ocmcram_ick",
++ .clkdm_name = "l3_clkdm",
++ .parent = &l3_gclk,
++ .enable_reg = AM33XX_CM_PER_OCMCRAM_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk ocpwp_fck = {
++ .name = "ocpwp_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &l4ls_gclk,
++ .enable_reg = AM33XX_CM_PER_OCPWP_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk pka_fck = {
++ .name = "pka_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &l4ls_gclk,
++ .enable_reg = AM33XX_CM_PER_PKA_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk rng_fck = {
++ .name = "rng_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &l4ls_gclk,
++ .enable_reg = AM33XX_CM_PER_RNG_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk rtc_fck = {
++ .name = "rtc_fck",
++ .clkdm_name = "l4_rtc_clkdm",
++ .parent = &clk_32768_ck,
++ .enable_reg = AM33XX_CM_RTC_RTC_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk sha0_fck = {
++ .name = "sha0_fck",
++ .clkdm_name = "l3_clkdm",
++ .parent = &l3_gclk,
++ .enable_reg = AM33XX_CM_PER_SHA0_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk smartreflex0_ick = {
++ .name = "smartreflex0_ick",
++ .parent = &l4_wkup_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk smartreflex0_fck = {
++ .name = "smartreflex0_fck",
++ .clkdm_name = "l4_wkup_clkdm",
++ .parent = &sys_clkin_ck,
++ .enable_reg = AM33XX_CM_WKUP_SMARTREFLEX0_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk smartreflex1_ick = {
++ .name = "smartreflex1_ick",
++ .parent = &l4_wkup_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk smartreflex1_fck = {
++ .name = "smartreflex1_fck",
++ .clkdm_name = "l4_wkup_clkdm",
++ .parent = &sys_clkin_ck,
++ .enable_reg = AM33XX_CM_WKUP_SMARTREFLEX1_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk spi0_ick = {
++ .name = "spi0_ick",
++ .parent = &l4ls_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk spi0_fck = {
++ .name = "spi0_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &per_192mhz_clk,
++ .enable_reg = AM33XX_CM_PER_SPI0_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .fixed_div = 4,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &omap_fixed_divisor_recalc,
++};
++
++static struct clk spi1_ick = {
++ .name = "spi1_ick",
++ .parent = &l4ls_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk spi1_fck = {
++ .name = "spi1_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &per_192mhz_clk,
++ .enable_reg = AM33XX_CM_PER_SPI1_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .fixed_div = 4,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &omap_fixed_divisor_recalc,
++};
++
++static struct clk spinlock_fck = {
++ .name = "spinlock_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &l4ls_gclk,
++ .enable_reg = AM33XX_CM_PER_SPINLOCK_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk clk_32khz_timer = {
++ .name = "clk_32khz_timer",
++ .parent = &clk_32khz_ck,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++/* Timers */
++
++/* Secure Timer: Used only to disable the clocks and for completeness */
++static const struct clksel timer0_clkmux_sel[] = {
++ { .parent = &clk_rc32k_ck, .rates = div_1_0_rates },
++ { .parent = &clk_32khz_ck, .rates = div_1_1_rates },
++ { .parent = &sys_clkin_ck, .rates = div_1_2_rates },
++ { .parent = &tclkin_ck, .rates = div_1_3_rates },
++ { .parent = NULL },
++};
++
++static struct clk timer0_ick = {
++ .name = "timer0_ick",
++ .parent = &l4_wkup_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk timer0_fck = {
++ .name = "timer0_fck",
++ .clkdm_name = "l4_wkup_clkdm",
++ .parent = &clk_rc32k_ck,
++ .clksel = timer0_clkmux_sel,
++ .enable_reg = AM33XX_CM_WKUP_TIMER0_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .clksel_reg = AM33XX_CTRL_REGADDR(0x01BC),
++ .clksel_mask = (0x3 << 4),
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static const struct clksel timer1_clkmux_sel[] = {
++ { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
++ { .parent = &clk_32khz_ck, .rates = div_1_1_rates },
++ { .parent = &tclkin_ck, .rates = div_1_2_rates },
++ { .parent = &clk_rc32k_ck, .rates = div_1_3_rates },
++ { .parent = &clk_32768_ck, .rates = div_1_4_rates },
++ { .parent = NULL },
++};
++
++static struct clk timer1_ick = {
++ .name = "timer1_ick",
++ .parent = &l4_wkup_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk timer1_fck = {
++ .name = "timer1_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &sys_clkin_ck,
++ .init = &omap2_init_clksel_parent,
++ .clksel = timer1_clkmux_sel,
++ .enable_reg = AM33XX_CM_WKUP_TIMER1_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .clksel_reg = AM33XX_CLKSEL_TIMER1MS_CLK,
++ .clksel_mask = AM33XX_CLKSEL_0_2_MASK,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &omap2_clksel_recalc,
++};
++
++static const struct clksel timer2_to_7_clk_sel[] = {
++ { .parent = &tclkin_ck, .rates = div_1_0_rates },
++ { .parent = &sys_clkin_ck, .rates = div_1_1_rates },
++ { .parent = &clk_32khz_timer, .rates = div_1_2_rates },
++ { .parent = NULL },
++};
++
++static struct clk timer2_ick = {
++ .name = "timer2_ick",
++ .parent = &l4ls_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk timer2_fck = {
++ .name = "timer2_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &sys_clkin_ck,
++ .init = &omap2_init_clksel_parent,
++ .clksel = timer2_to_7_clk_sel,
++ .enable_reg = AM33XX_CM_PER_TIMER2_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .clksel_reg = AM33XX_CLKSEL_TIMER2_CLK,
++ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &omap2_clksel_recalc,
++};
++
++static struct clk timer3_ick = {
++ .name = "timer3_ick",
++ .parent = &l4ls_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk timer3_fck = {
++ .name = "timer3_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &sys_clkin_ck,
++ .init = &am33xx_init_timer_parent,
++ .clksel = timer2_to_7_clk_sel,
++ .enable_reg = AM33XX_CM_PER_TIMER3_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .clksel_reg = AM33XX_CLKSEL_TIMER3_CLK,
++ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &omap2_clksel_recalc,
++};
++
++static struct clk timer4_ick = {
++ .name = "timer4_ick",
++ .parent = &l4ls_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk timer4_fck = {
++ .name = "timer4_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &sys_clkin_ck,
++ .init = &omap2_init_clksel_parent,
++ .clksel = timer2_to_7_clk_sel,
++ .enable_reg = AM33XX_CM_PER_TIMER4_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .clksel_reg = AM33XX_CLKSEL_TIMER4_CLK,
++ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &omap2_clksel_recalc,
++};
++
++static struct clk timer5_ick = {
++ .name = "timer5_ick",
++ .parent = &l4ls_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk timer5_fck = {
++ .name = "timer5_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &sys_clkin_ck,
++ .init = &omap2_init_clksel_parent,
++ .clksel = timer2_to_7_clk_sel,
++ .enable_reg = AM33XX_CM_PER_TIMER5_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .clksel_reg = AM33XX_CLKSEL_TIMER5_CLK,
++ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &omap2_clksel_recalc,
++};
++
++static struct clk timer6_ick = {
++ .name = "timer6_ick",
++ .parent = &l4ls_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk timer6_fck = {
++ .name = "timer6_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &sys_clkin_ck,
++ .init = &am33xx_init_timer_parent,
++ .clksel = timer2_to_7_clk_sel,
++ .enable_reg = AM33XX_CM_PER_TIMER6_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .clksel_reg = AM33XX_CLKSEL_TIMER6_CLK,
++ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &omap2_clksel_recalc,
++};
++
++static struct clk timer7_ick = {
++ .name = "timer7_ick",
++ .parent = &l4ls_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk timer7_fck = {
++ .name = "timer7_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &sys_clkin_ck,
++ .init = &omap2_init_clksel_parent,
++ .clksel = timer2_to_7_clk_sel,
++ .enable_reg = AM33XX_CM_PER_TIMER7_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .clksel_reg = AM33XX_CLKSEL_TIMER7_CLK,
++ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &omap2_clksel_recalc,
++};
++
++static struct clk tpcc_ick = {
++ .name = "tpcc_ick",
++ .clkdm_name = "l3_clkdm",
++ .parent = &l3_gclk,
++ .enable_reg = AM33XX_CM_PER_TPCC_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk tptc0_ick = {
++ .name = "tptc0_ick",
++ .parent = &l3_gclk,
++ .clkdm_name = "l3_clkdm",
++ .enable_reg = AM33XX_CM_PER_TPTC0_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk tptc1_ick = {
++ .name = "tptc1_ick",
++ .clkdm_name = "l3_clkdm",
++ .parent = &l3_gclk,
++ .enable_reg = AM33XX_CM_PER_TPTC1_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk tptc2_ick = {
++ .name = "tptc2_ick",
++ .clkdm_name = "l3_clkdm",
++ .parent = &l3_gclk,
++ .enable_reg = AM33XX_CM_PER_TPTC2_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk uart1_ick = {
++ .name = "uart1_ick",
++ .parent = &l4_wkup_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk uart1_fck = {
++ .name = "uart1_fck",
++ .clkdm_name = "l4_wkup_clkdm",
++ .parent = &per_192mhz_clk,
++ .enable_reg = AM33XX_CM_WKUP_UART0_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .fixed_div = 4,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &omap_fixed_divisor_recalc,
++};
++
++static struct clk uart2_ick = {
++ .name = "uart2_ick",
++ .parent = &l4ls_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk uart2_fck = {
++ .name = "uart2_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &per_192mhz_clk,
++ .enable_reg = AM33XX_CM_PER_UART1_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .fixed_div = 4,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &omap_fixed_divisor_recalc,
++};
++
++static struct clk uart3_ick = {
++ .name = "uart3_ick",
++ .parent = &l4ls_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk uart3_fck = {
++ .name = "uart3_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &per_192mhz_clk,
++ .enable_reg = AM33XX_CM_PER_UART2_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .fixed_div = 4,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &omap_fixed_divisor_recalc,
++};
++
++static struct clk uart4_ick = {
++ .name = "uart4_ick",
++ .parent = &l4ls_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk uart4_fck = {
++ .name = "uart4_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &per_192mhz_clk,
++ .enable_reg = AM33XX_CM_PER_UART3_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .fixed_div = 4,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &omap_fixed_divisor_recalc,
++};
++
++static struct clk uart5_ick = {
++ .name = "uart5_ick",
++ .parent = &l4ls_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk uart5_fck = {
++ .name = "uart5_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &per_192mhz_clk,
++ .enable_reg = AM33XX_CM_PER_UART4_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .fixed_div = 4,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &omap_fixed_divisor_recalc,
++};
++
++static struct clk uart6_ick = {
++ .name = "uart6_ick",
++ .parent = &l4ls_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk uart6_fck = {
++ .name = "uart6_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &per_192mhz_clk,
++ .enable_reg = AM33XX_CM_PER_UART5_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .fixed_div = 4,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &omap_fixed_divisor_recalc,
++};
++
++static struct clk wkup_m3_fck = {
++ .name = "wkup_m3_fck",
++ .clkdm_name = "l4_wkup_aon_clkdm",
++ .parent = &l4_wkup_aon_gclk,
++ .enable_reg = AM33XX_CM_WKUP_WKUP_M3_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk cpsw_250mhz_clk = {
++ .name = "cpsw_250mhz_clk",
++ .clkdm_name = "l4hs_clkdm",
++ .parent = &sysclk2_ck,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk cpsw_125mhz_gclk = {
++ .name = "cpsw_125mhz_gclk",
++ .clkdm_name = "cpsw_125mhz_clkdm",
++ .parent = &sysclk2_ck,
++ .ops = &clkops_null,
++ .fixed_div = 2,
++ .recalc = &omap_fixed_divisor_recalc,
++};
++
++/*
++ * TODO: As per the clock tree, /2 is used at OPP50, but there is no
++ * register to configure this. At normal OPP, /5 is used: 250MHz/5 = 50MHz.
++ */
++static struct clk cpsw_50mhz_clk = {
++ .name = "cpsw_50mhz_clk",
++ .clkdm_name = "l4hs_clkdm",
++ .parent = &sysclk2_ck,
++ .ops = &clkops_null,
++ .fixed_div = 5,
++ .recalc = &omap_fixed_divisor_recalc,
++};
++
++static struct clk cpsw_5mhz_clk = {
++ .name = "cpsw_5mhz_clk",
++ .clkdm_name = "l4hs_clkdm",
++ .parent = &cpsw_50mhz_clk,
++ .ops = &clkops_null,
++ .fixed_div = 10,
++ .recalc = &omap_fixed_divisor_recalc,
++};
++
++static struct clk cpgmac0_ick = {
++ .name = "cpgmac0_ick",
++ .clkdm_name = "cpsw_125mhz_clkdm",
++ .ops = &clkops_omap2_dflt,
++ .enable_reg = AM33XX_CM_PER_CPGMAC0_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .parent = &cpsw_125mhz_gclk,
++ .recalc = &followparent_recalc,
++};
++
++static const struct clksel cpsw_cpts_rft_clkmux_sel[] = {
++ { .parent = &sysclk2_ck, .rates = div_1_0_rates },
++ { .parent = &sysclk1_ck, .rates = div_1_1_rates },
++ { .parent = NULL },
++};
++
++static struct clk cpsw_cpts_rft_clk = {
++ .name = "cpsw_cpts_rft_clk",
++ .clkdm_name = "l3_clkdm",
++ .parent = &dpll_core_m5_ck,
++ .clksel = cpsw_cpts_rft_clkmux_sel,
++ .clksel_reg = AM33XX_CM_CPTS_RFT_CLKSEL,
++ .clksel_mask = AM33XX_CLKSEL_0_0_MASK,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk usbotg_ick = {
++ .name = "usbotg_ick",
++ .clkdm_name = "l3s_clkdm",
++ .parent = &l3s_gclk,
++ .ops = &clkops_omap2_dflt,
++ .enable_reg = AM33XX_CM_PER_USB0_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk usbotg_fck = {
++ .name = "usbotg_fck",
++ .clkdm_name = "l3s_clkdm",
++ .parent = &usb_pll_clk,
++ .enable_reg = AM33XX_CM_CLKDCOLDO_DPLL_PER,
++ .enable_bit = AM33XX_ST_DPLL_CLKDCOLDO_SHIFT,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++/* gpio */
++static const struct clksel gpio0_dbclk_mux_sel[] = {
++ { .parent = &clk_rc32k_ck, .rates = div_1_0_rates },
++ { .parent = &clk_32768_ck, .rates = div_1_1_rates },
++ { .parent = &clk_32khz_timer, .rates = div_1_2_rates },
++ { .parent = NULL },
++};
++
++static struct clk gpio0_dbclk_mux_ck = {
++ .name = "gpio0_dbclk_mux_ck",
++ .clkdm_name = "l4_wkup_clkdm",
++ .parent = &clk_rc32k_ck,
++ .init = &omap2_init_clksel_parent,
++ .clksel = gpio0_dbclk_mux_sel,
++ .clksel_reg = AM33XX_CLKSEL_GPIO0_DBCLK,
++ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
++ .ops = &clkops_null,
++ .recalc = &omap2_clksel_recalc,
++};
++
++static struct clk gpio0_dbclk = {
++ .name = "gpio0_dbclk",
++ .clkdm_name = "l4_wkup_clkdm",
++ .parent = &gpio0_dbclk_mux_ck,
++ .enable_reg = AM33XX_CM_WKUP_GPIO0_CLKCTRL,
++ .enable_bit = AM33XX_OPTFCLKEN_GPIO0_GDBCLK_SHIFT,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk gpio0_ick = {
++ .name = "gpio0_ick",
++ .clkdm_name = "l4_wkup_clkdm",
++ .parent = &l4_wkup_gclk,
++ .enable_reg = AM33XX_CM_WKUP_GPIO0_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk gpio1_dbclk = {
++ .name = "gpio1_dbclk",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &clkdiv32k_ick,
++ .enable_reg = AM33XX_CM_PER_GPIO1_CLKCTRL,
++ .enable_bit = AM33XX_OPTFCLKEN_GPIO_1_GDBCLK_SHIFT,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk gpio1_ick = {
++ .name = "gpio1_ick",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &l4ls_gclk,
++ .enable_reg = AM33XX_CM_PER_GPIO1_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk gpio2_dbclk = {
++ .name = "gpio2_dbclk",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &clkdiv32k_ick,
++ .enable_reg = AM33XX_CM_PER_GPIO2_CLKCTRL,
++ .enable_bit = AM33XX_OPTFCLKEN_GPIO_2_GDBCLK_SHIFT,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk gpio2_ick = {
++ .name = "gpio2_ick",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &l4ls_gclk,
++ .enable_reg = AM33XX_CM_PER_GPIO2_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk gpio3_dbclk = {
++ .name = "gpio3_dbclk",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &clkdiv32k_ick,
++ .enable_reg = AM33XX_CM_PER_GPIO3_CLKCTRL,
++ .enable_bit = AM33XX_OPTFCLKEN_GPIO_3_GDBCLK_SHIFT,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk gpio3_ick = {
++ .name = "gpio3_ick",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &l4ls_gclk,
++ .enable_reg = AM33XX_CM_PER_GPIO3_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static const struct clksel pruss_ocp_clk_mux_sel[] = {
++ { .parent = &l3_gclk, .rates = div_1_0_rates },
++ { .parent = &disp_pll_clk, .rates = div_1_1_rates },
++ { .parent = NULL },
++};
++
++static struct clk pruss_ocp_gclk = {
++ .name = "pruss_ocp_gclk",
++ .parent = &l3_gclk,
++ .init = &omap2_init_clksel_parent,
++ .clksel = pruss_ocp_clk_mux_sel,
++ .clksel_reg = AM33XX_CLKSEL_PRUSS_OCP_CLK,
++ .clksel_mask = AM33XX_CLKSEL_0_0_MASK,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk pruss_iep_gclk = {
++ .name = "pruss_iep_gclk",
++ .clkdm_name = "pruss_ocp_clkdm",
++ .parent = &l3_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk pruss_uart_gclk = {
++ .name = "pruss_uart_gclk",
++ .clkdm_name = "pruss_ocp_clkdm",
++ .parent = &per_192mhz_clk,
++ .enable_reg = AM33XX_CM_PER_PRUSS_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk lcdc_ick = {
++ .name = "lcdc_ick",
++ .clkdm_name = "l3_clkdm",
++ .parent = &sysclk1_ck,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static const struct clksel lcd_clk_mux_sel[] = {
++ { .parent = &disp_pll_clk, .rates = div_1_0_rates },
++ { .parent = &sysclk2_ck, .rates = div_1_1_rates },
++ { .parent = &per_192mhz_clk, .rates = div_1_2_rates },
++ { .parent = NULL },
++};
++
++static struct clk lcd_gclk = {
++ .name = "lcd_gclk",
++ .parent = &disp_pll_clk,
++ .init = &omap2_init_clksel_parent,
++ .clksel = lcd_clk_mux_sel,
++ .clksel_reg = AM33XX_CLKSEL_LCDC_PIXEL_CLK,
++ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk lcdc_fck = {
++ .name = "lcdc_fck",
++ .clkdm_name = "lcdc_clkdm",
++ .parent = &lcd_gclk,
++ .enable_reg = AM33XX_CM_PER_LCDC_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk mmc_clk = {
++ .name = "mmc_clk",
++ .parent = &per_192mhz_clk,
++ .ops = &clkops_null,
++ .fixed_div = 2,
++ .recalc = &omap_fixed_divisor_recalc,
++};
++
++static struct clk mmc0_ick = {
++ .name = "mmc0_ick",
++ .parent = &l4ls_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk mmc0_fck = {
++ .name = "mmc0_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &mmc_clk,
++ .enable_reg = AM33XX_CM_PER_MMC0_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk mmc1_ick = {
++ .name = "mmc1_ick",
++ .parent = &l4ls_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk mmc1_fck = {
++ .name = "mmc1_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .parent = &mmc_clk,
++ .enable_reg = AM33XX_CM_PER_MMC1_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk mmc2_ick = {
++ .name = "mmc2_ick",
++ .parent = &l4ls_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk mmc2_fck = {
++ .name = "mmc2_fck",
++ .clkdm_name = "l3s_clkdm",
++ .parent = &mmc_clk,
++ .enable_reg = AM33XX_CM_PER_MMC2_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static const struct clksel gfx_clksel_sel[] = {
++ { .parent = &sysclk1_ck, .rates = div_1_0_rates },
++ { .parent = &per_192mhz_clk, .rates = div_1_1_rates },
++ { .parent = NULL },
++};
++
++static struct clk gfx_fclk_clksel_ck = {
++ .name = "gfx_fclk_clksel_ck",
++ .parent = &sysclk1_ck,
++ .clksel = gfx_clksel_sel,
++ .ops = &clkops_null,
++ .clksel_reg = AM33XX_CLKSEL_GFX_FCLK,
++ .clksel_mask = AM33XX_CLKSEL_GFX_FCLK_MASK,
++ .recalc = &omap2_clksel_recalc,
++};
++
++static const struct clksel_rate div_1_0_2_1_rates[] = {
++ { .div = 1, .val = 0, .flags = RATE_IN_AM33XX },
++ { .div = 2, .val = 1, .flags = RATE_IN_AM33XX },
++ { .div = 0 },
++};
++
++static const struct clksel gfx_div_sel[] = {
++ { .parent = &gfx_fclk_clksel_ck, .rates = div_1_0_2_1_rates },
++ { .parent = NULL },
++};
++
++static struct clk gfx_ick = {
++ .name = "gfx_ick",
++ .clkdm_name = "gfx_l3_clkdm",
++ .parent = &gfx_l3_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk gfx_fclk = {
++ .name = "gfx_fclk",
++ .clkdm_name = "gfx_l3_clkdm",
++ .parent = &gfx_fclk_clksel_ck,
++ .clksel = gfx_div_sel,
++ .enable_reg = AM33XX_CM_GFX_GFX_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .clksel_reg = AM33XX_CLKSEL_GFX_FCLK,
++ .clksel_mask = AM33XX_CLKSEL_0_0_MASK,
++ .recalc = &omap2_clksel_recalc,
++ .round_rate = &omap2_clksel_round_rate,
++ .set_rate = &omap2_clksel_set_rate,
++ .ops = &clkops_omap2_dflt,
++};
++
++static const struct clksel sysclkout_pre_sel[] = {
++ { .parent = &clk_32768_ck, .rates = div_1_0_rates },
++ { .parent = &l3_gclk, .rates = div_1_1_rates },
++ { .parent = &ddr_pll_clk, .rates = div_1_2_rates },
++ { .parent = &per_192mhz_clk, .rates = div_1_3_rates },
++ { .parent = &lcd_gclk, .rates = div_1_4_rates },
++ { .parent = NULL },
++};
++
++static struct clk sysclkout_pre_ck = {
++ .name = "sysclkout_pre_ck",
++ .parent = &clk_32768_ck,
++ .init = &omap2_init_clksel_parent,
++ .clksel = sysclkout_pre_sel,
++ .clksel_reg = AM33XX_CM_CLKOUT_CTRL,
++ .clksel_mask = AM33XX_CLKOUT2SOURCE_MASK,
++ .ops = &clkops_null,
++ .recalc = &omap2_clksel_recalc,
++};
++
++/* Divide-by-8 clock rates; the default divider is 1/1 */
++static const struct clksel_rate div8_rates[] = {
++ { .div = 1, .val = 0, .flags = RATE_IN_AM33XX },
++ { .div = 2, .val = 1, .flags = RATE_IN_AM33XX },
++ { .div = 3, .val = 2, .flags = RATE_IN_AM33XX },
++ { .div = 4, .val = 3, .flags = RATE_IN_AM33XX },
++ { .div = 5, .val = 4, .flags = RATE_IN_AM33XX },
++ { .div = 6, .val = 5, .flags = RATE_IN_AM33XX },
++ { .div = 7, .val = 6, .flags = RATE_IN_AM33XX },
++ { .div = 8, .val = 7, .flags = RATE_IN_AM33XX },
++ { .div = 0 },
++};
++
++static const struct clksel clkout2_div[] = {
++ { .parent = &sysclkout_pre_ck, .rates = div8_rates },
++ { .parent = NULL },
++};
++
++static struct clk clkout2_ck = {
++ .name = "clkout2_ck",
++ .parent = &sysclkout_pre_ck,
++ .ops = &clkops_omap2_dflt,
++ .clksel = clkout2_div,
++ .clksel_reg = AM33XX_CM_CLKOUT_CTRL,
++ .clksel_mask = AM33XX_CLKOUT2DIV_MASK,
++ .enable_reg = AM33XX_CM_CLKOUT_CTRL,
++ .enable_bit = AM33XX_CLKOUT2EN_SHIFT,
++ .recalc = &omap2_clksel_recalc,
++ .round_rate = &omap2_clksel_round_rate,
++ .set_rate = &omap2_clksel_set_rate,
++};
++
++static struct clk vtp_clk = {
++ .name = "vtp_clk",
++ .parent = &sys_clkin_ck,
++ .ops = &clkops_null,
++ .fixed_div = 2,
++ .recalc = &omap_fixed_divisor_recalc,
++};
++
++static const struct clksel wdt_clkmux_sel[] = {
++ { .parent = &clk_rc32k_ck, .rates = div_1_0_rates },
++ { .parent = &clk_32khz_ck, .rates = div_1_1_rates },
++ { .parent = NULL },
++};
++
++static struct clk wdt0_ick = {
++ .name = "wdt0_ick",
++ .parent = &l4_wkup_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk wdt0_fck = {
++ .name = "wdt0_fck",
++ .clkdm_name = "l4_wkup_clkdm",
++ .parent = &clk_rc32k_ck,
++ .clksel = wdt_clkmux_sel,
++ .enable_reg = AM33XX_CM_WKUP_WDT0_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk wdt1_ick = {
++ .name = "wdt1_ick",
++ .parent = &l4_wkup_gclk,
++ .ops = &clkops_null,
++ .recalc = &followparent_recalc,
++};
++
++static struct clk wdt1_fck = {
++ .name = "wdt1_fck",
++ .clkdm_name = "l4_wkup_clkdm",
++ .parent = &clk_rc32k_ck,
++ .init = &omap2_init_clksel_parent,
++ .clksel = wdt_clkmux_sel,
++ .enable_reg = AM33XX_CM_WKUP_WDT1_CLKCTRL,
++ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
++ .clksel_reg = AM33XX_CLKSEL_WDT1_CLK,
++ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
++ .ops = &clkops_omap2_dflt,
++ .recalc = &omap2_clksel_recalc,
++};
++
++/*
++ * Clock definitions for the time-base module enable bits in the
++ * PWMSS control register.
++ */
++
++static struct clk ehrpwm0_tbclk = {
++ .name = "ehrpwm0_tbclk",
++ .enable_reg = AM33XX_CONTROL_PWMSS_CTRL,
++ .enable_bit = AM33XX_PWMSS0_TBCLKEN,
++ .ops = &clkops_omap2_dflt,
++ .flags = ENABLE_ON_INIT,
++};
++
++static struct clk ehrpwm1_tbclk = {
++ .name = "ehrpwm1_tbclk",
++ .enable_reg = AM33XX_CONTROL_PWMSS_CTRL,
++ .enable_bit = AM33XX_PWMSS1_TBCLKEN,
++ .ops = &clkops_omap2_dflt,
++ .flags = ENABLE_ON_INIT,
++};
++
++static struct clk ehrpwm2_tbclk = {
++ .name = "ehrpwm2_tbclk",
++ .enable_reg = AM33XX_CONTROL_PWMSS_CTRL,
++ .enable_bit = AM33XX_PWMSS2_TBCLKEN,
++ .ops = &clkops_omap2_dflt,
++ .flags = ENABLE_ON_INIT,
++};
++
++
++/*
++ * clkdev
++ */
++static struct omap_clk am33xx_clks[] = {
++ CLK(NULL, "clk_32768_ck", &clk_32768_ck, CK_AM33XX),
++ CLK(NULL, "clk_32khz_ck", &clk_32khz_ck, CK_AM33XX),
++ CLK(NULL, "clk_rc32k_ck", &clk_rc32k_ck, CK_AM33XX),
++ CLK(NULL, "virt_19_2m_ck", &virt_19_2m_ck, CK_AM33XX),
++ CLK(NULL, "virt_24m_ck", &virt_24m_ck, CK_AM33XX),
++ CLK(NULL, "virt_25m_ck", &virt_25m_ck, CK_AM33XX),
++ CLK(NULL, "virt_26m_ck", &virt_26m_ck, CK_AM33XX),
++ CLK(NULL, "sys_clkin_ck", &sys_clkin_ck, CK_AM33XX),
++ CLK(NULL, "tclkin_ck", &tclkin_ck, CK_AM33XX),
++ CLK(NULL, "dpll_core_ck", &dpll_core_ck, CK_AM33XX),
++ CLK(NULL, "dpll_core_x2_ck", &dpll_core_x2_ck, CK_AM33XX),
++ CLK(NULL, "dpll_core_m4_ck", &dpll_core_m4_ck, CK_AM33XX),
++ CLK(NULL, "dpll_core_m5_ck", &dpll_core_m5_ck, CK_AM33XX),
++ CLK(NULL, "dpll_core_m6_ck", &dpll_core_m6_ck, CK_AM33XX),
++ CLK(NULL, "sysclk1_ck", &sysclk1_ck, CK_AM33XX),
++ CLK(NULL, "sysclk2_ck", &sysclk2_ck, CK_AM33XX),
++ CLK(NULL, "core_clk_out", &core_clk_out, CK_AM33XX),
++ CLK(NULL, "clk_32khz_timer", &clk_32khz_timer, CK_AM33XX),
++ CLK(NULL, "dpll_mpu_ck", &dpll_mpu_ck, CK_AM33XX),
++ CLK(NULL, "dpll_mpu_m2_ck", &dpll_mpu_m2_ck, CK_AM33XX),
++ CLK(NULL, "mpu_ck", &mpu_fck, CK_AM33XX),
++ CLK(NULL, "dpll_ddr_ck", &dpll_ddr_ck, CK_AM33XX),
++ CLK(NULL, "dpll_ddr_m2_ck", &dpll_ddr_m2_ck, CK_AM33XX),
++ CLK(NULL, "ddr_pll_clk", &ddr_pll_clk, CK_AM33XX),
++ CLK(NULL, "emif_fck", &emif_fck, CK_AM33XX),
++ CLK(NULL, "emif_fw_fck", &emif_fw_fck, CK_AM33XX),
++ CLK(NULL, "dpll_disp_ck", &dpll_disp_ck, CK_AM33XX),
++ CLK(NULL, "dpll_disp_m2_ck", &dpll_disp_m2_ck, CK_AM33XX),
++ CLK(NULL, "disp_pll_clk", &disp_pll_clk, CK_AM33XX),
++ CLK(NULL, "dpll_per_ck", &dpll_per_ck, CK_AM33XX),
++ CLK(NULL, "dpll_per_m2_ck", &dpll_per_m2_ck, CK_AM33XX),
++ CLK(NULL, "per_192mhz_clk", &per_192mhz_clk, CK_AM33XX),
++ CLK(NULL, "usb_pll_clk", &usb_pll_clk, CK_AM33XX),
++ CLK(NULL, "core_100mhz_ck", &core_100mhz_ck, CK_AM33XX),
++ CLK(NULL, "l3_ick", &l3_ick, CK_AM33XX),
++ CLK(NULL, "l3_instr_ick", &l3_instr_ick, CK_AM33XX),
++ CLK(NULL, "adc_tsc_fck", &adc_tsc_fck, CK_AM33XX),
++ CLK(NULL, "adc_tsc_ick", &adc_tsc_ick, CK_AM33XX),
++ CLK(NULL, "aes0_fck", &aes0_fck, CK_AM33XX),
++ CLK(NULL, "l4_cefuse_gclk", &l4_cefuse_gclk, CK_AM33XX),
++ CLK(NULL, "cefuse_fck", &cefuse_fck, CK_AM33XX),
++ CLK(NULL, "cefuse_iclk", &cefuse_iclk, CK_AM33XX),
++ CLK(NULL, "clkdiv32k_ick", &clkdiv32k_ick, CK_AM33XX),
++ CLK(NULL, "control_fck", &control_fck, CK_AM33XX),
++ CLK("cpsw.0", NULL, &cpgmac0_ick, CK_AM33XX),
++ CLK("d_can.0", "fck", &dcan0_fck, CK_AM33XX),
++ CLK("d_can.1", "fck", &dcan1_fck, CK_AM33XX),
++ CLK("d_can.0", "ick", &dcan0_ick, CK_AM33XX),
++ CLK("d_can.1", "ick", &dcan1_ick, CK_AM33XX),
++ CLK(NULL, "debugss_ick", &debugss_ick, CK_AM33XX),
++ CLK(NULL, "elm_fck", &elm_fck, CK_AM33XX),
++ CLK(NULL, "epwmss0_fck", &epwmss0_fck, CK_AM33XX),
++ CLK(NULL, "epwmss1_fck", &epwmss1_fck, CK_AM33XX),
++ CLK(NULL, "epwmss2_fck", &epwmss2_fck, CK_AM33XX),
++ CLK(NULL, "gpio0_ick", &gpio0_ick, CK_AM33XX),
++ CLK(NULL, "gpio1_ick", &gpio1_ick, CK_AM33XX),
++ CLK(NULL, "gpio2_ick", &gpio2_ick, CK_AM33XX),
++ CLK(NULL, "gpio3_ick", &gpio3_ick, CK_AM33XX),
++ CLK(NULL, "gpmc_fck", &gpmc_fck, CK_AM33XX),
++ CLK("omap_i2c.1", "fck", &i2c1_fck, CK_AM33XX),
++ CLK("omap_i2c.1", "ick", &i2c1_ick, CK_AM33XX),
++ CLK("omap_i2c.2", "fck", &i2c2_fck, CK_AM33XX),
++ CLK("omap_i2c.2", "ick", &i2c2_ick, CK_AM33XX),
++ CLK("omap_i2c.3", "fck", &i2c3_fck, CK_AM33XX),
++ CLK("omap_i2c.3", "ick", &i2c3_ick, CK_AM33XX),
++ CLK(NULL, "pruss_ocp_gclk", &pruss_ocp_gclk, CK_AM33XX),
++ CLK(NULL, "pruss_uart_gclk", &pruss_uart_gclk, CK_AM33XX),
++ CLK(NULL, "pruss_iep_gclk", &pruss_iep_gclk, CK_AM33XX),
++ CLK(NULL, "ieee5000_fck", &ieee5000_fck, CK_AM33XX),
++ CLK(NULL, "l4hs_ick", &l4hs_ick, CK_AM33XX),
++ CLK(NULL, "l4wkup_ick", &l4wkup_ick, CK_AM33XX),
++ CLK(NULL, "l4fw_ick", &l4fw_ick, CK_AM33XX),
++ CLK(NULL, "l4ls_ick", &l4ls_ick, CK_AM33XX),
++ CLK("da8xx_lcdc.0", NULL, &lcdc_fck, CK_AM33XX),
++ CLK(NULL, "mailbox0_fck", &mailbox0_fck, CK_AM33XX),
++ CLK(NULL, "mcasp1_ick", &mcasp0_ick, CK_AM33XX),
++ CLK("davinci-mcasp.0", NULL, &mcasp0_fck, CK_AM33XX),
++ CLK(NULL, "mcasp2_ick", &mcasp1_ick, CK_AM33XX),
++ CLK("davinci-mcasp.1", NULL, &mcasp1_fck, CK_AM33XX),
++ CLK(NULL, "mlb_fck", &mlb_fck, CK_AM33XX),
++ CLK("omap_hsmmc.0", "ick", &mmc0_ick, CK_AM33XX),
++ CLK("omap_hsmmc.1", "ick", &mmc1_ick, CK_AM33XX),
++ CLK("omap_hsmmc.2", "ick", &mmc2_ick, CK_AM33XX),
++ CLK("omap_hsmmc.0", "fck", &mmc0_fck, CK_AM33XX),
++ CLK("omap_hsmmc.1", "fck", &mmc1_fck, CK_AM33XX),
++ CLK("omap_hsmmc.2", "fck", &mmc2_fck, CK_AM33XX),
++ CLK(NULL, "mmu_fck", &mmu_fck, CK_AM33XX),
++ CLK(NULL, "ocmcram_ick", &ocmcram_ick, CK_AM33XX),
++ CLK(NULL, "ocpwp_fck", &ocpwp_fck, CK_AM33XX),
++ CLK(NULL, "pka_fck", &pka_fck, CK_AM33XX),
++ CLK(NULL, "rng_fck", &rng_fck, CK_AM33XX),
++ CLK(NULL, "rtc_fck", &rtc_fck, CK_AM33XX),
++ CLK(NULL, "rtc_ick", &rtc_ick, CK_AM33XX),
++ CLK(NULL, "sha0_fck", &sha0_fck, CK_AM33XX),
++ CLK(NULL, "smartreflex0_fck", &smartreflex0_fck, CK_AM33XX),
++ CLK(NULL, "smartreflex0_ick", &smartreflex0_ick, CK_AM33XX),
++ CLK(NULL, "smartreflex1_fck", &smartreflex1_fck, CK_AM33XX),
++ CLK(NULL, "smartreflex1_ick", &smartreflex1_ick, CK_AM33XX),
++ CLK("omap2_mcspi.1", "fck", &spi0_fck, CK_AM33XX),
++ CLK("omap2_mcspi.2", "fck", &spi1_fck, CK_AM33XX),
++ CLK("omap2_mcspi.1", "ick", &spi0_ick, CK_AM33XX),
++ CLK("omap2_mcspi.2", "ick", &spi1_ick, CK_AM33XX),
++ CLK(NULL, "spinlock_fck", &spinlock_fck, CK_AM33XX),
++ CLK(NULL, "gpt0_fck", &timer0_fck, CK_AM33XX),
++ CLK(NULL, "gpt1_fck", &timer1_fck, CK_AM33XX),
++ CLK(NULL, "gpt2_fck", &timer2_fck, CK_AM33XX),
++ CLK(NULL, "gpt3_fck", &timer3_fck, CK_AM33XX),
++ CLK(NULL, "gpt4_fck", &timer4_fck, CK_AM33XX),
++ CLK(NULL, "gpt5_fck", &timer5_fck, CK_AM33XX),
++ CLK(NULL, "gpt6_fck", &timer6_fck, CK_AM33XX),
++ CLK(NULL, "gpt7_fck", &timer7_fck, CK_AM33XX),
++ CLK("da8xx_lcdc.0", "lcdc_ick", &lcdc_ick, CK_AM33XX),
++ CLK(NULL, "tpcc_ick", &tpcc_ick, CK_AM33XX),
++ CLK(NULL, "tptc0_ick", &tptc0_ick, CK_AM33XX),
++ CLK(NULL, "tptc1_ick", &tptc1_ick, CK_AM33XX),
++ CLK(NULL, "tptc2_ick", &tptc2_ick, CK_AM33XX),
++ CLK(NULL, "uart1_fck", &uart1_fck, CK_AM33XX),
++ CLK(NULL, "uart2_fck", &uart2_fck, CK_AM33XX),
++ CLK(NULL, "uart3_fck", &uart3_fck, CK_AM33XX),
++ CLK(NULL, "uart4_fck", &uart4_fck, CK_AM33XX),
++ CLK(NULL, "uart5_fck", &uart5_fck, CK_AM33XX),
++ CLK(NULL, "uart6_fck", &uart6_fck, CK_AM33XX),
++ CLK(NULL, "uart1_ick", &uart1_ick, CK_AM33XX),
++ CLK(NULL, "uart2_ick", &uart2_ick, CK_AM33XX),
++ CLK(NULL, "uart3_ick", &uart3_ick, CK_AM33XX),
++ CLK(NULL, "uart4_ick", &uart4_ick, CK_AM33XX),
++ CLK(NULL, "uart5_ick", &uart5_ick, CK_AM33XX),
++ CLK(NULL, "uart6_ick", &uart6_ick, CK_AM33XX),
++ CLK(NULL, "usbotg_ick", &usbotg_ick, CK_AM33XX),
++ CLK(NULL, "usbotg_fck", &usbotg_fck, CK_AM33XX),
++ CLK(NULL, "wdt0_ick", &wdt0_ick, CK_AM33XX),
++ CLK(NULL, "wdt0_fck", &wdt0_fck, CK_AM33XX),
++ CLK(NULL, "wdt1_ick", &wdt1_ick, CK_AM33XX),
++ CLK(NULL, "wdt1_fck", &wdt1_fck, CK_AM33XX),
++ CLK(NULL, "wkup_m3_fck", &wkup_m3_fck, CK_AM33XX),
++ CLK(NULL, "l3_aon_gclk", &l3_aon_gclk, CK_AM33XX),
++ CLK(NULL, "l4_wkup_aon_gclk", &l4_wkup_aon_gclk, CK_AM33XX),
++ CLK(NULL, "l4_rtc_gclk", &l4_rtc_gclk, CK_AM33XX),
++ CLK(NULL, "l3_gclk", &l3_gclk, CK_AM33XX),
++ CLK(NULL, "gfx_l3_gclk", &gfx_l3_gclk, CK_AM33XX),
++ CLK(NULL, "l4_wkup_gclk", &l4_wkup_gclk, CK_AM33XX),
++ CLK(NULL, "l4hs_gclk", &l4hs_gclk, CK_AM33XX),
++ CLK(NULL, "l3s_gclk", &l3s_gclk, CK_AM33XX),
++ CLK(NULL, "l4fw_gclk", &l4fw_gclk, CK_AM33XX),
++ CLK(NULL, "l4ls_gclk", &l4ls_gclk, CK_AM33XX),
++ CLK(NULL, "debug_clka_gclk", &debug_clka_gclk, CK_AM33XX),
++ CLK(NULL, "clk_24mhz", &clk_24mhz, CK_AM33XX),
++ CLK(NULL, "sysclk_div_ck", &sysclk_div_ck, CK_AM33XX),
++ CLK(NULL, "cpsw_250mhz_clk", &cpsw_250mhz_clk, CK_AM33XX),
++ CLK(NULL, "cpsw_125mhz_gclk", &cpsw_125mhz_gclk, CK_AM33XX),
++ CLK(NULL, "cpsw_50mhz_clk", &cpsw_50mhz_clk, CK_AM33XX),
++ CLK(NULL, "cpsw_5mhz_clk", &cpsw_5mhz_clk, CK_AM33XX),
++ CLK(NULL, "cpsw_cpts_rft_clk", &cpsw_cpts_rft_clk, CK_AM33XX),
++ CLK(NULL, "gpio0_dbclk_mux_ck", &gpio0_dbclk_mux_ck, CK_AM33XX),
++ CLK(NULL, "gpio0_dbclk", &gpio0_dbclk, CK_AM33XX),
++ CLK(NULL, "gpio1_dbclk", &gpio1_dbclk, CK_AM33XX),
++ CLK(NULL, "gpio2_dbclk", &gpio2_dbclk, CK_AM33XX),
++ CLK(NULL, "gpio3_dbclk", &gpio3_dbclk, CK_AM33XX),
++ CLK(NULL, "lcd_gclk", &lcd_gclk, CK_AM33XX),
++ CLK(NULL, "mmc_clk", &mmc_clk, CK_AM33XX),
++ CLK(NULL, "gfx_fclk_clksel_ck", &gfx_fclk_clksel_ck, CK_AM33XX),
++ CLK(NULL, "gfx_fclk", &gfx_fclk, CK_AM33XX),
++ CLK(NULL, "gfx_ick", &gfx_ick, CK_AM33XX),
++ CLK(NULL, "sysclkout_pre_ck", &sysclkout_pre_ck, CK_AM33XX),
++ CLK(NULL, "clkout2_ck", &clkout2_ck, CK_AM33XX),
++ CLK(NULL, "gpt0_ick", &timer0_ick, CK_AM33XX),
++ CLK(NULL, "gpt1_ick", &timer1_ick, CK_AM33XX),
++ CLK(NULL, "gpt2_ick", &timer2_ick, CK_AM33XX),
++ CLK(NULL, "gpt3_ick", &timer3_ick, CK_AM33XX),
++ CLK(NULL, "gpt4_ick", &timer4_ick, CK_AM33XX),
++ CLK(NULL, "gpt5_ick", &timer5_ick, CK_AM33XX),
++ CLK(NULL, "gpt6_ick", &timer6_ick, CK_AM33XX),
++ CLK(NULL, "gpt7_ick", &timer7_ick, CK_AM33XX),
++ CLK(NULL, "vtp_clk", &vtp_clk, CK_AM33XX),
++ CLK(NULL, "ehrpwm0_tbclk", &ehrpwm0_tbclk, CK_AM33XX),
++ CLK(NULL, "ehrpwm1_tbclk", &ehrpwm1_tbclk, CK_AM33XX),
++ CLK(NULL, "ehrpwm2_tbclk", &ehrpwm2_tbclk, CK_AM33XX),
++};
++
++int __init am33xx_clk_init(void)
++{
++ struct omap_clk *c;
++ u32 cpu_clkflg;
++
++ if (cpu_is_am33xx()) {
++ cpu_mask = RATE_IN_AM33XX;
++ cpu_clkflg = CK_AM33XX;
++ }
++
++ clk_init(&omap2_clk_functions);
++
++ for (c = am33xx_clks; c < am33xx_clks + ARRAY_SIZE(am33xx_clks); c++)
++ clk_preinit(c->lk.clk);
++
++ for (c = am33xx_clks; c < am33xx_clks + ARRAY_SIZE(am33xx_clks); c++)
++ if (c->cpu & cpu_clkflg) {
++ clkdev_add(&c->lk);
++ clk_register(c->lk.clk);
++ omap2_init_clk_clkdm(c->lk.clk);
++ }
++
++ recalculate_root_clocks();
++
++ /*
++ * Only enable the clocks we need here; let the drivers
++ * enable other clocks as necessary.
++ */
++ clk_enable_init_clocks();
++
++ return 0;
++}
+diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
+index 5d0064a..7dbb218 100644
+--- a/arch/arm/mach-omap2/clock3xxx_data.c
++++ b/arch/arm/mach-omap2/clock3xxx_data.c
+@@ -27,6 +27,7 @@
+ #include "clock34xx.h"
+ #include "clock36xx.h"
+ #include "clock3517.h"
++#include "clock33xx.h"
+
+ #include "cm2xxx_3xxx.h"
+ #include "cm-regbits-34xx.h"
+@@ -2480,6 +2481,16 @@ static struct clk uart4_fck = {
+ .recalc = &followparent_recalc,
+ };
+
++static struct clk uart4_fck_am35xx = {
++ .name = "uart4_fck",
++ .ops = &clkops_omap2_dflt_wait,
++ .parent = &per_48m_fck,
++ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
++ .enable_bit = OMAP3430_EN_UART4_SHIFT,
++ .clkdm_name = "core_l4_clkdm",
++ .recalc = &followparent_recalc,
++};
++
+ static struct clk gpt2_fck = {
+ .name = "gpt2_fck",
+ .ops = &clkops_omap2_dflt_wait,
+@@ -3287,7 +3298,7 @@ static struct omap_clk omap3xxx_clks[] = {
+ CLK(NULL, "cpefuse_fck", &cpefuse_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK(NULL, "ts_fck", &ts_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK(NULL, "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+- CLK("usbhs-omap.0", "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
++ CLK("usbhs_omap", "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK("omap-mcbsp.1", "prcm_fck", &core_96m_fck, CK_3XXX),
+ CLK("omap-mcbsp.5", "prcm_fck", &core_96m_fck, CK_3XXX),
+ CLK(NULL, "core_96m_fck", &core_96m_fck, CK_3XXX),
+@@ -3323,7 +3334,7 @@ static struct omap_clk omap3xxx_clks[] = {
+ CLK(NULL, "pka_ick", &pka_ick, CK_34XX | CK_36XX),
+ CLK(NULL, "core_l4_ick", &core_l4_ick, CK_3XXX),
+ CLK(NULL, "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+- CLK("usbhs-omap.0", "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
++ CLK("usbhs_omap", "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK("omap_hsmmc.2", "ick", &mmchs3_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK(NULL, "icr_ick", &icr_ick, CK_34XX | CK_36XX),
+ CLK("omap-aes", "ick", &aes2_ick, CK_34XX | CK_36XX),
+@@ -3369,20 +3380,18 @@ static struct omap_clk omap3xxx_clks[] = {
+ CLK(NULL, "cam_ick", &cam_ick, CK_34XX | CK_36XX),
+ CLK(NULL, "csi2_96m_fck", &csi2_96m_fck, CK_34XX | CK_36XX),
+ CLK(NULL, "usbhost_120m_fck", &usbhost_120m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+- CLK("usbhs-omap.0", "hs_fck", &usbhost_120m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK(NULL, "usbhost_48m_fck", &usbhost_48m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+- CLK("usbhs-omap.0", "fs_fck", &usbhost_48m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK(NULL, "usbhost_ick", &usbhost_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+- CLK("usbhs-omap.0", "usbhost_ick", &usbhost_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+- CLK("usbhs-omap.0", "utmi_p1_gfclk", &dummy_ck, CK_3XXX),
+- CLK("usbhs-omap.0", "utmi_p2_gfclk", &dummy_ck, CK_3XXX),
+- CLK("usbhs-omap.0", "xclk60mhsp1_ck", &dummy_ck, CK_3XXX),
+- CLK("usbhs-omap.0", "xclk60mhsp2_ck", &dummy_ck, CK_3XXX),
+- CLK("usbhs-omap.0", "usb_host_hs_utmi_p1_clk", &dummy_ck, CK_3XXX),
+- CLK("usbhs-omap.0", "usb_host_hs_utmi_p2_clk", &dummy_ck, CK_3XXX),
+- CLK("usbhs-omap.0", "usb_tll_hs_usb_ch0_clk", &dummy_ck, CK_3XXX),
+- CLK("usbhs-omap.0", "usb_tll_hs_usb_ch1_clk", &dummy_ck, CK_3XXX),
+- CLK("usbhs-omap.0", "init_60m_fclk", &dummy_ck, CK_3XXX),
++ CLK("usbhs_omap", "usbhost_ick", &usbhost_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
++ CLK("usbhs_omap", "utmi_p1_gfclk", &dummy_ck, CK_3XXX),
++ CLK("usbhs_omap", "utmi_p2_gfclk", &dummy_ck, CK_3XXX),
++ CLK("usbhs_omap", "xclk60mhsp1_ck", &dummy_ck, CK_3XXX),
++ CLK("usbhs_omap", "xclk60mhsp2_ck", &dummy_ck, CK_3XXX),
++ CLK("usbhs_omap", "usb_host_hs_utmi_p1_clk", &dummy_ck, CK_3XXX),
++ CLK("usbhs_omap", "usb_host_hs_utmi_p2_clk", &dummy_ck, CK_3XXX),
++ CLK("usbhs_omap", "usb_tll_hs_usb_ch0_clk", &dummy_ck, CK_3XXX),
++ CLK("usbhs_omap", "usb_tll_hs_usb_ch1_clk", &dummy_ck, CK_3XXX),
++ CLK("usbhs_omap", "init_60m_fclk", &dummy_ck, CK_3XXX),
+ CLK(NULL, "usim_fck", &usim_fck, CK_3430ES2PLUS | CK_36XX),
+ CLK(NULL, "gpt1_fck", &gpt1_fck, CK_3XXX),
+ CLK(NULL, "wkup_32k_fck", &wkup_32k_fck, CK_3XXX),
+@@ -3403,6 +3412,7 @@ static struct omap_clk omap3xxx_clks[] = {
+ CLK(NULL, "per_48m_fck", &per_48m_fck, CK_3XXX),
+ CLK(NULL, "uart3_fck", &uart3_fck, CK_3XXX),
+ CLK(NULL, "uart4_fck", &uart4_fck, CK_36XX),
++ CLK(NULL, "uart4_fck", &uart4_fck_am35xx, CK_3505 | CK_3517),
+ CLK(NULL, "gpt2_fck", &gpt2_fck, CK_3XXX),
+ CLK(NULL, "gpt3_fck", &gpt3_fck, CK_3XXX),
+ CLK(NULL, "gpt4_fck", &gpt4_fck, CK_3XXX),
+@@ -3517,6 +3527,11 @@ int __init omap3xxx_clk_init(void)
+ } else if (cpu_is_ti816x()) {
+ cpu_mask = RATE_IN_TI816X;
+ cpu_clkflg = CK_TI816X;
++ } else if (cpu_is_am33xx()) {
++ am33xx_clk_init();
++ return 0;
++ } else if (cpu_is_ti814x()) {
++ cpu_mask = RATE_IN_TI814X;
+ } else if (cpu_is_omap34xx()) {
+ if (omap_rev() == OMAP3430_REV_ES1_0) {
+ cpu_mask = RATE_IN_3430ES1;
+@@ -3600,7 +3615,7 @@ int __init omap3xxx_clk_init(void)
+ * Lock DPLL5 -- here only until other device init code can
+ * handle this
+ */
+- if (!cpu_is_ti816x() && (omap_rev() >= OMAP3430_REV_ES2_0))
++ if (!cpu_is_ti81xx() && (omap_rev() >= OMAP3430_REV_ES2_0))
+ omap3_clk_lock_dpll5();
+
+ /* Avoid sleeping during omap3_core_dpll_m2_set_rate() */
+diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
+index 0798a80..08e86d7 100644
+--- a/arch/arm/mach-omap2/clock44xx_data.c
++++ b/arch/arm/mach-omap2/clock44xx_data.c
+@@ -1206,6 +1206,14 @@ static const struct clksel ocp_abe_iclk_div[] = {
+ { .parent = NULL },
+ };
+
++static struct clk mpu_periphclk = {
++ .name = "mpu_periphclk",
++ .parent = &dpll_mpu_ck,
++ .ops = &clkops_null,
++ .fixed_div = 2,
++ .recalc = &omap_fixed_divisor_recalc,
++};
++
+ static struct clk ocp_abe_iclk = {
+ .name = "ocp_abe_iclk",
+ .parent = &aess_fclk,
+@@ -3189,6 +3197,7 @@ static struct omap_clk omap44xx_clks[] = {
+ CLK(NULL, "l4_div_ck", &l4_div_ck, CK_443X),
+ CLK(NULL, "lp_clk_div_ck", &lp_clk_div_ck, CK_443X),
+ CLK(NULL, "l4_wkup_clk_mux_ck", &l4_wkup_clk_mux_ck, CK_443X),
++ CLK("smp_twd", NULL, &mpu_periphclk, CK_443X),
+ CLK(NULL, "ocp_abe_iclk", &ocp_abe_iclk, CK_443X),
+ CLK(NULL, "per_abe_24m_fclk", &per_abe_24m_fclk, CK_443X),
+ CLK(NULL, "per_abe_nc_fclk", &per_abe_nc_fclk, CK_443X),
+@@ -3295,7 +3304,7 @@ static struct omap_clk omap44xx_clks[] = {
+ CLK(NULL, "uart2_fck", &uart2_fck, CK_443X),
+ CLK(NULL, "uart3_fck", &uart3_fck, CK_443X),
+ CLK(NULL, "uart4_fck", &uart4_fck, CK_443X),
+- CLK("usbhs-omap.0", "fs_fck", &usb_host_fs_fck, CK_443X),
++ CLK("usbhs_omap", "fs_fck", &usb_host_fs_fck, CK_443X),
+ CLK(NULL, "utmi_p1_gfclk", &utmi_p1_gfclk, CK_443X),
+ CLK(NULL, "usb_host_hs_utmi_p1_clk", &usb_host_hs_utmi_p1_clk, CK_443X),
+ CLK(NULL, "utmi_p2_gfclk", &utmi_p2_gfclk, CK_443X),
+@@ -3306,7 +3315,7 @@ static struct omap_clk omap44xx_clks[] = {
+ CLK(NULL, "usb_host_hs_hsic60m_p2_clk", &usb_host_hs_hsic60m_p2_clk, CK_443X),
+ CLK(NULL, "usb_host_hs_hsic480m_p2_clk", &usb_host_hs_hsic480m_p2_clk, CK_443X),
+ CLK(NULL, "usb_host_hs_func48mclk", &usb_host_hs_func48mclk, CK_443X),
+- CLK("usbhs-omap.0", "hs_fck", &usb_host_hs_fck, CK_443X),
++ CLK("usbhs_omap", "hs_fck", &usb_host_hs_fck, CK_443X),
+ CLK(NULL, "otg_60m_gfclk", &otg_60m_gfclk, CK_443X),
+ CLK(NULL, "usb_otg_hs_xclk", &usb_otg_hs_xclk, CK_443X),
+ CLK("musb-omap2430", "ick", &usb_otg_hs_ick, CK_443X),
+@@ -3314,7 +3323,7 @@ static struct omap_clk omap44xx_clks[] = {
+ CLK(NULL, "usb_tll_hs_usb_ch2_clk", &usb_tll_hs_usb_ch2_clk, CK_443X),
+ CLK(NULL, "usb_tll_hs_usb_ch0_clk", &usb_tll_hs_usb_ch0_clk, CK_443X),
+ CLK(NULL, "usb_tll_hs_usb_ch1_clk", &usb_tll_hs_usb_ch1_clk, CK_443X),
+- CLK("usbhs-omap.0", "usbtll_ick", &usb_tll_hs_ick, CK_443X),
++ CLK("usbhs_omap", "usbtll_ick", &usb_tll_hs_ick, CK_443X),
+ CLK(NULL, "usim_ck", &usim_ck, CK_443X),
+ CLK(NULL, "usim_fclk", &usim_fclk, CK_443X),
+ CLK(NULL, "usim_fck", &usim_fck, CK_443X),
+@@ -3374,8 +3383,8 @@ static struct omap_clk omap44xx_clks[] = {
+ CLK(NULL, "uart2_ick", &dummy_ck, CK_443X),
+ CLK(NULL, "uart3_ick", &dummy_ck, CK_443X),
+ CLK(NULL, "uart4_ick", &dummy_ck, CK_443X),
+- CLK("usbhs-omap.0", "usbhost_ick", &dummy_ck, CK_443X),
+- CLK("usbhs-omap.0", "usbtll_fck", &dummy_ck, CK_443X),
++ CLK("usbhs_omap", "usbhost_ick", &dummy_ck, CK_443X),
++ CLK("usbhs_omap", "usbtll_fck", &dummy_ck, CK_443X),
+ CLK("omap_wdt", "ick", &dummy_ck, CK_443X),
+ CLK("omap_timer.1", "32k_ck", &sys_32k_ck, CK_443X),
+ CLK("omap_timer.2", "32k_ck", &sys_32k_ck, CK_443X),
+diff --git a/arch/arm/mach-omap2/clockdomain.h b/arch/arm/mach-omap2/clockdomain.h
+index f7b5860..72cb12b 100644
+--- a/arch/arm/mach-omap2/clockdomain.h
++++ b/arch/arm/mach-omap2/clockdomain.h
+@@ -195,6 +195,7 @@ int clkdm_hwmod_disable(struct clockdomain *clkdm, struct omap_hwmod *oh);
+ extern void __init omap242x_clockdomains_init(void);
+ extern void __init omap243x_clockdomains_init(void);
+ extern void __init omap3xxx_clockdomains_init(void);
++extern void __init am33xx_clockdomains_init(void);
+ extern void __init omap44xx_clockdomains_init(void);
+ extern void _clkdm_add_autodeps(struct clockdomain *clkdm);
+ extern void _clkdm_del_autodeps(struct clockdomain *clkdm);
+@@ -202,6 +203,7 @@ extern void _clkdm_del_autodeps(struct clockdomain *clkdm);
+ extern struct clkdm_ops omap2_clkdm_operations;
+ extern struct clkdm_ops omap3_clkdm_operations;
+ extern struct clkdm_ops omap4_clkdm_operations;
++extern struct clkdm_ops am33xx_clkdm_operations;
+
+ extern struct clkdm_dep gfx_24xx_wkdeps[];
+ extern struct clkdm_dep dsp_24xx_wkdeps[];
+diff --git a/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c b/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
+index a0d68db..edcab10 100644
+--- a/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
++++ b/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
+@@ -147,6 +147,9 @@ static void _enable_hwsup(struct clockdomain *clkdm)
+ if (cpu_is_omap24xx())
+ omap2xxx_cm_clkdm_enable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+ clkdm->clktrctrl_mask);
++ else if (cpu_is_am33xx())
++ am33xx_cm_clkdm_enable_hwsup(clkdm->cm_inst, clkdm->clkdm_offs,
++ clkdm->clktrctrl_mask);
+ else if (cpu_is_omap34xx())
+ omap3xxx_cm_clkdm_enable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+ clkdm->clktrctrl_mask);
+@@ -157,6 +160,9 @@ static void _disable_hwsup(struct clockdomain *clkdm)
+ if (cpu_is_omap24xx())
+ omap2xxx_cm_clkdm_disable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+ clkdm->clktrctrl_mask);
++ else if (cpu_is_am33xx())
++ am33xx_cm_clkdm_disable_hwsup(clkdm->cm_inst, clkdm->clkdm_offs,
++ clkdm->clktrctrl_mask);
+ else if (cpu_is_omap34xx())
+ omap3xxx_cm_clkdm_disable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+ clkdm->clktrctrl_mask);
+@@ -211,14 +217,22 @@ static int omap2_clkdm_clk_disable(struct clockdomain *clkdm)
+
+ static int omap3_clkdm_sleep(struct clockdomain *clkdm)
+ {
+- omap3xxx_cm_clkdm_force_sleep(clkdm->pwrdm.ptr->prcm_offs,
++ if (cpu_is_am33xx())
++ am33xx_cm_clkdm_force_sleep(clkdm->cm_inst, clkdm->clkdm_offs,
++ clkdm->clktrctrl_mask);
++ else
++ omap3xxx_cm_clkdm_force_sleep(clkdm->pwrdm.ptr->prcm_offs,
+ clkdm->clktrctrl_mask);
+ return 0;
+ }
+
+ static int omap3_clkdm_wakeup(struct clockdomain *clkdm)
+ {
+- omap3xxx_cm_clkdm_force_wakeup(clkdm->pwrdm.ptr->prcm_offs,
++ if (cpu_is_am33xx())
++ am33xx_cm_clkdm_force_wakeup(clkdm->cm_inst, clkdm->clkdm_offs,
++ clkdm->clktrctrl_mask);
++ else
++ omap3xxx_cm_clkdm_force_wakeup(clkdm->pwrdm.ptr->prcm_offs,
+ clkdm->clktrctrl_mask);
+ return 0;
+ }
+diff --git a/arch/arm/mach-omap2/clockdomain44xx.c b/arch/arm/mach-omap2/clockdomain44xx.c
+index 935c7f0..188afe2 100644
+--- a/arch/arm/mach-omap2/clockdomain44xx.c
++++ b/arch/arm/mach-omap2/clockdomain44xx.c
+@@ -128,3 +128,10 @@ struct clkdm_ops omap4_clkdm_operations = {
+ .clkdm_clk_enable = omap4_clkdm_clk_enable,
+ .clkdm_clk_disable = omap4_clkdm_clk_disable,
+ };
++
++struct clkdm_ops am33xx_clkdm_operations = {
++ .clkdm_sleep = omap4_clkdm_sleep,
++ .clkdm_wakeup = omap4_clkdm_wakeup,
++ .clkdm_clk_enable = omap4_clkdm_clk_enable,
++ .clkdm_clk_disable = omap4_clkdm_clk_disable,
++};
+diff --git a/arch/arm/mach-omap2/clockdomains33xx_data.c b/arch/arm/mach-omap2/clockdomains33xx_data.c
+new file mode 100644
+index 0000000..a4734e9
+--- /dev/null
++++ b/arch/arm/mach-omap2/clockdomains33xx_data.c
+@@ -0,0 +1,232 @@
++/*
++ * AM33XX Clock Domain data.
++ *
++ * Copyright (C) 2011 Texas Instruments, Inc. - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/kernel.h>
++#include <linux/io.h>
++
++#include "prcm44xx.h"
++#include "clockdomain.h"
++#include "cm.h"
++#include "cm33xx.h"
++#include "cm-regbits-33xx.h"
++
++static struct clockdomain l4ls_am33xx_clkdm = {
++ .name = "l4ls_clkdm",
++ .pwrdm = { .name = "per_pwrdm" },
++ .cm_inst = AM33XX_CM_PER_MOD,
++ .prcm_partition = AM33XX_PRM_PARTITION,
++ .clkdm_offs = AM33XX_CM_PER_L4LS_CLKSTCTRL_OFFSET,
++ .clktrctrl_mask = AM33XX_CLKTRCTRL_MASK,
++ .flags = (CLKDM_CAN_SWSUP | CLKDM_NO_AUTODEPS),
++};
++
++static struct clockdomain l3s_am33xx_clkdm = {
++ .name = "l3s_clkdm",
++ .pwrdm = { .name = "per_pwrdm" },
++ .cm_inst = AM33XX_CM_PER_MOD,
++ .prcm_partition = AM33XX_PRM_PARTITION,
++ .clkdm_offs = AM33XX_CM_PER_L3S_CLKSTCTRL_OFFSET,
++ .clktrctrl_mask = AM33XX_CLKTRCTRL_MASK,
++ .flags = (CLKDM_CAN_SWSUP | CLKDM_NO_AUTODEPS),
++};
++
++static struct clockdomain l4fw_am33xx_clkdm = {
++ .name = "l4fw_clkdm",
++ .pwrdm = { .name = "per_pwrdm" },
++ .cm_inst = AM33XX_CM_PER_MOD,
++ .prcm_partition = AM33XX_PRM_PARTITION,
++ .clkdm_offs = AM33XX_CM_PER_L4FW_CLKSTCTRL_OFFSET,
++ .clktrctrl_mask = AM33XX_CLKTRCTRL_MASK,
++ .flags = (CLKDM_CAN_SWSUP | CLKDM_NO_AUTODEPS),
++};
++
++static struct clockdomain l3_am33xx_clkdm = {
++ .name = "l3_clkdm",
++ .pwrdm = { .name = "per_pwrdm" },
++ .cm_inst = AM33XX_CM_PER_MOD,
++ .prcm_partition = AM33XX_PRM_PARTITION,
++ .clkdm_offs = AM33XX_CM_PER_L3_CLKSTCTRL_OFFSET,
++ .clktrctrl_mask = AM33XX_CLKTRCTRL_MASK,
++ .flags = (CLKDM_CAN_SWSUP | CLKDM_NO_AUTODEPS),
++};
++
++static struct clockdomain l4hs_am33xx_clkdm = {
++ .name = "l4hs_clkdm",
++ .pwrdm = { .name = "per_pwrdm" },
++ .cm_inst = AM33XX_CM_PER_MOD,
++ .prcm_partition = AM33XX_PRM_PARTITION,
++ .clkdm_offs = AM33XX_CM_PER_L4HS_CLKSTCTRL_OFFSET,
++ .clktrctrl_mask = AM33XX_CLKTRCTRL_MASK,
++ .flags = (CLKDM_CAN_SWSUP | CLKDM_NO_AUTODEPS),
++};
++
++static struct clockdomain ocpwp_l3_am33xx_clkdm = {
++ .name = "ocpwp_l3_clkdm",
++ .pwrdm = { .name = "per_pwrdm" },
++ .cm_inst = AM33XX_CM_PER_MOD,
++ .prcm_partition = AM33XX_PRM_PARTITION,
++ .clkdm_offs = AM33XX_CM_PER_OCPWP_L3_CLKSTCTRL_OFFSET,
++ .clktrctrl_mask = AM33XX_CLKTRCTRL_MASK,
++ .flags = (CLKDM_CAN_SWSUP | CLKDM_NO_AUTODEPS),
++};
++
++static struct clockdomain pruss_ocp_am33xx_clkdm = {
++ .name = "pruss_ocp_clkdm",
++ .pwrdm = { .name = "per_pwrdm" },
++ .cm_inst = AM33XX_CM_PER_MOD,
++ .prcm_partition = AM33XX_PRM_PARTITION,
++ .clkdm_offs = AM33XX_CM_PER_PRUSS_CLKSTCTRL_OFFSET,
++ .clktrctrl_mask = AM33XX_CLKTRCTRL_MASK,
++ .flags = (CLKDM_CAN_SWSUP | CLKDM_NO_AUTODEPS),
++};
++
++static struct clockdomain cpsw_125mhz_am33xx_clkdm = {
++ .name = "cpsw_125mhz_clkdm",
++ .pwrdm = { .name = "per_pwrdm" },
++ .cm_inst = AM33XX_CM_PER_MOD,
++ .prcm_partition = AM33XX_PRM_PARTITION,
++ .clkdm_offs = AM33XX_CM_PER_CPSW_CLKSTCTRL_OFFSET,
++ .clktrctrl_mask = AM33XX_CLKTRCTRL_MASK,
++ .flags = (CLKDM_CAN_SWSUP | CLKDM_NO_AUTODEPS),
++};
++
++static struct clockdomain lcdc_am33xx_clkdm = {
++ .name = "lcdc_clkdm",
++ .pwrdm = { .name = "per_pwrdm" },
++ .cm_inst = AM33XX_CM_PER_MOD,
++ .prcm_partition = AM33XX_PRM_PARTITION,
++ .clkdm_offs = AM33XX_CM_PER_LCDC_CLKSTCTRL_OFFSET,
++ .clktrctrl_mask = AM33XX_CLKTRCTRL_MASK,
++ .flags = (CLKDM_CAN_SWSUP | CLKDM_NO_AUTODEPS),
++};
++
++static struct clockdomain clk_24mhz_am33xx_clkdm = {
++ .name = "clk_24mhz_clkdm",
++ .pwrdm = { .name = "per_pwrdm" },
++ .cm_inst = AM33XX_CM_PER_MOD,
++ .prcm_partition = AM33XX_PRM_PARTITION,
++ .clkdm_offs = AM33XX_CM_PER_CLK_24MHZ_CLKSTCTRL_OFFSET,
++ .clktrctrl_mask = AM33XX_CLKTRCTRL_MASK,
++ .flags = (CLKDM_CAN_SWSUP | CLKDM_NO_AUTODEPS),
++};
++
++static struct clockdomain l4_wkup_am33xx_clkdm = {
++ .name = "l4_wkup_clkdm",
++ .pwrdm = { .name = "wkup_pwrdm" },
++ .cm_inst = AM33XX_CM_WKUP_MOD,
++ .prcm_partition = AM33XX_PRM_PARTITION,
++ .clkdm_offs = AM33XX_CM_WKUP_CLKSTCTRL_OFFSET,
++ .clktrctrl_mask = AM33XX_CLKTRCTRL_MASK,
++ .flags = (CLKDM_CAN_SWSUP | CLKDM_NO_AUTODEPS),
++};
++
++static struct clockdomain l3_aon_am33xx_clkdm = {
++ .name = "l3_aon_clkdm",
++ .pwrdm = { .name = "wkup_pwrdm" },
++ .cm_inst = AM33XX_CM_WKUP_MOD,
++ .prcm_partition = AM33XX_PRM_PARTITION,
++ .clkdm_offs = AM33XX_CM_L3_AON_CLKSTCTRL_OFFSET,
++ .clktrctrl_mask = AM33XX_CLKTRCTRL_MASK,
++ .flags = (CLKDM_CAN_SWSUP | CLKDM_NO_AUTODEPS),
++};
++
++static struct clockdomain l4_wkup_aon_am33xx_clkdm = {
++ .name = "l4_wkup_aon_clkdm",
++ .pwrdm = { .name = "wkup_pwrdm" },
++ .cm_inst = AM33XX_CM_WKUP_MOD,
++ .prcm_partition = AM33XX_PRM_PARTITION,
++ .clkdm_offs = AM33XX_CM_L4_WKUP_AON_CLKSTCTRL_OFFSET,
++ .clktrctrl_mask = AM33XX_CLKTRCTRL_MASK,
++ .flags = (CLKDM_CAN_SWSUP | CLKDM_NO_AUTODEPS),
++};
++
++static struct clockdomain mpu_am33xx_clkdm = {
++ .name = "mpu_clkdm",
++ .pwrdm = { .name = "mpu_pwrdm" },
++ .cm_inst = AM33XX_CM_MPU_MOD,
++ .prcm_partition = AM33XX_PRM_PARTITION,
++ .clkdm_offs = AM33XX_CM_MPU_CLKSTCTRL_OFFSET,
++ .clktrctrl_mask = AM33XX_CLKTRCTRL_MASK,
++ .flags = (CLKDM_CAN_SWSUP | CLKDM_NO_AUTODEPS),
++};
++
++static struct clockdomain l4_rtc_am33xx_clkdm = {
++ .name = "l4_rtc_clkdm",
++ .pwrdm = { .name = "rtc_pwrdm" },
++ .cm_inst = AM33XX_CM_RTC_MOD,
++ .prcm_partition = AM33XX_PRM_PARTITION,
++ .clkdm_offs = AM33XX_CM_RTC_CLKSTCTRL_OFFSET,
++ .clktrctrl_mask = AM33XX_CLKTRCTRL_MASK,
++ .flags = (CLKDM_CAN_SWSUP | CLKDM_NO_AUTODEPS),
++};
++
++static struct clockdomain gfx_l3_am33xx_clkdm = {
++ .name = "gfx_l3_clkdm",
++ .pwrdm = { .name = "gfx_pwrdm" },
++ .cm_inst = AM33XX_CM_GFX_MOD,
++ .prcm_partition = AM33XX_PRM_PARTITION,
++ .clkdm_offs = AM33XX_CM_GFX_L3_CLKSTCTRL_OFFSET,
++ .clktrctrl_mask = AM33XX_CLKTRCTRL_MASK,
++ .flags = (CLKDM_CAN_SWSUP | CLKDM_NO_AUTODEPS),
++};
++
++static struct clockdomain gfx_l4ls_gfx_am33xx_clkdm = {
++ .name = "gfx_l4ls_gfx_clkdm",
++ .pwrdm = { .name = "gfx_pwrdm" },
++ .cm_inst = AM33XX_CM_GFX_MOD,
++ .prcm_partition = AM33XX_PRM_PARTITION,
++ .clkdm_offs = AM33XX_CM_GFX_L4LS_GFX_CLKSTCTRL__1_OFFSET,
++ .clktrctrl_mask = AM33XX_CLKTRCTRL_MASK,
++ .flags = (CLKDM_CAN_SWSUP | CLKDM_NO_AUTODEPS),
++};
++
++static struct clockdomain l4_cefuse_am33xx_clkdm = {
++ .name = "l4_cefuse_clkdm",
++ .pwrdm = { .name = "cefuse_pwrdm" },
++ .cm_inst = AM33XX_CM_CEFUSE_MOD,
++ .prcm_partition = AM33XX_PRM_PARTITION,
++ .clkdm_offs = AM33XX_CM_CEFUSE_CLKSTCTRL_OFFSET,
++ .clktrctrl_mask = AM33XX_CLKTRCTRL_MASK,
++ .flags = (CLKDM_CAN_SWSUP | CLKDM_NO_AUTODEPS),
++};
++
++static struct clockdomain *clockdomains_am33xx[] __initdata = {
++ &l4ls_am33xx_clkdm,
++ &l3s_am33xx_clkdm,
++ &l4fw_am33xx_clkdm,
++ &l3_am33xx_clkdm,
++ &l4hs_am33xx_clkdm,
++ &ocpwp_l3_am33xx_clkdm,
++ &pruss_ocp_am33xx_clkdm,
++ &cpsw_125mhz_am33xx_clkdm,
++ &lcdc_am33xx_clkdm,
++ &clk_24mhz_am33xx_clkdm,
++ &l4_wkup_am33xx_clkdm,
++ &l3_aon_am33xx_clkdm,
++ &l4_wkup_aon_am33xx_clkdm,
++ &mpu_am33xx_clkdm,
++ &l4_rtc_am33xx_clkdm,
++ &gfx_l3_am33xx_clkdm,
++ &gfx_l4ls_gfx_am33xx_clkdm,
++ &l4_cefuse_am33xx_clkdm,
++ NULL,
++};
++
++void __init am33xx_clockdomains_init(void)
++{
++ clkdm_register_platform_funcs(&am33xx_clkdm_operations);
++ clkdm_register_clkdms(clockdomains_am33xx);
++ clkdm_complete_init();
++}
+diff --git a/arch/arm/mach-omap2/cm-regbits-33xx.h b/arch/arm/mach-omap2/cm-regbits-33xx.h
+new file mode 100644
+index 0000000..f3e2d4a
+--- /dev/null
++++ b/arch/arm/mach-omap2/cm-regbits-33xx.h
+@@ -0,0 +1,683 @@
++/*
++ * AM33XX Power Management register bits
++ *
++ * This file is automatically generated from the AM33XX hardware databases.
++ *
++ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++
++#ifndef __ARCH_ARM_MACH_OMAP2_CM_REGBITS_33XX_H
++#define __ARCH_ARM_MACH_OMAP2_CM_REGBITS_33XX_H
++
++/*
++ * Used by CM_AUTOIDLE_DPLL_CORE, CM_AUTOIDLE_DPLL_DDR, CM_AUTOIDLE_DPLL_DISP,
++ * CM_AUTOIDLE_DPLL_MPU, CM_AUTOIDLE_DPLL_PER
++ */
++#define AM33XX_AUTO_DPLL_MODE_SHIFT 0
++#define AM33XX_AUTO_DPLL_MODE_MASK (0x7 << 0)
++
++/* Used by CM_WKUP_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_ADC_FCLK_SHIFT 14
++#define AM33XX_CLKACTIVITY_ADC_FCLK_MASK (1 << 16)
++
++/* Used by CM_PER_L4LS_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_CAN_CLK_SHIFT 11
++#define AM33XX_CLKACTIVITY_CAN_CLK_MASK (1 << 11)
++
++/* Used by CM_PER_CLK_24MHZ_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_CLK_24MHZ_GCLK_SHIFT 4
++#define AM33XX_CLKACTIVITY_CLK_24MHZ_GCLK_MASK (1 << 4)
++
++/* Used by CM_PER_CPSW_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_CPSW_125MHZ_GCLK_SHIFT 4
++#define AM33XX_CLKACTIVITY_CPSW_125MHZ_GCLK_MASK (1 << 4)
++
++/* Used by CM_PER_L4HS_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_CPSW_250MHZ_GCLK_SHIFT 4
++#define AM33XX_CLKACTIVITY_CPSW_250MHZ_GCLK_MASK (1 << 4)
++
++/* Used by CM_PER_L4HS_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_CPSW_50MHZ_GCLK_SHIFT 5
++#define AM33XX_CLKACTIVITY_CPSW_50MHZ_GCLK_MASK (1 << 5)
++
++/* Used by CM_PER_L4HS_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_CPSW_5MHZ_GCLK_SHIFT 6
++#define AM33XX_CLKACTIVITY_CPSW_5MHZ_GCLK_MASK (1 << 6)
++
++/* Used by CM_PER_L3_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_CPTS_RFT_GCLK_SHIFT 6
++#define AM33XX_CLKACTIVITY_CPTS_RFT_GCLK_MASK (1 << 6)
++
++/* Used by CM_CEFUSE_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_CUST_EFUSE_SYS_CLK_SHIFT 9
++#define AM33XX_CLKACTIVITY_CUST_EFUSE_SYS_CLK_MASK (1 << 9)
++
++/* Used by CM_L3_AON_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_DBGSYSCLK_SHIFT 2
++#define AM33XX_CLKACTIVITY_DBGSYSCLK_MASK (1 << 2)
++
++/* Used by CM_L3_AON_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_DEBUG_CLKA_SHIFT 4
++#define AM33XX_CLKACTIVITY_DEBUG_CLKA_MASK (1 << 4)
++
++/* Used by CM_PER_L3_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_EMIF_GCLK_SHIFT 2
++#define AM33XX_CLKACTIVITY_EMIF_GCLK_MASK (1 << 2)
++
++/* Used by CM_GFX_L3_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_GFX_FCLK_SHIFT 9
++#define AM33XX_CLKACTIVITY_GFX_FCLK_MASK (1 << 9)
++
++/* Used by CM_GFX_L3_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_GFX_L3_GCLK_SHIFT 8
++#define AM33XX_CLKACTIVITY_GFX_L3_GCLK_MASK (1 << 8)
++
++/* Used by CM_WKUP_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_GPIO0_GDBCLK_SHIFT 8
++#define AM33XX_CLKACTIVITY_GPIO0_GDBCLK_MASK (1 << 8)
++
++/* Used by CM_PER_L4LS_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_GPIO_1_GDBCLK_SHIFT 19
++#define AM33XX_CLKACTIVITY_GPIO_1_GDBCLK_MASK (1 << 19)
++
++/* Used by CM_PER_L4LS_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_GPIO_2_GDBCLK_SHIFT 20
++#define AM33XX_CLKACTIVITY_GPIO_2_GDBCLK_MASK (1 << 20)
++
++/* Used by CM_PER_L4LS_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_GPIO_3_GDBCLK_SHIFT 21
++#define AM33XX_CLKACTIVITY_GPIO_3_GDBCLK_MASK (1 << 21)
++
++/* Used by CM_PER_L4LS_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_GPIO_4_GDBCLK_SHIFT 22
++#define AM33XX_CLKACTIVITY_GPIO_4_GDBCLK_MASK (1 << 22)
++
++/* Used by CM_PER_L4LS_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_GPIO_5_GDBCLK_SHIFT 26
++#define AM33XX_CLKACTIVITY_GPIO_5_GDBCLK_MASK (1 << 26)
++
++/* Used by CM_PER_L4LS_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_GPIO_6_GDBCLK_SHIFT 18
++#define AM33XX_CLKACTIVITY_GPIO_6_GDBCLK_MASK (1 << 18)
++
++/* Used by CM_WKUP_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_I2C0_GFCLK_SHIFT 11
++#define AM33XX_CLKACTIVITY_I2C0_GFCLK_MASK (1 << 11)
++
++/* Used by CM_PER_L4LS_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_I2C_FCLK_SHIFT 24
++#define AM33XX_CLKACTIVITY_I2C_FCLK_MASK (1 << 24)
++
++/* Used by CM_PER_PRUSS_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_PRUSS_IEP_GCLK_SHIFT 5
++#define AM33XX_CLKACTIVITY_PRUSS_IEP_GCLK_MASK (1 << 5)
++
++/* Used by CM_PER_PRUSS_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_PRUSS_OCP_GCLK_SHIFT 4
++#define AM33XX_CLKACTIVITY_PRUSS_OCP_GCLK_MASK (1 << 4)
++
++/* Used by CM_PER_PRUSS_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_PRUSS_UART_GCLK_SHIFT 6
++#define AM33XX_CLKACTIVITY_PRUSS_UART_GCLK_MASK (1 << 6)
++
++/* Used by CM_PER_L3S_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_L3S_GCLK_SHIFT 3
++#define AM33XX_CLKACTIVITY_L3S_GCLK_MASK (1 << 3)
++
++/* Used by CM_L3_AON_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_L3_AON_GCLK_SHIFT 3
++#define AM33XX_CLKACTIVITY_L3_AON_GCLK_MASK (1 << 3)
++
++/* Used by CM_PER_L3_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_L3_GCLK_SHIFT 4
++#define AM33XX_CLKACTIVITY_L3_GCLK_MASK (1 << 4)
++
++/* Used by CM_PER_L4FW_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_L4FW_GCLK_SHIFT 8
++#define AM33XX_CLKACTIVITY_L4FW_GCLK_MASK (1 << 8)
++
++/* Used by CM_PER_L4HS_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_L4HS_GCLK_SHIFT 3
++#define AM33XX_CLKACTIVITY_L4HS_GCLK_MASK (1 << 3)
++
++/* Used by CM_PER_L4LS_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_L4LS_GCLK_SHIFT 8
++#define AM33XX_CLKACTIVITY_L4LS_GCLK_MASK (1 << 8)
++
++/* Used by CM_GFX_L4LS_GFX_CLKSTCTRL__1 */
++#define AM33XX_CLKACTIVITY_L4LS_GFX_GCLK_SHIFT 8
++#define AM33XX_CLKACTIVITY_L4LS_GFX_GCLK_MASK (1 << 8)
++
++/* Used by CM_CEFUSE_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_L4_CEFUSE_GICLK_SHIFT 8
++#define AM33XX_CLKACTIVITY_L4_CEFUSE_GICLK_MASK (1 << 8)
++
++/* Used by CM_RTC_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_L4_RTC_GCLK_SHIFT 8
++#define AM33XX_CLKACTIVITY_L4_RTC_GCLK_MASK (1 << 8)
++
++/* Used by CM_L4_WKUP_AON_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_L4_WKUP_AON_GCLK_SHIFT 2
++#define AM33XX_CLKACTIVITY_L4_WKUP_AON_GCLK_MASK (1 << 2)
++
++/* Used by CM_WKUP_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_L4_WKUP_GCLK_SHIFT 2
++#define AM33XX_CLKACTIVITY_L4_WKUP_GCLK_MASK (1 << 2)
++
++/* Used by CM_PER_L4LS_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_LCDC_GCLK_SHIFT 17
++#define AM33XX_CLKACTIVITY_LCDC_GCLK_MASK (1 << 17)
++
++/* Used by CM_PER_LCDC_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_LCDC_L3_OCP_GCLK_SHIFT 4
++#define AM33XX_CLKACTIVITY_LCDC_L3_OCP_GCLK_MASK (1 << 4)
++
++/* Used by CM_PER_LCDC_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_LCDC_L4_OCP_GCLK_SHIFT 5
++#define AM33XX_CLKACTIVITY_LCDC_L4_OCP_GCLK_MASK (1 << 5)
++
++/* Used by CM_PER_L3_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_MCASP_GCLK_SHIFT 7
++#define AM33XX_CLKACTIVITY_MCASP_GCLK_MASK (1 << 7)
++
++/* Used by CM_PER_L3_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_MMC_FCLK_SHIFT 3
++#define AM33XX_CLKACTIVITY_MMC_FCLK_MASK (1 << 3)
++
++/* Used by CM_MPU_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_MPU_CLK_SHIFT 2
++#define AM33XX_CLKACTIVITY_MPU_CLK_MASK (1 << 2)
++
++/* Used by CM_PER_OCPWP_L3_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_OCPWP_L3_GCLK_SHIFT 4
++#define AM33XX_CLKACTIVITY_OCPWP_L3_GCLK_MASK (1 << 4)
++
++/* Used by CM_PER_OCPWP_L3_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_OCPWP_L4_GCLK_SHIFT 5
++#define AM33XX_CLKACTIVITY_OCPWP_L4_GCLK_MASK (1 << 5)
++
++/* Used by CM_RTC_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_RTC_32KCLK_SHIFT 9
++#define AM33XX_CLKACTIVITY_RTC_32KCLK_MASK (1 << 9)
++
++/* Used by CM_PER_L4LS_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_SPI_GCLK_SHIFT 25
++#define AM33XX_CLKACTIVITY_SPI_GCLK_MASK (1 << 25)
++
++/* Used by CM_WKUP_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_SR_SYSCLK_SHIFT 3
++#define AM33XX_CLKACTIVITY_SR_SYSCLK_MASK (1 << 3)
++
++/* Used by CM_WKUP_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_TIMER0_GCLK_SHIFT 10
++#define AM33XX_CLKACTIVITY_TIMER0_GCLK_MASK (1 << 10)
++
++/* Used by CM_WKUP_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_TIMER1_GCLK_SHIFT 13
++#define AM33XX_CLKACTIVITY_TIMER1_GCLK_MASK (1 << 13)
++
++/* Used by CM_PER_L4LS_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_TIMER2_GCLK_SHIFT 14
++#define AM33XX_CLKACTIVITY_TIMER2_GCLK_MASK (1 << 14)
++
++/* Used by CM_PER_L4LS_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_TIMER3_GCLK_SHIFT 15
++#define AM33XX_CLKACTIVITY_TIMER3_GCLK_MASK (1 << 15)
++
++/* Used by CM_PER_L4LS_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_TIMER4_GCLK_SHIFT 16
++#define AM33XX_CLKACTIVITY_TIMER4_GCLK_MASK (1 << 16)
++
++/* Used by CM_PER_L4LS_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_TIMER5_GCLK_SHIFT 27
++#define AM33XX_CLKACTIVITY_TIMER5_GCLK_MASK (1 << 27)
++
++/* Used by CM_PER_L4LS_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_TIMER6_GCLK_SHIFT 28
++#define AM33XX_CLKACTIVITY_TIMER6_GCLK_MASK (1 << 28)
++
++/* Used by CM_PER_L4LS_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_TIMER7_GCLK_SHIFT 13
++#define AM33XX_CLKACTIVITY_TIMER7_GCLK_MASK (1 << 13)
++
++/* Used by CM_WKUP_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_UART0_GFCLK_SHIFT 12
++#define AM33XX_CLKACTIVITY_UART0_GFCLK_MASK (1 << 12)
++
++/* Used by CM_PER_L4LS_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_UART_GFCLK_SHIFT 10
++#define AM33XX_CLKACTIVITY_UART_GFCLK_MASK (1 << 10)
++
++/* Used by CM_WKUP_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_WDT0_GCLK_SHIFT 9
++#define AM33XX_CLKACTIVITY_WDT0_GCLK_MASK (1 << 9)
++
++/* Used by CM_WKUP_CLKSTCTRL */
++#define AM33XX_CLKACTIVITY_WDT1_GCLK_SHIFT 4
++#define AM33XX_CLKACTIVITY_WDT1_GCLK_MASK (1 << 4)
++
++/* Used by CLKSEL_GFX_FCLK */
++#define AM33XX_CLKDIV_SEL_GFX_FCLK_SHIFT 0
++#define AM33XX_CLKDIV_SEL_GFX_FCLK_MASK (1 << 0)
++
++/* Used by CM_CLKOUT_CTRL */
++#define AM33XX_CLKOUT2DIV_SHIFT 3
++#define AM33XX_CLKOUT2DIV_MASK (0x7 << 3)
++
++/* Used by CM_CLKOUT_CTRL */
++#define AM33XX_CLKOUT2EN_SHIFT 7
++#define AM33XX_CLKOUT2EN_MASK (1 << 7)
++
++/* Used by CM_CLKOUT_CTRL */
++#define AM33XX_CLKOUT2SOURCE_SHIFT 0
++#define AM33XX_CLKOUT2SOURCE_MASK (0x7 << 0)
++
++/*
++ * Used by CLKSEL_GPIO0_DBCLK, CLKSEL_LCDC_PIXEL_CLK, CLKSEL_TIMER2_CLK,
++ * CLKSEL_TIMER3_CLK, CLKSEL_TIMER4_CLK, CLKSEL_TIMER5_CLK, CLKSEL_TIMER6_CLK,
++ * CLKSEL_TIMER7_CLK
++ */
++#define AM33XX_CLKSEL_SHIFT 0
++#define AM33XX_CLKSEL_MASK (0x01 << 0)
++
++/*
++ * Renamed from CLKSEL; used by CLKSEL_PRUSS_OCP_CLK, CLKSEL_WDT1_CLK,
++ * CM_CPTS_RFT_CLKSEL
++ */
++#define AM33XX_CLKSEL_0_0_SHIFT 0
++#define AM33XX_CLKSEL_0_0_MASK (1 << 0)
++
++#define AM33XX_CLKSEL_0_1_SHIFT 0
++#define AM33XX_CLKSEL_0_1_MASK (3 << 0)
++
++/* Renamed from CLKSEL; used by CLKSEL_TIMER1MS_CLK */
++#define AM33XX_CLKSEL_0_2_SHIFT 0
++#define AM33XX_CLKSEL_0_2_MASK (7 << 0)
++
++/* Used by CLKSEL_GFX_FCLK */
++#define AM33XX_CLKSEL_GFX_FCLK_SHIFT 1
++#define AM33XX_CLKSEL_GFX_FCLK_MASK (1 << 1)
++
++/*
++ * Used by CM_MPU_CLKSTCTRL, CM_RTC_CLKSTCTRL, CM_PER_CLK_24MHZ_CLKSTCTRL,
++ * CM_PER_CPSW_CLKSTCTRL, CM_PER_PRUSS_CLKSTCTRL, CM_PER_L3S_CLKSTCTRL,
++ * CM_PER_L3_CLKSTCTRL, CM_PER_L4FW_CLKSTCTRL, CM_PER_L4HS_CLKSTCTRL,
++ * CM_PER_L4LS_CLKSTCTRL, CM_PER_LCDC_CLKSTCTRL, CM_PER_OCPWP_L3_CLKSTCTRL,
++ * CM_L3_AON_CLKSTCTRL, CM_L4_WKUP_AON_CLKSTCTRL, CM_WKUP_CLKSTCTRL,
++ * CM_GFX_L3_CLKSTCTRL, CM_GFX_L4LS_GFX_CLKSTCTRL__1, CM_CEFUSE_CLKSTCTRL
++ */
++#define AM33XX_CLKTRCTRL_SHIFT 0
++#define AM33XX_CLKTRCTRL_MASK (0x3 << 0)
++
++/*
++ * Used by CM_SSC_DELTAMSTEP_DPLL_CORE, CM_SSC_DELTAMSTEP_DPLL_DDR,
++ * CM_SSC_DELTAMSTEP_DPLL_DISP, CM_SSC_DELTAMSTEP_DPLL_MPU,
++ * CM_SSC_DELTAMSTEP_DPLL_PER
++ */
++#define AM33XX_DELTAMSTEP_SHIFT 0
++#define AM33XX_DELTAMSTEP_MASK (0xfffff << 0)
++
++/* Used by CM_CLKSEL_DPLL_DDR, CM_CLKSEL_DPLL_DISP, CM_CLKSEL_DPLL_MPU */
++#define AM33XX_DPLL_BYP_CLKSEL_SHIFT 23
++#define AM33XX_DPLL_BYP_CLKSEL_MASK (1 << 23)
++
++/* Used by CM_CLKDCOLDO_DPLL_PER */
++#define AM33XX_DPLL_CLKDCOLDO_GATE_CTRL_SHIFT 8
++#define AM33XX_DPLL_CLKDCOLDO_GATE_CTRL_MASK (1 << 8)
++
++/* Used by CM_CLKDCOLDO_DPLL_PER */
++#define AM33XX_DPLL_CLKDCOLDO_PWDN_SHIFT 12
++#define AM33XX_DPLL_CLKDCOLDO_PWDN_MASK (1 << 12)
++
++/* Used by CM_DIV_M2_DPLL_DDR, CM_DIV_M2_DPLL_DISP, CM_DIV_M2_DPLL_MPU */
++#define AM33XX_DPLL_CLKOUT_DIV_SHIFT 0
++#define AM33XX_DPLL_CLKOUT_DIV_MASK (0x1f << 0)
++
++/* Renamed from DPLL_CLKOUT_DIV; used by CM_DIV_M2_DPLL_PER */
++#define AM33XX_DPLL_CLKOUT_DIV_0_6_SHIFT 0
++#define AM33XX_DPLL_CLKOUT_DIV_0_6_MASK (0x7f << 0)
++
++/* Used by CM_DIV_M2_DPLL_DDR, CM_DIV_M2_DPLL_DISP, CM_DIV_M2_DPLL_MPU */
++#define AM33XX_DPLL_CLKOUT_DIVCHACK_SHIFT 5
++#define AM33XX_DPLL_CLKOUT_DIVCHACK_MASK (1 << 5)
++
++/* Renamed from DPLL_CLKOUT_DIVCHACK; used by CM_DIV_M2_DPLL_PER */
++#define AM33XX_DPLL_CLKOUT_DIVCHACK_M2_PER_SHIFT 7
++#define AM33XX_DPLL_CLKOUT_DIVCHACK_M2_PER_MASK (1 << 7)
++
++/*
++ * Used by CM_DIV_M2_DPLL_DDR, CM_DIV_M2_DPLL_DISP, CM_DIV_M2_DPLL_MPU,
++ * CM_DIV_M2_DPLL_PER
++ */
++#define AM33XX_DPLL_CLKOUT_GATE_CTRL_SHIFT 8
++#define AM33XX_DPLL_CLKOUT_GATE_CTRL_MASK (1 << 8)
++
++/*
++ * Used by CM_CLKSEL_DPLL_CORE, CM_CLKSEL_DPLL_DDR, CM_CLKSEL_DPLL_DISP,
++ * CM_CLKSEL_DPLL_MPU
++ */
++#define AM33XX_DPLL_DIV_SHIFT 0
++#define AM33XX_DPLL_DIV_MASK (0x7f << 0)
++
++#define AM33XX_DPLL_PER_DIV_MASK (0xff << 0)
++
++/* Renamed from DPLL_DIV; used by CM_CLKSEL_DPLL_PERIPH */
++#define AM33XX_DPLL_DIV_0_7_SHIFT 0
++#define AM33XX_DPLL_DIV_0_7_MASK (0xff << 0)
++
++/*
++ * Used by CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDR, CM_CLKMODE_DPLL_DISP,
++ * CM_CLKMODE_DPLL_MPU
++ */
++#define AM33XX_DPLL_DRIFTGUARD_EN_SHIFT 8
++#define AM33XX_DPLL_DRIFTGUARD_EN_MASK (1 << 8)
++
++/*
++ * Used by CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDR, CM_CLKMODE_DPLL_DISP,
++ * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER
++ */
++#define AM33XX_DPLL_EN_SHIFT 0
++#define AM33XX_DPLL_EN_MASK (0x7 << 0)
++
++/*
++ * Used by CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDR, CM_CLKMODE_DPLL_DISP,
++ * CM_CLKMODE_DPLL_MPU
++ */
++#define AM33XX_DPLL_LPMODE_EN_SHIFT 10
++#define AM33XX_DPLL_LPMODE_EN_MASK (1 << 10)
++
++/*
++ * Used by CM_CLKSEL_DPLL_CORE, CM_CLKSEL_DPLL_DDR, CM_CLKSEL_DPLL_DISP,
++ * CM_CLKSEL_DPLL_MPU
++ */
++#define AM33XX_DPLL_MULT_SHIFT 8
++#define AM33XX_DPLL_MULT_MASK (0x7ff << 8)
++
++/* Renamed from DPLL_MULT; used by CM_CLKSEL_DPLL_PERIPH */
++#define AM33XX_DPLL_MULT_PERIPH_SHIFT 8
++#define AM33XX_DPLL_MULT_PERIPH_MASK (0xfff << 8)
++
++/*
++ * Used by CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDR, CM_CLKMODE_DPLL_DISP,
++ * CM_CLKMODE_DPLL_MPU
++ */
++#define AM33XX_DPLL_REGM4XEN_SHIFT 11
++#define AM33XX_DPLL_REGM4XEN_MASK (1 << 11)
++
++/* Used by CM_CLKSEL_DPLL_PERIPH */
++#define AM33XX_DPLL_SD_DIV_SHIFT 24
++#define AM33XX_DPLL_SD_DIV_MASK (0xff << 24)
++
++/*
++ * Used by CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDR, CM_CLKMODE_DPLL_DISP,
++ * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER
++ */
++#define AM33XX_DPLL_SSC_ACK_SHIFT 13
++#define AM33XX_DPLL_SSC_ACK_MASK (1 << 13)
++
++/*
++ * Used by CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDR, CM_CLKMODE_DPLL_DISP,
++ * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER
++ */
++#define AM33XX_DPLL_SSC_DOWNSPREAD_SHIFT 14
++#define AM33XX_DPLL_SSC_DOWNSPREAD_MASK (1 << 14)
++
++/*
++ * Used by CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDR, CM_CLKMODE_DPLL_DISP,
++ * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER
++ */
++#define AM33XX_DPLL_SSC_EN_SHIFT 12
++#define AM33XX_DPLL_SSC_EN_MASK (1 << 12)
++
++/* Used by CM_DIV_M4_DPLL_CORE */
++#define AM33XX_HSDIVIDER_CLKOUT1_DIV_SHIFT 0
++#define AM33XX_HSDIVIDER_CLKOUT1_DIV_MASK (0x1f << 0)
++
++/* Used by CM_DIV_M4_DPLL_CORE */
++#define AM33XX_HSDIVIDER_CLKOUT1_DIVCHACK_SHIFT 5
++#define AM33XX_HSDIVIDER_CLKOUT1_DIVCHACK_MASK (1 << 5)
++
++/* Used by CM_DIV_M4_DPLL_CORE */
++#define AM33XX_HSDIVIDER_CLKOUT1_GATE_CTRL_SHIFT 8
++#define AM33XX_HSDIVIDER_CLKOUT1_GATE_CTRL_MASK (1 << 8)
++
++/* Used by CM_DIV_M4_DPLL_CORE */
++#define AM33XX_HSDIVIDER_CLKOUT1_PWDN_SHIFT 12
++#define AM33XX_HSDIVIDER_CLKOUT1_PWDN_MASK (1 << 12)
++
++/* Used by CM_DIV_M5_DPLL_CORE */
++#define AM33XX_HSDIVIDER_CLKOUT2_DIV_SHIFT 0
++#define AM33XX_HSDIVIDER_CLKOUT2_DIV_MASK (0x1f << 0)
++
++/* Used by CM_DIV_M5_DPLL_CORE */
++#define AM33XX_HSDIVIDER_CLKOUT2_DIVCHACK_SHIFT 5
++#define AM33XX_HSDIVIDER_CLKOUT2_DIVCHACK_MASK (1 << 5)
++
++/* Used by CM_DIV_M5_DPLL_CORE */
++#define AM33XX_HSDIVIDER_CLKOUT2_GATE_CTRL_SHIFT 8
++#define AM33XX_HSDIVIDER_CLKOUT2_GATE_CTRL_MASK (1 << 8)
++
++/* Used by CM_DIV_M5_DPLL_CORE */
++#define AM33XX_HSDIVIDER_CLKOUT2_PWDN_SHIFT 12
++#define AM33XX_HSDIVIDER_CLKOUT2_PWDN_MASK (1 << 12)
++
++/* Used by CM_DIV_M6_DPLL_CORE */
++#define AM33XX_HSDIVIDER_CLKOUT3_DIV_SHIFT 0
++#define AM33XX_HSDIVIDER_CLKOUT3_DIV_MASK (0x1f << 0)
++
++/* Used by CM_DIV_M6_DPLL_CORE */
++#define AM33XX_HSDIVIDER_CLKOUT3_DIVCHACK_SHIFT 5
++#define AM33XX_HSDIVIDER_CLKOUT3_DIVCHACK_MASK (1 << 5)
++
++/* Used by CM_DIV_M6_DPLL_CORE */
++#define AM33XX_HSDIVIDER_CLKOUT3_GATE_CTRL_SHIFT 8
++#define AM33XX_HSDIVIDER_CLKOUT3_GATE_CTRL_MASK (1 << 8)
++
++/* Used by CM_DIV_M6_DPLL_CORE */
++#define AM33XX_HSDIVIDER_CLKOUT3_PWDN_SHIFT 12
++#define AM33XX_HSDIVIDER_CLKOUT3_PWDN_MASK (1 << 12)
++
++/*
++ * Used by CM_MPU_MPU_CLKCTRL, CM_RTC_RTC_CLKCTRL, CM_PER_AES0_CLKCTRL,
++ * CM_PER_AES1_CLKCTRL, CM_PER_CLKDIV32K_CLKCTRL, CM_PER_CPGMAC0_CLKCTRL,
++ * CM_PER_DCAN0_CLKCTRL, CM_PER_DCAN1_CLKCTRL, CM_PER_DES_CLKCTRL,
++ * CM_PER_ELM_CLKCTRL, CM_PER_EMIF_CLKCTRL, CM_PER_EMIF_FW_CLKCTRL,
++ * CM_PER_EPWMSS0_CLKCTRL, CM_PER_EPWMSS1_CLKCTRL, CM_PER_EPWMSS2_CLKCTRL,
++ * CM_PER_GPIO1_CLKCTRL, CM_PER_GPIO2_CLKCTRL, CM_PER_GPIO3_CLKCTRL,
++ * CM_PER_GPIO4_CLKCTRL, CM_PER_GPIO5_CLKCTRL, CM_PER_GPIO6_CLKCTRL,
++ * CM_PER_GPMC_CLKCTRL, CM_PER_I2C1_CLKCTRL, CM_PER_I2C2_CLKCTRL,
++ * CM_PER_PRUSS_CLKCTRL, CM_PER_IEEE5000_CLKCTRL, CM_PER_L3_CLKCTRL,
++ * CM_PER_L3_INSTR_CLKCTRL, CM_PER_L4FW_CLKCTRL, CM_PER_L4HS_CLKCTRL,
++ * CM_PER_L4LS_CLKCTRL, CM_PER_LCDC_CLKCTRL, CM_PER_MAILBOX0_CLKCTRL,
++ * CM_PER_MAILBOX1_CLKCTRL, CM_PER_MCASP0_CLKCTRL, CM_PER_MCASP1_CLKCTRL,
++ * CM_PER_MCASP2_CLKCTRL, CM_PER_MLB_CLKCTRL, CM_PER_MMC0_CLKCTRL,
++ * CM_PER_MMC1_CLKCTRL, CM_PER_MMC2_CLKCTRL, CM_PER_MSTR_EXPS_CLKCTRL,
++ * CM_PER_OCMCRAM_CLKCTRL, CM_PER_OCPWP_CLKCTRL, CM_PER_PCIE_CLKCTRL,
++ * CM_PER_PKA_CLKCTRL, CM_PER_RNG_CLKCTRL, CM_PER_SHA0_CLKCTRL,
++ * CM_PER_SLV_EXPS_CLKCTRL, CM_PER_SPARE0_CLKCTRL, CM_PER_SPARE1_CLKCTRL,
++ * CM_PER_SPARE_CLKCTRL, CM_PER_SPI0_CLKCTRL, CM_PER_SPI1_CLKCTRL,
++ * CM_PER_SPI2_CLKCTRL, CM_PER_SPI3_CLKCTRL, CM_PER_SPINLOCK_CLKCTRL,
++ * CM_PER_TIMER2_CLKCTRL, CM_PER_TIMER3_CLKCTRL, CM_PER_TIMER4_CLKCTRL,
++ * CM_PER_TIMER5_CLKCTRL, CM_PER_TIMER6_CLKCTRL, CM_PER_TIMER7_CLKCTRL,
++ * CM_PER_TPCC_CLKCTRL, CM_PER_TPTC0_CLKCTRL, CM_PER_TPTC1_CLKCTRL,
++ * CM_PER_TPTC2_CLKCTRL, CM_PER_UART1_CLKCTRL, CM_PER_UART2_CLKCTRL,
++ * CM_PER_UART3_CLKCTRL, CM_PER_UART4_CLKCTRL, CM_PER_UART5_CLKCTRL,
++ * CM_PER_USB0_CLKCTRL, CM_WKUP_ADC_TSC_CLKCTRL, CM_WKUP_CONTROL_CLKCTRL,
++ * CM_WKUP_DEBUGSS_CLKCTRL, CM_WKUP_GPIO0_CLKCTRL, CM_WKUP_I2C0_CLKCTRL,
++ * CM_WKUP_L4WKUP_CLKCTRL, CM_WKUP_SMARTREFLEX0_CLKCTRL,
++ * CM_WKUP_SMARTREFLEX1_CLKCTRL, CM_WKUP_TIMER0_CLKCTRL,
++ * CM_WKUP_TIMER1_CLKCTRL, CM_WKUP_UART0_CLKCTRL, CM_WKUP_WDT0_CLKCTRL,
++ * CM_WKUP_WDT1_CLKCTRL, CM_GFX_BITBLT_CLKCTRL, CM_GFX_GFX_CLKCTRL,
++ * CM_GFX_MMUCFG_CLKCTRL, CM_GFX_MMUDATA_CLKCTRL, CM_CEFUSE_CEFUSE_CLKCTRL
++ */
++#define AM33XX_IDLEST_SHIFT 16
++#define AM33XX_IDLEST_MASK (0x3 << 16)
++#define AM33XX_IDLEST_VAL 0x3
++
++/* Used by CM_MAC_CLKSEL */
++#define AM33XX_MII_CLK_SEL_SHIFT 2
++#define AM33XX_MII_CLK_SEL_MASK (1 << 2)
++
++/*
++ * Used by CM_SSC_MODFREQDIV_DPLL_CORE, CM_SSC_MODFREQDIV_DPLL_DDR,
++ * CM_SSC_MODFREQDIV_DPLL_DISP, CM_SSC_MODFREQDIV_DPLL_MPU,
++ * CM_SSC_MODFREQDIV_DPLL_PER
++ */
++#define AM33XX_MODFREQDIV_EXPONENT_SHIFT 8
++#define AM33XX_MODFREQDIV_EXPONENT_MASK (0x7 << 8)
++
++/*
++ * Used by CM_SSC_MODFREQDIV_DPLL_CORE, CM_SSC_MODFREQDIV_DPLL_DDR,
++ * CM_SSC_MODFREQDIV_DPLL_DISP, CM_SSC_MODFREQDIV_DPLL_MPU,
++ * CM_SSC_MODFREQDIV_DPLL_PER
++ */
++#define AM33XX_MODFREQDIV_MANTISSA_SHIFT 0
++#define AM33XX_MODFREQDIV_MANTISSA_MASK (0x7f << 0)
++
++/*
++ * Used by CM_MPU_MPU_CLKCTRL, CM_RTC_RTC_CLKCTRL, CM_PER_AES0_CLKCTRL,
++ * CM_PER_AES1_CLKCTRL, CM_PER_CLKDIV32K_CLKCTRL, CM_PER_CPGMAC0_CLKCTRL,
++ * CM_PER_DCAN0_CLKCTRL, CM_PER_DCAN1_CLKCTRL, CM_PER_DES_CLKCTRL,
++ * CM_PER_ELM_CLKCTRL, CM_PER_EMIF_CLKCTRL, CM_PER_EMIF_FW_CLKCTRL,
++ * CM_PER_EPWMSS0_CLKCTRL, CM_PER_EPWMSS1_CLKCTRL, CM_PER_EPWMSS2_CLKCTRL,
++ * CM_PER_GPIO1_CLKCTRL, CM_PER_GPIO2_CLKCTRL, CM_PER_GPIO3_CLKCTRL,
++ * CM_PER_GPIO4_CLKCTRL, CM_PER_GPIO5_CLKCTRL, CM_PER_GPIO6_CLKCTRL,
++ * CM_PER_GPMC_CLKCTRL, CM_PER_I2C1_CLKCTRL, CM_PER_I2C2_CLKCTRL,
++ * CM_PER_PRUSS_CLKCTRL, CM_PER_IEEE5000_CLKCTRL, CM_PER_L3_CLKCTRL,
++ * CM_PER_L3_INSTR_CLKCTRL, CM_PER_L4FW_CLKCTRL, CM_PER_L4HS_CLKCTRL,
++ * CM_PER_L4LS_CLKCTRL, CM_PER_LCDC_CLKCTRL, CM_PER_MAILBOX0_CLKCTRL,
++ * CM_PER_MAILBOX1_CLKCTRL, CM_PER_MCASP0_CLKCTRL, CM_PER_MCASP1_CLKCTRL,
++ * CM_PER_MCASP2_CLKCTRL, CM_PER_MLB_CLKCTRL, CM_PER_MMC0_CLKCTRL,
++ * CM_PER_MMC1_CLKCTRL, CM_PER_MMC2_CLKCTRL, CM_PER_MSTR_EXPS_CLKCTRL,
++ * CM_PER_OCMCRAM_CLKCTRL, CM_PER_OCPWP_CLKCTRL, CM_PER_PCIE_CLKCTRL,
++ * CM_PER_PKA_CLKCTRL, CM_PER_RNG_CLKCTRL, CM_PER_SHA0_CLKCTRL,
++ * CM_PER_SLV_EXPS_CLKCTRL, CM_PER_SPARE0_CLKCTRL, CM_PER_SPARE1_CLKCTRL,
++ * CM_PER_SPARE_CLKCTRL, CM_PER_SPI0_CLKCTRL, CM_PER_SPI1_CLKCTRL,
++ * CM_PER_SPI2_CLKCTRL, CM_PER_SPI3_CLKCTRL, CM_PER_SPINLOCK_CLKCTRL,
++ * CM_PER_TIMER2_CLKCTRL, CM_PER_TIMER3_CLKCTRL, CM_PER_TIMER4_CLKCTRL,
++ * CM_PER_TIMER5_CLKCTRL, CM_PER_TIMER6_CLKCTRL, CM_PER_TIMER7_CLKCTRL,
++ * CM_PER_TPCC_CLKCTRL, CM_PER_TPTC0_CLKCTRL, CM_PER_TPTC1_CLKCTRL,
++ * CM_PER_TPTC2_CLKCTRL, CM_PER_UART1_CLKCTRL, CM_PER_UART2_CLKCTRL,
++ * CM_PER_UART3_CLKCTRL, CM_PER_UART4_CLKCTRL, CM_PER_UART5_CLKCTRL,
++ * CM_PER_USB0_CLKCTRL, CM_WKUP_ADC_TSC_CLKCTRL, CM_WKUP_CONTROL_CLKCTRL,
++ * CM_WKUP_DEBUGSS_CLKCTRL, CM_WKUP_GPIO0_CLKCTRL, CM_WKUP_I2C0_CLKCTRL,
++ * CM_WKUP_L4WKUP_CLKCTRL, CM_WKUP_SMARTREFLEX0_CLKCTRL,
++ * CM_WKUP_SMARTREFLEX1_CLKCTRL, CM_WKUP_TIMER0_CLKCTRL,
++ * CM_WKUP_TIMER1_CLKCTRL, CM_WKUP_UART0_CLKCTRL, CM_WKUP_WDT0_CLKCTRL,
++ * CM_WKUP_WDT1_CLKCTRL, CM_WKUP_WKUP_M3_CLKCTRL, CM_GFX_BITBLT_CLKCTRL,
++ * CM_GFX_GFX_CLKCTRL, CM_GFX_MMUCFG_CLKCTRL, CM_GFX_MMUDATA_CLKCTRL,
++ * CM_CEFUSE_CEFUSE_CLKCTRL
++ */
++#define AM33XX_MODULEMODE_SHIFT 0
++#define AM33XX_MODULEMODE_MASK (0x3 << 0)
++
++/* Used by CM_WKUP_DEBUGSS_CLKCTRL */
++#define AM33XX_OPTCLK_DEBUG_CLKA_SHIFT 30
++#define AM33XX_OPTCLK_DEBUG_CLKA_MASK (1 << 30)
++
++/* Used by CM_WKUP_DEBUGSS_CLKCTRL */
++#define AM33XX_OPTFCLKEN_DBGSYSCLK_SHIFT 19
++#define AM33XX_OPTFCLKEN_DBGSYSCLK_MASK (1 << 19)
++
++/* Used by CM_WKUP_GPIO0_CLKCTRL */
++#define AM33XX_OPTFCLKEN_GPIO0_GDBCLK_SHIFT 18
++#define AM33XX_OPTFCLKEN_GPIO0_GDBCLK_MASK (1 << 18)
++
++/* Used by CM_PER_GPIO1_CLKCTRL */
++#define AM33XX_OPTFCLKEN_GPIO_1_GDBCLK_SHIFT 18
++#define AM33XX_OPTFCLKEN_GPIO_1_GDBCLK_MASK (1 << 18)
++
++/* Used by CM_PER_GPIO2_CLKCTRL */
++#define AM33XX_OPTFCLKEN_GPIO_2_GDBCLK_SHIFT 18
++#define AM33XX_OPTFCLKEN_GPIO_2_GDBCLK_MASK (1 << 18)
++
++/* Used by CM_PER_GPIO3_CLKCTRL */
++#define AM33XX_OPTFCLKEN_GPIO_3_GDBCLK_SHIFT 18
++#define AM33XX_OPTFCLKEN_GPIO_3_GDBCLK_MASK (1 << 18)
++
++/* Used by CM_PER_GPIO4_CLKCTRL */
++#define AM33XX_OPTFCLKEN_GPIO_4_GDBCLK_SHIFT 18
++#define AM33XX_OPTFCLKEN_GPIO_4_GDBCLK_MASK (1 << 18)
++
++/* Used by CM_PER_GPIO5_CLKCTRL */
++#define AM33XX_OPTFCLKEN_GPIO_5_GDBCLK_SHIFT 18
++#define AM33XX_OPTFCLKEN_GPIO_5_GDBCLK_MASK (1 << 18)
++
++/* Used by CM_PER_GPIO6_CLKCTRL */
++#define AM33XX_OPTFCLKEN_GPIO_6_GDBCLK_SHIFT 18
++#define AM33XX_OPTFCLKEN_GPIO_6_GDBCLK_MASK (1 << 18)
++
++/*
++ * Used by CM_MPU_MPU_CLKCTRL, CM_PER_CPGMAC0_CLKCTRL, CM_PER_PRUSS_CLKCTRL,
++ * CM_PER_IEEE5000_CLKCTRL, CM_PER_LCDC_CLKCTRL, CM_PER_MLB_CLKCTRL,
++ * CM_PER_MSTR_EXPS_CLKCTRL, CM_PER_OCPWP_CLKCTRL, CM_PER_PCIE_CLKCTRL,
++ * CM_PER_SPARE_CLKCTRL, CM_PER_TPTC0_CLKCTRL, CM_PER_TPTC1_CLKCTRL,
++ * CM_PER_TPTC2_CLKCTRL, CM_PER_USB0_CLKCTRL, CM_WKUP_DEBUGSS_CLKCTRL,
++ * CM_WKUP_WKUP_M3_CLKCTRL, CM_GFX_BITBLT_CLKCTRL, CM_GFX_GFX_CLKCTRL
++ */
++#define AM33XX_STBYST_SHIFT 18
++#define AM33XX_STBYST_MASK (1 << 18)
++
++/* Used by CM_WKUP_DEBUGSS_CLKCTRL */
++#define AM33XX_STM_PMD_CLKDIVSEL_SHIFT 27
++#define AM33XX_STM_PMD_CLKDIVSEL_MASK (0x7 << 27)
++
++/* Used by CM_WKUP_DEBUGSS_CLKCTRL */
++#define AM33XX_STM_PMD_CLKSEL_SHIFT 22
++#define AM33XX_STM_PMD_CLKSEL_MASK (0x3 << 22)
++
++/*
++ * Used by CM_IDLEST_DPLL_CORE, CM_IDLEST_DPLL_DDR, CM_IDLEST_DPLL_DISP,
++ * CM_IDLEST_DPLL_MPU, CM_IDLEST_DPLL_PER
++ */
++#define AM33XX_ST_DPLL_CLK_SHIFT 0
++#define AM33XX_ST_DPLL_CLK_MASK (1 << 0)
++
++/* Used by CM_CLKDCOLDO_DPLL_PER */
++#define AM33XX_ST_DPLL_CLKDCOLDO_SHIFT 8
++#define AM33XX_ST_DPLL_CLKDCOLDO_MASK (1 << 8)
++
++/*
++ * Used by CM_DIV_M2_DPLL_DDR, CM_DIV_M2_DPLL_DISP, CM_DIV_M2_DPLL_MPU,
++ * CM_DIV_M2_DPLL_PER
++ */
++#define AM33XX_ST_DPLL_CLKOUT_SHIFT 9
++#define AM33XX_ST_DPLL_CLKOUT_MASK (1 << 9)
++
++/* Used by CM_DIV_M4_DPLL_CORE */
++#define AM33XX_ST_HSDIVIDER_CLKOUT1_SHIFT 9
++#define AM33XX_ST_HSDIVIDER_CLKOUT1_MASK (1 << 9)
++
++/* Used by CM_DIV_M5_DPLL_CORE */
++#define AM33XX_ST_HSDIVIDER_CLKOUT2_SHIFT 9
++#define AM33XX_ST_HSDIVIDER_CLKOUT2_MASK (1 << 9)
++
++/* Used by CM_DIV_M6_DPLL_CORE */
++#define AM33XX_ST_HSDIVIDER_CLKOUT3_SHIFT 9
++#define AM33XX_ST_HSDIVIDER_CLKOUT3_MASK (1 << 9)
++
++/*
++ * Used by CM_IDLEST_DPLL_CORE, CM_IDLEST_DPLL_DDR, CM_IDLEST_DPLL_DISP,
++ * CM_IDLEST_DPLL_MPU, CM_IDLEST_DPLL_PER
++ */
++#define AM33XX_ST_MN_BYPASS_SHIFT 8
++#define AM33XX_ST_MN_BYPASS_MASK (1 << 8)
++
++/* Used by CM_WKUP_DEBUGSS_CLKCTRL */
++#define AM33XX_TRC_PMD_CLKDIVSEL_SHIFT 24
++#define AM33XX_TRC_PMD_CLKDIVSEL_MASK (0x7 << 24)
++
++/* Used by CM_WKUP_DEBUGSS_CLKCTRL */
++#define AM33XX_TRC_PMD_CLKSEL_SHIFT 20
++#define AM33XX_TRC_PMD_CLKSEL_MASK (0x3 << 20)
++#endif
+diff --git a/arch/arm/mach-omap2/cm2xxx_3xxx.c b/arch/arm/mach-omap2/cm2xxx_3xxx.c
+index 38830d8..b2418b4 100644
+--- a/arch/arm/mach-omap2/cm2xxx_3xxx.c
++++ b/arch/arm/mach-omap2/cm2xxx_3xxx.c
+@@ -18,7 +18,7 @@
+ #include <linux/err.h>
+ #include <linux/io.h>
+
+-#include <plat/common.h>
++#include "common.h"
+
+ #include "cm.h"
+ #include "cm2xxx_3xxx.h"
+@@ -84,6 +84,16 @@ static void _write_clktrctrl(u8 c, s16 module, u32 mask)
+ omap2_cm_write_mod_reg(v, module, OMAP2_CM_CLKSTCTRL);
+ }
+
++static void _am33xx_write_clktrctrl(u8 c, s16 module, u16 idx, u32 mask)
++{
++ u32 v;
++
++ v = omap2_cm_read_mod_reg(module, idx);
++ v &= ~mask;
++ v |= c << __ffs(mask);
++ omap2_cm_write_mod_reg(v, module, idx);
++}
++
+ bool omap2_cm_is_clkdm_in_hwsup(s16 module, u32 mask)
+ {
+ u32 v;
+@@ -195,6 +205,30 @@ void omap2xxx_cm_set_apll96_auto_low_power_stop(void)
+ OMAP24XX_AUTO_96M_MASK);
+ }
+
++void am33xx_cm_clkdm_enable_hwsup(s16 inst, u16 clkdm, u32 mask)
++{
++ _am33xx_write_clktrctrl(OMAP34XX_CLKSTCTRL_ENABLE_AUTO, inst,
++ clkdm, mask);
++}
++
++void am33xx_cm_clkdm_disable_hwsup(s16 inst, u16 clkdm, u32 mask)
++{
++ _am33xx_write_clktrctrl(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, inst,
++ clkdm, mask);
++}
++
++void am33xx_cm_clkdm_force_sleep(s16 inst, u16 clkdm, u32 mask)
++{
++ _am33xx_write_clktrctrl(OMAP34XX_CLKSTCTRL_FORCE_SLEEP, inst,
++ clkdm, mask);
++}
++
++void am33xx_cm_clkdm_force_wakeup(s16 inst, u16 clkdm, u32 mask)
++{
++ _am33xx_write_clktrctrl(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP, inst,
++ clkdm, mask);
++}
++
+ /*
+ *
+ */
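The four am33xx_cm_clkdm_* helpers introduced above all funnel into _am33xx_write_clktrctrl(), which read-modify-writes the CLKTRCTRL field selected by the mask argument, shifting the requested mode into place with __ffs(mask). A minimal sketch of how a caller could drive them, using the CM_PER instance offset, L4LS CLKSTCTRL offset and CLKTRCTRL mask that this patch defines in cm33xx.h and cm-regbits-33xx.h (the wrapper function below is illustrative only, not part of the patch):

	/*
	 * Illustrative sketch: force the PER/L4LS clockdomain to sleep,
	 * wake it again, then hand it back to hardware-supervised
	 * transitions. Assumes cm2xxx_3xxx.h, cm33xx.h and
	 * cm-regbits-33xx.h from this patch are included.
	 */
	static void example_l4ls_cycle(void)
	{
		am33xx_cm_clkdm_force_sleep(AM33XX_CM_PER_MOD,
					    AM33XX_CM_PER_L4LS_CLKSTCTRL_OFFSET,
					    AM33XX_CLKTRCTRL_MASK);

		am33xx_cm_clkdm_force_wakeup(AM33XX_CM_PER_MOD,
					    AM33XX_CM_PER_L4LS_CLKSTCTRL_OFFSET,
					    AM33XX_CLKTRCTRL_MASK);

		am33xx_cm_clkdm_enable_hwsup(AM33XX_CM_PER_MOD,
					    AM33XX_CM_PER_L4LS_CLKSTCTRL_OFFSET,
					    AM33XX_CLKTRCTRL_MASK);
	}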
+diff --git a/arch/arm/mach-omap2/cm2xxx_3xxx.h b/arch/arm/mach-omap2/cm2xxx_3xxx.h
+index 088bbad..1418fc1 100644
+--- a/arch/arm/mach-omap2/cm2xxx_3xxx.h
++++ b/arch/arm/mach-omap2/cm2xxx_3xxx.h
+@@ -122,6 +122,12 @@ extern void omap3xxx_cm_clkdm_disable_hwsup(s16 module, u32 mask);
+ extern void omap3xxx_cm_clkdm_force_sleep(s16 module, u32 mask);
+ extern void omap3xxx_cm_clkdm_force_wakeup(s16 module, u32 mask);
+
++extern int am33xx_cm_wait_module_ready(u16 inst, u16 clkctrl_reg);
++extern void am33xx_cm_clkdm_enable_hwsup(s16 inst, u16 clkdm, u32 mask);
++extern void am33xx_cm_clkdm_disable_hwsup(s16 inst, u16 clkdm, u32 mask);
++extern void am33xx_cm_clkdm_force_sleep(s16 inst, u16 clkdm, u32 mask);
++extern void am33xx_cm_clkdm_force_wakeup(s16 inst, u16 clkdm, u32 mask);
++
+ extern void omap2xxx_cm_set_dpll_disable_autoidle(void);
+ extern void omap2xxx_cm_set_dpll_auto_low_power_stop(void);
+
+diff --git a/arch/arm/mach-omap2/cm33xx.h b/arch/arm/mach-omap2/cm33xx.h
+new file mode 100644
+index 0000000..4134901
+--- /dev/null
++++ b/arch/arm/mach-omap2/cm33xx.h
+@@ -0,0 +1,377 @@
++/*
++ * AM33XX CM instance offset macros
++ *
++ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __ARCH_ARM_MACH_OMAP2_CM_33XX_H
++#define __ARCH_ARM_MACH_OMAP2_CM_33XX_H
++
++#include "common.h"
++
++#include "cm.h"
++#include "cm-regbits-33xx.h"
++#include "cm33xx.h"
++
++/* CM base address */
++#define AM33XX_CM_BASE 0x44e00000
++
++#define AM33XX_CM_REGADDR(inst, reg) \
++ AM33XX_L4_WK_IO_ADDRESS(AM33XX_CM_BASE + (inst) + (reg))
++
++/* CM instances */
++#define AM33XX_CM_PER_MOD 0x0000
++#define AM33XX_CM_WKUP_MOD 0x0400
++#define AM33XX_CM_DPLL_MOD 0x0500
++#define AM33XX_CM_MPU_MOD 0x0600
++#define AM33XX_CM_DEVICE_MOD 0x0700
++#define AM33XX_CM_RTC_MOD 0x0800
++#define AM33XX_CM_GFX_MOD 0x0900
++#define AM33XX_CM_CEFUSE_MOD 0x0A00
++
++/* CM */
++
++/* CM.PER_CM register offsets */
++#define AM33XX_CM_PER_L4LS_CLKSTCTRL_OFFSET 0x0000
++#define AM33XX_CM_PER_L4LS_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0000)
++#define AM33XX_CM_PER_L3S_CLKSTCTRL_OFFSET 0x0004
++#define AM33XX_CM_PER_L3S_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0004)
++#define AM33XX_CM_PER_L4FW_CLKSTCTRL_OFFSET 0x0008
++#define AM33XX_CM_PER_L4FW_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0008)
++#define AM33XX_CM_PER_L3_CLKSTCTRL_OFFSET 0x000c
++#define AM33XX_CM_PER_L3_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x000c)
++#define AM33XX_CM_PER_CPGMAC0_CLKCTRL_OFFSET 0x0014
++#define AM33XX_CM_PER_CPGMAC0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0014)
++#define AM33XX_CM_PER_LCDC_CLKCTRL_OFFSET 0x0018
++#define AM33XX_CM_PER_LCDC_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0018)
++#define AM33XX_CM_PER_USB0_CLKCTRL_OFFSET 0x001c
++#define AM33XX_CM_PER_USB0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x001c)
++#define AM33XX_CM_PER_MLB_CLKCTRL_OFFSET 0x0020
++#define AM33XX_CM_PER_MLB_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0020)
++#define AM33XX_CM_PER_TPTC0_CLKCTRL_OFFSET 0x0024
++#define AM33XX_CM_PER_TPTC0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0024)
++#define AM33XX_CM_PER_EMIF_CLKCTRL_OFFSET 0x0028
++#define AM33XX_CM_PER_EMIF_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0028)
++#define AM33XX_CM_PER_OCMCRAM_CLKCTRL_OFFSET 0x002c
++#define AM33XX_CM_PER_OCMCRAM_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x002c)
++#define AM33XX_CM_PER_GPMC_CLKCTRL_OFFSET 0x0030
++#define AM33XX_CM_PER_GPMC_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0030)
++#define AM33XX_CM_PER_MCASP0_CLKCTRL_OFFSET 0x0034
++#define AM33XX_CM_PER_MCASP0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0034)
++#define AM33XX_CM_PER_UART5_CLKCTRL_OFFSET 0x0038
++#define AM33XX_CM_PER_UART5_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0038)
++#define AM33XX_CM_PER_MMC0_CLKCTRL_OFFSET 0x003c
++#define AM33XX_CM_PER_MMC0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x003c)
++#define AM33XX_CM_PER_ELM_CLKCTRL_OFFSET 0x0040
++#define AM33XX_CM_PER_ELM_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0040)
++#define AM33XX_CM_PER_I2C2_CLKCTRL_OFFSET 0x0044
++#define AM33XX_CM_PER_I2C2_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0044)
++#define AM33XX_CM_PER_I2C1_CLKCTRL_OFFSET 0x0048
++#define AM33XX_CM_PER_I2C1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0048)
++#define AM33XX_CM_PER_SPI0_CLKCTRL_OFFSET 0x004c
++#define AM33XX_CM_PER_SPI0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x004c)
++#define AM33XX_CM_PER_SPI1_CLKCTRL_OFFSET 0x0050
++#define AM33XX_CM_PER_SPI1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0050)
++#define AM33XX_CM_PER_SPI2_CLKCTRL_OFFSET 0x0054
++#define AM33XX_CM_PER_SPI2_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0054)
++#define AM33XX_CM_PER_SPI3_CLKCTRL_OFFSET 0x0058
++#define AM33XX_CM_PER_SPI3_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0058)
++#define AM33XX_CM_PER_L4LS_CLKCTRL_OFFSET 0x0060
++#define AM33XX_CM_PER_L4LS_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0060)
++#define AM33XX_CM_PER_L4FW_CLKCTRL_OFFSET 0x0064
++#define AM33XX_CM_PER_L4FW_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0064)
++#define AM33XX_CM_PER_MCASP1_CLKCTRL_OFFSET 0x0068
++#define AM33XX_CM_PER_MCASP1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0068)
++#define AM33XX_CM_PER_UART1_CLKCTRL_OFFSET 0x006c
++#define AM33XX_CM_PER_UART1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x006c)
++#define AM33XX_CM_PER_UART2_CLKCTRL_OFFSET 0x0070
++#define AM33XX_CM_PER_UART2_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0070)
++#define AM33XX_CM_PER_UART3_CLKCTRL_OFFSET 0x0074
++#define AM33XX_CM_PER_UART3_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0074)
++#define AM33XX_CM_PER_UART4_CLKCTRL_OFFSET 0x0078
++#define AM33XX_CM_PER_UART4_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0078)
++#define AM33XX_CM_PER_TIMER7_CLKCTRL_OFFSET 0x007c
++#define AM33XX_CM_PER_TIMER7_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x007c)
++#define AM33XX_CM_PER_TIMER2_CLKCTRL_OFFSET 0x0080
++#define AM33XX_CM_PER_TIMER2_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0080)
++#define AM33XX_CM_PER_TIMER3_CLKCTRL_OFFSET 0x0084
++#define AM33XX_CM_PER_TIMER3_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0084)
++#define AM33XX_CM_PER_TIMER4_CLKCTRL_OFFSET 0x0088
++#define AM33XX_CM_PER_TIMER4_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0088)
++#define AM33XX_CM_PER_MCASP2_CLKCTRL_OFFSET 0x008c
++#define AM33XX_CM_PER_MCASP2_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x008c)
++#define AM33XX_CM_PER_RNG_CLKCTRL_OFFSET 0x0090
++#define AM33XX_CM_PER_RNG_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0090)
++#define AM33XX_CM_PER_AES0_CLKCTRL_OFFSET 0x0094
++#define AM33XX_CM_PER_AES0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0094)
++#define AM33XX_CM_PER_AES1_CLKCTRL_OFFSET 0x0098
++#define AM33XX_CM_PER_AES1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0098)
++#define AM33XX_CM_PER_DES_CLKCTRL_OFFSET 0x009c
++#define AM33XX_CM_PER_DES_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x009c)
++#define AM33XX_CM_PER_SHA0_CLKCTRL_OFFSET 0x00a0
++#define AM33XX_CM_PER_SHA0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00a0)
++#define AM33XX_CM_PER_PKA_CLKCTRL_OFFSET 0x00a4
++#define AM33XX_CM_PER_PKA_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00a4)
++#define AM33XX_CM_PER_GPIO6_CLKCTRL_OFFSET 0x00a8
++#define AM33XX_CM_PER_GPIO6_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00a8)
++#define AM33XX_CM_PER_GPIO1_CLKCTRL_OFFSET 0x00ac
++#define AM33XX_CM_PER_GPIO1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00ac)
++#define AM33XX_CM_PER_GPIO2_CLKCTRL_OFFSET 0x00b0
++#define AM33XX_CM_PER_GPIO2_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00b0)
++#define AM33XX_CM_PER_GPIO3_CLKCTRL_OFFSET 0x00b4
++#define AM33XX_CM_PER_GPIO3_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00b4)
++#define AM33XX_CM_PER_GPIO4_CLKCTRL_OFFSET 0x00b8
++#define AM33XX_CM_PER_GPIO4_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00b8)
++#define AM33XX_CM_PER_TPCC_CLKCTRL_OFFSET 0x00bc
++#define AM33XX_CM_PER_TPCC_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00bc)
++#define AM33XX_CM_PER_DCAN0_CLKCTRL_OFFSET 0x00c0
++#define AM33XX_CM_PER_DCAN0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00c0)
++#define AM33XX_CM_PER_DCAN1_CLKCTRL_OFFSET 0x00c4
++#define AM33XX_CM_PER_DCAN1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00c4)
++#define AM33XX_CM_PER_EPWMSS1_CLKCTRL_OFFSET 0x00cc
++#define AM33XX_CM_PER_EPWMSS1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00cc)
++#define AM33XX_CM_PER_EMIF_FW_CLKCTRL_OFFSET 0x00d0
++#define AM33XX_CM_PER_EMIF_FW_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00d0)
++#define AM33XX_CM_PER_EPWMSS0_CLKCTRL_OFFSET 0x00d4
++#define AM33XX_CM_PER_EPWMSS0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00d4)
++#define AM33XX_CM_PER_EPWMSS2_CLKCTRL_OFFSET 0x00d8
++#define AM33XX_CM_PER_EPWMSS2_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00d8)
++#define AM33XX_CM_PER_L3_INSTR_CLKCTRL_OFFSET 0x00dc
++#define AM33XX_CM_PER_L3_INSTR_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00dc)
++#define AM33XX_CM_PER_L3_CLKCTRL_OFFSET 0x00e0
++#define AM33XX_CM_PER_L3_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00e0)
++#define AM33XX_CM_PER_IEEE5000_CLKCTRL_OFFSET 0x00e4
++#define AM33XX_CM_PER_IEEE5000_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00e4)
++#define AM33XX_CM_PER_PRUSS_CLKCTRL_OFFSET 0x00e8
++#define AM33XX_CM_PER_PRUSS_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00e8)
++#define AM33XX_CM_PER_TIMER5_CLKCTRL_OFFSET 0x00ec
++#define AM33XX_CM_PER_TIMER5_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00ec)
++#define AM33XX_CM_PER_TIMER6_CLKCTRL_OFFSET 0x00f0
++#define AM33XX_CM_PER_TIMER6_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00f0)
++#define AM33XX_CM_PER_MMC1_CLKCTRL_OFFSET 0x00f4
++#define AM33XX_CM_PER_MMC1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00f4)
++#define AM33XX_CM_PER_MMC2_CLKCTRL_OFFSET 0x00f8
++#define AM33XX_CM_PER_MMC2_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00f8)
++#define AM33XX_CM_PER_TPTC1_CLKCTRL_OFFSET 0x00fc
++#define AM33XX_CM_PER_TPTC1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00fc)
++#define AM33XX_CM_PER_TPTC2_CLKCTRL_OFFSET 0x0100
++#define AM33XX_CM_PER_TPTC2_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0100)
++#define AM33XX_CM_PER_GPIO5_CLKCTRL_OFFSET 0x0104
++#define AM33XX_CM_PER_GPIO5_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0104)
++#define AM33XX_CM_PER_SPINLOCK_CLKCTRL_OFFSET 0x010c
++#define AM33XX_CM_PER_SPINLOCK_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x010c)
++#define AM33XX_CM_PER_MAILBOX0_CLKCTRL_OFFSET 0x0110
++#define AM33XX_CM_PER_MAILBOX0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0110)
++#define AM33XX_CM_PER_L4HS_CLKSTCTRL_OFFSET 0x011c
++#define AM33XX_CM_PER_L4HS_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x011c)
++#define AM33XX_CM_PER_L4HS_CLKCTRL_OFFSET 0x0120
++#define AM33XX_CM_PER_L4HS_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0120)
++#define AM33XX_CM_PER_MSTR_EXPS_CLKCTRL_OFFSET 0x0124
++#define AM33XX_CM_PER_MSTR_EXPS_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0124)
++#define AM33XX_CM_PER_SLV_EXPS_CLKCTRL_OFFSET 0x0128
++#define AM33XX_CM_PER_SLV_EXPS_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0128)
++#define AM33XX_CM_PER_OCPWP_L3_CLKSTCTRL_OFFSET 0x012c
++#define AM33XX_CM_PER_OCPWP_L3_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x012c)
++#define AM33XX_CM_PER_OCPWP_CLKCTRL_OFFSET 0x0130
++#define AM33XX_CM_PER_OCPWP_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0130)
++#define AM33XX_CM_PER_MAILBOX1_CLKCTRL_OFFSET 0x0134
++#define AM33XX_CM_PER_MAILBOX1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0134)
++#define AM33XX_CM_PER_PRUSS_CLKSTCTRL_OFFSET 0x0140
++#define AM33XX_CM_PER_PRUSS_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0140)
++#define AM33XX_CM_PER_CPSW_CLKSTCTRL_OFFSET 0x0144
++#define AM33XX_CM_PER_CPSW_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0144)
++#define AM33XX_CM_PER_LCDC_CLKSTCTRL_OFFSET 0x0148
++#define AM33XX_CM_PER_LCDC_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0148)
++#define AM33XX_CM_PER_CLKDIV32K_CLKCTRL_OFFSET 0x014c
++#define AM33XX_CM_PER_CLKDIV32K_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x014c)
++#define AM33XX_CM_PER_CLK_24MHZ_CLKSTCTRL_OFFSET 0x0150
++#define AM33XX_CM_PER_CLK_24MHZ_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0150)
++
++/* CM.WKUP_CM register offsets */
++#define AM33XX_CM_WKUP_CLKSTCTRL_OFFSET 0x0000
++#define AM33XX_CM_WKUP_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0000)
++#define AM33XX_CM_WKUP_CONTROL_CLKCTRL_OFFSET 0x0004
++#define AM33XX_CM_WKUP_CONTROL_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0004)
++#define AM33XX_CM_WKUP_GPIO0_CLKCTRL_OFFSET 0x0008
++#define AM33XX_CM_WKUP_GPIO0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0008)
++#define AM33XX_CM_WKUP_L4WKUP_CLKCTRL_OFFSET 0x000c
++#define AM33XX_CM_WKUP_L4WKUP_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x000c)
++#define AM33XX_CM_WKUP_TIMER0_CLKCTRL_OFFSET 0x0010
++#define AM33XX_CM_WKUP_TIMER0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0010)
++#define AM33XX_CM_WKUP_DEBUGSS_CLKCTRL_OFFSET 0x0014
++#define AM33XX_CM_WKUP_DEBUGSS_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0014)
++#define AM33XX_CM_L3_AON_CLKSTCTRL_OFFSET 0x0018
++#define AM33XX_CM_L3_AON_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0018)
++#define AM33XX_CM_AUTOIDLE_DPLL_MPU_OFFSET 0x001c
++#define AM33XX_CM_AUTOIDLE_DPLL_MPU AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x001c)
++#define AM33XX_CM_IDLEST_DPLL_MPU_OFFSET 0x0020
++#define AM33XX_CM_IDLEST_DPLL_MPU AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0020)
++#define AM33XX_CM_SSC_DELTAMSTEP_DPLL_MPU_OFFSET 0x0024
++#define AM33XX_CM_SSC_DELTAMSTEP_DPLL_MPU AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0024)
++#define AM33XX_CM_SSC_MODFREQDIV_DPLL_MPU_OFFSET 0x0028
++#define AM33XX_CM_SSC_MODFREQDIV_DPLL_MPU AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0028)
++#define AM33XX_CM_CLKSEL_DPLL_MPU_OFFSET 0x002c
++#define AM33XX_CM_CLKSEL_DPLL_MPU AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x002c)
++#define AM33XX_CM_AUTOIDLE_DPLL_DDR_OFFSET 0x0030
++#define AM33XX_CM_AUTOIDLE_DPLL_DDR AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0030)
++#define AM33XX_CM_IDLEST_DPLL_DDR_OFFSET 0x0034
++#define AM33XX_CM_IDLEST_DPLL_DDR AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0034)
++#define AM33XX_CM_SSC_DELTAMSTEP_DPLL_DDR_OFFSET 0x0038
++#define AM33XX_CM_SSC_DELTAMSTEP_DPLL_DDR AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0038)
++#define AM33XX_CM_SSC_MODFREQDIV_DPLL_DDR_OFFSET 0x003c
++#define AM33XX_CM_SSC_MODFREQDIV_DPLL_DDR AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x003c)
++#define AM33XX_CM_CLKSEL_DPLL_DDR_OFFSET 0x0040
++#define AM33XX_CM_CLKSEL_DPLL_DDR AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0040)
++#define AM33XX_CM_AUTOIDLE_DPLL_DISP_OFFSET 0x0044
++#define AM33XX_CM_AUTOIDLE_DPLL_DISP AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0044)
++#define AM33XX_CM_IDLEST_DPLL_DISP_OFFSET 0x0048
++#define AM33XX_CM_IDLEST_DPLL_DISP AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0048)
++#define AM33XX_CM_SSC_DELTAMSTEP_DPLL_DISP_OFFSET 0x004c
++#define AM33XX_CM_SSC_DELTAMSTEP_DPLL_DISP AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x004c)
++#define AM33XX_CM_SSC_MODFREQDIV_DPLL_DISP_OFFSET 0x0050
++#define AM33XX_CM_SSC_MODFREQDIV_DPLL_DISP AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0050)
++#define AM33XX_CM_CLKSEL_DPLL_DISP_OFFSET 0x0054
++#define AM33XX_CM_CLKSEL_DPLL_DISP AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0054)
++#define AM33XX_CM_AUTOIDLE_DPLL_CORE_OFFSET 0x0058
++#define AM33XX_CM_AUTOIDLE_DPLL_CORE AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0058)
++#define AM33XX_CM_IDLEST_DPLL_CORE_OFFSET 0x005c
++#define AM33XX_CM_IDLEST_DPLL_CORE AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x005c)
++#define AM33XX_CM_SSC_DELTAMSTEP_DPLL_CORE_OFFSET 0x0060
++#define AM33XX_CM_SSC_DELTAMSTEP_DPLL_CORE AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0060)
++#define AM33XX_CM_SSC_MODFREQDIV_DPLL_CORE_OFFSET 0x0064
++#define AM33XX_CM_SSC_MODFREQDIV_DPLL_CORE AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0064)
++#define AM33XX_CM_CLKSEL_DPLL_CORE_OFFSET 0x0068
++#define AM33XX_CM_CLKSEL_DPLL_CORE AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0068)
++#define AM33XX_CM_AUTOIDLE_DPLL_PER_OFFSET 0x006c
++#define AM33XX_CM_AUTOIDLE_DPLL_PER AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x006c)
++#define AM33XX_CM_IDLEST_DPLL_PER_OFFSET 0x0070
++#define AM33XX_CM_IDLEST_DPLL_PER AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0070)
++#define AM33XX_CM_SSC_DELTAMSTEP_DPLL_PER_OFFSET 0x0074
++#define AM33XX_CM_SSC_DELTAMSTEP_DPLL_PER AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0074)
++#define AM33XX_CM_SSC_MODFREQDIV_DPLL_PER_OFFSET 0x0078
++#define AM33XX_CM_SSC_MODFREQDIV_DPLL_PER AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0078)
++#define AM33XX_CM_CLKDCOLDO_DPLL_PER_OFFSET 0x007c
++#define AM33XX_CM_CLKDCOLDO_DPLL_PER AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x007c)
++#define AM33XX_CM_DIV_M4_DPLL_CORE_OFFSET 0x0080
++#define AM33XX_CM_DIV_M4_DPLL_CORE AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0080)
++#define AM33XX_CM_DIV_M5_DPLL_CORE_OFFSET 0x0084
++#define AM33XX_CM_DIV_M5_DPLL_CORE AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0084)
++#define AM33XX_CM_CLKMODE_DPLL_MPU_OFFSET 0x0088
++#define AM33XX_CM_CLKMODE_DPLL_MPU AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0088)
++#define AM33XX_CM_CLKMODE_DPLL_PER_OFFSET 0x008c
++#define AM33XX_CM_CLKMODE_DPLL_PER AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x008c)
++#define AM33XX_CM_CLKMODE_DPLL_CORE_OFFSET 0x0090
++#define AM33XX_CM_CLKMODE_DPLL_CORE AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0090)
++#define AM33XX_CM_CLKMODE_DPLL_DDR_OFFSET 0x0094
++#define AM33XX_CM_CLKMODE_DPLL_DDR AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0094)
++#define AM33XX_CM_CLKMODE_DPLL_DISP_OFFSET 0x0098
++#define AM33XX_CM_CLKMODE_DPLL_DISP AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0098)
++#define AM33XX_CM_CLKSEL_DPLL_PERIPH_OFFSET 0x009c
++#define AM33XX_CM_CLKSEL_DPLL_PERIPH AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x009c)
++#define AM33XX_CM_DIV_M2_DPLL_DDR_OFFSET 0x00a0
++#define AM33XX_CM_DIV_M2_DPLL_DDR AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00a0)
++#define AM33XX_CM_DIV_M2_DPLL_DISP_OFFSET 0x00a4
++#define AM33XX_CM_DIV_M2_DPLL_DISP AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00a4)
++#define AM33XX_CM_DIV_M2_DPLL_MPU_OFFSET 0x00a8
++#define AM33XX_CM_DIV_M2_DPLL_MPU AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00a8)
++#define AM33XX_CM_DIV_M2_DPLL_PER_OFFSET 0x00ac
++#define AM33XX_CM_DIV_M2_DPLL_PER AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00ac)
++#define AM33XX_CM_WKUP_WKUP_M3_CLKCTRL_OFFSET 0x00b0
++#define AM33XX_CM_WKUP_WKUP_M3_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00b0)
++#define AM33XX_CM_WKUP_UART0_CLKCTRL_OFFSET 0x00b4
++#define AM33XX_CM_WKUP_UART0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00b4)
++#define AM33XX_CM_WKUP_I2C0_CLKCTRL_OFFSET 0x00b8
++#define AM33XX_CM_WKUP_I2C0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00b8)
++#define AM33XX_CM_WKUP_ADC_TSC_CLKCTRL_OFFSET 0x00bc
++#define AM33XX_CM_WKUP_ADC_TSC_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00bc)
++#define AM33XX_CM_WKUP_SMARTREFLEX0_CLKCTRL_OFFSET 0x00c0
++#define AM33XX_CM_WKUP_SMARTREFLEX0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00c0)
++#define AM33XX_CM_WKUP_TIMER1_CLKCTRL_OFFSET 0x00c4
++#define AM33XX_CM_WKUP_TIMER1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00c4)
++#define AM33XX_CM_WKUP_SMARTREFLEX1_CLKCTRL_OFFSET 0x00c8
++#define AM33XX_CM_WKUP_SMARTREFLEX1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00c8)
++#define AM33XX_CM_L4_WKUP_AON_CLKSTCTRL_OFFSET 0x00cc
++#define AM33XX_CM_L4_WKUP_AON_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00cc)
++#define AM33XX_CM_WKUP_WDT0_CLKCTRL_OFFSET 0x00d0
++#define AM33XX_CM_WKUP_WDT0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00d0)
++#define AM33XX_CM_WKUP_WDT1_CLKCTRL_OFFSET 0x00d4
++#define AM33XX_CM_WKUP_WDT1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00d4)
++#define AM33XX_CM_DIV_M6_DPLL_CORE_OFFSET 0x00d8
++#define AM33XX_CM_DIV_M6_DPLL_CORE AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00d8)
++
++/* CM.DPLL_CM register offsets */
++#define AM33XX_CLKSEL_TIMER7_CLK_OFFSET 0x0004
++#define AM33XX_CLKSEL_TIMER7_CLK AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x0004)
++#define AM33XX_CLKSEL_TIMER2_CLK_OFFSET 0x0008
++#define AM33XX_CLKSEL_TIMER2_CLK AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x0008)
++#define AM33XX_CLKSEL_TIMER3_CLK_OFFSET 0x000c
++#define AM33XX_CLKSEL_TIMER3_CLK AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x000c)
++#define AM33XX_CLKSEL_TIMER4_CLK_OFFSET 0x0010
++#define AM33XX_CLKSEL_TIMER4_CLK AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x0010)
++#define AM33XX_CM_MAC_CLKSEL_OFFSET 0x0014
++#define AM33XX_CM_MAC_CLKSEL AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x0014)
++#define AM33XX_CLKSEL_TIMER5_CLK_OFFSET 0x0018
++#define AM33XX_CLKSEL_TIMER5_CLK AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x0018)
++#define AM33XX_CLKSEL_TIMER6_CLK_OFFSET 0x001c
++#define AM33XX_CLKSEL_TIMER6_CLK AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x001c)
++#define AM33XX_CM_CPTS_RFT_CLKSEL_OFFSET 0x0020
++#define AM33XX_CM_CPTS_RFT_CLKSEL AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x0020)
++#define AM33XX_CLKSEL_TIMER1MS_CLK_OFFSET 0x0028
++#define AM33XX_CLKSEL_TIMER1MS_CLK AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x0028)
++#define AM33XX_CLKSEL_GFX_FCLK_OFFSET 0x002c
++#define AM33XX_CLKSEL_GFX_FCLK AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x002c)
++#define AM33XX_CLKSEL_PRUSS_OCP_CLK_OFFSET 0x0030
++#define AM33XX_CLKSEL_PRUSS_OCP_CLK AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x0030)
++#define AM33XX_CLKSEL_LCDC_PIXEL_CLK_OFFSET 0x0034
++#define AM33XX_CLKSEL_LCDC_PIXEL_CLK AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x0034)
++#define AM33XX_CLKSEL_WDT1_CLK_OFFSET 0x0038
++#define AM33XX_CLKSEL_WDT1_CLK AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x0038)
++#define AM33XX_CLKSEL_GPIO0_DBCLK_OFFSET 0x003c
++#define AM33XX_CLKSEL_GPIO0_DBCLK AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x003c)
++
++/* CM.MPU_CM register offsets */
++#define AM33XX_CM_MPU_CLKSTCTRL_OFFSET 0x0000
++#define AM33XX_CM_MPU_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_MPU_MOD, 0x0000)
++#define AM33XX_CM_MPU_MPU_CLKCTRL_OFFSET 0x0004
++#define AM33XX_CM_MPU_MPU_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_MPU_MOD, 0x0004)
++
++/* CM.DEVICE_CM register offsets */
++#define AM33XX_CM_CLKOUT_CTRL_OFFSET 0x0000
++#define AM33XX_CM_CLKOUT_CTRL AM33XX_CM_REGADDR(AM33XX_CM_DEVICE_MOD, 0x0000)
++
++/* CM.RTC_CM register offsets */
++#define AM33XX_CM_RTC_RTC_CLKCTRL_OFFSET 0x0000
++#define AM33XX_CM_RTC_RTC_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_RTC_MOD, 0x0000)
++#define AM33XX_CM_RTC_CLKSTCTRL_OFFSET 0x0004
++#define AM33XX_CM_RTC_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_RTC_MOD, 0x0004)
++
++/* CM.GFX_CM register offsets */
++#define AM33XX_CM_GFX_L3_CLKSTCTRL_OFFSET 0x0000
++#define AM33XX_CM_GFX_L3_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_GFX_MOD, 0x0000)
++#define AM33XX_CM_GFX_GFX_CLKCTRL_OFFSET 0x0004
++#define AM33XX_CM_GFX_GFX_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_GFX_MOD, 0x0004)
++#define AM33XX_CM_GFX_BITBLT_CLKCTRL_OFFSET 0x0008
++#define AM33XX_CM_GFX_BITBLT_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_GFX_MOD, 0x0008)
++#define AM33XX_CM_GFX_L4LS_GFX_CLKSTCTRL__1_OFFSET 0x000c
++#define AM33XX_CM_GFX_L4LS_GFX_CLKSTCTRL__1 AM33XX_CM_REGADDR(AM33XX_CM_GFX_MOD, 0x000c)
++#define AM33XX_CM_GFX_MMUCFG_CLKCTRL_OFFSET 0x0010
++#define AM33XX_CM_GFX_MMUCFG_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_GFX_MOD, 0x0010)
++#define AM33XX_CM_GFX_MMUDATA_CLKCTRL_OFFSET 0x0014
++#define AM33XX_CM_GFX_MMUDATA_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_GFX_MOD, 0x0014)
++
++/* CM.CEFUSE_CM register offsets */
++#define AM33XX_CM_CEFUSE_CLKSTCTRL_OFFSET 0x0000
++#define AM33XX_CM_CEFUSE_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_CEFUSE_MOD, 0x0000)
++#define AM33XX_CM_CEFUSE_CEFUSE_CLKCTRL_OFFSET 0x0020
++#define AM33XX_CM_CEFUSE_CEFUSE_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_CEFUSE_MOD, 0x0020)
++
++#endif
+diff --git a/arch/arm/mach-omap2/cm44xx.c b/arch/arm/mach-omap2/cm44xx.c
+index e96f53e..6a83630 100644
+--- a/arch/arm/mach-omap2/cm44xx.c
++++ b/arch/arm/mach-omap2/cm44xx.c
+@@ -18,7 +18,7 @@
+ #include <linux/err.h>
+ #include <linux/io.h>
+
+-#include <plat/common.h>
++#include "common.h"
+
+ #include "cm.h"
+ #include "cm1_44xx.h"
+diff --git a/arch/arm/mach-omap2/cminst33xx.h b/arch/arm/mach-omap2/cminst33xx.h
+new file mode 100644
+index 0000000..881c0af
+--- /dev/null
++++ b/arch/arm/mach-omap2/cminst33xx.h
+@@ -0,0 +1,63 @@
++/*
++ * am33xx Clock Management (CM) function prototypes
++ *
++ * Copyright (C) 2010 Nokia Corporation
++ * Paul Walmsley
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++#ifndef __ARCH_ASM_MACH_OMAP2_CMINST33XX_H
++#define __ARCH_ASM_MACH_OMAP2_CMINST33XX_H
++
++extern bool am33xx_cminst_is_clkdm_in_hwsup(s16 inst, u16 cdoffs);
++extern void am33xx_cminst_clkdm_enable_hwsup(s16 inst, u16 cdoffs);
++extern void am33xx_cminst_clkdm_disable_hwsup(s16 inst, u16 cdoffs);
++extern void am33xx_cminst_clkdm_force_sleep(s16 inst, u16 cdoffs);
++extern void am33xx_cminst_clkdm_force_wakeup(s16 inst, u16 cdoffs);
++
++extern int am33xx_cminst_wait_module_ready(u16 inst, s16 cdoffs, u16 clkctrl_offs);
++
++#ifdef CONFIG_SOC_OMAPAM33XX
++extern int am33xx_cminst_wait_module_idle(u16 inst, s16 cdoffs,
++ u16 clkctrl_offs);
++
++extern void am33xx_cminst_module_enable(u8 mode, u16 inst, s16 cdoffs,
++ u16 clkctrl_offs);
++extern void am33xx_cminst_module_disable(u16 inst, s16 cdoffs,
++ u16 clkctrl_offs);
++
++#else
++
++static inline int am33xx_cminst_wait_module_idle(u16 inst, s16 cdoffs,
++ u16 clkctrl_offs)
++{
++ return 0;
++}
++
++static inline void am33xx_cminst_module_enable(u8 mode, u16 inst,
++ s16 cdoffs, u16 clkctrl_offs)
++{
++}
++
++static inline void am33xx_cminst_module_disable(u16 inst, s16 cdoffs,
++ u16 clkctrl_offs)
++{
++}
++
++#endif
++
++/*
++ * In an ideal world, we would not export these low-level functions,
++ * but this will probably take some time to fix properly
++ */
++extern u32 am33xx_cminst_read_inst_reg(s16 inst, u16 idx);
++extern void am33xx_cminst_write_inst_reg(u32 val, s16 inst, u16 idx);
++extern u32 am33xx_cminst_rmw_inst_reg_bits(u32 mask, u32 bits,
++ s16 inst, s16 idx);
++extern u32 am33xx_cminst_set_inst_reg_bits(u32 bits, s16 inst, s16 idx);
++extern u32 am33xx_cminst_clear_inst_reg_bits(u32 bits, s16 inst, s16 idx);
++extern u32 am33xx_cminst_read_inst_reg_bits(u16 inst, s16 idx, u32 mask);
++
++#endif
+diff --git a/arch/arm/mach-omap2/cminst44xx.c b/arch/arm/mach-omap2/cminst44xx.c
+index eb2a472..1ca9e12 100644
+--- a/arch/arm/mach-omap2/cminst44xx.c
++++ b/arch/arm/mach-omap2/cminst44xx.c
+@@ -20,7 +20,7 @@
+ #include <linux/err.h>
+ #include <linux/io.h>
+
+-#include <plat/common.h>
++#include "common.h"
+
+ #include "cm.h"
+ #include "cm1_44xx.h"
+@@ -31,6 +31,7 @@
+ #include "cm-regbits-44xx.h"
+ #include "prcm44xx.h"
+ #include "prm44xx.h"
++#include "prm33xx.h"
+ #include "prcm_mpu44xx.h"
+
+ /*
+@@ -49,13 +50,21 @@
+ #define CLKCTRL_IDLEST_INTERFACE_IDLE 0x2
+ #define CLKCTRL_IDLEST_DISABLED 0x3
+
+-static u32 _cm_bases[OMAP4_MAX_PRCM_PARTITIONS] = {
++static u32 **_cm_bases;
++static u32 max_cm_partitions;
++
++static u32 *omap44xx_cm_bases[] = {
+ [OMAP4430_INVALID_PRCM_PARTITION] = 0,
+- [OMAP4430_PRM_PARTITION] = OMAP4430_PRM_BASE,
+- [OMAP4430_CM1_PARTITION] = OMAP4430_CM1_BASE,
+- [OMAP4430_CM2_PARTITION] = OMAP4430_CM2_BASE,
++ [OMAP4430_PRM_PARTITION] = OMAP2_L4_IO_ADDRESS(OMAP4430_PRM_BASE),
++ [OMAP4430_CM1_PARTITION] = OMAP2_L4_IO_ADDRESS(OMAP4430_CM1_BASE),
++ [OMAP4430_CM2_PARTITION] = OMAP2_L4_IO_ADDRESS(OMAP4430_CM2_BASE),
+ [OMAP4430_SCRM_PARTITION] = 0,
+- [OMAP4430_PRCM_MPU_PARTITION] = OMAP4430_PRCM_MPU_BASE,
++ [OMAP4430_PRCM_MPU_PARTITION] = OMAP2_L4_IO_ADDRESS(OMAP4430_PRCM_MPU_BASE),
++};
++
++static u32 *am33xx_cm_bases[] = {
++ [OMAP4430_INVALID_PRCM_PARTITION] = 0,
++ [AM33XX_PRM_PARTITION] = AM33XX_L4_WK_IO_ADDRESS(AM33XX_PRM_BASE),
+ };
+
+ /* Private functions */
+@@ -103,19 +112,19 @@ static bool _is_module_ready(u8 part, u16 inst, s16 cdoffs, u16 clkctrl_offs)
+ /* Read a register in a CM instance */
+ u32 omap4_cminst_read_inst_reg(u8 part, s16 inst, u16 idx)
+ {
+- BUG_ON(part >= OMAP4_MAX_PRCM_PARTITIONS ||
++ BUG_ON(part >= max_cm_partitions ||
+ part == OMAP4430_INVALID_PRCM_PARTITION ||
+ !_cm_bases[part]);
+- return __raw_readl(OMAP2_L4_IO_ADDRESS(_cm_bases[part] + inst + idx));
++ return __raw_readl(_cm_bases[part] + ((inst + idx)/sizeof(u32)));
+ }
+
+ /* Write into a register in a CM instance */
+ void omap4_cminst_write_inst_reg(u32 val, u8 part, s16 inst, u16 idx)
+ {
+- BUG_ON(part >= OMAP4_MAX_PRCM_PARTITIONS ||
++ BUG_ON(part >= max_cm_partitions ||
+ part == OMAP4430_INVALID_PRCM_PARTITION ||
+ !_cm_bases[part]);
+- __raw_writel(val, OMAP2_L4_IO_ADDRESS(_cm_bases[part] + inst + idx));
++ __raw_writel(val, _cm_bases[part] + ((inst + idx)/sizeof(u32)));
+ }
+
+ /* Read-modify-write a register in CM1. Caller must lock */
+@@ -349,3 +358,14 @@ void omap4_cminst_module_disable(u8 part, u16 inst, s16 cdoffs,
+ v &= ~OMAP4430_MODULEMODE_MASK;
+ omap4_cminst_write_inst_reg(v, part, inst, clkctrl_offs);
+ }
++
++void __init omap44xx_cminst_init(void)
++{
++ if (cpu_is_omap44xx()) {
++ _cm_bases = omap44xx_cm_bases;
++ max_cm_partitions = ARRAY_SIZE(omap44xx_cm_bases);
++ } else if (cpu_is_am33xx()) {
++ _cm_bases = am33xx_cm_bases;
++ max_cm_partitions = ARRAY_SIZE(am33xx_cm_bases);
++ }
++}
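Note the shape of the change above: _cm_bases[] now holds already-ioremapped u32 pointers selected at runtime by omap44xx_cminst_init(), so that call must run during early init before any CM-instance accessor is used, and the byte offset inst + idx is scaled by sizeof(u32) before the pointer addition. A small sketch of the equivalent arithmetic, with a stand-in base pointer used purely for illustration:

	/*
	 * Illustrative sketch only: 'base' stands in for _cm_bases[part].
	 * Because base is a u32 pointer, adding (inst + idx) / sizeof(u32)
	 * advances it by (inst + idx) bytes -- the same location the old
	 * code formed as physical_base + inst + idx before remapping.
	 */
	static u32 example_cminst_read(u32 *base, u16 inst, u16 idx)
	{
		return __raw_readl(base + ((inst + idx) / sizeof(u32)));
	}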
+diff --git a/arch/arm/mach-omap2/cminst44xx.h b/arch/arm/mach-omap2/cminst44xx.h
+index a018a73..e37c779 100644
+--- a/arch/arm/mach-omap2/cminst44xx.h
++++ b/arch/arm/mach-omap2/cminst44xx.h
+@@ -19,7 +19,7 @@ extern void omap4_cminst_clkdm_force_wakeup(u8 part, s16 inst, u16 cdoffs);
+
+ extern int omap4_cminst_wait_module_ready(u8 part, u16 inst, s16 cdoffs, u16 clkctrl_offs);
+
+-# ifdef CONFIG_ARCH_OMAP4
++# if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAPAM33XX)
+ extern int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs,
+ u16 clkctrl_offs);
+
+@@ -63,4 +63,5 @@ extern u32 omap4_cminst_clear_inst_reg_bits(u32 bits, u8 part, s16 inst,
+ extern u32 omap4_cminst_read_inst_reg_bits(u8 part, u16 inst, s16 idx,
+ u32 mask);
+
++extern void __init omap44xx_cminst_init(void);
+ #endif
+diff --git a/arch/arm/mach-omap2/common-board-devices.c b/arch/arm/mach-omap2/common-board-devices.c
+index bcb0c58..5293192 100644
+--- a/arch/arm/mach-omap2/common-board-devices.c
++++ b/arch/arm/mach-omap2/common-board-devices.c
+@@ -33,7 +33,6 @@
+ defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
+ static struct omap2_mcspi_device_config ads7846_mcspi_config = {
+ .turbo_mode = 0,
+- .single_channel = 1, /* 0: slave, 1: master */
+ };
+
+ static struct ads7846_platform_data ads7846_config = {
+@@ -92,49 +91,3 @@ void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
+ {
+ }
+ #endif
+-
+-#if defined(CONFIG_MTD_NAND_OMAP2) || defined(CONFIG_MTD_NAND_OMAP2_MODULE)
+-static struct omap_nand_platform_data nand_data;
+-
+-void __init omap_nand_flash_init(int options, struct mtd_partition *parts,
+- int nr_parts)
+-{
+- u8 cs = 0;
+- u8 nandcs = GPMC_CS_NUM + 1;
+-
+- /* find out the chip-select on which NAND exists */
+- while (cs < GPMC_CS_NUM) {
+- u32 ret = 0;
+- ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
+-
+- if ((ret & 0xC00) == 0x800) {
+- printk(KERN_INFO "Found NAND on CS%d\n", cs);
+- if (nandcs > GPMC_CS_NUM)
+- nandcs = cs;
+- }
+- cs++;
+- }
+-
+- if (nandcs > GPMC_CS_NUM) {
+- printk(KERN_INFO "NAND: Unable to find configuration "
+- "in GPMC\n ");
+- return;
+- }
+-
+- if (nandcs < GPMC_CS_NUM) {
+- nand_data.cs = nandcs;
+- nand_data.parts = parts;
+- nand_data.nr_parts = nr_parts;
+- nand_data.devsize = options;
+-
+- printk(KERN_INFO "Registering NAND on CS%d\n", nandcs);
+- if (gpmc_nand_init(&nand_data) < 0)
+- printk(KERN_ERR "Unable to register NAND device\n");
+- }
+-}
+-#else
+-void __init omap_nand_flash_init(int options, struct mtd_partition *parts,
+- int nr_parts)
+-{
+-}
+-#endif
+diff --git a/arch/arm/mach-omap2/common-board-devices.h b/arch/arm/mach-omap2/common-board-devices.h
+index a0b4a428..72bb41b 100644
+--- a/arch/arm/mach-omap2/common-board-devices.h
++++ b/arch/arm/mach-omap2/common-board-devices.h
+@@ -10,6 +10,5 @@ struct ads7846_platform_data;
+
+ void omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
+ struct ads7846_platform_data *board_pdata);
+-void omap_nand_flash_init(int opts, struct mtd_partition *parts, int n_parts);
+
+ #endif /* __OMAP_COMMON_BOARD_DEVICES__ */
+diff --git a/arch/arm/mach-omap2/common.c b/arch/arm/mach-omap2/common.c
+index 110e5b9..aaf4211 100644
+--- a/arch/arm/mach-omap2/common.c
++++ b/arch/arm/mach-omap2/common.c
+@@ -17,7 +17,7 @@
+ #include <linux/clk.h>
+ #include <linux/io.h>
+
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/board.h>
+ #include <plat/mux.h>
+
+@@ -110,23 +110,49 @@ void __init omap3_map_io(void)
+
+ /*
+ * Adjust TAP register base such that omap3_check_revision accesses the correct
+- * TI816X register for checking device ID (it adds 0x204 to tap base while
+- * TI816X DEVICE ID register is at offset 0x600 from control base).
++ * TI81XX register for checking device ID (it adds 0x204 to tap base while
++ * TI81XX DEVICE ID register is at offset 0x600 from control base).
+ */
+-#define TI816X_TAP_BASE (TI816X_CTRL_BASE + \
+- TI816X_CONTROL_DEVICE_ID - 0x204)
++#define TI81XX_TAP_BASE (TI81XX_CTRL_BASE + \
++ TI81XX_CONTROL_DEVICE_ID - 0x204)
+
+-static struct omap_globals ti816x_globals = {
++static struct omap_globals ti81xx_globals = {
+ .class = OMAP343X_CLASS,
+- .tap = OMAP2_L4_IO_ADDRESS(TI816X_TAP_BASE),
+- .ctrl = OMAP2_L4_IO_ADDRESS(TI816X_CTRL_BASE),
+- .prm = OMAP2_L4_IO_ADDRESS(TI816X_PRCM_BASE),
+- .cm = OMAP2_L4_IO_ADDRESS(TI816X_PRCM_BASE),
++ .tap = OMAP2_L4_IO_ADDRESS(TI81XX_TAP_BASE),
++ .ctrl = OMAP2_L4_IO_ADDRESS(TI81XX_CTRL_BASE),
++ .prm = OMAP2_L4_IO_ADDRESS(TI81XX_PRCM_BASE),
++ .cm = OMAP2_L4_IO_ADDRESS(TI81XX_PRCM_BASE),
+ };
+
+-void __init omap2_set_globals_ti816x(void)
++void __init omap2_set_globals_ti81xx(void)
+ {
+- __omap2_set_globals(&ti816x_globals);
++ __omap2_set_globals(&ti81xx_globals);
++}
++
++void __init ti81xx_map_io(void)
++{
++ omapti81xx_map_common_io();
++}
++
++#define AM33XX_TAP_BASE (AM33XX_CTRL_BASE + \
++ TI81XX_CONTROL_DEVICE_ID - 0x204)
++
++static struct omap_globals am33xx_globals = {
++ .class = AM335X_CLASS,
++ .tap = AM33XX_L4_WK_IO_ADDRESS(AM33XX_TAP_BASE),
++ .ctrl = AM33XX_L4_WK_IO_ADDRESS(AM33XX_CTRL_BASE),
++ .prm = AM33XX_L4_WK_IO_ADDRESS(AM33XX_PRCM_BASE),
++ .cm = AM33XX_L4_WK_IO_ADDRESS(AM33XX_PRCM_BASE),
++};
++
++void __init omap2_set_globals_am33xx(void)
++{
++ __omap2_set_globals(&am33xx_globals);
++}
++
++void __init am33xx_map_io(void)
++{
++ omapam33xx_map_common_io();
+ }
+ #endif
+
+diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
+new file mode 100644
+index 0000000..f62fa39
+--- /dev/null
++++ b/arch/arm/mach-omap2/common.h
+@@ -0,0 +1,240 @@
++/*
++ * Header for code common to all OMAP2+ machines.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
++ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
++ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
++ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
++ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#ifndef __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H
++#define __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H
++#ifndef __ASSEMBLER__
++
++#include <linux/delay.h>
++#include <plat/common.h>
++#include <asm/proc-fns.h>
++
++#ifdef CONFIG_SOC_OMAP2420
++extern void omap242x_map_common_io(void);
++#else
++static inline void omap242x_map_common_io(void)
++{
++}
++#endif
++
++#ifdef CONFIG_SOC_OMAP2430
++extern void omap243x_map_common_io(void);
++#else
++static inline void omap243x_map_common_io(void)
++{
++}
++#endif
++
++#ifdef CONFIG_ARCH_OMAP3
++extern void omap34xx_map_common_io(void);
++#else
++static inline void omap34xx_map_common_io(void)
++{
++}
++#endif
++
++#ifdef CONFIG_SOC_OMAPTI81XX
++extern void omapti81xx_map_common_io(void);
++#else
++static inline void omapti81xx_map_common_io(void)
++{
++}
++#endif
++
++#ifdef CONFIG_SOC_OMAPAM33XX
++extern void omapam33xx_map_common_io(void);
++#else
++static inline void omapam33xx_map_common_io(void)
++{
++}
++#endif
++
++#ifdef CONFIG_ARCH_OMAP4
++extern void omap44xx_map_common_io(void);
++#else
++static inline void omap44xx_map_common_io(void)
++{
++}
++#endif
++
++extern void omap2_init_common_infrastructure(void);
++
++extern struct sys_timer omap2_timer;
++extern struct sys_timer omap3_timer;
++extern struct sys_timer omap3_secure_timer;
++extern struct sys_timer omap3_am33xx_timer;
++extern struct sys_timer omap4_timer;
++
++void omap2420_init_early(void);
++void omap2430_init_early(void);
++void omap3430_init_early(void);
++void omap35xx_init_early(void);
++void omap3630_init_early(void);
++void omap3_init_early(void); /* Do not use this one */
++void am35xx_init_early(void);
++void ti81xx_init_early(void);
++void am33xx_init_early(void);
++void omap4430_init_early(void);
++
++/*
++ * IO bases for various OMAP processors
++ * Except for the tap base, all the I/O bases
++ * listed are physical addresses.
++ */
++struct omap_globals {
++ u32 class; /* OMAP class to detect */
++ void __iomem *tap; /* Control module ID code */
++ void __iomem *sdrc; /* SDRAM Controller */
++ void __iomem *sms; /* SDRAM Memory Scheduler */
++ void __iomem *ctrl; /* System Control Module */
++ void __iomem *ctrl_pad; /* PAD Control Module */
++ void __iomem *prm; /* Power and Reset Management */
++ void __iomem *cm; /* Clock Management */
++ void __iomem *cm2;
++};
++
++void omap2_set_globals_242x(void);
++void omap2_set_globals_243x(void);
++void omap2_set_globals_3xxx(void);
++void omap2_set_globals_443x(void);
++void omap2_set_globals_ti81xx(void);
++void omap2_set_globals_am33xx(void);
++
++/* These get called from omap2_set_globals_xxxx(), do not call these */
++void omap2_set_globals_tap(struct omap_globals *);
++void omap2_set_globals_sdrc(struct omap_globals *);
++void omap2_set_globals_control(struct omap_globals *);
++void omap2_set_globals_prcm(struct omap_globals *);
++
++void omap242x_map_io(void);
++void omap243x_map_io(void);
++void omap3_map_io(void);
++void am33xx_map_io(void);
++void omap4_map_io(void);
++void ti81xx_map_io(void);
++
++/**
++ * omap_test_timeout - busy-loop, testing a condition
++ * @cond: condition to test until it evaluates to true
++ * @timeout: maximum number of microseconds in the timeout
++ * @index: loop index (integer)
++ *
++ * Loop waiting for @cond to become true or until at least @timeout
++ * microseconds have passed. To use, define some integer @index in the
++ * calling code. After running, if @index == @timeout, then the loop has
++ * timed out.
++ */
++#define omap_test_timeout(cond, timeout, index) \
++({ \
++ for (index = 0; index < timeout; index++) { \
++ if (cond) \
++ break; \
++ udelay(1); \
++ } \
++})
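++
++/*
++ * Usage sketch (illustrative only, not a real caller): poll a ready bit
++ * and then check whether the loop expired. "status_reg" and MAX_USEC are
++ * placeholder names.
++ *
++ *	int i;
++ *
++ *	omap_test_timeout(__raw_readl(status_reg) & BIT(0), MAX_USEC, i);
++ *	if (i == MAX_USEC)
++ *		return -ETIMEDOUT;
++ */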
++
++extern struct device *omap2_get_mpuss_device(void);
++extern struct device *omap2_get_iva_device(void);
++extern struct device *omap2_get_l3_device(void);
++extern struct device *omap4_get_dsp_device(void);
++
++void omap2_init_irq(void);
++void omap3_init_irq(void);
++void ti81xx_init_irq(void);
++extern int omap_irq_pending(void);
++void omap_intc_save_context(void);
++void omap_intc_restore_context(void);
++void omap3_intc_suspend(void);
++void omap3_intc_prepare_idle(void);
++void omap3_intc_resume_idle(void);
++void omap2_intc_handle_irq(struct pt_regs *regs);
++void omap3_intc_handle_irq(struct pt_regs *regs);
++
++#ifdef CONFIG_CACHE_L2X0
++extern void __iomem *omap4_get_l2cache_base(void);
++#endif
++
++#ifdef CONFIG_SMP
++extern void __iomem *omap4_get_scu_base(void);
++#else
++static inline void __iomem *omap4_get_scu_base(void)
++{
++ return NULL;
++}
++#endif
++
++extern void __init gic_init_irq(void);
++extern void omap_smc1(u32 fn, u32 arg);
++extern void __iomem *omap4_get_sar_ram_base(void);
++extern void omap_do_wfi(void);
++
++#ifdef CONFIG_SMP
++/* Needed for secondary core boot */
++extern void omap_secondary_startup(void);
++extern u32 omap_modify_auxcoreboot0(u32 set_mask, u32 clear_mask);
++extern void omap_auxcoreboot_addr(u32 cpu_addr);
++extern u32 omap_read_auxcoreboot0(void);
++#endif
++
++#if defined(CONFIG_SMP) && defined(CONFIG_PM)
++extern int omap4_mpuss_init(void);
++extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
++extern int omap4_finish_suspend(unsigned long cpu_state);
++extern void omap4_cpu_resume(void);
++extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
++extern u32 omap4_mpuss_read_prev_context_state(void);
++#else
++static inline int omap4_enter_lowpower(unsigned int cpu,
++ unsigned int power_state)
++{
++ cpu_do_idle();
++ return 0;
++}
++
++static inline int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
++{
++ cpu_do_idle();
++ return 0;
++}
++
++static inline int omap4_mpuss_init(void)
++{
++ return 0;
++}
++
++static inline int omap4_finish_suspend(unsigned long cpu_state)
++{
++ return 0;
++}
++
++static inline void omap4_cpu_resume(void)
++{}
++
++static inline u32 omap4_mpuss_read_prev_context_state(void)
++{
++ return 0;
++}
++#endif
++#endif /* __ASSEMBLER__ */
++#endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */
+diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c
+index e34d27f..f7fc60e 100644
+--- a/arch/arm/mach-omap2/control.c
++++ b/arch/arm/mach-omap2/control.c
+@@ -13,9 +13,10 @@
+ #undef DEBUG
+
+ #include <linux/kernel.h>
++#include <linux/module.h>
+ #include <linux/io.h>
+
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/sdrc.h>
+
+ #include "cm-regbits-34xx.h"
+@@ -190,6 +191,7 @@ void omap_ctrl_writel(u32 val, u16 offset)
+ {
+ __raw_writel(val, OMAP_CTRL_REGADDR(offset));
+ }
++EXPORT_SYMBOL_GPL(omap_ctrl_writel);
+
+ /*
+ * On OMAP4 control pad are not addressable from control
+diff --git a/arch/arm/mach-omap2/control.h b/arch/arm/mach-omap2/control.h
+index d4ef75d..4aeefbd 100644
+--- a/arch/arm/mach-omap2/control.h
++++ b/arch/arm/mach-omap2/control.h
+@@ -29,6 +29,8 @@
+ OMAP2_L4_IO_ADDRESS(OMAP243X_CTRL_BASE + (reg))
+ #define OMAP343X_CTRL_REGADDR(reg) \
+ OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg))
++#define AM33XX_CTRL_REGADDR(reg) \
++ AM33XX_L4_WK_IO_ADDRESS(AM33XX_SCM_BASE + (reg))
+ #else
+ #define OMAP242X_CTRL_REGADDR(reg) \
+ OMAP2_L4_IO_ADDRESS(OMAP242X_CTRL_BASE + (reg))
+@@ -36,6 +38,8 @@
+ OMAP2_L4_IO_ADDRESS(OMAP243X_CTRL_BASE + (reg))
+ #define OMAP343X_CTRL_REGADDR(reg) \
+ OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg))
++#define AM33XX_CTRL_REGADDR(reg) \
++ AM33XX_L4_WK_IO_ADDRESS(AM33XX_SCM_BASE + (reg))
+ #endif /* __ASSEMBLY__ */
+
+ /*
+@@ -52,8 +56,14 @@
+ #define OMAP343X_CONTROL_PADCONFS_WKUP 0xa00
+ #define OMAP343X_CONTROL_GENERAL_WKUP 0xa60
+
+-/* TI816X spefic control submodules */
+-#define TI816X_CONTROL_DEVCONF 0x600
++/* TI81XX specific control submodules */
++#define TI81XX_CONTROL_DEVCONF 0x600
++
++/* TI81XX CONTROL_DEVCONF register offsets */
++#define TI81XX_CONTROL_MAC_ID0_LO (TI81XX_CONTROL_DEVCONF + 0x030)
++#define TI81XX_CONTROL_MAC_ID0_HI (TI81XX_CONTROL_DEVCONF + 0x034)
++#define TI81XX_CONTROL_MAC_ID1_LO (TI81XX_CONTROL_DEVCONF + 0x038)
++#define TI81XX_CONTROL_MAC_ID1_HI (TI81XX_CONTROL_DEVCONF + 0x03c)
+
+ /* Control register offsets - read/write with omap_ctrl_{read,write}{bwl}() */
+
+@@ -244,8 +254,8 @@
+ #define OMAP3_PADCONF_SAD2D_MSTANDBY 0x250
+ #define OMAP3_PADCONF_SAD2D_IDLEACK 0x254
+
+-/* TI816X CONTROL_DEVCONF register offsets */
+-#define TI816X_CONTROL_DEVICE_ID (TI816X_CONTROL_DEVCONF + 0x000)
++/* TI81XX CONTROL_DEVCONF register offsets */
++#define TI81XX_CONTROL_DEVICE_ID (TI81XX_CONTROL_DEVCONF + 0x000)
+
+ /*
+ * REVISIT: This list of registers is not comprehensive - there are more
+@@ -338,6 +348,35 @@
+ #define AM35XX_HECC_SW_RST BIT(3)
+ #define AM35XX_VPFE_PCLK_SW_RST BIT(4)
+
++/* AM33XX CONTROL_STATUS bits */
++#define AM33XX_SYSBOOT0 (0xff << 0)
++#define AM33XX_DEVTYPE (1 << 8)
++#define AM33XX_GPMC_CS0_BW (1 << 16)
++#define AM33XX_GPMC_CS0_WAITEN (1 << 17)
++#define AM33XX_GPMC_CS0_ADMUX (0x3 << 18)
++#define AM33XX_SYSBOOT1 (0x3 << 22)
++
++/*
++ * CONTROL AM33XX STATUS register to identify boot-time configurations
++ */
++#define AM33XX_CONTROL_STATUS_OFF 0x040
++#define AM33XX_CONTROL_STATUS AM33XX_L4_WK_IO_ADDRESS(AM33XX_CTRL_BASE + \
++ AM33XX_CONTROL_STATUS_OFF)
++#define AM33XX_DEV_FEATURE 0x604
++#define AM33XX_SGX_SHIFT 29
++#define AM33XX_SGX_MASK (1 << AM33XX_SGX_SHIFT)
++
++/*
++ * CONTROL AM33XX PWMSS_CTRL register to enable the time base clock
++ */
++
++#define AM33XX_CONTROL_PWMSS_CTRL_OFS 0x664
++#define AM33XX_PWMSS0_TBCLKEN 0x0
++#define AM33XX_PWMSS1_TBCLKEN 0x1
++#define AM33XX_PWMSS2_TBCLKEN 0x2
++#define AM33XX_CONTROL_PWMSS_CTRL AM33XX_L4_WK_IO_ADDRESS( \
++ AM33XX_CTRL_BASE + AM33XX_CONTROL_PWMSS_CTRL_OFS)
++
+ /*
+ * CONTROL OMAP STATUS register to identify OMAP3 features
+ */
+diff --git a/arch/arm/mach-omap2/cpuidle33xx.c b/arch/arm/mach-omap2/cpuidle33xx.c
+new file mode 100644
+index 0000000..7e14de4
+--- /dev/null
++++ b/arch/arm/mach-omap2/cpuidle33xx.c
+@@ -0,0 +1,179 @@
++/*
++ * CPU idle for AM33XX SoCs
++ *
++ * Copyright (C) 2011 Texas Instruments Incorporated. http://www.ti.com/
++ *
++ * Derived from Davinci CPU idle code
++ * (arch/arm/mach-davinci/cpuidle.c)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/platform_device.h>
++#include <linux/cpuidle.h>
++#include <linux/sched.h>
++#include <asm/proc-fns.h>
++
++#include <plat/emif.h>
++
++#include "cpuidle33xx.h"
++
++#define AM33XX_CPUIDLE_MAX_STATES 2
++
++struct am33xx_ops {
++ void (*enter) (u32 flags);
++ void (*exit) (u32 flags);
++ u32 flags;
++};
++
++/* fields in am33xx_ops.flags */
++#define AM33XX_CPUIDLE_FLAGS_DDR2_PWDN BIT(0)
++
++static struct cpuidle_driver am33xx_idle_driver = {
++ .name = "cpuidle-am33xx",
++ .owner = THIS_MODULE,
++};
++
++static DEFINE_PER_CPU(struct cpuidle_device, am33xx_cpuidle_device);
++static void __iomem *emif_base;
++
++static void am33xx_save_ddr_power(int enter, bool pdown)
++{
++ u32 val;
++
++ val = __raw_readl(emif_base + EMIF4_0_SDRAM_MGMT_CTRL);
++
++ /* TODO: Choose the mode based on memory type */
++ if (enter)
++ val = SELF_REFRESH_ENABLE(64);
++ else
++ val = SELF_REFRESH_DISABLE;
++
++ __raw_writel(val, emif_base + EMIF4_0_SDRAM_MGMT_CTRL);
++}
++
++static void am33xx_c2state_enter(u32 flags)
++{
++ am33xx_save_ddr_power(1, !!(flags & AM33XX_CPUIDLE_FLAGS_DDR2_PWDN));
++}
++
++static void am33xx_c2state_exit(u32 flags)
++{
++ am33xx_save_ddr_power(0, !!(flags & AM33XX_CPUIDLE_FLAGS_DDR2_PWDN));
++}
++
++static struct am33xx_ops am33xx_states[AM33XX_CPUIDLE_MAX_STATES] = {
++ [1] = {
++ .enter = am33xx_c2state_enter,
++ .exit = am33xx_c2state_exit,
++ },
++};
++
++/* Actual code that puts the SoC in different idle states */
++static int am33xx_enter_idle(struct cpuidle_device *dev,
++ struct cpuidle_driver *drv, int index)
++{
++ struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
++ struct am33xx_ops *ops = cpuidle_get_statedata(state_usage);
++ struct timeval before, after;
++ int idle_time;
++
++ local_irq_disable();
++ do_gettimeofday(&before);
++
++ if (ops && ops->enter)
++ ops->enter(ops->flags);
++
++ /* Wait for interrupt state */
++ cpu_do_idle();
++ if (ops && ops->exit)
++ ops->exit(ops->flags);
++
++ do_gettimeofday(&after);
++ local_irq_enable();
++ idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
++ (after.tv_usec - before.tv_usec);
++
++ dev->last_residency = idle_time;
++
++ return index;
++}
++
++static int __init am33xx_cpuidle_probe(struct platform_device *pdev)
++{
++ int ret;
++ struct cpuidle_device *device;
++ struct cpuidle_driver *driver = &am33xx_idle_driver;
++ struct am33xx_cpuidle_config *pdata = pdev->dev.platform_data;
++
++ device = &per_cpu(am33xx_cpuidle_device, smp_processor_id());
++
++ if (!pdata) {
++ dev_err(&pdev->dev, "cannot get platform data\n");
++ return -ENOENT;
++ }
++
++ emif_base = pdata->emif_base;
++
++ /* Wait for interrupt state */
++ driver->states[0].enter = am33xx_enter_idle;
++ driver->states[0].exit_latency = 1;
++ driver->states[0].target_residency = 10000;
++ driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
++ strcpy(driver->states[0].name, "WFI");
++ strcpy(driver->states[0].desc, "Wait for interrupt");
++
++ /* Wait for interrupt and DDR self refresh state */
++ driver->states[1].enter = am33xx_enter_idle;
++ driver->states[1].exit_latency = 100;
++ driver->states[1].target_residency = 10000;
++ driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
++ strcpy(driver->states[1].name, "DDR SR");
++ strcpy(driver->states[1].desc, "WFI and DDR Self Refresh");
++ if (pdata->ddr2_pdown)
++ am33xx_states[1].flags |= AM33XX_CPUIDLE_FLAGS_DDR2_PWDN;
++ cpuidle_set_statedata(&device->states_usage[1], &am33xx_states[1]);
++
++ device->state_count = AM33XX_CPUIDLE_MAX_STATES;
++ driver->state_count = AM33XX_CPUIDLE_MAX_STATES;
++
++ ret = cpuidle_register_driver(&am33xx_idle_driver);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to register driver\n");
++ return ret;
++ }
++
++ ret = cpuidle_register_device(device);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to register device\n");
++ cpuidle_unregister_driver(&am33xx_idle_driver);
++ return ret;
++ }
++
++ return 0;
++}
++
++static struct platform_driver am33xx_cpuidle_driver = {
++ .driver = {
++ .name = "cpuidle-am33xx",
++ .owner = THIS_MODULE,
++ },
++};
++
++static int __init am33xx_cpuidle_init(void)
++{
++ return platform_driver_probe(&am33xx_cpuidle_driver,
++ am33xx_cpuidle_probe);
++}
++device_initcall(am33xx_cpuidle_init);
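++
++/*
++ * Board-side sketch (illustrative, not part of this driver): the probe
++ * above bails out with -ENOENT unless a matching platform device carrying
++ * an am33xx_cpuidle_config is registered from board/PM init code. The
++ * device name must match "cpuidle-am33xx"; the EMIF base define and the
++ * ioremap size below are assumptions.
++ *
++ *	static struct am33xx_cpuidle_config bone_cpuidle_pdata;
++ *
++ *	static struct platform_device bone_cpuidle_dev = {
++ *		.name			= "cpuidle-am33xx",
++ *		.id			= -1,
++ *		.dev = {
++ *			.platform_data	= &bone_cpuidle_pdata,
++ *		},
++ *	};
++ *
++ *	bone_cpuidle_pdata.ddr2_pdown = 1;
++ *	bone_cpuidle_pdata.emif_base = ioremap(AM33XX_EMIF0_BASE, SZ_32K);
++ *	platform_device_register(&bone_cpuidle_dev);
++ */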
+diff --git a/arch/arm/mach-omap2/cpuidle33xx.h b/arch/arm/mach-omap2/cpuidle33xx.h
+new file mode 100644
+index 0000000..c092fba
+--- /dev/null
++++ b/arch/arm/mach-omap2/cpuidle33xx.h
+@@ -0,0 +1,24 @@
++/*
++ * TI AM33XX cpuidle platform support
++ *
++ * Copyright (C) 2011 Texas Instruments, Inc. - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef _AM33XX_CPUIDLE_H
++#define _AM33XX_CPUIDLE_H
++
++struct am33xx_cpuidle_config {
++ u32 ddr2_pdown;
++ void __iomem *emif_base;
++};
++
++#endif
+diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
+index 942bb4f..464cffd 100644
+--- a/arch/arm/mach-omap2/cpuidle34xx.c
++++ b/arch/arm/mach-omap2/cpuidle34xx.c
+@@ -25,15 +25,16 @@
+ #include <linux/sched.h>
+ #include <linux/cpuidle.h>
+ #include <linux/export.h>
++#include <linux/cpu_pm.h>
+
+ #include <plat/prcm.h>
+ #include <plat/irqs.h>
+ #include "powerdomain.h"
+ #include "clockdomain.h"
+-#include <plat/serial.h>
+
+ #include "pm.h"
+ #include "control.h"
++#include "common.h"
+
+ #ifdef CONFIG_CPU_IDLE
+
+@@ -123,9 +124,23 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
+ pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
+ }
+
++ /*
++ * Call idle CPU PM enter notifier chain so that
++ * VFP context is saved.
++ */
++ if (mpu_state == PWRDM_POWER_OFF)
++ cpu_pm_enter();
++
+ /* Execute ARM wfi */
+ omap_sram_idle();
+
++ /*
++	 * Call idle CPU PM exit notifier chain to restore
++ * VFP context.
++ */
++ if (pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)
++ cpu_pm_exit();
++
+ /* Re-allow idle for C1 */
+ if (index == 0) {
+ pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
+@@ -244,11 +259,6 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
+ struct omap3_idle_statedata *cx;
+ int ret;
+
+- if (!omap3_can_sleep()) {
+- new_state_idx = drv->safe_state_index;
+- goto select_state;
+- }
+-
+ /*
+ * Prevent idle completely if CAM is active.
+ * CAM does not have wakeup capability in OMAP3.
+diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
+new file mode 100644
+index 0000000..cfdbb86
+--- /dev/null
++++ b/arch/arm/mach-omap2/cpuidle44xx.c
+@@ -0,0 +1,245 @@
++/*
++ * OMAP4 CPU idle Routines
++ *
++ * Copyright (C) 2011 Texas Instruments, Inc.
++ * Santosh Shilimkar <santosh.shilimkar@ti.com>
++ * Rajendra Nayak <rnayak@ti.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/sched.h>
++#include <linux/cpuidle.h>
++#include <linux/cpu_pm.h>
++#include <linux/export.h>
++#include <linux/clockchips.h>
++
++#include <asm/proc-fns.h>
++
++#include "common.h"
++#include "pm.h"
++#include "prm.h"
++
++#ifdef CONFIG_CPU_IDLE
++
++/* Machine specific information to be recorded in the C-state driver_data */
++struct omap4_idle_statedata {
++ u32 cpu_state;
++ u32 mpu_logic_state;
++ u32 mpu_state;
++ u8 valid;
++};
++
++static struct cpuidle_params cpuidle_params_table[] = {
++ /* C1 - CPU0 ON + CPU1 ON + MPU ON */
++ {.exit_latency = 2 + 2 , .target_residency = 5, .valid = 1},
++ /* C2- CPU0 OFF + CPU1 OFF + MPU CSWR */
++ {.exit_latency = 328 + 440 , .target_residency = 960, .valid = 1},
++ /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
++ {.exit_latency = 460 + 518 , .target_residency = 1100, .valid = 1},
++};
++
++#define OMAP4_NUM_STATES ARRAY_SIZE(cpuidle_params_table)
++
++struct omap4_idle_statedata omap4_idle_data[OMAP4_NUM_STATES];
++static struct powerdomain *mpu_pd, *cpu0_pd, *cpu1_pd;
++
++/**
++ * omap4_enter_idle - Programs OMAP4 to enter the specified state
++ * @dev: cpuidle device
++ * @drv: cpuidle driver
++ * @index: the index of state to be entered
++ *
++ * Called from the CPUidle framework to program the device to the
++ * specified low power state selected by the governor.
++ * Returns the amount of time spent in the low power state.
++ */
++static int omap4_enter_idle(struct cpuidle_device *dev,
++ struct cpuidle_driver *drv,
++ int index)
++{
++ struct omap4_idle_statedata *cx =
++ cpuidle_get_statedata(&dev->states_usage[index]);
++ struct timespec ts_preidle, ts_postidle, ts_idle;
++ u32 cpu1_state;
++ int idle_time;
++ int new_state_idx;
++ int cpu_id = smp_processor_id();
++
++ /* Used to keep track of the total time in idle */
++ getnstimeofday(&ts_preidle);
++
++ local_irq_disable();
++ local_fiq_disable();
++
++ /*
++	 * CPU0 has to stay ON (i.e. in C1) until CPU1 is in OFF state.
++	 * This is necessary to honour the hardware recommendation
++	 * of triggering all the possible low power modes once CPU1 is
++	 * out of coherency and in OFF mode.
++	 * Update dev->last_state so that the governor stats reflect the
++	 * right data.
++ */
++ cpu1_state = pwrdm_read_pwrst(cpu1_pd);
++ if (cpu1_state != PWRDM_POWER_OFF) {
++ new_state_idx = drv->safe_state_index;
++ cx = cpuidle_get_statedata(&dev->states_usage[new_state_idx]);
++ }
++
++ if (index > 0)
++ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
++
++ /*
++ * Call idle CPU PM enter notifier chain so that
++ * VFP and per CPU interrupt context is saved.
++ */
++ if (cx->cpu_state == PWRDM_POWER_OFF)
++ cpu_pm_enter();
++
++ pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
++ omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
++
++ /*
++ * Call idle CPU cluster PM enter notifier chain
++ * to save GIC and wakeupgen context.
++ */
++ if ((cx->mpu_state == PWRDM_POWER_RET) &&
++ (cx->mpu_logic_state == PWRDM_POWER_OFF))
++ cpu_cluster_pm_enter();
++
++ omap4_enter_lowpower(dev->cpu, cx->cpu_state);
++
++ /*
++ * Call idle CPU PM exit notifier chain to restore
++ * VFP and per CPU IRQ context. Only CPU0 state is
++ * considered since CPU1 is managed by CPU hotplug.
++ */
++ if (pwrdm_read_prev_pwrst(cpu0_pd) == PWRDM_POWER_OFF)
++ cpu_pm_exit();
++
++ /*
++ * Call idle CPU cluster PM exit notifier chain
++ * to restore GIC and wakeupgen context.
++ */
++ if (omap4_mpuss_read_prev_context_state())
++ cpu_cluster_pm_exit();
++
++ if (index > 0)
++ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
++
++ getnstimeofday(&ts_postidle);
++ ts_idle = timespec_sub(ts_postidle, ts_preidle);
++
++ local_irq_enable();
++ local_fiq_enable();
++
++ idle_time = ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * \
++ USEC_PER_SEC;
++
++ /* Update cpuidle counters */
++ dev->last_residency = idle_time;
++
++ return index;
++}
++
++DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);
++
++struct cpuidle_driver omap4_idle_driver = {
++ .name = "omap4_idle",
++ .owner = THIS_MODULE,
++};
++
++static inline void _fill_cstate(struct cpuidle_driver *drv,
++ int idx, const char *descr)
++{
++ struct cpuidle_state *state = &drv->states[idx];
++
++ state->exit_latency = cpuidle_params_table[idx].exit_latency;
++ state->target_residency = cpuidle_params_table[idx].target_residency;
++ state->flags = CPUIDLE_FLAG_TIME_VALID;
++ state->enter = omap4_enter_idle;
++ sprintf(state->name, "C%d", idx + 1);
++ strncpy(state->desc, descr, CPUIDLE_DESC_LEN);
++}
++
++static inline struct omap4_idle_statedata *_fill_cstate_usage(
++ struct cpuidle_device *dev,
++ int idx)
++{
++ struct omap4_idle_statedata *cx = &omap4_idle_data[idx];
++ struct cpuidle_state_usage *state_usage = &dev->states_usage[idx];
++
++ cx->valid = cpuidle_params_table[idx].valid;
++ cpuidle_set_statedata(state_usage, cx);
++
++ return cx;
++}
++
++
++
++/**
++ * omap4_idle_init - Init routine for OMAP4 idle
++ *
++ * Registers the OMAP4 specific cpuidle driver to the cpuidle
++ * framework with the valid set of states.
++ */
++int __init omap4_idle_init(void)
++{
++ struct omap4_idle_statedata *cx;
++ struct cpuidle_device *dev;
++ struct cpuidle_driver *drv = &omap4_idle_driver;
++ unsigned int cpu_id = 0;
++
++ mpu_pd = pwrdm_lookup("mpu_pwrdm");
++ cpu0_pd = pwrdm_lookup("cpu0_pwrdm");
++ cpu1_pd = pwrdm_lookup("cpu1_pwrdm");
++ if ((!mpu_pd) || (!cpu0_pd) || (!cpu1_pd))
++ return -ENODEV;
++
++
++ drv->safe_state_index = -1;
++ dev = &per_cpu(omap4_idle_dev, cpu_id);
++ dev->cpu = cpu_id;
++
++ /* C1 - CPU0 ON + CPU1 ON + MPU ON */
++ _fill_cstate(drv, 0, "MPUSS ON");
++ drv->safe_state_index = 0;
++ cx = _fill_cstate_usage(dev, 0);
++ cx->valid = 1; /* C1 is always valid */
++ cx->cpu_state = PWRDM_POWER_ON;
++ cx->mpu_state = PWRDM_POWER_ON;
++ cx->mpu_logic_state = PWRDM_POWER_RET;
++
++ /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
++ _fill_cstate(drv, 1, "MPUSS CSWR");
++ cx = _fill_cstate_usage(dev, 1);
++ cx->cpu_state = PWRDM_POWER_OFF;
++ cx->mpu_state = PWRDM_POWER_RET;
++ cx->mpu_logic_state = PWRDM_POWER_RET;
++
++ /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
++ _fill_cstate(drv, 2, "MPUSS OSWR");
++ cx = _fill_cstate_usage(dev, 2);
++ cx->cpu_state = PWRDM_POWER_OFF;
++ cx->mpu_state = PWRDM_POWER_RET;
++ cx->mpu_logic_state = PWRDM_POWER_OFF;
++
++ drv->state_count = OMAP4_NUM_STATES;
++ cpuidle_register_driver(&omap4_idle_driver);
++
++ dev->state_count = OMAP4_NUM_STATES;
++ if (cpuidle_register_device(dev)) {
++ pr_err("%s: CPUidle register device failed\n", __func__);
++ return -EIO;
++ }
++
++ return 0;
++}
++#else
++int __init omap4_idle_init(void)
++{
++ return 0;
++}
++#endif /* CONFIG_CPU_IDLE */
+diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
+index c15cfad..9e029da 100644
+--- a/arch/arm/mach-omap2/devices.c
++++ b/arch/arm/mach-omap2/devices.c
+@@ -17,13 +17,28 @@
+ #include <linux/err.h>
+ #include <linux/slab.h>
+ #include <linux/of.h>
++#include <linux/davinci_emac.h>
++#include <linux/cpsw.h>
++#include <linux/etherdevice.h>
++#include <linux/dma-mapping.h>
++#include <linux/can/platform/d_can.h>
++#include <linux/platform_data/uio_pruss.h>
++#include <linux/pwm/pwm.h>
++#include <linux/input/ti_tscadc.h>
+
+ #include <mach/hardware.h>
+ #include <mach/irqs.h>
++#include <mach/board-am335xevm.h>
+ #include <asm/mach-types.h>
+ #include <asm/mach/map.h>
+ #include <asm/pmu.h>
+
++#ifdef CONFIG_OMAP3_EDMA
++#include <mach/edma.h>
++#endif
++
++#include <asm/hardware/asp.h>
++
+ #include <plat/tc.h>
+ #include <plat/board.h>
+ #include <plat/mcbsp.h>
+@@ -32,6 +47,12 @@
+ #include <plat/omap_hwmod.h>
+ #include <plat/omap_device.h>
+ #include <plat/omap4-keypad.h>
++#include <plat/config_pwm.h>
++#include <plat/cpu.h>
++#include <plat/gpmc.h>
++
++/* LCD controller similar to DA8xx */
++#include <video/da8xx-fb.h>
+
+ #include "mux.h"
+ #include "control.h"
+@@ -51,7 +72,7 @@ static int __init omap3_l3_init(void)
+ * To avoid code running on other OMAPs in
+ * multi-omap builds
+ */
+- if (!(cpu_is_omap34xx()))
++ if (!(cpu_is_omap34xx()) || (cpu_is_am33xx()))
+ return -ENODEV;
+
+ l = snprintf(oh_name, L3_MODULES_MAX_LEN, "l3_main");
+@@ -127,6 +148,99 @@ static struct platform_device omap2cam_device = {
+ };
+ #endif
+
++int __init am33xx_register_lcdc(struct da8xx_lcdc_platform_data *pdata)
++{
++ int id = 0;
++ struct platform_device *pdev;
++ struct omap_hwmod *oh;
++ char *oh_name = "lcdc";
++ char *dev_name = "da8xx_lcdc";
++
++ oh = omap_hwmod_lookup(oh_name);
++ if (!oh) {
++ pr_err("Could not look up LCD%d hwmod\n", id);
++ return -ENODEV;
++ }
++
++ pdev = omap_device_build(dev_name, id, oh, pdata,
++ sizeof(struct da8xx_lcdc_platform_data), NULL, 0, 0);
++ if (IS_ERR(pdev)) {
++ WARN(1, "Can't build omap_device for %s:%s.\n",
++ dev_name, oh->name);
++ return PTR_ERR(pdev);
++ }
++ return 0;
++}
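++
++/*
++ * Usage sketch (illustrative): a board file describes its panel in a
++ * da8xx_lcdc_platform_data and registers the LCDC through this helper
++ * during init; the variable name below is a placeholder.
++ *
++ *	static struct da8xx_lcdc_platform_data bone_lcdc_pdata;
++ *
++ *	if (am33xx_register_lcdc(&bone_lcdc_pdata))
++ *		pr_err("failed to register LCDC\n");
++ */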
++
++int __init am33xx_register_tsc(struct tsc_data *pdata)
++{
++ int id = -1;
++ struct platform_device *pdev;
++ struct omap_hwmod *oh;
++ char *oh_name = "adc_tsc";
++ char *dev_name = "tsc";
++
++ oh = omap_hwmod_lookup(oh_name);
++ if (!oh) {
++ pr_err("Could not look up TSC%d hwmod\n", id);
++ return -ENODEV;
++ }
++
++ pdev = omap_device_build(dev_name, id, oh, pdata,
++ sizeof(struct tsc_data), NULL, 0, 0);
++
++ WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
++ dev_name, oh->name);
++ return 0;
++}
++
++#if defined(CONFIG_SND_AM335X_SOC_EVM) || \
++ defined(CONFIG_SND_AM335X_SOC_EVM_MODULE)
++int __init am335x_register_mcasp(struct snd_platform_data *pdata, int ctrl_nr)
++{
++ int l;
++ struct omap_hwmod *oh;
++ struct platform_device *pdev;
++ char oh_name[12];
++ char *dev_name = "davinci-mcasp";
++
++ l = snprintf(oh_name, 12, "mcasp%d", ctrl_nr);
++
++ oh = omap_hwmod_lookup(oh_name);
++ if (!oh) {
++ pr_err("could not look up %s\n", oh_name);
++ return -ENODEV;
++ }
++
++ pdev = omap_device_build(dev_name, ctrl_nr, oh, pdata,
++ sizeof(struct snd_platform_data), NULL, 0, 0);
++ WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
++ dev_name, oh->name);
++ return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
++}
++
++#else
++int __init am335x_register_mcasp(struct snd_platform_data *pdata, int ctrl_nr)
++{
++ return 0;
++}
++#endif
++
++#if (defined(CONFIG_SND_AM33XX_SOC) || (defined(CONFIG_SND_AM33XX_SOC_MODULE)))
++struct platform_device am33xx_pcm_device = {
++ .name = "davinci-pcm-audio",
++ .id = -1,
++};
++
++static void am33xx_init_pcm(void)
++{
++ platform_device_register(&am33xx_pcm_device);
++}
++
++#else
++static inline void am33xx_init_pcm(void) {}
++#endif
++
+ static struct resource omap3isp_resources[] = {
+ {
+ .start = OMAP3430_ISP_BASE,
+@@ -299,6 +413,9 @@ OMAP_MCBSP_PLATFORM_DEVICE(5);
+
+ static void omap_init_audio(void)
+ {
++ if (cpu_is_am33xx())
++ return;
++
+ platform_device_register(&omap_mcbsp1);
+ platform_device_register(&omap_mcbsp2);
+ if (cpu_is_omap243x() || cpu_is_omap34xx() || cpu_is_omap44xx()) {
+@@ -336,6 +453,27 @@ static void omap_init_mcpdm(void)
+ static inline void omap_init_mcpdm(void) {}
+ #endif
+
++#if defined(CONFIG_SND_OMAP_SOC_DMIC) || \
++ defined(CONFIG_SND_OMAP_SOC_DMIC_MODULE)
++
++static void omap_init_dmic(void)
++{
++ struct omap_hwmod *oh;
++ struct platform_device *pdev;
++
++ oh = omap_hwmod_lookup("dmic");
++ if (!oh) {
++		printk(KERN_ERR "Could not look up dmic hw_mod\n");
++ return;
++ }
++
++ pdev = omap_device_build("omap-dmic", -1, oh, NULL, 0, NULL, 0, 0);
++ WARN(IS_ERR(pdev), "Can't build omap_device for omap-dmic.\n");
++}
++#else
++static inline void omap_init_dmic(void) {}
++#endif
++
+ #if defined(CONFIG_SPI_OMAP24XX) || defined(CONFIG_SPI_OMAP24XX_MODULE)
+
+ #include <plat/mcspi.h>
+@@ -386,6 +524,92 @@ static void omap_init_mcspi(void)
+ static inline void omap_init_mcspi(void) {}
+ #endif
+
++int __init omap_init_elm(void)
++{
++ int id = -1;
++ struct platform_device *pdev;
++ struct omap_hwmod *oh;
++ char *oh_name = "elm";
++ char *name = "omap2_elm";
++
++ oh = omap_hwmod_lookup(oh_name);
++ if (!oh) {
++ pr_err("Could not look up %s\n", oh_name);
++ return -ENODEV;
++ }
++
++ pdev = omap_device_build(name, id, oh, NULL, 0, NULL, 0, 0);
++
++ if (IS_ERR(pdev)) {
++ WARN(1, "Can't build omap_device for %s:%s.\n",
++ name, oh->name);
++ return PTR_ERR(pdev);
++ }
++
++ return 0;
++}
++
++#ifdef CONFIG_SOC_OMAPAM33XX
++#define PWM_STR_LEN 10
++int __init am33xx_register_ecap(int id, struct pwmss_platform_data *pdata)
++{
++ struct platform_device *pdev;
++ struct omap_hwmod *oh;
++ char *oh_name = "ecap";
++ char dev_name[PWM_STR_LEN];
++
++ sprintf(dev_name, "ecap.%d", id);
++
++ oh = omap_hwmod_lookup(dev_name);
++ if (!oh) {
++ pr_err("Could not look up %s hwmod\n", dev_name);
++ return -ENODEV;
++ }
++
++ pdev = omap_device_build(oh_name, id, oh, pdata,
++ sizeof(*pdata), NULL, 0, 0);
++
++ if (IS_ERR(pdev)) {
++ WARN(1, "Can't build omap_device for %s:%s.\n",
++ dev_name, oh->name);
++ return PTR_ERR(pdev);
++ }
++ return 0;
++}
++
++int __init am33xx_register_ehrpwm(int id, struct pwmss_platform_data *pdata)
++{
++ struct platform_device *pdev;
++ struct omap_hwmod *oh;
++ char *oh_name = "ehrpwm";
++ char dev_name[PWM_STR_LEN];
++
++ sprintf(dev_name, "ehrpwm.%d", id);
++
++ oh = omap_hwmod_lookup(dev_name);
++ if (!oh) {
++ pr_err("Could not look up %s hwmod\n", dev_name);
++ return -ENODEV;
++ }
++
++ pdev = omap_device_build(oh_name, id, oh, pdata,
++ sizeof(*pdata), NULL, 0, 0);
++
++ if (IS_ERR(pdev)) {
++ WARN(1, "Can't build omap_device for %s:%s.\n",
++ dev_name, oh->name);
++ return PTR_ERR(pdev);
++ }
++ return 0;
++}
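++
++/*
++ * Usage sketch (illustrative): board code registers one device per PWMSS
++ * sub-module it wants to expose, passing that module's platform data; the
++ * id selects the ecap.N / ehrpwm.N entry in the hwmod database. The
++ * variable name is a placeholder.
++ *
++ *	static struct pwmss_platform_data bone_pwm_pdata;
++ *
++ *	am33xx_register_ehrpwm(0, &bone_pwm_pdata);
++ *	am33xx_register_ecap(2, &bone_pwm_pdata);
++ */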
++
++#else
++static int __init am335x_register_ehrpwm(int id,
++		struct pwmss_platform_data *pdata) { return 0; }
++static int __init am335x_register_ecap(int id,
++		struct pwmss_platform_data *pdata) { return 0; }
++#endif
++
+ static struct resource omap2_pmu_resource = {
+ .start = 3,
+ .end = 3,
+@@ -408,7 +632,7 @@ static void omap_init_pmu(void)
+ {
+ if (cpu_is_omap24xx())
+ omap_pmu_device.resource = &omap2_pmu_resource;
+- else if (cpu_is_omap34xx())
++ else if (cpu_is_omap34xx() && !cpu_is_am33xx())
+ omap_pmu_device.resource = &omap3_pmu_resource;
+ else
+ return;
+@@ -469,7 +693,7 @@ static void omap_init_sham(void)
+ if (cpu_is_omap24xx()) {
+ sham_device.resource = omap2_sham_resources;
+ sham_device.num_resources = omap2_sham_resources_sz;
+- } else if (cpu_is_omap34xx()) {
++ } else if (cpu_is_omap34xx() && !cpu_is_am33xx()) {
+ sham_device.resource = omap3_sham_resources;
+ sham_device.num_resources = omap3_sham_resources_sz;
+ } else {
+@@ -538,7 +762,7 @@ static void omap_init_aes(void)
+ if (cpu_is_omap24xx()) {
+ aes_device.resource = omap2_aes_resources;
+ aes_device.num_resources = omap2_aes_resources_sz;
+- } else if (cpu_is_omap34xx()) {
++ } else if (cpu_is_omap34xx() && !cpu_is_am33xx()) {
+ aes_device.resource = omap3_aes_resources;
+ aes_device.num_resources = omap3_aes_resources_sz;
+ } else {
+@@ -671,6 +895,268 @@ static void omap_init_vout(void)
+ static inline void omap_init_vout(void) {}
+ #endif
+
++#if defined(CONFIG_SOC_OMAPAM33XX) && defined(CONFIG_OMAP3_EDMA)
++
++#define AM33XX_SCM_BASE_EDMA 0x00000f90
++
++static const s16 am33xx_dma_rsv_chans[][2] = {
++ /* (offset, number) */
++ {0, 2},
++ {14, 2},
++ {26, 6},
++ {48, 4},
++ {56, 8},
++ {-1, -1}
++};
++
++static const s16 am33xx_dma_rsv_slots[][2] = {
++ /* (offset, number) */
++ {0, 2},
++ {14, 2},
++ {26, 6},
++ {48, 4},
++ {56, 8},
++ {64, 127},
++ {-1, -1}
++};
++
++/* Three Transfer Controllers on AM33XX */
++static const s8 am33xx_queue_tc_mapping[][2] = {
++ /* {event queue no, TC no} */
++ {0, 0},
++ {1, 1},
++ {2, 2},
++ {-1, -1}
++};
++
++static const s8 am33xx_queue_priority_mapping[][2] = {
++ /* {event queue no, Priority} */
++ {0, 0},
++ {1, 1},
++ {2, 2},
++ {-1, -1}
++};
++
++static struct event_to_channel_map am33xx_xbar_event_mapping[] = {
++ /* {xbar event no, Channel} */
++ {1, 12}, /* SDTXEVT1 -> MMCHS2 */
++ {2, 13}, /* SDRXEVT1 -> MMCHS2 */
++ {3, -1},
++ {4, -1},
++ {5, -1},
++ {6, -1},
++ {7, -1},
++ {8, -1},
++ {9, -1},
++ {10, -1},
++ {11, -1},
++ {12, -1},
++ {13, -1},
++ {14, -1},
++ {15, -1},
++ {16, -1},
++ {17, -1},
++ {18, -1},
++ {19, -1},
++ {20, -1},
++ {21, -1},
++ {22, -1},
++ {23, -1},
++ {24, -1},
++ {25, -1},
++ {26, -1},
++ {27, -1},
++ {28, -1},
++ {29, -1},
++ {30, -1},
++ {31, -1},
++ {-1, -1}
++};
++
++/**
++ * map_xbar_event_to_channel - maps a crossbar event to a DMA channel
++ * according to the configuration provided
++ * @event: the event number for which mapping is required
++ * @channel: channel being activated
++ * @xbar_event_mapping: array that has the event to channel map
++ *
++ * Events that are routed by default are not mapped. Only events that
++ * are crossbar mapped are routed to available channels according to
++ * the configuration provided.
++ *
++ * Returns zero on success, else negative errno.
++ */
++int map_xbar_event_to_channel(unsigned int event, unsigned int *channel,
++ struct event_to_channel_map *xbar_event_mapping)
++{
++ unsigned int ctrl = 0;
++ unsigned int xbar_evt_no = 0;
++ unsigned int val = 0;
++ unsigned int offset = 0;
++ unsigned int mask = 0;
++
++ ctrl = EDMA_CTLR(event);
++ xbar_evt_no = event - (edma_cc[ctrl]->num_channels);
++
++ if (event < edma_cc[ctrl]->num_channels) {
++ *channel = event;
++ } else if (event < edma_cc[ctrl]->num_events) {
++ *channel = xbar_event_mapping[xbar_evt_no].channel_no;
++ /* confirm the range */
++ if (*channel < EDMA_MAX_DMACH)
++ clear_bit(*channel, edma_cc[ctrl]->edma_unused);
++ mask = (*channel)%4;
++ offset = (*channel)/4;
++ offset *= 4;
++ offset += mask;
++ val = (unsigned int)__raw_readl(AM33XX_CTRL_REGADDR(
++ AM33XX_SCM_BASE_EDMA + offset));
++ val = val & (~(0xFF));
++ val = val | (xbar_event_mapping[xbar_evt_no].xbar_event_no);
++ __raw_writel(val,
++ AM33XX_CTRL_REGADDR(AM33XX_SCM_BASE_EDMA + offset));
++ return 0;
++ } else {
++ return -EINVAL;
++ }
++
++ return 0;
++}
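++
++/*
++ * Worked example (derived from the code above): with n_channel = 64 and
++ * n_events = 95 (see am33xx_edma_info below), event 64 is the first
++ * crossbar event (xbar_evt_no = 0). Mapping entry {1, 12} steers it to
++ * DMA channel 12, so the function clears channel 12 in edma_unused and
++ * writes crossbar event number 1 into the low byte of the control-module
++ * register at AM33XX_SCM_BASE_EDMA + 12.
++ */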
++
++static struct edma_soc_info am33xx_edma_info[] = {
++ {
++ .n_channel = 64,
++ .n_region = 4,
++ .n_slot = 256,
++ .n_tc = 3,
++ .n_cc = 1,
++ .rsv_chans = am33xx_dma_rsv_chans,
++ .rsv_slots = am33xx_dma_rsv_slots,
++ .queue_tc_mapping = am33xx_queue_tc_mapping,
++ .queue_priority_mapping = am33xx_queue_priority_mapping,
++ .is_xbar = 1,
++ .n_events = 95,
++ .xbar_event_mapping = am33xx_xbar_event_mapping,
++ .map_xbar_channel = map_xbar_event_to_channel,
++ },
++};
++
++static int __init am33xx_register_edma(void)
++{
++ int i, l;
++ struct omap_hwmod *oh[4];
++ struct platform_device *pdev;
++ struct edma_soc_info *pdata = am33xx_edma_info;
++ char oh_name[8];
++
++ if (!cpu_is_am33xx())
++ return -ENODEV;
++
++ oh[0] = omap_hwmod_lookup("tpcc");
++ if (!oh[0]) {
++ pr_err("could not look up %s\n", "tpcc");
++ return -ENODEV;
++ }
++
++ for (i = 0; i < 3; i++) {
++ l = snprintf(oh_name, 8, "tptc%d", i);
++
++ oh[i+1] = omap_hwmod_lookup(oh_name);
++ if (!oh[i+1]) {
++ pr_err("could not look up %s\n", oh_name);
++ return -ENODEV;
++ }
++ }
++
++ pdev = omap_device_build_ss("edma", 0, oh, 4, pdata, sizeof(*pdata),
++ NULL, 0, 0);
++
++ WARN(IS_ERR(pdev), "could not build omap_device for edma\n");
++
++ return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
++
++}
++
++#else
++static inline void am33xx_register_edma(void) {}
++#endif
++
++#if defined (CONFIG_SOC_OMAPAM33XX)
++struct uio_pruss_pdata am335x_pruss_uio_pdata = {
++ .pintc_base = 0x20000,
++};
++
++static struct resource am335x_pruss_resources[] = {
++ {
++ .start = AM33XX_ICSS_BASE,
++ .end = AM33XX_ICSS_BASE + AM33XX_ICSS_LEN,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = AM33XX_IRQ_ICSS0_0,
++ .end = AM33XX_IRQ_ICSS0_0,
++ .flags = IORESOURCE_IRQ,
++ },
++ {
++ .start = AM33XX_IRQ_ICSS0_1,
++ .end = AM33XX_IRQ_ICSS0_1,
++ .flags = IORESOURCE_IRQ,
++ },
++ {
++ .start = AM33XX_IRQ_ICSS0_2,
++ .end = AM33XX_IRQ_ICSS0_2,
++ .flags = IORESOURCE_IRQ,
++ },
++ {
++ .start = AM33XX_IRQ_ICSS0_3,
++ .end = AM33XX_IRQ_ICSS0_3,
++ .flags = IORESOURCE_IRQ,
++ },
++ {
++ .start = AM33XX_IRQ_ICSS0_4,
++ .end = AM33XX_IRQ_ICSS0_4,
++ .flags = IORESOURCE_IRQ,
++ },
++ {
++ .start = AM33XX_IRQ_ICSS0_5,
++ .end = AM33XX_IRQ_ICSS0_5,
++ .flags = IORESOURCE_IRQ,
++ },
++ {
++ .start = AM33XX_IRQ_ICSS0_6,
++ .end = AM33XX_IRQ_ICSS0_6,
++ .flags = IORESOURCE_IRQ,
++ },
++ {
++ .start = AM33XX_IRQ_ICSS0_7,
++ .end = AM33XX_IRQ_ICSS0_7,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static struct platform_device am335x_pruss_uio_dev = {
++ .name = "pruss_uio",
++ .id = -1,
++ .num_resources = ARRAY_SIZE(am335x_pruss_resources),
++ .resource = am335x_pruss_resources,
++ .dev = {
++ .coherent_dma_mask = 0xffffffff,
++ }
++};
++
++int __init am335x_register_pruss_uio(struct uio_pruss_pdata *config)
++{
++ am335x_pruss_uio_dev.dev.platform_data = config;
++ return platform_device_register(&am335x_pruss_uio_dev);
++}
++
++static struct platform_device am335x_sgx = {
++ .name = "sgx",
++ .id = -1,
++};
++
++#endif
++
+ /*-------------------------------------------------------------------------*/
+
+ static int __init omap2_init_devices(void)
+@@ -681,6 +1167,7 @@ static int __init omap2_init_devices(void)
+ */
+ omap_init_audio();
+ omap_init_mcpdm();
++ omap_init_dmic();
+ omap_init_camera();
+ omap_init_mbox();
+ omap_init_mcspi();
+@@ -690,11 +1177,276 @@ static int __init omap2_init_devices(void)
+ omap_init_sham();
+ omap_init_aes();
+ omap_init_vout();
+-
++ am33xx_register_edma();
++ am33xx_init_pcm();
++#if defined (CONFIG_SOC_OMAPAM33XX)
++ am335x_register_pruss_uio(&am335x_pruss_uio_pdata);
++ if (omap3_has_sgx())
++ platform_device_register(&am335x_sgx);
++#endif
+ return 0;
+ }
+ arch_initcall(omap2_init_devices);
+
++#define AM33XX_EMAC_MDIO_FREQ (1000000)
++
++static u64 am33xx_cpsw_dmamask = DMA_BIT_MASK(32);
++/* TODO : Verify the offsets */
++static struct cpsw_slave_data am33xx_cpsw_slaves[] = {
++ {
++ .slave_reg_ofs = 0x208,
++ .sliver_reg_ofs = 0xd80,
++ .phy_id = "0:00",
++ },
++ {
++ .slave_reg_ofs = 0x308,
++ .sliver_reg_ofs = 0xdc0,
++ .phy_id = "0:01",
++ },
++};
++
++static struct cpsw_platform_data am33xx_cpsw_pdata = {
++ .ss_reg_ofs = 0x1200,
++ .channels = 8,
++ .cpdma_reg_ofs = 0x800,
++ .slaves = 2,
++ .slave_data = am33xx_cpsw_slaves,
++ .ale_reg_ofs = 0xd00,
++ .ale_entries = 1024,
++ .host_port_reg_ofs = 0x108,
++ .hw_stats_reg_ofs = 0x900,
++ .bd_ram_ofs = 0x2000,
++ .bd_ram_size = SZ_8K,
++ .rx_descs = 64,
++ .mac_control = BIT(5), /* MIIEN */
++ .gigabit_en = 1,
++ .host_port_num = 0,
++ .no_bd_ram = false,
++ .version = CPSW_VERSION_2,
++};
++
++static struct mdio_platform_data am33xx_cpsw_mdiopdata = {
++ .bus_freq = AM33XX_EMAC_MDIO_FREQ,
++};
++
++static struct resource am33xx_cpsw_mdioresources[] = {
++ {
++ .start = AM33XX_CPSW_MDIO_BASE,
++ .end = AM33XX_CPSW_MDIO_BASE + SZ_256 - 1,
++ .flags = IORESOURCE_MEM,
++ },
++};
++
++static struct platform_device am33xx_cpsw_mdiodevice = {
++ .name = "davinci_mdio",
++ .id = 0,
++ .num_resources = ARRAY_SIZE(am33xx_cpsw_mdioresources),
++ .resource = am33xx_cpsw_mdioresources,
++ .dev.platform_data = &am33xx_cpsw_mdiopdata,
++};
++
++static struct resource am33xx_cpsw_resources[] = {
++ {
++ .start = AM33XX_CPSW_BASE,
++ .end = AM33XX_CPSW_BASE + SZ_2K - 1,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = AM33XX_CPSW_SS_BASE,
++ .end = AM33XX_CPSW_SS_BASE + SZ_256 - 1,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = AM33XX_IRQ_CPSW_C0_RX,
++ .end = AM33XX_IRQ_CPSW_C0_RX,
++ .flags = IORESOURCE_IRQ,
++ },
++ {
++ .start = AM33XX_IRQ_DMTIMER5,
++ .end = AM33XX_IRQ_DMTIMER5,
++ .flags = IORESOURCE_IRQ,
++ },
++ {
++ .start = AM33XX_IRQ_DMTIMER6,
++ .end = AM33XX_IRQ_DMTIMER6,
++ .flags = IORESOURCE_IRQ,
++ },
++ {
++ .start = AM33XX_IRQ_CPSW_C0,
++ .end = AM33XX_IRQ_CPSW_C0,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static struct platform_device am33xx_cpsw_device = {
++ .name = "cpsw",
++ .id = 0,
++ .num_resources = ARRAY_SIZE(am33xx_cpsw_resources),
++ .resource = am33xx_cpsw_resources,
++ .dev = {
++ .platform_data = &am33xx_cpsw_pdata,
++ .dma_mask = &am33xx_cpsw_dmamask,
++ .coherent_dma_mask = DMA_BIT_MASK(32),
++ },
++};
++
++static unsigned char am33xx_macid0[ETH_ALEN];
++static unsigned char am33xx_macid1[ETH_ALEN];
++static unsigned int am33xx_evmid;
++
++/*
++* am33xx_evmid_fillup - set up board evmid
++* @evmid - evm id which needs to be configured
++*
++* This function is called to configure board evm id.
++* IA Motor Control EVM needs special setting of MAC PHY Id.
++* This function is called when IA Motor Control EVM is detected
++* during boot-up.
++*/
++void am33xx_evmid_fillup(unsigned int evmid)
++{
++ am33xx_evmid = evmid;
++ return;
++}
++
++/*
++* am33xx_cpsw_macidfillup - set up MAC addresses
++* @eeprommacid0 - MAC id 0 which needs to be configured
++* @eeprommacid1 - MAC id 1 which needs to be configured
++*
++* This function is called to configure MAC addresses.
++* MAC addresses are read from eeprom and this function is called
++* to store those MAC addresses in am33xx_macid0 and am33xx_macid1.
++* In case the MAC addresses read from eFuse are invalid, the MAC
++* addresses stored in these variables are used.
++*/
++void am33xx_cpsw_macidfillup(char *eeprommacid0, char *eeprommacid1)
++{
++ u32 i;
++
++	/* Fill up these MAC addresses with the MAC addresses from eeprom */
++ for (i = 0; i < ETH_ALEN; i++) {
++ am33xx_macid0[i] = eeprommacid0[i];
++ am33xx_macid1[i] = eeprommacid1[i];
++ }
++
++ return;
++}
++
++#define MII_MODE_ENABLE 0x0
++#define RMII_MODE_ENABLE 0x5
++#define RGMII_MODE_ENABLE 0xA
++#define MAC_MII_SEL 0x650
++
++void am33xx_cpsw_init(unsigned int gigen)
++{
++ u32 mac_lo, mac_hi;
++ u32 i;
++
++ mac_lo = omap_ctrl_readl(TI81XX_CONTROL_MAC_ID0_LO);
++ mac_hi = omap_ctrl_readl(TI81XX_CONTROL_MAC_ID0_HI);
++ am33xx_cpsw_slaves[0].mac_addr[0] = mac_hi & 0xFF;
++ am33xx_cpsw_slaves[0].mac_addr[1] = (mac_hi & 0xFF00) >> 8;
++ am33xx_cpsw_slaves[0].mac_addr[2] = (mac_hi & 0xFF0000) >> 16;
++ am33xx_cpsw_slaves[0].mac_addr[3] = (mac_hi & 0xFF000000) >> 24;
++ am33xx_cpsw_slaves[0].mac_addr[4] = mac_lo & 0xFF;
++ am33xx_cpsw_slaves[0].mac_addr[5] = (mac_lo & 0xFF00) >> 8;
++
++ /* Read MACID0 from eeprom if eFuse MACID is invalid */
++ if (!is_valid_ether_addr(am33xx_cpsw_slaves[0].mac_addr)) {
++ for (i = 0; i < ETH_ALEN; i++)
++ am33xx_cpsw_slaves[0].mac_addr[i] = am33xx_macid0[i];
++ }
++
++ mac_lo = omap_ctrl_readl(TI81XX_CONTROL_MAC_ID1_LO);
++ mac_hi = omap_ctrl_readl(TI81XX_CONTROL_MAC_ID1_HI);
++ am33xx_cpsw_slaves[1].mac_addr[0] = mac_hi & 0xFF;
++ am33xx_cpsw_slaves[1].mac_addr[1] = (mac_hi & 0xFF00) >> 8;
++ am33xx_cpsw_slaves[1].mac_addr[2] = (mac_hi & 0xFF0000) >> 16;
++ am33xx_cpsw_slaves[1].mac_addr[3] = (mac_hi & 0xFF000000) >> 24;
++ am33xx_cpsw_slaves[1].mac_addr[4] = mac_lo & 0xFF;
++ am33xx_cpsw_slaves[1].mac_addr[5] = (mac_lo & 0xFF00) >> 8;
++
++ /* Read MACID1 from eeprom if eFuse MACID is invalid */
++ if (!is_valid_ether_addr(am33xx_cpsw_slaves[1].mac_addr)) {
++ for (i = 0; i < ETH_ALEN; i++)
++ am33xx_cpsw_slaves[1].mac_addr[i] = am33xx_macid1[i];
++ }
++
++ if (am33xx_evmid == BEAGLE_BONE_OLD) {
++ __raw_writel(RMII_MODE_ENABLE,
++ AM33XX_CTRL_REGADDR(MAC_MII_SEL));
++ } else if (am33xx_evmid == BEAGLE_BONE_A3) {
++ __raw_writel(MII_MODE_ENABLE,
++ AM33XX_CTRL_REGADDR(MAC_MII_SEL));
++ } else if (am33xx_evmid == IND_AUT_MTR_EVM) {
++ am33xx_cpsw_slaves[0].phy_id = "0:1e";
++ am33xx_cpsw_slaves[1].phy_id = "0:00";
++ } else {
++ __raw_writel(RGMII_MODE_ENABLE,
++ AM33XX_CTRL_REGADDR(MAC_MII_SEL));
++ }
++
++ am33xx_cpsw_pdata.gigabit_en = gigen;
++
++ memcpy(am33xx_cpsw_pdata.mac_addr,
++ am33xx_cpsw_slaves[0].mac_addr, ETH_ALEN);
++ platform_device_register(&am33xx_cpsw_mdiodevice);
++ platform_device_register(&am33xx_cpsw_device);
++ clk_add_alias(NULL, dev_name(&am33xx_cpsw_mdiodevice.dev),
++ NULL, &am33xx_cpsw_device.dev);
++}
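++
++/*
++ * Init-order sketch (illustrative): a board file first hands over the
++ * EEPROM MAC addresses and the EVM id, then calls am33xx_cpsw_init() with
++ * the gigabit flag, so the eFuse fallback and MII/RMII/RGMII selection
++ * above take effect. The buffer names are placeholders.
++ *
++ *	am33xx_cpsw_macidfillup(eeprom_mac0, eeprom_mac1);
++ *	am33xx_evmid_fillup(BEAGLE_BONE_A3);
++ *	am33xx_cpsw_init(0);
++ */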
++
++#define AM33XX_DCAN_NUM_MSG_OBJS 64
++#define AM33XX_DCAN_RAMINIT_OFFSET 0x644
++#define AM33XX_DCAN_RAMINIT_START(n)	(0x1 << (n))
++
++static void d_can_hw_raminit(unsigned int instance, unsigned int enable)
++{
++ u32 val;
++
++ /* Read the value */
++ val = readl(AM33XX_CTRL_REGADDR(AM33XX_DCAN_RAMINIT_OFFSET));
++ if (enable) {
++ /* Set to "1" */
++ val &= ~AM33XX_DCAN_RAMINIT_START(instance);
++ val |= AM33XX_DCAN_RAMINIT_START(instance);
++ writel(val, AM33XX_CTRL_REGADDR(AM33XX_DCAN_RAMINIT_OFFSET));
++ } else {
++ /* Set to "0" */
++ val &= ~AM33XX_DCAN_RAMINIT_START(instance);
++ writel(val, AM33XX_CTRL_REGADDR(AM33XX_DCAN_RAMINIT_OFFSET));
++ }
++}
++
++/* dcan dev_attr */
++static struct d_can_platform_data am33xx_dcan_info = {
++ .num_of_msg_objs = AM33XX_DCAN_NUM_MSG_OBJS,
++ .ram_init = d_can_hw_raminit,
++ .dma_support = false,
++};
++
++void am33xx_d_can_init(unsigned int instance)
++{
++ struct omap_hwmod *oh;
++ struct platform_device *pdev;
++ char oh_name[L3_MODULES_MAX_LEN];
++
++ /* Copy string name to oh_name buffer */
++ snprintf(oh_name, L3_MODULES_MAX_LEN, "d_can%d", instance);
++
++ oh = omap_hwmod_lookup(oh_name);
++ if (!oh) {
++ pr_err("could not find %s hwmod data\n", oh_name);
++ return;
++ }
++
++ pdev = omap_device_build("d_can", instance, oh, &am33xx_dcan_info,
++ sizeof(am33xx_dcan_info), NULL, 0, 0);
++ if (IS_ERR(pdev))
++ pr_err("could not build omap_device for %s\n", oh_name);
++}
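++
++/*
++ * Usage sketch (illustrative): board code instantiates a controller by
++ * index, e.g. am33xx_d_can_init(1) for DCAN1 on boards that route CAN to
++ * that instance; the d_can driver is then expected to call the ram_init
++ * callback (d_can_hw_raminit) to toggle the message-RAM init bit.
++ */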
++
+ #if defined(CONFIG_OMAP_WATCHDOG) || defined(CONFIG_OMAP_WATCHDOG_MODULE)
+ static int __init omap_init_wdt(void)
+ {
+@@ -720,3 +1472,27 @@ static int __init omap_init_wdt(void)
+ }
+ subsys_initcall(omap_init_wdt);
+ #endif
++
++int __init omap_init_gpmc(struct gpmc_devices_info *pdata, int pdata_len)
++{
++ struct omap_hwmod *oh;
++ struct platform_device *pdev;
++ char *name = "omap-gpmc";
++ char *oh_name = "gpmc";
++
++ oh = omap_hwmod_lookup(oh_name);
++ if (!oh) {
++ pr_err("Could not look up %s\n", oh_name);
++ return -ENODEV;
++ }
++
++ pdev = omap_device_build(name, -1, oh, pdata,
++ pdata_len, NULL, 0, 0);
++ if (IS_ERR(pdev)) {
++ WARN(1, "Can't build omap_device for %s:%s.\n",
++ name, oh->name);
++ return PTR_ERR(pdev);
++ }
++
++ return 0;
++}
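++
++/*
++ * Usage sketch (illustrative): board code fills an array of
++ * gpmc_devices_info entries for the devices wired to the GPMC and
++ * registers the controller in one call; the array name is a placeholder.
++ *
++ *	static struct gpmc_devices_info bone_gpmc_device_info[2];
++ *
++ *	omap_init_gpmc(bone_gpmc_device_info, sizeof(bone_gpmc_device_info));
++ */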
+diff --git a/arch/arm/mach-omap2/devices.h b/arch/arm/mach-omap2/devices.h
+index f61eb6e..4725325 100644
+--- a/arch/arm/mach-omap2/devices.h
++++ b/arch/arm/mach-omap2/devices.h
+@@ -16,4 +16,12 @@ struct isp_platform_data;
+
+ int omap3_init_camera(struct isp_platform_data *pdata);
+
++int __init am335x_register_mcasp(struct snd_platform_data *pdata, int ctrl_nr);
++extern int __init am33xx_register_tsc(struct tsc_data *pdata);
++extern int __init am33xx_register_ecap(int id,
++ struct pwmss_platform_data *pdata);
++extern int __init am33xx_register_ehrpwm(int id,
++ struct pwmss_platform_data *pdata);
++extern int __init omap_init_elm(void);
++
+ #endif
+diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
+index dce9905..bc6cf86 100644
+--- a/arch/arm/mach-omap2/display.c
++++ b/arch/arm/mach-omap2/display.c
+@@ -22,12 +22,13 @@
+ #include <linux/io.h>
+ #include <linux/clk.h>
+ #include <linux/err.h>
++#include <linux/delay.h>
+
+ #include <video/omapdss.h>
+ #include <plat/omap_hwmod.h>
+ #include <plat/omap_device.h>
+ #include <plat/omap-pm.h>
+-#include <plat/common.h>
++#include "common.h"
+
+ #include "control.h"
+ #include "display.h"
+diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c
+index fc56745..bf0db5d 100644
+--- a/arch/arm/mach-omap2/dpll3xxx.c
++++ b/arch/arm/mach-omap2/dpll3xxx.c
+@@ -142,7 +142,8 @@ static int _omap3_noncore_dpll_lock(struct clk *clk)
+
+ ai = omap3_dpll_autoidle_read(clk);
+
+- omap3_dpll_deny_idle(clk);
++ if (ai)
++ omap3_dpll_deny_idle(clk);
+
+ _omap3_dpll_write_clken(clk, DPLL_LOCKED);
+
+@@ -186,8 +187,6 @@ static int _omap3_noncore_dpll_bypass(struct clk *clk)
+
+ if (ai)
+ omap3_dpll_allow_idle(clk);
+- else
+- omap3_dpll_deny_idle(clk);
+
+ return r;
+ }
+@@ -216,8 +215,6 @@ static int _omap3_noncore_dpll_stop(struct clk *clk)
+
+ if (ai)
+ omap3_dpll_allow_idle(clk);
+- else
+- omap3_dpll_deny_idle(clk);
+
+ return 0;
+ }
+@@ -301,10 +298,10 @@ static int omap3_noncore_dpll_program(struct clk *clk, u16 m, u8 n, u16 freqsel)
+ _omap3_noncore_dpll_bypass(clk);
+
+ /*
+- * Set jitter correction. No jitter correction for OMAP4 and 3630
+- * since freqsel field is no longer present
++ * Set jitter correction. No jitter correction for OMAP4, 3630
++ * and AM33XX since freqsel field is no longer present
+ */
+- if (!cpu_is_omap44xx() && !cpu_is_omap3630()) {
++ if (!cpu_is_omap44xx() && !cpu_is_omap3630() && !cpu_is_am33xx()) {
+ v = __raw_readl(dd->control_reg);
+ v &= ~dd->freqsel_mask;
+ v |= freqsel << __ffs(dd->freqsel_mask);
+@@ -463,8 +460,9 @@ int omap3_noncore_dpll_set_rate(struct clk *clk, unsigned long rate)
+ if (dd->last_rounded_rate == 0)
+ return -EINVAL;
+
+- /* No freqsel on OMAP4 and OMAP3630 */
+- if (!cpu_is_omap44xx() && !cpu_is_omap3630()) {
++ /* No freqsel on OMAP4, OMAP3630 and AM33XX */
++ if (!cpu_is_omap44xx() && !cpu_is_omap3630() &&
++ !cpu_is_am33xx()) {
+ freqsel = _omap3_dpll_compute_freqsel(clk,
+ dd->last_rounded_n);
+ if (!freqsel)
+@@ -519,6 +517,9 @@ u32 omap3_dpll_autoidle_read(struct clk *clk)
+
+ dd = clk->dpll_data;
+
++ if (!dd->autoidle_reg)
++ return -EINVAL;
++
+ v = __raw_readl(dd->autoidle_reg);
+ v &= dd->autoidle_mask;
+ v >>= __ffs(dd->autoidle_mask);
+@@ -545,6 +546,12 @@ void omap3_dpll_allow_idle(struct clk *clk)
+
+ dd = clk->dpll_data;
+
++ if (!dd->autoidle_reg) {
++ pr_debug("clock: DPLL %s: autoidle not supported\n",
++ clk->name);
++ return;
++ }
++
+ /*
+ * REVISIT: CORE DPLL can optionally enter low-power bypass
+ * by writing 0x5 instead of 0x1. Add some mechanism to
+@@ -554,6 +561,7 @@ void omap3_dpll_allow_idle(struct clk *clk)
+ v &= ~dd->autoidle_mask;
+ v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask);
+ __raw_writel(v, dd->autoidle_reg);
++
+ }
+
+ /**
+@@ -572,6 +580,12 @@ void omap3_dpll_deny_idle(struct clk *clk)
+
+ dd = clk->dpll_data;
+
++ if (!dd->autoidle_reg) {
++ pr_debug("clock: DPLL %s: autoidle not supported\n",
++ clk->name);
++ return;
++ }
++
+ v = __raw_readl(dd->autoidle_reg);
+ v &= ~dd->autoidle_mask;
+ v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask);
+diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
+index 8ad210b..0207a66 100644
+--- a/arch/arm/mach-omap2/gpmc-nand.c
++++ b/arch/arm/mach-omap2/gpmc-nand.c
+@@ -81,12 +81,37 @@ static int omap2_nand_gpmc_retime(struct omap_nand_platform_data *gpmc_nand_data
+ return 0;
+ }
+
+-int __init gpmc_nand_init(struct omap_nand_platform_data *gpmc_nand_data)
++int __devinit gpmc_nand_init(struct omap_nand_platform_data *gpmc_nand_data)
+ {
+ int err = 0;
++ u8 cs = 0;
+ struct device *dev = &gpmc_nand_device.dev;
+
++	/* if cs not provided, find out the chip-select on which NAND exists */
++ if (gpmc_nand_data->cs > GPMC_CS_NUM)
++ while (cs < GPMC_CS_NUM) {
++ u32 ret = 0;
++ ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
++
++ if ((ret & 0xC00) == 0x800) {
++ printk(KERN_INFO "Found NAND on CS%d\n", cs);
++ gpmc_nand_data->cs = cs;
++ break;
++ }
++ cs++;
++ }
++
++ if (gpmc_nand_data->cs > GPMC_CS_NUM) {
++ printk(KERN_INFO "NAND: Unable to find configuration "
++		printk(KERN_INFO "NAND: Unable to find configuration "
++							"in GPMC\n");
++ }
++
+ gpmc_nand_device.dev.platform_data = gpmc_nand_data;
++ gpmc_nand_data->ctrlr_suspend = gpmc_suspend;
++ gpmc_nand_data->ctrlr_resume = gpmc_resume;
++
++ printk(KERN_INFO "Registering NAND on CS%d\n", gpmc_nand_data->cs);
+
+ err = gpmc_cs_request(gpmc_nand_data->cs, NAND_IO_SIZE,
+ &gpmc_nand_data->phys_base);
+diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
+index dfffbbf..9a276f8 100644
+--- a/arch/arm/mach-omap2/gpmc.c
++++ b/arch/arm/mach-omap2/gpmc.c
+@@ -14,6 +14,8 @@
+ */
+ #undef DEBUG
+
++#include <linux/platform_device.h>
++
+ #include <linux/irq.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+@@ -24,9 +26,11 @@
+ #include <linux/io.h>
+ #include <linux/module.h>
+ #include <linux/interrupt.h>
++#include <linux/pm_runtime.h>
+
+ #include <asm/mach-types.h>
+ #include <plat/gpmc.h>
++#include <plat/nand.h>
+
+ #include <plat/sdrc.h>
+
+@@ -49,6 +53,7 @@
+ #define GPMC_ECC_CONTROL 0x1f8
+ #define GPMC_ECC_SIZE_CONFIG 0x1fc
+ #define GPMC_ECC1_RESULT 0x200
++#define GPMC_ECC_BCH_RESULT_0 0x240
+
+ #define GPMC_CS0_OFFSET 0x60
+ #define GPMC_CS_SIZE 0x30
+@@ -91,58 +96,99 @@ struct omap3_gpmc_regs {
+ struct gpmc_cs_config cs_context[GPMC_CS_NUM];
+ };
+
+-static struct resource gpmc_mem_root;
+-static struct resource gpmc_cs_mem[GPMC_CS_NUM];
+-static DEFINE_SPINLOCK(gpmc_mem_lock);
+-static unsigned int gpmc_cs_map; /* flag for cs which are initialized */
+-static int gpmc_ecc_used = -EINVAL; /* cs using ecc engine */
+
+-static void __iomem *gpmc_base;
++#define DRIVER_NAME "omap-gpmc"
+
+-static struct clk *gpmc_l3_clk;
++struct gpmc {
++ struct device *dev;
++ void __iomem *io_base;
++ unsigned long phys_base;
++ u32 memsize;
++ unsigned int cs_map;
++ int ecc_used;
++ spinlock_t mem_lock;
++ struct resource mem_root;
++ struct resource cs_mem[GPMC_CS_NUM];
++};
+
+-static irqreturn_t gpmc_handle_irq(int irq, void *dev);
++static struct gpmc *gpmc;
+
+ static void gpmc_write_reg(int idx, u32 val)
+ {
+- __raw_writel(val, gpmc_base + idx);
++ writel(val, gpmc->io_base + idx);
+ }
+
+ static u32 gpmc_read_reg(int idx)
+ {
+- return __raw_readl(gpmc_base + idx);
++ return readl(gpmc->io_base + idx);
+ }
+
+ static void gpmc_cs_write_byte(int cs, int idx, u8 val)
+ {
+ void __iomem *reg_addr;
+
+- reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
+- __raw_writeb(val, reg_addr);
++ reg_addr = gpmc->io_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
++ writeb(val, reg_addr);
+ }
+
+ static u8 gpmc_cs_read_byte(int cs, int idx)
+ {
+ void __iomem *reg_addr;
+
+- reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
+- return __raw_readb(reg_addr);
++ reg_addr = gpmc->io_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
++ return readb(reg_addr);
+ }
+
+ void gpmc_cs_write_reg(int cs, int idx, u32 val)
+ {
+ void __iomem *reg_addr;
+
+- reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
+- __raw_writel(val, reg_addr);
++ if (!gpmc) {
++ pr_err("%s invoked without initializing GPMC\n", __func__);
++ return;
++ }
++
++ reg_addr = gpmc->io_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
++ writel(val, reg_addr);
+ }
+
+ u32 gpmc_cs_read_reg(int cs, int idx)
+ {
+ void __iomem *reg_addr;
+
+- reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
+- return __raw_readl(reg_addr);
++ if (!gpmc) {
++ pr_err("%s invoked without initializing GPMC\n", __func__);
++ return 0;
++ }
++
++ reg_addr = gpmc->io_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
++ return readl(reg_addr);
++}
++
++static struct clk *gpmc_l3_clk;
++
++static void __devinit gpmc_clk_init(struct device *dev)
++{
++ char *ck = NULL;
++
++ if (cpu_is_omap24xx())
++ ck = "core_l3_ck";
++ else if (cpu_is_omap34xx())
++ ck = "gpmc_fck";
++ else if (cpu_is_omap44xx())
++ ck = "gpmc_ck";
++
++ if (WARN_ON(!ck))
++ return;
++
++ gpmc_l3_clk = clk_get(NULL, ck);
++ if (IS_ERR(gpmc_l3_clk)) {
++ printk(KERN_ERR "Could not get GPMC clock %s\n", ck);
++ BUG();
++ }
++
++ pm_runtime_enable(dev);
++ pm_runtime_get_sync(dev);
+ }
+
+ /* TODO: Add support for gpmc_fck to clock framework and use it */
+@@ -341,6 +387,11 @@ static void gpmc_cs_get_memconf(int cs, u32 *base, u32 *size)
+ *base = (l & 0x3f) << GPMC_CHUNK_SHIFT;
+ mask = (l >> 8) & 0x0f;
+ *size = (1 << GPMC_SECTION_SHIFT) - (mask << GPMC_CHUNK_SHIFT);
++
++ if (cpu_is_am33xx()) {
++ *base = 0x8000000;
++ *size = 0x10000000;
++ }
+ }
+
+ static int gpmc_cs_mem_enabled(int cs)
+@@ -356,8 +407,8 @@ int gpmc_cs_set_reserved(int cs, int reserved)
+ if (cs > GPMC_CS_NUM)
+ return -ENODEV;
+
+- gpmc_cs_map &= ~(1 << cs);
+- gpmc_cs_map |= (reserved ? 1 : 0) << cs;
++ gpmc->cs_map &= ~(1 << cs);
++ gpmc->cs_map |= (reserved ? 1 : 0) << cs;
+
+ return 0;
+ }
+@@ -367,7 +418,7 @@ int gpmc_cs_reserved(int cs)
+ if (cs > GPMC_CS_NUM)
+ return -ENODEV;
+
+- return gpmc_cs_map & (1 << cs);
++ return gpmc->cs_map & (1 << cs);
+ }
+
+ static unsigned long gpmc_mem_align(unsigned long size)
+@@ -386,22 +437,22 @@ static unsigned long gpmc_mem_align(unsigned long size)
+
+ static int gpmc_cs_insert_mem(int cs, unsigned long base, unsigned long size)
+ {
+- struct resource *res = &gpmc_cs_mem[cs];
++ struct resource *res = &gpmc->cs_mem[cs];
+ int r;
+
+ size = gpmc_mem_align(size);
+- spin_lock(&gpmc_mem_lock);
++ spin_lock(&gpmc->mem_lock);
+ res->start = base;
+ res->end = base + size - 1;
+- r = request_resource(&gpmc_mem_root, res);
+- spin_unlock(&gpmc_mem_lock);
++ r = request_resource(&gpmc->mem_root, res);
++ spin_unlock(&gpmc->mem_lock);
+
+ return r;
+ }
+
+ int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
+ {
+- struct resource *res = &gpmc_cs_mem[cs];
++ struct resource *res = &gpmc->cs_mem[cs];
+ int r = -1;
+
+ if (cs > GPMC_CS_NUM)
+@@ -411,7 +462,7 @@ int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
+ if (size > (1 << GPMC_SECTION_SHIFT))
+ return -ENOMEM;
+
+- spin_lock(&gpmc_mem_lock);
++ spin_lock(&gpmc->mem_lock);
+ if (gpmc_cs_reserved(cs)) {
+ r = -EBUSY;
+ goto out;
+@@ -419,7 +470,7 @@ int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
+ if (gpmc_cs_mem_enabled(cs))
+ r = adjust_resource(res, res->start & ~(size - 1), size);
+ if (r < 0)
+- r = allocate_resource(&gpmc_mem_root, res, size, 0, ~0,
++ r = allocate_resource(&gpmc->mem_root, res, size, 0, ~0,
+ size, NULL, NULL);
+ if (r < 0)
+ goto out;
+@@ -428,24 +479,24 @@ int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
+ *base = res->start;
+ gpmc_cs_set_reserved(cs, 1);
+ out:
+- spin_unlock(&gpmc_mem_lock);
++ spin_unlock(&gpmc->mem_lock);
+ return r;
+ }
+ EXPORT_SYMBOL(gpmc_cs_request);
+
+ void gpmc_cs_free(int cs)
+ {
+- spin_lock(&gpmc_mem_lock);
++ spin_lock(&gpmc->mem_lock);
+ if (cs >= GPMC_CS_NUM || cs < 0 || !gpmc_cs_reserved(cs)) {
+ printk(KERN_ERR "Trying to free non-reserved GPMC CS%d\n", cs);
+ BUG();
+- spin_unlock(&gpmc_mem_lock);
++ spin_unlock(&gpmc->mem_lock);
+ return;
+ }
+ gpmc_cs_disable_mem(cs);
+- release_resource(&gpmc_cs_mem[cs]);
++ release_resource(&gpmc->cs_mem[cs]);
+ gpmc_cs_set_reserved(cs, 0);
+- spin_unlock(&gpmc_mem_lock);
++ spin_unlock(&gpmc->mem_lock);
+ }
+ EXPORT_SYMBOL(gpmc_cs_free);
+
+@@ -668,7 +719,7 @@ int gpmc_prefetch_reset(int cs)
+ }
+ EXPORT_SYMBOL(gpmc_prefetch_reset);
+
+-static void __init gpmc_mem_init(void)
++static void __devinit gpmc_mem_init(void)
+ {
+ int cs;
+ unsigned long boot_rom_space = 0;
+@@ -680,8 +731,8 @@ static void __init gpmc_mem_init(void)
+ /* In apollon the CS0 is mapped as 0x0000 0000 */
+ if (machine_is_omap_apollon())
+ boot_rom_space = 0;
+- gpmc_mem_root.start = GPMC_MEM_START + boot_rom_space;
+- gpmc_mem_root.end = GPMC_MEM_END;
++ gpmc->mem_root.start = GPMC_MEM_START + boot_rom_space;
++ gpmc->mem_root.end = GPMC_MEM_END;
+
+ /* Reserve all regions that has been set up by bootloader */
+ for (cs = 0; cs < GPMC_CS_NUM; cs++) {
+@@ -695,85 +746,107 @@ static void __init gpmc_mem_init(void)
+ }
+ }
+
+-static int __init gpmc_init(void)
++struct device *gpmc_dev;
++
++static int __devinit gpmc_probe(struct platform_device *pdev)
+ {
+- u32 l, irq;
+- int cs, ret = -EINVAL;
+- int gpmc_irq;
+- char *ck = NULL;
++ u32 l;
++ int ret = -EINVAL;
++ struct resource *res = NULL;
++ struct gpmc_devices_info *gpmc_device = pdev->dev.platform_data;
++ void *p;
+
+- if (cpu_is_omap24xx()) {
+- ck = "core_l3_ck";
+- if (cpu_is_omap2420())
+- l = OMAP2420_GPMC_BASE;
+- else
+- l = OMAP34XX_GPMC_BASE;
+- gpmc_irq = INT_34XX_GPMC_IRQ;
+- } else if (cpu_is_omap34xx()) {
+- ck = "gpmc_fck";
+- l = OMAP34XX_GPMC_BASE;
+- gpmc_irq = INT_34XX_GPMC_IRQ;
+- } else if (cpu_is_omap44xx()) {
+- ck = "gpmc_ck";
+- l = OMAP44XX_GPMC_BASE;
+- gpmc_irq = OMAP44XX_IRQ_GPMC;
+- }
++ /* XXX: This should go away with HWMOD & runtime PM adaptation */
++ gpmc_clk_init(&pdev->dev);
+
+- if (WARN_ON(!ck))
+- return ret;
++ gpmc_dev = &pdev->dev;
+
+- gpmc_l3_clk = clk_get(NULL, ck);
+- if (IS_ERR(gpmc_l3_clk)) {
+- printk(KERN_ERR "Could not get GPMC clock %s\n", ck);
+- BUG();
++ gpmc = devm_kzalloc(&pdev->dev, sizeof(struct gpmc), GFP_KERNEL);
++ if (!gpmc)
++ return -ENOMEM;
++
++ gpmc->dev = &pdev->dev;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ ret = -ENOENT;
++ dev_err(gpmc->dev, "Failed to get resource: memory\n");
++ goto err_res;
++ }
++ gpmc->phys_base = res->start;
++ gpmc->memsize = resource_size(res);
++
++ if (request_mem_region(gpmc->phys_base,
++ gpmc->memsize, DRIVER_NAME) == NULL) {
++ ret = -ENOMEM;
++ dev_err(gpmc->dev, "Failed to request memory region\n");
++ goto err_mem;
+ }
+
+- gpmc_base = ioremap(l, SZ_4K);
+- if (!gpmc_base) {
+- clk_put(gpmc_l3_clk);
+- printk(KERN_ERR "Could not get GPMC register memory\n");
+- BUG();
++ gpmc->io_base = ioremap(gpmc->phys_base, gpmc->memsize);
++ if (!gpmc->io_base) {
++ ret = -ENOMEM;
++ dev_err(gpmc->dev, "Failed to ioremap memory\n");
++ goto err_remap;
+ }
+
+- clk_enable(gpmc_l3_clk);
++ gpmc->ecc_used = -EINVAL;
++ spin_lock_init(&gpmc->mem_lock);
++ platform_set_drvdata(pdev, gpmc);
+
+ l = gpmc_read_reg(GPMC_REVISION);
+- printk(KERN_INFO "GPMC revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f);
+- /* Set smart idle mode and automatic L3 clock gating */
+- l = gpmc_read_reg(GPMC_SYSCONFIG);
+- l &= 0x03 << 3;
+- l |= (0x02 << 3) | (1 << 0);
+- gpmc_write_reg(GPMC_SYSCONFIG, l);
++ dev_info(gpmc->dev, "GPMC revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f);
++
+ gpmc_mem_init();
+
+- /* initalize the irq_chained */
+- irq = OMAP_GPMC_IRQ_BASE;
+- for (cs = 0; cs < GPMC_CS_NUM; cs++) {
+- irq_set_chip_and_handler(irq, &dummy_irq_chip,
+- handle_simple_irq);
+- set_irq_flags(irq, IRQF_VALID);
+- irq++;
+- }
++ for (p = gpmc_device->pdata; p; gpmc_device++, p = gpmc_device->pdata)
++ if (gpmc_device->flag & GPMC_DEVICE_NAND)
++ gpmc_nand_init((struct omap_nand_platform_data *) p);
++ return 0;
+
+- ret = request_irq(gpmc_irq,
+- gpmc_handle_irq, IRQF_SHARED, "gpmc", gpmc_base);
+- if (ret)
+- pr_err("gpmc: irq-%d could not claim: err %d\n",
+- gpmc_irq, ret);
++err_remap:
++ release_mem_region(gpmc->phys_base, gpmc->memsize);
++err_mem:
++err_res:
++ devm_kfree(&pdev->dev, gpmc);
+ return ret;
+ }
+-postcore_initcall(gpmc_init);
+
+-static irqreturn_t gpmc_handle_irq(int irq, void *dev)
++static int __devexit gpmc_remove(struct platform_device *pdev)
+ {
+- u8 cs;
++ struct gpmc *gpmc = platform_get_drvdata(pdev);
++
++ platform_set_drvdata(pdev, NULL);
++ iounmap(gpmc->io_base);
++ release_mem_region(gpmc->phys_base, gpmc->memsize);
++ devm_kfree(&pdev->dev, gpmc);
++
++ return 0;
++}
+
+- /* check cs to invoke the irq */
+- cs = ((gpmc_read_reg(GPMC_PREFETCH_CONFIG1)) >> CS_NUM_SHIFT) & 0x7;
+- if (OMAP_GPMC_IRQ_BASE+cs <= OMAP_GPMC_IRQ_END)
+- generic_handle_irq(OMAP_GPMC_IRQ_BASE+cs);
++static struct platform_driver gpmc_driver = {
++ .probe = gpmc_probe,
++ .remove = __devexit_p(gpmc_remove),
++ .driver = {
++ .name = DRIVER_NAME,
++ .owner = THIS_MODULE,
++ },
++};
++
++module_platform_driver(gpmc_driver);
+
+- return IRQ_HANDLED;
++int gpmc_suspend(void)
++{
++ omap3_gpmc_save_context();
++ pm_runtime_put_sync(gpmc_dev);
++ return 0;
++}
++
++int gpmc_resume(void)
++{
++ pm_runtime_get_sync(gpmc_dev);
++ omap3_gpmc_restore_context();
++ return 0;
+ }
+
+ #ifdef CONFIG_ARCH_OMAP3
+@@ -845,52 +918,74 @@ void omap3_gpmc_restore_context(void)
+
+ /**
+ * gpmc_enable_hwecc - enable hardware ecc functionality
++ * @ecc_type: ecc type e.g. Hamming, BCH
+ * @cs: chip select number
+ * @mode: read/write mode
+ * @dev_width: device bus width(1 for x16, 0 for x8)
+ * @ecc_size: bytes for which ECC will be generated
+ */
+-int gpmc_enable_hwecc(int cs, int mode, int dev_width, int ecc_size)
++int gpmc_enable_hwecc(int ecc_type, int cs, int mode,
++ int dev_width, int ecc_size)
+ {
+- unsigned int val;
+-
+- /* check if ecc module is in used */
+- if (gpmc_ecc_used != -EINVAL)
+- return -EINVAL;
+-
+- gpmc_ecc_used = cs;
+-
+- /* clear ecc and enable bits */
+- val = ((0x00000001<<8) | 0x00000001);
+- gpmc_write_reg(GPMC_ECC_CONTROL, val);
+-
+- /* program ecc and result sizes */
+- val = ((((ecc_size >> 1) - 1) << 22) | (0x0000000F));
+- gpmc_write_reg(GPMC_ECC_SIZE_CONFIG, val);
++ unsigned int bch_mod = 0, bch_wrapmode = 0, eccsize1 = 0, eccsize0 = 0;
++ unsigned int ecc_conf_val = 0, ecc_size_conf_val = 0;
+
+ switch (mode) {
+ case GPMC_ECC_READ:
+- gpmc_write_reg(GPMC_ECC_CONTROL, 0x101);
++ if (ecc_type == OMAP_ECC_BCH4_CODE_HW) {
++ eccsize1 = 0xD; eccsize0 = 0x48;
++ bch_mod = 0;
++ bch_wrapmode = 0x09;
++ } else if (ecc_type == OMAP_ECC_BCH8_CODE_HW) {
++ eccsize1 = 0x2; eccsize0 = 0x1A;
++ bch_mod = 1;
++ bch_wrapmode = 0x01;
++ } else
++ eccsize1 = ((ecc_size >> 1) - 1);
+ break;
+ case GPMC_ECC_READSYN:
+- gpmc_write_reg(GPMC_ECC_CONTROL, 0x100);
+ break;
+ case GPMC_ECC_WRITE:
+- gpmc_write_reg(GPMC_ECC_CONTROL, 0x101);
++ if (ecc_type == OMAP_ECC_BCH4_CODE_HW) {
++ eccsize1 = 0x20; eccsize0 = 0x00;
++ bch_mod = 0;
++ bch_wrapmode = 0x06;
++ } else if (ecc_type == OMAP_ECC_BCH8_CODE_HW) {
++ eccsize1 = 0x1c; eccsize0 = 0x00;
++ bch_mod = 1;
++ bch_wrapmode = 0x01;
++ } else
++ eccsize1 = ((ecc_size >> 1) - 1);
+ break;
+ default:
+ printk(KERN_INFO "Error: Unrecognized Mode[%d]!\n", mode);
+ break;
+ }
+
+- /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
+- val = (dev_width << 7) | (cs << 1) | (0x1);
+- gpmc_write_reg(GPMC_ECC_CONFIG, val);
++ /* clear ecc and enable bits */
++ if ((ecc_type == OMAP_ECC_BCH4_CODE_HW) ||
++ (ecc_type == OMAP_ECC_BCH8_CODE_HW)) {
++ gpmc_write_reg(GPMC_ECC_CONTROL, 0x00000001);
++ ecc_size_conf_val = (eccsize1 << 22) | (eccsize0 << 12);
++ ecc_conf_val = ((0x01 << 16) | (bch_mod << 12)
++ | (bch_wrapmode << 8) | (dev_width << 7)
++ | (0x00 << 4) | (cs << 1) | (0x1));
++ } else {
++ gpmc_write_reg(GPMC_ECC_CONTROL, 0x00000101);
++ ecc_size_conf_val = (eccsize1 << 22) | 0x0000000F;
++ ecc_conf_val = (dev_width << 7) | (cs << 1) | (0x1);
++ }
++
++ gpmc_write_reg(GPMC_ECC_SIZE_CONFIG, ecc_size_conf_val);
++ gpmc_write_reg(GPMC_ECC_CONFIG, ecc_conf_val);
++ gpmc_write_reg(GPMC_ECC_CONTROL, 0x00000101);
+ return 0;
+ }
++EXPORT_SYMBOL(gpmc_enable_hwecc);
+
+ /**
+ * gpmc_calculate_ecc - generate non-inverted ecc bytes
++ * @ecc_type: ecc type e.g. Hamming, BCH
+ * @cs: chip select number
+ * @dat: data pointer over which ecc is computed
+ * @ecc_code: ecc code buffer
+@@ -901,20 +996,51 @@ int gpmc_enable_hwecc(int cs, int mode, int dev_width, int ecc_size)
+ * an erased page will produce an ECC mismatch between generated and read
+ * ECC bytes that has to be dealt with separately.
+ */
+-int gpmc_calculate_ecc(int cs, const u_char *dat, u_char *ecc_code)
++int gpmc_calculate_ecc(int ecc_type, int cs,
++ const u_char *dat, u_char *ecc_code)
+ {
+- unsigned int val = 0x0;
+-
+- if (gpmc_ecc_used != cs)
+- return -EINVAL;
+-
+- /* read ecc result */
+- val = gpmc_read_reg(GPMC_ECC1_RESULT);
+- *ecc_code++ = val; /* P128e, ..., P1e */
+- *ecc_code++ = val >> 16; /* P128o, ..., P1o */
+- /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
+- *ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
++ unsigned int reg;
++ unsigned int val1 = 0x0, val2 = 0x0;
++ unsigned int val3 = 0x0, val4 = 0x0;
++ int i;
+
+- gpmc_ecc_used = -EINVAL;
++ if ((ecc_type == OMAP_ECC_BCH4_CODE_HW) ||
++ (ecc_type == OMAP_ECC_BCH8_CODE_HW)) {
++ for (i = 0; i < 1; i++) {
++ /*
++ * Reading HW ECC_BCH_Results
++ * 0x240-0x24C, 0x250-0x25C, 0x260-0x26C, 0x270-0x27C
++ */
++ reg = GPMC_ECC_BCH_RESULT_0 + (0x10 * i);
++ val1 = gpmc_read_reg(reg);
++ val2 = gpmc_read_reg(reg + 4);
++ if (ecc_type == OMAP_ECC_BCH8_CODE_HW) {
++ val3 = gpmc_read_reg(reg + 8);
++ val4 = gpmc_read_reg(reg + 12);
++
++ *ecc_code++ = (val4 & 0xFF);
++ *ecc_code++ = ((val3 >> 24) & 0xFF);
++ *ecc_code++ = ((val3 >> 16) & 0xFF);
++ *ecc_code++ = ((val3 >> 8) & 0xFF);
++ *ecc_code++ = (val3 & 0xFF);
++ *ecc_code++ = ((val2 >> 24) & 0xFF);
++ }
++ *ecc_code++ = ((val2 >> 16) & 0xFF);
++ *ecc_code++ = ((val2 >> 8) & 0xFF);
++ *ecc_code++ = (val2 & 0xFF);
++ *ecc_code++ = ((val1 >> 24) & 0xFF);
++ *ecc_code++ = ((val1 >> 16) & 0xFF);
++ *ecc_code++ = ((val1 >> 8) & 0xFF);
++ *ecc_code++ = (val1 & 0xFF);
++ }
++ } else {
++ /* read ecc result */
++ val1 = gpmc_read_reg(GPMC_ECC1_RESULT);
++ *ecc_code++ = val1; /* P128e, ..., P1e */
++ *ecc_code++ = val1 >> 16; /* P128o, ..., P1o */
++ /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
++ *ecc_code++ = ((val1 >> 8) & 0x0f) | ((val1 >> 20) & 0xf0);
++ }
+ return 0;
+ }
++EXPORT_SYMBOL(gpmc_calculate_ecc);
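
The two helpers exported above now take an ecc_type argument, so callers have to name the ECC scheme on every call. A minimal sketch of the expected calling sequence for BCH8 on chip-select 0 follows; OMAP_ECC_BCH8_CODE_HW is assumed to come from <plat/nand.h> as included at the top of this file, and the chip select, sector size and buffer handling are placeholders rather than the real omap2 NAND driver code.

/* illustrative only: program BCH8 for a write, then fetch the 13 ECC
 * bytes that the result loop above packs per 512-byte sector */
static int example_bch8_write_sector(const u_char *sector,
				     u_char *ecc_buf, int dev_width)
{
	int ret;

	ret = gpmc_enable_hwecc(OMAP_ECC_BCH8_CODE_HW, 0 /* cs */,
				GPMC_ECC_WRITE, dev_width, 512);
	if (ret)
		return ret;

	/* ... the NAND path would now push the 512-byte sector ... */

	return gpmc_calculate_ecc(OMAP_ECC_BCH8_CODE_HW, 0 /* cs */,
				  sector, ecc_buf);
}
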
+diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
+index f4a1020..93fbf39 100644
+--- a/arch/arm/mach-omap2/hsmmc.c
++++ b/arch/arm/mach-omap2/hsmmc.c
+@@ -171,6 +171,17 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
+ }
+ }
+
++static void hsmmc2_select_input_clk_src(struct omap_mmc_platform_data *mmc)
++{
++ u32 reg;
++
++ if (mmc->slots[0].internal_clock) {
++ reg = omap_ctrl_readl(control_devconf1_offset);
++ reg |= OMAP2_MMCSDIO2ADPCLKISEL;
++ omap_ctrl_writel(reg, control_devconf1_offset);
++ }
++}
++
+ static void hsmmc23_before_set_reg(struct device *dev, int slot,
+ int power_on, int vdd)
+ {
+@@ -179,16 +190,19 @@ static void hsmmc23_before_set_reg(struct device *dev, int slot,
+ if (mmc->slots[0].remux)
+ mmc->slots[0].remux(dev, slot, power_on);
+
+- if (power_on) {
+- /* Only MMC2 supports a CLKIN */
+- if (mmc->slots[0].internal_clock) {
+- u32 reg;
++ if (power_on)
++ hsmmc2_select_input_clk_src(mmc);
++}
+
+- reg = omap_ctrl_readl(control_devconf1_offset);
+- reg |= OMAP2_MMCSDIO2ADPCLKISEL;
+- omap_ctrl_writel(reg, control_devconf1_offset);
+- }
+- }
++static int am35x_hsmmc2_set_power(struct device *dev, int slot,
++ int power_on, int vdd)
++{
++ struct omap_mmc_platform_data *mmc = dev->platform_data;
++
++ if (power_on)
++ hsmmc2_select_input_clk_src(mmc);
++
++ return 0;
+ }
+
+ static int nop_mmc_set_power(struct device *dev, int slot, int power_on,
+@@ -200,10 +214,12 @@ static int nop_mmc_set_power(struct device *dev, int slot, int power_on,
+ static inline void omap_hsmmc_mux(struct omap_mmc_platform_data *mmc_controller,
+ int controller_nr)
+ {
+- if (gpio_is_valid(mmc_controller->slots[0].switch_pin))
++ if (gpio_is_valid(mmc_controller->slots[0].switch_pin) &&
++ (mmc_controller->slots[0].switch_pin < OMAP_MAX_GPIO_LINES))
+ omap_mux_init_gpio(mmc_controller->slots[0].switch_pin,
+ OMAP_PIN_INPUT_PULLUP);
+- if (gpio_is_valid(mmc_controller->slots[0].gpio_wp))
++ if (gpio_is_valid(mmc_controller->slots[0].gpio_wp) &&
++ (mmc_controller->slots[0].gpio_wp < OMAP_MAX_GPIO_LINES))
+ omap_mux_init_gpio(mmc_controller->slots[0].gpio_wp,
+ OMAP_PIN_INPUT_PULLUP);
+ if (cpu_is_omap34xx()) {
+@@ -288,6 +304,9 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
+ return -ENOMEM;
+ }
+
++ if (cpu_is_am33xx())
++ mmc->version = MMC_CTRL_VERSION_2;
++
+ if (c->name)
+ strncpy(hc_name, c->name, HSMMC_NAME_LEN);
+ else
+@@ -296,6 +315,7 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
+ mmc->slots[0].name = hc_name;
+ mmc->nr_slots = 1;
+ mmc->slots[0].caps = c->caps;
++ mmc->slots[0].pm_caps = c->pm_caps;
+ mmc->slots[0].internal_clock = !c->ext_clock;
+ mmc->dma_mask = 0xffffffff;
+ if (cpu_is_omap44xx())
+@@ -336,14 +356,21 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
+ *
+ * temporary HACK: ocr_mask instead of fixed supply
+ */
+- mmc->slots[0].ocr_mask = c->ocr_mask;
+-
+- if (cpu_is_omap3517() || cpu_is_omap3505())
+- mmc->slots[0].set_power = nop_mmc_set_power;
++ if (cpu_is_omap3505() || cpu_is_omap3517())
++ mmc->slots[0].ocr_mask = MMC_VDD_165_195 |
++ MMC_VDD_26_27 |
++ MMC_VDD_27_28 |
++ MMC_VDD_29_30 |
++ MMC_VDD_30_31 |
++ MMC_VDD_31_32;
+ else
++ mmc->slots[0].ocr_mask = c->ocr_mask;
++
++ if (!cpu_is_omap3517() && !cpu_is_omap3505() && !cpu_is_am33xx())
+ mmc->slots[0].features |= HSMMC_HAS_PBIAS;
+
+- if (cpu_is_omap44xx() && (omap_rev() > OMAP4430_REV_ES1_0))
++ if ((cpu_is_omap44xx() && (omap_rev() > OMAP4430_REV_ES1_0)) ||
++ cpu_is_am33xx())
+ mmc->slots[0].features |= HSMMC_HAS_UPDATED_RESET;
+
+ switch (c->mmc) {
+@@ -363,6 +390,9 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
+ }
+ }
+
++ if (cpu_is_omap3517() || cpu_is_omap3505() || cpu_is_am33xx())
++ mmc->slots[0].set_power = nop_mmc_set_power;
++
+ /* OMAP3630 HSMMC1 supports only 4-bit */
+ if (cpu_is_omap3630() &&
+ (c->caps & MMC_CAP_8_BIT_DATA)) {
+@@ -372,6 +402,12 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
+ }
+ break;
+ case 2:
++ if (cpu_is_omap3517() || cpu_is_omap3505())
++ mmc->slots[0].set_power = am35x_hsmmc2_set_power;
++
++ if (cpu_is_am33xx())
++ mmc->slots[0].set_power = nop_mmc_set_power;
++
+ if (c->ext_clock)
+ c->transceiver = 1;
+ if (c->transceiver && (c->caps & MMC_CAP_8_BIT_DATA)) {
+@@ -421,7 +457,9 @@ void __init omap_init_hsmmc(struct omap2_hsmmc_info *hsmmcinfo, int ctrl_nr)
+ pr_err("%s fails!\n", __func__);
+ goto done;
+ }
+- omap_hsmmc_mux(mmc_data, (ctrl_nr - 1));
++
++ if (!cpu_is_am33xx())
++ omap_hsmmc_mux(mmc_data, (ctrl_nr - 1));
+
+ name = "omap_hsmmc";
+
+diff --git a/arch/arm/mach-omap2/hsmmc.h b/arch/arm/mach-omap2/hsmmc.h
+index f757e78..c440973 100644
+--- a/arch/arm/mach-omap2/hsmmc.h
++++ b/arch/arm/mach-omap2/hsmmc.h
+@@ -12,6 +12,7 @@ struct omap2_hsmmc_info {
+ u8 mmc; /* controller 1/2/3 */
+ u32 caps; /* 4/8 wires and any additional host
+ * capabilities OR'd (ref. linux/mmc/host.h) */
++ u32 pm_caps; /* PM capabilities */
+ bool transceiver; /* MMC-2 option */
+ bool ext_clock; /* use external pin for input clock */
+ bool cover_only; /* No card detect - just cover switch */
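
The new pm_caps member is copied straight into the MMC slot platform data by the hsmmc.c hunk above, which lets a board ask for things such as keeping an SDIO card powered across suspend. A hedged sketch of such a table follows; MMC_CAP_4_BIT_DATA and MMC_PM_KEEP_POWER come from linux/mmc/host.h, while the slot layout and GPIO values are placeholders, not a real board.

static struct omap2_hsmmc_info example_mmc[] __initdata = {
	{
		.mmc		= 1,
		.caps		= MMC_CAP_4_BIT_DATA,
		.gpio_cd	= -EINVAL,		/* no card detect */
		.gpio_wp	= -EINVAL,		/* no write protect */
	},
	{
		.mmc		= 2,			/* SDIO slot */
		.caps		= MMC_CAP_4_BIT_DATA,
		.pm_caps	= MMC_PM_KEEP_POWER,	/* stay powered in suspend */
		.gpio_cd	= -EINVAL,
		.gpio_wp	= -EINVAL,
	},
	{}	/* terminator */
};
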
+diff --git a/arch/arm/mach-omap2/i2c.c b/arch/arm/mach-omap2/i2c.c
+index ace9994..a12e224 100644
+--- a/arch/arm/mach-omap2/i2c.c
++++ b/arch/arm/mach-omap2/i2c.c
+@@ -21,7 +21,7 @@
+
+ #include <plat/cpu.h>
+ #include <plat/i2c.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/omap_hwmod.h>
+
+ #include "mux.h"
+diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
+index 7f47092..7b913d3 100644
+--- a/arch/arm/mach-omap2/id.c
++++ b/arch/arm/mach-omap2/id.c
+@@ -21,7 +21,7 @@
+
+ #include <asm/cputype.h>
+
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/cpu.h>
+
+ #include <mach/id.h>
+@@ -29,7 +29,7 @@
+ #include "control.h"
+
+ static unsigned int omap_revision;
+-
++static const char *cpu_rev;
+ u32 omap_features;
+
+ unsigned int omap_rev(void)
+@@ -44,6 +44,8 @@ int omap_type(void)
+
+ if (cpu_is_omap24xx()) {
+ val = omap_ctrl_readl(OMAP24XX_CONTROL_STATUS);
++ } else if (cpu_is_am33xx()) {
++ val = omap_ctrl_readl(AM33XX_CONTROL_STATUS_OFF);
+ } else if (cpu_is_omap34xx()) {
+ val = omap_ctrl_readl(OMAP343X_CONTROL_STATUS);
+ } else if (cpu_is_omap44xx()) {
+@@ -112,7 +114,7 @@ void omap_get_die_id(struct omap_die_id *odi)
+ odi->id_3 = read_tap_reg(OMAP_TAP_DIE_ID_3);
+ }
+
+-static void __init omap24xx_check_revision(void)
++void __init omap2xxx_check_revision(void)
+ {
+ int i, j;
+ u32 idcode, prod_id;
+@@ -166,13 +168,63 @@ static void __init omap24xx_check_revision(void)
+ pr_info("\n");
+ }
+
++#define OMAP3_SHOW_FEATURE(feat) \
++ if (omap3_has_ ##feat()) \
++ printk(#feat" ");
++
++static void __init omap3_cpuinfo(void)
++{
++ const char *cpu_name;
++
++ /*
++ * OMAP3430 and OMAP3530 are assumed to be same.
++ *
++ * OMAP3525, OMAP3515 and OMAP3503 can be detected only based
++ * on available features. Upon detection, update the CPU id
++ * and CPU class bits.
++ */
++ if (cpu_is_omap3630()) {
++ cpu_name = "OMAP3630";
++ } else if (cpu_is_omap3517()) {
++ /* AM35xx devices */
++ cpu_name = (omap3_has_sgx()) ? "AM3517" : "AM3505";
++ } else if (cpu_is_ti816x()) {
++ cpu_name = "TI816X";
++ } else if (cpu_is_am335x()) {
++ cpu_name = "AM335X";
++ } else if (cpu_is_ti814x()) {
++ cpu_name = "TI814X";
++ } else if (omap3_has_iva() && omap3_has_sgx()) {
++ /* OMAP3430, OMAP3525, OMAP3515, OMAP3503 devices */
++ cpu_name = "OMAP3430/3530";
++ } else if (omap3_has_iva()) {
++ cpu_name = "OMAP3525";
++ } else if (omap3_has_sgx()) {
++ cpu_name = "OMAP3515";
++ } else {
++ cpu_name = "OMAP3503";
++ }
++
++ /* Print verbose information */
++ pr_info("%s ES%s (", cpu_name, cpu_rev);
++
++ OMAP3_SHOW_FEATURE(l2cache);
++ OMAP3_SHOW_FEATURE(iva);
++ OMAP3_SHOW_FEATURE(sgx);
++ OMAP3_SHOW_FEATURE(neon);
++ OMAP3_SHOW_FEATURE(isp);
++ OMAP3_SHOW_FEATURE(192mhz_clk);
++
++ printk(")\n");
++}
++
+ #define OMAP3_CHECK_FEATURE(status,feat) \
+ if (((status & OMAP3_ ##feat## _MASK) \
+ >> OMAP3_ ##feat## _SHIFT) != FEAT_ ##feat## _NONE) { \
+ omap_features |= OMAP3_HAS_ ##feat; \
+ }
+
+-static void __init omap3_check_features(void)
++void __init omap3xxx_check_features(void)
+ {
+ u32 status;
+
+@@ -199,9 +251,11 @@ static void __init omap3_check_features(void)
+ * TODO: Get additional info (where applicable)
+ * e.g. Size of L2 cache.
+ */
++
++ omap3_cpuinfo();
+ }
+
+-static void __init omap4_check_features(void)
++void __init omap4xxx_check_features(void)
+ {
+ u32 si_type;
+
+@@ -226,12 +280,26 @@ static void __init omap4_check_features(void)
+ }
+ }
+
+-static void __init ti816x_check_features(void)
++void __init ti81xx_check_features(void)
+ {
+ omap_features = OMAP3_HAS_NEON;
++ omap3_cpuinfo();
+ }
+
+-static void __init omap3_check_revision(const char **cpu_rev)
++void __init am33xx_check_features(void)
++{
++ u32 status;
++
++ omap_features = OMAP3_HAS_NEON;
++
++ status = omap_ctrl_readl(AM33XX_DEV_FEATURE);
++ if (status & AM33XX_SGX_MASK)
++ omap_features |= OMAP3_HAS_SGX;
++
++ omap3_cpuinfo();
++}
++
++void __init omap3xxx_check_revision(void)
+ {
+ u32 cpuid, idcode;
+ u16 hawkeye;
+@@ -245,7 +313,7 @@ static void __init omap3_check_revision(const char **cpu_rev)
+ cpuid = read_cpuid(CPUID_ID);
+ if ((((cpuid >> 4) & 0xfff) == 0xc08) && ((cpuid & 0xf) == 0x0)) {
+ omap_revision = OMAP3430_REV_ES1_0;
+- *cpu_rev = "1.0";
++ cpu_rev = "1.0";
+ return;
+ }
+
+@@ -266,26 +334,26 @@ static void __init omap3_check_revision(const char **cpu_rev)
+ case 0: /* Take care of early samples */
+ case 1:
+ omap_revision = OMAP3430_REV_ES2_0;
+- *cpu_rev = "2.0";
++ cpu_rev = "2.0";
+ break;
+ case 2:
+ omap_revision = OMAP3430_REV_ES2_1;
+- *cpu_rev = "2.1";
++ cpu_rev = "2.1";
+ break;
+ case 3:
+ omap_revision = OMAP3430_REV_ES3_0;
+- *cpu_rev = "3.0";
++ cpu_rev = "3.0";
+ break;
+ case 4:
+ omap_revision = OMAP3430_REV_ES3_1;
+- *cpu_rev = "3.1";
++ cpu_rev = "3.1";
+ break;
+ case 7:
+ /* FALLTHROUGH */
+ default:
+ /* Use the latest known revision as default */
+ omap_revision = OMAP3430_REV_ES3_1_2;
+- *cpu_rev = "3.1.2";
++ cpu_rev = "3.1.2";
+ }
+ break;
+ case 0xb868:
+@@ -298,13 +366,13 @@ static void __init omap3_check_revision(const char **cpu_rev)
+ switch (rev) {
+ case 0:
+ omap_revision = OMAP3517_REV_ES1_0;
+- *cpu_rev = "1.0";
++ cpu_rev = "1.0";
+ break;
+ case 1:
+ /* FALLTHROUGH */
+ default:
+ omap_revision = OMAP3517_REV_ES1_1;
+- *cpu_rev = "1.1";
++ cpu_rev = "1.1";
+ }
+ break;
+ case 0xb891:
+@@ -313,42 +381,66 @@ static void __init omap3_check_revision(const char **cpu_rev)
+ switch(rev) {
+ case 0: /* Take care of early samples */
+ omap_revision = OMAP3630_REV_ES1_0;
+- *cpu_rev = "1.0";
++ cpu_rev = "1.0";
+ break;
+ case 1:
+ omap_revision = OMAP3630_REV_ES1_1;
+- *cpu_rev = "1.1";
++ cpu_rev = "1.1";
+ break;
+ case 2:
+ /* FALLTHROUGH */
+ default:
+ omap_revision = OMAP3630_REV_ES1_2;
+- *cpu_rev = "1.2";
++ cpu_rev = "1.2";
+ }
+ break;
+ case 0xb81e:
+ switch (rev) {
+ case 0:
+ omap_revision = TI8168_REV_ES1_0;
+- *cpu_rev = "1.0";
++ cpu_rev = "1.0";
+ break;
+ case 1:
+ /* FALLTHROUGH */
+ default:
+ omap_revision = TI8168_REV_ES1_1;
+- *cpu_rev = "1.1";
++ cpu_rev = "1.1";
++ break;
++ }
++ break;
++ case 0xb944:
++ omap_revision = AM335X_REV_ES1_0;
++ cpu_rev = "1.0";
++ break;
++ case 0xb8f2:
++ switch (rev) {
++ case 0:
++ /* FALLTHROUGH */
++ case 1:
++ omap_revision = TI8148_REV_ES1_0;
++ cpu_rev = "1.0";
++ break;
++ case 2:
++ omap_revision = TI8148_REV_ES2_0;
++ cpu_rev = "2.0";
++ break;
++ case 3:
++ /* FALLTHROUGH */
++ default:
++ omap_revision = TI8148_REV_ES2_1;
++ cpu_rev = "2.1";
+ break;
+ }
+ break;
+ default:
+ /* Unknown default to latest silicon rev as default */
+ omap_revision = OMAP3630_REV_ES1_2;
+- *cpu_rev = "1.2";
++ cpu_rev = "1.2";
+ pr_warn("Warning: unknown chip type; assuming OMAP3630ES1.2\n");
+ }
+ }
+
+-static void __init omap4_check_revision(void)
++void __init omap4xxx_check_revision(void)
+ {
+ u32 idcode;
+ u16 hawkeye;
+@@ -367,7 +459,7 @@ static void __init omap4_check_revision(void)
+ * Few initial 4430 ES2.0 samples IDCODE is same as ES1.0
+ * Use ARM register to detect the correct ES version
+ */
+- if (!rev && (hawkeye != 0xb94e)) {
++ if (!rev && (hawkeye != 0xb94e) && (hawkeye != 0xb975)) {
+ idcode = read_cpuid(CPUID_ID);
+ rev = (idcode & 0xf) - 1;
+ }
+@@ -389,8 +481,11 @@ static void __init omap4_check_revision(void)
+ omap_revision = OMAP4430_REV_ES2_1;
+ break;
+ case 4:
+- default:
+ omap_revision = OMAP4430_REV_ES2_2;
++ break;
++ case 6:
++ default:
++ omap_revision = OMAP4430_REV_ES2_3;
+ }
+ break;
+ case 0xb94e:
+@@ -401,94 +496,23 @@ static void __init omap4_check_revision(void)
+ break;
+ }
+ break;
++ case 0xb975:
++ switch (rev) {
++ case 0:
++ default:
++ omap_revision = OMAP4470_REV_ES1_0;
++ break;
++ }
++ break;
+ default:
+ /* Unknown default to latest silicon rev as default */
+- omap_revision = OMAP4430_REV_ES2_2;
++ omap_revision = OMAP4430_REV_ES2_3;
+ }
+
+ pr_info("OMAP%04x ES%d.%d\n", omap_rev() >> 16,
+ ((omap_rev() >> 12) & 0xf), ((omap_rev() >> 8) & 0xf));
+ }
+
+-#define OMAP3_SHOW_FEATURE(feat) \
+- if (omap3_has_ ##feat()) \
+- printk(#feat" ");
+-
+-static void __init omap3_cpuinfo(const char *cpu_rev)
+-{
+- const char *cpu_name;
+-
+- /*
+- * OMAP3430 and OMAP3530 are assumed to be same.
+- *
+- * OMAP3525, OMAP3515 and OMAP3503 can be detected only based
+- * on available features. Upon detection, update the CPU id
+- * and CPU class bits.
+- */
+- if (cpu_is_omap3630()) {
+- cpu_name = "OMAP3630";
+- } else if (cpu_is_omap3517()) {
+- /* AM35xx devices */
+- cpu_name = (omap3_has_sgx()) ? "AM3517" : "AM3505";
+- } else if (cpu_is_ti816x()) {
+- cpu_name = "TI816X";
+- } else if (omap3_has_iva() && omap3_has_sgx()) {
+- /* OMAP3430, OMAP3525, OMAP3515, OMAP3503 devices */
+- cpu_name = "OMAP3430/3530";
+- } else if (omap3_has_iva()) {
+- cpu_name = "OMAP3525";
+- } else if (omap3_has_sgx()) {
+- cpu_name = "OMAP3515";
+- } else {
+- cpu_name = "OMAP3503";
+- }
+-
+- /* Print verbose information */
+- pr_info("%s ES%s (", cpu_name, cpu_rev);
+-
+- OMAP3_SHOW_FEATURE(l2cache);
+- OMAP3_SHOW_FEATURE(iva);
+- OMAP3_SHOW_FEATURE(sgx);
+- OMAP3_SHOW_FEATURE(neon);
+- OMAP3_SHOW_FEATURE(isp);
+- OMAP3_SHOW_FEATURE(192mhz_clk);
+-
+- printk(")\n");
+-}
+-
+-/*
+- * Try to detect the exact revision of the omap we're running on
+- */
+-void __init omap2_check_revision(void)
+-{
+- const char *cpu_rev;
+-
+- /*
+- * At this point we have an idea about the processor revision set
+- * earlier with omap2_set_globals_tap().
+- */
+- if (cpu_is_omap24xx()) {
+- omap24xx_check_revision();
+- } else if (cpu_is_omap34xx()) {
+- omap3_check_revision(&cpu_rev);
+-
+- /* TI816X doesn't have feature register */
+- if (!cpu_is_ti816x())
+- omap3_check_features();
+- else
+- ti816x_check_features();
+-
+- omap3_cpuinfo(cpu_rev);
+- return;
+- } else if (cpu_is_omap44xx()) {
+- omap4_check_revision();
+- omap4_check_features();
+- return;
+- } else {
+- pr_err("OMAP revision unknown, please fix!\n");
+- }
+-}
+-
+ /*
+ * Set up things for map_io and processor detection later on. Gets called
+ * pretty much first thing from board init. For multi-omap, this gets
+diff --git a/arch/arm/mach-omap2/include/mach/barriers.h b/arch/arm/mach-omap2/include/mach/barriers.h
+new file mode 100644
+index 0000000..4fa72c7
+--- /dev/null
++++ b/arch/arm/mach-omap2/include/mach/barriers.h
+@@ -0,0 +1,31 @@
++/*
++ * OMAP memory barrier header.
++ *
++ * Copyright (C) 2011 Texas Instruments, Inc.
++ * Santosh Shilimkar <santosh.shilimkar@ti.com>
++ * Richard Woodruff <r-woodruff2@ti.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#ifndef __MACH_BARRIERS_H
++#define __MACH_BARRIERS_H
++
++extern void omap_bus_sync(void);
++
++#define rmb() dsb()
++#define wmb() do { dsb(); outer_sync(); omap_bus_sync(); } while (0)
++#define mb() wmb()
++
++#endif /* __MACH_BARRIERS_H */
+diff --git a/arch/arm/mach-omap2/include/mach/board-am335xevm.h b/arch/arm/mach-omap2/include/mach/board-am335xevm.h
+new file mode 100644
+index 0000000..a8fe93a
+--- /dev/null
++++ b/arch/arm/mach-omap2/include/mach/board-am335xevm.h
+@@ -0,0 +1,47 @@
++/*
++ * Code for supporting AM335X EVM.
++ *
++ * Copyright (C) {2011} Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef _BOARD_AM335X_H
++#define _BOARD_AM335X_H
++
++#define BASEBOARD_I2C_ADDR 0x50
++#define DAUG_BOARD_I2C_ADDR 0x51
++#define LCD_BOARD_I2C_ADDR 0x52
++
++#define LOW_COST_EVM 0
++#define GEN_PURP_EVM 1
++#define IND_AUT_MTR_EVM 2
++#define IP_PHN_EVM 3
++#define BEAGLE_BONE_OLD 4
++#define BEAGLE_BONE_A3 5
++
++/* REVISIT: check possibility of PROFILE_(x) syntax usage */
++#define PROFILE_NONE		-1	/* Some EVMs don't have profiles */
++#define PROFILE_0 (0x1 << 0)
++#define PROFILE_1 (0x1 << 1)
++#define PROFILE_2 (0x1 << 2)
++#define PROFILE_3 (0x1 << 3)
++#define PROFILE_4 (0x1 << 4)
++#define PROFILE_5 (0x1 << 5)
++#define PROFILE_6 (0x1 << 6)
++#define PROFILE_7 (0x1 << 7)
++#define PROFILE_ALL 0xFF
++
++void am33xx_evmid_fillup(unsigned int evmid);
++void am33xx_cpsw_macidfillup(char *eeprommacid0, char *eeprommacid1);
++void am33xx_cpsw_init(unsigned int gigen);
++void am33xx_d_can_init(unsigned int instance);
++
++#endif
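
The EVM ids and PROFILE_x masks above are plain flags that a board file is expected to test before registering peripherals; the values themselves come from the baseboard EEPROM. A hypothetical check could look like the following, where the peripheral and the profile mask chosen are illustrative, not taken from the real board-am335xevm.c.

static void __init example_register_lcdc(int evm_id, unsigned int profile)
{
	/* assume the LCD cape is fitted only on the GP EVM, profiles 0 and 2 */
	if (evm_id == GEN_PURP_EVM && (profile & (PROFILE_0 | PROFILE_2)))
		pr_info("am335x: registering LCDC for profile mask 0x%02x\n",
			profile);
}
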
+diff --git a/arch/arm/mach-omap2/include/mach/debug-macro.S b/arch/arm/mach-omap2/include/mach/debug-macro.S
+index 13f98e5..b9b10e5 100644
+--- a/arch/arm/mach-omap2/include/mach/debug-macro.S
++++ b/arch/arm/mach-omap2/include/mach/debug-macro.S
+@@ -66,12 +66,16 @@ omap_uart_lsr: .word 0
+ beq 34f @ configure OMAP3UART4
+ cmp \rp, #OMAP4UART4 @ only on 44xx
+ beq 44f @ configure OMAP4UART4
+- cmp \rp, #TI816XUART1 @ ti816x UART offsets different
++ cmp \rp, #TI81XXUART1 @ ti81Xx UART offsets different
+ beq 81f @ configure UART1
+- cmp \rp, #TI816XUART2 @ ti816x UART offsets different
++ cmp \rp, #TI81XXUART2 @ ti81Xx UART offsets different
+ beq 82f @ configure UART2
+- cmp \rp, #TI816XUART3 @ ti816x UART offsets different
++ cmp \rp, #TI81XXUART3 @ ti81Xx UART offsets different
+ beq 83f @ configure UART3
++ cmp \rp, #AM33XXUART1 @ AM33XX UART offsets different
++ beq 84f @ configure UART1
++ cmp \rp, #AM33XXUART4 @ AM33XX UART offsets different
++	beq	85f				@ configure UART4
+ cmp \rp, #ZOOM_UART @ only on zoom2/3
+ beq 95f @ configure ZOOM_UART
+
+@@ -94,13 +98,17 @@ omap_uart_lsr: .word 0
+ b 98f
+ 44: mov \rp, #UART_OFFSET(OMAP4_UART4_BASE)
+ b 98f
+-81: mov \rp, #UART_OFFSET(TI816X_UART1_BASE)
++81: mov \rp, #UART_OFFSET(TI81XX_UART1_BASE)
+ b 98f
+-82: mov \rp, #UART_OFFSET(TI816X_UART2_BASE)
++82: mov \rp, #UART_OFFSET(TI81XX_UART2_BASE)
+ b 98f
+-83: mov \rp, #UART_OFFSET(TI816X_UART3_BASE)
++83: mov \rp, #UART_OFFSET(TI81XX_UART3_BASE)
++ b 98f
++84: ldr \rp, =AM33XX_UART1_BASE
++ and \rp, \rp, #0x00ffffff
++ b 97f
++85: ldr \rp, =UART_OFFSET(AM33XX_UART4_BASE)
+ b 98f
+-
+ 95: ldr \rp, =ZOOM_UART_BASE
+ str \rp, [\tmp, #0] @ omap_uart_phys
+ ldr \rp, =ZOOM_UART_VIRT
+@@ -109,6 +117,17 @@ omap_uart_lsr: .word 0
+ str \rp, [\tmp, #8] @ omap_uart_lsr
+ b 10b
+
++ /* AM33XX: Store both phys and virt address for the uart */
++97: add \rp, \rp, #0x44000000 @ phys base
++ str \rp, [\tmp, #0] @ omap_uart_phys
++ sub \rp, \rp, #0x44000000 @ phys base
++ add \rp, \rp, #0xf9000000 @ virt base
++ str \rp, [\tmp, #4] @ omap_uart_virt
++ mov \rp, #(UART_LSR << OMAP_PORT_SHIFT)
++ str \rp, [\tmp, #8] @ omap_uart_lsr
++
++ b 10b
++
+ /* Store both phys and virt address for the uart */
+ 98: add \rp, \rp, #0x48000000 @ phys base
+ str \rp, [\tmp, #0] @ omap_uart_phys
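
As a worked example of the AM33XX path added above (labels 84/97): assuming AM33XX_UART1_BASE is 0x44E09000, masking with #0x00ffffff leaves the offset 0x00E09000; adding 0x44000000 back recovers the physical address, while adding 0xf9000000 instead yields the fixed virtual address 0xF9E09000, and both are stashed so the low-level debug macros can reach the UART before the normal ioremap machinery is up.
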
+diff --git a/arch/arm/mach-omap2/include/mach/edma.h b/arch/arm/mach-omap2/include/mach/edma.h
+new file mode 100644
+index 0000000..9f2a7e4
+--- /dev/null
++++ b/arch/arm/mach-omap2/include/mach/edma.h
+@@ -0,0 +1,250 @@
++/*
++ * TI EDMA3 definitions
++ *
++ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++/*
++ * This EDMA3 programming framework exposes two basic kinds of resource:
++ *
++ * Channel Triggers transfers, usually from a hardware event but
++ * also manually or by "chaining" from DMA completions.
++ * Each channel is coupled to a Parameter RAM (PaRAM) slot.
++ *
++ * Slot Each PaRAM slot holds a DMA transfer descriptor (PaRAM
++ * "set"), source and destination addresses, a link to a
++ * next PaRAM slot (if any), options for the transfer, and
++ * instructions for updating those addresses. There are
++ * more than twice as many slots as event channels.
++ *
++ * Each PaRAM set describes a sequence of transfers, either for one large
++ * buffer or for several discontiguous smaller buffers. An EDMA transfer
++ * is driven only from a channel, which performs the transfers specified
++ * in its PaRAM slot until there are no more transfers. When that last
++ * transfer completes, the "link" field may be used to reload the channel's
++ * PaRAM slot with a new transfer descriptor.
++ *
++ * The EDMA Channel Controller (CC) maps requests from channels into physical
++ * Transfer Controller (TC) requests when the channel triggers (by hardware
++ * or software events, or by chaining). The two physical DMA channels provided
++ * by the TCs are thus shared by many logical channels.
++ *
++ * EDMA hardware also has a "QDMA" mechanism which is not currently
++ * supported through this interface. (DSP firmware uses it though.)
++ */
++
++#ifndef EDMA_H_
++#define EDMA_H_
++
++/* PaRAM slots are laid out like this */
++struct edmacc_param {
++ unsigned int opt;
++ unsigned int src;
++ unsigned int a_b_cnt;
++ unsigned int dst;
++ unsigned int src_dst_bidx;
++ unsigned int link_bcntrld;
++ unsigned int src_dst_cidx;
++ unsigned int ccnt;
++};
++
++/* fields in edmacc_param.opt */
++#define SAM BIT(0)
++#define DAM BIT(1)
++#define SYNCDIM BIT(2)
++#define STATIC BIT(3)
++#define EDMA_FWID (0x07 << 8)
++#define TCCMODE BIT(11)
++#define EDMA_TCC(t) ((t) << 12)
++#define TCINTEN BIT(20)
++#define ITCINTEN BIT(21)
++#define TCCHEN BIT(22)
++#define ITCCHEN BIT(23)
++
++#define TRWORD (0x7<<2)
++#define PAENTRY (0x1ff<<5)
++
++/* ch_status parameter of callback function: possible values */
++#define DMA_COMPLETE 1
++#define DMA_CC_ERROR 2
++#define DMA_TC0_ERROR 3
++#define DMA_TC1_ERROR 4
++#define DMA_TC2_ERROR 5
++#define DMA_TC3_ERROR 6
++
++enum address_mode {
++ INCR = 0,
++ FIFO = 1
++};
++
++enum fifo_width {
++ W8BIT = 0,
++ W16BIT = 1,
++ W32BIT = 2,
++ W64BIT = 3,
++ W128BIT = 4,
++ W256BIT = 5
++};
++
++enum dma_event_q {
++ EVENTQ_0 = 0,
++ EVENTQ_1 = 1,
++ EVENTQ_2 = 2,
++ EVENTQ_3 = 3,
++ EVENTQ_DEFAULT = -1
++};
++
++enum sync_dimension {
++ ASYNC = 0,
++ ABSYNC = 1
++};
++
++#define EDMA_CTLR_CHAN(ctlr, chan) (((ctlr) << 16) | (chan))
++#define EDMA_CTLR(i) ((i) >> 16)
++#define EDMA_CHAN_SLOT(i) ((i) & 0xffff)
++
++#define EDMA_CHANNEL_ANY -1 /* for edma_alloc_channel() */
++#define EDMA_SLOT_ANY -1 /* for edma_alloc_slot() */
++#define EDMA_CONT_PARAMS_ANY 1001
++#define EDMA_CONT_PARAMS_FIXED_EXACT 1002
++#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
++
++#define EDMA_MAX_DMACH 64
++#define EDMA_MAX_PARAMENTRY 512
++#define EDMA_MAX_CC 2
++#define EDMA_MAX_REGION 4
++
++
++/* Mapping of crossbar event numbers to actual DMA channels */
++struct event_to_channel_map {
++ unsigned xbar_event_no;
++ int channel_no;
++};
++
++/* actual number of DMA channels and slots on this silicon */
++struct edma {
++ /* how many dma resources of each type */
++ unsigned num_channels;
++ unsigned num_region;
++ unsigned num_slots;
++ unsigned num_tc;
++ unsigned num_cc;
++ enum dma_event_q default_queue;
++
++	/* list of channels with no event trigger; terminated by "-1" */
++ const s8 *noevent;
++
++ /* The edma_inuse bit for each PaRAM slot is clear unless the
++ * channel is in use ... by ARM or DSP, for QDMA, or whatever.
++ */
++ DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);
++
++	/* The edma_unused bit for each channel is set while the channel
++	 * is not being used on this platform; clearing it for in-use
++	 * channels takes a bit of SOC-specific initialization code.
++ */
++ DECLARE_BITMAP(edma_unused, EDMA_MAX_DMACH);
++
++ unsigned irq_res_start;
++ unsigned irq_res_end;
++
++ struct dma_interrupt_data {
++ void (*callback)(unsigned channel, unsigned short ch_status,
++ void *data);
++ void *data;
++ } intr_data[EDMA_MAX_DMACH];
++
++ unsigned is_xbar;
++ unsigned num_events;
++ struct event_to_channel_map *xbar_event_mapping;
++
++ /* suspend/resume backup parameters */
++ struct edmacc_param *bkp_prm_set;
++ unsigned int *bkp_ch_map; /* 64 registers */
++ unsigned int *bkp_que_num; /* 8 registers */
++ unsigned int *bkp_drae;
++ unsigned int *bkp_draeh;
++ unsigned int *bkp_qrae;
++ unsigned int bkp_sh_esr;
++ unsigned int bkp_sh_esrh;
++ unsigned int bkp_sh_eesr;
++ unsigned int bkp_sh_eesrh;
++ unsigned int bkp_sh_iesr;
++ unsigned int bkp_sh_iesrh;
++ unsigned int bkp_que_tc_map;
++ unsigned int bkp_que_pri;
++};
++
++extern struct edma *edma_cc[EDMA_MAX_CC];
++
++/* alloc/free DMA channels and their dedicated parameter RAM slots */
++int edma_alloc_channel(int channel,
++ void (*callback)(unsigned channel, u16 ch_status, void *data),
++ void *data, enum dma_event_q);
++void edma_free_channel(unsigned channel);
++
++/* alloc/free parameter RAM slots */
++int edma_alloc_slot(unsigned ctlr, int slot);
++void edma_free_slot(unsigned slot);
++
++/* alloc/free a set of contiguous parameter RAM slots */
++int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count);
++int edma_free_cont_slots(unsigned slot, int count);
++
++/* calls that operate on part of a parameter RAM slot */
++void edma_set_src(unsigned slot, dma_addr_t src_port,
++ enum address_mode mode, enum fifo_width);
++void edma_set_dest(unsigned slot, dma_addr_t dest_port,
++ enum address_mode mode, enum fifo_width);
++void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst);
++void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx);
++void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx);
++void edma_set_transfer_params(unsigned slot, u16 acnt, u16 bcnt, u16 ccnt,
++ u16 bcnt_rld, enum sync_dimension sync_mode);
++void edma_link(unsigned from, unsigned to);
++void edma_unlink(unsigned from);
++
++/* calls that operate on an entire parameter RAM slot */
++void edma_write_slot(unsigned slot, const struct edmacc_param *params);
++void edma_read_slot(unsigned slot, struct edmacc_param *params);
++
++/* channel control operations */
++int edma_start(unsigned channel);
++void edma_stop(unsigned channel);
++void edma_clean_channel(unsigned channel);
++void edma_clear_event(unsigned channel);
++void edma_pause(unsigned channel);
++void edma_resume(unsigned channel);
++
++/* platform_data for EDMA driver */
++struct edma_soc_info {
++
++ /* how many dma resources of each type */
++ unsigned n_channel;
++ unsigned n_region;
++ unsigned n_slot;
++ unsigned n_tc;
++ unsigned n_cc;
++ enum dma_event_q default_queue;
++
++ const s16 (*rsv_chans)[2];
++ const s16 (*rsv_slots)[2];
++ const s8 (*queue_tc_mapping)[2];
++ const s8 (*queue_priority_mapping)[2];
++ unsigned is_xbar;
++ unsigned n_events;
++ struct event_to_channel_map *xbar_event_mapping;
++ int (*map_xbar_channel)(unsigned event, unsigned *channel,
++ struct event_to_channel_map *xbar_event_map);
++};
++
++#endif
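
The comment block at the top of this header describes channels and PaRAM slots in the abstract; the sketch below simply strings the declared calls together for one self-reloading, A-synchronized copy so the API shape is visible in one place. It is an illustration only: the element width, addresses and callback are placeholders, and error handling is minimal.

static int example_edma_copy(dma_addr_t src, dma_addr_t dst, u16 len,
			     void (*cb)(unsigned ch, u16 status, void *data))
{
	struct edmacc_param param;
	int ch, slot;

	ch = edma_alloc_channel(EDMA_CHANNEL_ANY, cb, NULL, EVENTQ_DEFAULT);
	if (ch < 0)
		return ch;

	/* a spare PaRAM slot to reload the channel when the transfer ends */
	slot = edma_alloc_slot(EDMA_CTLR(ch), EDMA_SLOT_ANY);
	if (slot < 0) {
		edma_free_channel(ch);
		return slot;
	}

	edma_set_src(ch, src, INCR, W8BIT);
	edma_set_dest(ch, dst, INCR, W8BIT);
	edma_set_src_index(ch, 1, 0);
	edma_set_dest_index(ch, 1, 0);
	/* len bytes per array, one array per frame, one frame: A-sync */
	edma_set_transfer_params(ch, len, 1, 1, 1, ASYNC);

	edma_read_slot(ch, &param);
	edma_write_slot(slot, &param);
	edma_link(ch, slot);

	return edma_start(ch);
}
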
+diff --git a/arch/arm/mach-omap2/include/mach/entry-macro.S b/arch/arm/mach-omap2/include/mach/entry-macro.S
+index feb90a1..56964a0 100644
+--- a/arch/arm/mach-omap2/include/mach/entry-macro.S
++++ b/arch/arm/mach-omap2/include/mach/entry-macro.S
+@@ -10,146 +10,9 @@
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+-#include <mach/hardware.h>
+-#include <mach/io.h>
+-#include <mach/irqs.h>
+-#include <asm/hardware/gic.h>
+-
+-#include <plat/omap24xx.h>
+-#include <plat/omap34xx.h>
+-#include <plat/omap44xx.h>
+-
+-#include <plat/multi.h>
+-
+-#define OMAP2_IRQ_BASE OMAP2_L4_IO_ADDRESS(OMAP24XX_IC_BASE)
+-#define OMAP3_IRQ_BASE OMAP2_L4_IO_ADDRESS(OMAP34XX_IC_BASE)
+-#define OMAP4_IRQ_BASE OMAP2_L4_IO_ADDRESS(OMAP44XX_GIC_CPU_BASE)
+-#define INTCPS_SIR_IRQ_OFFSET 0x0040 /* omap2/3 active interrupt offset */
+-#define ACTIVEIRQ_MASK 0x7f /* omap2/3 active interrupt bits */
+
+ .macro disable_fiq
+ .endm
+
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+-
+-/*
+- * Unoptimized irq functions for multi-omap2, 3 and 4
+- */
+-
+-#ifdef MULTI_OMAP2
+- /*
+- * Configure the interrupt base on the first interrupt.
+- * See also omap_irq_base_init for setting omap_irq_base.
+- */
+- .macro get_irqnr_preamble, base, tmp
+- ldr \base, =omap_irq_base @ irq base address
+- ldr \base, [\base, #0] @ irq base value
+- .endm
+-
+- /* Check the pending interrupts. Note that base already set */
+- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+- tst \base, #0x100 @ gic address?
+- bne 4401f @ found gic
+-
+- /* Handle omap2 and omap3 */
+- ldr \irqnr, [\base, #0x98] /* IRQ pending reg 1 */
+- cmp \irqnr, #0x0
+- bne 9998f
+- ldr \irqnr, [\base, #0xb8] /* IRQ pending reg 2 */
+- cmp \irqnr, #0x0
+- bne 9998f
+- ldr \irqnr, [\base, #0xd8] /* IRQ pending reg 3 */
+- cmp \irqnr, #0x0
+- bne 9998f
+-
+- /*
+- * ti816x has additional IRQ pending register. Checking this
+- * register on omap2 & omap3 has no effect (read as 0).
+- */
+- ldr \irqnr, [\base, #0xf8] /* IRQ pending reg 4 */
+- cmp \irqnr, #0x0
+-9998:
+- ldrne \irqnr, [\base, #INTCPS_SIR_IRQ_OFFSET]
+- and \irqnr, \irqnr, #ACTIVEIRQ_MASK /* Clear spurious bits */
+- b 9999f
+-
+- /* Handle omap4 */
+-4401: ldr \irqstat, [\base, #GIC_CPU_INTACK]
+- ldr \tmp, =1021
+- bic \irqnr, \irqstat, #0x1c00
+- cmp \irqnr, #15
+- cmpcc \irqnr, \irqnr
+- cmpne \irqnr, \tmp
+- cmpcs \irqnr, \irqnr
+-9999:
+- .endm
+-
+-#ifdef CONFIG_SMP
+- /* We assume that irqstat (the raw value of the IRQ acknowledge
+- * register) is preserved from the macro above.
+- * If there is an IPI, we immediately signal end of interrupt
+- * on the controller, since this requires the original irqstat
+- * value which we won't easily be able to recreate later.
+- */
+-
+- .macro test_for_ipi, irqnr, irqstat, base, tmp
+- bic \irqnr, \irqstat, #0x1c00
+- cmp \irqnr, #16
+- it cc
+- strcc \irqstat, [\base, #GIC_CPU_EOI]
+- it cs
+- cmpcs \irqnr, \irqnr
+- .endm
+-#endif /* CONFIG_SMP */
+-
+-#else /* MULTI_OMAP2 */
+-
+-
+-/*
+- * Optimized irq functions for omap2, 3 and 4
+- */
+-
+-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+- .macro get_irqnr_preamble, base, tmp
+-#ifdef CONFIG_ARCH_OMAP2
+- ldr \base, =OMAP2_IRQ_BASE
+-#else
+- ldr \base, =OMAP3_IRQ_BASE
+-#endif
+- .endm
+-
+- /* Check the pending interrupts. Note that base already set */
+- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+- ldr \irqnr, [\base, #0x98] /* IRQ pending reg 1 */
+- cmp \irqnr, #0x0
+- bne 9999f
+- ldr \irqnr, [\base, #0xb8] /* IRQ pending reg 2 */
+- cmp \irqnr, #0x0
+- bne 9999f
+- ldr \irqnr, [\base, #0xd8] /* IRQ pending reg 3 */
+- cmp \irqnr, #0x0
+-#ifdef CONFIG_SOC_OMAPTI816X
+- bne 9999f
+- ldr \irqnr, [\base, #0xf8] /* IRQ pending reg 4 */
+- cmp \irqnr, #0x0
+-#endif
+-9999:
+- ldrne \irqnr, [\base, #INTCPS_SIR_IRQ_OFFSET]
+- and \irqnr, \irqnr, #ACTIVEIRQ_MASK /* Clear spurious bits */
+-
+- .endm
+-#endif
+-
+-
+-#ifdef CONFIG_ARCH_OMAP4
+-#define HAVE_GET_IRQNR_PREAMBLE
+-#include <asm/hardware/entry-macro-gic.S>
+-
+- .macro get_irqnr_preamble, base, tmp
+- ldr \base, =OMAP4_IRQ_BASE
+- .endm
+-
+-#endif
+-
+-#endif /* MULTI_OMAP2 */
+diff --git a/arch/arm/mach-omap2/include/mach/omap-secure.h b/arch/arm/mach-omap2/include/mach/omap-secure.h
+new file mode 100644
+index 0000000..c90a435
+--- /dev/null
++++ b/arch/arm/mach-omap2/include/mach/omap-secure.h
+@@ -0,0 +1,57 @@
++/*
++ * omap-secure.h: OMAP Secure infrastructure header.
++ *
++ * Copyright (C) 2011 Texas Instruments, Inc.
++ * Santosh Shilimkar <santosh.shilimkar@ti.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++#ifndef OMAP_ARCH_OMAP_SECURE_H
++#define OMAP_ARCH_OMAP_SECURE_H
++
++/* Monitor error code */
++#define API_HAL_RET_VALUE_NS2S_CONVERSION_ERROR 0xFFFFFFFE
++#define API_HAL_RET_VALUE_SERVICE_UNKNWON 0xFFFFFFFF
++
++/* HAL API error codes */
++#define API_HAL_RET_VALUE_OK 0x00
++#define API_HAL_RET_VALUE_FAIL 0x01
++
++/* Secure HAL API flags */
++#define FLAG_START_CRITICAL 0x4
++#define FLAG_IRQFIQ_MASK 0x3
++#define FLAG_IRQ_ENABLE 0x2
++#define FLAG_FIQ_ENABLE 0x1
++#define NO_FLAG 0x0
++
++/* Maximum Secure memory storage size */
++#define OMAP_SECURE_RAM_STORAGE (88 * SZ_1K)
++
++/* Secure low power HAL API index */
++#define OMAP4_HAL_SAVESECURERAM_INDEX 0x1a
++#define OMAP4_HAL_SAVEHW_INDEX 0x1b
++#define OMAP4_HAL_SAVEALL_INDEX 0x1c
++#define OMAP4_HAL_SAVEGIC_INDEX 0x1d
++
++/* Secure Monitor mode APIs */
++#define OMAP4_MON_SCU_PWR_INDEX 0x108
++#define OMAP4_MON_L2X0_DBG_CTRL_INDEX 0x100
++#define OMAP4_MON_L2X0_CTRL_INDEX 0x102
++#define OMAP4_MON_L2X0_AUXCTRL_INDEX 0x109
++#define OMAP4_MON_L2X0_PREFETCH_INDEX 0x113
++
++/* Secure PPA(Primary Protected Application) APIs */
++#define OMAP4_PPA_L2_POR_INDEX 0x23
++#define OMAP4_PPA_CPU_ACTRL_SMP_INDEX 0x25
++
++#ifndef __ASSEMBLER__
++
++extern u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs,
++ u32 arg1, u32 arg2, u32 arg3, u32 arg4);
++extern u32 omap_smc2(u32 id, u32 flag, u32 pargs);
++extern phys_addr_t omap_secure_ram_mempool_base(void);
++
++#endif /* __ASSEMBLER__ */
++#endif /* OMAP_ARCH_OMAP_SECURE_H */
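
For context, the dispatcher declared above is the single entry point for the monitor and PPA indices defined in this header: callers pass the index, a flag word, the number of arguments and then up to four arguments. A hedged example of what a wrapper might look like is shown below; the wrapper name and the enable value are made up for illustration.

static void example_set_l2x0_ctrl(u32 enable)
{
	/* one argument: the new PL310 control register value */
	omap_secure_dispatcher(OMAP4_MON_L2X0_CTRL_INDEX, FLAG_START_CRITICAL,
			       1, enable, 0, 0, 0);
}
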
+diff --git a/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h b/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h
+new file mode 100644
+index 0000000..d79321b
+--- /dev/null
++++ b/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h
+@@ -0,0 +1,39 @@
++/*
++ * OMAP WakeupGen header file
++ *
++ * Copyright (C) 2011 Texas Instruments, Inc.
++ * Santosh Shilimkar <santosh.shilimkar@ti.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++#ifndef OMAP_ARCH_WAKEUPGEN_H
++#define OMAP_ARCH_WAKEUPGEN_H
++
++#define OMAP_WKG_CONTROL_0 0x00
++#define OMAP_WKG_ENB_A_0 0x10
++#define OMAP_WKG_ENB_B_0 0x14
++#define OMAP_WKG_ENB_C_0 0x18
++#define OMAP_WKG_ENB_D_0 0x1c
++#define OMAP_WKG_ENB_SECURE_A_0 0x20
++#define OMAP_WKG_ENB_SECURE_B_0 0x24
++#define OMAP_WKG_ENB_SECURE_C_0 0x28
++#define OMAP_WKG_ENB_SECURE_D_0 0x2c
++#define OMAP_WKG_ENB_A_1 0x410
++#define OMAP_WKG_ENB_B_1 0x414
++#define OMAP_WKG_ENB_C_1 0x418
++#define OMAP_WKG_ENB_D_1 0x41c
++#define OMAP_WKG_ENB_SECURE_A_1 0x420
++#define OMAP_WKG_ENB_SECURE_B_1 0x424
++#define OMAP_WKG_ENB_SECURE_C_1 0x428
++#define OMAP_WKG_ENB_SECURE_D_1 0x42c
++#define OMAP_AUX_CORE_BOOT_0 0x800
++#define OMAP_AUX_CORE_BOOT_1 0x804
++#define OMAP_PTMSYNCREQ_MASK 0xc00
++#define OMAP_PTMSYNCREQ_EN 0xc04
++#define OMAP_TIMESTAMPCYCLELO 0xc08
++#define OMAP_TIMESTAMPCYCLEHI 0xc0c
++
++extern int __init omap_wakeupgen_init(void);
++#endif
+diff --git a/arch/arm/mach-omap2/include/mach/omap4-common.h b/arch/arm/mach-omap2/include/mach/omap4-common.h
+deleted file mode 100644
+index e4bd87619..0000000
+--- a/arch/arm/mach-omap2/include/mach/omap4-common.h
++++ /dev/null
+@@ -1,43 +0,0 @@
+-/*
+- * omap4-common.h: OMAP4 specific common header file
+- *
+- * Copyright (C) 2010 Texas Instruments, Inc.
+- *
+- * Author:
+- * Santosh Shilimkar <santosh.shilimkar@ti.com>
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-#ifndef OMAP_ARCH_OMAP4_COMMON_H
+-#define OMAP_ARCH_OMAP4_COMMON_H
+-
+-/*
+- * wfi used in low power code. Directly opcode is used instead
+- * of instruction to avoid mulit-omap build break
+- */
+-#ifdef CONFIG_THUMB2_KERNEL
+-#define do_wfi() __asm__ __volatile__ ("wfi" : : : "memory")
+-#else
+-#define do_wfi() \
+- __asm__ __volatile__ (".word 0xe320f003" : : : "memory")
+-#endif
+-
+-#ifdef CONFIG_CACHE_L2X0
+-extern void __iomem *l2cache_base;
+-#endif
+-
+-extern void __iomem *gic_dist_base_addr;
+-
+-extern void __init gic_init_irq(void);
+-extern void omap_smc1(u32 fn, u32 arg);
+-
+-#ifdef CONFIG_SMP
+-/* Needed for secondary core boot */
+-extern void omap_secondary_startup(void);
+-extern u32 omap_modify_auxcoreboot0(u32 set_mask, u32 clear_mask);
+-extern void omap_auxcoreboot_addr(u32 cpu_addr);
+-extern u32 omap_read_auxcoreboot0(void);
+-#endif
+-#endif
+diff --git a/arch/arm/mach-omap2/include/mach/sram.h b/arch/arm/mach-omap2/include/mach/sram.h
+new file mode 100644
+index 0000000..7869e2f
+--- /dev/null
++++ b/arch/arm/mach-omap2/include/mach/sram.h
+@@ -0,0 +1,14 @@
++/*
++ * arch/arm/mach-omap2/include/mach/sram.h
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __ARCH_ARM_SRAM_H
++#define __ARCH_ARM_SRAM_H
++
++#include <plat/sram.h>
++
++#endif
+diff --git a/arch/arm/mach-omap2/include/mach/vmalloc.h b/arch/arm/mach-omap2/include/mach/vmalloc.h
+deleted file mode 100644
+index 8663199..0000000
+--- a/arch/arm/mach-omap2/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,20 +0,0 @@
+-/*
+- * arch/arm/plat-omap/include/mach/vmalloc.h
+- *
+- * Copyright (C) 2000 Russell King.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+- */
+-#define VMALLOC_END 0xf8000000UL
+diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
+index 25d20ce..729be3c 100644
+--- a/arch/arm/mach-omap2/io.c
++++ b/arch/arm/mach-omap2/io.c
+@@ -35,15 +35,17 @@
+ #include "clock3xxx.h"
+ #include "clock44xx.h"
+
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/omap-pm.h>
+ #include "voltage.h"
+ #include "powerdomain.h"
++#include "prminst44xx.h"
++#include "cminst44xx.h"
+
+ #include "clockdomain.h"
+ #include <plat/omap_hwmod.h>
+ #include <plat/multi.h>
+-#include <plat/common.h>
++#include "common.h"
+
+ /*
+ * The machine specific code may provide the extra mapping besides the
+@@ -176,14 +178,31 @@ static struct map_desc omap34xx_io_desc[] __initdata = {
+ };
+ #endif
+
+-#ifdef CONFIG_SOC_OMAPTI816X
+-static struct map_desc omapti816x_io_desc[] __initdata = {
++#ifdef CONFIG_SOC_OMAPTI81XX
++static struct map_desc omapti81xx_io_desc[] __initdata = {
++ {
++ .virtual = L4_34XX_VIRT,
++ .pfn = __phys_to_pfn(L4_34XX_PHYS),
++ .length = L4_34XX_SIZE,
++ .type = MT_DEVICE
++ }
++};
++#endif
++
++#ifdef CONFIG_SOC_OMAPAM33XX
++static struct map_desc omapam33xx_io_desc[] __initdata = {
+ {
+ .virtual = L4_34XX_VIRT,
+ .pfn = __phys_to_pfn(L4_34XX_PHYS),
+ .length = L4_34XX_SIZE,
+ .type = MT_DEVICE
+ },
++ {
++ .virtual = L4_WK_AM33XX_VIRT,
++ .pfn = __phys_to_pfn(L4_WK_AM33XX_PHYS),
++ .length = L4_WK_AM33XX_SIZE,
++ .type = MT_DEVICE
++ }
+ };
+ #endif
+
+@@ -237,6 +256,15 @@ static struct map_desc omap44xx_io_desc[] __initdata = {
+ .length = L4_EMU_44XX_SIZE,
+ .type = MT_DEVICE,
+ },
++#ifdef CONFIG_OMAP4_ERRATA_I688
++ {
++ .virtual = OMAP4_SRAM_VA,
++ .pfn = __phys_to_pfn(OMAP4_SRAM_PA),
++ .length = PAGE_SIZE,
++ .type = MT_MEMORY_SO,
++ },
++#endif
++
+ };
+ #endif
+
+@@ -263,10 +291,17 @@ void __init omap34xx_map_common_io(void)
+ }
+ #endif
+
+-#ifdef CONFIG_SOC_OMAPTI816X
+-void __init omapti816x_map_common_io(void)
++#ifdef CONFIG_SOC_OMAPTI81XX
++void __init omapti81xx_map_common_io(void)
+ {
+- iotable_init(omapti816x_io_desc, ARRAY_SIZE(omapti816x_io_desc));
++ iotable_init(omapti81xx_io_desc, ARRAY_SIZE(omapti81xx_io_desc));
++}
++#endif
++
++#ifdef CONFIG_SOC_OMAPAM33XX
++void __init omapam33xx_map_common_io(void)
++{
++ iotable_init(omapam33xx_io_desc, ARRAY_SIZE(omapam33xx_io_desc));
+ }
+ #endif
+
+@@ -316,13 +351,8 @@ static int _set_hwmod_postsetup_state(struct omap_hwmod *oh, void *data)
+ return omap_hwmod_set_postsetup_state(oh, *(u8 *)data);
+ }
+
+-/* See irq.c, omap4-common.c and entry-macro.S */
+-void __iomem *omap_irq_base;
+-
+ static void __init omap_common_init_early(void)
+ {
+- omap2_check_revision();
+- omap_ioremap_init();
+ omap_init_consistent_dma_size();
+ }
+
+@@ -363,6 +393,7 @@ static void __init omap_hwmod_init_postsetup(void)
+ void __init omap2420_init_early(void)
+ {
+ omap2_set_globals_242x();
++ omap2xxx_check_revision();
+ omap_common_init_early();
+ omap2xxx_voltagedomains_init();
+ omap242x_powerdomains_init();
+@@ -375,6 +406,7 @@ void __init omap2420_init_early(void)
+ void __init omap2430_init_early(void)
+ {
+ omap2_set_globals_243x();
++ omap2xxx_check_revision();
+ omap_common_init_early();
+ omap2xxx_voltagedomains_init();
+ omap243x_powerdomains_init();
+@@ -393,6 +425,8 @@ void __init omap2430_init_early(void)
+ void __init omap3_init_early(void)
+ {
+ omap2_set_globals_3xxx();
++ omap3xxx_check_revision();
++ omap3xxx_check_features();
+ omap_common_init_early();
+ omap3xxx_voltagedomains_init();
+ omap3xxx_powerdomains_init();
+@@ -422,9 +456,11 @@ void __init am35xx_init_early(void)
+ omap3_init_early();
+ }
+
+-void __init ti816x_init_early(void)
++void __init ti81xx_init_early(void)
+ {
+- omap2_set_globals_ti816x();
++ omap2_set_globals_ti81xx();
++ omap3xxx_check_revision();
++ ti81xx_check_features();
+ omap_common_init_early();
+ omap3xxx_voltagedomains_init();
+ omap3xxx_powerdomains_init();
+@@ -433,15 +469,35 @@ void __init ti816x_init_early(void)
+ omap_hwmod_init_postsetup();
+ omap3xxx_clk_init();
+ }
++
++void __init am33xx_init_early(void)
++{
++ omap2_set_globals_am33xx();
++ omap3xxx_check_revision();
++ am33xx_check_features();
++ omap_common_init_early();
++ am33xx_voltagedomains_init();
++ omap44xx_prminst_init();
++ am33xx_powerdomains_init();
++ omap44xx_cminst_init();
++ am33xx_clockdomains_init();
++ am33xx_hwmod_init();
++ omap_hwmod_init_postsetup();
++ omap3xxx_clk_init();
++}
+ #endif
+
+ #ifdef CONFIG_ARCH_OMAP4
+ void __init omap4430_init_early(void)
+ {
+ omap2_set_globals_443x();
++ omap4xxx_check_revision();
++ omap4xxx_check_features();
+ omap_common_init_early();
+ omap44xx_voltagedomains_init();
++ omap44xx_prminst_init();
+ omap44xx_powerdomains_init();
++ omap44xx_cminst_init();
+ omap44xx_clockdomains_init();
+ omap44xx_hwmod_init();
+ omap_hwmod_init_postsetup();
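
For reference, the AM33xx hunks above follow the standard ARM static-mapping pattern: a map_desc table handed to iotable_init() from the machine's map_io hook. A minimal, self-contained sketch of that pattern is shown below; the window address, size and names are placeholders, not values taken from this patch.

#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/memory.h>
#include <asm/mach/map.h>

/* Placeholder window: 16 MiB of device registers at a fixed virtual address */
#define EXAMPLE_IO_PHYS		0x48000000
#define EXAMPLE_IO_VIRT		0xfa000000
#define EXAMPLE_IO_SIZE		0x01000000

static struct map_desc example_io_desc[] __initdata = {
	{
		.virtual	= EXAMPLE_IO_VIRT,
		.pfn		= __phys_to_pfn(EXAMPLE_IO_PHYS),
		.length		= EXAMPLE_IO_SIZE,
		.type		= MT_DEVICE,
	},
};

static void __init example_map_common_io(void)
{
	/*
	 * iotable_init() registers the table with the ARM mm code so the
	 * window is mapped early, before ioremap() is usable, which is what
	 * omapam33xx_map_common_io() does with omapam33xx_io_desc above.
	 */
	iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
}
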
+diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
+index 65f1be6..419b1a10 100644
+--- a/arch/arm/mach-omap2/irq.c
++++ b/arch/arm/mach-omap2/irq.c
+@@ -15,6 +15,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <mach/hardware.h>
++#include <asm/exception.h>
+ #include <asm/mach/irq.h>
+
+
+@@ -35,6 +36,11 @@
+ /* Number of IRQ state bits in each MIR register */
+ #define IRQ_BITS_PER_REG 32
+
++#define OMAP2_IRQ_BASE OMAP2_L4_IO_ADDRESS(OMAP24XX_IC_BASE)
++#define OMAP3_IRQ_BASE OMAP2_L4_IO_ADDRESS(OMAP34XX_IC_BASE)
++#define INTCPS_SIR_IRQ_OFFSET 0x0040 /* omap2/3 active interrupt offset */
++#define ACTIVEIRQ_MASK 0x7f /* omap2/3 active interrupt bits */
++
+ /*
+ * OMAP2 has a number of different interrupt controllers, each interrupt
+ * controller is identified as its own "bank". Register definitions are
+@@ -143,6 +149,7 @@ omap_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
+
+ static void __init omap_init_irq(u32 base, int nr_irqs)
+ {
++ void __iomem *omap_irq_base;
+ unsigned long nr_of_irqs = 0;
+ unsigned int nr_banks = 0;
+ int i, j;
+@@ -186,11 +193,49 @@ void __init omap3_init_irq(void)
+ omap_init_irq(OMAP34XX_IC_BASE, 96);
+ }
+
+-void __init ti816x_init_irq(void)
++void __init ti81xx_init_irq(void)
+ {
+ omap_init_irq(OMAP34XX_IC_BASE, 128);
+ }
+
++static inline void omap_intc_handle_irq(void __iomem *base_addr, struct pt_regs *regs)
++{
++ u32 irqnr;
++
++ do {
++ irqnr = readl_relaxed(base_addr + 0x98);
++ if (irqnr)
++ goto out;
++
++ irqnr = readl_relaxed(base_addr + 0xb8);
++ if (irqnr)
++ goto out;
++
++ irqnr = readl_relaxed(base_addr + 0xd8);
++#if defined(CONFIG_SOC_OMAPTI816X) || defined(CONFIG_SOC_OMAPAM33XX)
++ if (irqnr)
++ goto out;
++ irqnr = readl_relaxed(base_addr + 0xf8);
++#endif
++
++out:
++ if (!irqnr)
++ break;
++
++ irqnr = readl_relaxed(base_addr + INTCPS_SIR_IRQ_OFFSET);
++ irqnr &= ACTIVEIRQ_MASK;
++
++ if (irqnr)
++ handle_IRQ(irqnr, regs);
++ } while (irqnr);
++}
++
++asmlinkage void __exception_irq_entry omap2_intc_handle_irq(struct pt_regs *regs)
++{
++ void __iomem *base_addr = OMAP2_IRQ_BASE;
++ omap_intc_handle_irq(base_addr, regs);
++}
++
+ #ifdef CONFIG_ARCH_OMAP3
+ static struct omap3_intc_regs intc_context[ARRAY_SIZE(irq_banks)];
+
+@@ -263,4 +308,10 @@ void omap3_intc_resume_idle(void)
+ /* Re-enable autoidle */
+ intc_bank_write_reg(1, &irq_banks[0], INTC_SYSCONFIG);
+ }
++
++asmlinkage void __exception_irq_entry omap3_intc_handle_irq(struct pt_regs *regs)
++{
++ void __iomem *base_addr = OMAP3_IRQ_BASE;
++ omap_intc_handle_irq(base_addr, regs);
++}
+ #endif /* CONFIG_ARCH_OMAP3 */
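
The C interrupt entry added above polls one INTC "pending" word per bank of 32 interrupts before reading the SIR register; the hard-coded offsets 0x98, 0xb8, 0xd8 and 0xf8 are spaced 0x20 apart, with the fourth bank only present on ti81xx/am33xx. A hedged sketch of the same walk with made-up macro names, shown only to make the register layout explicit:

#include <linux/types.h>
#include <linux/io.h>

#define EXAMPLE_INTC_PENDING_IRQ0	0x98	/* bank 0 pending word */
#define EXAMPLE_INTC_BANK_STRIDE	0x20	/* per-bank register stride */

/* Return the first non-zero pending word, or 0 if nothing is pending. */
static u32 example_intc_any_pending(void __iomem *base, int nr_banks)
{
	u32 pending = 0;
	int bank;

	for (bank = 0; bank < nr_banks; bank++) {
		pending = readl_relaxed(base + EXAMPLE_INTC_PENDING_IRQ0 +
					bank * EXAMPLE_INTC_BANK_STRIDE);
		if (pending)
			break;
	}

	return pending;
}
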
+diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c
+index 609ea2d..c822126 100644
+--- a/arch/arm/mach-omap2/mailbox.c
++++ b/arch/arm/mach-omap2/mailbox.c
+@@ -20,25 +20,29 @@
+ #include <mach/irqs.h>
+
+ #define MAILBOX_REVISION 0x000
+-#define MAILBOX_MESSAGE(m) (0x040 + 4 * (m))
+-#define MAILBOX_FIFOSTATUS(m) (0x080 + 4 * (m))
+-#define MAILBOX_MSGSTATUS(m) (0x0c0 + 4 * (m))
+-#define MAILBOX_IRQSTATUS(u) (0x100 + 8 * (u))
+-#define MAILBOX_IRQENABLE(u) (0x104 + 8 * (u))
++#define MAILBOX_MESSAGE(m) (0x040 + 0x4 * (m))
++#define MAILBOX_FIFOSTATUS(m) (0x080 + 0x4 * (m))
++#define MAILBOX_MSGSTATUS(m) (0x0c0 + 0x4 * (m))
++#define MAILBOX_IRQSTATUS(u) (0x100 + 0x8 * (u))
++#define MAILBOX_IRQENABLE(u) (0x104 + 0x8 * (u))
+
+-#define OMAP4_MAILBOX_IRQSTATUS(u) (0x104 + 10 * (u))
+-#define OMAP4_MAILBOX_IRQENABLE(u) (0x108 + 10 * (u))
+-#define OMAP4_MAILBOX_IRQENABLE_CLR(u) (0x10c + 10 * (u))
++#define OMAP4_MAILBOX_IRQSTATUS(u) (0x104 + 0x10 * (u))
++#define OMAP4_MAILBOX_IRQENABLE(u) (0x108 + 0x10 * (u))
++#define OMAP4_MAILBOX_IRQENABLE_CLR(u) (0x10c + 0x10 * (u))
+
+ #define MAILBOX_IRQ_NEWMSG(m) (1 << (2 * (m)))
+ #define MAILBOX_IRQ_NOTFULL(m) (1 << (2 * (m) + 1))
+
++/* TODO: This can and should be based on #users and #sub-modules */
+ #define MBOX_REG_SIZE 0x120
+
+ #define OMAP4_MBOX_REG_SIZE 0x130
+
++#define AM33XX_MBOX_REG_SIZE 0x140
++
+ #define MBOX_NR_REGS (MBOX_REG_SIZE / sizeof(u32))
+ #define OMAP4_MBOX_NR_REGS (OMAP4_MBOX_REG_SIZE / sizeof(u32))
++#define AM33XX_MBOX_NR_REGS (AM33XX_MBOX_REG_SIZE / sizeof(u32))
+
+ static void __iomem *mbox_base;
+
+@@ -123,6 +127,20 @@ static int omap2_mbox_fifo_full(struct omap_mbox *mbox)
+ return mbox_read_reg(fifo->fifo_stat);
+ }
+
++static int omap2_mbox_fifo_needs_flush(struct omap_mbox *mbox)
++{
++ struct omap_mbox2_fifo *fifo =
++ &((struct omap_mbox2_priv *)mbox->priv)->tx_fifo;
++ return (mbox_read_reg(fifo->msg_stat) == 0);
++}
++
++static mbox_msg_t omap2_mbox_fifo_readback(struct omap_mbox *mbox)
++{
++ struct omap_mbox2_fifo *fifo =
++ &((struct omap_mbox2_priv *)mbox->priv)->tx_fifo;
++ return (mbox_msg_t) mbox_read_reg(fifo->msg);
++}
++
+ /* Mailbox IRQ handle functions */
+ static void omap2_mbox_enable_irq(struct omap_mbox *mbox,
+ omap_mbox_type_t irq)
+@@ -141,7 +159,7 @@ static void omap2_mbox_disable_irq(struct omap_mbox *mbox,
+ struct omap_mbox2_priv *p = mbox->priv;
+ u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
+
+- if (!cpu_is_omap44xx())
++ if (!cpu_is_omap44xx() && !cpu_is_am33xx())
+ bit = mbox_read_reg(p->irqdisable) & ~bit;
+
+ mbox_write_reg(bit, p->irqdisable);
+@@ -205,19 +223,21 @@ static void omap2_mbox_restore_ctx(struct omap_mbox *mbox)
+ }
+
+ static struct omap_mbox_ops omap2_mbox_ops = {
+- .type = OMAP_MBOX_TYPE2,
+- .startup = omap2_mbox_startup,
+- .shutdown = omap2_mbox_shutdown,
+- .fifo_read = omap2_mbox_fifo_read,
+- .fifo_write = omap2_mbox_fifo_write,
+- .fifo_empty = omap2_mbox_fifo_empty,
+- .fifo_full = omap2_mbox_fifo_full,
+- .enable_irq = omap2_mbox_enable_irq,
+- .disable_irq = omap2_mbox_disable_irq,
+- .ack_irq = omap2_mbox_ack_irq,
+- .is_irq = omap2_mbox_is_irq,
+- .save_ctx = omap2_mbox_save_ctx,
+- .restore_ctx = omap2_mbox_restore_ctx,
++ .type = OMAP_MBOX_TYPE2,
++ .startup = omap2_mbox_startup,
++ .shutdown = omap2_mbox_shutdown,
++ .fifo_read = omap2_mbox_fifo_read,
++ .fifo_write = omap2_mbox_fifo_write,
++ .fifo_empty = omap2_mbox_fifo_empty,
++ .fifo_full = omap2_mbox_fifo_full,
++ .fifo_needs_flush = omap2_mbox_fifo_needs_flush,
++ .fifo_readback = omap2_mbox_fifo_readback,
++ .enable_irq = omap2_mbox_enable_irq,
++ .disable_irq = omap2_mbox_disable_irq,
++ .ack_irq = omap2_mbox_ack_irq,
++ .is_irq = omap2_mbox_is_irq,
++ .save_ctx = omap2_mbox_save_ctx,
++ .restore_ctx = omap2_mbox_restore_ctx,
+ };
+
+ /*
+@@ -229,7 +249,6 @@ static struct omap_mbox_ops omap2_mbox_ops = {
+
+ /* FIXME: the following structs should be filled automatically by the user id */
+
+-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP2)
+ /* DSP */
+ static struct omap_mbox2_priv omap2_mbox_dsp_priv = {
+ .tx_fifo = {
+@@ -252,13 +271,9 @@ struct omap_mbox mbox_dsp_info = {
+ .ops = &omap2_mbox_ops,
+ .priv = &omap2_mbox_dsp_priv,
+ };
+-#endif
+
+-#if defined(CONFIG_ARCH_OMAP3)
+ struct omap_mbox *omap3_mboxes[] = { &mbox_dsp_info, NULL };
+-#endif
+
+-#if defined(CONFIG_SOC_OMAP2420)
+ /* IVA */
+ static struct omap_mbox2_priv omap2_mbox_iva_priv = {
+ .tx_fifo = {
+@@ -283,9 +298,34 @@ static struct omap_mbox mbox_iva_info = {
+ };
+
+ struct omap_mbox *omap2_mboxes[] = { &mbox_dsp_info, &mbox_iva_info, NULL };
+-#endif
+
+-#if defined(CONFIG_ARCH_OMAP4)
++/* A8 -> Wakeup-M3 */
++static struct omap_mbox2_priv omap2_mbox_m3_priv = {
++ .tx_fifo = {
++ .msg = MAILBOX_MESSAGE(0),
++ .fifo_stat = MAILBOX_FIFOSTATUS(0),
++ .msg_stat = MAILBOX_MSGSTATUS(0),
++ },
++ /* TODO: No M3->A8 so this needs to be removed */
++ .rx_fifo = {
++ .msg = MAILBOX_MESSAGE(1),
++ .msg_stat = MAILBOX_MSGSTATUS(1),
++ },
++ .irqenable = OMAP4_MAILBOX_IRQENABLE(3),
++ .irqstatus = OMAP4_MAILBOX_IRQSTATUS(3),
++ .notfull_bit = MAILBOX_IRQ_NOTFULL(0),
++ .newmsg_bit = MAILBOX_IRQ_NEWMSG(0),
++ .irqdisable = OMAP4_MAILBOX_IRQENABLE_CLR(3),
++};
++
++struct omap_mbox wkup_m3_info = {
++ .name = "wkup_m3",
++ .ops = &omap2_mbox_ops,
++ .priv = &omap2_mbox_m3_priv,
++};
++
++struct omap_mbox *am33xx_mboxes[] = { &wkup_m3_info, NULL };
++
+ /* OMAP4 */
+ static struct omap_mbox2_priv omap2_mbox_1_priv = {
+ .tx_fifo = {
+@@ -332,7 +372,6 @@ struct omap_mbox mbox_2_info = {
+ };
+
+ struct omap_mbox *omap4_mboxes[] = { &mbox_1_info, &mbox_2_info, NULL };
+-#endif
+
+ static int __devinit omap2_mbox_probe(struct platform_device *pdev)
+ {
+@@ -342,14 +381,15 @@ static int __devinit omap2_mbox_probe(struct platform_device *pdev)
+
+ if (false)
+ ;
+-#if defined(CONFIG_ARCH_OMAP3)
+- else if (cpu_is_omap34xx()) {
++ else if (cpu_is_omap34xx() && !cpu_is_am33xx()) {
+ list = omap3_mboxes;
+
+ list[0]->irq = platform_get_irq(pdev, 0);
++ } else if (cpu_is_am33xx()) {
++ list = am33xx_mboxes;
++
++ list[0]->irq = platform_get_irq(pdev, 0);
+ }
+-#endif
+-#if defined(CONFIG_ARCH_OMAP2)
+ else if (cpu_is_omap2430()) {
+ list = omap2_mboxes;
+
+@@ -360,14 +400,11 @@ static int __devinit omap2_mbox_probe(struct platform_device *pdev)
+ list[0]->irq = platform_get_irq_byname(pdev, "dsp");
+ list[1]->irq = platform_get_irq_byname(pdev, "iva");
+ }
+-#endif
+-#if defined(CONFIG_ARCH_OMAP4)
+ else if (cpu_is_omap44xx()) {
+ list = omap4_mboxes;
+
+ list[0]->irq = list[1]->irq = platform_get_irq(pdev, 0);
+ }
+-#endif
+ else {
+ pr_err("%s: platform not supported\n", __func__);
+ return -ENODEV;
+@@ -412,7 +449,7 @@ static void __exit omap2_mbox_exit(void)
+ platform_driver_unregister(&omap2_mbox_driver);
+ }
+
+-module_init(omap2_mbox_init);
++device_initcall(omap2_mbox_init);
+ module_exit(omap2_mbox_exit);
+
+ MODULE_LICENSE("GPL v2");
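
The "wkup_m3" mailbox registered above is looked up by clients through the existing plat-omap mailbox API; the new fifo_needs_flush/fifo_readback ops are consumed by that framework internally. A hedged sketch of how a client might push one command word to the wkup_m3 mailbox; the command value, function names and the immediate put() are placeholders, not taken from this patch.

#include <linux/err.h>
#include <linux/notifier.h>
#include <plat/mailbox.h>

static int example_wkup_m3_notify(struct notifier_block *nb,
				  unsigned long val, void *data)
{
	/* A real client would handle messages delivered by the mailbox
	 * framework here; this sketch only acknowledges them. */
	return NOTIFY_OK;
}

static struct notifier_block example_wkup_m3_nb = {
	.notifier_call = example_wkup_m3_notify,
};

static int example_ping_wkup_m3(void)
{
	struct omap_mbox *mbox;
	int ret;

	/* Look up the mailbox registered as "wkup_m3" by the code above */
	mbox = omap_mbox_get("wkup_m3", &example_wkup_m3_nb);
	if (IS_ERR(mbox))
		return PTR_ERR(mbox);

	/* 0x1234 is a placeholder command word, not a real A8<->M3 opcode */
	ret = omap_mbox_msg_send(mbox, 0x1234);

	omap_mbox_put(mbox, &example_wkup_m3_nb);
	return ret;
}
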
+diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
+index 655e948..e1cc75d 100644
+--- a/arch/arm/mach-omap2/mux.c
++++ b/arch/arm/mach-omap2/mux.c
+@@ -32,6 +32,8 @@
+ #include <linux/debugfs.h>
+ #include <linux/seq_file.h>
+ #include <linux/uaccess.h>
++#include <linux/irq.h>
++#include <linux/interrupt.h>
+
+ #include <asm/system.h>
+
+@@ -39,6 +41,7 @@
+
+ #include "control.h"
+ #include "mux.h"
++#include "prm.h"
+
+ #define OMAP_MUX_BASE_OFFSET 0x30 /* Offset from CTRL_BASE */
+ #define OMAP_MUX_BASE_SZ 0x5ca
+@@ -306,7 +309,8 @@ omap_hwmod_mux_init(struct omap_device_pad *bpads, int nr_pads)
+ pad->idle = bpad->idle;
+ pad->off = bpad->off;
+
+- if (pad->flags & OMAP_DEVICE_PAD_REMUX)
++ if (pad->flags &
++ (OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP))
+ nr_pads_dynamic++;
+
+ pr_debug("%s: Initialized %s\n", __func__, pad->name);
+@@ -331,7 +335,8 @@ omap_hwmod_mux_init(struct omap_device_pad *bpads, int nr_pads)
+ for (i = 0; i < hmux->nr_pads; i++) {
+ struct omap_device_pad *pad = &hmux->pads[i];
+
+- if (pad->flags & OMAP_DEVICE_PAD_REMUX) {
++ if (pad->flags &
++ (OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP)) {
+ pr_debug("%s: pad %s tagged dynamic\n",
+ __func__, pad->name);
+ hmux->pads_dynamic[nr_pads_dynamic] = pad;
+@@ -351,6 +356,78 @@ err1:
+ return NULL;
+ }
+
++/**
++ * omap_hwmod_mux_scan_wakeups - omap hwmod scan wakeup pads
++ * @hmux: Pads for a hwmod
++ * @mpu_irqs: MPU irq array for a hwmod
++ *
++ * Scans the wakeup status of pads for a single hwmod. If an irq
++ * array is defined for this mux, the parser will call the registered
++ * ISRs for corresponding pads, otherwise the parser will stop at the
++ * first wakeup active pad and return. Returns true if there is a
++ * pending and non-served wakeup event for the mux, otherwise false.
++ */
++static bool omap_hwmod_mux_scan_wakeups(struct omap_hwmod_mux_info *hmux,
++ struct omap_hwmod_irq_info *mpu_irqs)
++{
++ int i, irq;
++ unsigned int val;
++ u32 handled_irqs = 0;
++
++ for (i = 0; i < hmux->nr_pads_dynamic; i++) {
++ struct omap_device_pad *pad = hmux->pads_dynamic[i];
++
++ if (!(pad->flags & OMAP_DEVICE_PAD_WAKEUP) ||
++ !(pad->idle & OMAP_WAKEUP_EN))
++ continue;
++
++ val = omap_mux_read(pad->partition, pad->mux->reg_offset);
++ if (!(val & OMAP_WAKEUP_EVENT))
++ continue;
++
++ if (!hmux->irqs)
++ return true;
++
++ irq = hmux->irqs[i];
++ /* make sure we only handle each irq once */
++ if (handled_irqs & 1 << irq)
++ continue;
++
++ handled_irqs |= 1 << irq;
++
++ generic_handle_irq(mpu_irqs[irq].irq);
++ }
++
++ return false;
++}
++
++/**
++ * _omap_hwmod_mux_handle_irq - Process wakeup events for a single hwmod
++ *
++ * Checks a single hwmod for every wakeup capable pad to see if there is an
++ * active wakeup event. If this is the case, call the corresponding ISR.
++ */
++static int _omap_hwmod_mux_handle_irq(struct omap_hwmod *oh, void *data)
++{
++ if (!oh->mux || !oh->mux->enabled)
++ return 0;
++ if (omap_hwmod_mux_scan_wakeups(oh->mux, oh->mpu_irqs))
++ generic_handle_irq(oh->mpu_irqs[0].irq);
++ return 0;
++}
++
++/**
++ * omap_hwmod_mux_handle_irq - Process pad wakeup irqs.
++ *
++ * Calls a function for each registered omap_hwmod to check
++ * pad wakeup statuses.
++ */
++static irqreturn_t omap_hwmod_mux_handle_irq(int irq, void *unused)
++{
++ omap_hwmod_for_each(_omap_hwmod_mux_handle_irq, NULL);
++ return IRQ_HANDLED;
++}
++
+ /* Assumes the calling function takes care of locking */
+ void omap_hwmod_mux(struct omap_hwmod_mux_info *hmux, u8 state)
+ {
+@@ -715,6 +792,7 @@ static void __init omap_mux_free_names(struct omap_mux *m)
+ static int __init omap_mux_late_init(void)
+ {
+ struct omap_mux_partition *partition;
++ int ret;
+
+ list_for_each_entry(partition, &mux_partitions, node) {
+ struct omap_mux_entry *e, *tmp;
+@@ -735,6 +813,13 @@ static int __init omap_mux_late_init(void)
+ }
+ }
+
++ ret = request_irq(omap_prcm_event_to_irq("io"),
++ omap_hwmod_mux_handle_irq, IRQF_SHARED | IRQF_NO_SUSPEND,
++ "hwmod_io", omap_mux_late_init);
++
++ if (ret)
++ pr_warning("mux: Failed to setup hwmod io irq %d\n", ret);
++
+ omap_mux_dbg_init();
+
+ return 0;
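
The scan-and-dispatch path added above acts on pads that have been flagged for idle-time wakeup. A hedged illustration of such a pad in the usual board-file style; the signal name and mux values are examples, and whether OMAP_DEVICE_PAD_WAKEUP is set statically like this or flipped at runtime is a board-integration detail outside this patch.

#include <linux/init.h>

#include "mux.h"

static struct omap_device_pad example_uart_pads[] __initdata = {
	{
		.name	= "uart1_rx.uart1_rx",
		.flags	= OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
		.enable	= OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
		/*
		 * OMAP_WAKEUP_EN in .idle is what
		 * omap_hwmod_mux_scan_wakeups() checks before reading the
		 * pad's wakeup-event bit from the padconf register.
		 */
		.idle	= OMAP_PIN_INPUT_PULLUP | OMAP_WAKEUP_EN |
			  OMAP_MUX_MODE0,
	},
};
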
+diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h
+index 2132308..11671be 100644
+--- a/arch/arm/mach-omap2/mux.h
++++ b/arch/arm/mach-omap2/mux.h
+@@ -11,6 +11,7 @@
+ #include "mux2430.h"
+ #include "mux34xx.h"
+ #include "mux44xx.h"
++#include "mux33xx.h"
+
+ #define OMAP_MUX_TERMINATOR 0xffff
+
+@@ -41,6 +42,28 @@
+ /* 44xx specific mux bit defines */
+ #define OMAP_WAKEUP_EVENT (1 << 15)
+
++/* am33xx specific mux bit defines */
++#define AM33XX_SLEWCTRL_FAST (0 << 6)
++#define AM33XX_SLEWCTRL_SLOW (1 << 6)
++#define AM33XX_INPUT_EN (1 << 5)
++#define AM33XX_PULL_UP (1 << 4)
++/* bit 3: 0 - enable, 1 - disable for pull enable */
++#define AM33XX_PULL_DISA (1 << 3)
++#define AM33XX_PULL_ENBL (0 << 3)
++
++/* The output pin definitions could have the pull disabled, but this is
++ * not done, for two reasons:
++ * 1. AM33XX_MUX will take care of it.
++ * 2. If the pull were disabled in the output macro, combining the output
++ *    and input pull-up macros would disable the pull resistor, and
++ *    AM33XX_MUX could not then restore the correct pull setting.
++ */
++#define AM33XX_PIN_OUTPUT (0)
++#define AM33XX_PIN_OUTPUT_PULLUP (AM33XX_PULL_UP)
++#define AM33XX_PIN_INPUT (AM33XX_INPUT_EN | AM33XX_PULL_DISA)
++#define AM33XX_PIN_INPUT_PULLUP (AM33XX_INPUT_EN | AM33XX_PULL_UP)
++#define AM33XX_PIN_INPUT_PULLDOWN (AM33XX_INPUT_EN)
++
+ /* Active pin states */
+ #define OMAP_PIN_OUTPUT 0
+ #define OMAP_PIN_INPUT OMAP_INPUT_EN
+@@ -331,6 +354,12 @@ int omap4_mux_init(struct omap_board_mux *board_subset,
+ struct omap_board_mux *board_wkup_subset, int flags);
+
+ /**
++ * am33xx_mux_init() - initialize mux system along with board specific set
++ * @board_mux: Board specific mux table
++ */
++int am33xx_mux_init(struct omap_board_mux *board_mux);
++
++/**
+ * omap_mux_init - private mux init function, do not call
+ */
+ int omap_mux_init(const char *name, u32 flags,
+diff --git a/arch/arm/mach-omap2/mux33xx.c b/arch/arm/mach-omap2/mux33xx.c
+new file mode 100644
+index 0000000..25dcedb
+--- /dev/null
++++ b/arch/arm/mach-omap2/mux33xx.c
+@@ -0,0 +1,619 @@
++/*
++ * AM33XX mux data
++ *
++ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * Derived from: arch/arm/mach-omap2/mux34xx.c Original copyright follows:
++ *
++ * Copyright (C) 2009 Nokia
++ * Copyright (C) 2009 Texas Instruments
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++
++#include "mux.h"
++
++#ifdef CONFIG_OMAP_MUX
++
++#define _AM33XX_MUXENTRY(M0, g, m0, m1, m2, m3, m4, m5, m6, m7) \
++{ \
++ .reg_offset = (AM33XX_CONTROL_PADCONF_##M0##_OFFSET), \
++ .gpio = (g), \
++ .muxnames = { m0, m1, m2, m3, m4, m5, m6, m7 }, \
++}
++
++/* AM33XX pin mux super set */
++static struct omap_mux __initdata am33xx_muxmodes[] = {
++ _AM33XX_MUXENTRY(GPMC_AD0, 0,
++ "gpmc_ad0", "mmc1_dat0", NULL, NULL,
++ NULL, NULL, NULL, "gpio1_0"),
++ _AM33XX_MUXENTRY(GPMC_AD1, 0,
++ "gpmc_ad1", "mmc1_dat1", NULL, NULL,
++ NULL, NULL, NULL, "gpio1_1"),
++ _AM33XX_MUXENTRY(GPMC_AD2, 0,
++ "gpmc_ad2", "mmc1_dat2", NULL, NULL,
++ NULL, NULL, NULL, "gpio1_2"),
++ _AM33XX_MUXENTRY(GPMC_AD3, 0,
++ "gpmc_ad3", "mmc1_dat3", NULL, NULL,
++ NULL, NULL, NULL, "gpio1_3"),
++ _AM33XX_MUXENTRY(GPMC_AD4, 0,
++ "gpmc_ad4", "mmc1_dat4", NULL, NULL,
++ NULL, NULL, NULL, "gpio1_4"),
++ _AM33XX_MUXENTRY(GPMC_AD5, 0,
++ "gpmc_ad5", "mmc1_dat5", NULL, NULL,
++ NULL, NULL, NULL, "gpio1_5"),
++ _AM33XX_MUXENTRY(GPMC_AD6, 0,
++ "gpmc_ad6", "mmc1_dat6", NULL, NULL,
++ NULL, NULL, NULL, "gpio1_6"),
++ _AM33XX_MUXENTRY(GPMC_AD7, 0,
++ "gpmc_ad7", "mmc1_dat7", NULL, NULL,
++ NULL, NULL, NULL, "gpio1_7"),
++ _AM33XX_MUXENTRY(GPMC_AD8, 0,
++ "gpmc_ad8", "lcd_data16", "mmc1_dat0", "mmc2_dat4",
++ NULL, NULL, NULL, "gpio0_22"),
++ _AM33XX_MUXENTRY(GPMC_AD9, 0,
++ "gpmc_ad9", "lcd_data17", "mmc1_dat1", "mmc2_dat5",
++ "ehrpwm2B", NULL, NULL, "gpio0_23"),
++ _AM33XX_MUXENTRY(GPMC_AD10, 0,
++ "gpmc_ad10", "lcd_data18", "mmc1_dat2", "mmc2_dat6",
++ NULL, NULL, NULL, "gpio0_26"),
++ _AM33XX_MUXENTRY(GPMC_AD11, 0,
++ "gpmc_ad11", "lcd_data19", "mmc1_dat3", "mmc2_dat7",
++ NULL, NULL, NULL, "gpio0_27"),
++ _AM33XX_MUXENTRY(GPMC_AD12, 0,
++ "gpmc_ad12", "lcd_data20", "mmc1_dat4", "mmc2_dat0",
++ NULL, NULL, NULL, "gpio1_12"),
++ _AM33XX_MUXENTRY(GPMC_AD13, 0,
++ "gpmc_ad13", "lcd_data21", "mmc1_dat5", "mmc2_dat1",
++ NULL, NULL, NULL, "gpio1_13"),
++ _AM33XX_MUXENTRY(GPMC_AD14, 0,
++ "gpmc_ad14", "lcd_data22", "mmc1_dat6", "mmc2_dat2",
++ NULL, NULL, NULL, "gpio1_14"),
++ _AM33XX_MUXENTRY(GPMC_AD15, 0,
++ "gpmc_ad15", "lcd_data23", "mmc1_dat7", "mmc2_dat3",
++ NULL, NULL, NULL, "gpio1_15"),
++ _AM33XX_MUXENTRY(GPMC_A0, 0,
++ "gpmc_a0", "mii2_txen", "rgmii2_tctl", "rmii2_txen",
++ NULL, NULL, NULL, "gpio1_16"),
++ _AM33XX_MUXENTRY(GPMC_A1, 0,
++ "gpmc_a1", "mii2_rxdv", "rgmii2_rctl", "mmc2_dat0",
++ NULL, NULL, NULL, "gpio1_17"),
++ _AM33XX_MUXENTRY(GPMC_A2, 0,
++ "gpmc_a2", "mii2_txd3", "rgmii2_td3", "mmc2_dat1",
++ NULL, NULL, "ehrpwm1A", "gpio1_18"),
++ _AM33XX_MUXENTRY(GPMC_A3, 0,
++ "gpmc_a3", "mii2_txd2", "rgmii2_td2", "mmc2_dat2",
++ NULL, NULL, NULL, "gpio1_19"),
++ _AM33XX_MUXENTRY(GPMC_A4, 0,
++ "gpmc_a4", "mii2_txd1", "rgmii2_td1", "rmii2_txd1",
++ "gpmc_a20", NULL, NULL, "gpio1_20"),
++ _AM33XX_MUXENTRY(GPMC_A5, 0,
++ "gpmc_a5", "mii2_txd0", "rgmii2_td0", "rmii2_txd0",
++ "gpmc_a21", NULL, NULL, "gpio1_21"),
++ _AM33XX_MUXENTRY(GPMC_A6, 0,
++ "gpmc_a6", "mii2_txclk", "rgmii2_tclk", "mmc2_dat4",
++ "gpmc_a22", NULL, NULL, "gpio1_22"),
++ _AM33XX_MUXENTRY(GPMC_A7, 0,
++ "gpmc_a7", "mii2_rxclk", "rgmii2_rclk", "mmc2_dat5",
++ NULL, NULL, NULL, "gpio1_23"),
++ _AM33XX_MUXENTRY(GPMC_A8, 0,
++ "gpmc_a8", "mii2_rxd3", "rgmii2_rd3", "mmc2_dat6",
++ NULL, NULL, "mcasp0_aclkx", "gpio1_24"),
++ _AM33XX_MUXENTRY(GPMC_A9, 0,
++ "gpmc_a9", "mii2_rxd2", "rgmii2_rd2", "mmc2_dat7",
++ NULL, NULL, "mcasp0_fsx", "gpio1_25"),
++ _AM33XX_MUXENTRY(GPMC_A10, 0,
++ "gpmc_a10", "mii2_rxd1", "rgmii2_rd1", "rmii2_rxd1",
++ NULL, NULL, "mcasp0_axr0", "gpio1_26"),
++ _AM33XX_MUXENTRY(GPMC_A11, 0,
++ "gpmc_a11", "mii2_rxd0", "rgmii2_rd0", "rmii2_rxd0",
++ NULL, NULL, "mcasp0_axr1", "gpio1_27"),
++ _AM33XX_MUXENTRY(GPMC_WAIT0, 0,
++ "gpmc_wait0", "mii2_crs", NULL, "rmii2_crs_dv",
++ "mmc1_sdcd", NULL, NULL, "gpio0_30"),
++ _AM33XX_MUXENTRY(GPMC_WPN, 0,
++ "gpmc_wpn", "mii2_rxerr", NULL, "rmii2_rxerr",
++ "mmc2_sdcd", NULL, NULL, "gpio0_31"),
++ _AM33XX_MUXENTRY(GPMC_BEN1, 0,
++ "gpmc_ben1", "mii2_col", NULL, "mmc2_dat3",
++ NULL, NULL, "mcasp0_aclkr", "gpio1_28"),
++ _AM33XX_MUXENTRY(GPMC_CSN0, 0,
++ "gpmc_csn0", NULL, NULL, NULL,
++ NULL, NULL, NULL, "gpio1_29"),
++ _AM33XX_MUXENTRY(GPMC_CSN1, 0,
++ "gpmc_csn1", NULL, "mmc1_clk", NULL,
++ NULL, NULL, NULL, "gpio1_30"),
++ _AM33XX_MUXENTRY(GPMC_CSN2, 0,
++ "gpmc_csn2", NULL, "mmc1_cmd", NULL,
++ NULL, NULL, NULL, "gpio1_31"),
++ _AM33XX_MUXENTRY(GPMC_CSN3, 0,
++ "gpmc_csn3", NULL, NULL, "mmc2_cmd",
++ NULL, NULL, NULL, "gpio2_0"),
++ _AM33XX_MUXENTRY(GPMC_CLK, 0,
++ "gpmc_clk", "lcd_memory_clk_mux", NULL, "mmc2_clk",
++ NULL, NULL, "mcasp0_fsr", "gpio2_1"),
++ _AM33XX_MUXENTRY(GPMC_ADVN_ALE, 0,
++ "gpmc_advn_ale", NULL, NULL, NULL,
++ NULL, NULL, NULL, "mmc1_sdcd"),
++ _AM33XX_MUXENTRY(GPMC_OEN_REN, 0,
++ "gpmc_oen_ren", NULL, NULL, NULL,
++ NULL, NULL, NULL, "gpio2_3"),
++ _AM33XX_MUXENTRY(GPMC_WEN, 0,
++ "gpmc_wen", NULL, NULL, NULL,
++ NULL, NULL, NULL, "gpio2_4"),
++ _AM33XX_MUXENTRY(GPMC_BEN0_CLE, 0,
++ "gpmc_ben0_cle", NULL, NULL, NULL,
++ NULL, NULL, NULL, "gpio2_5"),
++ _AM33XX_MUXENTRY(LCD_DATA0, 0,
++ "lcd_data0", "gpmc_a0", NULL, NULL,
++ NULL, NULL, NULL, "gpio2_6"),
++ _AM33XX_MUXENTRY(LCD_DATA1, 0,
++ "lcd_data1", "gpmc_a1", NULL, NULL,
++ NULL, NULL, NULL, "gpio2_7"),
++ _AM33XX_MUXENTRY(LCD_DATA2, 0,
++ "lcd_data2", "gpmc_a2", NULL, NULL,
++ NULL, NULL, NULL, "gpio2_8"),
++ _AM33XX_MUXENTRY(LCD_DATA3, 0,
++ "lcd_data3", "gpmc_a3", NULL, NULL,
++ NULL, NULL, NULL, "gpio2_9"),
++ _AM33XX_MUXENTRY(LCD_DATA4, 0,
++ "lcd_data4", "gpmc_a4", NULL, NULL,
++ NULL, NULL, NULL, "gpio2_10"),
++ _AM33XX_MUXENTRY(LCD_DATA5, 0,
++ "lcd_data5", "gpmc_a5", NULL, NULL,
++ NULL, NULL, NULL, "gpio2_11"),
++ _AM33XX_MUXENTRY(LCD_DATA6, 0,
++ "lcd_data6", "gpmc_a6", NULL, NULL,
++ NULL, NULL, NULL, "gpio2_12"),
++ _AM33XX_MUXENTRY(LCD_DATA7, 0,
++ "lcd_data7", "gpmc_a7", NULL, NULL,
++ NULL, NULL, NULL, "gpio2_13"),
++ _AM33XX_MUXENTRY(LCD_DATA8, 0,
++ "lcd_data8", "gpmc_a12", NULL, "mcasp0_aclkx",
++ NULL, NULL, "uart2_ctsn", "gpio2_14"),
++ _AM33XX_MUXENTRY(LCD_DATA9, 0,
++ "lcd_data9", "gpmc_a13", NULL, "mcasp0_fsx",
++ NULL, NULL, "uart2_rtsn", "gpio2_15"),
++ _AM33XX_MUXENTRY(LCD_DATA10, 0,
++ "lcd_data10", "gpmc_a14", NULL, "mcasp0_axr0",
++ NULL, NULL, NULL, "gpio2_16"),
++ _AM33XX_MUXENTRY(LCD_DATA11, 0,
++ "lcd_data11", "gpmc_a15", NULL, "mcasp0_ahclkr",
++ "mcasp0_axr2", NULL, NULL, "gpio2_17"),
++ _AM33XX_MUXENTRY(LCD_DATA12, 0,
++ "lcd_data12", "gpmc_a16", NULL, "mcasp0_aclkr",
++ "mcasp0_axr2", NULL, NULL, "gpio0_8"),
++ _AM33XX_MUXENTRY(LCD_DATA13, 0,
++ "lcd_data13", "gpmc_a17", NULL, "mcasp0_fsr",
++ "mcasp0_axr3", NULL, NULL, "gpio0_9"),
++ _AM33XX_MUXENTRY(LCD_DATA14, 0,
++ "lcd_data14", "gpmc_a18", NULL, "mcasp0_axr1",
++ NULL, NULL, NULL, "gpio0_10"),
++ _AM33XX_MUXENTRY(LCD_DATA15, 0,
++ "lcd_data15", "gpmc_a19", NULL, "mcasp0_ahclkx",
++ "mcasp0_axr3", NULL, NULL, "gpio0_11"),
++ _AM33XX_MUXENTRY(LCD_VSYNC, 0,
++ "lcd_vsync", NULL, NULL, NULL,
++ NULL, NULL, NULL, "gpio2_22"),
++ _AM33XX_MUXENTRY(LCD_HSYNC, 0,
++ "lcd_hsync", NULL, NULL, NULL,
++ NULL, NULL, NULL, "gpio2_23"),
++ _AM33XX_MUXENTRY(LCD_PCLK, 0,
++ "lcd_pclk", NULL, NULL, NULL,
++ NULL, NULL, NULL, "gpio2_24"),
++ _AM33XX_MUXENTRY(LCD_AC_BIAS_EN, 0,
++ "lcd_ac_bias_en", NULL, NULL, NULL,
++ NULL, NULL, NULL, "gpio2_25"),
++ _AM33XX_MUXENTRY(MMC0_DAT3, 0,
++ "mmc0_dat3", NULL, NULL, NULL,
++ NULL, NULL, NULL, "gpio2_26"),
++ _AM33XX_MUXENTRY(MMC0_DAT2, 0,
++ "mmc0_dat2", NULL, NULL, NULL,
++ NULL, NULL, NULL, "gpio2_27"),
++ _AM33XX_MUXENTRY(MMC0_DAT1, 0,
++ "mmc0_dat1", NULL, NULL, NULL,
++ NULL, NULL, NULL, "gpio2_28"),
++ _AM33XX_MUXENTRY(MMC0_DAT0, 0,
++ "mmc0_dat0", NULL, NULL, NULL,
++ NULL, NULL, NULL, "gpio2_29"),
++ _AM33XX_MUXENTRY(MMC0_CLK, 0,
++ "mmc0_clk", NULL, NULL, NULL,
++ NULL, NULL, NULL, "gpio2_30"),
++ _AM33XX_MUXENTRY(MMC0_CMD, 0,
++ "mmc0_cmd", NULL, NULL, NULL,
++ NULL, NULL, NULL, "gpio2_31"),
++ _AM33XX_MUXENTRY(MII1_COL, 0,
++ "mii1_col", "rmii2_refclk", "spi1_sclk", NULL,
++ "mcasp1_axr2", "mmc2_dat3", "mcasp0_axr2", "gpio3_0"),
++ _AM33XX_MUXENTRY(MII1_CRS, 0,
++ "mii1_crs", "rmii1_crs_dv", "spi1_d0", "i2c1_sda",
++ "mcasp1_aclkx", NULL, NULL, "gpio3_1"),
++ _AM33XX_MUXENTRY(MII1_RXERR, 0,
++ "mii1_rxerr", "rmii1_rxerr", "spi1_d1", "i2c1_scl",
++ "mcasp1_fsx", NULL, NULL, "gpio3_2"),
++ _AM33XX_MUXENTRY(MII1_TXEN, 0,
++ "mii1_txen", "rmii1_txen", "rgmii1_tctl", NULL,
++ "mcasp1_axr0", NULL, "mmc2_cmd", "gpio3_3"),
++ _AM33XX_MUXENTRY(MII1_RXDV, 0,
++ "mii1_rxdv", NULL, "rgmii1_rctl", NULL,
++ "mcasp1_aclx", "mmc2_dat0", "mcasp0_aclkr", "gpio3_4"),
++ _AM33XX_MUXENTRY(MII1_TXD3, 0,
++ "mii1_txd3", NULL, "rgmii1_td3", NULL,
++ "mcasp1_fsx", "mmc2_dat1", "mcasp0_fsr", "gpio0_16"),
++ _AM33XX_MUXENTRY(MII1_TXD2, 0,
++ "mii1_txd2", NULL, "rgmii1_td2", NULL,
++ "mcasp1_axr0", "mmc2_dat2", "mcasp0_ahclkx", "gpio0_17"),
++ _AM33XX_MUXENTRY(MII1_TXD1, 0,
++ "mii1_txd1", "rmii1_txd1", "rgmii1_td1", "mcasp1_fsr",
++ "mcasp1_axr1", NULL, "mmc1_cmd", "gpio0_21"),
++ _AM33XX_MUXENTRY(MII1_TXD0, 0,
++ "mii1_txd0", "rmii1_txd0", "rgmii1_td0", "mcasp1_axr2",
++ "mcasp1_aclkr", NULL, "mmc1_clk", "gpio0_28"),
++ _AM33XX_MUXENTRY(MII1_TXCLK, 0,
++ "mii1_txclk", NULL, "rgmii1_tclk", "mmc0_dat7",
++ "mmc1_dat0", NULL, "mcasp0_aclkx", "gpio3_9"),
++ _AM33XX_MUXENTRY(MII1_RXCLK, 0,
++ "mii1_rxclk", NULL, "rgmii1_rclk", "mmc0_dat6",
++ "mmc1_dat1", NULL, "mcasp0_fsx", "gpio3_10"),
++ _AM33XX_MUXENTRY(MII1_RXD3, 0,
++ "mii1_rxd3", NULL, "rgmii1_rd3", "mmc0_dat5",
++ "mmc1_dat2", NULL, "mcasp0_axr0", "gpio2_18"),
++ _AM33XX_MUXENTRY(MII1_RXD2, 0,
++ "mii1_rxd2", NULL, "rgmii1_rd2", "mmc0_dat4",
++ "mmc1_dat3", NULL, "mcasp0_axr1", "gpio2_19"),
++ _AM33XX_MUXENTRY(MII1_RXD1, 0,
++ "mii1_rxd1", "rmii1_rxd1", "rgmii1_rd1", "mcasp1_axr3",
++ "mcasp1_fsr", NULL, "mmc2_clk", "gpio2_20"),
++ _AM33XX_MUXENTRY(MII1_RXD0, 0,
++ "mii1_rxd0", "rmii1_rxd0", "rgmii1_rd0", "mcasp1_ahclkx",
++ "mcasp1_ahclkr", "mcasp1_aclkr", "mcasp0_axr3", "gpio2_21"),
++ _AM33XX_MUXENTRY(MII1_REFCLK, 0,
++ "rmii1_refclk", NULL, "spi1_cs0", NULL,
++ "mcasp1_axr3", "mmc0_pow", "mcasp1_ahclkx", "gpio0_29"),
++ _AM33XX_MUXENTRY(MDIO_DATA, 0,
++ "mdio_data", NULL, NULL, NULL,
++ "mmc0_sdcd", "mmc1_cmd", "mmc2_cmd", "gpio0_0"),
++ _AM33XX_MUXENTRY(MDIO_CLK, 0,
++ "mdio_clk", NULL, NULL, NULL,
++ "mmc0_sdwp", "mmc1_clk", "mmc2_clk", "gpio0_1"),
++ _AM33XX_MUXENTRY(SPI0_SCLK, 0,
++ "spi0_sclk", "uart2_rxd", "i2c2_sda", NULL,
++ NULL, NULL, NULL, "gpio0_2"),
++ _AM33XX_MUXENTRY(SPI0_D0, 0,
++ "spi0_d0", "uart2_txd", "i2c2_scl", NULL,
++ NULL, NULL, NULL, "gpio0_3"),
++ _AM33XX_MUXENTRY(SPI0_D1, 0,
++ "spi0_d1", "mmc1_sdwp", "i2c1_sda", NULL,
++ NULL, NULL, NULL, "gpio0_4"),
++ _AM33XX_MUXENTRY(SPI0_CS0, 0,
++ "spi0_cs0", "mmc2_sdwp", "i2c1_scl", NULL,
++ NULL, NULL, NULL, "gpio0_5"),
++ _AM33XX_MUXENTRY(SPI0_CS1, 0,
++ "spi0_cs1", "uart3_rxd", NULL, "mmc0_pow",
++ NULL, "mmc0_sdcd", NULL, "gpio0_6"),
++ _AM33XX_MUXENTRY(ECAP0_IN_PWM0_OUT, 0,
++ "ecap0_in_pwm0_out", "uart3_txd", "spi1_cs1", NULL,
++ "spi1_sclk", "mmc0_sdwp", NULL, "gpio0_7"),
++ _AM33XX_MUXENTRY(UART0_CTSN, 0,
++ "uart0_ctsn", NULL, "d_can1_tx", "i2c1_sda",
++ "spi1_d0", NULL, NULL, "gpio1_8"),
++ _AM33XX_MUXENTRY(UART0_RTSN, 0,
++ "uart0_rtsn", NULL, "d_can1_rx", "i2c1_scl",
++ "spi1_d1", "spi1_cs0", NULL, "gpio1_9"),
++ _AM33XX_MUXENTRY(UART0_RXD, 0,
++ "uart0_rxd", "spi1_cs0", "d_can0_tx", "i2c2_sda",
++ NULL, NULL, NULL, "gpio1_10"),
++ _AM33XX_MUXENTRY(UART0_TXD, 0,
++ "uart0_txd", "spi1_cs1", "d_can0_rx", "i2c2_scl",
++ NULL, NULL, NULL, "gpio1_11"),
++ _AM33XX_MUXENTRY(UART1_CTSN, 0,
++ "uart1_ctsn", NULL, "d_can0_tx", "i2c2_sda",
++ "spi1_cs0", NULL, NULL, "gpio0_12"),
++ _AM33XX_MUXENTRY(UART1_RTSN, 0,
++ "uart1_rtsn", NULL, "d_can0_rx", "i2c2_scl",
++ "spi1_cs1", NULL, NULL, "gpio0_13"),
++ _AM33XX_MUXENTRY(UART1_RXD, 0,
++ "uart1_rxd", "mmc1_sdwp", "d_can1_tx", "i2c1_sda",
++ NULL, "pr1_uart0_rxd_mux1", NULL, "gpio0_14"),
++ _AM33XX_MUXENTRY(UART1_TXD, 0,
++ "uart1_txd", "mmc2_sdwp", "d_can1_rx", "i2c1_scl",
++ NULL, "pr1_uart0_txd_mux1", NULL, "gpio0_15"),
++ _AM33XX_MUXENTRY(I2C0_SDA, 0,
++ "i2c0_sda", NULL, NULL, NULL,
++ NULL, NULL, NULL, "gpio3_5"),
++ _AM33XX_MUXENTRY(I2C0_SCL, 0,
++ "i2c0_scl", NULL, NULL, NULL,
++ NULL, NULL, NULL, "gpio3_6"),
++ _AM33XX_MUXENTRY(MCASP0_ACLKX, 0,
++ "mcasp0_aclkx", NULL, NULL, "spi1_sclk",
++ "mmc0_sdcd", NULL, NULL, "gpio3_14"),
++ _AM33XX_MUXENTRY(MCASP0_FSX, 0,
++ "mcasp0_fsx", NULL, NULL, "spi1_d0",
++ "mmc1_sdcd", NULL, NULL, "gpio3_15"),
++ _AM33XX_MUXENTRY(MCASP0_AXR0, 0,
++ "mcasp0_axr0", NULL, NULL, "spi1_d1",
++ "mmc2_sdcd", NULL, NULL, "gpio3_16"),
++ _AM33XX_MUXENTRY(MCASP0_AHCLKR, 0,
++ "mcasp0_ahclkr", NULL, "mcasp0_axr2", "spi1_cs0",
++ NULL, NULL, NULL, "gpio3_17"),
++ _AM33XX_MUXENTRY(MCASP0_ACLKR, 0,
++ "mcasp0_aclkr", NULL, "mcasp0_axr2", "mcasp1_aclkx",
++ "mmc0_sdwp", NULL, NULL, "gpio3_18"),
++ _AM33XX_MUXENTRY(MCASP0_FSR, 0,
++ "mcasp0_fsr", NULL, "mcasp0_axr3", "mcasp1_fsx",
++ NULL, "pr1_pru0_pru_r30_5", NULL, "gpio3_19"),
++ _AM33XX_MUXENTRY(MCASP0_AXR1, 0,
++ "mcasp0_axr1", NULL, NULL, "mcasp1_axr0",
++ NULL, NULL, NULL, "gpio3_20"),
++ _AM33XX_MUXENTRY(MCASP0_AHCLKX, 0,
++ "mcasp0_ahclkx", NULL, "mcasp0_axr3", "mcasp1_axr1",
++ NULL, NULL, NULL, "gpio3_21"),
++ _AM33XX_MUXENTRY(XDMA_EVENT_INTR0, 0,
++ "xdma_event_intr0", NULL, NULL, NULL,
++ "spi1_cs1", NULL, NULL, "gpio0_19"),
++ _AM33XX_MUXENTRY(XDMA_EVENT_INTR1, 0,
++ "xdma_event_intr1", NULL, NULL, "clkout2",
++ NULL, NULL, NULL, "gpio0_20"),
++ _AM33XX_MUXENTRY(WARMRSTN, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(PWRONRSTN, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(NMIN, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(XTALIN, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(XTALOUT, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(TMS, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(TDI, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(TDO, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(TCK, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(TRSTN, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(EMU0, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, "gpio3_7"),
++ _AM33XX_MUXENTRY(EMU1, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, "gpio3_8"),
++ _AM33XX_MUXENTRY(RTC_XTALIN, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(RTC_XTALOUT, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(RTC_PWRONRSTN, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(PMIC_POWER_EN, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(EXT_WAKEUP, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(USB0_DRVVBUS, 0,
++ "usb0_drvvbus", NULL, NULL, NULL,
++ NULL, NULL, NULL, "gpio0_18"),
++ _AM33XX_MUXENTRY(USB1_DRVVBUS, 0,
++ "usb1_drvvbus", NULL, NULL, NULL,
++ NULL, NULL, NULL, "gpio3_13"),
++ _AM33XX_MUXENTRY(DDR_RESETN, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_CSN0, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_CKE, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_CK, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_CKN, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_CASN, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_RASN, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_WEN, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_BA0, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_BA1, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_BA2, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_A0, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_A1, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_A2, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_A3, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_A4, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_A5, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_A6, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_A7, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_A8, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_A9, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_A10, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_A11, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_A12, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_A13, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_A14, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_A15, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_ODT, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_D0, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_D1, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_D2, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_D3, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_D4, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_D5, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_D6, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_D7, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_D8, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_D9, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_D10, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_D11, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_D12, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_D13, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_D14, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_D15, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_DQM0, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_DQM1, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_DQS0, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_DQSN0, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_DQS1, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_DQSN1, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_VREF, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(DDR_VTP, 0,
++ NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(AIN0, 0,
++ "ain0", NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(AIN1, 0,
++ "ain1", NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(AIN2, 0,
++ "ain2", NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(AIN3, 0,
++ "ain3", NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(AIN4, 0,
++ "ain4", NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(AIN5, 0,
++ "ain5", NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(AIN6, 0,
++ "ain6", NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(AIN7, 0,
++ "ain7", NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(VREFP, 0,
++ "vrefp", NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ _AM33XX_MUXENTRY(VREFN, 0,
++ "vrefn", NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL),
++ { .reg_offset = OMAP_MUX_TERMINATOR },
++};
++
++int __init am33xx_mux_init(struct omap_board_mux *board_subset)
++{
++ return omap_mux_init("core", 0, AM33XX_CONTROL_PADCONF_MUX_PBASE,
++ AM33XX_CONTROL_PADCONF_MUX_SIZE, am33xx_muxmodes,
++ NULL, board_subset, NULL);
++}
++#else
++int __init am33xx_mux_init(struct omap_board_mux *board_subset)
++{
++ return 0;
++}
++#endif
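
For readers scanning the superset table, each _AM33XX_MUXENTRY() row expands to one struct omap_mux record. Taking the UART0_RXD row above as an example, it is roughly equivalent to the following (the variable name is illustrative; the register offset comes from mux33xx.h below):

#include <linux/init.h>

#include "mux.h"

static struct omap_mux example_uart0_rxd_entry __initdata = {
	.reg_offset	= AM33XX_CONTROL_PADCONF_UART0_RXD_OFFSET,  /* 0x0970 */
	.gpio		= 0,
	/* muxnames[0..7] name the signal selected by mux modes 0..7 */
	.muxnames	= { "uart0_rxd", "spi1_cs0", "d_can0_tx", "i2c2_sda",
			    NULL, NULL, NULL, "gpio1_10" },
};
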
+diff --git a/arch/arm/mach-omap2/mux33xx.h b/arch/arm/mach-omap2/mux33xx.h
+new file mode 100644
+index 0000000..348c8e5
+--- /dev/null
++++ b/arch/arm/mach-omap2/mux33xx.h
+@@ -0,0 +1,245 @@
++/*
++ * AM33XX pad control register macros.
++ *
++ * Copyright (C) 2011 Texas Instruments, Inc. - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __ARCH_ARM_MACH_OMAP2_MUX335X_H
++#define __ARCH_ARM_MACH_OMAP2_MUX335X_H
++
++#define AM33XX_CONTROL_PADCONF_MUX_PBASE 0x44E10000LU
++
++/* If a pin is not defined as an input, its pull gets disabled.
++ * If it is defined as an input, the supplied flags determine pull on/off.
++ */
++#define AM33XX_MUX(mode0, mux_value) \
++{ \
++ .reg_offset = (AM33XX_CONTROL_PADCONF_##mode0##_OFFSET), \
++ .value = (((mux_value) & AM33XX_INPUT_EN) ? (mux_value)\
++ : ((mux_value) | AM33XX_PULL_DISA)), \
++}
++
++/*
++ * AM33XX CONTROL_PADCONF* register offsets for pin-muxing
++ *
++ * Add AM33XX_CONTROL_PADCONF_MUX_PBASE to these values to get the
++ * absolute addresses. The macro names below are mode-0 names of
++ * corresponding pins.
++ */
++
++#define AM33XX_CONTROL_PADCONF_GPMC_AD0_OFFSET 0x0800
++#define AM33XX_CONTROL_PADCONF_GPMC_AD1_OFFSET 0x0804
++#define AM33XX_CONTROL_PADCONF_GPMC_AD2_OFFSET 0x0808
++#define AM33XX_CONTROL_PADCONF_GPMC_AD3_OFFSET 0x080C
++#define AM33XX_CONTROL_PADCONF_GPMC_AD4_OFFSET 0x0810
++#define AM33XX_CONTROL_PADCONF_GPMC_AD5_OFFSET 0x0814
++#define AM33XX_CONTROL_PADCONF_GPMC_AD6_OFFSET 0x0818
++#define AM33XX_CONTROL_PADCONF_GPMC_AD7_OFFSET 0x081C
++#define AM33XX_CONTROL_PADCONF_GPMC_AD8_OFFSET 0x0820
++#define AM33XX_CONTROL_PADCONF_GPMC_AD9_OFFSET 0x0824
++#define AM33XX_CONTROL_PADCONF_GPMC_AD10_OFFSET 0x0828
++#define AM33XX_CONTROL_PADCONF_GPMC_AD11_OFFSET 0x082C
++#define AM33XX_CONTROL_PADCONF_GPMC_AD12_OFFSET 0x0830
++#define AM33XX_CONTROL_PADCONF_GPMC_AD13_OFFSET 0x0834
++#define AM33XX_CONTROL_PADCONF_GPMC_AD14_OFFSET 0x0838
++#define AM33XX_CONTROL_PADCONF_GPMC_AD15_OFFSET 0x083C
++#define AM33XX_CONTROL_PADCONF_GPMC_A0_OFFSET 0x0840
++#define AM33XX_CONTROL_PADCONF_GPMC_A1_OFFSET 0x0844
++#define AM33XX_CONTROL_PADCONF_GPMC_A2_OFFSET 0x0848
++#define AM33XX_CONTROL_PADCONF_GPMC_A3_OFFSET 0x084C
++#define AM33XX_CONTROL_PADCONF_GPMC_A4_OFFSET 0x0850
++#define AM33XX_CONTROL_PADCONF_GPMC_A5_OFFSET 0x0854
++#define AM33XX_CONTROL_PADCONF_GPMC_A6_OFFSET 0x0858
++#define AM33XX_CONTROL_PADCONF_GPMC_A7_OFFSET 0x085C
++#define AM33XX_CONTROL_PADCONF_GPMC_A8_OFFSET 0x0860
++#define AM33XX_CONTROL_PADCONF_GPMC_A9_OFFSET 0x0864
++#define AM33XX_CONTROL_PADCONF_GPMC_A10_OFFSET 0x0868
++#define AM33XX_CONTROL_PADCONF_GPMC_A11_OFFSET 0x086C
++#define AM33XX_CONTROL_PADCONF_GPMC_WAIT0_OFFSET 0x0870
++#define AM33XX_CONTROL_PADCONF_GPMC_WPN_OFFSET 0x0874
++#define AM33XX_CONTROL_PADCONF_GPMC_BEN1_OFFSET 0x0878
++#define AM33XX_CONTROL_PADCONF_GPMC_CSN0_OFFSET 0x087C
++#define AM33XX_CONTROL_PADCONF_GPMC_CSN1_OFFSET 0x0880
++#define AM33XX_CONTROL_PADCONF_GPMC_CSN2_OFFSET 0x0884
++#define AM33XX_CONTROL_PADCONF_GPMC_CSN3_OFFSET 0x0888
++#define AM33XX_CONTROL_PADCONF_GPMC_CLK_OFFSET 0x088C
++#define AM33XX_CONTROL_PADCONF_GPMC_ADVN_ALE_OFFSET 0x0890
++#define AM33XX_CONTROL_PADCONF_GPMC_OEN_REN_OFFSET 0x0894
++#define AM33XX_CONTROL_PADCONF_GPMC_WEN_OFFSET 0x0898
++#define AM33XX_CONTROL_PADCONF_GPMC_BEN0_CLE_OFFSET 0x089C
++#define AM33XX_CONTROL_PADCONF_LCD_DATA0_OFFSET 0x08A0
++#define AM33XX_CONTROL_PADCONF_LCD_DATA1_OFFSET 0x08A4
++#define AM33XX_CONTROL_PADCONF_LCD_DATA2_OFFSET 0x08A8
++#define AM33XX_CONTROL_PADCONF_LCD_DATA3_OFFSET 0x08AC
++#define AM33XX_CONTROL_PADCONF_LCD_DATA4_OFFSET 0x08B0
++#define AM33XX_CONTROL_PADCONF_LCD_DATA5_OFFSET 0x08B4
++#define AM33XX_CONTROL_PADCONF_LCD_DATA6_OFFSET 0x08B8
++#define AM33XX_CONTROL_PADCONF_LCD_DATA7_OFFSET 0x08BC
++#define AM33XX_CONTROL_PADCONF_LCD_DATA8_OFFSET 0x08C0
++#define AM33XX_CONTROL_PADCONF_LCD_DATA9_OFFSET 0x08C4
++#define AM33XX_CONTROL_PADCONF_LCD_DATA10_OFFSET 0x08C8
++#define AM33XX_CONTROL_PADCONF_LCD_DATA11_OFFSET 0x08CC
++#define AM33XX_CONTROL_PADCONF_LCD_DATA12_OFFSET 0x08D0
++#define AM33XX_CONTROL_PADCONF_LCD_DATA13_OFFSET 0x08D4
++#define AM33XX_CONTROL_PADCONF_LCD_DATA14_OFFSET 0x08D8
++#define AM33XX_CONTROL_PADCONF_LCD_DATA15_OFFSET 0x08DC
++#define AM33XX_CONTROL_PADCONF_LCD_VSYNC_OFFSET 0x08E0
++#define AM33XX_CONTROL_PADCONF_LCD_HSYNC_OFFSET 0x08E4
++#define AM33XX_CONTROL_PADCONF_LCD_PCLK_OFFSET 0x08E8
++#define AM33XX_CONTROL_PADCONF_LCD_AC_BIAS_EN_OFFSET 0x08EC
++#define AM33XX_CONTROL_PADCONF_MMC0_DAT3_OFFSET 0x08F0
++#define AM33XX_CONTROL_PADCONF_MMC0_DAT2_OFFSET 0x08F4
++#define AM33XX_CONTROL_PADCONF_MMC0_DAT1_OFFSET 0x08F8
++#define AM33XX_CONTROL_PADCONF_MMC0_DAT0_OFFSET 0x08FC
++#define AM33XX_CONTROL_PADCONF_MMC0_CLK_OFFSET 0x0900
++#define AM33XX_CONTROL_PADCONF_MMC0_CMD_OFFSET 0x0904
++#define AM33XX_CONTROL_PADCONF_MII1_COL_OFFSET 0x0908
++#define AM33XX_CONTROL_PADCONF_MII1_CRS_OFFSET 0x090C
++#define AM33XX_CONTROL_PADCONF_MII1_RXERR_OFFSET 0x0910
++#define AM33XX_CONTROL_PADCONF_MII1_TXEN_OFFSET 0x0914
++#define AM33XX_CONTROL_PADCONF_MII1_RXDV_OFFSET 0x0918
++#define AM33XX_CONTROL_PADCONF_MII1_TXD3_OFFSET 0x091C
++#define AM33XX_CONTROL_PADCONF_MII1_TXD2_OFFSET 0x0920
++#define AM33XX_CONTROL_PADCONF_MII1_TXD1_OFFSET 0x0924
++#define AM33XX_CONTROL_PADCONF_MII1_TXD0_OFFSET 0x0928
++#define AM33XX_CONTROL_PADCONF_MII1_TXCLK_OFFSET 0x092C
++#define AM33XX_CONTROL_PADCONF_MII1_RXCLK_OFFSET 0x0930
++#define AM33XX_CONTROL_PADCONF_MII1_RXD3_OFFSET 0x0934
++#define AM33XX_CONTROL_PADCONF_MII1_RXD2_OFFSET 0x0938
++#define AM33XX_CONTROL_PADCONF_MII1_RXD1_OFFSET 0x093C
++#define AM33XX_CONTROL_PADCONF_MII1_RXD0_OFFSET 0x0940
++#define AM33XX_CONTROL_PADCONF_MII1_REFCLK_OFFSET 0x0944
++#define AM33XX_CONTROL_PADCONF_MDIO_DATA_OFFSET 0x0948
++#define AM33XX_CONTROL_PADCONF_MDIO_CLK_OFFSET 0x094C
++#define AM33XX_CONTROL_PADCONF_SPI0_SCLK_OFFSET 0x0950
++#define AM33XX_CONTROL_PADCONF_SPI0_D0_OFFSET 0x0954
++#define AM33XX_CONTROL_PADCONF_SPI0_D1_OFFSET 0x0958
++#define AM33XX_CONTROL_PADCONF_SPI0_CS0_OFFSET 0x095C
++#define AM33XX_CONTROL_PADCONF_SPI0_CS1_OFFSET 0x0960
++#define AM33XX_CONTROL_PADCONF_ECAP0_IN_PWM0_OUT_OFFSET 0x0964
++#define AM33XX_CONTROL_PADCONF_UART0_CTSN_OFFSET 0x0968
++#define AM33XX_CONTROL_PADCONF_UART0_RTSN_OFFSET 0x096C
++#define AM33XX_CONTROL_PADCONF_UART0_RXD_OFFSET 0x0970
++#define AM33XX_CONTROL_PADCONF_UART0_TXD_OFFSET 0x0974
++#define AM33XX_CONTROL_PADCONF_UART1_CTSN_OFFSET 0x0978
++#define AM33XX_CONTROL_PADCONF_UART1_RTSN_OFFSET 0x097C
++#define AM33XX_CONTROL_PADCONF_UART1_RXD_OFFSET 0x0980
++#define AM33XX_CONTROL_PADCONF_UART1_TXD_OFFSET 0x0984
++#define AM33XX_CONTROL_PADCONF_I2C0_SDA_OFFSET 0x0988
++#define AM33XX_CONTROL_PADCONF_I2C0_SCL_OFFSET 0x098C
++#define AM33XX_CONTROL_PADCONF_MCASP0_ACLKX_OFFSET 0x0990
++#define AM33XX_CONTROL_PADCONF_MCASP0_FSX_OFFSET 0x0994
++#define AM33XX_CONTROL_PADCONF_MCASP0_AXR0_OFFSET 0x0998
++#define AM33XX_CONTROL_PADCONF_MCASP0_AHCLKR_OFFSET 0x099C
++#define AM33XX_CONTROL_PADCONF_MCASP0_ACLKR_OFFSET 0x09A0
++#define AM33XX_CONTROL_PADCONF_MCASP0_FSR_OFFSET 0x09A4
++#define AM33XX_CONTROL_PADCONF_MCASP0_AXR1_OFFSET 0x09A8
++#define AM33XX_CONTROL_PADCONF_MCASP0_AHCLKX_OFFSET 0x09AC
++#define AM33XX_CONTROL_PADCONF_XDMA_EVENT_INTR0_OFFSET 0x09B0
++#define AM33XX_CONTROL_PADCONF_XDMA_EVENT_INTR1_OFFSET 0x09B4
++#define AM33XX_CONTROL_PADCONF_WARMRSTN_OFFSET 0x09B8
++#define AM33XX_CONTROL_PADCONF_PWRONRSTN_OFFSET 0x09BC
++#define AM33XX_CONTROL_PADCONF_NMIN_OFFSET 0x09C0
++#define AM33XX_CONTROL_PADCONF_XTALIN_OFFSET 0x09C4
++#define AM33XX_CONTROL_PADCONF_XTALOUT_OFFSET 0x09C8
++#define AM33XX_CONTROL_PADCONF_TMS_OFFSET 0x09D0
++#define AM33XX_CONTROL_PADCONF_TDI_OFFSET 0x09D4
++#define AM33XX_CONTROL_PADCONF_TDO_OFFSET 0x09D8
++#define AM33XX_CONTROL_PADCONF_TCK_OFFSET 0x09DC
++#define AM33XX_CONTROL_PADCONF_TRSTN_OFFSET 0x09E0
++#define AM33XX_CONTROL_PADCONF_EMU0_OFFSET 0x09E4
++#define AM33XX_CONTROL_PADCONF_EMU1_OFFSET 0x09E8
++#define AM33XX_CONTROL_PADCONF_RTC_XTALIN_OFFSET 0x09EC
++#define AM33XX_CONTROL_PADCONF_RTC_XTALOUT_OFFSET 0x09F0
++#define AM33XX_CONTROL_PADCONF_RTC_PWRONRSTN_OFFSET 0x09F8
++#define AM33XX_CONTROL_PADCONF_EXT_WAKEUP_OFFSET 0x0A00
++#define AM33XX_CONTROL_PADCONF_PMIC_POWER_EN_OFFSET 0x09F4
++#define AM33XX_CONTROL_PADCONF_RTC_KALDO_ENN_OFFSET 0x0A04
++#define AM33XX_CONTROL_PADCONF_USB0_DM_OFFSET 0x0A08
++#define AM33XX_CONTROL_PADCONF_USB0_DP_OFFSET 0x0A0C
++#define AM33XX_CONTROL_PADCONF_USB0_CE_OFFSET 0x0A10
++#define AM33XX_CONTROL_PADCONF_USB0_ID_OFFSET 0x0A14
++#define AM33XX_CONTROL_PADCONF_USB0_VBUS_OFFSET 0x0A18
++#define AM33XX_CONTROL_PADCONF_USB0_DRVVBUS_OFFSET 0x0A1C
++#define AM33XX_CONTROL_PADCONF_USB1_DM_OFFSET 0x0A20
++#define AM33XX_CONTROL_PADCONF_USB1_DP_OFFSET 0x0A24
++#define AM33XX_CONTROL_PADCONF_USB1_CE_OFFSET 0x0A28
++#define AM33XX_CONTROL_PADCONF_USB1_ID_OFFSET 0x0A2C
++#define AM33XX_CONTROL_PADCONF_USB1_VBUS_OFFSET 0x0A30
++#define AM33XX_CONTROL_PADCONF_USB1_DRVVBUS_OFFSET 0x0A34
++#define AM33XX_CONTROL_PADCONF_DDR_RESETN_OFFSET 0x0A38
++#define AM33XX_CONTROL_PADCONF_DDR_CSN0_OFFSET 0x0A3C
++#define AM33XX_CONTROL_PADCONF_DDR_CKE_OFFSET 0x0A40
++#define AM33XX_CONTROL_PADCONF_DDR_CK_OFFSET 0x0A44
++#define AM33XX_CONTROL_PADCONF_DDR_CKN_OFFSET 0x0A48
++#define AM33XX_CONTROL_PADCONF_DDR_CASN_OFFSET 0x0A4C
++#define AM33XX_CONTROL_PADCONF_DDR_RASN_OFFSET 0x0A50
++#define AM33XX_CONTROL_PADCONF_DDR_WEN_OFFSET 0x0A54
++#define AM33XX_CONTROL_PADCONF_DDR_BA0_OFFSET 0x0A58
++#define AM33XX_CONTROL_PADCONF_DDR_BA1_OFFSET 0x0A5C
++#define AM33XX_CONTROL_PADCONF_DDR_BA2_OFFSET 0x0A60
++#define AM33XX_CONTROL_PADCONF_DDR_A0_OFFSET 0x0A64
++#define AM33XX_CONTROL_PADCONF_DDR_A1_OFFSET 0x0A68
++#define AM33XX_CONTROL_PADCONF_DDR_A2_OFFSET 0x0A6C
++#define AM33XX_CONTROL_PADCONF_DDR_A3_OFFSET 0x0A70
++#define AM33XX_CONTROL_PADCONF_DDR_A4_OFFSET 0x0A74
++#define AM33XX_CONTROL_PADCONF_DDR_A5_OFFSET 0x0A78
++#define AM33XX_CONTROL_PADCONF_DDR_A6_OFFSET 0x0A7C
++#define AM33XX_CONTROL_PADCONF_DDR_A7_OFFSET 0x0A80
++#define AM33XX_CONTROL_PADCONF_DDR_A8_OFFSET 0x0A84
++#define AM33XX_CONTROL_PADCONF_DDR_A9_OFFSET 0x0A88
++#define AM33XX_CONTROL_PADCONF_DDR_A10_OFFSET 0x0A8C
++#define AM33XX_CONTROL_PADCONF_DDR_A11_OFFSET 0x0A90
++#define AM33XX_CONTROL_PADCONF_DDR_A12_OFFSET 0x0A94
++#define AM33XX_CONTROL_PADCONF_DDR_A13_OFFSET 0x0A98
++#define AM33XX_CONTROL_PADCONF_DDR_A14_OFFSET 0x0A9C
++#define AM33XX_CONTROL_PADCONF_DDR_A15_OFFSET 0x0AA0
++#define AM33XX_CONTROL_PADCONF_DDR_ODT_OFFSET 0x0AA4
++#define AM33XX_CONTROL_PADCONF_DDR_D0_OFFSET 0x0AA8
++#define AM33XX_CONTROL_PADCONF_DDR_D1_OFFSET 0x0AAC
++#define AM33XX_CONTROL_PADCONF_DDR_D2_OFFSET 0x0AB0
++#define AM33XX_CONTROL_PADCONF_DDR_D3_OFFSET 0x0AB4
++#define AM33XX_CONTROL_PADCONF_DDR_D4_OFFSET 0x0AB8
++#define AM33XX_CONTROL_PADCONF_DDR_D5_OFFSET 0x0ABC
++#define AM33XX_CONTROL_PADCONF_DDR_D6_OFFSET 0x0AC0
++#define AM33XX_CONTROL_PADCONF_DDR_D7_OFFSET 0x0AC4
++#define AM33XX_CONTROL_PADCONF_DDR_D8_OFFSET 0x0AC8
++#define AM33XX_CONTROL_PADCONF_DDR_D9_OFFSET 0x0ACC
++#define AM33XX_CONTROL_PADCONF_DDR_D10_OFFSET 0x0AD0
++#define AM33XX_CONTROL_PADCONF_DDR_D11_OFFSET 0x0AD4
++#define AM33XX_CONTROL_PADCONF_DDR_D12_OFFSET 0x0AD8
++#define AM33XX_CONTROL_PADCONF_DDR_D13_OFFSET 0x0ADC
++#define AM33XX_CONTROL_PADCONF_DDR_D14_OFFSET 0x0AE0
++#define AM33XX_CONTROL_PADCONF_DDR_D15_OFFSET 0x0AE4
++#define AM33XX_CONTROL_PADCONF_DDR_DQM0_OFFSET 0x0AE8
++#define AM33XX_CONTROL_PADCONF_DDR_DQM1_OFFSET 0x0AEC
++#define AM33XX_CONTROL_PADCONF_DDR_DQS0_OFFSET 0x0AF0
++#define AM33XX_CONTROL_PADCONF_DDR_DQSN0_OFFSET 0x0AF4
++#define AM33XX_CONTROL_PADCONF_DDR_DQS1_OFFSET 0x0AF8
++#define AM33XX_CONTROL_PADCONF_DDR_DQSN1_OFFSET 0x0AFC
++#define AM33XX_CONTROL_PADCONF_DDR_VREF_OFFSET 0x0B00
++#define AM33XX_CONTROL_PADCONF_DDR_VTP_OFFSET 0x0B04
++#define AM33XX_CONTROL_PADCONF_AIN7_OFFSET 0x0B10
++#define AM33XX_CONTROL_PADCONF_AIN6_OFFSET 0x0B14
++#define AM33XX_CONTROL_PADCONF_AIN5_OFFSET 0x0B18
++#define AM33XX_CONTROL_PADCONF_AIN4_OFFSET 0x0B1C
++#define AM33XX_CONTROL_PADCONF_AIN3_OFFSET 0x0B20
++#define AM33XX_CONTROL_PADCONF_AIN2_OFFSET 0x0B24
++#define AM33XX_CONTROL_PADCONF_AIN1_OFFSET 0x0B28
++#define AM33XX_CONTROL_PADCONF_AIN0_OFFSET 0x0B2C
++#define AM33XX_CONTROL_PADCONF_VREFP_OFFSET 0x0B30
++#define AM33XX_CONTROL_PADCONF_VREFN_OFFSET 0x0B34
++
++#define AM33XX_CONTROL_PADCONF_MUX_SIZE \
++ (AM33XX_CONTROL_PADCONF_VREFN_OFFSET + 0x4)
++
++#endif
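
Putting the pieces together, a board file would describe its pins with the AM33XX_MUX() macro and the AM33XX_PIN_* flags above and register the table through am33xx_mux_init(). A hedged sketch with an illustrative pin subset; the selection is not taken from a particular board in this patch.

#include <linux/init.h>

#include "mux.h"

static struct omap_board_mux example_board_mux[] __initdata = {
	/* UART0 console pins in mode 0 */
	AM33XX_MUX(UART0_RXD, OMAP_MUX_MODE0 | AM33XX_PIN_INPUT_PULLUP),
	AM33XX_MUX(UART0_TXD, OMAP_MUX_MODE0 | AM33XX_PIN_OUTPUT),
	/* LCD_DATA0 driven as gpio2_6 (mode 7), per the superset table */
	AM33XX_MUX(LCD_DATA0, OMAP_MUX_MODE7 | AM33XX_PIN_OUTPUT),
	{ .reg_offset = OMAP_MUX_TERMINATOR },
};

static void __init example_board_mux_init(void)
{
	/* Returns 0 on success; a real board file would check the result. */
	am33xx_mux_init(example_board_mux);
}
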
+diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
+index 4ee6aec..b13ef7e 100644
+--- a/arch/arm/mach-omap2/omap-headsmp.S
++++ b/arch/arm/mach-omap2/omap-headsmp.S
+@@ -18,11 +18,6 @@
+ #include <linux/linkage.h>
+ #include <linux/init.h>
+
+-/* Physical address needed since MMU not enabled yet on secondary core */
+-#define OMAP4_AUX_CORE_BOOT1_PA 0x48281804
+-
+- __INIT
+-
+ /*
+ * OMAP4 specific entry point for secondary CPU to jump from ROM
+ * code. This routine also provides a holding flag into which
+diff --git a/arch/arm/mach-omap2/omap-hotplug.c b/arch/arm/mach-omap2/omap-hotplug.c
+index 4976b93..adbe4d8 100644
+--- a/arch/arm/mach-omap2/omap-hotplug.c
++++ b/arch/arm/mach-omap2/omap-hotplug.c
+@@ -19,7 +19,10 @@
+ #include <linux/smp.h>
+
+ #include <asm/cacheflush.h>
+-#include <mach/omap4-common.h>
++
++#include "common.h"
++
++#include "powerdomain.h"
+
+ int platform_cpu_kill(unsigned int cpu)
+ {
+@@ -32,6 +35,8 @@ int platform_cpu_kill(unsigned int cpu)
+ */
+ void platform_cpu_die(unsigned int cpu)
+ {
++ unsigned int this_cpu;
++
+ flush_cache_all();
+ dsb();
+
+@@ -39,15 +44,15 @@ void platform_cpu_die(unsigned int cpu)
+ * we're ready for shutdown now, so do it
+ */
+ if (omap_modify_auxcoreboot0(0x0, 0x200) != 0x0)
+- printk(KERN_CRIT "Secure clear status failed\n");
++ pr_err("Secure clear status failed\n");
+
+ for (;;) {
+ /*
+- * Execute WFI
++ * Enter into low power state
+ */
+- do_wfi();
+-
+- if (omap_read_auxcoreboot0() == cpu) {
++ omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF);
++ this_cpu = smp_processor_id();
++ if (omap_read_auxcoreboot0() == this_cpu) {
+ /*
+ * OK, proper wakeup, we're done
+ */
+diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c
+index ac49384..b882204 100644
+--- a/arch/arm/mach-omap2/omap-iommu.c
++++ b/arch/arm/mach-omap2/omap-iommu.c
+@@ -150,8 +150,7 @@ err_out:
+ platform_device_put(omap_iommu_pdev[i]);
+ return err;
+ }
+-/* must be ready before omap3isp is probed */
+-subsys_initcall(omap_iommu_init);
++module_init(omap_iommu_init);
+
+ static void __exit omap_iommu_exit(void)
+ {
+diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+new file mode 100644
+index 0000000..1d5d010
+--- /dev/null
++++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+@@ -0,0 +1,398 @@
++/*
++ * OMAP MPUSS low power code
++ *
++ * Copyright (C) 2011 Texas Instruments, Inc.
++ * Santosh Shilimkar <santosh.shilimkar@ti.com>
++ *
++ * OMAP4430 MPUSS mainly consists of dual Cortex-A9 with per-CPU
++ * Local timer and Watchdog, GIC, SCU, PL310 L2 cache controller,
++ * CPU0 and CPU1 LPRM modules.
++ * CPU0, CPU1 and MPUSS each have their own power domain and
++ * hence multiple low power combinations of MPUSS are possible.
++ *
++ * CPU0 and CPU1 can't support Closed Switch Retention (CSWR)
++ * because of a hardware constraint of dormant mode: while
++ * waking up from dormant mode, a reset signal to the
++ * Cortex-A9 processor must be asserted by the external
++ * power controller.
++ *
++ * With architectural inputs and hardware recommendations, only
++ * below modes are supported from power gain vs latency point of view.
++ *
++ * CPU0 CPU1 MPUSS
++ * ----------------------------------------------
++ * ON ON ON
++ * ON(Inactive) OFF ON(Inactive)
++ * OFF OFF CSWR
++ * OFF OFF OSWR
++ * OFF OFF OFF(Device OFF *TBD)
++ * ----------------------------------------------
++ *
++ * Note: CPU0 is the master core. It is the last CPU to go down
++ * and the first to wake up when MPUSS low power states are exercised.
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/kernel.h>
++#include <linux/io.h>
++#include <linux/errno.h>
++#include <linux/linkage.h>
++#include <linux/smp.h>
++
++#include <asm/cacheflush.h>
++#include <asm/tlbflush.h>
++#include <asm/smp_scu.h>
++#include <asm/system.h>
++#include <asm/pgalloc.h>
++#include <asm/suspend.h>
++#include <asm/hardware/cache-l2x0.h>
++
++#include <plat/omap44xx.h>
++
++#include "common.h"
++#include "omap4-sar-layout.h"
++#include "pm.h"
++#include "prcm_mpu44xx.h"
++#include "prminst44xx.h"
++#include "prcm44xx.h"
++#include "prm44xx.h"
++#include "prm-regbits-44xx.h"
++
++#ifdef CONFIG_SMP
++
++struct omap4_cpu_pm_info {
++ struct powerdomain *pwrdm;
++ void __iomem *scu_sar_addr;
++ void __iomem *wkup_sar_addr;
++ void __iomem *l2x0_sar_addr;
++};
++
++static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
++static struct powerdomain *mpuss_pd;
++static void __iomem *sar_base;
++
++/*
++ * Program the wakeup routine address for the CPU0 and CPU1
++ * used for OFF or DORMANT wakeup.
++ */
++static inline void set_cpu_wakeup_addr(unsigned int cpu_id, u32 addr)
++{
++ struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
++
++ __raw_writel(addr, pm_info->wkup_sar_addr);
++}
++
++/*
++ * Set the CPUx powerdomain's next power state
++ */
++static inline void set_cpu_next_pwrst(unsigned int cpu_id,
++ unsigned int power_state)
++{
++ struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
++
++ pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
++}
++
++/*
++ * Read CPU's previous power state
++ */
++static inline unsigned int read_cpu_prev_pwrst(unsigned int cpu_id)
++{
++ struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
++
++ return pwrdm_read_prev_pwrst(pm_info->pwrdm);
++}
++
++/*
++ * Clear the CPUx powerdomain's previous power state
++ */
++static inline void clear_cpu_prev_pwrst(unsigned int cpu_id)
++{
++ struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
++
++ pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
++}
++
++/*
++ * Store the SCU power status value to scratchpad memory
++ */
++static void scu_pwrst_prepare(unsigned int cpu_id, unsigned int cpu_state)
++{
++ struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
++ u32 scu_pwr_st;
++
++ switch (cpu_state) {
++ case PWRDM_POWER_RET:
++ scu_pwr_st = SCU_PM_DORMANT;
++ break;
++ case PWRDM_POWER_OFF:
++ scu_pwr_st = SCU_PM_POWEROFF;
++ break;
++ case PWRDM_POWER_ON:
++ case PWRDM_POWER_INACTIVE:
++ default:
++ scu_pwr_st = SCU_PM_NORMAL;
++ break;
++ }
++
++ __raw_writel(scu_pwr_st, pm_info->scu_sar_addr);
++}
++
++/* Helper functions for MPUSS OSWR */
++static inline void mpuss_clear_prev_logic_pwrst(void)
++{
++ u32 reg;
++
++ reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
++ OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
++ omap4_prminst_write_inst_reg(reg, OMAP4430_PRM_PARTITION,
++ OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
++}
++
++static inline void cpu_clear_prev_logic_pwrst(unsigned int cpu_id)
++{
++ u32 reg;
++
++ if (cpu_id) {
++ reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU1_INST,
++ OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET);
++ omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU1_INST,
++ OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET);
++ } else {
++ reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU0_INST,
++ OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET);
++ omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU0_INST,
++ OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET);
++ }
++}
++
++/**
++ * omap4_mpuss_read_prev_context_state:
++ * Function returns the MPUSS previous context state
++ */
++u32 omap4_mpuss_read_prev_context_state(void)
++{
++ u32 reg;
++
++ reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
++ OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
++ reg &= OMAP4430_LOSTCONTEXT_DFF_MASK;
++ return reg;
++}
++
++/*
++ * Store the CPU cluster state for L2X0 low power operations.
++ */
++static void l2x0_pwrst_prepare(unsigned int cpu_id, unsigned int save_state)
++{
++ struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
++
++ __raw_writel(save_state, pm_info->l2x0_sar_addr);
++}
++
++/*
++ * Save the L2X0 AUXCTRL and PREFETCH_CTRL values to SAR memory.
++ * They are used in every MPUSS OFF restore path.
++ */
++#ifdef CONFIG_CACHE_L2X0
++static void save_l2x0_context(void)
++{
++ u32 val;
++ void __iomem *l2x0_base = omap4_get_l2cache_base();
++
++ val = __raw_readl(l2x0_base + L2X0_AUX_CTRL);
++ __raw_writel(val, sar_base + L2X0_AUXCTRL_OFFSET);
++ val = __raw_readl(l2x0_base + L2X0_PREFETCH_CTRL);
++ __raw_writel(val, sar_base + L2X0_PREFETCH_CTRL_OFFSET);
++}
++#else
++static void save_l2x0_context(void)
++{}
++#endif
++
++/**
++ * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
++ * The purpose of this function is to manage low power programming
++ * of OMAP4 MPUSS subsystem
++ * @cpu : CPU ID
++ * @power_state: Low power state.
++ *
++ * MPUSS states for the context save:
++ * save_state =
++ * 0 - Nothing lost and no need to save: MPUSS INACTIVE
++ * 1 - CPUx L1 and logic lost: MPUSS CSWR
++ * 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
++ * 3 - CPUx L1 and logic lost + GIC + L2 lost: DEVICE OFF
++ */
++int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
++{
++ unsigned int save_state = 0;
++ unsigned int wakeup_cpu;
++
++ if (omap_rev() == OMAP4430_REV_ES1_0)
++ return -ENXIO;
++
++ switch (power_state) {
++ case PWRDM_POWER_ON:
++ case PWRDM_POWER_INACTIVE:
++ save_state = 0;
++ break;
++ case PWRDM_POWER_OFF:
++ save_state = 1;
++ break;
++ case PWRDM_POWER_RET:
++ default:
++ /*
++		 * CPUx CSWR is an invalid hardware state. CPUx OSWR
++		 * doesn't make much sense either, since logic is lost and L1
++		 * needs to be cleaned for coherency. This makes
++		 * CPUx OSWR equivalent to CPUx OFF and hence not supported.
++ */
++ WARN_ON(1);
++ return -ENXIO;
++ }
++
++ pwrdm_pre_transition();
++
++ /*
++ * Check MPUSS next state and save interrupt controller if needed.
++	 * In MPUSS OSWR or device OFF, interrupt controller context is lost.
++ */
++ mpuss_clear_prev_logic_pwrst();
++ pwrdm_clear_all_prev_pwrst(mpuss_pd);
++ if ((pwrdm_read_next_pwrst(mpuss_pd) == PWRDM_POWER_RET) &&
++ (pwrdm_read_logic_retst(mpuss_pd) == PWRDM_POWER_OFF))
++ save_state = 2;
++
++ clear_cpu_prev_pwrst(cpu);
++ cpu_clear_prev_logic_pwrst(cpu);
++ set_cpu_next_pwrst(cpu, power_state);
++ set_cpu_wakeup_addr(cpu, virt_to_phys(omap4_cpu_resume));
++ scu_pwrst_prepare(cpu, power_state);
++ l2x0_pwrst_prepare(cpu, save_state);
++
++ /*
++ * Call low level function with targeted low power state.
++ */
++ cpu_suspend(save_state, omap4_finish_suspend);
++
++ /*
++	 * Restore the CPUx power state to ON, otherwise the CPUx
++	 * power domain can transition to the programmed low power
++	 * state while doing WFI outside the low power code. On
++	 * secure devices, CPUx does WFI which can result in a
++	 * power domain transition.
++ */
++ wakeup_cpu = smp_processor_id();
++ set_cpu_next_pwrst(wakeup_cpu, PWRDM_POWER_ON);
++
++ pwrdm_post_transition();
++
++ return 0;
++}
++
++/**
++ * omap4_hotplug_cpu: OMAP4 CPU hotplug entry
++ * @cpu : CPU ID
++ * @power_state: CPU low power state.
++ */
++int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
++{
++ unsigned int cpu_state = 0;
++
++ if (omap_rev() == OMAP4430_REV_ES1_0)
++ return -ENXIO;
++
++ if (power_state == PWRDM_POWER_OFF)
++ cpu_state = 1;
++
++ clear_cpu_prev_pwrst(cpu);
++ set_cpu_next_pwrst(cpu, power_state);
++ set_cpu_wakeup_addr(cpu, virt_to_phys(omap_secondary_startup));
++ scu_pwrst_prepare(cpu, power_state);
++
++ /*
++	 * The CPU never returns if the targeted power state is OFF mode.
++	 * CPU ONLINE follows the normal CPU ONLINE path via
++	 * omap_secondary_startup().
++ */
++ omap4_finish_suspend(cpu_state);
++
++ set_cpu_next_pwrst(cpu, PWRDM_POWER_ON);
++ return 0;
++}
++
++
++/*
++ * Initialise OMAP4 MPUSS
++ */
++int __init omap4_mpuss_init(void)
++{
++ struct omap4_cpu_pm_info *pm_info;
++
++ if (omap_rev() == OMAP4430_REV_ES1_0) {
++ WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
++ return -ENODEV;
++ }
++
++ sar_base = omap4_get_sar_ram_base();
++
++	/* Initialise per-CPU PM information */
++ pm_info = &per_cpu(omap4_pm_info, 0x0);
++ pm_info->scu_sar_addr = sar_base + SCU_OFFSET0;
++ pm_info->wkup_sar_addr = sar_base + CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
++ pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET0;
++ pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm");
++ if (!pm_info->pwrdm) {
++ pr_err("Lookup failed for CPU0 pwrdm\n");
++ return -ENODEV;
++ }
++
++ /* Clear CPU previous power domain state */
++ pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
++ cpu_clear_prev_logic_pwrst(0);
++
++ /* Initialise CPU0 power domain state to ON */
++ pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
++
++ pm_info = &per_cpu(omap4_pm_info, 0x1);
++ pm_info->scu_sar_addr = sar_base + SCU_OFFSET1;
++ pm_info->wkup_sar_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
++ pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET1;
++ pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm");
++ if (!pm_info->pwrdm) {
++ pr_err("Lookup failed for CPU1 pwrdm\n");
++ return -ENODEV;
++ }
++
++ /* Clear CPU previous power domain state */
++ pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
++ cpu_clear_prev_logic_pwrst(1);
++
++ /* Initialise CPU1 power domain state to ON */
++ pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
++
++ mpuss_pd = pwrdm_lookup("mpu_pwrdm");
++ if (!mpuss_pd) {
++ pr_err("Failed to lookup MPUSS power domain\n");
++ return -ENODEV;
++ }
++ pwrdm_clear_all_prev_pwrst(mpuss_pd);
++ mpuss_clear_prev_logic_pwrst();
++
++ /* Save device type on scratchpad for low level code to use */
++ if (omap_type() != OMAP2_DEVICE_TYPE_GP)
++ __raw_writel(1, sar_base + OMAP_TYPE_OFFSET);
++ else
++ __raw_writel(0, sar_base + OMAP_TYPE_OFFSET);
++
++ save_l2x0_context();
++
++ return 0;
++}
++
++#endif
+diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c
+new file mode 100644
+index 0000000..69f3c72
+--- /dev/null
++++ b/arch/arm/mach-omap2/omap-secure.c
+@@ -0,0 +1,81 @@
++/*
++ * OMAP Secure API infrastructure.
++ *
++ * Copyright (C) 2011 Texas Instruments, Inc.
++ * Santosh Shilimkar <santosh.shilimkar@ti.com>
++ *
++ *
++ * This program is free software,you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/memblock.h>
++
++#include <asm/cacheflush.h>
++
++#include <mach/omap-secure.h>
++
++static phys_addr_t omap_secure_memblock_base;
++
++/**
++ * omap_secure_dispatcher: Routine to dispatch low power secure
++ * service routines
++ * @idx: The HAL API index
++ * @flag: The flag indicating criticality of operation
++ * @nargs: Number of valid arguments out of four.
++ * @arg1, @arg2, @arg3, @arg4: Parameters passed to the secure API
++ *
++ * Return the non-zero error value on failure.
++ */
++u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2,
++ u32 arg3, u32 arg4)
++{
++ u32 ret;
++ u32 param[5];
++
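++	/* param[0] carries the number of valid arguments for the monitor */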
++ param[0] = nargs;
++ param[1] = arg1;
++ param[2] = arg2;
++ param[3] = arg3;
++ param[4] = arg4;
++
++ /*
++ * Secure API needs physical address
++ * pointer for the parameters
++ */
++ flush_cache_all();
++ outer_clean_range(__pa(param), __pa(param + 5));
++ ret = omap_smc2(idx, flag, __pa(param));
++
++ return ret;
++}
++
++/* Allocate the memory to save secure ram */
++int __init omap_secure_ram_reserve_memblock(void)
++{
++ phys_addr_t paddr;
++ u32 size = OMAP_SECURE_RAM_STORAGE;
++
++ size = ALIGN(size, SZ_1M);
++ paddr = memblock_alloc(size, SZ_1M);
++ if (!paddr) {
++ pr_err("%s: failed to reserve %x bytes\n",
++ __func__, size);
++ return -ENOMEM;
++ }
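++	/*
++	 * Free and then remove the region from memblock so the kernel never
++	 * maps it; only its physical address is kept as the secure RAM
++	 * save area.
++	 */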
++ memblock_free(paddr, size);
++ memblock_remove(paddr, size);
++
++ omap_secure_memblock_base = paddr;
++
++ return 0;
++}
++
++phys_addr_t omap_secure_ram_mempool_base(void)
++{
++ return omap_secure_memblock_base;
++}
+diff --git a/arch/arm/mach-omap2/omap-smc.S b/arch/arm/mach-omap2/omap-smc.S
+new file mode 100644
+index 0000000..f6441c1
+--- /dev/null
++++ b/arch/arm/mach-omap2/omap-smc.S
+@@ -0,0 +1,80 @@
++/*
++ * OMAP44xx secure APIs file.
++ *
++ * Copyright (C) 2010 Texas Instruments, Inc.
++ * Written by Santosh Shilimkar <santosh.shilimkar@ti.com>
++ *
++ *
++ * This program is free software,you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/linkage.h>
++
++/*
++ * This is common routine to manage secure monitor API
++ * used to modify the PL310 secure registers.
++ * 'r0' contains the value to be modified and 'r12' contains
++ * the monitor API number. It uses a few CPU registers
++ * internally and hence they need to be backed up, including
++ * the link register "lr".
++ * Function signature : void omap_smc1(u32 fn, u32 arg)
++ */
++
++ENTRY(omap_smc1)
++ stmfd sp!, {r2-r12, lr}
++ mov r12, r0
++ mov r0, r1
++ dsb
++ smc #0
++ ldmfd sp!, {r2-r12, pc}
++ENDPROC(omap_smc1)
++
++/**
++ * u32 omap_smc2(u32 id, u32 flag, u32 pargs)
++ * Low level common routine for secure HAL and PPA APIs.
++ * @id: Application ID of HAL APIs
++ * @flag: Flag to indicate the criticality of operation
++ * @pargs: Physical address of parameter list starting
++ *	    with the number of parameters
++ */
++ENTRY(omap_smc2)
++ stmfd sp!, {r4-r12, lr}
++ mov r3, r2
++ mov r2, r1
++ mov r1, #0x0 @ Process ID
++ mov r6, #0xff
++ mov r12, #0x00 @ Secure Service ID
++ mov r7, #0
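++	@ BPIALL: invalidate the entire branch predictor array before the SMC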
++ mcr p15, 0, r7, c7, c5, 6
++ dsb
++ dmb
++ smc #0
++ ldmfd sp!, {r4-r12, pc}
++ENDPROC(omap_smc2)
++
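++/*
++ * The entry points below wrap individual secure monitor services for the
++ * AuxCoreBoot registers: r12 selects the service index, the arguments stay
++ * in r0/r1 and omap_read_auxcoreboot0 returns its result in r0.
++ */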
++ENTRY(omap_modify_auxcoreboot0)
++ stmfd sp!, {r1-r12, lr}
++ ldr r12, =0x104
++ dsb
++ smc #0
++ ldmfd sp!, {r1-r12, pc}
++ENDPROC(omap_modify_auxcoreboot0)
++
++ENTRY(omap_auxcoreboot_addr)
++ stmfd sp!, {r2-r12, lr}
++ ldr r12, =0x105
++ dsb
++ smc #0
++ ldmfd sp!, {r2-r12, pc}
++ENDPROC(omap_auxcoreboot_addr)
++
++ENTRY(omap_read_auxcoreboot0)
++ stmfd sp!, {r2-r12, lr}
++ ldr r12, =0x103
++ dsb
++ smc #0
++ mov r0, r0, lsr #9
++ ldmfd sp!, {r2-r12, pc}
++ENDPROC(omap_read_auxcoreboot0)
+diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
+index 4412ddb..c1bf3ef 100644
+--- a/arch/arm/mach-omap2/omap-smp.c
++++ b/arch/arm/mach-omap2/omap-smp.c
+@@ -24,16 +24,37 @@
+ #include <asm/hardware/gic.h>
+ #include <asm/smp_scu.h>
+ #include <mach/hardware.h>
+-#include <mach/omap4-common.h>
++#include <mach/omap-secure.h>
++
++#include "common.h"
++
++#include "clockdomain.h"
+
+ /* SCU base address */
+ static void __iomem *scu_base;
+
+ static DEFINE_SPINLOCK(boot_lock);
+
++void __iomem *omap4_get_scu_base(void)
++{
++ return scu_base;
++}
++
+ void __cpuinit platform_secondary_init(unsigned int cpu)
+ {
+ /*
++ * Configure ACTRL and enable NS SMP bit access on CPU1 on HS device.
++ * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
++	 * init and for CPU1, a secure PPA API is provided. CPU0 must be ON
++	 * while executing NS_SMP API on CPU1 and PPA version must be 1.4.0+.
++	 * OMAP443X GP devices - SMP bit isn't accessible.
++ * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
++ */
++ if (cpu_is_omap443x() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
++ omap_secure_dispatcher(OMAP4_PPA_CPU_ACTRL_SMP_INDEX,
++ 4, 0, 0, 0, 0, 0);
++
++ /*
+ * If any interrupts are already enabled for the primary
+ * core (e.g. timer irq), then they will not have been enabled
+ * for us: do so
+@@ -49,6 +70,8 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
+
+ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+ {
++ static struct clockdomain *cpu1_clkdm;
++ static bool booted;
+ /*
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+@@ -64,6 +87,29 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+ omap_modify_auxcoreboot0(0x200, 0xfffffdff);
+ flush_cache_all();
+ smp_wmb();
++
++ if (!cpu1_clkdm)
++ cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
++
++ /*
++	 * SGIs (Software Generated Interrupts) are not wakeup capable
++	 * from low power states. This is a known limitation on OMAP4 and
++	 * needs to be worked around by using software forced clockdomain
++	 * wake-up. To wake up CPU1, CPU0 forces the CPU1 clockdomain into
++	 * software-forced wakeup. The clockdomain is then put back into
++	 * hardware-supervised mode.
++ * More details can be found in OMAP4430 TRM - Version J
++ * Section :
++ * 4.3.4.2 Power States of CPU0 and CPU1
++ */
++ if (booted) {
++ clkdm_wakeup(cpu1_clkdm);
++ clkdm_allow_idle(cpu1_clkdm);
++ } else {
++ dsb_sev();
++ booted = true;
++ }
++
+ gic_raise_softirq(cpumask_of(cpu), 1);
+
+ /*
+diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
+new file mode 100644
+index 0000000..d3d8971
+--- /dev/null
++++ b/arch/arm/mach-omap2/omap-wakeupgen.c
+@@ -0,0 +1,389 @@
++/*
++ * OMAP WakeupGen Source file
++ *
++ * OMAP WakeupGen is the interrupt controller extension used along
++ * with ARM GIC to wake the CPU out from low power states on
++ * external interrupts. It is responsible for generating wakeup
++ * event from the incoming interrupts and enable bits. It is
++ * implemented in MPU always ON power domain. During normal operation,
++ * WakeupGen delivers external interrupts directly to the GIC.
++ *
++ * Copyright (C) 2011 Texas Instruments, Inc.
++ * Santosh Shilimkar <santosh.shilimkar@ti.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/irq.h>
++#include <linux/platform_device.h>
++#include <linux/cpu.h>
++#include <linux/notifier.h>
++#include <linux/cpu_pm.h>
++
++#include <asm/hardware/gic.h>
++
++#include <mach/omap-wakeupgen.h>
++#include <mach/omap-secure.h>
++
++#include "omap4-sar-layout.h"
++#include "common.h"
++
++#define NR_REG_BANKS 4
++#define MAX_IRQS 128
++#define WKG_MASK_ALL 0x00000000
++#define WKG_UNMASK_ALL 0xffffffff
++#define CPU_ENA_OFFSET 0x400
++#define CPU0_ID 0x0
++#define CPU1_ID 0x1
++
++static void __iomem *wakeupgen_base;
++static void __iomem *sar_base;
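++/* Four 32-bit banks per CPU cover the 128 (MAX_IRQS) wakeup-capable SPIs */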
++static DEFINE_PER_CPU(u32 [NR_REG_BANKS], irqmasks);
++static DEFINE_SPINLOCK(wakeupgen_lock);
++static unsigned int irq_target_cpu[NR_IRQS];
++
++/*
++ * Static helper functions.
++ */
++static inline u32 wakeupgen_readl(u8 idx, u32 cpu)
++{
++ return __raw_readl(wakeupgen_base + OMAP_WKG_ENB_A_0 +
++ (cpu * CPU_ENA_OFFSET) + (idx * 4));
++}
++
++static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu)
++{
++ __raw_writel(val, wakeupgen_base + OMAP_WKG_ENB_A_0 +
++ (cpu * CPU_ENA_OFFSET) + (idx * 4));
++}
++
++static inline void sar_writel(u32 val, u32 offset, u8 idx)
++{
++ __raw_writel(val, sar_base + offset + (idx * 4));
++}
++
++static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg)
++{
++ u8 i;
++
++ for (i = 0; i < NR_REG_BANKS; i++)
++ wakeupgen_writel(reg, i, cpu);
++}
++
++static inline int _wakeupgen_get_irq_info(u32 irq, u32 *bit_posn, u8 *reg_index)
++{
++ unsigned int spi_irq;
++
++ /*
++ * PPIs and SGIs are not supported.
++ */
++ if (irq < OMAP44XX_IRQ_GIC_START)
++ return -EINVAL;
++
++ /*
++ * Subtract the GIC offset.
++ */
++ spi_irq = irq - OMAP44XX_IRQ_GIC_START;
++ if (spi_irq > MAX_IRQS) {
++ pr_err("omap wakeupGen: Invalid IRQ%d\n", irq);
++ return -EINVAL;
++ }
++
++ /*
++	 * Each WakeupGen register controls 32 interrupts,
++ * i.e. 1 bit per SPI IRQ
++ */
++ *reg_index = spi_irq >> 5;
++ *bit_posn = spi_irq %= 32;
++
++ return 0;
++}
++
++static void _wakeupgen_clear(unsigned int irq, unsigned int cpu)
++{
++ u32 val, bit_number;
++ u8 i;
++
++ if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
++ return;
++
++ val = wakeupgen_readl(i, cpu);
++ val &= ~BIT(bit_number);
++ wakeupgen_writel(val, i, cpu);
++}
++
++static void _wakeupgen_set(unsigned int irq, unsigned int cpu)
++{
++ u32 val, bit_number;
++ u8 i;
++
++ if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
++ return;
++
++ val = wakeupgen_readl(i, cpu);
++ val |= BIT(bit_number);
++ wakeupgen_writel(val, i, cpu);
++}
++
++static void _wakeupgen_save_masks(unsigned int cpu)
++{
++ u8 i;
++
++ for (i = 0; i < NR_REG_BANKS; i++)
++ per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu);
++}
++
++static void _wakeupgen_restore_masks(unsigned int cpu)
++{
++ u8 i;
++
++ for (i = 0; i < NR_REG_BANKS; i++)
++ wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu);
++}
++
++/*
++ * Architecture specific Mask extension
++ */
++static void wakeupgen_mask(struct irq_data *d)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&wakeupgen_lock, flags);
++ _wakeupgen_clear(d->irq, irq_target_cpu[d->irq]);
++ spin_unlock_irqrestore(&wakeupgen_lock, flags);
++}
++
++/*
++ * Architecture specific Unmask extension
++ */
++static void wakeupgen_unmask(struct irq_data *d)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&wakeupgen_lock, flags);
++ _wakeupgen_set(d->irq, irq_target_cpu[d->irq]);
++ spin_unlock_irqrestore(&wakeupgen_lock, flags);
++}
++
++/*
++ * Mask or unmask all interrupts on a given CPU.
++ *	set = 1: save the current masks, then mask all wakeups on the 'cpu'
++ *	set = 0: unmask all wakeups and restore the saved masks on the 'cpu'
++ * Keeping the saved masks ensures that the initial settings are maintained.
++ * This is faster than iterating through the GIC registers to arrive at the
++ * correct masks.
++ */
++static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&wakeupgen_lock, flags);
++ if (set) {
++ _wakeupgen_save_masks(cpu);
++ _wakeupgen_set_all(cpu, WKG_MASK_ALL);
++ } else {
++ _wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
++ _wakeupgen_restore_masks(cpu);
++ }
++ spin_unlock_irqrestore(&wakeupgen_lock, flags);
++}
++
++#ifdef CONFIG_CPU_PM
++/*
++ * Save WakeupGen interrupt context in SAR BANK3. Restore is done by
++ * ROM code. WakeupGen IP is integrated along with GIC to manage the
++ * interrupt wakeups from CPU low power states. It manages
++ * masking/unmasking of Shared peripheral interrupts(SPI). So the
++ * interrupt enable/disable control should be in sync and consistent
++ * at WakeupGen and GIC so that interrupts are not lost.
++ */
++static void irq_save_context(void)
++{
++ u32 i, val;
++
++ if (omap_rev() == OMAP4430_REV_ES1_0)
++ return;
++
++ if (!sar_base)
++ sar_base = omap4_get_sar_ram_base();
++
++ for (i = 0; i < NR_REG_BANKS; i++) {
++ /* Save the CPUx interrupt mask for IRQ 0 to 127 */
++ val = wakeupgen_readl(i, 0);
++ sar_writel(val, WAKEUPGENENB_OFFSET_CPU0, i);
++ val = wakeupgen_readl(i, 1);
++ sar_writel(val, WAKEUPGENENB_OFFSET_CPU1, i);
++
++ /*
++ * Disable the secure interrupts for CPUx. The restore
++ * code blindly restores secure and non-secure interrupt
++		 * masks from SAR RAM. Secure interrupts are not supposed
++		 * to be enabled from the HLOS, so overwrite the SAR location
++		 * so that the secure interrupts remain disabled.
++ */
++ sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
++ sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
++ }
++
++ /* Save AuxBoot* registers */
++ val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
++ __raw_writel(val, sar_base + AUXCOREBOOT0_OFFSET);
++ val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
++ __raw_writel(val, sar_base + AUXCOREBOOT1_OFFSET);
++
++ /* Save SyncReq generation logic */
++ val = __raw_readl(wakeupgen_base + OMAP_PTMSYNCREQ_MASK);
++ __raw_writel(val, sar_base + PTMSYNCREQ_MASK_OFFSET);
++ val = __raw_readl(wakeupgen_base + OMAP_PTMSYNCREQ_EN);
++ __raw_writel(val, sar_base + PTMSYNCREQ_EN_OFFSET);
++
++ /* Set the Backup Bit Mask status */
++ val = __raw_readl(sar_base + SAR_BACKUP_STATUS_OFFSET);
++ val |= SAR_BACKUP_STATUS_WAKEUPGEN;
++ __raw_writel(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
++}
++
++/*
++ * Clear WakeupGen SAR backup status.
++ */
++void irq_sar_clear(void)
++{
++ u32 val;
++ val = __raw_readl(sar_base + SAR_BACKUP_STATUS_OFFSET);
++ val &= ~SAR_BACKUP_STATUS_WAKEUPGEN;
++ __raw_writel(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
++}
++
++/*
++ * Save GIC and Wakeupgen interrupt context using secure API
++ * for HS/EMU devices.
++ */
++static void irq_save_secure_context(void)
++{
++ u32 ret;
++ ret = omap_secure_dispatcher(OMAP4_HAL_SAVEGIC_INDEX,
++ FLAG_START_CRITICAL,
++ 0, 0, 0, 0, 0);
++ if (ret != API_HAL_RET_VALUE_OK)
++ pr_err("GIC and Wakeupgen context save failed\n");
++}
++#endif
++
++#ifdef CONFIG_HOTPLUG_CPU
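++/*
++ * Restore the per-CPU wakeup masks when a CPU comes online and mask
++ * all of them once the CPU is dead.
++ */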
++static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
++ unsigned long action, void *hcpu)
++{
++ unsigned int cpu = (unsigned int)hcpu;
++
++ switch (action) {
++ case CPU_ONLINE:
++ wakeupgen_irqmask_all(cpu, 0);
++ break;
++ case CPU_DEAD:
++ wakeupgen_irqmask_all(cpu, 1);
++ break;
++ }
++ return NOTIFY_OK;
++}
++
++static struct notifier_block __refdata irq_hotplug_notifier = {
++ .notifier_call = irq_cpu_hotplug_notify,
++};
++
++static void __init irq_hotplug_init(void)
++{
++ register_hotcpu_notifier(&irq_hotplug_notifier);
++}
++#else
++static void __init irq_hotplug_init(void)
++{}
++#endif
++
++#ifdef CONFIG_CPU_PM
++static int irq_notifier(struct notifier_block *self, unsigned long cmd, void *v)
++{
++ switch (cmd) {
++ case CPU_CLUSTER_PM_ENTER:
++ if (omap_type() == OMAP2_DEVICE_TYPE_GP)
++ irq_save_context();
++ else
++ irq_save_secure_context();
++ break;
++ case CPU_CLUSTER_PM_EXIT:
++ if (omap_type() == OMAP2_DEVICE_TYPE_GP)
++ irq_sar_clear();
++ break;
++ }
++ return NOTIFY_OK;
++}
++
++static struct notifier_block irq_notifier_block = {
++ .notifier_call = irq_notifier,
++};
++
++static void __init irq_pm_init(void)
++{
++ cpu_pm_register_notifier(&irq_notifier_block);
++}
++#else
++static void __init irq_pm_init(void)
++{}
++#endif
++
++/*
++ * Initialise the wakeupgen module.
++ */
++int __init omap_wakeupgen_init(void)
++{
++ int i;
++ unsigned int boot_cpu = smp_processor_id();
++
++ /* Not supported on OMAP4 ES1.0 silicon */
++ if (omap_rev() == OMAP4430_REV_ES1_0) {
++ WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n");
++ return -EPERM;
++ }
++
++ /* Static mapping, never released */
++ wakeupgen_base = ioremap(OMAP44XX_WKUPGEN_BASE, SZ_4K);
++ if (WARN_ON(!wakeupgen_base))
++ return -ENOMEM;
++
++ /* Clear all IRQ bitmasks at wakeupGen level */
++ for (i = 0; i < NR_REG_BANKS; i++) {
++ wakeupgen_writel(0, i, CPU0_ID);
++ wakeupgen_writel(0, i, CPU1_ID);
++ }
++
++ /*
++ * Override GIC architecture specific functions to add
++ * OMAP WakeupGen interrupt controller along with GIC
++ */
++ gic_arch_extn.irq_mask = wakeupgen_mask;
++ gic_arch_extn.irq_unmask = wakeupgen_unmask;
++ gic_arch_extn.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE;
++
++ /*
++ * FIXME: Add support to set_smp_affinity() once the core
++	 * GIC code has the necessary hooks in place.
++ */
++
++ /* Associate all the IRQs to boot CPU like GIC init does. */
++ for (i = 0; i < NR_IRQS; i++)
++ irq_target_cpu[i] = boot_cpu;
++
++ irq_hotplug_init();
++ irq_pm_init();
++
++ return 0;
++}
+diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
+index 35ac3e5..bc16c81 100644
+--- a/arch/arm/mach-omap2/omap4-common.c
++++ b/arch/arm/mach-omap2/omap4-common.c
+@@ -15,24 +15,80 @@
+ #include <linux/init.h>
+ #include <linux/io.h>
+ #include <linux/platform_device.h>
++#include <linux/memblock.h>
+
+ #include <asm/hardware/gic.h>
+ #include <asm/hardware/cache-l2x0.h>
++#include <asm/mach/map.h>
+
+ #include <plat/irqs.h>
++#include <plat/sram.h>
+
+ #include <mach/hardware.h>
+-#include <mach/omap4-common.h>
++#include <mach/omap-wakeupgen.h>
++
++#include "common.h"
++#include "omap4-sar-layout.h"
+
+ #ifdef CONFIG_CACHE_L2X0
+-void __iomem *l2cache_base;
++static void __iomem *l2cache_base;
+ #endif
+
+-void __iomem *gic_dist_base_addr;
++static void __iomem *sar_ram_base;
++
++#ifdef CONFIG_OMAP4_ERRATA_I688
++/* Used to implement memory barrier on DRAM path */
++#define OMAP4_DRAM_BARRIER_VA 0xfe600000
++
++void __iomem *dram_sync, *sram_sync;
++
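++/*
++ * Read back and rewrite one word through the strongly-ordered DRAM and
++ * SRAM mappings so that outstanding writes on both paths are pushed out
++ * before the barrier completes (i688 workaround).
++ */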
++void omap_bus_sync(void)
++{
++ if (dram_sync && sram_sync) {
++ writel_relaxed(readl_relaxed(dram_sync), dram_sync);
++ writel_relaxed(readl_relaxed(sram_sync), sram_sync);
++ isb();
++ }
++}
++
++static int __init omap_barriers_init(void)
++{
++ struct map_desc dram_io_desc[1];
++ phys_addr_t paddr;
++ u32 size;
++
++ if (!cpu_is_omap44xx())
++ return -ENODEV;
+
++ size = ALIGN(PAGE_SIZE, SZ_1M);
++ paddr = memblock_alloc(size, SZ_1M);
++ if (!paddr) {
++ pr_err("%s: failed to reserve 4 Kbytes\n", __func__);
++ return -ENOMEM;
++ }
++ memblock_free(paddr, size);
++ memblock_remove(paddr, size);
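++	/* paddr is now outside the kernel's memory map; remap it strongly-ordered */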
++ dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA;
++ dram_io_desc[0].pfn = __phys_to_pfn(paddr);
++ dram_io_desc[0].length = size;
++ dram_io_desc[0].type = MT_MEMORY_SO;
++ iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc));
++ dram_sync = (void __iomem *) dram_io_desc[0].virtual;
++ sram_sync = (void __iomem *) OMAP4_SRAM_VA;
++
++ pr_info("OMAP4: Map 0x%08llx to 0x%08lx for dram barrier\n",
++ (long long) paddr, dram_io_desc[0].virtual);
++
++ return 0;
++}
++core_initcall(omap_barriers_init);
++#endif
+
+ void __init gic_init_irq(void)
+ {
++ void __iomem *omap_irq_base;
++ void __iomem *gic_dist_base_addr;
++
+ /* Static mapping, never released */
+ gic_dist_base_addr = ioremap(OMAP44XX_GIC_DIST_BASE, SZ_4K);
+ BUG_ON(!gic_dist_base_addr);
+@@ -41,11 +97,18 @@ void __init gic_init_irq(void)
+ omap_irq_base = ioremap(OMAP44XX_GIC_CPU_BASE, SZ_512);
+ BUG_ON(!omap_irq_base);
+
++ omap_wakeupgen_init();
++
+ gic_init(0, 29, gic_dist_base_addr, omap_irq_base);
+ }
+
+ #ifdef CONFIG_CACHE_L2X0
+
++void __iomem *omap4_get_l2cache_base(void)
++{
++ return l2cache_base;
++}
++
+ static void omap4_l2x0_disable(void)
+ {
+ /* Disable PL310 L2 Cache controller */
+@@ -71,7 +134,8 @@ static int __init omap_l2_cache_init(void)
+
+ /* Static mapping, never released */
+ l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
+- BUG_ON(!l2cache_base);
++ if (WARN_ON(!l2cache_base))
++ return -ENOMEM;
+
+ /*
+ * 16-way associativity, parity disabled
+@@ -111,3 +175,30 @@ static int __init omap_l2_cache_init(void)
+ }
+ early_initcall(omap_l2_cache_init);
+ #endif
++
++void __iomem *omap4_get_sar_ram_base(void)
++{
++ return sar_ram_base;
++}
++
++/*
++ * SAR RAM used to save and restore the HW
++ * context in low power modes
++ */
++static int __init omap4_sar_ram_init(void)
++{
++ /*
++ * To avoid code running on other OMAPs in
++ * multi-omap builds
++ */
++ if (!cpu_is_omap44xx())
++ return -ENOMEM;
++
++ /* Static mapping, never released */
++ sar_ram_base = ioremap(OMAP44XX_SAR_RAM_BASE, SZ_16K);
++ if (WARN_ON(!sar_ram_base))
++ return -ENOMEM;
++
++ return 0;
++}
++early_initcall(omap4_sar_ram_init);
+diff --git a/arch/arm/mach-omap2/omap4-sar-layout.h b/arch/arm/mach-omap2/omap4-sar-layout.h
+new file mode 100644
+index 0000000..fe5b545
+--- /dev/null
++++ b/arch/arm/mach-omap2/omap4-sar-layout.h
+@@ -0,0 +1,50 @@
++/*
++ * omap4-sar-layout.h: OMAP4 SAR RAM layout header file
++ *
++ * Copyright (C) 2011 Texas Instruments, Inc.
++ * Santosh Shilimkar <santosh.shilimkar@ti.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++#ifndef OMAP_ARCH_OMAP4_SAR_LAYOUT_H
++#define OMAP_ARCH_OMAP4_SAR_LAYOUT_H
++
++/*
++ * SAR BANK offsets from base address OMAP44XX_SAR_RAM_BASE
++ */
++#define SAR_BANK1_OFFSET 0x0000
++#define SAR_BANK2_OFFSET 0x1000
++#define SAR_BANK3_OFFSET 0x2000
++#define SAR_BANK4_OFFSET 0x3000
++
++/* Scratch pad memory offsets from SAR_BANK1 */
++#define SCU_OFFSET0 0xd00
++#define SCU_OFFSET1 0xd04
++#define OMAP_TYPE_OFFSET 0xd10
++#define L2X0_SAVE_OFFSET0 0xd14
++#define L2X0_SAVE_OFFSET1 0xd18
++#define L2X0_AUXCTRL_OFFSET 0xd1c
++#define L2X0_PREFETCH_CTRL_OFFSET 0xd20
++
++/* CPUx Wakeup Non-Secure Physical Address offsets in SAR_BANK3 */
++#define CPU0_WAKEUP_NS_PA_ADDR_OFFSET 0xa04
++#define CPU1_WAKEUP_NS_PA_ADDR_OFFSET 0xa08
++
++#define SAR_BACKUP_STATUS_OFFSET (SAR_BANK3_OFFSET + 0x500)
++#define SAR_SECURE_RAM_SIZE_OFFSET (SAR_BANK3_OFFSET + 0x504)
++#define SAR_SECRAM_SAVED_AT_OFFSET (SAR_BANK3_OFFSET + 0x508)
++
++/* WakeUpGen save restore offset from OMAP44XX_SAR_RAM_BASE */
++#define WAKEUPGENENB_OFFSET_CPU0 (SAR_BANK3_OFFSET + 0x684)
++#define WAKEUPGENENB_SECURE_OFFSET_CPU0 (SAR_BANK3_OFFSET + 0x694)
++#define WAKEUPGENENB_OFFSET_CPU1 (SAR_BANK3_OFFSET + 0x6a4)
++#define WAKEUPGENENB_SECURE_OFFSET_CPU1 (SAR_BANK3_OFFSET + 0x6b4)
++#define AUXCOREBOOT0_OFFSET (SAR_BANK3_OFFSET + 0x6c4)
++#define AUXCOREBOOT1_OFFSET (SAR_BANK3_OFFSET + 0x6c8)
++#define PTMSYNCREQ_MASK_OFFSET (SAR_BANK3_OFFSET + 0x6cc)
++#define PTMSYNCREQ_EN_OFFSET (SAR_BANK3_OFFSET + 0x6d0)
++#define SAR_BACKUP_STATUS_WAKEUPGEN 0x10
++
++#endif
+diff --git a/arch/arm/mach-omap2/omap44xx-smc.S b/arch/arm/mach-omap2/omap44xx-smc.S
+deleted file mode 100644
+index e69d37d..0000000
+--- a/arch/arm/mach-omap2/omap44xx-smc.S
++++ /dev/null
+@@ -1,57 +0,0 @@
+-/*
+- * OMAP44xx secure APIs file.
+- *
+- * Copyright (C) 2010 Texas Instruments, Inc.
+- * Written by Santosh Shilimkar <santosh.shilimkar@ti.com>
+- *
+- *
+- * This program is free software,you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-
+-#include <linux/linkage.h>
+-
+-/*
+- * This is common routine to manage secure monitor API
+- * used to modify the PL310 secure registers.
+- * 'r0' contains the value to be modified and 'r12' contains
+- * the monitor API number. It uses few CPU registers
+- * internally and hence they need be backed up including
+- * link register "lr".
+- * Function signature : void omap_smc1(u32 fn, u32 arg)
+- */
+-
+-ENTRY(omap_smc1)
+- stmfd sp!, {r2-r12, lr}
+- mov r12, r0
+- mov r0, r1
+- dsb
+- smc #0
+- ldmfd sp!, {r2-r12, pc}
+-ENDPROC(omap_smc1)
+-
+-ENTRY(omap_modify_auxcoreboot0)
+- stmfd sp!, {r1-r12, lr}
+- ldr r12, =0x104
+- dsb
+- smc #0
+- ldmfd sp!, {r1-r12, pc}
+-ENDPROC(omap_modify_auxcoreboot0)
+-
+-ENTRY(omap_auxcoreboot_addr)
+- stmfd sp!, {r2-r12, lr}
+- ldr r12, =0x105
+- dsb
+- smc #0
+- ldmfd sp!, {r2-r12, pc}
+-ENDPROC(omap_auxcoreboot_addr)
+-
+-ENTRY(omap_read_auxcoreboot0)
+- stmfd sp!, {r2-r12, lr}
+- ldr r12, =0x103
+- dsb
+- smc #0
+- mov r0, r0, lsr #9
+- ldmfd sp!, {r2-r12, pc}
+-ENDPROC(omap_read_auxcoreboot0)
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index 207a2ff..bc14f9f 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -136,8 +136,9 @@
+ #include <linux/list.h>
+ #include <linux/mutex.h>
+ #include <linux/spinlock.h>
++#include <linux/slab.h>
+
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/cpu.h>
+ #include "clockdomain.h"
+ #include "powerdomain.h"
+@@ -149,6 +150,7 @@
+ #include "cminst44xx.h"
+ #include "prm2xxx_3xxx.h"
+ #include "prm44xx.h"
++#include "prm33xx.h"
+ #include "prminst44xx.h"
+ #include "mux.h"
+
+@@ -381,6 +383,51 @@ static int _set_module_autoidle(struct omap_hwmod *oh, u8 autoidle,
+ }
+
+ /**
++ * _set_idle_ioring_wakeup - enable/disable IO pad wakeup on hwmod idle for mux
++ * @oh: struct omap_hwmod *
++ * @set_wake: bool value indicating to set (true) or clear (false) wakeup enable
++ *
++ * Set or clear the I/O pad wakeup flag in the mux entries for the
++ * hwmod @oh. This function changes the @oh->mux->pads_dynamic array
++ * in memory. If the hwmod is currently idled, and the new idle
++ * values don't match the previous ones, this function will also
++ * update the SCM PADCTRL registers. Otherwise, if the hwmod is not
++ * currently idled, this function won't touch the hardware: the new
++ * mux settings are written to the SCM PADCTRL registers when the
++ * hwmod is idled. No return value.
++ */
++static void _set_idle_ioring_wakeup(struct omap_hwmod *oh, bool set_wake)
++{
++ struct omap_device_pad *pad;
++ bool change = false;
++ u16 prev_idle;
++ int j;
++
++ if (!oh->mux || !oh->mux->enabled)
++ return;
++
++ for (j = 0; j < oh->mux->nr_pads_dynamic; j++) {
++ pad = oh->mux->pads_dynamic[j];
++
++ if (!(pad->flags & OMAP_DEVICE_PAD_WAKEUP))
++ continue;
++
++ prev_idle = pad->idle;
++
++ if (set_wake)
++ pad->idle |= OMAP_WAKEUP_EN;
++ else
++ pad->idle &= ~OMAP_WAKEUP_EN;
++
++ if (prev_idle != pad->idle)
++ change = true;
++ }
++
++ if (change && oh->_state == _HWMOD_STATE_IDLE)
++ omap_hwmod_mux(oh->mux, _HWMOD_STATE_IDLE);
++}
++
++/**
+ * _enable_wakeup: set OCP_SYSCONFIG.ENAWAKEUP bit in the hardware
+ * @oh: struct omap_hwmod *
+ *
+@@ -688,45 +735,83 @@ static void _disable_optional_clocks(struct omap_hwmod *oh)
+ */
+ static void _enable_module(struct omap_hwmod *oh)
+ {
+- /* The module mode does not exist prior OMAP4 */
+- if (cpu_is_omap24xx() || cpu_is_omap34xx())
++ /* The module mode does not exist prior OMAP4 & AM33xx */
++ if (!cpu_is_omap44xx() && !cpu_is_am33xx())
+ return;
+
+ if (!oh->clkdm || !oh->prcm.omap4.modulemode)
+ return;
+
+ pr_debug("omap_hwmod: %s: _enable_module: %d\n",
+- oh->name, oh->prcm.omap4.modulemode);
++ oh->name, oh->prcm.omap4.modulemode);
+
+ omap4_cminst_module_enable(oh->prcm.omap4.modulemode,
+- oh->clkdm->prcm_partition,
+- oh->clkdm->cm_inst,
+- oh->clkdm->clkdm_offs,
+- oh->prcm.omap4.clkctrl_offs);
++ oh->clkdm->prcm_partition,
++ oh->clkdm->cm_inst,
++ oh->clkdm->clkdm_offs,
++ oh->prcm.omap4.clkctrl_offs);
++}
++
++/**
++ * _omap4_wait_target_disable - wait for a module to be disabled on OMAP4
++ * @oh: struct omap_hwmod *
++ *
++ * Wait for a module @oh to enter slave idle. Returns 0 if the module
++ * does not have an IDLEST bit or if the module successfully enters
++ * slave idle; otherwise, pass along the return value of the
++ * appropriate *_cm*_wait_module_idle() function.
++ */
++static int _omap4_wait_target_disable(struct omap_hwmod *oh)
++{
++ if (!cpu_is_omap44xx() && !cpu_is_am33xx())
++ return 0;
++
++ if (!oh)
++ return -EINVAL;
++
++ if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
++ return 0;
++
++ if (oh->flags & HWMOD_NO_IDLEST)
++ return 0;
++
++ return omap4_cminst_wait_module_idle(oh->clkdm->prcm_partition,
++ oh->clkdm->cm_inst,
++ oh->clkdm->clkdm_offs,
++ oh->prcm.omap4.clkctrl_offs);
+ }
+
+ /**
+- * _disable_module - enable CLKCTRL modulemode on OMAP4
++ * _omap4_disable_module - disable CLKCTRL modulemode on OMAP4
+ * @oh: struct omap_hwmod *
+ *
+ * Disable the PRCM module mode related to the hwmod @oh.
+- * No return value.
++ * Return -EINVAL if the modulemode is not supported and 0 in case of success.
+ */
+-static void _disable_module(struct omap_hwmod *oh)
++static int _omap4_disable_module(struct omap_hwmod *oh)
+ {
+- /* The module mode does not exist prior OMAP4 */
+- if (cpu_is_omap24xx() || cpu_is_omap34xx())
+- return;
++ int v;
++
++ /* The module mode does not exist prior OMAP4 & AM33xx */
++ if (!cpu_is_omap44xx() && !cpu_is_am33xx())
++ return -EINVAL;
+
+ if (!oh->clkdm || !oh->prcm.omap4.modulemode)
+- return;
++ return -EINVAL;
+
+- pr_debug("omap_hwmod: %s: _disable_module\n", oh->name);
++ pr_debug("omap_hwmod: %s: %s\n", oh->name, __func__);
+
+ omap4_cminst_module_disable(oh->clkdm->prcm_partition,
+- oh->clkdm->cm_inst,
+- oh->clkdm->clkdm_offs,
+- oh->prcm.omap4.clkctrl_offs);
++ oh->clkdm->cm_inst,
++ oh->clkdm->clkdm_offs,
++ oh->prcm.omap4.clkctrl_offs);
++
++ v = _omap4_wait_target_disable(oh);
++ if (v)
++ pr_warn("omap_hwmod: %s: _wait_target_disable failed\n",
++ oh->name);
++
++ return 0;
+ }
+
+ /**
+@@ -1051,7 +1136,7 @@ static struct omap_hwmod *_lookup(const char *name)
+ */
+ static int _init_clkdm(struct omap_hwmod *oh)
+ {
+- if (cpu_is_omap24xx() || cpu_is_omap34xx())
++ if (cpu_is_omap24xx() || (cpu_is_omap34xx() && !cpu_is_am33xx()))
+ return 0;
+
+ if (!oh->clkdm_name) {
+@@ -1133,11 +1218,13 @@ static int _wait_target_ready(struct omap_hwmod *oh)
+
+ /* XXX check clock enable states */
+
+- if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
+- ret = omap2_cm_wait_module_ready(oh->prcm.omap2.module_offs,
+- oh->prcm.omap2.idlest_reg_id,
+- oh->prcm.omap2.idlest_idle_bit);
+- } else if (cpu_is_omap44xx()) {
++ /*
++ * In order to use omap4 cminst code for am33xx family of devices,
++ * first check cpu_is_am33xx here.
++ *
++ * Note: cpu_is_omap34xx is true for am33xx device as well.
++ */
++ if (cpu_is_omap44xx() || cpu_is_am33xx()) {
+ if (!oh->clkdm)
+ return -EINVAL;
+
+@@ -1145,6 +1232,11 @@ static int _wait_target_ready(struct omap_hwmod *oh)
+ oh->clkdm->cm_inst,
+ oh->clkdm->clkdm_offs,
+ oh->prcm.omap4.clkctrl_offs);
++ } else if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
++ ret = omap2_cm_wait_module_ready(
++ oh->prcm.omap2.module_offs,
++ oh->prcm.omap2.idlest_reg_id,
++ oh->prcm.omap2.idlest_idle_bit);
+ } else {
+ BUG();
+ };
+@@ -1153,36 +1245,6 @@ static int _wait_target_ready(struct omap_hwmod *oh)
+ }
+
+ /**
+- * _wait_target_disable - wait for a module to be disabled
+- * @oh: struct omap_hwmod *
+- *
+- * Wait for a module @oh to enter slave idle. Returns 0 if the module
+- * does not have an IDLEST bit or if the module successfully enters
+- * slave idle; otherwise, pass along the return value of the
+- * appropriate *_cm*_wait_module_idle() function.
+- */
+-static int _wait_target_disable(struct omap_hwmod *oh)
+-{
+- /* TODO: For now just handle OMAP4+ */
+- if (cpu_is_omap24xx() || cpu_is_omap34xx())
+- return 0;
+-
+- if (!oh)
+- return -EINVAL;
+-
+- if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
+- return 0;
+-
+- if (oh->flags & HWMOD_NO_IDLEST)
+- return 0;
+-
+- return omap4_cminst_wait_module_idle(oh->clkdm->prcm_partition,
+- oh->clkdm->cm_inst,
+- oh->clkdm->clkdm_offs,
+- oh->prcm.omap4.clkctrl_offs);
+-}
+-
+-/**
+ * _lookup_hardreset - fill register bit info for this hwmod/reset line
+ * @oh: struct omap_hwmod *
+ * @name: name of the reset line in the context of this hwmod
+@@ -1234,14 +1296,20 @@ static int _assert_hardreset(struct omap_hwmod *oh, const char *name)
+ if (IS_ERR_VALUE(ret))
+ return ret;
+
+- if (cpu_is_omap24xx() || cpu_is_omap34xx())
+- return omap2_prm_assert_hardreset(oh->prcm.omap2.module_offs,
+- ohri.rst_shift);
+- else if (cpu_is_omap44xx())
++ /*
++ * In order to use omap4 prm code for am33xx family of devices,
++ * first check cpu_is_am33xx here.
++ *
++ * Note: cpu_is_omap34xx is true for am33xx device as well.
++ */
++ if (cpu_is_omap44xx() || cpu_is_am33xx())
+ return omap4_prminst_assert_hardreset(ohri.rst_shift,
+ oh->clkdm->pwrdm.ptr->prcm_partition,
+ oh->clkdm->pwrdm.ptr->prcm_offs,
+ oh->prcm.omap4.rstctrl_offs);
++ else if (cpu_is_omap24xx() || cpu_is_omap34xx())
++ return omap2_prm_assert_hardreset(oh->prcm.omap2.module_offs,
++ ohri.rst_shift);
+ else
+ return -EINVAL;
+ }
+@@ -1268,11 +1336,13 @@ static int _deassert_hardreset(struct omap_hwmod *oh, const char *name)
+ if (IS_ERR_VALUE(ret))
+ return ret;
+
+- if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
+- ret = omap2_prm_deassert_hardreset(oh->prcm.omap2.module_offs,
+- ohri.rst_shift,
+- ohri.st_shift);
+- } else if (cpu_is_omap44xx()) {
++ /*
++ * In order to use omap4 prm code for am33xx family of devices,
++ * first check cpu_is_am33xx here.
++ *
++ * Note: cpu_is_omap34xx is true for am33xx device as well.
++ */
++ if (cpu_is_omap44xx() || cpu_is_am33xx()) {
+ if (ohri.st_shift)
+ pr_err("omap_hwmod: %s: %s: hwmod data error: OMAP4 does not support st_shift\n",
+ oh->name, name);
+@@ -1280,6 +1350,10 @@ static int _deassert_hardreset(struct omap_hwmod *oh, const char *name)
+ oh->clkdm->pwrdm.ptr->prcm_partition,
+ oh->clkdm->pwrdm.ptr->prcm_offs,
+ oh->prcm.omap4.rstctrl_offs);
++ } else if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
++ ret = omap2_prm_deassert_hardreset(oh->prcm.omap2.module_offs,
++ ohri.rst_shift,
++ ohri.st_shift);
+ } else {
+ return -EINVAL;
+ }
+@@ -1310,14 +1384,20 @@ static int _read_hardreset(struct omap_hwmod *oh, const char *name)
+ if (IS_ERR_VALUE(ret))
+ return ret;
+
+- if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
+- return omap2_prm_is_hardreset_asserted(oh->prcm.omap2.module_offs,
+- ohri.st_shift);
+- } else if (cpu_is_omap44xx()) {
++ /*
++ * In order to use omap4 prm code for am33xx family of devices,
++ * first check cpu_is_am33xx here.
++ *
++ * Note: cpu_is_omap34xx is true for am33xx device as well.
++ */
++ if (cpu_is_omap44xx() || cpu_is_am33xx()) {
+ return omap4_prminst_is_hardreset_asserted(ohri.rst_shift,
+ oh->clkdm->pwrdm.ptr->prcm_partition,
+ oh->clkdm->pwrdm.ptr->prcm_offs,
+ oh->prcm.omap4.rstctrl_offs);
++ } else if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
++ return omap2_prm_is_hardreset_asserted(oh->prcm.omap2.module_offs,
++ ohri.st_shift);
+ } else {
+ return -EINVAL;
+ }
+@@ -1441,6 +1521,25 @@ static int _enable(struct omap_hwmod *oh)
+
+ pr_debug("omap_hwmod: %s: enabling\n", oh->name);
+
++ /*
++ * hwmods with HWMOD_INIT_NO_IDLE flag set are left
++ * in enabled state at init.
++ * Now that someone is really trying to enable them,
++ * just ensure that the hwmod mux is set.
++ */
++ if (oh->_int_flags & _HWMOD_SKIP_ENABLE) {
++ /*
++ * If the caller has mux data populated, do the mux'ing
++ * which wouldn't have been done as part of the _enable()
++ * done during setup.
++ */
++ if (oh->mux)
++ omap_hwmod_mux(oh->mux, _HWMOD_STATE_ENABLED);
++
++ oh->_int_flags &= ~_HWMOD_SKIP_ENABLE;
++ return 0;
++ }
++
+ if (oh->_state != _HWMOD_STATE_INITIALIZED &&
+ oh->_state != _HWMOD_STATE_IDLE &&
+ oh->_state != _HWMOD_STATE_DISABLED) {
+@@ -1524,8 +1623,6 @@ static int _enable(struct omap_hwmod *oh)
+ */
+ static int _idle(struct omap_hwmod *oh)
+ {
+- int ret;
+-
+ pr_debug("omap_hwmod: %s: idling\n", oh->name);
+
+ if (oh->_state != _HWMOD_STATE_ENABLED) {
+@@ -1537,11 +1634,9 @@ static int _idle(struct omap_hwmod *oh)
+ if (oh->class->sysc)
+ _idle_sysc(oh);
+ _del_initiator_dep(oh, mpu_oh);
+- _disable_module(oh);
+- ret = _wait_target_disable(oh);
+- if (ret)
+- pr_warn("omap_hwmod: %s: _wait_target_disable failed\n",
+- oh->name);
++
++ _omap4_disable_module(oh);
++
+ /*
+ * The module must be in idle mode before disabling any parents
+ * clocks. Otherwise, the parent clock might be disabled before
+@@ -1642,11 +1737,7 @@ static int _shutdown(struct omap_hwmod *oh)
+ if (oh->_state == _HWMOD_STATE_ENABLED) {
+ _del_initiator_dep(oh, mpu_oh);
+ /* XXX what about the other system initiators here? dma, dsp */
+- _disable_module(oh);
+- ret = _wait_target_disable(oh);
+- if (ret)
+- pr_warn("omap_hwmod: %s: _wait_target_disable failed\n",
+- oh->name);
++ _omap4_disable_module(oh);
+ _disable_clocks(oh);
+ if (oh->clkdm)
+ clkdm_hwmod_disable(oh->clkdm, oh);
+@@ -1744,8 +1835,10 @@ static int _setup(struct omap_hwmod *oh, void *data)
+ * it should be set by the core code as a runtime flag during startup
+ */
+ if ((oh->flags & HWMOD_INIT_NO_IDLE) &&
+- (postsetup_state == _HWMOD_STATE_IDLE))
++ (postsetup_state == _HWMOD_STATE_IDLE)) {
++ oh->_int_flags |= _HWMOD_SKIP_ENABLE;
+ postsetup_state = _HWMOD_STATE_ENABLED;
++ }
+
+ if (postsetup_state == _HWMOD_STATE_IDLE)
+ _idle(oh);
+@@ -1857,6 +1950,35 @@ error:
+ }
+
+ /**
++ * omap_hwmod_set_master_standbymode - set the hwmod's OCP master standbymode
++ * @oh: struct omap_hwmod *
++ * @standbymode: MSTANDBY field bits (shifted to bit 0)
++ *
++ * Sets the IP block's OCP master standby mode in hardware, and updates our
++ * local copy. Intended to be used by drivers that have some erratum
++ * that requires direct manipulation of the MSTANDBY bits. Returns
++ * -EINVAL if @oh is null, or passes along the return value from
++ * _set_master_standbymode().
++ *
++ */
++int omap_hwmod_set_master_standbymode(struct omap_hwmod *oh, u8 standbymode)
++{
++ u32 v;
++ int retval = 0;
++
++ if (!oh)
++ return -EINVAL;
++
++ v = oh->_sysc_cache;
++
++ retval = _set_master_standbymode(oh, standbymode, &v);
++ if (!retval)
++ _write_sysconfig(v, oh);
++
++ return retval;
++}
++
++/**
+ * omap_hwmod_set_slave_idlemode - set the hwmod's OCP slave idlemode
+ * @oh: struct omap_hwmod *
+ * @idlemode: SIDLEMODE field bits (shifted to bit 0)
+@@ -2416,6 +2538,7 @@ int omap_hwmod_enable_wakeup(struct omap_hwmod *oh)
+ v = oh->_sysc_cache;
+ _enable_wakeup(oh, &v);
+ _write_sysconfig(v, oh);
++ _set_idle_ioring_wakeup(oh, true);
+ spin_unlock_irqrestore(&oh->_lock, flags);
+
+ return 0;
+@@ -2446,6 +2569,7 @@ int omap_hwmod_disable_wakeup(struct omap_hwmod *oh)
+ v = oh->_sysc_cache;
+ _disable_wakeup(oh, &v);
+ _write_sysconfig(v, oh);
++ _set_idle_ioring_wakeup(oh, false);
+ spin_unlock_irqrestore(&oh->_lock, flags);
+
+ return 0;
+@@ -2662,3 +2786,57 @@ int omap_hwmod_no_setup_reset(struct omap_hwmod *oh)
+
+ return 0;
+ }
++
++/**
++ * omap_hwmod_pad_route_irq - route an I/O pad wakeup to a particular MPU IRQ
++ * @oh: struct omap_hwmod * containing hwmod mux entries
++ * @pad_idx: array index in oh->mux of the hwmod mux entry to route wakeup
++ * @irq_idx: the hwmod mpu_irqs array index of the IRQ to trigger on wakeup
++ *
++ * When an I/O pad wakeup arrives for the dynamic or wakeup hwmod mux
++ * entry number @pad_idx for the hwmod @oh, trigger the interrupt
++ * service routine for the hwmod's mpu_irqs array index @irq_idx. If
++ * this function is not called for a given pad_idx, then the ISR
++ * associated with @oh's first MPU IRQ will be triggered when an I/O
++ * pad wakeup occurs on that pad. Note that @pad_idx is the index of
++ * the _dynamic or wakeup_ entry: if there are other entries not
++ * marked with OMAP_DEVICE_PAD_WAKEUP or OMAP_DEVICE_PAD_REMUX, these
++ * entries are NOT COUNTED in the dynamic pad index. This function
++ * must be called separately for each pad that requires its interrupt
++ * to be re-routed this way. Returns -EINVAL if there is an argument
++ * problem or if @oh does not have hwmod mux entries or MPU IRQs;
++ * returns -ENOMEM if memory cannot be allocated; or 0 upon success.
++ *
++ * XXX This function interface is fragile. Rather than using array
++ * indexes, which are subject to unpredictable change, it should be
++ * using hwmod IRQ names, and some other stable key for the hwmod mux
++ * pad records.
++ */
++int omap_hwmod_pad_route_irq(struct omap_hwmod *oh, int pad_idx, int irq_idx)
++{
++ int nr_irqs;
++
++ might_sleep();
++
++ if (!oh || !oh->mux || !oh->mpu_irqs || pad_idx < 0 ||
++ pad_idx >= oh->mux->nr_pads_dynamic)
++ return -EINVAL;
++
++ /* Check the number of available mpu_irqs */
++ for (nr_irqs = 0; oh->mpu_irqs[nr_irqs].irq >= 0; nr_irqs++)
++ ;
++
++ if (irq_idx >= nr_irqs)
++ return -EINVAL;
++
++ if (!oh->mux->irqs) {
++ /* XXX What frees this? */
++ oh->mux->irqs = kzalloc(sizeof(int) * oh->mux->nr_pads_dynamic,
++ GFP_KERNEL);
++ if (!oh->mux->irqs)
++ return -ENOMEM;
++ }
++ oh->mux->irqs[pad_idx] = irq_idx;
++
++ return 0;
++}
+diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+new file mode 100644
+index 0000000..9d3c9a5
+--- /dev/null
++++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+@@ -0,0 +1,3412 @@
++/*
++ * omap_hwmod_33xx_data.c: Hardware modules present on the AM33XX chips
++ *
++ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * This file is automatically generated from the AM33XX hardware databases.
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <plat/omap_hwmod.h>
++#include <plat/cpu.h>
++#include <plat/gpio.h>
++#include <plat/dma.h>
++#include <plat/mmc.h>
++#include <plat/mcspi.h>
++#include <plat/i2c.h>
++
++#include "omap_hwmod_common_data.h"
++#include "control.h"
++#include "cm33xx.h"
++#include "prm33xx.h"
++
++/* Backward references (IPs with Bus Master capability) */
++static struct omap_hwmod am33xx_mpu_hwmod;
++static struct omap_hwmod am33xx_l3_main_hwmod;
++static struct omap_hwmod am33xx_l3slow_hwmod;
++static struct omap_hwmod am33xx_l4wkup_hwmod;
++static struct omap_hwmod am33xx_l4per_hwmod;
++static struct omap_hwmod am33xx_uart1_hwmod;
++static struct omap_hwmod am33xx_uart2_hwmod;
++static struct omap_hwmod am33xx_uart3_hwmod;
++static struct omap_hwmod am33xx_uart4_hwmod;
++static struct omap_hwmod am33xx_uart5_hwmod;
++static struct omap_hwmod am33xx_uart6_hwmod;
++static struct omap_hwmod am33xx_timer0_hwmod;
++static struct omap_hwmod am33xx_timer1_hwmod;
++static struct omap_hwmod am33xx_timer2_hwmod;
++static struct omap_hwmod am33xx_timer3_hwmod;
++static struct omap_hwmod am33xx_timer4_hwmod;
++static struct omap_hwmod am33xx_timer5_hwmod;
++static struct omap_hwmod am33xx_timer6_hwmod;
++static struct omap_hwmod am33xx_timer7_hwmod;
++static struct omap_hwmod am33xx_wd_timer1_hwmod;
++static struct omap_hwmod am33xx_tpcc_hwmod;
++static struct omap_hwmod am33xx_tptc0_hwmod;
++static struct omap_hwmod am33xx_tptc1_hwmod;
++static struct omap_hwmod am33xx_tptc2_hwmod;
++static struct omap_hwmod am33xx_dcan0_hwmod;
++static struct omap_hwmod am33xx_dcan1_hwmod;
++static struct omap_hwmod am33xx_gpio0_hwmod;
++static struct omap_hwmod am33xx_gpio1_hwmod;
++static struct omap_hwmod am33xx_gpio2_hwmod;
++static struct omap_hwmod am33xx_gpio3_hwmod;
++static struct omap_hwmod am33xx_i2c1_hwmod;
++static struct omap_hwmod am33xx_i2c2_hwmod;
++static struct omap_hwmod am33xx_i2c3_hwmod;
++static struct omap_hwmod am33xx_usbss_hwmod;
++static struct omap_hwmod am33xx_mmc0_hwmod;
++static struct omap_hwmod am33xx_mmc1_hwmod;
++static struct omap_hwmod am33xx_mmc2_hwmod;
++static struct omap_hwmod am33xx_spi0_hwmod;
++static struct omap_hwmod am33xx_spi1_hwmod;
++static struct omap_hwmod am33xx_elm_hwmod;
++static struct omap_hwmod am33xx_adc_tsc_hwmod;
++static struct omap_hwmod am33xx_mcasp0_hwmod;
++static struct omap_hwmod am33xx_mcasp1_hwmod;
++static struct omap_hwmod am33xx_ehrpwm0_hwmod;
++static struct omap_hwmod am33xx_ehrpwm1_hwmod;
++static struct omap_hwmod am33xx_ehrpwm2_hwmod;
++static struct omap_hwmod am33xx_ecap0_hwmod;
++static struct omap_hwmod am33xx_ecap1_hwmod;
++static struct omap_hwmod am33xx_ecap2_hwmod;
++static struct omap_hwmod am33xx_gpmc_hwmod;
++static struct omap_hwmod am33xx_lcdc_hwmod;
++static struct omap_hwmod am33xx_mailbox_hwmod;
++static struct omap_hwmod am33xx_cpgmac0_hwmod;
++
++/*
++ * Interconnects hwmod structures
++ * hwmods that compose the global AM33XX OCP interconnect
++ */
++
++/* MPU -> L3_SLOW Peripheral interface */
++static struct omap_hwmod_ocp_if am33xx_mpu__l3_slow = {
++ .master = &am33xx_mpu_hwmod,
++ .slave = &am33xx_l3slow_hwmod,
++ .user = OCP_USER_MPU,
++};
++
++/* L3 SLOW -> L4_PER Peripheral interface */
++static struct omap_hwmod_ocp_if am33xx_l3_slow__l4_per = {
++ .master = &am33xx_l3slow_hwmod,
++ .slave = &am33xx_l4per_hwmod,
++ .user = OCP_USER_MPU,
++};
++
++/* L3 SLOW -> L4_WKUP Peripheral interface */
++static struct omap_hwmod_ocp_if am33xx_l3_slow__l4_wkup = {
++ .master = &am33xx_l3slow_hwmod,
++ .slave = &am33xx_l4wkup_hwmod,
++ .user = OCP_USER_MPU,
++};
++
++/* Master interfaces on the L4_WKUP interconnect */
++static struct omap_hwmod_ocp_if *am33xx_l3_slow_masters[] = {
++ &am33xx_l3_slow__l4_per,
++ &am33xx_l3_slow__l4_wkup,
++};
++
++/* Slave interfaces on the L3_SLOW interconnect */
++static struct omap_hwmod_ocp_if *am33xx_l3_slow_slaves[] = {
++ &am33xx_mpu__l3_slow,
++};
++
++static struct omap_hwmod am33xx_l3slow_hwmod = {
++ .name = "l3_slow",
++ .class = &l3_hwmod_class,
++ .clkdm_name = "l3s_clkdm",
++ .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
++ .masters = am33xx_l3_slow_masters,
++ .masters_cnt = ARRAY_SIZE(am33xx_l3_slow_masters),
++ .slaves = am33xx_l3_slow_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_l3_slow_slaves),
++};
++
++/* L4 PER -> DCAN0 */
++static struct omap_hwmod_addr_space am33xx_dcan0_addrs[] = {
++ {
++ .pa_start = 0x481CC000,
++ .pa_end = 0x481CC000 + SZ_4K - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l4_per__dcan0 = {
++ .master = &am33xx_l4per_hwmod,
++ .slave = &am33xx_dcan0_hwmod,
++ .clk = "dcan0_ick",
++ .addr = am33xx_dcan0_addrs,
++ .user = OCP_USER_MPU | OCP_USER_SDMA,
++};
++
++/* L4 PER -> DCAN1 */
++static struct omap_hwmod_addr_space am33xx_dcan1_addrs[] = {
++ {
++ .pa_start = 0x481D0000,
++ .pa_end = 0x481D0000 + SZ_4K - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l4_per__dcan1 = {
++ .master = &am33xx_l4per_hwmod,
++ .slave = &am33xx_dcan1_hwmod,
++ .clk = "dcan1_ick",
++ .addr = am33xx_dcan1_addrs,
++ .user = OCP_USER_MPU | OCP_USER_SDMA,
++};
++
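++/*
++ * Note: the interface comments below use 1-based GPIO instance names,
++ * while the hwmod structures are numbered from gpio0 (e.g. "GPIO2" here
++ * is the gpio1 hwmod at 0x4804C000).
++ */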
++/* L4 PER -> GPIO2 */
++static struct omap_hwmod_addr_space am33xx_gpio1_addrs[] = {
++ {
++ .pa_start = 0x4804C000,
++ .pa_end = 0x4804C000 + SZ_4K - 1,
++ .flags = ADDR_TYPE_RT,
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l4_per__gpio1 = {
++ .master = &am33xx_l4per_hwmod,
++ .slave = &am33xx_gpio1_hwmod,
++ .clk = "l4ls_gclk",
++ .addr = am33xx_gpio1_addrs,
++ .user = OCP_USER_MPU | OCP_USER_SDMA,
++};
++
++/* L4 PER -> GPIO3 */
++static struct omap_hwmod_addr_space am33xx_gpio2_addrs[] = {
++ {
++ .pa_start = 0x481AC000,
++ .pa_end = 0x481AC000 + SZ_4K - 1,
++ .flags = ADDR_TYPE_RT,
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l4_per__gpio2 = {
++ .master = &am33xx_l4per_hwmod,
++ .slave = &am33xx_gpio2_hwmod,
++ .clk = "l4ls_gclk",
++ .addr = am33xx_gpio2_addrs,
++ .user = OCP_USER_MPU | OCP_USER_SDMA,
++};
++
++/* L4 PER -> GPIO4 */
++static struct omap_hwmod_addr_space am33xx_gpio3_addrs[] = {
++ {
++ .pa_start = 0x481AE000,
++ .pa_end = 0x481AE000 + SZ_4K - 1,
++ .flags = ADDR_TYPE_RT,
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l4_per__gpio3 = {
++ .master = &am33xx_l4per_hwmod,
++ .slave = &am33xx_gpio3_hwmod,
++ .clk = "l4ls_gclk",
++ .addr = am33xx_gpio3_addrs,
++ .user = OCP_USER_MPU | OCP_USER_SDMA,
++};
++
++/* Master interfaces on the L4_PER interconnect */
++static struct omap_hwmod_ocp_if *am33xx_l4_per_masters[] = {
++ &am33xx_l4_per__dcan0,
++ &am33xx_l4_per__dcan1,
++ &am33xx_l4_per__gpio1,
++ &am33xx_l4_per__gpio2,
++ &am33xx_l4_per__gpio3,
++};
++
++/* Slave interfaces on the L4_PER interconnect */
++static struct omap_hwmod_ocp_if *am33xx_l4_per_slaves[] = {
++ &am33xx_l3_slow__l4_per,
++};
++
++static struct omap_hwmod am33xx_l4per_hwmod = {
++ .name = "l4_per",
++ .class = &l4_hwmod_class,
++ .clkdm_name = "l4ls_clkdm",
++ .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
++ .masters = am33xx_l4_per_masters,
++ .masters_cnt = ARRAY_SIZE(am33xx_l4_per_masters),
++ .slaves = am33xx_l4_per_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_l4_per_slaves),
++};
++
++/* L4 WKUP -> I2C1 */
++static struct omap_hwmod_addr_space am33xx_i2c1_addr_space[] = {
++ {
++ .pa_start = 0x44E0B000,
++ .pa_end = 0x44E0B000 + SZ_4K - 1,
++ .flags = ADDR_TYPE_RT,
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l4_wkup_i2c1 = {
++ .master = &am33xx_l4wkup_hwmod,
++ .slave = &am33xx_i2c1_hwmod,
++ .addr = am33xx_i2c1_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++/* L4 WKUP -> GPIO1 */
++static struct omap_hwmod_addr_space am33xx_gpio0_addrs[] = {
++ {
++ .pa_start = 0x44E07000,
++ .pa_end = 0x44E07000 + SZ_4K - 1,
++ .flags = ADDR_TYPE_RT,
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l4_wkup__gpio0 = {
++ .master = &am33xx_l4wkup_hwmod,
++ .slave = &am33xx_gpio0_hwmod,
++ .clk = "l4ls_gclk",
++ .addr = am33xx_gpio0_addrs,
++ .user = OCP_USER_MPU | OCP_USER_SDMA,
++};
++
++/* Master interfaces on the L4_WKUP interconnect */
++static struct omap_hwmod_ocp_if *am33xx_l4_wkup_masters[] = {
++ &am33xx_l4_wkup__gpio0,
++};
++
++/* Slave interfaces on the L4_WKUP interconnect */
++static struct omap_hwmod_ocp_if *am33xx_l4_wkup_slaves[] = {
++ &am33xx_l3_slow__l4_wkup,
++};
++
++static struct omap_hwmod am33xx_l4wkup_hwmod = {
++ .name = "l4_wkup",
++ .class = &l4_hwmod_class,
++ .clkdm_name = "l4_wkup_clkdm",
++ .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
++ .masters = am33xx_l4_wkup_masters,
++ .masters_cnt = ARRAY_SIZE(am33xx_l4_wkup_masters),
++ .slaves = am33xx_l4_wkup_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_l4_wkup_slaves),
++};
++
++/* adc_tsc */
++static struct omap_hwmod_class_sysconfig am33xx_adc_tsc_sysc = {
++ .rev_offs = 0x00,
++ .sysc_offs = 0x10,
++ .sysc_flags = SYSC_HAS_SIDLEMODE,
++ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
++ SIDLE_SMART_WKUP),
++ .sysc_fields = &omap_hwmod_sysc_type2,
++};
++
++static struct omap_hwmod_class am33xx_adc_tsc_hwmod_class = {
++ .name = "adc_tsc",
++ .sysc = &am33xx_adc_tsc_sysc,
++};
++
++/* L4 WKUP -> ADC_TSC */
++static struct omap_hwmod_addr_space am33xx_adc_tsc_addrs[] = {
++ {
++ .pa_start = 0x44E0D000,
++ .pa_end = 0x44E0D000 + SZ_8K - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l4_wkup_adc_tsc = {
++ .master = &am33xx_l4wkup_hwmod,
++ .slave = &am33xx_adc_tsc_hwmod,
++ .clk = "adc_tsc_ick",
++ .addr = am33xx_adc_tsc_addrs,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_adc_tsc_slaves[] = {
++ &am33xx_l4_wkup_adc_tsc,
++};
++
++static struct omap_hwmod_irq_info am33xx_adc_tsc_irqs[] = {
++ { .irq = 16 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod am33xx_adc_tsc_hwmod = {
++ .name = "adc_tsc",
++ .class = &am33xx_adc_tsc_hwmod_class,
++ .clkdm_name = "l4_wkup_clkdm",
++ .mpu_irqs = am33xx_adc_tsc_irqs,
++ .main_clk = "adc_tsc_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_WKUP_ADC_TSC_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_adc_tsc_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_adc_tsc_slaves),
++};
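Note the termination convention used by every IRQ and address-space array in this file: an entry with .irq = -1 closes an omap_hwmod_irq_info list, and an empty { } entry closes an omap_hwmod_addr_space list, so no explicit counts are stored. The hwmod core sizes these lists with a loop of roughly the shape below (a sketch of the convention, not a copy of the in-kernel helper):

/* Sketch: walk an IRQ list up to its { .irq = -1 } sentinel. */
static int count_mpu_irqs(const struct omap_hwmod_irq_info *irqs)
{
	int i = 0;

	if (!irqs)
		return 0;

	while (irqs[i].irq != -1)
		i++;

	return i;
}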
++
++/* 'aes' class */
++static struct omap_hwmod_class am33xx_aes_hwmod_class = {
++ .name = "aes",
++};
++
++/* aes0 */
++static struct omap_hwmod_irq_info am33xx_aes0_irqs[] = {
++ { .irq = 102 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod am33xx_aes0_hwmod = {
++ .name = "aes0",
++ .class = &am33xx_aes_hwmod_class,
++ .clkdm_name = "l3_clkdm",
++ .mpu_irqs = am33xx_aes0_irqs,
++ .main_clk = "aes0_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_AES0_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++};
++
++/* cefuse */
++static struct omap_hwmod_class am33xx_cefuse_hwmod_class = {
++ .name = "cefuse",
++};
++
++static struct omap_hwmod am33xx_cefuse_hwmod = {
++ .name = "cefuse",
++ .class = &am33xx_cefuse_hwmod_class,
++ .clkdm_name = "l4_cefuse_clkdm",
++ .main_clk = "cefuse_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_CEFUSE_CEFUSE_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++};
++
++/* clkdiv32k */
++static struct omap_hwmod_class am33xx_clkdiv32k_hwmod_class = {
++ .name = "clkdiv32k",
++};
++
++static struct omap_hwmod am33xx_clkdiv32k_hwmod = {
++ .name = "clkdiv32k",
++ .class = &am33xx_clkdiv32k_hwmod_class,
++ .clkdm_name = "clk_24mhz_clkdm",
++ .main_clk = "clkdiv32k_ick",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_CLKDIV32K_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
++};
++
++/* control */
++static struct omap_hwmod_class am33xx_control_hwmod_class = {
++ .name = "control",
++};
++
++static struct omap_hwmod_irq_info am33xx_control_irqs[] = {
++ { .irq = 8 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod am33xx_control_hwmod = {
++ .name = "control",
++ .class = &am33xx_control_hwmod_class,
++ .clkdm_name = "l4_wkup_clkdm",
++ .mpu_irqs = am33xx_control_irqs,
++ .main_clk = "control_fck",
++ .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_WKUP_CONTROL_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++};
++
++/* cpgmac0 */
++static struct omap_hwmod_class_sysconfig am33xx_cpgmac_sysc = {
++ .rev_offs = 0x0,
++ .sysc_offs = 0x8,
++ .syss_offs = 0x4,
++ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
++ SYSS_HAS_RESET_STATUS),
++ .idlemodes = (SIDLE_FORCE | SIDLE_NO | MSTANDBY_FORCE |
++ MSTANDBY_NO),
++ .sysc_fields = &omap_hwmod_sysc_type3,
++};
++
++static struct omap_hwmod_class am33xx_cpgmac0_hwmod_class = {
++ .name = "cpgmac0",
++ .sysc = &am33xx_cpgmac_sysc,
++};
++
++struct omap_hwmod_addr_space am33xx_cpgmac0_addr_space[] = {
++ {
++ .pa_start = 0x4A101200,
++ .pa_end = 0x4A101200 + SZ_8K - 1,
++ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
++ },
++ { }
++};
++
++struct omap_hwmod_ocp_if am33xx_l3_main__cpgmac0 = {
++ .master = &am33xx_l3_main_hwmod,
++ .slave = &am33xx_cpgmac0_hwmod,
++ .addr = am33xx_cpgmac0_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_cpgmac0_slaves[] = {
++ &am33xx_l3_main__cpgmac0,
++};
++
++static struct omap_hwmod_irq_info am33xx_cpgmac0_irqs[] = {
++ { .name = "c0_rx_thresh_pend", .irq = 40 },
++ { .name = "c0_rx_pend", .irq = 41 },
++ { .name = "c0_tx_pend", .irq = 42 },
++ { .name = "c0_misc_pend", .irq = 43 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod am33xx_cpgmac0_hwmod = {
++ .name = "cpgmac0",
++ .class = &am33xx_cpgmac0_hwmod_class,
++ .clkdm_name = "cpsw_125mhz_clkdm",
++ .mpu_irqs = am33xx_cpgmac0_irqs,
++ .main_clk = "cpgmac0_ick",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_CPGMAC0_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_cpgmac0_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_cpgmac0_slaves),
++ .flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY |
++ HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
++};
++
++/* 'dcan' class */
++static struct omap_hwmod_class am33xx_dcan_hwmod_class = {
++ .name = "d_can",
++};
++
++/* dcan0 slave ports */
++static struct omap_hwmod_ocp_if *am33xx_dcan0_slaves[] = {
++ &am33xx_l4_per__dcan0,
++};
++
++/* dcan0 */
++static struct omap_hwmod_irq_info am33xx_dcan0_irqs[] = {
++ { .name = "d_can_ms", .irq = 52 },
++ { .name = "d_can_mo", .irq = 53 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod am33xx_dcan0_hwmod = {
++ .name = "d_can0",
++ .class = &am33xx_dcan_hwmod_class,
++ .clkdm_name = "l4ls_clkdm",
++ .mpu_irqs = am33xx_dcan0_irqs,
++ .main_clk = "dcan0_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_DCAN0_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_dcan0_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_dcan0_slaves),
++};
++
++/* dcan1 slave ports */
++static struct omap_hwmod_ocp_if *am33xx_dcan1_slaves[] = {
++ &am33xx_l4_per__dcan1,
++};
++
++/* dcan1 */
++static struct omap_hwmod_irq_info am33xx_dcan1_irqs[] = {
++ { .name = "d_can_ms", .irq = 55 },
++ { .name = "d_can_mo", .irq = 56 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod am33xx_dcan1_hwmod = {
++ .name = "d_can1",
++ .class = &am33xx_dcan_hwmod_class,
++ .clkdm_name = "l4ls_clkdm",
++ .mpu_irqs = am33xx_dcan1_irqs,
++ .main_clk = "dcan1_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_DCAN1_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_dcan1_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_dcan1_slaves),
++};
++
++/* debugss */
++static struct omap_hwmod_class am33xx_debugss_hwmod_class = {
++ .name = "debugss",
++};
++
++static struct omap_hwmod am33xx_debugss_hwmod = {
++ .name = "debugss",
++ .class = &am33xx_debugss_hwmod_class,
++ .clkdm_name = "l3_aon_clkdm",
++ .main_clk = "debugss_ick",
++#ifdef CONFIG_DEBUG_JTAG_ENABLE
++ .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
++#endif
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_WKUP_DEBUGSS_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++};
++
++/* elm */
++static struct omap_hwmod_class_sysconfig am33xx_elm_sysc = {
++ .rev_offs = 0x0000,
++ .sysc_offs = 0x0010,
++ .syss_offs = 0x0014,
++ .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
++ SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
++ SYSS_HAS_RESET_STATUS),
++ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
++ .sysc_fields = &omap_hwmod_sysc_type1,
++};
++
++static struct omap_hwmod_class am33xx_elm_hwmod_class = {
++ .name = "elm",
++ .sysc = &am33xx_elm_sysc,
++};
++
++static struct omap_hwmod_irq_info am33xx_elm_irqs[] = {
++ { .irq = 4 },
++ { .irq = -1 }
++};
++
++struct omap_hwmod_addr_space am33xx_elm_addr_space[] = {
++ {
++ .pa_start = 0x48080000,
++ .pa_end = 0x48080000 + SZ_8K - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
++
++struct omap_hwmod_ocp_if am33xx_l4_core__elm = {
++ .master = &am33xx_l4per_hwmod,
++ .slave = &am33xx_elm_hwmod,
++ .addr = am33xx_elm_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_elm_slaves[] = {
++ &am33xx_l4_core__elm,
++};
++
++static struct omap_hwmod am33xx_elm_hwmod = {
++ .name = "elm",
++ .class = &am33xx_elm_hwmod_class,
++ .clkdm_name = "l4ls_clkdm",
++ .mpu_irqs = am33xx_elm_irqs,
++ .main_clk = "elm_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_ELM_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_elm_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_elm_slaves),
++};
++
++/* emif_fw */
++static struct omap_hwmod_class am33xx_emif_fw_hwmod_class = {
++ .name = "emif_fw",
++};
++
++static struct omap_hwmod am33xx_emif_fw_hwmod = {
++ .name = "emif_fw",
++ .class = &am33xx_emif_fw_hwmod_class,
++ .clkdm_name = "l4fw_clkdm",
++ .main_clk = "emif_fw_fck",
++ .flags = HWMOD_INIT_NO_RESET | HWMOD_INIT_NO_IDLE,
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_EMIF_FW_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++};
++
++/* 'epwmss' class */
++static struct omap_hwmod_class_sysconfig am33xx_epwmss_sysc = {
++ .rev_offs = 0x0,
++ .sysc_offs = 0x4,
++ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE),
++ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
++ SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
++ MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
++ .sysc_fields = &omap_hwmod_sysc_type2,
++};
++
++static struct omap_hwmod_class am33xx_epwmss_hwmod_class = {
++ .name = "epwmss",
++ .sysc = &am33xx_epwmss_sysc,
++};
++
++/* ehrpwm0 */
++static struct omap_hwmod_irq_info am33xx_ehrpwm0_irqs[] = {
++ { .irq = 86 },
++ { .irq = 58 },
++ { .irq = -1 }
++};
++
++struct omap_hwmod_addr_space am33xx_ehrpwm0_addr_space[] = {
++/*
++ * The resources are split so that the PWMSS config space and the
++ * module-specific registers can be accessed independently.
++ */
++ {
++ .pa_start = 0x48300000,
++ .pa_end = 0x48300000 + SZ_16 - 1,
++ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT
++ },
++ {
++ .pa_start = 0x48300000 + SZ_512,
++ .pa_end = 0x48300000 + SZ_512 + SZ_256 - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
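For reference, the split above works out as follows: the first entry covers the 16-byte PWMSS0 config space at the subsystem base, and the second covers the ePWM register block starting SZ_512 (0x200) bytes in; the matching eCAP0 entry further down starts SZ_256 (0x100) bytes in. The defines below merely restate those offsets for readability and are not identifiers used by the patch.

/* Derived from the pa_start/pa_end values above -- illustrative names only. */
#define PWMSS0_CFG_BASE		0x48300000			/* 16-byte config space              */
#define PWMSS0_ECAP_REGS	(PWMSS0_CFG_BASE + 0x100)	/* SZ_256 in: 0x48300100..0x483001ff */
#define PWMSS0_EPWM_REGS	(PWMSS0_CFG_BASE + 0x200)	/* SZ_512 in: 0x48300200..0x483002ff */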
++
++struct omap_hwmod_ocp_if am33xx_l4_core__ehrpwm0 = {
++ .master = &am33xx_l4per_hwmod,
++ .slave = &am33xx_ehrpwm0_hwmod,
++ .addr = am33xx_ehrpwm0_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_ehrpwm0_slaves[] = {
++ &am33xx_l4_core__ehrpwm0,
++};
++
++static struct omap_hwmod_opt_clk ehrpwm0_opt_clks[] = {
++ { .role = "tbclk", .clk = "ehrpwm0_tbclk" },
++};
++
++static struct omap_hwmod am33xx_ehrpwm0_hwmod = {
++ .name = "ehrpwm.0",
++ .mpu_irqs = am33xx_ehrpwm0_irqs,
++ .class = &am33xx_epwmss_hwmod_class,
++ .main_clk = "epwmss0_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_EPWMSS0_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_ehrpwm0_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_ehrpwm0_slaves),
++ .opt_clks = ehrpwm0_opt_clks,
++ .opt_clks_cnt = ARRAY_SIZE(ehrpwm0_opt_clks),
++};
++
++/* ehrpwm1 */
++static struct omap_hwmod_irq_info am33xx_ehrpwm1_irqs[] = {
++ { .irq = 87 },
++ { .irq = 59 },
++ { .irq = -1 }
++};
++
++struct omap_hwmod_addr_space am33xx_ehrpwm1_addr_space[] = {
++/*
++ * The resources are split so that the PWMSS config space and the
++ * module-specific registers can be accessed independently.
++ */
++ {
++ .pa_start = 0x48302000,
++ .pa_end = 0x48302000 + SZ_16 - 1,
++ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT
++ },
++ {
++ .pa_start = 0x48302000 + SZ_512,
++ .pa_end = 0x48302000 + SZ_512 + SZ_256 - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
++
++struct omap_hwmod_ocp_if am33xx_l4_core__ehrpwm1 = {
++ .master = &am33xx_l4per_hwmod,
++ .slave = &am33xx_ehrpwm1_hwmod,
++ .addr = am33xx_ehrpwm1_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_ehrpwm1_slaves[] = {
++ &am33xx_l4_core__ehrpwm1,
++};
++
++static struct omap_hwmod_opt_clk ehrpwm1_opt_clks[] = {
++ { .role = "tbclk", .clk = "ehrpwm1_tbclk" },
++};
++
++static struct omap_hwmod am33xx_ehrpwm1_hwmod = {
++ .name = "ehrpwm.1",
++ .mpu_irqs = am33xx_ehrpwm1_irqs,
++ .class = &am33xx_epwmss_hwmod_class,
++ .main_clk = "epwmss1_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_EPWMSS1_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_ehrpwm1_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_ehrpwm1_slaves),
++ .opt_clks = ehrpwm1_opt_clks,
++ .opt_clks_cnt = ARRAY_SIZE(ehrpwm1_opt_clks),
++};
++
++/* ehrpwm2 */
++static struct omap_hwmod_irq_info am33xx_ehrpwm2_irqs[] = {
++ { .irq = 39 },
++ { .irq = 60 },
++ { .irq = -1 }
++};
++
++struct omap_hwmod_addr_space am33xx_ehrpwm2_addr_space[] = {
++/*
++ * The resources are split so that the PWMSS config space and the
++ * module-specific registers can be accessed independently.
++ */
++ {
++ .pa_start = 0x48304000,
++ .pa_end = 0x48304000 + SZ_16 - 1,
++ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT
++ },
++ {
++ .pa_start = 0x48304000 + SZ_512,
++ .pa_end = 0x48304000 + SZ_512 + SZ_256 - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
++
++struct omap_hwmod_ocp_if am33xx_l4_core__ehrpwm2 = {
++ .master = &am33xx_l4per_hwmod,
++ .slave = &am33xx_ehrpwm2_hwmod,
++ .addr = am33xx_ehrpwm2_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_ehrpwm2_slaves[] = {
++ &am33xx_l4_core__ehrpwm2,
++};
++
++static struct omap_hwmod_opt_clk ehrpwm2_opt_clks[] = {
++ { .role = "tbclk", .clk = "ehrpwm2_tbclk" },
++};
++
++static struct omap_hwmod am33xx_ehrpwm2_hwmod = {
++ .name = "ehrpwm.2",
++ .mpu_irqs = am33xx_ehrpwm2_irqs,
++ .class = &am33xx_epwmss_hwmod_class,
++ .main_clk = "epwmss2_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_EPWMSS2_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_ehrpwm2_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_ehrpwm2_slaves),
++ .opt_clks = ehrpwm2_opt_clks,
++ .opt_clks_cnt = ARRAY_SIZE(ehrpwm2_opt_clks),
++};
++
++/* ecap0 */
++static struct omap_hwmod_irq_info am33xx_ecap0_irqs[] = {
++ { .irq = 31 },
++ { .irq = -1 }
++};
++
++struct omap_hwmod_addr_space am33xx_ecap0_addr_space[] = {
++/*
++ * The resources are split so that the PWMSS config space and the
++ * module-specific registers can be accessed independently.
++ */
++ {
++ .pa_start = 0x48300000,
++ .pa_end = 0x48300000 + SZ_16 - 1,
++ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT
++ },
++ {
++ .pa_start = 0x48300000 + SZ_256,
++ .pa_end = 0x48300000 + SZ_256 + SZ_256 - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
++
++struct omap_hwmod_ocp_if am33xx_l4_core__ecap0 = {
++ .master = &am33xx_l4per_hwmod,
++ .slave = &am33xx_ecap0_hwmod,
++ .addr = am33xx_ecap0_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_ecap0_slaves[] = {
++ &am33xx_l4_core__ecap0,
++};
++
++static struct omap_hwmod am33xx_ecap0_hwmod = {
++ .name = "ecap.0",
++ .mpu_irqs = am33xx_ecap0_irqs,
++ .class = &am33xx_epwmss_hwmod_class,
++ .main_clk = "epwmss0_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_EPWMSS0_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_ecap0_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_ecap0_slaves),
++};
++
++/* ecap1 */
++static struct omap_hwmod_irq_info am33xx_ecap1_irqs[] = {
++ { .irq = 47 },
++ { .irq = -1 }
++};
++
++struct omap_hwmod_addr_space am33xx_ecap1_addr_space[] = {
++/*
++ * The resources are split so that the PWMSS config space and the
++ * module-specific registers can be accessed independently.
++ */
++ {
++ .pa_start = 0x48302000,
++ .pa_end = 0x48302000 + SZ_16 - 1,
++ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT
++ },
++ {
++ .pa_start = 0x48302000 + SZ_256,
++ .pa_end = 0x48302000 + SZ_256 + SZ_256 - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
++
++struct omap_hwmod_ocp_if am33xx_l4_core__ecap1 = {
++ .master = &am33xx_l4per_hwmod,
++ .slave = &am33xx_ecap1_hwmod,
++ .addr = am33xx_ecap1_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_ecap1_slaves[] = {
++ &am33xx_l4_core__ecap1,
++};
++
++static struct omap_hwmod am33xx_ecap1_hwmod = {
++ .name = "ecap.1",
++ .mpu_irqs = am33xx_ecap1_irqs,
++ .class = &am33xx_epwmss_hwmod_class,
++ .main_clk = "epwmss1_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_EPWMSS1_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_ecap1_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_ecap1_slaves),
++};
++
++/* ecap2 */
++static struct omap_hwmod_irq_info am33xx_ecap2_irqs[] = {
++ { .irq = 61 },
++ { .irq = -1 }
++};
++
++struct omap_hwmod_addr_space am33xx_ecap2_addr_space[] = {
++/*
++ * The resources are split so that the PWMSS config space and the
++ * module-specific registers can be accessed independently.
++ */
++ {
++ .pa_start = 0x48304000,
++ .pa_end = 0x48304000 + SZ_16 - 1,
++ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT
++ },
++ {
++ .pa_start = 0x48304000 + SZ_256,
++ .pa_end = 0x48304000 + SZ_256 + SZ_256 - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
++
++struct omap_hwmod_ocp_if am33xx_l4_core__ecap2 = {
++ .master = &am33xx_l4per_hwmod,
++ .slave = &am33xx_ecap2_hwmod,
++ .addr = am33xx_ecap2_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_ecap2_slaves[] = {
++ &am33xx_l4_core__ecap2,
++};
++
++static struct omap_hwmod am33xx_ecap2_hwmod = {
++ .name = "ecap.2",
++ .mpu_irqs = am33xx_ecap2_irqs,
++ .class = &am33xx_epwmss_hwmod_class,
++ .clkdm_name = "l4ls_clkdm",
++ .main_clk = "epwmss2_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_EPWMSS2_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_ecap2_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_ecap2_slaves),
++};
++
++static struct omap_hwmod_class_sysconfig am33xx_gpio_sysc = {
++ .rev_offs = 0x0000,
++ .sysc_offs = 0x0010,
++ .syss_offs = 0x0114,
++ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
++ SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
++ SYSS_HAS_RESET_STATUS),
++ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
++ SIDLE_SMART_WKUP),
++ .sysc_fields = &omap_hwmod_sysc_type1,
++};
++
++/* 'gpio' class */
++static struct omap_hwmod_class am33xx_gpio_hwmod_class = {
++ .name = "gpio",
++ .sysc = &am33xx_gpio_sysc,
++ .rev = 2,
++};
++
++static struct omap_gpio_dev_attr gpio_dev_attr = {
++ .bank_width = 32,
++ .dbck_flag = true,
++};
++
++/* gpio0 */
++static struct omap_hwmod_ocp_if *am33xx_gpio0_slaves[] = {
++ &am33xx_l4_wkup__gpio0,
++};
++
++static struct omap_hwmod_opt_clk gpio0_opt_clks[] = {
++ { .role = "dbclk", .clk = "gpio0_dbclk" },
++};
++
++static struct omap_hwmod_irq_info am33xx_gpio0_irqs[] = {
++ { .irq = 96 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod am33xx_gpio0_hwmod = {
++ .name = "gpio1",
++ .class = &am33xx_gpio_hwmod_class,
++ .clkdm_name = "l4_wkup_clkdm",
++ .mpu_irqs = am33xx_gpio0_irqs,
++ .main_clk = "gpio0_ick",
++ .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_WKUP_GPIO0_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .opt_clks = gpio0_opt_clks,
++ .opt_clks_cnt = ARRAY_SIZE(gpio0_opt_clks),
++ .dev_attr = &gpio_dev_attr,
++ .slaves = am33xx_gpio0_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_gpio0_slaves),
++};
++
++/* gpio1 */
++static struct omap_hwmod_irq_info am33xx_gpio1_irqs[] = {
++ { .irq = 98 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod_ocp_if *am33xx_gpio1_slaves[] = {
++ &am33xx_l4_per__gpio1,
++};
++
++static struct omap_hwmod_opt_clk gpio1_opt_clks[] = {
++ { .role = "dbclk", .clk = "gpio1_dbclk" },
++};
++
++static struct omap_hwmod am33xx_gpio1_hwmod = {
++ .name = "gpio2",
++ .class = &am33xx_gpio_hwmod_class,
++ .clkdm_name = "l4ls_clkdm",
++ .mpu_irqs = am33xx_gpio1_irqs,
++ .main_clk = "gpio1_ick",
++ .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_GPIO1_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .opt_clks = gpio1_opt_clks,
++ .opt_clks_cnt = ARRAY_SIZE(gpio1_opt_clks),
++ .dev_attr = &gpio_dev_attr,
++ .slaves = am33xx_gpio1_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_gpio1_slaves),
++};
++
++/* gpio2 */
++static struct omap_hwmod_irq_info am33xx_gpio2_irqs[] = {
++ { .irq = 32 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod_ocp_if *am33xx_gpio2_slaves[] = {
++ &am33xx_l4_per__gpio2,
++};
++
++static struct omap_hwmod_opt_clk gpio2_opt_clks[] = {
++ { .role = "dbclk", .clk = "gpio2_dbclk" },
++};
++
++static struct omap_hwmod am33xx_gpio2_hwmod = {
++ .name = "gpio3",
++ .class = &am33xx_gpio_hwmod_class,
++ .clkdm_name = "l4ls_clkdm",
++ .mpu_irqs = am33xx_gpio2_irqs,
++ .main_clk = "gpio2_ick",
++ .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_GPIO2_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .opt_clks = gpio2_opt_clks,
++ .opt_clks_cnt = ARRAY_SIZE(gpio2_opt_clks),
++ .dev_attr = &gpio_dev_attr,
++ .slaves = am33xx_gpio2_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_gpio2_slaves),
++};
++
++/* gpio3 */
++static struct omap_hwmod_irq_info am33xx_gpio3_irqs[] = {
++ { .irq = 62 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod_ocp_if *am33xx_gpio3_slaves[] = {
++ &am33xx_l4_per__gpio3,
++};
++
++static struct omap_hwmod_opt_clk gpio3_opt_clks[] = {
++ { .role = "dbclk", .clk = "gpio3_dbclk" },
++};
++
++static struct omap_hwmod am33xx_gpio3_hwmod = {
++ .name = "gpio4",
++ .class = &am33xx_gpio_hwmod_class,
++ .clkdm_name = "l4ls_clkdm",
++ .mpu_irqs = am33xx_gpio3_irqs,
++ .main_clk = "gpio3_ick",
++ .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_GPIO3_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .opt_clks = gpio3_opt_clks,
++ .opt_clks_cnt = ARRAY_SIZE(gpio3_opt_clks),
++ .dev_attr = &gpio_dev_attr,
++ .slaves = am33xx_gpio3_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_gpio3_slaves),
++};
++
++/* gpmc */
++static struct omap_hwmod_class_sysconfig gpmc_sysc = {
++ .rev_offs = 0x0,
++ .sysc_offs = 0x10,
++ .syss_offs = 0x14,
++ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
++ SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
++ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
++ .sysc_fields = &omap_hwmod_sysc_type1,
++};
++
++static struct omap_hwmod_class am33xx_gpmc_hwmod_class = {
++ .name = "gpmc",
++ .sysc = &gpmc_sysc,
++};
++
++struct omap_hwmod_addr_space am33xx_gpmc_addr_space[] = {
++ {
++ .pa_start = 0x50000000,
++ .pa_end = 0x50000000 + SZ_8K - 1,
++ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
++ },
++ { }
++};
++
++struct omap_hwmod_ocp_if am33xx_l3_main__gpmc = {
++ .master = &am33xx_l3_main_hwmod,
++ .slave = &am33xx_gpmc_hwmod,
++ .addr = am33xx_gpmc_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_gpmc_slaves[] = {
++ &am33xx_l3_main__gpmc,
++};
++
++static struct omap_hwmod_irq_info am33xx_gpmc_irqs[] = {
++ { .irq = 100 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod am33xx_gpmc_hwmod = {
++ .name = "gpmc",
++ .class = &am33xx_gpmc_hwmod_class,
++ .clkdm_name = "l3s_clkdm",
++ .mpu_irqs = am33xx_gpmc_irqs,
++ .main_clk = "gpmc_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_GPMC_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_gpmc_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_gpmc_slaves),
++ .flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY |
++ HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
++};
++
++/* 'i2c' class */
++static struct omap_hwmod_class_sysconfig am33xx_i2c_sysc = {
++ .sysc_offs = 0x0010,
++ .syss_offs = 0x0090,
++ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
++ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
++ SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
++ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
++ SIDLE_SMART_WKUP),
++ .sysc_fields = &omap_hwmod_sysc_type1,
++};
++
++static struct omap_i2c_dev_attr i2c_dev_attr = {
++ .flags = OMAP_I2C_FLAG_BUS_SHIFT_NONE |
++ OMAP_I2C_FLAG_RESET_REGS_POSTIDLE,
++};
++
++static struct omap_hwmod_class i2c_class = {
++ .name = "i2c",
++ .sysc = &am33xx_i2c_sysc,
++ .rev = OMAP_I2C_IP_VERSION_2,
++ .reset = &omap_i2c_reset,
++};
++
++/* I2C1 */
++static struct omap_hwmod_irq_info i2c1_mpu_irqs[] = {
++ { .irq = 70 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod_dma_info i2c1_edma_reqs[] = {
++ { .name = "tx", .dma_req = 0, },
++ { .name = "rx", .dma_req = 0, },
++ { .dma_req = -1 }
++};
++
++static struct omap_hwmod_ocp_if *am33xx_i2c1_slaves[] = {
++ &am33xx_l4_wkup_i2c1,
++};
++
++static struct omap_hwmod am33xx_i2c1_hwmod = {
++ .name = "i2c1",
++ .class = &i2c_class,
++ .clkdm_name = "l4_wkup_clkdm",
++ .mpu_irqs = i2c1_mpu_irqs,
++ .main_clk = "i2c1_fck",
++ .sdma_reqs = i2c1_edma_reqs,
++ .flags = HWMOD_16BIT_REG,
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_WKUP_I2C0_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .dev_attr = &i2c_dev_attr,
++ .slaves = am33xx_i2c1_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_i2c1_slaves),
++};
++
++/* i2c2 */
++static struct omap_hwmod_addr_space am33xx_i2c2_addr_space[] = {
++ {
++ .pa_start = 0x4802A000,
++ .pa_end = 0x4802A000 + SZ_4K - 1,
++ .flags = ADDR_TYPE_RT,
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am335_l4_per_i2c2 = {
++ .master = &am33xx_l4per_hwmod,
++ .slave = &am33xx_i2c2_hwmod,
++ .addr = am33xx_i2c2_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_irq_info i2c2_mpu_irqs[] = {
++ { .irq = 71 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod_dma_info i2c2_edma_reqs[] = {
++ { .name = "tx", .dma_req = 0, },
++ { .name = "rx", .dma_req = 0, },
++ { .dma_req = -1 }
++};
++
++static struct omap_hwmod_ocp_if *am33xx_i2c2_slaves[] = {
++ &am335_l4_per_i2c2,
++};
++
++static struct omap_hwmod am33xx_i2c2_hwmod = {
++ .name = "i2c2",
++ .class = &i2c_class,
++ .clkdm_name = "l4ls_clkdm",
++ .mpu_irqs = i2c2_mpu_irqs,
++ .main_clk = "i2c2_fck",
++ .sdma_reqs = i2c2_edma_reqs,
++ .flags = HWMOD_16BIT_REG,
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_I2C1_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .dev_attr = &i2c_dev_attr,
++ .slaves = am33xx_i2c2_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_i2c2_slaves),
++};
++
++/* I2C3 */
++static struct omap_hwmod_addr_space am33xx_i2c3_addr_space[] = {
++ {
++ .pa_start = 0x4819C000,
++ .pa_end = 0x4819C000 + SZ_4K - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am335_l4_per_i2c3 = {
++ .master = &am33xx_l4per_hwmod,
++ .slave = &am33xx_i2c3_hwmod,
++ .addr = am33xx_i2c3_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_i2c3_slaves[] = {
++ &am335_l4_per_i2c3,
++};
++
++static struct omap_hwmod_dma_info i2c3_edma_reqs[] = {
++ { .name = "tx", .dma_req = 0, },
++ { .name = "rx", .dma_req = 0, },
++ { .dma_req = -1 }
++};
++
++static struct omap_hwmod_irq_info i2c3_mpu_irqs[] = {
++ { .irq = 30 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod am33xx_i2c3_hwmod = {
++ .name = "i2c3",
++ .class = &i2c_class,
++ .clkdm_name = "l4ls_clkdm",
++ .mpu_irqs = i2c3_mpu_irqs,
++ .main_clk = "i2c3_fck",
++ .sdma_reqs = i2c3_edma_reqs,
++ .flags = HWMOD_16BIT_REG,
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_I2C2_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .dev_attr = &i2c_dev_attr,
++ .slaves = am33xx_i2c3_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_i2c3_slaves),
++};
++
++
++/* ieee5000 */
++static struct omap_hwmod_class am33xx_ieee5000_hwmod_class = {
++ .name = "ieee5000",
++};
++
++static struct omap_hwmod am33xx_ieee5000_hwmod = {
++ .name = "ieee5000",
++ .class = &am33xx_ieee5000_hwmod_class,
++ .clkdm_name = "l3s_clkdm",
++ .main_clk = "ieee5000_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_IEEE5000_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++};
++
++
++/* 'l3' class */
++static struct omap_hwmod_class am33xx_l3_hwmod_class = {
++ .name = "l3",
++};
++
++/* l4_hs */
++static struct omap_hwmod am33xx_l4_hs_hwmod = {
++ .name = "l4_hs",
++ .class = &am33xx_l3_hwmod_class,
++ .clkdm_name = "l4hs_clkdm",
++ .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_L4HS_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++};
++
++/* l3_instr */
++static struct omap_hwmod am33xx_l3_instr_hwmod = {
++ .name = "l3_instr",
++ .class = &am33xx_l3_hwmod_class,
++ .clkdm_name = "l3_clkdm",
++ .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_L3_INSTR_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++};
++
++/* l3_main */
++static struct omap_hwmod am33xx_l3_main_hwmod = {
++ .name = "l3_main",
++ .class = &am33xx_l3_hwmod_class,
++ .clkdm_name = "l3_clkdm",
++ .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_L3_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++};
++
++/* 'l4fw' class */
++static struct omap_hwmod_class am33xx_l4fw_hwmod_class = {
++ .name = "l4fw",
++};
++
++/* l4fw */
++static struct omap_hwmod am33xx_l4fw_hwmod = {
++ .name = "l4fw",
++ .class = &am33xx_l4fw_hwmod_class,
++ .clkdm_name = "l4fw_clkdm",
++ .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_L4FW_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++};
++
++/* 'l4ls' class */
++static struct omap_hwmod_class am33xx_l4ls_hwmod_class = {
++ .name = "l4ls",
++};
++
++/* l4ls */
++static struct omap_hwmod am33xx_l4ls_hwmod = {
++ .name = "l4ls",
++ .class = &am33xx_l4ls_hwmod_class,
++ .clkdm_name = "l4ls_clkdm",
++ .main_clk = "l4ls_gclk",
++ .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_L4LS_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++};
++
++/* lcdc */
++static struct omap_hwmod_class_sysconfig lcdc_sysc = {
++ .rev_offs = 0x0,
++ .sysc_offs = 0x54,
++ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE),
++ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
++ .sysc_fields = &omap_hwmod_sysc_type2,
++};
++
++static struct omap_hwmod_class am33xx_lcdc_hwmod_class = {
++ .name = "lcdc",
++ .sysc = &lcdc_sysc,
++};
++
++static struct omap_hwmod_irq_info am33xx_lcdc_irqs[] = {
++ { .irq = 36 },
++ { .irq = -1 }
++};
++
++struct omap_hwmod_addr_space am33xx_lcdc_addr_space[] = {
++ {
++ .pa_start = 0x4830E000,
++ .pa_end = 0x4830E000 + SZ_8K - 1,
++ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
++ },
++ { }
++};
++
++struct omap_hwmod_ocp_if am33xx_l3_main__lcdc = {
++ .master = &am33xx_l3_main_hwmod,
++ .slave = &am33xx_lcdc_hwmod,
++ .addr = am33xx_lcdc_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_lcdc_slaves[] = {
++ &am33xx_l3_main__lcdc,
++};
++
++static struct omap_hwmod am33xx_lcdc_hwmod = {
++ .name = "lcdc",
++ .class = &am33xx_lcdc_hwmod_class,
++ .clkdm_name = "lcdc_clkdm",
++ .mpu_irqs = am33xx_lcdc_irqs,
++ .main_clk = "lcdc_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_LCDC_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_lcdc_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_lcdc_slaves),
++ .flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY),
++};
++
++/*
++ * 'mailbox' class
++ * mailbox module allowing communication between the on-chip processors using a
++ * queued mailbox-interrupt mechanism.
++ */
++
++static struct omap_hwmod_class_sysconfig am33xx_mailbox_sysc = {
++ .rev_offs = 0x0000,
++ .sysc_offs = 0x0010,
++ .sysc_flags = (SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE |
++ SYSC_HAS_SOFTRESET),
++ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
++ .sysc_fields = &omap_hwmod_sysc_type2,
++};
++
++static struct omap_hwmod_class am33xx_mailbox_hwmod_class = {
++ .name = "mailbox",
++ .sysc = &am33xx_mailbox_sysc,
++};
++
++static struct omap_hwmod_irq_info am33xx_mailbox_irqs[] = {
++ { .irq = 77 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod_addr_space am33xx_mailbox_addrs[] = {
++ {
++ .pa_start = 0x480C8000,
++ .pa_end = 0x480C8000 + (SZ_4K - 1),
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
++
++/* l4_per -> mailbox */
++static struct omap_hwmod_ocp_if am33xx_l4_per__mailbox = {
++ .master = &am33xx_l4per_hwmod,
++ .slave = &am33xx_mailbox_hwmod,
++ .addr = am33xx_mailbox_addrs,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_mailbox_slaves[] = {
++ &am33xx_l4_per__mailbox,
++};
++
++static struct omap_hwmod am33xx_mailbox_hwmod = {
++ .name = "mailbox",
++ .class = &am33xx_mailbox_hwmod_class,
++ .clkdm_name = "l4ls_clkdm",
++ .mpu_irqs = am33xx_mailbox_irqs,
++ .main_clk = "mailbox0_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_MAILBOX0_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_mailbox_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_mailbox_slaves),
++};
++
++/* 'mcasp' class */
++
++static struct omap_hwmod_class_sysconfig am33xx_mcasp_sysc = {
++ .rev_offs = 0x0,
++ .sysc_offs = 0x4,
++ .sysc_flags = SYSC_HAS_SIDLEMODE,
++ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
++ .sysc_fields = &omap_hwmod_sysc_type3,
++};
++
++static struct omap_hwmod_class am33xx_mcasp_hwmod_class = {
++ .name = "mcasp",
++ .sysc = &am33xx_mcasp_sysc,
++};
++
++/* mcasp0 */
++static struct omap_hwmod_irq_info am33xx_mcasp0_irqs[] = {
++ { .name = "ax", .irq = 80, },
++ { .name = "ar", .irq = 81, },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod_dma_info am33xx_mcasp0_edma_reqs[] = {
++ { .name = "tx", .dma_req = AM33XX_DMA_MCASP0_X, },
++ { .name = "rx", .dma_req = AM33XX_DMA_MCASP0_R, },
++ { .dma_req = -1 }
++};
++
++static struct omap_hwmod_addr_space am33xx_mcasp0_addr_space[] = {
++ {
++ .pa_start = 0x48038000,
++ .pa_end = 0x48038000 + (SZ_1K * 12) - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l3_slow__mcasp0 = {
++ .master = &am33xx_l3slow_hwmod,
++ .slave = &am33xx_mcasp0_hwmod,
++ .clk = "mcasp0_ick",
++ .addr = am33xx_mcasp0_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_mcasp0_slaves[] = {
++ &am33xx_l3_slow__mcasp0,
++};
++
++static struct omap_hwmod am33xx_mcasp0_hwmod = {
++ .name = "mcasp0",
++ .class = &am33xx_mcasp_hwmod_class,
++ .clkdm_name = "l3s_clkdm",
++ .mpu_irqs = am33xx_mcasp0_irqs,
++ .sdma_reqs = am33xx_mcasp0_edma_reqs,
++ .main_clk = "mcasp0_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_MCASP0_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_mcasp0_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_mcasp0_slaves),
++};
++
++/* mcasp1 */
++static struct omap_hwmod_irq_info am33xx_mcasp1_irqs[] = {
++ { .name = "ax", .irq = 82, },
++ { .name = "ar", .irq = 83, },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod_dma_info am33xx_mcasp1_edma_reqs[] = {
++ { .name = "tx", .dma_req = AM33XX_DMA_MCASP1_X, },
++ { .name = "rx", .dma_req = AM33XX_DMA_MCASP1_R, },
++ { .dma_req = -1 }
++};
++
++
++static struct omap_hwmod_addr_space am33xx_mcasp1_addr_space[] = {
++ {
++ .pa_start = 0x4803C000,
++ .pa_end = 0x4803C000 + (SZ_1K * 12) - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l3_slow__mcasp1 = {
++ .master = &am33xx_l3slow_hwmod,
++ .slave = &am33xx_mcasp1_hwmod,
++ .clk = "mcasp1_ick",
++ .addr = am33xx_mcasp1_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_mcasp1_slaves[] = {
++ &am33xx_l3_slow__mcasp1,
++};
++
++static struct omap_hwmod am33xx_mcasp1_hwmod = {
++ .name = "mcasp1",
++ .class = &am33xx_mcasp_hwmod_class,
++ .clkdm_name = "l3s_clkdm",
++ .mpu_irqs = am33xx_mcasp1_irqs,
++ .sdma_reqs = am33xx_mcasp1_edma_reqs,
++ .main_clk = "mcasp1_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_MCASP1_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_mcasp1_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_mcasp1_slaves),
++};
++
++/* 'mmc' class */
++static struct omap_hwmod_class_sysconfig am33xx_mmc_sysc = {
++ .rev_offs = 0x1fc,
++ .sysc_offs = 0x10,
++ .syss_offs = 0x14,
++ .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
++ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
++ SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
++ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
++ .sysc_fields = &omap_hwmod_sysc_type1,
++};
++
++static struct omap_hwmod_class am33xx_mmc_hwmod_class = {
++ .name = "mmc",
++ .sysc = &am33xx_mmc_sysc,
++};
++
++/* mmc0 */
++static struct omap_hwmod_irq_info am33xx_mmc0_irqs[] = {
++ { .irq = 64 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod_dma_info am33xx_mmc0_edma_reqs[] = {
++ { .name = "tx", .dma_req = 24, },
++ { .name = "rx", .dma_req = 25, },
++ { .dma_req = -1 }
++};
++
++static struct omap_hwmod_addr_space am33xx_mmc0_addr_space[] = {
++ {
++ .pa_start = 0x48060100,
++ .pa_end = 0x48060100 + SZ_4K - 1,
++ .flags = ADDR_TYPE_RT,
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l4ls__mmc0 = {
++ .master = &am33xx_l4ls_hwmod,
++ .slave = &am33xx_mmc0_hwmod,
++ .clk = "mmc0_ick",
++ .addr = am33xx_mmc0_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_mmc0_slaves[] = {
++ &am33xx_l4ls__mmc0,
++};
++
++static struct omap_mmc_dev_attr am33xx_mmc0_dev_attr = {
++ .flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
++};
++
++static struct omap_hwmod am33xx_mmc0_hwmod = {
++ .name = "mmc1",
++ .class = &am33xx_mmc_hwmod_class,
++ .clkdm_name = "l4ls_clkdm",
++ .mpu_irqs = am33xx_mmc0_irqs,
++ .main_clk = "mmc0_fck",
++ .sdma_reqs = am33xx_mmc0_edma_reqs,
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_MMC0_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .dev_attr = &am33xx_mmc0_dev_attr,
++ .slaves = am33xx_mmc0_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_mmc0_slaves),
++};
++
++/* mmc1 */
++static struct omap_hwmod_irq_info am33xx_mmc1_irqs[] = {
++ { .irq = 28 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod_dma_info am33xx_mmc1_edma_reqs[] = {
++ { .name = "tx", .dma_req = 2, },
++ { .name = "rx", .dma_req = 3, },
++ { .dma_req = -1 }
++};
++
++static struct omap_hwmod_addr_space am33xx_mmc1_addr_space[] = {
++ {
++ .pa_start = 0x481D8100,
++ .pa_end = 0x481D8100 + SZ_4K - 1,
++ .flags = ADDR_TYPE_RT,
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l4ls__mmc1 = {
++ .master = &am33xx_l4ls_hwmod,
++ .slave = &am33xx_mmc1_hwmod,
++ .clk = "mmc1_ick",
++ .addr = am33xx_mmc1_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_mmc1_slaves[] = {
++ &am33xx_l4ls__mmc1,
++};
++
++static struct omap_mmc_dev_attr am33xx_mmc1_dev_attr = {
++ .flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
++};
++
++static struct omap_hwmod am33xx_mmc1_hwmod = {
++ .name = "mmc2",
++ .class = &am33xx_mmc_hwmod_class,
++ .clkdm_name = "l4ls_clkdm",
++ .mpu_irqs = am33xx_mmc1_irqs,
++ .main_clk = "mmc1_fck",
++ .sdma_reqs = am33xx_mmc1_edma_reqs,
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_MMC1_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .dev_attr = &am33xx_mmc1_dev_attr,
++ .slaves = am33xx_mmc1_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_mmc1_slaves),
++};
++
++/* mmc2 */
++static struct omap_hwmod_irq_info am33xx_mmc2_irqs[] = {
++ { .irq = 29 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod_dma_info am33xx_mmc2_edma_reqs[] = {
++ { .name = "tx", .dma_req = 64, },
++ { .name = "rx", .dma_req = 65, },
++ { .dma_req = -1 }
++};
++
++static struct omap_hwmod_addr_space am33xx_mmc2_addr_space[] = {
++ {
++ .pa_start = 0x47810100,
++ .pa_end = 0x47810100 + SZ_64K - 1,
++ .flags = ADDR_TYPE_RT,
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l3_main__mmc2 = {
++ .master = &am33xx_l3_main_hwmod,
++ .slave = &am33xx_mmc2_hwmod,
++ .clk = "mmc2_ick",
++ .addr = am33xx_mmc2_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_mmc2_slaves[] = {
++ &am33xx_l3_main__mmc2,
++};
++
++static struct omap_mmc_dev_attr am33xx_mmc2_dev_attr = {
++ .flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
++};
++
++static struct omap_hwmod am33xx_mmc2_hwmod = {
++ .name = "mmc3",
++ .class = &am33xx_mmc_hwmod_class,
++ .clkdm_name = "l3s_clkdm",
++ .mpu_irqs = am33xx_mmc2_irqs,
++ .main_clk = "mmc2_fck",
++ .sdma_reqs = am33xx_mmc2_edma_reqs,
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_MMC2_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .dev_attr = &am33xx_mmc2_dev_attr,
++ .slaves = am33xx_mmc2_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_mmc2_slaves),
++};
++
++/* Master interfaces on the MPU interconnect */
++static struct omap_hwmod_ocp_if *am33xx_l3_mpu_masters[] = {
++ &am33xx_mpu__l3_slow,
++};
++
++/* mpu */
++static struct omap_hwmod am33xx_mpu_hwmod = {
++ .name = "mpu",
++ .class = &mpu_hwmod_class,
++ .clkdm_name = "mpu_clkdm",
++ .main_clk = "mpu_fck",
++ .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_MPU_MPU_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .masters = am33xx_l3_mpu_masters,
++ .masters_cnt = ARRAY_SIZE(am33xx_l3_mpu_masters),
++};
++
++/* ocmcram */
++static struct omap_hwmod_class am33xx_ocmcram_hwmod_class = {
++ .name = "ocmcram",
++};
++
++static struct omap_hwmod am33xx_ocmcram_hwmod = {
++ .name = "ocmcram",
++ .class = &am33xx_ocmcram_hwmod_class,
++ .clkdm_name = "l3_clkdm",
++ .main_clk = "ocmcram_ick",
++ .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_OCMCRAM_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++};
++
++/* ocpwp */
++static struct omap_hwmod_class am33xx_ocpwp_hwmod_class = {
++ .name = "ocpwp",
++};
++
++static struct omap_hwmod am33xx_ocpwp_hwmod = {
++ .name = "ocpwp",
++ .class = &am33xx_ocpwp_hwmod_class,
++ .clkdm_name = "l4ls_clkdm",
++ .main_clk = "ocpwp_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_OCPWP_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++};
++
++/* rtc */
++static struct omap_hwmod_class am33xx_rtc_hwmod_class = {
++ .name = "rtc",
++};
++
++static struct omap_hwmod_irq_info am33xx_rtc_irqs[] = {
++ { .irq = 75 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod am33xx_rtc_hwmod = {
++ .name = "rtc",
++ .class = &am33xx_rtc_hwmod_class,
++ .clkdm_name = "l4_rtc_clkdm",
++ .mpu_irqs = am33xx_rtc_irqs,
++ .main_clk = "rtc_fck",
++ .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET), /* ??? */
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_RTC_RTC_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++};
++
++/* sha0 */
++static struct omap_hwmod_class am33xx_sha0_hwmod_class = {
++ .name = "sha0",
++};
++
++static struct omap_hwmod_irq_info am33xx_sha0_irqs[] = {
++ { .irq = 108 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod am33xx_sha0_hwmod = {
++ .name = "sha0",
++ .class = &am33xx_sha0_hwmod_class,
++ .clkdm_name = "l3_clkdm",
++ .mpu_irqs = am33xx_sha0_irqs,
++ .main_clk = "sha0_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_SHA0_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++};
++
++/* 'smartreflex' class */
++static struct omap_hwmod_class am33xx_smartreflex_hwmod_class = {
++ .name = "smartreflex",
++};
++
++/* smartreflex0 */
++static struct omap_hwmod_irq_info am33xx_smartreflex0_irqs[] = {
++ { .irq = 120 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod am33xx_smartreflex0_hwmod = {
++ .name = "smartreflex0",
++ .class = &am33xx_smartreflex_hwmod_class,
++ .clkdm_name = "l4_wkup_clkdm",
++ .mpu_irqs = am33xx_smartreflex0_irqs,
++ .main_clk = "smartreflex0_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_WKUP_SMARTREFLEX0_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++};
++
++/* smartreflex1 */
++static struct omap_hwmod_irq_info am33xx_smartreflex1_irqs[] = {
++ { .irq = 121 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod am33xx_smartreflex1_hwmod = {
++ .name = "smartreflex1",
++ .class = &am33xx_smartreflex_hwmod_class,
++ .clkdm_name = "l4_wkup_clkdm",
++ .mpu_irqs = am33xx_smartreflex1_irqs,
++ .main_clk = "smartreflex1_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_WKUP_SMARTREFLEX1_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++};
++
++/* 'spi' class */
++static struct omap_hwmod_class_sysconfig am33xx_mcspi_sysc = {
++ .rev_offs = 0x0000,
++ .sysc_offs = 0x0110,
++ .syss_offs = 0x0114,
++ .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
++ SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
++ SYSS_HAS_RESET_STATUS),
++ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
++ .sysc_fields = &omap_hwmod_sysc_type1,
++};
++
++static struct omap_hwmod_class am33xx_spi_hwmod_class = {
++ .name = "mcspi",
++ .sysc = &am33xx_mcspi_sysc,
++ .rev = OMAP4_MCSPI_REV,
++};
++
++/* spi0 */
++static struct omap_hwmod_irq_info am33xx_spi0_irqs[] = {
++ { .irq = 65 },
++ { .irq = -1 }
++};
++
++struct omap_hwmod_dma_info am33xx_mcspi0_edma_reqs[] = {
++ { .name = "rx0", .dma_req = 17 },
++ { .name = "tx0", .dma_req = 16 },
++ { .name = "rx1", .dma_req = 19 },
++ { .name = "tx1", .dma_req = 18 },
++ { .dma_req = -1 }
++};
++
++struct omap_hwmod_addr_space am33xx_mcspi0_addr_space[] = {
++ {
++ .pa_start = 0x48030000,
++ .pa_end = 0x48030000 + SZ_1K - 1,
++ .flags = ADDR_TYPE_RT,
++ },
++ { }
++};
++
++struct omap_hwmod_ocp_if am33xx_l4_core__mcspi0 = {
++ .master = &am33xx_l4per_hwmod,
++ .slave = &am33xx_spi0_hwmod,
++ .clk = "spi0_ick",
++ .addr = am33xx_mcspi0_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_mcspi0_slaves[] = {
++ &am33xx_l4_core__mcspi0,
++};
++
++struct omap2_mcspi_dev_attr mcspi_attrib = {
++ .num_chipselect = 2,
++};
++
++static struct omap_hwmod am33xx_spi0_hwmod = {
++ .name = "spi0",
++ .class = &am33xx_spi_hwmod_class,
++ .clkdm_name = "l4ls_clkdm",
++ .mpu_irqs = am33xx_spi0_irqs,
++ .main_clk = "spi0_fck",
++ .sdma_reqs = am33xx_mcspi0_edma_reqs,
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_SPI0_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .dev_attr = &mcspi_attrib,
++ .slaves = am33xx_mcspi0_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_mcspi0_slaves),
++};
++
++/* spi1 */
++static struct omap_hwmod_irq_info am33xx_spi1_irqs[] = {
++ { .irq = 125 },
++ { .irq = -1 }
++};
++
++struct omap_hwmod_dma_info am33xx_mcspi1_edma_reqs[] = {
++ { .name = "rx0", .dma_req = 43 },
++ { .name = "tx0", .dma_req = 42 },
++ { .name = "rx1", .dma_req = 45 },
++ { .name = "tx1", .dma_req = 44 },
++ { .dma_req = -1 }
++};
++
++struct omap_hwmod_addr_space am33xx_mcspi1_addr_space[] = {
++ {
++ .pa_start = 0x481A0000,
++ .pa_end = 0x481A0000 + SZ_1K - 1,
++ .flags = ADDR_TYPE_RT,
++ },
++ { }
++};
++
++struct omap_hwmod_ocp_if am33xx_l4_core__mcspi1 = {
++ .master = &am33xx_l4per_hwmod,
++ .slave = &am33xx_spi1_hwmod,
++ .clk = "spi1_ick",
++ .addr = am33xx_mcspi1_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_mcspi1_slaves[] = {
++ &am33xx_l4_core__mcspi1,
++};
++
++static struct omap_hwmod am33xx_spi1_hwmod = {
++ .name = "spi1",
++ .class = &am33xx_spi_hwmod_class,
++ .clkdm_name = "l4ls_clkdm",
++ .mpu_irqs = am33xx_spi1_irqs,
++ .main_clk = "spi1_fck",
++ .sdma_reqs = am33xx_mcspi1_edma_reqs,
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_SPI1_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .dev_attr = &mcspi_attrib,
++ .slaves = am33xx_mcspi1_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_mcspi1_slaves),
++};
++
++/* spinlock */
++static struct omap_hwmod_class am33xx_spinlock_hwmod_class = {
++ .name = "spinlock",
++};
++
++static struct omap_hwmod am33xx_spinlock_hwmod = {
++ .name = "spinlock",
++ .class = &am33xx_spinlock_hwmod_class,
++ .clkdm_name = "l4ls_clkdm",
++ .main_clk = "spinlock_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_SPINLOCK_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++};
++
++/* 'timer 0 & 2-7' class */
++static struct omap_hwmod_class_sysconfig am33xx_timer_sysc = {
++ .rev_offs = 0x0000,
++ .sysc_offs = 0x0010,
++ .syss_offs = 0x0014,
++ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
++ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
++ SIDLE_SMART_WKUP),
++ .sysc_fields = &omap_hwmod_sysc_type2,
++};
++
++static struct omap_hwmod_class am33xx_timer_hwmod_class = {
++ .name = "timer",
++ .sysc = &am33xx_timer_sysc,
++};
++
++/* timer0 */
++/* l4 wkup -> timer0 interface */
++static struct omap_hwmod_addr_space am33xx_timer0_addr_space[] = {
++ {
++ .pa_start = 0x44E05000,
++ .pa_end = 0x44E05000 + SZ_1K - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l4wkup__timer0 = {
++ .master = &am33xx_l4wkup_hwmod,
++ .slave = &am33xx_timer0_hwmod,
++ .clk = "timer0_ick",
++ .addr = am33xx_timer0_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_timer0_slaves[] = {
++ &am33xx_l4wkup__timer0,
++};
++
++static struct omap_hwmod_irq_info am33xx_timer0_irqs[] = {
++ { .irq = 66 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod am33xx_timer0_hwmod = {
++ .name = "timer0",
++ .class = &am33xx_timer_hwmod_class,
++ .clkdm_name = "l4_wkup_clkdm",
++ .mpu_irqs = am33xx_timer0_irqs,
++ .main_clk = "timer0_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_WKUP_TIMER0_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_timer0_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_timer0_slaves),
++ .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
++};
++
++/* timer1 1ms */
++static struct omap_hwmod_class_sysconfig am33xx_timer1ms_sysc = {
++ .rev_offs = 0x0000,
++ .sysc_offs = 0x0010,
++ .syss_offs = 0x0014,
++ .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
++ SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
++ SYSS_HAS_RESET_STATUS),
++ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
++ .sysc_fields = &omap_hwmod_sysc_type1,
++};
++
++static struct omap_hwmod_class am33xx_timer1ms_hwmod_class = {
++ .name = "timer",
++ .sysc = &am33xx_timer1ms_sysc,
++};
++
++/* l4 wkup -> timer1 interface */
++static struct omap_hwmod_addr_space am33xx_timer1_addr_space[] = {
++ {
++ .pa_start = 0x44E31000,
++ .pa_end = 0x44E31000 + SZ_1K - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l4wkup__timer1 = {
++ .master = &am33xx_l4wkup_hwmod,
++ .slave = &am33xx_timer1_hwmod,
++ .clk = "timer1_ick",
++ .addr = am33xx_timer1_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_timer1_slaves[] = {
++ &am33xx_l4wkup__timer1,
++};
++
++static struct omap_hwmod_irq_info am33xx_timer1_irqs[] = {
++ { .irq = 67 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod am33xx_timer1_hwmod = {
++ .name = "timer1",
++ .class = &am33xx_timer1ms_hwmod_class,
++ .clkdm_name = "l4_wkup_clkdm",
++ .mpu_irqs = am33xx_timer1_irqs,
++ .main_clk = "timer1_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_WKUP_TIMER1_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_timer1_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_timer1_slaves),
++};
++
++/* timer2 */
++/* l4 per -> timer2 interface */
++static struct omap_hwmod_addr_space am33xx_timer2_addr_space[] = {
++ {
++ .pa_start = 0x48040000,
++ .pa_end = 0x48040000 + SZ_1K - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l4per__timer2 = {
++ .master = &am33xx_l4per_hwmod,
++ .slave = &am33xx_timer2_hwmod,
++ .clk = "timer2_ick",
++ .addr = am33xx_timer2_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_timer2_slaves[] = {
++ &am33xx_l4per__timer2,
++};
++
++static struct omap_hwmod_irq_info am33xx_timer2_irqs[] = {
++ { .irq = 68 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod am33xx_timer2_hwmod = {
++ .name = "timer2",
++ .class = &am33xx_timer_hwmod_class,
++ .clkdm_name = "l4ls_clkdm",
++ .mpu_irqs = am33xx_timer2_irqs,
++ .main_clk = "timer2_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_TIMER2_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_timer2_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_timer2_slaves),
++};
++
++/* timer3 */
++/* l4 per -> timer3 interface */
++static struct omap_hwmod_addr_space am33xx_timer3_addr_space[] = {
++ {
++ .pa_start = 0x48042000,
++ .pa_end = 0x48042000 + SZ_1K - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l4per__timer3 = {
++ .master = &am33xx_l4per_hwmod,
++ .slave = &am33xx_timer3_hwmod,
++ .clk = "timer3_ick",
++ .addr = am33xx_timer3_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_timer3_slaves[] = {
++ &am33xx_l4per__timer3,
++};
++
++static struct omap_hwmod_irq_info am33xx_timer3_irqs[] = {
++ { .irq = 69 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod am33xx_timer3_hwmod = {
++ .name = "timer3",
++ .class = &am33xx_timer_hwmod_class,
++ .clkdm_name = "l4ls_clkdm",
++ .mpu_irqs = am33xx_timer3_irqs,
++ .main_clk = "timer3_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_TIMER3_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_timer3_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_timer3_slaves),
++};
++
++/* timer4 */
++/* l4 per -> timer4 interface */
++static struct omap_hwmod_addr_space am33xx_timer4_addr_space[] = {
++ {
++ .pa_start = 0x48044000,
++ .pa_end = 0x48044000 + SZ_1K - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l4per__timer4 = {
++ .master = &am33xx_l4per_hwmod,
++ .slave = &am33xx_timer4_hwmod,
++ .clk = "timer4_ick",
++ .addr = am33xx_timer4_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_timer4_slaves[] = {
++ &am33xx_l4per__timer4,
++};
++
++static struct omap_hwmod_irq_info am33xx_timer4_irqs[] = {
++ { .irq = 92 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod am33xx_timer4_hwmod = {
++ .name = "timer4",
++ .class = &am33xx_timer_hwmod_class,
++ .clkdm_name = "l4ls_clkdm",
++ .mpu_irqs = am33xx_timer4_irqs,
++ .main_clk = "timer4_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_TIMER4_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_timer4_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_timer4_slaves),
++};
++
++/* timer5 */
++/* l4 per -> timer5 interface */
++static struct omap_hwmod_addr_space am33xx_timer5_addr_space[] = {
++ {
++ .pa_start = 0x48046000,
++ .pa_end = 0x48046000 + SZ_1K - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l4per__timer5 = {
++ .master = &am33xx_l4per_hwmod,
++ .slave = &am33xx_timer5_hwmod,
++ .clk = "timer5_ick",
++ .addr = am33xx_timer5_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_timer5_slaves[] = {
++ &am33xx_l4per__timer5,
++};
++
++static struct omap_hwmod_irq_info am33xx_timer5_irqs[] = {
++ { .irq = 93 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod am33xx_timer5_hwmod = {
++ .name = "timer5",
++ .class = &am33xx_timer_hwmod_class,
++ .clkdm_name = "l4ls_clkdm",
++ .mpu_irqs = am33xx_timer5_irqs,
++ .main_clk = "timer5_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_TIMER5_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_timer5_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_timer5_slaves),
++};
++
++/* timer6 */
++/* l4 per -> timer6 interface */
++static struct omap_hwmod_addr_space am33xx_timer6_addr_space[] = {
++ {
++ .pa_start = 0x48048000,
++ .pa_end = 0x48048000 + SZ_1K - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l4per__timer6 = {
++ .master = &am33xx_l4per_hwmod,
++ .slave = &am33xx_timer6_hwmod,
++ .clk = "timer6_ick",
++ .addr = am33xx_timer6_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_timer6_slaves[] = {
++ &am33xx_l4per__timer6,
++};
++
++static struct omap_hwmod_irq_info am33xx_timer6_irqs[] = {
++ { .irq = 94 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod am33xx_timer6_hwmod = {
++ .name = "timer6",
++ .class = &am33xx_timer_hwmod_class,
++ .clkdm_name = "l4ls_clkdm",
++ .mpu_irqs = am33xx_timer6_irqs,
++ .main_clk = "timer6_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_TIMER6_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_timer6_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_timer6_slaves),
++};
++
++/* timer7 */
++/* l4 per -> timer7 interface */
++static struct omap_hwmod_addr_space am33xx_timer7_addr_space[] = {
++ {
++ .pa_start = 0x4804A000,
++ .pa_end = 0x4804A000 + SZ_1K - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l4per__timer7 = {
++ .master = &am33xx_l4per_hwmod,
++ .slave = &am33xx_timer7_hwmod,
++ .clk = "timer7_ick",
++ .addr = am33xx_timer7_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_timer7_slaves[] = {
++ &am33xx_l4per__timer7,
++};
++
++static struct omap_hwmod_irq_info am33xx_timer7_irqs[] = {
++ { .irq = 95 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod am33xx_timer7_hwmod = {
++ .name = "timer7",
++ .class = &am33xx_timer_hwmod_class,
++ .clkdm_name = "l4ls_clkdm",
++ .mpu_irqs = am33xx_timer7_irqs,
++ .main_clk = "timer7_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_TIMER7_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_timer7_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_timer7_slaves),
++};
++
++/* tpcc */
++#define AM33XX_TPCC_BASE 0x49000000
++#define AM33XX_TPTC0_BASE 0x49800000
++#define AM33XX_TPTC1_BASE 0x49900000
++#define AM33XX_TPTC2_BASE 0x49a00000
++
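++/*
++ * The EDMA engine is split into one channel controller (TPCC), which owns
++ * the parameter RAM and event-to-channel mapping, and three transfer
++ * controllers (TPTC0-2) that move the data.  The hwmods below only model
++ * their clock/interconnect/PRCM view; the EDMA driver itself is expected
++ * to claim these resources separately.
++ */
++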
++/* 'tpcc' class */
++static struct omap_hwmod_class am33xx_tpcc_hwmod_class = {
++ .name = "tpcc",
++};
++
++static struct omap_hwmod_irq_info am33xx_tpcc_irqs[] = {
++ { .name = "edma0", .irq = 12 },
++ { .name = "edma0_mperr", .irq = 13, },
++ { .name = "edma0_err", .irq = 14 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod_addr_space am33xx_tpcc_addr_space[] = {
++ {
++ .name = "edma_cc0",
++ .pa_start = AM33XX_TPCC_BASE,
++ .pa_end = AM33XX_TPCC_BASE + SZ_32K - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l3_main__tpcc = {
++ .master = &am33xx_l3_main_hwmod,
++ .slave = &am33xx_tpcc_hwmod,
++ .addr = am33xx_tpcc_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_tpcc_slaves[] = {
++ &am33xx_l3_main__tpcc,
++};
++
++static struct omap_hwmod am33xx_tpcc_hwmod = {
++ .name = "tpcc",
++ .class = &am33xx_tpcc_hwmod_class,
++ .clkdm_name = "l3_clkdm",
++ .mpu_irqs = am33xx_tpcc_irqs,
++ .main_clk = "tpcc_ick",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_TPCC_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_tpcc_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_tpcc_slaves),
++};
++
++static struct omap_hwmod_class_sysconfig am33xx_tptc_sysc = {
++ .rev_offs = 0x0,
++ .sysc_offs = 0x10,
++ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
++ SYSC_HAS_MIDLEMODE),
++ .idlemodes = (SIDLE_FORCE | SIDLE_SMART | MSTANDBY_FORCE),
++ .sysc_fields = &omap_hwmod_sysc_type2,
++};
++
++/* 'tptc' class */
++static struct omap_hwmod_class am33xx_tptc_hwmod_class = {
++ .name = "tptc",
++ .sysc = &am33xx_tptc_sysc,
++};
++
++/* tptc0 */
++static struct omap_hwmod_irq_info am33xx_tptc0_irqs[] = {
++ { .irq = 112 },
++ { .irq = -1 }
++};
++
++struct omap_hwmod_addr_space am33xx_tptc0_addr_space[] = {
++ {
++ .name = "edma_tc0",
++ .pa_start = AM33XX_TPTC0_BASE,
++ .pa_end = AM33XX_TPTC0_BASE + SZ_8K - 1,
++ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
++ },
++ { }
++};
++
++struct omap_hwmod_ocp_if am33xx_l3_main__tptc0 = {
++ .master = &am33xx_l3_main_hwmod,
++ .slave = &am33xx_tptc0_hwmod,
++ .addr = am33xx_tptc0_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_tptc0_slaves[] = {
++ &am33xx_l3_main__tptc0,
++};
++
++static struct omap_hwmod am33xx_tptc0_hwmod = {
++ .name = "tptc0",
++ .class = &am33xx_tptc_hwmod_class,
++ .clkdm_name = "l3_clkdm",
++ .mpu_irqs = am33xx_tptc0_irqs,
++ .main_clk = "tptc0_ick",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_TPTC0_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_tptc0_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_tptc0_slaves),
++ .flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY),
++};
++
++/* tptc1 */
++static struct omap_hwmod_irq_info am33xx_tptc1_irqs[] = {
++ { .irq = 113 },
++ { .irq = -1 }
++};
++
++struct omap_hwmod_addr_space am33xx_tptc1_addr_space[] = {
++ {
++ .name = "edma_tc1",
++ .pa_start = AM33XX_TPTC1_BASE,
++ .pa_end = AM33XX_TPTC1_BASE + SZ_8K - 1,
++ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
++ },
++ { }
++};
++
++struct omap_hwmod_ocp_if am33xx_l3_main__tptc1 = {
++ .master = &am33xx_l3_main_hwmod,
++ .slave = &am33xx_tptc1_hwmod,
++ .addr = am33xx_tptc1_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_tptc1_slaves[] = {
++ &am33xx_l3_main__tptc1,
++};
++
++static struct omap_hwmod am33xx_tptc1_hwmod = {
++ .name = "tptc1",
++ .class = &am33xx_tptc_hwmod_class,
++ .clkdm_name = "l3_clkdm",
++ .mpu_irqs = am33xx_tptc1_irqs,
++ .main_clk = "tptc1_ick",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_TPTC1_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_tptc1_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_tptc1_slaves),
++ .flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY),
++};
++
++/* tptc2 */
++static struct omap_hwmod_irq_info am33xx_tptc2_irqs[] = {
++ { .irq = 114 },
++ { .irq = -1 }
++};
++
++struct omap_hwmod_addr_space am33xx_tptc2_addr_space[] = {
++ {
++ .name = "edma_tc2",
++ .pa_start = AM33XX_TPTC2_BASE,
++ .pa_end = AM33XX_TPTC2_BASE + SZ_8K - 1,
++ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
++ },
++ { }
++};
++
++struct omap_hwmod_ocp_if am33xx_l3_main__tptc2 = {
++ .master = &am33xx_l3_main_hwmod,
++ .slave = &am33xx_tptc2_hwmod,
++ .addr = am33xx_tptc2_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_tptc2_slaves[] = {
++ &am33xx_l3_main__tptc2,
++};
++
++static struct omap_hwmod am33xx_tptc2_hwmod = {
++ .name = "tptc2",
++ .class = &am33xx_tptc_hwmod_class,
++ .clkdm_name = "l3_clkdm",
++ .mpu_irqs = am33xx_tptc2_irqs,
++ .main_clk = "tptc2_ick",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_TPTC2_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_tptc2_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_tptc2_slaves),
++ .flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY),
++};
++
++/* 'uart' class */
++static struct omap_hwmod_class_sysconfig uart_sysc = {
++ .rev_offs = 0x50,
++ .sysc_offs = 0x54,
++ .syss_offs = 0x58,
++ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP |
++ SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
++ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
++ SIDLE_SMART_WKUP),
++ .sysc_fields = &omap_hwmod_sysc_type1,
++};
++
++static struct omap_hwmod_class uart_class = {
++ .name = "uart",
++ .sysc = &uart_sysc,
++};
++
++/* uart1 */
++static struct omap_hwmod_dma_info uart1_edma_reqs[] = {
++ { .name = "tx", .dma_req = 26, },
++ { .name = "rx", .dma_req = 27, },
++ { .dma_req = -1 }
++};
++
++static struct omap_hwmod_addr_space am33xx_uart1_addr_space[] = {
++ {
++ .pa_start = 0x44E09000,
++ .pa_end = 0x44E09000 + SZ_8K - 1,
++ .flags = ADDR_TYPE_RT,
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l4_wkup__uart1 = {
++ .master = &am33xx_l4wkup_hwmod,
++ .slave = &am33xx_uart1_hwmod,
++ .clk = "uart1_ick",
++ .addr = am33xx_uart1_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_irq_info am33xx_uart1_irqs[] = {
++ { .irq = 72 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod_ocp_if *am33xx_uart1_slaves[] = {
++ &am33xx_l4_wkup__uart1,
++};
++
++static struct omap_hwmod am33xx_uart1_hwmod = {
++ .name = "uart1",
++ .class = &uart_class,
++ .clkdm_name = "l4_wkup_clkdm",
++ .mpu_irqs = am33xx_uart1_irqs,
++ .main_clk = "uart1_fck",
++ .sdma_reqs = uart1_edma_reqs,
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_WKUP_UART0_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_uart1_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_uart1_slaves),
++};
++
++/* uart2 */
++static struct omap_hwmod_addr_space am33xx_uart2_addr_space[] = {
++ {
++ .pa_start = 0x48022000,
++ .pa_end = 0x48022000 + SZ_8K - 1,
++ .flags = ADDR_TYPE_RT,
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l4_ls__uart2 = {
++	.master		= &am33xx_l4ls_hwmod,
++	.slave		= &am33xx_uart2_hwmod,
++ .clk = "uart2_ick",
++ .addr = am33xx_uart2_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_irq_info am33xx_uart2_irqs[] = {
++ { .irq = 73 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod_ocp_if *am33xx_uart2_slaves[] = {
++ &am33xx_l4_ls__uart2,
++};
++
++static struct omap_hwmod am33xx_uart2_hwmod = {
++ .name = "uart2",
++ .class = &uart_class,
++ .clkdm_name = "l4ls_clkdm",
++ .mpu_irqs = am33xx_uart2_irqs,
++ .main_clk = "uart2_fck",
++ .sdma_reqs = uart1_edma_reqs,
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_UART1_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_uart2_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_uart2_slaves),
++};
++
++/* uart3 */
++static struct omap_hwmod_dma_info uart3_edma_reqs[] = {
++ { .name = "tx", .dma_req = 30, },
++ { .name = "rx", .dma_req = 31, },
++ { .dma_req = -1 }
++};
++
++static struct omap_hwmod_addr_space am33xx_uart3_addr_space[] = {
++ {
++ .pa_start = 0x48024000,
++ .pa_end = 0x48024000 + SZ_8K - 1,
++ .flags = ADDR_TYPE_RT,
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l4_ls__uart3 = {
++	.master		= &am33xx_l4ls_hwmod,
++	.slave		= &am33xx_uart3_hwmod,
++ .clk = "uart3_ick",
++ .addr = am33xx_uart3_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_irq_info am33xx_uart3_irqs[] = {
++ { .irq = 74 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod_ocp_if *am33xx_uart3_slaves[] = {
++ &am33xx_l4_ls__uart3,
++};
++
++static struct omap_hwmod am33xx_uart3_hwmod = {
++ .name = "uart3",
++ .class = &uart_class,
++ .clkdm_name = "l4ls_clkdm",
++ .mpu_irqs = am33xx_uart3_irqs,
++ .main_clk = "uart3_fck",
++ .sdma_reqs = uart3_edma_reqs,
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_UART2_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_uart3_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_uart3_slaves),
++};
++
++/* uart4 */
++static struct omap_hwmod_addr_space am33xx_uart4_addr_space[] = {
++ {
++ .pa_start = 0x481A6000,
++ .pa_end = 0x481A6000 + SZ_8K - 1,
++ .flags = ADDR_TYPE_RT,
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l4_ls__uart4 = {
++	.master		= &am33xx_l4ls_hwmod,
++	.slave		= &am33xx_uart4_hwmod,
++ .clk = "uart4_ick",
++ .addr = am33xx_uart4_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_irq_info am33xx_uart4_irqs[] = {
++ { .irq = 44 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod_ocp_if *am33xx_uart4_slaves[] = {
++ &am33xx_l4_ls__uart4,
++};
++
++static struct omap_hwmod am33xx_uart4_hwmod = {
++ .name = "uart4",
++ .class = &uart_class,
++ .mpu_irqs = am33xx_uart4_irqs,
++ .main_clk = "uart4_fck",
++ .clkdm_name = "l4ls_clkdm",
++ .sdma_reqs = uart1_edma_reqs,
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_UART3_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_uart4_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_uart4_slaves),
++};
++
++/* uart5 */
++static struct omap_hwmod_addr_space am33xx_uart5_addr_space[] = {
++ {
++ .pa_start = 0x481A8000,
++ .pa_end = 0x481A8000 + SZ_8K - 1,
++ .flags = ADDR_TYPE_RT,
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l4_ls__uart5 = {
++	.master		= &am33xx_l4ls_hwmod,
++	.slave		= &am33xx_uart5_hwmod,
++ .clk = "uart5_ick",
++ .addr = am33xx_uart5_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_irq_info am33xx_uart5_irqs[] = {
++ { .irq = 45 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod_ocp_if *am33xx_uart5_slaves[] = {
++ &am33xx_l4_ls__uart5,
++};
++
++static struct omap_hwmod am33xx_uart5_hwmod = {
++ .name = "uart5",
++ .class = &uart_class,
++ .clkdm_name = "l4ls_clkdm",
++ .mpu_irqs = am33xx_uart5_irqs,
++ .main_clk = "uart5_fck",
++ .sdma_reqs = uart1_edma_reqs,
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_UART4_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_uart5_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_uart5_slaves),
++};
++
++/* uart6 */
++static struct omap_hwmod_addr_space am33xx_uart6_addr_space[] = {
++ {
++ .pa_start = 0x481AA000,
++ .pa_end = 0x481AA000 + SZ_8K - 1,
++ .flags = ADDR_TYPE_RT,
++ },
++ { }
++};
++
++static struct omap_hwmod_ocp_if am33xx_l4_ls__uart6 = {
++	.master		= &am33xx_l4ls_hwmod,
++	.slave		= &am33xx_uart6_hwmod,
++ .clk = "uart6_ick",
++ .addr = am33xx_uart6_addr_space,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_irq_info am33xx_uart6_irqs[] = {
++ { .irq = 46 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod_ocp_if *am33xx_uart6_slaves[] = {
++ &am33xx_l4_ls__uart6,
++};
++
++static struct omap_hwmod am33xx_uart6_hwmod = {
++ .name = "uart6",
++ .class = &uart_class,
++ .clkdm_name = "l4ls_clkdm",
++ .mpu_irqs = am33xx_uart6_irqs,
++ .main_clk = "uart6_fck",
++ .sdma_reqs = uart1_edma_reqs,
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_UART5_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_uart6_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_uart6_slaves),
++};
++
++/* 'wd_timer' class */
++static struct omap_hwmod_class am33xx_wd_timer_hwmod_class = {
++ .name = "wd_timer",
++};
++
++static struct omap_hwmod_addr_space am33xx_wd_timer1_addrs[] = {
++ {
++ .pa_start = 0x44E35000,
++ .pa_end = 0x44E35000 + SZ_4K - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
++
++/* l4_wkup -> wd_timer1 */
++static struct omap_hwmod_ocp_if am33xx_l4wkup__wd_timer1 = {
++ .master = &am33xx_l4wkup_hwmod,
++ .slave = &am33xx_wd_timer1_hwmod,
++ .addr = am33xx_wd_timer1_addrs,
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_wd_timer1_slaves[] = {
++ &am33xx_l4wkup__wd_timer1,
++};
++
++/*
++ * TODO: the device.c file uses a hardcoded name ("wd_timer2") for the
++ * watchdog timer driver, so the same name is used here for now.
++ */
++static struct omap_hwmod am33xx_wd_timer1_hwmod = {
++ .name = "wd_timer2",
++ .class = &am33xx_wd_timer_hwmod_class,
++ .clkdm_name = "l4_wkup_clkdm",
++ .main_clk = "wdt1_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_WKUP_WDT1_CLKCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = am33xx_wd_timer1_slaves,
++ .slaves_cnt = ARRAY_SIZE(am33xx_wd_timer1_slaves),
++};
++
++/* wkup_m3 */
++static struct omap_hwmod_class am33xx_wkup_m3_hwmod_class = {
++ .name = "wkup_m3",
++};
++
++static struct omap_hwmod_rst_info am33xx_wkup_m3_resets[] = {
++ { .name = "wkup_m3", .rst_shift = 3, .st_shift = 5 },
++};
++
++static struct omap_hwmod am33xx_wkup_m3_hwmod = {
++ .name = "wkup_m3",
++ .class = &am33xx_wkup_m3_hwmod_class,
++ .clkdm_name = "l4_wkup_aon_clkdm",
++ .main_clk = "wkup_m3_fck",
++ .flags = HWMOD_INIT_NO_RESET, /* Keep hardreset asserted */
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_WKUP_WKUP_M3_CLKCTRL_OFFSET,
++ .rstctrl_offs = AM33XX_RM_WKUP_RSTCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .rst_lines = am33xx_wkup_m3_resets,
++ .rst_lines_cnt = ARRAY_SIZE(am33xx_wkup_m3_resets),
++};
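++
++/*
++ * The wkup_m3 is the Cortex-M3 co-processor in the wakeup domain that runs
++ * the power-management firmware.  HWMOD_INIT_NO_RESET above keeps its
++ * hardreset line asserted at init; the reset is presumably released by the
++ * PM code only after the firmware image has been loaded into the M3.
++ */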
++
++/* usbss */
++/* L3 SLOW -> USBSS interface */
++static struct omap_hwmod_addr_space am33xx_usbss_addr_space[] = {
++ {
++ .name = "usbss",
++ .pa_start = 0x47400000,
++ .pa_end = 0x47400000 + SZ_4K - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ {
++ .name = "musb0",
++ .pa_start = 0x47401000,
++ .pa_end = 0x47401000 + SZ_2K - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ {
++ .name = "musb1",
++ .pa_start = 0x47401800,
++ .pa_end = 0x47401800 + SZ_2K - 1,
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
++
++static struct omap_hwmod_class_sysconfig am33xx_usbhsotg_sysc = {
++ .rev_offs = 0x0,
++ .sysc_offs = 0x10,
++ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE),
++ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
++ MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
++ .sysc_fields = &omap_hwmod_sysc_type2,
++};
++
++static struct omap_hwmod_class am33xx_usbotg_class = {
++ .name = "usbotg",
++ .sysc = &am33xx_usbhsotg_sysc,
++};
++
++static struct omap_hwmod_irq_info am33xx_usbss_mpu_irqs[] = {
++ { .name = "usbss-irq", .irq = 17, },
++ { .name = "musb0-irq", .irq = 18, },
++ { .name = "musb1-irq", .irq = 19, },
++ { .irq = -1, },
++};
++
++static struct omap_hwmod_ocp_if am33xx_l3_slow__usbss = {
++ .master = &am33xx_l3slow_hwmod,
++ .slave = &am33xx_usbss_hwmod,
++ .clk = "usbotg_ick",
++ .addr = am33xx_usbss_addr_space,
++ .user = OCP_USER_MPU,
++ .flags = OCPIF_SWSUP_IDLE,
++};
++
++static struct omap_hwmod_ocp_if *am33xx_usbss_slaves[] = {
++ &am33xx_l3_slow__usbss,
++};
++
++static struct omap_hwmod am33xx_usbss_hwmod = {
++	.name		= "usb_otg_hs",
++	.class		= &am33xx_usbotg_class,
++	.clkdm_name	= "l3s_clkdm",
++	.mpu_irqs	= am33xx_usbss_mpu_irqs,
++	.main_clk	= "usbotg_fck",
++	.flags		= (HWMOD_CONTROL_OPT_CLKS_IN_RESET | HWMOD_SWSUP_SIDLE |
++				HWMOD_SWSUP_MSTANDBY),
++	.prcm		= {
++		.omap4	= {
++			.clkctrl_offs	= AM33XX_CM_PER_USB0_CLKCTRL_OFFSET,
++			.modulemode	= MODULEMODE_SWCTRL,
++		},
++	},
++	.slaves		= am33xx_usbss_slaves,
++	.slaves_cnt	= ARRAY_SIZE(am33xx_usbss_slaves),
++};
++
++/* gfx */
++/* Pseudo hwmod for reset control purpose only */
++static struct omap_hwmod_class am33xx_gfx_hwmod_class = {
++ .name = "gfx",
++};
++
++static struct omap_hwmod_rst_info am33xx_gfx_resets[] = {
++ { .name = "gfx", .rst_shift = 0 },
++};
++
++static struct omap_hwmod am33xx_gfx_hwmod = {
++ .name = "gfx",
++ .class = &am33xx_gfx_hwmod_class,
++ .clkdm_name = "gfx_l3_clkdm",
++ .main_clk = "gfx_fclk",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_GFX_GFX_CLKCTRL_OFFSET,
++ .rstctrl_offs = AM33XX_RM_GFX_RSTCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .rst_lines = am33xx_gfx_resets,
++ .rst_lines_cnt = ARRAY_SIZE(am33xx_gfx_resets),
++};
++
++/* PRUSS */
++/* Pseudo hwmod for reset control purpose only */
++static struct omap_hwmod_class am33xx_pruss_hwmod_class = {
++ .name = "pruss",
++};
++
++static struct omap_hwmod_rst_info am33xx_pruss_resets[] = {
++ { .name = "pruss", .rst_shift = 1 },
++};
++
++static struct omap_hwmod am33xx_pruss_hwmod = {
++ .name = "pruss",
++ .class = &am33xx_pruss_hwmod_class,
++ .clkdm_name = "pruss_ocp_clkdm",
++ .main_clk = "pruss_uart_gclk",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = AM33XX_CM_PER_PRUSS_CLKCTRL_OFFSET,
++ .rstctrl_offs = AM33XX_RM_PER_RSTCTRL_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .rst_lines = am33xx_pruss_resets,
++ .rst_lines_cnt = ARRAY_SIZE(am33xx_pruss_resets),
++};
++
++static __initdata struct omap_hwmod *am33xx_hwmods[] = {
++ /* l3 class */
++ &am33xx_l3_instr_hwmod,
++ &am33xx_l3_main_hwmod,
++ /* l3s class */
++ &am33xx_l3slow_hwmod,
++ /* l4hs class */
++ &am33xx_l4_hs_hwmod,
++ /* l4fw class */
++ &am33xx_l4fw_hwmod,
++ /* l4ls class */
++ &am33xx_l4ls_hwmod,
++ /* l4per class */
++ &am33xx_l4per_hwmod,
++ /* l4wkup class */
++ &am33xx_l4wkup_hwmod,
++ /* clkdiv32k class */
++ &am33xx_clkdiv32k_hwmod,
++ /* mpu class */
++ &am33xx_mpu_hwmod,
++ /* adc_tsc class */
++ &am33xx_adc_tsc_hwmod,
++ /* aes class */
++ &am33xx_aes0_hwmod,
++ /* cefuse class */
++ &am33xx_cefuse_hwmod,
++ /* control class */
++ &am33xx_control_hwmod,
++ /* dcan class */
++ &am33xx_dcan0_hwmod,
++ &am33xx_dcan1_hwmod,
++ /* debugss class */
++ &am33xx_debugss_hwmod,
++ /* elm class */
++ &am33xx_elm_hwmod,
++ /* emif_fw class */
++ &am33xx_emif_fw_hwmod,
++ /* epwmss class */
++ &am33xx_ehrpwm0_hwmod,
++ &am33xx_ehrpwm1_hwmod,
++ &am33xx_ehrpwm2_hwmod,
++ &am33xx_ecap0_hwmod,
++ &am33xx_ecap1_hwmod,
++ &am33xx_ecap2_hwmod,
++ /* gpio class */
++ &am33xx_gpio0_hwmod,
++ &am33xx_gpio1_hwmod,
++ &am33xx_gpio2_hwmod,
++ &am33xx_gpio3_hwmod,
++ /* gpmc class */
++ &am33xx_gpmc_hwmod,
++ /* i2c class */
++ &am33xx_i2c1_hwmod,
++ &am33xx_i2c2_hwmod,
++ &am33xx_i2c3_hwmod,
++ /* ieee5000 class */
++ &am33xx_ieee5000_hwmod,
++ /* mailbox class */
++ &am33xx_mailbox_hwmod,
++ /* mcasp class */
++ &am33xx_mcasp0_hwmod,
++ &am33xx_mcasp1_hwmod,
++ /* mmc class */
++ &am33xx_mmc0_hwmod,
++ &am33xx_mmc1_hwmod,
++ &am33xx_mmc2_hwmod,
++ /* ocmcram class */
++ &am33xx_ocmcram_hwmod,
++ /* ocpwp class */
++ &am33xx_ocpwp_hwmod,
++ /* rtc class */
++ &am33xx_rtc_hwmod,
++ /* sha0 class */
++ &am33xx_sha0_hwmod,
++ /* smartreflex class */
++ &am33xx_smartreflex0_hwmod,
++ &am33xx_smartreflex1_hwmod,
++ /* spi class */
++ &am33xx_spi0_hwmod,
++ &am33xx_spi1_hwmod,
++ /* spinlock class */
++ &am33xx_spinlock_hwmod,
++ /* uart class */
++ &am33xx_uart1_hwmod,
++ &am33xx_uart2_hwmod,
++ &am33xx_uart3_hwmod,
++ &am33xx_uart4_hwmod,
++ &am33xx_uart5_hwmod,
++ &am33xx_uart6_hwmod,
++ /* timer class */
++ &am33xx_timer0_hwmod,
++ &am33xx_timer1_hwmod,
++ &am33xx_timer2_hwmod,
++ &am33xx_timer3_hwmod,
++ &am33xx_timer4_hwmod,
++ &am33xx_timer5_hwmod,
++ &am33xx_timer6_hwmod,
++ &am33xx_timer7_hwmod,
++ /* wkup_m3 class */
++ &am33xx_wkup_m3_hwmod,
++ /* wd_timer class */
++ &am33xx_wd_timer1_hwmod,
++ /* usbss hwmod */
++ &am33xx_usbss_hwmod,
++ /* cpgmac0 class */
++ &am33xx_cpgmac0_hwmod,
++ /* tptc class */
++ &am33xx_tptc0_hwmod,
++ &am33xx_tptc1_hwmod,
++ &am33xx_tptc2_hwmod,
++ /* tpcc class */
++ &am33xx_tpcc_hwmod,
++ /* LCDC class */
++ &am33xx_lcdc_hwmod,
++ /* gfx/sgx */
++ &am33xx_gfx_hwmod,
++ /* pruss */
++ &am33xx_pruss_hwmod,
++ NULL,
++};
++
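++/*
++ * Register the static list above with the hwmod core.  The list is
++ * NULL-terminated, which is how omap_hwmod_register() finds its end.
++ * This is expected to be called once from the AM33xx early-init path,
++ * before omap_hwmod_setup_all() configures the registered modules; the
++ * exact call site lives outside this file.
++ */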
++int __init am33xx_hwmod_init(void)
++{
++ return omap_hwmod_register(am33xx_hwmods);
++}
+diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+index eef43e2..5324e8d 100644
+--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
++++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+@@ -84,6 +84,8 @@ static struct omap_hwmod omap3xxx_mcbsp4_hwmod;
+ static struct omap_hwmod omap3xxx_mcbsp5_hwmod;
+ static struct omap_hwmod omap3xxx_mcbsp2_sidetone_hwmod;
+ static struct omap_hwmod omap3xxx_mcbsp3_sidetone_hwmod;
++static struct omap_hwmod omap3xxx_usb_host_hs_hwmod;
++static struct omap_hwmod omap3xxx_usb_tll_hs_hwmod;
+
+ /* L3 -> L4_CORE interface */
+ static struct omap_hwmod_ocp_if omap3xxx_l3_main__l4_core = {
+@@ -164,6 +166,7 @@ static struct omap_hwmod omap3xxx_uart1_hwmod;
+ static struct omap_hwmod omap3xxx_uart2_hwmod;
+ static struct omap_hwmod omap3xxx_uart3_hwmod;
+ static struct omap_hwmod omap3xxx_uart4_hwmod;
++static struct omap_hwmod am35xx_uart4_hwmod;
+ static struct omap_hwmod omap3xxx_usbhsotg_hwmod;
+
+ /* l3_core -> usbhsotg interface */
+@@ -299,6 +302,23 @@ static struct omap_hwmod_ocp_if omap3_l4_per__uart4 = {
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+ };
+
++/* AM35xx: L4 CORE -> UART4 interface */
++static struct omap_hwmod_addr_space am35xx_uart4_addr_space[] = {
++ {
++ .pa_start = OMAP3_UART4_AM35XX_BASE,
++ .pa_end = OMAP3_UART4_AM35XX_BASE + SZ_1K - 1,
++ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
++ },
++	{ }
++};
++
++static struct omap_hwmod_ocp_if am35xx_l4_core__uart4 = {
++ .master = &omap3xxx_l4_core_hwmod,
++ .slave = &am35xx_uart4_hwmod,
++ .clk = "uart4_ick",
++ .addr = am35xx_uart4_addr_space,
++ .user = OCP_USER_MPU | OCP_USER_SDMA,
++};
++
+ /* L4 CORE -> I2C1 interface */
+ static struct omap_hwmod_ocp_if omap3_l4_core__i2c1 = {
+ .master = &omap3xxx_l4_core_hwmod,
+@@ -1162,6 +1182,7 @@ static struct omap_hwmod_class_sysconfig i2c_sysc = {
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
++ .clockact = CLOCKACT_TEST_ICLK,
+ .sysc_fields = &omap_hwmod_sysc_type1,
+ };
+
+@@ -1309,6 +1330,39 @@ static struct omap_hwmod omap3xxx_uart4_hwmod = {
+ .class = &omap2_uart_class,
+ };
+
++static struct omap_hwmod_irq_info am35xx_uart4_mpu_irqs[] = {
++ { .irq = INT_35XX_UART4_IRQ, },
++	{ .irq = -1 }
++};
++
++static struct omap_hwmod_dma_info am35xx_uart4_sdma_reqs[] = {
++ { .name = "rx", .dma_req = AM35XX_DMA_UART4_RX, },
++ { .name = "tx", .dma_req = AM35XX_DMA_UART4_TX, },
++	{ .dma_req = -1 }
++};
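++
++/*
++ * The hwmod core determines the length of the mpu_irqs, sdma_reqs and
++ * address-space arrays by scanning for the terminator entries
++ * ({ .irq = -1 }, { .dma_req = -1 } and an empty { }), so each such
++ * array above must carry one.
++ */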
++
++static struct omap_hwmod_ocp_if *am35xx_uart4_slaves[] = {
++ &am35xx_l4_core__uart4,
++};
++
++static struct omap_hwmod am35xx_uart4_hwmod = {
++ .name = "uart4",
++ .mpu_irqs = am35xx_uart4_mpu_irqs,
++ .sdma_reqs = am35xx_uart4_sdma_reqs,
++ .main_clk = "uart4_fck",
++ .prcm = {
++ .omap2 = {
++ .module_offs = CORE_MOD,
++ .prcm_reg_id = 1,
++ .module_bit = OMAP3430_EN_UART4_SHIFT,
++ .idlest_reg_id = 1,
++ .idlest_idle_bit = OMAP3430_EN_UART4_SHIFT,
++ },
++ },
++ .slaves = am35xx_uart4_slaves,
++ .slaves_cnt = ARRAY_SIZE(am35xx_uart4_slaves),
++ .class = &omap2_uart_class,
++};
++
++
+ static struct omap_hwmod_class i2c_class = {
+ .name = "i2c",
+ .sysc = &i2c_sysc,
+@@ -1636,7 +1690,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_i2c1_slaves[] = {
+
+ static struct omap_hwmod omap3xxx_i2c1_hwmod = {
+ .name = "i2c1",
+- .flags = HWMOD_16BIT_REG,
++ .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
+ .mpu_irqs = omap2_i2c1_mpu_irqs,
+ .sdma_reqs = omap2_i2c1_sdma_reqs,
+ .main_clk = "i2c1_fck",
+@@ -1670,7 +1724,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_i2c2_slaves[] = {
+
+ static struct omap_hwmod omap3xxx_i2c2_hwmod = {
+ .name = "i2c2",
+- .flags = HWMOD_16BIT_REG,
++ .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
+ .mpu_irqs = omap2_i2c2_mpu_irqs,
+ .sdma_reqs = omap2_i2c2_sdma_reqs,
+ .main_clk = "i2c2_fck",
+@@ -1715,7 +1769,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_i2c3_slaves[] = {
+
+ static struct omap_hwmod omap3xxx_i2c3_hwmod = {
+ .name = "i2c3",
+- .flags = HWMOD_16BIT_REG,
++ .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
+ .mpu_irqs = i2c3_mpu_irqs,
+ .sdma_reqs = i2c3_sdma_reqs,
+ .main_clk = "i2c3_fck",
+@@ -3072,7 +3126,35 @@ static struct omap_mmc_dev_attr mmc1_dev_attr = {
+ .flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
+ };
+
+-static struct omap_hwmod omap3xxx_mmc1_hwmod = {
++/* See 35xx errata 2.1.1.128 in SPRZ278F */
++static struct omap_mmc_dev_attr mmc1_pre_es3_dev_attr = {
++ .flags = (OMAP_HSMMC_SUPPORTS_DUAL_VOLT |
++ OMAP_HSMMC_BROKEN_MULTIBLOCK_READ),
++};
++
++static struct omap_hwmod omap3xxx_pre_es3_mmc1_hwmod = {
++ .name = "mmc1",
++ .mpu_irqs = omap34xx_mmc1_mpu_irqs,
++ .sdma_reqs = omap34xx_mmc1_sdma_reqs,
++ .opt_clks = omap34xx_mmc1_opt_clks,
++ .opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc1_opt_clks),
++ .main_clk = "mmchs1_fck",
++ .prcm = {
++ .omap2 = {
++ .module_offs = CORE_MOD,
++ .prcm_reg_id = 1,
++ .module_bit = OMAP3430_EN_MMC1_SHIFT,
++ .idlest_reg_id = 1,
++ .idlest_idle_bit = OMAP3430_ST_MMC1_SHIFT,
++ },
++ },
++ .dev_attr = &mmc1_pre_es3_dev_attr,
++ .slaves = omap3xxx_mmc1_slaves,
++ .slaves_cnt = ARRAY_SIZE(omap3xxx_mmc1_slaves),
++ .class = &omap34xx_mmc_class,
++};
++
++static struct omap_hwmod omap3xxx_es3plus_mmc1_hwmod = {
+ .name = "mmc1",
+ .mpu_irqs = omap34xx_mmc1_mpu_irqs,
+ .sdma_reqs = omap34xx_mmc1_sdma_reqs,
+@@ -3115,7 +3197,34 @@ static struct omap_hwmod_ocp_if *omap3xxx_mmc2_slaves[] = {
+ &omap3xxx_l4_core__mmc2,
+ };
+
+-static struct omap_hwmod omap3xxx_mmc2_hwmod = {
++/* See 35xx errata 2.1.1.128 in SPRZ278F */
++static struct omap_mmc_dev_attr mmc2_pre_es3_dev_attr = {
++ .flags = OMAP_HSMMC_BROKEN_MULTIBLOCK_READ,
++};
++
++static struct omap_hwmod omap3xxx_pre_es3_mmc2_hwmod = {
++ .name = "mmc2",
++ .mpu_irqs = omap34xx_mmc2_mpu_irqs,
++ .sdma_reqs = omap34xx_mmc2_sdma_reqs,
++ .opt_clks = omap34xx_mmc2_opt_clks,
++ .opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc2_opt_clks),
++ .main_clk = "mmchs2_fck",
++ .prcm = {
++ .omap2 = {
++ .module_offs = CORE_MOD,
++ .prcm_reg_id = 1,
++ .module_bit = OMAP3430_EN_MMC2_SHIFT,
++ .idlest_reg_id = 1,
++ .idlest_idle_bit = OMAP3430_ST_MMC2_SHIFT,
++ },
++ },
++ .dev_attr = &mmc2_pre_es3_dev_attr,
++ .slaves = omap3xxx_mmc2_slaves,
++ .slaves_cnt = ARRAY_SIZE(omap3xxx_mmc2_slaves),
++ .class = &omap34xx_mmc_class,
++};
++
++static struct omap_hwmod omap3xxx_es3plus_mmc2_hwmod = {
+ .name = "mmc2",
+ .mpu_irqs = omap34xx_mmc2_mpu_irqs,
+ .sdma_reqs = omap34xx_mmc2_sdma_reqs,
+@@ -3177,13 +3286,223 @@ static struct omap_hwmod omap3xxx_mmc3_hwmod = {
+ .class = &omap34xx_mmc_class,
+ };
+
++/*
++ * 'usb_host_hs' class
++ * high-speed multi-port usb host controller
++ */
++static struct omap_hwmod_ocp_if omap3xxx_usb_host_hs__l3_main_2 = {
++ .master = &omap3xxx_usb_host_hs_hwmod,
++ .slave = &omap3xxx_l3_main_hwmod,
++ .clk = "core_l3_ick",
++ .user = OCP_USER_MPU,
++};
++
++static struct omap_hwmod_class_sysconfig omap3xxx_usb_host_hs_sysc = {
++ .rev_offs = 0x0000,
++ .sysc_offs = 0x0010,
++ .syss_offs = 0x0014,
++ .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
++ SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP |
++ SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
++ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
++ MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
++ .sysc_fields = &omap_hwmod_sysc_type1,
++};
++
++static struct omap_hwmod_class omap3xxx_usb_host_hs_hwmod_class = {
++ .name = "usb_host_hs",
++ .sysc = &omap3xxx_usb_host_hs_sysc,
++};
++
++static struct omap_hwmod_ocp_if *omap3xxx_usb_host_hs_masters[] = {
++ &omap3xxx_usb_host_hs__l3_main_2,
++};
++
++static struct omap_hwmod_addr_space omap3xxx_usb_host_hs_addrs[] = {
++ {
++ .name = "uhh",
++ .pa_start = 0x48064000,
++ .pa_end = 0x480643ff,
++ .flags = ADDR_TYPE_RT
++ },
++ {
++ .name = "ohci",
++ .pa_start = 0x48064400,
++ .pa_end = 0x480647ff,
++ },
++ {
++ .name = "ehci",
++ .pa_start = 0x48064800,
++ .pa_end = 0x48064cff,
++ },
++ {}
++};
++
++static struct omap_hwmod_ocp_if omap3xxx_l4_core__usb_host_hs = {
++ .master = &omap3xxx_l4_core_hwmod,
++ .slave = &omap3xxx_usb_host_hs_hwmod,
++ .clk = "usbhost_ick",
++ .addr = omap3xxx_usb_host_hs_addrs,
++ .user = OCP_USER_MPU | OCP_USER_SDMA,
++};
++
++static struct omap_hwmod_ocp_if *omap3xxx_usb_host_hs_slaves[] = {
++ &omap3xxx_l4_core__usb_host_hs,
++};
++
++static struct omap_hwmod_opt_clk omap3xxx_usb_host_hs_opt_clks[] = {
++ { .role = "ehci_logic_fck", .clk = "usbhost_120m_fck", },
++};
++
++static struct omap_hwmod_irq_info omap3xxx_usb_host_hs_irqs[] = {
++ { .name = "ohci-irq", .irq = 76 },
++ { .name = "ehci-irq", .irq = 77 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod omap3xxx_usb_host_hs_hwmod = {
++ .name = "usb_host_hs",
++ .class = &omap3xxx_usb_host_hs_hwmod_class,
++ .clkdm_name = "l3_init_clkdm",
++ .mpu_irqs = omap3xxx_usb_host_hs_irqs,
++ .main_clk = "usbhost_48m_fck",
++ .prcm = {
++ .omap2 = {
++ .module_offs = OMAP3430ES2_USBHOST_MOD,
++ .prcm_reg_id = 1,
++ .module_bit = OMAP3430ES2_EN_USBHOST1_SHIFT,
++ .idlest_reg_id = 1,
++ .idlest_idle_bit = OMAP3430ES2_ST_USBHOST_IDLE_SHIFT,
++ .idlest_stdby_bit = OMAP3430ES2_ST_USBHOST_STDBY_SHIFT,
++ },
++ },
++ .opt_clks = omap3xxx_usb_host_hs_opt_clks,
++ .opt_clks_cnt = ARRAY_SIZE(omap3xxx_usb_host_hs_opt_clks),
++ .slaves = omap3xxx_usb_host_hs_slaves,
++ .slaves_cnt = ARRAY_SIZE(omap3xxx_usb_host_hs_slaves),
++ .masters = omap3xxx_usb_host_hs_masters,
++ .masters_cnt = ARRAY_SIZE(omap3xxx_usb_host_hs_masters),
++
++ /*
++ * Errata: USBHOST Configured In Smart-Idle Can Lead To a Deadlock
++ * id: i660
++ *
++ * Description:
++	 * In the following configuration:
++	 * - the USBHOST module is set to smart-idle mode
++	 * - PRCM asserts idle_req to the USBHOST module (this typically
++	 *   happens when the system is going to a low-power mode: all ports
++	 *   have been suspended, the master part of the USBHOST module has
++	 *   entered the standby state, and SW has cut the functional clocks)
++	 * - a USBHOST interrupt occurs before the module is able to answer
++	 *   idle_ack, typically a remote wakeup IRQ.
++	 * The USBHOST module then enters a deadlock situation where it
++	 * is no longer accessible or functional.
++ *
++ * Workaround:
++ * Don't use smart idle; use only force idle, hence HWMOD_SWSUP_SIDLE
++ */
++
++ /*
++ * Errata: USB host EHCI may stall when entering smart-standby mode
++ * Id: i571
++ *
++ * Description:
++ * When the USBHOST module is set to smart-standby mode, and when it is
++ * ready to enter the standby state (i.e. all ports are suspended and
++ * all attached devices are in suspend mode), then it can wrongly assert
++ * the Mstandby signal too early while there are still some residual OCP
++ * transactions ongoing. If this condition occurs, the internal state
++ * machine may go to an undefined state and the USB link may be stuck
++ * upon the next resume.
++ *
++ * Workaround:
++ * Don't use smart standby; use only force standby,
++ * hence HWMOD_SWSUP_MSTANDBY
++ */
++
++ /*
++	 * During system boot, if the hwmod framework resets the module,
++	 * the module comes up with smart-idle settings, which can lead to
++	 * the deadlock above (Errata i660); so don't reset the module
++	 * during boot.  Use HWMOD_INIT_NO_RESET.
++ */
++
++ .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY |
++ HWMOD_INIT_NO_RESET,
++};
++
++/*
++ * 'usb_tll_hs' class
++ * usb_tll_hs module is the adapter on the usb_host_hs ports
++ */
++static struct omap_hwmod_class_sysconfig omap3xxx_usb_tll_hs_sysc = {
++ .rev_offs = 0x0000,
++ .sysc_offs = 0x0010,
++ .syss_offs = 0x0014,
++ .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
++ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
++ SYSC_HAS_AUTOIDLE),
++ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
++ .sysc_fields = &omap_hwmod_sysc_type1,
++};
++
++static struct omap_hwmod_class omap3xxx_usb_tll_hs_hwmod_class = {
++ .name = "usb_tll_hs",
++ .sysc = &omap3xxx_usb_tll_hs_sysc,
++};
++
++static struct omap_hwmod_irq_info omap3xxx_usb_tll_hs_irqs[] = {
++ { .name = "tll-irq", .irq = 78 },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod_addr_space omap3xxx_usb_tll_hs_addrs[] = {
++ {
++ .name = "tll",
++ .pa_start = 0x48062000,
++ .pa_end = 0x48062fff,
++ .flags = ADDR_TYPE_RT
++ },
++ {}
++};
++
++static struct omap_hwmod_ocp_if omap3xxx_l4_core__usb_tll_hs = {
++ .master = &omap3xxx_l4_core_hwmod,
++ .slave = &omap3xxx_usb_tll_hs_hwmod,
++ .clk = "usbtll_ick",
++ .addr = omap3xxx_usb_tll_hs_addrs,
++ .user = OCP_USER_MPU | OCP_USER_SDMA,
++};
++
++static struct omap_hwmod_ocp_if *omap3xxx_usb_tll_hs_slaves[] = {
++ &omap3xxx_l4_core__usb_tll_hs,
++};
++
++static struct omap_hwmod omap3xxx_usb_tll_hs_hwmod = {
++ .name = "usb_tll_hs",
++ .class = &omap3xxx_usb_tll_hs_hwmod_class,
++ .clkdm_name = "l3_init_clkdm",
++ .mpu_irqs = omap3xxx_usb_tll_hs_irqs,
++ .main_clk = "usbtll_fck",
++ .prcm = {
++ .omap2 = {
++ .module_offs = CORE_MOD,
++ .prcm_reg_id = 3,
++ .module_bit = OMAP3430ES2_EN_USBTLL_SHIFT,
++ .idlest_reg_id = 3,
++ .idlest_idle_bit = OMAP3430ES2_ST_USBTLL_SHIFT,
++ },
++ },
++ .slaves = omap3xxx_usb_tll_hs_slaves,
++ .slaves_cnt = ARRAY_SIZE(omap3xxx_usb_tll_hs_slaves),
++};
++
+ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
+ &omap3xxx_l3_main_hwmod,
+ &omap3xxx_l4_core_hwmod,
+ &omap3xxx_l4_per_hwmod,
+ &omap3xxx_l4_wkup_hwmod,
+- &omap3xxx_mmc1_hwmod,
+- &omap3xxx_mmc2_hwmod,
+ &omap3xxx_mmc3_hwmod,
+ &omap3xxx_mpu_hwmod,
+
+@@ -3198,12 +3517,12 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
+ &omap3xxx_timer9_hwmod,
+ &omap3xxx_timer10_hwmod,
+ &omap3xxx_timer11_hwmod,
+- &omap3xxx_timer12_hwmod,
+
+ &omap3xxx_wd_timer2_hwmod,
+ &omap3xxx_uart1_hwmod,
+ &omap3xxx_uart2_hwmod,
+ &omap3xxx_uart3_hwmod,
++
+ /* dss class */
+ &omap3xxx_dss_dispc_hwmod,
+ &omap3xxx_dss_dsi1_hwmod,
+@@ -3245,6 +3564,12 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
+ NULL,
+ };
+
++/* GP-only hwmods */
++static __initdata struct omap_hwmod *omap3xxx_gp_hwmods[] = {
++ &omap3xxx_timer12_hwmod,
++ NULL
++};
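++
++/*
++ * timer12 is not accessible on high-security (HS) devices, where it is
++ * reserved for the secure side, so it is registered only on
++ * general-purpose (GP) parts via the omap_type() check added to
++ * omap3xxx_hwmod_init() below.
++ */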
++
+ /* 3430ES1-only hwmods */
+ static __initdata struct omap_hwmod *omap3430es1_hwmods[] = {
+ &omap3430es1_dss_core_hwmod,
+@@ -3255,6 +3580,22 @@ static __initdata struct omap_hwmod *omap3430es1_hwmods[] = {
+ static __initdata struct omap_hwmod *omap3430es2plus_hwmods[] = {
+ &omap3xxx_dss_core_hwmod,
+ &omap3xxx_usbhsotg_hwmod,
++ &omap3xxx_usb_host_hs_hwmod,
++ &omap3xxx_usb_tll_hs_hwmod,
++ NULL
++};
++
++/* 3430 pre-ES3.0-only hwmods */
++static struct omap_hwmod *omap3430_pre_es3_hwmods[] __initdata = {
++ &omap3xxx_pre_es3_mmc1_hwmod,
++ &omap3xxx_pre_es3_mmc2_hwmod,
++ NULL
++};
++
++/* 3430ES3+-only hwmods */
++static struct omap_hwmod *omap3430_es3plus_hwmods[] __initdata = {
++ &omap3xxx_es3plus_mmc1_hwmod,
++ &omap3xxx_es3plus_mmc2_hwmod,
+ NULL
+ };
+
+@@ -3276,12 +3617,21 @@ static __initdata struct omap_hwmod *omap36xx_hwmods[] = {
+ &omap36xx_sr2_hwmod,
+ &omap3xxx_usbhsotg_hwmod,
+ &omap3xxx_mailbox_hwmod,
++ &omap3xxx_usb_host_hs_hwmod,
++ &omap3xxx_usb_tll_hs_hwmod,
++ &omap3xxx_es3plus_mmc1_hwmod,
++ &omap3xxx_es3plus_mmc2_hwmod,
+ NULL
+ };
+
+ static __initdata struct omap_hwmod *am35xx_hwmods[] = {
+ &omap3xxx_dss_core_hwmod, /* XXX ??? */
+ &am35xx_usbhsotg_hwmod,
++ &am35xx_uart4_hwmod,
++ &omap3xxx_usb_host_hs_hwmod,
++ &omap3xxx_usb_tll_hs_hwmod,
++ &omap3xxx_es3plus_mmc1_hwmod,
++ &omap3xxx_es3plus_mmc2_hwmod,
+ NULL
+ };
+
+@@ -3296,6 +3646,13 @@ int __init omap3xxx_hwmod_init(void)
+ if (r < 0)
+ return r;
+
++ /* Register GP-only hwmods. */
++ if (omap_type() == OMAP2_DEVICE_TYPE_GP) {
++ r = omap_hwmod_register(omap3xxx_gp_hwmods);
++ if (r < 0)
++ return r;
++ }
++
+ rev = omap_rev();
+
+ /*
+@@ -3334,6 +3691,21 @@ int __init omap3xxx_hwmod_init(void)
+ h = omap3430es2plus_hwmods;
+ };
+
++ if (h) {
++ r = omap_hwmod_register(h);
++ if (r < 0)
++ return r;
++ }
++
++ h = NULL;
++ if (rev == OMAP3430_REV_ES1_0 || rev == OMAP3430_REV_ES2_0 ||
++ rev == OMAP3430_REV_ES2_1) {
++ h = omap3430_pre_es3_hwmods;
++ } else if (rev == OMAP3430_REV_ES3_0 || rev == OMAP3430_REV_ES3_1 ||
++ rev == OMAP3430_REV_ES3_1_2) {
++ h = omap3430_es3plus_hwmods;
++ };
++
+ if (h)
+ r = omap_hwmod_register(h);
+
+diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+index daaf165..31a3084 100644
+--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
++++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+@@ -53,6 +53,7 @@ static struct omap_hwmod omap44xx_dmm_hwmod;
+ static struct omap_hwmod omap44xx_dsp_hwmod;
+ static struct omap_hwmod omap44xx_dss_hwmod;
+ static struct omap_hwmod omap44xx_emif_fw_hwmod;
++static struct omap_hwmod omap44xx_fdif_hwmod;
+ static struct omap_hwmod omap44xx_hsi_hwmod;
+ static struct omap_hwmod omap44xx_ipu_hwmod;
+ static struct omap_hwmod omap44xx_iss_hwmod;
+@@ -70,6 +71,8 @@ static struct omap_hwmod omap44xx_mmc2_hwmod;
+ static struct omap_hwmod omap44xx_mpu_hwmod;
+ static struct omap_hwmod omap44xx_mpu_private_hwmod;
+ static struct omap_hwmod omap44xx_usb_otg_hs_hwmod;
++static struct omap_hwmod omap44xx_usb_host_hs_hwmod;
++static struct omap_hwmod omap44xx_usb_tll_hs_hwmod;
+
+ /*
+ * Interconnects omap_hwmod structures
+@@ -346,6 +349,14 @@ static struct omap_hwmod_ocp_if omap44xx_dma_system__l3_main_2 = {
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+ };
+
++/* fdif -> l3_main_2 */
++static struct omap_hwmod_ocp_if omap44xx_fdif__l3_main_2 = {
++ .master = &omap44xx_fdif_hwmod,
++ .slave = &omap44xx_l3_main_2_hwmod,
++ .clk = "l3_div_ck",
++ .user = OCP_USER_MPU | OCP_USER_SDMA,
++};
++
+ /* hsi -> l3_main_2 */
+ static struct omap_hwmod_ocp_if omap44xx_hsi__l3_main_2 = {
+ .master = &omap44xx_hsi_hwmod,
+@@ -415,6 +426,7 @@ static struct omap_hwmod_ocp_if omap44xx_usb_otg_hs__l3_main_2 = {
+ /* l3_main_2 slave ports */
+ static struct omap_hwmod_ocp_if *omap44xx_l3_main_2_slaves[] = {
+ &omap44xx_dma_system__l3_main_2,
++ &omap44xx_fdif__l3_main_2,
+ &omap44xx_hsi__l3_main_2,
+ &omap44xx_ipu__l3_main_2,
+ &omap44xx_iss__l3_main_2,
+@@ -1029,6 +1041,7 @@ static struct omap_hwmod_dma_info omap44xx_dmic_sdma_reqs[] = {
+
+ static struct omap_hwmod_addr_space omap44xx_dmic_addrs[] = {
+ {
++ .name = "mpu",
+ .pa_start = 0x4012e000,
+ .pa_end = 0x4012e07f,
+ .flags = ADDR_TYPE_RT
+@@ -1047,6 +1060,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__dmic = {
+
+ static struct omap_hwmod_addr_space omap44xx_dmic_dma_addrs[] = {
+ {
++ .name = "dma",
+ .pa_start = 0x4902e000,
+ .pa_end = 0x4902e07f,
+ .flags = ADDR_TYPE_RT
+@@ -1797,6 +1811,79 @@ static struct omap_hwmod omap44xx_dss_venc_hwmod = {
+ };
+
+ /*
++ * 'fdif' class
++ * face detection hw accelerator module
++ */
++
++static struct omap_hwmod_class_sysconfig omap44xx_fdif_sysc = {
++ .rev_offs = 0x0000,
++ .sysc_offs = 0x0010,
++ .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS |
++ SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
++ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
++ MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
++ .sysc_fields = &omap_hwmod_sysc_type2,
++};
++
++static struct omap_hwmod_class omap44xx_fdif_hwmod_class = {
++ .name = "fdif",
++ .sysc = &omap44xx_fdif_sysc,
++};
++
++/* fdif */
++static struct omap_hwmod_irq_info omap44xx_fdif_irqs[] = {
++ { .irq = 69 + OMAP44XX_IRQ_GIC_START },
++ { .irq = -1 }
++};
++
++/* fdif master ports */
++static struct omap_hwmod_ocp_if *omap44xx_fdif_masters[] = {
++ &omap44xx_fdif__l3_main_2,
++};
++
++static struct omap_hwmod_addr_space omap44xx_fdif_addrs[] = {
++ {
++ .pa_start = 0x4a10a000,
++ .pa_end = 0x4a10a1ff,
++ .flags = ADDR_TYPE_RT
++ },
++ { }
++};
++
++/* l4_cfg -> fdif */
++static struct omap_hwmod_ocp_if omap44xx_l4_cfg__fdif = {
++ .master = &omap44xx_l4_cfg_hwmod,
++ .slave = &omap44xx_fdif_hwmod,
++ .clk = "l4_div_ck",
++ .addr = omap44xx_fdif_addrs,
++ .user = OCP_USER_MPU | OCP_USER_SDMA,
++};
++
++/* fdif slave ports */
++static struct omap_hwmod_ocp_if *omap44xx_fdif_slaves[] = {
++ &omap44xx_l4_cfg__fdif,
++};
++
++static struct omap_hwmod omap44xx_fdif_hwmod = {
++ .name = "fdif",
++ .class = &omap44xx_fdif_hwmod_class,
++ .clkdm_name = "iss_clkdm",
++ .mpu_irqs = omap44xx_fdif_irqs,
++ .main_clk = "fdif_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = OMAP4_CM_CAM_FDIF_CLKCTRL_OFFSET,
++ .context_offs = OMAP4_RM_CAM_FDIF_CONTEXT_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .slaves = omap44xx_fdif_slaves,
++ .slaves_cnt = ARRAY_SIZE(omap44xx_fdif_slaves),
++ .masters = omap44xx_fdif_masters,
++ .masters_cnt = ARRAY_SIZE(omap44xx_fdif_masters),
++};
++
++/*
+ * 'gpio' class
+ * general purpose io module
+ */
+@@ -2246,6 +2333,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_i2c_sysc = {
+ SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP),
++ .clockact = CLOCKACT_TEST_ICLK,
+ .sysc_fields = &omap_hwmod_sysc_type1,
+ };
+
+@@ -2300,7 +2388,7 @@ static struct omap_hwmod omap44xx_i2c1_hwmod = {
+ .name = "i2c1",
+ .class = &omap44xx_i2c_hwmod_class,
+ .clkdm_name = "l4_per_clkdm",
+- .flags = HWMOD_16BIT_REG,
++ .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
+ .mpu_irqs = omap44xx_i2c1_irqs,
+ .sdma_reqs = omap44xx_i2c1_sdma_reqs,
+ .main_clk = "i2c1_fck",
+@@ -2356,7 +2444,7 @@ static struct omap_hwmod omap44xx_i2c2_hwmod = {
+ .name = "i2c2",
+ .class = &omap44xx_i2c_hwmod_class,
+ .clkdm_name = "l4_per_clkdm",
+- .flags = HWMOD_16BIT_REG,
++ .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
+ .mpu_irqs = omap44xx_i2c2_irqs,
+ .sdma_reqs = omap44xx_i2c2_sdma_reqs,
+ .main_clk = "i2c2_fck",
+@@ -2412,7 +2500,7 @@ static struct omap_hwmod omap44xx_i2c3_hwmod = {
+ .name = "i2c3",
+ .class = &omap44xx_i2c_hwmod_class,
+ .clkdm_name = "l4_per_clkdm",
+- .flags = HWMOD_16BIT_REG,
++ .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
+ .mpu_irqs = omap44xx_i2c3_irqs,
+ .sdma_reqs = omap44xx_i2c3_sdma_reqs,
+ .main_clk = "i2c3_fck",
+@@ -2468,7 +2556,7 @@ static struct omap_hwmod omap44xx_i2c4_hwmod = {
+ .name = "i2c4",
+ .class = &omap44xx_i2c_hwmod_class,
+ .clkdm_name = "l4_per_clkdm",
+- .flags = HWMOD_16BIT_REG,
++ .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
+ .mpu_irqs = omap44xx_i2c4_irqs,
+ .sdma_reqs = omap44xx_i2c4_sdma_reqs,
+ .main_clk = "i2c4_fck",
+@@ -5276,6 +5364,207 @@ static struct omap_hwmod omap44xx_wd_timer3_hwmod = {
+ .slaves_cnt = ARRAY_SIZE(omap44xx_wd_timer3_slaves),
+ };
+
++/*
++ * 'usb_host_hs' class
++ * high-speed multi-port usb host controller
++ */
++static struct omap_hwmod_ocp_if omap44xx_usb_host_hs__l3_main_2 = {
++ .master = &omap44xx_usb_host_hs_hwmod,
++ .slave = &omap44xx_l3_main_2_hwmod,
++ .clk = "l3_div_ck",
++ .user = OCP_USER_MPU | OCP_USER_SDMA,
++};
++
++static struct omap_hwmod_class_sysconfig omap44xx_usb_host_hs_sysc = {
++ .rev_offs = 0x0000,
++ .sysc_offs = 0x0010,
++ .syss_offs = 0x0014,
++ .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE |
++ SYSC_HAS_SOFTRESET),
++ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
++ SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
++ MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
++ .sysc_fields = &omap_hwmod_sysc_type2,
++};
++
++static struct omap_hwmod_class omap44xx_usb_host_hs_hwmod_class = {
++ .name = "usb_host_hs",
++ .sysc = &omap44xx_usb_host_hs_sysc,
++};
++
++static struct omap_hwmod_ocp_if *omap44xx_usb_host_hs_masters[] = {
++ &omap44xx_usb_host_hs__l3_main_2,
++};
++
++static struct omap_hwmod_addr_space omap44xx_usb_host_hs_addrs[] = {
++ {
++ .name = "uhh",
++ .pa_start = 0x4a064000,
++ .pa_end = 0x4a0647ff,
++ .flags = ADDR_TYPE_RT
++ },
++ {
++ .name = "ohci",
++ .pa_start = 0x4a064800,
++ .pa_end = 0x4a064bff,
++ },
++ {
++ .name = "ehci",
++ .pa_start = 0x4a064c00,
++ .pa_end = 0x4a064fff,
++ },
++ {}
++};
++
++static struct omap_hwmod_irq_info omap44xx_usb_host_hs_irqs[] = {
++ { .name = "ohci-irq", .irq = 76 + OMAP44XX_IRQ_GIC_START },
++ { .name = "ehci-irq", .irq = 77 + OMAP44XX_IRQ_GIC_START },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_host_hs = {
++ .master = &omap44xx_l4_cfg_hwmod,
++ .slave = &omap44xx_usb_host_hs_hwmod,
++ .clk = "l4_div_ck",
++ .addr = omap44xx_usb_host_hs_addrs,
++ .user = OCP_USER_MPU | OCP_USER_SDMA,
++};
++
++static struct omap_hwmod_ocp_if *omap44xx_usb_host_hs_slaves[] = {
++ &omap44xx_l4_cfg__usb_host_hs,
++};
++
++static struct omap_hwmod omap44xx_usb_host_hs_hwmod = {
++ .name = "usb_host_hs",
++ .class = &omap44xx_usb_host_hs_hwmod_class,
++ .clkdm_name = "l3_init_clkdm",
++ .main_clk = "usb_host_hs_fck",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = OMAP4_CM_L3INIT_USB_HOST_CLKCTRL_OFFSET,
++ .context_offs = OMAP4_RM_L3INIT_USB_HOST_CONTEXT_OFFSET,
++ .modulemode = MODULEMODE_SWCTRL,
++ },
++ },
++ .mpu_irqs = omap44xx_usb_host_hs_irqs,
++ .slaves = omap44xx_usb_host_hs_slaves,
++ .slaves_cnt = ARRAY_SIZE(omap44xx_usb_host_hs_slaves),
++ .masters = omap44xx_usb_host_hs_masters,
++ .masters_cnt = ARRAY_SIZE(omap44xx_usb_host_hs_masters),
++
++ /*
++ * Errata: USBHOST Configured In Smart-Idle Can Lead To a Deadlock
++ * id: i660
++ *
++ * Description:
++	 * In the following configuration:
++	 * - the USBHOST module is set to smart-idle mode
++	 * - PRCM asserts idle_req to the USBHOST module (this typically
++	 *   happens when the system is going to a low-power mode: all ports
++	 *   have been suspended, the master part of the USBHOST module has
++	 *   entered the standby state, and SW has cut the functional clocks)
++	 * - a USBHOST interrupt occurs before the module is able to answer
++	 *   idle_ack, typically a remote wakeup IRQ.
++	 * The USBHOST module then enters a deadlock situation where it
++	 * is no longer accessible or functional.
++ *
++ * Workaround:
++ * Don't use smart idle; use only force idle, hence HWMOD_SWSUP_SIDLE
++ */
++
++ /*
++ * Errata: USB host EHCI may stall when entering smart-standby mode
++ * Id: i571
++ *
++ * Description:
++ * When the USBHOST module is set to smart-standby mode, and when it is
++ * ready to enter the standby state (i.e. all ports are suspended and
++ * all attached devices are in suspend mode), then it can wrongly assert
++ * the Mstandby signal too early while there are still some residual OCP
++ * transactions ongoing. If this condition occurs, the internal state
++ * machine may go to an undefined state and the USB link may be stuck
++ * upon the next resume.
++ *
++ * Workaround:
++ * Don't use smart standby; use only force standby,
++ * hence HWMOD_SWSUP_MSTANDBY
++ */
++
++ /*
++	 * During system boot, if the hwmod framework resets the module,
++	 * the module comes up with smart-idle settings, which can lead to
++	 * the deadlock above (Errata i660); so don't reset the module
++	 * during boot.  Use HWMOD_INIT_NO_RESET.
++ */
++
++ .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY |
++ HWMOD_INIT_NO_RESET,
++};
++
++/*
++ * 'usb_tll_hs' class
++ * usb_tll_hs module is the adapter on the usb_host_hs ports
++ */
++static struct omap_hwmod_class_sysconfig omap44xx_usb_tll_hs_sysc = {
++ .rev_offs = 0x0000,
++ .sysc_offs = 0x0010,
++ .syss_offs = 0x0014,
++ .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
++ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
++ SYSC_HAS_AUTOIDLE),
++ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
++ .sysc_fields = &omap_hwmod_sysc_type1,
++};
++
++static struct omap_hwmod_class omap44xx_usb_tll_hs_hwmod_class = {
++ .name = "usb_tll_hs",
++ .sysc = &omap44xx_usb_tll_hs_sysc,
++};
++
++static struct omap_hwmod_irq_info omap44xx_usb_tll_hs_irqs[] = {
++ { .name = "tll-irq", .irq = 78 + OMAP44XX_IRQ_GIC_START },
++ { .irq = -1 }
++};
++
++static struct omap_hwmod_addr_space omap44xx_usb_tll_hs_addrs[] = {
++ {
++ .name = "tll",
++ .pa_start = 0x4a062000,
++ .pa_end = 0x4a063fff,
++ .flags = ADDR_TYPE_RT
++ },
++ {}
++};
++
++static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_tll_hs = {
++ .master = &omap44xx_l4_cfg_hwmod,
++ .slave = &omap44xx_usb_tll_hs_hwmod,
++ .clk = "l4_div_ck",
++ .addr = omap44xx_usb_tll_hs_addrs,
++ .user = OCP_USER_MPU | OCP_USER_SDMA,
++};
++
++static struct omap_hwmod_ocp_if *omap44xx_usb_tll_hs_slaves[] = {
++ &omap44xx_l4_cfg__usb_tll_hs,
++};
++
++static struct omap_hwmod omap44xx_usb_tll_hs_hwmod = {
++ .name = "usb_tll_hs",
++ .class = &omap44xx_usb_tll_hs_hwmod_class,
++ .clkdm_name = "l3_init_clkdm",
++ .main_clk = "usb_tll_hs_ick",
++ .prcm = {
++ .omap4 = {
++ .clkctrl_offs = OMAP4_CM_L3INIT_USB_TLL_CLKCTRL_OFFSET,
++ .context_offs = OMAP4_RM_L3INIT_USB_TLL_CONTEXT_OFFSET,
++ .modulemode = MODULEMODE_HWCTRL,
++ },
++ },
++ .mpu_irqs = omap44xx_usb_tll_hs_irqs,
++ .slaves = omap44xx_usb_tll_hs_slaves,
++ .slaves_cnt = ARRAY_SIZE(omap44xx_usb_tll_hs_slaves),
++};
++
+ static __initdata struct omap_hwmod *omap44xx_hwmods[] = {
+
+ /* dmm class */
+@@ -5327,6 +5616,9 @@ static __initdata struct omap_hwmod *omap44xx_hwmods[] = {
+ &omap44xx_dss_rfbi_hwmod,
+ &omap44xx_dss_venc_hwmod,
+
++ /* fdif class */
++ &omap44xx_fdif_hwmod,
++
+ /* gpio class */
+ &omap44xx_gpio1_hwmod,
+ &omap44xx_gpio2_hwmod,
+@@ -5415,13 +5707,16 @@ static __initdata struct omap_hwmod *omap44xx_hwmods[] = {
+ &omap44xx_uart3_hwmod,
+ &omap44xx_uart4_hwmod,
+
++ /* usb host class */
++ &omap44xx_usb_host_hs_hwmod,
++ &omap44xx_usb_tll_hs_hwmod,
++
+ /* usb_otg_hs class */
+ &omap44xx_usb_otg_hs_hwmod,
+
+ /* wd_timer class */
+ &omap44xx_wd_timer2_hwmod,
+ &omap44xx_wd_timer3_hwmod,
+-
+ NULL,
+ };
+
+diff --git a/arch/arm/mach-omap2/omap_hwmod_common_data.c b/arch/arm/mach-omap2/omap_hwmod_common_data.c
+index 51e5418..063925c 100644
+--- a/arch/arm/mach-omap2/omap_hwmod_common_data.c
++++ b/arch/arm/mach-omap2/omap_hwmod_common_data.c
+@@ -49,6 +49,28 @@ struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2 = {
+ .srst_shift = SYSC_TYPE2_SOFTRESET_SHIFT,
+ };
+
++/**
++ * struct omap_hwmod_sysc_type3 - TYPE3 sysconfig scheme.
++ * Used by some IPs on AM33xx
++ */
++struct omap_hwmod_sysc_fields omap_hwmod_sysc_type3 = {
++ .midle_shift = SYSC_TYPE3_MIDLEMODE_SHIFT,
++ .sidle_shift = SYSC_TYPE3_SIDLEMODE_SHIFT,
++};
++
++/**
++ * struct omap_hwmod_sysc_type4 - TYPE4 sysconfig scheme.
++ * Used by some IPs on AM33xx
++ */
++struct omap_hwmod_sysc_fields omap_hwmod_sysc_type4 = {
++ .midle_shift = SYSC_TYPE4_MIDLEMODE_SHIFT,
++ .clkact_shift = SYSC_TYPE4_CLOCKACTIVITY_SHIFT,
++ .sidle_shift = SYSC_TYPE4_SIDLEMODE_SHIFT,
++ .enwkup_shift = SYSC_TYPE4_ENAWAKEUP_SHIFT,
++ .srst_shift = SYSC_TYPE4_SOFTRESET_SHIFT,
++ .autoidle_shift = SYSC_TYPE4_AUTOIDLE_SHIFT,
++};
++
+ struct omap_dss_dispc_dev_attr omap2_3_dss_dispc_dev_attr = {
+ .manager_count = 2,
+ .has_framedonetv_irq = 0
+diff --git a/arch/arm/mach-omap2/omap_phy_internal.c b/arch/arm/mach-omap2/omap_phy_internal.c
+index 58775e3..b97a54a 100644
+--- a/arch/arm/mach-omap2/omap_phy_internal.c
++++ b/arch/arm/mach-omap2/omap_phy_internal.c
+@@ -29,6 +29,8 @@
+ #include <linux/usb.h>
+
+ #include <plat/usb.h>
++#include <plat/am33xx.h>
++#include <plat/ti81xx.h>
+ #include "control.h"
+
+ /* OMAP control module register for UTMI PHY */
+@@ -185,7 +187,7 @@ void am35x_musb_reset(void)
+ regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
+ }
+
+-void am35x_musb_phy_power(u8 on)
++void am35x_musb_phy_power(u8 id, u8 on)
+ {
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+ u32 devconf2;
+@@ -260,3 +262,43 @@ void am35x_set_mode(u8 musb_mode)
+
+ omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
+ }
++
++void ti81xx_musb_phy_power(u8 id, u8 on)
++{
++ void __iomem *scm_base = NULL;
++ u32 usbphycfg;
++
++ if (cpu_is_ti816x())
++ scm_base = ioremap(TI81XX_SCM_BASE, SZ_2K);
++ else if (cpu_is_am33xx())
++ scm_base = ioremap(AM33XX_SCM_BASE, SZ_2K);
++
++ if (!scm_base) {
++ pr_err("system control module ioremap failed\n");
++ return;
++ }
++
++ usbphycfg = __raw_readl(scm_base + (id ? USBCTRL1 : USBCTRL0));
++
++ if (on) {
++ if (cpu_is_ti816x()) {
++ usbphycfg |= id ? TI816X_USBPHY1_NORMAL_MODE :
++ TI816X_USBPHY0_NORMAL_MODE;
++ usbphycfg &= ~TI816X_USBPHY_REFCLK_OSC;
++ } else if (cpu_is_am33xx()) {
++ usbphycfg &= ~(USBPHY_CM_PWRDN | USBPHY_OTG_PWRDN);
++ usbphycfg |= (USBPHY_OTGVDET_EN | USBPHY_OTGSESSEND_EN);
++ }
++ } else {
++ if (cpu_is_ti816x())
++ usbphycfg &= ~((id ? TI816X_USBPHY1_NORMAL_MODE :
++ TI816X_USBPHY0_NORMAL_MODE)
++ | TI816X_USBPHY_REFCLK_OSC);
++ else if (cpu_is_am33xx())
++ usbphycfg |= USBPHY_CM_PWRDN | USBPHY_OTG_PWRDN;
++
++ }
++ __raw_writel(usbphycfg, scm_base + (id ? USBCTRL1 : USBCTRL0));
++
++ iounmap(scm_base);
++}
+diff --git a/arch/arm/mach-omap2/omap_twl.c b/arch/arm/mach-omap2/omap_twl.c
+index f515a1a..91f3c2e 100644
+--- a/arch/arm/mach-omap2/omap_twl.c
++++ b/arch/arm/mach-omap2/omap_twl.c
+@@ -285,7 +285,7 @@ int __init omap3_twl_init(void)
+ {
+ struct voltagedomain *voltdm;
+
+- if (!cpu_is_omap34xx())
++ if (!cpu_is_omap34xx() || cpu_is_am33xx())
+ return -ENODEV;
+
+ if (cpu_is_omap3630()) {
+diff --git a/arch/arm/mach-omap2/opp2xxx.h b/arch/arm/mach-omap2/opp2xxx.h
+index 8affc66..8fae534 100644
+--- a/arch/arm/mach-omap2/opp2xxx.h
++++ b/arch/arm/mach-omap2/opp2xxx.h
+@@ -51,7 +51,7 @@ struct prcm_config {
+ unsigned long cm_clksel2_pll; /* dpllx1 or x2 out */
+ unsigned long cm_clksel_mdm; /* modem dividers 2430 only */
+ unsigned long base_sdrc_rfr; /* base refresh timing for a set */
+- unsigned char flags;
++ unsigned short flags;
+ };
+
+
+diff --git a/arch/arm/mach-omap2/opp3xxx_data.c b/arch/arm/mach-omap2/opp3xxx_data.c
+index d95f3f9..0e540c8 100644
+--- a/arch/arm/mach-omap2/opp3xxx_data.c
++++ b/arch/arm/mach-omap2/opp3xxx_data.c
+@@ -150,6 +150,26 @@ static struct omap_opp_def __initdata omap36xx_opp_def_list[] = {
+ OPP_INITIALIZER("iva", false, 800000000, OMAP3630_VDD_MPU_OPP1G_UV),
+ };
+
++/* 33xx */
++
++/* VDD1 */
++
++#define AM33XX_VDD_MPU_OPP50_UV 950000
++#define AM33XX_VDD_MPU_OPP100_UV 1100000
++#define AM33XX_VDD_MPU_OPP120_UV 1200000
++#define AM33XX_VDD_MPU_OPPTURBO_UV 1260000
++
++static struct omap_opp_def __initdata am33xx_opp_def_list[] = {
++ /* MPU OPP1 - OPP50 */
++ OPP_INITIALIZER("mpu", true, 275000000, AM33XX_VDD_MPU_OPP50_UV),
++ /* MPU OPP2 - OPP100 */
++ OPP_INITIALIZER("mpu", true, 500000000, AM33XX_VDD_MPU_OPP100_UV),
++ /* MPU OPP3 - OPP120 */
++ OPP_INITIALIZER("mpu", true, 600000000, AM33XX_VDD_MPU_OPP120_UV),
++ /* MPU OPP4 - OPPTurbo */
++ OPP_INITIALIZER("mpu", true, 720000000, AM33XX_VDD_MPU_OPPTURBO_UV),
++};
++
+ /**
+ * omap3_opp_init() - initialize omap3 opp table
+ */
+@@ -163,6 +183,9 @@ int __init omap3_opp_init(void)
+ if (cpu_is_omap3630())
+ r = omap_init_opp_table(omap36xx_opp_def_list,
+ ARRAY_SIZE(omap36xx_opp_def_list));
++ else if (cpu_is_am33xx())
++ r = omap_init_opp_table(am33xx_opp_def_list,
++ ARRAY_SIZE(am33xx_opp_def_list));
+ else
+ r = omap_init_opp_table(omap34xx_opp_def_list,
+ ARRAY_SIZE(omap34xx_opp_def_list));
+diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
+index 00bff46..5f98ef8 100644
+--- a/arch/arm/mach-omap2/pm.c
++++ b/arch/arm/mach-omap2/pm.c
+@@ -18,7 +18,7 @@
+
+ #include <plat/omap-pm.h>
+ #include <plat/omap_device.h>
+-#include <plat/common.h>
++#include "common.h"
+
+ #include "voltage.h"
+ #include "powerdomain.h"
+@@ -198,7 +198,7 @@ exit:
+
+ static void __init omap3_init_voltages(void)
+ {
+- if (!cpu_is_omap34xx())
++ if (!cpu_is_omap34xx() || cpu_is_am33xx())
+ return;
+
+ omap2_set_init_voltage("mpu_iva", "dpll1_ck", "mpu");
+diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
+index 4e166ad..68aeafc 100644
+--- a/arch/arm/mach-omap2/pm.h
++++ b/arch/arm/mach-omap2/pm.h
+@@ -21,6 +21,7 @@ extern void omap_sram_idle(void);
+ extern int omap3_can_sleep(void);
+ extern int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state);
+ extern int omap3_idle_init(void);
++extern int omap4_idle_init(void);
+
+ #if defined(CONFIG_PM_OPP)
+ extern int omap3_opp_init(void);
+@@ -95,6 +96,15 @@ extern unsigned int save_secure_ram_context_sz;
+
+ extern void omap3_save_scratchpad_contents(void);
+
++/* 33xx */
++/* am33xx_do_wfi function pointer and size, for copy to SRAM */
++extern void am33xx_do_wfi(void);
++extern unsigned int am33xx_do_wfi_sz;
++/* ... and its pointer from SRAM after copy */
++extern void (*am33xx_do_wfi_sram)(void);
++/* The resume location */
++extern void am33xx_resume_vector(void);
++
+ #define PM_RTA_ERRATUM_i608 (1 << 0)
+ #define PM_SDRC_WAKEUP_ERRATUM_i583 (1 << 1)
+
+diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
+index cf0c216..b8822f8 100644
+--- a/arch/arm/mach-omap2/pm24xx.c
++++ b/arch/arm/mach-omap2/pm24xx.c
+@@ -30,7 +30,6 @@
+ #include <linux/irq.h>
+ #include <linux/time.h>
+ #include <linux/gpio.h>
+-#include <linux/console.h>
+
+ #include <asm/mach/time.h>
+ #include <asm/mach/irq.h>
+@@ -42,6 +41,7 @@
+ #include <plat/dma.h>
+ #include <plat/board.h>
+
++#include "common.h"
+ #include "prm2xxx_3xxx.h"
+ #include "prm-regbits-24xx.h"
+ #include "cm2xxx_3xxx.h"
+@@ -126,27 +126,11 @@ static void omap2_enter_full_retention(void)
+ if (omap_irq_pending())
+ goto no_sleep;
+
+- /* Block console output in case it is on one of the OMAP UARTs */
+- if (!is_suspending())
+- if (!console_trylock())
+- goto no_sleep;
+-
+- omap_uart_prepare_idle(0);
+- omap_uart_prepare_idle(1);
+- omap_uart_prepare_idle(2);
+-
+ /* Jump to SRAM suspend code */
+ omap2_sram_suspend(sdrc_read_reg(SDRC_DLLA_CTRL),
+ OMAP_SDRC_REGADDR(SDRC_DLLA_CTRL),
+ OMAP_SDRC_REGADDR(SDRC_POWER));
+
+- omap_uart_resume_idle(2);
+- omap_uart_resume_idle(1);
+- omap_uart_resume_idle(0);
+-
+- if (!is_suspending())
+- console_unlock();
+-
+ no_sleep:
+ omap2_gpio_resume_after_idle();
+
+@@ -238,8 +222,6 @@ static int omap2_can_sleep(void)
+ {
+ if (omap2_fclks_active())
+ return 0;
+- if (!omap_uart_can_sleep())
+- return 0;
+ if (osc_ck->usecount > 1)
+ return 0;
+ if (omap_dma_running())
+@@ -290,7 +272,6 @@ static int omap2_pm_suspend(void)
+ mir1 = omap_readl(0x480fe0a4);
+ omap_writel(1 << 5, 0x480fe0ac);
+
+- omap_uart_prepare_suspend();
+ omap2_enter_full_retention();
+
+ omap_writel(mir1, 0x480fe0a4);
+diff --git a/arch/arm/mach-omap2/pm33xx.c b/arch/arm/mach-omap2/pm33xx.c
+new file mode 100644
+index 0000000..70bcb42
+--- /dev/null
++++ b/arch/arm/mach-omap2/pm33xx.c
+@@ -0,0 +1,569 @@
++/*
++ * AM33XX Power Management Routines
++ *
++ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/clk.h>
++#include <linux/console.h>
++#include <linux/err.h>
++#include <linux/firmware.h>
++#include <linux/io.h>
++#include <linux/platform_device.h>
++#include <linux/sched.h>
++#include <linux/suspend.h>
++#include <linux/completion.h>
++#include <linux/pm_runtime.h>
++
++#include <plat/prcm.h>
++#include <plat/mailbox.h>
++#include <plat/sram.h>
++#include <plat/omap_hwmod.h>
++#include <plat/omap_device.h>
++
++#include <asm/suspend.h>
++#include <asm/proc-fns.h>
++#include <asm/sizes.h>
++
++#include "pm.h"
++#include "cm33xx.h"
++#include "pm33xx.h"
++#include "control.h"
++#include "clockdomain.h"
++#include "powerdomain.h"
++
++void (*am33xx_do_wfi_sram)(void);
++
++#define DS_MODE DS0_ID /* DS0/1_ID */
++#define MODULE_DISABLE 0x0
++#define MODULE_ENABLE 0x2
++
++#ifdef CONFIG_SUSPEND
++
++void __iomem *ipc_regs;
++void __iomem *m3_eoi;
++void __iomem *m3_code;
++
++bool enable_deep_sleep = true;
++static suspend_state_t suspend_state = PM_SUSPEND_ON;
++
++static struct device *mpu_dev;
++static struct omap_mbox *m3_mbox;
++static struct powerdomain *cefuse_pwrdm, *gfx_pwrdm;
++static struct clockdomain *gfx_l3_clkdm, *gfx_l4ls_clkdm;
++
++static struct am33xx_padconf lp_padconf;
++static int gmii_sel;
++
++static int core_suspend_stat = -1;
++static int m3_state = M3_STATE_UNKNOWN;
++
++static int am33xx_ipc_cmd(struct a8_wkup_m3_ipc_data *);
++static int am33xx_verify_lp_state(void);
++static void am33xx_m3_state_machine_reset(void);
++
++static DECLARE_COMPLETION(a8_m3_sync);
++
++static void save_padconf(void)
++{
++ lp_padconf.mii1_col = readl(AM33XX_CTRL_REGADDR(0x0908));
++ lp_padconf.mii1_crs = readl(AM33XX_CTRL_REGADDR(0x090c));
++ lp_padconf.mii1_rxerr = readl(AM33XX_CTRL_REGADDR(0x0910));
++ lp_padconf.mii1_txen = readl(AM33XX_CTRL_REGADDR(0x0914));
++ lp_padconf.mii1_rxdv = readl(AM33XX_CTRL_REGADDR(0x0918));
++ lp_padconf.mii1_txd3 = readl(AM33XX_CTRL_REGADDR(0x091c));
++ lp_padconf.mii1_txd2 = readl(AM33XX_CTRL_REGADDR(0x0920));
++ lp_padconf.mii1_txd1 = readl(AM33XX_CTRL_REGADDR(0x0924));
++ lp_padconf.mii1_txd0 = readl(AM33XX_CTRL_REGADDR(0x0928));
++ lp_padconf.mii1_txclk = readl(AM33XX_CTRL_REGADDR(0x092c));
++ lp_padconf.mii1_rxclk = readl(AM33XX_CTRL_REGADDR(0x0930));
++ lp_padconf.mii1_rxd3 = readl(AM33XX_CTRL_REGADDR(0x0934));
++ lp_padconf.mii1_rxd2 = readl(AM33XX_CTRL_REGADDR(0x0938));
++ lp_padconf.mii1_rxd1 = readl(AM33XX_CTRL_REGADDR(0x093c));
++ lp_padconf.mii1_rxd0 = readl(AM33XX_CTRL_REGADDR(0x0940));
++ lp_padconf.rmii1_refclk = readl(AM33XX_CTRL_REGADDR(0x0944));
++ lp_padconf.mdio_data = readl(AM33XX_CTRL_REGADDR(0x0948));
++ lp_padconf.mdio_clk = readl(AM33XX_CTRL_REGADDR(0x094c));
++ gmii_sel = readl(AM33XX_CTRL_REGADDR(0x0650));
++}
++
++static void restore_padconf(void)
++{
++ writel(lp_padconf.mii1_col, AM33XX_CTRL_REGADDR(0x0908));
++ writel(lp_padconf.mii1_crs, AM33XX_CTRL_REGADDR(0x090c));
++ writel(lp_padconf.mii1_rxerr, AM33XX_CTRL_REGADDR(0x0910));
++ writel(lp_padconf.mii1_txen, AM33XX_CTRL_REGADDR(0x0914));
++ writel(lp_padconf.mii1_rxdv, AM33XX_CTRL_REGADDR(0x0918));
++ writel(lp_padconf.mii1_txd3, AM33XX_CTRL_REGADDR(0x091c));
++ writel(lp_padconf.mii1_txd2, AM33XX_CTRL_REGADDR(0x0920));
++ writel(lp_padconf.mii1_txd1, AM33XX_CTRL_REGADDR(0x0924));
++ writel(lp_padconf.mii1_txd0, AM33XX_CTRL_REGADDR(0x0928));
++ writel(lp_padconf.mii1_txclk, AM33XX_CTRL_REGADDR(0x092c));
++ writel(lp_padconf.mii1_rxclk, AM33XX_CTRL_REGADDR(0x0930));
++ writel(lp_padconf.mii1_rxd3, AM33XX_CTRL_REGADDR(0x0934));
++ writel(lp_padconf.mii1_rxd2, AM33XX_CTRL_REGADDR(0x0938));
++ writel(lp_padconf.mii1_rxd1, AM33XX_CTRL_REGADDR(0x093c));
++ writel(lp_padconf.mii1_rxd0, AM33XX_CTRL_REGADDR(0x0940));
++ writel(lp_padconf.rmii1_refclk, AM33XX_CTRL_REGADDR(0x0944));
++ writel(lp_padconf.mdio_data, AM33XX_CTRL_REGADDR(0x0948));
++ writel(lp_padconf.mdio_clk, AM33XX_CTRL_REGADDR(0x094c));
++ writel(gmii_sel, AM33XX_CTRL_REGADDR(0x0650));
++}
++
++static int am33xx_pm_prepare_late(void)
++{
++ int ret = 0;
++
++ save_padconf();
++
++ return ret;
++}
++
++static void am33xx_pm_finish(void)
++{
++ restore_padconf();
++}
++
++static int am33xx_do_sram_idle(unsigned long state)
++{
++ am33xx_do_wfi_sram();
++ return 0;
++}
++
++static int am33xx_pm_suspend(void)
++{
++ int state, ret = 0;
++
++ struct omap_hwmod *cpgmac_oh, *gpmc_oh, *usb_oh;
++
++ cpgmac_oh = omap_hwmod_lookup("cpgmac0");
++ usb_oh = omap_hwmod_lookup("usb_otg_hs");
++ gpmc_oh = omap_hwmod_lookup("gpmc");
++
++ omap_hwmod_enable(cpgmac_oh);
++ omap_hwmod_enable(usb_oh);
++ omap_hwmod_enable(gpmc_oh);
++
++ omap_hwmod_idle(cpgmac_oh);
++ omap_hwmod_idle(usb_oh);
++ omap_hwmod_idle(gpmc_oh);
++
++ if (gfx_l3_clkdm && gfx_l4ls_clkdm) {
++ clkdm_sleep(gfx_l3_clkdm);
++ clkdm_sleep(gfx_l4ls_clkdm);
++ }
++
++ /* Try to put GFX to sleep */
++ if (gfx_pwrdm)
++ pwrdm_set_next_pwrst(gfx_pwrdm, PWRDM_POWER_OFF);
++ else
++ pr_err("Could not program GFX to low power state\n");
++
++ writel(0x0, AM33XX_CM_MPU_MPU_CLKCTRL);
++
++ ret = cpu_suspend(0, am33xx_do_sram_idle);
++
++ writel(0x2, AM33XX_CM_MPU_MPU_CLKCTRL);
++
++ if (gfx_pwrdm) {
++ state = pwrdm_read_pwrst(gfx_pwrdm);
++ if (state != PWRDM_POWER_OFF)
++ pr_err("GFX domain did not transition to low power state\n");
++ else
++ pr_info("GFX domain entered low power state\n");
++ }
++
++	/* XXX: Why do we need to wake up the clockdomains? */
++	if (gfx_l3_clkdm && gfx_l4ls_clkdm) {
++ clkdm_wakeup(gfx_l3_clkdm);
++ clkdm_wakeup(gfx_l4ls_clkdm);
++ }
++
++ core_suspend_stat = ret;
++
++ return ret;
++}
++
++static int am33xx_pm_enter(suspend_state_t unused)
++{
++ int ret = 0;
++
++ switch (suspend_state) {
++ case PM_SUSPEND_STANDBY:
++ case PM_SUSPEND_MEM:
++ ret = am33xx_pm_suspend();
++ break;
++ default:
++ ret = -EINVAL;
++ }
++
++ return ret;
++}
++
++static int am33xx_pm_begin(suspend_state_t state)
++{
++ int ret = 0;
++
++ disable_hlt();
++
++ am33xx_lp_ipc.resume_addr = DS_RESUME_ADDR;
++ am33xx_lp_ipc.sleep_mode = DS_MODE;
++ am33xx_lp_ipc.ipc_data1 = DS_IPC_DEFAULT;
++ am33xx_lp_ipc.ipc_data2 = DS_IPC_DEFAULT;
++
++ am33xx_ipc_cmd(&am33xx_lp_ipc);
++
++ m3_state = M3_STATE_MSG_FOR_LP;
++
++ omap_mbox_enable_irq(m3_mbox, IRQ_RX);
++
++ ret = omap_mbox_msg_send(m3_mbox, 0xABCDABCD);
++ if (ret) {
++ pr_err("A8<->CM3 MSG for LP failed\n");
++ am33xx_m3_state_machine_reset();
++ ret = -1;
++ }
++
++ if (!wait_for_completion_timeout(&a8_m3_sync, msecs_to_jiffies(5000))) {
++ pr_err("A8<->CM3 sync failure\n");
++ am33xx_m3_state_machine_reset();
++ ret = -1;
++ } else {
++ pr_debug("Message sent for entering %s\n",
++ (DS_MODE == DS0_ID ? "DS0" : "DS1"));
++ omap_mbox_disable_irq(m3_mbox, IRQ_RX);
++ }
++
++ suspend_state = state;
++ return ret;
++}
++
++static void am33xx_m3_state_machine_reset(void)
++{
++ int ret = 0;
++
++ am33xx_lp_ipc.resume_addr = 0x0;
++ am33xx_lp_ipc.sleep_mode = 0xe;
++ am33xx_lp_ipc.ipc_data1 = DS_IPC_DEFAULT;
++ am33xx_lp_ipc.ipc_data2 = DS_IPC_DEFAULT;
++
++ am33xx_ipc_cmd(&am33xx_lp_ipc);
++
++ m3_state = M3_STATE_MSG_FOR_RESET;
++
++ ret = omap_mbox_msg_send(m3_mbox, 0xABCDABCD);
++ if (!ret) {
++ pr_debug("Message sent for resetting M3 state machine\n");
++ if (!wait_for_completion_timeout(&a8_m3_sync, msecs_to_jiffies(5000)))
++ pr_err("A8<->CM3 sync failure\n");
++ } else {
++ pr_err("Could not reset M3 state machine!!!\n");
++ m3_state = M3_STATE_UNKNOWN;
++ }
++}
++
++static void am33xx_pm_end(void)
++{
++ int ret;
++
++ suspend_state = PM_SUSPEND_ON;
++
++ ret = am33xx_verify_lp_state();
++
++ omap_mbox_enable_irq(m3_mbox, IRQ_RX);
++
++ am33xx_m3_state_machine_reset();
++
++ enable_hlt();
++
++ return;
++}
++
++static const struct platform_suspend_ops am33xx_pm_ops = {
++ .begin = am33xx_pm_begin,
++ .end = am33xx_pm_end,
++ .enter = am33xx_pm_enter,
++ .valid = suspend_valid_only_mem,
++ .prepare = am33xx_pm_prepare_late,
++ .finish = am33xx_pm_finish,
++};
++
++int am33xx_ipc_cmd(struct a8_wkup_m3_ipc_data *data)
++{
++ writel(data->resume_addr, ipc_regs);
++ writel(data->sleep_mode, ipc_regs + 0x4);
++ writel(data->ipc_data1, ipc_regs + 0x8);
++ writel(data->ipc_data2, ipc_regs + 0xc);
++
++ return 0;
++}
++
++/* return 0 if no M3 reset is needed, -1 otherwise */
++static int am33xx_verify_lp_state(void)
++{
++ int status, ret = 0;
++
++ if (core_suspend_stat) {
++ pr_err("Kernel core reported suspend failure\n");
++ ret = -1;
++ goto clear_old_status;
++ }
++
++ status = readl(ipc_regs + 0x4);
++ status &= 0xffff0000;
++
++ if (status == 0x0) {
++ pr_info("Successfully transitioned all domains to low power state\n");
++ goto clear_old_status;
++ } else if (status == 0x10000) {
++ pr_err("Could not enter low power state\n"
++ "Please check for active clocks in PER domain\n");
++ ret = -1;
++ goto clear_old_status;
++ } else {
++ pr_err("Something is terribly wrong :(\nStatus = %0x\n",
++ status);
++ ret = -1;
++ }
++
++clear_old_status:
++	/* After decoding, write back the bad status */
++ status = readl(ipc_regs + 0x4);
++ status &= 0xffff0000;
++ status |= 0x10000;
++ writel(status, ipc_regs + 0x4);
++
++ return ret;
++}
++
++/*
++ * Dummy notifier for the mailbox
++ * TODO: Can this be completely removed?
++ */
++int wkup_m3_mbox_msg(struct notifier_block *self, unsigned long len, void *msg)
++{
++ return 0;
++}
++
++static struct notifier_block wkup_m3_mbox_notifier = {
++ .notifier_call = wkup_m3_mbox_msg,
++};
++
++static irqreturn_t wkup_m3_txev_handler(int irq, void *unused)
++{
++ writel(0x1, m3_eoi);
++
++ if (m3_state == M3_STATE_RESET) {
++ m3_state = M3_STATE_INITED;
++ } else if (m3_state == M3_STATE_MSG_FOR_RESET) {
++ m3_state = M3_STATE_INITED;
++ omap_mbox_msg_rx_flush(m3_mbox);
++ if (m3_mbox->ops->ack_irq)
++ m3_mbox->ops->ack_irq(m3_mbox, IRQ_RX);
++ complete(&a8_m3_sync);
++ } else if (m3_state == M3_STATE_MSG_FOR_LP) {
++ omap_mbox_msg_rx_flush(m3_mbox);
++ if (m3_mbox->ops->ack_irq)
++ m3_mbox->ops->ack_irq(m3_mbox, IRQ_RX);
++ complete(&a8_m3_sync);
++ } else if (m3_state == M3_STATE_UNKNOWN) {
++ pr_err("IRQ %d with CM3 in unknown state\n", irq);
++ omap_mbox_msg_rx_flush(m3_mbox);
++ if (m3_mbox->ops->ack_irq)
++ m3_mbox->ops->ack_irq(m3_mbox, IRQ_RX);
++ return IRQ_NONE;
++ }
++
++ writel(0x0, m3_eoi);
++
++ return IRQ_HANDLED;
++}
++
++/* Initialize WKUP_M3, load the binary blob and let it run */
++static int wkup_m3_init(void)
++{
++ struct clk *m3_clk;
++ struct omap_hwmod *wkup_m3_oh;
++ const struct firmware *firmware;
++ int ret = 0;
++
++ wkup_m3_oh = omap_hwmod_lookup("wkup_m3");
++
++ if (!wkup_m3_oh) {
++ pr_err("%s: could not find omap_hwmod\n", __func__);
++ ret = -ENODEV;
++ goto exit;
++ }
++
++ ipc_regs = ioremap(A8_M3_IPC_REGS, 0x4*8);
++ if (!ipc_regs) {
++		pr_err("Could not ioremap the IPC area\n");
++ ret = -ENOMEM;
++ goto exit;
++ }
++
++ m3_eoi = ioremap(M3_TXEV_EOI, 0x4);
++ if (!m3_eoi) {
++ pr_err("Could not ioremap the EOI register\n");
++ ret = -ENOMEM;
++ goto err1;
++ }
++
++ /* Reserve the MBOX for sending messages to M3 */
++ m3_mbox = omap_mbox_get("wkup_m3", &wkup_m3_mbox_notifier);
++ if (IS_ERR(m3_mbox)) {
++ pr_err("Could not reserve mailbox for A8->M3 IPC\n");
++ ret = -ENODEV;
++ goto err2;
++ }
++
++ /* Enable access to the M3 code and data area from A8 */
++ m3_clk = clk_get(NULL, "wkup_m3_fck");
++ if (IS_ERR(m3_clk)) {
++		pr_err("%s failed to get WKUP_M3 clock\n", __func__);
++ goto err3;
++ }
++
++ if (clk_enable(m3_clk)) {
++		pr_err("%s WKUP_M3: clock enable failed\n", __func__);
++ goto err4;
++ }
++
++ m3_code = ioremap(M3_UMEM, SZ_16K);
++ if (!m3_code) {
++ pr_err("%s Could not ioremap M3 code space\n", __func__);
++ ret = -ENOMEM;
++ goto err5;
++ }
++
++ pr_info("Trying to load am335x-pm-firmware.bin (60 secs timeout)\n");
++
++ ret = request_firmware(&firmware, "am335x-pm-firmware.bin", mpu_dev);
++ if (ret < 0) {
++ dev_err(mpu_dev, "request_firmware failed\n");
++ goto err6;
++ } else {
++ memcpy(m3_code, firmware->data, firmware->size);
++ pr_info("Copied the M3 firmware to UMEM\n");
++ }
++
++ ret = request_irq(AM33XX_IRQ_M3_M3SP_TXEV, wkup_m3_txev_handler,
++ IRQF_DISABLED, "wkup_m3_txev", NULL);
++ if (ret) {
++ pr_err("%s request_irq failed for 0x%x\n", __func__,
++ AM33XX_IRQ_M3_M3SP_TXEV);
++ goto err6;
++ }
++
++ m3_state = M3_STATE_RESET;
++
++ ret = omap_hwmod_deassert_hardreset(wkup_m3_oh, "wkup_m3");
++ if (ret) {
++ pr_err("Could not deassert the reset for WKUP_M3\n");
++ goto err6;
++ } else {
++ return 0;
++ }
++
++err6:
++ release_firmware(firmware);
++ iounmap(m3_code);
++err5:
++ clk_disable(m3_clk);
++err4:
++ clk_put(m3_clk);
++err3:
++ omap_mbox_put(m3_mbox, &wkup_m3_mbox_notifier);
++err2:
++ iounmap(m3_eoi);
++err1:
++ iounmap(ipc_regs);
++exit:
++ return ret;
++}
++
++/*
++ * Initiate sleep transition for other clockdomains, if
++ * they are not used
++ */
++static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
++{
++ if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
++ atomic_read(&clkdm->usecount) == 0)
++ clkdm_sleep(clkdm);
++ return 0;
++}
++#endif /* CONFIG_SUSPEND */
++
++/*
++ * Push the minimal suspend-resume code to SRAM
++ */
++void am33xx_push_sram_idle(void)
++{
++ am33xx_do_wfi_sram = omap_sram_push(am33xx_do_wfi, am33xx_do_wfi_sz);
++}
++
++static int __init am33xx_pm_init(void)
++{
++	int ret = 0;
++
++ if (!cpu_is_am33xx())
++ return -ENODEV;
++
++ pr_info("Power Management for AM33XX family\n");
++
++#ifdef CONFIG_SUSPEND
++ (void) clkdm_for_each(clkdms_setup, NULL);
++
++ /* CEFUSE domain should be turned off post bootup */
++ cefuse_pwrdm = pwrdm_lookup("cefuse_pwrdm");
++ if (cefuse_pwrdm == NULL)
++ printk(KERN_ERR "Failed to get cefuse_pwrdm\n");
++ else
++ pwrdm_set_next_pwrst(cefuse_pwrdm, PWRDM_POWER_OFF);
++
++ gfx_pwrdm = pwrdm_lookup("gfx_pwrdm");
++ if (gfx_pwrdm == NULL)
++ printk(KERN_ERR "Failed to get gfx_pwrdm\n");
++
++ gfx_l3_clkdm = clkdm_lookup("gfx_l3_clkdm");
++ if (gfx_l3_clkdm == NULL)
++ printk(KERN_ERR "Failed to get gfx_l3_clkdm\n");
++
++ gfx_l4ls_clkdm = clkdm_lookup("gfx_l4ls_gfx_clkdm");
++ if (gfx_l4ls_clkdm == NULL)
++ printk(KERN_ERR "Failed to get gfx_l4ls_gfx_clkdm\n");
++
++ mpu_dev = omap_device_get_by_hwmod_name("mpu");
++
++ if (!mpu_dev) {
++ pr_warning("%s: unable to get the mpu device\n", __func__);
++ return -EINVAL;
++ }
++
++ ret = wkup_m3_init();
++
++ if (ret) {
++ pr_err("Could not initialise WKUP_M3. "
++ "Power management will be compromised\n");
++ enable_deep_sleep = false;
++ }
++
++ if (enable_deep_sleep)
++ suspend_set_ops(&am33xx_pm_ops);
++#endif /* CONFIG_SUSPEND */
++
++ return ret;
++}
++late_initcall(am33xx_pm_init);
+diff --git a/arch/arm/mach-omap2/pm33xx.h b/arch/arm/mach-omap2/pm33xx.h
+new file mode 100644
+index 0000000..f72c28e
+--- /dev/null
++++ b/arch/arm/mach-omap2/pm33xx.h
+@@ -0,0 +1,151 @@
++/*
++ * AM33XX Power Management Routines
++ *
++ * Copyright (C) 2012 Texas Instruments Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __ARCH_ARM_MACH_OMAP2_PM33XX_H
++#define __ARCH_ARM_MACH_OMAP2_PM33XX_H
++
++#include <mach/hardware.h> /* XXX Is this the right one to include? */
++
++#ifndef __ASSEMBLER__
++extern void __iomem *am33xx_get_ram_base(void);
++
++struct a8_wkup_m3_ipc_data {
++ int resume_addr;
++ int sleep_mode;
++ int ipc_data1;
++ int ipc_data2;
++} am33xx_lp_ipc;
++
++struct am33xx_padconf {
++ int mii1_col;
++ int mii1_crs;
++ int mii1_rxerr;
++ int mii1_txen;
++ int mii1_rxdv;
++ int mii1_txd3;
++ int mii1_txd2;
++ int mii1_txd1;
++ int mii1_txd0;
++ int mii1_txclk;
++ int mii1_rxclk;
++ int mii1_rxd3;
++ int mii1_rxd2;
++ int mii1_rxd1;
++ int mii1_rxd0;
++ int rmii1_refclk;
++ int mdio_data;
++ int mdio_clk;
++};
++#endif /* __ASSEMBLER__ */
++
++#define M3_TXEV_EOI (AM33XX_CTRL_BASE + 0x1324)
++#define A8_M3_IPC_REGS (AM33XX_CTRL_BASE + 0x1328)
++#define DS_RESUME_ADDR 0x40300340
++#define DS_IPC_DEFAULT 0xffffffff
++#define M3_UMEM 0x44D00000
++
++#define DS0_ID 0x3
++#define DS1_ID 0x5
++
++#define M3_STATE_UNKNOWN -1
++#define M3_STATE_RESET 0
++#define M3_STATE_INITED 1
++#define M3_STATE_MSG_FOR_LP 2
++#define M3_STATE_MSG_FOR_RESET 3
++
++/* DDR offsets */
++#define DDR_CMD0_IOCTRL (AM33XX_CTRL_BASE + 0x1404)
++#define DDR_CMD1_IOCTRL (AM33XX_CTRL_BASE + 0x1408)
++#define DDR_CMD2_IOCTRL (AM33XX_CTRL_BASE + 0x140C)
++#define DDR_DATA0_IOCTRL (AM33XX_CTRL_BASE + 0x1440)
++#define DDR_DATA1_IOCTRL (AM33XX_CTRL_BASE + 0x1444)
++
++#define DDR_IO_CTRL (AM33XX_CTRL_BASE + 0x0E04)
++#define VTP0_CTRL_REG (AM33XX_CTRL_BASE + 0x0E0C)
++#define DDR_CKE_CTRL (AM33XX_CTRL_BASE + 0x131C)
++#define DDR_PHY_BASE_ADDR (AM33XX_CTRL_BASE + 0x2000)
++
++#define CMD0_CTRL_SLAVE_RATIO_0 (DDR_PHY_BASE_ADDR + 0x01C)
++#define CMD0_CTRL_SLAVE_FORCE_0 (DDR_PHY_BASE_ADDR + 0x020)
++#define CMD0_CTRL_SLAVE_DELAY_0 (DDR_PHY_BASE_ADDR + 0x024)
++#define CMD0_DLL_LOCK_DIFF_0 (DDR_PHY_BASE_ADDR + 0x028)
++#define CMD0_INVERT_CLKOUT_0 (DDR_PHY_BASE_ADDR + 0x02C)
++
++#define CMD1_CTRL_SLAVE_RATIO_0 (DDR_PHY_BASE_ADDR + 0x050)
++#define CMD1_CTRL_SLAVE_FORCE_0 (DDR_PHY_BASE_ADDR + 0x054)
++#define CMD1_CTRL_SLAVE_DELAY_0 (DDR_PHY_BASE_ADDR + 0x058)
++#define CMD1_DLL_LOCK_DIFF_0 (DDR_PHY_BASE_ADDR + 0x05C)
++#define CMD1_INVERT_CLKOUT_0 (DDR_PHY_BASE_ADDR + 0x060)
++
++#define CMD2_CTRL_SLAVE_RATIO_0 (DDR_PHY_BASE_ADDR + 0x084)
++#define CMD2_CTRL_SLAVE_FORCE_0 (DDR_PHY_BASE_ADDR + 0x088)
++#define CMD2_CTRL_SLAVE_DELAY_0 (DDR_PHY_BASE_ADDR + 0x08C)
++#define CMD2_DLL_LOCK_DIFF_0 (DDR_PHY_BASE_ADDR + 0x090)
++#define CMD2_INVERT_CLKOUT_0 (DDR_PHY_BASE_ADDR + 0x094)
++
++#define DATA0_RD_DQS_SLAVE_RATIO_0 (DDR_PHY_BASE_ADDR + 0x0C8)
++#define DATA0_RD_DQS_SLAVE_RATIO_1 (DDR_PHY_BASE_ADDR + 0x0CC)
++
++#define DATA0_WR_DQS_SLAVE_RATIO_0 (DDR_PHY_BASE_ADDR + 0x0DC)
++#define DATA0_WR_DQS_SLAVE_RATIO_1 (DDR_PHY_BASE_ADDR + 0x0E0)
++
++#define DATA0_WRLVL_INIT_RATIO_0 (DDR_PHY_BASE_ADDR + 0x0F0)
++#define DATA0_WRLVL_INIT_RATIO_1 (DDR_PHY_BASE_ADDR + 0x0F4)
++
++#define DATA0_GATELVL_INIT_RATIO_0 (DDR_PHY_BASE_ADDR + 0x0FC)
++#define DATA0_GATELVL_INIT_RATIO_1 (DDR_PHY_BASE_ADDR + 0x100)
++
++#define DATA0_FIFO_WE_SLAVE_RATIO_0 (DDR_PHY_BASE_ADDR + 0x108)
++#define DATA0_FIFO_WE_SLAVE_RATIO_1 (DDR_PHY_BASE_ADDR + 0x10C)
++
++#define DATA0_WR_DATA_SLAVE_RATIO_0 (DDR_PHY_BASE_ADDR + 0x120)
++#define DATA0_WR_DATA_SLAVE_RATIO_1 (DDR_PHY_BASE_ADDR + 0x124)
++
++#define DATA0_DLL_LOCK_DIFF_0 (DDR_PHY_BASE_ADDR + 0x138)
++
++#define DATA0_RANK0_DELAYS_0 (DDR_PHY_BASE_ADDR + 0x134)
++#define DATA1_RANK0_DELAYS_0 (DDR_PHY_BASE_ADDR + 0x1D8)
++
++/* Temp placeholder for the values we want in the registers */
++#define EMIF_READ_LATENCY 0x100005 /* Enable Dynamic Power Down */
++#define EMIF_TIM1 0x0666B3C9
++#define EMIF_TIM2 0x243631CA
++#define EMIF_TIM3 0x0000033F
++#define EMIF_SDCFG 0x41805332
++#define EMIF_SDREF 0x0000081a
++#define EMIF_SDMGT 0x80000000
++#define EMIF_SDRAM 0x00004650
++#define EMIF_PHYCFG 0x2
++
++#define DDR2_DLL_LOCK_DIFF 0x0
++#define DDR2_RD_DQS 0x12
++#define DDR2_PHY_FIFO_WE 0x80
++
++#define DDR_PHY_RESET (0x1 << 10)
++#define DDR_PHY_READY (0x1 << 2)
++#define DDR2_RATIO 0x80
++#define CMD_FORCE 0x00
++#define CMD_DELAY 0x00
++
++#define DDR2_INVERT_CLKOUT 0x00
++#define DDR2_WR_DQS 0x00
++#define DDR2_PHY_WRLVL 0x00
++#define DDR2_PHY_GATELVL 0x00
++#define DDR2_PHY_WR_DATA 0x40
++#define PHY_RANK0_DELAY 0x01
++#define PHY_DLL_LOCK_DIFF 0x0
++#define DDR_IOCTRL_VALUE 0x18B
++
++#define VTP_CTRL_READY (0x1 << 5)
++#define VTP_CTRL_ENABLE (0x1 << 6)
++#define VTP_CTRL_LOCK_EN (0x1 << 4)
++#define VTP_CTRL_START_EN (0x1)
++
++#endif
+diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
+index efa6649..1c2e2eb 100644
+--- a/arch/arm/mach-omap2/pm34xx.c
++++ b/arch/arm/mach-omap2/pm34xx.c
+@@ -28,7 +28,6 @@
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+ #include <linux/slab.h>
+-#include <linux/console.h>
+ #include <trace/events/power.h>
+
+ #include <asm/suspend.h>
+@@ -36,12 +35,12 @@
+ #include <plat/sram.h>
+ #include "clockdomain.h"
+ #include "powerdomain.h"
+-#include <plat/serial.h>
+ #include <plat/sdrc.h>
+ #include <plat/prcm.h>
+ #include <plat/gpmc.h>
+ #include <plat/dma.h>
+
++#include "common.h"
+ #include "cm2xxx_3xxx.h"
+ #include "cm-regbits-34xx.h"
+ #include "prm-regbits-34xx.h"
+@@ -53,15 +52,6 @@
+
+ #ifdef CONFIG_SUSPEND
+ static suspend_state_t suspend_state = PM_SUSPEND_ON;
+-static inline bool is_suspending(void)
+-{
+- return (suspend_state != PM_SUSPEND_ON) && console_suspend_enabled;
+-}
+-#else
+-static inline bool is_suspending(void)
+-{
+- return false;
+-}
+ #endif
+
+ /* pm34xx errata defined in pm.h */
+@@ -194,7 +184,7 @@ static void omap3_save_secure_ram_context(void)
+ * that any peripheral wake-up events occurring while attempting to
+ * clear the PM_WKST_x are detected and cleared.
+ */
+-static int prcm_clear_mod_irqs(s16 module, u8 regs)
++static int prcm_clear_mod_irqs(s16 module, u8 regs, u32 ignore_bits)
+ {
+ u32 wkst, fclk, iclk, clken;
+ u16 wkst_off = (regs == 3) ? OMAP3430ES2_PM_WKST3 : PM_WKST1;
+@@ -206,6 +196,7 @@ static int prcm_clear_mod_irqs(s16 module, u8 regs)
+
+ wkst = omap2_prm_read_mod_reg(module, wkst_off);
+ wkst &= omap2_prm_read_mod_reg(module, grpsel_off);
++ wkst &= ~ignore_bits;
+ if (wkst) {
+ iclk = omap2_cm_read_mod_reg(module, iclk_off);
+ fclk = omap2_cm_read_mod_reg(module, fclk_off);
+@@ -221,6 +212,7 @@ static int prcm_clear_mod_irqs(s16 module, u8 regs)
+ omap2_cm_set_mod_reg_bits(clken, module, fclk_off);
+ omap2_prm_write_mod_reg(wkst, module, wkst_off);
+ wkst = omap2_prm_read_mod_reg(module, wkst_off);
++ wkst &= ~ignore_bits;
+ c++;
+ }
+ omap2_cm_write_mod_reg(iclk, module, iclk_off);
+@@ -230,76 +222,35 @@ static int prcm_clear_mod_irqs(s16 module, u8 regs)
+ return c;
+ }
+
+-static int _prcm_int_handle_wakeup(void)
++static irqreturn_t _prcm_int_handle_io(int irq, void *unused)
+ {
+ int c;
+
+- c = prcm_clear_mod_irqs(WKUP_MOD, 1);
+- c += prcm_clear_mod_irqs(CORE_MOD, 1);
+- c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1);
+- if (omap_rev() > OMAP3430_REV_ES1_0) {
+- c += prcm_clear_mod_irqs(CORE_MOD, 3);
+- c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1);
+- }
++ c = prcm_clear_mod_irqs(WKUP_MOD, 1,
++ ~(OMAP3430_ST_IO_MASK | OMAP3430_ST_IO_CHAIN_MASK));
+
+- return c;
++ return c ? IRQ_HANDLED : IRQ_NONE;
+ }
+
+-/*
+- * PRCM Interrupt Handler
+- *
+- * The PRM_IRQSTATUS_MPU register indicates if there are any pending
+- * interrupts from the PRCM for the MPU. These bits must be cleared in
+- * order to clear the PRCM interrupt. The PRCM interrupt handler is
+- * implemented to simply clear the PRM_IRQSTATUS_MPU in order to clear
+- * the PRCM interrupt. Please note that bit 0 of the PRM_IRQSTATUS_MPU
+- * register indicates that a wake-up event is pending for the MPU and
+- * this bit can only be cleared if the all the wake-up events latched
+- * in the various PM_WKST_x registers have been cleared. The interrupt
+- * handler is implemented using a do-while loop so that if a wake-up
+- * event occurred during the processing of the prcm interrupt handler
+- * (setting a bit in the corresponding PM_WKST_x register and thus
+- * preventing us from clearing bit 0 of the PRM_IRQSTATUS_MPU register)
+- * this would be handled.
+- */
+-static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id)
++static irqreturn_t _prcm_int_handle_wakeup(int irq, void *unused)
+ {
+- u32 irqenable_mpu, irqstatus_mpu;
+- int c = 0;
+-
+- irqenable_mpu = omap2_prm_read_mod_reg(OCP_MOD,
+- OMAP3_PRM_IRQENABLE_MPU_OFFSET);
+- irqstatus_mpu = omap2_prm_read_mod_reg(OCP_MOD,
+- OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+- irqstatus_mpu &= irqenable_mpu;
+-
+- do {
+- if (irqstatus_mpu & (OMAP3430_WKUP_ST_MASK |
+- OMAP3430_IO_ST_MASK)) {
+- c = _prcm_int_handle_wakeup();
+-
+- /*
+- * Is the MPU PRCM interrupt handler racing with the
+- * IVA2 PRCM interrupt handler ?
+- */
+- WARN(c == 0, "prcm: WARNING: PRCM indicated MPU wakeup "
+- "but no wakeup sources are marked\n");
+- } else {
+- /* XXX we need to expand our PRCM interrupt handler */
+- WARN(1, "prcm: WARNING: PRCM interrupt received, but "
+- "no code to handle it (%08x)\n", irqstatus_mpu);
+- }
+-
+- omap2_prm_write_mod_reg(irqstatus_mpu, OCP_MOD,
+- OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+-
+- irqstatus_mpu = omap2_prm_read_mod_reg(OCP_MOD,
+- OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+- irqstatus_mpu &= irqenable_mpu;
++ int c;
+
+- } while (irqstatus_mpu);
++ /*
++ * Clear all except ST_IO and ST_IO_CHAIN for wkup module,
++	 * Clear all except ST_IO and ST_IO_CHAIN for the wkup module;
++	 * these are handled in a separate handler to avoid acking
++	 * IO events before they are parsed in the mux code.
++ c = prcm_clear_mod_irqs(WKUP_MOD, 1,
++ OMAP3430_ST_IO_MASK | OMAP3430_ST_IO_CHAIN_MASK);
++ c += prcm_clear_mod_irqs(CORE_MOD, 1, 0);
++ c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1, 0);
++ if (omap_rev() > OMAP3430_REV_ES1_0) {
++ c += prcm_clear_mod_irqs(CORE_MOD, 3, 0);
++ c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1, 0);
++ }
+
+- return IRQ_HANDLED;
++ return c ? IRQ_HANDLED : IRQ_NONE;
+ }
+
+ static void omap34xx_save_context(u32 *save)
+@@ -375,20 +326,11 @@ void omap_sram_idle(void)
+ omap3_enable_io_chain();
+ }
+
+- /* Block console output in case it is on one of the OMAP UARTs */
+- if (!is_suspending())
+- if (per_next_state < PWRDM_POWER_ON ||
+- core_next_state < PWRDM_POWER_ON)
+- if (!console_trylock())
+- goto console_still_active;
+-
+ pwrdm_pre_transition();
+
+ /* PER */
+ if (per_next_state < PWRDM_POWER_ON) {
+ per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0;
+- omap_uart_prepare_idle(2);
+- omap_uart_prepare_idle(3);
+ omap2_gpio_prepare_for_idle(per_going_off);
+ if (per_next_state == PWRDM_POWER_OFF)
+ omap3_per_save_context();
+@@ -396,8 +338,6 @@ void omap_sram_idle(void)
+
+ /* CORE */
+ if (core_next_state < PWRDM_POWER_ON) {
+- omap_uart_prepare_idle(0);
+- omap_uart_prepare_idle(1);
+ if (core_next_state == PWRDM_POWER_OFF) {
+ omap3_core_save_context();
+ omap3_cm_save_context();
+@@ -446,8 +386,6 @@ void omap_sram_idle(void)
+ omap3_sram_restore_context();
+ omap2_sms_restore_context();
+ }
+- omap_uart_resume_idle(0);
+- omap_uart_resume_idle(1);
+ if (core_next_state == PWRDM_POWER_OFF)
+ omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
+ OMAP3430_GR_MOD,
+@@ -463,14 +401,8 @@ void omap_sram_idle(void)
+ omap2_gpio_resume_after_idle();
+ if (per_prev_state == PWRDM_POWER_OFF)
+ omap3_per_restore_context();
+- omap_uart_resume_idle(2);
+- omap_uart_resume_idle(3);
+ }
+
+- if (!is_suspending())
+- console_unlock();
+-
+-console_still_active:
+ /* Disable IO-PAD and IO-CHAIN wakeup */
+ if (omap3_has_io_wakeup() &&
+ (per_next_state < PWRDM_POWER_ON ||
+@@ -484,21 +416,11 @@ console_still_active:
+ clkdm_allow_idle(mpu_pwrdm->pwrdm_clkdms[0]);
+ }
+
+-int omap3_can_sleep(void)
+-{
+- if (!omap_uart_can_sleep())
+- return 0;
+- return 1;
+-}
+-
+ static void omap3_pm_idle(void)
+ {
+ local_irq_disable();
+ local_fiq_disable();
+
+- if (!omap3_can_sleep())
+- goto out;
+-
+ if (omap_irq_pending() || need_resched())
+ goto out;
+
+@@ -532,7 +454,6 @@ static int omap3_pm_suspend(void)
+ goto restore;
+ }
+
+- omap_uart_prepare_suspend();
+ omap3_intc_suspend();
+
+ omap_sram_idle();
+@@ -579,22 +500,27 @@ static int omap3_pm_begin(suspend_state_t state)
+ {
+ disable_hlt();
+ suspend_state = state;
+- omap_uart_enable_irqs(0);
++ omap_prcm_irq_prepare();
+ return 0;
+ }
+
+ static void omap3_pm_end(void)
+ {
+ suspend_state = PM_SUSPEND_ON;
+- omap_uart_enable_irqs(1);
+ enable_hlt();
+ return;
+ }
+
++static void omap3_pm_finish(void)
++{
++ omap_prcm_irq_complete();
++}
++
+ static const struct platform_suspend_ops omap_pm_ops = {
+ .begin = omap3_pm_begin,
+ .end = omap3_pm_end,
+ .enter = omap3_pm_enter,
++ .finish = omap3_pm_finish,
+ .valid = suspend_valid_only_mem,
+ };
+ #endif /* CONFIG_SUSPEND */
+@@ -679,7 +605,9 @@ static void __init prcm_setup_regs(void)
+ OMAP3630_GRPSEL_UART4_MASK : 0;
+
+ /* XXX This should be handled by hwmod code or SCM init code */
+- omap_ctrl_writel(OMAP3430_AUTOIDLE_MASK, OMAP2_CONTROL_SYSCONFIG);
++ /* This causes MUSB failure on AM3517 so disable it. */
++ if (!cpu_is_omap3517() && !cpu_is_omap3505())
++ omap_ctrl_writel(OMAP3430_AUTOIDLE_MASK, OMAP2_CONTROL_SYSCONFIG);
+
+ /*
+ * Enable control of expternal oscillator through
+@@ -700,10 +628,6 @@ static void __init prcm_setup_regs(void)
+ OMAP3430_GRPSEL_GPT1_MASK |
+ OMAP3430_GRPSEL_GPT12_MASK,
+ WKUP_MOD, OMAP3430_PM_MPUGRPSEL);
+- /* For some reason IO doesn't generate wakeup event even if
+- * it is selected to mpu wakeup goup */
+- omap2_prm_write_mod_reg(OMAP3430_IO_EN_MASK | OMAP3430_WKUP_EN_MASK,
+- OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);
+
+ /* Enable PM_WKEN to support DSS LPR */
+ omap2_prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS_MASK,
+@@ -868,7 +792,7 @@ static int __init omap3_pm_init(void)
+ struct clockdomain *neon_clkdm, *per_clkdm, *mpu_clkdm, *core_clkdm;
+ int ret;
+
+- if (!cpu_is_omap34xx())
++ if (!cpu_is_omap34xx() || cpu_is_am33xx())
+ return -ENODEV;
+
+ if (!omap3_has_io_chain_ctrl())
+@@ -880,12 +804,21 @@ static int __init omap3_pm_init(void)
+ * supervised mode for powerdomains */
+ prcm_setup_regs();
+
+- ret = request_irq(INT_34XX_PRCM_MPU_IRQ,
+- (irq_handler_t)prcm_interrupt_handler,
+- IRQF_DISABLED, "prcm", NULL);
++ ret = request_irq(omap_prcm_event_to_irq("wkup"),
++ _prcm_int_handle_wakeup, IRQF_NO_SUSPEND, "pm_wkup", NULL);
++
++ if (ret) {
++ pr_err("pm: Failed to request pm_wkup irq\n");
++ goto err1;
++ }
++
++ /* IO interrupt is shared with mux code */
++ ret = request_irq(omap_prcm_event_to_irq("io"),
++ _prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND, "pm_io",
++ omap3_pm_init);
++
+ if (ret) {
+- printk(KERN_ERR "request_irq failed to register for 0x%x\n",
+- INT_34XX_PRCM_MPU_IRQ);
++ pr_err("pm: Failed to request pm_io irq\n");
+ goto err1;
+ }
+
+diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
+index 59a870b..c264ef7 100644
+--- a/arch/arm/mach-omap2/pm44xx.c
++++ b/arch/arm/mach-omap2/pm44xx.c
+@@ -1,8 +1,9 @@
+ /*
+ * OMAP4 Power Management Routines
+ *
+- * Copyright (C) 2010 Texas Instruments, Inc.
++ * Copyright (C) 2010-2011 Texas Instruments, Inc.
+ * Rajendra Nayak <rnayak@ti.com>
++ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+@@ -16,14 +17,17 @@
+ #include <linux/err.h>
+ #include <linux/slab.h>
+
++#include "common.h"
++#include "clockdomain.h"
+ #include "powerdomain.h"
+-#include <mach/omap4-common.h>
++#include "pm.h"
+
+ struct power_state {
+ struct powerdomain *pwrdm;
+ u32 next_state;
+ #ifdef CONFIG_SUSPEND
+ u32 saved_state;
++ u32 saved_logic_state;
+ #endif
+ struct list_head node;
+ };
+@@ -33,7 +37,50 @@ static LIST_HEAD(pwrst_list);
+ #ifdef CONFIG_SUSPEND
+ static int omap4_pm_suspend(void)
+ {
+- do_wfi();
++ struct power_state *pwrst;
++ int state, ret = 0;
++ u32 cpu_id = smp_processor_id();
++
++ /* Save current powerdomain state */
++ list_for_each_entry(pwrst, &pwrst_list, node) {
++ pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
++ pwrst->saved_logic_state = pwrdm_read_logic_retst(pwrst->pwrdm);
++ }
++
++ /* Set targeted power domain states by suspend */
++ list_for_each_entry(pwrst, &pwrst_list, node) {
++ omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
++ pwrdm_set_logic_retst(pwrst->pwrdm, PWRDM_POWER_OFF);
++ }
++
++ /*
++	 * For MPUSS to hit power domain retention (CSWR or OSWR),
++	 * the CPU0 and CPU1 power domains need to be in OFF or DORMANT
++	 * state, since CPU power domain CSWR is not supported by
++	 * hardware on OMAP4.
++	 * Only the master CPU follows the suspend path; all other CPUs
++	 * follow the CPU hotplug path in system-wide suspend.
++	 * More details can be found in OMAP4430 TRM section 4.3.4.2.
++ */
++ omap4_enter_lowpower(cpu_id, PWRDM_POWER_OFF);
++
++ /* Restore next powerdomain state */
++ list_for_each_entry(pwrst, &pwrst_list, node) {
++ state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
++ if (state > pwrst->next_state) {
++ pr_info("Powerdomain (%s) didn't enter "
++ "target state %d\n",
++ pwrst->pwrdm->name, pwrst->next_state);
++ ret = -1;
++ }
++ omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
++ pwrdm_set_logic_retst(pwrst->pwrdm, pwrst->saved_logic_state);
++ }
++ if (ret)
++ pr_crit("Could not enter target state in pm_suspend\n");
++ else
++ pr_info("Successfully put all powerdomains to target state\n");
++
+ return 0;
+ }
+
+@@ -73,6 +120,22 @@ static const struct platform_suspend_ops omap_pm_ops = {
+ };
+ #endif /* CONFIG_SUSPEND */
+
++/*
++ * Enable hardware supervised mode for all clockdomains if it's
++ * supported. Initiate sleep transition for other clockdomains, if
++ * they are not used
++ */
++static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
++{
++ if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO)
++ clkdm_allow_idle(clkdm);
++ else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
++ atomic_read(&clkdm->usecount) == 0)
++ clkdm_sleep(clkdm);
++ return 0;
++}
++
++
+ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
+ {
+ struct power_state *pwrst;
+@@ -80,14 +143,48 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
+ if (!pwrdm->pwrsts)
+ return 0;
+
++ /*
++	 * Skip the CPU0 and CPU1 power domains. CPU1 is programmed
++	 * through the hotplug path and CPU0 is explicitly programmed
++	 * further down in the code path.
++ */
++ if (!strncmp(pwrdm->name, "cpu", 3))
++ return 0;
++
++ /*
++ * FIXME: Remove this check when core retention is supported
++ * Only MPUSS power domain is added in the list.
++ */
++ if (strcmp(pwrdm->name, "mpu_pwrdm"))
++ return 0;
++
+ pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
+ if (!pwrst)
+ return -ENOMEM;
++
+ pwrst->pwrdm = pwrdm;
+- pwrst->next_state = PWRDM_POWER_ON;
++ pwrst->next_state = PWRDM_POWER_RET;
+ list_add(&pwrst->node, &pwrst_list);
+
+- return pwrdm_set_next_pwrst(pwrst->pwrdm, pwrst->next_state);
++ return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
++}
++
++/**
++ * omap_default_idle - OMAP4 default idle routine.
++ *
++ * Implements OMAP4 memory, IO ordering requirements which can't be addressed
++ * with default arch_idle() hook. Used by all CPUs with !CONFIG_CPUIDLE and
++ * by secondary CPU with CONFIG_CPUIDLE.
++ */
++static void omap_default_idle(void)
++{
++ local_irq_disable();
++ local_fiq_disable();
++
++ omap_do_wfi();
++
++ local_fiq_enable();
++ local_irq_enable();
+ }
+
+ /**
+@@ -99,10 +196,17 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
+ static int __init omap4_pm_init(void)
+ {
+ int ret;
++ struct clockdomain *emif_clkdm, *mpuss_clkdm, *l3_1_clkdm;
++ struct clockdomain *ducati_clkdm, *l3_2_clkdm, *l4_per_clkdm;
+
+ if (!cpu_is_omap44xx())
+ return -ENODEV;
+
++ if (omap_rev() == OMAP4430_REV_ES1_0) {
++ WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
++ return -ENODEV;
++ }
++
+ pr_err("Power Management for TI OMAP4.\n");
+
+ ret = pwrdm_for_each(pwrdms_setup, NULL);
+@@ -111,10 +215,51 @@ static int __init omap4_pm_init(void)
+ goto err2;
+ }
+
++ /*
++ * The dynamic dependency between MPUSS -> MEMIF and
++ * MPUSS -> L4_PER/L3_* and DUCATI -> L3_* doesn't work as
++ * expected. The hardware recommendation is to enable static
++ * dependencies for these to avoid system lock ups or random crashes.
++ */
++ mpuss_clkdm = clkdm_lookup("mpuss_clkdm");
++ emif_clkdm = clkdm_lookup("l3_emif_clkdm");
++ l3_1_clkdm = clkdm_lookup("l3_1_clkdm");
++ l3_2_clkdm = clkdm_lookup("l3_2_clkdm");
++ l4_per_clkdm = clkdm_lookup("l4_per_clkdm");
++ ducati_clkdm = clkdm_lookup("ducati_clkdm");
++ if ((!mpuss_clkdm) || (!emif_clkdm) || (!l3_1_clkdm) ||
++ (!l3_2_clkdm) || (!ducati_clkdm) || (!l4_per_clkdm))
++ goto err2;
++
++ ret = clkdm_add_wkdep(mpuss_clkdm, emif_clkdm);
++ ret |= clkdm_add_wkdep(mpuss_clkdm, l3_1_clkdm);
++ ret |= clkdm_add_wkdep(mpuss_clkdm, l3_2_clkdm);
++ ret |= clkdm_add_wkdep(mpuss_clkdm, l4_per_clkdm);
++ ret |= clkdm_add_wkdep(ducati_clkdm, l3_1_clkdm);
++ ret |= clkdm_add_wkdep(ducati_clkdm, l3_2_clkdm);
++ if (ret) {
++ pr_err("Failed to add MPUSS -> L3/EMIF/L4PER, DUCATI -> L3 "
++ "wakeup dependency\n");
++ goto err2;
++ }
++
++ ret = omap4_mpuss_init();
++ if (ret) {
++ pr_err("Failed to initialise OMAP4 MPUSS\n");
++ goto err2;
++ }
++
++ (void) clkdm_for_each(clkdms_setup, NULL);
++
+ #ifdef CONFIG_SUSPEND
+ suspend_set_ops(&omap_pm_ops);
+ #endif /* CONFIG_SUSPEND */
+
++ /* Overwrite the default arch_idle() */
++ pm_idle = omap_default_idle;
++
++ omap4_idle_init();
++
+ err2:
+ return ret;
+ }
+diff --git a/arch/arm/mach-omap2/powerdomain.h b/arch/arm/mach-omap2/powerdomain.h
+index 0d72a8a..437e51d 100644
+--- a/arch/arm/mach-omap2/powerdomain.h
++++ b/arch/arm/mach-omap2/powerdomain.h
+@@ -67,9 +67,9 @@
+
+ /*
+ * Maximum number of clockdomains that can be associated with a powerdomain.
+- * CORE powerdomain on OMAP4 is the worst case
++ * CORE powerdomain on AM33XX is the worst case
+ */
+-#define PWRDM_MAX_CLKDMS 9
++#define PWRDM_MAX_CLKDMS 11
+
+ /* XXX A completely arbitrary number. What is reasonable here? */
+ #define PWRDM_TRANSITION_BAILOUT 100000
+@@ -92,6 +92,8 @@ struct powerdomain;
+ * @pwrdm_clkdms: Clockdomains in this powerdomain
+ * @node: list_head linking all powerdomains
+ * @voltdm_node: list_head linking all powerdomains in a voltagedomain
++ * @pwrstctrl_offs: XXX_PWRSTCTRL reg offset from prcm_offs
++ * @pwrstst_offs: XXX_PWRSTST reg offset from prcm_offs
+ * @state:
+ * @state_counter:
+ * @timer:
+@@ -121,6 +123,8 @@ struct powerdomain {
+ unsigned ret_logic_off_counter;
+ unsigned ret_mem_off_counter[PWRDM_MAX_MEM_BANKS];
+
++ u8 pwrstctrl_offs;
++ u8 pwrstst_offs;
+ #ifdef CONFIG_PM_DEBUG
+ s64 timer;
+ s64 state_timer[PWRDM_MAX_PWRSTS];
+@@ -223,10 +227,12 @@ bool pwrdm_can_ever_lose_context(struct powerdomain *pwrdm);
+ extern void omap242x_powerdomains_init(void);
+ extern void omap243x_powerdomains_init(void);
+ extern void omap3xxx_powerdomains_init(void);
++extern void am33xx_powerdomains_init(void);
+ extern void omap44xx_powerdomains_init(void);
+
+ extern struct pwrdm_ops omap2_pwrdm_operations;
+ extern struct pwrdm_ops omap3_pwrdm_operations;
++extern struct pwrdm_ops am33xx_pwrdm_operations;
+ extern struct pwrdm_ops omap4_pwrdm_operations;
+
+ /* Common Internal functions used across OMAP rev's */
+diff --git a/arch/arm/mach-omap2/powerdomain44xx.c b/arch/arm/mach-omap2/powerdomain44xx.c
+index a7880af..b088540 100644
+--- a/arch/arm/mach-omap2/powerdomain44xx.c
++++ b/arch/arm/mach-omap2/powerdomain44xx.c
+@@ -28,7 +28,7 @@ static int omap4_pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst)
+ omap4_prminst_rmw_inst_reg_bits(OMAP_POWERSTATE_MASK,
+ (pwrst << OMAP_POWERSTATE_SHIFT),
+ pwrdm->prcm_partition,
+- pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL);
++ pwrdm->prcm_offs, pwrdm->pwrstctrl_offs);
+ return 0;
+ }
+
+@@ -37,7 +37,7 @@ static int omap4_pwrdm_read_next_pwrst(struct powerdomain *pwrdm)
+ u32 v;
+
+ v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs,
+- OMAP4_PM_PWSTCTRL);
++ pwrdm->pwrstctrl_offs);
+ v &= OMAP_POWERSTATE_MASK;
+ v >>= OMAP_POWERSTATE_SHIFT;
+
+@@ -49,7 +49,7 @@ static int omap4_pwrdm_read_pwrst(struct powerdomain *pwrdm)
+ u32 v;
+
+ v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs,
+- OMAP4_PM_PWSTST);
++ pwrdm->pwrstst_offs);
+ v &= OMAP_POWERSTATEST_MASK;
+ v >>= OMAP_POWERSTATEST_SHIFT;
+
+@@ -61,7 +61,7 @@ static int omap4_pwrdm_read_prev_pwrst(struct powerdomain *pwrdm)
+ u32 v;
+
+ v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs,
+- OMAP4_PM_PWSTST);
++ pwrdm->pwrstst_offs);
+ v &= OMAP4430_LASTPOWERSTATEENTERED_MASK;
+ v >>= OMAP4430_LASTPOWERSTATEENTERED_SHIFT;
+
+@@ -73,7 +73,7 @@ static int omap4_pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm)
+ omap4_prminst_rmw_inst_reg_bits(OMAP4430_LOWPOWERSTATECHANGE_MASK,
+ (1 << OMAP4430_LOWPOWERSTATECHANGE_SHIFT),
+ pwrdm->prcm_partition,
+- pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL);
++ pwrdm->prcm_offs, pwrdm->pwrstctrl_offs);
+ return 0;
+ }
+
+@@ -82,7 +82,7 @@ static int omap4_pwrdm_clear_all_prev_pwrst(struct powerdomain *pwrdm)
+ omap4_prminst_rmw_inst_reg_bits(OMAP4430_LASTPOWERSTATEENTERED_MASK,
+ OMAP4430_LASTPOWERSTATEENTERED_MASK,
+ pwrdm->prcm_partition,
+- pwrdm->prcm_offs, OMAP4_PM_PWSTST);
++ pwrdm->prcm_offs, pwrdm->pwrstst_offs);
+ return 0;
+ }
+
+@@ -93,7 +93,7 @@ static int omap4_pwrdm_set_logic_retst(struct powerdomain *pwrdm, u8 pwrst)
+ v = pwrst << __ffs(OMAP4430_LOGICRETSTATE_MASK);
+ omap4_prminst_rmw_inst_reg_bits(OMAP4430_LOGICRETSTATE_MASK, v,
+ pwrdm->prcm_partition, pwrdm->prcm_offs,
+- OMAP4_PM_PWSTCTRL);
++ pwrdm->pwrstctrl_offs);
+
+ return 0;
+ }
+@@ -107,7 +107,7 @@ static int omap4_pwrdm_set_mem_onst(struct powerdomain *pwrdm, u8 bank,
+
+ omap4_prminst_rmw_inst_reg_bits(m, (pwrst << __ffs(m)),
+ pwrdm->prcm_partition, pwrdm->prcm_offs,
+- OMAP4_PM_PWSTCTRL);
++ pwrdm->pwrstctrl_offs);
+
+ return 0;
+ }
+@@ -131,7 +131,7 @@ static int omap4_pwrdm_read_logic_pwrst(struct powerdomain *pwrdm)
+ u32 v;
+
+ v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs,
+- OMAP4_PM_PWSTST);
++ pwrdm->pwrstst_offs);
+ v &= OMAP4430_LOGICSTATEST_MASK;
+ v >>= OMAP4430_LOGICSTATEST_SHIFT;
+
+@@ -157,7 +157,7 @@ static int omap4_pwrdm_read_mem_pwrst(struct powerdomain *pwrdm, u8 bank)
+ m = omap2_pwrdm_get_mem_bank_stst_mask(bank);
+
+ v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs,
+- OMAP4_PM_PWSTST);
++ pwrdm->pwrstst_offs);
+ v &= m;
+ v >>= __ffs(m);
+
+@@ -171,7 +171,7 @@ static int omap4_pwrdm_read_mem_retst(struct powerdomain *pwrdm, u8 bank)
+ m = omap2_pwrdm_get_mem_bank_retst_mask(bank);
+
+ v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs,
+- OMAP4_PM_PWSTCTRL);
++ pwrdm->pwrstctrl_offs);
+ v &= m;
+ v >>= __ffs(m);
+
+@@ -191,7 +191,7 @@ static int omap4_pwrdm_wait_transition(struct powerdomain *pwrdm)
+ /* XXX Is this udelay() value meaningful? */
+ while ((omap4_prminst_read_inst_reg(pwrdm->prcm_partition,
+ pwrdm->prcm_offs,
+- OMAP4_PM_PWSTST) &
++ pwrdm->pwrstst_offs) &
+ OMAP_INTRANSITION_MASK) &&
+ (c++ < PWRDM_TRANSITION_BAILOUT))
+ udelay(1);
+diff --git a/arch/arm/mach-omap2/powerdomains33xx_data.c b/arch/arm/mach-omap2/powerdomains33xx_data.c
+new file mode 100644
+index 0000000..32a75e5
+--- /dev/null
++++ b/arch/arm/mach-omap2/powerdomains33xx_data.c
+@@ -0,0 +1,134 @@
++/*
++ * AM33XX Power domains framework
++ *
++ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++
++#include "powerdomain.h"
++#include "prcm-common.h"
++#include "prm33xx.h"
++#include "prcm44xx.h"
++
++static struct powerdomain gfx_33xx_pwrdm = {
++ .name = "gfx_pwrdm",
++ .voltdm = { .name = "core" },
++ .prcm_partition = AM33XX_PRM_PARTITION,
++ .prcm_offs = AM33XX_PRM_GFX_MOD,
++ .pwrsts = PWRSTS_OFF_ON,
++ .pwrsts_logic_ret = PWRSTS_OFF_RET,
++ .pwrstctrl_offs = AM33XX_PM_GFX_PWRSTCTRL_OFFSET,
++ .pwrstst_offs = AM33XX_PM_GFX_PWRSTST_OFFSET,
++ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
++ .banks = 1,
++ .pwrsts_mem_ret = {
++ [0] = PWRSTS_OFF_RET, /* gfx_mem */
++ },
++ .pwrsts_mem_on = {
++ [0] = PWRSTS_ON, /* gfx_mem */
++ },
++};
++
++static struct powerdomain rtc_33xx_pwrdm = {
++ .name = "rtc_pwrdm",
++ .voltdm = { .name = "rtc" },
++ .prcm_partition = AM33XX_PRM_PARTITION,
++ .prcm_offs = AM33XX_PRM_RTC_MOD,
++ .pwrsts = PWRSTS_ON,
++ .pwrstctrl_offs = AM33XX_PM_RTC_PWRSTCTRL_OFFSET,
++ .pwrstst_offs = AM33XX_PM_RTC_PWRSTST_OFFSET,
++};
++
++static struct powerdomain wkup_33xx_pwrdm = {
++ .name = "wkup_pwrdm",
++ .voltdm = { .name = "core" },
++ .prcm_partition = AM33XX_PRM_PARTITION,
++ .prcm_offs = AM33XX_PRM_WKUP_MOD,
++ .pwrsts = PWRSTS_ON,
++ .pwrstctrl_offs = AM33XX_PM_WKUP_PWRSTCTRL_OFFSET,
++ .pwrstst_offs = AM33XX_PM_WKUP_PWRSTST_OFFSET,
++};
++
++static struct powerdomain per_33xx_pwrdm = {
++ .name = "per_pwrdm",
++ .voltdm = { .name = "core" },
++ .prcm_partition = AM33XX_PRM_PARTITION,
++ .prcm_offs = AM33XX_PRM_PER_MOD,
++ .pwrsts = PWRSTS_OFF_RET_ON,
++ .pwrsts_logic_ret = PWRSTS_OFF_RET,
++ .pwrstctrl_offs = AM33XX_PM_PER_PWRSTCTRL_OFFSET,
++ .pwrstst_offs = AM33XX_PM_PER_PWRSTST_OFFSET,
++ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
++ .banks = 3,
++ .pwrsts_mem_ret = {
++ [0] = PWRSTS_OFF_RET, /* pruss_mem */
++ [1] = PWRSTS_OFF_RET, /* per_mem */
++ [2] = PWRSTS_OFF_RET, /* ram_mem */
++ },
++ .pwrsts_mem_on = {
++ [0] = PWRSTS_ON, /* pruss_mem */
++ [1] = PWRSTS_ON, /* per_mem */
++ [2] = PWRSTS_ON, /* ram_mem */
++ },
++};
++
++static struct powerdomain mpu_33xx_pwrdm = {
++ .name = "mpu_pwrdm",
++ .voltdm = { .name = "mpu" },
++ .prcm_partition = AM33XX_PRM_PARTITION,
++ .prcm_offs = AM33XX_PRM_MPU_MOD,
++ .pwrsts = PWRSTS_OFF_RET_ON,
++ .pwrsts_logic_ret = PWRSTS_OFF_RET,
++ .pwrstctrl_offs = AM33XX_PM_MPU_PWRSTCTRL_OFFSET,
++ .pwrstst_offs = AM33XX_PM_MPU_PWRSTST_OFFSET,
++ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
++ .banks = 3,
++ .pwrsts_mem_ret = {
++ [0] = PWRSTS_OFF_RET, /* mpu_l1 */
++ [1] = PWRSTS_OFF_RET, /* mpu_l2 */
++ [2] = PWRSTS_OFF_RET, /* mpu_ram */
++ },
++ .pwrsts_mem_on = {
++ [0] = PWRSTS_ON, /* mpu_l1 */
++ [1] = PWRSTS_ON, /* mpu_l2 */
++ [2] = PWRSTS_ON, /* mpu_ram */
++ },
++};
++
++static struct powerdomain cefuse_33xx_pwrdm = {
++ .name = "cefuse_pwrdm",
++ .voltdm = { .name = "core" },
++ .prcm_partition = AM33XX_PRM_PARTITION,
++ .prcm_offs = AM33XX_PRM_CEFUSE_MOD,
++ .pwrsts = PWRSTS_OFF_ON,
++ .pwrstctrl_offs = AM33XX_PM_CEFUSE_PWRSTCTRL_OFFSET,
++ .pwrstst_offs = AM33XX_PM_CEFUSE_PWRSTST_OFFSET,
++};
++
++static struct powerdomain *powerdomains_am33xx[] __initdata = {
++ &gfx_33xx_pwrdm,
++ &rtc_33xx_pwrdm,
++ &wkup_33xx_pwrdm,
++ &per_33xx_pwrdm,
++ &mpu_33xx_pwrdm,
++ &cefuse_33xx_pwrdm,
++ NULL,
++};
++
++void __init am33xx_powerdomains_init(void)
++{
++ pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
++ pwrdm_register_pwrdms(powerdomains_am33xx);
++ pwrdm_complete_init();
++}
+diff --git a/arch/arm/mach-omap2/powerdomains44xx_data.c b/arch/arm/mach-omap2/powerdomains44xx_data.c
+index 704664c..7cf8dcd0 100644
+--- a/arch/arm/mach-omap2/powerdomains44xx_data.c
++++ b/arch/arm/mach-omap2/powerdomains44xx_data.c
+@@ -352,7 +352,16 @@ static struct powerdomain *powerdomains_omap44xx[] __initdata = {
+
+ void __init omap44xx_powerdomains_init(void)
+ {
++ int i;
++
+ pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
++ /* Initialise PRM reg offs to default value */
++ for (i = 0; powerdomains_omap44xx[i] != NULL; i++) {
++ struct powerdomain *pwrdm = powerdomains_omap44xx[i];
++
++ pwrdm->pwrstctrl_offs = OMAP4_PM_PWSTCTRL;
++ pwrdm->pwrstst_offs = OMAP4_PM_PWSTST;
++ }
+ pwrdm_register_pwrdms(powerdomains_omap44xx);
+ pwrdm_complete_init();
+ }
+diff --git a/arch/arm/mach-omap2/prcm-common.h b/arch/arm/mach-omap2/prcm-common.h
+index 0363dcb..5aa5435 100644
+--- a/arch/arm/mach-omap2/prcm-common.h
++++ b/arch/arm/mach-omap2/prcm-common.h
+@@ -4,7 +4,7 @@
+ /*
+ * OMAP2/3 PRCM base and module definitions
+ *
+- * Copyright (C) 2007-2009 Texas Instruments, Inc.
++ * Copyright (C) 2007-2009, 2011 Texas Instruments, Inc.
+ * Copyright (C) 2007-2009 Nokia Corporation
+ *
+ * Written by Paul Walmsley
+@@ -201,6 +201,8 @@
+ #define OMAP3430_EN_MMC2_SHIFT 25
+ #define OMAP3430_EN_MMC1_MASK (1 << 24)
+ #define OMAP3430_EN_MMC1_SHIFT 24
++#define OMAP3430_EN_UART4_MASK (1 << 23)
++#define OMAP3430_EN_UART4_SHIFT 23
+ #define OMAP3430_EN_MCSPI4_MASK (1 << 21)
+ #define OMAP3430_EN_MCSPI4_SHIFT 21
+ #define OMAP3430_EN_MCSPI3_MASK (1 << 20)
+@@ -408,6 +410,79 @@
+ extern void __iomem *prm_base;
+ extern void __iomem *cm_base;
+ extern void __iomem *cm2_base;
++
++/**
++ * struct omap_prcm_irq - describes a PRCM interrupt bit
++ * @name: a short name describing the interrupt type, e.g. "wkup" or "io"
++ * @offset: the bit shift of the interrupt inside the IRQ{ENABLE,STATUS} regs
++ * @priority: should this interrupt be handled before @priority=false IRQs?
++ *
++ * Describes interrupt bits inside the PRM_IRQ{ENABLE,STATUS}_MPU* registers.
++ * On systems with multiple PRM MPU IRQ registers, the bitfields read from
++ * the registers are concatenated, so @offset could be > 31 on these systems -
++ * see omap_prm_irq_handler() for more details. I/O ring interrupts should
++ * have @priority set to true.
++ */
++struct omap_prcm_irq {
++ const char *name;
++ unsigned int offset;
++ bool priority;
++};
++
++/**
++ * struct omap_prcm_irq_setup - PRCM interrupt controller details
++ * @ack: PRM register offset for the first PRM_IRQSTATUS_MPU register
++ * @mask: PRM register offset for the first PRM_IRQENABLE_MPU register
++ * @nr_regs: number of PRM_IRQ{STATUS,ENABLE}_MPU* registers
++ * @nr_irqs: number of entries in the @irqs array
++ * @irqs: ptr to an array of PRCM interrupt bits (see @nr_irqs)
++ * @irq: MPU IRQ asserted when a PRCM interrupt arrives
++ * @read_pending_irqs: fn ptr to determine if any PRCM IRQs are pending
++ * @ocp_barrier: fn ptr to force buffered PRM writes to complete
++ * @save_and_clear_irqen: fn ptr to save and clear IRQENABLE regs
++ * @restore_irqen: fn ptr to restore the previously-saved IRQENABLE regs
++ * @saved_mask: IRQENABLE regs are saved here during suspend
++ * @priority_mask: 1 bit per IRQ, set to 1 if omap_prcm_irq.priority = true
++ * @base_irq: base dynamic IRQ number, returned from irq_alloc_descs() in init
++ * @suspended: set to true after Linux suspend code has called our ->prepare()
++ * @suspend_save_flag: set to true after IRQ masks have been saved and disabled
++ *
++ * @saved_mask, @priority_mask, @base_irq, @suspended, and
++ * @suspend_save_flag are populated dynamically, and are not to be
++ * specified in static initializers.
++ */
++struct omap_prcm_irq_setup {
++ u16 ack;
++ u16 mask;
++ u8 nr_regs;
++ u8 nr_irqs;
++ const struct omap_prcm_irq *irqs;
++ int irq;
++ void (*read_pending_irqs)(unsigned long *events);
++ void (*ocp_barrier)(void);
++ void (*save_and_clear_irqen)(u32 *saved_mask);
++ void (*restore_irqen)(u32 *saved_mask);
++ u32 *saved_mask;
++ u32 *priority_mask;
++ int base_irq;
++ bool suspended;
++ bool suspend_save_flag;
++};
++
++/* OMAP_PRCM_IRQ: convenience macro for creating struct omap_prcm_irq records */
++#define OMAP_PRCM_IRQ(_name, _offset, _priority) { \
++ .name = _name, \
++ .offset = _offset, \
++ .priority = _priority \
++ }
++
++extern void omap_prcm_irq_cleanup(void);
++extern int omap_prcm_register_chain_handler(
++ struct omap_prcm_irq_setup *irq_setup);
++extern int omap_prcm_event_to_irq(const char *event);
++extern void omap_prcm_irq_prepare(void);
++extern void omap_prcm_irq_complete(void);
++
+ # endif
+
+ #endif
+diff --git a/arch/arm/mach-omap2/prcm.c b/arch/arm/mach-omap2/prcm.c
+index 597e2da..267d07c 100644
+--- a/arch/arm/mach-omap2/prcm.c
++++ b/arch/arm/mach-omap2/prcm.c
+@@ -26,7 +26,7 @@
+ #include <linux/export.h>
+
+ #include <mach/system.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/prcm.h>
+ #include <plat/irqs.h>
+
+@@ -35,6 +35,7 @@
+ #include "cm2xxx_3xxx.h"
+ #include "prm2xxx_3xxx.h"
+ #include "prm44xx.h"
++#include "prm33xx.h"
+ #include "prminst44xx.h"
+ #include "prm-regbits-24xx.h"
+ #include "prm-regbits-44xx.h"
+@@ -67,6 +68,10 @@ static void omap_prcm_arch_reset(char mode, const char *cmd)
+ omap2xxx_clk_prepare_for_reboot();
+
+ prcm_offs = WKUP_MOD;
++ } else if (cpu_is_am33xx()) {
++ prcm_offs = AM33XX_PRM_DEVICE_MOD;
++ omap2_prm_set_mod_reg_bits(OMAP4430_RST_GLOBAL_COLD_SW_MASK,
++ prcm_offs, AM33XX_PRM_RSTCTRL_OFFSET);
+ } else if (cpu_is_omap34xx()) {
+ prcm_offs = OMAP3430_GR_MOD;
+ omap3_ctrl_write_boot_mode((cmd ? (u8)*cmd : 0));
+diff --git a/arch/arm/mach-omap2/prcm44xx.h b/arch/arm/mach-omap2/prcm44xx.h
+index 7334ffb..02e5a8b 100644
+--- a/arch/arm/mach-omap2/prcm44xx.h
++++ b/arch/arm/mach-omap2/prcm44xx.h
+@@ -31,6 +31,8 @@
+ #define OMAP4430_CM2_PARTITION 3
+ #define OMAP4430_SCRM_PARTITION 4
+ #define OMAP4430_PRCM_MPU_PARTITION 5
++/* AM33XX PRCM is closer to OMAP4, so try to reuse all of the OMAP4 APIs */
++#define AM33XX_PRM_PARTITION 1
+
+ /*
+ * OMAP4_MAX_PRCM_PARTITIONS: set to the highest value of the PRCM partition
+diff --git a/arch/arm/mach-omap2/prcm_mpu44xx.c b/arch/arm/mach-omap2/prcm_mpu44xx.c
+index 171fe17..ca669b5 100644
+--- a/arch/arm/mach-omap2/prcm_mpu44xx.c
++++ b/arch/arm/mach-omap2/prcm_mpu44xx.c
+@@ -15,7 +15,7 @@
+ #include <linux/err.h>
+ #include <linux/io.h>
+
+-#include <plat/common.h>
++#include "common.h"
+
+ #include "prcm_mpu44xx.h"
+ #include "cm-regbits-44xx.h"
+diff --git a/arch/arm/mach-omap2/prm-regbits-33xx.h b/arch/arm/mach-omap2/prm-regbits-33xx.h
+new file mode 100644
+index 0000000..f716ae1
+--- /dev/null
++++ b/arch/arm/mach-omap2/prm-regbits-33xx.h
+@@ -0,0 +1,357 @@
++/*
++ * AM33XX Power Management register bits
++ *
++ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __ARCH_ARM_MACH_OMAP2_PRM_REGBITS_33XX_H
++#define __ARCH_ARM_MACH_OMAP2_PRM_REGBITS_33XX_H
++
++#include "prm.h"
++
++/* Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_MPU_SETUP */
++#define AM33XX_ABBOFF_ACT_EXPORT_SHIFT 1
++#define AM33XX_ABBOFF_ACT_EXPORT_MASK (1 << 1)
++
++/* Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_MPU_SETUP */
++#define AM33XX_ABBOFF_SLEEP_EXPORT_SHIFT 2
++#define AM33XX_ABBOFF_SLEEP_EXPORT_MASK (1 << 2)
++
++/* Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_MPU_SETUP */
++#define AM33XX_AIPOFF_SHIFT 8
++#define AM33XX_AIPOFF_MASK (1 << 8)
++
++/* Used by PM_WKUP_PWRSTST */
++#define AM33XX_DEBUGSS_MEM_STATEST_SHIFT 17
++#define AM33XX_DEBUGSS_MEM_STATEST_MASK (0x3 << 17)
++
++/* Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_MPU_SETUP */
++#define AM33XX_DISABLE_RTA_EXPORT_SHIFT 0
++#define AM33XX_DISABLE_RTA_EXPORT_MASK (1 << 0)
++
++/* Used by PRM_IRQENABLE_M3, PRM_IRQENABLE_MPU */
++#define AM33XX_DPLL_CORE_RECAL_EN_SHIFT 12
++#define AM33XX_DPLL_CORE_RECAL_EN_MASK (1 << 12)
++
++/* Used by PRM_IRQSTATUS_M3, PRM_IRQSTATUS_MPU */
++#define AM33XX_DPLL_CORE_RECAL_ST_SHIFT 12
++#define AM33XX_DPLL_CORE_RECAL_ST_MASK (1 << 12)
++
++/* Used by PRM_IRQENABLE_M3, PRM_IRQENABLE_MPU */
++#define AM33XX_DPLL_DDR_RECAL_EN_SHIFT 14
++#define AM33XX_DPLL_DDR_RECAL_EN_MASK (1 << 14)
++
++/* Used by PRM_IRQSTATUS_M3, PRM_IRQSTATUS_MPU */
++#define AM33XX_DPLL_DDR_RECAL_ST_SHIFT 14
++#define AM33XX_DPLL_DDR_RECAL_ST_MASK (1 << 14)
++
++/* Used by PRM_IRQENABLE_M3, PRM_IRQENABLE_MPU */
++#define AM33XX_DPLL_DISP_RECAL_EN_SHIFT 15
++#define AM33XX_DPLL_DISP_RECAL_EN_MASK (1 << 15)
++
++/* Used by PRM_IRQSTATUS_M3, PRM_IRQSTATUS_MPU */
++#define AM33XX_DPLL_DISP_RECAL_ST_SHIFT 13
++#define AM33XX_DPLL_DISP_RECAL_ST_MASK (1 << 13)
++
++/* Used by PRM_IRQENABLE_M3, PRM_IRQENABLE_MPU */
++#define AM33XX_DPLL_MPU_RECAL_EN_SHIFT 11
++#define AM33XX_DPLL_MPU_RECAL_EN_MASK (1 << 11)
++
++/* Used by PRM_IRQSTATUS_M3, PRM_IRQSTATUS_MPU */
++#define AM33XX_DPLL_MPU_RECAL_ST_SHIFT 11
++#define AM33XX_DPLL_MPU_RECAL_ST_MASK (1 << 11)
++
++/* Used by PRM_IRQENABLE_M3, PRM_IRQENABLE_MPU */
++#define AM33XX_DPLL_PER_RECAL_EN_SHIFT 13
++#define AM33XX_DPLL_PER_RECAL_EN_MASK (1 << 13)
++
++/* Used by PRM_IRQSTATUS_M3, PRM_IRQSTATUS_MPU */
++#define AM33XX_DPLL_PER_RECAL_ST_SHIFT 15
++#define AM33XX_DPLL_PER_RECAL_ST_MASK (1 << 15)
++
++/* Used by RM_WKUP_RSTST */
++#define AM33XX_EMULATION_M3_RST_SHIFT 6
++#define AM33XX_EMULATION_M3_RST_MASK (1 << 6)
++
++/* Used by RM_MPU_RSTST */
++#define AM33XX_EMULATION_MPU_RST_SHIFT 5
++#define AM33XX_EMULATION_MPU_RST_MASK (1 << 5)
++
++/* Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_MPU_SETUP */
++#define AM33XX_ENFUNC1_EXPORT_SHIFT 3
++#define AM33XX_ENFUNC1_EXPORT_MASK (1 << 3)
++
++/* Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_MPU_SETUP */
++#define AM33XX_ENFUNC3_EXPORT_SHIFT 5
++#define AM33XX_ENFUNC3_EXPORT_MASK (1 << 5)
++
++/* Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_MPU_SETUP */
++#define AM33XX_ENFUNC4_SHIFT 6
++#define AM33XX_ENFUNC4_MASK (1 << 6)
++
++/* Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_MPU_SETUP */
++#define AM33XX_ENFUNC5_SHIFT 7
++#define AM33XX_ENFUNC5_MASK (1 << 7)
++
++/* Used by PRM_RSTST */
++#define AM33XX_EXTERNAL_WARM_RST_SHIFT 5
++#define AM33XX_EXTERNAL_WARM_RST_MASK (1 << 5)
++
++/* Used by PRM_IRQENABLE_M3, PRM_IRQENABLE_MPU */
++#define AM33XX_FORCEWKUP_EN_SHIFT 10
++#define AM33XX_FORCEWKUP_EN_MASK (1 << 10)
++
++/* Used by PRM_IRQSTATUS_M3, PRM_IRQSTATUS_MPU */
++#define AM33XX_FORCEWKUP_ST_SHIFT 10
++#define AM33XX_FORCEWKUP_ST_MASK (1 << 10)
++
++/* Used by PM_GFX_PWRSTCTRL */
++#define AM33XX_GFX_MEM_ONSTATE_SHIFT 17
++#define AM33XX_GFX_MEM_ONSTATE_MASK (0x3 << 17)
++
++/* Used by PM_GFX_PWRSTCTRL */
++#define AM33XX_GFX_MEM_RETSTATE_SHIFT 6
++#define AM33XX_GFX_MEM_RETSTATE_MASK (1 << 6)
++
++/* Used by PM_GFX_PWRSTST */
++#define AM33XX_GFX_MEM_STATEST_SHIFT 4
++#define AM33XX_GFX_MEM_STATEST_MASK (0x3 << 4)
++
++/* Used by RM_GFX_RSTCTRL, RM_GFX_RSTST */
++#define AM33XX_GFX_RST_SHIFT 0
++#define AM33XX_GFX_RST_MASK (1 << 0)
++
++/* Used by PRM_RSTST */
++#define AM33XX_GLOBAL_COLD_RST_SHIFT 0
++#define AM33XX_GLOBAL_COLD_RST_MASK (1 << 0)
++
++/* Used by PRM_RSTST */
++#define AM33XX_GLOBAL_WARM_SW_RST_SHIFT 1
++#define AM33XX_GLOBAL_WARM_SW_RST_MASK (1 << 1)
++
++/* Used by RM_WKUP_RSTST */
++#define AM33XX_ICECRUSHER_M3_RST_SHIFT 7
++#define AM33XX_ICECRUSHER_M3_RST_MASK (1 << 7)
++
++/* Used by RM_MPU_RSTST */
++#define AM33XX_ICECRUSHER_MPU_RST_SHIFT 6
++#define AM33XX_ICECRUSHER_MPU_RST_MASK (1 << 6)
++
++/* Used by PRM_RSTST */
++#define AM33XX_ICEPICK_RST_SHIFT 9
++#define AM33XX_ICEPICK_RST_MASK (1 << 9)
++
++/* Used by RM_PER_RSTCTRL */
++#define AM33XX_ICSS_LRST_SHIFT 1
++#define AM33XX_ICSS_LRST_MASK (1 << 1)
++
++/* Used by PM_PER_PWRSTCTRL */
++#define AM33XX_ICSS_MEM_ONSTATE_SHIFT 5
++#define AM33XX_ICSS_MEM_ONSTATE_MASK (0x3 << 5)
++
++/* Used by PM_PER_PWRSTCTRL */
++#define AM33XX_ICSS_MEM_RETSTATE_SHIFT 7
++#define AM33XX_ICSS_MEM_RETSTATE_MASK (1 << 7)
++
++/* Used by PM_PER_PWRSTST */
++#define AM33XX_ICSS_MEM_STATEST_SHIFT 23
++#define AM33XX_ICSS_MEM_STATEST_MASK (0x3 << 23)
++
++/*
++ * Used by PM_GFX_PWRSTST, PM_CEFUSE_PWRSTST, PM_PER_PWRSTST, PM_MPU_PWRSTST,
++ * PM_WKUP_PWRSTST, PM_RTC_PWRSTST
++ */
++#define AM33XX_INTRANSITION_SHIFT 20
++#define AM33XX_INTRANSITION_MASK (1 << 20)
++
++/* Used by PM_CEFUSE_PWRSTST */
++#define AM33XX_LASTPOWERSTATEENTERED_SHIFT 24
++#define AM33XX_LASTPOWERSTATEENTERED_MASK (0x3 << 24)
++
++/* Used by PM_GFX_PWRSTCTRL, PM_MPU_PWRSTCTRL, PM_RTC_PWRSTCTRL */
++#define AM33XX_LOGICRETSTATE_SHIFT 2
++#define AM33XX_LOGICRETSTATE_MASK (1 << 2)
++
++/* Renamed from LOGICRETSTATE Used by PM_PER_PWRSTCTRL, PM_WKUP_PWRSTCTRL */
++#define AM33XX_LOGICRETSTATE_3_3_SHIFT 3
++#define AM33XX_LOGICRETSTATE_3_3_MASK (1 << 3)
++
++/*
++ * Used by PM_GFX_PWRSTST, PM_CEFUSE_PWRSTST, PM_PER_PWRSTST, PM_MPU_PWRSTST,
++ * PM_WKUP_PWRSTST, PM_RTC_PWRSTST
++ */
++#define AM33XX_LOGICSTATEST_SHIFT 2
++#define AM33XX_LOGICSTATEST_MASK (1 << 2)
++
++/*
++ * Used by PM_GFX_PWRSTCTRL, PM_CEFUSE_PWRSTCTRL, PM_PER_PWRSTCTRL,
++ * PM_MPU_PWRSTCTRL, PM_WKUP_PWRSTCTRL, PM_RTC_PWRSTCTRL
++ */
++#define AM33XX_LOWPOWERSTATECHANGE_SHIFT 4
++#define AM33XX_LOWPOWERSTATECHANGE_MASK (1 << 4)
++
++/* Used by PM_MPU_PWRSTCTRL */
++#define AM33XX_MPU_L1_ONSTATE_SHIFT 18
++#define AM33XX_MPU_L1_ONSTATE_MASK (0x3 << 18)
++
++/* Used by PM_MPU_PWRSTCTRL */
++#define AM33XX_MPU_L1_RETSTATE_SHIFT 22
++#define AM33XX_MPU_L1_RETSTATE_MASK (1 << 22)
++
++/* Used by PM_MPU_PWRSTST */
++#define AM33XX_MPU_L1_STATEST_SHIFT 6
++#define AM33XX_MPU_L1_STATEST_MASK (0x3 << 6)
++
++/* Used by PM_MPU_PWRSTCTRL */
++#define AM33XX_MPU_L2_ONSTATE_SHIFT 20
++#define AM33XX_MPU_L2_ONSTATE_MASK (0x3 << 20)
++
++/* Used by PM_MPU_PWRSTCTRL */
++#define AM33XX_MPU_L2_RETSTATE_SHIFT 23
++#define AM33XX_MPU_L2_RETSTATE_MASK (1 << 23)
++
++/* Used by PM_MPU_PWRSTST */
++#define AM33XX_MPU_L2_STATEST_SHIFT 8
++#define AM33XX_MPU_L2_STATEST_MASK (0x3 << 8)
++
++/* Used by PM_MPU_PWRSTCTRL */
++#define AM33XX_MPU_RAM_ONSTATE_SHIFT 16
++#define AM33XX_MPU_RAM_ONSTATE_MASK (0x3 << 16)
++
++/* Used by PM_MPU_PWRSTCTRL */
++#define AM33XX_MPU_RAM_RETSTATE_SHIFT 24
++#define AM33XX_MPU_RAM_RETSTATE_MASK (1 << 24)
++
++/* Used by PM_MPU_PWRSTST */
++#define AM33XX_MPU_RAM_STATEST_SHIFT 4
++#define AM33XX_MPU_RAM_STATEST_MASK (0x3 << 4)
++
++/* Used by PRM_RSTST */
++#define AM33XX_MPU_SECURITY_VIOL_RST_SHIFT 2
++#define AM33XX_MPU_SECURITY_VIOL_RST_MASK (1 << 2)
++
++/* Used by PRM_SRAM_COUNT */
++#define AM33XX_PCHARGECNT_VALUE_SHIFT 0
++#define AM33XX_PCHARGECNT_VALUE_MASK (0x3f << 0)
++
++/* Used by RM_PER_RSTCTRL */
++#define AM33XX_PCI_LRST_SHIFT 0
++#define AM33XX_PCI_LRST_MASK (1 << 0)
++
++/* Renamed from PCI_LRST Used by RM_PER_RSTST */
++#define AM33XX_PCI_LRST_5_5_SHIFT 5
++#define AM33XX_PCI_LRST_5_5_MASK (1 << 5)
++
++/* Used by PM_PER_PWRSTCTRL */
++#define AM33XX_PER_MEM_ONSTATE_SHIFT 25
++#define AM33XX_PER_MEM_ONSTATE_MASK (0x3 << 25)
++
++/* Used by PM_PER_PWRSTCTRL */
++#define AM33XX_PER_MEM_RETSTATE_SHIFT 29
++#define AM33XX_PER_MEM_RETSTATE_MASK (1 << 29)
++
++/* Used by PM_PER_PWRSTST */
++#define AM33XX_PER_MEM_STATEST_SHIFT 17
++#define AM33XX_PER_MEM_STATEST_MASK (0x3 << 17)
++
++/*
++ * Used by PM_GFX_PWRSTCTRL, PM_CEFUSE_PWRSTCTRL, PM_PER_PWRSTCTRL,
++ * PM_MPU_PWRSTCTRL
++ */
++#define AM33XX_POWERSTATE_SHIFT 0
++#define AM33XX_POWERSTATE_MASK (0x3 << 0)
++
++/* Used by PM_GFX_PWRSTST, PM_CEFUSE_PWRSTST, PM_PER_PWRSTST, PM_MPU_PWRSTST */
++#define AM33XX_POWERSTATEST_SHIFT 0
++#define AM33XX_POWERSTATEST_MASK (0x3 << 0)
++
++/* Used by PM_PER_PWRSTCTRL */
++#define AM33XX_RAM_MEM_ONSTATE_SHIFT 30
++#define AM33XX_RAM_MEM_ONSTATE_MASK (0x3 << 30)
++
++/* Used by PM_PER_PWRSTCTRL */
++#define AM33XX_RAM_MEM_RETSTATE_SHIFT 27
++#define AM33XX_RAM_MEM_RETSTATE_MASK (1 << 27)
++
++/* Used by PM_PER_PWRSTST */
++#define AM33XX_RAM_MEM_STATEST_SHIFT 21
++#define AM33XX_RAM_MEM_STATEST_MASK (0x3 << 21)
++
++/* Used by PRM_LDO_SRAM_CORE_CTRL, PRM_LDO_SRAM_MPU_CTRL */
++#define AM33XX_RETMODE_ENABLE_SHIFT 0
++#define AM33XX_RETMODE_ENABLE_MASK (1 << 0)
++
++/* Used by REVISION_PRM */
++#define AM33XX_REV_SHIFT 0
++#define AM33XX_REV_MASK (0xff << 0)
++
++/* Used by PRM_RSTTIME */
++#define AM33XX_RSTTIME1_SHIFT 0
++#define AM33XX_RSTTIME1_MASK (0xff << 0)
++
++/* Used by PRM_RSTTIME */
++#define AM33XX_RSTTIME2_SHIFT 8
++#define AM33XX_RSTTIME2_MASK (0x1f << 8)
++
++/* Used by PRM_RSTCTRL */
++#define AM33XX_RST_GLOBAL_COLD_SW_SHIFT 1
++#define AM33XX_RST_GLOBAL_COLD_SW_MASK (1 << 1)
++
++/* Used by PRM_RSTCTRL */
++#define AM33XX_RST_GLOBAL_WARM_SW_SHIFT 0
++#define AM33XX_RST_GLOBAL_WARM_SW_MASK (1 << 0)
++
++/* Used by PRM_SRAM_COUNT */
++#define AM33XX_SLPCNT_VALUE_SHIFT 16
++#define AM33XX_SLPCNT_VALUE_MASK (0xff << 16)
++
++/* Used by PRM_LDO_SRAM_CORE_CTRL, PRM_LDO_SRAM_MPU_CTRL */
++#define AM33XX_SRAMLDO_STATUS_SHIFT 8
++#define AM33XX_SRAMLDO_STATUS_MASK (1 << 8)
++
++/* Used by PRM_LDO_SRAM_CORE_CTRL, PRM_LDO_SRAM_MPU_CTRL */
++#define AM33XX_SRAM_IN_TRANSITION_SHIFT 9
++#define AM33XX_SRAM_IN_TRANSITION_MASK (1 << 9)
++
++/* Used by PRM_SRAM_COUNT */
++#define AM33XX_STARTUP_COUNT_SHIFT 24
++#define AM33XX_STARTUP_COUNT_MASK (0xff << 24)
++
++/* Used by PRM_IRQENABLE_M3, PRM_IRQENABLE_MPU */
++#define AM33XX_TRANSITION_EN_SHIFT 8
++#define AM33XX_TRANSITION_EN_MASK (1 << 8)
++
++/* Used by PRM_IRQSTATUS_M3, PRM_IRQSTATUS_MPU */
++#define AM33XX_TRANSITION_ST_SHIFT 8
++#define AM33XX_TRANSITION_ST_MASK (1 << 8)
++
++/* Used by PRM_SRAM_COUNT */
++#define AM33XX_VSETUPCNT_VALUE_SHIFT 8
++#define AM33XX_VSETUPCNT_VALUE_MASK (0xff << 8)
++
++/* Used by PRM_RSTST */
++#define AM33XX_WDT0_RST_SHIFT 3
++#define AM33XX_WDT0_RST_MASK (1 << 3)
++
++/* Used by PRM_RSTST */
++#define AM33XX_WDT1_RST_SHIFT 4
++#define AM33XX_WDT1_RST_MASK (1 << 4)
++
++/* Used by RM_WKUP_RSTCTRL */
++#define AM33XX_WKUP_M3_LRST_SHIFT 3
++#define AM33XX_WKUP_M3_LRST_MASK (1 << 3)
++
++/* Renamed from WKUP_M3_LRST Used by RM_WKUP_RSTST */
++#define AM33XX_WKUP_M3_LRST_5_5_SHIFT 5
++#define AM33XX_WKUP_M3_LRST_5_5_MASK (1 << 5)
++
++#endif
+diff --git a/arch/arm/mach-omap2/prm2xxx_3xxx.c b/arch/arm/mach-omap2/prm2xxx_3xxx.c
+index f02d87f..fa803c6 100644
+--- a/arch/arm/mach-omap2/prm2xxx_3xxx.c
++++ b/arch/arm/mach-omap2/prm2xxx_3xxx.c
+@@ -1,7 +1,7 @@
+ /*
+ * OMAP2/3 PRM module functions
+ *
+- * Copyright (C) 2010 Texas Instruments, Inc.
++ * Copyright (C) 2010-2011 Texas Instruments, Inc.
+ * Copyright (C) 2010 Nokia Corporation
+ * Benoît Cousson
+ * Paul Walmsley
+@@ -16,7 +16,7 @@
+ #include <linux/err.h>
+ #include <linux/io.h>
+
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/cpu.h>
+ #include <plat/prcm.h>
+
+@@ -27,6 +27,24 @@
+ #include "prm-regbits-24xx.h"
+ #include "prm-regbits-34xx.h"
+
++static const struct omap_prcm_irq omap3_prcm_irqs[] = {
++ OMAP_PRCM_IRQ("wkup", 0, 0),
++ OMAP_PRCM_IRQ("io", 9, 1),
++};
++
++static struct omap_prcm_irq_setup omap3_prcm_irq_setup = {
++ .ack = OMAP3_PRM_IRQSTATUS_MPU_OFFSET,
++ .mask = OMAP3_PRM_IRQENABLE_MPU_OFFSET,
++ .nr_regs = 1,
++ .irqs = omap3_prcm_irqs,
++ .nr_irqs = ARRAY_SIZE(omap3_prcm_irqs),
++ .irq = INT_34XX_PRCM_MPU_IRQ,
++ .read_pending_irqs = &omap3xxx_prm_read_pending_irqs,
++ .ocp_barrier = &omap3xxx_prm_ocp_barrier,
++ .save_and_clear_irqen = &omap3xxx_prm_save_and_clear_irqen,
++ .restore_irqen = &omap3xxx_prm_restore_irqen,
++};
++
+ u32 omap2_prm_read_mod_reg(s16 module, u16 idx)
+ {
+ return __raw_readl(prm_base + module + idx);
+@@ -212,3 +230,80 @@ u32 omap3_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset)
+ {
+ return omap2_prm_rmw_mod_reg_bits(mask, bits, OMAP3430_GR_MOD, offset);
+ }
++
++/**
++ * omap3xxx_prm_read_pending_irqs - read pending PRM MPU IRQs into @events
++ * @events: ptr to a u32, preallocated by caller
++ *
++ * Read PRM_IRQSTATUS_MPU bits, AND'ed with the currently-enabled PRM
++ * MPU IRQs, and store the result into the u32 pointed to by @events.
++ * No return value.
++ */
++void omap3xxx_prm_read_pending_irqs(unsigned long *events)
++{
++ u32 mask, st;
++
++ /* XXX Can the mask read be avoided (e.g., can it come from RAM?) */
++ mask = omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);
++ st = omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
++
++ events[0] = mask & st;
++}
++
++/**
++ * omap3xxx_prm_ocp_barrier - force buffered MPU writes to the PRM to complete
++ *
++ * Force any buffered writes to the PRM IP block to complete. Needed
++ * by the PRM IRQ handler, which reads and writes directly to the IP
++ * block, to avoid race conditions after acknowledging or clearing IRQ
++ * bits. No return value.
++ */
++void omap3xxx_prm_ocp_barrier(void)
++{
++ omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_REVISION_OFFSET);
++}
++
++/**
++ * omap3xxx_prm_save_and_clear_irqen - save/clear PRM_IRQENABLE_MPU reg
++ * @saved_mask: ptr to a u32 array to save IRQENABLE bits
++ *
++ * Save the PRM_IRQENABLE_MPU register to @saved_mask. @saved_mask
++ * must be allocated by the caller. Intended to be used in the PRM
++ * interrupt handler suspend callback. The OCP barrier is needed to
++ * ensure the write to disable PRM interrupts reaches the PRM before
++ * returning; otherwise, spurious interrupts might occur. No return
++ * value.
++ */
++void omap3xxx_prm_save_and_clear_irqen(u32 *saved_mask)
++{
++ saved_mask[0] = omap2_prm_read_mod_reg(OCP_MOD,
++ OMAP3_PRM_IRQENABLE_MPU_OFFSET);
++ omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);
++
++ /* OCP barrier */
++ omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_REVISION_OFFSET);
++}
++
++/**
++ * omap3xxx_prm_restore_irqen - set PRM_IRQENABLE_MPU register from args
++ * @saved_mask: ptr to a u32 array of IRQENABLE bits saved previously
++ *
++ * Restore the PRM_IRQENABLE_MPU register from @saved_mask. Intended
++ * to be used in the PRM interrupt handler resume callback to restore
++ * values saved by omap3xxx_prm_save_and_clear_irqen(). No OCP
++ * barrier should be needed here; any pending PRM interrupts will fire
++ * once the writes reach the PRM. No return value.
++ */
++void omap3xxx_prm_restore_irqen(u32 *saved_mask)
++{
++ omap2_prm_write_mod_reg(saved_mask[0], OCP_MOD,
++ OMAP3_PRM_IRQENABLE_MPU_OFFSET);
++}
++
++static int __init omap3xxx_prcm_init(void)
++{
++ if (cpu_is_omap34xx() && !cpu_is_am33xx())
++ return omap_prcm_register_chain_handler(&omap3_prcm_irq_setup);
++ return 0;
++}
++subsys_initcall(omap3xxx_prcm_init);
+diff --git a/arch/arm/mach-omap2/prm2xxx_3xxx.h b/arch/arm/mach-omap2/prm2xxx_3xxx.h
+index cef533d..70ac2a1 100644
+--- a/arch/arm/mach-omap2/prm2xxx_3xxx.h
++++ b/arch/arm/mach-omap2/prm2xxx_3xxx.h
+@@ -1,7 +1,7 @@
+ /*
+ * OMAP2/3 Power/Reset Management (PRM) register definitions
+ *
+- * Copyright (C) 2007-2009 Texas Instruments, Inc.
++ * Copyright (C) 2007-2009, 2011 Texas Instruments, Inc.
+ * Copyright (C) 2008-2010 Nokia Corporation
+ * Paul Walmsley
+ *
+@@ -314,6 +314,13 @@ void omap3_prm_vp_clear_txdone(u8 vp_id);
+ extern u32 omap3_prm_vcvp_read(u8 offset);
+ extern void omap3_prm_vcvp_write(u32 val, u8 offset);
+ extern u32 omap3_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset);
++
++/* PRM interrupt-related functions */
++extern void omap3xxx_prm_read_pending_irqs(unsigned long *events);
++extern void omap3xxx_prm_ocp_barrier(void);
++extern void omap3xxx_prm_save_and_clear_irqen(u32 *saved_mask);
++extern void omap3xxx_prm_restore_irqen(u32 *saved_mask);
++
+ #endif /* CONFIG_ARCH_OMAP4 */
+
+ #endif
+diff --git a/arch/arm/mach-omap2/prm33xx.h b/arch/arm/mach-omap2/prm33xx.h
+new file mode 100644
+index 0000000..aa1e8c7
+--- /dev/null
++++ b/arch/arm/mach-omap2/prm33xx.h
+@@ -0,0 +1,118 @@
++/*
++ * AM33XX PRM instance offset macros
++ *
++ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __ARCH_ARM_MACH_OMAP2_PRM33XX_H
++#define __ARCH_ARM_MACH_OMAP2_PRM33XX_H
++
++#include "prcm-common.h"
++#include "prm.h"
++
++#define AM33XX_PRM_BASE 0x44E00000
++
++#define AM33XX_PRM_REGADDR(inst, reg) \
++ AM33XX_L4_WK_IO_ADDRESS(AM33XX_PRM_BASE + (inst) + (reg))
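++
++/*
++ * For example, AM33XX_PRM_REGADDR(AM33XX_PRM_MPU_MOD, 0x0000) yields
++ * the mapped virtual address of physical 0x44E00000 + 0x0E00 + 0x0000,
++ * i.e. the AM33XX PM_MPU_PWRSTCTRL register defined below.
++ */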
++
++/* PRM instances */
++#define AM33XX_PRM_OCP_SOCKET_MOD 0x0B00
++#define AM33XX_PRM_PER_MOD 0x0C00
++#define AM33XX_PRM_WKUP_MOD 0x0D00
++#define AM33XX_PRM_MPU_MOD 0x0E00
++#define AM33XX_PRM_DEVICE_MOD 0x0F00
++#define AM33XX_PRM_RTC_MOD 0x1000
++#define AM33XX_PRM_GFX_MOD 0x1100
++#define AM33XX_PRM_CEFUSE_MOD 0x1200
++
++/* PRM */
++
++/* PRM.OCP_SOCKET_PRM register offsets */
++#define AM33XX_REVISION_PRM_OFFSET 0x0000
++#define AM33XX_REVISION_PRM AM33XX_PRM_REGADDR(AM33XX_PRM_OCP_SOCKET_MOD, 0x0000)
++#define AM33XX_PRM_IRQSTATUS_MPU_OFFSET 0x0004
++#define AM33XX_PRM_IRQSTATUS_MPU AM33XX_PRM_REGADDR(AM33XX_PRM_OCP_SOCKET_MOD, 0x0004)
++#define AM33XX_PRM_IRQENABLE_MPU_OFFSET 0x0008
++#define AM33XX_PRM_IRQENABLE_MPU AM33XX_PRM_REGADDR(AM33XX_PRM_OCP_SOCKET_MOD, 0x0008)
++#define AM33XX_PRM_IRQSTATUS_M3_OFFSET 0x000c
++#define AM33XX_PRM_IRQSTATUS_M3 AM33XX_PRM_REGADDR(AM33XX_PRM_OCP_SOCKET_MOD, 0x000c)
++#define AM33XX_PRM_IRQENABLE_M3_OFFSET 0x0010
++#define AM33XX_PRM_IRQENABLE_M3 AM33XX_PRM_REGADDR(AM33XX_PRM_OCP_SOCKET_MOD, 0x0010)
++
++/* PRM.PER_PRM register offsets */
++#define AM33XX_RM_PER_RSTCTRL_OFFSET 0x0000
++#define AM33XX_RM_PER_RSTCTRL AM33XX_PRM_REGADDR(AM33XX_PRM_PER_MOD, 0x0000)
++#define AM33XX_RM_PER_RSTST_OFFSET 0x0004
++#define AM33XX_RM_PER_RSTST AM33XX_PRM_REGADDR(AM33XX_PRM_PER_MOD, 0x0004)
++#define AM33XX_PM_PER_PWRSTST_OFFSET 0x0008
++#define AM33XX_PM_PER_PWRSTST AM33XX_PRM_REGADDR(AM33XX_PRM_PER_MOD, 0x0008)
++#define AM33XX_PM_PER_PWRSTCTRL_OFFSET 0x000c
++#define AM33XX_PM_PER_PWRSTCTRL AM33XX_PRM_REGADDR(AM33XX_PRM_PER_MOD, 0x000c)
++
++/* PRM.WKUP_PRM register offsets */
++#define AM33XX_RM_WKUP_RSTCTRL_OFFSET 0x0000
++#define AM33XX_RM_WKUP_RSTCTRL AM33XX_PRM_REGADDR(AM33XX_PRM_WKUP_MOD, 0x0000)
++#define AM33XX_PM_WKUP_PWRSTCTRL_OFFSET 0x0004
++#define AM33XX_PM_WKUP_PWRSTCTRL AM33XX_PRM_REGADDR(AM33XX_PRM_WKUP_MOD, 0x0004)
++#define AM33XX_PM_WKUP_PWRSTST_OFFSET 0x0008
++#define AM33XX_PM_WKUP_PWRSTST AM33XX_PRM_REGADDR(AM33XX_PRM_WKUP_MOD, 0x0008)
++#define AM33XX_RM_WKUP_RSTST_OFFSET 0x000c
++#define AM33XX_RM_WKUP_RSTST AM33XX_PRM_REGADDR(AM33XX_PRM_WKUP_MOD, 0x000c)
++
++/* PRM.MPU_PRM register offsets */
++#define AM33XX_PM_MPU_PWRSTCTRL_OFFSET 0x0000
++#define AM33XX_PM_MPU_PWRSTCTRL AM33XX_PRM_REGADDR(AM33XX_PRM_MPU_MOD, 0x0000)
++#define AM33XX_PM_MPU_PWRSTST_OFFSET 0x0004
++#define AM33XX_PM_MPU_PWRSTST AM33XX_PRM_REGADDR(AM33XX_PRM_MPU_MOD, 0x0004)
++#define AM33XX_RM_MPU_RSTST_OFFSET 0x0008
++#define AM33XX_RM_MPU_RSTST AM33XX_PRM_REGADDR(AM33XX_PRM_MPU_MOD, 0x0008)
++
++/* PRM.DEVICE_PRM register offsets */
++#define AM33XX_PRM_RSTCTRL_OFFSET 0x0000
++#define AM33XX_PRM_RSTCTRL AM33XX_PRM_REGADDR(AM33XX_PRM_DEVICE_MOD, 0x0000)
++#define AM33XX_PRM_RSTTIME_OFFSET 0x0004
++#define AM33XX_PRM_RSTTIME AM33XX_PRM_REGADDR(AM33XX_PRM_DEVICE_MOD, 0x0004)
++#define AM33XX_PRM_RSTST_OFFSET 0x0008
++#define AM33XX_PRM_RSTST AM33XX_PRM_REGADDR(AM33XX_PRM_DEVICE_MOD, 0x0008)
++#define AM33XX_PRM_SRAM_COUNT_OFFSET 0x000c
++#define AM33XX_PRM_SRAM_COUNT AM33XX_PRM_REGADDR(AM33XX_PRM_DEVICE_MOD, 0x000c)
++#define AM33XX_PRM_LDO_SRAM_CORE_SETUP_OFFSET 0x0010
++#define AM33XX_PRM_LDO_SRAM_CORE_SETUP AM33XX_PRM_REGADDR(AM33XX_PRM_DEVICE_MOD, 0x0010)
++#define AM33XX_PRM_LDO_SRAM_CORE_CTRL_OFFSET 0x0014
++#define AM33XX_PRM_LDO_SRAM_CORE_CTRL AM33XX_PRM_REGADDR(AM33XX_PRM_DEVICE_MOD, 0x0014)
++#define AM33XX_PRM_LDO_SRAM_MPU_SETUP_OFFSET 0x0018
++#define AM33XX_PRM_LDO_SRAM_MPU_SETUP AM33XX_PRM_REGADDR(AM33XX_PRM_DEVICE_MOD, 0x0018)
++#define AM33XX_PRM_LDO_SRAM_MPU_CTRL_OFFSET 0x001c
++#define AM33XX_PRM_LDO_SRAM_MPU_CTRL AM33XX_PRM_REGADDR(AM33XX_PRM_DEVICE_MOD, 0x001c)
++
++/* PRM.RTC_PRM register offsets */
++#define AM33XX_PM_RTC_PWRSTCTRL_OFFSET 0x0000
++#define AM33XX_PM_RTC_PWRSTCTRL AM33XX_PRM_REGADDR(AM33XX_PRM_RTC_MOD, 0x0000)
++#define AM33XX_PM_RTC_PWRSTST_OFFSET 0x0004
++#define AM33XX_PM_RTC_PWRSTST AM33XX_PRM_REGADDR(AM33XX_PRM_RTC_MOD, 0x0004)
++
++/* PRM.GFX_PRM register offsets */
++#define AM33XX_PM_GFX_PWRSTCTRL_OFFSET 0x0000
++#define AM33XX_PM_GFX_PWRSTCTRL AM33XX_PRM_REGADDR(AM33XX_PRM_GFX_MOD, 0x0000)
++#define AM33XX_RM_GFX_RSTCTRL_OFFSET 0x0004
++#define AM33XX_RM_GFX_RSTCTRL AM33XX_PRM_REGADDR(AM33XX_PRM_GFX_MOD, 0x0004)
++#define AM33XX_PM_GFX_PWRSTST_OFFSET 0x0010
++#define AM33XX_PM_GFX_PWRSTST AM33XX_PRM_REGADDR(AM33XX_PRM_GFX_MOD, 0x0010)
++#define AM33XX_RM_GFX_RSTST_OFFSET 0x0014
++#define AM33XX_RM_GFX_RSTST AM33XX_PRM_REGADDR(AM33XX_PRM_GFX_MOD, 0x0014)
++
++/* PRM.CEFUSE_PRM register offsets */
++#define AM33XX_PM_CEFUSE_PWRSTCTRL_OFFSET 0x0000
++#define AM33XX_PM_CEFUSE_PWRSTCTRL AM33XX_PRM_REGADDR(AM33XX_PRM_CEFUSE_MOD, 0x0000)
++#define AM33XX_PM_CEFUSE_PWRSTST_OFFSET 0x0004
++#define AM33XX_PM_CEFUSE_PWRSTST AM33XX_PRM_REGADDR(AM33XX_PRM_CEFUSE_MOD, 0x0004)
++#endif
+diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
+index 495a31a..33dd655 100644
+--- a/arch/arm/mach-omap2/prm44xx.c
++++ b/arch/arm/mach-omap2/prm44xx.c
+@@ -17,7 +17,7 @@
+ #include <linux/err.h>
+ #include <linux/io.h>
+
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/cpu.h>
+ #include <plat/prcm.h>
+
+@@ -27,6 +27,24 @@
+ #include "prcm44xx.h"
+ #include "prminst44xx.h"
+
++static const struct omap_prcm_irq omap4_prcm_irqs[] = {
++ OMAP_PRCM_IRQ("wkup", 0, 0),
++ OMAP_PRCM_IRQ("io", 9, 1),
++};
++
++static struct omap_prcm_irq_setup omap4_prcm_irq_setup = {
++ .ack = OMAP4_PRM_IRQSTATUS_MPU_OFFSET,
++ .mask = OMAP4_PRM_IRQENABLE_MPU_OFFSET,
++ .nr_regs = 2,
++ .irqs = omap4_prcm_irqs,
++ .nr_irqs = ARRAY_SIZE(omap4_prcm_irqs),
++ .irq = OMAP44XX_IRQ_PRCM,
++ .read_pending_irqs = &omap44xx_prm_read_pending_irqs,
++ .ocp_barrier = &omap44xx_prm_ocp_barrier,
++ .save_and_clear_irqen = &omap44xx_prm_save_and_clear_irqen,
++ .restore_irqen = &omap44xx_prm_restore_irqen,
++};
++
+ /* PRM low-level functions */
+
+ /* Read a register in a CM/PRM instance in the PRM module */
+@@ -121,3 +139,101 @@ u32 omap4_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset)
+ OMAP4430_PRM_DEVICE_INST,
+ offset);
+ }
++
++static inline u32 _read_pending_irq_reg(u16 irqen_offs, u16 irqst_offs)
++{
++ u32 mask, st;
++
++ /* XXX read mask from RAM? */
++ mask = omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, irqen_offs);
++ st = omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, irqst_offs);
++
++ return mask & st;
++}
++
++/**
++ * omap44xx_prm_read_pending_irqs - read pending PRM MPU IRQs into @events
++ * @events: ptr to two consecutive u32s, preallocated by caller
++ *
++ * Read PRM_IRQSTATUS_MPU* bits, AND'ed with the currently-enabled PRM
++ * MPU IRQs, and store the result into the two u32s pointed to by @events.
++ * No return value.
++ */
++void omap44xx_prm_read_pending_irqs(unsigned long *events)
++{
++ events[0] = _read_pending_irq_reg(OMAP4_PRM_IRQENABLE_MPU_OFFSET,
++ OMAP4_PRM_IRQSTATUS_MPU_OFFSET);
++
++ events[1] = _read_pending_irq_reg(OMAP4_PRM_IRQENABLE_MPU_2_OFFSET,
++ OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET);
++}
++
++/**
++ * omap44xx_prm_ocp_barrier - force buffered MPU writes to the PRM to complete
++ *
++ * Force any buffered writes to the PRM IP block to complete. Needed
++ * by the PRM IRQ handler, which reads and writes directly to the IP
++ * block, to avoid race conditions after acknowledging or clearing IRQ
++ * bits. No return value.
++ */
++void omap44xx_prm_ocp_barrier(void)
++{
++ omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST,
++ OMAP4_REVISION_PRM_OFFSET);
++}
++
++/**
++ * omap44xx_prm_save_and_clear_irqen - save/clear PRM_IRQENABLE_MPU* regs
++ * @saved_mask: ptr to a u32 array to save IRQENABLE bits
++ *
++ * Save the PRM_IRQENABLE_MPU and PRM_IRQENABLE_MPU_2 registers to
++ * @saved_mask. @saved_mask must be allocated by the caller.
++ * Intended to be used in the PRM interrupt handler suspend callback.
++ * The OCP barrier is needed to ensure the write to disable PRM
++ * interrupts reaches the PRM before returning; otherwise, spurious
++ * interrupts might occur. No return value.
++ */
++void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask)
++{
++	saved_mask[0] =
++		omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST,
++					OMAP4_PRM_IRQENABLE_MPU_OFFSET);
++	saved_mask[1] =
++		omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST,
++					OMAP4_PRM_IRQENABLE_MPU_2_OFFSET);
++
++ omap4_prm_write_inst_reg(0, OMAP4430_PRM_DEVICE_INST,
++ OMAP4_PRM_IRQENABLE_MPU_OFFSET);
++ omap4_prm_write_inst_reg(0, OMAP4430_PRM_DEVICE_INST,
++ OMAP4_PRM_IRQENABLE_MPU_2_OFFSET);
++
++ /* OCP barrier */
++ omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST,
++ OMAP4_REVISION_PRM_OFFSET);
++}
++
++/**
++ * omap44xx_prm_restore_irqen - set PRM_IRQENABLE_MPU* registers from args
++ * @saved_mask: ptr to a u32 array of IRQENABLE bits saved previously
++ *
++ * Restore the PRM_IRQENABLE_MPU and PRM_IRQENABLE_MPU_2 registers from
++ * @saved_mask. Intended to be used in the PRM interrupt handler resume
++ * callback to restore values saved by omap44xx_prm_save_and_clear_irqen().
++ * No OCP barrier should be needed here; any pending PRM interrupts will fire
++ * once the writes reach the PRM. No return value.
++ */
++void omap44xx_prm_restore_irqen(u32 *saved_mask)
++{
++ omap4_prm_write_inst_reg(saved_mask[0], OMAP4430_PRM_DEVICE_INST,
++ OMAP4_PRM_IRQENABLE_MPU_OFFSET);
++ omap4_prm_write_inst_reg(saved_mask[1], OMAP4430_PRM_DEVICE_INST,
++ OMAP4_PRM_IRQENABLE_MPU_2_OFFSET);
++}
++
++static int __init omap4xxx_prcm_init(void)
++{
++ if (cpu_is_omap44xx())
++ return omap_prcm_register_chain_handler(&omap4_prcm_irq_setup);
++ return 0;
++}
++subsys_initcall(omap4xxx_prcm_init);
+diff --git a/arch/arm/mach-omap2/prm44xx.h b/arch/arm/mach-omap2/prm44xx.h
+index 3d66ccd..7978092 100644
+--- a/arch/arm/mach-omap2/prm44xx.h
++++ b/arch/arm/mach-omap2/prm44xx.h
+@@ -1,7 +1,7 @@
+ /*
+ * OMAP44xx PRM instance offset macros
+ *
+- * Copyright (C) 2009-2010 Texas Instruments, Inc.
++ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ * Copyright (C) 2009-2010 Nokia Corporation
+ *
+ * Paul Walmsley (paul@pwsan.com)
+@@ -763,6 +763,12 @@ extern u32 omap4_prm_vcvp_read(u8 offset);
+ extern void omap4_prm_vcvp_write(u32 val, u8 offset);
+ extern u32 omap4_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset);
+
++/* PRM interrupt-related functions */
++extern void omap44xx_prm_read_pending_irqs(unsigned long *events);
++extern void omap44xx_prm_ocp_barrier(void);
++extern void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask);
++extern void omap44xx_prm_restore_irqen(u32 *saved_mask);
++
+ # endif
+
+ #endif
+diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
+new file mode 100644
+index 0000000..860118a
+--- /dev/null
++++ b/arch/arm/mach-omap2/prm_common.c
+@@ -0,0 +1,320 @@
++/*
++ * OMAP2+ common Power & Reset Management (PRM) IP block functions
++ *
++ * Copyright (C) 2011 Texas Instruments, Inc.
++ * Tero Kristo <t-kristo@ti.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ *
++ * For historical purposes, the API used to configure the PRM
++ * interrupt handler refers to it as the "PRCM interrupt." The
++ * underlying registers are located in the PRM on OMAP3/4.
++ *
++ * XXX This code should eventually be moved to a PRM driver.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/irq.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++
++#include <mach/system.h>
++#include <plat/common.h>
++#include <plat/prcm.h>
++#include <plat/irqs.h>
++
++#include "prm2xxx_3xxx.h"
++#include "prm44xx.h"
++
++/*
++ * OMAP_PRCM_MAX_NR_PENDING_REG: maximum number of PRM_IRQ*_MPU regs
++ * XXX this is technically not needed, since
++ * omap_prcm_register_chain_handler() could allocate this based on the
++ * actual amount of memory needed for the SoC
++ */
++#define OMAP_PRCM_MAX_NR_PENDING_REG 2
++
++/*
++ * prcm_irq_chips: an array of all of the "generic IRQ chips" in use
++ * by the PRCM interrupt handler code. There will be one 'chip' per
++ * PRM_{IRQSTATUS,IRQENABLE}_MPU register pair. (So OMAP3 will have
++ * one "chip" and OMAP4 will have two.)
++ */
++static struct irq_chip_generic **prcm_irq_chips;
++
++/*
++ * prcm_irq_setup: the PRCM IRQ parameters for the hardware the code
++ * is currently running on. Defined and passed by initialization code
++ * that calls omap_prcm_register_chain_handler().
++ */
++static struct omap_prcm_irq_setup *prcm_irq_setup;
++
++/* Private functions */
++
++/*
++ * Move priority events from events to priority_events array
++ */
++static void omap_prcm_events_filter_priority(unsigned long *events,
++ unsigned long *priority_events)
++{
++ int i;
++
++ for (i = 0; i < prcm_irq_setup->nr_regs; i++) {
++ priority_events[i] =
++ events[i] & prcm_irq_setup->priority_mask[i];
++ events[i] ^= priority_events[i];
++ }
++}
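++
++/*
++ * For example, with events[0] = 0x201 and priority_mask[0] = 0x200,
++ * this leaves priority_events[0] = 0x200 and events[0] = 0x001, so the
++ * I/O chain bit is dispatched before the remaining "wkup" bit.
++ */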
++
++/*
++ * PRCM Interrupt Handler
++ *
++ * This is a common handler for the OMAP PRCM interrupts. Pending
++ * interrupts are detected by the SoC-specific ->read_pending_irqs()
++ * callback and dispatched accordingly. Clearing of the wakeup events
++ * should be done by the SoC-specific individual handlers.
++ */
++static void omap_prcm_irq_handler(unsigned int irq, struct irq_desc *desc)
++{
++ unsigned long pending[OMAP_PRCM_MAX_NR_PENDING_REG];
++ unsigned long priority_pending[OMAP_PRCM_MAX_NR_PENDING_REG];
++ struct irq_chip *chip = irq_desc_get_chip(desc);
++ unsigned int virtirq;
++ int nr_irqs = prcm_irq_setup->nr_regs * 32;
++
++ /*
++ * If we are suspended, mask all interrupts from PRCM level,
++ * this does not ack them, and they will be pending until we
++ * re-enable the interrupts, at which point the
++ * omap_prcm_irq_handler will be executed again. The
++ * _save_and_clear_irqen() function must ensure that the PRM
++ * write to disable all IRQs has reached the PRM before
++ * returning, or spurious PRCM interrupts may occur during
++ * suspend.
++ */
++ if (prcm_irq_setup->suspended) {
++ prcm_irq_setup->save_and_clear_irqen(prcm_irq_setup->saved_mask);
++ prcm_irq_setup->suspend_save_flag = true;
++ }
++
++ /*
++ * Loop until all pending irqs are handled, since
++ * generic_handle_irq() can cause new irqs to come
++ */
++ while (!prcm_irq_setup->suspended) {
++ prcm_irq_setup->read_pending_irqs(pending);
++
++ /* No bit set, then all IRQs are handled */
++ if (find_first_bit(pending, nr_irqs) >= nr_irqs)
++ break;
++
++ omap_prcm_events_filter_priority(pending, priority_pending);
++
++ /*
++ * Loop on all currently pending irqs so that new irqs
++ * cannot starve previously pending irqs
++ */
++
++ /* Serve priority events first */
++ for_each_set_bit(virtirq, priority_pending, nr_irqs)
++ generic_handle_irq(prcm_irq_setup->base_irq + virtirq);
++
++ /* Serve normal events next */
++ for_each_set_bit(virtirq, pending, nr_irqs)
++ generic_handle_irq(prcm_irq_setup->base_irq + virtirq);
++ }
++ if (chip->irq_ack)
++ chip->irq_ack(&desc->irq_data);
++ if (chip->irq_eoi)
++ chip->irq_eoi(&desc->irq_data);
++ chip->irq_unmask(&desc->irq_data);
++
++ prcm_irq_setup->ocp_barrier(); /* avoid spurious IRQs */
++}
++
++/* Public functions */
++
++/**
++ * omap_prcm_event_to_irq - given a PRCM event name, returns the
++ * corresponding IRQ on which the handler should be registered
++ * @name: name of the PRCM interrupt bit to look up - see struct omap_prcm_irq
++ *
++ * Returns the Linux internal IRQ ID corresponding to @name upon success,
++ * or -ENOENT upon failure.
++ */
++int omap_prcm_event_to_irq(const char *name)
++{
++ int i;
++
++ if (!prcm_irq_setup || !name)
++ return -ENOENT;
++
++ for (i = 0; i < prcm_irq_setup->nr_irqs; i++)
++ if (!strcmp(prcm_irq_setup->irqs[i].name, name))
++ return prcm_irq_setup->base_irq +
++ prcm_irq_setup->irqs[i].offset;
++
++ return -ENOENT;
++}
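++
++/*
++ * A minimal usage sketch (the handler and the "prcm_io" name below are
++ * only illustrative):
++ *
++ *	int irq = omap_prcm_event_to_irq("io");
++ *
++ *	if (irq >= 0)
++ *		request_irq(irq, my_io_chain_handler, 0, "prcm_io", NULL);
++ */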
++
++/**
++ * omap_prcm_irq_cleanup - undo the memory allocations and other setup
++ * done by omap_prcm_register_chain_handler()
++ *
++ * No return value.
++ */
++void omap_prcm_irq_cleanup(void)
++{
++ int i;
++
++ if (!prcm_irq_setup) {
++ pr_err("PRCM: IRQ handler not initialized; cannot cleanup\n");
++ return;
++ }
++
++ if (prcm_irq_chips) {
++ for (i = 0; i < prcm_irq_setup->nr_regs; i++) {
++ if (prcm_irq_chips[i])
++ irq_remove_generic_chip(prcm_irq_chips[i],
++ 0xffffffff, 0, 0);
++ prcm_irq_chips[i] = NULL;
++ }
++ kfree(prcm_irq_chips);
++ prcm_irq_chips = NULL;
++ }
++
++ kfree(prcm_irq_setup->saved_mask);
++ prcm_irq_setup->saved_mask = NULL;
++
++ kfree(prcm_irq_setup->priority_mask);
++ prcm_irq_setup->priority_mask = NULL;
++
++ irq_set_chained_handler(prcm_irq_setup->irq, NULL);
++
++ if (prcm_irq_setup->base_irq > 0)
++ irq_free_descs(prcm_irq_setup->base_irq,
++ prcm_irq_setup->nr_regs * 32);
++ prcm_irq_setup->base_irq = 0;
++}
++
++void omap_prcm_irq_prepare(void)
++{
++ prcm_irq_setup->suspended = true;
++}
++
++void omap_prcm_irq_complete(void)
++{
++ prcm_irq_setup->suspended = false;
++
++ /* If we have not saved the masks, do not attempt to restore */
++ if (!prcm_irq_setup->suspend_save_flag)
++ return;
++
++ prcm_irq_setup->suspend_save_flag = false;
++
++ /*
++	 * Re-enable all masked PRCM irq sources; this causes the PRCM
++	 * interrupt to fire immediately if any events were masked
++	 * earlier in the chain handler
++ */
++ prcm_irq_setup->restore_irqen(prcm_irq_setup->saved_mask);
++}
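++
++/*
++ * A minimal sketch of how platform suspend code is expected to use the
++ * two hooks above (the callback names and their wiring into the
++ * platform suspend_ops are illustrative only):
++ *
++ *	static int my_pm_prepare(void)
++ *	{
++ *		omap_prcm_irq_prepare();
++ *		return 0;
++ *	}
++ *
++ *	static void my_pm_finish(void)
++ *	{
++ *		omap_prcm_irq_complete();
++ *	}
++ */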
++
++/**
++ * omap_prcm_register_chain_handler - initializes the prcm chained interrupt
++ * handler based on provided parameters
++ * @irq_setup: hardware data about the underlying PRM/PRCM
++ *
++ * Set up the PRCM chained interrupt handler on the PRCM IRQ. Sets up
++ * one generic IRQ chip per PRM interrupt status/enable register pair.
++ * Returns 0 upon success, -EINVAL if called twice or if invalid
++ * arguments are passed, or -ENOMEM on any other error.
++ */
++int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
++{
++ int nr_regs = irq_setup->nr_regs;
++ u32 mask[OMAP_PRCM_MAX_NR_PENDING_REG];
++ int offset, i;
++ struct irq_chip_generic *gc;
++ struct irq_chip_type *ct;
++
++ if (!irq_setup)
++ return -EINVAL;
++
++ if (prcm_irq_setup) {
++ pr_err("PRCM: already initialized; won't reinitialize\n");
++ return -EINVAL;
++ }
++
++ if (nr_regs > OMAP_PRCM_MAX_NR_PENDING_REG) {
++ pr_err("PRCM: nr_regs too large\n");
++ return -EINVAL;
++ }
++
++ prcm_irq_setup = irq_setup;
++
++ prcm_irq_chips = kzalloc(sizeof(void *) * nr_regs, GFP_KERNEL);
++ prcm_irq_setup->saved_mask = kzalloc(sizeof(u32) * nr_regs, GFP_KERNEL);
++ prcm_irq_setup->priority_mask = kzalloc(sizeof(u32) * nr_regs,
++ GFP_KERNEL);
++
++ if (!prcm_irq_chips || !prcm_irq_setup->saved_mask ||
++ !prcm_irq_setup->priority_mask) {
++ pr_err("PRCM: kzalloc failed\n");
++ goto err;
++ }
++
++ memset(mask, 0, sizeof(mask));
++
++ for (i = 0; i < irq_setup->nr_irqs; i++) {
++ offset = irq_setup->irqs[i].offset;
++ mask[offset >> 5] |= 1 << (offset & 0x1f);
++ if (irq_setup->irqs[i].priority)
++ irq_setup->priority_mask[offset >> 5] |=
++ 1 << (offset & 0x1f);
++ }
++
++ irq_set_chained_handler(irq_setup->irq, omap_prcm_irq_handler);
++
++ irq_setup->base_irq = irq_alloc_descs(-1, 0, irq_setup->nr_regs * 32,
++ 0);
++
++ if (irq_setup->base_irq < 0) {
++ pr_err("PRCM: failed to allocate irq descs: %d\n",
++ irq_setup->base_irq);
++ goto err;
++ }
++
++	for (i = 0; i < irq_setup->nr_regs; i++) {
++ gc = irq_alloc_generic_chip("PRCM", 1,
++ irq_setup->base_irq + i * 32, prm_base,
++ handle_level_irq);
++
++ if (!gc) {
++ pr_err("PRCM: failed to allocate generic chip\n");
++ goto err;
++ }
++ ct = gc->chip_types;
++ ct->chip.irq_ack = irq_gc_ack_set_bit;
++ ct->chip.irq_mask = irq_gc_mask_clr_bit;
++ ct->chip.irq_unmask = irq_gc_mask_set_bit;
++
++ ct->regs.ack = irq_setup->ack + i * 4;
++ ct->regs.mask = irq_setup->mask + i * 4;
++
++ irq_setup_generic_chip(gc, mask[i], 0, IRQ_NOREQUEST, 0);
++ prcm_irq_chips[i] = gc;
++ }
++
++ return 0;
++
++err:
++ omap_prcm_irq_cleanup();
++ return -ENOMEM;
++}
+diff --git a/arch/arm/mach-omap2/prminst33xx.h b/arch/arm/mach-omap2/prminst33xx.h
+new file mode 100644
+index 0000000..c9a2ba5
+--- /dev/null
++++ b/arch/arm/mach-omap2/prminst33xx.h
+@@ -0,0 +1,29 @@
++/*
++ * AM33XX Power/Reset Management (PRM) function prototypes
++ *
++ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __ARCH_ASM_MACH_OMAP2_PRMINST33XX_H
++#define __ARCH_ASM_MACH_OMAP2_PRMINST33XX_H
++
++extern u32 am33xx_prminst_read_inst_reg(s16 inst, u16 idx);
++extern void am33xx_prminst_write_inst_reg(u32 val, s16 inst, u16 idx);
++extern u32 am33xx_prminst_rmw_inst_reg_bits(u32 mask, u32 bits,
++ s16 inst, s16 idx);
++extern u32 am33xx_prminst_is_hardreset_asserted(s16 domain, s16 idx, u32 mask);
++extern int am33xx_prminst_assert_hardreset(s16 prm_mod, u8 shift);
++extern int am33xx_prminst_deassert_hardreset(s16 prm_mod, u8 rst_shift,
++ u8 st_shift);
++extern void am33xx_prm_global_warm_sw_reset(void);
++
++#endif
+diff --git a/arch/arm/mach-omap2/prminst44xx.c b/arch/arm/mach-omap2/prminst44xx.c
+index 3a7bab1..5fd13b4 100644
+--- a/arch/arm/mach-omap2/prminst44xx.c
++++ b/arch/arm/mach-omap2/prminst44xx.c
+@@ -16,40 +16,48 @@
+ #include <linux/err.h>
+ #include <linux/io.h>
+
+-#include <plat/common.h>
++#include "common.h"
+
+ #include "prm44xx.h"
++#include "prm33xx.h"
+ #include "prminst44xx.h"
+ #include "prm-regbits-44xx.h"
+ #include "prcm44xx.h"
+ #include "prcm_mpu44xx.h"
+
+-static u32 _prm_bases[OMAP4_MAX_PRCM_PARTITIONS] = {
++static u32 **_prm_bases;
++static u32 max_prm_partitions;
++
++static u32 *omap44xx_prm_bases[] = {
+ [OMAP4430_INVALID_PRCM_PARTITION] = 0,
+- [OMAP4430_PRM_PARTITION] = OMAP4430_PRM_BASE,
++ [OMAP4430_PRM_PARTITION] = OMAP2_L4_IO_ADDRESS(OMAP4430_PRM_BASE),
+ [OMAP4430_CM1_PARTITION] = 0,
+ [OMAP4430_CM2_PARTITION] = 0,
+ [OMAP4430_SCRM_PARTITION] = 0,
+- [OMAP4430_PRCM_MPU_PARTITION] = OMAP4430_PRCM_MPU_BASE,
++ [OMAP4430_PRCM_MPU_PARTITION] = OMAP2_L4_IO_ADDRESS(OMAP4430_PRCM_MPU_BASE),
++};
++
++static u32 *am33xx_prm_bases[] = {
++ [OMAP4430_INVALID_PRCM_PARTITION] = 0,
++ [AM33XX_PRM_PARTITION] = AM33XX_L4_WK_IO_ADDRESS(AM33XX_PRM_BASE),
+ };
+
+ /* Read a register in a PRM instance */
+ u32 omap4_prminst_read_inst_reg(u8 part, s16 inst, u16 idx)
+ {
+- BUG_ON(part >= OMAP4_MAX_PRCM_PARTITIONS ||
++ BUG_ON(part >= max_prm_partitions ||
+ part == OMAP4430_INVALID_PRCM_PARTITION ||
+ !_prm_bases[part]);
+- return __raw_readl(OMAP2_L4_IO_ADDRESS(_prm_bases[part] + inst +
+- idx));
++ return __raw_readl(_prm_bases[part] + ((inst + idx)/sizeof(u32)));
+ }
+
+ /* Write into a register in a PRM instance */
+ void omap4_prminst_write_inst_reg(u32 val, u8 part, s16 inst, u16 idx)
+ {
+- BUG_ON(part >= OMAP4_MAX_PRCM_PARTITIONS ||
++ BUG_ON(part >= max_prm_partitions ||
+ part == OMAP4430_INVALID_PRCM_PARTITION ||
+ !_prm_bases[part]);
+- __raw_writel(val, OMAP2_L4_IO_ADDRESS(_prm_bases[part] + inst + idx));
++ __raw_writel(val, _prm_bases[part] + ((inst + idx)/sizeof(u32)));
+ }
+
+ /* Read-modify-write a register in PRM. Caller must lock */
+@@ -174,3 +182,14 @@ void omap4_prminst_global_warm_sw_reset(void)
+ OMAP4430_PRM_DEVICE_INST,
+ OMAP4_PRM_RSTCTRL_OFFSET);
+ }
++
++void __init omap44xx_prminst_init(void)
++{
++ if (cpu_is_omap44xx()) {
++ _prm_bases = omap44xx_prm_bases;
++ max_prm_partitions = ARRAY_SIZE(omap44xx_prm_bases);
++ } else if (cpu_is_am33xx()) {
++ _prm_bases = am33xx_prm_bases;
++ max_prm_partitions = ARRAY_SIZE(am33xx_prm_bases);
++ }
++}
+diff --git a/arch/arm/mach-omap2/prminst44xx.h b/arch/arm/mach-omap2/prminst44xx.h
+index 46f2efb..9a44c68 100644
+--- a/arch/arm/mach-omap2/prminst44xx.h
++++ b/arch/arm/mach-omap2/prminst44xx.h
+@@ -29,5 +29,5 @@ extern int omap4_prminst_assert_hardreset(u8 shift, u8 part, s16 inst,
+ u16 rstctrl_offs);
+ extern int omap4_prminst_deassert_hardreset(u8 shift, u8 part, s16 inst,
+ u16 rstctrl_offs);
+-
++extern void __init omap44xx_prminst_init(void);
+ #endif
+diff --git a/arch/arm/mach-omap2/sdram-nokia.c b/arch/arm/mach-omap2/sdram-nokia.c
+index 14caa22..7479d7e 100644
+--- a/arch/arm/mach-omap2/sdram-nokia.c
++++ b/arch/arm/mach-omap2/sdram-nokia.c
+@@ -1,7 +1,7 @@
+ /*
+ * SDRC register values for Nokia boards
+ *
+- * Copyright (C) 2008, 2010 Nokia Corporation
++ * Copyright (C) 2008, 2010-2011 Nokia Corporation
+ *
+ * Lauri Leukkunen <lauri.leukkunen@nokia.com>
+ *
+@@ -18,7 +18,7 @@
+ #include <linux/io.h>
+
+ #include <plat/io.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/clock.h>
+ #include <plat/sdrc.h>
+
+@@ -107,14 +107,37 @@ static const struct sdram_timings nokia_195dot2mhz_timings[] = {
+ },
+ };
+
++static const struct sdram_timings nokia_200mhz_timings[] = {
++ {
++ .casl = 3,
++ .tDAL = 30000,
++ .tDPL = 15000,
++ .tRRD = 10000,
++ .tRCD = 20000,
++ .tRP = 15000,
++ .tRAS = 40000,
++ .tRC = 55000,
++ .tRFC = 140000,
++ .tXSR = 200000,
++
++ .tREF = 7800,
++
++ .tXP = 2,
++ .tCKE = 4,
++ .tWTR = 2
++ },
++};
++
+ static const struct {
+ long rate;
+ struct sdram_timings const *data;
+ } nokia_timings[] = {
+ { 83000000, nokia_166mhz_timings },
+ { 97600000, nokia_97dot6mhz_timings },
++ { 100000000, nokia_200mhz_timings },
+ { 166000000, nokia_166mhz_timings },
+ { 195200000, nokia_195dot2mhz_timings },
++ { 200000000, nokia_200mhz_timings },
+ };
+ static struct omap_sdrc_params nokia_sdrc_params[ARRAY_SIZE(nokia_timings) + 1];
+
+diff --git a/arch/arm/mach-omap2/sdrc.c b/arch/arm/mach-omap2/sdrc.c
+index 8f27828..e3d345f 100644
+--- a/arch/arm/mach-omap2/sdrc.c
++++ b/arch/arm/mach-omap2/sdrc.c
+@@ -23,7 +23,7 @@
+ #include <linux/clk.h>
+ #include <linux/io.h>
+
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/clock.h>
+ #include <plat/sram.h>
+
+diff --git a/arch/arm/mach-omap2/sdrc2xxx.c b/arch/arm/mach-omap2/sdrc2xxx.c
+index ccdb010..791a63c 100644
+--- a/arch/arm/mach-omap2/sdrc2xxx.c
++++ b/arch/arm/mach-omap2/sdrc2xxx.c
+@@ -24,7 +24,7 @@
+ #include <linux/clk.h>
+ #include <linux/io.h>
+
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/clock.h>
+ #include <plat/sram.h>
+
+diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
+index 9992dbf..bfa8ae3 100644
+--- a/arch/arm/mach-omap2/serial.c
++++ b/arch/arm/mach-omap2/serial.c
+@@ -19,26 +19,21 @@
+ */
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+-#include <linux/serial_reg.h>
+ #include <linux/clk.h>
+ #include <linux/io.h>
+ #include <linux/delay.h>
+ #include <linux/platform_device.h>
+ #include <linux/slab.h>
+-#include <linux/serial_8250.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/console.h>
+
+-#ifdef CONFIG_SERIAL_OMAP
+ #include <plat/omap-serial.h>
+-#endif
+-
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/board.h>
+-#include <plat/clock.h>
+ #include <plat/dma.h>
+ #include <plat/omap_hwmod.h>
+ #include <plat/omap_device.h>
++#include <plat/omap-pm.h>
+
+ #include "prm2xxx_3xxx.h"
+ #include "pm.h"
+@@ -47,603 +42,226 @@
+ #include "control.h"
+ #include "mux.h"
+
+-#define UART_OMAP_NO_EMPTY_FIFO_READ_IP_REV 0x52
+-#define UART_OMAP_WER 0x17 /* Wake-up enable register */
+-
+-#define UART_ERRATA_FIFO_FULL_ABORT (0x1 << 0)
+-#define UART_ERRATA_i202_MDR1_ACCESS (0x1 << 1)
+-
+ /*
+- * NOTE: By default the serial timeout is disabled as it causes lost characters
+- * over the serial ports. This means that the UART clocks will stay on until
+- * disabled via sysfs. This also causes that any deeper omap sleep states are
+- * blocked.
++ * NOTE: By default the serial autosuspend timeout is disabled, as it causes
++ * lost characters over the serial ports. This means that the UART clocks will
++ * stay on until power/autosuspend_delay is set for the UART via sysfs.
++ * It also means that any deeper OMAP sleep states are blocked.
+ */
+-#define DEFAULT_TIMEOUT 0
++#define DEFAULT_AUTOSUSPEND_DELAY -1
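++
++/*
++ * For example (the device name is board specific and only illustrative):
++ *	echo 3000 > /sys/devices/platform/omap_uart.0/power/autosuspend_delay_ms
++ */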
+
+ #define MAX_UART_HWMOD_NAME_LEN 16
+
+ struct omap_uart_state {
+ int num;
+ int can_sleep;
+- struct timer_list timer;
+- u32 timeout;
+-
+- void __iomem *wk_st;
+- void __iomem *wk_en;
+- u32 wk_mask;
+- u32 padconf;
+- u32 dma_enabled;
+-
+- struct clk *ick;
+- struct clk *fck;
+- int clocked;
+-
+- int irq;
+- int regshift;
+- int irqflags;
+- void __iomem *membase;
+- resource_size_t mapbase;
+
+ struct list_head node;
+ struct omap_hwmod *oh;
+ struct platform_device *pdev;
+-
+- u32 errata;
+-#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
+- int context_valid;
+-
+- /* Registers to be saved/restored for OFF-mode */
+- u16 dll;
+- u16 dlh;
+- u16 ier;
+- u16 sysc;
+- u16 scr;
+- u16 wer;
+- u16 mcr;
+-#endif
+ };
+
+ static LIST_HEAD(uart_list);
+ static u8 num_uarts;
++static u8 console_uart_id = -1;
++static u8 no_console_suspend;
++static u8 uart_debug;
++
++#define DEFAULT_RXDMA_POLLRATE 1 /* RX DMA polling rate (us) */
++#define DEFAULT_RXDMA_BUFSIZE 4096 /* RX DMA buffer size */
++#define DEFAULT_RXDMA_TIMEOUT (3 * HZ)/* RX DMA timeout (jiffies) */
++
++static struct omap_uart_port_info omap_serial_default_info[] __initdata = {
++ {
++ .dma_enabled = false,
++ .dma_rx_buf_size = DEFAULT_RXDMA_BUFSIZE,
++ .dma_rx_poll_rate = DEFAULT_RXDMA_POLLRATE,
++ .dma_rx_timeout = DEFAULT_RXDMA_TIMEOUT,
++ .autosuspend_timeout = DEFAULT_AUTOSUSPEND_DELAY,
++ },
++};
+
+-static inline unsigned int __serial_read_reg(struct uart_port *up,
+- int offset)
+-{
+- offset <<= up->regshift;
+- return (unsigned int)__raw_readb(up->membase + offset);
+-}
+-
+-static inline unsigned int serial_read_reg(struct omap_uart_state *uart,
+- int offset)
++#ifdef CONFIG_PM
++static void omap_uart_enable_wakeup(struct platform_device *pdev, bool enable)
+ {
+- offset <<= uart->regshift;
+- return (unsigned int)__raw_readb(uart->membase + offset);
+-}
++ struct omap_device *od = to_omap_device(pdev);
+
+-static inline void __serial_write_reg(struct uart_port *up, int offset,
+- int value)
+-{
+- offset <<= up->regshift;
+- __raw_writeb(value, up->membase + offset);
+-}
++ if (!od)
++ return;
+
+-static inline void serial_write_reg(struct omap_uart_state *uart, int offset,
+- int value)
+-{
+- offset <<= uart->regshift;
+- __raw_writeb(value, uart->membase + offset);
++ if (enable)
++ omap_hwmod_enable_wakeup(od->hwmods[0]);
++ else
++ omap_hwmod_disable_wakeup(od->hwmods[0]);
+ }
+
+ /*
+- * Internal UARTs need to be initialized for the 8250 autoconfig to work
+- * properly. Note that the TX watermark initialization may not be needed
+- * once the 8250.c watermark handling code is merged.
++ * Errata i291: [UART]:Cannot Acknowledge Idle Requests
++ * in Smartidle Mode When Configured for DMA Operations.
++ * WA: configure uart in force idle mode.
+ */
+-
+-static inline void __init omap_uart_reset(struct omap_uart_state *uart)
++static void omap_uart_set_noidle(struct platform_device *pdev)
+ {
+- serial_write_reg(uart, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
+- serial_write_reg(uart, UART_OMAP_SCR, 0x08);
+- serial_write_reg(uart, UART_OMAP_MDR1, UART_OMAP_MDR1_16X_MODE);
+-}
+-
+-#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP3)
++ struct omap_device *od = to_omap_device(pdev);
+
+-/*
+- * Work Around for Errata i202 (3430 - 1.12, 3630 - 1.6)
+- * The access to uart register after MDR1 Access
+- * causes UART to corrupt data.
+- *
+- * Need a delay =
+- * 5 L4 clock cycles + 5 UART functional clock cycle (@48MHz = ~0.2uS)
+- * give 10 times as much
+- */
+-static void omap_uart_mdr1_errataset(struct omap_uart_state *uart, u8 mdr1_val,
+- u8 fcr_val)
+-{
+- u8 timeout = 255;
+-
+- serial_write_reg(uart, UART_OMAP_MDR1, mdr1_val);
+- udelay(2);
+- serial_write_reg(uart, UART_FCR, fcr_val | UART_FCR_CLEAR_XMIT |
+- UART_FCR_CLEAR_RCVR);
+- /*
+- * Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and
+- * TX_FIFO_E bit is 1.
+- */
+- while (UART_LSR_THRE != (serial_read_reg(uart, UART_LSR) &
+- (UART_LSR_THRE | UART_LSR_DR))) {
+- timeout--;
+- if (!timeout) {
+- /* Should *never* happen. we warn and carry on */
+- dev_crit(&uart->pdev->dev, "Errata i202: timedout %x\n",
+- serial_read_reg(uart, UART_LSR));
+- break;
+- }
+- udelay(1);
+- }
++ omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_NO);
+ }
+
+-static void omap_uart_save_context(struct omap_uart_state *uart)
++static void omap_uart_set_forceidle(struct platform_device *pdev)
+ {
+- u16 lcr = 0;
++ struct omap_device *od = to_omap_device(pdev);
+
+- if (!enable_off_mode)
+- return;
+-
+- lcr = serial_read_reg(uart, UART_LCR);
+- serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_B);
+- uart->dll = serial_read_reg(uart, UART_DLL);
+- uart->dlh = serial_read_reg(uart, UART_DLM);
+- serial_write_reg(uart, UART_LCR, lcr);
+- uart->ier = serial_read_reg(uart, UART_IER);
+- uart->sysc = serial_read_reg(uart, UART_OMAP_SYSC);
+- uart->scr = serial_read_reg(uart, UART_OMAP_SCR);
+- uart->wer = serial_read_reg(uart, UART_OMAP_WER);
+- serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_A);
+- uart->mcr = serial_read_reg(uart, UART_MCR);
+- serial_write_reg(uart, UART_LCR, lcr);
+-
+- uart->context_valid = 1;
++ omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_FORCE);
+ }
+
+-static void omap_uart_restore_context(struct omap_uart_state *uart)
+-{
+- u16 efr = 0;
+-
+- if (!enable_off_mode)
+- return;
+-
+- if (!uart->context_valid)
+- return;
+-
+- uart->context_valid = 0;
+-
+- if (uart->errata & UART_ERRATA_i202_MDR1_ACCESS)
+- omap_uart_mdr1_errataset(uart, UART_OMAP_MDR1_DISABLE, 0xA0);
+- else
+- serial_write_reg(uart, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
+-
+- serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_B);
+- efr = serial_read_reg(uart, UART_EFR);
+- serial_write_reg(uart, UART_EFR, UART_EFR_ECB);
+- serial_write_reg(uart, UART_LCR, 0x0); /* Operational mode */
+- serial_write_reg(uart, UART_IER, 0x0);
+- serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_B);
+- serial_write_reg(uart, UART_DLL, uart->dll);
+- serial_write_reg(uart, UART_DLM, uart->dlh);
+- serial_write_reg(uart, UART_LCR, 0x0); /* Operational mode */
+- serial_write_reg(uart, UART_IER, uart->ier);
+- serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_A);
+- serial_write_reg(uart, UART_MCR, uart->mcr);
+- serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_B);
+- serial_write_reg(uart, UART_EFR, efr);
+- serial_write_reg(uart, UART_LCR, UART_LCR_WLEN8);
+- serial_write_reg(uart, UART_OMAP_SCR, uart->scr);
+- serial_write_reg(uart, UART_OMAP_WER, uart->wer);
+- serial_write_reg(uart, UART_OMAP_SYSC, uart->sysc);
+-
+- if (uart->errata & UART_ERRATA_i202_MDR1_ACCESS)
+- omap_uart_mdr1_errataset(uart, UART_OMAP_MDR1_16X_MODE, 0xA1);
+- else
+- /* UART 16x mode */
+- serial_write_reg(uart, UART_OMAP_MDR1,
+- UART_OMAP_MDR1_16X_MODE);
+-}
+ #else
+-static inline void omap_uart_save_context(struct omap_uart_state *uart) {}
+-static inline void omap_uart_restore_context(struct omap_uart_state *uart) {}
+-#endif /* CONFIG_PM && CONFIG_ARCH_OMAP3 */
+-
+-static inline void omap_uart_enable_clocks(struct omap_uart_state *uart)
+-{
+- if (uart->clocked)
+- return;
+-
+- omap_device_enable(uart->pdev);
+- uart->clocked = 1;
+- omap_uart_restore_context(uart);
+-}
+-
+-#ifdef CONFIG_PM
+-
+-static inline void omap_uart_disable_clocks(struct omap_uart_state *uart)
+-{
+- if (!uart->clocked)
+- return;
+-
+- omap_uart_save_context(uart);
+- uart->clocked = 0;
+- omap_device_idle(uart->pdev);
+-}
+-
+-static void omap_uart_enable_wakeup(struct omap_uart_state *uart)
+-{
+- /* Set wake-enable bit */
+- if (uart->wk_en && uart->wk_mask) {
+- u32 v = __raw_readl(uart->wk_en);
+- v |= uart->wk_mask;
+- __raw_writel(v, uart->wk_en);
+- }
+-
+- /* Ensure IOPAD wake-enables are set */
+- if (cpu_is_omap34xx() && uart->padconf) {
+- u16 v = omap_ctrl_readw(uart->padconf);
+- v |= OMAP3_PADCONF_WAKEUPENABLE0;
+- omap_ctrl_writew(v, uart->padconf);
+- }
+-}
+-
+-static void omap_uart_disable_wakeup(struct omap_uart_state *uart)
+-{
+- /* Clear wake-enable bit */
+- if (uart->wk_en && uart->wk_mask) {
+- u32 v = __raw_readl(uart->wk_en);
+- v &= ~uart->wk_mask;
+- __raw_writel(v, uart->wk_en);
+- }
+-
+- /* Ensure IOPAD wake-enables are cleared */
+- if (cpu_is_omap34xx() && uart->padconf) {
+- u16 v = omap_ctrl_readw(uart->padconf);
+- v &= ~OMAP3_PADCONF_WAKEUPENABLE0;
+- omap_ctrl_writew(v, uart->padconf);
+- }
+-}
+-
+-static void omap_uart_smart_idle_enable(struct omap_uart_state *uart,
+- int enable)
+-{
+- u8 idlemode;
+-
+- if (enable) {
+- /**
+- * Errata 2.15: [UART]:Cannot Acknowledge Idle Requests
+- * in Smartidle Mode When Configured for DMA Operations.
+- */
+- if (uart->dma_enabled)
+- idlemode = HWMOD_IDLEMODE_FORCE;
+- else
+- idlemode = HWMOD_IDLEMODE_SMART;
+- } else {
+- idlemode = HWMOD_IDLEMODE_NO;
+- }
+-
+- omap_hwmod_set_slave_idlemode(uart->oh, idlemode);
+-}
+-
+-static void omap_uart_block_sleep(struct omap_uart_state *uart)
+-{
+- omap_uart_enable_clocks(uart);
+-
+- omap_uart_smart_idle_enable(uart, 0);
+- uart->can_sleep = 0;
+- if (uart->timeout)
+- mod_timer(&uart->timer, jiffies + uart->timeout);
+- else
+- del_timer(&uart->timer);
+-}
+-
+-static void omap_uart_allow_sleep(struct omap_uart_state *uart)
+-{
+- if (device_may_wakeup(&uart->pdev->dev))
+- omap_uart_enable_wakeup(uart);
+- else
+- omap_uart_disable_wakeup(uart);
+-
+- if (!uart->clocked)
+- return;
+-
+- omap_uart_smart_idle_enable(uart, 1);
+- uart->can_sleep = 1;
+- del_timer(&uart->timer);
+-}
+-
+-static void omap_uart_idle_timer(unsigned long data)
+-{
+- struct omap_uart_state *uart = (struct omap_uart_state *)data;
+-
+- omap_uart_allow_sleep(uart);
+-}
+-
+-void omap_uart_prepare_idle(int num)
+-{
+- struct omap_uart_state *uart;
+-
+- list_for_each_entry(uart, &uart_list, node) {
+- if (num == uart->num && uart->can_sleep) {
+- omap_uart_disable_clocks(uart);
+- return;
+- }
+- }
+-}
+-
+-void omap_uart_resume_idle(int num)
+-{
+- struct omap_uart_state *uart;
+-
+- list_for_each_entry(uart, &uart_list, node) {
+- if (num == uart->num && uart->can_sleep) {
+- omap_uart_enable_clocks(uart);
+-
+- /* Check for IO pad wakeup */
+- if (cpu_is_omap34xx() && uart->padconf) {
+- u16 p = omap_ctrl_readw(uart->padconf);
+-
+- if (p & OMAP3_PADCONF_WAKEUPEVENT0)
+- omap_uart_block_sleep(uart);
+- }
+-
+- /* Check for normal UART wakeup */
+- if (__raw_readl(uart->wk_st) & uart->wk_mask)
+- omap_uart_block_sleep(uart);
+- return;
+- }
+- }
+-}
+-
+-void omap_uart_prepare_suspend(void)
+-{
+- struct omap_uart_state *uart;
+-
+- list_for_each_entry(uart, &uart_list, node) {
+- omap_uart_allow_sleep(uart);
+- }
+-}
+-
+-int omap_uart_can_sleep(void)
+-{
+- struct omap_uart_state *uart;
+- int can_sleep = 1;
+-
+- list_for_each_entry(uart, &uart_list, node) {
+- if (!uart->clocked)
+- continue;
+-
+- if (!uart->can_sleep) {
+- can_sleep = 0;
+- continue;
+- }
+-
+- /* This UART can now safely sleep. */
+- omap_uart_allow_sleep(uart);
+- }
+-
+- return can_sleep;
+-}
++static void omap_uart_enable_wakeup(struct platform_device *pdev, bool enable)
++{}
++static void omap_uart_set_noidle(struct platform_device *pdev) {}
++static void omap_uart_set_forceidle(struct platform_device *pdev) {}
++#endif /* CONFIG_PM */
+
+-/**
+- * omap_uart_interrupt()
+- *
+- * This handler is used only to detect that *any* UART interrupt has
+- * occurred. It does _nothing_ to handle the interrupt. Rather,
+- * any UART interrupt will trigger the inactivity timer so the
+- * UART will not idle or sleep for its timeout period.
+- *
+- **/
+-/* static int first_interrupt; */
+-static irqreturn_t omap_uart_interrupt(int irq, void *dev_id)
+-{
+- struct omap_uart_state *uart = dev_id;
++#ifdef CONFIG_OMAP_MUX
++static struct omap_device_pad default_uart1_pads[] __initdata = {
++ {
++ .name = "uart1_cts.uart1_cts",
++ .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
++ },
++ {
++ .name = "uart1_rts.uart1_rts",
++ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
++ },
++ {
++ .name = "uart1_tx.uart1_tx",
++ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
++ },
++ {
++ .name = "uart1_rx.uart1_rx",
++ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
++ .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
++ .idle = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
++ },
++};
+
+- omap_uart_block_sleep(uart);
++static struct omap_device_pad default_uart2_pads[] __initdata = {
++ {
++ .name = "uart2_cts.uart2_cts",
++ .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
++ },
++ {
++ .name = "uart2_rts.uart2_rts",
++ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
++ },
++ {
++ .name = "uart2_tx.uart2_tx",
++ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
++ },
++ {
++ .name = "uart2_rx.uart2_rx",
++ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
++ .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
++ .idle = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
++ },
++};
+
+- return IRQ_NONE;
+-}
++static struct omap_device_pad default_uart3_pads[] __initdata = {
++ {
++ .name = "uart3_cts_rctx.uart3_cts_rctx",
++ .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
++ },
++ {
++ .name = "uart3_rts_sd.uart3_rts_sd",
++ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
++ },
++ {
++ .name = "uart3_tx_irtx.uart3_tx_irtx",
++ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
++ },
++ {
++ .name = "uart3_rx_irrx.uart3_rx_irrx",
++ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
++ .enable = OMAP_PIN_INPUT | OMAP_MUX_MODE0,
++ .idle = OMAP_PIN_INPUT | OMAP_MUX_MODE0,
++ },
++};
+
+-static void omap_uart_idle_init(struct omap_uart_state *uart)
+-{
+- int ret;
+-
+- uart->can_sleep = 0;
+- uart->timeout = DEFAULT_TIMEOUT;
+- setup_timer(&uart->timer, omap_uart_idle_timer,
+- (unsigned long) uart);
+- if (uart->timeout)
+- mod_timer(&uart->timer, jiffies + uart->timeout);
+- omap_uart_smart_idle_enable(uart, 0);
+-
+- if (cpu_is_omap34xx() && !cpu_is_ti816x()) {
+- u32 mod = (uart->num > 1) ? OMAP3430_PER_MOD : CORE_MOD;
+- u32 wk_mask = 0;
+- u32 padconf = 0;
+-
+- /* XXX These PRM accesses do not belong here */
+- uart->wk_en = OMAP34XX_PRM_REGADDR(mod, PM_WKEN1);
+- uart->wk_st = OMAP34XX_PRM_REGADDR(mod, PM_WKST1);
+- switch (uart->num) {
+- case 0:
+- wk_mask = OMAP3430_ST_UART1_MASK;
+- padconf = 0x182;
+- break;
+- case 1:
+- wk_mask = OMAP3430_ST_UART2_MASK;
+- padconf = 0x17a;
+- break;
+- case 2:
+- wk_mask = OMAP3430_ST_UART3_MASK;
+- padconf = 0x19e;
+- break;
+- case 3:
+- wk_mask = OMAP3630_ST_UART4_MASK;
+- padconf = 0x0d2;
+- break;
+- }
+- uart->wk_mask = wk_mask;
+- uart->padconf = padconf;
+- } else if (cpu_is_omap24xx()) {
+- u32 wk_mask = 0;
+- u32 wk_en = PM_WKEN1, wk_st = PM_WKST1;
+-
+- switch (uart->num) {
+- case 0:
+- wk_mask = OMAP24XX_ST_UART1_MASK;
+- break;
+- case 1:
+- wk_mask = OMAP24XX_ST_UART2_MASK;
+- break;
+- case 2:
+- wk_en = OMAP24XX_PM_WKEN2;
+- wk_st = OMAP24XX_PM_WKST2;
+- wk_mask = OMAP24XX_ST_UART3_MASK;
+- break;
+- }
+- uart->wk_mask = wk_mask;
+- if (cpu_is_omap2430()) {
+- uart->wk_en = OMAP2430_PRM_REGADDR(CORE_MOD, wk_en);
+- uart->wk_st = OMAP2430_PRM_REGADDR(CORE_MOD, wk_st);
+- } else if (cpu_is_omap2420()) {
+- uart->wk_en = OMAP2420_PRM_REGADDR(CORE_MOD, wk_en);
+- uart->wk_st = OMAP2420_PRM_REGADDR(CORE_MOD, wk_st);
+- }
+- } else {
+- uart->wk_en = NULL;
+- uart->wk_st = NULL;
+- uart->wk_mask = 0;
+- uart->padconf = 0;
+- }
++static struct omap_device_pad default_omap36xx_uart4_pads[] __initdata = {
++ {
++ .name = "gpmc_wait2.uart4_tx",
++ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
++ },
++ {
++ .name = "gpmc_wait3.uart4_rx",
++ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
++ .enable = OMAP_PIN_INPUT | OMAP_MUX_MODE2,
++ .idle = OMAP_PIN_INPUT | OMAP_MUX_MODE2,
++ },
++};
+
+- uart->irqflags |= IRQF_SHARED;
+- ret = request_threaded_irq(uart->irq, NULL, omap_uart_interrupt,
+- IRQF_SHARED, "serial idle", (void *)uart);
+- WARN_ON(ret);
+-}
++static struct omap_device_pad default_omap4_uart4_pads[] __initdata = {
++ {
++ .name = "uart4_tx.uart4_tx",
++ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
++ },
++ {
++ .name = "uart4_rx.uart4_rx",
++ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
++ .enable = OMAP_PIN_INPUT | OMAP_MUX_MODE0,
++ .idle = OMAP_PIN_INPUT | OMAP_MUX_MODE0,
++ },
++};
+
+-void omap_uart_enable_irqs(int enable)
++static void omap_serial_fill_default_pads(struct omap_board_data *bdata)
+ {
+- int ret;
+- struct omap_uart_state *uart;
+-
+- list_for_each_entry(uart, &uart_list, node) {
+- if (enable) {
+- pm_runtime_put_sync(&uart->pdev->dev);
+- ret = request_threaded_irq(uart->irq, NULL,
+- omap_uart_interrupt,
+- IRQF_SHARED,
+- "serial idle",
+- (void *)uart);
+- } else {
+- pm_runtime_get_noresume(&uart->pdev->dev);
+- free_irq(uart->irq, (void *)uart);
++ switch (bdata->id) {
++ case 0:
++ bdata->pads = default_uart1_pads;
++ bdata->pads_cnt = ARRAY_SIZE(default_uart1_pads);
++ break;
++ case 1:
++ bdata->pads = default_uart2_pads;
++ bdata->pads_cnt = ARRAY_SIZE(default_uart2_pads);
++ break;
++ case 2:
++ bdata->pads = default_uart3_pads;
++ bdata->pads_cnt = ARRAY_SIZE(default_uart3_pads);
++ break;
++ case 3:
++ if (cpu_is_omap44xx()) {
++ bdata->pads = default_omap4_uart4_pads;
++ bdata->pads_cnt =
++ ARRAY_SIZE(default_omap4_uart4_pads);
++ } else if (cpu_is_omap3630()) {
++ bdata->pads = default_omap36xx_uart4_pads;
++ bdata->pads_cnt =
++ ARRAY_SIZE(default_omap36xx_uart4_pads);
+ }
++ break;
++ default:
++ break;
+ }
+ }
+-
+-static ssize_t sleep_timeout_show(struct device *dev,
+- struct device_attribute *attr,
+- char *buf)
+-{
+- struct platform_device *pdev = to_platform_device(dev);
+- struct omap_device *odev = to_omap_device(pdev);
+- struct omap_uart_state *uart = odev->hwmods[0]->dev_attr;
+-
+- return sprintf(buf, "%u\n", uart->timeout / HZ);
+-}
+-
+-static ssize_t sleep_timeout_store(struct device *dev,
+- struct device_attribute *attr,
+- const char *buf, size_t n)
+-{
+- struct platform_device *pdev = to_platform_device(dev);
+- struct omap_device *odev = to_omap_device(pdev);
+- struct omap_uart_state *uart = odev->hwmods[0]->dev_attr;
+- unsigned int value;
+-
+- if (sscanf(buf, "%u", &value) != 1) {
+- dev_err(dev, "sleep_timeout_store: Invalid value\n");
+- return -EINVAL;
+- }
+-
+- uart->timeout = value * HZ;
+- if (uart->timeout)
+- mod_timer(&uart->timer, jiffies + uart->timeout);
+- else
+- /* A zero value means disable timeout feature */
+- omap_uart_block_sleep(uart);
+-
+- return n;
+-}
+-
+-static DEVICE_ATTR(sleep_timeout, 0644, sleep_timeout_show,
+- sleep_timeout_store);
+-#define DEV_CREATE_FILE(dev, attr) WARN_ON(device_create_file(dev, attr))
+ #else
+-static inline void omap_uart_idle_init(struct omap_uart_state *uart) {}
+-static void omap_uart_block_sleep(struct omap_uart_state *uart)
+-{
+- /* Needed to enable UART clocks when built without CONFIG_PM */
+- omap_uart_enable_clocks(uart);
+-}
+-#define DEV_CREATE_FILE(dev, attr)
+-#endif /* CONFIG_PM */
+-
+-#ifndef CONFIG_SERIAL_OMAP
+-/*
+- * Override the default 8250 read handler: mem_serial_in()
+- * Empty RX fifo read causes an abort on omap3630 and omap4
+- * This function makes sure that an empty rx fifo is not read on these silicons
+- * (OMAP1/2/3430 are not affected)
+- */
+-static unsigned int serial_in_override(struct uart_port *up, int offset)
+-{
+- if (UART_RX == offset) {
+- unsigned int lsr;
+- lsr = __serial_read_reg(up, UART_LSR);
+- if (!(lsr & UART_LSR_DR))
+- return -EPERM;
+- }
+-
+- return __serial_read_reg(up, offset);
+-}
++static void omap_serial_fill_default_pads(struct omap_board_data *bdata) {}
++#endif
+
+-static void serial_out_override(struct uart_port *up, int offset, int value)
++char *cmdline_find_option(char *str)
+ {
+- unsigned int status, tmout = 10000;
++ extern char *saved_command_line;
+
+- status = __serial_read_reg(up, UART_LSR);
+- while (!(status & UART_LSR_THRE)) {
+- /* Wait up to 10ms for the character(s) to be sent. */
+- if (--tmout == 0)
+- break;
+- udelay(1);
+- status = __serial_read_reg(up, UART_LSR);
+- }
+- __serial_write_reg(up, offset, value);
++ return strstr(saved_command_line, str);
+ }
+-#endif
+
+ static int __init omap_serial_early_init(void)
+ {
+- int i = 0;
+-
+ do {
+ char oh_name[MAX_UART_HWMOD_NAME_LEN];
+ struct omap_hwmod *oh;
+ struct omap_uart_state *uart;
++ char uart_name[MAX_UART_HWMOD_NAME_LEN];
+
+ snprintf(oh_name, MAX_UART_HWMOD_NAME_LEN,
+- "uart%d", i + 1);
++ "uart%d", num_uarts + 1);
+ oh = omap_hwmod_lookup(oh_name);
+ if (!oh)
+ break;
+@@ -653,21 +271,35 @@ static int __init omap_serial_early_init(void)
+ return -ENODEV;
+
+ uart->oh = oh;
+- uart->num = i++;
++ uart->num = num_uarts++;
+ list_add_tail(&uart->node, &uart_list);
+- num_uarts++;
+-
+- /*
+- * NOTE: omap_hwmod_setup*() has not yet been called,
+- * so no hwmod functions will work yet.
+- */
+-
+- /*
+- * During UART early init, device need to be probed
+- * to determine SoC specific init before omap_device
+- * is ready. Therefore, don't allow idle here
+- */
+- uart->oh->flags |= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET;
++ snprintf(uart_name, MAX_UART_HWMOD_NAME_LEN,
++ "%s%d", OMAP_SERIAL_NAME, uart->num);
++
++ if (cmdline_find_option(uart_name)) {
++ console_uart_id = uart->num;
++
++ if (console_loglevel >= 10) {
++ uart_debug = true;
++				pr_info("%s used as console in debug mode;"
++					" uart%d clocks will not be"
++					" gated\n", uart_name, uart->num);
++ }
++
++ if (cmdline_find_option("no_console_suspend"))
++ no_console_suspend = true;
++
++ /*
++			 * omap-uart can be used for earlyprintk logs,
++			 * so if omap-uart is used as the console then
++			 * prevent uart reset and idle in order to keep
++			 * getting logs from omap-uart until the uart
++			 * console driver is available to take care of
++			 * console messages. Idling or resetting omap-uart
++			 * while printing early boot logs can stall the boot-up.
++ */
++ oh->flags |= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET;
++ }
+ } while (1);
+
+ return 0;
+@@ -677,6 +309,7 @@ core_initcall(omap_serial_early_init);
+ /**
+ * omap_serial_init_port() - initialize single serial port
+ * @bdata: port specific board data pointer
++ * @info: platform specific data pointer
+ *
+ * This function initialies serial driver for given port only.
+ * Platforms can call this function instead of omap_serial_init()
+@@ -685,7 +318,8 @@ core_initcall(omap_serial_early_init);
+ * Don't mix calls to omap_serial_init_port() and omap_serial_init(),
+ * use only one of the two.
+ */
+-void __init omap_serial_init_port(struct omap_board_data *bdata)
++void __init omap_serial_init_port(struct omap_board_data *bdata,
++ struct omap_uart_port_info *info)
+ {
+ struct omap_uart_state *uart;
+ struct omap_hwmod *oh;
+@@ -693,15 +327,7 @@ void __init omap_serial_init_port(struct omap_board_data *bdata)
+ void *pdata = NULL;
+ u32 pdata_size = 0;
+ char *name;
+-#ifndef CONFIG_SERIAL_OMAP
+- struct plat_serial8250_port ports[2] = {
+- {},
+- {.flags = 0},
+- };
+- struct plat_serial8250_port *p = &ports[0];
+-#else
+ struct omap_uart_port_info omap_up;
+-#endif
+
+ if (WARN_ON(!bdata))
+ return;
+@@ -713,66 +339,34 @@ void __init omap_serial_init_port(struct omap_board_data *bdata)
+ list_for_each_entry(uart, &uart_list, node)
+ if (bdata->id == uart->num)
+ break;
++ if (!info)
++ info = omap_serial_default_info;
+
+ oh = uart->oh;
+- uart->dma_enabled = 0;
+-#ifndef CONFIG_SERIAL_OMAP
+- name = "serial8250";
+-
+- /*
+- * !! 8250 driver does not use standard IORESOURCE* It
+- * has it's own custom pdata that can be taken from
+- * the hwmod resource data. But, this needs to be
+- * done after the build.
+- *
+- * ?? does it have to be done before the register ??
+- * YES, because platform_device_data_add() copies
+- * pdata, it does not use a pointer.
+- */
+- p->flags = UPF_BOOT_AUTOCONF;
+- p->iotype = UPIO_MEM;
+- p->regshift = 2;
+- p->uartclk = OMAP24XX_BASE_BAUD * 16;
+- p->irq = oh->mpu_irqs[0].irq;
+- p->mapbase = oh->slaves[0]->addr->pa_start;
+- p->membase = omap_hwmod_get_mpu_rt_va(oh);
+- p->irqflags = IRQF_SHARED;
+- p->private_data = uart;
+-
+- /*
+- * omap44xx, ti816x: Never read empty UART fifo
+- * omap3xxx: Never read empty UART fifo on UARTs
+- * with IP rev >=0x52
+- */
+- uart->regshift = p->regshift;
+- uart->membase = p->membase;
+- if (cpu_is_omap44xx() || cpu_is_ti816x())
+- uart->errata |= UART_ERRATA_FIFO_FULL_ABORT;
+- else if ((serial_read_reg(uart, UART_OMAP_MVER) & 0xFF)
+- >= UART_OMAP_NO_EMPTY_FIFO_READ_IP_REV)
+- uart->errata |= UART_ERRATA_FIFO_FULL_ABORT;
+-
+- if (uart->errata & UART_ERRATA_FIFO_FULL_ABORT) {
+- p->serial_in = serial_in_override;
+- p->serial_out = serial_out_override;
+- }
+-
+- pdata = &ports[0];
+- pdata_size = 2 * sizeof(struct plat_serial8250_port);
+-#else
+-
+ name = DRIVER_NAME;
+
+- omap_up.dma_enabled = uart->dma_enabled;
++ omap_up.dma_enabled = info->dma_enabled;
+ omap_up.uartclk = OMAP24XX_BASE_BAUD * 16;
+- omap_up.mapbase = oh->slaves[0]->addr->pa_start;
+- omap_up.membase = omap_hwmod_get_mpu_rt_va(oh);
+- omap_up.irqflags = IRQF_SHARED;
+- omap_up.flags = UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
++ omap_up.flags = UPF_BOOT_AUTOCONF;
++ omap_up.get_context_loss_count = omap_pm_get_dev_context_loss_count;
++ omap_up.set_forceidle = omap_uart_set_forceidle;
++ omap_up.set_noidle = omap_uart_set_noidle;
++ omap_up.enable_wakeup = omap_uart_enable_wakeup;
++ omap_up.dma_rx_buf_size = info->dma_rx_buf_size;
++ omap_up.dma_rx_timeout = info->dma_rx_timeout;
++ omap_up.dma_rx_poll_rate = info->dma_rx_poll_rate;
++ omap_up.autosuspend_timeout = info->autosuspend_timeout;
++
++ /* Enable the MDR1 Errata i202 for OMAP2430/3xxx/44xx */
++ if (!cpu_is_omap2420() && !cpu_is_ti816x())
++ omap_up.errata |= UART_ERRATA_i202_MDR1_ACCESS;
++
++ /* Enable DMA Mode Force Idle Errata i291 for omap34xx/3630 */
++ if ((cpu_is_omap34xx() || cpu_is_omap3630()) && !cpu_is_am33xx())
++ omap_up.errata |= UART_ERRATA_i291_DMA_FORCEIDLE;
+
+ pdata = &omap_up;
+ pdata_size = sizeof(struct omap_uart_port_info);
+-#endif
+
+ if (WARN_ON(!oh))
+ return;
+@@ -782,64 +376,29 @@ void __init omap_serial_init_port(struct omap_board_data *bdata)
+ WARN(IS_ERR(pdev), "Could not build omap_device for %s: %s.\n",
+ name, oh->name);
+
+- omap_device_disable_idle_on_suspend(pdev);
++ if ((console_uart_id == bdata->id) && no_console_suspend)
++ omap_device_disable_idle_on_suspend(pdev);
++
+ oh->mux = omap_hwmod_mux_init(bdata->pads, bdata->pads_cnt);
+
+- uart->irq = oh->mpu_irqs[0].irq;
+- uart->regshift = 2;
+- uart->mapbase = oh->slaves[0]->addr->pa_start;
+- uart->membase = omap_hwmod_get_mpu_rt_va(oh);
+ uart->pdev = pdev;
+
+ oh->dev_attr = uart;
+
+- console_lock(); /* in case the earlycon is on the UART */
+-
+- /*
+- * Because of early UART probing, UART did not get idled
+- * on init. Now that omap_device is ready, ensure full idle
+- * before doing omap_device_enable().
+- */
+- omap_hwmod_idle(uart->oh);
+-
+- omap_device_enable(uart->pdev);
+- omap_uart_idle_init(uart);
+- omap_uart_reset(uart);
+- omap_hwmod_enable_wakeup(uart->oh);
+- omap_device_idle(uart->pdev);
+-
+- /*
+- * Need to block sleep long enough for interrupt driven
+- * driver to start. Console driver is in polling mode
+- * so device needs to be kept enabled while polling driver
+- * is in use.
+- */
+- if (uart->timeout)
+- uart->timeout = (30 * HZ);
+- omap_uart_block_sleep(uart);
+- uart->timeout = DEFAULT_TIMEOUT;
+-
+- console_unlock();
+-
+- if ((cpu_is_omap34xx() && uart->padconf) ||
+- (uart->wk_en && uart->wk_mask)) {
++ if (((cpu_is_omap34xx() || cpu_is_omap44xx()) && bdata->pads)
++ && !uart_debug)
+ device_init_wakeup(&pdev->dev, true);
+- DEV_CREATE_FILE(&pdev->dev, &dev_attr_sleep_timeout);
+- }
+-
+- /* Enable the MDR1 errata for OMAP3 */
+- if (cpu_is_omap34xx() && !cpu_is_ti816x())
+- uart->errata |= UART_ERRATA_i202_MDR1_ACCESS;
+ }
+
+ /**
+- * omap_serial_init() - initialize all supported serial ports
++ * omap_serial_board_init() - initialize all supported serial ports
++ * @info: platform specific data pointer
+ *
+ * Initializes all available UARTs as serial ports. Platforms
+ * can call this function when they want to have default behaviour
+ * for serial ports (e.g initialize them all as serial ports).
+ */
+-void __init omap_serial_init(void)
++void __init omap_serial_board_init(struct omap_uart_port_info *info)
+ {
+ struct omap_uart_state *uart;
+ struct omap_board_data bdata;
+@@ -849,7 +408,26 @@ void __init omap_serial_init(void)
+ bdata.flags = 0;
+ bdata.pads = NULL;
+ bdata.pads_cnt = 0;
+- omap_serial_init_port(&bdata);
+
++ if (cpu_is_omap44xx() || (cpu_is_omap34xx() &&
++ !cpu_is_am33xx()))
++ omap_serial_fill_default_pads(&bdata);
++
++ if (!info)
++ omap_serial_init_port(&bdata, NULL);
++ else
++ omap_serial_init_port(&bdata, &info[uart->num]);
+ }
+ }
++
++/**
++ * omap_serial_init() - initialize all supported serial ports
++ *
++ * Initializes all available UARTs.
++ * Platforms can call this function when they want to have default behaviour
++ * for serial ports (e.g. initialize them all as serial ports).
++ */
++void __init omap_serial_init(void)
++{
++ omap_serial_board_init(NULL);
++}
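
A board that needs non-default UART behaviour can now pass its own omap_uart_port_info array through omap_serial_board_init() instead of calling omap_serial_init(). Below is a minimal sketch of such a board file, using only the fields referenced above; the symbol names and values are illustrative assumptions, not validated board settings.

	#include <linux/init.h>
	#include <plat/serial.h>
	#include <plat/omap-serial.h>

	/* Hypothetical board code: one entry per UART, indexed by uart->num. */
	static struct omap_uart_port_info board_uart_info[] __initdata = {
		[0] = { .autosuspend_timeout = 3000 },
		[1] = { .autosuspend_timeout = 3000 },
		[2] = {
			/* Console UART: enable RX DMA; the sizes are illustrative. */
			.dma_enabled		= true,
			.dma_rx_buf_size	= 4096,
			.dma_rx_timeout		= 1,
			.dma_rx_poll_rate	= 1,
			.autosuspend_timeout	= 3000,
		},
		[3] = { .autosuspend_timeout = 3000 },
	};

	static void __init board_serial_init(void)
	{
		omap_serial_board_init(board_uart_info);
	}
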
+diff --git a/arch/arm/mach-omap2/sleep33xx.S b/arch/arm/mach-omap2/sleep33xx.S
+new file mode 100644
+index 0000000..69b49ea
+--- /dev/null
++++ b/arch/arm/mach-omap2/sleep33xx.S
+@@ -0,0 +1,751 @@
++/*
++ * Low level suspend code for AM33XX SoCs
++ *
++ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/linkage.h>
++#include <linux/init.h>
++#include <asm/memory.h>
++#include <asm/assembler.h>
++#include <mach/io.h>
++#include <plat/emif.h>
++#include "cm33xx.h"
++
++#include <plat/emif.h>
++#include <plat/sram.h>
++
++#include "cm33xx.h"
++#include "pm33xx.h"
++#include "prm33xx.h"
++#include "control.h"
++
++/* We should probably pass in the virtual addresses of PRCM, Control and EMIF
++ * along with the physical addresses, load them into the registers and then
++ * continue.
++ */
++ .align 3
++ENTRY(am33xx_do_wfi)
++ stmfd sp!, {r4 - r11, lr} @ save registers on stack
++
++ .macro pll_bypass, name, clk_mode_addr, idlest_addr
++pll_bypass_\name:
++ ldr r0, \clk_mode_addr
++ ldr r1, [r0]
++ bic r1, r1, #(7 << 0)
++ orr r1, r1, #0x5
++ str r1, [r0]
++ ldr r0, \idlest_addr
++wait_pll_bypass_\name:
++ ldr r1, [r0]
++ tst r1, #0x0
++ bne wait_pll_bypass_\name
++ .endm
++
++ .macro pll_lock, name, clk_mode_addr, idlest_addr
++pll_lock_\name:
++ ldr r0, \clk_mode_addr
++ ldr r1, [r0]
++ bic r1, r1, #(7 << 0)
++ orr r1, r1, #0x7
++ str r1, [r0]
++ ldr r0, \idlest_addr
++wait_pll_lock_\name:
++ ldr r1, [r0]
++ tst r1, #0x1
++ bne wait_pll_lock_\name
++ .endm
++
++ /* EMIF config for low power mode */
++ ldr r0, emif_addr_func
++ blx r0
++
++ str r0, emif_addr_virt
++
++ /* Ensure that all the writes to DDR leave the A8 */
++ dsb
++ dmb
++ isb
++
++ add r1, r0, #EMIF4_0_SDRAM_MGMT_CTRL
++ ldr r2, [r1]
++ orr r2, r2, #0xa0 @ a reasonable delay for entering SR
++ str r2, [r1, #0]
++
++ ldr r2, ddr_start @ do a dummy access to DDR
++ ldr r3, [r2, #0]
++ ldr r3, [r1, #0]
++ orr r3, r3, #0x200 @ now set the LP MODE to Self-Refresh
++ str r3, [r1, #0]
++ str r2, [r1, #4] @ write to shadow register also
++
++ mov r1, #0x1000 @ Give some time for the system to enter SR
++wait_sr:
++ subs r1, r1, #1
++ bne wait_sr
++
++ /* Disable EMIF at this point */
++ ldr r1, virt_emif_clkctrl
++ ldr r2, [r1]
++ bic r2, r2, #(3 << 0)
++ str r2, [r1]
++
++ ldr r1, virt_emif_clkctrl
++wait_emif_disable:
++ ldr r2, [r1]
++ ldr r3, module_disabled_val
++ cmp r2, r3
++ bne wait_emif_disable
++
++ /* Weak pull down for DQ, DM */
++ ldr r1, virt_ddr_io_pull1
++ ldr r2, susp_io_pull
++ str r2, [r1]
++
++ ldr r1, virt_ddr_io_pull2
++ ldr r2, susp_io_pull
++ str r2, [r1]
++
++ /* Disable VTP with N & P = 0x1 */
++ ldr r1, virt_ddr_vtp_ctrl
++ ldr r2, susp_vtp_ctrl_val
++ str r2, [r1]
++
++ /* IO to work in mDDR mode */
++ ldr r0, virt_ddr_io_ctrl
++ ldr r1, [r0]
++ mov r2, #1
++ mov r3, r2, lsl #28
++ str r3, [r0]
++
++ /* Enable SRAM LDO ret mode */
++ ldr r0, virt_sram_ldo_addr
++ ldr r1, [r0]
++ orr r1, #1
++ str r1, [r0]
++
++ /* Put the PLLs in bypass mode */
++ pll_bypass core, virt_core_clk_mode, virt_core_idlest
++ pll_bypass ddr, virt_ddr_clk_mode, virt_ddr_idlest
++ pll_bypass disp, virt_disp_clk_mode, virt_disp_idlest
++ pll_bypass per, virt_per_clk_mode, virt_per_idlest
++ pll_bypass mpu, virt_mpu_clk_mode, virt_mpu_idlest
++
++ dsb
++ dmb
++ isb
++
++ wfi
++ nop
++ nop
++ nop
++ nop
++ nop
++ nop
++ nop
++ nop
++ nop
++ nop
++ nop
++
++ /* We come here in case of an abort */
++
++ /* Relock the PLLs */
++ pll_lock mpu_abt, virt_mpu_clk_mode, virt_mpu_idlest
++ pll_lock per_abt, virt_per_clk_mode, virt_per_idlest
++ pll_lock disp_abt, virt_disp_clk_mode, virt_disp_idlest
++ pll_lock ddr_abt, virt_ddr_clk_mode, virt_ddr_idlest
++ pll_lock core_abt, virt_core_clk_mode, virt_core_idlest
++
++ /* Disable SRAM LDO ret mode */
++ ldr r0, virt_sram_ldo_addr
++ ldr r1, [r0]
++ bic r1, #1
++ str r1, [r0]
++
++ /* IO to work in DDR mode */
++ ldr r0, virt_ddr_io_ctrl
++ ldr r1, [r0]
++ mov r2, #0x0
++ mov r3, r2, lsl #28
++ str r3, [r0]
++
++ /* Restore the pull for DQ, DM */
++ ldr r1, virt_ddr_io_pull1
++ ldr r2, resume_io_pull1
++ str r2, [r1]
++
++ ldr r1, virt_ddr_io_pull2
++ ldr r2, resume_io_pull2
++ str r2, [r1]
++
++ /* Enable VTP */
++config_vtp_abt:
++ ldr r0, virt_ddr_vtp_ctrl
++ ldr r1, [r0]
++ mov r2, #0x0 @ clear the register
++ str r2, [r0]
++ mov r2, #0x6 @ write the filter value
++ str r2, [r0]
++
++ ldr r1, [r0]
++ ldr r2, vtp_enable @ set the enable bit
++ orr r2, r2, r1
++ str r2, [r0]
++
++ ldr r1, [r0] @ toggle the CLRZ bit
++ bic r1, #1
++ str r1, [r0]
++
++ ldr r1, [r0]
++ orr r1, #1
++ str r1, [r0]
++
++poll_vtp_ready_abt:
++ ldr r1, [r0] @ poll for VTP ready
++ tst r1, #(1 << 5)
++ beq poll_vtp_ready_abt
++
++ /* Enable EMIF */
++ ldr r1, virt_emif_clkctrl
++ mov r2, #0x2
++ str r2, [r1]
++wait_emif_enable:
++ ldr r3, [r1]
++ cmp r2, r3
++ bne wait_emif_enable
++
++ /* Disable EMIF self-refresh */
++ ldr r0, emif_addr_virt
++ add r0, r0, #EMIF4_0_SDRAM_MGMT_CTRL
++ ldr r1, [r0]
++ bic r1, r1, #(0x7 << 7)
++ str r1, [r0]
++
++ mov r0, #7
++ ldmfd sp!, {r4 - r11, pc} @ restore regs and return
++
++ nop
++ nop
++ nop
++ nop
++ nop
++ nop
++ nop
++ nop
++ nop
++ nop
++ nop
++ nop
++ nop
++
++ /* Take the PLLs out of LP_BYPASS */
++ pll_lock mpu, phys_mpu_clk_mode, phys_mpu_idlest
++ pll_lock per, phys_per_clk_mode, phys_per_idlest
++ pll_lock disp, phys_disp_clk_mode, phys_disp_idlest
++ pll_lock ddr, phys_ddr_clk_mode, phys_ddr_idlest
++ pll_lock core, phys_core_clk_mode, phys_core_idlest
++
++ /* Disable SRAM LDO ret mode */
++ ldr r0, phys_sram_ldo_addr
++ ldr r1, [r0]
++ bic r1, #1
++ str r1, [r0]
++
++ /* Restore the pull for DQ, DM */
++ ldr r1, phys_ddr_io_pull1
++ ldr r2, resume_io_pull1
++ str r2, [r1]
++
++ ldr r1, phys_ddr_io_pull2
++ ldr r2, resume_io_pull2
++ str r2, [r1]
++
++ /* Disable EMIF self-refresh */
++ ldr r0, emif_phys_addr
++ add r0, r0, #EMIF4_0_SDRAM_MGMT_CTRL
++ ldr r1, [r0]
++ bic r1, r1, #(0x7 << 7)
++ str r1, [r0]
++
++ /* Take out IO of mDDR mode */
++ ldr r0, phys_ddr_io_ctrl
++ ldr r1, [r0]
++ bic r1, r1, #28
++ str r1, [r0]
++
++/*
++ * Instead of hardcoding the EMIF and DDR PHY related settings
++ * in this file, the sane thing to do would have been to back up
++ * the register contents during suspend and restore them in
++ * the resume path. However, due to the Si errata related to the
++ * DDR PHY registers, these registers are read-only. So we'll
++ * need to hardcode at least the DDR PHY configuration over here.
++ * We _could_ back up the EMIF registers, but in order to be
++ * consistent with the DDR setup procedure we skip this for now.
++ * The person updating the DDR PHY config values is expected
++ * to update the EMIF config values as well.
++ */
++
++config_vtp:
++ ldr r0, vtp0_addr
++ ldr r1, [r0]
++ mov r2, #0x0 @ clear the register
++ str r2, [r0]
++ mov r2, #0x6 @ write the filter value
++ str r2, [r0]
++
++ ldr r1, [r0]
++ ldr r2, vtp_enable @ set the enable bit
++ orr r2, r2, r1
++ str r2, [r0]
++
++ ldr r1, [r0] @ toggle the CLRZ bit
++ bic r1, #1
++ str r1, [r0]
++
++ ldr r1, [r0]
++ orr r1, #1
++ str r1, [r0]
++
++poll_vtp_ready:
++ ldr r1, [r0] @ poll for VTP ready
++ tst r1, #(1 << 5)
++ beq poll_vtp_ready
++
++cmd_macro_config:
++ ldr r0, ddr_phy_base
++ ldr r1, [r0]
++ ldr r2, ddr2_ratio_val
++ mov r3, r2
++ @ TODO: Need to use proper variable here
++ mov r4, #0
++ str r3, [r0, #28] @cmd0
++ str r4, [r0, #32]
++ str r4, [r0, #36]
++ str r4, [r0, #40]
++ str r4, [r0, #44]
++ str r3, [r0, #80] @cmd1
++ str r4, [r0, #84]
++ str r4, [r0, #88]
++ str r4, [r0, #92]
++ str r4, [r0, #96]
++ str r3, [r0, #132] @cmd2
++ str r4, [r0, #136]
++ str r4, [r0, #140]
++ str r4, [r0, #144]
++ str r4, [r0, #148]
++
++ mov r3, #0x0
++ bl data_macro_config
++ mov r3, #0xa4
++ bl data_macro_config
++ b setup_rank_delays
++
++data_macro_config:
++ ldr r0, ddr_phy_base
++ add r0, r0, r3
++rd_dqs:
++ ldr r1, data0_rd_dqs_slave_ratio0_val
++ mov r2, r1
++ /* shift by 30, 20, 10 and orr */
++ mov r5, r2, lsl #10
++ mov r6, r2, lsl #20
++ mov r7, r2, lsl #30
++ orr r2, r2, r5
++ orr r2, r2, r6
++ orr r2, r2, r7
++ /* Done with crazy bit ops. store it now */
++ str r2, [r0, #200]
++ ldr r1, data0_rd_dqs_slave_ratio1_val
++ mov r2, r1
++ mov r5, r2, lsr #2
++ mov r2, r5
++ str r2, [r0, #204]
++wr_dqs:
++ ldr r1, data0_wr_dqs_slave_ratio0_val
++ mov r2, r1
++ /* shift by 30, 20, 10 and orr */
++ mov r5, r2, lsl #10
++ mov r6, r2, lsl #20
++ mov r7, r2, lsl #30
++ orr r2, r2, r5
++ orr r2, r2, r6
++ orr r2, r2, r7
++ /* Done with crazy bit ops. store it now */
++ str r2, [r0, #220]
++ ldr r1, data0_wr_dqs_slave_ratio1_val
++ mov r2, r1
++ mov r5, r2, lsr #2
++ mov r2, r5
++ str r2, [r0, #224]
++wr_lvl:
++ ldr r1, data0_wr_lvl_init_ratio0_val
++ mov r2, r1
++ /* shift by 30, 20, 10 and orr */
++ mov r5, r2, lsl #10
++ mov r6, r2, lsl #20
++ mov r7, r2, lsl #30
++ orr r2, r2, r5
++ orr r2, r2, r6
++ orr r2, r2, r7
++ /* Done with crazy bit ops. store it now */
++ str r2, [r0, #240]
++ ldr r1, data0_wr_lvl_init_ratio1_val
++ mov r2, r1
++ mov r5, r2, lsr #2
++ mov r2, r5
++ str r2, [r0, #244]
++gate_lvl:
++ ldr r1, data0_gate_lvl_init_ratio0_val
++ mov r2, r1
++ /* shift by 30, 20, 10 and orr */
++ mov r5, r2, lsl #10
++ mov r6, r2, lsl #20
++ mov r7, r2, lsl #30
++ orr r2, r2, r5
++ orr r2, r2, r6
++ orr r2, r2, r7
++ /* Done with crazy bit ops. store it now */
++ str r2, [r0, #248]
++ ldr r1, data0_gate_lvl_init_ratio1_val
++ mov r2, r1
++ mov r5, r2, lsr #2
++ mov r2, r5
++ str r2, [r0, #256]
++we_slv:
++ ldr r1, data0_wr_lvl_slave_ratio0_val
++ mov r2, r1
++ /* shift by 30, 20, 10 and orr */
++ mov r5, r2, lsl #10
++ mov r6, r2, lsl #20
++ mov r7, r2, lsl #30
++ orr r2, r2, r5
++ orr r2, r2, r6
++ orr r2, r2, r7
++ /* Done with crazy bit ops. store it now */
++ str r2, [r0, #264]
++ ldr r1, data0_wr_lvl_slave_ratio1_val
++ mov r2, r1
++ mov r5, r2, lsr #2
++ mov r2, r5
++ str r2, [r0, #268]
++wr_data:
++ ldr r1, data0_wr_data_slave_ratio0_val
++ mov r2, r1
++ /* shift by 30, 20, 10 and orr */
++ mov r5, r2, lsl #10
++ mov r6, r2, lsl #20
++ mov r7, r2, lsl #30
++ orr r2, r2, r5
++ orr r2, r2, r6
++ orr r2, r2, r7
++ /* Done with crazy bit ops. store it now */
++ str r2, [r0, #288]
++ ldr r1, data0_wr_data_slave_ratio1_val
++ mov r2, r1
++ mov r5, r2, lsr #2
++ mov r2, r5
++ str r2, [r0, #292]
++dll_lock:
++ ldr r1, data0_dll_lock_diff_val
++ mov r2, r1
++ str r2, [r0, #312]
++
++setup_rank_delays:
++ ldr r1, data0_rank0_delay0_val
++ mov r2, r1
++ str r2, [r0, #308]
++ ldr r1, data1_rank0_delay1_val
++ mov r2, r1
++ str r2, [r0, #472]
++
++setup_io_ctrl:
++ ldr r0, control_base
++ ldr r1, ddr_ioctrl_val
++ mov r2, r1
++ ldr r4, ddr_cmd_offset
++ mov r3, r4
++ str r2, [r0, r3] @cmd0 0x1404
++ add r3, r3, #4
++ str r2, [r0, r3] @cmd1 0x1408
++ add r3, r3, #4
++ str r2, [r0, r3] @cmd2 0x140c
++ ldr r4, ddr_data_offset
++ mov r3, r4
++ str r2, [r0, r3] @data0 0x1440
++ add r3, r3, #4
++ str r2, [r0, r3] @data1 0x1444
++
++misc_config:
++ ldr r1, ddr_io_ctrl_addr
++ ldr r2, [r1]
++ and r2, #0xefffffff
++ str r2, [r1]
++ ldr r1, ddr_cke_addr
++ ldr r2, [r1]
++ orr r2, #0x00000001
++ str r2, [r1]
++
++config_emif_timings:
++ mov r3, #1275068416 @ 0x4c000000
++disable_sr:
++ mov r4, #0
++ str r4, [r3, #56] @ 0x38
++ ldr r4, emif_rd_lat_val
++ mov r2, r4
++rd_lat:
++ str r2, [r3, #228] @ 0xe4
++ str r2, [r3, #232] @ 0xe8
++ str r2, [r3, #236] @ 0xec
++timing1:
++ ldr r4, emif_timing1_val
++ mov r2, r4
++ str r2, [r3, #24]
++ str r2, [r3, #28]
++timing2:
++ ldr r4, emif_timing2_val
++ mov r2, r4
++ str r2, [r3, #32]
++ str r2, [r3, #36] @ 0x24
++timing3:
++ ldr r4, emif_timing3_val
++ mov r2, r4
++ str r2, [r3, #40] @ 0x28
++ str r2, [r3, #44] @ 0x2c
++sdcfg1:
++ ldr r4, emif_sdcfg_val
++ mov r2, r4
++ str r2, [r3, #8]
++ str r2, [r3, #12]
++ref_ctrl_const:
++ ldr r4, emif_ref_ctrl_const_val
++ mov r2, r4
++ str r2, [r3, #16]
++ str r2, [r3, #20]
++
++ /* GEL had a loop with init value of 5000 */
++ mov r0, #0x1000
++wait_loop1:
++ subs r0, r0, #1
++ bne wait_loop1
++
++ref_ctrl_actual:
++ ldr r4, emif_ref_ctrl_val
++ mov r2, r4
++ str r2, [r3, #16]
++ str r2, [r3, #20]
++sdcfg2:
++ ldr r4, emif_sdcfg_val
++ mov r2, r4
++ str r2, [r3, #8]
++ str r2, [r3, #12]
++
++ /* Back from la-la-land. Kill some time for sanity to settle in */
++ mov r0, #0x1000
++wait_loop2:
++ subs r0, r0, #1
++ bne wait_loop2
++
++ /* We are back. Branch to the common CPU resume routine */
++ENTRY(am33xx_resume_vector)
++ ldr pc, resume_addr
++
++/*
++ * Local variables
++ */
++
++resume_addr:
++ .word cpu_resume - PAGE_OFFSET + 0x80000000
++
++emif_addr_func:
++ .word am33xx_get_ram_base
++emif_phys_addr:
++ .word AM33XX_EMIF0_BASE
++
++emif_pm_ctrl:
++ .word EMIF4_0_SDRAM_MGMT_CTRL
++ddr_start:
++ .word PAGE_OFFSET
++
++virt_mpu_idlest:
++ .word AM33XX_CM_IDLEST_DPLL_MPU
++virt_mpu_clk_mode:
++ .word AM33XX_CM_CLKMODE_DPLL_MPU
++
++phys_pll_mod:
++ .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD
++phys_mpu_clk_mode:
++ .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_CLKMODE_DPLL_MPU_OFFSET
++phys_mpu_idlest:
++ .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_IDLEST_DPLL_MPU_OFFSET
++
++virt_core_idlest:
++ .word AM33XX_CM_IDLEST_DPLL_CORE
++virt_core_clk_mode:
++ .word AM33XX_CM_CLKMODE_DPLL_CORE
++phys_core_clk_mode:
++ .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_CLKMODE_DPLL_CORE_OFFSET
++phys_core_idlest:
++ .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_IDLEST_DPLL_CORE_OFFSET
++
++virt_per_idlest:
++ .word AM33XX_CM_IDLEST_DPLL_PER
++virt_per_clk_mode:
++ .word AM33XX_CM_CLKMODE_DPLL_PER
++phys_per_clk_mode:
++ .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_CLKMODE_DPLL_PER_OFFSET
++phys_per_idlest:
++ .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_IDLEST_DPLL_PER_OFFSET
++
++virt_disp_idlest:
++ .word AM33XX_CM_IDLEST_DPLL_DISP
++virt_disp_clk_mode:
++ .word AM33XX_CM_CLKMODE_DPLL_DISP
++phys_disp_clk_mode:
++ .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_CLKMODE_DPLL_DISP_OFFSET
++phys_disp_idlest:
++ .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_IDLEST_DPLL_DISP_OFFSET
++
++virt_ddr_idlest:
++ .word AM33XX_CM_IDLEST_DPLL_DDR
++virt_ddr_clk_mode:
++ .word AM33XX_CM_CLKMODE_DPLL_DDR
++phys_ddr_clk_mode:
++ .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_CLKMODE_DPLL_DDR_OFFSET
++phys_ddr_idlest:
++ .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_IDLEST_DPLL_DDR_OFFSET
++
++virt_sram_ldo_addr:
++ .word AM33XX_PRM_LDO_SRAM_MPU_CTRL
++phys_sram_ldo_addr:
++ .word AM33XX_PRM_BASE + AM33XX_PRM_DEVICE_MOD + AM33XX_PRM_LDO_SRAM_MPU_CTRL_OFFSET
++
++virt_emif_clkctrl:
++ .word AM33XX_CM_PER_EMIF_CLKCTRL
++phys_emif_clkctrl:
++ .word AM33XX_CM_BASE + AM33XX_CM_PER_MOD + AM33XX_CM_PER_EMIF_CLKCTRL_OFFSET
++module_disabled_val:
++ .word 0x30000
++
++/* DDR related stuff */
++vtp0_addr:
++ .word VTP0_CTRL_REG
++vtp_enable:
++ .word VTP_CTRL_ENABLE
++vtp_start_en:
++ .word VTP_CTRL_START_EN
++vtp_ready:
++ .word VTP_CTRL_READY
++
++ddr_phy_base:
++ .word DDR_PHY_BASE_ADDR
++ddr2_ratio_val:
++ .word DDR2_RATIO
++data0_rd_dqs_slave_ratio0_val:
++ .word DDR2_RD_DQS
++data0_rd_dqs_slave_ratio1_val:
++ .word DDR2_RD_DQS
++data0_wr_dqs_slave_ratio0_val:
++ .word DDR2_WR_DQS
++data0_wr_dqs_slave_ratio1_val:
++ .word DDR2_WR_DQS
++data0_wr_lvl_init_ratio0_val:
++ .word DDR2_PHY_WRLVL
++data0_wr_lvl_init_ratio1_val:
++ .word DDR2_PHY_WRLVL
++data0_gate_lvl_init_ratio0_val:
++ .word DDR2_PHY_GATELVL
++data0_gate_lvl_init_ratio1_val:
++ .word DDR2_PHY_GATELVL
++data0_wr_lvl_slave_ratio0_val:
++ .word DDR2_PHY_FIFO_WE
++data0_wr_lvl_slave_ratio1_val:
++ .word DDR2_PHY_FIFO_WE
++data0_wr_data_slave_ratio0_val:
++ .word DDR2_PHY_WR_DATA
++data0_wr_data_slave_ratio1_val:
++ .word DDR2_PHY_WR_DATA
++data0_dll_lock_diff_val:
++ .word PHY_DLL_LOCK_DIFF
++
++data0_rank0_delay0_val:
++ .word PHY_RANK0_DELAY
++data1_rank0_delay1_val:
++ .word PHY_RANK0_DELAY
++
++control_base:
++ .word AM33XX_CTRL_BASE
++ddr_io_ctrl_addr:
++ .word DDR_IO_CTRL
++ddr_ioctrl_val:
++ .word 0x18B
++ddr_cmd_offset:
++ .word 0x1404
++ddr_data_offset:
++ .word 0x1440
++virt_ddr_io_ctrl:
++ .word AM33XX_CTRL_REGADDR(0x0E04)
++phys_ddr_io_ctrl:
++ .word DDR_IO_CTRL
++virt_ddr_vtp_ctrl:
++ .word AM33XX_CTRL_REGADDR(0x0E0C)
++phys_ddr_vtp_ctrl:
++ .word VTP0_CTRL_REG
++virt_ddr_io_pull1:
++ .word AM33XX_CTRL_REGADDR(0x1440)
++phys_ddr_io_pull1:
++ .word AM33XX_CTRL_BASE + (0x1440)
++virt_ddr_io_pull2:
++ .word AM33XX_CTRL_REGADDR(0x1444)
++phys_ddr_io_pull2:
++ .word AM33XX_CTRL_BASE + (0x1444)
++virt_ddr_io_pull3:
++ .word AM33XX_CTRL_REGADDR(0x1448)
++phys_ddr_io_pull3:
++ .word AM33XX_CTRL_BASE + (0x1448)
++
++ddr_cke_addr:
++ .word DDR_CKE_CTRL
++emif_rd_lat_val:
++ .word EMIF_READ_LATENCY
++emif_timing1_val:
++ .word EMIF_TIM1
++emif_timing2_val:
++ .word EMIF_TIM2
++emif_timing3_val:
++ .word EMIF_TIM3
++emif_sdcfg_val:
++ .word EMIF_SDCFG
++emif_ref_ctrl_const_val:
++ .word 0x4650
++emif_ref_ctrl_val:
++ .word EMIF_SDREF
++
++susp_io_pull:
++ .word 0x3FF00003
++resume_io_pull1:
++ .word 0x18B
++resume_io_pull2:
++ .word 0x18B
++dyn_pd_val:
++ .word 0x100000
++susp_sdram_config:
++ .word 0x40805332
++susp_vtp_ctrl_val:
++ .word 0x10117
++emif_addr_virt:
++ .word 0xDEADBEEF
++
++
++ENTRY(am33xx_do_wfi_sz)
++ .word . - am33xx_do_wfi
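
The trailing am33xx_do_wfi_sz word exports the size of am33xx_do_wfi, which suggests the suspend path copies the routine into on-chip SRAM and runs it from there, since the code disables the EMIF and puts DDR into self-refresh and therefore cannot keep executing from DDR. A hedged sketch of how the PM setup code might stage that copy, reusing the omap_sram_push() helper from plat-omap; the C-side symbol names are assumptions.

	#include <linux/errno.h>
	#include <linux/init.h>
	#include <plat/sram.h>

	/* Assumed C declarations for the assembly routine and its size word above. */
	extern void am33xx_do_wfi(void);
	extern unsigned int am33xx_do_wfi_sz;

	/* SRAM copy of the routine, safe to run while the EMIF is disabled. */
	static void (*am33xx_do_wfi_sram)(void);

	static int __init am33xx_push_wfi_to_sram(void)
	{
		am33xx_do_wfi_sram = omap_sram_push(am33xx_do_wfi, am33xx_do_wfi_sz);
		return am33xx_do_wfi_sram ? 0 : -ENOMEM;
	}
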
+diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
+new file mode 100644
+index 0000000..abd2834
+--- /dev/null
++++ b/arch/arm/mach-omap2/sleep44xx.S
+@@ -0,0 +1,379 @@
++/*
++ * OMAP44xx sleep code.
++ *
++ * Copyright (C) 2011 Texas Instruments, Inc.
++ * Santosh Shilimkar <santosh.shilimkar@ti.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/linkage.h>
++#include <asm/system.h>
++#include <asm/smp_scu.h>
++#include <asm/memory.h>
++#include <asm/hardware/cache-l2x0.h>
++
++#include <plat/omap44xx.h>
++#include <mach/omap-secure.h>
++
++#include "common.h"
++#include "omap4-sar-layout.h"
++
++#if defined(CONFIG_SMP) && defined(CONFIG_PM)
++
++.macro DO_SMC
++ dsb
++ smc #0
++ dsb
++.endm
++
++ppa_zero_params:
++ .word 0x0
++
++ppa_por_params:
++ .word 1, 0
++
++/*
++ * =============================
++ * == CPU suspend finisher ==
++ * =============================
++ *
++ * void omap4_finish_suspend(unsigned long cpu_state)
++ *
++ * This function code saves the CPU context and performs the CPU
++ * power down sequence. Calling WFI effectively changes the CPU
++ * power domains states to the desired target power state.
++ *
++ * @cpu_state : contains context save state (r0)
++ * 0 - No context lost
++ * 1 - CPUx L1 and logic lost: MPUSS CSWR
++ * 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
++ * 3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
++ * @return: This function never returns for CPU OFF and DORMANT power states.
++ * Post WFI, CPU transitions to DORMANT or OFF power state and on wake-up
++ * from this follows a full CPU reset path via ROM code to CPU restore code.
++ * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
++ * It returns to the caller for CPU INACTIVE and ON power states or in case
++ * CPU failed to transition to targeted OFF/DORMANT state.
++ */
++ENTRY(omap4_finish_suspend)
++ stmfd sp!, {lr}
++ cmp r0, #0x0
++ beq do_WFI @ No lowpower state, jump to WFI
++
++ /*
++ * Flush all data from the L1 data cache before disabling
++ * SCTLR.C bit.
++ */
++ bl omap4_get_sar_ram_base
++ ldr r9, [r0, #OMAP_TYPE_OFFSET]
++ cmp r9, #0x1 @ Check for HS device
++ bne skip_secure_l1_clean
++ mov r0, #SCU_PM_NORMAL
++	mov r1, #0xFF				@ clean secure L1
++ stmfd r13!, {r4-r12, r14}
++ ldr r12, =OMAP4_MON_SCU_PWR_INDEX
++ DO_SMC
++ ldmfd r13!, {r4-r12, r14}
++skip_secure_l1_clean:
++ bl v7_flush_dcache_all
++
++ /*
++ * Clear the SCTLR.C bit to prevent further data cache
++ * allocation. Clearing SCTLR.C would make all the data accesses
++ * strongly ordered and would not hit the cache.
++ */
++ mrc p15, 0, r0, c1, c0, 0
++ bic r0, r0, #(1 << 2) @ Disable the C bit
++ mcr p15, 0, r0, c1, c0, 0
++ isb
++
++ /*
++	 * Invalidate the L1 data cache. Even though only an invalidate is
++	 * necessary, the exported flush API is used here. Doing a clean
++	 * on an already-clean cache is almost a NOP.
++ */
++ bl v7_flush_dcache_all
++
++ /*
++ * Switch the CPU from Symmetric Multiprocessing (SMP) mode
++	 * to Asymmetric Multiprocessing (AMP) mode by programming
++ * the SCU power status to DORMANT or OFF mode.
++ * This enables the CPU to be taken out of coherency by
++ * preventing the CPU from receiving cache, TLB, or BTB
++ * maintenance operations broadcast by other CPUs in the cluster.
++ */
++ bl omap4_get_sar_ram_base
++ mov r8, r0
++ ldr r9, [r8, #OMAP_TYPE_OFFSET]
++ cmp r9, #0x1 @ Check for HS device
++ bne scu_gp_set
++ mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR
++ ands r0, r0, #0x0f
++ ldreq r0, [r8, #SCU_OFFSET0]
++ ldrne r0, [r8, #SCU_OFFSET1]
++ mov r1, #0x00
++ stmfd r13!, {r4-r12, r14}
++ ldr r12, =OMAP4_MON_SCU_PWR_INDEX
++ DO_SMC
++ ldmfd r13!, {r4-r12, r14}
++ b skip_scu_gp_set
++scu_gp_set:
++ mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR
++ ands r0, r0, #0x0f
++ ldreq r1, [r8, #SCU_OFFSET0]
++ ldrne r1, [r8, #SCU_OFFSET1]
++ bl omap4_get_scu_base
++ bl scu_power_mode
++skip_scu_gp_set:
++ mrc p15, 0, r0, c1, c1, 2 @ Read NSACR data
++ tst r0, #(1 << 18)
++ mrcne p15, 0, r0, c1, c0, 1
++ bicne r0, r0, #(1 << 6) @ Disable SMP bit
++ mcrne p15, 0, r0, c1, c0, 1
++ isb
++ dsb
++#ifdef CONFIG_CACHE_L2X0
++ /*
++ * Clean and invalidate the L2 cache.
++	 * The common cache-l2x0.c functions can't be used here since they
++	 * use spinlocks. We are out of coherency here with the data cache
++	 * disabled. The spinlock implementation uses exclusive load/store
++	 * instructions, which can fail without the data cache being enabled.
++	 * OMAP4 hardware doesn't support an exclusive monitor which could
++	 * overcome the exclusive access issue. Because of this, the CPU
++	 * can deadlock.
++ */
++ bl omap4_get_sar_ram_base
++ mov r8, r0
++ mrc p15, 0, r5, c0, c0, 5 @ Read MPIDR
++ ands r5, r5, #0x0f
++ ldreq r0, [r8, #L2X0_SAVE_OFFSET0] @ Retrieve L2 state from SAR
++ ldrne r0, [r8, #L2X0_SAVE_OFFSET1] @ memory.
++ cmp r0, #3
++ bne do_WFI
++#ifdef CONFIG_PL310_ERRATA_727915
++ mov r0, #0x03
++ mov r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
++ DO_SMC
++#endif
++ bl omap4_get_l2cache_base
++ mov r2, r0
++ ldr r0, =0xffff
++ str r0, [r2, #L2X0_CLEAN_INV_WAY]
++wait:
++ ldr r0, [r2, #L2X0_CLEAN_INV_WAY]
++ ldr r1, =0xffff
++ ands r0, r0, r1
++ bne wait
++#ifdef CONFIG_PL310_ERRATA_727915
++ mov r0, #0x00
++ mov r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
++ DO_SMC
++#endif
++l2x_sync:
++ bl omap4_get_l2cache_base
++ mov r2, r0
++ mov r0, #0x0
++ str r0, [r2, #L2X0_CACHE_SYNC]
++sync:
++ ldr r0, [r2, #L2X0_CACHE_SYNC]
++ ands r0, r0, #0x1
++ bne sync
++#endif
++
++do_WFI:
++ bl omap_do_wfi
++
++ /*
++ * CPU is here when it failed to enter OFF/DORMANT or
++ * no low power state was attempted.
++ */
++ mrc p15, 0, r0, c1, c0, 0
++ tst r0, #(1 << 2) @ Check C bit enabled?
++ orreq r0, r0, #(1 << 2) @ Enable the C bit
++ mcreq p15, 0, r0, c1, c0, 0
++ isb
++
++ /*
++ * Ensure the CPU power state is set to NORMAL in
++ * SCU power state so that CPU is back in coherency.
++	 * In non-coherent mode the CPU can lock up and
++	 * deadlock the system.
++ */
++ mrc p15, 0, r0, c1, c0, 1
++ tst r0, #(1 << 6) @ Check SMP bit enabled?
++ orreq r0, r0, #(1 << 6)
++ mcreq p15, 0, r0, c1, c0, 1
++ isb
++ bl omap4_get_sar_ram_base
++ mov r8, r0
++ ldr r9, [r8, #OMAP_TYPE_OFFSET]
++ cmp r9, #0x1 @ Check for HS device
++ bne scu_gp_clear
++ mov r0, #SCU_PM_NORMAL
++ mov r1, #0x00
++ stmfd r13!, {r4-r12, r14}
++ ldr r12, =OMAP4_MON_SCU_PWR_INDEX
++ DO_SMC
++ ldmfd r13!, {r4-r12, r14}
++ b skip_scu_gp_clear
++scu_gp_clear:
++ bl omap4_get_scu_base
++ mov r1, #SCU_PM_NORMAL
++ bl scu_power_mode
++skip_scu_gp_clear:
++ isb
++ dsb
++ ldmfd sp!, {pc}
++ENDPROC(omap4_finish_suspend)
++
++/*
++ * ============================
++ * == CPU resume entry point ==
++ * ============================
++ *
++ * void omap4_cpu_resume(void)
++ *
++ * ROM code jumps to this function while waking up from CPU
++ * OFF or DORMANT state. Physical address of the function is
++ * stored in the SAR RAM while entering to OFF or DORMANT mode.
++ * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
++ */
++ENTRY(omap4_cpu_resume)
++ /*
++ * Configure ACTRL and enable NS SMP bit access on CPU1 on HS device.
++ * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
++	 * init and for CPU1 a secure PPA API is provided. CPU0 must be ON
++	 * while executing NS_SMP API on CPU1 and PPA version must be 1.4.0+.
++	 * OMAP443X GP devices - SMP bit isn't accessible.
++ * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
++ */
++ ldr r8, =OMAP44XX_SAR_RAM_BASE
++ ldr r9, [r8, #OMAP_TYPE_OFFSET]
++ cmp r9, #0x1 @ Skip if GP device
++ bne skip_ns_smp_enable
++ mrc p15, 0, r0, c0, c0, 5
++ ands r0, r0, #0x0f
++ beq skip_ns_smp_enable
++ppa_actrl_retry:
++ mov r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
++ adr r3, ppa_zero_params @ Pointer to parameters
++ mov r1, #0x0 @ Process ID
++ mov r2, #0x4 @ Flag
++ mov r6, #0xff
++ mov r12, #0x00 @ Secure Service ID
++ DO_SMC
++ cmp r0, #0x0 @ API returns 0 on success.
++ beq enable_smp_bit
++ b ppa_actrl_retry
++enable_smp_bit:
++ mrc p15, 0, r0, c1, c0, 1
++ tst r0, #(1 << 6) @ Check SMP bit enabled?
++ orreq r0, r0, #(1 << 6)
++ mcreq p15, 0, r0, c1, c0, 1
++ isb
++skip_ns_smp_enable:
++#ifdef CONFIG_CACHE_L2X0
++ /*
++ * Restore the L2 AUXCTRL and enable the L2 cache.
++ * OMAP4_MON_L2X0_AUXCTRL_INDEX = Program the L2X0 AUXCTRL
++ * OMAP4_MON_L2X0_CTRL_INDEX = Enable the L2 using L2X0 CTRL
++ * register r0 contains value to be programmed.
++	 * L2 cache is already invalidated by ROM code as part
++ * of MPUSS OFF wakeup path.
++ */
++ ldr r2, =OMAP44XX_L2CACHE_BASE
++ ldr r0, [r2, #L2X0_CTRL]
++ and r0, #0x0f
++ cmp r0, #1
++ beq skip_l2en @ Skip if already enabled
++ ldr r3, =OMAP44XX_SAR_RAM_BASE
++ ldr r1, [r3, #OMAP_TYPE_OFFSET]
++ cmp r1, #0x1 @ Check for HS device
++ bne set_gp_por
++ ldr r0, =OMAP4_PPA_L2_POR_INDEX
++ ldr r1, =OMAP44XX_SAR_RAM_BASE
++ ldr r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
++ adr r3, ppa_por_params
++ str r4, [r3, #0x04]
++ mov r1, #0x0 @ Process ID
++ mov r2, #0x4 @ Flag
++ mov r6, #0xff
++ mov r12, #0x00 @ Secure Service ID
++ DO_SMC
++ b set_aux_ctrl
++set_gp_por:
++ ldr r1, =OMAP44XX_SAR_RAM_BASE
++ ldr r0, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
++ ldr r12, =OMAP4_MON_L2X0_PREFETCH_INDEX @ Setup L2 PREFETCH
++ DO_SMC
++set_aux_ctrl:
++ ldr r1, =OMAP44XX_SAR_RAM_BASE
++ ldr r0, [r1, #L2X0_AUXCTRL_OFFSET]
++ ldr r12, =OMAP4_MON_L2X0_AUXCTRL_INDEX @ Setup L2 AUXCTRL
++ DO_SMC
++ mov r0, #0x1
++ ldr r12, =OMAP4_MON_L2X0_CTRL_INDEX @ Enable L2 cache
++ DO_SMC
++skip_l2en:
++#endif
++
++ b cpu_resume @ Jump to generic resume
++ENDPROC(omap4_cpu_resume)
++#endif
++
++#ifndef CONFIG_OMAP4_ERRATA_I688
++ENTRY(omap_bus_sync)
++ mov pc, lr
++ENDPROC(omap_bus_sync)
++#endif
++
++ENTRY(omap_do_wfi)
++ stmfd sp!, {lr}
++ /* Drain interconnect write buffers. */
++ bl omap_bus_sync
++
++ /*
++ * Execute an ISB instruction to ensure that all of the
++ * CP15 register changes have been committed.
++ */
++ isb
++
++ /*
++ * Execute a barrier instruction to ensure that all cache,
++ * TLB and branch predictor maintenance operations issued
++ * by any CPU in the cluster have completed.
++ */
++ dsb
++ dmb
++
++ /*
++ * Execute a WFI instruction and wait until the
++ * STANDBYWFI output is asserted to indicate that the
++	 * CPU is in idle and low power state. CPU can speculatively
++ * prefetch the instructions so add NOPs after WFI. Sixteen
++ * NOPs as per Cortex-A9 pipeline.
++ */
++ wfi @ Wait For Interrupt
++ nop
++ nop
++ nop
++ nop
++ nop
++ nop
++ nop
++ nop
++ nop
++ nop
++ nop
++ nop
++ nop
++ nop
++ nop
++ nop
++
++ ldmfd sp!, {pc}
++ENDPROC(omap_do_wfi)
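
The omap4_finish_suspend() description above matches the contract of a cpu_suspend() finisher: it returns only when the CPU failed to enter, or never attempted, a low-power state. A minimal sketch of a caller, assuming the generic ARM cpu_suspend() helper and an int-returning C prototype for the finisher (both are assumptions, not taken from this patch).

	#include <asm/suspend.h>

	/* Assumed C prototype for the assembly finisher above. */
	extern int omap4_finish_suspend(unsigned long cpu_state);

	/*
	 * cpu_state follows the encoding documented with omap4_finish_suspend():
	 * 0 - no context loss, 1 - CSWR, 2 - OSWR, 3 - MPUSS OFF.
	 */
	static void omap4_enter_lowpower_sketch(unsigned long cpu_state)
	{
		if (cpu_state)
			/*
			 * cpu_suspend() saves the CPU context and then calls the
			 * finisher; after a real power-down the CPU comes back
			 * through omap4_cpu_resume() and the generic cpu_resume().
			 */
			cpu_suspend(cpu_state, omap4_finish_suspend);
		else
			omap4_finish_suspend(cpu_state);
	}
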
+diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
+index cf246b3..9dd9345 100644
+--- a/arch/arm/mach-omap2/smartreflex.c
++++ b/arch/arm/mach-omap2/smartreflex.c
+@@ -26,7 +26,7 @@
+ #include <linux/slab.h>
+ #include <linux/pm_runtime.h>
+
+-#include <plat/common.h>
++#include "common.h"
+
+ #include "pm.h"
+ #include "smartreflex.h"
+diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
+index 037b0d7..1af3871 100644
+--- a/arch/arm/mach-omap2/timer.c
++++ b/arch/arm/mach-omap2/timer.c
+@@ -41,12 +41,16 @@
+ #include <plat/dmtimer.h>
+ #include <asm/localtimer.h>
+ #include <asm/sched_clock.h>
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/omap_hwmod.h>
+ #include <plat/omap_device.h>
+ #include <plat/omap-pm.h>
++#include <plat/clock.h>
+
++#include "clockdomain.h"
+ #include "powerdomain.h"
++#include "cm2xxx_3xxx.h"
++#include "cminst44xx.h"
+
+ /* Parent clocks, eventually these will come from the clock framework */
+
+@@ -56,6 +60,7 @@
+ #define OMAP2_32K_SOURCE "func_32k_ck"
+ #define OMAP3_32K_SOURCE "omap_32k_fck"
+ #define OMAP4_32K_SOURCE "sys_32k_ck"
++#define AM33XX_RTC32K_SOURCE "clk_32768_ck"
+
+ #ifdef CONFIG_OMAP_32K_TIMER
+ #define OMAP2_CLKEV_SOURCE OMAP2_32K_SOURCE
+@@ -139,6 +144,67 @@ static struct clock_event_device clockevent_gpt = {
+ .set_mode = omap2_gp_timer_set_mode,
+ };
+
++static int _is_timer_idle(struct omap_hwmod *oh)
++{
++ int ret;
++
++ if (cpu_is_omap44xx() || cpu_is_am33xx()) {
++ if (!oh->clkdm)
++ return -EINVAL;
++ ret = omap4_cminst_wait_module_ready(oh->clkdm->prcm_partition,
++ oh->clkdm->cm_inst,
++ oh->clkdm->clkdm_offs,
++ oh->prcm.omap4.clkctrl_offs);
++ } else if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
++ ret = omap2_cm_wait_module_ready(
++ oh->prcm.omap2.module_offs,
++ oh->prcm.omap2.idlest_reg_id,
++ oh->prcm.omap2.idlest_idle_bit);
++ } else {
++ BUG();
++ }
++
++ return ret;
++}
++
++static int omap_dm_timer_switch_src(struct omap_hwmod *oh,
++ struct omap_dm_timer *timer, const char *fck_source)
++{
++ struct clk *src, *cur_parent;
++ int res;
++
++ src = clk_get(NULL, fck_source);
++ if (IS_ERR(src))
++ return -EINVAL;
++
++ /* Reserve HW/clock-tree default source for fallback */
++ cur_parent = clk_get_parent(timer->fclk);
++
++ /* Switch to configured source */
++ res = __omap_dm_timer_set_source(timer->fclk, src);
++ if (IS_ERR_VALUE(res))
++ pr_warning("%s: timer%i cannot set source\n",
++ __func__, timer->id);
++
++	/* Check whether the timer module went into idle state */
++ res = _is_timer_idle(oh);
++ if (res && cur_parent) {
++ /* Fallback to default timer source */
++ pr_warning("%s: Switching to HW default clocksource(%s) for "
++ "timer%i, this may impact timekeeping in low "
++ "power state\n",
++ __func__, cur_parent->name, timer->id);
++
++ res = __omap_dm_timer_set_source(timer->fclk, cur_parent);
++ if (IS_ERR_VALUE(res))
++ pr_warning("%s: timer%i cannot set source\n",
++ __func__, timer->id);
++ }
++ clk_put(src);
++
++ return res;
++}
++
+ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
+ int gptimer_id,
+ const char *fck_source)
+@@ -155,6 +221,7 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
+ return -ENODEV;
+
+ timer->irq = oh->mpu_irqs[0].irq;
++ timer->id = gptimer_id;
+ timer->phys_base = oh->slaves[0]->addr->pa_start;
+ size = oh->slaves[0]->addr->pa_end - timer->phys_base;
+
+@@ -178,21 +245,10 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
+
+ omap_hwmod_enable(oh);
+
+- sys_timer_reserved |= (1 << (gptimer_id - 1));
++ sys_timer_reserved |= (1 << (gptimer_id));
+
+ if (gptimer_id != 12) {
+- struct clk *src;
+-
+- src = clk_get(NULL, fck_source);
+- if (IS_ERR(src)) {
+- res = -EINVAL;
+- } else {
+- res = __omap_dm_timer_set_source(timer->fclk, src);
+- if (IS_ERR_VALUE(res))
+- pr_warning("%s: timer%i cannot set source\n",
+- __func__, gptimer_id);
+- clk_put(src);
+- }
++ res = omap_dm_timer_switch_src(oh, timer, fck_source);
+ }
+ __omap_dm_timer_init_regs(timer);
+ __omap_dm_timer_reset(timer, 1, 1);
+@@ -309,6 +365,35 @@ static void __init omap2_gp_clocksource_init(int gptimer_id,
+ }
+ #endif
+
++static void omap_dmtimer_resume(void)
++{
++ char name[10];
++ struct omap_hwmod *oh;
++
++ sprintf(name, "timer%d", clkev.id);
++ oh = omap_hwmod_lookup(name);
++ if (!oh)
++ return;
++
++ omap_hwmod_enable(oh);
++ __omap_dm_timer_load_start(&clkev,
++ OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR, 0, 1);
++ __omap_dm_timer_int_enable(&clkev, OMAP_TIMER_INT_OVERFLOW);
++}
++
++static void omap_dmtimer_suspend(void)
++{
++ char name[10];
++ struct omap_hwmod *oh;
++
++ sprintf(name, "timer%d", clkev.id);
++ oh = omap_hwmod_lookup(name);
++ if (!oh)
++ return;
++
++ omap_hwmod_idle(oh);
++}
++
+ #define OMAP_SYS_TIMER_INIT(name, clkev_nr, clkev_src, \
+ clksrc_nr, clksrc_src) \
+ static void __init omap##name##_timer_init(void) \
+@@ -319,7 +404,9 @@ static void __init omap##name##_timer_init(void) \
+
+ #define OMAP_SYS_TIMER(name) \
+ struct sys_timer omap##name##_timer = { \
+- .init = omap##name##_timer_init, \
++ .init = omap##name##_timer_init, \
++ .suspend = omap_dmtimer_suspend, \
++ .resume = omap_dmtimer_resume, \
+ };
+
+ #ifdef CONFIG_ARCH_OMAP2
+@@ -333,6 +420,8 @@ OMAP_SYS_TIMER(3)
+ OMAP_SYS_TIMER_INIT(3_secure, OMAP3_SECURE_TIMER, OMAP3_CLKEV_SOURCE,
+ 2, OMAP3_MPU_SOURCE)
+ OMAP_SYS_TIMER(3_secure)
++OMAP_SYS_TIMER_INIT(3_am33xx, 2, OMAP4_MPU_SOURCE, 1, AM33XX_RTC32K_SOURCE)
++OMAP_SYS_TIMER(3_am33xx)
+ #endif
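
With the suspend/resume hooks added to OMAP_SYS_TIMER(), every instantiation now wires in the dmtimer idle handlers as well; for example, OMAP_SYS_TIMER(3_am33xx) above expands to roughly the sketch below.

	/* Approximate expansion of OMAP_SYS_TIMER(3_am33xx) after this change. */
	struct sys_timer omap3_am33xx_timer = {
		.init		= omap3_am33xx_timer_init,
		.suspend	= omap_dmtimer_suspend,
		.resume		= omap_dmtimer_resume,
	};
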
+
+ #ifdef CONFIG_ARCH_OMAP4
+@@ -460,7 +549,7 @@ static int __init omap_timer_init(struct omap_hwmod *oh, void *unused)
+ pdata->timer_ip_version = oh->class->rev;
+
+ /* Mark clocksource and clockevent timers as reserved */
+- if ((sys_timer_reserved >> (id - 1)) & 0x1)
++ if ((sys_timer_reserved & (0x1 << id)))
+ pdata->reserved = 1;
+
+ pwrdm = omap_hwmod_get_pwrdm(oh);
+diff --git a/arch/arm/mach-omap2/usb-host.c b/arch/arm/mach-omap2/usb-host.c
+index 89ae298..771dc78 100644
+--- a/arch/arm/mach-omap2/usb-host.c
++++ b/arch/arm/mach-omap2/usb-host.c
+@@ -28,51 +28,28 @@
+ #include <mach/hardware.h>
+ #include <mach/irqs.h>
+ #include <plat/usb.h>
++#include <plat/omap_device.h>
+
+ #include "mux.h"
+
+ #ifdef CONFIG_MFD_OMAP_USB_HOST
+
+-#define OMAP_USBHS_DEVICE "usbhs-omap"
+-
+-static struct resource usbhs_resources[] = {
+- {
+- .name = "uhh",
+- .flags = IORESOURCE_MEM,
+- },
+- {
+- .name = "tll",
+- .flags = IORESOURCE_MEM,
+- },
+- {
+- .name = "ehci",
+- .flags = IORESOURCE_MEM,
+- },
+- {
+- .name = "ehci-irq",
+- .flags = IORESOURCE_IRQ,
+- },
+- {
+- .name = "ohci",
+- .flags = IORESOURCE_MEM,
+- },
+- {
+- .name = "ohci-irq",
+- .flags = IORESOURCE_IRQ,
+- }
+-};
+-
+-static struct platform_device usbhs_device = {
+- .name = OMAP_USBHS_DEVICE,
+- .id = 0,
+- .num_resources = ARRAY_SIZE(usbhs_resources),
+- .resource = usbhs_resources,
+-};
++#define OMAP_USBHS_DEVICE "usbhs_omap"
++#define USBHS_UHH_HWMODNAME "usb_host_hs"
++#define USBHS_TLL_HWMODNAME "usb_tll_hs"
+
+ static struct usbhs_omap_platform_data usbhs_data;
+ static struct ehci_hcd_omap_platform_data ehci_data;
+ static struct ohci_hcd_omap_platform_data ohci_data;
+
++static struct omap_device_pm_latency omap_uhhtll_latency[] = {
++ {
++ .deactivate_func = omap_device_idle_hwmods,
++ .activate_func = omap_device_enable_hwmods,
++ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
++ },
++};
++
+ /* MUX settings for EHCI pins */
+ /*
+ * setup_ehci_io_mux - initialize IO pad mux for USBHOST
+@@ -508,7 +485,10 @@ static void setup_4430ohci_io_mux(const enum usbhs_omap_port_mode *port_mode)
+
+ void __init usbhs_init(const struct usbhs_omap_board_data *pdata)
+ {
+- int i;
++ struct omap_hwmod *oh[2];
++ struct omap_device *od;
++ int bus_id = -1;
++ int i;
+
+ for (i = 0; i < OMAP3_HS_USB_PORTS; i++) {
+ usbhs_data.port_mode[i] = pdata->port_mode[i];
+@@ -523,44 +503,34 @@ void __init usbhs_init(const struct usbhs_omap_board_data *pdata)
+ usbhs_data.ohci_data = &ohci_data;
+
+ if (cpu_is_omap34xx()) {
+- usbhs_resources[0].start = OMAP34XX_UHH_CONFIG_BASE;
+- usbhs_resources[0].end = OMAP34XX_UHH_CONFIG_BASE + SZ_1K - 1;
+- usbhs_resources[1].start = OMAP34XX_USBTLL_BASE;
+- usbhs_resources[1].end = OMAP34XX_USBTLL_BASE + SZ_4K - 1;
+- usbhs_resources[2].start = OMAP34XX_EHCI_BASE;
+- usbhs_resources[2].end = OMAP34XX_EHCI_BASE + SZ_1K - 1;
+- usbhs_resources[3].start = INT_34XX_EHCI_IRQ;
+- usbhs_resources[4].start = OMAP34XX_OHCI_BASE;
+- usbhs_resources[4].end = OMAP34XX_OHCI_BASE + SZ_1K - 1;
+- usbhs_resources[5].start = INT_34XX_OHCI_IRQ;
+ setup_ehci_io_mux(pdata->port_mode);
+ setup_ohci_io_mux(pdata->port_mode);
+ } else if (cpu_is_omap44xx()) {
+- usbhs_resources[0].start = OMAP44XX_UHH_CONFIG_BASE;
+- usbhs_resources[0].end = OMAP44XX_UHH_CONFIG_BASE + SZ_1K - 1;
+- usbhs_resources[1].start = OMAP44XX_USBTLL_BASE;
+- usbhs_resources[1].end = OMAP44XX_USBTLL_BASE + SZ_4K - 1;
+- usbhs_resources[2].start = OMAP44XX_HSUSB_EHCI_BASE;
+- usbhs_resources[2].end = OMAP44XX_HSUSB_EHCI_BASE + SZ_1K - 1;
+- usbhs_resources[3].start = OMAP44XX_IRQ_EHCI;
+- usbhs_resources[4].start = OMAP44XX_HSUSB_OHCI_BASE;
+- usbhs_resources[4].end = OMAP44XX_HSUSB_OHCI_BASE + SZ_1K - 1;
+- usbhs_resources[5].start = OMAP44XX_IRQ_OHCI;
+ setup_4430ehci_io_mux(pdata->port_mode);
+ setup_4430ohci_io_mux(pdata->port_mode);
+ }
+
+- if (platform_device_add_data(&usbhs_device,
+- &usbhs_data, sizeof(usbhs_data)) < 0) {
+- printk(KERN_ERR "USBHS platform_device_add_data failed\n");
+- goto init_end;
++ oh[0] = omap_hwmod_lookup(USBHS_UHH_HWMODNAME);
++ if (!oh[0]) {
++ pr_err("Could not look up %s\n", USBHS_UHH_HWMODNAME);
++ return;
+ }
+
+- if (platform_device_register(&usbhs_device) < 0)
+- printk(KERN_ERR "USBHS platform_device_register failed\n");
++ oh[1] = omap_hwmod_lookup(USBHS_TLL_HWMODNAME);
++ if (!oh[1]) {
++ pr_err("Could not look up %s\n", USBHS_TLL_HWMODNAME);
++ return;
++ }
+
+-init_end:
+- return;
++ od = omap_device_build_ss(OMAP_USBHS_DEVICE, bus_id, oh, 2,
++ (void *)&usbhs_data, sizeof(usbhs_data),
++ omap_uhhtll_latency,
++ ARRAY_SIZE(omap_uhhtll_latency), false);
++ if (IS_ERR(od)) {
++ pr_err("Could not build hwmod devices %s,%s\n",
++ USBHS_UHH_HWMODNAME, USBHS_TLL_HWMODNAME);
++ return;
++ }
+ }
+
+ #else
+@@ -570,5 +540,3 @@ void __init usbhs_init(const struct usbhs_omap_board_data *pdata)
+ }
+
+ #endif
+-
+-
+diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
+index 2679750..7bc40a0 100644
+--- a/arch/arm/mach-omap2/usb-musb.c
++++ b/arch/arm/mach-omap2/usb-musb.c
+@@ -34,28 +34,22 @@
+ #include "mux.h"
+
+ static struct musb_hdrc_config musb_config = {
++ .fifo_mode = 4,
+ .multipoint = 1,
+ .dyn_fifo = 1,
+ .num_eps = 16,
+ .ram_bits = 12,
+ };
+
+-static struct musb_hdrc_platform_data musb_plat = {
+-#ifdef CONFIG_USB_MUSB_OTG
+- .mode = MUSB_OTG,
+-#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
+- .mode = MUSB_HOST,
+-#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
+- .mode = MUSB_PERIPHERAL,
+-#endif
+- /* .clock is set dynamically */
+- .config = &musb_config,
+-
+- /* REVISIT charge pump on TWL4030 can supply up to
+- * 100 mA ... but this value is board-specific, like
+- * "mode", and should be passed to usb_musb_init().
+- */
+- .power = 50, /* up to 100 mA */
++static struct musb_hdrc_platform_data musb_plat[] = {
++ {
++ .config = &musb_config,
++ .clock = "ick",
++ },
++ {
++ .config = &musb_config,
++ .clock = "ick",
++ },
+ };
+
+ static u64 musb_dmamask = DMA_BIT_MASK(32);
+@@ -84,15 +78,27 @@ void __init usb_musb_init(struct omap_musb_board_data *musb_board_data)
+ * REVISIT: This line can be removed once all the platforms using
+ * musb_core.c have been converted to use use clkdev.
+ */
+- musb_plat.clock = "ick";
+- musb_plat.board_data = board_data;
+- musb_plat.power = board_data->power >> 1;
+- musb_plat.mode = board_data->mode;
+- musb_plat.extvbus = board_data->extvbus;
++ musb_plat[0].clock = "ick";
++ musb_plat[0].board_data = board_data;
++ musb_plat[0].power = board_data->power >> 1;
++ musb_plat[0].mode = board_data->mode;
++ musb_plat[0].extvbus = board_data->extvbus;
++
++ /*
++ * OMAP3630/AM35x platforms have MUSB RTL-1.8, which fixes the issue
++ * that restricts active endpoints to the first 8K of FIFO space.
++ * That issue limits the OMAP35x platform to fifo_mode '5'.
++ */
++ if (cpu_is_omap3430())
++ musb_config.fifo_mode = 5;
+
+ if (cpu_is_omap3517() || cpu_is_omap3505()) {
+ oh_name = "am35x_otg_hs";
+ name = "musb-am35x";
++ } else if (cpu_is_ti81xx() || cpu_is_am33xx()) {
++ board_data->set_phy_power = ti81xx_musb_phy_power;
++ oh_name = "usb_otg_hs";
++ name = "musb-ti81xx";
+ } else {
+ oh_name = "usb_otg_hs";
+ name = "musb-omap2430";
+diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c
+index 031d116..e78f5a8 100644
+--- a/arch/arm/mach-omap2/vc.c
++++ b/arch/arm/mach-omap2/vc.c
+@@ -292,9 +292,8 @@ void __init omap_vc_init_channel(struct voltagedomain *voltdm)
+ u32 val;
+
+ if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) {
+- pr_err("%s: PMIC info requried to configure vc for"
+- "vdd_%s not populated.Hence cannot initialize vc\n",
+- __func__, voltdm->name);
++ pr_err("%s: missing PMIC for vdd_%s.\n",
++ __func__, voltdm->name);
+ return;
+ }
+
+diff --git a/arch/arm/mach-omap2/vc3xxx_data.c b/arch/arm/mach-omap2/vc3xxx_data.c
+index cfe348e..a5ec7f8f 100644
+--- a/arch/arm/mach-omap2/vc3xxx_data.c
++++ b/arch/arm/mach-omap2/vc3xxx_data.c
+@@ -18,7 +18,7 @@
+ #include <linux/err.h>
+ #include <linux/init.h>
+
+-#include <plat/common.h>
++#include "common.h"
+
+ #include "prm-regbits-34xx.h"
+ #include "voltage.h"
+diff --git a/arch/arm/mach-omap2/vc44xx_data.c b/arch/arm/mach-omap2/vc44xx_data.c
+index 2740a96..d70b930 100644
+--- a/arch/arm/mach-omap2/vc44xx_data.c
++++ b/arch/arm/mach-omap2/vc44xx_data.c
+@@ -18,7 +18,7 @@
+ #include <linux/err.h>
+ #include <linux/init.h>
+
+-#include <plat/common.h>
++#include "common.h"
+
+ #include "prm44xx.h"
+ #include "prm-regbits-44xx.h"
+diff --git a/arch/arm/mach-omap2/voltage.c b/arch/arm/mach-omap2/voltage.c
+index 1f8fdf7..8a36342 100644
+--- a/arch/arm/mach-omap2/voltage.c
++++ b/arch/arm/mach-omap2/voltage.c
+@@ -27,7 +27,7 @@
+ #include <linux/slab.h>
+ #include <linux/clk.h>
+
+-#include <plat/common.h>
++#include "common.h"
+
+ #include "prm-regbits-34xx.h"
+ #include "prm-regbits-44xx.h"
+diff --git a/arch/arm/mach-omap2/voltage.h b/arch/arm/mach-omap2/voltage.h
+index 16a1b09..a7c43c1 100644
+--- a/arch/arm/mach-omap2/voltage.h
++++ b/arch/arm/mach-omap2/voltage.h
+@@ -156,6 +156,7 @@ int omap_voltage_late_init(void);
+
+ extern void omap2xxx_voltagedomains_init(void);
+ extern void omap3xxx_voltagedomains_init(void);
++extern void am33xx_voltagedomains_init(void);
+ extern void omap44xx_voltagedomains_init(void);
+
+ struct voltagedomain *voltdm_lookup(const char *name);
+diff --git a/arch/arm/mach-omap2/voltagedomains33xx_data.c b/arch/arm/mach-omap2/voltagedomains33xx_data.c
+new file mode 100644
+index 0000000..965458d
+--- /dev/null
++++ b/arch/arm/mach-omap2/voltagedomains33xx_data.c
+@@ -0,0 +1,43 @@
++/*
++ * AM33XX voltage domain data
++ *
++ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++
++#include "voltage.h"
++
++static struct voltagedomain am33xx_voltdm_mpu = {
++ .name = "mpu",
++};
++
++static struct voltagedomain am33xx_voltdm_core = {
++ .name = "core",
++};
++
++static struct voltagedomain am33xx_voltdm_rtc = {
++ .name = "rtc",
++};
++
++static struct voltagedomain *voltagedomains_am33xx[] __initdata = {
++ &am33xx_voltdm_mpu,
++ &am33xx_voltdm_core,
++ &am33xx_voltdm_rtc,
++ NULL,
++};
++
++void __init am33xx_voltagedomains_init(void)
++{
++ voltdm_init(voltagedomains_am33xx);
++}
+diff --git a/arch/arm/mach-omap2/voltagedomains3xxx_data.c b/arch/arm/mach-omap2/voltagedomains3xxx_data.c
+index 071101d..c005e2f 100644
+--- a/arch/arm/mach-omap2/voltagedomains3xxx_data.c
++++ b/arch/arm/mach-omap2/voltagedomains3xxx_data.c
+@@ -18,7 +18,7 @@
+ #include <linux/err.h>
+ #include <linux/init.h>
+
+-#include <plat/common.h>
++#include "common.h"
+ #include <plat/cpu.h>
+
+ #include "prm-regbits-34xx.h"
+@@ -31,6 +31,14 @@
+ * VDD data
+ */
+
++/* OMAP3-common voltagedomain data */
++
++static struct voltagedomain omap3_voltdm_wkup = {
++ .name = "wakeup",
++};
++
++/* 34xx/36xx voltagedomain data */
++
+ static const struct omap_vfsm_instance omap3_vdd1_vfsm = {
+ .voltsetup_reg = OMAP3_PRM_VOLTSETUP1_OFFSET,
+ .voltsetup_mask = OMAP3430_SETUP_TIME1_MASK,
+@@ -63,10 +71,6 @@ static struct voltagedomain omap3_voltdm_core = {
+ .vp = &omap3_vp_core,
+ };
+
+-static struct voltagedomain omap3_voltdm_wkup = {
+- .name = "wakeup",
+-};
+-
+ static struct voltagedomain *voltagedomains_omap3[] __initdata = {
+ &omap3_voltdm_mpu,
+ &omap3_voltdm_core,
+@@ -74,11 +78,30 @@ static struct voltagedomain *voltagedomains_omap3[] __initdata = {
+ NULL,
+ };
+
++/* AM35xx voltagedomain data */
++
++static struct voltagedomain am35xx_voltdm_mpu = {
++ .name = "mpu_iva",
++};
++
++static struct voltagedomain am35xx_voltdm_core = {
++ .name = "core",
++};
++
++static struct voltagedomain *voltagedomains_am35xx[] __initdata = {
++ &am35xx_voltdm_mpu,
++ &am35xx_voltdm_core,
++ &omap3_voltdm_wkup,
++ NULL,
++};
++
++
+ static const char *sys_clk_name __initdata = "sys_ck";
+
+ void __init omap3xxx_voltagedomains_init(void)
+ {
+ struct voltagedomain *voltdm;
++ struct voltagedomain **voltdms;
+ int i;
+
+ /*
+@@ -93,8 +116,13 @@ void __init omap3xxx_voltagedomains_init(void)
+ omap3_voltdm_core.volt_data = omap34xx_vddcore_volt_data;
+ }
+
+- for (i = 0; voltdm = voltagedomains_omap3[i], voltdm; i++)
++ if (cpu_is_omap3517() || cpu_is_omap3505())
++ voltdms = voltagedomains_am35xx;
++ else
++ voltdms = voltagedomains_omap3;
++
++ for (i = 0; voltdm = voltdms[i], voltdm; i++)
+ voltdm->sys_clk.name = sys_clk_name;
+
+- voltdm_init(voltagedomains_omap3);
++ voltdm_init(voltdms);
+ };
+diff --git a/arch/arm/mach-omap2/voltagedomains44xx_data.c b/arch/arm/mach-omap2/voltagedomains44xx_data.c
+index c4584e9..4e11d02 100644
+--- a/arch/arm/mach-omap2/voltagedomains44xx_data.c
++++ b/arch/arm/mach-omap2/voltagedomains44xx_data.c
+@@ -21,7 +21,7 @@
+ #include <linux/err.h>
+ #include <linux/init.h>
+
+-#include <plat/common.h>
++#include "common.h"
+
+ #include "prm-regbits-44xx.h"
+ #include "prm44xx.h"
+diff --git a/arch/arm/mach-omap2/vp.c b/arch/arm/mach-omap2/vp.c
+index 3b52027..fde109c 100644
+--- a/arch/arm/mach-omap2/vp.c
++++ b/arch/arm/mach-omap2/vp.c
+@@ -1,7 +1,7 @@
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+
+-#include <plat/common.h>
++#include "common.h"
+
+ #include "voltage.h"
+ #include "vp.h"
+@@ -42,7 +42,8 @@ void __init omap_vp_init(struct voltagedomain *voltdm)
+ u32 vddmin, vddmax, vstepmin, vstepmax;
+
+ if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) {
+- pr_err("%s: No PMIC info for vdd_%s\n", __func__, voltdm->name);
++ pr_err("%s: missing PMIC for vdd_%s.\n",
++ __func__, voltdm->name);
+ return;
+ }
+
+diff --git a/arch/arm/mach-omap2/vp3xxx_data.c b/arch/arm/mach-omap2/vp3xxx_data.c
+index 260c554..bd89f80 100644
+--- a/arch/arm/mach-omap2/vp3xxx_data.c
++++ b/arch/arm/mach-omap2/vp3xxx_data.c
+@@ -19,7 +19,7 @@
+ #include <linux/err.h>
+ #include <linux/init.h>
+
+-#include <plat/common.h>
++#include "common.h"
+
+ #include "prm-regbits-34xx.h"
+ #include "voltage.h"
+diff --git a/arch/arm/mach-omap2/vp44xx_data.c b/arch/arm/mach-omap2/vp44xx_data.c
+index b4e7704..8c031d1 100644
+--- a/arch/arm/mach-omap2/vp44xx_data.c
++++ b/arch/arm/mach-omap2/vp44xx_data.c
+@@ -19,7 +19,7 @@
+ #include <linux/err.h>
+ #include <linux/init.h>
+
+-#include <plat/common.h>
++#include "common.h"
+
+ #include "prm44xx.h"
+ #include "prm-regbits-44xx.h"
+diff --git a/arch/arm/mach-orion5x/include/mach/io.h b/arch/arm/mach-orion5x/include/mach/io.h
+index c519610..e9d9afd 100644
+--- a/arch/arm/mach-orion5x/include/mach/io.h
++++ b/arch/arm/mach-orion5x/include/mach/io.h
+@@ -15,31 +15,6 @@
+
+ #define IO_SPACE_LIMIT 0xffffffff
+
+-static inline void __iomem *
+-__arch_ioremap(unsigned long paddr, size_t size, unsigned int mtype)
+-{
+- void __iomem *retval;
+- unsigned long offs = paddr - ORION5X_REGS_PHYS_BASE;
+- if (mtype == MT_DEVICE && size && offs < ORION5X_REGS_SIZE &&
+- size <= ORION5X_REGS_SIZE && offs + size <= ORION5X_REGS_SIZE) {
+- retval = (void __iomem *)ORION5X_REGS_VIRT_BASE + offs;
+- } else {
+- retval = __arm_ioremap(paddr, size, mtype);
+- }
+-
+- return retval;
+-}
+-
+-static inline void
+-__arch_iounmap(void __iomem *addr)
+-{
+- if (addr < (void __iomem *)ORION5X_REGS_VIRT_BASE ||
+- addr >= (void __iomem *)(ORION5X_REGS_VIRT_BASE + ORION5X_REGS_SIZE))
+- __iounmap(addr);
+-}
+-
+-#define __arch_ioremap __arch_ioremap
+-#define __arch_iounmap __arch_iounmap
+ #define __io(a) __typesafe_io(a)
+ #define __mem_pci(a) (a)
+
+diff --git a/arch/arm/mach-orion5x/include/mach/vmalloc.h b/arch/arm/mach-orion5x/include/mach/vmalloc.h
+deleted file mode 100644
+index 06b50ae..0000000
+--- a/arch/arm/mach-orion5x/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,5 +0,0 @@
+-/*
+- * arch/arm/mach-orion5x/include/mach/vmalloc.h
+- */
+-
+-#define VMALLOC_END 0xfd800000UL
+diff --git a/arch/arm/mach-picoxcell/common.c b/arch/arm/mach-picoxcell/common.c
+index 34d0834..ad871bd 100644
+--- a/arch/arm/mach-picoxcell/common.c
++++ b/arch/arm/mach-picoxcell/common.c
+@@ -11,6 +11,7 @@
+ #include <linux/irqdomain.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
++#include <linux/of_irq.h>
+ #include <linux/of_platform.h>
+
+ #include <asm/mach/arch.h>
+@@ -33,22 +34,20 @@ static const char *picoxcell_dt_match[] = {
+ };
+
+ static const struct of_device_id vic_of_match[] __initconst = {
+- { .compatible = "arm,pl192-vic" },
++ { .compatible = "arm,pl192-vic", .data = vic_of_init, },
+ { /* Sentinel */ }
+ };
+
+ static void __init picoxcell_init_irq(void)
+ {
+- vic_init(IO_ADDRESS(PICOXCELL_VIC0_BASE), 0, ~0, 0);
+- vic_init(IO_ADDRESS(PICOXCELL_VIC1_BASE), 32, ~0, 0);
+- irq_domain_generate_simple(vic_of_match, PICOXCELL_VIC0_BASE, 0);
+- irq_domain_generate_simple(vic_of_match, PICOXCELL_VIC1_BASE, 32);
++ of_irq_init(vic_of_match);
+ }
+
+ DT_MACHINE_START(PICOXCELL, "Picochip picoXcell")
+ .map_io = picoxcell_map_io,
+ .nr_irqs = ARCH_NR_IRQS,
+ .init_irq = picoxcell_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &picoxcell_timer,
+ .init_machine = picoxcell_init_machine,
+ .dt_compat = picoxcell_dt_match,
+diff --git a/arch/arm/mach-picoxcell/include/mach/entry-macro.S b/arch/arm/mach-picoxcell/include/mach/entry-macro.S
+index a6b09f7..9b505ac 100644
+--- a/arch/arm/mach-picoxcell/include/mach/entry-macro.S
++++ b/arch/arm/mach-picoxcell/include/mach/entry-macro.S
+@@ -9,11 +9,8 @@
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+-#include <mach/hardware.h>
+-#include <mach/irqs.h>
+-#include <mach/map.h>
++ .macro disable_fiq
++ .endm
+
+-#define VA_VIC0 IO_ADDRESS(PICOXCELL_VIC0_BASE)
+-#define VA_VIC1 IO_ADDRESS(PICOXCELL_VIC1_BASE)
+-
+-#include <asm/entry-macro-vic2.S>
++ .macro arch_ret_to_user, tmp1, tmp2
++ .endm
+diff --git a/arch/arm/mach-picoxcell/include/mach/vmalloc.h b/arch/arm/mach-picoxcell/include/mach/vmalloc.h
+deleted file mode 100644
+index 0216cc4..0000000
+--- a/arch/arm/mach-picoxcell/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,14 +0,0 @@
+-/*
+- * Copyright (c) 2011 Picochip Ltd., Jamie Iles
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- */
+-#define VMALLOC_END 0xfe000000UL
+diff --git a/arch/arm/mach-pnx4008/include/mach/system.h b/arch/arm/mach-pnx4008/include/mach/system.h
+index 5dda2bb..5d6384a 100644
+--- a/arch/arm/mach-pnx4008/include/mach/system.h
++++ b/arch/arm/mach-pnx4008/include/mach/system.h
+@@ -32,7 +32,7 @@ static void arch_idle(void)
+
+ static inline void arch_reset(char mode, const char *cmd)
+ {
+- cpu_reset(0);
++ soft_restart(0);
+ }
+
+ #endif
+diff --git a/arch/arm/mach-pnx4008/include/mach/vmalloc.h b/arch/arm/mach-pnx4008/include/mach/vmalloc.h
+deleted file mode 100644
+index 184913c..0000000
+--- a/arch/arm/mach-pnx4008/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,20 +0,0 @@
+-/*
+- * arch/arm/mach-pnx4008/include/mach/vmalloc.h
+- *
+- * Author: Vitaly Wool <source@mvista.com>
+- *
+- * 2006 (c) MontaVista Software, Inc. This file is licensed under
+- * the terms of the GNU General Public License version 2. This program
+- * is licensed "as is" without any warranty of any kind, whether express
+- * or implied.
+- */
+-
+-/*
+- * Just any arbitrary offset to the start of the vmalloc VM area: the
+- * current 8MB value just means that there will be a 8MB "hole" after the
+- * physical memory until the kernel virtual memory starts. That means that
+- * any out-of-bounds memory accesses will hopefully be caught.
+- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
+- * area for the same reason. ;)
+- */
+-#define VMALLOC_END 0xd0000000UL
+diff --git a/arch/arm/mach-prima2/include/mach/map.h b/arch/arm/mach-prima2/include/mach/map.h
+index 66b1ae2..6f24353 100644
+--- a/arch/arm/mach-prima2/include/mach/map.h
++++ b/arch/arm/mach-prima2/include/mach/map.h
+@@ -9,8 +9,10 @@
+ #ifndef __MACH_PRIMA2_MAP_H__
+ #define __MACH_PRIMA2_MAP_H__
+
+-#include <mach/vmalloc.h>
++#include <linux/const.h>
+
+-#define SIRFSOC_VA(x) (VMALLOC_END + ((x) & 0x00FFF000))
++#define SIRFSOC_VA_BASE _AC(0xFEC00000, UL)
++
++#define SIRFSOC_VA(x) (SIRFSOC_VA_BASE + ((x) & 0x00FFF000))
+
+ #endif
+diff --git a/arch/arm/mach-prima2/include/mach/vmalloc.h b/arch/arm/mach-prima2/include/mach/vmalloc.h
+deleted file mode 100644
+index c9f90fe..0000000
+--- a/arch/arm/mach-prima2/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,16 +0,0 @@
+-/*
+- * arch/arm/ach-prima2/include/mach/vmalloc.h
+- *
+- * Copyright (c) 2010 – 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+- *
+- * Licensed under GPLv2 or later.
+- */
+-
+-#ifndef __MACH_VMALLOC_H
+-#define __MACH_VMALLOC_H
+-
+-#include <linux/const.h>
+-
+-#define VMALLOC_END _AC(0xFEC00000, UL)
+-
+-#endif
+diff --git a/arch/arm/mach-pxa/include/mach/entry-macro.S b/arch/arm/mach-pxa/include/mach/entry-macro.S
+index a73bc86..260c0c1 100644
+--- a/arch/arm/mach-pxa/include/mach/entry-macro.S
++++ b/arch/arm/mach-pxa/include/mach/entry-macro.S
+@@ -7,45 +7,9 @@
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+-#include <mach/hardware.h>
+-#include <mach/irqs.h>
+
+ .macro disable_fiq
+ .endm
+
+- .macro get_irqnr_preamble, base, tmp
+- .endm
+-
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+-
+- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+- mrc p15, 0, \tmp, c0, c0, 0 @ CPUID
+- mov \tmp, \tmp, lsr #13
+- and \tmp, \tmp, #0x7 @ Core G
+- cmp \tmp, #1
+- bhi 1002f
+-
+- @ Core Generation 1 (PXA25x)
+- mov \base, #io_p2v(0x40000000) @ IIR Ctl = 0x40d00000
+- add \base, \base, #0x00d00000
+- ldr \irqstat, [\base, #0] @ ICIP
+- ldr \irqnr, [\base, #4] @ ICMR
+-
+- ands \irqnr, \irqstat, \irqnr
+- beq 1001f
+- rsb \irqstat, \irqnr, #0
+- and \irqstat, \irqstat, \irqnr
+- clz \irqnr, \irqstat
+- rsb \irqnr, \irqnr, #(31 + PXA_IRQ(0))
+- b 1001f
+-1002:
+- @ Core Generation 2 (PXA27x) or Core Generation 3 (PXA3xx)
+- mrc p6, 0, \irqstat, c5, c0, 0 @ ICHP
+- tst \irqstat, #0x80000000
+- beq 1001f
+- bic \irqstat, \irqstat, #0x80000000
+- mov \irqnr, \irqstat, lsr #16
+- add \irqnr, \irqnr, #(PXA_IRQ(0))
+-1001:
+- .endm
+diff --git a/arch/arm/mach-pxa/include/mach/vmalloc.h b/arch/arm/mach-pxa/include/mach/vmalloc.h
+deleted file mode 100644
+index bfecfbf..0000000
+--- a/arch/arm/mach-pxa/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,11 +0,0 @@
+-/*
+- * arch/arm/mach-pxa/include/mach/vmalloc.h
+- *
+- * Author: Nicolas Pitre
+- * Copyright: (C) 2001 MontaVista Software Inc.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-#define VMALLOC_END (0xe8000000UL)
+diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
+index b938fc2..4f47a76 100644
+--- a/arch/arm/mach-pxa/mioa701.c
++++ b/arch/arm/mach-pxa/mioa701.c
+@@ -752,6 +752,7 @@ static void mioa701_machine_exit(void)
+
+ MACHINE_START(MIOA701, "MIO A701")
+ .atag_offset = 0x100,
++ .restart_mode = 's',
+ .map_io = &pxa27x_map_io,
+ .init_irq = &pxa27x_init_irq,
+ .handle_irq = &pxa27x_handle_irq,
+diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
+index 50c8331..afcb48a 100644
+--- a/arch/arm/mach-pxa/poodle.c
++++ b/arch/arm/mach-pxa/poodle.c
+@@ -420,17 +420,11 @@ static void poodle_poweroff(void)
+ arm_machine_restart('h', NULL);
+ }
+
+-static void poodle_restart(char mode, const char *cmd)
+-{
+- arm_machine_restart('h', cmd);
+-}
+-
+ static void __init poodle_init(void)
+ {
+ int ret = 0;
+
+ pm_power_off = poodle_poweroff;
+- arm_pm_restart = poodle_restart;
+
+ PCFR |= PCFR_OPDE;
+
+diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c
+index 01e9d64..b8bcda1 100644
+--- a/arch/arm/mach-pxa/reset.c
++++ b/arch/arm/mach-pxa/reset.c
+@@ -88,7 +88,7 @@ void arch_reset(char mode, const char *cmd)
+ switch (mode) {
+ case 's':
+ /* Jump into ROM at address 0 */
+- cpu_reset(0);
++ soft_restart(0);
+ break;
+ case 'g':
+ do_gpio_reset();
+diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
+index 953a919..2f57d94 100644
+--- a/arch/arm/mach-pxa/spitz.c
++++ b/arch/arm/mach-pxa/spitz.c
+@@ -982,6 +982,7 @@ static void __init spitz_fixup(struct tag *tags, char **cmdline,
+
+ #ifdef CONFIG_MACH_SPITZ
+ MACHINE_START(SPITZ, "SHARP Spitz")
++ .restart_mode = 'g',
+ .fixup = spitz_fixup,
+ .map_io = pxa27x_map_io,
+ .init_irq = pxa27x_init_irq,
+@@ -993,6 +994,7 @@ MACHINE_END
+
+ #ifdef CONFIG_MACH_BORZOI
+ MACHINE_START(BORZOI, "SHARP Borzoi")
++ .restart_mode = 'g',
+ .fixup = spitz_fixup,
+ .map_io = pxa27x_map_io,
+ .init_irq = pxa27x_init_irq,
+@@ -1004,6 +1006,7 @@ MACHINE_END
+
+ #ifdef CONFIG_MACH_AKITA
+ MACHINE_START(AKITA, "SHARP Akita")
++ .restart_mode = 'g',
+ .fixup = spitz_fixup,
+ .map_io = pxa27x_map_io,
+ .init_irq = pxa27x_init_irq,
+diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
+index 402b0c9..ef64530 100644
+--- a/arch/arm/mach-pxa/tosa.c
++++ b/arch/arm/mach-pxa/tosa.c
+@@ -970,6 +970,7 @@ static void __init fixup_tosa(struct tag *tags, char **cmdline,
+ }
+
+ MACHINE_START(TOSA, "SHARP Tosa")
++ .restart_mode = 'g',
+ .fixup = fixup_tosa,
+ .map_io = pxa25x_map_io,
+ .nr_irqs = TOSA_NR_IRQS,
+diff --git a/arch/arm/mach-realview/include/mach/entry-macro.S b/arch/arm/mach-realview/include/mach/entry-macro.S
+index 4071164..e8a5179 100644
+--- a/arch/arm/mach-realview/include/mach/entry-macro.S
++++ b/arch/arm/mach-realview/include/mach/entry-macro.S
+@@ -7,8 +7,6 @@
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+-#include <mach/hardware.h>
+-#include <asm/hardware/entry-macro-gic.S>
+
+ .macro disable_fiq
+ .endm
+diff --git a/arch/arm/mach-realview/include/mach/vmalloc.h b/arch/arm/mach-realview/include/mach/vmalloc.h
+deleted file mode 100644
+index a2a4c68..0000000
+--- a/arch/arm/mach-realview/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,21 +0,0 @@
+-/*
+- * arch/arm/mach-realview/include/mach/vmalloc.h
+- *
+- * Copyright (C) 2003 ARM Limited
+- * Copyright (C) 2000 Russell King.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+- */
+-#define VMALLOC_END 0xf8000000UL
+diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c
+index 026c66a..1ca944a 100644
+--- a/arch/arm/mach-realview/realview_eb.c
++++ b/arch/arm/mach-realview/realview_eb.c
+@@ -91,8 +91,8 @@ static struct map_desc realview_eb_io_desc[] __initdata = {
+
+ static struct map_desc realview_eb11mp_io_desc[] __initdata = {
+ {
+- .virtual = IO_ADDRESS(REALVIEW_EB11MP_GIC_CPU_BASE),
+- .pfn = __phys_to_pfn(REALVIEW_EB11MP_GIC_CPU_BASE),
++ .virtual = IO_ADDRESS(REALVIEW_EB11MP_SCU_BASE),
++ .pfn = __phys_to_pfn(REALVIEW_EB11MP_SCU_BASE),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+@@ -469,6 +469,7 @@ MACHINE_START(REALVIEW_EB, "ARM-RealView EB")
+ .init_early = realview_init_early,
+ .init_irq = gic_init_irq,
+ .timer = &realview_eb_timer,
++ .handle_irq = gic_handle_irq,
+ .init_machine = realview_eb_init,
+ #ifdef CONFIG_ZONE_DMA
+ .dma_zone_size = SZ_256M,
+diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c
+index c057540..bd8fec8 100644
+--- a/arch/arm/mach-realview/realview_pb1176.c
++++ b/arch/arm/mach-realview/realview_pb1176.c
+@@ -392,6 +392,7 @@ MACHINE_START(REALVIEW_PB1176, "ARM-RealView PB1176")
+ .init_early = realview_init_early,
+ .init_irq = gic_init_irq,
+ .timer = &realview_pb1176_timer,
++ .handle_irq = gic_handle_irq,
+ .init_machine = realview_pb1176_init,
+ #ifdef CONFIG_ZONE_DMA
+ .dma_zone_size = SZ_256M,
+diff --git a/arch/arm/mach-realview/realview_pb11mp.c b/arch/arm/mach-realview/realview_pb11mp.c
+index 671ad6d..fa73ba8 100644
+--- a/arch/arm/mach-realview/realview_pb11mp.c
++++ b/arch/arm/mach-realview/realview_pb11mp.c
+@@ -366,6 +366,7 @@ MACHINE_START(REALVIEW_PB11MP, "ARM-RealView PB11MPCore")
+ .init_early = realview_init_early,
+ .init_irq = gic_init_irq,
+ .timer = &realview_pb11mp_timer,
++ .handle_irq = gic_handle_irq,
+ .init_machine = realview_pb11mp_init,
+ #ifdef CONFIG_ZONE_DMA
+ .dma_zone_size = SZ_256M,
+diff --git a/arch/arm/mach-realview/realview_pba8.c b/arch/arm/mach-realview/realview_pba8.c
+index cbf22df..6e5f2b9 100644
+--- a/arch/arm/mach-realview/realview_pba8.c
++++ b/arch/arm/mach-realview/realview_pba8.c
+@@ -316,6 +316,7 @@ MACHINE_START(REALVIEW_PBA8, "ARM-RealView PB-A8")
+ .init_early = realview_init_early,
+ .init_irq = gic_init_irq,
+ .timer = &realview_pba8_timer,
++ .handle_irq = gic_handle_irq,
+ .init_machine = realview_pba8_init,
+ #ifdef CONFIG_ZONE_DMA
+ .dma_zone_size = SZ_256M,
+diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c
+index 63c4114..7aabc21 100644
+--- a/arch/arm/mach-realview/realview_pbx.c
++++ b/arch/arm/mach-realview/realview_pbx.c
+@@ -98,8 +98,8 @@ static struct map_desc realview_pbx_io_desc[] __initdata = {
+
+ static struct map_desc realview_local_io_desc[] __initdata = {
+ {
+- .virtual = IO_ADDRESS(REALVIEW_PBX_TILE_GIC_CPU_BASE),
+- .pfn = __phys_to_pfn(REALVIEW_PBX_TILE_GIC_CPU_BASE),
++ .virtual = IO_ADDRESS(REALVIEW_PBX_TILE_SCU_BASE),
++ .pfn = __phys_to_pfn(REALVIEW_PBX_TILE_SCU_BASE),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+@@ -399,6 +399,7 @@ MACHINE_START(REALVIEW_PBX, "ARM-RealView PBX")
+ .init_early = realview_init_early,
+ .init_irq = gic_init_irq,
+ .timer = &realview_pbx_timer,
++ .handle_irq = gic_handle_irq,
+ .init_machine = realview_pbx_init,
+ #ifdef CONFIG_ZONE_DMA
+ .dma_zone_size = SZ_256M,
+diff --git a/arch/arm/mach-rpc/include/mach/system.h b/arch/arm/mach-rpc/include/mach/system.h
+index 45c7b93..a354f4d 100644
+--- a/arch/arm/mach-rpc/include/mach/system.h
++++ b/arch/arm/mach-rpc/include/mach/system.h
+@@ -23,5 +23,5 @@ static inline void arch_reset(char mode, const char *cmd)
+ /*
+ * Jump into the ROM
+ */
+- cpu_reset(0);
++ soft_restart(0);
+ }
+diff --git a/arch/arm/mach-rpc/include/mach/vmalloc.h b/arch/arm/mach-rpc/include/mach/vmalloc.h
+deleted file mode 100644
+index fb70022..0000000
+--- a/arch/arm/mach-rpc/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,10 +0,0 @@
+-/*
+- * arch/arm/mach-rpc/include/mach/vmalloc.h
+- *
+- * Copyright (C) 1997 Russell King
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-#define VMALLOC_END 0xdc000000UL
+diff --git a/arch/arm/mach-s3c2410/include/mach/system-reset.h b/arch/arm/mach-s3c2410/include/mach/system-reset.h
+index 6faadce..913893d 100644
+--- a/arch/arm/mach-s3c2410/include/mach/system-reset.h
++++ b/arch/arm/mach-s3c2410/include/mach/system-reset.h
+@@ -19,7 +19,7 @@ static void
+ arch_reset(char mode, const char *cmd)
+ {
+ if (mode == 's') {
+- cpu_reset(0);
++ soft_restart(0);
+ }
+
+ if (s3c24xx_reset_hook)
+@@ -28,5 +28,5 @@ arch_reset(char mode, const char *cmd)
+ arch_wdt_reset();
+
+ /* we'll take a jump through zero as a poor second */
+- cpu_reset(0);
++ soft_restart(0);
+ }
+diff --git a/arch/arm/mach-s3c2410/include/mach/vmalloc.h b/arch/arm/mach-s3c2410/include/mach/vmalloc.h
+deleted file mode 100644
+index 7a311e8..0000000
+--- a/arch/arm/mach-s3c2410/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,20 +0,0 @@
+-/* arch/arm/mach-s3c2410/include/mach/vmalloc.h
+- *
+- * from arch/arm/mach-iop3xx/include/mach/vmalloc.h
+- *
+- * Copyright (c) 2003 Simtec Electronics <linux@simtec.co.uk>
+- * http://www.simtec.co.uk/products/SWLINUX/
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- *
+- * S3C2410 vmalloc definition
+-*/
+-
+-#ifndef __ASM_ARCH_VMALLOC_H
+-#define __ASM_ARCH_VMALLOC_H
+-
+-#define VMALLOC_END 0xF6000000UL
+-
+-#endif /* __ASM_ARCH_VMALLOC_H */
+diff --git a/arch/arm/mach-s3c64xx/include/mach/entry-macro.S b/arch/arm/mach-s3c64xx/include/mach/entry-macro.S
+index dd36260..dc2bc15 100644
+--- a/arch/arm/mach-s3c64xx/include/mach/entry-macro.S
++++ b/arch/arm/mach-s3c64xx/include/mach/entry-macro.S
+@@ -12,7 +12,8 @@
+ * warranty of any kind, whether express or implied.
+ */
+
+-#include <mach/map.h>
+-#include <mach/irqs.h>
++ .macro disable_fiq
++ .endm
+
+-#include <asm/entry-macro-vic2.S>
++ .macro arch_ret_to_user, tmp1, tmp2
++ .endm
+diff --git a/arch/arm/mach-s3c64xx/include/mach/system.h b/arch/arm/mach-s3c64xx/include/mach/system.h
+index 2e58cb7..d8ca578 100644
+--- a/arch/arm/mach-s3c64xx/include/mach/system.h
++++ b/arch/arm/mach-s3c64xx/include/mach/system.h
+@@ -24,7 +24,7 @@ static void arch_reset(char mode, const char *cmd)
+ arch_wdt_reset();
+
+ /* if all else fails, or mode was for soft, jump to 0 */
+- cpu_reset(0);
++ soft_restart(0);
+ }
+
+ #endif /* __ASM_ARCH_IRQ_H */
+diff --git a/arch/arm/mach-s3c64xx/include/mach/vmalloc.h b/arch/arm/mach-s3c64xx/include/mach/vmalloc.h
+deleted file mode 100644
+index 23f75e5..0000000
+--- a/arch/arm/mach-s3c64xx/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,20 +0,0 @@
+-/* arch/arm/mach-s3c64xx/include/mach/vmalloc.h
+- *
+- * from arch/arm/mach-iop3xx/include/mach/vmalloc.h
+- *
+- * Copyright (c) 2003 Simtec Electronics <linux@simtec.co.uk>
+- * http://www.simtec.co.uk/products/SWLINUX/
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- *
+- * S3C6400 vmalloc definition
+-*/
+-
+-#ifndef __ASM_ARCH_VMALLOC_H
+-#define __ASM_ARCH_VMALLOC_H
+-
+-#define VMALLOC_END 0xF6000000UL
+-
+-#endif /* __ASM_ARCH_VMALLOC_H */
+diff --git a/arch/arm/mach-s3c64xx/mach-anw6410.c b/arch/arm/mach-s3c64xx/mach-anw6410.c
+index 8eba88e..2bbc14d 100644
+--- a/arch/arm/mach-s3c64xx/mach-anw6410.c
++++ b/arch/arm/mach-s3c64xx/mach-anw6410.c
+@@ -30,6 +30,7 @@
+
+ #include <video/platform_lcd.h>
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach/map.h>
+ #include <asm/mach/irq.h>
+@@ -236,6 +237,7 @@ MACHINE_START(ANW6410, "A&W6410")
+ .atag_offset = 0x100,
+
+ .init_irq = s3c6410_init_irq,
++ .handle_irq = vic_handle_irq,
+ .map_io = anw6410_map_io,
+ .init_machine = anw6410_machine_init,
+ .timer = &s3c24xx_timer,
+diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
+index d04b654..988ac2e 100644
+--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
++++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
+@@ -37,6 +37,7 @@
+ #include <linux/mfd/wm831x/irq.h>
+ #include <linux/mfd/wm831x/gpio.h>
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach-types.h>
+
+@@ -711,6 +712,7 @@ MACHINE_START(WLF_CRAGG_6410, "Wolfson Cragganmore 6410")
+ /* Maintainer: Mark Brown <broonie@opensource.wolfsonmicro.com> */
+ .atag_offset = 0x100,
+ .init_irq = s3c6410_init_irq,
++ .handle_irq = vic_handle_irq,
+ .map_io = crag6410_map_io,
+ .init_machine = crag6410_machine_init,
+ .timer = &s3c24xx_timer,
+diff --git a/arch/arm/mach-s3c64xx/mach-hmt.c b/arch/arm/mach-s3c64xx/mach-hmt.c
+index 952f75f..c5955f3 100644
+--- a/arch/arm/mach-s3c64xx/mach-hmt.c
++++ b/arch/arm/mach-s3c64xx/mach-hmt.c
+@@ -29,6 +29,7 @@
+ #include <mach/hardware.h>
+ #include <mach/map.h>
+
++#include <asm/hardware/vic.h>
+ #include <asm/irq.h>
+ #include <asm/mach-types.h>
+
+@@ -267,6 +268,7 @@ MACHINE_START(HMT, "Airgoo-HMT")
+ /* Maintainer: Peter Korsgaard <jacmet@sunsite.dk> */
+ .atag_offset = 0x100,
+ .init_irq = s3c6410_init_irq,
++ .handle_irq = vic_handle_irq,
+ .map_io = hmt_map_io,
+ .init_machine = hmt_machine_init,
+ .timer = &s3c24xx_timer,
+diff --git a/arch/arm/mach-s3c64xx/mach-mini6410.c b/arch/arm/mach-s3c64xx/mach-mini6410.c
+index 1bc85c3..4415c85 100644
+--- a/arch/arm/mach-s3c64xx/mach-mini6410.c
++++ b/arch/arm/mach-s3c64xx/mach-mini6410.c
+@@ -24,6 +24,7 @@
+ #include <linux/serial_core.h>
+ #include <linux/types.h>
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach/map.h>
+@@ -345,6 +346,7 @@ MACHINE_START(MINI6410, "MINI6410")
+ /* Maintainer: Darius Augulis <augulis.darius@gmail.com> */
+ .atag_offset = 0x100,
+ .init_irq = s3c6410_init_irq,
++ .handle_irq = vic_handle_irq,
+ .map_io = mini6410_map_io,
+ .init_machine = mini6410_machine_init,
+ .timer = &s3c24xx_timer,
+diff --git a/arch/arm/mach-s3c64xx/mach-ncp.c b/arch/arm/mach-s3c64xx/mach-ncp.c
+index cb13cba..9b2c610 100644
+--- a/arch/arm/mach-s3c64xx/mach-ncp.c
++++ b/arch/arm/mach-s3c64xx/mach-ncp.c
+@@ -25,6 +25,7 @@
+
+ #include <video/platform_lcd.h>
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach/map.h>
+ #include <asm/mach/irq.h>
+@@ -99,6 +100,7 @@ MACHINE_START(NCP, "NCP")
+ /* Maintainer: Samsung Electronics */
+ .atag_offset = 0x100,
+ .init_irq = s3c6410_init_irq,
++ .handle_irq = vic_handle_irq,
+ .map_io = ncp_map_io,
+ .init_machine = ncp_machine_init,
+ .timer = &s3c24xx_timer,
+diff --git a/arch/arm/mach-s3c64xx/mach-real6410.c b/arch/arm/mach-s3c64xx/mach-real6410.c
+index 87281e4..dbab49f 100644
+--- a/arch/arm/mach-s3c64xx/mach-real6410.c
++++ b/arch/arm/mach-s3c64xx/mach-real6410.c
+@@ -25,6 +25,7 @@
+ #include <linux/serial_core.h>
+ #include <linux/types.h>
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach/map.h>
+@@ -326,6 +327,7 @@ MACHINE_START(REAL6410, "REAL6410")
+ .atag_offset = 0x100,
+
+ .init_irq = s3c6410_init_irq,
++ .handle_irq = vic_handle_irq,
+ .map_io = real6410_map_io,
+ .init_machine = real6410_machine_init,
+ .timer = &s3c24xx_timer,
+diff --git a/arch/arm/mach-s3c64xx/mach-smartq5.c b/arch/arm/mach-s3c64xx/mach-smartq5.c
+index 94c831d..0539452 100644
+--- a/arch/arm/mach-s3c64xx/mach-smartq5.c
++++ b/arch/arm/mach-s3c64xx/mach-smartq5.c
+@@ -17,6 +17,7 @@
+ #include <linux/leds.h>
+ #include <linux/platform_device.h>
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
+
+@@ -148,6 +149,7 @@ MACHINE_START(SMARTQ5, "SmartQ 5")
+ /* Maintainer: Maurus Cuelenaere <mcuelenaere AT gmail DOT com> */
+ .atag_offset = 0x100,
+ .init_irq = s3c6410_init_irq,
++ .handle_irq = vic_handle_irq,
+ .map_io = smartq_map_io,
+ .init_machine = smartq5_machine_init,
+ .timer = &s3c24xx_timer,
+diff --git a/arch/arm/mach-s3c64xx/mach-smartq7.c b/arch/arm/mach-s3c64xx/mach-smartq7.c
+index f112547..a58d1ba 100644
+--- a/arch/arm/mach-s3c64xx/mach-smartq7.c
++++ b/arch/arm/mach-s3c64xx/mach-smartq7.c
+@@ -17,6 +17,7 @@
+ #include <linux/leds.h>
+ #include <linux/platform_device.h>
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
+
+@@ -164,6 +165,7 @@ MACHINE_START(SMARTQ7, "SmartQ 7")
+ /* Maintainer: Maurus Cuelenaere <mcuelenaere AT gmail DOT com> */
+ .atag_offset = 0x100,
+ .init_irq = s3c6410_init_irq,
++ .handle_irq = vic_handle_irq,
+ .map_io = smartq_map_io,
+ .init_machine = smartq7_machine_init,
+ .timer = &s3c24xx_timer,
+diff --git a/arch/arm/mach-s3c64xx/mach-smdk6400.c b/arch/arm/mach-s3c64xx/mach-smdk6400.c
+index 73450c2..be28a59 100644
+--- a/arch/arm/mach-s3c64xx/mach-smdk6400.c
++++ b/arch/arm/mach-s3c64xx/mach-smdk6400.c
+@@ -22,6 +22,7 @@
+
+ #include <asm/mach-types.h>
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach/map.h>
+ #include <asm/mach/irq.h>
+@@ -88,6 +89,7 @@ MACHINE_START(SMDK6400, "SMDK6400")
+ .atag_offset = 0x100,
+
+ .init_irq = s3c6400_init_irq,
++ .handle_irq = vic_handle_irq,
+ .map_io = smdk6400_map_io,
+ .init_machine = smdk6400_machine_init,
+ .timer = &s3c24xx_timer,
+diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
+index 8bc8edd..0830915 100644
+--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
++++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
+@@ -43,6 +43,7 @@
+
+ #include <video/platform_lcd.h>
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach/map.h>
+ #include <asm/mach/irq.h>
+@@ -700,6 +701,7 @@ MACHINE_START(SMDK6410, "SMDK6410")
+ .atag_offset = 0x100,
+
+ .init_irq = s3c6410_init_irq,
++ .handle_irq = vic_handle_irq,
+ .map_io = smdk6410_map_io,
+ .init_machine = smdk6410_machine_init,
+ .timer = &s3c24xx_timer,
+diff --git a/arch/arm/mach-s5p64x0/include/mach/entry-macro.S b/arch/arm/mach-s5p64x0/include/mach/entry-macro.S
+index 10b62b4..fbb246d 100644
+--- a/arch/arm/mach-s5p64x0/include/mach/entry-macro.S
++++ b/arch/arm/mach-s5p64x0/include/mach/entry-macro.S
+@@ -10,7 +10,8 @@
+ * published by the Free Software Foundation.
+ */
+
+-#include <mach/map.h>
+-#include <plat/irqs.h>
++ .macro disable_fiq
++ .endm
+
+-#include <asm/entry-macro-vic2.S>
++ .macro arch_ret_to_user, tmp1, tmp2
++ .endm
+diff --git a/arch/arm/mach-s5p64x0/include/mach/vmalloc.h b/arch/arm/mach-s5p64x0/include/mach/vmalloc.h
+deleted file mode 100644
+index 38dcc71..0000000
+--- a/arch/arm/mach-s5p64x0/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,20 +0,0 @@
+-/* linux/arch/arm/mach-s5p64x0/include/mach/vmalloc.h
+- *
+- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+- * http://www.samsung.com
+- *
+- * Copyright 2010 Ben Dooks <ben-linux@fluff.org>
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- *
+- * S3C6400 vmalloc definition
+-*/
+-
+-#ifndef __ASM_ARCH_VMALLOC_H
+-#define __ASM_ARCH_VMALLOC_H
+-
+-#define VMALLOC_END 0xF6000000UL
+-
+-#endif /* __ASM_ARCH_VMALLOC_H */
+diff --git a/arch/arm/mach-s5p64x0/mach-smdk6440.c b/arch/arm/mach-s5p64x0/mach-smdk6440.c
+index 4a1250c..c272c3f 100644
+--- a/arch/arm/mach-s5p64x0/mach-smdk6440.c
++++ b/arch/arm/mach-s5p64x0/mach-smdk6440.c
+@@ -27,6 +27,7 @@
+
+ #include <video/platform_lcd.h>
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach/map.h>
+ #include <asm/irq.h>
+@@ -242,6 +243,7 @@ MACHINE_START(SMDK6440, "SMDK6440")
+ .atag_offset = 0x100,
+
+ .init_irq = s5p6440_init_irq,
++ .handle_irq = vic_handle_irq,
+ .map_io = smdk6440_map_io,
+ .init_machine = smdk6440_machine_init,
+ .timer = &s5p_timer,
+diff --git a/arch/arm/mach-s5p64x0/mach-smdk6450.c b/arch/arm/mach-s5p64x0/mach-smdk6450.c
+index 0ab129e..7a47009 100644
+--- a/arch/arm/mach-s5p64x0/mach-smdk6450.c
++++ b/arch/arm/mach-s5p64x0/mach-smdk6450.c
+@@ -27,6 +27,7 @@
+
+ #include <video/platform_lcd.h>
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach/map.h>
+ #include <asm/irq.h>
+@@ -262,6 +263,7 @@ MACHINE_START(SMDK6450, "SMDK6450")
+ .atag_offset = 0x100,
+
+ .init_irq = s5p6450_init_irq,
++ .handle_irq = vic_handle_irq,
+ .map_io = smdk6450_map_io,
+ .init_machine = smdk6450_machine_init,
+ .timer = &s5p_timer,
+diff --git a/arch/arm/mach-s5pc100/include/mach/entry-macro.S b/arch/arm/mach-s5pc100/include/mach/entry-macro.S
+index ba76af0..b8c242e 100644
+--- a/arch/arm/mach-s5pc100/include/mach/entry-macro.S
++++ b/arch/arm/mach-s5pc100/include/mach/entry-macro.S
+@@ -12,39 +12,14 @@
+ * warranty of any kind, whether express or implied.
+ */
+
+-#include <asm/hardware/vic.h>
+-#include <mach/map.h>
+-#include <plat/irqs.h>
+-
+ .macro disable_fiq
+ .endm
+
+ .macro get_irqnr_preamble, base, tmp
+- ldr \base, =VA_VIC0
+ .endm
+
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+
+ .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+-
+- @ check the vic0
+- mov \irqnr, # S5P_IRQ_OFFSET + 31
+- ldr \irqstat, [ \base, # VIC_IRQ_STATUS ]
+- teq \irqstat, #0
+-
+- @ otherwise try vic1
+- addeq \tmp, \base, #(VA_VIC1 - VA_VIC0)
+- addeq \irqnr, \irqnr, #32
+- ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ]
+- teqeq \irqstat, #0
+-
+- @ otherwise try vic2
+- addeq \tmp, \base, #(VA_VIC2 - VA_VIC0)
+- addeq \irqnr, \irqnr, #32
+- ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ]
+- teqeq \irqstat, #0
+-
+- clzne \irqstat, \irqstat
+- subne \irqnr, \irqnr, \irqstat
+ .endm
+diff --git a/arch/arm/mach-s5pc100/include/mach/vmalloc.h b/arch/arm/mach-s5pc100/include/mach/vmalloc.h
+deleted file mode 100644
+index 44c8e57..0000000
+--- a/arch/arm/mach-s5pc100/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,17 +0,0 @@
+-/* arch/arm/mach-s5pc100/include/mach/vmalloc.h
+- *
+- * Copyright 2010 Ben Dooks <ben-linux@fluff.org>
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- *
+- * S3C6400 vmalloc definition
+-*/
+-
+-#ifndef __ASM_ARCH_VMALLOC_H
+-#define __ASM_ARCH_VMALLOC_H
+-
+-#define VMALLOC_END 0xF6000000UL
+-
+-#endif /* __ASM_ARCH_VMALLOC_H */
+diff --git a/arch/arm/mach-s5pc100/mach-smdkc100.c b/arch/arm/mach-s5pc100/mach-smdkc100.c
+index 26f5c91..93ebe3a 100644
+--- a/arch/arm/mach-s5pc100/mach-smdkc100.c
++++ b/arch/arm/mach-s5pc100/mach-smdkc100.c
+@@ -25,6 +25,7 @@
+ #include <linux/input.h>
+ #include <linux/pwm_backlight.h>
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach/map.h>
+
+@@ -250,6 +251,7 @@ MACHINE_START(SMDKC100, "SMDKC100")
+ /* Maintainer: Byungho Min <bhmin@samsung.com> */
+ .atag_offset = 0x100,
+ .init_irq = s5pc100_init_irq,
++ .handle_irq = vic_handle_irq,
+ .map_io = smdkc100_map_io,
+ .init_machine = smdkc100_machine_init,
+ .timer = &s3c24xx_timer,
+diff --git a/arch/arm/mach-s5pv210/include/mach/entry-macro.S b/arch/arm/mach-s5pv210/include/mach/entry-macro.S
+index 3aa41ac..bebca1b 100644
+--- a/arch/arm/mach-s5pv210/include/mach/entry-macro.S
++++ b/arch/arm/mach-s5pv210/include/mach/entry-macro.S
+@@ -10,45 +10,8 @@
+ * published by the Free Software Foundation.
+ */
+
+-#include <asm/hardware/vic.h>
+-#include <mach/map.h>
+-#include <plat/irqs.h>
+-
+ .macro disable_fiq
+ .endm
+
+- .macro get_irqnr_preamble, base, tmp
+- ldr \base, =VA_VIC0
+- .endm
+-
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+-
+- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+-
+- @ check the vic0
+- mov \irqnr, # S5P_IRQ_OFFSET + 31
+- ldr \irqstat, [ \base, # VIC_IRQ_STATUS ]
+- teq \irqstat, #0
+-
+- @ otherwise try vic1
+- addeq \tmp, \base, #(VA_VIC1 - VA_VIC0)
+- addeq \irqnr, \irqnr, #32
+- ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ]
+- teqeq \irqstat, #0
+-
+- @ otherwise try vic2
+- addeq \tmp, \base, #(VA_VIC2 - VA_VIC0)
+- addeq \irqnr, \irqnr, #32
+- ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ]
+- teqeq \irqstat, #0
+-
+- @ otherwise try vic3
+- addeq \tmp, \base, #(VA_VIC3 - VA_VIC0)
+- addeq \irqnr, \irqnr, #32
+- ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ]
+- teqeq \irqstat, #0
+-
+- clzne \irqstat, \irqstat
+- subne \irqnr, \irqnr, \irqstat
+- .endm
+diff --git a/arch/arm/mach-s5pv210/include/mach/vmalloc.h b/arch/arm/mach-s5pv210/include/mach/vmalloc.h
+deleted file mode 100644
+index a6c659d..0000000
+--- a/arch/arm/mach-s5pv210/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,22 +0,0 @@
+-/* linux/arch/arm/mach-s5p6442/include/mach/vmalloc.h
+- *
+- * Copyright 2010 Ben Dooks <ben-linux@fluff.org>
+- *
+- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+- * http://www.samsung.com/
+- *
+- * Based on arch/arm/mach-s5p6442/include/mach/vmalloc.h
+- *
+- * S5PV210 vmalloc definition
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+-*/
+-
+-#ifndef __ASM_ARCH_VMALLOC_H
+-#define __ASM_ARCH_VMALLOC_H __FILE__
+-
+-#define VMALLOC_END 0xF6000000UL
+-
+-#endif /* __ASM_ARCH_VMALLOC_H */
+diff --git a/arch/arm/mach-s5pv210/mach-aquila.c b/arch/arm/mach-s5pv210/mach-aquila.c
+index 5811a96..71ca956 100644
+--- a/arch/arm/mach-s5pv210/mach-aquila.c
++++ b/arch/arm/mach-s5pv210/mach-aquila.c
+@@ -22,6 +22,7 @@
+ #include <linux/input.h>
+ #include <linux/gpio.h>
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach/map.h>
+ #include <asm/setup.h>
+@@ -680,6 +681,7 @@ MACHINE_START(AQUILA, "Aquila")
+ Kyungmin Park <kyungmin.park@samsung.com> */
+ .atag_offset = 0x100,
+ .init_irq = s5pv210_init_irq,
++ .handle_irq = vic_handle_irq,
+ .map_io = aquila_map_io,
+ .init_machine = aquila_machine_init,
+ .timer = &s5p_timer,
+diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c
+index 15edcae..448fd9e 100644
+--- a/arch/arm/mach-s5pv210/mach-goni.c
++++ b/arch/arm/mach-s5pv210/mach-goni.c
+@@ -27,6 +27,7 @@
+ #include <linux/gpio.h>
+ #include <linux/interrupt.h>
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach/map.h>
+ #include <asm/setup.h>
+@@ -956,6 +957,7 @@ MACHINE_START(GONI, "GONI")
+ /* Maintainers: Kyungmin Park <kyungmin.park@samsung.com> */
+ .atag_offset = 0x100,
+ .init_irq = s5pv210_init_irq,
++ .handle_irq = vic_handle_irq,
+ .map_io = goni_map_io,
+ .init_machine = goni_machine_init,
+ .timer = &s5p_timer,
+diff --git a/arch/arm/mach-s5pv210/mach-smdkc110.c b/arch/arm/mach-s5pv210/mach-smdkc110.c
+index f7266bb..c2531ff 100644
+--- a/arch/arm/mach-s5pv210/mach-smdkc110.c
++++ b/arch/arm/mach-s5pv210/mach-smdkc110.c
+@@ -15,6 +15,7 @@
+ #include <linux/i2c.h>
+ #include <linux/sysdev.h>
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach/map.h>
+ #include <asm/setup.h>
+@@ -138,6 +139,7 @@ MACHINE_START(SMDKC110, "SMDKC110")
+ /* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
+ .atag_offset = 0x100,
+ .init_irq = s5pv210_init_irq,
++ .handle_irq = vic_handle_irq,
+ .map_io = smdkc110_map_io,
+ .init_machine = smdkc110_machine_init,
+ .timer = &s5p_timer,
+diff --git a/arch/arm/mach-s5pv210/mach-smdkv210.c b/arch/arm/mach-s5pv210/mach-smdkv210.c
+index 8662ef6..3ac9e57 100644
+--- a/arch/arm/mach-s5pv210/mach-smdkv210.c
++++ b/arch/arm/mach-s5pv210/mach-smdkv210.c
+@@ -20,6 +20,7 @@
+ #include <linux/delay.h>
+ #include <linux/pwm_backlight.h>
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach/map.h>
+ #include <asm/setup.h>
+@@ -316,6 +317,7 @@ MACHINE_START(SMDKV210, "SMDKV210")
+ /* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
+ .atag_offset = 0x100,
+ .init_irq = s5pv210_init_irq,
++ .handle_irq = vic_handle_irq,
+ .map_io = smdkv210_map_io,
+ .init_machine = smdkv210_machine_init,
+ .timer = &s5p_timer,
+diff --git a/arch/arm/mach-s5pv210/mach-torbreck.c b/arch/arm/mach-s5pv210/mach-torbreck.c
+index 97cc066..df70fcb 100644
+--- a/arch/arm/mach-s5pv210/mach-torbreck.c
++++ b/arch/arm/mach-s5pv210/mach-torbreck.c
+@@ -14,6 +14,7 @@
+ #include <linux/init.h>
+ #include <linux/serial_core.h>
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach/map.h>
+ #include <asm/setup.h>
+@@ -127,6 +128,7 @@ MACHINE_START(TORBRECK, "TORBRECK")
+ /* Maintainer: Hyunchul Ko <ghcstop@gmail.com> */
+ .atag_offset = 0x100,
+ .init_irq = s5pv210_init_irq,
++ .handle_irq = vic_handle_irq,
+ .map_io = torbreck_map_io,
+ .init_machine = torbreck_machine_init,
+ .timer = &s5p_timer,
+diff --git a/arch/arm/mach-sa1100/include/mach/system.h b/arch/arm/mach-sa1100/include/mach/system.h
+index ba9da9f..345d35b 100644
+--- a/arch/arm/mach-sa1100/include/mach/system.h
++++ b/arch/arm/mach-sa1100/include/mach/system.h
+@@ -14,7 +14,7 @@ static inline void arch_reset(char mode, const char *cmd)
+ {
+ if (mode == 's') {
+ /* Jump into ROM at address 0 */
+- cpu_reset(0);
++ soft_restart(0);
+ } else {
+ /* Use on-chip reset capability */
+ RSRR = RSRR_SWR;
+diff --git a/arch/arm/mach-sa1100/include/mach/vmalloc.h b/arch/arm/mach-sa1100/include/mach/vmalloc.h
+deleted file mode 100644
+index b3d0023..0000000
+--- a/arch/arm/mach-sa1100/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,4 +0,0 @@
+-/*
+- * arch/arm/mach-sa1100/include/mach/vmalloc.h
+- */
+-#define VMALLOC_END (0xe8000000UL)
+diff --git a/arch/arm/mach-shark/core.c b/arch/arm/mach-shark/core.c
+index feda3ca..f4b25d8 100644
+--- a/arch/arm/mach-shark/core.c
++++ b/arch/arm/mach-shark/core.c
+@@ -29,7 +29,6 @@
+ void arch_reset(char mode, const char *cmd)
+ {
+ short temp;
+- local_irq_disable();
+ /* Reset the Machine via pc[3] of the sequoia chipset */
+ outw(0x09,0x24);
+ temp=inw(0x26);
+diff --git a/arch/arm/mach-shark/include/mach/vmalloc.h b/arch/arm/mach-shark/include/mach/vmalloc.h
+deleted file mode 100644
+index b10df98..0000000
+--- a/arch/arm/mach-shark/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,4 +0,0 @@
+-/*
+- * arch/arm/mach-shark/include/mach/vmalloc.h
+- */
+-#define VMALLOC_END 0xd0000000UL
+diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
+index 737bdc6..5ca1f9d 100644
+--- a/arch/arm/mach-shmobile/Makefile
++++ b/arch/arm/mach-shmobile/Makefile
+@@ -28,7 +28,6 @@ pfc-$(CONFIG_ARCH_SH73A0) += pfc-sh73a0.o
+ obj-$(CONFIG_ARCH_SH7367) += entry-intc.o
+ obj-$(CONFIG_ARCH_SH7377) += entry-intc.o
+ obj-$(CONFIG_ARCH_SH7372) += entry-intc.o
+-obj-$(CONFIG_ARCH_SH73A0) += entry-gic.o
+
+ # PM objects
+ obj-$(CONFIG_SUSPEND) += suspend.o
+diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
+index 7119b87..f71fa3c 100644
+--- a/arch/arm/mach-shmobile/board-ag5evm.c
++++ b/arch/arm/mach-shmobile/board-ag5evm.c
+@@ -609,7 +609,7 @@ MACHINE_START(AG5EVM, "ag5evm")
+ .map_io = ag5evm_map_io,
+ .nr_irqs = NR_IRQS_LEGACY,
+ .init_irq = sh73a0_init_irq,
+- .handle_irq = shmobile_handle_irq_gic,
++ .handle_irq = gic_handle_irq,
+ .init_machine = ag5evm_init,
+ .timer = &ag5evm_timer,
+ MACHINE_END
+diff --git a/arch/arm/mach-shmobile/entry-gic.S b/arch/arm/mach-shmobile/entry-gic.S
+deleted file mode 100644
+index e20239b..0000000
+--- a/arch/arm/mach-shmobile/entry-gic.S
++++ /dev/null
+@@ -1,18 +0,0 @@
+-/*
+- * ARM Interrupt demux handler using GIC
+- *
+- * Copyright (C) 2010 Magnus Damm
+- * Copyright (C) 2011 Paul Mundt
+- * Copyright (C) 2010 - 2011 Renesas Solutions Corp.
+- *
+- * This file is licensed under the terms of the GNU General Public
+- * License version 2. This program is licensed "as is" without any
+- * warranty of any kind, whether express or implied.
+- */
+-
+-#include <asm/assembler.h>
+-#include <asm/entry-macro-multi.S>
+-#include <asm/hardware/gic.h>
+-#include <asm/hardware/entry-macro-gic.S>
+-
+- arch_irq_handler shmobile_handle_irq_gic
+diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
+index 834bd6c..4bf82c1 100644
+--- a/arch/arm/mach-shmobile/include/mach/common.h
++++ b/arch/arm/mach-shmobile/include/mach/common.h
+@@ -7,7 +7,6 @@ extern void shmobile_secondary_vector(void);
+ struct clk;
+ extern int clk_init(void);
+ extern void shmobile_handle_irq_intc(struct pt_regs *);
+-extern void shmobile_handle_irq_gic(struct pt_regs *);
+ extern struct platform_suspend_ops shmobile_suspend_ops;
+ struct cpuidle_driver;
+ extern void (*shmobile_cpuidle_modes[])(void);
+diff --git a/arch/arm/mach-shmobile/include/mach/entry-macro.S b/arch/arm/mach-shmobile/include/mach/entry-macro.S
+index 8d4a416..2a57b29 100644
+--- a/arch/arm/mach-shmobile/include/mach/entry-macro.S
++++ b/arch/arm/mach-shmobile/include/mach/entry-macro.S
+@@ -18,14 +18,5 @@
+ .macro disable_fiq
+ .endm
+
+- .macro get_irqnr_preamble, base, tmp
+- .endm
+-
+- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+- .endm
+-
+- .macro test_for_ipi, irqnr, irqstat, base, tmp
+- .endm
+-
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+diff --git a/arch/arm/mach-shmobile/include/mach/system.h b/arch/arm/mach-shmobile/include/mach/system.h
+index 76a687e..956ac18 100644
+--- a/arch/arm/mach-shmobile/include/mach/system.h
++++ b/arch/arm/mach-shmobile/include/mach/system.h
+@@ -8,7 +8,7 @@ static inline void arch_idle(void)
+
+ static inline void arch_reset(char mode, const char *cmd)
+ {
+- cpu_reset(0);
++ soft_restart(0);
+ }
+
+ #endif
+diff --git a/arch/arm/mach-shmobile/include/mach/vmalloc.h b/arch/arm/mach-shmobile/include/mach/vmalloc.h
+deleted file mode 100644
+index 2b8fd8b..0000000
+--- a/arch/arm/mach-shmobile/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,7 +0,0 @@
+-#ifndef __ASM_MACH_VMALLOC_H
+-#define __ASM_MACH_VMALLOC_H
+-
+-/* Vmalloc at ... - 0xe5ffffff */
+-#define VMALLOC_END 0xe6000000UL
+-
+-#endif /* __ASM_MACH_VMALLOC_H */
+diff --git a/arch/arm/mach-spear3xx/include/mach/entry-macro.S b/arch/arm/mach-spear3xx/include/mach/entry-macro.S
+index 53da422..de3bb41 100644
+--- a/arch/arm/mach-spear3xx/include/mach/entry-macro.S
++++ b/arch/arm/mach-spear3xx/include/mach/entry-macro.S
+@@ -11,35 +11,8 @@
+ * warranty of any kind, whether express or implied.
+ */
+
+-#include <asm/hardware/vic.h>
+-#include <mach/hardware.h>
+-
+ .macro disable_fiq
+ .endm
+
+- .macro get_irqnr_preamble, base, tmp
+- .endm
+-
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+-
+- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+- ldr \base, =VA_SPEAR3XX_ML1_VIC_BASE
+- ldr \irqstat, [\base, #VIC_IRQ_STATUS] @ get status
+- teq \irqstat, #0
+- beq 1001f @ this will set/reset
+- @ zero register
+- /*
+- * Following code will find bit position of least significang
+- * bit set in irqstat, using following equation
+- * least significant bit set in n = (n & ~(n-1))
+- */
+- sub \tmp, \irqstat, #1 @ tmp = irqstat - 1
+- mvn \tmp, \tmp @ tmp = ~tmp
+- and \irqstat, \irqstat, \tmp @ irqstat &= tmp
+- /* Now, irqstat is = bit no. of 1st bit set in vic irq status */
+- clz \tmp, \irqstat @ tmp = leading zeros
+- rsb \irqnr, \tmp, #0x1F @ irqnr = 32 - tmp - 1
+-
+-1001: /* EQ will be set if no irqs pending */
+- .endm
+diff --git a/arch/arm/mach-spear3xx/include/mach/vmalloc.h b/arch/arm/mach-spear3xx/include/mach/vmalloc.h
+deleted file mode 100644
+index df977b3..0000000
+--- a/arch/arm/mach-spear3xx/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,19 +0,0 @@
+-/*
+- * arch/arm/mach-spear3xx/include/mach/vmalloc.h
+- *
+- * Defining Vmalloc area for SPEAr3xx machine family
+- *
+- * Copyright (C) 2009 ST Microelectronics
+- * Viresh Kumar<viresh.kumar@st.com>
+- *
+- * This file is licensed under the terms of the GNU General Public
+- * License version 2. This program is licensed "as is" without any
+- * warranty of any kind, whether express or implied.
+- */
+-
+-#ifndef __MACH_VMALLOC_H
+-#define __MACH_VMALLOC_H
+-
+-#include <plat/vmalloc.h>
+-
+-#endif /* __MACH_VMALLOC_H */
+diff --git a/arch/arm/mach-spear3xx/spear300_evb.c b/arch/arm/mach-spear3xx/spear300_evb.c
+index a5ff98e..61068ba 100644
+--- a/arch/arm/mach-spear3xx/spear300_evb.c
++++ b/arch/arm/mach-spear3xx/spear300_evb.c
+@@ -11,6 +11,7 @@
+ * warranty of any kind, whether express or implied.
+ */
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach-types.h>
+ #include <mach/generic.h>
+@@ -67,6 +68,7 @@ MACHINE_START(SPEAR300, "ST-SPEAR300-EVB")
+ .atag_offset = 0x100,
+ .map_io = spear3xx_map_io,
+ .init_irq = spear3xx_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &spear3xx_timer,
+ .init_machine = spear300_evb_init,
+ MACHINE_END
+diff --git a/arch/arm/mach-spear3xx/spear310_evb.c b/arch/arm/mach-spear3xx/spear310_evb.c
+index 45d180d..7903abe 100644
+--- a/arch/arm/mach-spear3xx/spear310_evb.c
++++ b/arch/arm/mach-spear3xx/spear310_evb.c
+@@ -11,6 +11,7 @@
+ * warranty of any kind, whether express or implied.
+ */
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach-types.h>
+ #include <mach/generic.h>
+@@ -73,6 +74,7 @@ MACHINE_START(SPEAR310, "ST-SPEAR310-EVB")
+ .atag_offset = 0x100,
+ .map_io = spear3xx_map_io,
+ .init_irq = spear3xx_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &spear3xx_timer,
+ .init_machine = spear310_evb_init,
+ MACHINE_END
+diff --git a/arch/arm/mach-spear3xx/spear320_evb.c b/arch/arm/mach-spear3xx/spear320_evb.c
+index 2287984..e9751f9 100644
+--- a/arch/arm/mach-spear3xx/spear320_evb.c
++++ b/arch/arm/mach-spear3xx/spear320_evb.c
+@@ -11,6 +11,7 @@
+ * warranty of any kind, whether express or implied.
+ */
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach-types.h>
+ #include <mach/generic.h>
+@@ -71,6 +72,7 @@ MACHINE_START(SPEAR320, "ST-SPEAR320-EVB")
+ .atag_offset = 0x100,
+ .map_io = spear3xx_map_io,
+ .init_irq = spear3xx_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &spear3xx_timer,
+ .init_machine = spear320_evb_init,
+ MACHINE_END
+diff --git a/arch/arm/mach-spear6xx/include/mach/entry-macro.S b/arch/arm/mach-spear6xx/include/mach/entry-macro.S
+index 8a0b0ed..d490a91 100644
+--- a/arch/arm/mach-spear6xx/include/mach/entry-macro.S
++++ b/arch/arm/mach-spear6xx/include/mach/entry-macro.S
+@@ -11,44 +11,8 @@
+ * warranty of any kind, whether express or implied.
+ */
+
+-#include <asm/hardware/vic.h>
+-#include <mach/hardware.h>
+-
+ .macro disable_fiq
+ .endm
+
+- .macro get_irqnr_preamble, base, tmp
+- .endm
+-
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+-
+- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+- ldr \base, =VA_SPEAR6XX_CPU_VIC_PRI_BASE
+- ldr \irqstat, [\base, #VIC_IRQ_STATUS] @ get status
+- mov \irqnr, #0
+- teq \irqstat, #0
+- bne 1001f
+- ldr \base, =VA_SPEAR6XX_CPU_VIC_SEC_BASE
+- ldr \irqstat, [\base, #VIC_IRQ_STATUS] @ get status
+- teq \irqstat, #0
+- beq 1002f @ this will set/reset
+- @ zero register
+- mov \irqnr, #32
+-1001:
+- /*
+- * Following code will find bit position of least significang
+- * bit set in irqstat, using following equation
+- * least significant bit set in n = (n & ~(n-1))
+- */
+- sub \tmp, \irqstat, #1 @ tmp = irqstat - 1
+- mvn \tmp, \tmp @ tmp = ~tmp
+- and \irqstat, \irqstat, \tmp @ irqstat &= tmp
+- /* Now, irqstat is = bit no. of 1st bit set in vic irq status */
+- clz \tmp, \irqstat @ tmp = leading zeros
+-
+- rsb \tmp, \tmp, #0x1F @ tmp = 32 - tmp - 1
+- add \irqnr, \irqnr, \tmp
+-
+-1002: /* EQ will be set if no irqs pending */
+- .endm
+diff --git a/arch/arm/mach-spear6xx/include/mach/vmalloc.h b/arch/arm/mach-spear6xx/include/mach/vmalloc.h
+deleted file mode 100644
+index 4a0b56c..0000000
+--- a/arch/arm/mach-spear6xx/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,19 +0,0 @@
+-/*
+- * arch/arm/mach-spear6xx/include/mach/vmalloc.h
+- *
+- * Defining Vmalloc area for SPEAr6xx machine family
+- *
+- * Copyright (C) 2009 ST Microelectronics
+- * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+- *
+- * This file is licensed under the terms of the GNU General Public
+- * License version 2. This program is licensed "as is" without any
+- * warranty of any kind, whether express or implied.
+- */
+-
+-#ifndef __MACH_VMALLOC_H
+-#define __MACH_VMALLOC_H
+-
+-#include <plat/vmalloc.h>
+-
+-#endif /* __MACH_VMALLOC_H */
+diff --git a/arch/arm/mach-spear6xx/spear600_evb.c b/arch/arm/mach-spear6xx/spear600_evb.c
+index 8238fe3..ff139ed 100644
+--- a/arch/arm/mach-spear6xx/spear600_evb.c
++++ b/arch/arm/mach-spear6xx/spear600_evb.c
+@@ -11,6 +11,7 @@
+ * warranty of any kind, whether express or implied.
+ */
+
++#include <asm/hardware/vic.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach-types.h>
+ #include <mach/generic.h>
+@@ -46,6 +47,7 @@ MACHINE_START(SPEAR600, "ST-SPEAR600-EVB")
+ .atag_offset = 0x100,
+ .map_io = spear6xx_map_io,
+ .init_irq = spear6xx_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &spear6xx_timer,
+ .init_machine = spear600_evb_init,
+ MACHINE_END
+diff --git a/arch/arm/mach-tegra/board-dt.c b/arch/arm/mach-tegra/board-dt.c
+index 74743ad..f6f03ce 100644
+--- a/arch/arm/mach-tegra/board-dt.c
++++ b/arch/arm/mach-tegra/board-dt.c
+@@ -32,6 +32,7 @@
+ #include <linux/i2c.h>
+ #include <linux/i2c-tegra.h>
+
++#include <asm/hardware/gic.h>
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach/time.h>
+@@ -130,6 +131,7 @@ DT_MACHINE_START(TEGRA_DT, "nVidia Tegra (Flattened Device Tree)")
+ .map_io = tegra_map_common_io,
+ .init_early = tegra_init_early,
+ .init_irq = tegra_init_irq,
++ .handle_irq = gic_handle_irq,
+ .timer = &tegra_timer,
+ .init_machine = tegra_dt_init,
+ .dt_compat = tegra_dt_board_compat,
+diff --git a/arch/arm/mach-tegra/board-harmony.c b/arch/arm/mach-tegra/board-harmony.c
+index f0bdc5e..fd190a8 100644
+--- a/arch/arm/mach-tegra/board-harmony.c
++++ b/arch/arm/mach-tegra/board-harmony.c
+@@ -31,6 +31,7 @@
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach/time.h>
++#include <asm/hardware/gic.h>
+ #include <asm/setup.h>
+
+ #include <mach/tegra_wm8903_pdata.h>
+@@ -187,6 +188,7 @@ MACHINE_START(HARMONY, "harmony")
+ .map_io = tegra_map_common_io,
+ .init_early = tegra_init_early,
+ .init_irq = tegra_init_irq,
++ .handle_irq = gic_handle_irq,
+ .timer = &tegra_timer,
+ .init_machine = tegra_harmony_init,
+ MACHINE_END
+diff --git a/arch/arm/mach-tegra/board-paz00.c b/arch/arm/mach-tegra/board-paz00.c
+index 55c55ba..0b7e1cf 100644
+--- a/arch/arm/mach-tegra/board-paz00.c
++++ b/arch/arm/mach-tegra/board-paz00.c
+@@ -29,6 +29,7 @@
+ #include <linux/gpio.h>
+ #include <linux/rfkill-gpio.h>
+
++#include <asm/hardware/gic.h>
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach/time.h>
+@@ -190,6 +191,7 @@ MACHINE_START(PAZ00, "Toshiba AC100 / Dynabook AZ")
+ .map_io = tegra_map_common_io,
+ .init_early = tegra_init_early,
+ .init_irq = tegra_init_irq,
++ .handle_irq = gic_handle_irq,
+ .timer = &tegra_timer,
+ .init_machine = tegra_paz00_init,
+ MACHINE_END
+diff --git a/arch/arm/mach-tegra/board-seaboard.c b/arch/arm/mach-tegra/board-seaboard.c
+index bf13ea3..7328379 100644
+--- a/arch/arm/mach-tegra/board-seaboard.c
++++ b/arch/arm/mach-tegra/board-seaboard.c
+@@ -34,6 +34,7 @@
+
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
++#include <asm/hardware/gic.h>
+
+ #include "board.h"
+ #include "board-seaboard.h"
+@@ -284,6 +285,7 @@ MACHINE_START(SEABOARD, "seaboard")
+ .map_io = tegra_map_common_io,
+ .init_early = tegra_init_early,
+ .init_irq = tegra_init_irq,
++ .handle_irq = gic_handle_irq,
+ .timer = &tegra_timer,
+ .init_machine = tegra_seaboard_init,
+ MACHINE_END
+@@ -293,6 +295,7 @@ MACHINE_START(KAEN, "kaen")
+ .map_io = tegra_map_common_io,
+ .init_early = tegra_init_early,
+ .init_irq = tegra_init_irq,
++ .handle_irq = gic_handle_irq,
+ .timer = &tegra_timer,
+ .init_machine = tegra_kaen_init,
+ MACHINE_END
+@@ -302,6 +305,7 @@ MACHINE_START(WARIO, "wario")
+ .map_io = tegra_map_common_io,
+ .init_early = tegra_init_early,
+ .init_irq = tegra_init_irq,
++ .handle_irq = gic_handle_irq,
+ .timer = &tegra_timer,
+ .init_machine = tegra_wario_init,
+ MACHINE_END
+diff --git a/arch/arm/mach-tegra/board-trimslice.c b/arch/arm/mach-tegra/board-trimslice.c
+index 1a6617b..60a36a2 100644
+--- a/arch/arm/mach-tegra/board-trimslice.c
++++ b/arch/arm/mach-tegra/board-trimslice.c
+@@ -26,6 +26,7 @@
+ #include <linux/i2c.h>
+ #include <linux/gpio.h>
+
++#include <asm/hardware/gic.h>
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
+ #include <asm/setup.h>
+@@ -176,6 +177,7 @@ MACHINE_START(TRIMSLICE, "trimslice")
+ .map_io = tegra_map_common_io,
+ .init_early = tegra_init_early,
+ .init_irq = tegra_init_irq,
++ .handle_irq = gic_handle_irq,
+ .timer = &tegra_timer,
+ .init_machine = tegra_trimslice_init,
+ MACHINE_END
+diff --git a/arch/arm/mach-tegra/include/mach/entry-macro.S b/arch/arm/mach-tegra/include/mach/entry-macro.S
+index dd165c5..ac11262 100644
+--- a/arch/arm/mach-tegra/include/mach/entry-macro.S
++++ b/arch/arm/mach-tegra/include/mach/entry-macro.S
+@@ -12,30 +12,15 @@
+ * GNU General Public License for more details.
+ *
+ */
+-#include <mach/iomap.h>
+-#include <mach/io.h>
+-
+-#if defined(CONFIG_ARM_GIC)
+-#define HAVE_GET_IRQNR_PREAMBLE
+-#include <asm/hardware/entry-macro-gic.S>
+-
+- /* Uses the GIC interrupt controller built into the cpu */
+-#define ICTRL_BASE (IO_CPU_VIRT + 0x100)
+
+ .macro disable_fiq
+ .endm
+
+- .macro get_irqnr_preamble, base, tmp
+- movw \base, #(ICTRL_BASE & 0x0000ffff)
+- movt \base, #((ICTRL_BASE & 0xffff0000) >> 16)
++ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+
+- .macro arch_ret_to_user, tmp1, tmp2
+- .endm
+-#else
++#if !defined(CONFIG_ARM_GIC)
+ /* legacy interrupt controller for AP16 */
+- .macro disable_fiq
+- .endm
+
+ .macro get_irqnr_preamble, base, tmp
+ @ enable imprecise aborts
+@@ -46,9 +31,6 @@
+ orr \base, #0x0000f000
+ .endm
+
+- .macro arch_ret_to_user, tmp1, tmp2
+- .endm
+-
+ .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+ ldr \irqnr, [\base, #0x20] @ EVT_IRQ_STS
+ cmp \irqnr, #0x80
+diff --git a/arch/arm/mach-tegra/include/mach/io.h b/arch/arm/mach-tegra/include/mach/io.h
+index 35a011f..f15deff 100644
+--- a/arch/arm/mach-tegra/include/mach/io.h
++++ b/arch/arm/mach-tegra/include/mach/io.h
+@@ -71,12 +71,6 @@
+
+ #ifndef __ASSEMBLER__
+
+-#define __arch_ioremap tegra_ioremap
+-#define __arch_iounmap tegra_iounmap
+-
+-void __iomem *tegra_ioremap(unsigned long phys, size_t size, unsigned int type);
+-void tegra_iounmap(volatile void __iomem *addr);
+-
+ #define IO_ADDRESS(n) (IO_TO_VIRT(n))
+
+ #ifdef CONFIG_TEGRA_PCI
+diff --git a/arch/arm/mach-tegra/include/mach/vmalloc.h b/arch/arm/mach-tegra/include/mach/vmalloc.h
+deleted file mode 100644
+index fd6aa65..0000000
+--- a/arch/arm/mach-tegra/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,28 +0,0 @@
+-/*
+- * arch/arm/mach-tegra/include/mach/vmalloc.h
+- *
+- * Copyright (C) 2010 Google, Inc.
+- *
+- * Author:
+- * Colin Cross <ccross@google.com>
+- * Erik Gilling <konkers@google.com>
+- *
+- * This software is licensed under the terms of the GNU General Public
+- * License version 2, as published by the Free Software Foundation, and
+- * may be copied, distributed, and modified under those terms.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- */
+-
+-#ifndef __MACH_TEGRA_VMALLOC_H
+-#define __MACH_TEGRA_VMALLOC_H
+-
+-#include <asm/sizes.h>
+-
+-#define VMALLOC_END 0xFE000000UL
+-
+-#endif
+diff --git a/arch/arm/mach-tegra/io.c b/arch/arm/mach-tegra/io.c
+index 5489f8b..d23ee2d 100644
+--- a/arch/arm/mach-tegra/io.c
++++ b/arch/arm/mach-tegra/io.c
+@@ -60,24 +60,3 @@ void __init tegra_map_common_io(void)
+ {
+ iotable_init(tegra_io_desc, ARRAY_SIZE(tegra_io_desc));
+ }
+-
+-/*
+- * Intercept ioremap() requests for addresses in our fixed mapping regions.
+- */
+-void __iomem *tegra_ioremap(unsigned long p, size_t size, unsigned int type)
+-{
+- void __iomem *v = IO_ADDRESS(p);
+- if (v == NULL)
+- v = __arm_ioremap(p, size, type);
+- return v;
+-}
+-EXPORT_SYMBOL(tegra_ioremap);
+-
+-void tegra_iounmap(volatile void __iomem *addr)
+-{
+- unsigned long virt = (unsigned long)addr;
+-
+- if (virt >= VMALLOC_START && virt < VMALLOC_END)
+- __iounmap(addr);
+-}
+-EXPORT_SYMBOL(tegra_iounmap);
+diff --git a/arch/arm/mach-u300/include/mach/entry-macro.S b/arch/arm/mach-u300/include/mach/entry-macro.S
+index 20731ae..7181d6a 100644
+--- a/arch/arm/mach-u300/include/mach/entry-macro.S
++++ b/arch/arm/mach-u300/include/mach/entry-macro.S
+@@ -8,33 +8,9 @@
+ * Low-level IRQ helper macros for ST-Ericsson U300
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ */
+-#include <mach/hardware.h>
+-#include <asm/hardware/vic.h>
+
+ .macro disable_fiq
+ .endm
+
+- .macro get_irqnr_preamble, base, tmp
+- .endm
+-
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+-
+- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+- ldr \base, = U300_AHB_PER_VIRT_BASE-U300_AHB_PER_PHYS_BASE+U300_INTCON0_BASE
+- ldr \irqstat, [\base, #VIC_IRQ_STATUS] @ get masked status
+- mov \irqnr, #0
+- teq \irqstat, #0
+- bne 1002f
+-1001: ldr \base, = U300_AHB_PER_VIRT_BASE-U300_AHB_PER_PHYS_BASE+U300_INTCON1_BASE
+- ldr \irqstat, [\base, #VIC_IRQ_STATUS] @ get masked status
+- mov \irqnr, #32
+- teq \irqstat, #0
+- beq 1003f
+-1002: tst \irqstat, #1
+- bne 1003f
+- add \irqnr, \irqnr, #1
+- movs \irqstat, \irqstat, lsr #1
+- bne 1002b
+-1003: /* EQ will be set if no irqs pending */
+- .endm
+diff --git a/arch/arm/mach-u300/include/mach/system.h b/arch/arm/mach-u300/include/mach/system.h
+index 8daf136..6b6fef7 100644
+--- a/arch/arm/mach-u300/include/mach/system.h
++++ b/arch/arm/mach-u300/include/mach/system.h
+@@ -27,8 +27,6 @@ static void arch_reset(char mode, const char *cmd)
+ case 's':
+ case 'h':
+ printk(KERN_CRIT "RESET: shutting down/rebooting system\n");
+- /* Disable interrupts */
+- local_irq_disable();
+ #ifdef CONFIG_COH901327_WATCHDOG
+ coh901327_watchdog_reset();
+ #endif
+diff --git a/arch/arm/mach-u300/include/mach/vmalloc.h b/arch/arm/mach-u300/include/mach/vmalloc.h
+deleted file mode 100644
+index ec423b9..0000000
+--- a/arch/arm/mach-u300/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/*
+- *
+- * arch/arm/mach-u300/include/mach/vmalloc.h
+- *
+- *
+- * Copyright (C) 2006-2009 ST-Ericsson AB
+- * License terms: GNU General Public License (GPL) version 2
+- * Virtual memory allocations
+- * End must be above the I/O registers and on an even 2MiB boundary.
+- * Author: Linus Walleij <linus.walleij@stericsson.com>
+- */
+-#define VMALLOC_END 0xfe800000UL
+diff --git a/arch/arm/mach-u300/u300.c b/arch/arm/mach-u300/u300.c
+index 89422ee..4a4fd33 100644
+--- a/arch/arm/mach-u300/u300.c
++++ b/arch/arm/mach-u300/u300.c
+@@ -19,6 +19,7 @@
+ #include <linux/io.h>
+ #include <mach/hardware.h>
+ #include <mach/platform.h>
++#include <asm/hardware/vic.h>
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
+ #include <asm/memory.h>
+@@ -49,6 +50,7 @@ MACHINE_START(U300, MACH_U300_STRING)
+ .atag_offset = BOOT_PARAMS_OFFSET,
+ .map_io = u300_map_io,
+ .init_irq = u300_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &u300_timer,
+ .init_machine = u300_init_machine,
+ MACHINE_END
+diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
+index bdd7b80..de1f5f8 100644
+--- a/arch/arm/mach-ux500/board-mop500.c
++++ b/arch/arm/mach-ux500/board-mop500.c
+@@ -33,6 +33,7 @@
+ #include <linux/leds.h>
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
++#include <asm/hardware/gic.h>
+
+ #include <plat/i2c.h>
+ #include <plat/ste_dma40.h>
+@@ -695,6 +696,7 @@ MACHINE_START(U8500, "ST-Ericsson MOP500 platform")
+ .init_irq = ux500_init_irq,
+ /* we re-use nomadik timer here */
+ .timer = &ux500_timer,
++ .handle_irq = gic_handle_irq,
+ .init_machine = mop500_init_machine,
+ MACHINE_END
+
+@@ -703,6 +705,7 @@ MACHINE_START(HREFV60, "ST-Ericsson U8500 Platform HREFv60+")
+ .map_io = u8500_map_io,
+ .init_irq = ux500_init_irq,
+ .timer = &ux500_timer,
++ .handle_irq = gic_handle_irq,
+ .init_machine = hrefv60_init_machine,
+ MACHINE_END
+
+@@ -712,5 +715,6 @@ MACHINE_START(SNOWBALL, "Calao Systems Snowball platform")
+ .init_irq = ux500_init_irq,
+ /* we re-use nomadik timer here */
+ .timer = &ux500_timer,
++ .handle_irq = gic_handle_irq,
+ .init_machine = snowball_init_machine,
+ MACHINE_END
+diff --git a/arch/arm/mach-ux500/board-u5500.c b/arch/arm/mach-ux500/board-u5500.c
+index 82025ba..fe1569b 100644
+--- a/arch/arm/mach-ux500/board-u5500.c
++++ b/arch/arm/mach-ux500/board-u5500.c
+@@ -12,6 +12,7 @@
+ #include <linux/i2c.h>
+ #include <linux/mfd/ab5500/ab5500.h>
+
++#include <asm/hardware/gic.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach-types.h>
+
+@@ -149,5 +150,6 @@ MACHINE_START(U5500, "ST-Ericsson U5500 Platform")
+ .map_io = u5500_map_io,
+ .init_irq = ux500_init_irq,
+ .timer = &ux500_timer,
++ .handle_irq = gic_handle_irq,
+ .init_machine = u5500_init_machine,
+ MACHINE_END
+diff --git a/arch/arm/mach-ux500/include/mach/entry-macro.S b/arch/arm/mach-ux500/include/mach/entry-macro.S
+index 071bba9..e16299e 100644
+--- a/arch/arm/mach-ux500/include/mach/entry-macro.S
++++ b/arch/arm/mach-ux500/include/mach/entry-macro.S
+@@ -10,8 +10,6 @@
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+-#include <mach/hardware.h>
+-#include <asm/hardware/entry-macro-gic.S>
+
+ .macro disable_fiq
+ .endm
+diff --git a/arch/arm/mach-ux500/include/mach/vmalloc.h b/arch/arm/mach-ux500/include/mach/vmalloc.h
+deleted file mode 100644
+index a4945cb..0000000
+--- a/arch/arm/mach-ux500/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,18 +0,0 @@
+-/*
+- * Copyright (C) 2009 ST-Ericsson
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+- */
+-#define VMALLOC_END 0xf0000000UL
+diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
+index e340a54..4d8dfc1 100644
+--- a/arch/arm/mach-versatile/core.c
++++ b/arch/arm/mach-versatile/core.c
+@@ -141,11 +141,6 @@ static struct map_desc versatile_io_desc[] __initdata = {
+ },
+ #ifdef CONFIG_MACH_VERSATILE_AB
+ {
+- .virtual = IO_ADDRESS(VERSATILE_GPIO0_BASE),
+- .pfn = __phys_to_pfn(VERSATILE_GPIO0_BASE),
+- .length = SZ_4K,
+- .type = MT_DEVICE
+- }, {
+ .virtual = IO_ADDRESS(VERSATILE_IB2_BASE),
+ .pfn = __phys_to_pfn(VERSATILE_IB2_BASE),
+ .length = SZ_64M,
+diff --git a/arch/arm/mach-versatile/include/mach/entry-macro.S b/arch/arm/mach-versatile/include/mach/entry-macro.S
+index e6f7c16..b6f0dbf 100644
+--- a/arch/arm/mach-versatile/include/mach/entry-macro.S
++++ b/arch/arm/mach-versatile/include/mach/entry-macro.S
+@@ -7,39 +7,9 @@
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+-#include <mach/hardware.h>
+-#include <mach/platform.h>
+-#include <asm/hardware/vic.h>
+
+ .macro disable_fiq
+ .endm
+
+- .macro get_irqnr_preamble, base, tmp
+- ldr \base, =IO_ADDRESS(VERSATILE_VIC_BASE)
+- .endm
+-
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+-
+- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+- ldr \irqstat, [\base, #VIC_IRQ_STATUS] @ get masked status
+- mov \irqnr, #0
+- teq \irqstat, #0
+- beq 1003f
+-
+-1001: tst \irqstat, #15
+- bne 1002f
+- add \irqnr, \irqnr, #4
+- movs \irqstat, \irqstat, lsr #4
+- bne 1001b
+-1002: tst \irqstat, #1
+- bne 1003f
+- add \irqnr, \irqnr, #1
+- movs \irqstat, \irqstat, lsr #1
+- bne 1002b
+-1003: /* EQ will be set if no irqs pending */
+-
+-@ clz \irqnr, \irqstat
+-@1003: /* EQ will be set if we reach MAXIRQNUM */
+- .endm
+-
+diff --git a/arch/arm/mach-versatile/include/mach/vmalloc.h b/arch/arm/mach-versatile/include/mach/vmalloc.h
+deleted file mode 100644
+index 7d8e069..0000000
+--- a/arch/arm/mach-versatile/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,21 +0,0 @@
+-/*
+- * arch/arm/mach-versatile/include/mach/vmalloc.h
+- *
+- * Copyright (C) 2003 ARM Limited
+- * Copyright (C) 2000 Russell King.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+- */
+-#define VMALLOC_END 0xd8000000UL
+diff --git a/arch/arm/mach-versatile/versatile_ab.c b/arch/arm/mach-versatile/versatile_ab.c
+index fda4866..c83a1f3 100644
+--- a/arch/arm/mach-versatile/versatile_ab.c
++++ b/arch/arm/mach-versatile/versatile_ab.c
+@@ -27,6 +27,7 @@
+
+ #include <mach/hardware.h>
+ #include <asm/irq.h>
++#include <asm/hardware/vic.h>
+ #include <asm/mach-types.h>
+
+ #include <asm/mach/arch.h>
+@@ -39,6 +40,7 @@ MACHINE_START(VERSATILE_AB, "ARM-Versatile AB")
+ .map_io = versatile_map_io,
+ .init_early = versatile_init_early,
+ .init_irq = versatile_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &versatile_timer,
+ .init_machine = versatile_init,
+ MACHINE_END
+diff --git a/arch/arm/mach-versatile/versatile_dt.c b/arch/arm/mach-versatile/versatile_dt.c
+index 54e037c..f4d1e0f 100644
+--- a/arch/arm/mach-versatile/versatile_dt.c
++++ b/arch/arm/mach-versatile/versatile_dt.c
+@@ -24,6 +24,7 @@
+ #include <linux/init.h>
+ #include <linux/of_irq.h>
+ #include <linux/of_platform.h>
++#include <asm/hardware/vic.h>
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
+
+@@ -45,6 +46,7 @@ DT_MACHINE_START(VERSATILE_PB, "ARM-Versatile (Device Tree Support)")
+ .map_io = versatile_map_io,
+ .init_early = versatile_init_early,
+ .init_irq = versatile_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &versatile_timer,
+ .init_machine = versatile_dt_init,
+ .dt_compat = versatile_dt_match,
+diff --git a/arch/arm/mach-versatile/versatile_pb.c b/arch/arm/mach-versatile/versatile_pb.c
+index feaf9cb..4d31eeb 100644
+--- a/arch/arm/mach-versatile/versatile_pb.c
++++ b/arch/arm/mach-versatile/versatile_pb.c
+@@ -28,6 +28,7 @@
+ #include <linux/io.h>
+
+ #include <mach/hardware.h>
++#include <asm/hardware/vic.h>
+ #include <asm/irq.h>
+ #include <asm/mach-types.h>
+
+@@ -107,6 +108,7 @@ MACHINE_START(VERSATILE_PB, "ARM-Versatile PB")
+ .map_io = versatile_map_io,
+ .init_early = versatile_init_early,
+ .init_irq = versatile_init_irq,
++ .handle_irq = vic_handle_irq,
+ .timer = &versatile_timer,
+ .init_machine = versatile_pb_init,
+ MACHINE_END
+diff --git a/arch/arm/mach-vexpress/include/mach/entry-macro.S b/arch/arm/mach-vexpress/include/mach/entry-macro.S
+index 73c1129..a14f9e6 100644
+--- a/arch/arm/mach-vexpress/include/mach/entry-macro.S
++++ b/arch/arm/mach-vexpress/include/mach/entry-macro.S
+@@ -1,5 +1,3 @@
+-#include <asm/hardware/entry-macro-gic.S>
+-
+ .macro disable_fiq
+ .endm
+
+diff --git a/arch/arm/mach-vexpress/include/mach/vmalloc.h b/arch/arm/mach-vexpress/include/mach/vmalloc.h
+deleted file mode 100644
+index f43a36e..0000000
+--- a/arch/arm/mach-vexpress/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,21 +0,0 @@
+-/*
+- * arch/arm/mach-vexpress/include/mach/vmalloc.h
+- *
+- * Copyright (C) 2003 ARM Limited
+- * Copyright (C) 2000 Russell King.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+- */
+-#define VMALLOC_END 0xf8000000UL
+diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
+index 1fafc32..7aa07a8 100644
+--- a/arch/arm/mach-vexpress/v2m.c
++++ b/arch/arm/mach-vexpress/v2m.c
+@@ -23,6 +23,7 @@
+ #include <asm/hardware/arm_timer.h>
+ #include <asm/hardware/timer-sp.h>
+ #include <asm/hardware/sp810.h>
++#include <asm/hardware/gic.h>
+
+ #include <mach/ct-ca9x4.h>
+ #include <mach/motherboard.h>
+@@ -448,5 +449,6 @@ MACHINE_START(VEXPRESS, "ARM-Versatile Express")
+ .init_early = v2m_init_early,
+ .init_irq = v2m_init_irq,
+ .timer = &v2m_timer,
++ .handle_irq = gic_handle_irq,
+ .init_machine = v2m_init,
+ MACHINE_END
+diff --git a/arch/arm/mach-vt8500/include/mach/vmalloc.h b/arch/arm/mach-vt8500/include/mach/vmalloc.h
+deleted file mode 100644
+index 4642290..0000000
+--- a/arch/arm/mach-vt8500/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,20 +0,0 @@
+-/*
+- * arch/arm/mach-vt8500/include/mach/vmalloc.h
+- *
+- * Copyright (C) 2000 Russell King.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+- */
+-#define VMALLOC_END 0xd0000000UL
+diff --git a/arch/arm/mach-w90x900/include/mach/system.h b/arch/arm/mach-w90x900/include/mach/system.h
+index ce228bd..68875a1 100644
+--- a/arch/arm/mach-w90x900/include/mach/system.h
++++ b/arch/arm/mach-w90x900/include/mach/system.h
+@@ -33,7 +33,7 @@ static void arch_reset(char mode, const char *cmd)
+ {
+ if (mode == 's') {
+ /* Jump into ROM at address 0 */
+- cpu_reset(0);
++ soft_restart(0);
+ } else {
+ __raw_writel(WTE | WTRE | WTCLK, WTCR);
+ }
+diff --git a/arch/arm/mach-w90x900/include/mach/vmalloc.h b/arch/arm/mach-w90x900/include/mach/vmalloc.h
+deleted file mode 100644
+index b067e44..0000000
+--- a/arch/arm/mach-w90x900/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,23 +0,0 @@
+-/*
+- * arch/arm/mach-w90x900/include/mach/vmalloc.h
+- *
+- * Copyright (c) 2008 Nuvoton technology corporation
+- * All rights reserved.
+- *
+- * Wan ZongShun <mcuos.com@gmail.com>
+- *
+- * Based on arch/arm/mach-s3c2410/include/mach/vmalloc.h
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- */
+-
+-#ifndef __ASM_ARCH_VMALLOC_H
+-#define __ASM_ARCH_VMALLOC_H
+-
+-#define VMALLOC_END (0xe0000000UL)
+-
+-#endif /* __ASM_ARCH_VMALLOC_H */
+diff --git a/arch/arm/mach-w90x900/irq.c b/arch/arm/mach-w90x900/irq.c
+index 7bf143c..b466e24 100644
+--- a/arch/arm/mach-w90x900/irq.c
++++ b/arch/arm/mach-w90x900/irq.c
+@@ -28,6 +28,8 @@
+ #include <mach/hardware.h>
+ #include <mach/regs-irq.h>
+
++#include "nuc9xx.h"
++
+ struct group_irq {
+ unsigned long gpen;
+ unsigned int enabled;
+diff --git a/arch/arm/mach-w90x900/nuc910.h b/arch/arm/mach-w90x900/nuc910.h
+index 83e9ba5..b14c71a 100644
+--- a/arch/arm/mach-w90x900/nuc910.h
++++ b/arch/arm/mach-w90x900/nuc910.h
+@@ -12,14 +12,7 @@
+ * published by the Free Software Foundation.
+ *
+ */
+-
+-struct map_desc;
+-struct sys_timer;
+-
+-/* core initialisation functions */
+-
+-extern void nuc900_init_irq(void);
+-extern struct sys_timer nuc900_timer;
++#include "nuc9xx.h"
+
+ /* extern file from nuc910.c */
+
+diff --git a/arch/arm/mach-w90x900/nuc950.h b/arch/arm/mach-w90x900/nuc950.h
+index 98a1148..6e9de30 100644
+--- a/arch/arm/mach-w90x900/nuc950.h
++++ b/arch/arm/mach-w90x900/nuc950.h
+@@ -12,14 +12,7 @@
+ * published by the Free Software Foundation.
+ *
+ */
+-
+-struct map_desc;
+-struct sys_timer;
+-
+-/* core initialisation functions */
+-
+-extern void nuc900_init_irq(void);
+-extern struct sys_timer nuc900_timer;
++#include "nuc9xx.h"
+
+ /* extern file from nuc950.c */
+
+diff --git a/arch/arm/mach-w90x900/nuc960.h b/arch/arm/mach-w90x900/nuc960.h
+index f0c07cb..9f6df9a 100644
+--- a/arch/arm/mach-w90x900/nuc960.h
++++ b/arch/arm/mach-w90x900/nuc960.h
+@@ -12,14 +12,7 @@
+ * published by the Free Software Foundation.
+ *
+ */
+-
+-struct map_desc;
+-struct sys_timer;
+-
+-/* core initialisation functions */
+-
+-extern void nuc900_init_irq(void);
+-extern struct sys_timer nuc900_timer;
++#include "nuc9xx.h"
+
+ /* extern file from nuc960.c */
+
+diff --git a/arch/arm/mach-w90x900/nuc9xx.h b/arch/arm/mach-w90x900/nuc9xx.h
+new file mode 100644
+index 0000000..847c4f3
+--- /dev/null
++++ b/arch/arm/mach-w90x900/nuc9xx.h
+@@ -0,0 +1,23 @@
++/*
++ * arch/arm/mach-w90x900/nuc9xx.h
++ *
++ * Copied from nuc910.h, which had:
++ *
++ * Copyright (c) 2008 Nuvoton corporation
++ *
++ * Header file for NUC900 CPU support
++ *
++ * Wan ZongShun <mcuos.com@gmail.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ */
++struct map_desc;
++struct sys_timer;
++
++/* core initialisation functions */
++
++extern void nuc900_init_irq(void);
++extern struct sys_timer nuc900_timer;
+diff --git a/arch/arm/mach-w90x900/time.c b/arch/arm/mach-w90x900/time.c
+index a2c4e2d..fa27c49 100644
+--- a/arch/arm/mach-w90x900/time.c
++++ b/arch/arm/mach-w90x900/time.c
+@@ -33,6 +33,8 @@
+ #include <mach/map.h>
+ #include <mach/regs-timer.h>
+
++#include "nuc9xx.h"
++
+ #define RESETINT 0x1f
+ #define PERIOD (0x01 << 27)
+ #define ONESHOT (0x00 << 27)
+diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
+index 73e9368..ab5cfdd 100644
+--- a/arch/arm/mach-zynq/common.c
++++ b/arch/arm/mach-zynq/common.c
+@@ -112,6 +112,7 @@ static const char *xilinx_dt_match[] = {
+ MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform")
+ .map_io = xilinx_map_io,
+ .init_irq = xilinx_irq_init,
++ .handle_irq = gic_handle_irq,
+ .init_machine = xilinx_init_machine,
+ .timer = &xttcpss_sys_timer,
+ .dt_compat = xilinx_dt_match,
+diff --git a/arch/arm/mach-zynq/include/mach/entry-macro.S b/arch/arm/mach-zynq/include/mach/entry-macro.S
+index 3cfc01b..d621fb7 100644
+--- a/arch/arm/mach-zynq/include/mach/entry-macro.S
++++ b/arch/arm/mach-zynq/include/mach/entry-macro.S
+@@ -20,9 +20,6 @@
+ * GNU General Public License for more details.
+ */
+
+-#include <mach/hardware.h>
+-#include <asm/hardware/entry-macro-gic.S>
+-
+ .macro disable_fiq
+ .endm
+
+diff --git a/arch/arm/mach-zynq/include/mach/vmalloc.h b/arch/arm/mach-zynq/include/mach/vmalloc.h
+deleted file mode 100644
+index 2398eff..0000000
+--- a/arch/arm/mach-zynq/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,20 +0,0 @@
+-/* arch/arm/mach-zynq/include/mach/vmalloc.h
+- *
+- * Copyright (C) 2011 Xilinx
+- *
+- * This software is licensed under the terms of the GNU General Public
+- * License version 2, as published by the Free Software Foundation, and
+- * may be copied, distributed, and modified under those terms.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- */
+-
+-#ifndef __MACH_VMALLOC_H__
+-#define __MACH_VMALLOC_H__
+-
+-#define VMALLOC_END 0xE0000000UL
+-
+-#endif
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 1aa664a..98d64fa 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -128,7 +128,20 @@ static void __dma_free_buffer(struct page *page, size_t size)
+ */
+ static pte_t **consistent_pte;
+
++#ifdef CONFIG_FB_DA8XX_CONSISTENT_DMA_SIZE
++
++#if (CONFIG_FB_DA8XX_CONSISTENT_DMA_SIZE == 0)
++#undef CONFIG_FB_DA8XX_CONSISTENT_DMA_SIZE
++#define CONFIG_FB_DA8XX_CONSISTENT_DMA_SIZE 2
++#endif
++
++#define DEFAULT_CONSISTENT_DMA_SIZE \
++ (((CONFIG_FB_DA8XX_CONSISTENT_DMA_SIZE + 1) & ~1) * 1024 * 1024)
++
++#else
+ #define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M
++#endif
++
+
+ unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
+
+diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
+index 2be9139..296ad2e 100644
+--- a/arch/arm/mm/idmap.c
++++ b/arch/arm/mm/idmap.c
+@@ -78,7 +78,7 @@ void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end)
+ * the user-mode pages. This will then ensure that we have predictable
+ * results when turning the mmu off
+ */
+-void setup_mm_for_reboot(char mode)
++void setup_mm_for_reboot(void)
+ {
+ /*
+ * We need to access to user-mode page tables here. For kernel threads
+diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
+index fbdd12e..786addd 100644
+--- a/arch/arm/mm/init.c
++++ b/arch/arm/mm/init.c
+@@ -20,7 +20,6 @@
+ #include <linux/highmem.h>
+ #include <linux/gfp.h>
+ #include <linux/memblock.h>
+-#include <linux/sort.h>
+
+ #include <asm/mach-types.h>
+ #include <asm/prom.h>
+@@ -134,30 +133,18 @@ void show_mem(unsigned int filter)
+ }
+
+ static void __init find_limits(unsigned long *min, unsigned long *max_low,
+- unsigned long *max_high)
++ unsigned long *max_high)
+ {
+ struct meminfo *mi = &meminfo;
+ int i;
+
+- *min = -1UL;
+- *max_low = *max_high = 0;
+-
+- for_each_bank (i, mi) {
+- struct membank *bank = &mi->bank[i];
+- unsigned long start, end;
+-
+- start = bank_pfn_start(bank);
+- end = bank_pfn_end(bank);
+-
+- if (*min > start)
+- *min = start;
+- if (*max_high < end)
+- *max_high = end;
+- if (bank->highmem)
+- continue;
+- if (*max_low < end)
+- *max_low = end;
+- }
++ /* This assumes the meminfo array is properly sorted */
++ *min = bank_pfn_start(&mi->bank[0]);
++ for_each_bank (i, mi)
++ if (mi->bank[i].highmem)
++ break;
++ *max_low = bank_pfn_end(&mi->bank[i - 1]);
++ *max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]);
+ }
+
+ static void __init arm_bootmem_init(unsigned long start_pfn,
+@@ -319,19 +306,10 @@ static void arm_memory_present(void)
+ }
+ #endif
+
+-static int __init meminfo_cmp(const void *_a, const void *_b)
+-{
+- const struct membank *a = _a, *b = _b;
+- long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+- return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+-}
+-
+ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
+ {
+ int i;
+
+- sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+-
+ memblock_init();
+ for (i = 0; i < mi->nr_banks; i++)
+ memblock_add(mi->bank[i].start, mi->bank[i].size);
+@@ -403,8 +381,6 @@ void __init bootmem_init(void)
+ */
+ arm_bootmem_free(min, max_low, max_high);
+
+- high_memory = __va(((phys_addr_t)max_low << PAGE_SHIFT) - 1) + 1;
+-
+ /*
+ * This doesn't seem to be used by the Linux memory manager any
+ * more, but is used by ll_rw_block. If we can get rid of it, we
+diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
+index bdb248c..12c7ad2 100644
+--- a/arch/arm/mm/ioremap.c
++++ b/arch/arm/mm/ioremap.c
+@@ -36,12 +36,6 @@
+ #include <asm/mach/map.h>
+ #include "mm.h"
+
+-/*
+- * Used by ioremap() and iounmap() code to mark (super)section-mapped
+- * I/O regions in vm_struct->flags field.
+- */
+-#define VM_ARM_SECTION_MAPPING 0x80000000
+-
+ int ioremap_page(unsigned long virt, unsigned long phys,
+ const struct mem_type *mtype)
+ {
+@@ -201,12 +195,6 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
+ if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
+ return NULL;
+
+- /*
+- * Don't allow RAM to be mapped - this causes problems with ARMv6+
+- */
+- if (WARN_ON(pfn_valid(pfn)))
+- return NULL;
+-
+ type = get_mem_type(mtype);
+ if (!type)
+ return NULL;
+@@ -216,6 +204,34 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
+ */
+ size = PAGE_ALIGN(offset + size);
+
++ /*
++ * Try to reuse one of the static mapping whenever possible.
++ */
++ read_lock(&vmlist_lock);
++ for (area = vmlist; area; area = area->next) {
++ if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
++ break;
++ if (!(area->flags & VM_ARM_STATIC_MAPPING))
++ continue;
++ if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
++ continue;
++ if (__phys_to_pfn(area->phys_addr) > pfn ||
++ __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
++ continue;
++ /* we can drop the lock here as we know *area is static */
++ read_unlock(&vmlist_lock);
++ addr = (unsigned long)area->addr;
++ addr += __pfn_to_phys(pfn) - area->phys_addr;
++ return (void __iomem *) (offset + addr);
++ }
++ read_unlock(&vmlist_lock);
++
++ /*
++ * Don't allow RAM to be mapped - this causes problems with ARMv6+
++ */
++ if (WARN_ON(pfn_valid(pfn)))
++ return NULL;
++
+ area = get_vm_area_caller(size, VM_IOREMAP, caller);
+ if (!area)
+ return NULL;
+@@ -313,28 +329,34 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
+ void __iounmap(volatile void __iomem *io_addr)
+ {
+ void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
+-#ifndef CONFIG_SMP
+- struct vm_struct **p, *tmp;
++ struct vm_struct *vm;
+
+- /*
+- * If this is a section based mapping we need to handle it
+- * specially as the VM subsystem does not know how to handle
+- * such a beast. We need the lock here b/c we need to clear
+- * all the mappings before the area can be reclaimed
+- * by someone else.
+- */
+- write_lock(&vmlist_lock);
+- for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
+- if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
+- if (tmp->flags & VM_ARM_SECTION_MAPPING) {
+- unmap_area_sections((unsigned long)tmp->addr,
+- tmp->size);
+- }
++ read_lock(&vmlist_lock);
++ for (vm = vmlist; vm; vm = vm->next) {
++ if (vm->addr > addr)
++ break;
++ if (!(vm->flags & VM_IOREMAP))
++ continue;
++ /* If this is a static mapping we must leave it alone */
++ if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
++ (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
++ read_unlock(&vmlist_lock);
++ return;
++ }
++#ifndef CONFIG_SMP
++ /*
++ * If this is a section based mapping we need to handle it
++ * specially as the VM subsystem does not know how to handle
++ * such a beast.
++ */
++ if ((vm->addr == addr) &&
++ (vm->flags & VM_ARM_SECTION_MAPPING)) {
++ unmap_area_sections((unsigned long)vm->addr, vm->size);
+ break;
+ }
+- }
+- write_unlock(&vmlist_lock);
+ #endif
++ }
++ read_unlock(&vmlist_lock);
+
+ vunmap(addr);
+ }
+diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
+index ad7cce3..70f6d3ea 100644
+--- a/arch/arm/mm/mm.h
++++ b/arch/arm/mm/mm.h
+@@ -21,6 +21,20 @@ const struct mem_type *get_mem_type(unsigned int type);
+
+ extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
+
++/*
++ * ARM specific vm_struct->flags bits.
++ */
++
++/* (super)section-mapped I/O regions used by ioremap()/iounmap() */
++#define VM_ARM_SECTION_MAPPING 0x80000000
++
++/* permanent static mappings from iotable_init() */
++#define VM_ARM_STATIC_MAPPING 0x40000000
++
++/* mapping type (attributes) for permanent static mappings */
++#define VM_ARM_MTYPE(mt) ((mt) << 20)
++#define VM_ARM_MTYPE_MASK (0x1f << 20)
++
+ #endif
+
+ #ifdef CONFIG_ZONE_DMA
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index dc8c550..27e366a 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -15,6 +15,7 @@
+ #include <linux/nodemask.h>
+ #include <linux/memblock.h>
+ #include <linux/fs.h>
++#include <linux/vmalloc.h>
+
+ #include <asm/cputype.h>
+ #include <asm/sections.h>
+@@ -529,13 +530,18 @@ EXPORT_SYMBOL(phys_mem_access_prot);
+
+ #define vectors_base() (vectors_high() ? 0xffff0000 : 0)
+
+-static void __init *early_alloc(unsigned long sz)
++static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
+ {
+- void *ptr = __va(memblock_alloc(sz, sz));
++ void *ptr = __va(memblock_alloc(sz, align));
+ memset(ptr, 0, sz);
+ return ptr;
+ }
+
++static void __init *early_alloc(unsigned long sz)
++{
++ return early_alloc_aligned(sz, sz);
++}
++
+ static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
+ {
+ if (pmd_none(*pmd)) {
+@@ -685,9 +691,10 @@ static void __init create_mapping(struct map_desc *md)
+ }
+
+ if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
+- md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
++ md->virtual >= PAGE_OFFSET &&
++ (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
+ printk(KERN_WARNING "BUG: mapping for 0x%08llx"
+- " at 0x%08lx overlaps vmalloc space\n",
++ " at 0x%08lx out of vmalloc space\n",
+ (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+ }
+
+@@ -729,18 +736,33 @@ static void __init create_mapping(struct map_desc *md)
+ */
+ void __init iotable_init(struct map_desc *io_desc, int nr)
+ {
+- int i;
++ struct map_desc *md;
++ struct vm_struct *vm;
++
++ if (!nr)
++ return;
+
+- for (i = 0; i < nr; i++)
+- create_mapping(io_desc + i);
++ vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
++
++ for (md = io_desc; nr; md++, nr--) {
++ create_mapping(md);
++ vm->addr = (void *)(md->virtual & PAGE_MASK);
++ vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
++ vm->phys_addr = __pfn_to_phys(md->pfn);
++ vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
++ vm->flags |= VM_ARM_MTYPE(md->type);
++ vm->caller = iotable_init;
++ vm_area_add_early(vm++);
++ }
+ }
+
+-static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);
++static void * __initdata vmalloc_min =
++ (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
+
+ /*
+ * vmalloc=size forces the vmalloc area to be exactly 'size'
+ * bytes. This can be used to increase (or decrease) the vmalloc
+- * area - the default is 128m.
++ * area - the default is 240m.
+ */
+ static int __init early_vmalloc(char *arg)
+ {
+@@ -860,6 +882,7 @@ void __init sanity_check_meminfo(void)
+ }
+ #endif
+ meminfo.nr_banks = j;
++ high_memory = __va(lowmem_limit - 1) + 1;
+ memblock_set_current_limit(lowmem_limit);
+ }
+
+@@ -890,10 +913,10 @@ static inline void prepare_page_table(void)
+
+ /*
+ * Clear out all the kernel space mappings, except for the first
+- * memory bank, up to the end of the vmalloc region.
++ * memory bank, up to the vmalloc region.
+ */
+ for (addr = __phys_to_virt(end);
+- addr < VMALLOC_END; addr += PMD_SIZE)
++ addr < VMALLOC_START; addr += PMD_SIZE)
+ pmd_clear(pmd_off_k(addr));
+ }
+
+@@ -920,8 +943,8 @@ void __init arm_mm_memblock_reserve(void)
+ }
+
+ /*
+- * Set up device the mappings. Since we clear out the page tables for all
+- * mappings above VMALLOC_END, we will remove any debug device mappings.
++ * Set up the device mappings. Since we clear out the page tables for all
++ * mappings above VMALLOC_START, we will remove any debug device mappings.
+ * This means you have to be careful how you debug this function, or any
+ * called function. This means you can't use any function or debugging
+ * method which may touch any device, otherwise the kernel _will_ crash.
+@@ -936,7 +959,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
+ */
+ vectors_page = early_alloc(PAGE_SIZE);
+
+- for (addr = VMALLOC_END; addr; addr += PMD_SIZE)
++ for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
+ pmd_clear(pmd_off_k(addr));
+
+ /*
+diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
+index 941a98c..4fc6794 100644
+--- a/arch/arm/mm/nommu.c
++++ b/arch/arm/mm/nommu.c
+@@ -29,6 +29,8 @@ void __init arm_mm_memblock_reserve(void)
+
+ void __init sanity_check_meminfo(void)
+ {
++ phys_addr_t end = bank_phys_end(&meminfo.bank[meminfo.nr_banks - 1]);
++ high_memory = __va(end - 1) + 1;
+ }
+
+ /*
+@@ -43,7 +45,7 @@ void __init paging_init(struct machine_desc *mdesc)
+ /*
+ * We don't need to do anything here for nommu machines.
+ */
+-void setup_mm_for_reboot(char mode)
++void setup_mm_for_reboot(void)
+ {
+ }
+
+diff --git a/arch/arm/plat-iop/Makefile b/arch/arm/plat-iop/Makefile
+index 69b09c1..90f7153 100644
+--- a/arch/arm/plat-iop/Makefile
++++ b/arch/arm/plat-iop/Makefile
+@@ -10,7 +10,6 @@ obj-$(CONFIG_ARCH_IOP32X) += i2c.o
+ obj-$(CONFIG_ARCH_IOP32X) += pci.o
+ obj-$(CONFIG_ARCH_IOP32X) += setup.o
+ obj-$(CONFIG_ARCH_IOP32X) += time.o
+-obj-$(CONFIG_ARCH_IOP32X) += io.o
+ obj-$(CONFIG_ARCH_IOP32X) += cp6.o
+ obj-$(CONFIG_ARCH_IOP32X) += adma.o
+ obj-$(CONFIG_ARCH_IOP32X) += pmu.o
+@@ -21,7 +20,6 @@ obj-$(CONFIG_ARCH_IOP33X) += i2c.o
+ obj-$(CONFIG_ARCH_IOP33X) += pci.o
+ obj-$(CONFIG_ARCH_IOP33X) += setup.o
+ obj-$(CONFIG_ARCH_IOP33X) += time.o
+-obj-$(CONFIG_ARCH_IOP33X) += io.o
+ obj-$(CONFIG_ARCH_IOP33X) += cp6.o
+ obj-$(CONFIG_ARCH_IOP33X) += adma.o
+ obj-$(CONFIG_ARCH_IOP33X) += pmu.o
+diff --git a/arch/arm/plat-iop/io.c b/arch/arm/plat-iop/io.c
+deleted file mode 100644
+index e15bc17..0000000
+--- a/arch/arm/plat-iop/io.c
++++ /dev/null
+@@ -1,59 +0,0 @@
+-/*
+- * iop3xx custom ioremap implementation
+- * Copyright (c) 2006, Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+- * Place - Suite 330, Boston, MA 02111-1307 USA.
+- *
+- */
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+-#include <linux/io.h>
+-#include <mach/hardware.h>
+-
+-void * __iomem __iop3xx_ioremap(unsigned long cookie, size_t size,
+- unsigned int mtype)
+-{
+- void __iomem * retval;
+-
+- switch (cookie) {
+- case IOP3XX_PCI_LOWER_IO_PA ... IOP3XX_PCI_UPPER_IO_PA:
+- retval = (void *) IOP3XX_PCI_IO_PHYS_TO_VIRT(cookie);
+- break;
+- case IOP3XX_PERIPHERAL_PHYS_BASE ... IOP3XX_PERIPHERAL_UPPER_PA:
+- retval = (void *) IOP3XX_PMMR_PHYS_TO_VIRT(cookie);
+- break;
+- default:
+- retval = __arm_ioremap_caller(cookie, size, mtype,
+- __builtin_return_address(0));
+- }
+-
+- return retval;
+-}
+-EXPORT_SYMBOL(__iop3xx_ioremap);
+-
+-void __iop3xx_iounmap(void __iomem *addr)
+-{
+- extern void __iounmap(volatile void __iomem *addr);
+-
+- switch ((u32) addr) {
+- case IOP3XX_PCI_LOWER_IO_VA ... IOP3XX_PCI_UPPER_IO_VA:
+- case IOP3XX_PERIPHERAL_VIRT_BASE ... IOP3XX_PERIPHERAL_UPPER_VA:
+- goto skip;
+- }
+- __iounmap(addr);
+-
+-skip:
+- return;
+-}
+-EXPORT_SYMBOL(__iop3xx_iounmap);
+diff --git a/arch/arm/plat-mxc/Makefile b/arch/arm/plat-mxc/Makefile
+index b9f0f5f..076db84f 100644
+--- a/arch/arm/plat-mxc/Makefile
++++ b/arch/arm/plat-mxc/Makefile
+@@ -5,7 +5,6 @@
+ # Common support
+ obj-y := clock.o time.o devices.o cpu.o system.o irq-common.o
+
+-obj-$(CONFIG_ARM_GIC) += gic.o
+ obj-$(CONFIG_MXC_TZIC) += tzic.o
+ obj-$(CONFIG_MXC_AVIC) += avic.o
+
+diff --git a/arch/arm/plat-mxc/gic.c b/arch/arm/plat-mxc/gic.c
+deleted file mode 100644
+index 12f8f81..0000000
+--- a/arch/arm/plat-mxc/gic.c
++++ /dev/null
+@@ -1,41 +0,0 @@
+-/*
+- * Copyright 2011 Freescale Semiconductor, Inc.
+- * Copyright 2011 Linaro Ltd.
+- *
+- * The code contained herein is licensed under the GNU General Public
+- * License. You may obtain a copy of the GNU General Public License
+- * Version 2 or later at the following locations:
+- *
+- * http://www.opensource.org/licenses/gpl-license.html
+- * http://www.gnu.org/copyleft/gpl.html
+- */
+-
+-#include <linux/io.h>
+-#include <asm/exception.h>
+-#include <asm/localtimer.h>
+-#include <asm/hardware/gic.h>
+-#ifdef CONFIG_SMP
+-#include <asm/smp.h>
+-#endif
+-
+-asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+-{
+- u32 irqstat, irqnr;
+-
+- do {
+- irqstat = readl_relaxed(gic_cpu_base_addr + GIC_CPU_INTACK);
+- irqnr = irqstat & 0x3ff;
+- if (irqnr == 1023)
+- break;
+-
+- if (irqnr > 15 && irqnr < 1021)
+- handle_IRQ(irqnr, regs);
+-#ifdef CONFIG_SMP
+- else {
+- writel_relaxed(irqstat, gic_cpu_base_addr +
+- GIC_CPU_EOI);
+- handle_IPI(irqnr, regs);
+- }
+-#endif
+- } while (1);
+-}
+diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h
+index c75f254..6698cae 100644
+--- a/arch/arm/plat-mxc/include/mach/common.h
++++ b/arch/arm/plat-mxc/include/mach/common.h
+@@ -89,7 +89,6 @@ extern void imx_print_silicon_rev(const char *cpu, int srev);
+
+ void avic_handle_irq(struct pt_regs *);
+ void tzic_handle_irq(struct pt_regs *);
+-void gic_handle_irq(struct pt_regs *);
+
+ #define imx1_handle_irq avic_handle_irq
+ #define imx21_handle_irq avic_handle_irq
+diff --git a/arch/arm/plat-mxc/include/mach/entry-macro.S b/arch/arm/plat-mxc/include/mach/entry-macro.S
+index ca5cf26..def5d30 100644
+--- a/arch/arm/plat-mxc/include/mach/entry-macro.S
++++ b/arch/arm/plat-mxc/include/mach/entry-macro.S
+@@ -9,19 +9,8 @@
+ * published by the Free Software Foundation.
+ */
+
+-/* Unused, we use CONFIG_MULTI_IRQ_HANDLER */
+-
+ .macro disable_fiq
+ .endm
+
+- .macro get_irqnr_preamble, base, tmp
+- .endm
+-
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+-
+- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+- .endm
+-
+- .macro test_for_ipi, irqnr, irqstat, base, tmp
+- .endm
+diff --git a/arch/arm/plat-mxc/include/mach/mx1.h b/arch/arm/plat-mxc/include/mach/mx1.h
+index 97b19e7..2b7c08d 100644
+--- a/arch/arm/plat-mxc/include/mach/mx1.h
++++ b/arch/arm/plat-mxc/include/mach/mx1.h
+@@ -12,8 +12,6 @@
+ #ifndef __MACH_MX1_H__
+ #define __MACH_MX1_H__
+
+-#include <mach/vmalloc.h>
+-
+ /*
+ * Memory map
+ */
+diff --git a/arch/arm/plat-mxc/include/mach/vmalloc.h b/arch/arm/plat-mxc/include/mach/vmalloc.h
+deleted file mode 100644
+index ef6379c..0000000
+--- a/arch/arm/plat-mxc/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,22 +0,0 @@
+-/*
+- * Copyright (C) 2000 Russell King.
+- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- */
+-
+-#ifndef __ASM_ARCH_MXC_VMALLOC_H__
+-#define __ASM_ARCH_MXC_VMALLOC_H__
+-
+-/* vmalloc ending address */
+-#define VMALLOC_END 0xf4000000UL
+-
+-#endif /* __ASM_ARCH_MXC_VMALLOC_H__ */
+diff --git a/arch/arm/plat-mxc/system.c b/arch/arm/plat-mxc/system.c
+index d65fb31..7e5c76e 100644
+--- a/arch/arm/plat-mxc/system.c
++++ b/arch/arm/plat-mxc/system.c
+@@ -71,7 +71,7 @@ void arch_reset(char mode, const char *cmd)
+ mdelay(50);
+
+ /* we'll take a jump through zero as a poor second */
+- cpu_reset(0);
++ soft_restart(0);
+ }
+
+ void mxc_arch_reset_init(void __iomem *base)
+diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
+index aa59f42..734009a 100644
+--- a/arch/arm/plat-omap/Kconfig
++++ b/arch/arm/plat-omap/Kconfig
+@@ -24,6 +24,8 @@ config ARCH_OMAP2PLUS
+ select CLKDEV_LOOKUP
+ select GENERIC_IRQ_CHIP
+ select OMAP_DM_TIMER
++ select USE_OF
++ select PROC_DEVICETREE
+ help
+ "Systems based on OMAP2, OMAP3 or OMAP4"
+
+diff --git a/arch/arm/plat-omap/Makefile b/arch/arm/plat-omap/Makefile
+index 9852622..500f671 100644
+--- a/arch/arm/plat-omap/Makefile
++++ b/arch/arm/plat-omap/Makefile
+@@ -3,8 +3,15 @@
+ #
+
+ # Common support
+-obj-y := common.o sram.o clock.o devices.o dma.o mux.o \
+- usb.o fb.o io.o counter_32k.o
++obj-y := common.o sram.o clock.o devices.o mux.o \
++ usb.o fb.o counter_32k.o
++
++ifeq ($(CONFIG_OMAP3_EDMA),y)
++ obj-y += sdma2edma.o
++else
++ obj-y += dma.o
++endif
++
+ obj-m :=
+ obj-n :=
+ obj- :=
+@@ -19,7 +26,6 @@ obj-$(CONFIG_ARCH_OMAP4) += omap_device.o
+
+ obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
+
+-obj-$(CONFIG_CPU_FREQ) += cpu-omap.o
+ obj-$(CONFIG_OMAP_DM_TIMER) += dmtimer.o
+ obj-$(CONFIG_OMAP_DEBUG_DEVICES) += debug-devices.o
+ obj-$(CONFIG_OMAP_DEBUG_LEDS) += debug-leds.o
+diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c
+index d9f10a3..06383b5 100644
+--- a/arch/arm/plat-omap/common.c
++++ b/arch/arm/plat-omap/common.c
+@@ -14,6 +14,7 @@
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+ #include <linux/io.h>
++#include <linux/dma-mapping.h>
+ #include <linux/omapfb.h>
+
+ #include <plat/common.h>
+@@ -21,6 +22,8 @@
+ #include <plat/vram.h>
+ #include <plat/dsp.h>
+
++#include <plat/omap-secure.h>
++
+
+ #define NO_LENGTH_CHECK 0xffffffff
+
+@@ -65,4 +68,12 @@ void __init omap_reserve(void)
+ omapfb_reserve_sdram_memblock();
+ omap_vram_reserve_sdram_memblock();
+ omap_dsp_reserve_sdram_memblock();
++ omap_secure_ram_reserve_memblock();
++}
++
++void __init omap_init_consistent_dma_size(void)
++{
++#ifdef CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE
++ init_consistent_dma_size(CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE << 20);
++#endif
+ }
+diff --git a/arch/arm/plat-omap/cpu-omap.c b/arch/arm/plat-omap/cpu-omap.c
+deleted file mode 100644
+index da4f68d..0000000
+--- a/arch/arm/plat-omap/cpu-omap.c
++++ /dev/null
+@@ -1,171 +0,0 @@
+-/*
+- * linux/arch/arm/plat-omap/cpu-omap.c
+- *
+- * CPU frequency scaling for OMAP
+- *
+- * Copyright (C) 2005 Nokia Corporation
+- * Written by Tony Lindgren <tony@atomide.com>
+- *
+- * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-#include <linux/types.h>
+-#include <linux/kernel.h>
+-#include <linux/sched.h>
+-#include <linux/cpufreq.h>
+-#include <linux/delay.h>
+-#include <linux/init.h>
+-#include <linux/err.h>
+-#include <linux/clk.h>
+-#include <linux/io.h>
+-
+-#include <mach/hardware.h>
+-#include <plat/clock.h>
+-#include <asm/system.h>
+-
+-#define VERY_HI_RATE 900000000
+-
+-static struct cpufreq_frequency_table *freq_table;
+-
+-#ifdef CONFIG_ARCH_OMAP1
+-#define MPU_CLK "mpu"
+-#else
+-#define MPU_CLK "virt_prcm_set"
+-#endif
+-
+-static struct clk *mpu_clk;
+-
+-/* TODO: Add support for SDRAM timing changes */
+-
+-static int omap_verify_speed(struct cpufreq_policy *policy)
+-{
+- if (freq_table)
+- return cpufreq_frequency_table_verify(policy, freq_table);
+-
+- if (policy->cpu)
+- return -EINVAL;
+-
+- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+- policy->cpuinfo.max_freq);
+-
+- policy->min = clk_round_rate(mpu_clk, policy->min * 1000) / 1000;
+- policy->max = clk_round_rate(mpu_clk, policy->max * 1000) / 1000;
+- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+- policy->cpuinfo.max_freq);
+- return 0;
+-}
+-
+-static unsigned int omap_getspeed(unsigned int cpu)
+-{
+- unsigned long rate;
+-
+- if (cpu)
+- return 0;
+-
+- rate = clk_get_rate(mpu_clk) / 1000;
+- return rate;
+-}
+-
+-static int omap_target(struct cpufreq_policy *policy,
+- unsigned int target_freq,
+- unsigned int relation)
+-{
+- struct cpufreq_freqs freqs;
+- int ret = 0;
+-
+- /* Ensure desired rate is within allowed range. Some govenors
+- * (ondemand) will just pass target_freq=0 to get the minimum. */
+- if (target_freq < policy->min)
+- target_freq = policy->min;
+- if (target_freq > policy->max)
+- target_freq = policy->max;
+-
+- freqs.old = omap_getspeed(0);
+- freqs.new = clk_round_rate(mpu_clk, target_freq * 1000) / 1000;
+- freqs.cpu = 0;
+-
+- if (freqs.old == freqs.new)
+- return ret;
+-
+- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+-#ifdef CONFIG_CPU_FREQ_DEBUG
+- printk(KERN_DEBUG "cpufreq-omap: transition: %u --> %u\n",
+- freqs.old, freqs.new);
+-#endif
+- ret = clk_set_rate(mpu_clk, freqs.new * 1000);
+- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+-
+- return ret;
+-}
+-
+-static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
+-{
+- int result = 0;
+-
+- mpu_clk = clk_get(NULL, MPU_CLK);
+- if (IS_ERR(mpu_clk))
+- return PTR_ERR(mpu_clk);
+-
+- if (policy->cpu != 0)
+- return -EINVAL;
+-
+- policy->cur = policy->min = policy->max = omap_getspeed(0);
+-
+- clk_init_cpufreq_table(&freq_table);
+- if (freq_table) {
+- result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+- if (!result)
+- cpufreq_frequency_table_get_attr(freq_table,
+- policy->cpu);
+- } else {
+- policy->cpuinfo.min_freq = clk_round_rate(mpu_clk, 0) / 1000;
+- policy->cpuinfo.max_freq = clk_round_rate(mpu_clk,
+- VERY_HI_RATE) / 1000;
+- }
+-
+- /* FIXME: what's the actual transition time? */
+- policy->cpuinfo.transition_latency = 300 * 1000;
+-
+- return 0;
+-}
+-
+-static int omap_cpu_exit(struct cpufreq_policy *policy)
+-{
+- clk_exit_cpufreq_table(&freq_table);
+- clk_put(mpu_clk);
+- return 0;
+-}
+-
+-static struct freq_attr *omap_cpufreq_attr[] = {
+- &cpufreq_freq_attr_scaling_available_freqs,
+- NULL,
+-};
+-
+-static struct cpufreq_driver omap_driver = {
+- .flags = CPUFREQ_STICKY,
+- .verify = omap_verify_speed,
+- .target = omap_target,
+- .get = omap_getspeed,
+- .init = omap_cpu_init,
+- .exit = omap_cpu_exit,
+- .name = "omap",
+- .attr = omap_cpufreq_attr,
+-};
+-
+-static int __init omap_cpufreq_init(void)
+-{
+- return cpufreq_register_driver(&omap_driver);
+-}
+-
+-arch_initcall(omap_cpufreq_init);
+-
+-/*
+- * if ever we want to remove this, upon cleanup call:
+- *
+- * cpufreq_unregister_driver()
+- * cpufreq_frequency_table_put_attr()
+- */
+-
+diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
+index c22217c..002fb4d 100644
+--- a/arch/arm/plat-omap/dma.c
++++ b/arch/arm/plat-omap/dma.c
+@@ -1034,6 +1034,18 @@ dma_addr_t omap_get_dma_src_pos(int lch)
+ if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0)
+ offset = p->dma_read(CSAC, lch);
+
++ if (!cpu_is_omap15xx()) {
++ /*
++ * CDAC == 0 indicates that the DMA transfer on the channel has
++ * not been started (no data has been transferred so far).
++ * Return the programmed source start address in this case.
++ */
++ if (likely(p->dma_read(CDAC, lch)))
++ offset = p->dma_read(CSAC, lch);
++ else
++ offset = p->dma_read(CSSA, lch);
++ }
++
+ if (cpu_class_is_omap1())
+ offset |= (p->dma_read(CSSA, lch) & 0xFFFF0000);
+
+@@ -1062,8 +1074,16 @@ dma_addr_t omap_get_dma_dst_pos(int lch)
+ * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
+ * read before the DMA controller finished disabling the channel.
+ */
+- if (!cpu_is_omap15xx() && offset == 0)
++ if (!cpu_is_omap15xx() && offset == 0) {
+ offset = p->dma_read(CDAC, lch);
++ /*
++ * CDAC == 0 indicates that the DMA transfer on the channel has
++ * not been started (no data has been transferred so far).
++ * Return the programmed destination start address in this case.
++ */
++ if (unlikely(!offset))
++ offset = p->dma_read(CDSA, lch);
++ }
+
+ if (cpu_class_is_omap1())
+ offset |= (p->dma_read(CDSA, lch) & 0xFFFF0000);
+diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
+index af3b92b..329586a 100644
+--- a/arch/arm/plat-omap/dmtimer.c
++++ b/arch/arm/plat-omap/dmtimer.c
+@@ -134,7 +134,6 @@ static void omap_dm_timer_reset(struct omap_dm_timer *timer)
+ int omap_dm_timer_prepare(struct omap_dm_timer *timer)
+ {
+ struct dmtimer_platform_data *pdata = timer->pdev->dev.platform_data;
+- int ret;
+
+ timer->fclk = clk_get(&timer->pdev->dev, "fck");
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(timer->fclk))) {
+@@ -146,10 +145,8 @@ int omap_dm_timer_prepare(struct omap_dm_timer *timer)
+ if (pdata->needs_manual_reset)
+ omap_dm_timer_reset(timer);
+
+- ret = omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ);
+-
+ timer->posted = 1;
+- return ret;
++ return 0;
+ }
+
+ struct omap_dm_timer *omap_dm_timer_request(void)
+@@ -494,6 +491,40 @@ int omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on,
+ }
+ EXPORT_SYMBOL_GPL(omap_dm_timer_set_pwm);
+
++int omap_dm_timer_set_capture(struct omap_dm_timer *timer, bool lht,
++ bool hlt, bool cm)
++{
++ u32 l;
++
++ if (unlikely(!timer))
++ return -EINVAL;
++
++ omap_dm_timer_enable(timer);
++ l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
++
++ if (lht && hlt)
++ l |= OMAP_TIMER_CTRL_TCM_BOTHEDGES;
++ else if (lht)
++ l |= OMAP_TIMER_CTRL_TCM_LOWTOHIGH;
++ else if (hlt)
++ l |= OMAP_TIMER_CTRL_TCM_HIGHTOLOW;
++ else
++ l &= ~OMAP_TIMER_CTRL_TCM_BOTHEDGES;
++
++ if (cm)
++ l |= OMAP_TIMER_CTRL_CAPTMODE;
++ else
++ l &= ~OMAP_TIMER_CTRL_CAPTMODE;
++
++ omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
++
++ /* Save the context */
++ timer->context.tclr = l;
++ omap_dm_timer_disable(timer);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(omap_dm_timer_set_capture);
++
+ int omap_dm_timer_set_prescaler(struct omap_dm_timer *timer, int prescaler)
+ {
+ u32 l;
+diff --git a/arch/arm/plat-omap/i2c.c b/arch/arm/plat-omap/i2c.c
+index db071bc..b7fccc8 100644
+--- a/arch/arm/plat-omap/i2c.c
++++ b/arch/arm/plat-omap/i2c.c
+@@ -148,7 +148,8 @@ static inline int omap2_i2c_add_bus(int bus_id)
+ struct omap_i2c_bus_platform_data *pdata;
+ struct omap_i2c_dev_attr *dev_attr;
+
+- omap2_i2c_mux_pins(bus_id);
++ if (!cpu_is_am33xx())
++ omap2_i2c_mux_pins(bus_id);
+
+ l = snprintf(oh_name, MAX_OMAP_I2C_HWMOD_NAME_LEN, "i2c%d", bus_id);
+ WARN(l >= MAX_OMAP_I2C_HWMOD_NAME_LEN,
+@@ -179,6 +180,8 @@ static inline int omap2_i2c_add_bus(int bus_id)
+ */
+ if (cpu_is_omap34xx())
+ pdata->set_mpu_wkup_lat = omap_pm_set_max_mpu_wakeup_lat_compat;
++
++ pdata->device_reset = omap_device_reset;
+ pdev = omap_device_build(name, bus_id, oh, pdata,
+ sizeof(struct omap_i2c_bus_platform_data),
+ NULL, 0, 0);
+diff --git a/arch/arm/plat-omap/include/plat/am33xx.h b/arch/arm/plat-omap/include/plat/am33xx.h
+new file mode 100644
+index 0000000..a16e72c
+--- /dev/null
++++ b/arch/arm/plat-omap/include/plat/am33xx.h
+@@ -0,0 +1,82 @@
++/*
++ * This file contains the address info for various AM33XX modules.
++ *
++ * Copyright (C) 2011 Texas Instruments, Inc. - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __ASM_ARCH_AM33XX_H
++#define __ASM_ARCH_AM33XX_H
++
++#define L4_SLOW_AM33XX_BASE 0x48000000
++
++#define AM33XX_SCM_BASE 0x44E10000
++#define AM33XX_CTRL_BASE AM33XX_SCM_BASE
++#define AM33XX_PRCM_BASE 0x44E00000
++
++#define AM33XX_EMIF0_BASE 0x4C000000
++
++#define AM33XX_GPIO0_BASE 0x44E07000
++#define AM33XX_GPIO1_BASE 0x4804C000
++#define AM33XX_GPIO2_BASE 0x481AC000
++#define AM33XX_GPIO3_BASE 0x481AE000
++
++#define AM33XX_TIMER0_BASE 0x44E05000
++#define AM33XX_TIMER1_BASE 0x44E31000
++#define AM33XX_TIMER2_BASE 0x48040000
++#define AM33XX_TIMER3_BASE 0x48042000
++#define AM33XX_TIMER4_BASE 0x48044000
++#define AM33XX_TIMER5_BASE 0x48046000
++#define AM33XX_TIMER6_BASE 0x48048000
++#define AM33XX_TIMER7_BASE 0x4804A000
++
++#define AM33XX_WDT1_BASE 0x44E35000
++
++#define AM33XX_TSC_BASE 0x44E0D000
++#define AM33XX_RTC_BASE 0x44E3E000
++
++#define AM33XX_ASP0_BASE 0x48038000
++#define AM33XX_ASP1_BASE 0x4803C000
++
++#define AM33XX_MAILBOX0_BASE 0x480C8000
++
++#define AM33XX_MMC0_BASE 0x48060100
++#define AM33XX_MMC1_BASE 0x481D8100
++#define AM33XX_MMC2_BASE 0x47810100
++
++#define AM33XX_I2C0_BASE 0x44E0B000
++#define AM33XX_I2C1_BASE 0x4802A000
++#define AM33XX_I2C2_BASE 0x4819C000
++
++#define AM33XX_SPI0_BASE 0x48030000
++#define AM33XX_SPI1_BASE 0x481A0000
++
++#define AM33XX_USBSS_BASE 0x47400000
++#define AM33XX_USB0_BASE 0x47401000
++#define AM33XX_USB1_BASE 0x47401800
++
++#define AM33XX_ELM_BASE 0x48080000
++
++#define AM33XX_ASP0_BASE 0x48038000
++#define AM33XX_ASP1_BASE 0x4803C000
++
++#define AM33XX_CPSW_BASE 0x4A100000
++#define AM33XX_CPSW_MDIO_BASE 0x4A101000
++#define AM33XX_CPSW_SS_BASE 0x4A101200
++
++#define AM33XX_ICSS_BASE 0x4A300000
++#define AM33XX_ICSS_LEN 0x3FFFF
++
++#define AM33XX_EPWMSS0_BASE 0x48300000
++#define AM33XX_EPWMSS1_BASE 0x48302000
++#define AM33XX_EPWMSS2_BASE 0x48304000
++
++#endif /* __ASM_ARCH_AM33XX_H */
+diff --git a/arch/arm/plat-omap/include/plat/clkdev_omap.h b/arch/arm/plat-omap/include/plat/clkdev_omap.h
+index 387a963..7b9a934 100644
+--- a/arch/arm/plat-omap/include/plat/clkdev_omap.h
++++ b/arch/arm/plat-omap/include/plat/clkdev_omap.h
+@@ -40,6 +40,8 @@ struct omap_clk {
+ #define CK_443X (1 << 11)
+ #define CK_TI816X (1 << 12)
+ #define CK_446X (1 << 13)
++#define CK_AM33XX (1 << 14) /* AM33xx specific clocks */
++#define CK_1710 (1 << 15) /* 1710 extra for rate selection */
+
+
+ #define CK_34XX (CK_3430ES1 | CK_3430ES2PLUS)
+diff --git a/arch/arm/plat-omap/include/plat/clock.h b/arch/arm/plat-omap/include/plat/clock.h
+index eb73ab4..240a7b9 100644
+--- a/arch/arm/plat-omap/include/plat/clock.h
++++ b/arch/arm/plat-omap/include/plat/clock.h
+@@ -59,6 +59,8 @@ struct clkops {
+ #define RATE_IN_4430 (1 << 5)
+ #define RATE_IN_TI816X (1 << 6)
+ #define RATE_IN_4460 (1 << 7)
++#define RATE_IN_AM33XX (1 << 8)
++#define RATE_IN_TI814X (1 << 9)
+
+ #define RATE_IN_24XX (RATE_IN_242X | RATE_IN_243X)
+ #define RATE_IN_34XX (RATE_IN_3430ES1 | RATE_IN_3430ES2PLUS)
+@@ -84,7 +86,7 @@ struct clkops {
+ struct clksel_rate {
+ u32 val;
+ u8 div;
+- u8 flags;
++ u16 flags;
+ };
+
+ /**
+diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h
+index 3ff3e36..257f977 100644
+--- a/arch/arm/plat-omap/include/plat/common.h
++++ b/arch/arm/plat-omap/include/plat/common.h
+@@ -27,97 +27,15 @@
+ #ifndef __ARCH_ARM_MACH_OMAP_COMMON_H
+ #define __ARCH_ARM_MACH_OMAP_COMMON_H
+
+-#include <linux/delay.h>
+-
+ #include <plat/i2c.h>
+ #include <plat/omap_hwmod.h>
+
+-struct sys_timer;
+-
+-extern void omap_map_common_io(void);
+-extern struct sys_timer omap1_timer;
+-extern struct sys_timer omap2_timer;
+-extern struct sys_timer omap3_timer;
+-extern struct sys_timer omap3_secure_timer;
+-extern struct sys_timer omap4_timer;
+-extern bool omap_32k_timer_init(void);
+ extern int __init omap_init_clocksource_32k(void);
+ extern unsigned long long notrace omap_32k_sched_clock(void);
+
+ extern void omap_reserve(void);
+-
+-void omap2420_init_early(void);
+-void omap2430_init_early(void);
+-void omap3430_init_early(void);
+-void omap35xx_init_early(void);
+-void omap3630_init_early(void);
+-void omap3_init_early(void); /* Do not use this one */
+-void am35xx_init_early(void);
+-void ti816x_init_early(void);
+-void omap4430_init_early(void);
+-
+ extern int omap_dss_reset(struct omap_hwmod *);
+
+ void omap_sram_init(void);
+
+-/*
+- * IO bases for various OMAP processors
+- * Except the tap base, rest all the io bases
+- * listed are physical addresses.
+- */
+-struct omap_globals {
+- u32 class; /* OMAP class to detect */
+- void __iomem *tap; /* Control module ID code */
+- void __iomem *sdrc; /* SDRAM Controller */
+- void __iomem *sms; /* SDRAM Memory Scheduler */
+- void __iomem *ctrl; /* System Control Module */
+- void __iomem *ctrl_pad; /* PAD Control Module */
+- void __iomem *prm; /* Power and Reset Management */
+- void __iomem *cm; /* Clock Management */
+- void __iomem *cm2;
+-};
+-
+-void omap2_set_globals_242x(void);
+-void omap2_set_globals_243x(void);
+-void omap2_set_globals_3xxx(void);
+-void omap2_set_globals_443x(void);
+-void omap2_set_globals_ti816x(void);
+-
+-/* These get called from omap2_set_globals_xxxx(), do not call these */
+-void omap2_set_globals_tap(struct omap_globals *);
+-void omap2_set_globals_sdrc(struct omap_globals *);
+-void omap2_set_globals_control(struct omap_globals *);
+-void omap2_set_globals_prcm(struct omap_globals *);
+-
+-void omap242x_map_io(void);
+-void omap243x_map_io(void);
+-void omap3_map_io(void);
+-void omap4_map_io(void);
+-
+-
+-/**
+- * omap_test_timeout - busy-loop, testing a condition
+- * @cond: condition to test until it evaluates to true
+- * @timeout: maximum number of microseconds in the timeout
+- * @index: loop index (integer)
+- *
+- * Loop waiting for @cond to become true or until at least @timeout
+- * microseconds have passed. To use, define some integer @index in the
+- * calling code. After running, if @index == @timeout, then the loop has
+- * timed out.
+- */
+-#define omap_test_timeout(cond, timeout, index) \
+-({ \
+- for (index = 0; index < timeout; index++) { \
+- if (cond) \
+- break; \
+- udelay(1); \
+- } \
+-})
+-
+-extern struct device *omap2_get_mpuss_device(void);
+-extern struct device *omap2_get_iva_device(void);
+-extern struct device *omap2_get_l3_device(void);
+-extern struct device *omap4_get_dsp_device(void);
+-
+ #endif /* __ARCH_ARM_MACH_OMAP_COMMON_H */
+diff --git a/arch/arm/plat-omap/include/plat/config_pwm.h b/arch/arm/plat-omap/include/plat/config_pwm.h
+new file mode 100644
+index 0000000..a0c568a
+--- /dev/null
++++ b/arch/arm/plat-omap/include/plat/config_pwm.h
+@@ -0,0 +1,26 @@
++#ifndef __CONFIG_CONFIG_PWM
++#define __CONFIG_CONFIG_PWM
++
++#define AM33XX_CONFIG_BASE (0x0)
++#define AM33XX_CONFIG_SIZE (AM33XX_CONFIG_BASE + 0x10)
++#define AM33XX_ECAP_BASE (0x0100)
++#define AM33XX_ECAP_SIZE (AM33XX_ECAP_BASE + 0x080)
++#define AM33XX_EQEP_BASE (0x0180)
++#define AM33XX_EQeP_SIZE (AM33XX_EQEP_BASE + 0x080)
++#define AM33XX_EPWM_BASE (0x0200)
++#define AM33XX_EPWM_SIZE (AM33XX_EPWM_BASE + 0x0100)
++
++#define PWMSS_CLKCONFIG (0x08)
++#define ECAP_CLK_EN (0x0)
++#define ECAP_CLK_STOP_REQ (0x1)
++#define EQEP_CLK_EN (0x4)
++#define EQEP_CLK_STOP_REQ (0x5)
++#define EPWM_CLK_EN (0x8)
++#define EPWM_CLK_STOP_REQ (0x9)
++
++#define SET (1)
++#define CLEAR (0)
++
++#define PWM_CON_ID_STRING_LENGTH (12)
++
++#endif
+diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h
+index 408a12f..16aae2b 100644
+--- a/arch/arm/plat-omap/include/plat/cpu.h
++++ b/arch/arm/plat-omap/include/plat/cpu.h
+@@ -69,6 +69,7 @@ unsigned int omap_rev(void);
+ * cpu_is_omap343x(): True for OMAP3430
+ * cpu_is_omap443x(): True for OMAP4430
+ * cpu_is_omap446x(): True for OMAP4460
++ * cpu_is_omap447x(): True for OMAP4470
+ */
+ #define GET_OMAP_CLASS (omap_rev() & 0xff)
+
+@@ -78,6 +79,22 @@ static inline int is_omap ##class (void) \
+ return (GET_OMAP_CLASS == (id)) ? 1 : 0; \
+ }
+
++#define GET_AM_CLASS ((omap_rev() >> 24) & 0xff)
++
++#define IS_AM_CLASS(class, id) \
++static inline int is_am ##class (void) \
++{ \
++ return (GET_AM_CLASS == (id)) ? 1 : 0; \
++}
++
++#define GET_TI_CLASS ((omap_rev() >> 24) & 0xff)
++
++#define IS_TI_CLASS(class, id) \
++static inline int is_ti ##class (void) \
++{ \
++ return (GET_TI_CLASS == (id)) ? 1 : 0; \
++}
++
+ #define GET_OMAP_SUBCLASS ((omap_rev() >> 20) & 0x0fff)
+
+ #define IS_OMAP_SUBCLASS(subclass, id) \
+@@ -92,12 +109,21 @@ static inline int is_ti ##subclass (void) \
+ return (GET_OMAP_SUBCLASS == (id)) ? 1 : 0; \
+ }
+
++#define IS_AM_SUBCLASS(subclass, id) \
++static inline int is_am ##subclass (void) \
++{ \
++ return (GET_OMAP_SUBCLASS == (id)) ? 1 : 0; \
++}
++
+ IS_OMAP_CLASS(7xx, 0x07)
+ IS_OMAP_CLASS(15xx, 0x15)
+ IS_OMAP_CLASS(16xx, 0x16)
+ IS_OMAP_CLASS(24xx, 0x24)
+ IS_OMAP_CLASS(34xx, 0x34)
+ IS_OMAP_CLASS(44xx, 0x44)
++IS_AM_CLASS(33xx, 0x33)
++
++IS_TI_CLASS(81xx, 0x81)
+
+ IS_OMAP_SUBCLASS(242x, 0x242)
+ IS_OMAP_SUBCLASS(243x, 0x243)
+@@ -105,8 +131,11 @@ IS_OMAP_SUBCLASS(343x, 0x343)
+ IS_OMAP_SUBCLASS(363x, 0x363)
+ IS_OMAP_SUBCLASS(443x, 0x443)
+ IS_OMAP_SUBCLASS(446x, 0x446)
++IS_OMAP_SUBCLASS(447x, 0x447)
+
+ IS_TI_SUBCLASS(816x, 0x816)
++IS_TI_SUBCLASS(814x, 0x814)
++IS_AM_SUBCLASS(335x, 0x335)
+
+ #define cpu_is_omap7xx() 0
+ #define cpu_is_omap15xx() 0
+@@ -116,10 +145,15 @@ IS_TI_SUBCLASS(816x, 0x816)
+ #define cpu_is_omap243x() 0
+ #define cpu_is_omap34xx() 0
+ #define cpu_is_omap343x() 0
++#define cpu_is_ti81xx() 0
+ #define cpu_is_ti816x() 0
++#define cpu_is_ti814x() 0
++#define cpu_is_am33xx() 0
++#define cpu_is_am335x() 0
+ #define cpu_is_omap44xx() 0
+ #define cpu_is_omap443x() 0
+ #define cpu_is_omap446x() 0
++#define cpu_is_omap447x() 0
+
+ #if defined(MULTI_OMAP1)
+ # if defined(CONFIG_ARCH_OMAP730)
+@@ -322,7 +356,11 @@ IS_OMAP_TYPE(3517, 0x3517)
+ # undef cpu_is_omap3530
+ # undef cpu_is_omap3505
+ # undef cpu_is_omap3517
++# undef cpu_is_ti81xx
+ # undef cpu_is_ti816x
++# undef cpu_is_ti814x
++# undef cpu_is_am33xx
++# undef cpu_is_am335x
+ # define cpu_is_omap3430() is_omap3430()
+ # define cpu_is_omap3503() (cpu_is_omap3430() && \
+ (!omap3_has_iva()) && \
+@@ -339,16 +377,22 @@ IS_OMAP_TYPE(3517, 0x3517)
+ !omap3_has_sgx())
+ # undef cpu_is_omap3630
+ # define cpu_is_omap3630() is_omap363x()
++# define cpu_is_ti81xx() is_ti81xx()
+ # define cpu_is_ti816x() is_ti816x()
++# define cpu_is_ti814x() is_ti814x()
++# define cpu_is_am33xx() is_am33xx()
++# define cpu_is_am335x() is_am335x()
+ #endif
+
+ # if defined(CONFIG_ARCH_OMAP4)
+ # undef cpu_is_omap44xx
+ # undef cpu_is_omap443x
+ # undef cpu_is_omap446x
++# undef cpu_is_omap447x
+ # define cpu_is_omap44xx() is_omap44xx()
+ # define cpu_is_omap443x() is_omap443x()
+ # define cpu_is_omap446x() is_omap446x()
++# define cpu_is_omap447x() is_omap447x()
+ # endif
+
+ /* Macros to detect if we have OMAP1 or OMAP2 */
+@@ -386,16 +430,34 @@ IS_OMAP_TYPE(3517, 0x3517)
+ #define TI8168_REV_ES1_0 TI816X_CLASS
+ #define TI8168_REV_ES1_1 (TI816X_CLASS | (0x1 << 8))
+
++#define TI814X_CLASS 0x81400034
++#define TI8148_REV_ES1_0 TI814X_CLASS
++#define TI8148_REV_ES2_0 (TI814X_CLASS | (0x1 << 8))
++#define TI8148_REV_ES2_1 (TI814X_CLASS | (0x2 << 8))
++
++#define AM335X_CLASS 0x33500034
++#define AM335X_REV_ES1_0 AM335X_CLASS
++
+ #define OMAP443X_CLASS 0x44300044
+ #define OMAP4430_REV_ES1_0 (OMAP443X_CLASS | (0x10 << 8))
+ #define OMAP4430_REV_ES2_0 (OMAP443X_CLASS | (0x20 << 8))
+ #define OMAP4430_REV_ES2_1 (OMAP443X_CLASS | (0x21 << 8))
+ #define OMAP4430_REV_ES2_2 (OMAP443X_CLASS | (0x22 << 8))
++#define OMAP4430_REV_ES2_3 (OMAP443X_CLASS | (0x23 << 8))
+
+ #define OMAP446X_CLASS 0x44600044
+ #define OMAP4460_REV_ES1_0 (OMAP446X_CLASS | (0x10 << 8))
+
+-void omap2_check_revision(void);
++#define OMAP447X_CLASS 0x44700044
++#define OMAP4470_REV_ES1_0 (OMAP447X_CLASS | (0x10 << 8))
++
++void omap2xxx_check_revision(void);
++void omap3xxx_check_revision(void);
++void omap4xxx_check_revision(void);
++void omap3xxx_check_features(void);
++void ti81xx_check_features(void);
++void am33xx_check_features(void);
++void omap4xxx_check_features(void);
+
+ /*
+ * Runtime detection of OMAP3 features
+diff --git a/arch/arm/plat-omap/include/plat/dma-33xx.h b/arch/arm/plat-omap/include/plat/dma-33xx.h
+new file mode 100644
+index 0000000..bebdaa7
+--- /dev/null
++++ b/arch/arm/plat-omap/include/plat/dma-33xx.h
+@@ -0,0 +1,87 @@
++/*
++ * AM33XX SDMA channel definitions
++ *
++ * This file is automatically generated from the AM33XX hardware databases.
++ *
++ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __ARCH_ARM_MACH_OMAP2_AM33XX_DMA_H
++#define __ARCH_ARM_MACH_OMAP2_AM33XX_DMA_H
++
++
++#define AM33XX_DMA_ICSS0_7 0
++#define AM33XX_DMA_ICSS0_6 1
++#define AM33XX_DMA_MMCHS1_W 2
++#define AM33XX_DMA_MMCHS1_R 3
++#define AM33XX_DMA_AESEIP36T0_CTXIN 4
++#define AM33XX_DMA_AESEIP36T0_DIN 5
++#define AM33XX_DMA_AESEIP36T0_DOUT 6
++#define AM33XX_DMA_AESEIP36T0_CTXOUT 7
++#define AM33XX_DMA_MCASP0_X 8
++#define AM33XX_DMA_MCASP0_R 9
++#define AM33XX_DMA_MCASP1_X 10
++#define AM33XX_DMA_MCASP1_R 11
++#define AM33XX_DMA_MCASP2_X 12
++#define AM33XX_DMA_MCASP2_R 13
++#define AM33XX_DMA_PWMSS0_EPWM 14
++#define AM33XX_DMA_PWMSS1_EPWM 15
++#define AM33XX_DMA_SPIOCP0_CH0W 16
++#define AM33XX_DMA_SPIOCP0_CH0R 17
++#define AM33XX_DMA_SPIOCP0_CH1W 18
++#define AM33XX_DMA_SPIOCP0_CH1R 19
++#define AM33XX_DMA_SPIOCP3_CH1W 20
++#define AM33XX_DMA_SPIOCP3_CH1R 21
++#define AM33XX_DMA_GPIO 22
++#define AM33XX_DMA_GPIO1 23
++#define AM33XX_DMA_MMCHS0_W 24
++#define AM33XX_DMA_MMCHS0_R 25
++#define AM33XX_DMA_UART0_0 26
++#define AM33XX_DMA_UART0_1 27
++#define AM33XX_DMA_UART1_0 28
++#define AM33XX_DMA_UART1_1 29
++#define AM33XX_DMA_UART2_0 30
++#define AM33XX_DMA_UART2_1 31
++#define AM33XX_DMA_DESEIP16T0_IN 32
++#define AM33XX_DMA_DESEIP16T0 33
++#define AM33XX_DMA_DESEIP16T0_OUT 34
++#define AM33XX_DMA_SHAEIP57T0_CTXIN 35
++#define AM33XX_DMA_SHAEIP57T0_DIN 36
++#define AM33XX_DMA_SHAEIP57T0_CTXOUT 37
++#define AM33XX_DMA_PWMSS0_ECAP 38
++#define AM33XX_DMA_PWMSS1_ECAP 39
++#define AM33XX_DMA_DCAN_1 40
++#define AM33XX_DMA_DCAN_2 41
++#define AM33XX_DMA_SPIOCP1_CH0W 42
++#define AM33XX_DMA_SPIOCP1_CH0R 43
++#define AM33XX_DMA_SPIOCP1_CH1W 44
++#define AM33XX_DMA_SPIOCP1_CH1R 45
++#define AM33XX_DMA_PWMSS0_EQEP 46
++#define AM33XX_DMA_DCAN_3 47
++#define AM33XX_DMA_TIMER_4 48
++#define AM33XX_DMA_TIMER_5 49
++#define AM33XX_DMA_TIMER_6 50
++#define AM33XX_DMA_TIMER_7 51
++#define AM33XX_DMA_GPM 52
++#define AM33XX_DMA_ADC0 53
++#define AM33XX_DMA_PWMSS1_EQEP 56
++#define AM33XX_DMA_ADC1 57
++#define AM33XX_DMA_MSHSI2COCP0_TX 58
++#define AM33XX_DMA_MSHSI2COCP0_RX 59
++#define AM33XX_DMA_MSHSI2COCP1_TX 60
++#define AM33XX_DMA_MSHSI2COCP1_RX 61
++#define AM33XX_DMA_PWMSS2_ECAP 62
++#define AM33XX_DMA_PWMSS2_EPW 63
++#define AM33XX_DMA_MMCHS2_W 64 /* xBar */
++#define AM33XX_DMA_MMCHS2_R 65 /* xBar */
++
++#endif
+diff --git a/arch/arm/plat-omap/include/plat/dma.h b/arch/arm/plat-omap/include/plat/dma.h
+index dc562a5..c865dbc 100644
+--- a/arch/arm/plat-omap/include/plat/dma.h
++++ b/arch/arm/plat-omap/include/plat/dma.h
+@@ -30,6 +30,7 @@
+
+ /* Move omap4 specific defines to dma-44xx.h */
+ #include "dma-44xx.h"
++#include "dma-33xx.h"
+
+ /* DMA channels for omap1 */
+ #define OMAP_DMA_NO_DEVICE 0
+diff --git a/arch/arm/plat-omap/include/plat/dmtimer.h b/arch/arm/plat-omap/include/plat/dmtimer.h
+index 9418f00..d9432b0 100644
+--- a/arch/arm/plat-omap/include/plat/dmtimer.h
++++ b/arch/arm/plat-omap/include/plat/dmtimer.h
+@@ -47,6 +47,7 @@
+
+ /* timer interrupt enable bits */
+ #define OMAP_TIMER_INT_CAPTURE (1 << 2)
++#define OMAP_TIMER_INT_CAPTURE_RESET (0 << 2)
+ #define OMAP_TIMER_INT_OVERFLOW (1 << 1)
+ #define OMAP_TIMER_INT_MATCH (1 << 0)
+
+@@ -127,6 +128,8 @@ int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload, unsigned
+ int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload, unsigned int value);
+ int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable, unsigned int match);
+ int omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on, int toggle, int trigger);
++int omap_dm_timer_set_capture(struct omap_dm_timer *timer,
++ bool lht, bool hlt, bool cm);
+ int omap_dm_timer_set_prescaler(struct omap_dm_timer *timer, int prescaler);
+
+ int omap_dm_timer_set_int_enable(struct omap_dm_timer *timer, unsigned int value);
+diff --git a/arch/arm/plat-omap/include/plat/elm.h b/arch/arm/plat-omap/include/plat/elm.h
+new file mode 100644
+index 0000000..50b4c8a
+--- /dev/null
++++ b/arch/arm/plat-omap/include/plat/elm.h
+@@ -0,0 +1,33 @@
++/*
++ * BCH Error Location Module for TI81xx
++ *
++ * Copyright (C) 2011 Texas Instruments, Inc. - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef OMAP_ELM_H
++#define OMAP_ELM_H
++
++enum omap_bch_ecc {
++ OMAP_BCH4_ECC = 0,
++ OMAP_BCH8_ECC,
++ OMAP_BCH16_ECC,
++};
++
++#define BCH8_ECC_BYTES (512)
++#define BCH8_ECC_OOB_BYTES (13)
++#define BCH_MAX_ECC_BYTES_PER_SECTOR (28)
++#define BCH8_ECC_MAX ((BCH8_ECC_BYTES + BCH8_ECC_OOB_BYTES) * 8)
++
++int omap_elm_decode_bch_error(int bch_type, char *ecc_calc,
++ unsigned int *err_loc);
++void omap_configure_elm(struct mtd_info *mtdi, int bch_type);
++#endif /* OMAP_ELM_H */
+diff --git a/arch/arm/plat-omap/include/plat/emif.h b/arch/arm/plat-omap/include/plat/emif.h
+new file mode 100644
+index 0000000..314c126
+--- /dev/null
++++ b/arch/arm/plat-omap/include/plat/emif.h
+@@ -0,0 +1,41 @@
++/*
++ * EMIF register definitions for TI81xx and AM33xx
++ *
++ * Copyright (C) 2011 Texas Instruments, Inc. - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __EMIF_H
++#define __EMIF_H
++
++#define EMIF_MOD_ID_REV (0x0)
++#define EMIF4_0_SDRAM_STATUS (0x04)
++#define EMIF4_0_SDRAM_CONFIG (0x08)
++#define EMIF4_0_SDRAM_CONFIG2 (0x0C)
++#define EMIF4_0_SDRAM_REF_CTRL (0x10)
++#define EMIF4_0_SDRAM_REF_CTRL_SHADOW (0x14)
++#define EMIF4_0_SDRAM_TIM_1 (0x18)
++#define EMIF4_0_SDRAM_TIM_1_SHADOW (0x1C)
++#define EMIF4_0_SDRAM_TIM_2 (0x20)
++#define EMIF4_0_SDRAM_TIM_2_SHADOW (0x24)
++#define EMIF4_0_SDRAM_TIM_3 (0x28)
++#define EMIF4_0_SDRAM_TIM_3_SHADOW (0x2C)
++#define EMIF4_0_SDRAM_MGMT_CTRL (0x38)
++#define EMIF4_0_SDRAM_MGMT_CTRL_SHD (0x3C)
++#define EMIF4_0_DDR_PHY_CTRL_1 (0xE4)
++#define EMIF4_0_DDR_PHY_CTRL_1_SHADOW (0xE8)
++#define EMIF4_0_DDR_PHY_CTRL_2 (0xEC)
++#define EMIF4_0_IODFT_TLGC (0x60)
++
++#define SELF_REFRESH_ENABLE(m) (0x2 << 8 | (m << 4))
++#define SELF_REFRESH_DISABLE (0x0 << 8)
++
++#endif /* __EMIF_H */
+diff --git a/arch/arm/plat-omap/include/plat/gpmc.h b/arch/arm/plat-omap/include/plat/gpmc.h
+index 1527929..2b81fc6 100644
+--- a/arch/arm/plat-omap/include/plat/gpmc.h
++++ b/arch/arm/plat-omap/include/plat/gpmc.h
+@@ -92,6 +92,8 @@ enum omap_ecc {
+ OMAP_ECC_HAMMING_CODE_HW, /* gpmc to detect the error */
+ /* 1-bit ecc: stored at beginning of spare area as romcode */
+ OMAP_ECC_HAMMING_CODE_HW_ROMCODE, /* gpmc method & romcode layout */
++ OMAP_ECC_BCH4_CODE_HW, /* gpmc bch detection & s/w method correction */
++ OMAP_ECC_BCH8_CODE_HW, /* gpmc bch detection & s/w method correction */
+ };
+
+ /*
+@@ -131,6 +133,21 @@ struct gpmc_timings {
+ u16 wr_data_mux_bus; /* WRDATAONADMUXBUS */
+ };
+
++
++struct gpmc_devices_info {
++ void *pdata;
++ int flag;
++};
++
++#define GPMC_DEVICE_NAND (1 << 0)
++#define GPMC_DEVICE_ONENAND (1 << 1)
++#define GPMC_DEVICE_NOR (1 << 2)
++#define GPMC_DEVICE_SMC91X (1 << 3)
++#define GPMC_DEVICE_SMS911X (1 << 4)
++#define GPMC_DEVICE_TUSB6010 (1 << 5)
++
++extern int omap_init_gpmc(struct gpmc_devices_info *pdata, int pdata_len);
++
+ extern unsigned int gpmc_ns_to_ticks(unsigned int time_ns);
+ extern unsigned int gpmc_ps_to_ticks(unsigned int time_ps);
+ extern unsigned int gpmc_ticks_to_ns(unsigned int ticks);
+@@ -155,6 +172,8 @@ extern int gpmc_cs_configure(int cs, int cmd, int wval);
+ extern int gpmc_nand_read(int cs, int cmd);
+ extern int gpmc_nand_write(int cs, int cmd, int wval);
+
+-int gpmc_enable_hwecc(int cs, int mode, int dev_width, int ecc_size);
+-int gpmc_calculate_ecc(int cs, const u_char *dat, u_char *ecc_code);
++int gpmc_enable_hwecc(int ecc, int cs, int mode, int dev_width, int ecc_size);
++int gpmc_calculate_ecc(int ecc, int cs, const u_char *dat, u_char *ecc_code);
++int gpmc_suspend(void);
++int gpmc_resume(void);
+ #endif
+diff --git a/arch/arm/plat-omap/include/plat/hardware.h b/arch/arm/plat-omap/include/plat/hardware.h
+index e87efe1..e897978 100644
+--- a/arch/arm/plat-omap/include/plat/hardware.h
++++ b/arch/arm/plat-omap/include/plat/hardware.h
+@@ -286,6 +286,7 @@
+ #include <plat/omap24xx.h>
+ #include <plat/omap34xx.h>
+ #include <plat/omap44xx.h>
+-#include <plat/ti816x.h>
++#include <plat/ti81xx.h>
++#include <plat/am33xx.h>
+
+ #endif /* __ASM_ARCH_OMAP_HARDWARE_H */
+diff --git a/arch/arm/plat-omap/include/plat/io.h b/arch/arm/plat-omap/include/plat/io.h
+index 7f2969e..0696bae 100644
+--- a/arch/arm/plat-omap/include/plat/io.h
++++ b/arch/arm/plat-omap/include/plat/io.h
+@@ -73,6 +73,9 @@
+ #define OMAP4_L3_IO_OFFSET 0xb4000000
+ #define OMAP4_L3_IO_ADDRESS(pa) IOMEM((pa) + OMAP4_L3_IO_OFFSET) /* L3 */
+
++#define AM33XX_L4_WK_IO_OFFSET 0xb5000000
++#define AM33XX_L4_WK_IO_ADDRESS(pa) IOMEM((pa) + AM33XX_L4_WK_IO_OFFSET)
++
+ #define OMAP4_L3_PER_IO_OFFSET 0xb1100000
+ #define OMAP4_L3_PER_IO_ADDRESS(pa) IOMEM((pa) + OMAP4_L3_PER_IO_OFFSET)
+
+@@ -154,6 +157,15 @@
+ #define L4_34XX_SIZE SZ_4M /* 1MB of 128MB used, want 1MB sect */
+
+ /*
++ * ----------------------------------------------------------------------------
++ * AM33XX specific IO mapping
++ * ----------------------------------------------------------------------------
++ */
++#define L4_WK_AM33XX_PHYS L4_WK_AM33XX_BASE
++#define L4_WK_AM33XX_VIRT (L4_WK_AM33XX_PHYS + AM33XX_L4_WK_IO_OFFSET)
++#define L4_WK_AM33XX_SIZE SZ_4M /* 1MB of 128MB used, want 1MB sect */
++
++/*
+ * Need to look at the Size 4M for L4.
+ * VPOM3430 was not working for Int controller
+ */
+@@ -247,8 +259,6 @@
+ * NOTE: Please use ioremap + __raw_read/write where possible instead of these
+ */
+
+-void omap_ioremap_init(void);
+-
+ extern u8 omap_readb(u32 pa);
+ extern u16 omap_readw(u32 pa);
+ extern u32 omap_readl(u32 pa);
+@@ -257,83 +267,9 @@ extern void omap_writew(u16 v, u32 pa);
+ extern void omap_writel(u32 v, u32 pa);
+
+ struct omap_sdrc_params;
+-
+-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+-void omap7xx_map_io(void);
+-#else
+-static inline void omap_map_io(void)
+-{
+-}
+-#endif
+-
+-#ifdef CONFIG_ARCH_OMAP15XX
+-void omap15xx_map_io(void);
+-#else
+-static inline void omap15xx_map_io(void)
+-{
+-}
+-#endif
+-
+-#ifdef CONFIG_ARCH_OMAP16XX
+-void omap16xx_map_io(void);
+-#else
+-static inline void omap16xx_map_io(void)
+-{
+-}
+-#endif
+-
+-void omap1_init_early(void);
+-
+-#ifdef CONFIG_SOC_OMAP2420
+-extern void omap242x_map_common_io(void);
+-#else
+-static inline void omap242x_map_common_io(void)
+-{
+-}
+-#endif
+-
+-#ifdef CONFIG_SOC_OMAP2430
+-extern void omap243x_map_common_io(void);
+-#else
+-static inline void omap243x_map_common_io(void)
+-{
+-}
+-#endif
+-
+-#ifdef CONFIG_ARCH_OMAP3
+-extern void omap34xx_map_common_io(void);
+-#else
+-static inline void omap34xx_map_common_io(void)
+-{
+-}
+-#endif
+-
+-#ifdef CONFIG_SOC_OMAPTI816X
+-extern void omapti816x_map_common_io(void);
+-#else
+-static inline void omapti816x_map_common_io(void)
+-{
+-}
+-#endif
+-
+-#ifdef CONFIG_ARCH_OMAP4
+-extern void omap44xx_map_common_io(void);
+-#else
+-static inline void omap44xx_map_common_io(void)
+-{
+-}
+-#endif
+-
+-extern void omap2_init_common_infrastructure(void);
+ extern void omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
+ struct omap_sdrc_params *sdrc_cs1);
+
+-#define __arch_ioremap omap_ioremap
+-#define __arch_iounmap omap_iounmap
+-
+-void __iomem *omap_ioremap(unsigned long phys, size_t size, unsigned int type);
+-void omap_iounmap(volatile void __iomem *addr);
+-
+ extern void __init omap_init_consistent_dma_size(void);
+
+ #endif
+diff --git a/arch/arm/plat-omap/include/plat/irqs-33xx.h b/arch/arm/plat-omap/include/plat/irqs-33xx.h
+new file mode 100644
+index 0000000..3e12d83
+--- /dev/null
++++ b/arch/arm/plat-omap/include/plat/irqs-33xx.h
+@@ -0,0 +1,143 @@
++/*
++ * AM33XX interrupts.
++ *
++ * Copyright (C) 2011 Texas Instruments, Inc. - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __ARCH_ARM_MACH_OMAP2_AM33XX_IRQS_H
++#define __ARCH_ARM_MACH_OMAP2_AM33XX_IRQS_H
++
++
++#define AM33XX_IRQ_ELM 4
++#define AM33XX_IRQ_PI_NMI 7
++#define AM33XX_IRQ_CONTROL_PLATFORM 8
++#define AM33XX_IRQ_L3_FLAGMUX0 9
++#define AM33XX_IRQ_L3_FLAGMUX1 10
++#define AM33XX_IRQ_PRCM 11
++#define AM33XX_IRQ_TPCC0_INT_PO0 12
++#define AM33XX_IRQ_TPCC0_MPINT_PO 13
++#define AM33XX_IRQ_TPCC0_ERRINT_PO 14
++#define AM33XX_IRQ_WDT0 15
++#define AM33XX_IRQ_ADC_GEN 16
++#define AM33XX_IRQ_USBSS 17
++#define AM33XX_IRQ_USB0 18
++#define AM33XX_IRQ_USB1 19
++#define AM33XX_IRQ_ICSS0_0 20
++#define AM33XX_IRQ_ICSS0_1 21
++#define AM33XX_IRQ_ICSS0_2 22
++#define AM33XX_IRQ_ICSS0_3 23
++#define AM33XX_IRQ_ICSS0_4 24
++#define AM33XX_IRQ_ICSS0_5 25
++#define AM33XX_IRQ_ICSS0_6 26
++#define AM33XX_IRQ_ICSS0_7 27
++#define AM33XX_IRQ_MMCHS1 28
++#define AM33XX_IRQ_MMCHS2 29
++#define AM33XX_IRQ_MSHSI2COCP2 30
++#define AM33XX_IRQ_PWMSS0_ECAP 31
++#define AM33XX_IRQ_GPIO2_1 32
++#define AM33XX_IRQ_GPIO2_2 33
++#define AM33XX_IRQ_USB_P 34
++#define AM33XX_IRQ_PCI_SLV 35
++#define AM33XX_IRQ_LCD 36
++#define AM33XX_IRQ_THALIAIRQ 37
++#define AM33XX_IRQ_BB_2DHWA 38
++#define AM33XX_IRQ_PWMSS2_EPWM 39
++#define AM33XX_IRQ_CPSW_C0_RX 40
++#define AM33XX_IRQ_CPSW_RX 41
++#define AM33XX_IRQ_CPSW_TX 42
++#define AM33XX_IRQ_CPSW_C0 43
++#define AM33XX_IRQ_UART3 44
++#define AM33XX_IRQ_UART4 45
++#define AM33XX_IRQ_UART5 46
++#define AM33XX_IRQ_PWMSS1_ECAP 47
++#define AM33XX_IRQ_PCI0 48
++#define AM33XX_IRQ_PCI1 49
++#define AM33XX_IRQ_PCI2 50
++#define AM33XX_IRQ_PCI3 51
++#define AM33XX_IRQ_DCAN0_0 52
++#define AM33XX_IRQ_DCAN0_1 53
++#define AM33XX_IRQ_DCAN0_UERR 54
++#define AM33XX_IRQ_DCAN1_0 55
++#define AM33XX_IRQ_DCAN1_1 56
++#define AM33XX_IRQ_DCAN1_UERR 57
++#define AM33XX_IRQ_PWMSS0 58
++#define AM33XX_IRQ_PWMSS1 59
++#define AM33XX_IRQ_PWMSS2 60
++#define AM33XX_IRQ_PWMSS2_ECAP 61
++#define AM33XX_IRQ_GPIO3_1 62
++#define AM33XX_IRQ_GPIO3_2 63
++#define AM33XX_IRQ_MMCHS0 64
++#define AM33XX_IRQ_MCSPIOCP0 65
++#define AM33XX_IRQ_DMTIMER0 66
++#define AM33XX_IRQ_DMTIMER1 67
++#define AM33XX_IRQ_DMTIMER2 68
++#define AM33XX_IRQ_DMTIMER3 69
++#define AM33XX_IRQ_MSHSI2COCP0 70
++#define AM33XX_IRQ_MSHSI2COCP1 71
++#define AM33XX_IRQ_UART0 72
++#define AM33XX_IRQ_UART1 73
++#define AM33XX_IRQ_UART2 74
++#define AM33XX_IRQ_RTC_TIMER 75
++#define AM33XX_IRQ_RTC_ALARM 76
++#define AM33XX_IRQ_MAILBOX 77
++#define AM33XX_IRQ_M3_M3SP_TXEV 78
++#define AM33XX_IRQ_PWMSS0_EQEP 79
++#define AM33XX_IRQ_MCASP0_AX 80
++#define AM33XX_IRQ_MCASP0_AR 81
++#define AM33XX_IRQ_MCASP1_AX 82
++#define AM33XX_IRQ_MCASP1_AR 83
++#define AM33XX_IRQ_MCASP2_X 84
++#define AM33XX_IRQ_MCASP2_R 85
++#define AM33XX_IRQ_PWMSS0_EPWM 86
++#define AM33XX_IRQ_PWMSS1_EPWM 87
++#define AM33XX_IRQ_PWMSS1_EQEP 88
++#define AM33XX_IRQ_PWMSS2_EQEP 89
++#define AM33XX_IRQ_DMA 90
++#define AM33XX_IRQ_WDT1 91
++#define AM33XX_IRQ_DMTIMER4 92
++#define AM33XX_IRQ_DMTIMER5 93
++#define AM33XX_IRQ_DMTIMER6 94
++#define AM33XX_IRQ_DMTIMER7 95
++#define AM33XX_IRQ_GPIO0_1 96
++#define AM33XX_IRQ_GPIO0_2 97
++#define AM33XX_IRQ_GPIO1_1 98
++#define AM33XX_IRQ_GPIO1_2 99
++#define AM33XX_IRQ_GPMC0 100
++#define AM33XX_IRQ_EMI 101
++#define AM33XX_IRQ_AESEIP36t0_S 102
++#define AM33XX_IRQ_AESEIP36t0_P 103
++#define AM33XX_IRQ_AESEIP36t1_S 104
++#define AM33XX_IRQ_AESEIP36t1_P 105
++#define AM33XX_IRQ_DESEIP16t0_S 106
++#define AM33XX_IRQ_DESEIP16t0_P 107
++#define AM33XX_IRQ_SHAEIP57t0_S 108
++#define AM33XX_IRQ_SHAEIP57t0_P 109
++#define AM33XX_IRQ_PKAEIP29t0_S 110
++#define AM33XX_IRQ_RNGEIP75t0 111
++#define AM33XX_IRQ_TPTC0 112
++#define AM33XX_IRQ_TPTC1 113
++#define AM33XX_IRQ_TPTC2 114
++#define AM33XX_IRQ_TSC 115
++#define AM33XX_IRQ_SDMA0 116
++#define AM33XX_IRQ_SDMA1 117
++#define AM33XX_IRQ_SDMA2 118
++#define AM33XX_IRQ_SDMA3 119
++#define AM33XX_IRQ_SMARTREFLEX0 120
++#define AM33XX_IRQ_SMARTREFLEX1 121
++#define AM33XX_IRQ_NETRA_MMU 122
++#define AM33XX_IRQ_DMA0 123
++#define AM33XX_IRQ_DMA1 124
++#define AM33XX_IRQ_SPI1 125
++#define AM33XX_IRQ_SPI2 126
++#define AM33XX_IRQ_SPI 127
++
++#endif
+diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h
+index 30e1071..cc15272 100644
+--- a/arch/arm/plat-omap/include/plat/irqs.h
++++ b/arch/arm/plat-omap/include/plat/irqs.h
+@@ -30,6 +30,7 @@
+
+ /* All OMAP4 specific defines are moved to irqs-44xx.h */
+ #include "irqs-44xx.h"
++#include "irqs-33xx.h"
+
+ /*
+ * IRQ numbers for interrupt handler 1
+@@ -357,7 +358,7 @@
+ #define INT_35XX_EMAC_C0_TX_PULSE_IRQ 69
+ #define INT_35XX_EMAC_C0_MISC_PULSE_IRQ 70
+ #define INT_35XX_USBOTG_IRQ 71
+-#define INT_35XX_UART4 84
++#define INT_35XX_UART4_IRQ 84
+ #define INT_35XX_CCDC_VD0_IRQ 88
+ #define INT_35XX_CCDC_VD1_IRQ 92
+ #define INT_35XX_CCDC_VD2_IRQ 93
+@@ -433,22 +434,8 @@
+
+ #define OMAP_IRQ_BIT(irq) (1 << ((irq) % 32))
+
+-#define INTCPS_NR_MIR_REGS 3
+-#define INTCPS_NR_IRQS 96
+-
+-#ifndef __ASSEMBLY__
+-extern void __iomem *omap_irq_base;
+-void omap1_init_irq(void);
+-void omap2_init_irq(void);
+-void omap3_init_irq(void);
+-void ti816x_init_irq(void);
+-extern int omap_irq_pending(void);
+-void omap_intc_save_context(void);
+-void omap_intc_restore_context(void);
+-void omap3_intc_suspend(void);
+-void omap3_intc_prepare_idle(void);
+-void omap3_intc_resume_idle(void);
+-#endif
++#define INTCPS_NR_MIR_REGS 4
++#define INTCPS_NR_IRQS 128
+
+ #include <mach/hardware.h>
+
+diff --git a/arch/arm/plat-omap/include/plat/lcdc.h b/arch/arm/plat-omap/include/plat/lcdc.h
+new file mode 100644
+index 0000000..f8bcdec
+--- /dev/null
++++ b/arch/arm/plat-omap/include/plat/lcdc.h
+@@ -0,0 +1,21 @@
++/*
++ * Header file for LCD controller
++ *
++ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ **/
++
++#ifndef __OMAP2_LCDC_H
++#define __OMAP2_LCDC_H
++
++struct platform_device *am33xx_register_lcdc(
++ struct da8xx_lcdc_platform_data *pdata);
++#endif
+diff --git a/arch/arm/plat-omap/include/plat/mailbox.h b/arch/arm/plat-omap/include/plat/mailbox.h
+index cc3921e..e136529 100644
+--- a/arch/arm/plat-omap/include/plat/mailbox.h
++++ b/arch/arm/plat-omap/include/plat/mailbox.h
+@@ -29,6 +29,8 @@ struct omap_mbox_ops {
+ void (*fifo_write)(struct omap_mbox *mbox, mbox_msg_t msg);
+ int (*fifo_empty)(struct omap_mbox *mbox);
+ int (*fifo_full)(struct omap_mbox *mbox);
++ int (*fifo_needs_flush)(struct omap_mbox *mbox);
++ mbox_msg_t (*fifo_readback)(struct omap_mbox *mbox);
+ /* irq */
+ void (*enable_irq)(struct omap_mbox *mbox,
+ omap_mbox_irq_t irq);
+@@ -61,6 +63,7 @@ struct omap_mbox {
+ struct blocking_notifier_head notifier;
+ };
+
++int omap_mbox_msg_rx_flush(struct omap_mbox *mbox);
+ int omap_mbox_msg_send(struct omap_mbox *, mbox_msg_t msg);
+ void omap_mbox_init_seq(struct omap_mbox *);
+
+diff --git a/arch/arm/plat-omap/include/plat/mcspi.h b/arch/arm/plat-omap/include/plat/mcspi.h
+index 3d51b18..a357eb2 100644
+--- a/arch/arm/plat-omap/include/plat/mcspi.h
++++ b/arch/arm/plat-omap/include/plat/mcspi.h
+@@ -18,9 +18,6 @@ struct omap2_mcspi_dev_attr {
+
+ struct omap2_mcspi_device_config {
+ unsigned turbo_mode:1;
+-
+- /* Do we want one channel enabled at the same time? */
+- unsigned single_channel:1;
+ };
+
+ #endif
+diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h
+index 94cf70a..e5b4db1 100644
+--- a/arch/arm/plat-omap/include/plat/mmc.h
++++ b/arch/arm/plat-omap/include/plat/mmc.h
+@@ -50,6 +50,11 @@
+ #define OMAP_HSMMC_SUPPORTS_DUAL_VOLT BIT(0)
+ #define OMAP_HSMMC_BROKEN_MULTIBLOCK_READ BIT(1)
+
++enum {
++ MMC_CTRL_VERSION_1 = 0, /* OMAP class devices */
++ MMC_CTRL_VERSION_2 /* AM33XX class devices */
++};
++
+ struct omap_mmc_dev_attr {
+ u8 flags;
+ };
+@@ -96,6 +101,7 @@ struct omap_mmc_platform_data {
+ */
+ u8 wires; /* Used for the MMC driver on omap1 and 2420 */
+ u32 caps; /* Used for the MMC driver on 2430 and later */
++ u32 pm_caps; /* PM capabilities of the mmc */
+
+ /*
+ * nomux means "standard" muxing is wrong on this board, and
+@@ -166,6 +172,8 @@ struct omap_mmc_platform_data {
+ unsigned int ban_openended:1;
+
+ } slots[OMAP_MMC_MAX_SLOTS];
++
++ u8 version;
+ };
+
+ /* called from board-specific card detection service routine */
+diff --git a/arch/arm/plat-omap/include/plat/nand.h b/arch/arm/plat-omap/include/plat/nand.h
+index 67fc506..9212a2e 100644
+--- a/arch/arm/plat-omap/include/plat/nand.h
++++ b/arch/arm/plat-omap/include/plat/nand.h
+@@ -29,6 +29,9 @@ struct omap_nand_platform_data {
+ unsigned long phys_base;
+ int devsize;
+ enum omap_ecc ecc_opt;
++ bool elm_used;
++ int (*ctrlr_suspend) (void);
++ int (*ctrlr_resume) (void);
+ };
+
+ /* minimum size for IO mapping */
+diff --git a/arch/arm/plat-omap/include/plat/omap-secure.h b/arch/arm/plat-omap/include/plat/omap-secure.h
+new file mode 100644
+index 0000000..64f9d1c
+--- /dev/null
++++ b/arch/arm/plat-omap/include/plat/omap-secure.h
+@@ -0,0 +1,13 @@
++#ifndef __OMAP_SECURE_H__
++#define __OMAP_SECURE_H__
++
++#include <linux/types.h>
++
++#ifdef CONFIG_ARCH_OMAP2PLUS
++extern int omap_secure_ram_reserve_memblock(void);
++#else
++static inline void omap_secure_ram_reserve_memblock(void)
++{ }
++#endif
++
++#endif /* __OMAP_SECURE_H__ */
+diff --git a/arch/arm/plat-omap/include/plat/omap-serial.h b/arch/arm/plat-omap/include/plat/omap-serial.h
+index 2682043..7f5281e 100644
+--- a/arch/arm/plat-omap/include/plat/omap-serial.h
++++ b/arch/arm/plat-omap/include/plat/omap-serial.h
+@@ -19,6 +19,7 @@
+
+ #include <linux/serial_core.h>
+ #include <linux/platform_device.h>
++#include <linux/pm_qos.h>
+
+ #include <plat/mux.h>
+
+@@ -33,6 +34,8 @@
+
+ #define OMAP_MODE13X_SPEED 230400
+
++#define OMAP_UART_SCR_TX_EMPTY 0x08
++
+ /* WER = 0x7F
+ * Enable module level wakeup in WER reg
+ */
+@@ -51,18 +54,27 @@
+
+ #define OMAP_UART_DMA_CH_FREE -1
+
+-#define RX_TIMEOUT (3 * HZ)
+-#define OMAP_MAX_HSUART_PORTS 4
++#define OMAP_MAX_HSUART_PORTS 6
+
+ #define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
+
++#define UART_ERRATA_i202_MDR1_ACCESS BIT(0)
++#define UART_ERRATA_i291_DMA_FORCEIDLE BIT(1)
++
+ struct omap_uart_port_info {
+ bool dma_enabled; /* To specify DMA Mode */
+ unsigned int uartclk; /* UART clock rate */
+- void __iomem *membase; /* ioremap cookie or NULL */
+- resource_size_t mapbase; /* resource base */
+- unsigned long irqflags; /* request_irq flags */
+ upf_t flags; /* UPF_* flags */
++ u32 errata;
++ unsigned int dma_rx_buf_size;
++ unsigned int dma_rx_timeout;
++ unsigned int autosuspend_timeout;
++ unsigned int dma_rx_poll_rate;
++
++ int (*get_context_loss_count)(struct device *);
++ void (*set_forceidle)(struct platform_device *);
++ void (*set_noidle)(struct platform_device *);
++ void (*enable_wakeup)(struct platform_device *, bool);
+ };
+
+ struct uart_omap_dma {
+@@ -86,8 +98,9 @@ struct uart_omap_dma {
+ spinlock_t rx_lock;
+ /* timer to poll activity on rx dma */
+ struct timer_list rx_timer;
+- int rx_buf_size;
+- int rx_timeout;
++ unsigned int rx_buf_size;
++ unsigned int rx_poll_rate;
++ unsigned int rx_timeout;
+ };
+
+ struct uart_omap_port {
+@@ -100,6 +113,10 @@ struct uart_omap_port {
+ unsigned char mcr;
+ unsigned char fcr;
+ unsigned char efr;
++ unsigned char dll;
++ unsigned char dlh;
++ unsigned char mdr1;
++ unsigned char scr;
+
+ int use_dma;
+ /*
+@@ -111,6 +128,14 @@ struct uart_omap_port {
+ unsigned char msr_saved_flags;
+ char name[20];
+ unsigned long port_activity;
++ u32 context_loss_cnt;
++ u32 errata;
++ u8 wakeups_enabled;
++
++ struct pm_qos_request pm_qos_request;
++ u32 latency;
++ u32 calc_latency;
++ struct work_struct qos_work;
+ };
+
+ #endif /* __OMAP_SERIAL_H__ */
+diff --git a/arch/arm/plat-omap/include/plat/omap34xx.h b/arch/arm/plat-omap/include/plat/omap34xx.h
+index b9e8588..0d818ac 100644
+--- a/arch/arm/plat-omap/include/plat/omap34xx.h
++++ b/arch/arm/plat-omap/include/plat/omap34xx.h
+@@ -35,6 +35,8 @@
+ #define L4_EMU_34XX_BASE 0x54000000
+ #define L3_34XX_BASE 0x68000000
+
++#define L4_WK_AM33XX_BASE 0x44C00000
++
+ #define OMAP3430_32KSYNCT_BASE 0x48320000
+ #define OMAP3430_CM_BASE 0x48004800
+ #define OMAP3430_PRM_BASE 0x48306800
+diff --git a/arch/arm/plat-omap/include/plat/omap44xx.h b/arch/arm/plat-omap/include/plat/omap44xx.h
+index ea2b8a6..c0d478e 100644
+--- a/arch/arm/plat-omap/include/plat/omap44xx.h
++++ b/arch/arm/plat-omap/include/plat/omap44xx.h
+@@ -45,6 +45,7 @@
+ #define OMAP44XX_WKUPGEN_BASE 0x48281000
+ #define OMAP44XX_MCPDM_BASE 0x40132000
+ #define OMAP44XX_MCPDM_L3_BASE 0x49032000
++#define OMAP44XX_SAR_RAM_BASE 0x4a326000
+
+ #define OMAP44XX_MAILBOX_BASE (L4_44XX_BASE + 0xF4000)
+ #define OMAP44XX_HSUSB_OTG_BASE (L4_44XX_BASE + 0xAB000)
+diff --git a/arch/arm/plat-omap/include/plat/omap_device.h b/arch/arm/plat-omap/include/plat/omap_device.h
+index 51423d2..71aa35d 100644
+--- a/arch/arm/plat-omap/include/plat/omap_device.h
++++ b/arch/arm/plat-omap/include/plat/omap_device.h
+@@ -100,6 +100,13 @@ struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
+ struct omap_device_pm_latency *pm_lats,
+ int pm_lats_cnt, int is_early_device);
+
++struct omap_device *omap_device_alloc(struct platform_device *pdev,
++ struct omap_hwmod **ohs, int oh_cnt,
++ struct omap_device_pm_latency *pm_lats,
++ int pm_lats_cnt);
++void omap_device_delete(struct omap_device *od);
++int omap_device_register(struct platform_device *pdev);
++
+ void __iomem *omap_device_get_rt_va(struct omap_device *od);
+ struct device *omap_device_get_by_hwmod_name(const char *oh_name);
+
+@@ -116,6 +123,7 @@ int omap_device_enable_hwmods(struct omap_device *od);
+
+ int omap_device_disable_clocks(struct omap_device *od);
+ int omap_device_enable_clocks(struct omap_device *od);
++int omap_device_reset(struct device *dev);
+
+ /*
+ * Entries should be kept in latency order ascending
+diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h
+index 8b372ed..a3b8b5c 100644
+--- a/arch/arm/plat-omap/include/plat/omap_hwmod.h
++++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h
+@@ -41,6 +41,8 @@ struct omap_device;
+
+ extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type1;
+ extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2;
++extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type3;
++extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type4;
+
+ /*
+ * OCP SYSCONFIG bit shifts/masks TYPE1. These are for IPs compliant
+@@ -70,6 +72,32 @@ extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2;
+ #define SYSC_TYPE2_MIDLEMODE_SHIFT 4
+ #define SYSC_TYPE2_MIDLEMODE_MASK (0x3 << SYSC_TYPE2_MIDLEMODE_SHIFT)
+
++/*
++ * OCP SYSCONFIG bit shifts/masks TYPE3.
++ * This is applicable for some IPs present in AM33XX
++ */
++#define SYSC_TYPE3_SIDLEMODE_SHIFT 0
++#define SYSC_TYPE3_SIDLEMODE_MASK (0x3 << SYSC_TYPE3_SIDLEMODE_SHIFT)
++#define SYSC_TYPE3_MIDLEMODE_SHIFT 2
++#define SYSC_TYPE3_MIDLEMODE_MASK (0x3 << SYSC_TYPE3_MIDLEMODE_SHIFT)
++
++/*
++ * OCP SYSCONFIG bit shifts/masks TYPE4.
++ * This is applicable for some IPs present in AM33XX
++ */
++#define SYSC_TYPE4_CLOCKACTIVITY_SHIFT 8
++#define SYSC_TYPE4_CLOCKACTIVITY_MASK (0x3 << SYSC_TYPE4_CLOCKACTIVITY_SHIFT)
++#define SYSC_TYPE4_MIDLEMODE_SHIFT 5
++#define SYSC_TYPE4_MIDLEMODE_MASK (0x3 << SYSC_TYPE4_MIDLEMODE_SHIFT)
++#define SYSC_TYPE4_SIDLEMODE_SHIFT 3
++#define SYSC_TYPE4_SIDLEMODE_MASK (0x3 << SYSC_TYPE4_SIDLEMODE_SHIFT)
++#define SYSC_TYPE4_ENAWAKEUP_SHIFT 2
++#define SYSC_TYPE4_ENAWAKEUP_MASK (1 << SYSC_TYPE4_ENAWAKEUP_SHIFT)
++#define SYSC_TYPE4_SOFTRESET_SHIFT 1
++#define SYSC_TYPE4_SOFTRESET_MASK (1 << SYSC_TYPE4_SOFTRESET_SHIFT)
++#define SYSC_TYPE4_AUTOIDLE_SHIFT 0
++#define SYSC_TYPE4_AUTOIDLE_MASK (1 << SYSC_TYPE4_AUTOIDLE_SHIFT)
++
+ /* OCP SYSSTATUS bit shifts/masks */
+ #define SYSS_RESETDONE_SHIFT 0
+ #define SYSS_RESETDONE_MASK (1 << SYSS_RESETDONE_SHIFT)
+@@ -97,6 +125,7 @@ struct omap_hwmod_mux_info {
+ struct omap_device_pad *pads;
+ int nr_pads_dynamic;
+ struct omap_device_pad **pads_dynamic;
++ int *irqs;
+ bool enabled;
+ };
+
+@@ -416,10 +445,13 @@ struct omap_hwmod_omap4_prcm {
+ * _HWMOD_NO_MPU_PORT: no path exists for the MPU to write to this module
+ * _HWMOD_WAKEUP_ENABLED: set when the omap_hwmod code has enabled ENAWAKEUP
+ * _HWMOD_SYSCONFIG_LOADED: set when the OCP_SYSCONFIG value has been cached
++ * _HWMOD_SKIP_ENABLE: set if hwmod enabled during init (HWMOD_INIT_NO_IDLE) -
++ * causes the first call to _enable() to only update the pinmux
+ */
+ #define _HWMOD_NO_MPU_PORT (1 << 0)
+ #define _HWMOD_WAKEUP_ENABLED (1 << 1)
+ #define _HWMOD_SYSCONFIG_LOADED (1 << 2)
++#define _HWMOD_SKIP_ENABLE (1 << 3)
+
+ /*
+ * omap_hwmod._state definitions
+@@ -565,6 +597,7 @@ int omap_hwmod_read_hardreset(struct omap_hwmod *oh, const char *name);
+ int omap_hwmod_enable_clocks(struct omap_hwmod *oh);
+ int omap_hwmod_disable_clocks(struct omap_hwmod *oh);
+
++int omap_hwmod_set_master_standbymode(struct omap_hwmod *oh, u8 idlemode);
+ int omap_hwmod_set_slave_idlemode(struct omap_hwmod *oh, u8 idlemode);
+ int omap_hwmod_set_ocp_autoidle(struct omap_hwmod *oh, u8 autoidle);
+
+@@ -604,6 +637,8 @@ int omap_hwmod_get_context_loss_count(struct omap_hwmod *oh);
+
+ int omap_hwmod_no_setup_reset(struct omap_hwmod *oh);
+
++int omap_hwmod_pad_route_irq(struct omap_hwmod *oh, int pad_idx, int irq_idx);
++
+ /*
+ * Chip variant-specific hwmod init routines - XXX should be converted
+ * to use initcalls once the initial boot ordering is straightened out
+@@ -612,5 +647,6 @@ extern int omap2420_hwmod_init(void);
+ extern int omap2430_hwmod_init(void);
+ extern int omap3xxx_hwmod_init(void);
+ extern int omap44xx_hwmod_init(void);
++extern int am33xx_hwmod_init(void);
+
+ #endif
+diff --git a/arch/arm/plat-omap/include/plat/serial.h b/arch/arm/plat-omap/include/plat/serial.h
+index 1ab9fd6..d15ddb2 100644
+--- a/arch/arm/plat-omap/include/plat/serial.h
++++ b/arch/arm/plat-omap/include/plat/serial.h
+@@ -44,6 +44,7 @@
+ #define OMAP3_UART2_BASE OMAP2_UART2_BASE
+ #define OMAP3_UART3_BASE 0x49020000
+ #define OMAP3_UART4_BASE 0x49042000 /* Only on 36xx */
++#define OMAP3_UART4_AM35XX_BASE 0x4809E000 /* Only on AM35xx */
+
+ /* OMAP4 serial ports */
+ #define OMAP4_UART1_BASE OMAP2_UART1_BASE
+@@ -51,14 +52,22 @@
+ #define OMAP4_UART3_BASE 0x48020000
+ #define OMAP4_UART4_BASE 0x4806e000
+
+-/* TI816X serial ports */
+-#define TI816X_UART1_BASE 0x48020000
+-#define TI816X_UART2_BASE 0x48022000
+-#define TI816X_UART3_BASE 0x48024000
++/* TI81XX serial ports */
++#define TI81XX_UART1_BASE 0x48020000
++#define TI81XX_UART2_BASE 0x48022000
++#define TI81XX_UART3_BASE 0x48024000
+
+ /* AM3505/3517 UART4 */
+ #define AM35XX_UART4_BASE 0x4809E000 /* Only on AM3505/3517 */
+
++/* AM33XX serial port */
++#define AM33XX_UART1_BASE 0x44E09000
++#define AM33XX_UART2_BASE 0x48022000
++#define AM33XX_UART3_BASE 0x48024000
++#define AM33XX_UART4_BASE 0x481A6000
++#define AM33XX_UART5_BASE 0x481A8000
++#define AM33XX_UART6_BASE 0x481AA000
++
+ /* External port on Zoom2/3 */
+ #define ZOOM_UART_BASE 0x10000000
+ #define ZOOM_UART_VIRT 0xfa400000
+@@ -89,9 +98,11 @@
+ #define OMAP4UART2 OMAP2UART2
+ #define OMAP4UART3 43
+ #define OMAP4UART4 44
+-#define TI816XUART1 81
+-#define TI816XUART2 82
+-#define TI816XUART3 83
++#define TI81XXUART1 81
++#define TI81XXUART2 82
++#define TI81XXUART3 83
++#define AM33XXUART1 84
++#define AM33XXUART4 85
+ #define ZOOM_UART 95 /* Only on zoom2/3 */
+
+ /* This is only used by 8250.c for omap1510 */
+@@ -106,15 +117,13 @@
+ #ifndef __ASSEMBLER__
+
+ struct omap_board_data;
++struct omap_uart_port_info;
+
+ extern void omap_serial_init(void);
+-extern void omap_serial_init_port(struct omap_board_data *bdata);
+ extern int omap_uart_can_sleep(void);
+-extern void omap_uart_check_wakeup(void);
+-extern void omap_uart_prepare_suspend(void);
+-extern void omap_uart_prepare_idle(int num);
+-extern void omap_uart_resume_idle(int num);
+-extern void omap_uart_enable_irqs(int enable);
++extern void omap_serial_board_init(struct omap_uart_port_info *platform_data);
++extern void omap_serial_init_port(struct omap_board_data *bdata,
++ struct omap_uart_port_info *platform_data);
+ #endif
+
+ #endif
+diff --git a/arch/arm/plat-omap/include/plat/sram.h b/arch/arm/plat-omap/include/plat/sram.h
+index f500fc3..4558416 100644
+--- a/arch/arm/plat-omap/include/plat/sram.h
++++ b/arch/arm/plat-omap/include/plat/sram.h
+@@ -12,17 +12,24 @@
+ #define __ARCH_ARM_OMAP_SRAM_H
+
+ #ifndef __ASSEMBLY__
++#include <linux/slab.h>
++#include <linux/genalloc.h>
+ #include <asm/fncpy.h>
+
+-extern void *omap_sram_push_address(unsigned long size);
++extern struct gen_pool *omap_gen_pool;
+
+-/* Macro to push a function to the internal SRAM, using the fncpy API */
+-#define omap_sram_push(funcp, size) ({ \
+- typeof(&(funcp)) _res = NULL; \
+- void *_sram_address = omap_sram_push_address(size); \
+- if (_sram_address) \
+- _res = fncpy(_sram_address, &(funcp), size); \
+- _res; \
++/*
++ * Note that fncpy requires the SRAM address to be aligned to an 8-byte
++ * boundary, so the min_alloc_order for the pool is set appropriately.
++ */
++#define omap_sram_push(funcp, size) ({ \
++ typeof(&(funcp)) _res; \
++ size_t _sz = size; \
++ void *_sram = (void *) gen_pool_alloc(omap_gen_pool, _sz); \
++ _res = (_sram ? fncpy(_sram, &(funcp), _sz) : NULL); \
++ if (!_res) \
++ pr_err("Not enough space in SRAM\n"); \
++ _res; \
+ })
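++
++/*
++ * Illustrative use only, mirroring an existing caller (see
++ * omap3_sram_restore_context() in plat-omap/sram.c): a PM routine copies
++ * its SRAM-resident code with omap_sram_push() and keeps the returned
++ * function pointer, e.g.
++ *
++ *	_omap3_sram_configure_core_dpll =
++ *		omap_sram_push(omap3_sram_configure_core_dpll,
++ *			       omap3_sram_configure_core_dpll_sz);
++ */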
+
+ extern void omap_sram_reprogram_clock(u32 dpllctl, u32 ckctl);
+@@ -82,8 +89,10 @@ extern u32 omap3_sram_configure_core_dpll(
+ extern unsigned long omap3_sram_configure_core_dpll_sz;
+
+ #ifdef CONFIG_PM
++extern void am33xx_push_sram_idle(void);
+ extern void omap_push_sram_idle(void);
+ #else
++static inline void am33xx_push_sram_idle(void) {}
+ static inline void omap_push_sram_idle(void) {}
+ #endif /* CONFIG_PM */
+
+@@ -95,6 +104,11 @@ static inline void omap_push_sram_idle(void) {}
+ */
+ #define OMAP2_SRAM_PA 0x40200000
+ #define OMAP3_SRAM_PA 0x40200000
++#ifdef CONFIG_OMAP4_ERRATA_I688
++#define OMAP4_SRAM_PA 0x40304000
++#define OMAP4_SRAM_VA 0xfe404000
++#else
+ #define OMAP4_SRAM_PA 0x40300000
+-
++#endif
++#define AM33XX_SRAM_PA 0x40300000
+ #endif
+diff --git a/arch/arm/plat-omap/include/plat/ti816x.h b/arch/arm/plat-omap/include/plat/ti816x.h
+deleted file mode 100644
+index 50510f5..0000000
+--- a/arch/arm/plat-omap/include/plat/ti816x.h
++++ /dev/null
+@@ -1,27 +0,0 @@
+-/*
+- * This file contains the address data for various TI816X modules.
+- *
+- * Copyright (C) 2010 Texas Instruments, Inc. - http://www.ti.com/
+- *
+- * This program is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU General Public License as
+- * published by the Free Software Foundation version 2.
+- *
+- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+- * kind, whether express or implied; without even the implied warranty
+- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- */
+-
+-#ifndef __ASM_ARCH_TI816X_H
+-#define __ASM_ARCH_TI816X_H
+-
+-#define L4_SLOW_TI816X_BASE 0x48000000
+-
+-#define TI816X_SCM_BASE 0x48140000
+-#define TI816X_CTRL_BASE TI816X_SCM_BASE
+-#define TI816X_PRCM_BASE 0x48180000
+-
+-#define TI816X_ARM_INTC_BASE 0x48200000
+-
+-#endif /* __ASM_ARCH_TI816X_H */
+diff --git a/arch/arm/plat-omap/include/plat/ti81xx.h b/arch/arm/plat-omap/include/plat/ti81xx.h
+new file mode 100644
+index 0000000..8f9843f
+--- /dev/null
++++ b/arch/arm/plat-omap/include/plat/ti81xx.h
+@@ -0,0 +1,27 @@
++/*
++ * This file contains the address data for various TI81XX modules.
++ *
++ * Copyright (C) 2010 Texas Instruments, Inc. - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __ASM_ARCH_TI81XX_H
++#define __ASM_ARCH_TI81XX_H
++
++#define L4_SLOW_TI81XX_BASE 0x48000000
++
++#define TI81XX_SCM_BASE 0x48140000
++#define TI81XX_CTRL_BASE TI81XX_SCM_BASE
++#define TI81XX_PRCM_BASE 0x48180000
++
++#define TI81XX_ARM_INTC_BASE 0x48200000
++
++#endif /* __ASM_ARCH_TI81XX_H */
+diff --git a/arch/arm/plat-omap/include/plat/uncompress.h b/arch/arm/plat-omap/include/plat/uncompress.h
+index 2f472e9..8686776 100644
+--- a/arch/arm/plat-omap/include/plat/uncompress.h
++++ b/arch/arm/plat-omap/include/plat/uncompress.h
+@@ -99,9 +99,13 @@ static inline void flush(void)
+ #define DEBUG_LL_ZOOM(mach) \
+ _DEBUG_LL_ENTRY(mach, ZOOM_UART_BASE, ZOOM_PORT_SHIFT, ZOOM_UART)
+
+-#define DEBUG_LL_TI816X(p, mach) \
+- _DEBUG_LL_ENTRY(mach, TI816X_UART##p##_BASE, OMAP_PORT_SHIFT, \
+- TI816XUART##p)
++#define DEBUG_LL_TI81XX(p, mach) \
++ _DEBUG_LL_ENTRY(mach, TI81XX_UART##p##_BASE, OMAP_PORT_SHIFT, \
++ TI81XXUART##p)
++
++#define DEBUG_LL_AM33XX(p, mach) \
++ _DEBUG_LL_ENTRY(mach, AM33XX_UART##p##_BASE, OMAP_PORT_SHIFT, \
++ AM33XXUART##p)
+
+ static inline void __arch_decomp_setup(unsigned long arch_id)
+ {
+@@ -157,6 +161,7 @@ static inline void __arch_decomp_setup(unsigned long arch_id)
+ DEBUG_LL_OMAP3(3, cm_t3730);
+ DEBUG_LL_OMAP3(3, craneboard);
+ DEBUG_LL_OMAP3(3, devkit8000);
++ DEBUG_LL_OMAP3(3, encore);
+ DEBUG_LL_OMAP3(3, igep0020);
+ DEBUG_LL_OMAP3(3, igep0030);
+ DEBUG_LL_OMAP3(3, nokia_rm680);
+@@ -171,14 +176,23 @@ static inline void __arch_decomp_setup(unsigned long arch_id)
+ /* omap4 based boards using UART3 */
+ DEBUG_LL_OMAP4(3, omap_4430sdp);
+ DEBUG_LL_OMAP4(3, omap4_panda);
++ DEBUG_LL_OMAP4(3, pcm049);
+
+ /* zoom2/3 external uart */
+ DEBUG_LL_ZOOM(omap_zoom2);
+ DEBUG_LL_ZOOM(omap_zoom3);
+
+ /* TI8168 base boards using UART3 */
+- DEBUG_LL_TI816X(3, ti8168evm);
++ DEBUG_LL_TI81XX(3, ti8168evm);
++
++ /* TI8148 base boards using UART1 */
++ DEBUG_LL_TI81XX(1, ti8148evm);
++
++ /* AM33XX base boards using UART1 */
++ DEBUG_LL_AM33XX(1, am335xevm);
+
++ /* AM33XX IA boards using UART4 */
++ DEBUG_LL_AM33XX(4, am335xiaevm);
+ } while (0);
+ }
+
+diff --git a/arch/arm/plat-omap/include/plat/usb.h b/arch/arm/plat-omap/include/plat/usb.h
+index 17d3c93..9158f5e 100644
+--- a/arch/arm/plat-omap/include/plat/usb.h
++++ b/arch/arm/plat-omap/include/plat/usb.h
+@@ -88,7 +88,8 @@ struct omap_musb_board_data {
+ u8 mode;
+ u16 power;
+ unsigned extvbus:1;
+- void (*set_phy_power)(u8 on);
++ u8 instances;
++ void (*set_phy_power)(u8 id, u8 on);
+ void (*clear_irq)(void);
+ void (*set_mode)(u8 mode);
+ void (*reset)(void);
+@@ -100,9 +101,6 @@ extern void usb_musb_init(struct omap_musb_board_data *board_data);
+
+ extern void usbhs_init(const struct usbhs_omap_board_data *pdata);
+
+-extern int omap_usbhs_enable(struct device *dev);
+-extern void omap_usbhs_disable(struct device *dev);
+-
+ extern int omap4430_phy_power(struct device *dev, int ID, int on);
+ extern int omap4430_phy_set_clk(struct device *dev, int on);
+ extern int omap4430_phy_init(struct device *dev);
+@@ -111,9 +109,10 @@ extern int omap4430_phy_suspend(struct device *dev, int suspend);
+ #endif
+
+ extern void am35x_musb_reset(void);
+-extern void am35x_musb_phy_power(u8 on);
++extern void am35x_musb_phy_power(u8 id, u8 on);
+ extern void am35x_musb_clear_irq(void);
+ extern void am35x_set_mode(u8 musb_mode);
++extern void ti81xx_musb_phy_power(u8 id, u8 on);
+
+ /*
+ * FIXME correct answer depends on hmc_mode,
+@@ -273,6 +272,46 @@ static inline void omap2_usbfs_init(struct omap_usb_config *pdata)
+ #define CONF2_OTGPWRDN (1 << 2)
+ #define CONF2_DATPOL (1 << 1)
+
++/* TI81XX specific definitions */
++#define USBCTRL0 0x620
++#define USBSTAT0 0x624
++#define USBCTRL1 0x628
++#define USBSTAT1 0x62c
++
++/* TI816X PHY controls bits */
++#define TI816X_USBPHY0_NORMAL_MODE (1 << 0)
++#define TI816X_USBPHY1_NORMAL_MODE (1 << 1)
++#define TI816X_USBPHY_REFCLK_OSC (1 << 8)
++
++/* TI814X PHY controls bits */
++#define USBPHY_CM_PWRDN (1 << 0)
++#define USBPHY_OTG_PWRDN (1 << 1)
++#define USBPHY_CHGDET_DIS (1 << 2)
++#define USBPHY_CHGDET_RSTRT (1 << 3)
++#define USBPHY_SRCONDM (1 << 4)
++#define USBPHY_SINKONDP (1 << 5)
++#define USBPHY_CHGISINK_EN (1 << 6)
++#define USBPHY_CHGVSRC_EN (1 << 7)
++#define USBPHY_DMPULLUP (1 << 8)
++#define USBPHY_DPPULLUP (1 << 9)
++#define USBPHY_CDET_EXTCTL (1 << 10)
++#define USBPHY_GPIO_MODE (1 << 12)
++#define USBPHY_DPGPIO_PD (1 << 17)
++#define USBPHY_DMGPIO_PD (1 << 18)
++#define USBPHY_OTGVDET_EN (1 << 19)
++#define USBPHY_OTGSESSEND_EN (1 << 20)
++#define USBPHY_DATA_POLARITY (1 << 23)
++
++/* TI81XX only PHY bits */
++#define TI81XX_USBPHY_DPOPBUFCTL (1 << 13)
++#define TI81XX_USBPHY_DMOPBUFCTL (1 << 14)
++#define TI81XX_USBPHY_DPINPUT (1 << 15)
++#define TI81XX_USBPHY_DMINPUT (1 << 16)
++
++/* AM335X only PHY bits */
++#define AM335X_USBPHY_GPIO_SIG_INV (1 << 13)
++#define AM335X_USBPHY_GPIO_SIG_CROSS (1 << 14)
++
+ #if defined(CONFIG_ARCH_OMAP1) && defined(CONFIG_USB)
+ u32 omap1_usb0_init(unsigned nwires, unsigned is_device);
+ u32 omap1_usb1_init(unsigned nwires);
+@@ -293,4 +332,49 @@ static inline u32 omap1_usb2_init(unsigned nwires, unsigned alt_pingroup)
+ }
+ #endif
+
++/* DMA registers */
++#define TI81XX_USB_AUTOREQ_REG 0xd0
++#define TI81XX_USB_TEARDOWN_REG 0xd8
++#define USB_AUTOREQ_REG 0x14
++#define USB_TEARDOWN_REG 0x1c
++#define MOP_SOP_INTR_ENABLE 0x64
++/* 0x68-0x6c Reserved */
++#define USB_TX_MODE_REG 0x70 /* Transparent, CDC, [Generic] RNDIS */
++#define USB_RX_MODE_REG 0x74 /* Transparent, CDC, [Generic] RNDIS */
++#define EP_COUNT_MODE_REG 0x78
++#define USB_GENERIC_RNDIS_EP_SIZE_REG(n) (0x80 + (((n) - 1) << 2))
++
++#define QUEUE_THRESHOLD_INTR_ENABLE_REG 0xc0
++#define QUEUE_63_THRESHOLD_REG 0xc4
++#define QUEUE_63_THRESHOLD_INTR_CLEAR_REG 0xc8
++#define QUEUE_65_THRESHOLD_REG 0xd4
++#define QUEUE_65_THRESHOLD_INTR_CLEAR_REG 0xd8
++
++/* Mode register bits */
++#define USB_MODE_SHIFT(n) ((((n) - 1) << 1))
++#define USB_MODE_MASK(n) (3 << USB_MODE_SHIFT(n))
++#define USB_RX_MODE_SHIFT(n) USB_MODE_SHIFT(n)
++#define USB_TX_MODE_SHIFT(n) USB_MODE_SHIFT(n)
++#define USB_RX_MODE_MASK(n) USB_MODE_MASK(n)
++#define USB_TX_MODE_MASK(n) USB_MODE_MASK(n)
++#define USB_TRANSPARENT_MODE 0
++#define USB_RNDIS_MODE 1
++#define USB_CDC_MODE 2
++#define USB_GENERIC_RNDIS_MODE 3
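++
++/*
++ * Illustrative only (reg_base is hypothetical and musb_readl/musb_writel
++ * are assumed from the MUSB driver): putting RX endpoint 2 into Generic
++ * RNDIS mode would look roughly like
++ *
++ *	u32 mode = musb_readl(reg_base, USB_RX_MODE_REG);
++ *	mode &= ~USB_RX_MODE_MASK(2);
++ *	mode |= USB_GENERIC_RNDIS_MODE << USB_RX_MODE_SHIFT(2);
++ *	musb_writel(reg_base, USB_RX_MODE_REG, mode);
++ */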
++
++/* AutoReq register bits */
++#define USB_RX_AUTOREQ_SHIFT(n) (((n) - 1) << 1)
++#define USB_RX_AUTOREQ_MASK(n) (3 << USB_RX_AUTOREQ_SHIFT(n))
++#define USB_NO_AUTOREQ 0
++#define USB_AUTOREQ_ALL_BUT_EOP 1
++#define USB_AUTOREQ_ALWAYS 3
++
++/* Teardown register bits */
++#define USB_TX_TDOWN_SHIFT(n) (16 + (n))
++#define USB_TX_TDOWN_MASK(n) (1 << USB_TX_TDOWN_SHIFT(n))
++#define USB_RX_TDOWN_SHIFT(n) (n)
++#define USB_RX_TDOWN_MASK(n) (1 << USB_RX_TDOWN_SHIFT(n))
++
++#define USB_CPPI41_NUM_CH 15
++
+ #endif /* __ASM_ARCH_OMAP_USB_H */
+diff --git a/arch/arm/plat-omap/io.c b/arch/arm/plat-omap/io.c
+deleted file mode 100644
+index 333871f..0000000
+--- a/arch/arm/plat-omap/io.c
++++ /dev/null
+@@ -1,159 +0,0 @@
+-/*
+- * Common io.c file
+- * This file is created by Russell King <rmk+kernel@arm.linux.org.uk>
+- *
+- * Copyright (C) 2009 Texas Instruments
+- * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-#include <linux/module.h>
+-#include <linux/io.h>
+-#include <linux/mm.h>
+-#include <linux/dma-mapping.h>
+-
+-#include <plat/omap7xx.h>
+-#include <plat/omap1510.h>
+-#include <plat/omap16xx.h>
+-#include <plat/omap24xx.h>
+-#include <plat/omap34xx.h>
+-#include <plat/omap44xx.h>
+-
+-#define BETWEEN(p,st,sz) ((p) >= (st) && (p) < ((st) + (sz)))
+-#define XLATE(p,pst,vst) ((void __iomem *)((p) - (pst) + (vst)))
+-
+-static int initialized;
+-
+-/*
+- * Intercept ioremap() requests for addresses in our fixed mapping regions.
+- */
+-void __iomem *omap_ioremap(unsigned long p, size_t size, unsigned int type)
+-{
+-
+- WARN(!initialized, "Do not use ioremap before init_early\n");
+-
+-#ifdef CONFIG_ARCH_OMAP1
+- if (cpu_class_is_omap1()) {
+- if (BETWEEN(p, OMAP1_IO_PHYS, OMAP1_IO_SIZE))
+- return XLATE(p, OMAP1_IO_PHYS, OMAP1_IO_VIRT);
+- }
+- if (cpu_is_omap7xx()) {
+- if (BETWEEN(p, OMAP7XX_DSP_BASE, OMAP7XX_DSP_SIZE))
+- return XLATE(p, OMAP7XX_DSP_BASE, OMAP7XX_DSP_START);
+-
+- if (BETWEEN(p, OMAP7XX_DSPREG_BASE, OMAP7XX_DSPREG_SIZE))
+- return XLATE(p, OMAP7XX_DSPREG_BASE,
+- OMAP7XX_DSPREG_START);
+- }
+- if (cpu_is_omap15xx()) {
+- if (BETWEEN(p, OMAP1510_DSP_BASE, OMAP1510_DSP_SIZE))
+- return XLATE(p, OMAP1510_DSP_BASE, OMAP1510_DSP_START);
+-
+- if (BETWEEN(p, OMAP1510_DSPREG_BASE, OMAP1510_DSPREG_SIZE))
+- return XLATE(p, OMAP1510_DSPREG_BASE,
+- OMAP1510_DSPREG_START);
+- }
+- if (cpu_is_omap16xx()) {
+- if (BETWEEN(p, OMAP16XX_DSP_BASE, OMAP16XX_DSP_SIZE))
+- return XLATE(p, OMAP16XX_DSP_BASE, OMAP16XX_DSP_START);
+-
+- if (BETWEEN(p, OMAP16XX_DSPREG_BASE, OMAP16XX_DSPREG_SIZE))
+- return XLATE(p, OMAP16XX_DSPREG_BASE,
+- OMAP16XX_DSPREG_START);
+- }
+-#endif
+-#ifdef CONFIG_ARCH_OMAP2
+- if (cpu_is_omap24xx()) {
+- if (BETWEEN(p, L3_24XX_PHYS, L3_24XX_SIZE))
+- return XLATE(p, L3_24XX_PHYS, L3_24XX_VIRT);
+- if (BETWEEN(p, L4_24XX_PHYS, L4_24XX_SIZE))
+- return XLATE(p, L4_24XX_PHYS, L4_24XX_VIRT);
+- }
+- if (cpu_is_omap2420()) {
+- if (BETWEEN(p, DSP_MEM_2420_PHYS, DSP_MEM_2420_SIZE))
+- return XLATE(p, DSP_MEM_2420_PHYS, DSP_MEM_2420_VIRT);
+- if (BETWEEN(p, DSP_IPI_2420_PHYS, DSP_IPI_2420_SIZE))
+- return XLATE(p, DSP_IPI_2420_PHYS, DSP_IPI_2420_SIZE);
+- if (BETWEEN(p, DSP_MMU_2420_PHYS, DSP_MMU_2420_SIZE))
+- return XLATE(p, DSP_MMU_2420_PHYS, DSP_MMU_2420_VIRT);
+- }
+- if (cpu_is_omap2430()) {
+- if (BETWEEN(p, L4_WK_243X_PHYS, L4_WK_243X_SIZE))
+- return XLATE(p, L4_WK_243X_PHYS, L4_WK_243X_VIRT);
+- if (BETWEEN(p, OMAP243X_GPMC_PHYS, OMAP243X_GPMC_SIZE))
+- return XLATE(p, OMAP243X_GPMC_PHYS, OMAP243X_GPMC_VIRT);
+- if (BETWEEN(p, OMAP243X_SDRC_PHYS, OMAP243X_SDRC_SIZE))
+- return XLATE(p, OMAP243X_SDRC_PHYS, OMAP243X_SDRC_VIRT);
+- if (BETWEEN(p, OMAP243X_SMS_PHYS, OMAP243X_SMS_SIZE))
+- return XLATE(p, OMAP243X_SMS_PHYS, OMAP243X_SMS_VIRT);
+- }
+-#endif
+-#ifdef CONFIG_ARCH_OMAP3
+- if (cpu_is_ti816x()) {
+- if (BETWEEN(p, L4_34XX_PHYS, L4_34XX_SIZE))
+- return XLATE(p, L4_34XX_PHYS, L4_34XX_VIRT);
+- } else if (cpu_is_omap34xx()) {
+- if (BETWEEN(p, L3_34XX_PHYS, L3_34XX_SIZE))
+- return XLATE(p, L3_34XX_PHYS, L3_34XX_VIRT);
+- if (BETWEEN(p, L4_34XX_PHYS, L4_34XX_SIZE))
+- return XLATE(p, L4_34XX_PHYS, L4_34XX_VIRT);
+- if (BETWEEN(p, OMAP34XX_GPMC_PHYS, OMAP34XX_GPMC_SIZE))
+- return XLATE(p, OMAP34XX_GPMC_PHYS, OMAP34XX_GPMC_VIRT);
+- if (BETWEEN(p, OMAP343X_SMS_PHYS, OMAP343X_SMS_SIZE))
+- return XLATE(p, OMAP343X_SMS_PHYS, OMAP343X_SMS_VIRT);
+- if (BETWEEN(p, OMAP343X_SDRC_PHYS, OMAP343X_SDRC_SIZE))
+- return XLATE(p, OMAP343X_SDRC_PHYS, OMAP343X_SDRC_VIRT);
+- if (BETWEEN(p, L4_PER_34XX_PHYS, L4_PER_34XX_SIZE))
+- return XLATE(p, L4_PER_34XX_PHYS, L4_PER_34XX_VIRT);
+- if (BETWEEN(p, L4_EMU_34XX_PHYS, L4_EMU_34XX_SIZE))
+- return XLATE(p, L4_EMU_34XX_PHYS, L4_EMU_34XX_VIRT);
+- }
+-#endif
+-#ifdef CONFIG_ARCH_OMAP4
+- if (cpu_is_omap44xx()) {
+- if (BETWEEN(p, L3_44XX_PHYS, L3_44XX_SIZE))
+- return XLATE(p, L3_44XX_PHYS, L3_44XX_VIRT);
+- if (BETWEEN(p, L4_44XX_PHYS, L4_44XX_SIZE))
+- return XLATE(p, L4_44XX_PHYS, L4_44XX_VIRT);
+- if (BETWEEN(p, OMAP44XX_GPMC_PHYS, OMAP44XX_GPMC_SIZE))
+- return XLATE(p, OMAP44XX_GPMC_PHYS, OMAP44XX_GPMC_VIRT);
+- if (BETWEEN(p, OMAP44XX_EMIF1_PHYS, OMAP44XX_EMIF1_SIZE))
+- return XLATE(p, OMAP44XX_EMIF1_PHYS, \
+- OMAP44XX_EMIF1_VIRT);
+- if (BETWEEN(p, OMAP44XX_EMIF2_PHYS, OMAP44XX_EMIF2_SIZE))
+- return XLATE(p, OMAP44XX_EMIF2_PHYS, \
+- OMAP44XX_EMIF2_VIRT);
+- if (BETWEEN(p, OMAP44XX_DMM_PHYS, OMAP44XX_DMM_SIZE))
+- return XLATE(p, OMAP44XX_DMM_PHYS, OMAP44XX_DMM_VIRT);
+- if (BETWEEN(p, L4_PER_44XX_PHYS, L4_PER_44XX_SIZE))
+- return XLATE(p, L4_PER_44XX_PHYS, L4_PER_44XX_VIRT);
+- if (BETWEEN(p, L4_EMU_44XX_PHYS, L4_EMU_44XX_SIZE))
+- return XLATE(p, L4_EMU_44XX_PHYS, L4_EMU_44XX_VIRT);
+- }
+-#endif
+- return __arm_ioremap_caller(p, size, type, __builtin_return_address(0));
+-}
+-EXPORT_SYMBOL(omap_ioremap);
+-
+-void omap_iounmap(volatile void __iomem *addr)
+-{
+- unsigned long virt = (unsigned long)addr;
+-
+- if (virt >= VMALLOC_START && virt < VMALLOC_END)
+- __iounmap(addr);
+-}
+-EXPORT_SYMBOL(omap_iounmap);
+-
+-void __init omap_init_consistent_dma_size(void)
+-{
+-#ifdef CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE
+- init_consistent_dma_size(CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE << 20);
+-#endif
+-}
+-
+-void __init omap_ioremap_init(void)
+-{
+- initialized++;
+-}
+diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
+index ad80112..c2d550b27 100644
+--- a/arch/arm/plat-omap/mailbox.c
++++ b/arch/arm/plat-omap/mailbox.c
+@@ -59,6 +59,14 @@ static inline int mbox_fifo_full(struct omap_mbox *mbox)
+ {
+ return mbox->ops->fifo_full(mbox);
+ }
++static inline int mbox_fifo_needs_flush(struct omap_mbox *mbox)
++{
++ return mbox->ops->fifo_needs_flush(mbox);
++}
++static inline mbox_msg_t mbox_fifo_readback(struct omap_mbox *mbox)
++{
++ return mbox->ops->fifo_readback(mbox);
++}
+
+ /* Mailbox IRQ handle functions */
+ static inline void ack_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
+@@ -116,6 +124,28 @@ out:
+ }
+ EXPORT_SYMBOL(omap_mbox_msg_send);
+
++/*
++ * Flush the Rx FIFO by reading back the messages.
++ * Since the normal expectation is that the Rx side does the
++ * reading, emit a debug message to indicate when we really flush.
++ * Returns the number of messages read back.
++ */
++int omap_mbox_msg_rx_flush(struct omap_mbox *mbox)
++{
++ int ret = 0;
++ mbox_msg_t msg;
++
++ while (!mbox_fifo_needs_flush(mbox)) {
++ ret++;
++ msg = mbox_fifo_readback(mbox);
++ }
++ if (ret)
++ pr_debug("Flushed %s Rx FIFO via %d readbacks\n", mbox->name, ret);
++
++ return ret;
++}
++EXPORT_SYMBOL(omap_mbox_msg_rx_flush);
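++
++/*
++ * Illustrative use only (not wired up by this patch): a mailbox user could
++ * drain stale messages before tearing its receive path down, e.g.
++ *
++ *	if (omap_mbox_msg_rx_flush(mbox))
++ *		pr_warn("discarded stale mailbox messages\n");
++ */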
++
+ static void mbox_tx_tasklet(unsigned long tx_data)
+ {
+ struct omap_mbox *mbox = (struct omap_mbox *)tx_data;
+diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
+index e8d9869..3733877 100644
+--- a/arch/arm/plat-omap/omap_device.c
++++ b/arch/arm/plat-omap/omap_device.c
+@@ -97,14 +97,7 @@
+ #define USE_WAKEUP_LAT 0
+ #define IGNORE_WAKEUP_LAT 1
+
+-static int omap_device_register(struct platform_device *pdev);
+ static int omap_early_device_register(struct platform_device *pdev);
+-static struct omap_device *omap_device_alloc(struct platform_device *pdev,
+- struct omap_hwmod **ohs, int oh_cnt,
+- struct omap_device_pm_latency *pm_lats,
+- int pm_lats_cnt);
+-static void omap_device_delete(struct omap_device *od);
+-
+
+ static struct omap_device_pm_latency omap_default_latency[] = {
+ {
+@@ -509,7 +502,7 @@ static int omap_device_fill_resources(struct omap_device *od,
+ *
+ * Returns an struct omap_device pointer or ERR_PTR() on error;
+ */
+-static struct omap_device *omap_device_alloc(struct platform_device *pdev,
++struct omap_device *omap_device_alloc(struct platform_device *pdev,
+ struct omap_hwmod **ohs, int oh_cnt,
+ struct omap_device_pm_latency *pm_lats,
+ int pm_lats_cnt)
+@@ -591,7 +584,7 @@ oda_exit1:
+ return ERR_PTR(ret);
+ }
+
+-static void omap_device_delete(struct omap_device *od)
++void omap_device_delete(struct omap_device *od)
+ {
+ if (!od)
+ return;
+@@ -817,7 +810,7 @@ static struct dev_pm_domain omap_device_pm_domain = {
+ * platform_device_register() on the underlying platform_device.
+ * Returns the return value of platform_device_register().
+ */
+-static int omap_device_register(struct platform_device *pdev)
++int omap_device_register(struct platform_device *pdev)
+ {
+ pr_debug("omap_device: %s: registering\n", pdev->name);
+
+@@ -1130,6 +1123,28 @@ int omap_device_enable_clocks(struct omap_device *od)
+ return 0;
+ }
+
++/**
++ * omap_device_reset - reset the module.
++ * @dev: struct device *
++ *
++ * Reset all the hwmods associated with the device @dev.
++ */
++int omap_device_reset(struct device *dev)
++{
++ int r = 0;
++ int i;
++ struct platform_device *pdev = to_platform_device(dev);
++ struct omap_device *odev = to_omap_device(pdev);
++ struct omap_hwmod *oh;
++
++ for (i = 0; i < odev->hwmods_cnt; i++) {
++ oh = odev->hwmods[i];
++ r |= omap_hwmod_reset(oh);
++ }
++ return r;
++}
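++
++/*
++ * Illustrative call site only (hypothetical driver code): a driver that
++ * needs to re-initialize its IP block could do
++ *
++ *	ret = omap_device_reset(&pdev->dev);
++ *	if (ret)
++ *		dev_warn(&pdev->dev, "hwmod reset failed\n");
++ */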
++
++
+ struct device omap_device_parent = {
+ .init_name = "omap",
+ .parent = &platform_bus,
+diff --git a/arch/arm/plat-omap/sdma2edma.c b/arch/arm/plat-omap/sdma2edma.c
+new file mode 100644
+index 0000000..c3c3a1e
+--- /dev/null
++++ b/arch/arm/plat-omap/sdma2edma.c
+@@ -0,0 +1,359 @@
++/*
++ * sdma2edma.c
++ *
++ * SDMA to EDMA3 Wrapper.
++ *
++ * NOTE: Since we are invoking EDMA API, comments for all APIs in this file
++ * are EDMA specific.
++ *
++ * Copyright (C) 2010-2011 Texas Instruments.
++ * Author: Mansoor Ahamed <mansoor.ahamed@ti.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/sched.h>
++#include <linux/spinlock.h>
++#include <linux/errno.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/io.h>
++
++#include <asm/system.h>
++#include <mach/hardware.h>
++#include <plat/dma.h>
++#include <plat/tc.h>
++
++/* some edma specific hacks which might change */
++#include <mach/edma.h>
++
++/**
++ * omap_request_dma - allocate DMA channel and paired parameter RAM
++ * @dev_id: specific channel to allocate; negative for "any unmapped channel"
++ * @callback: optional; to be issued on DMA completion or errors
++ * @data: passed to callback
++ * @dma_ch_out: allocated channel number returned in this variable
++ *
++ * This allocates a DMA channel and its associated parameter RAM slot.
++ * The parameter RAM is initialized to hold a dummy transfer.
++ *
++ * Normal use is to pass a specific channel number as @channel, to make
++ * use of hardware events mapped to that channel. When the channel will
++ * be used only for software triggering or event chaining, channels not
++ * mapped to hardware events (or mapped to unused events) are preferable.
++ *
++ * DMA transfers start from a channel using edma_start(), or by
++ * chaining. When the transfer described in that channel's parameter RAM
++ * slot completes, that slot's data may be reloaded through a link.
++ *
++ * DMA errors are only reported to the @callback associated with the
++ * channel driving that transfer, but transfer completion callbacks can
++ * be sent to another channel under control of the TCC field in
++ * the option word of the transfer's parameter RAM set. Drivers must not
++ * use DMA transfer completion callbacks for channels they did not allocate.
++ * (The same applies to TCC codes used in transfer chaining.)
++ *
++ * TODO:
++ * . In the edma call, the last param, i.e. the TC, is hard-coded to EVENTQ_2
++ * . The callback's ch_status, which should be used to stop/clean up EDMA,
++ *   is currently ignored in some drivers (e.g. McSPI)
++ */
++int omap_request_dma(int dev_id, const char *dev_name,
++ void (*callback)(int lch, u16 ch_status, void *data),
++ void *data, int *dma_ch_out)
++{
++ struct edmacc_param p_ram;
++ typedef void (*EDMA_CALLBACK)(unsigned, u16, void*);
++ EDMA_CALLBACK edma_callback = (EDMA_CALLBACK)(callback);
++
++ *dma_ch_out = edma_alloc_channel(dev_id, edma_callback, data, EVENTQ_2);
++ if (*dma_ch_out < 0)
++ return -1;
++
++ /* enable interrupts */
++ edma_read_slot(*dma_ch_out, &p_ram);
++ p_ram.opt |= TCINTEN | EDMA_TCC(EDMA_CHAN_SLOT(*dma_ch_out));
++ edma_write_slot(*dma_ch_out, &p_ram);
++
++ return 0;
++}
++EXPORT_SYMBOL(omap_request_dma);
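++
++/*
++ * Illustrative use only (dev_id, my_dma_callback, priv and the counts are
++ * hypothetical; ABSYNC is the EDMA sync value this wrapper checks for). A
++ * caller requests a channel, sets the transfer parameters (plus source and
++ * destination via omap_set_dma_src_params()/omap_set_dma_dest_params())
++ * and starts it, roughly:
++ *
++ *	int lch;
++ *
++ *	if (!omap_request_dma(dev_id, "my-dev", my_dma_callback, priv, &lch)) {
++ *		omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S32,
++ *					     elem_count, frame_count,
++ *					     ABSYNC, dev_id, 0);
++ *		omap_start_dma(lch);
++ *	}
++ */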
++
++/**
++ * omap_free_dma - deallocate DMA channel
++ * @lch: dma channel returned from edma_alloc_channel()
++ *
++ * This deallocates the DMA channel and associated parameter RAM slot
++ * allocated by omap_request_dma().
++ *
++ * Callers are responsible for ensuring the channel is inactive, and
++ * will not be reactivated by linking, chaining, or software calls to
++ * omap_start_dma().
++ */
++void omap_free_dma(int lch)
++{
++ edma_free_channel((unsigned)lch);
++}
++EXPORT_SYMBOL(omap_free_dma);
++
++/**
++ * omap_start_dma - start dma on a channel
++ * @lch: channel being activated
++ *
++ * Channels with event associations will be triggered by their hardware
++ * events, and channels without such associations will be triggered by
++ * software. (At this writing there is no interface for using software
++ * triggers except with channels that don't support hardware triggers.)
++ *
++ */
++void omap_start_dma(int lch)
++{
++ edma_start((unsigned)lch);
++}
++EXPORT_SYMBOL(omap_start_dma);
++
++/**
++ * omap_stop_dma - stops dma on the channel passed
++ * @lch: channel being deactivated
++ *
++ * When @lch is a channel, any active transfer is paused and
++ * all pending hardware events are cleared. The current transfer
++ * may not be resumed, and the channel's Parameter RAM should be
++ * reinitialized before being reused.
++ */
++void omap_stop_dma(int lch)
++{
++ edma_stop((unsigned)lch);
++}
++EXPORT_SYMBOL(omap_stop_dma);
++
++/**
++ * omap_cleanup_dma - Bring back DMA to initial state
++ * @lch: channel being cleaned up
++ *
++ * It cleans the ParamEntry and brings EDMA back to its initial state if the
++ * media was removed before EDMA finished. It is useful for removable media.
++ *
++ *
++ * FIXME this should not be needed ... edma_stop() should suffice.
++ *
++ */
++void omap_cleanup_dma(int lch)
++{
++ edma_clean_channel((unsigned)lch);
++}
++EXPORT_SYMBOL(omap_cleanup_dma);
++
++/**
++ * omap_set_dma_transfer_params - configure DMA transfer parameters
++ * @lch: parameter RAM slot being configured
++ * @data_type: how many bytes per array (at least one)
++ * @elem_count: how many arrays per frame (at least one)
++ * @frame_count: how many frames per block (at least one)
++ * @sync_mode: ASYNC or ABSYNC
++ * @dma_trigger: device id (not used)
++ * @src_or_dst_synch: not used
++ *
++ * See the EDMA3 documentation to understand how to configure and link
++ * transfers using the fields in PaRAM slots. If you are not doing it
++ * all at once with edma_write_slot(), you will use this routine
++ * plus two calls each for source and destination, setting the initial
++ * address and saying how to index that address.
++ *
++ * An example of an A-Synchronized transfer is a serial link using a
++ * single word shift register. In that case, @acnt would be equal to
++ * that word size; the serial controller issues a DMA synchronization
++ * event to transfer each word, and memory access by the DMA transfer
++ * controller will be word-at-a-time.
++ *
++ * An example of an AB-Synchronized transfer is a device using a FIFO.
++ * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
++ * The controller with the FIFO issues DMA synchronization events when
++ * the FIFO threshold is reached, and the DMA transfer controller will
++ * transfer one frame to (or from) the FIFO. It will probably use
++ * efficient burst modes to access memory.
++ *
++ * . dma_trigger and the channel number are ignored for EDMA
++ * . bcnt_rld is set to the same value as bcnt
++ * TODO
++ * . what is src_or_dst_synch?
++ */
++void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
++ int frame_count, int sync_mode,
++ int dma_trigger, int src_or_dst_synch)
++{
++ int d_type[3] = {1, 2, 4};
++ if ((enum sync_dimension)sync_mode > ABSYNC) {
++ printk(KERN_ERR "SDMA2EDMA: Line:%d : Param \'sync_mode\' out"
++ " of range\n", __LINE__);
++ return;
++ }
++
++ /* translate data_type */
++ data_type = d_type[data_type];
++
++ edma_set_transfer_params(lch, (u16)data_type, (u16)elem_count,
++ (u16)frame_count, (u16)elem_count,
++ (enum sync_dimension)sync_mode);
++}
++EXPORT_SYMBOL(omap_set_dma_transfer_params);
++
++/**
++ * omap_set_dma_dest_params - Set initial DMA destination addr in param RAM slot
++ * @lch: parameter RAM slot being configured
++ * @dest_port: not used
++ * @dest_amode: INCR, except in very rare cases
++ * @dest_start: physical address of destination (memory, controller FIFO, etc)
++ * @dst_ei: byte offset between destination arrays in a frame
++ * @dst_fi: byte offset between destination frames in a block
++ *
++ * Note that the destination address is modified during the DMA transfer
++ * according to edma_set_dest_index().
++ *
++ * TODO
++ * . Not sure about dst_ei and dst_fi
++ * . fifo_width for edma is not available in the sdma API, hence it is set to
++ *   W32BIT
++ * . dest_port is ignored
++ */
++void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
++ unsigned long dest_start, int dst_ei, int dst_fi)
++{
++ if ((enum address_mode)dest_amode > FIFO) {
++ printk(KERN_ERR "SDMA2EDMA: Line:%d : Param \'dest_amode\' out"
++ " of range\n", __LINE__);
++ return;
++ }
++
++ edma_set_dest((unsigned)lch, (dma_addr_t)dest_start,
++ !dest_amode, W32BIT);
++ edma_set_dest_index((unsigned)(lch), (s16)dst_ei, (s16)dst_fi);
++}
++EXPORT_SYMBOL(omap_set_dma_dest_params);
++
++/**
++ * omap_set_dma_src_params - Set initial DMA source addr in param RAM slot
++ * @lch: parameter RAM slot being configured
++ * @src_port: not used
++ * @src_amode: INCR, except in very rare cases
++ * @src_start: physical address of source (memory, controller FIFO, etc)
++ * @src_ei: byte offset between source arrays in a frame
++ * @src_fi: byte offset between source frames in a block
++ *
++ * Note that the source address is modified during the DMA transfer
++ * according to edma_set_src_index().
++ *
++ * TODO
++ * . Not sure about src_ei and src_fi
++ * . fifo_width for edma is not available in the sdma API, hence it is set to
++ *   W32BIT
++ * . src_port is ignored
++ */
++void omap_set_dma_src_params(int lch, int src_port, int src_amode,
++ unsigned long src_start, int src_ei, int src_fi)
++{
++ if ((enum address_mode)src_amode > FIFO) {
++ printk(KERN_ERR "SDMA2EDMA: Line:%d : Param \'src_amode\' out "
++ "of range\n", __LINE__);
++ return;
++ }
++
++ edma_set_src((unsigned)lch, (dma_addr_t)src_start,
++ !src_amode, W32BIT);
++ edma_set_src_index((unsigned)(lch), (s16)src_ei, (s16)src_fi);
++}
++EXPORT_SYMBOL(omap_set_dma_src_params);
++
++void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
++{
++	printk(KERN_WARNING "omap_set_dma_src_burst_mode: un-supported in SDMA"
++		" wrapper\n");
++}
++EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
++
++void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
++{
++	printk(KERN_WARNING "omap_set_dma_dest_burst_mode: un-supported in SDMA"
++		" wrapper\n");
++}
++EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
++
++dma_addr_t omap_get_dma_dst_pos(int lch)
++{
++ printk(KERN_WARNING "omap_get_dma_dst_pos: un-supported in SDMA"
++ " wrapper\n");
++ return 0;
++}
++EXPORT_SYMBOL(omap_get_dma_dst_pos);
++
++int omap_get_dma_active_status(int lch)
++{
++ printk(KERN_WARNING "omap_get_dma_active_status: un-supported in SDMA"
++ " wrapper\n");
++ return 0;
++}
++EXPORT_SYMBOL(omap_get_dma_active_status);
++
++void omap_dma_global_context_save(void)
++{
++ printk(KERN_WARNING "omap_dma_global_context_save: un-supported in SDMA"
++ " wrapper\n");
++}
++EXPORT_SYMBOL(omap_dma_global_context_save);
++
++void omap_dma_global_context_restore(void)
++{
++	printk(KERN_WARNING "omap_dma_global_context_restore: un-supported in"
++		" SDMA wrapper\n");
++}
++EXPORT_SYMBOL(omap_dma_global_context_restore);
++
++int omap_dma_running(void)
++{
++ printk(KERN_WARNING "omap_dma_running: un-supported in SDMA wrapper\n");
++
++ return 0;
++}
++EXPORT_SYMBOL(omap_dma_running);
++
++void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
++{
++	printk(KERN_WARNING "omap_set_dma_color_mode: un-supported in"
++		" SDMA wrapper\n");
++}
++EXPORT_SYMBOL(omap_set_dma_color_mode);
++
++void omap_set_dma_dest_data_pack(int lch, int enable)
++{
++	printk(KERN_WARNING "omap_set_dma_dest_data_pack: un-supported in"
++		" SDMA wrapper\n");
++}
++EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
++
++void omap_set_dma_src_data_pack(int lch, int enable)
++{
++	printk(KERN_WARNING "omap_set_dma_src_data_pack: un-supported in"
++		" SDMA wrapper\n");
++}
++EXPORT_SYMBOL(omap_set_dma_src_data_pack);
++
++void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
++{
++	printk(KERN_WARNING "omap_set_dma_write_mode: un-supported in"
++		" SDMA wrapper\n");
++}
++EXPORT_SYMBOL(omap_set_dma_write_mode);
+diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
+index 8b28664..09544e1 100644
+--- a/arch/arm/plat-omap/sram.c
++++ b/arch/arm/plat-omap/sram.c
+@@ -40,7 +40,11 @@
+ #define OMAP1_SRAM_PA 0x20000000
+ #define OMAP2_SRAM_PUB_PA (OMAP2_SRAM_PA + 0xf800)
+ #define OMAP3_SRAM_PUB_PA (OMAP3_SRAM_PA + 0x8000)
++#ifdef CONFIG_OMAP4_ERRATA_I688
++#define OMAP4_SRAM_PUB_PA OMAP4_SRAM_PA
++#else
+ #define OMAP4_SRAM_PUB_PA (OMAP4_SRAM_PA + 0x4000)
++#endif
+
+ #if defined(CONFIG_ARCH_OMAP2PLUS)
+ #define SRAM_BOOTLOADER_SZ 0x00
+@@ -65,7 +69,6 @@
+ static unsigned long omap_sram_start;
+ static void __iomem *omap_sram_base;
+ static unsigned long omap_sram_size;
+-static void __iomem *omap_sram_ceil;
+
+ /*
+ * Depending on the target RAMFS firewall setup, the public usable amount of
+@@ -82,7 +85,7 @@ static int is_sram_locked(void)
+ __raw_writel(0xCFDE, OMAP24XX_VA_READPERM0); /* all i-read */
+ __raw_writel(0xCFDE, OMAP24XX_VA_WRITEPERM0); /* all i-write */
+ }
+- if (cpu_is_omap34xx()) {
++ if (cpu_is_omap34xx() && !cpu_is_am33xx()) {
+ __raw_writel(0xFFFF, OMAP34XX_VA_REQINFOPERM0); /* all q-vects */
+ __raw_writel(0xFFFF, OMAP34XX_VA_READPERM0); /* all i-read */
+ __raw_writel(0xFFFF, OMAP34XX_VA_WRITEPERM0); /* all i-write */
+@@ -94,6 +97,9 @@ static int is_sram_locked(void)
+ return 1; /* assume locked with no PPA or security driver */
+ }
+
++struct gen_pool *omap_gen_pool;
++EXPORT_SYMBOL_GPL(omap_gen_pool);
++
+ /*
+ * The amount of SRAM depends on the core type.
+ * Note that we cannot try to test for SRAM here because writes
+@@ -104,7 +110,7 @@ static void __init omap_detect_sram(void)
+ {
+ if (cpu_class_is_omap2()) {
+ if (is_sram_locked()) {
+- if (cpu_is_omap34xx()) {
++ if (cpu_is_omap34xx() && !cpu_is_am33xx()) {
+ omap_sram_start = OMAP3_SRAM_PUB_PA;
+ if ((omap_type() == OMAP2_DEVICE_TYPE_EMU) ||
+ (omap_type() == OMAP2_DEVICE_TYPE_SEC)) {
+@@ -120,12 +126,15 @@ static void __init omap_detect_sram(void)
+ omap_sram_size = 0x800; /* 2K */
+ }
+ } else {
+- if (cpu_is_omap34xx()) {
++ if (cpu_is_omap34xx() && !cpu_is_am33xx()) {
+ omap_sram_start = OMAP3_SRAM_PA;
+ omap_sram_size = 0x10000; /* 64K */
+ } else if (cpu_is_omap44xx()) {
+ omap_sram_start = OMAP4_SRAM_PA;
+ omap_sram_size = 0xe000; /* 56K */
++ } else if (cpu_is_am33xx()) {
++ omap_sram_start = AM33XX_SRAM_PA;
++ omap_sram_size = 0x10000; /* 64K */
+ } else {
+ omap_sram_start = OMAP2_SRAM_PA;
+ if (cpu_is_omap242x())
+@@ -141,11 +150,9 @@ static void __init omap_detect_sram(void)
+ omap_sram_size = 0x32000; /* 200K */
+ else if (cpu_is_omap15xx())
+ omap_sram_size = 0x30000; /* 192K */
+- else if (cpu_is_omap1610() || cpu_is_omap1621() ||
+- cpu_is_omap1710())
++ else if (cpu_is_omap1610() || cpu_is_omap1611() ||
++ cpu_is_omap1621() || cpu_is_omap1710())
+ omap_sram_size = 0x4000; /* 16K */
+- else if (cpu_is_omap1611())
+- omap_sram_size = SZ_256K;
+ else {
+ pr_err("Could not detect SRAM size\n");
+ omap_sram_size = 0x4000;
+@@ -163,6 +170,10 @@ static void __init omap_map_sram(void)
+ if (omap_sram_size == 0)
+ return;
+
++#ifdef CONFIG_OMAP4_ERRATA_I688
++ omap_sram_start += PAGE_SIZE;
++ omap_sram_size -= SZ_16K;
++#endif
+ if (cpu_is_omap34xx()) {
+ /*
+ * SRAM must be marked as non-cached on OMAP3 since the
+@@ -182,39 +193,24 @@ static void __init omap_map_sram(void)
+ return;
+ }
+
+- omap_sram_ceil = omap_sram_base + omap_sram_size;
+-
+ /*
+ * Looks like we need to preserve some bootloader code at the
+ * beginning of SRAM for jumping to flash for reboot to work...
+ */
+ memset((void *)omap_sram_base + SRAM_BOOTLOADER_SZ, 0,
+ omap_sram_size - SRAM_BOOTLOADER_SZ);
+-}
+-
+-/*
+- * Memory allocator for SRAM: calculates the new ceiling address
+- * for pushing a function using the fncpy API.
+- *
+- * Note that fncpy requires the returned address to be aligned
+- * to an 8-byte boundary.
+- */
+-void *omap_sram_push_address(unsigned long size)
+-{
+- unsigned long available, new_ceil = (unsigned long)omap_sram_ceil;
+-
+- available = omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ);
+-
+- if (size > available) {
+- pr_err("Not enough space in SRAM\n");
+- return NULL;
++ {
++ /* The first SRAM_BOOTLOADER_SZ of SRAM are reserved */
++ void *base = (void *)omap_sram_base + SRAM_BOOTLOADER_SZ;
++ phys_addr_t phys = omap_sram_start + SRAM_BOOTLOADER_SZ;
++ size_t len = omap_sram_size - SRAM_BOOTLOADER_SZ;
++
++ omap_gen_pool = gen_pool_create(ilog2(FNCPY_ALIGN), -1);
++ if (omap_gen_pool)
++ WARN_ON(gen_pool_add_virt(omap_gen_pool,
++ (unsigned long)base, phys, len, -1));
++ WARN_ON(!omap_gen_pool);
+ }
+-
+- new_ceil -= size;
+- new_ceil = ROUND_DOWN(new_ceil, FNCPY_ALIGN);
+- omap_sram_ceil = IOMEM(new_ceil);
+-
+- return (void *)omap_sram_ceil;
+ }
+
+ #ifdef CONFIG_ARCH_OMAP1
+@@ -224,6 +220,9 @@ static void (*_omap_sram_reprogram_clock)(u32 dpllctl, u32 ckctl);
+ void omap_sram_reprogram_clock(u32 dpllctl, u32 ckctl)
+ {
+ BUG_ON(!_omap_sram_reprogram_clock);
++ /* On 730, bit 13 must always be 1 */
++ if (cpu_is_omap7xx())
++ ckctl |= 0x2000;
+ _omap_sram_reprogram_clock(dpllctl, ckctl);
+ }
+
+@@ -340,8 +339,6 @@ u32 omap3_configure_core_dpll(u32 m2, u32 unlock_dll, u32 f, u32 inc,
+ #ifdef CONFIG_PM
+ void omap3_sram_restore_context(void)
+ {
+- omap_sram_ceil = omap_sram_base + omap_sram_size;
+-
+ _omap3_sram_configure_core_dpll =
+ omap_sram_push(omap3_sram_configure_core_dpll,
+ omap3_sram_configure_core_dpll_sz);
+@@ -359,6 +356,12 @@ static inline int omap34xx_sram_init(void)
+ return 0;
+ }
+
++static inline int am33xx_sram_init(void)
++{
++ am33xx_push_sram_idle();
++ return 0;
++}
++
+ int __init omap_sram_init(void)
+ {
+ omap_detect_sram();
+@@ -370,8 +373,10 @@ int __init omap_sram_init(void)
+ omap242x_sram_init();
+ else if (cpu_is_omap2430())
+ omap243x_sram_init();
+- else if (cpu_is_omap34xx())
++ else if (cpu_is_omap34xx() && !cpu_is_am33xx())
+ omap34xx_sram_init();
++ else if (cpu_is_am33xx())
++ am33xx_sram_init();
+
+ return 0;
+ }
+diff --git a/arch/arm/plat-s5p/Kconfig b/arch/arm/plat-s5p/Kconfig
+index 9b9968f..8167ce6 100644
+--- a/arch/arm/plat-s5p/Kconfig
++++ b/arch/arm/plat-s5p/Kconfig
+@@ -11,6 +11,7 @@ config PLAT_S5P
+ default y
+ select ARM_VIC if !ARCH_EXYNOS4
+ select ARM_GIC if ARCH_EXYNOS4
++ select GIC_NON_BANKED if ARCH_EXYNOS4
+ select NO_IOPORT
+ select ARCH_REQUIRE_GPIOLIB
+ select S3C_GPIO_TRACK
+diff --git a/arch/arm/plat-spear/include/plat/system.h b/arch/arm/plat-spear/include/plat/system.h
+index a235fa0..1171f22 100644
+--- a/arch/arm/plat-spear/include/plat/system.h
++++ b/arch/arm/plat-spear/include/plat/system.h
+@@ -31,7 +31,7 @@ static inline void arch_reset(char mode, const char *cmd)
+ {
+ if (mode == 's') {
+ /* software reset, Jump into ROM at address 0 */
+- cpu_reset(0);
++ soft_restart(0);
+ } else {
+ /* hardware reset, Use on-chip reset capability */
+ sysctl_soft_reset((void __iomem *)VA_SPEAR_SYS_CTRL_BASE);
+diff --git a/arch/arm/plat-spear/include/plat/vmalloc.h b/arch/arm/plat-spear/include/plat/vmalloc.h
+deleted file mode 100644
+index 8c8b24d..0000000
+--- a/arch/arm/plat-spear/include/plat/vmalloc.h
++++ /dev/null
+@@ -1,19 +0,0 @@
+-/*
+- * arch/arm/plat-spear/include/plat/vmalloc.h
+- *
+- * Defining Vmalloc area for SPEAr platform
+- *
+- * Copyright (C) 2009 ST Microelectronics
+- * Viresh Kumar<viresh.kumar@st.com>
+- *
+- * This file is licensed under the terms of the GNU General Public
+- * License version 2. This program is licensed "as is" without any
+- * warranty of any kind, whether express or implied.
+- */
+-
+-#ifndef __PLAT_VMALLOC_H
+-#define __PLAT_VMALLOC_H
+-
+-#define VMALLOC_END 0xF0000000UL
+-
+-#endif /* __PLAT_VMALLOC_H */
+diff --git a/arch/arm/plat-tcc/include/mach/vmalloc.h b/arch/arm/plat-tcc/include/mach/vmalloc.h
+deleted file mode 100644
+index 99414d9..0000000
+--- a/arch/arm/plat-tcc/include/mach/vmalloc.h
++++ /dev/null
+@@ -1,10 +0,0 @@
+-/*
+- * Author: <linux@telechips.com>
+- * Created: June 10, 2008
+- *
+- * Copyright (C) 2000 Russell King.
+- * Copyright (C) 2008-2009 Telechips
+- *
+- * Licensed under the terms of the GPL v2.
+- */
+-#define VMALLOC_END 0xf0000000UL
+diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
+index ccbe16f..217ea76 100644
+--- a/arch/arm/tools/mach-types
++++ b/arch/arm/tools/mach-types
+@@ -1126,3 +1126,5 @@ atdgp318 MACH_ATDGP318 ATDGP318 3494
+ m28evk MACH_M28EVK M28EVK 3613
+ smdk4212 MACH_SMDK4212 SMDK4212 3638
+ smdk4412 MACH_SMDK4412 SMDK4412 3765
++am335xevm MACH_AM335XEVM AM335XEVM 3589
++am335xiaevm MACH_AM335XIAEVM AM335XIAEVM 3684
+diff --git a/drivers/Kconfig b/drivers/Kconfig
+index b5e6f24..ba3886c 100644
+--- a/drivers/Kconfig
++++ b/drivers/Kconfig
+@@ -2,6 +2,8 @@ menu "Device Drivers"
+
+ source "drivers/base/Kconfig"
+
++source "drivers/cbus/Kconfig"
++
+ source "drivers/connector/Kconfig"
+
+ source "drivers/mtd/Kconfig"
+@@ -60,6 +62,8 @@ source "drivers/pinctrl/Kconfig"
+
+ source "drivers/gpio/Kconfig"
+
++source "drivers/pwm/Kconfig"
++
+ source "drivers/w1/Kconfig"
+
+ source "drivers/power/Kconfig"
+diff --git a/drivers/Makefile b/drivers/Makefile
+index 1b31421..9deecef 100644
+--- a/drivers/Makefile
++++ b/drivers/Makefile
+@@ -8,6 +8,7 @@
+ # GPIO must come after pinctrl as gpios may need to mux pins etc
+ obj-y += pinctrl/
+ obj-y += gpio/
++obj-$(CONFIG_GENERIC_PWM) += pwm/
+ obj-$(CONFIG_PCI) += pci/
+ obj-$(CONFIG_PARISC) += parisc/
+ obj-$(CONFIG_RAPIDIO) += rapidio/
+@@ -78,7 +79,7 @@ obj-$(CONFIG_GAMEPORT) += input/gameport/
+ obj-$(CONFIG_INPUT) += input/
+ obj-$(CONFIG_I2O) += message/
+ obj-$(CONFIG_RTC_LIB) += rtc/
+-obj-y += i2c/ media/
++obj-y += i2c/ media/ cbus/
+ obj-$(CONFIG_PPS) += pps/
+ obj-$(CONFIG_PTP_1588_CLOCK) += ptp/
+ obj-$(CONFIG_W1) += w1/
+diff --git a/drivers/cbus/Kconfig b/drivers/cbus/Kconfig
+new file mode 100644
+index 0000000..41d96e7
+--- /dev/null
++++ b/drivers/cbus/Kconfig
+@@ -0,0 +1,86 @@
++#
++# CBUS device configuration
++#
++
++menu "CBUS support"
++
++config CBUS
++ bool "CBUS support on OMAP"
++ ---help---
++ CBUS is a proprietary serial protocol by Nokia. It is mainly
++ used for accessing Energy Management auxiliary chips.
++
++ If you want CBUS support, you should say Y here.
++
++config CBUS_TAHVO
++ depends on CBUS
++ bool "Support for Tahvo"
++ ---help---
++	  Tahvo is a mixed-signal ASIC with some system features.
++
++ If you want Tahvo support, you should say Y here.
++
++if CBUS_TAHVO
++
++config CBUS_TAHVO_USB
++ depends on USB
++ depends on ARCH_OMAP
++ select USB_OTG_UTILS
++ tristate "Support for Tahvo USB transceiver"
++ ---help---
++ If you want Tahvo support for USB transceiver, say Y or M here.
++
++config CBUS_TAHVO_USB_HOST_BY_DEFAULT
++ depends on CBUS_TAHVO_USB && USB_OTG
++ boolean "Device in USB host mode by default"
++ ---help---
++ Say Y here, if you want the device to enter USB host mode
++ by default on bootup.
++
++endif # CBUS_TAHVO
++
++config CBUS_RETU
++ depends on CBUS
++ bool "Support for Retu"
++ ---help---
++	  Retu is a mixed-signal ASIC with some system features.
++
++ If you want Retu support, you should say Y here.
++
++if CBUS_RETU
++
++config CBUS_RETU_POWERBUTTON
++ depends on INPUT
++ bool "Support for Retu power button"
++ ---help---
++ The power button on Nokia 770 is connected to the Retu ASIC.
++
++ If you want support for the Retu power button, you should say Y here.
++
++config CBUS_RETU_RTC
++ depends on RTC_CLASS
++ depends on ARCH_OMAP
++ tristate "Support for Retu pseudo-RTC"
++ ---help---
++ Say Y here if you want support for the device that alleges to be an
++ RTC in Retu. This will expose a sysfs interface for it.
++
++config CBUS_RETU_WDT
++ depends on SYSFS && WATCHDOG
++ depends on ARCH_OMAP
++ tristate "Support for Retu watchdog timer"
++ ---help---
++ Say Y here if you want support for the watchdog in Retu. This will
++ expose a sysfs interface to grok it.
++
++config CBUS_RETU_HEADSET
++ depends on SYSFS
++ tristate "Support for headset detection with Retu/Vilma"
++ ---help---
++ Say Y here if you want support detecting a headset that's connected
++ to Retu/Vilma. Detection state and events are exposed through
++ sysfs.
++
++endif # CBUS_RETU
++
++endmenu
+diff --git a/drivers/cbus/Makefile b/drivers/cbus/Makefile
+new file mode 100644
+index 0000000..483c3ca
+--- /dev/null
++++ b/drivers/cbus/Makefile
+@@ -0,0 +1,13 @@
++#
++# Makefile for CBUS.
++#
++
++obj-$(CONFIG_CBUS) += cbus.o
++obj-$(CONFIG_CBUS_TAHVO) += tahvo.o
++obj-$(CONFIG_CBUS_RETU) += retu.o
++obj-$(CONFIG_CBUS_TAHVO_USB) += tahvo-usb.o
++
++obj-$(CONFIG_CBUS_RETU_POWERBUTTON) += retu-pwrbutton.o
++obj-$(CONFIG_CBUS_RETU_RTC) += retu-rtc.o
++obj-$(CONFIG_CBUS_RETU_WDT) += retu-wdt.o
++obj-$(CONFIG_CBUS_RETU_HEADSET) += retu-headset.o
+diff --git a/drivers/cbus/cbus.c b/drivers/cbus/cbus.c
+new file mode 100644
+index 0000000..45b01fd
+--- /dev/null
++++ b/drivers/cbus/cbus.c
+@@ -0,0 +1,328 @@
++/*
++ * drivers/cbus/cbus.c
++ *
++ * Support functions for CBUS serial protocol
++ *
++ * Copyright (C) 2004-2010 Nokia Corporation
++ * Contact: Felipe Balbi <felipe.balbi@nokia.com>
++ *
++ * Written by Juha Yrjölä <juha.yrjola@nokia.com>,
++ * David Weinehall <david.weinehall@nokia.com>, and
++ * Mikko Ylinen <mikko.k.ylinen@nokia.com>
++ *
++ * Several updates and cleanups by Felipe Balbi <felipe.balbi@nokia.com>
++ *
++ * This file is subject to the terms and conditions of the GNU General
++ * Public License. See the file "COPYING" in the main directory of this
++ * archive for more details.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <linux/export.h>
++#include <linux/module.h>
++#include <linux/device.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/gpio.h>
++#include <linux/platform_device.h>
++#include <linux/platform_data/cbus.h>
++
++#include "cbus.h"
++
++#define CBUS_XFER_READ 1
++#define CBUS_XFER_WRITE 0
++
++struct cbus_host {
++ /* host lock */
++ spinlock_t lock;
++
++ struct device *dev;
++
++ int clk_gpio;
++ int dat_gpio;
++ int sel_gpio;
++};
++
++/**
++ * cbus_send_bit - sends one bit over the bus
++ * @host: the host we're using
++ * @bit: one bit of information to send
++ * @input: whether to set data pin as input after sending
++ */
++static int cbus_send_bit(struct cbus_host *host, unsigned bit,
++ unsigned input)
++{
++ int ret = 0;
++
++ gpio_set_value(host->dat_gpio, bit ? 1 : 0);
++ gpio_set_value(host->clk_gpio, 1);
++
++ /* The data bit is read on the rising edge of CLK */
++ if (input)
++ ret = gpio_direction_input(host->dat_gpio);
++
++ gpio_set_value(host->clk_gpio, 0);
++
++ return ret;
++}
++
++/**
++ * cbus_send_data - sends @len amount of data over the bus
++ * @host: the host we're using
++ * @data: the data to send
++ * @len: size of the transfer
++ * @input: whether to set data pin as input after sending
++ */
++static int cbus_send_data(struct cbus_host *host, unsigned data, unsigned len,
++ unsigned input)
++{
++ int ret = 0;
++ int i;
++
++ for (i = len; i > 0; i--) {
++ ret = cbus_send_bit(host, data & (1 << (i - 1)),
++ input && (i == 1));
++ if (ret < 0)
++ goto out;
++ }
++
++out:
++ return ret;
++}
++
++/**
++ * cbus_receive_bit - receives one bit from the bus
++ * @host: the host we're using
++ */
++static int cbus_receive_bit(struct cbus_host *host)
++{
++ int ret;
++
++ gpio_set_value(host->clk_gpio, 1);
++ ret = gpio_get_value(host->dat_gpio);
++ if (ret < 0)
++ goto out;
++ gpio_set_value(host->clk_gpio, 0);
++
++out:
++ return ret;
++}
++
++/**
++ * cbus_receive_data - receives @len data from the bus
++ * @host: the host we're using
++ * @len: the length of data to receive
++ */
++static int cbus_receive_data(struct cbus_host *host, unsigned len)
++{
++ int ret = 0;
++ int i;
++
++	for (i = len; i > 0; i--) {
++ int bit = cbus_receive_bit(host);
++
++ if (bit < 0)
++ goto out;
++
++ if (bit)
++ ret |= 1 << (i - 1);
++ }
++
++out:
++ return ret;
++}
++
++/**
++ * cbus_transfer - transfers data over the bus
++ * @host: the host we're using
++ * @rw: read/write flag
++ * @dev: device address
++ * @reg: register address
++ * @data: if @rw == 0 data to send otherwise 0
++ */
++static int cbus_transfer(struct cbus_host *host, unsigned rw, unsigned dev,
++ unsigned reg, unsigned data)
++{
++ unsigned long flags;
++ int input = 0;
++ int ret = 0;
++
++ /* We don't want interrupts disturbing our transfer */
++ spin_lock_irqsave(&host->lock, flags);
++
++ /* Reset state and start of transfer, SEL stays down during transfer */
++ gpio_set_value(host->sel_gpio, 0);
++
++ /* Set the DAT pin to output */
++ gpio_direction_output(host->dat_gpio, 1);
++
++ /* Send the device address */
++ ret = cbus_send_data(host, dev, 3, 0);
++ if (ret < 0) {
++ dev_dbg(host->dev, "failed sending device addr\n");
++ goto out;
++ }
++
++ /* Send the rw flag */
++ ret = cbus_send_bit(host, rw, 0);
++ if (ret < 0) {
++ dev_dbg(host->dev, "failed sending read/write flag\n");
++ goto out;
++ }
++
++ /* Send the register address */
++ if (rw)
++ input = true;
++
++ ret = cbus_send_data(host, reg, 5, input);
++ if (ret < 0) {
++ dev_dbg(host->dev, "failed sending register addr\n");
++ goto out;
++ }
++
++ if (!rw) {
++ ret = cbus_send_data(host, data, 16, 0);
++ if (ret < 0) {
++ dev_dbg(host->dev, "failed sending data\n");
++ goto out;
++ }
++ } else {
++ gpio_set_value(host->clk_gpio, 1);
++
++ ret = cbus_receive_data(host, 16);
++ if (ret < 0) {
++ dev_dbg(host->dev, "failed receiving data\n");
++ goto out;
++ }
++ }
++
++ /* Indicate end of transfer, SEL goes up until next transfer */
++ gpio_set_value(host->sel_gpio, 1);
++ gpio_set_value(host->clk_gpio, 1);
++ gpio_set_value(host->clk_gpio, 0);
++
++out:
++ spin_unlock_irqrestore(&host->lock, flags);
++
++ return ret;
++}
++
++/**
++ * cbus_read_reg - reads a given register from the device
++ * @child: the child device
++ * @dev: device address
++ * @reg: register address
++ */
++int cbus_read_reg(struct device *child, unsigned dev, unsigned reg)
++{
++ struct cbus_host *host = dev_get_drvdata(child->parent);
++
++ return cbus_transfer(host, CBUS_XFER_READ, dev, reg, 0);
++}
++EXPORT_SYMBOL(cbus_read_reg);
++
++/**
++ * cbus_write_reg - writes to a given register of the device
++ * @child: the child device
++ * @dev: device address
++ * @reg: register address
++ * @val: data to be written to @reg
++ */
++int cbus_write_reg(struct device *child, unsigned dev, unsigned reg,
++ unsigned val)
++{
++ struct cbus_host *host = dev_get_drvdata(child->parent);
++
++ return cbus_transfer(host, CBUS_XFER_WRITE, dev, reg, val);
++}
++EXPORT_SYMBOL(cbus_write_reg);
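++
++/*
++ * Illustrative only (child_dev, reg and mask are hypothetical; child_dev
++ * must be a device whose parent is the cbus platform device): a child such
++ * as Retu would access its registers roughly like
++ *
++ *	int val = cbus_read_reg(child_dev, CBUS_RETU_DEVICE_ID, reg);
++ *	if (val >= 0)
++ *		cbus_write_reg(child_dev, CBUS_RETU_DEVICE_ID, reg,
++ *			       val | mask);
++ */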
++
++static int __devinit cbus_bus_probe(struct platform_device *pdev)
++{
++ struct cbus_host *chost;
++ struct cbus_host_platform_data *pdata = pdev->dev.platform_data;
++ int ret;
++
++ chost = kzalloc(sizeof(*chost), GFP_KERNEL);
++ if (chost == NULL)
++ return -ENOMEM;
++
++ spin_lock_init(&chost->lock);
++
++ chost->clk_gpio = pdata->clk_gpio;
++ chost->dat_gpio = pdata->dat_gpio;
++ chost->sel_gpio = pdata->sel_gpio;
++ chost->dev = &pdev->dev;
++
++ ret = gpio_request(chost->clk_gpio, "CBUS clk");
++ if (ret < 0)
++ goto exit1;
++
++ ret = gpio_request(chost->dat_gpio, "CBUS data");
++ if (ret < 0)
++ goto exit2;
++
++ ret = gpio_request(chost->sel_gpio, "CBUS sel");
++ if (ret < 0)
++ goto exit3;
++
++ gpio_direction_output(chost->clk_gpio, 0);
++ gpio_direction_input(chost->dat_gpio);
++ gpio_direction_output(chost->sel_gpio, 1);
++
++ gpio_set_value(chost->clk_gpio, 1);
++ gpio_set_value(chost->clk_gpio, 0);
++
++ platform_set_drvdata(pdev, chost);
++
++ return 0;
++exit3:
++ gpio_free(chost->dat_gpio);
++exit2:
++ gpio_free(chost->clk_gpio);
++exit1:
++ kfree(chost);
++
++ return ret;
++}
++
++static int __devexit cbus_bus_remove(struct platform_device *pdev)
++{
++ struct cbus_host *chost = platform_get_drvdata(pdev);
++
++ gpio_free(chost->sel_gpio);
++ gpio_free(chost->dat_gpio);
++ gpio_free(chost->clk_gpio);
++
++ kfree(chost);
++
++ return 0;
++}
++
++static struct platform_driver cbus_driver = {
++ .probe = cbus_bus_probe,
++ .remove = __devexit_p(cbus_bus_remove),
++ .driver = {
++ .name = "cbus",
++ },
++};
++
++module_platform_driver(cbus_driver);
++
++MODULE_DESCRIPTION("CBUS serial protocol");
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Juha Yrjölä");
++MODULE_AUTHOR("David Weinehall");
++MODULE_AUTHOR("Mikko Ylinen");
++MODULE_AUTHOR("Felipe Balbi <felipe.balbi@nokia.com>");
++
+diff --git a/drivers/cbus/cbus.h b/drivers/cbus/cbus.h
+new file mode 100644
+index 0000000..5380d173
+--- /dev/null
++++ b/drivers/cbus/cbus.h
+@@ -0,0 +1,33 @@
++/*
++ * drivers/cbus/cbus.h
++ *
++ * Copyright (C) 2004, 2005 Nokia Corporation
++ *
++ * Written by Juha Yrjölä <juha.yrjola@nokia.com> and
++ * David Weinehall <david.weinehall@nokia.com>
++ *
++ * This file is subject to the terms and conditions of the GNU General
++ * Public License. See the file "COPYING" in the main directory of this
++ * archive for more details.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#ifndef __DRIVERS_CBUS_CBUS_H
++#define __DRIVERS_CBUS_CBUS_H
++
++#define CBUS_RETU_DEVICE_ID 0x01
++#define CBUS_TAHVO_DEVICE_ID 0x02
++
++extern int cbus_read_reg(struct device *, unsigned dev, unsigned reg);
++extern int cbus_write_reg(struct device *, unsigned dev, unsigned reg,
++ unsigned val);
++
++#endif /* __DRIVERS_CBUS_CBUS_H */
+diff --git a/drivers/cbus/retu-headset.c b/drivers/cbus/retu-headset.c
+new file mode 100644
+index 0000000..576b0e6
+--- /dev/null
++++ b/drivers/cbus/retu-headset.c
+@@ -0,0 +1,350 @@
++/**
++ * Retu/Vilma headset detection
++ *
++ * Copyright (C) 2006 Nokia Corporation
++ *
++ * Written by Juha Yrjölä
++ *
++ * This file is subject to the terms and conditions of the GNU General
++ * Public License. See the file "COPYING" in the main directory of this
++ * archive for more details.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/irq.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/input.h>
++#include <linux/platform_device.h>
++
++#include "retu.h"
++
++#define RETU_ADC_CHANNEL_HOOKDET 0x05
++
++#define RETU_HEADSET_KEY KEY_PHONE
++
++struct retu_headset {
++ spinlock_t lock;
++ struct mutex mutex;
++ struct device *dev;
++ struct input_dev *idev;
++ unsigned bias_enabled;
++ unsigned detection_enabled;
++ unsigned pressed;
++ struct timer_list enable_timer;
++ struct timer_list detect_timer;
++ int irq;
++};
++
++static void retu_headset_set_bias(struct retu_headset *hs, int enable)
++{
++ if (enable) {
++ retu_set_clear_reg_bits(hs->dev, RETU_REG_AUDTXR,
++ (1 << 0) | (1 << 1), 0);
++ msleep(2);
++ retu_set_clear_reg_bits(hs->dev, RETU_REG_AUDTXR,
++ 1 << 3, 0);
++ } else {
++ retu_set_clear_reg_bits(hs->dev, RETU_REG_AUDTXR, 0,
++ (1 << 0) | (1 << 1) | (1 << 3));
++ }
++}
++
++static void retu_headset_enable(struct retu_headset *hs)
++{
++ mutex_lock(&hs->mutex);
++ if (!hs->bias_enabled) {
++ hs->bias_enabled = 1;
++ retu_headset_set_bias(hs, 1);
++ }
++ mutex_unlock(&hs->mutex);
++}
++
++static void retu_headset_disable(struct retu_headset *hs)
++{
++ mutex_lock(&hs->mutex);
++ if (hs->bias_enabled) {
++ hs->bias_enabled = 0;
++ retu_headset_set_bias(hs, 0);
++ }
++ mutex_unlock(&hs->mutex);
++}
++
++static void retu_headset_det_enable(struct retu_headset *hs)
++{
++ mutex_lock(&hs->mutex);
++ if (!hs->detection_enabled) {
++ hs->detection_enabled = 1;
++ retu_set_clear_reg_bits(hs->dev, RETU_REG_CC1,
++ (1 << 10) | (1 << 8), 0);
++ }
++ mutex_unlock(&hs->mutex);
++}
++
++static void retu_headset_det_disable(struct retu_headset *hs)
++{
++ unsigned long flags;
++
++ mutex_lock(&hs->mutex);
++ if (hs->detection_enabled) {
++ hs->detection_enabled = 0;
++ del_timer_sync(&hs->enable_timer);
++ del_timer_sync(&hs->detect_timer);
++ spin_lock_irqsave(&hs->lock, flags);
++ if (hs->pressed)
++ input_report_key(hs->idev, RETU_HEADSET_KEY, 0);
++ spin_unlock_irqrestore(&hs->lock, flags);
++ retu_set_clear_reg_bits(hs->dev, RETU_REG_CC1, 0,
++ (1 << 10) | (1 << 8));
++ }
++ mutex_unlock(&hs->mutex);
++}
++
++static ssize_t retu_headset_hookdet_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ int val;
++
++ val = retu_read_adc(dev, RETU_ADC_CHANNEL_HOOKDET);
++ return sprintf(buf, "%d\n", val);
++}
++
++static DEVICE_ATTR(hookdet, S_IRUGO, retu_headset_hookdet_show, NULL);
++
++static ssize_t retu_headset_enable_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct retu_headset *hs = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%u\n", hs->bias_enabled);
++}
++
++static ssize_t retu_headset_enable_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct retu_headset *hs = dev_get_drvdata(dev);
++ int enable;
++
++ if (sscanf(buf, "%u", &enable) != 1)
++ return -EINVAL;
++ if (enable)
++ retu_headset_enable(hs);
++ else
++ retu_headset_disable(hs);
++ return count;
++}
++
++static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR | S_IWGRP,
++ retu_headset_enable_show, retu_headset_enable_store);
++
++static ssize_t retu_headset_enable_det_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct retu_headset *hs = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%u\n", hs->detection_enabled);
++}
++
++static ssize_t retu_headset_enable_det_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct retu_headset *hs = dev_get_drvdata(dev);
++ int enable;
++
++ if (sscanf(buf, "%u", &enable) != 1)
++ return -EINVAL;
++ if (enable)
++ retu_headset_det_enable(hs);
++ else
++ retu_headset_det_disable(hs);
++ return count;
++}
++
++static DEVICE_ATTR(enable_det, S_IRUGO | S_IWUSR | S_IWGRP,
++ retu_headset_enable_det_show,
++ retu_headset_enable_det_store);
++
++static irqreturn_t retu_headset_hook_interrupt(int irq, void *_hs)
++{
++ struct retu_headset *hs = _hs;
++ unsigned long flags;
++
++ spin_lock_irqsave(&hs->lock, flags);
++ if (!hs->pressed) {
++ /* Headset button was just pressed down. */
++ hs->pressed = 1;
++ input_report_key(hs->idev, RETU_HEADSET_KEY, 1);
++ }
++ spin_unlock_irqrestore(&hs->lock, flags);
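++ /*
++  * Disable hook detection for a moment and re-arm it from the
++  * enable timer to debounce the button press.
++  */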
++ retu_set_clear_reg_bits(hs->dev, RETU_REG_CC1, 0,
++ (1 << 10) | (1 << 8));
++ mod_timer(&hs->enable_timer, jiffies + msecs_to_jiffies(50));
++
++ return IRQ_HANDLED;
++}
++
++static void retu_headset_enable_timer(unsigned long arg)
++{
++ struct retu_headset *hs = (struct retu_headset *) arg;
++
++ retu_set_clear_reg_bits(hs->dev, RETU_REG_CC1,
++ (1 << 10) | (1 << 8), 0);
++ mod_timer(&hs->detect_timer, jiffies + msecs_to_jiffies(350));
++}
++
++static void retu_headset_detect_timer(unsigned long arg)
++{
++ struct retu_headset *hs = (struct retu_headset *) arg;
++ unsigned long flags;
++
++ spin_lock_irqsave(&hs->lock, flags);
++ if (hs->pressed) {
++ hs->pressed = 0;
++ input_report_key(hs->idev, RETU_HEADSET_KEY, 0);
++ }
++ spin_unlock_irqrestore(&hs->lock, flags);
++}
++
++static int __devinit retu_headset_probe(struct platform_device *pdev)
++{
++ struct retu_headset *hs;
++ int irq;
++ int r;
++
++ hs = kzalloc(sizeof(*hs), GFP_KERNEL);
++ if (hs == NULL)
++ return -ENOMEM;
++
++ hs->dev = &pdev->dev;
++
++ hs->idev = input_allocate_device();
++ if (hs->idev == NULL) {
++ r = -ENOMEM;
++ goto err1;
++ }
++ hs->idev->name = "retu-headset";
++ hs->idev->dev.parent = &pdev->dev;
++ set_bit(EV_KEY, hs->idev->evbit);
++ set_bit(RETU_HEADSET_KEY, hs->idev->keybit);
++ r = input_register_device(hs->idev);
++ if (r < 0)
++ goto err2;
++
++ r = device_create_file(&pdev->dev, &dev_attr_hookdet);
++ if (r < 0)
++ goto err3;
++ r = device_create_file(&pdev->dev, &dev_attr_enable);
++ if (r < 0)
++ goto err4;
++ r = device_create_file(&pdev->dev, &dev_attr_enable_det);
++ if (r < 0)
++ goto err5;
++ platform_set_drvdata(pdev, hs);
++
++ spin_lock_init(&hs->lock);
++ mutex_init(&hs->mutex);
++ setup_timer(&hs->enable_timer, retu_headset_enable_timer,
++ (unsigned long) hs);
++ setup_timer(&hs->detect_timer, retu_headset_detect_timer,
++ (unsigned long) hs);
++
++ irq = platform_get_irq(pdev, 0);
++ hs->irq = irq;
++
++ r = request_threaded_irq(irq, NULL, retu_headset_hook_interrupt, 0,
++ "hookdet", hs);
++ if (r != 0) {
++ dev_err(&pdev->dev, "hookdet IRQ not available\n");
++ goto err6;
++ }
++
++ return 0;
++err6:
++ device_remove_file(&pdev->dev, &dev_attr_enable_det);
++err5:
++ device_remove_file(&pdev->dev, &dev_attr_enable);
++err4:
++ device_remove_file(&pdev->dev, &dev_attr_hookdet);
++err3:
++ input_unregister_device(hs->idev);
++err2:
++ input_free_device(hs->idev);
++err1:
++ kfree(hs);
++ return r;
++}
++
++static int __devexit retu_headset_remove(struct platform_device *pdev)
++{
++ struct retu_headset *hs = platform_get_drvdata(pdev);
++
++ device_remove_file(&pdev->dev, &dev_attr_hookdet);
++ device_remove_file(&pdev->dev, &dev_attr_enable);
++ device_remove_file(&pdev->dev, &dev_attr_enable_det);
++ retu_headset_disable(hs);
++ retu_headset_det_disable(hs);
++ free_irq(hs->irq, hs);
++ input_unregister_device(hs->idev);
++ input_free_device(hs->idev);
++
++ return 0;
++}
++
++static int retu_headset_suspend(struct platform_device *pdev,
++ pm_message_t mesg)
++{
++ struct retu_headset *hs = platform_get_drvdata(pdev);
++
++ mutex_lock(&hs->mutex);
++ if (hs->bias_enabled)
++ retu_headset_set_bias(hs, 0);
++ mutex_unlock(&hs->mutex);
++
++ return 0;
++}
++
++static int retu_headset_resume(struct platform_device *pdev)
++{
++ struct retu_headset *hs = platform_get_drvdata(pdev);
++
++ mutex_lock(&hs->mutex);
++ if (hs->bias_enabled)
++ retu_headset_set_bias(hs, 1);
++ mutex_unlock(&hs->mutex);
++
++ return 0;
++}
++
++static struct platform_driver retu_headset_driver = {
++ .probe = retu_headset_probe,
++ .remove = __devexit_p(retu_headset_remove),
++ .suspend = retu_headset_suspend,
++ .resume = retu_headset_resume,
++ .driver = {
++ .name = "retu-headset",
++ },
++};
++
++module_platform_driver(retu_headset_driver);
++
++MODULE_ALIAS("platform:retu-headset");
++MODULE_DESCRIPTION("Retu/Vilma headset detection");
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Juha Yrjölä");
+diff --git a/drivers/cbus/retu-pwrbutton.c b/drivers/cbus/retu-pwrbutton.c
+new file mode 100644
+index 0000000..98ad005
+--- /dev/null
++++ b/drivers/cbus/retu-pwrbutton.c
+@@ -0,0 +1,157 @@
++/**
++ * drivers/cbus/retu-pwrbutton.c
++ *
++ * Driver for sending retu power button event to input-layer
++ *
++ * Copyright (C) 2004-2010 Nokia Corporation
++ *
++ * Written by
++ * Ari Saastamoinen <ari.saastamoinen@elektrobit.com>
++ * Juha Yrjola <juha.yrjola@solidboot.com>
++ *
++ * Contact: Felipe Balbi <felipe.balbi@nokia.com>
++ *
++ * This file is subject to the terms and conditions of the GNU General
++ * Public License. See the file "COPYING" in the main directory of this
++ * archive for more details.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/input.h>
++#include <linux/jiffies.h>
++#include <linux/bitops.h>
++#include <linux/irq.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++
++#include "retu.h"
++
++#define RETU_STATUS_PWRONX (1 << 5)
++
++#define PWRBTN_DELAY 20
++#define PWRBTN_UP 0
++#define PWRBTN_PRESSED 1
++
++struct retu_pwrbutton {
++ struct input_dev *idev;
++ struct device *dev;
++
++ int state;
++ int irq;
++};
++
++static irqreturn_t retubutton_irq(int irq, void *_pwr)
++{
++ struct retu_pwrbutton *pwr = _pwr;
++ int state;
++
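++ /* RETU_STATUS_PWRONX is set while the power button is released. */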
++ if (retu_read_reg(pwr->dev, RETU_REG_STATUS) & RETU_STATUS_PWRONX)
++ state = PWRBTN_UP;
++ else
++ state = PWRBTN_PRESSED;
++
++ if (pwr->state != state) {
++ input_report_key(pwr->idev, KEY_POWER, state);
++ input_sync(pwr->idev);
++ pwr->state = state;
++ }
++
++ return IRQ_HANDLED;
++}
++
++static int __devinit retubutton_probe(struct platform_device *pdev)
++{
++ struct retu_pwrbutton *pwr;
++ int ret = 0;
++
++ pwr = kzalloc(sizeof(*pwr), GFP_KERNEL);
++ if (!pwr) {
++ dev_err(&pdev->dev, "not enough memory\n");
++ ret = -ENOMEM;
++ goto err0;
++ }
++
++ pwr->dev = &pdev->dev;
++ pwr->irq = platform_get_irq(pdev, 0);
++ platform_set_drvdata(pdev, pwr);
++
++ ret = request_threaded_irq(pwr->irq, NULL, retubutton_irq, 0,
++ "retu-pwrbutton", pwr);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "Cannot allocate irq\n");
++ goto err1;
++ }
++
++ pwr->idev = input_allocate_device();
++ if (!pwr->idev) {
++ dev_err(&pdev->dev, "can't allocate input device\n");
++ ret = -ENOMEM;
++ goto err2;
++ }
++
++ pwr->idev->evbit[0] = BIT_MASK(EV_KEY);
++ pwr->idev->keybit[BIT_WORD(KEY_POWER)] = BIT_MASK(KEY_POWER);
++ pwr->idev->name = "retu-pwrbutton";
++
++ ret = input_register_device(pwr->idev);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "failed to register input device\n");
++ goto err3;
++ }
++
++ return 0;
++
++err3:
++ input_free_device(pwr->idev);
++
++err2:
++ free_irq(pwr->irq, pwr);
++
++err1:
++ kfree(pwr);
++
++err0:
++ return ret;
++}
++
++static int __devexit retubutton_remove(struct platform_device *pdev)
++{
++ struct retu_pwrbutton *pwr = platform_get_drvdata(pdev);
++
++ free_irq(pwr->irq, pwr);
++ input_unregister_device(pwr->idev);
++ input_free_device(pwr->idev);
++ kfree(pwr);
++
++ return 0;
++}
++
++static struct platform_driver retu_pwrbutton_driver = {
++ .probe = retubutton_probe,
++ .remove = __devexit_p(retubutton_remove),
++ .driver = {
++ .name = "retu-pwrbutton",
++ },
++};
++
++module_platform_driver(retu_pwrbutton_driver);
++
++MODULE_ALIAS("platform:retu-pwrbutton");
++MODULE_DESCRIPTION("Retu Power Button");
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Ari Saastamoinen");
++MODULE_AUTHOR("Felipe Balbi <felipe.balbi@nokia.com>");
++
+diff --git a/drivers/cbus/retu-rtc.c b/drivers/cbus/retu-rtc.c
+new file mode 100644
+index 0000000..965ee55
+--- /dev/null
++++ b/drivers/cbus/retu-rtc.c
+@@ -0,0 +1,279 @@
++/**
++ * drivers/cbus/retu-rtc.c
++ *
++ * Support for Retu RTC
++ *
++ * Copyright (C) 2004, 2005 Nokia Corporation
++ *
++ * Written by Paul Mundt <paul.mundt@nokia.com> and
++ * Igor Stoppa <igor.stoppa@nokia.com>
++ *
++ * The Retu RTC is essentially a partial read-only RTC that gives us Retu's
++ * idea of what time actually is. It's left as a userspace exercise to map
++ * this back to time in the real world and ensure that calibration settings
++ * are sane to compensate for any horrible drift (on account of not being able
++ * to set the clock to anything).
++ *
++ * Days are semi-writeable. Namely, Retu will only track 255 days for us
++ * consecutively, after which the counter is explicitly stuck at 255 until
++ * someone comes along and clears it with a write. In the event that no one
++ * comes along and clears it, we no longer have any idea what day it is.
++ *
++ * This file is subject to the terms and conditions of the GNU General
++ * Public License. See the file "COPYING" in the main directory of this
++ * archive for more details.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <linux/device.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/mutex.h>
++#include <linux/rtc.h>
++
++#include "cbus.h"
++#include "retu.h"
++
++struct retu_rtc {
++ /* device lock */
++ struct mutex mutex;
++ struct device *dev;
++ struct rtc_device *rtc;
++
++ u16 alarm_expired;
++ int irq_rtcs;
++ int irq_rtca;
++};
++
++static void retu_rtc_do_reset(struct retu_rtc *rtc)
++{
++ u16 ccr1;
++
++ ccr1 = retu_read_reg(rtc->dev, RETU_REG_CC1);
++ /* RTC in reset */
++ retu_write_reg(rtc->dev, RETU_REG_CC1, ccr1 | 0x0001);
++ /* RTC in normal operating mode */
++ retu_write_reg(rtc->dev, RETU_REG_CC1, ccr1 & ~0x0001);
++
++ /* Disable alarm and RTC WD */
++ retu_write_reg(rtc->dev, RETU_REG_RTCHMAR, 0x7f3f);
++ /* Set Calibration register to default value */
++ retu_write_reg(rtc->dev, RETU_REG_RTCCALR, 0x00c0);
++
++ rtc->alarm_expired = 0;
++}
++
++static irqreturn_t retu_rtc_interrupt(int irq, void *_rtc)
++{
++ struct retu_rtc *rtc = _rtc;
++
++ mutex_lock(&rtc->mutex);
++ rtc->alarm_expired = 1;
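++ /* Disarm the alarm by writing an out-of-range hour/minute value. */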
++ retu_write_reg(rtc->dev, RETU_REG_RTCHMAR, (24 << 8) | 60);
++ mutex_unlock(&rtc->mutex);
++
++ return IRQ_HANDLED;
++}
++
++static int retu_rtc_init_irq(struct retu_rtc *rtc)
++{
++ int irq;
++ int ret;
++
++ irq = platform_get_irq(to_platform_device(rtc->dev), 0);
++ rtc->irq_rtcs = irq;
++
++ irq = platform_get_irq(to_platform_device(rtc->dev), 1);
++ rtc->irq_rtca = irq;
++
++ ret = request_threaded_irq(rtc->irq_rtcs, NULL, retu_rtc_interrupt,
++ 0, "RTCS", rtc);
++ if (ret != 0)
++ return ret;
++
++ ret = request_threaded_irq(rtc->irq_rtca, NULL, retu_rtc_interrupt,
++ 0, "RTCA", rtc);
++ if (ret != 0) {
++ free_irq(rtc->irq_rtcs, rtc);
++ return ret;
++ }
++
++ return 0;
++}
++
++static int retu_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
++{
++ struct retu_rtc *rtc = dev_get_drvdata(dev);
++ u16 chmar;
++
++ mutex_lock(&rtc->mutex);
++
++ chmar = ((alm->time.tm_hour & 0x1f) << 8) | (alm->time.tm_min & 0x3f);
++ retu_write_reg(rtc->dev, RETU_REG_RTCHMAR, chmar);
++
++ mutex_unlock(&rtc->mutex);
++
++ return 0;
++}
++
++static int retu_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
++{
++ struct retu_rtc *rtc = dev_get_drvdata(dev);
++ u16 chmar;
++
++ mutex_lock(&rtc->mutex);
++
++ chmar = retu_read_reg(rtc->dev, RETU_REG_RTCHMAR);
++
++ alm->time.tm_hour = (chmar >> 8) & 0x1f;
++ alm->time.tm_min = chmar & 0x3f;
++ alm->enabled = !!rtc->alarm_expired;
++
++ mutex_unlock(&rtc->mutex);
++
++ return 0;
++}
++
++static int retu_rtc_set_time(struct device *dev, struct rtc_time *tm)
++{
++ struct retu_rtc *rtc = dev_get_drvdata(dev);
++ u16 dsr;
++ u16 hmr;
++
++ dsr = ((tm->tm_mday & 0xff) << 8) | (tm->tm_hour & 0xff);
++ hmr = ((tm->tm_min & 0xff) << 8) | (tm->tm_sec & 0xff);
++
++ mutex_lock(&rtc->mutex);
++
++ retu_write_reg(rtc->dev, RETU_REG_RTCDSR, dsr);
++ retu_write_reg(rtc->dev, RETU_REG_RTCHMR, hmr);
++
++ mutex_unlock(&rtc->mutex);
++
++ return 0;
++}
++
++static int retu_rtc_read_time(struct device *dev, struct rtc_time *tm)
++{
++ struct retu_rtc *rtc = dev_get_drvdata(dev);
++ u16 dsr;
++ u16 hmr;
++
++ /*
++ * DSR holds days and hours
++ * HMR holds minutes and seconds
++ *
++ * Both are 16-bit registers with 8 bits for each field.
++ */
++
++ mutex_lock(&rtc->mutex);
++
++ dsr = retu_read_reg(rtc->dev, RETU_REG_RTCDSR);
++ hmr = retu_read_reg(rtc->dev, RETU_REG_RTCHMR);
++
++ tm->tm_sec = hmr & 0xff;
++ tm->tm_min = hmr >> 8;
++ tm->tm_hour = dsr & 0xff;
++ tm->tm_mday = dsr >> 8;
++
++ mutex_unlock(&rtc->mutex);
++
++ return 0;
++}
++
++static struct rtc_class_ops retu_rtc_ops = {
++ .read_time = retu_rtc_read_time,
++ .set_time = retu_rtc_set_time,
++ .read_alarm = retu_rtc_read_alarm,
++ .set_alarm = retu_rtc_set_alarm,
++};
++
++static int __devinit retu_rtc_probe(struct platform_device *pdev)
++{
++ struct retu_rtc *rtc;
++ int r;
++
++ rtc = kzalloc(sizeof(*rtc), GFP_KERNEL);
++ if (!rtc) {
++ dev_err(&pdev->dev, "not enough memory\n");
++ r = -ENOMEM;
++ goto err0;
++ }
++
++ rtc->dev = &pdev->dev;
++ platform_set_drvdata(pdev, rtc);
++ mutex_init(&rtc->mutex);
++
++ rtc->alarm_expired = retu_read_reg(rtc->dev, RETU_REG_IDR) &
++ (0x1 << RETU_INT_RTCA);
++
++ r = retu_rtc_init_irq(rtc);
++ if (r < 0) {
++ dev_err(&pdev->dev, "failed to request retu irq\n");
++ goto err1;
++ }
++
++ /* If the calibration register is zero, we've probably lost power */
++ if (!(retu_read_reg(rtc->dev, RETU_REG_RTCCALR) & 0x00ff))
++ retu_rtc_do_reset(rtc);
++
++ rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
++ &retu_rtc_ops, THIS_MODULE);
++ if (IS_ERR(rtc->rtc)) {
++ dev_err(&pdev->dev, "can't register RTC device\n");
++ r = PTR_ERR(rtc->rtc);
++ goto err2;
++ }
++
++ return 0;
++
++err2:
++ free_irq(rtc->irq_rtcs, rtc);
++ free_irq(rtc->irq_rtca, rtc);
++
++err1:
++ kfree(rtc);
++
++err0:
++ return r;
++}
++
++static int __devexit retu_rtc_remove(struct platform_device *pdev)
++{
++ struct retu_rtc *rtc = platform_get_drvdata(pdev);
++
++ free_irq(rtc->irq_rtcs, rtc);
++ free_irq(rtc->irq_rtca, rtc);
++ rtc_device_unregister(rtc->rtc);
++ kfree(rtc);
++
++ return 0;
++}
++
++static struct platform_driver retu_rtc_driver = {
++ .probe = retu_rtc_probe,
++ .remove = __devexit_p(retu_rtc_remove),
++ .driver = {
++ .name = "retu-rtc",
++ },
++};
++
++module_platform_driver(retu_rtc_driver);
++
++MODULE_ALIAS("platform:retu-rtc");
++MODULE_DESCRIPTION("Retu RTC");
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Paul Mundt");
++MODULE_AUTHOR("Igor Stoppa");
++MODULE_AUTHOR("Felipe Balbi <felipe.balbi@nokia.com>");
++
+diff --git a/drivers/cbus/retu-wdt.c b/drivers/cbus/retu-wdt.c
+new file mode 100644
+index 0000000..7557bc1
+--- /dev/null
++++ b/drivers/cbus/retu-wdt.c
+@@ -0,0 +1,263 @@
++/**
++ * drivers/cbus/retu-wdt.c
++ *
++ * Driver for Retu watchdog
++ *
++ * Copyright (C) 2004, 2005 Nokia Corporation
++ *
++ * Written by Amit Kucheria <amit.kucheria@nokia.com>
++ *
++ * Cleanups by Michael Buesch <mb@bu3sch.de> (C) 2011
++ *
++ * This file is subject to the terms and conditions of the GNU General
++ * Public License. See the file "COPYING" in the main directory of this
++ * archive for more details.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <linux/device.h>
++#include <linux/init.h>
++#include <linux/fs.h>
++#include <linux/io.h>
++#include <linux/platform_device.h>
++
++#include <linux/completion.h>
++#include <linux/errno.h>
++#include <linux/moduleparam.h>
++#include <linux/miscdevice.h>
++#include <linux/watchdog.h>
++
++#include <asm/uaccess.h>
++
++#include <plat/prcm.h>
++
++#include "cbus.h"
++#include "retu.h"
++
++/* Watchdog timeout in seconds */
++#define RETU_WDT_MIN_TIMER 0
++#define RETU_WDT_DEFAULT_TIMER 32
++#define RETU_WDT_MAX_TIMER 63
++
++struct retu_wdt_dev {
++ struct device *dev;
++ unsigned int period_val; /* Current period of watchdog */
++ unsigned long users;
++ struct miscdevice miscdev;
++ struct delayed_work ping_work;
++ struct mutex mutex;
++};
++
++
++static inline void _retu_modify_counter(struct retu_wdt_dev *wdev,
++ unsigned int new)
++{
++ retu_write_reg(wdev->dev, RETU_REG_WATCHDOG, (u16)new);
++}
++
++static int retu_modify_counter(struct retu_wdt_dev *wdev, unsigned int new)
++{
++ if (new < RETU_WDT_MIN_TIMER || new > RETU_WDT_MAX_TIMER)
++ return -EINVAL;
++
++ mutex_lock(&wdev->mutex);
++ wdev->period_val = new;
++ _retu_modify_counter(wdev, wdev->period_val);
++ mutex_unlock(&wdev->mutex);
++
++ return 0;
++}
++
++/*
++ * Since retu watchdog cannot be disabled in hardware, we must kick it
++ * with a timer until userspace watchdog software takes over. Do this
++ * unless /dev/watchdog is open or CONFIG_WATCHDOG_NOWAYOUT is set.
++ */
++static void retu_wdt_ping_enable(struct retu_wdt_dev *wdev)
++{
++ _retu_modify_counter(wdev, RETU_WDT_MAX_TIMER);
++ schedule_delayed_work(&wdev->ping_work,
++ round_jiffies_relative(RETU_WDT_DEFAULT_TIMER * HZ));
++}
++
++static void retu_wdt_ping_disable(struct retu_wdt_dev *wdev)
++{
++ _retu_modify_counter(wdev, RETU_WDT_MAX_TIMER);
++ cancel_delayed_work_sync(&wdev->ping_work);
++}
++
++static void retu_wdt_ping_work(struct work_struct *work)
++{
++ struct retu_wdt_dev *wdev = container_of(to_delayed_work(work),
++ struct retu_wdt_dev, ping_work);
++ retu_wdt_ping_enable(wdev);
++}
++
++static int retu_wdt_open(struct inode *inode, struct file *file)
++{
++ struct miscdevice *mdev = file->private_data;
++ struct retu_wdt_dev *wdev = container_of(mdev, struct retu_wdt_dev, miscdev);
++
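++ /* Allow only one opener at a time. */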
++ if (test_and_set_bit(0, &wdev->users))
++ return -EBUSY;
++
++ retu_wdt_ping_disable(wdev);
++
++ return nonseekable_open(inode, file);
++}
++
++static int retu_wdt_release(struct inode *inode, struct file *file)
++{
++ struct miscdevice *mdev = file->private_data;
++ struct retu_wdt_dev *wdev = container_of(mdev, struct retu_wdt_dev, miscdev);
++
++#ifndef CONFIG_WATCHDOG_NOWAYOUT
++ retu_wdt_ping_enable(wdev);
++#endif
++ clear_bit(0, &wdev->users);
++
++ return 0;
++}
++
++static ssize_t retu_wdt_write(struct file *file, const char __user *data,
++ size_t len, loff_t *ppos)
++{
++ struct miscdevice *mdev = file->private_data;
++ struct retu_wdt_dev *wdev = container_of(mdev, struct retu_wdt_dev, miscdev);
++
++ if (len)
++ retu_modify_counter(wdev, RETU_WDT_MAX_TIMER);
++
++ return len;
++}
++
++static long retu_wdt_ioctl(struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++ struct miscdevice *mdev = file->private_data;
++ struct retu_wdt_dev *wdev = container_of(mdev, struct retu_wdt_dev, miscdev);
++ int new_margin;
++
++ static const struct watchdog_info ident = {
++ .identity = "Retu Watchdog",
++ .options = WDIOF_SETTIMEOUT,
++ .firmware_version = 0,
++ };
++
++ switch (cmd) {
++ default:
++ return -ENOTTY;
++ case WDIOC_GETSUPPORT:
++ if (copy_to_user((struct watchdog_info __user *)arg, &ident,
++ sizeof(ident)))
++ return -EFAULT;
++ return 0;
++ case WDIOC_GETSTATUS:
++ return put_user(0, (int __user *)arg);
++ case WDIOC_GETBOOTSTATUS:
++ if (cpu_is_omap16xx())
++ return put_user(omap_readw(ARM_SYSST),
++ (int __user *)arg);
++ if (cpu_is_omap24xx())
++ return put_user(omap_prcm_get_reset_sources(),
++ (int __user *)arg);
++ case WDIOC_KEEPALIVE:
++ retu_modify_counter(wdev, RETU_WDT_MAX_TIMER);
++ break;
++ case WDIOC_SETTIMEOUT:
++ if (get_user(new_margin, (int __user *)arg))
++ return -EFAULT;
++ retu_modify_counter(wdev, new_margin);
++ /* Fall through */
++ case WDIOC_GETTIMEOUT:
++ return put_user(wdev->period_val, (int __user *)arg);
++ }
++
++ return 0;
++}
++
++static const struct file_operations retu_wdt_fops = {
++ .owner = THIS_MODULE,
++ .write = retu_wdt_write,
++ .unlocked_ioctl = retu_wdt_ioctl,
++ .open = retu_wdt_open,
++ .release = retu_wdt_release,
++};
++
++static int __devinit retu_wdt_probe(struct platform_device *pdev)
++{
++ struct retu_wdt_dev *wdev;
++ int ret;
++
++ wdev = kzalloc(sizeof(struct retu_wdt_dev), GFP_KERNEL);
++ if (!wdev)
++ return -ENOMEM;
++
++ wdev->dev = &pdev->dev;
++ wdev->period_val = RETU_WDT_DEFAULT_TIMER;
++ mutex_init(&wdev->mutex);
++
++ platform_set_drvdata(pdev, wdev);
++
++ wdev->miscdev.parent = &pdev->dev;
++ wdev->miscdev.minor = WATCHDOG_MINOR;
++ wdev->miscdev.name = "watchdog";
++ wdev->miscdev.fops = &retu_wdt_fops;
++
++ ret = misc_register(&wdev->miscdev);
++ if (ret)
++ goto err_free_wdev;
++
++ INIT_DELAYED_WORK(&wdev->ping_work, retu_wdt_ping_work);
++
++ /* Kick the watchdog once so kernel booting can finish.
++ * If nowayout is not set, the ping work keeps kicking it instead. */
++#ifdef CONFIG_WATCHDOG_NOWAYOUT
++ retu_modify_counter(wdev, RETU_WDT_MAX_TIMER);
++#else
++ retu_wdt_ping_enable(wdev);
++#endif
++
++ return 0;
++
++err_free_wdev:
++ kfree(wdev);
++
++ return ret;
++}
++
++static int __devexit retu_wdt_remove(struct platform_device *pdev)
++{
++ struct retu_wdt_dev *wdev;
++
++ wdev = platform_get_drvdata(pdev);
++ misc_deregister(&wdev->miscdev);
++ cancel_delayed_work_sync(&wdev->ping_work);
++ kfree(wdev);
++
++ return 0;
++}
++
++static struct platform_driver retu_wdt_driver = {
++ .probe = retu_wdt_probe,
++ .remove = __devexit_p(retu_wdt_remove),
++ .driver = {
++ .name = "retu-wdt",
++ },
++};
++
++module_platform_driver(retu_wdt_driver);
++
++MODULE_ALIAS("platform:retu-wdt");
++MODULE_DESCRIPTION("Retu WatchDog");
++MODULE_AUTHOR("Amit Kucheria");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/cbus/retu.c b/drivers/cbus/retu.c
+new file mode 100644
+index 0000000..25fa405
+--- /dev/null
++++ b/drivers/cbus/retu.c
+@@ -0,0 +1,532 @@
++/**
++ * drivers/cbus/retu.c
++ *
++ * Support functions for Retu ASIC
++ *
++ * Copyright (C) 2004, 2005 Nokia Corporation
++ *
++ * Written by Juha Yrjölä <juha.yrjola@nokia.com>,
++ * David Weinehall <david.weinehall@nokia.com>, and
++ * Mikko Ylinen <mikko.k.ylinen@nokia.com>
++ *
++ * This file is subject to the terms and conditions of the GNU General
++ * Public License. See the file "COPYING" in the main directory of this
++ * archive for more details.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++
++#include <linux/slab.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/device.h>
++#include <linux/mutex.h>
++#include <linux/irq.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <linux/platform_data/cbus.h>
++
++#include <asm/bitops.h>
++
++#include "cbus.h"
++#include "retu.h"
++
++struct retu {
++ /* Device lock */
++ struct mutex mutex;
++ struct device *dev;
++
++ struct irq_chip irq_chip;
++
++ int irq_base;
++ int irq_end;
++
++ int irq;
++
++ int mask;
++ bool mask_pending;
++
++ bool is_vilma;
++};
++
++static struct retu *the_retu;
++
++/**
++ * __retu_read_reg - Read a value from a register in Retu
++ * @retu: pointer to retu structure
++ * @reg: the register address to read from
++ */
++static int __retu_read_reg(struct retu *retu, unsigned reg)
++{
++ return cbus_read_reg(retu->dev, CBUS_RETU_DEVICE_ID, reg);
++}
++
++/**
++ * __retu_write_reg - Writes a value to a register in Retu
++ * @retu: pointer to retu structure
++ * @reg: the register address to write to
++ * @val: the value to write to the register
++ */
++static void __retu_write_reg(struct retu *retu, unsigned reg, u16 val)
++{
++ cbus_write_reg(retu->dev, CBUS_RETU_DEVICE_ID, reg, val);
++}
++
++/**
++ * retu_read_reg - Read a value from a register in Retu
++ * @child: device pointer for the calling child
++ * @reg: the register to read from
++ *
++ * This function returns the contents of the specified register
++ */
++int retu_read_reg(struct device *child, unsigned reg)
++{
++ struct retu *retu = dev_get_drvdata(child->parent);
++
++ return __retu_read_reg(retu, reg);
++}
++EXPORT_SYMBOL_GPL(retu_read_reg);
++
++/**
++ * retu_write_reg - Write a value to a register in Retu
++ * @child: the pointer to our calling child
++ * @reg: the register to write to
++ * @val: the value to write to the register
++ *
++ * This function writes a value to the specified register
++ */
++void retu_write_reg(struct device *child, unsigned reg, u16 val)
++{
++ struct retu *retu = dev_get_drvdata(child->parent);
++
++ mutex_lock(&retu->mutex);
++ __retu_write_reg(retu, reg, val);
++ mutex_unlock(&retu->mutex);
++}
++EXPORT_SYMBOL_GPL(retu_write_reg);
++
++/**
++ * retu_set_clear_reg_bits - helper function to read/set/clear bits
++ * @child: device pointer to calling child
++ * @reg: the register address
++ * @set: mask for setting bits
++ * @clear: mask for clearing bits
++ */
++void retu_set_clear_reg_bits(struct device *child, unsigned reg, u16 set,
++ u16 clear)
++{
++ struct retu *retu = dev_get_drvdata(child->parent);
++ u16 w;
++
++ mutex_lock(&retu->mutex);
++ w = __retu_read_reg(retu, reg);
++ w &= ~clear;
++ w |= set;
++ __retu_write_reg(retu, reg, w);
++ mutex_unlock(&retu->mutex);
++}
++EXPORT_SYMBOL_GPL(retu_set_clear_reg_bits);
++
++#define ADC_MAX_CHAN_NUMBER 13
++
++/**
++ * retu_read_adc - Reads AD conversion result
++ * @child: device pointer to calling child
++ * @channel: the ADC channel to read from
++ */
++int retu_read_adc(struct device *child, int channel)
++{
++ struct retu *retu = dev_get_drvdata(child->parent);
++ int res;
++
++ if (!retu)
++ return -ENODEV;
++
++ if (channel < 0 || channel > ADC_MAX_CHAN_NUMBER)
++ return -EINVAL;
++
++ mutex_lock(&retu->mutex);
++
++ if ((channel == 8) && retu->is_vilma) {
++ int scr = __retu_read_reg(retu, RETU_REG_ADCSCR);
++ int ch = (__retu_read_reg(retu, RETU_REG_ADCR) >> 10) & 0xf;
++ if (((scr & 0xff) != 0) && (ch != 8))
++ __retu_write_reg(retu, RETU_REG_ADCSCR, (scr & ~0xff));
++ }
++
++ /* Select the channel and read result */
++ __retu_write_reg(retu, RETU_REG_ADCR, channel << 10);
++ res = __retu_read_reg(retu, RETU_REG_ADCR) & 0x3ff;
++
++ if (retu->is_vilma)
++ __retu_write_reg(retu, RETU_REG_ADCR, (1 << 13));
++
++ /* Unlock retu */
++ mutex_unlock(&retu->mutex);
++
++ return res;
++}
++EXPORT_SYMBOL_GPL(retu_read_adc);
++
++static irqreturn_t retu_irq_handler(int irq, void *_retu)
++{
++ struct retu *retu = _retu;
++
++ u16 idr;
++ u16 imr;
++
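++ /*
++  * Read the pending interrupts, drop the masked ones and write the
++  * result back to IDR to acknowledge what is about to be handled.
++  */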
++ mutex_lock(&retu->mutex);
++ idr = __retu_read_reg(retu, RETU_REG_IDR);
++ imr = __retu_read_reg(retu, RETU_REG_IMR);
++ idr &= ~imr;
++ __retu_write_reg(retu, RETU_REG_IDR, idr);
++ mutex_unlock(&retu->mutex);
++
++ if (!idr) {
++ dev_vdbg(retu->dev, "No IRQ, spurious?\n");
++ return IRQ_NONE;
++ }
++
++ while (idr) {
++ unsigned long pending = __ffs(idr);
++ unsigned int irq;
++
++ idr &= ~BIT(pending);
++ irq = pending + retu->irq_base;
++ handle_nested_irq(irq);
++ }
++
++ return IRQ_HANDLED;
++}
++
++/* -------------------------------------------------------------------------- */
++
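++/*
++ * Mask changes are only cached here; they are written out to RETU_REG_IMR
++ * from retu_bus_sync_unlock() once the bus mutex is held.
++ */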
++static void retu_irq_mask(struct irq_data *data)
++{
++ struct retu *retu = irq_data_get_irq_chip_data(data);
++ int irq = data->irq;
++
++ retu->mask |= (1 << (irq - retu->irq_base));
++ retu->mask_pending = true;
++}
++
++static void retu_irq_unmask(struct irq_data *data)
++{
++ struct retu *retu = irq_data_get_irq_chip_data(data);
++ int irq = data->irq;
++
++ retu->mask &= ~(1 << (irq - retu->irq_base));
++ retu->mask_pending = true;
++
++}
++
++static void retu_bus_lock(struct irq_data *data)
++{
++ struct retu *retu = irq_data_get_irq_chip_data(data);
++
++ mutex_lock(&retu->mutex);
++}
++
++static void retu_bus_sync_unlock(struct irq_data *data)
++{
++ struct retu *retu = irq_data_get_irq_chip_data(data);
++
++ if (retu->mask_pending) {
++ __retu_write_reg(retu, RETU_REG_IMR, retu->mask);
++ retu->mask_pending = false;
++ }
++
++ mutex_unlock(&retu->mutex);
++}
++
++static inline void retu_irq_setup(int irq)
++{
++#ifdef CONFIG_ARM
++ set_irq_flags(irq, IRQF_VALID);
++#else
++ irq_set_noprobe(irq);
++#endif
++}
++
++static void retu_irq_init(struct retu *retu)
++{
++ int base = retu->irq_base;
++ int end = retu->irq_end;
++ int irq;
++
++ for (irq = base; irq < end; irq++) {
++ irq_set_chip_data(irq, retu);
++ irq_set_chip(irq, &retu->irq_chip);
++ irq_set_nested_thread(irq, 1);
++ retu_irq_setup(irq);
++ }
++}
++
++static void retu_irq_exit(struct retu *retu)
++{
++ int base = retu->irq_base;
++ int end = retu->irq_end;
++ int irq;
++
++ for (irq = base; irq < end; irq++) {
++#ifdef CONFIG_ARM
++ set_irq_flags(irq, 0);
++#endif
++ irq_set_chip_and_handler(irq, NULL, NULL);
++ irq_set_chip_data(irq, NULL);
++ }
++}
++
++/* -------------------------------------------------------------------------- */
++
++/**
++ * retu_power_off - Shut down power to system
++ *
++ * This function puts the system in power off state
++ */
++static void retu_power_off(void)
++{
++ struct retu *retu = the_retu;
++ unsigned reg;
++
++ reg = __retu_read_reg(retu, RETU_REG_CC1);
++
++ /* Ignore power button state */
++ __retu_write_reg(retu, RETU_REG_CC1, reg | 2);
++ /* Expire watchdog immediately */
++ __retu_write_reg(retu, RETU_REG_WATCHDOG, 0);
++ /* Wait for power off */
++ for (;;);
++}
++
++static struct resource generic_resources[] = {
++ {
++ .start = -EINVAL, /* fixed later */
++ .flags = IORESOURCE_IRQ,
++ },
++ {
++ .start = -EINVAL, /* fixed later */
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++/**
++ * retu_allocate_child - Allocates one Retu child
++ * @name: name of new child
++ * @parent: parent device for this child
++ */
++static struct device *retu_allocate_child(char *name, struct device *parent,
++ int irq_base, int irq1, int irq2, int num)
++{
++ struct platform_device *pdev;
++ int status;
++
++ pdev = platform_device_alloc(name, -1);
++ if (!pdev) {
++ dev_dbg(parent, "can't allocate %s\n", name);
++ goto err;
++ }
++
++ pdev->dev.parent = parent;
++
++ if (num) {
++ generic_resources[0].start = irq_base + irq1;
++ generic_resources[1].start = irq_base + irq2;
++
++ status = platform_device_add_resources(pdev,
++ generic_resources, num);
++ if (status < 0) {
++ dev_dbg(parent, "can't add resources to %s\n", name);
++ goto err;
++ }
++ }
++
++ status = platform_device_add(pdev);
++ if (status < 0) {
++ dev_dbg(parent, "can't add %s\n", name);
++ goto err;
++ }
++
++ return &pdev->dev;
++
++err:
++ platform_device_put(pdev);
++
++ return NULL;
++}
++
++/**
++ * retu_allocate_children - Allocates Retu's children
++ */
++static int retu_allocate_children(struct device *parent, int irq_base)
++{
++ struct device *child;
++
++ child = retu_allocate_child("retu-pwrbutton", parent, irq_base,
++ RETU_INT_PWR, -1, 1);
++ if (!child)
++ return -ENOMEM;
++
++ child = retu_allocate_child("retu-headset", parent, irq_base,
++ RETU_INT_HOOK, -1, 1);
++ if (!child)
++ return -ENOMEM;
++
++ child = retu_allocate_child("retu-rtc", parent, irq_base,
++ RETU_INT_RTCS, RETU_INT_RTCA, 2);
++ if (!child)
++ return -ENOMEM;
++
++ child = retu_allocate_child("retu-wdt", parent, -1, -1, -1, 0);
++ if (!child)
++ return -ENOMEM;
++
++ return 0;
++}
++
++/**
++ * retu_probe - Probe for Retu ASIC
++ * @dev: the Retu device
++ *
++ * Probe for the Retu ASIC and allocate memory
++ * for its device-struct if found
++ */
++static int __devinit retu_probe(struct platform_device *pdev)
++{
++ struct irq_chip *chip;
++ struct retu *retu;
++
++ int ret = -ENOMEM;
++ int rev;
++
++ retu = kzalloc(sizeof(*retu), GFP_KERNEL);
++ if (!retu) {
++ dev_err(&pdev->dev, "not enough memory\n");
++ goto err0;
++ }
++
++ platform_set_drvdata(pdev, retu);
++
++ ret = irq_alloc_descs(-1, 0, MAX_RETU_IRQ_HANDLERS, 0);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "failed to allocate IRQ descs\n");
++ goto err1;
++ }
++
++ chip = &retu->irq_chip;
++
++ chip->name = "retu";
++ chip->irq_bus_lock = retu_bus_lock;
++ chip->irq_bus_sync_unlock = retu_bus_sync_unlock;
++ chip->irq_mask = retu_irq_mask;
++ chip->irq_unmask = retu_irq_unmask;
++
++ retu->irq = platform_get_irq(pdev, 0);
++ retu->irq_base = ret;
++ retu->irq_end = ret + MAX_RETU_IRQ_HANDLERS;
++ retu->dev = &pdev->dev;
++
++ the_retu = retu;
++
++ mutex_init(&retu->mutex);
++
++ retu_irq_init(retu);
++
++ rev = __retu_read_reg(retu, RETU_REG_ASICR) & 0xff;
++ if (rev & (1 << 7))
++ retu->is_vilma = true;
++
++ dev_info(&pdev->dev, "%s v%d.%d found\n",
++ retu->is_vilma ? "Vilma" : "Retu",
++ (rev >> 4) & 0x07, rev & 0x0f);
++
++ /* Mask all RETU interrupts */
++ retu->mask = 0xffff;
++ __retu_write_reg(retu, RETU_REG_IMR, retu->mask);
++
++ ret = request_threaded_irq(retu->irq, NULL, retu_irq_handler,
++ IRQF_ONESHOT, "retu", retu);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "Unable to register IRQ handler\n");
++ goto err2;
++ }
++
++ irq_set_irq_wake(retu->irq, 1);
++
++ /* Register power off function */
++ pm_power_off = retu_power_off;
++
++ ret = retu_allocate_children(&pdev->dev, retu->irq_base);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "Unable to allocate Retu children\n");
++ goto err3;
++ }
++
++ return 0;
++
++err3:
++ pm_power_off = NULL;
++ free_irq(retu->irq, retu);
++
++err2:
++ retu_irq_exit(retu);
++ irq_free_descs(retu->irq_base, MAX_RETU_IRQ_HANDLERS);
++
++err1:
++ kfree(retu);
++ the_retu = NULL;
++
++err0:
++ return ret;
++}
++
++static int __devexit retu_remove(struct platform_device *pdev)
++{
++ struct retu *retu = platform_get_drvdata(pdev);
++
++ pm_power_off = NULL;
++ the_retu = NULL;
++
++ free_irq(retu->irq, retu);
++ retu_irq_exit(retu);
++ irq_free_descs(retu->irq_base, MAX_RETU_IRQ_HANDLERS);
++ kfree(retu);
++
++ return 0;
++}
++
++static const struct of_device_id retu_match_table[] __devinitconst = {
++ {
++ .compatible = "nokia,retu",
++ },
++ {},
++};
++MODULE_DEVICE_TABLE(of, retu_match_table);
++
++static struct platform_driver retu_driver = {
++ .probe = retu_probe,
++ .remove = __devexit_p(retu_remove),
++ .driver = {
++ .name = "retu",
++ .of_match_table = retu_match_table,
++ },
++};
++
++module_platform_driver(retu_driver);
++
++MODULE_ALIAS("platform:retu");
++MODULE_DESCRIPTION("Retu ASIC control");
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Juha Yrjölä");
++MODULE_AUTHOR("David Weinehall");
++MODULE_AUTHOR("Mikko Ylinen");
+diff --git a/drivers/cbus/retu.h b/drivers/cbus/retu.h
+new file mode 100644
+index 0000000..d35ea77
+--- /dev/null
++++ b/drivers/cbus/retu.h
+@@ -0,0 +1,85 @@
++/**
++ * drivers/cbus/retu.h
++ *
++ * Copyright (C) 2004, 2005 Nokia Corporation
++ *
++ * Written by Juha Yrjölä <juha.yrjola@nokia.com> and
++ * David Weinehall <david.weinehall@nokia.com>
++ *
++ * This file is subject to the terms and conditions of the GNU General
++ * Public License. See the file "COPYING" in the main directory of this
++ * archive for more details.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#ifndef __DRIVERS_CBUS_RETU_H
++#define __DRIVERS_CBUS_RETU_H
++
++#include <linux/types.h>
++
++/* Registers */
++#define RETU_REG_ASICR 0x00 /* ASIC ID & revision */
++#define RETU_REG_IDR 0x01 /* Interrupt ID */
++#define RETU_REG_IMR 0x02 /* Interrupt mask */
++#define RETU_REG_RTCDSR 0x03 /* RTC seconds register */
++#define RETU_REG_RTCHMR 0x04 /* RTC hours and minutes register */
++#define RETU_REG_RTCHMAR 0x05 /* RTC hours and minutes alarm and time set register */
++#define RETU_REG_RTCCALR 0x06 /* RTC calibration register */
++#define RETU_REG_ADCR 0x08 /* ADC result */
++#define RETU_REG_ADCSCR 0x09 /* ADC sample ctrl */
++#define RETU_REG_CC1 0x0d /* Common control register 1 */
++#define RETU_REG_CC2 0x0e /* Common control register 2 */
++#define RETU_REG_CTRL_CLR 0x0f /* Regulator clear register */
++#define RETU_REG_CTRL_SET 0x10 /* Regulator set register */
++#define RETU_REG_STATUS 0x16 /* Status register */
++#define RETU_REG_STATUS_BATAVAIL 0x0100 /* Battery available */
++#define RETU_REG_STATUS_CHGPLUG 0x1000 /* Charger is plugged in */
++#define RETU_REG_WATCHDOG 0x17 /* Watchdog register */
++#define RETU_REG_AUDTXR 0x18 /* Audio Codec Tx register */
++#define RETU_REG_MAX 0x1f
++
++/* Interrupt sources */
++#define RETU_INT_PWR 0
++#define RETU_INT_CHAR 1
++#define RETU_INT_RTCS 2
++#define RETU_INT_RTCM 3
++#define RETU_INT_RTCD 4
++#define RETU_INT_RTCA 5
++#define RETU_INT_HOOK 6
++#define RETU_INT_HEAD 7
++#define RETU_INT_ADCS 8
++
++#define MAX_RETU_IRQ_HANDLERS 16
++
++/* ADC channels */
++#define RETU_ADC_GND 0x00 /* Ground */
++#define RETU_ADC_BSI 0x01 /* Battery Size Indicator */
++#define RETU_ADC_BATTEMP 0x02 /* Battery temperature */
++#define RETU_ADC_CHGVOLT 0x03 /* Charger voltage */
++#define RETU_ADC_HEADSET 0x04 /* Headset detection */
++#define RETU_ADC_HOOKDET 0x05 /* Hook detection */
++#define RETU_ADC_RFGP 0x06 /* RF GP */
++#define RETU_ADC_WBTX 0x07 /* Wideband Tx detection */
++#define RETU_ADC_BATTVOLT 0x08 /* Battery voltage measurement */
++#define RETU_ADC_GND2 0x09 /* Ground */
++#define RETU_ADC_LIGHTSENS 0x0A /* Light sensor */
++#define RETU_ADC_LIGHTTEMP 0x0B /* Light sensor temperature */
++#define RETU_ADC_BKUPVOLT 0x0C /* Backup battery voltage */
++#define RETU_ADC_TEMP 0x0D /* RETU temperature */
++
++
++int retu_read_reg(struct device *child, unsigned reg);
++void retu_write_reg(struct device *child, unsigned reg, u16 val);
++void retu_set_clear_reg_bits(struct device *child, unsigned reg, u16 set,
++ u16 clear);
++int retu_read_adc(struct device *child, int channel);
++
++#endif /* __DRIVERS_CBUS_RETU_H */
+diff --git a/drivers/cbus/tahvo-usb.c b/drivers/cbus/tahvo-usb.c
+new file mode 100644
+index 0000000..15da853
+--- /dev/null
++++ b/drivers/cbus/tahvo-usb.c
+@@ -0,0 +1,741 @@
++/**
++ * drivers/cbus/tahvo-usb.c
++ *
++ * Tahvo USB transceiver
++ *
++ * Copyright (C) 2005-2006 Nokia Corporation
++ *
++ * Parts copied from drivers/i2c/chips/isp1301_omap.c
++ * Copyright (C) 2004 Texas Instruments
++ * Copyright (C) 2004 David Brownell
++ *
++ * Written by Juha Yrjölä <juha.yrjola@nokia.com>,
++ * Tony Lindgren <tony@atomide.com>, and
++ * Timo Teräs <timo.teras@nokia.com>
++ *
++ * This file is subject to the terms and conditions of the GNU General
++ * Public License. See the file "COPYING" in the main directory of this
++ * archive for more details.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/io.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <linux/usb/ch9.h>
++#include <linux/usb/gadget.h>
++#include <linux/usb.h>
++#include <linux/usb/otg.h>
++#include <linux/i2c.h>
++#include <linux/workqueue.h>
++#include <linux/kobject.h>
++#include <linux/clk.h>
++#include <linux/mutex.h>
++
++#include <asm/irq.h>
++#include <plat/usb.h>
++
++#include "cbus.h"
++#include "tahvo.h"
++
++#define DRIVER_NAME "tahvo-usb"
++
++#define USBR_SLAVE_CONTROL (1 << 8)
++#define USBR_VPPVIO_SW (1 << 7)
++#define USBR_SPEED (1 << 6)
++#define USBR_REGOUT (1 << 5)
++#define USBR_MASTER_SW2 (1 << 4)
++#define USBR_MASTER_SW1 (1 << 3)
++#define USBR_SLAVE_SW (1 << 2)
++#define USBR_NSUSPEND (1 << 1)
++#define USBR_SEMODE (1 << 0)
++
++/* bits in OTG_CTRL */
++
++/* Bits that are controlled by OMAP OTG and are read-only */
++#define OTG_CTRL_OMAP_MASK (OTG_PULLDOWN|OTG_PULLUP|OTG_DRV_VBUS|\
++ OTG_PD_VBUS|OTG_PU_VBUS|OTG_PU_ID)
++/* Bits that are controlled by transceiver */
++#define OTG_CTRL_XCVR_MASK (OTG_ASESSVLD|OTG_BSESSEND|\
++ OTG_BSESSVLD|OTG_VBUSVLD|OTG_ID)
++/* Bits that are controlled by system */
++#define OTG_CTRL_SYS_MASK (OTG_A_BUSREQ|OTG_A_SETB_HNPEN|OTG_B_BUSREQ|\
++ OTG_B_HNPEN|OTG_BUSDROP)
++
++#if defined(CONFIG_USB_OHCI_HCD) && !defined(CONFIG_USB_OTG)
++#error tahvo-usb.c does not work with OHCI yet!
++#endif
++
++#define TAHVO_MODE_HOST 0
++#define TAHVO_MODE_PERIPHERAL 1
++
++#ifdef CONFIG_USB_OTG
++#define TAHVO_MODE(tu) (tu)->tahvo_mode
++#elif defined(CONFIG_USB_GADGET_OMAP)
++#define TAHVO_MODE(tu) TAHVO_MODE_PERIPHERAL
++#else
++#define TAHVO_MODE(tu) TAHVO_MODE_HOST
++#endif
++
++struct tahvo_usb {
++ struct device *dev;
++ struct platform_device *pt_dev;
++ struct otg_transceiver otg;
++ int vbus_state;
++ struct mutex serialize;
++#ifdef CONFIG_USB_OTG
++ int tahvo_mode;
++#endif
++ struct clk *ick;
++
++ int irq;
++};
++static struct tahvo_usb *tahvo_usb_device;
++
++/*
++ * ---------------------------------------------------------------------------
++ * OTG related functions
++ *
++ * These should be separated into an omap-otg.c driver module, as they are
++ * used by various transceivers. They are also needed in the UDC-only case.
++ * These functions are copied from the GPL isp1301_omap.c driver.
++ * ---------------------------------------------------------------------------
++ */
++static struct platform_device *tahvo_otg_dev;
++
++static irqreturn_t omap_otg_irq(int irq, void *arg)
++{
++ u16 otg_irq;
++
++ otg_irq = omap_readw(OTG_IRQ_SRC);
++ if (otg_irq & OPRT_CHG) {
++ omap_writew(OPRT_CHG, OTG_IRQ_SRC);
++ } else if (otg_irq & B_SRP_TMROUT) {
++ omap_writew(B_SRP_TMROUT, OTG_IRQ_SRC);
++ } else if (otg_irq & B_HNP_FAIL) {
++ omap_writew(B_HNP_FAIL, OTG_IRQ_SRC);
++ } else if (otg_irq & A_SRP_DETECT) {
++ omap_writew(A_SRP_DETECT, OTG_IRQ_SRC);
++ } else if (otg_irq & A_REQ_TMROUT) {
++ omap_writew(A_REQ_TMROUT, OTG_IRQ_SRC);
++ } else if (otg_irq & A_VBUS_ERR) {
++ omap_writew(A_VBUS_ERR, OTG_IRQ_SRC);
++ } else if (otg_irq & DRIVER_SWITCH) {
++#ifdef CONFIG_USB_OTG
++ struct tahvo_usb *tu = arg;
++
++ if ((!(omap_readl(OTG_CTRL) & OTG_DRIVER_SEL)) &&
++ tu->otg.host && tu->otg.state == OTG_STATE_A_HOST) {
++ /* role is host */
++ usb_bus_start_enum(tu->otg.host,
++ tu->otg.host->otg_port);
++ }
++#endif
++ omap_writew(DRIVER_SWITCH, OTG_IRQ_SRC);
++ } else
++ return IRQ_NONE;
++
++ return IRQ_HANDLED;
++
++}
++
++static int tahvo_otg_init(void)
++{
++ u32 l;
++
++#ifdef CONFIG_USB_OTG
++ if (!tahvo_otg_dev) {
++ printk("tahvo-usb: no tahvo_otg_dev\n");
++ return -ENODEV;
++ }
++#endif
++
++ l = omap_readl(OTG_SYSCON_1);
++ l &= ~OTG_IDLE_EN;
++ omap_writel(l, OTG_SYSCON_1);
++ udelay(100);
++
++ /* some of these values are board-specific... */
++ l = omap_readl(OTG_SYSCON_2);
++ l |= OTG_EN
++ /* for B-device: */
++ | SRP_GPDATA /* 9msec Bdev D+ pulse */
++ | SRP_GPDVBUS /* discharge after VBUS pulse */
++ // | (3 << 24) /* 2msec VBUS pulse */
++ /* for A-device: */
++ | (0 << 20) /* 200ms nominal A_WAIT_VRISE timer */
++ | SRP_DPW /* detect 167+ns SRP pulses */
++ | SRP_DATA | SRP_VBUS; /* accept both kinds of SRP pulse */
++ omap_writel(l, OTG_SYSCON_2);
++
++ omap_writew(DRIVER_SWITCH | OPRT_CHG
++ | B_SRP_TMROUT | B_HNP_FAIL
++ | A_VBUS_ERR | A_SRP_DETECT | A_REQ_TMROUT,
++ OTG_IRQ_EN);
++ l = omap_readl(OTG_SYSCON_2);
++ l |= OTG_EN;
++ omap_writel(l, OTG_SYSCON_2);
++
++ return 0;
++}
++
++static int __devinit omap_otg_probe(struct platform_device *pdev)
++{
++ int ret;
++
++ tahvo_otg_dev = pdev;
++ ret = tahvo_otg_init();
++ if (ret != 0) {
++ printk(KERN_ERR "tahvo-usb: tahvo_otg_init failed\n");
++ return ret;
++ }
++
++ return request_irq(tahvo_otg_dev->resource[1].start,
++ omap_otg_irq, IRQF_DISABLED, DRIVER_NAME,
++ tahvo_usb_device);
++}
++
++static int __devexit omap_otg_remove(struct platform_device *pdev)
++{
++ free_irq(tahvo_otg_dev->resource[1].start, tahvo_usb_device);
++ tahvo_otg_dev = NULL;
++
++ return 0;
++}
++
++struct platform_driver omap_otg_driver = {
++ .probe = omap_otg_probe,
++ .remove = __devexit_p(omap_otg_remove),
++ .driver = {
++ .name = "omap_otg",
++ },
++};
++
++/*
++ * ---------------------------------------------------------------------------
++ * Tahvo related functions
++ * These are Nokia proprietary code, except for the OTG register settings,
++ * which are copied from isp1301.c
++ * ---------------------------------------------------------------------------
++ */
++static ssize_t vbus_state_show(struct device *device,
++ struct device_attribute *attr, char *buf)
++{
++ struct tahvo_usb *tu = dev_get_drvdata(device);
++ return sprintf(buf, "%d\n", tu->vbus_state);
++}
++static DEVICE_ATTR(vbus_state, 0444, vbus_state_show, NULL);
++
++int vbus_active = 0;
++
++static void check_vbus_state(struct tahvo_usb *tu)
++{
++ int reg, prev_state;
++
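++ /* Bit 0 of IDSR reflects whether VBUS is currently present. */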
++ reg = tahvo_read_reg(tu->dev, TAHVO_REG_IDSR);
++ if (reg & 0x01) {
++ u32 l;
++
++ vbus_active = 1;
++ switch (tu->otg.state) {
++ case OTG_STATE_B_IDLE:
++ /* Enable the gadget driver */
++ if (tu->otg.gadget)
++ usb_gadget_vbus_connect(tu->otg.gadget);
++ /* Set B-session valid and clear B-session ended to
++ * indicate that VBUS is OK. */
++ l = omap_readl(OTG_CTRL);
++ l &= ~OTG_BSESSEND;
++ l |= OTG_BSESSVLD;
++ omap_writel(l, OTG_CTRL);
++
++ tu->otg.state = OTG_STATE_B_PERIPHERAL;
++ break;
++ case OTG_STATE_A_IDLE:
++ /* Session is now valid assuming the USB hub is driving Vbus */
++ tu->otg.state = OTG_STATE_A_HOST;
++ break;
++ default:
++ break;
++ }
++ printk("USB cable connected\n");
++ } else {
++ switch (tu->otg.state) {
++ case OTG_STATE_B_PERIPHERAL:
++ if (tu->otg.gadget)
++ usb_gadget_vbus_disconnect(tu->otg.gadget);
++ tu->otg.state = OTG_STATE_B_IDLE;
++ break;
++ case OTG_STATE_A_HOST:
++ tu->otg.state = OTG_STATE_A_IDLE;
++ break;
++ default:
++ break;
++ }
++ printk("USB cable disconnected\n");
++ vbus_active = 0;
++ }
++
++ prev_state = tu->vbus_state;
++ tu->vbus_state = reg & 0x01;
++ if (prev_state != tu->vbus_state)
++ sysfs_notify(&tu->pt_dev->dev.kobj, NULL, "vbus_state");
++}
++
++static void tahvo_usb_become_host(struct tahvo_usb *tu)
++{
++ u32 l;
++
++ /* Clear the system and transceiver controlled bits
++ * and mark the A-session as always valid */
++ tahvo_otg_init();
++
++ l = omap_readl(OTG_CTRL);
++ l &= ~(OTG_CTRL_XCVR_MASK | OTG_CTRL_SYS_MASK);
++ l |= OTG_ASESSVLD;
++ omap_writel(l, OTG_CTRL);
++
++ /* Power up the transceiver in USB host mode */
++ tahvo_write_reg(tu->dev, TAHVO_REG_USBR, USBR_REGOUT | USBR_NSUSPEND |
++ USBR_MASTER_SW2 | USBR_MASTER_SW1);
++ tu->otg.state = OTG_STATE_A_IDLE;
++
++ check_vbus_state(tu);
++}
++
++static void tahvo_usb_stop_host(struct tahvo_usb *tu)
++{
++ tu->otg.state = OTG_STATE_A_IDLE;
++}
++
++static void tahvo_usb_become_peripheral(struct tahvo_usb *tu)
++{
++ u32 l;
++
++ /* Clear the system and transceiver controlled bits,
++ * then set ID to indicate peripheral mode and
++ * BSESSEND to indicate that there is no VBUS */
++ tahvo_otg_init();
++ l = omap_readl(OTG_CTRL);
++ l &= ~(OTG_CTRL_XCVR_MASK | OTG_CTRL_SYS_MASK | OTG_BSESSVLD);
++ l |= OTG_ID | OTG_BSESSEND;
++ omap_writel(l, OTG_CTRL);
++
++ /* Power up the transceiver and put it in USB peripheral mode */
++ tahvo_write_reg(tu->dev, TAHVO_REG_USBR, USBR_SLAVE_CONTROL |
++ USBR_REGOUT | USBR_NSUSPEND | USBR_SLAVE_SW);
++ tu->otg.state = OTG_STATE_B_IDLE;
++
++ check_vbus_state(tu);
++}
++
++static void tahvo_usb_stop_peripheral(struct tahvo_usb *tu)
++{
++ u32 l;
++
++ l = omap_readl(OTG_CTRL);
++ l &= ~OTG_BSESSVLD;
++ l |= OTG_BSESSEND;
++ omap_writel(l, OTG_CTRL);
++
++ if (tu->otg.gadget)
++ usb_gadget_vbus_disconnect(tu->otg.gadget);
++ tu->otg.state = OTG_STATE_B_IDLE;
++
++}
++
++static void tahvo_usb_power_off(struct tahvo_usb *tu)
++{
++ u32 l;
++ int id;
++
++ /* Disable gadget controller if any */
++ if (tu->otg.gadget)
++ usb_gadget_vbus_disconnect(tu->otg.gadget);
++
++ /* Disable OTG and interrupts */
++ if (TAHVO_MODE(tu) == TAHVO_MODE_PERIPHERAL)
++ id = OTG_ID;
++ else
++ id = 0;
++ l = omap_readl(OTG_CTRL);
++ l &= ~(OTG_CTRL_XCVR_MASK | OTG_CTRL_SYS_MASK | OTG_BSESSVLD);
++ l |= id | OTG_BSESSEND;
++ omap_writel(l, OTG_CTRL);
++ omap_writew(0, OTG_IRQ_EN);
++
++ l = omap_readl(OTG_SYSCON_2);
++ l &= ~OTG_EN;
++ omap_writel(l, OTG_SYSCON_2);
++
++ l = omap_readl(OTG_SYSCON_1);
++ l |= OTG_IDLE_EN;
++ omap_writel(l, OTG_SYSCON_1);
++
++ /* Power off transceiver */
++ tahvo_write_reg(tu->dev, TAHVO_REG_USBR, 0);
++ tu->otg.state = OTG_STATE_UNDEFINED;
++}
++
++
++static int tahvo_usb_set_power(struct otg_transceiver *dev, unsigned mA)
++{
++ struct tahvo_usb *tu = container_of(dev, struct tahvo_usb, otg);
++
++ dev_dbg(&tu->pt_dev->dev, "set_power %d mA\n", mA);
++
++ if (dev->state == OTG_STATE_B_PERIPHERAL) {
++ /* REVISIT: Can Tahvo charge battery from VBUS? */
++ }
++ return 0;
++}
++
++static int tahvo_usb_set_suspend(struct otg_transceiver *dev, int suspend)
++{
++ struct tahvo_usb *tu = container_of(dev, struct tahvo_usb, otg);
++ u16 w;
++
++ dev_dbg(&tu->pt_dev->dev, "set_suspend\n");
++
++ w = tahvo_read_reg(tu->dev, TAHVO_REG_USBR);
++ if (suspend)
++ w &= ~USBR_NSUSPEND;
++ else
++ w |= USBR_NSUSPEND;
++ tahvo_write_reg(tu->dev, TAHVO_REG_USBR, w);
++
++ return 0;
++}
++
++static int tahvo_usb_start_srp(struct otg_transceiver *dev)
++{
++ struct tahvo_usb *tu = container_of(dev, struct tahvo_usb, otg);
++ u32 otg_ctrl;
++
++ dev_dbg(&tu->pt_dev->dev, "start_srp\n");
++
++ if (!dev || tu->otg.state != OTG_STATE_B_IDLE)
++ return -ENODEV;
++
++ otg_ctrl = omap_readl(OTG_CTRL);
++ if (!(otg_ctrl & OTG_BSESSEND))
++ return -EINVAL;
++
++ otg_ctrl |= OTG_B_BUSREQ;
++ otg_ctrl &= ~OTG_A_BUSREQ & OTG_CTRL_SYS_MASK;
++ omap_writel(otg_ctrl, OTG_CTRL);
++ tu->otg.state = OTG_STATE_B_SRP_INIT;
++
++ return 0;
++}
++
++static int tahvo_usb_start_hnp(struct otg_transceiver *otg)
++{
++ struct tahvo_usb *tu = container_of(otg, struct tahvo_usb, otg);
++
++ dev_dbg(&tu->pt_dev->dev, "start_hnp\n");
++#ifdef CONFIG_USB_OTG
++ /* REVISIT: Add this for OTG */
++#endif
++ return -EINVAL;
++}
++
++static int tahvo_usb_set_host(struct otg_transceiver *otg, struct usb_bus *host)
++{
++ struct tahvo_usb *tu = container_of(otg, struct tahvo_usb, otg);
++ u32 l;
++
++ dev_dbg(&tu->pt_dev->dev, "set_host %p\n", host);
++
++ if (otg == NULL)
++ return -ENODEV;
++
++#if defined(CONFIG_USB_OTG) || !defined(CONFIG_USB_GADGET_OMAP)
++
++ mutex_lock(&tu->serialize);
++
++ if (host == NULL) {
++ if (TAHVO_MODE(tu) == TAHVO_MODE_HOST)
++ tahvo_usb_power_off(tu);
++ tu->otg.host = NULL;
++ mutex_unlock(&tu->serialize);
++ return 0;
++ }
++
++ l = omap_readl(OTG_SYSCON_1);
++ l &= ~(OTG_IDLE_EN | HST_IDLE_EN | DEV_IDLE_EN);
++ omap_writel(l, OTG_SYSCON_1);
++
++ if (TAHVO_MODE(tu) == TAHVO_MODE_HOST) {
++ tu->otg.host = NULL;
++ tahvo_usb_become_host(tu);
++ }
++
++ tu->otg.host = host;
++
++ mutex_unlock(&tu->serialize);
++#else
++	/* No host mode configured, so do not allow a host controller to be set */
++ return -EINVAL;
++#endif
++
++ return 0;
++}
++
++static int tahvo_usb_set_peripheral(struct otg_transceiver *otg, struct usb_gadget *gadget)
++{
++ struct tahvo_usb *tu = container_of(otg, struct tahvo_usb, otg);
++
++ dev_dbg(&tu->pt_dev->dev, "set_peripheral %p\n", gadget);
++
++ if (!otg)
++ return -ENODEV;
++
++#if defined(CONFIG_USB_OTG) || defined(CONFIG_USB_GADGET_OMAP)
++
++ mutex_lock(&tu->serialize);
++
++ if (!gadget) {
++ if (TAHVO_MODE(tu) == TAHVO_MODE_PERIPHERAL)
++ tahvo_usb_power_off(tu);
++ tu->otg.gadget = NULL;
++ mutex_unlock(&tu->serialize);
++ return 0;
++ }
++
++ tu->otg.gadget = gadget;
++ if (TAHVO_MODE(tu) == TAHVO_MODE_PERIPHERAL)
++ tahvo_usb_become_peripheral(tu);
++
++ mutex_unlock(&tu->serialize);
++#else
++	/* No gadget mode configured, so do not allow a gadget to be set */
++ return -EINVAL;
++#endif
++
++ return 0;
++}
++
++static irqreturn_t tahvo_usb_vbus_interrupt(int irq, void *_tu)
++{
++ struct tahvo_usb *tu = _tu;
++
++ check_vbus_state(tu);
++
++ return IRQ_HANDLED;
++}
++
++#ifdef CONFIG_USB_OTG
++static ssize_t otg_mode_show(struct device *device,
++ struct device_attribute *attr, char *buf)
++{
++ struct tahvo_usb *tu = dev_get_drvdata(device);
++ switch (tu->tahvo_mode) {
++ case TAHVO_MODE_HOST:
++ return sprintf(buf, "host\n");
++ case TAHVO_MODE_PERIPHERAL:
++ return sprintf(buf, "peripheral\n");
++ }
++ return sprintf(buf, "unknown\n");
++}
++
++static ssize_t otg_mode_store(struct device *device,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct tahvo_usb *tu = dev_get_drvdata(device);
++ int r;
++
++ r = strlen(buf);
++ mutex_lock(&tu->serialize);
++ if (strncmp(buf, "host", 4) == 0) {
++ if (tu->tahvo_mode == TAHVO_MODE_PERIPHERAL)
++ tahvo_usb_stop_peripheral(tu);
++ tu->tahvo_mode = TAHVO_MODE_HOST;
++ if (tu->otg.host) {
++ printk(KERN_INFO "Selected HOST mode: host controller present.\n");
++ tahvo_usb_become_host(tu);
++ } else {
++ printk(KERN_INFO "Selected HOST mode: no host controller, powering off.\n");
++ tahvo_usb_power_off(tu);
++ }
++ } else if (strncmp(buf, "peripheral", 10) == 0) {
++ if (tu->tahvo_mode == TAHVO_MODE_HOST)
++ tahvo_usb_stop_host(tu);
++ tu->tahvo_mode = TAHVO_MODE_PERIPHERAL;
++ if (tu->otg.gadget) {
++ printk(KERN_INFO "Selected PERIPHERAL mode: gadget driver present.\n");
++ tahvo_usb_become_peripheral(tu);
++ } else {
++ printk(KERN_INFO "Selected PERIPHERAL mode: no gadget driver, powering off.\n");
++ tahvo_usb_power_off(tu);
++ }
++ } else
++ r = -EINVAL;
++
++ mutex_unlock(&tu->serialize);
++ return r;
++}
++
++static DEVICE_ATTR(otg_mode, 0644, otg_mode_show, otg_mode_store);
++#endif
++
++static int __devinit tahvo_usb_probe(struct platform_device *pdev)
++{
++ struct tahvo_usb *tu;
++ struct device *dev = &pdev->dev;
++ int ret;
++ int irq;
++
++ dev_dbg(dev, "probe\n");
++
++ /* Create driver data */
++ tu = kzalloc(sizeof(*tu), GFP_KERNEL);
++ if (!tu)
++ return -ENOMEM;
++ tahvo_usb_device = tu;
++
++ tu->dev = dev;
++ tu->pt_dev = pdev;
++#ifdef CONFIG_USB_OTG
++ /* Default mode */
++#ifdef CONFIG_CBUS_TAHVO_USB_HOST_BY_DEFAULT
++ tu->tahvo_mode = TAHVO_MODE_HOST;
++#else
++ tu->tahvo_mode = TAHVO_MODE_PERIPHERAL;
++#endif
++#endif
++
++ mutex_init(&tu->serialize);
++
++ tu->ick = clk_get(NULL, "usb_l4_ick");
++ if (IS_ERR(tu->ick)) {
++ dev_err(dev, "Failed to get usb_l4_ick\n");
++ ret = PTR_ERR(tu->ick);
++ goto err_free_tu;
++ }
++ clk_enable(tu->ick);
++
++ /* Set initial state, so that we generate kevents only on
++ * state changes */
++ tu->vbus_state = tahvo_read_reg(tu->dev, TAHVO_REG_IDSR) & 0x01;
++
++ irq = platform_get_irq(pdev, 0);
++ tu->irq = irq;
++
++ /* We cannot enable interrupt until omap_udc is initialized */
++ ret = request_threaded_irq(irq, NULL, tahvo_usb_vbus_interrupt,
++ IRQF_ONESHOT, "tahvo-vbus", tu);
++ if (ret != 0) {
++ printk(KERN_ERR "Could not register Tahvo interrupt for VBUS\n");
++ goto err_release_clk;
++ }
++
++ /* Attributes */
++ ret = device_create_file(dev, &dev_attr_vbus_state);
++#ifdef CONFIG_USB_OTG
++ ret |= device_create_file(dev, &dev_attr_otg_mode);
++#endif
++ if (ret)
++ printk(KERN_ERR "attribute creation failed: %d\n", ret);
++
++ /* Create OTG interface */
++ tahvo_usb_power_off(tu);
++ tu->otg.state = OTG_STATE_UNDEFINED;
++ tu->otg.label = DRIVER_NAME;
++ tu->otg.set_host = tahvo_usb_set_host;
++ tu->otg.set_peripheral = tahvo_usb_set_peripheral;
++ tu->otg.set_power = tahvo_usb_set_power;
++ tu->otg.set_suspend = tahvo_usb_set_suspend;
++ tu->otg.start_srp = tahvo_usb_start_srp;
++ tu->otg.start_hnp = tahvo_usb_start_hnp;
++
++ ret = otg_set_transceiver(&tu->otg);
++ if (ret < 0) {
++ printk(KERN_ERR "Cannot register USB transceiver\n");
++ goto err_free_irq;
++ }
++
++ dev_set_drvdata(dev, tu);
++
++ return 0;
++
++err_free_irq:
++ free_irq(tu->irq, tu);
++err_release_clk:
++ clk_disable(tu->ick);
++ clk_put(tu->ick);
++err_free_tu:
++ kfree(tu);
++ tahvo_usb_device = NULL;
++
++ return ret;
++}
++
++static int __devexit tahvo_usb_remove(struct platform_device *pdev)
++{
++ struct tahvo_usb *tu = platform_get_drvdata(pdev);
++
++ dev_dbg(&pdev->dev, "remove\n");
++
++ free_irq(tu->irq, tu);
++ flush_scheduled_work();
++ otg_set_transceiver(0);
++ device_remove_file(&pdev->dev, &dev_attr_vbus_state);
++#ifdef CONFIG_USB_OTG
++ device_remove_file(&pdev->dev, &dev_attr_otg_mode);
++#endif
++ clk_disable(tu->ick);
++ clk_put(tu->ick);
++
++ kfree(tu);
++ tahvo_usb_device = NULL;
++
++ return 0;
++}
++
++static struct platform_driver tahvo_usb_driver = {
++ .probe = tahvo_usb_probe,
++ .remove = __devexit_p(tahvo_usb_remove),
++ .driver = {
++ .name = "tahvo-usb",
++ },
++};
++
++static int __init tahvo_usb_init(void)
++{
++ int ret = 0;
++
++ ret = platform_driver_register(&tahvo_usb_driver);
++ if (ret)
++ return ret;
++
++ ret = platform_driver_register(&omap_otg_driver);
++ if (ret) {
++ platform_driver_unregister(&tahvo_usb_driver);
++ return ret;
++ }
++
++ return 0;
++}
++module_init(tahvo_usb_init);
++
++static void __exit tahvo_usb_exit(void)
++{
++ platform_driver_unregister(&omap_otg_driver);
++ platform_driver_unregister(&tahvo_usb_driver);
++}
++module_exit(tahvo_usb_exit);
++
++MODULE_DESCRIPTION("Tahvo USB OTG Transceiver Driver");
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Juha Yrjölä, Tony Lindgren, and Timo Teräs");
+diff --git a/drivers/cbus/tahvo.c b/drivers/cbus/tahvo.c
+new file mode 100644
+index 0000000..819111a
+--- /dev/null
++++ b/drivers/cbus/tahvo.c
+@@ -0,0 +1,415 @@
++/**
++ * drivers/cbus/tahvo.c
++ *
++ * Support functions for Tahvo ASIC
++ *
++ * Copyright (C) 2004, 2005 Nokia Corporation
++ *
++ * Written by Juha Yrjölä <juha.yrjola@nokia.com>,
++ * David Weinehall <david.weinehall@nokia.com>, and
++ * Mikko Ylinen <mikko.k.ylinen@nokia.com>
++ *
++ * This file is subject to the terms and conditions of the GNU General
++ * Public License. See the file "COPYING" in the main directory of this
++ * archive for more details.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++
++#include <linux/slab.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/device.h>
++#include <linux/irq.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <linux/platform_data/cbus.h>
++#include <linux/mutex.h>
++
++#include "cbus.h"
++#include "tahvo.h"
++
++struct tahvo {
++ /* device lock */
++ struct mutex mutex;
++ struct device *dev;
++
++ struct irq_chip irq_chip;
++
++ int irq_base;
++ int irq_end;
++ int irq;
++
++ int mask;
++
++ unsigned int mask_pending:1;
++ unsigned int is_betty:1;
++};
++
++/**
++ * __tahvo_read_reg - Reads a value from a register in Tahvo
++ * @tahvo: pointer to tahvo structure
++ * @reg: the register address to read from
++ */
++static int __tahvo_read_reg(struct tahvo *tahvo, unsigned reg)
++{
++ return cbus_read_reg(tahvo->dev, CBUS_TAHVO_DEVICE_ID, reg);
++}
++
++/**
++ * __tahvo_write_reg - Writes a value to a register in Tahvo
++ * @tahvo: pointer to tahvo structure
++ * @reg: register address to write to
++ * @val: the value to be written to @reg
++ */
++static void __tahvo_write_reg(struct tahvo *tahvo, unsigned reg, u16 val)
++{
++ cbus_write_reg(tahvo->dev, CBUS_TAHVO_DEVICE_ID, reg, val);
++}
++
++/**
++ * tahvo_read_reg - Read a value from a register in Tahvo
++ * @child: device pointer from the calling child
++ * @reg: the register to read from
++ *
++ * This function returns the contents of the specified register
++ */
++int tahvo_read_reg(struct device *child, unsigned reg)
++{
++ struct tahvo *tahvo = dev_get_drvdata(child->parent);
++
++ return __tahvo_read_reg(tahvo, reg);
++}
++EXPORT_SYMBOL(tahvo_read_reg);
++
++/**
++ * tahvo_write_reg - Write a value to a register in Tahvo
++ * @child: device pointer from the calling child
++ * @reg: the register to write to
++ * @val: the value to write to the register
++ *
++ * This function writes a value to the specified register
++ */
++void tahvo_write_reg(struct device *child, unsigned reg, u16 val)
++{
++ struct tahvo *tahvo = dev_get_drvdata(child->parent);
++
++ mutex_lock(&tahvo->mutex);
++ __tahvo_write_reg(tahvo, reg, val);
++ mutex_unlock(&tahvo->mutex);
++}
++EXPORT_SYMBOL(tahvo_write_reg);
++
++/**
++ * tahvo_set_clear_reg_bits - set and clear register bits atomically
++ * @child: device pointer from the calling child
++ * @reg: the register to write to
++ * @set: the bits to set
++ * @clear: the bits to clear
++ *
++ * This function sets and clears the specified Tahvo register bits atomically
++ */
++void tahvo_set_clear_reg_bits(struct device *child, unsigned reg, u16 set,
++ u16 clear)
++{
++ struct tahvo *tahvo = dev_get_drvdata(child->parent);
++ u16 w;
++
++ mutex_lock(&tahvo->mutex);
++ w = __tahvo_read_reg(tahvo, reg);
++ w &= ~clear;
++ w |= set;
++ __tahvo_write_reg(tahvo, reg, w);
++ mutex_unlock(&tahvo->mutex);
++}
++
++static irqreturn_t tahvo_irq_handler(int irq, void *_tahvo)
++{
++ struct tahvo *tahvo = _tahvo;
++ u16 id;
++ u16 im;
++
++ mutex_lock(&tahvo->mutex);
++ id = __tahvo_read_reg(tahvo, TAHVO_REG_IDR);
++ im = __tahvo_read_reg(tahvo, TAHVO_REG_IMR);
++ id &= ~im;
++ __tahvo_write_reg(tahvo, TAHVO_REG_IDR, id);
++ mutex_unlock(&tahvo->mutex);
++
++ if (!id) {
++ dev_vdbg(tahvo->dev, "No IRQ, spurious ?\n");
++ return IRQ_NONE;
++ }
++
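++	/* Each bit left set in IDR is one Tahvo sub-interrupt; dispatch
++	 * it as a nested IRQ relative to irq_base. */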
++ while (id) {
++ unsigned long pending = __ffs(id);
++ unsigned int irq;
++
++ id &= ~BIT(pending);
++ irq = pending + tahvo->irq_base;
++ handle_nested_irq(irq);
++ }
++
++ return IRQ_HANDLED;
++}
++
++/* -------------------------------------------------------------------------- */
++
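++/*
++ * Tahvo register access goes through CBUS and takes a mutex, so the
++ * mask/unmask callbacks only cache the new mask; the IMR register is
++ * written back from the irq_bus_sync_unlock() callback.
++ */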
++static void tahvo_irq_bus_lock(struct irq_data *data)
++{
++ struct tahvo *tahvo = irq_data_get_irq_chip_data(data);
++
++ mutex_lock(&tahvo->mutex);
++}
++
++static void tahvo_irq_bus_sync_unlock(struct irq_data *data)
++{
++ struct tahvo *tahvo = irq_data_get_irq_chip_data(data);
++
++ if (tahvo->mask_pending) {
++ __tahvo_write_reg(tahvo, TAHVO_REG_IMR, tahvo->mask);
++ tahvo->mask_pending = false;
++ }
++
++ mutex_unlock(&tahvo->mutex);
++}
++
++static void tahvo_irq_mask(struct irq_data *data)
++{
++ struct tahvo *tahvo = irq_data_get_irq_chip_data(data);
++ int irq = data->irq;
++
++ tahvo->mask |= (1 << (irq - tahvo->irq_base));
++ tahvo->mask_pending = true;
++}
++
++static void tahvo_irq_unmask(struct irq_data *data)
++{
++ struct tahvo *tahvo = irq_data_get_irq_chip_data(data);
++ int irq = data->irq;
++
++ tahvo->mask &= ~(1 << (irq - tahvo->irq_base));
++ tahvo->mask_pending = true;
++}
++
++static inline void tahvo_irq_setup(int irq)
++{
++#ifdef CONFIG_ARM
++ set_irq_flags(irq, IRQF_VALID);
++#else
++ irq_set_noprobe(irq);
++#endif
++}
++
++static void tahvo_irq_init(struct tahvo *tahvo)
++{
++ int base = tahvo->irq_base;
++ int end = tahvo->irq_end;
++ int irq;
++
++ for (irq = base; irq < end; irq++) {
++ irq_set_chip_data(irq, tahvo);
++ irq_set_chip(irq, &tahvo->irq_chip);
++ irq_set_nested_thread(irq, 1);
++ tahvo_irq_setup(irq);
++ }
++}
++
++/* -------------------------------------------------------------------------- */
++
++static struct resource generic_resources[] = {
++ {
++ .start = -EINVAL, /* fixed later */
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static struct device *tahvo_allocate_child(const char *name,
++ struct device *parent, int irq)
++{
++ struct platform_device *pdev;
++ int ret;
++
++ pdev = platform_device_alloc(name, -1);
++ if (!pdev) {
++ dev_dbg(parent, "can't allocate %s\n", name);
++ goto err0;
++ }
++
++ pdev->dev.parent = parent;
++
++ if (irq > 0) {
++ generic_resources[0].start = irq;
++
++ ret = platform_device_add_resources(pdev, generic_resources,
++ ARRAY_SIZE(generic_resources));
++ if (ret < 0) {
++ dev_dbg(parent, "can't add resources to %s\n", name);
++ goto err1;
++ }
++ }
++
++ ret = platform_device_add(pdev);
++ if (ret < 0) {
++ dev_dbg(parent, "can't add %s\n", name);
++ goto err1;
++ }
++
++ return &pdev->dev;
++
++err1:
++ platform_device_put(pdev);
++
++err0:
++ return NULL;
++}
++
++static int tahvo_allocate_children(struct device *parent, int irq_base)
++{
++ struct device *child;
++
++ child = tahvo_allocate_child("tahvo-usb", parent,
++ irq_base + TAHVO_INT_VBUSON);
++ if (!child)
++ return -ENOMEM;
++
++ child = tahvo_allocate_child("tahvo-pwm", parent, -1);
++ if (!child)
++ return -ENOMEM;
++
++ return 0;
++}
++
++static int __devinit tahvo_probe(struct platform_device *pdev)
++{
++ struct irq_chip *chip;
++ struct tahvo *tahvo;
++ int rev;
++ int ret;
++ int irq;
++ int id;
++
++ tahvo = kzalloc(sizeof(*tahvo), GFP_KERNEL);
++ if (!tahvo) {
++ dev_err(&pdev->dev, "not enough memory\n");
++ ret = -ENOMEM;
++ goto err0;
++ }
++
++ irq = platform_get_irq(pdev, 0);
++ platform_set_drvdata(pdev, tahvo);
++
++ mutex_init(&tahvo->mutex);
++
++ ret = irq_alloc_descs(-1, 0, MAX_TAHVO_IRQ_HANDLERS, 0);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "failed to allocate IRQ descs\n");
++ goto err1;
++ }
++
++ chip = &tahvo->irq_chip;
++
++	chip->name = "tahvo";
++	chip->irq_bus_lock = tahvo_irq_bus_lock;
++	chip->irq_bus_sync_unlock = tahvo_irq_bus_sync_unlock;
++	chip->irq_mask = tahvo_irq_mask;
++	chip->irq_unmask = tahvo_irq_unmask;
++
++ tahvo->irq_base = ret;
++ tahvo->irq_end = ret + MAX_TAHVO_IRQ_HANDLERS;
++ tahvo->dev = &pdev->dev;
++ tahvo->irq = irq;
++
++ tahvo_irq_init(tahvo);
++
++ rev = __tahvo_read_reg(tahvo, TAHVO_REG_ASICR);
++
++ id = (rev >> 8) & 0xff;
++
++ if (id == 0x0b)
++ tahvo->is_betty = true;
++
++ ret = tahvo_allocate_children(&pdev->dev, tahvo->irq_base);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "failed to allocate children\n");
++ goto err2;
++ }
++
++	dev_info(&pdev->dev, "%s v%d.%d found\n",
++ tahvo->is_betty ? "Betty" : "Tahvo",
++ (rev >> 4) & 0x0f, rev & 0x0f);
++
++ /* Mask all TAHVO interrupts */
++ tahvo->mask = 0xffff;
++ __tahvo_write_reg(tahvo, TAHVO_REG_IMR, tahvo->mask);
++
++ ret = request_threaded_irq(irq, NULL, tahvo_irq_handler,
++ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
++ "tahvo", tahvo);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "Unable to register IRQ handler\n");
++ goto err2;
++ }
++
++ return 0;
++
++err2:
++ irq_free_descs(tahvo->irq_base, MAX_TAHVO_IRQ_HANDLERS);
++
++err1:
++ kfree(tahvo);
++
++err0:
++ return ret;
++}
++
++static int __devexit tahvo_remove(struct platform_device *pdev)
++{
++ struct tahvo *tahvo = platform_get_drvdata(pdev);
++ int irq;
++
++ irq = platform_get_irq(pdev, 0);
++
++	free_irq(irq, tahvo);
++ irq_free_descs(tahvo->irq_base, MAX_TAHVO_IRQ_HANDLERS);
++ kfree(tahvo);
++
++ return 0;
++}
++
++
++static const struct of_device_id tahvo_match_table[] __devinitconst = {
++ {
++ .compatible = "nokia,tahvo",
++ },
++ {},
++};
++MODULE_DEVICE_TABLE(of, tahvo_match_table);
++
++static struct platform_driver tahvo_driver = {
++ .probe = tahvo_probe,
++ .remove = __devexit_p(tahvo_remove),
++ .driver = {
++ .name = "tahvo",
++ .of_match_table = tahvo_match_table,
++ },
++};
++
++module_platform_driver(tahvo_driver);
++
++MODULE_ALIAS("platform:tahvo");
++MODULE_DESCRIPTION("Tahvo ASIC control");
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Juha Yrjölä");
++MODULE_AUTHOR("David Weinehall");
++MODULE_AUTHOR("Mikko Ylinen");
++
+diff --git a/drivers/cbus/tahvo.h b/drivers/cbus/tahvo.h
+new file mode 100644
+index 0000000..f151a43
+--- /dev/null
++++ b/drivers/cbus/tahvo.h
+@@ -0,0 +1,58 @@
++/*
++ * drivers/cbus/tahvo.h
++ *
++ * Copyright (C) 2004, 2005 Nokia Corporation
++ *
++ * Written by Juha Yrjölä <juha.yrjola@nokia.com> and
++ * David Weinehall <david.weinehall@nokia.com>
++ *
++ * This file is subject to the terms and conditions of the GNU General
++ * Public License. See the file "COPYING" in the main directory of this
++ * archive for more details.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#ifndef __DRIVERS_CBUS_TAHVO_H
++#define __DRIVERS_CBUS_TAHVO_H
++
++#include <linux/types.h>
++
++/* Registers */
++#define TAHVO_REG_ASICR 0x00 /* ASIC ID & revision */
++#define TAHVO_REG_IDR 0x01 /* Interrupt ID */
++#define TAHVO_REG_IDSR 0x02 /* Interrupt status */
++#define TAHVO_REG_IMR 0x03 /* Interrupt mask */
++#define TAHVO_REG_CHGCURR 0x04 /* Charge current control PWM (8-bit) */
++#define TAHVO_REG_LEDPWMR 0x05 /* LED PWM */
++#define TAHVO_REG_USBR 0x06 /* USB control */
++#define TAHVO_REG_CHGCTL 0x08 /* Charge control register */
++#define TAHVO_REG_CHGCTL_EN 0x0001 /* Global charge enable */
++#define TAHVO_REG_CHGCTL_PWMOVR 0x0004 /* PWM override. Force charge PWM to 0%/100% duty cycle. */
++#define TAHVO_REG_CHGCTL_PWMOVRZERO 0x0008 /* If set, PWM override is 0% (If unset -> 100%) */
++#define TAHVO_REG_CHGCTL_CURMEAS 0x0040 /* Enable battery current measurement. */
++#define TAHVO_REG_CHGCTL_CURTIMRST 0x0080 /* Current measure timer reset. */
++#define TAHVO_REG_BATCURRTIMER 0x0c /* Battery current measure timer (8-bit) */
++#define TAHVO_REG_BATCURR 0x0d /* Battery (dis)charge current (signed 16-bit) */
++
++#define TAHVO_REG_MAX 0x0d
++
++/* Interrupt sources */
++#define TAHVO_INT_VBUSON 0
++#define TAHVO_INT_BATCURR 7 /* Battery current measure timer */
++
++#define MAX_TAHVO_IRQ_HANDLERS 8
++
++int tahvo_read_reg(struct device *child, unsigned reg);
++void tahvo_write_reg(struct device *child, unsigned reg, u16 val);
++void tahvo_set_clear_reg_bits(struct device *child, unsigned reg, u16 set,
++ u16 clear);
++
++#endif /* __DRIVERS_CBUS_TAHVO_H */
+diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
+index a48bc02..ce75fcb 100644
+--- a/drivers/cpufreq/Makefile
++++ b/drivers/cpufreq/Makefile
+@@ -43,6 +43,7 @@ obj-$(CONFIG_UX500_SOC_DB8500) += db8500-cpufreq.o
+ obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
+ obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
+ obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o
++obj-$(CONFIG_ARCH_OMAP2PLUS) += omap-cpufreq.o
+
+ ##################################################################################
+ # PowerPC platform drivers
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 987a165..2f5801a 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -204,8 +204,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
+ pr_debug("saving %lu as reference value for loops_per_jiffy; "
+ "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
+ }
+- if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
+- (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
++ if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
+ (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+ loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
+ ci->new);
+diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
+index f231015..bedac1a 100644
+--- a/drivers/cpufreq/cpufreq_userspace.c
++++ b/drivers/cpufreq/cpufreq_userspace.c
+@@ -47,9 +47,11 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+ if (!per_cpu(cpu_is_managed, freq->cpu))
+ return 0;
+
+- pr_debug("saving cpu_cur_freq of cpu %u to be %u kHz\n",
+- freq->cpu, freq->new);
+- per_cpu(cpu_cur_freq, freq->cpu) = freq->new;
++ if (val == CPUFREQ_POSTCHANGE) {
++ pr_debug("saving cpu_cur_freq of cpu %u to be %u kHz\n",
++ freq->cpu, freq->new);
++ per_cpu(cpu_cur_freq, freq->cpu) = freq->new;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
+new file mode 100644
+index 0000000..0d1d070
+--- /dev/null
++++ b/drivers/cpufreq/omap-cpufreq.c
+@@ -0,0 +1,412 @@
++/*
++ * CPU frequency scaling for OMAP using OPP information
++ *
++ * Copyright (C) 2005 Nokia Corporation
++ * Written by Tony Lindgren <tony@atomide.com>
++ *
++ * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
++ *
++ * Copyright (C) 2007-2011 Texas Instruments, Inc.
++ * - OMAP3/4 support by Rajendra Nayak, Santosh Shilimkar
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/cpufreq.h>
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/err.h>
++#include <linux/clk.h>
++#include <linux/io.h>
++#include <linux/opp.h>
++#include <linux/cpu.h>
++#include <linux/module.h>
++#include <linux/regulator/consumer.h>
++#include <linux/suspend.h>
++
++#include <asm/system.h>
++#include <asm/smp_plat.h>
++#include <asm/cpu.h>
++
++#include <plat/clock.h>
++#include <plat/omap-pm.h>
++#include <plat/common.h>
++#include <plat/omap_device.h>
++
++#include <mach/hardware.h>
++
++/* Tolerance for MPU voltage is 4%, so we pass nominal + 4% as the
++ * maximum voltage when setting the MPU regulator voltage. The 4%
++ * figure is taken from the AM33XX datasheet. */
++#define MPU_TOLERANCE 4
++#define PER_ROUND_VAL 100
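++/* For example, with a nominal OPP voltage of 1100000 uV the maximum
++ * passed to regulator_set_voltage() is
++ * 1100000 + (1100000 * MPU_TOLERANCE) / PER_ROUND_VAL = 1144000 uV. */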
++
++/* Use 275MHz when entering suspend */
++#define SLEEP_FREQ (275 * 1000)
++
++
++#ifdef CONFIG_SMP
++struct lpj_info {
++ unsigned long ref;
++ unsigned int freq;
++};
++
++static DEFINE_PER_CPU(struct lpj_info, lpj_ref);
++static struct lpj_info global_lpj_ref;
++#endif
++
++static struct cpufreq_frequency_table *freq_table;
++static atomic_t freq_table_users = ATOMIC_INIT(0);
++static struct clk *mpu_clk;
++static char *mpu_clk_name;
++static struct device *mpu_dev;
++static struct regulator *mpu_reg;
++static DEFINE_MUTEX(omap_cpu_lock);
++static bool is_suspended;
++
++static int omap_verify_speed(struct cpufreq_policy *policy)
++{
++ if (!freq_table)
++ return -EINVAL;
++ return cpufreq_frequency_table_verify(policy, freq_table);
++}
++
++static unsigned int omap_getspeed(unsigned int cpu)
++{
++ unsigned long rate;
++
++ if (cpu >= NR_CPUS)
++ return 0;
++
++ rate = clk_get_rate(mpu_clk) / 1000;
++ return rate;
++}
++
++static int omap_target(struct cpufreq_policy *policy,
++ unsigned int target_freq,
++ unsigned int relation)
++{
++ unsigned int i;
++ int ret = 0;
++ struct cpufreq_freqs freqs;
++ struct opp *opp;
++ int volt_old = 0, volt_new = 0;
++
++ if (is_suspended)
++ return -EBUSY;
++
++ if (!freq_table) {
++ dev_err(mpu_dev, "%s: cpu%d: no freq table!\n", __func__,
++ policy->cpu);
++ return -EINVAL;
++ }
++
++ ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
++ relation, &i);
++ if (ret) {
++ dev_dbg(mpu_dev, "%s: cpu%d: no freq match for %d(ret=%d)\n",
++ __func__, policy->cpu, target_freq, ret);
++ return ret;
++ }
++ freqs.new = freq_table[i].frequency;
++ if (!freqs.new) {
++ dev_err(mpu_dev, "%s: cpu%d: no match for freq %d\n", __func__,
++ policy->cpu, target_freq);
++ return -EINVAL;
++ }
++
++ freqs.old = omap_getspeed(policy->cpu);
++ freqs.cpu = policy->cpu;
++
++ if (freqs.old == freqs.new && policy->cur == freqs.new)
++ return ret;
++
++ opp = opp_find_freq_exact(mpu_dev, freqs.new * 1000, true);
++ if (IS_ERR(opp)) {
++ dev_err(mpu_dev, "%s: cpu%d: no opp match for freq %d\n",
++ __func__, policy->cpu, target_freq);
++ return -EINVAL;
++ }
++
++ volt_new = opp_get_voltage(opp);
++ if (!volt_new) {
++ dev_err(mpu_dev, "%s: cpu%d: no opp voltage for freq %d\n",
++ __func__, policy->cpu, target_freq);
++ return -EINVAL;
++ }
++
++ volt_old = regulator_get_voltage(mpu_reg);
++
++#ifdef CONFIG_CPU_FREQ_DEBUG
++ pr_info("cpufreq-omap: frequency transition: %u --> %u\n",
++ freqs.old, freqs.new);
++ pr_info("cpufreq-omap: voltage transition: %d --> %d\n",
++ volt_old, volt_new);
++#endif
++
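++	/* When scaling up, raise the MPU supply to the new OPP voltage
++	 * before increasing the clock rate. */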
++ if (freqs.new > freqs.old) {
++ ret = regulator_set_voltage(mpu_reg, volt_new,
++ volt_new + (volt_new * MPU_TOLERANCE) / PER_ROUND_VAL);
++ if (ret) {
++ dev_err(mpu_dev, "%s: unable to set voltage to %d uV (for %u MHz)\n",
++ __func__, volt_new, freqs.new/1000);
++ return ret;
++ }
++ }
++
++ /* notifiers */
++ for_each_cpu(i, policy->cpus) {
++ freqs.cpu = i;
++ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
++ }
++
++ ret = clk_set_rate(mpu_clk, freqs.new * 1000);
++ freqs.new = omap_getspeed(policy->cpu);
++
++#ifdef CONFIG_SMP
++ /*
++ * Note that loops_per_jiffy is not updated on SMP systems in
++ * cpufreq driver. So, update the per-CPU loops_per_jiffy value
++ * on frequency transition. We need to update all dependent CPUs.
++ */
++ for_each_cpu(i, policy->cpus) {
++ struct lpj_info *lpj = &per_cpu(lpj_ref, i);
++ if (!lpj->freq) {
++ lpj->ref = per_cpu(cpu_data, i).loops_per_jiffy;
++ lpj->freq = freqs.old;
++ }
++
++ per_cpu(cpu_data, i).loops_per_jiffy =
++ cpufreq_scale(lpj->ref, lpj->freq, freqs.new);
++ }
++
++ /* And don't forget to adjust the global one */
++ if (!global_lpj_ref.freq) {
++ global_lpj_ref.ref = loops_per_jiffy;
++ global_lpj_ref.freq = freqs.old;
++ }
++ loops_per_jiffy = cpufreq_scale(global_lpj_ref.ref, global_lpj_ref.freq,
++ freqs.new);
++#endif
++
++ /* notifiers */
++ for_each_cpu(i, policy->cpus) {
++ freqs.cpu = i;
++ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
++ }
++
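++	/* When scaling down, lower the voltage only after the clock rate
++	 * has been reduced; if that fails, restore the old rate and
++	 * replay the transition notifiers with old/new swapped back. */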
++ if (freqs.new < freqs.old) {
++ ret = regulator_set_voltage(mpu_reg, volt_new,
++ volt_new + (volt_new * MPU_TOLERANCE) / PER_ROUND_VAL);
++ if (ret) {
++ unsigned int temp;
++
++ dev_err(mpu_dev, "%s: unable to set voltage to %d uV (for %u MHz)\n",
++ __func__, volt_new, freqs.new/1000);
++
++ if (clk_set_rate(mpu_clk, freqs.old * 1000)) {
++ dev_err(mpu_dev,
++ "%s: failed restoring clock rate to %u MHz, clock rate is %u MHz",
++ __func__,
++ freqs.old/1000, freqs.new/1000);
++ return ret;
++ }
++
++ temp = freqs.new;
++ freqs.new = freqs.old;
++ freqs.old = temp;
++
++ for_each_cpu(i, policy->cpus) {
++ freqs.cpu = i;
++ cpufreq_notify_transition(&freqs,
++ CPUFREQ_PRECHANGE);
++ cpufreq_notify_transition(&freqs,
++ CPUFREQ_POSTCHANGE);
++ }
++ return ret;
++ }
++ }
++
++ return ret;
++}
++
++static inline void freq_table_free(void)
++{
++ if (atomic_dec_and_test(&freq_table_users))
++ opp_free_cpufreq_table(mpu_dev, &freq_table);
++}
++
++static int omap_pm_notify(struct notifier_block *nb, unsigned long event,
++ void *dummy)
++{
++ struct cpufreq_policy *policy = cpufreq_cpu_get(0);
++ static unsigned int saved_frequency;
++
++ mutex_lock(&omap_cpu_lock);
++ switch (event) {
++ case PM_SUSPEND_PREPARE:
++ if (is_suspended)
++ goto out;
++
++ saved_frequency = omap_getspeed(0);
++
++ mutex_unlock(&omap_cpu_lock);
++ omap_target(policy, SLEEP_FREQ, CPUFREQ_RELATION_H);
++ mutex_lock(&omap_cpu_lock);
++ is_suspended = true;
++ break;
++
++ case PM_POST_SUSPEND:
++ is_suspended = false;
++ mutex_unlock(&omap_cpu_lock);
++ omap_target(policy, saved_frequency, CPUFREQ_RELATION_H);
++ mutex_lock(&omap_cpu_lock);
++ break;
++ }
++out:
++ mutex_unlock(&omap_cpu_lock);
++
++ return NOTIFY_OK;
++}
++
++static struct notifier_block omap_cpu_pm_notifier = {
++ .notifier_call = omap_pm_notify,
++};
++
++static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
++{
++ int result = 0;
++
++ mpu_clk = clk_get(NULL, mpu_clk_name);
++ if (IS_ERR(mpu_clk))
++ return PTR_ERR(mpu_clk);
++
++ mpu_reg = regulator_get(NULL, "vdd_mpu");
++ if (IS_ERR(mpu_reg)) {
++ result = -EINVAL;
++ goto fail_ck;
++ }
++
++	/* Success of regulator_get() does not guarantee that the physical
++	 * regulator, or a driver for it, is present (this situation arises
++	 * when the dummy regulator is enabled), so read back the voltage
++	 * to verify that the physical regulator and its driver are there.
++	 */
++ if (regulator_get_voltage(mpu_reg) < 0) {
++ result = -EINVAL;
++ goto fail_reg;
++ }
++
++ if (policy->cpu >= NR_CPUS) {
++ result = -EINVAL;
++ goto fail_reg;
++ }
++
++ policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu);
++
++ if (atomic_inc_return(&freq_table_users) == 1)
++ result = opp_init_cpufreq_table(mpu_dev, &freq_table);
++
++ if (result) {
++ dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n",
++ __func__, policy->cpu, result);
++ goto fail_reg;
++ }
++
++ result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
++ if (result)
++ goto fail_table;
++
++ cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
++
++ policy->min = policy->cpuinfo.min_freq;
++ policy->max = policy->cpuinfo.max_freq;
++ policy->cur = omap_getspeed(policy->cpu);
++
++ /*
++	 * On OMAP SMP configurations, both processors share the voltage
++	 * and clock, so both CPUs need to be scaled together and hence
++	 * need software co-ordination. Use the cpufreq affected_cpus
++	 * interface to handle this scenario. The additional is_smp() check
++	 * is to keep SMP_ON_UP builds working.
++ */
++ if (is_smp()) {
++ policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
++ cpumask_setall(policy->cpus);
++ }
++
++ /* FIXME: what's the actual transition time? */
++ policy->cpuinfo.transition_latency = 300 * 1000;
++
++ register_pm_notifier(&omap_cpu_pm_notifier);
++
++ return 0;
++
++fail_table:
++ freq_table_free();
++fail_reg:
++ regulator_put(mpu_reg);
++fail_ck:
++ clk_put(mpu_clk);
++ return result;
++}
++
++static int omap_cpu_exit(struct cpufreq_policy *policy)
++{
++ freq_table_free();
++ clk_put(mpu_clk);
++ return 0;
++}
++
++static struct freq_attr *omap_cpufreq_attr[] = {
++ &cpufreq_freq_attr_scaling_available_freqs,
++ NULL,
++};
++
++static struct cpufreq_driver omap_driver = {
++ .flags = CPUFREQ_STICKY,
++ .verify = omap_verify_speed,
++ .target = omap_target,
++ .get = omap_getspeed,
++ .init = omap_cpu_init,
++ .exit = omap_cpu_exit,
++ .name = "omap",
++ .attr = omap_cpufreq_attr,
++};
++
++static int __init omap_cpufreq_init(void)
++{
++ if (cpu_is_omap24xx())
++ mpu_clk_name = "virt_prcm_set";
++ else if (cpu_is_omap34xx() && !cpu_is_am33xx())
++ mpu_clk_name = "dpll1_ck";
++ else if (cpu_is_omap44xx() || cpu_is_am33xx())
++ mpu_clk_name = "dpll_mpu_ck";
++
++ if (!mpu_clk_name) {
++ pr_err("%s: unsupported Silicon?\n", __func__);
++ return -EINVAL;
++ }
++
++ mpu_dev = omap_device_get_by_hwmod_name("mpu");
++ if (!mpu_dev) {
++ pr_warning("%s: unable to get the mpu device\n", __func__);
++ return -EINVAL;
++ }
++
++ return cpufreq_register_driver(&omap_driver);
++}
++
++static void __exit omap_cpufreq_exit(void)
++{
++ cpufreq_unregister_driver(&omap_driver);
++}
++
++MODULE_DESCRIPTION("cpufreq driver for OMAP SoCs");
++MODULE_LICENSE("GPL");
++module_init(omap_cpufreq_init);
++module_exit(omap_cpufreq_exit);
+diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
+index a6c10e8..8952897 100644
+--- a/drivers/gpio/gpio-omap.c
++++ b/drivers/gpio/gpio-omap.c
+@@ -213,7 +213,7 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
+ void __iomem *base = bank->base;
+ u32 gpio_bit = 1 << gpio;
+
+- if (cpu_is_omap44xx()) {
++ if (cpu_is_omap44xx() || cpu_is_am33xx()) {
+ _gpio_rmw(base, OMAP4_GPIO_LEVELDETECT0, gpio_bit,
+ trigger & IRQ_TYPE_LEVEL_LOW);
+ _gpio_rmw(base, OMAP4_GPIO_LEVELDETECT1, gpio_bit,
+@@ -233,7 +233,7 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
+ trigger & IRQ_TYPE_EDGE_FALLING);
+ }
+ if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
+- if (cpu_is_omap44xx()) {
++ if (cpu_is_omap44xx() || cpu_is_am33xx()) {
+ _gpio_rmw(base, OMAP4_GPIO_IRQWAKEN0, gpio_bit,
+ trigger != 0);
+ } else {
+@@ -264,7 +264,7 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
+ bank->enabled_non_wakeup_gpios &= ~gpio_bit;
+ }
+
+- if (cpu_is_omap44xx()) {
++ if (cpu_is_omap44xx() || cpu_is_am33xx()) {
+ bank->level_mask =
+ __raw_readl(bank->base + OMAP4_GPIO_LEVELDETECT0) |
+ __raw_readl(bank->base + OMAP4_GPIO_LEVELDETECT1);
+@@ -591,10 +591,10 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
+ void __iomem *reg = bank->base;
+ u32 ctrl;
+
+- if (cpu_is_omap24xx() || cpu_is_omap34xx())
+- reg += OMAP24XX_GPIO_CTRL;
+- else if (cpu_is_omap44xx())
++ if (cpu_is_omap44xx() || cpu_is_am33xx())
+ reg += OMAP4_GPIO_CTRL;
++ else if (cpu_is_omap24xx() || cpu_is_omap34xx())
++ reg += OMAP24XX_GPIO_CTRL;
+ ctrl = __raw_readl(reg);
+ /* Module is enabled, clocks are not gated */
+ ctrl &= 0xFFFFFFFE;
+@@ -627,7 +627,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
+ __raw_writel(1 << offset, reg);
+ }
+ #endif
+-#ifdef CONFIG_ARCH_OMAP4
++#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAPAM33XX)
+ if (bank->method == METHOD_GPIO_44XX) {
+ /* Disable wake-up during idle for dynamic tick */
+ void __iomem *reg = bank->base + OMAP4_GPIO_IRQWAKEN0;
+@@ -640,10 +640,10 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
+ void __iomem *reg = bank->base;
+ u32 ctrl;
+
+- if (cpu_is_omap24xx() || cpu_is_omap34xx())
+- reg += OMAP24XX_GPIO_CTRL;
+- else if (cpu_is_omap44xx())
++ if (cpu_is_omap44xx() || cpu_is_am33xx())
+ reg += OMAP4_GPIO_CTRL;
++ else if (cpu_is_omap24xx() || cpu_is_omap34xx())
++ reg += OMAP24XX_GPIO_CTRL;
+ ctrl = __raw_readl(reg);
+ /* Module is disabled, clocks are gated */
+ ctrl |= 1;
+@@ -1026,7 +1026,7 @@ static inline int init_gpio_info(struct platform_device *pdev)
+ static void omap_gpio_mod_init(struct gpio_bank *bank, int id)
+ {
+ if (cpu_class_is_omap2()) {
+- if (cpu_is_omap44xx()) {
++ if (cpu_is_omap44xx() || cpu_is_am33xx()) {
+ __raw_writel(0xffffffff, bank->base +
+ OMAP4_GPIO_IRQSTATUSCLR0);
+ __raw_writel(0x00000000, bank->base +
+@@ -1265,7 +1265,7 @@ static int omap_gpio_suspend(void)
+ wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA;
+ break;
+ #endif
+-#ifdef CONFIG_ARCH_OMAP4
++#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAPAM33XX)
+ case METHOD_GPIO_44XX:
+ wake_status = bank->base + OMAP4_GPIO_IRQWAKEN0;
+ wake_clear = bank->base + OMAP4_GPIO_IRQWAKEN0;
+@@ -1312,7 +1312,7 @@ static void omap_gpio_resume(void)
+ wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA;
+ break;
+ #endif
+-#ifdef CONFIG_ARCH_OMAP4
++#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAPAM33XX)
+ case METHOD_GPIO_44XX:
+ wake_clear = bank->base + OMAP4_GPIO_IRQWAKEN0;
+ wake_set = bank->base + OMAP4_GPIO_IRQWAKEN0;
+@@ -1345,7 +1345,7 @@ void omap2_gpio_prepare_for_idle(int off_mode)
+ int i, c = 0;
+ int min = 0;
+
+- if (cpu_is_omap34xx())
++ if ((cpu_is_omap34xx() && !cpu_is_am33xx()))
+ min = 1;
+
+ for (i = min; i < gpio_bank_count; i++) {
+@@ -1365,7 +1365,8 @@ void omap2_gpio_prepare_for_idle(int off_mode)
+ if (!(bank->enabled_non_wakeup_gpios))
+ continue;
+
+- if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
++ if (cpu_is_omap24xx() || (cpu_is_omap34xx() &&
++ !cpu_is_am33xx())) {
+ bank->saved_datain = __raw_readl(bank->base +
+ OMAP24XX_GPIO_DATAIN);
+ l1 = __raw_readl(bank->base +
+@@ -1374,7 +1375,7 @@ void omap2_gpio_prepare_for_idle(int off_mode)
+ OMAP24XX_GPIO_RISINGDETECT);
+ }
+
+- if (cpu_is_omap44xx()) {
++ if (cpu_is_omap44xx() || cpu_is_am33xx()) {
+ bank->saved_datain = __raw_readl(bank->base +
+ OMAP4_GPIO_DATAIN);
+ l1 = __raw_readl(bank->base +
+@@ -1388,14 +1389,15 @@ void omap2_gpio_prepare_for_idle(int off_mode)
+ l1 &= ~bank->enabled_non_wakeup_gpios;
+ l2 &= ~bank->enabled_non_wakeup_gpios;
+
+- if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
++ if (cpu_is_omap24xx() || (cpu_is_omap34xx() &&
++ !cpu_is_am33xx())) {
+ __raw_writel(l1, bank->base +
+ OMAP24XX_GPIO_FALLINGDETECT);
+ __raw_writel(l2, bank->base +
+ OMAP24XX_GPIO_RISINGDETECT);
+ }
+
+- if (cpu_is_omap44xx()) {
++ if (cpu_is_omap44xx() || cpu_is_am33xx()) {
+ __raw_writel(l1, bank->base + OMAP4_GPIO_FALLINGDETECT);
+ __raw_writel(l2, bank->base + OMAP4_GPIO_RISINGDETECT);
+ }
+@@ -1414,7 +1416,7 @@ void omap2_gpio_resume_after_idle(void)
+ int i;
+ int min = 0;
+
+- if (cpu_is_omap34xx())
++ if ((cpu_is_omap34xx() && !cpu_is_am33xx()))
+ min = 1;
+ for (i = min; i < gpio_bank_count; i++) {
+ struct gpio_bank *bank = &gpio_bank[i];
+@@ -1430,7 +1432,8 @@ void omap2_gpio_resume_after_idle(void)
+ if (!(bank->enabled_non_wakeup_gpios))
+ continue;
+
+- if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
++ if (cpu_is_omap24xx() || (cpu_is_omap34xx() &&
++ !cpu_is_am33xx())) {
+ __raw_writel(bank->saved_fallingdetect,
+ bank->base + OMAP24XX_GPIO_FALLINGDETECT);
+ __raw_writel(bank->saved_risingdetect,
+@@ -1438,7 +1441,7 @@ void omap2_gpio_resume_after_idle(void)
+ l = __raw_readl(bank->base + OMAP24XX_GPIO_DATAIN);
+ }
+
+- if (cpu_is_omap44xx()) {
++ if (cpu_is_omap44xx() || cpu_is_am33xx()) {
+ __raw_writel(bank->saved_fallingdetect,
+ bank->base + OMAP4_GPIO_FALLINGDETECT);
+ __raw_writel(bank->saved_risingdetect,
+@@ -1472,7 +1475,8 @@ void omap2_gpio_resume_after_idle(void)
+ if (gen) {
+ u32 old0, old1;
+
+- if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
++ if (cpu_is_omap24xx() || (cpu_is_omap34xx() &&
++ !cpu_is_am33xx())) {
+ old0 = __raw_readl(bank->base +
+ OMAP24XX_GPIO_LEVELDETECT0);
+ old1 = __raw_readl(bank->base +
+@@ -1487,7 +1491,7 @@ void omap2_gpio_resume_after_idle(void)
+ OMAP24XX_GPIO_LEVELDETECT1);
+ }
+
+- if (cpu_is_omap44xx()) {
++ if (cpu_is_omap44xx() || cpu_is_am33xx()) {
+ old0 = __raw_readl(bank->base +
+ OMAP4_GPIO_LEVELDETECT0);
+ old1 = __raw_readl(bank->base +
+diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
+index 257c1a5..e0733b7 100644
+--- a/drivers/i2c/busses/i2c-omap.c
++++ b/drivers/i2c/busses/i2c-omap.c
+@@ -830,11 +830,9 @@ complete:
+ ~(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
+ OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
+
+- if (stat & OMAP_I2C_STAT_NACK) {
++ if (stat & OMAP_I2C_STAT_NACK)
+ err |= OMAP_I2C_STAT_NACK;
+- omap_i2c_write_reg(dev, OMAP_I2C_CON_REG,
+- OMAP_I2C_CON_STP);
+- }
++
+ if (stat & OMAP_I2C_STAT_AL) {
+ dev_err(dev->dev, "Arbitration lost\n");
+ err |= OMAP_I2C_STAT_AL;
+diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
+index 3488ffe..dab56e5 100644
+--- a/drivers/input/touchscreen/Kconfig
++++ b/drivers/input/touchscreen/Kconfig
+@@ -407,6 +407,17 @@ config TOUCHSCREEN_TOUCHWIN
+ To compile this driver as a module, choose M here: the
+ module will be called touchwin.
+
++config TOUCHSCREEN_TI_TSCADC
++ tristate "TI Touchscreen Interface"
++ help
++	  Say Y here if you have a 4/5/8 wire touchscreen controller
++	  connected to the ADC controller on your TI SoC.
++
++ If unsure, say N.
++
++ To compile this driver as a module, choose M here: the
++ module will be called ti_tscadc.
++
+ config TOUCHSCREEN_ATMEL_TSADCC
+ tristate "Atmel Touchscreen Interface"
+ depends on ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
+diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
+index f957676..124360b 100644
+--- a/drivers/input/touchscreen/Makefile
++++ b/drivers/input/touchscreen/Makefile
+@@ -42,6 +42,7 @@ obj-$(CONFIG_TOUCHSCREEN_PENMOUNT) += penmount.o
+ obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
+ obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o
+ obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o
++obj-$(CONFIG_TOUCHSCREEN_TI_TSCADC) += ti_tscadc.o
+ obj-$(CONFIG_TOUCHSCREEN_TNETV107X) += tnetv107x-ts.o
+ obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
+ obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT) += touchright.o
+diff --git a/drivers/input/touchscreen/ti_tscadc.c b/drivers/input/touchscreen/ti_tscadc.c
+new file mode 100644
+index 0000000..3082e5c
+--- /dev/null
++++ b/drivers/input/touchscreen/ti_tscadc.c
+@@ -0,0 +1,793 @@
++/*
++ * TI Touch Screen driver
++ *
++ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/err.h>
++#include <linux/module.h>
++#include <linux/input.h>
++#include <linux/slab.h>
++#include <linux/interrupt.h>
++#include <linux/clk.h>
++#include <linux/platform_device.h>
++#include <linux/io.h>
++#include <linux/input/ti_tscadc.h>
++#include <linux/delay.h>
++#include <linux/device.h>
++#include <linux/pm_runtime.h>
++
++size_t do_adc_sample(struct kobject *, struct attribute *, char *);
++static DEVICE_ATTR(ain1, S_IRUGO, do_adc_sample, NULL);
++static DEVICE_ATTR(ain2, S_IRUGO, do_adc_sample, NULL);
++static DEVICE_ATTR(ain3, S_IRUGO, do_adc_sample, NULL);
++static DEVICE_ATTR(ain4, S_IRUGO, do_adc_sample, NULL);
++static DEVICE_ATTR(ain5, S_IRUGO, do_adc_sample, NULL);
++static DEVICE_ATTR(ain6, S_IRUGO, do_adc_sample, NULL);
++static DEVICE_ATTR(ain7, S_IRUGO, do_adc_sample, NULL);
++static DEVICE_ATTR(ain8, S_IRUGO, do_adc_sample, NULL);
++
++/* Note: some of the memory-mapped register offsets here are incorrect;
++ * correct them after checking against the TRM. */
++#define TSCADC_REG_IRQEOI 0x020
++#define TSCADC_REG_RAWIRQSTATUS 0x024
++#define TSCADC_REG_IRQSTATUS 0x028
++#define TSCADC_REG_IRQENABLE 0x02C
++#define TSCADC_REG_IRQCLR 0x030
++#define TSCADC_REG_IRQWAKEUP 0x034
++#define TSCADC_REG_CTRL 0x040
++#define TSCADC_REG_ADCFSM 0x044
++#define TSCADC_REG_CLKDIV 0x04C
++#define TSCADC_REG_SE 0x054
++#define TSCADC_REG_IDLECONFIG 0x058
++#define TSCADC_REG_CHARGECONFIG 0x05C
++#define TSCADC_REG_CHARGEDELAY 0x060
++#define TSCADC_REG_STEPCONFIG(n) (0x64 + ((n-1) * 8))
++#define TSCADC_REG_STEPDELAY(n) (0x68 + ((n-1) * 8))
++#define TSCADC_REG_STEPCONFIG13 0x0C4
++#define TSCADC_REG_STEPDELAY13 0x0C8
++#define TSCADC_REG_STEPCONFIG14 0x0CC
++#define TSCADC_REG_STEPDELAY14 0x0D0
++#define TSCADC_REG_FIFO0CNT 0xE4
++#define TSCADC_REG_FIFO0THR 0xE8
++#define TSCADC_REG_FIFO1CNT 0xF0
++#define TSCADC_REG_FIFO1THR 0xF4
++#define TSCADC_REG_FIFO0 0x100
++#define TSCADC_REG_FIFO1 0x200
++
++/* Register Bitfields */
++#define TSCADC_IRQWKUP_ENB BIT(0)
++#define TSCADC_IRQWKUP_DISABLE 0x00
++#define TSCADC_STPENB_STEPENB 0x7FFF
++#define TSCADC_STPENB_STEPENB_TOUCHSCREEN 0x7FFF
++#define TSCADC_STPENB_STEPENB_GENERAL 0x0400
++#define TSCADC_IRQENB_FIFO0THRES BIT(2)
++#define TSCADC_IRQENB_FIFO0OVERRUN BIT(3)
++#define TSCADC_IRQENB_FIFO1THRES BIT(5)
++#define TSCADC_IRQENB_EOS BIT(1)
++#define TSCADC_IRQENB_PENUP BIT(9)
++#define TSCADC_IRQENB_HW_PEN BIT(0)
++#define TSCADC_STEPCONFIG_MODE_HWSYNC 0x2
++#define TSCADC_STEPCONFIG_MODE_SWCONT 0x1
++#define TSCADC_STEPCONFIG_MODE_SWONESHOT 0x0
++#define TSCADC_STEPCONFIG_2SAMPLES_AVG (1 << 4)
++#define TSCADC_STEPCONFIG_NO_AVG 0
++#define TSCADC_STEPCONFIG_XPP BIT(5)
++#define TSCADC_STEPCONFIG_XNN BIT(6)
++#define TSCADC_STEPCONFIG_YPP BIT(7)
++#define TSCADC_STEPCONFIG_YNN BIT(8)
++#define TSCADC_STEPCONFIG_XNP BIT(9)
++#define TSCADC_STEPCONFIG_YPN BIT(10)
++#define TSCADC_STEPCONFIG_RFP (1 << 12)
++#define TSCADC_STEPCONFIG_INM (1 << 18)
++#define TSCADC_STEPCONFIG_INP_4 (1 << 19)
++#define TSCADC_STEPCONFIG_INP (1 << 20)
++#define TSCADC_STEPCONFIG_INP_5 (1 << 21)
++#define TSCADC_STEPCONFIG_FIFO1 (1 << 26)
++#define TSCADC_STEPCONFIG_IDLE_INP (1 << 22)
++#define TSCADC_STEPCONFIG_OPENDLY 0x018
++#define TSCADC_STEPCONFIG_SAMPLEDLY 0x88
++#define TSCADC_STEPCONFIG_Z1 (3 << 19)
++#define TSCADC_STEPCHARGE_INM_SWAP BIT(16)
++#define TSCADC_STEPCHARGE_INM BIT(15)
++#define TSCADC_STEPCHARGE_INP_SWAP BIT(20)
++#define TSCADC_STEPCHARGE_INP BIT(19)
++#define TSCADC_STEPCHARGE_RFM (1 << 23)
++#define TSCADC_STEPCHARGE_DELAY 0x1
++#define TSCADC_CNTRLREG_TSCSSENB BIT(0)
++#define TSCADC_CNTRLREG_STEPID BIT(1)
++#define TSCADC_CNTRLREG_STEPCONFIGWRT BIT(2)
++#define TSCADC_CNTRLREG_TSCENB BIT(7)
++#define TSCADC_CNTRLREG_4WIRE (0x1 << 5)
++#define TSCADC_CNTRLREG_5WIRE (0x1 << 6)
++#define TSCADC_CNTRLREG_8WIRE (0x3 << 5)
++#define TSCADC_ADCFSM_STEPID 0x10
++#define TSCADC_ADCFSM_FSM BIT(5)
++
++#define ADC_CLK 3000000
++
++#define MAX_12BIT ((1 << 12) - 1)
++
++static int pen = 1;
++static unsigned int bckup_x, bckup_y;
++
++struct tscadc {
++ struct input_dev *input;
++ int wires;
++ int analog_input;
++ int x_plate_resistance;
++ int mode;
++ int irq;
++ void __iomem *tsc_base;
++ unsigned int ctrl;
++};
++
++static unsigned int tscadc_readl(struct tscadc *ts, unsigned int reg)
++{
++ return readl(ts->tsc_base + reg);
++}
++
++static void tscadc_writel(struct tscadc *tsc, unsigned int reg,
++ unsigned int val)
++{
++ writel(val, tsc->tsc_base + reg);
++}
++
++/* Configure ADC to sample on channel (1-8) */
++
++static void tsc_adc_step_config(struct tscadc *ts_dev, int channel)
++{
++ unsigned int stepconfig = 0, delay = 0, chargeconfig = 0;
++
++	/*
++	 * Step configuration:
++	 * software-enabled one-shot mode,
++	 * 2-sample averaging,
++	 * sample the requested channel (SEL_INP mux bits = channel - 1).
++	 */
++ stepconfig = TSCADC_STEPCONFIG_MODE_SWONESHOT |
++ TSCADC_STEPCONFIG_2SAMPLES_AVG |
++ ((channel-1) << 19);
++
++ delay = TSCADC_STEPCONFIG_SAMPLEDLY | TSCADC_STEPCONFIG_OPENDLY;
++
++ tscadc_writel(ts_dev, TSCADC_REG_STEPCONFIG(10), stepconfig);
++ tscadc_writel(ts_dev, TSCADC_REG_STEPDELAY(10), delay);
++
++ /* Get the ball rolling, this will trigger the FSM to step through
++ * as soon as TSC_ADC_SS is turned on */
++ tscadc_writel(ts_dev, TSCADC_REG_SE, TSCADC_STPENB_STEPENB_GENERAL);
++}
++
++static irqreturn_t tsc_adc_interrupt(int irq, void *dev)
++{
++ struct tscadc *ts_dev = (struct tscadc *)dev;
++ struct input_dev *input_dev = ts_dev->input;
++ unsigned int status, irqclr = 0;
++ int i;
++ int fsm = 0, fifo0count = 0, fifo1count = 0;
++ unsigned int read_sample = 0, ready1 = 0;
++ unsigned int prev_val_x = ~0, prev_val_y = ~0;
++ unsigned int prev_diff_x = ~0, prev_diff_y = ~0;
++ unsigned int cur_diff_x = 0, cur_diff_y = 0;
++ unsigned int val_x = 0, val_y = 0, diffx = 0, diffy = 0;
++
++ status = tscadc_readl(ts_dev, TSCADC_REG_IRQSTATUS);
++
++ // printk("interrupt! status=%x\n", status);
++ // if (status & TSCADC_IRQENB_EOS) {
++ // irqclr |= TSCADC_IRQENB_EOS;
++ // }
++
++ if (status & TSCADC_IRQENB_FIFO0THRES) {
++ fifo1count = tscadc_readl(ts_dev, TSCADC_REG_FIFO0CNT);
++ // printk("fifo 0 count = %d\n", fifo1count);
++
++ for (i = 0; i < fifo1count; i++) {
++ read_sample = tscadc_readl(ts_dev, TSCADC_REG_FIFO0);
++ printk("sample: %d: %x\n", i, read_sample);
++ }
++ irqclr |= TSCADC_IRQENB_FIFO0THRES;
++ }
++
++
++ if (status & TSCADC_IRQENB_FIFO1THRES) {
++ fifo1count = tscadc_readl(ts_dev, TSCADC_REG_FIFO1CNT);
++
++ for (i = 0; i < fifo1count; i++) {
++ read_sample = tscadc_readl(ts_dev, TSCADC_REG_FIFO1);
++ // read_sample = read_sample & 0xfff;
++ printk("sample: %d: %d\n", i, read_sample);
++ panic("sample read from fifo1!");
++ }
++ irqclr |= TSCADC_IRQENB_FIFO1THRES;
++ }
++
++ // mdelay(500);
++
++ tscadc_writel(ts_dev, TSCADC_REG_IRQSTATUS, irqclr);
++
++ /* check pending interrupts */
++ tscadc_writel(ts_dev, TSCADC_REG_IRQEOI, 0x0);
++
++ /* Turn on Step 1 again */
++ // tscadc_writel(ts_dev, TSCADC_REG_SE, TSCADC_STPENB_STEPENB_GENERAL);
++ return IRQ_HANDLED;
++}
++
++static void tsc_step_config(struct tscadc *ts_dev)
++{
++ unsigned int stepconfigx = 0, stepconfigy = 0;
++ unsigned int delay, chargeconfig = 0;
++ unsigned int stepconfigz1 = 0, stepconfigz2 = 0;
++ int i;
++
++ /* Configure the Step registers */
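++	/* Steps 1-6 sample the X coordinate into FIFO0, steps 7-12 sample
++	 * the Y coordinate into FIFO1, and steps 13-14 provide the Z1/Z2
++	 * readings used for the pressure calculation. */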
++
++ delay = TSCADC_STEPCONFIG_SAMPLEDLY | TSCADC_STEPCONFIG_OPENDLY;
++
++ stepconfigx = TSCADC_STEPCONFIG_MODE_HWSYNC |
++ TSCADC_STEPCONFIG_2SAMPLES_AVG | TSCADC_STEPCONFIG_XPP;
++
++ switch (ts_dev->wires) {
++ case 4:
++ if (ts_dev->analog_input == 0)
++ stepconfigx |= TSCADC_STEPCONFIG_INP_4 |
++ TSCADC_STEPCONFIG_YPN;
++ else
++ stepconfigx |= TSCADC_STEPCONFIG_INP |
++ TSCADC_STEPCONFIG_XNN;
++ break;
++ case 5:
++ stepconfigx |= TSCADC_STEPCONFIG_YNN |
++ TSCADC_STEPCONFIG_INP_5;
++ if (ts_dev->analog_input == 0)
++ stepconfigx |= TSCADC_STEPCONFIG_XNP |
++ TSCADC_STEPCONFIG_YPN;
++ else
++ stepconfigx |= TSCADC_STEPCONFIG_XNN |
++ TSCADC_STEPCONFIG_YPP;
++ break;
++ case 8:
++ if (ts_dev->analog_input == 0)
++ stepconfigx |= TSCADC_STEPCONFIG_INP_4 |
++ TSCADC_STEPCONFIG_YPN;
++ else
++ stepconfigx |= TSCADC_STEPCONFIG_INP |
++ TSCADC_STEPCONFIG_XNN;
++ break;
++ }
++
++ for (i = 1; i < 7; i++) {
++ tscadc_writel(ts_dev, TSCADC_REG_STEPCONFIG(i), stepconfigx);
++ tscadc_writel(ts_dev, TSCADC_REG_STEPDELAY(i), delay);
++ }
++
++ stepconfigy = TSCADC_STEPCONFIG_MODE_HWSYNC |
++ TSCADC_STEPCONFIG_2SAMPLES_AVG | TSCADC_STEPCONFIG_YNN |
++ TSCADC_STEPCONFIG_INM | TSCADC_STEPCONFIG_FIFO1;
++ switch (ts_dev->wires) {
++ case 4:
++ if (ts_dev->analog_input == 0)
++ stepconfigy |= TSCADC_STEPCONFIG_XNP;
++ else
++ stepconfigy |= TSCADC_STEPCONFIG_YPP;
++ break;
++ case 5:
++ stepconfigy |= TSCADC_STEPCONFIG_XPP | TSCADC_STEPCONFIG_INP_5;
++ if (ts_dev->analog_input == 0)
++ stepconfigy |= TSCADC_STEPCONFIG_XNN |
++ TSCADC_STEPCONFIG_YPP;
++ else
++ stepconfigy |= TSCADC_STEPCONFIG_XNP |
++ TSCADC_STEPCONFIG_YPN;
++ break;
++ case 8:
++ if (ts_dev->analog_input == 0)
++ stepconfigy |= TSCADC_STEPCONFIG_XNP;
++ else
++ stepconfigy |= TSCADC_STEPCONFIG_YPP;
++ break;
++ }
++
++ for (i = 7; i < 13; i++) {
++ tscadc_writel(ts_dev, TSCADC_REG_STEPCONFIG(i), stepconfigy);
++ tscadc_writel(ts_dev, TSCADC_REG_STEPDELAY(i), delay);
++ }
++
++ chargeconfig = TSCADC_STEPCONFIG_XPP |
++ TSCADC_STEPCONFIG_YNN |
++ TSCADC_STEPCONFIG_RFP |
++ TSCADC_STEPCHARGE_RFM;
++ if (ts_dev->analog_input == 0)
++ chargeconfig |= TSCADC_STEPCHARGE_INM_SWAP |
++ TSCADC_STEPCHARGE_INP_SWAP;
++ else
++ chargeconfig |= TSCADC_STEPCHARGE_INM | TSCADC_STEPCHARGE_INP;
++ tscadc_writel(ts_dev, TSCADC_REG_CHARGECONFIG, chargeconfig);
++ tscadc_writel(ts_dev, TSCADC_REG_CHARGEDELAY, TSCADC_STEPCHARGE_DELAY);
++
++ /* Configure to calculate pressure */
++ stepconfigz1 = TSCADC_STEPCONFIG_MODE_HWSYNC |
++ TSCADC_STEPCONFIG_2SAMPLES_AVG |
++ TSCADC_STEPCONFIG_XNP |
++ TSCADC_STEPCONFIG_YPN | TSCADC_STEPCONFIG_INM;
++ stepconfigz2 = stepconfigz1 | TSCADC_STEPCONFIG_Z1 |
++ TSCADC_STEPCONFIG_FIFO1;
++ tscadc_writel(ts_dev, TSCADC_REG_STEPCONFIG13, stepconfigz1);
++ tscadc_writel(ts_dev, TSCADC_REG_STEPDELAY13, delay);
++ tscadc_writel(ts_dev, TSCADC_REG_STEPCONFIG14, stepconfigz2);
++ tscadc_writel(ts_dev, TSCADC_REG_STEPDELAY14, delay);
++
++ tscadc_writel(ts_dev, TSCADC_REG_SE, TSCADC_STPENB_STEPENB_TOUCHSCREEN);
++}
++
++static void tsc_idle_config(struct tscadc *ts_config)
++{
++ /* Idle mode touch screen config */
++ unsigned int idleconfig;
++
++ idleconfig = TSCADC_STEPCONFIG_YNN |
++ TSCADC_STEPCONFIG_INM | TSCADC_STEPCONFIG_IDLE_INP;
++ if (ts_config->analog_input == 0)
++ idleconfig |= TSCADC_STEPCONFIG_XNN;
++ else
++ idleconfig |= TSCADC_STEPCONFIG_YPN;
++
++ tscadc_writel(ts_config, TSCADC_REG_IDLECONFIG, idleconfig);
++}
++
++static irqreturn_t tsc_interrupt(int irq, void *dev)
++{
++ struct tscadc *ts_dev = (struct tscadc *)dev;
++ struct input_dev *input_dev = ts_dev->input;
++ unsigned int status, irqclr = 0;
++ int i;
++ int fsm = 0, fifo0count = 0, fifo1count = 0;
++ unsigned int readx1 = 0, ready1 = 0;
++ unsigned int prev_val_x = ~0, prev_val_y = ~0;
++ unsigned int prev_diff_x = ~0, prev_diff_y = ~0;
++ unsigned int cur_diff_x = 0, cur_diff_y = 0;
++ unsigned int val_x = 0, val_y = 0, diffx = 0, diffy = 0;
++ unsigned int z1 = 0, z2 = 0, z = 0;
++
++ status = tscadc_readl(ts_dev, TSCADC_REG_IRQSTATUS);
++
++ if (status & TSCADC_IRQENB_FIFO1THRES) {
++ fifo0count = tscadc_readl(ts_dev, TSCADC_REG_FIFO0CNT);
++ fifo1count = tscadc_readl(ts_dev, TSCADC_REG_FIFO1CNT);
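++		/* Walk the paired X/Y samples and keep the reading whose
++		 * difference from the previous sample is smallest, as a
++		 * simple software noise filter. */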
++ for (i = 0; i < (fifo0count-1); i++) {
++ readx1 = tscadc_readl(ts_dev, TSCADC_REG_FIFO0);
++ readx1 = readx1 & 0xfff;
++ if (readx1 > prev_val_x)
++ cur_diff_x = readx1 - prev_val_x;
++ else
++ cur_diff_x = prev_val_x - readx1;
++
++ if (cur_diff_x < prev_diff_x) {
++ prev_diff_x = cur_diff_x;
++ val_x = readx1;
++ }
++
++ prev_val_x = readx1;
++ ready1 = tscadc_readl(ts_dev, TSCADC_REG_FIFO1);
++ ready1 &= 0xfff;
++ if (ready1 > prev_val_y)
++ cur_diff_y = ready1 - prev_val_y;
++ else
++ cur_diff_y = prev_val_y - ready1;
++
++ if (cur_diff_y < prev_diff_y) {
++ prev_diff_y = cur_diff_y;
++ val_y = ready1;
++ }
++
++ prev_val_y = ready1;
++ }
++
++ if (val_x > bckup_x) {
++ diffx = val_x - bckup_x;
++ diffy = val_y - bckup_y;
++ } else {
++ diffx = bckup_x - val_x;
++ diffy = bckup_y - val_y;
++ }
++ bckup_x = val_x;
++ bckup_y = val_y;
++
++ z1 = ((tscadc_readl(ts_dev, TSCADC_REG_FIFO0)) & 0xfff);
++ z2 = ((tscadc_readl(ts_dev, TSCADC_REG_FIFO1)) & 0xfff);
++
++ if ((z1 != 0) && (z2 != 0)) {
++			/*
++			 * Calculate the pressure using the formula
++			 * Rtouch = x-plate resistance *
++			 *          x position / 4096 * ((z2 / z1) - 1)
++			 */
++ z = z2 - z1;
++ z *= val_x;
++ z *= ts_dev->x_plate_resistance;
++ z /= z1;
++ z = (z + 2047) >> 12;
++
++ /*
++ * Sample found inconsistent by debouncing
++ * or pressure is beyond the maximum.
++ * Don't report it to user space.
++ */
++ if (pen == 0) {
++ if ((diffx < 15) && (diffy < 15)
++ && (z <= MAX_12BIT)) {
++ input_report_abs(input_dev, ABS_X,
++ val_x);
++ input_report_abs(input_dev, ABS_Y,
++ val_y);
++ input_report_abs(input_dev, ABS_PRESSURE,
++ z);
++ input_report_key(input_dev, BTN_TOUCH,
++ 1);
++ input_sync(input_dev);
++ }
++ }
++ }
++ irqclr |= TSCADC_IRQENB_FIFO1THRES;
++ }
++
++ udelay(315);
++
++ status = tscadc_readl(ts_dev, TSCADC_REG_RAWIRQSTATUS);
++ if (status & TSCADC_IRQENB_PENUP) {
++ /* Pen up event */
++ fsm = tscadc_readl(ts_dev, TSCADC_REG_ADCFSM);
++ if (fsm == 0x10) {
++ pen = 1;
++ bckup_x = 0;
++ bckup_y = 0;
++ input_report_key(input_dev, BTN_TOUCH, 0);
++ input_report_abs(input_dev, ABS_PRESSURE, 0);
++ input_sync(input_dev);
++ } else {
++ pen = 0;
++ }
++ irqclr |= TSCADC_IRQENB_PENUP;
++ }
++ irqclr |= TSCADC_IRQENB_HW_PEN;
++
++ tscadc_writel(ts_dev, TSCADC_REG_IRQSTATUS, irqclr);
++
++ /* check pending interrupts */
++ tscadc_writel(ts_dev, TSCADC_REG_IRQEOI, 0x0);
++
++ tscadc_writel(ts_dev, TSCADC_REG_SE, TSCADC_STPENB_STEPENB_TOUCHSCREEN);
++ return IRQ_HANDLED;
++}
++
++/*
++ * sysfs "show" handler for the ainN attributes: trigger a one-shot ADC
++ * conversion on the requested channel and return the raw sample.
++ */
++
++size_t do_adc_sample(struct kobject *kobj, struct attribute *attr, char *buf) {
++ struct platform_device *pdev;
++ struct device *dev;
++ struct tscadc *ts_dev;
++ int channel_num;
++ int fifo0count = 0;
++ int read_sample = 0;
++
++ pdev = (struct platform_device *)container_of(kobj, struct device, kobj);
++ dev = &pdev->dev;
++
++ ts_dev = dev_get_drvdata(dev);
++
++	if (strncmp(attr->name, "ain", 3)) {
++		printk(KERN_ERR "Invalid ain num\n");
++		return -EINVAL;
++	}
++
++	channel_num = attr->name[3] - '0';
++	if (channel_num > 8 || channel_num < 1) {
++		printk(KERN_ERR "Invalid channel_num=%d\n", channel_num);
++		return -EINVAL;
++	}
++
++ tsc_adc_step_config(ts_dev, channel_num);
++
++ do {
++ fifo0count = tscadc_readl(ts_dev, TSCADC_REG_FIFO0CNT);
++	} while (!fifo0count);
++
++ while (fifo0count--) {
++ read_sample = tscadc_readl(ts_dev, TSCADC_REG_FIFO0) & 0xfff;
++ // printk("polling sample: %d: %x\n", fifo0count, read_sample);
++ }
++	sprintf(buf, "%d", read_sample);
++
++	/* Return the number of characters placed in the sysfs buffer */
++	return strlen(buf);
++}
++
++static int __devinit tscadc_probe(struct platform_device *pdev)
++{
++ struct tscadc *ts_dev;
++ struct input_dev *input_dev = NULL;
++ int err;
++ int clk_value;
++ int clock_rate, irqenable, ctrl;
++ struct tsc_data *pdata = pdev->dev.platform_data;
++ struct resource *res;
++ struct clk *clk;
++
++ printk("dev addr = %p\n", &pdev->dev);
++ printk("pdev addr = %p\n", pdev);
++
++ device_create_file(&pdev->dev, &dev_attr_ain1);
++ device_create_file(&pdev->dev, &dev_attr_ain2);
++ device_create_file(&pdev->dev, &dev_attr_ain3);
++ device_create_file(&pdev->dev, &dev_attr_ain4);
++ device_create_file(&pdev->dev, &dev_attr_ain5);
++ device_create_file(&pdev->dev, &dev_attr_ain6);
++ device_create_file(&pdev->dev, &dev_attr_ain7);
++ device_create_file(&pdev->dev, &dev_attr_ain8);
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(&pdev->dev, "no memory resource defined.\n");
++ return -EINVAL;
++ }
++
++ /* Allocate memory for device */
++ ts_dev = kzalloc(sizeof(struct tscadc), GFP_KERNEL);
++ if (!ts_dev) {
++ dev_err(&pdev->dev, "failed to allocate memory.\n");
++ return -ENOMEM;
++ }
++
++ ts_dev->irq = platform_get_irq(pdev, 0);
++ if (ts_dev->irq < 0) {
++ dev_err(&pdev->dev, "no irq ID is specified.\n");
++ return -ENODEV;
++ }
++
++	if (pdata->mode == TI_TSCADC_TSCMODE) {
++ input_dev = input_allocate_device();
++ if (!input_dev) {
++ dev_err(&pdev->dev, "failed to allocate input device.\n");
++ err = -ENOMEM;
++ goto err_free_mem;
++ }
++ ts_dev->input = input_dev;
++ }
++
++ res = request_mem_region(res->start, resource_size(res), pdev->name);
++ if (!res) {
++ dev_err(&pdev->dev, "failed to reserve registers.\n");
++ err = -EBUSY;
++ goto err_free_mem;
++ }
++
++ ts_dev->tsc_base = ioremap(res->start, resource_size(res));
++ if (!ts_dev->tsc_base) {
++ dev_err(&pdev->dev, "failed to map registers.\n");
++ err = -ENOMEM;
++ goto err_release_mem;
++ }
++
++	if (pdata->mode == TI_TSCADC_TSCMODE) {
++		err = request_irq(ts_dev->irq, tsc_interrupt, IRQF_DISABLED,
++			  pdev->dev.driver->name, ts_dev);
++	} else {
++ err = request_irq(ts_dev->irq, tsc_adc_interrupt, IRQF_DISABLED,
++ pdev->dev.driver->name, ts_dev);
++ }
++
++ if (err) {
++ dev_err(&pdev->dev, "failed to allocate irq.\n");
++ goto err_unmap_regs;
++ }
++
++ pm_runtime_enable(&pdev->dev);
++ pm_runtime_get_sync(&pdev->dev);
++
++ clk = clk_get(&pdev->dev, "adc_tsc_fck");
++ if (IS_ERR(clk)) {
++ dev_err(&pdev->dev, "failed to get TSC fck\n");
++ err = PTR_ERR(clk);
++ goto err_free_irq;
++ }
++ clock_rate = clk_get_rate(clk);
++
++	/* An input clock of at least 21MHz is required.
++	 * The clock was verified to be 24MHz on the BeagleBone. */
++
++ clk_value = clock_rate / ADC_CLK;
++ if (clk_value < 7) {
++ dev_err(&pdev->dev, "clock input less than min clock requirement\n");
++ err = -EINVAL;
++ goto err_fail;
++ }
++
++ /* TSCADC_CLKDIV needs to be configured to the value minus 1 */
++ clk_value = clk_value - 1;
++ tscadc_writel(ts_dev, TSCADC_REG_CLKDIV, clk_value);
++
++ ts_dev->wires = pdata->wires;
++ ts_dev->analog_input = pdata->analog_input;
++ ts_dev->x_plate_resistance = pdata->x_plate_resistance;
++ ts_dev->mode = pdata->mode;
++
++ /* Set the control register bits - 12.5.44 TRM */
++ ctrl = TSCADC_CNTRLREG_STEPCONFIGWRT |
++ TSCADC_CNTRLREG_STEPID;
++	if (pdata->mode == TI_TSCADC_TSCMODE) {
++ ctrl |= TSCADC_CNTRLREG_TSCENB;
++ switch (ts_dev->wires) {
++ case 4:
++ ctrl |= TSCADC_CNTRLREG_4WIRE;
++ break;
++ case 5:
++ ctrl |= TSCADC_CNTRLREG_5WIRE;
++ break;
++ case 8:
++ ctrl |= TSCADC_CNTRLREG_8WIRE;
++ break;
++ }
++ }
++ tscadc_writel(ts_dev, TSCADC_REG_CTRL, ctrl);
++ ts_dev->ctrl = ctrl;
++
++ /* Touch screen / ADC configuration */
++	if (pdata->mode == TI_TSCADC_TSCMODE) {
++ tsc_idle_config(ts_dev);
++ tsc_step_config(ts_dev);
++ tscadc_writel(ts_dev, TSCADC_REG_FIFO1THR, 6);
++ irqenable = TSCADC_IRQENB_FIFO1THRES;
++ /* Touch screen also needs an input_dev */
++ input_dev->name = "ti-tsc-adcc";
++ input_dev->dev.parent = &pdev->dev;
++ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
++ input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
++ input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, 0, 0);
++ input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, 0, 0);
++ /* register to the input system */
++ err = input_register_device(input_dev);
++ if (err)
++ goto err_fail;
++	} else {
++ tscadc_writel(ts_dev, TSCADC_REG_FIFO0THR, 0);
++		irqenable = 0;	/* TSCADC_IRQENB_FIFO0THRES */
++ }
++ tscadc_writel(ts_dev, TSCADC_REG_IRQENABLE, irqenable);
++
++ ctrl |= TSCADC_CNTRLREG_TSCSSENB;
++ tscadc_writel(ts_dev, TSCADC_REG_CTRL, ctrl); /* Turn on TSC_ADC */
++
++ device_init_wakeup(&pdev->dev, true);
++ platform_set_drvdata(pdev, ts_dev);
++ return 0;
++
++err_fail:
++ pm_runtime_disable(&pdev->dev);
++err_free_irq:
++ free_irq(ts_dev->irq, ts_dev);
++err_unmap_regs:
++ iounmap(ts_dev->tsc_base);
++err_release_mem:
++ release_mem_region(res->start, resource_size(res));
++ input_free_device(ts_dev->input);
++err_free_mem:
++ kfree(ts_dev);
++ return err;
++}
++
++static int __devexit tscadc_remove(struct platform_device *pdev)
++{
++ struct tscadc *ts_dev = platform_get_drvdata(pdev);
++ struct resource *res;
++
++ free_irq(ts_dev->irq, ts_dev);
++
++ input_unregister_device(ts_dev->input);
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ iounmap(ts_dev->tsc_base);
++ release_mem_region(res->start, resource_size(res));
++
++ pm_runtime_disable(&pdev->dev);
++
++ kfree(ts_dev);
++
++ device_init_wakeup(&pdev->dev, 0);
++ platform_set_drvdata(pdev, NULL);
++ return 0;
++}
++
++static int tscadc_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ struct tscadc *ts_dev = platform_get_drvdata(pdev);
++ unsigned int idle;
++
++ if (device_may_wakeup(&pdev->dev)) {
++ idle = tscadc_readl(ts_dev, TSCADC_REG_IRQENABLE);
++ tscadc_writel(ts_dev, TSCADC_REG_IRQENABLE,
++ (idle | TSCADC_IRQENB_HW_PEN));
++ tscadc_writel(ts_dev, TSCADC_REG_IRQWAKEUP, TSCADC_IRQWKUP_ENB);
++ }
++
++ /* module disable */
++ idle = 0;
++ idle = tscadc_readl(ts_dev, TSCADC_REG_CTRL);
++ idle &= ~(TSCADC_CNTRLREG_TSCSSENB);
++ tscadc_writel(ts_dev, TSCADC_REG_CTRL, idle);
++
++ pm_runtime_put_sync(&pdev->dev);
++
++ return 0;
++
++}
++
++static int tscadc_resume(struct platform_device *pdev)
++{
++ struct tscadc *ts_dev = platform_get_drvdata(pdev);
++ unsigned int restore;
++
++ pm_runtime_get_sync(&pdev->dev);
++
++ if (device_may_wakeup(&pdev->dev)) {
++ tscadc_writel(ts_dev, TSCADC_REG_IRQWAKEUP,
++ TSCADC_IRQWKUP_DISABLE);
++ tscadc_writel(ts_dev, TSCADC_REG_IRQCLR, TSCADC_IRQENB_HW_PEN);
++ }
++
++ /* context restore */
++ tscadc_writel(ts_dev, TSCADC_REG_CTRL, ts_dev->ctrl);
++ tsc_idle_config(ts_dev);
++ tsc_step_config(ts_dev);
++ tscadc_writel(ts_dev, TSCADC_REG_FIFO1THR, 6);
++ restore = tscadc_readl(ts_dev, TSCADC_REG_CTRL);
++ tscadc_writel(ts_dev, TSCADC_REG_CTRL,
++ (restore | TSCADC_CNTRLREG_TSCSSENB));
++
++ return 0;
++}
++
++static struct platform_driver ti_tsc_driver = {
++ .probe = tscadc_probe,
++ .remove = __devexit_p(tscadc_remove),
++ .driver = {
++ .name = "tsc",
++ .owner = THIS_MODULE,
++ },
++ .suspend = tscadc_suspend,
++ .resume = tscadc_resume,
++};
++
++static int __init ti_tsc_init(void)
++{
++ return platform_driver_register(&ti_tsc_driver);
++}
++module_init(ti_tsc_init);
++
++static void __exit ti_tsc_exit(void)
++{
++ platform_driver_unregister(&ti_tsc_driver);
++}
++module_exit(ti_tsc_exit);
++
++MODULE_DESCRIPTION("TI touchscreen controller driver");
++MODULE_AUTHOR("Rachna Patil <rachna@ti.com>");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
+index aba706c..8f32b2b 100644
+--- a/drivers/iommu/omap-iommu.c
++++ b/drivers/iommu/omap-iommu.c
+@@ -1229,8 +1229,7 @@ static int __init omap_iommu_init(void)
+
+ return platform_driver_register(&omap_iommu_driver);
+ }
+-/* must be ready before omap3isp is probed */
+-subsys_initcall(omap_iommu_init);
++module_init(omap_iommu_init);
+
+ static void __exit omap_iommu_exit(void)
+ {
+diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
+index cf10ecf..860c112 100644
+--- a/drivers/media/rc/ene_ir.c
++++ b/drivers/media/rc/ene_ir.c
+@@ -324,7 +324,7 @@ static int ene_rx_get_sample_reg(struct ene_device *dev)
+ return dev->extra_buf2_address + r_pointer;
+ }
+
+- dbg("attempt to read beyong ring bufer end");
++ dbg("attempt to read beyond ring buffer end");
+ return 0;
+ }
+
+diff --git a/drivers/media/rc/ene_ir.h b/drivers/media/rc/ene_ir.h
+index fd108d9..6f978e8 100644
+--- a/drivers/media/rc/ene_ir.h
++++ b/drivers/media/rc/ene_ir.h
+@@ -227,7 +227,7 @@ struct ene_device {
+
+ /* TX buffer */
+ unsigned *tx_buffer; /* input samples buffer*/
+- int tx_pos; /* position in that bufer */
++ int tx_pos; /* position in that buffer */
+ int tx_len; /* current len of tx buffer */
+ int tx_done; /* done transmitting */
+ /* one more sample pending*/
+diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
+index f1391c2..d2c55e8 100644
+--- a/drivers/mfd/Kconfig
++++ b/drivers/mfd/Kconfig
+@@ -142,6 +142,21 @@ config TPS6507X
+ This driver can also be built as a module. If so, the module
+ will be called tps6507x.
+
++config MFD_TPS65217
++ tristate "TPS65217 Power Management / White LED chips"
++ depends on I2C
++ select MFD_CORE
++ select REGMAP_I2C
++ help
++ If you say yes here you get support for the TPS65217 series of
++ Power Management / White LED chips.
++ These include voltage regulators, lithium ion/polymer battery
++ charger, wled and other features that are often used in portable
++	  charger, WLED and other features that are often used in portable
++
++ This driver can also be built as a module. If so, the module
++ will be called tps65217.
++
+ config MFD_TPS6586X
+ bool "TPS6586x Power Management chips"
+ depends on I2C=y && GPIOLIB && GENERIC_HARDIRQS
+diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
+index b2292eb..7a6d111 100644
+--- a/drivers/mfd/Makefile
++++ b/drivers/mfd/Makefile
+@@ -36,6 +36,7 @@ obj-$(CONFIG_MFD_WM8994) += wm8994-core.o wm8994-irq.o
+ obj-$(CONFIG_TPS6105X) += tps6105x.o
+ obj-$(CONFIG_TPS65010) += tps65010.o
+ obj-$(CONFIG_TPS6507X) += tps6507x.o
++obj-$(CONFIG_MFD_TPS65217) += tps65217.o
+ obj-$(CONFIG_MFD_TPS65910) += tps65910.o tps65910-irq.o
+ tps65912-objs := tps65912-core.o tps65912-irq.o
+ obj-$(CONFIG_MFD_TPS65912) += tps65912.o
+diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
+index 86e1458..3f565ef 100644
+--- a/drivers/mfd/omap-usb-host.c
++++ b/drivers/mfd/omap-usb-host.c
+@@ -27,8 +27,9 @@
+ #include <linux/spinlock.h>
+ #include <linux/gpio.h>
+ #include <plat/usb.h>
++#include <linux/pm_runtime.h>
+
+-#define USBHS_DRIVER_NAME "usbhs-omap"
++#define USBHS_DRIVER_NAME "usbhs_omap"
+ #define OMAP_EHCI_DEVICE "ehci-omap"
+ #define OMAP_OHCI_DEVICE "ohci-omap3"
+
+@@ -147,9 +148,6 @@
+
+
+ struct usbhs_hcd_omap {
+- struct clk *usbhost_ick;
+- struct clk *usbhost_hs_fck;
+- struct clk *usbhost_fs_fck;
+ struct clk *xclk60mhsp1_ck;
+ struct clk *xclk60mhsp2_ck;
+ struct clk *utmi_p1_fck;
+@@ -159,8 +157,7 @@ struct usbhs_hcd_omap {
+ struct clk *usbhost_p2_fck;
+ struct clk *usbtll_p2_fck;
+ struct clk *init_60m_fclk;
+- struct clk *usbtll_fck;
+- struct clk *usbtll_ick;
++ struct clk *ehci_logic_fck;
+
+ void __iomem *uhh_base;
+ void __iomem *tll_base;
+@@ -169,7 +166,6 @@ struct usbhs_hcd_omap {
+
+ u32 usbhs_rev;
+ spinlock_t lock;
+- int count;
+ };
+ /*-------------------------------------------------------------------------*/
+
+@@ -319,269 +315,6 @@ err_end:
+ return ret;
+ }
+
+-/**
+- * usbhs_omap_probe - initialize TI-based HCDs
+- *
+- * Allocates basic resources for this USB host controller.
+- */
+-static int __devinit usbhs_omap_probe(struct platform_device *pdev)
+-{
+- struct device *dev = &pdev->dev;
+- struct usbhs_omap_platform_data *pdata = dev->platform_data;
+- struct usbhs_hcd_omap *omap;
+- struct resource *res;
+- int ret = 0;
+- int i;
+-
+- if (!pdata) {
+- dev_err(dev, "Missing platform data\n");
+- ret = -ENOMEM;
+- goto end_probe;
+- }
+-
+- omap = kzalloc(sizeof(*omap), GFP_KERNEL);
+- if (!omap) {
+- dev_err(dev, "Memory allocation failed\n");
+- ret = -ENOMEM;
+- goto end_probe;
+- }
+-
+- spin_lock_init(&omap->lock);
+-
+- for (i = 0; i < OMAP3_HS_USB_PORTS; i++)
+- omap->platdata.port_mode[i] = pdata->port_mode[i];
+-
+- omap->platdata.ehci_data = pdata->ehci_data;
+- omap->platdata.ohci_data = pdata->ohci_data;
+-
+- omap->usbhost_ick = clk_get(dev, "usbhost_ick");
+- if (IS_ERR(omap->usbhost_ick)) {
+- ret = PTR_ERR(omap->usbhost_ick);
+- dev_err(dev, "usbhost_ick failed error:%d\n", ret);
+- goto err_end;
+- }
+-
+- omap->usbhost_hs_fck = clk_get(dev, "hs_fck");
+- if (IS_ERR(omap->usbhost_hs_fck)) {
+- ret = PTR_ERR(omap->usbhost_hs_fck);
+- dev_err(dev, "usbhost_hs_fck failed error:%d\n", ret);
+- goto err_usbhost_ick;
+- }
+-
+- omap->usbhost_fs_fck = clk_get(dev, "fs_fck");
+- if (IS_ERR(omap->usbhost_fs_fck)) {
+- ret = PTR_ERR(omap->usbhost_fs_fck);
+- dev_err(dev, "usbhost_fs_fck failed error:%d\n", ret);
+- goto err_usbhost_hs_fck;
+- }
+-
+- omap->usbtll_fck = clk_get(dev, "usbtll_fck");
+- if (IS_ERR(omap->usbtll_fck)) {
+- ret = PTR_ERR(omap->usbtll_fck);
+- dev_err(dev, "usbtll_fck failed error:%d\n", ret);
+- goto err_usbhost_fs_fck;
+- }
+-
+- omap->usbtll_ick = clk_get(dev, "usbtll_ick");
+- if (IS_ERR(omap->usbtll_ick)) {
+- ret = PTR_ERR(omap->usbtll_ick);
+- dev_err(dev, "usbtll_ick failed error:%d\n", ret);
+- goto err_usbtll_fck;
+- }
+-
+- omap->utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
+- if (IS_ERR(omap->utmi_p1_fck)) {
+- ret = PTR_ERR(omap->utmi_p1_fck);
+- dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
+- goto err_usbtll_ick;
+- }
+-
+- omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
+- if (IS_ERR(omap->xclk60mhsp1_ck)) {
+- ret = PTR_ERR(omap->xclk60mhsp1_ck);
+- dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret);
+- goto err_utmi_p1_fck;
+- }
+-
+- omap->utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk");
+- if (IS_ERR(omap->utmi_p2_fck)) {
+- ret = PTR_ERR(omap->utmi_p2_fck);
+- dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret);
+- goto err_xclk60mhsp1_ck;
+- }
+-
+- omap->xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck");
+- if (IS_ERR(omap->xclk60mhsp2_ck)) {
+- ret = PTR_ERR(omap->xclk60mhsp2_ck);
+- dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret);
+- goto err_utmi_p2_fck;
+- }
+-
+- omap->usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk");
+- if (IS_ERR(omap->usbhost_p1_fck)) {
+- ret = PTR_ERR(omap->usbhost_p1_fck);
+- dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret);
+- goto err_xclk60mhsp2_ck;
+- }
+-
+- omap->usbtll_p1_fck = clk_get(dev, "usb_tll_hs_usb_ch0_clk");
+- if (IS_ERR(omap->usbtll_p1_fck)) {
+- ret = PTR_ERR(omap->usbtll_p1_fck);
+- dev_err(dev, "usbtll_p1_fck failed error:%d\n", ret);
+- goto err_usbhost_p1_fck;
+- }
+-
+- omap->usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk");
+- if (IS_ERR(omap->usbhost_p2_fck)) {
+- ret = PTR_ERR(omap->usbhost_p2_fck);
+- dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret);
+- goto err_usbtll_p1_fck;
+- }
+-
+- omap->usbtll_p2_fck = clk_get(dev, "usb_tll_hs_usb_ch1_clk");
+- if (IS_ERR(omap->usbtll_p2_fck)) {
+- ret = PTR_ERR(omap->usbtll_p2_fck);
+- dev_err(dev, "usbtll_p2_fck failed error:%d\n", ret);
+- goto err_usbhost_p2_fck;
+- }
+-
+- omap->init_60m_fclk = clk_get(dev, "init_60m_fclk");
+- if (IS_ERR(omap->init_60m_fclk)) {
+- ret = PTR_ERR(omap->init_60m_fclk);
+- dev_err(dev, "init_60m_fclk failed error:%d\n", ret);
+- goto err_usbtll_p2_fck;
+- }
+-
+- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "uhh");
+- if (!res) {
+- dev_err(dev, "UHH EHCI get resource failed\n");
+- ret = -ENODEV;
+- goto err_init_60m_fclk;
+- }
+-
+- omap->uhh_base = ioremap(res->start, resource_size(res));
+- if (!omap->uhh_base) {
+- dev_err(dev, "UHH ioremap failed\n");
+- ret = -ENOMEM;
+- goto err_init_60m_fclk;
+- }
+-
+- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tll");
+- if (!res) {
+- dev_err(dev, "UHH EHCI get resource failed\n");
+- ret = -ENODEV;
+- goto err_tll;
+- }
+-
+- omap->tll_base = ioremap(res->start, resource_size(res));
+- if (!omap->tll_base) {
+- dev_err(dev, "TLL ioremap failed\n");
+- ret = -ENOMEM;
+- goto err_tll;
+- }
+-
+- platform_set_drvdata(pdev, omap);
+-
+- ret = omap_usbhs_alloc_children(pdev);
+- if (ret) {
+- dev_err(dev, "omap_usbhs_alloc_children failed\n");
+- goto err_alloc;
+- }
+-
+- goto end_probe;
+-
+-err_alloc:
+- iounmap(omap->tll_base);
+-
+-err_tll:
+- iounmap(omap->uhh_base);
+-
+-err_init_60m_fclk:
+- clk_put(omap->init_60m_fclk);
+-
+-err_usbtll_p2_fck:
+- clk_put(omap->usbtll_p2_fck);
+-
+-err_usbhost_p2_fck:
+- clk_put(omap->usbhost_p2_fck);
+-
+-err_usbtll_p1_fck:
+- clk_put(omap->usbtll_p1_fck);
+-
+-err_usbhost_p1_fck:
+- clk_put(omap->usbhost_p1_fck);
+-
+-err_xclk60mhsp2_ck:
+- clk_put(omap->xclk60mhsp2_ck);
+-
+-err_utmi_p2_fck:
+- clk_put(omap->utmi_p2_fck);
+-
+-err_xclk60mhsp1_ck:
+- clk_put(omap->xclk60mhsp1_ck);
+-
+-err_utmi_p1_fck:
+- clk_put(omap->utmi_p1_fck);
+-
+-err_usbtll_ick:
+- clk_put(omap->usbtll_ick);
+-
+-err_usbtll_fck:
+- clk_put(omap->usbtll_fck);
+-
+-err_usbhost_fs_fck:
+- clk_put(omap->usbhost_fs_fck);
+-
+-err_usbhost_hs_fck:
+- clk_put(omap->usbhost_hs_fck);
+-
+-err_usbhost_ick:
+- clk_put(omap->usbhost_ick);
+-
+-err_end:
+- kfree(omap);
+-
+-end_probe:
+- return ret;
+-}
+-
+-/**
+- * usbhs_omap_remove - shutdown processing for UHH & TLL HCDs
+- * @pdev: USB Host Controller being removed
+- *
+- * Reverses the effect of usbhs_omap_probe().
+- */
+-static int __devexit usbhs_omap_remove(struct platform_device *pdev)
+-{
+- struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);
+-
+- if (omap->count != 0) {
+- dev_err(&pdev->dev,
+- "Either EHCI or OHCI is still using usbhs core\n");
+- return -EBUSY;
+- }
+-
+- iounmap(omap->tll_base);
+- iounmap(omap->uhh_base);
+- clk_put(omap->init_60m_fclk);
+- clk_put(omap->usbtll_p2_fck);
+- clk_put(omap->usbhost_p2_fck);
+- clk_put(omap->usbtll_p1_fck);
+- clk_put(omap->usbhost_p1_fck);
+- clk_put(omap->xclk60mhsp2_ck);
+- clk_put(omap->utmi_p2_fck);
+- clk_put(omap->xclk60mhsp1_ck);
+- clk_put(omap->utmi_p1_fck);
+- clk_put(omap->usbtll_ick);
+- clk_put(omap->usbtll_fck);
+- clk_put(omap->usbhost_fs_fck);
+- clk_put(omap->usbhost_hs_fck);
+- clk_put(omap->usbhost_ick);
+- kfree(omap);
+-
+- return 0;
+-}
+-
+ static bool is_ohci_port(enum usbhs_omap_port_mode pmode)
+ {
+ switch (pmode) {
+@@ -689,30 +422,85 @@ static void usbhs_omap_tll_init(struct device *dev, u8 tll_channel_count)
+ }
+ }
+
+-static int usbhs_enable(struct device *dev)
++static int usbhs_runtime_resume(struct device *dev)
+ {
+ struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
+ struct usbhs_omap_platform_data *pdata = &omap->platdata;
+- unsigned long flags = 0;
+- int ret = 0;
+- unsigned long timeout;
+- unsigned reg;
++ unsigned long flags;
++
++ dev_dbg(dev, "usbhs_runtime_resume\n");
+
+- dev_dbg(dev, "starting TI HSUSB Controller\n");
+ if (!pdata) {
+ dev_dbg(dev, "missing platform_data\n");
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&omap->lock, flags);
+- if (omap->count > 0)
+- goto end_count;
+
+- clk_enable(omap->usbhost_ick);
+- clk_enable(omap->usbhost_hs_fck);
+- clk_enable(omap->usbhost_fs_fck);
+- clk_enable(omap->usbtll_fck);
+- clk_enable(omap->usbtll_ick);
++ if (omap->ehci_logic_fck && !IS_ERR(omap->ehci_logic_fck))
++ clk_enable(omap->ehci_logic_fck);
++
++ if (is_ehci_tll_mode(pdata->port_mode[0])) {
++ clk_enable(omap->usbhost_p1_fck);
++ clk_enable(omap->usbtll_p1_fck);
++ }
++ if (is_ehci_tll_mode(pdata->port_mode[1])) {
++ clk_enable(omap->usbhost_p2_fck);
++ clk_enable(omap->usbtll_p2_fck);
++ }
++ clk_enable(omap->utmi_p1_fck);
++ clk_enable(omap->utmi_p2_fck);
++
++ spin_unlock_irqrestore(&omap->lock, flags);
++
++ return 0;
++}
++
++static int usbhs_runtime_suspend(struct device *dev)
++{
++ struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
++ struct usbhs_omap_platform_data *pdata = &omap->platdata;
++ unsigned long flags;
++
++ dev_dbg(dev, "usbhs_runtime_suspend\n");
++
++ if (!pdata) {
++ dev_dbg(dev, "missing platform_data\n");
++ return -ENODEV;
++ }
++
++ spin_lock_irqsave(&omap->lock, flags);
++
++ if (is_ehci_tll_mode(pdata->port_mode[0])) {
++ clk_disable(omap->usbhost_p1_fck);
++ clk_disable(omap->usbtll_p1_fck);
++ }
++ if (is_ehci_tll_mode(pdata->port_mode[1])) {
++ clk_disable(omap->usbhost_p2_fck);
++ clk_disable(omap->usbtll_p2_fck);
++ }
++ clk_disable(omap->utmi_p2_fck);
++ clk_disable(omap->utmi_p1_fck);
++
++ if (omap->ehci_logic_fck && !IS_ERR(omap->ehci_logic_fck))
++ clk_disable(omap->ehci_logic_fck);
++
++ spin_unlock_irqrestore(&omap->lock, flags);
++
++ return 0;
++}
++
++static void omap_usbhs_init(struct device *dev)
++{
++ struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
++ struct usbhs_omap_platform_data *pdata = &omap->platdata;
++ unsigned long flags;
++ unsigned reg;
++
++ dev_dbg(dev, "starting TI HSUSB Controller\n");
++
++ pm_runtime_get_sync(dev);
++ spin_lock_irqsave(&omap->lock, flags);
+
+ if (pdata->ehci_data->phy_reset) {
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) {
+@@ -736,50 +524,6 @@ static int usbhs_enable(struct device *dev)
+ omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
+ dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
+
+- /* perform TLL soft reset, and wait until reset is complete */
+- usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
+- OMAP_USBTLL_SYSCONFIG_SOFTRESET);
+-
+- /* Wait for TLL reset to complete */
+- timeout = jiffies + msecs_to_jiffies(1000);
+- while (!(usbhs_read(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
+- & OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
+- cpu_relax();
+-
+- if (time_after(jiffies, timeout)) {
+- dev_dbg(dev, "operation timed out\n");
+- ret = -EINVAL;
+- goto err_tll;
+- }
+- }
+-
+- dev_dbg(dev, "TLL RESET DONE\n");
+-
+- /* (1<<3) = no idle mode only for initial debugging */
+- usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
+- OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
+- OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
+- OMAP_USBTLL_SYSCONFIG_AUTOIDLE);
+-
+- /* Put UHH in NoIdle/NoStandby mode */
+- reg = usbhs_read(omap->uhh_base, OMAP_UHH_SYSCONFIG);
+- if (is_omap_usbhs_rev1(omap)) {
+- reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
+- | OMAP_UHH_SYSCONFIG_SIDLEMODE
+- | OMAP_UHH_SYSCONFIG_CACTIVITY
+- | OMAP_UHH_SYSCONFIG_MIDLEMODE);
+- reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
+-
+-
+- } else if (is_omap_usbhs_rev2(omap)) {
+- reg &= ~OMAP4_UHH_SYSCONFIG_IDLEMODE_CLEAR;
+- reg |= OMAP4_UHH_SYSCONFIG_NOIDLE;
+- reg &= ~OMAP4_UHH_SYSCONFIG_STDBYMODE_CLEAR;
+- reg |= OMAP4_UHH_SYSCONFIG_NOSTDBY;
+- }
+-
+- usbhs_write(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
+-
+ reg = usbhs_read(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
+ /* setup ULPI bypass and burst configurations */
+ reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN
+@@ -825,49 +569,6 @@ static int usbhs_enable(struct device *dev)
+ reg &= ~OMAP4_P1_MODE_CLEAR;
+ reg &= ~OMAP4_P2_MODE_CLEAR;
+
+- if (is_ehci_phy_mode(pdata->port_mode[0])) {
+- ret = clk_set_parent(omap->utmi_p1_fck,
+- omap->xclk60mhsp1_ck);
+- if (ret != 0) {
+- dev_err(dev, "xclk60mhsp1_ck set parent"
+- "failed error:%d\n", ret);
+- goto err_tll;
+- }
+- } else if (is_ehci_tll_mode(pdata->port_mode[0])) {
+- ret = clk_set_parent(omap->utmi_p1_fck,
+- omap->init_60m_fclk);
+- if (ret != 0) {
+- dev_err(dev, "init_60m_fclk set parent"
+- "failed error:%d\n", ret);
+- goto err_tll;
+- }
+- clk_enable(omap->usbhost_p1_fck);
+- clk_enable(omap->usbtll_p1_fck);
+- }
+-
+- if (is_ehci_phy_mode(pdata->port_mode[1])) {
+- ret = clk_set_parent(omap->utmi_p2_fck,
+- omap->xclk60mhsp2_ck);
+- if (ret != 0) {
+- dev_err(dev, "xclk60mhsp1_ck set parent"
+- "failed error:%d\n", ret);
+- goto err_tll;
+- }
+- } else if (is_ehci_tll_mode(pdata->port_mode[1])) {
+- ret = clk_set_parent(omap->utmi_p2_fck,
+- omap->init_60m_fclk);
+- if (ret != 0) {
+- dev_err(dev, "init_60m_fclk set parent"
+- "failed error:%d\n", ret);
+- goto err_tll;
+- }
+- clk_enable(omap->usbhost_p2_fck);
+- clk_enable(omap->usbtll_p2_fck);
+- }
+-
+- clk_enable(omap->utmi_p1_fck);
+- clk_enable(omap->utmi_p2_fck);
+-
+ if (is_ehci_tll_mode(pdata->port_mode[0]) ||
+ (is_ohci_port(pdata->port_mode[0])))
+ reg |= OMAP4_P1_MODE_TLL;
+@@ -913,12 +614,15 @@ static int usbhs_enable(struct device *dev)
+ (pdata->ehci_data->reset_gpio_port[1], 1);
+ }
+
+-end_count:
+- omap->count++;
+ spin_unlock_irqrestore(&omap->lock, flags);
+- return 0;
++ pm_runtime_put_sync(dev);
++}
++
++static void omap_usbhs_deinit(struct device *dev)
++{
++ struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
++ struct usbhs_omap_platform_data *pdata = &omap->platdata;
+
+-err_tll:
+ if (pdata->ehci_data->phy_reset) {
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+ gpio_free(pdata->ehci_data->reset_gpio_port[0]);
+@@ -926,123 +630,272 @@ err_tll:
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+ gpio_free(pdata->ehci_data->reset_gpio_port[1]);
+ }
+-
+- clk_disable(omap->usbtll_ick);
+- clk_disable(omap->usbtll_fck);
+- clk_disable(omap->usbhost_fs_fck);
+- clk_disable(omap->usbhost_hs_fck);
+- clk_disable(omap->usbhost_ick);
+- spin_unlock_irqrestore(&omap->lock, flags);
+- return ret;
+ }
+
+-static void usbhs_disable(struct device *dev)
++
++/**
++ * usbhs_omap_probe - initialize TI-based HCDs
++ *
++ * Allocates basic resources for this USB host controller.
++ */
++static int __devinit usbhs_omap_probe(struct platform_device *pdev)
+ {
+- struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
+- struct usbhs_omap_platform_data *pdata = &omap->platdata;
+- unsigned long flags = 0;
+- unsigned long timeout;
++ struct device *dev = &pdev->dev;
++ struct usbhs_omap_platform_data *pdata = dev->platform_data;
++ struct usbhs_hcd_omap *omap;
++ struct resource *res;
++ int ret = 0;
++ int i;
+
+- dev_dbg(dev, "stopping TI HSUSB Controller\n");
++ if (!pdata) {
++ dev_err(dev, "Missing platform data\n");
++ ret = -ENOMEM;
++ goto end_probe;
++ }
+
+- spin_lock_irqsave(&omap->lock, flags);
++ omap = kzalloc(sizeof(*omap), GFP_KERNEL);
++ if (!omap) {
++ dev_err(dev, "Memory allocation failed\n");
++ ret = -ENOMEM;
++ goto end_probe;
++ }
+
+- if (omap->count == 0)
+- goto end_disble;
++ spin_lock_init(&omap->lock);
+
+- omap->count--;
++ for (i = 0; i < OMAP3_HS_USB_PORTS; i++)
++ omap->platdata.port_mode[i] = pdata->port_mode[i];
++
++ omap->platdata.ehci_data = pdata->ehci_data;
++ omap->platdata.ohci_data = pdata->ohci_data;
+
+- if (omap->count != 0)
+- goto end_disble;
++ pm_runtime_enable(dev);
+
+- /* Reset OMAP modules for insmod/rmmod to work */
+- usbhs_write(omap->uhh_base, OMAP_UHH_SYSCONFIG,
+- is_omap_usbhs_rev2(omap) ?
+- OMAP4_UHH_SYSCONFIG_SOFTRESET :
+- OMAP_UHH_SYSCONFIG_SOFTRESET);
+
+- timeout = jiffies + msecs_to_jiffies(100);
+- while (!(usbhs_read(omap->uhh_base, OMAP_UHH_SYSSTATUS)
+- & (1 << 0))) {
+- cpu_relax();
++ for (i = 0; i < OMAP3_HS_USB_PORTS; i++)
++ if (is_ehci_phy_mode(i) || is_ehci_tll_mode(i) ||
++ is_ehci_hsic_mode(i)) {
++ omap->ehci_logic_fck = clk_get(dev, "ehci_logic_fck");
++ if (IS_ERR(omap->ehci_logic_fck)) {
++ ret = PTR_ERR(omap->ehci_logic_fck);
++ dev_warn(dev, "ehci_logic_fck failed:%d\n",
++ ret);
++ }
++ break;
++ }
+
+- if (time_after(jiffies, timeout))
+- dev_dbg(dev, "operation timed out\n");
++ omap->utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
++ if (IS_ERR(omap->utmi_p1_fck)) {
++ ret = PTR_ERR(omap->utmi_p1_fck);
++ dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
++ goto err_end;
+ }
+
+- while (!(usbhs_read(omap->uhh_base, OMAP_UHH_SYSSTATUS)
+- & (1 << 1))) {
+- cpu_relax();
++ omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
++ if (IS_ERR(omap->xclk60mhsp1_ck)) {
++ ret = PTR_ERR(omap->xclk60mhsp1_ck);
++ dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret);
++ goto err_utmi_p1_fck;
++ }
+
+- if (time_after(jiffies, timeout))
+- dev_dbg(dev, "operation timed out\n");
++ omap->utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk");
++ if (IS_ERR(omap->utmi_p2_fck)) {
++ ret = PTR_ERR(omap->utmi_p2_fck);
++ dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret);
++ goto err_xclk60mhsp1_ck;
+ }
+
+- while (!(usbhs_read(omap->uhh_base, OMAP_UHH_SYSSTATUS)
+- & (1 << 2))) {
+- cpu_relax();
++ omap->xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck");
++ if (IS_ERR(omap->xclk60mhsp2_ck)) {
++ ret = PTR_ERR(omap->xclk60mhsp2_ck);
++ dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret);
++ goto err_utmi_p2_fck;
++ }
+
+- if (time_after(jiffies, timeout))
+- dev_dbg(dev, "operation timed out\n");
++ omap->usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk");
++ if (IS_ERR(omap->usbhost_p1_fck)) {
++ ret = PTR_ERR(omap->usbhost_p1_fck);
++ dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret);
++ goto err_xclk60mhsp2_ck;
+ }
+
+- usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG, (1 << 1));
++ omap->usbtll_p1_fck = clk_get(dev, "usb_tll_hs_usb_ch0_clk");
++ if (IS_ERR(omap->usbtll_p1_fck)) {
++ ret = PTR_ERR(omap->usbtll_p1_fck);
++ dev_err(dev, "usbtll_p1_fck failed error:%d\n", ret);
++ goto err_usbhost_p1_fck;
++ }
+
+- while (!(usbhs_read(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
+- & (1 << 0))) {
+- cpu_relax();
++ omap->usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk");
++ if (IS_ERR(omap->usbhost_p2_fck)) {
++ ret = PTR_ERR(omap->usbhost_p2_fck);
++ dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret);
++ goto err_usbtll_p1_fck;
++ }
+
+- if (time_after(jiffies, timeout))
+- dev_dbg(dev, "operation timed out\n");
++ omap->usbtll_p2_fck = clk_get(dev, "usb_tll_hs_usb_ch1_clk");
++ if (IS_ERR(omap->usbtll_p2_fck)) {
++ ret = PTR_ERR(omap->usbtll_p2_fck);
++ dev_err(dev, "usbtll_p2_fck failed error:%d\n", ret);
++ goto err_usbhost_p2_fck;
+ }
+
+- if (is_omap_usbhs_rev2(omap)) {
+- if (is_ehci_tll_mode(pdata->port_mode[0]))
+- clk_disable(omap->usbtll_p1_fck);
+- if (is_ehci_tll_mode(pdata->port_mode[1]))
+- clk_disable(omap->usbtll_p2_fck);
+- clk_disable(omap->utmi_p2_fck);
+- clk_disable(omap->utmi_p1_fck);
++ omap->init_60m_fclk = clk_get(dev, "init_60m_fclk");
++ if (IS_ERR(omap->init_60m_fclk)) {
++ ret = PTR_ERR(omap->init_60m_fclk);
++ dev_err(dev, "init_60m_fclk failed error:%d\n", ret);
++ goto err_usbtll_p2_fck;
+ }
+
+- clk_disable(omap->usbtll_ick);
+- clk_disable(omap->usbtll_fck);
+- clk_disable(omap->usbhost_fs_fck);
+- clk_disable(omap->usbhost_hs_fck);
+- clk_disable(omap->usbhost_ick);
++ if (is_ehci_phy_mode(pdata->port_mode[0])) {
++		/* for OMAP3, the clk_set_parent() call fails */
++ ret = clk_set_parent(omap->utmi_p1_fck,
++ omap->xclk60mhsp1_ck);
++ if (ret != 0)
++ dev_err(dev, "xclk60mhsp1_ck set parent"
++ "failed error:%d\n", ret);
++ } else if (is_ehci_tll_mode(pdata->port_mode[0])) {
++ ret = clk_set_parent(omap->utmi_p1_fck,
++ omap->init_60m_fclk);
++ if (ret != 0)
++ dev_err(dev, "init_60m_fclk set parent"
++ "failed error:%d\n", ret);
++ }
+
+- /* The gpio_free migh sleep; so unlock the spinlock */
+- spin_unlock_irqrestore(&omap->lock, flags);
++ if (is_ehci_phy_mode(pdata->port_mode[1])) {
++ ret = clk_set_parent(omap->utmi_p2_fck,
++ omap->xclk60mhsp2_ck);
++ if (ret != 0)
++ dev_err(dev, "xclk60mhsp2_ck set parent"
++ "failed error:%d\n", ret);
++ } else if (is_ehci_tll_mode(pdata->port_mode[1])) {
++ ret = clk_set_parent(omap->utmi_p2_fck,
++ omap->init_60m_fclk);
++ if (ret != 0)
++ dev_err(dev, "init_60m_fclk set parent"
++ "failed error:%d\n", ret);
++ }
+
+- if (pdata->ehci_data->phy_reset) {
+- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+- gpio_free(pdata->ehci_data->reset_gpio_port[0]);
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "uhh");
++ if (!res) {
++ dev_err(dev, "UHH EHCI get resource failed\n");
++ ret = -ENODEV;
++ goto err_init_60m_fclk;
++ }
+
+- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+- gpio_free(pdata->ehci_data->reset_gpio_port[1]);
++ omap->uhh_base = ioremap(res->start, resource_size(res));
++ if (!omap->uhh_base) {
++ dev_err(dev, "UHH ioremap failed\n");
++ ret = -ENOMEM;
++ goto err_init_60m_fclk;
+ }
+- return;
+
+-end_disble:
+- spin_unlock_irqrestore(&omap->lock, flags);
+-}
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tll");
++ if (!res) {
++ dev_err(dev, "UHH EHCI get resource failed\n");
++ ret = -ENODEV;
++ goto err_tll;
++ }
+
+-int omap_usbhs_enable(struct device *dev)
+-{
+- return usbhs_enable(dev->parent);
++ omap->tll_base = ioremap(res->start, resource_size(res));
++ if (!omap->tll_base) {
++ dev_err(dev, "TLL ioremap failed\n");
++ ret = -ENOMEM;
++ goto err_tll;
++ }
++
++ platform_set_drvdata(pdev, omap);
++
++ ret = omap_usbhs_alloc_children(pdev);
++ if (ret) {
++ dev_err(dev, "omap_usbhs_alloc_children failed\n");
++ goto err_alloc;
++ }
++
++ omap_usbhs_init(dev);
++
++ goto end_probe;
++
++err_alloc:
++ iounmap(omap->tll_base);
++
++err_tll:
++ iounmap(omap->uhh_base);
++
++err_init_60m_fclk:
++ clk_put(omap->init_60m_fclk);
++
++err_usbtll_p2_fck:
++ clk_put(omap->usbtll_p2_fck);
++
++err_usbhost_p2_fck:
++ clk_put(omap->usbhost_p2_fck);
++
++err_usbtll_p1_fck:
++ clk_put(omap->usbtll_p1_fck);
++
++err_usbhost_p1_fck:
++ clk_put(omap->usbhost_p1_fck);
++
++err_xclk60mhsp2_ck:
++ clk_put(omap->xclk60mhsp2_ck);
++
++err_utmi_p2_fck:
++ clk_put(omap->utmi_p2_fck);
++
++err_xclk60mhsp1_ck:
++ clk_put(omap->xclk60mhsp1_ck);
++
++err_utmi_p1_fck:
++ clk_put(omap->utmi_p1_fck);
++
++err_end:
++ clk_put(omap->ehci_logic_fck);
++ pm_runtime_disable(dev);
++ kfree(omap);
++
++end_probe:
++ return ret;
+ }
+-EXPORT_SYMBOL_GPL(omap_usbhs_enable);
+
+-void omap_usbhs_disable(struct device *dev)
++/**
++ * usbhs_omap_remove - shutdown processing for UHH & TLL HCDs
++ * @pdev: USB Host Controller being removed
++ *
++ * Reverses the effect of usbhs_omap_probe().
++ */
++static int __devexit usbhs_omap_remove(struct platform_device *pdev)
+ {
+- usbhs_disable(dev->parent);
++ struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);
++
++ omap_usbhs_deinit(&pdev->dev);
++ iounmap(omap->tll_base);
++ iounmap(omap->uhh_base);
++ clk_put(omap->init_60m_fclk);
++ clk_put(omap->usbtll_p2_fck);
++ clk_put(omap->usbhost_p2_fck);
++ clk_put(omap->usbtll_p1_fck);
++ clk_put(omap->usbhost_p1_fck);
++ clk_put(omap->xclk60mhsp2_ck);
++ clk_put(omap->utmi_p2_fck);
++ clk_put(omap->xclk60mhsp1_ck);
++ clk_put(omap->utmi_p1_fck);
++ clk_put(omap->ehci_logic_fck);
++ pm_runtime_disable(&pdev->dev);
++ kfree(omap);
++
++ return 0;
+ }
+-EXPORT_SYMBOL_GPL(omap_usbhs_disable);
++
++static const struct dev_pm_ops usbhsomap_dev_pm_ops = {
++ .runtime_suspend = usbhs_runtime_suspend,
++ .runtime_resume = usbhs_runtime_resume,
++};
+
+ static struct platform_driver usbhs_omap_driver = {
+ .driver = {
+ .name = (char *)usbhs_driver_name,
+ .owner = THIS_MODULE,
++ .pm = &usbhsomap_dev_pm_ops,
+ },
+ .remove = __exit_p(usbhs_omap_remove),
+ };
+diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
+new file mode 100644
+index 0000000..f7d854e
+--- /dev/null
++++ b/drivers/mfd/tps65217.c
+@@ -0,0 +1,242 @@
++/*
++ * tps65217.c
++ *
++ * TPS65217 chip family multi-function driver
++ *
++ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/kernel.h>
++#include <linux/device.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/init.h>
++#include <linux/i2c.h>
++#include <linux/slab.h>
++#include <linux/regmap.h>
++#include <linux/err.h>
++
++#include <linux/mfd/core.h>
++#include <linux/mfd/tps65217.h>
++
++/**
++ * tps65217_reg_read: Read a single tps65217 register.
++ *
++ * @tps: Device to read from.
++ * @reg: Register to read.
++ * @val: Contains the value read from the register
++ */
++int tps65217_reg_read(struct tps65217 *tps, unsigned int reg,
++ unsigned int *val)
++{
++ return regmap_read(tps->regmap, reg, val);
++}
++EXPORT_SYMBOL_GPL(tps65217_reg_read);
++
++/**
++ * tps65217_reg_write: Write a single tps65217 register.
++ *
++ * @tps65217: Device to write to.
++ * @reg: Register to write to.
++ * @val: Value to write.
++ * @level: Password protected level
++ */
++int tps65217_reg_write(struct tps65217 *tps, unsigned int reg,
++ unsigned int val, unsigned int level)
++{
++ int ret;
++ unsigned int xor_reg_val;
++
++ switch (level) {
++ case TPS65217_PROTECT_NONE:
++ return regmap_write(tps->regmap, reg, val);
++ case TPS65217_PROTECT_L1:
++ xor_reg_val = reg ^ TPS65217_PASSWORD_REGS_UNLOCK;
++ ret = regmap_write(tps->regmap, TPS65217_REG_PASSWORD,
++ xor_reg_val);
++ if (ret < 0)
++ return ret;
++
++ return regmap_write(tps->regmap, reg, val);
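++	/*
++	 * Level-2 protected registers require the password and the
++	 * value to be written twice in a row.
++	 */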
++ case TPS65217_PROTECT_L2:
++ xor_reg_val = reg ^ TPS65217_PASSWORD_REGS_UNLOCK;
++ ret = regmap_write(tps->regmap, TPS65217_REG_PASSWORD,
++ xor_reg_val);
++ if (ret < 0)
++ return ret;
++ ret = regmap_write(tps->regmap, reg, val);
++ if (ret < 0)
++ return ret;
++ ret = regmap_write(tps->regmap, TPS65217_REG_PASSWORD,
++ xor_reg_val);
++ if (ret < 0)
++ return ret;
++ return regmap_write(tps->regmap, reg, val);
++ default:
++ return -EINVAL;
++ }
++}
++EXPORT_SYMBOL_GPL(tps65217_reg_write);
++
++/**
++ * tps65217_update_bits: Modify bits w.r.t mask, val and level.
++ *
++ * @tps65217: Device to write to.
++ * @reg: Register to read-write to.
++ * @mask: Mask.
++ * @val: Value to write.
++ * @level: Password protected level
++ */
++int tps65217_update_bits(struct tps65217 *tps, unsigned int reg,
++ unsigned int mask, unsigned int val, unsigned int level)
++{
++ int ret;
++ unsigned int data;
++
++ ret = tps65217_reg_read(tps, reg, &data);
++ if (ret) {
++ dev_err(tps->dev, "Read from reg 0x%x failed\n", reg);
++ return ret;
++ }
++
++ data &= ~mask;
++ data |= val & mask;
++
++ ret = tps65217_reg_write(tps, reg, data, level);
++ if (ret)
++ dev_err(tps->dev, "Write for reg 0x%x failed\n", reg);
++
++ return ret;
++}
++
++int tps65217_set_bits(struct tps65217 *tps, unsigned int reg,
++ unsigned int mask, unsigned int val, unsigned int level)
++{
++ return tps65217_update_bits(tps, reg, mask, val, level);
++}
++EXPORT_SYMBOL_GPL(tps65217_set_bits);
++
++int tps65217_clear_bits(struct tps65217 *tps, unsigned int reg,
++ unsigned int mask, unsigned int level)
++{
++ return tps65217_update_bits(tps, reg, mask, 0, level);
++}
++EXPORT_SYMBOL_GPL(tps65217_clear_bits);
++
++static struct regmap_config tps65217_regmap_config = {
++ .reg_bits = 8,
++ .val_bits = 8,
++};
++
++static int __devinit tps65217_probe(struct i2c_client *client,
++ const struct i2c_device_id *ids)
++{
++ struct tps65217 *tps;
++ struct tps65217_board *pdata = client->dev.platform_data;
++ int i, ret;
++ unsigned int version;
++
++ tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
++ if (!tps)
++ return -ENOMEM;
++
++ tps->pdata = pdata;
++ tps->regmap = regmap_init_i2c(client, &tps65217_regmap_config);
++ if (IS_ERR(tps->regmap)) {
++ ret = PTR_ERR(tps->regmap);
++ dev_err(tps->dev, "Failed to allocate register map: %d\n",
++ ret);
++ return ret;
++ }
++
++ i2c_set_clientdata(client, tps);
++ tps->dev = &client->dev;
++
++ ret = tps65217_reg_read(tps, TPS65217_REG_CHIPID, &version);
++ if (ret < 0) {
++ dev_err(tps->dev, "Failed to read revision"
++ " register: %d\n", ret);
++ goto err_regmap;
++ }
++
++ dev_info(tps->dev, "TPS65217 ID %#x version 1.%d\n",
++ (version & TPS65217_CHIPID_CHIP_MASK) >> 4,
++ version & TPS65217_CHIPID_REV_MASK);
++
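++	/*
++	 * Register one "tps65217-pmic" platform device per regulator so
++	 * the regulator driver can bind to each of them.
++	 */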
++ for (i = 0; i < TPS65217_NUM_REGULATOR; i++) {
++ struct platform_device *pdev;
++
++ pdev = platform_device_alloc("tps65217-pmic", i);
++ if (!pdev) {
++ dev_err(tps->dev, "Cannot create regulator %d\n", i);
++ continue;
++ }
++
++ pdev->dev.parent = tps->dev;
++ platform_device_add_data(pdev, &pdata->tps65217_init_data[i],
++ sizeof(pdata->tps65217_init_data[i]));
++ tps->regulator_pdev[i] = pdev;
++
++ platform_device_add(pdev);
++ }
++
++ return 0;
++
++err_regmap:
++ regmap_exit(tps->regmap);
++
++ return ret;
++}
++
++static int __devexit tps65217_remove(struct i2c_client *client)
++{
++ struct tps65217 *tps = i2c_get_clientdata(client);
++ int i;
++
++ for (i = 0; i < TPS65217_NUM_REGULATOR; i++)
++ platform_device_unregister(tps->regulator_pdev[i]);
++
++ regmap_exit(tps->regmap);
++
++ return 0;
++}
++
++static const struct i2c_device_id tps65217_id_table[] = {
++ {"tps65217", 0xF0},
++ {/* end of list */}
++};
++MODULE_DEVICE_TABLE(i2c, tps65217_id_table);
++
++static struct i2c_driver tps65217_driver = {
++ .driver = {
++ .name = "tps65217",
++ },
++ .id_table = tps65217_id_table,
++ .probe = tps65217_probe,
++ .remove = __devexit_p(tps65217_remove),
++};
++
++static int __init tps65217_init(void)
++{
++ return i2c_add_driver(&tps65217_driver);
++}
++subsys_initcall(tps65217_init);
++
++static void __exit tps65217_exit(void)
++{
++ i2c_del_driver(&tps65217_driver);
++}
++module_exit(tps65217_exit);
++
++MODULE_AUTHOR("AnilKumar Ch <anilkumar@ti.com>");
++MODULE_DESCRIPTION("TPS65217 chip family multi-function driver");
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
+index c1da84b..2d4bc21 100644
+--- a/drivers/mfd/tps65910.c
++++ b/drivers/mfd/tps65910.c
+@@ -18,7 +18,6 @@
+ #include <linux/init.h>
+ #include <linux/slab.h>
+ #include <linux/i2c.h>
+-#include <linux/gpio.h>
+ #include <linux/mfd/core.h>
+ #include <linux/mfd/tps65910.h>
+
+@@ -138,6 +137,7 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
+ struct tps65910_board *pmic_plat_data;
+ struct tps65910_platform_data *init_data;
+ int ret = 0;
++ unsigned char buff;
+
+ pmic_plat_data = dev_get_platdata(&i2c->dev);
+ if (!pmic_plat_data)
+@@ -161,26 +161,38 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
+ tps65910->write = tps65910_i2c_write;
+ mutex_init(&tps65910->io_mutex);
+
+- ret = mfd_add_devices(tps65910->dev, -1,
+- tps65910s, ARRAY_SIZE(tps65910s),
+- NULL, 0);
++ /* Check that the device is actually there */
++ ret = tps65910_i2c_read(tps65910, 0x0, 1, &buff);
++ if (ret < 0) {
++ dev_err(tps65910->dev, "could not be detected\n");
++ ret = -ENODEV;
++ goto err;
++ }
++
++	dev_info(tps65910->dev, "JTAGVERNUM 0x%x\n", buff);
++
++ if (buff & ~JTAGVERNUM_VERNUM_MASK) {
++ dev_err(tps65910->dev, "unknown version\n");
++ ret = -ENODEV;
++ goto err;
++ }
++
++ ret = mfd_add_devices(tps65910->dev, -1, tps65910s,
++ ARRAY_SIZE(tps65910s), NULL, 0);
+ if (ret < 0)
+ goto err;
+
+ init_data->irq = pmic_plat_data->irq;
+- init_data->irq_base = pmic_plat_data->irq;
++ init_data->irq_base = pmic_plat_data->irq_base;
+
+ tps65910_gpio_init(tps65910, pmic_plat_data->gpio_base);
+
+- ret = tps65910_irq_init(tps65910, init_data->irq, init_data);
+- if (ret < 0)
+- goto err;
++ tps65910_irq_init(tps65910, init_data->irq, init_data);
+
+ kfree(init_data);
+ return ret;
+
+ err:
+- mfd_remove_devices(tps65910->dev);
+ kfree(tps65910);
+ kfree(init_data);
+ return ret;
+diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
+index 29d12a7..2381220 100644
+--- a/drivers/misc/lis3lv02d/lis3lv02d.c
++++ b/drivers/misc/lis3lv02d/lis3lv02d.c
+@@ -80,6 +80,17 @@
+ #define LIS3_SENSITIVITY_12B ((LIS3_ACCURACY * 1000) / 1024)
+ #define LIS3_SENSITIVITY_8B (18 * LIS3_ACCURACY)
+
++/* Sensitivity values for -2G, -4G, -8G and +2G, +4G, +8G scale */
++#define LIS3DLH_SENSITIVITY_2G (LIS3_ACCURACY * 1)
++#define LIS3DLH_SENSITIVITY_4G (LIS3_ACCURACY * 2)
++#define LIS3DLH_SENSITIVITY_8G ((LIS3_ACCURACY * 39)/10)
++
++#define SHIFT_ADJ_2G 4
++#define SHIFT_ADJ_4G 3
++#define SHIFT_ADJ_8G 2
++
++#define FS_MASK (0x3 << 4)
++
+ #define LIS3_DEFAULT_FUZZ_12B 3
+ #define LIS3_DEFAULT_FLAT_12B 3
+ #define LIS3_DEFAULT_FUZZ_8B 1
+@@ -148,6 +159,12 @@ static inline int lis3lv02d_get_axis(s8 axis, int hw_values[3])
+ return -hw_values[-axis - 1];
+ }
+
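++/* Combine the low/high sample bytes and apply the range-dependent shift */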
++static int lis3lv02d_decode(u8 pl, u8 ph, int adj)
++{
++ s16 v = pl | ph << 8;
++ return (int) v >> adj;
++}
++
+ /**
+ * lis3lv02d_get_xyz - Get X, Y and Z axis values from the accelerometer
+ * @lis3: pointer to the device struct
+@@ -176,9 +193,24 @@ static void lis3lv02d_get_xyz(struct lis3lv02d *lis3, int *x, int *y, int *z)
+ position[i] = (s8)data[i * 2];
+ }
+ } else {
+- position[0] = lis3->read_data(lis3, OUTX);
+- position[1] = lis3->read_data(lis3, OUTY);
+- position[2] = lis3->read_data(lis3, OUTZ);
++ if (lis3_dev.whoami == WAI_3DLH) {
++ position[0] =
++ lis3lv02d_decode(lis3->read_data(lis3, OUTX_L),
++ lis3->read_data(lis3, OUTX_H),
++ lis3_dev.shift_adj);
++ position[1] =
++ lis3lv02d_decode(lis3->read_data(lis3, OUTY_L),
++ lis3->read_data(lis3, OUTY_H),
++ lis3_dev.shift_adj);
++ position[2] =
++ lis3lv02d_decode(lis3->read_data(lis3, OUTZ_L),
++ lis3->read_data(lis3, OUTZ_H),
++ lis3_dev.shift_adj);
++ } else {
++ position[0] = lis3->read_data(lis3, OUTX);
++ position[1] = lis3->read_data(lis3, OUTY);
++ position[2] = lis3->read_data(lis3, OUTZ);
++ }
+ }
+
+ for (i = 0; i < 3; i++)
+@@ -193,6 +225,7 @@ static void lis3lv02d_get_xyz(struct lis3lv02d *lis3, int *x, int *y, int *z)
+ static int lis3_12_rates[4] = {40, 160, 640, 2560};
+ static int lis3_8_rates[2] = {100, 400};
+ static int lis3_3dc_rates[16] = {0, 1, 10, 25, 50, 100, 200, 400, 1600, 5000};
++static int lis3_3dlh_rates[4] = {50, 100, 400, 1000};
+
+ /* ODR is Output Data Rate */
+ static int lis3lv02d_get_odr(struct lis3lv02d *lis3)
+@@ -265,7 +298,7 @@ static int lis3lv02d_selftest(struct lis3lv02d *lis3, s16 results[3])
+ (LIS3_IRQ1_DATA_READY | LIS3_IRQ2_DATA_READY));
+ }
+
+- if (lis3->whoami == WAI_3DC) {
++ if ((lis3_dev.whoami == WAI_3DC) || (lis3_dev.whoami == WAI_3DLH)) {
+ ctlreg = CTRL_REG4;
+ selftest = CTRL4_ST0;
+ } else {
+@@ -396,6 +429,8 @@ int lis3lv02d_poweron(struct lis3lv02d *lis3)
+ lis3->read(lis3, CTRL_REG2, &reg);
+ if (lis3->whoami == WAI_12B)
+ reg |= CTRL2_BDU | CTRL2_BOOT;
++ else if (lis3->whoami == WAI_3DLH)
++ reg |= CTRL2_BOOT_3DLH;
+ else
+ reg |= CTRL2_BOOT_8B;
+ lis3->write(lis3, CTRL_REG2, reg);
+@@ -724,6 +759,36 @@ void lis3lv02d_joystick_disable(struct lis3lv02d *lis3)
+ }
+ EXPORT_SYMBOL_GPL(lis3lv02d_joystick_disable);
+
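++/* Program the selected full-scale range and update the scale/shift values */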
++static void lis3lv02d_update_g_range(struct lis3lv02d *lis3)
++{
++ u8 reg;
++ u8 val;
++ u8 shift;
++
++ switch (lis3->g_range) {
++ case 8:
++ val = FS_8G_REGVAL;
++ shift = SHIFT_ADJ_8G;
++ lis3->scale = LIS3DLH_SENSITIVITY_8G;
++ break;
++ case 4:
++ val = FS_4G_REGVAL;
++ shift = SHIFT_ADJ_4G;
++ lis3->scale = LIS3DLH_SENSITIVITY_4G;
++ break;
++ case 2:
++ default:
++ val = FS_2G_REGVAL;
++ shift = SHIFT_ADJ_2G;
++ lis3->scale = LIS3DLH_SENSITIVITY_2G;
++ break;
++ }
++
++ lis3->shift_adj = shift;
++ lis3->read(lis3, CTRL_REG4, &reg);
++ lis3->write(lis3, CTRL_REG4, ((reg & ~FS_MASK) | val));
++}
++
+ /* Sysfs stuff */
+ static void lis3lv02d_sysfs_poweron(struct lis3lv02d *lis3)
+ {
+@@ -792,6 +857,13 @@ static ssize_t lis3lv02d_rate_show(struct device *dev,
+ return sprintf(buf, "%d\n", lis3lv02d_get_odr(lis3));
+ }
+
++static ssize_t lis3lv02d_range_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ lis3lv02d_sysfs_poweron(&lis3_dev);
++ return sprintf(buf, "%d\n", lis3_dev.g_range);
++}
++
+ static ssize_t lis3lv02d_rate_set(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+@@ -809,15 +881,33 @@ static ssize_t lis3lv02d_rate_set(struct device *dev,
+ return count;
+ }
+
++static ssize_t lis3lv02d_range_set(struct device *dev,
++ struct device_attribute *attr, const char *buf,
++ size_t count)
++{
++ unsigned long range;
++
++ if (strict_strtoul(buf, 0, &range))
++ return -EINVAL;
++
++ lis3_dev.g_range = range;
++ lis3lv02d_update_g_range(&lis3_dev);
++
++ return count;
++}
++
+ static DEVICE_ATTR(selftest, S_IRUSR, lis3lv02d_selftest_show, NULL);
+ static DEVICE_ATTR(position, S_IRUGO, lis3lv02d_position_show, NULL);
+ static DEVICE_ATTR(rate, S_IRUGO | S_IWUSR, lis3lv02d_rate_show,
+ lis3lv02d_rate_set);
++static DEVICE_ATTR(range, S_IRUGO | S_IWUSR, lis3lv02d_range_show,
++ lis3lv02d_range_set);
+
+ static struct attribute *lis3lv02d_attributes[] = {
+ &dev_attr_selftest.attr,
+ &dev_attr_position.attr,
+ &dev_attr_rate.attr,
++ &dev_attr_range.attr,
+ NULL
+ };
+
+@@ -954,6 +1044,19 @@ int lis3lv02d_init_device(struct lis3lv02d *lis3)
+ lis3->odr_mask = CTRL1_ODR0|CTRL1_ODR1|CTRL1_ODR2|CTRL1_ODR3;
+ lis3->scale = LIS3_SENSITIVITY_8B;
+ break;
++ case WAI_3DLH:
++ pr_info("8 bits 3DLH sensor found\n");
++ lis3->read_data = lis3lv02d_read_8;
++ lis3->mdps_max_val = 128;
++ lis3->pwron_delay = LIS3_PWRON_DELAY_WAI_8B;
++ lis3->odrs = lis3_3dlh_rates;
++ lis3->odr_mask = CTRL1_DR0 | CTRL1_DR1;
++ if (lis3->pdata) {
++ lis3->g_range = lis3->pdata->g_range;
++ lis3lv02d_update_g_range(lis3);
++ } else
++ lis3->scale = LIS3DLH_SENSITIVITY_2G;
++ break;
+ default:
+ pr_err("unknown sensor type 0x%X\n", lis3->whoami);
+ return -EINVAL;
+diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
+index 2b1482a..0e6fe06 100644
+--- a/drivers/misc/lis3lv02d/lis3lv02d.h
++++ b/drivers/misc/lis3lv02d/lis3lv02d.h
+@@ -95,13 +95,29 @@ enum lis3lv02d_reg {
+ DD_THSE_H = 0x3F,
+ };
+
++enum lis331dlh_reg {
++ CTRL_REG5 = 0x24,
++ HP_FILTER_RESET_3DLH = 0x25,
++ REFERENCE = 0x26,
++};
++
+ enum lis3_who_am_i {
++ WAI_3DLH = 0x32, /* 8 bits: LIS331DLH */
+ WAI_3DC = 0x33, /* 8 bits: LIS3DC, HP3DC */
+ WAI_12B = 0x3A, /* 12 bits: LIS3LV02D[LQ]... */
+ WAI_8B = 0x3B, /* 8 bits: LIS[23]02D[LQ]... */
+ WAI_6B = 0x52, /* 6 bits: LIS331DLF - not supported */
+ };
+
++enum lis3_type {
++ LIS3DC,
++ HP3DC,
++ LIS3LV02D,
++ LIS2302D,
++ LIS331DLF,
++ LIS331DLH,
++};
++
+ enum lis3lv02d_ctrl1_12b {
+ CTRL1_Xen = 0x01,
+ CTRL1_Yen = 0x02,
+@@ -129,6 +145,32 @@ enum lis3lv02d_ctrl1_3dc {
+ CTRL1_ODR3 = 0x80,
+ };
+
++enum lis331dlh_ctrl1 {
++ CTRL1_DR0 = 0x08,
++ CTRL1_DR1 = 0x10,
++ CTRL1_PM0 = 0x20,
++ CTRL1_PM1 = 0x40,
++ CTRL1_PM2 = 0x80,
++};
++
++enum lis331dlh_ctrl2 {
++ CTRL2_HPEN1 = 0x04,
++ CTRL2_HPEN2 = 0x08,
++ CTRL2_FDS_3DLH = 0x10,
++ CTRL2_BOOT_3DLH = 0x80,
++};
++
++enum lis331dlh_ctrl4 {
++ CTRL4_STSIGN = 0x08,
++ CTRL4_BLE = 0x40,
++ CTRL4_BDU = 0x80,
++};
++
++enum lis331dlh_ctrl5 {
++ CTRL5_TURNON0 = 0x01,
++ CTRL5_TURNON1 = 0x20,
++};
++
+ enum lis3lv02d_ctrl2 {
+ CTRL2_DAS = 0x01,
+ CTRL2_SIM = 0x02,
+@@ -148,6 +190,13 @@ enum lis3lv02d_ctrl4_3dc {
+ CTRL4_FS1 = 0x20,
+ };
+
++/* Measurement Range */
++enum lis3lv02d_fs {
++ FS_2G_REGVAL = 0x00,
++ FS_4G_REGVAL = 0x10,
++ FS_8G_REGVAL = 0x30,
++};
++
+ enum lis302d_ctrl2 {
+ HP_FF_WU2 = 0x08,
+ HP_FF_WU1 = 0x04,
+@@ -185,6 +234,10 @@ enum lis3lv02d_ff_wu_cfg {
+ FF_WU_CFG_AOI = 0x80,
+ };
+
++enum lis331dlh_ff_wu_cfg {
++ FF_WU_CFG_6D = 0x40,
++};
++
+ enum lis3lv02d_ff_wu_src {
+ FF_WU_SRC_XL = 0x01,
+ FF_WU_SRC_XH = 0x02,
+@@ -206,6 +259,10 @@ enum lis3lv02d_dd_cfg {
+ DD_CFG_IEND = 0x80,
+ };
+
++enum lis331dlh_dd_cfg {
++ DD_CFG_6D = 0x40,
++};
++
+ enum lis3lv02d_dd_src {
+ DD_SRC_XL = 0x01,
+ DD_SRC_XH = 0x02,
+@@ -282,6 +339,8 @@ struct lis3lv02d {
+
+ struct lis3lv02d_platform_data *pdata; /* for passing board config */
+ struct mutex mutex; /* Serialize poll and selftest */
++ u8 g_range; /* Hold the g range */
++ u8 shift_adj;
+ };
+
+ int lis3lv02d_init_device(struct lis3lv02d *lis3);
+diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+index c02fea0..32b322f 100644
+--- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
++++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+@@ -90,7 +90,10 @@ static int lis3_i2c_init(struct lis3lv02d *lis3)
+ if (ret < 0)
+ return ret;
+
+- reg |= CTRL1_PD0 | CTRL1_Xen | CTRL1_Yen | CTRL1_Zen;
++ if (lis3->whoami == WAI_3DLH)
++ reg |= CTRL1_PM0 | CTRL1_Xen | CTRL1_Yen | CTRL1_Zen;
++ else
++ reg |= CTRL1_PD0 | CTRL1_Xen | CTRL1_Yen | CTRL1_Zen;
+ return lis3->write(lis3, CTRL_REG1, reg);
+ }
+
+@@ -232,6 +235,7 @@ static int lis3_i2c_runtime_resume(struct device *dev)
+
+ static const struct i2c_device_id lis3lv02d_id[] = {
+ {"lis3lv02d", 0 },
++ {"lis331dlh", LIS331DLH},
+ {}
+ };
+
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index e15e47d..60e8951 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -1685,7 +1685,7 @@ static void mmc_blk_remove(struct mmc_card *card)
+ }
+
+ #ifdef CONFIG_PM
+-static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
++static int mmc_blk_suspend(struct mmc_card *card)
+ {
+ struct mmc_blk_data *part_md;
+ struct mmc_blk_data *md = mmc_get_drvdata(card);
+diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
+index 6be4924..20c1d4b 100644
+--- a/drivers/mmc/core/bus.c
++++ b/drivers/mmc/core/bus.c
+@@ -122,14 +122,14 @@ static int mmc_bus_remove(struct device *dev)
+ return 0;
+ }
+
+-static int mmc_bus_suspend(struct device *dev, pm_message_t state)
++static int mmc_bus_suspend(struct device *dev)
+ {
+ struct mmc_driver *drv = to_mmc_driver(dev->driver);
+ struct mmc_card *card = mmc_dev_to_card(dev);
+ int ret = 0;
+
+ if (dev->driver && drv->suspend)
+- ret = drv->suspend(card, state);
++ ret = drv->suspend(card);
+ return ret;
+ }
+
+@@ -165,20 +165,20 @@ static int mmc_runtime_idle(struct device *dev)
+ return pm_runtime_suspend(dev);
+ }
+
++#else /* !CONFIG_PM_RUNTIME */
++#define mmc_runtime_suspend NULL
++#define mmc_runtime_resume NULL
++#define mmc_runtime_idle NULL
++#endif /* !CONFIG_PM_RUNTIME */
++
+ static const struct dev_pm_ops mmc_bus_pm_ops = {
+ .runtime_suspend = mmc_runtime_suspend,
+ .runtime_resume = mmc_runtime_resume,
+ .runtime_idle = mmc_runtime_idle,
++ .suspend = mmc_bus_suspend,
++ .resume = mmc_bus_resume,
+ };
+
+-#define MMC_PM_OPS_PTR (&mmc_bus_pm_ops)
+-
+-#else /* !CONFIG_PM_RUNTIME */
+-
+-#define MMC_PM_OPS_PTR NULL
+-
+-#endif /* !CONFIG_PM_RUNTIME */
+-
+ static struct bus_type mmc_bus_type = {
+ .name = "mmc",
+ .dev_attrs = mmc_dev_attrs,
+@@ -186,9 +186,7 @@ static struct bus_type mmc_bus_type = {
+ .uevent = mmc_bus_uevent,
+ .probe = mmc_bus_probe,
+ .remove = mmc_bus_remove,
+- .suspend = mmc_bus_suspend,
+- .resume = mmc_bus_resume,
+- .pm = MMC_PM_OPS_PTR,
++ .pm = &mmc_bus_pm_ops,
+ };
+
+ int mmc_register_bus(void)
+diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
+index d5fe43d..ace26f7 100644
+--- a/drivers/mmc/host/omap_hsmmc.c
++++ b/drivers/mmc/host/omap_hsmmc.c
+@@ -59,6 +59,7 @@
+ #define OMAP_HSMMC_IE 0x0134
+ #define OMAP_HSMMC_ISE 0x0138
+ #define OMAP_HSMMC_CAPA 0x0140
++#define OMAP_HSMMC_PSTATE 0x0124
+
+ #define VS18 (1 << 26)
+ #define VS30 (1 << 25)
+@@ -78,7 +79,7 @@
+ #define CLKD_SHIFT 6
+ #define DTO_MASK 0x000F0000
+ #define DTO_SHIFT 16
+-#define INT_EN_MASK 0x307F0033
++#define INT_EN_MASK 0x307F00f3
+ #define BWR_ENABLE (1 << 4)
+ #define BRR_ENABLE (1 << 5)
+ #define DTO_ENABLE (1 << 20)
+@@ -89,6 +90,10 @@
+ #define MSBS (1 << 5)
+ #define BCE (1 << 1)
+ #define FOUR_BIT (1 << 1)
++#define DVAL_MASK (3 << 9)
++#define DVAL_MAX (3 << 9) /* 8.4 ms debounce period */
++#define WPP_MASK (1 << 8)
++#define WPP_ACT_LOW (1 << 8) /* WPP is Active Low */
+ #define DW8 (1 << 5)
+ #define CC 0x1
+ #define TC 0x02
+@@ -106,6 +111,14 @@
+ #define SRD (1 << 26)
+ #define SOFTRESET (1 << 1)
+ #define RESETDONE (1 << 0)
++#define CINS (1 << 6)
++#define PSTATE_CINS_MASK BIT(16)
++#define PSTATE_CINS_SHIFT 16
++#define PSTATE_WP_MASK BIT(19)
++#define PSTATE_WP_SHIFT 19
++#define IE_CINS 0x00000040
++#define IE_CINS_SHIFT 6
++#define PSTATE_CINS (1 << 16)
+
+ /*
+ * FIXME: Most likely all the data using these _DEVID defines should come
+@@ -120,7 +133,6 @@
+
+ #define MMC_AUTOSUSPEND_DELAY 100
+ #define MMC_TIMEOUT_MS 20
+-#define OMAP_MMC_MASTER_CLOCK 96000000
+ #define OMAP_MMC_MIN_CLOCK 400000
+ #define OMAP_MMC_MAX_CLOCK 52000000
+ #define DRIVER_NAME "omap_hsmmc"
+@@ -193,6 +205,8 @@ struct omap_hsmmc_host {
+ struct omap_mmc_platform_data *pdata;
+ };
+
++static irqreturn_t omap_hsmmc_cd_handler(int irq, void *dev_id);
++
+ static int omap_hsmmc_card_detect(struct device *dev, int slot)
+ {
+ struct omap_mmc_platform_data *mmc = dev->platform_data;
+@@ -224,6 +238,7 @@ static int omap_hsmmc_suspend_cdirq(struct device *dev, int slot)
+ struct omap_mmc_platform_data *mmc = dev->platform_data;
+
+ disable_irq(mmc->slots[0].card_detect_irq);
++
+ return 0;
+ }
+
+@@ -232,6 +247,7 @@ static int omap_hsmmc_resume_cdirq(struct device *dev, int slot)
+ struct omap_mmc_platform_data *mmc = dev->platform_data;
+
+ enable_irq(mmc->slots[0].card_detect_irq);
++
+ return 0;
+ }
+
+@@ -592,18 +608,20 @@ static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host,
+
+ static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)
+ {
++
+ OMAP_HSMMC_WRITE(host->base, ISE, 0);
+ OMAP_HSMMC_WRITE(host->base, IE, 0);
+ OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
++
+ }
+
+ /* Calculate divisor for the given clock frequency */
+-static u16 calc_divisor(struct mmc_ios *ios)
++static u16 calc_divisor(struct omap_hsmmc_host *host, struct mmc_ios *ios)
+ {
+ u16 dsor = 0;
+
+ if (ios->clock) {
+- dsor = DIV_ROUND_UP(OMAP_MMC_MASTER_CLOCK, ios->clock);
++ dsor = DIV_ROUND_UP(clk_get_rate(host->fclk), ios->clock);
+ if (dsor > 250)
+ dsor = 250;
+ }
+@@ -623,7 +641,7 @@ static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host)
+
+ regval = OMAP_HSMMC_READ(host->base, SYSCTL);
+ regval = regval & ~(CLKD_MASK | DTO_MASK);
+- regval = regval | (calc_divisor(ios) << 6) | (DTO << 16);
++ regval = regval | (calc_divisor(host, ios) << 6) | (DTO << 16);
+ OMAP_HSMMC_WRITE(host->base, SYSCTL, regval);
+ OMAP_HSMMC_WRITE(host->base, SYSCTL,
+ OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);
+@@ -1088,7 +1106,7 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
+ __func__);
+ }
+
+-static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
++static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status, int irq)
+ {
+ struct mmc_data *data;
+ int end_cmd = 0, end_trans = 0;
+@@ -1170,7 +1188,7 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
+
+ status = OMAP_HSMMC_READ(host->base, STAT);
+ do {
+- omap_hsmmc_do_irq(host, status);
++ omap_hsmmc_do_irq(host, status, irq);
+ /* Flush posted write */
+ status = OMAP_HSMMC_READ(host->base, STAT);
+ } while (status & INT_EN_MASK);
+@@ -1338,23 +1356,30 @@ static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
+ struct scatterlist *sgl)
+ {
+ int blksz, nblk, dma_ch;
++ int bindex = 0, cindex = 0;
++ struct omap_mmc_platform_data *pdata = host->pdata;
+
+ dma_ch = host->dma_ch;
++ blksz = host->data->blksz;
++ nblk = sg_dma_len(sgl) / blksz;
++
++ if (pdata->version == MMC_CTRL_VERSION_2) {
++ bindex = 4;
++ cindex = blksz;
++ }
++
+ if (data->flags & MMC_DATA_WRITE) {
+ omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
+ (host->mapbase + OMAP_HSMMC_DATA), 0, 0);
+ omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
+- sg_dma_address(sgl), 0, 0);
++ sg_dma_address(sgl), bindex, cindex);
+ } else {
+ omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
+ (host->mapbase + OMAP_HSMMC_DATA), 0, 0);
+ omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
+- sg_dma_address(sgl), 0, 0);
++ sg_dma_address(sgl), bindex, cindex);
+ }
+
+- blksz = host->data->blksz;
+- nblk = sg_dma_len(sgl) / blksz;
+-
+ omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32,
+ blksz / 4, nblk, OMAP_DMA_SYNC_FRAME,
+ omap_hsmmc_get_dma_sync_dev(host, data),
+@@ -1370,12 +1395,19 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
+ {
+ struct omap_hsmmc_host *host = cb_data;
+ struct mmc_data *data;
++ struct omap_mmc_platform_data *pdata = host->pdata;
+ int dma_ch, req_in_progress;
+
+- if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
+- dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n",
+- ch_status);
+- return;
++
++ if (pdata->version == MMC_CTRL_VERSION_2) {
++ if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ)
++ dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n");
++ } else {
++ if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
++ dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n",
++ ch_status);
++ return;
++ }
+ }
+
+ spin_lock(&host->irq_lock);
+@@ -1498,11 +1530,8 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
+ return 0;
+ }
+
+-static void set_data_timeout(struct omap_hsmmc_host *host,
+- unsigned int timeout_ns,
+- unsigned int timeout_clks)
++static void set_data_timeout(struct omap_hsmmc_host *host)
+ {
+- unsigned int timeout, cycle_ns;
+ uint32_t reg, clkd, dto = 0;
+
+ reg = OMAP_HSMMC_READ(host->base, SYSCTL);
+@@ -1510,25 +1539,11 @@ static void set_data_timeout(struct omap_hsmmc_host *host,
+ if (clkd == 0)
+ clkd = 1;
+
+- cycle_ns = 1000000000 / (clk_get_rate(host->fclk) / clkd);
+- timeout = timeout_ns / cycle_ns;
+- timeout += timeout_clks;
+- if (timeout) {
+- while ((timeout & 0x80000000) == 0) {
+- dto += 1;
+- timeout <<= 1;
+- }
+- dto = 31 - dto;
+- timeout <<= 1;
+- if (timeout && dto)
+- dto += 1;
+- if (dto >= 13)
+- dto -= 13;
+- else
+- dto = 0;
+- if (dto > 14)
+- dto = 14;
+- }
++	/*
++	 * Use the maximum data timeout value allowed by the standard,
++	 * 14 (0xE), to avoid SD card timeouts.
++	 */
++	dto = 14;
+
+ reg &= ~DTO_MASK;
+ reg |= dto << DTO_SHIFT;
+@@ -1551,13 +1566,13 @@ omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req)
+ * busy signal.
+ */
+ if (req->cmd->flags & MMC_RSP_BUSY)
+- set_data_timeout(host, 100000000U, 0);
++ set_data_timeout(host);
+ return 0;
+ }
+
+ OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz)
+ | (req->data->blocks << 16));
+- set_data_timeout(host, req->data->timeout_ns, req->data->timeout_clks);
++ set_data_timeout(host);
+
+ if (host->use_dma) {
+ ret = omap_hsmmc_start_dma_transfer(host, req);
+@@ -1649,6 +1664,8 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+ {
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+ int do_send_init_stream = 0;
++ struct omap_mmc_platform_data *pdata = host->pdata;
++ u32 regVal;
+
+ pm_runtime_get_sync(host->dev);
+
+@@ -1673,6 +1690,18 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+
+ /* FIXME: set registers based only on changes to ios */
+
++ if (pdata->version == MMC_CTRL_VERSION_2) {
++ /*
++		 * Set:
++		 * - the debounce filter value to the maximum
++		 * - the write protect polarity to active low
++ */
++ regVal = OMAP_HSMMC_READ(host->base, CON);
++ regVal &= ~(DVAL_MASK | WPP_MASK);
++ regVal |= (DVAL_MAX | WPP_ACT_LOW);
++ OMAP_HSMMC_WRITE(host->base, CON, regVal);
++ }
++
+ omap_hsmmc_set_bus_width(host);
+
+ if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
+@@ -1869,7 +1898,7 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
+ struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
+ struct mmc_host *mmc;
+ struct omap_hsmmc_host *host = NULL;
+- struct resource *res;
++ struct resource *res, *dma_tx, *dma_rx;
+ int ret, irq;
+
+ if (pdata == NULL) {
+@@ -1974,7 +2003,10 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
+
+ /* Since we do only SG emulation, we can have as many segs
+ * as we want. */
+- mmc->max_segs = 1024;
++ if (pdata->version == MMC_CTRL_VERSION_2)
++ mmc->max_segs = 1;
++ else
++ mmc->max_segs = 1024;
+
+ mmc->max_blk_size = 512; /* Block Length at max can be 1024 */
+ mmc->max_blk_count = 0xFFFF; /* No. of Blocks is 16 bits */
+@@ -1991,33 +2023,54 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
+ if (mmc_slot(host).nonremovable)
+ mmc->caps |= MMC_CAP_NONREMOVABLE;
+
++ mmc->pm_caps = mmc_slot(host).pm_caps;
++
+ omap_hsmmc_conf_bus_power(host);
+
+ /* Select DMA lines */
+- switch (host->id) {
+- case OMAP_MMC1_DEVID:
+- host->dma_line_tx = OMAP24XX_DMA_MMC1_TX;
+- host->dma_line_rx = OMAP24XX_DMA_MMC1_RX;
+- break;
+- case OMAP_MMC2_DEVID:
+- host->dma_line_tx = OMAP24XX_DMA_MMC2_TX;
+- host->dma_line_rx = OMAP24XX_DMA_MMC2_RX;
+- break;
+- case OMAP_MMC3_DEVID:
+- host->dma_line_tx = OMAP34XX_DMA_MMC3_TX;
+- host->dma_line_rx = OMAP34XX_DMA_MMC3_RX;
+- break;
+- case OMAP_MMC4_DEVID:
+- host->dma_line_tx = OMAP44XX_DMA_MMC4_TX;
+- host->dma_line_rx = OMAP44XX_DMA_MMC4_RX;
+- break;
+- case OMAP_MMC5_DEVID:
+- host->dma_line_tx = OMAP44XX_DMA_MMC5_TX;
+- host->dma_line_rx = OMAP44XX_DMA_MMC5_RX;
+- break;
+- default:
+- dev_err(mmc_dev(host->mmc), "Invalid MMC id\n");
+- goto err_irq;
++ if (pdata->version == MMC_CTRL_VERSION_2) {
++ dma_rx = platform_get_resource_byname(pdev,
++ IORESOURCE_DMA, "rx");
++ if (!dma_rx) {
++ ret = -EINVAL;
++ goto err1;
++ }
++
++ dma_tx = platform_get_resource_byname(pdev,
++ IORESOURCE_DMA, "tx");
++ if (!dma_tx) {
++ ret = -EINVAL;
++ goto err1;
++ }
++ host->dma_line_tx = dma_tx->start;
++ host->dma_line_rx = dma_rx->start;
++
++ } else {
++ switch (host->id) {
++ case OMAP_MMC1_DEVID:
++ host->dma_line_tx = OMAP24XX_DMA_MMC1_TX;
++ host->dma_line_rx = OMAP24XX_DMA_MMC1_RX;
++ break;
++ case OMAP_MMC2_DEVID:
++ host->dma_line_tx = OMAP24XX_DMA_MMC2_TX;
++ host->dma_line_rx = OMAP24XX_DMA_MMC2_RX;
++ break;
++ case OMAP_MMC3_DEVID:
++ host->dma_line_tx = OMAP34XX_DMA_MMC3_TX;
++ host->dma_line_rx = OMAP34XX_DMA_MMC3_RX;
++ break;
++ case OMAP_MMC4_DEVID:
++ host->dma_line_tx = OMAP44XX_DMA_MMC4_TX;
++ host->dma_line_rx = OMAP44XX_DMA_MMC4_RX;
++ break;
++ case OMAP_MMC5_DEVID:
++ host->dma_line_tx = OMAP44XX_DMA_MMC5_TX;
++ host->dma_line_rx = OMAP44XX_DMA_MMC5_RX;
++ break;
++ default:
++ dev_err(mmc_dev(host->mmc), "Invalid MMC id\n");
++ goto err_irq;
++ }
+ }
+
+ /* Request IRQ for MMC operations */
+@@ -2179,13 +2232,7 @@ static int omap_hsmmc_suspend(struct device *dev)
+ cancel_work_sync(&host->mmc_carddetect_work);
+ ret = mmc_suspend_host(host->mmc);
+
+- if (ret == 0) {
+- omap_hsmmc_disable_irq(host);
+- OMAP_HSMMC_WRITE(host->base, HCTL,
+- OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
+- if (host->got_dbclk)
+- clk_disable(host->dbclk);
+- } else {
++ if (ret) {
+ host->suspended = 0;
+ if (host->pdata->resume) {
+ ret = host->pdata->resume(&pdev->dev,
+@@ -2194,9 +2241,21 @@ static int omap_hsmmc_suspend(struct device *dev)
+ dev_dbg(mmc_dev(host->mmc),
+ "Unmask interrupt failed\n");
+ }
++ ret = -EBUSY;
++ goto err;
+ }
+- pm_runtime_put_sync(host->dev);
++
++ if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
++ omap_hsmmc_disable_irq(host);
++ OMAP_HSMMC_WRITE(host->base, HCTL,
++ OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
++ }
++ if (host->got_dbclk)
++ clk_disable(host->dbclk);
++
+ }
++err:
++ pm_runtime_put_sync(host->dev);
+ return ret;
+ }
+
+@@ -2216,7 +2275,8 @@ static int omap_hsmmc_resume(struct device *dev)
+ if (host->got_dbclk)
+ clk_enable(host->dbclk);
+
+- omap_hsmmc_conf_bus_power(host);
++ if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
++ omap_hsmmc_conf_bus_power(host);
+
+ if (host->pdata->resume) {
+ ret = host->pdata->resume(&pdev->dev, host->slot_id);
+diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
+index 56c7cd4..c9e14a0 100644
+--- a/drivers/mtd/devices/Makefile
++++ b/drivers/mtd/devices/Makefile
+@@ -18,5 +18,6 @@ obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
+ obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
+ obj-$(CONFIG_MTD_M25P80) += m25p80.o
+ obj-$(CONFIG_MTD_SST25L) += sst25l.o
++obj-$(CONFIG_MTD_NAND_OMAP2) += omap2_elm.o
+
+-CFLAGS_docg3.o += -I$(src)
+\ No newline at end of file
++CFLAGS_docg3.o += -I$(src)
+diff --git a/drivers/mtd/devices/omap2_elm.c b/drivers/mtd/devices/omap2_elm.c
+new file mode 100644
+index 0000000..99e6458
+--- /dev/null
++++ b/drivers/mtd/devices/omap2_elm.c
+@@ -0,0 +1,381 @@
++/*
++ * OMAP2 Error Location Module
++ *
++ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/platform_device.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/mtd/mtd.h>
++#include <linux/mtd/partitions.h>
++#include <linux/io.h>
++#include <linux/pm_runtime.h>
++
++#include <plat/elm.h>
++
++#define ELM_SYSCONFIG 0x010
++#define ELM_SYSSTATUS 0x014
++#define ELM_IRQSTATUS 0x018
++#define ELM_IRQENABLE 0x01c
++#define ELM_LOCATION_CONFIG 0x020
++#define ELM_PAGE_CTRL 0x080
++#define ELM_SYNDROME_FRAGMENT_0 0x400
++#define ELM_SYNDROME_FRAGMENT_1 0x404
++#define ELM_SYNDROME_FRAGMENT_2 0x408
++#define ELM_SYNDROME_FRAGMENT_3 0x40c
++#define ELM_SYNDROME_FRAGMENT_4 0x410
++#define ELM_SYNDROME_FRAGMENT_5 0x414
++#define ELM_SYNDROME_FRAGMENT_6 0x418
++#define ELM_LOCATION_STATUS 0x800
++#define ELM_ERROR_LOCATION_0 0x880
++#define ELM_ERROR_LOCATION_1 0x884
++#define ELM_ERROR_LOCATION_2 0x888
++#define ELM_ERROR_LOCATION_3 0x88c
++#define ELM_ERROR_LOCATION_4 0x890
++#define ELM_ERROR_LOCATION_5 0x894
++#define ELM_ERROR_LOCATION_6 0x898
++#define ELM_ERROR_LOCATION_7 0x89c
++#define ELM_ERROR_LOCATION_8 0x8a0
++#define ELM_ERROR_LOCATION_9 0x8a4
++#define ELM_ERROR_LOCATION_10 0x8a8
++#define ELM_ERROR_LOCATION_11 0x8ac
++#define ELM_ERROR_LOCATION_12 0x8b0
++#define ELM_ERROR_LOCATION_13 0x8b4
++#define ELM_ERROR_LOCATION_14 0x8b8
++#define ELM_ERROR_LOCATION_15 0x8bc
++
++/* ELM System Configuration Register */
++#define ELM_SYSCONFIG_SOFTRESET BIT(1)
++#define ELM_SYSCONFIG_SIDLE_MASK (3 << 3)
++#define ELM_SYSCONFIG_SMART_IDLE (2 << 3)
++
++/* ELM System Status Register */
++#define ELM_SYSSTATUS_RESETDONE BIT(0)
++
++/* ELM Interrupt Status Register */
++#define INTR_STATUS_PAGE_VALID BIT(8)
++#define INTR_STATUS_LOC_VALID_7 BIT(7)
++#define INTR_STATUS_LOC_VALID_6 BIT(6)
++#define INTR_STATUS_LOC_VALID_5 BIT(5)
++#define INTR_STATUS_LOC_VALID_4 BIT(4)
++#define INTR_STATUS_LOC_VALID_3 BIT(3)
++#define INTR_STATUS_LOC_VALID_2 BIT(2)
++#define INTR_STATUS_LOC_VALID_1 BIT(1)
++#define INTR_STATUS_LOC_VALID_0 BIT(0)
++
++/* ELM Interrupt Enable Register */
++#define INTR_EN_PAGE_MASK BIT(8)
++#define INTR_EN_LOCATION_MASK_7 BIT(7)
++#define INTR_EN_LOCATION_MASK_6 BIT(6)
++#define INTR_EN_LOCATION_MASK_5 BIT(5)
++#define INTR_EN_LOCATION_MASK_4 BIT(4)
++#define INTR_EN_LOCATION_MASK_3 BIT(3)
++#define INTR_EN_LOCATION_MASK_2 BIT(2)
++#define INTR_EN_LOCATION_MASK_1 BIT(1)
++#define INTR_EN_LOCATION_MASK_0 BIT(0)
++
++/* ELM Location Configuration Register */
++#define ECC_SIZE_MASK (0x7ff << 16)
++#define ECC_BCH_LEVEL_MASK (0x3 << 0)
++#define ECC_BCH4_LEVEL (0x0 << 0)
++#define ECC_BCH8_LEVEL (0x1 << 0)
++#define ECC_BCH16_LEVEL (0x2 << 0)
++
++/* ELM Page Definition Register */
++#define PAGE_MODE_SECTOR_7 BIT(7)
++#define PAGE_MODE_SECTOR_6 BIT(6)
++#define PAGE_MODE_SECTOR_5 BIT(5)
++#define PAGE_MODE_SECTOR_4 BIT(4)
++#define PAGE_MODE_SECTOR_3 BIT(3)
++#define PAGE_MODE_SECTOR_2 BIT(2)
++#define PAGE_MODE_SECTOR_1 BIT(1)
++#define PAGE_MODE_SECTOR_0 BIT(0)
++
++/* ELM syndrome */
++#define ELM_SYNDROME_VALID BIT(16)
++
++/* ELM_LOCATION_STATUS Register */
++#define ECC_CORRECTABLE_MASK BIT(8)
++#define ECC_NB_ERRORS_MASK (0x1f << 0)
++
++/* ELM_ERROR_LOCATION_0-15 Registers */
++#define ECC_ERROR_LOCATION_MASK (0x1fff << 0)
++
++#define OMAP_ECC_SIZE (0x7ff)
++
++#define DRIVER_NAME "omap2_elm"
++
++static void __iomem *elm_base;
++static struct completion elm_completion;
++static struct mtd_info *mtd;
++static int bch_scheme;
++
++static void elm_write_reg(int idx, u32 val)
++{
++ writel(val, elm_base + idx);
++}
++
++static u32 elm_read_reg(int idx)
++{
++ return readl(elm_base + idx);
++}
++
++/**
++ * omap_elm_config - Configure ELM for BCH ECC scheme
++ * @bch_type: type of BCH ECC scheme
++ */
++void omap_elm_config(int bch_type)
++{
++ u32 reg_val;
++ u32 buffer_size = OMAP_ECC_SIZE;
++
++ reg_val = (bch_type & ECC_BCH_LEVEL_MASK) | (buffer_size << 16);
++ elm_write_reg(ELM_LOCATION_CONFIG, reg_val);
++
++ /* clearing interrupts */
++ reg_val = elm_read_reg(ELM_IRQSTATUS);
++ elm_write_reg(ELM_IRQSTATUS, reg_val & INTR_STATUS_LOC_VALID_0);
++ elm_write_reg(ELM_IRQSTATUS, INTR_STATUS_LOC_VALID_0);
++
++ /* enable in interrupt mode */
++ reg_val = elm_read_reg(ELM_IRQENABLE);
++ reg_val |= INTR_EN_LOCATION_MASK_0;
++ elm_write_reg(ELM_IRQENABLE, reg_val);
++
++ /* config in Continuous mode */
++ reg_val = elm_read_reg(ELM_PAGE_CTRL);
++ reg_val &= ~PAGE_MODE_SECTOR_0;
++ elm_write_reg(ELM_PAGE_CTRL, reg_val);
++}
++
++/**
++ * omap_configure_elm - Configure ELM for BCH ECC scheme
++ * @mtd_info: mtd info structure
++ * @bch_type: type of BCH ECC scheme
++ *
++ * Configures the ELM module to support BCH error correction
++ */
++void omap_configure_elm(struct mtd_info *mtd_info, int bch_type)
++{
++ omap_elm_config(bch_type);
++ mtd = mtd_info;
++ bch_scheme = bch_type;
++}
++EXPORT_SYMBOL(omap_configure_elm);
++
++/**
++ * omap_elm_load_syndrome - Load ELM syndrome reg
++ * @bch_type: type of BCH ECC scheme
++ * @syndrome: Syndrome polynomial
++ *
++ * Load the syndrome polynomial to syndrome registers
++ */
++void omap_elm_load_syndrome(int bch_type, char *syndrome)
++{
++ int reg_val;
++ int i;
++
++ for (i = 0; i < 4; i++) {
++ reg_val = syndrome[0] | syndrome[1] << 8 |
++ syndrome[2] << 16 | syndrome[3] << 24;
++ elm_write_reg(ELM_SYNDROME_FRAGMENT_0 + i * 4, reg_val);
++ syndrome += 4;
++ }
++}
++
++/**
++ * omap_elm_start_processing - Start calculating error location
++ */
++void omap_elm_start_processing(void)
++{
++ u32 reg_val;
++
++ reg_val = elm_read_reg(ELM_SYNDROME_FRAGMENT_6);
++ reg_val |= ELM_SYNDROME_VALID;
++ elm_write_reg(ELM_SYNDROME_FRAGMENT_6, reg_val);
++}
++
++void rotate_ecc_bytes(u8 *src, u8 *dst)
++{
++ int i;
++
++ for (i = 0; i < BCH8_ECC_OOB_BYTES; i++)
++ dst[BCH8_ECC_OOB_BYTES - 1 - i] = src[i];
++}
++
++/**
++ * omap_elm_decode_bch_error - Locate error pos
++ * @bch_type: Type of BCH ECC scheme
++ * @ecc_calc: Calculated ECC bytes from GPMC
++ * @err_loc: Error location bytes
++ */
++int omap_elm_decode_bch_error(int bch_type, char *ecc_calc,
++ unsigned int *err_loc)
++{
++ u8 ecc_data[BCH_MAX_ECC_BYTES_PER_SECTOR] = {0};
++ u32 reg_val;
++ int i, err_no;
++
++ rotate_ecc_bytes(ecc_calc, ecc_data);
++ omap_elm_load_syndrome(bch_type, ecc_data);
++ omap_elm_start_processing();
++ wait_for_completion(&elm_completion);
++ reg_val = elm_read_reg(ELM_LOCATION_STATUS);
++
++ if (reg_val & ECC_CORRECTABLE_MASK) {
++ err_no = reg_val & ECC_NB_ERRORS_MASK;
++
++ for (i = 0; i < err_no; i++) {
++ reg_val = elm_read_reg(ELM_ERROR_LOCATION_0 + i * 4);
++ err_loc[i] = reg_val;
++ }
++
++ return err_no;
++ }
++
++ return -EINVAL;
++}
++EXPORT_SYMBOL(omap_elm_decode_bch_error);
++
++static irqreturn_t omap_elm_isr(int this_irq, void *dev_id)
++{
++ u32 reg_val;
++
++ reg_val = elm_read_reg(ELM_IRQSTATUS);
++
++ if (reg_val & INTR_STATUS_LOC_VALID_0) {
++ elm_write_reg(ELM_IRQSTATUS, reg_val & INTR_STATUS_LOC_VALID_0);
++ complete(&elm_completion);
++ return IRQ_HANDLED;
++ }
++
++ return IRQ_NONE;
++}
++
++static int omap_elm_probe(struct platform_device *pdev)
++{
++ int ret_status = 0;
++ struct resource *res = NULL, *irq = NULL;
++
++ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
++
++ if (irq == NULL)
++ return -EINVAL;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++ if (res == NULL)
++ return -EINVAL;
++
++ if (!request_mem_region(res->start, resource_size(res),
++ dev_name(&pdev->dev)))
++ return -EBUSY;
++
++ elm_base = ioremap(res->start, resource_size(res));
++
++ if (!elm_base) {
++ dev_dbg(&pdev->dev, "can't ioremap\n");
++ ret_status = -ENOMEM;
++ goto err_remap;
++ }
++
++ pm_runtime_enable(&pdev->dev);
++ if (pm_runtime_get_sync(&pdev->dev)) {
++ ret_status = -EINVAL;
++ dev_dbg(&pdev->dev, "can't enable clock\n");
++ goto err_clk;
++ }
++
++ ret_status = request_irq(irq->start, omap_elm_isr, 0, pdev->name,
++ &pdev->dev);
++
++ if (ret_status) {
++ pr_err("failure requesting irq %i\n", irq->start);
++ goto err_irq;
++ }
++
++ init_completion(&elm_completion);
++ return ret_status;
++
++err_irq:
++ pm_runtime_put_sync(&pdev->dev);
++ pm_runtime_disable(&pdev->dev);
++err_clk:
++ iounmap(elm_base);
++err_remap:
++ release_mem_region(res->start, resource_size(res));
++ return ret_status;
++}
++
++static int omap_elm_remove(struct platform_device *pdev)
++{
++ struct resource *res = NULL;
++
++ iounmap(elm_base);
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ release_mem_region(res->start, resource_size(res));
++ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
++ free_irq(res->start, &pdev->dev);
++ pm_runtime_put_sync(&pdev->dev);
++ pm_runtime_disable(&pdev->dev);
++ return 0;
++}
++
++
++#ifdef CONFIG_PM
++static int omap_elm_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ if (mtd && mtd->suspend)
++ mtd->suspend(mtd);
++ pm_runtime_put_sync(&pdev->dev);
++ return 0;
++}
++
++static int omap_elm_resume(struct platform_device *pdev)
++{
++ pm_runtime_get_sync(&pdev->dev);
++ /* Restore ELM context by configuring */
++ omap_elm_config(bch_scheme);
++ return 0;
++}
++#endif
++
++static struct platform_driver omap_elm_driver = {
++ .probe = omap_elm_probe,
++ .remove = omap_elm_remove,
++#ifdef CONFIG_PM
++ .suspend = omap_elm_suspend,
++ .resume = omap_elm_resume,
++#endif
++ .driver = {
++ .name = DRIVER_NAME,
++ .owner = THIS_MODULE,
++ },
++};
++
++static int __init omap_elm_init(void)
++{
++
++ return platform_driver_register(&omap_elm_driver);
++}
++
++static void __exit omap_elm_exit(void)
++{
++ platform_driver_unregister(&omap_elm_driver);
++}
++
++module_init(omap_elm_init);
++module_exit(omap_elm_exit);
++
++MODULE_ALIAS("platform:" DRIVER_NAME);
++MODULE_LICENSE("GPL");
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
+index 3ed9c5e..a5df3ec 100644
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -724,6 +724,8 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+
++ dmb();
++
+ /* This applies to read commands */
+ default:
+ /*
+@@ -1993,6 +1995,7 @@ static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *p = buf;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+
++ memset(ecc_calc, 0, eccsteps * eccbytes);
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+ chip->write_buf(mtd, p, eccsize);
+diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
+index f745f00..a2d3e29 100644
+--- a/drivers/mtd/nand/omap2.c
++++ b/drivers/mtd/nand/omap2.c
+@@ -24,6 +24,7 @@
+ #include <plat/dma.h>
+ #include <plat/gpmc.h>
+ #include <plat/nand.h>
++#include <plat/elm.h>
+
+ #define DRIVER_NAME "omap2-nand"
+ #define OMAP_NAND_TIMEOUT_MS 5000
+@@ -95,6 +96,13 @@
+ #define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
+ #define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
+
++#define MAX_HWECC_BYTES_OOB_64 24
++#define JFFS2_CLEAN_MARKER_OFFSET 0x2
++
++#define BCH_ECC_POS 0x2
++#define BCH_JFFS2_CLEAN_MARKER_OFFSET 0x3a
++#define OMAP_BCH8_ECC_SECT_BYTES 14
++
+ /* oob info generated runtime depending on ecc algorithm and layout selected */
+ static struct nand_ecclayout omap_oobinfo;
+ /* Define some generic bad / good block scan pattern which are used
+@@ -126,7 +134,10 @@ struct omap_nand_info {
+ OMAP_NAND_IO_WRITE, /* write */
+ } iomode;
+ u_char *buf;
+- int buf_len;
++ int buf_len;
++ int ecc_opt;
++ int (*ctrlr_suspend) (void);
++ int (*ctrlr_resume) (void);
+ };
+
+ /**
+@@ -783,6 +794,76 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
+ }
+
+ /**
++ * omap_read_page_bch - BCH ecc based page read function
++ * @mtd: mtd info structure
++ * @chip: nand chip info structure
++ * @buf: buffer to store read data
++ * @page: page number to read
++ *
++ * For BCH ECC scheme, GPMC used for syndrome calculation and ELM module
++ * used for error correction.
++ */
++static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
++ uint8_t *buf, int page)
++{
++ int i, eccsize = chip->ecc.size;
++ int eccbytes = chip->ecc.bytes;
++ int eccsteps = chip->ecc.steps;
++ uint8_t *p = buf;
++ uint8_t *ecc_calc = chip->buffers->ecccalc;
++ uint8_t *ecc_code = chip->buffers->ecccode;
++ uint32_t *eccpos = chip->ecc.layout->eccpos;
++ uint8_t *oob = &chip->oob_poi[eccpos[0]];
++ uint32_t data_pos;
++ uint32_t oob_pos;
++
++ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
++ mtd);
++ data_pos = 0;
++ /* oob area start */
++ oob_pos = (eccsize * eccsteps) + chip->ecc.layout->eccpos[0];
++
++ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize,
++ oob += eccbytes) {
++ chip->ecc.hwctl(mtd, NAND_ECC_READ);
++ /* read data */
++ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_pos, page);
++ chip->read_buf(mtd, p, eccsize);
++
++ /* read respective ecc from oob area */
++ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_pos, page);
++
++ if (info->ecc_opt == OMAP_ECC_BCH8_CODE_HW)
++ chip->read_buf(mtd, oob, 13);
++ else
++ chip->read_buf(mtd, oob, eccbytes);
++ /* read syndrome */
++ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
++
++ data_pos += eccsize;
++ oob_pos += eccbytes;
++ }
++
++ for (i = 0; i < chip->ecc.total; i++)
++ ecc_code[i] = chip->oob_poi[eccpos[i]];
++
++ eccsteps = chip->ecc.steps;
++ p = buf;
++
++ for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
++ int stat;
++
++ stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
++
++ if (stat < 0)
++ mtd->ecc_stats.failed++;
++ else
++ mtd->ecc_stats.corrected += stat;
++ }
++ return 0;
++}
++
++/**
+ * omap_correct_data - Compares the ECC read with HW generated ECC
+ * @mtd: MTD device structure
+ * @dat: page data
+@@ -803,6 +884,8 @@ static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
+ mtd);
+ int blockCnt = 0, i = 0, ret = 0;
+ int stat = 0;
++ int j, eccsize, eccflag, count;
++ unsigned int err_loc[8];
+
+ /* Ex NAND_ECC_HW12_2048 */
+ if ((info->nand.ecc.mode == NAND_ECC_HW) &&
+@@ -811,17 +894,64 @@ static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
+ else
+ blockCnt = 1;
+
+- for (i = 0; i < blockCnt; i++) {
+- if (memcmp(read_ecc, calc_ecc, 3) != 0) {
+- ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
+- if (ret < 0)
+- return ret;
+- /* keep track of the number of corrected errors */
+- stat += ret;
++ switch (info->ecc_opt) {
++ case OMAP_ECC_HAMMING_CODE_HW:
++ case OMAP_ECC_HAMMING_CODE_HW_ROMCODE:
++ for (i = 0; i < blockCnt; i++) {
++ if (memcmp(read_ecc, calc_ecc, 3) != 0) {
++ ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
++ if (ret < 0)
++ return ret;
++
++ /* keep track of number of corrected errors */
++ stat += ret;
++ }
++ read_ecc += 3;
++ calc_ecc += 3;
++ dat += 512;
+ }
+- read_ecc += 3;
+- calc_ecc += 3;
+- dat += 512;
++ break;
++ case OMAP_ECC_BCH8_CODE_HW:
++ eccsize = BCH8_ECC_OOB_BYTES;
++
++ for (i = 0; i < blockCnt; i++) {
++ eccflag = 0;
++ /* check if area is flashed */
++ for (j = 0; (j < eccsize) && (eccflag == 0); j++)
++ if (read_ecc[j] != 0xFF)
++ eccflag = 1;
++
++ if (eccflag == 1) {
++ eccflag = 0;
++ /* check if any ecc error */
++ for (j = 0; (j < eccsize) && (eccflag == 0);
++ j++)
++ if (calc_ecc[j] != 0)
++ eccflag = 1;
++ }
++
++ count = 0;
++ if (eccflag == 1)
++ count = omap_elm_decode_bch_error(0, calc_ecc,
++ err_loc);
++
++ for (j = 0; j < count; j++) {
++ u32 bit_pos, byte_pos;
++
++ bit_pos = err_loc[j] % 8;
++ byte_pos = (BCH8_ECC_MAX - err_loc[j] - 1) / 8;
++ if (err_loc[j] < BCH8_ECC_MAX)
++ dat[byte_pos] ^=
++ 1 << bit_pos;
++				/* else: error is in the ECC bytes, not in data; skip it */
++ }
++
++ stat += count;
++ calc_ecc = calc_ecc + OMAP_BCH8_ECC_SECT_BYTES;
++ read_ecc = read_ecc + OMAP_BCH8_ECC_SECT_BYTES;
++ dat += BCH8_ECC_BYTES;
++ }
++ break;
+ }
+ return stat;
+ }
+@@ -843,7 +973,7 @@ static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ {
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+- return gpmc_calculate_ecc(info->gpmc_cs, dat, ecc_code);
++ return gpmc_calculate_ecc(info->ecc_opt, info->gpmc_cs, dat, ecc_code);
+ }
+
+ /**
+@@ -858,7 +988,8 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
+ struct nand_chip *chip = mtd->priv;
+ unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+
+- gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
++ gpmc_enable_hwecc(info->ecc_opt, info->gpmc_cs, mode,
++ dev_width, info->nand.ecc.size);
+ }
+
+ /**
+@@ -955,10 +1086,24 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
+ info->mtd.priv = &info->nand;
+ info->mtd.name = dev_name(&pdev->dev);
+ info->mtd.owner = THIS_MODULE;
++ info->ecc_opt = pdata->ecc_opt;
+
+ info->nand.options = pdata->devsize;
+ info->nand.options |= NAND_SKIP_BBTSCAN;
+
++ /*
++ * If ELM feature is used in OMAP NAND driver, then configure it
++ */
++ if (pdata->elm_used) {
++ if (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)
++ omap_configure_elm(&info->mtd, OMAP_BCH8_ECC);
++ }
++
++ if (pdata->ctrlr_suspend)
++ info->ctrlr_suspend = pdata->ctrlr_suspend;
++ if (pdata->ctrlr_resume)
++ info->ctrlr_resume = pdata->ctrlr_resume;
++
+ /* NAND write protect off */
+ gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0);
+
+@@ -1054,10 +1199,19 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
+ /* selsect the ecc type */
+ if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
+ info->nand.ecc.mode = NAND_ECC_SOFT;
+- else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
+- (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
+- info->nand.ecc.bytes = 3;
+- info->nand.ecc.size = 512;
++ else {
++ if (pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) {
++ info->nand.ecc.bytes = 4*7;
++ info->nand.ecc.size = 4*512;
++ } else if (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW) {
++ info->nand.ecc.bytes = OMAP_BCH8_ECC_SECT_BYTES;
++ info->nand.ecc.size = 512;
++ info->nand.ecc.read_page = omap_read_page_bch;
++ } else {
++ info->nand.ecc.bytes = 3;
++ info->nand.ecc.size = 512;
++ }
++
+ info->nand.ecc.calculate = omap_calculate_ecc;
+ info->nand.ecc.hwctl = omap_enable_hwecc;
+ info->nand.ecc.correct = omap_correct_data;
+@@ -1075,23 +1229,48 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
+ }
+ }
+
+- /* rom code layout */
+- if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) {
++	/* select ecc layout */
++ if (info->nand.ecc.mode != NAND_ECC_SOFT) {
+
+- if (info->nand.options & NAND_BUSWIDTH_16)
+- offset = 2;
+- else {
+- offset = 1;
++ if (!(info->nand.options & NAND_BUSWIDTH_16))
+ info->nand.badblock_pattern = &bb_descrip_flashbased;
++
++ offset = JFFS2_CLEAN_MARKER_OFFSET;
++
++ if (info->mtd.oobsize == 64)
++ omap_oobinfo.eccbytes = info->nand.ecc.bytes *
++ 2048/info->nand.ecc.size;
++ else
++ omap_oobinfo.eccbytes = info->nand.ecc.bytes;
++
++ if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) {
++ omap_oobinfo.oobfree->offset =
++ offset + omap_oobinfo.eccbytes;
++ omap_oobinfo.oobfree->length = info->mtd.oobsize -
++ (offset + omap_oobinfo.eccbytes);
++ } else if (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW) {
++ offset = BCH_ECC_POS; /* Synchronize with U-boot */
++ omap_oobinfo.oobfree->offset =
++ BCH_JFFS2_CLEAN_MARKER_OFFSET;
++ omap_oobinfo.oobfree->length = info->mtd.oobsize -
++ offset - omap_oobinfo.eccbytes;
++ } else {
++ omap_oobinfo.oobfree->offset = offset;
++ omap_oobinfo.oobfree->length = info->mtd.oobsize -
++ offset - omap_oobinfo.eccbytes;
++			/*
++			 * offset is calculated considering the following:
++			 * 1) 12 bytes ECC for 512 byte access and 24 bytes ECC
++			 *    for 256 byte access in OOB_64 can be supported
++			 * 2) ECC bytes lie at the end of the OOB area.
++			 * 3) ECC layout must match U-Boot's ECC layout.
++			 */
++ offset = info->mtd.oobsize - MAX_HWECC_BYTES_OOB_64;
+ }
+- omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16);
++
+ for (i = 0; i < omap_oobinfo.eccbytes; i++)
+ omap_oobinfo.eccpos[i] = i+offset;
+
+- omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes;
+- omap_oobinfo.oobfree->length = info->mtd.oobsize -
+- (offset + omap_oobinfo.eccbytes);
+-
+ info->nand.ecc.layout = &omap_oobinfo;
+ }
+
+@@ -1132,13 +1311,46 @@ static int omap_nand_remove(struct platform_device *pdev)
+ /* Release NAND device, its internal structures and partitions */
+ nand_release(&info->mtd);
+ iounmap(info->nand.IO_ADDR_R);
++ release_mem_region(info->phys_base, NAND_IO_SIZE);
+ kfree(&info->mtd);
+ return 0;
+ }
+
++#ifdef CONFIG_PM
++static int omap_nand_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ struct mtd_info *mtd = platform_get_drvdata(pdev);
++ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
++ mtd);
++
++ mtd->suspend(mtd);
++
++ if (info->ctrlr_suspend)
++ info->ctrlr_suspend();
++
++ return 0;
++}
++
++static int omap_nand_resume(struct platform_device *pdev)
++{
++ struct mtd_info *mtd = platform_get_drvdata(pdev);
++ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
++ mtd);
++
++ if (info->ctrlr_resume)
++ info->ctrlr_resume();
++
++ return 0;
++}
++#endif
++
+ static struct platform_driver omap_nand_driver = {
+ .probe = omap_nand_probe,
+ .remove = omap_nand_remove,
++#ifdef CONFIG_PM
++ .suspend = omap_nand_suspend,
++ .resume = omap_nand_resume,
++#endif
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
+index f6c98fb..6851445 100644
+--- a/drivers/net/can/Kconfig
++++ b/drivers/net/can/Kconfig
+@@ -116,6 +116,8 @@ source "drivers/net/can/sja1000/Kconfig"
+
+ source "drivers/net/can/c_can/Kconfig"
+
++source "drivers/net/can/d_can/Kconfig"
++
+ source "drivers/net/can/usb/Kconfig"
+
+ source "drivers/net/can/softing/Kconfig"
+diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
+index 24ebfe8..3377679 100644
+--- a/drivers/net/can/Makefile
++++ b/drivers/net/can/Makefile
+@@ -14,6 +14,7 @@ obj-y += softing/
+ obj-$(CONFIG_CAN_SJA1000) += sja1000/
+ obj-$(CONFIG_CAN_MSCAN) += mscan/
+ obj-$(CONFIG_CAN_C_CAN) += c_can/
++obj-$(CONFIG_CAN_D_CAN) += d_can/
+ obj-$(CONFIG_CAN_AT91) += at91_can.o
+ obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
+ obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
+diff --git a/drivers/net/can/d_can/Kconfig b/drivers/net/can/d_can/Kconfig
+new file mode 100644
+index 0000000..e5e9dcf
+--- /dev/null
++++ b/drivers/net/can/d_can/Kconfig
+@@ -0,0 +1,14 @@
++menuconfig CAN_D_CAN
++ tristate "Bosch D_CAN devices"
++ depends on CAN_DEV && HAS_IOMEM
++
++if CAN_D_CAN
++
++config CAN_D_CAN_PLATFORM
++ tristate "Generic Platform Bus based D_CAN driver"
++ ---help---
++ This driver adds support for the D_CAN chips connected to
++ the "platform bus" (Linux abstraction for directly to the
++ processor attached devices) which can be found on am335x
++ and dm814x boards from TI (http://www.ti.com).
++endif
+diff --git a/drivers/net/can/d_can/Makefile b/drivers/net/can/d_can/Makefile
+new file mode 100644
+index 0000000..80560c5
+--- /dev/null
++++ b/drivers/net/can/d_can/Makefile
+@@ -0,0 +1,8 @@
++#
++# Makefile for the Bosch D_CAN controller drivers.
++#
++
++obj-$(CONFIG_CAN_D_CAN) += d_can.o
++obj-$(CONFIG_CAN_D_CAN_PLATFORM) += d_can_platform.o
++
++ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
+diff --git a/drivers/net/can/d_can/d_can.c b/drivers/net/can/d_can/d_can.c
+new file mode 100644
+index 0000000..18a9e10
+--- /dev/null
++++ b/drivers/net/can/d_can/d_can.c
+@@ -0,0 +1,1415 @@
++/*
++ * CAN bus driver for Bosch D_CAN controller
++ *
++ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * Borrowed from C_CAN driver
++ * Copyright (C) 2010 ST Microelectronics
++ * - Bhupesh Sharma <bhupesh.sharma@st.com>
++ *
++ * Borrowed heavily from the C_CAN driver originally written by:
++ * Copyright (C) 2007
++ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
++ * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
++ *
++ * Bosch D_CAN controller is compliant to CAN protocol version 2.0 part A and B.
++ * Bosch D_CAN user manual can be obtained from:
++ * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/can/
++ * d_can_users_manual_111.pdf
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/netdevice.h>
++#include <linux/if_arp.h>
++#include <linux/if_ether.h>
++#include <linux/list.h>
++#include <linux/io.h>
++
++#include <linux/platform_device.h>
++#include <linux/can.h>
++#include <linux/can/dev.h>
++#include <linux/can/error.h>
++
++#include "d_can.h"
++
++/* TI D_CAN module registers */
++#define D_CAN_CTL 0x0 /* CAN control register */
++#define D_CAN_ES 0x4 /* Error and status */
++#define D_CAN_PARITYERR_EOI 0x4 /* Parity error EOI */
++#define D_CAN_ERRC 0x8 /* Error counter */
++#define D_CAN_BTR 0xC /* Bit timing */
++#define D_CAN_INT 0x10 /* Interrupt register */
++#define D_CAN_TEST 0x14 /* Test register */
++#define D_CAN_PERR 0x1C /* Parity Error Code */
++#define D_CAN_ABOTR 0x80 /* Auto-Bus-On Time */
++#define D_CAN_TXRQ_X 0x84 /* Transmission Request X */
++#define D_CAN_TXRQ(n) (0x88 + ((n) * 4)) /* Transmission request */
++#define D_CAN_NWDAT_X 0x98 /* New data X register */
++#define D_CAN_NWDAT(n) (0x9C + ((n) * 4)) /* New data */
++#define D_CAN_INTPND_X 0xAC /* Interrupt Pending X */
++#define D_CAN_INTPND(n) (0xB0 + ((n) * 4)) /* Interrupt Pending */
++#define D_CAN_MSGVAL_X 0xC0 /* Message Valid X */
++#define D_CAN_MSGVAL(n) (0xC4 + ((n) * 4)) /* Message Valid */
++#define D_CAN_INTMUX(n) (0xD8 + ((n) * 4)) /* Interrupt Multiplexer */
++#define D_CAN_IFCMD(n) (0x100 + ((n) * 0x20)) /* Command */
++#define D_CAN_IFMSK(n) (0x104 + ((n) * 0x20)) /* Mask */
++#define D_CAN_IFARB(n) (0x108 + ((n) * 0x20)) /* Arbitration */
++#define D_CAN_IFMCTL(n) (0x10c + ((n) * 0x20)) /* Message ctl */
++#define D_CAN_IFDATA(n) (0x110 + ((n) * 0x20)) /* DATA A */
++#define D_CAN_IFDATB(n) (0x114 + ((n) * 0x20)) /* DATA B */
++#define D_CAN_IF3OBS 0x140 /* IF3 Observation */
++#define D_CAN_IF3UPD(n) (0x160 + ((n) * 4)) /* Update enable */
++#define D_CAN_TIOC 0x1E0 /* CAN TX IO Control */
++#define D_CAN_RIOC 0x1E4 /* CAN RX IO Control */
++
++/* Control register Bit fields */
++#define D_CAN_CTL_WUBA BIT(26) /* Automatic wake-up on bus activity */
++#define D_CAN_CTL_PDR BIT(24) /* Request for local low power mode */
++#define D_CAN_CTL_DE3 BIT(20) /* Enable DMA request line for IF3 */
++#define D_CAN_CTL_DE2 BIT(19) /* Enable DMA request line for IF2 */
++#define D_CAN_CTL_DE1 BIT(18) /* Enable DMA request line for IF1 */
++#define D_CAN_CTL_IE1 BIT(17) /* Interrupt line 1 enable */
++#define D_CAN_CTL_INITDBG BIT(16) /* Init state for debug access */
++#define D_CAN_CTL_SWR BIT(15) /* S/W reset enable */
++#define D_CAN_CTL_PMD (0xF << 10) /* Parity on/off */
++#define D_CAN_CTL_ABO BIT(9) /* Auto bus on enable */
++#define D_CAN_CTL_IDS BIT(8) /* Interruption debug support enable */
++#define D_CAN_CTL_TEST BIT(7) /* Test mode enable */
++#define D_CAN_CTL_CCE BIT(6) /* Configuration change enable */
++#define D_CAN_CTL_DISABLE_AR BIT(5) /* Disable automatic retransmission */
++#define D_CAN_CTL_ENABLE_AR (0 << 5)
++#define D_CAN_CTL_EIE BIT(3) /* Error interrupt enable */
++#define D_CAN_CTL_SIE BIT(2) /* Status change int enable */
++#define D_CAN_CTL_IE0 BIT(1) /* Interrupt line 0 enable */
++#define D_CAN_CTL_INIT BIT(0) /* D_CAN initialization mode */
++
++/* D_CAN Error and Status and Parity Error EOI reg bit fields */
++#define D_CAN_ES_PDA BIT(10) /* Local power-down ACK */
++#define D_CAN_ES_WUP		BIT(9)	/* Wake up pending */
++#define D_CAN_ES_PER BIT(8) /* Parity error detected */
++#define D_CAN_ES_BOFF BIT(7) /* Bus off state */
++#define D_CAN_ES_EWARN BIT(6) /* Warning state */
++#define D_CAN_ES_EPASS BIT(5) /* Error passive state */
++#define D_CAN_ES_RXOK BIT(4) /* Received a msg successfully */
++#define D_CAN_ES_TXOK BIT(3) /* Transmitted a msg successfully */
++#define D_CAN_ES_LEC_MASK 0x7 /* Last error code */
++
++/* Parity error reg bit fields */
++#define D_CAN_PEEOI BIT(8) /* EOI indication for parity error */
++
++/* Error counter reg bit fields */
++#define D_CAN_ERRC_RP_SHIFT 15
++#define D_CAN_ERRC_RP_MASK BIT(15) /* Receive error passive */
++#define D_CAN_ERRC_REC_SHIFT 8
++#define D_CAN_ERRC_REC_MASK (0x7F << 8) /* Receive err counter */
++#define D_CAN_ERRC_TEC_SHIFT 0
++#define D_CAN_ERRC_TEC_MASK (0xFF << 0) /* Transmit err counter */
++
++/* Bit timing reg bit fields */
++#define D_CAN_BTR_BRPE_SHIFT 16
++#define D_CAN_BTR_BRPE_MASK (0xF << 16) /* Baud rate prescaler ext */
++#define D_CAN_BTR_TSEG2_SHIFT 12
++#define D_CAN_BTR_TSEG2_MASK (0x7 << 12) /* Time seg after smpl point */
++#define D_CAN_BTR_TSEG1_SHIFT 8
++#define D_CAN_BTR_TSEG1_MASK (0xF << 8) /* Time seg before smpl point */
++#define D_CAN_BTR_SJW_SHIFT 6
++#define D_CAN_BTR_SJW_MASK	(0x3 << 6) /* Synchronization jump width */
++#define D_CAN_BTR_BRP_SHIFT 0
++#define D_CAN_BTR_BRP_MASK (0x3F << 0) /* Baud rate prescaler */
++
++/* D_CAN Test register bit fields */
++#define D_CAN_TEST_RDA BIT(9) /* RAM direct access enable */
++#define D_CAN_TEST_EXL BIT(8) /* External loopback mode */
++#define D_CAN_TEST_RX		BIT(7)	/* Monitors the receive pin */
++#define D_CAN_TEST_TX (0x3 << 5) /* Control of CAN_TX pin */
++#define D_CAN_TEST_LBACK BIT(4) /* Loopback mode */
++#define D_CAN_TEST_SILENT	BIT(3)	/* Silent mode */
++
++/* D_CAN Parity error reg bit fields */
++#define D_CAN_PERR_WN_MASK	(0x7 << 8)	/* Parity error word number */
++#define D_CAN_PERR_MN_MASK 0xFF /* Parity error msg object */
++
++/* D_CAN X registers bit fields */
++#define D_CAN_BIT_FIELD(n) (0x3 << (2 * n)) /* X reg's bit field 1 mask */
++
++/* D_CAN IF command reg bit fields */
++#define D_CAN_IF_CMD_WR BIT(23) /* Write/read */
++#define D_CAN_IF_CMD_MASK BIT(22) /* Access to mask bits */
++#define D_CAN_IF_CMD_ARB BIT(21) /* Access to arbitration bits */
++#define D_CAN_IF_CMD_CONTROL	BIT(20)	/* Access to control bits */
++#define D_CAN_IF_CMD_CIP BIT(19) /* Clear int pending */
++#define D_CAN_IF_CMD_TXRQST BIT(18) /* Access transmission request */
++#define D_CAN_IF_CMD_DATAA BIT(17) /* Access to Data Bytes 0-3 */
++#define D_CAN_IF_CMD_DATAB BIT(16) /* Access to Data Bytes 4-7 */
++#define D_CAN_IF_CMD_BUSY BIT(15) /* Busy flag */
++#define D_CAN_IF_CMD_DAM BIT(14) /* Activation of DMA */
++#define D_CAN_IF_CMD_MN_MASK 0xFF /* No. of msg's used for DMA T/F */
++#define D_CAN_IF_CMD_ALL (D_CAN_IF_CMD_MASK | D_CAN_IF_CMD_ARB | \
++ D_CAN_IF_CMD_CONTROL | D_CAN_IF_CMD_TXRQST | \
++ D_CAN_IF_CMD_DATAA | D_CAN_IF_CMD_DATAB)
++
++/* D_CAN IF mask reg bit fields */
++#define D_CAN_IF_MASK_MX BIT(31) /* Mask Extended Identifier */
++#define D_CAN_IF_MASK_MD BIT(30) /* Mask Message direction */
++
++/* D_CAN IF Arbitration */
++#define D_CAN_IF_ARB_MSGVAL	BIT(31)	/* Message Valid */
++#define D_CAN_IF_ARB_MSGXTD BIT(30) /* Extended Identifier 0-11 1-29 */
++#define D_CAN_IF_ARB_DIR_XMIT BIT(29) /* Message direction 0-R 1-T */
++
++/* D_CAN IF Message control */
++#define D_CAN_IF_MCTL_NEWDAT BIT(15) /* New data available */
++#define D_CAN_IF_MCTL_MSGLST BIT(14) /* Message lost, only for receive */
++#define D_CAN_IF_MCTL_CLR_MSGLST (0 << 14)
++#define D_CAN_IF_MCTL_INTPND BIT(13) /* Interrupt pending */
++#define D_CAN_IF_MCTL_UMASK BIT(12) /* Use acceptance mask */
++#define D_CAN_IF_MCTL_TXIE BIT(11) /* Transmit int enable */
++#define D_CAN_IF_MCTL_RXIE BIT(10) /* Receive int enable */
++#define D_CAN_IF_MCTL_RMTEN BIT(9) /* Remote enable */
++#define D_CAN_IF_MCTL_TXRQST BIT(8) /* Transmit request */
++#define D_CAN_IF_MCTL_EOB BIT(7) /* Data frames */
++#define D_CAN_IF_MCTL_DLC_MASK 0xF /* Data length code */
++
++/* D_CAN IF3 Observation reg bit fields */
++#define D_CAN_IF3OBS_UP BIT(15) /* Update data status */
++#define D_CAN_IF3OBS_SDB BIT(12) /* DataB read out status */
++#define D_CAN_IF3OBS_SDA BIT(11) /* DataA read out status */
++#define D_CAN_IF3OBS_SC		BIT(10)	/* Control bits read out status */
++#define D_CAN_IF3OBS_SA BIT(9) /* Arbitration read out status */
++#define D_CAN_IF3OBS_SM BIT(8) /* Mask bits read out status */
++#define D_CAN_IF3OBS_DB BIT(4) /* Data B read observation */
++#define D_CAN_IF3OBS_DA BIT(3) /* Data A read observation */
++#define D_CAN_IF3OBS_CTL BIT(2) /* Control read observation */
++#define D_CAN_IF3OBS_ARB BIT(1) /* Arbitration data read observation */
++#define D_CAN_IF3OBS_MASK BIT(0) /* Mask data read observation */
++
++/* D_CAN TX I/O reg bit fields */
++#define D_CAN_TIOC_PU BIT(18) /* CAN_TX pull up/down select */
++#define D_CAN_TIOC_PD BIT(17) /* CAN_TX pull disable */
++#define D_CAN_TIOC_OD BIT(16) /* CAN_TX open drain enable */
++#define D_CAN_TIOC_FUNC BIT(3) /* CAN_TX function */
++#define D_CAN_TIOC_DIR BIT(2) /* CAN_TX data direction */
++#define D_CAN_TIOC_OUT BIT(1) /* CAN_TX data out write */
++#define D_CAN_TIOC_IN BIT(0) /* CAN_TX data in */
++
++/* D_CAN RX I/O reg bit fields */
++#define D_CAN_RIOC_PU BIT(18) /* CAN_RX pull up/down select */
++#define D_CAN_RIOC_PD BIT(17) /* CAN_RX pull disable */
++#define D_CAN_RIOC_OD BIT(16) /* CAN_RX open drain enable */
++#define D_CAN_RIOC_FUNC BIT(3) /* CAN_RX function */
++#define D_CAN_RIOC_DIR BIT(2) /* CAN_RX data direction */
++#define D_CAN_RIOC_OUT BIT(1) /* CAN_RX data out write */
++#define D_CAN_RTIOC_IN BIT(0) /* CAN_RX data in */
++
++#define D_CAN_SET_REG 0xFFFFFFFF
++
++#define D_CAN_CANMID_IDE BIT(31) /* Extended frame format */
++#define D_CAN_CANMID_AME BIT(30) /* Acceptance mask enable */
++#define D_CAN_CANMID_AAM BIT(29) /* Auto answer mode */
++
++/*
++ * IF register masks:
++ */
++#define IFX_WRITE_IDR(x) ((x) & 0x1FFFFFFF)
++
++#define IFX_CMD_BITS(x) ((x) & 0xFFFFFF00)
++#define IFX_CMD_MSG_NUMBER(x) ((x) & 0xFF)
++
++/* Message objects split */
++#define D_CAN_NUM_MSG_OBJECTS 64
++#define D_CAN_NUM_RX_MSG_OBJECTS 32
++#define D_CAN_NUM_TX_MSG_OBJECTS 32
++
++#define D_CAN_MSG_OBJ_RX_FIRST 1
++#define D_CAN_MSG_OBJ_RX_LAST (D_CAN_MSG_OBJ_RX_FIRST + \
++ D_CAN_NUM_RX_MSG_OBJECTS - 1)
++
++#define D_CAN_MSG_OBJ_TX_FIRST (D_CAN_MSG_OBJ_RX_LAST + 1)
++#define D_CAN_MSG_OBJ_TX_LAST (D_CAN_MSG_OBJ_TX_FIRST + \
++ D_CAN_NUM_TX_MSG_OBJECTS - 1)
++
++#define D_CAN_MSG_OBJ_RX_SPLIT 17
++#define D_CAN_MSG_OBJ_RX_LOW_LAST (D_CAN_MSG_OBJ_RX_SPLIT - 1)
++
++#define D_CAN_NEXT_MSG_OBJ_MASK (D_CAN_NUM_TX_MSG_OBJECTS - 1)
++
++/* status interrupt */
++#define STATUS_INTERRUPT 0x8000
++
++/* global interrupt masks */
++#define ENABLE_ALL_INTERRUPTS 1
++#define DISABLE_ALL_INTERRUPTS 0
++
++/* minimum timeout for checking BUSY status */
++#define MIN_TIMEOUT_VALUE 6
++
++/* Wait for ~1 sec for INIT bit */
++#define D_CAN_WAIT_COUNT 1000
++
++#define D_CAN_IF_RX_NUM 0
++#define D_CAN_IF_TX_NUM 1
++
++#define D_CAN_GET_XREG_NUM(priv, reg) (__ffs(d_can_read(priv, reg))/4)
++
++/* CAN Bittiming constants as per D_CAN specs */
++static struct can_bittiming_const d_can_bittiming_const = {
++ .name = D_CAN_DRV_NAME,
++ .tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */
++ .tseg1_max = 16,
++ .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
++ .tseg2_max = 8,
++ .sjw_max = 4,
++ .brp_min = 1,
++	.brp_max = 1024,	/* 6-bit BRP field + 4-bit BRPE field */
++ .brp_inc = 1,
++};
++
++/* d_can last error code (lec) values */
++enum d_can_lec_type {
++ LEC_NO_ERROR = 0,
++ LEC_STUFF_ERROR,
++ LEC_FORM_ERROR,
++ LEC_ACK_ERROR,
++ LEC_BIT1_ERROR,
++ LEC_BIT0_ERROR,
++ LEC_CRC_ERROR,
++ LEC_UNUSED,
++};
++
++/*
++ * d_can error types:
++ * Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported
++ */
++enum d_can_bus_error_types {
++ D_CAN_NO_ERROR = 0,
++ D_CAN_BUS_OFF,
++ D_CAN_ERROR_WARNING,
++ D_CAN_ERROR_PASSIVE,
++};
++
++static inline void d_can_write(struct d_can_priv *priv, u32 reg, u32 val)
++{
++ __raw_writel(val, priv->base + reg);
++}
++
++static inline u32 d_can_read(struct d_can_priv *priv, int reg)
++{
++ return __raw_readl(priv->base + reg);
++}
++
++static inline void d_can_set_bit(struct d_can_priv *priv, int reg,
++ u32 bit_mask)
++{
++ d_can_write(priv, reg, d_can_read(priv, reg) | bit_mask);
++}
++
++static inline u32 d_can_get_bit(struct d_can_priv *priv, int reg,
++ u32 bit_mask)
++{
++ return (d_can_read(priv, reg) & bit_mask) ? 1 : 0;
++}
++
++static inline void d_can_clear_bit(struct d_can_priv *priv, int reg,
++ u32 bit_mask)
++{
++ d_can_write(priv, reg, d_can_read(priv, reg) & ~bit_mask);
++}
++
++static inline int get_tx_next_msg_obj(const struct d_can_priv *priv)
++{
++ return (priv->tx_next & D_CAN_NEXT_MSG_OBJ_MASK) +
++ D_CAN_MSG_OBJ_TX_FIRST;
++}
++
++static inline int get_tx_echo_msg_obj(const struct d_can_priv *priv)
++{
++ return (priv->tx_echo & D_CAN_NEXT_MSG_OBJ_MASK) +
++ D_CAN_MSG_OBJ_TX_FIRST;
++}
++
++/*
++ * Enable or disable the D_CAN module interrupts: the error
++ * interrupt, the status interrupt, and the interrupt lines
++ * 0 and 1.
++ */
++static void d_can_interrupts(struct d_can_priv *priv, int enable)
++{
++ unsigned int cntrl_save = d_can_read(priv, D_CAN_CTL);
++
++ if (enable)
++ cntrl_save |= (D_CAN_CTL_IE1 | D_CAN_CTL_EIE |
++ D_CAN_CTL_IE0);
++ else
++ cntrl_save &= ~(D_CAN_CTL_IE1 | D_CAN_CTL_SIE |
++ D_CAN_CTL_EIE | D_CAN_CTL_IE0);
++
++ d_can_write(priv, D_CAN_CTL, cntrl_save);
++}
++
++static inline int d_can_msg_obj_is_busy(struct d_can_priv *priv, int iface)
++{
++ int count = MIN_TIMEOUT_VALUE;
++
++ while (count && (d_can_read(priv, D_CAN_IFCMD(iface)) &
++ D_CAN_IF_CMD_BUSY)) {
++ count--;
++ udelay(1);
++ }
++
++ if (!count)
++ return 1;
++
++ return 0;
++}
++
++static inline void d_can_object_get(struct net_device *dev,
++ int iface, int objno, int mask)
++{
++ struct d_can_priv *priv = netdev_priv(dev);
++
++ d_can_write(priv, D_CAN_IFCMD(iface), IFX_CMD_BITS(mask) |
++ IFX_CMD_MSG_NUMBER(objno));
++
++ /*
++ * As per specs, after writing the message object number in the
++ * IF command register the transfer b/w interface register and
++	 * message RAM must complete within 12 CAN-CLK periods.
++ */
++ if (d_can_msg_obj_is_busy(priv, iface))
++ netdev_err(dev, "timed out in object get\n");
++}
++
++static inline void d_can_object_put(struct net_device *dev,
++ int iface, int objno, int mask)
++{
++ struct d_can_priv *priv = netdev_priv(dev);
++
++ d_can_write(priv, D_CAN_IFCMD(iface), D_CAN_IF_CMD_WR |
++ IFX_CMD_BITS(mask) | IFX_CMD_MSG_NUMBER(objno));
++
++ /*
++ * As per specs, after writing the message object number in the
++ * IF command register the transfer b/w interface register and
++	 * message RAM must complete within 12 CAN-CLK periods.
++ */
++ if (d_can_msg_obj_is_busy(priv, iface))
++ netdev_err(dev, "timed out in object put\n");
++}
++
++static void d_can_write_msg_object(struct net_device *dev,
++ int iface, struct can_frame *frame, int objno)
++{
++ int i;
++ unsigned int id;
++ u32 dataA = 0;
++ u32 dataB = 0;
++ u32 flags = 0;
++ struct d_can_priv *priv = netdev_priv(dev);
++
++ if (!(frame->can_id & CAN_RTR_FLAG))
++ flags |= D_CAN_IF_ARB_DIR_XMIT;
++
++ if (frame->can_id & CAN_EFF_FLAG) {
++ id = frame->can_id & CAN_EFF_MASK;
++ flags |= D_CAN_IF_ARB_MSGXTD;
++ } else
++ id = ((frame->can_id & CAN_SFF_MASK) << 18);
++
++ flags |= D_CAN_IF_ARB_MSGVAL;
++ d_can_write(priv, D_CAN_IFARB(iface), IFX_WRITE_IDR(id) | flags);
++
++ for (i = 0; i < frame->can_dlc; i++) {
++ if (frame->can_dlc <= 4)
++ dataA |= (frame->data[i] << (8 * i));
++ else {
++ if (i < 4)
++ dataA |= (frame->data[i] << (8 * i));
++ else
++ dataB |= (frame->data[i] << (8 * (i - 4)));
++ }
++ }
++
++ /* DATA write to Message object registers DATAA and DATAB */
++ if (frame->can_dlc <= 4)
++ d_can_write(priv, D_CAN_IFDATA(iface), dataA);
++ else {
++ d_can_write(priv, D_CAN_IFDATB(iface), dataB);
++ d_can_write(priv, D_CAN_IFDATA(iface), dataA);
++ }
++
++ /* enable TX interrupt for this message object */
++ d_can_write(priv, D_CAN_IFMCTL(iface),
++ D_CAN_IF_MCTL_TXIE | D_CAN_IF_MCTL_EOB |
++ D_CAN_IF_MCTL_TXRQST | D_CAN_IF_MCTL_NEWDAT |
++ frame->can_dlc);
++
++ /* Put message data into message RAM */
++ d_can_object_put(dev, iface, objno, D_CAN_IF_CMD_ALL);
++}
++
++/*
++ * Mark that this particular message object is received and clearing
++ * the interrupt pending register value.
++ */
++static inline void d_can_mark_rx_msg_obj(struct net_device *dev,
++ int iface, int ctrl_mask, int obj)
++{
++ struct d_can_priv *priv = netdev_priv(dev);
++
++ d_can_write(priv, D_CAN_IFMCTL(iface), ctrl_mask
++ & ~(D_CAN_IF_MCTL_MSGLST | D_CAN_IF_MCTL_INTPND));
++
++ d_can_object_put(dev, iface, obj, D_CAN_IF_CMD_CONTROL);
++}
++
++static inline void d_can_activate_all_lower_rx_msg_objs(struct net_device *dev,
++ int iface, int ctrl_mask)
++{
++ int i;
++ struct d_can_priv *priv = netdev_priv(dev);
++
++ for (i = D_CAN_MSG_OBJ_RX_FIRST; i <= D_CAN_MSG_OBJ_RX_LOW_LAST; i++) {
++ d_can_write(priv, D_CAN_IFMCTL(iface),
++ ctrl_mask & ~(D_CAN_IF_MCTL_MSGLST |
++ D_CAN_IF_MCTL_INTPND | D_CAN_IF_MCTL_NEWDAT));
++ d_can_object_put(dev, iface, i, D_CAN_IF_CMD_CONTROL);
++ }
++}
++
++static inline void d_can_activate_rx_msg_obj(struct net_device *dev,
++ int iface, int ctrl_mask,
++ int obj)
++{
++ struct d_can_priv *priv = netdev_priv(dev);
++
++ d_can_write(priv, D_CAN_IFMCTL(iface),
++ ctrl_mask & ~(D_CAN_IF_MCTL_MSGLST |
++ D_CAN_IF_MCTL_INTPND | D_CAN_IF_MCTL_NEWDAT));
++ d_can_object_put(dev, iface, obj, D_CAN_IF_CMD_CONTROL);
++}
++
++static void d_can_handle_lost_msg_obj(struct net_device *dev,
++ int iface, int objno)
++{
++ struct d_can_priv *priv = netdev_priv(dev);
++ struct net_device_stats *stats = &dev->stats;
++ struct sk_buff *skb;
++ struct can_frame *frame;
++
++ netdev_err(dev, "msg lost in buffer %d\n", objno);
++
++ d_can_object_get(dev, iface, objno, D_CAN_IF_CMD_ALL &
++ ~D_CAN_IF_CMD_TXRQST);
++
++ d_can_write(priv, D_CAN_IFMCTL(iface), D_CAN_IF_MCTL_CLR_MSGLST);
++
++ d_can_object_put(dev, iface, objno, D_CAN_IF_CMD_CONTROL);
++
++ /* create an error msg */
++ skb = alloc_can_err_skb(dev, &frame);
++ if (unlikely(!skb))
++ return;
++
++ frame->can_id |= CAN_ERR_CRTL;
++ frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
++ stats->rx_errors++;
++ stats->rx_over_errors++;
++
++ netif_receive_skb(skb);
++}
++
++static int d_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
++{
++ int i;
++ u32 dataA = 0;
++ u32 dataB = 0;
++ unsigned int arb_val;
++ unsigned int mctl_val;
++ struct d_can_priv *priv = netdev_priv(dev);
++ struct net_device_stats *stats = &dev->stats;
++ struct sk_buff *skb;
++ struct can_frame *frame;
++
++ skb = alloc_can_skb(dev, &frame);
++ if (!skb) {
++ stats->rx_dropped++;
++ return -ENOMEM;
++ }
++
++ frame->can_dlc = get_can_dlc(ctrl & 0x0F);
++
++ arb_val = d_can_read(priv, D_CAN_IFARB(iface));
++ mctl_val = d_can_read(priv, D_CAN_IFMCTL(iface));
++
++ if (arb_val & D_CAN_IF_ARB_MSGXTD)
++ frame->can_id = (arb_val & CAN_EFF_MASK) | CAN_EFF_FLAG;
++ else
++ frame->can_id = (arb_val >> 18) & CAN_SFF_MASK;
++
++ if (mctl_val & D_CAN_IF_MCTL_RMTEN)
++ frame->can_id |= CAN_RTR_FLAG;
++ else {
++ dataA = d_can_read(priv, D_CAN_IFDATA(iface));
++ dataB = d_can_read(priv, D_CAN_IFDATB(iface));
++ for (i = 0; i < frame->can_dlc; i++) {
++ /* Writing MO higher 4 data bytes to skb */
++ if (frame->can_dlc <= 4)
++ frame->data[i] = dataA >> (8 * i);
++ else {
++ if (i < 4)
++ frame->data[i] = dataA >> (8 * i);
++ else
++ frame->data[i] = dataB >> (8 * (i-4));
++ }
++ }
++ }
++
++ netif_receive_skb(skb);
++
++ stats->rx_packets++;
++ stats->rx_bytes += frame->can_dlc;
++
++ return 0;
++}
++
++static void d_can_setup_receive_object(struct net_device *dev, int iface,
++ int objno, unsigned int mask,
++ unsigned int id, unsigned int mcont)
++{
++ struct d_can_priv *priv = netdev_priv(dev);
++
++ d_can_write(priv, D_CAN_IFMSK(iface), IFX_WRITE_IDR(mask));
++ d_can_write(priv, D_CAN_IFARB(iface), IFX_WRITE_IDR(id) |
++ D_CAN_IF_ARB_MSGVAL);
++ d_can_write(priv, D_CAN_IFMCTL(iface), mcont);
++
++ d_can_object_put(dev, iface, objno, D_CAN_IF_CMD_ALL &
++ ~D_CAN_IF_CMD_TXRQST);
++
++ netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno, d_can_read(priv,
++ D_CAN_MSGVAL(D_CAN_GET_XREG_NUM(priv, D_CAN_MSGVAL_X))));
++}
++
++static void d_can_inval_msg_object(struct net_device *dev, int iface, int objno)
++{
++ struct d_can_priv *priv = netdev_priv(dev);
++
++ d_can_write(priv, D_CAN_IFARB(iface), 0);
++ d_can_write(priv, D_CAN_IFMCTL(iface), 0);
++
++ d_can_object_put(dev, iface, objno, D_CAN_IF_CMD_ARB |
++ D_CAN_IF_CMD_CONTROL);
++
++ netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno, d_can_read(priv,
++ D_CAN_MSGVAL(D_CAN_GET_XREG_NUM(priv, D_CAN_MSGVAL_X))));
++}
++
++static inline int d_can_is_next_tx_obj_busy(struct d_can_priv *priv, int objno)
++{
++ u32 txrq_x_reg_val = D_CAN_GET_XREG_NUM(priv, D_CAN_TXRQ_X);
++
++ /*
++	 * Bit n-1 of the transmission request register corresponds to
++	 * message object n, so adjust the bit index accordingly.
++ */
++ if (d_can_read(priv, D_CAN_TXRQ(txrq_x_reg_val)) &
++ (1 << (objno - D_CAN_MSG_OBJ_TX_FIRST)))
++ return 1;
++
++ return 0;
++}
++
++static netdev_tx_t d_can_start_xmit(struct sk_buff *skb,
++ struct net_device *dev)
++{
++ u32 msg_obj_no;
++ struct d_can_priv *priv = netdev_priv(dev);
++ struct can_frame *frame = (struct can_frame *)skb->data;
++
++ if (can_dropped_invalid_skb(dev, skb))
++ return NETDEV_TX_OK;
++
++ msg_obj_no = get_tx_next_msg_obj(priv);
++
++ /* prepare message object for transmission */
++ d_can_write_msg_object(dev, D_CAN_IF_TX_NUM, frame, msg_obj_no);
++ can_put_echo_skb(skb, dev, msg_obj_no - D_CAN_MSG_OBJ_TX_FIRST);
++
++ /*
++ * we have to stop the queue in case of a wrap around or
++ * if the next TX message object is still in use
++ */
++ priv->tx_next++;
++ if (d_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
++ ((priv->tx_next & D_CAN_NEXT_MSG_OBJ_MASK) == 0))
++ netif_stop_queue(dev);
++
++ return NETDEV_TX_OK;
++}
++
++static int d_can_set_bittiming(struct net_device *dev)
++{
++ struct d_can_priv *priv = netdev_priv(dev);
++ const struct can_bittiming *bt = &priv->can.bittiming;
++ u32 can_btc;
++
++ can_btc = ((bt->phase_seg2 - 1) & 0x7) << D_CAN_BTR_TSEG2_SHIFT;
++ can_btc |= ((bt->phase_seg1 + bt->prop_seg - 1)
++ & 0xF) << D_CAN_BTR_TSEG1_SHIFT;
++
++ can_btc |= ((bt->sjw - 1) & 0x3) << D_CAN_BTR_SJW_SHIFT;
++
++	/* The ten-bit prescaler is split: lower 6 bits in BRP, upper 4 in BRPE */
++ can_btc |= ((bt->brp - 1) & 0x3F) << D_CAN_BTR_BRP_SHIFT;
++ can_btc |= ((((bt->brp - 1) >> 6) & 0xF) << D_CAN_BTR_BRPE_SHIFT);
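++	/*
++	 * Worked example (illustrative values only): bt->brp = 100 gives
++	 * brp - 1 = 99; the low six bits (35) fill the BRP field and
++	 * (99 >> 6) & 0xF = 1 fills BRPE, together encoding the ten-bit
++	 * prescaler.
++	 */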
++
++ d_can_write(priv, D_CAN_BTR, can_btc);
++
++ netdev_info(dev, "setting CAN BT = %#x\n", can_btc);
++
++ return 0;
++}
++
++/*
++ * Configure D_CAN message objects for Tx and Rx purposes:
++ * D_CAN provides a total of 64 message objects that can be configured
++ * either for Tx or Rx purposes. In this driver the first 32 message
++ * objects are used as a reception FIFO; the end of the FIFO is marked by
++ * the EoB bit being SET on the last object. The remaining 32 message
++ * objects are kept aside for Tx purposes. See the user guide for further
++ * details on configuring message objects.
++ */
++static void d_can_configure_msg_objects(struct net_device *dev)
++{
++ unsigned int i;
++
++ /* first invalidate all message objects */
++ for (i = D_CAN_MSG_OBJ_RX_FIRST; i <= D_CAN_NUM_MSG_OBJECTS; i++)
++ d_can_inval_msg_object(dev, D_CAN_IF_RX_NUM, i);
++
++ /* setup receive message objects */
++ for (i = D_CAN_MSG_OBJ_RX_FIRST; i < D_CAN_MSG_OBJ_RX_LAST; i++)
++ d_can_setup_receive_object(dev, D_CAN_IF_RX_NUM, i, 0, 0,
++ (D_CAN_IF_MCTL_RXIE | D_CAN_IF_MCTL_UMASK) &
++ ~D_CAN_IF_MCTL_EOB);
++
++	/* the EoB bit of the last object must be set to terminate the FIFO */
++ d_can_setup_receive_object(dev, D_CAN_IF_RX_NUM, D_CAN_MSG_OBJ_RX_LAST,
++ 0, 0, D_CAN_IF_MCTL_RXIE | D_CAN_IF_MCTL_UMASK |
++ D_CAN_IF_MCTL_EOB);
++}
++
++static void d_can_test_mode(struct net_device *dev)
++{
++ struct d_can_priv *priv = netdev_priv(dev);
++
++	/*
++	 * Enable test mode here; the specific TEST bits are then set
++	 * according to the requested control mode.
++	 */
++ d_can_write(priv, D_CAN_CTL, D_CAN_CTL_EIE |
++ D_CAN_CTL_IE1 | D_CAN_CTL_IE0 | D_CAN_CTL_TEST);
++
++ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
++ /* silent mode : bus-monitoring mode */
++ d_can_write(priv, D_CAN_TEST, D_CAN_TEST_SILENT);
++ } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
++ /* loopback mode : useful for self-test function */
++ d_can_write(priv, D_CAN_TEST, D_CAN_TEST_LBACK);
++ } else {
++ /* loopback + silent mode : useful for hot self-test */
++ d_can_write(priv, D_CAN_TEST, D_CAN_TEST_LBACK |
++ D_CAN_TEST_SILENT);
++ }
++}
++
++/*
++ * Configure D_CAN chip:
++ * - enable/disable auto-retransmission
++ * - set operating mode
++ * - configure message objects
++ */
++static void d_can_init(struct net_device *dev)
++{
++ struct d_can_priv *priv = netdev_priv(dev);
++ u32 cnt;
++
++ netdev_dbg(dev, "resetting d_can ...\n");
++ d_can_set_bit(priv, D_CAN_CTL, D_CAN_CTL_SWR);
++
++ /* Enter initialization mode by setting the Init bit */
++ d_can_set_bit(priv, D_CAN_CTL, D_CAN_CTL_INIT);
++
++ /* enable automatic retransmission */
++ d_can_set_bit(priv, D_CAN_CTL, D_CAN_CTL_ENABLE_AR);
++
++	/* Set the Configuration Change Enable (CCE) bit */
++ d_can_set_bit(priv, D_CAN_CTL, D_CAN_CTL_CCE);
++
++ /* Wait for the Init bit to get set */
++ cnt = D_CAN_WAIT_COUNT;
++ while (!d_can_get_bit(priv, D_CAN_CTL, D_CAN_CTL_INIT) && cnt != 0) {
++ --cnt;
++ udelay(10);
++ }
++
++ /* set bittiming params */
++ d_can_set_bittiming(dev);
++
++ d_can_clear_bit(priv, D_CAN_CTL, D_CAN_CTL_INIT | D_CAN_CTL_CCE);
++
++	/* Wait for the Init bit to clear */
++ cnt = D_CAN_WAIT_COUNT;
++ while (d_can_get_bit(priv, D_CAN_CTL, D_CAN_CTL_INIT) && cnt != 0) {
++ --cnt;
++ udelay(10);
++ }
++
++ if (priv->can.ctrlmode & (CAN_CTRLMODE_LOOPBACK |
++ CAN_CTRLMODE_LISTENONLY))
++ d_can_test_mode(dev);
++ else
++		/* normal mode */
++ d_can_write(priv, D_CAN_CTL, D_CAN_CTL_EIE | D_CAN_CTL_IE1 |
++ D_CAN_CTL_IE0);
++
++ /* Enable TX and RX I/O Control pins */
++ d_can_write(priv, D_CAN_TIOC, D_CAN_TIOC_FUNC);
++ d_can_write(priv, D_CAN_RIOC, D_CAN_RIOC_FUNC);
++
++ /* configure message objects */
++ d_can_configure_msg_objects(dev);
++
++ /* set a LEC value so that we can check for updates later */
++ d_can_write(priv, D_CAN_ES, LEC_UNUSED);
++}
++
++static void d_can_start(struct net_device *dev)
++{
++ struct d_can_priv *priv = netdev_priv(dev);
++
++ /* basic d_can initialization */
++ d_can_init(dev);
++
++ priv->can.state = CAN_STATE_ERROR_ACTIVE;
++
++ /* reset tx helper pointers */
++ priv->tx_next = priv->tx_echo = 0;
++
++ /* enable status change, error and module interrupts */
++ d_can_interrupts(priv, ENABLE_ALL_INTERRUPTS);
++}
++
++static void d_can_stop(struct net_device *dev)
++{
++ struct d_can_priv *priv = netdev_priv(dev);
++
++ /* disable all interrupts */
++ d_can_interrupts(priv, DISABLE_ALL_INTERRUPTS);
++
++ /* set the state as STOPPED */
++ priv->can.state = CAN_STATE_STOPPED;
++}
++
++static int d_can_set_mode(struct net_device *dev, enum can_mode mode)
++{
++ switch (mode) {
++ case CAN_MODE_START:
++ d_can_start(dev);
++ netif_wake_queue(dev);
++ break;
++ default:
++ return -EOPNOTSUPP;
++ }
++
++ return 0;
++}
++
++static int d_can_get_berr_counter(const struct net_device *dev,
++ struct can_berr_counter *bec)
++{
++ unsigned int reg_err_counter;
++ struct d_can_priv *priv = netdev_priv(dev);
++
++ reg_err_counter = d_can_read(priv, D_CAN_ERRC);
++ bec->rxerr = (reg_err_counter & D_CAN_ERRC_REC_MASK) >>
++ D_CAN_ERRC_REC_SHIFT;
++ bec->txerr = reg_err_counter & D_CAN_ERRC_TEC_MASK;
++
++ return 0;
++}
++
++/*
++ * theory of operation:
++ *
++ * priv->tx_echo holds the number of the oldest can_frame put for
++ * transmission into the hardware, but not yet ACKed by the CAN tx
++ * complete IRQ.
++ *
++ * We iterate from priv->tx_echo to priv->tx_next and check whether each
++ * packet has been transmitted; if so, we echo it back to the CAN
++ * framework. As soon as we find a packet that has not yet been
++ * transmitted, we stop looking.
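++ *
++ * Example (illustrative values): with tx_echo = 3 and tx_next = 5 the
++ * loop below walks echo slots 3 and 4 and stops at the first slot whose
++ * TXRQ bit is still set in the hardware.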
++ */
++static void d_can_do_tx(struct net_device *dev)
++{
++ u32 msg_obj_no;
++ struct d_can_priv *priv = netdev_priv(dev);
++ struct net_device_stats *stats = &dev->stats;
++ u32 txrq_x_reg_val;
++ u32 txrq_reg_val;
++
++ for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
++ msg_obj_no = get_tx_echo_msg_obj(priv);
++ txrq_x_reg_val = D_CAN_GET_XREG_NUM(priv, D_CAN_TXRQ_X);
++ txrq_reg_val = d_can_read(priv, D_CAN_TXRQ(txrq_x_reg_val));
++ if (!(txrq_reg_val & (1 << (msg_obj_no -
++ D_CAN_MSG_OBJ_TX_FIRST)))) {
++ can_get_echo_skb(dev,
++ msg_obj_no - D_CAN_MSG_OBJ_TX_FIRST);
++ stats->tx_bytes += d_can_read(priv,
++ D_CAN_IFMCTL(D_CAN_IF_TX_NUM))
++ & D_CAN_IF_MCTL_DLC_MASK;
++ stats->tx_packets++;
++ d_can_inval_msg_object(dev, D_CAN_IF_TX_NUM,
++ msg_obj_no);
++ } else
++ break;
++ }
++
++ /* restart queue if wrap-up or if queue stalled on last pkt */
++ if (((priv->tx_next & D_CAN_NEXT_MSG_OBJ_MASK) != 0)
++ || ((priv->tx_echo & D_CAN_NEXT_MSG_OBJ_MASK) == 0))
++ netif_wake_queue(dev);
++}
++
++/*
++ * theory of operation:
++ *
++ * The d_can core saves a received CAN message into the first message
++ * object it finds free (starting with the lowest). Bits NEWDAT and
++ * INTPND are set for this message object, indicating that a new message
++ * has arrived. To work around this behaviour, we keep two groups of
++ * message objects whose partitioning is defined by D_CAN_MSG_OBJ_RX_SPLIT.
++ *
++ * To ensure in-order frame reception we use the following
++ * approach while re-activating a message object to receive further
++ * frames:
++ * - if the current message object number is lower than
++ *   D_CAN_MSG_OBJ_RX_LOW_LAST, do not clear the NEWDAT bit while
++ *   clearing the INTPND bit.
++ * - if the current message object number is equal to
++ *   D_CAN_MSG_OBJ_RX_LOW_LAST, clear the NEWDAT bit of all lower
++ *   receive message objects.
++ * - if the current message object number is greater than
++ *   D_CAN_MSG_OBJ_RX_LOW_LAST, clear the NEWDAT bit of only this
++ *   message object.
++ */
++static int d_can_do_rx_poll(struct net_device *dev, int quota)
++{
++ struct d_can_priv *priv = netdev_priv(dev);
++ unsigned int msg_obj, mctrl_reg_val;
++ u32 num_rx_pkts = 0;
++ u32 intpnd_x_reg_val;
++ u32 intpnd_reg_val;
++
++ for (msg_obj = D_CAN_MSG_OBJ_RX_FIRST; msg_obj <= D_CAN_MSG_OBJ_RX_LAST
++ && quota > 0; msg_obj++) {
++
++ intpnd_x_reg_val = D_CAN_GET_XREG_NUM(priv, D_CAN_INTPND_X);
++ intpnd_reg_val = d_can_read(priv,
++ D_CAN_INTPND(intpnd_x_reg_val));
++
++ /*
++		 * Bit n-1 of the interrupt pending register corresponds to
++		 * message object n, so adjust the bit index accordingly.
++ */
++ if (intpnd_reg_val & (1 << (msg_obj - 1))) {
++
++ d_can_object_get(dev, D_CAN_IF_RX_NUM, msg_obj,
++ D_CAN_IF_CMD_ALL &
++ ~D_CAN_IF_CMD_TXRQST);
++
++ mctrl_reg_val = d_can_read(priv,
++ D_CAN_IFMCTL(D_CAN_IF_RX_NUM));
++
++ if (!(mctrl_reg_val & D_CAN_IF_MCTL_NEWDAT))
++ continue;
++
++ /* read the data from the message object */
++ d_can_read_msg_object(dev, D_CAN_IF_RX_NUM,
++ mctrl_reg_val);
++
++ if (mctrl_reg_val & D_CAN_IF_MCTL_EOB)
++ d_can_setup_receive_object(dev, D_CAN_IF_RX_NUM,
++ D_CAN_MSG_OBJ_RX_LAST, 0, 0,
++ D_CAN_IF_MCTL_RXIE | D_CAN_IF_MCTL_UMASK
++ | D_CAN_IF_MCTL_EOB);
++
++ if (mctrl_reg_val & D_CAN_IF_MCTL_MSGLST) {
++ d_can_handle_lost_msg_obj(dev, D_CAN_IF_RX_NUM,
++ msg_obj);
++ num_rx_pkts++;
++ quota--;
++ continue;
++ }
++
++ if (msg_obj < D_CAN_MSG_OBJ_RX_LOW_LAST)
++ d_can_mark_rx_msg_obj(dev, D_CAN_IF_RX_NUM,
++ mctrl_reg_val, msg_obj);
++ else if (msg_obj > D_CAN_MSG_OBJ_RX_LOW_LAST)
++ /* activate this msg obj */
++ d_can_activate_rx_msg_obj(dev, D_CAN_IF_RX_NUM,
++ mctrl_reg_val, msg_obj);
++ else if (msg_obj == D_CAN_MSG_OBJ_RX_LOW_LAST)
++ /* activate all lower message objects */
++ d_can_activate_all_lower_rx_msg_objs(dev,
++ D_CAN_IF_RX_NUM, mctrl_reg_val);
++
++ num_rx_pkts++;
++ quota--;
++ }
++ }
++
++ return num_rx_pkts;
++}
++
++static inline int d_can_has_handle_berr(struct d_can_priv *priv)
++{
++ return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
++ (priv->current_status & LEC_UNUSED);
++}
++
++static int d_can_handle_state_change(struct net_device *dev,
++ enum d_can_bus_error_types error_type)
++{
++ unsigned int reg_err_counter;
++ unsigned int rx_err_passive;
++ struct d_can_priv *priv = netdev_priv(dev);
++ struct net_device_stats *stats = &dev->stats;
++ struct can_frame *cf;
++ struct sk_buff *skb;
++ struct can_berr_counter bec;
++
++ /* propagate the error condition to the CAN stack */
++ skb = alloc_can_err_skb(dev, &cf);
++ if (unlikely(!skb))
++ return 0;
++
++ d_can_get_berr_counter(dev, &bec);
++ reg_err_counter = d_can_read(priv, D_CAN_ERRC);
++ rx_err_passive = (reg_err_counter & D_CAN_ERRC_RP_MASK) >>
++ D_CAN_ERRC_RP_SHIFT;
++
++ switch (error_type) {
++ case D_CAN_ERROR_WARNING:
++ /* error warning state */
++ priv->can.can_stats.error_warning++;
++ priv->can.state = CAN_STATE_ERROR_WARNING;
++ cf->can_id |= CAN_ERR_CRTL;
++ cf->data[1] = (bec.txerr > bec.rxerr) ?
++ CAN_ERR_CRTL_TX_WARNING :
++ CAN_ERR_CRTL_RX_WARNING;
++ cf->data[6] = bec.txerr;
++ cf->data[7] = bec.rxerr;
++
++ break;
++ case D_CAN_ERROR_PASSIVE:
++ /* error passive state */
++ priv->can.can_stats.error_passive++;
++ priv->can.state = CAN_STATE_ERROR_PASSIVE;
++ cf->can_id |= CAN_ERR_CRTL;
++ if (rx_err_passive)
++ cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
++ if (bec.txerr > 127)
++ cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
++
++ cf->data[6] = bec.txerr;
++ cf->data[7] = bec.rxerr;
++ break;
++ case D_CAN_BUS_OFF:
++ /* bus-off state */
++ priv->can.state = CAN_STATE_BUS_OFF;
++ cf->can_id |= CAN_ERR_BUSOFF;
++ /*
++ * disable all interrupts in bus-off mode to ensure that
++		 * the CPU is not bogged down
++ */
++ d_can_interrupts(priv, DISABLE_ALL_INTERRUPTS);
++ can_bus_off(dev);
++ break;
++ default:
++ break;
++ }
++
++ netif_receive_skb(skb);
++ stats->rx_packets++;
++ stats->rx_bytes += cf->can_dlc;
++
++ return 1;
++}
++
++static int d_can_handle_bus_err(struct net_device *dev,
++ enum d_can_lec_type lec_type)
++{
++ struct d_can_priv *priv = netdev_priv(dev);
++ struct net_device_stats *stats = &dev->stats;
++ struct can_frame *cf;
++ struct sk_buff *skb;
++
++ /*
++ * early exit if no lec update or no error.
++ * no lec update means that no CAN bus event has been detected
++	 * since the CPU wrote the value 0x7 (LEC_UNUSED) to the status
++	 * register.
++ */
++ if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
++ return 0;
++
++ /* propagate the error condition to the CAN stack */
++ skb = alloc_can_err_skb(dev, &cf);
++ if (unlikely(!skb))
++ return 0;
++
++ /*
++ * check for 'last error code' which tells us the
++ * type of the last error to occur on the CAN bus
++ */
++
++ /* common for all type of bus errors */
++ priv->can.can_stats.bus_error++;
++ stats->rx_errors++;
++ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
++ cf->data[2] |= CAN_ERR_PROT_UNSPEC;
++
++ switch (lec_type) {
++ case LEC_STUFF_ERROR:
++ netdev_dbg(dev, "stuff error\n");
++ cf->data[2] |= CAN_ERR_PROT_STUFF;
++ break;
++ case LEC_FORM_ERROR:
++ netdev_dbg(dev, "form error\n");
++ cf->data[2] |= CAN_ERR_PROT_FORM;
++ break;
++ case LEC_ACK_ERROR:
++ netdev_dbg(dev, "ack error\n");
++ cf->data[2] |= (CAN_ERR_PROT_LOC_ACK |
++ CAN_ERR_PROT_LOC_ACK_DEL);
++ break;
++ case LEC_BIT1_ERROR:
++ netdev_dbg(dev, "bit1 error\n");
++ cf->data[2] |= CAN_ERR_PROT_BIT1;
++ break;
++ case LEC_BIT0_ERROR:
++ netdev_dbg(dev, "bit0 error\n");
++ cf->data[2] |= CAN_ERR_PROT_BIT0;
++ break;
++ case LEC_CRC_ERROR:
++ netdev_dbg(dev, "CRC error\n");
++ cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
++ CAN_ERR_PROT_LOC_CRC_DEL);
++ break;
++ default:
++ break;
++ }
++
++ /* set a LEC value so that we can check for updates later */
++ d_can_write(priv, D_CAN_ES, LEC_UNUSED);
++
++ netif_receive_skb(skb);
++ stats->rx_packets++;
++ stats->rx_bytes += cf->can_dlc;
++
++ return 1;
++}
++
++static int d_can_poll(struct napi_struct *napi, int quota)
++{
++ int lec_type = 0;
++ int work_done = 0;
++ struct net_device *dev = napi->dev;
++ struct d_can_priv *priv = netdev_priv(dev);
++
++ if (!priv->irqstatus)
++ goto end;
++
++ /* status events have the highest priority */
++ if (priv->irqstatus == STATUS_INTERRUPT) {
++ priv->current_status = d_can_read(priv, D_CAN_ES);
++
++ /* handle Tx/Rx events */
++ if (priv->current_status & D_CAN_ES_TXOK)
++ d_can_write(priv, D_CAN_ES,
++ priv->current_status & ~D_CAN_ES_TXOK);
++
++ if (priv->current_status & D_CAN_ES_RXOK)
++ d_can_write(priv, D_CAN_ES,
++ priv->current_status & ~D_CAN_ES_RXOK);
++
++ /* handle state changes */
++ if ((priv->current_status & D_CAN_ES_EWARN) &&
++ (!(priv->last_status & D_CAN_ES_EWARN))) {
++ netdev_dbg(dev, "entered error warning state\n");
++ work_done += d_can_handle_state_change(dev,
++ D_CAN_ERROR_WARNING);
++ }
++ if ((priv->current_status & D_CAN_ES_EPASS) &&
++ (!(priv->last_status & D_CAN_ES_EPASS))) {
++ netdev_dbg(dev, "entered error passive state\n");
++ work_done += d_can_handle_state_change(dev,
++ D_CAN_ERROR_PASSIVE);
++ }
++ if ((priv->current_status & D_CAN_ES_BOFF) &&
++ (!(priv->last_status & D_CAN_ES_BOFF))) {
++ netdev_dbg(dev, "entered bus off state\n");
++ work_done += d_can_handle_state_change(dev,
++ D_CAN_BUS_OFF);
++ }
++
++ /* handle bus recovery events */
++ if ((!(priv->current_status & D_CAN_ES_BOFF)) &&
++ (priv->last_status & D_CAN_ES_BOFF)) {
++ netdev_dbg(dev, "left bus off state\n");
++ priv->can.state = CAN_STATE_ERROR_ACTIVE;
++ }
++ if ((!(priv->current_status & D_CAN_ES_EPASS)) &&
++ (priv->last_status & D_CAN_ES_EPASS)) {
++ netdev_dbg(dev, "left error passive state\n");
++ priv->can.state = CAN_STATE_ERROR_ACTIVE;
++ }
++
++ priv->last_status = priv->current_status;
++
++ /* handle lec errors on the bus */
++ lec_type = d_can_has_handle_berr(priv);
++ if (lec_type)
++ work_done += d_can_handle_bus_err(dev, lec_type);
++ } else if ((priv->irqstatus >= D_CAN_MSG_OBJ_RX_FIRST) &&
++ (priv->irqstatus <= D_CAN_MSG_OBJ_RX_LAST)) {
++ /* handle events corresponding to receive message objects */
++ work_done += d_can_do_rx_poll(dev, (quota - work_done));
++ } else if ((priv->irqstatus >= D_CAN_MSG_OBJ_TX_FIRST) &&
++ (priv->irqstatus <= D_CAN_MSG_OBJ_TX_LAST)) {
++ /* handle events corresponding to transmit message objects */
++ d_can_do_tx(dev);
++ }
++
++end:
++ if (work_done < quota) {
++ napi_complete(napi);
++ /* enable all IRQs */
++ d_can_interrupts(priv, ENABLE_ALL_INTERRUPTS);
++ }
++
++ return work_done;
++}
++
++static irqreturn_t d_can_isr(int irq, void *dev_id)
++{
++ struct net_device *dev = (struct net_device *)dev_id;
++ struct d_can_priv *priv = netdev_priv(dev);
++
++ priv->irqstatus = d_can_read(priv, D_CAN_INT);
++ if (!priv->irqstatus)
++ return IRQ_NONE;
++
++ /* disable all interrupts and schedule the NAPI */
++ d_can_interrupts(priv, DISABLE_ALL_INTERRUPTS);
++ napi_schedule(&priv->napi);
++
++ return IRQ_HANDLED;
++}
++
++static int d_can_open(struct net_device *ndev)
++{
++ int err;
++ struct d_can_priv *priv = netdev_priv(ndev);
++
++ /* Open common can device */
++ err = open_candev(ndev);
++ if (err) {
++ netdev_err(ndev, "open_candev() failed %d\n", err);
++ return err;
++ }
++
++ /* register interrupt handler for Message Object (MO)
++ * and Error + status change (ES) */
++ err = request_irq(ndev->irq, &d_can_isr, IRQF_SHARED, ndev->name,
++ ndev);
++ if (err) {
++ netdev_err(ndev, "failed to request MO_ES interrupt\n");
++ goto exit_close_candev;
++ }
++
++ /* register interrupt handler for only Message Object */
++ err = request_irq(priv->irq_obj, &d_can_isr, IRQF_SHARED, ndev->name,
++ ndev);
++ if (err) {
++ netdev_err(ndev, "failed to request MO interrupt\n");
++ goto exit_free_irq;
++ }
++
++ /* start the d_can controller */
++ d_can_start(ndev);
++
++ napi_enable(&priv->napi);
++ netif_start_queue(ndev);
++
++ priv->opened = true;
++ return 0;
++exit_free_irq:
++ free_irq(ndev->irq, ndev);
++exit_close_candev:
++ close_candev(ndev);
++
++ return err;
++}
++
++static int d_can_close(struct net_device *ndev)
++{
++ struct d_can_priv *priv = netdev_priv(ndev);
++
++ netif_stop_queue(ndev);
++ napi_disable(&priv->napi);
++ d_can_stop(ndev);
++ free_irq(ndev->irq, ndev);
++ free_irq(priv->irq_obj, ndev);
++ close_candev(ndev);
++ priv->opened = false;
++
++ return 0;
++}
++
++void d_can_reset_ram(struct d_can_priv *d_can, unsigned int instance,
++ unsigned int enable)
++{
++ if (d_can->ram_init)
++ d_can->ram_init(instance, enable);
++
++	/* Allow some time for the DCAN RAM initialization to complete */
++ udelay(1);
++}
++EXPORT_SYMBOL_GPL(d_can_reset_ram);
++
++struct net_device *alloc_d_can_dev(int num_objs)
++{
++ struct net_device *dev;
++ struct d_can_priv *priv;
++
++ dev = alloc_candev(sizeof(struct d_can_priv), num_objs/2);
++ if (!dev)
++ return NULL;
++
++ priv = netdev_priv(dev);
++ netif_napi_add(dev, &priv->napi, d_can_poll, num_objs/2);
++
++ priv->dev = dev;
++ priv->can.bittiming_const = &d_can_bittiming_const;
++ priv->can.do_set_mode = d_can_set_mode;
++ priv->can.do_get_berr_counter = d_can_get_berr_counter;
++ priv->can.ctrlmode_supported = (CAN_CTRLMODE_LOOPBACK |
++ CAN_CTRLMODE_LISTENONLY |
++ CAN_CTRLMODE_BERR_REPORTING |
++ CAN_CTRLMODE_3_SAMPLES);
++
++ return dev;
++}
++EXPORT_SYMBOL_GPL(alloc_d_can_dev);
++
++#ifdef CONFIG_PM
++int d_can_power_down(struct d_can_priv *d_can)
++{
++ unsigned long time_out;
++ struct net_device *ndev = platform_get_drvdata(d_can->pdev);
++
++ d_can_set_bit(d_can, D_CAN_CTL, D_CAN_CTL_PDR);
++
++ /* Wait for the PDA bit to get set */
++ time_out = jiffies + msecs_to_jiffies(D_CAN_WAIT_COUNT);
++ while (!d_can_get_bit(d_can, D_CAN_ES, D_CAN_ES_PDA) &&
++ time_after(time_out, jiffies))
++ cpu_relax();
++
++ if (time_after(jiffies, time_out))
++ return -ETIMEDOUT;
++
++ if (d_can->opened)
++ d_can_stop(ndev);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(d_can_power_down);
++
++int d_can_power_up(struct d_can_priv *d_can)
++{
++ unsigned long time_out;
++ struct net_device *ndev = platform_get_drvdata(d_can->pdev);
++
++ d_can_clear_bit(d_can, D_CAN_CTL, D_CAN_CTL_PDR);
++ d_can_clear_bit(d_can, D_CAN_CTL, D_CAN_CTL_INIT);
++
++	/* Wait for the PDA bit to clear */
++ time_out = jiffies + msecs_to_jiffies(D_CAN_WAIT_COUNT);
++ while (d_can_get_bit(d_can, D_CAN_ES, D_CAN_ES_PDA) &&
++ time_after(time_out, jiffies))
++ cpu_relax();
++
++ if (time_after(jiffies, time_out))
++ return -ETIMEDOUT;
++
++ if (d_can->opened)
++ d_can_start(ndev);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(d_can_power_up);
++#else
++#define d_can_power_down NULL
++#define d_can_power_up NULL
++#endif
++
++void free_d_can_dev(struct net_device *dev)
++{
++ free_candev(dev);
++}
++EXPORT_SYMBOL_GPL(free_d_can_dev);
++
++static const struct net_device_ops d_can_netdev_ops = {
++ .ndo_open = d_can_open,
++ .ndo_stop = d_can_close,
++ .ndo_start_xmit = d_can_start_xmit,
++};
++
++int register_d_can_dev(struct net_device *dev)
++{
++ /* we support local echo */
++ dev->flags |= IFF_ECHO;
++ dev->netdev_ops = &d_can_netdev_ops;
++
++ return register_candev(dev);
++}
++EXPORT_SYMBOL_GPL(register_d_can_dev);
++
++void unregister_d_can_dev(struct net_device *dev)
++{
++ struct d_can_priv *priv = netdev_priv(dev);
++
++ /* disable all interrupts */
++ d_can_interrupts(priv, DISABLE_ALL_INTERRUPTS);
++
++ unregister_candev(dev);
++}
++EXPORT_SYMBOL_GPL(unregister_d_can_dev);
++
++MODULE_AUTHOR("AnilKumar Ch <anilkumar@ti.com>");
++MODULE_LICENSE("GPL v2");
++MODULE_VERSION(D_CAN_VERSION);
++MODULE_DESCRIPTION(D_CAN_DRV_DESC);
+diff --git a/drivers/net/can/d_can/d_can.h b/drivers/net/can/d_can/d_can.h
+new file mode 100644
+index 0000000..5013421
+--- /dev/null
++++ b/drivers/net/can/d_can/d_can.h
+@@ -0,0 +1,72 @@
++/*
++ * CAN bus driver for Bosch D_CAN controller
++ *
++ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * Borrowed from C_CAN driver
++ * Copyright (C) 2010 ST Microelectronics
++ * - Bhupesh Sharma <bhupesh.sharma@st.com>
++ *
++ * Borrowed heavily from the C_CAN driver originally written by:
++ * Copyright (C) 2007
++ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
++ * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
++ *
++ * Bosch D_CAN controller is compliant to CAN protocol version 2.0 part A and B.
++ * Bosch D_CAN user manual can be obtained from:
++ * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/can/
++ * d_can_users_manual_111.pdf
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef D_CAN_H
++#define D_CAN_H
++
++#define D_CAN_DRV_NAME "d_can"
++#define D_CAN_VERSION "1.0"
++#define D_CAN_DRV_DESC "CAN bus driver for Bosch D_CAN controller " \
++ D_CAN_VERSION
++
++/* d_can private data structure */
++struct d_can_priv {
++ struct can_priv can; /* must be the first member */
++ struct napi_struct napi;
++ struct net_device *dev;
++ struct platform_device *pdev;
++ int current_status;
++ int last_status;
++ unsigned int irqstatus;
++ void __iomem *base;
++ u32 napi_weight;
++ struct clk *fck;
++ struct clk *ick;
++ unsigned int irq; /* device IRQ number, for all MO and ES */
++ unsigned int irq_obj; /* device IRQ number for only Msg Object */
++ unsigned int irq_parity; /* device IRQ number for parity error */
++ unsigned long irq_flags; /* for request_irq() */
++ unsigned int tx_next;
++ unsigned int tx_echo;
++ unsigned int rx_next;
++ bool opened;
++ void *priv; /* for board-specific data */
++ void (*ram_init) (unsigned int, unsigned int);
++};
++
++struct net_device *alloc_d_can_dev(int);
++void free_d_can_dev(struct net_device *dev);
++int d_can_power_up(struct d_can_priv *d_can);
++int d_can_power_down(struct d_can_priv *d_can);
++int register_d_can_dev(struct net_device *dev);
++void unregister_d_can_dev(struct net_device *dev);
++void d_can_reset_ram(struct d_can_priv *d_can, unsigned int instance,
++ unsigned int enable);
++
++#endif /* D_CAN_H */
+diff --git a/drivers/net/can/d_can/d_can_platform.c b/drivers/net/can/d_can/d_can_platform.c
+new file mode 100644
+index 0000000..7b8a62c
+--- /dev/null
++++ b/drivers/net/can/d_can/d_can_platform.c
+@@ -0,0 +1,288 @@
++/*
++ * Platform CAN bus driver for Bosch D_CAN controller
++ *
++ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * Borrowed from C_CAN driver
++ * Copyright (C) 2010 ST Microelectronics
++ * - Bhupesh Sharma <bhupesh.sharma@st.com>
++ *
++ * Borrowed heavily from the C_CAN driver originally written by:
++ * Copyright (C) 2007
++ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
++ * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
++ *
++ * Bosch D_CAN controller is compliant to CAN protocol version 2.0 part A and B.
++ * Bosch D_CAN user manual can be obtained from:
++ * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/can/
++ * d_can_users_manual_111.pdf
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++/*
++ * Your platform definition should provide D_CAN platform data (number of
++ * message objects, DMA support, RAM init callback), for example:
++ *
++ * static struct d_can_platform_data am33xx_evm_d_can_pdata = {
++ * .num_of_msg_objs = 64,
++ * .dma_support = false,
++ * .ram_init = d_can_hw_raminit,
++ * };
++ *
++ * Please see include/linux/can/platform/d_can.h for description of
++ * above fields.
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/netdevice.h>
++#include <linux/if_arp.h>
++#include <linux/if_ether.h>
++#include <linux/list.h>
++#include <linux/io.h>
++#include <linux/platform_device.h>
++#include <linux/can/platform/d_can.h>
++#include <linux/clk.h>
++#include <linux/pm_runtime.h>
++#include <linux/slab.h>
++#include <linux/can/dev.h>
++
++#include "d_can.h"
++
++static int __devinit d_can_plat_probe(struct platform_device *pdev)
++{
++ int ret = 0;
++ void __iomem *addr;
++ struct net_device *ndev;
++ struct d_can_priv *priv;
++ struct resource *mem;
++ struct d_can_platform_data *pdata;
++ struct clk *fck;
++
++ pdata = pdev->dev.platform_data;
++ if (!pdata) {
++ dev_err(&pdev->dev, "No platform data\n");
++		ret = -EINVAL;
++		goto exit;
++ }
++
++ /* allocate the d_can device */
++ ndev = alloc_d_can_dev(pdata->num_of_msg_objs);
++ if (!ndev) {
++ ret = -ENOMEM;
++ dev_err(&pdev->dev, "alloc_d_can_dev failed\n");
++ goto exit;
++ }
++
++ priv = netdev_priv(ndev);
++ fck = clk_get(&pdev->dev, "fck");
++ if (IS_ERR(fck)) {
++ dev_err(&pdev->dev, "fck is not found\n");
++ ret = -ENODEV;
++ goto exit_free_ndev;
++ }
++
++ /* get the platform data */
++ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!mem) {
++ ret = -ENODEV;
++ dev_err(&pdev->dev, "No mem resource\n");
++ goto exit_clk_put;
++ }
++
++ if (!request_mem_region(mem->start, resource_size(mem),
++ D_CAN_DRV_NAME)) {
++ dev_err(&pdev->dev, "resource unavailable\n");
++ ret = -EBUSY;
++ goto exit_clk_put;
++ }
++
++ addr = ioremap(mem->start, resource_size(mem));
++ if (!addr) {
++ dev_err(&pdev->dev, "ioremap failed\n");
++ ret = -ENOMEM;
++ goto exit_release_mem;
++ }
++
++	/* IRQ for error and status change; can also be used for message objects */
++ ndev->irq = platform_get_irq_byname(pdev, "d_can_ms");
++ if (!ndev->irq) {
++ dev_err(&pdev->dev, "No irq0 resource\n");
++		ret = -ENODEV;
++		goto exit_iounmap;
++ }
++
++ /* IRQ specific for Message Object */
++ priv->irq_obj = platform_get_irq_byname(pdev, "d_can_mo");
++ if (!priv->irq_obj) {
++ dev_err(&pdev->dev, "No irq1 resource\n");
++		ret = -ENODEV;
++		goto exit_iounmap;
++ }
++
++ pm_runtime_enable(&pdev->dev);
++ pm_runtime_get_sync(&pdev->dev);
++ priv->pdev = pdev;
++ priv->base = addr;
++ priv->can.clock.freq = clk_get_rate(fck);
++ priv->ram_init = pdata->ram_init;
++ priv->opened = false;
++
++ platform_set_drvdata(pdev, ndev);
++ SET_NETDEV_DEV(ndev, &pdev->dev);
++
++ ret = register_d_can_dev(ndev);
++ if (ret) {
++ dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
++ D_CAN_DRV_NAME, ret);
++ goto exit_free_device;
++ }
++
++ /* Initialize DCAN RAM */
++ d_can_reset_ram(priv, pdev->id, 1);
++
++ dev_info(&pdev->dev, "device registered (irq=%d, irq_obj=%d)\n",
++ ndev->irq, priv->irq_obj);
++
++ return 0;
++
++exit_free_device:
++ platform_set_drvdata(pdev, NULL);
++ pm_runtime_disable(&pdev->dev);
++exit_iounmap:
++ iounmap(addr);
++exit_release_mem:
++ release_mem_region(mem->start, resource_size(mem));
++exit_clk_put:
++ clk_put(fck);
++exit_free_ndev:
++ free_d_can_dev(ndev);
++exit:
++ dev_err(&pdev->dev, "probe failed\n");
++
++ return ret;
++}
++
++static int __devexit d_can_plat_remove(struct platform_device *pdev)
++{
++ struct net_device *ndev = platform_get_drvdata(pdev);
++ struct d_can_priv *priv = netdev_priv(ndev);
++ struct resource *mem;
++
++ /* De-initialize DCAN RAM */
++ d_can_reset_ram(priv, pdev->id, 0);
++
++ unregister_d_can_dev(ndev);
++ platform_set_drvdata(pdev, NULL);
++
++ free_d_can_dev(ndev);
++ iounmap(priv->base);
++
++ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ release_mem_region(mem->start, resource_size(mem));
++
++ pm_runtime_put_sync(&pdev->dev);
++ pm_runtime_disable(&pdev->dev);
++
++ return 0;
++}
++
++#ifdef CONFIG_PM
++static int d_can_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ int ret;
++ struct net_device *ndev = platform_get_drvdata(pdev);
++ struct d_can_priv *priv = netdev_priv(ndev);
++
++ if (netif_running(ndev)) {
++ netif_stop_queue(ndev);
++ netif_device_detach(ndev);
++ }
++
++ ret = d_can_power_down(priv);
++ if (ret) {
++		dev_err(&pdev->dev, "Failed to enter power down mode\n");
++ return ret;
++ }
++
++ priv->can.state = CAN_STATE_SLEEPING;
++
++ /* De-initialize DCAN RAM */
++ d_can_reset_ram(priv, pdev->id, 0);
++
++ /* Disable the module */
++ pm_runtime_put_sync(&pdev->dev);
++
++ return 0;
++}
++
++static int d_can_resume(struct platform_device *pdev)
++{
++ int ret;
++
++ struct net_device *ndev = platform_get_drvdata(pdev);
++ struct d_can_priv *priv = netdev_priv(ndev);
++
++ /* Enable the module */
++ pm_runtime_get_sync(&pdev->dev);
++
++ /* Initialize DCAN RAM */
++ d_can_reset_ram(priv, pdev->id, 1);
++
++ ret = d_can_power_up(priv);
++ if (ret) {
++		dev_err(&pdev->dev, "Failed to exit power down mode\n");
++ return ret;
++ }
++
++ priv->can.state = CAN_STATE_ERROR_ACTIVE;
++
++ if (netif_running(ndev)) {
++ netif_device_attach(ndev);
++ netif_start_queue(ndev);
++ }
++
++ return 0;
++}
++#else
++#define d_can_suspend NULL
++#define d_can_resume NULL
++#endif
++
++static struct platform_driver d_can_plat_driver = {
++ .driver = {
++ .name = D_CAN_DRV_NAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = d_can_plat_probe,
++ .remove = __devexit_p(d_can_plat_remove),
++ .suspend = d_can_suspend,
++ .resume = d_can_resume,
++};
++
++static int __init d_can_plat_init(void)
++{
++ printk(KERN_INFO D_CAN_DRV_DESC "\n");
++ return platform_driver_register(&d_can_plat_driver);
++}
++module_init(d_can_plat_init);
++
++static void __exit d_can_plat_exit(void)
++{
++ printk(KERN_INFO D_CAN_DRV_DESC " unloaded\n");
++ platform_driver_unregister(&d_can_plat_driver);
++}
++module_exit(d_can_plat_exit);
++
++MODULE_AUTHOR("AnilKumar Ch <anilkumar@ti.com>");
++MODULE_LICENSE("GPL v2");
++MODULE_VERSION(D_CAN_VERSION);
++MODULE_DESCRIPTION(D_CAN_DRV_DESC);
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index 25695bd..bc1e87e 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -392,7 +392,7 @@ void can_restart(unsigned long data)
+ stats->rx_bytes += cf->can_dlc;
+
+ restart:
+- dev_dbg(dev->dev.parent, "restarted\n");
++ netdev_dbg(dev, "restarted\n");
+ priv->can_stats.restarts++;
+
+ /* Now restart the device */
+@@ -400,7 +400,7 @@ restart:
+
+ netif_carrier_on(dev);
+ if (err)
+- dev_err(dev->dev.parent, "Error %d during restart", err);
++ netdev_err(dev, "error %d during restart", err);
+ }
+
+ int can_restart_now(struct net_device *dev)
+@@ -433,7 +433,7 @@ void can_bus_off(struct net_device *dev)
+ {
+ struct can_priv *priv = netdev_priv(dev);
+
+- dev_dbg(dev->dev.parent, "bus-off\n");
++ netdev_dbg(dev, "bus-off\n");
+
+ netif_carrier_off(dev);
+ priv->can_stats.bus_off++;
+@@ -545,7 +545,7 @@ int open_candev(struct net_device *dev)
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (!priv->bittiming.tq && !priv->bittiming.bitrate) {
+- dev_err(dev->dev.parent, "bit-timing not yet defined\n");
++ netdev_err(dev, "bit-timing not yet defined\n");
+ return -EINVAL;
+ }
+
+diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
+index 330140e..b3e231c 100644
+--- a/drivers/net/can/mcp251x.c
++++ b/drivers/net/can/mcp251x.c
+@@ -93,8 +93,9 @@
+ # define CANCTRL_REQOP_LOOPBACK 0x40
+ # define CANCTRL_REQOP_SLEEP 0x20
+ # define CANCTRL_REQOP_NORMAL 0x00
+-# define CANCTRL_OSM 0x08
+ # define CANCTRL_ABAT 0x10
++# define CANCTRL_OSM 0x08
++# define CANCTRL_CLKEN 0x04
+ #define TEC 0x1c
+ #define REC 0x1d
+ #define CNF1 0x2a
+@@ -287,7 +288,7 @@ static void mcp251x_clean(struct net_device *net)
+ /*
+ * Note about handling of error return of mcp251x_spi_trans: accessing
+ * registers via SPI is not really different conceptually than using
+- * normal I/O assembler instructions, although it's much more
++ * normal I/O assembly instructions, although it's much more
+ * complicated from a practical POV. So it's not advisable to always
+ * check the return value of this function. Imagine that every
+ * read{b,l}, write{b,l} and friends would be bracketed in "if ( < 0)
+@@ -490,7 +491,7 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
+
+ static void mcp251x_hw_sleep(struct spi_device *spi)
+ {
+- mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_SLEEP);
++	/* mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_SLEEP); */
+ }
+
+ static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
+@@ -547,13 +548,16 @@ static int mcp251x_set_normal_mode(struct spi_device *spi)
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
+ /* Put device into loopback mode */
+- mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LOOPBACK);
++ mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LOOPBACK | CANCTRL_CLKEN);
+ } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+ /* Put device into listen-only mode */
+- mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LISTEN_ONLY);
++ mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LISTEN_ONLY | CANCTRL_CLKEN);
+ } else {
+ /* Put device into normal mode */
+- mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL);
++ mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL | CANCTRL_CLKEN);
++
++ netdev_info(priv->net, "CANCTRL: 0x%02x\n",
++ mcp251x_read_reg(spi, CANCTRL));
+
+ /* Wait for the device to enter normal mode */
+ timeout = jiffies + HZ;
+@@ -585,11 +589,15 @@ static int mcp251x_do_set_bittiming(struct net_device *net)
+ (bt->prop_seg - 1));
+ mcp251x_write_bits(spi, CNF3, CNF3_PHSEG2_MASK,
+ (bt->phase_seg2 - 1));
+- dev_info(&spi->dev, "CNF: 0x%02x 0x%02x 0x%02x\n",
++
++ netdev_info(net, "CNF: 0x%02x 0x%02x 0x%02x\n",
+ mcp251x_read_reg(spi, CNF1),
+ mcp251x_read_reg(spi, CNF2),
+ mcp251x_read_reg(spi, CNF3));
+
++ netdev_info(net, "CANCTRL: 0x%02x\n",
++ mcp251x_read_reg(spi, CANCTRL));
++
+ return 0;
+ }
+
+@@ -600,6 +608,7 @@ static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
+
+ mcp251x_write_reg(spi, RXBCTRL(0),
+ RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1);
++
+ mcp251x_write_reg(spi, RXBCTRL(1),
+ RXBCTRL_RXM0 | RXBCTRL_RXM1);
+ return 0;
+@@ -728,7 +737,9 @@ static void mcp251x_tx_work_handler(struct work_struct *ws)
+ mutex_lock(&priv->mcp_lock);
+ if (priv->tx_skb) {
+ if (priv->can.state == CAN_STATE_BUS_OFF) {
++
+ mcp251x_clean(net);
++
+ } else {
+ frame = (struct can_frame *)priv->tx_skb->data;
+
+@@ -827,21 +838,37 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
+
+ /* Update can state */
+ if (eflag & EFLG_TXBO) {
++
++ netdev_err(net, "err: bus off\n");
++
+ new_state = CAN_STATE_BUS_OFF;
+ can_id |= CAN_ERR_BUSOFF;
+ } else if (eflag & EFLG_TXEP) {
++
++ netdev_err(net, "err: txep\n");
++
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ can_id |= CAN_ERR_CRTL;
+ data1 |= CAN_ERR_CRTL_TX_PASSIVE;
++
+ } else if (eflag & EFLG_RXEP) {
++
++ netdev_err(net, "err: rxep\n");
++
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ can_id |= CAN_ERR_CRTL;
+ data1 |= CAN_ERR_CRTL_RX_PASSIVE;
+ } else if (eflag & EFLG_TXWAR) {
++
++ netdev_err(net, "err: txwar\n");
++
+ new_state = CAN_STATE_ERROR_WARNING;
+ can_id |= CAN_ERR_CRTL;
+ data1 |= CAN_ERR_CRTL_TX_WARNING;
+ } else if (eflag & EFLG_RXWAR) {
++
++ netdev_err(net, "err: rxwar\n");
++
+ new_state = CAN_STATE_ERROR_WARNING;
+ can_id |= CAN_ERR_CRTL;
+ data1 |= CAN_ERR_CRTL_RX_WARNING;
+@@ -918,7 +945,7 @@ static int mcp251x_open(struct net_device *net)
+
+ ret = open_candev(net);
+ if (ret) {
+- dev_err(&spi->dev, "unable to set initial baudrate!\n");
++ netdev_err(net, "failed to open can device\n");
+ return ret;
+ }
+
+@@ -934,7 +961,7 @@ static int mcp251x_open(struct net_device *net)
+ pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING,
+ DEVICE_NAME, priv);
+ if (ret) {
+- dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
++ netdev_err(net, "failed to acquire irq %d\n", spi->irq);
+ if (pdata->transceiver_enable)
+ pdata->transceiver_enable(0);
+ close_candev(net);
+@@ -1071,7 +1098,7 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
+
+ ret = register_candev(net);
+ if (!ret) {
+- dev_info(&spi->dev, "probed\n");
++ netdev_info(priv->net, "probed\n");
+ return ret;
+ }
+ error_probe:
+diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+index 8d5d55a..39b9de7 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+@@ -684,7 +684,7 @@ out:
+ /*
+ * Update our accounting state to incorporate the new Free List
+ * buffers, tell the hardware about them and return the number of
+- * bufers which we were able to allocate.
++ * buffers which we were able to allocate.
+ */
+ cred = fl->avail - cred;
+ fl->pend_cred += cred;
+diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
+index de76c70..2594c94 100644
+--- a/drivers/net/ethernet/ti/Kconfig
++++ b/drivers/net/ethernet/ti/Kconfig
+@@ -49,6 +49,27 @@ config TI_DAVINCI_CPDMA
+ To compile this driver as a module, choose M here: the module
+ will be called davinci_cpdma. This is recommended.
+
++config TI_CPSW
++ tristate "TI CPSW Switch Support"
++ depends on ARM && (ARCH_DAVINCI || SOC_OMAPAM33XX)
++ select TI_DAVINCI_CPDMA
++ select TI_DAVINCI_MDIO
++ ---help---
++ This driver supports TI's CPSW Ethernet Switch.
++
++ To compile this driver as a module, choose M here: the module
++ will be called cpsw.
++
++config TLK110_WORKAROUND
++ tristate "TI TLK110 v1.0 PHY Workaround"
++ depends on TI_CPSW
++ help
++	  This enables a workaround for the TLK110 rev 1.0 PHY.
++
++	  Select this option when a TLK110 rev 1.0 PHY is used. Later
++	  revisions of the TLK110 PHY do not require the workaround, so
++	  this option should be disabled for them.
++
+ config TLAN
+ tristate "TI ThunderLAN support"
+ depends on (PCI || EISA)
+diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
+index aedb3af..91bd8bb 100644
+--- a/drivers/net/ethernet/ti/Makefile
++++ b/drivers/net/ethernet/ti/Makefile
+@@ -7,3 +7,5 @@ obj-$(CONFIG_CPMAC) += cpmac.o
+ obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
+ obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
+ obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
++obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
++ti_cpsw-y := cpsw_ale.o cpsw.o
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+new file mode 100644
+index 0000000..86fe57d
+--- /dev/null
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -0,0 +1,1378 @@
++/*
++ * Texas Instruments Ethernet Switch Driver
++ *
++ * Copyright (C) 2010 Texas Instruments
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++#include <linux/kernel.h>
++#include <linux/io.h>
++#include <linux/clk.h>
++#include <linux/timer.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/if_ether.h>
++#include <linux/etherdevice.h>
++#include <linux/ethtool.h>
++#include <linux/netdevice.h>
++#include <linux/phy.h>
++#include <linux/workqueue.h>
++#include <linux/delay.h>
++#include <linux/interrupt.h>
++
++#include <linux/cpsw.h>
++#include <plat/dmtimer.h>
++#include "cpsw_ale.h"
++#include "davinci_cpdma.h"
++
++
++#define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \
++ NETIF_MSG_DRV | NETIF_MSG_LINK | \
++ NETIF_MSG_IFUP | NETIF_MSG_INTR | \
++ NETIF_MSG_PROBE | NETIF_MSG_TIMER | \
++ NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | \
++ NETIF_MSG_TX_ERR | NETIF_MSG_TX_DONE | \
++ NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \
++ NETIF_MSG_RX_STATUS)
++
++#define msg(level, type, format, ...) \
++do { \
++ if (netif_msg_##type(priv) && net_ratelimit()) \
++ dev_##level(priv->dev, format, ## __VA_ARGS__); \
++} while (0)
++
++#define CPDMA_RXTHRESH 0x0c0
++#define CPDMA_RXFREE 0x0e0
++#define CPDMA_TXHDP_VER1 0x100
++#define CPDMA_TXHDP_VER2 0x200
++#define CPDMA_RXHDP_VER1 0x120
++#define CPDMA_RXHDP_VER2 0x220
++#define CPDMA_TXCP_VER1 0x140
++#define CPDMA_TXCP_VER2 0x240
++#define CPDMA_RXCP_VER1 0x160
++#define CPDMA_RXCP_VER2 0x260
++
++#define CPSW_POLL_WEIGHT 64
++#define CPSW_MIN_PACKET_SIZE 60
++#define CPSW_MAX_PACKET_SIZE (1500 + 14 + 4 + 4)
++#define CPSW_PHY_SPEED 1000
++
++/* CPSW control module masks */
++#define CPSW_INTPACEEN (0x3 << 16)
++#define CPSW_INTPRESCALE_MASK (0x7FF << 0)
++#define CPSW_CMINTMAX_CNT 63
++#define CPSW_CMINTMIN_CNT 2
++#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT)
++#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1)
++
++#define cpsw_enable_irq(priv) \
++ do { \
++ u32 i; \
++ for (i = 0; i < priv->num_irqs; i++) \
++ enable_irq(priv->irqs_table[i]); \
++	} while (0)
++#define cpsw_disable_irq(priv) \
++ do { \
++ u32 i; \
++ for (i = 0; i < priv->num_irqs; i++) \
++ disable_irq_nosync(priv->irqs_table[i]); \
++	} while (0)
++
++#define CPSW_CPDMA_EOI_REG 0x894
++#define CPSW_TIMER_MASK 0xA0908
++#define CPSW_TIMER_CAP_REG 0xFD0
++#define CPSW_RX_TIMER_REQ 5
++#define CPSW_TX_TIMER_REQ 6
++
++struct omap_dm_timer *dmtimer_rx;
++struct omap_dm_timer *dmtimer_tx;
++
++extern u32 omap_ctrl_readl(u16 offset);
++extern void omap_ctrl_writel(u32 val, u16 offset);
++
++static int debug_level;
++module_param(debug_level, int, 0);
++MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");
++
++static int ale_ageout = 10;
++module_param(ale_ageout, int, 0);
++MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");
++
++static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
++module_param(rx_packet_max, int, 0);
++MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");
++
++struct cpsw_ss_regs {
++ u32 id_ver;
++ u32 soft_reset;
++ u32 control;
++ u32 int_control;
++ u32 rx_thresh_en;
++ u32 rx_en;
++ u32 tx_en;
++ u32 misc_en;
++ u32 mem_allign1[8];
++ u32 rx_thresh_stat;
++ u32 rx_stat;
++ u32 tx_stat;
++ u32 misc_stat;
++ u32 mem_allign2[8];
++ u32 rx_imax;
++ u32 tx_imax;
++};
++
++struct cpsw_regs {
++ u32 id_ver;
++ u32 control;
++ u32 soft_reset;
++ u32 stat_port_en;
++ u32 ptype;
++ u32 soft_idle;
++};
++
++struct cpsw_slave_regs {
++ u32 max_blks;
++ u32 blk_cnt;
++ u32 flow_thresh;
++ u32 port_vlan;
++ u32 tx_pri_map;
++ u32 ts_seq_mtype;
++#ifdef CONFIG_ARCH_TI814X
++ u32 ts_ctl;
++ u32 ts_seq_ltype;
++ u32 ts_vlan;
++#endif
++ u32 sa_lo;
++ u32 sa_hi;
++};
++
++struct cpsw_host_regs {
++ u32 max_blks;
++ u32 blk_cnt;
++ u32 flow_thresh;
++ u32 port_vlan;
++ u32 tx_pri_map;
++ u32 cpdma_tx_pri_map;
++ u32 cpdma_rx_chan_map;
++};
++
++struct cpsw_sliver_regs {
++ u32 id_ver;
++ u32 mac_control;
++ u32 mac_status;
++ u32 soft_reset;
++ u32 rx_maxlen;
++ u32 __reserved_0;
++ u32 rx_pause;
++ u32 tx_pause;
++ u32 __reserved_1;
++ u32 rx_pri_map;
++};
++
++struct cpsw_hw_stats {
++ u32 rxgoodframes;
++ u32 rxbroadcastframes;
++ u32 rxmulticastframes;
++ u32 rxpauseframes;
++ u32 rxcrcerrors;
++ u32 rxaligncodeerrors;
++ u32 rxoversizedframes;
++ u32 rxjabberframes;
++ u32 rxundersizedframes;
++ u32 rxfragments;
++ u32 __pad_0[2];
++ u32 rxoctets;
++ u32 txgoodframes;
++ u32 txbroadcastframes;
++ u32 txmulticastframes;
++ u32 txpauseframes;
++ u32 txdeferredframes;
++ u32 txcollisionframes;
++ u32 txsinglecollframes;
++ u32 txmultcollframes;
++ u32 txexcessivecollisions;
++ u32 txlatecollisions;
++ u32 txunderrun;
++ u32 txcarriersenseerrors;
++ u32 txoctets;
++ u32 octetframes64;
++ u32 octetframes65t127;
++ u32 octetframes128t255;
++ u32 octetframes256t511;
++ u32 octetframes512t1023;
++ u32 octetframes1024tup;
++ u32 netoctets;
++ u32 rxsofoverruns;
++ u32 rxmofoverruns;
++ u32 rxdmaoverruns;
++};
++
++struct cpsw_slave {
++ struct cpsw_slave_regs __iomem *regs;
++ struct cpsw_sliver_regs __iomem *sliver;
++ int slave_num;
++ u32 mac_control;
++ struct cpsw_slave_data *data;
++ struct phy_device *phy;
++};
++
++struct cpsw_priv {
++ spinlock_t lock;
++ struct platform_device *pdev;
++ struct net_device *ndev;
++ struct resource *cpsw_res;
++ struct resource *cpsw_ss_res;
++ struct napi_struct napi;
++#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi)
++ struct device *dev;
++ struct cpsw_platform_data data;
++ struct cpsw_regs __iomem *regs;
++ struct cpsw_ss_regs __iomem *ss_regs;
++ struct cpsw_hw_stats __iomem *hw_stats;
++ struct cpsw_host_regs __iomem *host_port_regs;
++ u32 msg_enable;
++ u32 coal_intvl;
++ u32 bus_freq_mhz;
++ struct net_device_stats stats;
++ int rx_packet_max;
++ int host_port;
++ struct clk *clk;
++ u8 mac_addr[ETH_ALEN];
++ struct cpsw_slave *slaves;
++#define for_each_slave(priv, func, arg...) \
++ do { \
++ int idx; \
++ for (idx = 0; idx < (priv)->data.slaves; idx++) \
++ (func)((priv)->slaves + idx, ##arg); \
++ } while (0)
++
++ struct cpdma_ctlr *dma;
++ struct cpdma_chan *txch, *rxch;
++ struct cpsw_ale *ale;
++
++ /* snapshot of IRQ numbers */
++ u32 irqs_table[4];
++ u32 num_irqs;
++
++};
++
++static int cpsw_set_coalesce(struct net_device *ndev,
++ struct ethtool_coalesce *coal);
++
++static void cpsw_intr_enable(struct cpsw_priv *priv)
++{
++ __raw_writel(0xFF, &priv->ss_regs->tx_en);
++ __raw_writel(0xFF, &priv->ss_regs->rx_en);
++
++ cpdma_ctlr_int_ctrl(priv->dma, true);
++ return;
++}
++
++static void cpsw_intr_disable(struct cpsw_priv *priv)
++{
++ __raw_writel(0, &priv->ss_regs->tx_en);
++ __raw_writel(0, &priv->ss_regs->rx_en);
++
++ cpdma_ctlr_int_ctrl(priv->dma, false);
++ return;
++}
++
++void cpsw_tx_handler(void *token, int len, int status)
++{
++ struct sk_buff *skb = token;
++ struct net_device *ndev = skb->dev;
++ struct cpsw_priv *priv = netdev_priv(ndev);
++
++ if (unlikely(netif_queue_stopped(ndev)))
++ netif_start_queue(ndev);
++ priv->stats.tx_packets++;
++ priv->stats.tx_bytes += len;
++ dev_kfree_skb_any(skb);
++}
++
++void cpsw_rx_handler(void *token, int len, int status)
++{
++ struct sk_buff *skb = token;
++ struct net_device *ndev = skb->dev;
++ struct cpsw_priv *priv = netdev_priv(ndev);
++ int ret = 0;
++
++ if (unlikely(!netif_running(ndev)) ||
++ unlikely(!netif_carrier_ok(ndev))) {
++ dev_kfree_skb_any(skb);
++ return;
++ }
++
++ if (likely(status >= 0)) {
++ skb_put(skb, len);
++ skb->protocol = eth_type_trans(skb, ndev);
++ netif_receive_skb(skb);
++ priv->stats.rx_bytes += len;
++ priv->stats.rx_packets++;
++ skb = NULL;
++ }
++
++ if (unlikely(!netif_running(ndev))) {
++ if (skb)
++ dev_kfree_skb_any(skb);
++ return;
++ }
++
++ if (likely(!skb)) {
++ skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max);
++ if (WARN_ON(!skb))
++ return;
++
++ ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
++ skb_tailroom(skb), GFP_KERNEL);
++ }
++
++ WARN_ON(ret < 0);
++
++}
++
++static void set_cpsw_dmtimer_clear(void)
++{
++ omap_dm_timer_write_status(dmtimer_rx, OMAP_TIMER_INT_CAPTURE);
++ omap_dm_timer_write_status(dmtimer_tx, OMAP_TIMER_INT_CAPTURE);
++
++ return;
++}
++
++static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
++{
++ struct cpsw_priv *priv = dev_id;
++
++ if (likely(netif_running(priv->ndev))) {
++ cpsw_intr_disable(priv);
++ cpsw_disable_irq(priv);
++ napi_schedule(&priv->napi);
++ }
++
++ return IRQ_HANDLED;
++}
++
++static int cpsw_poll(struct napi_struct *napi, int budget)
++{
++ struct cpsw_priv *priv = napi_to_priv(napi);
++ int num_tx, num_rx;
++
++ num_tx = cpdma_chan_process(priv->txch, 128);
++ num_rx = cpdma_chan_process(priv->rxch, budget);
++
++ if (num_rx || num_tx)
++ msg(dbg, intr, "poll %d rx, %d tx pkts\n", num_rx, num_tx);
++
++ if (num_rx < budget) {
++ napi_complete(napi);
++ cpdma_ctlr_eoi(priv->dma);
++ set_cpsw_dmtimer_clear();
++ cpsw_intr_enable(priv);
++ cpsw_enable_irq(priv);
++ }
++
++ return num_rx;
++}
++
++static inline void soft_reset(const char *module, void __iomem *reg)
++{
++ unsigned long timeout = jiffies + HZ;
++
++ __raw_writel(1, reg);
++ do {
++ cpu_relax();
++ } while ((__raw_readl(reg) & 1) && time_after(timeout, jiffies));
++
++ WARN(__raw_readl(reg) & 1, "failed to soft-reset %s\n", module);
++}
++
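++/*
++ * Helpers used by cpsw_set_slave_mac() below to pack the six MAC address
++ * bytes into the values written to the per-slave sa_hi/sa_lo registers.
++ */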
++#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
++ ((mac)[2] << 16) | ((mac)[3] << 24))
++#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
++
++static void cpsw_set_slave_mac(struct cpsw_slave *slave,
++ struct cpsw_priv *priv)
++{
++ __raw_writel(mac_hi(priv->mac_addr), &slave->regs->sa_hi);
++ __raw_writel(mac_lo(priv->mac_addr), &slave->regs->sa_lo);
++}
++
++static inline u32 cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
++{
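++	/* when the host occupies port 0, slave N sits on switch port N + 1 */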
++ if (priv->host_port == 0)
++ return slave_num + 1;
++ else
++ return slave_num;
++}
++
++static void _cpsw_adjust_link(struct cpsw_slave *slave,
++ struct cpsw_priv *priv, bool *link)
++{
++ struct phy_device *phy = slave->phy;
++ u32 mac_control = 0;
++ u32 slave_port;
++
++ if (!phy)
++ return;
++
++ slave_port = cpsw_get_slave_port(priv, slave->slave_num);
++
++ if (phy->link) {
++ /* enable forwarding */
++ cpsw_ale_control_set(priv->ale, slave_port,
++ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
++
++ mac_control = priv->data.mac_control;
++ if (phy->speed == 10)
++ mac_control |= BIT(18); /* In Band mode */
++ if (phy->speed == 1000) {
++ mac_control |= BIT(7); /* Enable gigabit mode */
++ }
++ if (phy->speed == 100)
++ mac_control |= BIT(15);
++ if (phy->duplex)
++ mac_control |= BIT(0); /* FULLDUPLEXEN */
++ if (phy->interface == PHY_INTERFACE_MODE_RGMII) /* RGMII */
++ mac_control |= (BIT(15)|BIT(16));
++ *link = true;
++ } else {
++ cpsw_ale_control_set(priv->ale, slave_port,
++ ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
++ mac_control = 0;
++ }
++
++ if (mac_control != slave->mac_control) {
++ phy_print_status(phy);
++ __raw_writel(mac_control, &slave->sliver->mac_control);
++ }
++
++ slave->mac_control = mac_control;
++}
++
++static void cpsw_adjust_link(struct net_device *ndev)
++{
++ struct cpsw_priv *priv = netdev_priv(ndev);
++ bool link = false;
++
++ for_each_slave(priv, _cpsw_adjust_link, priv, &link);
++
++ if (link) {
++ netif_carrier_on(ndev);
++ if (netif_running(ndev))
++ netif_wake_queue(ndev);
++ } else {
++ netif_carrier_off(ndev);
++ netif_stop_queue(ndev);
++ }
++}
++
++static inline int __show_stat(char *buf, int maxlen, const char* name, u32 val)
++{
++ static char *leader = "........................................";
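++	/*
++	 * Prints "name ......... value"; skipping strlen(name) characters of
++	 * the dot leader keeps the value column aligned across all stats.
++	 */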
++
++ if (!val)
++ return 0;
++ else
++ return snprintf(buf, maxlen, "%s %s %10d\n", name,
++ leader + strlen(name), val);
++}
++
++static ssize_t cpsw_hw_stats_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct net_device *ndev = to_net_dev(dev);
++ struct cpsw_priv *priv = netdev_priv(ndev);
++ int len = 0;
++ struct cpdma_chan_stats dma_stats;
++
++#define show_stat(x) do { \
++ len += __show_stat(buf + len, SZ_4K - len, #x, \
++ __raw_readl(&priv->hw_stats->x)); \
++} while (0)
++
++#define show_dma_stat(x) do { \
++ len += __show_stat(buf + len, SZ_4K - len, #x, dma_stats.x); \
++} while (0)
++
++ len += snprintf(buf + len, SZ_4K - len, "CPSW Statistics:\n");
++ show_stat(rxgoodframes); show_stat(rxbroadcastframes);
++ show_stat(rxmulticastframes); show_stat(rxpauseframes);
++ show_stat(rxcrcerrors); show_stat(rxaligncodeerrors);
++ show_stat(rxoversizedframes); show_stat(rxjabberframes);
++ show_stat(rxundersizedframes); show_stat(rxfragments);
++ show_stat(rxoctets); show_stat(txgoodframes);
++ show_stat(txbroadcastframes); show_stat(txmulticastframes);
++ show_stat(txpauseframes); show_stat(txdeferredframes);
++ show_stat(txcollisionframes); show_stat(txsinglecollframes);
++ show_stat(txmultcollframes); show_stat(txexcessivecollisions);
++ show_stat(txlatecollisions); show_stat(txunderrun);
++ show_stat(txcarriersenseerrors); show_stat(txoctets);
++ show_stat(octetframes64); show_stat(octetframes65t127);
++ show_stat(octetframes128t255); show_stat(octetframes256t511);
++ show_stat(octetframes512t1023); show_stat(octetframes1024tup);
++ show_stat(netoctets); show_stat(rxsofoverruns);
++ show_stat(rxmofoverruns); show_stat(rxdmaoverruns);
++
++ cpdma_chan_get_stats(priv->rxch, &dma_stats);
++ len += snprintf(buf + len, SZ_4K - len, "\nRX DMA Statistics:\n");
++ show_dma_stat(head_enqueue); show_dma_stat(tail_enqueue);
++ show_dma_stat(pad_enqueue); show_dma_stat(misqueued);
++ show_dma_stat(desc_alloc_fail); show_dma_stat(pad_alloc_fail);
++ show_dma_stat(runt_receive_buff); show_dma_stat(runt_transmit_buff);
++ show_dma_stat(empty_dequeue); show_dma_stat(busy_dequeue);
++ show_dma_stat(good_dequeue); show_dma_stat(teardown_dequeue);
++
++ cpdma_chan_get_stats(priv->txch, &dma_stats);
++ len += snprintf(buf + len, SZ_4K - len, "\nTX DMA Statistics:\n");
++ show_dma_stat(head_enqueue); show_dma_stat(tail_enqueue);
++ show_dma_stat(pad_enqueue); show_dma_stat(misqueued);
++ show_dma_stat(desc_alloc_fail); show_dma_stat(pad_alloc_fail);
++ show_dma_stat(runt_receive_buff); show_dma_stat(runt_transmit_buff);
++ show_dma_stat(empty_dequeue); show_dma_stat(busy_dequeue);
++ show_dma_stat(good_dequeue); show_dma_stat(teardown_dequeue);
++
++ return len;
++}
++
++DEVICE_ATTR(hw_stats, S_IRUGO, cpsw_hw_stats_show, NULL);
++
++#define PHY_CONFIG_REG 22
++static void cpsw_set_phy_config(struct cpsw_priv *priv, struct phy_device *phy)
++{
++ struct cpsw_platform_data *pdata = priv->pdev->dev.platform_data;
++ struct mii_bus *miibus;
++ int phy_addr = 0;
++ u16 val = 0;
++ u16 tmp = 0;
++
++ if (!phy)
++ return;
++
++ miibus = phy->bus;
++
++ if (!miibus)
++ return;
++
++ phy_addr = phy->addr;
++
++ /* Disable 1 Gig mode support if it is not supported */
++ if (!pdata->gigabit_en)
++ phy->supported &= ~(SUPPORTED_1000baseT_Half |
++ SUPPORTED_1000baseT_Full);
++
++	/* The following lines enable gigabit advertisement capability even
++	 * when the advertisement is not enabled by default
++	 */
++ val = miibus->read(miibus, phy_addr, MII_BMCR);
++ val |= (BMCR_SPEED100 | BMCR_ANENABLE | BMCR_FULLDPLX);
++ miibus->write(miibus, phy_addr, MII_BMCR, val);
++ tmp = miibus->read(miibus, phy_addr, MII_BMCR);
++
++ /* Enable gigabit support only if the speed is 1000Mbps */
++ if (phy->speed == CPSW_PHY_SPEED) {
++ tmp = miibus->read(miibus, phy_addr, MII_BMSR);
++ if (tmp & 0x1) {
++ val = miibus->read(miibus, phy_addr, MII_CTRL1000);
++ val |= BIT(9);
++ miibus->write(miibus, phy_addr, MII_CTRL1000, val);
++ tmp = miibus->read(miibus, phy_addr, MII_CTRL1000);
++ }
++ }
++
++ val = miibus->read(miibus, phy_addr, MII_ADVERTISE);
++ val |= (ADVERTISE_10HALF | ADVERTISE_10FULL | \
++ ADVERTISE_100HALF | ADVERTISE_100FULL);
++ miibus->write(miibus, phy_addr, MII_ADVERTISE, val);
++ tmp = miibus->read(miibus, phy_addr, MII_ADVERTISE);
++
++	/* TODO: this check is required, but it should be
++	 * moved to a board init section as it is specific
++	 * to one particular PHY. */
++ if (phy->phy_id == 0x0282F014) {
++ /* This enables TX_CLK-ing in case of 10/100MBps operation */
++ val = miibus->read(miibus, phy_addr, PHY_CONFIG_REG);
++ val |= BIT(5);
++ miibus->write(miibus, phy_addr, PHY_CONFIG_REG, val);
++ tmp = miibus->read(miibus, phy_addr, PHY_CONFIG_REG);
++ }
++
++ return;
++}
++
++static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
++{
++ char name[32];
++ u32 slave_port;
++
++ sprintf(name, "slave-%d", slave->slave_num);
++
++ soft_reset(name, &slave->sliver->soft_reset);
++
++ /* setup priority mapping */
++ __raw_writel(0x76543210, &slave->sliver->rx_pri_map);
++ __raw_writel(0x33221100, &slave->regs->tx_pri_map);
++
++ /* setup max packet size, and mac address */
++ __raw_writel(priv->rx_packet_max, &slave->sliver->rx_maxlen);
++ cpsw_set_slave_mac(slave, priv);
++
++ slave->mac_control = 0; /* no link yet */
++
++ slave_port = cpsw_get_slave_port(priv, slave->slave_num);
++ cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
++ 1 << slave_port);
++
++ slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
++ &cpsw_adjust_link, 0, slave->data->phy_if);
++ if (IS_ERR(slave->phy)) {
++ msg(err, ifup, "phy %s not found on slave %d\n",
++ slave->data->phy_id, slave->slave_num);
++ slave->phy = NULL;
++ } else {
++		printk(KERN_INFO "CPSW phy found: id 0x%x\n",
++ slave->phy->phy_id);
++ cpsw_set_phy_config(priv, slave->phy);
++ phy_start(slave->phy);
++ }
++}
++
++static void cpsw_init_host_port(struct cpsw_priv *priv)
++{
++ /* soft reset the controller and initialize ale */
++ soft_reset("cpsw", &priv->regs->soft_reset);
++ cpsw_ale_start(priv->ale);
++
++ /* switch to vlan unaware mode */
++ cpsw_ale_control_set(priv->ale, 0, ALE_VLAN_AWARE, 0);
++
++ /* setup host port priority mapping */
++ __raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
++ __raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);
++
++ cpsw_ale_control_set(priv->ale, priv->host_port,
++ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
++
++ cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port,
++ 0);
++ /* ALE_SECURE); */
++ cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
++ 1 << priv->host_port);
++}
++
++static int cpsw_ndo_open(struct net_device *ndev)
++{
++ struct cpsw_priv *priv = netdev_priv(ndev);
++ int i, ret;
++ u32 reg;
++
++ cpsw_intr_disable(priv);
++ netif_carrier_off(ndev);
++
++ ret = clk_enable(priv->clk);
++ if (ret < 0) {
++ dev_err(priv->dev, "unable to turn on device clock\n");
++ return ret;
++ }
++
++ ret = device_create_file(&ndev->dev, &dev_attr_hw_stats);
++ if (ret < 0) {
++ dev_err(priv->dev, "unable to add device attr\n");
++ return ret;
++ }
++
++ if (priv->data.phy_control)
++ (*priv->data.phy_control)(true);
++
++ reg = __raw_readl(&priv->regs->id_ver);
++
++ msg(info, ifup, "initializing cpsw version %d.%d (%d)\n",
++ (reg >> 8 & 0x7), reg & 0xff, (reg >> 11) & 0x1f);
++
++ /* initialize host and slave ports */
++ cpsw_init_host_port(priv);
++ for_each_slave(priv, cpsw_slave_open, priv);
++
++ /* setup tx dma to fixed prio and zero offset */
++ cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
++ cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
++
++ /* disable priority elevation and enable statistics on all ports */
++ __raw_writel(0, &priv->regs->ptype);
++
++	/* enable statistics on all ports (host-port-only write kept for reference) */
++ /* __raw_writel(BIT(priv->host_port), &priv->regs->stat_port_en); */
++ __raw_writel(0x7, &priv->regs->stat_port_en);
++
++ if (WARN_ON(!priv->data.rx_descs))
++ priv->data.rx_descs = 128;
++
++ for (i = 0; i < priv->data.rx_descs; i++) {
++ struct sk_buff *skb;
++
++ ret = -ENOMEM;
++ skb = netdev_alloc_skb_ip_align(priv->ndev,
++ priv->rx_packet_max);
++ if (!skb)
++ break;
++ ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
++ skb_tailroom(skb), GFP_KERNEL);
++ if (WARN_ON(ret < 0))
++ break;
++ }
++ /* continue even if we didn't manage to submit all receive descs */
++ msg(info, ifup, "submitted %d rx descriptors\n", i);
++
++ /* Enable Interrupt pacing if configured */
++ if (priv->coal_intvl != 0) {
++ struct ethtool_coalesce coal;
++
++ coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
++ cpsw_set_coalesce(ndev, &coal);
++ }
++
++ /* Enable Timer for capturing cpsw rx interrupts */
++ omap_dm_timer_set_int_enable(dmtimer_rx, OMAP_TIMER_INT_CAPTURE);
++ omap_dm_timer_set_capture(dmtimer_rx, 1, 0, 0);
++ omap_dm_timer_enable(dmtimer_rx);
++
++ /* Enable Timer for capturing cpsw tx interrupts */
++ omap_dm_timer_set_int_enable(dmtimer_tx, OMAP_TIMER_INT_CAPTURE);
++ omap_dm_timer_set_capture(dmtimer_tx, 1, 0, 0);
++ omap_dm_timer_enable(dmtimer_tx);
++
++ cpdma_ctlr_start(priv->dma);
++ cpsw_intr_enable(priv);
++ napi_enable(&priv->napi);
++ cpdma_ctlr_eoi(priv->dma);
++
++ return 0;
++}
++
++static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
++{
++ if (!slave->phy)
++ return;
++ phy_stop(slave->phy);
++ phy_disconnect(slave->phy);
++ slave->phy = NULL;
++}
++
++static int cpsw_ndo_stop(struct net_device *ndev)
++{
++ struct cpsw_priv *priv = netdev_priv(ndev);
++
++ msg(info, ifdown, "shutting down cpsw device\n");
++ cpsw_intr_disable(priv);
++ cpdma_ctlr_int_ctrl(priv->dma, false);
++
++ omap_dm_timer_set_int_enable(dmtimer_rx, 0);
++ omap_dm_timer_set_int_enable(dmtimer_tx, 0);
++
++ netif_stop_queue(priv->ndev);
++ napi_disable(&priv->napi);
++ netif_carrier_off(priv->ndev);
++ cpdma_ctlr_stop(priv->dma);
++ cpsw_ale_stop(priv->ale);
++ device_remove_file(&ndev->dev, &dev_attr_hw_stats);
++ for_each_slave(priv, cpsw_slave_stop, priv);
++ if (priv->data.phy_control)
++ (*priv->data.phy_control)(false);
++ clk_disable(priv->clk);
++ return 0;
++}
++
++static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
++ struct net_device *ndev)
++{
++ struct cpsw_priv *priv = netdev_priv(ndev);
++ int ret;
++
++ ndev->trans_start = jiffies;
++
++ ret = skb_padto(skb, CPSW_MIN_PACKET_SIZE);
++ if (unlikely(ret < 0)) {
++ msg(err, tx_err, "packet pad failed");
++ goto fail;
++ }
++
++ ret = cpdma_chan_submit(priv->txch, skb, skb->data,
++ skb->len, GFP_KERNEL);
++ if (unlikely(ret != 0)) {
++ msg(err, tx_err, "desc submit failed");
++ goto fail;
++ }
++
++ return NETDEV_TX_OK;
++fail:
++ priv->stats.tx_dropped++;
++ netif_stop_queue(ndev);
++ return NETDEV_TX_BUSY;
++}
++
++static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags)
++{
++ /*
++ * The switch cannot operate in promiscuous mode without substantial
++ * headache. For promiscuous mode to work, we would need to put the
++ * ALE in bypass mode and route all traffic to the host port.
++ * Subsequently, the host will need to operate as a "bridge", learn,
++ * and flood as needed. For now, we simply complain here and
++ * do nothing about it :-)
++ */
++ if ((flags & IFF_PROMISC) && (ndev->flags & IFF_PROMISC))
++ dev_err(&ndev->dev, "promiscuity ignored!\n");
++
++ /*
++ * The switch cannot filter multicast traffic unless it is configured
++ * in "VLAN Aware" mode. Unfortunately, VLAN awareness requires a
++ * whole bunch of additional logic that this driver does not implement
++ * at present.
++ */
++ if ((flags & IFF_ALLMULTI) && !(ndev->flags & IFF_ALLMULTI))
++ dev_err(&ndev->dev, "multicast traffic cannot be filtered!\n");
++}
++
++static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
++{
++ struct cpsw_priv *priv = netdev_priv(ndev);
++ struct sockaddr *addr = (struct sockaddr *)p;
++
++ if (!is_valid_ether_addr(addr->sa_data))
++ return -EADDRNOTAVAIL;
++
++ cpsw_ale_del_ucast(priv->ale, priv->mac_addr, priv->host_port);
++
++ memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
++ memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
++
++ cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port,
++ 0);
++ /* ALE_SECURE); */
++ for_each_slave(priv, cpsw_set_slave_mac, priv);
++ return 0;
++}
++
++static void cpsw_ndo_tx_timeout(struct net_device *ndev)
++{
++ struct cpsw_priv *priv = netdev_priv(ndev);
++
++ msg(err, tx_err, "transmit timeout, restarting dma");
++ priv->stats.tx_errors++;
++ cpsw_intr_disable(priv);
++ cpdma_ctlr_int_ctrl(priv->dma, false);
++ cpdma_chan_stop(priv->txch);
++ cpdma_chan_start(priv->txch);
++ cpdma_ctlr_int_ctrl(priv->dma, true);
++ cpsw_intr_enable(priv);
++ cpdma_ctlr_eoi(priv->dma);
++}
++
++static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
++{
++ struct cpsw_priv *priv = netdev_priv(ndev);
++ return &priv->stats;
++}
++
++#ifdef CONFIG_NET_POLL_CONTROLLER
++static void cpsw_ndo_poll_controller(struct net_device *ndev)
++{
++ struct cpsw_priv *priv = netdev_priv(ndev);
++
++ cpsw_intr_disable(priv);
++ cpdma_ctlr_int_ctrl(priv->dma, false);
++ cpsw_interrupt(ndev->irq, priv);
++ cpdma_ctlr_int_ctrl(priv->dma, true);
++ cpsw_intr_enable(priv);
++ cpdma_ctlr_eoi(priv->dma);
++}
++#endif
++
++/**
++ * cpsw_get_coalesce : Get interrupt coalesce settings for this device
++ * @ndev : CPSW network adapter
++ * @coal : ethtool coalesce settings structure
++ *
++ * Fetch the current interrupt coalesce settings
++ *
++ */
++static int cpsw_get_coalesce(struct net_device *ndev,
++ struct ethtool_coalesce *coal)
++{
++ struct cpsw_priv *priv = netdev_priv(ndev);
++
++ coal->rx_coalesce_usecs = priv->coal_intvl;
++ return 0;
++}
++
++/**
++ * cpsw_set_coalesce : Set interrupt coalesce settings for this device
++ * @ndev : CPSW network adapter
++ * @coal : ethtool coalesce settings structure
++ *
++ * Set interrupt coalesce parameters
++ *
++ */
++static int cpsw_set_coalesce(struct net_device *ndev,
++ struct ethtool_coalesce *coal)
++{
++ struct cpsw_priv *priv = netdev_priv(ndev);
++ u32 int_ctrl;
++ u32 num_interrupts = 0;
++ u32 prescale = 0;
++ u32 addnl_dvdr = 1;
++ u32 coal_intvl = 0;
++
++ if (!coal->rx_coalesce_usecs)
++ return -EINVAL;
++
++ coal_intvl = coal->rx_coalesce_usecs;
++
++ int_ctrl = __raw_readl(&priv->ss_regs->int_control);
++ prescale = priv->bus_freq_mhz * 4;
++
++ if (coal_intvl < CPSW_CMINTMIN_INTVL)
++ coal_intvl = CPSW_CMINTMIN_INTVL;
++
++ if (coal_intvl > CPSW_CMINTMAX_INTVL) {
++ /*
++ * Interrupt pacer works with 4us Pulse, we can
++ * throttle further by dilating the 4us pulse.
++ */
++ addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;
++
++ if (addnl_dvdr > 1) {
++ prescale *= addnl_dvdr;
++ if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
++ coal_intvl = (CPSW_CMINTMAX_INTVL
++ * addnl_dvdr);
++ } else {
++ addnl_dvdr = 1;
++ coal_intvl = CPSW_CMINTMAX_INTVL;
++ }
++ }
++
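++	/* convert the requested interval into the interrupt-count limit
++	 * written to rx_imax/tx_imax below */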
++ num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
++
++ int_ctrl |= CPSW_INTPACEEN;
++ int_ctrl &= (~CPSW_INTPRESCALE_MASK);
++ int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
++ __raw_writel(int_ctrl, &priv->ss_regs->int_control);
++
++ __raw_writel(num_interrupts, &priv->ss_regs->rx_imax);
++ __raw_writel(num_interrupts, &priv->ss_regs->tx_imax);
++
++ printk(KERN_INFO"Set coalesce to %d usecs.\n", coal_intvl);
++ priv->coal_intvl = coal_intvl;
++
++ return 0;
++}
++
++static const struct net_device_ops cpsw_netdev_ops = {
++ .ndo_open = cpsw_ndo_open,
++ .ndo_stop = cpsw_ndo_stop,
++ .ndo_start_xmit = cpsw_ndo_start_xmit,
++ .ndo_change_rx_flags = cpsw_ndo_change_rx_flags,
++ .ndo_set_mac_address = cpsw_ndo_set_mac_address,
++ .ndo_validate_addr = eth_validate_addr,
++ .ndo_tx_timeout = cpsw_ndo_tx_timeout,
++ .ndo_get_stats = cpsw_ndo_get_stats,
++#ifdef CONFIG_NET_POLL_CONTROLLER
++ .ndo_poll_controller = cpsw_ndo_poll_controller,
++#endif
++};
++
++static void cpsw_get_drvinfo(struct net_device *ndev,
++ struct ethtool_drvinfo *info)
++{
++ struct cpsw_priv *priv = netdev_priv(ndev);
++ strcpy(info->driver, "TI CPSW Driver v1.0");
++ strcpy(info->version, "1.0");
++ strcpy(info->bus_info, priv->pdev->name);
++}
++
++static u32 cpsw_get_msglevel(struct net_device *ndev)
++{
++ struct cpsw_priv *priv = netdev_priv(ndev);
++ return priv->msg_enable;
++}
++
++static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
++{
++ struct cpsw_priv *priv = netdev_priv(ndev);
++ priv->msg_enable = value;
++}
++
++static const struct ethtool_ops cpsw_ethtool_ops = {
++ .get_drvinfo = cpsw_get_drvinfo,
++ .get_msglevel = cpsw_get_msglevel,
++ .set_msglevel = cpsw_set_msglevel,
++ .get_link = ethtool_op_get_link,
++ .get_coalesce = cpsw_get_coalesce,
++ .set_coalesce = cpsw_set_coalesce,
++};
++
++static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
++{
++ void __iomem *regs = priv->regs;
++ int slave_num = slave->slave_num;
++ struct cpsw_slave_data *data = priv->data.slave_data + slave_num;
++
++ slave->data = data;
++ slave->regs = regs + data->slave_reg_ofs;
++ slave->sliver = regs + data->sliver_reg_ofs;
++}
++
++static int __devinit cpsw_probe(struct platform_device *pdev)
++{
++ struct cpsw_platform_data *data = pdev->dev.platform_data;
++ struct net_device *ndev;
++ struct cpsw_priv *priv;
++ struct cpdma_params dma_params;
++ struct cpsw_ale_params ale_params;
++ void __iomem *regs;
++ int ret = 0, i, k = 0;
++
++ if (!data) {
++ pr_err("cpsw: platform data missing\n");
++ return -ENODEV;
++ }
++
++ ndev = alloc_etherdev(sizeof(struct cpsw_priv));
++ if (!ndev) {
++ pr_err("cpsw: error allocating net_device\n");
++ return -ENOMEM;
++ }
++
++ platform_set_drvdata(pdev, ndev);
++ priv = netdev_priv(ndev);
++ spin_lock_init(&priv->lock);
++ priv->data = *data;
++ priv->pdev = pdev;
++ priv->ndev = ndev;
++ priv->dev = &ndev->dev;
++ priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
++ priv->rx_packet_max = max(rx_packet_max, 128);
++
++ if (is_valid_ether_addr(data->mac_addr)) {
++ memcpy(priv->mac_addr, data->mac_addr, ETH_ALEN);
++ printk(KERN_INFO"Detected MACID=%x:%x:%x:%x:%x:%x\n",
++ priv->mac_addr[0], priv->mac_addr[1],
++ priv->mac_addr[2], priv->mac_addr[3],
++ priv->mac_addr[4], priv->mac_addr[5]);
++ } else {
++ random_ether_addr(priv->mac_addr);
++ printk(KERN_INFO"Random MACID=%x:%x:%x:%x:%x:%x\n",
++ priv->mac_addr[0], priv->mac_addr[1],
++ priv->mac_addr[2], priv->mac_addr[3],
++ priv->mac_addr[4], priv->mac_addr[5]);
++ }
++
++ memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
++
++ priv->slaves = kzalloc(sizeof(struct cpsw_slave) * data->slaves,
++ GFP_KERNEL);
++ if (!priv->slaves) {
++ dev_err(priv->dev, "failed to allocate slave ports\n");
++ ret = -EBUSY;
++ goto clean_ndev_ret;
++ }
++ for (i = 0; i < data->slaves; i++)
++ priv->slaves[i].slave_num = i;
++
++ priv->clk = clk_get(&pdev->dev, NULL);
++ if (IS_ERR(priv->clk))
++ dev_err(priv->dev, "failed to get device clock\n");
++
++ priv->coal_intvl = 0;
++ priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000;
++
++ priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!priv->cpsw_res) {
++ dev_err(priv->dev, "error getting i/o resource\n");
++ ret = -ENOENT;
++ goto clean_clk_ret;
++ }
++
++ if (!request_mem_region(priv->cpsw_res->start,
++ resource_size(priv->cpsw_res), ndev->name)) {
++ dev_err(priv->dev, "failed request i/o region\n");
++ ret = -ENXIO;
++ goto clean_clk_ret;
++ }
++
++ regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res));
++ if (!regs) {
++ dev_err(priv->dev, "unable to map i/o region\n");
++ goto clean_cpsw_iores_ret;
++ }
++ priv->regs = regs;
++ priv->host_port = data->host_port_num;
++ priv->host_port_regs = regs + data->host_port_reg_ofs;
++ priv->hw_stats = regs + data->hw_stats_reg_ofs;
++
++ priv->cpsw_ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
++ if (!priv->cpsw_ss_res) {
++ dev_err(priv->dev, "error getting i/o resource\n");
++ ret = -ENOENT;
++ goto clean_clk_ret;
++ }
++
++ if (!request_mem_region(priv->cpsw_ss_res->start,
++ resource_size(priv->cpsw_ss_res), ndev->name)) {
++ dev_err(priv->dev, "failed request i/o region\n");
++ ret = -ENXIO;
++ goto clean_clk_ret;
++ }
++
++ regs = ioremap(priv->cpsw_ss_res->start,
++ resource_size(priv->cpsw_ss_res));
++ if (!regs) {
++ dev_err(priv->dev, "unable to map i/o region\n");
++ goto clean_cpsw_ss_iores_ret;
++ }
++ priv->ss_regs = regs;
++
++
++ for_each_slave(priv, cpsw_slave_init, priv);
++
++ omap_ctrl_writel(CPSW_TIMER_MASK, CPSW_TIMER_CAP_REG);
++
++ dmtimer_rx = omap_dm_timer_request_specific(CPSW_RX_TIMER_REQ);
++ if (!dmtimer_rx) {
++ dev_err(priv->dev, "Error getting Rx Timer resource\n");
++ ret = -ENODEV;
++ goto clean_iomap_ret;
++ }
++ dmtimer_tx = omap_dm_timer_request_specific(CPSW_TX_TIMER_REQ);
++ if (!dmtimer_tx) {
++ dev_err(priv->dev, "Error getting Tx Timer resource\n");
++ ret = -ENODEV;
++ goto clean_timer_rx_ret;
++ }
++
++ memset(&dma_params, 0, sizeof(dma_params));
++ dma_params.dev = &pdev->dev;
++ dma_params.dmaregs = (void __iomem *)(((u32)priv->regs) +
++ data->cpdma_reg_ofs);
++ dma_params.rxthresh = (void __iomem *)(((u32)priv->regs) +
++ data->cpdma_reg_ofs + CPDMA_RXTHRESH);
++ dma_params.rxfree = (void __iomem *)(((u32)priv->regs) +
++ data->cpdma_reg_ofs + CPDMA_RXFREE);
++
++ if (data->version == CPSW_VERSION_2) {
++ dma_params.txhdp = (void __iomem *)(((u32)priv->regs) +
++ data->cpdma_reg_ofs + CPDMA_TXHDP_VER2);
++ dma_params.rxhdp = (void __iomem *)(((u32)priv->regs) +
++ data->cpdma_reg_ofs + CPDMA_RXHDP_VER2);
++ dma_params.txcp = (void __iomem *)(((u32)priv->regs) +
++ data->cpdma_reg_ofs + CPDMA_TXCP_VER2);
++ dma_params.rxcp = (void __iomem *)(((u32)priv->regs) +
++ data->cpdma_reg_ofs + CPDMA_RXCP_VER2);
++ } else {
++ dma_params.txhdp = (void __iomem *)(((u32)priv->regs) +
++ data->cpdma_reg_ofs + CPDMA_TXHDP_VER1);
++ dma_params.rxhdp = (void __iomem *)(((u32)priv->regs) +
++ data->cpdma_reg_ofs + CPDMA_RXHDP_VER1);
++ dma_params.txcp = (void __iomem *)(((u32)priv->regs) +
++ data->cpdma_reg_ofs + CPDMA_TXCP_VER1);
++ dma_params.rxcp = (void __iomem *)(((u32)priv->regs) +
++ data->cpdma_reg_ofs + CPDMA_RXCP_VER1);
++ }
++
++ dma_params.num_chan = data->channels;
++ dma_params.has_soft_reset = true;
++ dma_params.min_packet_size = CPSW_MIN_PACKET_SIZE;
++ dma_params.desc_mem_size = data->bd_ram_size;
++ dma_params.desc_align = 16;
++ dma_params.has_ext_regs = true;
++ dma_params.desc_mem_phys = data->no_bd_ram ? 0 :
++ (u32 __force)priv->cpsw_res->start + data->bd_ram_ofs;
++ dma_params.desc_hw_addr = data->hw_ram_addr ?
++		data->hw_ram_addr : dma_params.desc_mem_phys;
++
++ priv->dma = cpdma_ctlr_create(&dma_params);
++ if (!priv->dma) {
++ dev_err(priv->dev, "error initializing dma\n");
++ ret = -ENOMEM;
++ goto clean_timer_ret;
++ }
++
++ priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
++ cpsw_tx_handler);
++ priv->rxch = cpdma_chan_create(priv->dma, rx_chan_num(0),
++ cpsw_rx_handler);
++
++ if (WARN_ON(!priv->txch || !priv->rxch)) {
++ dev_err(priv->dev, "error initializing dma channels\n");
++ ret = -ENOMEM;
++ goto clean_dma_ret;
++ }
++
++ memset(&ale_params, 0, sizeof(ale_params));
++ ale_params.dev = &ndev->dev;
++ ale_params.ale_regs = (void *)((u32)priv->regs) +
++ ((u32)data->ale_reg_ofs);
++ ale_params.ale_ageout = ale_ageout;
++ ale_params.ale_entries = data->ale_entries;
++ ale_params.ale_ports = data->slaves;
++
++ priv->ale = cpsw_ale_create(&ale_params);
++ if (!priv->ale) {
++ dev_err(priv->dev, "error initializing ale engine\n");
++ ret = -ENODEV;
++ goto clean_dma_ret;
++ }
++
++ while ((i = platform_get_irq(pdev, k)) >= 0) {
++ if (request_irq(i, cpsw_interrupt, IRQF_DISABLED,
++ dev_name(&pdev->dev), priv)) {
++ dev_err(priv->dev, "error attaching irq\n");
++ goto clean_ale_ret;
++ }
++ priv->irqs_table[k] = i;
++ priv->num_irqs = ++k;
++ }
++
++ ndev->flags |= IFF_ALLMULTI; /* see cpsw_ndo_change_rx_flags() */
++
++ ndev->netdev_ops = &cpsw_netdev_ops;
++ SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
++ netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);
++
++ /* register the network device */
++ SET_NETDEV_DEV(ndev, &pdev->dev);
++ ret = register_netdev(ndev);
++ if (ret) {
++ dev_err(priv->dev, "error registering net device\n");
++ ret = -ENODEV;
++ goto clean_irq_ret;
++ }
++
++ msg(notice, probe, "initialized device (regs %x, irq %d)\n",
++ priv->cpsw_res->start, ndev->irq);
++
++ return 0;
++
++clean_irq_ret:
++ free_irq(ndev->irq, priv);
++clean_ale_ret:
++ cpsw_ale_destroy(priv->ale);
++clean_dma_ret:
++ cpdma_chan_destroy(priv->txch);
++ cpdma_chan_destroy(priv->rxch);
++ cpdma_ctlr_destroy(priv->dma);
++clean_timer_ret:
++ omap_dm_timer_free(dmtimer_tx);
++clean_timer_rx_ret:
++ omap_dm_timer_free(dmtimer_rx);
++clean_iomap_ret:
++ iounmap(priv->regs);
++clean_cpsw_ss_iores_ret:
++ release_mem_region(priv->cpsw_ss_res->start,
++ resource_size(priv->cpsw_ss_res));
++clean_cpsw_iores_ret:
++ release_mem_region(priv->cpsw_res->start,
++ resource_size(priv->cpsw_res));
++clean_clk_ret:
++ clk_put(priv->clk);
++ kfree(priv->slaves);
++clean_ndev_ret:
++ free_netdev(ndev);
++ return ret;
++}
++
++static int __devexit cpsw_remove(struct platform_device *pdev)
++{
++ struct net_device *ndev = platform_get_drvdata(pdev);
++ struct cpsw_priv *priv = netdev_priv(ndev);
++ u32 i;
++
++ msg(notice, probe, "removing device\n");
++ platform_set_drvdata(pdev, NULL);
++
++ omap_dm_timer_free(dmtimer_rx);
++ omap_dm_timer_free(dmtimer_tx);
++ for (i = 0; i < priv->num_irqs; i++)
++ free_irq(priv->irqs_table[i], priv);
++ cpsw_ale_destroy(priv->ale);
++ cpdma_chan_destroy(priv->txch);
++ cpdma_chan_destroy(priv->rxch);
++ cpdma_ctlr_destroy(priv->dma);
++ iounmap(priv->regs);
++ release_mem_region(priv->cpsw_res->start,
++ resource_size(priv->cpsw_res));
++ release_mem_region(priv->cpsw_ss_res->start,
++ resource_size(priv->cpsw_ss_res));
++ clk_put(priv->clk);
++ kfree(priv->slaves);
++ unregister_netdev(ndev);
++ free_netdev(ndev);
++
++ return 0;
++}
++
++static int cpsw_suspend(struct device *dev)
++{
++ struct platform_device *pdev = to_platform_device(dev);
++ struct net_device *ndev = platform_get_drvdata(pdev);
++ struct cpsw_priv *priv = netdev_priv(ndev);
++
++ if (netif_running(ndev))
++ cpsw_ndo_stop(ndev);
++
++ soft_reset("cpsw", &priv->regs->soft_reset);
++ soft_reset("sliver 0", &priv->slaves[0].sliver->soft_reset);
++ soft_reset("sliver 1", &priv->slaves[1].sliver->soft_reset);
++ soft_reset("cpsw_ss", &priv->ss_regs->soft_reset);
++
++ return 0;
++}
++
++static int cpsw_resume(struct device *dev)
++{
++ struct platform_device *pdev = to_platform_device(dev);
++ struct net_device *ndev = platform_get_drvdata(pdev);
++
++ if (netif_running(ndev))
++ cpsw_ndo_open(ndev);
++ return 0;
++}
++
++static const struct dev_pm_ops cpsw_pm_ops = {
++ .suspend = cpsw_suspend,
++ .resume = cpsw_resume,
++};
++
++static struct platform_driver cpsw_driver = {
++ .driver = {
++ .name = "cpsw",
++ .owner = THIS_MODULE,
++ .pm = &cpsw_pm_ops,
++ },
++ .probe = cpsw_probe,
++ .remove = __devexit_p(cpsw_remove),
++};
++
++static int __init cpsw_init(void)
++{
++ return platform_driver_register(&cpsw_driver);
++}
++late_initcall(cpsw_init);
++
++static void __exit cpsw_exit(void)
++{
++ platform_driver_unregister(&cpsw_driver);
++}
++module_exit(cpsw_exit);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("TI CPSW Ethernet driver");
+diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
+new file mode 100644
+index 0000000..9639c31
+--- /dev/null
++++ b/drivers/net/ethernet/ti/cpsw_ale.c
+@@ -0,0 +1,702 @@
++/*
++ * Texas Instruments 3-Port Ethernet Switch Address Lookup Engine
++ *
++ * Copyright (C) 2010 Texas Instruments
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++#include <linux/kernel.h>
++#include <linux/platform_device.h>
++#include <linux/seq_file.h>
++#include <linux/slab.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/stat.h>
++#include <linux/sysfs.h>
++#include <linux/export.h>
++#include <linux/module.h>
++
++#include "cpsw_ale.h"
++
++#define BITMASK(bits) (BIT(bits) - 1)
++#define ADDR_FMT_STR "%02x:%02x:%02x:%02x:%02x:%02x"
++#define ADDR_FMT_ARGS(addr) (addr)[0], (addr)[1], (addr)[2], \
++ (addr)[3], (addr)[4], (addr)[5]
++#define ALE_ENTRY_BITS 68
++#define ALE_ENTRY_WORDS DIV_ROUND_UP(ALE_ENTRY_BITS, 32)
++
++/* ALE Registers */
++#define ALE_IDVER 0x00
++#define ALE_CONTROL 0x08
++#define ALE_PRESCALE 0x10
++#define ALE_UNKNOWNVLAN 0x18
++#define ALE_TABLE_CONTROL 0x20
++#define ALE_TABLE 0x34
++#define ALE_PORTCTL 0x40
++
++#define ALE_TABLE_WRITE BIT(31)
++
++#define ALE_TYPE_FREE 0
++#define ALE_TYPE_ADDR 1
++#define ALE_TYPE_VLAN 2
++#define ALE_TYPE_VLAN_ADDR 3
++
++#define ALE_UCAST_PERSISTANT 0
++#define ALE_UCAST_UNTOUCHED 1
++#define ALE_UCAST_OUI 2
++#define ALE_UCAST_TOUCHED 3
++
++#define ALE_MCAST_FWD 0
++#define ALE_MCAST_BLOCK_LEARN_FWD 1
++#define ALE_MCAST_FWD_LEARN 2
++#define ALE_MCAST_FWD_2 3
++
++/* the following remap params into members of cpsw_ale */
++#define ale_regs params.ale_regs
++#define ale_entries params.ale_entries
++#define ale_ports params.ale_ports
++
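++/*
++ * Each ALE table entry is ALE_ENTRY_BITS (68) bits wide and is handled as
++ * three 32-bit words.  ale_entry[] holds the most significant word first,
++ * so the "2 - idx" flip below maps a bit offset within the entry onto the
++ * correct array index.
++ */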
++static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
++{
++ int idx;
++
++ idx = start / 32;
++ start -= idx * 32;
++ idx = 2 - idx; /* flip */
++ return (ale_entry[idx] >> start) & BITMASK(bits);
++}
++
++static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
++ u32 value)
++{
++ int idx;
++
++ value &= BITMASK(bits);
++ idx = start / 32;
++ start -= idx * 32;
++ idx = 2 - idx; /* flip */
++ ale_entry[idx] &= ~(BITMASK(bits) << start);
++ ale_entry[idx] |= (value << start);
++}
++
++#define DEFINE_ALE_FIELD(name, start, bits) \
++static inline int cpsw_ale_get_##name(u32 *ale_entry) \
++{ \
++ return cpsw_ale_get_field(ale_entry, start, bits); \
++} \
++static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \
++{ \
++ cpsw_ale_set_field(ale_entry, start, bits, value); \
++}
++
++DEFINE_ALE_FIELD(entry_type, 60, 2)
++DEFINE_ALE_FIELD(vlan_id, 48, 12)
++DEFINE_ALE_FIELD(mcast_state, 62, 2)
++DEFINE_ALE_FIELD(port_mask, 66, 3)
++DEFINE_ALE_FIELD(super, 65, 1)
++DEFINE_ALE_FIELD(ucast_type, 62, 2)
++DEFINE_ALE_FIELD(port_num, 66, 2)
++DEFINE_ALE_FIELD(blocked, 65, 1)
++DEFINE_ALE_FIELD(secure, 64, 1)
++DEFINE_ALE_FIELD(vlan_untag_force, 24, 3)
++DEFINE_ALE_FIELD(vlan_reg_mcast, 16, 3)
++DEFINE_ALE_FIELD(vlan_unreg_mcast, 8, 3)
++DEFINE_ALE_FIELD(vlan_member_list, 0, 3)
++DEFINE_ALE_FIELD(mcast, 40, 1)
++
++/* The MAC address field in the ALE entry cannot be macroized as above */
++static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
++{
++ int i;
++
++ for (i = 0; i < 6; i++)
++ addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
++}
++
++static inline void cpsw_ale_set_addr(u32 *ale_entry, u8 *addr)
++{
++ int i;
++
++ for (i = 0; i < 6; i++)
++ cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
++}
++
++static int cpsw_ale_read(struct cpsw_ale *ale, int idx, u32 *ale_entry)
++{
++ int i;
++
++ WARN_ON(idx > ale->ale_entries);
++
++ __raw_writel(idx, ale->ale_regs + ALE_TABLE_CONTROL);
++
++ for (i = 0; i < ALE_ENTRY_WORDS; i++)
++ ale_entry[i] = __raw_readl(ale->ale_regs + ALE_TABLE + 4 * i);
++
++ return idx;
++}
++
++static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry)
++{
++ int i;
++
++ WARN_ON(idx > ale->ale_entries);
++
++ for (i = 0; i < ALE_ENTRY_WORDS; i++)
++ __raw_writel(ale_entry[i], ale->ale_regs + ALE_TABLE + 4 * i);
++
++ __raw_writel(idx | ALE_TABLE_WRITE, ale->ale_regs + ALE_TABLE_CONTROL);
++
++ return idx;
++}
++
++static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8* addr)
++{
++ u32 ale_entry[ALE_ENTRY_WORDS];
++ int type, idx;
++
++ for (idx = 0; idx < ale->ale_entries; idx++) {
++ u8 entry_addr[6];
++
++ cpsw_ale_read(ale, idx, ale_entry);
++ type = cpsw_ale_get_entry_type(ale_entry);
++ if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
++ continue;
++ cpsw_ale_get_addr(ale_entry, entry_addr);
++ if (memcmp(entry_addr, addr, 6) == 0)
++ return idx;
++ }
++ return -ENOENT;
++}
++
++static int cpsw_ale_match_free(struct cpsw_ale *ale)
++{
++ u32 ale_entry[ALE_ENTRY_WORDS];
++ int type, idx;
++
++ for (idx = 0; idx < ale->ale_entries; idx++) {
++ cpsw_ale_read(ale, idx, ale_entry);
++ type = cpsw_ale_get_entry_type(ale_entry);
++ if (type == ALE_TYPE_FREE)
++ return idx;
++ }
++ return -ENOENT;
++}
++
++static int cpsw_ale_find_ageable(struct cpsw_ale *ale)
++{
++ u32 ale_entry[ALE_ENTRY_WORDS];
++ int type, idx;
++
++ for (idx = 0; idx < ale->ale_entries; idx++) {
++ cpsw_ale_read(ale, idx, ale_entry);
++ type = cpsw_ale_get_entry_type(ale_entry);
++ if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
++ continue;
++ if (cpsw_ale_get_mcast(ale_entry))
++ continue;
++ type = cpsw_ale_get_ucast_type(ale_entry);
++ if (type != ALE_UCAST_PERSISTANT &&
++ type != ALE_UCAST_OUI)
++ return idx;
++ }
++ return -ENOENT;
++}
++
++static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
++ int port_mask)
++{
++ int mask;
++
++ mask = cpsw_ale_get_port_mask(ale_entry);
++ if ((mask & port_mask) == 0)
++		return; /* ports don't intersect, not interested */
++ mask &= ~port_mask;
++
++ /* free if only remaining port is host port */
++ if (mask == BIT(ale->ale_ports))
++ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
++ else
++ cpsw_ale_set_port_mask(ale_entry, mask);
++}
++
++static void cpsw_ale_flush_ucast(struct cpsw_ale *ale, u32 *ale_entry,
++ int port_mask)
++{
++ int port;
++
++ port = cpsw_ale_get_port_num(ale_entry);
++ if ((BIT(port) & port_mask) == 0)
++		return; /* ports don't intersect, not interested */
++ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
++}
++
++int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask)
++{
++ u32 ale_entry[ALE_ENTRY_WORDS];
++ int ret, idx;
++
++ for (idx = 0; idx < ale->ale_entries; idx++) {
++ cpsw_ale_read(ale, idx, ale_entry);
++ ret = cpsw_ale_get_entry_type(ale_entry);
++ if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
++ continue;
++
++ if (cpsw_ale_get_mcast(ale_entry))
++ cpsw_ale_flush_mcast(ale, ale_entry, port_mask);
++ else
++ cpsw_ale_flush_ucast(ale, ale_entry, port_mask);
++
++ cpsw_ale_write(ale, idx, ale_entry);
++ }
++ return 0;
++}
++
++static int cpsw_ale_dump_mcast(u32 *ale_entry, char *buf, int len)
++{
++ int outlen = 0;
++ static const char *str_mcast_state[] = {"f", "blf", "lf", "f"};
++ int mcast_state = cpsw_ale_get_mcast_state(ale_entry);
++ int port_mask = cpsw_ale_get_port_mask(ale_entry);
++ int super = cpsw_ale_get_super(ale_entry);
++
++ outlen += snprintf(buf + outlen, len - outlen,
++ "mcstate: %s(%d), ", str_mcast_state[mcast_state],
++ mcast_state);
++ outlen += snprintf(buf + outlen, len - outlen,
++ "port mask: %x, %ssuper\n", port_mask,
++ super ? "" : "no ");
++ return outlen;
++}
++
++static int cpsw_ale_dump_ucast(u32 *ale_entry, char *buf, int len)
++{
++ int outlen = 0;
++ static const char *str_ucast_type[] = {"persistant", "untouched",
++ "oui", "touched"};
++ int ucast_type = cpsw_ale_get_ucast_type(ale_entry);
++ int port_num = cpsw_ale_get_port_num(ale_entry);
++ int secure = cpsw_ale_get_secure(ale_entry);
++ int blocked = cpsw_ale_get_blocked(ale_entry);
++
++ outlen += snprintf(buf + outlen, len - outlen,
++ "uctype: %s(%d), ", str_ucast_type[ucast_type],
++ ucast_type);
++ outlen += snprintf(buf + outlen, len - outlen,
++ "port: %d%s%s\n", port_num, secure ? ", Secure" : "",
++ blocked ? ", Blocked" : "");
++ return outlen;
++}
++
++static int cpsw_ale_dump_entry(int idx, u32 *ale_entry, char *buf, int len)
++{
++ int type, outlen = 0;
++ u8 addr[6];
++ static const char *str_type[] = {"free", "addr", "vlan", "vlan+addr"};
++
++ type = cpsw_ale_get_entry_type(ale_entry);
++ if (type == ALE_TYPE_FREE)
++ return outlen;
++
++ if (idx >= 0) {
++ outlen += snprintf(buf + outlen, len - outlen,
++ "index %d, ", idx);
++ }
++
++ outlen += snprintf(buf + outlen, len - outlen, "raw: %08x %08x %08x, ",
++ ale_entry[0], ale_entry[1], ale_entry[2]);
++
++ outlen += snprintf(buf + outlen, len - outlen,
++ "type: %s(%d), ", str_type[type], type);
++
++ cpsw_ale_get_addr(ale_entry, addr);
++ outlen += snprintf(buf + outlen, len - outlen,
++ "addr: " ADDR_FMT_STR ", ", ADDR_FMT_ARGS(addr));
++
++ if (type == ALE_TYPE_VLAN || type == ALE_TYPE_VLAN_ADDR) {
++ outlen += snprintf(buf + outlen, len - outlen, "vlan: %d, ",
++ cpsw_ale_get_vlan_id(ale_entry));
++ }
++
++ outlen += cpsw_ale_get_mcast(ale_entry) ?
++ cpsw_ale_dump_mcast(ale_entry, buf + outlen, len - outlen) :
++ cpsw_ale_dump_ucast(ale_entry, buf + outlen, len - outlen);
++
++ return outlen;
++}
++
++int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags)
++{
++ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
++ int idx;
++
++ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
++ cpsw_ale_set_addr(ale_entry, addr);
++ cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
++ cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
++ cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
++ cpsw_ale_set_port_num(ale_entry, port);
++
++ idx = cpsw_ale_match_addr(ale, addr);
++ if (idx < 0)
++ idx = cpsw_ale_match_free(ale);
++ if (idx < 0)
++ idx = cpsw_ale_find_ageable(ale);
++ if (idx < 0)
++ return -ENOMEM;
++
++ cpsw_ale_write(ale, idx, ale_entry);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(cpsw_ale_add_ucast);
++
++int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port)
++{
++ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
++ int idx;
++
++ idx = cpsw_ale_match_addr(ale, addr);
++ if (idx < 0)
++ return -ENOENT;
++
++ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
++ cpsw_ale_write(ale, idx, ale_entry);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(cpsw_ale_del_ucast);
++
++int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask)
++{
++ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
++ int idx, mask;
++
++ idx = cpsw_ale_match_addr(ale, addr);
++ if (idx >= 0)
++ cpsw_ale_read(ale, idx, ale_entry);
++
++ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
++ cpsw_ale_set_addr(ale_entry, addr);
++ cpsw_ale_set_mcast_state(ale_entry, ALE_MCAST_FWD_2);
++
++ mask = cpsw_ale_get_port_mask(ale_entry);
++ port_mask |= mask;
++ cpsw_ale_set_port_mask(ale_entry, port_mask);
++
++ if (idx < 0)
++ idx = cpsw_ale_match_free(ale);
++ if (idx < 0)
++ idx = cpsw_ale_find_ageable(ale);
++ if (idx < 0)
++ return -ENOMEM;
++
++ cpsw_ale_write(ale, idx, ale_entry);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(cpsw_ale_add_mcast);
++
++int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask)
++{
++ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
++ int idx, mask;
++
++ idx = cpsw_ale_match_addr(ale, addr);
++ if (idx < 0)
++ return -EINVAL;
++
++ cpsw_ale_read(ale, idx, ale_entry);
++ mask = cpsw_ale_get_port_mask(ale_entry);
++ port_mask = mask & ~port_mask;
++
++ if (port_mask == BIT(ale->ale_ports))
++ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
++ else
++ cpsw_ale_set_port_mask(ale_entry, port_mask);
++
++ cpsw_ale_write(ale, idx, ale_entry);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(cpsw_ale_del_mcast);
++
++struct ale_control_info {
++ const char *name;
++ int offset, port_offset;
++ int shift, port_shift;
++ int bits;
++};
++
++#define CTRL_GLOBAL(name, bit) {#name, ALE_CONTROL, 0, bit, 0, 1}
++#define CTRL_UNK(name, bit) {#name, ALE_UNKNOWNVLAN, 0, bit, 1, 1}
++#define CTRL_PORTCTL(name, start, bits) {#name, ALE_PORTCTL, 4, start, 0, bits}
++
++static struct ale_control_info ale_controls[] = {
++ [ALE_ENABLE] = CTRL_GLOBAL(enable, 31),
++ [ALE_CLEAR] = CTRL_GLOBAL(clear, 30),
++ [ALE_AGEOUT] = CTRL_GLOBAL(ageout, 29),
++ [ALE_VLAN_NOLEARN] = CTRL_GLOBAL(vlan_nolearn, 7),
++ [ALE_NO_PORT_VLAN] = CTRL_GLOBAL(no_port_vlan, 6),
++ [ALE_OUI_DENY] = CTRL_GLOBAL(oui_deny, 5),
++ [ALE_BYPASS] = CTRL_GLOBAL(bypass, 4),
++ [ALE_RATE_LIMIT_TX] = CTRL_GLOBAL(rate_limit_tx, 3),
++ [ALE_VLAN_AWARE] = CTRL_GLOBAL(vlan_aware, 2),
++ [ALE_AUTH_ENABLE] = CTRL_GLOBAL(auth_enable, 1),
++ [ALE_RATE_LIMIT] = CTRL_GLOBAL(rate_limit, 0),
++
++ [ALE_PORT_STATE] = CTRL_PORTCTL(port_state, 0, 2),
++ [ALE_PORT_DROP_UNTAGGED] = CTRL_PORTCTL(drop_untagged, 2, 1),
++ [ALE_PORT_DROP_UNKNOWN_VLAN] = CTRL_PORTCTL(drop_unknown, 3, 1),
++ [ALE_PORT_NOLEARN] = CTRL_PORTCTL(nolearn, 4, 1),
++ [ALE_PORT_MCAST_LIMIT] = CTRL_PORTCTL(mcast_limit, 16, 8),
++ [ALE_PORT_BCAST_LIMIT] = CTRL_PORTCTL(bcast_limit, 24, 8),
++
++ [ALE_PORT_UNKNOWN_VLAN_MEMBER] = CTRL_UNK(unknown_vlan_member, 0),
++ [ALE_PORT_UNKNOWN_MCAST_FLOOD] = CTRL_UNK(unknown_mcast_flood, 8),
++ [ALE_PORT_UNKNOWN_REG_MCAST_FLOOD] = CTRL_UNK(unknown_reg_flood, 16),
++ [ALE_PORT_UNTAGGED_EGRESS] = CTRL_UNK(untagged_egress, 24),
++};
++
++int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control,
++ int value)
++{
++ struct ale_control_info *info = &ale_controls[control];
++ int offset, shift;
++ u32 tmp, mask;
++
++ if (control < 0 || control >= ARRAY_SIZE(ale_controls))
++ return -EINVAL;
++
++ if (info->port_offset == 0 && info->port_shift == 0)
++		port = 0; /* global, port is a don't-care */
++
++ if (port < 0 || port > ale->ale_ports)
++ return -EINVAL;
++
++ mask = BITMASK(info->bits);
++ if (value & ~mask)
++ return -EINVAL;
++
++ offset = info->offset + (port * info->port_offset);
++ shift = info->shift + (port * info->port_shift);
++
++ tmp = __raw_readl(ale->ale_regs + offset);
++ tmp = (tmp & ~(mask << shift)) | (value << shift);
++ __raw_writel(tmp, ale->ale_regs + offset);
++
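++	/* crude busy-wait delay, presumably to let the ALE absorb the
++	 * control write before returning */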
++ {
++ volatile u32 dly = 10000;
++ while (dly--)
++ ;
++ }
++ return 0;
++}
++EXPORT_SYMBOL_GPL(cpsw_ale_control_set);
++
++int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
++{
++ struct ale_control_info *info = &ale_controls[control];
++ int offset, shift;
++ u32 tmp;
++
++ if (control < 0 || control >= ARRAY_SIZE(ale_controls))
++ return -EINVAL;
++
++ if (info->port_offset == 0 && info->port_shift == 0)
++		port = 0; /* global, port is a don't-care */
++
++ if (port < 0 || port > ale->ale_ports)
++ return -EINVAL;
++
++ offset = info->offset + (port * info->port_offset);
++ shift = info->shift + (port * info->port_shift);
++
++ tmp = __raw_readl(ale->ale_regs + offset) >> shift;
++ return tmp & BITMASK(info->bits);
++}
++EXPORT_SYMBOL_GPL(cpsw_ale_control_get);
++
++static ssize_t cpsw_ale_control_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ int i, port, len = 0;
++ struct ale_control_info *info;
++ struct cpsw_ale *ale = control_attr_to_ale(attr);
++
++ for (i = 0, info = ale_controls; i < ALE_NUM_CONTROLS; i++, info++) {
++ /* global controls */
++ if (info->port_shift == 0 && info->port_offset == 0) {
++ len += snprintf(buf + len, SZ_4K - len,
++ "%s=%d\n", info->name,
++ cpsw_ale_control_get(ale, 0, i));
++ continue;
++ }
++ /* port specific controls */
++ for (port = 0; port < ale->ale_ports; port++) {
++ len += snprintf(buf + len, SZ_4K - len,
++ "%s.%d=%d\n", info->name, port,
++ cpsw_ale_control_get(ale, port, i));
++ }
++ }
++ return len;
++}
++
++static ssize_t cpsw_ale_control_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ char ctrl_str[33], *end;
++ int port = 0, value, len, ret, control;
++ struct cpsw_ale *ale = control_attr_to_ale(attr);
++
++ len = strcspn(buf, ".=");
++ if (len >= 32)
++ return -ENOMEM;
++ strncpy(ctrl_str, buf, len);
++ ctrl_str[len] = '\0';
++ buf += len;
++
++ if (*buf == '.') {
++ port = simple_strtoul(buf + 1, &end, 0);
++ buf = end;
++ }
++
++ if (*buf != '=')
++ return -EINVAL;
++
++ value = simple_strtoul(buf + 1, NULL, 0);
++
++ for (control = 0; control < ALE_NUM_CONTROLS; control++)
++ if (strcmp(ctrl_str, ale_controls[control].name) == 0)
++ break;
++
++ if (control >= ALE_NUM_CONTROLS)
++ return -ENOENT;
++
++ dev_dbg(ale->params.dev, "processing command %s.%d=%d\n",
++ ale_controls[control].name, port, value);
++
++ ret = cpsw_ale_control_set(ale, port, control, value);
++ if (ret < 0)
++ return ret;
++ return count;
++}
++
++DEVICE_ATTR(ale_control, S_IRUGO | S_IWUSR, cpsw_ale_control_show,
++ cpsw_ale_control_store);
++
++static ssize_t cpsw_ale_table_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ int len = SZ_4K, outlen = 0, idx;
++ u32 ale_entry[ALE_ENTRY_WORDS];
++ struct cpsw_ale *ale = table_attr_to_ale(attr);
++
++ for (idx = 0; idx < ale->ale_entries; idx++) {
++ cpsw_ale_read(ale, idx, ale_entry);
++ outlen += cpsw_ale_dump_entry(idx, ale_entry, buf + outlen,
++ len - outlen);
++ }
++ return outlen;
++}
++DEVICE_ATTR(ale_table, S_IRUGO, cpsw_ale_table_show, NULL);
++
++static void cpsw_ale_timer(unsigned long arg)
++{
++ struct cpsw_ale *ale = (struct cpsw_ale *)arg;
++
++ cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
++
++ if (ale->ageout) {
++ ale->timer.expires = jiffies + ale->ageout;
++ add_timer(&ale->timer);
++ }
++}
++
++int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout)
++{
++ del_timer_sync(&ale->timer);
++ ale->ageout = ageout * HZ;
++ if (ale->ageout) {
++ ale->timer.expires = jiffies + ale->ageout;
++ add_timer(&ale->timer);
++ }
++ return 0;
++}
++
++void cpsw_ale_start(struct cpsw_ale *ale)
++{
++ u32 rev;
++ int ret;
++
++ rev = __raw_readl(ale->ale_regs + ALE_IDVER);
++ dev_dbg(ale->params.dev, "initialized cpsw ale revision %d.%d\n",
++ (rev >> 8) & 0xff, rev & 0xff);
++ cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
++ cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
++
++ ale->ale_control_attr = dev_attr_ale_control;
++ sysfs_attr_init(&ale->ale_control_attr.attr);
++ ret = device_create_file(ale->params.dev, &ale->ale_control_attr);
++ WARN_ON(ret < 0);
++
++ ale->ale_table_attr = dev_attr_ale_table;
++ sysfs_attr_init(&ale->ale_table_attr.attr);
++ ret = device_create_file(ale->params.dev, &ale->ale_table_attr);
++ WARN_ON(ret < 0);
++
++ init_timer(&ale->timer);
++ ale->timer.data = (unsigned long)ale;
++ ale->timer.function = cpsw_ale_timer;
++ if (ale->ageout) {
++ ale->timer.expires = jiffies + ale->ageout;
++ add_timer(&ale->timer);
++ }
++}
++EXPORT_SYMBOL_GPL(cpsw_ale_start);
++
++void cpsw_ale_stop(struct cpsw_ale *ale)
++{
++ cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
++ del_timer_sync(&ale->timer);
++ device_remove_file(ale->params.dev, &ale->ale_table_attr);
++ device_remove_file(ale->params.dev, &ale->ale_control_attr);
++}
++EXPORT_SYMBOL_GPL(cpsw_ale_stop);
++
++struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
++{
++ struct cpsw_ale *ale;
++ int ret;
++
++ ret = -ENOMEM;
++ ale = kzalloc(sizeof(*ale), GFP_KERNEL);
++ if (WARN_ON(!ale))
++ return NULL;
++
++ ale->params = *params;
++ ale->ageout = ale->params.ale_ageout * HZ;
++
++ return ale;
++}
++EXPORT_SYMBOL_GPL(cpsw_ale_create);
++
++int cpsw_ale_destroy(struct cpsw_ale *ale)
++{
++ if (!ale)
++ return -EINVAL;
++ cpsw_ale_stop(ale);
++ cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
++ kfree(ale);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(cpsw_ale_destroy);
++
++MODULE_DESCRIPTION("Ethernet Switch Address Lookup Engine driver");
++MODULE_AUTHOR("Chandan Nath <chandan.nath@ti.com>");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
+new file mode 100644
+index 0000000..f064a04a
+--- /dev/null
++++ b/drivers/net/ethernet/ti/cpsw_ale.h
+@@ -0,0 +1,93 @@
++/*
++ * Texas Instruments 3-Port Ethernet Switch Address Lookup Engine APIs
++ *
++ * Copyright (C) 2010 Texas Instruments
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++#ifndef __TI_CPSW_ALE_H__
++#define __TI_CPSW_ALE_H__
++
++struct cpsw_ale_params {
++ struct device *dev;
++ void __iomem *ale_regs;
++ unsigned long ale_ageout; /* in secs */
++ unsigned long ale_entries;
++ unsigned long ale_ports;
++};
++
++struct cpsw_ale {
++ struct cpsw_ale_params params;
++ struct timer_list timer;
++ unsigned long ageout;
++ struct device_attribute ale_control_attr;
++#define control_attr_to_ale(attr) \
++ container_of(attr, struct cpsw_ale, ale_control_attr);
++ struct device_attribute ale_table_attr;
++#define table_attr_to_ale(attr) \
++ container_of(attr, struct cpsw_ale, ale_table_attr);
++};
++
++enum cpsw_ale_control {
++ /* global */
++ ALE_ENABLE,
++ ALE_CLEAR,
++ ALE_AGEOUT,
++ ALE_VLAN_NOLEARN,
++ ALE_NO_PORT_VLAN,
++ ALE_OUI_DENY,
++ ALE_BYPASS,
++ ALE_RATE_LIMIT_TX,
++ ALE_VLAN_AWARE,
++ ALE_AUTH_ENABLE,
++ ALE_RATE_LIMIT,
++ /* port controls */
++ ALE_PORT_STATE,
++ ALE_PORT_DROP_UNTAGGED,
++ ALE_PORT_DROP_UNKNOWN_VLAN,
++ ALE_PORT_NOLEARN,
++ ALE_PORT_UNKNOWN_VLAN_MEMBER,
++ ALE_PORT_UNKNOWN_MCAST_FLOOD,
++ ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
++ ALE_PORT_UNTAGGED_EGRESS,
++ ALE_PORT_BCAST_LIMIT,
++ ALE_PORT_MCAST_LIMIT,
++ ALE_NUM_CONTROLS,
++};
++
++enum cpsw_ale_port_state {
++ ALE_PORT_STATE_DISABLE = 0x00,
++ ALE_PORT_STATE_BLOCK = 0x01,
++ ALE_PORT_STATE_LEARN = 0x02,
++ ALE_PORT_STATE_FORWARD = 0x03,
++};
++
++/* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
++#define ALE_SECURE 1
++#define ALE_BLOCKED 2
++
++struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params);
++int cpsw_ale_destroy(struct cpsw_ale *ale);
++
++void cpsw_ale_start(struct cpsw_ale *ale);
++void cpsw_ale_stop(struct cpsw_ale *ale);
++
++int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
++int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
++int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags);
++int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port);
++int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask);
++int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask);
++
++int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
++int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
++ int control, int value);
++
++#endif
+diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
+index c97d2f5..306d930 100644
+--- a/drivers/net/ethernet/ti/davinci_cpdma.c
++++ b/drivers/net/ethernet/ti/davinci_cpdma.c
+@@ -19,6 +19,7 @@
+ #include <linux/err.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/io.h>
++#include <linux/export.h>
+
+ #include "davinci_cpdma.h"
+
+@@ -276,6 +277,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
+ ctlr->num_chan = CPDMA_MAX_CHANNELS;
+ return ctlr;
+ }
++EXPORT_SYMBOL_GPL(cpdma_ctlr_create);
+
+ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
+ {
+@@ -321,6 +323,7 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(cpdma_ctlr_start);
+
+ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
+ {
+@@ -348,9 +351,21 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
+
+ ctlr->state = CPDMA_STATE_IDLE;
+
++ if (ctlr->params.has_soft_reset) {
++ unsigned long timeout = jiffies + HZ/10;
++
++ dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
++ while (time_before(jiffies, timeout)) {
++ if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
++ break;
++ }
++ WARN_ON(!time_before(jiffies, timeout));
++ }
++
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);
+
+ int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
+ {
+@@ -444,6 +459,7 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
+ kfree(ctlr);
+ return ret;
+ }
++EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
+
+ int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
+ {
+@@ -467,11 +483,15 @@ int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);
+
+ void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr)
+ {
+ dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0);
++ dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 1);
++ dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 2);
+ }
++EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);
+
+ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
+ cpdma_handler_fn handler)
+@@ -528,6 +548,7 @@ err_chan_busy:
+ err_chan_alloc:
+ return ERR_PTR(ret);
+ }
++EXPORT_SYMBOL_GPL(cpdma_chan_create);
+
+ int cpdma_chan_destroy(struct cpdma_chan *chan)
+ {
+@@ -545,6 +566,7 @@ int cpdma_chan_destroy(struct cpdma_chan *chan)
+ kfree(chan);
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(cpdma_chan_destroy);
+
+ int cpdma_chan_get_stats(struct cpdma_chan *chan,
+ struct cpdma_chan_stats *stats)
+@@ -557,6 +579,7 @@ int cpdma_chan_get_stats(struct cpdma_chan *chan,
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);
+
+ int cpdma_chan_dump(struct cpdma_chan *chan)
+ {
+@@ -693,6 +716,7 @@ unlock_ret:
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return ret;
+ }
++EXPORT_SYMBOL_GPL(cpdma_chan_submit);
+
+ static void __cpdma_chan_free(struct cpdma_chan *chan,
+ struct cpdma_desc __iomem *desc,
+@@ -720,9 +744,6 @@ static int __cpdma_chan_process(struct cpdma_chan *chan)
+ int status, outlen;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ dma_addr_t desc_dma;
+- unsigned long flags;
+-
+- spin_lock_irqsave(&chan->lock, flags);
+
+ desc = chan->head;
+ if (!desc) {
+@@ -751,13 +772,10 @@ static int __cpdma_chan_process(struct cpdma_chan *chan)
+ chan_write(chan, hdp, desc_phys(pool, chan->head));
+ }
+
+- spin_unlock_irqrestore(&chan->lock, flags);
+-
+ __cpdma_chan_free(chan, desc, outlen, status);
+ return status;
+
+ unlock_ret:
+- spin_unlock_irqrestore(&chan->lock, flags);
+ return status;
+ }
+
+@@ -776,6 +794,7 @@ int cpdma_chan_process(struct cpdma_chan *chan, int quota)
+ }
+ return used;
+ }
++EXPORT_SYMBOL_GPL(cpdma_chan_process);
+
+ int cpdma_chan_start(struct cpdma_chan *chan)
+ {
+@@ -803,6 +822,7 @@ int cpdma_chan_start(struct cpdma_chan *chan)
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(cpdma_chan_start);
+
+ int cpdma_chan_stop(struct cpdma_chan *chan)
+ {
+@@ -863,6 +883,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(cpdma_chan_stop);
+
+ int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
+ {
+@@ -934,6 +955,7 @@ unlock_ret:
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return ret;
+ }
++EXPORT_SYMBOL_GPL(cpdma_control_get);
+
+ int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
+ {
+@@ -970,3 +992,4 @@ unlock_ret:
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return ret;
+ }
++EXPORT_SYMBOL_GPL(cpdma_control_set);
+diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
+index 7615040..1f14be6 100644
+--- a/drivers/net/ethernet/ti/davinci_mdio.c
++++ b/drivers/net/ethernet/ti/davinci_mdio.c
+@@ -48,6 +48,10 @@
+
+ #define DEF_OUT_FREQ 2200000 /* 2.2 MHz */
+
++#define CPGMAC_CLK_CTRL_REG 0x44E00014
++#define CPGMAC_CLK_SYSC 0x4A101208
++#define CPSW_NO_IDLE_NO_STDBY 0xA
++
+ struct davinci_mdio_regs {
+ u32 version;
+ u32 control;
+@@ -402,6 +406,33 @@ static int __devexit davinci_mdio_remove(struct platform_device *pdev)
+ return 0;
+ }
+
++static inline int wait_for_clock_enable(struct davinci_mdio_data *data)
++{
++ unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
++ u32 __iomem *cpgmac_clk = ioremap(CPGMAC_CLK_CTRL_REG, 4);
++ u32 __iomem *cpgmac_sysc = ioremap(CPGMAC_CLK_SYSC, 4);
++ u32 reg = 0;
++
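++	/* Poll bits 17:16 of the CPGMAC clock-control register until the
++	 * module leaves idle, then program no-idle/no-standby in its SYSC
++	 * register. */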
++ while (time_after(timeout, jiffies)) {
++ reg = readl(cpgmac_clk);
++ if ((reg & 0x30000) == 0) {
++ writel(CPSW_NO_IDLE_NO_STDBY, cpgmac_sysc);
++ goto iounmap_ret;
++ }
++ }
++ dev_err(data->dev,
++ "timed out waiting for CPGMAC clock enable, value = 0x%x\n",
++ reg);
++ iounmap(cpgmac_sysc);
++ iounmap(cpgmac_clk);
++ return -ETIMEDOUT;
++
++iounmap_ret:
++ iounmap(cpgmac_sysc);
++ iounmap(cpgmac_clk);
++ return 0;
++}
++
+ static int davinci_mdio_suspend(struct device *dev)
+ {
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+@@ -433,12 +464,15 @@ static int davinci_mdio_resume(struct device *dev)
+ if (data->clk)
+ clk_enable(data->clk);
+
++	/* Need to wait until the module is enabled */
++ wait_for_clock_enable(data);
++
+ /* restart the scan state machine */
+ ctrl = __raw_readl(&data->regs->control);
+ ctrl |= CONTROL_ENABLE;
+ __raw_writel(ctrl, &data->regs->control);
+-
+ data->suspended = false;
++
+ spin_unlock(&data->lock);
+
+ return 0;
+diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
+index 3fe388b..f3162eb 100644
+--- a/drivers/net/wireless/wl12xx/Kconfig
++++ b/drivers/net/wireless/wl12xx/Kconfig
+@@ -54,5 +54,5 @@ config WL12XX_SDIO_TEST
+
+ config WL12XX_PLATFORM_DATA
+ bool
+- depends on WL12XX_SDIO != n || WL1251_SDIO != n
+ default y
++ select WIRELESS_EXT
+diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
+new file mode 100644
+index 0000000..73c0db0
+--- /dev/null
++++ b/drivers/pwm/Kconfig
+@@ -0,0 +1,29 @@
++#
++# PWM infrastructure and devices
++#
++
++menuconfig GENERIC_PWM
++ tristate "PWM Support"
++ default n
++ help
++ Enables PWM device support implemented via a generic
++ framework. If unsure, say N.
++
++config DAVINCI_EHRPWM
++ bool "Davinci eHRPWM support"
++ select HAVE_PWM
++ depends on GENERIC_PWM && (ARCH_DAVINCI_DA850 || SOC_OMAPAM33XX)
++ help
++	  This option enables support for the eHRPWM driver. If
++	  unsure, say N.
++
++config ECAP_PWM
++ tristate "eCAP PWM support"
++ select HAVE_PWM
++ depends on GENERIC_PWM && (ARCH_DAVINCI_DA850 || SOC_OMAPAM33XX)
++ help
++	  This option enables device driver support for the eCAP module found
++	  on DA8xx and AM335x processors. The eCAP module is used to generate
++	  a wide range of PWM waveforms; the maximum frequency it can generate
++	  is half the system clock frequency.
++	  Say Y to enable eCAP support. If unsure, say N.
+diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
+new file mode 100644
+index 0000000..e888fad
+--- /dev/null
++++ b/drivers/pwm/Makefile
+@@ -0,0 +1,8 @@
++#
++# Makefile for pwm devices
++#
++obj-$(CONFIG_GENERIC_PWM) := pwm.o
++
++obj-$(CONFIG_DAVINCI_EHRPWM) += ehrpwm.o
++
++obj-$(CONFIG_ECAP_PWM) += ecap.o
+diff --git a/drivers/pwm/ecap.c b/drivers/pwm/ecap.c
+new file mode 100644
+index 0000000..63c2405
+--- /dev/null
++++ b/drivers/pwm/ecap.c
+@@ -0,0 +1,461 @@
++/*
++ * eCAP driver for PWM output generation
++ *
++ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/platform_device.h>
++#include <linux/err.h>
++#include <linux/clk.h>
++#include <linux/io.h>
++#include <linux/pwm/pwm.h>
++#include <linux/slab.h>
++#include <linux/pm_runtime.h>
++
++#include <plat/clock.h>
++#include <plat/config_pwm.h>
++
++#define TIMER_CTR_REG 0x0
++#define CAPTURE_1_REG 0x08
++#define CAPTURE_2_REG 0x0c
++#define CAPTURE_3_REG 0x10
++#define CAPTURE_4_REG 0x14
++#define CAPTURE_CTRL2_REG 0x2A
++
++#define ECTRL2_SYNCOSEL_MASK (0x03 << 6)
++
++#define ECTRL2_MDSL_ECAP BIT(9)
++#define ECTRL2_CTRSTP_FREERUN BIT(4)
++#define ECTRL2_PLSL_LOW BIT(10)
++#define ECTRL2_SYNC_EN BIT(5)
++
++struct ecap_regs {
++ unsigned tsctr;
++ unsigned cap1;
++ unsigned cap2;
++ unsigned cap3;
++ unsigned cap4;
++ unsigned short ecctl2;
++ unsigned short clkconfig;
++};
++
++struct ecap_pwm {
++ struct pwm_device pwm;
++ struct pwm_device_ops ops;
++ spinlock_t lock;
++ struct clk *clk;
++ void __iomem *mmio_base;
++ u8 version;
++ void __iomem *config_mem_base;
++ struct device *dev;
++ struct ecap_regs ctx;
++};
++
++static inline struct ecap_pwm *to_ecap_pwm(const struct pwm_device *p)
++{
++ return pwm_get_drvdata(p);
++}
++
++static int ecap_pwm_stop(struct pwm_device *p)
++{
++ unsigned long flags, v;
++ struct ecap_pwm *ep = to_ecap_pwm(p);
++
++ /* Trying to stop a non-running PWM, not allowed */
++ if (!pwm_is_running(p))
++ return -EPERM;
++
++ spin_lock_irqsave(&ep->lock, flags);
++ v = readw(ep->mmio_base + CAPTURE_CTRL2_REG);
++ v &= ~ECTRL2_CTRSTP_FREERUN;
++ writew(v, ep->mmio_base + CAPTURE_CTRL2_REG);
++ spin_unlock_irqrestore(&ep->lock, flags);
++
++ /* The PWM clock should be disabled on stop */
++ pm_runtime_put_sync(ep->dev);
++ clear_bit(FLAG_RUNNING, &p->flags);
++
++ return 0;
++}
++
++static int ecap_pwm_start(struct pwm_device *p)
++{
++ int ret = 0;
++ unsigned long flags, v;
++ struct ecap_pwm *ep = to_ecap_pwm(p);
++
++ /* Trying to start a running PWM, not allowed */
++ if (pwm_is_running(p))
++ return -EPERM;
++
++ /* The PWM clock should be enabled on start */
++ pm_runtime_get_sync(ep->dev);
++
++ spin_lock_irqsave(&ep->lock, flags);
++ v = readw(ep->mmio_base + CAPTURE_CTRL2_REG);
++ v |= ECTRL2_CTRSTP_FREERUN;
++ writew(v, ep->mmio_base + CAPTURE_CTRL2_REG);
++ spin_unlock_irqrestore(&ep->lock, flags);
++ set_bit(FLAG_RUNNING, &p->flags);
++
++ return ret;
++}
++
++static int ecap_pwm_set_polarity(struct pwm_device *p, char pol)
++{
++ unsigned long flags, v;
++ struct ecap_pwm *ep = to_ecap_pwm(p);
++
++ pm_runtime_get_sync(ep->dev);
++
++ spin_lock_irqsave(&ep->lock, flags);
++ v = readw(ep->mmio_base + CAPTURE_CTRL2_REG);
++ v &= ~ECTRL2_PLSL_LOW;
++ v |= (!pol << 10);
++ writew(v, ep->mmio_base + CAPTURE_CTRL2_REG);
++ spin_unlock_irqrestore(&ep->lock, flags);
++
++ pm_runtime_put_sync(ep->dev);
++ return 0;
++}
++
++static int ecap_pwm_config_period(struct pwm_device *p)
++{
++ unsigned long flags, v;
++ struct ecap_pwm *ep = to_ecap_pwm(p);
++
++ pm_runtime_get_sync(ep->dev);
++
++ spin_lock_irqsave(&ep->lock, flags);
++ writel((p->period_ticks) - 1, ep->mmio_base + CAPTURE_3_REG);
++ v = readw(ep->mmio_base + CAPTURE_CTRL2_REG);
++ v |= (ECTRL2_MDSL_ECAP | ECTRL2_SYNCOSEL_MASK);
++ writew(v, ep->mmio_base + CAPTURE_CTRL2_REG);
++ spin_unlock_irqrestore(&ep->lock, flags);
++
++ pm_runtime_put_sync(ep->dev);
++ return 0;
++}
++
++static int ecap_pwm_config_duty(struct pwm_device *p)
++{
++ unsigned long flags, v;
++ struct ecap_pwm *ep = to_ecap_pwm(p);
++
++ pm_runtime_get_sync(ep->dev);
++
++ spin_lock_irqsave(&ep->lock, flags);
++ v = readw(ep->mmio_base + CAPTURE_CTRL2_REG);
++ v |= (ECTRL2_MDSL_ECAP | ECTRL2_SYNCOSEL_MASK);
++ writew(v, ep->mmio_base + CAPTURE_CTRL2_REG);
++
++ if (p->duty_ticks > 0) {
++ writel(p->duty_ticks, ep->mmio_base + CAPTURE_4_REG);
++ } else {
++ writel(p->duty_ticks, ep->mmio_base + CAPTURE_2_REG);
++ writel(0, ep->mmio_base + TIMER_CTR_REG);
++ }
++ spin_unlock_irqrestore(&ep->lock, flags);
++
++ pm_runtime_put_sync(ep->dev);
++ return 0;
++}
++
++static int ecap_pwm_config(struct pwm_device *p,
++ struct pwm_config *c)
++{
++ int ret = 0;
++ switch (c->config_mask) {
++
++ case BIT(PWM_CONFIG_DUTY_TICKS):
++ p->duty_ticks = c->duty_ticks;
++ ret = ecap_pwm_config_duty(p);
++ break;
++
++ case BIT(PWM_CONFIG_PERIOD_TICKS):
++ p->period_ticks = c->period_ticks;
++ ret = ecap_pwm_config_period(p);
++ break;
++
++ case BIT(PWM_CONFIG_POLARITY):
++ ret = ecap_pwm_set_polarity(p, c->polarity);
++ break;
++
++ case BIT(PWM_CONFIG_START):
++ ret = ecap_pwm_start(p);
++ break;
++
++ case BIT(PWM_CONFIG_STOP):
++ ret = ecap_pwm_stop(p);
++ break;
++ }
++
++ return ret;
++}
++
++static int ecap_pwm_request(struct pwm_device *p)
++{
++ struct ecap_pwm *ep = to_ecap_pwm(p);
++
++ p->tick_hz = clk_get_rate(ep->clk);
++ return 0;
++}
++
++static int ecap_frequency_transition_cb(struct pwm_device *p)
++{
++ struct ecap_pwm *ep = to_ecap_pwm(p);
++ unsigned long duty_ns, rate;
++
++ rate = clk_get_rate(ep->clk);
++ if (rate == p->tick_hz)
++ return 0;
++ p->tick_hz = rate;
++
++ duty_ns = p->duty_ns;
++ if (pwm_is_running(p)) {
++ pwm_stop(p);
++ pwm_set_duty_ns(p, 0);
++ pwm_set_period_ns(p, p->period_ns);
++ pwm_set_duty_ns(p, duty_ns);
++ pwm_start(p);
++ } else {
++ pwm_set_duty_ns(p, 0);
++ pwm_set_period_ns(p, p->period_ns);
++ pwm_set_duty_ns(p, duty_ns);
++ }
++ return 0;
++}
++
++static int ecap_probe(struct platform_device *pdev)
++{
++ struct ecap_pwm *ep = NULL;
++ struct resource *r;
++ int ret = 0;
++ int val;
++ char con_id[PWM_CON_ID_STRING_LENGTH] = "epwmss";
++ struct pwmss_platform_data *pdata = (&pdev->dev)->platform_data;
++
++ ep = kzalloc(sizeof(*ep), GFP_KERNEL);
++
++ if (!ep) {
++ dev_err(&pdev->dev, "failed to allocate memory\n");
++ return -ENOMEM;
++ }
++
++ ep->version = pdata->version;
++
++ if (ep->version == PWM_VERSION_1) {
++ sprintf(con_id, "%s%d_%s", con_id, pdev->id, "fck");
++ ep->clk = clk_get(&pdev->dev, con_id);
++ } else
++ ep->clk = clk_get(&pdev->dev, "ecap");
++
++ pm_runtime_enable(&pdev->dev);
++ ep->dev = &pdev->dev;
++ if (IS_ERR(ep->clk)) {
++ ret = PTR_ERR(ep->clk);
++ goto err_clk_get;
++ }
++
++ if (ep->version == PWM_VERSION_1) {
++ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++ if (!r) {
++ dev_err(&pdev->dev, "no memory resource defined\n");
++ ret = -ENOMEM;
++ goto err_get_resource;
++ }
++
++ ep->config_mem_base = ioremap(r->start, resource_size(r));
++
++ if (!ep->config_mem_base) {
++
++ dev_err(&pdev->dev, "failed to ioremap() registers\n");
++ ret = -ENOMEM;
++ goto err_get_resource;
++ }
++
++ pm_runtime_get_sync(ep->dev);
++ val = readw(ep->config_mem_base + PWMSS_CLKCONFIG);
++ val |= BIT(ECAP_CLK_EN);
++ writew(val, ep->config_mem_base + PWMSS_CLKCONFIG);
++ pm_runtime_put_sync(ep->dev);
++ }
++
++ spin_lock_init(&ep->lock);
++ ep->ops.config = ecap_pwm_config;
++ ep->ops.request = ecap_pwm_request;
++ ep->ops.freq_transition_notifier_cb = ecap_frequency_transition_cb;
++
++ if (ep->version == PWM_VERSION_1)
++ r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
++ else
++ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++ if (!r) {
++ dev_err(&pdev->dev, "no memory resource defined\n");
++ ret = -ENODEV;
++ goto err_request_mem;
++ }
++
++ r = request_mem_region(r->start, resource_size(r), pdev->name);
++ if (!r) {
++ dev_err(&pdev->dev, "failed to request memory resource\n");
++ ret = -EBUSY;
++ goto err_request_mem;
++ }
++
++ ep->mmio_base = ioremap(r->start, resource_size(r));
++ if (!ep->mmio_base) {
++ dev_err(&pdev->dev, "failed to ioremap() registers\n");
++ ret = -ENODEV;
++ goto err_ioremap;
++ }
++
++ ep->pwm.ops = &ep->ops;
++ pwm_set_drvdata(&ep->pwm, ep);
++ ret = pwm_register(&ep->pwm, &pdev->dev, -1);
++ platform_set_drvdata(pdev, ep);
++ return 0;
++
++err_ioremap:
++ release_mem_region(r->start, resource_size(r));
++err_request_mem:
++ if (ep->version == PWM_VERSION_1) {
++ iounmap(ep->config_mem_base);
++ ep->config_mem_base = NULL;
++ }
++err_get_resource:
++ clk_put(ep->clk);
++ pm_runtime_disable(&pdev->dev);
++err_clk_get:
++ kfree(ep);
++ return ret;
++}
++
++#ifdef CONFIG_PM
++
++void ecap_save_reg(struct ecap_pwm *ep)
++{
++ pm_runtime_get_sync(ep->dev);
++
++ ep->ctx.ecctl2 = readw(ep->mmio_base + CAPTURE_CTRL2_REG);
++ ep->ctx.tsctr = readl(ep->mmio_base + TIMER_CTR_REG);
++ ep->ctx.cap1 = readl(ep->mmio_base + CAPTURE_1_REG);
++ ep->ctx.cap2 = readl(ep->mmio_base + CAPTURE_2_REG);
++ ep->ctx.cap4 = readl(ep->mmio_base + CAPTURE_4_REG);
++ ep->ctx.cap3 = readl(ep->mmio_base + CAPTURE_3_REG);
++
++ ep->ctx.clkconfig = readw(ep->config_mem_base + PWMSS_CLKCONFIG);
++
++ pm_runtime_put_sync(ep->dev);
++}
++
++void ecap_restore_reg(struct ecap_pwm *ep)
++{
++ writew(ep->ctx.clkconfig, ep->config_mem_base + PWMSS_CLKCONFIG);
++
++ writel(ep->ctx.cap3, ep->mmio_base + CAPTURE_3_REG);
++ writel(ep->ctx.cap4, ep->mmio_base + CAPTURE_4_REG);
++ writel(ep->ctx.cap2, ep->mmio_base + CAPTURE_2_REG);
++ writel(ep->ctx.cap1, ep->mmio_base + CAPTURE_1_REG);
++ writel(ep->ctx.tsctr, ep->mmio_base + TIMER_CTR_REG);
++ writew(ep->ctx.ecctl2, ep->mmio_base + CAPTURE_CTRL2_REG);
++}
++
++static int ecap_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ struct ecap_pwm *ep = platform_get_drvdata(pdev);
++
++ ecap_save_reg(ep);
++ pm_runtime_put_sync(ep->dev);
++
++ return 0;
++}
++
++static int ecap_resume(struct platform_device *pdev)
++{
++ struct ecap_pwm *ep = platform_get_drvdata(pdev);
++
++ pm_runtime_get_sync(ep->dev);
++
++ ecap_restore_reg(ep);
++
++ return 0;
++}
++
++#else
++#define ecap_suspend NULL
++#define ecap_resume NULL
++#endif
++
++static int __devexit ecap_remove(struct platform_device *pdev)
++{
++ struct ecap_pwm *ep = platform_get_drvdata(pdev);
++ struct resource *r;
++ struct pwmss_platform_data *pdata;
++ int val;
++
++ if (ep->version == PWM_VERSION_1) {
++ pdata = (&pdev->dev)->platform_data;
++ val = readw(ep->config_mem_base + PWMSS_CLKCONFIG);
++ val &= ~BIT(ECAP_CLK_EN);
++ writew(val, ep->config_mem_base + PWMSS_CLKCONFIG);
++ iounmap(ep->config_mem_base);
++ ep->config_mem_base = NULL;
++ }
++
++ pwm_unregister(&ep->pwm);
++ iounmap(ep->mmio_base);
++
++ if (ep->version == PWM_VERSION_1)
++ r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
++ else
++ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ release_mem_region(r->start, resource_size(r));
++ platform_set_drvdata(pdev, NULL);
++ clk_put(ep->clk);
++ pm_runtime_disable(&pdev->dev);
++ kfree(ep);
++
++ return 0;
++}
++
++static struct platform_driver ecap_driver = {
++ .driver = {
++ .name = "ecap",
++ .owner = THIS_MODULE,
++ },
++ .probe = ecap_probe,
++ .remove = __devexit_p(ecap_remove),
++ .suspend = ecap_suspend,
++ .resume = ecap_resume,
++};
++
++static int __init ecap_init(void)
++{
++ return platform_driver_register(&ecap_driver);
++}
++
++static void __exit ecap_exit(void)
++{
++ platform_driver_unregister(&ecap_driver);
++}
++
++module_init(ecap_init);
++module_exit(ecap_exit);
++
++MODULE_AUTHOR("Texas Instruments");
++MODULE_DESCRIPTION("Driver for Davinci eCAP peripheral");
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS("platform:ecap");
+diff --git a/drivers/pwm/ehrpwm.c b/drivers/pwm/ehrpwm.c
+new file mode 100644
+index 0000000..8bbed87
+--- /dev/null
++++ b/drivers/pwm/ehrpwm.c
+@@ -0,0 +1,1645 @@
++/*
++ * eHRPWM driver for simple PWM output generation
++ *
++ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/pwm/pwm.h>
++#include <linux/pwm/ehrpwm.h>
++#include <linux/pm_runtime.h>
++
++#include <plat/clock.h>
++#include <plat/config_pwm.h>
++
++#ifdef DEBUG
++#define debug(format, args...) printk(format, ##args)
++#else
++#define debug(format, args...) { }
++#endif
++
++/******************** Time base sub module*****************************/
++#define TBCTL 0x0
++#define TBSTS 0x2
++#define TBPHS 0x6
++#define TBCTR 0x8
++#define TBPRD 0xA
++
++#define TBCTL_CLKDIV_MASK (BIT(12) | BIT(11) | BIT(10))
++#define TBCTL_HSPCLKDIV_MASK (BIT(9) | BIT(8) | BIT(7))
++#define TBCTL_SYNCOSEL_MASK (BIT(5) | BIT(4))
++#define TBCTL_CTRMODE_MASK (BIT(1) | BIT(0))
++
++#define TBCTL_CLKDIV_POS 0xA
++#define TBCTL_HSPCLKDIV_POS 0x7
++#define TBCTL_PHSEN_POS 0x2
++#define TBCTL_SYNCOSEL_POS 0x4
++#define TBCTL_PHSDIR_POS 0xD
++#define TBCTL_FRC_SYC_POS 0x6
++#define TBCTL_LOAD_MD_POS 0x3
++
++#define TBCTL_FREERUN_FREE 0x2
++#define TBCTL_CTRMOD_CTRUP 0x0
++
++/******************* Counter-Compare Sub Module ***********************/
++#define CMPCTL 0xE
++#define CMPA 0x12
++#define CMPB 0x14
++
++#define CMPCTL_LDBMODE_MASK (BIT(3) | BIT(2))
++#define CMPCTL_LDAMODE_MASK (BIT(1) | BIT(0))
++
++#define CMPCTL_SHDAMODE_POS 0x4
++#define CMPCTL_SHDBMODE_POS 0x6
++#define CMPCTL_LDBMODE_POS 0x2
++
++/*********************** Action Control Sub module ********************/
++#define AQCTLA 0x16
++#define AQCTLB 0x18
++#define AQSFRC 0x1A
++#define AQCSFRC 0x1c
++
++#define ACTCTL_CBD_MASK (BIT(11) | BIT(10))
++#define ACTCTL_CBU_MASK (BIT(9) | BIT(8))
++#define ACTCTL_CAD_MASK (BIT(7) | BIT(6))
++#define ACTCTL_CAU_MASK (BIT(5) | BIT(4))
++#define ACTCTL_CPRD_MASK (BIT(3) | BIT(2))
++#define ACTCTL_CZRO_MASK (BIT(1) | BIT(0))
++
++#define ACTCTL_CTREQPRD_POS 0x2
++#define ACTCTL_CTREQCMPAUP_POS 0x4
++#define ACTCTL_CTREQCMPADN_POS 0x6
++#define ACTCTL_CTREQCMPBUP_POS 0x8
++#define ACTCTL_CTREQCMPBDN_POS 0xA
++
++#define ACTCTL_CTREQCMP_LOW 0x1
++#define ACTCTL_CTREQCMP_HIGH 0x2
++#define ACTCTL_CTREQZRO_LOW 0x1
++#define ACTCTL_CTREQZRO_HIGH 0x2
++
++#define AQSFRC_ACTA_MASK (BIT(1) | BIT(0))
++#define AQSFRC_ACTB_MASK (BIT(4) | BIT(3))
++#define AQCSFRC_CFRC_LOAD_MASK (BIT(7) | BIT(6))
++#define AQCSFRC_OUTB_MASK (BIT(3) | BIT(2))
++#define AQCSFRC_OUTA_MASK (BIT(1) | BIT(0))
++
++#define AQSFRC_ACTB_POS 0x3
++#define AQSFRC_OTFRCA_POS 0x2
++#define AQSFRC_OTFRCB_POS 0x5
++#define AQSFRC_LDMD_POS 0x6
++
++#define AQCSFRC_OUTB_POS 0x2
++
++/******************** Dead Band Generator Sub module *******************/
++#define DBCTL 0x1E
++#define DBRED 0x20
++#define DBFED 0x22
++
++#define DBCTL_INMODE_MASK (BIT(5) | BIT(4))
++#define DBCTL_PLSEL_MASK (BIT(3) | BIT(2))
++#define DBCTL_OUTMODE_MASK (BIT(1) | BIT(0))
++
++#define DBCTL_INMODE_POS 0x4
++#define DBCTL_POLSEL_POS 0x2
++
++/********************** PWM Chopper Sub module ************************/
++#define PCCTL 0x3C
++
++#define PCCTL_CHPDUTY_MASK (BIT(10) | BIT(9) | BIT(8))
++#define PCCTL_CHPFREQ_MASK (BIT(7) | BIT(6) | BIT(5))
++#define PCCTL_OSHTWTH_MASK (BIT(4) | BIT(3) | BIT(2) | BIT(1))
++
++#define PCCTL_CHPDUTY_POS 0x8
++#define PCCTL_CHPFRQ_POS 0x5
++#define PCCTL_OSTWID_POS 0x1
++
++/*************************Trip-zone submodule **************************/
++#define TZSEL 0x24
++#define TZCTL 0x28
++#define TZEINT 0x2A
++#define TZFLG 0x2C
++#define TZCLR 0x2E
++#define TZFRC 0x30
++
++#define TZCTL_ACTA_MASK (BIT(1) | BIT(0))
++#define TZCTL_ACTB_MASK (BIT(3) | BIT(2))
++
++#define TZCTL_ACTB_POS 0x2
++
++#define TZEINT_OSHTEVT_POS 0x2
++#define TZEINT_CBCEVT_POS 0x1
++
++/*************************Event-Trigger submodule registers**************/
++#define ETSEL 0x32
++#define ETPS 0x34
++#define ETFLG 0x36
++#define ETCLR 0x38
++#define ETFRC 0x3A
++
++#define ETSEL_INTSEL_MASK (BIT(2) | BIT(1) | BIT(0))
++#define ETPS_INTCNT_MASK (BIT(3) | BIT(2))
++#define ETPS_INTPRD_MASK (BIT(1) | BIT(0))
++
++#define ETSEL_EN_INT_EN_POS 0x3
++
++/**********************High Resolution Registers ********************/
++#define TBPHSHR 0x4
++#define CMPAHR 0x10
++#define HRCNFG 0x1040
++
++#define AM335X_HRCNFG 0x40
++
++#define HRCNFG_EDGEMD_MASK (BIT(1) | BIT(0))
++#define HRCNFG_LDMD_POS 0x3
++#define HRCNFG_CTLMD_POS 0x2
++
++struct ehrpwm_suspend_params {
++ struct pwm_device *pch;
++ unsigned long req_delay_cycles;
++ unsigned long act_delay;
++} ehrpwm_suspend_params;
++
++static inline unsigned short ehrpwm_read(struct ehrpwm_pwm *ehrpwm,
++ unsigned int offset)
++{
++ return readw(ehrpwm->mmio_base + offset);
++}
++
++static inline void ehrpwm_write(struct ehrpwm_pwm *ehrpwm, unsigned int offset,
++ unsigned short val)
++{
++ writew(val, ehrpwm->mmio_base + offset);
++}
++
++static void ehrpwm_reg_config(struct ehrpwm_pwm *ehrpwm, unsigned int offset,
++ unsigned short val, unsigned short mask)
++{
++ unsigned short read_val;
++
++ read_val = ehrpwm_read(ehrpwm, offset);
++ read_val = read_val & ~mask;
++ read_val = read_val | val;
++ ehrpwm_write(ehrpwm, offset, read_val);
++}
++
++static inline struct ehrpwm_pwm *to_ehrpwm_pwm(const struct pwm_device *p)
++{
++ return pwm_get_drvdata(p);
++}
++
++/* Time Base Module Configurations */
++int ehrpwm_tb_set_prescalar_val(struct pwm_device *p, unsigned char clkdiv,
++ unsigned char hspclkdiv)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ if (clkdiv > 0x7 || hspclkdiv > 0x7)
++ return -EINVAL;
++
++ ehrpwm_reg_config(ehrpwm, TBCTL, clkdiv << TBCTL_CLKDIV_POS,
++ TBCTL_CLKDIV_MASK);
++ ehrpwm_reg_config(ehrpwm, TBCTL, hspclkdiv << TBCTL_HSPCLKDIV_POS,
++ TBCTL_HSPCLKDIV_MASK);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_tb_set_prescalar_val);
++
++int ehrpwm_tb_config_sync(struct pwm_device *p, unsigned char phsen,
++ unsigned char syncosel)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ if (phsen > 1 || syncosel > 0x3)
++ return -EINVAL;
++
++ ehrpwm_reg_config(ehrpwm, TBCTL, phsen << TBCTL_PHSEN_POS, BIT(2));
++ ehrpwm_reg_config(ehrpwm, TBCTL, syncosel << TBCTL_SYNCOSEL_POS,
++ TBCTL_SYNCOSEL_MASK);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_tb_config_sync);
++
++int ehrpwm_tb_set_counter_mode(struct pwm_device *p, unsigned char ctrmode,
++ unsigned char phsdir)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ if (ctrmode > 0x3 || phsdir > 1)
++ return -EINVAL;
++
++ ehrpwm_reg_config(ehrpwm, TBCTL, phsdir << TBCTL_PHSDIR_POS, BIT(13));
++ ehrpwm_reg_config(ehrpwm, TBCTL, ctrmode, TBCTL_CTRMODE_MASK);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_tb_set_counter_mode);
++
++int ehrpwm_tb_force_sync(struct pwm_device *p)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ ehrpwm_reg_config(ehrpwm, TBCTL, ENABLE << TBCTL_FRC_SYC_POS, BIT(6));
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_tb_force_sync);
++
++int ehrpwm_tb_set_periodload(struct pwm_device *p, unsigned char loadmode)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ if (loadmode > 0x1)
++ return -EINVAL;
++
++ ehrpwm_reg_config(ehrpwm, TBCTL, loadmode << TBCTL_LOAD_MD_POS, BIT(3));
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_tb_set_periodload);
++
++int ehrpwm_tb_read_status(struct pwm_device *p, unsigned short *val)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ *val = ehrpwm_read(ehrpwm, TBSTS);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_tb_read_status);
++
++int ehrpwm_tb_read_counter(struct pwm_device *p, unsigned short *val)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ *val = ehrpwm_read(ehrpwm, TBCTR);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_tb_read_counter);
++
++int ehrpwm_tb_set_period(struct pwm_device *p, unsigned short val)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ ehrpwm_write(ehrpwm, TBPRD, val);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_tb_set_period);
++
++int ehrpwm_tb_set_phase(struct pwm_device *p, unsigned short val)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ ehrpwm_write(ehrpwm, TBPHS, val);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_tb_set_phase);
++
++int ehrpwm_cmp_set_cmp_ctl(struct pwm_device *p, unsigned char shdwamode,
++ unsigned char shdwbmode, unsigned char loadamode,
++ unsigned char loadbmode)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ if (shdwamode > 0x1 || shdwbmode > 0x1 || loadamode > 0x3 ||
++ loadbmode > 0x3)
++ return -EINVAL;
++
++ ehrpwm_reg_config(ehrpwm, CMPCTL, shdwamode << CMPCTL_SHDAMODE_POS,
++ BIT(4));
++ ehrpwm_reg_config(ehrpwm, CMPCTL, shdwbmode << CMPCTL_SHDBMODE_POS,
++ BIT(6));
++ ehrpwm_reg_config(ehrpwm, CMPCTL, loadamode, CMPCTL_LDAMODE_MASK);
++ ehrpwm_reg_config(ehrpwm, CMPCTL, loadbmode << CMPCTL_LDBMODE_POS,
++ CMPCTL_LDBMODE_MASK);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_cmp_set_cmp_ctl);
++
++int ehrpwm_cmp_set_cmp_val(struct pwm_device *p, unsigned char reg,
++ unsigned short val)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++ unsigned char offset;
++
++ if (reg > 0x1)
++ return -EINVAL;
++
++ if (reg == 0)
++ offset = CMPA;
++ else
++ offset = CMPB;
++
++ ehrpwm_write(ehrpwm, offset, val);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_cmp_set_cmp_val);
++
++int ehrpwm_aq_set_act_ctrl(struct pwm_device *p, struct aq_config_params *cfg)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++ unsigned char reg;
++
++ if (!cfg)
++ return -EINVAL;
++
++ if (cfg->ch > 1 || cfg->ctreqzro > 3 || cfg->ctreqprd > 3 ||
++ cfg->ctreqcmpaup > 3 || cfg->ctreqcmpadown > 3 ||
++ cfg->ctreqcmpbup > 3 || cfg->ctreqcmpbdown > 3)
++ return -EINVAL;
++
++ if (cfg->ch == 0)
++ reg = AQCTLA;
++ else
++ reg = AQCTLB;
++
++ ehrpwm_reg_config(ehrpwm, reg, cfg->ctreqzro, ACTCTL_CZRO_MASK);
++ ehrpwm_reg_config(ehrpwm, reg, cfg->ctreqprd << ACTCTL_CTREQPRD_POS,
++ ACTCTL_CPRD_MASK);
++ ehrpwm_reg_config(ehrpwm, reg, cfg->ctreqcmpaup <<
++ ACTCTL_CTREQCMPAUP_POS, ACTCTL_CAU_MASK);
++ ehrpwm_reg_config(ehrpwm, reg, cfg->ctreqcmpadown <<
++ ACTCTL_CTREQCMPADN_POS, ACTCTL_CAD_MASK);
++ ehrpwm_reg_config(ehrpwm, reg, cfg->ctreqcmpbup <<
++ ACTCTL_CTREQCMPBUP_POS, ACTCTL_CBU_MASK);
++ ehrpwm_reg_config(ehrpwm, reg, cfg->ctreqcmpbdown <<
++ ACTCTL_CTREQCMPBDN_POS, ACTCTL_CBD_MASK);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_aq_set_act_ctrl);
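As a hedged illustration of the action-qualifier helper above, the sketch below drives channel A high when the counter wraps to zero and low on a CMPA match while counting up; the field names mirror the checks in ehrpwm_aq_set_act_ctrl(), but the full layout of struct aq_config_params lives in linux/pwm/ehrpwm.h and is assumed here.

static int example_aqctla_setup(struct pwm_device *p)
{
	struct aq_config_params cfg = {
		.ch		= 0,				/* channel A -> AQCTLA */
		.ctreqzro	= ACTCTL_CTREQZRO_HIGH,		/* output high when TBCTR = 0 */
		.ctreqcmpaup	= ACTCTL_CTREQCMP_LOW,		/* output low on CMPA match, up-count */
		/* all other events left at 0: take no action */
	};

	return ehrpwm_aq_set_act_ctrl(p, &cfg);
}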
++
++int ehrpwm_aq_set_one_shot_act(struct pwm_device *p, unsigned char ch,
++ unsigned char act)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ if (ch > 1 || act > 3)
++ return -EINVAL;
++
++ if (ch == 0)
++ ehrpwm_reg_config(ehrpwm, AQSFRC, act, AQSFRC_ACTA_MASK);
++ else
++ ehrpwm_reg_config(ehrpwm, AQSFRC, act << AQSFRC_ACTB_POS,
++ AQSFRC_ACTB_MASK);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_aq_set_one_shot_act);
++
++int ehrpwm_aq_ot_frc(struct pwm_device *p, unsigned char ch)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ if (ch > 1)
++ return -EINVAL;
++
++ if (ch == 0)
++ ehrpwm_reg_config(ehrpwm, AQSFRC, ENABLE << AQSFRC_OTFRCA_POS,
++ BIT(2));
++ else
++ ehrpwm_reg_config(ehrpwm, AQSFRC, ENABLE << AQSFRC_OTFRCB_POS,
++ BIT(5));
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_aq_ot_frc);
++
++int ehrpwm_aq_set_csfrc_load_mode(struct pwm_device *p, unsigned char loadmode)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ if (loadmode > 0x3)
++ return -EINVAL;
++
++ ehrpwm_reg_config(ehrpwm, AQSFRC, loadmode << AQSFRC_LDMD_POS,
++ AQCSFRC_CFRC_LOAD_MASK);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_aq_set_csfrc_load_mode);
++
++int ehrpwm_aq_continuous_frc(struct pwm_device *p, unsigned char ch,
++ unsigned char act)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ if (ch > 1)
++ return -EINVAL;
++
++ if (ch == 0)
++ ehrpwm_reg_config(ehrpwm, AQCSFRC, act, AQCSFRC_OUTA_MASK);
++ else
++ ehrpwm_reg_config(ehrpwm, AQCSFRC, act << AQCSFRC_OUTB_POS,
++ AQCSFRC_OUTB_MASK);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_aq_continuous_frc);
++
++int ehrpwm_db_get_max_delay(struct pwm_device *p, enum config_mask cfgmask,
++ unsigned long *delay_val)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++ unsigned long delay_ns;
++ unsigned long max_ticks;
++
++ if (cfgmask == CONFIG_NS) {
++ max_ticks = 0x3ff * ehrpwm->prescale_val;
++ delay_ns = pwm_ticks_to_ns(p, max_ticks);
++ *delay_val = delay_ns;
++ } else if (cfgmask == CONFIG_TICKS) {
++ *delay_val = 0x3ff * ehrpwm->prescale_val;
++ } else {
++ return -EINVAL;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_db_get_max_delay);
++
++int ehrpwm_db_get_delay(struct pwm_device *p, unsigned char edge,
++ enum config_mask cfgmask, unsigned long *delay_val)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++ unsigned long delay_ns;
++ unsigned long delay_ticks;
++ unsigned char offset;
++
++ if (!ehrpwm)
++ return -EINVAL;
++
++ if (edge == RISING_EDGE_DELAY)
++ offset = DBRED;
++ else if (edge == FALLING_EDGE_DELAY)
++ offset = DBFED;
++ else
++ return -EINVAL;
++
++ delay_ticks = ehrpwm_read(ehrpwm, offset);
++ /* Only the least significant 10 bits are required */
++ delay_ticks = delay_ticks & 0x3ff;
++ if (cfgmask == CONFIG_TICKS) {
++ *delay_val = delay_ticks * ehrpwm->prescale_val;
++ } else if (cfgmask == CONFIG_NS) {
++ delay_ticks = delay_ticks * ehrpwm->prescale_val;
++ delay_ns = pwm_ticks_to_ns(p, delay_ticks);
++ debug("\n delay ns value is %lu", delay_ns);
++ *delay_val = delay_ns;
++ } else {
++ return -EINVAL;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_db_get_delay);
++
++int ehrpwm_db_set_delay(struct pwm_device *p, unsigned char edge,
++ enum config_mask cfgmask, unsigned long delay)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++ unsigned long delay_ticks;
++ unsigned char offset = 0;
++
++ if (!ehrpwm)
++ return -EINVAL;
++
++ if (edge == RISING_EDGE_DELAY)
++ offset = DBRED;
++ else if (edge == FALLING_EDGE_DELAY)
++ offset = DBFED;
++ else
++ return -EINVAL;
++
++ if (cfgmask == CONFIG_TICKS) {
++ delay = delay / ehrpwm->prescale_val;
++ if (delay > 0x3ff)
++ return -EINVAL;
++ ehrpwm_write(ehrpwm, offset, delay);
++ } else if (cfgmask == CONFIG_NS) {
++ delay_ticks = pwm_ns_to_ticks(p, delay);
++ delay_ticks = delay_ticks / ehrpwm->prescale_val;
++ if (delay_ticks > 0x3ff) {
++ ehrpwm_db_get_max_delay(p, CONFIG_NS, &delay_ticks);
++ dev_dbg(p->dev, "%s: Expected delay cannot be"
++ " attained setting the maximum possible delay of"
++ " %lu ns", __func__, delay_ticks);
++ delay_ticks = 0x3ff;
++ }
++ debug("\n delay ticks is %lu", delay_ticks);
++ ehrpwm_write(ehrpwm, offset, delay_ticks);
++ } else {
++ return -EINVAL;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_db_set_delay);
++
++/* Dead Band Configuration functions */
++int ehrpwm_db_set_mode(struct pwm_device *p, unsigned char inmode,
++ unsigned char polsel, unsigned char outmode)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ if (inmode > 0x3 || polsel > 0x3 || outmode > 0x3)
++ return -EINVAL;
++
++ ehrpwm_reg_config(ehrpwm, DBCTL, inmode << DBCTL_INMODE_POS,
++ DBCTL_INMODE_MASK);
++ ehrpwm_reg_config(ehrpwm, DBCTL, polsel << DBCTL_POLSEL_POS,
++ DBCTL_PLSEL_MASK);
++ ehrpwm_reg_config(ehrpwm, DBCTL, outmode, DBCTL_OUTMODE_MASK);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_db_set_mode);
++
++/* PWM chopper Configuration functions */
++int ehrpwm_pc_configure(struct pwm_device *p, unsigned char chpduty,
++ unsigned char chpfreq, unsigned char oshtwidth)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ if (chpduty > 0x7 || chpfreq > 0x7 || oshtwidth > 0xf)
++ return -EINVAL;
++
++ ehrpwm_reg_config(ehrpwm, PCCTL, chpduty << PCCTL_CHPDUTY_POS,
++ PCCTL_CHPDUTY_MASK);
++ ehrpwm_reg_config(ehrpwm, PCCTL, chpfreq << PCCTL_CHPFRQ_POS,
++ PCCTL_CHPFREQ_MASK);
++ ehrpwm_reg_config(ehrpwm, PCCTL, oshtwidth << PCCTL_OSTWID_POS,
++ PCCTL_OSHTWTH_MASK);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_pc_configure);
++
++int ehrpwm_pc_en_dis(struct pwm_device *p, unsigned char chpen)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ if (chpen > 1)
++ return -EINVAL;
++
++ ehrpwm_reg_config(ehrpwm, PCCTL, chpen, BIT(0));
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_pc_en_dis);
++
++/* Trip Zone configuration functions */
++int ehrpwm_tz_sel_event(struct pwm_device *p, unsigned char input,
++ enum tz_event evt)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ unsigned short val = 0;
++ unsigned short mask;
++ unsigned short pos;
++
++ if (evt > 4 || input > 7)
++ return -EINVAL;
++
++ switch (evt) {
++ case TZ_ONE_SHOT_EVENT:
++ pos = input + 8;
++ mask = BIT((pos)) | BIT(input);
++ ehrpwm_reg_config(ehrpwm, TZSEL, 1 << pos, mask);
++ break;
++
++ case TZ_CYCLE_BY_CYCLE:
++ pos = input;
++ mask = BIT(pos) | BIT((pos + 8));
++ ehrpwm_reg_config(ehrpwm, TZSEL, 1 << pos, mask);
++ break;
++
++ case TZ_OSHT_CBC:
++ case TZ_DIS_EVT:
++ if (evt == TZ_OSHT_CBC)
++ val = 1;
++ else
++ val = 0;
++
++ pos = input + 8;
++ mask = BIT((pos));
++ ehrpwm_reg_config(ehrpwm, TZSEL, val << pos, mask);
++ pos = input;
++ mask = BIT((pos));
++ ehrpwm_reg_config(ehrpwm, TZSEL, val << pos, mask);
++ break;
++
++ default:
++ dev_dbg(p->dev, "%s: Invalid command", __func__);
++ return -EINVAL;
++ }
++ debug("\n TZ_sel val is %0x", ehrpwm_read(ehrpwm, TZSEL));
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_tz_sel_event);
++
++int ehrpwm_tz_set_action(struct pwm_device *p, unsigned char ch,
++ unsigned char act)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ if (act > 0x3 || ch > 1)
++ return -EINVAL;
++
++ if (ch == 0)
++ ehrpwm_reg_config(ehrpwm, TZCTL, act, TZCTL_ACTA_MASK);
++ else
++ ehrpwm_reg_config(ehrpwm, TZCTL, act << TZCTL_ACTB_POS,
++ TZCTL_ACTB_MASK);
++
++ debug("\n TZCTL reg val is %0x", ehrpwm_read(ehrpwm, TZCTL));
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_tz_set_action);
++
++int ehrpwm_tz_set_int_en_dis(struct pwm_device *p, enum tz_event event,
++ unsigned char int_en_dis)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ if (event == TZ_ONE_SHOT_EVENT)
++ ehrpwm_reg_config(ehrpwm, TZEINT, int_en_dis <<
++ TZEINT_OSHTEVT_POS, BIT(2));
++ else if (event == TZ_CYCLE_BY_CYCLE)
++ ehrpwm_reg_config(ehrpwm, TZEINT, int_en_dis <<
++ TZEINT_CBCEVT_POS, BIT(1));
++ else
++ return -EINVAL;
++
++ debug("\n TZEINT reg val is %0x", ehrpwm_read(ehrpwm, TZEINT));
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_tz_set_int_en_dis);
++
++int ehrpwm_tz_force_evt(struct pwm_device *p, enum tz_event event)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ if (event == TZ_ONE_SHOT_EVENT)
++ ehrpwm_write(ehrpwm, TZFRC, 0x4);
++ else if (event == TZ_CYCLE_BY_CYCLE)
++ ehrpwm_write(ehrpwm, TZFRC, 0x2);
++ else
++ return -EINVAL;
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_tz_force_evt);
++
++inline int ehrpwm_tz_read_status(struct pwm_device *p, unsigned short *status)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ *status = ehrpwm_read(ehrpwm, TZFLG);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_tz_read_status);
++
++inline int ehrpwm_tz_clr_evt_status(struct pwm_device *p)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++ unsigned short ret;
++
++ ret = ehrpwm_read(ehrpwm, TZFLG);
++ ehrpwm_write(ehrpwm, TZCLR, ret & ~0x1);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_tz_clr_evt_status);
++
++inline int ehrpwm_tz_clr_int_status(struct pwm_device *p)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ ehrpwm_write(ehrpwm, TZCLR, 0x1);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_tz_clr_int_status);
++
++/* Event Trigger Configuration functions */
++int ehrpwm_et_set_sel_evt(struct pwm_device *p, unsigned char evt,
++ unsigned char prd)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ if (evt > 0x7 || prd > 0x3)
++ return -EINVAL;
++
++ ehrpwm_reg_config(ehrpwm, ETSEL, evt, ETSEL_INTSEL_MASK);
++ ehrpwm_reg_config(ehrpwm, ETPS, prd, ETPS_INTPRD_MASK);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_et_set_sel_evt);
++
++inline int ehrpwm_et_int_en_dis(struct pwm_device *p, unsigned char en_dis)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ ehrpwm_reg_config(ehrpwm, ETSEL, en_dis << ETSEL_EN_INT_EN_POS, BIT(3));
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_et_int_en_dis);
++
++inline int ehrpwm_et_read_evt_cnt(struct pwm_device *p,
++ unsigned long *evtcnt)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ *evtcnt = ehrpwm_read(ehrpwm, ETPS) & ETPS_INTCNT_MASK;
++ *evtcnt >>= 0x2;
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_et_read_evt_cnt);
++
++inline int ehrpwm_et_read_int_status(struct pwm_device *p,
++ unsigned long *status)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ *status = ehrpwm_read(ehrpwm, ETFLG) & BIT(0);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_et_read_int_status);
++
++inline int ehrpwm_et_frc_int(struct pwm_device *p)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ ehrpwm_write(ehrpwm, ETFRC, ENABLE);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_et_frc_int);
++
++inline int ehrpwm_et_clr_int(struct pwm_device *p)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ ehrpwm_write(ehrpwm, ETCLR, ENABLE);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_et_clr_int);
++
++/* High Resolution Module configuration */
++inline int ehrpwm_hr_set_phase(struct pwm_device *p, unsigned char val)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ ehrpwm_write(ehrpwm, TBPHSHR, val << 8);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_hr_set_phase);
++
++inline int ehrpwm_hr_set_cmpval(struct pwm_device *p, unsigned char val)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ ehrpwm_write(ehrpwm, CMPAHR, val << 8);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_hr_set_cmpval);
++
++int ehrpwm_hr_config(struct pwm_device *p, unsigned char loadmode,
++ unsigned char ctlmode, unsigned char edgemode)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ if (loadmode > 1 || ctlmode > 1 || edgemode > 3)
++ return -EINVAL;
++
++ if (ehrpwm->version == PWM_VERSION_1) {
++ ehrpwm_reg_config(ehrpwm, AM335X_HRCNFG,
++ loadmode << HRCNFG_LDMD_POS, BIT(3));
++ ehrpwm_reg_config(ehrpwm, AM335X_HRCNFG,
++ ctlmode << HRCNFG_CTLMD_POS, BIT(2));
++ ehrpwm_reg_config(ehrpwm, AM335X_HRCNFG,
++ edgemode, HRCNFG_EDGEMD_MASK);
++ } else {
++ ehrpwm_reg_config(ehrpwm, HRCNFG,
++ loadmode << HRCNFG_LDMD_POS, BIT(3));
++ ehrpwm_reg_config(ehrpwm, HRCNFG,
++ ctlmode << HRCNFG_CTLMD_POS, BIT(2));
++ ehrpwm_reg_config(ehrpwm, HRCNFG,
++ edgemode, HRCNFG_EDGEMD_MASK);
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_hr_config);
++
++inline int ehrpwm_reg_read(struct pwm_device *p, unsigned int reg,
++ unsigned short *val)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ if (!(ehrpwm->version == PWM_VERSION_1)) {
++ if (reg > HRCNFG)
++ return -EINVAL;
++ }
++
++ *val = ehrpwm_read(ehrpwm, reg);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_reg_read);
++
++inline int ehrpwm_reg_write(struct pwm_device *p, unsigned int reg,
++ unsigned short val)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ if (!(ehrpwm->version == PWM_VERSION_1)) {
++ if (reg > HRCNFG)
++ return -EINVAL;
++ }
++
++ ehrpwm_write(ehrpwm, reg, val);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_reg_write);
++
++static int ehrpwm_pwm_set_pol(struct pwm_device *p)
++{
++ unsigned int act_ctrl_reg;
++ unsigned int cmp_reg;
++ unsigned int ctreqcmp_mask;
++ unsigned int ctreqcmp;
++ unsigned short val;
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++ int chan;
++
++ chan = p - &ehrpwm->pwm[0];
++ if (!chan) {
++ act_ctrl_reg = AQCTLA;
++ cmp_reg = CMPA;
++ ctreqcmp_mask = ACTCTL_CAU_MASK;
++ ctreqcmp = 4;
++ } else {
++ act_ctrl_reg = AQCTLB;
++ cmp_reg = CMPB;
++ ctreqcmp_mask = ACTCTL_CBU_MASK;
++ ctreqcmp = 8;
++ }
++
++
++ pm_runtime_get_sync(ehrpwm->dev);
++ val = ((p->active_high ? ACTCTL_CTREQCMP_HIGH : ACTCTL_CTREQCMP_LOW)
++ << ctreqcmp) | (p->active_high ? ACTCTL_CTREQZRO_LOW :
++ ACTCTL_CTREQZRO_HIGH);
++ ehrpwm_write(ehrpwm, act_ctrl_reg, val);
++ pm_runtime_put_sync(ehrpwm->dev);
++ return 0;
++}
++
++static int ehrpwm_pwm_start(struct pwm_device *p)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++ unsigned short val;
++ unsigned short read_val1;
++ unsigned short read_val2;
++ int chan;
++
++
++ /* Trying to start a running PWM, not allowed */
++ if (pwm_is_running(p))
++ return -EPERM;
++
++ /* The PWM clock should be enabled on start */
++ pm_runtime_get_sync(ehrpwm->dev);
++
++ ehrpwm_pwm_set_pol(p);
++ chan = p - &ehrpwm->pwm[0];
++ val = ehrpwm_read(ehrpwm, TBCTL);
++ val = (val & ~TBCTL_CTRMODE_MASK) | (TBCTL_CTRMOD_CTRUP |
++ TBCTL_FREERUN_FREE << 14);
++ ehrpwm_write(ehrpwm, TBCTL, val);
++ ehrpwm_tz_set_action(p, chan, 0x3);
++ read_val1 = ehrpwm_read(ehrpwm, TZFLG);
++ read_val2 = ehrpwm_read(ehrpwm, TZCTL);
++ /*
++ * The state of the other channel is determined by reading the
++ * TZCTL register. If the other channel is also in the running
++ * state, the one-shot event status is cleared; otherwise the
++ * one-shot action for this channel is set to "DO NOTHING".
++ */
++ read_val2 = read_val2 & (chan ? 0x3 : (0x3 << 2));
++ read_val2 = chan ? read_val2 : (read_val2 >> 2);
++ if (!(read_val1 & 0x4) || (read_val2 == 0x3))
++ ehrpwm_tz_clr_evt_status(p);
++
++ set_bit(FLAG_RUNNING, &p->flags);
++ return 0;
++}
++
++/*
++ * The stop function is implemented using the Trip Zone module. The action
++ * for the corresponding channel is set to low and the one-shot software
++ * force event is triggered.
++ */
++static int ehrpwm_pwm_stop(struct pwm_device *p)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++ unsigned short read_val;
++ int chan;
++
++ /* Trying to stop a non-running PWM, not allowed */
++ if (!pwm_is_running(p))
++ return -EPERM;
++
++ chan = p - &ehrpwm->pwm[0];
++ /* Set the Trip Zone Action to low */
++ ehrpwm_tz_set_action(p, chan, 0x2);
++ read_val = ehrpwm_read(ehrpwm, TZFLG);
++ /*
++ * If the channel is already in the stop state, a Trip Zone software
++ * force is not required.
++ */
++ if (!(read_val & 0x4)) {
++ ehrpwm_tz_clr_evt_status(p);
++ ehrpwm_tz_force_evt(p, TZ_ONE_SHOT_EVENT);
++ }
++
++ /* The PWM clock should be disabled on stop */
++ pm_runtime_put_sync(ehrpwm->dev);
++ clear_bit(FLAG_RUNNING, &p->flags);
++ return 0;
++}
++
++/*
++ * A prescaler is used when the period value exceeds the maximum value
++ * of the 16-bit period register. We always look for the minimum prescaler
++ * value, as it results in the widest range of duty control.
++ */
++static char get_divider_val(unsigned int desired_ps_val, unsigned int
++*ps_div_val, unsigned int *tb_div_val)
++{
++ char i = 0;
++ char j = 0;
++
++ for (i = 0; i <= 7; i++) {
++ for (j = 0; j <= 7; j++) {
++ if (((1 << i) * (j ? (j * 2) : 1)) >= desired_ps_val) {
++ *ps_div_val = (1 << i) * (j ? (j * 2) : 1);
++ *tb_div_val = (i << 10) | (j << 7) ;
++ return 0;
++ }
++ }
++ }
++
++ return -1;
++}
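As a worked example of the search above (the clock rate and period are assumed only for illustration): a 100 MHz time-base clock and a 5 ms period need 500000 ticks, which does not fit in the 16-bit TBPRD register.

static void example_divider(void)
{
	unsigned int ps_div_val, tb_div_val;

	/* desired divider = 500000 / 65535 + 1 = 8 */
	get_divider_val(500000 / 65535 + 1, &ps_div_val, &tb_div_val);
	/*
	 * The loops stop at i = 0, j = 4: (1 << 0) * (4 * 2) = 8, so
	 * ps_div_val = 8 (CLKDIV field 0 -> /1, HSPCLKDIV field 4 -> /8)
	 * and tb_div_val = (0 << 10) | (4 << 7). TBPRD is then loaded
	 * with 500000 / 8 - 1 = 62499.
	 */
}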
++
++static int ehrpwm_pwm_set_prd(struct pwm_device *p)
++{
++ unsigned int ps_div_val = 1;
++ unsigned int tb_div_val = 0;
++ char ret;
++ unsigned short val;
++ unsigned short period_ticks;
++ struct pwm_device *temp;
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++ int chan = 0;
++ /*
++ * Since the device has a single period register, copy the period
++ * value to the other channel also.
++ */
++ chan = p - &ehrpwm->pwm[0];
++ temp = &ehrpwm->pwm[!chan];
++ temp->period_ticks = p->period_ticks;
++ temp->period_ns = p->period_ns;
++ debug("\n period_ticks is %lu", p->period_ticks);
++
++ if (p->period_ticks > 65535) {
++ ret = get_divider_val(p->period_ticks / 65535 + 1, &ps_div_val,
++ &tb_div_val);
++ if (ret) {
++ dev_err(p->dev, "failed to get the divider value");
++ return -EINVAL;
++ }
++ }
++
++ pm_runtime_get_sync(ehrpwm->dev);
++ val = ehrpwm_read(ehrpwm, TBCTL);
++ val = (val & ~TBCTL_CLKDIV_MASK & ~TBCTL_HSPCLKDIV_MASK) | tb_div_val;
++ ehrpwm_write(ehrpwm, TBCTL, val);
++ period_ticks = p->period_ticks / ps_div_val;
++
++ if (period_ticks <= 1) {
++ dev_err(p->dev, "Required period/frequency cannot be obtained");
++ pm_runtime_put_sync(ehrpwm->dev);
++ return -EINVAL;
++ }
++ /*
++ * Program the period register with 1 less than the actual value, since
++ * the module generates a waveform with a period always 1 greater than
++ * the programmed value.
++ */
++ ehrpwm_write(ehrpwm, TBPRD, (unsigned short)(period_ticks - 1));
++ pm_runtime_put_sync(ehrpwm->dev);
++ debug("\n period_ticks is %d", period_ticks);
++ ehrpwm->prescale_val = ps_div_val;
++ debug("\n Prescaler value is %d", ehrpwm->prescale_val);
++
++ return 0;
++}
++
++static int ehrpwm_hr_duty_config(struct pwm_device *p)
++{
++ unsigned char no_of_mepsteps;
++ unsigned short cmphr_val;
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++
++ if (!p->tick_hz) {
++ dev_dbg(p->dev, "%s: p->tick_hz is zero\n", __func__);
++ return -EINVAL;
++ }
++
++ /*
++ * Calculate the number of MEP steps. Assume the system clock
++ * is in the order of MHz.
++ */
++ no_of_mepsteps = USEC_PER_SEC / ((p->tick_hz / USEC_PER_SEC) * 63);
++
++ pm_runtime_get_sync(ehrpwm->dev);
++ /* Calculate the CMPHR Value */
++ cmphr_val = p->tick_hz / USEC_PER_SEC;
++ cmphr_val = (p->duty_ns * cmphr_val) % MSEC_PER_SEC;
++ cmphr_val = (cmphr_val * no_of_mepsteps) / 1000;
++ cmphr_val = (cmphr_val << 8) + 0x180;
++ ehrpwm_write(ehrpwm, CMPAHR, cmphr_val);
++
++ if (ehrpwm->version == PWM_VERSION_1)
++ ehrpwm_write(ehrpwm, AM335X_HRCNFG, 0x2);
++ else
++ ehrpwm_write(ehrpwm, HRCNFG, 0x2);
++
++ pm_runtime_put_sync(ehrpwm->dev);
++ return 0;
++}
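To make the fixed-point arithmetic above concrete, here is a worked example with an assumed 100 MHz tick clock and a requested duty of 10005 ns (numbers chosen only to exercise the fractional path):

/*
 *   no_of_mepsteps = 1000000 / ((100000000 / 1000000) * 63)
 *                  = 1000000 / 6300            = 158
 *   cmphr_val      = 100000000 / 1000000       = 100    (ticks per microsecond)
 *   cmphr_val      = (10005 * 100) % 1000      = 500    (half of one coarse tick)
 *   cmphr_val      = (500 * 158) / 1000        = 79     (MEP steps)
 *   cmphr_val      = (79 << 8) + 0x180         = 0x5080 (value written to CMPAHR)
 */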
++
++static int ehrpwm_pwm_set_dty(struct pwm_device *p)
++{
++ unsigned short duty_ticks = 0;
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++ int ret = 0;
++ int chan;
++
++ chan = p - &ehrpwm->pwm[0];
++
++ if (!ehrpwm->prescale_val) {
++ dev_dbg(p->dev, "%s: prescale_val is zero\n", __func__);
++ return -EINVAL;
++ }
++
++ duty_ticks = p->duty_ticks / ehrpwm->prescale_val;
++ debug("\n Prescaler value is %d", ehrpwm->prescale_val);
++ debug("\n duty ticks is %d", duty_ticks);
++ pm_runtime_get_sync(ehrpwm->dev);
++ /* High resolution module */
++ if (chan && ehrpwm->prescale_val <= 1) {
++ ret = ehrpwm_hr_duty_config(p);
++ if (ehrpwm->version == PWM_VERSION_1)
++ ehrpwm_write(ehrpwm, AM335X_HRCNFG, 0x2);
++ else
++ ehrpwm_write(ehrpwm, HRCNFG, 0x2);
++ }
++
++ ehrpwm_write(ehrpwm, (chan ? CMPB : CMPA), duty_ticks);
++ pm_runtime_put_sync(ehrpwm->dev);
++ return ret;
++}
++
++int ehrpwm_et_cb_register(struct pwm_device *p, void *data,
++ p_fcallback cb)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++ unsigned long flags;
++
++ spin_lock_irqsave(&ehrpwm->lock, flags);
++ ehrpwm->st_etint.data = data;
++ ehrpwm->st_etint.pcallback = cb;
++ spin_unlock_irqrestore(&ehrpwm->lock, flags);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_et_cb_register);
++
++int ehrpwm_tz_cb_register(struct pwm_device *p, void *data,
++ p_fcallback cb)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++ unsigned long flags;
++
++ spin_lock_irqsave(&ehrpwm->lock, flags);
++ ehrpwm->st_tzint.data = data;
++ ehrpwm->st_tzint.pcallback = cb;
++ spin_unlock_irqrestore(&ehrpwm->lock, flags);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_tz_cb_register);
++
++static int ehrpwm_pwm_suspend_cb(struct ehrpwm_pwm *ehrpwm, void *data)
++{
++ struct ehrpwm_suspend_params *pwm_suspend_params =
++ (struct ehrpwm_suspend_params *)data;
++
++ if (pwm_suspend_params->act_delay++ >= pwm_suspend_params->
++ req_delay_cycles) {
++ pwm_start(pwm_suspend_params->pch);
++ ehrpwm_et_cb_register(pwm_suspend_params->pch, NULL, NULL);
++ ehrpwm_et_int_en_dis(pwm_suspend_params->pch, DISABLE);
++ }
++
++ return 0;
++}
++
++int ehrpwm_pwm_suspend(struct pwm_device *p, enum config_mask config_mask,
++ unsigned long val)
++{
++ unsigned long long req_cycles = 0;
++
++ if (!p->period_ns)
++ return -EINVAL;
++
++ ehrpwm_pwm_stop(p);
++ /* Calculate the delay in terms of cycles */
++ if (config_mask == CONFIG_NS)
++ req_cycles = val / p->period_ns;
++ else if (config_mask == CONFIG_TICKS)
++ req_cycles = val;
++ else
++ return -EINVAL;
++
++ /* Configure the event interrupt */
++ ehrpwm_et_set_sel_evt(p, 0x2, 0x1);
++ ehrpwm_suspend_params.pch = p;
++ ehrpwm_suspend_params.req_delay_cycles = req_cycles;
++ ehrpwm_suspend_params.act_delay = 0;
++ ehrpwm_et_cb_register(p, &ehrpwm_suspend_params,
++ ehrpwm_pwm_suspend_cb);
++ ehrpwm_et_int_en_dis(p, ENABLE);
++
++ return 0;
++}
++EXPORT_SYMBOL(ehrpwm_pwm_suspend);
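A minimal usage sketch for the helper above: stop the output and let the event-interrupt callback restart it after roughly ten periods (the channel p is assumed to be requested and already configured with a non-zero period).

static int example_delayed_restart(struct pwm_device *p)
{
	/* CONFIG_NS expresses the delay in nanoseconds; CONFIG_TICKS counts periods directly */
	return ehrpwm_pwm_suspend(p, CONFIG_NS, 10 * p->period_ns);
}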
++
++static irqreturn_t ehrpwm_trip_zone_irq_handler(int irq, void *data)
++{
++ struct ehrpwm_pwm *ehrpwm = (struct ehrpwm_pwm *)data;
++ unsigned long flags;
++ int ret = 0;
++
++ spin_lock_irqsave(&ehrpwm->lock, flags);
++ ret = ehrpwm_read(ehrpwm, TZFLG);
++ if (!(ret & 0x1))
++ return IRQ_NONE;
++
++ if (ehrpwm->st_tzint.pcallback)
++ ret = ehrpwm->st_tzint.pcallback(ehrpwm, ehrpwm->st_tzint.data);
++
++ ret = ehrpwm_read(ehrpwm, TZFLG);
++ ehrpwm_write(ehrpwm, TZCLR, ret & ~0x1);
++ ehrpwm_write(ehrpwm, TZCLR, 0x1);
++ spin_unlock_irqrestore(&ehrpwm->lock, flags);
++
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t ehrpwm_event_irq_handler(int irq, void *data)
++{
++ struct ehrpwm_pwm *ehrpwm = (struct ehrpwm_pwm *)data;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ehrpwm->lock, flags);
++
++ if (ehrpwm->st_etint.pcallback)
++ ehrpwm->st_etint.pcallback(ehrpwm, ehrpwm->st_etint.data);
++
++ ehrpwm_write(ehrpwm, ETCLR, 0x1);
++
++ spin_unlock_irqrestore(&ehrpwm->lock, flags);
++
++ return IRQ_HANDLED;
++}
++
++static int ehrpwm_pwm_config(struct pwm_device *p,
++ struct pwm_config *c)
++{
++ int ret = 0;
++
++ switch (c->config_mask) {
++ case BIT(PWM_CONFIG_PERIOD_TICKS):
++ if (p->max_period_ticks &&
++ (p->max_period_ticks >= c->period_ticks))
++ p->period_ticks = p->max_period_ticks;
++ else
++ p->period_ticks = c->period_ticks;
++
++ ret = ehrpwm_pwm_set_prd(p);
++ break;
++
++ case BIT(PWM_CONFIG_DUTY_TICKS):
++ p->duty_ticks = c->duty_ticks;
++ ret = ehrpwm_pwm_set_dty(p);
++ break;
++
++ case BIT(PWM_CONFIG_POLARITY):
++ p->active_high = c->polarity;
++ ret = ehrpwm_pwm_set_pol(p);
++ break;
++
++ case BIT(PWM_CONFIG_START):
++ ret = ehrpwm_pwm_start(p);
++ break;
++
++ case BIT(PWM_CONFIG_STOP):
++ ret = ehrpwm_pwm_stop(p);
++ break;
++
++ default:
++ dev_dbg(p->dev, "%s: Invalid configuration\n", __func__);
++ ret = -EINVAL;
++ }
++
++ return ret;
++}
++
++static int ehrpwm_pwm_request(struct pwm_device *p)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++ int chan;
++
++ chan = p - &ehrpwm->pwm[0];
++
++ p->tick_hz = clk_get_rate(ehrpwm->clk);
++ debug("\n The clk freq is %lu", p->tick_hz);
++ ehrpwm_pwm_stop(p);
++
++ return 0;
++}
++
++static int ehrpwm_freq_transition_cb(struct pwm_device *p)
++{
++ struct ehrpwm_pwm *ehrpwm = to_ehrpwm_pwm(p);
++ unsigned long duty_ns;
++
++ p->tick_hz = clk_get_rate(ehrpwm->clk);
++ duty_ns = p->duty_ns;
++ if (pwm_is_running(p)) {
++ pwm_stop(p);
++ pwm_set_duty_ns(p, 0);
++ pwm_set_period_ns(p, p->period_ns);
++ pwm_set_duty_ns(p, duty_ns);
++ pwm_start(p);
++ } else {
++ pwm_set_duty_ns(p, 0);
++ pwm_set_period_ns(p, p->period_ns);
++ pwm_set_duty_ns(p, duty_ns);
++ }
++ return 0;
++}
++
++static int ehrpwm_probe(struct platform_device *pdev)
++{
++ struct ehrpwm_pwm *ehrpwm = NULL;
++ struct resource *r;
++ int ret = 0;
++ int chan = 0;
++ struct pwmss_platform_data *pdata = (&pdev->dev)->platform_data;
++ int ch_mask = 0;
++ int val;
++ char con_id[PWM_CON_ID_STRING_LENGTH] = "epwmss";
++
++ ehrpwm = kzalloc(sizeof(*ehrpwm), GFP_KERNEL);
++ if (!ehrpwm) {
++ dev_err(&pdev->dev, "failed to allocate memory\n");
++ ret = -ENOMEM;
++ goto err_mem_failure;
++ }
++
++ ehrpwm->version = pdata->version;
++
++ if (ehrpwm->version == PWM_VERSION_1) {
++ sprintf(con_id, "%s%d_%s", con_id, pdev->id, "fck");
++ ehrpwm->clk = clk_get(&pdev->dev, con_id);
++ } else
++ ehrpwm->clk = clk_get(&pdev->dev, "ehrpwm");
++
++ pm_runtime_enable(&pdev->dev);
++ ehrpwm->dev = &pdev->dev;
++ if (IS_ERR(ehrpwm->clk)) {
++ ret = PTR_ERR(ehrpwm->clk);
++ goto err_clock_failure;
++ }
++
++ if (ehrpwm->version == PWM_VERSION_1) {
++ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++ if (!r) {
++ dev_err(&pdev->dev, "no memory resource defined\n");
++ ret = -ENOMEM;
++ goto err_resource_mem_failure;
++ }
++
++ ehrpwm->config_mem_base = ioremap(r->start, resource_size(r));
++
++ if (!ehrpwm->config_mem_base) {
++
++ dev_err(&pdev->dev, "failed to ioremap() registers\n");
++ ret = -ENODEV;
++ goto err_free_mem_config;
++ }
++
++ pm_runtime_get_sync(ehrpwm->dev);
++ val = readw(ehrpwm->config_mem_base + PWMSS_CLKCONFIG);
++ val |= BIT(EPWM_CLK_EN);
++ writew(val, ehrpwm->config_mem_base + PWMSS_CLKCONFIG);
++ pm_runtime_put_sync(ehrpwm->dev);
++ } else
++ ch_mask = pdata->channel_mask;
++
++ spin_lock_init(&ehrpwm->lock);
++ ehrpwm->ops.config = ehrpwm_pwm_config;
++ ehrpwm->ops.request = ehrpwm_pwm_request;
++ ehrpwm->ops.freq_transition_notifier_cb = ehrpwm_freq_transition_cb;
++ ehrpwm->prescale_val = 1;
++
++ if (ehrpwm->version == PWM_VERSION_1)
++ r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
++ else
++ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++ if (!r) {
++ dev_err(&pdev->dev, "no memory resource defined\n");
++ ret = -ENODEV;
++ goto err_resource_mem2_failiure;
++ }
++
++ r = request_mem_region(r->start, resource_size(r), pdev->name);
++ if (!r) {
++ dev_err(&pdev->dev, "failed to request memory resource\n");
++ ret = -EBUSY;
++ goto err_request_mem2_failure;
++ }
++
++ ehrpwm->mmio_base = ioremap(r->start, resource_size(r));
++ if (!ehrpwm->mmio_base) {
++ dev_err(&pdev->dev, "failed to ioremap() registers\n");
++ ret = -ENODEV;
++ goto err_free_mem2;
++ }
++
++ ehrpwm->irq[0] = platform_get_irq(pdev, 0);
++ if (ehrpwm->irq[0] == -ENXIO) {
++ dev_err(&pdev->dev, "No IRQ resource\n");
++ ret = -ENXIO;
++ goto err_free_io;
++ }
++
++ ret = request_irq(ehrpwm->irq[0], ehrpwm_trip_zone_irq_handler,
++ 0, "ehrpwmTZ", ehrpwm);
++ if (ret)
++ goto err_free_io;
++
++ ehrpwm->irq[1] = platform_get_irq(pdev, 1);
++ if (ehrpwm->irq[1] == -ENXIO) {
++ dev_err(&pdev->dev, "No IRQ resource\n");
++ ret = -ENXIO;
++ goto err_request_irq;
++ }
++
++ ret = request_irq(ehrpwm->irq[1], ehrpwm_event_irq_handler,
++ 0, "ehrpwm_evt", ehrpwm);
++ if (ret)
++ goto err_request_irq;
++
++ for (chan = 0; chan < NCHAN; chan++) {
++ ehrpwm->pwm[chan].ops = &ehrpwm->ops;
++ pwm_set_drvdata(&ehrpwm->pwm[chan], ehrpwm);
++ ehrpwm->pwm[chan].tick_hz = clk_get_rate(ehrpwm->clk);
++
++ if (pdata->chan_attrib[chan].max_freq) {
++ int period_ns = NSEC_PER_SEC
++ / pdata->chan_attrib[chan].max_freq;
++
++ ehrpwm->pwm[chan].max_period_ticks =
++ pwm_ns_to_ticks(&ehrpwm->pwm[chan], period_ns);
++ }
++
++ if (!(ehrpwm->version == PWM_VERSION_1)) {
++ if (!(ch_mask & (0x1 << chan)))
++ continue;
++ }
++
++ ret = pwm_register(&ehrpwm->pwm[chan], &pdev->dev, chan);
++ if (ret)
++ goto err_pwm_register;
++ }
++
++ platform_set_drvdata(pdev, ehrpwm);
++ return 0;
++
++err_pwm_register:
++ for (chan = 0; chan < NCHAN; chan++) {
++ if (pwm_is_registered(&ehrpwm->pwm[chan]))
++ pwm_unregister(&ehrpwm->pwm[chan]);
++ }
++
++err_request_irq:
++ if (ehrpwm->irq[0] != -ENXIO)
++ free_irq(ehrpwm->irq[0], ehrpwm);
++err_free_io:
++ iounmap(ehrpwm->mmio_base);
++err_free_mem2:
++ release_mem_region(r->start, resource_size(r));
++err_request_mem2_failure:
++err_resource_mem2_failiure:
++ if (ehrpwm->version == PWM_VERSION_1) {
++ iounmap(ehrpwm->config_mem_base);
++ ehrpwm->config_mem_base = NULL;
++ }
++err_free_mem_config:
++err_resource_mem_failure:
++ clk_put(ehrpwm->clk);
++ pm_runtime_disable(ehrpwm->dev);
++err_clock_failure:
++ kfree(ehrpwm);
++err_mem_failure:
++ return ret;
++}
++
++#ifdef CONFIG_PM
++
++void ehrpwm_context_save(struct ehrpwm_pwm *ehrpwm,
++ struct ehrpwm_context *ehrpwm_ctx)
++{
++ pm_runtime_get_sync(ehrpwm->dev);
++ ehrpwm_ctx->tbctl = ehrpwm_read(ehrpwm, TBCTL);
++ ehrpwm_ctx->tbprd = ehrpwm_read(ehrpwm, TBPRD);
++ if (ehrpwm->version == PWM_VERSION_1)
++ ehrpwm_ctx->hrcfg = ehrpwm_read(ehrpwm, AM335X_HRCNFG);
++ else
++ ehrpwm_ctx->hrcfg = ehrpwm_read(ehrpwm, HRCNFG);
++ ehrpwm_ctx->aqctla = ehrpwm_read(ehrpwm, AQCTLA);
++ ehrpwm_ctx->aqctlb = ehrpwm_read(ehrpwm, AQCTLB);
++ ehrpwm_ctx->cmpa = ehrpwm_read(ehrpwm, CMPA);
++ ehrpwm_ctx->cmpb = ehrpwm_read(ehrpwm, CMPB);
++ ehrpwm_ctx->tzctl = ehrpwm_read(ehrpwm, TZCTL);
++ ehrpwm_ctx->tzflg = ehrpwm_read(ehrpwm, TZFLG);
++ ehrpwm_ctx->tzclr = ehrpwm_read(ehrpwm, TZCLR);
++ ehrpwm_ctx->tzfrc = ehrpwm_read(ehrpwm, TZFRC);
++ pm_runtime_put_sync(ehrpwm->dev);
++}
++
++void ehrpwm_context_restore(struct ehrpwm_pwm *ehrpwm,
++ struct ehrpwm_context *ehrpwm_ctx)
++{
++ ehrpwm_write(ehrpwm, TBCTL, ehrpwm_ctx->tbctl);
++ ehrpwm_write(ehrpwm, TBPRD, ehrpwm_ctx->tbprd);
++ if (ehrpwm->version == PWM_VERSION_1)
++ ehrpwm_write(ehrpwm, AM335X_HRCNFG, ehrpwm_ctx->hrcfg);
++ else
++ ehrpwm_write(ehrpwm, HRCNFG, ehrpwm_ctx->hrcfg);
++ ehrpwm_write(ehrpwm, AQCTLA, ehrpwm_ctx->aqctla);
++ ehrpwm_write(ehrpwm, AQCTLB, ehrpwm_ctx->aqctlb);
++ ehrpwm_write(ehrpwm, CMPA, ehrpwm_ctx->cmpa);
++ ehrpwm_write(ehrpwm, CMPB, ehrpwm_ctx->cmpb);
++ ehrpwm_write(ehrpwm, TZCTL, ehrpwm_ctx->tzctl);
++ ehrpwm_write(ehrpwm, TZFLG, ehrpwm_ctx->tzflg);
++ ehrpwm_write(ehrpwm, TZCLR, ehrpwm_ctx->tzclr);
++ ehrpwm_write(ehrpwm, TZFRC, ehrpwm_ctx->tzfrc);
++}
++
++static int ehrpwm_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ struct ehrpwm_pwm *ehrpwm = platform_get_drvdata(pdev);
++
++ ehrpwm_context_save(ehrpwm, &ehrpwm->ctx);
++ pm_runtime_put_sync(ehrpwm->dev);
++
++ return 0;
++}
++
++static int ehrpwm_resume(struct platform_device *pdev)
++{
++ struct ehrpwm_pwm *ehrpwm = platform_get_drvdata(pdev);
++
++ pm_runtime_get_sync(ehrpwm->dev);
++ ehrpwm_context_restore(ehrpwm, &ehrpwm->ctx);
++
++ return 0;
++}
++
++#else
++#define ehrpwm_suspend NULL
++#define ehrpwm_resume NULL
++#endif /* CONFIG_PM */
++
++static int __devexit ehrpwm_remove(struct platform_device *pdev)
++{
++ struct ehrpwm_pwm *ehrpwm = platform_get_drvdata(pdev);
++ struct resource *r;
++ unsigned char i;
++ int val;
++ struct pwmss_platform_data *pdata;
++
++ if (ehrpwm->version == PWM_VERSION_1) {
++ pdata = (&pdev->dev)->platform_data;
++ val = readw(ehrpwm->config_mem_base + PWMSS_CLKCONFIG);
++ val &= ~BIT(EPWM_CLK_EN);
++ writew(val, ehrpwm->config_mem_base + PWMSS_CLKCONFIG);
++ iounmap(ehrpwm->config_mem_base);
++ ehrpwm->config_mem_base = NULL;
++ }
++
++ for (i = 0; i < NCHAN; i++) {
++ if (pwm_is_registered(&ehrpwm->pwm[i]))
++ pwm_unregister(&ehrpwm->pwm[i]);
++ }
++
++ for (i = 0; i < 2; i++)
++ if (ehrpwm->irq[i] != -ENXIO)
++ free_irq(ehrpwm->irq[i], ehrpwm);
++ iounmap(ehrpwm->mmio_base);
++
++ if (ehrpwm->version == PWM_VERSION_1)
++ r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
++ else
++ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++ release_mem_region(r->start, resource_size(r));
++ platform_set_drvdata(pdev, NULL);
++ clk_put(ehrpwm->clk);
++ pm_runtime_disable(ehrpwm->dev);
++ kfree(ehrpwm);
++
++ return 0;
++}
++
++static struct platform_driver ehrpwm_driver = {
++ .driver = {
++ .name = "ehrpwm",
++ .owner = THIS_MODULE,
++ },
++ .probe = ehrpwm_probe,
++ .remove = __devexit_p(ehrpwm_remove),
++ .suspend = ehrpwm_suspend,
++ .resume = ehrpwm_resume,
++};
++
++static int __init ehrpwm_init(void)
++{
++ return platform_driver_register(&ehrpwm_driver);
++}
++module_init(ehrpwm_init);
++
++static void __exit ehrpwm_exit(void)
++{
++ platform_driver_unregister(&ehrpwm_driver);
++}
++module_exit(ehrpwm_exit);
++
++MODULE_AUTHOR("Texas Instruments");
++MODULE_DESCRIPTION("Driver for Davinci eHRPWM peripheral");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:ehrpwm");
+diff --git a/drivers/pwm/pwm.c b/drivers/pwm/pwm.c
+new file mode 100644
+index 0000000..b23b260
+--- /dev/null
++++ b/drivers/pwm/pwm.c
+@@ -0,0 +1,843 @@
++/*
++ * PWM API implementation
++ *
++ * Copyright (C) 2011 Bill Gatliff <bgat@billgatliff.com>
++ * Copyright (C) 2011 Arun Murthy <arun.murthy@stericsson.com>
++ *
++ * This program is free software; you may redistribute and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
++ * USA
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/device.h>
++#include <linux/fs.h>
++#include <linux/completion.h>
++#include <linux/workqueue.h>
++#include <linux/list.h>
++#include <linux/sched.h>
++#include <linux/platform_device.h>
++#include <linux/cpufreq.h>
++#include <linux/pwm/pwm.h>
++
++static const char *REQUEST_SYSFS = "sysfs";
++static LIST_HEAD(pwm_device_list);
++static DEFINE_MUTEX(device_list_mutex);
++static struct class pwm_class;
++static struct workqueue_struct *pwm_handler_workqueue;
++
++static int pwm_match_name(struct device *dev, void *name)
++{
++ return !strcmp(name, dev_name(dev));
++}
++
++static struct pwm_device *__pwm_request(struct pwm_device *p, const char *label)
++{
++ int ret;
++
++ ret = test_and_set_bit(FLAG_REQUESTED, &p->flags);
++ if (ret) {
++ p = ERR_PTR(-EBUSY);
++ goto done;
++ }
++
++ p->label = label;
++ p->pid = current->pid;
++
++ if (p->ops->request) {
++ ret = p->ops->request(p);
++ if (ret) {
++ clear_bit(FLAG_REQUESTED, &p->flags);
++ p = ERR_PTR(ret);
++ goto done;
++ }
++ }
++
++done:
++ return p;
++}
++
++static struct pwm_device *__pwm_request_byname(const char *name,
++ const char *label)
++{
++ struct device *d;
++ struct pwm_device *p;
++
++ d = class_find_device(&pwm_class, NULL, (char *)name, pwm_match_name);
++ if (!d) {
++ p = ERR_PTR(-EINVAL);
++ goto done;
++ }
++ if (IS_ERR(d)) {
++ p = (struct pwm_device *)d;
++ goto done;
++ }
++
++ p = __pwm_request(dev_get_drvdata(d), label);
++
++done:
++ return p;
++}
++
++struct pwm_device *pwm_request_byname(const char *name, const char *label)
++{
++ struct pwm_device *p;
++
++ mutex_lock(&device_list_mutex);
++ p = __pwm_request_byname(name, label);
++ mutex_unlock(&device_list_mutex);
++ return p;
++}
++EXPORT_SYMBOL(pwm_request_byname);
++
++struct pwm_device *pwm_request(const char *bus_id, int id, const char *label)
++{
++ char name[256];
++ int ret;
++
++ if (id == -1)
++ ret = scnprintf(name, sizeof name, "%s", bus_id);
++ else
++ ret = scnprintf(name, sizeof name, "%s:%d", bus_id, id);
++ if (ret <= 0 || ret >= sizeof name)
++ return ERR_PTR(-EINVAL);
++
++ return pwm_request_byname(name, label);
++}
++EXPORT_SYMBOL(pwm_request);
++
++void pwm_release(struct pwm_device *p)
++{
++ mutex_lock(&device_list_mutex);
++
++ if (!test_and_clear_bit(FLAG_REQUESTED, &p->flags)) {
++ pr_debug("%s pwm device is not requested!\n",
++ dev_name(p->dev));
++ goto done;
++ }
++
++ pwm_stop(p);
++ pwm_unsynchronize(p, NULL);
++ pwm_set_handler(p, NULL, NULL);
++
++ p->label = NULL;
++ p->pid = -1;
++
++ if (p->ops->release)
++ p->ops->release(p);
++done:
++ mutex_unlock(&device_list_mutex);
++}
++EXPORT_SYMBOL(pwm_release);
++
++unsigned long pwm_ns_to_ticks(struct pwm_device *p, unsigned long nsecs)
++{
++ unsigned long long ticks;
++
++ ticks = nsecs;
++ ticks *= p->tick_hz;
++ do_div(ticks, 1000000000);
++ return ticks;
++}
++EXPORT_SYMBOL(pwm_ns_to_ticks);
++
++unsigned long pwm_ticks_to_ns(struct pwm_device *p, unsigned long ticks)
++{
++ unsigned long long ns;
++
++ if (!p->tick_hz) {
++ pr_debug("%s: frequency is zero\n", dev_name(p->dev));
++ return 0;
++ }
++
++ ns = ticks;
++ ns *= 1000000000UL;
++ do_div(ns, p->tick_hz);
++ return ns;
++}
++EXPORT_SYMBOL(pwm_ticks_to_ns);
++
++static void pwm_config_ns_to_ticks(struct pwm_device *p, struct pwm_config *c)
++{
++ if (test_bit(PWM_CONFIG_PERIOD_NS, &c->config_mask)) {
++ c->period_ticks = pwm_ns_to_ticks(p, c->period_ns);
++ clear_bit(PWM_CONFIG_PERIOD_NS, &c->config_mask);
++ set_bit(PWM_CONFIG_PERIOD_TICKS, &c->config_mask);
++ }
++
++ if (test_bit(PWM_CONFIG_DUTY_NS, &c->config_mask)) {
++ c->duty_ticks = pwm_ns_to_ticks(p, c->duty_ns);
++ clear_bit(PWM_CONFIG_DUTY_NS, &c->config_mask);
++ set_bit(PWM_CONFIG_DUTY_TICKS, &c->config_mask);
++ }
++}
++
++static void pwm_config_percent_to_ticks(struct pwm_device *p,
++ struct pwm_config *c)
++{
++ if (test_bit(PWM_CONFIG_DUTY_PERCENT, &c->config_mask)) {
++ if (test_bit(PWM_CONFIG_PERIOD_TICKS, &c->config_mask))
++ c->duty_ticks = c->period_ticks;
++ else
++ c->duty_ticks = p->period_ticks;
++
++ c->duty_ticks *= c->duty_percent;
++ c->duty_ticks /= 100;
++ clear_bit(PWM_CONFIG_DUTY_PERCENT, &c->config_mask);
++ set_bit(PWM_CONFIG_DUTY_TICKS, &c->config_mask);
++ }
++}
++
++int pwm_config_nosleep(struct pwm_device *p, struct pwm_config *c)
++{
++ if (!p->ops->config_nosleep)
++ return -EINVAL;
++
++ pwm_config_ns_to_ticks(p, c);
++ pwm_config_percent_to_ticks(p, c);
++
++ return p->ops->config_nosleep(p, c);
++}
++EXPORT_SYMBOL(pwm_config_nosleep);
++
++int pwm_config(struct pwm_device *p, struct pwm_config *c)
++{
++ int ret = 0;
++
++ pwm_config_ns_to_ticks(p, c);
++ pwm_config_percent_to_ticks(p, c);
++
++ switch (c->config_mask & (BIT(PWM_CONFIG_PERIOD_TICKS)
++ | BIT(PWM_CONFIG_DUTY_TICKS))) {
++ case BIT(PWM_CONFIG_PERIOD_TICKS):
++ if (p->duty_ticks > c->period_ticks) {
++ ret = -EINVAL;
++ goto err;
++ }
++ break;
++ case BIT(PWM_CONFIG_DUTY_TICKS):
++ if (p->period_ticks < c->duty_ticks) {
++ ret = -EINVAL;
++ goto err;
++ }
++ break;
++ case BIT(PWM_CONFIG_DUTY_TICKS) | BIT(PWM_CONFIG_PERIOD_TICKS):
++ if (c->duty_ticks > c->period_ticks) {
++ ret = -EINVAL;
++ goto err;
++ }
++ break;
++ default:
++ break;
++ }
++
++err:
++ dev_dbg(p->dev, "%s: config_mask %lu period_ticks %lu duty_ticks %lu"
++ " polarity %d duty_ns %lu period_ns %lu duty_percent %d\n",
++ __func__, c->config_mask, c->period_ticks, c->duty_ticks,
++ c->polarity, c->duty_ns, c->period_ns, c->duty_percent);
++
++ if (ret)
++ return ret;
++ spin_lock(&p->pwm_lock);
++ ret = p->ops->config(p, c);
++ spin_unlock(&p->pwm_lock);
++ return ret;
++}
++EXPORT_SYMBOL(pwm_config);
++
++int pwm_set_period_ns(struct pwm_device *p, unsigned long period_ns)
++{
++ struct pwm_config c = {
++ .config_mask = BIT(PWM_CONFIG_PERIOD_TICKS),
++ .period_ticks = pwm_ns_to_ticks(p, period_ns),
++ };
++
++ spin_lock(&p->pwm_lock);
++ p->period_ns = period_ns;
++ spin_unlock(&p->pwm_lock);
++ return pwm_config(p, &c);
++}
++EXPORT_SYMBOL(pwm_set_period_ns);
++
++unsigned long pwm_get_period_ns(struct pwm_device *p)
++{
++ return pwm_ticks_to_ns(p, p->period_ticks);
++}
++EXPORT_SYMBOL(pwm_get_period_ns);
++
++int pwm_set_frequency(struct pwm_device *p, unsigned long freq)
++{
++ struct pwm_config c;
++
++ if (!freq)
++ return -EINVAL;
++
++ c.config_mask = BIT(PWM_CONFIG_PERIOD_TICKS),
++ c.period_ticks = pwm_ns_to_ticks(p, (NSEC_PER_SEC / freq)),
++ spin_lock(&p->pwm_lock);
++ p->period_ns = NSEC_PER_SEC / freq;
++ spin_unlock(&p->pwm_lock);
++ return pwm_config(p, &c);
++}
++EXPORT_SYMBOL(pwm_set_frequency);
++
++unsigned long pwm_get_frequency(struct pwm_device *p)
++{
++ unsigned long period_ns;
++
++ period_ns = pwm_ticks_to_ns(p, p->period_ticks);
++
++ if (!period_ns) {
++ pr_debug("%s: frequency is zero\n", dev_name(p->dev));
++ return 0;
++ }
++
++ return NSEC_PER_SEC / period_ns;
++}
++EXPORT_SYMBOL(pwm_get_frequency);
++
++int pwm_set_period_ticks(struct pwm_device *p,
++ unsigned long ticks)
++{
++ struct pwm_config c = {
++ .config_mask = BIT(PWM_CONFIG_PERIOD_TICKS),
++ .period_ticks = ticks,
++ };
++
++ spin_lock(&p->pwm_lock);
++ p->period_ns = pwm_ticks_to_ns(p, ticks);
++ spin_unlock(&p->pwm_lock);
++ return pwm_config(p, &c);
++}
++EXPORT_SYMBOL(pwm_set_period_ticks);
++
++int pwm_set_duty_ns(struct pwm_device *p, unsigned long duty_ns)
++{
++ struct pwm_config c = {
++ .config_mask = BIT(PWM_CONFIG_DUTY_TICKS),
++ .duty_ticks = pwm_ns_to_ticks(p, duty_ns),
++ };
++ spin_lock(&p->pwm_lock);
++ p->duty_ns = duty_ns;
++ spin_unlock(&p->pwm_lock);
++ return pwm_config(p, &c);
++}
++EXPORT_SYMBOL(pwm_set_duty_ns);
++
++unsigned long pwm_get_duty_ns(struct pwm_device *p)
++{
++ return pwm_ticks_to_ns(p, p->duty_ticks);
++}
++EXPORT_SYMBOL(pwm_get_duty_ns);
++
++int pwm_set_duty_percent(struct pwm_device *p, int percent)
++{
++ struct pwm_config c = {
++ .config_mask = BIT(PWM_CONFIG_DUTY_PERCENT),
++ .duty_percent = percent,
++ };
++
++ spin_lock(&p->pwm_lock);
++ p->duty_ns = p->period_ns * percent;
++ p->duty_ns /= 100;
++ spin_unlock(&p->pwm_lock);
++ return pwm_config(p, &c);
++}
++EXPORT_SYMBOL(pwm_set_duty_percent);
++
++unsigned long pwm_get_duty_percent(struct pwm_device *p)
++{
++ unsigned long long duty_percent;
++
++ if (!p->period_ns) {
++ pr_debug("%s: frequency is zero\n", dev_name(p->dev));
++ return 0;
++ }
++
++ duty_percent = pwm_ticks_to_ns(p, p->duty_ticks);
++ duty_percent *= 100;
++ do_div(duty_percent, p->period_ns);
++ return duty_percent;
++}
++EXPORT_SYMBOL(pwm_get_duty_percent);
++
++int pwm_set_duty_ticks(struct pwm_device *p,
++ unsigned long ticks)
++{
++ struct pwm_config c = {
++ .config_mask = BIT(PWM_CONFIG_DUTY_TICKS),
++ .duty_ticks = ticks,
++ };
++
++ spin_lock(&p->pwm_lock);
++ p->duty_ns = pwm_ticks_to_ns(p, ticks);
++ spin_unlock(&p->pwm_lock);
++ return pwm_config(p, &c);
++}
++EXPORT_SYMBOL(pwm_set_duty_ticks);
++
++int pwm_set_polarity(struct pwm_device *p, int active_high)
++{
++ struct pwm_config c = {
++ .config_mask = BIT(PWM_CONFIG_POLARITY),
++ .polarity = active_high,
++ };
++ return pwm_config(p, &c);
++}
++EXPORT_SYMBOL(pwm_set_polarity);
++
++int pwm_start(struct pwm_device *p)
++{
++ struct pwm_config c = {
++ .config_mask = BIT(PWM_CONFIG_START),
++ };
++ return pwm_config(p, &c);
++}
++EXPORT_SYMBOL(pwm_start);
++
++int pwm_stop(struct pwm_device *p)
++{
++ struct pwm_config c = {
++ .config_mask = BIT(PWM_CONFIG_STOP),
++ };
++ return pwm_config(p, &c);
++}
++EXPORT_SYMBOL(pwm_stop);
++
++int pwm_synchronize(struct pwm_device *p, struct pwm_device *to_p)
++{
++ if (!p->ops->synchronize)
++ return -EINVAL;
++
++ return p->ops->synchronize(p, to_p);
++}
++EXPORT_SYMBOL(pwm_synchronize);
++
++int pwm_unsynchronize(struct pwm_device *p, struct pwm_device *from_p)
++{
++ if (!p->ops->unsynchronize)
++ return -EINVAL;
++
++ return p->ops->unsynchronize(p, from_p);
++}
++EXPORT_SYMBOL(pwm_unsynchronize);
++
++static void pwm_handler(struct work_struct *w)
++{
++ struct pwm_device *p = container_of(w, struct pwm_device,
++ handler_work);
++ if (p->handler && p->handler(p, p->handler_data))
++ pwm_stop(p);
++}
++
++static void __pwm_callback(struct pwm_device *p)
++{
++ queue_work(pwm_handler_workqueue, &p->handler_work);
++}
++
++int pwm_set_handler(struct pwm_device *p, pwm_handler_t handler, void *data)
++{
++ if (p->ops->set_callback) {
++ p->handler_data = data;
++ p->handler = handler;
++ INIT_WORK(&p->handler_work, pwm_handler);
++ return p->ops->set_callback(p, handler ? __pwm_callback : NULL);
++ }
++ return -EINVAL;
++}
++EXPORT_SYMBOL(pwm_set_handler);
++
++static ssize_t pwm_run_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct pwm_device *p = dev_get_drvdata(dev);
++ return sprintf(buf, "%d\n", pwm_is_running(p));
++}
++
++static ssize_t pwm_run_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t len)
++{
++ struct pwm_device *p = dev_get_drvdata(dev);
++ int ret;
++
++ if (sysfs_streq(buf, "1"))
++ ret = pwm_start(p);
++ else if (sysfs_streq(buf, "0"))
++ ret = pwm_stop(p);
++ else
++ ret = -EINVAL;
++
++ if (ret < 0)
++ return ret;
++ return len;
++}
++static DEVICE_ATTR(run, S_IRUGO | S_IWUSR, pwm_run_show, pwm_run_store);
++
++static ssize_t pwm_tick_hz_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct pwm_device *p = dev_get_drvdata(dev);
++ return sprintf(buf, "%lu\n", p->tick_hz);
++}
++static DEVICE_ATTR(tick_hz, S_IRUGO, pwm_tick_hz_show, NULL);
++
++static ssize_t pwm_duty_ns_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct pwm_device *p = dev_get_drvdata(dev);
++ return sprintf(buf, "%lu\n", pwm_get_duty_ns(p));
++}
++
++static ssize_t pwm_duty_ns_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t len)
++{
++ unsigned long duty_ns;
++ struct pwm_device *p = dev_get_drvdata(dev);
++ int ret;
++
++ if (!kstrtoul(buf, 10, &duty_ns)) {
++ ret = pwm_set_duty_ns(p, duty_ns);
++
++ if (ret < 0)
++ return ret;
++ }
++
++ return len;
++}
++static DEVICE_ATTR(duty_ns, S_IRUGO | S_IWUSR, pwm_duty_ns_show,
++ pwm_duty_ns_store);
++
++static ssize_t pwm_duty_percent_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct pwm_device *p = dev_get_drvdata(dev);
++ return sprintf(buf, "%lu\n", pwm_get_duty_percent(p));
++}
++
++static ssize_t pwm_duty_percent_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf,
++ size_t len)
++{
++ unsigned long duty_percent;
++ struct pwm_device *p = dev_get_drvdata(dev);
++ int ret;
++
++ if (!kstrtoul(buf, 10, &duty_percent)) {
++ ret = pwm_set_duty_percent(p, duty_percent);
++
++ if (ret < 0)
++ return ret;
++ }
++
++ return len;
++}
++
++static DEVICE_ATTR(duty_percent, S_IRUGO | S_IWUSR, pwm_duty_percent_show,
++ pwm_duty_percent_store);
++
++static ssize_t pwm_period_ns_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct pwm_device *p = dev_get_drvdata(dev);
++ return sprintf(buf, "%lu\n", pwm_get_period_ns(p));
++}
++
++static ssize_t pwm_period_ns_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t len)
++{
++ unsigned long period_ns;
++ struct pwm_device *p = dev_get_drvdata(dev);
++ int ret;
++
++ if (!kstrtoul(buf, 10, &period_ns)) {
++ ret = pwm_set_period_ns(p, period_ns);
++
++ if (ret < 0)
++ return ret;
++ }
++
++ return len;
++}
++static DEVICE_ATTR(period_ns, S_IRUGO | S_IWUSR, pwm_period_ns_show,
++ pwm_period_ns_store);
++
++static ssize_t pwm_period_freq_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct pwm_device *p = dev_get_drvdata(dev);
++ return sprintf(buf, "%lu\n", pwm_get_frequency(p));
++}
++
++static ssize_t pwm_period_freq_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf,
++ size_t len)
++{
++ unsigned long freq_hz;
++ int ret;
++
++ struct pwm_device *p = dev_get_drvdata(dev);
++ if (!kstrtoul(buf, 10, &freq_hz)) {
++ ret = pwm_set_frequency(p, freq_hz);
++
++ if (ret < 0)
++ return ret;
++ }
++ return len;
++}
++
++static DEVICE_ATTR(period_freq, S_IRUGO | S_IWUSR, pwm_period_freq_show,
++ pwm_period_freq_store);
++
++static ssize_t pwm_polarity_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct pwm_device *p = dev_get_drvdata(dev);
++ return sprintf(buf, "%d\n", p->active_high ? 1 : 0);
++}
++
++static ssize_t pwm_polarity_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t len)
++{
++ unsigned long polarity;
++ struct pwm_device *p = dev_get_drvdata(dev);
++ int ret;
++
++ if (!kstrtoul(buf, 10, &polarity)) {
++ ret = pwm_set_polarity(p, polarity);
++
++ if (ret < 0)
++ return ret;
++ }
++
++ return len;
++}
++static DEVICE_ATTR(polarity, S_IRUGO | S_IWUSR, pwm_polarity_show,
++ pwm_polarity_store);
++
++static ssize_t pwm_request_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct pwm_device *p = dev_get_drvdata(dev);
++ int ret;
++
++ ret = test_bit(FLAG_REQUESTED, &p->flags);
++
++ if (ret)
++ return sprintf(buf, "%s requested by %s\n",
++ dev_name(p->dev), p->label);
++ else
++ return sprintf(buf, "%s is free\n",
++ dev_name(p->dev));
++}
++
++static ssize_t pwm_request_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t len)
++{
++ struct pwm_device *p = dev_get_drvdata(dev);
++ unsigned long request;
++ struct pwm_device *ret;
++
++ if (!kstrtoul(buf, 10, &request)) {
++ if (request) {
++ mutex_lock(&device_list_mutex);
++ ret = __pwm_request(p, REQUEST_SYSFS);
++ mutex_unlock(&device_list_mutex);
++
++ if (IS_ERR(ret))
++ return PTR_ERR(ret);
++ } else
++ pwm_release(p);
++ }
++
++ return len;
++}
++static DEVICE_ATTR(request, S_IRUGO | S_IWUSR, pwm_request_show,
++ pwm_request_store);
++
++static const struct attribute *pwm_attrs[] = {
++ &dev_attr_tick_hz.attr,
++ &dev_attr_run.attr,
++ &dev_attr_polarity.attr,
++ &dev_attr_duty_ns.attr,
++ &dev_attr_period_ns.attr,
++ &dev_attr_request.attr,
++ &dev_attr_duty_percent.attr,
++ &dev_attr_period_freq.attr,
++ NULL,
++};
++
++static const struct attribute_group pwm_device_attr_group = {
++ .attrs = (struct attribute **) pwm_attrs,
++};
++
++static struct class_attribute pwm_class_attrs[] = {
++ __ATTR_NULL,
++};
++
++static struct class pwm_class = {
++ .name = "pwm",
++ .owner = THIS_MODULE,
++
++ .class_attrs = pwm_class_attrs,
++};
++
++static int pwm_freq_transition_notifier_cb(struct notifier_block *nb,
++ unsigned long val, void *data)
++{
++ struct pwm_device *p;
++
++ p = container_of(nb, struct pwm_device, freq_transition);
++
++ if (val == CPUFREQ_POSTCHANGE && pwm_is_requested(p) &&
++ p->ops->freq_transition_notifier_cb)
++ p->ops->freq_transition_notifier_cb(p);
++
++ return 0;
++}
++
++static inline int pwm_cpufreq_notifier_register(struct pwm_device *p)
++{
++ p->freq_transition.notifier_call = pwm_freq_transition_notifier_cb;
++
++ return cpufreq_register_notifier(&p->freq_transition,
++ CPUFREQ_TRANSITION_NOTIFIER);
++}
++
++int pwm_register_byname(struct pwm_device *p, struct device *parent,
++ const char *name)
++{
++ struct device *d;
++ int ret;
++
++ if (!p->ops || !p->ops->config)
++ return -EINVAL;
++
++ mutex_lock(&device_list_mutex);
++
++ d = class_find_device(&pwm_class, NULL, (char *)name, pwm_match_name);
++ if (d) {
++ ret = -EEXIST;
++ goto err_found_device;
++ }
++
++ p->dev = device_create(&pwm_class, parent, MKDEV(0, 0), NULL, name);
++ if (IS_ERR(p->dev)) {
++ ret = PTR_ERR(p->dev);
++ goto err_device_create;
++ }
++
++ ret = sysfs_create_group(&p->dev->kobj, &pwm_device_attr_group);
++ if (ret)
++ goto err_create_group;
++
++ dev_set_drvdata(p->dev, p);
++ p->flags = BIT(FLAG_REGISTERED);
++
++ ret = pwm_cpufreq_notifier_register(p);
++
++ if (ret < 0)
++ printk(KERN_ERR "Failed to add cpufreq notifier\n");
++
++ spin_lock_init(&p->pwm_lock);
++ goto done;
++
++err_create_group:
++ device_unregister(p->dev);
++ p->flags = 0;
++
++err_device_create:
++err_found_device:
++done:
++ mutex_unlock(&device_list_mutex);
++
++ return ret;
++}
++EXPORT_SYMBOL(pwm_register_byname);
++
++int pwm_register(struct pwm_device *p, struct device *parent, int id)
++{
++ int ret;
++ char name[256];
++
++ if (IS_ERR_OR_NULL(parent))
++ return -EINVAL;
++
++ if (id == -1)
++ ret = scnprintf(name, sizeof name, "%s", dev_name(parent));
++ else
++ ret = scnprintf(name, sizeof name, "%s:%d",
++ dev_name(parent), id);
++ if (ret <= 0 || ret >= sizeof name)
++ return -EINVAL;
++
++ return pwm_register_byname(p, parent, name);
++}
++EXPORT_SYMBOL(pwm_register);
++
++int pwm_unregister(struct pwm_device *p)
++{
++ int ret = 0;
++
++ mutex_lock(&device_list_mutex);
++
++ if (pwm_is_running(p) || pwm_is_requested(p)) {
++ ret = -EBUSY;
++ goto done;
++ }
++
++ sysfs_remove_group(&p->dev->kobj, &pwm_device_attr_group);
++ device_unregister(p->dev);
++ p->flags = 0;
++
++done:
++ mutex_unlock(&device_list_mutex);
++
++ return ret;
++}
++EXPORT_SYMBOL(pwm_unregister);
++
++static int __init pwm_init(void)
++{
++ return class_register(&pwm_class);
++}
++
++static void __exit pwm_exit(void)
++{
++ class_unregister(&pwm_class);
++}
++
++#ifdef MODULE
++module_init(pwm_init);
++module_exit(pwm_exit);
++MODULE_LICENSE("GPL");
++#else
++postcore_initcall(pwm_init);
++#endif
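The request/config/start calls exported above are all a minimal in-kernel consumer needs; the same controls are also exposed to user space under /sys/class/pwm/<device>/ (request, run, period_ns, duty_ns, duty_percent, period_freq, polarity, tick_hz). A sketch of a client follows; the bus id "ehrpwm.0" and channel 0 are placeholders for whatever name the platform actually registers:

#include <linux/err.h>
#include <linux/pwm/pwm.h>

static struct pwm_device *example_pwm;

static int example_pwm_start(void)
{
	int ret;

	/* pwm_request() looks up the device registered as "<bus_id>:<id>" */
	example_pwm = pwm_request("ehrpwm.0", 0, "example");
	if (IS_ERR(example_pwm))
		return PTR_ERR(example_pwm);

	ret = pwm_set_period_ns(example_pwm, 1000000);	/* 1 ms period (1 kHz) */
	if (!ret)
		ret = pwm_set_duty_percent(example_pwm, 50);
	if (!ret)
		ret = pwm_start(example_pwm);
	if (ret)
		pwm_release(example_pwm);

	return ret;
}

static void example_pwm_stop(void)
{
	pwm_stop(example_pwm);
	pwm_release(example_pwm);
}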
+diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
+index 9713b1b..529c591 100644
+--- a/drivers/regulator/Kconfig
++++ b/drivers/regulator/Kconfig
+@@ -259,6 +259,15 @@ config REGULATOR_TPS6507X
+ three step-down converters and two general-purpose LDO voltage regulators.
+ It supports TI's software based Class-2 SmartReflex implementation.
+
++config REGULATOR_TPS65217
++ tristate "TI TPS65217 Power regulators"
++ depends on MFD_TPS65217
++ help
++ This driver supports TPS65217 voltage regulator chips. TPS65217
++ provides three step-down converters and four general-purpose LDO
++ voltage regulators. It supports software-based voltage control
++ for different voltage domains.
++
+ config REGULATOR_TPS65912
+ tristate "TI TPS65912 Power regulator"
+ depends on (MFD_TPS65912_I2C || MFD_TPS65912_SPI)
+diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
+index 93a6318..aeae546 100644
+--- a/drivers/regulator/Makefile
++++ b/drivers/regulator/Makefile
+@@ -38,6 +38,7 @@ obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
+ obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o
+ obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
+ obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o
++obj-$(CONFIG_REGULATOR_TPS65217) += tps65217-regulator.o
+ obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
+ obj-$(CONFIG_REGULATOR_TPS65912) += tps65912-regulator.o
+ obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
+diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
+new file mode 100644
+index 0000000..6665566
+--- /dev/null
++++ b/drivers/regulator/tps65217-regulator.c
+@@ -0,0 +1,493 @@
++/*
++ * tps65217-regulator.c
++ *
++ * Regulator driver for TPS65217 PMIC
++ *
++ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/device.h>
++#include <linux/init.h>
++#include <linux/err.h>
++#include <linux/platform_device.h>
++
++#include <linux/regulator/driver.h>
++#include <linux/regulator/machine.h>
++#include <linux/mfd/tps65217.h>
++
++#define TPS65217_REGULATOR(_name, _id, _ops, _n) \
++ { \
++ .name = _name, \
++ .id = _id, \
++ .ops = &_ops, \
++ .n_voltages = _n, \
++ .type = REGULATOR_VOLTAGE, \
++ .owner = THIS_MODULE, \
++ } \
++
++#define TPS65217_INFO(_nm, _min, _max, _f1, _f2, _t, _n, _em, _vr, _vm) \
++ { \
++ .name = _nm, \
++ .min_uV = _min, \
++ .max_uV = _max, \
++ .vsel_to_uv = _f1, \
++ .uv_to_vsel = _f2, \
++ .table = _t, \
++ .table_len = _n, \
++ .enable_mask = _em, \
++ .set_vout_reg = _vr, \
++ .set_vout_mask = _vm, \
++ }
++
++static const int LDO1_VSEL_table[] = {
++ 1000000, 1100000, 1200000, 1250000,
++ 1300000, 1350000, 1400000, 1500000,
++ 1600000, 1800000, 2500000, 2750000,
++ 2800000, 3000000, 3100000, 3300000,
++};
++
++static int tps65217_vsel_to_uv1(unsigned int vsel)
++{
++ int uV = 0;
++
++ if (vsel > 63)
++ return -EINVAL;
++
++ if (vsel <= 24)
++ uV = vsel * 25000 + 900000;
++ else if (vsel <= 52)
++ uV = (vsel - 24) * 50000 + 1500000;
++ else if (vsel < 56)
++ uV = (vsel - 52) * 100000 + 2900000;
++ else
++ uV = 3300000;
++
++ return uV;
++}
++
++static int tps65217_uv_to_vsel1(int uV, unsigned int *vsel)
++{
++ if ((uV < 0) || (uV > 3300000))
++ return -EINVAL;
++
++ if (uV <= 1500000)
++ *vsel = (uV - 875001) / 25000;
++ else if (uV <= 2900000)
++ *vsel = 24 + (uV - 1450001) / 50000;
++ else if (uV < 3300000)
++ *vsel = 52 + (uV - 2800001) / 100000;
++ else
++ *vsel = 56;
++
++ return 0;
++}
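Two round-trip checks of the piecewise mapping implemented by the pair of helpers above (integer division throughout, as in the code):

    tps65217_vsel_to_uv1(24)      = 24 * 25000 + 900000              = 1500000 uV (1.5 V)
    tps65217_uv_to_vsel1(1500000) -> (1500000 - 875001) / 25000      = 24

    tps65217_vsel_to_uv1(52)      = (52 - 24) * 50000 + 1500000      = 2900000 uV (2.9 V)
    tps65217_uv_to_vsel1(2900000) -> 24 + (2900000 - 1450001) / 50000 = 24 + 28 = 52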
++
++static int tps65217_vsel_to_uv2(unsigned int vsel)
++{
++ int uV = 0;
++
++ if (vsel > 31)
++ return -EINVAL;
++
++ if (vsel <= 8)
++ uV = vsel * 50000 + 1500000;
++ else if (vsel <= 13)
++ uV = (vsel - 8) * 100000 + 1900000;
++ else
++ uV = (vsel - 13) * 50000 + 2400000;
++
++ return uV;
++}
++
++static int tps65217_uv_to_vsel2(int uV, unsigned int *vsel)
++{
++ if ((uV < 0) || (uV > 3300000))
++ return -EINVAL;
++
++ if (uV <= 1900000)
++ *vsel = (uV - 1450001) / 50000;
++ else if (uV <= 2400000)
++ *vsel = 8 + (uV - 1800001) / 100000;
++ else
++ *vsel = 13 + (uV - 2350001) / 50000;
++
++ return 0;
++}
++
++static struct tps_info tps65217_pmic_regs[] = {
++ TPS65217_INFO("DCDC1", 900000, 1800000, tps65217_vsel_to_uv1,
++ tps65217_uv_to_vsel1, NULL, 64, TPS65217_ENABLE_DC1_EN,
++ TPS65217_REG_DEFDCDC1, TPS65217_DEFDCDCX_DCDC_MASK),
++ TPS65217_INFO("DCDC2", 900000, 3300000, tps65217_vsel_to_uv1,
++ tps65217_uv_to_vsel1, NULL, 64, TPS65217_ENABLE_DC2_EN,
++ TPS65217_REG_DEFDCDC2, TPS65217_DEFDCDCX_DCDC_MASK),
++ TPS65217_INFO("DCDC3", 900000, 1500000, tps65217_vsel_to_uv1,
++ tps65217_uv_to_vsel1, NULL, 64, TPS65217_ENABLE_DC3_EN,
++ TPS65217_REG_DEFDCDC3, TPS65217_DEFDCDCX_DCDC_MASK),
++ TPS65217_INFO("LDO1", 1000000, 3300000, NULL, NULL, LDO1_VSEL_table,
++ 16, TPS65217_ENABLE_LDO1_EN, TPS65217_REG_DEFLDO1,
++ TPS65217_DEFLDO1_LDO1_MASK),
++ TPS65217_INFO("LDO2", 900000, 3300000, tps65217_vsel_to_uv1,
++ tps65217_uv_to_vsel1, NULL, 64, TPS65217_ENABLE_LDO2_EN,
++ TPS65217_REG_DEFLDO2, TPS65217_DEFLDO2_LDO2_MASK),
++ TPS65217_INFO("LDO3", 1800000, 3300000, tps65217_vsel_to_uv2,
++ tps65217_uv_to_vsel2, NULL, 32,
++ TPS65217_ENABLE_LS1_EN | TPS65217_DEFLDO3_LDO3_EN,
++ TPS65217_REG_DEFLS1, TPS65217_DEFLDO3_LDO3_MASK),
++ TPS65217_INFO("LDO4", 1800000, 3300000, tps65217_vsel_to_uv2,
++ tps65217_uv_to_vsel2, NULL, 32,
++ TPS65217_ENABLE_LS2_EN | TPS65217_DEFLDO4_LDO4_EN,
++ TPS65217_REG_DEFLS2, TPS65217_DEFLDO4_LDO4_MASK),
++};
++
++static int tps65217_pmic_dcdc_is_enabled(struct regulator_dev *dev)
++{
++ int ret;
++ struct tps65217 *tps = rdev_get_drvdata(dev);
++ unsigned int data, dcdc = rdev_get_id(dev);
++
++ if (dcdc < TPS65217_DCDC_1 || dcdc > TPS65217_DCDC_3)
++ return -EINVAL;
++
++ ret = tps65217_reg_read(tps, TPS65217_REG_ENABLE, &data);
++ if (ret)
++ return ret;
++
++ return (data & tps->info[dcdc]->enable_mask) ? 1 : 0;
++}
++
++static int tps65217_pmic_ldo_is_enabled(struct regulator_dev *dev)
++{
++ int ret;
++ struct tps65217 *tps = rdev_get_drvdata(dev);
++ unsigned int data, ldo = rdev_get_id(dev);
++
++ if (ldo < TPS65217_LDO_1 || ldo > TPS65217_LDO_4)
++ return -EINVAL;
++
++ ret = tps65217_reg_read(tps, TPS65217_REG_ENABLE, &data);
++ if (ret)
++ return ret;
++
++ return (data & tps->info[ldo]->enable_mask) ? 1 : 0;
++}
++
++static int tps65217_pmic_dcdc_enable(struct regulator_dev *dev)
++{
++ struct tps65217 *tps = rdev_get_drvdata(dev);
++ unsigned int dcdc = rdev_get_id(dev);
++
++ if (dcdc < TPS65217_DCDC_1 || dcdc > TPS65217_DCDC_3)
++ return -EINVAL;
++
++ /* Enable the regulator and password protection is level 1 */
++ return tps65217_set_bits(tps, TPS65217_REG_ENABLE,
++ tps->info[dcdc]->enable_mask,
++ tps->info[dcdc]->enable_mask,
++ TPS65217_PROTECT_L1);
++}
++
++static int tps65217_pmic_dcdc_disable(struct regulator_dev *dev)
++{
++ struct tps65217 *tps = rdev_get_drvdata(dev);
++ unsigned int dcdc = rdev_get_id(dev);
++
++ if (dcdc < TPS65217_DCDC_1 || dcdc > TPS65217_DCDC_3)
++ return -EINVAL;
++
++ /* Disable the regulator and password protection is level 1 */
++ return tps65217_clear_bits(tps, TPS65217_REG_ENABLE,
++ tps->info[dcdc]->enable_mask, TPS65217_PROTECT_L1);
++}
++
++static int tps65217_pmic_ldo_enable(struct regulator_dev *dev)
++{
++ struct tps65217 *tps = rdev_get_drvdata(dev);
++ unsigned int ldo = rdev_get_id(dev);
++
++ if (ldo < TPS65217_LDO_1 || ldo > TPS65217_LDO_4)
++ return -EINVAL;
++
++ /* Enable the regulator and password protection is level 1 */
++ return tps65217_set_bits(tps, TPS65217_REG_ENABLE,
++ tps->info[ldo]->enable_mask,
++ tps->info[ldo]->enable_mask,
++ TPS65217_PROTECT_L1);
++}
++
++static int tps65217_pmic_ldo_disable(struct regulator_dev *dev)
++{
++ struct tps65217 *tps = rdev_get_drvdata(dev);
++ unsigned int ldo = rdev_get_id(dev);
++
++ if (ldo < TPS65217_LDO_1 || ldo > TPS65217_LDO_4)
++ return -EINVAL;
++
++ /* Disable the regulator and password protection is level 1 */
++ return tps65217_clear_bits(tps, TPS65217_REG_ENABLE,
++ tps->info[ldo]->enable_mask, TPS65217_PROTECT_L1);
++}
++
++static int tps65217_pmic_dcdc_get_voltage_sel(struct regulator_dev *dev)
++{
++ int ret;
++ struct tps65217 *tps = rdev_get_drvdata(dev);
++ unsigned int selector, dcdc = rdev_get_id(dev);
++
++ if (dcdc < TPS65217_DCDC_1 || dcdc > TPS65217_DCDC_3)
++ return -EINVAL;
++
++ ret = tps65217_reg_read(tps, tps->info[dcdc]->set_vout_reg, &selector);
++ if (ret)
++ return ret;
++
++ selector &= tps->info[dcdc]->set_vout_mask;
++
++ return selector;
++}
++
++static int tps65217_pmic_dcdc_set_voltage(struct regulator_dev *dev,
++ int min_uV, int max_uV, unsigned *selector)
++{
++ int ret;
++ struct tps65217 *tps = rdev_get_drvdata(dev);
++ unsigned int dcdc = rdev_get_id(dev);
++
++ if (dcdc < TPS65217_DCDC_1 || dcdc > TPS65217_DCDC_3)
++ return -EINVAL;
++
++ if (min_uV < tps->info[dcdc]->min_uV
++ || min_uV > tps->info[dcdc]->max_uV)
++ return -EINVAL;
++
++ if (max_uV < tps->info[dcdc]->min_uV
++ || max_uV > tps->info[dcdc]->max_uV)
++ return -EINVAL;
++
++ ret = tps->info[dcdc]->uv_to_vsel(min_uV, selector);
++ if (ret)
++ return ret;
++
++ /* Set the voltage based on vsel value and write protect level is 2 */
++ ret = tps65217_set_bits(tps, tps->info[dcdc]->set_vout_reg,
++ tps->info[dcdc]->set_vout_mask,
++ *selector, TPS65217_PROTECT_L2);
++ if (ret)
++ return ret;
++
++ /* Set GO bit to initiate voltage transition */
++ return tps65217_set_bits(tps, TPS65217_REG_DEFSLEW,
++ TPS65217_DEFSLEW_GO, TPS65217_DEFSLEW_GO,
++ TPS65217_PROTECT_L2);
++}
++
++static int tps65217_pmic_ldo_get_voltage_sel(struct regulator_dev *dev)
++{
++ int ret;
++ struct tps65217 *tps = rdev_get_drvdata(dev);
++ unsigned int selector, ldo = rdev_get_id(dev);
++
++ if (ldo < TPS65217_LDO_1 || ldo > TPS65217_LDO_4)
++ return -EINVAL;
++
++ ret = tps65217_reg_read(tps, tps->info[ldo]->set_vout_reg, &selector);
++ if (ret)
++ return ret;
++
++ selector &= tps->info[ldo]->set_vout_mask;
++
++ return selector;
++}
++
++static int tps65217_pmic_ldo_set_voltage_sel(struct regulator_dev *dev,
++ unsigned selector)
++{
++ struct tps65217 *tps = rdev_get_drvdata(dev);
++ int ldo = rdev_get_id(dev);
++
++ if (ldo != TPS65217_LDO_1)
++ return -EINVAL;
++
++ if (selector >= tps->info[ldo]->table_len)
++ return -EINVAL;
++
++ /* Set the voltage based on vsel value and write protect level is 2 */
++ return tps65217_set_bits(tps, tps->info[ldo]->set_vout_reg,
++ tps->info[ldo]->set_vout_mask,
++ selector, TPS65217_PROTECT_L2);
++}
++
++static int tps65217_pmic_ldo_set_voltage(struct regulator_dev *dev,
++ int min_uV, int max_uV, unsigned *selector)
++{
++ int ret;
++ struct tps65217 *tps = rdev_get_drvdata(dev);
++ unsigned int ldo = rdev_get_id(dev);
++
++ if (ldo < TPS65217_LDO_2 || ldo > TPS65217_LDO_4)
++ return -EINVAL;
++
++ if (min_uV < tps->info[ldo]->min_uV
++ || min_uV > tps->info[ldo]->max_uV)
++ return -EINVAL;
++
++ if (max_uV < tps->info[ldo]->min_uV
++ || max_uV > tps->info[ldo]->max_uV)
++ return -EINVAL;
++
++ ret = tps->info[ldo]->uv_to_vsel(min_uV, selector);
++ if (ret)
++ return ret;
++
++ /* Set the voltage based on vsel value and write protect level is 2 */
++ return tps65217_set_bits(tps, tps->info[ldo]->set_vout_reg,
++ tps->info[ldo]->set_vout_mask,
++ *selector, TPS65217_PROTECT_L2);
++}
++
++static int tps65217_pmic_dcdc_list_voltage(struct regulator_dev *dev,
++ unsigned selector)
++{
++ struct tps65217 *tps = rdev_get_drvdata(dev);
++ unsigned int dcdc = rdev_get_id(dev);
++
++ if (dcdc < TPS65217_DCDC_1 || dcdc > TPS65217_DCDC_3)
++ return -EINVAL;
++
++ if (selector >= tps->info[dcdc]->table_len)
++ return -EINVAL;
++
++ return tps->info[dcdc]->vsel_to_uv(selector);
++}
++
++static int tps65217_pmic_ldo_list_voltage(struct regulator_dev *dev,
++ unsigned selector)
++{
++ struct tps65217 *tps = rdev_get_drvdata(dev);
++ unsigned int ldo = rdev_get_id(dev);
++
++ if (ldo < TPS65217_LDO_1 || ldo > TPS65217_LDO_4)
++ return -EINVAL;
++
++ if (selector >= tps->info[ldo]->table_len)
++ return -EINVAL;
++
++ if (tps->info[ldo]->table)
++ return tps->info[ldo]->table[selector];
++
++ return tps->info[ldo]->vsel_to_uv(selector);
++}
++
++/* Operations permitted on DCDCx */
++static struct regulator_ops tps65217_pmic_dcdc_ops = {
++ .is_enabled = tps65217_pmic_dcdc_is_enabled,
++ .enable = tps65217_pmic_dcdc_enable,
++ .disable = tps65217_pmic_dcdc_disable,
++ .get_voltage_sel = tps65217_pmic_dcdc_get_voltage_sel,
++ .set_voltage = tps65217_pmic_dcdc_set_voltage,
++ .list_voltage = tps65217_pmic_dcdc_list_voltage,
++};
++
++/* Operations permitted on LDO1 */
++static struct regulator_ops tps65217_pmic_ldo1_ops = {
++ .is_enabled = tps65217_pmic_ldo_is_enabled,
++ .enable = tps65217_pmic_ldo_enable,
++ .disable = tps65217_pmic_ldo_disable,
++ .get_voltage_sel = tps65217_pmic_ldo_get_voltage_sel,
++ .set_voltage_sel = tps65217_pmic_ldo_set_voltage_sel,
++ .list_voltage = tps65217_pmic_ldo_list_voltage,
++};
++
++/* Operations permitted on LDO2, LDO3 and LDO4 */
++static struct regulator_ops tps65217_pmic_ldo234_ops = {
++ .is_enabled = tps65217_pmic_ldo_is_enabled,
++ .enable = tps65217_pmic_ldo_enable,
++ .disable = tps65217_pmic_ldo_disable,
++ .get_voltage_sel = tps65217_pmic_ldo_get_voltage_sel,
++ .set_voltage = tps65217_pmic_ldo_set_voltage,
++ .list_voltage = tps65217_pmic_ldo_list_voltage,
++};
++
++static struct regulator_desc regulators[] = {
++ TPS65217_REGULATOR("DCDC1", TPS65217_DCDC_1,
++ tps65217_pmic_dcdc_ops, 64),
++ TPS65217_REGULATOR("DCDC2", TPS65217_DCDC_2,
++ tps65217_pmic_dcdc_ops, 64),
++ TPS65217_REGULATOR("DCDC3", TPS65217_DCDC_3,
++ tps65217_pmic_dcdc_ops, 64),
++ TPS65217_REGULATOR("LDO1", TPS65217_LDO_1,
++ tps65217_pmic_ldo1_ops, 16),
++ TPS65217_REGULATOR("LDO2", TPS65217_LDO_2,
++ tps65217_pmic_ldo234_ops, 64),
++ TPS65217_REGULATOR("LDO3", TPS65217_LDO_3,
++ tps65217_pmic_ldo234_ops, 32),
++ TPS65217_REGULATOR("LDO4", TPS65217_LDO_4,
++ tps65217_pmic_ldo234_ops, 32),
++};
++
++static int __devinit tps65217_regulator_probe(struct platform_device *pdev)
++{
++ struct regulator_dev *rdev;
++ struct tps65217 *tps;
++ struct tps_info *info = &tps65217_pmic_regs[pdev->id];
++
++ /* Already set by core driver */
++ tps = dev_to_tps65217(pdev->dev.parent);
++ tps->info[pdev->id] = info;
++
++ rdev = regulator_register(&regulators[pdev->id], &pdev->dev,
++ pdev->dev.platform_data, tps);
++ if (IS_ERR(rdev))
++ return PTR_ERR(rdev);
++
++ platform_set_drvdata(pdev, rdev);
++
++ return 0;
++}
++
++static int __devexit tps65217_regulator_remove(struct platform_device *pdev)
++{
++ struct regulator_dev *rdev = platform_get_drvdata(pdev);
++
++ platform_set_drvdata(pdev, NULL);
++ regulator_unregister(rdev);
++
++ return 0;
++}
++
++static struct platform_driver tps65217_regulator_driver = {
++ .driver = {
++ .name = "tps65217-pmic",
++ },
++ .probe = tps65217_regulator_probe,
++ .remove = __devexit_p(tps65217_regulator_remove),
++};
++
++static int __init tps65217_regulator_init(void)
++{
++ return platform_driver_register(&tps65217_regulator_driver);
++}
++subsys_initcall(tps65217_regulator_init);
++
++static void __exit tps65217_regulator_exit(void)
++{
++ platform_driver_unregister(&tps65217_regulator_driver);
++}
++module_exit(tps65217_regulator_exit);
++
++
++MODULE_AUTHOR("AnilKumar Ch <anilkumar@ti.com>");
++MODULE_DESCRIPTION("TPS65217 voltage regulator driver");
++MODULE_ALIAS("platform:tps65217-pmic");
++MODULE_LICENSE("GPL v2");
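Nothing TPS65217-specific is needed on the consumer side; the standard regulator API applies. A minimal sketch, assuming the board's init data maps a supply named "vdcdc2" to DCDC2 (the supply name is a placeholder) and that the constraints allow voltage changes:

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_consumer_setup(struct device *dev)
{
	struct regulator *reg;
	int ret;

	reg = regulator_get(dev, "vdcdc2");
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	/* 1.2 V is inside the 0.9 V to 3.3 V window DCDC2 advertises above */
	ret = regulator_set_voltage(reg, 1200000, 1200000);
	if (!ret)
		ret = regulator_enable(reg);
	if (ret)
		regulator_put(reg);

	return ret;
}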
+diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
+index b552aae..fc42a34 100644
+--- a/drivers/regulator/tps65910-regulator.c
++++ b/drivers/regulator/tps65910-regulator.c
+@@ -25,30 +25,6 @@
+ #include <linux/gpio.h>
+ #include <linux/mfd/tps65910.h>
+
+-#define TPS65910_REG_VRTC 0
+-#define TPS65910_REG_VIO 1
+-#define TPS65910_REG_VDD1 2
+-#define TPS65910_REG_VDD2 3
+-#define TPS65910_REG_VDD3 4
+-#define TPS65910_REG_VDIG1 5
+-#define TPS65910_REG_VDIG2 6
+-#define TPS65910_REG_VPLL 7
+-#define TPS65910_REG_VDAC 8
+-#define TPS65910_REG_VAUX1 9
+-#define TPS65910_REG_VAUX2 10
+-#define TPS65910_REG_VAUX33 11
+-#define TPS65910_REG_VMMC 12
+-
+-#define TPS65911_REG_VDDCTRL 4
+-#define TPS65911_REG_LDO1 5
+-#define TPS65911_REG_LDO2 6
+-#define TPS65911_REG_LDO3 7
+-#define TPS65911_REG_LDO4 8
+-#define TPS65911_REG_LDO5 9
+-#define TPS65911_REG_LDO6 10
+-#define TPS65911_REG_LDO7 11
+-#define TPS65911_REG_LDO8 12
+-
+ #define TPS65910_SUPPLY_STATE_ENABLED 0x1
+
+ /* supported VIO voltages in milivolts */
+@@ -508,9 +484,15 @@ static int tps65910_get_voltage_dcdc(struct regulator_dev *dev)
+ switch (id) {
+ case TPS65910_REG_VDD1:
+ opvsel = tps65910_reg_read(pmic, TPS65910_VDD1_OP);
++ if (opvsel < 0)
++ return opvsel;
+ mult = tps65910_reg_read(pmic, TPS65910_VDD1);
++ if (mult < 0)
++ return mult;
+ mult = (mult & VDD1_VGAIN_SEL_MASK) >> VDD1_VGAIN_SEL_SHIFT;
+ srvsel = tps65910_reg_read(pmic, TPS65910_VDD1_SR);
++ if (srvsel < 0)
++ return srvsel;
+ sr = opvsel & VDD1_OP_CMD_MASK;
+ opvsel &= VDD1_OP_SEL_MASK;
+ srvsel &= VDD1_SR_SEL_MASK;
+@@ -661,6 +643,7 @@ static int tps65910_set_voltage_dcdc(struct regulator_dev *dev,
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ int id = rdev_get_id(dev), vsel;
+ int dcdc_mult = 0;
++ int ret = 0;
+
+ switch (id) {
+ case TPS65910_REG_VDD1:
+@@ -669,10 +652,11 @@ static int tps65910_set_voltage_dcdc(struct regulator_dev *dev,
+ dcdc_mult--;
+ vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3;
+
+- tps65910_modify_bits(pmic, TPS65910_VDD1,
++ ret = tps65910_modify_bits(pmic, TPS65910_VDD1,
+ (dcdc_mult << VDD1_VGAIN_SEL_SHIFT),
+ VDD1_VGAIN_SEL_MASK);
+- tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel);
++ if (!ret)
++ ret = tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel);
+ break;
+ case TPS65910_REG_VDD2:
+ dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
+@@ -690,7 +674,7 @@ static int tps65910_set_voltage_dcdc(struct regulator_dev *dev,
+ tps65910_reg_write(pmic, TPS65911_VDDCTRL_OP, vsel);
+ }
+
+- return 0;
++ return ret;
+ }
+
+ static int tps65910_set_voltage(struct regulator_dev *dev, unsigned selector)
+@@ -885,8 +869,6 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
+ if (!pmic_plat_data)
+ return -EINVAL;
+
+- reg_data = pmic_plat_data->tps65910_pmic_init_data;
+-
+ pmic = kzalloc(sizeof(*pmic), GFP_KERNEL);
+ if (!pmic)
+ return -ENOMEM;
+@@ -937,7 +919,16 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
+ goto err_free_info;
+ }
+
+- for (i = 0; i < pmic->num_regulators; i++, info++, reg_data++) {
++ for (i = 0; i < pmic->num_regulators && i < TPS65910_NUM_REGS;
++ i++, info++) {
++
++ reg_data = pmic_plat_data->tps65910_pmic_init_data[i];
++
++ /* Regulator API handles empty constraints but not NULL
++ * constraints */
++ if (!reg_data)
++ continue;
++
+ /* Register the regulators */
+ pmic->info[i] = info;
+
+diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
+index 53eb4e5..34a7129 100644
+--- a/drivers/rtc/Kconfig
++++ b/drivers/rtc/Kconfig
+@@ -733,7 +733,7 @@ config RTC_DRV_DAVINCI
+
+ config RTC_DRV_OMAP
+ tristate "TI OMAP1"
+- depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX
++ depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX || SOC_OMAPAM33XX
+ help
+ Say "yes" here to support the real time clock on TI OMAP1 and
+ DA8xx/OMAP-L13x chips. This driver can also be built as a
+diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
+index d969855..d3e4d7c 100644
+--- a/drivers/scsi/gdth.h
++++ b/drivers/scsi/gdth.h
+@@ -359,7 +359,7 @@ typedef struct {
+ u32 cmd_buff_addr2; /* physical address of cmd buffer 1 */
+ u32 cmd_buff_u_addr2; /* reserved for 64 bit addressing */
+ u32 cmd_buff_indx2; /* cmd buf addr1 unique identifier */
+- u32 cmd_buff_size; /* size of each cmd bufer in bytes */
++ u32 cmd_buff_size; /* size of each cmd buffer in bytes */
+ u32 reserved1;
+ u32 reserved2;
+ } __attribute__((packed)) gdth_perf_modes;
+diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
+index 322be7a..460b9b4 100644
+--- a/drivers/spi/spi-omap2-mcspi.c
++++ b/drivers/spi/spi-omap2-mcspi.c
+@@ -40,6 +40,7 @@
+ #include <plat/dma.h>
+ #include <plat/clock.h>
+ #include <plat/mcspi.h>
++#include <mach/edma.h>
+
+ #define OMAP2_MCSPI_MAX_FREQ 48000000
+
+@@ -101,6 +102,7 @@ struct omap2_mcspi_dma {
+
+ struct completion dma_tx_completion;
+ struct completion dma_rx_completion;
++ int dummy_param_slot;
+ };
+
+ /* use PIO for small transfers, avoiding DMA setup/teardown overhead and
+@@ -108,7 +110,6 @@ struct omap2_mcspi_dma {
+ */
+ #define DMA_MIN_BYTES 160
+
+-
+ struct omap2_mcspi {
+ struct work_struct work;
+ /* lock protects queue and registers */
+@@ -303,6 +304,7 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
+ u8 * rx;
+ const u8 * tx;
+ void __iomem *chstat_reg;
++ struct edmacc_param param;
+
+ mcspi = spi_master_get_devdata(spi->master);
+ mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+@@ -332,37 +334,57 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
+ }
+
+ if (tx != NULL) {
+- omap_set_dma_transfer_params(mcspi_dma->dma_tx_channel,
+- data_type, element_count, 1,
+- OMAP_DMA_SYNC_ELEMENT,
+- mcspi_dma->dma_tx_sync_dev, 0);
+-
+- omap_set_dma_dest_params(mcspi_dma->dma_tx_channel, 0,
+- OMAP_DMA_AMODE_CONSTANT,
+- tx_reg, 0, 0);
+-
+- omap_set_dma_src_params(mcspi_dma->dma_tx_channel, 0,
+- OMAP_DMA_AMODE_POST_INC,
+- xfer->tx_dma, 0, 0);
++ int a_cnt, b_cnt, c_cnt, b_cntrld;
++
++ a_cnt = 1;
++ b_cnt = 1;
++ c_cnt = (element_count / a_cnt) / 256;
++ b_cntrld = SZ_64K - 1;
++
++ param.opt = TCINTEN |
++ EDMA_TCC(mcspi_dma->dma_tx_channel) | SYNCDIM ;
++ param.src = xfer->tx_dma;
++ param.a_b_cnt = a_cnt | b_cnt << 16;
++ param.dst = tx_reg;
++ param.src_dst_bidx = a_cnt;
++ param.link_bcntrld = b_cntrld << 16;
++ param.src_dst_cidx = b_cnt;
++ param.ccnt = element_count;
++ edma_write_slot(mcspi_dma->dma_tx_channel, &param);
++ edma_link(mcspi_dma->dma_tx_channel,
++ mcspi_dma->dummy_param_slot);
+ }
+
+ if (rx != NULL) {
++ int a_cnt, b_cnt, c_cnt, b_cntrld;
++
++ a_cnt = 1;
++ c_cnt = (element_count / a_cnt) / (SZ_64K - 1);
++ b_cnt = element_count - c_cnt * (SZ_64K - 1);
++ b_cntrld = SZ_64K - 1;
++
++ if (b_cnt)
++ c_cnt++;
++ else
++ b_cnt = SZ_64K - 1;
++
+ elements = element_count - 1;
+ if (l & OMAP2_MCSPI_CHCONF_TURBO)
+ elements--;
+
+- omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel,
+- data_type, elements, 1,
+- OMAP_DMA_SYNC_ELEMENT,
+- mcspi_dma->dma_rx_sync_dev, 1);
++ param.opt = TCINTEN |
++ EDMA_TCC(mcspi_dma->dma_rx_channel);
++ param.src = rx_reg;
++ param.a_b_cnt = a_cnt | b_cnt << 16;
++ param.dst = xfer->rx_dma;
++ param.src_dst_bidx = a_cnt << 16;
++ param.link_bcntrld = b_cntrld << 16;
++ param.src_dst_cidx = 1 << 16;
++ param.ccnt = c_cnt;
++ edma_write_slot(mcspi_dma->dma_rx_channel, &param);
++ edma_link(mcspi_dma->dma_rx_channel,
++ mcspi_dma->dummy_param_slot);
+
+- omap_set_dma_src_params(mcspi_dma->dma_rx_channel, 0,
+- OMAP_DMA_AMODE_CONSTANT,
+- rx_reg, 0, 0);
+-
+- omap_set_dma_dest_params(mcspi_dma->dma_rx_channel, 0,
+- OMAP_DMA_AMODE_POST_INC,
+- xfer->rx_dma, 0, 0);
+ }
+
+ if (tx != NULL) {
+@@ -419,23 +441,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
+ }
+ }
+
+- if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
+- & OMAP2_MCSPI_CHSTAT_RXS)) {
+- u32 w;
+-
+- w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
+- if (word_len <= 8)
+- ((u8 *)xfer->rx_buf)[elements] = w;
+- else if (word_len <= 16)
+- ((u16 *)xfer->rx_buf)[elements] = w;
+- else /* word_len <= 32 */
+- ((u32 *)xfer->rx_buf)[elements] = w;
+- } else {
+- dev_err(&spi->dev, "DMA RX last word empty");
+- count -= (word_len <= 8) ? 1 :
+- (word_len <= 16) ? 2 :
+- /* word_len <= 32 */ 4;
+- }
+ omap2_mcspi_set_enable(spi, 1);
+ }
+ return count;
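For orientation, the RX b/c-count arithmetic above collapses to a single frame whenever fewer than 65535 (SZ_64K - 1) elements are transferred, which covers typical SPI transfers. With a_cnt = 1 and element_count = 4096:

    c_cnt = 4096 / 65535     = 0
    b_cnt = 4096 - 0 * 65535 = 4096
    b_cnt != 0               -> c_cnt = 1

so the PaRAM slot describes one 4096-element frame; only transfers of more than 65535 elements end up with c_cnt > 1.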
+@@ -718,13 +723,13 @@ static void omap2_mcspi_dma_rx_callback(int lch, u16 ch_status, void *data)
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_dma *mcspi_dma;
+
++ /* We must disable the DMA RX request */
++ omap2_mcspi_set_dma_req(spi, 1, 0);
+ mcspi = spi_master_get_devdata(spi->master);
+ mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
+
+ complete(&mcspi_dma->dma_rx_completion);
+
+- /* We must disable the DMA RX request */
+- omap2_mcspi_set_dma_req(spi, 1, 0);
+ }
+
+ static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data)
+@@ -733,13 +738,13 @@ static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data)
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_dma *mcspi_dma;
+
++ /* We must disable the DMA TX request */
++ omap2_mcspi_set_dma_req(spi, 0, 0);
+ mcspi = spi_master_get_devdata(spi->master);
+ mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
+
+ complete(&mcspi_dma->dma_tx_completion);
+
+- /* We must disable the DMA TX request */
+- omap2_mcspi_set_dma_req(spi, 0, 0);
+ }
+
+ static int omap2_mcspi_request_dma(struct spi_device *spi)
+@@ -747,6 +752,7 @@ static int omap2_mcspi_request_dma(struct spi_device *spi)
+ struct spi_master *master = spi->master;
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_dma *mcspi_dma;
++ int ret = 0;
+
+ mcspi = spi_master_get_devdata(master);
+ mcspi_dma = mcspi->dma_channels + spi->chip_select;
+@@ -766,6 +772,18 @@ static int omap2_mcspi_request_dma(struct spi_device *spi)
+ dev_err(&spi->dev, "no TX DMA channel for McSPI\n");
+ return -EAGAIN;
+ }
++ ret = edma_alloc_slot(EDMA_CTLR(mcspi_dma->dma_tx_channel),
++ EDMA_SLOT_ANY);
++
++ if (ret < 0) {
++ pr_err("Unable to request SPI TX DMA param slot\n");
++ ret = -EAGAIN;
++ return ret;
++ }
++
++ mcspi_dma->dummy_param_slot = ret;
++ edma_link(mcspi_dma->dummy_param_slot,
++ mcspi_dma->dummy_param_slot);
+
+ init_completion(&mcspi_dma->dma_rx_completion);
+ init_completion(&mcspi_dma->dma_tx_completion);
+@@ -1114,7 +1132,7 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ status = -ENODEV;
+- goto err1;
++ goto free_master;
+ }
+
+ r->start += pdata->regs_offset;
+@@ -1123,14 +1141,14 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
+ if (!request_mem_region(r->start, resource_size(r),
+ dev_name(&pdev->dev))) {
+ status = -EBUSY;
+- goto err1;
++ goto free_master;
+ }
+
+ mcspi->base = ioremap(r->start, resource_size(r));
+ if (!mcspi->base) {
+ dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
+ status = -ENOMEM;
+- goto err2;
++ goto release_region;
+ }
+
+ mcspi->dev = &pdev->dev;
+@@ -1145,7 +1163,7 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
+ GFP_KERNEL);
+
+ if (mcspi->dma_channels == NULL)
+- goto err2;
++ goto unmap_io;
+
+ for (i = 0; i < master->num_chipselect; i++) {
+ char dma_ch_name[14];
+@@ -1175,25 +1193,34 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
+ mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start;
+ }
+
++ if (status < 0)
++ goto dma_chnl_free;
++
+ pm_runtime_enable(&pdev->dev);
+
+ if (status || omap2_mcspi_master_setup(mcspi) < 0)
+- goto err3;
++ goto disable_pm;
+
+ status = spi_register_master(master);
+ if (status < 0)
+- goto err4;
++ goto err_spi_register;
+
+ return status;
+
+-err4:
++err_spi_register:
+ spi_master_put(master);
+-err3:
++disable_pm:
++ pm_runtime_put_sync(&pdev->dev);
++ pm_runtime_disable(&pdev->dev);
++dma_chnl_free:
+ kfree(mcspi->dma_channels);
+-err2:
+- release_mem_region(r->start, resource_size(r));
++unmap_io:
+ iounmap(mcspi->base);
+-err1:
++release_region:
++ release_mem_region(r->start, resource_size(r));
++free_master:
++ kfree(master);
++ platform_set_drvdata(pdev, NULL);
+ return status;
+ }
+
+@@ -1210,13 +1237,16 @@ static int __exit omap2_mcspi_remove(struct platform_device *pdev)
+ dma_channels = mcspi->dma_channels;
+
+ omap2_mcspi_disable_clocks(mcspi);
++ pm_runtime_disable(&pdev->dev);
++ kfree(dma_channels);
++ base = mcspi->base;
++ iounmap(base);
++
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(r->start, resource_size(r));
+
+- base = mcspi->base;
+ spi_unregister_master(master);
+- iounmap(base);
+- kfree(dma_channels);
++ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+ }
+diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
+index 5e713d3..ca24ab3 100644
+--- a/drivers/tty/serial/omap-serial.c
++++ b/drivers/tty/serial/omap-serial.c
+@@ -37,17 +37,24 @@
+ #include <linux/clk.h>
+ #include <linux/serial_core.h>
+ #include <linux/irq.h>
++#include <linux/pm_runtime.h>
++#include <linux/of.h>
+
+ #include <plat/dma.h>
+ #include <plat/dmtimer.h>
+ #include <plat/omap-serial.h>
+
++#define DEFAULT_CLK_SPEED 48000000 /* 48 MHz */
++
+ static struct uart_omap_port *ui[OMAP_MAX_HSUART_PORTS];
+
+ /* Forward declaration of functions */
+ static void uart_tx_dma_callback(int lch, u16 ch_status, void *data);
+-static void serial_omap_rx_timeout(unsigned long uart_no);
++static void serial_omap_rxdma_poll(unsigned long uart_no);
+ static int serial_omap_start_rxdma(struct uart_omap_port *up);
++static void serial_omap_mdr1_errataset(struct uart_omap_port *up, u8 mdr1);
++
++static struct workqueue_struct *serial_omap_uart_wq;
+
+ static inline unsigned int serial_in(struct uart_omap_port *up, int offset)
+ {
+@@ -102,6 +109,8 @@ static void serial_omap_stop_rxdma(struct uart_omap_port *up)
+ omap_free_dma(up->uart_dma.rx_dma_channel);
+ up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE;
+ up->uart_dma.rx_dma_used = false;
++ pm_runtime_mark_last_busy(&up->pdev->dev);
++ pm_runtime_put_autosuspend(&up->pdev->dev);
+ }
+ }
+
+@@ -109,9 +118,12 @@ static void serial_omap_enable_ms(struct uart_port *port)
+ {
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+- dev_dbg(up->port.dev, "serial_omap_enable_ms+%d\n", up->pdev->id);
++ dev_dbg(up->port.dev, "serial_omap_enable_ms+%d\n", up->port.line);
++
++ pm_runtime_get_sync(&up->pdev->dev);
+ up->ier |= UART_IER_MSI;
+ serial_out(up, UART_IER, up->ier);
++ pm_runtime_put(&up->pdev->dev);
+ }
+
+ static void serial_omap_stop_tx(struct uart_port *port)
+@@ -129,30 +141,40 @@ static void serial_omap_stop_tx(struct uart_port *port)
+ omap_stop_dma(up->uart_dma.tx_dma_channel);
+ omap_free_dma(up->uart_dma.tx_dma_channel);
+ up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE;
++ pm_runtime_mark_last_busy(&up->pdev->dev);
++ pm_runtime_put_autosuspend(&up->pdev->dev);
+ }
+
++ pm_runtime_get_sync(&up->pdev->dev);
+ if (up->ier & UART_IER_THRI) {
+ up->ier &= ~UART_IER_THRI;
+ serial_out(up, UART_IER, up->ier);
+ }
++
++ pm_runtime_mark_last_busy(&up->pdev->dev);
++ pm_runtime_put_autosuspend(&up->pdev->dev);
+ }
+
+ static void serial_omap_stop_rx(struct uart_port *port)
+ {
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+
++ pm_runtime_get_sync(&up->pdev->dev);
+ if (up->use_dma)
+ serial_omap_stop_rxdma(up);
+ up->ier &= ~UART_IER_RLSI;
+ up->port.read_status_mask &= ~UART_LSR_DR;
+ serial_out(up, UART_IER, up->ier);
++ pm_runtime_mark_last_busy(&up->pdev->dev);
++ pm_runtime_put_autosuspend(&up->pdev->dev);
+ }
+
+-static inline void receive_chars(struct uart_omap_port *up, int *status)
++static inline void receive_chars(struct uart_omap_port *up,
++ unsigned int *status)
+ {
+ struct tty_struct *tty = up->port.state->port.tty;
+- unsigned int flag;
+- unsigned char ch, lsr = *status;
++ unsigned int flag, lsr = *status;
++ unsigned char ch = 0;
+ int max_count = 256;
+
+ do {
+@@ -262,7 +284,10 @@ static void serial_omap_start_tx(struct uart_port *port)
+ int ret = 0;
+
+ if (!up->use_dma) {
++ pm_runtime_get_sync(&up->pdev->dev);
+ serial_omap_enable_ier_thri(up);
++ pm_runtime_mark_last_busy(&up->pdev->dev);
++ pm_runtime_put_autosuspend(&up->pdev->dev);
+ return;
+ }
+
+@@ -272,6 +297,7 @@ static void serial_omap_start_tx(struct uart_port *port)
+ xmit = &up->port.state->xmit;
+
+ if (up->uart_dma.tx_dma_channel == OMAP_UART_DMA_CH_FREE) {
++ pm_runtime_get_sync(&up->pdev->dev);
+ ret = omap_request_dma(up->uart_dma.uart_dma_tx,
+ "UART Tx DMA",
+ (void *)uart_tx_dma_callback, up,
+@@ -354,9 +380,13 @@ static inline irqreturn_t serial_omap_irq(int irq, void *dev_id)
+ unsigned int iir, lsr;
+ unsigned long flags;
+
++ pm_runtime_get_sync(&up->pdev->dev);
+ iir = serial_in(up, UART_IIR);
+- if (iir & UART_IIR_NO_INT)
++ if (iir & UART_IIR_NO_INT) {
++ pm_runtime_mark_last_busy(&up->pdev->dev);
++ pm_runtime_put_autosuspend(&up->pdev->dev);
+ return IRQ_NONE;
++ }
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ lsr = serial_in(up, UART_LSR);
+@@ -378,6 +408,9 @@ static inline irqreturn_t serial_omap_irq(int irq, void *dev_id)
+ transmit_chars(up);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
++ pm_runtime_mark_last_busy(&up->pdev->dev);
++ pm_runtime_put_autosuspend(&up->pdev->dev);
++
+ up->port_activity = jiffies;
+ return IRQ_HANDLED;
+ }
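The get_sync/mark_last_busy/put_autosuspend pairing in the interrupt handler is the usual runtime-PM autosuspend idiom. A minimal sketch of that pattern, assuming the driver's probe path (outside this hunk) sets up autosuspend; the 3-second delay is only a placeholder:

#include <linux/pm_runtime.h>

static void example_autosuspend_setup(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 3000);	/* placeholder idle timeout */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
}

static void example_hw_access(struct device *dev)
{
	pm_runtime_get_sync(dev);		/* device is powered from here on */
	/* ... register accesses ... */
	pm_runtime_mark_last_busy(dev);		/* restart the idle timer */
	pm_runtime_put_autosuspend(dev);	/* may suspend after the delay */
}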
+@@ -388,11 +421,12 @@ static unsigned int serial_omap_tx_empty(struct uart_port *port)
+ unsigned long flags = 0;
+ unsigned int ret = 0;
+
+- dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->pdev->id);
++ pm_runtime_get_sync(&up->pdev->dev);
++ dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->port.line);
+ spin_lock_irqsave(&up->port.lock, flags);
+ ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
+ spin_unlock_irqrestore(&up->port.lock, flags);
+-
++ pm_runtime_put(&up->pdev->dev);
+ return ret;
+ }
+
+@@ -402,8 +436,11 @@ static unsigned int serial_omap_get_mctrl(struct uart_port *port)
+ unsigned char status;
+ unsigned int ret = 0;
+
++ pm_runtime_get_sync(&up->pdev->dev);
+ status = check_modem_status(up);
+- dev_dbg(up->port.dev, "serial_omap_get_mctrl+%d\n", up->pdev->id);
++ pm_runtime_put(&up->pdev->dev);
++
++ dev_dbg(up->port.dev, "serial_omap_get_mctrl+%d\n", up->port.line);
+
+ if (status & UART_MSR_DCD)
+ ret |= TIOCM_CAR;
+@@ -421,7 +458,7 @@ static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned char mcr = 0;
+
+- dev_dbg(up->port.dev, "serial_omap_set_mctrl+%d\n", up->pdev->id);
++ dev_dbg(up->port.dev, "serial_omap_set_mctrl+%d\n", up->port.line);
+ if (mctrl & TIOCM_RTS)
+ mcr |= UART_MCR_RTS;
+ if (mctrl & TIOCM_DTR)
+@@ -433,8 +470,11 @@ static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
+ if (mctrl & TIOCM_LOOP)
+ mcr |= UART_MCR_LOOP;
+
+- mcr |= up->mcr;
+- serial_out(up, UART_MCR, mcr);
++ pm_runtime_get_sync(&up->pdev->dev);
++ up->mcr = serial_in(up, UART_MCR);
++ up->mcr |= mcr;
++ serial_out(up, UART_MCR, up->mcr);
++ pm_runtime_put(&up->pdev->dev);
+ }
+
+ static void serial_omap_break_ctl(struct uart_port *port, int break_state)
+@@ -442,7 +482,8 @@ static void serial_omap_break_ctl(struct uart_port *port, int break_state)
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned long flags = 0;
+
+- dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->pdev->id);
++ dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->port.line);
++ pm_runtime_get_sync(&up->pdev->dev);
+ spin_lock_irqsave(&up->port.lock, flags);
+ if (break_state == -1)
+ up->lcr |= UART_LCR_SBC;
+@@ -450,6 +491,7 @@ static void serial_omap_break_ctl(struct uart_port *port, int break_state)
+ up->lcr &= ~UART_LCR_SBC;
+ serial_out(up, UART_LCR, up->lcr);
+ spin_unlock_irqrestore(&up->port.lock, flags);
++ pm_runtime_put(&up->pdev->dev);
+ }
+
+ static int serial_omap_startup(struct uart_port *port)
+@@ -466,8 +508,9 @@ static int serial_omap_startup(struct uart_port *port)
+ if (retval)
+ return retval;
+
+- dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->pdev->id);
++ dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line);
+
++ pm_runtime_get_sync(&up->pdev->dev);
+ /*
+ * Clear the FIFO buffers and disable them.
+ * (they will be reenabled in set_termios())
+@@ -505,8 +548,8 @@ static int serial_omap_startup(struct uart_port *port)
+ (dma_addr_t *)&(up->uart_dma.tx_buf_dma_phys),
+ 0);
+ init_timer(&(up->uart_dma.rx_timer));
+- up->uart_dma.rx_timer.function = serial_omap_rx_timeout;
+- up->uart_dma.rx_timer.data = up->pdev->id;
++ up->uart_dma.rx_timer.function = serial_omap_rxdma_poll;
++ up->uart_dma.rx_timer.data = up->port.line;
+ /* Currently the buffer size is 4KB. Can increase it */
+ up->uart_dma.rx_buf = dma_alloc_coherent(NULL,
+ up->uart_dma.rx_buf_size,
+@@ -523,6 +566,8 @@ static int serial_omap_startup(struct uart_port *port)
+ /* Enable module level wake up */
+ serial_out(up, UART_OMAP_WER, OMAP_UART_WER_MOD_WKUP);
+
++ pm_runtime_mark_last_busy(&up->pdev->dev);
++ pm_runtime_put_autosuspend(&up->pdev->dev);
+ up->port_activity = jiffies;
+ return 0;
+ }
+@@ -532,7 +577,9 @@ static void serial_omap_shutdown(struct uart_port *port)
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned long flags = 0;
+
+- dev_dbg(up->port.dev, "serial_omap_shutdown+%d\n", up->pdev->id);
++ dev_dbg(up->port.dev, "serial_omap_shutdown+%d\n", up->port.line);
++
++ pm_runtime_get_sync(&up->pdev->dev);
+ /*
+ * Disable interrupts from this port
+ */
+@@ -566,6 +613,8 @@ static void serial_omap_shutdown(struct uart_port *port)
+ up->uart_dma.rx_buf_dma_phys);
+ up->uart_dma.rx_buf = NULL;
+ }
++
++ pm_runtime_put(&up->pdev->dev);
+ free_irq(up->port.irq, up);
+ }
+
+@@ -573,8 +622,6 @@ static inline void
+ serial_omap_configure_xonxoff
+ (struct uart_omap_port *up, struct ktermios *termios)
+ {
+- unsigned char efr = 0;
+-
+ up->lcr = serial_in(up, UART_LCR);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ up->efr = serial_in(up, UART_EFR);
+@@ -584,8 +631,7 @@ serial_omap_configure_xonxoff
+ serial_out(up, UART_XOFF1, termios->c_cc[VSTOP]);
+
+ /* clear SW control mode bits */
+- efr = up->efr;
+- efr &= OMAP_UART_SW_CLR;
++ up->efr &= OMAP_UART_SW_CLR;
+
+ /*
+ * IXON Flag:
+@@ -593,7 +639,7 @@ serial_omap_configure_xonxoff
+ * Transmit XON1, XOFF1
+ */
+ if (termios->c_iflag & IXON)
+- efr |= OMAP_UART_SW_TX;
++ up->efr |= OMAP_UART_SW_TX;
+
+ /*
+ * IXOFF Flag:
+@@ -601,7 +647,7 @@ serial_omap_configure_xonxoff
+ * Receiver compares XON1, XOFF1.
+ */
+ if (termios->c_iflag & IXOFF)
+- efr |= OMAP_UART_SW_RX;
++ up->efr |= OMAP_UART_SW_RX;
+
+ serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+@@ -624,13 +670,21 @@ serial_omap_configure_xonxoff
+ * load the new software flow control mode IXON or IXOFF
+ * and restore the UARTi.EFR_REG[4] ENHANCED_EN value.
+ */
+- serial_out(up, UART_EFR, efr | UART_EFR_SCD);
++ serial_out(up, UART_EFR, up->efr | UART_EFR_SCD);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+
+ serial_out(up, UART_MCR, up->mcr & ~UART_MCR_TCRTLR);
+ serial_out(up, UART_LCR, up->lcr);
+ }
+
++static void serial_omap_uart_qos_work(struct work_struct *work)
++{
++ struct uart_omap_port *up = container_of(work, struct uart_omap_port,
++ qos_work);
++
++ pm_qos_update_request(&up->pm_qos_request, up->latency);
++}
++
+ static void
+ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
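
The serial_omap_uart_qos_work() helper added above feeds a PM QoS constraint: the driver publishes a CPU/DMA wakeup-latency request sized to how quickly its FIFO fills at the current baud rate, and pushes updates from a work item so the (possibly blocking) update happens outside the spinlocked paths that recompute the value. A minimal sketch of that wiring follows; the foo_port structure and helper names are placeholders, not code from this patch.

#include <linux/pm_qos.h>
#include <linux/workqueue.h>
#include <linux/types.h>

struct foo_port {
        struct pm_qos_request pm_qos_request;
        struct work_struct qos_work;
        u32 latency;                    /* desired wakeup latency */
};

static void foo_qos_work(struct work_struct *work)
{
        struct foo_port *p = container_of(work, struct foo_port, qos_work);

        /* runs from the workqueue, not under the port spinlock */
        pm_qos_update_request(&p->pm_qos_request, p->latency);
}

static void foo_qos_init(struct foo_port *p)
{
        p->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;  /* "no constraint" */
        pm_qos_add_request(&p->pm_qos_request, PM_QOS_CPU_DMA_LATENCY,
                           p->latency);
        INIT_WORK(&p->qos_work, foo_qos_work);
}

static void foo_qos_exit(struct foo_port *p)
{
        cancel_work_sync(&p->qos_work);
        pm_qos_remove_request(&p->pm_qos_request);
}

In the driver itself, serial_omap_set_termios() recomputes the latency from the new baud rate and calls schedule_work(&up->qos_work), as the next hunk shows.
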
+@@ -671,6 +725,16 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/13);
+ quot = serial_omap_get_divisor(port, baud);
+
++ /* calculate wakeup latency constraint */
++ up->calc_latency = (1000000 * up->port.fifosize) /
++ (1000 * baud / 8);
++ up->latency = up->calc_latency;
++ schedule_work(&up->qos_work);
++
++ up->dll = quot & 0xff;
++ up->dlh = quot >> 8;
++ up->mdr1 = UART_OMAP_MDR1_DISABLE;
++
+ up->fcr = UART_FCR_R_TRIG_01 | UART_FCR_T_TRIG_01 |
+ UART_FCR_ENABLE_FIFO;
+ if (up->use_dma)
+@@ -680,6 +744,7 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
+ * Ok, we're now changing the port state. Do it with
+ * interrupts disabled.
+ */
++ pm_runtime_get_sync(&up->pdev->dev);
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ /*
+@@ -723,6 +788,8 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
+ up->ier |= UART_IER_MSI;
+ serial_out(up, UART_IER, up->ier);
+ serial_out(up, UART_LCR, cval); /* reset DLAB */
++ up->lcr = cval;
++ up->scr = OMAP_UART_SCR_TX_EMPTY;
+
+ /* FIFOs and DMA Settings */
+
+@@ -749,17 +816,22 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
+
+ if (up->use_dma) {
+ serial_out(up, UART_TI752_TLR, 0);
+- serial_out(up, UART_OMAP_SCR,
+- (UART_FCR_TRIGGER_4 | UART_FCR_TRIGGER_8));
++ up->scr |= (UART_FCR_TRIGGER_4 | UART_FCR_TRIGGER_8);
+ }
+
++ serial_out(up, UART_OMAP_SCR, up->scr);
++
+ serial_out(up, UART_EFR, up->efr);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+ serial_out(up, UART_MCR, up->mcr);
+
+ /* Protocol, Baud Rate, and Interrupt Settings */
+
+- serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
++ if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
++ serial_omap_mdr1_errataset(up, up->mdr1);
++ else
++ serial_out(up, UART_OMAP_MDR1, up->mdr1);
++
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+
+ up->efr = serial_in(up, UART_EFR);
+@@ -769,8 +841,8 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
+ serial_out(up, UART_IER, 0);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+
+- serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */
+- serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */
++ serial_out(up, UART_DLL, up->dll); /* LS of divisor */
++ serial_out(up, UART_DLM, up->dlh); /* MS of divisor */
+
+ serial_out(up, UART_LCR, 0);
+ serial_out(up, UART_IER, up->ier);
+@@ -780,9 +852,14 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
+ serial_out(up, UART_LCR, cval);
+
+ if (baud > 230400 && baud != 3000000)
+- serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_13X_MODE);
++ up->mdr1 = UART_OMAP_MDR1_13X_MODE;
+ else
+- serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_16X_MODE);
++ up->mdr1 = UART_OMAP_MDR1_16X_MODE;
++
++ if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
++ serial_omap_mdr1_errataset(up, up->mdr1);
++ else
++ serial_out(up, UART_OMAP_MDR1, up->mdr1);
+
+ /* Hardware Flow Control Configuration */
+
+@@ -809,7 +886,8 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
+ serial_omap_configure_xonxoff(up, termios);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+- dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->pdev->id);
++ pm_runtime_put(&up->pdev->dev);
++ dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->port.line);
+ }
+
+ static void
+@@ -819,7 +897,9 @@ serial_omap_pm(struct uart_port *port, unsigned int state,
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned char efr;
+
+- dev_dbg(up->port.dev, "serial_omap_pm+%d\n", up->pdev->id);
++ dev_dbg(up->port.dev, "serial_omap_pm+%d\n", up->port.line);
++
++ pm_runtime_get_sync(&up->pdev->dev);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ efr = serial_in(up, UART_EFR);
+ serial_out(up, UART_EFR, efr | UART_EFR_ECB);
+@@ -829,6 +909,15 @@ serial_omap_pm(struct uart_port *port, unsigned int state,
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_out(up, UART_EFR, efr);
+ serial_out(up, UART_LCR, 0);
++
++ if (!device_may_wakeup(&up->pdev->dev)) {
++ if (!state)
++ pm_runtime_forbid(&up->pdev->dev);
++ else
++ pm_runtime_allow(&up->pdev->dev);
++ }
++
++ pm_runtime_put(&up->pdev->dev);
+ }
+
+ static void serial_omap_release_port(struct uart_port *port)
+@@ -847,7 +936,7 @@ static void serial_omap_config_port(struct uart_port *port, int flags)
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+ dev_dbg(up->port.dev, "serial_omap_config_port+%d\n",
+- up->pdev->id);
++ up->port.line);
+ up->port.type = PORT_OMAP;
+ }
+
+@@ -864,7 +953,7 @@ serial_omap_type(struct uart_port *port)
+ {
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+- dev_dbg(up->port.dev, "serial_omap_type+%d\n", up->pdev->id);
++ dev_dbg(up->port.dev, "serial_omap_type+%d\n", up->port.line);
+ return up->name;
+ }
+
+@@ -906,19 +995,26 @@ static inline void wait_for_xmitr(struct uart_omap_port *up)
+ static void serial_omap_poll_put_char(struct uart_port *port, unsigned char ch)
+ {
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
++
++ pm_runtime_get_sync(&up->pdev->dev);
+ wait_for_xmitr(up);
+ serial_out(up, UART_TX, ch);
++ pm_runtime_put(&up->pdev->dev);
+ }
+
+ static int serial_omap_poll_get_char(struct uart_port *port)
+ {
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+- unsigned int status = serial_in(up, UART_LSR);
++ unsigned int status;
+
++ pm_runtime_get_sync(&up->pdev->dev);
++ status = serial_in(up, UART_LSR);
+ if (!(status & UART_LSR_DR))
+ return NO_POLL_CHAR;
+
+- return serial_in(up, UART_RX);
++ status = serial_in(up, UART_RX);
++ pm_runtime_put(&up->pdev->dev);
++ return status;
+ }
+
+ #endif /* CONFIG_CONSOLE_POLL */
+@@ -946,6 +1042,8 @@ serial_omap_console_write(struct console *co, const char *s,
+ unsigned int ier;
+ int locked = 1;
+
++ pm_runtime_get_sync(&up->pdev->dev);
++
+ local_irq_save(flags);
+ if (up->port.sysrq)
+ locked = 0;
+@@ -978,6 +1076,8 @@ serial_omap_console_write(struct console *co, const char *s,
+ if (up->msr_saved_flags)
+ check_modem_status(up);
+
++ pm_runtime_mark_last_busy(&up->pdev->dev);
++ pm_runtime_put_autosuspend(&up->pdev->dev);
+ if (locked)
+ spin_unlock(&up->port.lock);
+ local_irq_restore(flags);
+@@ -1014,7 +1114,7 @@ static struct console serial_omap_console = {
+
+ static void serial_omap_add_console_port(struct uart_omap_port *up)
+ {
+- serial_omap_console_ports[up->pdev->id] = up;
++ serial_omap_console_ports[up->port.line] = up;
+ }
+
+ #define OMAP_CONSOLE (&serial_omap_console)
+@@ -1060,26 +1160,30 @@ static struct uart_driver serial_omap_reg = {
+ .cons = OMAP_CONSOLE,
+ };
+
+-static int
+-serial_omap_suspend(struct platform_device *pdev, pm_message_t state)
++#ifdef CONFIG_SUSPEND
++static int serial_omap_suspend(struct device *dev)
+ {
+- struct uart_omap_port *up = platform_get_drvdata(pdev);
++ struct uart_omap_port *up = dev_get_drvdata(dev);
+
+- if (up)
++ if (up) {
+ uart_suspend_port(&serial_omap_reg, &up->port);
++ flush_work_sync(&up->qos_work);
++ }
++
+ return 0;
+ }
+
+-static int serial_omap_resume(struct platform_device *dev)
++static int serial_omap_resume(struct device *dev)
+ {
+- struct uart_omap_port *up = platform_get_drvdata(dev);
++ struct uart_omap_port *up = dev_get_drvdata(dev);
+
+ if (up)
+ uart_resume_port(&serial_omap_reg, &up->port);
+ return 0;
+ }
++#endif
+
+-static void serial_omap_rx_timeout(unsigned long uart_no)
++static void serial_omap_rxdma_poll(unsigned long uart_no)
+ {
+ struct uart_omap_port *up = ui[uart_no];
+ unsigned int curr_dma_pos, curr_transmitted_size;
+@@ -1089,9 +1193,9 @@ static void serial_omap_rx_timeout(unsigned long uart_no)
+ if ((curr_dma_pos == up->uart_dma.prev_rx_dma_pos) ||
+ (curr_dma_pos == 0)) {
+ if (jiffies_to_msecs(jiffies - up->port_activity) <
+- RX_TIMEOUT) {
++ up->uart_dma.rx_timeout) {
+ mod_timer(&up->uart_dma.rx_timer, jiffies +
+- usecs_to_jiffies(up->uart_dma.rx_timeout));
++ usecs_to_jiffies(up->uart_dma.rx_poll_rate));
+ } else {
+ serial_omap_stop_rxdma(up);
+ up->ier |= (UART_IER_RDI | UART_IER_RLSI);
+@@ -1120,7 +1224,7 @@ static void serial_omap_rx_timeout(unsigned long uart_no)
+ }
+ } else {
+ mod_timer(&up->uart_dma.rx_timer, jiffies +
+- usecs_to_jiffies(up->uart_dma.rx_timeout));
++ usecs_to_jiffies(up->uart_dma.rx_poll_rate));
+ }
+ up->port_activity = jiffies;
+ }
+@@ -1135,6 +1239,7 @@ static int serial_omap_start_rxdma(struct uart_omap_port *up)
+ int ret = 0;
+
+ if (up->uart_dma.rx_dma_channel == -1) {
++ pm_runtime_get_sync(&up->pdev->dev);
+ ret = omap_request_dma(up->uart_dma.uart_dma_rx,
+ "UART Rx DMA",
+ (void *)uart_rx_dma_callback, up,
+@@ -1158,7 +1263,7 @@ static int serial_omap_start_rxdma(struct uart_omap_port *up)
+ /* FIXME: Cache maintenance needed here? */
+ omap_start_dma(up->uart_dma.rx_dma_channel);
+ mod_timer(&up->uart_dma.rx_timer, jiffies +
+- usecs_to_jiffies(up->uart_dma.rx_timeout));
++ usecs_to_jiffies(up->uart_dma.rx_poll_rate));
+ up->uart_dma.rx_dma_used = true;
+ return ret;
+ }
+@@ -1221,6 +1326,19 @@ static void uart_tx_dma_callback(int lch, u16 ch_status, void *data)
+ return;
+ }
+
++static struct omap_uart_port_info *of_get_uart_port_info(struct device *dev)
++{
++ struct omap_uart_port_info *omap_up_info;
++
++ omap_up_info = devm_kzalloc(dev, sizeof(*omap_up_info), GFP_KERNEL);
++ if (!omap_up_info)
++ return NULL; /* out of memory */
++
++ of_property_read_u32(dev->of_node, "clock-frequency",
++ &omap_up_info->uartclk);
++ return omap_up_info;
++}
++
+ static int serial_omap_probe(struct platform_device *pdev)
+ {
+ struct uart_omap_port *up;
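
of_get_uart_port_info() above is one half of the device-tree probe path; the other half, a few hunks further down, takes the port number from a "serial" alias instead of pdev->id. A rough sketch of that probe-time branching, using hypothetical foo names rather than the real driver, looks like this:

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>

struct foo_platform_data {
        u32 uartclk;
};

static int foo_probe(struct platform_device *pdev)
{
        struct foo_platform_data *pdata = pdev->dev.platform_data;
        int line;

        if (pdev->dev.of_node) {
                /* DT boot: synthesize platform data from properties */
                pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
                if (!pdata)
                        return -ENOMEM;
                of_property_read_u32(pdev->dev.of_node, "clock-frequency",
                                     &pdata->uartclk);
                /* aliases { serial0 = &uart1; ... } gives the port number */
                line = of_alias_get_id(pdev->dev.of_node, "serial");
        } else {
                /* legacy board-file boot: pdata and id come from board code */
                line = pdev->id;
        }

        if (line < 0)
                return -ENODEV;

        /* ... register the port as usual, using pdata->uartclk and line ... */
        return 0;
}

The compatible strings that select this path are listed in the omap_serial_of_match table near the end of the driver changes.
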
+@@ -1228,6 +1346,9 @@ static int serial_omap_probe(struct platform_device *pdev)
+ struct omap_uart_port_info *omap_up_info = pdev->dev.platform_data;
+ int ret = -ENOSPC;
+
++ if (pdev->dev.of_node)
++ omap_up_info = of_get_uart_port_info(&pdev->dev);
++
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "no mem resource?\n");
+@@ -1263,7 +1384,6 @@ static int serial_omap_probe(struct platform_device *pdev)
+ ret = -ENOMEM;
+ goto do_release_region;
+ }
+- sprintf(up->name, "OMAP UART%d", pdev->id);
+ up->pdev = pdev;
+ up->port.dev = &pdev->dev;
+ up->port.type = PORT_OMAP;
+@@ -1273,34 +1393,74 @@ static int serial_omap_probe(struct platform_device *pdev)
+ up->port.regshift = 2;
+ up->port.fifosize = 64;
+ up->port.ops = &serial_omap_pops;
+- up->port.line = pdev->id;
+
+- up->port.membase = omap_up_info->membase;
+- up->port.mapbase = omap_up_info->mapbase;
++ if (pdev->dev.of_node)
++ up->port.line = of_alias_get_id(pdev->dev.of_node, "serial");
++ else
++ up->port.line = pdev->id;
++
++ if (up->port.line < 0) {
++ dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n",
++ up->port.line);
++ ret = -ENODEV;
++ goto err;
++ }
++
++ sprintf(up->name, "OMAP UART%d", up->port.line);
++ up->port.mapbase = mem->start;
++ up->port.membase = ioremap(mem->start, resource_size(mem));
++ if (!up->port.membase) {
++ dev_err(&pdev->dev, "can't ioremap UART\n");
++ ret = -ENOMEM;
++ goto err;
++ }
++
+ up->port.flags = omap_up_info->flags;
+- up->port.irqflags = omap_up_info->irqflags;
+ up->port.uartclk = omap_up_info->uartclk;
++ if (!up->port.uartclk) {
++ up->port.uartclk = DEFAULT_CLK_SPEED;
++ dev_warn(&pdev->dev, "No clock speed specified: using default:"
++ "%d\n", DEFAULT_CLK_SPEED);
++ }
+ up->uart_dma.uart_base = mem->start;
++ up->errata = omap_up_info->errata;
+
+ if (omap_up_info->dma_enabled) {
+ up->uart_dma.uart_dma_tx = dma_tx->start;
+ up->uart_dma.uart_dma_rx = dma_rx->start;
+ up->use_dma = 1;
+- up->uart_dma.rx_buf_size = 4096;
+- up->uart_dma.rx_timeout = 2;
++ up->uart_dma.rx_buf_size = omap_up_info->dma_rx_buf_size;
++ up->uart_dma.rx_timeout = omap_up_info->dma_rx_timeout;
++ up->uart_dma.rx_poll_rate = omap_up_info->dma_rx_poll_rate;
+ spin_lock_init(&(up->uart_dma.tx_lock));
+ spin_lock_init(&(up->uart_dma.rx_lock));
+ up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE;
+ up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE;
+ }
+
+- ui[pdev->id] = up;
++ up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
++ up->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
++ pm_qos_add_request(&up->pm_qos_request,
++ PM_QOS_CPU_DMA_LATENCY, up->latency);
++ serial_omap_uart_wq = create_singlethread_workqueue(up->name);
++ INIT_WORK(&up->qos_work, serial_omap_uart_qos_work);
++
++ pm_runtime_use_autosuspend(&pdev->dev);
++ pm_runtime_set_autosuspend_delay(&pdev->dev,
++ omap_up_info->autosuspend_timeout);
++
++ pm_runtime_irq_safe(&pdev->dev);
++ pm_runtime_enable(&pdev->dev);
++ pm_runtime_get_sync(&pdev->dev);
++
++ ui[up->port.line] = up;
+ serial_omap_add_console_port(up);
+
+ ret = uart_add_one_port(&serial_omap_reg, &up->port);
+ if (ret != 0)
+ goto do_release_region;
+
++ pm_runtime_put(&pdev->dev);
+ platform_set_drvdata(pdev, up);
+ return 0;
+ err:
+@@ -1315,22 +1475,168 @@ static int serial_omap_remove(struct platform_device *dev)
+ {
+ struct uart_omap_port *up = platform_get_drvdata(dev);
+
+- platform_set_drvdata(dev, NULL);
+ if (up) {
++ pm_runtime_disable(&up->pdev->dev);
+ uart_remove_one_port(&serial_omap_reg, &up->port);
++ pm_qos_remove_request(&up->pm_qos_request);
++
+ kfree(up);
+ }
++
++ platform_set_drvdata(dev, NULL);
++ return 0;
++}
++
++/*
++ * Work Around for Errata i202 (2430, 3430, 3630, 4430 and 4460)
++ * The access to uart register after MDR1 Access
++ * causes UART to corrupt data.
++ *
++ * Need a delay =
++ * 5 L4 clock cycles + 5 UART functional clock cycle (@48MHz = ~0.2uS)
++ * give 10 times as much
++ */
++static void serial_omap_mdr1_errataset(struct uart_omap_port *up, u8 mdr1)
++{
++ u8 timeout = 255;
++
++ serial_out(up, UART_OMAP_MDR1, mdr1);
++ udelay(2);
++ serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_XMIT |
++ UART_FCR_CLEAR_RCVR);
++ /*
++ * Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and
++ * TX_FIFO_E bit is 1.
++ */
++ while (UART_LSR_THRE != (serial_in(up, UART_LSR) &
++ (UART_LSR_THRE | UART_LSR_DR))) {
++ timeout--;
++ if (!timeout) {
++ /* Should *never* happen. we warn and carry on */
++ dev_crit(&up->pdev->dev, "Errata i202: timedout %x\n",
++ serial_in(up, UART_LSR));
++ break;
++ }
++ udelay(1);
++ }
++}
++
++static void serial_omap_restore_context(struct uart_omap_port *up)
++{
++ if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
++ serial_omap_mdr1_errataset(up, UART_OMAP_MDR1_DISABLE);
++ else
++ serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
++
++ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
++ serial_out(up, UART_EFR, UART_EFR_ECB);
++ serial_out(up, UART_LCR, 0x0); /* Operational mode */
++ serial_out(up, UART_IER, 0x0);
++ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
++ serial_out(up, UART_DLL, up->dll);
++ serial_out(up, UART_DLM, up->dlh);
++ serial_out(up, UART_LCR, 0x0); /* Operational mode */
++ serial_out(up, UART_IER, up->ier);
++ serial_out(up, UART_FCR, up->fcr);
++ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
++ serial_out(up, UART_MCR, up->mcr);
++ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
++ serial_out(up, UART_OMAP_SCR, up->scr);
++ serial_out(up, UART_EFR, up->efr);
++ serial_out(up, UART_LCR, up->lcr);
++ if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
++ serial_omap_mdr1_errataset(up, up->mdr1);
++ else
++ serial_out(up, UART_OMAP_MDR1, up->mdr1);
++}
++
++#ifdef CONFIG_PM_RUNTIME
++static int serial_omap_runtime_suspend(struct device *dev)
++{
++ struct uart_omap_port *up = dev_get_drvdata(dev);
++ struct omap_uart_port_info *pdata = dev->platform_data;
++
++ if (!up)
++ return -EINVAL;
++
++ if (!pdata || !pdata->enable_wakeup)
++ return 0;
++
++ if (pdata->get_context_loss_count)
++ up->context_loss_cnt = pdata->get_context_loss_count(dev);
++
++ if (device_may_wakeup(dev)) {
++ if (!up->wakeups_enabled) {
++ pdata->enable_wakeup(up->pdev, true);
++ up->wakeups_enabled = true;
++ }
++ } else {
++ if (up->wakeups_enabled) {
++ pdata->enable_wakeup(up->pdev, false);
++ up->wakeups_enabled = false;
++ }
++ }
++
++ /* Errata i291 */
++ if (up->use_dma && pdata->set_forceidle &&
++ (up->errata & UART_ERRATA_i291_DMA_FORCEIDLE))
++ pdata->set_forceidle(up->pdev);
++
++ up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
++ schedule_work(&up->qos_work);
++
++ return 0;
++}
++
++static int serial_omap_runtime_resume(struct device *dev)
++{
++ struct uart_omap_port *up = dev_get_drvdata(dev);
++ struct omap_uart_port_info *pdata = dev->platform_data;
++
++ if (up) {
++ if (pdata->get_context_loss_count) {
++ u32 loss_cnt = pdata->get_context_loss_count(dev);
++
++ if (up->context_loss_cnt != loss_cnt)
++ serial_omap_restore_context(up);
++ }
++
++ /* Errata i291 */
++ if (up->use_dma && pdata->set_noidle &&
++ (up->errata & UART_ERRATA_i291_DMA_FORCEIDLE))
++ pdata->set_noidle(up->pdev);
++
++ up->latency = up->calc_latency;
++ schedule_work(&up->qos_work);
++ }
++
+ return 0;
+ }
++#endif
++
++static const struct dev_pm_ops serial_omap_dev_pm_ops = {
++ SET_SYSTEM_SLEEP_PM_OPS(serial_omap_suspend, serial_omap_resume)
++ SET_RUNTIME_PM_OPS(serial_omap_runtime_suspend,
++ serial_omap_runtime_resume, NULL)
++};
++
++#if defined(CONFIG_OF)
++static const struct of_device_id omap_serial_of_match[] = {
++ { .compatible = "ti,omap2-uart" },
++ { .compatible = "ti,omap3-uart" },
++ { .compatible = "ti,omap4-uart" },
++ {},
++};
++MODULE_DEVICE_TABLE(of, omap_serial_of_match);
++#endif
+
+ static struct platform_driver serial_omap_driver = {
+ .probe = serial_omap_probe,
+ .remove = serial_omap_remove,
+-
+- .suspend = serial_omap_suspend,
+- .resume = serial_omap_resume,
+ .driver = {
+ .name = DRIVER_NAME,
++ .pm = &serial_omap_dev_pm_ops,
++ .of_match_table = of_match_ptr(omap_serial_of_match),
+ },
+ };
+
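
Throughout the omap-serial changes above, direct clock handling is replaced by runtime PM: every stretch of register accesses is bracketed by pm_runtime_get_sync()/pm_runtime_put(), probe enables autosuspend with a platform-supplied delay, and hot paths use pm_runtime_mark_last_busy() plus pm_runtime_put_autosuspend() so the port idles shortly after the last access. A bare-bones sketch of that pattern is shown below; the foo_uart driver, its register layout and the 3000 ms delay are illustrative assumptions, not values from the patch.

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/io.h>

struct foo_uart {
        struct platform_device *pdev;
        void __iomem *base;
};

static unsigned int foo_uart_read(struct foo_uart *up, unsigned int reg)
{
        unsigned int val;

        pm_runtime_get_sync(&up->pdev->dev);    /* resume if suspended */
        val = readl(up->base + reg);
        pm_runtime_mark_last_busy(&up->pdev->dev);
        pm_runtime_put_autosuspend(&up->pdev->dev); /* idle after the delay */

        return val;
}

static int foo_uart_probe(struct platform_device *pdev)
{
        /* ... allocate the port, ioremap the registers, request the IRQ ... */
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_set_autosuspend_delay(&pdev->dev, 3000);     /* ms */
        pm_runtime_irq_safe(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_sync(&pdev->dev);
        /* ... hardware init with the device guaranteed powered ... */
        pm_runtime_put(&pdev->dev);
        return 0;
}

The ehci-omap.c and ohci-omap3.c hunks further below apply the same idea to the OMAP host glue, replacing omap_usbhs_enable()/disable() with pm_runtime_enable()/get_sync() in probe and put_sync()/disable() in remove.
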
+diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
+index 6f3ea9b..9016ec1 100644
+--- a/drivers/uio/Kconfig
++++ b/drivers/uio/Kconfig
+@@ -96,9 +96,9 @@ config UIO_NETX
+
+ config UIO_PRUSS
+ tristate "Texas Instruments PRUSS driver"
+- depends on ARCH_DAVINCI_DA850
++ depends on ARCH_DAVINCI_DA850 || SOC_OMAPAM33XX
+ help
+- PRUSS driver for OMAPL138/DA850/AM18XX devices
++ PRUSS driver for OMAPL138/DA850/AM18XX/AM33XX devices
+ PRUSS driver requires user space components, examples and user space
+ driver is available from below SVN repo - you may use anonymous login
+
+diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c
+index e67b566..42553d2 100644
+--- a/drivers/uio/uio_pruss.c
++++ b/drivers/uio/uio_pruss.c
+@@ -25,7 +25,14 @@
+ #include <linux/clk.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/slab.h>
++
++#ifdef ARCH_DAVINCI_DA850
++#define ENABLE_SRAM_SUPPORT
++#endif
++
++#ifdef ENABLE_SRAM_SUPPORT
+ #include <mach/sram.h>
++#endif
+
+ #define DRV_NAME "pruss_uio"
+ #define DRV_VERSION "1.0"
+@@ -62,7 +69,7 @@ MODULE_PARM_DESC(extram_pool_sz, "external ram pool size to allocate");
+ struct uio_pruss_dev {
+ struct uio_info *info;
+ struct clk *pruss_clk;
+- dma_addr_t sram_paddr;
++ phys_addr_t sram_paddr;
+ dma_addr_t ddr_paddr;
+ void __iomem *prussio_vaddr;
+ void *sram_vaddr;
+@@ -105,8 +112,11 @@ static void pruss_cleanup(struct platform_device *dev,
+ dma_free_coherent(&dev->dev, extram_pool_sz, gdev->ddr_vaddr,
+ gdev->ddr_paddr);
+ }
++#ifdef ENABLE_SRAM_SUPPORT
+ if (gdev->sram_vaddr)
+- sram_free(gdev->sram_vaddr, sram_pool_sz);
++ gen_pool_free(davinci_gen_pool,
++ (unsigned long)gdev->sram_vaddr, sram_pool_sz);
++#endif
+ kfree(gdev->info);
+ clk_put(gdev->pruss_clk);
+ kfree(gdev);
+@@ -152,12 +162,17 @@ static int __devinit pruss_probe(struct platform_device *dev)
+ goto out_free;
+ }
+
+- gdev->sram_vaddr = sram_alloc(sram_pool_sz, &(gdev->sram_paddr));
++#ifdef ENABLE_SRAM_SUPPORT
++ gdev->sram_vaddr = (void *)gen_pool_alloc(davinci_gen_pool,
++ sram_pool_sz);
+ if (!gdev->sram_vaddr) {
+ dev_err(&dev->dev, "Could not allocate SRAM pool\n");
+ goto out_free;
+ }
+
++ gdev->sram_paddr = gen_pool_virt_to_phys(davinci_gen_pool,
++ (unsigned long)gdev->sram_vaddr);
++#endif
+ gdev->ddr_vaddr = dma_alloc_coherent(&dev->dev, extram_pool_sz,
+ &(gdev->ddr_paddr), GFP_KERNEL | GFP_DMA);
+ if (!gdev->ddr_vaddr) {
+@@ -179,8 +194,6 @@ static int __devinit pruss_probe(struct platform_device *dev)
+ p->mem[0].addr = regs_prussio->start;
+ p->mem[0].size = resource_size(regs_prussio);
+ p->mem[0].memtype = UIO_MEM_PHYS;
+-
+- p->mem[1].addr = gdev->sram_paddr;
+ p->mem[1].size = sram_pool_sz;
+ p->mem[1].memtype = UIO_MEM_PHYS;
+
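
The uio_pruss changes above drop the DaVinci-only sram_alloc()/sram_free() calls in favour of the generic allocator: a gen_pool handed over by platform code (davinci_gen_pool in this patch) provides the SRAM block, and gen_pool_virt_to_phys() recovers the physical address that used to come back from sram_alloc(). A short sketch of that allocation pattern, with a placeholder pool pointer and size, follows:

#include <linux/genalloc.h>
#include <linux/errno.h>
#include <linux/types.h>

#define FOO_SRAM_SIZE   (16 * 1024)     /* placeholder allocation size */

static void *sram_vaddr;
static phys_addr_t sram_paddr;

static int foo_sram_get(struct gen_pool *sram_pool)
{
        sram_vaddr = (void *)gen_pool_alloc(sram_pool, FOO_SRAM_SIZE);
        if (!sram_vaddr)
                return -ENOMEM;

        /* physical address, e.g. for exporting the region as UIO_MEM_PHYS */
        sram_paddr = gen_pool_virt_to_phys(sram_pool,
                                           (unsigned long)sram_vaddr);
        return 0;
}

static void foo_sram_put(struct gen_pool *sram_pool)
{
        if (sram_vaddr)
                gen_pool_free(sram_pool, (unsigned long)sram_vaddr,
                              FOO_SRAM_SIZE);
}

In the patch this path is compiled only when ENABLE_SRAM_SUPPORT is defined, so the AM33xx build, which has no such pool, skips it.
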
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index e238b3b..18373ec 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1300,7 +1300,7 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ * since that isn't a "real" hub.
+ */
+ if (!hub_is_superspeed(hdev) || !hdev->parent)
+- usb_enable_autosuspend(hdev);
++ usb_disable_autosuspend(hdev);
+
+ if (hdev->level == MAX_TOPO_LEVEL) {
+ dev_err(&intf->dev,
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 3700aa6..dfcda94 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1980,7 +1980,7 @@ int __devinit dwc3_gadget_init(struct dwc3 *dwc)
+ dev_set_name(&dwc->gadget.dev, "gadget");
+
+ dwc->gadget.ops = &dwc3_gadget_ops;
+- dwc->gadget.is_dualspeed = true;
++ dwc->gadget.max_speed = USB_SPEED_SUPER;
+ dwc->gadget.speed = USB_SPEED_UNKNOWN;
+ dwc->gadget.dev.parent = dwc->dev;
+
+diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
+index 45f422a..e9a2c5c 100644
+--- a/drivers/usb/gadget/amd5536udc.c
++++ b/drivers/usb/gadget/amd5536udc.c
+@@ -1959,7 +1959,7 @@ static int amd5536_start(struct usb_gadget_driver *driver,
+ u32 tmp;
+
+ if (!driver || !bind || !driver->setup
+- || driver->speed < USB_SPEED_HIGH)
++ || driver->max_speed < USB_SPEED_HIGH)
+ return -EINVAL;
+ if (!dev)
+ return -ENODEV;
+@@ -3349,7 +3349,7 @@ static int udc_probe(struct udc *dev)
+ dev_set_name(&dev->gadget.dev, "gadget");
+ dev->gadget.dev.release = gadget_release;
+ dev->gadget.name = name;
+- dev->gadget.is_dualspeed = 1;
++ dev->gadget.max_speed = USB_SPEED_HIGH;
+
+ /* init registers, interrupts, ... */
+ startup_registers(dev);
+diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
+index 8efe0fa..ac41f71 100644
+--- a/drivers/usb/gadget/at91_udc.c
++++ b/drivers/usb/gadget/at91_udc.c
+@@ -1633,7 +1633,7 @@ static int at91_start(struct usb_gadget_driver *driver,
+ unsigned long flags;
+
+ if (!driver
+- || driver->speed < USB_SPEED_FULL
++ || driver->max_speed < USB_SPEED_FULL
+ || !bind
+ || !driver->setup) {
+ DBG("bad parameter.\n");
+diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
+index 271a9d8..e2fb6d5 100644
+--- a/drivers/usb/gadget/atmel_usba_udc.c
++++ b/drivers/usb/gadget/atmel_usba_udc.c
+@@ -1038,7 +1038,7 @@ static struct usba_udc the_udc = {
+ .gadget = {
+ .ops = &usba_udc_ops,
+ .ep_list = LIST_HEAD_INIT(the_udc.gadget.ep_list),
+- .is_dualspeed = 1,
++ .max_speed = USB_SPEED_HIGH,
+ .name = "atmel_usba_udc",
+ .dev = {
+ .init_name = "gadget",
+diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
+index 9a0c397..bd96ad9 100644
+--- a/drivers/usb/gadget/ci13xxx_udc.c
++++ b/drivers/usb/gadget/ci13xxx_udc.c
+@@ -754,8 +754,11 @@ static ssize_t show_device(struct device *dev, struct device_attribute *attr,
+
+ n += scnprintf(buf + n, PAGE_SIZE - n, "speed = %d\n",
+ gadget->speed);
++ n += scnprintf(buf + n, PAGE_SIZE - n, "max_speed = %d\n",
++ gadget->max_speed);
++ /* TODO: Scheduled for removal in 3.8. */
+ n += scnprintf(buf + n, PAGE_SIZE - n, "is_dualspeed = %d\n",
+- gadget->is_dualspeed);
++ gadget_is_dualspeed(gadget));
+ n += scnprintf(buf + n, PAGE_SIZE - n, "is_otg = %d\n",
+ gadget->is_otg);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "is_a_peripheral = %d\n",
+@@ -798,7 +801,7 @@ static ssize_t show_driver(struct device *dev, struct device_attribute *attr,
+ n += scnprintf(buf + n, PAGE_SIZE - n, "function = %s\n",
+ (driver->function ? driver->function : ""));
+ n += scnprintf(buf + n, PAGE_SIZE - n, "max speed = %d\n",
+- driver->speed);
++ driver->max_speed);
+
+ return n;
+ }
+@@ -2871,7 +2874,7 @@ static int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev,
+
+ udc->gadget.ops = &usb_gadget_ops;
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+- udc->gadget.is_dualspeed = 1;
++ udc->gadget.max_speed = USB_SPEED_HIGH;
+ udc->gadget.is_otg = 0;
+ udc->gadget.name = driver->name;
+
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index f71b078..a95de6a 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -1535,9 +1535,9 @@ composite_resume(struct usb_gadget *gadget)
+
+ static struct usb_gadget_driver composite_driver = {
+ #ifdef CONFIG_USB_GADGET_SUPERSPEED
+- .speed = USB_SPEED_SUPER,
++ .max_speed = USB_SPEED_SUPER,
+ #else
+- .speed = USB_SPEED_HIGH,
++ .max_speed = USB_SPEED_HIGH,
+ #endif
+
+ .unbind = composite_unbind,
+@@ -1584,8 +1584,8 @@ int usb_composite_probe(struct usb_composite_driver *driver,
+ driver->iProduct = driver->name;
+ composite_driver.function = (char *) driver->name;
+ composite_driver.driver.name = driver->name;
+- composite_driver.speed = min((u8)composite_driver.speed,
+- (u8)driver->max_speed);
++ composite_driver.max_speed =
++ min_t(u8, composite_driver.max_speed, driver->max_speed);
+ composite = driver;
+ composite_gadget_bind = bind;
+
+diff --git a/drivers/usb/gadget/dbgp.c b/drivers/usb/gadget/dbgp.c
+index 6256420..19d7bb0 100644
+--- a/drivers/usb/gadget/dbgp.c
++++ b/drivers/usb/gadget/dbgp.c
+@@ -404,7 +404,7 @@ fail:
+
+ static struct usb_gadget_driver dbgp_driver = {
+ .function = "dbgp",
+- .speed = USB_SPEED_HIGH,
++ .max_speed = USB_SPEED_HIGH,
+ .unbind = dbgp_unbind,
+ .setup = dbgp_setup,
+ .disconnect = dbgp_disconnect,
+diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
+index ab8f1b4..db815c2 100644
+--- a/drivers/usb/gadget/dummy_hcd.c
++++ b/drivers/usb/gadget/dummy_hcd.c
+@@ -823,19 +823,18 @@ static int dummy_pullup (struct usb_gadget *_gadget, int value)
+
+ if (value && dum->driver) {
+ if (mod_data.is_super_speed)
+- dum->gadget.speed = dum->driver->speed;
++ dum->gadget.speed = dum->driver->max_speed;
+ else if (mod_data.is_high_speed)
+ dum->gadget.speed = min_t(u8, USB_SPEED_HIGH,
+- dum->driver->speed);
++ dum->driver->max_speed);
+ else
+ dum->gadget.speed = USB_SPEED_FULL;
+ dummy_udc_udpate_ep0(dum);
+
+- if (dum->gadget.speed < dum->driver->speed)
++ if (dum->gadget.speed < dum->driver->max_speed)
+ dev_dbg(udc_dev(dum), "This device can perform faster"
+- " if you connect it to a %s port...\n",
+- (dum->driver->speed == USB_SPEED_SUPER ?
+- "SuperSpeed" : "HighSpeed"));
++ " if you connect it to a %s port...\n",
++ usb_speed_string(dum->driver->max_speed));
+ }
+ dum_hcd = gadget_to_dummy_hcd(_gadget);
+
+@@ -898,7 +897,7 @@ static int dummy_udc_start(struct usb_gadget *g,
+ struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
+ struct dummy *dum = dum_hcd->dum;
+
+- if (driver->speed == USB_SPEED_UNKNOWN)
++ if (driver->max_speed == USB_SPEED_UNKNOWN)
+ return -EINVAL;
+
+ /*
+@@ -977,7 +976,7 @@ static int dummy_udc_probe (struct platform_device *pdev)
+
+ dum->gadget.name = gadget_name;
+ dum->gadget.ops = &dummy_ops;
+- dum->gadget.is_dualspeed = 1;
++ dum->gadget.max_speed = USB_SPEED_SUPER;
+
+ dev_set_name(&dum->gadget.dev, "gadget");
+ dum->gadget.dev.parent = &pdev->dev;
+diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
+index 4dff83d..a9f58da 100644
+--- a/drivers/usb/gadget/epautoconf.c
++++ b/drivers/usb/gadget/epautoconf.c
+@@ -149,7 +149,7 @@ ep_matches (
+ switch (type) {
+ case USB_ENDPOINT_XFER_INT:
+ /* INT: limit 64 bytes full speed, 1024 high/super speed */
+- if (!gadget->is_dualspeed && max > 64)
++ if (!gadget_is_dualspeed(gadget) && max > 64)
+ return 0;
+ /* FALLTHROUGH */
+
+@@ -157,12 +157,12 @@ ep_matches (
+ /* ISO: limit 1023 bytes full speed, 1024 high/super speed */
+ if (ep->maxpacket < max)
+ return 0;
+- if (!gadget->is_dualspeed && max > 1023)
++ if (!gadget_is_dualspeed(gadget) && max > 1023)
+ return 0;
+
+ /* BOTH: "high bandwidth" works only at high speed */
+ if ((desc->wMaxPacketSize & cpu_to_le16(3<<11))) {
+- if (!gadget->is_dualspeed)
++ if (!gadget_is_dualspeed(gadget))
+ return 0;
+ /* configure your hardware with enough buffering!! */
+ }
+@@ -380,6 +380,7 @@ void usb_ep_autoconfig_reset (struct usb_gadget *gadget)
+
+ list_for_each_entry (ep, &gadget->ep_list, ep_list) {
+ ep->driver_data = NULL;
++ ep->desc = NULL;
+ }
+ #ifdef MANY_ENDPOINTS
+ in_epnum = 0;
+diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
+index 0cd764d..5c1e1a9 100644
+--- a/drivers/usb/gadget/ether.c
++++ b/drivers/usb/gadget/ether.c
+@@ -93,6 +93,10 @@ static inline bool has_rndis(void)
+ #endif
+ }
+
++static char manufacturer[50];
++
++static u16 vendorID;
++
+ /*-------------------------------------------------------------------------*/
+
+ /*
+@@ -201,8 +205,6 @@ static const struct usb_descriptor_header *otg_desc[] = {
+ #define STRING_MANUFACTURER_IDX 0
+ #define STRING_PRODUCT_IDX 1
+
+-static char manufacturer[50];
+-
+ static struct usb_string strings_dev[] = {
+ [STRING_MANUFACTURER_IDX].s = manufacturer,
+ [STRING_PRODUCT_IDX].s = PREFIX DRIVER_DESC,
+@@ -323,6 +325,8 @@ static int __init eth_bind(struct usb_composite_dev *cdev)
+ device_desc.bNumConfigurations = 2;
+ }
+
++ vendorID = device_desc.idVendor;
++
+ gcnum = usb_gadget_controller_number(gadget);
+ if (gcnum >= 0)
+ device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
+diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
+index 11c07cb..58d9172 100644
+--- a/drivers/usb/gadget/f_ecm.c
++++ b/drivers/usb/gadget/f_ecm.c
+@@ -690,6 +690,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
+ status = -ENODEV;
+
+ /* allocate instance-specific endpoints */
++ usb_ep_autoconfig_reset(cdev->gadget);
+ ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_in_desc);
+ if (!ep)
+ goto fail;
+diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
+index 704d1d9..d2f1b6f 100644
+--- a/drivers/usb/gadget/f_rndis.c
++++ b/drivers/usb/gadget/f_rndis.c
+@@ -684,6 +684,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
+ status = -ENODEV;
+
+ /* allocate instance-specific endpoints */
++ usb_ep_autoconfig_reset(cdev->gadget);
+ ep = usb_ep_autoconfig(cdev->gadget, &fs_in_desc);
+ if (!ep)
+ goto fail;
+@@ -766,14 +767,11 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
+
+ rndis_set_param_medium(rndis->config, NDIS_MEDIUM_802_3, 0);
+ rndis_set_host_mac(rndis->config, rndis->ethaddr);
+-
+-#if 0
+-// FIXME
++/*
+ if (rndis_set_param_vendor(rndis->config, vendorID,
+ manufacturer))
+- goto fail0;
+-#endif
+-
++ goto fail;
++*/
+ /* NOTE: all that is done without knowing or caring about
+ * the network link ... which is unavailable to this code
+ * until we're activated via set_alt().
+diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c
+index 21ab474..160ba02 100644
+--- a/drivers/usb/gadget/f_subset.c
++++ b/drivers/usb/gadget/f_subset.c
+@@ -307,6 +307,7 @@ geth_bind(struct usb_configuration *c, struct usb_function *f)
+ status = -ENODEV;
+
+ /* allocate instance-specific endpoints */
++ usb_ep_autoconfig_reset(cdev->gadget);
+ ep = usb_ep_autoconfig(cdev->gadget, &fs_subset_in_desc);
+ if (!ep)
+ goto fail;
+diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
+index 11b5196..17a7047 100644
+--- a/drivers/usb/gadget/file_storage.c
++++ b/drivers/usb/gadget/file_storage.c
+@@ -3584,7 +3584,7 @@ static void fsg_resume(struct usb_gadget *gadget)
+ /*-------------------------------------------------------------------------*/
+
+ static struct usb_gadget_driver fsg_driver = {
+- .speed = USB_SPEED_SUPER,
++ .max_speed = USB_SPEED_SUPER,
+ .function = (char *) fsg_string_product,
+ .unbind = fsg_unbind,
+ .disconnect = fsg_disconnect,
+diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
+index e00cf92..b7a1efe 100644
+--- a/drivers/usb/gadget/fsl_qe_udc.c
++++ b/drivers/usb/gadget/fsl_qe_udc.c
+@@ -2336,7 +2336,7 @@ static int fsl_qe_start(struct usb_gadget_driver *driver,
+ if (!udc_controller)
+ return -ENODEV;
+
+- if (!driver || driver->speed < USB_SPEED_FULL
++ if (!driver || driver->max_speed < USB_SPEED_FULL
+ || !bind || !driver->disconnect || !driver->setup)
+ return -EINVAL;
+
+@@ -2350,7 +2350,7 @@ static int fsl_qe_start(struct usb_gadget_driver *driver,
+ /* hook up the driver */
+ udc_controller->driver = driver;
+ udc_controller->gadget.dev.driver = &driver->driver;
+- udc_controller->gadget.speed = (enum usb_device_speed)(driver->speed);
++ udc_controller->gadget.speed = driver->max_speed;
+ spin_unlock_irqrestore(&udc_controller->lock, flags);
+
+ retval = bind(&udc_controller->gadget);
+diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
+index 8e3e509..9085d14 100644
+--- a/drivers/usb/gadget/fsl_udc_core.c
++++ b/drivers/usb/gadget/fsl_udc_core.c
+@@ -1932,7 +1932,7 @@ static int fsl_start(struct usb_gadget_driver *driver,
+ if (!udc_controller)
+ return -ENODEV;
+
+- if (!driver || driver->speed < USB_SPEED_FULL
++ if (!driver || driver->max_speed < USB_SPEED_FULL
+ || !bind || !driver->disconnect || !driver->setup)
+ return -EINVAL;
+
+@@ -2523,7 +2523,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
+
+ /* Setup gadget structure */
+ udc_controller->gadget.ops = &fsl_gadget_ops;
+- udc_controller->gadget.is_dualspeed = 1;
++ udc_controller->gadget.max_speed = USB_SPEED_HIGH;
+ udc_controller->gadget.ep0 = &udc_controller->eps[0].ep;
+ INIT_LIST_HEAD(&udc_controller->gadget.ep_list);
+ udc_controller->gadget.speed = USB_SPEED_UNKNOWN;
+diff --git a/drivers/usb/gadget/fusb300_udc.c b/drivers/usb/gadget/fusb300_udc.c
+index 74da206..5831cb4 100644
+--- a/drivers/usb/gadget/fusb300_udc.c
++++ b/drivers/usb/gadget/fusb300_udc.c
+@@ -1317,7 +1317,7 @@ static int fusb300_udc_start(struct usb_gadget_driver *driver,
+ int retval;
+
+ if (!driver
+- || driver->speed < USB_SPEED_FULL
++ || driver->max_speed < USB_SPEED_FULL
+ || !bind
+ || !driver->setup)
+ return -EINVAL;
+@@ -1463,7 +1463,7 @@ static int __init fusb300_probe(struct platform_device *pdev)
+
+ dev_set_name(&fusb300->gadget.dev, "gadget");
+
+- fusb300->gadget.is_dualspeed = 1;
++ fusb300->gadget.max_speed = USB_SPEED_HIGH;
+ fusb300->gadget.dev.parent = &pdev->dev;
+ fusb300->gadget.dev.dma_mask = pdev->dev.dma_mask;
+ fusb300->gadget.dev.release = pdev->dev.release;
+diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
+index 7f87805..5af70fc 100644
+--- a/drivers/usb/gadget/goku_udc.c
++++ b/drivers/usb/gadget/goku_udc.c
+@@ -1357,7 +1357,7 @@ static int goku_start(struct usb_gadget_driver *driver,
+ int retval;
+
+ if (!driver
+- || driver->speed < USB_SPEED_FULL
++ || driver->max_speed < USB_SPEED_FULL
+ || !bind
+ || !driver->disconnect
+ || !driver->setup)
+@@ -1796,6 +1796,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ spin_lock_init(&dev->lock);
+ dev->pdev = pdev;
+ dev->gadget.ops = &goku_ops;
++ dev->gadget.max_speed = USB_SPEED_FULL;
+
+ /* the "gadget" abstracts/virtualizes the controller */
+ dev_set_name(&dev->gadget.dev, "gadget");
+diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c
+index 2d978c0..8d1c75a 100644
+--- a/drivers/usb/gadget/imx_udc.c
++++ b/drivers/usb/gadget/imx_udc.c
+@@ -1336,7 +1336,7 @@ static int imx_udc_start(struct usb_gadget_driver *driver,
+ int retval;
+
+ if (!driver
+- || driver->speed < USB_SPEED_FULL
++ || driver->max_speed < USB_SPEED_FULL
+ || !bind
+ || !driver->disconnect
+ || !driver->setup)
+diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
+index 7138540..2a96f57 100644
+--- a/drivers/usb/gadget/inode.c
++++ b/drivers/usb/gadget/inode.c
+@@ -1768,9 +1768,9 @@ gadgetfs_suspend (struct usb_gadget *gadget)
+
+ static struct usb_gadget_driver gadgetfs_driver = {
+ #ifdef CONFIG_USB_GADGET_DUALSPEED
+- .speed = USB_SPEED_HIGH,
++ .max_speed = USB_SPEED_HIGH,
+ #else
+- .speed = USB_SPEED_FULL,
++ .max_speed = USB_SPEED_FULL,
+ #endif
+ .function = (char *) driver_desc,
+ .unbind = gadgetfs_unbind,
+@@ -1794,7 +1794,7 @@ static int gadgetfs_probe (struct usb_gadget *gadget)
+ }
+
+ static struct usb_gadget_driver probe_driver = {
+- .speed = USB_SPEED_HIGH,
++ .max_speed = USB_SPEED_HIGH,
+ .unbind = gadgetfs_nop,
+ .setup = (void *)gadgetfs_nop,
+ .disconnect = gadgetfs_nop,
+diff --git a/drivers/usb/gadget/langwell_udc.c b/drivers/usb/gadget/langwell_udc.c
+index 6ad0ad6..b0c5b6d 100644
+--- a/drivers/usb/gadget/langwell_udc.c
++++ b/drivers/usb/gadget/langwell_udc.c
+@@ -3265,7 +3265,7 @@ static int langwell_udc_probe(struct pci_dev *pdev,
+ dev->gadget.ep0 = &dev->ep[0].ep; /* gadget ep0 */
+ INIT_LIST_HEAD(&dev->gadget.ep_list); /* ep_list */
+ dev->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
+- dev->gadget.is_dualspeed = 1; /* support dual speed */
++ dev->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */
+ #ifdef OTG_TRANSCEIVER
+ dev->gadget.is_otg = 1; /* support otg mode */
+ #endif
+diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
+index 9aa1cbb..3608b3b 100644
+--- a/drivers/usb/gadget/m66592-udc.c
++++ b/drivers/usb/gadget/m66592-udc.c
+@@ -1472,7 +1472,7 @@ static int m66592_start(struct usb_gadget_driver *driver,
+ int retval;
+
+ if (!driver
+- || driver->speed < USB_SPEED_HIGH
++ || driver->max_speed < USB_SPEED_HIGH
+ || !bind
+ || !driver->setup)
+ return -EINVAL;
+@@ -1653,7 +1653,7 @@ static int __init m66592_probe(struct platform_device *pdev)
+ m66592->gadget.ops = &m66592_gadget_ops;
+ device_initialize(&m66592->gadget.dev);
+ dev_set_name(&m66592->gadget.dev, "gadget");
+- m66592->gadget.is_dualspeed = 1;
++ m66592->gadget.max_speed = USB_SPEED_HIGH;
+ m66592->gadget.dev.parent = &pdev->dev;
+ m66592->gadget.dev.dma_mask = pdev->dev.dma_mask;
+ m66592->gadget.dev.release = pdev->dev.release;
+diff --git a/drivers/usb/gadget/mv_udc_core.c b/drivers/usb/gadget/mv_udc_core.c
+index 8924121..9376a74 100644
+--- a/drivers/usb/gadget/mv_udc_core.c
++++ b/drivers/usb/gadget/mv_udc_core.c
+@@ -2312,7 +2312,7 @@ static int __devinit mv_udc_probe(struct platform_device *dev)
+ udc->gadget.ep0 = &udc->eps[0].ep; /* gadget ep0 */
+ INIT_LIST_HEAD(&udc->gadget.ep_list); /* ep_list */
+ udc->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
+- udc->gadget.is_dualspeed = 1; /* support dual speed */
++ udc->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */
+
+ /* the "gadget" abstracts/virtualizes the controller */
+ dev_set_name(&udc->gadget.dev, "gadget");
+diff --git a/drivers/usb/gadget/net2272.c b/drivers/usb/gadget/net2272.c
+index d1b7636..4c81d54 100644
+--- a/drivers/usb/gadget/net2272.c
++++ b/drivers/usb/gadget/net2272.c
+@@ -1459,7 +1459,7 @@ static int net2272_start(struct usb_gadget *_gadget,
+ unsigned i;
+
+ if (!driver || !driver->unbind || !driver->setup ||
+- driver->speed != USB_SPEED_HIGH)
++ driver->max_speed != USB_SPEED_HIGH)
+ return -EINVAL;
+
+ dev = container_of(_gadget, struct net2272, gadget);
+@@ -2235,7 +2235,7 @@ net2272_probe_init(struct device *dev, unsigned int irq)
+ ret->irq = irq;
+ ret->dev = dev;
+ ret->gadget.ops = &net2272_ops;
+- ret->gadget.is_dualspeed = 1;
++ ret->gadget.max_speed = USB_SPEED_HIGH;
+
+ /* the "gadget" abstracts/virtualizes the controller */
+ dev_set_name(&ret->gadget.dev, "gadget");
+diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
+index da2b9d0..cf1f364 100644
+--- a/drivers/usb/gadget/net2280.c
++++ b/drivers/usb/gadget/net2280.c
+@@ -1881,7 +1881,7 @@ static int net2280_start(struct usb_gadget *_gadget,
+ * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
+ * "must not be used in normal operation"
+ */
+- if (!driver || driver->speed < USB_SPEED_HIGH
++ if (!driver || driver->max_speed < USB_SPEED_HIGH
+ || !driver->setup)
+ return -EINVAL;
+
+@@ -2698,7 +2698,7 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
+ spin_lock_init (&dev->lock);
+ dev->pdev = pdev;
+ dev->gadget.ops = &net2280_ops;
+- dev->gadget.is_dualspeed = 1;
++ dev->gadget.max_speed = USB_SPEED_HIGH;
+
+ /* the "gadget" abstracts/virtualizes the controller */
+ dev_set_name(&dev->gadget.dev, "gadget");
+diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
+index 788989a..8da1492 100644
+--- a/drivers/usb/gadget/omap_udc.c
++++ b/drivers/usb/gadget/omap_udc.c
+@@ -2110,7 +2110,7 @@ static int omap_udc_start(struct usb_gadget_driver *driver,
+ return -ENODEV;
+ if (!driver
+ // FIXME if otg, check: driver->is_otg
+- || driver->speed < USB_SPEED_FULL
++ || driver->max_speed < USB_SPEED_FULL
+ || !bind || !driver->setup)
+ return -EINVAL;
+
+@@ -2676,6 +2676,7 @@ omap_udc_setup(struct platform_device *odev, struct otg_transceiver *xceiv)
+ INIT_LIST_HEAD(&udc->gadget.ep_list);
+ INIT_LIST_HEAD(&udc->iso);
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
++ udc->gadget.max_speed = USB_SPEED_FULL;
+ udc->gadget.name = driver_name;
+
+ device_initialize(&udc->gadget.dev);
+@@ -2794,6 +2795,7 @@ static int __init omap_udc_probe(struct platform_device *pdev)
+ struct omap_usb_config *config = pdev->dev.platform_data;
+ struct clk *dc_clk;
+ struct clk *hhc_clk;
++ u8 pdev_id;
+
+ /* NOTE: "knows" the order of the resources! */
+ if (!request_mem_region(pdev->resource[0].start,
+@@ -2862,7 +2864,8 @@ static int __init omap_udc_probe(struct platform_device *pdev)
+ * use it. Except for OTG, we don't _need_ to talk to one;
+ * but not having one probably means no VBUS detection.
+ */
+- xceiv = otg_get_transceiver();
++ pdev_id = (pdev->id >= 0) ? pdev->id : 0;
++ xceiv = otg_get_transceiver(pdev_id);
+ if (xceiv)
+ type = xceiv->label;
+ else if (config->otg) {
+diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
+index e7fb1a3..870897ac 100644
+--- a/drivers/usb/gadget/pch_udc.c
++++ b/drivers/usb/gadget/pch_udc.c
+@@ -2768,7 +2768,7 @@ static int pch_udc_start(struct usb_gadget_driver *driver,
+ struct pch_udc_dev *dev = pch_udc;
+ int retval;
+
+- if (!driver || (driver->speed == USB_SPEED_UNKNOWN) || !bind ||
++ if (!driver || (driver->max_speed == USB_SPEED_UNKNOWN) || !bind ||
+ !driver->setup || !driver->unbind || !driver->disconnect) {
+ dev_err(&dev->pdev->dev,
+ "%s: invalid driver parameter\n", __func__);
+@@ -3018,7 +3018,7 @@ static int pch_udc_probe(struct pci_dev *pdev,
+ dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
+ dev->gadget.dev.release = gadget_release;
+ dev->gadget.name = KBUILD_MODNAME;
+- dev->gadget.is_dualspeed = 1;
++ dev->gadget.max_speed = USB_SPEED_HIGH;
+
+ retval = device_register(&dev->gadget.dev);
+ if (retval)
+diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
+index 65a8834..d83134b 100644
+--- a/drivers/usb/gadget/printer.c
++++ b/drivers/usb/gadget/printer.c
+@@ -1141,7 +1141,7 @@ printer_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ break;
+ #ifdef CONFIG_USB_GADGET_DUALSPEED
+ case USB_DT_DEVICE_QUALIFIER:
+- if (!gadget->is_dualspeed)
++ if (!gadget_is_dualspeed(gadget))
+ break;
+ /*
+ * assumes ep0 uses the same value for both
+@@ -1155,7 +1155,7 @@ printer_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ break;
+
+ case USB_DT_OTHER_SPEED_CONFIG:
+- if (!gadget->is_dualspeed)
++ if (!gadget_is_dualspeed(gadget))
+ break;
+ /* FALLTHROUGH */
+ #endif /* CONFIG_USB_GADGET_DUALSPEED */
+@@ -1535,7 +1535,7 @@ fail:
+ /*-------------------------------------------------------------------------*/
+
+ static struct usb_gadget_driver printer_driver = {
+- .speed = DEVSPEED,
++ .max_speed = DEVSPEED,
+
+ .function = (char *) driver_desc,
+ .unbind = printer_unbind,
+diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
+index c090a7e..dd47063 100644
+--- a/drivers/usb/gadget/pxa25x_udc.c
++++ b/drivers/usb/gadget/pxa25x_udc.c
+@@ -1264,7 +1264,7 @@ static int pxa25x_start(struct usb_gadget_driver *driver,
+ int retval;
+
+ if (!driver
+- || driver->speed < USB_SPEED_FULL
++ || driver->max_speed < USB_SPEED_FULL
+ || !bind
+ || !driver->disconnect
+ || !driver->setup)
+diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
+index 18b6b09..f4c44eb 100644
+--- a/drivers/usb/gadget/pxa27x_udc.c
++++ b/drivers/usb/gadget/pxa27x_udc.c
+@@ -1807,7 +1807,7 @@ static int pxa27x_udc_start(struct usb_gadget_driver *driver,
+ struct pxa_udc *udc = the_controller;
+ int retval;
+
+- if (!driver || driver->speed < USB_SPEED_FULL || !bind
++ if (!driver || driver->max_speed < USB_SPEED_FULL || !bind
+ || !driver->disconnect || !driver->setup)
+ return -EINVAL;
+ if (!udc)
+diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
+index fc719a3..f5b8d21 100644
+--- a/drivers/usb/gadget/r8a66597-udc.c
++++ b/drivers/usb/gadget/r8a66597-udc.c
+@@ -1746,7 +1746,7 @@ static int r8a66597_start(struct usb_gadget *gadget,
+ struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
+
+ if (!driver
+- || driver->speed < USB_SPEED_HIGH
++ || driver->max_speed < USB_SPEED_HIGH
+ || !driver->setup)
+ return -EINVAL;
+ if (!r8a66597)
+@@ -1911,7 +1911,7 @@ static int __init r8a66597_probe(struct platform_device *pdev)
+
+ r8a66597->gadget.ops = &r8a66597_gadget_ops;
+ dev_set_name(&r8a66597->gadget.dev, "gadget");
+- r8a66597->gadget.is_dualspeed = 1;
++ r8a66597->gadget.max_speed = USB_SPEED_HIGH;
+ r8a66597->gadget.dev.parent = &pdev->dev;
+ r8a66597->gadget.dev.dma_mask = pdev->dev.dma_mask;
+ r8a66597->gadget.dev.release = pdev->dev.release;
+diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
+index d3cdffe..208e574 100644
+--- a/drivers/usb/gadget/rndis.c
++++ b/drivers/usb/gadget/rndis.c
+@@ -314,7 +314,8 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
+ /* mandatory */
+ case OID_GEN_CURRENT_PACKET_FILTER:
+ pr_debug("%s: OID_GEN_CURRENT_PACKET_FILTER\n", __func__);
+- *outbuf = cpu_to_le32(*rndis_per_dev_params[configNr].filter);
++ *outbuf =
++ cpu_to_le32(*(u16 *)rndis_per_dev_params[configNr].filter);
+ retval = 0;
+ break;
+
+@@ -336,7 +337,7 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
+
+ case OID_GEN_PHYSICAL_MEDIUM:
+ pr_debug("%s: OID_GEN_PHYSICAL_MEDIUM\n", __func__);
+- *outbuf = cpu_to_le32(0);
++ *outbuf = __constant_cpu_to_le32(2);
+ retval = 0;
+ break;
+
+@@ -413,7 +414,7 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
+ if (rndis_per_dev_params[configNr].dev) {
+ length = ETH_ALEN;
+ memcpy(outbuf,
+- rndis_per_dev_params[configNr].host_mac,
++ rndis_per_dev_params[configNr].perm_mac,
+ length);
+ retval = 0;
+ }
+@@ -443,7 +444,7 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
+ case OID_802_3_MAXIMUM_LIST_SIZE:
+ pr_debug("%s: OID_802_3_MAXIMUM_LIST_SIZE\n", __func__);
+ /* Multicast base address only */
+- *outbuf = cpu_to_le32(1);
++ *outbuf = __constant_cpu_to_le32(32);
+ retval = 0;
+ break;
+
+@@ -549,6 +550,10 @@ static int gen_ndis_set_resp(u8 configNr, u32 OID, u8 *buf, u32 buf_len,
+ case OID_802_3_MULTICAST_LIST:
+ /* I think we can ignore this */
+ pr_debug("%s: OID_802_3_MULTICAST_LIST\n", __func__);
++ memset(rndis_per_dev_params[configNr].mcast_addr, 0,
++ RNDIS_MAX_MULTICAST_SIZE * 6);
++ memcpy(rndis_per_dev_params[configNr].mcast_addr,
++ buf, buf_len);
+ retval = 0;
+ break;
+
+@@ -578,6 +583,9 @@ static int rndis_init_response(int configNr, rndis_init_msg_type *buf)
+ return -ENOMEM;
+ resp = (rndis_init_cmplt_type *)r->buf;
+
++ if (!resp)
++ return -ENOMEM;
++
+ resp->MessageType = cpu_to_le32(REMOTE_NDIS_INITIALIZE_CMPLT);
+ resp->MessageLength = cpu_to_le32(52);
+ resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
+@@ -788,7 +796,8 @@ void rndis_uninit(int configNr)
+
+ void rndis_set_host_mac(int configNr, const u8 *addr)
+ {
+- rndis_per_dev_params[configNr].host_mac = addr;
++ rndis_per_dev_params[configNr].host_mac = (u8 *)addr;
++ memcpy((void *)rndis_per_dev_params[configNr].perm_mac, addr, 6);
+ }
+
+ /*
+@@ -830,6 +839,8 @@ int rndis_msg_parser(u8 configNr, u8 *buf)
+ __func__);
+ params->state = RNDIS_UNINITIALIZED;
+ if (params->dev) {
++ memcpy((void *)rndis_per_dev_params[configNr].host_mac,
++ (void *)rndis_per_dev_params[configNr].perm_mac, 6);
+ netif_carrier_off(params->dev);
+ netif_stop_queue(params->dev);
+ }
+diff --git a/drivers/usb/gadget/rndis.h b/drivers/usb/gadget/rndis.h
+index 907c330..026da39 100644
+--- a/drivers/usb/gadget/rndis.h
++++ b/drivers/usb/gadget/rndis.h
+@@ -18,7 +18,8 @@
+ #include "ndis.h"
+
+ #define RNDIS_MAXIMUM_FRAME_SIZE 1518
+-#define RNDIS_MAX_TOTAL_SIZE 1558
++#define RNDIS_MAX_TOTAL_SIZE 1514
++#define RNDIS_MAX_MULTICAST_SIZE 32
+
+ /* Remote NDIS Versions */
+ #define RNDIS_MAJOR_VERSION 1
+@@ -230,7 +231,8 @@ typedef struct rndis_params
+ u32 speed;
+ u32 media_state;
+
+- const u8 *host_mac;
++ u8 perm_mac[6];
++ u8 *host_mac;
+ u16 *filter;
+ struct net_device *dev;
+
+@@ -239,6 +241,7 @@ typedef struct rndis_params
+ void (*resp_avail)(void *v);
+ void *v;
+ struct list_head resp_queue;
++ u8 mcast_addr[RNDIS_MAX_MULTICAST_SIZE][6];
+ } rndis_params;
+
+ /* RNDIS Message parser and other useless functions */
+diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
+index b314482..d098c36 100644
+--- a/drivers/usb/gadget/s3c-hsotg.c
++++ b/drivers/usb/gadget/s3c-hsotg.c
+@@ -2586,7 +2586,7 @@ static int s3c_hsotg_start(struct usb_gadget_driver *driver,
+ return -EINVAL;
+ }
+
+- if (driver->speed < USB_SPEED_FULL)
++ if (driver->max_speed < USB_SPEED_FULL)
+ dev_err(hsotg->dev, "%s: bad speed\n", __func__);
+
+ if (!bind || !driver->setup) {
+@@ -3362,7 +3362,7 @@ static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
+
+ dev_set_name(&hsotg->gadget.dev, "gadget");
+
+- hsotg->gadget.is_dualspeed = 1;
++ hsotg->gadget.max_speed = USB_SPEED_HIGH;
+ hsotg->gadget.ops = &s3c_hsotg_gadget_ops;
+ hsotg->gadget.name = dev_name(dev);
+
+diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c
+index 20a553b..f398b85 100644
+--- a/drivers/usb/gadget/s3c-hsudc.c
++++ b/drivers/usb/gadget/s3c-hsudc.c
+@@ -1142,7 +1142,7 @@ static int s3c_hsudc_start(struct usb_gadget_driver *driver,
+ int ret;
+
+ if (!driver
+- || driver->speed < USB_SPEED_FULL
++ || driver->max_speed < USB_SPEED_FULL
+ || !bind
+ || !driver->unbind || !driver->disconnect || !driver->setup)
+ return -EINVAL;
+@@ -1310,7 +1310,7 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
+ device_initialize(&hsudc->gadget.dev);
+ dev_set_name(&hsudc->gadget.dev, "gadget");
+
+- hsudc->gadget.is_dualspeed = 1;
++ hsudc->gadget.max_speed = USB_SPEED_HIGH;
+ hsudc->gadget.ops = &s3c_hsudc_gadget_ops;
+ hsudc->gadget.name = dev_name(dev);
+ hsudc->gadget.dev.parent = dev;
+diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
+index b864377..4d860e9 100644
+--- a/drivers/usb/gadget/s3c2410_udc.c
++++ b/drivers/usb/gadget/s3c2410_udc.c
+@@ -1683,9 +1683,9 @@ static int s3c2410_udc_start(struct usb_gadget_driver *driver,
+ if (udc->driver)
+ return -EBUSY;
+
+- if (!bind || !driver->setup || driver->speed < USB_SPEED_FULL) {
++ if (!bind || !driver->setup || driver->max_speed < USB_SPEED_FULL) {
+ printk(KERN_ERR "Invalid driver: bind %p setup %p speed %d\n",
+- bind, driver->setup, driver->speed);
++ bind, driver->setup, driver->max_speed);
+ return -EINVAL;
+ }
+ #if defined(MODULE)
+diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
+index 6939e17..0b0d12c 100644
+--- a/drivers/usb/gadget/udc-core.c
++++ b/drivers/usb/gadget/udc-core.c
+@@ -371,14 +371,28 @@ static ssize_t usb_udc_softconn_store(struct device *dev,
+ }
+ static DEVICE_ATTR(soft_connect, S_IWUSR, NULL, usb_udc_softconn_store);
+
+-static ssize_t usb_udc_speed_show(struct device *dev,
++#define USB_UDC_SPEED_ATTR(name, param) \
++ssize_t usb_udc_##param##_show(struct device *dev, \
++ struct device_attribute *attr, char *buf) \
++{ \
++ struct usb_udc *udc = container_of(dev, struct usb_udc, dev); \
++ return snprintf(buf, PAGE_SIZE, "%s\n", \
++ usb_speed_string(udc->gadget->param)); \
++} \
++static DEVICE_ATTR(name, S_IRUSR, usb_udc_##param##_show, NULL)
++
++static USB_UDC_SPEED_ATTR(current_speed, speed);
++static USB_UDC_SPEED_ATTR(maximum_speed, max_speed);
++
++/* TODO: Scheduled for removal in 3.8. */
++static ssize_t usb_udc_is_dualspeed_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+- return snprintf(buf, PAGE_SIZE, "%s\n",
+- usb_speed_string(udc->gadget->speed));
++ return snprintf(buf, PAGE_SIZE, "%d\n",
++ gadget_is_dualspeed(udc->gadget));
+ }
+-static DEVICE_ATTR(speed, S_IRUGO, usb_udc_speed_show, NULL);
++static DEVICE_ATTR(is_dualspeed, S_IRUSR, usb_udc_is_dualspeed_show, NULL);
+
+ #define USB_UDC_ATTR(name) \
+ ssize_t usb_udc_##name##_show(struct device *dev, \
+@@ -391,7 +405,6 @@ ssize_t usb_udc_##name##_show(struct device *dev, \
+ } \
+ static DEVICE_ATTR(name, S_IRUGO, usb_udc_##name##_show, NULL)
+
+-static USB_UDC_ATTR(is_dualspeed);
+ static USB_UDC_ATTR(is_otg);
+ static USB_UDC_ATTR(is_a_peripheral);
+ static USB_UDC_ATTR(b_hnp_enable);
+@@ -401,7 +414,8 @@ static USB_UDC_ATTR(a_alt_hnp_support);
+ static struct attribute *usb_udc_attrs[] = {
+ &dev_attr_srp.attr,
+ &dev_attr_soft_connect.attr,
+- &dev_attr_speed.attr,
++ &dev_attr_current_speed.attr,
++ &dev_attr_maximum_speed.attr,
+
+ &dev_attr_is_dualspeed.attr,
+ &dev_attr_is_otg.attr,
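
The gadget-wide rename above replaces the is_dualspeed flag and the driver speed field with explicit max_speed values on both sides, so a UDC advertises the fastest speed it supports and a function driver declares the fastest speed it can handle; the old boolean checks become comparisons against USB_SPEED_HIGH. A small sketch of the resulting checks, assuming the max_speed fields this series adds to struct usb_gadget and struct usb_gadget_driver, and using made-up foo names:

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/errno.h>
#include <linux/printk.h>

static void foo_udc_init(struct usb_gadget *gadget)
{
        gadget->max_speed = USB_SPEED_HIGH;     /* controller capability */
        gadget->speed = USB_SPEED_UNKNOWN;      /* nothing enumerated yet */
}

static int foo_udc_check_driver(struct usb_gadget *gadget,
                                struct usb_gadget_driver *driver)
{
        /* reject function drivers that cannot run at least at full speed */
        if (!driver || driver->max_speed < USB_SPEED_FULL)
                return -EINVAL;

        /* the old gadget->is_dualspeed test is now a speed comparison */
        if (gadget->max_speed < USB_SPEED_HIGH)
                pr_info("foo_udc: full-speed only controller\n");

        return 0;
}

The udc-core.c hunks above expose the same information through the current_speed and maximum_speed sysfs attributes, keeping is_dualspeed only as a deprecated alias scheduled for removal.
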
+diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
+index e39b029..568cefb 100644
+--- a/drivers/usb/host/ehci-omap.c
++++ b/drivers/usb/host/ehci-omap.c
+@@ -41,6 +41,7 @@
+ #include <linux/usb/ulpi.h>
+ #include <plat/usb.h>
+ #include <linux/regulator/consumer.h>
++#include <linux/pm_runtime.h>
+
+ /* EHCI Register Set */
+ #define EHCI_INSNREG04 (0xA0)
+@@ -190,11 +191,8 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
+ }
+ }
+
+- ret = omap_usbhs_enable(dev);
+- if (ret) {
+- dev_err(dev, "failed to start usbhs with err %d\n", ret);
+- goto err_enable;
+- }
++ pm_runtime_enable(dev);
++ pm_runtime_get_sync(dev);
+
+ /*
+ * An undocumented "feature" in the OMAP3 EHCI controller,
+@@ -240,11 +238,8 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
+ return 0;
+
+ err_add_hcd:
+- omap_usbhs_disable(dev);
+-
+-err_enable:
+ disable_put_regulator(pdata);
+- usb_put_hcd(hcd);
++ pm_runtime_put_sync(dev);
+
+ err_io:
+ iounmap(regs);
+@@ -266,10 +261,12 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+ usb_remove_hcd(hcd);
+- omap_usbhs_disable(dev);
+ disable_put_regulator(dev->platform_data);
+ iounmap(hcd->regs);
+ usb_put_hcd(hcd);
++ pm_runtime_put_sync(dev);
++ pm_runtime_disable(dev);
++
+ return 0;
+ }
+
+diff --git a/drivers/usb/host/ohci-omap3.c b/drivers/usb/host/ohci-omap3.c
+index 516ebc4..1b8133b 100644
+--- a/drivers/usb/host/ohci-omap3.c
++++ b/drivers/usb/host/ohci-omap3.c
+@@ -31,6 +31,7 @@
+
+ #include <linux/platform_device.h>
+ #include <plat/usb.h>
++#include <linux/pm_runtime.h>
+
+ /*-------------------------------------------------------------------------*/
+
+@@ -134,7 +135,7 @@ static int __devinit ohci_hcd_omap3_probe(struct platform_device *pdev)
+ int irq;
+
+ if (usb_disabled())
+- goto err_end;
++ return -ENODEV;
+
+ if (!dev->parent) {
+ dev_err(dev, "Missing parent device\n");
+@@ -172,11 +173,8 @@ static int __devinit ohci_hcd_omap3_probe(struct platform_device *pdev)
+ hcd->rsrc_len = resource_size(res);
+ hcd->regs = regs;
+
+- ret = omap_usbhs_enable(dev);
+- if (ret) {
+- dev_dbg(dev, "failed to start ohci\n");
+- goto err_end;
+- }
++ pm_runtime_enable(dev);
++ pm_runtime_get_sync(dev);
+
+ ohci_hcd_init(hcd_to_ohci(hcd));
+
+@@ -189,9 +187,7 @@ static int __devinit ohci_hcd_omap3_probe(struct platform_device *pdev)
+ return 0;
+
+ err_add_hcd:
+- omap_usbhs_disable(dev);
+-
+-err_end:
++ pm_runtime_put_sync(dev);
+ usb_put_hcd(hcd);
+
+ err_io:
+@@ -220,9 +216,9 @@ static int __devexit ohci_hcd_omap3_remove(struct platform_device *pdev)
+
+ iounmap(hcd->regs);
+ usb_remove_hcd(hcd);
+- omap_usbhs_disable(dev);
++ pm_runtime_put_sync(dev);
++ pm_runtime_disable(dev);
+ usb_put_hcd(hcd);
+-
+ return 0;
+ }
+
+diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
+index 07a0346..79f09f5 100644
+--- a/drivers/usb/musb/Kconfig
++++ b/drivers/usb/musb/Kconfig
+@@ -5,14 +5,14 @@
+
+ # (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
+ config USB_MUSB_HDRC
++ tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
+ depends on USB && USB_GADGET
+- depends on (ARM || (BF54x && !BF544) || (BF52x && !BF522 && !BF523))
+ select NOP_USB_XCEIV if (ARCH_DAVINCI || MACH_OMAP3EVM || BLACKFIN)
++ select NOP_USB_XCEIV if (SOC_OMAPTI81XX || SOC_OMAPAM33XX)
+ select TWL4030_USB if MACH_OMAP_3430SDP
+ select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA
+ select USB_OTG_UTILS
+ select USB_GADGET_DUALSPEED
+- tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
+ help
+ Say Y here if your system has a dual role high speed USB
+ controller based on the Mentor Graphics silicon IP. Then
+@@ -31,80 +31,160 @@ config USB_MUSB_HDRC
+ To compile this driver as a module, choose M here; the
+ module will be called "musb-hdrc".
+
+-choice
+- prompt "Platform Glue Layer"
+- depends on USB_MUSB_HDRC
++if USB_MUSB_HDRC
+
+-config USB_MUSB_DAVINCI
++comment "Platform Glue Layer"
++
++config USB_MUSB_DAVINCI_GLUE
++ select USB_MUSB_DAVINCI
+ tristate "DaVinci"
+- depends on ARCH_DAVINCI_DMx
++ depends on ARCH_DAVINCI_DMx && USB_MUSB_HDRC
+
+-config USB_MUSB_DA8XX
++config USB_MUSB_DA8XX_GLUE
++ select USB_MUSB_DA8XX
+ tristate "DA8xx/OMAP-L1x"
+- depends on ARCH_DAVINCI_DA8XX
++ depends on ARCH_DAVINCI_DA8XX && USB_MUSB_HDRC
+
+-config USB_MUSB_TUSB6010
++config USB_MUSB_TUSB6010_GLUE
++ select USB_MUSB_TUSB6010
+ tristate "TUSB6010"
+- depends on ARCH_OMAP
++ depends on ARCH_OMAP && USB_MUSB_HDRC
+
+-config USB_MUSB_OMAP2PLUS
++config USB_MUSB_OMAP2PLUS_GLUE
++ select USB_MUSB_OMAP2PLUS
+ tristate "OMAP2430 and onwards"
+- depends on ARCH_OMAP2PLUS
++ depends on ARCH_OMAP2PLUS && USB_MUSB_HDRC
+
+-config USB_MUSB_AM35X
++config USB_MUSB_AM35X_GLUE
++ select USB_MUSB_AM35X
+ tristate "AM35x"
+- depends on ARCH_OMAP
++ depends on ARCH_OMAP && USB_MUSB_HDRC
+
+-config USB_MUSB_BLACKFIN
++config USB_MUSB_TI81XX_GLUE
++ select USB_MUSB_TI81XX
++ tristate "TI81XX onward"
++ depends on (SOC_OMAPTI81XX || SOC_OMAPAM33XX) && USB_MUSB_HDRC
++
++config USB_MUSB_BLACKFIN_GLUE
++ select USB_MUSB_BLACKFIN
+ tristate "Blackfin"
+- depends on (BF54x && !BF544) || (BF52x && ! BF522 && !BF523)
++ depends on (BF54x && !BF544) || (BF52x && ! BF522 && !BF523) && USB_MUSB_HDRC
+
+-config USB_MUSB_UX500
++config USB_MUSB_UX500_GLUE
++ select USB_MUSB_UX500
+ tristate "U8500 and U5500"
+- depends on (ARCH_U8500 && AB8500_USB)
++ depends on (ARCH_U8500 && AB8500_USB) && USB_MUSB_HDRC
++
++config USB_MUSB_DAVINCI
++ bool
++ default n
++config USB_MUSB_DA8XX
++ bool
++ default n
++config USB_MUSB_TUSB6010
++ bool
++ default n
++config USB_MUSB_OMAP2PLUS
++ bool
++ default n
++config USB_MUSB_AM35X
++ bool
++ default n
++config USB_MUSB_TI81XX
++ bool
++ default n
++config USB_MUSB_BLACKFIN
++ bool
++ default n
++config USB_MUSB_UX500
++ bool
++ default n
++
++choice
++ prompt 'MUSB DMA mode'
++ depends on !MUSB_PIO_ONLY
++ default USB_UX500_DMA_HW if USB_MUSB_UX500
++ default USB_INVENTRA_DMA_HW if USB_MUSB_OMAP2PLUS || USB_MUSB_BLACKFIN
++ default USB_TI_CPPI_DMA_HW if USB_MUSB_DAVINCI
++ default USB_TUSB_OMAP_DMA_HW if USB_MUSB_TUSB6010
++ default USB_TI_CPPI41_DMA_HW if USB_MUSB_DA8XX || USB_MUSB_AM35X || USB_MUSB_TI81XX
++ help
++ Unfortunately, only one option can be enabled here. Ideally one
++ should be able to build all these drivers into one kernel to
++ allow using DMA on multiplatform kernels.
++
++config USB_UX500_DMA_HW
++ tristate 'ST Ericsson U8500 and U5500'
++ select USB_UX500_DMA
++ depends on USB_MUSB_UX500
++ help
++ Enable DMA transfers on UX500 platforms.
++
++config USB_INVENTRA_DMA_HW
++ tristate 'Inventra'
++ select USB_INVENTRA_DMA
++ depends on (USB_MUSB_OMAP2PLUS || USB_MUSB_BLACKFIN) && !USB_MUSB_AM35X
++ help
++ Enable DMA transfers using Mentor's engine.
++
++config USB_TI_CPPI_DMA_HW
++ tristate 'TI CPPI (Davinci)'
++ select USB_TI_CPPI_DMA
++ depends on USB_MUSB_DAVINCI
++ help
++ Enable DMA transfers when TI CPPI DMA is available.
++
++config USB_TI_CPPI41_DMA_HW
++ tristate 'TI CPPI4.1'
++ select USB_TI_CPPI41_DMA
++ depends on USB_MUSB_DA8XX || USB_MUSB_AM35X || USB_MUSB_TI81XX
++ select CPPI41
++ help
++	  Configure this option to include the CPPI 4.1 support.
++	  The CPPI 4.1 DMA engine integrated with the MUSB controller
++	  accelerates USB packet transmission and reception
++	  to/from the MUSB endpoints.
++
++config USB_TUSB_OMAP_DMA_HW
++ tristate 'TUSB 6010'
++ select USB_TUSB_OMAP_DMA
++ depends on USB_MUSB_TUSB6010
++ depends on ARCH_OMAP
++ help
++ Enable DMA transfers on TUSB 6010 when OMAP DMA is available.
+
+ endchoice
+
+ config MUSB_PIO_ONLY
+ bool 'Disable DMA (always use PIO)'
+- depends on USB_MUSB_HDRC
+- default USB_MUSB_TUSB6010 || USB_MUSB_DA8XX || USB_MUSB_AM35X
++ default USB_MUSB_TUSB6010 || USB_MUSB_DA8XX
+ help
+ All data is copied between memory and FIFO by the CPU.
+ DMA controllers are ignored.
+
+- Do not select 'n' here unless DMA support for your SOC or board
++ Do not choose this unless DMA support for your SOC or board
+ is unavailable (or unstable). When DMA is enabled at compile time,
+ you can still disable it at run time using the "use_dma=n" module
+ parameter.
+
+-config USB_UX500_DMA
+- bool
+- depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
+- default USB_MUSB_UX500
+- help
+- Enable DMA transfers on UX500 platforms.
+-
+ config USB_INVENTRA_DMA
+ bool
+- depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
+- default USB_MUSB_OMAP2PLUS || USB_MUSB_BLACKFIN
+- help
+- Enable DMA transfers using Mentor's engine.
++ default n
+
+ config USB_TI_CPPI_DMA
+ bool
+- depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
+- default USB_MUSB_DAVINCI
+- help
+- Enable DMA transfers when TI CPPI DMA is available.
++ default n
++
++config USB_TI_CPPI41_DMA
++ bool
++ default n
+
+ config USB_TUSB_OMAP_DMA
+ bool
+- depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
+- depends on USB_MUSB_TUSB6010
+- depends on ARCH_OMAP
+- default y
+- help
+- Enable DMA transfers on TUSB 6010 when OMAP DMA is available.
++ default n
++
++config USB_UX500_DMA
++ bool
++ default n
+
++endif # USB_MUSB_HDRC
+diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
+index d8fd9d0..da5421b 100644
+--- a/drivers/usb/musb/Makefile
++++ b/drivers/usb/musb/Makefile
+@@ -9,40 +9,25 @@ musb_hdrc-y := musb_core.o
+ musb_hdrc-y += musb_gadget_ep0.o musb_gadget.o
+ musb_hdrc-y += musb_virthub.o musb_host.o
+ musb_hdrc-$(CONFIG_DEBUG_FS) += musb_debugfs.o
+-
++musb_hdrc-$(CONFIG_PROC_FS) += musb_procfs.o
+ # Hardware Glue Layer
+-obj-$(CONFIG_USB_MUSB_OMAP2PLUS) += omap2430.o
+-obj-$(CONFIG_USB_MUSB_AM35X) += am35x.o
+-obj-$(CONFIG_USB_MUSB_TUSB6010) += tusb6010.o
+-obj-$(CONFIG_USB_MUSB_DAVINCI) += davinci.o
+-obj-$(CONFIG_USB_MUSB_DA8XX) += da8xx.o
+-obj-$(CONFIG_USB_MUSB_BLACKFIN) += blackfin.o
+-obj-$(CONFIG_USB_MUSB_UX500) += ux500.o
++obj-$(CONFIG_USB_MUSB_OMAP2PLUS_GLUE) += omap2430.o
++obj-$(CONFIG_USB_MUSB_AM35X_GLUE) += am35x.o
++obj-$(CONFIG_USB_MUSB_TI81XX_GLUE) += ti81xx.o
++obj-$(CONFIG_USB_MUSB_TUSB6010_GLUE) += tusb6010.o
++obj-$(CONFIG_USB_MUSB_DAVINCI_GLUE) += davinci.o
++obj-$(CONFIG_USB_MUSB_DA8XX_GLUE) += da8xx.o
++obj-$(CONFIG_USB_MUSB_BLACKFIN_GLUE) += blackfin.o
++obj-$(CONFIG_USB_MUSB_UX500_GLUE) += ux500.o
+
+ # the kconfig must guarantee that only one of the
+ # possible I/O schemes will be enabled at a time ...
+ # PIO only, or DMA (several potential schemes).
+ # though PIO is always there to back up DMA, and for ep0
+
+-ifneq ($(CONFIG_MUSB_PIO_ONLY),y)
+-
+- ifeq ($(CONFIG_USB_INVENTRA_DMA),y)
+- musb_hdrc-y += musbhsdma.o
+-
+- else
+- ifeq ($(CONFIG_USB_TI_CPPI_DMA),y)
+- musb_hdrc-y += cppi_dma.o
+-
+- else
+- ifeq ($(CONFIG_USB_TUSB_OMAP_DMA),y)
+- musb_hdrc-y += tusb6010_omap.o
+-
+- else
+- ifeq ($(CONFIG_USB_UX500_DMA),y)
+- musb_hdrc-y += ux500_dma.o
+-
+- endif
+- endif
+- endif
+- endif
+-endif
++obj-$(CONFIG_USB_INVENTRA_DMA_HW) += musbhsdma.o
++obj-$(CONFIG_USB_TI_CPPI_DMA_HW) += cppi_dma.o
++obj-$(CONFIG_USB_TI_CPPI41_DMA_HW) += cppi41dma.o
++cppi41dma-y += cppi41.o cppi41_dma.o
++obj-$(CONFIG_USB_TUSB_OMAP_DMA_HW) += tusb6010_omap.o
++obj-$(CONFIG_USB_UX500_DMA_HW) += ux500_dma.o
+diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c
+index e233d2b..1e81fe8 100644
+--- a/drivers/usb/musb/am35x.c
++++ b/drivers/usb/musb/am35x.c
+@@ -36,6 +36,8 @@
+ #include <plat/usb.h>
+
+ #include "musb_core.h"
++#include "cppi41.h"
++#include "cppi41_dma.h"
+
+ /*
+ * AM35x specific definitions
+@@ -46,9 +48,7 @@
+ #define USB_STAT_REG 0x08
+ #define USB_EMULATION_REG 0x0c
+ /* 0x10 Reserved */
+-#define USB_AUTOREQ_REG 0x14
+ #define USB_SRP_FIX_TIME_REG 0x18
+-#define USB_TEARDOWN_REG 0x1c
+ #define EP_INTR_SRC_REG 0x20
+ #define EP_INTR_SRC_SET_REG 0x24
+ #define EP_INTR_SRC_CLEAR_REG 0x28
+@@ -80,8 +80,196 @@
+ #define AM35X_TX_INTR_MASK (AM35X_TX_EP_MASK << AM35X_INTR_TX_SHIFT)
+ #define AM35X_RX_INTR_MASK (AM35X_RX_EP_MASK << AM35X_INTR_RX_SHIFT)
+
++/* CPPI 4.1 queue manager registers */
++#define QMGR_PEND0_REG 0x4090
++#define QMGR_PEND1_REG 0x4094
++#define QMGR_PEND2_REG 0x4098
++
+ #define USB_MENTOR_CORE_OFFSET 0x400
+
++#ifdef CONFIG_USB_TI_CPPI41_DMA
++#define CPPI41_QMGR_REG0SIZE 0x3fff
++
++/*
++ * CPPI 4.1 resources used for USB OTG controller module:
++ *
++ * USB DMA DMA QMgr Tx Src
++ * Tx Rx QNum Port
++ * ---------------------------------
++ * EP0 0 0 0 16,17 1
++ * ---------------------------------
++ * EP1 1 1 0 18,19 2
++ * ---------------------------------
++ * EP2 2 2 0 20,21 3
++ * ---------------------------------
++ * EP3 3 3 0 22,23 4
++ * ---------------------------------
++ */
++
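++/*
++ * Completion queue assignment: the two tables below route every Tx DMA
++ * channel's completions to queue 63 and every Rx channel's completions
++ * to queue 65; both belong to queue manager 0 (cppi_info->q_mgr, set up
++ * in cppi41_init() below).
++ */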
++static u16 tx_comp_q[] = {63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63,
++ 63, 63};
++static u16 rx_comp_q[] = {65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
++ 65, 65};
++
++/* Fair scheduling */
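++/*
++ * Each byte of a scheduler word describes one time slot: the low bits
++ * select the DMA channel and bit 7 marks the slot as Rx (set) or Tx
++ * (clear), the same encoding used by cppi41_schedtbl_add_dma_ch(). The
++ * table below therefore simply alternates Tx and Rx slots for channels
++ * 0 to 14.
++ */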
++u32 dma_sched_table[] = {
++ 0x81018000, 0x83038202, 0x85058404, 0x87078606,
++ 0x89098808, 0x8b0b8a0a, 0x8d0d8c0c, 0x00008e0e
++};
++
++/* DMA block configuration */
++static const struct cppi41_tx_ch tx_ch_info[] = {
++ [0] = {
++ .port_num = 1,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 32} , {0, 33} }
++ },
++ [1] = {
++ .port_num = 2,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 34} , {0, 35} }
++ },
++ [2] = {
++ .port_num = 3,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 36} , {0, 37} }
++ },
++ [3] = {
++ .port_num = 4,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 38} , {0, 39} }
++ },
++ [4] = {
++ .port_num = 5,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 40} , {0, 41} }
++ },
++ [5] = {
++ .port_num = 6,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 42} , {0, 43} }
++ },
++ [6] = {
++ .port_num = 7,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 44} , {0, 45} }
++ },
++ [7] = {
++ .port_num = 8,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 46} , {0, 47} }
++ },
++ [8] = {
++ .port_num = 9,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 48} , {0, 49} }
++ },
++ [9] = {
++ .port_num = 10,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 50} , {0, 51} }
++ },
++ [10] = {
++ .port_num = 11,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 52} , {0, 53} }
++ },
++ [11] = {
++ .port_num = 12,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 54} , {0, 55} }
++ },
++ [12] = {
++ .port_num = 13,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 56} , {0, 57} }
++ },
++ [13] = {
++ .port_num = 14,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 58} , {0, 59} }
++ },
++ [14] = {
++ .port_num = 15,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 60} , {0, 61} }
++ }
++};
++
++/* Queues 0 to 66 are pre-assigned, others are spare */
++static const u32 assigned_queues[] = { 0xffffffff, 0xffffffff, 0x7 };
++
++int __devinit cppi41_init(struct musb *musb)
++{
++ struct usb_cppi41_info *cppi_info = &usb_cppi41_info[musb->id];
++ u16 numch, blknum, order, i;
++
++ /* init cppi info structure */
++ cppi_info->dma_block = 0;
++ for (i = 0 ; i < USB_CPPI41_NUM_CH ; i++)
++ cppi_info->ep_dma_ch[i] = i;
++
++ cppi_info->q_mgr = 0;
++ cppi_info->num_tx_comp_q = 15;
++ cppi_info->num_rx_comp_q = 15;
++ cppi_info->tx_comp_q = tx_comp_q;
++ cppi_info->rx_comp_q = rx_comp_q;
++	cppi_info->bd_intr_ctrl = 0; /* AM35x doesn't support BD interrupts */
++
++ blknum = cppi_info->dma_block;
++
++ /* Queue manager information */
++ cppi41_queue_mgr[0].num_queue = 96;
++ cppi41_queue_mgr[0].queue_types = CPPI41_FREE_DESC_BUF_QUEUE |
++ CPPI41_UNASSIGNED_QUEUE;
++ cppi41_queue_mgr[0].base_fdbq_num = 0;
++ cppi41_queue_mgr[0].assigned = assigned_queues;
++
++ /* init mappings */
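++	/*
++	 * The queue manager and DMA controller register regions are carved
++	 * out of the USB OTG module's address space, so the offsets below
++	 * are all relative to musb->ctrl_base.
++	 */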
++ cppi41_queue_mgr[0].q_mgr_rgn_base = musb->ctrl_base + 0x4000;
++ cppi41_queue_mgr[0].desc_mem_rgn_base = musb->ctrl_base + 0x5000;
++ cppi41_queue_mgr[0].q_mgmt_rgn_base = musb->ctrl_base + 0x6000;
++ cppi41_queue_mgr[0].q_stat_rgn_base = musb->ctrl_base + 0x6800;
++
++ /* init DMA block */
++ cppi41_dma_block[0].num_tx_ch = 15;
++ cppi41_dma_block[0].num_rx_ch = 15;
++ cppi41_dma_block[0].tx_ch_info = tx_ch_info;
++
++ cppi41_dma_block[0].global_ctrl_base = musb->ctrl_base + 0x1000;
++ cppi41_dma_block[0].ch_ctrl_stat_base = musb->ctrl_base + 0x1800;
++ cppi41_dma_block[0].sched_ctrl_base = musb->ctrl_base + 0x2000;
++ cppi41_dma_block[0].sched_table_base = musb->ctrl_base + 0x2800;
++
++ /* Initialize for Linking RAM region 0 alone */
++ cppi41_queue_mgr_init(cppi_info->q_mgr, 0, CPPI41_QMGR_REG0SIZE);
++
++ numch = USB_CPPI41_NUM_CH * 2;
++ order = get_count_order(numch);
++
++	/* TODO: check two teardown descriptors per channel (5 or 7?) */
++ if (order < 5)
++ order = 5;
++
++ cppi41_dma_block_init(blknum, cppi_info->q_mgr, order,
++ dma_sched_table, numch);
++ return 0;
++}
++void cppi41_free(void)
++{
++ u32 numch, blknum, order;
++ struct usb_cppi41_info *cppi_info = &usb_cppi41_info[0];
++
++ numch = USB_CPPI41_NUM_CH * 2;
++ order = get_count_order(numch);
++ blknum = cppi_info->dma_block;
++
++ cppi41_dma_block_uninit(blknum, cppi_info->q_mgr, order,
++ dma_sched_table, numch);
++ cppi41_queue_mgr_uninit(cppi_info->q_mgr);
++}
++#endif /* CONFIG_USB_TI_CPPI41_DMA */
++
+ struct am35x_glue {
+ struct device *dev;
+ struct platform_device *musb;
+@@ -228,10 +416,36 @@ static irqreturn_t am35x_musb_interrupt(int irq, void *hci)
+ struct omap_musb_board_data *data = plat->board_data;
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
++ u32 pend1 = 0, pend2 = 0, tx, rx;
+ u32 epintr, usbintr;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
++ /*
++ * CPPI 4.1 interrupts share the same IRQ and the EOI register but
++ * don't get reflected in the interrupt source/mask registers.
++ */
++ if (is_cppi41_enabled(musb)) {
++ /*
++ * Check for the interrupts from Tx/Rx completion queues; they
++ * are level-triggered and will stay asserted until the queues
++ * are emptied. We're using the queue pending register 0 as a
++ * substitute for the interrupt status register and reading it
++ * directly for speed.
++ */
++ pend1 = musb_readl(reg_base, QMGR_PEND1_REG);
++ pend2 = musb_readl(reg_base, QMGR_PEND2_REG);
++
++		/* AM3517 uses queues 63, 64, 65 and 66 as completion queues */
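++		/*
++		 * QMGR pending register 1 covers queues 32-63 and register 2
++		 * covers queues 64-95, so bit 31 of pend1 and bit 0 of pend2
++		 * are the Tx completion queues (63, 64) while bits 1 and 2 of
++		 * pend2 are the Rx completion queues (65, 66).
++		 */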
++ if ((pend1 & (1 << 31)) || (pend2 & (7 << 0))) {
++ tx = (pend1 >> 31) | ((pend2 & 1) ? (1 << 1) : 0);
++ rx = (pend2 >> 1) & 0x3;
++
++ dev_dbg(musb->controller, "CPPI 4.1 IRQ: Tx %x, Rx %x\n", tx, rx);
++ cppi41_completion(musb, rx, tx);
++ ret = IRQ_HANDLED;
++ }
++ }
+ /* Get endpoint interrupts */
+ epintr = musb_readl(reg_base, EP_INTR_SRC_MASKED_REG);
+
+@@ -362,8 +576,8 @@ static int am35x_musb_init(struct musb *musb)
+ if (!rev)
+ return -ENODEV;
+
+- usb_nop_xceiv_register();
+- musb->xceiv = otg_get_transceiver();
++ usb_nop_xceiv_register(musb->id);
++ musb->xceiv = otg_get_transceiver(musb->id);
+ if (!musb->xceiv)
+ return -ENODEV;
+
+@@ -379,10 +593,14 @@ static int am35x_musb_init(struct musb *musb)
+
+ /* Start the on-chip PHY and its PLL. */
+ if (data->set_phy_power)
+- data->set_phy_power(1);
++ data->set_phy_power(0, 1);
+
+ msleep(5);
+
++#ifdef CONFIG_USB_TI_CPPI41_DMA
++ cppi41_init(musb);
++#endif
++
+ musb->isr = am35x_musb_interrupt;
+
+ /* clear level interrupt */
+@@ -403,16 +621,16 @@ static int am35x_musb_exit(struct musb *musb)
+
+ /* Shutdown the on-chip PHY and its PLL. */
+ if (data->set_phy_power)
+- data->set_phy_power(0);
++ data->set_phy_power(0, 0);
+
+ otg_put_transceiver(musb->xceiv);
+- usb_nop_xceiv_unregister();
++ usb_nop_xceiv_unregister(musb->id);
+
+ return 0;
+ }
+
+ /* AM35x supports only 32bit read operation */
+-void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
++static void am35x_musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
+ {
+ void __iomem *fifo = hw_ep->fifo;
+ u32 val;
+@@ -442,6 +660,8 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
+ }
+
+ static const struct musb_platform_ops am35x_ops = {
++ .fifo_mode = 4,
++ .flags = MUSB_GLUE_EP_ADDR_FLAT_MAPPING | MUSB_GLUE_DMA_CPPI41,
+ .init = am35x_musb_init,
+ .exit = am35x_musb_exit,
+
+@@ -452,6 +672,12 @@ static const struct musb_platform_ops am35x_ops = {
+ .try_idle = am35x_musb_try_idle,
+
+ .set_vbus = am35x_musb_set_vbus,
++
++ .read_fifo = am35x_musb_read_fifo,
++ .write_fifo = musb_write_fifo,
++
++ .dma_controller_create = cppi41_dma_controller_create,
++ .dma_controller_destroy = cppi41_dma_controller_destroy,
+ };
+
+ static u64 am35x_dmamask = DMA_BIT_MASK(32);
+@@ -473,12 +699,13 @@ static int __init am35x_probe(struct platform_device *pdev)
+ goto err0;
+ }
+
+- musb = platform_device_alloc("musb-hdrc", -1);
++ musb = platform_device_alloc("musb-hdrc", pdev->id);
+ if (!musb) {
+ dev_err(&pdev->dev, "failed to allocate musb device\n");
+ goto err1;
+ }
+
++ dev_set_name(&pdev->dev, "musb-am35x");
+ phy_clk = clk_get(&pdev->dev, "fck");
+ if (IS_ERR(phy_clk)) {
+ dev_err(&pdev->dev, "failed to get PHY clock\n");
+@@ -566,7 +793,7 @@ static int __exit am35x_remove(struct platform_device *pdev)
+ struct am35x_glue *glue = platform_get_drvdata(pdev);
+
+ platform_device_del(glue->musb);
+- platform_device_put(glue->musb);
++ /*platform_device_put(glue->musb);*/
+ clk_disable(glue->clk);
+ clk_disable(glue->phy_clk);
+ clk_put(glue->clk);
+@@ -585,7 +812,7 @@ static int am35x_suspend(struct device *dev)
+
+ /* Shutdown the on-chip PHY and its PLL. */
+ if (data->set_phy_power)
+- data->set_phy_power(0);
++ data->set_phy_power(0, 0);
+
+ clk_disable(glue->phy_clk);
+ clk_disable(glue->clk);
+@@ -602,7 +829,7 @@ static int am35x_resume(struct device *dev)
+
+ /* Start the on-chip PHY and its PLL. */
+ if (data->set_phy_power)
+- data->set_phy_power(1);
++ data->set_phy_power(0, 1);
+
+ ret = clk_enable(glue->phy_clk);
+ if (ret) {
+@@ -649,6 +876,9 @@ subsys_initcall(am35x_init);
+
+ static void __exit am35x_exit(void)
+ {
++#ifdef CONFIG_USB_TI_CPPI41_DMA
++ cppi41_free();
++#endif
+ platform_driver_unregister(&am35x_driver);
+ }
+ module_exit(am35x_exit);
+diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
+index 5e7cfba..241a168 100644
+--- a/drivers/usb/musb/blackfin.c
++++ b/drivers/usb/musb/blackfin.c
+@@ -34,7 +34,8 @@ struct bfin_glue {
+ /*
+ * Load an endpoint's FIFO
+ */
+-void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
++static void bfin_musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len,
++ const u8 *src)
+ {
+ struct musb *musb = hw_ep->musb;
+ void __iomem *fifo = hw_ep->fifo;
+@@ -98,7 +99,7 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
+ /*
+ * Unload an endpoint's FIFO
+ */
+-void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
++static void bfin_musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
+ {
+ struct musb *musb = hw_ep->musb;
+ void __iomem *fifo = hw_ep->fifo;
+@@ -414,8 +415,8 @@ static int bfin_musb_init(struct musb *musb)
+ }
+ gpio_direction_output(musb->config->gpio_vrsel, 0);
+
+- usb_nop_xceiv_register();
+- musb->xceiv = otg_get_transceiver();
++ usb_nop_xceiv_register(musb->id);
++ musb->xceiv = otg_get_transceiver(musb->id);
+ if (!musb->xceiv) {
+ gpio_free(musb->config->gpio_vrsel);
+ return -ENODEV;
+@@ -441,17 +442,23 @@ static int bfin_musb_exit(struct musb *musb)
+ gpio_free(musb->config->gpio_vrsel);
+
+ otg_put_transceiver(musb->xceiv);
+- usb_nop_xceiv_unregister();
++ usb_nop_xceiv_unregister(musb->id);
+ return 0;
+ }
+
+ static const struct musb_platform_ops bfin_ops = {
++ .fifo_mode = 2,
++ .flags = MUSB_GLUE_EP_ADDR_FLAT_MAPPING |
++ MUSB_GLUE_DMA_INVENTRA,
+ .init = bfin_musb_init,
+ .exit = bfin_musb_exit,
+
+ .enable = bfin_musb_enable,
+ .disable = bfin_musb_disable,
+
++ .read_fifo = bfin_musb_read_fifo,
++ .write_fifo = bfin_musb_write_fifo,
++
+ .set_mode = bfin_musb_set_mode,
+ .try_idle = bfin_musb_try_idle,
+
+@@ -459,6 +466,9 @@ static const struct musb_platform_ops bfin_ops = {
+ .set_vbus = bfin_musb_set_vbus,
+
+ .adjust_channel_params = bfin_musb_adjust_channel_params,
++
++ .dma_controller_create = inventra_dma_controller_create,
++ .dma_controller_destroy = inventra_dma_controller_destroy,
+ };
+
+ static u64 bfin_dmamask = DMA_BIT_MASK(32);
+diff --git a/drivers/usb/musb/cppi41.c b/drivers/usb/musb/cppi41.c
+new file mode 100644
+index 0000000..7b3c0bd
+--- /dev/null
++++ b/drivers/usb/musb/cppi41.c
+@@ -0,0 +1,1124 @@
++/*
++ * CPPI 4.1 support
++ *
++ * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
++ *
++ * Based on the PAL CPPI 4.1 implementation
++ * Copyright (C) 1998-2006 Texas Instruments Incorporated
++ *
++ * This file contains the main implementation for CPPI 4.1 common peripherals,
++ * including the DMA Controllers and the Queue Managers.
++ *
++ * This program is free software; you can distribute it and/or modify it
++ * under the terms of the GNU General Public License (Version 2) as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
++ *
++ */
++
++#include <linux/io.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <linux/dma-mapping.h>
++
++#include "cppi41.h"
++
++#undef CPPI41_DEBUG
++
++#ifdef CPPI41_DEBUG
++#define DBG(format, args...) printk(format, ##args)
++#else
++#define DBG(format, args...)
++#endif
++
++static struct {
++ void *virt_addr;
++ dma_addr_t phys_addr;
++ u32 size;
++} linking_ram[CPPI41_NUM_QUEUE_MGR];
++
++static u32 *allocated_queues[CPPI41_NUM_QUEUE_MGR];
++
++/* First 32 packet descriptors are reserved for unallocated memory regions. */
++static u32 next_desc_index[CPPI41_NUM_QUEUE_MGR] = { 1 << 5 };
++static u8 next_mem_rgn[CPPI41_NUM_QUEUE_MGR];
++
++static struct {
++ size_t rgn_size;
++ void *virt_addr;
++ dma_addr_t phys_addr;
++ struct cppi41_queue_obj queue_obj;
++ u8 mem_rgn;
++ u16 q_mgr;
++ u16 q_num;
++ u32 num_desc;
++} dma_teardown[CPPI41_NUM_DMA_BLOCK];
++
++struct cppi41_dma_sched_tbl_t {
++ u8 pos;
++ u8 dma_ch;
++ u8 is_tx;
++ u8 enb;
++};
++
++struct cppi41_dma_sched_tbl_t dma_sched_tbl[MAX_SCHED_TBL_ENTRY] = {
++ /*pos dma_ch# is_tx enb/dis*/
++ { 0, 0, 0, 1},
++ { 1, 0, 1, 1},
++ { 2, 1, 0, 1},
++ { 3, 1, 1, 1},
++ { 4, 2, 0, 1},
++ { 5, 2, 1, 1},
++ { 6, 3, 0, 1},
++ { 7, 3, 1, 1}
++};
++
++struct cppi41_queue_mgr cppi41_queue_mgr[CPPI41_NUM_QUEUE_MGR];
++EXPORT_SYMBOL(cppi41_queue_mgr);
++
++struct cppi41_dma_block cppi41_dma_block[CPPI41_NUM_DMA_BLOCK];
++EXPORT_SYMBOL(cppi41_dma_block);
++/******************** CPPI 4.1 Functions (External Interface) *****************/
++
++int cppi41_queue_mgr_init(u8 q_mgr, dma_addr_t rgn0_base, u16 rgn0_size)
++{
++ void __iomem *q_mgr_regs;
++ void *ptr;
++
++ if (q_mgr >= cppi41_num_queue_mgr)
++ return -EINVAL;
++
++ q_mgr_regs = cppi41_queue_mgr[q_mgr].q_mgr_rgn_base;
++ ptr = dma_alloc_coherent(NULL, rgn0_size * 4,
++ &linking_ram[q_mgr].phys_addr,
++ GFP_KERNEL | GFP_DMA);
++ if (ptr == NULL) {
++ printk(KERN_ERR "ERROR: %s: Unable to allocate "
++ "linking RAM.\n", __func__);
++ return -ENOMEM;
++ }
++ linking_ram[q_mgr].virt_addr = ptr;
++ linking_ram[q_mgr].size = rgn0_size * 4;
++
++ cppi_writel(linking_ram[q_mgr].phys_addr,
++ q_mgr_regs + QMGR_LINKING_RAM_RGN0_BASE_REG);
++ DBG("Linking RAM region 0 base @ %p, value: %x\n",
++ q_mgr_regs + QMGR_LINKING_RAM_RGN0_BASE_REG,
++ cppi_readl(q_mgr_regs + QMGR_LINKING_RAM_RGN0_BASE_REG));
++
++ cppi_writel(rgn0_size, q_mgr_regs + QMGR_LINKING_RAM_RGN0_SIZE_REG);
++ DBG("Linking RAM region 0 size @ %p, value: %x\n",
++ q_mgr_regs + QMGR_LINKING_RAM_RGN0_SIZE_REG,
++ cppi_readl(q_mgr_regs + QMGR_LINKING_RAM_RGN0_SIZE_REG));
++
++ ptr = kzalloc(BITS_TO_LONGS(cppi41_queue_mgr[q_mgr].num_queue) *
++ sizeof(long), GFP_KERNEL);
++ if (ptr == NULL) {
++ printk(KERN_ERR "ERROR: %s: Unable to allocate queue bitmap.\n",
++ __func__);
++ dma_free_coherent(NULL, rgn0_size * 4,
++ linking_ram[q_mgr].virt_addr,
++ linking_ram[q_mgr].phys_addr);
++ return -ENOMEM;
++ }
++ allocated_queues[q_mgr] = ptr;
++
++ return 0;
++}
++EXPORT_SYMBOL(cppi41_queue_mgr_init);
++
++int cppi41_queue_mgr_uninit(u8 q_mgr)
++{
++ void __iomem *q_mgr_regs;
++
++ if (q_mgr >= cppi41_num_queue_mgr)
++ return -EINVAL;
++
++ q_mgr_regs = cppi41_queue_mgr[q_mgr].q_mgr_rgn_base;
++
++ /* free the Queue Mgr linking ram space */
++ cppi_writel(0, q_mgr_regs + QMGR_LINKING_RAM_RGN0_BASE_REG);
++ cppi_writel(0, q_mgr_regs + QMGR_LINKING_RAM_RGN0_SIZE_REG);
++ dma_free_coherent(NULL, linking_ram[q_mgr].size,
++ linking_ram[q_mgr].virt_addr,
++ linking_ram[q_mgr].phys_addr);
++
++ /* free the allocated queues */
++ kfree(allocated_queues[q_mgr]);
++ return 0;
++}
++EXPORT_SYMBOL(cppi41_queue_mgr_uninit);
++
++int cppi41_dma_sched_tbl_init(u8 dma_num, u8 q_mgr,
++ u32 *sched_tbl, u8 tbl_size)
++{
++ struct cppi41_dma_block *dma_block;
++ int num_reg, k, i, val = 0;
++
++ dma_block = (struct cppi41_dma_block *)&cppi41_dma_block[dma_num];
++
++ num_reg = (tbl_size + 3) / 4;
++ for (k = i = 0; i < num_reg; i++) {
++#if 0
++ for (val = j = 0; j < 4; j++, k++) {
++ val >>= 8;
++ if (k < tbl_size)
++ val |= sched_tbl[k] << 24;
++ }
++#endif
++ val = sched_tbl[i];
++ cppi_writel(val, dma_block->sched_table_base +
++ DMA_SCHED_TABLE_WORD_REG(i));
++ DBG("DMA scheduler table @ %p, value written: %x\n",
++ dma_block->sched_table_base + DMA_SCHED_TABLE_WORD_REG(i),
++ val);
++ }
++ return 0;
++}
++EXPORT_SYMBOL(cppi41_dma_sched_tbl_init);
++
++int cppi41_schedtbl_add_dma_ch(u8 dmanum, u8 qmgr, u8 dma_ch, u8 is_tx)
++{
++ struct cppi41_dma_block *dma_block;
++ int num_ch, i, tbl_index = 0, j = 0, found = 0;
++ u32 val;
++
++ dma_block = (struct cppi41_dma_block *)&cppi41_dma_block[dmanum];
++
++ val = 0;
++ for (num_ch = 0, i = 0; i < MAX_SCHED_TBL_ENTRY; i++) {
++ if (!found && dma_sched_tbl[i].dma_ch == dma_ch &&
++ dma_sched_tbl[i].is_tx == is_tx &&
++ dma_sched_tbl[i].enb == 0) {
++ dma_sched_tbl[i].enb = 1;
++ found = 1;
++ }
++
++ if (dma_sched_tbl[i].enb) {
++ val |= ((dma_sched_tbl[i].dma_ch |
++ (dma_sched_tbl[i].is_tx ? 0 : (1<<7))) << j*8);
++ num_ch++;
++ j++;
++ }
++ if (num_ch % 4 == 0) {
++ cppi_writel(val, dma_block->sched_table_base +
++ DMA_SCHED_TABLE_WORD_REG(tbl_index));
++ tbl_index++;
++ val = j = 0;
++ }
++ }
++
++ if (num_ch % 4) {
++ cppi_writel(val, dma_block->sched_table_base +
++ DMA_SCHED_TABLE_WORD_REG(tbl_index));
++ }
++ return num_ch;
++}
++EXPORT_SYMBOL(cppi41_schedtbl_add_dma_ch);
++
++int cppi41_schedtbl_remove_dma_ch(u8 dmanum, u8 qmgr, u8 dma_ch, u8 is_tx)
++{
++ struct cppi41_dma_block *dma_block;
++ int num_ch, i, tbl_index = 0, j = 0, found = 0;
++ u32 val;
++
++ dma_block = (struct cppi41_dma_block *)&cppi41_dma_block[dmanum];
++
++ val = 0;
++ for (num_ch = 0, i = 0; i < MAX_SCHED_TBL_ENTRY; i++) {
++ if (!found && dma_sched_tbl[i].dma_ch == dma_ch &&
++ dma_sched_tbl[i].is_tx == is_tx &&
++ dma_sched_tbl[i].enb == 1) {
++ dma_sched_tbl[i].enb = 0;
++ }
++
++ if (dma_sched_tbl[i].enb) {
++ val |= ((dma_sched_tbl[i].dma_ch |
++ (dma_sched_tbl[i].is_tx ? 0 : (1<<7))) << j*8);
++ num_ch++;
++ j++;
++ }
++ if (num_ch % 4 == 0) {
++ cppi_writel(val, dma_block->sched_table_base +
++ DMA_SCHED_TABLE_WORD_REG(tbl_index));
++ tbl_index++;
++ val = j = 0;
++ }
++ }
++
++ if (num_ch % 4) {
++ cppi_writel(val, dma_block->sched_table_base +
++ DMA_SCHED_TABLE_WORD_REG(tbl_index));
++ }
++ return num_ch;
++}
++EXPORT_SYMBOL(cppi41_schedtbl_remove_dma_ch);
++
++int cppi41_dma_block_init(u8 dma_num, u8 q_mgr, u8 num_order,
++ u32 *sched_tbl, u8 tbl_size)
++{
++ const struct cppi41_dma_block *dma_block;
++ unsigned num_desc, num_reg;
++ void *ptr;
++ int error, i;
++ u16 q_num;
++ u32 val;
++
++ if (dma_num >= cppi41_num_dma_block ||
++ q_mgr >= cppi41_num_queue_mgr ||
++ !tbl_size || sched_tbl == NULL)
++ return -EINVAL;
++
++ error = cppi41_queue_alloc(CPPI41_FREE_DESC_QUEUE |
++ CPPI41_UNASSIGNED_QUEUE, q_mgr, &q_num);
++ if (error) {
++ printk(KERN_ERR "ERROR: %s: Unable to allocate teardown "
++ "descriptor queue.\n", __func__);
++ return error;
++ }
++ DBG("Teardown descriptor queue %d in queue manager 0 "
++ "allocated\n", q_num);
++
++ /*
++ * Tell the hardware about the Teardown descriptor
++ * queue manager and queue number.
++ */
++ dma_block = &cppi41_dma_block[dma_num];
++ cppi_writel((q_mgr << DMA_TD_DESC_QMGR_SHIFT) |
++ (q_num << DMA_TD_DESC_QNUM_SHIFT),
++ dma_block->global_ctrl_base +
++ DMA_TEARDOWN_FREE_DESC_CTRL_REG);
++ DBG("Teardown free descriptor control @ %p, value: %x\n",
++ dma_block->global_ctrl_base + DMA_TEARDOWN_FREE_DESC_CTRL_REG,
++ cppi_readl(dma_block->global_ctrl_base +
++ DMA_TEARDOWN_FREE_DESC_CTRL_REG));
++
++ num_desc = 1 << num_order;
++ dma_teardown[dma_num].rgn_size = num_desc *
++ sizeof(struct cppi41_teardown_desc);
++
++ /* Pre-allocate teardown descriptors. */
++ ptr = dma_alloc_coherent(NULL, dma_teardown[dma_num].rgn_size,
++ &dma_teardown[dma_num].phys_addr,
++ GFP_KERNEL | GFP_DMA);
++ if (ptr == NULL) {
++ printk(KERN_ERR "ERROR: %s: Unable to allocate teardown "
++ "descriptors.\n", __func__);
++ error = -ENOMEM;
++ goto free_queue;
++ }
++ dma_teardown[dma_num].virt_addr = ptr;
++
++ error = cppi41_mem_rgn_alloc(q_mgr, dma_teardown[dma_num].phys_addr, 5,
++ num_order, &dma_teardown[dma_num].mem_rgn);
++ if (error) {
++ printk(KERN_ERR "ERROR: %s: Unable to allocate queue manager "
++ "memory region for teardown descriptors.\n", __func__);
++ goto free_mem;
++ }
++
++ error = cppi41_queue_init(&dma_teardown[dma_num].queue_obj, 0, q_num);
++ if (error) {
++ printk(KERN_ERR "ERROR: %s: Unable to initialize teardown "
++ "free descriptor queue.\n", __func__);
++ goto free_rgn;
++ }
++
++ dma_teardown[dma_num].q_num = q_num;
++ dma_teardown[dma_num].q_mgr = q_mgr;
++ dma_teardown[dma_num].num_desc = num_desc;
++ /*
++ * Push all teardown descriptors to the free teardown queue
++ * for the CPPI 4.1 system.
++ */
++ cppi41_init_teardown_queue(dma_num);
++
++ /* Initialize the DMA scheduler. */
++ num_reg = (tbl_size + 3) / 4;
++ for (i = 0; i < num_reg; i++) {
++ val = sched_tbl[i];
++ cppi_writel(val, dma_block->sched_table_base +
++ DMA_SCHED_TABLE_WORD_REG(i));
++ DBG("DMA scheduler table @ %p, value written: %x\n",
++ dma_block->sched_table_base + DMA_SCHED_TABLE_WORD_REG(i),
++ val);
++ }
++
++ cppi_writel((tbl_size - 1) << DMA_SCHED_LAST_ENTRY_SHIFT |
++ DMA_SCHED_ENABLE_MASK,
++ dma_block->sched_ctrl_base + DMA_SCHED_CTRL_REG);
++ DBG("DMA scheduler control @ %p, value: %x\n",
++ dma_block->sched_ctrl_base + DMA_SCHED_CTRL_REG,
++ cppi_readl(dma_block->sched_ctrl_base + DMA_SCHED_CTRL_REG));
++
++ return 0;
++
++free_rgn:
++ cppi41_mem_rgn_free(q_mgr, dma_teardown[dma_num].mem_rgn);
++free_mem:
++ dma_free_coherent(NULL, dma_teardown[dma_num].rgn_size,
++ dma_teardown[dma_num].virt_addr,
++ dma_teardown[dma_num].phys_addr);
++free_queue:
++ cppi41_queue_free(q_mgr, q_num);
++ return error;
++}
++EXPORT_SYMBOL(cppi41_dma_block_init);
++
++int cppi41_dma_block_uninit(u8 dma_num, u8 q_mgr, u8 num_order,
++ u32 *sched_tbl, u8 tbl_size)
++{
++ const struct cppi41_dma_block *dma_block;
++ unsigned num_reg;
++ int i;
++
++	/* pop out all teardown descriptors */
++ cppi41_free_teardown_queue(dma_num);
++
++ /* free queue mgr region */
++ cppi41_mem_rgn_free(q_mgr, dma_teardown[dma_num].mem_rgn);
++ /* free the allocated teardown descriptors */
++ dma_free_coherent(NULL, dma_teardown[dma_num].rgn_size,
++ dma_teardown[dma_num].virt_addr,
++ dma_teardown[dma_num].phys_addr);
++
++	/* free the teardown queue */
++ cppi41_queue_free(dma_teardown[dma_num].q_mgr,
++ dma_teardown[dma_num].q_num);
++
++ dma_block = (struct cppi41_dma_block *)&cppi41_dma_block[dma_num];
++	/* disable the DMA scheduler */
++ num_reg = (tbl_size + 3) / 4;
++ for (i = 0; i < num_reg; i++) {
++ cppi_writel(0, dma_block->sched_table_base +
++ DMA_SCHED_TABLE_WORD_REG(i));
++ DBG("DMA scheduler table @ %p, value written: %x\n",
++ dma_block->sched_table_base + DMA_SCHED_TABLE_WORD_REG(i),
++ 0);
++ }
++
++ cppi_writel(0, dma_block->sched_ctrl_base + DMA_SCHED_CTRL_REG);
++
++ return 0;
++}
++EXPORT_SYMBOL(cppi41_dma_block_uninit);
++/*
++ * cppi41_mem_rgn_alloc - allocate a memory region within the queue manager
++ */
++int cppi41_mem_rgn_alloc(u8 q_mgr, dma_addr_t rgn_addr, u8 size_order,
++ u8 num_order, u8 *mem_rgn)
++{
++ void __iomem *desc_mem_regs;
++ u32 num_desc = 1 << num_order, index, ctrl;
++ int rgn;
++
++ DBG("%s called with rgn_addr = %08x, size_order = %d, num_order = %d\n",
++ __func__, rgn_addr, size_order, num_order);
++
++ if (q_mgr >= cppi41_num_queue_mgr ||
++ size_order < 5 || size_order > 13 ||
++ num_order < 5 || num_order > 12 ||
++ (rgn_addr & ((1 << size_order) - 1)))
++ return -EINVAL;
++
++ rgn = next_mem_rgn[q_mgr];
++ index = next_desc_index[q_mgr];
++ if (rgn >= CPPI41_MAX_MEM_RGN || index + num_desc > 0x4000)
++ return -ENOSPC;
++
++ next_mem_rgn[q_mgr] = rgn + 1;
++ next_desc_index[q_mgr] = index + num_desc;
++
++ desc_mem_regs = cppi41_queue_mgr[q_mgr].desc_mem_rgn_base;
++
++ /* Write the base register */
++ cppi_writel(rgn_addr, desc_mem_regs + QMGR_MEM_RGN_BASE_REG(rgn));
++ DBG("Descriptor region base @ %p, value: %x\n",
++ desc_mem_regs + QMGR_MEM_RGN_BASE_REG(rgn),
++ cppi_readl(desc_mem_regs + QMGR_MEM_RGN_BASE_REG(rgn)));
++
++ /* Write the control register */
++ ctrl = ((index << QMGR_MEM_RGN_INDEX_SHIFT) &
++ QMGR_MEM_RGN_INDEX_MASK) |
++ (((size_order - 5) << QMGR_MEM_RGN_DESC_SIZE_SHIFT) &
++ QMGR_MEM_RGN_DESC_SIZE_MASK) |
++ (((num_order - 5) << QMGR_MEM_RGN_SIZE_SHIFT) &
++ QMGR_MEM_RGN_SIZE_MASK);
++ cppi_writel(ctrl, desc_mem_regs + QMGR_MEM_RGN_CTRL_REG(rgn));
++ DBG("Descriptor region control @ %p, value: %x\n",
++ desc_mem_regs + QMGR_MEM_RGN_CTRL_REG(rgn),
++ cppi_readl(desc_mem_regs + QMGR_MEM_RGN_CTRL_REG(rgn)));
++
++ *mem_rgn = rgn;
++ return 0;
++}
++EXPORT_SYMBOL(cppi41_mem_rgn_alloc);
++
++/*
++ * cppi41_mem_rgn_free - free the memory region within the queue manager
++ */
++int cppi41_mem_rgn_free(u8 q_mgr, u8 mem_rgn)
++{
++ void __iomem *desc_mem_regs;
++
++ DBG("%s called.\n", __func__);
++
++ if (q_mgr >= cppi41_num_queue_mgr || mem_rgn >= next_mem_rgn[q_mgr])
++ return -EINVAL;
++
++ desc_mem_regs = cppi41_queue_mgr[q_mgr].desc_mem_rgn_base;
++
++ if (cppi_readl(desc_mem_regs + QMGR_MEM_RGN_BASE_REG(mem_rgn)) == 0)
++ return -ENOENT;
++
++ cppi_writel(0, desc_mem_regs + QMGR_MEM_RGN_BASE_REG(mem_rgn));
++ cppi_writel(0, desc_mem_regs + QMGR_MEM_RGN_CTRL_REG(mem_rgn));
++
++ return 0;
++}
++EXPORT_SYMBOL(cppi41_mem_rgn_free);
++
++/*
++ * cppi41_tx_ch_init - initialize a CPPI 4.1 Tx channel object
++ *
++ * Verify the channel info (range checking, etc.) and store the channel
++ * information within the object structure.
++ */
++int cppi41_tx_ch_init(struct cppi41_dma_ch_obj *tx_ch_obj,
++ u8 dma_num, u8 ch_num)
++{
++ if (dma_num >= cppi41_num_dma_block ||
++ ch_num >= cppi41_dma_block[dma_num].num_tx_ch)
++ return -EINVAL;
++
++ /* Populate the channel object structure */
++ tx_ch_obj->base_addr = cppi41_dma_block[dma_num].ch_ctrl_stat_base +
++ DMA_CH_TX_GLOBAL_CFG_REG(ch_num);
++ tx_ch_obj->global_cfg = cppi_readl(tx_ch_obj->base_addr);
++ return 0;
++}
++EXPORT_SYMBOL(cppi41_tx_ch_init);
++
++/*
++ * cppi41_rx_ch_init - initialize a CPPI 4.1 Rx channel object
++ *
++ * Verify the channel info (range checking, etc.) and store the channel
++ * information within the object structure.
++ */
++int cppi41_rx_ch_init(struct cppi41_dma_ch_obj *rx_ch_obj,
++ u8 dma_num, u8 ch_num)
++{
++ if (dma_num >= cppi41_num_dma_block ||
++ ch_num >= cppi41_dma_block[dma_num].num_rx_ch)
++ return -EINVAL;
++
++ /* Populate the channel object structure */
++ rx_ch_obj->base_addr = cppi41_dma_block[dma_num].ch_ctrl_stat_base +
++ DMA_CH_RX_GLOBAL_CFG_REG(ch_num);
++ rx_ch_obj->global_cfg = cppi_readl(rx_ch_obj->base_addr);
++ return 0;
++}
++EXPORT_SYMBOL(cppi41_rx_ch_init);
++
++/*
++ * We have to cache the last written Rx/Tx channel global configuration register
++ * value due to its bits other than enable/teardown being write-only. Yet there
++ * is a caveat related to caching the enable bit: this bit may be automatically
++ * cleared as a result of teardown, so we can't trust its cached value!
++ * When modifying the write only register fields, we're making use of the fact
++ * that they read back as zeros, and not clearing them explicitly...
++ */
++
++/*
++ * cppi41_dma_ch_default_queue - set CPPI 4.1 channel default completion queue
++ */
++void cppi41_dma_ch_default_queue(struct cppi41_dma_ch_obj *dma_ch_obj,
++ u8 q_mgr, u16 q_num)
++{
++ u32 val = dma_ch_obj->global_cfg;
++
++ /* Clear the fields to be modified. */
++ val &= ~(DMA_CH_TX_DEFAULT_QMGR_MASK | DMA_CH_TX_DEFAULT_QNUM_MASK |
++ DMA_CH_TX_ENABLE_MASK);
++
++ /* Set the default completion queue. */
++ val |= ((q_mgr << DMA_CH_TX_DEFAULT_QMGR_SHIFT) &
++ DMA_CH_TX_DEFAULT_QMGR_MASK) |
++ ((q_num << DMA_CH_TX_DEFAULT_QNUM_SHIFT) &
++ DMA_CH_TX_DEFAULT_QNUM_MASK);
++
++ /* Get the current state of the enable bit. */
++ dma_ch_obj->global_cfg = val |= cppi_readl(dma_ch_obj->base_addr);
++ cppi_writel(val, dma_ch_obj->base_addr);
++ DBG("Channel global configuration @ %p, value written: %x, "
++ "value read: %x\n", dma_ch_obj->base_addr, val,
++ cppi_readl(dma_ch_obj->base_addr));
++
++}
++EXPORT_SYMBOL(cppi41_dma_ch_default_queue);
++
++/*
++ * cppi41_rx_ch_configure - configure CPPI 4.1 Rx channel
++ */
++void cppi41_rx_ch_configure(struct cppi41_dma_ch_obj *rx_ch_obj,
++ struct cppi41_rx_ch_cfg *cfg)
++{
++ void __iomem *base = rx_ch_obj->base_addr;
++ u32 val = cppi_readl(rx_ch_obj->base_addr);
++
++ val |= ((cfg->sop_offset << DMA_CH_RX_SOP_OFFSET_SHIFT) &
++ DMA_CH_RX_SOP_OFFSET_MASK) |
++ ((cfg->default_desc_type << DMA_CH_RX_DEFAULT_DESC_TYPE_SHIFT) &
++ DMA_CH_RX_DEFAULT_DESC_TYPE_MASK) |
++ ((cfg->retry_starved << DMA_CH_RX_ERROR_HANDLING_SHIFT) &
++ DMA_CH_RX_ERROR_HANDLING_MASK) |
++ ((cfg->rx_queue.q_mgr << DMA_CH_RX_DEFAULT_RQ_QMGR_SHIFT) &
++ DMA_CH_RX_DEFAULT_RQ_QMGR_MASK) |
++ ((cfg->rx_queue.q_num << DMA_CH_RX_DEFAULT_RQ_QNUM_SHIFT) &
++ DMA_CH_RX_DEFAULT_RQ_QNUM_MASK);
++
++ val &= ~(0x7 << DMA_CH_RX_MAX_BUF_CNT_SHIFT);
++ val |= (cfg->rx_max_buf_cnt << DMA_CH_RX_MAX_BUF_CNT_SHIFT);
++
++ rx_ch_obj->global_cfg = val;
++ cppi_writel(val, base);
++ DBG("Rx channel global configuration @ %p, value written: %x, "
++ "value read: %x\n", base, val, cppi_readl(base));
++
++ base -= DMA_CH_RX_GLOBAL_CFG_REG(0);
++
++ /*
++ * Set up the packet configuration register
++ * based on the descriptor type...
++ */
++ switch (cfg->default_desc_type) {
++ case DMA_CH_RX_DEFAULT_DESC_EMBED:
++ val = ((cfg->cfg.embed_pkt.fd_queue.q_mgr <<
++ DMA_CH_RX_EMBED_FDQ_QMGR_SHIFT) &
++ DMA_CH_RX_EMBED_FDQ_QMGR_MASK) |
++ ((cfg->cfg.embed_pkt.fd_queue.q_num <<
++ DMA_CH_RX_EMBED_FDQ_QNUM_SHIFT) &
++ DMA_CH_RX_EMBED_FDQ_QNUM_MASK) |
++ ((cfg->cfg.embed_pkt.num_buf_slot <<
++ DMA_CH_RX_EMBED_NUM_SLOT_SHIFT) &
++ DMA_CH_RX_EMBED_NUM_SLOT_MASK) |
++ ((cfg->cfg.embed_pkt.sop_slot_num <<
++ DMA_CH_RX_EMBED_SOP_SLOT_SHIFT) &
++ DMA_CH_RX_EMBED_SOP_SLOT_MASK);
++
++ cppi_writel(val, base + DMA_CH_RX_EMBED_PKT_CFG_REG_B(0));
++ DBG("Rx channel embedded packet configuration B @ %p, "
++ "value written: %x\n",
++ base + DMA_CH_RX_EMBED_PKT_CFG_REG_B(0), val);
++
++ val = ((cfg->cfg.embed_pkt.free_buf_pool[0].b_pool <<
++ DMA_CH_RX_EMBED_FBP_PNUM_SHIFT(0)) &
++ DMA_CH_RX_EMBED_FBP_PNUM_MASK(0)) |
++ ((cfg->cfg.embed_pkt.free_buf_pool[0].b_mgr <<
++ DMA_CH_RX_EMBED_FBP_BMGR_SHIFT(0)) &
++ DMA_CH_RX_EMBED_FBP_BMGR_MASK(0)) |
++ ((cfg->cfg.embed_pkt.free_buf_pool[1].b_pool <<
++ DMA_CH_RX_EMBED_FBP_PNUM_SHIFT(1)) &
++ DMA_CH_RX_EMBED_FBP_PNUM_MASK(1)) |
++ ((cfg->cfg.embed_pkt.free_buf_pool[1].b_mgr <<
++ DMA_CH_RX_EMBED_FBP_BMGR_SHIFT(1)) &
++ DMA_CH_RX_EMBED_FBP_BMGR_MASK(1)) |
++ ((cfg->cfg.embed_pkt.free_buf_pool[2].b_pool <<
++ DMA_CH_RX_EMBED_FBP_PNUM_SHIFT(2)) &
++ DMA_CH_RX_EMBED_FBP_PNUM_MASK(2)) |
++ ((cfg->cfg.embed_pkt.free_buf_pool[2].b_mgr <<
++ DMA_CH_RX_EMBED_FBP_BMGR_SHIFT(2)) &
++ DMA_CH_RX_EMBED_FBP_BMGR_MASK(2)) |
++ ((cfg->cfg.embed_pkt.free_buf_pool[3].b_pool <<
++ DMA_CH_RX_EMBED_FBP_PNUM_SHIFT(3)) &
++ DMA_CH_RX_EMBED_FBP_PNUM_MASK(3)) |
++ ((cfg->cfg.embed_pkt.free_buf_pool[3].b_mgr <<
++ DMA_CH_RX_EMBED_FBP_BMGR_SHIFT(3)) &
++ DMA_CH_RX_EMBED_FBP_BMGR_MASK(3));
++
++ cppi_writel(val, base + DMA_CH_RX_EMBED_PKT_CFG_REG_A(0));
++ DBG("Rx channel embedded packet configuration A @ %p, "
++ "value written: %x\n",
++ base + DMA_CH_RX_EMBED_PKT_CFG_REG_A(0), val);
++ break;
++ case DMA_CH_RX_DEFAULT_DESC_HOST:
++ val = ((cfg->cfg.host_pkt.fdb_queue[0].q_num <<
++ DMA_CH_RX_HOST_FDQ_QNUM_SHIFT(0)) &
++ DMA_CH_RX_HOST_FDQ_QNUM_MASK(0)) |
++ ((cfg->cfg.host_pkt.fdb_queue[0].q_mgr <<
++ DMA_CH_RX_HOST_FDQ_QMGR_SHIFT(0)) &
++ DMA_CH_RX_HOST_FDQ_QMGR_MASK(0)) |
++ ((cfg->cfg.host_pkt.fdb_queue[1].q_num <<
++ DMA_CH_RX_HOST_FDQ_QNUM_SHIFT(1)) &
++ DMA_CH_RX_HOST_FDQ_QNUM_MASK(1)) |
++ ((cfg->cfg.host_pkt.fdb_queue[1].q_mgr <<
++ DMA_CH_RX_HOST_FDQ_QMGR_SHIFT(1)) &
++ DMA_CH_RX_HOST_FDQ_QMGR_MASK(1));
++
++ cppi_writel(val, base + DMA_CH_RX_HOST_PKT_CFG_REG_A(0));
++ DBG("Rx channel host packet configuration A @ %p, "
++ "value written: %x\n",
++ base + DMA_CH_RX_HOST_PKT_CFG_REG_A(0), val);
++
++ val = ((cfg->cfg.host_pkt.fdb_queue[2].q_num <<
++ DMA_CH_RX_HOST_FDQ_QNUM_SHIFT(2)) &
++ DMA_CH_RX_HOST_FDQ_QNUM_MASK(2)) |
++ ((cfg->cfg.host_pkt.fdb_queue[2].q_mgr <<
++ DMA_CH_RX_HOST_FDQ_QMGR_SHIFT(2)) &
++ DMA_CH_RX_HOST_FDQ_QMGR_MASK(2)) |
++ ((cfg->cfg.host_pkt.fdb_queue[3].q_num <<
++ DMA_CH_RX_HOST_FDQ_QNUM_SHIFT(3)) &
++ DMA_CH_RX_HOST_FDQ_QNUM_MASK(3)) |
++ ((cfg->cfg.host_pkt.fdb_queue[3].q_mgr <<
++ DMA_CH_RX_HOST_FDQ_QMGR_SHIFT(3)) &
++ DMA_CH_RX_HOST_FDQ_QMGR_MASK(3));
++
++ cppi_writel(val, base + DMA_CH_RX_HOST_PKT_CFG_REG_B(0));
++ DBG("Rx channel host packet configuration B @ %p, "
++ "value written: %x\n",
++ base + DMA_CH_RX_HOST_PKT_CFG_REG_B(0), val);
++ break;
++ case DMA_CH_RX_DEFAULT_DESC_MONO:
++ val = ((cfg->cfg.mono_pkt.fd_queue.q_num <<
++ DMA_CH_RX_MONO_FDQ_QNUM_SHIFT) &
++ DMA_CH_RX_MONO_FDQ_QNUM_MASK) |
++ ((cfg->cfg.mono_pkt.fd_queue.q_mgr <<
++ DMA_CH_RX_MONO_FDQ_QMGR_SHIFT) &
++ DMA_CH_RX_MONO_FDQ_QMGR_MASK) |
++ ((cfg->cfg.mono_pkt.sop_offset <<
++ DMA_CH_RX_MONO_SOP_OFFSET_SHIFT) &
++ DMA_CH_RX_MONO_SOP_OFFSET_MASK);
++
++ cppi_writel(val, base + DMA_CH_RX_MONO_PKT_CFG_REG(0));
++ DBG("Rx channel monolithic packet configuration @ %p, "
++ "value written: %x\n",
++ base + DMA_CH_RX_MONO_PKT_CFG_REG(0), val);
++ break;
++ }
++}
++EXPORT_SYMBOL(cppi41_rx_ch_configure);
++
++void cppi41_rx_ch_set_maxbufcnt(struct cppi41_dma_ch_obj *rx_ch_obj,
++ u8 rx_max_buf_cnt)
++{
++ void __iomem *base = rx_ch_obj->base_addr;
++ u32 val = cppi_readl(rx_ch_obj->base_addr);
++
++ val = rx_ch_obj->global_cfg;
++ val &= ~(0x7 << DMA_CH_RX_MAX_BUF_CNT_SHIFT);
++ val |= (rx_max_buf_cnt << DMA_CH_RX_MAX_BUF_CNT_SHIFT);
++
++ rx_ch_obj->global_cfg = val;
++ cppi_writel(val, base);
++
++ DBG("%s: rx-global-cfg @ %p, value written: %x, "
++ "value read: %x\n", __func__, base, val, cppi_readl(base));
++
++}
++EXPORT_SYMBOL(cppi41_rx_ch_set_maxbufcnt);
++/*
++ * cppi41_dma_ch_teardown - teardown a given Tx/Rx channel
++ */
++void cppi41_dma_ch_teardown(struct cppi41_dma_ch_obj *dma_ch_obj)
++{
++ u32 val = cppi_readl(dma_ch_obj->base_addr);
++
++ /* Initiate channel teardown. */
++ val |= dma_ch_obj->global_cfg & ~DMA_CH_TX_ENABLE_MASK;
++ dma_ch_obj->global_cfg = val |= DMA_CH_TX_TEARDOWN_MASK;
++ cppi_writel(val, dma_ch_obj->base_addr);
++ DBG("Tear down channel @ %p, value written: %x, value read: %x\n",
++ dma_ch_obj->base_addr, val, cppi_readl(dma_ch_obj->base_addr));
++}
++EXPORT_SYMBOL(cppi41_dma_ch_teardown);
++
++/*
++ * cppi41_dma_ch_enable - enable Tx/Rx DMA channel in hardware
++ *
++ * Makes the channel ready for data transmission/reception.
++ */
++void cppi41_dma_ch_enable(struct cppi41_dma_ch_obj *dma_ch_obj)
++{
++ u32 val = dma_ch_obj->global_cfg | DMA_CH_TX_ENABLE_MASK;
++
++ /* Teardown bit remains set after completion, so clear it now... */
++ dma_ch_obj->global_cfg = val &= ~DMA_CH_TX_TEARDOWN_MASK;
++ cppi_writel(val, dma_ch_obj->base_addr);
++ DBG("Enable channel @ %p, value written: %x, value read: %x\n",
++ dma_ch_obj->base_addr, val, cppi_readl(dma_ch_obj->base_addr));
++}
++EXPORT_SYMBOL(cppi41_dma_ch_enable);
++
++/*
++ * cppi41_dma_ch_disable - disable Tx/Rx DMA channel in hardware
++ */
++void cppi41_dma_ch_disable(struct cppi41_dma_ch_obj *dma_ch_obj)
++{
++ dma_ch_obj->global_cfg &= ~DMA_CH_TX_ENABLE_MASK;
++ cppi_writel(dma_ch_obj->global_cfg, dma_ch_obj->base_addr);
++ DBG("Disable channel @ %p, value written: %x, value read: %x\n",
++ dma_ch_obj->base_addr, dma_ch_obj->global_cfg,
++ cppi_readl(dma_ch_obj->base_addr));
++}
++EXPORT_SYMBOL(cppi41_dma_ch_disable);
++
++void cppi41_init_teardown_queue(int dma_num)
++{
++ dma_addr_t td_addr;
++ struct cppi41_teardown_desc *curr_td;
++ u32 num_desc = dma_teardown[dma_num].num_desc;
++ int i;
++
++ curr_td = dma_teardown[dma_num].virt_addr;
++ td_addr = dma_teardown[dma_num].phys_addr;
++
++ for (i = 0; i < num_desc; i++) {
++ cppi41_queue_push(&dma_teardown[dma_num].queue_obj, td_addr,
++ sizeof(*curr_td), 0);
++ td_addr += sizeof(*curr_td);
++ }
++}
++EXPORT_SYMBOL(cppi41_init_teardown_queue);
++
++void cppi41_free_teardown_queue(int dma_num)
++{
++ unsigned long td_addr;
++ u32 num_desc = dma_teardown[dma_num].num_desc;
++
++ while (num_desc--) {
++ td_addr = cppi41_queue_pop(&dma_teardown[dma_num].queue_obj);
++
++ if (td_addr == 0)
++ break;
++ }
++}
++EXPORT_SYMBOL(cppi41_free_teardown_queue);
++
++/**
++ * alloc_queue - allocate a queue in the given range
++ * @allocated: pointer to the bitmap of the allocated queues
++ * @excluded: pointer to the bitmap of the queues excluded from allocation
++ * (optional)
++ * @start: starting queue number
++ * @count: number of queues available
++ *
++ * Returns queue number on success, -ENOSPC otherwise.
++ */
++static int alloc_queue(u32 *allocated, const u32 *excluded, unsigned start,
++ unsigned count)
++{
++ u32 bit, mask = 0;
++ int index = -1;
++
++ /*
++ * We're starting the loop as if we've just wrapped around 32 bits
++ * in order to save on preloading the bitmasks.
++ */
++ for (bit = 0; count--; start++, bit <<= 1) {
++ /* Have we just wrapped around 32 bits? */
++ if (!bit) {
++ /* Start over with the next bitmask word */
++ bit = 1;
++ index++;
++ /* Have we just entered the loop? */
++ if (!index) {
++ /* Calculate the starting values */
++ bit <<= start & 0x1f;
++ index = start >> 5;
++ }
++ /*
++ * Load the next word of the allocated bitmask OR'ing
++ * it with the excluded bitmask if it's been passed.
++ */
++ mask = allocated[index];
++ if (excluded != NULL)
++ mask |= excluded[index];
++ }
++ /*
++ * If the bit in the combined bitmask is zero,
++ * we've just found a free queue.
++ */
++ if (!(mask & bit)) {
++ allocated[index] |= bit;
++ return start;
++ }
++ }
++ return -ENOSPC;
++}
++
++/*
++ * cppi41_queue_alloc - allocate a queue of a given type in the queue manager
++ */
++int cppi41_queue_alloc(u8 type, u8 q_mgr, u16 *q_num)
++{
++ int res = -ENOSPC;
++
++ if (q_mgr >= cppi41_num_queue_mgr)
++ return -EINVAL;
++
++ /* Mask out the unsupported queue types */
++ type &= cppi41_queue_mgr[q_mgr].queue_types;
++ /* First see if a free descriptor queue was requested... */
++ if (type & CPPI41_FREE_DESC_QUEUE)
++ res = alloc_queue(allocated_queues[q_mgr], NULL,
++ cppi41_queue_mgr[q_mgr].base_fdq_num, 16);
++
++ /* Then see if a free descriptor/buffer queue was requested... */
++ if (res < 0 && (type & CPPI41_FREE_DESC_BUF_QUEUE))
++ res = alloc_queue(allocated_queues[q_mgr], NULL,
++ cppi41_queue_mgr[q_mgr].base_fdbq_num, 16);
++
++ /* Last see if an unassigned queue was requested... */
++ if (res < 0 && (type & CPPI41_UNASSIGNED_QUEUE))
++ res = alloc_queue(allocated_queues[q_mgr],
++ cppi41_queue_mgr[q_mgr].assigned, 0,
++ cppi41_queue_mgr[q_mgr].num_queue);
++
++ /* See if any queue was allocated... */
++ if (res < 0)
++ return res;
++
++ /* Return the queue allocated */
++ *q_num = res;
++ return 0;
++}
++EXPORT_SYMBOL(cppi41_queue_alloc);
++
++/*
++ * cppi41_queue_free - free the given queue in the queue manager
++ */
++int cppi41_queue_free(u8 q_mgr, u16 q_num)
++{
++ int index = q_num >> 5, bit = 1 << (q_num & 0x1f);
++
++ if (allocated_queues[q_mgr] != NULL) {
++ if (q_mgr >= cppi41_num_queue_mgr ||
++ q_num >= cppi41_queue_mgr[q_mgr].num_queue ||
++ !(allocated_queues[q_mgr][index] & bit))
++ return -EINVAL;
++ allocated_queues[q_mgr][index] &= ~bit;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(cppi41_queue_free);
++
++/*
++ * cppi41_queue_init - initialize a CPPI 4.1 queue object
++ */
++int cppi41_queue_init(struct cppi41_queue_obj *queue_obj, u8 q_mgr, u16 q_num)
++{
++ if (q_mgr >= cppi41_num_queue_mgr ||
++ q_num >= cppi41_queue_mgr[q_mgr].num_queue)
++ return -EINVAL;
++
++ queue_obj->base_addr = cppi41_queue_mgr[q_mgr].q_mgmt_rgn_base +
++ QMGR_QUEUE_STATUS_REG_A(q_num);
++
++ return 0;
++}
++EXPORT_SYMBOL(cppi41_queue_init);
++
++/*
++ * cppi41_queue_push - push a descriptor into the given queue
++ */
++void cppi41_queue_push(const struct cppi41_queue_obj *queue_obj, u32 desc_addr,
++ u32 desc_size, u32 pkt_size)
++{
++ u32 val;
++
++ /*
++ * Write to the tail of the queue.
++ * TODO: Can't think of a reason why a queue to head may be required.
++ * If it is, the API may have to be extended.
++ */
++#if 0
++ /*
++ * Also, can't understand why packet size is required to queue up a
++ * descriptor. The spec says packet size *must* be written prior to
++ * the packet write operation.
++ */
++ if (pkt_size)
++ val = (pkt_size << QMGR_QUEUE_PKT_SIZE_SHIFT) &
++ QMGR_QUEUE_PKT_SIZE_MASK;
++ cppi_writel(val, queue_obj->base_addr + QMGR_QUEUE_REG_C(0));
++#endif
++
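++	/*
++	 * Queue register D packs the descriptor size, expressed in 4-byte
++	 * units beyond the 24-byte minimum, into the QMGR_QUEUE_DESC_SIZE
++	 * field, together with the descriptor address bits selected by
++	 * QMGR_QUEUE_DESC_PTR_MASK.
++	 */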
++ val = (((desc_size - 24) >> (2 - QMGR_QUEUE_DESC_SIZE_SHIFT)) &
++ QMGR_QUEUE_DESC_SIZE_MASK) |
++ (desc_addr & QMGR_QUEUE_DESC_PTR_MASK);
++
++ DBG("Pushing value %x to queue @ %p\n", val, queue_obj->base_addr);
++
++ cppi_writel(val, queue_obj->base_addr + QMGR_QUEUE_REG_D(0));
++}
++EXPORT_SYMBOL(cppi41_queue_push);
++
++/*
++ * cppi41_queue_pop - pop a descriptor from a given queue
++ */
++unsigned long cppi41_queue_pop(const struct cppi41_queue_obj *queue_obj)
++{
++ u32 val = cppi_readl(queue_obj->base_addr + QMGR_QUEUE_REG_D(0));
++
++ DBG("Popping value %x from queue @ %p\n", val, queue_obj->base_addr);
++
++ return val & QMGR_QUEUE_DESC_PTR_MASK;
++}
++EXPORT_SYMBOL(cppi41_queue_pop);
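++
++/*
++ * Typical usage of the queue API above (an illustrative sketch only;
++ * desc_dma and desc_size are placeholders supplied by the caller, and
++ * no code in this file runs this sequence verbatim):
++ *
++ *	struct cppi41_queue_obj qobj;
++ *	u16 q_num;
++ *
++ *	if (!cppi41_queue_alloc(CPPI41_UNASSIGNED_QUEUE, 0, &q_num) &&
++ *	    !cppi41_queue_init(&qobj, 0, q_num)) {
++ *		cppi41_queue_push(&qobj, desc_dma, desc_size, 0);
++ *		...
++ *		cppi41_queue_pop(&qobj);
++ *		cppi41_queue_free(0, q_num);
++ *	}
++ */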
++
++/*
++ * cppi41_get_teardown_info - extract information from a teardown descriptor
++ */
++int cppi41_get_teardown_info(unsigned long addr, u32 *info)
++{
++ struct cppi41_teardown_desc *desc;
++ int dma_num;
++
++ for (dma_num = 0; dma_num < cppi41_num_dma_block; dma_num++)
++ if (addr >= dma_teardown[dma_num].phys_addr &&
++ addr < dma_teardown[dma_num].phys_addr +
++ dma_teardown[dma_num].rgn_size)
++ break;
++
++ if (dma_num == cppi41_num_dma_block)
++ return -EINVAL;
++
++ desc = addr - dma_teardown[dma_num].phys_addr +
++ dma_teardown[dma_num].virt_addr;
++
++ if ((desc->teardown_info & CPPI41_DESC_TYPE_MASK) !=
++ (CPPI41_DESC_TYPE_TEARDOWN << CPPI41_DESC_TYPE_SHIFT))
++ return -EINVAL;
++
++ *info = desc->teardown_info;
++#if 1
++ /* Hardware is not giving the current DMA number as of now. :-/ */
++ *info |= (dma_num << CPPI41_TEARDOWN_DMA_NUM_SHIFT) &
++ CPPI41_TEARDOWN_DMA_NUM_MASK;
++#else
++ dma_num = (desc->teardown_info & CPPI41_TEARDOWN_DMA_NUM_MASK) >>
++ CPPI41_TEARDOWN_DMA_NUM_SHIFT;
++#endif
++
++ cppi41_queue_push(&dma_teardown[dma_num].queue_obj, addr,
++ sizeof(struct cppi41_teardown_desc), 0);
++
++ return 0;
++}
++EXPORT_SYMBOL(cppi41_get_teardown_info);
++
++/*
++ * cppi41_save_context - save register context before going into suspend.
++ */
++void cppi41_save_context(u8 dma_num)
++{
++ const struct cppi41_dma_block *dma_block;
++ struct cppi41_dma_regs *cppi41;
++ struct cppi41_queue_manager *qmgr;
++ void __iomem *q_mgr_regs, *desc_mem_regs;
++ u8 i, q_mgr = 0;
++
++ dma_block = (struct cppi41_dma_block *)&cppi41_dma_block[dma_num];
++ cppi41 = (struct cppi41_dma_regs *)&dma_block->cppi41_regs;
++ qmgr = &cppi41->qmgr;
++ q_mgr_regs = cppi41_queue_mgr[q_mgr].q_mgr_rgn_base;
++ desc_mem_regs = cppi41_queue_mgr[q_mgr].desc_mem_rgn_base;
++
++	/* pop out all teardown descriptors */
++ cppi41_free_teardown_queue(dma_num);
++
++ cppi41->teardn_fdq_ctrl = cppi_readl(dma_block->global_ctrl_base +
++ DMA_TEARDOWN_FREE_DESC_CTRL_REG);
++ cppi41->emulation_ctrl = cppi_readl(dma_block->global_ctrl_base +
++ DMA_EMULATION_CTRL_REG);
++
++ qmgr->link_ram_rgn0_base = cppi_readl(q_mgr_regs +
++ QMGR_LINKING_RAM_RGN0_BASE_REG);
++ qmgr->link_ram_rgn0_size = cppi_readl(q_mgr_regs +
++ QMGR_LINKING_RAM_RGN0_SIZE_REG);
++ qmgr->link_ram_rgn1_base = cppi_readl(q_mgr_regs +
++ QMGR_LINKING_RAM_RGN1_BASE_REG);
++
++ for (i = 0 ; i < 8 ; i++) {
++ qmgr->memr_base[i] = cppi_readl(desc_mem_regs +
++ QMGR_MEM_RGN_BASE_REG(i));
++ qmgr->memr_ctrl[i] = cppi_readl(desc_mem_regs +
++ QMGR_MEM_RGN_CTRL_REG(i));
++ }
++
++ cppi41->sched_ctrl = cppi_readl(dma_block->sched_ctrl_base +
++ DMA_SCHED_CTRL_REG);
++
++}
++EXPORT_SYMBOL(cppi41_save_context);
++
++/*
++ * cppi41_restore_context - restore register context after resume.
++ */
++void cppi41_restore_context(u8 dma_num, u32 *sched_tbl)
++{
++ const struct cppi41_dma_block *dma_block;
++ struct cppi41_dma_regs *cppi41;
++ struct cppi41_queue_manager *qmgr;
++ void __iomem *q_mgr_regs, *desc_mem_regs;
++ unsigned num_reg;
++ u32 val;
++ u8 tbl_size;
++ u8 i, q_mgr = 0;
++
++ dma_block = (struct cppi41_dma_block *)&cppi41_dma_block[dma_num];
++ cppi41 = (struct cppi41_dma_regs *)&dma_block->cppi41_regs;
++ qmgr = &cppi41->qmgr;
++ q_mgr_regs = cppi41_queue_mgr[q_mgr].q_mgr_rgn_base;
++ desc_mem_regs = cppi41_queue_mgr[q_mgr].desc_mem_rgn_base;
++ tbl_size = dma_block->num_max_ch;
++
++ cppi_writel(cppi41->teardn_fdq_ctrl, dma_block->global_ctrl_base +
++ DMA_TEARDOWN_FREE_DESC_CTRL_REG);
++ cppi_writel(cppi41->emulation_ctrl, dma_block->global_ctrl_base +
++ DMA_EMULATION_CTRL_REG);
++
++ cppi_writel(qmgr->link_ram_rgn0_base, q_mgr_regs +
++ QMGR_LINKING_RAM_RGN0_BASE_REG);
++ cppi_writel(qmgr->link_ram_rgn0_size, q_mgr_regs +
++ QMGR_LINKING_RAM_RGN0_SIZE_REG);
++ cppi_writel(qmgr->link_ram_rgn1_base, q_mgr_regs +
++ QMGR_LINKING_RAM_RGN1_BASE_REG);
++
++ for (i = 0 ; i < 8 ; i++) {
++ cppi_writel(qmgr->memr_base[i], desc_mem_regs +
++ QMGR_MEM_RGN_BASE_REG(i));
++ cppi_writel(qmgr->memr_ctrl[i], desc_mem_regs +
++ QMGR_MEM_RGN_CTRL_REG(i));
++ }
++
++ /*
++ * Push all teardown descriptors to the free teardown queue
++ * for the CPPI 4.1 system.
++ */
++ cppi41_init_teardown_queue(dma_num);
++
++ /* Initialize the DMA scheduler. */
++ num_reg = (tbl_size + 3) / 4;
++ for (i = 0; i < num_reg; i++) {
++ val = sched_tbl[i];
++ cppi_writel(val, dma_block->sched_table_base +
++ DMA_SCHED_TABLE_WORD_REG(i));
++ }
++ cppi_writel(cppi41->sched_ctrl, dma_block->sched_ctrl_base +
++ DMA_SCHED_CTRL_REG);
++}
++EXPORT_SYMBOL(cppi41_restore_context);
++
++MODULE_DESCRIPTION("TI CPPI 4.1 support");
++MODULE_AUTHOR("MontaVista Software");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/usb/musb/cppi41.h b/drivers/usb/musb/cppi41.h
+new file mode 100644
+index 0000000..9f0b3ef
+--- /dev/null
++++ b/drivers/usb/musb/cppi41.h
+@@ -0,0 +1,850 @@
++/*
++ * CPPI 4.1 definitions
++ *
++ * Copyright (c) 2008-2009, MontaVista Software, Inc. <source@mvista.com>
++ *
++ * This program is free software; you can distribute it and/or modify it
++ * under the terms of the GNU General Public License (Version 2) as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
++ *
++ */
++
++#include <linux/types.h>
++
++/*
++ * Queue Manager - Control Registers Region
++ */
++#define QMGR_REVISION_REG 0x00 /* Major and minor versions */
++ /* of the module */
++#define QMGR_QUEUE_DIVERSION_REG 0x08 /* Queue Diversion register */
++#define QMGR_FREE_DESC_BUF_STARVED_REG(n) (0x20 + ((n) << 2)) /* Free Desc./ */
++ /* Buffer Starvation Count */
++#define QMGR_FREE_DESC_STARVED_REG(n) (0x30 + ((n) << 2)) /* Free Desc. */
++ /* Starvation Count */
++#define QMGR_LINKING_RAM_RGN0_BASE_REG 0x80 /* Linking RAM Region 0 Base */
++ /* Address */
++#define QMGR_LINKING_RAM_RGN0_SIZE_REG 0x84 /* Linking RAM Region 0 Size */
++#define QMGR_LINKING_RAM_RGN1_BASE_REG 0x88 /* Linking RAM Region 1 Base */
++ /* Address */
++#define QMGR_QUEUE_PENDING_REG(n) (0x90 + ((n) << 2)) /* Pending status */
++ /* for all queues */
++
++/*
++ * Queue Manager - Memory Region Registers
++ */
++#define QMGR_MEM_RGN_BASE_REG(r) (0x00 + ((r) << 4))
++#define QMGR_MEM_RGN_CTRL_REG(r) (0x04 + ((r) << 4))
++
++/* Memory Region R Control Register bits */
++#define QMGR_MEM_RGN_INDEX_SHIFT 16
++#define QMGR_MEM_RGN_INDEX_MASK (0x3fff << QMGR_MEM_RGN_INDEX_SHIFT)
++#define QMGR_MEM_RGN_DESC_SIZE_SHIFT 8
++#define QMGR_MEM_RGN_DESC_SIZE_MASK (0xf << QMGR_MEM_RGN_DESC_SIZE_SHIFT)
++#define QMGR_MEM_RGN_SIZE_SHIFT 0
++#define QMGR_MEM_RGN_SIZE_MASK (7 << QMGR_MEM_RGN_SIZE_SHIFT)
++
++/*
++ * Queue Manager - Queues Region
++ */
++#define QMGR_QUEUE_REG_A(n) (0x00 + ((n) << 4))
++#define QMGR_QUEUE_REG_B(n) (0x04 + ((n) << 4))
++#define QMGR_QUEUE_REG_C(n) (0x08 + ((n) << 4))
++#define QMGR_QUEUE_REG_D(n) (0x0C + ((n) << 4))
++
++/* Queue N Register C bits */
++#define QMGR_QUEUE_HEAD_TAIL_SHIFT 31
++#define QMGR_QUEUE_HEAD_TAIL_MASK (1 << QMGR_QUEUE_HEAD_TAIL_SHIFT)
++#define QMGR_QUEUE_PKT_SIZE_SHIFT 0
++#define QMGR_QUEUE_PKT_SIZE_MASK (0x3fff << QMGR_QUEUE_PKT_SIZE_SHIFT)
++/* Queue N Register D bits */
++#define QMGR_QUEUE_DESC_PTR_SHIFT 5
++#define QMGR_QUEUE_DESC_PTR_MASK (0x7ffffff << QMGR_QUEUE_DESC_PTR_SHIFT)
++#define QMGR_QUEUE_DESC_SIZE_SHIFT 0
++#define QMGR_QUEUE_DESC_SIZE_MASK (0x1f << QMGR_QUEUE_DESC_SIZE_SHIFT)
++
++/*
++ * Queue Manager - Queue Status Region
++ */
++#define QMGR_QUEUE_STATUS_REG_A(n) (0x00 + ((n) << 4))
++#define QMGR_QUEUE_STATUS_REG_B(n) (0x04 + ((n) << 4))
++#define QMGR_QUEUE_STATUS_REG_C(n) (0x08 + ((n) << 4))
++
++/*
++ * DMA Controller - Global Control Registers Region
++ */
++#define DMA_REVISION_REG 0x00 /* Major and minor versions */
++ /* of the module */
++#define DMA_TEARDOWN_FREE_DESC_CTRL_REG 0x04 /* Queue manager and queue */
++ /* number for Teardown free */
++ /* descriptor queue */
++#define DMA_EMULATION_CTRL_REG 0x08 /* Emulation control register */
++
++/* Teardown Free Descriptor Queue Control Register bits */
++#define DMA_TD_DESC_QMGR_SHIFT 12
++#define DMA_TD_DESC_QMGR_MASK (3 << DMA_TD_DESC_QMGR_SHIFT)
++#define DMA_TD_DESC_QNUM_SHIFT 0
++#define DMA_TD_DESC_QNUM_MASK (0xfff << DMA_TD_DESC_QNUM_SHIFT)
++
++/*
++ * DMA Controller - Channel Control / Status Registers Region
++ */
++#define DMA_CH_TX_GLOBAL_CFG_REG(n) (0x00 + ((n) << 5))
++#define DMA_CH_RX_GLOBAL_CFG_REG(n) (0x08 + ((n) << 5))
++#define DMA_CH_RX_HOST_PKT_CFG_REG_A(n) (0x0C + ((n) << 5))
++#define DMA_CH_RX_HOST_PKT_CFG_REG_B(n) (0x10 + ((n) << 5))
++#define DMA_CH_RX_EMBED_PKT_CFG_REG_A(n) (0x14 + ((n) << 5))
++#define DMA_CH_RX_EMBED_PKT_CFG_REG_B(n) (0x18 + ((n) << 5))
++#define DMA_CH_RX_MONO_PKT_CFG_REG(n) (0x1C + ((n) << 5))
++
++/* Tx Channel N Global Configuration Register bits */
++#define DMA_CH_TX_ENABLE_SHIFT 31
++#define DMA_CH_TX_ENABLE_MASK (1 << DMA_CH_TX_ENABLE_SHIFT)
++#define DMA_CH_TX_TEARDOWN_SHIFT 30
++#define DMA_CH_TX_TEARDOWN_MASK (1 << DMA_CH_TX_TEARDOWN_SHIFT)
++#define DMA_CH_TX_DEFAULT_QMGR_SHIFT 12
++#define DMA_CH_TX_DEFAULT_QMGR_MASK (3 << DMA_CH_TX_DEFAULT_QMGR_SHIFT)
++#define DMA_CH_TX_DEFAULT_QNUM_SHIFT 0
++#define DMA_CH_TX_DEFAULT_QNUM_MASK (0xfff << DMA_CH_TX_DEFAULT_QNUM_SHIFT)
++
++/* Rx Channel N Global Configuration Register bits */
++#define DMA_CH_RX_ENABLE_SHIFT 31
++#define DMA_CH_RX_ENABLE_MASK (1 << DMA_CH_RX_ENABLE_SHIFT)
++#define DMA_CH_RX_TEARDOWN_SHIFT 30
++#define DMA_CH_RX_TEARDOWN_MASK (1 << DMA_CH_RX_TEARDOWN_SHIFT)
++#define DMA_CH_RX_ERROR_HANDLING_SHIFT 24
++#define DMA_CH_RX_ERROR_HANDLING_MASK (1 << DMA_CH_RX_ERROR_HANDLING_SHIFT)
++#define DMA_CH_RX_SOP_OFFSET_SHIFT 16
++#define DMA_CH_RX_SOP_OFFSET_MASK (0xff << DMA_CH_RX_SOP_OFFSET_SHIFT)
++#define DMA_CH_RX_DEFAULT_DESC_TYPE_SHIFT 14
++#define DMA_CH_RX_DEFAULT_DESC_TYPE_MASK (3 << \
++ DMA_CH_RX_DEFAULT_DESC_TYPE_SHIFT)
++#define DMA_CH_RX_DEFAULT_DESC_EMBED 0
++#define DMA_CH_RX_DEFAULT_DESC_HOST 1
++#define DMA_CH_RX_DEFAULT_DESC_MONO 2
++#define DMA_CH_RX_DEFAULT_RQ_QMGR_SHIFT 12
++#define DMA_CH_RX_DEFAULT_RQ_QMGR_MASK (3 << DMA_CH_RX_DEFAULT_RQ_QMGR_SHIFT)
++#define DMA_CH_RX_DEFAULT_RQ_QNUM_SHIFT 0
++#define DMA_CH_RX_DEFAULT_RQ_QNUM_MASK (0xfff << \
++ DMA_CH_RX_DEFAULT_RQ_QNUM_SHIFT)
++#define DMA_CH_RX_MAX_BUF_CNT_SHIFT 26
++#define DMA_CH_RX_MAX_BUF_CNT_0 0
++#define DMA_CH_RX_MAX_BUF_CNT_1 1
++#define DMA_CH_RX_MAX_BUF_CNT_2 2
++#define DMA_CH_RX_MAX_BUF_CNT_3 3
++
++/* Rx Channel N Host Packet Configuration Register A/B bits */
++#define DMA_CH_RX_HOST_FDQ_QMGR_SHIFT(n) (12 + 16 * ((n) & 1))
++#define DMA_CH_RX_HOST_FDQ_QMGR_MASK(n) (3 << DMA_CH_RX_HOST_FDQ_QMGR_SHIFT(n))
++#define DMA_CH_RX_HOST_FDQ_QNUM_SHIFT(n) (0 + 16 * ((n) & 1))
++#define DMA_CH_RX_HOST_FDQ_QNUM_MASK(n) (0xfff << \
++ DMA_CH_RX_HOST_FDQ_QNUM_SHIFT(n))
++
++/* Rx Channel N Embedded Packet Configuration Register A bits */
++#define DMA_CH_RX_EMBED_FBP_BMGR_SHIFT(n) (6 + 8 * (n))
++#define DMA_CH_RX_EMBED_FBP_BMGR_MASK(n) (3 << \
++ DMA_CH_RX_EMBED_FBP_BMGR_SHIFT(n))
++#define DMA_CH_RX_EMBED_FBP_PNUM_SHIFT(n) (0 + 8 * (n))
++#define DMA_CH_RX_EMBED_FBP_PNUM_MASK(n) (0x1f << \
++ DMA_CH_RX_EMBED_FBP_PNUM_SHIFT(n))
++
++/* Rx Channel N Embedded Packet Configuration Register B bits */
++#define DMA_CH_RX_EMBED_NUM_SLOT_SHIFT 24
++#define DMA_CH_RX_EMBED_NUM_SLOT_MASK (7 << DMA_CH_RX_EMBED_NUM_SLOT_SHIFT)
++#define DMA_CH_RX_EMBED_SOP_SLOT_SHIFT 16
++#define DMA_CH_RX_EMBED_SOP_SLOT_MASK (7 << DMA_CH_RX_EMBED_SOP_SLOT_SHIFT)
++#define DMA_CH_RX_EMBED_FDQ_QMGR_SHIFT 12
++#define DMA_CH_RX_EMBED_FDQ_QMGR_MASK (3 << DMA_CH_RX_EMBED_FDQ_QMGR_SHIFT)
++#define DMA_CH_RX_EMBED_FDQ_QNUM_SHIFT 0
++#define DMA_CH_RX_EMBED_FDQ_QNUM_MASK (0xfff << \
++ DMA_CH_RX_EMBED_FDQ_QNUM_SHIFT)
++
++/* Rx Channel N Monolithic Packet Configuration Register bits */
++#define DMA_CH_RX_MONO_SOP_OFFSET_SHIFT 16
++#define DMA_CH_RX_MONO_SOP_OFFSET_MASK (0xff << \
++ DMA_CH_RX_MONO_SOP_OFFSET_SHIFT)
++#define DMA_CH_RX_MONO_FDQ_QMGR_SHIFT 12
++#define DMA_CH_RX_MONO_FDQ_QMGR_MASK (3 << DMA_CH_RX_MONO_FDQ_QMGR_SHIFT)
++#define DMA_CH_RX_MONO_FDQ_QNUM_SHIFT 0
++#define DMA_CH_RX_MONO_FDQ_QNUM_MASK (0xfff << DMA_CH_RX_MONO_FDQ_QNUM_SHIFT)
++
++/*
++ * DMA Scheduler - Control Region
++ */
++#define DMA_SCHED_CTRL_REG 0x00
++
++/* DMA Scheduler Control Register bits */
++#define DMA_SCHED_ENABLE_SHIFT 31
++#define DMA_SCHED_ENABLE_MASK (1 << DMA_SCHED_ENABLE_SHIFT)
++#define DMA_SCHED_LAST_ENTRY_SHIFT 0
++#define DMA_SCHED_LAST_ENTRY_MASK (0xff << DMA_SCHED_LAST_ENTRY_SHIFT)
++
++#define CPPI41_TXDMA_MAXLEN (4 * 1024 * 1024 - 1)
++#define CPPI41_RXDMA_MAXLEN (64 * 1024)
++
++/*
++ * Queue Status register
++ */
++#define CPPI41_QSTATUS_REG0 0x90
++#define CPPI41_QSTATUS_REG1 0x94
++#define CPPI41_QSTATUS_REG2 0x98
++#define CPPI41_QSTATUS_REG3 0x9c
++#define CPPI41_QSTATUS_REG4 0xa0
++
++/*
++ * DMA Scheduler - Table Region
++ */
++#define DMA_SCHED_TABLE_WORD_REG(n) ((n) << 2)
++#define MAX_SCHED_TBL_ENTRY 8
++
++/*
++ * CPPI 4.1 Host Packet Descriptor
++ */
++struct cppi41_host_pkt_desc {
++ u32 desc_info; /* Descriptor type, protocol specific word */
++ /* count, packet length */
++ u32 tag_info; /* Source tag (31:16), destination tag (15:0) */
++ u32 pkt_info; /* Packet error state, type, protocol flags, */
++ /* return info, descriptor location */
++ u32 buf_len; /* Number of valid data bytes in the buffer */
++ u32 buf_ptr; /* Pointer to the buffer associated with */
++ /* this descriptor */
++ u32 next_desc_ptr; /* Pointer to the next buffer descriptor */
++ u32 orig_buf_len; /* Original buffer length */
++ u32 orig_buf_ptr; /* Original buffer pointer */
++ u32 stk_comms_info[2]; /* Network stack private communications info */
++};
++
++/*
++ * CPPI 4.1 Host Buffer Descriptor
++ */
++struct cppi41_host_buf_desc {
++ u32 reserved[2];
++ u32 buf_recl_info; /* Return info, descriptor location */
++ u32 buf_len; /* Number of valid data bytes in the buffer */
++ u32 buf_ptr; /* Pointer to the buffer associated with */
++ /* this descriptor */
++ u32 next_desc_ptr; /* Pointer to the next buffer descriptor */
++ u32 orig_buf_len; /* Original buffer length */
++ u32 orig_buf_ptr; /* Original buffer pointer */
++};
++
++#define CPPI41_DESC_TYPE_SHIFT 27
++#define CPPI41_DESC_TYPE_MASK (0x1f << CPPI41_DESC_TYPE_SHIFT)
++#define CPPI41_DESC_TYPE_HOST 16
++#define CPPI41_DESC_TYPE_MONOLITHIC 18
++#define CPPI41_DESC_TYPE_TEARDOWN 19
++#define CPPI41_PROT_VALID_WORD_CNT_SHIFT 22
++#define CPPI41_PROT_VALID_WORD_CNT_MASK (0x1f << CPPI41_PROT_VALID_WORD_CNT_SHIFT)
++#define CPPI41_PKT_LEN_SHIFT 0
++#define CPPI41_PKT_LEN_MASK (0x1fffff << CPPI41_PKT_LEN_SHIFT)
++
++#define CPPI41_PKT_ERROR_SHIFT 31
++#define CPPI41_PKT_ERROR_MASK (1 << CPPI41_PKT_ERROR_SHIFT)
++#define CPPI41_PKT_TYPE_SHIFT 26
++#define CPPI41_PKT_TYPE_MASK (0x1f << CPPI41_PKT_TYPE_SHIFT)
++#define CPPI41_PKT_TYPE_ATM_AAL5 0
++#define CPPI41_PKT_TYPE_ATM_NULL_AAL 1
++#define CPPI41_PKT_TYPE_ATM_OAM 2
++#define CPPI41_PKT_TYPE_ATM_TRANSPARENT 3
++#define CPPI41_PKT_TYPE_EFM 4
++#define CPPI41_PKT_TYPE_USB 5
++#define CPPI41_PKT_TYPE_GENERIC 6
++#define CPPI41_PKT_TYPE_ETHERNET 7
++#define CPPI41_RETURN_POLICY_SHIFT 15
++#define CPPI41_RETURN_POLICY_MASK (1 << CPPI41_RETURN_POLICY_SHIFT)
++#define CPPI41_RETURN_LINKED 0
++#define CPPI41_RETURN_UNLINKED 1
++#define CPPI41_ONCHIP_SHIFT 14
++#define CPPI41_ONCHIP_MASK (1 << CPPI41_ONCHIP_SHIFT)
++#define CPPI41_RETURN_QMGR_SHIFT 12
++#define CPPI41_RETURN_QMGR_MASK (3 << CPPI41_RETURN_QMGR_SHIFT)
++#define CPPI41_RETURN_QNUM_SHIFT 0
++#define CPPI41_RETURN_QNUM_MASK (0xfff << CPPI41_RETURN_QNUM_SHIFT)
++
++#define CPPI41_SRC_TAG_PORT_NUM_SHIFT 27
++#define CPPI41_SRC_TAG_PORT_NUM_MASK (0x1f << CPPI41_SRC_TAG_PORT_NUM_SHIFT)
++#define CPPI41_SRC_TAG_CH_NUM_SHIFT 21
++#define CPPI41_SRC_TAG_CH_NUM_MASK (0x3f << CPPI41_SRC_TAG_CH_NUM_SHIFT)
++#define CPPI41_SRC_TAG_SUB_CH_NUM_SHIFT 16
++#define CPPI41_SRC_TAG_SUB_CH_NUM_MASK (0x1f << \
++ CPPI41_SRC_TAG_SUB_CH_NUM_SHIFT)
++#define CPPI41_DEST_TAG_SHIFT 0
++#define CPPI41_DEST_TAG_MASK (0xffff << CPPI41_DEST_TAG_SHIFT)
++#define CPPI41_PKT_INTR_FLAG (1 << 31)
++
++/*
++ * CPPI 4.1 Teardown Descriptor
++ */
++struct cppi41_teardown_desc {
++ u32 teardown_info; /* Teardown information */
++ u32 reserved[7]; /* 28 byte padding */
++};
++
++#define CPPI41_TEARDOWN_TX_RX_SHIFT 16
++#define CPPI41_TEARDOWN_TX_RX_MASK (1 << CPPI41_TEARDOWN_TX_RX_SHIFT)
++#define CPPI41_TEARDOWN_DMA_NUM_SHIFT 10
++#define CPPI41_TEARDOWN_DMA_NUM_MASK (0x3f << CPPI41_TEARDOWN_DMA_NUM_SHIFT)
++#define CPPI41_TEARDOWN_CHAN_NUM_SHIFT 0
++#define CPPI41_TEARDOWN_CHAN_NUM_MASK (0x3f << CPPI41_TEARDOWN_CHAN_NUM_SHIFT)
++
++#define CPPI41_MAX_MEM_RGN 16
++
++/* CPPI 4.1 configuration for AM3517 */
++#define CPPI41_NUM_QUEUE_MGR 1 /* 4 max */
++#define CPPI41_NUM_DMA_BLOCK 1 /* 64 max */
++#define cppi41_num_queue_mgr CPPI41_NUM_QUEUE_MGR
++#define cppi41_num_dma_block CPPI41_NUM_DMA_BLOCK
++
++/**
++ * struct cppi41_queue_manager - CPPI 4.1 DMA queue manager registers for
++ * context save and restore.
++ */
++struct cppi41_queue_manager {
++ u32 link_ram_rgn0_base;
++ u32 link_ram_rgn0_size;
++ u32 link_ram_rgn1_base;
++
++ u32 memr_base[8];
++ u32 memr_ctrl[8];
++};
++
++/**
++ * struct cppi41_dma_regs - CPPI 4.1 DMA registers for
++ * context save and restore.
++ */
++struct cppi41_dma_regs {
++ u32 teardn_fdq_ctrl;
++ u32 emulation_ctrl;
++
++ /* CPPI DMA scheduler registers */
++ u32 sched_ctrl;
++
++ /* Queue manager registers */
++ struct cppi41_queue_manager qmgr;
++};
++
++/**
++ * struct cppi41_queue - Queue Tuple
++ *
++ * The basic queue tuple in CPPI 4.1 used across all data structures
++ * where a definition of a queue is required.
++ */
++struct cppi41_queue {
++ u8 q_mgr; /* The queue manager number */
++ u16 q_num; /* The queue number */
++};
++
++/**
++ * struct cppi41_buf_pool - Buffer Pool Tuple
++ *
++ * The basic buffer pool tuple in CPPI 4.1 used across all data structures
++ * where a definition of a buffer pool is required.
++ */
++struct cppi41_buf_pool {
++ u8 b_mgr; /* The buffer manager number */
++ u16 b_pool; /* The buffer pool number */
++};
++
++/**
++ * struct cppi41_queue_mgr - Queue Manager information
++ *
++ * Contains the information about the queue manager which should be copied from
++ * the hardware spec as is.
++ */
++struct cppi41_queue_mgr {
++ void __iomem *q_mgr_rgn_base; /* Base address of the Control region. */
++ void __iomem *desc_mem_rgn_base; /* Base address of the descriptor */
++ /* memory region. */
++ void __iomem *q_mgmt_rgn_base; /* Base address of the queues region. */
++ void __iomem *q_stat_rgn_base; /* Base address of the queue status */
++ /* region. */
++ u16 num_queue; /* Number of the queues supported. */
++ u8 queue_types; /* Bitmask of the supported queue types. */
++ u16 base_fdq_num; /* The base free descriptor queue number. */
++ /* If present, there's always 16 such queues. */
++ u16 base_fdbq_num; /* The base free descriptor/buffer queue */
++ /* number. If present, there's always 16 */
++ /* such queues. */
++ const u32 *assigned; /* Pointer to the bitmask of the pre-assigned */
++ /* queues. */
++};
++
++/* Queue type flags */
++#define CPPI41_FREE_DESC_QUEUE 0x01
++#define CPPI41_FREE_DESC_BUF_QUEUE 0x02
++#define CPPI41_UNASSIGNED_QUEUE 0x04
++
++/**
++ * struct cppi41_embed_pkt_cfg - Rx Channel Embedded packet configuration
++ *
++ * An instance of this structure forms part of the Rx channel information
++ * structure.
++ */
++struct cppi41_embed_pkt_cfg {
++ struct cppi41_queue fd_queue; /* Free Descriptor queue.*/
++ u8 num_buf_slot; /* Number of buffer slots in the descriptor */
++ u8 sop_slot_num; /* SOP buffer slot number. */
++ struct cppi41_buf_pool free_buf_pool[4]; /* Free Buffer pool. Element */
++ /* 0 used for the 1st Rx buffer, etc. */
++};
++
++/**
++ * struct cppi41_host_pkt_cfg - Rx Channel Host Packet Configuration
++ *
++ * An instance of this structure forms part of the Rx channel information
++ * structure.
++ */
++struct cppi41_host_pkt_cfg {
++ struct cppi41_queue fdb_queue[4]; /* Free Desc/Buffer queue. Element */
++ /* 0 used for 1st Rx buffer, etc. */
++};
++
++/**
++ * struct cppi41_mono_pkt_cfg - Rx Channel Monolithic Packet Configuration
++ *
++ * An instance of this structure forms part of the Rx channel information
++ * structure.
++ */
++struct cppi41_mono_pkt_cfg {
++ struct cppi41_queue fd_queue; /* Free descriptor queue */
++ u8 sop_offset; /* Number of bytes to skip before writing */
++ /* payload */
++};
++
++enum cppi41_rx_desc_type {
++ cppi41_rx_embed_desc,
++ cppi41_rx_host_desc,
++ cppi41_rx_mono_desc,
++};
++
++/**
++ * struct cppi41_rx_ch_cfg - Rx Channel Configuration
++ *
++ * Must be allocated and filled by the caller of cppi41_rx_ch_configure().
++ *
++ * The same channel can be configured to receive different descriptor type
++ * packets (not simultaneously). When the Rx packets on a port need to be sent
++ * to the SR, the channel's default descriptor type is set to Embedded and the
++ * Rx completion queue is set to the queue which CPU polls for input packets.
++ * When in SR bypass mode, the same channel's default descriptor type will be
++ * set to Host and the Rx completion queue set to one of the queues which host
++ * can get interrupted on (via the Queuing proxy/accumulator). In this example,
++ * the embedded mode configuration fetches free descriptor from the Free
++ * descriptor queue (as defined by struct cppi41_embed_pkt_cfg) and host
++ * mode configuration fetches free descriptors/buffers from the free descriptor/
++ * buffer queue (as defined by struct cppi41_host_pkt_cfg).
++ *
++ * NOTE: There seems to be no separate configuration for teardown completion
++ * descriptor. The assumption is rxQueue tuple is used for this purpose as well.
++ */
++struct cppi41_rx_ch_cfg {
++ enum cppi41_rx_desc_type default_desc_type; /* Describes which queue */
++ /* configuration is used for the free */
++ /* descriptors and/or buffers */
++ u8 sop_offset; /* Number of bytes to skip in SOP buffer */
++ /* before writing payload */
++ u8 retry_starved; /* 0 = Drop packet on descriptor/buffer */
++ /* starvation, 1 = DMA retries FIFO block */
++ /* transfer at a later time */
++ u8 rx_max_buf_cnt; /* The DMA ignores the SOP bit and closes up
++ * a packet after max_buf_cnt buffers have been
++ * filled OR if the EOP field is set in the
++ * info word 0
++ */
++ struct cppi41_queue rx_queue; /* Rx complete packets queue */
++ union {
++ struct cppi41_host_pkt_cfg host_pkt; /* Host packet */
++ /* configuration. This defines where channel */
++ /* picks free descriptors from. */
++ struct cppi41_embed_pkt_cfg embed_pkt; /* Embedded packet */
++ /* configuration. This defines where channel */
++ /* picks free descriptors and buffers */
++ /* from. */
++ struct cppi41_mono_pkt_cfg mono_pkt; /* Monolithic packet */
++ /* configuration. This defines where channel */
++ /* picks free descriptors from. */
++ } cfg; /* Union of packet configuration structures */
++ /* to be filled in depending on the */
++ /* defDescType field. */
++};
++
++/**
++ * struct cppi41_tx_ch - Tx channel information
++ *
++ * NOTE: The queues that feed into the Tx channel are fixed at SoC design time.
++ */
++struct cppi41_tx_ch {
++ u8 port_num; /* Port number. */
++ u8 ch_num; /* Channel number within port. */
++ u8 sub_ch_num; /* Sub-channel number within channel. */
++ u8 num_tx_queue; /* Number of queues from which the channel */
++ /* can feed. */
++ struct cppi41_queue tx_queue[4]; /* List of queues from which the */
++ /* channel can feed. */
++};
++
++/**
++ * struct cppi41_dma_block - CPPI 4.1 DMA configuration
++ *
++ * Configuration information for CPPI DMA functionality. Includes the Global
++ * configuration, Channel configuration, and the Scheduler configuration.
++ */
++struct cppi41_dma_block {
++ void __iomem *global_ctrl_base; /* Base address of the Global Control */
++ /* registers. */
++ void __iomem *ch_ctrl_stat_base; /* Base address of the Channel */
++ /* Control/Status registers. */
++ void __iomem *sched_ctrl_base; /* Base address of the Scheduler */
++ /* Control register. */
++ void __iomem *sched_table_base; /* Base address of the Scheduler */
++ /* Table registers. */
++ u8 num_tx_ch; /* Number of the Tx channels. */
++ u8 num_rx_ch; /* Number of the Rx channels. */
++ u8 num_max_ch; /* maximum dma channels */
++ const struct cppi41_tx_ch *tx_ch_info;
++ struct cppi41_dma_regs cppi41_regs; /* registers to save and restore */
++};
++
++extern struct cppi41_queue_mgr cppi41_queue_mgr[];
++extern struct cppi41_dma_block cppi41_dma_block[];
++
++/**
++ * struct cppi41_dma_ch_obj - CPPI 4.1 DMA Channel object
++ */
++struct cppi41_dma_ch_obj {
++ void __iomem *base_addr; /* The address of the channel global */
++ /* configuration register */
++ u32 global_cfg; /* Tx/Rx global configuration backed-up value */
++};
++
++/**
++ * struct cppi41_queue_obj - CPPI 4.1 queue object
++ */
++struct cppi41_queue_obj {
++ void __iomem *base_addr; /* The base address of the queue management */
++ /* registers */
++};
++
++static inline u32 cppi_readl(const void __iomem *addr)
++ { return readl(addr); }
++static inline void cppi_writel(u32 data, void __iomem *addr)
++ { writel(data, addr); }
++/**
++ * cppi41_queue_mgr_init - CPPI 4.1 queue manager initialization.
++ * @q_mgr: the queue manager to initialize
++ * @rgn0_base: linking RAM region 0 physical address
++ * @rgn0_size: linking RAM region 0 size in 32-bit words (0 to 0x3fff)
++ *
++ * Returns 0 on success, error otherwise.
++ */
++int cppi41_queue_mgr_init(u8 q_mgr, dma_addr_t rgn0_base, u16 rgn0_size);
++
++/**
++ * cppi41_queue_mgr_uninit - CPPI 4.1 queue manager un-initialization.
++ * @q_mgr: the queue manager to un-initialize
++ * Returns 0 on success, error otherwise.
++ */
++int cppi41_queue_mgr_uninit(u8 q_mgr);
++
++/*
++ * CPPI 4.1 Queue Manager Memory Region Allocation and De-allocation APIs.
++ */
++
++/**
++ * cppi41_mem_rgn_alloc - CPPI 4.1 queue manager memory region allocation.
++ * @q_mgr: the queue manager whose memory region to allocate
++ * @rgn_addr: physical address of the memory region
++ * @size_order: descriptor size as a power of two (between 5 and 13)
++ * @num_order: number of descriptors as a power of two (between 5 and 12)
++ * @mem_rgn: pointer to the index of the memory region allocated
++ *
++ * This function allocates a memory region within the queue manager
++ * consisting of descriptors of a particular size and number.
++ *
++ * Returns 0 on success, error otherwise.
++ */
++int cppi41_mem_rgn_alloc(u8 q_mgr, dma_addr_t rgn_addr, u8 size_order,
++ u8 num_order, u8 *mem_rgn);
++
++/**
++ * cppi41_mem_rgn_free - CPPI 4.1 queue manager memory region de-allocation.
++ * @q_mgr: the queue manager whose memory region was allocated
++ * @mem_rgn: index of the memory region
++ *
++ * This function frees the memory region allocated by cppi41_mem_rgn_alloc().
++ *
++ * Returns 0 on success, -EINVAL otherwise.
++ */
++int cppi41_mem_rgn_free(u8 q_mgr, u8 mem_rgn);
++
++/**
++ * cppi41_dma_block_init - CPPI 4.1 DMA block initialization.
++ * @dma_num: number of the DMA block
++ * @q_mgr: the queue manager in which to allocate the free teardown
++ * descriptor queue
++ * @num_order: number of teardown descriptors as a power of two (at least 5)
++ * @sched_tbl: the DMA scheduler table
++ * @tbl_size: number of entries in the DMA scheduler table
++ *
++ * This function initializes the DMA block and its free teardown queue.
++ *
++ * Returns 0 on success, error otherwise.
++ */
++int cppi41_dma_block_init(u8 dma_num, u8 q_mgr, u8 num_order,
++ u32 *sched_tbl, u8 tbl_size);
++
++/**
++ * cppi41_dma_block_uninit - CPPI 4.1 DMA block un-initialization.
++ * @dma_num: number of the DMA block
++ * @q_mgr: the queue manager in which to allocate the free teardown
++ * descriptor queue
++ * @num_order: number of teardown descriptors as a power of two (at least 5)
++ * @sched_tbl: the DMA scheduler table
++ * @tbl_size: number of entries in the DMA scheduler table
++ *
++ * Returns 0 on success, error otherwise.
++ */
++int cppi41_dma_block_uninit(u8 dma_num, u8 q_mgr, u8 num_order,
++ u32 *sched_tbl, u8 tbl_size);
++
++/*
++ * CPPI 4.1 DMA Channel Management APIs
++ */
++
++/**
++ * cppi41_tx_ch_init - initialize CPPI 4.1 transmit channel object
++ * @tx_ch_obj: pointer to Tx channel object
++ * @dma_num: DMA block to which this channel belongs
++ * @ch_num: DMA channel number
++ *
++ * Returns 0 if valid Tx channel, -EINVAL otherwise.
++ */
++int cppi41_tx_ch_init(struct cppi41_dma_ch_obj *tx_ch_obj,
++ u8 dma_num, u8 ch_num);
++
++/**
++ * cppi41_rx_ch_init - initialize CPPI 4.1 receive channel object
++ * @rx_ch_obj: pointer to Rx channel object
++ * @dma_num: DMA block to which this channel belongs
++ * @ch_num: DMA channel number
++ *
++ * Returns 0 if valid Rx channel, -EINVAL otherwise.
++ */
++int cppi41_rx_ch_init(struct cppi41_dma_ch_obj *rx_ch_obj,
++ u8 dma_num, u8 ch_num);
++
++/**
++ * cppi41_dma_ch_default_queue - set CPPI 4.1 channel default completion queue
++ * @dma_ch_obj: pointer to DMA channel object
++ * @q_mgr: default queue manager
++ * @q_num: default queue number
++ *
++ * This function configures the specified channel. The caller is required to
++ * provide the default queue onto which the teardown descriptors will be queued.
++ */
++void cppi41_dma_ch_default_queue(struct cppi41_dma_ch_obj *dma_ch_obj,
++ u8 q_mgr, u16 q_num);
++
++/**
++ * cppi41_rx_ch_configure - configure CPPI 4.1 receive channel
++ * @rx_ch_obj: pointer to Rx channel object
++ * @cfg: pointer to Rx channel configuration
++ *
++ * This function configures and opens the specified Rx channel. The caller
++ * is required to provide channel configuration information by initializing
++ * a struct cppi41_rx_ch_cfg.
++ */
++void cppi41_rx_ch_configure(struct cppi41_dma_ch_obj *rx_ch_obj,
++ struct cppi41_rx_ch_cfg *cfg);
++
++/**
++ * cppi41_rx_ch_set_maxbufcnt - configure max rx buffer count
++ * @rx_ch_obj: pointer to Rx channel object
++ * @rx_max_buf_cnt: maximum Rx buffer count
++ *
++ * This function configures the maximum Rx buffer count in the Rx DMA
++ * channel's global configuration register. The valid rx_max_buf_cnt values
++ * are 0 to 4.
++ */
++void cppi41_rx_ch_set_maxbufcnt(struct cppi41_dma_ch_obj *rx_ch_obj,
++ u8 rx_max_buf_cnt);
++/**
++ * cppi41_dma_ch_enable - enable CPPI 4.1 Tx/Rx DMA channel
++ * @dma_ch_obj: pointer to DMA channel object
++ *
++ * This function enables the specified Tx/Rx channel. The caller is required to
++ * provide a reference to a channel object initialized by an earlier call to
++ * cppi41_tx_ch_init() or cppi41_rx_ch_init(). After the successful completion
++ * of this function, the DMA channel will be active and ready for data transfer.
++ */
++void cppi41_dma_ch_enable(struct cppi41_dma_ch_obj *dma_ch_obj);
++
++/**
++ * cppi41_dma_ch_disable - disable CPPI 4.1 Tx/Rx DMA channel
++ * @dma_ch_obj: pointer to DMA channel object
++ *
++ * This function disables the specified Tx/Rx channel. The caller is required
++ * to provide a reference to a channel object initialized by an earlier call to
++ * cppi41_tx_ch_init() or cppi41_rx_ch_init(). After the successful completion
++ * of this function, the DMA channel will be deactivated.
++ */
++void cppi41_dma_ch_disable(struct cppi41_dma_ch_obj *dma_ch_obj);
++
++/**
++ * cppi41_dma_ch_teardown - tear down CPPI 4.1 Tx/Rx channel
++ * @dma_ch_obj: pointer to DMA channel object
++ *
++ * This function triggers the teardown of the given DMA channel.
++ *
++ * ATTENTION: Channel disable should not be called before the teardown is
++ * completed as a disable will stop the DMA scheduling on the channel resulting
++ * in the teardown complete event not being registered at all.
++ *
++ * NOTE: A successful channel teardown event is reported via queueing of a
++ * teardown descriptor.
++ *
++ * This function just sets up for the teardown of the channel and returns. The
++ * caller must detect the channel teardown event to assume that the channel is
++ * disabled.
++ *
++ * See cppi41_get_teardown_info() for the teardown completion processing.
++ */
++void cppi41_dma_ch_teardown(struct cppi41_dma_ch_obj *dma_ch_obj);
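++
++/*
++ * Illustrative teardown sequence (a sketch only, not part of this API;
++ * ch_obj, td_queue_obj, td_addr and info are caller-side placeholders):
++ *
++ *	cppi41_dma_ch_teardown(&ch_obj);
++ *	do {
++ *		td_addr = cppi41_queue_pop(&td_queue_obj);
++ *	} while (!td_addr);
++ *	cppi41_get_teardown_info(td_addr, &info);
++ *	cppi41_dma_ch_disable(&ch_obj);
++ */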
++
++/*
++ * CPPI 4.1 Queue Allocation and De-allocation APIs.
++ */
++
++/**
++ * cppi41_queue_alloc - allocate CPPI 4.1 queue
++ * @type: queue type bitmask
++ * @q_mgr: queue manager
++ * @q_num: pointer to the queue number
++ *
++ * Returns 0 if queue allocated, error otherwise.
++ */
++int cppi41_queue_alloc(u8 type, u8 q_mgr, u16 *q_num);
++
++/**
++ * cppi41_queue_free - de-allocate CPPI 4.1 queue
++ * @q_mgr: queue manager
++ * @q_num: queue number
++ *
++ * Returns 0 on success, -EINVAL otherwise.
++ */
++int cppi41_queue_free(u8 q_mgr, u16 q_num);
++
++/*
++ * CPPI 4.1 Queue Management APIs
++ */
++
++/**
++ * cppi41_queue_init - initialize CPPI 4.1 queue object
++ * @queue_obj: pointer to the queue object
++ * @q_mgr: queue manager
++ * @q_num: queue number
++ *
++ * Returns 0 if valid queue, -EINVAL otherwise.
++ */
++int cppi41_queue_init(struct cppi41_queue_obj *queue_obj, u8 q_mgr, u16 q_num);
++
++/**
++ * cppi41_queue_push - push to CPPI 4.1 queue
++ * @queue_obj: pointer to the queue object
++ * @desc_addr: descriptor physical address
++ * @desc_size: descriptor size
++ * @pkt_size: packet size
++ *
++ * This function is called to queue a descriptor onto a queue.
++ * NOTE: the pkt_size parameter is optional. Pass 0 if not required.
++ */
++void cppi41_queue_push(const struct cppi41_queue_obj *queue_obj, u32 desc_addr,
++ u32 desc_size, u32 pkt_size);
++
++/**
++ * cppi41_queue_pop - pop from CPPI 4.1 queue
++ * @queue_obj: pointer to the queue object
++ *
++ * This function is called to pop a single descriptor from the queue.
++ *
++ * Returns a packet descriptor's physical address.
++ */
++unsigned long cppi41_queue_pop(const struct cppi41_queue_obj *queue_obj);
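++
++/*
++ * Illustrative push/pop pairing (a sketch only, not part of this API;
++ * q_mgr, q_num, desc_addr and desc_size are caller-side placeholders):
++ *
++ *	struct cppi41_queue_obj q_obj;
++ *
++ *	if (!cppi41_queue_init(&q_obj, q_mgr, q_num)) {
++ *		cppi41_queue_push(&q_obj, desc_addr, desc_size, 0);
++ *		...
++ *		desc_addr = cppi41_queue_pop(&q_obj);
++ *	}
++ */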
++
++/*
++ * CPPI 4.1 Miscellaneous APIs
++ */
++
++/**
++ * cppi41_get_teardown_info - CPPI 4.1 teardown completion processing function
++ *
++ * @addr: physical address of teardown descriptor
++ * @info: pointer to the teardown information word
++ *
++ * This function is called to complete the teardown processing on a channel
++ * and provides teardown information from the teardown descriptor passed to it.
++ * It also recycles the teardown descriptor back to the teardown descriptor
++ * queue.
++ *
++ * Returns 0 if valid descriptor, -EINVAL otherwise.
++ */
++int cppi41_get_teardown_info(unsigned long addr, u32 *info);
++
++/**
++ * cppi41_dma_sched_tbl_init - initialize the DMA scheduler table
++ */
++int cppi41_dma_sched_tbl_init(u8 dma_num, u8 q_mgr,
++ u32 *sched_tbl, u8 tbl_size);
++
++/**
++ * cppi41_schedtbl_add_dma_ch - add a DMA channel to the scheduler table
++ *
++ * @dmanum: DMA block number
++ * @qmgr: queue manager number
++ * @dma_ch: DMA channel number
++ * @is_tx: transmit (is_tx = 1) or receive (is_tx = 0)
++ *
++ * Returns the number of channels in the scheduler table.
++ */
++int cppi41_schedtbl_add_dma_ch(u8 dmanum, u8 qmgr, u8 dma_ch, u8 is_tx);
++
++/**
++ * cppi41_schedtbl_remove_dma_ch - remove a DMA channel from the scheduler table
++ *
++ * @dmanum: DMA block number
++ * @qmgr: queue manager number
++ * @dma_ch: DMA channel number
++ * @is_tx: transmit (is_tx = 1) or receive (is_tx = 0)
++ *
++ * Returns the number of channels in the scheduler table.
++ */
++int cppi41_schedtbl_remove_dma_ch(u8 dmanum, u8 qmgr, u8 dma_ch, u8 is_tx);
++
++/**
++ * cppi41_init_teardown_queue - set up the free teardown descriptor queue
++ */
++void cppi41_init_teardown_queue(int dma_num);
++
++/**
++ * cppi41_free_teardown_queue - drain the free teardown descriptor queue
++ */
++void cppi41_free_teardown_queue(int dma_num);
++
++/**
++ * cppi41_save_context - save register context before suspend
++ */
++void cppi41_save_context(u8 dma_num);
++
++/**
++ * cppi41_restore_context - restore register context after resume
++ */
++void cppi41_restore_context(u8 dma_num, u32 *sched_tbl);
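++
++/*
++ * Illustrative suspend/resume ordering (a sketch only; sched_tbl is assumed
++ * to be the same scheduler table that was passed to cppi41_dma_block_init()):
++ *
++ *	cppi41_save_context(dma_num);                    before suspend
++ *	...
++ *	cppi41_restore_context(dma_num, sched_tbl);      after resume
++ */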
+diff --git a/drivers/usb/musb/cppi41_dma.c b/drivers/usb/musb/cppi41_dma.c
+new file mode 100644
+index 0000000..69a952d
+--- /dev/null
++++ b/drivers/usb/musb/cppi41_dma.c
+@@ -0,0 +1,1596 @@
++/*
++ * Copyright (C) 2005-2006 by Texas Instruments
++ * Copyright (c) 2008, MontaVista Software, Inc. <source@mvista.com>
++ *
++ * This file implements a DMA interface using TI's CPPI 4.1 DMA.
++ *
++ * This program is free software; you can distribute it and/or modify it
++ * under the terms of the GNU General Public License (Version 2) as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
++ *
++ */
++
++#include <linux/errno.h>
++#include <linux/dma-mapping.h>
++#include <linux/module.h>
++
++#include "cppi41.h"
++
++#include "musb_core.h"
++#include "musb_dma.h"
++#include "cppi41_dma.h"
++
++/* Configuration */
++#define USB_CPPI41_DESC_SIZE_SHIFT 6
++#define USB_CPPI41_DESC_ALIGN (1 << USB_CPPI41_DESC_SIZE_SHIFT)
++#define USB_CPPI41_CH_NUM_PD 128 /* 4K bulk data at full speed */
++#define USB_CPPI41_MAX_PD (USB_CPPI41_CH_NUM_PD * (USB_CPPI41_NUM_CH+1))
++
++#undef DEBUG_CPPI_TD
++#undef USBDRV_DEBUG
++
++#ifdef USBDRV_DEBUG
++#define dprintk(x, ...) printk(x, ## __VA_ARGS__)
++#else
++#define dprintk(x, ...)
++#endif
++
++/*
++ * Data structure definitions
++ */
++
++/*
++ * USB Packet Descriptor
++ */
++struct usb_pkt_desc;
++
++struct usb_pkt_desc {
++ /* Hardware descriptor fields from this point */
++ struct cppi41_host_pkt_desc hw_desc; /* 40 bytes */
++ /* Protocol specific data */
++ dma_addr_t dma_addr; /* offs:44 byte */
++ struct usb_pkt_desc *next_pd_ptr; /* offs:48 byte*/
++ u8 ch_num;
++ u8 ep_num;
++ u8 eop;
++ u8 res1; /* offs:52 */
++ u8 res2[12]; /* offs:64 */
++};
++
++/**
++ * struct cppi41_channel - DMA Channel Control Structure
++ *
++ * Using the same for Tx/Rx.
++ */
++struct cppi41_channel {
++ struct dma_channel channel;
++
++ struct cppi41_dma_ch_obj dma_ch_obj; /* DMA channel object */
++ struct cppi41_queue src_queue; /* Tx queue or Rx free descriptor/ */
++ /* buffer queue */
++ struct cppi41_queue_obj queue_obj; /* Tx queue object or Rx free */
++ /* descriptor/buffer queue object */
++
++ u32 tag_info; /* Tx PD Tag Information field */
++
++ /* Which direction of which endpoint? */
++ struct musb_hw_ep *end_pt;
++ u8 transmit;
++ u8 ch_num; /* Channel number of Tx/Rx 0..3 */
++
++ /* DMA mode: "transparent", RNDIS, CDC, or Generic RNDIS */
++ u8 dma_mode;
++ u8 autoreq;
++
++ /* Book keeping for the current transfer request */
++ dma_addr_t start_addr;
++ u32 length;
++ u32 curr_offset;
++ u16 pkt_size;
++ u8 transfer_mode;
++ u8 zlp_queued;
++ u8 inf_mode;
++ u8 tx_complete;
++ u8 hb_mult;
++};
++
++/**
++ * struct cppi41 - CPPI 4.1 DMA Controller Object
++ *
++ * Encapsulates all bookkeeping and data structures pertaining to
++ * the CPPI 4.1 DMA controller.
++ */
++struct cppi41 {
++ struct dma_controller controller;
++ struct musb *musb;
++
++ struct cppi41_channel tx_cppi_ch[USB_CPPI41_NUM_CH];
++ struct cppi41_channel rx_cppi_ch[USB_CPPI41_NUM_CH];
++ struct work_struct txdma_work;
++
++ struct usb_pkt_desc *pd_pool_head; /* Free PD pool head */
++ dma_addr_t pd_mem_phys; /* PD memory physical address */
++ void *pd_mem; /* PD memory pointer */
++ u8 pd_mem_rgn; /* PD memory region number */
++
++ u16 teardownQNum; /* Teardown completion queue number */
++ struct cppi41_queue_obj queue_obj; /* Teardown completion queue */
++ /* object */
++ u32 pkt_info; /* Tx PD Packet Information field */
++ struct usb_cppi41_info *cppi_info; /* cppi channel information */
++ u8 en_bd_intr; /* enable bd interrupt */
++ u32 automode_reg_offs; /* USB_AUTOREQ_REG offset */
++ u32 teardown_reg_offs; /* USB_TEARDOWN_REG offset */
++ u32 bd_size;
++ u8 inf_mode;
++};
++
++struct usb_cppi41_info usb_cppi41_info[2];
++EXPORT_SYMBOL(usb_cppi41_info);
++
++#ifdef DEBUG_CPPI_TD
++static void print_pd_list(struct usb_pkt_desc *pd_pool_head)
++{
++ struct usb_pkt_desc *curr_pd = pd_pool_head;
++ int cnt = 0;
++
++ while (curr_pd != NULL) {
++ if (cnt % 8 == 0)
++ dprintk("\n%02x ", cnt);
++ cnt++;
++ dprintk(" %p", curr_pd);
++ curr_pd = curr_pd->next_pd_ptr;
++ }
++ dprintk("\n");
++}
++#endif
++
++static struct usb_pkt_desc *usb_get_free_pd(struct cppi41 *cppi)
++{
++ struct usb_pkt_desc *free_pd = cppi->pd_pool_head;
++
++ if (free_pd != NULL) {
++ cppi->pd_pool_head = free_pd->next_pd_ptr;
++ free_pd->next_pd_ptr = NULL;
++ }
++ return free_pd;
++}
++
++static void usb_put_free_pd(struct cppi41 *cppi, struct usb_pkt_desc *free_pd)
++{
++ free_pd->next_pd_ptr = cppi->pd_pool_head;
++ cppi->pd_pool_head = free_pd;
++}
++
++/**
++ * cppi41_controller_start - start DMA controller
++ * @controller: the controller
++ *
++ * This function initializes the CPPI 4.1 Tx/Rx channels.
++ */
++static int __devinit cppi41_controller_start(struct dma_controller *controller)
++{
++ struct cppi41 *cppi;
++ struct cppi41_channel *cppi_ch;
++ void __iomem *reg_base;
++ struct usb_pkt_desc *curr_pd;
++ unsigned long pd_addr;
++ int i;
++ struct usb_cppi41_info *cppi_info;
++ struct musb *musb;
++
++ cppi = container_of(controller, struct cppi41, controller);
++ cppi_info = cppi->cppi_info;
++ musb = cppi->musb;
++
++ if (cpu_is_ti816x() || cpu_is_am33xx()) {
++ cppi->automode_reg_offs = TI81XX_USB_AUTOREQ_REG;
++ cppi->teardown_reg_offs = TI81XX_USB_TEARDOWN_REG;
++ } else {
++ cppi->automode_reg_offs = USB_AUTOREQ_REG;
++ cppi->teardown_reg_offs = USB_TEARDOWN_REG;
++ }
++
++ /*
++ * TODO: We may need to check USB_CPPI41_MAX_PD here since CPPI 4.1
++ * requires the descriptor count to be a multiple of 2 ^ 5 (i.e. 32).
++ * Similarly, the descriptor size should also be a multiple of 32.
++ */
++
++ /*
++ * Allocate free packet descriptor pool for all Tx/Rx endpoints --
++ * dma_alloc_coherent() will return a page aligned address, so our
++ * alignment requirement will be honored.
++ */
++ cppi->bd_size = USB_CPPI41_MAX_PD * sizeof(struct usb_pkt_desc);
++ cppi->pd_mem = dma_alloc_coherent(cppi->musb->controller,
++ cppi->bd_size,
++ &cppi->pd_mem_phys,
++ GFP_KERNEL | GFP_DMA);
++ if (cppi->pd_mem == NULL) {
++ dev_dbg(musb->controller, "ERROR: packet descriptor memory allocation failed\n");
++ return 0;
++ }
++
++ if (cppi41_mem_rgn_alloc(cppi_info->q_mgr, cppi->pd_mem_phys,
++ USB_CPPI41_DESC_SIZE_SHIFT,
++ get_count_order(USB_CPPI41_MAX_PD),
++ &cppi->pd_mem_rgn)) {
++ dev_dbg(musb->controller, "ERROR: queue manager memory region allocation "
++ "failed\n");
++ goto free_pds;
++ }
++
++ /* Allocate the teardown completion queue */
++ if (cppi41_queue_alloc(CPPI41_UNASSIGNED_QUEUE,
++ 0, &cppi->teardownQNum)) {
++ dev_dbg(musb->controller, "ERROR: teardown completion queue allocation failed\n");
++ goto free_mem_rgn;
++ }
++ dev_dbg(musb->controller, "Allocated teardown completion queue %d in queue manager 0\n",
++ cppi->teardownQNum);
++
++ if (cppi41_queue_init(&cppi->queue_obj, 0, cppi->teardownQNum)) {
++ dev_dbg(musb->controller, "ERROR: teardown completion queue initialization "
++ "failed\n");
++ goto free_queue;
++ }
++
++ /*
++ * "Slice" PDs one-by-one from the big chunk and
++ * add them to the free pool.
++ */
++ curr_pd = (struct usb_pkt_desc *)cppi->pd_mem;
++ pd_addr = cppi->pd_mem_phys;
++ for (i = 0; i < USB_CPPI41_MAX_PD; i++) {
++ curr_pd->dma_addr = pd_addr;
++
++ usb_put_free_pd(cppi, curr_pd);
++ curr_pd = (struct usb_pkt_desc *)((char *)curr_pd +
++ USB_CPPI41_DESC_ALIGN);
++ pd_addr += USB_CPPI41_DESC_ALIGN;
++ }
++
++ /* Configure the Tx channels */
++ for (i = 0, cppi_ch = cppi->tx_cppi_ch;
++ i < ARRAY_SIZE(cppi->tx_cppi_ch); ++i, ++cppi_ch) {
++ const struct cppi41_tx_ch *tx_info;
++
++ memset(cppi_ch, 0, sizeof(struct cppi41_channel));
++ cppi_ch->transmit = 1;
++ cppi_ch->ch_num = i;
++ cppi_ch->channel.private_data = cppi;
++
++ /*
++ * Extract the CPPI 4.1 DMA Tx channel configuration and
++ * construct/store the Tx PD tag info field for later use...
++ */
++ tx_info = cppi41_dma_block[cppi_info->dma_block].tx_ch_info
++ + cppi_info->ep_dma_ch[i];
++ cppi_ch->src_queue = tx_info->tx_queue[0];
++ cppi_ch->tag_info = (tx_info->port_num <<
++ CPPI41_SRC_TAG_PORT_NUM_SHIFT) |
++ (tx_info->ch_num <<
++ CPPI41_SRC_TAG_CH_NUM_SHIFT) |
++ (tx_info->sub_ch_num <<
++ CPPI41_SRC_TAG_SUB_CH_NUM_SHIFT);
++ }
++
++ /* Configure the Rx channels */
++ for (i = 0, cppi_ch = cppi->rx_cppi_ch;
++ i < ARRAY_SIZE(cppi->rx_cppi_ch); ++i, ++cppi_ch) {
++ memset(cppi_ch, 0, sizeof(struct cppi41_channel));
++ cppi_ch->ch_num = i;
++ cppi_ch->channel.private_data = cppi;
++ }
++
++ /* Construct/store Tx PD packet info field for later use */
++ cppi->pkt_info = (CPPI41_PKT_TYPE_USB << CPPI41_PKT_TYPE_SHIFT) |
++ (CPPI41_RETURN_LINKED << CPPI41_RETURN_POLICY_SHIFT);
++
++ /* Do the necessary hardware configuration to get started */
++ reg_base = cppi->musb->ctrl_base;
++
++ /* Disable auto request mode */
++ musb_writel(reg_base, cppi->automode_reg_offs, 0);
++
++ /* Disable the CDC/RNDIS modes */
++ musb_writel(reg_base, USB_TX_MODE_REG, 0);
++ musb_writel(reg_base, USB_RX_MODE_REG, 0);
++
++ return 1;
++
++ free_queue:
++ if (cppi41_queue_free(0, cppi->teardownQNum))
++ dev_dbg(musb->controller, "ERROR: failed to free teardown completion queue\n");
++
++ free_mem_rgn:
++ if (cppi41_mem_rgn_free(cppi_info->q_mgr, cppi->pd_mem_rgn))
++ dev_dbg(musb->controller, "ERROR: failed to free queue manager memory region\n");
++
++ free_pds:
++ dma_free_coherent(cppi->musb->controller,
++ cppi->bd_size,
++ cppi->pd_mem, cppi->pd_mem_phys);
++
++ return 0;
++}
++
++/**
++ * cppi41_controller_stop - stop DMA controller
++ * @controller: the controller
++ *
++ * De-initialize the DMA Controller as necessary.
++ */
++static int cppi41_controller_stop(struct dma_controller *controller)
++{
++ struct cppi41 *cppi;
++ void __iomem *reg_base;
++ struct usb_cppi41_info *cppi_info;
++ struct musb *musb;
++
++ cppi = container_of(controller, struct cppi41, controller);
++ cppi_info = cppi->cppi_info;
++ musb = cppi->musb;
++
++ /* Free the teardown completion queue */
++ if (cppi41_queue_free(cppi_info->q_mgr, cppi->teardownQNum))
++ dev_dbg(musb->controller, "ERROR: failed to free teardown completion queue\n");
++
++ /*
++ * Free the packet descriptor region allocated
++ * for all Tx/Rx channels.
++ */
++ if (cppi41_mem_rgn_free(cppi_info->q_mgr, cppi->pd_mem_rgn))
++ dev_dbg(musb->controller, "ERROR: failed to free queue manager memory region\n");
++
++ dma_free_coherent(cppi->musb->controller, cppi->bd_size,
++ cppi->pd_mem, cppi->pd_mem_phys);
++
++ cppi->pd_mem = 0;
++ cppi->pd_mem_phys = 0;
++ cppi->pd_pool_head = 0;
++ cppi->bd_size = 0;
++
++ reg_base = cppi->musb->ctrl_base;
++
++ /* Disable auto request mode */
++ musb_writel(reg_base, cppi->automode_reg_offs, 0);
++
++ /* Disable the CDC/RNDIS modes */
++ musb_writel(reg_base, USB_TX_MODE_REG, 0);
++ musb_writel(reg_base, USB_RX_MODE_REG, 0);
++
++ return 1;
++}
++
++/**
++ * cppi41_channel_alloc - allocate a CPPI channel for DMA.
++ * @controller: the controller
++ * @ep: the endpoint
++ * @is_tx: 1 for Tx channel, 0 for Rx channel
++ *
++ * With CPPI, channels are bound to each transfer direction of a non-control
++ * endpoint, so allocating (and deallocating) is mostly a way to notice bad
++ * housekeeping on the software side. We assume the IRQs are always active.
++ */
++static struct dma_channel *cppi41_channel_alloc(struct dma_controller
++ *controller,
++ struct musb_hw_ep *ep, u8 is_tx)
++{
++ struct cppi41 *cppi;
++ struct cppi41_channel *cppi_ch;
++ u32 ch_num, ep_num = ep->epnum;
++ struct usb_cppi41_info *cppi_info;
++ struct musb *musb;
++
++ cppi = container_of(controller, struct cppi41, controller);
++ cppi_info = cppi->cppi_info;
++ musb = cppi->musb;
++
++ /* Remember, ep_num: 1 .. Max_EP, and CPPI ch_num: 0 .. Max_EP - 1 */
++ ch_num = ep_num - 1;
++
++ if (ep_num > USB_CPPI41_NUM_CH) {
++ dev_dbg(musb->controller, "No %cx DMA channel for EP%d\n",
++ is_tx ? 'T' : 'R', ep_num);
++ return NULL;
++ }
++
++ cppi_ch = (is_tx ? cppi->tx_cppi_ch : cppi->rx_cppi_ch) + ch_num;
++
++ /* As of now, just return the corresponding CPPI 4.1 channel handle */
++ if (is_tx) {
++ /* Initialize the CPPI 4.1 Tx DMA channel */
++ if (cppi41_tx_ch_init(&cppi_ch->dma_ch_obj,
++ cppi_info->dma_block,
++ cppi_info->ep_dma_ch[ch_num])) {
++ dev_dbg(musb->controller, "ERROR: cppi41_tx_ch_init failed for "
++ "channel %d\n", ch_num);
++ return NULL;
++ }
++ /*
++ * Teardown descriptors will be pushed to the dedicated
++ * completion queue.
++ */
++ cppi41_dma_ch_default_queue(&cppi_ch->dma_ch_obj,
++ 0, cppi->teardownQNum);
++ } else {
++ struct cppi41_rx_ch_cfg rx_cfg;
++ u8 q_mgr = cppi_info->q_mgr;
++ int i;
++
++ /* Initialize the CPPI 4.1 Rx DMA channel */
++ if (cppi41_rx_ch_init(&cppi_ch->dma_ch_obj,
++ cppi_info->dma_block,
++ cppi_info->ep_dma_ch[ch_num])) {
++ dev_dbg(musb->controller, "ERROR: cppi41_rx_ch_init failed\n");
++ return NULL;
++ }
++
++ if (cppi41_queue_alloc(CPPI41_FREE_DESC_BUF_QUEUE |
++ CPPI41_UNASSIGNED_QUEUE,
++ q_mgr, &cppi_ch->src_queue.q_num)) {
++ dev_dbg(musb->controller, "ERROR: cppi41_queue_alloc failed for "
++ "free descriptor/buffer queue\n");
++ return NULL;
++ }
++ dev_dbg(musb->controller, "Allocated free descriptor/buffer queue %d in "
++ "queue manager %d\n", cppi_ch->src_queue.q_num, q_mgr);
++
++ rx_cfg.default_desc_type = cppi41_rx_host_desc;
++ rx_cfg.sop_offset = 0;
++ rx_cfg.retry_starved = 1;
++ rx_cfg.rx_max_buf_cnt = 0;
++ rx_cfg.rx_queue.q_mgr = cppi_ch->src_queue.q_mgr = q_mgr;
++ rx_cfg.rx_queue.q_num = cppi_info->rx_comp_q[ch_num];
++ for (i = 0; i < 4; i++)
++ rx_cfg.cfg.host_pkt.fdb_queue[i] = cppi_ch->src_queue;
++ cppi41_rx_ch_configure(&cppi_ch->dma_ch_obj, &rx_cfg);
++ }
++
++ /* Initialize the CPPI 4.1 DMA source queue */
++ if (cppi41_queue_init(&cppi_ch->queue_obj, cppi_ch->src_queue.q_mgr,
++ cppi_ch->src_queue.q_num)) {
++ dev_dbg(musb->controller, "ERROR: cppi41_queue_init failed for %s queue",
++ is_tx ? "Tx" : "Rx free descriptor/buffer");
++ if (is_tx == 0 &&
++ cppi41_queue_free(cppi_ch->src_queue.q_mgr,
++ cppi_ch->src_queue.q_num))
++ dev_dbg(musb->controller, "ERROR: failed to free Rx descriptor/buffer "
++ "queue\n");
++ return NULL;
++ }
++
++ /* Enable the DMA channel */
++ cppi41_dma_ch_enable(&cppi_ch->dma_ch_obj);
++
++ if (cppi_ch->end_pt)
++ dev_dbg(musb->controller, "Re-allocating DMA %cx channel %d (%p)\n",
++ is_tx ? 'T' : 'R', ch_num, cppi_ch);
++
++ cppi_ch->end_pt = ep;
++ cppi_ch->ch_num = ch_num;
++ cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
++ cppi_ch->channel.max_len = is_tx ?
++ CPPI41_TXDMA_MAXLEN : CPPI41_RXDMA_MAXLEN;
++
++ dev_dbg(musb->controller, "Allocated DMA %cx channel %d for EP%d\n", is_tx ? 'T' : 'R',
++ ch_num, ep_num);
++
++ return &cppi_ch->channel;
++}
++
++/**
++ * cppi41_channel_release - release a CPPI DMA channel
++ * @channel: the channel
++ */
++static void cppi41_channel_release(struct dma_channel *channel)
++{
++ struct cppi41_channel *cppi_ch;
++
++ /* REVISIT: for paranoia, check state and abort if needed... */
++ cppi_ch = container_of(channel, struct cppi41_channel, channel);
++
++ if (cppi_ch->end_pt == NULL)
++ printk(KERN_INFO "Releasing idle DMA channel %p\n", cppi_ch);
++
++ /* But for now, not its IRQ */
++ cppi_ch->end_pt = NULL;
++ channel->status = MUSB_DMA_STATUS_UNKNOWN;
++
++ cppi41_dma_ch_disable(&cppi_ch->dma_ch_obj);
++
++ /* De-allocate Rx free descriptor/buffer queue */
++ if (cppi_ch->transmit == 0 &&
++ cppi41_queue_free(cppi_ch->src_queue.q_mgr,
++ cppi_ch->src_queue.q_num))
++ printk(KERN_ERR "ERROR: failed to free Rx descriptor/buffer queue\n");
++}
++
++static void cppi41_mode_update(struct cppi41_channel *cppi_ch, u8 mode)
++{
++ if (mode != cppi_ch->dma_mode) {
++ struct cppi41 *cppi = cppi_ch->channel.private_data;
++ void *__iomem reg_base = cppi->musb->ctrl_base;
++ u32 reg_val;
++ u8 ep_num = cppi_ch->ch_num + 1;
++
++ if (cppi_ch->transmit) {
++ reg_val = musb_readl(reg_base, USB_TX_MODE_REG);
++ reg_val &= ~USB_TX_MODE_MASK(ep_num);
++ reg_val |= mode << USB_TX_MODE_SHIFT(ep_num);
++ musb_writel(reg_base, USB_TX_MODE_REG, reg_val);
++ } else {
++ reg_val = musb_readl(reg_base, USB_RX_MODE_REG);
++ reg_val &= ~USB_RX_MODE_MASK(ep_num);
++ reg_val |= mode << USB_RX_MODE_SHIFT(ep_num);
++ musb_writel(reg_base, USB_RX_MODE_REG, reg_val);
++ }
++ cppi_ch->dma_mode = mode;
++ }
++}
++
++/*
++ * CPPI 4.1 Tx:
++ * ============
++ * Tx is a lot more reasonable than Rx: RNDIS mode seems to behave well except
++ * how it handles the exactly-N-packets case. It appears that there's a hiccup
++ * in that case (maybe the DMA completes before a ZLP gets written?) boiling
++ * down to not being able to rely on the XFER DMA writing any terminating zero
++ * length packet before the next transfer is started...
++ *
++ * The generic RNDIS mode does not have this misfeature, so we prefer using it
++ * instead. We then send the terminating ZLP *explicitly* using DMA instead of
++ * doing it by PIO after an IRQ.
++ *
++ */
++
++/**
++ * cppi41_next_tx_segment - DMA write for the next chunk of a buffer
++ * @tx_ch: Tx channel
++ *
++ * Context: controller IRQ-locked
++ */
++static unsigned cppi41_next_tx_segment(struct cppi41_channel *tx_ch)
++{
++ struct cppi41 *cppi = tx_ch->channel.private_data;
++ struct musb *musb = cppi->musb;
++ struct usb_pkt_desc *curr_pd;
++ u32 length = tx_ch->length - tx_ch->curr_offset;
++ u32 pkt_size = tx_ch->pkt_size;
++ unsigned num_pds, n;
++ struct usb_cppi41_info *cppi_info = cppi->cppi_info;
++ u16 q_mgr = cppi_info->q_mgr;
++ u16 tx_comp_q = cppi_info->tx_comp_q[tx_ch->ch_num];
++ u8 en_bd_intr = cppi->en_bd_intr;
++
++ /*
++ * Tx can use the generic RNDIS mode where we can probably fit this
++ * transfer in one PD and one IRQ. The only time we would NOT want
++ * to use it is when the hardware constraints prevent it...
++ */
++ if ((pkt_size & 0x3f) == 0) {
++ num_pds = length ? 1 : 0;
++ cppi41_mode_update(tx_ch, USB_GENERIC_RNDIS_MODE);
++ } else {
++ num_pds = (length + pkt_size - 1) / pkt_size;
++ cppi41_mode_update(tx_ch, USB_TRANSPARENT_MODE);
++ }
++
++ pkt_size = length;
++ /*
++ * If length of transmit buffer is 0 or a multiple of the endpoint size,
++ * then send the zero length packet.
++ */
++ if (!length || (tx_ch->transfer_mode && length % pkt_size == 0))
++ num_pds++;
++
++ dev_dbg(musb->controller, "TX DMA%u, %s, maxpkt %u, %u PDs, addr %#x, len %u\n",
++ tx_ch->ch_num, tx_ch->dma_mode ? "accelerated" : "transparent",
++ pkt_size, num_pds, tx_ch->start_addr + tx_ch->curr_offset, length);
++
++ for (n = 0; n < num_pds; n++) {
++ struct cppi41_host_pkt_desc *hw_desc;
++
++ /* Get Tx host packet descriptor from the free pool */
++ curr_pd = usb_get_free_pd(cppi);
++ if (curr_pd == NULL) {
++ dev_dbg(musb->controller, "No Tx PDs\n");
++ break;
++ }
++
++ if (length < pkt_size)
++ pkt_size = length;
++
++ hw_desc = &curr_pd->hw_desc;
++ hw_desc->desc_info = (CPPI41_DESC_TYPE_HOST <<
++ CPPI41_DESC_TYPE_SHIFT) | pkt_size;
++ hw_desc->tag_info = tx_ch->tag_info;
++ hw_desc->pkt_info = cppi->pkt_info;
++ hw_desc->pkt_info |= ((q_mgr << CPPI41_RETURN_QMGR_SHIFT) |
++ (tx_comp_q << CPPI41_RETURN_QNUM_SHIFT));
++
++ hw_desc->buf_ptr = tx_ch->start_addr + tx_ch->curr_offset;
++ hw_desc->buf_len = pkt_size;
++ hw_desc->next_desc_ptr = 0;
++ hw_desc->orig_buf_len = pkt_size;
++
++ curr_pd->ch_num = tx_ch->ch_num;
++ curr_pd->ep_num = tx_ch->end_pt->epnum;
++
++ tx_ch->curr_offset += pkt_size;
++ length -= pkt_size;
++
++ if (pkt_size == 0)
++ tx_ch->zlp_queued = 1;
++
++ if (en_bd_intr)
++ hw_desc->orig_buf_len |= CPPI41_PKT_INTR_FLAG;
++
++ dev_dbg(musb->controller, "TX PD %p: buf %08x, len %08x, pkt info %08x\n", curr_pd,
++ hw_desc->buf_ptr, hw_desc->buf_len, hw_desc->pkt_info);
++
++ cppi41_queue_push(&tx_ch->queue_obj, curr_pd->dma_addr,
++ USB_CPPI41_DESC_ALIGN, pkt_size);
++ }
++
++ return n;
++}
++
++static void cppi41_autoreq_update(struct cppi41_channel *rx_ch, u8 autoreq)
++{
++ struct cppi41 *cppi = rx_ch->channel.private_data;
++
++ if (is_host_active(cppi->musb) &&
++ autoreq != rx_ch->autoreq) {
++ void *__iomem reg_base = cppi->musb->ctrl_base;
++ u32 reg_val = musb_readl(reg_base, cppi->automode_reg_offs);
++ u8 ep_num = rx_ch->ch_num + 1;
++
++ reg_val &= ~USB_RX_AUTOREQ_MASK(ep_num);
++ reg_val |= autoreq << USB_RX_AUTOREQ_SHIFT(ep_num);
++
++ musb_writel(reg_base, cppi->automode_reg_offs, reg_val);
++ rx_ch->autoreq = autoreq;
++ }
++}
++
++static void cppi41_set_ep_size(struct cppi41_channel *rx_ch, u32 pkt_size)
++{
++ struct cppi41 *cppi = rx_ch->channel.private_data;
++ void *__iomem reg_base = cppi->musb->ctrl_base;
++ u8 ep_num = rx_ch->ch_num + 1;
++ u32 res = pkt_size % 64;
++
++ /* The EP size register must be a multiple of 64 */
++ pkt_size += res ? (64 - res) : res;
++
++ musb_writel(reg_base, USB_GENERIC_RNDIS_EP_SIZE_REG(ep_num), pkt_size);
++}
++
++/*
++ * CPPI 4.1 Rx:
++ * ============
++ * Consider a 1KB bulk Rx buffer in two scenarios: (a) it's fed two 300 byte
++ * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back.
++ * (Full speed transfers have similar scenarios.)
++ *
++ * The correct behavior for Linux is that (a) fills the buffer with 300 bytes,
++ * and the next packet goes into a buffer that's queued later; while (b) fills
++ * the buffer with 1024 bytes. How to do that with accelerated DMA modes?
++ *
++ * Rx queues in RNDIS mode (one single BD) handle (a) correctly but (b) loses
++ * BADLY because nothing (!) happens when that second packet fills the buffer,
++ * much less when a third one arrives -- which makes it not a "true" RNDIS mode.
++ * In the RNDIS protocol short-packet termination is optional, and it's fine if
++ * the peripherals (not hosts!) pad the messages out to end of buffer. Standard
++ * PCI host controller DMA descriptors implement that mode by default... which
++ * is no accident.
++ *
++ * Generic RNDIS mode is the only way to reliably make both cases work. This
++ * mode is identical to the "normal" RNDIS mode except for the case where the
++ * last packet of the segment matches the max USB packet size -- in this case,
++ * the packet will be closed when a value (0x10000 max) in the Generic RNDIS
++ * EP Size register is reached. This mode will work for the network drivers
++ * (CDC/RNDIS) as well as for the mass storage drivers where there is no short
++ * packet.
++ *
++ * BUT we can only use non-transparent modes when USB packet size is a multiple
++ * of 64 bytes. Let's see what happens when this is not the case...
++ *
++ * Rx queues (2 BDs with 512 bytes each) have converse problems to RNDIS mode:
++ * (b) is handled right but (a) loses badly. DMA doesn't stop after receiving
++ * a short packet and processes both of those PDs; so both packets are loaded
++ * into the buffer (with 212 byte gap between them), and the next buffer queued
++ * will NOT get its 300 bytes of data. Even in the case when there should be
++ * no short packets (URB_SHORT_NOT_OK is set), queueing several packets in the
++ * host mode doesn't win us anything since we have to manually "prod" the Rx
++ * process after each packet is received by setting ReqPkt bit in endpoint's
++ * RXCSR; in the peripheral mode without short packets, queueing could be used
++ * BUT we'll have to *teardown* the channel if a short packet still arrives in
++ * the peripheral mode, and to "collect" the left-over packet descriptors from
++ * the free descriptor/buffer queue in both cases...
++ *
++ * One BD at a time is the only way to make both cases work reliably, with
++ * software handling both cases correctly, at the significant penalty of needing
++ * an IRQ per packet. (The lack of I/O overlap can be slightly ameliorated by
++ * enabling double buffering.)
++ *
++ * There seems to be no way to identify for sure the cases where the CDC mode
++ * is appropriate...
++ *
++ */
++
++/**
++ * cppi41_next_rx_segment - DMA read for the next chunk of a buffer
++ * @rx_ch: Rx channel
++ *
++ * Context: controller IRQ-locked
++ *
++ * NOTE: In the transparent mode, we have to queue one packet at a time since:
++ * - we must avoid starting reception of another packet after receiving
++ * a short packet;
++ * - in host mode we have to set ReqPkt bit in the endpoint's RXCSR after
++ * receiving each packet but the last one... ugly!
++ */
++static unsigned cppi41_next_rx_segment(struct cppi41_channel *rx_ch)
++{
++ struct cppi41 *cppi = rx_ch->channel.private_data;
++ struct musb *musb = cppi->musb;
++ struct usb_pkt_desc *curr_pd;
++ struct cppi41_host_pkt_desc *hw_desc;
++ u32 length = rx_ch->length - rx_ch->curr_offset;
++ u32 pkt_size = rx_ch->pkt_size;
++ u32 max_rx_transfer_size = 64 * 1024;
++	u32 i, n_bd, pkt_len;
++ struct usb_gadget_driver *gadget_driver;
++ u8 en_bd_intr = cppi->en_bd_intr, mode;
++
++ if (is_peripheral_active(cppi->musb)) {
++		/* TODO: temporary fix for CDC/RNDIS, which needs to be in
++		 * GENERIC_RNDIS mode.  Without this, the RNDIS gadget takes
++		 * more than 2000 ms for 64-byte pings.
++		 */
++ gadget_driver = cppi->musb->gadget_driver;
++
++ pkt_len = rx_ch->pkt_size;
++ mode = USB_GENERIC_RNDIS_MODE;
++ if (!strcmp(gadget_driver->driver.name, "g_file_storage") ||
++ !strcmp(gadget_driver->driver.name, "g_mass_storage")) {
++ if (cppi->inf_mode && length > pkt_len) {
++ pkt_len = 0;
++ length = length - rx_ch->pkt_size;
++ cppi41_rx_ch_set_maxbufcnt(&rx_ch->dma_ch_obj,
++ DMA_CH_RX_MAX_BUF_CNT_1);
++ rx_ch->inf_mode = 1;
++ } else {
++ max_rx_transfer_size = rx_ch->pkt_size;
++ mode = USB_TRANSPARENT_MODE;
++ }
++ } else
++ if (rx_ch->length < max_rx_transfer_size)
++ pkt_len = rx_ch->length;
++
++ if (mode != USB_TRANSPARENT_MODE)
++ cppi41_set_ep_size(rx_ch, pkt_len);
++ cppi41_mode_update(rx_ch, mode);
++ } else {
++ /*
++ * Rx can use the generic RNDIS mode where we can
++ * probably fit this transfer in one PD and one IRQ
++ * (or two with a short packet).
++ */
++ if ((pkt_size & 0x3f) == 0) {
++ cppi41_mode_update(rx_ch, USB_GENERIC_RNDIS_MODE);
++ cppi41_autoreq_update(rx_ch, USB_AUTOREQ_ALL_BUT_EOP);
++
++ pkt_size = (length > 0x10000) ? 0x10000 : length;
++ cppi41_set_ep_size(rx_ch, pkt_size);
++ } else {
++ cppi41_mode_update(rx_ch, USB_TRANSPARENT_MODE);
++ cppi41_autoreq_update(rx_ch, USB_NO_AUTOREQ);
++ max_rx_transfer_size = rx_ch->hb_mult * rx_ch->pkt_size;
++ }
++ }
++
++ dev_dbg(musb->controller, "RX DMA%u, %s, maxpkt %u, addr %#x, rec'd %u/%u\n",
++ rx_ch->ch_num, rx_ch->dma_mode ? "accelerated" : "transparent",
++ pkt_size, rx_ch->start_addr + rx_ch->curr_offset,
++ rx_ch->curr_offset, rx_ch->length);
++
++	/* Calculate the number of BDs required */
++ n_bd = (length + max_rx_transfer_size - 1)/max_rx_transfer_size;
++
++ for (i = 0; i < n_bd ; ++i) {
++ /* Get Rx packet descriptor from the free pool */
++ curr_pd = usb_get_free_pd(cppi);
++ if (curr_pd == NULL) {
++ /* Shouldn't ever happen! */
++ dev_dbg(musb->controller, "No Rx PDs\n");
++ goto sched;
++ }
++
++ pkt_len =
++ (length > max_rx_transfer_size) ? max_rx_transfer_size : length;
++
++ hw_desc = &curr_pd->hw_desc;
++ hw_desc->desc_info = (CPPI41_DESC_TYPE_HOST <<
++ CPPI41_DESC_TYPE_SHIFT);
++ hw_desc->orig_buf_ptr = rx_ch->start_addr + rx_ch->curr_offset;
++ hw_desc->orig_buf_len = pkt_len;
++
++		/* The buf_len field of the buffer descriptor is updated
++		 * by the DMA once reception of the data has completed.
++		 */
++ hw_desc->buf_len = 0;
++
++ curr_pd->ch_num = rx_ch->ch_num;
++ curr_pd->ep_num = rx_ch->end_pt->epnum;
++
++ curr_pd->eop = (length -= pkt_len) ? 0 : 1;
++ rx_ch->curr_offset += pkt_len;
++
++ if (en_bd_intr)
++ hw_desc->orig_buf_len |= CPPI41_PKT_INTR_FLAG;
++ /*
++ * Push the free Rx packet descriptor
++ * to the free descriptor/buffer queue.
++ */
++ cppi41_queue_push(&rx_ch->queue_obj, curr_pd->dma_addr,
++ USB_CPPI41_DESC_ALIGN, 0);
++ }
++
++sched:
++ /*
++ * HCD arranged ReqPkt for the first packet.
++ * We arrange it for all but the last one.
++ */
++ if (is_host_active(cppi->musb) && rx_ch->channel.actual_len) {
++ void __iomem *epio = rx_ch->end_pt->regs;
++ u16 csr = musb_readw(epio, MUSB_RXCSR);
++
++ csr |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
++ musb_writew(epio, MUSB_RXCSR, csr);
++ }
++
++	/* Enable the scheduler if it is not already enabled */
++ if (is_peripheral_active(cppi->musb) && (n_bd > 0))
++ cppi41_schedtbl_add_dma_ch(0, 0, rx_ch->ch_num, 0);
++ return 1;
++}
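The n_bd computation above is plain ceiling division; assuming <linux/kernel.h> is included, the same value could be expressed with the standard helper. For instance, a 100 KB request under the default 64 KB per-BD cap needs two BDs:

	n_bd = DIV_ROUND_UP(length, max_rx_transfer_size);
	/* e.g. DIV_ROUND_UP(100 * 1024, 64 * 1024) == 2 */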
++
++/**
++ * cppi41_channel_program - program channel for data transfer
++ * @channel: the channel
++ * @maxpacket: max packet size
++ * @mode: for Rx, 1 unless the USB protocol driver promised to treat
++ * all short reads as errors and kick in high level fault recovery;
++ * for Tx, 0 unless the protocol driver _requires_ short-packet
++ * termination mode
++ * @dma_addr: DMA address of buffer
++ * @length: length of buffer
++ *
++ * Context: controller IRQ-locked
++ */
++static int cppi41_channel_program(struct dma_channel *channel, u16 maxpacket,
++ u8 mode, dma_addr_t dma_addr, u32 length)
++{
++ struct cppi41_channel *cppi_ch;
++ unsigned queued;
++
++ cppi_ch = container_of(channel, struct cppi41_channel, channel);
++
++ switch (channel->status) {
++ case MUSB_DMA_STATUS_BUS_ABORT:
++ case MUSB_DMA_STATUS_CORE_ABORT:
++ /* Fault IRQ handler should have handled cleanup */
++ WARNING("%cx DMA%d not cleaned up after abort!\n",
++ cppi_ch->transmit ? 'T' : 'R', cppi_ch->ch_num);
++ break;
++ case MUSB_DMA_STATUS_BUSY:
++ WARNING("Program active channel? %cx DMA%d\n",
++ cppi_ch->transmit ? 'T' : 'R', cppi_ch->ch_num);
++ break;
++ case MUSB_DMA_STATUS_UNKNOWN:
++ WARNING("%cx DMA%d not allocated!\n",
++ cppi_ch->transmit ? 'T' : 'R', cppi_ch->ch_num);
++ return 0;
++ case MUSB_DMA_STATUS_FREE:
++ break;
++ }
++
++ channel->status = MUSB_DMA_STATUS_BUSY;
++
++ /* Set the transfer parameters, then queue up the first segment */
++ cppi_ch->start_addr = dma_addr;
++ cppi_ch->curr_offset = 0;
++ cppi_ch->hb_mult = (maxpacket >> 11) & 0x03;
++ cppi_ch->pkt_size = maxpacket & 0x7ff;
++ cppi_ch->length = length;
++ cppi_ch->transfer_mode = mode;
++ cppi_ch->zlp_queued = 0;
++ cppi_ch->channel.actual_len = 0;
++
++ /* Tx or Rx channel? */
++ if (cppi_ch->transmit)
++ queued = cppi41_next_tx_segment(cppi_ch);
++ else
++ queued = cppi41_next_rx_segment(cppi_ch);
++
++ return queued > 0;
++}
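The hb_mult/pkt_size split in cppi41_channel_program() follows the USB 2.0 wMaxPacketSize encoding: bits 10:0 carry the packet size and bits 12:11 the number of additional transactions per microframe. A worked example with an illustrative value:

	/* wMaxPacketSize 0x1400: high-bandwidth isochronous endpoint */
	hb_mult  = (0x1400 >> 11) & 0x03;	/* = 2 extra transactions */
	pkt_size =  0x1400 & 0x7ff;		/* = 1024 bytes */
	/* i.e. up to three 1024-byte transactions per microframe */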
++
++static struct usb_pkt_desc *usb_get_pd_ptr(struct cppi41 *cppi,
++ unsigned long pd_addr)
++{
++ if (pd_addr >= cppi->pd_mem_phys && pd_addr < cppi->pd_mem_phys +
++ USB_CPPI41_MAX_PD * USB_CPPI41_DESC_ALIGN)
++ return pd_addr - cppi->pd_mem_phys + cppi->pd_mem;
++ else
++ return NULL;
++}
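usb_get_pd_ptr() translates a physical descriptor address popped from a queue back into its kernel-virtual packet descriptor, but only when the address falls inside the driver's own PD pool; anything outside that window (a teardown descriptor, for example) yields NULL. The teardown paths below rely on exactly that NULL to tell packet completions apart from teardown completions.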
++
++static int usb_check_teardown(struct cppi41_channel *cppi_ch,
++ unsigned long pd_addr)
++{
++ u32 info;
++ struct cppi41 *cppi = cppi_ch->channel.private_data;
++ struct usb_cppi41_info *cppi_info = cppi->cppi_info;
++ struct musb *musb = cppi->musb;
++
++ if (cppi41_get_teardown_info(pd_addr, &info)) {
++ dev_dbg(musb->controller, "ERROR: not a teardown descriptor\n");
++ return 0;
++ }
++
++ if ((info & CPPI41_TEARDOWN_TX_RX_MASK) ==
++ (!cppi_ch->transmit << CPPI41_TEARDOWN_TX_RX_SHIFT) &&
++ (info & CPPI41_TEARDOWN_DMA_NUM_MASK) ==
++ (cppi_info->dma_block << CPPI41_TEARDOWN_DMA_NUM_SHIFT) &&
++ (info & CPPI41_TEARDOWN_CHAN_NUM_MASK) ==
++ (cppi_info->ep_dma_ch[cppi_ch->ch_num] <<
++ CPPI41_TEARDOWN_CHAN_NUM_SHIFT))
++ return 1;
++
++ dev_dbg(musb->controller, "ERROR: unexpected values in teardown descriptor\n");
++ return 0;
++}
++
++/*
++ * We can't handle the channel teardown via the default completion queue while
++ * the controller is IRQ-locked, so we use the dedicated teardown completion
++ * queue, which we can simply poll for a teardown descriptor without
++ * interfering with the Tx completion queue processing.
++ */
++static void usb_tx_ch_teardown(struct cppi41_channel *tx_ch)
++{
++ struct cppi41 *cppi = tx_ch->channel.private_data;
++ struct musb *musb = cppi->musb;
++ void __iomem *reg_base = musb->ctrl_base;
++ u32 td_reg, timeout = 0xfffff;
++ u8 ep_num = tx_ch->ch_num + 1;
++ unsigned long pd_addr;
++ struct cppi41_queue_obj tx_queue_obj;
++ struct usb_cppi41_info *cppi_info;
++
++ /* Initiate teardown for Tx DMA channel */
++ cppi41_dma_ch_teardown(&tx_ch->dma_ch_obj);
++
++ /* Wait for a descriptor to be queued and pop it... */
++ do {
++ td_reg = musb_readl(reg_base, cppi->teardown_reg_offs);
++ td_reg |= USB_TX_TDOWN_MASK(ep_num);
++ musb_writel(reg_base, cppi->teardown_reg_offs, td_reg);
++
++ pd_addr = cppi41_queue_pop(&cppi->queue_obj);
++ } while (!pd_addr && timeout--);
++
++ if (pd_addr) {
++
++ dev_dbg(musb->controller, "Descriptor (%08lx) popped from teardown completion "
++ "queue\n", pd_addr);
++
++ if (usb_check_teardown(tx_ch, pd_addr)) {
++ dev_dbg(musb->controller, "Teardown Desc (%lx) rcvd\n", pd_addr);
++ } else
++			ERR("Invalid PD (%08lx) popped from teardown "
++			    "completion queue\n", pd_addr);
++ } else {
++ if (timeout <= 0)
++ ERR("Teardown Desc not rcvd\n");
++ }
++
++	/* Read the Tx completion queue and remove
++	 * any completed BDs left there.
++	 */
++ cppi_info = cppi->cppi_info;
++ if (cppi41_queue_init(&tx_queue_obj, cppi_info->q_mgr,
++ cppi_info->tx_comp_q[tx_ch->ch_num])) {
++ ERR("ERROR: cppi41_queue_init failed for "
++ "Tx completion queue");
++ return;
++ }
++
++ while ((pd_addr = cppi41_queue_pop(&tx_queue_obj)) != 0) {
++ struct usb_pkt_desc *curr_pd;
++
++ curr_pd = usb_get_pd_ptr(cppi, pd_addr);
++ if (curr_pd == NULL) {
++ ERR("Invalid PD popped from Tx completion queue\n");
++ continue;
++ }
++
++ dev_dbg(musb->controller, "Tx-PD(%p) popped from completion queue\n", curr_pd);
++ dev_dbg(musb->controller, "ch(%d)epnum(%d)len(%d)\n", curr_pd->ch_num,
++ curr_pd->ep_num, curr_pd->hw_desc.buf_len);
++
++ usb_put_free_pd(cppi, curr_pd);
++ }
++}
++
++/*
++ * For Rx DMA channels, the situation is more complex: there's only a single
++ * completion queue for all our needs, so we have to temporarily redirect the
++ * completed descriptors to our teardown completion queue, with a possibility
++ * of a completed packet landing there as well...
++ */
++static void usb_rx_ch_teardown(struct cppi41_channel *rx_ch)
++{
++ struct cppi41 *cppi = rx_ch->channel.private_data;
++ struct musb *musb = cppi->musb;
++ struct usb_cppi41_info *cppi_info = cppi->cppi_info;
++ u32 timeout = 0xfffff, pd_addr;
++ struct cppi41_queue_obj rx_queue_obj;
++
++ cppi41_dma_ch_default_queue(&rx_ch->dma_ch_obj, 0, cppi->teardownQNum);
++
++ /* Initiate teardown for Rx DMA channel */
++ cppi41_dma_ch_teardown(&rx_ch->dma_ch_obj);
++
++ do {
++ struct usb_pkt_desc *curr_pd;
++ unsigned long pd_addr;
++
++ /* Wait for a descriptor to be queued and pop it... */
++ do {
++ pd_addr = cppi41_queue_pop(&cppi->queue_obj);
++ } while (!pd_addr && timeout--);
++
++ if (timeout <= 0 || !pd_addr) {
++ ERR("teardown Desc not found\n");
++ break;
++ }
++
++ dev_dbg(musb->controller, "Descriptor (%08lx) popped from teardown completion "
++ "queue\n", pd_addr);
++
++ /*
++ * We might have popped a completed Rx PD, so check if the
++ * physical address is within the PD region first. If it's
++ * not the case, it must be a teardown descriptor...
++		 */
++ curr_pd = usb_get_pd_ptr(cppi, pd_addr);
++ if (curr_pd == NULL) {
++ if (usb_check_teardown(rx_ch, pd_addr))
++ break;
++ continue;
++ }
++
++ /* Paranoia: check if PD is from the right channel... */
++ if (curr_pd->ch_num != rx_ch->ch_num) {
++ ERR("Unexpected channel %d in Rx PD\n",
++ curr_pd->ch_num);
++ continue;
++ }
++
++ /* Extract the buffer length from the completed PD */
++ rx_ch->channel.actual_len += curr_pd->hw_desc.buf_len;
++
++ /*
++ * Return Rx PDs to the software list --
++ * this is protected by critical section.
++ */
++ usb_put_free_pd(cppi, curr_pd);
++ } while (0);
++
++	/* Read the Rx completion queue and remove
++	 * any completed BDs left there.
++	 */
++ if (cppi41_queue_init(&rx_queue_obj, cppi_info->q_mgr,
++ cppi_info->rx_comp_q[rx_ch->ch_num])) {
++ ERR("ERROR: cppi41_queue_init failed for "
++ "Rx completion queue");
++ return;
++ }
++
++ while ((pd_addr = cppi41_queue_pop(&rx_queue_obj)) != 0) {
++ struct usb_pkt_desc *curr_pd;
++
++ curr_pd = usb_get_pd_ptr(cppi, pd_addr);
++ if (curr_pd == NULL) {
++ ERR("Invalid PD popped from Rx completion queue\n");
++ continue;
++ }
++
++ dev_dbg(musb->controller, "Rx-PD(%p) popped from completion queue\n", curr_pd);
++ dev_dbg(musb->controller, "ch(%d)epnum(%d)len(%d)\n", curr_pd->ch_num,
++ curr_pd->ep_num, curr_pd->hw_desc.buf_len);
++
++ usb_put_free_pd(cppi, curr_pd);
++ }
++
++ /* Now restore the default Rx completion queue... */
++ cppi41_dma_ch_default_queue(&rx_ch->dma_ch_obj, cppi_info->q_mgr,
++ cppi_info->rx_comp_q[rx_ch->ch_num]);
++}
++
++/*
++ * cppi41_channel_abort
++ *
++ * Context: controller IRQ-locked, endpoint selected.
++ */
++static int cppi41_channel_abort(struct dma_channel *channel)
++{
++ struct cppi41 *cppi;
++ struct cppi41_channel *cppi_ch;
++ struct musb *musb;
++ void __iomem *reg_base, *epio;
++ unsigned long pd_addr;
++ u32 csr, td_reg;
++ u8 ch_num, ep_num;
++
++ cppi_ch = container_of(channel, struct cppi41_channel, channel);
++ ch_num = cppi_ch->ch_num;
++ cppi = cppi_ch->channel.private_data;
++ musb = cppi->musb;
++
++ switch (channel->status) {
++ case MUSB_DMA_STATUS_BUS_ABORT:
++ case MUSB_DMA_STATUS_CORE_ABORT:
++ /* From Rx or Tx fault IRQ handler */
++ case MUSB_DMA_STATUS_BUSY:
++ /* The hardware needs shutting down... */
++ dprintk("%s: DMA busy, status = %x\n",
++ __func__, channel->status);
++ break;
++ case MUSB_DMA_STATUS_UNKNOWN:
++ dev_dbg(musb->controller, "%cx DMA%d not allocated\n",
++ cppi_ch->transmit ? 'T' : 'R', ch_num);
++ /* FALLTHROUGH */
++ case MUSB_DMA_STATUS_FREE:
++ return 0;
++ }
++
++ reg_base = musb->ctrl_base;
++ epio = cppi_ch->end_pt->regs;
++ ep_num = ch_num + 1;
++
++#ifdef DEBUG_CPPI_TD
++ printk("Before teardown:");
++ print_pd_list(cppi->pd_pool_head);
++#endif
++
++ if (cppi_ch->transmit) {
++ dprintk("Tx channel teardown, cppi_ch = %p\n", cppi_ch);
++
++ /* Tear down Tx DMA channel */
++ usb_tx_ch_teardown(cppi_ch);
++
++ /* Issue CPPI FIFO teardown for Tx channel */
++ td_reg = musb_readl(reg_base, cppi->teardown_reg_offs);
++ td_reg |= USB_TX_TDOWN_MASK(ep_num);
++ musb_writel(reg_base, cppi->teardown_reg_offs, td_reg);
++
++ /* Flush FIFO of the endpoint */
++ csr = musb_readw(epio, MUSB_TXCSR);
++ csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_H_WZC_BITS;
++ musb_writew(epio, MUSB_TXCSR, csr);
++ musb_writew(epio, MUSB_TXCSR, csr);
++ cppi_ch->tx_complete = 0;
++ } else { /* Rx */
++ dprintk("Rx channel teardown, cppi_ch = %p\n", cppi_ch);
++
++ /* Flush FIFO of the endpoint */
++ csr = musb_readw(epio, MUSB_RXCSR);
++ csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_H_WZC_BITS;
++ musb_writew(epio, MUSB_RXCSR, csr);
++ musb_writew(epio, MUSB_RXCSR, csr);
++
++ /* Issue CPPI FIFO teardown for Rx channel */
++ td_reg = musb_readl(reg_base, cppi->teardown_reg_offs);
++ td_reg |= USB_RX_TDOWN_MASK(ep_num);
++ musb_writel(reg_base, cppi->teardown_reg_offs, td_reg);
++
++ /* Tear down Rx DMA channel */
++ usb_rx_ch_teardown(cppi_ch);
++
++ /*
++ * NOTE: docs don't guarantee any of this works... we expect
++ * that if the USB core stops telling the CPPI core to pull
++ * more data from it, then it'll be safe to flush current Rx
++ * DMA state iff any pending FIFO transfer is done.
++ */
++
++ /* For host, ensure ReqPkt is never set again */
++ cppi41_autoreq_update(cppi_ch, USB_NO_AUTOREQ);
++
++ /* For host, clear (just) ReqPkt at end of current packet(s) */
++ if (is_host_active(cppi->musb))
++ csr &= ~MUSB_RXCSR_H_REQPKT;
++ csr |= MUSB_RXCSR_H_WZC_BITS;
++
++ /* Clear DMA enable */
++ csr &= ~MUSB_RXCSR_DMAENAB;
++ musb_writew(epio, MUSB_RXCSR, csr);
++
++ /* Flush the FIFO of endpoint once again */
++ csr = musb_readw(epio, MUSB_RXCSR);
++ csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_H_WZC_BITS;
++ musb_writew(epio, MUSB_RXCSR, csr);
++
++ udelay(50);
++ }
++
++ /*
++ * There might be PDs in the Rx/Tx source queue that were not consumed
++ * by the DMA controller -- they need to be recycled properly.
++ */
++ while ((pd_addr = cppi41_queue_pop(&cppi_ch->queue_obj)) != 0) {
++ struct usb_pkt_desc *curr_pd;
++
++ curr_pd = usb_get_pd_ptr(cppi, pd_addr);
++ if (curr_pd == NULL) {
++ ERR("Invalid PD popped from source queue\n");
++ continue;
++ }
++
++ /*
++ * Return Rx/Tx PDs to the software list --
++ * this is protected by critical section.
++ */
++ dprintk("Returning PD %p to the free PD list\n", curr_pd);
++ usb_put_free_pd(cppi, curr_pd);
++ }
++
++#ifdef DEBUG_CPPI_TD
++ printk("After teardown:");
++ print_pd_list(cppi->pd_pool_head);
++#endif
++
++ /* Re-enable the DMA channel */
++ cppi41_dma_ch_enable(&cppi_ch->dma_ch_obj);
++
++ channel->status = MUSB_DMA_STATUS_FREE;
++
++ return 0;
++}
++
++void txdma_completion_work(struct work_struct *data)
++{
++ struct cppi41 *cppi = container_of(data, struct cppi41, txdma_work);
++ struct cppi41_channel *tx_ch;
++ struct musb *musb = cppi->musb;
++ unsigned index;
++ u8 resched = 0;
++ unsigned long flags;
++
++ while (1) {
++ for (index = 0; index < USB_CPPI41_NUM_CH; index++) {
++ void __iomem *epio;
++ u16 csr;
++
++ tx_ch = &cppi->tx_cppi_ch[index];
++ spin_lock_irqsave(&musb->lock, flags);
++ if (tx_ch->tx_complete) {
++				/* Sometimes an EP can unregister from a DMA
++				 * channel while data is still in the FIFO,
++				 * likely because a proper abort was not
++				 * performed before taking such a step.
++				 * Protect against such cases.
++				 */
++				if (!tx_ch->end_pt) {
++					tx_ch->tx_complete = 0;
++					/* don't leave musb->lock held */
++					spin_unlock_irqrestore(&musb->lock,
++							       flags);
++					continue;
++				}
++
++ epio = tx_ch->end_pt->regs;
++ csr = musb_readw(epio, MUSB_TXCSR);
++
++ if (csr & (MUSB_TXCSR_TXPKTRDY |
++ MUSB_TXCSR_FIFONOTEMPTY)) {
++ resched = 1;
++ } else {
++ tx_ch->channel.status =
++ MUSB_DMA_STATUS_FREE;
++ tx_ch->tx_complete = 0;
++ musb_dma_completion(musb, index+1, 1);
++ }
++ }
++ spin_unlock_irqrestore(&musb->lock, flags);
++
++ if (!resched)
++ cond_resched();
++ }
++
++ if (resched) {
++ resched = 0;
++ cond_resched();
++ } else {
++			return;
++ }
++ }
++
++}
++
++/**
++ * cppi41_dma_controller_create -
++ * instantiate an object representing DMA controller.
++ */
++struct dma_controller * __devinit
++cppi41_dma_controller_create(struct musb *musb, void __iomem *mregs)
++{
++ struct cppi41 *cppi;
++
++ cppi = kzalloc(sizeof *cppi, GFP_KERNEL);
++ if (!cppi)
++ return NULL;
++
++ /* Initialize the CPPI 4.1 DMA controller structure */
++ cppi->musb = musb;
++ cppi->controller.start = cppi41_controller_start;
++ cppi->controller.stop = cppi41_controller_stop;
++ cppi->controller.channel_alloc = cppi41_channel_alloc;
++ cppi->controller.channel_release = cppi41_channel_release;
++ cppi->controller.channel_program = cppi41_channel_program;
++ cppi->controller.channel_abort = cppi41_channel_abort;
++	cppi->cppi_info = (struct usb_cppi41_info *)&usb_cppi41_info[musb->id];
++ cppi->en_bd_intr = cppi->cppi_info->bd_intr_ctrl;
++ INIT_WORK(&cppi->txdma_work, txdma_completion_work);
++
++ /* enable infinite mode only for ti81xx silicon rev2 */
++ if (cpu_is_am33xx() || cpu_is_ti816x()) {
++ dev_dbg(musb->controller, "cppi41dma supports infinite mode\n");
++ cppi->inf_mode = 1;
++ }
++
++ return &cppi->controller;
++}
++EXPORT_SYMBOL(cppi41_dma_controller_create);
++
++/**
++ * cppi41_dma_controller_destroy -
++ * destroy a previously instantiated DMA controller
++ * @controller: the controller
++ */
++void cppi41_dma_controller_destroy(struct dma_controller *controller)
++{
++ struct cppi41 *cppi;
++
++ cppi = container_of(controller, struct cppi41, controller);
++
++ /* Free the CPPI object */
++ kfree(cppi);
++}
++EXPORT_SYMBOL(cppi41_dma_controller_destroy);
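A glue layer would presumably hook these two exported helpers into its musb_platform_ops, mirroring what the davinci_ops hunk later in this patch does for the older CPPI DMA helpers. A hypothetical sketch (the ops variable itself is made up for illustration; the two field names come from that hunk):

	static const struct musb_platform_ops example_cppi41_glue_ops = {
		/* .init, .exit, .read_fifo, .write_fifo, ... omitted */
		.dma_controller_create	= cppi41_dma_controller_create,
		.dma_controller_destroy	= cppi41_dma_controller_destroy,
	};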
++
++static void usb_process_tx_queue(struct cppi41 *cppi, unsigned index)
++{
++ struct cppi41_queue_obj tx_queue_obj;
++ unsigned long pd_addr;
++ struct usb_cppi41_info *cppi_info = cppi->cppi_info;
++ struct musb *musb = cppi->musb;
++
++ if (cppi41_queue_init(&tx_queue_obj, cppi_info->q_mgr,
++ cppi_info->tx_comp_q[index])) {
++ dev_dbg(musb->controller, "ERROR: cppi41_queue_init failed for "
++ "Tx completion queue");
++ return;
++ }
++
++ while ((pd_addr = cppi41_queue_pop(&tx_queue_obj)) != 0) {
++ struct usb_pkt_desc *curr_pd;
++ struct cppi41_channel *tx_ch;
++ u8 ch_num, ep_num;
++ u32 length;
++
++ curr_pd = usb_get_pd_ptr(cppi, pd_addr);
++ if (curr_pd == NULL) {
++ ERR("Invalid PD popped from Tx completion queue\n");
++ continue;
++ }
++
++ /* Extract the data from received packet descriptor */
++ ch_num = curr_pd->ch_num;
++ ep_num = curr_pd->ep_num;
++ length = curr_pd->hw_desc.buf_len;
++
++ tx_ch = &cppi->tx_cppi_ch[ch_num];
++ tx_ch->channel.actual_len += length;
++
++ /*
++ * Return Tx PD to the software list --
++ * this is protected by critical section
++ */
++ usb_put_free_pd(cppi, curr_pd);
++
++ if ((tx_ch->curr_offset < tx_ch->length) ||
++ (tx_ch->transfer_mode && !tx_ch->zlp_queued))
++ cppi41_next_tx_segment(tx_ch);
++ else if (tx_ch->channel.actual_len >= tx_ch->length) {
++ /*
++			 * We can get the Tx DMA completion interrupt while
++			 * data is still sitting in the FIFO, not yet moved
++			 * out onto the USB bus.  Programming the next request
++			 * at that point would flush out the old data in the
++			 * FIFO and break USB functionality; so far we have
++			 * observed such failures with iperf.
++ */
++ tx_ch->tx_complete = 1;
++ schedule_work(&cppi->txdma_work);
++ }
++ }
++}
++
++static void usb_process_rx_queue(struct cppi41 *cppi, unsigned index)
++{
++ struct cppi41_queue_obj rx_queue_obj;
++ unsigned long pd_addr;
++ struct usb_cppi41_info *cppi_info = cppi->cppi_info;
++ struct musb *musb = cppi->musb;
++ u8 en_bd_intr = cppi->en_bd_intr;
++
++ if (cppi41_queue_init(&rx_queue_obj, cppi_info->q_mgr,
++ cppi_info->rx_comp_q[index])) {
++ dev_dbg(musb->controller, "ERROR: cppi41_queue_init failed for Rx queue\n");
++ return;
++ }
++
++ while ((pd_addr = cppi41_queue_pop(&rx_queue_obj)) != 0) {
++ struct usb_pkt_desc *curr_pd;
++ struct cppi41_channel *rx_ch;
++ u8 ch_num, ep_num;
++ u32 length = 0, orig_buf_len, timeout = 50;
++
++ curr_pd = usb_get_pd_ptr(cppi, pd_addr);
++ if (curr_pd == NULL) {
++ ERR("Invalid PD popped from Rx completion queue\n");
++ continue;
++ }
++
++		/* This delay is required to work around a DMA race condition:
++		 * software may read the buffer descriptor before the DMA has
++		 * updated it, since the DMA's writes to the descriptor can
++		 * still be pending in the interconnect bridge.
++ */
++ while (timeout--) {
++ length = curr_pd->hw_desc.desc_info &
++ CPPI41_PKT_LEN_MASK;
++ if (length != 0)
++ break;
++ udelay(1);
++ }
++
++ if (length == 0)
++			ERR("Race condition: rxBD read before being updated by dma\n");
++
++ /* Extract the data from received packet descriptor */
++ ch_num = curr_pd->ch_num;
++ ep_num = curr_pd->ep_num;
++
++ dev_dbg(musb->controller, "Rx complete: dma channel(%d) ep%d len %d timeout %d\n",
++ ch_num, ep_num, length, (50-timeout));
++
++ rx_ch = &cppi->rx_cppi_ch[ch_num];
++ rx_ch->channel.actual_len += length;
++
++ if (curr_pd->eop) {
++ curr_pd->eop = 0;
++			/* Disable the Rx DMA scheduler */
++ if (is_peripheral_active(cppi->musb) && !cppi->inf_mode)
++ cppi41_schedtbl_remove_dma_ch(0, 0, ch_num, 0);
++ }
++
++ /*
++ * Return Rx PD to the software list --
++ * this is protected by critical section
++ */
++ usb_put_free_pd(cppi, curr_pd);
++
++ orig_buf_len = curr_pd->hw_desc.orig_buf_len;
++ if (en_bd_intr)
++ orig_buf_len &= ~CPPI41_PKT_INTR_FLAG;
++
++ if (unlikely(rx_ch->channel.actual_len >= rx_ch->length ||
++ length < orig_buf_len)) {
++
++#if defined(CONFIG_SOC_OMAPTI81XX) || defined(CONFIG_SOC_OMAPAM33XX)
++ struct musb_hw_ep *ep;
++ u8 isoc, next_seg = 0;
++
++ /* Workaround for early rx completion of
++ * cppi41 dma in Generic RNDIS mode for ti81xx
++ */
++ if (is_host_enabled(cppi->musb)) {
++ u32 pkt_size = rx_ch->pkt_size;
++ ep = cppi->musb->endpoints + ep_num;
++ isoc = musb_readb(ep->regs, MUSB_RXTYPE);
++ isoc = (isoc >> 4) & 0x1;
++
++ if (!isoc
++ && (rx_ch->dma_mode == USB_GENERIC_RNDIS_MODE)
++ && (rx_ch->channel.actual_len < rx_ch->length)
++ && !(rx_ch->channel.actual_len % pkt_size))
++ next_seg = 1;
++ }
++ if (next_seg) {
++ rx_ch->curr_offset = rx_ch->channel.actual_len;
++ cppi41_next_rx_segment(rx_ch);
++ } else
++#endif
++ {
++ rx_ch->channel.status = MUSB_DMA_STATUS_FREE;
++
++ if (rx_ch->inf_mode) {
++ cppi41_rx_ch_set_maxbufcnt(
++ &rx_ch->dma_ch_obj, 0);
++ rx_ch->inf_mode = 0;
++ }
++ /* Rx completion routine callback */
++ musb_dma_completion(cppi->musb, ep_num, 0);
++ }
++ } else {
++ if (is_peripheral_active(cppi->musb) &&
++ ((rx_ch->length - rx_ch->curr_offset) > 0))
++ cppi41_next_rx_segment(rx_ch);
++ }
++ }
++}
++
++/*
++ * cppi41_completion - handle interrupts from the Tx/Rx completion queues
++ *
++ * NOTE: since we have to manually prod the Rx process in the transparent mode,
++ * we certainly want to handle the Rx queues first.
++ */
++void cppi41_completion(struct musb *musb, u32 rx, u32 tx)
++{
++ struct cppi41 *cppi;
++ unsigned index;
++
++ cppi = container_of(musb->dma_controller, struct cppi41, controller);
++
++ /* Process packet descriptors from the Rx queues */
++ for (index = 0; rx != 0; rx >>= 1, index++)
++ if (rx & 1)
++ usb_process_rx_queue(cppi, index);
++
++ /* Process packet descriptors from the Tx completion queues */
++ for (index = 0; tx != 0; tx >>= 1, index++)
++ if (tx & 1)
++ usb_process_tx_queue(cppi, index);
++}
++EXPORT_SYMBOL(cppi41_completion);
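For example, a call with rx = 0x5 (binary 101) drains Rx completion queues 0 and 2, in that order, before any Tx completion queue is touched, matching the ordering note above.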
++
++MODULE_DESCRIPTION("CPPI4.1 dma controller driver for musb");
++MODULE_LICENSE("GPL v2");
++
++static int __init cppi41_dma_init(void)
++{
++ return 0;
++}
++module_init(cppi41_dma_init);
++
++static void __exit cppi41_dma__exit(void)
++{
++}
++module_exit(cppi41_dma__exit);
+diff --git a/drivers/usb/musb/cppi41_dma.h b/drivers/usb/musb/cppi41_dma.h
+new file mode 100644
+index 0000000..fd746c3
+--- /dev/null
++++ b/drivers/usb/musb/cppi41_dma.h
+@@ -0,0 +1,55 @@
++/*
++ * Copyright (C) 2005-2006 by Texas Instruments
++ * Copyright (c) 2008, MontaVista Software, Inc. <source@mvista.com>
++ *
++ * This program is free software; you can distribute it and/or modify it
++ * under the terms of the GNU General Public License (Version 2) as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
++ *
++ */
++
++#ifndef _CPPI41_DMA_H_
++#define _CPPI41_DMA_H_
++#include <plat/usb.h>
++
++/**
++ * struct usb_cppi41_info - CPPI 4.1 USB implementation details
++ * @dma_block: DMA block number
++ * @ep_dma_ch: DMA channel numbers used for EPs 1 .. Max_EP
++ * @q_mgr: queue manager number
++ * @num_tx_comp_q: number of the Tx completion queues
++ * @num_rx_comp_q: number of the Rx queues
++ * @tx_comp_q: pointer to the list of the Tx completion queue numbers
++ * @rx_comp_q: pointer to the list of the Rx queue numbers
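++ * @bd_intr_ctrl: set to request a completion interrupt per buffer descriptor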
++ */
++struct usb_cppi41_info {
++ u8 dma_block;
++ u8 ep_dma_ch[USB_CPPI41_NUM_CH];
++ u8 q_mgr;
++ u8 num_tx_comp_q;
++ u8 num_rx_comp_q;
++ u16 *tx_comp_q;
++ u16 *rx_comp_q;
++ u8 bd_intr_ctrl;
++};
++
++extern struct usb_cppi41_info usb_cppi41_info[];
++
++/**
++ * cppi41_completion - Tx/Rx completion queue interrupt handling hook
++ * @musb: the controller
++ * @rx: bitmask having bit N set if Rx queue N is not empty
++ * @tx: bitmask having bit N set if Tx completion queue N is not empty
++ */
++void cppi41_completion(struct musb *musb, u32 rx, u32 tx);
++
++#endif /* _CPPI41_DMA_H_ */
+diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
+index 318fb4e..94db2e9 100644
+--- a/drivers/usb/musb/cppi_dma.c
++++ b/drivers/usb/musb/cppi_dma.c
+@@ -104,7 +104,7 @@ static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
+ musb_writel(&tx->tx_complete, 0, ptr);
+ }
+
+-static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
++static void __devinit cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
+ {
+ int j;
+
+@@ -149,7 +149,7 @@ static void cppi_pool_free(struct cppi_channel *c)
+ c->last_processed = NULL;
+ }
+
+-static int __init cppi_controller_start(struct dma_controller *c)
++static int __devinit cppi_controller_start(struct dma_controller *c)
+ {
+ struct cppi *controller;
+ void __iomem *tibase;
+@@ -359,8 +359,9 @@ cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
+ {
+ void __iomem *base = c->controller->mregs;
+ struct cppi_rx_stateram __iomem *rx = c->state_ram;
++ struct musb *musb = c->controller->musb;
+
+- musb_ep_select(base, c->index + 1);
++ musb_ep_select(musb, base, c->index + 1);
+
+ dev_dbg(c->controller->musb->controller,
+ "RX DMA%d%s: %d left, csr %04x, "
+@@ -390,8 +391,9 @@ cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
+ {
+ void __iomem *base = c->controller->mregs;
+ struct cppi_tx_stateram __iomem *tx = c->state_ram;
++ struct musb *musb = c->controller->musb;
+
+- musb_ep_select(base, c->index + 1);
++ musb_ep_select(musb, base, c->index + 1);
+
+ dev_dbg(c->controller->musb->controller,
+ "TX DMA%d%s: csr %04x, "
+@@ -513,7 +515,7 @@ static inline int cppi_autoreq_update(struct cppi_channel *rx,
+ if (!(val & MUSB_RXCSR_H_REQPKT)) {
+ val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
+ musb_writew(regs, MUSB_RXCSR, val);
+- /* flush writebufer */
++ /* flush writebuffer */
+ val = musb_readw(regs, MUSB_RXCSR);
+ }
+ }
+@@ -1117,7 +1119,7 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
+ */
+ WARN_ON(rx->head);
+ }
+- musb_ep_select(cppi->mregs, rx->index + 1);
++ musb_ep_select(cppi->musb, cppi->mregs, rx->index + 1);
+ csr = musb_readw(regs, MUSB_RXCSR);
+ if (csr & MUSB_RXCSR_DMAENAB) {
+ dev_dbg(musb->controller, "list%d %p/%p, last %llx%s, csr %04x\n",
+@@ -1315,8 +1317,8 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
+ }
+
+ /* Instantiate a software object representing a DMA controller. */
+-struct dma_controller *__init
+-dma_controller_create(struct musb *musb, void __iomem *mregs)
++struct dma_controller *__devinit
++cppi_dma_controller_create(struct musb *musb, void __iomem *mregs)
+ {
+ struct cppi *controller;
+ struct device *dev = musb->controller;
+@@ -1355,7 +1357,7 @@ dma_controller_create(struct musb *musb, void __iomem *mregs)
+ if (irq > 0) {
+ if (request_irq(irq, cppi_interrupt, 0, "cppi-dma", musb)) {
+ dev_err(dev, "request_irq %d failed!\n", irq);
+- dma_controller_destroy(&controller->controller);
++ cppi_dma_controller_destroy(&controller->controller);
+ return NULL;
+ }
+ controller->irq = irq;
+@@ -1363,11 +1365,12 @@ dma_controller_create(struct musb *musb, void __iomem *mregs)
+
+ return &controller->controller;
+ }
++EXPORT_SYMBOL(cppi_dma_controller_create);
+
+ /*
+ * Destroy a previously-instantiated DMA controller.
+ */
+-void dma_controller_destroy(struct dma_controller *c)
++void cppi_dma_controller_destroy(struct dma_controller *c)
+ {
+ struct cppi *cppi;
+
+@@ -1381,6 +1384,7 @@ void dma_controller_destroy(struct dma_controller *c)
+
+ kfree(cppi);
+ }
++EXPORT_SYMBOL(cppi_dma_controller_destroy);
+
+ /*
+ * Context: controller irqlocked, endpoint selected
+@@ -1428,7 +1432,7 @@ static int cppi_channel_abort(struct dma_channel *channel)
+ * and caller should rely on us not changing it.
+ * peripheral code is safe ... check host too.
+ */
+- musb_ep_select(mbase, cppi_ch->index + 1);
++ musb_ep_select(controller->musb, mbase, cppi_ch->index + 1);
+
+ if (cppi_ch->transmit) {
+ struct cppi_tx_stateram __iomem *tx_ram;
+@@ -1561,3 +1565,16 @@ static int cppi_channel_abort(struct dma_channel *channel)
+ * Power Management ... probably turn off cppi during suspend, restart;
+ * check state ram? Clocking is presumably shared with usb core.
+ */
++MODULE_DESCRIPTION("CPPI dma controller driver for musb");
++MODULE_LICENSE("GPL v2");
++
++static int __init cppi_dma_init(void)
++{
++ return 0;
++}
++module_init(cppi_dma_init);
++
++static void __exit cppi_dma__exit(void)
++{
++}
++module_exit(cppi_dma__exit);
+diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
+index 2613bfd..8b72c22 100644
+--- a/drivers/usb/musb/da8xx.c
++++ b/drivers/usb/musb/da8xx.c
+@@ -423,8 +423,8 @@ static int da8xx_musb_init(struct musb *musb)
+ if (!rev)
+ goto fail;
+
+- usb_nop_xceiv_register();
+- musb->xceiv = otg_get_transceiver();
++ usb_nop_xceiv_register(musb->id);
++ musb->xceiv = otg_get_transceiver(musb->id);
+ if (!musb->xceiv)
+ goto fail;
+
+@@ -458,12 +458,14 @@ static int da8xx_musb_exit(struct musb *musb)
+ phy_off();
+
+ otg_put_transceiver(musb->xceiv);
+- usb_nop_xceiv_unregister();
++ usb_nop_xceiv_unregister(musb->id);
+
+ return 0;
+ }
+
+ static const struct musb_platform_ops da8xx_ops = {
++ .fifo_mode = 2,
++ .flags = MUSB_GLUE_EP_ADDR_FLAT_MAPPING,
+ .init = da8xx_musb_init,
+ .exit = da8xx_musb_exit,
+
+@@ -474,6 +476,9 @@ static const struct musb_platform_ops da8xx_ops = {
+ .try_idle = da8xx_musb_try_idle,
+
+ .set_vbus = da8xx_musb_set_vbus,
++
++ .read_fifo = musb_read_fifo,
++ .write_fifo = musb_write_fifo,
+ };
+
+ static u64 da8xx_dmamask = DMA_BIT_MASK(32);
+diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
+index 7c569f5..3911d9a 100644
+--- a/drivers/usb/musb/davinci.c
++++ b/drivers/usb/musb/davinci.c
+@@ -284,7 +284,7 @@ static irqreturn_t davinci_musb_interrupt(int irq, void *__hci)
+ * mask, state, "vector", and EOI registers.
+ */
+ cppi = container_of(musb->dma_controller, struct cppi, controller);
+- if (is_cppi_enabled() && musb->dma_controller && !cppi->irq)
++ if (is_cppi_enabled(musb) && musb->dma_controller && !cppi->irq)
+ retval = cppi_interrupt(irq, __hci);
+
+ /* ack and handle non-CPPI interrupts */
+@@ -382,8 +382,8 @@ static int davinci_musb_init(struct musb *musb)
+ void __iomem *tibase = musb->ctrl_base;
+ u32 revision;
+
+- usb_nop_xceiv_register();
+- musb->xceiv = otg_get_transceiver();
++ usb_nop_xceiv_register(musb->id);
++ musb->xceiv = otg_get_transceiver(musb->id);
+ if (!musb->xceiv)
+ return -ENODEV;
+
+@@ -443,7 +443,7 @@ static int davinci_musb_init(struct musb *musb)
+
+ fail:
+ otg_put_transceiver(musb->xceiv);
+- usb_nop_xceiv_unregister();
++ usb_nop_xceiv_unregister(musb->id);
+ return -ENODEV;
+ }
+
+@@ -492,12 +492,14 @@ static int davinci_musb_exit(struct musb *musb)
+ phy_off();
+
+ otg_put_transceiver(musb->xceiv);
+- usb_nop_xceiv_unregister();
++ usb_nop_xceiv_unregister(musb->id);
+
+ return 0;
+ }
+
+ static const struct musb_platform_ops davinci_ops = {
++ .fifo_mode = 2,
++ .flags = MUSB_GLUE_EP_ADDR_FLAT_MAPPING | MUSB_GLUE_DMA_CPPI,
+ .init = davinci_musb_init,
+ .exit = davinci_musb_exit,
+
+@@ -507,6 +509,12 @@ static const struct musb_platform_ops davinci_ops = {
+ .set_mode = davinci_musb_set_mode,
+
+ .set_vbus = davinci_musb_set_vbus,
++
++ .read_fifo = musb_read_fifo,
++ .write_fifo = musb_write_fifo,
++
++ .dma_controller_create = cppi_dma_controller_create,
++ .dma_controller_destroy = cppi_dma_controller_destroy,
+ };
+
+ static u64 davinci_dmamask = DMA_BIT_MASK(32);
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index 920f04e..db5afeb 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -102,7 +102,6 @@
+
+ #include "musb_core.h"
+
+-#define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON)
+
+
+ #define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia"
+@@ -120,6 +119,10 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
+ MODULE_LICENSE("GPL");
+ MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
+
++u8 (*musb_readb)(const void __iomem *addr, unsigned offset);
++EXPORT_SYMBOL_GPL(musb_readb);
++void (*musb_writeb)(void __iomem *addr, unsigned offset, u8 data);
++EXPORT_SYMBOL_GPL(musb_writeb);
+
+ /*-------------------------------------------------------------------------*/
+
+@@ -131,6 +134,44 @@ static inline struct musb *dev_to_musb(struct device *dev)
+ /*-------------------------------------------------------------------------*/
+
+ #ifndef CONFIG_BLACKFIN
++
++/*
++ * TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum.
++ */
++static inline u8 __tusb_musb_readb(const void __iomem *addr, unsigned offset)
++{
++ u16 tmp;
++ u8 val;
++
++ tmp = __raw_readw(addr + (offset & ~1));
++ if (offset & 1)
++ val = (tmp >> 8);
++ else
++ val = tmp & 0xff;
++
++ return val;
++}
++
++static inline void __tusb_musb_writeb(void __iomem *addr, unsigned offset,
++ u8 data)
++{
++ u16 tmp;
++
++ tmp = __raw_readw(addr + (offset & ~1));
++ if (offset & 1)
++ tmp = (data << 8) | (tmp & 0xff);
++ else
++ tmp = (tmp & 0xff00) | data;
++
++ __raw_writew(tmp, addr + (offset & ~1));
++}
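In other words, an 8-bit access at an odd offset is folded into the 16-bit word at offset & ~1: a read returns that word's high byte, and a write replaces only the high byte while preserving the low one (and vice versa for even offsets).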
++
++static inline u8 __musb_readb(const void __iomem *addr, unsigned offset)
++ { return readb(addr + offset); }
++
++static inline void __musb_writeb(void __iomem *addr, unsigned offset, u8 data)
++ { writeb(data, addr + offset); }
++
+ static int musb_ulpi_read(struct otg_transceiver *otg, u32 offset)
+ {
+ void __iomem *addr = otg->io_priv;
+@@ -196,6 +237,12 @@ static int musb_ulpi_write(struct otg_transceiver *otg,
+ return 0;
+ }
+ #else
++static inline u8 __musb_readb(const void __iomem *addr, unsigned offset)
++ { return (u8) (bfin_read16(addr + offset)); }
++
++static inline void __musb_writeb(void __iomem *addr, unsigned offset, u8 data)
++ { bfin_write16(addr + offset, (u16) data); }
++
+ #define musb_ulpi_read NULL
+ #define musb_ulpi_write NULL
+ #endif
+@@ -205,10 +252,6 @@ static struct otg_io_access_ops musb_ulpi_access = {
+ .write = musb_ulpi_write,
+ };
+
+-/*-------------------------------------------------------------------------*/
+-
+-#if !defined(CONFIG_USB_MUSB_TUSB6010) && !defined(CONFIG_USB_MUSB_BLACKFIN)
+-
+ /*
+ * Load an endpoint's FIFO
+ */
+@@ -249,8 +292,8 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
+ writesb(fifo, src, len);
+ }
+ }
++EXPORT_SYMBOL_GPL(musb_write_fifo);
+
+-#if !defined(CONFIG_USB_MUSB_AM35X)
+ /*
+ * Unload an endpoint's FIFO
+ */
+@@ -289,10 +332,7 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
+ readsb(fifo, dst, len);
+ }
+ }
+-#endif
+-
+-#endif /* normal PIO */
+-
++EXPORT_SYMBOL_GPL(musb_read_fifo);
+
+ /*-------------------------------------------------------------------------*/
+
+@@ -318,17 +358,20 @@ static const u8 musb_test_packet[53] = {
+
+ void musb_load_testpacket(struct musb *musb)
+ {
+- void __iomem *regs = musb->endpoints[0].regs;
+-
+- musb_ep_select(musb->mregs, 0);
+- musb_write_fifo(musb->control_ep,
++ musb_ep_select(musb, musb->mregs, 0);
++ musb->ops->write_fifo(musb->control_ep,
+ sizeof(musb_test_packet), musb_test_packet);
+- musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY);
+ }
+
+ /*-------------------------------------------------------------------------*/
+
+ /*
++ * See also USB_OTG_1-3.pdf 6.6.5 Timers
++ * REVISIT: Are the other timers done in the hardware?
++ */
++#define TB_ASE0_BRST 100 /* Min 3.125 ms */
++
++/*
+ * Handles OTG hnp timeouts, such as b_ase0_brst
+ */
+ void musb_otg_timer_func(unsigned long data)
+@@ -344,12 +387,9 @@ void musb_otg_timer_func(unsigned long data)
+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
+ musb->is_active = 0;
+ break;
+- case OTG_STATE_A_SUSPEND:
+ case OTG_STATE_A_WAIT_BCON:
+- dev_dbg(musb->controller, "HNP: %s timeout\n",
+- otg_state_string(musb->xceiv->state));
+- musb_platform_set_vbus(musb, 0);
+- musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
++ dev_dbg(musb->controller, "HNP: a_wait_bcon timeout; back to a_host\n");
++ musb_hnp_stop(musb);
+ break;
+ default:
+ dev_dbg(musb->controller, "HNP: Unhandled mode %s\n",
+@@ -360,7 +400,7 @@ void musb_otg_timer_func(unsigned long data)
+ }
+
+ /*
+- * Stops the HNP transition. Caller must take care of locking.
++ * Stops the B-device HNP state. Caller must take care of locking.
+ */
+ void musb_hnp_stop(struct musb *musb)
+ {
+@@ -368,13 +408,15 @@ void musb_hnp_stop(struct musb *musb)
+ void __iomem *mbase = musb->mregs;
+ u8 reg;
+
+- dev_dbg(musb->controller, "HNP: stop from %s\n", otg_state_string(musb->xceiv->state));
+-
+ switch (musb->xceiv->state) {
+ case OTG_STATE_A_PERIPHERAL:
++ case OTG_STATE_A_WAIT_VFALL:
++ case OTG_STATE_A_WAIT_BCON:
++ dev_dbg(musb->controller, "HNP: Switching back to A-host\n");
+ musb_g_disconnect(musb);
+- dev_dbg(musb->controller, "HNP: back to %s\n",
+- otg_state_string(musb->xceiv->state));
++ musb->xceiv->state = OTG_STATE_A_IDLE;
++ MUSB_HST_MODE(musb);
++ musb->is_active = 0;
+ break;
+ case OTG_STATE_B_HOST:
+ dev_dbg(musb->controller, "HNP: Disabling HR\n");
+@@ -427,7 +469,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ handled = IRQ_HANDLED;
+ dev_dbg(musb->controller, "RESUME (%s)\n", otg_state_string(musb->xceiv->state));
+
+- if (devctl & MUSB_DEVCTL_HM) {
++ if (is_host_enabled(musb) && (devctl & MUSB_DEVCTL_HM)) {
+ void __iomem *mbase = musb->mregs;
+
+ switch (musb->xceiv->state) {
+@@ -470,26 +512,34 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ } else {
+ switch (musb->xceiv->state) {
+ case OTG_STATE_A_SUSPEND:
+- /* possibly DISCONNECT is upcoming */
+- musb->xceiv->state = OTG_STATE_A_HOST;
+- usb_hcd_resume_root_hub(musb_to_hcd(musb));
++ if (is_host_enabled(musb)) {
++ /* possibly DISCONNECT is upcoming */
++ musb->xceiv->state = OTG_STATE_A_HOST;
++ usb_hcd_resume_root_hub(
++ musb_to_hcd(musb));
++ }
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ case OTG_STATE_B_PERIPHERAL:
+- /* disconnect while suspended? we may
+- * not get a disconnect irq...
+- */
+- if ((devctl & MUSB_DEVCTL_VBUS)
++ if (is_peripheral_enabled(musb)) {
++ /* disconnect while suspended? we may
++ * not get a disconnect irq...
++ */
++ if ((devctl & MUSB_DEVCTL_VBUS)
+ != (3 << MUSB_DEVCTL_VBUS_SHIFT)
+- ) {
+- musb->int_usb |= MUSB_INTR_DISCONNECT;
+- musb->int_usb &= ~MUSB_INTR_SUSPEND;
+- break;
++ ) {
++ musb->int_usb |=
++ MUSB_INTR_DISCONNECT;
++ musb->int_usb &=
++ ~MUSB_INTR_SUSPEND;
++ break;
++ }
++ musb_g_resume(musb);
+ }
+- musb_g_resume(musb);
+ break;
+ case OTG_STATE_B_IDLE:
+- musb->int_usb &= ~MUSB_INTR_SUSPEND;
++ if (is_peripheral_enabled(musb))
++ musb->int_usb &= ~MUSB_INTR_SUSPEND;
+ break;
+ default:
+ WARNING("bogus %s RESUME (%s)\n",
+@@ -500,7 +550,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ }
+
+ /* see manual for the order of the tests */
+- if (int_usb & MUSB_INTR_SESSREQ) {
++ if (is_host_enabled(musb) && (int_usb & MUSB_INTR_SESSREQ)) {
+ void __iomem *mbase = musb->mregs;
+
+ if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
+@@ -528,7 +578,8 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ handled = IRQ_HANDLED;
+ }
+
+- if (int_usb & MUSB_INTR_VBUSERROR) {
++ if (is_host_enabled(musb) && (int_usb & MUSB_INTR_VBUSERROR)) {
++ struct usb_hcd *hcd = musb_to_hcd(musb);
+ int ignore = 0;
+
+ /* During connection as an A-Device, we may see a short
+@@ -568,6 +619,13 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ musb->port1_status |=
+ USB_PORT_STAT_OVERCURRENT
+ | (USB_PORT_STAT_C_OVERCURRENT << 16);
++
++ if (hcd->status_urb)
++ usb_hcd_poll_rh_status(hcd);
++ else
++ usb_hcd_resume_root_hub(hcd);
++
++ MUSB_HST_MODE(musb);
+ }
+ break;
+ default:
+@@ -604,22 +662,6 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ handled = IRQ_HANDLED;
+
+ switch (musb->xceiv->state) {
+- case OTG_STATE_A_PERIPHERAL:
+- /* We also come here if the cable is removed, since
+- * this silicon doesn't report ID-no-longer-grounded.
+- *
+- * We depend on T(a_wait_bcon) to shut us down, and
+- * hope users don't do anything dicey during this
+- * undesired detour through A_WAIT_BCON.
+- */
+- musb_hnp_stop(musb);
+- usb_hcd_resume_root_hub(musb_to_hcd(musb));
+- musb_root_disconnect(musb);
+- musb_platform_try_idle(musb, jiffies
+- + msecs_to_jiffies(musb->a_wait_bcon
+- ? : OTG_TIME_A_WAIT_BCON));
+-
+- break;
+ case OTG_STATE_B_IDLE:
+ if (!musb->is_active)
+ break;
+@@ -627,12 +669,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ musb_g_suspend(musb);
+ musb->is_active = is_otg_enabled(musb)
+ && musb->xceiv->gadget->b_hnp_enable;
+- if (musb->is_active) {
++ if (is_otg_enabled(musb) && musb->is_active) {
+ musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
+ dev_dbg(musb->controller, "HNP: Setting timer for b_ase0_brst\n");
+ mod_timer(&musb->otg_timer, jiffies
+- + msecs_to_jiffies(
+- OTG_TIME_B_ASE0_BRST));
++ + msecs_to_jiffies(TB_ASE0_BRST));
+ }
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+@@ -649,6 +690,13 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
+ dev_dbg(musb->controller, "REVISIT: SUSPEND as B_HOST\n");
+ break;
++ case OTG_STATE_A_PERIPHERAL:
++ /*
++ * We cannot stop HNP here, devctl BDEVICE might be
++ * still set.
++ */
++ if (is_otg_enabled(musb))
++ break;
+ default:
+ /* "should not happen" */
+ musb->is_active = 0;
+@@ -656,7 +704,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ }
+ }
+
+- if (int_usb & MUSB_INTR_CONNECT) {
++ if (is_host_enabled(musb) && (int_usb & MUSB_INTR_CONNECT)) {
+ struct usb_hcd *hcd = musb_to_hcd(musb);
+
+ handled = IRQ_HANDLED;
+@@ -665,13 +713,16 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+
+ musb->ep0_stage = MUSB_EP0_START;
+
+- /* flush endpoints when transitioning from Device Mode */
+- if (is_peripheral_active(musb)) {
+- /* REVISIT HNP; just force disconnect */
++ if (is_otg_enabled(musb)) {
++ /* flush endpoints when transitioning from DeviceMode */
++ if (is_peripheral_active(musb)) {
++ /* REVISIT HNP; just force disconnect */
++ }
++ musb_writew(musb->mregs, MUSB_INTRTXE, musb->epmask);
++ musb_writew(musb->mregs, MUSB_INTRRXE,
++ musb->epmask & 0xfffe);
++ musb_writeb(musb->mregs, MUSB_INTRUSBE, 0xf7);
+ }
+- musb_writew(musb->mregs, MUSB_INTRTXE, musb->epmask);
+- musb_writew(musb->mregs, MUSB_INTRRXE, musb->epmask & 0xfffe);
+- musb_writeb(musb->mregs, MUSB_INTRUSBE, 0xf7);
+ musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
+ |USB_PORT_STAT_HIGH_SPEED
+ |USB_PORT_STAT_ENABLE
+@@ -683,23 +734,28 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ if (devctl & MUSB_DEVCTL_LSDEV)
+ musb->port1_status |= USB_PORT_STAT_LOW_SPEED;
+
++ if (hcd->status_urb)
++ usb_hcd_poll_rh_status(hcd);
++ else
++ usb_hcd_resume_root_hub(hcd);
++
++ MUSB_HST_MODE(musb);
++
+ /* indicate new connection to OTG machine */
+ switch (musb->xceiv->state) {
+ case OTG_STATE_B_PERIPHERAL:
+ if (int_usb & MUSB_INTR_SUSPEND) {
+ dev_dbg(musb->controller, "HNP: SUSPEND+CONNECT, now b_host\n");
++ musb->xceiv->state = OTG_STATE_B_HOST;
++ hcd->self.is_b_host = 1;
+ int_usb &= ~MUSB_INTR_SUSPEND;
+- goto b_host;
+ } else
+ dev_dbg(musb->controller, "CONNECT as b_peripheral???\n");
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+- dev_dbg(musb->controller, "HNP: CONNECT, now b_host\n");
+-b_host:
++ dev_dbg(musb->controller, "HNP: Waiting to switch to b_host state\n");
+ musb->xceiv->state = OTG_STATE_B_HOST;
+ hcd->self.is_b_host = 1;
+- musb->ignore_disconnect = 0;
+- del_timer(&musb->otg_timer);
+ break;
+ default:
+ if ((devctl & MUSB_DEVCTL_VBUS)
+@@ -710,13 +766,6 @@ b_host:
+ break;
+ }
+
+- /* poke the root hub */
+- MUSB_HST_MODE(musb);
+- if (hcd->status_urb)
+- usb_hcd_poll_rh_status(hcd);
+- else
+- usb_hcd_resume_root_hub(hcd);
+-
+ dev_dbg(musb->controller, "CONNECT (%s) devctl %02x\n",
+ otg_state_string(musb->xceiv->state), devctl);
+ }
+@@ -730,33 +779,38 @@ b_host:
+ switch (musb->xceiv->state) {
+ case OTG_STATE_A_HOST:
+ case OTG_STATE_A_SUSPEND:
+- usb_hcd_resume_root_hub(musb_to_hcd(musb));
+- musb_root_disconnect(musb);
+- if (musb->a_wait_bcon != 0 && is_otg_enabled(musb))
+- musb_platform_try_idle(musb, jiffies
++ if (is_host_enabled(musb)) {
++ usb_hcd_resume_root_hub(musb_to_hcd(musb));
++ musb_root_disconnect(musb);
++ if (musb->a_wait_bcon != 0 &&
++ is_otg_enabled(musb))
++ musb_platform_try_idle(musb, jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon));
++ }
+ break;
+ case OTG_STATE_B_HOST:
+- /* REVISIT this behaves for "real disconnect"
+- * cases; make sure the other transitions from
+- * from B_HOST act right too. The B_HOST code
+- * in hnp_stop() is currently not used...
+- */
+- musb_root_disconnect(musb);
+- musb_to_hcd(musb)->self.is_b_host = 0;
+- musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
+- MUSB_DEV_MODE(musb);
+- musb_g_disconnect(musb);
++ if (is_otg_enabled(musb))
++ musb_hnp_stop(musb);
+ break;
+ case OTG_STATE_A_PERIPHERAL:
+- musb_hnp_stop(musb);
+- musb_root_disconnect(musb);
++ if (is_otg_enabled(musb)) {
++ musb_hnp_stop(musb);
++ musb_root_disconnect(musb);
++ }
+ /* FALLTHROUGH */
+ case OTG_STATE_B_WAIT_ACON:
++ if (!is_otg_enabled(musb))
++ break;
+ /* FALLTHROUGH */
+ case OTG_STATE_B_PERIPHERAL:
+ case OTG_STATE_B_IDLE:
+- musb_g_disconnect(musb);
++ if (is_peripheral_enabled(musb)) {
++ printk(KERN_INFO "musb %s gadget disconnected.\n",
++ musb->gadget_driver
++ ? musb->gadget_driver->driver.name
++ : "");
++ musb_g_disconnect(musb);
++ }
+ break;
+ default:
+ WARNING("unhandled DISCONNECT transition (%s)\n",
+@@ -770,7 +824,7 @@ b_host:
+ */
+ if (int_usb & MUSB_INTR_RESET) {
+ handled = IRQ_HANDLED;
+- if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) {
++ if (is_host_enabled(musb) && (devctl & MUSB_DEVCTL_HM) != 0) {
+ /*
+ * Looks like non-HS BABBLE can be ignored, but
+ * HS BABBLE is an error condition. For HS the solution
+@@ -784,36 +838,45 @@ b_host:
+ ERR("Stopping host session -- babble\n");
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ }
+- } else if (is_peripheral_capable()) {
++ } else if (is_peripheral_enabled(musb)) {
+ dev_dbg(musb->controller, "BUS RESET as %s\n",
+ otg_state_string(musb->xceiv->state));
+ switch (musb->xceiv->state) {
+ case OTG_STATE_A_SUSPEND:
+- /* We need to ignore disconnect on suspend
+- * otherwise tusb 2.0 won't reconnect after a
+- * power cycle, which breaks otg compliance.
+- */
+- musb->ignore_disconnect = 1;
+- musb_g_reset(musb);
++ if (is_otg_enabled(musb)) {
++ /* We need to ignore disconnect on
++ * suspend otherwise tusb 2.0 won't
++ * reconnect after a power cycle,
++ * which breaks otg compliance.
++ */
++ musb->ignore_disconnect = 1;
++ musb_g_reset(musb);
++ } else {
++ break;
++ }
+ /* FALLTHROUGH */
+ case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */
+- /* never use invalid T(a_wait_bcon) */
+- dev_dbg(musb->controller, "HNP: in %s, %d msec timeout\n",
+- otg_state_string(musb->xceiv->state),
+- TA_WAIT_BCON(musb));
+- mod_timer(&musb->otg_timer, jiffies
+- + msecs_to_jiffies(TA_WAIT_BCON(musb)));
++ if (is_otg_enabled(musb)) {
++ dev_dbg(musb->controller,
++ "HNP: Setting timer as %s\n",
++ otg_state_string(musb->xceiv->state));
++ mod_timer(&musb->otg_timer, jiffies
++ + msecs_to_jiffies(100));
++ }
+ break;
+ case OTG_STATE_A_PERIPHERAL:
+- musb->ignore_disconnect = 0;
+- del_timer(&musb->otg_timer);
+- musb_g_reset(musb);
++ if (is_otg_enabled(musb))
++ musb_hnp_stop(musb);
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+- dev_dbg(musb->controller, "HNP: RESET (%s), to b_peripheral\n",
++ if (is_otg_enabled(musb)) {
++ dev_dbg(musb->controller,
++ "HNP: RESET (%s), to b_peripheral\n",
+ otg_state_string(musb->xceiv->state));
+- musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
+- musb_g_reset(musb);
++ musb->xceiv->state =
++ OTG_STATE_B_PERIPHERAL;
++ musb_g_reset(musb);
++ }
+ break;
+ case OTG_STATE_B_IDLE:
+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
+@@ -906,7 +969,6 @@ void musb_start(struct musb *musb)
+
+ musb->is_active = 0;
+ devctl = musb_readb(regs, MUSB_DEVCTL);
+- devctl &= ~MUSB_DEVCTL_SESSION;
+
+ if (is_otg_enabled(musb)) {
+ /* session started after:
+@@ -930,7 +992,7 @@ void musb_start(struct musb *musb)
+ musb_platform_enable(musb);
+ musb_writeb(regs, MUSB_DEVCTL, devctl);
+ }
+-
++EXPORT_SYMBOL(musb_start);
+
+ static void musb_generic_disable(struct musb *musb)
+ {
+@@ -1000,40 +1062,12 @@ static void musb_shutdown(struct platform_device *pdev)
+ /*-------------------------------------------------------------------------*/
+
+ /*
+- * The silicon either has hard-wired endpoint configurations, or else
+- * "dynamic fifo" sizing. The driver has support for both, though at this
+- * writing only the dynamic sizing is very well tested. Since we switched
+- * away from compile-time hardware parameters, we can no longer rely on
+- * dead code elimination to leave only the relevant one in the object file.
+- *
+- * We don't currently use dynamic fifo setup capability to do anything
+- * more than selecting one of a bunch of predefined configurations.
+- */
+-#if defined(CONFIG_USB_MUSB_TUSB6010) \
+- || defined(CONFIG_USB_MUSB_TUSB6010_MODULE) \
+- || defined(CONFIG_USB_MUSB_OMAP2PLUS) \
+- || defined(CONFIG_USB_MUSB_OMAP2PLUS_MODULE) \
+- || defined(CONFIG_USB_MUSB_AM35X) \
+- || defined(CONFIG_USB_MUSB_AM35X_MODULE)
+-static ushort __initdata fifo_mode = 4;
+-#elif defined(CONFIG_USB_MUSB_UX500) \
+- || defined(CONFIG_USB_MUSB_UX500_MODULE)
+-static ushort __initdata fifo_mode = 5;
+-#else
+-static ushort __initdata fifo_mode = 2;
+-#endif
+-
+-/* "modprobe ... fifo_mode=1" etc */
+-module_param(fifo_mode, ushort, 0);
+-MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");
+-
+-/*
+ * tables defining fifo_mode values. define more if you like.
+ * for host side, make sure both halves of ep1 are set up.
+ */
+
+ /* mode 0 - fits in 2KB */
+-static struct musb_fifo_cfg __initdata mode_0_cfg[] = {
++static struct musb_fifo_cfg __devinitdata mode_0_cfg[] = {
+ { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
+@@ -1042,7 +1076,7 @@ static struct musb_fifo_cfg __initdata mode_0_cfg[] = {
+ };
+
+ /* mode 1 - fits in 4KB */
+-static struct musb_fifo_cfg __initdata mode_1_cfg[] = {
++static struct musb_fifo_cfg __devinitdata mode_1_cfg[] = {
+ { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+ { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+ { .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+@@ -1051,7 +1085,7 @@ static struct musb_fifo_cfg __initdata mode_1_cfg[] = {
+ };
+
+ /* mode 2 - fits in 4KB */
+-static struct musb_fifo_cfg __initdata mode_2_cfg[] = {
++static struct musb_fifo_cfg __devinitdata mode_2_cfg[] = {
+ { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+@@ -1061,7 +1095,7 @@ static struct musb_fifo_cfg __initdata mode_2_cfg[] = {
+ };
+
+ /* mode 3 - fits in 4KB */
+-static struct musb_fifo_cfg __initdata mode_3_cfg[] = {
++static struct musb_fifo_cfg __devinitdata mode_3_cfg[] = {
+ { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+ { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+ { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+@@ -1071,13 +1105,13 @@ static struct musb_fifo_cfg __initdata mode_3_cfg[] = {
+ };
+
+ /* mode 4 - fits in 16KB */
+-static struct musb_fifo_cfg __initdata mode_4_cfg[] = {
+-{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+-{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+-{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+-{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+-{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
+-{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
++static struct musb_fifo_cfg __devinitdata mode_4_cfg[] = {
++{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512,},
++{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512,},
++{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512,},
++{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512,},
++{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512,},
++{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512,},
+ { .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
+@@ -1101,8 +1135,9 @@ static struct musb_fifo_cfg __initdata mode_4_cfg[] = {
+ { .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
+ };
+
++
+ /* mode 5 - fits in 8KB */
+-static struct musb_fifo_cfg __initdata mode_5_cfg[] = {
++static struct musb_fifo_cfg __devinitdata mode_5_cfg[] = {
+ { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+@@ -1132,13 +1167,44 @@ static struct musb_fifo_cfg __initdata mode_5_cfg[] = {
+ { .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
+ };
+
++/* mode 6 - fits in 32KB */
++static struct musb_fifo_cfg __devinitdata mode_6_cfg[] = {
++{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE,},
++{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE,},
++{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE,},
++{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE,},
++{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE,},
++{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE,},
++{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 64, },
++{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 64, },
++{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 64, },
++{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 64, },
++{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 64, },
++{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 64, },
++{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 64, },
++{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 64, },
++{ .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 64, },
++{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 64, },
++{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 64, },
++{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 64, },
++{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 256, },
++{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 64, },
++{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 256, },
++{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 64, },
++{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 256, },
++{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 64, },
++{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 4096, },
++{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
++{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
++};
++
+ /*
+ * configure a fifo; for non-shared endpoints, this may be called
+ * once for a tx fifo and once for an rx fifo.
+ *
+ * returns negative errno or offset for next fifo.
+ */
+-static int __init
++static int __devinit
+ fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
+ const struct musb_fifo_cfg *cfg, u16 offset)
+ {
+@@ -1170,7 +1236,7 @@ fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
+ /* EP0 reserved endpoint for control, bidirectional;
+ * EP1 reserved for bulk, two unidirection halves.
+ */
+- if (hw_ep->epnum == 1)
++ if (is_host_enabled(musb) && hw_ep->epnum == 1)
+ musb->bulk_ep = hw_ep;
+ /* REVISIT error check: be sure ep0 can both rx and tx ... */
+ switch (cfg->style) {
+@@ -1209,26 +1275,28 @@ fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
+ return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
+ }
+
+-static struct musb_fifo_cfg __initdata ep0_cfg = {
++static struct musb_fifo_cfg __devinitdata ep0_cfg = {
+ .style = FIFO_RXTX, .maxpacket = 64,
+ };
+
+-static int __init ep_config_from_table(struct musb *musb)
++static int __devinit ep_config_from_table(struct musb *musb)
+ {
+ const struct musb_fifo_cfg *cfg;
+ unsigned i, n;
+ int offset;
+ struct musb_hw_ep *hw_ep = musb->endpoints;
+
+- if (musb->config->fifo_cfg) {
++ if (musb->config->fifo_mode)
++ musb->fifo_mode = musb->config->fifo_mode;
++ else if (musb->config->fifo_cfg) {
+ cfg = musb->config->fifo_cfg;
+ n = musb->config->fifo_cfg_size;
+ goto done;
+ }
+
+- switch (fifo_mode) {
++ switch (musb->fifo_mode) {
+ default:
+- fifo_mode = 0;
++ musb->fifo_mode = 0;
+ /* FALLTHROUGH */
+ case 0:
+ cfg = mode_0_cfg;
+@@ -1254,10 +1322,14 @@ static int __init ep_config_from_table(struct musb *musb)
+ cfg = mode_5_cfg;
+ n = ARRAY_SIZE(mode_5_cfg);
+ break;
++ case 6:
++ cfg = mode_6_cfg;
++ n = ARRAY_SIZE(mode_6_cfg);
++ break;
+ }
+
+ printk(KERN_DEBUG "%s: setup fifo_mode %d\n",
+- musb_driver_name, fifo_mode);
++ musb_driver_name, musb->fifo_mode);
+
+
+ done:
+@@ -1291,7 +1363,7 @@ done:
+ n + 1, musb->config->num_eps * 2 - 1,
+ offset, (1 << (musb->config->ram_bits + 2)));
+
+- if (!musb->bulk_ep) {
++ if (is_host_enabled(musb) && !musb->bulk_ep) {
+ pr_debug("%s: missing bulk\n", musb_driver_name);
+ return -EINVAL;
+ }
+@@ -1299,12 +1371,11 @@ done:
+ return 0;
+ }
+
+-
+ /*
+ * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false
+ * @param musb the controller
+ */
+-static int __init ep_config_from_hw(struct musb *musb)
++static int __devinit ep_config_from_hw(struct musb *musb)
+ {
+ u8 epnum = 0;
+ struct musb_hw_ep *hw_ep;
+@@ -1316,7 +1387,7 @@ static int __init ep_config_from_hw(struct musb *musb)
+ /* FIXME pick up ep0 maxpacket size */
+
+ for (epnum = 1; epnum < musb->config->num_eps; epnum++) {
+- musb_ep_select(mbase, epnum);
++ musb_ep_select(musb, mbase, epnum);
+ hw_ep = musb->endpoints + epnum;
+
+ ret = musb_read_fifosize(musb, hw_ep, epnum);
+@@ -1325,20 +1396,22 @@ static int __init ep_config_from_hw(struct musb *musb)
+
+ /* FIXME set up hw_ep->{rx,tx}_double_buffered */
+
+- /* pick an RX/TX endpoint for bulk */
+- if (hw_ep->max_packet_sz_tx < 512
++ if (is_host_enabled(musb)) {
++ /* pick an RX/TX endpoint for bulk */
++ if (hw_ep->max_packet_sz_tx < 512
+ || hw_ep->max_packet_sz_rx < 512)
+- continue;
++ continue;
+
+- /* REVISIT: this algorithm is lazy, we should at least
+- * try to pick a double buffered endpoint.
+- */
+- if (musb->bulk_ep)
+- continue;
+- musb->bulk_ep = hw_ep;
++ /* REVISIT: this algorithm is lazy, we should at least
++ * try to pick a double buffered endpoint.
++ */
++ if (musb->bulk_ep)
++ continue;
++ musb->bulk_ep = hw_ep;
++ }
+ }
+
+- if (!musb->bulk_ep) {
++ if (is_host_enabled(musb) && !musb->bulk_ep) {
+ pr_debug("%s: missing bulk\n", musb_driver_name);
+ return -EINVAL;
+ }
+@@ -1351,7 +1424,7 @@ enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };
+ /* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
+ * configure endpoints, or take their config from silicon
+ */
+-static int __init musb_core_init(u16 musb_type, struct musb *musb)
++static int __devinit musb_core_init(u16 musb_type, struct musb *musb)
+ {
+ u8 reg;
+ char *type;
+@@ -1398,9 +1471,10 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
+ musb->is_multipoint = 0;
+ type = "";
+ #ifndef CONFIG_USB_OTG_BLACKLIST_HUB
+- printk(KERN_ERR
+- "%s: kernel must blacklist external hubs\n",
+- musb_driver_name);
++ if (is_host_enabled(musb))
++ printk(KERN_ERR
++ "%s: kernel must blacklist external hubs\n",
++ musb_driver_name);
+ #endif
+ }
+
+@@ -1431,23 +1505,31 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
+ for (i = 0; i < musb->nr_endpoints; i++) {
+ struct musb_hw_ep *hw_ep = musb->endpoints + i;
+
+- hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase;
+-#ifdef CONFIG_USB_MUSB_TUSB6010
+- hw_ep->fifo_async = musb->async + 0x400 + MUSB_FIFO_OFFSET(i);
+- hw_ep->fifo_sync = musb->sync + 0x400 + MUSB_FIFO_OFFSET(i);
+- hw_ep->fifo_sync_va =
+- musb->sync_va + 0x400 + MUSB_FIFO_OFFSET(i);
+-
+- if (i == 0)
+- hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF;
+- else
+- hw_ep->conf = mbase + 0x400 + (((i - 1) & 0xf) << 2);
+-#endif
++ if (musb->ops->flags & MUSB_GLUE_TUSB_STYLE) {
++ hw_ep->fifo = MUSB_TUSB_FIFO_OFFSET(i) + mbase;
++ hw_ep->fifo_async = musb->async +
++ 0x400 + MUSB_TUSB_FIFO_OFFSET(i);
++ hw_ep->fifo_sync = musb->sync +
++ 0x400 + MUSB_TUSB_FIFO_OFFSET(i);
++ hw_ep->fifo_sync_va = musb->sync_va + 0x400 +
++ MUSB_TUSB_FIFO_OFFSET(i);
++
++ if (i == 0)
++ hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF;
++ else
++ hw_ep->conf = mbase + 0x400 +
++ (((i - 1) & 0xf) << 2);
++ } else {
++ hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase;
++ }
+
+- hw_ep->regs = MUSB_EP_OFFSET(i, 0) + mbase;
+- hw_ep->target_regs = musb_read_target_reg_base(i, mbase);
+- hw_ep->rx_reinit = 1;
+- hw_ep->tx_reinit = 1;
++ hw_ep->regs = MUSB_EP_OFFSET(musb, i, 0) + mbase;
++ if (is_host_enabled(musb)) {
++ hw_ep->target_regs =
++ musb_read_target_reg_base(i, mbase);
++ hw_ep->rx_reinit = 1;
++ hw_ep->tx_reinit = 1;
++ }
+
+ if (hw_ep->max_packet_sz_tx) {
+ dev_dbg(musb->controller,
+@@ -1546,14 +1628,14 @@ irqreturn_t musb_interrupt(struct musb *musb)
+ ep_num = 1;
+ while (reg) {
+ if (reg & 1) {
+- /* musb_ep_select(musb->mregs, ep_num); */
++ /* musb_ep_select(musb, musb->mregs, ep_num); */
+ /* REVISIT just retval = ep->rx_irq(...) */
+ retval = IRQ_HANDLED;
+ if (devctl & MUSB_DEVCTL_HM) {
+- if (is_host_capable())
++ if (is_host_enabled(musb))
+ musb_host_rx(musb, ep_num);
+ } else {
+- if (is_peripheral_capable())
++ if (is_peripheral_enabled(musb))
+ musb_g_rx(musb, ep_num);
+ }
+ }
+@@ -1567,14 +1649,14 @@ irqreturn_t musb_interrupt(struct musb *musb)
+ ep_num = 1;
+ while (reg) {
+ if (reg & 1) {
+- /* musb_ep_select(musb->mregs, ep_num); */
++ /* musb_ep_select(musb, musb->mregs, ep_num); */
+ /* REVISIT just retval |= ep->tx_irq(...) */
+ retval = IRQ_HANDLED;
+ if (devctl & MUSB_DEVCTL_HM) {
+- if (is_host_capable())
++ if (is_host_enabled(musb))
+ musb_host_tx(musb, ep_num);
+ } else {
+- if (is_peripheral_capable())
++ if (is_peripheral_enabled(musb))
+ musb_g_tx(musb, ep_num);
+ }
+ }
+@@ -1587,12 +1669,6 @@ irqreturn_t musb_interrupt(struct musb *musb)
+ EXPORT_SYMBOL_GPL(musb_interrupt);
+
+ #ifndef CONFIG_MUSB_PIO_ONLY
+-static int __initdata use_dma = 1;
+-
+-/* "modprobe ... use_dma=0" etc */
+-module_param(use_dma, bool, 0);
+-MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
+-
+ void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
+ {
+ u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+@@ -1600,40 +1676,37 @@ void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
+ /* called with controller lock already held */
+
+ if (!epnum) {
+-#ifndef CONFIG_USB_TUSB_OMAP_DMA
+- if (!is_cppi_enabled()) {
++ if (!tusb_dma_omap(musb) && !is_cppi_enabled(musb)
++ && !is_cppi41_enabled(musb)) {
+ /* endpoint 0 */
+ if (devctl & MUSB_DEVCTL_HM)
+ musb_h_ep0_irq(musb);
+ else
+ musb_g_ep0_irq(musb);
+ }
+-#endif
+ } else {
+ /* endpoints 1..15 */
+ if (transmit) {
+ if (devctl & MUSB_DEVCTL_HM) {
+- if (is_host_capable())
++ if (is_host_enabled(musb))
+ musb_host_tx(musb, epnum);
+ } else {
+- if (is_peripheral_capable())
++ if (is_peripheral_enabled(musb))
+ musb_g_tx(musb, epnum);
+ }
+ } else {
+ /* receive */
+ if (devctl & MUSB_DEVCTL_HM) {
+- if (is_host_capable())
++ if (is_host_enabled(musb))
+ musb_host_rx(musb, epnum);
+ } else {
+- if (is_peripheral_capable())
++ if (is_peripheral_enabled(musb))
+ musb_g_rx(musb, epnum);
+ }
+ }
+ }
+ }
+-
+-#else
+-#define use_dma 0
++EXPORT_SYMBOL_GPL(musb_dma_completion);
+ #endif
+
+ /*-------------------------------------------------------------------------*/
+@@ -1691,8 +1764,7 @@ musb_vbus_store(struct device *dev, struct device_attribute *attr,
+ }
+
+ spin_lock_irqsave(&musb->lock, flags);
+- /* force T(a_wait_bcon) to be zero/unlimited *OR* valid */
+- musb->a_wait_bcon = val ? max_t(int, val, OTG_TIME_A_WAIT_BCON) : 0 ;
++ musb->a_wait_bcon = val;
+ if (musb->xceiv->state == OTG_STATE_A_WAIT_BCON)
+ musb->is_active = 0;
+ musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
+@@ -1711,13 +1783,10 @@ musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
+
+ spin_lock_irqsave(&musb->lock, flags);
+ val = musb->a_wait_bcon;
+- /* FIXME get_vbus_status() is normally #defined as false...
+- * and is effectively TUSB-specific.
+- */
+ vbus = musb_platform_get_vbus_status(musb);
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+- return sprintf(buf, "Vbus %s, timeout %lu msec\n",
++ return sprintf(buf, "Vbus %s, timeout %lu\n",
+ vbus ? "on" : "off", val);
+ }
+ static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store);
+@@ -1762,10 +1831,9 @@ static const struct attribute_group musb_attr_group = {
+ static void musb_irq_work(struct work_struct *data)
+ {
+ struct musb *musb = container_of(data, struct musb, irq_work);
+- static int old_state;
+
+- if (musb->xceiv->state != old_state) {
+- old_state = musb->xceiv->state;
++ if (musb->xceiv->state != musb->old_state) {
++ musb->old_state = musb->xceiv->state;
+ sysfs_notify(&musb->controller->kobj, NULL, "mode");
+ }
+ }
+@@ -1774,7 +1842,7 @@ static void musb_irq_work(struct work_struct *data)
+ * Init support
+ */
+
+-static struct musb *__init
++static struct musb *__devinit
+ allocate_instance(struct device *dev,
+ struct musb_hdrc_config *config, void __iomem *mbase)
+ {
+@@ -1782,22 +1850,30 @@ allocate_instance(struct device *dev,
+ struct musb_hw_ep *ep;
+ int epnum;
+ struct usb_hcd *hcd;
++ struct musb_hdrc_platform_data *plat = dev->platform_data;
+
+- hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
+- if (!hcd)
+- return NULL;
+- /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
+-
+- musb = hcd_to_musb(hcd);
+- INIT_LIST_HEAD(&musb->control);
+- INIT_LIST_HEAD(&musb->in_bulk);
+- INIT_LIST_HEAD(&musb->out_bulk);
++ if (plat->mode != MUSB_PERIPHERAL) {
++ hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
++ if (!hcd)
++ return NULL;
++ /* usbcore sets dev->driver_data to hcd, and sometimes uses
++ * that...
++ */
+
+- hcd->uses_new_polling = 1;
+- hcd->has_tt = 1;
++ musb = hcd_to_musb(hcd);
++ INIT_LIST_HEAD(&musb->control);
++ INIT_LIST_HEAD(&musb->in_bulk);
++ INIT_LIST_HEAD(&musb->out_bulk);
++ INIT_LIST_HEAD(&musb->gb_list);
+
+- musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
+- musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
++ hcd->uses_new_polling = 1;
++ hcd->has_tt = 1;
++ musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
++ } else {
++ musb = kzalloc(sizeof *musb, GFP_KERNEL);
++ if (!musb)
++ return NULL;
++ }
+ dev_set_drvdata(dev, musb);
+ musb->mregs = mbase;
+ musb->ctrl_base = mbase;
+@@ -1827,7 +1903,8 @@ static void musb_free(struct musb *musb)
+ sysfs_remove_group(&musb->controller->kobj, &musb_attr_group);
+ #endif
+
+- musb_gadget_cleanup(musb);
++ if (is_peripheral_enabled(musb))
++ musb_gadget_cleanup(musb);
+
+ if (musb->nIrq >= 0) {
+ if (musb->irq_wake)
+@@ -1838,10 +1915,19 @@ static void musb_free(struct musb *musb)
+ struct dma_controller *c = musb->dma_controller;
+
+ (void) c->stop(c);
+- dma_controller_destroy(c);
++ musb->ops->dma_controller_destroy(c);
+ }
+
+- kfree(musb);
++ if (is_otg_enabled(musb))
++ del_timer_sync(&musb->otg_timer);
++
++ if (is_host_enabled(musb)) {
++ if (musb->gb_queue)
++ destroy_workqueue(musb->gb_queue);
++ usb_put_hcd(musb_to_hcd(musb));
++ } else {
++ kfree(musb);
++ }
+ }
+
+ /*
+@@ -1852,12 +1938,13 @@ static void musb_free(struct musb *musb)
+ * @mregs: virtual address of controller registers,
+ * not yet corrected for platform-specific offsets
+ */
+-static int __init
++static int __devinit
+ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
+ {
+ int status;
+ struct musb *musb;
+ struct musb_hdrc_platform_data *plat = dev->platform_data;
++ struct platform_device *pdev = to_platform_device(dev);
+
+ /* The driver might handle more features than the board; OK.
+ * Fail when the board needs a feature that's not enabled.
+@@ -1884,6 +1971,25 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
+ musb->board_set_power = plat->set_power;
+ musb->min_power = plat->min_power;
+ musb->ops = plat->platform_ops;
++ musb->id = pdev->id;
++ musb->first = 1;
++ if (is_host_enabled(musb))
++ spin_lock_init(&musb->gb_lock);
++
++ musb->fifo_mode = musb->ops->fifo_mode;
++
++#ifndef CONFIG_MUSB_PIO_ONLY
++ musb->orig_dma_mask = dev->dma_mask;
++#endif
++ if (musb->ops->flags & MUSB_GLUE_TUSB_STYLE) {
++ musb_readb = __tusb_musb_readb;
++ musb_writeb = __tusb_musb_writeb;
++ } else {
++ musb_readb = __musb_readb;
++ musb_writeb = __musb_writeb;
++ }
++
++ dev_info(dev, "dma type: %s\n", get_dma_name(musb));
+
+ /* The musb_platform_init() call:
+ * - adjusts musb->mregs and musb->isr if needed,
+@@ -1912,10 +2018,15 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
+ }
+
+ #ifndef CONFIG_MUSB_PIO_ONLY
+- if (use_dma && dev->dma_mask) {
++ if (dev->dma_mask) {
+ struct dma_controller *c;
+
+- c = dma_controller_create(musb, musb->mregs);
++ if (!musb->ops->dma_controller_create) {
++ dev_err(dev, "no dma_controller_create for non-PIO mode!\n");
++ status = -ENODEV;
++ goto fail3;
++ }
++ c = musb->ops->dma_controller_create(musb, musb->mregs);
+ musb->dma_controller = c;
+ if (c)
+ (void) c->start(c);
+@@ -1936,8 +2047,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
+ if (status < 0)
+ goto fail3;
+
+- setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);
+-
+ /* Init IRQ workqueue before request_irq */
+ INIT_WORK(&musb->irq_work, musb_irq_work);
+
+@@ -2034,8 +2143,30 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
+ ? "DMA" : "PIO",
+ musb->nIrq);
+
++ if (status == 0) {
++ u8 drvbuf[19];
++ sprintf(drvbuf, "driver/musb_hdrc.%d", musb->id);
++ musb_debug_create(drvbuf, musb);
++ }
++
++ if (is_host_enabled(musb)) {
++ musb->gb_queue = create_singlethread_workqueue(dev_name(dev));
++ if (musb->gb_queue == NULL)
++ goto fail6;
++ /* Init giveback workqueue */
++ INIT_WORK(&musb->gb_work, musb_gb_work);
++ }
++
++ /* setup otg_timer */
++ if (is_otg_enabled(musb))
++ setup_timer(&musb->otg_timer, musb_otg_timer_func,
++ (unsigned long) musb);
+ return 0;
+
++fail6:
++ if (is_host_enabled(musb))
++ destroy_workqueue(musb->gb_queue);
++
+ fail5:
+ musb_exit_debugfs(musb);
+
+@@ -2067,20 +2198,29 @@ fail0:
+ /* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just
+ * bridge to a platform device; this driver then suffices.
+ */
+-
+-#ifndef CONFIG_MUSB_PIO_ONLY
+-static u64 *orig_dma_mask;
+-#endif
+-
+-static int __init musb_probe(struct platform_device *pdev)
++static int __devinit musb_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+ int irq = platform_get_irq_byname(pdev, "mc");
+ int status;
+ struct resource *iomem;
+ void __iomem *base;
++ char res_name[20];
++
++ if (pdev->id == -1)
++ strcpy(res_name, "mc");
++ else
++ sprintf(res_name, "musb%d-irq", pdev->id);
++ irq = platform_get_irq_byname(pdev, res_name);
++
++ if (pdev->id == -1)
++ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ else {
++ sprintf(res_name, "musb%d", pdev->id);
++ iomem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
++ res_name);
++ }
+
+- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem || irq <= 0)
+ return -ENODEV;
+
+@@ -2090,10 +2230,6 @@ static int __init musb_probe(struct platform_device *pdev)
+ return -ENOMEM;
+ }
+
+-#ifndef CONFIG_MUSB_PIO_ONLY
+- /* clobbered by use_dma=n */
+- orig_dma_mask = dev->dma_mask;
+-#endif
+ status = musb_init_controller(dev, irq, base);
+ if (status < 0)
+ iounmap(base);
+@@ -2105,6 +2241,7 @@ static int __exit musb_remove(struct platform_device *pdev)
+ {
+ struct musb *musb = dev_to_musb(&pdev->dev);
+ void __iomem *ctrl_base = musb->ctrl_base;
++ u8 drvbuf[19];
+
+ /* this gets called on rmmod.
+ * - Host mode: host may still be active
+@@ -2114,20 +2251,22 @@ static int __exit musb_remove(struct platform_device *pdev)
+ pm_runtime_get_sync(musb->controller);
+ musb_exit_debugfs(musb);
+ musb_shutdown(pdev);
++ sprintf(drvbuf, "driver/musb_hdrc.%d", musb->id);
++ musb_debug_delete(drvbuf, musb);
+
+ pm_runtime_put(musb->controller);
+ musb_free(musb);
+ iounmap(ctrl_base);
+ device_init_wakeup(&pdev->dev, 0);
+ #ifndef CONFIG_MUSB_PIO_ONLY
+- pdev->dev.dma_mask = orig_dma_mask;
++ pdev->dev.dma_mask = musb->orig_dma_mask;
+ #endif
+ return 0;
+ }
+
+ #ifdef CONFIG_PM
+
+-static void musb_save_context(struct musb *musb)
++void musb_save_context(struct musb *musb)
+ {
+ int i;
+ void __iomem *musb_base = musb->mregs;
+@@ -2156,6 +2295,7 @@ static void musb_save_context(struct musb *musb)
+ if (!epio)
+ continue;
+
++ musb_writeb(musb_base, MUSB_INDEX, i);
+ musb->context.index_regs[i].txmaxp =
+ musb_readw(epio, MUSB_TXMAXP);
+ musb->context.index_regs[i].txcsr =
+@@ -2201,8 +2341,9 @@ static void musb_save_context(struct musb *musb)
+ }
+ }
+ }
++EXPORT_SYMBOL(musb_save_context);
+
+-static void musb_restore_context(struct musb *musb)
++void musb_restore_context(struct musb *musb)
+ {
+ int i;
+ void __iomem *musb_base = musb->mregs;
+@@ -2231,6 +2372,7 @@ static void musb_restore_context(struct musb *musb)
+ if (!epio)
+ continue;
+
++ musb_writeb(musb_base, MUSB_INDEX, i);
+ musb_writew(epio, MUSB_TXMAXP,
+ musb->context.index_regs[i].txmaxp);
+ musb_writew(epio, MUSB_TXCSR,
+@@ -2281,17 +2423,20 @@ static void musb_restore_context(struct musb *musb)
+ }
+ musb_writeb(musb_base, MUSB_INDEX, musb->context.index);
+ }
++EXPORT_SYMBOL(musb_restore_context);
+
+ static int musb_suspend(struct device *dev)
+ {
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
++ int ret = 0;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (is_peripheral_active(musb)) {
+- /* FIXME force disconnect unless we know USB will wake
+- * the system up quickly enough to respond ...
++ /*
++ * Don't allow system suspend while peripheral mode
++ * is active and a cable is connected to the host.
+ */
+ } else if (is_host_active(musb)) {
+ /* we know all the children are suspended; sometimes
+@@ -2300,7 +2445,7 @@ static int musb_suspend(struct device *dev)
+ }
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+- return 0;
++ return ret;
+ }
+
+ static int musb_resume_noirq(struct device *dev)
+@@ -2324,7 +2469,6 @@ static int musb_runtime_suspend(struct device *dev)
+ static int musb_runtime_resume(struct device *dev)
+ {
+ struct musb *musb = dev_to_musb(dev);
+- static int first = 1;
+
+ /*
+ * When pm_runtime_get_sync called for the first time in driver
+@@ -2335,9 +2479,11 @@ static int musb_runtime_resume(struct device *dev)
+ * Also context restore without save does not make
+ * any sense
+ */
+- if (!first)
++ if (musb->first)
++ musb->first = 0;
++ else
+ musb_restore_context(musb);
+- first = 0;
++
+
+ return 0;
+ }
+@@ -2361,6 +2507,7 @@ static struct platform_driver musb_driver = {
+ .owner = THIS_MODULE,
+ .pm = MUSB_DEV_PM_OPS,
+ },
++ .probe = musb_probe,
+ .remove = __exit_p(musb_remove),
+ .shutdown = musb_shutdown,
+ };
+@@ -2377,7 +2524,7 @@ static int __init musb_init(void)
+ ", "
+ "otg (peripheral+host)",
+ musb_driver_name);
+- return platform_driver_probe(&musb_driver, musb_probe);
++ return platform_driver_register(&musb_driver);
+ }
+
+ /* make us init after usbcore and i2c (transceivers, regulators, etc)
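The musb_core.c hunks above make the controller state per-instance (musb->id, musb->fifo_mode, musb->orig_dma_mask) and switch musb_probe() to named resources plus platform_driver_register(), so more than one MUSB instance can bind. As a minimal sketch, a glue layer might register a child device like the following for instance 0; only the "musb%d" / "musb%d-irq" resource-name convention comes from musb_probe() above, while the device name, base address and IRQ number here are placeholders.

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Hypothetical child device for MUSB instance 0; resource names match
 * the lookup done in the reworked musb_probe().  Numeric values are
 * placeholders, not real hardware addresses.
 */
static struct resource musb0_resources[] = {
	{
		.name	= "musb0",		/* matched by "musb%d" */
		.start	= 0x47401400,
		.end	= 0x47401400 + 0x7ff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "musb0-irq",		/* matched by "musb%d-irq" */
		.start	= 18,
		.end	= 18,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device musb0_device = {
	.name		= "musb-hdrc",		/* assumed core driver name */
	.id		= 0,			/* becomes musb->id */
	.resource	= musb0_resources,
	.num_resources	= ARRAY_SIZE(musb0_resources),
};

The glue's own probe would then call platform_device_register(&musb0_device), or platform_device_add() on a dynamically allocated device, once per controller instance.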
+diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
+index b3c065a..3605b97 100644
+--- a/drivers/usb/musb/musb_core.h
++++ b/drivers/usb/musb/musb_core.h
+@@ -39,8 +39,6 @@
+ #include <linux/list.h>
+ #include <linux/interrupt.h>
+ #include <linux/errno.h>
+-#include <linux/timer.h>
+-#include <linux/clk.h>
+ #include <linux/device.h>
+ #include <linux/usb/ch9.h>
+ #include <linux/usb/gadget.h>
+@@ -62,6 +60,9 @@ struct musb_ep;
+ #define MUSB_HWVERS_1900 0x784
+ #define MUSB_HWVERS_2000 0x800
+
++extern u8 (*musb_readb)(const void __iomem *addr, unsigned offset);
++extern void (*musb_writeb)(void __iomem *addr, unsigned offset, u8 data);
++
+ #include "musb_debug.h"
+ #include "musb_dma.h"
+
+@@ -97,8 +98,6 @@ struct musb_ep;
+
+ /****************************** PERIPHERAL ROLE *****************************/
+
+-#define is_peripheral_capable() (1)
+-
+ extern irqreturn_t musb_g_ep0_irq(struct musb *);
+ extern void musb_g_tx(struct musb *, u8);
+ extern void musb_g_rx(struct musb *, u8);
+@@ -110,8 +109,6 @@ extern void musb_g_disconnect(struct musb *);
+
+ /****************************** HOST ROLE ***********************************/
+
+-#define is_host_capable() (1)
+-
+ extern irqreturn_t musb_h_ep0_irq(struct musb *);
+ extern void musb_host_tx(struct musb *, u8);
+ extern void musb_host_rx(struct musb *, u8);
+@@ -146,15 +143,10 @@ enum musb_g_ep0_state {
+ MUSB_EP0_STAGE_ACKWAIT, /* after zlp, before statusin */
+ } __attribute__ ((packed));
+
+-/*
+- * OTG protocol constants. See USB OTG 1.3 spec,
+- * sections 5.5 "Device Timings" and 6.6.5 "Timers".
+- */
++/* OTG protocol constants */
+ #define OTG_TIME_A_WAIT_VRISE 100 /* msec (max) */
+-#define OTG_TIME_A_WAIT_BCON 1100 /* min 1 second */
+-#define OTG_TIME_A_AIDL_BDIS 200 /* min 200 msec */
+-#define OTG_TIME_B_ASE0_BRST 100 /* min 3.125 ms */
+-
++#define OTG_TIME_A_WAIT_BCON 0 /* 0=infinite; min 1000 msec */
++#define OTG_TIME_A_IDLE_BDIS 200 /* msec (min) */
+
+ /*************************** REGISTER ACCESS ********************************/
+
+@@ -162,34 +154,12 @@ enum musb_g_ep0_state {
+ * directly with the "flat" model, or after setting up an index register.
+ */
+
+-#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_SOC_OMAP2430) \
+- || defined(CONFIG_SOC_OMAP3430) || defined(CONFIG_BLACKFIN) \
+- || defined(CONFIG_ARCH_OMAP4)
+-/* REVISIT indexed access seemed to
+- * misbehave (on DaVinci) for at least peripheral IN ...
+- */
+-#define MUSB_FLAT_REG
+-#endif
+-
+-/* TUSB mapping: "flat" plus ep0 special cases */
+-#if defined(CONFIG_USB_MUSB_TUSB6010) || \
+- defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
+-#define musb_ep_select(_mbase, _epnum) \
+- musb_writeb((_mbase), MUSB_INDEX, (_epnum))
+-#define MUSB_EP_OFFSET MUSB_TUSB_OFFSET
+-
+-/* "flat" mapping: each endpoint has its own i/o address */
+-#elif defined(MUSB_FLAT_REG)
+-#define musb_ep_select(_mbase, _epnum) (((void)(_mbase)), ((void)(_epnum)))
+-#define MUSB_EP_OFFSET MUSB_FLAT_OFFSET
+-
+-/* "indexed" mapping: INDEX register controls register bank select */
+-#else
+-#define musb_ep_select(_mbase, _epnum) \
+- musb_writeb((_mbase), MUSB_INDEX, (_epnum))
+-#define MUSB_EP_OFFSET MUSB_INDEXED_OFFSET
+-#endif
++#define musb_ep_select(_musb, _mbase, _epnum) do { \
++ if (_musb->ops->flags & MUSB_GLUE_EP_ADDR_INDEXED_MAPPING) \
++ musb_writeb((_mbase), MUSB_INDEX, (_epnum)); \
++ } while (0)
+
++#define MUSB_EP_OFFSET MUSB_OFFSET
+ /****************************** FUNCTIONS ********************************/
+
+ #define MUSB_HST_MODE(_musb)\
+@@ -204,32 +174,60 @@ enum musb_g_ep0_state {
+
+ /******************************** TYPES *************************************/
+
++#define MUSB_GLUE_TUSB_STYLE 0x0001
++#define MUSB_GLUE_EP_ADDR_FLAT_MAPPING 0x0002
++#define MUSB_GLUE_EP_ADDR_INDEXED_MAPPING 0x0004
++#define MUSB_GLUE_DMA_INVENTRA 0x0008
++#define MUSB_GLUE_DMA_CPPI 0x0010
++#define MUSB_GLUE_DMA_TUSB 0x0020
++#define MUSB_GLUE_DMA_UX500 0x0040
++#define MUSB_GLUE_DMA_CPPI41 0x0080
++
++
+ /**
+ * struct musb_platform_ops - Operations passed to musb_core by HW glue layer
++ * @fifo_mode: default fifo_mode used by this glue layer
++ * @flags: MUSB_GLUE_* flags describing this glue layer's hardware
+ * @init: turns on clocks, sets up platform-specific registers, etc
+ * @exit: undoes @init
++ * @read_fifo: read data from the MUSB FIFO in PIO mode
++ * @write_fifo: write data to the MUSB FIFO in PIO mode
+ * @set_mode: forcefully changes operating mode
+ * @try_idle: tries to idle the IP
++ * @get_hw_revision: get hardware revision
+ * @vbus_status: returns vbus status if possible
+ * @set_vbus: forces vbus status
+ * @adjust_channel_params: pre check for standard dma channel_program func
++ * @dma_controller_create: create the DMA controller for this glue layer
++ * @dma_controller_destroy: destroy the DMA controller
+ */
+ struct musb_platform_ops {
++ short fifo_mode;
++ unsigned short flags;
+ int (*init)(struct musb *musb);
+ int (*exit)(struct musb *musb);
+
+ void (*enable)(struct musb *musb);
+ void (*disable)(struct musb *musb);
+
++ void (*read_fifo)(struct musb_hw_ep *hw_ep, u16 len, u8 *buf);
++ void (*write_fifo)(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf);
++
+ int (*set_mode)(struct musb *musb, u8 mode);
+ void (*try_idle)(struct musb *musb, unsigned long timeout);
+
++ u16 (*get_hw_revision)(struct musb *musb);
++
+ int (*vbus_status)(struct musb *musb);
+ void (*set_vbus)(struct musb *musb, int on);
+
+ int (*adjust_channel_params)(struct dma_channel *channel,
+ u16 packet_sz, u8 *mode,
+ dma_addr_t *dma_addr, u32 *len);
++ struct dma_controller* (*dma_controller_create)(struct musb *,
++ void __iomem *);
++ void (*dma_controller_destroy)(struct dma_controller *);
++ int (*simulate_babble_intr)(struct musb *musb);
+ };
+
+ /*
+@@ -242,10 +240,8 @@ struct musb_hw_ep {
+ void __iomem *fifo;
+ void __iomem *regs;
+
+-#if defined(CONFIG_USB_MUSB_TUSB6010) || \
+- defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
++ /* FIXME: the following field is only used by TUSB */
+ void __iomem *conf;
+-#endif
+
+ /* index in musb->endpoints[] */
+ u8 epnum;
+@@ -260,13 +256,13 @@ struct musb_hw_ep {
+ struct dma_channel *tx_channel;
+ struct dma_channel *rx_channel;
+
+-#if defined(CONFIG_USB_MUSB_TUSB6010) || \
+- defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
+- /* TUSB has "asynchronous" and "synchronous" dma modes */
++ /*
++ * TUSB has "asynchronous" and "synchronous" dma modes
++ * FIXME: the following three fields are only valid for TUSB.
++ */
+ dma_addr_t fifo_async;
+ dma_addr_t fifo_sync;
+ void __iomem *fifo_sync_va;
+-#endif
+
+ void __iomem *target_regs;
+
+@@ -311,6 +307,7 @@ struct musb_context_registers {
+ u8 index, testmode;
+
+ u8 devctl, busctl, misc;
++ u32 otg_interfsel;
+
+ struct musb_csr_regs index_regs[MUSB_C_NUM_EPS];
+ };
+@@ -327,6 +324,9 @@ struct musb {
+
+ irqreturn_t (*isr)(int, void *);
+ struct work_struct irq_work;
++ struct work_struct work;
++ struct work_struct otg_notifier_work;
++ u8 enable_babble_work;
+ u16 hwvers;
+
+ /* this hub status bit is reserved by USB 2.0 and not seen by usbcore */
+@@ -350,7 +350,11 @@ struct musb {
+ struct list_head in_bulk; /* of musb_qh */
+ struct list_head out_bulk; /* of musb_qh */
+
+- struct timer_list otg_timer;
++ struct workqueue_struct *gb_queue;
++ struct work_struct gb_work;
++ spinlock_t gb_lock;
++ struct list_head gb_list; /* of urbs */
++
+ struct notifier_block nb;
+
+ struct dma_controller *dma_controller;
+@@ -359,12 +363,10 @@ struct musb {
+ void __iomem *ctrl_base;
+ void __iomem *mregs;
+
+-#if defined(CONFIG_USB_MUSB_TUSB6010) || \
+- defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
++ /* FIXME: the three fields below are only used by TUSB */
+ dma_addr_t async;
+ dma_addr_t sync;
+ void __iomem *sync_va;
+-#endif
+
+ /* passed down from chip/board specific irq handlers */
+ u8 int_usb;
+@@ -372,6 +374,7 @@ struct musb {
+ u16 int_tx;
+
+ struct otg_transceiver *xceiv;
++ u8 xceiv_event;
+
+ int nIrq;
+ unsigned irq_wake:1;
+@@ -454,6 +457,19 @@ struct musb {
+ #ifdef MUSB_CONFIG_PROC_FS
+ struct proc_dir_entry *proc_entry;
+ #endif
++ /* id for multiple musb instances */
++ u8 id;
++ struct timer_list otg_workaround;
++ unsigned long last_timer;
++ int first;
++ int old_state;
++ struct timer_list otg_timer;
++ u8 en_otg_timer;
++ u8 en_otgw_timer;
++#ifndef CONFIG_MUSB_PIO_ONLY
++ u64 *orig_dma_mask;
++#endif
++ short fifo_mode;
+ };
+
+ static inline struct musb *gadget_to_musb(struct usb_gadget *g)
+@@ -496,7 +512,7 @@ static inline int musb_read_fifosize(struct musb *musb,
+ u8 reg = 0;
+
+ /* read from core using indexed model */
+- reg = musb_readb(mbase, MUSB_EP_OFFSET(epnum, MUSB_FIFOSIZE));
++ reg = musb_readb(mbase, MUSB_EP_OFFSET(musb, epnum, MUSB_FIFOSIZE));
+ /* 0's returned when no more endpoints */
+ if (!reg)
+ return -ENODEV;
+@@ -601,4 +617,47 @@ static inline int musb_platform_exit(struct musb *musb)
+ return musb->ops->exit(musb);
+ }
+
++static inline u16 musb_platform_get_hw_revision(struct musb *musb)
++{
++ if (!musb->ops->get_hw_revision)
++ return musb_readw(musb->mregs, MUSB_HWVERS);
++
++ return musb->ops->get_hw_revision(musb);
++}
++
++static inline int musb_simulate_babble_intr(struct musb *musb)
++{
++ if (!musb->ops->simulate_babble_intr)
++ return -EINVAL;
++
++ return musb->ops->simulate_babble_intr(musb);
++}
++
++static inline const char *get_dma_name(struct musb *musb)
++{
++#ifdef CONFIG_MUSB_PIO_ONLY
++ return "pio";
++#else
++ if (musb->ops->flags & MUSB_GLUE_DMA_INVENTRA)
++ return "dma-inventra";
++ else if (musb->ops->flags & MUSB_GLUE_DMA_CPPI)
++ return "dma-cppi3";
++ else if (musb->ops->flags & MUSB_GLUE_DMA_CPPI41)
++ return "dma-cppi41";
++ else if (musb->ops->flags & MUSB_GLUE_DMA_TUSB)
++ return "dma-tusb-omap";
++ else
++ return "?dma?";
++#endif
++}
++
++extern void musb_gb_work(struct work_struct *data);
++/*-------------------------- ProcFS definitions ---------------------*/
++
++struct proc_dir_entry;
++
++extern struct proc_dir_entry *musb_debug_create(char *name, struct musb *data);
++extern void musb_debug_delete(char *name, struct musb *data);
++extern void musb_save_context(struct musb *musb);
++extern void musb_restore_context(struct musb *musb);
+ #endif /* __MUSB_CORE_H__ */
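Taken together, the MUSB_GLUE_* flags and the extended struct musb_platform_ops let each glue layer describe its register mapping, FIFO accessors, fifo_mode and DMA engine, so musb_core can dispatch at run time instead of through the removed #ifdef blocks. A rough sketch of how a glue driver might fill in the new fields follows; all my_musb_* helpers are assumed to exist in that glue driver with the prototypes required above, while cppi41_dma_controller_create/destroy are the factories declared in musb_dma.h later in this patch.

#include "musb_core.h"	/* struct musb_platform_ops, MUSB_GLUE_* flags */

/* Hypothetical ops table for a CPPI 4.1 based, indexed-register glue;
 * every my_musb_* function is assumed to be defined in the glue driver.
 */
static struct musb_platform_ops my_musb_ops = {
	.fifo_mode		= 4,	/* copied to musb->fifo_mode at init */
	.flags			= MUSB_GLUE_EP_ADDR_INDEXED_MAPPING
				  | MUSB_GLUE_DMA_CPPI41,
	.init			= my_musb_init,
	.exit			= my_musb_exit,
	.enable			= my_musb_enable,
	.disable		= my_musb_disable,
	.read_fifo		= my_musb_read_fifo,
	.write_fifo		= my_musb_write_fifo,
	.set_mode		= my_musb_set_mode,
	.try_idle		= my_musb_try_idle,
	.dma_controller_create	= cppi41_dma_controller_create,
	.dma_controller_destroy	= cppi41_dma_controller_destroy,
};

With these flags set, musb_ep_select() writes MUSB_INDEX for this instance, and is_cppi41_enabled()/get_dma_name() resolve to the CPPI 4.1 paths at run time.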
+diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h
+index 742eada..27ba8f7 100644
+--- a/drivers/usb/musb/musb_debug.h
++++ b/drivers/usb/musb/musb_debug.h
+@@ -43,8 +43,8 @@
+ #define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args)
+
+ #ifdef CONFIG_DEBUG_FS
+-extern int musb_init_debugfs(struct musb *musb);
+-extern void musb_exit_debugfs(struct musb *musb);
++int musb_init_debugfs(struct musb *musb);
++void musb_exit_debugfs(struct musb *musb);
+ #else
+ static inline int musb_init_debugfs(struct musb *musb)
+ {
+diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
+index 61f4ee4..c902b66 100644
+--- a/drivers/usb/musb/musb_debugfs.c
++++ b/drivers/usb/musb/musb_debugfs.c
+@@ -33,11 +33,7 @@
+
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+-#include <linux/sched.h>
+ #include <linux/init.h>
+-#include <linux/list.h>
+-#include <linux/platform_device.h>
+-#include <linux/io.h>
+ #include <linux/debugfs.h>
+ #include <linux/seq_file.h>
+
+@@ -46,10 +42,6 @@
+ #include "musb_core.h"
+ #include "musb_debug.h"
+
+-#ifdef CONFIG_ARCH_DAVINCI
+-#include "davinci.h"
+-#endif
+-
+ struct musb_register_map {
+ char *name;
+ unsigned offset;
+@@ -243,7 +235,7 @@ static const struct file_operations musb_test_mode_fops = {
+ .release = single_release,
+ };
+
+-int __init musb_init_debugfs(struct musb *musb)
++int __devinit musb_init_debugfs(struct musb *musb)
+ {
+ struct dentry *root;
+ struct dentry *file;
+@@ -280,7 +272,7 @@ err0:
+ return ret;
+ }
+
+-void /* __init_or_exit */ musb_exit_debugfs(struct musb *musb)
++void /* __devinit_or_exit */ musb_exit_debugfs(struct musb *musb)
+ {
+ debugfs_remove_recursive(musb_debugfs_root);
+ }
+diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
+index 3a97c4e..06e857e 100644
+--- a/drivers/usb/musb/musb_dma.h
++++ b/drivers/usb/musb/musb_dma.h
+@@ -68,16 +68,35 @@ struct musb_hw_ep;
+ #define is_dma_capable() (0)
+ #endif
+
++#ifdef CONFIG_USB_UX500_DMA
++#define is_ux500_dma(musb) (musb->ops->flags & MUSB_GLUE_DMA_UX500)
++#else
++#define is_ux500_dma(musb) 0
++#endif
++
++#ifdef CONFIG_USB_INVENTRA_DMA
++#define is_inventra_dma(musb) (musb->ops->flags & MUSB_GLUE_DMA_INVENTRA)
++#else
++#define is_inventra_dma(musb) 0
++#endif
++
+ #ifdef CONFIG_USB_TI_CPPI_DMA
+-#define is_cppi_enabled() 1
++#define is_cppi_enabled(musb) (musb->ops->flags & MUSB_GLUE_DMA_CPPI)
++
++#else
++#define is_cppi_enabled(musb) 0
++#endif
++
++#ifdef CONFIG_USB_TI_CPPI41_DMA
++#define is_cppi41_enabled(musb) (musb->ops->flags & MUSB_GLUE_DMA_CPPI41)
+ #else
+-#define is_cppi_enabled() 0
++#define is_cppi41_enabled(musb) 0
+ #endif
+
+ #ifdef CONFIG_USB_TUSB_OMAP_DMA
+-#define tusb_dma_omap() 1
++#define tusb_dma_omap(musb) (musb->ops->flags & MUSB_GLUE_DMA_TUSB)
+ #else
+-#define tusb_dma_omap() 0
++#define tusb_dma_omap(musb) 0
+ #endif
+
+ /* Anomaly 05000456 - USB Receive Interrupt Is Not Generated in DMA Mode 1
+@@ -178,9 +197,72 @@ struct dma_controller {
+ extern void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit);
+
+
+-extern struct dma_controller *__init
+-dma_controller_create(struct musb *, void __iomem *);
++#ifdef CONFIG_USB_TI_CPPI_DMA
++extern struct dma_controller *__devinit
++cppi_dma_controller_create(struct musb *, void __iomem *);
++
++extern void cppi_dma_controller_destroy(struct dma_controller *);
++#else
++static inline struct dma_controller *__devinit
++cppi_dma_controller_create(struct musb *musb, void __iomem *mregs)
++{
++ return NULL;
++}
++
++static inline void cppi_dma_controller_destroy(struct dma_controller *c)
++{
++}
++#endif
++
++#ifdef CONFIG_USB_TI_CPPI41_DMA
++extern struct dma_controller *__devinit
++cppi41_dma_controller_create(struct musb *, void __iomem *);
++
++extern void cppi41_dma_controller_destroy(struct dma_controller *);
++#else
++static inline struct dma_controller *__devinit
++cppi41_dma_controller_create(struct musb *musb, void __iomem *mregs)
++{
++ return NULL;
++}
++
++static inline void cppi41_dma_controller_destroy(struct dma_controller *c)
++{
++}
++#endif
++
++#ifdef CONFIG_USB_INVENTRA_DMA
++extern struct dma_controller *__devinit
++inventra_dma_controller_create(struct musb *, void __iomem *);
++
++extern void inventra_dma_controller_destroy(struct dma_controller *);
++#else
++static inline struct dma_controller *__devinit
++inventra_dma_controller_create(struct musb *musb, void __iomem *mregs)
++{
++ return NULL;
++}
+
+-extern void dma_controller_destroy(struct dma_controller *);
++static inline void inventra_dma_controller_destroy(struct dma_controller *c)
++{
++}
++#endif
++
++#ifdef CONFIG_USB_TUSB_OMAP_DMA
++extern struct dma_controller *__devinit
++tusb_dma_controller_create(struct musb *, void __iomem *);
++
++extern void tusb_dma_controller_destroy(struct dma_controller *);
++#else
++static inline struct dma_controller *__devinit
++tusb_dma_controller_create(struct musb *musb, void __iomem *mregs)
++{
++ return NULL;
++}
++
++static inline void tusb_dma_controller_destroy(struct dma_controller *c)
++{
++}
++#endif
+
+ #endif /* __MUSB_DMA_H__ */
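Each DMA backend now exports its own create/destroy pair from musb_dma.h, with empty inline stubs when the corresponding Kconfig option is disabled, so musb_core and the glue layers can reference every factory unconditionally. A new backend would follow the same pattern; the sketch below uses a purely hypothetical CONFIG_USB_MYDMA option and mydma_* names.

#ifdef CONFIG_USB_MYDMA
extern struct dma_controller *__devinit
mydma_controller_create(struct musb *, void __iomem *);

extern void mydma_controller_destroy(struct dma_controller *);
#else
static inline struct dma_controller *__devinit
mydma_controller_create(struct musb *musb, void __iomem *mregs)
{
	return NULL;	/* backend not compiled in */
}

static inline void mydma_controller_destroy(struct dma_controller *c)
{
}
#endif

The glue that uses such a backend would point .dma_controller_create at mydma_controller_create and set a matching MUSB_GLUE_DMA_* flag, keeping the is_*_enabled()-style checks and get_dma_name() consistent.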
+diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
+index c860597..305d843 100644
+--- a/drivers/usb/musb/musb_gadget.c
++++ b/drivers/usb/musb/musb_gadget.c
+@@ -40,8 +40,6 @@
+ #include <linux/smp.h>
+ #include <linux/spinlock.h>
+ #include <linux/delay.h>
+-#include <linux/moduleparam.h>
+-#include <linux/stat.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/slab.h>
+
+@@ -278,8 +276,6 @@ static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
+ }
+
+
+-#ifdef CONFIG_USB_INVENTRA_DMA
+-
+ /* Peripheral tx (IN) using Mentor DMA works as follows:
+ Only mode 0 is used for transfers <= wPktSize,
+ mode 1 is used for larger transfers,
+@@ -310,8 +306,6 @@ static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
+ * upleveling from irq-per-packet to irq-per-buffer.
+ */
+
+-#endif
+-
+ /*
+ * An endpoint is transmitting data. This can be called either from
+ * the IRQ routine or from ep.queue() to kickstart a request on an
+@@ -372,8 +366,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
+
+ /* MUSB_TXCSR_P_ISO is still set correctly */
+
+-#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
+- {
++ if (is_inventra_dma(musb) || is_ux500_dma(musb)) {
+ if (request_size < musb_ep->packet_sz)
+ musb_ep->dma->desired_mode = 0;
+ else
+@@ -410,49 +403,58 @@ static void txstate(struct musb *musb, struct musb_request *req)
+
+ musb_writew(epio, MUSB_TXCSR, csr);
+ }
+- }
++ } else if (is_cppi_enabled(musb) || is_cppi41_enabled(musb)) {
++ /* program endpoint CSR first, then setup DMA */
++ csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
+
+-#elif defined(CONFIG_USB_TI_CPPI_DMA)
+- /* program endpoint CSR first, then setup DMA */
+- csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
+- csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
+- MUSB_TXCSR_MODE;
+- musb_writew(epio, MUSB_TXCSR,
+- (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
+- | csr);
++ if (request_size == 0)
++ csr &= ~(MUSB_TXCSR_DMAENAB |
++ MUSB_TXCSR_DMAMODE);
++ else
++ csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
++ MUSB_TXCSR_MODE;
++ musb_writew(epio, MUSB_TXCSR,
++ (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
++ | csr);
+
+- /* ensure writebuffer is empty */
+- csr = musb_readw(epio, MUSB_TXCSR);
++ /* ensure writebuffer is empty */
++ csr = musb_readw(epio, MUSB_TXCSR);
+
+- /* NOTE host side sets DMAENAB later than this; both are
+- * OK since the transfer dma glue (between CPPI and Mentor
+- * fifos) just tells CPPI it could start. Data only moves
+- * to the USB TX fifo when both fifos are ready.
+- */
++ /* NOTE host side sets DMAENAB later than this; both are
++ * OK since the transfer dma glue (between CPPI & Mentor
++ * fifos) just tells CPPI it could start. Data only
++ * moves to the USB TX fifo when both fifos are ready.
++ */
+
+- /* "mode" is irrelevant here; handle terminating ZLPs like
+- * PIO does, since the hardware RNDIS mode seems unreliable
+- * except for the last-packet-is-already-short case.
+- */
+- use_dma = use_dma && c->channel_program(
+- musb_ep->dma, musb_ep->packet_sz,
+- 0,
+- request->dma + request->actual,
+- request_size);
+- if (!use_dma) {
+- c->channel_release(musb_ep->dma);
+- musb_ep->dma = NULL;
+- csr &= ~MUSB_TXCSR_DMAENAB;
+- musb_writew(epio, MUSB_TXCSR, csr);
+- /* invariant: prequest->buf is non-null */
++ /* "mode" is irrelevant here; handle terminating ZLPs
++ * like PIO does, since the hardware RNDIS mode seems
++ * unreliable except for the last-packet-is-already-
++ * short case.
++ */
++ /* for zero byte transfer use pio mode */
++ if (request_size == 0)
++ use_dma = 0;
++ else {
++ use_dma = use_dma && c->channel_program(
++ musb_ep->dma, musb_ep->packet_sz,
++ 0,
++ request->dma + request->actual,
++ request_size);
++ if (!use_dma) {
++ c->channel_release(musb_ep->dma);
++ musb_ep->dma = NULL;
++ csr &= ~MUSB_TXCSR_DMAENAB;
++ musb_writew(epio, MUSB_TXCSR, csr);
++ /* invariant: request->buf is non-null */
++ }
++ }
++ } else if (tusb_dma_omap(musb)) {
++ use_dma = use_dma && c->channel_program(
++ musb_ep->dma, musb_ep->packet_sz,
++ request->zero,
++ request->dma + request->actual,
++ request_size);
+ }
+-#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
+- use_dma = use_dma && c->channel_program(
+- musb_ep->dma, musb_ep->packet_sz,
+- request->zero,
+- request->dma + request->actual,
+- request_size);
+-#endif
+ }
+ #endif
+
+@@ -463,7 +465,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
+ */
+ unmap_dma_buffer(req, musb);
+
+- musb_write_fifo(musb_ep->hw_ep, fifo_count,
++ musb->ops->write_fifo(musb_ep->hw_ep, fifo_count,
+ (u8 *) (request->buf + request->actual));
+ request->actual += fifo_count;
+ csr |= MUSB_TXCSR_TXPKTRDY;
+@@ -494,7 +496,7 @@ void musb_g_tx(struct musb *musb, u8 epnum)
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ struct dma_channel *dma;
+
+- musb_ep_select(mbase, epnum);
++ musb_ep_select(musb, mbase, epnum);
+ req = next_request(musb_ep);
+ request = &req->request;
+
+@@ -555,11 +557,9 @@ void musb_g_tx(struct musb *musb, u8 epnum)
+ if ((request->zero && request->length
+ && (request->length % musb_ep->packet_sz == 0)
+ && (request->actual == request->length))
+-#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
+- || (is_dma && (!dma->desired_mode ||
+- (request->actual &
+- (musb_ep->packet_sz - 1))))
+-#endif
++ || ((is_inventra_dma(musb) || is_ux500_dma(musb)) &&
++ is_dma && (!dma->desired_mode || (request->actual &
++ (musb_ep->packet_sz - 1))))
+ ) {
+ /*
+ * On DMA completion, FIFO may not be
+@@ -576,15 +576,6 @@ void musb_g_tx(struct musb *musb, u8 epnum)
+
+ if (request->actual == request->length) {
+ musb_g_giveback(musb_ep, request, 0);
+- /*
+- * In the giveback function the MUSB lock is
+- * released and acquired after sometime. During
+- * this time period the INDEX register could get
+- * changed by the gadget_queue function especially
+- * on SMP systems. Reselect the INDEX to be sure
+- * we are reading/modifying the right registers
+- */
+- musb_ep_select(mbase, epnum);
+ req = musb_ep->desc ? next_request(musb_ep) : NULL;
+ if (!req) {
+ dev_dbg(musb->controller, "%s idle now\n",
+@@ -599,8 +590,6 @@ void musb_g_tx(struct musb *musb, u8 epnum)
+
+ /* ------------------------------------------------------------ */
+
+-#ifdef CONFIG_USB_INVENTRA_DMA
+-
+ /* Peripheral rx (OUT) using Mentor DMA works as follows:
+ - Only mode 0 is used.
+
+@@ -628,8 +617,6 @@ void musb_g_tx(struct musb *musb, u8 epnum)
+ * Non-Mentor DMA engines can of course work differently.
+ */
+
+-#endif
+-
+ /*
+ * Context: controller locked, IRQs blocked, endpoint selected
+ */
+@@ -664,7 +651,8 @@ static void rxstate(struct musb *musb, struct musb_request *req)
+ return;
+ }
+
+- if (is_cppi_enabled() && is_buffer_mapped(req)) {
++ if ((is_cppi_enabled(musb) || is_cppi41_enabled(musb)) &&
++ is_buffer_mapped(req)) {
+ struct dma_controller *c = musb->dma_controller;
+ struct dma_channel *channel = musb_ep->dma;
+
+@@ -706,8 +694,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
+ use_mode_1 = 0;
+
+ if (request->actual < request->length) {
+-#ifdef CONFIG_USB_INVENTRA_DMA
+- if (is_buffer_mapped(req)) {
++ if (is_buffer_mapped(req) && is_inventra_dma(musb)) {
+ struct dma_controller *c;
+ struct dma_channel *channel;
+ int use_dma = 0;
+@@ -784,8 +771,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
+ if (use_dma)
+ return;
+ }
+-#elif defined(CONFIG_USB_UX500_DMA)
+- if ((is_buffer_mapped(req)) &&
++ if (is_ux500_dma(musb) && (is_buffer_mapped(req)) &&
+ (request->actual < request->length)) {
+
+ struct dma_controller *c;
+@@ -831,7 +817,6 @@ static void rxstate(struct musb *musb, struct musb_request *req)
+
+ return;
+ }
+-#endif /* Mentor's DMA */
+
+ fifo_count = request->length - request->actual;
+ dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
+@@ -841,8 +826,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
+
+ fifo_count = min_t(unsigned, len, fifo_count);
+
+-#ifdef CONFIG_USB_TUSB_OMAP_DMA
+- if (tusb_dma_omap() && is_buffer_mapped(req)) {
++ if (tusb_dma_omap(musb) && is_buffer_mapped(req)) {
+ struct dma_controller *c = musb->dma_controller;
+ struct dma_channel *channel = musb_ep->dma;
+ u32 dma_addr = request->dma + request->actual;
+@@ -856,7 +840,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
+ if (ret)
+ return;
+ }
+-#endif
++
+ /*
+ * Unmap the dma buffer back to cpu if dma channel
+ * programming fails. This buffer is mapped if the
+@@ -873,7 +857,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+
+- musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
++ musb->ops->read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
+ (request->buf + request->actual));
+ request->actual += fifo_count;
+
+@@ -912,7 +896,7 @@ void musb_g_rx(struct musb *musb, u8 epnum)
+ else
+ musb_ep = &hw_ep->ep_out;
+
+- musb_ep_select(mbase, epnum);
++ musb_ep_select(musb, mbase, epnum);
+
+ req = next_request(musb_ep);
+ if (!req)
+@@ -968,50 +952,37 @@ void musb_g_rx(struct musb *musb, u8 epnum)
+ musb_readw(epio, MUSB_RXCSR),
+ musb_ep->dma->actual_len, request);
+
+-#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
+- defined(CONFIG_USB_UX500_DMA)
+- /* Autoclear doesn't clear RxPktRdy for short packets */
+- if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
+- || (dma->actual_len
+- & (musb_ep->packet_sz - 1))) {
+- /* ack the read! */
+- csr &= ~MUSB_RXCSR_RXPKTRDY;
+- musb_writew(epio, MUSB_RXCSR, csr);
+- }
++ if (is_inventra_dma(musb) || tusb_dma_omap(musb)
++ || is_ux500_dma(musb)) {
++ /* Autoclear doesn't clear RxPktRdy for short packets */
++ if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
++ || (dma->actual_len
++ & (musb_ep->packet_sz - 1))) {
++ /* ack the read! */
++ csr &= ~MUSB_RXCSR_RXPKTRDY;
++ musb_writew(epio, MUSB_RXCSR, csr);
++ }
+
+- /* incomplete, and not short? wait for next IN packet */
+- if ((request->actual < request->length)
+- && (musb_ep->dma->actual_len
+- == musb_ep->packet_sz)) {
+- /* In double buffer case, continue to unload fifo if
+- * there is Rx packet in FIFO.
+- **/
+- csr = musb_readw(epio, MUSB_RXCSR);
+- if ((csr & MUSB_RXCSR_RXPKTRDY) &&
+- hw_ep->rx_double_buffered)
+- goto exit;
+- return;
++ /* incomplete, and not short? wait for next IN packet */
++ if ((request->actual < request->length)
++ && (musb_ep->dma->actual_len
++ == musb_ep->packet_sz)) {
++ /* In the double buffer case, continue to unload
++ * the fifo if there is an Rx packet in the FIFO.
++ */
++ csr = musb_readw(epio, MUSB_RXCSR);
++ if ((csr & MUSB_RXCSR_RXPKTRDY) &&
++ hw_ep->rx_double_buffered)
++ rxstate(musb, to_musb_request(request));
++ return;
++ }
+ }
+-#endif
+ musb_g_giveback(musb_ep, request, 0);
+- /*
+- * In the giveback function the MUSB lock is
+- * released and acquired after sometime. During
+- * this time period the INDEX register could get
+- * changed by the gadget_queue function especially
+- * on SMP systems. Reselect the INDEX to be sure
+- * we are reading/modifying the right registers
+- */
+- musb_ep_select(mbase, epnum);
+
+ req = next_request(musb_ep);
+ if (!req)
+ return;
+ }
+-#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
+- defined(CONFIG_USB_UX500_DMA)
+-exit:
+-#endif
+ /* Analyze request */
+ rxstate(musb, req);
+ }
+@@ -1079,7 +1050,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
+ /* enable the interrupts for the endpoint, set the endpoint
+ * packet size (or fail), set the mode, clear the fifo
+ */
+- musb_ep_select(mbase, epnum);
++ musb_ep_select(musb, mbase, epnum);
+ if (usb_endpoint_dir_in(desc)) {
+ u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);
+
+@@ -1218,7 +1189,7 @@ static int musb_gadget_disable(struct usb_ep *ep)
+ epio = musb->endpoints[epnum].regs;
+
+ spin_lock_irqsave(&musb->lock, flags);
+- musb_ep_select(musb->mregs, epnum);
++ musb_ep_select(musb, musb->mregs, epnum);
+
+ /* zero the endpoint sizes */
+ if (musb_ep->is_in) {
+@@ -1297,7 +1268,7 @@ void musb_ep_restart(struct musb *musb, struct musb_request *req)
+ req->tx ? "TX/IN" : "RX/OUT",
+ &req->request, req->request.length, req->epnum);
+
+- musb_ep_select(musb->mregs, req->epnum);
++ musb_ep_select(musb, musb->mregs, req->epnum);
+ if (req->tx)
+ txstate(musb, req);
+ else
+@@ -1391,7 +1362,7 @@ static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
+ else if (is_dma_capable() && musb_ep->dma) {
+ struct dma_controller *c = musb->dma_controller;
+
+- musb_ep_select(musb->mregs, musb_ep->current_epnum);
++ musb_ep_select(musb, musb->mregs, musb_ep->current_epnum);
+ if (c->channel_abort)
+ status = c->channel_abort(musb_ep->dma);
+ else
+@@ -1439,7 +1410,7 @@ static int musb_gadget_set_halt(struct usb_ep *ep, int value)
+ goto done;
+ }
+
+- musb_ep_select(mbase, epnum);
++ musb_ep_select(musb, mbase, epnum);
+
+ request = next_request(musb_ep);
+ if (value) {
+@@ -1527,7 +1498,7 @@ static int musb_gadget_fifo_status(struct usb_ep *ep)
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+- musb_ep_select(mbase, epnum);
++ musb_ep_select(musb, mbase, epnum);
+ /* FIXME return zero unless RXPKTRDY is set */
+ retval = musb_readw(epio, MUSB_RXCOUNT);
+
+@@ -1549,7 +1520,7 @@ static void musb_gadget_fifo_flush(struct usb_ep *ep)
+ mbase = musb->mregs;
+
+ spin_lock_irqsave(&musb->lock, flags);
+- musb_ep_select(mbase, (u8) epnum);
++ musb_ep_select(musb, mbase, (u8) epnum);
+
+ /* disable interrupts */
+ int_txe = musb_readw(mbase, MUSB_INTRTXE);
+@@ -1782,7 +1753,7 @@ static void musb_gadget_release(struct device *dev)
+ }
+
+
+-static void __init
++static void __devinit
+ init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
+ {
+ struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
+@@ -1819,7 +1790,7 @@ init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
+ * Initialize the endpoints exposed to peripheral drivers, with backlinks
+ * to the rest of the driver state.
+ */
+-static inline void __init musb_g_init_endpoints(struct musb *musb)
++static inline void __devinit musb_g_init_endpoints(struct musb *musb)
+ {
+ u8 epnum;
+ struct musb_hw_ep *hw_ep;
+@@ -1852,7 +1823,7 @@ static inline void __init musb_g_init_endpoints(struct musb *musb)
+ /* called once during driver setup to initialize and link into
+ * the driver model; memory is zeroed.
+ */
+-int __init musb_gadget_setup(struct musb *musb)
++int __devinit musb_gadget_setup(struct musb *musb)
+ {
+ int status;
+
+@@ -1862,7 +1833,7 @@ int __init musb_gadget_setup(struct musb *musb)
+ */
+
+ musb->g.ops = &musb_gadget_operations;
+- musb->g.is_dualspeed = 1;
++ musb->g.max_speed = USB_SPEED_HIGH;
+ musb->g.speed = USB_SPEED_UNKNOWN;
+
+ /* this "gadget" abstracts/virtualizes the controller */
+@@ -1921,7 +1892,7 @@ static int musb_gadget_start(struct usb_gadget *g,
+ unsigned long flags;
+ int retval = -EINVAL;
+
+- if (driver->speed < USB_SPEED_HIGH)
++ if (driver->max_speed < USB_SPEED_HIGH)
+ goto err0;
+
+ pm_runtime_get_sync(musb->controller);
+@@ -2007,7 +1978,7 @@ static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
+ for (i = 0, hw_ep = musb->endpoints;
+ i < musb->nr_endpoints;
+ i++, hw_ep++) {
+- musb_ep_select(musb->mregs, i);
++ musb_ep_select(musb, musb->mregs, i);
+ if (hw_ep->is_shared_fifo /* || !epnum */) {
+ nuke(&hw_ep->ep_in, -ESHUTDOWN);
+ } else {
+@@ -2042,7 +2013,8 @@ static int musb_gadget_stop(struct usb_gadget *g,
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+- musb_hnp_stop(musb);
++ if (is_otg_enabled(musb))
++ musb_hnp_stop(musb);
+
+ (void) musb_gadget_vbus_draw(&musb->g, 0);
+
+@@ -2156,17 +2128,20 @@ void musb_g_disconnect(struct musb *musb)
+
+ switch (musb->xceiv->state) {
+ default:
+- dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
+- otg_state_string(musb->xceiv->state));
+- musb->xceiv->state = OTG_STATE_A_IDLE;
+- MUSB_HST_MODE(musb);
+- break;
++ if (is_otg_enabled(musb)) {
++ dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
++ otg_state_string(musb->xceiv->state));
++ musb->xceiv->state = OTG_STATE_A_IDLE;
++ break;
++ }
+ case OTG_STATE_A_PERIPHERAL:
+- musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
+- MUSB_HST_MODE(musb);
++ if (is_otg_enabled(musb))
++ musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ case OTG_STATE_B_HOST:
++ if (!is_otg_enabled(musb))
++ break;
+ case OTG_STATE_B_PERIPHERAL:
+ case OTG_STATE_B_IDLE:
+ musb->xceiv->state = OTG_STATE_B_IDLE;
+diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
+index 6a0d046..3e9ec7c 100644
+--- a/drivers/usb/musb/musb_gadget_ep0.c
++++ b/drivers/usb/musb/musb_gadget_ep0.c
+@@ -37,7 +37,6 @@
+ #include <linux/list.h>
+ #include <linux/timer.h>
+ #include <linux/spinlock.h>
+-#include <linux/init.h>
+ #include <linux/device.h>
+ #include <linux/interrupt.h>
+
+@@ -88,7 +87,7 @@ static int service_tx_status_request(
+ case USB_RECIP_DEVICE:
+ result[0] = musb->is_self_powered << USB_DEVICE_SELF_POWERED;
+ result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+- if (musb->g.is_otg) {
++ if (is_otg_enabled(musb) && musb->g.is_otg) {
+ result[0] |= musb->g.b_hnp_enable
+ << USB_DEVICE_B_HNP_ENABLE;
+ result[0] |= musb->g.a_alt_hnp_support
+@@ -128,14 +127,14 @@ static int service_tx_status_request(
+ break;
+ }
+
+- musb_ep_select(mbase, epnum);
++ musb_ep_select(musb, mbase, epnum);
+ if (is_in)
+ tmp = musb_readw(regs, MUSB_TXCSR)
+ & MUSB_TXCSR_P_SENDSTALL;
+ else
+ tmp = musb_readw(regs, MUSB_RXCSR)
+ & MUSB_RXCSR_P_SENDSTALL;
+- musb_ep_select(mbase, 0);
++ musb_ep_select(musb, mbase, 0);
+
+ result[0] = tmp ? 1 : 0;
+ } break;
+@@ -152,7 +151,7 @@ static int service_tx_status_request(
+
+ if (len > 2)
+ len = 2;
+- musb_write_fifo(&musb->endpoints[0], len, result);
++ musb->ops->write_fifo(&musb->endpoints[0], len, result);
+ }
+
+ return handled;
+@@ -283,7 +282,7 @@ __acquires(musb->lock)
+ if (musb_ep->wedged)
+ break;
+
+- musb_ep_select(mbase, epnum);
++ musb_ep_select(musb, mbase, epnum);
+ if (is_in) {
+ csr = musb_readw(regs, MUSB_TXCSR);
+ csr |= MUSB_TXCSR_CLRDATATOG |
+@@ -309,7 +308,7 @@ __acquires(musb->lock)
+ }
+
+ /* select ep0 again */
+- musb_ep_select(mbase, 0);
++ musb_ep_select(musb, mbase, 0);
+ } break;
+ default:
+ /* class, vendor, etc ... delegate */
+@@ -391,20 +390,26 @@ __acquires(musb->lock)
+ musb->test_mode = true;
+ break;
+ case USB_DEVICE_B_HNP_ENABLE:
+- if (!musb->g.is_otg)
+- goto stall;
+- musb->g.b_hnp_enable = 1;
+- musb_try_b_hnp_enable(musb);
++ if (is_otg_enabled(musb)) {
++ if (!musb->g.is_otg)
++ goto stall;
++ musb->g.b_hnp_enable = 1;
++ musb_try_b_hnp_enable(musb);
++ }
+ break;
+ case USB_DEVICE_A_HNP_SUPPORT:
+- if (!musb->g.is_otg)
+- goto stall;
+- musb->g.a_hnp_support = 1;
++ if (is_otg_enabled(musb)) {
++ if (!musb->g.is_otg)
++ goto stall;
++ musb->g.a_hnp_support = 1;
++ }
+ break;
+ case USB_DEVICE_A_ALT_HNP_SUPPORT:
+- if (!musb->g.is_otg)
+- goto stall;
+- musb->g.a_alt_hnp_support = 1;
++ if (is_otg_enabled(musb)) {
++ if (!musb->g.is_otg)
++ goto stall;
++ musb->g.a_alt_hnp_support = 1;
++ }
+ break;
+ case USB_DEVICE_DEBUG_MODE:
+ handled = 0;
+@@ -442,7 +447,7 @@ stall:
+ if (!musb_ep->desc)
+ break;
+
+- musb_ep_select(mbase, epnum);
++ musb_ep_select(musb, mbase, epnum);
+ if (is_in) {
+ csr = musb_readw(regs, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_FIFONOTEMPTY)
+@@ -461,7 +466,7 @@ stall:
+ }
+
+ /* select ep0 again */
+- musb_ep_select(mbase, 0);
++ musb_ep_select(musb, mbase, 0);
+ handled = 1;
+ } break;
+
+@@ -506,8 +511,10 @@ static void ep0_rxstate(struct musb *musb)
+ req->status = -EOVERFLOW;
+ count = len;
+ }
+- musb_read_fifo(&musb->endpoints[0], count, buf);
+- req->actual += count;
++ if (count > 0) {
++ musb->ops->read_fifo(&musb->endpoints[0], count, buf);
++ req->actual += count;
++ }
+ csr = MUSB_CSR0_P_SVDRXPKTRDY;
+ if (count < 64 || req->actual == req->length) {
+ musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
+@@ -528,7 +535,7 @@ static void ep0_rxstate(struct musb *musb)
+ return;
+ musb->ackpend = 0;
+ }
+- musb_ep_select(musb->mregs, 0);
++ musb_ep_select(musb, musb->mregs, 0);
+ musb_writew(regs, MUSB_CSR0, csr);
+ }
+
+@@ -559,7 +566,7 @@ static void ep0_txstate(struct musb *musb)
+ fifo_src = (u8 *) request->buf + request->actual;
+ fifo_count = min((unsigned) MUSB_EP0_FIFOSIZE,
+ request->length - request->actual);
+- musb_write_fifo(&musb->endpoints[0], fifo_count, fifo_src);
++ musb->ops->write_fifo(&musb->endpoints[0], fifo_count, fifo_src);
+ request->actual += fifo_count;
+
+ /* update the flags */
+@@ -585,7 +592,7 @@ static void ep0_txstate(struct musb *musb)
+ }
+
+ /* send it out, triggering a "txpktrdy cleared" irq */
+- musb_ep_select(musb->mregs, 0);
++ musb_ep_select(musb, musb->mregs, 0);
+ musb_writew(regs, MUSB_CSR0, csr);
+ }
+
+@@ -601,7 +608,7 @@ musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req)
+ struct musb_request *r;
+ void __iomem *regs = musb->control_ep->regs;
+
+- musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req);
++ musb->ops->read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req);
+
+ /* NOTE: earlier 2.6 versions changed setup packets to host
+ * order, but now USB packets always stay in USB byte order.
+@@ -670,7 +677,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
+ void __iomem *regs = musb->endpoints[0].regs;
+ irqreturn_t retval = IRQ_NONE;
+
+- musb_ep_select(mbase, 0); /* select ep0 */
++ musb_ep_select(musb, mbase, 0); /* select ep0 */
+ csr = musb_readw(regs, MUSB_CSR0);
+ len = musb_readb(regs, MUSB_COUNT0);
+
+@@ -760,6 +767,9 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
+
+ musb_writeb(mbase, MUSB_TESTMODE,
+ musb->test_mode_nr);
++ if (MUSB_TEST_PACKET == musb->test_mode_nr)
++ musb_writew(musb->endpoints[0].regs,
++ MUSB_CSR0, MUSB_CSR0_TXPKTRDY);
+ }
+ /* FALLTHROUGH */
+
+@@ -880,7 +890,7 @@ setup:
+
+ handled = forward_to_driver(musb, &setup);
+ if (handled < 0) {
+- musb_ep_select(mbase, 0);
++ musb_ep_select(musb, mbase, 0);
+ stall:
+ dev_dbg(musb->controller, "stall (%d)\n", handled);
+ musb->ackpend |= MUSB_CSR0_P_SENDSTALL;
+@@ -975,7 +985,7 @@ musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags)
+ ep->name, ep->is_in ? "IN/TX" : "OUT/RX",
+ req->request.length);
+
+- musb_ep_select(musb->mregs, 0);
++ musb_ep_select(musb, musb->mregs, 0);
+
+ /* sequence #1, IN ... start writing the data */
+ if (musb->ep0_state == MUSB_EP0_STAGE_TX)
+@@ -1038,7 +1048,7 @@ static int musb_g_ep0_halt(struct usb_ep *e, int value)
+ goto cleanup;
+ }
+
+- musb_ep_select(base, 0);
++ musb_ep_select(musb, base, 0);
+ csr = musb->ackpend;
+
+ switch (musb->ep0_state) {
+diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
+index 79cb0af..c4d44fa 100644
+--- a/drivers/usb/musb/musb_host.c
++++ b/drivers/usb/musb/musb_host.c
+@@ -101,6 +101,30 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
+ struct urb *urb, int is_out,
+ u8 *buf, u32 offset, u32 len);
+
++void push_queue(struct musb *musb, struct urb *urb)
++{
++ spin_lock(&musb->gb_lock);
++ list_add_tail(&urb->giveback_list, &musb->gb_list);
++ spin_unlock(&musb->gb_lock);
++}
++
++struct urb *pop_queue(struct musb *musb)
++{
++ struct urb *urb;
++ unsigned long flags;
++
++ spin_lock_irqsave(&musb->gb_lock, flags);
++ if (list_empty(&musb->gb_list)) {
++ spin_unlock_irqrestore(&musb->gb_lock, flags);
++ return NULL;
++ }
++ urb = list_entry(musb->gb_list.next, struct urb, giveback_list);
++ list_del(&urb->giveback_list);
++ spin_unlock_irqrestore(&musb->gb_lock, flags);
++
++ return urb;
++}
++
+ /*
+ * Clear TX fifo. Needed to avoid BABBLE errors.
+ */
+@@ -117,13 +141,16 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
+ if (csr != lastcsr)
+ dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
+ lastcsr = csr;
+- csr |= MUSB_TXCSR_FLUSHFIFO;
++ csr = MUSB_TXCSR_FLUSHFIFO;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ csr = musb_readw(epio, MUSB_TXCSR);
+- if (WARN(retries-- < 1,
+- "Could not flush host TX%d fifo: csr: %04x\n",
+- ep->epnum, csr))
++ if (!(csr & MUSB_TXCSR_FIFONOTEMPTY))
++ break;
++ if (retries-- < 1) {
++ dev_dbg(musb->controller, "Could not flush host TX%d fifo: csr: %04x\n",
++ ep->epnum, csr);
+ return;
++ }
+ mdelay(1);
+ }
+ }
+@@ -178,7 +205,7 @@ static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
+ /* NOTE: no locks here; caller should lock and select EP */
+ txcsr = musb_readw(ep->regs, MUSB_TXCSR);
+ txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
+- if (is_cppi_enabled())
++ if (is_cppi_enabled(ep->musb) || is_cppi41_enabled(ep->musb))
+ txcsr |= MUSB_TXCSR_DMAMODE;
+ musb_writew(ep->regs, MUSB_TXCSR, txcsr);
+ }
+@@ -292,15 +319,14 @@ start:
+
+ if (!hw_ep->tx_channel)
+ musb_h_tx_start(hw_ep);
+- else if (is_cppi_enabled() || tusb_dma_omap())
++ else if (is_cppi_enabled(musb) || is_cppi41_enabled(musb)
++ || tusb_dma_omap(musb))
+ musb_h_tx_dma_start(hw_ep);
+ }
+ }
+
+ /* Context: caller owns controller lock, IRQs are blocked */
+ static void musb_giveback(struct musb *musb, struct urb *urb, int status)
+-__releases(musb->lock)
+-__acquires(musb->lock)
+ {
+ dev_dbg(musb->controller,
+ "complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
+@@ -311,10 +337,7 @@ __acquires(musb->lock)
+ urb->actual_length, urb->transfer_buffer_length
+ );
+
+- usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
+- spin_unlock(&musb->lock);
+ usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
+- spin_lock(&musb->lock);
+ }
+
+ /* For bulk/interrupt endpoints only */
+@@ -336,6 +359,15 @@ static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
+
+ usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
+ }
++/* Used to complete urb giveback */
++void musb_gb_work(struct work_struct *data)
++{
++ struct musb *musb = container_of(data, struct musb, gb_work);
++ struct urb *urb;
++
++ while ((urb = pop_queue(musb)) != 0)
++ musb_giveback(musb, urb, 0);
++}
+
+ /*
+ * Advance this hardware endpoint's queue, completing the specified URB and
+@@ -366,20 +398,36 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
+ break;
+ }
+
+- qh->is_ready = 0;
+- musb_giveback(musb, urb, status);
+- qh->is_ready = ready;
++ usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
+
++ /* If URB completed with error then giveback first */
++ if (status != 0) {
++ qh->is_ready = 0;
++ spin_unlock(&musb->lock);
++ musb_giveback(musb, urb, status);
++ spin_lock(&musb->lock);
++ qh->is_ready = ready;
++ }
+ /* reclaim resources (and bandwidth) ASAP; deschedule it, and
+ * invalidate qh as soon as list_empty(&hep->urb_list)
+ */
+ if (list_empty(&qh->hep->urb_list)) {
+ struct list_head *head;
++ struct dma_controller *dma = musb->dma_controller;
+
+- if (is_in)
++ if (is_in) {
+ ep->rx_reinit = 1;
+- else
++ if (ep->rx_channel) {
++ dma->channel_release(ep->rx_channel);
++ ep->rx_channel = NULL;
++ }
++ } else {
+ ep->tx_reinit = 1;
++ if (ep->tx_channel) {
++ dma->channel_release(ep->tx_channel);
++ ep->tx_channel = NULL;
++ }
++ }
+
+ /* Clobber old pointers to this qh */
+ musb_ep_set_qh(ep, is_in, NULL);
+@@ -417,6 +465,12 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
+ hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
+ musb_start_urb(musb, is_in, qh);
+ }
++
++ /* if URB is successfully completed then giveback in workqueue */
++ if (status == 0) {
++ push_queue(musb, urb);
++ queue_work(musb->gb_queue, &musb->gb_work);
++ }
+ }
+
+ static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
+@@ -456,7 +510,7 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
+ int pipe = urb->pipe;
+ void *buffer = urb->transfer_buffer;
+
+- /* musb_ep_select(mbase, epnum); */
++ /* musb_ep_select(musb, mbase, epnum); */
+ rx_count = musb_readw(epio, MUSB_RXCOUNT);
+ dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
+ urb->transfer_buffer, qh->offset,
+@@ -517,7 +571,7 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
+ urb->status = -EREMOTEIO;
+ }
+
+- musb_read_fifo(hw_ep, length, buf);
++ musb->ops->read_fifo(hw_ep, length, buf);
+
+ csr = musb_readw(epio, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_H_WZC_BITS;
+@@ -615,36 +669,38 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
+ u16 csr;
+ u8 mode;
+
+-#ifdef CONFIG_USB_INVENTRA_DMA
+- if (length > channel->max_len)
+- length = channel->max_len;
++ if (is_inventra_dma(hw_ep->musb)) {
++ if (length > channel->max_len)
++ length = channel->max_len;
+
+- csr = musb_readw(epio, MUSB_TXCSR);
+- if (length > pkt_size) {
+- mode = 1;
+- csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
+- /* autoset shouldn't be set in high bandwidth */
+- if (qh->hb_mult == 1)
+- csr |= MUSB_TXCSR_AUTOSET;
++ csr = musb_readw(epio, MUSB_TXCSR);
++ if (length > pkt_size) {
++ mode = 1;
++ csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
++ /* autoset shouldn't be set in high bandwidth */
++ if (qh->hb_mult == 1)
++ csr |= MUSB_TXCSR_AUTOSET;
++ } else {
++ mode = 0;
++ csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
++ csr |= MUSB_TXCSR_DMAENAB; /* against progrmr's guide */
++ }
++ channel->desired_mode = mode;
++ musb_writew(epio, MUSB_TXCSR, csr);
+ } else {
+- mode = 0;
+- csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
+- csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
+- }
+- channel->desired_mode = mode;
+- musb_writew(epio, MUSB_TXCSR, csr);
+-#else
+- if (!is_cppi_enabled() && !tusb_dma_omap())
+- return false;
++ if (!is_cppi_enabled(hw_ep->musb)
++ && !is_cppi41_enabled(hw_ep->musb)
++ && !tusb_dma_omap(hw_ep->musb))
++ return false;
+
+- channel->actual_len = 0;
++ channel->actual_len = 0;
+
+- /*
+- * TX uses "RNDIS" mode automatically but needs help
+- * to identify the zero-length-final-packet case.
+- */
+- mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
+-#endif
++ /*
++ * TX uses "RNDIS" mode automatically but needs help
++ * to identify the zero-length-final-packet case.
++ */
++ mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
++ }
+
+ qh->segsize = length;
+
+@@ -654,7 +710,8 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
+ */
+ wmb();
+
+- if (!dma->channel_program(channel, pkt_size, mode,
++ if (!dma->channel_program(channel, pkt_size |
++ (qh->hb_mult << 11), mode,
+ urb->transfer_dma + offset, length)) {
+ dma->channel_release(channel);
+ hw_ep->tx_channel = NULL;
+@@ -692,7 +749,7 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
+ qh->h_addr_reg, qh->h_port_reg,
+ len);
+
+- musb_ep_select(mbase, epnum);
++ musb_ep_select(musb, mbase, epnum);
+
+ /* candidate for DMA? */
+ dma_controller = musb->dma_controller;
+@@ -779,9 +836,8 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
+ | ((hw_ep->max_packet_sz_tx /
+ packet_sz) - 1) << 11);
+ else
+- musb_writew(epio, MUSB_TXMAXP,
+- qh->maxpacket |
+- ((qh->hb_mult - 1) << 11));
++ musb_writew(epio, MUSB_TXMAXP, qh->maxpacket |
++ (qh->hb_mult << 11));
+ musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
+ } else {
+ musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
+@@ -803,7 +859,7 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
+ if (load_count) {
+ /* PIO to load FIFO */
+ qh->segsize = load_count;
+- musb_write_fifo(hw_ep, load_count, buf);
++ musb->ops->write_fifo(hw_ep, load_count, buf);
+ }
+
+ /* re-enable interrupt */
+@@ -828,9 +884,9 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
+ } else {
+ csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+
+- if (csr & (MUSB_RXCSR_RXPKTRDY
+- | MUSB_RXCSR_DMAENAB
+- | MUSB_RXCSR_H_REQPKT))
++ if (csr & (MUSB_RXCSR_RXPKTRDY | (is_cppi_enabled(musb)
++ || is_cppi41_enabled(musb)) ? 0 : MUSB_RXCSR_DMAENAB
++ | MUSB_RXCSR_H_REQPKT))
+ ERR("broken !rx_reinit, ep%d csr %04x\n",
+ hw_ep->epnum, csr);
+
+@@ -840,7 +896,8 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
+
+ /* kick things off */
+
+- if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
++ if ((is_cppi_enabled(musb) || is_cppi41_enabled(musb) ||
++ tusb_dma_omap(musb)) && dma_channel) {
+ /* Candidate for DMA */
+ dma_channel->actual_len = 0L;
+ qh->segsize = len;
+@@ -854,7 +911,8 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
+ * errors, we dare not queue multiple transfers.
+ */
+ dma_ok = dma_controller->channel_program(dma_channel,
+- packet_sz, !(urb->transfer_flags &
++ packet_sz | (qh->hb_mult << 11),
++ !(urb->transfer_flags &
+ URB_SHORT_NOT_OK),
+ urb->transfer_dma + offset,
+ qh->segsize);
+@@ -872,6 +930,73 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
+ }
+ }
+
++/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
++ * the end; avoids starvation for other endpoints.
++ */
++static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
++ int is_in)
++{
++ struct dma_channel *dma;
++ struct urb *urb;
++ void __iomem *mbase = musb->mregs;
++ void __iomem *epio = ep->regs;
++ struct musb_qh *cur_qh, *next_qh;
++ u16 rx_csr, tx_csr;
++
++ musb_ep_select(musb, mbase, ep->epnum);
++ if (is_in) {
++ dma = is_dma_capable() ? ep->rx_channel : NULL;
++
++ /* clear nak timeout bit */
++ rx_csr = musb_readw(epio, MUSB_RXCSR);
++ rx_csr |= MUSB_RXCSR_H_WZC_BITS;
++ rx_csr &= ~MUSB_RXCSR_DATAERROR;
++ musb_writew(epio, MUSB_RXCSR, rx_csr);
++
++ cur_qh = first_qh(&musb->in_bulk);
++ } else {
++ dma = is_dma_capable() ? ep->tx_channel : NULL;
++
++ /* clear nak timeout bit */
++ tx_csr = musb_readw(epio, MUSB_TXCSR);
++ tx_csr |= MUSB_TXCSR_H_WZC_BITS;
++ tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
++ musb_writew(epio, MUSB_TXCSR, tx_csr);
++
++ cur_qh = first_qh(&musb->out_bulk);
++ }
++ if (cur_qh) {
++ urb = next_urb(cur_qh);
++ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
++ dma->status = MUSB_DMA_STATUS_CORE_ABORT;
++ musb->dma_controller->channel_abort(dma);
++ urb->actual_length += dma->actual_len;
++ dma->actual_len = 0L;
++ }
++ musb_save_toggle(cur_qh, is_in, urb);
++
++ if (is_in) {
++ /* move cur_qh to end of queue */
++ list_move_tail(&cur_qh->ring, &musb->in_bulk);
++
++ /* get the next qh from musb->in_bulk */
++ next_qh = first_qh(&musb->in_bulk);
++
++ /* set rx_reinit and schedule the next qh */
++ ep->rx_reinit = 1;
++ } else {
++ /* move cur_qh to end of queue */
++ list_move_tail(&cur_qh->ring, &musb->out_bulk);
++
++ /* get the next qh from musb->out_bulk */
++ next_qh = first_qh(&musb->out_bulk);
++
++ /* set tx_reinit and schedule the next qh */
++ ep->tx_reinit = 1;
++ }
++ musb_start_urb(musb, is_in, next_qh);
++ }
++}
+
+ /*
+ * Service the default endpoint (ep0) as host.
+@@ -894,7 +1019,7 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
+ if (fifo_count < len)
+ urb->status = -EOVERFLOW;
+
+- musb_read_fifo(hw_ep, fifo_count, fifo_dest);
++ musb->ops->read_fifo(hw_ep, fifo_count, fifo_dest);
+
+ urb->actual_length += fifo_count;
+ if (len < qh->maxpacket) {
+@@ -933,7 +1058,7 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
+ fifo_count,
+ (fifo_count == 1) ? "" : "s",
+ fifo_dest);
+- musb_write_fifo(hw_ep, fifo_count, fifo_dest);
++ musb->ops->write_fifo(hw_ep, fifo_count, fifo_dest);
+
+ urb->actual_length += fifo_count;
+ more = true;
+@@ -968,7 +1093,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
+ /* ep0 only has one queue, "in" */
+ urb = next_urb(qh);
+
+- musb_ep_select(mbase, 0);
++ musb_ep_select(musb, mbase, 0);
+ csr = musb_readw(epio, MUSB_CSR0);
+ len = (csr & MUSB_CSR0_RXPKTRDY)
+ ? musb_readb(epio, MUSB_COUNT0)
+@@ -1054,6 +1179,8 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
+ else
+ csr = MUSB_CSR0_H_STATUSPKT
+ | MUSB_CSR0_TXPKTRDY;
++ /* disable ping token in status phase */
++ csr |= MUSB_CSR0_H_DIS_PING;
+
+ /* flag status stage */
+ musb->ep0_stage = MUSB_EP0_STATUS;
+@@ -1074,8 +1201,6 @@ done:
+ }
+
+
+-#ifdef CONFIG_USB_INVENTRA_DMA
+-
+ /* Host side TX (OUT) using Mentor DMA works as follows:
+ submit_urb ->
+ - if queue was empty, Program Endpoint
+@@ -1088,8 +1213,6 @@ done:
+ short packets in mode 1.
+ */
+
+-#endif
+-
+ /* Service a Tx-Available or dma completion irq for the endpoint */
+ void musb_host_tx(struct musb *musb, u8 epnum)
+ {
+@@ -1107,7 +1230,7 @@ void musb_host_tx(struct musb *musb, u8 epnum)
+ struct dma_channel *dma;
+ bool transfer_pending = false;
+
+- musb_ep_select(mbase, epnum);
++ musb_ep_select(musb, mbase, epnum);
+ tx_csr = musb_readw(epio, MUSB_TXCSR);
+
+ /* with CPPI, DMA sometimes triggers "extra" irqs */
+@@ -1136,20 +1259,27 @@ void musb_host_tx(struct musb *musb, u8 epnum)
+ status = -ETIMEDOUT;
+
+ } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
+- dev_dbg(musb->controller, "TX end=%d device not responding\n", epnum);
++ if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
++ && !list_is_singular(&musb->out_bulk)) {
+
+- /* NOTE: this code path would be a good place to PAUSE a
+- * transfer, if there's some other (nonperiodic) tx urb
+- * that could use this fifo. (dma complicates it...)
+- * That's already done for bulk RX transfers.
+- *
+- * if (bulk && qh->ring.next != &musb->out_bulk), then
+- * we have a candidate... NAKing is *NOT* an error
+- */
+- musb_ep_select(mbase, epnum);
+- musb_writew(epio, MUSB_TXCSR,
+- MUSB_TXCSR_H_WZC_BITS
+- | MUSB_TXCSR_TXPKTRDY);
++ dev_dbg(musb->controller, "TX end %d NAK timeout\n", epnum);
++ musb_bulk_nak_timeout(musb, hw_ep, 0);
++ } else {
++ dev_dbg(musb->controller, "TX end=%d device not responding\n", epnum);
++
++ /* NOTE: this code path would be a good place to PAUSE a
++ * transfer, if there's some other (nonperiodic) tx urb
++ * that could use this fifo. (dma complicates it...)
++ * That's already done for bulk RX transfers.
++ *
++ * if (bulk && qh->ring.next != &musb->out_bulk), then
++ * we have a candidate... NAKing is *NOT* an error
++ */
++ musb_ep_select(musb, mbase, epnum);
++ musb_writew(epio, MUSB_TXCSR,
++ MUSB_TXCSR_H_WZC_BITS
++ | MUSB_TXCSR_TXPKTRDY);
++ }
+ return;
+ }
+
+@@ -1170,7 +1300,7 @@ void musb_host_tx(struct musb *musb, u8 epnum)
+ | MUSB_TXCSR_H_NAKTIMEOUT
+ );
+
+- musb_ep_select(mbase, epnum);
++ musb_ep_select(musb, mbase, epnum);
+ musb_writew(epio, MUSB_TXCSR, tx_csr);
+ /* REVISIT may need to clear FLUSHFIFO ... */
+ musb_writew(epio, MUSB_TXCSR, tx_csr);
+@@ -1302,7 +1432,8 @@ void musb_host_tx(struct musb *musb, u8 epnum)
+ } else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
+ if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
+ offset, length)) {
+- if (is_cppi_enabled() || tusb_dma_omap())
++ if (is_cppi_enabled(musb) || is_cppi41_enabled(musb) ||
++ tusb_dma_omap(musb))
+ musb_h_tx_dma_start(hw_ep);
+ return;
+ }
+@@ -1322,17 +1453,15 @@ void musb_host_tx(struct musb *musb, u8 epnum)
+ length = qh->maxpacket;
+ /* Unmap the buffer so that CPU can use it */
+ usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
+- musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
++ musb->ops->write_fifo(hw_ep, length, urb->transfer_buffer + offset);
+ qh->segsize = length;
+
+- musb_ep_select(mbase, epnum);
++ musb_ep_select(musb, mbase, epnum);
+ musb_writew(epio, MUSB_TXCSR,
+ MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
+ }
+
+
+-#ifdef CONFIG_USB_INVENTRA_DMA
+-
+ /* Host side RX (IN) using Mentor DMA works as follows:
+ submit_urb ->
+ - if queue was empty, ProgramEndpoint
+@@ -1368,52 +1497,6 @@ void musb_host_tx(struct musb *musb, u8 epnum)
+ * last packet of one URB's transfer.
+ */
+
+-#endif
+-
+-/* Schedule next QH from musb->in_bulk and move the current qh to
+- * the end; avoids starvation for other endpoints.
+- */
+-static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
+-{
+- struct dma_channel *dma;
+- struct urb *urb;
+- void __iomem *mbase = musb->mregs;
+- void __iomem *epio = ep->regs;
+- struct musb_qh *cur_qh, *next_qh;
+- u16 rx_csr;
+-
+- musb_ep_select(mbase, ep->epnum);
+- dma = is_dma_capable() ? ep->rx_channel : NULL;
+-
+- /* clear nak timeout bit */
+- rx_csr = musb_readw(epio, MUSB_RXCSR);
+- rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+- rx_csr &= ~MUSB_RXCSR_DATAERROR;
+- musb_writew(epio, MUSB_RXCSR, rx_csr);
+-
+- cur_qh = first_qh(&musb->in_bulk);
+- if (cur_qh) {
+- urb = next_urb(cur_qh);
+- if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+- dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+- musb->dma_controller->channel_abort(dma);
+- urb->actual_length += dma->actual_len;
+- dma->actual_len = 0L;
+- }
+- musb_save_toggle(cur_qh, 1, urb);
+-
+- /* move cur_qh to end of queue */
+- list_move_tail(&cur_qh->ring, &musb->in_bulk);
+-
+- /* get the next qh from musb->in_bulk */
+- next_qh = first_qh(&musb->in_bulk);
+-
+- /* set rx_reinit and schedule the next qh */
+- ep->rx_reinit = 1;
+- musb_start_urb(musb, 1, next_qh);
+- }
+-}
+-
+ /*
+ * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
+ * and high-bandwidth IN transfer cases.
+@@ -1433,7 +1516,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+ u32 status;
+ struct dma_channel *dma;
+
+- musb_ep_select(mbase, epnum);
++ musb_ep_select(musb, mbase, epnum);
+
+ urb = next_urb(qh);
+ dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
+@@ -1490,10 +1573,10 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+ if (usb_pipebulk(urb->pipe)
+ && qh->mux == 1
+ && !list_is_singular(&musb->in_bulk)) {
+- musb_bulk_rx_nak_timeout(musb, hw_ep);
++ musb_bulk_nak_timeout(musb, hw_ep, 1);
+ return;
+ }
+- musb_ep_select(mbase, epnum);
++ musb_ep_select(musb, mbase, epnum);
+ rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+ rx_csr &= ~MUSB_RXCSR_DATAERROR;
+ musb_writew(epio, MUSB_RXCSR, rx_csr);
+@@ -1537,8 +1620,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+
+ /* FIXME this is _way_ too much in-line logic for Mentor DMA */
+
+-#ifndef CONFIG_USB_INVENTRA_DMA
+- if (rx_csr & MUSB_RXCSR_H_REQPKT) {
++ if (!is_inventra_dma(musb) && (rx_csr & MUSB_RXCSR_H_REQPKT)) {
+ /* REVISIT this happened for a while on some short reads...
+ * the cleanup still needs investigation... looks bad...
+ * and also duplicates dma cleanup code above ... plus,
+@@ -1555,11 +1637,10 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+ xfer_len, dma ? ", dma" : "");
+ rx_csr &= ~MUSB_RXCSR_H_REQPKT;
+
+- musb_ep_select(mbase, epnum);
++ musb_ep_select(musb, mbase, epnum);
+ musb_writew(epio, MUSB_RXCSR,
+ MUSB_RXCSR_H_WZC_BITS | rx_csr);
+ }
+-#endif
+ if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
+ xfer_len = dma->actual_len;
+
+@@ -1567,9 +1648,12 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+ | MUSB_RXCSR_H_AUTOREQ
+ | MUSB_RXCSR_AUTOCLEAR
+ | MUSB_RXCSR_RXPKTRDY);
++
++ if (is_cppi_enabled(musb) || is_cppi41_enabled(musb))
++ val |= MUSB_RXCSR_DMAENAB;
++
+ musb_writew(hw_ep->regs, MUSB_RXCSR, val);
+
+-#ifdef CONFIG_USB_INVENTRA_DMA
+ if (usb_pipeisoc(pipe)) {
+ struct usb_iso_packet_descriptor *d;
+
+@@ -1584,14 +1668,33 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+
+ if (++qh->iso_idx >= urb->number_of_packets)
+ done = true;
+- else
++ else if (is_cppi_enabled(musb) ||
++ is_cppi41_enabled(musb)) {
++ struct dma_controller *c;
++ void *buf;
++ u32 length, ret;
++
++ c = musb->dma_controller;
++ buf = (void *)
++ urb->iso_frame_desc[qh->iso_idx].offset
++ + (u32)urb->transfer_dma;
++
++ length =
++ urb->iso_frame_desc[qh->iso_idx].length;
++
++ ret = c->channel_program(dma, qh->maxpacket |
++ (qh->hb_mult << 11),
++ 0, (u32) buf, length);
+ done = false;
+-
++ } else {
++ done = false;
++ }
+ } else {
+- /* done if urb buffer is full or short packet is recd */
+- done = (urb->actual_length + xfer_len >=
+- urb->transfer_buffer_length
+- || dma->actual_len < qh->maxpacket);
++ /* done if urb buffer is full or short packet is recd */
++ done = (urb->actual_length + xfer_len >=
++ urb->transfer_buffer_length
++ || dma->actual_len < qh->maxpacket
++ || dma->actual_len % qh->maxpacket);
+ }
+
+ /* send IN token for next packet, without AUTOREQ */
+@@ -1601,13 +1704,11 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+ MUSB_RXCSR_H_WZC_BITS | val);
+ }
+
+- dev_dbg(musb->controller, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
++ dev_dbg(musb->controller,
++ "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
+ done ? "off" : "reset",
+ musb_readw(epio, MUSB_RXCSR),
+ musb_readw(epio, MUSB_RXCOUNT));
+-#else
+- done = true;
+-#endif
+ } else if (urb->status == -EINPROGRESS) {
+ /* if no errors, be sure a packet is ready for unloading */
+ if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
+@@ -1618,14 +1719,13 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+
+ /* SCRUB (RX) */
+ /* do the proper sequence to abort the transfer */
+- musb_ep_select(mbase, epnum);
++ musb_ep_select(musb, mbase, epnum);
+ val &= ~MUSB_RXCSR_H_REQPKT;
+ musb_writew(epio, MUSB_RXCSR, val);
+ goto finish;
+ }
+
+ /* we are expecting IN packets */
+-#ifdef CONFIG_USB_INVENTRA_DMA
+ if (dma) {
+ struct dma_controller *c;
+ u16 rx_count;
+@@ -1729,7 +1829,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+ * adjusted first...
+ */
+ ret = c->channel_program(
+- dma, qh->maxpacket,
++ dma, qh->maxpacket | ((qh->hb_mult - 1) << 11),
+ dma->desired_mode, buf, length);
+
+ if (!ret) {
+@@ -1739,7 +1839,6 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+ /* REVISIT reset CSR */
+ }
+ }
+-#endif /* Mentor DMA */
+
+ if (!dma) {
+ /* Unmap the buffer so that CPU can use it */
+@@ -1849,14 +1948,14 @@ static int musb_schedule(
+ else
+ head = &musb->out_bulk;
+
+- /* Enable bulk RX NAK timeout scheme when bulk requests are
++ /* Enable bulk RX/TX NAK timeout scheme when bulk requests are
+ * multiplexed. This scheme doen't work in high speed to full
+ * speed scenario as NAK interrupts are not coming from a
+ * full speed device connected to a high speed device.
+ * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
+ * 4 (8 frame or 8ms) for FS device.
+ */
+- if (is_in && qh->dev)
++ if (qh->dev)
+ qh->intv_reg =
+ (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
+ goto success;
+@@ -1904,6 +2003,8 @@ static int musb_urb_enqueue(
+ qh = ret ? NULL : hep->hcpriv;
+ if (qh)
+ urb->hcpriv = qh;
++
++ INIT_LIST_HEAD(&urb->giveback_list);
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ /* DMA mapping was already done, if needed, and this urb is on
+@@ -2081,7 +2182,7 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
+ int status = 0;
+ u16 csr;
+
+- musb_ep_select(regs, hw_end);
++ musb_ep_select(ep->musb, regs, hw_end);
+
+ if (is_dma_capable()) {
+ struct dma_channel *dma;
+@@ -2167,8 +2268,12 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+ || musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
+ int ready = qh->is_ready;
+
++ usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
++
+ qh->is_ready = 0;
++ spin_unlock(&musb->lock);
+ musb_giveback(musb, urb, 0);
++ spin_lock(&musb->lock);
+ qh->is_ready = ready;
+
+ /* If nothing else (usually musb_giveback) is using it
+@@ -2229,8 +2334,13 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
+ * other transfers, and since !qh->is_ready nothing
+ * will activate any of these as it advances.
+ */
+- while (!list_empty(&hep->urb_list))
+- musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
++ while (!list_empty(&hep->urb_list)) {
++ urb = next_urb(qh);
++ usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
++ spin_unlock(&musb->lock);
++ musb_giveback(musb, urb, -ESHUTDOWN);
++ spin_lock(&musb->lock);
++ }
+
+ hep->hcpriv = NULL;
+ list_del(&qh->ring);
+@@ -2290,7 +2400,7 @@ static int musb_bus_suspend(struct usb_hcd *hcd)
+ }
+
+ if (musb->is_active) {
+- WARNING("trying to suspend as %s while active\n",
++ dev_dbg(musb->controller, "trying to suspend as %s while active\n",
+ otg_state_string(musb->xceiv->state));
+ return -EBUSY;
+ } else
+@@ -2299,7 +2409,6 @@ static int musb_bus_suspend(struct usb_hcd *hcd)
+
+ static int musb_bus_resume(struct usb_hcd *hcd)
+ {
+- /* resuming child port does the work */
+ return 0;
+ }
+
+diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
+index 03c6ccd..af554fe 100644
+--- a/drivers/usb/musb/musb_io.h
++++ b/drivers/usb/musb/musb_io.h
+@@ -61,75 +61,26 @@ static inline void writesb(const void __iomem *addr, const void *buf, int len)
+ /* NOTE: these offsets are all in bytes */
+
+ static inline u16 musb_readw(const void __iomem *addr, unsigned offset)
+- { return __raw_readw(addr + offset); }
++ { return readw(addr + offset); }
+
+ static inline u32 musb_readl(const void __iomem *addr, unsigned offset)
+- { return __raw_readl(addr + offset); }
++ { return readl(addr + offset); }
+
+
+ static inline void musb_writew(void __iomem *addr, unsigned offset, u16 data)
+- { __raw_writew(data, addr + offset); }
++ { writew(data, addr + offset); }
+
+ static inline void musb_writel(void __iomem *addr, unsigned offset, u32 data)
+- { __raw_writel(data, addr + offset); }
+-
+-
+-#ifdef CONFIG_USB_MUSB_TUSB6010
+-
+-/*
+- * TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum.
+- */
+-static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
+-{
+- u16 tmp;
+- u8 val;
+-
+- tmp = __raw_readw(addr + (offset & ~1));
+- if (offset & 1)
+- val = (tmp >> 8);
+- else
+- val = tmp & 0xff;
+-
+- return val;
+-}
+-
+-static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
+-{
+- u16 tmp;
+-
+- tmp = __raw_readw(addr + (offset & ~1));
+- if (offset & 1)
+- tmp = (data << 8) | (tmp & 0xff);
+- else
+- tmp = (tmp & 0xff00) | data;
+-
+- __raw_writew(tmp, addr + (offset & ~1));
+-}
++ { writel(data, addr + offset); }
+
+ #else
+
+-static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
+- { return __raw_readb(addr + offset); }
+-
+-static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
+- { __raw_writeb(data, addr + offset); }
+-
+-#endif /* CONFIG_USB_MUSB_TUSB6010 */
+-
+-#else
+-
+-static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
+- { return (u8) (bfin_read16(addr + offset)); }
+-
+ static inline u16 musb_readw(const void __iomem *addr, unsigned offset)
+ { return bfin_read16(addr + offset); }
+
+ static inline u32 musb_readl(const void __iomem *addr, unsigned offset)
+ { return (u32) (bfin_read16(addr + offset)); }
+
+-static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
+- { bfin_write16(addr + offset, (u16) data); }
+-
+ static inline void musb_writew(void __iomem *addr, unsigned offset, u16 data)
+ { bfin_write16(addr + offset, data); }
+
+diff --git a/drivers/usb/musb/musb_procfs.c b/drivers/usb/musb/musb_procfs.c
+new file mode 100644
+index 0000000..e3aa42f
+--- /dev/null
++++ b/drivers/usb/musb/musb_procfs.c
+@@ -0,0 +1,808 @@
++/*
++ * MUSB OTG driver debug support
++ *
++ * Copyright 2005 Mentor Graphics Corporation
++ * Copyright (C) 2005-2006 by Texas Instruments
++ * Copyright (C) 2006-2007 Nokia Corporation
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
++ * 02110-1301 USA
++ *
++ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
++ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
++ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
++ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
++ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++#include <linux/uaccess.h> /* FIXME remove procfs writes */
++#include <mach/hardware.h>
++
++#include "musb_core.h"
++
++#include "davinci.h"
++
++static int dump_qh(struct musb_qh *qh, char *buf, unsigned max)
++{
++ int count;
++ int tmp;
++ struct usb_host_endpoint *hep = qh->hep;
++ struct urb *urb;
++
++ count = snprintf(buf, max, " qh %p dev%d ep%d%s max%d\n",
++ qh, qh->dev->devnum, qh->epnum,
++ ({ char *s; switch (qh->type) {
++ case USB_ENDPOINT_XFER_BULK:
++ s = "-bulk"; break;
++ case USB_ENDPOINT_XFER_INT:
++ s = "-int"; break;
++ case USB_ENDPOINT_XFER_CONTROL:
++ s = ""; break;
++ default:
++ s = "iso"; break;
++ }; s; }),
++ qh->maxpacket);
++ if (count <= 0)
++ return 0;
++ buf += count;
++ max -= count;
++
++ list_for_each_entry(urb, &hep->urb_list, urb_list) {
++ tmp = snprintf(buf, max, "\t%s urb %p %d/%d\n",
++ usb_pipein(urb->pipe) ? "in" : "out",
++ urb, urb->actual_length,
++ urb->transfer_buffer_length);
++ if (tmp <= 0)
++ break;
++ tmp = min(tmp, (int)max);
++ count += tmp;
++ buf += tmp;
++ max -= tmp;
++ }
++ return count;
++}
++
++static int
++dump_queue(struct list_head *q, char *buf, unsigned max)
++{
++ int count = 0;
++ struct musb_qh *qh;
++
++ list_for_each_entry(qh, q, ring) {
++ int tmp;
++
++ tmp = dump_qh(qh, buf, max);
++ if (tmp <= 0)
++ break;
++ tmp = min(tmp, (int)max);
++ count += tmp;
++ buf += tmp;
++ max -= tmp;
++ }
++ return count;
++}
++
++static int dump_ep(struct musb_ep *ep, char *buffer, unsigned max)
++{
++ char *buf = buffer;
++ int code = 0;
++ void __iomem *regs = ep->hw_ep->regs;
++ char *mode = "1buf";
++
++ if (ep->is_in) {
++ if (ep->hw_ep->tx_double_buffered)
++ mode = "2buf";
++ } else {
++ if (ep->hw_ep->rx_double_buffered)
++ mode = "2buf";
++ }
++
++ do {
++ struct usb_request *req;
++
++ code = snprintf(buf, max,
++ "\n%s (hw%d): %s%s, csr %04x maxp %04x\n",
++ ep->name, ep->current_epnum,
++ mode, ep->dma ? " dma" : "",
++ musb_readw(regs,
++ (ep->is_in || !ep->current_epnum)
++ ? MUSB_TXCSR
++ : MUSB_RXCSR),
++ musb_readw(regs, ep->is_in
++ ? MUSB_TXMAXP
++ : MUSB_RXMAXP)
++ );
++ if (code <= 0)
++ break;
++ code = min(code, (int) max);
++ buf += code;
++ max -= code;
++
++ if ((is_cppi_enabled(ep->musb) || is_cppi41_enabled(ep->musb))
++ && ep->current_epnum) {
++ unsigned cppi = ep->current_epnum - 1;
++ void __iomem *base = ep->musb->ctrl_base;
++ unsigned off1 = cppi << 2;
++ void __iomem *ram = base;
++ char tmp[16];
++
++ if (ep->is_in) {
++ ram += DAVINCI_TXCPPI_STATERAM_OFFSET(cppi);
++ tmp[0] = 0;
++ } else {
++ ram += DAVINCI_RXCPPI_STATERAM_OFFSET(cppi);
++ snprintf(tmp, sizeof tmp, "%d left, ",
++ musb_readl(base,
++ DAVINCI_RXCPPI_BUFCNT0_REG + off1));
++ }
++
++ code = snprintf(buf, max, "%cX DMA%d: %s"
++ "%08x %08x, %08x %08x; "
++ "%08x %08x %08x .. %08x\n",
++ ep->is_in ? 'T' : 'R',
++ ep->current_epnum - 1, tmp,
++ musb_readl(ram, 0 * 4),
++ musb_readl(ram, 1 * 4),
++ musb_readl(ram, 2 * 4),
++ musb_readl(ram, 3 * 4),
++ musb_readl(ram, 4 * 4),
++ musb_readl(ram, 5 * 4),
++ musb_readl(ram, 6 * 4),
++ musb_readl(ram, 7 * 4));
++ if (code <= 0)
++ break;
++ code = min(code, (int) max);
++ buf += code;
++ max -= code;
++ }
++
++ if (list_empty(&ep->req_list)) {
++ code = snprintf(buf, max, "\t(queue empty)\n");
++ if (code <= 0)
++ break;
++ code = min(code, (int) max);
++ buf += code;
++ max -= code;
++ break;
++ }
++ list_for_each_entry(req, &ep->req_list, list) {
++ code = snprintf(buf, max, "\treq %p, %s%s%d/%d\n",
++ req,
++ req->zero ? "zero, " : "",
++ req->short_not_ok ? "!short, " : "",
++ req->actual, req->length);
++ if (code <= 0)
++ break;
++ code = min(code, (int) max);
++ buf += code;
++ max -= code;
++ }
++ } while (0);
++ return buf - buffer;
++}
++
++static int
++dump_end_info(struct musb *musb, u8 epnum, char *aBuffer, unsigned max)
++{
++ int code = 0;
++ char *buf = aBuffer;
++ struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
++
++ do {
++ musb_ep_select(musb, musb->mregs, epnum);
++ if (is_host_active(musb)) {
++ int dump_rx, dump_tx;
++ void __iomem *regs = hw_ep->regs;
++
++ /* TEMPORARY (!) until we have a real periodic
++ * schedule tree ...
++ */
++ if (!epnum) {
++ /* control is shared, uses RX queue
++ * but (mostly) shadowed tx registers
++ */
++ dump_tx = !list_empty(&musb->control);
++ dump_rx = 0;
++ } else if (hw_ep == musb->bulk_ep) {
++ dump_tx = !list_empty(&musb->out_bulk);
++ dump_rx = !list_empty(&musb->in_bulk);
++ } else if (hw_ep->in_qh || hw_ep->out_qh) {
++ if (hw_ep->in_qh)
++ dump_rx = 1;
++ else
++ dump_rx = 0;
++ dump_tx = !dump_rx;
++ } else
++ break;
++ /* END TEMPORARY */
++
++
++ if (dump_rx) {
++ code = snprintf(buf, max,
++ "\nRX%d: %s rxcsr %04x interval %02x "
++ "max %04x type %02x; "
++ "dev %d hub %d port %d"
++ "\n",
++ epnum,
++ hw_ep->rx_double_buffered
++ ? "2buf" : "1buf",
++ musb_readw(regs, MUSB_RXCSR),
++ musb_readb(regs, MUSB_RXINTERVAL),
++ musb_readw(regs, MUSB_RXMAXP),
++ musb_readb(regs, MUSB_RXTYPE),
++ /* FIXME: assumes multipoint */
++ musb_readb(musb->mregs,
++ MUSB_BUSCTL_OFFSET(epnum,
++ MUSB_RXFUNCADDR)),
++ musb_readb(musb->mregs,
++ MUSB_BUSCTL_OFFSET(epnum,
++ MUSB_RXHUBADDR)),
++ musb_readb(musb->mregs,
++ MUSB_BUSCTL_OFFSET(epnum,
++ MUSB_RXHUBPORT))
++ );
++ if (code <= 0)
++ break;
++ code = min(code, (int) max);
++ buf += code;
++ max -= code;
++
++ if ((is_cppi_enabled(musb) ||
++ is_cppi41_enabled(musb))
++ && epnum
++ && hw_ep->rx_channel) {
++ unsigned cppi = epnum - 1;
++ unsigned off1 = cppi << 2;
++ void __iomem *base;
++ void __iomem *ram;
++ char tmp[16];
++
++ base = musb->ctrl_base;
++ ram = DAVINCI_RXCPPI_STATERAM_OFFSET(
++ cppi) + base;
++ snprintf(tmp, sizeof tmp, "%d left, ",
++ musb_readl(base,
++ DAVINCI_RXCPPI_BUFCNT0_REG
++ + off1));
++
++ code = snprintf(buf, max,
++ " rx dma%d: %s"
++ "%08x %08x, %08x %08x; "
++ "%08x %08x %08x .. %08x\n",
++ cppi, tmp,
++ musb_readl(ram, 0 * 4),
++ musb_readl(ram, 1 * 4),
++ musb_readl(ram, 2 * 4),
++ musb_readl(ram, 3 * 4),
++ musb_readl(ram, 4 * 4),
++ musb_readl(ram, 5 * 4),
++ musb_readl(ram, 6 * 4),
++ musb_readl(ram, 7 * 4));
++ if (code <= 0)
++ break;
++ code = min(code, (int) max);
++ buf += code;
++ max -= code;
++ }
++
++ if (hw_ep == musb->bulk_ep
++ && !list_empty(
++ &musb->in_bulk)) {
++ code = dump_queue(&musb->in_bulk,
++ buf, max);
++ if (code <= 0)
++ break;
++ code = min(code, (int) max);
++ buf += code;
++ max -= code;
++ } else if (hw_ep->in_qh) {
++ code = dump_qh(hw_ep->in_qh,
++ buf, max);
++ if (code <= 0)
++ break;
++ code = min(code, (int) max);
++ buf += code;
++ max -= code;
++ }
++ }
++
++ if (dump_tx) {
++ code = snprintf(buf, max,
++ "\nTX%d: %s txcsr %04x interval %02x "
++ "max %04x type %02x; "
++ "dev %d hub %d port %d"
++ "\n",
++ epnum,
++ hw_ep->tx_double_buffered
++ ? "2buf" : "1buf",
++ musb_readw(regs, MUSB_TXCSR),
++ musb_readb(regs, MUSB_TXINTERVAL),
++ musb_readw(regs, MUSB_TXMAXP),
++ musb_readb(regs, MUSB_TXTYPE),
++ /* FIXME: assumes multipoint */
++ musb_readb(musb->mregs,
++ MUSB_BUSCTL_OFFSET(epnum,
++ MUSB_TXFUNCADDR)),
++ musb_readb(musb->mregs,
++ MUSB_BUSCTL_OFFSET(epnum,
++ MUSB_TXHUBADDR)),
++ musb_readb(musb->mregs,
++ MUSB_BUSCTL_OFFSET(epnum,
++ MUSB_TXHUBPORT))
++ );
++ if (code <= 0)
++ break;
++ code = min(code, (int) max);
++ buf += code;
++ max -= code;
++
++ if ((is_cppi_enabled(musb) ||
++ is_cppi41_enabled(musb))
++ && epnum
++ && hw_ep->tx_channel) {
++ unsigned cppi = epnum - 1;
++ void __iomem *base;
++ void __iomem *ram;
++
++ base = musb->ctrl_base;
++ ram = DAVINCI_RXCPPI_STATERAM_OFFSET(
++ cppi) + base;
++ code = snprintf(buf, max,
++ " tx dma%d: "
++ "%08x %08x, %08x %08x; "
++ "%08x %08x %08x .. %08x\n",
++ cppi,
++ musb_readl(ram, 0 * 4),
++ musb_readl(ram, 1 * 4),
++ musb_readl(ram, 2 * 4),
++ musb_readl(ram, 3 * 4),
++ musb_readl(ram, 4 * 4),
++ musb_readl(ram, 5 * 4),
++ musb_readl(ram, 6 * 4),
++ musb_readl(ram, 7 * 4));
++ if (code <= 0)
++ break;
++ code = min(code, (int) max);
++ buf += code;
++ max -= code;
++ }
++
++ if (hw_ep == musb->control_ep
++ && !list_empty(
++ &musb->control)) {
++ code = dump_queue(&musb->control,
++ buf, max);
++ if (code <= 0)
++ break;
++ code = min(code, (int) max);
++ buf += code;
++ max -= code;
++ } else if (hw_ep == musb->bulk_ep
++ && !list_empty(
++ &musb->out_bulk)) {
++ code = dump_queue(&musb->out_bulk,
++ buf, max);
++ if (code <= 0)
++ break;
++ code = min(code, (int) max);
++ buf += code;
++ max -= code;
++ } else if (hw_ep->out_qh) {
++ code = dump_qh(hw_ep->out_qh,
++ buf, max);
++ if (code <= 0)
++ break;
++ code = min(code, (int) max);
++ buf += code;
++ max -= code;
++ }
++ }
++ }
++ if (is_peripheral_active(musb)) {
++ code = 0;
++
++ if (hw_ep->ep_in.desc || !epnum) {
++ code = dump_ep(&hw_ep->ep_in, buf, max);
++ if (code <= 0)
++ break;
++ code = min(code, (int) max);
++ buf += code;
++ max -= code;
++ }
++ if (hw_ep->ep_out.desc) {
++ code = dump_ep(&hw_ep->ep_out, buf, max);
++ if (code <= 0)
++ break;
++ code = min(code, (int) max);
++ buf += code;
++ max -= code;
++ }
++ }
++ } while (0);
++
++ return buf - aBuffer;
++}
++
++/* Dump the current status and compile options.
++ * @param musb the device driver instance
++ * @param buffer where to dump the status; it must be big enough to hold the
++ * result otherwise "BAD THINGS HAPPEN" (TM).
++ */
++static int dump_header_stats(struct musb *musb, char *buffer)
++{
++ int code, count = 0;
++ const void __iomem *mbase = musb->mregs;
++
++ *buffer = 0;
++ count = sprintf(buffer, "Status: %sHDRC, Mode=%s "
++ "(Power=%02x, DevCtl=%02x)\n",
++ (musb->is_multipoint ? "M" : ""), MUSB_MODE(musb),
++ musb_readb(mbase, MUSB_POWER),
++ musb_readb(mbase, MUSB_DEVCTL));
++ if (count <= 0)
++ return 0;
++ buffer += count;
++
++ code = sprintf(buffer, "OTG state: %s; %sactive\n",
++ otg_state_string(musb->xceiv->state),
++ musb->is_active ? "" : "in");
++ if (code <= 0)
++ goto done;
++ buffer += code;
++ count += code;
++
++ code = sprintf(buffer,
++ "Options: "
++#ifdef CONFIG_MUSB_PIO_ONLY
++ "pio"
++#elif defined(CONFIG_USB_TI_CPPI_DMA)
++ "cppi-dma"
++#elif defined(CONFIG_USB_INVENTRA_DMA)
++ "musb-dma"
++#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
++ "tusb-omap-dma"
++#else
++ "?dma?"
++#endif
++ ", "
++ "otg (peripheral+host)"
++ ", [eps=%d]\n",
++ musb->nr_endpoints);
++ if (code <= 0)
++ goto done;
++ count += code;
++ buffer += code;
++
++ code = sprintf(buffer, "Peripheral address: %02x\n",
++ musb_readb(musb->ctrl_base, MUSB_FADDR));
++ if (code <= 0)
++ goto done;
++ buffer += code;
++ count += code;
++
++ code = sprintf(buffer, "Root port status: %08x\n",
++ musb->port1_status);
++ if (code <= 0)
++ goto done;
++ buffer += code;
++ count += code;
++
++#ifdef CONFIG_ARCH_DAVINCI
++ code = sprintf(buffer,
++ "DaVinci: ctrl=%02x stat=%1x phy=%03x\n"
++ "\trndis=%05x auto=%04x intsrc=%08x intmsk=%08x"
++ "\n",
++ musb_readl(musb->ctrl_base, DAVINCI_USB_CTRL_REG),
++ musb_readl(musb->ctrl_base, DAVINCI_USB_STAT_REG),
++ readl((void __force __iomem *)
++ IO_ADDRESS(USBPHY_CTL_PADDR)),
++ musb_readl(musb->ctrl_base, DAVINCI_RNDIS_REG),
++ musb_readl(musb->ctrl_base, DAVINCI_AUTOREQ_REG),
++ musb_readl(musb->ctrl_base,
++ DAVINCI_USB_INT_SOURCE_REG),
++ musb_readl(musb->ctrl_base,
++ DAVINCI_USB_INT_MASK_REG));
++ if (code <= 0)
++ goto done;
++ count += code;
++ buffer += code;
++#endif /* DAVINCI */
++
++#ifdef CONFIG_USB_TUSB6010
++ code = sprintf(buffer,
++ "TUSB6010: devconf %08x, phy enable %08x drive %08x"
++ "\n\totg %03x timer %08x"
++ "\n\tprcm conf %08x mgmt %08x; int src %08x mask %08x"
++ "\n",
++ musb_readl(musb->ctrl_base, TUSB_DEV_CONF),
++ musb_readl(musb->ctrl_base, TUSB_PHY_OTG_CTRL_ENABLE),
++ musb_readl(musb->ctrl_base, TUSB_PHY_OTG_CTRL),
++ musb_readl(musb->ctrl_base, TUSB_DEV_OTG_STAT),
++ musb_readl(musb->ctrl_base, TUSB_DEV_OTG_TIMER),
++ musb_readl(musb->ctrl_base, TUSB_PRCM_CONF),
++ musb_readl(musb->ctrl_base, TUSB_PRCM_MNGMT),
++ musb_readl(musb->ctrl_base, TUSB_INT_SRC),
++ musb_readl(musb->ctrl_base, TUSB_INT_MASK));
++ if (code <= 0)
++ goto done;
++ count += code;
++ buffer += code;
++#endif /* TUSB6010 */
++
++ if ((is_cppi_enabled(musb) || is_cppi41_enabled(musb))
++ && musb->dma_controller) {
++ code = sprintf(buffer,
++ "CPPI: txcr=%d txsrc=%01x txena=%01x; "
++ "rxcr=%d rxsrc=%01x rxena=%01x "
++ "\n",
++ musb_readl(musb->ctrl_base,
++ DAVINCI_TXCPPI_CTRL_REG),
++ musb_readl(musb->ctrl_base,
++ DAVINCI_TXCPPI_RAW_REG),
++ musb_readl(musb->ctrl_base,
++ DAVINCI_TXCPPI_INTENAB_REG),
++ musb_readl(musb->ctrl_base,
++ DAVINCI_RXCPPI_CTRL_REG),
++ musb_readl(musb->ctrl_base,
++ DAVINCI_RXCPPI_RAW_REG),
++ musb_readl(musb->ctrl_base,
++ DAVINCI_RXCPPI_INTENAB_REG));
++ if (code <= 0)
++ goto done;
++ count += code;
++ buffer += code;
++ }
++
++ if (is_peripheral_enabled(musb)) {
++ code = sprintf(buffer, "Gadget driver: %s\n",
++ musb->gadget_driver
++ ? musb->gadget_driver->driver.name
++ : "(none)");
++ if (code <= 0)
++ goto done;
++ count += code;
++ buffer += code;
++ }
++
++done:
++ return count;
++}
++
++/* Write to ProcFS
++ *
++ * C soft-connect
++ * c soft-disconnect
++ * I enable HS
++ * i disable HS
++ * s stop session
++ * F force session (OTG-unfriendly)
++ * E rElinquish bus (OTG)
++ * H request host mode
++ * h cancel host request
++ * T start sending TEST_PACKET
++ * D<num> set/query the debug level
++ */
++static int musb_proc_write(struct file *file, const char __user *buffer,
++ unsigned long count, void *data)
++{
++ char cmd;
++ u8 reg;
++ struct musb *musb = (struct musb *)data;
++ void __iomem *mbase = musb->mregs;
++
++ /* MOD_INC_USE_COUNT; */
++
++ if (unlikely(copy_from_user(&cmd, buffer, 1)))
++ return -EFAULT;
++
++ switch (cmd) {
++ case 'S':
++ if (mbase) {
++ reg = musb_readb(mbase, MUSB_POWER)
++ | MUSB_POWER_SUSPENDM;
++ musb_writeb(mbase, MUSB_POWER, reg);
++ }
++ break;
++
++ case 'C':
++ if (mbase) {
++ reg = musb_readb(mbase, MUSB_POWER)
++ | MUSB_POWER_SOFTCONN;
++ musb_writeb(mbase, MUSB_POWER, reg);
++ }
++ break;
++
++ case 'c':
++ if (mbase) {
++ reg = musb_readb(mbase, MUSB_POWER)
++ & ~MUSB_POWER_SOFTCONN;
++ musb_writeb(mbase, MUSB_POWER, reg);
++ }
++ break;
++
++ case 'I':
++ if (mbase) {
++ reg = musb_readb(mbase, MUSB_POWER)
++ | MUSB_POWER_HSENAB;
++ musb_writeb(mbase, MUSB_POWER, reg);
++ }
++ break;
++
++ case 'i':
++ if (mbase) {
++ reg = musb_readb(mbase, MUSB_POWER)
++ & ~MUSB_POWER_HSENAB;
++ musb_writeb(mbase, MUSB_POWER, reg);
++ }
++ break;
++
++ case 'F':
++ reg = musb_readb(mbase, MUSB_DEVCTL);
++ reg |= MUSB_DEVCTL_SESSION;
++ musb_writeb(mbase, MUSB_DEVCTL, reg);
++ break;
++
++ case 'H':
++ if (mbase) {
++ reg = musb_readb(mbase, MUSB_DEVCTL);
++ reg |= MUSB_DEVCTL_HR;
++ musb_writeb(mbase, MUSB_DEVCTL, reg);
++ /* MUSB_HST_MODE( ((struct musb*)data) ); */
++ /* WARNING("Host Mode\n"); */
++ }
++ break;
++
++ case 'h':
++ if (mbase) {
++ reg = musb_readb(mbase, MUSB_DEVCTL);
++ reg &= ~MUSB_DEVCTL_HR;
++ musb_writeb(mbase, MUSB_DEVCTL, reg);
++ }
++ break;
++
++ case 'T':
++ if (mbase) {
++ musb_load_testpacket(musb);
++ musb_writeb(mbase, MUSB_TESTMODE,
++ MUSB_TEST_PACKET);
++ musb_writew(musb->endpoints[0].regs,
++ MUSB_CSR0, MUSB_CSR0_TXPKTRDY);
++ dev_dbg(musb->controller,
++ "musb:testmode sending test packet\n");
++ }
++ break;
++
++ case 'b':
++ /* generate software babble interrupt */
++ musb_simulate_babble_intr(musb);
++ break;
++
++ case 'K':
++ /* enable babble workaround */
++ musb->enable_babble_work = 1;
++ INFO("enabled babble workaround\n");
++ break;
++
++ case 'k':
++ /* disable babble workaround */
++ musb->enable_babble_work = 0;
++ INFO("disabled babble workaround\n");
++ break;
++
++ case '?':
++ INFO("?: you are seeing it\n");
++ INFO("S: suspend the usb bus\n");
++ INFO("C/c: soft connect enable/disable\n");
++ INFO("I/i: hispeed enable/disable\n");
++ INFO("F: force session start\n");
++ INFO("H: host mode\n");
++ INFO("T: start sending TEST_PACKET\n");
++		INFO("D: set/read debug level\n");
++ INFO("K/k: enable/disable babble workaround\n");
++ break;
++
++ default:
++ ERR("Command %c not implemented\n", cmd);
++ break;
++ }
++
++ musb_platform_try_idle(musb, 0);
++
++ return count;
++}
++
++static int musb_proc_read(char *page, char **start,
++ off_t off, int count, int *eof, void *data)
++{
++ char *buffer = page;
++ int code = 0;
++ unsigned long flags;
++ struct musb *musb = data;
++ unsigned epnum;
++
++ count -= off;
++ count -= 1; /* for NUL at end */
++ if (count <= 0)
++ return -EINVAL;
++
++ spin_lock_irqsave(&musb->lock, flags);
++
++ code = dump_header_stats(musb, buffer);
++ if (code > 0) {
++ buffer += code;
++ count -= code;
++ }
++
++ /* generate the report for the end points */
++ /* REVISIT ... not unless something's connected! */
++ for (epnum = 0; count >= 0 && epnum < musb->nr_endpoints;
++ epnum++) {
++ code = dump_end_info(musb, epnum, buffer, count);
++ if (code > 0) {
++ buffer += code;
++ count -= code;
++ }
++ }
++
++ musb_platform_try_idle(musb, 0);
++
++ spin_unlock_irqrestore(&musb->lock, flags);
++ *eof = 1;
++
++ return buffer - page;
++}
++
++void musb_debug_delete(char *name, struct musb *musb)
++{
++ if (musb->proc_entry)
++ remove_proc_entry(name, NULL);
++}
++
++struct proc_dir_entry *__devinit
++musb_debug_create(char *name, struct musb *data)
++{
++ struct proc_dir_entry *pde;
++
++ /* FIXME convert everything to seq_file; then later, debugfs */
++
++ if (!name)
++ return NULL;
++
++ pde = create_proc_entry(name, S_IFREG | S_IRUGO | S_IWUSR, NULL);
++ data->proc_entry = pde;
++ if (pde) {
++ pde->data = data;
++ /* pde->owner = THIS_MODULE; */
++
++ pde->read_proc = musb_proc_read;
++ pde->write_proc = musb_proc_write;
++
++ pde->size = 0;
++
++ pr_debug("Registered /proc/%s\n", name);
++ } else {
++ pr_debug("Cannot create a valid proc file entry");
++ }
++
++ return pde;
++}
+diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
+index 03f2655..b222c08 100644
+--- a/drivers/usb/musb/musb_regs.h
++++ b/drivers/usb/musb/musb_regs.h
+@@ -234,12 +234,8 @@
+ #define MUSB_TESTMODE 0x0F /* 8 bit */
+
+ /* Get offset for a given FIFO from musb->mregs */
+-#if defined(CONFIG_USB_MUSB_TUSB6010) || \
+- defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
+-#define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20))
+-#else
++#define MUSB_TUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20))
+ #define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4))
+-#endif
+
+ /*
+ * Additional Control Registers
+@@ -288,21 +284,22 @@
+ #define MUSB_FIFOSIZE 0x0F
+ #define MUSB_CONFIGDATA MUSB_FIFOSIZE /* Re-used for EP0 */
+
++#if 0
+ /* Offsets to endpoint registers in indexed model (using INDEX register) */
+-#define MUSB_INDEXED_OFFSET(_epnum, _offset) \
++#define MUSB_INDEXED_OFFSET(_musb, _epnum, _offset) \
+ (0x10 + (_offset))
+
+ /* Offsets to endpoint registers in flat models */
+-#define MUSB_FLAT_OFFSET(_epnum, _offset) \
++#define MUSB_FLAT_OFFSET(_musb, _epnum, _offset) \
+ (0x100 + (0x10*(_epnum)) + (_offset))
++#endif
++
++#define MUSB_OFFSET(_musb, _epnum, _offset) \
++ ((_musb)->ops->flags & MUSB_GLUE_EP_ADDR_INDEXED_MAPPING ? \
++ (0x10 + (_offset)) : (0x100 + (0x10*(_epnum)) + (_offset)))
++
+
+-#if defined(CONFIG_USB_MUSB_TUSB6010) || \
+- defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
+-/* TUSB6010 EP0 configuration register is special */
+-#define MUSB_TUSB_OFFSET(_epnum, _offset) \
+- (0x10 + _offset)
+ #include "tusb6010.h" /* Needed "only" for TUSB_EP0_CONF */
+-#endif
+
+ #define MUSB_TXCSR_MODE 0x2000
+
+@@ -506,11 +503,11 @@ static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum)
+ #define MUSB_TXCOUNT 0x28
+
+ /* Offsets to endpoint registers in indexed model (using INDEX register) */
+-#define MUSB_INDEXED_OFFSET(_epnum, _offset) \
++#define MUSB_INDEXED_OFFSET(_musb, _epnum, _offset) \
+ (0x40 + (_offset))
+
+ /* Offsets to endpoint registers in flat models */
+-#define MUSB_FLAT_OFFSET(_epnum, _offset) \
++#define MUSB_FLAT_OFFSET(_musb, _epnum, _offset) \
+ (USB_OFFSET(USB_EP_NI0_TXMAXP) + (0x40 * (_epnum)) + (_offset))
+
+ /* Not implemented - HW has separate Tx/Rx FIFO */
+diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
+index e9f80ad..211c24a 100644
+--- a/drivers/usb/musb/musb_virthub.c
++++ b/drivers/usb/musb/musb_virthub.c
+@@ -82,17 +82,15 @@ static void musb_port_suspend(struct musb *musb, bool do_suspend)
+ musb->xceiv->state = OTG_STATE_A_SUSPEND;
+ musb->is_active = is_otg_enabled(musb)
+ && musb->xceiv->host->b_hnp_enable;
+- if (musb->is_active)
+- mod_timer(&musb->otg_timer, jiffies
+- + msecs_to_jiffies(
+- OTG_TIME_A_AIDL_BDIS));
+ musb_platform_try_idle(musb, 0);
+ break;
+ case OTG_STATE_B_HOST:
+- musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
+- musb->is_active = is_otg_enabled(musb)
++ if (is_otg_enabled(musb)) {
++ musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
++ musb->is_active = is_otg_enabled(musb)
+ && musb->xceiv->host->b_hnp_enable;
+- musb_platform_try_idle(musb, 0);
++ musb_platform_try_idle(musb, 0);
++ }
+ break;
+ default:
+ dev_dbg(musb->controller, "bogus rh suspend? %s\n",
+@@ -116,7 +114,7 @@ static void musb_port_reset(struct musb *musb, bool do_reset)
+ u8 power;
+ void __iomem *mbase = musb->mregs;
+
+- if (musb->xceiv->state == OTG_STATE_B_IDLE) {
++ if (is_otg_enabled(musb) && musb->xceiv->state == OTG_STATE_B_IDLE) {
+ dev_dbg(musb->controller, "HNP: Returning from HNP; no hub reset from b_idle\n");
+ musb->port1_status &= ~USB_PORT_STAT_RESET;
+ return;
+@@ -186,15 +184,8 @@ void musb_root_disconnect(struct musb *musb)
+ musb->is_active = 0;
+
+ switch (musb->xceiv->state) {
+- case OTG_STATE_A_SUSPEND:
+- if (is_otg_enabled(musb)
+- && musb->xceiv->host->b_hnp_enable) {
+- musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
+- musb->g.is_a_peripheral = 1;
+- break;
+- }
+- /* FALLTHROUGH */
+ case OTG_STATE_A_HOST:
++ case OTG_STATE_A_SUSPEND:
+ musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
+ musb->is_active = 0;
+ break;
+@@ -414,6 +405,10 @@ int musb_hub_control(
+ goto error;
+ }
+ musb_writeb(musb->mregs, MUSB_TESTMODE, temp);
++ if (wIndex == 4) {
++ musb_writew(musb->endpoints[0].regs,
++ MUSB_CSR0, MUSB_CSR0_TXPKTRDY);
++ }
+ break;
+ default:
+ goto error;
+diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
+index 57a6085..9ba7a30 100644
+--- a/drivers/usb/musb/musbhsdma.c
++++ b/drivers/usb/musb/musbhsdma.c
+@@ -30,6 +30,7 @@
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
++#include <linux/module.h>
+ #include <linux/device.h>
+ #include <linux/interrupt.h>
+ #include <linux/platform_device.h>
+@@ -205,14 +206,14 @@ static int dma_channel_abort(struct dma_channel *channel)
+ {
+ struct musb_dma_channel *musb_channel = channel->private_data;
+ void __iomem *mbase = musb_channel->controller->base;
+-
++ struct musb *musb = musb_channel->controller->private_data;
+ u8 bchannel = musb_channel->idx;
+ int offset;
+ u16 csr;
+
+ if (channel->status == MUSB_DMA_STATUS_BUSY) {
+ if (musb_channel->transmit) {
+- offset = MUSB_EP_OFFSET(musb_channel->epnum,
++ offset = MUSB_EP_OFFSET(musb, musb_channel->epnum,
+ MUSB_TXCSR);
+
+ /*
+@@ -225,7 +226,7 @@ static int dma_channel_abort(struct dma_channel *channel)
+ csr &= ~MUSB_TXCSR_DMAMODE;
+ musb_writew(mbase, offset, csr);
+ } else {
+- offset = MUSB_EP_OFFSET(musb_channel->epnum,
++ offset = MUSB_EP_OFFSET(musb, musb_channel->epnum,
+ MUSB_RXCSR);
+
+ csr = musb_readw(mbase, offset);
+@@ -336,7 +337,7 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
+ (musb_channel->max_packet_sz - 1)))
+ ) {
+ u8 epnum = musb_channel->epnum;
+- int offset = MUSB_EP_OFFSET(epnum,
++ int offset = MUSB_EP_OFFSET(musb, epnum,
+ MUSB_TXCSR);
+ u16 txcsr;
+
+@@ -344,7 +345,7 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
+ * The programming guide says that we
+ * must clear DMAENAB before DMAMODE.
+ */
+- musb_ep_select(mbase, epnum);
++ musb_ep_select(musb, mbase, epnum);
+ txcsr = musb_readw(mbase, offset);
+ txcsr &= ~(MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_AUTOSET);
+@@ -366,7 +367,7 @@ done:
+ return retval;
+ }
+
+-void dma_controller_destroy(struct dma_controller *c)
++void inventra_dma_controller_destroy(struct dma_controller *c)
+ {
+ struct musb_dma_controller *controller = container_of(c,
+ struct musb_dma_controller, controller);
+@@ -379,9 +380,10 @@ void dma_controller_destroy(struct dma_controller *c)
+
+ kfree(controller);
+ }
++EXPORT_SYMBOL(inventra_dma_controller_destroy);
+
+-struct dma_controller *__init
+-dma_controller_create(struct musb *musb, void __iomem *base)
++struct dma_controller *__devinit
++inventra_dma_controller_create(struct musb *musb, void __iomem *base)
+ {
+ struct musb_dma_controller *controller;
+ struct device *dev = musb->controller;
+@@ -411,7 +413,7 @@ dma_controller_create(struct musb *musb, void __iomem *base)
+ if (request_irq(irq, dma_controller_irq, 0,
+ dev_name(musb->controller), &controller->controller)) {
+ dev_err(dev, "request_irq %d failed!\n", irq);
+- dma_controller_destroy(&controller->controller);
++ inventra_dma_controller_destroy(&controller->controller);
+
+ return NULL;
+ }
+@@ -420,3 +422,18 @@ dma_controller_create(struct musb *musb, void __iomem *base)
+
+ return &controller->controller;
+ }
++EXPORT_SYMBOL(inventra_dma_controller_create);
++
++MODULE_DESCRIPTION("MUSB Inventra dma controller driver");
++MODULE_LICENSE("GPL v2");
++
++static int __init inventra_dma_init(void)
++{
++ return 0;
++}
++module_init(inventra_dma_init);
++
++static void __exit inventra_dma_exit(void)
++{
++}
++module_exit(inventra_dma_exit);
+diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
+index ba85f27..dd907d5 100644
+--- a/drivers/usb/musb/omap2430.c
++++ b/drivers/usb/musb/omap2430.c
+@@ -29,7 +29,6 @@
+ #include <linux/sched.h>
+ #include <linux/init.h>
+ #include <linux/list.h>
+-#include <linux/clk.h>
+ #include <linux/io.h>
+ #include <linux/platform_device.h>
+ #include <linux/dma-mapping.h>
+@@ -96,6 +95,7 @@ static void musb_do_idle(unsigned long _musb)
+ spin_unlock_irqrestore(&musb->lock, flags);
+ }
+
++#define MUSB_TIMEOUT_A_WAIT_BCON 1100
+
+ static void omap2430_musb_try_idle(struct musb *musb, unsigned long timeout)
+ {
+@@ -228,21 +228,25 @@ static int musb_otg_notifications(struct notifier_block *nb,
+ unsigned long event, void *unused)
+ {
+ struct musb *musb = container_of(nb, struct musb, nb);
++
++ musb->xceiv_event = event;
++ schedule_work(&musb->otg_notifier_work);
++
++ return 0;
++}
++
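++/*
++ * Note: the transceiver notifier may be invoked in atomic context, so
++ * musb_otg_notifications() above only records the event and schedules this
++ * work item; the heavier handling (pm_runtime_get_sync(), otg_init(), VBUS
++ * switching) then runs here in process context where sleeping is allowed.
++ */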
++static void musb_otg_notifier_work(struct work_struct *data_notifier_work)
++{
++ struct musb *musb = container_of(data_notifier_work, struct musb, otg_notifier_work);
+ struct device *dev = musb->controller;
+ struct musb_hdrc_platform_data *pdata = dev->platform_data;
+ struct omap_musb_board_data *data = pdata->board_data;
+
+- switch (event) {
++ switch (musb->xceiv_event) {
+ case USB_EVENT_ID:
+ dev_dbg(musb->controller, "ID GND\n");
+
+- if (is_otg_enabled(musb)) {
+- if (musb->gadget_driver) {
+- pm_runtime_get_sync(musb->controller);
+- otg_init(musb->xceiv);
+- omap2430_musb_set_vbus(musb, 1);
+- }
+- } else {
++ if (!is_otg_enabled(musb) || musb->gadget_driver) {
+ pm_runtime_get_sync(musb->controller);
+ otg_init(musb->xceiv);
+ omap2430_musb_set_vbus(musb, 1);
+@@ -274,10 +278,7 @@ static int musb_otg_notifications(struct notifier_block *nb,
+ break;
+ default:
+ dev_dbg(musb->controller, "ID float\n");
+- return NOTIFY_DONE;
+ }
+-
+- return NOTIFY_OK;
+ }
+
+ static int omap2430_musb_init(struct musb *musb)
+@@ -291,12 +292,14 @@ static int omap2430_musb_init(struct musb *musb)
+ * up through ULPI. TWL4030-family PMICs include one,
+ * which needs a driver, drivers aren't always needed.
+ */
+- musb->xceiv = otg_get_transceiver();
++ musb->xceiv = otg_get_transceiver(musb->id);
+ if (!musb->xceiv) {
+ pr_err("HS USB OTG: no transceiver configured\n");
+ return -ENODEV;
+ }
+
++ INIT_WORK(&musb->otg_notifier_work, musb_otg_notifier_work);
++
+ status = pm_runtime_get_sync(dev);
+ if (status < 0) {
+ dev_err(dev, "pm_runtime_get_sync FAILED");
+@@ -329,12 +332,12 @@ static int omap2430_musb_init(struct musb *musb)
+ if (status)
+ dev_dbg(musb->controller, "notification register failed\n");
+
++ musb->a_wait_bcon = MUSB_TIMEOUT_A_WAIT_BCON;
+ setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
+
+ return 0;
+
+ err1:
+- pm_runtime_disable(dev);
+ return status;
+ }
+
+@@ -350,20 +353,19 @@ static void omap2430_musb_enable(struct musb *musb)
+
+ case USB_EVENT_ID:
+ otg_init(musb->xceiv);
+- if (data->interface_type == MUSB_INTERFACE_UTMI) {
+- devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+- /* start the session */
+- devctl |= MUSB_DEVCTL_SESSION;
+- musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+- while (musb_readb(musb->mregs, MUSB_DEVCTL) &
+- MUSB_DEVCTL_BDEVICE) {
+- cpu_relax();
+-
+- if (time_after(jiffies, timeout)) {
+- dev_err(musb->controller,
+- "configured as A device timeout");
+- break;
+- }
++ if (data->interface_type != MUSB_INTERFACE_UTMI)
++ break;
++ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
++ /* start the session */
++ devctl |= MUSB_DEVCTL_SESSION;
++ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
++ while (musb_readb(musb->mregs, MUSB_DEVCTL) &
++ MUSB_DEVCTL_BDEVICE) {
++ cpu_relax();
++
++ if (time_after(jiffies, timeout)) {
++ dev_err(dev, "configured as A device timeout");
++ break;
+ }
+ }
+ break;
+@@ -394,9 +396,15 @@ static int omap2430_musb_exit(struct musb *musb)
+ }
+
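++/*
++ * The added fields below let the core pick up the glue's FIFO mode, the flat
++ * endpoint address mapping flag and the Inventra DMA constructor/destructor
++ * from musb_platform_ops (matching the renamed inventra_dma_controller_*
++ * exports in musbhsdma.c); previously dma_controller_create()/destroy()
++ * were resolved at link time.
++ */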
+ static const struct musb_platform_ops omap2430_ops = {
++ .fifo_mode = 4,
++ .flags = MUSB_GLUE_EP_ADDR_FLAT_MAPPING |
++ MUSB_GLUE_DMA_INVENTRA,
+ .init = omap2430_musb_init,
+ .exit = omap2430_musb_exit,
+
++ .read_fifo = musb_read_fifo,
++ .write_fifo = musb_write_fifo,
++
+ .set_mode = omap2430_musb_set_mode,
+ .try_idle = omap2430_musb_try_idle,
+
+@@ -404,6 +412,9 @@ static const struct musb_platform_ops omap2430_ops = {
+
+ .enable = omap2430_musb_enable,
+ .disable = omap2430_musb_disable,
++
++ .dma_controller_create = inventra_dma_controller_create,
++ .dma_controller_destroy = inventra_dma_controller_destroy,
+ };
+
+ static u64 omap2430_dmamask = DMA_BIT_MASK(32);
+@@ -421,12 +432,13 @@ static int __init omap2430_probe(struct platform_device *pdev)
+ goto err0;
+ }
+
+- musb = platform_device_alloc("musb-hdrc", -1);
++ musb = platform_device_alloc("musb-hdrc", pdev->id);
+ if (!musb) {
+ dev_err(&pdev->dev, "failed to allocate musb device\n");
+ goto err1;
+ }
+
++ dev_set_name(&pdev->dev, "musb-omap2430");
+ musb->dev.parent = &pdev->dev;
+ musb->dev.dma_mask = &omap2430_dmamask;
+ musb->dev.coherent_dma_mask = omap2430_dmamask;
+@@ -478,7 +490,6 @@ static int __exit omap2430_remove(struct platform_device *pdev)
+ platform_device_del(glue->musb);
+ platform_device_put(glue->musb);
+ pm_runtime_put(&pdev->dev);
+- pm_runtime_disable(&pdev->dev);
+ kfree(glue);
+
+ return 0;
+@@ -491,6 +502,9 @@ static int omap2430_runtime_suspend(struct device *dev)
+ struct omap2430_glue *glue = dev_get_drvdata(dev);
+ struct musb *musb = glue_to_musb(glue);
+
++ musb->context.otg_interfsel = musb_readl(musb->mregs,
++ OTG_INTERFSEL);
++
+ omap2430_low_level_exit(musb);
+ otg_set_suspend(musb->xceiv, 1);
+
+@@ -503,6 +517,9 @@ static int omap2430_runtime_resume(struct device *dev)
+ struct musb *musb = glue_to_musb(glue);
+
+ omap2430_low_level_init(musb);
++ musb_writel(musb->mregs, OTG_INTERFSEL,
++ musb->context.otg_interfsel);
++
+ otg_set_suspend(musb->xceiv, 0);
+
+ return 0;
+diff --git a/drivers/usb/musb/ti81xx.c b/drivers/usb/musb/ti81xx.c
+new file mode 100644
+index 0000000..662aceb
+--- /dev/null
++++ b/drivers/usb/musb/ti81xx.c
+@@ -0,0 +1,1649 @@
++/*
++ * Texas Instruments TI81XX "usb platform glue layer"
++ *
++ * Copyright (c) 2008, MontaVista Software, Inc. <source@mvista.com>
++ *
++ * Based on the DaVinci "glue layer" code.
++ * Copyright (C) 2005-2006 by Texas Instruments
++ *
++ * This file is part of the Inventra Controller Driver for Linux.
++ *
++ * The Inventra Controller Driver for Linux is free software; you
++ * can redistribute it and/or modify it under the terms of the GNU
++ * General Public License version 2 as published by the Free Software
++ * Foundation.
++ *
++ * The Inventra Controller Driver for Linux is distributed in
++ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
++ * without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
++ * License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with The Inventra Controller Driver for Linux ; if not,
++ * write to the Free Software Foundation, Inc., 59 Temple Place,
++ * Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/usb/otg.h>
++#include <linux/platform_device.h>
++#include <linux/dma-mapping.h>
++#include <linux/module.h>
++
++#include "cppi41.h"
++#include "ti81xx.h"
++
++#include "musb_core.h"
++#include "cppi41_dma.h"
++
++#ifdef CONFIG_PM
++struct ti81xx_usbss_regs {
++ u32 sysconfig;
++
++ u32 irq_en_set;
++
++#ifdef CONFIG_USB_TI_CPPI41_DMA
++ u32 irq_dma_th_tx0[4];
++ u32 irq_dma_th_rx0[4];
++ u32 irq_dma_th_tx1[4];
++ u32 irq_dma_th_rx1[4];
++ u32 irq_dma_en[2];
++
++ u32 irq_frame_th_tx0[4];
++ u32 irq_frame_th_rx0[4];
++ u32 irq_frame_th_tx1[4];
++ u32 irq_frame_th_rx1[4];
++ u32 irq_frame_en[2];
++#endif
++};
++
++struct ti81xx_usb_regs {
++ u32 control;
++
++ u32 irq_en_set[2];
++
++#ifdef CONFIG_USB_TI_CPPI41_DMA
++ u32 tx_mode;
++ u32 rx_mode;
++ u32 grndis_size[15];
++ u32 auto_req;
++ u32 teardn;
++ u32 th_xdma_idle;
++#endif
++ u32 srp_fix;
++ u32 phy_utmi;
++ u32 mgc_utmi_loopback;
++ u32 mode;
++};
++#endif
++
++struct ti81xx_glue {
++ struct device *dev;
++ struct resource *mem_pa; /* usbss memory resource */
++ void *mem_va; /* ioremapped virtual address */
++ struct platform_device *musb[2];/* child musb pdevs */
++ u8 irq; /* usbss irq */
++ u8 first; /* ignore first call of resume */
++
++#ifdef CONFIG_PM
++ struct ti81xx_usbss_regs usbss_regs;
++ struct ti81xx_usb_regs usb_regs[2];
++#endif
++};
++
++static u64 musb_dmamask = DMA_BIT_MASK(32);
++static void *usbss_virt_base;
++static u8 usbss_init_done;
++struct musb *gmusb[2];
++
++u8 usbid_sw_ctrl;
++#undef USB_TI81XX_DEBUG
++
++#ifdef USB_TI81XX_DEBUG
++#define dprintk(x, ...) printk(x, ## __VA_ARGS__)
++#else
++#define dprintk(x, ...)
++#endif
++
++#ifdef CONFIG_USB_TI_CPPI41_DMA
++static irqreturn_t cppi41dma_Interrupt(int irq, void *hci);
++static u8 cppi41_init_done;
++static void *cppi41_dma_base;
++#define CPPI41_ADDR(offs) ((void *)((u32)cppi41_dma_base + (offs - 0x2000)))
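++/*
++ * CPPI41_ADDR() turns a USBSS-relative register offset (the *_OFFS values
++ * in ti81xx.h start at DMA_GLBCTRL_OFFS == 0x2000) into an address within
++ * the region ioremapped from TI81XX_USB_CPPIDMA_BASE, hence the 0x2000
++ * subtraction.
++ */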
++#endif
++
++extern void omap_ctrl_writel(u32 val, u16 offset);
++extern u32 omap_ctrl_readl(u16 offset);
++
++static inline u32 usbss_read(u32 offset)
++{
++ if (!usbss_init_done)
++ return 0;
++ return readl(usbss_virt_base + offset);
++}
++
++static inline void usbss_write(u32 offset, u32 data)
++{
++ if (!usbss_init_done)
++ return;
++ writel(data, usbss_virt_base + offset);
++}
++
++static void usbotg_ss_init(void)
++{
++ if (!usbss_init_done) {
++ usbss_init_done = 1;
++
++ /* clear any USBSS interrupts */
++ usbss_write(USBSS_IRQ_EOI, 0);
++ usbss_write(USBSS_IRQ_STATUS, usbss_read(USBSS_IRQ_STATUS));
++ }
++}
++static void usbotg_ss_uninit(void)
++{
++ if (usbss_init_done) {
++ usbss_init_done = 0;
++ usbss_virt_base = 0;
++ }
++}
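++
++/*
++ * The USBSS DMA/frame threshold registers pack four 8-bit per-endpoint
++ * values into each 32-bit word, so the helpers below select the word with
++ * (epnum / 4) * 4 and the byte lane with (epnum % 4) * 8.  Worked example
++ * (illustrative): epnum 6 lands at word offset 0x4, bits 23..16.
++ */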
++void set_frame_threshold(struct musb *musb, u8 is_tx, u8 epnum, u8 value, u8 en_intr)
++{
++ u32 base, reg_val, frame_intr = 0, frame_base = 0;
++ u32 offs = epnum/4*4;
++ u8 indx = (epnum % 4) * 8;
++
++ if (is_tx)
++ base = musb->id ? USBSS_IRQ_FRAME_THRESHOLD_TX1 :
++ USBSS_IRQ_FRAME_THRESHOLD_TX0;
++ else
++ base = musb->id ? USBSS_IRQ_FRAME_THRESHOLD_RX1 :
++ USBSS_IRQ_FRAME_THRESHOLD_RX0;
++
++ reg_val = usbss_read(base + offs);
++ reg_val &= ~(0xFF << indx);
++ reg_val |= (value << indx);
++ usbss_write(base + offs, reg_val);
++
++ if (en_intr) {
++ frame_base = musb->id ? USBSS_IRQ_FRAME_ENABLE_1 :
++ USBSS_IRQ_FRAME_ENABLE_0;
++ frame_intr = usbss_read(frame_base);
++ frame_intr |= is_tx ? (1 << epnum) : (1 << (16 + epnum));
++ usbss_write(frame_base, frame_intr);
++ dev_dbg(musb->controller, "%s: framebase=%x, frame_intr=%x\n",
++ is_tx ? "tx" : "rx", frame_base, frame_intr);
++ }
++}
++
++void set_dma_threshold(struct musb *musb, u8 is_tx, u8 epnum, u8 value)
++{
++ u32 base, reg_val;
++ u32 offs = epnum/4*4;
++ u8 indx = (epnum % 4) * 8;
++
++ if (musb->id == 0)
++ base = is_tx ? USBSS_IRQ_DMA_THRESHOLD_TX0 :
++ USBSS_IRQ_DMA_THRESHOLD_RX0;
++ else
++ base = is_tx ? USBSS_IRQ_DMA_THRESHOLD_TX1 :
++ USBSS_IRQ_DMA_THRESHOLD_RX1;
++
++ reg_val = usbss_read(base + offs);
++ reg_val &= ~(0xFF << indx);
++ reg_val |= (value << indx);
++ dev_dbg(musb->controller, "base=%x, offs=%x, indx=%d, reg_val = (%x)%x\n",
++ base, offs, indx, reg_val, usbss_read(base + offs));
++ usbss_write(base + offs, reg_val);
++}
++
++/* ti81xx specific read/write functions */
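++/*
++ * As with the FIFO (see ti81xx_musb_read_fifo() below), reads of the MUSB
++ * core registers are performed as aligned 32-bit accesses and the requested
++ * byte/half-word lane is extracted from the result; e.g. a byte read at
++ * offset 0x03 returns bits 31..24 of the word at offset 0x00.
++ */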
++u16 ti81xx_musb_readw(const void __iomem *addr, unsigned offset)
++{
++ u32 tmp;
++ u16 val;
++
++ tmp = readl(addr + (offset & ~3));
++
++ switch (offset & 0x3) {
++ case 0:
++ val = (tmp & 0xffff);
++ break;
++ case 1:
++ val = (tmp >> 8) & 0xffff;
++ break;
++ case 2:
++ case 3:
++ default:
++ val = (tmp >> 16) & 0xffff;
++ break;
++ }
++ return val;
++}
++
++void ti81xx_musb_writew(void __iomem *addr, unsigned offset, u16 data)
++{
++ __raw_writew(data, addr + offset);
++}
++
++u8 ti81xx_musb_readb(const void __iomem *addr, unsigned offset)
++{
++ u32 tmp;
++ u8 val;
++
++ tmp = readl(addr + (offset & ~3));
++
++ switch (offset & 0x3) {
++ case 0:
++ val = tmp & 0xff;
++ break;
++ case 1:
++ val = (tmp >> 8);
++ break;
++ case 2:
++ val = (tmp >> 16);
++ break;
++ case 3:
++ default:
++ val = (tmp >> 24);
++ break;
++ }
++ return val;
++}
++void ti81xx_musb_writeb(void __iomem *addr, unsigned offset, u8 data)
++{
++ __raw_writeb(data, addr + offset);
++}
++
++#ifdef CONFIG_USB_TI_CPPI41_DMA
++/*
++ * CPPI 4.1 resources used for USB OTG controller module:
++ *
++ * tx/rx completion queues for usb0 */
++static u16 tx_comp_q[] = {93, 94, 95, 96, 97,
++ 98, 99, 100, 101, 102,
++ 103, 104, 105, 106, 107 };
++
++static u16 rx_comp_q[] = {109, 110, 111, 112, 113,
++ 114, 115, 116, 117, 118,
++ 119, 120, 121, 122, 123 };
++
++/* tx/rx completion queues for usb1 */
++static u16 tx_comp_q1[] = {125, 126, 127, 128, 129,
++ 130, 131, 132, 133, 134,
++ 135, 136, 137, 138, 139 };
++
++static u16 rx_comp_q1[] = {141, 142, 143, 144, 145,
++ 146, 147, 148, 149, 150,
++ 151, 152, 153, 154, 155 };
++
++/* Fair scheduling */
++u32 dma_sched_table[] = {
++ 0x81018000, 0x83038202, 0x85058404, 0x87078606,
++ 0x89098808, 0x8b0b8a0a, 0x8d0d8c0c, 0x8f0f8e0e,
++ 0x91119010, 0x93139212, 0x95159414, 0x97179616,
++ 0x99199818, 0x9b1b9a1a, 0x9d1d9c1c, 0x00009e1e,
++};
++
++/* cppi41 dma tx channel info */
++static const struct cppi41_tx_ch tx_ch_info[] = {
++ [0] = {
++ .port_num = 1,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 32} , {0, 33} }
++ },
++ [1] = {
++ .port_num = 2,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 34} , {0, 35} }
++ },
++ [2] = {
++ .port_num = 3,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 36} , {0, 37} }
++ },
++ [3] = {
++ .port_num = 4,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 38} , {0, 39} }
++ },
++ [4] = {
++ .port_num = 5,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 40} , {0, 41} }
++ },
++ [5] = {
++ .port_num = 6,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 42} , {0, 43} }
++ },
++ [6] = {
++ .port_num = 7,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 44} , {0, 45} }
++ },
++ [7] = {
++ .port_num = 8,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 46} , {0, 47} }
++ },
++ [8] = {
++ .port_num = 9,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 48} , {0, 49} }
++ },
++ [9] = {
++ .port_num = 10,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 50} , {0, 51} }
++ },
++ [10] = {
++ .port_num = 11,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 52} , {0, 53} }
++ },
++ [11] = {
++ .port_num = 12,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 54} , {0, 55} }
++ },
++ [12] = {
++ .port_num = 13,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 56} , {0, 57} }
++ },
++ [13] = {
++ .port_num = 14,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 58} , {0, 59} }
++ },
++ [14] = {
++ .port_num = 15,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 60} , {0, 61} }
++ },
++ [15] = {
++ .port_num = 1,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 62} , {0, 63} }
++ },
++ [16] = {
++ .port_num = 2,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 64} , {0, 65} }
++ },
++ [17] = {
++ .port_num = 3,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 66} , {0, 67} }
++ },
++ [18] = {
++ .port_num = 4,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 68} , {0, 69} }
++ },
++ [19] = {
++ .port_num = 5,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 70} , {0, 71} }
++ },
++ [20] = {
++ .port_num = 6,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 72} , {0, 73} }
++ },
++ [21] = {
++ .port_num = 7,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 74} , {0, 75} }
++ },
++ [22] = {
++ .port_num = 8,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 76} , {0, 77} }
++ },
++ [23] = {
++ .port_num = 9,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 78} , {0, 79} }
++ },
++ [24] = {
++ .port_num = 10,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 80} , {0, 81} }
++ },
++ [25] = {
++ .port_num = 11,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 82} , {0, 83} }
++ },
++ [26] = {
++ .port_num = 12,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 84} , {0, 85} }
++ },
++ [27] = {
++ .port_num = 13,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 86} , {0, 87} }
++ },
++ [28] = {
++ .port_num = 14,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 88} , {0, 89} }
++ },
++ [29] = {
++ .port_num = 15,
++ .num_tx_queue = 2,
++ .tx_queue = { {0, 90} , {0, 91} }
++ }
++};
++
++/* Queues 0 to 66 are pre-assigned, others are spare */
++static const u32 assigned_queues[] = { 0xffffffff, /* queue 0..31 */
++ 0xffffffff, /* queue 32..63 */
++ 0xffffffff, /* queue 64..95 */
++ 0xffffffff, /* queue 96..127 */
++ 0x0fffffff /* queue 128..155 */
++ };
++
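++/*
++ * One-time CPPI 4.1 setup shared by both MUSB instances: the per-instance
++ * completion queue numbers are filled in on every call, while the global
++ * part (queue manager / DMA block mapping via CPPI41_ADDR(), scheduler
++ * table programming and the shared completion IRQ) is guarded by
++ * cppi41_init_done and only done once.
++ */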
++int __devinit cppi41_init(u8 id, u8 irq, int num_instances)
++{
++ struct usb_cppi41_info *cppi_info = &usb_cppi41_info[id];
++ u16 numch, blknum, order;
++ u32 i;
++
++ /* init cppi info structure */
++ cppi_info->dma_block = 0;
++ for (i = 0 ; i < USB_CPPI41_NUM_CH ; i++)
++ cppi_info->ep_dma_ch[i] = i + (15 * id);
++
++ cppi_info->q_mgr = 0;
++ cppi_info->num_tx_comp_q = 15;
++ cppi_info->num_rx_comp_q = 15;
++ cppi_info->tx_comp_q = id ? tx_comp_q1 : tx_comp_q;
++ cppi_info->rx_comp_q = id ? rx_comp_q1 : rx_comp_q;
++ cppi_info->bd_intr_ctrl = 1;
++
++ if (cppi41_init_done)
++ return 0;
++
++ blknum = cppi_info->dma_block;
++
++ /* Queue manager information */
++ cppi41_queue_mgr[0].num_queue = 159;
++ cppi41_queue_mgr[0].queue_types = CPPI41_FREE_DESC_BUF_QUEUE |
++ CPPI41_UNASSIGNED_QUEUE;
++ cppi41_queue_mgr[0].base_fdbq_num = 0;
++ cppi41_queue_mgr[0].assigned = assigned_queues;
++
++ /* init DMA block */
++ cppi41_dma_block[0].num_tx_ch = 30;
++ cppi41_dma_block[0].num_rx_ch = 30;
++ cppi41_dma_block[0].tx_ch_info = tx_ch_info;
++
++ /* initialize cppi41 dma & Qmgr address */
++ cppi41_dma_base = ioremap(TI81XX_USB_CPPIDMA_BASE,
++ TI81XX_USB_CPPIDMA_LEN);
++
++ cppi41_queue_mgr[0].q_mgr_rgn_base = CPPI41_ADDR(QMGR_RGN_OFFS);
++ cppi41_queue_mgr[0].desc_mem_rgn_base = CPPI41_ADDR(QMRG_DESCRGN_OFFS);
++ cppi41_queue_mgr[0].q_mgmt_rgn_base = CPPI41_ADDR(QMGR_REG_OFFS);
++ cppi41_queue_mgr[0].q_stat_rgn_base = CPPI41_ADDR(QMGR_STAT_OFFS);
++ cppi41_dma_block[0].global_ctrl_base = CPPI41_ADDR(DMA_GLBCTRL_OFFS);
++ cppi41_dma_block[0].ch_ctrl_stat_base = CPPI41_ADDR(DMA_CHCTRL_OFFS);
++ cppi41_dma_block[0].sched_ctrl_base = CPPI41_ADDR(DMA_SCHED_OFFS);
++ cppi41_dma_block[0].sched_table_base = CPPI41_ADDR(DMA_SCHEDTBL_OFFS);
++
++ /* Initialize for Linking RAM region 0 alone */
++ cppi41_queue_mgr_init(cppi_info->q_mgr, 0, 0x3fff);
++
++ numch = USB_CPPI41_NUM_CH * 2 * num_instances;
++ cppi41_dma_block[0].num_max_ch = numch;
++
++ order = get_count_order(numch);
++
++ /* TODO: check two teardown desc per channel (5 or 7 ?)*/
++ if (order < 5)
++ order = 5;
++
++ cppi41_dma_block_init(blknum, cppi_info->q_mgr, order,
++ dma_sched_table, numch);
++
++ /* attach to the IRQ */
++ if (request_irq(irq, cppi41dma_Interrupt, 0, "cppi41_dma", 0))
++ printk(KERN_INFO "request_irq %d failed!\n", irq);
++ else
++ printk(KERN_INFO "registerd cppi-dma Intr @ IRQ %d\n", irq);
++
++ cppi41_init_done = 1;
++
++ printk(KERN_INFO "Cppi41 Init Done Qmgr-base(%p) dma-base(%p)\n",
++ cppi41_queue_mgr[0].q_mgr_rgn_base,
++ cppi41_dma_block[0].global_ctrl_base);
++
++ /* enable all the usbss interrupts */
++ usbss_write(USBSS_IRQ_EOI, 0);
++ usbss_write(USBSS_IRQ_ENABLE_SET, USBSS_INTR_FLAGS);
++ usbss_write(USBSS_IRQ_DMA_ENABLE_0, 0xFFFeFFFe);
++
++ printk(KERN_INFO "Cppi41 Init Done\n");
++
++ return 0;
++}
++
++void cppi41_free(void)
++{
++ u32 numch, blknum, order;
++ struct usb_cppi41_info *cppi_info = &usb_cppi41_info[0];
++
++ if (!cppi41_init_done)
++ return;
++
++ numch = cppi41_dma_block[0].num_max_ch;
++ order = get_count_order(numch);
++ blknum = cppi_info->dma_block;
++
++ cppi41_dma_block_uninit(blknum, cppi_info->q_mgr, order,
++ dma_sched_table, numch);
++ cppi41_queue_mgr_uninit(cppi_info->q_mgr);
++
++ iounmap(cppi41_dma_base);
++ cppi41_dma_base = 0;
++ cppi41_init_done = 0;
++}
++
++int cppi41_disable_sched_rx(void)
++{
++ cppi41_dma_sched_tbl_init(0, 0, dma_sched_table, 30);
++ return 0;
++}
++
++int cppi41_enable_sched_rx(void)
++{
++ cppi41_dma_sched_tbl_init(0, 0, dma_sched_table, 30);
++ return 0;
++}
++#endif /* CONFIG_USB_TI_CPPI41_DMA */
++
++/*
++ * Because we don't set CTRL.UINT, it's "important" to:
++ * - not read/write INTRUSB/INTRUSBE (except during
++ * initial setup, as a workaround);
++ * - use INTSET/INTCLR instead.
++ */
++
++/**
++ * ti81xx_musb_enable - enable interrupts
++ */
++void ti81xx_musb_enable(struct musb *musb)
++{
++ void __iomem *reg_base = musb->ctrl_base;
++ u32 epmask, coremask;
++
++ /* Workaround: setup IRQs through both register sets. */
++ epmask = ((musb->epmask & USB_TX_EP_MASK) << USB_INTR_TX_SHIFT) |
++ ((musb->epmask & USB_RX_EP_MASK) << USB_INTR_RX_SHIFT);
++ coremask = (0x01ff << USB_INTR_USB_SHIFT);
++
++ coremask &= ~MUSB_INTR_SOF;
++
++ musb_writel(reg_base, USB_EP_INTR_SET_REG, epmask);
++ musb_writel(reg_base, USB_CORE_INTR_SET_REG, coremask);
++ /* Force the DRVVBUS IRQ so we can start polling for ID change. */
++ if (is_otg_enabled(musb))
++ musb_writel(reg_base, USB_CORE_INTR_SET_REG,
++ USB_INTR_DRVVBUS << USB_INTR_USB_SHIFT);
++}
++
++/**
++ * ti81xx_musb_disable - disable HDRC and flush interrupts
++ */
++void ti81xx_musb_disable(struct musb *musb)
++{
++ void __iomem *reg_base = musb->ctrl_base;
++
++ musb_writel(reg_base, USB_CORE_INTR_CLEAR_REG, USB_INTR_USB_MASK);
++ musb_writel(reg_base, USB_EP_INTR_CLEAR_REG,
++ USB_TX_INTR_MASK | USB_RX_INTR_MASK);
++ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
++ musb_writel(reg_base, USB_IRQ_EOI, 0);
++}
++
++#define POLL_SECONDS 2
++
++static void otg_timer(unsigned long _musb)
++{
++ struct musb *musb = (void *)_musb;
++ void __iomem *mregs = musb->mregs;
++ u8 devctl;
++ unsigned long flags;
++
++ /* We poll because DaVinci's won't expose several OTG-critical
++ * status change events (from the transceiver) otherwise.
++ */
++ devctl = musb_readb(mregs, MUSB_DEVCTL);
++ dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
++ otg_state_string(musb->xceiv->state));
++
++ spin_lock_irqsave(&musb->lock, flags);
++ switch (musb->xceiv->state) {
++ case OTG_STATE_A_WAIT_BCON:
++ devctl &= ~MUSB_DEVCTL_SESSION;
++ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
++
++ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
++ if (devctl & MUSB_DEVCTL_HM) {
++ musb->xceiv->state = OTG_STATE_A_IDLE;
++ MUSB_HST_MODE(musb);
++ } else {
++ musb->xceiv->state = OTG_STATE_B_IDLE;
++ MUSB_DEV_MODE(musb);
++ mod_timer(&musb->otg_workaround,
++ jiffies + POLL_SECONDS * HZ);
++ }
++ break;
++ case OTG_STATE_A_WAIT_VFALL:
++ /*
++ * Wait till VBUS falls below SessionEnd (~0.2 V); the 1.3
++ * RTL seems to mis-handle session "start" otherwise (or in
++ * our case "recover"), in routine "VBUS was valid by the time
++ * VBUSERR got reported during enumeration" cases.
++ */
++ if (devctl & MUSB_DEVCTL_VBUS) {
++ mod_timer(&musb->otg_workaround,
++ jiffies + POLL_SECONDS * HZ);
++ break;
++ }
++ musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
++ musb_writel(musb->ctrl_base, USB_CORE_INTR_SET_REG,
++ MUSB_INTR_VBUSERROR << USB_INTR_USB_SHIFT);
++ break;
++ case OTG_STATE_B_IDLE:
++ if (!is_peripheral_enabled(musb))
++ break;
++
++ /*
++ * There's no ID-changed IRQ, so we have no good way to tell
++ * when to switch to the A-Default state machine (by setting
++ * the DEVCTL.SESSION flag).
++ *
++ * Workaround: whenever we're in B_IDLE, try setting the
++ * session flag every few seconds. If it works, ID was
++ * grounded and we're now in the A-Default state machine.
++ *
++ * NOTE: setting the session flag is _supposed_ to trigger
++ * SRP but clearly it doesn't.
++ */
++ devctl = musb_readb(mregs, MUSB_DEVCTL);
++ if (devctl & MUSB_DEVCTL_HM) {
++ musb->xceiv->state = OTG_STATE_A_IDLE;
++ } else {
++ mod_timer(&musb->otg_workaround,
++ jiffies + POLL_SECONDS * HZ);
++ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl |
++ MUSB_DEVCTL_SESSION);
++ }
++ break;
++ default:
++ break;
++ }
++ spin_unlock_irqrestore(&musb->lock, flags);
++}
++
++void ti81xx_musb_try_idle(struct musb *musb, unsigned long timeout)
++{
++ if (!is_otg_enabled(musb))
++ return;
++
++ if (timeout == 0)
++ timeout = jiffies + msecs_to_jiffies(3);
++
++ /* Never idle if active, or when VBUS timeout is not set as host */
++ if (musb->is_active || (musb->a_wait_bcon == 0 &&
++ musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) {
++ dev_dbg(musb->controller, "%s active, deleting timer\n",
++ otg_state_string(musb->xceiv->state));
++ del_timer(&musb->otg_workaround);
++ musb->last_timer = jiffies;
++ return;
++ }
++
++ if (time_after(musb->last_timer, timeout) &&
++ timer_pending(&musb->otg_workaround)) {
++ dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n");
++ return;
++ }
++ musb->last_timer = timeout;
++
++ dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
++ otg_state_string(musb->xceiv->state),
++ jiffies_to_msecs(timeout - jiffies));
++ mod_timer(&musb->otg_workaround, timeout);
++}
++
++#ifdef CONFIG_USB_TI_CPPI41_DMA
++static irqreturn_t cppi41dma_Interrupt(int irq, void *hci)
++{
++ struct musb *musb = hci;
++ u32 intr_status;
++ irqreturn_t ret = IRQ_NONE;
++ u32 q_cmpl_status_0, q_cmpl_status_1, q_cmpl_status_2;
++ u32 usb0_tx_intr, usb0_rx_intr;
++ u32 usb1_tx_intr, usb1_rx_intr;
++ void *q_mgr_base = cppi41_queue_mgr[0].q_mgr_rgn_base;
++ unsigned long flags;
++
++ /*
++ * CPPI 4.1 interrupts share the same IRQ and the EOI register but
++ * don't get reflected in the interrupt source/mask registers.
++ */
++ /*
++ * Check for the interrupts from Tx/Rx completion queues; they
++ * are level-triggered and will stay asserted until the queues
++ * are emptied. We're using the queue pending register 0 as a
++ * substitute for the interrupt status register and reading it
++ * directly for speed.
++ */
++ intr_status = usbss_read(USBSS_IRQ_STATUS);
++
++ if (intr_status)
++ usbss_write(USBSS_IRQ_STATUS, intr_status);
++ else
++ printk(KERN_DEBUG "spurious usbss intr\n");
++
++ q_cmpl_status_0 = musb_readl(q_mgr_base, CPPI41_QSTATUS_REG2);
++ q_cmpl_status_1 = musb_readl(q_mgr_base, CPPI41_QSTATUS_REG3);
++ q_cmpl_status_2 = musb_readl(q_mgr_base, CPPI41_QSTATUS_REG4);
++
++ /* USB0 tx/rx completion */
++ /* usb0 tx completion interrupt for ep1..15 */
++ usb0_tx_intr = (q_cmpl_status_0 >> 29) |
++ ((q_cmpl_status_1 & 0xFFF) << 3);
++ usb0_rx_intr = ((q_cmpl_status_1 & 0x07FFe000) >> 13);
++
++ usb1_tx_intr = (q_cmpl_status_1 >> 29) |
++ ((q_cmpl_status_2 & 0xFFF) << 3);
++ usb1_rx_intr = ((q_cmpl_status_2 & 0x0fffe000) >> 13);
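++ /*
++ * Decoding sketch (assuming CPPI41_QSTATUS_REGn covers queues
++ * 32*n..32*n+31): USB0 TX completion queues 93..107 show up as
++ * bit 29.. of REG2 plus bits 0..11 of REG3, and USB0 RX queues
++ * 109..123 as bits 13..27 of REG3; USB1 TX (125..139) and RX
++ * (141..155) are extracted the same way from REG3/REG4.
++ */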
++
++ /* get proper musb handle based on usb0/usb1 ctrl-id */
++
++ dev_dbg(musb->controller, "CPPI 4.1 IRQ: Tx %x, Rx %x\n", usb0_tx_intr,
++ usb0_rx_intr);
++ if (gmusb[0] && (usb0_tx_intr || usb0_rx_intr)) {
++ spin_lock_irqsave(&gmusb[0]->lock, flags);
++ cppi41_completion(gmusb[0], usb0_rx_intr,
++ usb0_tx_intr);
++ spin_unlock_irqrestore(&gmusb[0]->lock, flags);
++ ret = IRQ_HANDLED;
++ }
++
++ dev_dbg(musb->controller, "CPPI 4.1 IRQ: Tx %x, Rx %x\n", usb1_tx_intr,
++ usb1_rx_intr);
++ if (gmusb[1] && (usb1_rx_intr || usb1_tx_intr)) {
++ spin_lock_irqsave(&gmusb[1]->lock, flags);
++ cppi41_completion(gmusb[1], usb1_rx_intr,
++ usb1_tx_intr);
++ spin_unlock_irqrestore(&gmusb[1]->lock, flags);
++ ret = IRQ_HANDLED;
++ }
++ usbss_write(USBSS_IRQ_EOI, 0);
++
++ return ret;
++}
++#endif
++
++int musb_simulate_babble(struct musb *musb)
++{
++ void __iomem *reg_base = musb->ctrl_base;
++ void __iomem *mbase = musb->mregs;
++ u8 reg;
++
++ /* during babble condition musb controller
++ * remove the session
++ */
++ reg = musb_readb(mbase, MUSB_DEVCTL);
++ reg &= ~MUSB_DEVCTL_SESSION;
++ musb_writeb(mbase, MUSB_DEVCTL, reg);
++ mdelay(100);
++
++ /* generate s/w babble interrupt */
++ musb_writel(reg_base, USB_IRQ_STATUS_RAW_1,
++ MUSB_INTR_BABBLE);
++ return 0;
++}
++EXPORT_SYMBOL(musb_simulate_babble);
++
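++/*
++ * Babble recovery: after a babble interrupt the controller appears to drop
++ * the session (see musb_simulate_babble() above), so the sequence below
++ * soft-resets the wrapper, power-cycles the on-chip PHY, forces host mode,
++ * re-enables the wrapper interrupts and restarts the MUSB core.
++ */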
++void musb_babble_workaround(struct musb *musb)
++{
++ void __iomem *reg_base = musb->ctrl_base;
++ struct device *dev = musb->controller;
++ struct musb_hdrc_platform_data *plat = dev->platform_data;
++ struct omap_musb_board_data *data = plat->board_data;
++
++ /* Reset the controller */
++ musb_writel(reg_base, USB_CTRL_REG, USB_SOFT_RESET_MASK);
++ udelay(100);
++
++ /* Shutdown the on-chip PHY and its PLL. */
++ if (data->set_phy_power)
++ data->set_phy_power(musb->id, 0);
++ udelay(100);
++
++ musb_platform_set_mode(musb, MUSB_HOST);
++ udelay(100);
++
++ /* enable the usbphy */
++ if (data->set_phy_power)
++ data->set_phy_power(musb->id, 1);
++ mdelay(100);
++
++ /* save the usbotgss register contents */
++ musb_platform_enable(musb);
++
++ musb_start(musb);
++}
++
++static void evm_deferred_musb_restart(struct work_struct *work)
++{
++ struct musb *musb =
++ container_of(work, struct musb, work);
++
++ ERR("deferred musb restart musbid(%d)\n", musb->id);
++ musb_babble_workaround(musb);
++}
++
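++/*
++ * Main wrapper ISR: endpoint and core interrupt sources are read from the
++ * USB_*_INTR_STATUS registers, acknowledged by writing the same value back,
++ * translated into musb->int_tx/int_rx/int_usb for musb_interrupt(), and the
++ * line is finally re-armed by writing USB_IRQ_EOI.
++ */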
++static irqreturn_t ti81xx_interrupt(int irq, void *hci)
++{
++ struct musb *musb = hci;
++ void __iomem *reg_base = musb->ctrl_base;
++ unsigned long flags;
++ irqreturn_t ret = IRQ_NONE;
++ u32 pend1 = 0, pend2 = 0;
++ u32 epintr, usbintr;
++ u8 is_babble = 0;
++
++ spin_lock_irqsave(&musb->lock, flags);
++
++ /* Acknowledge and handle non-CPPI interrupts */
++ /* Get endpoint interrupts */
++ epintr = musb_readl(reg_base, USB_EP_INTR_STATUS_REG);
++ musb->int_rx = (epintr & USB_RX_INTR_MASK) >> USB_INTR_RX_SHIFT;
++ musb->int_tx = (epintr & USB_TX_INTR_MASK) >> USB_INTR_TX_SHIFT;
++ if (epintr)
++ musb_writel(reg_base, USB_EP_INTR_STATUS_REG, epintr);
++
++ /* Get usb core interrupts */
++ usbintr = musb_readl(reg_base, USB_CORE_INTR_STATUS_REG);
++ if (!usbintr && !epintr) {
++ dev_dbg(musb->controller, "sprious interrupt\n");
++ goto eoi;
++ }
++
++ if (usbintr)
++ musb_writel(reg_base, USB_CORE_INTR_STATUS_REG, usbintr);
++ musb->int_usb = (usbintr & USB_INTR_USB_MASK) >> USB_INTR_USB_SHIFT;
++
++ dev_dbg(musb->controller, "usbintr (%x) epintr(%x)\n", usbintr, epintr);
++ /*
++ * DRVVBUS IRQs are the only proxy we have (a very poor one!) for
++ * AM3517's missing ID change IRQ. We need an ID change IRQ to
++ * switch appropriately between halves of the OTG state machine.
++ * Managing DEVCTL.SESSION per Mentor docs requires that we know its
++ * value but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set.
++ * Also, DRVVBUS pulses for SRP (but not at 5V) ...
++ */
++ if ((usbintr & MUSB_INTR_BABBLE) && is_otg_enabled(musb)
++ && (musb->xceiv->state == OTG_STATE_A_HOST))
++ is_babble = 1;
++ else if ((usbintr & MUSB_INTR_BABBLE) && !is_otg_enabled(musb)
++ && is_host_enabled(musb))
++ is_babble = 1;
++
++ if (is_babble) {
++ if (musb->enable_babble_work)
++ musb->int_usb |= MUSB_INTR_DISCONNECT;
++
++ ERR("CAUTION: musb%d: Babble Interrupt Occured\n", musb->id);
++ ERR("Please issue long reset to make usb functional !!\n");
++ }
++
++ if (usbintr & (USB_INTR_DRVVBUS << USB_INTR_USB_SHIFT)) {
++ int drvvbus = musb_readl(reg_base, USB_STAT_REG);
++ void __iomem *mregs = musb->mregs;
++ u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
++ int err;
++
++ err = is_host_enabled(musb) && (musb->int_usb &
++ MUSB_INTR_VBUSERROR);
++ if (err) {
++ /*
++ * The Mentor core doesn't debounce VBUS as needed
++ * to cope with device connect current spikes. This
++ * means it's not uncommon for bus-powered devices
++ * to get VBUS errors during enumeration.
++ *
++ * This is a workaround, but newer RTL from Mentor
++ * seems to allow a better one: "re"-starting sessions
++ * without waiting for VBUS to stop registering in
++ * devctl.
++ */
++ musb->int_usb &= ~MUSB_INTR_VBUSERROR;
++ musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
++ mod_timer(&musb->otg_workaround,
++ jiffies + POLL_SECONDS * HZ);
++ WARNING("VBUS error workaround (delay coming)\n");
++ } else if (is_host_enabled(musb) && drvvbus) {
++ musb->is_active = 1;
++ MUSB_HST_MODE(musb);
++ musb->xceiv->default_a = 1;
++ musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
++ del_timer(&musb->otg_workaround);
++ } else {
++ musb->is_active = 0;
++ MUSB_DEV_MODE(musb);
++ musb->xceiv->default_a = 0;
++ musb->xceiv->state = OTG_STATE_B_IDLE;
++ }
++
++ /* NOTE: this must complete power-on within 100 ms. */
++ dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
++ drvvbus ? "on" : "off",
++ otg_state_string(musb->xceiv->state),
++ err ? " ERROR" : "",
++ devctl);
++ ret = IRQ_HANDLED;
++ }
++
++ if (musb->int_tx || musb->int_rx || musb->int_usb)
++ ret |= musb_interrupt(musb);
++
++ eoi:
++ /* EOI needs to be written for the IRQ to be re-asserted. */
++ if (ret == IRQ_HANDLED || epintr || usbintr) {
++ /* write EOI */
++ musb_writel(reg_base, USB_IRQ_EOI, 1);
++ }
++
++ /* Poll for ID change */
++ if (is_otg_enabled(musb) && musb->xceiv->state == OTG_STATE_B_IDLE)
++ mod_timer(&musb->otg_workaround, jiffies + POLL_SECONDS * HZ);
++
++ spin_unlock_irqrestore(&musb->lock, flags);
++
++ if (ret != IRQ_HANDLED) {
++ if (epintr || usbintr)
++ /*
++ * We sometimes get unhandled IRQs in the peripheral
++ * mode from EP0 and SOF...
++ */
++ dev_dbg(musb->controller, "Unhandled USB IRQ %08x-%08x\n",
++ epintr, usbintr);
++ else if (printk_ratelimit())
++ /*
++ * We've seen series of spurious interrupts in the
++ * peripheral mode after USB reset and then after some
++ * time a real interrupt storm starting...
++ */
++ dev_dbg(musb->controller, "Spurious IRQ, CPPI 4.1 status %08x, %08x\n",
++ pend1, pend2);
++ }
++
++ if (is_babble) {
++ if (!musb->enable_babble_work) {
++ musb_writeb(musb->mregs, MUSB_DEVCTL,
++ musb_readb(musb->mregs, MUSB_DEVCTL) |
++ MUSB_DEVCTL_SESSION);
++ } else {
++ ERR("Babble: devtcl(%x)Restarting musb....\n",
++ musb_readb(musb->mregs, MUSB_DEVCTL));
++ schedule_work(&musb->work);
++ }
++ }
++ return ret;
++}
++int ti81xx_musb_set_mode(struct musb *musb, u8 musb_mode)
++{
++ void __iomem *reg_base = musb->ctrl_base;
++ u32 regval;
++
++ /* TODO: implement this using CONF0 */
++ if (musb_mode == MUSB_HOST) {
++ regval = musb_readl(reg_base, USB_MODE_REG);
++
++ regval &= ~USBMODE_USBID_HIGH;
++ if (usbid_sw_ctrl && cpu_is_ti816x())
++ regval |= USBMODE_USBID_MUXSEL;
++
++ musb_writel(reg_base, USB_MODE_REG, regval);
++ musb_writel(musb->ctrl_base, USB_PHY_UTMI_REG, 0x02);
++ dev_dbg(musb->controller, "host: value of mode reg=%x regval(%x)\n",
++ musb_readl(reg_base, USB_MODE_REG), regval);
++ } else if (musb_mode == MUSB_PERIPHERAL) {
++ /* TODO: commented out writing 8 to USB_MODE_REG since device
++ mode is not working */
++ regval = musb_readl(reg_base, USB_MODE_REG);
++
++ regval |= USBMODE_USBID_HIGH;
++ if (usbid_sw_ctrl && cpu_is_ti816x())
++ regval |= USBMODE_USBID_MUXSEL;
++
++ musb_writel(reg_base, USB_MODE_REG, regval);
++ dev_dbg(musb->controller, "device: value of mode reg=%x regval(%x)\n",
++ musb_readl(reg_base, USB_MODE_REG), regval);
++ } else if (musb_mode == MUSB_OTG) {
++ musb_writel(musb->ctrl_base, USB_PHY_UTMI_REG, 0x02);
++ } else
++ return -EIO;
++
++ return 0;
++}
++
++int ti81xx_musb_init(struct musb *musb)
++{
++ void __iomem *reg_base = musb->ctrl_base;
++ struct device *dev = musb->controller;
++ struct musb_hdrc_platform_data *plat = dev->platform_data;
++ struct omap_musb_board_data *data = plat->board_data;
++ u32 rev;
++ u8 mode;
++
++ if (musb->id < 2)
++ gmusb[musb->id] = musb;
++
++ usb_nop_xceiv_register(musb->id);
++
++ musb->xceiv = otg_get_transceiver(musb->id);
++ if (!musb->xceiv)
++ return -ENODEV;
++
++ /* mentor is at offset of 0x400 in am3517/ti81xx */
++ musb->mregs += USB_MENTOR_CORE_OFFSET;
++
++ /* Returns zero if e.g. not clocked */
++ rev = musb_readl(reg_base, USB_REVISION_REG);
++ if (!rev)
++ return -ENODEV;
++
++ if (is_host_enabled(musb))
++ setup_timer(&musb->otg_workaround, otg_timer,
++ (unsigned long) musb);
++
++ /* Reset the controller */
++ musb_writel(reg_base, USB_CTRL_REG, USB_SOFT_RESET_MASK);
++
++ /* wait till reset bit clears */
++ while ((musb_readl(reg_base, USB_CTRL_REG) & 0x1))
++ cpu_relax();
++
++ /* Start the on-chip PHY and its PLL. */
++ if (data->set_phy_power)
++ data->set_phy_power(musb->id, 1);
++
++ musb->a_wait_bcon = A_WAIT_BCON_TIMEOUT;
++ musb->isr = ti81xx_interrupt;
++
++ if (cpu_is_ti816x())
++ usbid_sw_ctrl = 1;
++
++ if (is_otg_enabled(musb)) {
++ /* if the usb-id is controlled through software for ti816x then
++ * configure usb0 in peripheral mode and usb1 in
++ * host mode
++ */
++ if (usbid_sw_ctrl && cpu_is_ti816x())
++ mode = musb->id ? MUSB_HOST : MUSB_PERIPHERAL;
++ else
++ mode = MUSB_OTG;
++ } else
++ /* set musb controller to host mode */
++ mode = is_host_enabled(musb) ? MUSB_HOST : MUSB_PERIPHERAL;
++
++ /* apply the selected mode */
++ musb_platform_set_mode(musb, mode);
++
++ /* set up the babble workaround (left disabled by default) */
++ INIT_WORK(&musb->work, evm_deferred_musb_restart);
++ musb->enable_babble_work = 0;
++
++ musb_writel(reg_base, USB_IRQ_EOI, 0);
++
++ return 0;
++}
++
++/* TI81xx supports only 32bit read operation */
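++/*
++ * Note: 32-bit aligned destinations take the readsl() fast path with at most
++ * 3 trailing bytes copied out of one final word read; unaligned destinations
++ * fall through to the word-at-a-time loop plus the memcpy() tail instead.
++ */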
++void ti81xx_musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
++{
++ void __iomem *fifo = hw_ep->fifo;
++ u32 val;
++ int i;
++
++ /* Read for 32bit-aligned destination address */
++ if (likely((0x03 & (unsigned long) dst) == 0) && len >= 4) {
++ readsl(fifo, dst, len >> 2);
++ dst += len & ~0x03;
++ len &= 0x03;
++ }
++ /*
++ * Now read the remaining 1 to 3 byte or complete length if
++ * unaligned address.
++ */
++ if (len > 4) {
++ for (i = 0; i < (len >> 2); i++) {
++ *(u32 *) dst = musb_readl(fifo, 0);
++ dst += 4;
++ }
++ len &= 0x03;
++ }
++ if (len > 0) {
++ val = musb_readl(fifo, 0);
++ memcpy(dst, &val, len);
++ }
++}
++
++int ti81xx_musb_exit(struct musb *musb)
++{
++ struct device *dev = musb->controller;
++ struct musb_hdrc_platform_data *plat = dev->platform_data;
++ struct omap_musb_board_data *data = plat->board_data;
++
++ if (is_host_enabled(musb))
++ del_timer_sync(&musb->otg_workaround);
++
++ /* Shutdown the on-chip PHY and its PLL. */
++ if (data->set_phy_power)
++ data->set_phy_power(musb->id, 0);
++
++ otg_put_transceiver(musb->xceiv);
++ usb_nop_xceiv_unregister(musb->id);
++
++ return 0;
++}
++
++static struct musb_platform_ops ti81xx_ops = {
++ .fifo_mode = 4,
++ .flags = MUSB_GLUE_EP_ADDR_FLAT_MAPPING | MUSB_GLUE_DMA_CPPI41,
++ .init = ti81xx_musb_init,
++ .exit = ti81xx_musb_exit,
++
++ .enable = ti81xx_musb_enable,
++ .disable = ti81xx_musb_disable,
++
++ .try_idle = ti81xx_musb_try_idle,
++ .set_mode = ti81xx_musb_set_mode,
++
++ .read_fifo = ti81xx_musb_read_fifo,
++ .write_fifo = musb_write_fifo,
++
++ .dma_controller_create = cppi41_dma_controller_create,
++ .dma_controller_destroy = cppi41_dma_controller_destroy,
++ .simulate_babble_intr = musb_simulate_babble,
++};
++
++static void __devexit ti81xx_delete_musb_pdev(struct ti81xx_glue *glue, u8 id)
++{
++ platform_device_del(glue->musb[id]);
++ platform_device_put(glue->musb[id]);
++}
++
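++/*
++ * Create one "musb-hdrc" child per controller instance: the "musbN" memory
++ * and "musbN-irq" IRQ resources are taken from the parent USBSS platform
++ * device and the shared platform data is copied with the per-port mode
++ * extracted from board_data->mode.
++ */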
++static int __devinit ti81xx_create_musb_pdev(struct ti81xx_glue *glue, u8 id)
++{
++ struct device *dev = glue->dev;
++ struct platform_device *pdev = to_platform_device(dev);
++ struct musb_hdrc_platform_data *pdata = dev->platform_data;
++ struct omap_musb_board_data *bdata = pdata->board_data;
++ struct platform_device *musb;
++ struct resource *res;
++ struct resource resources[2];
++ char res_name[10];
++ int ret = 0;
++
++ /* get memory resource */
++ sprintf(res_name, "musb%d", id);
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
++ if (!res) {
++ dev_err(dev, "%s get mem resource failed\n", res_name);
++ ret = -ENODEV;
++ goto err0;
++ }
++ res->parent = NULL;
++ resources[0] = *res;
++
++ /* get irq resource */
++ sprintf(res_name, "musb%d-irq", id);
++ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
++ if (!res) {
++ dev_err(dev, "%s get irq resource failed\n", res_name);
++ ret = -ENODEV;
++ goto err0;
++ }
++ res->parent = NULL;
++ resources[1] = *res;
++
++ /* allocate the child platform device */
++ musb = platform_device_alloc("musb-hdrc", id);
++ if (!musb) {
++ dev_err(dev, "failed to allocate musb device\n");
++ goto err0;
++ }
++
++ musb->id = id;
++ musb->dev.parent = dev;
++ musb->dev.dma_mask = &musb_dmamask;
++ musb->dev.coherent_dma_mask = musb_dmamask;
++
++ glue->musb[id] = musb;
++
++ pdata->platform_ops = &ti81xx_ops;
++
++ ret = platform_device_add_resources(musb, resources, 2);
++ if (ret) {
++ dev_err(dev, "failed to add resources\n");
++ goto err1;
++ }
++
++ if (id == 0)
++ pdata->mode = bdata->mode & USB0PORT_MODEMASK;
++ else
++ pdata->mode = (bdata->mode & USB1PORT_MODEMASK)
++ >> USB1PORT_MODESHIFT;
++
++ dev_info(dev, "musb%d, board_mode=0x%x, plat_mode=0x%x\n",
++ id, bdata->mode, pdata->mode);
++
++ ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
++ if (ret) {
++ dev_err(dev, "failed to add platform_data\n");
++ goto err1;
++ }
++
++ ret = platform_device_add(musb);
++ if (ret) {
++ dev_err(dev, "failed to register musb device\n");
++ goto err1;
++ }
++
++ return 0;
++
++err1:
++ platform_device_put(musb);
++err0:
++ return ret;
++}
++
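++/*
++ * USBSS probe: map the subsystem registers, pick up the shared USBSS IRQ,
++ * enable runtime PM, clear any stale USBSS interrupts, then set up a CPPI
++ * 4.1 context and a child "musb-hdrc" device for each controller instance
++ * (the loop runs board_data->instances + 1 times).
++ */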
++static int __init ti81xx_probe(struct platform_device *pdev)
++{
++ struct ti81xx_glue *glue;
++ struct device *dev = &pdev->dev;
++ struct musb_hdrc_platform_data *plat = dev->platform_data;
++ struct omap_musb_board_data *data = plat->board_data;
++ int ret = 0, i;
++ struct resource *res;
++
++ /* allocate glue */
++ glue = kzalloc(sizeof(*glue), GFP_KERNEL);
++ if (!glue) {
++ dev_err(&pdev->dev, "unable to allocate glue memory\n");
++ ret = -ENOMEM;
++ goto err0;
++ }
++
++ /* get memory resource */
++ glue->mem_pa = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!glue->mem_pa) {
++ dev_err(&pdev->dev, "failed to get usbss mem resourse\n");
++ ret = -ENODEV;
++ goto err1;
++ }
++
++ /* get irq resource */
++ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "usbss-irq");
++ if (!res) {
++ dev_err(&pdev->dev, "failed to get usbss irq resourse\n");
++ ret = -ENODEV;
++ goto err1;
++ }
++ glue->irq = res->start;
++
++ /* iomap for usbss mem space */
++ glue->mem_va =
++ ioremap(glue->mem_pa->start, resource_size(glue->mem_pa));
++ if (!glue->mem_va) {
++ dev_err(&pdev->dev, "usbss ioremap failed\n");
++ ret = -ENOMEM;
++ goto err1;
++ }
++ usbss_virt_base = glue->mem_va;
++
++ glue->first = 1;
++ glue->dev = &pdev->dev;
++ platform_set_drvdata(pdev, glue);
++
++ /* enable clocks */
++ pm_runtime_enable(&pdev->dev);
++ ret = pm_runtime_get_sync(&pdev->dev);
++ if (ret < 0) {
++ dev_err(dev, "pm_runtime_get_sync FAILED");
++ goto err2;
++ }
++
++ /* usb subsystem init */
++ usbotg_ss_init();
++
++ /* clear any USBSS interrupts */
++ writel(0, glue->mem_va + USBSS_IRQ_EOI);
++ writel(readl(glue->mem_va + USBSS_IRQ_STATUS),
++ glue->mem_va + USBSS_IRQ_STATUS);
++
++ /* create the child platform device for multiple instances of musb */
++ for (i = 0; i <= data->instances; ++i) {
++#ifdef CONFIG_USB_TI_CPPI41_DMA
++ /* initialize the cppi41 dma */
++ cppi41_init(i, glue->irq, data->instances+1);
++#endif
++ ret = ti81xx_create_musb_pdev(glue, i);
++ if (ret != 0)
++ goto err3;
++ }
++
++ return 0;
++
++err3:
++ pm_runtime_put_sync(&pdev->dev);
++err2:
++ pm_runtime_disable(&pdev->dev);
++ iounmap(glue->mem_va);
++err1:
++ kfree(glue);
++err0:
++ return ret;
++}
++
++static int __exit ti81xx_remove(struct platform_device *pdev)
++{
++ struct ti81xx_glue *glue = platform_get_drvdata(pdev);
++ struct device *dev = &pdev->dev;
++ struct musb_hdrc_platform_data *plat = dev->platform_data;
++ struct omap_musb_board_data *data = plat->board_data;
++ int i;
++
++ /* delete the child platform device for multiple instances of musb */
++ for (i = 0; i <= data->instances; ++i)
++ ti81xx_delete_musb_pdev(glue, i);
++
++#ifdef CONFIG_USB_TI_CPPI41_DMA
++ cppi41_free();
++#endif
++ /* iounmap */
++ iounmap(glue->mem_va);
++ usbotg_ss_uninit();
++
++ pm_runtime_put_sync(&pdev->dev);
++ pm_runtime_disable(&pdev->dev);
++ kfree(glue);
++
++ return 0;
++}
++
++#ifdef CONFIG_PM
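++/*
++ * Runtime PM context handling: the USBSS wrapper registers, the per-port
++ * control registers and (with CPPI 4.1 enabled) the DMA state are saved
++ * before the PHYs are powered down and written back on resume, presumably
++ * because the wrapper loses state across the power transition; the very
++ * first resume after probe is skipped via glue->first since nothing has
++ * been saved yet.
++ */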
++static void ti81xx_save_context(struct ti81xx_glue *glue)
++{
++ struct ti81xx_usbss_regs *usbss = &glue->usbss_regs;
++ u8 i, j;
++
++ /* save USBSS register */
++ usbss->irq_en_set = usbss_read(USBSS_IRQ_ENABLE_SET);
++
++#ifdef CONFIG_USB_TI_CPPI41_DMA
++ for (i = 0 ; i < 4 ; i++) {
++ usbss->irq_dma_th_tx0[i] =
++ usbss_read(USBSS_IRQ_DMA_THRESHOLD_TX0 + (0x4 * i));
++ usbss->irq_dma_th_rx0[i] =
++ usbss_read(USBSS_IRQ_DMA_THRESHOLD_RX0 + (0x4 * i));
++ usbss->irq_dma_th_tx1[i] =
++ usbss_read(USBSS_IRQ_DMA_THRESHOLD_TX1 + (0x4 * i));
++ usbss->irq_dma_th_rx1[i] =
++ usbss_read(USBSS_IRQ_DMA_THRESHOLD_RX1 + (0x4 * i));
++
++ usbss->irq_frame_th_tx0[i] =
++ usbss_read(USBSS_IRQ_FRAME_THRESHOLD_TX0 + (0x4 * i));
++ usbss->irq_frame_th_rx0[i] =
++ usbss_read(USBSS_IRQ_FRAME_THRESHOLD_RX0 + (0x4 * i));
++ usbss->irq_frame_th_tx1[i] =
++ usbss_read(USBSS_IRQ_FRAME_THRESHOLD_TX1 + (0x4 * i));
++ usbss->irq_frame_th_rx1[i] =
++ usbss_read(USBSS_IRQ_FRAME_THRESHOLD_RX1 + (0x4 * i));
++ }
++ for (i = 0 ; i < 2 ; i++) {
++ usbss->irq_dma_en[i] =
++ usbss_read(USBSS_IRQ_DMA_ENABLE_0 + (0x4 * i));
++ usbss->irq_frame_en[i] =
++ usbss_read(USBSS_IRQ_FRAME_ENABLE_0 + (0x4 * i));
++ }
++#endif
++ /* save usbX register */
++ for (i = 0 ; i < 2 ; i++) {
++ struct ti81xx_usb_regs *usb = &glue->usb_regs[i];
++ struct musb *musb = platform_get_drvdata(glue->musb[i]);
++ void __iomem *cbase = musb->ctrl_base;
++
++ /* disable the timers */
++ if (timer_pending(&musb->otg_workaround) &&
++ is_host_enabled(musb)) {
++ del_timer_sync(&musb->otg_workaround);
++ musb->en_otgw_timer = 1;
++ }
++
++ if (timer_pending(&musb->otg_workaround) &&
++ is_otg_enabled(musb)) {
++ del_timer_sync(&musb->otg_timer);
++ musb->en_otg_timer = 1;
++ }
++
++ musb_save_context(musb);
++ usb->control = musb_readl(cbase, USB_CTRL_REG);
++
++ for (j = 0 ; j < 2 ; j++)
++ usb->irq_en_set[j] = musb_readl(cbase,
++ USB_IRQ_ENABLE_SET_0 + (0x4 * j));
++#ifdef CONFIG_USB_TI_CPPI41_DMA
++ usb->tx_mode = musb_readl(cbase, USB_TX_MODE_REG);
++ usb->rx_mode = musb_readl(cbase, USB_RX_MODE_REG);
++
++ for (j = 0 ; j < 15 ; j++)
++ usb->grndis_size[j] = musb_readl(cbase,
++ USB_GENERIC_RNDIS_EP_SIZE_REG(j + 1));
++
++ usb->auto_req = musb_readl(cbase, TI81XX_USB_AUTOREQ_REG);
++ usb->teardn = musb_readl(cbase, TI81XX_USB_TEARDOWN_REG);
++ usb->th_xdma_idle = musb_readl(cbase, USB_TH_XDMA_IDLE_REG);
++#endif
++ usb->srp_fix = musb_readl(cbase, USB_SRP_FIX_TIME_REG);
++ usb->phy_utmi = musb_readl(cbase, USB_PHY_UTMI_REG);
++ usb->mgc_utmi_loopback = musb_readl(cbase, USB_PHY_UTMI_LB_REG);
++ usb->mode = musb_readl(cbase, USB_MODE_REG);
++ }
++#ifdef CONFIG_USB_TI_CPPI41_DMA
++ /* save CPPI4.1 DMA register for dma block 0 */
++ cppi41_save_context(0);
++#endif
++}
++static void ti81xx_restore_context(struct ti81xx_glue *glue)
++{
++ struct ti81xx_usbss_regs *usbss = &glue->usbss_regs;
++ u8 i, j;
++
++ /* restore USBSS register */
++ usbss_write(USBSS_IRQ_ENABLE_SET, usbss->irq_en_set);
++
++#ifdef CONFIG_USB_TI_CPPI41_DMA
++ for (i = 0 ; i < 4 ; i++) {
++ usbss_write(USBSS_IRQ_DMA_THRESHOLD_TX0 + (0x4 * i),
++ usbss->irq_dma_th_tx0[i]);
++ usbss_write(USBSS_IRQ_DMA_THRESHOLD_RX0 + (0x4 * i),
++ usbss->irq_dma_th_rx0[i]);
++ usbss_write(USBSS_IRQ_DMA_THRESHOLD_TX1 + (0x4 * i),
++ usbss->irq_dma_th_tx1[i]);
++ usbss_write(USBSS_IRQ_DMA_THRESHOLD_RX1 + (0x4 * i),
++ usbss->irq_dma_th_rx1[i]);
++
++ usbss_write(USBSS_IRQ_FRAME_THRESHOLD_TX0 + (0x4 * i),
++ usbss->irq_frame_th_tx0[i]);
++ usbss_write(USBSS_IRQ_FRAME_THRESHOLD_RX0 + (0x4 * i),
++ usbss->irq_frame_th_rx0[i]);
++ usbss_write(USBSS_IRQ_FRAME_THRESHOLD_TX1 + (0x4 * i),
++ usbss->irq_frame_th_tx1[i]);
++ usbss_write(USBSS_IRQ_FRAME_THRESHOLD_RX1 + (0x4 * i),
++ usbss->irq_frame_th_rx1[i]);
++ }
++ for (i = 0 ; i < 2 ; i++) {
++ usbss_write(USBSS_IRQ_DMA_ENABLE_0 + (0x4 * i),
++ usbss->irq_dma_en[i]);
++ usbss_write(USBSS_IRQ_FRAME_ENABLE_0 + (0x4 * i),
++ usbss->irq_frame_en[i]);
++ }
++#endif
++ /* restore usbX register */
++ for (i = 0 ; i < 2 ; i++) {
++ struct ti81xx_usb_regs *usb = &glue->usb_regs[i];
++ struct musb *musb = platform_get_drvdata(glue->musb[i]);
++ void __iomem *cbase = musb->ctrl_base;
++
++ musb_restore_context(musb);
++ musb_writel(cbase, USB_CTRL_REG, usb->control);
++
++ for (j = 0 ; j < 2 ; j++)
++ musb_writel(cbase, USB_IRQ_ENABLE_SET_0 + (0x4 * j),
++ usb->irq_en_set[j]);
++
++#ifdef CONFIG_USB_TI_CPPI41_DMA
++ musb_writel(cbase, USB_TX_MODE_REG, usb->tx_mode);
++ musb_writel(cbase, USB_RX_MODE_REG, usb->rx_mode);
++
++ for (j = 0 ; j < 15 ; j++)
++ musb_writel(cbase, USB_GENERIC_RNDIS_EP_SIZE_REG(j + 1),
++ usb->grndis_size[j]);
++
++ musb_writel(cbase, TI81XX_USB_AUTOREQ_REG, usb->auto_req);
++ musb_writel(cbase, TI81XX_USB_TEARDOWN_REG, usb->teardn);
++ musb_writel(cbase, USB_TH_XDMA_IDLE_REG, usb->th_xdma_idle);
++#endif
++ musb_writel(cbase, USB_SRP_FIX_TIME_REG, usb->srp_fix);
++ musb_writel(cbase, USB_PHY_UTMI_REG, usb->phy_utmi);
++ musb_writel(cbase, USB_PHY_UTMI_LB_REG, usb->mgc_utmi_loopback);
++ musb_writel(cbase, USB_MODE_REG, usb->mode);
++
++ /* reenable the timers */
++ if (musb->en_otgw_timer && is_host_enabled(musb)) {
++ mod_timer(&musb->otg_workaround,
++ jiffies + POLL_SECONDS * HZ);
++ musb->en_otgw_timer = 0;
++ }
++ if (musb->en_otg_timer && is_otg_enabled(musb)) {
++ mod_timer(&musb->otg_timer,
++ jiffies + POLL_SECONDS * HZ);
++ musb->en_otg_timer = 0;
++ }
++ }
++#ifdef CONFIG_USB_TI_CPPI41_DMA
++ /* restore CPPI4.1 DMA register for dma block 0 */
++ cppi41_restore_context(0, dma_sched_table);
++#endif
++ /* controller needs delay for successful resume */
++ msleep(200);
++}
++static int ti81xx_runtime_suspend(struct device *dev)
++{
++ struct ti81xx_glue *glue = dev_get_drvdata(dev);
++ struct musb_hdrc_platform_data *plat = dev->platform_data;
++ struct omap_musb_board_data *data = plat->board_data;
++ int i;
++
++ /* save wrappers and cppi4.1 dma register */
++ ti81xx_save_context(glue);
++
++ /* Shutdown the on-chip PHY and its PLL. */
++ for (i = 0; i <= data->instances; ++i) {
++ if (data->set_phy_power)
++ data->set_phy_power(i, 0);
++ }
++
++ return 0;
++}
++
++static int ti81xx_runtime_resume(struct device *dev)
++{
++ struct ti81xx_glue *glue = dev_get_drvdata(dev);
++ struct musb_hdrc_platform_data *plat = dev->platform_data;
++ struct omap_musb_board_data *data = plat->board_data;
++ int i;
++
++ /*
++ * ignore first call of resume as all registers are not yet
++ * initialized
++ */
++ if (glue->first) {
++ glue->first = 0;
++ return 0;
++ }
++
++ /* Start the on-chip PHY and its PLL. */
++ for (i = 0; i <= data->instances; ++i) {
++ if (data->set_phy_power)
++ data->set_phy_power(i, 1);
++ }
++
++ /* restore wrappers and cppi4.1 dma register */
++ ti81xx_restore_context(glue);
++
++ return 0;
++}
++
++static const struct dev_pm_ops ti81xx_pm_ops = {
++ .runtime_suspend = ti81xx_runtime_suspend,
++ .runtime_resume = ti81xx_runtime_resume,
++};
++
++#define DEV_PM_OPS (&ti81xx_pm_ops)
++#else
++#define DEV_PM_OPS NULL
++#endif
++
++static struct platform_driver ti81xx_musb_driver = {
++ .remove = __exit_p(ti81xx_remove),
++ .driver = {
++ .name = "musb-ti81xx",
++ .pm = DEV_PM_OPS,
++ },
++};
++
++MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
++MODULE_DESCRIPTION("AM35x MUSB Glue Layer");
++MODULE_LICENSE("GPL v2");
++
++static int __init ti81xx_glue_init(void)
++{
++ return platform_driver_probe(&ti81xx_musb_driver, ti81xx_probe);
++}
++subsys_initcall(ti81xx_glue_init);
++
++static void __exit ti81xx_glue_exit(void)
++{
++#ifdef CONFIG_USB_TI_CPPI41_DMA
++ /* free the usbss irq */
++ free_irq(TI81XX_IRQ_USBSS, 0);
++#endif
++
++ /* disable the interrupts */
++ usbss_write(USBSS_IRQ_EOI, 0);
++ usbss_write(USBSS_IRQ_ENABLE_SET, 0);
++ usbss_write(USBSS_IRQ_DMA_ENABLE_0, 0);
++
++ /* unregister platform driver */
++ platform_driver_unregister(&ti81xx_musb_driver);
++}
++module_exit(ti81xx_glue_exit);
+diff --git a/drivers/usb/musb/ti81xx.h b/drivers/usb/musb/ti81xx.h
+new file mode 100644
+index 0000000..e0dbd3e
+--- /dev/null
++++ b/drivers/usb/musb/ti81xx.h
+@@ -0,0 +1,173 @@
++/*
++ * Copyright (C) 2005-2006 by Texas Instruments
++ *
++ * The Inventra Controller Driver for Linux is free software; you
++ * can redistribute it and/or modify it under the terms of the GNU
++ * General Public License version 2 as published by the Free Software
++ * Foundation.
++ */
++
++#ifndef __MUSB_HDRDF_H__
++#define __MUSB_HDRDF_H__
++
++#define TI81XX_USB_CPPIDMA_BASE 0x47402000
++#define TI81XX_USB_CPPIDMA_LEN 0x5FFF
++#define TI81XX_IRQ_USBSS 17
++
++/* Netra USB subsystem register offsets */
++#define USBSS_REVISION 0x0000
++#define USBSS_SYSCONFIG 0x0010
++/* USBSS EOI interrupt register */
++#define USBSS_IRQ_EOI 0x0020
++/* USBSS interrupt generation/status register */
++#define USBSS_IRQ_STATUS_RAW 0x0024
++/* USBSS interrupt status register */
++#define USBSS_IRQ_STATUS 0x0028
++/* USBSS interrupt enable register */
++#define USBSS_IRQ_ENABLE_SET 0x002c
++/* USBSS interrupt clear register */
++#define USBSS_IRQ_ENABLE_CLEAR 0x0030
++/* USB0: TxDMA 8bit tx completion interrupt pacing
++ threshold value for ep1..15 */
++#define USBSS_IRQ_DMA_THRESHOLD_TX0 0x0100
++/* USB0: RxDMA 8bit rx completion interrupt pacing
++ threshold value for ep1..15 */
++#define USBSS_IRQ_DMA_THRESHOLD_RX0 0x0110
++/* USB1: TxDMA 8bit tx completion interrupt pacing
++ threshold value for ep1..15 */
++#define USBSS_IRQ_DMA_THRESHOLD_TX1 0x0120
++/* USB1: RxDMA 8bit rx completion interrupt pacing
++ threshold value for ep1..15 */
++#define USBSS_IRQ_DMA_THRESHOLD_RX1 0x0130
++/* USB0: TxDMA threshold enable tx completion for ep1..ep15
++ RxDMA threshold enable rx completion for ep1..ep15 */
++#define USBSS_IRQ_DMA_ENABLE_0 0x0140
++/* USB1: TxDMA threshold enable for ep1..ep15
++ RxDMA threshold enable for ep1..ep15 */
++#define USBSS_IRQ_DMA_ENABLE_1 0x0144
++/* USB0: TxDMA Frame threshold for tx completion for ep1..ep15
++ RxDMA Frame threshold for rx completion for ep1..ep15 */
++#define USBSS_IRQ_FRAME_THRESHOLD_TX0 0x0200
++#define USBSS_IRQ_FRAME_THRESHOLD_RX0 0x0210
++/* USB1: TxDMA Frame threshold for tx completion for ep1..ep15
++ RxDMA Frame threshold for rx completion for ep1..ep15 */
++#define USBSS_IRQ_FRAME_THRESHOLD_TX1 0x0220
++#define USBSS_IRQ_FRAME_THRESHOLD_RX1 0x0230
++/* USB0: Frame threshold enable tx completion for ep1..ep15
++ Frame threshold enable rx completion for ep1..ep15 */
++#define USBSS_IRQ_FRAME_ENABLE_0 0x0240
++#define USBSS_IRQ_FRAME_ENABLE_1 0x0244
++
++
++/* USB 2.0 OTG module registers */
++#define USB_REVISION_REG 0x0000
++#define USB_CTRL_REG 0x0014
++#define USB_STAT_REG 0x0018
++#define USB_IRQ_MERGED_STATUS 0x0020
++#define USB_IRQ_EOI 0x0024
++#define USB_IRQ_STATUS_RAW_0 0x0028
++#define USB_IRQ_STATUS_RAW_1 0x002c
++#define USB_IRQ_STATUS_0 0x0030
++#define USB_IRQ_STATUS_1 0x0034
++#define USB_IRQ_ENABLE_SET_0 0x0038
++#define USB_IRQ_ENABLE_SET_1 0x003c
++#define USB_IRQ_ENABLE_CLR_0 0x0040
++#define USB_IRQ_ENABLE_CLR_1 0x0044
++
++#define USB_EP_INTR_SET_REG (USB_IRQ_ENABLE_SET_0)
++#define USB_CORE_INTR_SET_REG (USB_IRQ_ENABLE_SET_1)
++#define USB_EP_INTR_CLEAR_REG (USB_IRQ_ENABLE_CLR_0)
++#define USB_CORE_INTR_CLEAR_REG (USB_IRQ_ENABLE_CLR_1)
++#define USB_EP_INTR_STATUS_REG (USB_IRQ_STATUS_0)
++#define USB_CORE_INTR_STATUS_REG (USB_IRQ_STATUS_1)
++
++#define USB_GRNDIS_EPSIZE_OFFS 0X0080
++#define USB_SRP_FIX_TIME_REG 0x00d4
++#define USB_TH_XDMA_IDLE_REG 0x00dc
++#define USB_PHY_UTMI_REG 0x00e0
++#define USB_PHY_UTMI_LB_REG 0x00e4
++#define USB_MODE_REG 0x00e8
++
++#define QUEUE_THRESHOLD_INTR_ENABLE_REG 0xc0
++#define QUEUE_63_THRESHOLD_REG 0xc4
++#define QUEUE_63_THRESHOLD_INTR_CLEAR_REG 0xc8
++#define QUEUE_65_THRESHOLD_REG 0xd4
++#define QUEUE_65_THRESHOLD_INTR_CLEAR_REG 0xd8
++
++/* Control register bits */
++#define USB_SOFT_RESET_MASK 1
++
++/* Mode register bits */
++#define USB_MODE_SHIFT(n) ((((n) - 1) << 1))
++#define USB_MODE_MASK(n) (3 << USB_MODE_SHIFT(n))
++#define USB_RX_MODE_SHIFT(n) USB_MODE_SHIFT(n)
++#define USB_TX_MODE_SHIFT(n) USB_MODE_SHIFT(n)
++#define USB_RX_MODE_MASK(n) USB_MODE_MASK(n)
++#define USB_TX_MODE_MASK(n) USB_MODE_MASK(n)
++#define USB_TRANSPARENT_MODE 0
++#define USB_RNDIS_MODE 1
++#define USB_CDC_MODE 2
++#define USB_GENERIC_RNDIS_MODE 3
++
++/* AutoReq register bits */
++#define USB_RX_AUTOREQ_SHIFT(n) (((n) - 1) << 1)
++#define USB_RX_AUTOREQ_MASK(n) (3 << USB_RX_AUTOREQ_SHIFT(n))
++#define USB_NO_AUTOREQ 0
++#define USB_AUTOREQ_ALL_BUT_EOP 1
++#define USB_AUTOREQ_ALWAYS 3
++
++/* Teardown register bits */
++#define USB_TX_TDOWN_SHIFT(n) (16 + (n))
++#define USB_TX_TDOWN_MASK(n) (1 << USB_TX_TDOWN_SHIFT(n))
++#define USB_RX_TDOWN_SHIFT(n) (n)
++#define USB_RX_TDOWN_MASK(n) (1 << USB_RX_TDOWN_SHIFT(n))
++
++/* USB interrupt register bits */
++#define USB_INTR_USB_SHIFT 0
++#define USB_INTR_USB_MASK (0x1ff << USB_INTR_USB_SHIFT) /* 8 Mentor */
++ /* interrupts and DRVVBUS interrupt */
++#define USB_INTR_DRVVBUS 0x100
++#define USB_INTR_RX_SHIFT 16
++#define USB_INTR_TX_SHIFT 0
++
++#define USB_MENTOR_CORE_OFFSET 0x400
++#define USB_CPPI41_NUM_CH 15
++
++#define MAX_MUSB_INSTANCE 2
++/* CPPI 4.1 queue manager registers */
++#define QMGR_PEND0_REG 0x4090
++#define QMGR_PEND1_REG 0x4094
++#define QMGR_PEND2_REG 0x4098
++
++#define QMGR_RGN_OFFS 0x4000
++#define QMRG_DESCRGN_OFFS 0x5000
++#define QMGR_REG_OFFS 0x6000
++#define QMGR_STAT_OFFS 0x7000
++#define DMA_GLBCTRL_OFFS 0x2000
++#define DMA_CHCTRL_OFFS 0x2800
++#define DMA_SCHED_OFFS 0x3000
++#define DMA_SCHEDTBL_OFFS 0x3800
++
++#define USB_TX_EP_MASK 0xffff /* EP0 + 15 Tx EPs */
++#define USB_RX_EP_MASK 0xfffe /* 15 Rx EPs */
++
++#define USB_TX_INTR_MASK (USB_TX_EP_MASK << USB_INTR_TX_SHIFT)
++#define USB_RX_INTR_MASK (USB_RX_EP_MASK << USB_INTR_RX_SHIFT)
++
++#define A_WAIT_BCON_TIMEOUT 1100 /* in ms */
++
++#define USBSS_INTR_RX_STARV 0x00000001
++#define USBSS_INTR_PD_CMPL 0x00000004
++#define USBSS_INTR_TX_CMPL 0x00000500
++#define USBSS_INTR_RX_CMPL 0x00000A00
++#define USBSS_INTR_FLAGS (USBSS_INTR_PD_CMPL | USBSS_INTR_TX_CMPL \
++ | USBSS_INTR_RX_CMPL)
++
++#define USBMODE_USBID_MUXSEL 0x80
++#define USBMODE_USBID_HIGH 0x100
++
++#define USB0PORT_MODEMASK 0x0f
++#define USB1PORT_MODEMASK 0xf0
++#define USB1PORT_MODESHIFT 4
++extern void usb_nop_xceiv_register(int id);
++#endif
+diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
+index ec14801..9dd16b9 100644
+--- a/drivers/usb/musb/tusb6010.c
++++ b/drivers/usb/musb/tusb6010.c
+@@ -40,11 +40,11 @@ static void tusb_musb_set_vbus(struct musb *musb, int is_on);
+ * Checks the revision. We need to use the DMA register as 3.0 does not
+ * have correct versions for TUSB_PRCM_REV or TUSB_INT_CTRL_REV.
+ */
+-u8 tusb_get_revision(struct musb *musb)
++static u16 tusb_get_revision(struct musb *musb)
+ {
+ void __iomem *tbase = musb->ctrl_base;
+ u32 die_id;
+- u8 rev;
++ u16 rev;
+
+ rev = musb_readl(tbase, TUSB_DMA_CTRL_REV) & 0xff;
+ if (TUSB_REV_MAJOR(rev) == 3) {
+@@ -56,11 +56,12 @@ u8 tusb_get_revision(struct musb *musb)
+
+ return rev;
+ }
++EXPORT_SYMBOL_GPL(tusb_get_revision);
+
+ static int tusb_print_revision(struct musb *musb)
+ {
+ void __iomem *tbase = musb->ctrl_base;
+- u8 rev;
++ u16 rev;
+
+ rev = tusb_get_revision(musb);
+
+@@ -171,7 +172,8 @@ static inline void tusb_fifo_read_unaligned(void __iomem *fifo,
+ }
+ }
+
+-void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf)
++static void tusb_musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len,
++ const u8 *buf)
+ {
+ struct musb *musb = hw_ep->musb;
+ void __iomem *ep_conf = hw_ep->conf;
+@@ -221,7 +223,7 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf)
+ tusb_fifo_write_unaligned(fifo, buf, len);
+ }
+
+-void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
++static void tusb_musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
+ {
+ struct musb *musb = hw_ep->musb;
+ void __iomem *ep_conf = hw_ep->conf;
+@@ -854,7 +856,7 @@ static irqreturn_t tusb_musb_interrupt(int irq, void *__hci)
+
+ dev_dbg(musb->controller, "DMA IRQ %08x\n", dma_src);
+ real_dma_src = ~real_dma_src & dma_src;
+- if (tusb_dma_omap() && real_dma_src) {
++ if (tusb_dma_omap(musb) && real_dma_src) {
+ int tx_source = (real_dma_src & 0xffff);
+ int i;
+
+@@ -1074,8 +1076,8 @@ static int tusb_musb_init(struct musb *musb)
+ void __iomem *sync = NULL;
+ int ret;
+
+- usb_nop_xceiv_register();
+- musb->xceiv = otg_get_transceiver();
++ usb_nop_xceiv_register(musb->id);
++ musb->xceiv = otg_get_transceiver(musb->id);
+ if (!musb->xceiv)
+ return -ENODEV;
+
+@@ -1128,7 +1130,7 @@ done:
+ iounmap(sync);
+
+ otg_put_transceiver(musb->xceiv);
+- usb_nop_xceiv_unregister();
++ usb_nop_xceiv_unregister(musb->id);
+ }
+ return ret;
+ }
+@@ -1144,11 +1146,14 @@ static int tusb_musb_exit(struct musb *musb)
+ iounmap(musb->sync_va);
+
+ otg_put_transceiver(musb->xceiv);
+- usb_nop_xceiv_unregister();
++ usb_nop_xceiv_unregister(musb->id);
+ return 0;
+ }
+
+ static const struct musb_platform_ops tusb_ops = {
++ .fifo_mode = 4,
++ .flags = MUSB_GLUE_TUSB_STYLE |
++ MUSB_GLUE_EP_ADDR_INDEXED_MAPPING,
+ .init = tusb_musb_init,
+ .exit = tusb_musb_exit,
+
+@@ -1158,8 +1163,15 @@ static const struct musb_platform_ops tusb_ops = {
+ .set_mode = tusb_musb_set_mode,
+ .try_idle = tusb_musb_try_idle,
+
++ .get_hw_revision = tusb_get_revision,
++
+ .vbus_status = tusb_musb_vbus_status,
+ .set_vbus = tusb_musb_set_vbus,
++ .read_fifo = tusb_musb_read_fifo,
++ .write_fifo = tusb_musb_write_fifo,
++
++ .dma_controller_create = tusb_dma_controller_create,
++ .dma_controller_destroy = tusb_dma_controller_destroy,
+ };
+
+ static u64 tusb_dmamask = DMA_BIT_MASK(32);
+diff --git a/drivers/usb/musb/tusb6010.h b/drivers/usb/musb/tusb6010.h
+index 35c933a..72cdad2 100644
+--- a/drivers/usb/musb/tusb6010.h
++++ b/drivers/usb/musb/tusb6010.h
+@@ -12,20 +12,6 @@
+ #ifndef __TUSB6010_H__
+ #define __TUSB6010_H__
+
+-extern u8 tusb_get_revision(struct musb *musb);
+-
+-#ifdef CONFIG_USB_TUSB6010
+-#define musb_in_tusb() 1
+-#else
+-#define musb_in_tusb() 0
+-#endif
+-
+-#ifdef CONFIG_USB_TUSB_OMAP_DMA
+-#define tusb_dma_omap() 1
+-#else
+-#define tusb_dma_omap() 0
+-#endif
+-
+ /* VLYNQ control register. 32-bit at offset 0x000 */
+ #define TUSB_VLYNQ_CTRL 0x004
+
+diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
+index b67b4bc..4af0c2d 100644
+--- a/drivers/usb/musb/tusb6010_omap.c
++++ b/drivers/usb/musb/tusb6010_omap.c
+@@ -178,12 +178,12 @@ static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data)
+ dma_unmap_single(dev, chdat->dma_addr,
+ chdat->transfer_len,
+ DMA_TO_DEVICE);
+- musb_write_fifo(hw_ep, pio, buf);
++ musb->ops->write_fifo(hw_ep, pio, buf);
+ } else {
+ dma_unmap_single(dev, chdat->dma_addr,
+ chdat->transfer_len,
+ DMA_FROM_DEVICE);
+- musb_read_fifo(hw_ep, pio, buf);
++ musb->ops->read_fifo(hw_ep, pio, buf);
+ }
+ channel->actual_len += pio;
+ }
+@@ -211,7 +211,7 @@ static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data)
+
+ if (chdat->tx) {
+ dev_dbg(musb->controller, "terminating short tx packet\n");
+- musb_ep_select(mbase, chdat->epnum);
++ musb_ep_select(musb, mbase, chdat->epnum);
+ csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
+ csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY
+ | MUSB_TXCSR_P_WZC_BITS;
+@@ -386,14 +386,14 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
+ * Prepare MUSB for DMA transfer
+ */
+ if (chdat->tx) {
+- musb_ep_select(mbase, chdat->epnum);
++ musb_ep_select(musb, mbase, chdat->epnum);
+ csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
+ csr |= (MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
+ csr &= ~MUSB_TXCSR_P_UNDERRUN;
+ musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
+ } else {
+- musb_ep_select(mbase, chdat->epnum);
++ musb_ep_select(musb, mbase, chdat->epnum);
+ csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_DMAENAB;
+ csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE);
+@@ -642,7 +642,7 @@ static void tusb_omap_dma_release(struct dma_channel *channel)
+ channel = NULL;
+ }
+
+-void dma_controller_destroy(struct dma_controller *c)
++void tusb_dma_controller_destroy(struct dma_controller *c)
+ {
+ struct tusb_omap_dma *tusb_dma;
+ int i;
+@@ -661,9 +661,10 @@ void dma_controller_destroy(struct dma_controller *c)
+
+ kfree(tusb_dma);
+ }
++EXPORT_SYMBOL(tusb_dma_controller_destroy);
+
+-struct dma_controller *__init
+-dma_controller_create(struct musb *musb, void __iomem *base)
++struct dma_controller *__devinit
++tusb_dma_controller_create(struct musb *musb, void __iomem *base)
+ {
+ void __iomem *tbase = musb->ctrl_base;
+ struct tusb_omap_dma *tusb_dma;
+@@ -697,7 +698,7 @@ dma_controller_create(struct musb *musb, void __iomem *base)
+ tusb_dma->controller.channel_program = tusb_omap_dma_program;
+ tusb_dma->controller.channel_abort = tusb_omap_dma_abort;
+
+- if (tusb_get_revision(musb) >= TUSB_REV_30)
++ if (musb_platform_get_hw_revision(musb) >= TUSB_REV_30)
+ tusb_dma->multichannel = 1;
+
+ for (i = 0; i < MAX_DMAREQ; i++) {
+@@ -721,7 +722,22 @@ dma_controller_create(struct musb *musb, void __iomem *base)
+ return &tusb_dma->controller;
+
+ cleanup:
+- dma_controller_destroy(&tusb_dma->controller);
++ tusb_dma_controller_destroy(&tusb_dma->controller);
+ out:
+ return NULL;
+ }
++EXPORT_SYMBOL(tusb_dma_controller_create);
++
++MODULE_DESCRIPTION("TUSB dma controller driver for musb");
++MODULE_LICENSE("GPL v2");
++
++static int __init tusb_dma_init(void)
++{
++ return 0;
++}
++module_init(tusb_dma_init);
++
++static void __exit tusb_dma_exit(void)
++{
++}
++module_exit(tusb_dma_exit);
+diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
+index f7e04bf..8209e23 100644
+--- a/drivers/usb/musb/ux500.c
++++ b/drivers/usb/musb/ux500.c
+@@ -54,6 +54,7 @@ static int ux500_musb_exit(struct musb *musb)
+ }
+
+ static const struct musb_platform_ops ux500_ops = {
++ .fifo_mode = 5,
+ .init = ux500_musb_init,
+ .exit = ux500_musb_exit,
+ };
+diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
+index ef4333f..a163632 100644
+--- a/drivers/usb/musb/ux500_dma.c
++++ b/drivers/usb/musb/ux500_dma.c
+@@ -37,7 +37,6 @@ struct ux500_dma_channel {
+ struct dma_channel channel;
+ struct ux500_dma_controller *controller;
+ struct musb_hw_ep *hw_ep;
+- struct work_struct channel_work;
+ struct dma_chan *dma_chan;
+ unsigned int cur_len;
+ dma_cookie_t cookie;
+@@ -56,31 +55,11 @@ struct ux500_dma_controller {
+ dma_addr_t phy_base;
+ };
+
+-/* Work function invoked from DMA callback to handle tx transfers. */
+-static void ux500_tx_work(struct work_struct *data)
+-{
+- struct ux500_dma_channel *ux500_channel = container_of(data,
+- struct ux500_dma_channel, channel_work);
+- struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
+- struct musb *musb = hw_ep->musb;
+- unsigned long flags;
+-
+- dev_dbg(musb->controller, "DMA tx transfer done on hw_ep=%d\n",
+- hw_ep->epnum);
+-
+- spin_lock_irqsave(&musb->lock, flags);
+- ux500_channel->channel.actual_len = ux500_channel->cur_len;
+- ux500_channel->channel.status = MUSB_DMA_STATUS_FREE;
+- musb_dma_completion(musb, hw_ep->epnum,
+- ux500_channel->is_tx);
+- spin_unlock_irqrestore(&musb->lock, flags);
+-}
+-
+ /* Work function invoked from DMA callback to handle rx transfers. */
+-static void ux500_rx_work(struct work_struct *data)
++void ux500_dma_callback(void *private_data)
+ {
+- struct ux500_dma_channel *ux500_channel = container_of(data,
+- struct ux500_dma_channel, channel_work);
++ struct dma_channel *channel = private_data;
++ struct ux500_dma_channel *ux500_channel = channel->private_data;
+ struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
+ struct musb *musb = hw_ep->musb;
+ unsigned long flags;
+@@ -94,14 +73,7 @@ static void ux500_rx_work(struct work_struct *data)
+ musb_dma_completion(musb, hw_ep->epnum,
+ ux500_channel->is_tx);
+ spin_unlock_irqrestore(&musb->lock, flags);
+-}
+-
+-void ux500_dma_callback(void *private_data)
+-{
+- struct dma_channel *channel = (struct dma_channel *)private_data;
+- struct ux500_dma_channel *ux500_channel = channel->private_data;
+
+- schedule_work(&ux500_channel->channel_work);
+ }
+
+ static bool ux500_configure_channel(struct dma_channel *channel,
+@@ -330,7 +302,6 @@ static int ux500_dma_controller_start(struct dma_controller *c)
+ void **param_array;
+ struct ux500_dma_channel *channel_array;
+ u32 ch_count;
+- void (*musb_channel_work)(struct work_struct *);
+ dma_cap_mask_t mask;
+
+ if ((data->num_rx_channels > UX500_MUSB_DMA_NUM_RX_CHANNELS) ||
+@@ -347,7 +318,6 @@ static int ux500_dma_controller_start(struct dma_controller *c)
+ channel_array = controller->rx_channel;
+ ch_count = data->num_rx_channels;
+ param_array = data->dma_rx_param_array;
+- musb_channel_work = ux500_rx_work;
+
+ for (dir = 0; dir < 2; dir++) {
+ for (ch_num = 0; ch_num < ch_count; ch_num++) {
+@@ -374,15 +344,12 @@ static int ux500_dma_controller_start(struct dma_controller *c)
+ return -EBUSY;
+ }
+
+- INIT_WORK(&ux500_channel->channel_work,
+- musb_channel_work);
+ }
+
+ /* Prepare the loop for TX channels */
+ channel_array = controller->tx_channel;
+ ch_count = data->num_tx_channels;
+ param_array = data->dma_tx_param_array;
+- musb_channel_work = ux500_tx_work;
+ is_tx = 1;
+ }
+
+diff --git a/drivers/usb/otg/nop-usb-xceiv.c b/drivers/usb/otg/nop-usb-xceiv.c
+index c1e3600..31b4fa3 100644
+--- a/drivers/usb/otg/nop-usb-xceiv.c
++++ b/drivers/usb/otg/nop-usb-xceiv.c
+@@ -37,24 +37,24 @@ struct nop_usb_xceiv {
+ struct device *dev;
+ };
+
+-static struct platform_device *pd;
++static struct platform_device *pd[2] = {NULL, NULL};
+
+-void usb_nop_xceiv_register(void)
++void usb_nop_xceiv_register(int id)
+ {
+- if (pd)
++ if (pd[id])
+ return;
+- pd = platform_device_register_simple("nop_usb_xceiv", -1, NULL, 0);
+- if (!pd) {
++ pd[id] = platform_device_register_simple("nop_usb_xceiv", id, NULL, 0);
++ if (!pd[id]) {
+ printk(KERN_ERR "Unable to register usb nop transceiver\n");
+ return;
+ }
+ }
+ EXPORT_SYMBOL(usb_nop_xceiv_register);
+
+-void usb_nop_xceiv_unregister(void)
++void usb_nop_xceiv_unregister(int id)
+ {
+- platform_device_unregister(pd);
+- pd = NULL;
++ platform_device_unregister(pd[id]);
++ pd[id] = NULL;
+ }
+ EXPORT_SYMBOL(usb_nop_xceiv_unregister);
+
+@@ -122,6 +122,7 @@ static int __devinit nop_usb_xceiv_probe(struct platform_device *pdev)
+ nop->otg.set_host = nop_set_host;
+ nop->otg.set_peripheral = nop_set_peripheral;
+ nop->otg.set_suspend = nop_set_suspend;
++ nop->otg.id = pdev->id;
+
+ err = otg_set_transceiver(&nop->otg);
+ if (err) {
+@@ -144,7 +145,7 @@ static int __devexit nop_usb_xceiv_remove(struct platform_device *pdev)
+ {
+ struct nop_usb_xceiv *nop = platform_get_drvdata(pdev);
+
+- otg_set_transceiver(NULL);
++ otg_reset_transceiver(&nop->otg);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(nop);
+diff --git a/drivers/usb/otg/otg.c b/drivers/usb/otg/otg.c
+index 307c27b..15eefbf 100644
+--- a/drivers/usb/otg/otg.c
++++ b/drivers/usb/otg/otg.c
+@@ -15,7 +15,7 @@
+
+ #include <linux/usb/otg.h>
+
+-static struct otg_transceiver *xceiv;
++static struct otg_transceiver *xceiv[2];
+
+ /**
+ * otg_get_transceiver - find the (single) OTG transceiver
+@@ -26,11 +26,11 @@ static struct otg_transceiver *xceiv;
+ *
+ * For use by USB host and peripheral drivers.
+ */
+-struct otg_transceiver *otg_get_transceiver(void)
++struct otg_transceiver *otg_get_transceiver(int id)
+ {
+- if (xceiv)
+- get_device(xceiv->dev);
+- return xceiv;
++ if (xceiv[id])
++ get_device(xceiv[id]->dev);
++ return xceiv[id];
+ }
+ EXPORT_SYMBOL(otg_get_transceiver);
+
+@@ -59,13 +59,30 @@ EXPORT_SYMBOL(otg_put_transceiver);
+ */
+ int otg_set_transceiver(struct otg_transceiver *x)
+ {
+- if (xceiv && x)
++	if (x && xceiv[x->id])
+ return -EBUSY;
+- xceiv = x;
++ xceiv[x->id] = x;
+ return 0;
+ }
+ EXPORT_SYMBOL(otg_set_transceiver);
+
++
++/**
++ * otg_reset_transceiver - remove a previously declared OTG transceiver
++ * @x: the USB OTG transceiver to be removed; or NULL
++ *
++ * This call is exclusively for use by transceiver drivers, which
++ * coordinate the activities of drivers for host and peripheral
++ * controllers, and in some cases for VBUS current regulation.
++ */
++int otg_reset_transceiver(struct otg_transceiver *x)
++{
++ if (x && xceiv[x->id])
++ xceiv[x->id] = NULL;
++ return 0;
++}
++EXPORT_SYMBOL(otg_reset_transceiver);
++
+ const char *otg_state_string(enum usb_otg_state state)
+ {
+ switch (state) {
+diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
+index aa0d183..3906c42 100644
+--- a/drivers/usb/renesas_usbhs/mod_gadget.c
++++ b/drivers/usb/renesas_usbhs/mod_gadget.c
+@@ -755,7 +755,7 @@ static int usbhsg_gadget_start(struct usb_gadget *gadget,
+
+ if (!driver ||
+ !driver->setup ||
+- driver->speed < USB_SPEED_FULL)
++ driver->max_speed < USB_SPEED_FULL)
+ return -EINVAL;
+
+ /* first hook up the driver ... */
+@@ -816,11 +816,6 @@ static int usbhsg_stop(struct usbhs_priv *priv)
+ return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED);
+ }
+
+-static void usbhs_mod_gadget_release(struct device *pdev)
+-{
+- /* do nothing */
+-}
+-
+ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
+ {
+ struct usbhsg_gpriv *gpriv;
+@@ -869,10 +864,9 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
+ */
+ dev_set_name(&gpriv->gadget.dev, "gadget");
+ gpriv->gadget.dev.parent = dev;
+- gpriv->gadget.dev.release = usbhs_mod_gadget_release;
+ gpriv->gadget.name = "renesas_usbhs_udc";
+ gpriv->gadget.ops = &usbhsg_gadget_ops;
+- gpriv->gadget.is_dualspeed = 1;
++ gpriv->gadget.max_speed = USB_SPEED_HIGH;
+ ret = device_register(&gpriv->gadget.dev);
+ if (ret < 0)
+ goto err_add_udc;
+diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
+index d83e967..64754de 100644
+--- a/drivers/video/Kconfig
++++ b/drivers/video/Kconfig
+@@ -30,7 +30,7 @@ config VGASTATE
+ config VIDEO_OUTPUT_CONTROL
+ tristate "Lowlevel video output switch controls"
+ help
+- This framework adds support for low-level control of the video
++ This framework adds support for low-level control of the video
+ output switch.
+
+ menuconfig FB
+@@ -691,7 +691,7 @@ config FB_STI
+ BIOS routines contained in a ROM chip in HP PA-RISC based machines.
+ Enabling this option will implement the linux framebuffer device
+ using calls to the STI BIOS routines for initialisation.
+-
++
+ If you enable this option, you will get a planar framebuffer device
+ /dev/fb which will work on the most common HP graphic cards of the
+ NGLE family, including the artist chips (in the 7xx and Bxxx series),
+@@ -1132,36 +1132,36 @@ config FB_I810
+ select FB_CFB_IMAGEBLIT
+ select VGASTATE
+ help
+- This driver supports the on-board graphics built in to the Intel 810
++ This driver supports the on-board graphics built in to the Intel 810
+ and 815 chipsets. Say Y if you have and plan to use such a board.
+
+ To compile this driver as a module, choose M here: the
+ module will be called i810fb.
+
+- For more information, please read
++ For more information, please read
+ <file:Documentation/fb/intel810.txt>
+
+ config FB_I810_GTF
+ bool "use VESA Generalized Timing Formula"
+ depends on FB_I810
+ help
+- If you say Y, then the VESA standard, Generalized Timing Formula
++ If you say Y, then the VESA standard, Generalized Timing Formula
+ or GTF, will be used to calculate the required video timing values
+- per video mode. Since the GTF allows nondiscrete timings
++ per video mode. Since the GTF allows nondiscrete timings
+ (nondiscrete being a range of values as opposed to discrete being a
+- set of values), you'll be able to use any combination of horizontal
++ set of values), you'll be able to use any combination of horizontal
+ and vertical resolutions, and vertical refresh rates without having
+ to specify your own timing parameters. This is especially useful
+- to maximize the performance of an aging display, or if you just
+- have a display with nonstandard dimensions. A VESA compliant
++ to maximize the performance of an aging display, or if you just
++ have a display with nonstandard dimensions. A VESA compliant
+ monitor is recommended, but can still work with non-compliant ones.
+- If you need or want this, then select this option. The timings may
+- not be compliant with Intel's recommended values. Use at your own
++ If you need or want this, then select this option. The timings may
++ not be compliant with Intel's recommended values. Use at your own
+ risk.
+
+- If you say N, the driver will revert to discrete video timings
++ If you say N, the driver will revert to discrete video timings
+ using a set recommended by Intel in their documentation.
+-
++
+ If unsure, say N.
+
+ config FB_I810_I2C
+@@ -1279,10 +1279,10 @@ config FB_MATROX_G
+ G450/G550 secondary head and digital output are supported without
+ additional modules.
+
+- The driver starts in monitor mode. You must use the matroxset tool
+- (available at <ftp://platan.vc.cvut.cz/pub/linux/matrox-latest/>) to
+- swap primary and secondary head outputs, or to change output mode.
+- Secondary head driver always start in 640x480 resolution and you
++ The driver starts in monitor mode. You must use the matroxset tool
++ (available at <ftp://platan.vc.cvut.cz/pub/linux/matrox-latest/>) to
++ swap primary and secondary head outputs, or to change output mode.
++ Secondary head driver always start in 640x480 resolution and you
+ must use fbset to change it.
+
+ Do not forget that second head supports only 16 and 32 bpp
+@@ -1365,7 +1365,7 @@ config FB_RADEON_I2C
+ select FB_DDC
+ default y
+ help
+- Say Y here if you want DDC/I2C support for your Radeon board.
++ Say Y here if you want DDC/I2C support for your Radeon board.
+
+ config FB_RADEON_BACKLIGHT
+ bool "Support for backlight control"
+@@ -1599,7 +1599,7 @@ config FB_NEOMAGIC
+ select VGASTATE
+ help
+ This driver supports notebooks with NeoMagic PCI chips.
+- Say Y if you have such a graphics card.
++ Say Y if you have such a graphics card.
+
+ To compile this driver as a module, choose M here: the
+ module will be called neofb.
+@@ -1654,7 +1654,7 @@ config FB_VOODOO1
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ ---help---
+- Say Y here if you have a 3Dfx Voodoo Graphics (Voodoo1/sst1) or
++ Say Y here if you have a 3Dfx Voodoo Graphics (Voodoo1/sst1) or
+ Voodoo2 (cvg) based graphics card.
+
+ To compile this driver as a module, choose M here: the
+@@ -2229,7 +2229,7 @@ config FB_SH7760
+
+ config FB_DA8XX
+ tristate "DA8xx/OMAP-L1xx Framebuffer support"
+- depends on FB && ARCH_DAVINCI_DA8XX
++ depends on FB && (ARCH_DAVINCI_DA8XX || SOC_OMAPAM33XX)
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+@@ -2238,6 +2238,17 @@ config FB_DA8XX
+ found on DA8xx/OMAP-L1xx SoCs.
+ If unsure, say N.
+
++config FB_DA8XX_CONSISTENT_DMA_SIZE
++ int "Consistent DMA memory size (MB)"
++ depends on (FB_DA8XX && MACH_AM335XEVM)
++ range 1 14
++ default 4
++ help
++ Increase the DMA consistent memory size according to your video
++ memory needs, for example if you want to use multiple planes.
++ The size must be 2MB aligned.
++ If unsure say 1.
++
+ config FB_VIRTUAL
+ tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)"
+ depends on FB
+@@ -2409,6 +2420,17 @@ config FB_PUV3_UNIGFX
+ Choose this option if you want to use the Unigfx device as a
+ framebuffer device. Without the support of PCI & AGP.
+
++config FB_ST7735
++ tristate "ST7735 framebuffer support"
++ depends on FB && SPI
++ select FB_SYS_FILLRECT
++ select FB_SYS_COPYAREA
++ select FB_SYS_IMAGEBLIT
++ select FB_SYS_FOPS
++ select FB_DEFERRED_IO
++ help
++ Framebuffer support for the ST7735 display controller in SPI mode.
++
+ source "drivers/video/omap/Kconfig"
+ source "drivers/video/omap2/Kconfig"
+
+diff --git a/drivers/video/Makefile b/drivers/video/Makefile
+index 9b9d8ff..c6d9851 100644
+--- a/drivers/video/Makefile
++++ b/drivers/video/Makefile
+@@ -143,6 +143,7 @@ obj-$(CONFIG_FB_MSM) += msm/
+ obj-$(CONFIG_FB_NUC900) += nuc900fb.o
+ obj-$(CONFIG_FB_JZ4740) += jz4740_fb.o
+ obj-$(CONFIG_FB_PUV3_UNIGFX) += fb-puv3.o
++obj-$(CONFIG_FB_ST7735) += st7735fb.o
+
+ # Platform or fallback drivers go here
+ obj-$(CONFIG_FB_UVESA) += uvesafb.o
+diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
+index 278aeaa..df9dac7 100644
+--- a/drivers/video/backlight/Kconfig
++++ b/drivers/video/backlight/Kconfig
+@@ -342,6 +342,14 @@ config BACKLIGHT_AAT2870
+ If you have a AnalogicTech AAT2870 say Y to enable the
+ backlight driver.
+
++config BACKLIGHT_TLC59108
++ tristate "TLC59108 LCD Backlight Driver"
++ depends on I2C && BACKLIGHT_CLASS_DEVICE
++ default n
++ help
++ If you have an LCD Panel with backlight control via TLC59108,
++ say Y to enable its LCD control driver.
++
+ endif # BACKLIGHT_CLASS_DEVICE
+
+ endif # BACKLIGHT_LCD_SUPPORT
+diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
+index fdd1fc4..ba31474 100644
+--- a/drivers/video/backlight/Makefile
++++ b/drivers/video/backlight/Makefile
+@@ -39,4 +39,5 @@ obj-$(CONFIG_BACKLIGHT_ADP8870) += adp8870_bl.o
+ obj-$(CONFIG_BACKLIGHT_88PM860X) += 88pm860x_bl.o
+ obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o
+ obj-$(CONFIG_BACKLIGHT_AAT2870) += aat2870_bl.o
++obj-$(CONFIG_BACKLIGHT_TLC59108) += tlc59108.o
+
+diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
+index 8b5b2a4..48092b4 100644
+--- a/drivers/video/backlight/pwm_bl.c
++++ b/drivers/video/backlight/pwm_bl.c
+@@ -17,7 +17,7 @@
+ #include <linux/fb.h>
+ #include <linux/backlight.h>
+ #include <linux/err.h>
+-#include <linux/pwm.h>
++#include <linux/pwm/pwm.h>
+ #include <linux/pwm_backlight.h>
+ #include <linux/slab.h>
+
+@@ -49,13 +49,14 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
+ brightness = pb->notify(pb->dev, brightness);
+
+ if (brightness == 0) {
+- pwm_config(pb->pwm, 0, pb->period);
+- pwm_disable(pb->pwm);
++ pwm_set_duty_ns(pb->pwm, 0);
++ pwm_stop(pb->pwm);
+ } else {
+ brightness = pb->lth_brightness +
+ (brightness * (pb->period - pb->lth_brightness) / max);
+- pwm_config(pb->pwm, brightness, pb->period);
+- pwm_enable(pb->pwm);
++ pwm_set_period_ns(pb->pwm, pb->period);
++ pwm_set_duty_ns(pb->pwm, brightness);
++ pwm_start(pb->pwm);
+ }
+
+ if (pb->notify_after)
+@@ -117,7 +118,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
+ (data->pwm_period_ns / data->max_brightness);
+ pb->dev = &pdev->dev;
+
+- pb->pwm = pwm_request(data->pwm_id, "backlight");
++ pb->pwm = pwm_request(data->pwm_id, data->ch, "backlight");
+ if (IS_ERR(pb->pwm)) {
+ dev_err(&pdev->dev, "unable to request PWM for backlight\n");
+ ret = PTR_ERR(pb->pwm);
+@@ -143,7 +144,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
+ return 0;
+
+ err_bl:
+- pwm_free(pb->pwm);
++ pwm_release(pb->pwm);
+ err_pwm:
+ kfree(pb);
+ err_alloc:
+@@ -159,9 +160,9 @@ static int pwm_backlight_remove(struct platform_device *pdev)
+ struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);
+
+ backlight_device_unregister(bl);
+- pwm_config(pb->pwm, 0, pb->period);
+- pwm_disable(pb->pwm);
+- pwm_free(pb->pwm);
++ pwm_set_duty_ns(pb->pwm, 0);
++ pwm_stop(pb->pwm);
++ pwm_release(pb->pwm);
+ kfree(pb);
+ if (data->exit)
+ data->exit(&pdev->dev);
+@@ -177,10 +178,8 @@ static int pwm_backlight_suspend(struct platform_device *pdev,
+
+ if (pb->notify)
+ pb->notify(pb->dev, 0);
+- pwm_config(pb->pwm, 0, pb->period);
+- pwm_disable(pb->pwm);
+- if (pb->notify_after)
+- pb->notify_after(pb->dev, 0);
++ pwm_set_duty_ns(pb->pwm, 0);
++ pwm_stop(pb->pwm);
+ return 0;
+ }
+
+diff --git a/drivers/video/backlight/tlc59108.c b/drivers/video/backlight/tlc59108.c
+new file mode 100755
+index 0000000..d7e9a4f
+--- /dev/null
++++ b/drivers/video/backlight/tlc59108.c
+@@ -0,0 +1,170 @@
++/*
++ * tlc59108.c
++ *
++ * Copyright (C) 2011 Texas Instruments
++ * Author: Senthil Natarajan
++ *
++ * TLC59108 backlight driver
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program. If not, see <http://www.gnu.org/licenses/>.
++ * History:
++ *
++ * Senthil Natarajan<senthil.n@ti.com> July 2011 I2C driver for tlc59108
++ * backlight control
++ */
++
++#include <linux/i2c.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/backlight.h>
++#include <linux/fb.h>
++
++#define tlc59108_MODULE_NAME "tlc59108"
++#define TLC59108_MODE1 0x00
++#define TLC59108_PWM2 0x04
++#define TLC59108_LEDOUT0 0x0c
++#define TLC59108_LEDOUT1 0x0d
++#define TLC59108_MAX_BRIGHTNESS 0xFF
++
++struct tlc59108_bl {
++ struct i2c_client *client;
++ struct backlight_device *bl;
++};
++
++static void tlc59108_bl_set_backlight(struct tlc59108_bl *data, int brightness)
++{
++ /* Set Mode1 Register */
++ i2c_smbus_write_byte_data(data->client, TLC59108_MODE1, 0x00);
++
++ /* Set LEDOUT0 Register */
++ i2c_smbus_write_byte_data(data->client, TLC59108_LEDOUT0, 0x21);
++
++ /* Set Backlight Duty Cycle*/
++ i2c_smbus_write_byte_data(data->client, TLC59108_PWM2,
++ brightness & 0xff);
++}
++
++static int tlc59108_bl_get_brightness(struct backlight_device *dev)
++{
++ struct backlight_properties *props = &dev->props;
++
++ return props->brightness;
++}
++
++static int tlc59108_bl_update_status(struct backlight_device *dev)
++{
++ struct backlight_properties *props = &dev->props;
++ struct tlc59108_bl *data = dev_get_drvdata(&dev->dev);
++ int brightness = props->brightness;
++
++ tlc59108_bl_set_backlight(data, brightness);
++
++ return 0;
++}
++
++static const struct backlight_ops bl_ops = {
++ .get_brightness = tlc59108_bl_get_brightness,
++ .update_status = tlc59108_bl_update_status,
++};
++
++static int tlc59108_probe(struct i2c_client *c, const struct i2c_device_id *id)
++{
++ struct backlight_properties props;
++ struct tlc59108_bl *data = kzalloc(sizeof(struct tlc59108_bl),
++ GFP_KERNEL);
++ int ret = 0;
++
++ if (!data)
++ return -ENOMEM;
++
++ i2c_set_clientdata(c, data);
++ data->client = c;
++
++	/* FIXME: This is definitely not how it should be done.
++ * Someone more familiar with the framework needs to make it proper
++ */
++ if (cpu_is_am335x()) {
++ /* Set LED4 to always on in LEDOUT1 Register*/
++ ret = i2c_smbus_write_byte_data(data->client, TLC59108_LEDOUT1, 0x01);
++ if(ret < 0)
++ pr_err("Could not set LED4 to fully on\n");
++ }
++
++ memset(&props, 0, sizeof(struct backlight_properties));
++ props.max_brightness = TLC59108_MAX_BRIGHTNESS;
++ props.type = BACKLIGHT_RAW;
++ data->bl = backlight_device_register("tlc59108-bl", &c->dev, data,
++ &bl_ops, &props);
++ if (IS_ERR(data->bl)) {
++ ret = PTR_ERR(data->bl);
++ goto err_reg;
++ }
++
++ data->bl->props.brightness = TLC59108_MAX_BRIGHTNESS;
++
++ backlight_update_status(data->bl);
++
++ return 0;
++
++err_reg:
++ data->bl = NULL;
++ kfree(data);
++ return ret;
++}
++
++static int tlc59108_remove(struct i2c_client *c)
++{
++ struct tlc59108_bl *data = i2c_get_clientdata(c);
++
++ backlight_device_unregister(data->bl);
++ data->bl = NULL;
++
++ kfree(data);
++
++ return 0;
++}
++
++/* I2C Device ID table */
++static const struct i2c_device_id tlc59108_id[] = {
++ { "tlc59108", 0 },
++ { }
++};
++MODULE_DEVICE_TABLE(i2c, tlc59108_id);
++
++/* I2C driver data */
++static struct i2c_driver tlc59108_driver = {
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = tlc59108_MODULE_NAME,
++ },
++ .probe = tlc59108_probe,
++ .remove = tlc59108_remove,
++ .id_table = tlc59108_id,
++};
++
++static int __init tlc59108_init(void)
++{
++ return i2c_add_driver(&tlc59108_driver);
++}
++
++static void __exit tlc59108_exit(void)
++{
++ i2c_del_driver(&tlc59108_driver);
++}
++
++module_init(tlc59108_init);
++module_exit(tlc59108_exit);
++
++MODULE_DESCRIPTION("LCD/Backlight control for TLC59108");
++MODULE_AUTHOR("Senthil Natarajan <senthil.n@ti.com>");
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
+index 56720fb..46b03f5 100644
+--- a/drivers/video/bf54x-lq043fb.c
++++ b/drivers/video/bf54x-lq043fb.c
+@@ -4,7 +4,7 @@
+ * Author: Michael Hennerich <hennerich@blackfin.uclinux.org>
+ *
+ * Created:
+- * Description: ADSP-BF54x Framebufer driver
++ * Description: ADSP-BF54x Framebuffer driver
+ *
+ *
+ * Modified:
+diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
+index d5e1267..7a0c05f 100644
+--- a/drivers/video/bfin-t350mcqb-fb.c
++++ b/drivers/video/bfin-t350mcqb-fb.c
+@@ -4,7 +4,7 @@
+ * Author: Michael Hennerich <hennerich@blackfin.uclinux.org>
+ *
+ * Created:
+- * Description: Blackfin LCD Framebufer driver
++ * Description: Blackfin LCD Framebuffer driver
+ *
+ *
+ * Modified:
+diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
+index 29577bf..86b19ac 100644
+--- a/drivers/video/da8xx-fb.c
++++ b/drivers/video/da8xx-fb.c
+@@ -30,8 +30,12 @@
+ #include <linux/clk.h>
+ #include <linux/cpufreq.h>
+ #include <linux/console.h>
++#include <linux/spinlock.h>
+ #include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/pm_runtime.h>
+ #include <video/da8xx-fb.h>
++#include <asm/mach-types.h>
+
+ #define DRIVER_NAME "da8xx_lcdc"
+
+@@ -82,6 +86,8 @@
+ #define LCD_V2_LIDD_CLK_EN BIT(1)
+ #define LCD_V2_CORE_CLK_EN BIT(0)
+ #define LCD_V2_LPP_B10 26
++#define LCD_V2_TFT_24BPP_MODE BIT(25)
++#define LCD_V2_TFT_24BPP_UNPACK BIT(26)
+
+ /* LCD Raster Timing 2 Register */
+ #define LCD_AC_BIAS_TRANSITIONS_PER_INT(x) ((x) << 16)
+@@ -126,6 +132,8 @@
+ #define RIGHT_MARGIN 64
+ #define UPPER_MARGIN 32
+ #define LOWER_MARGIN 32
++#define WAIT_FOR_FRAME_DONE true
++#define NO_WAIT_FOR_FRAME_DONE false
+
+ static resource_size_t da8xx_fb_reg_base;
+ static struct resource *lcdc_regs;
+@@ -134,15 +142,16 @@ static irq_handler_t lcdc_irq_handler;
+
+ static inline unsigned int lcdc_read(unsigned int addr)
+ {
+- return (unsigned int)__raw_readl(da8xx_fb_reg_base + (addr));
++ return (unsigned int)readl(da8xx_fb_reg_base + (addr));
+ }
+
+ static inline void lcdc_write(unsigned int val, unsigned int addr)
+ {
+- __raw_writel(val, da8xx_fb_reg_base + (addr));
++ writel(val, da8xx_fb_reg_base + (addr));
+ }
+
+ struct da8xx_fb_par {
++ struct device *dev;
+ resource_size_t p_palette_base;
+ unsigned char *v_palette_base;
+ dma_addr_t vram_phys;
+@@ -152,15 +161,23 @@ struct da8xx_fb_par {
+ unsigned int dma_end;
+ struct clk *lcdc_clk;
+ int irq;
+- unsigned short pseudo_palette[16];
++ unsigned long pseudo_palette[32];
+ unsigned int palette_sz;
+ unsigned int pxl_clk;
+ int blank;
+ wait_queue_head_t vsync_wait;
+ int vsync_flag;
+ int vsync_timeout;
++ spinlock_t lock_for_chan_update;
++
++ /*
++ * LCDC has 2 ping pong DMA channels, channel 0
++ * and channel 1.
++ */
++ unsigned int which_dma_channel_done;
+ #ifdef CONFIG_CPU_FREQ
+ struct notifier_block freq_transition;
++ unsigned int lcd_fck_rate;
+ #endif
+ void (*panel_power_ctrl)(int);
+ };
+@@ -174,7 +191,7 @@ static struct fb_var_screeninfo da8xx_fb_var __devinitdata = {
+ .activate = 0,
+ .height = -1,
+ .width = -1,
+- .pixclock = 46666, /* 46us - AUO display */
++ .pixclock = 33333,/*Pico Sec*/
+ .accel_flags = 0,
+ .left_margin = LEFT_MARGIN,
+ .right_margin = RIGHT_MARGIN,
+@@ -209,6 +226,9 @@ struct da8xx_panel {
+ unsigned char invert_pxl_clk; /* Invert Pixel clock */
+ };
+
++static vsync_callback_t vsync_cb_handler;
++static void *vsync_cb_arg;
++
+ static struct da8xx_panel known_lcd_panels[] = {
+ /* Sharp LCD035Q3DG01 */
+ [0] = {
+@@ -232,12 +252,54 @@ static struct da8xx_panel known_lcd_panels[] = {
+ .hfp = 2,
+ .hbp = 2,
+ .hsw = 41,
+- .vfp = 2,
+- .vbp = 2,
++ .vfp = 3,
++ .vbp = 3,
+ .vsw = 10,
+ .pxl_clk = 7833600,
+ .invert_pxl_clk = 0,
+ },
++ /* ThreeFive S9700RTWV35TR */
++ [2] = {
++ .name = "TFC_S9700RTWV35TR_01B",
++ .width = 800,
++ .height = 480,
++ .hfp = 39,
++ .hbp = 39,
++ .hsw = 47,
++ .vfp = 13,
++ .vbp = 29,
++ .vsw = 2,
++ .pxl_clk = 30000000,
++ .invert_pxl_clk = 0,
++ },
++ [3] = {
++ /* 1024 x 768 @ 60 Hz Reduced blanking VESA CVT 0.79M3-R */
++ .name = "1024x768@60",
++ .width = 1024,
++ .height = 768,
++ .hfp = 48,
++ .hbp = 80,
++ .hsw = 32,
++ .vfp = 3,
++ .vbp = 15,
++ .vsw = 4,
++ .pxl_clk = 56000000,
++ .invert_pxl_clk = 0,
++ },
++ [4] = {
++ /* CDTech S035Q01 */
++ .name = "CDTech_S035Q01",
++ .width = 320,
++ .height = 240,
++ .hfp = 58,
++ .hbp = 21,
++ .hsw = 47,
++ .vfp = 23,
++ .vbp = 11,
++ .vsw = 2,
++ .pxl_clk = 8000000,
++ .invert_pxl_clk = 0,
++ },
+ };
+
+ /* Enable the Raster Engine of the LCD Controller */
+@@ -245,27 +307,58 @@ static inline void lcd_enable_raster(void)
+ {
+ u32 reg;
+
++ /* Put LCDC in reset for several cycles */
++ if (lcd_revision == LCD_VERSION_2)
++ lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);
++
++ mdelay(1);
++
+ /* Bring LCDC out of reset */
+ if (lcd_revision == LCD_VERSION_2)
+ lcdc_write(0, LCD_CLK_RESET_REG);
+
++ mdelay(1);
++
++	/* The above reset sequence does not reset the register context */
+ reg = lcdc_read(LCD_RASTER_CTRL_REG);
+ if (!(reg & LCD_RASTER_ENABLE))
+ lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
+ }
+
+ /* Disable the Raster Engine of the LCD Controller */
+-static inline void lcd_disable_raster(void)
++static inline void lcd_disable_raster(bool wait_for_frame_done)
+ {
+ u32 reg;
++ u32 loop_cnt = 0;
++ u32 stat;
++ u32 i = 0;
++
++ if (wait_for_frame_done)
++ loop_cnt = 5000;
+
+ reg = lcdc_read(LCD_RASTER_CTRL_REG);
+ if (reg & LCD_RASTER_ENABLE)
+ lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
+
+- if (lcd_revision == LCD_VERSION_2)
+- /* Write 1 to reset LCDC */
+- lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);
++ /* Wait for the current frame to complete */
++ do {
++ if (lcd_revision == LCD_VERSION_1)
++ stat = lcdc_read(LCD_STAT_REG);
++ else
++ stat = lcdc_read(LCD_RAW_STAT_REG);
++
++ mdelay(1);
++ } while (!(stat & BIT(0)) && (i++ < loop_cnt));
++
++ if (lcd_revision == LCD_VERSION_1)
++ lcdc_write(stat, LCD_STAT_REG);
++ else
++ lcdc_write(stat, LCD_MASKED_STAT_REG);
++
++ if ((loop_cnt != 0) && (i >= loop_cnt)) {
++ printk(KERN_ERR "LCD Controller timed out\n");
++ return;
++ }
+ }
+
+ static void lcd_blit(int load_mode, struct da8xx_fb_par *par)
+@@ -292,7 +385,8 @@ static void lcd_blit(int load_mode, struct da8xx_fb_par *par)
+ } else {
+ reg_int = lcdc_read(LCD_INT_ENABLE_SET_REG) |
+ LCD_V2_END_OF_FRAME0_INT_ENA |
+- LCD_V2_END_OF_FRAME1_INT_ENA;
++ LCD_V2_END_OF_FRAME1_INT_ENA |
++ LCD_V2_UNDERFLOW_INT_ENA | LCD_SYNC_LOST;
+ lcdc_write(reg_int, LCD_INT_ENABLE_SET_REG);
+ }
+ reg_dma |= LCD_DUAL_FRAME_BUFFER_ENABLE;
+@@ -329,8 +423,8 @@ static void lcd_blit(int load_mode, struct da8xx_fb_par *par)
+ lcd_enable_raster();
+ }
+
+-/* Configure the Burst Size of DMA */
+-static int lcd_cfg_dma(int burst_size)
++/* Configure the Burst Size and FIFO threshold of DMA */
++static int lcd_cfg_dma(int burst_size, int fifo_th)
+ {
+ u32 reg;
+
+@@ -354,6 +448,9 @@ static int lcd_cfg_dma(int burst_size)
+ default:
+ return -EINVAL;
+ }
++
++ reg |= (fifo_th << 8);
++
+ lcdc_write(reg, LCD_DMA_CTRL_REG);
+
+ return 0;
+@@ -510,6 +607,13 @@ static int lcd_cfg_frame_buffer(struct da8xx_fb_par *par, u32 width, u32 height,
+ reg = lcdc_read(LCD_RASTER_CTRL_REG) & ~(1 << 8);
+ if (raster_order)
+ reg |= LCD_RASTER_ORDER;
++
++ if (bpp == 24)
++ reg |= (LCD_TFT_MODE | LCD_V2_TFT_24BPP_MODE);
++ else if (bpp == 32)
++ reg |= (LCD_TFT_MODE | LCD_V2_TFT_24BPP_MODE
++ | LCD_V2_TFT_24BPP_UNPACK);
++
+ lcdc_write(reg, LCD_RASTER_CTRL_REG);
+
+ switch (bpp) {
+@@ -517,6 +621,8 @@ static int lcd_cfg_frame_buffer(struct da8xx_fb_par *par, u32 width, u32 height,
+ case 2:
+ case 4:
+ case 16:
++ case 24:
++ case 32:
+ par->palette_sz = 16 * 2;
+ break;
+
+@@ -575,6 +681,23 @@ static int fb_setcolreg(unsigned regno, unsigned red, unsigned green,
+ update_hw = 1;
+ palette[0] = 0x4000;
+ }
++ } else if (((info->var.bits_per_pixel == 32) && regno < 32) ||
++ ((info->var.bits_per_pixel == 24) && regno < 24)) {
++ red >>= (24 - info->var.red.length);
++ red <<= info->var.red.offset;
++
++ green >>= (24 - info->var.green.length);
++ green <<= info->var.green.offset;
++
++ blue >>= (24 - info->var.blue.length);
++ blue <<= info->var.blue.offset;
++
++ par->pseudo_palette[regno] = red | green | blue;
++
++ if (palette[0] != 0x4000) {
++ update_hw = 1;
++ palette[0] = 0x4000;
++ }
+ }
+
+ /* Update the palette in the h/w as needed. */
+@@ -587,7 +710,7 @@ static int fb_setcolreg(unsigned regno, unsigned red, unsigned green,
+ static void lcd_reset(struct da8xx_fb_par *par)
+ {
+ /* Disable the Raster if previously Enabled */
+- lcd_disable_raster();
++ lcd_disable_raster(NO_WAIT_FOR_FRAME_DONE);
+
+ /* DMA has to be disabled */
+ lcdc_write(0, LCD_DMA_CTRL_REG);
+@@ -636,8 +759,8 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
+ lcdc_write((lcdc_read(LCD_RASTER_TIMING_2_REG) &
+ ~LCD_INVERT_PIXEL_CLOCK), LCD_RASTER_TIMING_2_REG);
+
+- /* Configure the DMA burst size. */
+- ret = lcd_cfg_dma(cfg->dma_burst_sz);
++ /* Configure the DMA burst size and fifo threshold. */
++ ret = lcd_cfg_dma(cfg->dma_burst_sz, cfg->fifo_th);
+ if (ret < 0)
+ return ret;
+
+@@ -653,7 +776,9 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
+ if (ret < 0)
+ return ret;
+
+- if (QVGA != cfg->p_disp_panel->panel_type)
++
++ if ((QVGA != cfg->p_disp_panel->panel_type) &&
++ (WVGA != cfg->p_disp_panel->panel_type))
+ return -EINVAL;
+
+ if (cfg->bpp <= cfg->p_disp_panel->max_bpp &&
+@@ -676,6 +801,32 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
+ return 0;
+ }
+
++int register_vsync_cb(vsync_callback_t handler, void *arg, int idx)
++{
++ if ((vsync_cb_handler == NULL) && (vsync_cb_arg == NULL)) {
++ vsync_cb_handler = handler;
++ vsync_cb_arg = arg;
++ } else {
++ return -EEXIST;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(register_vsync_cb);
++
++int unregister_vsync_cb(vsync_callback_t handler, void *arg, int idx)
++{
++ if ((vsync_cb_handler == handler) && (vsync_cb_arg == arg)) {
++ vsync_cb_handler = NULL;
++ vsync_cb_arg = NULL;
++ } else {
++ return -ENXIO;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(unregister_vsync_cb);
++
+ /* IRQ handler for version 2 of LCDC */
+ static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg)
+ {
+@@ -684,7 +835,8 @@ static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg)
+ u32 reg_int;
+
+ if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) {
+- lcd_disable_raster();
++ printk(KERN_ERR "LCDC sync lost or underflow error occured\n");
++ lcd_disable_raster(NO_WAIT_FOR_FRAME_DONE);
+ lcdc_write(stat, LCD_MASKED_STAT_REG);
+ lcd_enable_raster();
+ } else if (stat & LCD_PL_LOAD_DONE) {
+@@ -694,7 +846,7 @@ static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg)
+ * interrupt via the following write to the status register. If
+ * this is done after then one gets multiple PL done interrupts.
+ */
+- lcd_disable_raster();
++ lcd_disable_raster(NO_WAIT_FOR_FRAME_DONE);
+
+ lcdc_write(stat, LCD_MASKED_STAT_REG);
+
+@@ -709,21 +861,27 @@ static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg)
+ lcdc_write(stat, LCD_MASKED_STAT_REG);
+
+ if (stat & LCD_END_OF_FRAME0) {
++ par->which_dma_channel_done = 0;
+ lcdc_write(par->dma_start,
+ LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
+ lcdc_write(par->dma_end,
+ LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
+ par->vsync_flag = 1;
+ wake_up_interruptible(&par->vsync_wait);
++ if (vsync_cb_handler)
++ vsync_cb_handler(vsync_cb_arg);
+ }
+
+ if (stat & LCD_END_OF_FRAME1) {
++ par->which_dma_channel_done = 1;
+ lcdc_write(par->dma_start,
+ LCD_DMA_FRM_BUF_BASE_ADDR_1_REG);
+ lcdc_write(par->dma_end,
+ LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG);
+ par->vsync_flag = 1;
+ wake_up_interruptible(&par->vsync_wait);
++ if (vsync_cb_handler)
++ vsync_cb_handler(vsync_cb_arg);
+ }
+ }
+
+@@ -739,9 +897,12 @@ static irqreturn_t lcdc_irq_handler_rev01(int irq, void *arg)
+ u32 reg_ras;
+
+ if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) {
+- lcd_disable_raster();
++ printk(KERN_ERR "LCDC sync lost or underflow error occured\n");
++ lcd_disable_raster(NO_WAIT_FOR_FRAME_DONE);
++ clk_disable(par->lcdc_clk);
+ lcdc_write(stat, LCD_STAT_REG);
+ lcd_enable_raster();
++ clk_enable(par->lcdc_clk);
+ } else if (stat & LCD_PL_LOAD_DONE) {
+ /*
+ * Must disable raster before changing state of any control bit.
+@@ -749,7 +910,7 @@ static irqreturn_t lcdc_irq_handler_rev01(int irq, void *arg)
+ * interrupt via the following write to the status register. If
+ * this is done after then one gets multiple PL done interrupts.
+ */
+- lcd_disable_raster();
++ lcd_disable_raster(NO_WAIT_FOR_FRAME_DONE);
+
+ lcdc_write(stat, LCD_STAT_REG);
+
+@@ -822,6 +983,24 @@ static int fb_check_var(struct fb_var_screeninfo *var,
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ break;
++ case 24:
++ var->red.offset = 16;
++ var->red.length = 8;
++ var->green.offset = 8;
++ var->green.length = 8;
++ var->blue.offset = 0;
++ var->blue.length = 8;
++ break;
++ case 32:
++ var->transp.offset = 24;
++ var->transp.length = 8;
++ var->red.offset = 16;
++ var->red.length = 8;
++ var->green.offset = 8;
++ var->green.length = 8;
++ var->blue.offset = 0;
++ var->blue.length = 8;
++ break;
+ default:
+ err = -EINVAL;
+ }
+@@ -840,11 +1019,12 @@ static int lcd_da8xx_cpufreq_transition(struct notifier_block *nb,
+ struct da8xx_fb_par *par;
+
+ par = container_of(nb, struct da8xx_fb_par, freq_transition);
+- if (val == CPUFREQ_PRECHANGE) {
+- lcd_disable_raster();
+- } else if (val == CPUFREQ_POSTCHANGE) {
+- lcd_calc_clk_divider(par);
+- lcd_enable_raster();
++ if (val == CPUFREQ_POSTCHANGE) {
++ if (par->lcd_fck_rate != clk_get_rate(par->lcdc_clk)) {
++ lcd_disable_raster(WAIT_FOR_FRAME_DONE);
++ lcd_calc_clk_divider(par);
++ lcd_enable_raster();
++ }
+ }
+
+ return 0;
+@@ -878,7 +1058,7 @@ static int __devexit fb_remove(struct platform_device *dev)
+ if (par->panel_power_ctrl)
+ par->panel_power_ctrl(0);
+
+- lcd_disable_raster();
++ lcd_disable_raster(WAIT_FOR_FRAME_DONE);
+ lcdc_write(0, LCD_RASTER_CTRL_REG);
+
+ /* disable DMA */
+@@ -891,8 +1071,8 @@ static int __devexit fb_remove(struct platform_device *dev)
+ dma_free_coherent(NULL, par->vram_size, par->vram_virt,
+ par->vram_phys);
+ free_irq(par->irq, par);
+- clk_disable(par->lcdc_clk);
+- clk_put(par->lcdc_clk);
++ pm_runtime_put_sync(&dev->dev);
++ pm_runtime_disable(&dev->dev);
+ framebuffer_release(info);
+ iounmap((void __iomem *)da8xx_fb_reg_base);
+ release_mem_region(lcdc_regs->start, resource_size(lcdc_regs));
+@@ -931,6 +1111,14 @@ static int fb_wait_for_vsync(struct fb_info *info)
+ if (ret == 0)
+ return -ETIMEDOUT;
+
++ if (par->panel_power_ctrl) {
++ /* Switch off panel power and backlight */
++ par->panel_power_ctrl(0);
++
++ /* Switch on panel power and backlight */
++ par->panel_power_ctrl(1);
++ }
++
+ return 0;
+ }
+
+@@ -991,7 +1179,7 @@ static int cfb_blank(int blank, struct fb_info *info)
+ if (par->panel_power_ctrl)
+ par->panel_power_ctrl(0);
+
+- lcd_disable_raster();
++ lcd_disable_raster(WAIT_FOR_FRAME_DONE);
+ break;
+ default:
+ ret = -EINVAL;
+@@ -1013,6 +1201,7 @@ static int da8xx_pan_display(struct fb_var_screeninfo *var,
+ struct fb_fix_screeninfo *fix = &fbi->fix;
+ unsigned int end;
+ unsigned int start;
++ unsigned long irq_flags;
+
+ if (var->xoffset != fbi->var.xoffset ||
+ var->yoffset != fbi->var.yoffset) {
+@@ -1030,6 +1219,21 @@ static int da8xx_pan_display(struct fb_var_screeninfo *var,
+ end = start + fbi->var.yres * fix->line_length - 1;
+ par->dma_start = start;
+ par->dma_end = end;
++ spin_lock_irqsave(&par->lock_for_chan_update,
++ irq_flags);
++ if (par->which_dma_channel_done == 0) {
++ lcdc_write(par->dma_start,
++ LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
++ lcdc_write(par->dma_end,
++ LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
++ } else if (par->which_dma_channel_done == 1) {
++ lcdc_write(par->dma_start,
++ LCD_DMA_FRM_BUF_BASE_ADDR_1_REG);
++ lcdc_write(par->dma_end,
++ LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG);
++ }
++ spin_unlock_irqrestore(&par->lock_for_chan_update,
++ irq_flags);
+ }
+ }
+
+@@ -1090,9 +1294,11 @@ static int __devinit fb_probe(struct platform_device *device)
+ ret = -ENODEV;
+ goto err_ioremap;
+ }
+- ret = clk_enable(fb_clk);
+- if (ret)
+- goto err_clk_put;
++
++ pm_runtime_irq_safe(&device->dev);
++ pm_runtime_enable(&device->dev);
++ pm_runtime_get_sync(&device->dev);
++
+
+ /* Determine LCD IP Version */
+ switch (lcdc_read(LCD_PID_REG)) {
+@@ -1100,6 +1306,7 @@ static int __devinit fb_probe(struct platform_device *device)
+ lcd_revision = LCD_VERSION_1;
+ break;
+ case 0x4F200800:
++ case 0x4F201000:
+ lcd_revision = LCD_VERSION_2;
+ break;
+ default:
+@@ -1120,7 +1327,7 @@ static int __devinit fb_probe(struct platform_device *device)
+ if (i == ARRAY_SIZE(known_lcd_panels)) {
+ dev_err(&device->dev, "GLCD: No valid panel found\n");
+ ret = -ENODEV;
+- goto err_clk_disable;
++ goto err_pm_runtime_disable;
+ } else
+ dev_info(&device->dev, "GLCD: Found %s panel\n",
+ fb_pdata->type);
+@@ -1132,11 +1339,15 @@ static int __devinit fb_probe(struct platform_device *device)
+ if (!da8xx_fb_info) {
+ dev_dbg(&device->dev, "Memory allocation failed for fb_info\n");
+ ret = -ENOMEM;
+- goto err_clk_disable;
++ goto err_pm_runtime_disable;
+ }
+
+ par = da8xx_fb_info->par;
++ par->dev = &device->dev;
+ par->lcdc_clk = fb_clk;
++#ifdef CONFIG_CPU_FREQ
++ par->lcd_fck_rate = clk_get_rate(fb_clk);
++#endif
+ par->pxl_clk = lcdc_info->pxl_clk;
+ if (fb_pdata->panel_power_ctrl) {
+ par->panel_power_ctrl = fb_pdata->panel_power_ctrl;
+@@ -1210,6 +1421,11 @@ static int __devinit fb_probe(struct platform_device *device)
+ da8xx_fb_var.hsync_len = lcdc_info->hsw;
+ da8xx_fb_var.vsync_len = lcdc_info->vsw;
+
++ da8xx_fb_var.right_margin = lcdc_info->hfp;
++ da8xx_fb_var.left_margin = lcdc_info->hbp;
++ da8xx_fb_var.lower_margin = lcdc_info->vfp;
++ da8xx_fb_var.upper_margin = lcdc_info->vbp;
++
+ /* Initialize fbinfo */
+ da8xx_fb_info->flags = FBINFO_FLAG_DEFAULT;
+ da8xx_fb_info->fix = da8xx_fb_fix;
+@@ -1233,6 +1449,8 @@ static int __devinit fb_probe(struct platform_device *device)
+ /* initialize the vsync wait queue */
+ init_waitqueue_head(&par->vsync_wait);
+ par->vsync_timeout = HZ / 5;
++ par->which_dma_channel_done = -1;
++ spin_lock_init(&par->lock_for_chan_update);
+
+ /* Register the Frame Buffer */
+ if (register_framebuffer(da8xx_fb_info) < 0) {
+@@ -1264,8 +1482,8 @@ static int __devinit fb_probe(struct platform_device *device)
+ irq_freq:
+ #ifdef CONFIG_CPU_FREQ
+ lcd_da8xx_cpufreq_deregister(par);
+-#endif
+ err_cpu_freq:
++#endif
+ unregister_framebuffer(da8xx_fb_info);
+
+ err_dealloc_cmap:
+@@ -1281,13 +1499,12 @@ err_release_fb_mem:
+ err_release_fb:
+ framebuffer_release(da8xx_fb_info);
+
+-err_clk_disable:
+- clk_disable(fb_clk);
+-
+-err_clk_put:
+- clk_put(fb_clk);
++err_pm_runtime_disable:
++ pm_runtime_put_sync(&device->dev);
++ pm_runtime_disable(&device->dev);
+
+ err_ioremap:
++
+ iounmap((void __iomem *)da8xx_fb_reg_base);
+
+ err_request_mem:
+@@ -1297,6 +1514,64 @@ err_request_mem:
+ }
+
+ #ifdef CONFIG_PM
++
++struct lcdc_context {
++ u32 clk_enable;
++ u32 ctrl;
++ u32 dma_ctrl;
++ u32 raster_timing_0;
++ u32 raster_timing_1;
++ u32 raster_timing_2;
++ u32 int_enable_set;
++ u32 dma_frm_buf_base_addr_0;
++ u32 dma_frm_buf_ceiling_addr_0;
++ u32 dma_frm_buf_base_addr_1;
++ u32 dma_frm_buf_ceiling_addr_1;
++ u32 raster_ctrl;
++} reg_context;
++
++static void lcd_context_save(void)
++{
++ reg_context.clk_enable = lcdc_read(LCD_CLK_ENABLE_REG);
++ reg_context.ctrl = lcdc_read(LCD_CTRL_REG);
++ reg_context.dma_ctrl = lcdc_read(LCD_DMA_CTRL_REG);
++ reg_context.raster_timing_0 = lcdc_read(LCD_RASTER_TIMING_0_REG);
++ reg_context.raster_timing_1 = lcdc_read(LCD_RASTER_TIMING_1_REG);
++ reg_context.raster_timing_2 = lcdc_read(LCD_RASTER_TIMING_2_REG);
++ reg_context.int_enable_set = lcdc_read(LCD_INT_ENABLE_SET_REG);
++ reg_context.dma_frm_buf_base_addr_0 =
++ lcdc_read(LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
++ reg_context.dma_frm_buf_ceiling_addr_0 =
++ lcdc_read(LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
++ reg_context.dma_frm_buf_base_addr_1 =
++ lcdc_read(LCD_DMA_FRM_BUF_BASE_ADDR_1_REG);
++ reg_context.dma_frm_buf_ceiling_addr_1 =
++ lcdc_read(LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG);
++ reg_context.raster_ctrl = lcdc_read(LCD_RASTER_CTRL_REG);
++ return;
++}
++
++static void lcd_context_restore(void)
++{
++ lcdc_write(reg_context.clk_enable, LCD_CLK_ENABLE_REG);
++ lcdc_write(reg_context.ctrl, LCD_CTRL_REG);
++ lcdc_write(reg_context.dma_ctrl, LCD_DMA_CTRL_REG);
++ lcdc_write(reg_context.raster_timing_0, LCD_RASTER_TIMING_0_REG);
++ lcdc_write(reg_context.raster_timing_1, LCD_RASTER_TIMING_1_REG);
++ lcdc_write(reg_context.raster_timing_2, LCD_RASTER_TIMING_2_REG);
++ lcdc_write(reg_context.int_enable_set, LCD_INT_ENABLE_SET_REG);
++ lcdc_write(reg_context.dma_frm_buf_base_addr_0,
++ LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
++ lcdc_write(reg_context.dma_frm_buf_ceiling_addr_0,
++ LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
++ lcdc_write(reg_context.dma_frm_buf_base_addr_1,
++ LCD_DMA_FRM_BUF_BASE_ADDR_1_REG);
++ lcdc_write(reg_context.dma_frm_buf_ceiling_addr_1,
++ LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG);
++ lcdc_write(reg_context.raster_ctrl, LCD_RASTER_CTRL_REG);
++ return;
++}
++
+ static int fb_suspend(struct platform_device *dev, pm_message_t state)
+ {
+ struct fb_info *info = platform_get_drvdata(dev);
+@@ -1307,8 +1582,10 @@ static int fb_suspend(struct platform_device *dev, pm_message_t state)
+ par->panel_power_ctrl(0);
+
+ fb_set_suspend(info, 1);
+- lcd_disable_raster();
+- clk_disable(par->lcdc_clk);
++ lcd_disable_raster(WAIT_FOR_FRAME_DONE);
++ lcd_context_save();
++
++ pm_runtime_put(&dev->dev);
+ console_unlock();
+
+ return 0;
+@@ -1319,11 +1596,16 @@ static int fb_resume(struct platform_device *dev)
+ struct da8xx_fb_par *par = info->par;
+
+ console_lock();
++
++ pm_runtime_get_sync(&dev->dev);
++
++ msleep(1);
++ lcd_context_restore();
++ lcd_enable_raster();
++
+ if (par->panel_power_ctrl)
+ par->panel_power_ctrl(1);
+
+- clk_enable(par->lcdc_clk);
+- lcd_enable_raster();
+ fb_set_suspend(info, 0);
+ console_unlock();
+
+diff --git a/drivers/video/st7735fb.c b/drivers/video/st7735fb.c
+new file mode 100644
+index 0000000..500cc88
+--- /dev/null
++++ b/drivers/video/st7735fb.c
+@@ -0,0 +1,516 @@
++/*
++ * linux/drivers/video/st7735fb.c -- FB driver for ST7735 LCD controller
++ * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
++ *
++ * Copyright (C) 2011, Matt Porter
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive for
++ * more details.
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/vmalloc.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/fb.h>
++#include <linux/gpio.h>
++#include <linux/spi/spi.h>
++#include <linux/delay.h>
++#include <linux/uaccess.h>
++
++#include <video/st7735fb.h>
++
++static struct st7735_function st7735_cfg_script[] = {
++ { ST7735_START, ST7735_START},
++ { ST7735_CMD, ST7735_SWRESET},
++ { ST7735_DELAY, 150},
++ { ST7735_CMD, ST7735_SLPOUT},
++ { ST7735_DELAY, 500},
++ { ST7735_CMD, ST7735_FRMCTR1},
++ { ST7735_DATA, 0x01},
++ { ST7735_DATA, 0x2c},
++ { ST7735_DATA, 0x2d},
++ { ST7735_CMD, ST7735_FRMCTR2},
++ { ST7735_DATA, 0x01},
++ { ST7735_DATA, 0x2c},
++ { ST7735_DATA, 0x2d},
++ { ST7735_CMD, ST7735_FRMCTR3},
++ { ST7735_DATA, 0x01},
++ { ST7735_DATA, 0x2c},
++ { ST7735_DATA, 0x2d},
++ { ST7735_DATA, 0x01},
++ { ST7735_DATA, 0x2c},
++ { ST7735_DATA, 0x2d},
++ { ST7735_CMD, ST7735_INVCTR},
++ { ST7735_DATA, 0x07},
++ { ST7735_CMD, ST7735_PWCTR1},
++ { ST7735_DATA, 0xa2},
++ { ST7735_DATA, 0x02},
++ { ST7735_DATA, 0x84},
++ { ST7735_CMD, ST7735_PWCTR2},
++ { ST7735_DATA, 0xc5},
++ { ST7735_CMD, ST7735_PWCTR3},
++ { ST7735_DATA, 0x0a},
++ { ST7735_DATA, 0x00},
++ { ST7735_CMD, ST7735_PWCTR4},
++ { ST7735_DATA, 0x8a},
++ { ST7735_DATA, 0x2a},
++ { ST7735_CMD, ST7735_PWCTR5},
++ { ST7735_DATA, 0x8a},
++ { ST7735_DATA, 0xee},
++ { ST7735_CMD, ST7735_VMCTR1},
++ { ST7735_DATA, 0x0e},
++ { ST7735_CMD, ST7735_INVOFF},
++ { ST7735_CMD, ST7735_MADCTL},
++ { ST7735_DATA, 0xc8},
++ { ST7735_CMD, ST7735_COLMOD},
++ { ST7735_DATA, 0x05},
++ { ST7735_CMD, ST7735_CASET},
++ { ST7735_DATA, 0x00},
++ { ST7735_DATA, 0x00},
++ { ST7735_DATA, 0x00},
++ { ST7735_DATA, 0x00},
++ { ST7735_DATA, 0x7f},
++ { ST7735_CMD, ST7735_RASET},
++ { ST7735_DATA, 0x00},
++ { ST7735_DATA, 0x00},
++ { ST7735_DATA, 0x00},
++ { ST7735_DATA, 0x00},
++ { ST7735_DATA, 0x9f},
++ { ST7735_CMD, ST7735_GMCTRP1},
++ { ST7735_DATA, 0x02},
++ { ST7735_DATA, 0x1c},
++ { ST7735_DATA, 0x07},
++ { ST7735_DATA, 0x12},
++ { ST7735_DATA, 0x37},
++ { ST7735_DATA, 0x32},
++ { ST7735_DATA, 0x29},
++ { ST7735_DATA, 0x2d},
++ { ST7735_DATA, 0x29},
++ { ST7735_DATA, 0x25},
++ { ST7735_DATA, 0x2b},
++ { ST7735_DATA, 0x39},
++ { ST7735_DATA, 0x00},
++ { ST7735_DATA, 0x01},
++ { ST7735_DATA, 0x03},
++ { ST7735_DATA, 0x10},
++ { ST7735_CMD, ST7735_GMCTRN1},
++ { ST7735_DATA, 0x03},
++ { ST7735_DATA, 0x1d},
++ { ST7735_DATA, 0x07},
++ { ST7735_DATA, 0x06},
++ { ST7735_DATA, 0x2e},
++ { ST7735_DATA, 0x2c},
++ { ST7735_DATA, 0x29},
++ { ST7735_DATA, 0x2d},
++ { ST7735_DATA, 0x2e},
++ { ST7735_DATA, 0x2e},
++ { ST7735_DATA, 0x37},
++ { ST7735_DATA, 0x3f},
++ { ST7735_DATA, 0x00},
++ { ST7735_DATA, 0x00},
++ { ST7735_DATA, 0x02},
++ { ST7735_DATA, 0x10},
++ { ST7735_CMD, ST7735_DISPON},
++ { ST7735_DELAY, 100},
++ { ST7735_CMD, ST7735_NORON},
++ { ST7735_DELAY, 10},
++ { ST7735_END, ST7735_END},
++};
++
++static struct fb_fix_screeninfo st7735fb_fix __devinitdata = {
++ .id = "ST7735",
++ .type = FB_TYPE_PACKED_PIXELS,
++ .visual = FB_VISUAL_PSEUDOCOLOR,
++ .xpanstep = 0,
++ .ypanstep = 0,
++ .ywrapstep = 0,
++ .line_length = WIDTH*BPP/8,
++ .accel = FB_ACCEL_NONE,
++};
++
++static struct fb_var_screeninfo st7735fb_var __devinitdata = {
++ .xres = WIDTH,
++ .yres = HEIGHT,
++ .xres_virtual = WIDTH,
++ .yres_virtual = HEIGHT,
++ .bits_per_pixel = BPP,
++ .nonstd = 1,
++};
++
++static int st7735_write(struct st7735fb_par *par, u8 data)
++{
++ u8 txbuf[2]; /* allocation from stack must go */
++
++ txbuf[0] = data;
++
++ return spi_write(par->spi, &txbuf[0], 1);
++}
++
++static void st7735_write_data(struct st7735fb_par *par, u8 data)
++{
++ int ret = 0;
++
++ /* Set data mode */
++ gpio_set_value(par->dc, 1);
++
++ ret = st7735_write(par, data);
++ if (ret < 0)
++ pr_err("%s: write data %02x failed with status %d\n",
++ par->info->fix.id, data, ret);
++}
++
++static int st7735_write_data_buf(struct st7735fb_par *par,
++ u8 *txbuf, int size)
++{
++ /* Set data mode */
++ gpio_set_value(par->dc, 1);
++
++ /* Write entire buffer */
++ return spi_write(par->spi, txbuf, size);
++}
++
++static void st7735_write_cmd(struct st7735fb_par *par, u8 data)
++{
++ int ret = 0;
++
++ /* Set command mode */
++ gpio_set_value(par->dc, 0);
++
++ ret = st7735_write(par, data);
++ if (ret < 0)
++ pr_err("%s: write command %02x failed with status %d\n",
++ par->info->fix.id, data, ret);
++}
++
++static void st7735_run_cfg_script(struct st7735fb_par *par)
++{
++ int i = 0;
++ int end_script = 0;
++
++ do {
++ switch (st7735_cfg_script[i].cmd)
++ {
++ case ST7735_START:
++ break;
++ case ST7735_CMD:
++ st7735_write_cmd(par,
++ st7735_cfg_script[i].data & 0xff);
++ break;
++ case ST7735_DATA:
++ st7735_write_data(par,
++ st7735_cfg_script[i].data & 0xff);
++ break;
++ case ST7735_DELAY:
++ mdelay(st7735_cfg_script[i].data);
++ break;
++ case ST7735_END:
++ end_script = 1;
++ }
++ i++;
++ } while (!end_script);
++}
++
++static void st7735_set_addr_win(struct st7735fb_par *par,
++ int xs, int ys, int xe, int ye)
++{
++ st7735_write_cmd(par, ST7735_CASET);
++ st7735_write_data(par, 0x00);
++ st7735_write_data(par, xs+2);
++ st7735_write_data(par, 0x00);
++ st7735_write_data(par, xe+2);
++ st7735_write_cmd(par, ST7735_RASET);
++ st7735_write_data(par, 0x00);
++ st7735_write_data(par, ys+1);
++ st7735_write_data(par, 0x00);
++ st7735_write_data(par, ye+1);
++}
++
++static void st7735_reset(struct st7735fb_par *par)
++{
++ /* Reset controller */
++ gpio_set_value(par->rst, 0);
++ udelay(10);
++ gpio_set_value(par->rst, 1);
++ mdelay(120);
++}
++
++static void st7735fb_update_display(struct st7735fb_par *par)
++{
++ int ret = 0;
++ u8 *vmem = par->info->screen_base;
++
++ /*
++ TODO:
++ Allow a subset of pages to be passed in
++ (for deferred I/O). Check pages against
++ pan display settings to see if they
++ should be updated.
++ */
++ /* For now, just write the full 40KiB on each update */
++
++ /* Set row/column data window */
++ st7735_set_addr_win(par, 0, 0, WIDTH-1, HEIGHT-1);
++
++ /* Internal RAM write command */
++ st7735_write_cmd(par, ST7735_RAMWR);
++
++ /* Blast framebuffer to ST7735 internal display RAM */
++ ret = st7735_write_data_buf(par, vmem, WIDTH*HEIGHT*BPP/8);
++ if (ret < 0)
++ pr_err("%s: spi_write failed to update display buffer\n",
++ par->info->fix.id);
++}
++
++static void st7735fb_deferred_io(struct fb_info *info,
++ struct list_head *pagelist)
++{
++ st7735fb_update_display(info->par);
++}
++
++static int st7735fb_init_display(struct st7735fb_par *par)
++{
++ /* TODO: Need some error checking on gpios */
++
++ /* Request GPIOs and initialize to default values */
++ gpio_request_one(par->rst, GPIOF_OUT_INIT_HIGH,
++ "ST7735 Reset Pin");
++ gpio_request_one(par->dc, GPIOF_OUT_INIT_LOW,
++ "ST7735 Data/Command Pin");
++
++ st7735_reset(par);
++
++ st7735_run_cfg_script(par);
++
++ return 0;
++}
++
++void st7735fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
++{
++ struct st7735fb_par *par = info->par;
++
++ sys_fillrect(info, rect);
++
++ st7735fb_update_display(par);
++}
++
++void st7735fb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
++{
++ struct st7735fb_par *par = info->par;
++
++ sys_copyarea(info, area);
++
++ st7735fb_update_display(par);
++}
++
++void st7735fb_imageblit(struct fb_info *info, const struct fb_image *image)
++{
++ struct st7735fb_par *par = info->par;
++
++ sys_imageblit(info, image);
++
++ st7735fb_update_display(par);
++}
++
++static ssize_t st7735fb_write(struct fb_info *info, const char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ struct st7735fb_par *par = info->par;
++ unsigned long p = *ppos;
++ void *dst;
++ int err = 0;
++ unsigned long total_size;
++
++ if (info->state != FBINFO_STATE_RUNNING)
++ return -EPERM;
++
++ total_size = info->fix.smem_len;
++
++ if (p > total_size)
++ return -EFBIG;
++
++ if (count > total_size) {
++ err = -EFBIG;
++ count = total_size;
++ }
++
++ if (count + p > total_size) {
++ if (!err)
++ err = -ENOSPC;
++
++ count = total_size - p;
++ }
++
++ dst = (void __force *) (info->screen_base + p);
++
++ if (copy_from_user(dst, buf, count))
++ err = -EFAULT;
++
++ if (!err)
++ *ppos += count;
++
++ st7735fb_update_display(par);
++
++ return (err) ? err : count;
++}
++
++static struct fb_ops st7735fb_ops = {
++ .owner = THIS_MODULE,
++ .fb_read = fb_sys_read,
++ .fb_write = st7735fb_write,
++ .fb_fillrect = st7735fb_fillrect,
++ .fb_copyarea = st7735fb_copyarea,
++ .fb_imageblit = st7735fb_imageblit,
++};
++
++static struct fb_deferred_io st7735fb_defio = {
++ .delay = HZ,
++ .deferred_io = st7735fb_deferred_io,
++};
++
++static int __devinit st7735fb_probe (struct spi_device *spi)
++{
++ int chip = spi_get_device_id(spi)->driver_data;
++ struct st7735fb_platform_data *pdata = spi->dev.platform_data;
++ int vmem_size = WIDTH*HEIGHT*BPP/8;
++ u8 *vmem;
++ struct fb_info *info;
++ struct st7735fb_par *par;
++ int retval = -ENOMEM;
++
++ if (chip != ST7735_DISPLAY_AF_TFT18) {
++ pr_err("%s: only the %s device is supported\n", DRVNAME,
++ to_spi_driver(spi->dev.driver)->id_table->name);
++ return -EINVAL;
++ }
++
++ if (!pdata) {
++ pr_err("%s: platform data required for rst and dc info\n",
++ DRVNAME);
++ return -EINVAL;
++ }
++
++ vmem = vzalloc(vmem_size);
++ if (!vmem)
++ return retval;
++
++ info = framebuffer_alloc(sizeof(struct st7735fb_par), &spi->dev);
++ if (!info)
++ goto fballoc_fail;
++
++ info->screen_base = (u8 __force __iomem *)vmem;
++ info->fbops = &st7735fb_ops;
++ info->fix = st7735fb_fix;
++ info->fix.smem_len = vmem_size;
++ info->var = st7735fb_var;
++ /* Choose any packed pixel format as long as it's RGB565 */
++ info->var.red.offset = 11;
++ info->var.red.length = 5;
++ info->var.green.offset = 5;
++ info->var.green.length = 6;
++ info->var.blue.offset = 0;
++ info->var.blue.length = 5;
++ info->var.transp.offset = 0;
++ info->var.transp.length = 0;
++ info->flags = FBINFO_FLAG_DEFAULT |
++#ifdef __LITTLE_ENDIAN
++ FBINFO_FOREIGN_ENDIAN |
++#endif
++ FBINFO_VIRTFB;
++
++ info->fbdefio = &st7735fb_defio;
++ fb_deferred_io_init(info);
++
++ par = info->par;
++ par->info = info;
++ par->spi = spi;
++ par->rst = pdata->rst_gpio;
++ par->dc = pdata->dc_gpio;
++
++ retval = register_framebuffer(info);
++ if (retval < 0)
++ goto fbreg_fail;
++
++ spi_set_drvdata(spi, info);
++
++ retval = st7735fb_init_display(par);
++ if (retval < 0)
++ goto init_fail;
++
++ printk(KERN_INFO
++ "fb%d: %s frame buffer device,\n\tusing %d KiB of video memory\n",
++ info->node, info->fix.id, vmem_size);
++
++ return 0;
++
++
++ /* TODO: release gpios on fail */
++init_fail:
++ spi_set_drvdata(spi, NULL);
++
++fbreg_fail:
++ framebuffer_release(info);
++
++fballoc_fail:
++ vfree(vmem);
++
++ return retval;
++}
++
++static int __devexit st7735fb_remove(struct spi_device *spi)
++{
++ struct fb_info *info = spi_get_drvdata(spi);
++
++ spi_set_drvdata(spi, NULL);
++
++ if (info) {
++ unregister_framebuffer(info);
++ vfree(info->screen_base);
++ framebuffer_release(info);
++ }
++
++ /* TODO: release gpios */
++
++ return 0;
++}
++
++static const struct spi_device_id st7735fb_ids[] = {
++ { "adafruit_tft18", ST7735_DISPLAY_AF_TFT18 },
++ { },
++};
++
++MODULE_DEVICE_TABLE(spi, st7735fb_ids);
++
++static struct spi_driver st7735fb_driver = {
++ .driver = {
++ .name = "st7735fb",
++ .owner = THIS_MODULE,
++ },
++ .id_table = st7735fb_ids,
++ .probe = st7735fb_probe,
++ .remove = __devexit_p(st7735fb_remove),
++};
++
++static int __init st7735fb_init(void)
++{
++ return spi_register_driver(&st7735fb_driver);
++}
++
++static void __exit st7735fb_exit(void)
++{
++ spi_unregister_driver(&st7735fb_driver);
++}
++
++/* ------------------------------------------------------------------------- */
++
++module_init(st7735fb_init);
++module_exit(st7735fb_exit);
++
++MODULE_DESCRIPTION("FB driver for ST7735 display controller");
++MODULE_AUTHOR("Matt Porter");
++MODULE_LICENSE("GPL");
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 3ce7613..d0dc109 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1881,7 +1881,7 @@ static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
+ * a[0] = 'a';
+ * truncate(f, 4096);
+ * we have in the page first buffer_head mapped via page_mkwrite call back
+- * but other bufer_heads would be unmapped but dirty(dirty done via the
++ * but other buffer_heads would be unmapped but dirty (dirty done via the
+ * do_wp_page). So writepage should write the first block. If we modify
+ * the mmap area beyond 1024 we will again get a page_fault and the
+ * page_mkwrite callback will do the block allocation and mark the
+diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
+index 5c93ffc..05f0754 100644
+--- a/fs/jbd/checkpoint.c
++++ b/fs/jbd/checkpoint.c
+@@ -554,7 +554,7 @@ int cleanup_journal_tail(journal_t *journal)
+ * them.
+ *
+ * Called with j_list_lock held.
+- * Returns number of bufers reaped (for debug)
++ * Returns number of buffers reaped (for debug)
+ */
+
+ static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
+diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
+index 16a698b..d49d202 100644
+--- a/fs/jbd2/checkpoint.c
++++ b/fs/jbd2/checkpoint.c
+@@ -565,7 +565,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
+ *
+ * Called with the journal locked.
+ * Called with j_list_lock held.
+- * Returns number of bufers reaped (for debug)
++ * Returns number of buffers reaped (for debug)
+ */
+
+ static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
+diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
+index cf0ac05..33e06d2 100644
+--- a/fs/xfs/xfs_buf.c
++++ b/fs/xfs/xfs_buf.c
+@@ -1370,7 +1370,7 @@ restart:
+ goto restart;
+ }
+ /*
+- * clear the LRU reference count so the bufer doesn't get
++ * clear the LRU reference count so the buffer doesn't get
+ * ignored in xfs_buf_rele().
+ */
+ atomic_set(&bp->b_lru_ref, 0);
+diff --git a/include/linux/can/platform/d_can.h b/include/linux/can/platform/d_can.h
+new file mode 100644
+index 0000000..fe9df47
+--- /dev/null
++++ b/include/linux/can/platform/d_can.h
+@@ -0,0 +1,40 @@
++/*
++ * D_CAN controller driver platform header
++ *
++ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * Bosch D_CAN controller is compliant to CAN protocol version 2.0 part A and B.
++ * Bosch D_CAN user manual can be obtained from:
++ * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/can/
++ * d_can_users_manual_111.pdf
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __CAN_PLATFORM_TI_D_CAN_H__
++#define __CAN_PLATFORM_TI_D_CAN_H__
++
++/**
++ * struct d_can_platform_data - DCAN Platform Data
++ *
++ * @num_of_msg_objs: Number of message objects
++ * @dma_support: DMA support is required/not
++ * @ram_init: DCAN RAM initialization
++ *
++ * Platform data structure to get all platform specific settings.
++ * This structure also accounts for the fact that the IP may have different
++ * RAM and mailbox offsets for different SoCs.
++ */
++struct d_can_platform_data {
++ u32 num_of_msg_objs;
++ bool dma_support;
++ void (*ram_init) (unsigned int, unsigned int);
++};
++#endif
+diff --git a/include/linux/cpsw.h b/include/linux/cpsw.h
+new file mode 100644
+index 0000000..f1bb9d3
+--- /dev/null
++++ b/include/linux/cpsw.h
+@@ -0,0 +1,54 @@
++#ifndef __CPSW_H__
++#define __CPSW_H__
++
++#include <linux/if_ether.h>
++
++enum {
++ CPSW_VERSION_1 = 0, /* TI8148 */
++ CPSW_VERSION_2, /* AM33XX */
++};
++
++struct cpsw_slave_data {
++ u32 slave_reg_ofs;
++ u32 sliver_reg_ofs;
++ const char *phy_id;
++ int phy_if;
++ u8 mac_addr[ETH_ALEN];
++};
++
++struct cpsw_platform_data {
++ u32 ss_reg_ofs; /* Subsystem control register offset */
++ int channels; /* number of cpdma channels (symmetric) */
++ u32 cpdma_reg_ofs; /* cpdma register offset */
++
++ int slaves; /* number of slave cpgmac ports */
++ struct cpsw_slave_data *slave_data;
++
++ u32 ale_reg_ofs; /* address lookup engine reg offset */
++ int ale_entries; /* ale table size */
++
++ u32 host_port_reg_ofs; /* cpsw cpdma host port registers */
++
++ u32 hw_stats_reg_ofs; /* cpsw hardware statistics counters */
++
++ u32 bd_ram_ofs; /* embedded buffer descriptor RAM offset*/
++ u32 bd_ram_size; /*buffer descriptor ram size */
++ u32 hw_ram_addr; /*if the HW address for BD RAM is different */
++
++ u8 mac_addr[ETH_ALEN];
++
++ int rx_descs;
++
++ void (*phy_control)(bool enabled);
++
++ u32 mac_control;
++
++ u32 gigabit_en; /* Is gigabit capable AND enabled */
++ u32 rmii_en; /* Is RMII mode capable AND enabled */
++ u32 host_port_num; /* The port number for the host port */
++
++ bool no_bd_ram; /* no embedded BD ram*/
++ u8 version;
++};
++
++#endif /* __CPSW_H__ */
+diff --git a/include/linux/i2c-omap.h b/include/linux/i2c-omap.h
+index 92a0dc7..fd38249 100644
+--- a/include/linux/i2c-omap.h
++++ b/include/linux/i2c-omap.h
+@@ -35,6 +35,7 @@ struct omap_i2c_bus_platform_data {
+ u32 rev;
+ u32 flags;
+ void (*set_mpu_wkup_lat)(struct device *dev, long set);
++ int (*device_reset) (struct device *dev);
+ };
+
+ #endif
+diff --git a/include/linux/input/ti_tscadc.h b/include/linux/input/ti_tscadc.h
+new file mode 100644
+index 0000000..fc239c6
+--- /dev/null
++++ b/include/linux/input/ti_tscadc.h
+@@ -0,0 +1,26 @@
++/**
++ * struct tsc_data Touchscreen wire configuration
++ * @wires: Wires refer to application modes
++ * i.e. 4/5/8 wire touchscreen support
++ * on the platform
++ * @analog_input: Analog inputs refer to the order in which the
++ * connections are made to the AFE. If the connections
++ *			are: XPUL = AN0, XNUR = AN1, YPLL = AN2,
++ *			YNLR = AN3, then this variable is set to 1.
++ *			Else, if the connections are:
++ *			XPUL = AN0, YPLL = AN1, XNUR = AN2,
++ *			YNLR = AN3, then set this variable
++ *			to 0.
++ * @x_plate_resistance: X plate resistance.
++ */
++#include <linux/device.h>
++
++#define TI_TSCADC_TSCMODE 0
++#define TI_TSCADC_GENMODE 1
++
++struct tsc_data {
++ int wires;
++ int analog_input;
++ int x_plate_resistance;
++ int mode;
++};
+diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h
+index f1664c6..32d4912 100644
+--- a/include/linux/lis3lv02d.h
++++ b/include/linux/lis3lv02d.h
+@@ -25,6 +25,7 @@
+ * @axis_x: Sensor orientation remapping for x-axis
+ * @axis_y: Sensor orientation remapping for y-axis
+ * @axis_z: Sensor orientation remapping for z-axis
++ * @g_range: Value contains the acceleration range, +/-2, +/-4 and +/-8
+ * @driver_features: Enable bits for different features. Disabled by default
+ * @default_rate: Default sampling rate. 0 means reset default
+ * @setup_resources: Interrupt line setup call back function
+@@ -113,6 +114,7 @@ struct lis3lv02d_platform_data {
+ s8 axis_x;
+ s8 axis_y;
+ s8 axis_z;
++ u8 g_range;
+ #define LIS3_USE_BLOCK_READ 0x02
+ u16 driver_features;
+ int default_rate;
+diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h
+new file mode 100644
+index 0000000..e030ef9
+--- /dev/null
++++ b/include/linux/mfd/tps65217.h
+@@ -0,0 +1,283 @@
++/*
++ * linux/mfd/tps65217.h
++ *
++ * Functions to access TPS65217 power management chip.
++ *
++ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation version 2.
++ *
++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
++ * kind, whether express or implied; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __LINUX_MFD_TPS65217_H
++#define __LINUX_MFD_TPS65217_H
++
++#include <linux/i2c.h>
++#include <linux/regulator/driver.h>
++#include <linux/regulator/machine.h>
++
++/* I2C ID for TPS65217 part */
++#define TPS65217_I2C_ID 0x24
++
++/* All register addresses */
++#define TPS65217_REG_CHIPID 0X00
++#define TPS65217_REG_PPATH 0X01
++#define TPS65217_REG_INT 0X02
++#define TPS65217_REG_CHGCONFIG0 0X03
++#define TPS65217_REG_CHGCONFIG1 0X04
++#define TPS65217_REG_CHGCONFIG2 0X05
++#define TPS65217_REG_CHGCONFIG3 0X06
++#define TPS65217_REG_WLEDCTRL1 0X07
++#define TPS65217_REG_WLEDCTRL2 0X08
++#define TPS65217_REG_MUXCTRL 0X09
++#define TPS65217_REG_STATUS 0X0A
++#define TPS65217_REG_PASSWORD 0X0B
++#define TPS65217_REG_PGOOD 0X0C
++#define TPS65217_REG_DEFPG 0X0D
++#define TPS65217_REG_DEFDCDC1 0X0E
++#define TPS65217_REG_DEFDCDC2 0X0F
++#define TPS65217_REG_DEFDCDC3 0X10
++#define TPS65217_REG_DEFSLEW 0X11
++#define TPS65217_REG_DEFLDO1 0X12
++#define TPS65217_REG_DEFLDO2 0X13
++#define TPS65217_REG_DEFLS1 0X14
++#define TPS65217_REG_DEFLS2 0X15
++#define TPS65217_REG_ENABLE 0X16
++#define TPS65217_REG_DEFUVLO 0X18
++#define TPS65217_REG_SEQ1 0X19
++#define TPS65217_REG_SEQ2 0X1A
++#define TPS65217_REG_SEQ3 0X1B
++#define TPS65217_REG_SEQ4 0X1C
++#define TPS65217_REG_SEQ5 0X1D
++#define TPS65217_REG_SEQ6 0X1E
++
++/* Register field definitions */
++#define TPS65217_CHIPID_CHIP_MASK 0xF0
++#define TPS65217_CHIPID_REV_MASK 0x0F
++
++#define TPS65217_PPATH_ACSINK_ENABLE BIT(7)
++#define TPS65217_PPATH_USBSINK_ENABLE BIT(6)
++#define TPS65217_PPATH_AC_PW_ENABLE BIT(5)
++#define TPS65217_PPATH_USB_PW_ENABLE BIT(4)
++#define TPS65217_PPATH_AC_CURRENT_MASK 0x0C
++#define TPS65217_PPATH_USB_CURRENT_MASK 0x03
++
++#define TPS65217_INT_PBM BIT(6)
++#define TPS65217_INT_ACM BIT(5)
++#define TPS65217_INT_USBM BIT(4)
++#define TPS65217_INT_PBI BIT(2)
++#define TPS65217_INT_ACI BIT(1)
++#define TPS65217_INT_USBI BIT(0)
++
++#define TPS65217_CHGCONFIG0_TREG BIT(7)
++#define TPS65217_CHGCONFIG0_DPPM BIT(6)
++#define TPS65217_CHGCONFIG0_TSUSP BIT(5)
++#define TPS65217_CHGCONFIG0_TERMI BIT(4)
++#define TPS65217_CHGCONFIG0_ACTIVE BIT(3)
++#define TPS65217_CHGCONFIG0_CHGTOUT BIT(2)
++#define TPS65217_CHGCONFIG0_PCHGTOUT BIT(1)
++#define TPS65217_CHGCONFIG0_BATTEMP BIT(0)
++
++#define TPS65217_CHGCONFIG1_TMR_MASK 0xC0
++#define TPS65217_CHGCONFIG1_TMR_ENABLE BIT(5)
++#define TPS65217_CHGCONFIG1_NTC_TYPE BIT(4)
++#define TPS65217_CHGCONFIG1_RESET BIT(3)
++#define TPS65217_CHGCONFIG1_TERM BIT(2)
++#define TPS65217_CHGCONFIG1_SUSP BIT(1)
++#define TPS65217_CHGCONFIG1_CHG_EN BIT(0)
++
++#define TPS65217_CHGCONFIG2_DYNTMR BIT(7)
++#define TPS65217_CHGCONFIG2_VPREGHG BIT(6)
++#define TPS65217_CHGCONFIG2_VOREG_MASK 0x30
++
++#define TPS65217_CHGCONFIG3_ICHRG_MASK 0xC0
++#define TPS65217_CHGCONFIG3_DPPMTH_MASK 0x30
++#define TPS65217_CHGCONFIG2_PCHRGT BIT(3)
++#define TPS65217_CHGCONFIG2_TERMIF 0x06
++#define TPS65217_CHGCONFIG2_TRANGE BIT(0)
++
++#define TPS65217_WLEDCTRL1_ISINK_ENABLE BIT(3)
++#define TPS65217_WLEDCTRL1_ISEL BIT(2)
++#define TPS65217_WLEDCTRL1_FDIM_MASK 0x03
++
++#define TPS65217_WLEDCTRL2_DUTY_MASK 0x7F
++
++#define TPS65217_MUXCTRL_MUX_MASK 0x07
++
++#define TPS65217_STATUS_OFF BIT(7)
++#define TPS65217_STATUS_ACPWR BIT(3)
++#define TPS65217_STATUS_USBPWR BIT(2)
++#define TPS65217_STATUS_PB BIT(0)
++
++#define TPS65217_PASSWORD_REGS_UNLOCK 0x7D
++
++#define TPS65217_PGOOD_LDO3_PG BIT(6)
++#define TPS65217_PGOOD_LDO4_PG BIT(5)
++#define TPS65217_PGOOD_DC1_PG BIT(4)
++#define TPS65217_PGOOD_DC2_PG BIT(3)
++#define TPS65217_PGOOD_DC3_PG BIT(2)
++#define TPS65217_PGOOD_LDO1_PG BIT(1)
++#define TPS65217_PGOOD_LDO2_PG BIT(0)
++
++#define TPS65217_DEFPG_LDO1PGM BIT(3)
++#define TPS65217_DEFPG_LDO2PGM BIT(2)
++#define TPS65217_DEFPG_PGDLY_MASK 0x03
++
++#define TPS65217_DEFDCDCX_XADJX BIT(7)
++#define TPS65217_DEFDCDCX_DCDC_MASK 0x3F
++
++#define TPS65217_DEFSLEW_GO BIT(7)
++#define TPS65217_DEFSLEW_GODSBL BIT(6)
++#define TPS65217_DEFSLEW_PFM_EN1 BIT(5)
++#define TPS65217_DEFSLEW_PFM_EN2 BIT(4)
++#define TPS65217_DEFSLEW_PFM_EN3 BIT(3)
++#define TPS65217_DEFSLEW_SLEW_MASK 0x07
++
++#define TPS65217_DEFLDO1_LDO1_MASK 0x0F
++
++#define TPS65217_DEFLDO2_TRACK BIT(6)
++#define TPS65217_DEFLDO2_LDO2_MASK 0x3F
++
++#define TPS65217_DEFLDO3_LDO3_EN BIT(5)
++#define TPS65217_DEFLDO3_LDO3_MASK 0x1F
++
++#define TPS65217_DEFLDO4_LDO4_EN BIT(5)
++#define TPS65217_DEFLDO4_LDO4_MASK 0x1F
++
++#define TPS65217_ENABLE_LS1_EN BIT(6)
++#define TPS65217_ENABLE_LS2_EN BIT(5)
++#define TPS65217_ENABLE_DC1_EN BIT(4)
++#define TPS65217_ENABLE_DC2_EN BIT(3)
++#define TPS65217_ENABLE_DC3_EN BIT(2)
++#define TPS65217_ENABLE_LDO1_EN BIT(1)
++#define TPS65217_ENABLE_LDO2_EN BIT(0)
++
++#define TPS65217_DEFUVLO_UVLOHYS BIT(2)
++#define TPS65217_DEFUVLO_UVLO_MASK 0x03
++
++#define TPS65217_SEQ1_DC1_SEQ_MASK 0xF0
++#define TPS65217_SEQ1_DC2_SEQ_MASK 0x0F
++
++#define TPS65217_SEQ2_DC3_SEQ_MASK 0xF0
++#define TPS65217_SEQ2_LDO1_SEQ_MASK 0x0F
++
++#define TPS65217_SEQ3_LDO2_SEQ_MASK 0xF0
++#define TPS65217_SEQ3_LDO3_SEQ_MASK 0x0F
++
++#define TPS65217_SEQ4_LDO4_SEQ_MASK 0xF0
++
++#define TPS65217_SEQ5_DLY1_MASK 0xC0
++#define TPS65217_SEQ5_DLY2_MASK 0x30
++#define TPS65217_SEQ5_DLY3_MASK 0x0C
++#define TPS65217_SEQ5_DLY4_MASK 0x03
++
++#define TPS65217_SEQ6_DLY5_MASK 0xC0
++#define TPS65217_SEQ6_DLY6_MASK 0x30
++#define TPS65217_SEQ6_SEQUP BIT(2)
++#define TPS65217_SEQ6_SEQDWN BIT(1)
++#define TPS65217_SEQ6_INSTDWN BIT(0)
++
++#define TPS65217_MAX_REGISTER 0x1E
++#define TPS65217_PROTECT_NONE 0
++#define TPS65217_PROTECT_L1 1
++#define TPS65217_PROTECT_L2 2
++
++
++enum tps65217_regulator_id {
++ /* DCDC's */
++ TPS65217_DCDC_1,
++ TPS65217_DCDC_2,
++ TPS65217_DCDC_3,
++ /* LDOs */
++ TPS65217_LDO_1,
++ TPS65217_LDO_2,
++ TPS65217_LDO_3,
++ TPS65217_LDO_4,
++};
++
++#define TPS65217_MAX_REG_ID TPS65217_LDO_4
++
++/* Number of step-down converters available */
++#define TPS65217_NUM_DCDC 3
++/* Number of LDO voltage regulators available */
++#define TPS65217_NUM_LDO 4
++/* Number of total regulators available */
++#define TPS65217_NUM_REGULATOR (TPS65217_NUM_DCDC + TPS65217_NUM_LDO)
++
++/**
++ * struct tps65217_board - packages regulator init data
++ * @tps65217_init_data: regulator initialization values
++ *
++ * Board data may be used to initialize the regulators.
++ */
++struct tps65217_board {
++ struct regulator_init_data *tps65217_init_data;
++};
++
++/**
++ * struct tps_info - packages regulator constraints
++ * @name: Voltage regulator name
++ * @min_uV: minimum micro volts
++ * @max_uV:		maximum micro volts
++ * @vsel_to_uv: Function pointer to get voltage from selector
++ * @uv_to_vsel: Function pointer to get selector from voltage
++ * @table: Table for non-uniform voltage step-size
++ * @table_len: Length of the voltage table
++ * @enable_mask: Regulator enable mask bits
++ * @set_vout_reg: Regulator output voltage set register
++ * @set_vout_mask: Regulator output voltage set mask
++ *
++ * This data is used to check the regulator voltage limits while setting.
++ */
++struct tps_info {
++ const char *name;
++ int min_uV;
++ int max_uV;
++ int (*vsel_to_uv)(unsigned int vsel);
++ int (*uv_to_vsel)(int uV, unsigned int *vsel);
++ const int *table;
++ unsigned int table_len;
++ unsigned int enable_mask;
++ unsigned int set_vout_reg;
++ unsigned int set_vout_mask;
++};
++
++/**
++ * struct tps65217 - tps65217 sub-driver chip access routines
++ *
++ * Device data may be used to access the TPS65217 chip
++ */
++
++struct tps65217 {
++ struct device *dev;
++ struct tps65217_board *pdata;
++ struct regulator_desc desc[TPS65217_NUM_REGULATOR];
++ struct regulator_dev *rdev[TPS65217_NUM_REGULATOR];
++ struct tps_info *info[TPS65217_NUM_REGULATOR];
++ struct regmap *regmap;
++
++ /* Client devices */
++ struct platform_device *regulator_pdev[TPS65217_NUM_REGULATOR];
++};
++
++static inline struct tps65217 *dev_to_tps65217(struct device *dev)
++{
++ return dev_get_drvdata(dev);
++}
++
++int tps65217_reg_read(struct tps65217 *tps, unsigned int reg,
++ unsigned int *val);
++int tps65217_reg_write(struct tps65217 *tps, unsigned int reg,
++ unsigned int val, unsigned int level);
++int tps65217_set_bits(struct tps65217 *tps, unsigned int reg,
++ unsigned int mask, unsigned int val, unsigned int level);
++int tps65217_clear_bits(struct tps65217 *tps, unsigned int reg,
++ unsigned int mask, unsigned int level);
++
++#endif /* __LINUX_MFD_TPS65217_H */
+diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
+index 8bf2cb9..cfc1f76 100644
+--- a/include/linux/mfd/tps65910.h
++++ b/include/linux/mfd/tps65910.h
+@@ -17,10 +17,17 @@
+ #ifndef __LINUX_MFD_TPS65910_H
+ #define __LINUX_MFD_TPS65910_H
+
++#include <linux/gpio.h>
++#include <linux/regulator/machine.h>
++
+ /* TPS chip id list */
+ #define TPS65910 0
+ #define TPS65911 1
+
++/* I2C Slave Address 7-bit */
++#define TPS65910_I2C_ID0 0x12 /* Smart Reflex */
++#define TPS65910_I2C_ID1 0x2D /* general-purpose control */
++
+ /* TPS regulator type list */
+ #define REGULATOR_LDO 0
+ #define REGULATOR_DCDC 1
+@@ -740,6 +747,34 @@
+ #define TPS65910_GPIO_STS BIT(1)
+ #define TPS65910_GPIO_SET BIT(0)
+
++/* Regulator Index Definitions */
++#define TPS65910_REG_VRTC 0
++#define TPS65910_REG_VIO 1
++#define TPS65910_REG_VDD1 2
++#define TPS65910_REG_VDD2 3
++#define TPS65910_REG_VDD3 4
++#define TPS65910_REG_VDIG1 5
++#define TPS65910_REG_VDIG2 6
++#define TPS65910_REG_VPLL 7
++#define TPS65910_REG_VDAC 8
++#define TPS65910_REG_VAUX1 9
++#define TPS65910_REG_VAUX2 10
++#define TPS65910_REG_VAUX33 11
++#define TPS65910_REG_VMMC 12
++
++#define TPS65911_REG_VDDCTRL 4
++#define TPS65911_REG_LDO1 5
++#define TPS65911_REG_LDO2 6
++#define TPS65911_REG_LDO3 7
++#define TPS65911_REG_LDO4 8
++#define TPS65911_REG_LDO5 9
++#define TPS65911_REG_LDO6 10
++#define TPS65911_REG_LDO7 11
++#define TPS65911_REG_LDO8 12
++
++/* Max number of TPS65910/11 regulators */
++#define TPS65910_NUM_REGS 13
++
+ /**
+ * struct tps65910_board
+ * Board platform data may be used to initialize regulators.
+@@ -751,7 +786,7 @@ struct tps65910_board {
+ int irq_base;
+ int vmbch_threshold;
+ int vmbch2_threshold;
+- struct regulator_init_data *tps65910_pmic_init_data;
++ struct regulator_init_data *tps65910_pmic_init_data[TPS65910_NUM_REGS];
+ };
+
+ /**
+diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
+index c8ef9bc..80caf1d 100644
+--- a/include/linux/mmc/card.h
++++ b/include/linux/mmc/card.h
+@@ -455,7 +455,7 @@ struct mmc_driver {
+ struct device_driver drv;
+ int (*probe)(struct mmc_card *);
+ void (*remove)(struct mmc_card *);
+- int (*suspend)(struct mmc_card *, pm_message_t);
++ int (*suspend)(struct mmc_card *);
+ int (*resume)(struct mmc_card *);
+ };
+
+diff --git a/include/linux/platform_data/cbus.h b/include/linux/platform_data/cbus.h
+new file mode 100644
+index 0000000..7a977e1
+--- /dev/null
++++ b/include/linux/platform_data/cbus.h
+@@ -0,0 +1,31 @@
++/*
++ * cbus.h - CBUS platform_data definition
++ *
++ * Copyright (C) 2004 - 2009 Nokia Corporation
++ *
++ * Written by Felipe Balbi <felipe.balbi@nokia.com>
++ *
++ * This file is subject to the terms and conditions of the GNU General
++ * Public License. See the file "COPYING" in the main directory of this
++ * archive for more details.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#ifndef __INCLUDE_LINUX_CBUS_H
++#define __INCLUDE_LINUX_CBUS_H
++
++struct cbus_host_platform_data {
++ int dat_gpio;
++ int clk_gpio;
++ int sel_gpio;
++};
++
++#endif /* __INCLUDE_LINUX_CBUS_H */
+diff --git a/include/linux/pwm/ehrpwm.h b/include/linux/pwm/ehrpwm.h
+new file mode 100644
+index 0000000..53a4481
+--- /dev/null
++++ b/include/linux/pwm/ehrpwm.h
+@@ -0,0 +1,190 @@
++#ifndef __EHRPWM_H__
++#define __EHRPWM_H__
++
++#include <linux/pwm/pwm.h>
++
++#define NCHAN 2
++
++struct ehrpwm_pwm;
++
++typedef int (*p_fcallback) (struct ehrpwm_pwm *, void *data);
++
++struct et_int {
++ void *data;
++ p_fcallback pcallback;
++};
++
++struct tz_int {
++ void *data;
++ p_fcallback pcallback;
++};
++
++struct ehrpwm_context {
++ u32 tbctl;
++ u32 tbprd;
++ u32 hrcfg;
++ u32 aqctla;
++ u32 aqctlb;
++ u32 cmpa;
++ u32 cmpb;
++ u32 tzctl;
++ u32 tzflg;
++ u32 tzclr;
++ u32 tzfrc;
++};
++
++struct ehrpwm_pwm {
++ struct pwm_device pwm[NCHAN];
++ struct pwm_device_ops ops;
++ spinlock_t lock;
++ struct clk *clk;
++ void __iomem *mmio_base;
++ unsigned short prescale_val;
++ int irq[2];
++ struct et_int st_etint;
++ struct tz_int st_tzint;
++ u8 version;
++ void __iomem *config_mem_base;
++ struct device *dev;
++ struct ehrpwm_context ctx;
++};
++
++enum tz_event {
++ TZ_ONE_SHOT_EVENT = 0,
++ TZ_CYCLE_BY_CYCLE,
++ TZ_OSHT_CBC,
++ TZ_DIS_EVT,
++};
++
++enum config_mask {
++ CONFIG_NS,
++ CONFIG_TICKS,
++};
++
++enum db_edge_delay {
++ RISING_EDGE_DELAY,
++ FALLING_EDGE_DELAY,
++};
++
++struct aq_config_params {
++ unsigned char ch;
++ unsigned char ctreqzro;
++ unsigned char ctreqprd;
++ unsigned char ctreqcmpaup;
++ unsigned char ctreqcmpadown;
++ unsigned char ctreqcmpbup;
++ unsigned char ctreqcmpbdown;
++};
++
++int ehrpwm_tb_set_prescalar_val(struct pwm_device *p, unsigned char
++ clkdiv, unsigned char hspclkdiv);
++
++int ehrpwm_tb_config_sync(struct pwm_device *p, unsigned char phsen,
++ unsigned char syncosel);
++
++int ehrpwm_tb_set_counter_mode(struct pwm_device *p, unsigned char
++ ctrmode, unsigned char phsdir);
++
++int ehrpwm_tb_force_sync(struct pwm_device *p);
++
++int ehrpwm_tb_set_periodload(struct pwm_device *p, unsigned char
++ loadmode);
++
++int ehrpwm_tb_read_status(struct pwm_device *p, unsigned short *val);
++
++int ehrpwm_tb_read_counter(struct pwm_device *p, unsigned short *val);
++
++int ehrpwm_tb_set_period(struct pwm_device *p, unsigned short val);
++
++int ehrpwm_tb_set_phase(struct pwm_device *p, unsigned short val);
++
++int ehrpwm_cmp_set_cmp_ctl(struct pwm_device *p, unsigned char
++ shdwamode, unsigned char shdwbmode, unsigned char loadamode,
++ unsigned char loadbmode);
++
++int ehrpwm_cmp_set_cmp_val(struct pwm_device *p, unsigned char reg,
++ unsigned short val);
++
++int ehrpwm_aq_set_act_ctrl(struct pwm_device *p,
++ struct aq_config_params *cfg);
++
++int ehrpwm_aq_set_one_shot_act(struct pwm_device *p, unsigned char ch,
++ unsigned char act);
++
++int ehrpwm_aq_ot_frc(struct pwm_device *p, unsigned char ch);
++
++int ehrpwm_aq_set_csfrc_load_mode(struct pwm_device *p, unsigned char
++ loadmode);
++
++int ehrpwm_aq_continuous_frc(struct pwm_device *p, unsigned char ch,
++ unsigned char act);
++
++int ehrpwm_db_get_max_delay(struct pwm_device *p,
++ enum config_mask cfgmask, unsigned long *delay_val);
++
++int ehrpwm_db_get_delay(struct pwm_device *p, unsigned char edge,
++ enum config_mask cfgmask, unsigned long *delay_val);
++
++int ehrpwm_db_set_delay(struct pwm_device *p, unsigned char edge,
++ enum config_mask cfgmask, unsigned long delay);
++
++int ehrpwm_db_set_mode(struct pwm_device *p, unsigned char inmode,
++ unsigned char polsel, unsigned char outmode);
++
++int ehrpwm_pc_configure(struct pwm_device *p, unsigned char chpduty,
++ unsigned char chpfreq, unsigned char oshtwidth);
++
++int ehrpwm_pc_en_dis(struct pwm_device *p, unsigned char chpen);
++
++int ehrpwm_tz_sel_event(struct pwm_device *p, unsigned char input,
++ enum tz_event evt);
++
++int ehrpwm_tz_set_action(struct pwm_device *p, unsigned char ch,
++ unsigned char act);
++
++int ehrpwm_tz_set_int_en_dis(struct pwm_device *p, enum tz_event event,
++ unsigned char int_en_dis);
++
++int ehrpwm_tz_force_evt(struct pwm_device *p, enum tz_event event);
++
++int ehrpwm_tz_read_status(struct pwm_device *p, unsigned short *status);
++
++int ehrpwm_tz_clr_evt_status(struct pwm_device *p);
++
++int ehrpwm_tz_clr_int_status(struct pwm_device *p);
++
++int ehrpwm_et_set_sel_evt(struct pwm_device *p, unsigned char evt,
++ unsigned char prd);
++
++int ehrpwm_et_int_en_dis(struct pwm_device *p, unsigned char en_dis);
++
++int ehrpwm_et_read_evt_cnt(struct pwm_device *p, unsigned long *evtcnt);
++
++int pwm_et_read_int_status(struct pwm_device *p,
++ unsigned long *status);
++
++int ehrpwm_et_frc_int(struct pwm_device *p);
++
++int ehrpwm_et_clr_int(struct pwm_device *p);
++
++int ehrpwm_hr_set_phase(struct pwm_device *p, unsigned char val);
++
++int ehrpwm_hr_set_cmpval(struct pwm_device *p, unsigned char val);
++
++int ehrpwm_hr_config(struct pwm_device *p, unsigned char loadmode,
++ unsigned char ctlmode, unsigned char edgemode);
++
++int ehrpwm_et_cb_register(struct pwm_device *p, void *data,
++ p_fcallback cb);
++
++int ehrpwm_tz_cb_register(struct pwm_device *p, void *data,
++ p_fcallback cb);
++
++int ehrpwm_pwm_suspend(struct pwm_device *p, enum
++ config_mask config_mask,
++ unsigned long val);
++
++#define ENABLE 1
++#define DISABLE 0
++
++#endif
+diff --git a/include/linux/pwm/pwm.h b/include/linux/pwm/pwm.h
+new file mode 100644
+index 0000000..99c08f5
+--- /dev/null
++++ b/include/linux/pwm/pwm.h
+@@ -0,0 +1,194 @@
++/*
++ * Copyright (C) 2011 Bill Gatliff < bgat@billgatliff.com>
++ * Copyright (C) 2011 Arun Murthy <arun.murth@stericsson.com>
++ *
++ * This program is free software; you may redistribute and/or modify
++ * it under the terms of the GNU General Public License version 2, as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
++ * USA
++ */
++#ifndef __LINUX_PWM_H
++#define __LINUX_PWM_H
++
++enum {
++ FLAG_REGISTERED = 0,
++ FLAG_REQUESTED = 1,
++ FLAG_STOP = 2,
++ FLAG_RUNNING = 3,
++};
++
++enum {
++ PWM_CONFIG_DUTY_TICKS = 0,
++ PWM_CONFIG_PERIOD_TICKS = 1,
++ PWM_CONFIG_POLARITY = 2,
++ PWM_CONFIG_START = 3,
++ PWM_CONFIG_STOP = 4,
++
++ PWM_CONFIG_HANDLER = 5,
++
++ PWM_CONFIG_DUTY_NS = 6,
++ PWM_CONFIG_DUTY_PERCENT = 7,
++ PWM_CONFIG_PERIOD_NS = 8,
++};
++
++struct pwm_config;
++struct pwm_device;
++
++typedef int (*pwm_handler_t)(struct pwm_device *p, void *data);
++typedef void (*pwm_callback_t)(struct pwm_device *p);
++
++struct pwm_device_ops {
++ int (*request) (struct pwm_device *p);
++ void (*release) (struct pwm_device *p);
++ int (*config) (struct pwm_device *p,
++ struct pwm_config *c);
++ int (*config_nosleep) (struct pwm_device *p,
++ struct pwm_config *c);
++ int (*synchronize) (struct pwm_device *p,
++ struct pwm_device *to_p);
++ int (*unsynchronize) (struct pwm_device *p,
++ struct pwm_device *from_p);
++ int (*set_callback) (struct pwm_device *p,
++ pwm_callback_t callback);
++ int (*freq_transition_notifier_cb) (struct pwm_device *p);
++};
++
++struct pwm_config {
++ unsigned long config_mask;
++ unsigned long duty_ticks;
++ unsigned long period_ticks;
++ int polarity;
++
++ pwm_handler_t handler;
++
++ unsigned long duty_ns;
++ unsigned long period_ns;
++ int duty_percent;
++};
++
++struct pwm_device {
++ struct list_head list;
++
++ struct device *dev;
++ struct pwm_device_ops *ops;
++
++ void *data;
++
++ const char *label;
++ pid_t pid;
++
++ volatile unsigned long flags;
++
++ unsigned long tick_hz;
++
++ pwm_callback_t callback;
++
++ struct work_struct handler_work;
++ pwm_handler_t handler;
++ void *handler_data;
++
++ int active_high;
++ unsigned long period_ticks;
++ unsigned long duty_ticks;
++ unsigned long period_ns;
++ unsigned long duty_ns;
++ struct notifier_block freq_transition;
++ unsigned long max_period_ticks;
++ spinlock_t pwm_lock;
++};
++
++#include <linux/semaphore.h>
++#include <linux/pwm/ehrpwm.h>
++
++enum {
++ PWM_VERSION_0,
++ PWM_VERSION_1,
++};
++
++struct pwm_chan_attrib {
++ int max_freq;
++};
++
++#define PWM_CHANNEL NCHAN
++
++struct pwmss_platform_data {
++ int channel_mask;
++ u8 version;
++ struct pwm_chan_attrib chan_attrib[PWM_CHANNEL];
++};
++
++struct pwm_device *pwm_request_byname(const char *name, const char *label);
++struct pwm_device *pwm_request(const char *bus_id, int id, const char *label);
++void pwm_release(struct pwm_device *p);
++
++static inline int pwm_is_registered(struct pwm_device *p)
++{
++ return test_bit(FLAG_REGISTERED, &p->flags);
++}
++
++static inline int pwm_is_requested(struct pwm_device *p)
++{
++ return test_bit(FLAG_REQUESTED, &p->flags);
++}
++
++static inline int pwm_is_running(struct pwm_device *p)
++{
++ return test_bit(FLAG_RUNNING, &p->flags);
++}
++
++static inline void pwm_set_drvdata(struct pwm_device *p, void *data)
++{
++ p->data = data;
++}
++
++static inline void *pwm_get_drvdata(const struct pwm_device *p)
++{
++ return p->data;
++}
++
++unsigned long pwm_ns_to_ticks(struct pwm_device *p, unsigned long nsecs);
++unsigned long pwm_ticks_to_ns(struct pwm_device *p, unsigned long ticks);
++
++int pwm_config_nosleep(struct pwm_device *p, struct pwm_config *c);
++int pwm_config(struct pwm_device *p, struct pwm_config *c);
++
++int pwm_set_period_ns(struct pwm_device *p, unsigned long period_ns);
++unsigned long pwm_get_period_ns(struct pwm_device *p);
++int pwm_set_duty_ns(struct pwm_device *p, unsigned long duty_ns);
++unsigned long pwm_get_duty_ns(struct pwm_device *p);
++int pwm_set_duty_percent(struct pwm_device *p, int percent);
++int pwm_set_polarity(struct pwm_device *p, int active_high);
++
++int pwm_start(struct pwm_device *p);
++int pwm_stop(struct pwm_device *p);
++
++int pwm_synchronize(struct pwm_device *p, struct pwm_device *to_p);
++int pwm_unsynchronize(struct pwm_device *p, struct pwm_device *from_p);
++int pwm_set_handler(struct pwm_device *p, pwm_handler_t handler, void *data);
++
++int pwm_register(struct pwm_device *p, struct device *parent, int id);
++int pwm_register_byname(struct pwm_device *p, struct device *parent,
++ const char *name);
++int pwm_unregister(struct pwm_device *p);
++
++#ifdef CONFIG_GPIO_PWM
++struct pwm_device *gpio_pwm_create(int gpio);
++int gpio_pwm_destroy(struct pwm_device *p);
++#endif
++int pwm_set_frequency(struct pwm_device *p, unsigned long freq);
++unsigned long pwm_get_frequency(struct pwm_device *p);
++int pwm_set_period_ticks(struct pwm_device *p,
++ unsigned long ticks);
++unsigned long pwm_get_duty_percent(struct pwm_device *p);
++int pwm_set_duty_ticks(struct pwm_device *p,
++ unsigned long ticks);
++#endif
+diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h
+index 63d2df4..b2777cd 100644
+--- a/include/linux/pwm_backlight.h
++++ b/include/linux/pwm_backlight.h
+@@ -7,7 +7,8 @@
+ #include <linux/backlight.h>
+
+ struct platform_pwm_backlight_data {
+- int pwm_id;
++ const char *pwm_id;
++ int ch;
+ unsigned int max_brightness;
+ unsigned int dft_brightness;
+ unsigned int lth_brightness;
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index 2cf4226..6b8fba3 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -800,13 +800,14 @@ static __always_inline bool __is_kfree_rcu_offset(unsigned long offset)
+ return offset < 4096;
+ }
+
++/*
++ * Intended to be called only from the kfree_rcu() macro.
++ */
+ static __always_inline
+ void __kfree_rcu(struct rcu_head *head, unsigned long offset)
+ {
+ typedef void (*rcu_callback)(struct rcu_head *);
+
+- BUILD_BUG_ON(!__builtin_constant_p(offset));
+-
+ /* See the kfree_rcu() header comment. */
+ BUILD_BUG_ON(!__is_kfree_rcu_offset(offset));
+
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 7503352..7626e5a 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -1211,6 +1211,7 @@ struct urb {
+ struct list_head urb_list; /* list head for use by the urb's
+ * current owner */
+ struct list_head anchor_list; /* the URB may be anchored */
++ struct list_head giveback_list; /* to postpone the giveback call */
+ struct usb_anchor *anchor;
+ struct usb_device *dev; /* (in) pointer to associated device */
+ struct usb_host_endpoint *ep; /* (internal) pointer to endpoint */
+diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
+index 1d3a675..317d892 100644
+--- a/include/linux/usb/gadget.h
++++ b/include/linux/usb/gadget.h
+@@ -477,8 +477,8 @@ struct usb_gadget_ops {
+ * driver setup() requests
+ * @ep_list: List of other endpoints supported by the device.
+ * @speed: Speed of current connection to USB host.
+- * @is_dualspeed: True if the controller supports both high and full speed
+- * operation. If it does, the gadget driver must also support both.
++ * @max_speed: Maximal speed the UDC can handle. UDC must support this
++ * and all slower speeds.
+ * @is_otg: True if the USB device port uses a Mini-AB jack, so that the
+ * gadget driver must provide a USB OTG descriptor.
+ * @is_a_peripheral: False unless is_otg, the "A" end of a USB cable
+@@ -518,7 +518,7 @@ struct usb_gadget {
+ struct usb_ep *ep0;
+ struct list_head ep_list; /* of usb_ep */
+ enum usb_device_speed speed;
+- unsigned is_dualspeed:1;
++ enum usb_device_speed max_speed;
+ unsigned is_otg:1;
+ unsigned is_a_peripheral:1;
+ unsigned b_hnp_enable:1;
+@@ -549,7 +549,7 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev)
+ static inline int gadget_is_dualspeed(struct usb_gadget *g)
+ {
+ #ifdef CONFIG_USB_GADGET_DUALSPEED
+- /* runtime test would check "g->is_dualspeed" ... that might be
++ /* runtime test would check "g->max_speed" ... that might be
+ * useful to work around hardware bugs, but is mostly pointless
+ */
+ return 1;
+@@ -567,7 +567,7 @@ static inline int gadget_is_superspeed(struct usb_gadget *g)
+ {
+ #ifdef CONFIG_USB_GADGET_SUPERSPEED
+ /*
+- * runtime test would check "g->is_superspeed" ... that might be
++ * runtime test would check "g->max_speed" ... that might be
+ * useful to work around hardware bugs, but is mostly pointless
+ */
+ return 1;
+@@ -760,7 +760,7 @@ static inline int usb_gadget_disconnect(struct usb_gadget *gadget)
+ /**
+ * struct usb_gadget_driver - driver for usb 'slave' devices
+ * @function: String describing the gadget's function
+- * @speed: Highest speed the driver handles.
++ * @max_speed: Highest speed the driver handles.
+ * @setup: Invoked for ep0 control requests that aren't handled by
+ * the hardware level driver. Most calls must be handled by
+ * the gadget driver, including descriptor and configuration
+@@ -824,7 +824,7 @@ static inline int usb_gadget_disconnect(struct usb_gadget *gadget)
+ */
+ struct usb_gadget_driver {
+ char *function;
+- enum usb_device_speed speed;
++ enum usb_device_speed max_speed;
+ void (*unbind)(struct usb_gadget *);
+ int (*setup)(struct usb_gadget *,
+ const struct usb_ctrlrequest *);
+diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
+index eb50525..2c59816 100644
+--- a/include/linux/usb/musb.h
++++ b/include/linux/usb/musb.h
+@@ -62,6 +62,7 @@ struct musb_hdrc_eps_bits {
+ struct musb_hdrc_config {
+ struct musb_fifo_cfg *fifo_cfg; /* board fifo configuration */
+ unsigned fifo_cfg_size; /* size of the fifo configuration */
++ unsigned short fifo_mode; /* fifo mode to be selected */
+
+ /* MUSB configuration-specific details */
+ unsigned multipoint:1; /* multipoint device */
+diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
+index d87f44f..da6be3c 100644
+--- a/include/linux/usb/otg.h
++++ b/include/linux/usb/otg.h
+@@ -63,6 +63,7 @@ struct otg_transceiver {
+ struct device *dev;
+ const char *label;
+ unsigned int flags;
++ u8 id;
+
+ u8 default_a;
+ enum usb_otg_state state;
+@@ -116,17 +117,18 @@ struct otg_transceiver {
+
+ /* for board-specific init logic */
+ extern int otg_set_transceiver(struct otg_transceiver *);
++extern int otg_reset_transceiver(struct otg_transceiver *);
+
+ #if defined(CONFIG_NOP_USB_XCEIV) || (defined(CONFIG_NOP_USB_XCEIV_MODULE) && defined(MODULE))
+ /* sometimes transceivers are accessed only through e.g. ULPI */
+-extern void usb_nop_xceiv_register(void);
+-extern void usb_nop_xceiv_unregister(void);
++extern void usb_nop_xceiv_register(int id);
++extern void usb_nop_xceiv_unregister(int id);
+ #else
+-static inline void usb_nop_xceiv_register(void)
++static inline void usb_nop_xceiv_register(int id)
+ {
+ }
+
+-static inline void usb_nop_xceiv_unregister(void)
++static inline void usb_nop_xceiv_unregister(int id)
+ {
+ }
+ #endif
+@@ -166,11 +168,11 @@ otg_shutdown(struct otg_transceiver *otg)
+
+ /* for usb host and peripheral controller drivers */
+ #ifdef CONFIG_USB_OTG_UTILS
+-extern struct otg_transceiver *otg_get_transceiver(void);
++extern struct otg_transceiver *otg_get_transceiver(int id);
+ extern void otg_put_transceiver(struct otg_transceiver *);
+ extern const char *otg_state_string(enum usb_otg_state state);
+ #else
+-static inline struct otg_transceiver *otg_get_transceiver(void)
++static inline struct otg_transceiver *otg_get_transceiver(int id)
+ {
+ return NULL;
+ }
+diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
+index 4bde182..dcdfc2b 100644
+--- a/include/linux/vmalloc.h
++++ b/include/linux/vmalloc.h
+@@ -131,6 +131,7 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
+ */
+ extern rwlock_t vmlist_lock;
+ extern struct vm_struct *vmlist;
++extern __init void vm_area_add_early(struct vm_struct *vm);
+ extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
+
+ #ifdef CONFIG_SMP
+diff --git a/include/linux/wl12xx.h b/include/linux/wl12xx.h
+index 4b69739..e6911c6 100644
+--- a/include/linux/wl12xx.h
++++ b/include/linux/wl12xx.h
+@@ -54,6 +54,8 @@ struct wl12xx_platform_data {
+ int board_ref_clock;
+ int board_tcxo_clock;
+ unsigned long platform_quirks;
++ int bt_enable_gpio;
++ int wlan_enable_gpio;
+ };
+
+ /* Platform does not support level trigger interrupts */
+diff --git a/include/video/da8xx-fb.h b/include/video/da8xx-fb.h
+index 89d43b3..6a6a69f 100644
+--- a/include/video/da8xx-fb.h
++++ b/include/video/da8xx-fb.h
+@@ -13,7 +13,8 @@
+ #define DA8XX_FB_H
+
+ enum panel_type {
+- QVGA = 0
++ QVGA = 0,
++ WVGA,
+ };
+
+ enum panel_shade {
+@@ -28,7 +29,7 @@ enum raster_load_mode {
+ };
+
+ struct display_panel {
+- enum panel_type panel_type; /* QVGA */
++ enum panel_type panel_type;
+ int max_bpp;
+ int min_bpp;
+ enum panel_shade panel_shade;
+@@ -82,6 +83,9 @@ struct lcd_ctrl_config {
+
+ /* Raster Data Order Select: 1=Most-to-least 0=Least-to-most */
+ unsigned char raster_order;
++
++ /* DMA FIFO threshold */
++ int fifo_th;
+ };
+
+ struct lcd_sync_arg {
+@@ -100,5 +104,9 @@ struct lcd_sync_arg {
+ #define FBIPUT_HSYNC _IOW('F', 9, int)
+ #define FBIPUT_VSYNC _IOW('F', 10, int)
+
++typedef void (*vsync_callback_t)(void *arg);
++int register_vsync_cb(vsync_callback_t handler, void *arg, int idx);
++int unregister_vsync_cb(vsync_callback_t handler, void *arg, int idx);
++
+ #endif /* ifndef DA8XX_FB_H */
+
+diff --git a/include/video/st7735fb.h b/include/video/st7735fb.h
+new file mode 100644
+index 0000000..250f036
+--- /dev/null
++++ b/include/video/st7735fb.h
+@@ -0,0 +1,86 @@
++/*
++ * linux/include/video/st7735fb.h -- FB driver for ST7735 LCD controller
++ *
++ * Copyright (C) 2011, Matt Porter
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive for
++ * more details.
++ */
++
++#define DRVNAME "st7735fb"
++#define WIDTH 128
++#define HEIGHT 160
++#define BPP 16
++
++/* Supported display modules */
++#define ST7735_DISPLAY_AF_TFT18 0 /* Adafruit SPI TFT 1.8" */
++
++/* Init script function */
++struct st7735_function {
++ u16 cmd;
++ u16 data;
++};
++
++/* Init script commands */
++enum st7735_cmd {
++ ST7735_START,
++ ST7735_END,
++ ST7735_CMD,
++ ST7735_DATA,
++ ST7735_DELAY
++};
++
++struct st7735fb_par {
++ struct spi_device *spi;
++ struct fb_info *info;
++ int rst;
++ int dc;
++};
++
++struct st7735fb_platform_data {
++ int rst_gpio;
++ int dc_gpio;
++};
++
++/* ST7735 Commands */
++#define ST7735_NOP 0x0
++#define ST7735_SWRESET 0x01
++#define ST7735_RDDID 0x04
++#define ST7735_RDDST 0x09
++#define ST7735_SLPIN 0x10
++#define ST7735_SLPOUT 0x11
++#define ST7735_PTLON 0x12
++#define ST7735_NORON 0x13
++#define ST7735_INVOFF 0x20
++#define ST7735_INVON 0x21
++#define ST7735_DISPOFF 0x28
++#define ST7735_DISPON 0x29
++#define ST7735_CASET 0x2A
++#define ST7735_RASET 0x2B
++#define ST7735_RAMWR 0x2C
++#define ST7735_RAMRD 0x2E
++#define ST7735_COLMOD 0x3A
++#define ST7735_MADCTL 0x36
++#define ST7735_FRMCTR1 0xB1
++#define ST7735_FRMCTR2 0xB2
++#define ST7735_FRMCTR3 0xB3
++#define ST7735_INVCTR 0xB4
++#define ST7735_DISSET5 0xB6
++#define ST7735_PWCTR1 0xC0
++#define ST7735_PWCTR2 0xC1
++#define ST7735_PWCTR3 0xC2
++#define ST7735_PWCTR4 0xC3
++#define ST7735_PWCTR5 0xC4
++#define ST7735_VMCTR1 0xC5
++#define ST7735_RDID1 0xDA
++#define ST7735_RDID2 0xDB
++#define ST7735_RDID3 0xDC
++#define ST7735_RDID4 0xDD
++#define ST7735_GMCTRP1 0xE0
++#define ST7735_GMCTRN1 0xE1
++#define ST7735_PWCTR6 0xFC
++
++
++
++
+diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
+index a7b80c1..3380297 100644
+--- a/lib/decompress_bunzip2.c
++++ b/lib/decompress_bunzip2.c
+@@ -691,7 +691,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len,
+ outbuf = malloc(BZIP2_IOBUF_SIZE);
+
+ if (!outbuf) {
+- error("Could not allocate output bufer");
++ error("Could not allocate output buffer");
+ return RETVAL_OUT_OF_MEMORY;
+ }
+ if (buf)
+@@ -699,7 +699,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len,
+ else
+ inbuf = malloc(BZIP2_IOBUF_SIZE);
+ if (!inbuf) {
+- error("Could not allocate input bufer");
++ error("Could not allocate input buffer");
+ i = RETVAL_OUT_OF_MEMORY;
+ goto exit_0;
+ }
+diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
+index 476c65a..32adb73 100644
+--- a/lib/decompress_unlzma.c
++++ b/lib/decompress_unlzma.c
+@@ -562,7 +562,7 @@ STATIC inline int INIT unlzma(unsigned char *buf, int in_len,
+ else
+ inbuf = malloc(LZMA_IOBUF_SIZE);
+ if (!inbuf) {
+- error("Could not allocate input bufer");
++ error("Could not allocate input buffer");
+ goto exit_0;
+ }
+
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 27be2f0..21fdf46 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -1118,6 +1118,32 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
+ EXPORT_SYMBOL(vm_map_ram);
+
+ /**
++ * vm_area_add_early - add vmap area early during boot
++ * @vm: vm_struct to add
++ *
++ * This function is used to add fixed kernel vm area to vmlist before
++ * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
++ * should contain proper values and the other fields should be zero.
++ *
++ * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
++ */
++void __init vm_area_add_early(struct vm_struct *vm)
++{
++ struct vm_struct *tmp, **p;
++
++ BUG_ON(vmap_initialized);
++ for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
++ if (tmp->addr >= vm->addr) {
++ BUG_ON(tmp->addr < vm->addr + vm->size);
++ break;
++ } else
++ BUG_ON(tmp->addr + tmp->size > vm->addr);
++ }
++ vm->next = *p;
++ *p = vm;
++}
++
++/**
+ * vm_area_register_early - register vmap area early during boot
+ * @vm: vm_struct to register
+ * @align: requested alignment
+@@ -1139,8 +1165,7 @@ void __init vm_area_register_early(struct vm_struct *vm, size_t align)
+
+ vm->addr = (void *)addr;
+
+- vm->next = vmlist;
+- vmlist = vm;
++ vm_area_add_early(vm);
+ }
+
+ void __init vmalloc_init(void)
+diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
+index 87d5ef1..d0dbac1 100644
+--- a/sound/soc/codecs/tlv320aic3x.c
++++ b/sound/soc/codecs/tlv320aic3x.c
+@@ -1147,6 +1147,11 @@ static int aic3x_set_power(struct snd_soc_codec *codec, int power)
+ codec->cache_only = 1;
+ ret = regulator_bulk_disable(ARRAY_SIZE(aic3x->supplies),
+ aic3x->supplies);
++ /* Enable cache sync if the regulator disable
++ * event is not triggered.
++ * TODO: Revisit later to fix this.
++ */
++ codec->cache_sync = 1;
+ }
+ out:
+ return ret;
+diff --git a/sound/soc/davinci/Kconfig b/sound/soc/davinci/Kconfig
+index 9e11a14..6b8dc37 100644
+--- a/sound/soc/davinci/Kconfig
++++ b/sound/soc/davinci/Kconfig
+@@ -6,6 +6,14 @@ config SND_DAVINCI_SOC
+ the DAVINCI AC97 or I2S interface. You will also need
+ to select the audio interfaces to support below.
+
++config SND_AM33XX_SOC
++ tristate "SoC Audio for the AM33XX chip"
++ depends on SOC_OMAPAM33XX
++ help
++ Say Y or M if you want to add support for codecs attached to
++ the AM33XX I2S interface. You will also need to select the
++ audio interfaces to support below.
++
+ config SND_DAVINCI_SOC_I2S
+ tristate
+
+@@ -25,6 +33,15 @@ config SND_DAVINCI_SOC_EVM
+ Say Y if you want to add support for SoC audio on TI
+ DaVinci DM6446, DM355 or DM365 EVM platforms.
+
++config SND_AM335X_SOC_EVM
++ tristate "SoC Audio support for AM335X EVM"
++ depends on SND_AM33XX_SOC
++ select SND_DAVINCI_SOC_MCASP
++ select SND_SOC_TLV320AIC3X
++ help
++ Say Y if you want to add support for SoC audio on
++ the AM335X EVM.
++
+ choice
+ prompt "DM365 codec select"
+ depends on SND_DAVINCI_SOC_EVM
+diff --git a/sound/soc/davinci/Makefile b/sound/soc/davinci/Makefile
+index a93679d..a7af786 100644
+--- a/sound/soc/davinci/Makefile
++++ b/sound/soc/davinci/Makefile
+@@ -5,6 +5,7 @@ snd-soc-davinci-mcasp-objs:= davinci-mcasp.o
+ snd-soc-davinci-vcif-objs:= davinci-vcif.o
+
+ obj-$(CONFIG_SND_DAVINCI_SOC) += snd-soc-davinci.o
++obj-$(CONFIG_SND_AM33XX_SOC) += snd-soc-davinci.o
+ obj-$(CONFIG_SND_DAVINCI_SOC_I2S) += snd-soc-davinci-i2s.o
+ obj-$(CONFIG_SND_DAVINCI_SOC_MCASP) += snd-soc-davinci-mcasp.o
+ obj-$(CONFIG_SND_DAVINCI_SOC_VCIF) += snd-soc-davinci-vcif.o
+@@ -17,4 +18,5 @@ obj-$(CONFIG_SND_DAVINCI_SOC_EVM) += snd-soc-evm.o
+ obj-$(CONFIG_SND_DM6467_SOC_EVM) += snd-soc-evm.o
+ obj-$(CONFIG_SND_DA830_SOC_EVM) += snd-soc-evm.o
+ obj-$(CONFIG_SND_DA850_SOC_EVM) += snd-soc-evm.o
++obj-$(CONFIG_SND_AM335X_SOC_EVM) += snd-soc-evm.o
+ obj-$(CONFIG_SND_DAVINCI_SOC_SFFSDR) += snd-soc-sffsdr.o
+diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
+index f78c3f0..f6a62c3 100644
+--- a/sound/soc/davinci/davinci-evm.c
++++ b/sound/soc/davinci/davinci-evm.c
+@@ -22,9 +22,8 @@
+ #include <asm/dma.h>
+ #include <asm/mach-types.h>
+
+-#include <mach/asp.h>
++#include <asm/hardware/asp.h>
+ #include <mach/edma.h>
+-#include <mach/mux.h>
+
+ #include "davinci-pcm.h"
+ #include "davinci-i2s.h"
+@@ -56,6 +55,9 @@ static int evm_hw_params(struct snd_pcm_substream *substream,
+ else if (machine_is_davinci_da830_evm() ||
+ machine_is_davinci_da850_evm())
+ sysclk = 24576000;
++ /* On AM335X, the codec gets its MCLK from an external 12 MHz crystal. */
++ else if (machine_is_am335xevm())
++ sysclk = 12000000;
+
+ else
+ return -EINVAL;
+@@ -239,6 +241,17 @@ static struct snd_soc_dai_link da850_evm_dai = {
+ .ops = &evm_ops,
+ };
+
++static struct snd_soc_dai_link am335x_evm_dai = {
++ .name = "TLV320AIC3X",
++ .stream_name = "AIC3X",
++ .cpu_dai_name = "davinci-mcasp.1",
++ .codec_dai_name = "tlv320aic3x-hifi",
++ .codec_name = "tlv320aic3x-codec.2-001b",
++ .platform_name = "davinci-pcm-audio",
++ .init = evm_aic3x_init,
++ .ops = &evm_ops,
++};
++
+ /* davinci dm6446 evm audio machine driver */
+ static struct snd_soc_card dm6446_snd_soc_card_evm = {
+ .name = "DaVinci DM6446 EVM",
+@@ -279,6 +292,12 @@ static struct snd_soc_card da850_snd_soc_card = {
+ .num_links = 1,
+ };
+
++static struct snd_soc_card am335x_snd_soc_card = {
++ .name = "AM335X EVM",
++ .dai_link = &am335x_evm_dai,
++ .num_links = 1,
++};
++
+ static struct platform_device *evm_snd_device;
+
+ static int __init evm_init(void)
+@@ -305,6 +324,9 @@ static int __init evm_init(void)
+ } else if (machine_is_davinci_da850_evm()) {
+ evm_snd_dev_data = &da850_snd_soc_card;
+ index = 0;
++ } else if (machine_is_am335xevm()) {
++ evm_snd_dev_data = &am335x_snd_soc_card;
++ index = 0;
+ } else
+ return -EINVAL;
+
+diff --git a/sound/soc/davinci/davinci-i2s.c b/sound/soc/davinci/davinci-i2s.c
+index 300e121..81a3e16 100644
+--- a/sound/soc/davinci/davinci-i2s.c
++++ b/sound/soc/davinci/davinci-i2s.c
+@@ -23,7 +23,7 @@
+ #include <sound/initval.h>
+ #include <sound/soc.h>
+
+-#include <mach/asp.h>
++#include <asm/hardware/asp.h>
+
+ #include "davinci-pcm.h"
+ #include "davinci-i2s.h"
+diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
+index 7173df2..0136cc4 100644
+--- a/sound/soc/davinci/davinci-mcasp.c
++++ b/sound/soc/davinci/davinci-mcasp.c
+@@ -21,7 +21,7 @@
+ #include <linux/slab.h>
+ #include <linux/delay.h>
+ #include <linux/io.h>
+-#include <linux/clk.h>
++#include <linux/pm_runtime.h>
+
+ #include <sound/core.h>
+ #include <sound/pcm.h>
+@@ -108,6 +108,10 @@
+ #define DAVINCI_MCASP_WFIFOSTS (0x1014)
+ #define DAVINCI_MCASP_RFIFOCTL (0x1018)
+ #define DAVINCI_MCASP_RFIFOSTS (0x101C)
++#define MCASP_VER3_WFIFOCTL (0x1000)
++#define MCASP_VER3_WFIFOSTS (0x1004)
++#define MCASP_VER3_RFIFOCTL (0x1008)
++#define MCASP_VER3_RFIFOSTS (0x100C)
+
+ /*
+ * DAVINCI_MCASP_PWREMUMGT_REG - Power Down and Emulation Management
+@@ -380,14 +384,34 @@ static void mcasp_start_tx(struct davinci_audio_dev *dev)
+ static void davinci_mcasp_start(struct davinci_audio_dev *dev, int stream)
+ {
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+- if (dev->txnumevt) /* enable FIFO */
+- mcasp_set_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
++ if (dev->txnumevt) { /* flush and enable FIFO */
++ if (dev->version == MCASP_VERSION_3) {
++ mcasp_clr_bits(dev->base + MCASP_VER3_WFIFOCTL,
+ FIFO_ENABLE);
++ mcasp_set_bits(dev->base + MCASP_VER3_WFIFOCTL,
++ FIFO_ENABLE);
++ } else {
++ mcasp_clr_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
++ FIFO_ENABLE);
++ mcasp_set_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
++ FIFO_ENABLE);
++ }
++ }
+ mcasp_start_tx(dev);
+ } else {
+- if (dev->rxnumevt) /* enable FIFO */
+- mcasp_set_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
++ if (dev->rxnumevt) { /* flush and enable FIFO */
++ if (dev->version == MCASP_VERSION_3) {
++ mcasp_clr_bits(dev->base + MCASP_VER3_RFIFOCTL,
++ FIFO_ENABLE);
++ mcasp_set_bits(dev->base + MCASP_VER3_RFIFOCTL,
++ FIFO_ENABLE);
++ } else {
++ mcasp_clr_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
++ FIFO_ENABLE);
++ mcasp_set_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
+ FIFO_ENABLE);
++ }
++ }
+ mcasp_start_rx(dev);
+ }
+ }
+@@ -407,14 +431,24 @@ static void mcasp_stop_tx(struct davinci_audio_dev *dev)
+ static void davinci_mcasp_stop(struct davinci_audio_dev *dev, int stream)
+ {
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+- if (dev->txnumevt) /* disable FIFO */
+- mcasp_clr_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
+- FIFO_ENABLE);
++ if (dev->txnumevt) { /* disable FIFO */
++ if (dev->version == MCASP_VERSION_3)
++ mcasp_clr_bits(dev->base + MCASP_VER3_WFIFOCTL,
++ FIFO_ENABLE);
++ else
++ mcasp_clr_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
++ FIFO_ENABLE);
++ }
+ mcasp_stop_tx(dev);
+ } else {
+- if (dev->rxnumevt) /* disable FIFO */
+- mcasp_clr_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
+- FIFO_ENABLE);
++ if (dev->rxnumevt) { /* disable FIFO */
++ if (dev->version == MCASP_VERSION_3)
++ mcasp_clr_bits(dev->base + MCASP_VER3_RFIFOCTL,
++ FIFO_ENABLE);
++ else
++ mcasp_clr_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
++ FIFO_ENABLE);
++ }
+ mcasp_stop_rx(dev);
+ }
+ }
+@@ -565,7 +599,7 @@ static int davinci_config_channel_size(struct davinci_audio_dev *dev,
+ TXSSZ(fmt), TXSSZ(0x0F));
+ mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG, TXROT(rotate),
+ TXROT(7));
+- mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMT_REG, RXROT(rotate),
++ mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMT_REG, RXROT(0),
+ RXROT(7));
+ mcasp_set_reg(dev->base + DAVINCI_MCASP_TXMASK_REG, mask);
+ mcasp_set_reg(dev->base + DAVINCI_MCASP_RXMASK_REG, mask);
+@@ -613,20 +647,36 @@ static void davinci_hw_common_param(struct davinci_audio_dev *dev, int stream)
+ if (dev->txnumevt * tx_ser > 64)
+ dev->txnumevt = 1;
+
+- mcasp_mod_bits(dev->base + DAVINCI_MCASP_WFIFOCTL, tx_ser,
++ if (dev->version == MCASP_VERSION_3) {
++ mcasp_mod_bits(dev->base + MCASP_VER3_WFIFOCTL, tx_ser,
+ NUMDMA_MASK);
+- mcasp_mod_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
++ mcasp_mod_bits(dev->base + MCASP_VER3_WFIFOCTL,
++ ((dev->txnumevt * tx_ser) << 8), NUMEVT_MASK);
++ } else {
++ mcasp_mod_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
++ tx_ser, NUMDMA_MASK);
++ mcasp_mod_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
+ ((dev->txnumevt * tx_ser) << 8), NUMEVT_MASK);
++ }
+ }
+
+ if (dev->rxnumevt && stream == SNDRV_PCM_STREAM_CAPTURE) {
+ if (dev->rxnumevt * rx_ser > 64)
+ dev->rxnumevt = 1;
+
+- mcasp_mod_bits(dev->base + DAVINCI_MCASP_RFIFOCTL, rx_ser,
++ if (dev->version == MCASP_VERSION_3) {
++ mcasp_mod_bits(dev->base + MCASP_VER3_RFIFOCTL, rx_ser,
+ NUMDMA_MASK);
+- mcasp_mod_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
+- ((dev->rxnumevt * rx_ser) << 8), NUMEVT_MASK);
++ mcasp_mod_bits(dev->base + MCASP_VER3_RFIFOCTL,
++ ((dev->rxnumevt * rx_ser) << 8),
++ NUMEVT_MASK);
++ } else {
++ mcasp_mod_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
++ rx_ser, NUMDMA_MASK);
++ mcasp_mod_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
++ ((dev->rxnumevt * rx_ser) << 8),
++ NUMEVT_MASK);
++ }
+ }
+ }
+
+@@ -776,20 +826,18 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+- if (!dev->clk_active) {
+- clk_enable(dev->clk);
+- dev->clk_active = 1;
+- }
++ ret = pm_runtime_get_sync(dev->dev);
++ if (ret < 0)
++ dev_err(dev->dev, "failed to get runtime pm\n");
++
+ davinci_mcasp_start(dev, substream->stream);
+ break;
+
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ davinci_mcasp_stop(dev, substream->stream);
+- if (dev->clk_active) {
+- clk_disable(dev->clk);
+- dev->clk_active = 0;
+- }
+-
++ ret = pm_runtime_put_sync(dev->dev);
++ if (ret < 0)
++ dev_err(dev->dev, "failed to put runtime pm\n");
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+@@ -887,15 +935,14 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
+ }
+
+ pdata = pdev->dev.platform_data;
+- dev->clk = clk_get(&pdev->dev, NULL);
+- if (IS_ERR(dev->clk)) {
+- ret = -ENODEV;
++ pm_runtime_enable(&pdev->dev);
++
++ ret = pm_runtime_get_sync(&pdev->dev);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "failed to get runtime pm\n");
+ goto err_release_region;
+ }
+
+- clk_enable(dev->clk);
+- dev->clk_active = 1;
+-
+ dev->base = ioremap(mem->start, resource_size(mem));
+ if (!dev->base) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+@@ -911,16 +958,35 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
+ dev->version = pdata->version;
+ dev->txnumevt = pdata->txnumevt;
+ dev->rxnumevt = pdata->rxnumevt;
++ dev->dev = &pdev->dev;
++
++ if (dev->version == MCASP_VERSION_3) {
++ dev->xrsrctl = kzalloc((sizeof(unsigned int) *
++ dev->num_serializer),
++ GFP_KERNEL);
++ if (!dev->xrsrctl) {
++ ret = -ENOMEM;
++ dev_err(&pdev->dev, "err: mem alloc xrsrctl\n");
++ goto err_release_clk;
++ }
++ }
+
+ dma_data = &dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK];
+ dma_data->asp_chan_q = pdata->asp_chan_q;
+ dma_data->ram_chan_q = pdata->ram_chan_q;
+ dma_data->sram_size = pdata->sram_size_playback;
+- dma_data->dma_addr = (dma_addr_t) (pdata->tx_dma_offset +
++ if (dev->version == MCASP_VERSION_3)
++ dma_data->dma_addr = (dma_addr_t) (pdata->tx_dma_offset);
++ else
++ dma_data->dma_addr = (dma_addr_t) (pdata->tx_dma_offset +
+ mem->start);
+
+- /* first TX, then RX */
+- res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
++ if (dev->version == MCASP_VERSION_3)
++ res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
++ else
++ /* first TX, then RX */
++ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
++
+ if (!res) {
+ dev_err(&pdev->dev, "no DMA resource\n");
+ ret = -ENODEV;
+@@ -933,10 +999,17 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
+ dma_data->asp_chan_q = pdata->asp_chan_q;
+ dma_data->ram_chan_q = pdata->ram_chan_q;
+ dma_data->sram_size = pdata->sram_size_capture;
+- dma_data->dma_addr = (dma_addr_t)(pdata->rx_dma_offset +
++ if (dev->version == MCASP_VERSION_3)
++ dma_data->dma_addr = (dma_addr_t) (pdata->rx_dma_offset);
++ else
++ dma_data->dma_addr = (dma_addr_t)(pdata->rx_dma_offset +
+ mem->start);
+
+- res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
++ if (dev->version == MCASP_VERSION_3)
++ res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
++ else
++ res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
++
+ if (!res) {
+ dev_err(&pdev->dev, "no DMA resource\n");
+ ret = -ENODEV;
+@@ -952,11 +1025,13 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
+ return 0;
+
+ err_iounmap:
++ if (dev->version == MCASP_VERSION_3)
++ kfree(dev->xrsrctl);
+ iounmap(dev->base);
+ err_release_clk:
+- clk_disable(dev->clk);
+- clk_put(dev->clk);
++ pm_runtime_put_sync(&pdev->dev);
+ err_release_region:
++ pm_runtime_disable(&pdev->dev);
+ release_mem_region(mem->start, resource_size(mem));
+ err_release_data:
+ kfree(dev);
+@@ -970,21 +1045,129 @@ static int davinci_mcasp_remove(struct platform_device *pdev)
+ struct resource *mem;
+
+ snd_soc_unregister_dai(&pdev->dev);
+- clk_disable(dev->clk);
+- clk_put(dev->clk);
+- dev->clk = NULL;
+-
++ pm_runtime_put_sync(&pdev->dev);
++ pm_runtime_disable(&pdev->dev);
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
+-
++ if (dev->version == MCASP_VERSION_3)
++ kfree(dev->xrsrctl);
+ kfree(dev);
+
+ return 0;
+ }
+
++#ifdef CONFIG_PM
++static int davinci_mcasp_suspend(struct platform_device *pdev,
++ pm_message_t state)
++{
++ int ret = 0, idx;
++ struct davinci_audio_dev *dev = dev_get_drvdata(&pdev->dev);
++
++ if (dev->version == MCASP_VERSION_3) {
++ dev->gblctlx = mcasp_get_reg(dev->base +
++ DAVINCI_MCASP_GBLCTLX_REG);
++ dev->txmask = mcasp_get_reg(dev->base +
++ DAVINCI_MCASP_TXMASK_REG);
++ dev->txfmt = mcasp_get_reg(dev->base + DAVINCI_MCASP_TXFMT_REG);
++ dev->txfmctl = mcasp_get_reg(dev->base +
++ DAVINCI_MCASP_TXFMCTL_REG);
++ dev->aclkxctl = mcasp_get_reg(dev->base +
++ DAVINCI_MCASP_ACLKXCTL_REG);
++ dev->ahclkxctl = mcasp_get_reg(dev->base +
++ DAVINCI_MCASP_AHCLKXCTL_REG);
++ dev->txtdm = mcasp_get_reg(dev->base + DAVINCI_MCASP_TXTDM_REG);
++ dev->wfifoctl = mcasp_get_reg(dev->base + MCASP_VER3_WFIFOCTL);
++
++ dev->gblctlr = mcasp_get_reg(dev->base +
++ DAVINCI_MCASP_GBLCTLR_REG);
++ dev->rxmask = mcasp_get_reg(dev->base +
++ DAVINCI_MCASP_RXMASK_REG);
++ dev->rxfmt = mcasp_get_reg(dev->base + DAVINCI_MCASP_RXFMT_REG);
++ dev->rxfmctl = mcasp_get_reg(dev->base +
++ DAVINCI_MCASP_RXFMCTL_REG);
++ dev->aclkrctl = mcasp_get_reg(dev->base +
++ DAVINCI_MCASP_ACLKRCTL_REG);
++ dev->ahclkrctl = mcasp_get_reg(dev->base +
++ DAVINCI_MCASP_AHCLKRCTL_REG);
++ dev->rxtdm = mcasp_get_reg(dev->base + DAVINCI_MCASP_RXTDM_REG);
++ dev->rfifoctl = mcasp_get_reg(dev->base + MCASP_VER3_RFIFOCTL);
++
++ for (idx = 0; idx < dev->num_serializer; idx++) {
++ dev->xrsrctl[idx] = mcasp_get_reg(dev->base +
++ DAVINCI_MCASP_XRSRCTL_REG(idx));
++ }
++
++ dev->pfunc = mcasp_get_reg(dev->base + DAVINCI_MCASP_PFUNC_REG);
++ dev->pdir = mcasp_get_reg(dev->base + DAVINCI_MCASP_PDIR_REG);
++ }
++
++ ret = pm_runtime_put_sync(&pdev->dev);
++ if (ret < 0)
++ dev_err(&pdev->dev, "failed to put runtime pm\n");
++
++ /* only values < 0 indicate errors */
++ return IS_ERR_VALUE(ret) ? ret : 0;
++}
++
++static int davinci_mcasp_resume(struct platform_device *pdev)
++{
++ int ret = 0, idx;
++ struct davinci_audio_dev *dev = dev_get_drvdata(&pdev->dev);
++
++ ret = pm_runtime_get_sync(&pdev->dev);
++ if (ret < 0)
++ dev_err(&pdev->dev, "failed to get runtime pm\n");
++
++ if (dev->version == MCASP_VERSION_3) {
++ mcasp_set_reg(dev->base + DAVINCI_MCASP_GBLCTLX_REG,
++ dev->gblctlx);
++ mcasp_set_reg(dev->base + DAVINCI_MCASP_TXMASK_REG,
++ dev->txmask);
++ mcasp_set_reg(dev->base + DAVINCI_MCASP_TXFMT_REG, dev->txfmt);
++ mcasp_set_reg(dev->base + DAVINCI_MCASP_TXFMCTL_REG,
++ dev->txfmctl);
++ mcasp_set_reg(dev->base + DAVINCI_MCASP_ACLKXCTL_REG,
++ dev->aclkxctl);
++ mcasp_set_reg(dev->base + DAVINCI_MCASP_AHCLKXCTL_REG,
++ dev->ahclkxctl);
++ mcasp_set_reg(dev->base + DAVINCI_MCASP_TXTDM_REG, dev->txtdm);
++ mcasp_set_reg(dev->base + MCASP_VER3_WFIFOCTL, dev->wfifoctl);
++
++ mcasp_set_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG,
++ dev->gblctlr);
++ mcasp_set_reg(dev->base + DAVINCI_MCASP_RXMASK_REG,
++ dev->rxmask);
++ mcasp_set_reg(dev->base + DAVINCI_MCASP_RXFMT_REG, dev->rxfmt);
++ mcasp_set_reg(dev->base + DAVINCI_MCASP_RXFMCTL_REG,
++ dev->rxfmctl);
++ mcasp_set_reg(dev->base + DAVINCI_MCASP_ACLKRCTL_REG,
++ dev->aclkrctl);
++ mcasp_set_reg(dev->base + DAVINCI_MCASP_AHCLKRCTL_REG,
++ dev->ahclkrctl);
++ mcasp_set_reg(dev->base + DAVINCI_MCASP_RXTDM_REG, dev->rxtdm);
++ mcasp_set_reg(dev->base + MCASP_VER3_RFIFOCTL, dev->rfifoctl);
++
++ for (idx = 0; idx < dev->num_serializer; idx++) {
++ mcasp_set_reg((dev->base +
++ DAVINCI_MCASP_XRSRCTL_REG(idx)),
++ dev->xrsrctl[idx]);
++ }
++
++ mcasp_set_reg(dev->base + DAVINCI_MCASP_PFUNC_REG, dev->pfunc);
++ mcasp_set_reg(dev->base + DAVINCI_MCASP_PDIR_REG, dev->pdir);
++ }
++ /* only values < 0 indicate errors */
++ return IS_ERR_VALUE(ret) ? ret : 0;
++}
++#endif
++
+ static struct platform_driver davinci_mcasp_driver = {
+ .probe = davinci_mcasp_probe,
+ .remove = davinci_mcasp_remove,
++#ifdef CONFIG_PM
++ .suspend = davinci_mcasp_suspend,
++ .resume = davinci_mcasp_resume,
++#endif
+ .driver = {
+ .name = "davinci-mcasp",
+ .owner = THIS_MODULE,
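The davinci-mcasp.c changes above drop the explicit clk_get()/clk_enable() handling in favour of runtime PM, so the functional clock is managed through pm_runtime_get_sync()/pm_runtime_put_sync(). As a rough, self-contained sketch of that pattern, assuming a generic platform driver rather than the McASP driver itself (all names below are placeholders):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_probe(struct platform_device *pdev)
{
	int ret;

	/* Hand clock management to the PM core instead of clk_enable(). */
	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to get runtime pm\n");
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* ... the device is powered and clocked from here on ... */

	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

This mirrors what the hunks do in davinci_mcasp_probe(), davinci_mcasp_remove() and the trigger callback.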
+diff --git a/sound/soc/davinci/davinci-mcasp.h b/sound/soc/davinci/davinci-mcasp.h
+index 4681acc..abbb7cc 100644
+--- a/sound/soc/davinci/davinci-mcasp.h
++++ b/sound/soc/davinci/davinci-mcasp.h
+@@ -19,7 +19,7 @@
+ #define DAVINCI_MCASP_H
+
+ #include <linux/io.h>
+-#include <mach/asp.h>
++#include <asm/hardware/asp.h>
+ #include "davinci-pcm.h"
+
+ #define DAVINCI_MCASP_RATES SNDRV_PCM_RATE_8000_96000
+@@ -38,6 +38,7 @@ enum {
+
+ struct davinci_audio_dev {
+ struct davinci_pcm_dma_params dma_params[2];
++ struct device *dev;
+ void __iomem *base;
+ int sample_rate;
+ struct clk *clk;
+@@ -54,6 +55,30 @@ struct davinci_audio_dev {
+ /* McASP FIFO related */
+ u8 txnumevt;
+ u8 rxnumevt;
++
++ /* backup related */
++ unsigned int *xrsrctl;
++ unsigned int pfunc;
++ unsigned int pdir;
++
++ unsigned int gblctlx;
++ unsigned int txmask;
++ unsigned int txfmt;
++ unsigned int txfmctl;
++ unsigned int aclkxctl;
++ unsigned int ahclkxctl;
++ unsigned int txtdm;
++ unsigned int wfifoctl;
++
++ unsigned int gblctlr;
++ unsigned int rxmask;
++ unsigned int rxfmt;
++ unsigned int rxfmctl;
++ unsigned int aclkrctl;
++ unsigned int ahclkrctl;
++ unsigned int rxtdm;
++ unsigned int rfifoctl;
++
+ };
+
+ #endif /* DAVINCI_MCASP_H */
+diff --git a/sound/soc/davinci/davinci-pcm.c b/sound/soc/davinci/davinci-pcm.c
+index d5fe08c..83d5312 100644
+--- a/sound/soc/davinci/davinci-pcm.c
++++ b/sound/soc/davinci/davinci-pcm.c
+@@ -265,16 +265,18 @@ static int allocate_sram(struct snd_pcm_substream *substream, unsigned size,
+ {
+ struct snd_dma_buffer *buf = &substream->dma_buffer;
+ struct snd_dma_buffer *iram_dma = NULL;
+- dma_addr_t iram_phys = 0;
++ phys_addr_t iram_phys;
+ void *iram_virt = NULL;
+
+ if (buf->private_data || !size)
+ return 0;
+
+ ppcm->period_bytes_max = size;
+- iram_virt = sram_alloc(size, &iram_phys);
++ iram_virt = (void *)gen_pool_alloc(davinci_gen_pool, size);
+ if (!iram_virt)
+ goto exit1;
++ iram_phys = gen_pool_virt_to_phys(davinci_gen_pool,
++ (unsigned long)iram_virt);
+ iram_dma = kzalloc(sizeof(*iram_dma), GFP_KERNEL);
+ if (!iram_dma)
+ goto exit2;
+@@ -286,7 +288,7 @@ static int allocate_sram(struct snd_pcm_substream *substream, unsigned size,
+ return 0;
+ exit2:
+ if (iram_virt)
+- sram_free(iram_virt, size);
++ gen_pool_free(davinci_gen_pool, (unsigned long)iram_virt, size);
+ exit1:
+ return -ENOMEM;
+ }
+@@ -820,7 +822,8 @@ static void davinci_pcm_free(struct snd_pcm *pcm)
+ buf->area = NULL;
+ iram_dma = buf->private_data;
+ if (iram_dma) {
+- sram_free(iram_dma->area, iram_dma->bytes);
++ gen_pool_free(davinci_gen_pool,
++ (unsigned long)iram_dma->area, iram_dma->bytes);
+ kfree(iram_dma);
+ }
+ }
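The davinci-pcm.c hunks above replace the DaVinci-specific sram_alloc()/sram_free() calls with the generic allocator (genalloc) operating on davinci_gen_pool, which is assumed to be created by platform code elsewhere. A minimal sketch of the genalloc calls involved, with hypothetical names for everything except the library functions themselves:

#include <linux/errno.h>
#include <linux/genalloc.h>
#include <linux/types.h>

/* Allocate a DMA-able buffer from an existing SRAM pool, translate it to
 * a physical address, then release it again. Illustrative only. */
static int example_sram_buffer(struct gen_pool *pool, size_t size)
{
	unsigned long virt;
	phys_addr_t phys;

	virt = gen_pool_alloc(pool, size);
	if (!virt)
		return -ENOMEM;

	phys = gen_pool_virt_to_phys(pool, virt);
	/* ... program the EDMA engine with 'phys' ... */

	gen_pool_free(pool, virt, size);
	return 0;
}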
+diff --git a/sound/soc/davinci/davinci-pcm.h b/sound/soc/davinci/davinci-pcm.h
+index c0d6c9b..2c3eadd 100644
+--- a/sound/soc/davinci/davinci-pcm.h
++++ b/sound/soc/davinci/davinci-pcm.h
+@@ -13,7 +13,7 @@
+ #define _DAVINCI_PCM_H
+
+ #include <mach/edma.h>
+-#include <mach/asp.h>
++#include <asm/hardware/asp.h>
+
+
+ struct davinci_pcm_dma_params {
+diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
+index adb372d..216e33a 100644
+--- a/tools/perf/util/hist.c
++++ b/tools/perf/util/hist.c
+@@ -230,18 +230,6 @@ struct hist_entry *__hists__add_entry(struct hists *hists,
+ if (!cmp) {
+ he->period += period;
+ ++he->nr_events;
+-
+- /* If the map of an existing hist_entry has
+- * become out-of-date due to an exec() or
+- * similar, update it. Otherwise we will
+- * mis-adjust symbol addresses when computing
+- * the history counter to increment.
+- */
+- if (he->ms.map != entry->ms.map) {
+- he->ms.map = entry->ms.map;
+- if (he->ms.map)
+- he->ms.map->referenced = true;
+- }
+ goto out;
+ }
+