author     blogic <blogic@3c298f89-4303-0410-b956-a3cf2f4a3e73>    2010-12-12 22:57:16 +0000
committer  blogic <blogic@3c298f89-4303-0410-b956-a3cf2f4a3e73>    2010-12-12 22:57:16 +0000
commit     18a076fccdba90cd7bea1dd4cff1b6b0559b75f0 (patch)
tree       24f859f8849a0d5180c1bef85c758ce70aa43c80
parent     8d8d887cb0eb75aed9192aadf2000308292548d1 (diff)
[lantiq] adds new lantiq kernel. Once the codebase is fully tested and known to be working on all the devices previously supported by ifxmips, we will drop ifxmips support.
git-svn-id: svn://svn.openwrt.org/openwrt/trunk@24526 3c298f89-4303-0410-b956-a3cf2f4a3e73
52 files changed, 40307 insertions, 0 deletions
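
The new driver's MIB interface (PPE_ATM_MIB_CELL, PPE_ATM_MIB_AAL5 and PPE_ATM_MIB_VCC, defined in package/lqdsl/src/ifx_atm.h below) consists of ordinary _IOW/_IOWR command words, which ppe_ioctl() in ifxmips_atm_core.c validates by magic number, command number and direction before touching user memory. The following stand-alone sketch only illustrates how such a command word decodes; it assumes nothing beyond the standard <linux/ioctl.h> macros, and the structure is a stand-in mirroring atm_aal5_vcc_x_t rather than a verbatim copy from the commit.

/*
 * Hedged illustration: decode the ioctl command word the same way
 * ppe_ioctl() checks it (magic, command number, payload size, direction).
 */
#include <stdio.h>
#include <linux/ioctl.h>   /* _IOWR, _IOC_TYPE, _IOC_NR, _IOC_SIZE, _IOC_DIR */

/* Stand-in with the same field layout as atm_aal5_vcc_x_t in ifx_atm.h. */
typedef struct {
    unsigned int aal5VccCrcErrors;
    unsigned int aal5VccSarTimeOuts;
    unsigned int aal5VccOverSizedSDUs;
} atm_aal5_vcc_t;

typedef struct {
    int vpi;
    int vci;
    atm_aal5_vcc_t mib_vcc;
} atm_aal5_vcc_x_t;

#define PPE_ATM_IOC_MAGIC 'o'
#define PPE_ATM_MIB_VCC   _IOWR(PPE_ATM_IOC_MAGIC, 2, atm_aal5_vcc_x_t)
#define PPE_ATM_IOC_MAXNR 3

int main(void)
{
    unsigned int cmd = PPE_ATM_MIB_VCC;

    /* Same sanity checks the driver performs in ppe_ioctl(). */
    printf("magic '%c', nr %u (must be < %d), payload %u bytes, dir %s\n",
           _IOC_TYPE(cmd), (unsigned)_IOC_NR(cmd), PPE_ATM_IOC_MAXNR,
           (unsigned)_IOC_SIZE(cmd),
           _IOC_DIR(cmd) == (_IOC_READ | _IOC_WRITE) ? "read+write" : "other");
    return 0;
}
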
diff --git a/package/lqdsl-app/Makefile b/package/lqdsl-app/Makefile new file mode 100644 index 0000000000..18ce2a9a54 --- /dev/null +++ b/package/lqdsl-app/Makefile @@ -0,0 +1,81 @@ +# +# Copyright (C) 2009-2010 OpenWrt.org +# +# This is free software, licensed under the GNU General Public License v2. +# See /LICENSE for more information. +# +# ralph / blogic + +include $(TOPDIR)/rules.mk +include $(INCLUDE_DIR)/kernel.mk + +PKG_BASE_NAME:=dsl_cpe_control_danube +PKG_VERSION:=3.24.4.4 +PKG_RELEASE:=1 +PKG_SOURCE:=$(PKG_BASE_NAME)-$(PKG_VERSION).tar.gz +PKG_BUILD_DIR:=$(BUILD_DIR)/dsl_cpe_control-$(PKG_VERSION) +PKG_SOURCE_URL:=http://mirror2.openwrt.org/sources/ +PKG_MD5SUM:=ee315306626b68794d3d3636dabfe161 + +include $(INCLUDE_DIR)/package.mk + +define Package/lqdsl-app + SECTION:=net + CATEGORY:=Network + TITLE:=Lantiq DSL userland tool + URL:=http://www.lantiq.com/ + MAINTAINER:=Lantiq + DEPENDS:=@TARGET_lantiq_xway +kmod-lqdsl +libpthread +endef + +define Package/lqdsl-app/description + Infineon DSL CPE API for Amazon SE, Danube and Vinax. +endef + +IFX_DSL_MAX_DEVICE=1 +IFX_DSL_LINES_PER_DEVICE=1 +IFX_DSL_CHANNELS_PER_LINE=1 +#CONFIG_IFX_CLI=y + +CONFIGURE_ARGS += \ + --with-max-device="$(IFX_DSL_MAX_DEVICE)" \ + --with-lines-per-device="$(IFX_DSL_LINES_PER_DEVICE)" \ + --with-channels-per-line="$(IFX_DSL_CHANNELS_PER_LINE)" \ + --enable-danube \ + --enable-driver-include="-I$(STAGING_DIR)/usr/include" \ + --enable-debug-prints \ + --enable-add-appl-cflags="-DMAX_CLI_PIPES=2" \ + --enable-cmv-scripts \ + --enable-debug-tool-interface \ + --enable-adsl-led \ + --enable-dsl-ceoc \ + --enable-script-notification \ + --enable-dsl-pm \ + --enable-dsl-pm-total \ + --enable-dsl-pm-history \ + --enable-dsl-pm-showtime \ + --enable-dsl-pm-channel-counters \ + --enable-dsl-pm-datapath-counters \ + --enable-dsl-pm-line-counters \ + --enable-dsl-pm-channel-thresholds \ + --enable-dsl-pm-datapath-thresholds \ + --enable-dsl-pm-line-thresholds \ + --enable-dsl-pm-optional-parameters + +ifeq ($(CONFIG_IFX_CLI),y) +CONFIGURE_ARGS += \ + --enable-cli-support \ + --enable-soap-support +endif + +TARGET_CFLAGS += -I$(LINUX_DIR)/include + +define Package/lqdsl-app/install + $(INSTALL_DIR) $(1)/etc/init.d + $(INSTALL_BIN) ./files/ifx_cpe_control_init.sh $(1)/etc/init.d/ + + $(INSTALL_DIR) $(1)/sbin + $(INSTALL_BIN) $(PKG_BUILD_DIR)/src/dsl_cpe_control $(1)/sbin +endef + +$(eval $(call BuildPackage,lqdsl-app)) diff --git a/package/lqdsl-app/files/ifx_cpe_control_init.sh b/package/lqdsl-app/files/ifx_cpe_control_init.sh new file mode 100644 index 0000000000..91316938ce --- /dev/null +++ b/package/lqdsl-app/files/ifx_cpe_control_init.sh @@ -0,0 +1,21 @@ +#!/bin/sh /etc/rc.common +# Copyright (C) 2008 OpenWrt.org +START=99 + +start() { + + # start CPE dsl daemon in the background + /sbin/dsl_cpe_control -i -f /lib/firmware/ModemHWE.bin & + +# PS=`ps` +# echo $PS | grep -q dsl_cpe_control && { +# # workaround for nfs: allow write to pipes for non-root +# while [ ! -e /tmp/pipe/dsl_cpe1_ack ] ; do sleep 1; done +# chmod a+w /tmp/pipe/dsl_* +# } + echo $PS | grep -q dsl_cpe_control || { + echo "Start of dsl_cpe_control failed!!!" 
+ false + } + +} diff --git a/package/lqdsl/Config.in b/package/lqdsl/Config.in new file mode 100644 index 0000000000..fe503b8538 --- /dev/null +++ b/package/lqdsl/Config.in @@ -0,0 +1,25 @@ + +choice + prompt "Firmware" + default LANTIQ_ANNEX_B + depends on PACKAGE_kmod-lqdsl + help + This option controls which firmware is loaded + +config LANTIQ_ANNEX_A + bool "Annex-A" + help + Annex-A + +config LANTIQ_ANNEX_B + bool "Annex-B" + help + Annex-B + +endchoice + +config LANTIQ_DSL_DEBUG + bool "lqdsl debugging" + depends on PACKAGE_kmod-lqdsl + help + Say Y, if you need lantiq-dsl to display debug messages. diff --git a/package/lqdsl/Makefile b/package/lqdsl/Makefile new file mode 100644 index 0000000000..bfadd7150d --- /dev/null +++ b/package/lqdsl/Makefile @@ -0,0 +1,158 @@ +# +# Copyright (C) 2009-2010 OpenWrt.org +# +# This is free software, licensed under the GNU General Public License v2. +# See /LICENSE for more information. +# +# ralph / blogic + +include $(TOPDIR)/rules.mk +include $(INCLUDE_DIR)/kernel.mk + +PKG_NAME:=lqdsl +PKG_BASE_NAME:=drv_dsl_cpe_api_danube +PKG_VERSION:=3.24.4.4 +PKG_RELEASE:=2 +PKG_SOURCE:=$(PKG_BASE_NAME)-$(PKG_VERSION).tar.gz +PKG_BUILD_DIR:=$(KERNEL_BUILD_DIR)/drv_dsl_cpe_api-$(PKG_VERSION) +PKG_SOURCE_URL:=http://mirror2.openwrt.org/sources/ +PKG_MD5SUM:=c45bc531c1ed2ac80f68fb986b63bb87 + +ifeq ($(DUMP),) + STAMP_CONFIGURED:=$(strip $(STAMP_CONFIGURED))_$(shell grep '^CONFIG_LANTIQ_DSL_' $(TOPDIR)/.config | md5s) +endif + +FW_BASE_NAME:=dsl_danube_firmware_adsl +FW_A_VER:=02.04.04.00.00.01 +FW_B_VER:=02.04.01.07.00.02 +FW_A_FILE_VER:=244001 +FW_B_FILE_VER:=241702 +FW_A_MD5:=f717db3067a0049a26e233ab11238710 +FW_B_MD5:=349de7cd20368f4ac9b7e8322114a512 + +PATCH_DIR ?= ./patches$(if $(wildcard ./patches-$(KERNEL_PATCHVER)),-$(KERNEL_PATCHVER)) + +include $(INCLUDE_DIR)/package.mk + +define KernelPackage/lqdsl + SECTION:=sys + CATEGORY:=Kernel modules + SUBMENU:=Network Devices + TITLE:=Lantiq dsl driver + URL:=http://www.lantiq.com/ + MAINTAINER:=Lantiq + DEPENDS:=@TARGET_lantiq_xway +kmod-atm + FILES:=$(PKG_BUILD_DIR)/src/mei/lantiq_mei.ko \ + $(PKG_BUILD_DIR)/src/drv_dsl_cpe_api.ko \ + $(PKG_BUILD_DIR)/src/mei/lantiq_atm.ko + AUTOLOAD:=$(call AutoLoad,50,lantiq_mei drv_dsl_cpe_api lantiq_atm) +endef + +define KernelPackage/lqdsl/description + Infineon DSL CPE API for Amazon SE, Danube and Vinax. + + This package contains the DSL CPE API driver for Amazon SE & Danube. 
+ + Supported Devices: + - Amazon SE + - Danube +endef + +define KernelPackage/lqdsl/config + source "$(SOURCE)/Config.in" +endef + +define Download/annex-a + FILE:=$(FW_BASE_NAME)_a-$(FW_A_VER).tar.gz + URL:=http://mirror2.openwrt.org/sources/ + MD5SUM:=$(FW_A_MD5) +endef +$(eval $(call Download,annex-a)) + +define Download/annex-b + FILE:=$(FW_BASE_NAME)_b-$(FW_B_VER).tar.gz + URL:=http://mirror2.openwrt.org/sources/ + MD5SUM:=$(FW_B_MD5) +endef +$(eval $(call Download,annex-b)) + +IFX_DSL_MAX_DEVICE=1 +IFX_DSL_LINES_PER_DEVICE=1 +IFX_DSL_CHANNELS_PER_LINE=1 + +CONFIGURE_ARGS += --enable-kernel-include="$(LINUX_DIR)/include" \ + --with-max-device="$(IFX_DSL_MAX_DEVICE)" \ + --with-lines-per-device="$(IFX_DSL_LINES_PER_DEVICE)" \ + --with-channels-per-line="$(IFX_DSL_CHANNELS_PER_LINE)" \ + --enable-danube \ + --enable-add-drv-cflags="-DMODULE" \ + --disable-dsl-delt-static \ + --disable-adsl-led \ + --enable-dsl-ceoc \ + --enable-dsl-pm \ + --enable-dsl-pm-total \ + --enable-dsl-pm-history \ + --enable-dsl-pm-showtime \ + --enable-dsl-pm-channel-counters \ + --enable-dsl-pm-datapath-counters \ + --enable-dsl-pm-line-counters \ + --enable-dsl-pm-channel-thresholds \ + --enable-dsl-pm-datapath-thresholds \ + --enable-dsl-pm-line-thresholds \ + --enable-dsl-pm-optional-parameters \ + --enable-linux-26 \ + --enable-kernelbuild="$(LINUX_DIR)" \ + ARCH=$(LINUX_KARCH) + +EXTRA_CFLAGS = -fno-pic -mno-abicalls -mlong-calls -G 0 + +ifeq ($(CONFIG_LANTIQ_DSL_DEBUG),y) +CONFIGURE_ARGS += \ + --enable-debug=yes \ + --enable-debug-prints=yes +EXTRA_CFLAGS += -DDEBUG +endif + +define Build/Prepare + $(PKG_UNPACK) + $(INSTALL_DIR) $(PKG_BUILD_DIR)/src/mei/ + $(CP) ./src/* $(PKG_BUILD_DIR)/src/mei/ + $(Build/Patch) +ifeq ($(CONFIG_LANTIQ_DSL_FIRMWARE),) + $(TAR) -C $(PKG_BUILD_DIR) -xzf $(DL_DIR)/$(FW_BASE_NAME)_a-$(FW_A_VER).tar.gz + $(TAR) -C $(PKG_BUILD_DIR) -xzf $(DL_DIR)/$(FW_BASE_NAME)_b-$(FW_B_VER).tar.gz +endif +endef + +define Build/Configure + (cd $(PKG_BUILD_DIR); aclocal && autoconf && automake) + $(call Build/Configure/Default) +endef + +define Build/Compile + cd $(LINUX_DIR); \ + ARCH=mips CROSS_COMPILE="$(KERNEL_CROSS)" \ + $(MAKE) M=$(PKG_BUILD_DIR)/src/mei/ V=1 modules + $(call Build/Compile/Default) +endef + +define Build/InstallDev + $(INSTALL_DIR) $(1)/usr/include + $(CP) $(PKG_BUILD_DIR)/src/include/drv_dsl_cpe_api.h $(1)/usr/include + $(CP) $(PKG_BUILD_DIR)/src/include/drv_dsl_cpe_api_ioctl.h $(1)/usr/include + $(CP) $(PKG_BUILD_DIR)/src/include/drv_dsl_cpe_api_adslmib.h $(1)/usr/include + $(CP) $(PKG_BUILD_DIR)/src/include/drv_dsl_cpe_api_adslmib_ioctl.h $(1)/usr/include + $(CP) $(PKG_BUILD_DIR)/src/include/drv_dsl_cpe_api_g997.h $(1)/usr/include + $(CP) $(PKG_BUILD_DIR)/src/include/drv_dsl_cpe_api_types.h $(1)/usr/include + $(CP) $(PKG_BUILD_DIR)/src/include/drv_dsl_cpe_api_pm.h $(1)/usr/include + $(CP) $(PKG_BUILD_DIR)/src/include/drv_dsl_cpe_api_error.h $(1)/usr/include + $(CP) $(PKG_BUILD_DIR)/src/include/drv_dsl_cpe_danube_ctx.h $(1)/usr/include + $(CP) $(PKG_BUILD_DIR)/src/include/drv_dsl_cpe_cmv_danube.h $(1)/usr/include +endef + +define KernelPackage/lqdsl/install + $(INSTALL_DIR) $(1)/lib/firmware/ + $(CP) $(PKG_BUILD_DIR)/$(FW_BASE_NAME)_$(if $(CONFIG_LANTIQ_ANNEX_A),a_$(FW_A_FILE_VER),b_$(FW_B_FILE_VER)).bin $(1)/lib/firmware/ModemHWE.bin +endef + +$(eval $(call KernelPackage,lqdsl)) diff --git a/package/lqdsl/patches/100-dsl_compat.patch b/package/lqdsl/patches/100-dsl_compat.patch new file mode 100644 index 0000000000..334ac6736e --- /dev/null +++ 
b/package/lqdsl/patches/100-dsl_compat.patch @@ -0,0 +1,62 @@ +--- a/src/include/drv_dsl_cpe_device_danube.h ++++ b/src/include/drv_dsl_cpe_device_danube.h +@@ -24,7 +24,7 @@ + #include "drv_dsl_cpe_simulator_danube.h" + #else + /* Include for the low level driver interface header file */ +-#include "asm/ifx/ifx_mei_bsp.h" ++#include "mei/ifxmips_mei_interface.h" + #endif /* defined(DSL_CPE_SIMULATOR_DRIVER) && defined(WIN32)*/ + + #define DSL_MAX_LINE_NUMBER 1 +--- a/src/common/drv_dsl_cpe_os_linux.c ++++ b/src/common/drv_dsl_cpe_os_linux.c +@@ -11,6 +11,7 @@ + #ifdef __LINUX__ + + #define DSL_INTERN ++#include <linux/device.h> + + #include "drv_dsl_cpe_api.h" + #include "drv_dsl_cpe_api_ioctl.h" +@@ -1058,6 +1059,7 @@ static void DSL_DRV_DebugInit(void) + /* Entry point of driver */ + int __init DSL_ModuleInit(void) + { ++ struct class *dsl_class; + DSL_int_t i; + + printk(DSL_DRV_CRLF DSL_DRV_CRLF "Infineon CPE API Driver version: %s" DSL_DRV_CRLF, +@@ -1104,7 +1106,8 @@ int __init DSL_ModuleInit(void) + } + + DSL_DRV_DevNodeInit(); +- ++ dsl_class = class_create(THIS_MODULE, "dsl_cpe_api"); ++ device_create(dsl_class, NULL, MKDEV(DRV_DSL_CPE_API_DEV_MAJOR, 0), NULL, "dsl_cpe_api"); + return 0; + } + +--- a/src/include/drv_dsl_cpe_os_linux.h ++++ b/src/include/drv_dsl_cpe_os_linux.h +@@ -17,17 +17,17 @@ + #endif + + #include <asm/ioctl.h> +-#include <linux/autoconf.h> ++#include <generated/autoconf.h> + #include <linux/module.h> + #include <linux/kernel.h> + #include <linux/init.h> + #include <linux/ctype.h> + #include <linux/version.h> + #include <linux/spinlock.h> +- ++#include <linux/sched.h> + + #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17)) +- #include <linux/utsrelease.h> ++ #include <generated/utsrelease.h> + #endif + + #include <linux/types.h> diff --git a/package/lqdsl/patches/500-portability.patch b/package/lqdsl/patches/500-portability.patch new file mode 100644 index 0000000000..48e9d173d4 --- /dev/null +++ b/package/lqdsl/patches/500-portability.patch @@ -0,0 +1,218 @@ +Index: drv_dsl_cpe_api-3.24.4.4/configure.in +=================================================================== +--- drv_dsl_cpe_api-3.24.4.4.orig/configure.in 2009-08-13 13:39:21.000000000 +0200 ++++ drv_dsl_cpe_api-3.24.4.4/configure.in 2010-10-14 02:14:55.000000000 +0200 +@@ -310,7 +310,7 @@ + AC_ARG_ENABLE(kernelbuild, + AC_HELP_STRING(--enable-kernel-build=x,Set the target kernel build path), + [ +- if test -e $enableval/include/linux/autoconf.h; then ++ if test -e $enableval/include/linux/autoconf.h -o -e $enableval/include/generated/autoconf.h; then + AC_SUBST([KERNEL_BUILD_PATH],[$enableval]) + else + AC_MSG_ERROR([The kernel build directory is not valid or not configured!]) +@@ -333,12 +333,12 @@ + echo Set the lib_ifxos include path $enableval + AC_SUBST([IFXOS_INCLUDE_PATH],[$enableval]) + else +- echo -e Set the default lib_ifxos include path $DEFAULT_IFXOS_INCLUDE_PATH ++ echo Set the default lib_ifxos include path $DEFAULT_IFXOS_INCLUDE_PATH + AC_SUBST([IFXOS_INCLUDE_PATH],[$DEFAULT_IFXOS_INCLUDE_PATH]) + fi + ], + [ +- echo -e Set the default lib_ifxos include path $DEFAULT_IFXOS_INCLUDE_PATH ++ echo Set the default lib_ifxos include path $DEFAULT_IFXOS_INCLUDE_PATH + AC_SUBST([IFXOS_INCLUDE_PATH],[$DEFAULT_IFXOS_INCLUDE_PATH]) + ] + ) +@@ -1702,73 +1702,73 @@ + AC_SUBST([DISTCHECK_CONFIGURE_PARAMS],[$CONFIGURE_OPTIONS]) + + AC_CONFIG_COMMANDS_PRE([ +-echo -e "------------------------------------------------------------------------" +-echo -e " Configuration for drv_dsl_cpe_api:" +-echo 
-e " Configure model type: $DSL_CONFIG_MODEL_TYPE" +-echo -e " Source code location: $srcdir" +-echo -e " Compiler: $CC" +-echo -e " Compiler c-flags: $CFLAGS" +-echo -e " Extra compiler c-flags: $EXTRA_DRV_CFLAGS" +-echo -e " Host System Type: $host" +-echo -e " Install path: $prefix" +-echo -e " Linux kernel include path: $KERNEL_INCL_PATH" +-echo -e " Linux kernel build path: $KERNEL_BUILD_PATH" +-echo -e " Linux kernel architecture: $KERNEL_ARCH" +-echo -e " Include IFXOS: $INCLUDE_DSL_CPE_API_IFXOS_SUPPORT" +-echo -e " IFXOS include path: $IFXOS_INCLUDE_PATH" +-echo -e " Driver Include Path $DSL_DRIVER_INCL_PATH" +-echo -e " DSL device: $DSL_DEVICE_NAME" +-echo -e " Max device number: $DSL_DRV_MAX_DEVICE_NUMBER" +-echo -e " Channels per line: $DSL_CHANNELS_PER_LINE" +-echo -e " Build lib (only for kernel 2.6) $DSL_CPE_API_LIBRARY_BUILD_2_6" +-echo -e " DSL data led flash frequency: $DSL_DATA_LED_FLASH_FREQUENCY Hz" +-echo -e " Disable debug prints: $DSL_DEBUG_DISABLE" +-echo -e " Preselection of max. debug level: $DSL_DBG_MAX_LEVEL_SET" +-echo -e " Preselected max. debug level: $DSL_DBG_MAX_LEVEL_PRE" +-echo -e " Include deprecated functions: $INCLUDE_DEPRECATED" +-echo -e " Include Device Exception Codes: $INCLUDE_DEVICE_EXCEPTION_CODES" +-echo -e " Include FW request support: $INCLUDE_FW_REQUEST_SUPPORT" +-echo -e " Include ADSL trace buffer: $INCLUDE_DSL_CPE_TRACE_BUFFER" +-echo -e " Include ADSL MIB: $INCLUDE_DSL_ADSL_MIB" +-echo -e " Include ADSL LED: $INCLUDE_ADSL_LED" +-echo -e " Include CEOC: $INCLUDE_DSL_CEOC" +-echo -e " Include config get support: $INCLUDE_DSL_CONFIG_GET" +-echo -e " Include System i/f configuration: $INCLUDE_DSL_SYSTEM_INTERFACE" +-echo -e " Include Resource Statistics: $INCLUDE_DSL_RESOURCE_STATISTICS" +-echo -e " Include Framing Parameters: $INCLUDE_DSL_FRAMING_PARAMETERS" +-echo -e " Include G997 Line Inventory: $INCLUDE_DSL_G997_LINE_INVENTORY" +-echo -e " Include G997 Framing Parameters: $INCLUDE_DSL_G997_FRAMING_PARAMETERS" +-echo -e " Include G997 per tone data: $INCLUDE_DSL_G997_PER_TONE" +-echo -e " Include G997 status: $INCLUDE_DSL_G997_STATUS" +-echo -e " Include G997 alarm: $INCLUDE_DSL_G997_ALARM" +-echo -e " Include DSL Bonding: $INCLUDE_DSL_BONDING" +-echo -e " Include Misc Line Status $INCLUDE_DSL_CPE_MISC_LINE_STATUS" +-echo -e " Include DELT: $INCLUDE_DSL_DELT" +-echo -e " Include DELT data static storage: $DSL_CPE_STATIC_DELT_DATA" +-echo -e " Include PM: $INCLUDE_DSL_PM" +-echo -e " Include PM config: $INCLUDE_DSL_CPE_PM_CONFIG" +-echo -e " Include PM total: $INCLUDE_DSL_CPE_PM_TOTAL_COUNTERS" +-echo -e " Include PM history: $INCLUDE_DSL_CPE_PM_HISTORY" +-echo -e " Include PM showtime: $INCLUDE_DSL_CPE_PM_SHOWTIME_COUNTERS" +-echo -e " Include PM optional: $INCLUDE_DSL_CPE_PM_OPTIONAL_PARAMETERS" +-echo -e " Include PM line: $INCLUDE_DSL_CPE_PM_LINE_COUNTERS" +-echo -e " Include PM line event showtime: $INCLUDE_DSL_CPE_PM_LINE_EVENT_SHOWTIME_COUNTERS" +-echo -e " Include PM channel: $INCLUDE_DSL_CPE_PM_CHANNEL_COUNTERS" +-echo -e " Include PM channel extended: $INCLUDE_DSL_CPE_PM_CHANNEL_EXT_COUNTERS" +-echo -e " Include PM data path: $INCLUDE_DSL_CPE_PM_DATA_PATH_COUNTERS" +-echo -e " Include PM data path failure: $INCLUDE_DSL_CPE_PM_DATA_PATH_FAILURE_COUNTERS" +-echo -e " Include PM ReTx: $INCLUDE_DSL_CPE_PM_RETX_COUNTERS" +-echo -e " Include PM line threshold: $INCLUDE_DSL_CPE_PM_LINE_THRESHOLDS" +-echo -e " Include PM channel threshold: $INCLUDE_DSL_CPE_PM_CHANNEL_THRESHOLDS" +-echo -e " Include PM data path threshold: 
$INCLUDE_DSL_CPE_PM_DATA_PATH_THRESHOLDS" +-echo -e " Include PM ReTx threshold: $INCLUDE_DSL_CPE_PM_RETX_THRESHOLDS" +-echo -e " Include FW memory free support: $INCLUDE_DSL_FIRMWARE_MEMORY_FREE" +-echo -e "----------------------- deprectated ! ----------------------------------" +-echo -e " Include PM line failure: $INCLUDE_DSL_CPE_PM_LINE_FAILURE_COUNTERS" +-echo -e "" +-echo -e " Settings:" +-echo -e " Configure options: $CONFIGURE_OPTIONS" +-echo -e "------------------------------------------------------------------------" ++echo "------------------------------------------------------------------------" ++echo " Configuration for drv_dsl_cpe_api:" ++echo " Configure model type: $DSL_CONFIG_MODEL_TYPE" ++echo " Source code location: $srcdir" ++echo " Compiler: $CC" ++echo " Compiler c-flags: $CFLAGS" ++echo " Extra compiler c-flags: $EXTRA_DRV_CFLAGS" ++echo " Host System Type: $host" ++echo " Install path: $prefix" ++echo " Linux kernel include path: $KERNEL_INCL_PATH" ++echo " Linux kernel build path: $KERNEL_BUILD_PATH" ++echo " Linux kernel architecture: $KERNEL_ARCH" ++echo " Include IFXOS: $INCLUDE_DSL_CPE_API_IFXOS_SUPPORT" ++echo " IFXOS include path: $IFXOS_INCLUDE_PATH" ++echo " Driver Include Path $DSL_DRIVER_INCL_PATH" ++echo " DSL device: $DSL_DEVICE_NAME" ++echo " Max device number: $DSL_DRV_MAX_DEVICE_NUMBER" ++echo " Channels per line: $DSL_CHANNELS_PER_LINE" ++echo " Build lib (only for kernel 2.6) $DSL_CPE_API_LIBRARY_BUILD_2_6" ++echo " DSL data led flash frequency: $DSL_DATA_LED_FLASH_FREQUENCY Hz" ++echo " Disable debug prints: $DSL_DEBUG_DISABLE" ++echo " Preselection of max. debug level: $DSL_DBG_MAX_LEVEL_SET" ++echo " Preselected max. debug level: $DSL_DBG_MAX_LEVEL_PRE" ++echo " Include deprecated functions: $INCLUDE_DEPRECATED" ++echo " Include Device Exception Codes: $INCLUDE_DEVICE_EXCEPTION_CODES" ++echo " Include FW request support: $INCLUDE_FW_REQUEST_SUPPORT" ++echo " Include ADSL trace buffer: $INCLUDE_DSL_CPE_TRACE_BUFFER" ++echo " Include ADSL MIB: $INCLUDE_DSL_ADSL_MIB" ++echo " Include ADSL LED: $INCLUDE_ADSL_LED" ++echo " Include CEOC: $INCLUDE_DSL_CEOC" ++echo " Include config get support: $INCLUDE_DSL_CONFIG_GET" ++echo " Include System i/f configuration: $INCLUDE_DSL_SYSTEM_INTERFACE" ++echo " Include Resource Statistics: $INCLUDE_DSL_RESOURCE_STATISTICS" ++echo " Include Framing Parameters: $INCLUDE_DSL_FRAMING_PARAMETERS" ++echo " Include G997 Line Inventory: $INCLUDE_DSL_G997_LINE_INVENTORY" ++echo " Include G997 Framing Parameters: $INCLUDE_DSL_G997_FRAMING_PARAMETERS" ++echo " Include G997 per tone data: $INCLUDE_DSL_G997_PER_TONE" ++echo " Include G997 status: $INCLUDE_DSL_G997_STATUS" ++echo " Include G997 alarm: $INCLUDE_DSL_G997_ALARM" ++echo " Include DSL Bonding: $INCLUDE_DSL_BONDING" ++echo " Include Misc Line Status $INCLUDE_DSL_CPE_MISC_LINE_STATUS" ++echo " Include DELT: $INCLUDE_DSL_DELT" ++echo " Include DELT data static storage: $DSL_CPE_STATIC_DELT_DATA" ++echo " Include PM: $INCLUDE_DSL_PM" ++echo " Include PM config: $INCLUDE_DSL_CPE_PM_CONFIG" ++echo " Include PM total: $INCLUDE_DSL_CPE_PM_TOTAL_COUNTERS" ++echo " Include PM history: $INCLUDE_DSL_CPE_PM_HISTORY" ++echo " Include PM showtime: $INCLUDE_DSL_CPE_PM_SHOWTIME_COUNTERS" ++echo " Include PM optional: $INCLUDE_DSL_CPE_PM_OPTIONAL_PARAMETERS" ++echo " Include PM line: $INCLUDE_DSL_CPE_PM_LINE_COUNTERS" ++echo " Include PM line event showtime: $INCLUDE_DSL_CPE_PM_LINE_EVENT_SHOWTIME_COUNTERS" ++echo " Include PM channel: $INCLUDE_DSL_CPE_PM_CHANNEL_COUNTERS" ++echo " 
Include PM channel extended: $INCLUDE_DSL_CPE_PM_CHANNEL_EXT_COUNTERS" ++echo " Include PM data path: $INCLUDE_DSL_CPE_PM_DATA_PATH_COUNTERS" ++echo " Include PM data path failure: $INCLUDE_DSL_CPE_PM_DATA_PATH_FAILURE_COUNTERS" ++echo " Include PM ReTx: $INCLUDE_DSL_CPE_PM_RETX_COUNTERS" ++echo " Include PM line threshold: $INCLUDE_DSL_CPE_PM_LINE_THRESHOLDS" ++echo " Include PM channel threshold: $INCLUDE_DSL_CPE_PM_CHANNEL_THRESHOLDS" ++echo " Include PM data path threshold: $INCLUDE_DSL_CPE_PM_DATA_PATH_THRESHOLDS" ++echo " Include PM ReTx threshold: $INCLUDE_DSL_CPE_PM_RETX_THRESHOLDS" ++echo " Include FW memory free support: $INCLUDE_DSL_FIRMWARE_MEMORY_FREE" ++echo "----------------------- deprectated ! ----------------------------------" ++echo " Include PM line failure: $INCLUDE_DSL_CPE_PM_LINE_FAILURE_COUNTERS" ++echo "" ++echo " Settings:" ++echo " Configure options: $CONFIGURE_OPTIONS" ++echo "------------------------------------------------------------------------" + ]) + + AC_CONFIG_FILES([Makefile src/Makefile]) +Index: drv_dsl_cpe_api-3.24.4.4/src/Makefile.am +=================================================================== +--- drv_dsl_cpe_api-3.24.4.4.orig/src/Makefile.am 2009-07-03 14:06:34.000000000 +0200 ++++ drv_dsl_cpe_api-3.24.4.4/src/Makefile.am 2010-10-14 02:14:55.000000000 +0200 +@@ -303,7 +303,7 @@ + drv_dsl_cpe_api_OBJS = "$(subst .c,.o,$(filter %.c,$(drv_dsl_cpe_api_SOURCES)))" + + drv_dsl_cpe_api.ko: $(drv_dsl_cpe_api_SOURCES) +- @echo -e "drv_dsl_cpe_api: Making Linux 2.6.x kernel object" ++ @echo "drv_dsl_cpe_api: Making Linux 2.6.x kernel object" + if test ! -e common/drv_dsl_cpe_api.c ; then \ + echo "copy source files (as links only!)"; \ + for f in $(filter %.c,$(drv_dsl_cpe_api_SOURCES)); do \ +@@ -311,10 +311,10 @@ + cp -s $(addprefix @abs_srcdir@/,$$f) $(PWD)/`dirname $$f`/ ; \ + done \ + fi +- @echo -e "# drv_dsl_cpe_api: Generated to build Linux 2.6.x kernel object" > $(PWD)/Kbuild +- @echo -e "obj-m := $(subst .ko,.o,$@)" >> $(PWD)/Kbuild +- @echo -e "$(subst .ko,,$@)-y := $(drv_dsl_cpe_api_OBJS)" >> $(PWD)/Kbuild +- @echo -e "EXTRA_CFLAGS := $(CFLAGS) -DHAVE_CONFIG_H $(drv_dsl_cpe_api_CFLAGS) $(DSL_DRIVER_INCL_PATH) $(IFXOS_INCLUDE_PATH) -I@abs_srcdir@/include -I$(PWD)/include" >> $(PWD)/Kbuild ++ @echo "# drv_dsl_cpe_api: Generated to build Linux 2.6.x kernel object" > $(PWD)/Kbuild ++ @echo "obj-m := $(subst .ko,.o,$@)" >> $(PWD)/Kbuild ++ @echo "$(subst .ko,,$@)-y := $(drv_dsl_cpe_api_OBJS)" >> $(PWD)/Kbuild ++ @echo "EXTRA_CFLAGS := $(CFLAGS) -DHAVE_CONFIG_H $(drv_dsl_cpe_api_CFLAGS) $(DSL_DRIVER_INCL_PATH) $(IFXOS_INCLUDE_PATH) -I@abs_srcdir@/include -I$(PWD)/include" >> $(PWD)/Kbuild + $(MAKE) ARCH=@KERNEL_ARCH@ -C @KERNEL_BUILD_PATH@ O=@KERNEL_BUILD_PATH@ M=$(PWD) modules + + clean-generic: +Index: drv_dsl_cpe_api-3.24.4.4/src/include/drv_dsl_cpe_os_linux.h +=================================================================== +--- drv_dsl_cpe_api-3.24.4.4.orig/src/include/drv_dsl_cpe_os_linux.h 2010-10-14 02:14:55.000000000 +0200 ++++ drv_dsl_cpe_api-3.24.4.4/src/include/drv_dsl_cpe_os_linux.h 2010-10-14 02:14:55.000000000 +0200 +@@ -16,8 +16,6 @@ + extern "C" { + #endif + +-#include <asm/ioctl.h> +-#include <generated/autoconf.h> + #include <linux/module.h> + #include <linux/kernel.h> + #include <linux/init.h> +@@ -40,6 +38,7 @@ + #include <linux/poll.h> + #include <asm/uaccess.h> + #include <linux/smp_lock.h> ++#include <asm/ioctl.h> + + #ifdef INCLUDE_DSL_CPE_API_IFXOS_SUPPORT + /** IFXOS includes*/ diff --git 
a/package/lqdsl/src/Makefile b/package/lqdsl/src/Makefile new file mode 100644 index 0000000000..89c4e712d7 --- /dev/null +++ b/package/lqdsl/src/Makefile @@ -0,0 +1,3 @@ +obj-m = lantiq_mei.o lantiq_atm.o + +lantiq_atm-objs := ifxmips_atm_core.o ifxmips_atm_danube.o diff --git a/package/lqdsl/src/ifx_atm.h b/package/lqdsl/src/ifx_atm.h new file mode 100644 index 0000000000..32deca54e0 --- /dev/null +++ b/package/lqdsl/src/ifx_atm.h @@ -0,0 +1,108 @@ +/****************************************************************************** +** +** FILE NAME : ifx_atm.h +** PROJECT : UEIP +** MODULES : ATM +** +** DATE : 17 Jun 2009 +** AUTHOR : Xu Liang +** DESCRIPTION : Global ATM driver header file +** COPYRIGHT : Copyright (c) 2006 +** Infineon Technologies AG +** Am Campeon 1-12, 85579 Neubiberg, Germany +** +** This program is free software; you can redistribute it and/or modify +** it under the terms of the GNU General Public License as published by +** the Free Software Foundation; either version 2 of the License, or +** (at your option) any later version. +** +** HISTORY +** $Date $Author $Comment +** 07 JUL 2009 Xu Liang Init Version +*******************************************************************************/ + +#ifndef IFX_ATM_H +#define IFX_ATM_H + + + +/* + * ATM MIB + */ + +typedef struct { + __u32 ifHCInOctets_h; /*!< byte counter of ingress cells (upper 32 bits, total 64 bits) */ + __u32 ifHCInOctets_l; /*!< byte counter of ingress cells (lower 32 bits, total 64 bits) */ + __u32 ifHCOutOctets_h; /*!< byte counter of egress cells (upper 32 bits, total 64 bits) */ + __u32 ifHCOutOctets_l; /*!< byte counter of egress cells (lower 32 bits, total 64 bits) */ + __u32 ifInErrors; /*!< counter of error ingress cells */ + __u32 ifInUnknownProtos; /*!< counter of unknown ingress cells */ + __u32 ifOutErrors; /*!< counter of error egress cells */ +} atm_cell_ifEntry_t; + +typedef struct { + __u32 ifHCInOctets_h; /*!< byte counter of ingress packets (upper 32 bits, total 64 bits) */ + __u32 ifHCInOctets_l; /*!< byte counter of ingress packets (lower 32 bits, total 64 bits) */ + __u32 ifHCOutOctets_h; /*!< byte counter of egress packets (upper 32 bits, total 64 bits) */ + __u32 ifHCOutOctets_l; /*!< byte counter of egress packets (lower 32 bits, total 64 bits) */ + __u32 ifInUcastPkts; /*!< counter of ingress packets */ + __u32 ifOutUcastPkts; /*!< counter of egress packets */ + __u32 ifInErrors; /*!< counter of error ingress packets */ + __u32 ifInDiscards; /*!< counter of dropped ingress packets */ + __u32 ifOutErros; /*!< counter of error egress packets */ + __u32 ifOutDiscards; /*!< counter of dropped egress packets */ +} atm_aal5_ifEntry_t; + +typedef struct { + __u32 aal5VccCrcErrors; /*!< counter of ingress packets with CRC error */ + __u32 aal5VccSarTimeOuts; /*!< counter of ingress packets with Re-assemble timeout */ //no timer support yet + __u32 aal5VccOverSizedSDUs; /*!< counter of oversized ingress packets */ +} atm_aal5_vcc_t; + +typedef struct { + int vpi; /*!< VPI of the VCC to get MIB counters */ + int vci; /*!< VCI of the VCC to get MIB counters */ + atm_aal5_vcc_t mib_vcc; /*!< structure to get MIB counters */ +} atm_aal5_vcc_x_t; + +/*! + \brief ATM IOCTL Magic Number + */ +#define PPE_ATM_IOC_MAGIC 'o' +/*! + \brief ATM IOCTL Command - Get Cell Level MIB Counters + + This command is obsolete. User can get cell level MIB from DSL API. + This command uses structure "atm_cell_ifEntry_t" as parameter for output of MIB counters. 
+ */ +#define PPE_ATM_MIB_CELL _IOW(PPE_ATM_IOC_MAGIC, 0, atm_cell_ifEntry_t) +/*! + \brief ATM IOCTL Command - Get AAL5 Level MIB Counters + + Get AAL5 packet counters. + This command uses structure "atm_aal5_ifEntry_t" as parameter for output of MIB counters. + */ +#define PPE_ATM_MIB_AAL5 _IOW(PPE_ATM_IOC_MAGIC, 1, atm_aal5_ifEntry_t) +/*! + \brief ATM IOCTL Command - Get Per PVC MIB Counters + + Get AAL5 packet counters for each PVC. + This command uses structure "atm_aal5_vcc_x_t" as parameter for input of VPI/VCI information and output of MIB counters. + */ +#define PPE_ATM_MIB_VCC _IOWR(PPE_ATM_IOC_MAGIC, 2, atm_aal5_vcc_x_t) +/*! + \brief Total Number of ATM IOCTL Commands + */ +#define PPE_ATM_IOC_MAXNR 3 + +/*@}*/ + + + +#ifdef __KERNEL__ +struct port_cell_info { + unsigned int port_num; + unsigned int tx_link_rate[2]; +}; +#endif +#endif diff --git a/package/lqdsl/src/ifxmips_atm.h b/package/lqdsl/src/ifxmips_atm.h new file mode 100644 index 0000000000..ed90b5d4d7 --- /dev/null +++ b/package/lqdsl/src/ifxmips_atm.h @@ -0,0 +1,172 @@ +/****************************************************************************** +** +** FILE NAME : ifx_atm.h +** PROJECT : UEIP +** MODULES : ATM +** +** DATE : 17 Jun 2009 +** AUTHOR : Xu Liang +** DESCRIPTION : Global ATM driver header file +** COPYRIGHT : Copyright (c) 2006 +** Infineon Technologies AG +** Am Campeon 1-12, 85579 Neubiberg, Germany +** +** This program is free software; you can redistribute it and/or modify +** it under the terms of the GNU General Public License as published by +** the Free Software Foundation; either version 2 of the License, or +** (at your option) any later version. +** +** HISTORY +** $Date $Author $Comment +** 07 JUL 2009 Xu Liang Init Version +*******************************************************************************/ + +#ifndef IFX_ATM_H +#define IFX_ATM_H + + + +/*! + \defgroup IFX_ATM UEIP Project - ATM driver module + \brief UEIP Project - ATM driver module, support Danube, Amazon-SE, AR9, VR9. + */ + +/*! + \defgroup IFX_ATM_IOCTL IOCTL Commands + \ingroup IFX_ATM + \brief IOCTL Commands used by user application. + */ + +/*! + \defgroup IFX_ATM_STRUCT Structures + \ingroup IFX_ATM + \brief Structures used by user application. + */ + +/*! + \file ifx_atm.h + \ingroup IFX_ATM + \brief ATM driver header file + */ + + + +/* + * #################################### + * Definition + * #################################### + */ + +/*! 
+ \addtogroup IFX_ATM_STRUCT + */ +/*@{*/ + +/* + * ATM MIB + */ + +typedef struct { + __u32 ifHCInOctets_h; /*!< byte counter of ingress cells (upper 32 bits, total 64 bits) */ + __u32 ifHCInOctets_l; /*!< byte counter of ingress cells (lower 32 bits, total 64 bits) */ + __u32 ifHCOutOctets_h; /*!< byte counter of egress cells (upper 32 bits, total 64 bits) */ + __u32 ifHCOutOctets_l; /*!< byte counter of egress cells (lower 32 bits, total 64 bits) */ + __u32 ifInErrors; /*!< counter of error ingress cells */ + __u32 ifInUnknownProtos; /*!< counter of unknown ingress cells */ + __u32 ifOutErrors; /*!< counter of error egress cells */ +} atm_cell_ifEntry_t; + +typedef struct { + __u32 ifHCInOctets_h; /*!< byte counter of ingress packets (upper 32 bits, total 64 bits) */ + __u32 ifHCInOctets_l; /*!< byte counter of ingress packets (lower 32 bits, total 64 bits) */ + __u32 ifHCOutOctets_h; /*!< byte counter of egress packets (upper 32 bits, total 64 bits) */ + __u32 ifHCOutOctets_l; /*!< byte counter of egress packets (lower 32 bits, total 64 bits) */ + __u32 ifInUcastPkts; /*!< counter of ingress packets */ + __u32 ifOutUcastPkts; /*!< counter of egress packets */ + __u32 ifInErrors; /*!< counter of error ingress packets */ + __u32 ifInDiscards; /*!< counter of dropped ingress packets */ + __u32 ifOutErros; /*!< counter of error egress packets */ + __u32 ifOutDiscards; /*!< counter of dropped egress packets */ +} atm_aal5_ifEntry_t; + +typedef struct { + __u32 aal5VccCrcErrors; /*!< counter of ingress packets with CRC error */ + __u32 aal5VccSarTimeOuts; /*!< counter of ingress packets with Re-assemble timeout */ //no timer support yet + __u32 aal5VccOverSizedSDUs; /*!< counter of oversized ingress packets */ +} atm_aal5_vcc_t; + +typedef struct { + int vpi; /*!< VPI of the VCC to get MIB counters */ + int vci; /*!< VCI of the VCC to get MIB counters */ + atm_aal5_vcc_t mib_vcc; /*!< structure to get MIB counters */ +} atm_aal5_vcc_x_t; + +/*@}*/ + + + +/* + * #################################### + * IOCTL + * #################################### + */ + +/*! + \addtogroup IFX_ATM_IOCTL + */ +/*@{*/ + +/* + * ioctl Command + */ +/*! + \brief ATM IOCTL Magic Number + */ +#define PPE_ATM_IOC_MAGIC 'o' +/*! + \brief ATM IOCTL Command - Get Cell Level MIB Counters + + This command is obsolete. User can get cell level MIB from DSL API. + This command uses structure "atm_cell_ifEntry_t" as parameter for output of MIB counters. + */ +#define PPE_ATM_MIB_CELL _IOW(PPE_ATM_IOC_MAGIC, 0, atm_cell_ifEntry_t) +/*! + \brief ATM IOCTL Command - Get AAL5 Level MIB Counters + + Get AAL5 packet counters. + This command uses structure "atm_aal5_ifEntry_t" as parameter for output of MIB counters. + */ +#define PPE_ATM_MIB_AAL5 _IOW(PPE_ATM_IOC_MAGIC, 1, atm_aal5_ifEntry_t) +/*! + \brief ATM IOCTL Command - Get Per PVC MIB Counters + + Get AAL5 packet counters for each PVC. + This command uses structure "atm_aal5_vcc_x_t" as parameter for input of VPI/VCI information and output of MIB counters. + */ +#define PPE_ATM_MIB_VCC _IOWR(PPE_ATM_IOC_MAGIC, 2, atm_aal5_vcc_x_t) +/*! 
+ \brief Total Number of ATM IOCTL Commands + */ +#define PPE_ATM_IOC_MAXNR 3 + +/*@}*/ + + + +/* + * #################################### + * API + * #################################### + */ + +#ifdef __KERNEL__ +struct port_cell_info { + unsigned int port_num; + unsigned int tx_link_rate[2]; +}; +#endif + + + +#endif // IFX_ATM_H + diff --git a/package/lqdsl/src/ifxmips_atm_core.c b/package/lqdsl/src/ifxmips_atm_core.c new file mode 100644 index 0000000000..ae85d70650 --- /dev/null +++ b/package/lqdsl/src/ifxmips_atm_core.c @@ -0,0 +1,2510 @@ +/****************************************************************************** +** +** FILE NAME : ifxmips_atm_core.c +** PROJECT : UEIP +** MODULES : ATM +** +** DATE : 7 Jul 2009 +** AUTHOR : Xu Liang +** DESCRIPTION : ATM driver common source file (core functions) +** COPYRIGHT : Copyright (c) 2006 +** Infineon Technologies AG +** Am Campeon 1-12, 85579 Neubiberg, Germany +** +** This program is free software; you can redistribute it and/or modify +** it under the terms of the GNU General Public License as published by +** the Free Software Foundation; either version 2 of the License, or +** (at your option) any later version. +** +** HISTORY +** $Date $Author $Comment +** 07 JUL 2009 Xu Liang Init Version +*******************************************************************************/ + + + +/* + * #################################### + * Version No. + * #################################### + */ + +#define IFX_ATM_VER_MAJOR 1 +#define IFX_ATM_VER_MID 0 +#define IFX_ATM_VER_MINOR 8 + + + +/* + * #################################### + * Head File + * #################################### + */ + +/* + * Common Head File + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/version.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/proc_fs.h> +#include <linux/init.h> +#include <linux/ioctl.h> +#include <linux/atmdev.h> +#include <linux/atm.h> +#include <linux/clk.h> + +/* + * Chip Specific Head File + */ +#include <lantiq.h> +#include <lantiq_regs.h> +#include "ifxmips_atm_core.h" + + + +/* + * #################################### + * Kernel Version Adaption + * #################################### + */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) + #define MODULE_PARM_ARRAY(a, b) module_param_array(a, int, NULL, 0) + #define MODULE_PARM(a, b) module_param(a, int, 0) +#else + #define MODULE_PARM_ARRAY(a, b) MODULE_PARM(a, b) +#endif + + + +/*! + \addtogroup IFXMIPS_ATM_MODULE_PARAMS + */ +/*@{*/ +/* + * #################################### + * Parameters to Configure PPE + * #################################### + */ +/*! + \brief QSB cell delay variation due to concurrency + */ +static int qsb_tau = 1; /* QSB cell delay variation due to concurrency */ +/*! + \brief QSB scheduler burst length + */ +static int qsb_srvm = 0x0F; /* QSB scheduler burst length */ +/*! + \brief QSB time step, all legal values are 1, 2, 4 + */ +static int qsb_tstep = 4 ; /* QSB time step, all legal values are 1, 2, 4 */ + +/*! + \brief Write descriptor delay + */ +static int write_descriptor_delay = 0x20; /* Write descriptor delay */ + +/*! + \brief AAL5 padding byte ('~') + */ +static int aal5_fill_pattern = 0x007E; /* AAL5 padding byte ('~') */ +/*! + \brief Max frame size for RX + */ +static int aal5r_max_packet_size = 0x0700; /* Max frame size for RX */ +/*! + \brief Min frame size for RX + */ +static int aal5r_min_packet_size = 0x0000; /* Min frame size for RX */ +/*! 
+ \brief Max frame size for TX + */ +static int aal5s_max_packet_size = 0x0700; /* Max frame size for TX */ +/*! + \brief Min frame size for TX + */ +static int aal5s_min_packet_size = 0x0000; /* Min frame size for TX */ +/*! + \brief Drop error packet in RX path + */ +static int aal5r_drop_error_packet = 1; /* Drop error packet in RX path */ + +/*! + \brief Number of descriptors per DMA RX channel + */ +static int dma_rx_descriptor_length = 128; /* Number of descriptors per DMA RX channel */ +/*! + \brief Number of descriptors per DMA TX channel + */ +static int dma_tx_descriptor_length = 64; /* Number of descriptors per DMA TX channel */ +/*! + \brief PPE core clock cycles between descriptor write and effectiveness in external RAM + */ +static int dma_rx_clp1_descriptor_threshold = 38; +/*@}*/ + +MODULE_PARM(qsb_tau, "i"); +MODULE_PARM_DESC(qsb_tau, "Cell delay variation. Value must be > 0"); +MODULE_PARM(qsb_srvm, "i"); +MODULE_PARM_DESC(qsb_srvm, "Maximum burst size"); +MODULE_PARM(qsb_tstep, "i"); +MODULE_PARM_DESC(qsb_tstep, "n*32 cycles per sbs cycles n=1,2,4"); + +MODULE_PARM(write_descriptor_delay, "i"); +MODULE_PARM_DESC(write_descriptor_delay, "PPE core clock cycles between descriptor write and effectiveness in external RAM"); + +MODULE_PARM(aal5_fill_pattern, "i"); +MODULE_PARM_DESC(aal5_fill_pattern, "Filling pattern (PAD) for AAL5 frames"); +MODULE_PARM(aal5r_max_packet_size, "i"); +MODULE_PARM_DESC(aal5r_max_packet_size, "Max packet size in byte for downstream AAL5 frames"); +MODULE_PARM(aal5r_min_packet_size, "i"); +MODULE_PARM_DESC(aal5r_min_packet_size, "Min packet size in byte for downstream AAL5 frames"); +MODULE_PARM(aal5s_max_packet_size, "i"); +MODULE_PARM_DESC(aal5s_max_packet_size, "Max packet size in byte for upstream AAL5 frames"); +MODULE_PARM(aal5s_min_packet_size, "i"); +MODULE_PARM_DESC(aal5s_min_packet_size, "Min packet size in byte for upstream AAL5 frames"); +MODULE_PARM(aal5r_drop_error_packet, "i"); +MODULE_PARM_DESC(aal5r_drop_error_packet, "Non-zero value to drop error packet for downstream"); + +MODULE_PARM(dma_rx_descriptor_length, "i"); +MODULE_PARM_DESC(dma_rx_descriptor_length, "Number of descriptor assigned to DMA RX channel (>16)"); +MODULE_PARM(dma_tx_descriptor_length, "i"); +MODULE_PARM_DESC(dma_tx_descriptor_length, "Number of descriptor assigned to DMA TX channel (>16)"); +MODULE_PARM(dma_rx_clp1_descriptor_threshold, "i"); +MODULE_PARM_DESC(dma_rx_clp1_descriptor_threshold, "Descriptor threshold for cells with cell loss priority 1"); + + + +/* + * #################################### + * Definition + * #################################### + */ + +#define DUMP_SKB_LEN ~0 + + + +/* + * #################################### + * Declaration + * #################################### + */ + +/* + * Network Operations + */ +static int ppe_ioctl(struct atm_dev *, unsigned int, void *); +static int ppe_open(struct atm_vcc *); +static void ppe_close(struct atm_vcc *); +static int ppe_send(struct atm_vcc *, struct sk_buff *); +static int ppe_send_oam(struct atm_vcc *, void *, int); +static int ppe_change_qos(struct atm_vcc *, struct atm_qos *, int); + +/* + * ADSL LED + */ +static INLINE int adsl_led_flash(void); + +/* + * 64-bit operation used by MIB calculation + */ +static INLINE void u64_add_u32(ppe_u64_t, unsigned int, ppe_u64_t *); + +/* + * buffer manage functions + */ +static INLINE struct sk_buff* alloc_skb_rx(void); +static INLINE struct sk_buff* alloc_skb_tx(unsigned int); +struct sk_buff* atm_alloc_tx(struct atm_vcc *, unsigned int); 
+static INLINE void atm_free_tx_skb_vcc(struct sk_buff *, struct atm_vcc *); +static INLINE struct sk_buff *get_skb_rx_pointer(unsigned int); +static INLINE int get_tx_desc(unsigned int); + +/* + * mailbox handler and signal function + */ +static INLINE void mailbox_oam_rx_handler(void); +static INLINE void mailbox_aal_rx_handler(void); +#if defined(ENABLE_TASKLET) && ENABLE_TASKLET + static void do_ppe_tasklet(unsigned long); +#endif +static irqreturn_t mailbox_irq_handler(int, void *); +static INLINE void mailbox_signal(unsigned int, int); + +/* + * QSB & HTU setting functions + */ +static void set_qsb(struct atm_vcc *, struct atm_qos *, unsigned int); +static void qsb_global_set(void); +static INLINE void set_htu_entry(unsigned int, unsigned int, unsigned int, int, int); +static INLINE void clear_htu_entry(unsigned int); +static void validate_oam_htu_entry(void); +static void invalidate_oam_htu_entry(void); + +/* + * look up for connection ID + */ +static INLINE int find_vpi(unsigned int); +static INLINE int find_vpivci(unsigned int, unsigned int); +static INLINE int find_vcc(struct atm_vcc *); + +/* + * Debug Functions + */ +#if defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB + static void dump_skb(struct sk_buff *, u32, char *, int, int, int); +#else + #define dump_skb(skb, len, title, port, ch, is_tx) do {} while (0) +#endif + +/* + * Proc File Functions + */ +static INLINE void proc_file_create(void); +static INLINE void proc_file_delete(void); +static int proc_read_version(char *, char **, off_t, int, int *, void *); +static int proc_read_mib(char *, char **, off_t, int, int *, void *); +static int proc_write_mib(struct file *, const char *, unsigned long, void *); +#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC + static int proc_read_dbg(char *, char **, off_t, int, int *, void *); + static int proc_write_dbg(struct file *, const char *, unsigned long, void *); +#endif +#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC + static int proc_read_htu(char *, char **, off_t, int, int *, void *); + static int proc_read_txq(char *, char **, off_t, int, int *, void *); +#endif + +/* + * Proc Help Functions + */ +static int stricmp(const char *, const char *); +#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC + static int strincmp(const char *, const char *, int); +#endif +static INLINE int ifx_atm_version(char *); +//static INLINE int print_reset_domain(char *, int); +//static INLINE int print_reset_handler(char *, int, ifx_rcu_handler_t *); + +/* + * Init & clean-up functions + */ +#ifdef MODULE + static INLINE void reset_ppe(void); +#endif +static INLINE void check_parameters(void); +static INLINE int init_priv_data(void); +static INLINE void clear_priv_data(void); +static INLINE void init_rx_tables(void); +static INLINE void init_tx_tables(void); + +/* + * Exteranl Function + */ +#if defined(CONFIG_IFX_OAM) || defined(CONFIG_IFX_OAM_MODULE) + extern void ifx_push_oam(unsigned char *); +#else + static inline void ifx_push_oam(unsigned char *dummy) {} +#endif +#if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE) + extern int ifx_mei_atm_led_blink(void); + extern int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr); +#else + static inline int ifx_mei_atm_led_blink(void) { return IFX_SUCCESS; } + static inline int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr) + { + if ( is_showtime != NULL ) + *is_showtime = 0; + return IFX_SUCCESS; + } +#endif + +/* + * External 
variable + */ +extern struct sk_buff* (*ifx_atm_alloc_tx)(struct atm_vcc *, unsigned int); +#if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE) + extern int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *); + extern int (*ifx_mei_atm_showtime_exit)(void); +#else + int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *) = NULL; + EXPORT_SYMBOL(ifx_mei_atm_showtime_enter); + int (*ifx_mei_atm_showtime_exit)(void) = NULL; + EXPORT_SYMBOL(ifx_mei_atm_showtime_exit); +#endif + + + +/* + * #################################### + * Local Variable + * #################################### + */ + +static struct atm_priv_data g_atm_priv_data; + +static struct atmdev_ops g_ifx_atm_ops = { + .open = ppe_open, + .close = ppe_close, + .ioctl = ppe_ioctl, + .send = ppe_send, + .send_oam = ppe_send_oam, + .change_qos = ppe_change_qos, + .owner = THIS_MODULE, +}; + +#if defined(ENABLE_TASKLET) && ENABLE_TASKLET + DECLARE_TASKLET(g_dma_tasklet, do_ppe_tasklet, 0); +#endif + +static int g_showtime = 0; +static void *g_xdata_addr = NULL; + +unsigned int ifx_atm_dbg_enable = 0; + +static struct proc_dir_entry* g_atm_dir = NULL; + + + +/* + * #################################### + * Local Function + * #################################### + */ + +static int ppe_ioctl(struct atm_dev *dev, unsigned int cmd, void *arg) +{ + int ret = 0; + atm_cell_ifEntry_t mib_cell; + atm_aal5_ifEntry_t mib_aal5; + atm_aal5_vcc_x_t mib_vcc; + unsigned int value; + int conn; + + if ( _IOC_TYPE(cmd) != PPE_ATM_IOC_MAGIC + || _IOC_NR(cmd) >= PPE_ATM_IOC_MAXNR ) + return -ENOTTY; + + if ( _IOC_DIR(cmd) & _IOC_READ ) + ret = !access_ok(VERIFY_WRITE, arg, _IOC_SIZE(cmd)); + else if ( _IOC_DIR(cmd) & _IOC_WRITE ) + ret = !access_ok(VERIFY_READ, arg, _IOC_SIZE(cmd)); + if ( ret ) + return -EFAULT; + + switch ( cmd ) + { + case PPE_ATM_MIB_CELL: /* cell level MIB */ + /* These MIB should be read at ARC side, now put zero only. 
*/ + mib_cell.ifHCInOctets_h = 0; + mib_cell.ifHCInOctets_l = 0; + mib_cell.ifHCOutOctets_h = 0; + mib_cell.ifHCOutOctets_l = 0; + mib_cell.ifInErrors = 0; + mib_cell.ifInUnknownProtos = WAN_MIB_TABLE->wrx_drophtu_cell; + mib_cell.ifOutErrors = 0; + + ret = sizeof(mib_cell) - copy_to_user(arg, &mib_cell, sizeof(mib_cell)); + break; + + case PPE_ATM_MIB_AAL5: /* AAL5 MIB */ + value = WAN_MIB_TABLE->wrx_total_byte; + u64_add_u32(g_atm_priv_data.wrx_total_byte, value - g_atm_priv_data.prev_wrx_total_byte, &g_atm_priv_data.wrx_total_byte); + g_atm_priv_data.prev_wrx_total_byte = value; + mib_aal5.ifHCInOctets_h = g_atm_priv_data.wrx_total_byte.h; + mib_aal5.ifHCInOctets_l = g_atm_priv_data.wrx_total_byte.l; + + value = WAN_MIB_TABLE->wtx_total_byte; + u64_add_u32(g_atm_priv_data.wtx_total_byte, value - g_atm_priv_data.prev_wtx_total_byte, &g_atm_priv_data.wtx_total_byte); + g_atm_priv_data.prev_wtx_total_byte = value; + mib_aal5.ifHCOutOctets_h = g_atm_priv_data.wtx_total_byte.h; + mib_aal5.ifHCOutOctets_l = g_atm_priv_data.wtx_total_byte.l; + + mib_aal5.ifInUcastPkts = g_atm_priv_data.wrx_pdu; + mib_aal5.ifOutUcastPkts = WAN_MIB_TABLE->wtx_total_pdu; + mib_aal5.ifInErrors = WAN_MIB_TABLE->wrx_err_pdu; + mib_aal5.ifInDiscards = WAN_MIB_TABLE->wrx_dropdes_pdu + g_atm_priv_data.wrx_drop_pdu; + mib_aal5.ifOutErros = g_atm_priv_data.wtx_err_pdu; + mib_aal5.ifOutDiscards = g_atm_priv_data.wtx_drop_pdu; + + ret = sizeof(mib_aal5) - copy_to_user(arg, &mib_aal5, sizeof(mib_aal5)); + break; + + case PPE_ATM_MIB_VCC: /* VCC related MIB */ + copy_from_user(&mib_vcc, arg, sizeof(mib_vcc)); + conn = find_vpivci(mib_vcc.vpi, mib_vcc.vci); + if ( conn >= 0 ) + { + mib_vcc.mib_vcc.aal5VccCrcErrors = g_atm_priv_data.conn[conn].aal5_vcc_crc_err; + mib_vcc.mib_vcc.aal5VccOverSizedSDUs = g_atm_priv_data.conn[conn].aal5_vcc_oversize_sdu; + mib_vcc.mib_vcc.aal5VccSarTimeOuts = 0; /* no timer support */ + ret = sizeof(mib_vcc) - copy_to_user(arg, &mib_vcc, sizeof(mib_vcc)); + } + else + ret = -EINVAL; + break; + + default: + ret = -ENOIOCTLCMD; + } + + return ret; +} + +static int ppe_open(struct atm_vcc *vcc) +{ + int ret; + short vpi = vcc->vpi; + int vci = vcc->vci; + struct port *port = &g_atm_priv_data.port[(int)vcc->dev->dev_data]; + int conn; + int f_enable_irq = 0; +#if defined(ENABLE_ATM_RETX) && ENABLE_ATM_RETX + int sys_flag; +#endif + + if ( vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0 ) + return -EPROTONOSUPPORT; + + /* check bandwidth */ + if ( (vcc->qos.txtp.traffic_class == ATM_CBR && vcc->qos.txtp.max_pcr > (port->tx_max_cell_rate - port->tx_current_cell_rate)) + || (vcc->qos.txtp.traffic_class == ATM_VBR_RT && vcc->qos.txtp.max_pcr > (port->tx_max_cell_rate - port->tx_current_cell_rate)) + || (vcc->qos.txtp.traffic_class == ATM_VBR_NRT && vcc->qos.txtp.scr > (port->tx_max_cell_rate - port->tx_current_cell_rate)) + || (vcc->qos.txtp.traffic_class == ATM_UBR_PLUS && vcc->qos.txtp.min_pcr > (port->tx_max_cell_rate - port->tx_current_cell_rate)) ) + { + ret = -EINVAL; + goto PPE_OPEN_EXIT; + } + + /* check existing vpi,vci */ + conn = find_vpivci(vpi, vci); + if ( conn >= 0 ) { + ret = -EADDRINUSE; + goto PPE_OPEN_EXIT; + } + + /* check whether it need to enable irq */ + if ( g_atm_priv_data.conn_table == 0 ) + f_enable_irq = 1; + + /* allocate connection */ + for ( conn = 0; conn < MAX_PVC_NUMBER; conn++ ) { + if ( test_and_set_bit(conn, &g_atm_priv_data.conn_table) == 0 ) { + g_atm_priv_data.conn[conn].vcc = vcc; + break; + } + } + if ( conn == MAX_PVC_NUMBER ) + { + ret = -EINVAL; + goto 
PPE_OPEN_EXIT; + } + + /* reserve bandwidth */ + switch ( vcc->qos.txtp.traffic_class ) { + case ATM_CBR: + case ATM_VBR_RT: + port->tx_current_cell_rate += vcc->qos.txtp.max_pcr; + break; + case ATM_VBR_NRT: + port->tx_current_cell_rate += vcc->qos.txtp.scr; + break; + case ATM_UBR_PLUS: + port->tx_current_cell_rate += vcc->qos.txtp.min_pcr; + break; + } + + /* set qsb */ + set_qsb(vcc, &vcc->qos, conn); + + /* update atm_vcc structure */ + vcc->itf = (int)vcc->dev->dev_data; + vcc->vpi = vpi; + vcc->vci = vci; + set_bit(ATM_VF_READY, &vcc->flags); + + /* enable irq */ + if (f_enable_irq ) { + ifx_atm_alloc_tx = atm_alloc_tx; + + *MBOX_IGU1_ISRC = (1 << RX_DMA_CH_AAL) | (1 << RX_DMA_CH_OAM); + *MBOX_IGU1_IER = (1 << RX_DMA_CH_AAL) | (1 << RX_DMA_CH_OAM); + + enable_irq(PPE_MAILBOX_IGU1_INT); + } + + /* set port */ + WTX_QUEUE_CONFIG(conn)->sbid = (int)vcc->dev->dev_data; + + /* set htu entry */ + set_htu_entry(vpi, vci, conn, vcc->qos.aal == ATM_AAL5 ? 1 : 0, 0); + +#if defined(ENABLE_ATM_RETX) && ENABLE_ATM_RETX + // ReTX: occupy second QID + local_irq_save(sys_flag); + if ( g_retx_htu && vcc->qos.aal == ATM_AAL5 ) + { + int retx_conn = (conn + 8) % 16; // ReTX queue + + if ( retx_conn < MAX_PVC_NUMBER && test_and_set_bit(retx_conn, &g_atm_priv_data.conn_table) == 0 ) { + g_atm_priv_data.conn[retx_conn].vcc = vcc; + set_htu_entry(vpi, vci, retx_conn, vcc->qos.aal == ATM_AAL5 ? 1 : 0, 1); + } + } + local_irq_restore(sys_flag); +#endif + + ret = 0; + +PPE_OPEN_EXIT: + return ret; +} + +static void ppe_close(struct atm_vcc *vcc) +{ + int conn; + struct port *port; + struct connection *connection; + + if ( vcc == NULL ) + return; + + /* get connection id */ + conn = find_vcc(vcc); + if ( conn < 0 ) { + err("can't find vcc"); + goto PPE_CLOSE_EXIT; + } + connection = &g_atm_priv_data.conn[conn]; + port = &g_atm_priv_data.port[connection->port]; + + /* clear htu */ + clear_htu_entry(conn); + + /* release connection */ + clear_bit(conn, &g_atm_priv_data.conn_table); + connection->vcc = NULL; + connection->aal5_vcc_crc_err = 0; + connection->aal5_vcc_oversize_sdu = 0; + + /* disable irq */ + if ( g_atm_priv_data.conn_table == 0 ) { + disable_irq(PPE_MAILBOX_IGU1_INT); + ifx_atm_alloc_tx = NULL; + } + + /* release bandwidth */ + switch ( vcc->qos.txtp.traffic_class ) + { + case ATM_CBR: + case ATM_VBR_RT: + port->tx_current_cell_rate -= vcc->qos.txtp.max_pcr; + break; + case ATM_VBR_NRT: + port->tx_current_cell_rate -= vcc->qos.txtp.scr; + break; + case ATM_UBR_PLUS: + port->tx_current_cell_rate -= vcc->qos.txtp.min_pcr; + break; + } + +PPE_CLOSE_EXIT: + return; +} + +static int ppe_send(struct atm_vcc *vcc, struct sk_buff *skb) +{ + int ret; + int conn; + int desc_base; + struct tx_descriptor reg_desc = {0}; + + if ( vcc == NULL || skb == NULL ) + return -EINVAL; + + skb_get(skb); + atm_free_tx_skb_vcc(skb, vcc); + + conn = find_vcc(vcc); + if ( conn < 0 ) { + ret = -EINVAL; + goto FIND_VCC_FAIL; + } + + if ( !g_showtime ) { + err("not in showtime"); + ret = -EIO; + goto PPE_SEND_FAIL; + } + + if ( vcc->qos.aal == ATM_AAL5 ) { + int byteoff; + int datalen; + struct tx_inband_header *header; + + datalen = skb->len; + byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1); + + if ( skb_headroom(skb) < byteoff + TX_INBAND_HEADER_LENGTH ) { + struct sk_buff *new_skb; + + new_skb = alloc_skb_tx(datalen); + if ( new_skb == NULL ) { + err("ALLOC_SKB_TX_FAIL"); + ret = -ENOMEM; + goto PPE_SEND_FAIL; + } + skb_put(new_skb, datalen); + memcpy(new_skb->data, skb->data, datalen); + 
dev_kfree_skb_any(skb); + skb = new_skb; + byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1); + } + + skb_push(skb, byteoff + TX_INBAND_HEADER_LENGTH); + + header = (struct tx_inband_header *)skb->data; + + /* setup inband trailer */ + header->uu = 0; + header->cpi = 0; + header->pad = aal5_fill_pattern; + header->res1 = 0; + + /* setup cell header */ + header->clp = (vcc->atm_options & ATM_ATMOPT_CLP) ? 1 : 0; + header->pti = ATM_PTI_US0; + header->vci = vcc->vci; + header->vpi = vcc->vpi; + header->gfc = 0; + + /* setup descriptor */ + reg_desc.dataptr = (unsigned int)skb->data >> 2; + reg_desc.datalen = datalen; + reg_desc.byteoff = byteoff; + reg_desc.iscell = 0; + } + else { + /* if data pointer is not aligned, allocate new sk_buff */ + if ( ((unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1)) != 0 ) { + struct sk_buff *new_skb; + + err("skb->data not aligned"); + + new_skb = alloc_skb_tx(skb->len); + if ( new_skb == NULL ) { + err("ALLOC_SKB_TX_FAIL"); + ret = -ENOMEM; + goto PPE_SEND_FAIL; + } + skb_put(new_skb, skb->len); + memcpy(new_skb->data, skb->data, skb->len); + dev_kfree_skb_any(skb); + skb = new_skb; + } + + reg_desc.dataptr = (unsigned int)skb->data >> 2; + reg_desc.datalen = skb->len; + reg_desc.byteoff = 0; + reg_desc.iscell = 1; + } + + reg_desc.own = 1; + reg_desc.c = 1; + reg_desc.sop = reg_desc.eop = 1; + + desc_base = get_tx_desc(conn); + if ( desc_base < 0 ) { + err("ALLOC_TX_CONNECTION_FAIL"); + ret = -EIO; + goto PPE_SEND_FAIL; + } + + if ( vcc->stats ) + atomic_inc(&vcc->stats->tx); + if ( vcc->qos.aal == ATM_AAL5 ) + g_atm_priv_data.wtx_pdu++; + + /* update descriptor send pointer */ + if ( g_atm_priv_data.conn[conn].tx_skb[desc_base] != NULL ) + dev_kfree_skb_any(g_atm_priv_data.conn[conn].tx_skb[desc_base]); + g_atm_priv_data.conn[conn].tx_skb[desc_base] = skb; + + /* write discriptor to memory and write back cache */ + g_atm_priv_data.conn[conn].tx_desc[desc_base] = reg_desc; + dma_cache_wback((unsigned long)skb->data, skb->len); + + dump_skb(skb, DUMP_SKB_LEN, (char *)__func__, 0, conn, 1); + + mailbox_signal(conn, 1); + + adsl_led_flash(); + + return 0; + +FIND_VCC_FAIL: + err("FIND_VCC_FAIL"); + g_atm_priv_data.wtx_err_pdu++; + dev_kfree_skb_any(skb); + return ret; + +PPE_SEND_FAIL: + if ( vcc->qos.aal == ATM_AAL5 ) + g_atm_priv_data.wtx_drop_pdu++; + if ( vcc->stats ) + atomic_inc(&vcc->stats->tx_err); + dev_kfree_skb_any(skb); + return ret; +} + +static int ppe_send_oam(struct atm_vcc *vcc, void *cell, int flags) +{ + int conn; + struct uni_cell_header *uni_cell_header = (struct uni_cell_header *)cell; + int desc_base; + struct sk_buff *skb; + struct tx_descriptor reg_desc = {0}; + + if ( ((uni_cell_header->pti == ATM_PTI_SEGF5 || uni_cell_header->pti == ATM_PTI_E2EF5) + && find_vpivci(uni_cell_header->vpi, uni_cell_header->vci) < 0) + || ((uni_cell_header->vci == 0x03 || uni_cell_header->vci == 0x04) + && find_vpi(uni_cell_header->vpi) < 0) ) + return -EINVAL; + + if ( !g_showtime ) { + err("not in showtime"); + return -EIO; + } + + conn = find_vcc(vcc); + if ( conn < 0 ) { + err("FIND_VCC_FAIL"); + return -EINVAL; + } + + skb = alloc_skb_tx(CELL_SIZE); + if ( skb == NULL ) { + err("ALLOC_SKB_TX_FAIL"); + return -ENOMEM; + } + memcpy(skb->data, cell, CELL_SIZE); + + reg_desc.dataptr = (unsigned int)skb->data >> 2; + reg_desc.datalen = CELL_SIZE; + reg_desc.byteoff = 0; + reg_desc.iscell = 1; + + reg_desc.own = 1; + reg_desc.c = 1; + reg_desc.sop = reg_desc.eop = 1; + + desc_base = get_tx_desc(conn); + if ( desc_base < 0 ) { + 
dev_kfree_skb_any(skb); + err("ALLOC_TX_CONNECTION_FAIL"); + return -EIO; + } + + if ( vcc->stats ) + atomic_inc(&vcc->stats->tx); + + /* update descriptor send pointer */ + if ( g_atm_priv_data.conn[conn].tx_skb[desc_base] != NULL ) + dev_kfree_skb_any(g_atm_priv_data.conn[conn].tx_skb[desc_base]); + g_atm_priv_data.conn[conn].tx_skb[desc_base] = skb; + + /* write discriptor to memory and write back cache */ + g_atm_priv_data.conn[conn].tx_desc[desc_base] = reg_desc; + dma_cache_wback((unsigned long)skb->data, CELL_SIZE); + + dump_skb(skb, DUMP_SKB_LEN, (char *)__func__, 0, conn, 1); + + mailbox_signal(conn, 1); + + adsl_led_flash(); + + return 0; +} + +static int ppe_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags) +{ + int conn; + + if ( vcc == NULL || qos == NULL ) + return -EINVAL; + + conn = find_vcc(vcc); + if ( conn < 0 ) + return -EINVAL; + + set_qsb(vcc, qos, conn); + + return 0; +} + +static INLINE int adsl_led_flash(void) +{ + return ifx_mei_atm_led_blink(); +} + +/* + * Description: + * Add a 32-bit value to 64-bit value, and put result in a 64-bit variable. + * Input: + * opt1 --- ppe_u64_t, first operand, a 64-bit unsigned integer value + * opt2 --- unsigned int, second operand, a 32-bit unsigned integer value + * ret --- ppe_u64_t, pointer to a variable to hold result + * Output: + * none + */ +static INLINE void u64_add_u32(ppe_u64_t opt1, unsigned int opt2, ppe_u64_t *ret) +{ + ret->l = opt1.l + opt2; + if ( ret->l < opt1.l || ret->l < opt2 ) + ret->h++; +} + +static INLINE struct sk_buff* alloc_skb_rx(void) +{ + struct sk_buff *skb; + + skb = dev_alloc_skb(RX_DMA_CH_AAL_BUF_SIZE + DATA_BUFFER_ALIGNMENT); + if ( skb != NULL ) { + /* must be burst length alignment */ + if ( ((unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1)) != 0 ) + skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1)); + /* pub skb in reserved area "skb->data - 4" */ + *((struct sk_buff **)skb->data - 1) = skb; + /* write back and invalidate cache */ + dma_cache_wback_inv((unsigned long)skb->data - sizeof(skb), sizeof(skb)); + /* invalidate cache */ + dma_cache_inv((unsigned long)skb->data, (unsigned int)skb->end - (unsigned int)skb->data); + } + + return skb; +} + +static INLINE struct sk_buff* alloc_skb_tx(unsigned int size) +{ + struct sk_buff *skb; + + /* allocate memory including header and padding */ + size += TX_INBAND_HEADER_LENGTH + MAX_TX_PACKET_ALIGN_BYTES + MAX_TX_PACKET_PADDING_BYTES; + size &= ~(DATA_BUFFER_ALIGNMENT - 1); + skb = dev_alloc_skb(size + DATA_BUFFER_ALIGNMENT); + /* must be burst length alignment */ + if ( skb != NULL ) + skb_reserve(skb, (~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1)) + TX_INBAND_HEADER_LENGTH); + return skb; +} + +struct sk_buff* atm_alloc_tx(struct atm_vcc *vcc, unsigned int size) +{ + int conn; + struct sk_buff *skb; + + /* oversize packet */ + if ( size > aal5s_max_packet_size ) { + err("atm_alloc_tx: oversize packet"); + return NULL; + } + /* send buffer overflow */ + if ( atomic_read(&sk_atm(vcc)->sk_wmem_alloc) && !atm_may_send(vcc, size) ) { + err("atm_alloc_tx: send buffer overflow"); + return NULL; + } + conn = find_vcc(vcc); + if ( conn < 0 ) { + err("atm_alloc_tx: unknown VCC"); + return NULL; + } + + skb = dev_alloc_skb(size); + if ( skb == NULL ) { + err("atm_alloc_tx: sk buffer is used up"); + return NULL; + } + + atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); + + return skb; +} + +static INLINE void atm_free_tx_skb_vcc(struct 
sk_buff *skb, struct atm_vcc *vcc) +{ + if ( vcc->pop != NULL ) + vcc->pop(vcc, skb); + else + dev_kfree_skb_any(skb); +} + +static INLINE struct sk_buff *get_skb_rx_pointer(unsigned int dataptr) +{ + unsigned int skb_dataptr; + struct sk_buff *skb; + + skb_dataptr = ((dataptr - 1) << 2) | KSEG1; + skb = *(struct sk_buff **)skb_dataptr; + + ASSERT((unsigned int)skb >= KSEG0, "invalid skb - skb = %#08x, dataptr = %#08x", (unsigned int)skb, dataptr); + ASSERT(((unsigned int)skb->data | KSEG1) == ((dataptr << 2) | KSEG1), "invalid skb - skb = %#08x, skb->data = %#08x, dataptr = %#08x", (unsigned int)skb, (unsigned int)skb->data, dataptr); + + return skb; +} + +static INLINE int get_tx_desc(unsigned int conn) +{ + int desc_base = -1; + struct connection *p_conn = &g_atm_priv_data.conn[conn]; + + if ( p_conn->tx_desc[p_conn->tx_desc_pos].own == 0 ) { + desc_base = p_conn->tx_desc_pos; + if ( ++(p_conn->tx_desc_pos) == dma_tx_descriptor_length ) + p_conn->tx_desc_pos = 0; + } + + return desc_base; +} + +static INLINE void mailbox_oam_rx_handler(void) +{ + unsigned int vlddes = WRX_DMA_CHANNEL_CONFIG(RX_DMA_CH_OAM)->vlddes; + struct rx_descriptor reg_desc; + struct uni_cell_header *header; + int conn; + struct atm_vcc *vcc; + unsigned int i; + + for ( i = 0; i < vlddes; i++ ) { + do { + reg_desc = g_atm_priv_data.oam_desc[g_atm_priv_data.oam_desc_pos]; + } while ( reg_desc.own || !reg_desc.c ); // keep test OWN and C bit until data is ready + + header = (struct uni_cell_header *)&g_atm_priv_data.oam_buf[g_atm_priv_data.oam_desc_pos * RX_DMA_CH_OAM_BUF_SIZE]; + + if ( header->pti == ATM_PTI_SEGF5 || header->pti == ATM_PTI_E2EF5 ) + conn = find_vpivci(header->vpi, header->vci); + else if ( header->vci == 0x03 || header->vci == 0x04 ) + conn = find_vpi(header->vpi); + else + conn = -1; + + if ( conn >= 0 && g_atm_priv_data.conn[conn].vcc != NULL ) { + vcc = g_atm_priv_data.conn[conn].vcc; + + if ( vcc->push_oam != NULL ) + vcc->push_oam(vcc, header); + else + ifx_push_oam((unsigned char *)header); + + adsl_led_flash(); + } + + reg_desc.byteoff = 0; + reg_desc.datalen = RX_DMA_CH_OAM_BUF_SIZE; + reg_desc.own = 1; + reg_desc.c = 0; + + g_atm_priv_data.oam_desc[g_atm_priv_data.oam_desc_pos] = reg_desc; + if ( ++g_atm_priv_data.oam_desc_pos == RX_DMA_CH_OAM_DESC_LEN ) + g_atm_priv_data.oam_desc_pos = 0; + + mailbox_signal(RX_DMA_CH_OAM, 0); + } +} + +static INLINE void mailbox_aal_rx_handler(void) +{ + unsigned int vlddes = WRX_DMA_CHANNEL_CONFIG(RX_DMA_CH_AAL)->vlddes; + struct rx_descriptor reg_desc; + int conn; + struct atm_vcc *vcc; + struct sk_buff *skb, *new_skb; + struct rx_inband_trailer *trailer; + unsigned int i; + + for ( i = 0; i < vlddes; i++ ) { + do { + reg_desc = g_atm_priv_data.aal_desc[g_atm_priv_data.aal_desc_pos]; + } while ( reg_desc.own || !reg_desc.c ); // keep test OWN and C bit until data is ready + + conn = reg_desc.id; + + if ( g_atm_priv_data.conn[conn].vcc != NULL ) { + vcc = g_atm_priv_data.conn[conn].vcc; + + skb = get_skb_rx_pointer(reg_desc.dataptr); + + if ( reg_desc.err ) { + if ( vcc->qos.aal == ATM_AAL5 ) { + trailer = (struct rx_inband_trailer *)((unsigned int)skb->data + ((reg_desc.byteoff + reg_desc.datalen + MAX_RX_PACKET_PADDING_BYTES) & ~MAX_RX_PACKET_PADDING_BYTES)); + if ( trailer->stw_crc ) + g_atm_priv_data.conn[conn].aal5_vcc_crc_err++; + if ( trailer->stw_ovz ) + g_atm_priv_data.conn[conn].aal5_vcc_oversize_sdu++; + g_atm_priv_data.wrx_drop_pdu++; + } + if ( vcc->stats ) { + atomic_inc(&vcc->stats->rx_drop); + atomic_inc(&vcc->stats->rx_err); + } + } + 
else if ( atm_charge(vcc, skb->truesize) ) { + new_skb = alloc_skb_rx(); + if ( new_skb != NULL ) { + skb_reserve(skb, reg_desc.byteoff); + skb_put(skb, reg_desc.datalen); + ATM_SKB(skb)->vcc = vcc; + + dump_skb(skb, DUMP_SKB_LEN, (char *)__func__, 0, conn, 0); + + vcc->push(vcc, skb); + + if ( vcc->qos.aal == ATM_AAL5 ) + g_atm_priv_data.wrx_pdu++; + if ( vcc->stats ) + atomic_inc(&vcc->stats->rx); + adsl_led_flash(); + + reg_desc.dataptr = (unsigned int)new_skb->data >> 2; + } + else { + atm_return(vcc, skb->truesize); + if ( vcc->qos.aal == ATM_AAL5 ) + g_atm_priv_data.wrx_drop_pdu++; + if ( vcc->stats ) + atomic_inc(&vcc->stats->rx_drop); + } + } + else { + if ( vcc->qos.aal == ATM_AAL5 ) + g_atm_priv_data.wrx_drop_pdu++; + if ( vcc->stats ) + atomic_inc(&vcc->stats->rx_drop); + } + } + else { + g_atm_priv_data.wrx_drop_pdu++; + } + + reg_desc.byteoff = 0; + reg_desc.datalen = RX_DMA_CH_AAL_BUF_SIZE; + reg_desc.own = 1; + reg_desc.c = 0; + + g_atm_priv_data.aal_desc[g_atm_priv_data.aal_desc_pos] = reg_desc; + if ( ++g_atm_priv_data.aal_desc_pos == dma_rx_descriptor_length ) + g_atm_priv_data.aal_desc_pos = 0; + + mailbox_signal(RX_DMA_CH_AAL, 0); + } +} + +#if defined(ENABLE_TASKLET) && ENABLE_TASKLET +static void do_ppe_tasklet(unsigned long arg) +{ + *MBOX_IGU1_ISRC = *MBOX_IGU1_ISR; + mailbox_oam_rx_handler(); + mailbox_aal_rx_handler(); + if ( (*MBOX_IGU1_ISR & ((1 << RX_DMA_CH_AAL) | (1 << RX_DMA_CH_OAM))) != 0 ) + tasklet_schedule(&g_dma_tasklet); + else + enable_irq(PPE_MAILBOX_IGU1_INT); +} +#endif + +static irqreturn_t mailbox_irq_handler(int irq, void *dev_id) +{ + if ( !*MBOX_IGU1_ISR ) + return IRQ_HANDLED; + +#if defined(ENABLE_TASKLET) && ENABLE_TASKLET + disable_irq(PPE_MAILBOX_IGU1_INT); + tasklet_schedule(&g_dma_tasklet); +#else + *MBOX_IGU1_ISRC = *MBOX_IGU1_ISR; + mailbox_oam_rx_handler(); + mailbox_aal_rx_handler(); +#endif + + return IRQ_HANDLED; +} + +static INLINE void mailbox_signal(unsigned int queue, int is_tx) +{ + if ( is_tx ) { + while ( MBOX_IGU3_ISR_ISR(queue + FIRST_QSB_QID + 16) ); + *MBOX_IGU3_ISRS = MBOX_IGU3_ISRS_SET(queue + FIRST_QSB_QID + 16); + } + else { + while ( MBOX_IGU3_ISR_ISR(queue) ); + *MBOX_IGU3_ISRS = MBOX_IGU3_ISRS_SET(queue); + } +} + +static void set_qsb(struct atm_vcc *vcc, struct atm_qos *qos, unsigned int queue) +{ + struct clk *clk = clk_get(0, "fpi"); + unsigned int qsb_clk = clk_get_rate(clk); + unsigned int qsb_qid = queue + FIRST_QSB_QID; + union qsb_queue_parameter_table qsb_queue_parameter_table = {{0}}; + union qsb_queue_vbr_parameter_table qsb_queue_vbr_parameter_table = {{0}}; + unsigned int tmp; + +#if defined(DEBUG_QOS) && DEBUG_QOS + if ( (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_QOS) ) { + static char *str_traffic_class[9] = { + "ATM_NONE", + "ATM_UBR", + "ATM_CBR", + "ATM_VBR", + "ATM_ABR", + "ATM_ANYCLASS", + "ATM_VBR_RT", + "ATM_UBR_PLUS", + "ATM_MAX_PCR" + }; + printk(KERN_INFO "QoS Parameters:\n"); + printk(KERN_INFO "\tAAL : %d\n", qos->aal); + printk(KERN_INFO "\tTX Traffic Class: %s\n", str_traffic_class[qos->txtp.traffic_class]); + printk(KERN_INFO "\tTX Max PCR : %d\n", qos->txtp.max_pcr); + printk(KERN_INFO "\tTX Min PCR : %d\n", qos->txtp.min_pcr); + printk(KERN_INFO "\tTX PCR : %d\n", qos->txtp.pcr); + printk(KERN_INFO "\tTX Max CDV : %d\n", qos->txtp.max_cdv); + printk(KERN_INFO "\tTX Max SDU : %d\n", qos->txtp.max_sdu); + printk(KERN_INFO "\tTX SCR : %d\n", qos->txtp.scr); + printk(KERN_INFO "\tTX MBS : %d\n", qos->txtp.mbs); + printk(KERN_INFO "\tTX CDV : %d\n", qos->txtp.cdv); + printk(KERN_INFO 
"\tRX Traffic Class: %s\n", str_traffic_class[qos->rxtp.traffic_class]); + printk(KERN_INFO "\tRX Max PCR : %d\n", qos->rxtp.max_pcr); + printk(KERN_INFO "\tRX Min PCR : %d\n", qos->rxtp.min_pcr); + printk(KERN_INFO "\tRX PCR : %d\n", qos->rxtp.pcr); + printk(KERN_INFO "\tRX Max CDV : %d\n", qos->rxtp.max_cdv); + printk(KERN_INFO "\tRX Max SDU : %d\n", qos->rxtp.max_sdu); + printk(KERN_INFO "\tRX SCR : %d\n", qos->rxtp.scr); + printk(KERN_INFO "\tRX MBS : %d\n", qos->rxtp.mbs); + printk(KERN_INFO "\tRX CDV : %d\n", qos->rxtp.cdv); + } +#endif // defined(DEBUG_QOS) && DEBUG_QOS + + /* + * Peak Cell Rate (PCR) Limiter + */ + if ( qos->txtp.max_pcr == 0 ) + qsb_queue_parameter_table.bit.tp = 0; /* disable PCR limiter */ + else { + /* peak cell rate would be slightly lower than requested [maximum_rate / pcr = (qsb_clock / 8) * (time_step / 4) / pcr] */ + tmp = ((qsb_clk * qsb_tstep) >> 5) / qos->txtp.max_pcr + 1; + /* check if overflow takes place */ + qsb_queue_parameter_table.bit.tp = tmp > QSB_TP_TS_MAX ? QSB_TP_TS_MAX : tmp; + } + + // A funny issue. Create two PVCs, one UBR and one UBR with max_pcr. + // Send packets to these two PVCs at same time, it trigger strange behavior. + // In A1, RAM from 0x80000000 to 0x0x8007FFFF was corrupted with fixed pattern 0x00000000 0x40000000. + // In A4, PPE firmware keep emiting unknown cell and do not respond to driver. + // To work around, create UBR always with max_pcr. + // If user want to create UBR without max_pcr, we give a default one larger than line-rate. + if ( qos->txtp.traffic_class == ATM_UBR && qsb_queue_parameter_table.bit.tp == 0 ) { + int port = g_atm_priv_data.conn[queue].port; + unsigned int max_pcr = g_atm_priv_data.port[port].tx_max_cell_rate + 1000; + + tmp = ((qsb_clk * qsb_tstep) >> 5) / max_pcr + 1; + if ( tmp > QSB_TP_TS_MAX ) + tmp = QSB_TP_TS_MAX; + else if ( tmp < 1 ) + tmp = 1; + qsb_queue_parameter_table.bit.tp = tmp; + } + + /* + * Weighted Fair Queueing Factor (WFQF) + */ + switch ( qos->txtp.traffic_class ) { + case ATM_CBR: + case ATM_VBR_RT: + /* real time queue gets weighted fair queueing bypass */ + qsb_queue_parameter_table.bit.wfqf = 0; + break; + case ATM_VBR_NRT: + case ATM_UBR_PLUS: + /* WFQF calculation here is based on virtual cell rates, to reduce granularity for high rates */ + /* WFQF is maximum cell rate / garenteed cell rate */ + /* wfqf = qsb_minimum_cell_rate * QSB_WFQ_NONUBR_MAX / requested_minimum_peak_cell_rate */ + if ( qos->txtp.min_pcr == 0 ) + qsb_queue_parameter_table.bit.wfqf = QSB_WFQ_NONUBR_MAX; + else + { + tmp = QSB_GCR_MIN * QSB_WFQ_NONUBR_MAX / qos->txtp.min_pcr; + if ( tmp == 0 ) + qsb_queue_parameter_table.bit.wfqf = 1; + else if ( tmp > QSB_WFQ_NONUBR_MAX ) + qsb_queue_parameter_table.bit.wfqf = QSB_WFQ_NONUBR_MAX; + else + qsb_queue_parameter_table.bit.wfqf = tmp; + } + break; + default: + case ATM_UBR: + qsb_queue_parameter_table.bit.wfqf = QSB_WFQ_UBR_BYPASS; + } + + /* + * Sustained Cell Rate (SCR) Leaky Bucket Shaper VBR.0/VBR.1 + */ + if ( qos->txtp.traffic_class == ATM_VBR_RT || qos->txtp.traffic_class == ATM_VBR_NRT ) { + if ( qos->txtp.scr == 0 ) { + /* disable shaper */ + qsb_queue_vbr_parameter_table.bit.taus = 0; + qsb_queue_vbr_parameter_table.bit.ts = 0; + } + else { + /* Cell Loss Priority (CLP) */ + if ( (vcc->atm_options & ATM_ATMOPT_CLP) ) + /* CLP1 */ + qsb_queue_parameter_table.bit.vbr = 1; + else + /* CLP0 */ + qsb_queue_parameter_table.bit.vbr = 0; + /* Rate Shaper Parameter (TS) and Burst Tolerance Parameter for SCR (tauS) */ + tmp = ((qsb_clk * qsb_tstep) >> 
5) / qos->txtp.scr + 1; + qsb_queue_vbr_parameter_table.bit.ts = tmp > QSB_TP_TS_MAX ? QSB_TP_TS_MAX : tmp; + tmp = (qos->txtp.mbs - 1) * (qsb_queue_vbr_parameter_table.bit.ts - qsb_queue_parameter_table.bit.tp) / 64; + if ( tmp == 0 ) + qsb_queue_vbr_parameter_table.bit.taus = 1; + else if ( tmp > QSB_TAUS_MAX ) + qsb_queue_vbr_parameter_table.bit.taus = QSB_TAUS_MAX; + else + qsb_queue_vbr_parameter_table.bit.taus = tmp; + } + } + else { + qsb_queue_vbr_parameter_table.bit.taus = 0; + qsb_queue_vbr_parameter_table.bit.ts = 0; + } + + /* Queue Parameter Table (QPT) */ + *QSB_RTM = QSB_RTM_DM_SET(QSB_QPT_SET_MASK); + *QSB_RTD = QSB_RTD_TTV_SET(qsb_queue_parameter_table.dword); + *QSB_RAMAC = QSB_RAMAC_RW_SET(QSB_RAMAC_RW_WRITE) | QSB_RAMAC_TSEL_SET(QSB_RAMAC_TSEL_QPT) | QSB_RAMAC_LH_SET(QSB_RAMAC_LH_LOW) | QSB_RAMAC_TESEL_SET(qsb_qid); +#if defined(DEBUG_QOS) && DEBUG_QOS + if ( (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_QOS) ) + printk("QPT: QSB_RTM (%08X) = 0x%08X, QSB_RTD (%08X) = 0x%08X, QSB_RAMAC (%08X) = 0x%08X\n", (unsigned int)QSB_RTM, *QSB_RTM, (unsigned int)QSB_RTD, *QSB_RTD, (unsigned int)QSB_RAMAC, *QSB_RAMAC); +#endif + /* Queue VBR Paramter Table (QVPT) */ + *QSB_RTM = QSB_RTM_DM_SET(QSB_QVPT_SET_MASK); + *QSB_RTD = QSB_RTD_TTV_SET(qsb_queue_vbr_parameter_table.dword); + *QSB_RAMAC = QSB_RAMAC_RW_SET(QSB_RAMAC_RW_WRITE) | QSB_RAMAC_TSEL_SET(QSB_RAMAC_TSEL_VBR) | QSB_RAMAC_LH_SET(QSB_RAMAC_LH_LOW) | QSB_RAMAC_TESEL_SET(qsb_qid); +#if defined(DEBUG_QOS) && DEBUG_QOS + if ( (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_QOS) ) + printk("QVPT: QSB_RTM (%08X) = 0x%08X, QSB_RTD (%08X) = 0x%08X, QSB_RAMAC (%08X) = 0x%08X\n", (unsigned int)QSB_RTM, *QSB_RTM, (unsigned int)QSB_RTD, *QSB_RTD, (unsigned int)QSB_RAMAC, *QSB_RAMAC); +#endif + +#if defined(DEBUG_QOS) && DEBUG_QOS + if ( (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_QOS) ) { + printk("set_qsb\n"); + printk(" qsb_clk = %lu\n", (unsigned long)qsb_clk); + printk(" qsb_queue_parameter_table.bit.tp = %d\n", (int)qsb_queue_parameter_table.bit.tp); + printk(" qsb_queue_parameter_table.bit.wfqf = %d (0x%08X)\n", (int)qsb_queue_parameter_table.bit.wfqf, (int)qsb_queue_parameter_table.bit.wfqf); + printk(" qsb_queue_parameter_table.bit.vbr = %d\n", (int)qsb_queue_parameter_table.bit.vbr); + printk(" qsb_queue_parameter_table.dword = 0x%08X\n", (int)qsb_queue_parameter_table.dword); + printk(" qsb_queue_vbr_parameter_table.bit.ts = %d\n", (int)qsb_queue_vbr_parameter_table.bit.ts); + printk(" qsb_queue_vbr_parameter_table.bit.taus = %d\n", (int)qsb_queue_vbr_parameter_table.bit.taus); + printk(" qsb_queue_vbr_parameter_table.dword = 0x%08X\n", (int)qsb_queue_vbr_parameter_table.dword); + } +#endif +} + +static void qsb_global_set(void) +{ + struct clk *clk = clk_get(0, "fpi"); + unsigned int qsb_clk = clk_get_rate(clk); + int i; + unsigned int tmp1, tmp2, tmp3; + + *QSB_ICDV = QSB_ICDV_TAU_SET(qsb_tau); + *QSB_SBL = QSB_SBL_SBL_SET(qsb_srvm); + *QSB_CFG = QSB_CFG_TSTEPC_SET(qsb_tstep >> 1); +#if defined(DEBUG_QOS) && DEBUG_QOS + if ( (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_QOS) ) { + printk("qsb_clk = %u\n", qsb_clk); + printk("QSB_ICDV (%08X) = %d (%d), QSB_SBL (%08X) = %d (%d), QSB_CFG (%08X) = %d (%d)\n", (unsigned int)QSB_ICDV, *QSB_ICDV, QSB_ICDV_TAU_SET(qsb_tau), (unsigned int)QSB_SBL, *QSB_SBL, QSB_SBL_SBL_SET(qsb_srvm), (unsigned int)QSB_CFG, *QSB_CFG, QSB_CFG_TSTEPC_SET(qsb_tstep >> 1)); + } +#endif + + /* + * set SCT and SPT per port + */ + for ( i = 0; i < ATM_PORT_NUMBER; i++ ) { + if ( g_atm_priv_data.port[i].tx_max_cell_rate 
!= 0 ) { + tmp1 = ((qsb_clk * qsb_tstep) >> 1) / g_atm_priv_data.port[i].tx_max_cell_rate; + tmp2 = tmp1 >> 6; /* integer value of Tsb */ + tmp3 = (tmp1 & ((1 << 6) - 1)) + 1; /* fractional part of Tsb */ + /* carry over to integer part (?) */ + if ( tmp3 == (1 << 6) ) + { + tmp3 = 0; + tmp2++; + } + if ( tmp2 == 0 ) + tmp2 = tmp3 = 1; + /* 1. set mask */ + /* 2. write value to data transfer register */ + /* 3. start the tranfer */ + /* SCT (FracRate) */ + *QSB_RTM = QSB_RTM_DM_SET(QSB_SET_SCT_MASK); + *QSB_RTD = QSB_RTD_TTV_SET(tmp3); + *QSB_RAMAC = QSB_RAMAC_RW_SET(QSB_RAMAC_RW_WRITE) | QSB_RAMAC_TSEL_SET(QSB_RAMAC_TSEL_SCT) | QSB_RAMAC_LH_SET(QSB_RAMAC_LH_LOW) | QSB_RAMAC_TESEL_SET(i & 0x01); +#if defined(DEBUG_QOS) && DEBUG_QOS + if ( (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_QOS) ) + printk("SCT: QSB_RTM (%08X) = 0x%08X, QSB_RTD (%08X) = 0x%08X, QSB_RAMAC (%08X) = 0x%08X\n", (unsigned int)QSB_RTM, *QSB_RTM, (unsigned int)QSB_RTD, *QSB_RTD, (unsigned int)QSB_RAMAC, *QSB_RAMAC); +#endif + /* SPT (SBV + PN + IntRage) */ + *QSB_RTM = QSB_RTM_DM_SET(QSB_SET_SPT_MASK); + *QSB_RTD = QSB_RTD_TTV_SET(QSB_SPT_SBV_VALID | QSB_SPT_PN_SET(i & 0x01) | QSB_SPT_INTRATE_SET(tmp2)); + *QSB_RAMAC = QSB_RAMAC_RW_SET(QSB_RAMAC_RW_WRITE) | QSB_RAMAC_TSEL_SET(QSB_RAMAC_TSEL_SPT) | QSB_RAMAC_LH_SET(QSB_RAMAC_LH_LOW) | QSB_RAMAC_TESEL_SET(i & 0x01); +#if defined(DEBUG_QOS) && DEBUG_QOS + if ( (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_QOS) ) + printk("SPT: QSB_RTM (%08X) = 0x%08X, QSB_RTD (%08X) = 0x%08X, QSB_RAMAC (%08X) = 0x%08X\n", (unsigned int)QSB_RTM, *QSB_RTM, (unsigned int)QSB_RTD, *QSB_RTD, (unsigned int)QSB_RAMAC, *QSB_RAMAC); +#endif + } + } +} + +static INLINE void set_htu_entry(unsigned int vpi, unsigned int vci, unsigned int queue, int aal5, int is_retx) +{ + struct htu_entry htu_entry = { res1: 0x00, + clp: is_retx ? 0x01 : 0x00, + pid: g_atm_priv_data.conn[queue].port & 0x01, + vpi: vpi, + vci: vci, + pti: 0x00, + vld: 0x01}; + + struct htu_mask htu_mask = { set: 0x01, +#if !defined(ENABLE_ATM_RETX) || !ENABLE_ATM_RETX + clp: 0x01, + pid_mask: 0x02, +#else + clp: g_retx_htu ? 0x00 : 0x01, + pid_mask: RETX_MODE_CFG->retx_en ? 0x03 : 0x02, +#endif + vpi_mask: 0x00, +#if !defined(ENABLE_ATM_RETX) || !ENABLE_ATM_RETX + vci_mask: 0x0000, +#else + vci_mask: RETX_MODE_CFG->retx_en ? 0xFF00 : 0x0000, +#endif + pti_mask: 0x03, // 0xx, user data + clear: 0x00}; + + struct htu_result htu_result = {res1: 0x00, + cellid: queue, + res2: 0x00, + type: aal5 ? 
0x00 : 0x01, + ven: 0x01, + res3: 0x00, + qid: queue}; + + *HTU_RESULT(queue + OAM_HTU_ENTRY_NUMBER) = htu_result; + *HTU_MASK(queue + OAM_HTU_ENTRY_NUMBER) = htu_mask; + *HTU_ENTRY(queue + OAM_HTU_ENTRY_NUMBER) = htu_entry; +} + +static INLINE void clear_htu_entry(unsigned int queue) +{ + HTU_ENTRY(queue + OAM_HTU_ENTRY_NUMBER)->vld = 0; +} + +static void validate_oam_htu_entry(void) +{ + HTU_ENTRY(OAM_F4_SEG_HTU_ENTRY)->vld = 1; + HTU_ENTRY(OAM_F4_TOT_HTU_ENTRY)->vld = 1; + HTU_ENTRY(OAM_F5_HTU_ENTRY)->vld = 1; +#if defined(ENABLE_ATM_RETX) && ENABLE_ATM_RETX + HTU_ENTRY(OAM_ARQ_HTU_ENTRY)->vld = 1; +#endif +} + +static void invalidate_oam_htu_entry(void) +{ + HTU_ENTRY(OAM_F4_SEG_HTU_ENTRY)->vld = 0; + HTU_ENTRY(OAM_F4_TOT_HTU_ENTRY)->vld = 0; + HTU_ENTRY(OAM_F5_HTU_ENTRY)->vld = 0; +#if defined(ENABLE_ATM_RETX) && ENABLE_ATM_RETX + HTU_ENTRY(OAM_ARQ_HTU_ENTRY)->vld = 0; +#endif +} + +static INLINE int find_vpi(unsigned int vpi) +{ + int i; + unsigned int bit; + + for ( i = 0, bit = 1; i < MAX_PVC_NUMBER; i++, bit <<= 1 ) { + if ( (g_atm_priv_data.conn_table & bit) != 0 + && g_atm_priv_data.conn[i].vcc != NULL + && vpi == g_atm_priv_data.conn[i].vcc->vpi ) + return i; + } + + return -1; +} + +static INLINE int find_vpivci(unsigned int vpi, unsigned int vci) +{ + int i; + unsigned int bit; + + for ( i = 0, bit = 1; i < MAX_PVC_NUMBER; i++, bit <<= 1 ) { + if ( (g_atm_priv_data.conn_table & bit) != 0 + && g_atm_priv_data.conn[i].vcc != NULL + && vpi == g_atm_priv_data.conn[i].vcc->vpi + && vci == g_atm_priv_data.conn[i].vcc->vci ) + return i; + } + + return -1; +} + +static INLINE int find_vcc(struct atm_vcc *vcc) +{ + int i; + unsigned int bit; + + for ( i = 0, bit = 1; i < MAX_PVC_NUMBER; i++, bit <<= 1 ) { + if ( (g_atm_priv_data.conn_table & bit) != 0 + && g_atm_priv_data.conn[i].vcc == vcc ) + return i; + } + + return -1; +} + +#if defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB +static void dump_skb(struct sk_buff *skb, u32 len, char *title, int port, int ch, int is_tx) +{ + int i; + + if ( !(ifx_atm_dbg_enable & (is_tx ? 
DBG_ENABLE_MASK_DUMP_SKB_TX : DBG_ENABLE_MASK_DUMP_SKB_RX)) ) + return; + + if ( skb->len < len ) + len = skb->len; + + if ( len > RX_DMA_CH_AAL_BUF_SIZE ) { + printk("too big data length: skb = %08x, skb->data = %08x, skb->len = %d\n", (u32)skb, (u32)skb->data, skb->len); + return; + } + + if ( ch >= 0 ) + printk("%s (port %d, ch %d)\n", title, port, ch); + else + printk("%s\n", title); + printk(" skb->data = %08X, skb->tail = %08X, skb->len = %d\n", (u32)skb->data, (u32)skb->tail, (int)skb->len); + for ( i = 1; i <= len; i++ ) { + if ( i % 16 == 1 ) + printk(" %4d:", i - 1); + printk(" %02X", (int)(*((char*)skb->data + i - 1) & 0xFF)); + if ( i % 16 == 0 ) + printk("\n"); + } + if ( (i - 1) % 16 != 0 ) + printk("\n"); +} +#endif + +static INLINE void proc_file_create(void) +{ +#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC + struct proc_dir_entry *res; +#endif + + g_atm_dir = proc_mkdir("driver/ifx_atm", NULL); + + create_proc_read_entry("version", + 0, + g_atm_dir, + proc_read_version, + NULL); + + res = create_proc_entry("mib", + 0, + g_atm_dir); + if ( res != NULL ) { + res->read_proc = proc_read_mib; + res->write_proc = proc_write_mib; + } + +#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC + res = create_proc_entry("dbg", + 0, + g_atm_dir); + if ( res != NULL ) { + res->read_proc = proc_read_dbg; + res->write_proc = proc_write_dbg; + } +#endif + +#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC + create_proc_read_entry("htu", + 0, + g_atm_dir, + proc_read_htu, + NULL); + + create_proc_read_entry("txq", + 0, + g_atm_dir, + proc_read_txq, + NULL); +#endif +} + +static INLINE void proc_file_delete(void) +{ +#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC + remove_proc_entry("txq", g_atm_dir); + + remove_proc_entry("htu", g_atm_dir); +#endif + +#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC + remove_proc_entry("dbg", g_atm_dir); +#endif + + remove_proc_entry("version", g_atm_dir); + + remove_proc_entry("driver/ifx_atm", NULL); +} + +static int proc_read_version(char *buf, char **start, off_t offset, int count, int *eof, void *data) +{ + int len = 0; + + len += ifx_atm_version(buf + len); + + if ( offset >= len ) { + *start = buf; + *eof = 1; + return 0; + } + *start = buf + offset; + if ( (len -= offset) > count ) + return count; + *eof = 1; + return len; +} + +static int proc_read_mib(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + int len = 0; + + len += sprintf(page + off + len, "Firmware\n"); + len += sprintf(page + off + len, " wrx_drophtu_cell = %u\n", WAN_MIB_TABLE->wrx_drophtu_cell); + len += sprintf(page + off + len, " wrx_dropdes_pdu = %u\n", WAN_MIB_TABLE->wrx_dropdes_pdu); + len += sprintf(page + off + len, " wrx_correct_pdu = %u\n", WAN_MIB_TABLE->wrx_correct_pdu); + len += sprintf(page + off + len, " wrx_err_pdu = %u\n", WAN_MIB_TABLE->wrx_err_pdu); + len += sprintf(page + off + len, " wrx_dropdes_cell = %u\n", WAN_MIB_TABLE->wrx_dropdes_cell); + len += sprintf(page + off + len, " wrx_correct_cell = %u\n", WAN_MIB_TABLE->wrx_correct_cell); + len += sprintf(page + off + len, " wrx_err_cell = %u\n", WAN_MIB_TABLE->wrx_err_cell); + len += sprintf(page + off + len, " wrx_total_byte = %u\n", WAN_MIB_TABLE->wrx_total_byte); + len += sprintf(page + off + len, " wtx_total_pdu = %u\n", WAN_MIB_TABLE->wtx_total_pdu); + len += sprintf(page + off + len, " wtx_total_cell = %u\n", WAN_MIB_TABLE->wtx_total_cell); + len += sprintf(page + off + len, " wtx_total_byte = %u\n", WAN_MIB_TABLE->wtx_total_byte); + len += sprintf(page + off + len, "Driver\n"); + len += 
sprintf(page + off + len, " wrx_pdu = %u\n", g_atm_priv_data.wrx_pdu); + len += sprintf(page + off + len, " wrx_drop_pdu = %u\n", g_atm_priv_data.wrx_drop_pdu); + len += sprintf(page + off + len, " wtx_pdu = %u\n", g_atm_priv_data.wtx_pdu); + len += sprintf(page + off + len, " wtx_err_pdu = %u\n", g_atm_priv_data.wtx_err_pdu); + len += sprintf(page + off + len, " wtx_drop_pdu = %u\n", g_atm_priv_data.wtx_drop_pdu); + + *eof = 1; + + return len; +} + +static int proc_write_mib(struct file *file, const char *buf, unsigned long count, void *data) +{ + char str[2048]; + char *p; + int len, rlen; + + len = count < sizeof(str) ? count : sizeof(str) - 1; + rlen = len - copy_from_user(str, buf, len); + while ( rlen && str[rlen - 1] <= ' ' ) + rlen--; + str[rlen] = 0; + for ( p = str; *p && *p <= ' '; p++, rlen-- ); + if ( !*p ) + return 0; + + if ( stricmp(p, "clear") == 0 || stricmp(p, "clear all") == 0 + || stricmp(p, "clean") == 0 || stricmp(p, "clean all") == 0 ) { + memset(WAN_MIB_TABLE, 0, sizeof(*WAN_MIB_TABLE)); + g_atm_priv_data.wrx_pdu = 0; + g_atm_priv_data.wrx_drop_pdu = 0; + g_atm_priv_data.wtx_pdu = 0; + g_atm_priv_data.wtx_err_pdu = 0; + g_atm_priv_data.wtx_drop_pdu = 0; + } + + return count; +} + +#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC + +static int proc_read_dbg(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + int len = 0; + + len += sprintf(page + off + len, "error print - %s\n", (ifx_atm_dbg_enable & DBG_ENABLE_MASK_ERR) ? "enabled" : "disabled"); + len += sprintf(page + off + len, "debug print - %s\n", (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DEBUG_PRINT) ? "enabled" : "disabled"); + len += sprintf(page + off + len, "assert - %s\n", (ifx_atm_dbg_enable & DBG_ENABLE_MASK_ASSERT) ? "enabled" : "disabled"); + len += sprintf(page + off + len, "dump rx skb - %s\n", (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_SKB_RX) ? "enabled" : "disabled"); + len += sprintf(page + off + len, "dump tx skb - %s\n", (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_SKB_TX) ? "enabled" : "disabled"); + len += sprintf(page + off + len, "qos - %s\n", (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_QOS) ? "enabled" : "disabled"); + len += sprintf(page + off + len, "dump init - %s\n", (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_INIT) ? "enabled" : "disabled"); + + *eof = 1; + + return len; +} + +static int proc_write_dbg(struct file *file, const char *buf, unsigned long count, void *data) +{ + static const char *dbg_enable_mask_str[] = { + " error print", + " err", + " debug print", + " dbg", + " assert", + " assert", + " dump rx skb", + " rx", + " dump tx skb", + " tx", + " dump qos", + " qos", + " dump init", + " init", + " all" + }; + static const int dbg_enable_mask_str_len[] = { + 12, 4, + 12, 4, + 7, 7, + 12, 3, + 12, 3, + 9, 4, + 10, 5, + 4 + }; + u32 dbg_enable_mask[] = { + DBG_ENABLE_MASK_ERR, + DBG_ENABLE_MASK_DEBUG_PRINT, + DBG_ENABLE_MASK_ASSERT, + DBG_ENABLE_MASK_DUMP_SKB_RX, + DBG_ENABLE_MASK_DUMP_SKB_TX, + DBG_ENABLE_MASK_DUMP_QOS, + DBG_ENABLE_MASK_DUMP_INIT, + DBG_ENABLE_MASK_ALL + }; + + char str[2048]; + char *p; + + int len, rlen; + + int f_enable = 0; + int i; + + len = count < sizeof(str) ? 
count : sizeof(str) - 1; + rlen = len - copy_from_user(str, buf, len); + while ( rlen && str[rlen - 1] <= ' ' ) + rlen--; + str[rlen] = 0; + for ( p = str; *p && *p <= ' '; p++, rlen-- ); + if ( !*p ) + return 0; + + if ( strincmp(p, "enable", 6) == 0 ) { + p += 6; + f_enable = 1; + } + else if ( strincmp(p, "disable", 7) == 0 ) { + p += 7; + f_enable = -1; + } + else if ( strincmp(p, "help", 4) == 0 || *p == '?' ) { + printk("echo <enable/disable> [err/dbg/assert/rx/tx/init/all] > /proc/eth/dbg\n"); + } + + if ( f_enable ) { + if ( *p == 0 ) { + if ( f_enable > 0 ) + ifx_atm_dbg_enable |= DBG_ENABLE_MASK_ALL; + else + ifx_atm_dbg_enable &= ~DBG_ENABLE_MASK_ALL; + } + else { + do { + for ( i = 0; i < NUM_ENTITY(dbg_enable_mask_str); i++ ) + if ( strincmp(p, dbg_enable_mask_str[i], dbg_enable_mask_str_len[i]) == 0 ) { + if ( f_enable > 0 ) + ifx_atm_dbg_enable |= dbg_enable_mask[i >> 1]; + else + ifx_atm_dbg_enable &= ~dbg_enable_mask[i >> 1]; + p += dbg_enable_mask_str_len[i]; + break; + } + } while ( i < NUM_ENTITY(dbg_enable_mask_str) ); + } + } + + return count; +} + +#endif + +#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC + +static INLINE int print_htu(char *buf, int i) +{ + int len = 0; + + if ( HTU_ENTRY(i)->vld ) { + len += sprintf(buf + len, "%2d. valid\n", i); + len += sprintf(buf + len, " entry 0x%08x - pid %01x vpi %02x vci %04x pti %01x\n", *(u32*)HTU_ENTRY(i), HTU_ENTRY(i)->pid, HTU_ENTRY(i)->vpi, HTU_ENTRY(i)->vci, HTU_ENTRY(i)->pti); + len += sprintf(buf + len, " mask 0x%08x - pid %01x vpi %02x vci %04x pti %01x\n", *(u32*)HTU_MASK(i), HTU_MASK(i)->pid_mask, HTU_MASK(i)->vpi_mask, HTU_MASK(i)->vci_mask, HTU_MASK(i)->pti_mask); + len += sprintf(buf + len, " result 0x%08x - type: %s, qid: %d", *(u32*)HTU_RESULT(i), HTU_RESULT(i)->type ? "cell" : "AAL5", HTU_RESULT(i)->qid); + if ( HTU_RESULT(i)->type ) + len += sprintf(buf + len, ", cell id: %d, verification: %s", HTU_RESULT(i)->cellid, HTU_RESULT(i)->ven ? "on" : "off"); + len += sprintf(buf + len, "\n"); + } + else + len += sprintf(buf + len, "%2d. invalid\n", i); + + return len; +} + +static int proc_read_htu(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + int len = 0; + int len_max = off + count; + char *pstr; + char str[1024]; + int llen; + + int htuts = *CFG_WRX_HTUTS; + int i; + + pstr = *start = page; + + llen = sprintf(pstr, "HTU Table (Max %d):\n", htuts); + pstr += llen; + len += llen; + + for ( i = 0; i < htuts; i++ ) { + llen = print_htu(str, i); + if ( len <= off && len + llen > off ) { + memcpy(pstr, str + off - len, len + llen - off); + pstr += len + llen - off; + } + else if ( len > off ) { + memcpy(pstr, str, llen); + pstr += llen; + } + len += llen; + if ( len >= len_max ) + goto PROC_READ_HTU_OVERRUN_END; + } + + *eof = 1; + + return len - off; + +PROC_READ_HTU_OVERRUN_END: + + return len - llen - off; +} + +static INLINE int print_tx_queue(char *buf, int i) +{ + int len = 0; + + if ( (*WTX_DMACH_ON & (1 << i)) ) { + len += sprintf(buf + len, "%2d. valid\n", i); + len += sprintf(buf + len, " queue 0x%08x - sbid %u, qsb %s\n", *(u32*)WTX_QUEUE_CONFIG(i), (unsigned int)WTX_QUEUE_CONFIG(i)->sbid, WTX_QUEUE_CONFIG(i)->qsben ? "enable" : "disable"); + len += sprintf(buf + len, " dma 0x%08x - base %08x, len %u, vlddes %u\n", *(u32*)WTX_DMA_CHANNEL_CONFIG(i), WTX_DMA_CHANNEL_CONFIG(i)->desba, WTX_DMA_CHANNEL_CONFIG(i)->deslen, WTX_DMA_CHANNEL_CONFIG(i)->vlddes); + } + else + len += sprintf(buf + len, "%2d. 
invalid\n", i); + + return len; +} + +static int proc_read_txq(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + int len = 0; + int len_max = off + count; + char *pstr; + char str[1024]; + int llen; + + int i; + + pstr = *start = page; + + llen = sprintf(pstr, "TX Queue Config (Max %d):\n", *CFG_WTX_DCHNUM); + pstr += llen; + len += llen; + + for ( i = 0; i < 16; i++ ) { + llen = print_tx_queue(str, i); + if ( len <= off && len + llen > off ) { + memcpy(pstr, str + off - len, len + llen - off); + pstr += len + llen - off; + } + else if ( len > off ) { + memcpy(pstr, str, llen); + pstr += llen; + } + len += llen; + if ( len >= len_max ) + goto PROC_READ_HTU_OVERRUN_END; + } + + *eof = 1; + + return len - off; + +PROC_READ_HTU_OVERRUN_END: + + return len - llen - off; +} + +#endif + +static int stricmp(const char *p1, const char *p2) +{ + int c1, c2; + + while ( *p1 && *p2 ) + { + c1 = *p1 >= 'A' && *p1 <= 'Z' ? *p1 + 'a' - 'A' : *p1; + c2 = *p2 >= 'A' && *p2 <= 'Z' ? *p2 + 'a' - 'A' : *p2; + if ( (c1 -= c2) ) + return c1; + p1++; + p2++; + } + + return *p1 - *p2; +} + +#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC +static int strincmp(const char *p1, const char *p2, int n) +{ + int c1 = 0, c2; + + while ( n && *p1 && *p2 ) + { + c1 = *p1 >= 'A' && *p1 <= 'Z' ? *p1 + 'a' - 'A' : *p1; + c2 = *p2 >= 'A' && *p2 <= 'Z' ? *p2 + 'a' - 'A' : *p2; + if ( (c1 -= c2) ) + return c1; + p1++; + p2++; + n--; + } + + return n ? *p1 - *p2 : c1; +} +#endif + +static INLINE int ifx_atm_version(char *buf) +{ + int len = 0; + unsigned int major, minor; + + ifx_atm_get_fw_ver(&major, &minor); + + len += sprintf(buf + len, "Infineon Technologies ATM driver version %d.%d.%d\n", IFX_ATM_VER_MAJOR, IFX_ATM_VER_MID, IFX_ATM_VER_MINOR); + len += sprintf(buf + len, "Infineon Technologies ATM (A1) firmware version %d.%d\n", major, minor); + + return len; +} + +#ifdef MODULE +static INLINE void reset_ppe(void) +{ + // TODO: +} +#endif + +static INLINE void check_parameters(void) +{ + /* Please refer to Amazon spec 15.4 for setting these values. */ + if ( qsb_tau < 1 ) + qsb_tau = 1; + if ( qsb_tstep < 1 ) + qsb_tstep = 1; + else if ( qsb_tstep > 4 ) + qsb_tstep = 4; + else if ( qsb_tstep == 3 ) + qsb_tstep = 2; + + /* There is a delay between PPE write descriptor and descriptor is */ + /* really stored in memory. Host also has this delay when writing */ + /* descriptor. So PPE will use this value to determine if the write */ + /* operation makes effect. */ + if ( write_descriptor_delay < 0 ) + write_descriptor_delay = 0; + + if ( aal5_fill_pattern < 0 ) + aal5_fill_pattern = 0; + else + aal5_fill_pattern &= 0xFF; + + /* Because of the limitation of length field in descriptors, the packet */ + /* size could not be larger than 64K minus overhead size. 
*/ + if ( aal5r_max_packet_size < 0 ) + aal5r_max_packet_size = 0; + else if ( aal5r_max_packet_size >= 65535 - MAX_RX_FRAME_EXTRA_BYTES ) + aal5r_max_packet_size = 65535 - MAX_RX_FRAME_EXTRA_BYTES; + if ( aal5r_min_packet_size < 0 ) + aal5r_min_packet_size = 0; + else if ( aal5r_min_packet_size > aal5r_max_packet_size ) + aal5r_min_packet_size = aal5r_max_packet_size; + if ( aal5s_max_packet_size < 0 ) + aal5s_max_packet_size = 0; + else if ( aal5s_max_packet_size >= 65535 - MAX_TX_FRAME_EXTRA_BYTES ) + aal5s_max_packet_size = 65535 - MAX_TX_FRAME_EXTRA_BYTES; + if ( aal5s_min_packet_size < 0 ) + aal5s_min_packet_size = 0; + else if ( aal5s_min_packet_size > aal5s_max_packet_size ) + aal5s_min_packet_size = aal5s_max_packet_size; + + if ( dma_rx_descriptor_length < 2 ) + dma_rx_descriptor_length = 2; + if ( dma_tx_descriptor_length < 2 ) + dma_tx_descriptor_length = 2; + if ( dma_rx_clp1_descriptor_threshold < 0 ) + dma_rx_clp1_descriptor_threshold = 0; + else if ( dma_rx_clp1_descriptor_threshold > dma_rx_descriptor_length ) + dma_rx_clp1_descriptor_threshold = dma_rx_descriptor_length; + + if ( dma_tx_descriptor_length < 2 ) + dma_tx_descriptor_length = 2; +} + +static INLINE int init_priv_data(void) +{ + void *p; + int i; + struct rx_descriptor rx_desc = {0}; + struct sk_buff *skb; + volatile struct tx_descriptor *p_tx_desc; + struct sk_buff **ppskb; + + // clear atm private data structure + memset(&g_atm_priv_data, 0, sizeof(g_atm_priv_data)); + + // allocate memory for RX (AAL) descriptors + p = kzalloc(dma_rx_descriptor_length * sizeof(struct rx_descriptor) + DESC_ALIGNMENT, GFP_KERNEL); + if ( p == NULL ) + return IFX_ERROR; + dma_cache_wback_inv((unsigned long)p, dma_rx_descriptor_length * sizeof(struct rx_descriptor) + DESC_ALIGNMENT); + g_atm_priv_data.aal_desc_base = p; + p = (void *)((((unsigned int)p + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1); + g_atm_priv_data.aal_desc = (volatile struct rx_descriptor *)p; + + // allocate memory for RX (OAM) descriptors + p = kzalloc(RX_DMA_CH_OAM_DESC_LEN * sizeof(struct rx_descriptor) + DESC_ALIGNMENT, GFP_KERNEL); + if ( p == NULL ) + return IFX_ERROR; + dma_cache_wback_inv((unsigned long)p, RX_DMA_CH_OAM_DESC_LEN * sizeof(struct rx_descriptor) + DESC_ALIGNMENT); + g_atm_priv_data.oam_desc_base = p; + p = (void *)((((unsigned int)p + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1); + g_atm_priv_data.oam_desc = (volatile struct rx_descriptor *)p; + + // allocate memory for RX (OAM) buffer + p = kzalloc(RX_DMA_CH_OAM_DESC_LEN * RX_DMA_CH_OAM_BUF_SIZE + DATA_BUFFER_ALIGNMENT, GFP_KERNEL); + if ( p == NULL ) + return IFX_ERROR; + dma_cache_wback_inv((unsigned long)p, RX_DMA_CH_OAM_DESC_LEN * RX_DMA_CH_OAM_BUF_SIZE + DATA_BUFFER_ALIGNMENT); + g_atm_priv_data.oam_buf_base = p; + p = (void *)(((unsigned int)p + DATA_BUFFER_ALIGNMENT - 1) & ~(DATA_BUFFER_ALIGNMENT - 1)); + g_atm_priv_data.oam_buf = p; + + // allocate memory for TX descriptors + p = kzalloc(MAX_PVC_NUMBER * dma_tx_descriptor_length * sizeof(struct tx_descriptor) + DESC_ALIGNMENT, GFP_KERNEL); + if ( p == NULL ) + return IFX_ERROR; + dma_cache_wback_inv((unsigned long)p, MAX_PVC_NUMBER * dma_tx_descriptor_length * sizeof(struct tx_descriptor) + DESC_ALIGNMENT); + g_atm_priv_data.tx_desc_base = p; + + // allocate memory for TX skb pointers + p = kzalloc(MAX_PVC_NUMBER * dma_tx_descriptor_length * sizeof(struct sk_buff *) + 4, GFP_KERNEL); + if ( p == NULL ) + return IFX_ERROR; + dma_cache_wback_inv((unsigned long)p, MAX_PVC_NUMBER * dma_tx_descriptor_length * 
sizeof(struct sk_buff *) + 4); + g_atm_priv_data.tx_skb_base = p; + + // setup RX (AAL) descriptors + rx_desc.own = 1; + rx_desc.c = 0; + rx_desc.sop = 1; + rx_desc.eop = 1; + rx_desc.byteoff = 0; + rx_desc.id = 0; + rx_desc.err = 0; + rx_desc.datalen = RX_DMA_CH_AAL_BUF_SIZE; + for ( i = 0; i < dma_rx_descriptor_length; i++ ) { + skb = alloc_skb_rx(); + if ( skb == NULL ) + return IFX_ERROR; + rx_desc.dataptr = ((unsigned int)skb->data >> 2) & 0x0FFFFFFF; + g_atm_priv_data.aal_desc[i] = rx_desc; + } + + // setup RX (OAM) descriptors + p = (void *)((unsigned int)g_atm_priv_data.oam_buf | KSEG1); + rx_desc.own = 1; + rx_desc.c = 0; + rx_desc.sop = 1; + rx_desc.eop = 1; + rx_desc.byteoff = 0; + rx_desc.id = 0; + rx_desc.err = 0; + rx_desc.datalen = RX_DMA_CH_OAM_BUF_SIZE; + for ( i = 0; i < RX_DMA_CH_OAM_DESC_LEN; i++ ) { + rx_desc.dataptr = ((unsigned int)p >> 2) & 0x0FFFFFFF; + g_atm_priv_data.oam_desc[i] = rx_desc; + p = (void *)((unsigned int)p + RX_DMA_CH_OAM_BUF_SIZE); + } + + // setup TX descriptors and skb pointers + p_tx_desc = (volatile struct tx_descriptor *)((((unsigned int)g_atm_priv_data.tx_desc_base + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1); + ppskb = (struct sk_buff **)(((unsigned int)g_atm_priv_data.tx_skb_base + 3) & ~3); + for ( i = 0; i < MAX_PVC_NUMBER; i++ ) { + g_atm_priv_data.conn[i].tx_desc = &p_tx_desc[i * dma_tx_descriptor_length]; + g_atm_priv_data.conn[i].tx_skb = &ppskb[i * dma_tx_descriptor_length]; + } + + for ( i = 0; i < ATM_PORT_NUMBER; i++ ) + g_atm_priv_data.port[i].tx_max_cell_rate = DEFAULT_TX_LINK_RATE; + + return IFX_SUCCESS; +} + +static INLINE void clear_priv_data(void) +{ + int i, j; + struct sk_buff *skb; + + for ( i = 0; i < MAX_PVC_NUMBER; i++ ) { + if ( g_atm_priv_data.conn[i].tx_skb != NULL ) { + for ( j = 0; j < dma_tx_descriptor_length; j++ ) + if ( g_atm_priv_data.conn[i].tx_skb[j] != NULL ) + dev_kfree_skb_any(g_atm_priv_data.conn[i].tx_skb[j]); + } + } + + if ( g_atm_priv_data.tx_skb_base != NULL ) + kfree(g_atm_priv_data.tx_skb_base); + + if ( g_atm_priv_data.tx_desc_base != NULL ) + kfree(g_atm_priv_data.tx_desc_base); + + if ( g_atm_priv_data.oam_buf_base != NULL ) + kfree(g_atm_priv_data.oam_buf_base); + + if ( g_atm_priv_data.oam_desc_base != NULL ) + kfree(g_atm_priv_data.oam_desc_base); + + if ( g_atm_priv_data.aal_desc_base != NULL ) { + for ( i = 0; i < dma_rx_descriptor_length; i++ ) { + if ( g_atm_priv_data.aal_desc[i].sop || g_atm_priv_data.aal_desc[i].eop ) { // descriptor initialized + skb = get_skb_rx_pointer(g_atm_priv_data.aal_desc[i].dataptr); + dev_kfree_skb_any(skb); + } + } + kfree(g_atm_priv_data.aal_desc_base); + } +} + +static INLINE void init_rx_tables(void) +{ + int i; + struct wrx_queue_config wrx_queue_config = {0}; + struct wrx_dma_channel_config wrx_dma_channel_config = {0}; + struct htu_entry htu_entry = {0}; + struct htu_result htu_result = {0}; + struct htu_mask htu_mask = { set: 0x01, + clp: 0x01, + pid_mask: 0x00, + vpi_mask: 0x00, + vci_mask: 0x00, + pti_mask: 0x00, + clear: 0x00}; + + /* + * General Registers + */ + *CFG_WRX_HTUTS = MAX_PVC_NUMBER + OAM_HTU_ENTRY_NUMBER; + *CFG_WRX_QNUM = MAX_QUEUE_NUMBER; + *CFG_WRX_DCHNUM = RX_DMA_CH_TOTAL; + *WRX_DMACH_ON = (1 << RX_DMA_CH_TOTAL) - 1; + *WRX_HUNT_BITTH = DEFAULT_RX_HUNT_BITTH; + + /* + * WRX Queue Configuration Table + */ + wrx_queue_config.uumask = 0; + wrx_queue_config.cpimask = 0; + wrx_queue_config.uuexp = 0; + wrx_queue_config.cpiexp = 0; + wrx_queue_config.mfs = aal5r_max_packet_size; + wrx_queue_config.oversize = 
aal5r_max_packet_size; + wrx_queue_config.undersize = aal5r_min_packet_size; + wrx_queue_config.errdp = aal5r_drop_error_packet; + wrx_queue_config.dmach = RX_DMA_CH_AAL; + for ( i = 0; i < MAX_QUEUE_NUMBER; i++ ) + *WRX_QUEUE_CONFIG(i) = wrx_queue_config; + WRX_QUEUE_CONFIG(OAM_RX_QUEUE)->dmach = RX_DMA_CH_OAM; + + /* + * WRX DMA Channel Configuration Table + */ + wrx_dma_channel_config.chrl = 0; + wrx_dma_channel_config.clp1th = dma_rx_clp1_descriptor_threshold; + wrx_dma_channel_config.mode = 0; + wrx_dma_channel_config.rlcfg = 0; + + wrx_dma_channel_config.deslen = RX_DMA_CH_OAM_DESC_LEN; + wrx_dma_channel_config.desba = ((unsigned int)g_atm_priv_data.oam_desc >> 2) & 0x0FFFFFFF; + *WRX_DMA_CHANNEL_CONFIG(RX_DMA_CH_OAM) = wrx_dma_channel_config; + + wrx_dma_channel_config.deslen = dma_rx_descriptor_length; + wrx_dma_channel_config.desba = ((unsigned int)g_atm_priv_data.aal_desc >> 2) & 0x0FFFFFFF; + *WRX_DMA_CHANNEL_CONFIG(RX_DMA_CH_AAL) = wrx_dma_channel_config; + + /* + * HTU Tables + */ + for ( i = 0; i < MAX_PVC_NUMBER; i++ ) + { + htu_result.qid = (unsigned int)i; + + *HTU_ENTRY(i + OAM_HTU_ENTRY_NUMBER) = htu_entry; + *HTU_MASK(i + OAM_HTU_ENTRY_NUMBER) = htu_mask; + *HTU_RESULT(i + OAM_HTU_ENTRY_NUMBER) = htu_result; + } + /* OAM HTU Entry */ + htu_entry.vci = 0x03; + htu_mask.pid_mask = 0x03; + htu_mask.vpi_mask = 0xFF; + htu_mask.vci_mask = 0x0000; + htu_mask.pti_mask = 0x07; + htu_result.cellid = OAM_RX_QUEUE; + htu_result.type = 1; + htu_result.ven = 1; + htu_result.qid = OAM_RX_QUEUE; + *HTU_RESULT(OAM_F4_SEG_HTU_ENTRY) = htu_result; + *HTU_MASK(OAM_F4_SEG_HTU_ENTRY) = htu_mask; + *HTU_ENTRY(OAM_F4_SEG_HTU_ENTRY) = htu_entry; + htu_entry.vci = 0x04; + htu_result.cellid = OAM_RX_QUEUE; + htu_result.type = 1; + htu_result.ven = 1; + htu_result.qid = OAM_RX_QUEUE; + *HTU_RESULT(OAM_F4_TOT_HTU_ENTRY) = htu_result; + *HTU_MASK(OAM_F4_TOT_HTU_ENTRY) = htu_mask; + *HTU_ENTRY(OAM_F4_TOT_HTU_ENTRY) = htu_entry; + htu_entry.vci = 0x00; + htu_entry.pti = 0x04; + htu_mask.vci_mask = 0xFFFF; + htu_mask.pti_mask = 0x01; + htu_result.cellid = OAM_RX_QUEUE; + htu_result.type = 1; + htu_result.ven = 1; + htu_result.qid = OAM_RX_QUEUE; + *HTU_RESULT(OAM_F5_HTU_ENTRY) = htu_result; + *HTU_MASK(OAM_F5_HTU_ENTRY) = htu_mask; + *HTU_ENTRY(OAM_F5_HTU_ENTRY) = htu_entry; +#if defined(ENABLE_ATM_RETX) && ENABLE_ATM_RETX + htu_entry.pid = 0x0; + htu_entry.vpi = 0x01; + htu_entry.vci = 0x0001; + htu_entry.pti = 0x00; + htu_mask.pid_mask = 0x0; + htu_mask.vpi_mask = 0x00; + htu_mask.vci_mask = 0x0000; + htu_mask.pti_mask = 0x3; + htu_result.cellid = OAM_RX_QUEUE; + htu_result.type = 1; + htu_result.ven = 1; + htu_result.qid = OAM_RX_QUEUE; + *HTU_RESULT(OAM_ARQ_HTU_ENTRY) = htu_result; + *HTU_MASK(OAM_ARQ_HTU_ENTRY) = htu_mask; + *HTU_ENTRY(OAM_ARQ_HTU_ENTRY) = htu_entry; +#endif +} + +static INLINE void init_tx_tables(void) +{ + int i; + struct wtx_queue_config wtx_queue_config = {0}; + struct wtx_dma_channel_config wtx_dma_channel_config = {0}; + struct wtx_port_config wtx_port_config = { res1: 0, + qid: 0, + qsben: 1}; + + /* + * General Registers + */ + *CFG_WTX_DCHNUM = MAX_TX_DMA_CHANNEL_NUMBER; + *WTX_DMACH_ON = ((1 << MAX_TX_DMA_CHANNEL_NUMBER) - 1) ^ ((1 << FIRST_QSB_QID) - 1); + *CFG_WRDES_DELAY = write_descriptor_delay; + + /* + * WTX Port Configuration Table + */ + for ( i = 0; i < ATM_PORT_NUMBER; i++ ) + *WTX_PORT_CONFIG(i) = wtx_port_config; + + /* + * WTX Queue Configuration Table + */ + wtx_queue_config.type = 0x0; + wtx_queue_config.qsben = 1; + wtx_queue_config.sbid = 0; + for ( i = 
0; i < MAX_TX_DMA_CHANNEL_NUMBER; i++ ) + *WTX_QUEUE_CONFIG(i) = wtx_queue_config; + + /* + * WTX DMA Channel Configuration Table + */ + wtx_dma_channel_config.mode = 0; + wtx_dma_channel_config.deslen = 0; + wtx_dma_channel_config.desba = 0; + for ( i = 0; i < FIRST_QSB_QID; i++ ) + *WTX_DMA_CHANNEL_CONFIG(i) = wtx_dma_channel_config; + /* normal connection */ + wtx_dma_channel_config.deslen = dma_tx_descriptor_length; + for ( ; i < MAX_TX_DMA_CHANNEL_NUMBER ; i++ ) { + wtx_dma_channel_config.desba = ((unsigned int)g_atm_priv_data.conn[i - FIRST_QSB_QID].tx_desc >> 2) & 0x0FFFFFFF; + *WTX_DMA_CHANNEL_CONFIG(i) = wtx_dma_channel_config; + } +} + + + +/* + * #################################### + * Global Function + * #################################### + */ + +static int atm_showtime_enter(struct port_cell_info *port_cell, void *xdata_addr) +{ + int i, j; + + ASSERT(port_cell != NULL, "port_cell is NULL"); + ASSERT(xdata_addr != NULL, "xdata_addr is NULL"); + + for ( j = 0; j < ATM_PORT_NUMBER && j < port_cell->port_num; j++ ) + if ( port_cell->tx_link_rate[j] > 0 ) + break; + for ( i = 0; i < ATM_PORT_NUMBER && i < port_cell->port_num; i++ ) + g_atm_priv_data.port[i].tx_max_cell_rate = port_cell->tx_link_rate[i] > 0 ? port_cell->tx_link_rate[i] : port_cell->tx_link_rate[j]; + + qsb_global_set(); + + for ( i = 0; i < MAX_PVC_NUMBER; i++ ) + if ( g_atm_priv_data.conn[i].vcc != NULL ) + set_qsb(g_atm_priv_data.conn[i].vcc, &g_atm_priv_data.conn[i].vcc->qos, i); + + // TODO: ReTX set xdata_addr + g_xdata_addr = xdata_addr; + + g_showtime = 1; + +#if defined(CONFIG_VR9) + IFX_REG_W32(0x0F, UTP_CFG); +#endif + + pr_debug("enter showtime, cell rate: 0 - %d, 1 - %d, xdata addr: 0x%08x\n", g_atm_priv_data.port[0].tx_max_cell_rate, g_atm_priv_data.port[1].tx_max_cell_rate, (unsigned int)g_xdata_addr); + + return IFX_SUCCESS; +} + +static int atm_showtime_exit(void) +{ +#if defined(CONFIG_VR9) + IFX_REG_W32(0x00, UTP_CFG); +#endif + + g_showtime = 0; + + // TODO: ReTX clean state + g_xdata_addr = NULL; + + pr_debug("leave showtime\n"); + + return IFX_SUCCESS; +} + + + +/* + * #################################### + * Init/Cleanup API + * #################################### + */ + +/* + * Description: + * Initialize global variables, PP32, comunication structures, register IRQ + * and register device. 
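 * (In outline, the body below checks the module parameters, allocates the private data
 *  and descriptor rings, initialises the PPE hardware, writes the RX/TX firmware tables,
 *  registers one ATM device per port, requests the mailbox interrupt, downloads and
 *  starts the PP32 firmware, and finally programs the QSB, validates the OAM HTU
 *  entries and creates the proc entries.)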
+ * Input: + * none + * Output: + * 0 --- successful + * else --- failure, usually it is negative value of error code + */ +static int __devinit ifx_atm_init(void) +{ + int ret; + int port_num; + struct port_cell_info port_cell = {0}; + int i, j; + char ver_str[256]; + +#ifdef MODULE + reset_ppe(); +#endif + + check_parameters(); + + ret = init_priv_data(); + if ( ret != IFX_SUCCESS ) { + err("INIT_PRIV_DATA_FAIL"); + goto INIT_PRIV_DATA_FAIL; + } + + ifx_atm_init_chip(); + init_rx_tables(); + init_tx_tables(); + + /* create devices */ + for ( port_num = 0; port_num < ATM_PORT_NUMBER; port_num++ ) { + g_atm_priv_data.port[port_num].dev = atm_dev_register("ifxmips_atm", &g_ifx_atm_ops, -1, NULL); + if ( !g_atm_priv_data.port[port_num].dev ) { + err("failed to register atm device %d!", port_num); + ret = -EIO; + goto ATM_DEV_REGISTER_FAIL; + } + else { + g_atm_priv_data.port[port_num].dev->ci_range.vpi_bits = 8; + g_atm_priv_data.port[port_num].dev->ci_range.vci_bits = 16; + g_atm_priv_data.port[port_num].dev->link_rate = g_atm_priv_data.port[port_num].tx_max_cell_rate; + g_atm_priv_data.port[port_num].dev->dev_data = (void*)port_num; + } + } + + /* register interrupt handler */ + ret = request_irq(PPE_MAILBOX_IGU1_INT, mailbox_irq_handler, IRQF_DISABLED, "atm_mailbox_isr", &g_atm_priv_data); + if ( ret ) { + if ( ret == -EBUSY ) { + err("IRQ may be occupied by other driver, please reconfig to disable it."); + } + else { + err("request_irq fail"); + } + goto REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL; + } + disable_irq(PPE_MAILBOX_IGU1_INT); + + ret = ifx_pp32_start(0); + if ( ret ) { + err("ifx_pp32_start fail!"); + goto PP32_START_FAIL; + } + + port_cell.port_num = ATM_PORT_NUMBER; + ifx_mei_atm_showtime_check(&g_showtime, &port_cell, &g_xdata_addr); + if ( g_showtime ) { + for ( i = 0; i < ATM_PORT_NUMBER; i++ ) + if ( port_cell.tx_link_rate[i] != 0 ) + break; + for ( j = 0; j < ATM_PORT_NUMBER; j++ ) + g_atm_priv_data.port[j].tx_max_cell_rate = port_cell.tx_link_rate[j] != 0 ? port_cell.tx_link_rate[j] : port_cell.tx_link_rate[i]; + } + + qsb_global_set(); + validate_oam_htu_entry(); + + /* create proc file */ + proc_file_create(); + + ifx_mei_atm_showtime_enter = atm_showtime_enter; + ifx_mei_atm_showtime_exit = atm_showtime_exit; + + ifx_atm_version(ver_str); + printk(KERN_INFO "%s", ver_str); + + printk("ifxmips_atm: ATM init succeed\n"); + + return IFX_SUCCESS; + +PP32_START_FAIL: + free_irq(PPE_MAILBOX_IGU1_INT, &g_atm_priv_data); +REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL: +ATM_DEV_REGISTER_FAIL: + while ( port_num-- > 0 ) + atm_dev_deregister(g_atm_priv_data.port[port_num].dev); +INIT_PRIV_DATA_FAIL: + clear_priv_data(); + printk("ifxmips_atm: ATM init failed\n"); + return ret; +} + +/* + * Description: + * Release memory, free IRQ, and deregister device. 
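The error path of ifx_atm_init() above unwinds partial setup through a label ladder, and ifx_atm_exit() below releases the same resources in reverse order. A self-contained sketch of that idiom, with made-up helper names standing in for the real steps (not part of the patch):

    #include <stdio.h>

    /* stand-ins for the real steps; request_interrupt() pretends to fail */
    static int  alloc_resources(void)    { return 0; }
    static void free_resources(void)     { }   /* safe on a partially built state */
    static int  register_devices(void)   { return 0; }
    static void unregister_devices(void) { }
    static int  request_interrupt(void)  { return -16; }

    static int example_init(void)
    {
        int ret;

        ret = alloc_resources();
        if (ret)
            goto ALLOC_FAIL;
        ret = register_devices();
        if (ret)
            goto REGISTER_FAIL;
        ret = request_interrupt();
        if (ret)
            goto IRQ_FAIL;
        return 0;                       /* fully initialised */

    IRQ_FAIL:
        unregister_devices();           /* undo the later steps first */
    REGISTER_FAIL:
    ALLOC_FAIL:
        free_resources();               /* then the earliest one */
        return ret;
    }

    int main(void)
    {
        printf("example_init() = %d\n", example_init());   /* prints -16 here */
        return 0;
    }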
+ * Input: + * none + * Output: + * none + */ +static void __exit ifx_atm_exit(void) +{ + int port_num; + + ifx_mei_atm_showtime_enter = NULL; + ifx_mei_atm_showtime_exit = NULL; + + proc_file_delete(); + + invalidate_oam_htu_entry(); + + ifx_pp32_stop(0); + + free_irq(PPE_MAILBOX_IGU1_INT, &g_atm_priv_data); + + for ( port_num = 0; port_num < ATM_PORT_NUMBER; port_num++ ) + atm_dev_deregister(g_atm_priv_data.port[port_num].dev); + + ifx_atm_uninit_chip(); + + clear_priv_data(); +} + +module_init(ifx_atm_init); +module_exit(ifx_atm_exit); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/package/lqdsl/src/ifxmips_atm_core.h b/package/lqdsl/src/ifxmips_atm_core.h new file mode 100644 index 0000000000..8f0a3ebfba --- /dev/null +++ b/package/lqdsl/src/ifxmips_atm_core.h @@ -0,0 +1,249 @@ +/****************************************************************************** +** +** FILE NAME : ifxmips_atm_core.h +** PROJECT : UEIP +** MODULES : ATM +** +** DATE : 7 Jul 2009 +** AUTHOR : Xu Liang +** DESCRIPTION : ATM driver header file (core functions) +** COPYRIGHT : Copyright (c) 2006 +** Infineon Technologies AG +** Am Campeon 1-12, 85579 Neubiberg, Germany +** +** This program is free software; you can redistribute it and/or modify +** it under the terms of the GNU General Public License as published by +** the Free Software Foundation; either version 2 of the License, or +** (at your option) any later version. +** +** HISTORY +** $Date $Author $Comment +** 17 JUN 2009 Xu Liang Init Version +*******************************************************************************/ + +#ifndef IFXMIPS_ATM_CORE_H +#define IFXMIPS_ATM_CORE_H + + +#include "ifxmips_compat.h" +#include "ifx_atm.h" +#include "ifxmips_atm_ppe_common.h" +#include "ifxmips_atm_fw_regs_common.h" + + + +/* + * #################################### + * Definition + * #################################### + */ + +/* + * Compile Options + */ + +#define ENABLE_DEBUG 1 + +#define ENABLE_ASSERT 1 + +#define INLINE + +#define DEBUG_DUMP_SKB 1 + +#define DEBUG_QOS 1 + +#define ENABLE_DBG_PROC 1 + +#define ENABLE_FW_PROC 1 + +#ifdef CONFIG_IFX_ATM_TASKLET + #define ENABLE_TASKLET 1 +#endif + + +/* + * Debug/Assert/Error Message + */ + +#define DBG_ENABLE_MASK_ERR (1 << 0) +#define DBG_ENABLE_MASK_DEBUG_PRINT (1 << 1) +#define DBG_ENABLE_MASK_ASSERT (1 << 2) +#define DBG_ENABLE_MASK_DUMP_SKB_RX (1 << 8) +#define DBG_ENABLE_MASK_DUMP_SKB_TX (1 << 9) +#define DBG_ENABLE_MASK_DUMP_QOS (1 << 10) +#define DBG_ENABLE_MASK_DUMP_INIT (1 << 11) +#define DBG_ENABLE_MASK_ALL (DBG_ENABLE_MASK_ERR | DBG_ENABLE_MASK_DEBUG_PRINT | DBG_ENABLE_MASK_ASSERT | DBG_ENABLE_MASK_DUMP_SKB_RX | DBG_ENABLE_MASK_DUMP_SKB_TX | DBG_ENABLE_MASK_DUMP_QOS | DBG_ENABLE_MASK_DUMP_INIT) + +#define err(format, arg...) do { if ( (ifx_atm_dbg_enable & DBG_ENABLE_MASK_ERR) ) printk(KERN_ERR __FILE__ ":%d:%s: " format "\n", __LINE__, __FUNCTION__, ##arg); } while ( 0 ) + +#if defined(ENABLE_DEBUG) && ENABLE_DEBUG + #undef dbg + #define dbg(format, arg...) do { if ( (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DEBUG_PRINT) ) printk(KERN_WARNING __FILE__ ":%d:%s: " format "\n", __LINE__, __FUNCTION__, ##arg); } while ( 0 ) +#else + #if !defined(dbg) + #define dbg(format, arg...) + #endif +#endif + +#if defined(ENABLE_ASSERT) && ENABLE_ASSERT + #define ASSERT(cond, format, arg...) do { if ( (ifx_atm_dbg_enable & DBG_ENABLE_MASK_ASSERT) && !(cond) ) printk(KERN_ERR __FILE__ ":%d:%s: " format "\n", __LINE__, __FUNCTION__, ##arg); } while ( 0 ) +#else + #define ASSERT(cond, format, arg...) 
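For illustration, typical call sites for the macros above; the err() and ASSERT() lines mirror real calls in ifxmips_atm_core.c, while the dbg() line is a made-up example. Output only appears when the matching DBG_ENABLE_MASK_* bit is set in ifx_atm_dbg_enable:

    /*
     *   err("ALLOC_TX_CONNECTION_FAIL");
     *   dbg("conn %d: %u bytes queued", conn, skb->len);
     *   ASSERT((unsigned int)skb >= KSEG0,
     *          "invalid skb - skb = %#08x, dataptr = %#08x",
     *          (unsigned int)skb, dataptr);
     */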
+#endif + + +/* + * Constants + */ +#define DEFAULT_TX_LINK_RATE 3200 // in cells + +/* + * ATM Port, QSB Queue, DMA RX/TX Channel Parameters + */ +#define ATM_PORT_NUMBER 2 +#define MAX_QUEUE_NUMBER 16 +#define OAM_RX_QUEUE 15 +#define QSB_RESERVE_TX_QUEUE 0 +#define FIRST_QSB_QID 1 +#define MAX_PVC_NUMBER (MAX_QUEUE_NUMBER - FIRST_QSB_QID) +#define MAX_RX_DMA_CHANNEL_NUMBER 8 +#define MAX_TX_DMA_CHANNEL_NUMBER 16 +#define DATA_BUFFER_ALIGNMENT EMA_ALIGNMENT +#define DESC_ALIGNMENT 8 +#define DEFAULT_RX_HUNT_BITTH 4 + +/* + * RX DMA Channel Allocation + */ +#define RX_DMA_CH_OAM 0 +#define RX_DMA_CH_AAL 1 +#define RX_DMA_CH_TOTAL 2 +#define RX_DMA_CH_OAM_DESC_LEN 32 +#define RX_DMA_CH_OAM_BUF_SIZE (CELL_SIZE & ~15) +#define RX_DMA_CH_AAL_BUF_SIZE (2048 - 48) + +/* + * OAM Constants + */ +#define OAM_HTU_ENTRY_NUMBER 3 +#define OAM_F4_SEG_HTU_ENTRY 0 +#define OAM_F4_TOT_HTU_ENTRY 1 +#define OAM_F5_HTU_ENTRY 2 +#define OAM_F4_CELL_ID 0 +#define OAM_F5_CELL_ID 15 +//#if defined(ENABLE_ATM_RETX) && ENABLE_ATM_RETX +// #undef OAM_HTU_ENTRY_NUMBER +// #define OAM_HTU_ENTRY_NUMBER 4 +// #define OAM_ARQ_HTU_ENTRY 3 +//#endif + +/* + * RX Frame Definitions + */ +#define MAX_RX_PACKET_ALIGN_BYTES 3 +#define MAX_RX_PACKET_PADDING_BYTES 3 +#define RX_INBAND_TRAILER_LENGTH 8 +#define MAX_RX_FRAME_EXTRA_BYTES (RX_INBAND_TRAILER_LENGTH + MAX_RX_PACKET_ALIGN_BYTES + MAX_RX_PACKET_PADDING_BYTES) + +/* + * TX Frame Definitions + */ +#define MAX_TX_HEADER_ALIGN_BYTES 12 +#define MAX_TX_PACKET_ALIGN_BYTES 3 +#define MAX_TX_PACKET_PADDING_BYTES 3 +#define TX_INBAND_HEADER_LENGTH 8 +#define MAX_TX_FRAME_EXTRA_BYTES (TX_INBAND_HEADER_LENGTH + MAX_TX_HEADER_ALIGN_BYTES + MAX_TX_PACKET_ALIGN_BYTES + MAX_TX_PACKET_PADDING_BYTES) + +/* + * Cell Constant + */ +#define CELL_SIZE ATM_AAL0_SDU + + + +/* + * #################################### + * Data Type + * #################################### + */ + +typedef struct { + unsigned int h; + unsigned int l; +} ppe_u64_t; + +struct port { + unsigned int tx_max_cell_rate; + unsigned int tx_current_cell_rate; + + struct atm_dev *dev; +}; + +struct connection { + struct atm_vcc *vcc; + + volatile struct tx_descriptor + *tx_desc; + unsigned int tx_desc_pos; + struct sk_buff **tx_skb; + + unsigned int aal5_vcc_crc_err; /* number of packets with CRC error */ + unsigned int aal5_vcc_oversize_sdu; /* number of packets with oversize error */ + + unsigned int port; +}; + +struct atm_priv_data { + unsigned long conn_table; + struct connection conn[MAX_PVC_NUMBER]; + + volatile struct rx_descriptor + *aal_desc; + unsigned int aal_desc_pos; + + volatile struct rx_descriptor + *oam_desc; + unsigned char *oam_buf; + unsigned int oam_desc_pos; + + struct port port[ATM_PORT_NUMBER]; + + unsigned int wrx_pdu; /* successfully received AAL5 packet */ + unsigned int wrx_drop_pdu; /* AAL5 packet dropped by driver on RX */ + unsigned int wtx_pdu; /* successfully tranmitted AAL5 packet */ + unsigned int wtx_err_pdu; /* error AAL5 packet */ + unsigned int wtx_drop_pdu; /* AAL5 packet dropped by driver on TX */ + + ppe_u64_t wrx_total_byte; + ppe_u64_t wtx_total_byte; + unsigned int prev_wrx_total_byte; + unsigned int prev_wtx_total_byte; + + void *aal_desc_base; + void *oam_desc_base; + void *oam_buf_base; + void *tx_desc_base; + void *tx_skb_base; +}; + + + +/* + * #################################### + * Declaration + * #################################### + */ + +extern unsigned int ifx_atm_dbg_enable; + +extern void ifx_atm_get_fw_ver(unsigned int *major, unsigned int *minor); + +extern 
void ifx_atm_init_chip(void); +extern void ifx_atm_uninit_chip(void); + +extern int ifx_pp32_start(int pp32); +extern void ifx_pp32_stop(int pp32); + + + +#endif // IFXMIPS_ATM_CORE_H diff --git a/package/lqdsl/src/ifxmips_atm_danube.c b/package/lqdsl/src/ifxmips_atm_danube.c new file mode 100644 index 0000000000..5698e0877c --- /dev/null +++ b/package/lqdsl/src/ifxmips_atm_danube.c @@ -0,0 +1,271 @@ +/****************************************************************************** +** +** FILE NAME : ifxmips_atm_danube.c +** PROJECT : UEIP +** MODULES : ATM +** +** DATE : 7 Jul 2009 +** AUTHOR : Xu Liang +** DESCRIPTION : ATM driver common source file (core functions) +** COPYRIGHT : Copyright (c) 2006 +** Infineon Technologies AG +** Am Campeon 1-12, 85579 Neubiberg, Germany +** +** This program is free software; you can redistribute it and/or modify +** it under the terms of the GNU General Public License as published by +** the Free Software Foundation; either version 2 of the License, or +** (at your option) any later version. +** +** HISTORY +** $Date $Author $Comment +** 07 JUL 2009 Xu Liang Init Version +*******************************************************************************/ + + + +/* + * #################################### + * Head File + * #################################### + */ + +/* + * Common Head File + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/version.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/proc_fs.h> +#include <linux/init.h> +#include <linux/ioctl.h> +#include <asm/delay.h> + +/* + * Chip Specific Head File + */ +#include <lantiq.h> +#include <lantiq_regs.h> +#include "ifxmips_compat.h" +#include "ifxmips_atm_core.h" +#include "ifxmips_atm_fw_danube.h" + + + +/* + * #################################### + * Definition + * #################################### + */ + +/* + * EMA Settings + */ +#define EMA_CMD_BUF_LEN 0x0040 +#define EMA_CMD_BASE_ADDR (0x00001580 << 2) +#define EMA_DATA_BUF_LEN 0x0100 +#define EMA_DATA_BASE_ADDR (0x00001900 << 2) +#define EMA_WRITE_BURST 0x2 +#define EMA_READ_BURST 0x2 + + + +/* + * #################################### + * Declaration + * #################################### + */ + +/* + * Hardware Init/Uninit Functions + */ +static inline void init_pmu(void); +static inline void uninit_pmu(void); +static inline void init_ema(void); +static inline void init_mailbox(void); +static inline void init_atm_tc(void); +static inline void clear_share_buffer(void); + + + +/* + * #################################### + * Local Variable + * #################################### + */ + + + +/* + * #################################### + * Local Function + * #################################### + */ + +static inline void init_pmu(void) +{ + //*(unsigned long *)0xBF10201C &= ~((1 << 15) | (1 << 13) | (1 << 9)); + PPE_TOP_PMU_SETUP(IFX_PMU_ENABLE); + PPE_SLL01_PMU_SETUP(IFX_PMU_ENABLE); + PPE_TC_PMU_SETUP(IFX_PMU_ENABLE); + PPE_EMA_PMU_SETUP(IFX_PMU_ENABLE); + PPE_QSB_PMU_SETUP(IFX_PMU_ENABLE); + PPE_TPE_PMU_SETUP(IFX_PMU_ENABLE); + DSL_DFE_PMU_SETUP(IFX_PMU_ENABLE); +} + +static inline void uninit_pmu(void) +{ + PPE_SLL01_PMU_SETUP(IFX_PMU_DISABLE); + PPE_TC_PMU_SETUP(IFX_PMU_DISABLE); + PPE_EMA_PMU_SETUP(IFX_PMU_DISABLE); + PPE_QSB_PMU_SETUP(IFX_PMU_DISABLE); + PPE_TPE_PMU_SETUP(IFX_PMU_DISABLE); + DSL_DFE_PMU_SETUP(IFX_PMU_DISABLE); + PPE_TOP_PMU_SETUP(IFX_PMU_DISABLE); +} + +static inline void init_ema(void) +{ + IFX_REG_W32((EMA_CMD_BUF_LEN << 16) | (EMA_CMD_BASE_ADDR >> 2), 
EMA_CMDCFG);
+    IFX_REG_W32((EMA_DATA_BUF_LEN << 16) | (EMA_DATA_BASE_ADDR >> 2), EMA_DATACFG);
+    IFX_REG_W32(0x000000FF, EMA_IER);
+    IFX_REG_W32(EMA_READ_BURST | (EMA_WRITE_BURST << 2), EMA_CFG);
+}
+
+static inline void init_mailbox(void)
+{
+    IFX_REG_W32(0xFFFFFFFF, MBOX_IGU1_ISRC);
+    IFX_REG_W32(0x00000000, MBOX_IGU1_IER);
+    IFX_REG_W32(0xFFFFFFFF, MBOX_IGU3_ISRC);
+    IFX_REG_W32(0x00000000, MBOX_IGU3_IER);
+}
+
+static inline void init_atm_tc(void)
+{
+    // for ReTX expansion in future
+    //*FFSM_CFG0 = SET_BITS(*FFSM_CFG0, 5, 0, 6);   // pnum = 6
+    //*FFSM_CFG1 = SET_BITS(*FFSM_CFG1, 5, 0, 6);   // pnum = 6
+}
+
+static inline void clear_share_buffer(void)
+{
+    volatile u32 *p = SB_RAM0_ADDR(0);
+    unsigned int i;
+
+    for ( i = 0; i < SB_RAM0_DWLEN + SB_RAM1_DWLEN + SB_RAM2_DWLEN + SB_RAM3_DWLEN; i++ )
+        IFX_REG_W32(0, p++);
+}
+
+/*
+ *  Description:
+ *    Download PPE firmware binary code and data.
+ *  Input:
+ *    code_src       --- u32 *, firmware code buffer
+ *    code_dword_len --- unsigned int, code length in DWORD (32-bit)
+ *    data_src       --- u32 *, firmware data buffer
+ *    data_dword_len --- unsigned int, data length in DWORD (32-bit)
+ *  Output:
+ *    int            --- IFX_SUCCESS: Success
+ *                       else:        Error Code
+ */
+static inline int pp32_download_code(u32 *code_src, unsigned int code_dword_len, u32 *data_src, unsigned int data_dword_len)
+{
+    volatile u32 *dest;
+
+    if ( code_src == 0 || ((unsigned long)code_src & 0x03) != 0
+        || data_src == 0 || ((unsigned long)data_src & 0x03) != 0 )
+        return IFX_ERROR;
+
+    if ( code_dword_len <= CDM_CODE_MEMORYn_DWLEN(0) )
+        IFX_REG_W32(0x00, CDM_CFG);
+    else
+        IFX_REG_W32(0x02, CDM_CFG);
+
+    /* copy code */
+    dest = CDM_CODE_MEMORY(0, 0);
+    while ( code_dword_len-- > 0 )
+        IFX_REG_W32(*code_src++, dest++);
+
+    /* copy data */
+    dest = CDM_DATA_MEMORY(0, 0);
+    while ( data_dword_len-- > 0 )
+        IFX_REG_W32(*data_src++, dest++);
+
+    return IFX_SUCCESS;
+}
+
+
+
+/*
+ * ####################################
+ *           Global Function
+ * ####################################
+ */
+
+extern void ifx_atm_get_fw_ver(unsigned int *major, unsigned int *minor)
+{
+    ASSERT(major != NULL, "pointer is NULL");
+    ASSERT(minor != NULL, "pointer is NULL");
+
+    *major = ATM_FW_VER_MAJOR;
+    *minor = ATM_FW_VER_MINOR;
+}
+
+void ifx_atm_init_chip(void)
+{
+    init_pmu();
+
+    init_ema();
+
+    init_mailbox();
+
+    init_atm_tc();
+
+    clear_share_buffer();
+}
+
+void ifx_atm_uninit_chip(void)
+{
+    uninit_pmu();
+}
+
+/*
+ *  Description:
+ *    Initialize and start up PP32.
+ *  Input:
+ *    pp32 --- int, PP32 core index (not used in this implementation)
+ *  Output:
+ *    int  --- IFX_SUCCESS: Success
+ *             else:        Error Code
+ */
+int ifx_pp32_start(int pp32)
+{
+    int ret;
+
+    /* download firmware */
+    ret = pp32_download_code(firmware_binary_code, sizeof(firmware_binary_code) / sizeof(*firmware_binary_code), firmware_binary_data, sizeof(firmware_binary_data) / sizeof(*firmware_binary_data));
+    if ( ret != IFX_SUCCESS )
+        return ret;
+
+    /* run PP32 */
+    IFX_REG_W32(DBG_CTRL_START_SET(1), PP32_DBG_CTRL);
+
+    /* idle for a while to let PP32 init itself */
+    udelay(10);
+
+    return IFX_SUCCESS;
+}
+
+/*
+ *  Description:
+ *    Halt PP32.
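+ *    Note: as with ifx_pp32_start(), the pp32 argument is not used in this
+ *    Danube implementation; both functions always drive PP32 core 0 through
+ *    PP32_DBG_CTRL.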
+ * Input: + * none + * Output: + * none + */ +void ifx_pp32_stop(int pp32) +{ + /* halt PP32 */ + IFX_REG_W32(DBG_CTRL_STOP_SET(1), PP32_DBG_CTRL); +} diff --git a/package/lqdsl/src/ifxmips_atm_fw_danube.h b/package/lqdsl/src/ifxmips_atm_fw_danube.h new file mode 100644 index 0000000000..c36c96845c --- /dev/null +++ b/package/lqdsl/src/ifxmips_atm_fw_danube.h @@ -0,0 +1,429 @@ +#ifndef IFXMIPS_ATM_FW_DANUBE_H +#define IFXMIPS_ATM_FW_DANUBE_H + + +/****************************************************************************** +** +** FILE NAME : ifxmips_atm_fw_danube.h +** PROJECT : Danube +** MODULES : ATM (ADSL) +** +** DATE : 1 AUG 2005 +** AUTHOR : Xu Liang +** DESCRIPTION : ATM Driver (PP32 Firmware) +** COPYRIGHT : Copyright (c) 2006 +** Infineon Technologies AG +** Am Campeon 1-12, 85579 Neubiberg, Germany +** +** This program is free software; you can redistribute it and/or modify +** it under the terms of the GNU General Public License as published by +** the Free Software Foundation; either version 2 of the License, or +** (at your option) any later version. +** +** HISTORY +** $Date $Author $Comment +** 4 AUG 2005 Xu Liang Initiate Version +** 23 OCT 2006 Xu Liang Add GPL header. +*******************************************************************************/ + + +#define ATM_FW_VER_MAJOR 0 +#define ATM_FW_VER_MINOR 1 + + +static unsigned int firmware_binary_code[] = { + 0x800004A0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x8000FFC8, 0x00000000, 0x00000000, 0x00000000, + 0xC1000002, 0xD90C0000, 0xC2000002, 0xDA080001, 0x80004710, 0xC2000000, 0xDA080001, 0x80003D98, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x80003D50, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x80004F18, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x80003C50, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xC0400000, 0xC0004840, 0xC8840000, 0x800043D0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xC0400002, 0xC0004840, 0xC8840000, 0x80004350, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xC3C00004, 0xDBC80001, 0xC10C0002, 0xD90C0000, 0x8000FEC8, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xC10E0002, 0xD90C0000, 0xC0004808, 0xC8400000, 0x80004380, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xC3E1FFFE, 0x597DFFFE, 0x593DFE14, 0x900004D9, 0x00000000, 0x00000000, 0x00000000, 0x90CC0481, + 0x00000000, 0x00000000, 0x00000000, 0xC3C00000, 0xDBC80001, 0xC1400008, 0xC1900000, 0x71948000, + 0x15000100, 0xC140000A, 0xC1900002, 0x71948000, 0x15000100, 0xC140000C, 0xC1900004, 0x71948000, + 0x15000100, 0xC1400004, 0xC1900006, 0x71948000, 0x15000100, 0xC1400006, 0xC1900008, 
0x71948000, + 0x15000100, 0xC140000E, 0xC190000A, 0x71948000, 0x15000100, 0xC1400000, 0xC190000C, 0x71948000, + 0x15000100, 0xC1400002, 0xC190000E, 0x71948000, 0x15000100, 0xC0400000, 0xC11C0000, 0xC000082C, + 0xCD040E08, 0xC11C0002, 0xC000082C, 0xCD040E08, 0xC0400002, 0xC11C0000, 0xC000082C, 0xCD040E08, + 0xC11C0002, 0xC000082C, 0xCD040E08, 0xC0000824, 0x00000000, 0xCBC00001, 0xCB800001, 0xCB400001, + 0xCB000000, 0xC0004878, 0x5BFC4000, 0xCFC00001, 0x5BB84000, 0xCF800001, 0x5B744000, 0xCF400001, + 0x5B304000, 0xCF000000, 0xC0000A10, 0x00000000, 0xCBC00001, 0xCB800000, 0xC0004874, 0x5BFC4000, + 0xCFC00001, 0x5BB84000, 0xCF800000, 0xC30001FE, 0xC000140A, 0xCF000000, 0xC3000000, 0x7F018000, + 0xC000042E, 0xCF000000, 0xC000040E, 0xCF000000, 0xC3C1FFFE, 0xC000490E, 0xCFC00080, 0xC000492C, + 0xCFC00080, 0xC0004924, 0xCFC00040, 0xC0004912, 0xCFC00040, 0xC0004966, 0xCFC00040, 0xC0004968, + 0xCFC00080, 0xC000496A, 0xCFC00080, 0xC3C00000, 0xC2800020, 0xC3000000, 0x7F018000, 0x6FF88000, + 0x6FD44000, 0x4395C000, 0x5BB84A00, 0x5838000A, 0xCF000000, 0x5BFC0002, 0xB7E8FFA8, 0x00000000, + 0xC3C00000, 0xC2800010, 0x6FF86000, 0x47F9C000, 0x5BB84C80, 0xC3400000, 0x58380004, 0xCB420080, + 0x00000000, 0x58380008, 0xCF400080, 0x5BFC0002, 0xB7E8FF90, 0x00000000, 0xC3C00000, 0xC2800020, + 0xC348001E, 0xC3000000, 0x7F018000, 0x6FF8A000, 0x6FD44000, 0x4579C000, 0x47F9C000, 0x5BB84E20, + 0x58380008, 0xCF400420, 0x5838000A, 0xCF000000, 0x5BFC0002, 0xB7E8FF90, 0x00000000, 0x00000000, + 0x00000000, 0xC121FFFE, 0x5911FE14, 0x15000000, 0x80000518, 0x00000000, 0x80002118, 0x00000000, + 0x8000FFC8, 0xC0004958, 0xC8400000, 0x00000000, 0xC3C00002, 0x7BC42000, 0xCC400000, 0xC0004848, + 0xCB840000, 0xC000495C, 0xCAC40000, 0xC0004844, 0xC8840000, 0x46F90000, 0x8400FF6A, 0xC000487C, + 0xC8040000, 0x00000000, 0x00000000, 0x40080000, 0xCA000000, 0xC0001624, 0xCB040000, 0xA63C005A, + 0x00000000, 0x00000000, 0xA71EFF02, 0x00000000, 0xC0000824, 0xCA840000, 0x6CA08000, 0x6CA42000, + 0x46610000, 0x42290000, 0xC35E0002, 0xC6340068, 0xC0001624, 0xCF440080, 0xC2000000, 0xC161FFFE, + 0x5955FFFE, 0x15400000, 0x00000000, 0xC0004844, 0xC8840000, 0xC000082C, 0xCA040040, 0x00000000, + 0x00000000, 0x58880002, 0xB608FFF8, 0x00000000, 0xC0800000, 0xC0004844, 0xCC840040, 0x5AEC0002, + 0xC000495C, 0xCEC40000, 0x5E6C0006, 0x84000048, 0xC0004848, 0xCB840000, 0xC0000838, 0xC2500002, + 0xCE440808, 0x5FB80002, 0xC0004848, 0xCF840000, 0x5EEC0002, 0xC000495C, 0xCEC40000, 0x00000000, + 0xC121FFFE, 0x5911FE14, 0x15000000, 0x8000FD80, 0xC000495A, 0xC8400000, 0x00000000, 0xC3C00002, + 0x7BC42000, 0xCC400000, 0xC0004960, 0xCAC40000, 0x00000000, 0x00000000, 0x5EEC0000, 0x840000F2, + 0x00000000, 0xB6FC0030, 0xC0001600, 0xCA040000, 0x00000000, 0x00000000, 0xA61E00B2, 0x6FE90000, + 0xC0000A28, 0xCE840808, 0xC2C00000, 0xC2800004, 0xB6E80080, 0xC0001604, 0xCA840000, 0xC0004960, + 0xCEC40000, 0xA69EFCA2, 0x00000000, 0x6FE90000, 0xC0000A28, 0xCE840808, 0xC2C00002, 0xC0001600, + 0xCA040000, 0x00000000, 0x00000000, 0xA61E000A, 0x6FE90000, 0xC0000A28, 0xCE840808, 0xC2C00000, + 0xC0001604, 0xCA840000, 0xC0004960, 0xCEC40000, 0xA69EFC0A, 0xC2400000, 0xC0000A14, 0xCA440030, + 0x00000000, 0x00000000, 0x46E52000, 0xA4400000, 0xC2800000, 0xDFEB0031, 0x8000FFF8, 0xDFEA0031, + 0xB668FB82, 0x00000000, 0xC00048A0, 0xCB040000, 0xC0000A10, 0xCA840000, 0x6F208000, 0x6F242000, + 0x46610000, 0x42A10000, 0xC2400000, 0xC0000A14, 0xCA440030, 0xC35E0002, 0xC6340068, 0xC0001604, + 0xCF440080, 0x5B300002, 0xB670FFF8, 0x5AEC0002, 0xC3000000, 0xC00048A0, 0xCF040000, 0xC0004960, + 0xCEC40000, 
0x8000FAC0, 0xC0004918, 0xD2800000, 0xC2000000, 0xDF600040, 0x5E600080, 0x8400025A, + 0x00000000, 0xC161FFFE, 0x5955FFFE, 0x15400000, 0x00000000, 0xC000480A, 0xCA000000, 0xC0004912, + 0xCA400000, 0xC0004924, 0xCA800000, 0xC0004966, 0xCAC00000, 0x00000000, 0xC121FFFE, 0x5911FE14, + 0x15000000, 0x76610000, 0x76A10000, 0x76E10000, 0x840001B2, 0xC0004918, 0xCA400000, 0xC28001FE, + 0x76A10000, 0x5A640002, 0x6A254010, 0x5EE80000, 0x84000002, 0x6AA54000, 0x8000FFF8, 0xC6280000, + 0x62818008, 0xC0004918, 0xCF000000, 0xC161FFFE, 0x5955FFFE, 0x15400000, 0x00000000, 0xC0004966, + 0xCA400000, 0xC2000002, 0x6A310000, 0x7E010000, 0x76252000, 0xCE400000, 0x00000000, 0xC121FFFE, + 0x5911FE14, 0x15000000, 0x6F346000, 0x4735A000, 0x5B744C80, 0xC2800000, 0x58340006, 0xCA800080, + 0xC2C00000, 0x58340000, 0xCAC000E0, 0xC2400000, 0x5834000A, 0xCA420080, 0x6EA82000, 0x42E9E000, + 0x6F2CA000, 0x42E56000, 0x5AEC1400, 0xC3990040, 0xC7381C20, 0xC6F80068, 0x99005930, 0xDB980000, + 0xDBD80001, 0x00000000, 0xDEA00000, 0x47210000, 0x8400FD68, 0xC0004958, 0xC8400000, 0x00000000, + 0xC3C00002, 0x7BC42000, 0xCC400000, 0xC0004848, 0xCB840000, 0xC0004844, 0xC8840000, 0x5FB80000, + 0x8400F7DA, 0xC0001A1C, 0xCA000000, 0xC2400002, 0x6A452000, 0x76610000, 0x8400F7AA, 0xC000487C, + 0xC8040000, 0x00000000, 0x00000000, 0x40080000, 0xCA000000, 0xC4240000, 0x00000000, 0xA63C17BA, + 0x00000000, 0xC0004878, 0xC8040000, 0x6C908000, 0x44908000, 0x44908000, 0x40100000, 0xCA000000, + 0xC4240000, 0x00000000, 0xC0004934, 0xCE000000, 0xC2800002, 0xC4681C10, 0xC62821D8, 0xC2600010, + 0x5A650040, 0xC0004800, 0xCB400000, 0xC2200400, 0x5A200000, 0xC7601048, 0xC0001220, 0xCE800000, + 0xC0001200, 0xCE400000, 0xC0001202, 0xCE000000, 0xC0001240, 0xCB400000, 0x00000000, 0x00000000, + 0xA754FFC0, 0xC2000000, 0xC7600048, 0xA7520022, 0x00000000, 0x00000000, 0x990060A8, 0xC0004822, + 0xC9400000, 0xC1800002, 0x80001668, 0x58204080, 0xC2000000, 0xCA000020, 0xC2400000, 0xCA414008, + 0xC2800000, 0xCA812008, 0xC2C00000, 0xCAC20020, 0xC0004938, 0xCE000000, 0xC0004920, 0xCE400000, + 0xC0004916, 0xCE800000, 0xC0004922, 0xCEC00000, 0xA6400520, 0x00000000, 0xC0004938, 0xCBC00000, + 0x00000000, 0xC3800000, 0x6FF48000, 0x6FD44000, 0x4355A000, 0x5B744A00, 0x58340000, 0xCB802018, + 0x00000000, 0xC2000000, 0x6FB46000, 0x47B5A000, 0x5B744C80, 0x5834000C, 0xCA000028, 0xC000491A, + 0xCF800000, 0x5E200000, 0x84000452, 0xC2000000, 0xDF610050, 0x5E6001E8, 0x8800FFD0, 0xC2000002, + 0xC2400466, 0xC2A00000, 0x5AA80000, 0xC0001006, 0xCE000000, 0xC0001008, 0xCE400000, 0xC000100A, + 0xCE800000, 0x99005370, 0xC1A0FFFE, 0xC0000824, 0xC9840068, 0xC0004934, 0xCA400000, 0xC2000000, + 0xC2800002, 0x990053B0, 0xDA980000, 0xC6140000, 0xC6580000, 0xC161FFFE, 0x5955FFFE, 0x15400000, + 0x00000000, 0x99005498, 0xC000491A, 0xC9400000, 0x00000000, 0x00000000, 0xC121FFFE, 0x5911FE14, + 0x15000000, 0xC0004922, 0xCA001120, 0xC3C00000, 0xC3800000, 0xC0004930, 0xCE001120, 0xC0004932, + 0xCBC000E0, 0xC2800000, 0xC000491E, 0xCFC00000, 0xC0004862, 0xCA800068, 0xC3A0001A, 0x5BB94000, + 0xC6B80068, 0xC000491C, 0xCF800000, 0x99005708, 0xC000491C, 0xC1400000, 0xC9420050, 0x00000000, + 0x00000000, 0x00000000, 0xA8E2FFC8, 0xC2000000, 0xC1220002, 0xD90C0000, 0xDF600040, 0x5E600080, + 0x8400FFDA, 0xC000491C, 0xCA000000, 0xC000491E, 0xCA400000, 0x00000000, 0x00000000, 0x99005930, + 0xDA180000, 0xDA580001, 0x00000000, 0xC2000000, 0xDF610050, 0x5E6001FE, 0x8800FFD0, 0xC0004916, + 0xCA800000, 0xC2C00000, 0xDFEC0050, 0xC2400000, 0x46E52000, 0x84000032, 0x5EA80000, 0x84000022, + 0xC2600002, 0x990060A8, 0xC000482E, 
0xC9400000, 0xC1800002, 0x80000018, 0xC2600000, 0x990060A8, + 0xC000482C, 0xC9400000, 0xC1800002, 0xC2000068, 0xC6240080, 0xC0004930, 0xCE400088, 0xC000491A, + 0xC9800000, 0xC0004862, 0xC9400000, 0x6D9C6000, 0x459CE000, 0x59DC4C80, 0x99005790, 0xD9580000, + 0xD9980001, 0xD9D40000, 0x99005708, 0xC000491C, 0xC1400000, 0xC9420050, 0xC2000000, 0xDF600040, + 0x5E600080, 0x8400FFD2, 0x00000000, 0xC000491C, 0xCA000000, 0xC000491E, 0xCA400000, 0x00000000, + 0x00000000, 0x99005930, 0xDA180000, 0xDA580001, 0x00000000, 0x800010D0, 0x00000000, 0x990060A8, + 0xC000482A, 0xC9400000, 0xC1800002, 0x800010A0, 0xC0004938, 0xCBC00000, 0x00000000, 0x00000000, + 0x6FF88000, 0x6FD44000, 0x4395C000, 0x5BB84A00, 0x58380008, 0xCA000000, 0x00000000, 0x00000000, + 0xA6000362, 0x00000000, 0xC0004938, 0xCBC00000, 0xC3000000, 0x00000000, 0x6FF88000, 0x6FD44000, + 0x4395C000, 0x5BB84A00, 0x58380000, 0xCB002018, 0xC2000000, 0x58380008, 0xCA020080, 0x5838000C, + 0xCAC00000, 0x5838000E, 0xCA400000, 0xC000491A, 0xCF000000, 0xC0004930, 0xCEC00000, 0xC000493C, + 0xCE000000, 0xC0004932, 0xCE400000, 0x5E200000, 0x84000108, 0xC2800000, 0xA6FE009A, 0x6F206000, + 0x47210000, 0x5A204C80, 0x5820000C, 0xCA800028, 0x00000000, 0x00000000, 0x5EA80000, 0x840001DA, + 0x00000000, 0xC161FFFE, 0x5955FFFE, 0x15400000, 0x00000000, 0x99005498, 0xC000491A, 0xC9400000, + 0x00000000, 0x00000000, 0xC121FFFE, 0x5911FE14, 0x15000000, 0xC0004930, 0xCAC00000, 0xC0004932, + 0xCA400000, 0xC7EC1120, 0xC0004930, 0xCEC00000, 0x5838000C, 0xCEC00000, 0x58000002, 0xCE400000, + 0xC0004934, 0xCA000000, 0xC2400002, 0x6E642000, 0x6E642000, 0x76252000, 0x84000012, 0xC2400002, + 0x6E684000, 0x58380008, 0xCE800208, 0xA6000000, 0x6E682000, 0x58380008, 0xCE800108, 0xC2400002, + 0x6E642000, 0x76252000, 0x840000D2, 0x58380008, 0xCA000000, 0xC2800000, 0xC2400000, 0xA60200A0, + 0xDBA80000, 0x6F386000, 0x4739C000, 0x5BB84C80, 0x58380004, 0xCA400080, 0x58380002, 0xCA800080, + 0x00000000, 0xDEB80000, 0x46694000, 0x88000048, 0x00000000, 0xC0004824, 0xCA000000, 0xC2400002, + 0x6E640000, 0x5A200002, 0xCE000000, 0x58380008, 0xCE400008, 0x80000000, 0x00000000, 0x80000030, + 0xC0004934, 0xCA000000, 0x00000000, 0x00000000, 0xA6020C4A, 0x00000000, 0x00000000, 0x80000C80, + 0xC2800000, 0xC2000200, 0xC240001A, 0xDF690050, 0x46A14000, 0x46694000, 0x8800FFBA, 0xC2000006, + 0xC2600982, 0x5A643B6E, 0x5838000A, 0xCA800000, 0xC0001006, 0xCE000000, 0xC0001008, 0xCE400000, + 0xC000100A, 0xCE800000, 0x99005370, 0xC1A0FFFE, 0xC0000824, 0xC9840068, 0xC2000000, 0xC0004930, + 0xCA02E010, 0x58380026, 0xCA400000, 0x00000000, 0xC2800000, 0x990053B0, 0xDA980000, 0xC6140000, + 0xC6580000, 0xC0004934, 0xCA000000, 0x00000000, 0x00000000, 0xA6020002, 0x00000000, 0x00000000, + 0x80000300, 0xC0004938, 0xCBC00000, 0xC0004878, 0xC8040000, 0x6C908000, 0x44908000, 0x44908000, + 0x40100000, 0xCA000000, 0xC4240000, 0x00000000, 0x58240018, 0xCA000000, 0x6FF88000, 0x6FD44000, + 0x4395C000, 0x5BB84A00, 0xC3000000, 0xC3400002, 0xC2C00000, 0xC62C0080, 0xC6270040, 0xC0004940, + 0xCE400040, 0xC6260040, 0xC0004942, 0xCE400040, 0xC000493C, 0xCA000000, 0x5EEC0000, 0x84000172, + 0x5A6C0010, 0x46614000, 0x88000178, 0x5A600052, 0x466D4000, 0x88000160, 0x58380006, 0xCA800000, + 0xC0004940, 0xCA000000, 0xC2400000, 0xC6A70040, 0x7E412000, 0x76252000, 0xC2000000, 0xC6A10040, + 0x46610000, 0x84000120, 0xC0004942, 0xCA000000, 0xC2400000, 0xC6A60040, 0x7E412000, 0x76252000, + 0xC2000000, 0xC6A00040, 0x58380002, 0xCA800000, 0x46610000, 0x840000D0, 0xC2400000, 0xC6A60080, + 0x46E50000, 0x880000C2, 0xC2400000, 0xC6A40080, 0x58380008, 
0xCA800000, 0x466D0000, 0x880000A2, + 0x00000000, 0xA682FFF8, 0x00000000, 0xC7700B08, 0xA6840078, 0x00000000, 0xC7700A08, 0x80000068, + 0xC7700208, 0xC000493C, 0xCAC00000, 0x80000048, 0xC7700308, 0xC000493C, 0xCAC00000, 0x80000028, + 0xC7700908, 0x80000018, 0xC7700808, 0x80000008, 0xC7700708, 0x8000FFF8, 0xC7700508, 0xC0004944, + 0xCF000000, 0xC000493E, 0xCEC00000, 0xC0004938, 0xCA400000, 0xC000493C, 0xCB800000, 0xC000493E, + 0xCB400000, 0xC3000000, 0x6E608000, 0x6E544000, 0x42150000, 0x5A204A00, 0x5AA00008, 0x58200004, + 0xCB000080, 0xC0004934, 0xCA000000, 0xC2400000, 0xC0004930, 0xCA42E010, 0xC3C00018, 0xA6020078, + 0x00000000, 0x43656000, 0x46F90000, 0x88000038, 0x47AD6000, 0x6EE04010, 0x5BE00004, 0xC2000000, + 0xC6E00010, 0x5E200000, 0x8400002A, 0x5BFC0002, 0x80000018, 0xC3C00004, 0x5A2C0008, 0x46390000, + 0x8800FFFA, 0x5FB80008, 0x6FE04000, 0x42390000, 0x46312000, 0x88000050, 0xC2400000, 0xC0004930, + 0xCA42E010, 0xC2060002, 0xC6800000, 0xCE000308, 0x6FE04000, 0x4631C000, 0x5F700010, 0x4675A000, + 0xC2000000, 0xC6340010, 0xC25A000A, 0xC000491A, 0xCA401C20, 0xC2800000, 0xC0004932, 0xCA8000E0, + 0xC0004862, 0xCA400068, 0x6FA04010, 0x42290000, 0xC000491E, 0xCE000000, 0xC7E41050, 0xC000491C, + 0xCE400000, 0x6FE04000, 0x43A1C000, 0xC000493C, 0xCF800000, 0xC000493E, 0xCF400000, 0xC000493A, + 0xCFC00000, 0x8000FFF0, 0x00000000, 0x00000000, 0x00000000, 0xC2000000, 0xDCE00000, 0xA622FFB8, + 0xC1220002, 0xD90C0000, 0xC0004938, 0xCBC00000, 0xC0004944, 0xCB400000, 0xC0004862, 0xCB000000, + 0xC0004934, 0xCA000000, 0x6FF88000, 0x6FD44000, 0x4395C000, 0x5BB84A00, 0xA6020248, 0xC2400000, + 0x58380008, 0xCA406008, 0xDFE80000, 0xC2218E08, 0x5A21BAF6, 0x46294000, 0x8400000A, 0xC2080002, + 0x7235A000, 0x80000040, 0x5E640000, 0x8400000A, 0xC20C0002, 0x7235A000, 0x80000018, 0xC2000000, + 0xC760E718, 0xC7604220, 0x5E200000, 0x8400025A, 0xC2200002, 0xC0004930, 0xCE001008, 0x990060A8, + 0xC0004828, 0xC9400000, 0xC1800002, 0x58380000, 0xCA000000, 0x00000000, 0x00000000, 0xA6000112, + 0xC0004940, 0xCA800000, 0xC0004942, 0xCA400000, 0xC7600080, 0xC6A01840, 0xC6601040, 0xC000493A, + 0xCA400000, 0xC0004934, 0xCA800000, 0xC0007200, 0x40300000, 0x40240000, 0x5C000004, 0x5EC07400, + 0x8800FFFA, 0x5C000200, 0xCE000000, 0x58000002, 0x5EC07400, 0x8800FFFA, 0x5C000200, 0xCE800000, + 0xC000493E, 0xCA000000, 0xC2400000, 0x5838000C, 0xCE400000, 0x990060A8, 0xC0004830, 0xC9400000, + 0xC6180000, 0xC0004930, 0xC6100080, 0xCD000080, 0x80000090, 0xC2400002, 0x58380008, 0xCE400008, + 0xC0004944, 0xCF400000, 0x80000260, 0xC000493C, 0xCA400000, 0xDFE80000, 0x5A300018, 0xC0007200, + 0x40200000, 0xCA000000, 0x58380008, 0xC6501080, 0xCD001080, 0x5838000A, 0xCE800000, 0x58380026, + 0xCE000000, 0xC0004944, 0xCF400000, 0x99005708, 0xC000491C, 0xC1400000, 0xC9420050, 0x80000020, + 0x00000000, 0x990060A8, 0xC0004826, 0xC9400000, 0xC1800002, 0x8000FDC0, 0xC2000000, 0xC2400200, + 0xDF600040, 0xB624FFCA, 0xC000491C, 0xCA400000, 0xC000491E, 0xCA800000, 0x99005930, 0xDA580000, + 0xDA980001, 0x00000000, 0xC0004934, 0xCA000000, 0x00000000, 0xC2800000, 0xA6020140, 0xC2400004, + 0xC2000200, 0xDF690050, 0x46A14000, 0x46694000, 0x8800FFC2, 0x00000000, 0xC000491A, 0xC9800000, + 0xC0004862, 0xC9400000, 0x6D9C6000, 0x459CE000, 0x59DC4C80, 0x99005790, 0xD9580000, 0xD9980001, + 0xD9D40000, 0x99005708, 0xC000491C, 0xC1400000, 0xC9420050, 0xC2000000, 0xC2400200, 0xDF600040, + 0xB624FFCA, 0xC000491C, 0xCA400000, 0xC000491E, 0xCA800000, 0x99005930, 0xDA580000, 0xDA980001, + 0x00000000, 0x58380008, 0xCA400000, 0xC2000000, 0xCE000020, 0xC2A1FFFE, 0x5AA9FFFE, 
0xCE001080, + 0x5838000A, 0xCE800000, 0xC161FFFE, 0x5955FFFE, 0x15400000, 0x00000000, 0xC0000838, 0xC2500002, + 0xCE440808, 0xC0004848, 0xCB840000, 0xC2000000, 0xC000082C, 0xCA040030, 0x5FB80002, 0xC0004848, + 0xCF840000, 0x58880002, 0xB608FFF8, 0x00000000, 0xC0800000, 0xC0004844, 0xCC840000, 0x00000000, + 0xC121FFFE, 0x5911FE14, 0x15000000, 0x8000DEC0, 0xC2000000, 0xDF600040, 0x5E200080, 0x84000252, + 0x00000000, 0xC161FFFE, 0x5955FFFE, 0x15400000, 0x00000000, 0xC000480C, 0xCA000000, 0xC0004910, + 0xCA400000, 0xC000492C, 0xCA800000, 0xC0004968, 0xCAC00000, 0x00000000, 0xC121FFFE, 0x5911FE14, + 0x15000000, 0x76610000, 0x76A10000, 0x762D6000, 0x840001AA, 0xC0004926, 0xCA400000, 0xC201FFFE, + 0x762D6000, 0x5A640002, 0x6AE50010, 0x5F200000, 0x84000002, 0x6A250000, 0x8000FFF8, 0xC6E00000, + 0x62014008, 0xC0004926, 0xCE800000, 0xC161FFFE, 0x5955FFFE, 0x15400000, 0x00000000, 0xC0004968, + 0xCA400000, 0xC2000002, 0x6A290000, 0x7E010000, 0x76252000, 0xCE400000, 0x00000000, 0xC121FFFE, + 0x5911FE14, 0x15000000, 0x6EB4A000, 0x6E944000, 0x4575A000, 0x46B5A000, 0x5B744E20, 0x58340002, + 0xC2000000, 0xCA0000E0, 0x5834002E, 0xC2400000, 0xCA400080, 0x6EB0A000, 0x6EBC4000, 0x47F18000, + 0x46B18000, 0x5B300E4E, 0x5B300004, 0x6E642000, 0x4225E000, 0xC39A8024, 0xC7380068, 0xC6B81C20, + 0x99005930, 0xDB980000, 0xDBD80001, 0x00000000, 0xC2000000, 0xDF600040, 0x5E200080, 0x8400028A, + 0x00000000, 0xC161FFFE, 0x5955FFFE, 0x15400000, 0x00000000, 0xC000490E, 0xCA000000, 0xC000492A, + 0xCA400000, 0xC000496A, 0xCB000000, 0xC0004956, 0xCAC00000, 0x00000000, 0xC121FFFE, 0x5911FE14, + 0x15000000, 0x76318000, 0x76718000, 0x840001EA, 0xC201FFFE, 0x76318000, 0x5AEC0002, 0x6B2D0010, + 0x5EA00000, 0x84000002, 0x6A2D0000, 0x8000FFF8, 0xC7200000, 0x62016008, 0xC0004956, 0xCEC00000, + 0xC161FFFE, 0x5955FFFE, 0x15400000, 0x00000000, 0xC000496A, 0xCA400000, 0xC2000002, 0x6A2D0000, + 0x7E010000, 0x76252000, 0xCE400000, 0x00000000, 0xC121FFFE, 0x5911FE14, 0x15000000, 0x6EF4A000, + 0x6ED44000, 0x4575A000, 0x46F5A000, 0x5B744E20, 0x5834000E, 0xC2000000, 0xCA0000E0, 0x58340008, + 0xC2400000, 0xCA420080, 0x5834000C, 0xC2800000, 0xCA832018, 0x6E644010, 0x42250000, 0x4229E000, + 0xC39A8008, 0x58340008, 0xCB809020, 0x58340008, 0xC2800000, 0xCA810018, 0x6EE0A000, 0x6EE44000, + 0x46610000, 0x46E10000, 0x5A200008, 0x5A200E28, 0x42290000, 0xC6380068, 0xC6F81C20, 0x99005930, + 0xDB980000, 0xDBD80001, 0x00000000, 0xC000495A, 0xC8400000, 0x00000000, 0xC3C00002, 0x7BC42000, + 0xCC400000, 0xC0001A1C, 0xCA000000, 0xC2400008, 0x6A452000, 0x76610000, 0x8400D91A, 0xC0000A28, + 0xC3800000, 0xCB840030, 0xC0000A14, 0xC3400000, 0xCB440030, 0xC0004880, 0xCB040000, 0xB7B4D8CA, + 0x58041802, 0xCAC00000, 0xA7000018, 0x00000000, 0x00000000, 0xA6C8D898, 0xC2800000, 0xC6E80020, + 0x80000030, 0xC2800000, 0xC7282020, 0xC000490E, 0xCA400000, 0x6BE9E000, 0x00000000, 0x77E52000, + 0x8400D848, 0x6EA0A000, 0x6E944000, 0x45610000, 0x46A10000, 0x5A204E20, 0x5820000C, 0xCA000000, + 0xC0004946, 0xCE800000, 0xA6220348, 0x00000000, 0xC2200060, 0xC0004948, 0xCE000010, 0xCE001040, + 0xC240000A, 0xC000494A, 0xCE400000, 0xC2B60002, 0xC0004964, 0xCE801B08, 0x99005C00, 0xC00048A0, + 0xC8840000, 0x00000000, 0xC0004946, 0xCBC00000, 0x00000000, 0x00000000, 0x6FF8A000, 0x6FD44000, + 0x4579C000, 0x47F9C000, 0x5BB84E20, 0x990059C0, 0xDBD80000, 0xDB980001, 0x00000000, 0x99005708, + 0xC000491C, 0xC1400000, 0xC9420050, 0xC000491C, 0x99005BB8, 0xC9400001, 0xC9800000, 0x00000000, + 0x99005930, 0xD9580000, 0xD9980001, 0x00000000, 0xC161FFFE, 0x5955FFFE, 0x15400000, 0x00000000, + 0x990055F8, 
0xDBD80000, 0xDB980001, 0xC7D80000, 0x00000000, 0xC121FFFE, 0x5911FE14, 0x15000000, + 0x6FF8A000, 0x6FD44000, 0x4579C000, 0x47F9C000, 0x5BB84E20, 0x58380010, 0xCA000000, 0xC0004874, + 0xC8040000, 0x6C908000, 0x44908000, 0x44908000, 0x40100000, 0xCA400000, 0xC4340000, 0x00000000, + 0xC7400000, 0xCE000000, 0xC161FFFE, 0x5955FFFE, 0x15400000, 0x00000000, 0xC000490E, 0xCA400000, + 0xC2800002, 0x6ABD4000, 0x72A52000, 0xCE400000, 0x00000000, 0xC121FFFE, 0x5911FE14, 0x15000000, + 0x990060A8, 0xC0004836, 0xC9400000, 0xC1800002, 0x00000000, 0x00000000, 0x00000000, 0xA8E2FFC8, + 0x00000000, 0xC1220002, 0xD90C0000, 0xC2000000, 0xC0000A14, 0xCA040030, 0xC0000A28, 0xC2500002, + 0xCE440808, 0x58880002, 0xB608FFF8, 0xC00048A0, 0xC0800000, 0xCC840000, 0x8000D498, 0xC0004946, + 0xCBC00000, 0xC161FFFE, 0x5955FFFE, 0x15400000, 0x00000000, 0xC000490E, 0xCA400000, 0xC2800002, + 0x6ABD4000, 0x72A52000, 0xCE400000, 0x00000000, 0xC121FFFE, 0x5911FE14, 0x15000000, 0x6FF8A000, + 0x6FD44000, 0x4579C000, 0x47F9C000, 0x5BB84E20, 0x58380008, 0xCA000000, 0x5838000C, 0xCA400000, + 0xC3400000, 0xC6340008, 0xC000494E, 0xCF400000, 0xC2800000, 0xC62A0080, 0xC3000000, 0xC6308020, + 0x6F304000, 0x43298000, 0xC000493C, 0xCF000000, 0xC2C00000, 0xC66C0080, 0xC0004950, 0xCEC00000, + 0xC2800000, 0xC66AE028, 0xC0004954, 0xCE800000, 0x5F740000, 0x84000188, 0x5E300028, 0x462D2000, + 0x84000152, 0x462D2000, 0x8800011A, 0x5E300018, 0x462D2000, 0x88000012, 0x462D2000, 0x8400002A, + 0x00000000, 0x800000A8, 0x00000000, 0x99005D40, 0xDBD80000, 0xDB980001, 0xC7800000, 0xC3400002, + 0xC000494E, 0xCF400000, 0xC161FFFE, 0x5955FFFE, 0x15400000, 0x00000000, 0xC000490E, 0xCA400000, + 0xC2800002, 0x6ABD4000, 0x7E814000, 0x76A52000, 0xCE400000, 0x00000000, 0xC121FFFE, 0x5911FE14, + 0x15000000, 0xC2200060, 0xC0004948, 0xCE001040, 0xC2000000, 0xC000494C, 0xCE000000, 0x80000068, + 0x00000000, 0x99005D40, 0xDBD80000, 0xDB980001, 0xC7800000, 0x99005F40, 0xDBD80000, 0xDB980001, + 0xC7800000, 0xC2200058, 0xC0004948, 0xCE001040, 0xC2000002, 0xC000494C, 0xCE000000, 0xC2000006, + 0xC0001006, 0xCE000000, 0x5838000A, 0xCA400000, 0xC2200982, 0x5A203B6E, 0xC0001008, 0xCE000000, + 0xC000100A, 0xCE400000, 0xC0004954, 0xCA800000, 0xC200000C, 0xC000494A, 0xCE000000, 0xC0004948, + 0xCE800010, 0xC2B60000, 0xC0004964, 0xCE800000, 0x99005C00, 0xC00048A0, 0xC8840000, 0x00000000, + 0xC0004946, 0xCBC00000, 0xC000494C, 0xCA000000, 0x6FF8A000, 0x6FD44000, 0x4579C000, 0x47F9C000, + 0x5BB84E20, 0x5E200000, 0x840000E2, 0x00000000, 0x990059C0, 0xDBD80000, 0xDB980001, 0x00000000, + 0x99005708, 0xC000491C, 0xC1400000, 0xC9420050, 0xC000491C, 0x99005BB8, 0xC9400001, 0xC9800000, + 0x00000000, 0x99005930, 0xD9580000, 0xD9980001, 0x00000000, 0xC161FFFE, 0x5955FFFE, 0x15400000, + 0x00000000, 0x990055F8, 0xDBD80000, 0xDB980001, 0xC7D80000, 0x00000000, 0xC121FFFE, 0x5911FE14, + 0x15000000, 0xC000493C, 0xCA800000, 0xC000494E, 0xCAC00000, 0xC3000018, 0xC3400006, 0x5E200000, + 0x84000012, 0xC2800000, 0xC2C00000, 0xC300001E, 0xC3400000, 0xC6AC1080, 0xC72C0420, 0xC76C0818, + 0x58380010, 0xCA800000, 0x58380008, 0xCEC00000, 0xC6280108, 0xC0004874, 0xC8040000, 0x6C908000, + 0x44908000, 0x44908000, 0x40100000, 0xCB000000, 0xC4340000, 0x00000000, 0xC7400000, 0xCE800000, + 0xC0004952, 0xCE800000, 0x00000000, 0x00000000, 0x00000000, 0xA8E2FFC8, 0x00000000, 0xC000494C, + 0xCA000000, 0xC0004950, 0xCAC00000, 0x5E200000, 0x84000052, 0xDFE80000, 0x7E814000, 0x5834001A, + 0xCE800000, 0x990060A8, 0xC0004834, 0xC9400000, 0xC1800002, 0x990060A8, 0xC0004838, 0xC9400000, + 0xC6D80000, 0xC1220002, 0xD90C0000, 
0x5E200000, 0x84000028, 0x5838002C, 0xCB000000, 0xDFE80000, + 0x00000000, 0x58380014, 0xCF000000, 0x80000000, 0xC2A1FFFE, 0x5AA9FFFE, 0x5838000A, 0xCE800000, + 0xC3000000, 0xC0000A14, 0xCB040030, 0xC2D00002, 0xC0000A28, 0xCEC40808, 0xC000494E, 0xCA800000, + 0x58880002, 0xB4B0FFF8, 0xC00048A0, 0xC0800000, 0xCC840000, 0x5EA80000, 0x8400013A, 0x5E200000, + 0x84000128, 0xC000493C, 0xCA800000, 0x00000000, 0x00000000, 0x5AA80060, 0xCE800000, 0x99005D40, + 0xDBD80000, 0xDB980001, 0xC7800000, 0x99005F40, 0xDBD80000, 0xDB980001, 0xC7800000, 0xC0004952, + 0xCAC00000, 0x58380000, 0xCA800000, 0xC30C0002, 0xC7F00020, 0xA6800078, 0x00000000, 0x00000000, + 0xC161FFFE, 0x5955FFFE, 0x15400000, 0x00000000, 0xC0001800, 0xCA000000, 0x00000000, 0x00000000, + 0xA60CFFCA, 0xC6F00508, 0xC6B0C408, 0xCF000000, 0x00000000, 0xC121FFFE, 0x5911FE14, 0x15000000, + 0x8000CB48, 0xDCBC0001, 0x5FFC0000, 0x8400003A, 0xC3800002, 0xDB880001, 0x5FFC0004, 0x8400C4B2, + 0xC3800000, 0xDB880001, 0xC3CE0002, 0xC0000800, 0xCFC00708, 0xC3E1FFFE, 0x597DFFFE, 0x593DFE14, + 0x94000001, 0x00000000, 0x00000000, 0x00000000, 0xC000487C, 0xC8040000, 0x00000000, 0x00000000, + 0x40080000, 0xCBC00000, 0xC4380000, 0x00000000, 0xC000480E, 0xCA000000, 0xC0004858, 0xCB440000, + 0x00000000, 0x00000000, 0x46350000, 0x88000098, 0x00000000, 0xA7C00028, 0xC0004854, 0xC1000002, + 0xCD040000, 0xC11C0000, 0xC000082C, 0xCD040E08, 0x800000C0, 0x00000000, 0xA7D20118, 0x00000000, + 0xC7E14048, 0xC2400000, 0xC6246030, 0xC200006A, 0x46610000, 0xC6240038, 0xC0000810, 0xCE440038, + 0x8000FF58, 0xC2000000, 0xC0000808, 0xCA040018, 0xC11C0000, 0xC000082C, 0xCD040E08, 0x5A200002, + 0x5E600010, 0x8400FFF8, 0xC2000000, 0xC0000808, 0xCE040018, 0xC3400000, 0x80000010, 0xC1200002, + 0xC0000818, 0xCD041008, 0x5B740002, 0xC0004858, 0xCF440000, 0x99005348, 0xC0004848, 0xC9440000, + 0xC1800000, 0xC11C0002, 0xC000082C, 0xCD040E08, 0x800005C8, 0x5B740002, 0xC0004858, 0xCF440000, + 0xC7800000, 0xC13C0002, 0xCD001E08, 0xC0004848, 0xC9440000, 0xC1800000, 0xC000082C, 0xC9840030, + 0x59540002, 0xC0004848, 0xCD440000, 0x58880002, 0xB4980540, 0x00000000, 0xC0800000, 0x80000530, + 0xC000487C, 0xC8040000, 0x00000000, 0x00000000, 0x40080000, 0xCBC00000, 0xC4280000, 0x00000000, + 0xA7C00110, 0xC000484C, 0xCA040000, 0xC2400000, 0xC0001AEC, 0xCA440020, 0x5A200002, 0xC000484C, + 0xCE040000, 0xB624006A, 0xC6800000, 0xC13C0002, 0xCD001E08, 0xC0004848, 0xC9440000, 0xC1800000, + 0xC000082C, 0xC9840030, 0x59540002, 0xC0004848, 0xCD440000, 0x58880002, 0xB4980430, 0x00000000, + 0xC0800000, 0x80000420, 0xC0004854, 0xC1000004, 0xCD040000, 0xC0000820, 0xC2000002, 0xCE040000, + 0xC2000000, 0xC000484C, 0xCE040000, 0xC0004858, 0xCE040000, 0x8000FF10, 0xC0004854, 0xC1000000, + 0xCD040000, 0xC11C0000, 0xC000082C, 0xCD040E08, 0x99005348, 0xC0004848, 0xC9440000, 0xC1800000, + 0xC1200000, 0xC0000818, 0xCD041008, 0xC11C0002, 0xC000082C, 0xCD040E08, 0xC2000000, 0xC000484C, + 0xCE040000, 0x80000320, 0xC0001AC0, 0xCB840000, 0xC000487C, 0xC8040000, 0x00000000, 0x00000000, + 0x40080000, 0xCBC00000, 0xC4280000, 0x00000000, 0xA780022A, 0x00000000, 0x00000000, 0xA7C001EA, + 0x00000000, 0xC0001B00, 0xC2060006, 0xCE040310, 0xA7E801A2, 0x00000000, 0xC0004850, 0xCA040000, + 0xC2400000, 0xC0001AEC, 0xCA448020, 0x5A200002, 0xC0004850, 0xCE040000, 0xB624008A, 0x00000000, + 0xC6800000, 0xC13C0002, 0xCD001E08, 0xC0001ACC, 0xC2000002, 0xCE040008, 0xC0004848, 0xC9440000, + 0xC1800000, 0xC000082C, 0xC9840030, 0x59540002, 0xC0004848, 0xCD440000, 0x58880002, 0xB49801A8, + 0x00000000, 0xC0800000, 0x80000198, 0xC0004854, 0xC1000000, 
0xCD040000, 0xC11C0000, 0xC000082C, + 0xCD040E08, 0x99005348, 0xC0004848, 0xC9440000, 0xC1800000, 0xC2000000, 0xC0000820, 0xCE040000, + 0xC1200000, 0xC0000818, 0xCD041008, 0xC11C0002, 0xC000082C, 0xCD040E08, 0xC0004850, 0xCE040000, + 0xC2000002, 0xC0001ACC, 0xCE040010, 0x800000D0, 0xC2000002, 0xC0004850, 0xCE040000, 0x8000FE70, + 0xC2000000, 0xC0004850, 0xCE040000, 0xA7E60012, 0x00000000, 0xC2000002, 0xC0001B00, 0xCE040008, + 0x8000FE58, 0x00000000, 0xA7860032, 0x00000000, 0xC6800000, 0xC13C0002, 0xCD001E08, 0xC2020002, + 0xC7E2A548, 0xC0001B00, 0xCE040000, 0x8000FE00, 0xC2040002, 0xC0001B00, 0xCE040208, 0x8000FDE0, + 0xC2C80002, 0x6AC56000, 0xDACC0000, 0xC0004854, 0xCB440000, 0xC0004848, 0xCB840000, 0xC0000838, + 0xC3C00000, 0xCBC40030, 0x5EF40004, 0x8400000A, 0xC3000000, 0xC0001ACC, 0xCF040108, 0x47BD8000, + 0x84000012, 0x47BD8000, 0x88000018, 0xC1006E8C, 0x8000B908, 0xC0004840, 0xCC840000, 0x8000F6B8, + 0xC0001AC0, 0xCAC40000, 0xC0004854, 0xCB440000, 0xA6C0FBD2, 0x00000000, 0x5EF40000, 0x8400F712, + 0x5EF40002, 0x8400F9A2, 0x5EF40004, 0x8400FBA2, 0xC1006CE8, 0x8000B880, 0x00000000, 0xC0800000, + 0xDF4B0040, 0xC0004900, 0xCB800000, 0xC2000000, 0xC000490A, 0xA78000B0, 0xCBC00000, 0xC1000000, + 0xD9000001, 0xC1000002, 0xD90C0000, 0x6FF46000, 0x47F5A000, 0x5B744C80, 0xC2400000, 0x58340004, + 0xCA400080, 0xC0004900, 0xCE000008, 0x5A640002, 0x58340004, 0xC6500080, 0xCD000080, 0xC0004914, + 0xCA400000, 0xC2000002, 0x6A3D0000, 0x72252000, 0xCE400000, 0xC0000408, 0xCE000000, 0xA78200B8, + 0xC0004908, 0xCBC00000, 0xC1000000, 0xD9000001, 0xC1000002, 0xD90C0000, 0x6FF4A000, 0x6FD44000, + 0x4575A000, 0x47F5A000, 0x5B744E20, 0xC2800000, 0x58340006, 0xCA800080, 0xC2000000, 0xC0004900, + 0xCE000108, 0x5EA80002, 0x58340006, 0xC6900080, 0xCD000080, 0x5A7C0020, 0xC2000002, 0x6A250000, + 0xC0000408, 0xCE000000, 0xDCA80001, 0x5EA80000, 0x8400B6F0, 0x00000000, 0xA4800210, 0x00000000, + 0xC3C00000, 0xC000140E, 0xCBC00020, 0xC3400000, 0xC2400000, 0x6FF86000, 0x47F9C000, 0x5BB84C80, + 0x58380008, 0xCB400080, 0x58380006, 0xCA400080, 0x5F740002, 0x58380008, 0xC7500080, 0xCD000080, + 0xC2000000, 0x58380004, 0xCA020080, 0xC3000000, 0x5838000C, 0xCB000028, 0x5A640002, 0x46250000, + 0x8400FFF8, 0xC2400000, 0x58380006, 0xC6500080, 0xCD000080, 0xC2000000, 0x5838000A, 0xCA020080, + 0x5B300002, 0x5838000C, 0xC7100028, 0xCD000028, 0xC2420020, 0x5A200004, 0x46612000, 0x8400FFF8, + 0xC2000000, 0x5838000A, 0xC6101080, 0xCD001080, 0xC0004966, 0xCA400000, 0xC2000002, 0x6A3D0000, + 0x72252000, 0xCE400000, 0x5F740000, 0x84000028, 0xC0004912, 0xCA000000, 0xC2C00002, 0x6AFD6000, + 0x7EC16000, 0x76E10000, 0xCE000000, 0x5F300020, 0x84000028, 0xC0004924, 0xCA000000, 0xC2C00002, + 0x6AFD6000, 0x7EC16000, 0x76E10000, 0xCE000000, 0xA4820050, 0xC2400000, 0xC000140E, 0xCA408020, + 0xC2000002, 0xC0004900, 0xCE000008, 0xC000490A, 0xCE400000, 0xC1000000, 0xD9000001, 0xD8400080, + 0xC1000004, 0xD9000001, 0xA4840288, 0x00000000, 0xC3C00000, 0xC000140E, 0xCBC10020, 0xC2800000, + 0xC2000000, 0x6FF8A000, 0x6FD44000, 0x4579C000, 0x47F9C000, 0x5BB84E20, 0x5838002E, 0xCA800080, + 0x58380006, 0xCA020080, 0xC3400000, 0x5838002E, 0xCB420080, 0x5AA80002, 0x46290000, 0x8400FFF8, + 0xC2800000, 0x5838002E, 0xC6900080, 0xCD000080, 0x5F740002, 0x5838002E, 0xC7501080, 0xCD001080, + 0xC0004968, 0xCA400000, 0xC2000002, 0x6A3D0000, 0x72252000, 0xCE400000, 0xC000492A, 0xCA800000, + 0x5E740000, 0x84000028, 0xC0004910, 0xCA000000, 0xC2C00002, 0x6AFD6000, 0x7EC16000, 0x76E10000, + 0xCE000000, 0x6ABD4010, 0xA68000D2, 0x00000000, 0xC0004910, 0xCA000000, 0xC2C00002, 
0x6AFD6000, + 0x7EC16000, 0x76E10000, 0xCE000000, 0x58380032, 0xCA000000, 0x58000002, 0xCA400000, 0x5838000C, + 0x00000000, 0xCE000001, 0xCE400000, 0xC000492A, 0xCA000000, 0xC2C00002, 0x6AFD6000, 0x72E10000, + 0xCE000000, 0xC000492C, 0xCA000000, 0xC2C00002, 0x6AFD6000, 0x72E10000, 0xCE000000, 0x80000028, + 0xC000492C, 0xCA000000, 0xC2C00002, 0x6AFD6000, 0x7EC16000, 0x76E10000, 0xCE000000, 0xA4880100, + 0xC2C00000, 0xC000140E, 0xCAC20020, 0xC000490E, 0xCA400000, 0xC2000002, 0x6A2D0000, 0x7E010000, + 0x76252000, 0xCE400000, 0xC000496A, 0xCA400000, 0xC2000002, 0x6A2D0000, 0x72252000, 0xCE400000, + 0x6EF0A000, 0x6ED44000, 0x45718000, 0x46F18000, 0x5B304E20, 0x58300000, 0xCA000000, 0x00000000, + 0xC2400002, 0x76252000, 0x84000032, 0xC24C0002, 0xC6E40020, 0xC624C408, 0x58300010, 0xCA400508, + 0x00000000, 0xC0001800, 0xCE400000, 0xA4860050, 0xC2400000, 0xC000140E, 0xCA418020, 0xC2020002, + 0xC0004900, 0xCE000108, 0xC0004908, 0xCE400000, 0xC1000000, 0xD9000001, 0xD8400080, 0xC1000004, + 0xD9000001, 0xC0001408, 0xCC800000, 0xC10E0002, 0xD90C0000, 0x8000EDA8, 0xDFBC0001, 0xC000496E, + 0x99006050, 0xC9400000, 0xC7D80000, 0x00000000, 0xC5700000, 0x5EF00020, 0x88000130, 0x6F346000, + 0x4735A000, 0x5B744C80, 0x58340008, 0xC2400000, 0xCA400080, 0x00000000, 0xC2000000, 0x5A640002, + 0xCE400080, 0x58340004, 0xCA000080, 0x00000000, 0x00000000, 0x5E200002, 0xCE000080, 0xC0004912, + 0xCA800000, 0xC2400002, 0x6A712000, 0x72694000, 0xCE800000, 0x5E200000, 0x8400003A, 0xC000480A, + 0xCA000000, 0xC0000408, 0xCA800000, 0x76610000, 0x00000000, 0x72294000, 0xCE800000, 0x80000020, + 0xC0004914, 0xCA000000, 0x7E412000, 0x00000000, 0x76610000, 0xCE000000, 0x800000B8, 0x6EF4A000, + 0x6ED44000, 0x4575A000, 0x46F5A000, 0x5B744E20, 0x5834002E, 0xC2400000, 0xCA420080, 0x00000000, + 0xC2000000, 0x5A640002, 0xC6501080, 0xCD001080, 0x58340006, 0xCA000080, 0x00000000, 0x00000000, + 0x5A200002, 0xCE000080, 0xC0004910, 0xCA400000, 0xC2000002, 0x6A2D0000, 0x72252000, 0xCE400000, + 0xC2000002, 0x6A310000, 0xC000042A, 0xCE000000, 0xC1040002, 0xD90C0000, 0x00000000, 0x8000EB18, + 0x00000000, 0xC4980930, 0x9D000000, 0xC5580030, 0xC0000838, 0xCD840000, 0xC1440200, 0xC1C03200, + 0xC55C1078, 0xC000100E, 0x9D000000, 0xCD800000, 0xC000100C, 0xCDC00000, 0xC0004862, 0xC9C00000, + 0x00000000, 0x00000000, 0xD9D80001, 0xC0007200, 0x401C0000, 0x5DC07400, 0x8800FFFA, 0x5C000200, + 0xCD800000, 0xC1F0000A, 0x71D4A000, 0xDD980000, 0xDD9C0001, 0x41D8E000, 0xC5D40268, 0xC0001010, + 0xCD400000, 0x6C9C8000, 0x449CE000, 0x449CE000, 0x59DC0004, 0xC1601260, 0xC5D40268, 0x9D000000, + 0xC0001012, 0xCD400000, 0x00000000, 0x00000000, 0xD9580000, 0x6D586000, 0x4558C000, 0x59984C80, + 0xD9980001, 0x5818000A, 0xC1800000, 0xC9800080, 0xC0005400, 0x6D5CA000, 0x401C0000, 0x40180000, + 0xC9400000, 0x58000002, 0x00000000, 0xC9C00000, 0xC0004930, 0xCD400000, 0xC0004932, 0xCDC00000, + 0x59980004, 0xC1C20020, 0xB59CFFF8, 0x00000000, 0xC1800000, 0xDD9C0001, 0x581C000A, 0xCD800080, + 0x581C000C, 0xC1800000, 0xC9800028, 0xC1C00002, 0xDD940000, 0x69D4E000, 0x5D980002, 0xCD800028, + 0xC0004924, 0xC9800000, 0x00000000, 0x9D000000, 0x00000000, 0x71D8C000, 0xCD800000, 0xC000492A, + 0xC9400000, 0xC1C00002, 0x69D8E000, 0x7DC0C000, 0x7594A000, 0xCD400000, 0xC000492C, 0xC9400000, + 0xDD800001, 0x58000032, 0x75D4A000, 0x84000078, 0xC9400001, 0xC9800000, 0xDD800001, 0x5800000C, + 0x00000000, 0xCD400001, 0xCD800000, 0xC000492C, 0xC9400000, 0xC000492A, 0xC9800000, 0x71D4A000, + 0xC000492C, 0xCD400000, 0x71D8C000, 0xC000492A, 0xCD800000, 0x9D000000, 0x00000000, 0x00000000, + 0x00000000, 
0xC0004862, 0xC9800000, 0x00000000, 0xC1C00200, 0x4194C000, 0x45D8E000, 0x8800FFFA, + 0xC5D80000, 0xC0004862, 0xCD800000, 0xC0001406, 0xC9800000, 0xC1C00002, 0x9D000000, 0xC5D80A08, + 0xC5581050, 0xCD800000, 0xC0004930, 0xC9800000, 0xC0004932, 0xC9C00000, 0xC140000E, 0xC5581C20, + 0xDD940000, 0xC0007200, 0x40140000, 0x5D407400, 0x8800FFFA, 0x5C000200, 0xCD800000, 0x58000002, + 0x5D407400, 0x8800FFFA, 0x5C000200, 0xCDC00000, 0xDD540000, 0xC1C00000, 0x58140006, 0xC9C20080, + 0xC1800000, 0x58140000, 0xC98000E0, 0x6DDC2000, 0xC000491E, 0x41D8E000, 0xCDC00000, 0xDD980000, + 0xC1C00022, 0xC5D80D78, 0xDD940001, 0xC5581C20, 0xC000491C, 0xCD800000, 0xDD540000, 0xC1C00000, + 0x58140006, 0xC9C20080, 0xC1800000, 0x58140004, 0xC9820080, 0x00000000, 0x59DC0002, 0x459CC000, + 0x8400FFF8, 0xC1C00000, 0x9D000000, 0x58140006, 0xC5D81080, 0xCD801080, 0xC0004860, 0xC9400000, + 0xC1800100, 0xC1D00002, 0x58146B00, 0xD5800000, 0x58000002, 0xD5800001, 0x59540004, 0xB558FFF8, + 0xC0004860, 0xC1400000, 0xCD400000, 0xDD980001, 0x9D000000, 0xDD940000, 0xC0001404, 0xCDC00808, + 0xC1C00000, 0xC1800200, 0x5D980004, 0xDF5D0050, 0x45D8A000, 0x8800FFDA, 0xDD800001, 0x5800000C, + 0x00000000, 0xC9400001, 0xC9800000, 0xC1C00002, 0xC5D43F08, 0xC5D81E08, 0xC0004862, 0xC9C00000, + 0x00000000, 0x00000000, 0x581C7200, 0x5DC07400, 0x8800FFFA, 0x5C000200, 0xCD400000, 0x58000002, + 0x5DC07400, 0x8800FFFA, 0x5C000200, 0xCD800000, 0xC0004862, 0xC9C00000, 0x00000000, 0xC15004C0, + 0xC5D40068, 0xDD9C0000, 0xC5D41C20, 0xC1C00000, 0xDD800001, 0x58000030, 0xC9C00080, 0xDD800001, + 0x58000002, 0xC9800000, 0x6DDC2000, 0xC000491C, 0x41D8E000, 0xCD400001, 0xCDC00000, 0xDD940001, + 0xC1C00000, 0x58140030, 0xC9C00080, 0xC1800000, 0x58140006, 0xC9820080, 0x00000000, 0x59DC0002, + 0x459CC000, 0x8400FFF8, 0xC1C00000, 0x9D000000, 0x58140030, 0xC5D80080, 0xCD800080, 0xC1C00000, + 0xDF5C0040, 0x5DDC0080, 0x8400FFD2, 0x00000000, 0x9D000000, 0x00000000, 0x00000000, 0x00000000, + 0xC160FFFE, 0xC0000A10, 0xC9440068, 0xC1A0FFFE, 0x59980E28, 0xC000100C, 0xCD400000, 0xC000100E, + 0xCD800000, 0xC0004964, 0xC9800000, 0x00000000, 0xC170000A, 0x7194A000, 0x6C988000, 0x4498C000, + 0x4498C000, 0x59980004, 0xC5940278, 0xC0001010, 0xCD400000, 0xC0004946, 0xC9400000, 0x00000000, + 0x00000000, 0x6D58A000, 0x6D5C4000, 0x45D8C000, 0x4558C000, 0xC000494A, 0xC9400000, 0xC0004948, + 0xC9C00000, 0x4194C000, 0xC1400012, 0xC55C1820, 0x9D000000, 0xC59C0270, 0xC0001012, 0xCDC00000, + 0xC1400000, 0x58000012, 0xC9410040, 0xC0004950, 0xC9C00000, 0xC5580000, 0xC5940840, 0xC5581080, + 0xD9940000, 0xC000493C, 0xC9400000, 0xC0004954, 0xC9800000, 0x59DC00A8, 0x455CE000, 0x41D8E000, + 0x5D5C0030, 0x8800FFF8, 0xC1C00030, 0xC1800000, 0xC5D84030, 0xC1400000, 0xC5D40010, 0x5DD40002, + 0x8400005A, 0x5DD40004, 0x84000082, 0x5DD40006, 0x840000AA, 0x5DD80026, 0x840000D2, 0xDD540000, + 0xDD800001, 0x58000008, 0x40180000, 0xCD400000, 0x59980002, 0x8000FFA8, 0xDD540000, 0xDD800001, + 0x58000008, 0x40180000, 0xCD4000C0, 0x59980002, 0x8000FF70, 0xDD540000, 0xDD800001, 0x58000008, + 0x40180000, 0xCD400080, 0x59980002, 0x8000FF38, 0xDD540000, 0xDD800001, 0x58000008, 0x40180000, + 0xCD400040, 0x59980002, 0x8000FF00, 0x00000000, 0x9D000000, 0x00000000, 0x00000000, 0x00000000, + 0x58000012, 0xC9400000, 0xC0004954, 0xC9C00000, 0xC0004950, 0xC9400080, 0xDD800001, 0x58000028, + 0x5D9C0000, 0x8400003A, 0x5D9C0002, 0x8400003A, 0x5D9C0004, 0x84000052, 0xC55B0040, 0xC55C08C0, + 0xCD800041, 0xCDC008C0, 0x80000048, 0xCD400000, 0x80000038, 0xC55900C0, 0xC55C1840, 0xCD8000C1, + 0xCDC01840, 0x80000010, 0xC55A0080, 
0xC55C1080, 0xCD800081, 0xCDC01080, 0x9D000000, 0x00000000, + 0x00000000, 0x00000000, 0x59540002, 0x6994E018, 0x61C0C008, 0x4194A000, 0x5D940040, 0x8800FFFA, + 0xC5940000, 0x9D000000, 0xCD400000, 0x00000000, 0x00000000, 0x9D000000, 0x4158A000, 0xCD400000, + 0x00000000, +}; + +static unsigned int firmware_binary_data[] = { +}; + + +#endif // IFXMIPS_ATM_FW_DANUBE_H diff --git a/package/lqdsl/src/ifxmips_atm_fw_regs_common.h b/package/lqdsl/src/ifxmips_atm_fw_regs_common.h new file mode 100644 index 0000000000..a5f59b8c40 --- /dev/null +++ b/package/lqdsl/src/ifxmips_atm_fw_regs_common.h @@ -0,0 +1,364 @@ +#ifndef IFXMIPS_ATM_FW_REGS_COMMON_H +#define IFXMIPS_ATM_FW_REGS_COMMON_H + + + +#if defined(CONFIG_DANUBE) + #include "ifxmips_atm_fw_regs_danube.h" +#elif defined(CONFIG_AMAZON_SE) + #include "ifxmips_atm_fw_regs_amazon_se.h" +#elif defined(CONFIG_AR9) + #include "ifxmips_atm_fw_regs_ar9.h" +#elif defined(CONFIG_VR9) + #include "ifxmips_atm_fw_regs_vr9.h" +#else + #error Platform is not specified! +#endif + + + +/* + * PPE ATM Cell Header + */ +#if defined(__BIG_ENDIAN) + struct uni_cell_header { + unsigned int gfc :4; + unsigned int vpi :8; + unsigned int vci :16; + unsigned int pti :3; + unsigned int clp :1; + }; +#else + struct uni_cell_header { + unsigned int clp :1; + unsigned int pti :3; + unsigned int vci :16; + unsigned int vpi :8; + unsigned int gfc :4; + }; +#endif // defined(__BIG_ENDIAN) + +/* + * Inband Header and Trailer + */ +#if defined(__BIG_ENDIAN) + struct rx_inband_trailer { + /* 0 - 3h */ + unsigned int uu :8; + unsigned int cpi :8; + unsigned int stw_res1:4; + unsigned int stw_clp :1; + unsigned int stw_ec :1; + unsigned int stw_uu :1; + unsigned int stw_cpi :1; + unsigned int stw_ovz :1; + unsigned int stw_mfl :1; + unsigned int stw_usz :1; + unsigned int stw_crc :1; + unsigned int stw_il :1; + unsigned int stw_ra :1; + unsigned int stw_res2:2; + /* 4 - 7h */ + unsigned int gfc :4; + unsigned int vpi :8; + unsigned int vci :16; + unsigned int pti :3; + unsigned int clp :1; + }; + + struct tx_inband_header { + /* 0 - 3h */ + unsigned int gfc :4; + unsigned int vpi :8; + unsigned int vci :16; + unsigned int pti :3; + unsigned int clp :1; + /* 4 - 7h */ + unsigned int uu :8; + unsigned int cpi :8; + unsigned int pad :8; + unsigned int res1 :8; + }; +#else + struct rx_inband_trailer { + /* 0 - 3h */ + unsigned int stw_res2:2; + unsigned int stw_ra :1; + unsigned int stw_il :1; + unsigned int stw_crc :1; + unsigned int stw_usz :1; + unsigned int stw_mfl :1; + unsigned int stw_ovz :1; + unsigned int stw_cpi :1; + unsigned int stw_uu :1; + unsigned int stw_ec :1; + unsigned int stw_clp :1; + unsigned int stw_res1:4; + unsigned int cpi :8; + unsigned int uu :8; + /* 4 - 7h */ + unsigned int clp :1; + unsigned int pti :3; + unsigned int vci :16; + unsigned int vpi :8; + unsigned int gfc :4; + }; + + struct tx_inband_header { + /* 0 - 3h */ + unsigned int clp :1; + unsigned int pti :3; + unsigned int vci :16; + unsigned int vpi :8; + unsigned int gfc :4; + /* 4 - 7h */ + unsigned int res1 :8; + unsigned int pad :8; + unsigned int cpi :8; + unsigned int uu :8; + }; +#endif // defined(__BIG_ENDIAN) + +/* + * MIB Table Maintained by Firmware + */ +struct wan_mib_table { + u32 res1; + u32 wrx_drophtu_cell; + u32 wrx_dropdes_pdu; + u32 wrx_correct_pdu; + u32 wrx_err_pdu; + u32 wrx_dropdes_cell; + u32 wrx_correct_cell; + u32 wrx_err_cell; + u32 wrx_total_byte; + u32 res2; + u32 wtx_total_pdu; + u32 wtx_total_cell; + u32 wtx_total_byte; +}; + +/* + * Host-PPE Communication Data 
Structure + */ + +#if defined(__BIG_ENDIAN) + struct wrx_queue_config { + /* 0h */ + unsigned int res2 :27; + unsigned int dmach :4; + unsigned int errdp :1; + /* 1h */ + unsigned int oversize :16; + unsigned int undersize :16; + /* 2h */ + unsigned int res1 :16; + unsigned int mfs :16; + /* 3h */ + unsigned int uumask :8; + unsigned int cpimask :8; + unsigned int uuexp :8; + unsigned int cpiexp :8; + }; + + struct wtx_port_config { + unsigned int res1 :27; + unsigned int qid :4; + unsigned int qsben :1; + }; + + struct wtx_queue_config { + unsigned int res1 :25; + unsigned int sbid :1; + unsigned int res2 :3; + unsigned int type :2; + unsigned int qsben :1; + }; + + struct wrx_dma_channel_config { + /* 0h */ + unsigned int res1 :1; + unsigned int mode :2; + unsigned int rlcfg :1; + unsigned int desba :28; + /* 1h */ + unsigned int chrl :16; + unsigned int clp1th :16; + /* 2h */ + unsigned int deslen :16; + unsigned int vlddes :16; + }; + + struct wtx_dma_channel_config { + /* 0h */ + unsigned int res2 :1; + unsigned int mode :2; + unsigned int res3 :1; + unsigned int desba :28; + /* 1h */ + unsigned int res1 :32; + /* 2h */ + unsigned int deslen :16; + unsigned int vlddes :16; + }; + + struct htu_entry { + unsigned int res1 :1; + unsigned int clp :1; + unsigned int pid :2; + unsigned int vpi :8; + unsigned int vci :16; + unsigned int pti :3; + unsigned int vld :1; + }; + + struct htu_mask { + unsigned int set :1; + unsigned int clp :1; + unsigned int pid_mask :2; + unsigned int vpi_mask :8; + unsigned int vci_mask :16; + unsigned int pti_mask :3; + unsigned int clear :1; + }; + + struct htu_result { + unsigned int res1 :12; + unsigned int cellid :4; + unsigned int res2 :5; + unsigned int type :1; + unsigned int ven :1; + unsigned int res3 :5; + unsigned int qid :4; + }; + + struct rx_descriptor { + /* 0 - 3h */ + unsigned int own :1; + unsigned int c :1; + unsigned int sop :1; + unsigned int eop :1; + unsigned int res1 :3; + unsigned int byteoff :2; + unsigned int res2 :2; + unsigned int id :4; + unsigned int err :1; + unsigned int datalen :16; + /* 4 - 7h */ + unsigned int res3 :4; + unsigned int dataptr :28; + }; + + struct tx_descriptor { + /* 0 - 3h */ + unsigned int own :1; + unsigned int c :1; + unsigned int sop :1; + unsigned int eop :1; + unsigned int byteoff :5; + unsigned int res1 :5; + unsigned int iscell :1; + unsigned int clp :1; + unsigned int datalen :16; + /* 4 - 7h */ + unsigned int res2 :4; + unsigned int dataptr :28; + }; +#else + struct wrx_queue_config { + /* 0h */ + unsigned int errdp :1; + unsigned int dmach :4; + unsigned int res2 :27; + /* 1h */ + unsigned int undersize :16; + unsigned int oversize :16; + /* 2h */ + unsigned int mfs :16; + unsigned int res1 :16; + /* 3h */ + unsigned int cpiexp :8; + unsigned int uuexp :8; + unsigned int cpimask :8; + unsigned int uumask :8; + }; + + struct wtx_port_config { + unsigned int qsben :1; + unsigned int qid :4; + unsigned int res1 :27; + }; + + struct wtx_queue_config { + unsigned int qsben :1; + unsigned int type :2; + unsigned int res2 :3; + unsigned int sbid :1; + unsigned int res1 :25; + }; + + struct wrx_dma_channel_config + { + /* 0h */ + unsigned int desba :28; + unsigned int rlcfg :1; + unsigned int mode :2; + unsigned int res1 :1; + /* 1h */ + unsigned int clp1th :16; + unsigned int chrl :16; + /* 2h */ + unsigned int vlddes :16; + unsigned int deslen :16; + }; + + struct wtx_dma_channel_config { + /* 0h */ + unsigned int desba :28; + unsigned int res3 :1; + unsigned int mode :2; + unsigned int res2 :1; + /* 1h 
*/
+        unsigned int    res1        :32;
+        /* 2h */
+        unsigned int    vlddes      :16;
+        unsigned int    deslen      :16;
+    };
+
+    struct rx_descriptor {
+        /* 4 - 7h */
+        unsigned int    dataptr     :28;
+        unsigned int    res3        :4;
+        /* 0 - 3h */
+        unsigned int    datalen     :16;
+        unsigned int    err         :1;
+        unsigned int    id          :4;
+        unsigned int    res2        :2;
+        unsigned int    byteoff     :2;
+        unsigned int    res1        :3;
+        unsigned int    eop         :1;
+        unsigned int    sop         :1;
+        unsigned int    c           :1;
+        unsigned int    own         :1;
+    };
+
+    struct tx_descriptor {
+        /* 4 - 7h */
+        unsigned int    dataptr     :28;
+        unsigned int    res2        :4;
+        /* 0 - 3h */
+        unsigned int    datalen     :16;
+        unsigned int    clp         :1;
+        unsigned int    iscell      :1;
+        unsigned int    res1        :5;
+        unsigned int    byteoff     :5;
+        unsigned int    eop         :1;
+        unsigned int    sop         :1;
+        unsigned int    c           :1;
+        unsigned int    own         :1;
+    };
+#endif  // defined(__BIG_ENDIAN)
+
+
+
+#endif  // IFXMIPS_ATM_FW_REGS_COMMON_H
diff --git a/package/lqdsl/src/ifxmips_atm_fw_regs_danube.h b/package/lqdsl/src/ifxmips_atm_fw_regs_danube.h
new file mode 100644
index 0000000000..c0dfc6a2e0
--- /dev/null
+++ b/package/lqdsl/src/ifxmips_atm_fw_regs_danube.h
@@ -0,0 +1,30 @@
+#ifndef IFXMIPS_ATM_FW_REGS_DANUBE_H
+#define IFXMIPS_ATM_FW_REGS_DANUBE_H
+
+
+
+/*
+ * Host-PPE Communication Data Address Mapping
+ */
+#define FW_VER_ID                       SB_BUFFER(0x2001)
+#define CFG_WRX_HTUTS                   SB_BUFFER(0x2400)   /* WAN RX HTU Table Size, must be configured before enabling the PPE firmware.             */
+#define CFG_WRX_QNUM                    SB_BUFFER(0x2401)   /* WAN RX Queue Number                                                                      */
+#define CFG_WRX_DCHNUM                  SB_BUFFER(0x2402)   /* WAN RX DMA Channel Number, no more than 8, must be configured before enabling the PPE firmware.  */
+#define CFG_WTX_DCHNUM                  SB_BUFFER(0x2403)   /* WAN TX DMA Channel Number, no more than 16, must be configured before enabling the PPE firmware. */
+#define CFG_WRDES_DELAY                 SB_BUFFER(0x2404)   /* WAN Descriptor Write Delay, must be configured before enabling the PPE firmware.        */
+#define WRX_DMACH_ON                    SB_BUFFER(0x2405)   /* WAN RX DMA Channel Enable, must be configured before enabling the PPE firmware.         */
+#define WTX_DMACH_ON                    SB_BUFFER(0x2406)   /* WAN TX DMA Channel Enable, must be configured before enabling the PPE firmware.         */
+#define WRX_HUNT_BITTH                  SB_BUFFER(0x2407)   /* WAN RX HUNT Threshold, must be between 2 and 8.
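+                                                               Presumably initialised by the driver from DEFAULT_RX_HUNT_BITTH (4) in ifxmips_atm_core.h.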
*/ +#define WRX_QUEUE_CONFIG(i) ((struct wrx_queue_config*) SB_BUFFER(0x2500 + (i) * 20)) +#define WRX_DMA_CHANNEL_CONFIG(i) ((struct wrx_dma_channel_config*) SB_BUFFER(0x2640 + (i) * 7)) +#define WTX_PORT_CONFIG(i) ((struct wtx_port_config*) SB_BUFFER(0x2440 + (i))) +#define WTX_QUEUE_CONFIG(i) ((struct wtx_queue_config*) SB_BUFFER(0x2710 + (i) * 27)) +#define WTX_DMA_CHANNEL_CONFIG(i) ((struct wtx_dma_channel_config*) SB_BUFFER(0x2711 + (i) * 27)) +#define WAN_MIB_TABLE ((struct wan_mib_table*) SB_BUFFER(0x2410)) +#define HTU_ENTRY(i) ((struct htu_entry*) SB_BUFFER(0x2000 + (i))) +#define HTU_MASK(i) ((struct htu_mask*) SB_BUFFER(0x2020 + (i))) +#define HTU_RESULT(i) ((struct htu_result*) SB_BUFFER(0x2040 + (i))) + + + +#endif // IFXMIPS_ATM_FW_REGS_DANUBE_H diff --git a/package/lqdsl/src/ifxmips_atm_ppe_common.h b/package/lqdsl/src/ifxmips_atm_ppe_common.h new file mode 100644 index 0000000000..c922276dfb --- /dev/null +++ b/package/lqdsl/src/ifxmips_atm_ppe_common.h @@ -0,0 +1,231 @@ +#ifndef IFXMIPS_ATM_PPE_COMMON_H +#define IFXMIPS_ATM_PPE_COMMON_H + +#if defined(CONFIG_LANTIQ) + #include "ifxmips_atm_ppe_danube.h" + #define CONFIG_DANUBE +#elif defined(CONFIG_DANUBE) + #include "ifxmips_atm_ppe_danube.h" +#elif defined(CONFIG_AMAZON_SE) + #include "ifxmips_atm_ppe_amazon_se.h" +#elif defined(CONFIG_AR9) + #include "ifxmips_atm_ppe_ar9.h" +#elif defined(CONFIG_VR9) + #include "ifxmips_atm_ppe_vr9.h" +#else + #error Platform is not specified! +#endif + + +/* + * Code/Data Memory (CDM) Interface Configuration Register + */ +#define CDM_CFG PPE_REG_ADDR(0x0100) + +#define CDM_CFG_RAM1 GET_BITS(*CDM_CFG, 3, 2) +#define CDM_CFG_RAM0 (*CDM_CFG & (1 << 1)) + +#define CDM_CFG_RAM1_SET(value) SET_BITS(0, 3, 2, value) +#define CDM_CFG_RAM0_SET(value) ((value) ? (1 << 1) : 0) + +/* + * QSB Internal Cell Delay Variation Register + */ +#define QSB_ICDV QSB_CONF_REG_ADDR(0x0007) + +#define QSB_ICDV_TAU GET_BITS(*QSB_ICDV, 5, 0) + +#define QSB_ICDV_TAU_SET(value) SET_BITS(0, 5, 0, value) + +/* + * QSB Scheduler Burst Limit Register + */ +#define QSB_SBL QSB_CONF_REG_ADDR(0x0009) + +#define QSB_SBL_SBL GET_BITS(*QSB_SBL, 3, 0) + +#define QSB_SBL_SBL_SET(value) SET_BITS(0, 3, 0, value) + +/* + * QSB Configuration Register + */ +#define QSB_CFG QSB_CONF_REG_ADDR(0x000A) + +#define QSB_CFG_TSTEPC GET_BITS(*QSB_CFG, 1, 0) + +#define QSB_CFG_TSTEPC_SET(value) SET_BITS(0, 1, 0, value) + +/* + * QSB RAM Transfer Table Register + */ +#define QSB_RTM QSB_CONF_REG_ADDR(0x000B) + +#define QSB_RTM_DM (*QSB_RTM) + +#define QSB_RTM_DM_SET(value) ((value) & 0xFFFFFFFF) + +/* + * QSB RAM Transfer Data Register + */ +#define QSB_RTD QSB_CONF_REG_ADDR(0x000C) + +#define QSB_RTD_TTV (*QSB_RTD) + +#define QSB_RTD_TTV_SET(value) ((value) & 0xFFFFFFFF) + +/* + * QSB RAM Access Register + */ +#define QSB_RAMAC QSB_CONF_REG_ADDR(0x000D) + +#define QSB_RAMAC_RW (*QSB_RAMAC & (1 << 31)) +#define QSB_RAMAC_TSEL GET_BITS(*QSB_RAMAC, 27, 24) +#define QSB_RAMAC_LH (*QSB_RAMAC & (1 << 16)) +#define QSB_RAMAC_TESEL GET_BITS(*QSB_RAMAC, 9, 0) + +#define QSB_RAMAC_RW_SET(value) ((value) ? (1 << 31) : 0) +#define QSB_RAMAC_TSEL_SET(value) SET_BITS(0, 27, 24, value) +#define QSB_RAMAC_LH_SET(value) ((value) ? 
(1 << 16) : 0) +#define QSB_RAMAC_TESEL_SET(value) SET_BITS(0, 9, 0, value) + +/* + * QSB Queue Scheduling and Shaping Definitions + */ +#define QSB_WFQ_NONUBR_MAX 0x3f00 +#define QSB_WFQ_UBR_BYPASS 0x3fff +#define QSB_TP_TS_MAX 65472 +#define QSB_TAUS_MAX 64512 +#define QSB_GCR_MIN 18 + +/* + * QSB Constant + */ +#define QSB_RAMAC_RW_READ 0 +#define QSB_RAMAC_RW_WRITE 1 + +#define QSB_RAMAC_TSEL_QPT 0x01 +#define QSB_RAMAC_TSEL_SCT 0x02 +#define QSB_RAMAC_TSEL_SPT 0x03 +#define QSB_RAMAC_TSEL_VBR 0x08 + +#define QSB_RAMAC_LH_LOW 0 +#define QSB_RAMAC_LH_HIGH 1 + +#define QSB_QPT_SET_MASK 0x0 +#define QSB_QVPT_SET_MASK 0x0 +#define QSB_SET_SCT_MASK 0x0 +#define QSB_SET_SPT_MASK 0x0 +#define QSB_SET_SPT_SBVALID_MASK 0x7FFFFFFF + +#define QSB_SPT_SBV_VALID (1 << 31) +#define QSB_SPT_PN_SET(value) (((value) & 0x01) ? (1 << 16) : 0) +#define QSB_SPT_INTRATE_SET(value) SET_BITS(0, 13, 0, value) + +/* + * QSB Queue Parameter Table Entry and Queue VBR Parameter Table Entry + */ +#if defined(__BIG_ENDIAN) + union qsb_queue_parameter_table { + struct { + unsigned int res1 :1; + unsigned int vbr :1; + unsigned int wfqf :14; + unsigned int tp :16; + } bit; + u32 dword; + }; + + union qsb_queue_vbr_parameter_table { + struct { + unsigned int taus :16; + unsigned int ts :16; + } bit; + u32 dword; + }; +#else + union qsb_queue_parameter_table { + struct { + unsigned int tp :16; + unsigned int wfqf :14; + unsigned int vbr :1; + unsigned int res1 :1; + } bit; + u32 dword; + }; + + union qsb_queue_vbr_parameter_table { + struct { + unsigned int ts :16; + unsigned int taus :16; + } bit; + u32 dword; + }; +#endif // defined(__BIG_ENDIAN) + +/* + * Mailbox IGU0 Registers + */ +#define MBOX_IGU0_ISRS PPE_REG_ADDR(0x0200) +#define MBOX_IGU0_ISRC PPE_REG_ADDR(0x0201) +#define MBOX_IGU0_ISR PPE_REG_ADDR(0x0202) +#define MBOX_IGU0_IER PPE_REG_ADDR(0x0203) + +#define MBOX_IGU0_ISRS_SET(n) (1 << (n)) +#define MBOX_IGU0_ISRC_CLEAR(n) (1 << (n)) +#define MBOX_IGU0_ISR_ISR(n) (*MBOX_IGU0_ISR & (1 << (n))) +#define MBOX_IGU0_IER_EN(n) (*MBOX_IGU0_IER & (1 << (n))) +#define MBOX_IGU0_IER_EN_SET(n) (1 << (n)) + +/* + * Mailbox IGU1 Registers + */ +#define MBOX_IGU1_ISRS PPE_REG_ADDR(0x0204) +#define MBOX_IGU1_ISRC PPE_REG_ADDR(0x0205) +#define MBOX_IGU1_ISR PPE_REG_ADDR(0x0206) +#define MBOX_IGU1_IER PPE_REG_ADDR(0x0207) + +#define MBOX_IGU1_ISRS_SET(n) (1 << (n)) +#define MBOX_IGU1_ISRC_CLEAR(n) (1 << (n)) +#define MBOX_IGU1_ISR_ISR(n) (*MBOX_IGU1_ISR & (1 << (n))) +#define MBOX_IGU1_IER_EN(n) (*MBOX_IGU1_IER & (1 << (n))) +#define MBOX_IGU1_IER_EN_SET(n) (1 << (n)) + +/* + * Mailbox IGU3 Registers + */ +#define MBOX_IGU3_ISRS PPE_REG_ADDR(0x0214) +#define MBOX_IGU3_ISRC PPE_REG_ADDR(0x0215) +#define MBOX_IGU3_ISR PPE_REG_ADDR(0x0216) +#define MBOX_IGU3_IER PPE_REG_ADDR(0x0217) + +#define MBOX_IGU3_ISRS_SET(n) (1 << (n)) +#define MBOX_IGU3_ISRC_CLEAR(n) (1 << (n)) +#define MBOX_IGU3_ISR_ISR(n) (*MBOX_IGU3_ISR & (1 << (n))) +#define MBOX_IGU3_IER_EN(n) (*MBOX_IGU3_IER & (1 << (n))) +#define MBOX_IGU3_IER_EN_SET(n) (1 << (n)) + +/* + * RTHA/TTHA Registers + */ +#define SFSM_STATE0 PPE_REG_ADDR(0x0410) +#define SFSM_STATE1 PPE_REG_ADDR(0x0411) +#define SFSM_DBA0 PPE_REG_ADDR(0x0412) +#define SFSM_DBA1 PPE_REG_ADDR(0x0413) +#define SFSM_CBA0 PPE_REG_ADDR(0x0414) +#define SFSM_CBA1 PPE_REG_ADDR(0x0415) +#define SFSM_CFG0 PPE_REG_ADDR(0x0416) +#define SFSM_CFG1 PPE_REG_ADDR(0x0417) +#define SFSM_PGCNT0 PPE_REG_ADDR(0x041C) +#define SFSM_PGCNT1 PPE_REG_ADDR(0x041D) +#define FFSM_DBA0 PPE_REG_ADDR(0x0508) +#define FFSM_DBA1 
PPE_REG_ADDR(0x0509) +#define FFSM_CFG0 PPE_REG_ADDR(0x050A) +#define FFSM_CFG1 PPE_REG_ADDR(0x050B) +#define FFSM_IDLE_HEAD_BC0 PPE_REG_ADDR(0x050E) +#define FFSM_IDLE_HEAD_BC1 PPE_REG_ADDR(0x050F) +#define FFSM_PGCNT0 PPE_REG_ADDR(0x0514) +#define FFSM_PGCNT1 PPE_REG_ADDR(0x0515) + + + +#endif // IFXMIPS_ATM_PPE_COMMON_H diff --git a/package/lqdsl/src/ifxmips_atm_ppe_danube.h b/package/lqdsl/src/ifxmips_atm_ppe_danube.h new file mode 100644 index 0000000000..a60abe1d55 --- /dev/null +++ b/package/lqdsl/src/ifxmips_atm_ppe_danube.h @@ -0,0 +1,100 @@ +#ifndef IFXMIPS_ATM_PPE_DANUBE_H +#define IFXMIPS_ATM_PPE_DANUBE_H + +#include <lantiq.h> + +/* + * FPI Configuration Bus Register and Memory Address Mapping + */ +#define IFX_PPE (KSEG1 | 0x1E180000) +#define PP32_DEBUG_REG_ADDR(i, x) ((volatile unsigned int*)(IFX_PPE + (((x) + 0x0000) << 2))) +#define PPM_INT_REG_ADDR(i, x) ((volatile unsigned int*)(IFX_PPE + (((x) + 0x0030) << 2))) +#define PP32_INTERNAL_RES_ADDR(i, x) ((volatile unsigned int*)(IFX_PPE + (((x) + 0x0040) << 2))) +#define CDM_CODE_MEMORY(i, x) ((volatile unsigned int*)(IFX_PPE + (((x) + 0x1000) << 2))) +#define PPE_REG_ADDR(x) ((volatile unsigned int*)(IFX_PPE + (((x) + 0x4000) << 2))) +#define CDM_DATA_MEMORY(i, x) ((volatile unsigned int*)(IFX_PPE + (((x) + 0x5000) << 2))) +#define PPM_INT_UNIT_ADDR(x) ((volatile unsigned int*)(IFX_PPE + (((x) + 0x6000) << 2))) +#define PPM_TIMER0_ADDR(x) ((volatile unsigned int*)(IFX_PPE + (((x) + 0x6100) << 2))) +#define PPM_TASK_IND_REG_ADDR(x) ((volatile unsigned int*)(IFX_PPE + (((x) + 0x6200) << 2))) +#define PPS_BRK_ADDR(x) ((volatile unsigned int*)(IFX_PPE + (((x) + 0x6300) << 2))) +#define PPM_TIMER1_ADDR(x) ((volatile unsigned int*)(IFX_PPE + (((x) + 0x6400) << 2))) +#define SB_RAM0_ADDR(x) ((volatile unsigned int*)(IFX_PPE + (((x) + 0x8000) << 2))) +#define SB_RAM1_ADDR(x) ((volatile unsigned int*)(IFX_PPE + (((x) + 0x8400) << 2))) +#define SB_RAM2_ADDR(x) ((volatile unsigned int*)(IFX_PPE + (((x) + 0x8C00) << 2))) +#define SB_RAM3_ADDR(x) ((volatile unsigned int*)(IFX_PPE + (((x) + 0x9600) << 2))) +#define QSB_CONF_REG_ADDR(x) ((volatile unsigned int*)(IFX_PPE + (((x) + 0xC000) << 2))) + +/* + * DWORD-Length of Memory Blocks + */ +#define PP32_DEBUG_REG_DWLEN 0x0030 +#define PPM_INT_REG_DWLEN 0x0010 +#define PP32_INTERNAL_RES_DWLEN 0x00C0 +#define CDM_CODE_MEMORYn_DWLEN(n) ((n) == 0 ? 0x1000 : 0x0800) +#define PPE_REG_DWLEN 0x1000 +#define CDM_DATA_MEMORY_DWLEN CDM_CODE_MEMORYn_DWLEN(1) +#define PPM_INT_UNIT_DWLEN 0x0100 +#define PPM_TIMER0_DWLEN 0x0100 +#define PPM_TASK_IND_REG_DWLEN 0x0100 +#define PPS_BRK_DWLEN 0x0100 +#define PPM_TIMER1_DWLEN 0x0100 +#define SB_RAM0_DWLEN 0x0400 +#define SB_RAM1_DWLEN 0x0800 +#define SB_RAM2_DWLEN 0x0A00 +#define SB_RAM3_DWLEN 0x0400 +#define QSB_CONF_REG_DWLEN 0x0100 + +/* + * PP32 to FPI Address Mapping + */ +#define SB_BUFFER(__sb_addr) ((volatile unsigned int *)((((__sb_addr) >= 0x2000) && ((__sb_addr) <= 0x23FF)) ? SB_RAM0_ADDR((__sb_addr) - 0x2000) : \ + (((__sb_addr) >= 0x2400) && ((__sb_addr) <= 0x2BFF)) ? SB_RAM1_ADDR((__sb_addr) - 0x2400) : \ + (((__sb_addr) >= 0x2C00) && ((__sb_addr) <= 0x35FF)) ? SB_RAM2_ADDR((__sb_addr) - 0x2C00) : \ + (((__sb_addr) >= 0x3600) && ((__sb_addr) <= 0x39FF)) ? SB_RAM3_ADDR((__sb_addr) - 0x3600) : \ + 0)) + +/* + * PP32 Debug Control Register + */ +#define PP32_DBG_CTRL PP32_DEBUG_REG_ADDR(0, 0x0000) + +#define DBG_CTRL_START_SET(value) ((value) ? (1 << 0) : 0) +#define DBG_CTRL_STOP_SET(value) ((value) ? 
(1 << 1) : 0) +#define DBG_CTRL_STEP_SET(value) ((value) ? (1 << 2) : 0) + +#define PP32_HALT_STAT PP32_DEBUG_REG_ADDR(0, 0x0001) + +#define PP32_BRK_SRC PP32_DEBUG_REG_ADDR(0, 0x0002) + +#define PP32_DBG_PC_MIN(i) PP32_DEBUG_REG_ADDR(0, 0x0010 + (i)) +#define PP32_DBG_PC_MAX(i) PP32_DEBUG_REG_ADDR(0, 0x0014 + (i)) +#define PP32_DBG_DATA_MIN(i) PP32_DEBUG_REG_ADDR(0, 0x0018 + (i)) +#define PP32_DBG_DATA_MAX(i) PP32_DEBUG_REG_ADDR(0, 0x001A + (i)) +#define PP32_DBG_DATA_VAL(i) PP32_DEBUG_REG_ADDR(0, 0x001C + (i)) + +#define PP32_DBG_CUR_PC PP32_DEBUG_REG_ADDR(0, 0x0080) + +#define PP32_DBG_TASK_NO PP32_DEBUG_REG_ADDR(0, 0x0081) + +/* + * EMA Registers + */ +#define EMA_CMDCFG PPE_REG_ADDR(0x0A00) +#define EMA_DATACFG PPE_REG_ADDR(0x0A01) +#define EMA_CMDCNT PPE_REG_ADDR(0x0A02) +#define EMA_DATACNT PPE_REG_ADDR(0x0A03) +#define EMA_ISR PPE_REG_ADDR(0x0A04) +#define EMA_IER PPE_REG_ADDR(0x0A05) +#define EMA_CFG PPE_REG_ADDR(0x0A06) +#define EMA_SUBID PPE_REG_ADDR(0x0A07) + +#define EMA_ALIGNMENT 4 + +/* + * Mailbox IGU1 Interrupt + */ +#define PPE_MAILBOX_IGU1_INT LQ_PPE_MBOX_INT + + + +#endif // IFXMIPS_ATM_PPE_DANUBE_H diff --git a/package/lqdsl/src/ifxmips_compat.h b/package/lqdsl/src/ifxmips_compat.h new file mode 100644 index 0000000000..f5b5768466 --- /dev/null +++ b/package/lqdsl/src/ifxmips_compat.h @@ -0,0 +1,43 @@ +#ifndef _IFXMIPS_COMPAT_H__ +#define _IFXMIPS_COMPAT_H__ + +#define IFX_SUCCESS 0 +#define IFX_ERROR (-1) + +#define ATM_VBR_NRT ATM_VBR +#define ATM_VBR_RT 6 +#define ATM_UBR_PLUS 7 +#define ATM_GFR 8 + +#define NUM_ENTITY(x) (sizeof(x) / sizeof(*(x))) + +#define SET_BITS(x, msb, lsb, value) \ + (((x) & ~(((1 << ((msb) + 1)) - 1) ^ ((1 << (lsb)) - 1))) | (((value) & ((1 << (1 + (msb) - (lsb))) - 1)) << (lsb))) + + +#define IFX_PMU_ENABLE 1 +#define IFX_PMU_DISABLE 0 + +#define IFX_PMU_MODULE_DSL_DFE (1 << 9) +#define IFX_PMU_MODULE_AHBS (1 << 13) +#define IFX_PMU_MODULE_PPE_QSB (1 << 18) +#define IFX_PMU_MODULE_PPE_SLL01 (1 << 19) +#define IFX_PMU_MODULE_PPE_TC (1 << 21) +#define IFX_PMU_MODULE_PPE_EMA (1 << 22) +#define IFX_PMU_MODULE_PPE_TOP (1 << 29) + +#define ifx_pmu_set(a,b) {if(a == IFX_PMU_ENABLE) lq_pmu_enable(b); else lq_pmu_disable(b);} + +#define PPE_TOP_PMU_SETUP(__x) ifx_pmu_set(IFX_PMU_MODULE_PPE_TOP, (__x)) +#define PPE_SLL01_PMU_SETUP(__x) ifx_pmu_set(IFX_PMU_MODULE_PPE_SLL01, (__x)) +#define PPE_TC_PMU_SETUP(__x) ifx_pmu_set(IFX_PMU_MODULE_PPE_TC, (__x)) +#define PPE_EMA_PMU_SETUP(__x) ifx_pmu_set(IFX_PMU_MODULE_PPE_EMA, (__x)) +#define PPE_QSB_PMU_SETUP(__x) ifx_pmu_set(IFX_PMU_MODULE_PPE_QSB, (__x)) +#define PPE_TPE_PMU_SETUP(__x) ifx_pmu_set(IFX_PMU_MODULE_AHBS, (__x)) +#define DSL_DFE_PMU_SETUP(__x) ifx_pmu_set(IFX_PMU_MODULE_DSL_DFE, (__x)) + +#define IFX_REG_W32(_v, _r) __raw_writel((_v), (_r)) + +#define CONFIG_IFXMIPS_DSL_CPE_MEI y + +#endif diff --git a/package/lqdsl/src/ifxmips_mei_interface.h b/package/lqdsl/src/ifxmips_mei_interface.h new file mode 100644 index 0000000000..ffde6113c8 --- /dev/null +++ b/package/lqdsl/src/ifxmips_mei_interface.h @@ -0,0 +1,700 @@ +/****************************************************************************** + + Copyright (c) 2009 + Infineon Technologies AG + Am Campeon 1-12; 81726 Munich, Germany + + For licensing information, see the file 'LICENSE' in the root folder of + this software module. 
+ +******************************************************************************/ + +#ifndef IFXMIPS_MEI_H +#define IFXMIPS_MEI_H + +#define CONFIG_DANUBE 1 + +#if !defined(CONFIG_DANUBE) && !defined(CONFIG_AMAZON_SE) && !defined(CONFIG_AR9) && !defined(CONFIG_VR9) +#error Platform undefined!!! +#endif + +#ifdef IFX_MEI_BSP +/** This is the character datatype. */ +typedef char DSL_char_t; +/** This is the unsigned 8-bit datatype. */ +typedef unsigned char DSL_uint8_t; +/** This is the signed 8-bit datatype. */ +typedef signed char DSL_int8_t; +/** This is the unsigned 16-bit datatype. */ +typedef unsigned short DSL_uint16_t; +/** This is the signed 16-bit datatype. */ +typedef signed short DSL_int16_t; +/** This is the unsigned 32-bit datatype. */ +typedef unsigned long DSL_uint32_t; +/** This is the signed 32-bit datatype. */ +typedef signed long DSL_int32_t; +/** This is the float datatype. */ +typedef float DSL_float_t; +/** This is the void datatype. */ +typedef void DSL_void_t; +/** integer type, width is depending on processor arch */ +typedef int DSL_int_t; +/** unsigned integer type, width is depending on processor arch */ +typedef unsigned int DSL_uint_t; +typedef struct file DSL_DRV_file_t; +typedef struct inode DSL_DRV_inode_t; + +/** + * Defines all possible CMV groups + * */ +typedef enum { + DSL_CMV_GROUP_CNTL = 1, + DSL_CMV_GROUP_STAT = 2, + DSL_CMV_GROUP_INFO = 3, + DSL_CMV_GROUP_TEST = 4, + DSL_CMV_GROUP_OPTN = 5, + DSL_CMV_GROUP_RATE = 6, + DSL_CMV_GROUP_PLAM = 7, + DSL_CMV_GROUP_CNFG = 8 +} DSL_CmvGroup_t; +/** + * Defines all opcode types + * */ +typedef enum { + H2D_CMV_READ = 0x00, + H2D_CMV_WRITE = 0x04, + H2D_CMV_INDICATE_REPLY = 0x10, + H2D_ERROR_OPCODE_UNKNOWN =0x20, + H2D_ERROR_CMV_UNKNOWN =0x30, + + D2H_CMV_READ_REPLY =0x01, + D2H_CMV_WRITE_REPLY = 0x05, + D2H_CMV_INDICATE = 0x11, + D2H_ERROR_OPCODE_UNKNOWN = 0x21, + D2H_ERROR_CMV_UNKNOWN = 0x31, + D2H_ERROR_CMV_READ_NOT_AVAILABLE = 0x41, + D2H_ERROR_CMV_WRITE_ONLY = 0x51, + D2H_ERROR_CMV_READ_ONLY = 0x61, + + H2D_DEBUG_READ_DM = 0x02, + H2D_DEBUG_READ_PM = 0x06, + H2D_DEBUG_WRITE_DM = 0x0a, + H2D_DEBUG_WRITE_PM = 0x0e, + + D2H_DEBUG_READ_DM_REPLY = 0x03, + D2H_DEBUG_READ_FM_REPLY = 0x07, + D2H_DEBUG_WRITE_DM_REPLY = 0x0b, + D2H_DEBUG_WRITE_FM_REPLY = 0x0f, + D2H_ERROR_ADDR_UNKNOWN = 0x33, + + D2H_AUTONOMOUS_MODEM_READY_MSG = 0xf1 +} DSL_CmvOpcode_t; + +/* mutex macros */ +#define MEI_MUTEX_INIT(id,flag) \ + sema_init(&id,flag) +#define MEI_MUTEX_LOCK(id) \ + down_interruptible(&id) +#define MEI_MUTEX_UNLOCK(id) \ + up(&id) +#define MEI_WAIT(ms) \ + {\ + set_current_state(TASK_INTERRUPTIBLE);\ + schedule_timeout(ms);\ + } +#define MEI_INIT_WAKELIST(name,queue) \ + init_waitqueue_head(&queue) + +/* wait for an event, timeout is measured in ms */ +#define MEI_WAIT_EVENT_TIMEOUT(ev,timeout)\ + interruptible_sleep_on_timeout(&ev,timeout * HZ / 1000) +#define MEI_WAKEUP_EVENT(ev)\ + wake_up_interruptible(&ev) +#endif /* IFX_MEI_BSP */ + +/*** Register address offsets, relative to MEI_SPACE_ADDRESS ***/ +#define ME_DX_DATA (0x0000) +#define ME_VERSION (0x0004) +#define ME_ARC_GP_STAT (0x0008) +#define ME_DX_STAT (0x000C) +#define ME_DX_AD (0x0010) +#define ME_DX_MWS (0x0014) +#define ME_ME2ARC_INT (0x0018) +#define ME_ARC2ME_STAT (0x001C) +#define ME_ARC2ME_MASK (0x0020) +#define ME_DBG_WR_AD (0x0024) +#define ME_DBG_RD_AD (0x0028) +#define ME_DBG_DATA (0x002C) +#define ME_DBG_DECODE (0x0030) +#define ME_CONFIG (0x0034) +#define ME_RST_CTRL (0x0038) +#define ME_DBG_MASTER (0x003C) +#define ME_CLK_CTRL (0x0040) 
+#define ME_BIST_CTRL (0x0044) +#define ME_BIST_STAT (0x0048) +#define ME_XDATA_BASE_SH (0x004c) +#define ME_XDATA_BASE (0x0050) +#define ME_XMEM_BAR_BASE (0x0054) +#define ME_XMEM_BAR0 (0x0054) +#define ME_XMEM_BAR1 (0x0058) +#define ME_XMEM_BAR2 (0x005C) +#define ME_XMEM_BAR3 (0x0060) +#define ME_XMEM_BAR4 (0x0064) +#define ME_XMEM_BAR5 (0x0068) +#define ME_XMEM_BAR6 (0x006C) +#define ME_XMEM_BAR7 (0x0070) +#define ME_XMEM_BAR8 (0x0074) +#define ME_XMEM_BAR9 (0x0078) +#define ME_XMEM_BAR10 (0x007C) +#define ME_XMEM_BAR11 (0x0080) +#define ME_XMEM_BAR12 (0x0084) +#define ME_XMEM_BAR13 (0x0088) +#define ME_XMEM_BAR14 (0x008C) +#define ME_XMEM_BAR15 (0x0090) +#define ME_XMEM_BAR16 (0x0094) + +#define WHILE_DELAY 20000 +/* +** Define where in ME Processor's memory map the Stratify chip lives +*/ + +#define MAXSWAPSIZE (8 * 1024) //8k *(32bits) + +// Mailboxes +#define MSG_LENGTH 16 // x16 bits +#define YES_REPLY 1 +#define NO_REPLY 0 + +#define CMV_TIMEOUT 1000 //jiffies + +// Block size per BAR +#define SDRAM_SEGMENT_SIZE (64*1024) +// Number of Bar registers +#define MAX_BAR_REGISTERS (17) + +#define XDATA_REGISTER (15) + +// ARC register addresss +#define ARC_STATUS 0x0 +#define ARC_LP_START 0x2 +#define ARC_LP_END 0x3 +#define ARC_DEBUG 0x5 +#define ARC_INT_MASK 0x10A + +#define IRAM0_BASE (0x00000) +#define IRAM1_BASE (0x04000) +#if defined(CONFIG_DANUBE) +#define BRAM_BASE (0x0A000) +#elif defined(CONFIG_AMAZON_SE) || defined(CONFIG_AR9) || defined(CONFIG_VR9) +#define BRAM_BASE (0x08000) +#endif +#define XRAM_BASE (0x18000) +#define YRAM_BASE (0x1A000) +#define EXT_MEM_BASE (0x80000) +#define ARC_GPIO_CTRL (0xC030) +#define ARC_GPIO_DATA (0xC034) + +#define IRAM0_SIZE (16*1024) +#define IRAM1_SIZE (16*1024) +#define BRAM_SIZE (12*1024) +#define XRAM_SIZE (8*1024) +#define YRAM_SIZE (8*1024) +#define EXT_MEM_SIZE (1536*1024) + +#define ADSL_BASE (0x20000) +#define CRI_BASE (ADSL_BASE + 0x11F00) +#define CRI_CCR0 (CRI_BASE + 0x00) +#define CRI_RST (CRI_BASE + 0x04*4) +#define ADSL_DILV_BASE (ADSL_BASE+0x20000) + +// +#define IRAM0_ADDR_BIT_MASK 0xFFF +#define IRAM1_ADDR_BIT_MASK 0xFFF +#define BRAM_ADDR_BIT_MASK 0xFFF +#define RX_DILV_ADDR_BIT_MASK 0x1FFF + +/*** Bit definitions ***/ +#define ARC_AUX_HALT (1 << 25) +#define ARC_DEBUG_HALT (1 << 1) +#define FALSE 0 +#define TRUE 1 +#define BIT0 (1<<0) +#define BIT1 (1<<1) +#define BIT2 (1<<2) +#define BIT3 (1<<3) +#define BIT4 (1<<4) +#define BIT5 (1<<5) +#define BIT6 (1<<6) +#define BIT7 (1<<7) +#define BIT8 (1<<8) +#define BIT9 (1<<9) +#define BIT10 (1<<10) +#define BIT11 (1<<11) +#define BIT12 (1<<12) +#define BIT13 (1<<13) +#define BIT14 (1<<14) +#define BIT15 (1<<15) +#define BIT16 (1<<16) +#define BIT17 (1<<17) +#define BIT18 (1<<18) +#define BIT19 (1<<19) +#define BIT20 (1<<20) +#define BIT21 (1<<21) +#define BIT22 (1<<22) +#define BIT23 (1<<23) +#define BIT24 (1<<24) +#define BIT25 (1<<25) +#define BIT26 (1<<26) +#define BIT27 (1<<27) +#define BIT28 (1<<28) +#define BIT29 (1<<29) +#define BIT30 (1<<30) +#define BIT31 (1<<31) + +// CRI_CCR0 Register definitions +#define CLK_2M_MODE_ENABLE BIT6 +#define ACL_CLK_MODE_ENABLE BIT4 +#define FDF_CLK_MODE_ENABLE BIT2 +#define STM_CLK_MODE_ENABLE BIT0 + +// CRI_RST Register definitions +#define FDF_SRST BIT3 +#define MTE_SRST BIT2 +#define FCI_SRST BIT1 +#define AAI_SRST BIT0 + +// MEI_TO_ARC_INTERRUPT Register definitions +#define MEI_TO_ARC_INT1 BIT3 +#define MEI_TO_ARC_INT0 BIT2 +#define MEI_TO_ARC_CS_DONE BIT1 //need to check +#define MEI_TO_ARC_MSGAV BIT0 + +// ARC_TO_MEI_INTERRUPT 
Register definitions +#define ARC_TO_MEI_INT1 BIT8 +#define ARC_TO_MEI_INT0 BIT7 +#define ARC_TO_MEI_CS_REQ BIT6 +#define ARC_TO_MEI_DBG_DONE BIT5 +#define ARC_TO_MEI_MSGACK BIT4 +#define ARC_TO_MEI_NO_ACCESS BIT3 +#define ARC_TO_MEI_CHECK_AAITX BIT2 +#define ARC_TO_MEI_CHECK_AAIRX BIT1 +#define ARC_TO_MEI_MSGAV BIT0 + +// ARC_TO_MEI_INTERRUPT_MASK Register definitions +#define GP_INT1_EN BIT8 +#define GP_INT0_EN BIT7 +#define CS_REQ_EN BIT6 +#define DBG_DONE_EN BIT5 +#define MSGACK_EN BIT4 +#define NO_ACC_EN BIT3 +#define AAITX_EN BIT2 +#define AAIRX_EN BIT1 +#define MSGAV_EN BIT0 + +#define MEI_SOFT_RESET BIT0 + +#define HOST_MSTR BIT0 + +#define JTAG_MASTER_MODE 0x0 +#define MEI_MASTER_MODE HOST_MSTR + +// MEI_DEBUG_DECODE Register definitions +#define MEI_DEBUG_DEC_MASK (0x3) +#define MEI_DEBUG_DEC_AUX_MASK (0x0) +#define ME_DBG_DECODE_DMP1_MASK (0x1) +#define MEI_DEBUG_DEC_DMP2_MASK (0x2) +#define MEI_DEBUG_DEC_CORE_MASK (0x3) + +#define AUX_STATUS (0x0) +#define AUX_ARC_GPIO_CTRL (0x10C) +#define AUX_ARC_GPIO_DATA (0x10D) +// ARC_TO_MEI_MAILBOX[11] is a special location used to indicate +// page swap requests. +#if defined(CONFIG_DANUBE) +#define OMBOX_BASE 0xDF80 +#define ARC_TO_MEI_MAILBOX 0xDFA0 +#define IMBOX_BASE 0xDFC0 +#define MEI_TO_ARC_MAILBOX 0xDFD0 +#elif defined(CONFIG_AMAZON_SE) || defined(CONFIG_AR9) || defined(CONFIG_VR9) +#define OMBOX_BASE 0xAF80 +#define ARC_TO_MEI_MAILBOX 0xAFA0 +#define IMBOX_BASE 0xAFC0 +#define MEI_TO_ARC_MAILBOX 0xAFD0 +#endif + +#define MEI_TO_ARC_MAILBOXR (MEI_TO_ARC_MAILBOX + 0x2C) +#define ARC_MEI_MAILBOXR (ARC_TO_MEI_MAILBOX + 0x2C) +#define OMBOX1 (OMBOX_BASE+0x4) + +// Codeswap request messages are indicated by setting BIT31 +#define OMB_CODESWAP_MESSAGE_MSG_TYPE_MASK (0x80000000) + +// Clear Eoc messages received are indicated by setting BIT17 +#define OMB_CLEAREOC_INTERRUPT_CODE (0x00020000) +#define OMB_REBOOT_INTERRUPT_CODE (1 << 18) + +/* +** Swap page header +*/ +// Page must be loaded at boot time if size field has BIT31 set +#define BOOT_FLAG (BIT31) +#define BOOT_FLAG_MASK ~BOOT_FLAG + +#define FREE_RELOAD 1 +#define FREE_SHOWTIME 2 +#define FREE_ALL 3 + +// marcos +#define IFX_MEI_WRITE_REGISTER_L(data,addr) *((volatile u32*)(addr)) = (u32)(data) +#define IFX_MEI_READ_REGISTER_L(addr) (*((volatile u32*)(addr))) +#define SET_BIT(reg, mask) reg |= (mask) +#define CLEAR_BIT(reg, mask) reg &= (~mask) +#define CLEAR_BITS(reg, mask) CLEAR_BIT(reg, mask) +//#define SET_BITS(reg, mask) SET_BIT(reg, mask) +#define SET_BITFIELD(reg, mask, off, val) {reg &= (~mask); reg |= (val << off);} + +#define ALIGN_SIZE ( 1L<<10 ) //1K size align +#define MEM_ALIGN(addr) (((addr) + ALIGN_SIZE - 1) & ~ (ALIGN_SIZE -1) ) + +// swap marco +#define MEI_HALF_WORD_SWAP(data) {data = ((data & 0xffff)<<16) + ((data & 0xffff0000)>>16);} +#define MEI_BYTE_SWAP(data) {data = ((data & 0xff)<<24) + ((data & 0xff00)<<8)+ ((data & 0xff0000)>>8)+ ((data & 0xff000000)>>24);} + + +#ifdef CONFIG_PROC_FS +typedef struct reg_entry +{ + int *flag; + char name[30]; /* big enough to hold names */ + char description[100]; /* big enough to hold description */ + unsigned short low_ino; +} reg_entry_t; +#endif +// Swap page header describes size in 32-bit words, load location, and image offset +// for program and/or data segments +typedef struct _arc_swp_page_hdr { + u32 p_offset; //Offset bytes of progseg from beginning of image + u32 p_dest; //Destination addr of progseg on processor + u32 p_size; //Size in 32-bitwords of program segment + u32 d_offset; //Offset bytes of 
dataseg from beginning of image + u32 d_dest; //Destination addr of dataseg on processor + u32 d_size; //Size in 32-bitwords of data segment +} ARC_SWP_PAGE_HDR; + +/* +** Swap image header +*/ +#define GET_PROG 0 // Flag used for program mem segment +#define GET_DATA 1 // Flag used for data mem segment + +// Image header contains size of image, checksum for image, and count of +// page headers. Following that are 'count' page headers followed by +// the code and/or data segments to be loaded +typedef struct _arc_img_hdr { + u32 size; // Size of binary image in bytes + u32 checksum; // Checksum for image + u32 count; // Count of swp pages in image + ARC_SWP_PAGE_HDR page[1]; // Should be "count" pages - '1' to make compiler happy +} ARC_IMG_HDR; + +typedef struct smmu_mem_info { + int type; + int boot; + unsigned long nCopy; + unsigned long size; + unsigned char *address; + unsigned char *org_address; +} smmu_mem_info_t; + +#ifdef __KERNEL__ +typedef struct ifx_mei_device_private { + int modem_ready; + int arcmsgav; + int cmv_reply; + int cmv_waiting; + // Mei to ARC CMV count, reply count, ARC Indicator count + int modem_ready_cnt; + int cmv_count; + int reply_count; + unsigned long image_size; + int nBar; + u16 Recent_indicator[MSG_LENGTH]; + + u16 CMV_RxMsg[MSG_LENGTH] __attribute__ ((aligned (4))); + + smmu_mem_info_t adsl_mem_info[MAX_BAR_REGISTERS]; + ARC_IMG_HDR *img_hdr; + // to wait for arc cmv reply, sleep on wait_queue_arcmsgav; + wait_queue_head_t wait_queue_arcmsgav; + wait_queue_head_t wait_queue_modemready; + struct semaphore mei_cmv_sema; +} ifx_mei_device_private_t; +#endif +typedef struct winhost_message { + union { + u16 RxMessage[MSG_LENGTH] __attribute__ ((aligned (4))); + u16 TxMessage[MSG_LENGTH] __attribute__ ((aligned (4))); + } msg; +} DSL_DEV_WinHost_Message_t; +/******************************************************************************************************** + * DSL CPE API Driver Stack Interface Definitions + * *****************************************************************************************************/ +/** IOCTL codes for bsp driver */ +#define DSL_IOC_MEI_BSP_MAGIC 's' + +#define DSL_FIO_BSP_DSL_START _IO (DSL_IOC_MEI_BSP_MAGIC, 0) +#define DSL_FIO_BSP_RUN _IO (DSL_IOC_MEI_BSP_MAGIC, 1) +#define DSL_FIO_BSP_FREE_RESOURCE _IO (DSL_IOC_MEI_BSP_MAGIC, 2) +#define DSL_FIO_BSP_RESET _IO (DSL_IOC_MEI_BSP_MAGIC, 3) +#define DSL_FIO_BSP_REBOOT _IO (DSL_IOC_MEI_BSP_MAGIC, 4) +#define DSL_FIO_BSP_HALT _IO (DSL_IOC_MEI_BSP_MAGIC, 5) +#define DSL_FIO_BSP_BOOTDOWNLOAD _IO (DSL_IOC_MEI_BSP_MAGIC, 6) +#define DSL_FIO_BSP_JTAG_ENABLE _IO (DSL_IOC_MEI_BSP_MAGIC, 7) +#define DSL_FIO_FREE_RESOURCE _IO (DSL_IOC_MEI_BSP_MAGIC, 8) +#define DSL_FIO_ARC_MUX_TEST _IO (DSL_IOC_MEI_BSP_MAGIC, 9) +#define DSL_FIO_BSP_REMOTE _IOW (DSL_IOC_MEI_BSP_MAGIC, 10, u32) +#define DSL_FIO_BSP_GET_BASE_ADDRESS _IOR (DSL_IOC_MEI_BSP_MAGIC, 11, u32) +#define DSL_FIO_BSP_IS_MODEM_READY _IOR (DSL_IOC_MEI_BSP_MAGIC, 12, u32) +#define DSL_FIO_BSP_GET_VERSION _IOR (DSL_IOC_MEI_BSP_MAGIC, 13, DSL_DEV_Version_t) +#define DSL_FIO_BSP_CMV_WINHOST _IOWR(DSL_IOC_MEI_BSP_MAGIC, 14, DSL_DEV_WinHost_Message_t) +#define DSL_FIO_BSP_CMV_READ _IOWR(DSL_IOC_MEI_BSP_MAGIC, 15, DSL_DEV_MeiReg_t) +#define DSL_FIO_BSP_CMV_WRITE _IOW (DSL_IOC_MEI_BSP_MAGIC, 16, DSL_DEV_MeiReg_t) +#define DSL_FIO_BSP_DEBUG_READ _IOWR(DSL_IOC_MEI_BSP_MAGIC, 17, DSL_DEV_MeiDebug_t) +#define DSL_FIO_BSP_DEBUG_WRITE _IOWR(DSL_IOC_MEI_BSP_MAGIC, 18, DSL_DEV_MeiDebug_t) +#define DSL_FIO_BSP_GET_CHIP_INFO _IOR 
(DSL_IOC_MEI_BSP_MAGIC, 19, DSL_DEV_HwVersion_t) + +#define DSL_DEV_MEIDEBUG_BUFFER_SIZES 512 + +typedef struct DSL_DEV_MeiDebug +{ + DSL_uint32_t iAddress; + DSL_uint32_t iCount; + DSL_uint32_t buffer[DSL_DEV_MEIDEBUG_BUFFER_SIZES]; +} DSL_DEV_MeiDebug_t; /* meidebug */ + +/** + * Structure is used for debug access only. + * Refer to configure option INCLUDE_ADSL_WINHOST_DEBUG */ +typedef struct struct_meireg +{ + /* + * Specifies that address for debug access */ + unsigned long iAddress; + /* + * Specifies the pointer to the data that has to be written or returns a + * pointer to the data that has been read out*/ + unsigned long iData; +} DSL_DEV_MeiReg_t; /* meireg */ + +typedef struct DSL_DEV_Device +{ + DSL_int_t nInUse; /* modem state, update by bsp driver, */ + DSL_void_t *pPriv; + DSL_uint32_t base_address; /* mei base address */ + DSL_int_t nIrq[2]; /* irq number */ +#define IFX_DFEIR 0 +#define IFX_DYING_GASP 1 + DSL_DEV_MeiDebug_t lop_debugwr; /* dying gasp */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)) + struct module *owner; +#endif +} DSL_DEV_Device_t; /* ifx_adsl_device_t */ + +#define DSL_DEV_PRIVATE(dev) ((ifx_mei_device_private_t*)(dev->pPriv)) + +typedef struct DSL_DEV_Version /* ifx_adsl_bsp_version */ +{ + unsigned long major; + unsigned long minor; + unsigned long revision; +} DSL_DEV_Version_t; /* ifx_adsl_bsp_version_t */ + +typedef struct DSL_DEV_ChipInfo +{ + unsigned long major; + unsigned long minor; +} DSL_DEV_HwVersion_t; + +typedef struct +{ + DSL_uint8_t dummy; +} DSL_DEV_DeviceConfig_t; + +/** error code definitions */ +typedef enum DSL_DEV_MeiError +{ + DSL_DEV_MEI_ERR_SUCCESS = 0, + DSL_DEV_MEI_ERR_FAILURE = -1, + DSL_DEV_MEI_ERR_MAILBOX_FULL = -2, + DSL_DEV_MEI_ERR_MAILBOX_EMPTY = -3, + DSL_DEV_MEI_ERR_MAILBOX_TIMEOUT = -4 +} DSL_DEV_MeiError_t; /* MEI_ERROR */ + +typedef enum { + DSL_BSP_MEMORY_READ=0, + DSL_BSP_MEMORY_WRITE, +} DSL_BSP_MemoryAccessType_t; /* ifx_adsl_memory_access_type_t */ + +typedef enum +{ + DSL_LED_LINK_ID=0, + DSL_LED_DATA_ID +} DSL_DEV_LedId_t; /* ifx_adsl_led_id_t */ + +typedef enum +{ + DSL_LED_LINK_TYPE=0, + DSL_LED_DATA_TYPE +} DSL_DEV_LedType_t; /* ifx_adsl_led_type_t */ + +typedef enum +{ + DSL_LED_HD_CPU=0, + DSL_LED_HD_FW +} DSL_DEV_LedHandler_t; /* ifx_adsl_led_handler_t */ + +typedef enum { + DSL_LED_ON=0, + DSL_LED_OFF, + DSL_LED_FLASH, +} DSL_DEV_LedMode_t; /* ifx_adsl_led_mode_t */ + +typedef enum { + DSL_CPU_HALT=0, + DSL_CPU_RUN, + DSL_CPU_RESET, +} DSL_DEV_CpuMode_t; /* ifx_adsl_cpu_mode_t */ + +#if 0 +typedef enum { + DSL_BSP_EVENT_DYING_GASP = 0, + DSL_BSP_EVENT_CEOC_IRQ, +} DSL_BSP_Event_id_t; /* ifx_adsl_event_id_t */ + +typedef union DSL_BSP_CB_Param +{ + DSL_uint32_t nIrqMessage; +} DSL_BSP_CB_Param_t; /* ifx_adsl_cbparam_t */ + +typedef struct DSL_BSP_CB_Event +{ + DSL_BSP_Event_id_t nID; + DSL_DEV_Device_t *pDev; + DSL_BSP_CB_Param_t *pParam; +} DSL_BSP_CB_Event_t; /* ifx_adsl_cb_event_t */ +#endif + +/* external functions (from the BSP Driver) */ +extern DSL_DEV_Device_t* DSL_BSP_DriverHandleGet(int, int); +extern DSL_int_t DSL_BSP_DriverHandleDelete(DSL_DEV_Device_t *); +extern DSL_DEV_MeiError_t DSL_BSP_FWDownload(DSL_DEV_Device_t *, const DSL_char_t *, DSL_uint32_t, DSL_int32_t *, DSL_int32_t *); +extern int DSL_BSP_KernelIoctls(DSL_DEV_Device_t *, unsigned int, unsigned long); +extern DSL_DEV_MeiError_t DSL_BSP_SendCMV(DSL_DEV_Device_t *, DSL_uint16_t *, DSL_int_t, DSL_uint16_t *); +extern DSL_DEV_MeiError_t DSL_BSP_AdslLedInit(DSL_DEV_Device_t *, DSL_DEV_LedId_t, DSL_DEV_LedType_t, 
DSL_DEV_LedHandler_t); +extern DSL_DEV_MeiError_t DSL_BSP_Showtime(DSL_DEV_Device_t *, DSL_uint32_t, DSL_uint32_t); +extern int DSL_BSP_ATMLedCBRegister( int (*ifx_adsl_ledcallback)(void)); +extern DSL_DEV_MeiError_t DSL_BSP_MemoryDebugAccess(DSL_DEV_Device_t *, DSL_BSP_MemoryAccessType_t, DSL_uint32_t, DSL_uint32_t *, DSL_uint32_t); +extern volatile DSL_DEV_Device_t *adsl_dev; + +/** + * Dummy structure by now to show mechanism of extended data that will be + * provided within event callback itself. + * */ +typedef struct +{ + /** + * Dummy value */ + DSL_uint32_t nDummy1; +} DSL_BSP_CB_Event1DataDummy_t; + +/** + * Dummy structure by now to show mechanism of extended data that will be + * provided within event callback itself. + * */ +typedef struct +{ + /** + * Dummy value */ + DSL_uint32_t nDummy2; +} DSL_BSP_CB_Event2DataDummy_t; + +/** + * encapsulate all data structures that are necessary for status event + * callbacks. + * */ +typedef union +{ + DSL_BSP_CB_Event1DataDummy_t dataEvent1; + DSL_BSP_CB_Event2DataDummy_t dataEvent2; +} DSL_BSP_CB_DATA_Union_t; + + +typedef enum +{ + /** + * Informs the upper layer driver (DSL CPE API) about a reboot request from the + * firmware. + * \note This event does NOT include any additional data. + * More detailed information upon reboot reason has to be requested from + * upper layer software via CMV (INFO 109) if necessary. */ + DSL_BSP_CB_FIRST = 0, + DSL_BSP_CB_DYING_GASP, + DSL_BSP_CB_CEOC_IRQ, + DSL_BSP_CB_FIRMWARE_REBOOT, + /** + * Delimiter only */ + DSL_BSP_CB_LAST +} DSL_BSP_CB_Type_t; + +/** + * Specifies the common event type that has to be used for registering and + * signalling of interrupts/autonomous status events from MEI BSP Driver. + * + * \param pDev + * Context pointer from MEI BSP Driver. + * + * \param IFX_ADSL_BSP_CallbackType_t + * Specifies the event callback type (reason of callback). Regrading to the + * setting of this value the data which is included in the following union + * might have different meanings. + * Please refer to the description of the union to get information about the + * meaning of the included data. + * + * \param pData + * Data according to \ref DSL_BSP_CB_DATA_Union_t. + * If this pointer is NULL there is no additional data available. 
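+ *
+ * Editor's illustrative sketch (not part of the original driver): a consumer
+ * could hook a handler of this type through DSL_BSP_EventCBRegister(),
+ * declared further below; names such as my_dsl_event_cb and my_cb are
+ * placeholders:
+ *
+ *   static int my_dsl_event_cb(DSL_DEV_Device_t *pDev,
+ *                              DSL_BSP_CB_Type_t nCallbackType,
+ *                              DSL_BSP_CB_DATA_Union_t *pData)
+ *   {
+ *       if (nCallbackType == DSL_BSP_CB_DYING_GASP)
+ *           printk(KERN_WARNING "DSL: dying gasp reported by DFE\n");
+ *       return 0;
+ *   }
+ *
+ *   static DSL_BSP_EventCallBack_t my_cb = {
+ *       .function = my_dsl_event_cb,
+ *       .event    = DSL_BSP_CB_DYING_GASP,
+ *   };
+ *
+ * followed, in the consumer's init code, by DSL_BSP_EventCBRegister(&my_cb);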
+ * + * \return depending on event + */ +typedef int (*DSL_BSP_EventCallback_t) +( + DSL_DEV_Device_t *pDev, + DSL_BSP_CB_Type_t nCallbackType, + DSL_BSP_CB_DATA_Union_t *pData +); + +typedef struct { + DSL_BSP_EventCallback_t function; + DSL_BSP_CB_Type_t event; + DSL_BSP_CB_DATA_Union_t *pData; +} DSL_BSP_EventCallBack_t; + +extern int DSL_BSP_EventCBRegister(DSL_BSP_EventCallBack_t *); +extern int DSL_BSP_EventCBUnregister(DSL_BSP_EventCallBack_t *); + +/** Modem states */ +#define DSL_DEV_STAT_InitState 0x0000 +#define DSL_DEV_STAT_ReadyState 0x0001 +#define DSL_DEV_STAT_FailState 0x0002 +#define DSL_DEV_STAT_IdleState 0x0003 +#define DSL_DEV_STAT_QuietState 0x0004 +#define DSL_DEV_STAT_GhsState 0x0005 +#define DSL_DEV_STAT_FullInitState 0x0006 +#define DSL_DEV_STAT_ShowTimeState 0x0007 +#define DSL_DEV_STAT_FastRetrainState 0x0008 +#define DSL_DEV_STAT_LoopDiagMode 0x0009 +#define DSL_DEV_STAT_ShortInit 0x000A /* Bis short initialization */ + +#define DSL_DEV_STAT_CODESWAP_COMPLETE 0x0002 + +#endif //IFXMIPS_MEI_H diff --git a/package/lqdsl/src/lantiq_mei.c b/package/lqdsl/src/lantiq_mei.c new file mode 100644 index 0000000000..7eeab33686 --- /dev/null +++ b/package/lqdsl/src/lantiq_mei.c @@ -0,0 +1,3002 @@ +/****************************************************************************** + + Copyright (c) 2009 + Infineon Technologies AG + Am Campeon 1-12; 81726 Munich, Germany + + For licensing information, see the file 'LICENSE' in the root folder of + this software module. + +******************************************************************************/ + +/*! + \defgroup AMAZON_S_MEI Amazon-S MEI Driver Module + \brief Amazon-S MEI driver module + */ + +/*! + \defgroup Internal Compile Parametere + \ingroup AMAZON_S_MEI + \brief exported functions for other driver use + */ + +/*! 
+ \file amazon_s_mei_bsp.c + \ingroup AMAZON_S_MEI + \brief Amazon-S MEI driver file + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/version.h> +#include <generated/utsrelease.h> +#include <linux/types.h> +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/proc_fs.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/sched.h> +#include <asm/uaccess.h> +#include <asm/hardirq.h> + +#include <lantiq.h> +#include <lantiq_regs.h> +#include "ifxmips_atm.h" +#define IFX_MEI_BSP +#include "ifxmips_mei_interface.h" + +/*#define LQ_RCU_RST IFX_RCU_RST_REQ +#define LQ_RCU_RST_REQ_ARC_JTAG IFX_RCU_RST_REQ_ARC_JTAG +#define LQ_RCU_RST_REQ_DFE IFX_RCU_RST_REQ_DFE +#define LQ_RCU_RST_REQ_AFE IFX_RCU_RST_REQ_AFE +#define IFXMIPS_FUSE_BASE_ADDR IFX_FUSE_BASE_ADDR +#define IFXMIPS_ICU_IM0_IER IFX_ICU_IM0_IER +#define IFXMIPS_ICU_IM2_IER IFX_ICU_IM2_IER +#define LQ_MEI_INT IFX_MEI_INT +#define LQ_MEI_DYING_GASP_INT IFX_MEI_DYING_GASP_INT +#define LQ_MEI_BASE_ADDR IFX_MEI_SPACE_ACCESS +#define IFXMIPS_PMU_PWDCR IFX_PMU_PWDCR +#define IFXMIPS_MPS_CHIPID IFX_MPS_CHIPID + +#define ifxmips_port_reserve_pin ifx_gpio_pin_reserve +#define ifxmips_port_set_dir_in ifx_gpio_dir_in_set +#define ifxmips_port_clear_altsel0 ifx_gpio_altsel0_set +#define ifxmips_port_clear_altsel1 ifx_gpio_altsel1_clear +#define ifxmips_port_set_open_drain ifx_gpio_open_drain_clear +#define ifxmips_port_free_pin ifx_gpio_pin_free +#define ifxmips_mask_and_ack_irq bsp_mask_and_ack_irq +#define IFXMIPS_MPS_CHIPID_VERSION_GET IFX_MCD_CHIPID_VERSION_GET +#define lq_r32(reg) __raw_readl(reg) +#define lq_w32(val, reg) __raw_writel(val, reg) +#define lq_w32_mask(clear, set, reg) lq_w32((lq_r32(reg) & ~clear) | set, reg) +*/ + +#define LQ_RCU_RST_REQ_DFE (1 << 7) +#define LQ_RCU_RST_REQ_AFE (1 << 11) +#define LQ_PMU_PWDCR ((u32 *)(LQ_PMU_BASE_ADDR + 0x001C)) +#define LQ_PMU_PWDSR ((u32 *)(LQ_PMU_BASE_ADDR + 0x0020)) +#define LQ_RCU_RST ((u32 *)(LQ_RCU_BASE_ADDR + 0x0010)) +#define LQ_RCU_RST_ALL 0x40000000 +#define LQ_ICU_BASE_ADDR (KSEG1 | 0x1F880200) + +#define LQ_ICU_IM0_ISR ((u32 *)(LQ_ICU_BASE_ADDR + 0x0000)) +#define LQ_ICU_IM0_IER ((u32 *)(LQ_ICU_BASE_ADDR + 0x0008)) +#define LQ_ICU_IM0_IOSR ((u32 *)(LQ_ICU_BASE_ADDR + 0x0010)) +#define LQ_ICU_IM0_IRSR ((u32 *)(LQ_ICU_BASE_ADDR + 0x0018)) +#define LQ_ICU_IM0_IMR ((u32 *)(LQ_ICU_BASE_ADDR + 0x0020)) + + +#define LQ_ICU_IM1_ISR ((u32 *)(LQ_ICU_BASE_ADDR + 0x0028)) +#define LQ_ICU_IM2_ISR ((u32 *)(LQ_ICU_BASE_ADDR + 0x0050)) +#define LQ_ICU_IM3_ISR ((u32 *)(LQ_ICU_BASE_ADDR + 0x0078)) +#define LQ_ICU_IM4_ISR ((u32 *)(LQ_ICU_BASE_ADDR + 0x00A0)) + +#define LQ_ICU_OFFSET (LQ_ICU_IM1_ISR - LQ_ICU_IM0_ISR) +#define LQ_ICU_IM2_IER (LQ_ICU_IM0_IER + LQ_ICU_OFFSET) + +#define IFX_MEI_EMSG(fmt, args...) pr_err("[%s %d]: " fmt,__FUNCTION__, __LINE__, ## args) +#define IFX_MEI_DMSG(fmt, args...) 
pr_debug("[%s %d]: " fmt,__FUNCTION__, __LINE__, ## args) + +#define LQ_FUSE_BASE (KSEG1 + 0x1F107354) + +#ifdef CONFIG_LQ_MEI_FW_LOOPBACK +//#define DFE_MEM_TEST +//#define DFE_PING_TEST +#define DFE_ATM_LOOPBACK + + +#ifdef DFE_ATM_LOOPBACK +#include <asm/ifxmips/ifxmips_mei_fw_loopback.h> +#endif + +void dfe_loopback_irq_handler (DSL_DEV_Device_t *pDev); + +#endif //CONFIG_AMAZON_S_MEI_FW_LOOPBACK + +DSL_DEV_Version_t bsp_mei_version = { + major: 5, + minor: 0, + revision:0 +}; +DSL_DEV_HwVersion_t bsp_chip_info; + +#define IFX_MEI_DEVNAME "ifx_mei" +#define BSP_MAX_DEVICES 1 + +DSL_DEV_MeiError_t DSL_BSP_FWDownload (DSL_DEV_Device_t *, const char *, unsigned long, long *, long *); +DSL_DEV_MeiError_t DSL_BSP_Showtime (DSL_DEV_Device_t *, DSL_uint32_t, DSL_uint32_t); +DSL_DEV_MeiError_t DSL_BSP_AdslLedInit (DSL_DEV_Device_t *, DSL_DEV_LedId_t, DSL_DEV_LedType_t, DSL_DEV_LedHandler_t); +//DSL_DEV_MeiError_t DSL_BSP_AdslLedSet (DSL_DEV_Device_t *, DSL_DEV_LedId_t, DSL_DEV_LedMode_t); +DSL_DEV_MeiError_t DSL_BSP_MemoryDebugAccess (DSL_DEV_Device_t *, DSL_BSP_MemoryAccessType_t, DSL_uint32_t, DSL_uint32_t*, DSL_uint32_t); +DSL_DEV_MeiError_t DSL_BSP_SendCMV (DSL_DEV_Device_t *, u16 *, int, u16 *); + +int DSL_BSP_KernelIoctls (DSL_DEV_Device_t *, unsigned int, unsigned long); + +static DSL_DEV_MeiError_t IFX_MEI_RunAdslModem (DSL_DEV_Device_t *); +static DSL_DEV_MeiError_t IFX_MEI_CpuModeSet (DSL_DEV_Device_t *, DSL_DEV_CpuMode_t); +static DSL_DEV_MeiError_t IFX_MEI_DownloadBootCode (DSL_DEV_Device_t *); +static DSL_DEV_MeiError_t IFX_MEI_ArcJtagEnable (DSL_DEV_Device_t *, int); +static DSL_DEV_MeiError_t IFX_MEI_AdslMailboxIRQEnable (DSL_DEV_Device_t *, int); + +static int IFX_MEI_GetPage (DSL_DEV_Device_t *, u32, u32, u32, u32 *, u32 *); +static int IFX_MEI_BarUpdate (DSL_DEV_Device_t *, int); + +static ssize_t IFX_MEI_Write (DSL_DRV_file_t *, const char *, size_t, loff_t *); +static int IFX_MEI_UserIoctls (DSL_DRV_inode_t *, DSL_DRV_file_t *, unsigned int, unsigned long); +static int IFX_MEI_Open (DSL_DRV_inode_t *, DSL_DRV_file_t *); +static int IFX_MEI_Release (DSL_DRV_inode_t *, DSL_DRV_file_t *); + +void AMAZON_SE_MEI_ARC_MUX_Test(void); + +#ifdef CONFIG_PROC_FS +static int IFX_MEI_ProcRead (struct file *, char *, size_t, loff_t *); +static ssize_t IFX_MEI_ProcWrite (struct file *, const char *, size_t, loff_t *); + +#define PROC_ITEMS 11 +#define MEI_DIRNAME "ifxmips_mei" + +static struct proc_dir_entry *meidir; +static struct file_operations IFX_MEI_ProcOperations = { + read:IFX_MEI_ProcRead, + write:IFX_MEI_ProcWrite, +}; +static reg_entry_t regs[BSP_MAX_DEVICES][PROC_ITEMS]; //total items to be monitored by /proc/mei +#define NUM_OF_REG_ENTRY (sizeof(regs[0])/sizeof(reg_entry_t)) +#endif //CONFIG_PROC_FS + +void IFX_MEI_ARC_MUX_Test(void); + +static int adsl_dummy_ledcallback(void); + +int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *) = NULL; +EXPORT_SYMBOL(ifx_mei_atm_showtime_enter); + +int (*ifx_mei_atm_showtime_exit)(void) = NULL; +EXPORT_SYMBOL(ifx_mei_atm_showtime_exit); + +static int (*g_adsl_ledcallback)(void) = adsl_dummy_ledcallback; + +static unsigned int g_tx_link_rate[2] = {0}; + +static void *g_xdata_addr = NULL; + +static u32 *mei_arc_swap_buff = NULL; // holding swap pages + +extern void lq_mask_and_ack_irq(unsigned int irq_nr); +#define MEI_MASK_AND_ACK_IRQ lq_mask_and_ack_irq + +#define MEI_MAJOR 105 +static int dev_major = MEI_MAJOR; + +static struct file_operations bsp_mei_operations = { + owner:THIS_MODULE, + open:IFX_MEI_Open, + 
release:IFX_MEI_Release, + write:IFX_MEI_Write, + ioctl:IFX_MEI_UserIoctls, +}; + +static DSL_DEV_Device_t dsl_devices[BSP_MAX_DEVICES]; + +static ifx_mei_device_private_t + sDanube_Mei_Private[BSP_MAX_DEVICES]; + +static DSL_BSP_EventCallBack_t dsl_bsp_event_callback[DSL_BSP_CB_LAST + 1]; + +/** + * Write a value to register + * This function writes a value to danube register + * + * \param ul_address The address to write + * \param ul_data The value to write + * \ingroup Internal + */ +static void +IFX_MEI_LongWordWrite (u32 ul_address, u32 ul_data) +{ + IFX_MEI_WRITE_REGISTER_L (ul_data, ul_address); + wmb(); + return; +} + +/** + * Write a value to register + * This function writes a value to danube register + * + * \param pDev the device pointer + * \param ul_address The address to write + * \param ul_data The value to write + * \ingroup Internal + */ +static void +IFX_MEI_LongWordWriteOffset (DSL_DEV_Device_t * pDev, u32 ul_address, + u32 ul_data) +{ + IFX_MEI_WRITE_REGISTER_L (ul_data, pDev->base_address + ul_address); + wmb(); + return; +} + +/** + * Read the danube register + * This function read the value from danube register + * + * \param ul_address The address to write + * \param pul_data Pointer to the data + * \ingroup Internal + */ +static void +IFX_MEI_LongWordRead (u32 ul_address, u32 * pul_data) +{ + *pul_data = IFX_MEI_READ_REGISTER_L (ul_address); + rmb(); + return; +} + +/** + * Read the danube register + * This function read the value from danube register + * + * \param pDev the device pointer + * \param ul_address The address to write + * \param pul_data Pointer to the data + * \ingroup Internal + */ +static void +IFX_MEI_LongWordReadOffset (DSL_DEV_Device_t * pDev, u32 ul_address, + u32 * pul_data) +{ + *pul_data = IFX_MEI_READ_REGISTER_L (pDev->base_address + ul_address); + rmb(); + return; +} + +/** + * Write several DWORD datas to ARC memory via ARC DMA interface + * This function writes several DWORD datas to ARC memory via DMA interface. + * + * \param pDev the device pointer + * \param destaddr The address to write + * \param databuff Pointer to the data buffer + * \param databuffsize Number of DWORDs to write + * \return DSL_DEV_MEI_ERR_SUCCESS or DSL_DEV_MEI_ERR_FAILURE + * \ingroup Internal + */ +static DSL_DEV_MeiError_t +IFX_MEI_DMAWrite (DSL_DEV_Device_t * pDev, u32 destaddr, + u32 * databuff, u32 databuffsize) +{ + u32 *p = databuff; + u32 temp; + + if (destaddr & 3) + return DSL_DEV_MEI_ERR_FAILURE; + + // Set the write transfer address + IFX_MEI_LongWordWriteOffset (pDev, ME_DX_AD, destaddr); + + // Write the data pushed across DMA + while (databuffsize--) { + temp = *p; + if (destaddr == MEI_TO_ARC_MAILBOX) + MEI_HALF_WORD_SWAP (temp); + IFX_MEI_LongWordWriteOffset (pDev, (u32) ME_DX_DATA, temp); + p++; + } + + return DSL_DEV_MEI_ERR_SUCCESS; + +} + +/** + * Read several DWORD datas from ARC memory via ARC DMA interface + * This function reads several DWORD datas from ARC memory via DMA interface. 
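+ * The source address must be DWORD aligned; it is written once to ME_DX_AD
+ * and the payload is then popped word by word from ME_DX_DATA. When the
+ * destination buffer is the CMV receive buffer, each word is half-word
+ * swapped on the fly (see the loop below).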
+ * + * \param pDev the device pointer + * \param srcaddr The address to read + * \param databuff Pointer to the data buffer + * \param databuffsize Number of DWORDs to read + * \return DSL_DEV_MEI_ERR_SUCCESS or DSL_DEV_MEI_ERR_FAILURE + * \ingroup Internal + */ +static DSL_DEV_MeiError_t +IFX_MEI_DMARead (DSL_DEV_Device_t * pDev, u32 srcaddr, u32 * databuff, + u32 databuffsize) +{ + u32 *p = databuff; + u32 temp; + + if (srcaddr & 3) + return DSL_DEV_MEI_ERR_FAILURE; + + // Set the read transfer address + IFX_MEI_LongWordWriteOffset (pDev, (u32) ME_DX_AD, srcaddr); + + // Read the data popped across DMA + while (databuffsize--) { + IFX_MEI_LongWordReadOffset (pDev, (u32) ME_DX_DATA, &temp); + if (databuff == (u32 *) DSL_DEV_PRIVATE(pDev)->CMV_RxMsg) // swap half word + MEI_HALF_WORD_SWAP (temp); + *p = temp; + p++; + } + + return DSL_DEV_MEI_ERR_SUCCESS; + +} + +/** + * Switch the ARC control mode + * This function switchs the ARC control mode to JTAG mode or MEI mode + * + * \param pDev the device pointer + * \param mode The mode want to switch: JTAG_MASTER_MODE or MEI_MASTER_MODE. + * \ingroup Internal + */ +static void +IFX_MEI_ControlModeSet (DSL_DEV_Device_t * pDev, int mode) +{ + u32 temp = 0x0; + + IFX_MEI_LongWordReadOffset (pDev, (u32) ME_DBG_MASTER, &temp); + switch (mode) { + case JTAG_MASTER_MODE: + temp &= ~(HOST_MSTR); + break; + case MEI_MASTER_MODE: + temp |= (HOST_MSTR); + break; + default: + IFX_MEI_EMSG ("IFX_MEI_ControlModeSet: unkonwn mode [%d]\n", mode); + return; + } + IFX_MEI_LongWordWriteOffset (pDev, (u32) ME_DBG_MASTER, temp); +} + +/** + * Disable ARC to MEI interrupt + * + * \param pDev the device pointer + * \ingroup Internal + */ +static void +IFX_MEI_IRQDisable (DSL_DEV_Device_t * pDev) +{ + IFX_MEI_LongWordWriteOffset (pDev, (u32) ME_ARC2ME_MASK, 0x0); +} + +/** + * Eable ARC to MEI interrupt + * + * \param pDev the device pointer + * \ingroup Internal + */ +static void +IFX_MEI_IRQEnable (DSL_DEV_Device_t * pDev) +{ + IFX_MEI_LongWordWriteOffset (pDev, (u32) ME_ARC2ME_MASK, MSGAV_EN); +} + +/** + * Poll for transaction complete signal + * This function polls and waits for transaction complete signal. + * + * \param pDev the device pointer + * \ingroup Internal + */ +static void +meiPollForDbgDone (DSL_DEV_Device_t * pDev) +{ + u32 query = 0; + int i = 0; + + while (i < WHILE_DELAY) { + IFX_MEI_LongWordReadOffset (pDev, (u32) ME_ARC2ME_STAT, &query); + query &= (ARC_TO_MEI_DBG_DONE); + if (query) + break; + i++; + if (i == WHILE_DELAY) { + IFX_MEI_EMSG ("PollforDbg fail!\n"); + } + } + IFX_MEI_LongWordWriteOffset (pDev, (u32) ME_ARC2ME_STAT, ARC_TO_MEI_DBG_DONE); // to clear this interrupt +} + +/** + * ARC Debug Memory Access for a single DWORD reading. + * This function used for direct, address-based access to ARC memory. + * + * \param pDev the device pointer + * \param DEC_mode ARC memory space to used + * \param address Address to read + * \param data Pointer to data + * \return DSL_DEV_MEI_ERR_SUCCESS or DSL_DEV_MEI_ERR_FAILURE + * \ingroup Internal + */ +static DSL_DEV_MeiError_t +_IFX_MEI_DBGLongWordRead (DSL_DEV_Device_t * pDev, u32 DEC_mode, + u32 address, u32 * data) +{ + IFX_MEI_LongWordWriteOffset (pDev, (u32) ME_DBG_DECODE, DEC_mode); + IFX_MEI_LongWordWriteOffset (pDev, (u32) ME_DBG_RD_AD, address); + meiPollForDbgDone (pDev); + IFX_MEI_LongWordReadOffset (pDev, (u32) ME_DBG_DATA, data); + return DSL_DEV_MEI_ERR_SUCCESS; +} + +/** + * ARC Debug Memory Access for a single DWORD writing. 
+ * This function used for direct, address-based access to ARC memory. + * + * \param pDev the device pointer + * \param DEC_mode ARC memory space to used + * \param address The address to write + * \param data The data to write + * \return DSL_DEV_MEI_ERR_SUCCESS or DSL_DEV_MEI_ERR_FAILURE + * \ingroup Internal + */ +static DSL_DEV_MeiError_t +_IFX_MEI_DBGLongWordWrite (DSL_DEV_Device_t * pDev, u32 DEC_mode, + u32 address, u32 data) +{ + IFX_MEI_LongWordWriteOffset (pDev, (u32) ME_DBG_DECODE, DEC_mode); + IFX_MEI_LongWordWriteOffset (pDev, (u32) ME_DBG_WR_AD, address); + IFX_MEI_LongWordWriteOffset (pDev, (u32) ME_DBG_DATA, data); + meiPollForDbgDone (pDev); + return DSL_DEV_MEI_ERR_SUCCESS; +} + +/** + * ARC Debug Memory Access for writing. + * This function used for direct, address-based access to ARC memory. + * + * \param pDev the device pointer + * \param destaddr The address to read + * \param databuffer Pointer to data + * \param databuffsize The number of DWORDs to read + * \return DSL_DEV_MEI_ERR_SUCCESS or DSL_DEV_MEI_ERR_FAILURE + * \ingroup Internal + */ + +static DSL_DEV_MeiError_t +IFX_MEI_DebugWrite (DSL_DEV_Device_t * pDev, u32 destaddr, + u32 * databuff, u32 databuffsize) +{ + u32 i; + u32 temp = 0x0; + u32 address = 0x0; + u32 *buffer = 0x0; + + // Open the debug port before DMP memory write + IFX_MEI_ControlModeSet (pDev, MEI_MASTER_MODE); + + // For the requested length, write the address and write the data + address = destaddr; + buffer = databuff; + for (i = 0; i < databuffsize; i++) { + temp = *buffer; + _IFX_MEI_DBGLongWordWrite (pDev, ME_DBG_DECODE_DMP1_MASK, address, temp); + address += 4; + buffer++; + } + + // Close the debug port after DMP memory write + IFX_MEI_ControlModeSet (pDev, JTAG_MASTER_MODE); + + return DSL_DEV_MEI_ERR_SUCCESS; +} + +/** + * ARC Debug Memory Access for reading. + * This function used for direct, address-based access to ARC memory. + * + * \param pDev the device pointer + * \param srcaddr The address to read + * \param databuffer Pointer to data + * \param databuffsize The number of DWORDs to read + * \return DSL_DEV_MEI_ERR_SUCCESS or DSL_DEV_MEI_ERR_FAILURE + * \ingroup Internal + */ +static DSL_DEV_MeiError_t +IFX_MEI_DebugRead (DSL_DEV_Device_t * pDev, u32 srcaddr, u32 * databuff, u32 databuffsize) +{ + u32 i; + u32 temp = 0x0; + u32 address = 0x0; + u32 *buffer = 0x0; + + // Open the debug port before DMP memory read + IFX_MEI_ControlModeSet (pDev, MEI_MASTER_MODE); + + // For the requested length, write the address and read the data + address = srcaddr; + buffer = databuff; + for (i = 0; i < databuffsize; i++) { + _IFX_MEI_DBGLongWordRead (pDev, ME_DBG_DECODE_DMP1_MASK, address, &temp); + *buffer = temp; + address += 4; + buffer++; + } + + // Close the debug port after DMP memory read + IFX_MEI_ControlModeSet (pDev, JTAG_MASTER_MODE); + + return DSL_DEV_MEI_ERR_SUCCESS; +} + +/** + * Send a message to ARC MailBox. + * This function sends a message to ARC Mailbox via ARC DMA interface. + * + * \param pDev the device pointer + * \param msgsrcbuffer Pointer to message. + * \param msgsize The number of words to write. 
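+ *
+ * The message is pushed over the ARC DMA window to MEI_TO_ARC_MAILBOX, a
+ * trailing word is written to MEI_TO_ARC_MAILBOXR, and MEI_TO_ARC_MSGAV is
+ * raised in ME_ME2ARC_INT; the function then polls until the ARC clears the
+ * bit or WHILE_DELAY iterations elapse, which is reported as a failure.
+ *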
+ * \return DSL_DEV_MEI_ERR_SUCCESS or DSL_DEV_MEI_ERR_FAILURE + * \ingroup Internal + */ +static DSL_DEV_MeiError_t +IFX_MEI_MailboxWrite (DSL_DEV_Device_t * pDev, u16 * msgsrcbuffer, + u16 msgsize) +{ + int i; + u32 arc_mailbox_status = 0x0; + u32 temp = 0; + DSL_DEV_MeiError_t meiMailboxError = DSL_DEV_MEI_ERR_SUCCESS; + + // Write to mailbox + meiMailboxError = + IFX_MEI_DMAWrite (pDev, MEI_TO_ARC_MAILBOX, (u32 *) msgsrcbuffer, msgsize / 2); + meiMailboxError = + IFX_MEI_DMAWrite (pDev, MEI_TO_ARC_MAILBOXR, (u32 *) (&temp), 1); + + // Notify arc that mailbox write completed + DSL_DEV_PRIVATE(pDev)->cmv_waiting = 1; + IFX_MEI_LongWordWriteOffset (pDev, (u32) ME_ME2ARC_INT, MEI_TO_ARC_MSGAV); + + i = 0; + while (i < WHILE_DELAY) { // wait for ARC to clear the bit + IFX_MEI_LongWordReadOffset (pDev, (u32) ME_ME2ARC_INT, &arc_mailbox_status); + if ((arc_mailbox_status & MEI_TO_ARC_MSGAV) != MEI_TO_ARC_MSGAV) + break; + i++; + if (i == WHILE_DELAY) { + IFX_MEI_EMSG (">>> Timeout waiting for ARC to clear MEI_TO_ARC_MSGAV!!!" + " MEI_TO_ARC message size = %d DWORDs <<<\n", msgsize/2); + meiMailboxError = DSL_DEV_MEI_ERR_FAILURE; + } + } + + return meiMailboxError; +} + +/** + * Read a message from ARC MailBox. + * This function reads a message from ARC Mailbox via ARC DMA interface. + * + * \param pDev the device pointer + * \param msgsrcbuffer Pointer to message. + * \param msgsize The number of words to read + * \return DSL_DEV_MEI_ERR_SUCCESS or DSL_DEV_MEI_ERR_FAILURE + * \ingroup Internal + */ +static DSL_DEV_MeiError_t +IFX_MEI_MailboxRead (DSL_DEV_Device_t * pDev, u16 * msgdestbuffer, + u16 msgsize) +{ + DSL_DEV_MeiError_t meiMailboxError = DSL_DEV_MEI_ERR_SUCCESS; + // Read from mailbox + meiMailboxError = + IFX_MEI_DMARead (pDev, ARC_TO_MEI_MAILBOX, (u32 *) msgdestbuffer, msgsize / 2); + + // Notify arc that mailbox read completed + IFX_MEI_LongWordWriteOffset (pDev, (u32) ME_ARC2ME_STAT, ARC_TO_MEI_MSGAV); + + return meiMailboxError; +} + +/** + * Download boot pages to ARC. + * This function downloads boot pages to ARC. + * + * \param pDev the device pointer + * \return DSL_DEV_MEI_ERR_SUCCESS or DSL_DEV_MEI_ERR_FAILURE + * \ingroup Internal + */ +static DSL_DEV_MeiError_t +IFX_MEI_DownloadBootPages (DSL_DEV_Device_t * pDev) +{ + int boot_loop; + int page_size; + u32 dest_addr; + + /* + ** DMA the boot code page(s) + */ + + for (boot_loop = 1; + boot_loop < + (DSL_DEV_PRIVATE(pDev)->img_hdr-> count); boot_loop++) { + if ((DSL_DEV_PRIVATE(pDev)-> img_hdr->page[boot_loop].p_size) & BOOT_FLAG) { + page_size = IFX_MEI_GetPage (pDev, boot_loop, + GET_PROG, MAXSWAPSIZE, + mei_arc_swap_buff, + &dest_addr); + if (page_size > 0) { + IFX_MEI_DMAWrite (pDev, dest_addr, + mei_arc_swap_buff, + page_size); + } + } + if ((DSL_DEV_PRIVATE(pDev)-> img_hdr->page[boot_loop].d_size) & BOOT_FLAG) { + page_size = IFX_MEI_GetPage (pDev, boot_loop, + GET_DATA, MAXSWAPSIZE, + mei_arc_swap_buff, + &dest_addr); + if (page_size > 0) { + IFX_MEI_DMAWrite (pDev, dest_addr, + mei_arc_swap_buff, + page_size); + } + } + } + return DSL_DEV_MEI_ERR_SUCCESS; +} + +/** + * Initial efuse rar. 
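+ * Clears the first two words of each on-chip memory block (IRAM0, IRAM1,
+ * BRAM and the ADSL deinterleaver RAM) via the ARC DMA window before the
+ * fuse values are programmed by IFX_MEI_FuseProg().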
+ **/ +static void +IFX_MEI_FuseInit (DSL_DEV_Device_t * pDev) +{ + u32 data = 0; + IFX_MEI_DMAWrite (pDev, IRAM0_BASE, &data, 1); + IFX_MEI_DMAWrite (pDev, IRAM0_BASE + 4, &data, 1); + IFX_MEI_DMAWrite (pDev, IRAM1_BASE, &data, 1); + IFX_MEI_DMAWrite (pDev, IRAM1_BASE + 4, &data, 1); + IFX_MEI_DMAWrite (pDev, BRAM_BASE, &data, 1); + IFX_MEI_DMAWrite (pDev, BRAM_BASE + 4, &data, 1); + IFX_MEI_DMAWrite (pDev, ADSL_DILV_BASE, &data, 1); + IFX_MEI_DMAWrite (pDev, ADSL_DILV_BASE + 4, &data, 1); +} + +/** + * efuse rar program + **/ +static void +IFX_MEI_FuseProg (DSL_DEV_Device_t * pDev) +{ + u32 reg_data, fuse_value; + int i = 0; + + IFX_MEI_LongWordRead ((u32) LQ_RCU_RST, ®_data); + while ((reg_data & 0x10000000) == 0) { + IFX_MEI_LongWordRead ((u32) LQ_RCU_RST, ®_data); + i++; + /* 0x4000 translate to about 16 ms@111M, so should be enough */ + if (i == 0x4000) + return; + } + // STEP a: Prepare memory for external accesses + // Write fuse_en bit24 + IFX_MEI_LongWordRead ((u32) LQ_RCU_RST, ®_data); + IFX_MEI_LongWordWrite ((u32) LQ_RCU_RST, reg_data | (1 << 24)); + + IFX_MEI_FuseInit (pDev); + for (i = 0; i < 4; i++) { + IFX_MEI_LongWordRead ((u32) (LQ_FUSE_BASE) + i * 4, &fuse_value); + switch (fuse_value & 0xF0000) { + case 0x80000: + reg_data = ((fuse_value & RX_DILV_ADDR_BIT_MASK) | + (RX_DILV_ADDR_BIT_MASK + 0x1)); + IFX_MEI_DMAWrite (pDev, ADSL_DILV_BASE, ®_data, 1); + break; + case 0x90000: + reg_data = ((fuse_value & RX_DILV_ADDR_BIT_MASK) | + (RX_DILV_ADDR_BIT_MASK + 0x1)); + IFX_MEI_DMAWrite (pDev, ADSL_DILV_BASE + 4, ®_data, 1); + break; + case 0xA0000: + reg_data = ((fuse_value & IRAM0_ADDR_BIT_MASK) | + (IRAM0_ADDR_BIT_MASK + 0x1)); + IFX_MEI_DMAWrite (pDev, IRAM0_BASE, ®_data, 1); + break; + case 0xB0000: + reg_data = ((fuse_value & IRAM0_ADDR_BIT_MASK) | + (IRAM0_ADDR_BIT_MASK + 0x1)); + IFX_MEI_DMAWrite (pDev, IRAM0_BASE + 4, ®_data, 1); + break; + case 0xC0000: + reg_data = ((fuse_value & IRAM1_ADDR_BIT_MASK) | + (IRAM1_ADDR_BIT_MASK + 0x1)); + IFX_MEI_DMAWrite (pDev, IRAM1_BASE, ®_data, 1); + break; + case 0xD0000: + reg_data = ((fuse_value & IRAM1_ADDR_BIT_MASK) | + (IRAM1_ADDR_BIT_MASK + 0x1)); + IFX_MEI_DMAWrite (pDev, IRAM1_BASE + 4, ®_data, 1); + break; + case 0xE0000: + reg_data = ((fuse_value & BRAM_ADDR_BIT_MASK) | + (BRAM_ADDR_BIT_MASK + 0x1)); + IFX_MEI_DMAWrite (pDev, BRAM_BASE, ®_data, 1); + break; + case 0xF0000: + reg_data = ((fuse_value & BRAM_ADDR_BIT_MASK) | + (BRAM_ADDR_BIT_MASK + 0x1)); + IFX_MEI_DMAWrite (pDev, BRAM_BASE + 4, ®_data, 1); + break; + default: // PPE efuse + break; + } + } + IFX_MEI_LongWordRead ((u32) LQ_RCU_RST, ®_data); + IFX_MEI_LongWordWrite ((u32) LQ_RCU_RST, reg_data & ~(1 << 24)); + IFX_MEI_LongWordRead ((u32) LQ_RCU_RST, ®_data); +} + +/** + * Enable DFE Clock + * This function enables DFE Clock + * + * \param pDev the device pointer + * \return DSL_DEV_MEI_ERR_SUCCESS or DSL_DEV_MEI_ERR_FAILURE + * \ingroup Internal + */ +static DSL_DEV_MeiError_t +IFX_MEI_EnableCLK (DSL_DEV_Device_t * pDev) +{ + u32 arc_debug_data = 0; + IFX_MEI_ControlModeSet (pDev, MEI_MASTER_MODE); + //enable ac_clk signal + _IFX_MEI_DBGLongWordRead (pDev, ME_DBG_DECODE_DMP1_MASK, + CRI_CCR0, &arc_debug_data); + arc_debug_data |= ACL_CLK_MODE_ENABLE; + _IFX_MEI_DBGLongWordWrite (pDev, ME_DBG_DECODE_DMP1_MASK, + CRI_CCR0, arc_debug_data); + IFX_MEI_ControlModeSet (pDev, JTAG_MASTER_MODE); + return DSL_DEV_MEI_ERR_SUCCESS; +} + +/** + * Halt the ARC. + * This function halts the ARC. 
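+ * It does so by setting ARC_DEBUG_HALT in the ARC_DEBUG auxiliary register
+ * through the MEI debug port (MEI_MASTER_MODE), handing control back to
+ * JTAG_MASTER_MODE afterwards and waiting briefly for the core to stop.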
+ * + * \param pDev the device pointer + * \return DSL_DEV_MEI_ERR_SUCCESS or DSL_DEV_MEI_ERR_FAILURE + * \ingroup Internal + */ +static DSL_DEV_MeiError_t +IFX_MEI_HaltArc (DSL_DEV_Device_t * pDev) +{ + u32 arc_debug_data = 0x0; + + // Switch arc control from JTAG mode to MEI mode + IFX_MEI_ControlModeSet (pDev, MEI_MASTER_MODE); + _IFX_MEI_DBGLongWordRead (pDev, MEI_DEBUG_DEC_AUX_MASK, + ARC_DEBUG, &arc_debug_data); + arc_debug_data |= ARC_DEBUG_HALT; + _IFX_MEI_DBGLongWordWrite (pDev, MEI_DEBUG_DEC_AUX_MASK, + ARC_DEBUG, arc_debug_data); + // Switch arc control from MEI mode to JTAG mode + IFX_MEI_ControlModeSet (pDev, JTAG_MASTER_MODE); + + MEI_WAIT (10); + + return DSL_DEV_MEI_ERR_SUCCESS; +} + +/** + * Run the ARC. + * This function runs the ARC. + * + * \param pDev the device pointer + * \return DSL_DEV_MEI_ERR_SUCCESS or DSL_DEV_MEI_ERR_FAILURE + * \ingroup Internal + */ +static DSL_DEV_MeiError_t +IFX_MEI_RunArc (DSL_DEV_Device_t * pDev) +{ + u32 arc_debug_data = 0x0; + + // Switch arc control from JTAG mode to MEI mode- write '1' to bit0 + IFX_MEI_ControlModeSet (pDev, MEI_MASTER_MODE); + _IFX_MEI_DBGLongWordRead (pDev, MEI_DEBUG_DEC_AUX_MASK, + AUX_STATUS, &arc_debug_data); + + // Write debug data reg with content ANDd with 0xFDFFFFFF (halt bit cleared) + arc_debug_data &= ~ARC_AUX_HALT; + _IFX_MEI_DBGLongWordWrite (pDev, MEI_DEBUG_DEC_AUX_MASK, + AUX_STATUS, arc_debug_data); + + // Switch arc control from MEI mode to JTAG mode- write '0' to bit0 + IFX_MEI_ControlModeSet (pDev, JTAG_MASTER_MODE); + // Enable mask for arc codeswap interrupts + IFX_MEI_IRQEnable (pDev); + + return DSL_DEV_MEI_ERR_SUCCESS; + +} + +/** + * Reset the ARC. + * This function resets the ARC. + * + * \param pDev the device pointer + * \return DSL_DEV_MEI_ERR_SUCCESS or DSL_DEV_MEI_ERR_FAILURE + * \ingroup Internal + */ +static DSL_DEV_MeiError_t +IFX_MEI_ResetARC (DSL_DEV_Device_t * pDev) +{ + u32 arc_debug_data = 0; + + IFX_MEI_HaltArc (pDev); + + IFX_MEI_LongWordRead ((u32) LQ_RCU_RST, &arc_debug_data); + IFX_MEI_LongWordWrite ((u32) LQ_RCU_RST, + arc_debug_data | LQ_RCU_RST_REQ_DFE | LQ_RCU_RST_REQ_AFE); + + // reset ARC + IFX_MEI_LongWordWriteOffset (pDev, (u32) ME_RST_CTRL, MEI_SOFT_RESET); + IFX_MEI_LongWordWriteOffset (pDev, (u32) ME_RST_CTRL, 0); + + IFX_MEI_IRQDisable (pDev); + + IFX_MEI_EnableCLK (pDev); + +#if 0 + // reset part of PPE + *(unsigned long *) (BSP_PPE32_SRST) = 0xC30; + *(unsigned long *) (BSP_PPE32_SRST) = 0xFFF; +#endif + + DSL_DEV_PRIVATE(pDev)->modem_ready = 0; + + return DSL_DEV_MEI_ERR_SUCCESS; +} + +DSL_DEV_MeiError_t +DSL_BSP_Showtime (DSL_DEV_Device_t * dev, DSL_uint32_t rate_fast, DSL_uint32_t rate_intl) +{ + struct port_cell_info port_cell = {0}; + + IFX_MEI_EMSG ("Datarate US intl = %d, fast = %d\n", (int)rate_intl, + (int)rate_fast); + + if ( rate_fast ) + g_tx_link_rate[0] = rate_fast / (53 * 8); + if ( rate_intl ) + g_tx_link_rate[1] = rate_intl / (53 * 8); + + if ( g_tx_link_rate[0] == 0 && g_tx_link_rate[1] == 0 ) { + IFX_MEI_EMSG ("Got rate fail.\n"); + } + + if ( ifx_mei_atm_showtime_enter ) + { + port_cell.port_num = 2; + port_cell.tx_link_rate[0] = g_tx_link_rate[0]; + port_cell.tx_link_rate[1] = g_tx_link_rate[1]; + ifx_mei_atm_showtime_enter(&port_cell, g_xdata_addr); + } + else + { + IFX_MEI_EMSG("no hookup from ATM driver to set cell rate\n"); + } + + return DSL_DEV_MEI_ERR_SUCCESS; +}; + +/** + * Reset/halt/run the DFE. + * This function provide operations to reset/halt/run the DFE. 
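+ * DSL_CPU_HALT, DSL_CPU_RUN and DSL_CPU_RESET map to IFX_MEI_HaltArc(),
+ * IFX_MEI_RunArc() and IFX_MEI_ResetARC() respectively; any other mode value
+ * is rejected with DSL_DEV_MEI_ERR_FAILURE. For example (editor's sketch),
+ * restarting the DFE from within the driver would be:
+ *
+ *   IFX_MEI_CpuModeSet(pDev, DSL_CPU_RESET);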
+ * + * \param pDev the device pointer + * \param mode which operation want to do + * \return DSL_DEV_MEI_ERR_SUCCESS or DSL_DEV_MEI_ERR_FAILURE + * \ingroup Internal + */ +static DSL_DEV_MeiError_t +IFX_MEI_CpuModeSet (DSL_DEV_Device_t *pDev, + DSL_DEV_CpuMode_t mode) +{ + DSL_DEV_MeiError_t err_ret = DSL_DEV_MEI_ERR_FAILURE; + switch (mode) { + case DSL_CPU_HALT: + err_ret = IFX_MEI_HaltArc (pDev); + break; + case DSL_CPU_RUN: + err_ret = IFX_MEI_RunArc (pDev); + break; + case DSL_CPU_RESET: + err_ret = IFX_MEI_ResetARC (pDev); + break; + default: + break; + } + return err_ret; +} + +/** + * Accress DFE memory. + * This function provide a way to access DFE memory; + * + * \param pDev the device pointer + * \param type read or write + * \param destaddr destination address + * \param databuff pointer to hold data + * \param databuffsize size want to read/write + * \return DSL_DEV_MEI_ERR_SUCCESS or DSL_DEV_MEI_ERR_FAILURE + * \ingroup Internal + */ +DSL_DEV_MeiError_t +DSL_BSP_MemoryDebugAccess (DSL_DEV_Device_t * pDev, + DSL_BSP_MemoryAccessType_t type, + DSL_uint32_t destaddr, DSL_uint32_t *databuff, + DSL_uint32_t databuffsize) +{ + DSL_DEV_MeiError_t meierr = DSL_DEV_MEI_ERR_SUCCESS; + switch (type) { + case DSL_BSP_MEMORY_READ: + meierr = IFX_MEI_DebugRead (pDev, (u32)destaddr, (u32*)databuff, (u32)databuffsize); + break; + case DSL_BSP_MEMORY_WRITE: + meierr = IFX_MEI_DebugWrite (pDev, (u32)destaddr, (u32*)databuff, (u32)databuffsize); + break; + } + return DSL_DEV_MEI_ERR_SUCCESS; +}; + +/** + * Download boot code to ARC. + * This function downloads boot code to ARC. + * + * \param pDev the device pointer + * \return DSL_DEV_MEI_ERR_SUCCESS or DSL_DEV_MEI_ERR_FAILURE + * \ingroup Internal + */ +static DSL_DEV_MeiError_t +IFX_MEI_DownloadBootCode (DSL_DEV_Device_t *pDev) +{ + IFX_MEI_IRQDisable (pDev); + + IFX_MEI_EnableCLK (pDev); + + IFX_MEI_FuseProg (pDev); //program fuse rar + + IFX_MEI_DownloadBootPages (pDev); + + return DSL_DEV_MEI_ERR_SUCCESS; +}; + +/** + * Enable Jtag debugger interface + * This function setups mips gpio to enable jtag debugger + * + * \param pDev the device pointer + * \param enable enable or disable + * \return DSL_DEV_MEI_ERR_SUCCESS or DSL_DEV_MEI_ERR_FAILURE + * \ingroup Internal + */ +static DSL_DEV_MeiError_t +IFX_MEI_ArcJtagEnable (DSL_DEV_Device_t *dev, int enable) +{ + /* + int meierr=0; + u32 reg_data; + switch (enable) { + case 1: + //reserve gpio 9, 10, 11, 14, 19 for ARC JTAG + ifxmips_port_reserve_pin (0, 9); + ifxmips_port_reserve_pin (0, 10); + ifxmips_port_reserve_pin (0, 11); + ifxmips_port_reserve_pin (0, 14); + ifxmips_port_reserve_pin (1, 3); + + ifxmips_port_set_dir_in(0, 11); + ifxmips_port_clear_altsel0(0, 11); + ifxmips_port_clear_altsel1(0, 11); + ifxmips_port_set_open_drain(0, 11); + //enable ARC JTAG + IFX_MEI_LongWordRead ((u32) LQ_RCU_RST, ®_data); + IFX_MEI_LongWordWrite ((u32) LQ_RCU_RST, reg_data | LQ_RCU_RST_REQ_ARC_JTAG); + break; + case 0: + default: + break; + } +jtag_end: + if (meierr) + return DSL_DEV_MEI_ERR_FAILURE; +*/ + printk("%s:%s[%d]\n", __FILE__, __func__, __LINE__); + printk("%s:%s[%d]\n", __FILE__, __func__, __LINE__); + + return DSL_DEV_MEI_ERR_SUCCESS; +}; + +/** + * Enable DFE to MIPS interrupt + * This function enable DFE to MIPS interrupt + * + * \param pDev the device pointer + * \param enable enable or disable + * \return DSL_DEV_MEI_ERR_SUCCESS or DSL_DEV_MEI_ERR_FAILURE + * \ingroup Internal + */ +static DSL_DEV_MeiError_t +IFX_MEI_AdslMailboxIRQEnable (DSL_DEV_Device_t *pDev, int enable) +{ + 
DSL_DEV_MeiError_t meierr; + switch (enable) { + case 0: + meierr = DSL_DEV_MEI_ERR_SUCCESS; + IFX_MEI_IRQDisable (pDev); + break; + case 1: + IFX_MEI_IRQEnable (pDev); + meierr = DSL_DEV_MEI_ERR_SUCCESS; + break; + default: + meierr = DSL_DEV_MEI_ERR_FAILURE; + break; + + } + return meierr; +} + +/** + * Get the modem status + * This function return the modem status + * + * \param pDev the device pointer + * \return 1: modem ready 0: not ready + * \ingroup Internal + */ +static int +IFX_MEI_IsModemReady (DSL_DEV_Device_t * pDev) +{ + return DSL_DEV_PRIVATE(pDev)->modem_ready; +} + +DSL_DEV_MeiError_t +DSL_BSP_AdslLedInit (DSL_DEV_Device_t * dev, + DSL_DEV_LedId_t led_number, + DSL_DEV_LedType_t type, + DSL_DEV_LedHandler_t handler) +{ +#if 0 + struct led_config_param param; + if (led_number == DSL_LED_LINK_ID && type == DSL_LED_LINK_TYPE && handler == /*DSL_LED_HD_CPU*/DSL_LED_HD_FW) { + param.operation_mask = CONFIG_OPERATION_UPDATE_SOURCE; + param.led = 0x01; + param.source = 0x01; +// bsp_led_config (¶m); + + } else if (led_number == DSL_LED_DATA_ID && type == DSL_LED_DATA_TYPE && (handler == DSL_LED_HD_FW)) { + param.operation_mask = CONFIG_OPERATION_UPDATE_SOURCE; + param.led = 0x02; + param.source = 0x02; +// bsp_led_config (¶m); + } +#endif + return DSL_DEV_MEI_ERR_SUCCESS; +}; +#if 0 +DSL_DEV_MeiError_t +DSL_BSP_AdslLedSet (DSL_DEV_Device_t * dev, DSL_DEV_LedId_t led_number, DSL_DEV_LedMode_t mode) +{ + printk(KERN_INFO "[%s %d]: mode = %#x, led_number = %d\n", __func__, __LINE__, mode, led_number); + switch (mode) { + case DSL_LED_OFF: + switch (led_number) { + case DSL_LED_LINK_ID: +#ifdef CONFIG_BSP_LED + bsp_led_set_blink (1, 0); + bsp_led_set_data (1, 0); +#endif + break; + case DSL_LED_DATA_ID: +#ifdef CONFIG_BSP_LED + bsp_led_set_blink (0, 0); + bsp_led_set_data (0, 0); +#endif + break; + } + break; + case DSL_LED_FLASH: + switch (led_number) { + case DSL_LED_LINK_ID: +#ifdef CONFIG_BSP_LED + bsp_led_set_blink (1, 1); // data +#endif + break; + case DSL_LED_DATA_ID: +#ifdef CONFIG_BSP_LED + bsp_led_set_blink (0, 1); // data +#endif + break; + } + break; + case DSL_LED_ON: + switch (led_number) { + case DSL_LED_LINK_ID: +#ifdef CONFIG_BSP_LED + bsp_led_set_blink (1, 0); + bsp_led_set_data (1, 1); +#endif + break; + case DSL_LED_DATA_ID: +#ifdef CONFIG_BSP_LED + bsp_led_set_blink (0, 0); + bsp_led_set_data (0, 1); +#endif + break; + } + break; + } + return DSL_DEV_MEI_ERR_SUCCESS; +}; + +#endif + +/** +* Compose a message. +* This function compose a message from opcode, group, address, index, size, and data +* +* \param opcode The message opcode +* \param group The message group number +* \param address The message address. +* \param index The message index. +* \param size The number of words to read/write. +* \param data The pointer to data. +* \param CMVMSG The pointer to message buffer. +* \ingroup Internal +*/ +void +makeCMV (u8 opcode, u8 group, u16 address, u16 index, int size, u16 * data, u16 *CMVMSG) +{ + memset (CMVMSG, 0, MSG_LENGTH * 2); + CMVMSG[0] = (opcode << 4) + (size & 0xf); + CMVMSG[1] = (((index == 0) ? 0 : 1) << 7) + (group & 0x7f); + CMVMSG[2] = address; + CMVMSG[3] = index; + if (opcode == H2D_CMV_WRITE) + memcpy (CMVMSG + 4, data, size * 2); + return; +} + +/** + * Send a message to ARC and read the response + * This function sends a message to arc, waits the response, and reads the responses. + * + * \param pDev the device pointer + * \param request Pointer to the request + * \param reply Wait reply or not. 
+ * \param response Pointer to the response + * \return DSL_DEV_MEI_ERR_SUCCESS or DSL_DEV_MEI_ERR_FAILURE + * \ingroup Internal + */ +DSL_DEV_MeiError_t +DSL_BSP_SendCMV (DSL_DEV_Device_t * pDev, u16 * request, int reply, u16 * response) // write cmv to arc, if reply needed, wait for reply +{ + DSL_DEV_MeiError_t meierror; +#if defined(BSP_PORT_RTEMS) + int delay_counter = 0; +#endif + + if (MEI_MUTEX_LOCK (DSL_DEV_PRIVATE(pDev)->mei_cmv_sema)) + return -ERESTARTSYS; + + DSL_DEV_PRIVATE(pDev)->cmv_reply = reply; + memset (DSL_DEV_PRIVATE(pDev)->CMV_RxMsg, 0, + sizeof (DSL_DEV_PRIVATE(pDev)-> + CMV_RxMsg)); + DSL_DEV_PRIVATE(pDev)->arcmsgav = 0; + + meierror = IFX_MEI_MailboxWrite (pDev, request, MSG_LENGTH); + + if (meierror != DSL_DEV_MEI_ERR_SUCCESS) { + DSL_DEV_PRIVATE(pDev)->cmv_waiting = 0; + DSL_DEV_PRIVATE(pDev)->arcmsgav = 0; + IFX_MEI_EMSG ("MailboxWrite Fail!\n"); + IFX_MEI_EMSG ("Resetting ARC...\n"); + IFX_MEI_ResetARC(pDev); + MEI_MUTEX_UNLOCK (DSL_DEV_PRIVATE(pDev)->mei_cmv_sema); + return meierror; + } + else { + DSL_DEV_PRIVATE(pDev)->cmv_count++; + } + + if (DSL_DEV_PRIVATE(pDev)->cmv_reply == + NO_REPLY) { + MEI_MUTEX_UNLOCK (DSL_DEV_PRIVATE(pDev)->mei_cmv_sema); + return DSL_DEV_MEI_ERR_SUCCESS; + } + +#if !defined(BSP_PORT_RTEMS) + if (DSL_DEV_PRIVATE(pDev)->arcmsgav == 0) + MEI_WAIT_EVENT_TIMEOUT (DSL_DEV_PRIVATE(pDev)->wait_queue_arcmsgav, CMV_TIMEOUT); +#else + while (DSL_DEV_PRIVATE(pDev)->arcmsgav == 0 && delay_counter < CMV_TIMEOUT / 5) { + MEI_WAIT (5); + delay_counter++; + } +#endif + + DSL_DEV_PRIVATE(pDev)->cmv_waiting = 0; + if (DSL_DEV_PRIVATE(pDev)->arcmsgav == 0) { //CMV_timeout + DSL_DEV_PRIVATE(pDev)->arcmsgav = 0; + IFX_MEI_EMSG ("\%s: DSL_DEV_MEI_ERR_MAILBOX_TIMEOUT\n", + __FUNCTION__); + MEI_MUTEX_UNLOCK (DSL_DEV_PRIVATE(pDev)->mei_cmv_sema); + return DSL_DEV_MEI_ERR_MAILBOX_TIMEOUT; + } + else { + DSL_DEV_PRIVATE(pDev)->arcmsgav = 0; + DSL_DEV_PRIVATE(pDev)-> + reply_count++; + memcpy (response, DSL_DEV_PRIVATE(pDev)->CMV_RxMsg, MSG_LENGTH * 2); + MEI_MUTEX_UNLOCK (DSL_DEV_PRIVATE(pDev)->mei_cmv_sema); + return DSL_DEV_MEI_ERR_SUCCESS; + } + MEI_MUTEX_UNLOCK (DSL_DEV_PRIVATE(pDev)->mei_cmv_sema); + return DSL_DEV_MEI_ERR_SUCCESS; +} + +/** + * Reset the ARC, download boot codes, and run the ARC. + * This function resets the ARC, downloads boot codes to ARC, and runs the ARC. + * + * \param pDev the device pointer + * \return DSL_DEV_MEI_ERR_SUCCESS or DSL_DEV_MEI_ERR_FAILURE + * \ingroup Internal + */ +static DSL_DEV_MeiError_t +IFX_MEI_RunAdslModem (DSL_DEV_Device_t *pDev) +{ + int nSize = 0, idx = 0; + uint32_t im0_register, im2_register; +// DSL_DEV_WinHost_Message_t m; + + if (mei_arc_swap_buff == NULL) { + mei_arc_swap_buff = + (u32 *) kmalloc (MAXSWAPSIZE * 4, GFP_KERNEL); + if (mei_arc_swap_buff == NULL) { + IFX_MEI_EMSG (">>> malloc fail for codeswap buff!!! 
<<<\n"); + return DSL_DEV_MEI_ERR_FAILURE; + } + IFX_MEI_DMSG("allocate %dKB swap buff memory at: 0x%p\n", ksize(mei_arc_swap_buff)/1024, mei_arc_swap_buff); + } + + DSL_DEV_PRIVATE(pDev)->img_hdr = + (ARC_IMG_HDR *) DSL_DEV_PRIVATE(pDev)->adsl_mem_info[0].address; + if ((DSL_DEV_PRIVATE(pDev)->img_hdr-> + count) * sizeof (ARC_SWP_PAGE_HDR) > SDRAM_SEGMENT_SIZE) { + IFX_MEI_EMSG ("firmware header size is bigger than 64K segment size\n"); + return DSL_DEV_MEI_ERR_FAILURE; + } + // check image size + for (idx = 0; idx < MAX_BAR_REGISTERS; idx++) { + nSize += DSL_DEV_PRIVATE(pDev)->adsl_mem_info[idx].nCopy; + } + if (nSize != + DSL_DEV_PRIVATE(pDev)->image_size) { + IFX_MEI_EMSG ("Firmware download is not completed. Please download firmware again!\n"); + return DSL_DEV_MEI_ERR_FAILURE; + } + // TODO: check crc + /// + + IFX_MEI_ResetARC (pDev); + IFX_MEI_HaltArc (pDev); + IFX_MEI_BarUpdate (pDev, DSL_DEV_PRIVATE(pDev)->nBar); + + //IFX_MEI_DMSG("Starting to meiDownloadBootCode\n"); + + IFX_MEI_DownloadBootCode (pDev); + + im0_register = (*LQ_ICU_IM0_IER) & (1 << 20); + im2_register = (*LQ_ICU_IM2_IER) & (1 << 20); + /* Turn off irq */ + #ifdef CONFIG_LANTIQ_AMAZON_SE + disable_irq (IFXMIPS_USB_OC_INT0); + disable_irq (IFXMIPS_USB_OC_INT2); + #elif defined(CONFIG_LANTIQ_AR9) + disable_irq (IFXMIPS_USB_OC_INT0); + disable_irq (IFXMIPS_USB_OC_INT2); + #elif defined(CONFIG_SOC_LANTIQ_XWAY) + disable_irq (LQ_USB_OC_INT); + #else + #error unkonwn arch + #endif + disable_irq (pDev->nIrq[IFX_DYING_GASP]); + + IFX_MEI_RunArc (pDev); + + MEI_WAIT_EVENT_TIMEOUT (DSL_DEV_PRIVATE(pDev)->wait_queue_modemready, 1000); + + #ifdef CONFIG_LANTIQ_AMAZON_SE + MEI_MASK_AND_ACK_IRQ (IFXMIPS_USB_OC_INT0); + MEI_MASK_AND_ACK_IRQ (IFXMIPS_USB_OC_INT2); + #elif defined(CONFIG_LANTIQ_AMAZON_S) + MEI_MASK_AND_ACK_IRQ (IFXMIPS_USB_OC_INT0); + MEI_MASK_AND_ACK_IRQ (IFXMIPS_USB_OC_INT2); + #elif defined(CONFIG_SOC_LANTIQ_XWAY) + MEI_MASK_AND_ACK_IRQ (LQ_USB_OC_INT); + #else + #error unkonwn arch + #endif + MEI_MASK_AND_ACK_IRQ (pDev->nIrq[IFX_DYING_GASP]); + + /* Re-enable irq */ + enable_irq(pDev->nIrq[IFX_DYING_GASP]); + *LQ_ICU_IM0_IER |= im0_register; + *LQ_ICU_IM2_IER |= im2_register; + + if (DSL_DEV_PRIVATE(pDev)->modem_ready != 1) { + IFX_MEI_EMSG ("Modem failed to be ready!\n"); + return DSL_DEV_MEI_ERR_FAILURE; + } else { + IFX_MEI_DMSG("Modem is ready.\n"); + return DSL_DEV_MEI_ERR_SUCCESS; + } +} + +/** + * Get the page's data pointer + * This function caculats the data address from the firmware header. + * + * \param pDev the device pointer + * \param Page The page number. + * \param data Data page or program page. + * \param MaxSize The maximum size to read. + * \param Buffer Pointer to data. + * \param Dest Pointer to the destination address. + * \return The number of bytes to read. + * \ingroup Internal + */ +static int +IFX_MEI_GetPage (DSL_DEV_Device_t * pDev, u32 Page, u32 data, + u32 MaxSize, u32 * Buffer, u32 * Dest) +{ + u32 size; + u32 i; + u32 *p; + u32 idx, offset, nBar = 0; + + if (Page > DSL_DEV_PRIVATE(pDev)->img_hdr->count) + return -2; + /* + ** Get program or data size, depending on "data" flag + */ + size = (data == GET_DATA) ? (DSL_DEV_PRIVATE(pDev)->img_hdr->page[Page].d_size) : + (DSL_DEV_PRIVATE(pDev)->img_hdr->page[Page].p_size); + size &= BOOT_FLAG_MASK; // Clear boot bit! + if (size > MaxSize) + return -1; + + if (size == 0) + return 0; + /* + ** Get program or data offset, depending on "data" flag + */ + i = data ? 
(DSL_DEV_PRIVATE(pDev)->img_hdr->page[Page].d_offset) : + (DSL_DEV_PRIVATE(pDev)->img_hdr->page[Page].p_offset); + + /* + ** Copy data/program to buffer + */ + + idx = i / SDRAM_SEGMENT_SIZE; + offset = i % SDRAM_SEGMENT_SIZE; + p = (u32 *) ((u8 *) DSL_DEV_PRIVATE(pDev)->adsl_mem_info[idx].address + offset); + + for (i = 0; i < size; i++) { + if (offset + i * 4 - (nBar * SDRAM_SEGMENT_SIZE) >= SDRAM_SEGMENT_SIZE) { + idx++; + nBar++; + p = (u32 *) ((u8 *) KSEG1ADDR ((u32)DSL_DEV_PRIVATE(pDev)->adsl_mem_info[idx].address)); + } + Buffer[i] = *p++; + } + + /* + ** Pass back data/program destination address + */ + *Dest = data ? (DSL_DEV_PRIVATE(pDev)-> img_hdr->page[Page].d_dest) : + (DSL_DEV_PRIVATE(pDev)->img_hdr->page[Page].p_dest); + + return size; +} + +/** + * Free the memory for ARC firmware + * + * \param pDev the device pointer + * \param type Free all memory or free the unused memory after showtime + * \ingroup Internal + */ +const char *free_str[4] = {"Invalid", "Free_Reload", "Free_Showtime", "Free_All"}; +static int +IFX_MEI_DFEMemoryFree (DSL_DEV_Device_t * pDev, int type) +{ + int idx = 0; + smmu_mem_info_t *adsl_mem_info = + DSL_DEV_PRIVATE(pDev)->adsl_mem_info; + + for (idx = 0; idx < MAX_BAR_REGISTERS; idx++) { + if (type == FREE_ALL ||adsl_mem_info[idx].type == type) { + if (adsl_mem_info[idx].size > 0) { + IFX_MEI_DMSG ("Freeing memory %p (%s)\n", adsl_mem_info[idx].org_address, free_str[adsl_mem_info[idx].type]); + if ( idx == XDATA_REGISTER ) { + g_xdata_addr = NULL; + if ( ifx_mei_atm_showtime_exit ) + ifx_mei_atm_showtime_exit(); + } + kfree (adsl_mem_info[idx].org_address); + adsl_mem_info[idx].org_address = 0; + adsl_mem_info[idx].address = 0; + adsl_mem_info[idx].size = 0; + adsl_mem_info[idx].type = 0; + adsl_mem_info[idx].nCopy = 0; + } + } + } + + if(mei_arc_swap_buff != NULL){ + IFX_MEI_DMSG("free %dKB swap buff memory at: 0x%p\n", ksize(mei_arc_swap_buff)/1024, mei_arc_swap_buff); + kfree(mei_arc_swap_buff); + mei_arc_swap_buff=NULL; + } + + return 0; +} +static int +IFX_MEI_DFEMemoryAlloc (DSL_DEV_Device_t * pDev, long size) +{ + unsigned long mem_ptr; + char *org_mem_ptr = NULL; + int idx = 0; + long total_size = 0; + int err = 0; + smmu_mem_info_t *adsl_mem_info = + ((ifx_mei_device_private_t *) pDev->pPriv)->adsl_mem_info; +// DSL_DEV_PRIVATE(pDev)->adsl_mem_info; + int allocate_size = SDRAM_SEGMENT_SIZE; + + IFX_MEI_DMSG("image_size = %ld\n", size); + // Alloc Swap Pages + for (idx = 0; size > 0 && idx < MAX_BAR_REGISTERS; idx++) { + // skip bar15 for XDATA usage. 
+ if (idx == XDATA_REGISTER) + continue; +#if 0 + if (size < SDRAM_SEGMENT_SIZE) { + allocate_size = size; + if (allocate_size < 1024) + allocate_size = 1024; + } +#endif + if (idx == (MAX_BAR_REGISTERS - 1)) + allocate_size = size; + else + allocate_size = SDRAM_SEGMENT_SIZE; + org_mem_ptr = kmalloc (allocate_size + 1024, GFP_KERNEL); + if (org_mem_ptr == NULL) { + IFX_MEI_EMSG ("%d: kmalloc %d bytes memory fail!\n", idx, allocate_size); + err = -ENOMEM; + goto allocate_error; + } + mem_ptr = (unsigned long) (org_mem_ptr + 1023) & ~(1024 -1); + adsl_mem_info[idx].address = (char *) mem_ptr; + adsl_mem_info[idx].org_address = org_mem_ptr; + adsl_mem_info[idx].size = allocate_size; + size -= allocate_size; + total_size += allocate_size; + } + if (size > 0) { + IFX_MEI_EMSG ("Image size is too large!\n"); + err = -EFBIG; + goto allocate_error; + } + err = idx; + return err; + + allocate_error: + IFX_MEI_DFEMemoryFree (pDev, FREE_ALL); + return err; +} + +/** + * Program the BAR registers + * + * \param pDev the device pointer + * \param nTotalBar The number of bar to program. + * \ingroup Internal + */ +static int +IFX_MEI_BarUpdate (DSL_DEV_Device_t * pDev, int nTotalBar) +{ + int idx = 0; + smmu_mem_info_t *adsl_mem_info = + DSL_DEV_PRIVATE(pDev)->adsl_mem_info; + + for (idx = 0; idx < nTotalBar; idx++) { + //skip XDATA register + if (idx == XDATA_REGISTER) + continue; + IFX_MEI_LongWordWriteOffset (pDev, (u32) ME_XMEM_BAR_BASE + idx * 4, + (((uint32_t) adsl_mem_info[idx].address) & 0x0FFFFFFF)); + } + for (idx = nTotalBar; idx < MAX_BAR_REGISTERS; idx++) { + if (idx == XDATA_REGISTER) + continue; + IFX_MEI_LongWordWriteOffset (pDev, (u32) ME_XMEM_BAR_BASE + idx * 4, + (((uint32_t)adsl_mem_info[nTotalBar - 1].address) & 0x0FFFFFFF)); + /* These are for /proc/danube_mei/meminfo purpose */ + adsl_mem_info[idx].address = adsl_mem_info[nTotalBar - 1].address; + adsl_mem_info[idx].org_address = adsl_mem_info[nTotalBar - 1].org_address; + adsl_mem_info[idx].size = 0; /* Prevent it from being freed */ + } + + g_xdata_addr = adsl_mem_info[XDATA_REGISTER].address; + IFX_MEI_LongWordWriteOffset (pDev, (u32) ME_XMEM_BAR_BASE + XDATA_REGISTER * 4, + (((uint32_t) adsl_mem_info [XDATA_REGISTER].address) & 0x0FFFFFFF)); + // update MEI_XDATA_BASE_SH + IFX_MEI_LongWordWriteOffset (pDev, (u32) ME_XDATA_BASE_SH, + ((unsigned long)adsl_mem_info[XDATA_REGISTER].address) & 0x0FFFFFFF); + + return DSL_DEV_MEI_ERR_SUCCESS; +} + +/* This copies the firmware from secondary storage to 64k memory segment in SDRAM */ +DSL_DEV_MeiError_t +DSL_BSP_FWDownload (DSL_DEV_Device_t * pDev, const char *buf, + unsigned long size, long *loff, long *current_offset) +{ + ARC_IMG_HDR img_hdr_tmp; + smmu_mem_info_t *adsl_mem_info = DSL_DEV_PRIVATE(pDev)->adsl_mem_info; + + size_t nRead = 0, nCopy = 0; + char *mem_ptr; + ssize_t retval = -ENOMEM; + int idx = 0; + + IFX_MEI_DMSG("\n"); + + if (*loff == 0) { + if (size < sizeof (img_hdr_tmp)) { + IFX_MEI_EMSG ("Firmware size is too small!\n"); + return retval; + } + copy_from_user ((char *) &img_hdr_tmp, buf, sizeof (img_hdr_tmp)); + // header of image_size and crc are not included. 
+ DSL_DEV_PRIVATE(pDev)->image_size = le32_to_cpu (img_hdr_tmp.size) + 8; + + if (DSL_DEV_PRIVATE(pDev)->image_size > 1024 * 1024) { + IFX_MEI_EMSG ("Firmware size is too large!\n"); + return retval; + } + // check if arc is halt + IFX_MEI_ResetARC (pDev); + IFX_MEI_HaltArc (pDev); + + IFX_MEI_DFEMemoryFree (pDev, FREE_ALL); //free all + + retval = IFX_MEI_DFEMemoryAlloc (pDev, DSL_DEV_PRIVATE(pDev)->image_size); + if (retval < 0) { + IFX_MEI_EMSG ("Error: No memory space left.\n"); + goto error; + } + for (idx = 0; idx < retval; idx++) { + //skip XDATA register + if (idx == XDATA_REGISTER) + continue; + if (idx * SDRAM_SEGMENT_SIZE < le32_to_cpu (img_hdr_tmp.page[0].p_offset)) + adsl_mem_info[idx].type = FREE_RELOAD; + else + adsl_mem_info[idx].type = FREE_SHOWTIME; + } + DSL_DEV_PRIVATE(pDev)->nBar = retval; + + DSL_DEV_PRIVATE(pDev)->img_hdr = + (ARC_IMG_HDR *) adsl_mem_info[0].address; + + adsl_mem_info[XDATA_REGISTER].org_address = kmalloc (SDRAM_SEGMENT_SIZE + 1024, GFP_KERNEL); + adsl_mem_info[XDATA_REGISTER].address = + (char *) ((unsigned long) (adsl_mem_info[XDATA_REGISTER].org_address + 1023) & 0xFFFFFC00); + + adsl_mem_info[XDATA_REGISTER].size = SDRAM_SEGMENT_SIZE; + + if (adsl_mem_info[XDATA_REGISTER].address == NULL) { + IFX_MEI_EMSG ("kmalloc memory fail!\n"); + retval = -ENOMEM; + goto error; + } + adsl_mem_info[XDATA_REGISTER].type = FREE_RELOAD; + IFX_MEI_DMSG("-> IFX_MEI_BarUpdate()\n"); + IFX_MEI_BarUpdate (pDev, (DSL_DEV_PRIVATE(pDev)->nBar)); + } + else if (DSL_DEV_PRIVATE(pDev)-> image_size == 0) { + IFX_MEI_EMSG ("Error: Firmware size=0! \n"); + goto error; + } + + nRead = 0; + while (nRead < size) { + long offset = ((long) (*loff) + nRead) % SDRAM_SEGMENT_SIZE; + idx = (((long) (*loff)) + nRead) / SDRAM_SEGMENT_SIZE; + mem_ptr = (char *) KSEG1ADDR ((unsigned long) (adsl_mem_info[idx].address) + offset); + if ((size - nRead + offset) > SDRAM_SEGMENT_SIZE) + nCopy = SDRAM_SEGMENT_SIZE - offset; + else + nCopy = size - nRead; + copy_from_user (mem_ptr, buf + nRead, nCopy); + for (offset = 0; offset < (nCopy / 4); offset++) { + ((unsigned long *) mem_ptr)[offset] = le32_to_cpu (((unsigned long *) mem_ptr)[offset]); + } + nRead += nCopy; + adsl_mem_info[idx].nCopy += nCopy; + } + + *loff += size; + *current_offset = size; + return DSL_DEV_MEI_ERR_SUCCESS; +error: + IFX_MEI_DFEMemoryFree (pDev, FREE_ALL); + return DSL_DEV_MEI_ERR_FAILURE; +} +/* + * Register a callback event. + * Return: + * -1 if the event already has a callback function registered. 
+ * 0 success + */ +int DSL_BSP_EventCBRegister(DSL_BSP_EventCallBack_t *p) +{ + if (!p) { + IFX_MEI_EMSG("Invalid parameter!\n"); + return -EINVAL; + } + if (p->event > DSL_BSP_CB_LAST || p->event < DSL_BSP_CB_FIRST) { + IFX_MEI_EMSG("Invalid Event %d\n", p->event); + return -EINVAL; + } + if (dsl_bsp_event_callback[p->event].function) { + IFX_MEI_EMSG("Event %d already has a callback function registered!\n", p->event); + return -1; + } else { + dsl_bsp_event_callback[p->event].function = p->function; + dsl_bsp_event_callback[p->event].event = p->event; + dsl_bsp_event_callback[p->event].pData = p->pData; + } + return 0; +} +int DSL_BSP_EventCBUnregister(DSL_BSP_EventCallBack_t *p) +{ + if (!p) { + IFX_MEI_EMSG("Invalid parameter!\n"); + return -EINVAL; + } + if (p->event > DSL_BSP_CB_LAST || p->event < DSL_BSP_CB_FIRST) { + IFX_MEI_EMSG("Invalid Event %d\n", p->event); + return -EINVAL; + } + if (dsl_bsp_event_callback[p->event].function) { + IFX_MEI_EMSG("Unregistering Event %d...\n", p->event); + dsl_bsp_event_callback[p->event].function = NULL; + dsl_bsp_event_callback[p->event].pData = NULL; + } else { + IFX_MEI_EMSG("Event %d is not registered!\n", p->event); + return -1; + } + return 0; +} + +/** + * MEI Dying Gasp interrupt handler + * + * \param int1 + * \param void0 + * \param regs Pointer to the structure of danube mips registers + * \ingroup Internal + */ +static irqreturn_t IFX_MEI_Dying_Gasp_IrqHandle (int int1, void *void0) +{ + DSL_DEV_Device_t *pDev = (DSL_DEV_Device_t *) void0; + DSL_BSP_CB_Type_t event; + + if (pDev == NULL) + IFX_MEI_EMSG("Error: Got Interrupt but pDev is NULL!!!!\n"); + +#ifndef CONFIG_SMP + disable_irq (pDev->nIrq[IFX_DYING_GASP]); +#else + disable_irq_nosync(pDev->nIrq[IFX_DYING_GASP]); +#endif + event = DSL_BSP_CB_DYING_GASP; + + if (dsl_bsp_event_callback[event].function) + (*dsl_bsp_event_callback[event].function)(pDev, event, dsl_bsp_event_callback[event].pData); + +#ifdef CONFIG_USE_EMULATOR + IFX_MEI_EMSG("Dying Gasp! Shutting Down... (Work around for Amazon-S Venus emulator)\n"); +#else + IFX_MEI_EMSG("Dying Gasp! 
Shutting Down...\n"); +// kill_proc (1, SIGINT, 1); /* Ask init to reboot us */ +#endif + return IRQ_HANDLED; +} + +extern void ifx_usb_enable_afe_oc(void); + +/** + * MEI interrupt handler + * + * \param int1 + * \param void0 + * \param regs Pointer to the structure of danube mips registers + * \ingroup Internal + */ +static irqreturn_t IFX_MEI_IrqHandle (int int1, void *void0) +{ + u32 scratch; + DSL_DEV_Device_t *pDev = (DSL_DEV_Device_t *) void0; +#if defined(CONFIG_LQ_MEI_FW_LOOPBACK) && defined(DFE_PING_TEST) + dfe_loopback_irq_handler (pDev); + return IRQ_HANDLED; +#endif //CONFIG_AMAZON_S_MEI_FW_LOOPBACK + DSL_BSP_CB_Type_t event; + + if (pDev == NULL) + IFX_MEI_EMSG("Error: Got Interrupt but pDev is NULL!!!!\n"); + + IFX_MEI_DebugRead (pDev, ARC_MEI_MAILBOXR, &scratch, 1); + if (scratch & OMB_CODESWAP_MESSAGE_MSG_TYPE_MASK) { + IFX_MEI_EMSG("Receive Code Swap Request interrupt!!!\n"); + return IRQ_HANDLED; + } + else if (scratch & OMB_CLEAREOC_INTERRUPT_CODE) { + // clear eoc message interrupt + IFX_MEI_DMSG("OMB_CLEAREOC_INTERRUPT_CODE\n"); + event = DSL_BSP_CB_CEOC_IRQ; + IFX_MEI_LongWordWriteOffset (pDev, (u32) ME_ARC2ME_STAT, ARC_TO_MEI_MSGAV); + if (dsl_bsp_event_callback[event].function) + (*dsl_bsp_event_callback[event].function)(pDev, event, dsl_bsp_event_callback[event].pData); + } else if (scratch & OMB_REBOOT_INTERRUPT_CODE) { + // Reboot + IFX_MEI_DMSG("OMB_REBOOT_INTERRUPT_CODE\n"); + event = DSL_BSP_CB_FIRMWARE_REBOOT; + + IFX_MEI_LongWordWriteOffset (pDev, (u32) ME_ARC2ME_STAT, ARC_TO_MEI_MSGAV); + + if (dsl_bsp_event_callback[event].function) + (*dsl_bsp_event_callback[event].function)(pDev, event, dsl_bsp_event_callback[event].pData); + } else { // normal message + IFX_MEI_MailboxRead (pDev, DSL_DEV_PRIVATE(pDev)->CMV_RxMsg, MSG_LENGTH); + if (DSL_DEV_PRIVATE(pDev)-> cmv_waiting == 1) { + DSL_DEV_PRIVATE(pDev)-> arcmsgav = 1; + DSL_DEV_PRIVATE(pDev)-> cmv_waiting = 0; +#if !defined(BSP_PORT_RTEMS) + MEI_WAKEUP_EVENT (DSL_DEV_PRIVATE(pDev)->wait_queue_arcmsgav); +#endif + } + else { + DSL_DEV_PRIVATE(pDev)-> modem_ready_cnt++; + memcpy ((char *) DSL_DEV_PRIVATE(pDev)->Recent_indicator, + (char *) DSL_DEV_PRIVATE(pDev)->CMV_RxMsg, MSG_LENGTH * 2); + if (((DSL_DEV_PRIVATE(pDev)->CMV_RxMsg[0] & 0xff0) >> 4) == D2H_AUTONOMOUS_MODEM_READY_MSG) { + //check ARC ready message + IFX_MEI_DMSG ("Got MODEM_READY_MSG\n"); + DSL_DEV_PRIVATE(pDev)->modem_ready = 1; + MEI_WAKEUP_EVENT (DSL_DEV_PRIVATE(pDev)->wait_queue_modemready); + } + } + } + + return IRQ_HANDLED; +} + +int +DSL_BSP_ATMLedCBRegister (int (*ifx_adsl_ledcallback) (void)) +{ + g_adsl_ledcallback = ifx_adsl_ledcallback; + return 0; +} + +int +DSL_BSP_ATMLedCBUnregister (int (*ifx_adsl_ledcallback) (void)) +{ + g_adsl_ledcallback = adsl_dummy_ledcallback; + return 0; +} + +#if 0 +int +DSL_BSP_EventCBRegister (int (*ifx_adsl_callback) + (DSL_BSP_CB_Event_t * param)) +{ + int error = 0; + + if (DSL_EventCB == NULL) { + DSL_EventCB = ifx_adsl_callback; + } + else { + error = -EIO; + } + return error; +} + +int +DSL_BSP_EventCBUnregister (int (*ifx_adsl_callback) + (DSL_BSP_CB_Event_t * param)) +{ + int error = 0; + + if (DSL_EventCB == ifx_adsl_callback) { + DSL_EventCB = NULL; + } + else { + error = -EIO; + } + return error; +} + +static int +DSL_BSP_GetEventCB (int (**ifx_adsl_callback) + (DSL_BSP_CB_Event_t * param)) +{ + *ifx_adsl_callback = DSL_EventCB; + return 0; +} +#endif + +#ifdef CONFIG_LQ_MEI_FW_LOOPBACK +#define mte_reg_base (0x4800*4+0x20000) + +/* Iridia Registers Address Constants */ +#define MTE_Reg(r) 
(int)(mte_reg_base + (r*4)) + +#define IT_AMODE MTE_Reg(0x0004) + +#define TIMER_DELAY (1024) +#define BC0_BYTES (32) +#define BC1_BYTES (30) +#define NUM_MB (12) +#define TIMEOUT_VALUE 2000 + +static void +BFMWait (u32 cycle) +{ + u32 i; + for (i = 0; i < cycle; i++); +} + +static void +WriteRegLong (u32 addr, u32 data) +{ + //*((volatile u32 *)(addr)) = data; + IFX_MEI_WRITE_REGISTER_L (data, addr); +} + +static u32 +ReadRegLong (u32 addr) +{ + // u32 rd_val; + //rd_val = *((volatile u32 *)(addr)); + // return rd_val; + return IFX_MEI_READ_REGISTER_L (addr); +} + +/* This routine writes the mailbox with the data in an input array */ +static void +WriteMbox (u32 * mboxarray, u32 size) +{ + IFX_MEI_DebugWrite (&dsl_devices[0], IMBOX_BASE, mboxarray, size); + IFX_MEI_DMSG("write to %X\n", IMBOX_BASE); + IFX_MEI_LongWordWriteOffset (&dsl_devices[0], (u32) ME_ME2ARC_INT, MEI_TO_ARC_MSGAV); +} + +/* This routine reads the output mailbox and places the results into an array */ +static void +ReadMbox (u32 * mboxarray, u32 size) +{ + IFX_MEI_DebugRead (&dsl_devices[0], OMBOX_BASE, mboxarray, size); + IFX_MEI_DMSG("read from %X\n", OMBOX_BASE); +} + +static void +MEIWriteARCValue (u32 address, u32 value) +{ + u32 i, check = 0; + + /* Write address register */ + IFX_MEI_WRITE_REGISTER_L (address, ME_DBG_WR_AD + LQ_MEI_BASE_ADDR); + + /* Write data register */ + IFX_MEI_WRITE_REGISTER_L (value, ME_DBG_DATA + LQ_MEI_BASE_ADDR); + + /* wait until complete - timeout at 40 */ + for (i = 0; i < 40; i++) { + check = IFX_MEI_READ_REGISTER_L (ME_ARC2ME_STAT + LQ_MEI_BASE_ADDR); + + if ((check & ARC_TO_MEI_DBG_DONE)) + break; + } + /* clear the flag */ + IFX_MEI_WRITE_REGISTER_L (ARC_TO_MEI_DBG_DONE, ME_ARC2ME_STAT + LQ_MEI_BASE_ADDR); +} + +void +arc_code_page_download (uint32_t arc_code_length, uint32_t * start_address) +{ + int count; + + IFX_MEI_DMSG("try to download pages,size=%d\n", arc_code_length); + IFX_MEI_ControlModeSet (&dsl_devices[0], MEI_MASTER_MODE); + IFX_MEI_HaltArc (&dsl_devices[0]); + IFX_MEI_LongWordWriteOffset (&dsl_devices[0], (u32) ME_DX_AD, 0); + for (count = 0; count < arc_code_length; count++) { + IFX_MEI_LongWordWriteOffset (&dsl_devices[0], (u32) ME_DX_DATA, + *(start_address + count)); + } + IFX_MEI_ControlModeSet (&dsl_devices[0], JTAG_MASTER_MODE); +} +static int +load_jump_table (unsigned long addr) +{ + int i; + uint32_t addr_le, addr_be; + uint32_t jump_table[32]; + + for (i = 0; i < 16; i++) { + addr_le = i * 8 + addr; + addr_be = ((addr_le >> 16) & 0xffff); + addr_be |= ((addr_le & 0xffff) << 16); + jump_table[i * 2 + 0] = 0x0f802020; + jump_table[i * 2 + 1] = addr_be; + //printk("jt %X %08X %08X\n",i,jump_table[i*2+0],jump_table[i*2+1]); + } + arc_code_page_download (32, &jump_table[0]); +return 0; +} + +int got_int = 0; + +void +dfe_loopback_irq_handler (DSL_DEV_Device_t *pDev) +{ + uint32_t rd_mbox[10]; + + memset (&rd_mbox[0], 0, 10 * 4); + ReadMbox (&rd_mbox[0], 6); + if (rd_mbox[0] == 0x0) { + FX_MEI_DMSG("Get ARC_ACK\n"); + got_int = 1; + } + else if (rd_mbox[0] == 0x5) { + IFX_MEI_DMSG("Get ARC_BUSY\n"); + got_int = 2; + } + else if (rd_mbox[0] == 0x3) { + IFX_MEI_DMSG("Get ARC_EDONE\n"); + if (rd_mbox[1] == 0x0) { + got_int = 3; + IFX_MEI_DMSG("Get E_MEMTEST\n"); + if (rd_mbox[2] != 0x1) { + got_int = 4; + IFX_MEI_DMSG("Get Result %X\n", rd_mbox[2]); + } + } + } + IFX_MEI_LongWordWriteOffset (&dsl_devices[0], (u32) ME_ARC2ME_STAT, + ARC_TO_MEI_DBG_DONE); + MEI_MASK_AND_ACK_IRQ (pDev->nIrq[IFX_DFEIR]); + disable_irq (pDev->nIrq[IFX_DFEIR]); + //got_int = 1; + 
return; +} + +static void +wait_mem_test_result (void) +{ + uint32_t mbox[5]; + mbox[0] = 0; + + IFX_MEI_DMSG("Waiting Starting\n"); + while (mbox[0] == 0) { + ReadMbox (&mbox[0], 5); + } + IFX_MEI_DMSG("Try to get mem test result.\n"); + ReadMbox (&mbox[0], 5); + if (mbox[0] == 0xA) { + IFX_MEI_DMSG("Success.\n"); + } + else if (mbox[0] == 0xA) { + IFX_MEI_EMSG("Fail,address %X,except data %X,receive data %X\n", + mbox[1], mbox[2], mbox[3]); + } + else { + IFX_MEI_EMSG("Fail\n"); + } +} + +static int +arc_ping_testing (DSL_DEV_Device_t *pDev) +{ +#define MEI_PING 0x00000001 + uint32_t wr_mbox[10], rd_mbox[10]; + int i; + + for (i = 0; i < 10; i++) { + wr_mbox[i] = 0; + rd_mbox[i] = 0; + } + + FX_MEI_DMSG("send ping msg\n"); + wr_mbox[0] = MEI_PING; + WriteMbox (&wr_mbox[0], 10); + + while (got_int == 0) { + MEI_WAIT (100); + } + + IFX_MEI_DMSG("send start event\n"); + got_int = 0; + + wr_mbox[0] = 0x4; + wr_mbox[1] = 0; + wr_mbox[2] = 0; + wr_mbox[3] = (uint32_t) 0xf5acc307e; + wr_mbox[4] = 5; + wr_mbox[5] = 2; + wr_mbox[6] = 0x1c000; + wr_mbox[7] = 64; + wr_mbox[8] = 0; + wr_mbox[9] = 0; + WriteMbox (&wr_mbox[0], 10); + DSL_ENABLE_IRQ (pDev->nIrq[IFX_DFEIR]); + //printk("IFX_MEI_MailboxWrite ret=%d\n",i); + IFX_MEI_LongWordWriteOffset (&dsl_devices[0], + (u32) ME_ME2ARC_INT, + MEI_TO_ARC_MSGAV); + IFX_MEI_DMSG("sleeping\n"); + while (1) { + if (got_int > 0) { + + if (got_int > 3) + IFX_MEI_DMSG("got_int >>>> 3\n"); + else + IFX_MEI_DMSG("got int = %d\n", got_int); + got_int = 0; + //schedule(); + DSL_ENABLE_IRQ (pDev->nIrq[IFX_DFEIR]); + } + //mbox_read(&rd_mbox[0],6); + MEI_WAIT (100); + } + return 0; +} + +static DSL_DEV_MeiError_t +DFE_Loopback_Test (void) +{ + int i = 0; + u32 arc_debug_data = 0, temp; + DSL_DEV_Device_t *pDev = &dsl_devices[0]; + uint32_t wr_mbox[10]; + + IFX_MEI_ResetARC (pDev); + // start the clock + arc_debug_data = ACL_CLK_MODE_ENABLE; + IFX_MEI_DebugWrite (pDev, CRI_CCR0, &arc_debug_data, 1); + +#if defined( DFE_PING_TEST )|| defined( DFE_ATM_LOOPBACK) + // WriteARCreg(AUX_XMEM_LTEST,0); + IFX_MEI_ControlModeSet (pDev, MEI_MASTER_MODE); +#define AUX_XMEM_LTEST 0x128 + _IFX_MEI_DBGLongWordWrite (pDev, MEI_DEBUG_DEC_AUX_MASK, AUX_XMEM_LTEST, 0); + IFX_MEI_ControlModeSet (pDev, JTAG_MASTER_MODE); + + // WriteARCreg(AUX_XDMA_GAP,0); + IFX_MEI_ControlModeSet (pDev, MEI_MASTER_MODE); +#define AUX_XDMA_GAP 0x114 + _IFX_MEI_DBGLongWordWrite (pDev, MEI_DEBUG_DEC_AUX_MASK, AUX_XDMA_GAP, 0); + IFX_MEI_ControlModeSet (pDev, JTAG_MASTER_MODE); + + IFX_MEI_ControlModeSet (pDev, MEI_MASTER_MODE); + temp = 0; + _IFX_MEI_DBGLongWordWrite (pDev, MEI_DEBUG_DEC_AUX_MASK, + (u32) ME_XDATA_BASE_SH + LQ_MEI_BASE_ADDR, temp); + IFX_MEI_ControlModeSet (pDev, JTAG_MASTER_MODE); + + i = IFX_MEI_DFEMemoryAlloc (pDev, SDRAM_SEGMENT_SIZE * 16); + if (i >= 0) { + int idx; + + for (idx = 0; idx < i; idx++) { + DSL_DEV_PRIVATE(pDev)->adsl_mem_info[idx].type = FREE_RELOAD; + IFX_MEI_WRITE_REGISTER_L ((((uint32_t) DSL_DEV_PRIVATE(pDev)->adsl_mem_info[idx].address) & 0x0fffffff), + LQ_MEI_BASE_ADDR + ME_XMEM_BAR_BASE + idx * 4); + IFX_MEI_DMSG("bar%d(%X)=%X\n", idx, + LQ_MEI_BASE_ADDR + ME_XMEM_BAR_BASE + + idx * 4, (((uint32_t) + ((ifx_mei_device_private_t *) + pDev->pPriv)->adsl_mem_info[idx]. 
+ address) & 0x0fffffff)); + memset ((u8 *) DSL_DEV_PRIVATE(pDev)->adsl_mem_info[idx].address, 0, SDRAM_SEGMENT_SIZE); + } + + IFX_MEI_LongWordWriteOffset (pDev, (u32) ME_XDATA_BASE_SH, + ((unsigned long) DSL_DEV_PRIVATE(pDev)->adsl_mem_info[XDATA_REGISTER].address) & 0x0FFFFFFF); + } + else { + IFX_MEI_EMSG ("cannot load image: no memory\n"); + return DSL_DEV_MEI_ERR_FAILURE; + } + //WriteARCreg(AUX_IC_CTRL,2); + IFX_MEI_DMSG("Setting MEI_MASTER_MODE..\n"); + IFX_MEI_ControlModeSet (pDev, MEI_MASTER_MODE); +#define AUX_IC_CTRL 0x11 + _IFX_MEI_DBGLongWordWrite (pDev, MEI_DEBUG_DEC_AUX_MASK, + AUX_IC_CTRL, 2); + IFX_MEI_DMSG("Setting JTAG_MASTER_MODE..\n"); + IFX_MEI_ControlModeSet (pDev, JTAG_MASTER_MODE); + + IFX_MEI_DMSG("Halting ARC...\n"); + IFX_MEI_HaltArc (&dsl_devices[0]); + +#ifdef DFE_PING_TEST + + IFX_MEI_DMSG("ping test image size=%d\n", sizeof (arc_ahb_access_code)); + memcpy ((u8 *) (DSL_DEV_PRIVATE(pDev)-> + adsl_mem_info[0].address + 0x1004), + &arc_ahb_access_code[0], sizeof (arc_ahb_access_code)); + load_jump_table (0x80000 + 0x1004); + +#endif //DFE_PING_TEST + + IFX_MEI_DMSG("ARC ping test code download complete\n"); +#endif //defined( DFE_PING_TEST )|| defined( DFE_ATM_LOOPBACK) +#ifdef DFE_MEM_TEST + IFX_MEI_LongWordWriteOffset (&dsl_devices[0], (u32) ME_ARC2ME_MASK, MSGAV_EN); + + arc_code_page_download (1537, &code_array[0]); + IFX_MEI_DMSG("ARC mem test code download complete\n"); +#endif //DFE_MEM_TEST +#ifdef DFE_ATM_LOOPBACK + arc_debug_data = 0xf; + arc_code_page_download (sizeof(code_array) / sizeof(*code_array), &code_array[0]); + wr_mbox[0] = 0; //TIMER_DELAY - org: 1024 + wr_mbox[1] = 0; //TXFB_START0 + wr_mbox[2] = 0x7f; //TXFB_END0 - org: 49 + wr_mbox[3] = 0x80; //TXFB_START1 - org: 80 + wr_mbox[4] = 0xff; //TXFB_END1 - org: 109 + wr_mbox[5] = 0x100; //RXFB_START0 - org: 0 + wr_mbox[6] = 0x17f; //RXFB_END0 - org: 49 + wr_mbox[7] = 0x180; //RXFB_START1 - org: 256 + wr_mbox[8] = 0x1ff; //RXFB_END1 - org: 315 + WriteMbox (&wr_mbox[0], 9); + // Start Iridia IT_AMODE (in dmp access) why is it required? 
+ IFX_MEI_DebugWrite (&dsl_devices[0], 0x32010, &arc_debug_data, 1); +#endif //DFE_ATM_LOOPBACK + IFX_MEI_IRQEnable (pDev); + IFX_MEI_DMSG("run ARC...\n"); + IFX_MEI_RunArc (&dsl_devices[0]); + +#ifdef DFE_PING_TEST + arc_ping_testing (pDev); +#endif //DFE_PING_TEST +#ifdef DFE_MEM_TEST + wait_mem_test_result (); +#endif //DFE_MEM_TEST + + IFX_MEI_DFEMemoryFree (pDev, FREE_ALL); + return DSL_DEV_MEI_ERR_SUCCESS; +} + +#endif //CONFIG_AMAZON_S_MEI_FW_LOOPBACK + +static int +IFX_MEI_InitDevNode (int num) +{ + if (num == 0) { + if ((dev_major = register_chrdev (dev_major, IFX_MEI_DEVNAME, &bsp_mei_operations)) < 0) { + IFX_MEI_EMSG ("register_chrdev(%d %s) failed!\n", dev_major, IFX_MEI_DEVNAME); + return -ENODEV; + } + } + return 0; +} + +static int +IFX_MEI_CleanUpDevNode (int num) +{ + if (num == 0) + unregister_chrdev (dev_major, MEI_DIRNAME); + return 0; +} + +static int +IFX_MEI_InitDevice (int num) +{ + DSL_DEV_Device_t *pDev; + u32 temp; + pDev = &dsl_devices[num]; + if (pDev == NULL) + return -ENOMEM; + pDev->pPriv = &sDanube_Mei_Private[num]; + memset (pDev->pPriv, 0, sizeof (ifx_mei_device_private_t)); + + memset (&DSL_DEV_PRIVATE(pDev)-> + adsl_mem_info[0], 0, + sizeof (smmu_mem_info_t) * MAX_BAR_REGISTERS); + + if (num == 0) { + pDev->nIrq[IFX_DFEIR] = LQ_MEI_INT; + pDev->nIrq[IFX_DYING_GASP] = LQ_MEI_DYING_GASP_INT; + pDev->base_address = LQ_MEI_BASE_ADDR; + + /* Power up MEI */ +#ifdef CONFIG_LANTIQ_AMAZON_SE + *LQ_PMU_PWDCR &= ~(1 << 9); // enable dsl + *LQ_PMU_PWDCR &= ~(1 << 15); // enable AHB base +#else + temp = lq_r32(LQ_PMU_PWDCR); + temp &= 0xffff7dbe; + lq_w32(temp, LQ_PMU_PWDCR); +#endif + } + pDev->nInUse = 0; + DSL_DEV_PRIVATE(pDev)->modem_ready = 0; + DSL_DEV_PRIVATE(pDev)->arcmsgav = 0; + + MEI_INIT_WAKELIST ("arcq", DSL_DEV_PRIVATE(pDev)->wait_queue_arcmsgav); // for ARCMSGAV + MEI_INIT_WAKELIST ("arcr", DSL_DEV_PRIVATE(pDev)->wait_queue_modemready); // for arc modem ready + + MEI_MUTEX_INIT (DSL_DEV_PRIVATE(pDev)->mei_cmv_sema, 1); // semaphore initialization, mutex +#if 0 + MEI_MASK_AND_ACK_IRQ (pDev->nIrq[IFX_DFEIR]); + MEI_MASK_AND_ACK_IRQ (pDev->nIrq[IFX_DYING_GASP]); +#endif + if (request_irq (pDev->nIrq[IFX_DFEIR], IFX_MEI_IrqHandle, 0, "DFEIR", pDev) != 0) { + IFX_MEI_EMSG ("request_irq %d failed!\n", pDev->nIrq[IFX_DFEIR]); + return -1; + } + /*if (request_irq (pDev->nIrq[IFX_DYING_GASP], IFX_MEI_Dying_Gasp_IrqHandle, 0, "DYING_GASP", pDev) != 0) { + IFX_MEI_EMSG ("request_irq %d failed!\n", pDev->nIrq[IFX_DYING_GASP]); + return -1; + }*/ +// IFX_MEI_DMSG("Device %d initialized. 
IER %#x\n", num, bsp_get_irq_ier(pDev->nIrq[IFX_DYING_GASP])); + return 0; +} + +static int +IFX_MEI_ExitDevice (int num) +{ + DSL_DEV_Device_t *pDev; + pDev = &dsl_devices[num]; + + if (pDev == NULL) + return -EIO; + + disable_irq (pDev->nIrq[IFX_DFEIR]); + disable_irq (pDev->nIrq[IFX_DYING_GASP]); + + free_irq(pDev->nIrq[IFX_DFEIR], pDev); + free_irq(pDev->nIrq[IFX_DYING_GASP], pDev); + + return 0; +} + +static DSL_DEV_Device_t * +IFX_BSP_HandleGet (int maj, int num) +{ + if (num > BSP_MAX_DEVICES) + return NULL; + return &dsl_devices[num]; +} + +DSL_DEV_Device_t * +DSL_BSP_DriverHandleGet (int maj, int num) +{ + DSL_DEV_Device_t *pDev; + + if (num > BSP_MAX_DEVICES) + return NULL; + + pDev = &dsl_devices[num]; + if (!try_module_get(pDev->owner)) + return NULL; + + pDev->nInUse++; + return pDev; +} + +int +DSL_BSP_DriverHandleDelete (DSL_DEV_Device_t * nHandle) +{ + DSL_DEV_Device_t *pDev = (DSL_DEV_Device_t *) nHandle; + if (pDev->nInUse) + pDev->nInUse--; + module_put(pDev->owner); + return 0; +} + +static int +IFX_MEI_Open (DSL_DRV_inode_t * ino, DSL_DRV_file_t * fil) +{ + int maj = MAJOR (ino->i_rdev); + int num = MINOR (ino->i_rdev); + + DSL_DEV_Device_t *pDev = NULL; + if ((pDev = DSL_BSP_DriverHandleGet (maj, num)) == NULL) { + IFX_MEI_EMSG("open(%d:%d) fail!\n", maj, num); + return -EIO; + } + fil->private_data = pDev; + return 0; +} + +static int +IFX_MEI_Release (DSL_DRV_inode_t * ino, DSL_DRV_file_t * fil) +{ + //int maj = MAJOR(ino->i_rdev); + int num = MINOR (ino->i_rdev); + DSL_DEV_Device_t *pDev; + + pDev = &dsl_devices[num]; + if (pDev == NULL) + return -EIO; + DSL_BSP_DriverHandleDelete (pDev); + return 0; +} + +/** + * Callback function for linux userspace program writing + */ +static ssize_t +IFX_MEI_Write (DSL_DRV_file_t * filp, const char *buf, size_t size, loff_t * loff) +{ + DSL_DEV_MeiError_t mei_error = DSL_DEV_MEI_ERR_FAILURE; + long offset = 0; + DSL_DEV_Device_t *pDev = (DSL_DEV_Device_t *) filp->private_data; + + if (pDev == NULL) + return -EIO; + + mei_error = + DSL_BSP_FWDownload (pDev, buf, size, (long *) loff, &offset); + + if (mei_error == DSL_DEV_MEI_ERR_FAILURE) + return -EIO; + return (ssize_t) offset; +} + +/** + * Callback function for linux userspace program ioctling + */ +static int +IFX_MEI_IoctlCopyFrom (int from_kernel, char *dest, char *from, int size) +{ + int ret = 0; + + if (!from_kernel) + ret = copy_from_user ((char *) dest, (char *) from, size); + else + ret = (int)memcpy ((char *) dest, (char *) from, size); + return ret; +} + +static int +IFX_MEI_IoctlCopyTo (int from_kernel, char *dest, char *from, int size) +{ + int ret = 0; + + if (!from_kernel) + ret = copy_to_user ((char *) dest, (char *) from, size); + else + ret = (int)memcpy ((char *) dest, (char *) from, size); + return ret; +} + +static int +IFX_MEI_Ioctls (DSL_DEV_Device_t * pDev, int from_kernel, unsigned int command, unsigned long lon) +{ + int i = 0; + int meierr = DSL_DEV_MEI_ERR_SUCCESS; + u32 base_address = LQ_MEI_BASE_ADDR; + DSL_DEV_WinHost_Message_t winhost_msg, m; + DSL_DEV_MeiDebug_t debugrdwr; + DSL_DEV_MeiReg_t regrdwr; + + switch (command) { + + case DSL_FIO_BSP_CMV_WINHOST: + IFX_MEI_IoctlCopyFrom (from_kernel, (char *) winhost_msg.msg.TxMessage, + (char *) lon, MSG_LENGTH * 2); + + if ((meierr = DSL_BSP_SendCMV (pDev, winhost_msg.msg.TxMessage, YES_REPLY, + winhost_msg.msg.RxMessage)) != DSL_DEV_MEI_ERR_SUCCESS) { + IFX_MEI_EMSG ("WINHOST CMV fail :TxMessage:%X %X %X %X, RxMessage:%X %X %X %X %X\n", + winhost_msg.msg.TxMessage[0], 
winhost_msg.msg.TxMessage[1], winhost_msg.msg.TxMessage[2], winhost_msg.msg.TxMessage[3],
+ winhost_msg.msg.RxMessage[0], winhost_msg.msg.RxMessage[1], winhost_msg.msg.RxMessage[2], winhost_msg.msg.RxMessage[3],
+ winhost_msg.msg.RxMessage[4]);
+ meierr = DSL_DEV_MEI_ERR_FAILURE;
+ }
+ else {
+ IFX_MEI_IoctlCopyTo (from_kernel, (char *) lon,
+ (char *) winhost_msg.msg.RxMessage,
+ MSG_LENGTH * 2);
+ }
+ break;
+
+ case DSL_FIO_BSP_CMV_READ:
+ IFX_MEI_IoctlCopyFrom (from_kernel, (char *) (&regrdwr),
+ (char *) lon, sizeof (DSL_DEV_MeiReg_t));
+
+ IFX_MEI_LongWordRead ((u32) regrdwr.iAddress,
+ (u32 *) & (regrdwr.iData));
+
+ IFX_MEI_IoctlCopyTo (from_kernel, (char *) lon,
+ (char *) (&regrdwr),
+ sizeof (DSL_DEV_MeiReg_t));
+
+ break;
+
+ case DSL_FIO_BSP_CMV_WRITE:
+ IFX_MEI_IoctlCopyFrom (from_kernel, (char *) (&regrdwr),
+ (char *) lon, sizeof (DSL_DEV_MeiReg_t));
+
+ IFX_MEI_LongWordWrite ((u32) regrdwr.iAddress,
+ regrdwr.iData);
+ break;
+
+ case DSL_FIO_BSP_GET_BASE_ADDRESS:
+ IFX_MEI_IoctlCopyTo (from_kernel, (char *) lon,
+ (char *) (&base_address),
+ sizeof (base_address));
+ break;
+
+ case DSL_FIO_BSP_IS_MODEM_READY:
+ i = IFX_MEI_IsModemReady (pDev);
+ IFX_MEI_IoctlCopyTo (from_kernel, (char *) lon,
+ (char *) (&i), sizeof (int));
+ meierr = DSL_DEV_MEI_ERR_SUCCESS;
+ break;
+ case DSL_FIO_BSP_RESET:
+ case DSL_FIO_BSP_REBOOT:
+ meierr = IFX_MEI_CpuModeSet (pDev, DSL_CPU_RESET);
+ meierr = IFX_MEI_CpuModeSet (pDev, DSL_CPU_HALT);
+ break;
+
+ case DSL_FIO_BSP_HALT:
+ meierr = IFX_MEI_CpuModeSet (pDev, DSL_CPU_HALT);
+ break;
+
+ case DSL_FIO_BSP_RUN:
+ meierr = IFX_MEI_CpuModeSet (pDev, DSL_CPU_RUN);
+ break;
+ case DSL_FIO_BSP_BOOTDOWNLOAD:
+ meierr = IFX_MEI_DownloadBootCode (pDev);
+ break;
+ case DSL_FIO_BSP_JTAG_ENABLE:
+ meierr = IFX_MEI_ArcJtagEnable (pDev, 1);
+ break;
+
+ case DSL_FIO_BSP_REMOTE:
+ IFX_MEI_IoctlCopyFrom (from_kernel, (char *) (&i),
+ (char *) lon, sizeof (int));
+
+ meierr = IFX_MEI_AdslMailboxIRQEnable (pDev, i);
+ break;
+
+ case DSL_FIO_BSP_DSL_START:
+ IFX_MEI_DMSG("DSL_FIO_BSP_DSL_START\n");
+ if ((meierr = IFX_MEI_RunAdslModem (pDev)) != DSL_DEV_MEI_ERR_SUCCESS) {
+ IFX_MEI_EMSG ("IFX_MEI_RunAdslModem() error...");
+ meierr = DSL_DEV_MEI_ERR_FAILURE;
+ }
+ break;
+
+ case DSL_FIO_BSP_DEBUG_READ:
+ case DSL_FIO_BSP_DEBUG_WRITE:
+ IFX_MEI_IoctlCopyFrom (from_kernel,
+ (char *) (&debugrdwr),
+ (char *) lon,
+ sizeof (debugrdwr));
+
+ if (command == DSL_FIO_BSP_DEBUG_READ)
+ meierr = DSL_BSP_MemoryDebugAccess (pDev,
+ DSL_BSP_MEMORY_READ,
+ debugrdwr.
+ iAddress,
+ debugrdwr.
+ buffer,
+ debugrdwr.
+ iCount);
+ else
+ meierr = DSL_BSP_MemoryDebugAccess (pDev,
+ DSL_BSP_MEMORY_WRITE,
+ debugrdwr.
+ iAddress,
+ debugrdwr.
+ buffer,
+ debugrdwr. 
+ iCount); + + IFX_MEI_IoctlCopyTo (from_kernel, (char *) lon, (char *) (&debugrdwr), sizeof (debugrdwr)); + break; + case DSL_FIO_BSP_GET_VERSION: + IFX_MEI_IoctlCopyTo (from_kernel, (char *) lon, (char *) (&bsp_mei_version), sizeof (DSL_DEV_Version_t)); + break; + +#define LQ_MPS_CHIPID_VERSION_GET(value) (((value) >> 28) & ((1 << 4) - 1)) + case DSL_FIO_BSP_GET_CHIP_INFO: + bsp_chip_info.major = 1; + bsp_chip_info.minor = LQ_MPS_CHIPID_VERSION_GET(*LQ_MPS_CHIPID); + IFX_MEI_IoctlCopyTo (from_kernel, (char *) lon, (char *) (&bsp_chip_info), sizeof (DSL_DEV_HwVersion_t)); + meierr = DSL_DEV_MEI_ERR_SUCCESS; + break; + + case DSL_FIO_BSP_FREE_RESOURCE: + makeCMV (H2D_CMV_READ, DSL_CMV_GROUP_STAT, 4, 0, 1, NULL, m.msg.TxMessage); + if (DSL_BSP_SendCMV (pDev, m.msg.TxMessage, YES_REPLY, m.msg.RxMessage) != DSL_DEV_MEI_ERR_SUCCESS) { + meierr = DSL_DEV_MEI_ERR_FAILURE; + return -EIO; + } + IFX_MEI_DMSG("RxMessage[4] = %#x\n", m.msg.RxMessage[4]); + if (!(m.msg.RxMessage[4] & DSL_DEV_STAT_CODESWAP_COMPLETE)) { + meierr = DSL_DEV_MEI_ERR_FAILURE; + return -EAGAIN; + } + IFX_MEI_DMSG("Freeing all memories marked FREE_SHOWTIME\n"); + IFX_MEI_DFEMemoryFree (pDev, FREE_SHOWTIME); + meierr = DSL_DEV_MEI_ERR_SUCCESS; + break; +#ifdef CONFIG_IFXMIPS_AMAZON_SE + case DSL_FIO_ARC_MUX_TEST: + AMAZON_SE_MEI_ARC_MUX_Test(); + break; +#endif + default: +// IFX_MEI_EMSG("Invalid IOCTL command: %d\n"); + break; + } + return meierr; +} + +#ifdef CONFIG_IFXMIPS_AMAZON_SE +void AMAZON_SE_MEI_ARC_MUX_Test(void) +{ + u32 *p, i; + *LQ_RCU_RST |= LQ_RCU_RST_REQ_MUX_ARC; + + p = (u32*)(DFE_LDST_BASE_ADDR + IRAM0_BASE); + IFX_MEI_EMSG("Writing to IRAM0(%p)...\n", p); + for (i = 0; i < IRAM0_SIZE/sizeof(u32); i++, p++) { + *p = 0xdeadbeef; + if (*p != 0xdeadbeef) + IFX_MEI_EMSG("%p: %#x\n", p, *p); + } + + p = (u32*)(DFE_LDST_BASE_ADDR + IRAM1_BASE); + IFX_MEI_EMSG("Writing to IRAM1(%p)...\n", p); + for (i = 0; i < IRAM1_SIZE/sizeof(u32); i++, p++) { + *p = 0xdeadbeef; + if (*p != 0xdeadbeef) + IFX_MEI_EMSG("%p: %#x\n", p, *p); + } + + p = (u32*)(DFE_LDST_BASE_ADDR + BRAM_BASE); + IFX_MEI_EMSG("Writing to BRAM(%p)...\n", p); + for (i = 0; i < BRAM_SIZE/sizeof(u32); i++, p++) { + *p = 0xdeadbeef; + if (*p != 0xdeadbeef) + IFX_MEI_EMSG("%p: %#x\n", p, *p); + } + + p = (u32*)(DFE_LDST_BASE_ADDR + XRAM_BASE); + IFX_MEI_EMSG("Writing to XRAM(%p)...\n", p); + for (i = 0; i < XRAM_SIZE/sizeof(u32); i++, p++) { + *p = 0xdeadbeef; + if (*p != 0xdeadbeef) + IFX_MEI_EMSG("%p: %#x\n", p, *p); + } + + p = (u32*)(DFE_LDST_BASE_ADDR + YRAM_BASE); + IFX_MEI_EMSG("Writing to YRAM(%p)...\n", p); + for (i = 0; i < YRAM_SIZE/sizeof(u32); i++, p++) { + *p = 0xdeadbeef; + if (*p != 0xdeadbeef) + IFX_MEI_EMSG("%p: %#x\n", p, *p); + } + + p = (u32*)(DFE_LDST_BASE_ADDR + EXT_MEM_BASE); + IFX_MEI_EMSG("Writing to EXT_MEM(%p)...\n", p); + for (i = 0; i < EXT_MEM_SIZE/sizeof(u32); i++, p++) { + *p = 0xdeadbeef; + if (*p != 0xdeadbeef) + IFX_MEI_EMSG("%p: %#x\n", p, *p); + } + *LQ_RCU_RST &= ~LQ_RCU_RST_REQ_MUX_ARC; +} +#endif +int +DSL_BSP_KernelIoctls (DSL_DEV_Device_t * pDev, unsigned int command, + unsigned long lon) +{ + int error = 0; + + error = IFX_MEI_Ioctls (pDev, 1, command, lon); + return error; +} + +static int +IFX_MEI_UserIoctls (DSL_DRV_inode_t * ino, DSL_DRV_file_t * fil, + unsigned int command, unsigned long lon) +{ + int error = 0; + int maj = MAJOR (ino->i_rdev); + int num = MINOR (ino->i_rdev); + DSL_DEV_Device_t *pDev; + + pDev = IFX_BSP_HandleGet (maj, num); + if (pDev == NULL) + return -EIO; + + error = IFX_MEI_Ioctls 
(pDev, 0, command, lon);
+ return error;
+}
+
+#ifdef CONFIG_PROC_FS
+/*
+ * Register a callback function for linux proc filesystem
+ */
+static int
+IFX_MEI_InitProcFS (int num)
+{
+ struct proc_dir_entry *entry;
+ int i ;
+ DSL_DEV_Device_t *pDev;
+ reg_entry_t regs_temp[PROC_ITEMS] = {
+ /* flag, name, description } */
+ {NULL, "arcmsgav", "arc to mei message ", 0},
+ {NULL, "cmv_reply", "cmv needs reply", 0},
+ {NULL, "cmv_waiting", "waiting for cmv reply from arc", 0},
+ {NULL, "modem_ready_cnt", "ARC to MEI indicator count", 0},
+ {NULL, "cmv_count", "MEI to ARC CMVs", 0},
+ {NULL, "reply_count", "ARC to MEI Reply", 0},
+ {NULL, "Recent_indicator", "most recent indicator", 0},
+ {NULL, "fw_version", "Firmware Version", 0},
+ {NULL, "fw_date", "Firmware Date", 0},
+ {NULL, "meminfo", "Memory Allocation Information", 0},
+ {NULL, "version", "MEI version information", 0},
+ };
+
+ pDev = &dsl_devices[num];
+ if (pDev == NULL)
+ return -ENOMEM;
+
+ regs_temp[0].flag = &(DSL_DEV_PRIVATE(pDev)->arcmsgav);
+ regs_temp[1].flag = &(DSL_DEV_PRIVATE(pDev)->cmv_reply);
+ regs_temp[2].flag = &(DSL_DEV_PRIVATE(pDev)->cmv_waiting);
+ regs_temp[3].flag = &(DSL_DEV_PRIVATE(pDev)->modem_ready_cnt);
+ regs_temp[4].flag = &(DSL_DEV_PRIVATE(pDev)->cmv_count);
+ regs_temp[5].flag = &(DSL_DEV_PRIVATE(pDev)->reply_count);
+ regs_temp[6].flag = (int *) &(DSL_DEV_PRIVATE(pDev)->Recent_indicator);
+
+ memcpy ((char *) regs[num], (char *) regs_temp, sizeof (regs_temp));
+ // procfs
+ meidir = proc_mkdir (MEI_DIRNAME, NULL);
+ if (meidir == NULL) {
+ IFX_MEI_EMSG ("Failed to create /proc/%s\n", MEI_DIRNAME);
+ return (-ENOMEM);
+ }
+
+ for (i = 0; i < NUM_OF_REG_ENTRY; i++) {
+ entry = create_proc_entry (regs[num][i].name,
+ S_IWUSR | S_IRUSR | S_IRGRP |
+ S_IROTH, meidir);
+ if (entry) {
+ regs[num][i].low_ino = entry->low_ino;
+ entry->proc_fops = &IFX_MEI_ProcOperations;
+ }
+ else {
+ IFX_MEI_EMSG ("Failed to create /proc/%s/%s\n", MEI_DIRNAME, regs[num][i].name);
+ return (-ENOMEM);
+ }
+ }
+ return 0;
+}
+
+/*
+ * Reading function for linux proc filesystem
+ */
+static int
+IFX_MEI_ProcRead (struct file *file, char *buf, size_t nbytes, loff_t * ppos)
+{
+ int i_ino = (file->f_dentry->d_inode)->i_ino;
+ char *p = buf;
+ int i;
+ int num;
+ reg_entry_t *entry = NULL;
+ DSL_DEV_Device_t *pDev = NULL;
+ DSL_DEV_WinHost_Message_t m;
+
+ for (num = 0; num < BSP_MAX_DEVICES; num++) {
+ for (i = 0; i < NUM_OF_REG_ENTRY; i++) {
+ if (regs[num][i].low_ino == (unsigned short)i_ino) {
+ entry = &regs[num][i];
+ pDev = &dsl_devices[num];
+ break;
+ }
+ }
+ }
+ if (entry == NULL)
+ return -EINVAL;
+ else if (strcmp(entry->name, "meminfo") == 0) {
+ if (*ppos > 0) /* Assume reading completed in previous read */
+ return 0;
+ p += sprintf (p, "No Address Size\n");
+ for (i = 0; i < MAX_BAR_REGISTERS; i++) {
+ p += sprintf (p, "BAR[%02d] Addr:0x%08X Size:%lu\n",
+ i, (u32) DSL_DEV_PRIVATE(pDev)->adsl_mem_info[i].address,
+ DSL_DEV_PRIVATE(pDev)-> adsl_mem_info[i].size);
+ //printk( "BAR[%02d] Addr:0x%08X Size:%d\n",i,adsl_mem_info[i].address,adsl_mem_info[i].size);
+ }
+ *ppos += (p - buf);
+ } else if (strcmp(entry->name, "fw_version") == 0) {
+ if (*ppos > 0) /* Assume reading completed in previous read */
+ return 0;
+ if (DSL_DEV_PRIVATE(pDev)->modem_ready_cnt < 1)
+ return -EAGAIN;
+ //major:bits 0-7
+ //minor:bits 8-15
+ makeCMV (H2D_CMV_READ, DSL_CMV_GROUP_INFO, 54, 0, 1, NULL, m.msg.TxMessage);
+ if (DSL_BSP_SendCMV (pDev, m.msg.TxMessage, YES_REPLY, m.msg.RxMessage) != DSL_DEV_MEI_ERR_SUCCESS)
+ return -EIO;
+ p += 
sprintf(p, "FW Version: %d.%d.", m.msg.RxMessage[4] & 0xFF, (m.msg.RxMessage[4] >> 8) & 0xFF);
+ //sub_version:bits 4-7
+ //int_version:bits 0-3
+ //spl_appl:bits 8-13
+ //rel_state:bits 14-15
+ makeCMV (H2D_CMV_READ, DSL_CMV_GROUP_INFO, 54, 1, 1, NULL, m.msg.TxMessage);
+ if (DSL_BSP_SendCMV (pDev, m.msg.TxMessage, YES_REPLY, m.msg.RxMessage) != DSL_DEV_MEI_ERR_SUCCESS)
+ return -EIO;
+ p += sprintf(p, "%d.%d.%d.%d\n",
+ (m.msg.RxMessage[4] >> 4) & 0xF, m.msg.RxMessage[4] & 0xF,
+ (m.msg.RxMessage[4] >> 14) & 3, (m.msg.RxMessage[4] >> 8) & 0x3F);
+ *ppos += (p - buf);
+ } else if (strcmp(entry->name, "fw_date") == 0) {
+ if (*ppos > 0) /* Assume reading completed in previous read */
+ return 0;
+ if (DSL_DEV_PRIVATE(pDev)->modem_ready_cnt < 1)
+ return -EAGAIN;
+
+ makeCMV (H2D_CMV_READ, DSL_CMV_GROUP_INFO, 55, 0, 1, NULL, m.msg.TxMessage);
+ if (DSL_BSP_SendCMV (pDev, m.msg.TxMessage, YES_REPLY, m.msg.RxMessage) != DSL_DEV_MEI_ERR_SUCCESS)
+ return -EIO;
+ /* Day/Month */
+ p += sprintf(p, "FW Date: %d.%d.", m.msg.RxMessage[4] & 0xFF, (m.msg.RxMessage[4] >> 8) & 0xFF);
+
+ makeCMV (H2D_CMV_READ, DSL_CMV_GROUP_INFO, 55, 2, 1, NULL, m.msg.TxMessage);
+ if (DSL_BSP_SendCMV (pDev, m.msg.TxMessage, YES_REPLY, m.msg.RxMessage) != DSL_DEV_MEI_ERR_SUCCESS)
+ return -EIO;
+ /* Year */
+ p += sprintf(p, "%d ", m.msg.RxMessage[4]);
+
+ makeCMV (H2D_CMV_READ, DSL_CMV_GROUP_INFO, 55, 1, 1, NULL, m.msg.TxMessage);
+ if (DSL_BSP_SendCMV (pDev, m.msg.TxMessage, YES_REPLY, m.msg.RxMessage) != DSL_DEV_MEI_ERR_SUCCESS)
+ return -EIO;
+ /* Hour:Minute */
+ p += sprintf(p, "%d:%d\n", (m.msg.RxMessage[4] >> 8) & 0xFF, m.msg.RxMessage[4] & 0xFF);
+
+ *ppos += (p - buf);
+ } else if (strcmp(entry->name, "version") == 0) {
+ if (*ppos > 0) /* Assume reading completed in previous read */
+ return 0;
+ p += sprintf (p, "IFX MEI V%ld.%ld.%ld\n", bsp_mei_version.major, bsp_mei_version.minor, bsp_mei_version.revision);
+
+ *ppos += (p - buf);
+ } else if (entry->flag != (int *) DSL_DEV_PRIVATE(pDev)->Recent_indicator) {
+ if (*ppos > 0) /* Assume reading completed in previous read */
+ return 0; // indicates end of file
+ p += sprintf (p, "0x%08X\n\n", *(entry->flag));
+ *ppos += (p - buf);
+ if ((p - buf) > nbytes) /* Assume output can be read at one time */
+ return -EINVAL;
+ } else {
+ if ((int) (*ppos) / ((int) 7) == 16)
+ return 0; // indicate end of the message
+ p += sprintf (p, "0x%04X\n\n", *(((u16 *) (entry->flag)) + (int) (*ppos) / ((int) 7)));
+ *ppos += (p - buf);
+ }
+ return p - buf;
+}
+
+/*
+ * Writing function for linux proc filesystem
+ */
+static ssize_t
+IFX_MEI_ProcWrite (struct file *file, const char *buffer, size_t count, loff_t * ppos)
+{
+ int i_ino = (file->f_dentry->d_inode)->i_ino;
+ reg_entry_t *current_reg = NULL;
+ int i = 0;
+ int num = 0;
+ unsigned long newRegValue = 0;
+ char *endp = NULL;
+ DSL_DEV_Device_t *pDev = NULL;
+
+ for (num = 0; num < BSP_MAX_DEVICES; num++) {
+ for (i = 0; i < NUM_OF_REG_ENTRY; i++) {
+ if (regs[num][i].low_ino == i_ino) {
+ current_reg = &regs[num][i];
+ pDev = &dsl_devices[num];
+ break;
+ }
+ }
+ }
+ if ((current_reg == NULL)
+ || (current_reg->flag ==
+ (int *) DSL_DEV_PRIVATE(pDev)->
+ Recent_indicator))
+ return -EINVAL;
+
+ newRegValue = simple_strtoul (buffer, &endp, 0);
+ *(current_reg->flag) = (int) newRegValue;
+ return (count + endp - buffer);
+}
+#endif //CONFIG_PROC_FS
+
+static int adsl_dummy_ledcallback(void)
+{
+ return 0;
+}
+
+int ifx_mei_atm_led_blink(void)
+{
+ return g_adsl_ledcallback();
+}
+EXPORT_SYMBOL(ifx_mei_atm_led_blink);
+ 
+int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr)
+{
+ int i;
+
+ if ( is_showtime ) {
+ *is_showtime = g_tx_link_rate[0] == 0 && g_tx_link_rate[1] == 0 ? 0 : 1;
+ }
+
+ if ( port_cell ) {
+ for ( i = 0; i < port_cell->port_num && i < 2; i++ )
+ port_cell->tx_link_rate[i] = g_tx_link_rate[i];
+ }
+
+ if ( xdata_addr ) {
+ if ( g_tx_link_rate[0] == 0 && g_tx_link_rate[1] == 0 )
+ *xdata_addr = NULL;
+ else
+ *xdata_addr = g_xdata_addr;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ifx_mei_atm_showtime_check);
+
+/*
+ * Module initialization function
+ */
+int __init
+IFX_MEI_ModuleInit (void)
+{
+ int i = 0;
+ static struct class *dsl_class;
+
+ pr_info("IFX MEI Version %ld.%02ld.%02ld\n", bsp_mei_version.major, bsp_mei_version.minor, bsp_mei_version.revision);
+
+ for (i = 0; i < BSP_MAX_DEVICES; i++) {
+ if (IFX_MEI_InitDevice (i) != 0) {
+ IFX_MEI_EMSG("Init device fail!\n");
+ return -EIO;
+ }
+ IFX_MEI_InitDevNode (i);
+#ifdef CONFIG_PROC_FS
+ IFX_MEI_InitProcFS (i);
+#endif
+ }
+ for (i = 0; i <= DSL_BSP_CB_LAST ; i++)
+ dsl_bsp_event_callback[i].function = NULL;
+
+#ifdef CONFIG_LQ_MEI_FW_LOOPBACK
+ IFX_MEI_DMSG("Start loopback test...\n");
+ DFE_Loopback_Test ();
+#endif
+ dsl_class = class_create(THIS_MODULE, "ifx_mei");
+ device_create(dsl_class, NULL, MKDEV(MEI_MAJOR, 0), NULL, "ifx_mei");
+ return 0;
+}
+
+void __exit
+IFX_MEI_ModuleExit (void)
+{
+ int i = 0;
+ int num;
+
+ for (num = 0; num < BSP_MAX_DEVICES; num++) {
+ IFX_MEI_CleanUpDevNode (num);
+#ifdef CONFIG_PROC_FS
+ for (i = 0; i < NUM_OF_REG_ENTRY; i++) {
+ remove_proc_entry (regs[num][i].name, meidir);
+ }
+#endif
+ }
+
+ remove_proc_entry (MEI_DIRNAME, NULL);
+ for (i = 0; i < BSP_MAX_DEVICES; i++) {
+ IFX_MEI_ExitDevice (i);
+ }
+}
+
+/* export function for DSL Driver */
+
+/* The functions of MEI_DriverHandleGet and MEI_DriverHandleDelete are
+something like open/close in kernel space, where the open could be used
+to register a callback for autonomous messages and returns a MEI driver context pointer (comparable to the file descriptor in user space).
+ The context will be required for future multi-line chips! */
+
+EXPORT_SYMBOL (DSL_BSP_DriverHandleGet);
+EXPORT_SYMBOL (DSL_BSP_DriverHandleDelete);
+
+EXPORT_SYMBOL (DSL_BSP_ATMLedCBRegister);
+EXPORT_SYMBOL (DSL_BSP_ATMLedCBUnregister);
+EXPORT_SYMBOL (DSL_BSP_KernelIoctls);
+EXPORT_SYMBOL (DSL_BSP_AdslLedInit);
+//EXPORT_SYMBOL (DSL_BSP_AdslLedSet);
+EXPORT_SYMBOL (DSL_BSP_FWDownload);
+EXPORT_SYMBOL (DSL_BSP_Showtime);
+
+EXPORT_SYMBOL (DSL_BSP_MemoryDebugAccess);
+EXPORT_SYMBOL (DSL_BSP_SendCMV);
+
+// provide a register/unregister function for DSL driver to register an event callback function
+EXPORT_SYMBOL (DSL_BSP_EventCBRegister);
+EXPORT_SYMBOL (DSL_BSP_EventCBUnregister);
+
+module_init (IFX_MEI_ModuleInit);
+module_exit (IFX_MEI_ModuleExit);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/target/linux/lantiq/Makefile b/target/linux/lantiq/Makefile
new file mode 100644
index 0000000000..5afe5662da
--- /dev/null
+++ b/target/linux/lantiq/Makefile
@@ -0,0 +1,25 @@
+#
+# Copyright (C) 2007-2010 OpenWrt.org
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information. 
+# +include $(TOPDIR)/rules.mk + +ARCH:=mips +BOARD:=lantiq +BOARDNAME:=Lantiq GPON/XWAY +FEATURES:=squashfs jffs2 atm +SUBTARGETS:=xway + +LINUX_VERSION:=2.6.35.9 + +CFLAGS=-Os -pipe -mips32r2 -mtune=mips32r2 -funit-at-a-time + +include $(INCLUDE_DIR)/target.mk + +define Target/Description + Build firmware images for Lantiq SoC +endef + +$(eval $(call BuildTarget)) diff --git a/target/linux/lantiq/base-files/etc/config/network b/target/linux/lantiq/base-files/etc/config/network new file mode 100644 index 0000000000..183e6bf34c --- /dev/null +++ b/target/linux/lantiq/base-files/etc/config/network @@ -0,0 +1,26 @@ +config interface loopback + option ifname lo + option proto static + option ipaddr 127.0.0.1 + option netmask 255.0.0.0 + +config interface lan + option ifname eth0 + option type bridge + option proto static + option ipaddr 192.168.1.1 + option netmask 255.255.255.0 + +config atm-bridge + option unit 0 + option encaps llc + option vpi 1 + option vci 32 + option payload bridged # some ISPs need this set to 'routed' + +config interface wan + option ifname nas0 + option proto pppoe + option username "" + option password "" + option unit 0 diff --git a/target/linux/lantiq/base-files/etc/inittab b/target/linux/lantiq/base-files/etc/inittab new file mode 100644 index 0000000000..7989a7f60e --- /dev/null +++ b/target/linux/lantiq/base-files/etc/inittab @@ -0,0 +1,4 @@ +::sysinit:/etc/init.d/rcS S boot +::shutdown:/etc/init.d/rcS K stop +ttyS0::askfirst:/bin/ash --login +ttyS1::askfirst:/bin/ash --login diff --git a/target/linux/lantiq/base-files/lib/upgrade/platform.sh b/target/linux/lantiq/base-files/lib/upgrade/platform.sh new file mode 100755 index 0000000000..247ba1a25c --- /dev/null +++ b/target/linux/lantiq/base-files/lib/upgrade/platform.sh @@ -0,0 +1,25 @@ +PART_NAME=linux + +platform_check_image() { + [ "$ARGC" -gt 1 ] && return 1 + + case "$(get_magic_word "$1")" in + # .trx files + 2705) return 0;; + *) + echo "Invalid image type" + return 1 + ;; + esac +} + +# use default for platform_do_upgrade() + +disable_watchdog() { + killall watchdog + ( ps | grep -v 'grep' | grep '/dev/watchdog' ) && { + echo 'Could not disable watchdog' + return 1 + } +} +append sysupgrade_pre_upgrade disable_watchdog diff --git a/target/linux/lantiq/config-default b/target/linux/lantiq/config-default new file mode 100644 index 0000000000..95104710ac --- /dev/null +++ b/target/linux/lantiq/config-default @@ -0,0 +1,163 @@ +# CONFIG_TC35815 is not set +# CONFIG_TINY_RCU is not set +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_HAVE_IDE is not set +# CONFIG_64BIT is not set +# CONFIG_ALCHEMY_GPIO_INDIRECT is not set +# CONFIG_AR7 is not set +# CONFIG_ARCH_HAS_ILOG2_U32 is not set +# CONFIG_ARCH_HAS_ILOG2_U64 is not set +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_BCM47XX is not set +# CONFIG_BCM63XX is not set +# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set +# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set +# CONFIG_CPU_CAVIUM_OCTEON is not set +# CONFIG_CPU_LITTLE_ENDIAN is not set +# CONFIG_CPU_LOONGSON2E is not set +# CONFIG_CPU_LOONGSON2F is not set +# CONFIG_CPU_MIPS32_R1 is not set +# CONFIG_CPU_MIPS64_R1 is not set +# CONFIG_CPU_MIPS64_R2 is not set +# CONFIG_CPU_NEVADA is not set +# CONFIG_CPU_R10000 is not set +# CONFIG_CPU_R3000 is not set +# CONFIG_CPU_R4300 is not set +# CONFIG_CPU_R4X00 is not set +# CONFIG_CPU_R5000 is not set +# CONFIG_CPU_R5432 is not set +# CONFIG_CPU_R5500 is not set +# CONFIG_CPU_R6000 is not set +# CONFIG_CPU_R8000 is not set +# CONFIG_CPU_RM7000 is not set +# 
CONFIG_CPU_RM9000 is not set +# CONFIG_CPU_SB1 is not set +# CONFIG_CPU_TX39XX is not set +# CONFIG_CPU_TX49XX is not set +# CONFIG_CPU_VR41XX is not set +# CONFIG_DM9000 is not set +# CONFIG_FSNOTIFY is not set +# CONFIG_HZ_100 is not set +# CONFIG_LOONGSON_UART_BASE is not set +# CONFIG_MACH_ALCHEMY is not set +# CONFIG_MACH_DECSTATION is not set +# CONFIG_MACH_JAZZ is not set +# CONFIG_MACH_LOONGSON is not set +# CONFIG_MACH_TX39XX is not set +# CONFIG_MACH_TX49XX is not set +# CONFIG_MACH_VR41XX is not set +# CONFIG_MIKROTIK_RB532 is not set +# CONFIG_MIPS_COBALT is not set +# CONFIG_MIPS_MALTA is not set +# CONFIG_MIPS_MT_SMP is not set +# CONFIG_MIPS_MT_SMTC is not set +# CONFIG_MIPS_SIM is not set +# CONFIG_MIPS_VPE_LOADER is not set +# CONFIG_NO_IOPORT is not set +# CONFIG_NXP_STB220 is not set +# CONFIG_NXP_STB225 is not set +# CONFIG_PMC_MSP is not set +# CONFIG_PMC_YOSEMITE is not set +# CONFIG_PNX8550_JBS is not set +# CONFIG_PNX8550_STB810 is not set +# CONFIG_POWERTV is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SERIAL_8250 is not set +# CONFIG_SERIAL_8250_EXTENDED is not set +# CONFIG_SGI_IP22 is not set +# CONFIG_SGI_IP27 is not set +# CONFIG_SGI_IP28 is not set +# CONFIG_SGI_IP32 is not set +# CONFIG_SIBYTE_BIGSUR is not set +# CONFIG_SIBYTE_CARMEL is not set +# CONFIG_SIBYTE_CRHINE is not set +# CONFIG_SIBYTE_CRHONE is not set +# CONFIG_SIBYTE_LITTLESUR is not set +# CONFIG_SIBYTE_RHONE is not set +# CONFIG_SIBYTE_SENTOSA is not set +# CONFIG_SIBYTE_SWARM is not set +CONFIG_32BIT=y +CONFIG_ADM6996_PHY=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_POPULATES_NODE_MAP=y +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_ARCH_SUPPORTS_OPROFILE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_BITREVERSE=y +CONFIG_CEVT_R4K=y +CONFIG_CEVT_R4K_LIB=y +CONFIG_CPU_BIG_ENDIAN=y +CONFIG_CPU_HAS_PREFETCH=y +CONFIG_CPU_HAS_SYNC=y +CONFIG_CPU_MIPS32=y +CONFIG_CPU_MIPS32_R2=y +CONFIG_CPU_MIPSR2=y +CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y +CONFIG_CPU_SUPPORTS_HIGHMEM=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CSRC_R4K=y +CONFIG_CSRC_R4K_LIB=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DEVPORT=y +CONFIG_DMA_NEED_PCI_MAP_STATE=y +CONFIG_DMA_NONCOHERENT=y +CONFIG_EARLY_PRINTK=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +CONFIG_GENERIC_FIND_NEXT_BIT=y +CONFIG_GENERIC_GPIO=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_SYSFS=y +CONFIG_HARDWARE_WATCHPOINTS=y +CONFIG_HAS_DMA=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_HAVE_OPROFILE=y +CONFIG_HW_RANDOM=y +CONFIG_HZ=250 +CONFIG_HZ_250=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_IRQ_CPU=y +CONFIG_LANTIQ=y +CONFIG_MIPS=y +CONFIG_MIPS_L1_CACHE_SHIFT=5 +CONFIG_MIPS_MACHINE=y +CONFIG_MIPS_MT_DISABLED=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_GEOMETRY=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_LANTIQ=y +CONFIG_NLS=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PHYLIB=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_SERIAL_LANTIQ=y +CONFIG_SWAP_IO_SPACE=y +CONFIG_SWCONFIG=y +CONFIG_SYS_HAS_CPU_MIPS32_R1=y +CONFIG_SYS_HAS_CPU_MIPS32_R2=y +CONFIG_SYS_HAS_EARLY_PRINTK=y +CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y 
+CONFIG_SYS_SUPPORTS_ARBIT_HZ=y +CONFIG_SYS_SUPPORTS_BIG_ENDIAN=y +CONFIG_SYS_SUPPORTS_MULTITHREADING=y +CONFIG_TRAD_SIGNALS=y +CONFIG_TREE_RCU=y +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_IFX_UDP_REDIRECT=y diff --git a/target/linux/lantiq/image/Makefile b/target/linux/lantiq/image/Makefile new file mode 100644 index 0000000000..ee1f86dd45 --- /dev/null +++ b/target/linux/lantiq/image/Makefile @@ -0,0 +1,89 @@ +# +# Copyright (C) 2010 OpenWrt.org +# +# This is free software, licensed under the GNU General Public License v2. +# See /LICENSE for more information. +# +include $(TOPDIR)/rules.mk +include $(INCLUDE_DIR)/image.mk + +JFFS2_BLOCKSIZE = 64k 128k 256k + +xway_cmdline=-console=ttyS1,115200 rootfstype=squashfs,jffs2 +falcon_cmdline=-console=ttyS0,115200 rootfstype=squashfs,jffs2 + +define CompressLzma + $(STAGING_DIR_HOST)/bin/lzma e $(1) $(2) +endef + +define PatchKernelLzma + cp $(KDIR)/vmlinux $(KDIR)/vmlinux-$(1) + $(STAGING_DIR_HOST)/bin/patch-cmdline $(KDIR)/vmlinux-$(1) '$(strip $(2))' + $(call CompressLzma,$(KDIR)/vmlinux-$(1),$(KDIR)/vmlinux-$(1).lzma) +endef + +define MkImageLzma + mkimage -A mips -O linux -T kernel -a 0x80002000 -C lzma \ + -e 0x80002000 -n 'MIPS OpenWrt Linux-$(LINUX_VERSION)' \ + -d $(KDIR)/vmlinux-$(1).lzma $(KDIR)/uImage-$(1) +endef + +define Image/Build/squashfs + cat $(KDIR)/uImage-$(2) $(KDIR)/root.$(1) > $(BIN_DIR)/$(IMG_PREFIX)-$(2)-$(1).image + $(call prepare_generic_squashfs,$(BIN_DIR)/$(IMG_PREFIX)-$(2)-$(1).image) +endef + +define Image/Build/jffs2-64k + dd if=$(KDIR)/uImage-$(2) of=$(KDIR)/uImage-$(2)-$(1) bs=64k conv=sync + cat $(KDIR)/uImage-$(2)-$(1) $(KDIR)/root.$(1) > $(BIN_DIR)/$(IMG_PREFIX)-$(2)-$(1).image +endef + +define Image/Build/jffs2-128k + dd if=$(KDIR)/uImage-$(2) of=$(KDIR)/uImage-$(2)-$(1) bs=128k conv=sync + cat $(KDIR)/uImage-$(2)-$(1) $(KDIR)/root.$(1) > $(BIN_DIR)/$(IMG_PREFIX)-$(2)-$(1).image +endef + +define Image/Build/jffs2-256k + dd if=$(KDIR)/uImage-$(2) of=$(KDIR)/uImage-$(2)-$(1) bs=256k conv=sync + cat $(KDIR)/uImage-$(2)-$(1) $(KDIR)/root.$(1) > $(BIN_DIR)/$(IMG_PREFIX)-$(2)-$(1).image +endef + +define Image/BuildKernel/Template + $(call PatchKernelLzma,$(1),$(if $(2),$(2) machtype=$(1),)) + $(call MkImageLzma,$(1)) + $(CP) $(KDIR)/uImage-$(1) $(BIN_DIR)/$(IMG_PREFIX)-$(1)-uImage +endef + +ifeq ($(CONFIG_SOC_LANTIQ_XWAY),y) +define Image/BuildKernel + $(call Image/BuildKernel/Template,EASY4010,$(xway_cmdline)) + $(call Image/BuildKernel/Template,EASY50712,$(xway_cmdline)) + $(call Image/BuildKernel/Template,EASY50812,$(xway_cmdline)) + $(call Image/BuildKernel/Template,ARV452,$(xway_cmdline)) + $(call Image/BuildKernel/Template,NONE) +endef + +define Image/Build + $(call Image/Build/$(1),$(1),EASY4010) + $(call Image/Build/$(1),$(1),EASY50712) + $(call Image/Build/$(1),$(1),EASY50812) + $(call Image/Build/$(1),$(1),ARV452) + $(call Image/Build/$(1),$(1),NONE) + $(CP) $(KDIR)/root.$(1) $(BIN_DIR)/$(IMG_PREFIX)-$(1).rootfs +endef +endif + +ifeq ($(CONFIG_SOC_LANTIQ_FALCON),y) +define Image/BuildKernel + $(call Image/BuildKernel/Template,EASY98000,$(falcon_cmdline)) + $(call Image/BuildKernel/Template,NONE) +endef + +define Image/Build + $(call Image/Build/$(1),$(1),EASY98000) + $(call Image/Build/$(1),$(1),NONE) + $(CP) $(KDIR)/root.$(1) $(BIN_DIR)/$(IMG_PREFIX)-$(1).rootfs +endef +endif + +$(eval $(call BuildImage)) diff --git a/target/linux/lantiq/modules.mk b/target/linux/lantiq/modules.mk new file mode 100644 index 0000000000..78289a1a11 --- /dev/null +++ b/target/linux/lantiq/modules.mk @@ -0,0 +1,47 @@ +# +# Copyright 
(C) 2010 OpenWrt.org +# +# This is free software, licensed under the GNU General Public License v2. +# See /LICENSE for more information. +# + +define KernelPackage/lantiq-deu + TITLE:=Lantiq data encryption unit + SUBMENU:=$(CRYPTO_MENU) + DEPENDS:=@TARGET_lantiq + KCONFIG:=CONFIG_CRYPTO_DEV_LANTIQ \ + CONFIG_CRYPTO_HW=y \ + CONFIG_CRYPTO_DEV_LANTIQ_AES=y \ + CONFIG_CRYPTO_DEV_LANTIQ_DES=y \ + CONFIG_CRYPTO_DEV_LANTIQ_MD5=y \ + CONFIG_CRYPTO_DEV_LANTIQ_SHA1=y + $(call AddDepends/crypto) +endef + +define KernelPackage/lantiq-deu/description + Kernel support for the Lantiq crypto HW +endef + +$(eval $(call KernelPackage,lantiq-deu)) + +USB_MENU:=USB Support + +define KernelPackage/usb-dwc-otg + TITLE:=Synopsys DWC_OTG support + SUBMENU:=$(USB_MENU) + DEPENDS+=@TARGET_lantiq_xway +kmod-usb-core + KCONFIG:=CONFIG_DWC_OTG \ + CONFIG_DWC_OTG_DEBUG=n \ + CONFIG_DWC_OTG_LANTIQ=y \ + CONFIG_DWC_OTG_HOST_ONLY=y + FILES:=$(LINUX_DIR)/drivers/usb/dwc_otg/dwc_otg.ko + AUTOLOAD:=$(call AutoLoad,50,dwc_otg) +endef + +define KernelPackage/usb-dwc-otg/description + Kernel support for Synopsys USB on XWAY +endef + +$(eval $(call KernelPackage,usb-dwc-otg)) + + diff --git a/target/linux/lantiq/patches/000-mips-bad-intctl.patch b/target/linux/lantiq/patches/000-mips-bad-intctl.patch new file mode 100644 index 0000000000..7c0f52db06 --- /dev/null +++ b/target/linux/lantiq/patches/000-mips-bad-intctl.patch @@ -0,0 +1,35 @@ +--- a/arch/mips/kernel/traps.c ++++ b/arch/mips/kernel/traps.c +@@ -1496,7 +1496,18 @@ void __cpuinit per_cpu_trap_init(void) + if (cpu_has_mips_r2) { + cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; + cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; ++ ++ if (!cp0_compare_irq) ++ cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ; ++ + cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; ++ ++ if (!cp0_perfcount_irq) ++ cp0_perfcount_irq = CP0_LEGACY_PERFCNT_IRQ; ++ ++ if (arch_fixup_c0_irqs) ++ arch_fixup_c0_irqs(); ++ + if (cp0_perfcount_irq == cp0_compare_irq) + cp0_perfcount_irq = -1; + } else { +--- a/arch/mips/include/asm/irq.h ++++ b/arch/mips/include/asm/irq.h +@@ -133,9 +133,11 @@ extern void free_irqno(unsigned int irq) + * IE7. Since R2 their number has to be read from the c0_intctl register. + */ + #define CP0_LEGACY_COMPARE_IRQ 7 ++#define CP0_LEGACY_PERFCNT_IRQ 7 + + extern int cp0_compare_irq; + extern int cp0_compare_irq_shift; + extern int cp0_perfcount_irq; ++extern void __weak arch_fixup_c0_irqs(void); + + #endif /* _ASM_IRQ_H */ diff --git a/target/linux/lantiq/patches/010-mips_clocksource_init_war.patch b/target/linux/lantiq/patches/010-mips_clocksource_init_war.patch new file mode 100644 index 0000000000..81eabc6dcd --- /dev/null +++ b/target/linux/lantiq/patches/010-mips_clocksource_init_war.patch @@ -0,0 +1,33 @@ +--- a/arch/mips/kernel/cevt-r4k.c ++++ b/arch/mips/kernel/cevt-r4k.c +@@ -22,6 +22,22 @@ + + #ifndef CONFIG_MIPS_MT_SMTC + ++/* ++ * Compare interrupt can be routed and latched outside the core, ++ * so a single execution hazard barrier may not be enough to give ++ * it time to clear as seen in the Cause register. 4 times the ++ * pipeline depth seems reasonably conservative, and empirically ++ * works better in configurations with high CPU/bus clock ratios.
++ */ ++ ++#define compare_change_hazard() \ ++ do { \ ++ irq_disable_hazard(); \ ++ irq_disable_hazard(); \ ++ irq_disable_hazard(); \ ++ irq_disable_hazard(); \ ++ } while (0) ++ + static int mips_next_event(unsigned long delta, + struct clock_event_device *evt) + { +@@ -31,6 +47,7 @@ static int mips_next_event(unsigned long + cnt = read_c0_count(); + cnt += delta; + write_c0_compare(cnt); ++ compare_change_hazard(); + res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0; + return res; + } diff --git a/target/linux/lantiq/patches/020-genirq_fix.patch b/target/linux/lantiq/patches/020-genirq_fix.patch new file mode 100644 index 0000000000..0503d0d2ac --- /dev/null +++ b/target/linux/lantiq/patches/020-genirq_fix.patch @@ -0,0 +1,12 @@ +--- a/kernel/irq/chip.c ++++ b/kernel/irq/chip.c +@@ -650,6 +650,9 @@ handle_percpu_irq(unsigned int irq, stru + + kstat_incr_irqs_this_cpu(irq, desc); + ++ if (unlikely(!desc->action || (desc->status & IRQ_DISABLED))) ++ return; ++ + if (desc->chip->ack) + desc->chip->ack(irq); + diff --git a/target/linux/lantiq/patches/100-board.patch b/target/linux/lantiq/patches/100-board.patch new file mode 100644 index 0000000000..201fa96737 --- /dev/null +++ b/target/linux/lantiq/patches/100-board.patch @@ -0,0 +1,641 @@ +--- a/arch/mips/Kconfig ++++ b/arch/mips/Kconfig +@@ -139,6 +139,9 @@ config MACH_DECSTATION + + otherwise choose R3000. + ++config LANTIQ ++ bool "Lantiq MIPS" ++ + config MACH_JAZZ + bool "Jazz family of machines" + select ARC +@@ -693,6 +696,7 @@ source "arch/mips/txx9/Kconfig" + source "arch/mips/vr41xx/Kconfig" + source "arch/mips/cavium-octeon/Kconfig" + source "arch/mips/loongson/Kconfig" ++source "arch/mips/lantiq/Kconfig" + + endmenu + +--- a/arch/mips/Makefile ++++ b/arch/mips/Makefile +@@ -317,6 +317,17 @@ cflags-$(CONFIG_MIPS_COBALT) += -I$(srct + load-$(CONFIG_MIPS_COBALT) += 0xffffffff80080000 + + # ++# Lantiq ++# ++load-$(CONFIG_LANTIQ) += 0xffffffff80002000 ++core-$(CONFIG_LANTIQ) += arch/mips/lantiq/ ++cflags-$(CONFIG_LANTIQ) += -I$(srctree)/arch/mips/include/asm/mach-lantiq ++core-$(CONFIG_SOC_LANTIQ_FALCON) += arch/mips/lantiq/falcon/ ++cflags-$(CONFIG_SOC_LANTIQ_FALCON) += -I$(srctree)/arch/mips/include/asm/mach-lantiq/falcon ++core-$(CONFIG_SOC_LANTIQ_XWAY) += arch/mips/lantiq/xway/ ++cflags-$(CONFIG_SOC_LANTIQ_XWAY) += -I$(srctree)/arch/mips/include/asm/mach-lantiq/xway ++ ++# + # DECstation family + # + core-$(CONFIG_MACH_DECSTATION) += arch/mips/dec/ +--- /dev/null ++++ b/arch/mips/lantiq/Kconfig +@@ -0,0 +1,36 @@ ++if LANTIQ ++ ++config SOC_LANTIQ ++ bool ++ select DMA_NONCOHERENT ++ select IRQ_CPU ++ select CEVT_R4K ++ select CSRC_R4K ++ select SYS_HAS_CPU_MIPS32_R1 ++ select SYS_HAS_CPU_MIPS32_R2 ++ select SYS_SUPPORTS_BIG_ENDIAN ++ select SYS_SUPPORTS_32BIT_KERNEL ++ select SYS_SUPPORTS_MULTITHREADING ++ select SYS_HAS_EARLY_PRINTK ++ select HW_HAS_PCI ++ select ARCH_REQUIRE_GPIOLIB ++ select SWAP_IO_SPACE ++ select MIPS_MACHINE ++ ++choice ++ prompt "SoC Type" ++ default SOC_LANTIQ_XWAY ++ ++#config SOC_LANTIQ_FALCON ++# bool "FALCON" ++# select SOC_LANTIQ ++ ++config SOC_LANTIQ_XWAY ++ bool "XWAY" ++ select SOC_LANTIQ ++endchoice ++ ++#source "arch/mips/lantiq/falcon/Kconfig" ++source "arch/mips/lantiq/xway/Kconfig" ++ ++endif +--- /dev/null ++++ b/arch/mips/lantiq/Makefile +@@ -0,0 +1,2 @@ ++obj-y := irq.o setup.o clk.o prom.o ++obj-$(CONFIG_EARLY_PRINTK) += early_printk.o +--- /dev/null ++++ b/arch/mips/lantiq/irq.c +@@ -0,0 +1,212 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it 
++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ * ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#include <linux/module.h> ++#include <linux/interrupt.h> ++ ++#include <asm/bootinfo.h> ++#include <asm/irq_cpu.h> ++ ++#include <lantiq.h> ++#include <irq.h> ++ ++#define LQ_ICU_BASE_ADDR (KSEG1 | 0x1F880200) ++ ++#define LQ_ICU_IM0_ISR ((u32 *)(LQ_ICU_BASE_ADDR + 0x0000)) ++#define LQ_ICU_IM0_IER ((u32 *)(LQ_ICU_BASE_ADDR + 0x0008)) ++#define LQ_ICU_IM0_IOSR ((u32 *)(LQ_ICU_BASE_ADDR + 0x0010)) ++#define LQ_ICU_IM0_IRSR ((u32 *)(LQ_ICU_BASE_ADDR + 0x0018)) ++#define LQ_ICU_IM0_IMR ((u32 *)(LQ_ICU_BASE_ADDR + 0x0020)) ++ ++#define LQ_ICU_IM1_ISR ((u32 *)(LQ_ICU_BASE_ADDR + 0x0028)) ++#define LQ_ICU_IM2_ISR ((u32 *)(LQ_ICU_BASE_ADDR + 0x0050)) ++#define LQ_ICU_IM3_ISR ((u32 *)(LQ_ICU_BASE_ADDR + 0x0078)) ++#define LQ_ICU_IM4_ISR ((u32 *)(LQ_ICU_BASE_ADDR + 0x00A0)) ++ ++#define LQ_ICU_OFFSET (LQ_ICU_IM1_ISR - LQ_ICU_IM0_ISR) ++ ++void ++lq_disable_irq(unsigned int irq_nr) ++{ ++ u32 *ier = LQ_ICU_IM0_IER; ++ irq_nr -= INT_NUM_IRQ0; ++ ier += LQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); ++ irq_nr %= INT_NUM_IM_OFFSET; ++ lq_w32(lq_r32(ier) & ~(1 << irq_nr), ier); ++} ++EXPORT_SYMBOL(lq_disable_irq); ++ ++void ++lq_mask_and_ack_irq(unsigned int irq_nr) ++{ ++ u32 *ier = LQ_ICU_IM0_IER; ++ u32 *isr = LQ_ICU_IM0_ISR; ++ irq_nr -= INT_NUM_IRQ0; ++ ier += LQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); ++ isr += LQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); ++ irq_nr %= INT_NUM_IM_OFFSET; ++ lq_w32(lq_r32(ier) & ~(1 << irq_nr), ier); ++ lq_w32((1 << irq_nr), isr); ++} ++EXPORT_SYMBOL(lq_mask_and_ack_irq); ++ ++static void ++lq_ack_irq(unsigned int irq_nr) ++{ ++ u32 *isr = LQ_ICU_IM0_ISR; ++ irq_nr -= INT_NUM_IRQ0; ++ isr += LQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); ++ irq_nr %= INT_NUM_IM_OFFSET; ++ lq_w32((1 << irq_nr), isr); ++} ++ ++void ++lq_enable_irq(unsigned int irq_nr) ++{ ++ u32 *ier = LQ_ICU_IM0_IER; ++ irq_nr -= INT_NUM_IRQ0; ++ ier += LQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); ++ irq_nr %= INT_NUM_IM_OFFSET; ++ lq_w32(lq_r32(ier) | (1 << irq_nr), ier); ++} ++EXPORT_SYMBOL(lq_enable_irq); ++ ++static unsigned int ++lq_startup_irq(unsigned int irq) ++{ ++ lq_enable_irq(irq); ++ return 0; ++} ++ ++static void ++lq_end_irq(unsigned int irq) ++{ ++ if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) ++ lq_enable_irq(irq); ++} ++ ++static struct irq_chip ++lq_irq_type = { ++ "lq_irq", ++ .startup = lq_startup_irq, ++ .enable = lq_enable_irq, ++ .disable = lq_disable_irq, ++ .unmask = lq_enable_irq, ++ .ack = lq_ack_irq, ++ .mask = lq_disable_irq, ++ .mask_ack = lq_mask_and_ack_irq, ++ .end = lq_end_irq, ++}; ++ ++static void ++lq_hw_irqdispatch(int module) ++{ ++ u32 irq; ++ ++ irq = lq_r32(LQ_ICU_IM0_IOSR + (module * LQ_ICU_OFFSET)); ++ if (irq == 0) ++ return; ++ ++ /* silicon bug causes only the msb set to 1 to be valid. 
all ++ other bits might be bogus */ ++ irq = __fls(irq); ++ do_IRQ((int)irq + INT_NUM_IM0_IRL0 + (INT_NUM_IM_OFFSET * module)); ++} ++ ++#define DEFINE_HWx_IRQDISPATCH(x) \ ++static void lq_hw ## x ## _irqdispatch(void)\ ++{\ ++ lq_hw_irqdispatch(x); \ ++} ++static void lq_hw5_irqdispatch(void) ++{ ++ do_IRQ(MIPS_CPU_TIMER_IRQ); ++} ++DEFINE_HWx_IRQDISPATCH(0) ++DEFINE_HWx_IRQDISPATCH(1) ++DEFINE_HWx_IRQDISPATCH(2) ++DEFINE_HWx_IRQDISPATCH(3) ++DEFINE_HWx_IRQDISPATCH(4) ++/*DEFINE_HWx_IRQDISPATCH(5)*/ ++ ++asmlinkage void ++plat_irq_dispatch(void) ++{ ++ unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM; ++ unsigned int i; ++ ++ if (pending & CAUSEF_IP7) ++ { ++ do_IRQ(MIPS_CPU_TIMER_IRQ); ++ goto out; ++ } else { ++ for (i = 0; i < 5; i++) ++ { ++ if (pending & (CAUSEF_IP2 << i)) ++ { ++ lq_hw_irqdispatch(i); ++ goto out; ++ } ++ } ++ } ++ printk(KERN_ALERT "Spurious IRQ: CAUSE=0x%08x\n", read_c0_status()); ++ ++out: ++ return; ++} ++ ++static struct irqaction ++cascade = { ++ .handler = no_action, ++ .flags = IRQF_DISABLED, ++ .name = "cascade", ++}; ++ ++void __init ++arch_init_irq(void) ++{ ++ int i; ++ ++ for (i = 0; i < 5; i++) ++ lq_w32(0, LQ_ICU_IM0_IER + (i * LQ_ICU_OFFSET)); ++ ++ mips_cpu_irq_init(); ++ ++ for (i = 2; i <= 6; i++) ++ setup_irq(i, &cascade); ++ ++ if (cpu_has_vint) { ++ printk(KERN_INFO "Setting up vectored interrupts\n"); ++ set_vi_handler(2, lq_hw0_irqdispatch); ++ set_vi_handler(3, lq_hw1_irqdispatch); ++ set_vi_handler(4, lq_hw2_irqdispatch); ++ set_vi_handler(5, lq_hw3_irqdispatch); ++ set_vi_handler(6, lq_hw4_irqdispatch); ++ set_vi_handler(7, lq_hw5_irqdispatch); ++ } ++ ++ for (i = INT_NUM_IRQ0; i <= (INT_NUM_IRQ0 + (5 * INT_NUM_IM_OFFSET)); i++) ++ set_irq_chip_and_handler(i, &lq_irq_type, ++ handle_level_irq); ++ ++ #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) ++ set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | ++ IE_IRQ3 | IE_IRQ4 | IE_IRQ5); ++ #else ++ set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 | ++ IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5); ++ #endif ++} ++ ++void __cpuinit ++arch_fixup_c0_irqs(void) ++{ ++ /* FIXME: check for CPUID and only do fix for specific chips/versions */ ++ cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ; ++ cp0_perfcount_irq = CP0_LEGACY_PERFCNT_IRQ; ++} +--- /dev/null ++++ b/arch/mips/lantiq/setup.c +@@ -0,0 +1,47 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ * ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/io.h> ++#include <linux/ioport.h> ++ ++#include <lantiq.h> ++#include <lantiq_regs.h> ++ ++void __init ++plat_mem_setup(void) ++{ ++ /* assume 16M as default */ ++ int memsize = 16; ++ char **envp = (char **) KSEG1ADDR(fw_arg2); ++ u32 status; ++ ++ /* make sure to have no "reverse endian" for user mode! 
*/ ++ status = read_c0_status(); ++ status &= (~(1<<25)); ++ write_c0_status(status); ++ ++ ioport_resource.start = IOPORT_RESOURCE_START; ++ ioport_resource.end = IOPORT_RESOURCE_END; ++ iomem_resource.start = IOMEM_RESOURCE_START; ++ iomem_resource.end = IOMEM_RESOURCE_END; ++ ++ while (*envp) ++ { ++ char *e = (char *)KSEG1ADDR(*envp); ++ if (!strncmp(e, "memsize=", 8)) ++ { ++ e += 8; ++ memsize = simple_strtoul(e, NULL, 10); ++ } ++ envp++; ++ } ++ memsize *= 1024 * 1024; ++ add_memory_region(0x00000000, memsize, BOOT_MEM_RAM); ++} +--- /dev/null ++++ b/arch/mips/lantiq/clk.c +@@ -0,0 +1,141 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ * ++ * Copyright (C) 2010 Thomas Langer, Lantiq Deutschland ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#include <linux/io.h> ++#include <linux/module.h> ++#include <linux/init.h> ++#include <linux/kernel.h> ++#include <linux/types.h> ++#include <linux/clk.h> ++#include <linux/err.h> ++#include <linux/list.h> ++ ++#include <asm/time.h> ++#include <asm/irq.h> ++#include <asm/div64.h> ++ ++#include <lantiq.h> ++#ifdef CONFIG_SOC_LANTIQ_XWAY ++#include <xway.h> ++#endif ++ ++extern unsigned long lq_get_cpu_hz(void); ++extern unsigned long lq_get_fpi_hz(void); ++extern unsigned long lq_get_io_region_clock(void); ++ ++struct clk { ++ const char *name; ++ unsigned long rate; ++ unsigned long (*get_rate) (void); ++}; ++ ++static struct clk *cpu_clk = 0; ++static int cpu_clk_cnt = 0; ++ ++static unsigned int r4k_offset; ++static unsigned int r4k_cur; ++ ++static struct clk cpu_clk_generic[] = { ++ { ++ .name = "cpu", ++ .get_rate = lq_get_cpu_hz, ++ }, { ++ .name = "fpi", ++ .get_rate = lq_get_fpi_hz, ++ }, { ++ .name = "io", ++ .get_rate = lq_get_io_region_clock, ++ }, ++}; ++ ++void ++clk_init(void) ++{ ++ int i; ++ cpu_clk = cpu_clk_generic; ++ cpu_clk_cnt = ARRAY_SIZE(cpu_clk_generic); ++ for(i = 0; i < cpu_clk_cnt; i++) ++ printk("%s: %ld\n", cpu_clk[i].name, clk_get_rate(&cpu_clk[i])); ++} ++ ++static inline int ++clk_good(struct clk *clk) ++{ ++ return clk && !IS_ERR(clk); ++} ++ ++unsigned long ++clk_get_rate(struct clk *clk) ++{ ++ if (unlikely(!clk_good(clk))) ++ return 0; ++ ++ if (clk->rate != 0) ++ return clk->rate; ++ ++ if (clk->get_rate != NULL) ++ return clk->get_rate(); ++ ++ return 0; ++} ++EXPORT_SYMBOL(clk_get_rate); ++ ++struct clk* ++clk_get(struct device *dev, const char *id) ++{ ++ int i; ++ for(i = 0; i < cpu_clk_cnt; i++) ++ if (!strcmp(id, cpu_clk[i].name)) ++ return &cpu_clk[i]; ++ BUG(); ++ return ERR_PTR(-ENOENT); ++} ++EXPORT_SYMBOL(clk_get); ++ ++void ++clk_put(struct clk *clk) ++{ ++ /* not used */ ++} ++EXPORT_SYMBOL(clk_put); ++ ++static inline u32 ++lq_get_counter_resolution(void) ++{ ++ u32 res; ++ __asm__ __volatile__( ++ ".set push\n" ++ ".set mips32r2\n" ++ ".set noreorder\n" ++ "rdhwr %0, $3\n" ++ "ehb\n" ++ ".set pop\n" ++ : "=&r" (res) ++ : /* no input */ ++ : "memory"); ++ instruction_hazard(); ++ return res; ++} ++ ++void __init ++plat_time_init(void) ++{ ++ struct clk *clk = clk_get(0, "cpu"); ++ mips_hpt_frequency = clk_get_rate(clk) / lq_get_counter_resolution(); ++ r4k_cur = (read_c0_count() + r4k_offset); ++ write_c0_compare(r4k_cur); ++ ++#ifdef CONFIG_SOC_LANTIQ_XWAY ++#define LQ_GPTU_GPT_CLC ((u32 *)(LQ_GPTU_BASE_ADDR + 0x0000)) ++ lq_pmu_enable(PMU_GPT); ++ lq_pmu_enable(PMU_FPI); ++ ++ lq_w32(0x100, LQ_GPTU_GPT_CLC); ++#endif 
++} +--- /dev/null ++++ b/arch/mips/lantiq/prom.c +@@ -0,0 +1,118 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ * ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#include <linux/module.h> ++#include <linux/clk.h> ++#include <asm/bootinfo.h> ++#include <asm/time.h> ++ ++#include <lantiq.h> ++ ++#include "prom.h" ++ ++static struct lq_soc_info soc_info; ++ ++/* for Multithreading (APRP) on MIPS34K */ ++unsigned long physical_memsize; ++ ++/* all access to the ebu must be locked */ ++DEFINE_SPINLOCK(ebu_lock); ++EXPORT_SYMBOL_GPL(ebu_lock); ++ ++extern void clk_init(void); ++ ++unsigned int ++lq_get_cpu_ver(void) ++{ ++ return soc_info.rev; ++} ++EXPORT_SYMBOL(lq_get_cpu_ver); ++ ++unsigned int ++lq_get_soc_type(void) ++{ ++ return soc_info.type; ++} ++EXPORT_SYMBOL(lq_get_soc_type); ++ ++const char* ++get_system_type(void) ++{ ++ return soc_info.sys_type; ++} ++ ++void ++prom_free_prom_memory(void) ++{ ++} ++ ++#ifdef CONFIG_IMAGE_CMDLINE_HACK ++extern char __image_cmdline[]; ++ ++static void __init ++prom_init_image_cmdline(void) ++{ ++ char *p = __image_cmdline; ++ int replace = 0; ++ ++ if (*p == '-') { ++ replace = 1; ++ p++; ++ } ++ ++ if (*p == '\0') ++ return; ++ ++ if (replace) { ++ strlcpy(arcs_cmdline, p, sizeof(arcs_cmdline)); ++ } else { ++ strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline)); ++ strlcat(arcs_cmdline, p, sizeof(arcs_cmdline)); ++ } ++} ++#else ++static void __init prom_init_image_cmdline(void) { return; } ++#endif ++ ++static void __init ++prom_init_cmdline(void) ++{ ++ int argc = fw_arg0; ++ char **argv = (char**)KSEG1ADDR(fw_arg1); ++ int i; ++ ++ arcs_cmdline[0] = '\0'; ++ if(argc) ++ for (i = 1; i < argc; i++) ++ { ++ strlcat(arcs_cmdline, (char*)KSEG1ADDR(argv[i]), COMMAND_LINE_SIZE); ++ if(i + 1 != argc) ++ strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE); ++ } ++ ++ if (!*arcs_cmdline) ++ strcpy(&(arcs_cmdline[0]), ++ "console=ttyS1,115200 rootfstype=squashfs,jffs2"); ++ prom_init_image_cmdline(); ++} ++ ++void __init ++prom_init(void) ++{ ++ struct clk *clk; ++ lq_soc_detect(&soc_info); ++ ++ clk_init(); ++ clk = clk_get(0, "cpu"); ++ snprintf(soc_info.sys_type, LQ_SYS_TYPE_LEN - 1, "%s rev1.%d %ldMhz", ++ soc_info.name, soc_info.rev, clk_get_rate(clk) / 1000000); ++ soc_info.sys_type[LQ_SYS_TYPE_LEN - 1] = '\0'; ++ printk("SoC: %s\n", soc_info.sys_type); ++ ++ prom_init_cmdline(); ++} +--- /dev/null ++++ b/arch/mips/lantiq/prom.h +@@ -0,0 +1,24 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. 
++ * ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#ifndef _LQ_PROM_H__ ++#define _LQ_PROM_H__ ++ ++#define LQ_SYS_TYPE_LEN 0x100 ++ ++struct lq_soc_info { ++ unsigned char *name; ++ unsigned int rev; ++ unsigned int partnum; ++ unsigned int type; ++ unsigned char sys_type[LQ_SYS_TYPE_LEN]; ++}; ++ ++void lq_soc_detect(struct lq_soc_info *i); ++ ++#endif diff --git a/target/linux/lantiq/patches/101-header.patch b/target/linux/lantiq/patches/101-header.patch new file mode 100644 index 0000000000..3d0caf2f16 --- /dev/null +++ b/target/linux/lantiq/patches/101-header.patch @@ -0,0 +1,136 @@ +--- /dev/null ++++ b/arch/mips/include/asm/mach-lantiq/war.h +@@ -0,0 +1,24 @@ ++/* ++ * This file is subject to the terms and conditions of the GNU General Public ++ * License. See the file "COPYING" in the main directory of this archive ++ * for more details. ++ * ++ */ ++#ifndef __ASM_MIPS_MACH_LANTIQ_WAR_H ++#define __ASM_MIPS_MACH_LANTIQ_WAR_H ++ ++#define R4600_V1_INDEX_ICACHEOP_WAR 0 ++#define R4600_V1_HIT_CACHEOP_WAR 0 ++#define R4600_V2_HIT_CACHEOP_WAR 0 ++#define R5432_CP0_INTERRUPT_WAR 0 ++#define BCM1250_M3_WAR 0 ++#define SIBYTE_1956_WAR 0 ++#define MIPS4K_ICACHE_REFILL_WAR 0 ++#define MIPS_CACHE_SYNC_WAR 0 ++#define TX49XX_ICACHE_INDEX_INV_WAR 0 ++#define RM9000_CDEX_SMP_WAR 0 ++#define ICACHE_REFILLS_WORKAROUND_WAR 0 ++#define R10000_LLSC_WAR 0 ++#define MIPS34K_MISSED_ITLB_WAR 0 ++ ++#endif +--- /dev/null ++++ b/arch/mips/include/asm/mach-lantiq/lantiq.h +@@ -0,0 +1,47 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ * ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#ifndef _LANTIQ_H__ ++#define _LANTIQ_H__ ++ ++/* generic reg access functions */ ++#define lq_r32(reg) __raw_readl(reg) ++#define lq_w32(val, reg) __raw_writel(val, reg) ++#define lq_w32_mask(clear, set, reg) lq_w32((lq_r32(reg) & ~clear) | set, reg) ++ ++extern unsigned int lq_get_cpu_ver(void); ++extern unsigned int lq_get_soc_type(void); ++ ++/* clock speeds */ ++#define CLOCK_60M 60000000 ++#define CLOCK_83M 83333333 ++#define CLOCK_111M 111111111 ++#define CLOCK_111M 111111111 ++#define CLOCK_133M 133333333 ++#define CLOCK_167M 166666667 ++#define CLOCK_200M 200000000 ++#define CLOCK_333M 333333333 ++#define CLOCK_400M 400000000 ++ ++/* spinlock all ebu i/o */ ++extern spinlock_t ebu_lock; ++ ++/* some irq helpers */ ++extern void lq_disable_irq(unsigned int irq_nr); ++extern void lq_mask_and_ack_irq(unsigned int irq_nr); ++extern void lq_enable_irq(unsigned int irq_nr); ++ ++#define IOPORT_RESOURCE_START 0x10000000 ++#define IOPORT_RESOURCE_END 0xffffffff ++#define IOMEM_RESOURCE_START 0x10000000 ++#define IOMEM_RESOURCE_END 0xffffffff ++ ++#define LQ_FLASH_START 0x10000000 ++#define LQ_FLASH_MAX 0x04000000 ++ ++#endif +--- /dev/null ++++ b/arch/mips/include/asm/mach-lantiq/lantiq_regs.h +@@ -0,0 +1,17 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. 
++ * ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#ifndef _LANTIQ_REGS_H__ ++#define _LANTIQ_REGS_H__ ++ ++#ifdef CONFIG_SOC_LANTIQ_XWAY ++#include <xway.h> ++#include <xway_irq.h> ++#endif ++ ++#endif +--- /dev/null ++++ b/arch/mips/include/asm/mach-lantiq/lantiq_platform.h +@@ -0,0 +1,36 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ * ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#ifndef _LANTIQ_PLATFORM_H__ ++#define _LANTIQ_PLATFORM_H__ ++ ++#include <linux/mtd/partitions.h> ++ ++/* struct used to pass info to network drivers */ ++enum { ++ MII_MODE, ++ REV_MII_MODE, ++}; ++ ++struct lq_eth_data { ++ unsigned char *mac; ++ int mii_mode; ++}; ++ ++/* struct used to pass info to the pci core */ ++enum { ++ PCI_CLOCK_INT = 0, ++ PCI_CLOCK_EXT ++}; ++ ++struct lq_pci_data { ++ int clock; ++ int req_mask; ++}; ++ ++#endif diff --git a/target/linux/lantiq/patches/104-board_xway.patch b/target/linux/lantiq/patches/104-board_xway.patch new file mode 100644 index 0000000000..1aaeab63e1 --- /dev/null +++ b/target/linux/lantiq/patches/104-board_xway.patch @@ -0,0 +1,3129 @@ + +--- /dev/null ++++ b/arch/mips/lantiq/xway/Kconfig +@@ -0,0 +1,19 @@ ++if SOC_LANTIQ_XWAY ++ ++menu "Mips Machine" ++ ++config LANTIQ_MACH_EASY50812 ++ bool "Easy50812" ++ default y ++ ++config LANTIQ_MACH_EASY50712 ++ bool "Easy50712" ++ default y ++ ++config LANTIQ_MACH_EASY4010 ++ bool "Easy4010" ++ default y ++ ++endmenu ++ ++endif +--- /dev/null ++++ b/arch/mips/lantiq/xway/gpio_ebu.c +@@ -0,0 +1,107 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. 
++ * ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#include <linux/init.h> ++#include <linux/module.h> ++#include <linux/types.h> ++#include <linux/platform_device.h> ++#include <linux/mutex.h> ++#include <linux/gpio.h> ++ ++#include <xway.h> ++ ++#define LQ_EBU_BUSCON 0x1e7ff ++#define LQ_EBU_WP 0x80000000 ++ ++static int shadow = 0x0000; ++static void __iomem *virt; ++ ++static int ++lq_ebu_direction_output(struct gpio_chip *chip, unsigned offset, int value) ++{ ++ return 0; ++} ++ ++static void ++lq_ebu_set(struct gpio_chip *chip, unsigned offset, int value) ++{ ++ unsigned long flags; ++ if(value) ++ shadow |= (1 << offset); ++ else ++ shadow &= ~(1 << offset); ++ spin_lock_irqsave(&ebu_lock, flags); ++ lq_w32(LQ_EBU_BUSCON, LQ_EBU_BUSCON1); ++ *((__u16*)virt) = shadow; ++ lq_w32(LQ_EBU_BUSCON | LQ_EBU_WP, LQ_EBU_BUSCON1); ++ spin_unlock_irqrestore(&ebu_lock, flags); ++} ++ ++static struct gpio_chip ++lq_ebu_chip = ++{ ++ .label = "lq_ebu", ++ .direction_output = lq_ebu_direction_output, ++ .set = lq_ebu_set, ++ .base = 32, ++ .ngpio = 16, ++ .can_sleep = 1, ++ .owner = THIS_MODULE, ++}; ++ ++static int __devinit ++lq_ebu_probe(struct platform_device *pdev) ++{ ++ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ int ret = 0; ++ if (!res) ++ return -ENOENT; ++ res = request_mem_region(res->start, resource_size(res), ++ dev_name(&pdev->dev)); ++ if (!res) ++ return -EBUSY; ++ ++ /* tell the ebu controller which mem addr we will be using */ ++ lq_w32(pdev->resource->start | 0x1, LQ_EBU_ADDRSEL1); ++ lq_w32(LQ_EBU_BUSCON | LQ_EBU_WP, LQ_EBU_BUSCON1); ++ ++ virt = ioremap_nocache(res->start, resource_size(res)); ++ if (!virt) ++ { ++ dev_err(&pdev->dev, "Failed to ioremap mem region\n"); ++ ret = -ENOMEM; ++ goto err_release_mem_region; ++ } ++ /* grab the default settings passed form the platform code */ ++ shadow = (unsigned int) pdev->dev.platform_data; ++ ++ ret = gpiochip_add(&lq_ebu_chip); ++ if (!ret) ++ return 0; ++ ++err_release_mem_region: ++ release_mem_region(res->start, resource_size(res)); ++ return ret; ++} ++ ++static struct platform_driver ++lq_ebu_driver = { ++ .probe = lq_ebu_probe, ++ .driver = { ++ .name = "lq_ebu", ++ .owner = THIS_MODULE, ++ }, ++}; ++ ++static int __init ++init_lq_ebu(void) ++{ ++ return platform_driver_register(&lq_ebu_driver); ++} ++ ++arch_initcall(init_lq_ebu); +--- /dev/null ++++ b/arch/mips/lantiq/xway/gpio_leds.c +@@ -0,0 +1,161 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. 
++ * ++ * Copyright (C) 2007 John Crispin <blogic@openwrt.org> ++ * ++ */ ++ ++#include <linux/slab.h> ++#include <linux/init.h> ++#include <linux/module.h> ++#include <linux/types.h> ++#include <linux/platform_device.h> ++#include <linux/mutex.h> ++#include <linux/gpio.h> ++ ++#include <xway.h> ++ ++#define LQ_STP_BASE 0x1E100BB0 ++#define LQ_STP_SIZE 0x40 ++ ++#define LQ_STP_CON0 0x00 ++#define LQ_STP_CON1 0x04 ++#define LQ_STP_CPU0 0x08 ++#define LQ_STP_CPU1 0x0C ++#define LQ_STP_AR 0x10 ++ ++#define STP_CON0_SWU (1 << 31) ++ ++#define LQ_STP_2HZ (0) ++#define LQ_STP_4HZ (1 << 23) ++#define LQ_STP_8HZ (2 << 23) ++#define LQ_STP_10HZ (3 << 23) ++#define LQ_STP_MASK (0xf << 23) ++ ++#define LQ_STP_UPD_SRC_FPI (1 << 31) ++#define LQ_STP_UPD_MASK (3 << 30) ++#define LQ_STP_ADSL_SRC (3 << 24) ++ ++#define LQ_STP_GROUP0 (1 << 0) ++ ++#define LQ_STP_RISING 0 ++#define LQ_STP_FALLING (1 << 26) ++#define LQ_STP_EDGE_MASK (1 << 26) ++ ++#define lq_stp_r32(reg) __raw_readl(virt + reg) ++#define lq_stp_w32(val, reg) __raw_writel(val, virt + reg) ++#define lq_stp_w32_mask(clear, set, reg) \ ++ lq_w32((lq_r32(virt + reg) & ~clear) | set, virt + reg) ++ ++static int shadow = 0xffff; ++static void __iomem *virt; ++ ++static int ++lq_stp_direction_output(struct gpio_chip *chip, unsigned offset, int value) ++{ ++ return 0; ++} ++ ++static void ++lq_stp_set(struct gpio_chip *chip, unsigned offset, int value) ++{ ++ if(value) ++ shadow |= (1 << offset); ++ else ++ shadow &= ~(1 << offset); ++ lq_stp_w32(shadow, LQ_STP_CPU0); ++} ++ ++static struct gpio_chip lq_stp_chip = ++{ ++ .label = "lq_stp", ++ .direction_output = lq_stp_direction_output, ++ .set = lq_stp_set, ++ .base = 48, ++ .ngpio = 24, ++ .can_sleep = 1, ++ .owner = THIS_MODULE, ++}; ++ ++static int ++lq_stp_hw_init(void) ++{ ++ /* the 3 pins used to control the external stp */ ++ lq_gpio_request(4, 1, 0, 1, "stp-st"); ++ lq_gpio_request(5, 1, 0, 1, "stp-d"); ++ lq_gpio_request(6, 1, 0, 1, "stp-sh"); ++ ++ /* sane defaults */ ++ lq_stp_w32(0, LQ_STP_AR); ++ lq_stp_w32(0, LQ_STP_CPU0); ++ lq_stp_w32(0, LQ_STP_CPU1); ++ lq_stp_w32(STP_CON0_SWU, LQ_STP_CON0); ++ lq_stp_w32(0, LQ_STP_CON1); ++ ++ /* rising or falling edge */ ++ lq_stp_w32_mask(LQ_STP_EDGE_MASK, LQ_STP_FALLING, LQ_STP_CON0); ++ ++ /* per default stp 15-0 are set */ ++ lq_stp_w32_mask(0, LQ_STP_GROUP0, LQ_STP_CON1); ++ ++ /* stp are update periodically by the FPID */ ++ lq_stp_w32_mask(LQ_STP_UPD_MASK, LQ_STP_UPD_SRC_FPI, LQ_STP_CON1); ++ ++ /* set stp update speed */ ++ lq_stp_w32_mask(LQ_STP_MASK, LQ_STP_8HZ, LQ_STP_CON1); ++ ++ /* adsl 0 and 1 stp are updated by the arc */ ++ lq_stp_w32_mask(0, LQ_STP_ADSL_SRC, LQ_STP_CON0); ++ ++ lq_pmu_enable(PMU_LED); ++ return 0; ++} ++ ++static int ++lq_stp_probe(struct platform_device *pdev) ++{ ++ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ int ret = 0; ++ if (!res) ++ return -ENOENT; ++ res = request_mem_region(res->start, resource_size(res), ++ dev_name(&pdev->dev)); ++ if (!res) ++ return -EBUSY; ++ virt = ioremap_nocache(res->start, resource_size(res)); ++ if(!virt) ++ { ++ ret = -ENOMEM; ++ goto err_release_mem_region; ++ } ++ ret = gpiochip_add(&lq_stp_chip); ++ if(!ret) ++ return lq_stp_hw_init(); ++ ++ iounmap(virt); ++err_release_mem_region: ++ release_mem_region(res->start, resource_size(res)); ++ return ret; ++} ++ ++static struct platform_driver lq_stp_driver = { ++ .probe = lq_stp_probe, ++ .driver = { ++ .name = "lq_stp", ++ .owner = THIS_MODULE, ++ }, ++}; ++ ++int __init ++init_lq_stp(void) ++{ ++ 
int ret = platform_driver_register(&lq_stp_driver); ++ if (ret) ++ printk(KERN_INFO ++ "lq_stp: error registering platfom driver"); ++ return ret; ++} ++ ++arch_initcall(init_lq_stp); +--- /dev/null ++++ b/arch/mips/lantiq/xway/mach-easy4010.c +@@ -0,0 +1,79 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ * ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#include <linux/init.h> ++#include <linux/platform_device.h> ++#include <linux/leds.h> ++#include <linux/gpio.h> ++#include <linux/gpio_buttons.h> ++#include <linux/mtd/mtd.h> ++#include <linux/mtd/partitions.h> ++#include <linux/mtd/physmap.h> ++#include <linux/input.h> ++ ++#include <machine.h> ++ ++#include <xway.h> ++#include <lantiq_platform.h> ++ ++#include "devices.h" ++ ++#ifdef CONFIG_MTD_PARTITIONS ++static struct mtd_partition easy4010_partitions[] = ++{ ++ { ++ .name = "uboot", ++ .offset = 0x0, ++ .size = 0x20000, ++ }, ++ { ++ .name = "uboot_env", ++ .offset = 0x20000, ++ .size = 0x10000, ++ }, ++ { ++ .name = "linux", ++ .offset = 0x30000, ++ .size = 0x3D0000, ++ }, ++}; ++#endif ++ ++static struct physmap_flash_data easy4010_flash_data = { ++#ifdef CONFIG_MTD_PARTITIONS ++ .nr_parts = ARRAY_SIZE(easy4010_partitions), ++ .parts = easy4010_partitions, ++#endif ++}; ++ ++static struct lq_pci_data lq_pci_data = { ++ .clock = PCI_CLOCK_INT, ++ .req_mask = 0xf, ++}; ++ ++static struct lq_eth_data lq_eth_data = { ++ .mii_mode = REV_MII_MODE, ++}; ++ ++static void __init ++easy4010_init(void) ++{ ++ lq_register_gpio(); ++ lq_register_gpio_stp(); ++ lq_register_asc(0); ++ lq_register_asc(1); ++ lq_register_nor(&easy4010_flash_data); ++ lq_register_wdt(); ++ lq_register_pci(&lq_pci_data); ++ lq_register_ethernet(&lq_eth_data); ++} ++ ++MIPS_MACHINE(LANTIQ_MACH_EASY4010, ++ "EASY4010", ++ "EASY4010 Eval Board", ++ easy4010_init); +--- /dev/null ++++ b/arch/mips/lantiq/xway/mach-easy50712.c +@@ -0,0 +1,79 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. 
++ * ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#include <linux/init.h> ++#include <linux/platform_device.h> ++#include <linux/leds.h> ++#include <linux/gpio.h> ++#include <linux/gpio_buttons.h> ++#include <linux/mtd/mtd.h> ++#include <linux/mtd/partitions.h> ++#include <linux/mtd/physmap.h> ++#include <linux/input.h> ++ ++#include <machine.h> ++ ++#include <xway.h> ++#include <lantiq_platform.h> ++ ++#include "devices.h" ++ ++#ifdef CONFIG_MTD_PARTITIONS ++static struct mtd_partition easy50712_partitions[] = ++{ ++ { ++ .name = "uboot", ++ .offset = 0x0, ++ .size = 0x20000, ++ }, ++ { ++ .name = "uboot_env", ++ .offset = 0x20000, ++ .size = 0x10000, ++ }, ++ { ++ .name = "linux", ++ .offset = 0x30000, ++ .size = 0x3D0000, ++ }, ++}; ++#endif ++ ++static struct physmap_flash_data easy50712_flash_data = { ++#ifdef CONFIG_MTD_PARTITIONS ++ .nr_parts = ARRAY_SIZE(easy50712_partitions), ++ .parts = easy50712_partitions, ++#endif ++}; ++ ++static struct lq_pci_data lq_pci_data = { ++ .clock = PCI_CLOCK_INT, ++ .req_mask = 0xf, ++}; ++ ++static struct lq_eth_data lq_eth_data = { ++ .mii_mode = REV_MII_MODE, ++}; ++ ++static void __init ++easy50712_init(void) ++{ ++ lq_register_asc(0); ++ lq_register_asc(1); ++ lq_register_gpio(); ++ lq_register_gpio_stp(); ++ lq_register_nor(&easy50712_flash_data); ++ lq_register_wdt(); ++ lq_register_pci(&lq_pci_data); ++ lq_register_ethernet(&lq_eth_data); ++} ++ ++MIPS_MACHINE(LANTIQ_MACH_EASY50712, ++ "EASY50712", ++ "EASY50712 Eval Board", ++ easy50712_init); +--- /dev/null ++++ b/arch/mips/lantiq/xway/mach-easy50812.c +@@ -0,0 +1,78 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. 
++ * ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#include <linux/init.h> ++#include <linux/platform_device.h> ++#include <linux/leds.h> ++#include <linux/gpio.h> ++#include <linux/gpio_buttons.h> ++#include <linux/mtd/mtd.h> ++#include <linux/mtd/partitions.h> ++#include <linux/mtd/physmap.h> ++#include <linux/input.h> ++ ++#include <machine.h> ++ ++#include <xway.h> ++#include <lantiq_platform.h> ++ ++#include "devices.h" ++ ++#ifdef CONFIG_MTD_PARTITIONS ++static struct mtd_partition easy50812_partitions[] = ++{ ++ { ++ .name = "uboot", ++ .offset = 0x0, ++ .size = 0x40000, ++ }, ++ { ++ .name = "uboot_env", ++ .offset = 0x40000, ++ .size = 0x10000, ++ }, ++ { ++ .name = "linux", ++ .offset = 0x50000, ++ .size = 0x3B0000, ++ }, ++}; ++#endif ++ ++static struct physmap_flash_data easy50812_flash_data = { ++#ifdef CONFIG_MTD_PARTITIONS ++ .nr_parts = ARRAY_SIZE(easy50812_partitions), ++ .parts = easy50812_partitions, ++#endif ++}; ++ ++static struct lq_pci_data lq_pci_data = { ++ .clock = PCI_CLOCK_INT, ++ .req_mask = 0xf, ++}; ++ ++static struct lq_eth_data lq_eth_data = { ++ .mii_mode = REV_MII_MODE, ++}; ++ ++static void __init ++easy50812_init(void) ++{ ++ lq_register_gpio(); ++ lq_register_asc(0); ++ lq_register_asc(1); ++ lq_register_nor(&easy50812_flash_data); ++ lq_register_wdt(); ++ lq_register_pci(&lq_pci_data); ++ lq_register_ethernet(&lq_eth_data); ++} ++ ++MIPS_MACHINE(LANTIQ_MACH_EASY50812, ++ "EASY50812", ++ "EASY50812 Eval Board", ++ easy50812_init); +--- /dev/null ++++ b/arch/mips/lantiq/xway/prom.c +@@ -0,0 +1,52 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ * ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#include <linux/module.h> ++#include <linux/clk.h> ++#include <asm/bootinfo.h> ++#include <asm/time.h> ++ ++#include <xway.h> ++ ++#include "../prom.h" ++ ++#define SOC_DANUBE "Danube" ++#define SOC_TWINPASS "Twinpass" ++#define SOC_AR9 "AR9" ++ ++void __init ++lq_soc_detect(struct lq_soc_info *i) ++{ ++ i->partnum = (lq_r32(LQ_MPS_CHIPID) & 0x0FFFFFFF) >> 12; ++ i->rev = (lq_r32(LQ_MPS_CHIPID) & 0xF0000000) >> 28; ++ switch (i->partnum) ++ { ++ case SOC_ID_DANUBE1: ++ case SOC_ID_DANUBE2: ++ i->name = SOC_DANUBE; ++ i->type = SOC_TYPE_DANUBE; ++ break; ++ ++ case SOC_ID_TWINPASS: ++ i->name = SOC_TWINPASS; ++ i->type = SOC_TYPE_DANUBE; ++ break; ++ ++ case SOC_ID_ARX188: ++ case SOC_ID_ARX168: ++ case SOC_ID_ARX182: ++ i->name = SOC_AR9; ++ i->type = SOC_TYPE_AR9; ++ break; ++ ++ default: ++ printk(KERN_ERR "unknown chiprev : 0x%08X\n", i->partnum); ++ while(1) { }; ++ break; ++ } ++} +--- /dev/null ++++ b/arch/mips/lantiq/xway/devices.c +@@ -0,0 +1,278 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. 
++ * ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#include <linux/init.h> ++#include <linux/module.h> ++#include <linux/types.h> ++#include <linux/string.h> ++#include <linux/mtd/physmap.h> ++#include <linux/kernel.h> ++#include <linux/reboot.h> ++#include <linux/platform_device.h> ++#include <linux/leds.h> ++#include <linux/etherdevice.h> ++#include <linux/reboot.h> ++#include <linux/time.h> ++#include <linux/io.h> ++#include <linux/gpio.h> ++#include <linux/leds.h> ++ ++#include <asm/bootinfo.h> ++#include <asm/irq.h> ++ ++#include <xway.h> ++#include <xway_irq.h> ++#include <lantiq_platform.h> ++ ++#define IRQ_RES(resname,irq) {.name=#resname,.start=(irq),.flags=IORESOURCE_IRQ} ++ ++/* gpio leds */ ++static struct gpio_led_platform_data lq_gpio_led_data; ++ ++static struct platform_device lq_gpio_leds = ++{ ++ .name = "leds-gpio", ++ .dev = { ++ .platform_data = (void *) &lq_gpio_led_data, ++ } ++}; ++ ++void __init ++lq_register_gpio_leds(struct gpio_led *leds, int cnt) ++{ ++ lq_gpio_led_data.leds = leds; ++ lq_gpio_led_data.num_leds = cnt; ++ platform_device_register(&lq_gpio_leds); ++} ++ ++/* serial to parallel conversion */ ++static struct resource lq_stp_resource = ++{ ++ .name = "stp", ++ .start = LQ_STP_BASE, ++ .end = LQ_STP_BASE + LQ_STP_SIZE - 1, ++ .flags = IORESOURCE_MEM, ++}; ++ ++void __init ++lq_register_gpio_stp(void) ++{ ++ platform_device_register_simple("lq_stp", 0, &lq_stp_resource, 1); ++} ++ ++/* nor flash */ ++static struct resource lq_nor_resource = ++{ ++ .name = "nor", ++ .start = LQ_FLASH_START, ++ .end = LQ_FLASH_START + LQ_FLASH_MAX - 1, ++ .flags = IORESOURCE_MEM, ++}; ++ ++static struct platform_device lq_nor = ++{ ++ .name = "lq_nor", ++ .resource = &lq_nor_resource, ++ .num_resources = 1, ++}; ++ ++void __init ++lq_register_nor(struct physmap_flash_data *data) ++{ ++ lq_nor.dev.platform_data = data; ++ platform_device_register(&lq_nor); ++} ++ ++/* watchdog */ ++static struct resource lq_wdt_resource = ++{ ++ .name = "watchdog", ++ .start = LQ_WDT_BASE, ++ .end = LQ_WDT_BASE + LQ_WDT_SIZE - 1, ++ .flags = IORESOURCE_MEM, ++}; ++ ++void __init ++lq_register_wdt(void) ++{ ++ platform_device_register_simple("lq_wdt", 0, &lq_wdt_resource, 1); ++} ++ ++/* gpio */ ++static struct resource lq_gpio_resource[] = { ++ { ++ .name = "gpio0", ++ .start = LQ_GPIO0_BASE_ADDR, ++ .end = LQ_GPIO0_BASE_ADDR + LQ_GPIO_SIZE - 1, ++ .flags = IORESOURCE_MEM, ++ }, { ++ .name = "gpio1", ++ .start = LQ_GPIO1_BASE_ADDR, ++ .end = LQ_GPIO1_BASE_ADDR + LQ_GPIO_SIZE - 1, ++ .flags = IORESOURCE_MEM, ++ } ++}; ++ ++void __init ++lq_register_gpio(void) ++{ ++ platform_device_register_simple("lq_gpio", 0, &lq_gpio_resource[0], 1); ++ platform_device_register_simple("lq_gpio", 1, &lq_gpio_resource[1], 1); ++} ++ ++/* pci */ ++static struct platform_device lq_pci = ++{ ++ .name = "lq_pci", ++ .num_resources = 0, ++}; ++ ++void __init ++lq_register_pci(struct lq_pci_data *data) ++{ ++ lq_pci.dev.platform_data = data; ++ platform_device_register(&lq_pci); ++} ++ ++/* ebu */ ++static struct resource lq_ebu_resource = ++{ ++ .name = "gpio_ebu", ++ .start = LQ_EBU_GPIO_START, ++ .end = LQ_EBU_GPIO_START + LQ_EBU_GPIO_SIZE - 1, ++ .flags = IORESOURCE_MEM, ++}; ++ ++void __init ++lq_register_gpio_ebu(unsigned int value) ++{ ++ platform_device_register_simple("lq_ebu", 0, &lq_ebu_resource, 1); ++} ++ ++/* ethernet */ ++unsigned char lq_ethaddr[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; ++ ++static int __init ++lq_set_ethaddr(char *str) ++{ ++ sscanf(&str[8], 
"0%02hhx:0%02hhx:0%02hhx:0%02hhx:0%02hhx:0%02hhx", ++ &lq_ethaddr[0], &lq_ethaddr[1], &lq_ethaddr[2], ++ &lq_ethaddr[3], &lq_ethaddr[4], &lq_ethaddr[5]); ++ return 0; ++} ++__setup("ethaddr=", lq_set_ethaddr); ++ ++static struct resource lq_ethernet_resources = ++{ ++ .name = "etop", ++ .start = LQ_PPE32_BASE_ADDR, ++ .end = LQ_PPE32_BASE_ADDR + LQ_PPE32_SIZE - 1, ++ .flags = IORESOURCE_MEM, ++}; ++ ++static struct platform_device lq_ethernet = ++{ ++ .name = "lq_etop", ++ .resource = &lq_ethernet_resources, ++ .num_resources = 1, ++}; ++ ++void __init ++lq_register_ethernet(struct lq_eth_data *eth) ++{ ++ if(!eth) ++ return; ++ if(!eth->mac) ++ eth->mac = lq_ethaddr; ++ if(!is_valid_ether_addr(eth->mac)) ++ random_ether_addr(eth->mac); ++ lq_ethernet.dev.platform_data = eth; ++ platform_device_register(&lq_ethernet); ++} ++ ++/* tapi */ ++static struct resource mps_resources[] = { ++ { ++ .name = "voice-mem", ++ .flags = IORESOURCE_MEM, ++ .start = 0x1f107000, ++ .end = 0x1f1073ff, ++ }, ++ { ++ .name = "voice-mailbox", ++ .flags = IORESOURCE_MEM, ++ .start = 0x1f200000, ++ .end = 0x1f2007ff, ++ }, ++}; ++ ++static struct platform_device mps_device = { ++ .name = "mps", ++ .resource = mps_resources, ++ .num_resources = ARRAY_SIZE(mps_resources), ++}; ++ ++static struct platform_device vmmc_device = { ++ .name = "vmmc", ++ .dev = { ++ .parent = &mps_device.dev, ++ }, ++}; ++ ++void __init ++lq_register_tapi(void) ++{ ++#define CP1_SIZE (1 << 20) ++ dma_addr_t dma; ++ mps_device.dev.platform_data = ++ (void*)CPHYSADDR(dma_alloc_coherent(NULL, CP1_SIZE, &dma, GFP_ATOMIC)); ++ platform_device_register(&mps_device); ++ platform_device_register(&vmmc_device); ++} ++ ++/* asc ports */ ++static struct resource lq_asc0_resources[] = ++{ ++ { ++ .start = LQ_ASC0_BASE, ++ .end = LQ_ASC0_BASE + LQ_ASC_SIZE - 1, ++ .flags = IORESOURCE_MEM, ++ }, ++ IRQ_RES(tx, INT_NUM_IM3_IRL0), ++ IRQ_RES(rx, INT_NUM_IM3_IRL0 + 1), ++ IRQ_RES(err, INT_NUM_IM3_IRL0 + 2), ++}; ++ ++static struct resource lq_asc1_resources[] = ++{ ++ { ++ .start = LQ_ASC1_BASE, ++ .end = LQ_ASC1_BASE + LQ_ASC_SIZE - 1, ++ .flags = IORESOURCE_MEM, ++ }, ++ IRQ_RES(tx, INT_NUM_IM3_IRL0 + 8), ++ IRQ_RES(rx, INT_NUM_IM3_IRL0 + 9), ++ IRQ_RES(err, INT_NUM_IM3_IRL0 + 10), ++}; ++ ++void __init ++lq_register_asc(int port) ++{ ++ switch (port) { ++ case 0: ++ platform_device_register_simple("lq_asc", 0, ++ lq_asc0_resources, ARRAY_SIZE(lq_asc0_resources)); ++ break; ++ case 1: ++ platform_device_register_simple("lq_asc", 1, ++ lq_asc1_resources, ARRAY_SIZE(lq_asc1_resources)); ++ break; ++ default: ++ break; ++ } ++} +--- /dev/null ++++ b/arch/mips/lantiq/xway/devices.h +@@ -0,0 +1,24 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. 
++ * ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#ifndef _LQ_DEVICES_H__ ++#define _LQ_DEVICES_H__ ++ ++#include <lantiq_platform.h> ++ ++extern void __init lq_register_gpio(void); ++extern void __init lq_register_gpio_stp(void); ++extern void __init lq_register_gpio_ebu(unsigned int value); ++extern void __init lq_register_gpio_leds(struct gpio_led *leds, int cnt); ++extern void __init lq_register_pci(struct lq_pci_data *data); ++extern void __init lq_register_nor(struct physmap_flash_data *data); ++extern void __init lq_register_wdt(void); ++extern void __init lq_register_ethernet(struct lq_eth_data *eth); ++extern void __init lq_register_asc(int port); ++ ++#endif +--- /dev/null ++++ b/arch/mips/lantiq/xway/dma.c +@@ -0,0 +1,701 @@ ++#include <linux/module.h> ++#include <linux/init.h> ++#include <linux/sched.h> ++#include <linux/kernel.h> ++#include <linux/slab.h> ++#include <linux/string.h> ++#include <linux/timer.h> ++#include <linux/fs.h> ++#include <linux/errno.h> ++#include <linux/stat.h> ++#include <linux/mm.h> ++#include <linux/tty.h> ++#include <linux/selection.h> ++#include <linux/kmod.h> ++#include <linux/vmalloc.h> ++#include <linux/interrupt.h> ++#include <linux/delay.h> ++#include <linux/uaccess.h> ++#include <linux/errno.h> ++#include <linux/io.h> ++ ++#include <xway.h> ++#include <xway_irq.h> ++#include <xway_dma.h> ++ ++#define LQ_DMA_CS ((u32 *)(LQ_DMA_BASE_ADDR + 0x18)) ++#define LQ_DMA_CIE ((u32 *)(LQ_DMA_BASE_ADDR + 0x2C)) ++#define LQ_DMA_IRNEN ((u32 *)(LQ_DMA_BASE_ADDR + 0xf4)) ++#define LQ_DMA_CCTRL ((u32 *)(LQ_DMA_BASE_ADDR + 0x1C)) ++#define LQ_DMA_CIS ((u32 *)(LQ_DMA_BASE_ADDR + 0x28)) ++#define LQ_DMA_CDLEN ((u32 *)(LQ_DMA_BASE_ADDR + 0x24)) ++#define LQ_DMA_PS ((u32 *)(LQ_DMA_BASE_ADDR + 0x40)) ++#define LQ_DMA_PCTRL ((u32 *)(LQ_DMA_BASE_ADDR + 0x44)) ++#define LQ_DMA_CTRL ((u32 *)(LQ_DMA_BASE_ADDR + 0x10)) ++#define LQ_DMA_CPOLL ((u32 *)(LQ_DMA_BASE_ADDR + 0x14)) ++#define LQ_DMA_CDBA ((u32 *)(LQ_DMA_BASE_ADDR + 0x20)) ++ ++/*25 descriptors for each dma channel,4096/8/20=25.xx*/ ++#define LQ_DMA_DESCRIPTOR_OFFSET 25 ++ ++#define MAX_DMA_DEVICE_NUM 6 /*max ports connecting to dma */ ++#define MAX_DMA_CHANNEL_NUM 20 /*max dma channels */ ++#define DMA_INT_BUDGET 100 /*budget for interrupt handling */ ++#define DMA_POLL_COUNTER 4 /*fix me, set the correct counter value here! 
*/ ++ ++extern void lq_mask_and_ack_irq(unsigned int irq_nr); ++extern void lq_enable_irq(unsigned int irq_nr); ++extern void lq_disable_irq(unsigned int irq_nr); ++ ++u64 *g_desc_list; ++struct dma_device_info dma_devs[MAX_DMA_DEVICE_NUM]; ++struct dma_channel_info dma_chan[MAX_DMA_CHANNEL_NUM]; ++ ++static const char *global_device_name[MAX_DMA_DEVICE_NUM] = ++ { "PPE", "DEU", "SPI", "SDIO", "MCTRL0", "MCTRL1" }; ++ ++struct dma_chan_map default_dma_map[MAX_DMA_CHANNEL_NUM] = { ++ {"PPE", LQ_DMA_RX, 0, LQ_DMA_CH0_INT, 0}, ++ {"PPE", LQ_DMA_TX, 0, LQ_DMA_CH1_INT, 0}, ++ {"PPE", LQ_DMA_RX, 1, LQ_DMA_CH2_INT, 1}, ++ {"PPE", LQ_DMA_TX, 1, LQ_DMA_CH3_INT, 1}, ++ {"PPE", LQ_DMA_RX, 2, LQ_DMA_CH4_INT, 2}, ++ {"PPE", LQ_DMA_TX, 2, LQ_DMA_CH5_INT, 2}, ++ {"PPE", LQ_DMA_RX, 3, LQ_DMA_CH6_INT, 3}, ++ {"PPE", LQ_DMA_TX, 3, LQ_DMA_CH7_INT, 3}, ++ {"DEU", LQ_DMA_RX, 0, LQ_DMA_CH8_INT, 0}, ++ {"DEU", LQ_DMA_TX, 0, LQ_DMA_CH9_INT, 0}, ++ {"DEU", LQ_DMA_RX, 1, LQ_DMA_CH10_INT, 1}, ++ {"DEU", LQ_DMA_TX, 1, LQ_DMA_CH11_INT, 1}, ++ {"SPI", LQ_DMA_RX, 0, LQ_DMA_CH12_INT, 0}, ++ {"SPI", LQ_DMA_TX, 0, LQ_DMA_CH13_INT, 0}, ++ {"SDIO", LQ_DMA_RX, 0, LQ_DMA_CH14_INT, 0}, ++ {"SDIO", LQ_DMA_TX, 0, LQ_DMA_CH15_INT, 0}, ++ {"MCTRL0", LQ_DMA_RX, 0, LQ_DMA_CH16_INT, 0}, ++ {"MCTRL0", LQ_DMA_TX, 0, LQ_DMA_CH17_INT, 0}, ++ {"MCTRL1", LQ_DMA_RX, 1, LQ_DMA_CH18_INT, 1}, ++ {"MCTRL1", LQ_DMA_TX, 1, LQ_DMA_CH19_INT, 1} ++}; ++ ++struct dma_chan_map *chan_map = default_dma_map; ++volatile u32 g_lq_dma_int_status; ++volatile int g_lq_dma_in_process; /* 0=not in process, 1=in process */ ++ ++void do_dma_tasklet(unsigned long); ++DECLARE_TASKLET(dma_tasklet, do_dma_tasklet, 0); ++ ++u8 *common_buffer_alloc(int len, int *byte_offset, void **opt) ++{ ++ u8 *buffer = kmalloc(len * sizeof(u8), GFP_KERNEL); ++ ++ *byte_offset = 0; ++ ++ return buffer; ++} ++ ++void common_buffer_free(u8 *dataptr, void *opt) ++{ ++ kfree(dataptr); ++} ++ ++void enable_ch_irq(struct dma_channel_info *pCh) ++{ ++ int chan_no = (int)(pCh - dma_chan); ++ unsigned long flag; ++ ++ local_irq_save(flag); ++ lq_w32(chan_no, LQ_DMA_CS); ++ lq_w32(0x4a, LQ_DMA_CIE); ++ lq_w32(lq_r32(LQ_DMA_IRNEN) | (1 << chan_no), LQ_DMA_IRNEN); ++ local_irq_restore(flag); ++ lq_enable_irq(pCh->irq); ++} ++ ++void disable_ch_irq(struct dma_channel_info *pCh) ++{ ++ unsigned long flag; ++ int chan_no = (int) (pCh - dma_chan); ++ ++ local_irq_save(flag); ++ g_lq_dma_int_status &= ~(1 << chan_no); ++ lq_w32(chan_no, LQ_DMA_CS); ++ lq_w32(0, LQ_DMA_CIE); ++ lq_w32(lq_r32(LQ_DMA_IRNEN) & ~(1 << chan_no), LQ_DMA_IRNEN); ++ local_irq_restore(flag); ++ lq_mask_and_ack_irq(pCh->irq); ++} ++ ++void open_chan(struct dma_channel_info *pCh) ++{ ++ unsigned long flag; ++ int chan_no = (int)(pCh - dma_chan); ++ ++ local_irq_save(flag); ++ lq_w32(chan_no, LQ_DMA_CS); ++ lq_w32(lq_r32(LQ_DMA_CCTRL) | 1, LQ_DMA_CCTRL); ++ if (pCh->dir == LQ_DMA_RX) ++ enable_ch_irq(pCh); ++ local_irq_restore(flag); ++} ++ ++void close_chan(struct dma_channel_info *pCh) ++{ ++ unsigned long flag; ++ int chan_no = (int) (pCh - dma_chan); ++ ++ local_irq_save(flag); ++ lq_w32(chan_no, LQ_DMA_CS); ++ lq_w32(lq_r32(LQ_DMA_CCTRL) & ~1, LQ_DMA_CCTRL); ++ disable_ch_irq(pCh); ++ local_irq_restore(flag); ++} ++ ++void reset_chan(struct dma_channel_info *pCh) ++{ ++ int chan_no = (int) (pCh - dma_chan); ++ ++ lq_w32(chan_no, LQ_DMA_CS); ++ lq_w32(lq_r32(LQ_DMA_CCTRL) | 2, LQ_DMA_CCTRL); ++} ++ ++void rx_chan_intr_handler(int chan_no) ++{ ++ struct dma_device_info *pDev = (struct dma_device_info 
*)dma_chan[chan_no].dma_dev; ++ struct dma_channel_info *pCh = &dma_chan[chan_no]; ++ struct rx_desc *rx_desc_p; ++ int tmp; ++ unsigned long flag; ++ ++ /*handle command complete interrupt */ ++ rx_desc_p = (struct rx_desc *)pCh->desc_base + pCh->curr_desc; ++ if (rx_desc_p->status.field.OWN == CPU_OWN ++ && rx_desc_p->status.field.C ++ && rx_desc_p->status.field.data_length < 1536){ ++ /* Every thing is correct, then we inform the upper layer */ ++ pDev->current_rx_chan = pCh->rel_chan_no; ++ if (pDev->intr_handler) ++ pDev->intr_handler(pDev, RCV_INT); ++ pCh->weight--; ++ } else { ++ local_irq_save(flag); ++ tmp = lq_r32(LQ_DMA_CS); ++ lq_w32(chan_no, LQ_DMA_CS); ++ lq_w32(lq_r32(LQ_DMA_CIS) | 0x7e, LQ_DMA_CIS); ++ lq_w32(tmp, LQ_DMA_CS); ++ g_lq_dma_int_status &= ~(1 << chan_no); ++ local_irq_restore(flag); ++ lq_enable_irq(dma_chan[chan_no].irq); ++ } ++} ++ ++inline void tx_chan_intr_handler(int chan_no) ++{ ++ struct dma_device_info *pDev = (struct dma_device_info *)dma_chan[chan_no].dma_dev; ++ struct dma_channel_info *pCh = &dma_chan[chan_no]; ++ int tmp; ++ unsigned long flag; ++ ++ local_irq_save(flag); ++ tmp = lq_r32(LQ_DMA_CS); ++ lq_w32(chan_no, LQ_DMA_CS); ++ lq_w32(lq_r32(LQ_DMA_CIS) | 0x7e, LQ_DMA_CIS); ++ lq_w32(tmp, LQ_DMA_CS); ++ g_lq_dma_int_status &= ~(1 << chan_no); ++ local_irq_restore(flag); ++ pDev->current_tx_chan = pCh->rel_chan_no; ++ if (pDev->intr_handler) ++ pDev->intr_handler(pDev, TRANSMIT_CPT_INT); ++} ++ ++void do_dma_tasklet(unsigned long unused) ++{ ++ int i; ++ int chan_no = 0; ++ int budget = DMA_INT_BUDGET; ++ int weight = 0; ++ unsigned long flag; ++ ++ while (g_lq_dma_int_status) { ++ if (budget-- < 0) { ++ tasklet_schedule(&dma_tasklet); ++ return; ++ } ++ chan_no = -1; ++ weight = 0; ++ for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++) { ++ if ((g_lq_dma_int_status & (1 << i)) && dma_chan[i].weight > 0) { ++ if (dma_chan[i].weight > weight) { ++ chan_no = i; ++ weight = dma_chan[chan_no].weight; ++ } ++ } ++ } ++ ++ if (chan_no >= 0) { ++ if (chan_map[chan_no].dir == LQ_DMA_RX) ++ rx_chan_intr_handler(chan_no); ++ else ++ tx_chan_intr_handler(chan_no); ++ } else { ++ for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++) ++ dma_chan[i].weight = dma_chan[i].default_weight; ++ } ++ } ++ ++ local_irq_save(flag); ++ g_lq_dma_in_process = 0; ++ if (g_lq_dma_int_status) { ++ g_lq_dma_in_process = 1; ++ tasklet_schedule(&dma_tasklet); ++ } ++ local_irq_restore(flag); ++} ++ ++irqreturn_t dma_interrupt(int irq, void *dev_id) ++{ ++ struct dma_channel_info *pCh; ++ int chan_no = 0; ++ int tmp; ++ ++ pCh = (struct dma_channel_info *)dev_id; ++ chan_no = (int)(pCh - dma_chan); ++ if (chan_no < 0 || chan_no > 19) ++ BUG(); ++ ++ tmp = lq_r32(LQ_DMA_IRNEN); ++ lq_w32(0, LQ_DMA_IRNEN); ++ g_lq_dma_int_status |= 1 << chan_no; ++ lq_w32(tmp, LQ_DMA_IRNEN); ++ lq_mask_and_ack_irq(irq); ++ ++ if (!g_lq_dma_in_process) { ++ g_lq_dma_in_process = 1; ++ tasklet_schedule(&dma_tasklet); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++struct dma_device_info *dma_device_reserve(char *dev_name) ++{ ++ int i; ++ ++ for (i = 0; i < MAX_DMA_DEVICE_NUM; i++) { ++ if (strcmp(dev_name, dma_devs[i].device_name) == 0) { ++ if (dma_devs[i].reserved) ++ return NULL; ++ dma_devs[i].reserved = 1; ++ break; ++ } ++ } ++ ++ return &dma_devs[i]; ++} ++EXPORT_SYMBOL(dma_device_reserve); ++ ++void dma_device_release(struct dma_device_info *dev) ++{ ++ dev->reserved = 0; ++} ++EXPORT_SYMBOL(dma_device_release); ++ ++void dma_device_register(struct dma_device_info *dev) ++{ ++ int i, j; ++ int chan_no = 0; ++ u8 
*buffer; ++ int byte_offset; ++ unsigned long flag; ++ struct dma_device_info *pDev; ++ struct dma_channel_info *pCh; ++ struct rx_desc *rx_desc_p; ++ struct tx_desc *tx_desc_p; ++ ++ for (i = 0; i < dev->max_tx_chan_num; i++) { ++ pCh = dev->tx_chan[i]; ++ if (pCh->control == LQ_DMA_CH_ON) { ++ chan_no = (int)(pCh - dma_chan); ++ for (j = 0; j < pCh->desc_len; j++) { ++ tx_desc_p = (struct tx_desc *)pCh->desc_base + j; ++ memset(tx_desc_p, 0, sizeof(struct tx_desc)); ++ } ++ local_irq_save(flag); ++ lq_w32(chan_no, LQ_DMA_CS); ++ /* check if the descriptor length is changed */ ++ if (lq_r32(LQ_DMA_CDLEN) != pCh->desc_len) ++ lq_w32(pCh->desc_len, LQ_DMA_CDLEN); ++ ++ lq_w32(lq_r32(LQ_DMA_CCTRL) & ~1, LQ_DMA_CCTRL); ++ lq_w32(lq_r32(LQ_DMA_CCTRL) | 2, LQ_DMA_CCTRL); ++ while (lq_r32(LQ_DMA_CCTRL) & 2) ++ ; ++ lq_w32(lq_r32(LQ_DMA_IRNEN) | (1 << chan_no), LQ_DMA_IRNEN); ++ lq_w32(0x30100, LQ_DMA_CCTRL); /* reset and enable channel,enable channel later */ ++ local_irq_restore(flag); ++ } ++ } ++ ++ for (i = 0; i < dev->max_rx_chan_num; i++) { ++ pCh = dev->rx_chan[i]; ++ if (pCh->control == LQ_DMA_CH_ON) { ++ chan_no = (int)(pCh - dma_chan); ++ ++ for (j = 0; j < pCh->desc_len; j++) { ++ rx_desc_p = (struct rx_desc *)pCh->desc_base + j; ++ pDev = (struct dma_device_info *)(pCh->dma_dev); ++ buffer = pDev->buffer_alloc(pCh->packet_size, &byte_offset, (void *)&(pCh->opt[j])); ++ if (!buffer) ++ break; ++ ++ dma_cache_inv((unsigned long) buffer, pCh->packet_size); ++ ++ rx_desc_p->Data_Pointer = (u32)CPHYSADDR((u32)buffer); ++ rx_desc_p->status.word = 0; ++ rx_desc_p->status.field.byte_offset = byte_offset; ++ rx_desc_p->status.field.OWN = DMA_OWN; ++ rx_desc_p->status.field.data_length = pCh->packet_size; ++ } ++ ++ local_irq_save(flag); ++ lq_w32(chan_no, LQ_DMA_CS); ++ /* check if the descriptor length is changed */ ++ if (lq_r32(LQ_DMA_CDLEN) != pCh->desc_len) ++ lq_w32(pCh->desc_len, LQ_DMA_CDLEN); ++ lq_w32(lq_r32(LQ_DMA_CCTRL) & ~1, LQ_DMA_CCTRL); ++ lq_w32(lq_r32(LQ_DMA_CCTRL) | 2, LQ_DMA_CCTRL); ++ while (lq_r32(LQ_DMA_CCTRL) & 2) ++ ; ++ lq_w32(0x0a, LQ_DMA_CIE); /* fix me, should enable all the interrupts here? */ ++ lq_w32(lq_r32(LQ_DMA_IRNEN) | (1 << chan_no), LQ_DMA_IRNEN); ++ lq_w32(0x30000, LQ_DMA_CCTRL); ++ local_irq_restore(flag); ++ lq_enable_irq(dma_chan[chan_no].irq); ++ } ++ } ++} ++EXPORT_SYMBOL(dma_device_register); ++ ++void dma_device_unregister(struct dma_device_info *dev) ++{ ++ int i, j; ++ int chan_no; ++ struct dma_channel_info *pCh; ++ struct rx_desc *rx_desc_p; ++ struct tx_desc *tx_desc_p; ++ unsigned long flag; ++ ++ for (i = 0; i < dev->max_tx_chan_num; i++) { ++ pCh = dev->tx_chan[i]; ++ if (pCh->control == LQ_DMA_CH_ON) { ++ chan_no = (int)(dev->tx_chan[i] - dma_chan); ++ local_irq_save(flag); ++ lq_w32(chan_no, LQ_DMA_CS); ++ pCh->curr_desc = 0; ++ pCh->prev_desc = 0; ++ pCh->control = LQ_DMA_CH_OFF; ++ lq_w32(0, LQ_DMA_CIE); /* fix me, should disable all the interrupts here? 
*/ ++ lq_w32(lq_r32(LQ_DMA_IRNEN) & ~(1 << chan_no), LQ_DMA_IRNEN); /* disable interrupts */ ++ lq_w32(lq_r32(LQ_DMA_CCTRL) & ~1, LQ_DMA_CCTRL); ++ while (lq_r32(LQ_DMA_CCTRL) & 1) ++ ; ++ local_irq_restore(flag); ++ ++ for (j = 0; j < pCh->desc_len; j++) { ++ tx_desc_p = (struct tx_desc *)pCh->desc_base + j; ++ if ((tx_desc_p->status.field.OWN == CPU_OWN && tx_desc_p->status.field.C) ++ || (tx_desc_p->status.field.OWN == DMA_OWN && tx_desc_p->status.field.data_length > 0)) { ++ dev->buffer_free((u8 *) __va(tx_desc_p->Data_Pointer), (void *)pCh->opt[j]); ++ } ++ tx_desc_p->status.field.OWN = CPU_OWN; ++ memset(tx_desc_p, 0, sizeof(struct tx_desc)); ++ } ++ /* TODO should free buffer that is not transferred by dma */ ++ } ++ } ++ ++ for (i = 0; i < dev->max_rx_chan_num; i++) { ++ pCh = dev->rx_chan[i]; ++ chan_no = (int)(dev->rx_chan[i] - dma_chan); ++ lq_disable_irq(pCh->irq); ++ ++ local_irq_save(flag); ++ g_lq_dma_int_status &= ~(1 << chan_no); ++ pCh->curr_desc = 0; ++ pCh->prev_desc = 0; ++ pCh->control = LQ_DMA_CH_OFF; ++ ++ lq_w32(chan_no, LQ_DMA_CS); ++ lq_w32(0, LQ_DMA_CIE); /* fix me, should disable all the interrupts here? */ ++ lq_w32(lq_r32(LQ_DMA_IRNEN) & ~(1 << chan_no), LQ_DMA_IRNEN); /* disable interrupts */ ++ lq_w32(lq_r32(LQ_DMA_CCTRL) & ~1, LQ_DMA_CCTRL); ++ while (lq_r32(LQ_DMA_CCTRL) & 1) ++ ; ++ ++ local_irq_restore(flag); ++ for (j = 0; j < pCh->desc_len; j++) { ++ rx_desc_p = (struct rx_desc *) pCh->desc_base + j; ++ if ((rx_desc_p->status.field.OWN == CPU_OWN ++ && rx_desc_p->status.field.C) ++ || (rx_desc_p->status.field.OWN == DMA_OWN ++ && rx_desc_p->status.field.data_length > 0)) { ++ dev->buffer_free((u8 *) ++ __va(rx_desc_p->Data_Pointer), ++ (void *) pCh->opt[j]); ++ } ++ } ++ } ++} ++EXPORT_SYMBOL(dma_device_unregister); ++ ++int dma_device_read(struct dma_device_info *dma_dev, u8 **dataptr, void **opt) ++{ ++ u8 *buf; ++ int len; ++ int byte_offset = 0; ++ void *p = NULL; ++ struct dma_channel_info *pCh = dma_dev->rx_chan[dma_dev->current_rx_chan]; ++ struct rx_desc *rx_desc_p; ++ ++ /* get the rx data first */ ++ rx_desc_p = (struct rx_desc *) pCh->desc_base + pCh->curr_desc; ++ if (!(rx_desc_p->status.field.OWN == CPU_OWN && rx_desc_p->status.field.C)) ++ return 0; ++ ++ buf = (u8 *) __va(rx_desc_p->Data_Pointer); ++ *(u32 *)dataptr = (u32)buf; ++ len = rx_desc_p->status.field.data_length; ++ ++ if (opt) ++ *(int *)opt = (int)pCh->opt[pCh->curr_desc]; ++ ++ /* replace with a new allocated buffer */ ++ buf = dma_dev->buffer_alloc(pCh->packet_size, &byte_offset, &p); ++ ++ if (buf) { ++ dma_cache_inv((unsigned long) buf, pCh->packet_size); ++ pCh->opt[pCh->curr_desc] = p; ++ wmb(); ++ ++ rx_desc_p->Data_Pointer = (u32) CPHYSADDR((u32) buf); ++ rx_desc_p->status.word = (DMA_OWN << 31) | ((byte_offset) << 23) | pCh->packet_size; ++ wmb(); ++ } else { ++ *(u32 *) dataptr = 0; ++ if (opt) ++ *(int *) opt = 0; ++ len = 0; ++ } ++ ++ /* increase the curr_desc pointer */ ++ pCh->curr_desc++; ++ if (pCh->curr_desc == pCh->desc_len) ++ pCh->curr_desc = 0; ++ ++ return len; ++} ++EXPORT_SYMBOL(dma_device_read); ++ ++int dma_device_write(struct dma_device_info *dma_dev, u8 *dataptr, int len, void *opt) ++{ ++ unsigned long flag; ++ u32 tmp, byte_offset; ++ struct dma_channel_info *pCh; ++ int chan_no; ++ struct tx_desc *tx_desc_p; ++ local_irq_save(flag); ++ ++ pCh = dma_dev->tx_chan[dma_dev->current_tx_chan]; ++ chan_no = (int)(pCh - (struct dma_channel_info *) dma_chan); ++ ++ tx_desc_p = (struct tx_desc *)pCh->desc_base + pCh->prev_desc; ++ while 
(tx_desc_p->status.field.OWN == CPU_OWN && tx_desc_p->status.field.C) { ++ dma_dev->buffer_free((u8 *) __va(tx_desc_p->Data_Pointer), pCh->opt[pCh->prev_desc]); ++ memset(tx_desc_p, 0, sizeof(struct tx_desc)); ++ pCh->prev_desc = (pCh->prev_desc + 1) % (pCh->desc_len); ++ tx_desc_p = (struct tx_desc *)pCh->desc_base + pCh->prev_desc; ++ } ++ tx_desc_p = (struct tx_desc *)pCh->desc_base + pCh->curr_desc; ++ /* Check whether this descriptor is available */ ++ if (tx_desc_p->status.field.OWN == DMA_OWN || tx_desc_p->status.field.C) { ++ /* if not, the tell the upper layer device */ ++ dma_dev->intr_handler (dma_dev, TX_BUF_FULL_INT); ++ local_irq_restore(flag); ++ printk(KERN_INFO "%s %d: failed to write!\n", __func__, __LINE__); ++ ++ return 0; ++ } ++ pCh->opt[pCh->curr_desc] = opt; ++ /* byte offset----to adjust the starting address of the data buffer, should be multiple of the burst length. */ ++ byte_offset = ((u32) CPHYSADDR((u32) dataptr)) % ((dma_dev->tx_burst_len) * 4); ++ dma_cache_wback((unsigned long) dataptr, len); ++ wmb(); ++ tx_desc_p->Data_Pointer = (u32) CPHYSADDR((u32) dataptr) - byte_offset; ++ wmb(); ++ tx_desc_p->status.word = (DMA_OWN << 31) | DMA_DESC_SOP_SET | DMA_DESC_EOP_SET | ((byte_offset) << 23) | len; ++ wmb(); ++ ++ pCh->curr_desc++; ++ if (pCh->curr_desc == pCh->desc_len) ++ pCh->curr_desc = 0; ++ ++ /*Check whether this descriptor is available */ ++ tx_desc_p = (struct tx_desc *) pCh->desc_base + pCh->curr_desc; ++ if (tx_desc_p->status.field.OWN == DMA_OWN) { ++ /*if not , the tell the upper layer device */ ++ dma_dev->intr_handler (dma_dev, TX_BUF_FULL_INT); ++ } ++ ++ lq_w32(chan_no, LQ_DMA_CS); ++ tmp = lq_r32(LQ_DMA_CCTRL); ++ ++ if (!(tmp & 1)) ++ pCh->open(pCh); ++ ++ local_irq_restore(flag); ++ ++ return len; ++} ++EXPORT_SYMBOL(dma_device_write); ++ ++int map_dma_chan(struct dma_chan_map *map) ++{ ++ int i, j; ++ int result; ++ ++ for (i = 0; i < MAX_DMA_DEVICE_NUM; i++) ++ strcpy(dma_devs[i].device_name, global_device_name[i]); ++ ++ for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++) { ++ dma_chan[i].irq = map[i].irq; ++ result = request_irq(dma_chan[i].irq, dma_interrupt, IRQF_DISABLED, map[i].dev_name, (void *)&dma_chan[i]); ++ if (result) { ++ printk(KERN_WARNING "error, cannot get dma_irq!\n"); ++ free_irq(dma_chan[i].irq, (void *) &dma_interrupt); ++ ++ return -EFAULT; ++ } ++ } ++ ++ for (i = 0; i < MAX_DMA_DEVICE_NUM; i++) { ++ dma_devs[i].num_tx_chan = 0; /*set default tx channel number to be one */ ++ dma_devs[i].num_rx_chan = 0; /*set default rx channel number to be one */ ++ dma_devs[i].max_rx_chan_num = 0; ++ dma_devs[i].max_tx_chan_num = 0; ++ dma_devs[i].buffer_alloc = &common_buffer_alloc; ++ dma_devs[i].buffer_free = &common_buffer_free; ++ dma_devs[i].intr_handler = NULL; ++ dma_devs[i].tx_burst_len = 4; ++ dma_devs[i].rx_burst_len = 4; ++ if (i == 0) { ++ lq_w32(0, LQ_DMA_PS); ++ lq_w32(lq_r32(LQ_DMA_PCTRL) | ((0xf << 8) | (1 << 6)), LQ_DMA_PCTRL); /*enable dma drop */ ++ } ++ ++ if (i == 1) { ++ lq_w32(1, LQ_DMA_PS); ++ lq_w32(0x14, LQ_DMA_PCTRL); /*deu port setting */ ++ } ++ ++ for (j = 0; j < MAX_DMA_CHANNEL_NUM; j++) { ++ dma_chan[j].byte_offset = 0; ++ dma_chan[j].open = &open_chan; ++ dma_chan[j].close = &close_chan; ++ dma_chan[j].reset = &reset_chan; ++ dma_chan[j].enable_irq = &enable_ch_irq; ++ dma_chan[j].disable_irq = &disable_ch_irq; ++ dma_chan[j].rel_chan_no = map[j].rel_chan_no; ++ dma_chan[j].control = LQ_DMA_CH_OFF; ++ dma_chan[j].default_weight = LQ_DMA_CH_DEFAULT_WEIGHT; ++ dma_chan[j].weight = 
dma_chan[j].default_weight; ++ dma_chan[j].curr_desc = 0; ++ dma_chan[j].prev_desc = 0; ++ } ++ ++ for (j = 0; j < MAX_DMA_CHANNEL_NUM; j++) { ++ if (strcmp(dma_devs[i].device_name, map[j].dev_name) == 0) { ++ if (map[j].dir == LQ_DMA_RX) { ++ dma_chan[j].dir = LQ_DMA_RX; ++ dma_devs[i].max_rx_chan_num++; ++ dma_devs[i].rx_chan[dma_devs[i].max_rx_chan_num - 1] = &dma_chan[j]; ++ dma_devs[i].rx_chan[dma_devs[i].max_rx_chan_num - 1]->pri = map[j].pri; ++ dma_chan[j].dma_dev = (void *)&dma_devs[i]; ++ } else if (map[j].dir == LQ_DMA_TX) { ++ /*TX direction */ ++ dma_chan[j].dir = LQ_DMA_TX; ++ dma_devs[i].max_tx_chan_num++; ++ dma_devs[i].tx_chan[dma_devs[i].max_tx_chan_num - 1] = &dma_chan[j]; ++ dma_devs[i].tx_chan[dma_devs[i].max_tx_chan_num - 1]->pri = map[j].pri; ++ dma_chan[j].dma_dev = (void *)&dma_devs[i]; ++ } else { ++ printk(KERN_WARNING "WRONG DMA MAP!\n"); ++ } ++ } ++ } ++ } ++ ++ return 0; ++} ++ ++void dma_chip_init(void) ++{ ++ int i; ++ ++ /* enable DMA from PMU */ ++ lq_pmu_enable(PMU_DMA); ++ ++ /* reset DMA */ ++ lq_w32(lq_r32(LQ_DMA_CTRL) | 1, LQ_DMA_CTRL); ++ ++ /* disable all interrupts */ ++ lq_w32(0, LQ_DMA_IRNEN); ++ ++ for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++) { ++ lq_w32(i, LQ_DMA_CS); ++ lq_w32(0x2, LQ_DMA_CCTRL); ++ lq_w32(0x80000040, LQ_DMA_CPOLL); ++ lq_w32(lq_r32(LQ_DMA_CCTRL) & ~0x1, LQ_DMA_CCTRL); ++ } ++} ++ ++int lq_dma_init(void) ++{ ++ int i; ++ ++ dma_chip_init(); ++ ++ if (map_dma_chan(default_dma_map)) ++ BUG(); ++ ++ g_desc_list = (u64 *)KSEG1ADDR(__get_free_page(GFP_DMA)); ++ ++ if (g_desc_list == NULL) { ++ printk(KERN_WARNING "no memory for desriptor\n"); ++ return -ENOMEM; ++ } ++ ++ memset(g_desc_list, 0, PAGE_SIZE); ++ ++ for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++) { ++ dma_chan[i].desc_base = (u32)g_desc_list + i * LQ_DMA_DESCRIPTOR_OFFSET * 8; ++ dma_chan[i].curr_desc = 0; ++ dma_chan[i].desc_len = LQ_DMA_DESCRIPTOR_OFFSET; ++ ++ lq_w32(i, LQ_DMA_CS); ++ lq_w32((u32)CPHYSADDR(dma_chan[i].desc_base), LQ_DMA_CDBA); ++ lq_w32(dma_chan[i].desc_len, LQ_DMA_CDLEN); ++ } ++ return 0; ++} ++ ++arch_initcall(lq_dma_init); ++ ++void dma_cleanup(void) ++{ ++ int i; ++ ++ free_page(KSEG0ADDR((unsigned long) g_desc_list)); ++ for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++) ++ free_irq(dma_chan[i].irq, (void *)&dma_interrupt); ++} ++ ++MODULE_LICENSE("GPL"); +--- /dev/null ++++ b/arch/mips/lantiq/xway/pmu.c +@@ -0,0 +1,36 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. 
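For orientation, the dma.c core above is consumed by client drivers through dma_device_reserve()/dma_device_register() and the intr_handler callback. The following is a hedged sketch of that flow, not code from the patch: the device name "PPE" and the RCV_INT/TRANSMIT_CPT_INT/TX_BUF_FULL_INT values come from dma.c, while the driver glue (my_dma_intr(), my_dma_probe(), the 1536-byte packet size) is purely illustrative.

static void my_dma_intr(struct dma_device_info *dev, int status)
{
        u8 *buf = NULL;
        int len;

        if (status == RCV_INT) {
                /* pull the completed RX buffer; dma_device_read() refills the ring */
                len = dma_device_read(dev, &buf, NULL);
                /* ... pass buf/len up, e.g. to the network stack ... */
        }
        /* TRANSMIT_CPT_INT and TX_BUF_FULL_INT would be handled here too */
}

static int my_dma_probe(void)
{
        struct dma_device_info *dev = dma_device_reserve("PPE");
        int i;

        if (!dev)
                return -EBUSY;
        dev->intr_handler = my_dma_intr;        /* invoked from the DMA tasklet */
        /* buffer_alloc/buffer_free stay at the kmalloc-based defaults installed
         * by map_dma_chan(); a real driver may provide its own allocator */
        for (i = 0; i < dev->max_rx_chan_num; i++) {
                dev->rx_chan[i]->packet_size = 1536;    /* illustrative RX buffer size */
                dev->rx_chan[i]->control = LQ_DMA_CH_ON;
        }
        for (i = 0; i < dev->max_tx_chan_num; i++)
                dev->tx_chan[i]->control = LQ_DMA_CH_ON;
        dma_device_register(dev);               /* sets up descriptors and channel IRQs */
        for (i = 0; i < dev->max_rx_chan_num; i++)
                dev->rx_chan[i]->open(dev->rx_chan[i]);
        return 0;
}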
++ * ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/version.h> ++ ++#include <xway.h> ++ ++#define LQ_PMU_PWDCR ((u32 *)(LQ_PMU_BASE_ADDR + 0x001C)) ++#define LQ_PMU_PWDSR ((u32 *)(LQ_PMU_BASE_ADDR + 0x0020)) ++ ++void ++lq_pmu_enable(unsigned int module) ++{ ++ int err = 1000000; ++ ++ lq_w32(lq_r32(LQ_PMU_PWDCR) & ~module, LQ_PMU_PWDCR); ++ while (--err && (lq_r32(LQ_PMU_PWDSR) & module)); ++ ++ if (!err) ++ panic("activating PMU module failed!"); ++} ++EXPORT_SYMBOL(lq_pmu_enable); ++ ++void ++lq_pmu_disable(unsigned int module) ++{ ++ lq_w32(lq_r32(LQ_PMU_PWDCR) | module, LQ_PMU_PWDCR); ++} ++EXPORT_SYMBOL(lq_pmu_disable); +--- /dev/null ++++ b/arch/mips/lantiq/xway/timer.c +@@ -0,0 +1,828 @@ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/version.h> ++#include <linux/types.h> ++#include <linux/fs.h> ++#include <linux/miscdevice.h> ++#include <linux/init.h> ++#include <linux/uaccess.h> ++#include <linux/unistd.h> ++#include <linux/errno.h> ++#include <linux/interrupt.h> ++#include <linux/sched.h> ++ ++#include <asm/irq.h> ++#include <asm/div64.h> ++ ++#include <xway.h> ++#include <xway_irq.h> ++#include <lantiq_timer.h> ++ ++#define MAX_NUM_OF_32BIT_TIMER_BLOCKS 6 ++ ++#ifdef TIMER1A ++#define FIRST_TIMER TIMER1A ++#else ++#define FIRST_TIMER 2 ++#endif ++ ++/* ++ * GPTC divider is set or not. ++ */ ++#define GPTU_CLC_RMC_IS_SET 0 ++ ++/* ++ * Timer Interrupt (IRQ) ++ */ ++/* Must be adjusted when ICU driver is available */ ++#define TIMER_INTERRUPT (INT_NUM_IM3_IRL0 + 22) ++ ++/* ++ * Bits Operation ++ */ ++#define GET_BITS(x, msb, lsb) \ ++ (((x) & ((1 << ((msb) + 1)) - 1)) >> (lsb)) ++#define SET_BITS(x, msb, lsb, value) \ ++ (((x) & ~(((1 << ((msb) + 1)) - 1) ^ ((1 << (lsb)) - 1))) | \ ++ (((value) & ((1 << (1 + (msb) - (lsb))) - 1)) << (lsb))) ++ ++/* ++ * GPTU Register Mapping ++ */ ++#define LQ_GPTU (KSEG1 + 0x1E100A00) ++#define LQ_GPTU_CLC ((volatile u32 *)(LQ_GPTU + 0x0000)) ++#define LQ_GPTU_ID ((volatile u32 *)(LQ_GPTU + 0x0008)) ++#define LQ_GPTU_CON(n, X) ((volatile u32 *)(LQ_GPTU + 0x0010 + ((X) * 4) + ((n) - 1) * 0x0020)) /* X must be either A or B */ ++#define LQ_GPTU_RUN(n, X) ((volatile u32 *)(LQ_GPTU + 0x0018 + ((X) * 4) + ((n) - 1) * 0x0020)) /* X must be either A or B */ ++#define LQ_GPTU_RELOAD(n, X) ((volatile u32 *)(LQ_GPTU + 0x0020 + ((X) * 4) + ((n) - 1) * 0x0020)) /* X must be either A or B */ ++#define LQ_GPTU_COUNT(n, X) ((volatile u32 *)(LQ_GPTU + 0x0028 + ((X) * 4) + ((n) - 1) * 0x0020)) /* X must be either A or B */ ++#define LQ_GPTU_IRNEN ((volatile u32 *)(LQ_GPTU + 0x00F4)) ++#define LQ_GPTU_IRNICR ((volatile u32 *)(LQ_GPTU + 0x00F8)) ++#define LQ_GPTU_IRNCR ((volatile u32 *)(LQ_GPTU + 0x00FC)) ++ ++/* ++ * Clock Control Register ++ */ ++#define GPTU_CLC_SMC GET_BITS(*LQ_GPTU_CLC, 23, 16) ++#define GPTU_CLC_RMC GET_BITS(*LQ_GPTU_CLC, 15, 8) ++#define GPTU_CLC_FSOE (*LQ_GPTU_CLC & (1 << 5)) ++#define GPTU_CLC_EDIS (*LQ_GPTU_CLC & (1 << 3)) ++#define GPTU_CLC_SPEN (*LQ_GPTU_CLC & (1 << 2)) ++#define GPTU_CLC_DISS (*LQ_GPTU_CLC & (1 << 1)) ++#define GPTU_CLC_DISR (*LQ_GPTU_CLC & (1 << 0)) ++ ++#define GPTU_CLC_SMC_SET(value) SET_BITS(0, 23, 16, (value)) ++#define GPTU_CLC_RMC_SET(value) SET_BITS(0, 15, 8, (value)) ++#define GPTU_CLC_FSOE_SET(value) ((value) ? (1 << 5) : 0) ++#define GPTU_CLC_SBWE_SET(value) ((value) ? (1 << 4) : 0) ++#define GPTU_CLC_EDIS_SET(value) ((value) ? (1 << 3) : 0) ++#define GPTU_CLC_SPEN_SET(value) ((value) ? 
(1 << 2) : 0) ++#define GPTU_CLC_DISR_SET(value) ((value) ? (1 << 0) : 0) ++ ++/* ++ * ID Register ++ */ ++#define GPTU_ID_ID GET_BITS(*LQ_GPTU_ID, 15, 8) ++#define GPTU_ID_CFG GET_BITS(*LQ_GPTU_ID, 7, 5) ++#define GPTU_ID_REV GET_BITS(*LQ_GPTU_ID, 4, 0) ++ ++/* ++ * Control Register of Timer/Counter nX ++ * n is the index of block (1 based index) ++ * X is either A or B ++ */ ++#define GPTU_CON_SRC_EG(n, X) (*LQ_GPTU_CON(n, X) & (1 << 10)) ++#define GPTU_CON_SRC_EXT(n, X) (*LQ_GPTU_CON(n, X) & (1 << 9)) ++#define GPTU_CON_SYNC(n, X) (*LQ_GPTU_CON(n, X) & (1 << 8)) ++#define GPTU_CON_EDGE(n, X) GET_BITS(*LQ_GPTU_CON(n, X), 7, 6) ++#define GPTU_CON_INV(n, X) (*LQ_GPTU_CON(n, X) & (1 << 5)) ++#define GPTU_CON_EXT(n, X) (*LQ_GPTU_CON(n, A) & (1 << 4)) /* Timer/Counter B does not have this bit */ ++#define GPTU_CON_STP(n, X) (*LQ_GPTU_CON(n, X) & (1 << 3)) ++#define GPTU_CON_CNT(n, X) (*LQ_GPTU_CON(n, X) & (1 << 2)) ++#define GPTU_CON_DIR(n, X) (*LQ_GPTU_CON(n, X) & (1 << 1)) ++#define GPTU_CON_EN(n, X) (*LQ_GPTU_CON(n, X) & (1 << 0)) ++ ++#define GPTU_CON_SRC_EG_SET(value) ((value) ? 0 : (1 << 10)) ++#define GPTU_CON_SRC_EXT_SET(value) ((value) ? (1 << 9) : 0) ++#define GPTU_CON_SYNC_SET(value) ((value) ? (1 << 8) : 0) ++#define GPTU_CON_EDGE_SET(value) SET_BITS(0, 7, 6, (value)) ++#define GPTU_CON_INV_SET(value) ((value) ? (1 << 5) : 0) ++#define GPTU_CON_EXT_SET(value) ((value) ? (1 << 4) : 0) ++#define GPTU_CON_STP_SET(value) ((value) ? (1 << 3) : 0) ++#define GPTU_CON_CNT_SET(value) ((value) ? (1 << 2) : 0) ++#define GPTU_CON_DIR_SET(value) ((value) ? (1 << 1) : 0) ++ ++#define GPTU_RUN_RL_SET(value) ((value) ? (1 << 2) : 0) ++#define GPTU_RUN_CEN_SET(value) ((value) ? (1 << 1) : 0) ++#define GPTU_RUN_SEN_SET(value) ((value) ? (1 << 0) : 0) ++ ++#define GPTU_IRNEN_TC_SET(n, X, value) ((value) ? (1 << (((n) - 1) * 2 + (X))) : 0) ++#define GPTU_IRNCR_TC_SET(n, X, value) ((value) ? 
(1 << (((n) - 1) * 2 + (X))) : 0) ++ ++#define TIMER_FLAG_MASK_SIZE(x) (x & 0x0001) ++#define TIMER_FLAG_MASK_TYPE(x) (x & 0x0002) ++#define TIMER_FLAG_MASK_STOP(x) (x & 0x0004) ++#define TIMER_FLAG_MASK_DIR(x) (x & 0x0008) ++#define TIMER_FLAG_NONE_EDGE 0x0000 ++#define TIMER_FLAG_MASK_EDGE(x) (x & 0x0030) ++#define TIMER_FLAG_REAL 0x0000 ++#define TIMER_FLAG_INVERT 0x0040 ++#define TIMER_FLAG_MASK_INVERT(x) (x & 0x0040) ++#define TIMER_FLAG_MASK_TRIGGER(x) (x & 0x0070) ++#define TIMER_FLAG_MASK_SYNC(x) (x & 0x0080) ++#define TIMER_FLAG_CALLBACK_IN_HB 0x0200 ++#define TIMER_FLAG_MASK_HANDLE(x) (x & 0x0300) ++#define TIMER_FLAG_MASK_SRC(x) (x & 0x1000) ++ ++struct timer_dev_timer { ++ unsigned int f_irq_on; ++ unsigned int irq; ++ unsigned int flag; ++ unsigned long arg1; ++ unsigned long arg2; ++}; ++ ++struct timer_dev { ++ struct mutex gptu_mutex; ++ unsigned int number_of_timers; ++ unsigned int occupation; ++ unsigned int f_gptu_on; ++ struct timer_dev_timer timer[MAX_NUM_OF_32BIT_TIMER_BLOCKS * 2]; ++}; ++ ++static int gptu_ioctl(struct inode *, struct file *, unsigned int, unsigned long); ++static int gptu_open(struct inode *, struct file *); ++static int gptu_release(struct inode *, struct file *); ++ ++static struct file_operations gptu_fops = { ++ .owner = THIS_MODULE, ++ .ioctl = gptu_ioctl, ++ .open = gptu_open, ++ .release = gptu_release ++}; ++ ++static struct miscdevice gptu_miscdev = { ++ .minor = MISC_DYNAMIC_MINOR, ++ .name = "gptu", ++ .fops = &gptu_fops, ++}; ++ ++static struct timer_dev timer_dev; ++ ++static irqreturn_t timer_irq_handler(int irq, void *p) ++{ ++ unsigned int timer; ++ unsigned int flag; ++ struct timer_dev_timer *dev_timer = (struct timer_dev_timer *)p; ++ ++ timer = irq - TIMER_INTERRUPT; ++ if (timer < timer_dev.number_of_timers ++ && dev_timer == &timer_dev.timer[timer]) { ++ /* Clear interrupt. */ ++ lq_w32(1 << timer, LQ_GPTU_IRNCR); ++ ++ /* Call user hanler or signal. */ ++ flag = dev_timer->flag; ++ if (!(timer & 0x01) ++ || TIMER_FLAG_MASK_SIZE(flag) == TIMER_FLAG_16BIT) { ++ /* 16-bit timer or timer A of 32-bit timer */ ++ switch (TIMER_FLAG_MASK_HANDLE(flag)) { ++ case TIMER_FLAG_CALLBACK_IN_IRQ: ++ case TIMER_FLAG_CALLBACK_IN_HB: ++ if (dev_timer->arg1) ++ (*(timer_callback)dev_timer->arg1)(dev_timer->arg2); ++ break; ++ case TIMER_FLAG_SIGNAL: ++ send_sig((int)dev_timer->arg2, (struct task_struct *)dev_timer->arg1, 0); ++ break; ++ } ++ } ++ } ++ return IRQ_HANDLED; ++} ++ ++static inline void lq_enable_gptu(void) ++{ ++ lq_pmu_enable(PMU_GPT); ++ ++ /* Set divider as 1, disable write protection for SPEN, enable module. */ ++ *LQ_GPTU_CLC = ++ GPTU_CLC_SMC_SET(0x00) | ++ GPTU_CLC_RMC_SET(0x01) | ++ GPTU_CLC_FSOE_SET(0) | ++ GPTU_CLC_SBWE_SET(1) | ++ GPTU_CLC_EDIS_SET(0) | ++ GPTU_CLC_SPEN_SET(0) | ++ GPTU_CLC_DISR_SET(0); ++} ++ ++static inline void lq_disable_gptu(void) ++{ ++ lq_w32(0x00, LQ_GPTU_IRNEN); ++ lq_w32(0xfff, LQ_GPTU_IRNCR); ++ ++ /* Set divider as 0, enable write protection for SPEN, disable module. 
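GET_BITS()/SET_BITS() above are plain mask-and-shift field accessors over the msb..lsb bit range of a register value. A small self-contained illustration (the macros are copied from this file; the values are arbitrary):

#include <stdio.h>

#define GET_BITS(x, msb, lsb) \
        (((x) & ((1 << ((msb) + 1)) - 1)) >> (lsb))
#define SET_BITS(x, msb, lsb, value) \
        (((x) & ~(((1 << ((msb) + 1)) - 1) ^ ((1 << (lsb)) - 1))) | \
         (((value) & ((1 << (1 + (msb) - (lsb))) - 1)) << (lsb)))

int main(void)
{
        unsigned int clc = 0x00000100;                  /* RMC field (bits 15:8) = 1 */

        printf("RMC = %u\n", GET_BITS(clc, 15, 8));     /* prints "RMC = 1" */
        clc = SET_BITS(clc, 23, 16, 0x7f);              /* program SMC (bits 23:16) */
        printf("CLC = 0x%08x\n", clc);                  /* prints "CLC = 0x007f0100" */
        return 0;
}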
*/ ++ *LQ_GPTU_CLC = ++ GPTU_CLC_SMC_SET(0x00) | ++ GPTU_CLC_RMC_SET(0x00) | ++ GPTU_CLC_FSOE_SET(0) | ++ GPTU_CLC_SBWE_SET(0) | ++ GPTU_CLC_EDIS_SET(0) | ++ GPTU_CLC_SPEN_SET(0) | ++ GPTU_CLC_DISR_SET(1); ++ ++ lq_pmu_disable(PMU_GPT); ++} ++ ++int lq_request_timer(unsigned int timer, unsigned int flag, ++ unsigned long value, unsigned long arg1, unsigned long arg2) ++{ ++ int ret = 0; ++ unsigned int con_reg, irnen_reg; ++ int n, X; ++ ++ if (timer >= FIRST_TIMER + timer_dev.number_of_timers) ++ return -EINVAL; ++ ++ printk(KERN_INFO "request_timer(%d, 0x%08X, %lu)...", ++ timer, flag, value); ++ ++ if (TIMER_FLAG_MASK_SIZE(flag) == TIMER_FLAG_16BIT) ++ value &= 0xFFFF; ++ else ++ timer &= ~0x01; ++ ++ mutex_lock(&timer_dev.gptu_mutex); ++ ++ /* ++ * Allocate timer. ++ */ ++ if (timer < FIRST_TIMER) { ++ unsigned int mask; ++ unsigned int shift; ++ /* This takes care of TIMER1B which is the only choice for Voice TAPI system */ ++ unsigned int offset = TIMER2A; ++ ++ /* ++ * Pick up a free timer. ++ */ ++ if (TIMER_FLAG_MASK_SIZE(flag) == TIMER_FLAG_16BIT) { ++ mask = 1 << offset; ++ shift = 1; ++ } else { ++ mask = 3 << offset; ++ shift = 2; ++ } ++ for (timer = offset; ++ timer < offset + timer_dev.number_of_timers; ++ timer += shift, mask <<= shift) ++ if (!(timer_dev.occupation & mask)) { ++ timer_dev.occupation |= mask; ++ break; ++ } ++ if (timer >= offset + timer_dev.number_of_timers) { ++ printk("failed![%d]\n", __LINE__); ++ mutex_unlock(&timer_dev.gptu_mutex); ++ return -EINVAL; ++ } else ++ ret = timer; ++ } else { ++ register unsigned int mask; ++ ++ /* ++ * Check if the requested timer is free. ++ */ ++ mask = (TIMER_FLAG_MASK_SIZE(flag) == TIMER_FLAG_16BIT ? 1 : 3) << timer; ++ if ((timer_dev.occupation & mask)) { ++ printk("failed![%d] mask %#x, timer_dev.occupation %#x\n", ++ __LINE__, mask, timer_dev.occupation); ++ mutex_unlock(&timer_dev.gptu_mutex); ++ return -EBUSY; ++ } else { ++ timer_dev.occupation |= mask; ++ ret = 0; ++ } ++ } ++ ++ /* ++ * Prepare control register value. ++ */ ++ switch (TIMER_FLAG_MASK_EDGE(flag)) { ++ default: ++ case TIMER_FLAG_NONE_EDGE: ++ con_reg = GPTU_CON_EDGE_SET(0x00); ++ break; ++ case TIMER_FLAG_RISE_EDGE: ++ con_reg = GPTU_CON_EDGE_SET(0x01); ++ break; ++ case TIMER_FLAG_FALL_EDGE: ++ con_reg = GPTU_CON_EDGE_SET(0x02); ++ break; ++ case TIMER_FLAG_ANY_EDGE: ++ con_reg = GPTU_CON_EDGE_SET(0x03); ++ break; ++ } ++ if (TIMER_FLAG_MASK_TYPE(flag) == TIMER_FLAG_TIMER) ++ con_reg |= ++ TIMER_FLAG_MASK_SRC(flag) == ++ TIMER_FLAG_EXT_SRC ? GPTU_CON_SRC_EXT_SET(1) : ++ GPTU_CON_SRC_EXT_SET(0); ++ else ++ con_reg |= ++ TIMER_FLAG_MASK_SRC(flag) == ++ TIMER_FLAG_EXT_SRC ? GPTU_CON_SRC_EG_SET(1) : ++ GPTU_CON_SRC_EG_SET(0); ++ con_reg |= ++ TIMER_FLAG_MASK_SYNC(flag) == ++ TIMER_FLAG_UNSYNC ? GPTU_CON_SYNC_SET(0) : ++ GPTU_CON_SYNC_SET(1); ++ con_reg |= ++ TIMER_FLAG_MASK_INVERT(flag) == ++ TIMER_FLAG_REAL ? GPTU_CON_INV_SET(0) : GPTU_CON_INV_SET(1); ++ con_reg |= ++ TIMER_FLAG_MASK_SIZE(flag) == ++ TIMER_FLAG_16BIT ? GPTU_CON_EXT_SET(0) : ++ GPTU_CON_EXT_SET(1); ++ con_reg |= ++ TIMER_FLAG_MASK_STOP(flag) == ++ TIMER_FLAG_ONCE ? GPTU_CON_STP_SET(1) : GPTU_CON_STP_SET(0); ++ con_reg |= ++ TIMER_FLAG_MASK_TYPE(flag) == ++ TIMER_FLAG_TIMER ? GPTU_CON_CNT_SET(0) : ++ GPTU_CON_CNT_SET(1); ++ con_reg |= ++ TIMER_FLAG_MASK_DIR(flag) == ++ TIMER_FLAG_UP ? GPTU_CON_DIR_SET(1) : GPTU_CON_DIR_SET(0); ++ ++ /* ++ * Fill up running data. 
++ */ ++ timer_dev.timer[timer - FIRST_TIMER].flag = flag; ++ timer_dev.timer[timer - FIRST_TIMER].arg1 = arg1; ++ timer_dev.timer[timer - FIRST_TIMER].arg2 = arg2; ++ if (TIMER_FLAG_MASK_SIZE(flag) != TIMER_FLAG_16BIT) ++ timer_dev.timer[timer - FIRST_TIMER + 1].flag = flag; ++ ++ /* ++ * Enable GPTU module. ++ */ ++ if (!timer_dev.f_gptu_on) { ++ lq_enable_gptu(); ++ timer_dev.f_gptu_on = 1; ++ } ++ ++ /* ++ * Enable IRQ. ++ */ ++ if (TIMER_FLAG_MASK_HANDLE(flag) != TIMER_FLAG_NO_HANDLE) { ++ if (TIMER_FLAG_MASK_HANDLE(flag) == TIMER_FLAG_SIGNAL) ++ timer_dev.timer[timer - FIRST_TIMER].arg1 = ++ (unsigned long) find_task_by_vpid((int) arg1); ++ ++ irnen_reg = 1 << (timer - FIRST_TIMER); ++ ++ if (TIMER_FLAG_MASK_HANDLE(flag) == TIMER_FLAG_SIGNAL ++ || (TIMER_FLAG_MASK_HANDLE(flag) == ++ TIMER_FLAG_CALLBACK_IN_IRQ ++ && timer_dev.timer[timer - FIRST_TIMER].arg1)) { ++ enable_irq(timer_dev.timer[timer - FIRST_TIMER].irq); ++ timer_dev.timer[timer - FIRST_TIMER].f_irq_on = 1; ++ } ++ } else ++ irnen_reg = 0; ++ ++ /* ++ * Write config register, reload value and enable interrupt. ++ */ ++ n = timer >> 1; ++ X = timer & 0x01; ++ *LQ_GPTU_CON(n, X) = con_reg; ++ *LQ_GPTU_RELOAD(n, X) = value; ++ /* printk("reload value = %d\n", (u32)value); */ ++ *LQ_GPTU_IRNEN |= irnen_reg; ++ ++ mutex_unlock(&timer_dev.gptu_mutex); ++ printk("successful!\n"); ++ return ret; ++} ++EXPORT_SYMBOL(lq_request_timer); ++ ++int lq_free_timer(unsigned int timer) ++{ ++ unsigned int flag; ++ unsigned int mask; ++ int n, X; ++ ++ if (!timer_dev.f_gptu_on) ++ return -EINVAL; ++ ++ if (timer < FIRST_TIMER || timer >= FIRST_TIMER + timer_dev.number_of_timers) ++ return -EINVAL; ++ ++ mutex_lock(&timer_dev.gptu_mutex); ++ ++ flag = timer_dev.timer[timer - FIRST_TIMER].flag; ++ if (TIMER_FLAG_MASK_SIZE(flag) != TIMER_FLAG_16BIT) ++ timer &= ~0x01; ++ ++ mask = (TIMER_FLAG_MASK_SIZE(flag) == TIMER_FLAG_16BIT ? 1 : 3) << timer; ++ if (((timer_dev.occupation & mask) ^ mask)) { ++ mutex_unlock(&timer_dev.gptu_mutex); ++ return -EINVAL; ++ } ++ ++ n = timer >> 1; ++ X = timer & 0x01; ++ ++ if (GPTU_CON_EN(n, X)) ++ *LQ_GPTU_RUN(n, X) = GPTU_RUN_CEN_SET(1); ++ ++ *LQ_GPTU_IRNEN &= ~GPTU_IRNEN_TC_SET(n, X, 1); ++ *LQ_GPTU_IRNCR |= GPTU_IRNCR_TC_SET(n, X, 1); ++ ++ if (timer_dev.timer[timer - FIRST_TIMER].f_irq_on) { ++ disable_irq(timer_dev.timer[timer - FIRST_TIMER].irq); ++ timer_dev.timer[timer - FIRST_TIMER].f_irq_on = 0; ++ } ++ ++ timer_dev.occupation &= ~mask; ++ if (!timer_dev.occupation && timer_dev.f_gptu_on) { ++ lq_disable_gptu(); ++ timer_dev.f_gptu_on = 0; ++ } ++ ++ mutex_unlock(&timer_dev.gptu_mutex); ++ ++ return 0; ++} ++EXPORT_SYMBOL(lq_free_timer); ++ ++int lq_start_timer(unsigned int timer, int is_resume) ++{ ++ unsigned int flag; ++ unsigned int mask; ++ int n, X; ++ ++ if (!timer_dev.f_gptu_on) ++ return -EINVAL; ++ ++ if (timer < FIRST_TIMER || timer >= FIRST_TIMER + timer_dev.number_of_timers) ++ return -EINVAL; ++ ++ mutex_lock(&timer_dev.gptu_mutex); ++ ++ flag = timer_dev.timer[timer - FIRST_TIMER].flag; ++ if (TIMER_FLAG_MASK_SIZE(flag) != TIMER_FLAG_16BIT) ++ timer &= ~0x01; ++ ++ mask = (TIMER_FLAG_MASK_SIZE(flag) == ++ TIMER_FLAG_16BIT ? 
1 : 3) << timer; ++ if (((timer_dev.occupation & mask) ^ mask)) { ++ mutex_unlock(&timer_dev.gptu_mutex); ++ return -EINVAL; ++ } ++ ++ n = timer >> 1; ++ X = timer & 0x01; ++ ++ *LQ_GPTU_RUN(n, X) = GPTU_RUN_RL_SET(!is_resume) | GPTU_RUN_SEN_SET(1); ++ ++ mutex_unlock(&timer_dev.gptu_mutex); ++ ++ return 0; ++} ++EXPORT_SYMBOL(lq_start_timer); ++ ++int lq_stop_timer(unsigned int timer) ++{ ++ unsigned int flag; ++ unsigned int mask; ++ int n, X; ++ ++ if (!timer_dev.f_gptu_on) ++ return -EINVAL; ++ ++ if (timer < FIRST_TIMER ++ || timer >= FIRST_TIMER + timer_dev.number_of_timers) ++ return -EINVAL; ++ ++ mutex_lock(&timer_dev.gptu_mutex); ++ ++ flag = timer_dev.timer[timer - FIRST_TIMER].flag; ++ if (TIMER_FLAG_MASK_SIZE(flag) != TIMER_FLAG_16BIT) ++ timer &= ~0x01; ++ ++ mask = (TIMER_FLAG_MASK_SIZE(flag) == TIMER_FLAG_16BIT ? 1 : 3) << timer; ++ if (((timer_dev.occupation & mask) ^ mask)) { ++ mutex_unlock(&timer_dev.gptu_mutex); ++ return -EINVAL; ++ } ++ ++ n = timer >> 1; ++ X = timer & 0x01; ++ ++ *LQ_GPTU_RUN(n, X) = GPTU_RUN_CEN_SET(1); ++ ++ mutex_unlock(&timer_dev.gptu_mutex); ++ ++ return 0; ++} ++EXPORT_SYMBOL(lq_stop_timer); ++ ++int lq_reset_counter_flags(u32 timer, u32 flags) ++{ ++ unsigned int oflag; ++ unsigned int mask, con_reg; ++ int n, X; ++ ++ if (!timer_dev.f_gptu_on) ++ return -EINVAL; ++ ++ if (timer < FIRST_TIMER || timer >= FIRST_TIMER + timer_dev.number_of_timers) ++ return -EINVAL; ++ ++ mutex_lock(&timer_dev.gptu_mutex); ++ ++ oflag = timer_dev.timer[timer - FIRST_TIMER].flag; ++ if (TIMER_FLAG_MASK_SIZE(oflag) != TIMER_FLAG_16BIT) ++ timer &= ~0x01; ++ ++ mask = (TIMER_FLAG_MASK_SIZE(oflag) == TIMER_FLAG_16BIT ? 1 : 3) << timer; ++ if (((timer_dev.occupation & mask) ^ mask)) { ++ mutex_unlock(&timer_dev.gptu_mutex); ++ return -EINVAL; ++ } ++ ++ switch (TIMER_FLAG_MASK_EDGE(flags)) { ++ default: ++ case TIMER_FLAG_NONE_EDGE: ++ con_reg = GPTU_CON_EDGE_SET(0x00); ++ break; ++ case TIMER_FLAG_RISE_EDGE: ++ con_reg = GPTU_CON_EDGE_SET(0x01); ++ break; ++ case TIMER_FLAG_FALL_EDGE: ++ con_reg = GPTU_CON_EDGE_SET(0x02); ++ break; ++ case TIMER_FLAG_ANY_EDGE: ++ con_reg = GPTU_CON_EDGE_SET(0x03); ++ break; ++ } ++ if (TIMER_FLAG_MASK_TYPE(flags) == TIMER_FLAG_TIMER) ++ con_reg |= TIMER_FLAG_MASK_SRC(flags) == TIMER_FLAG_EXT_SRC ? GPTU_CON_SRC_EXT_SET(1) : GPTU_CON_SRC_EXT_SET(0); ++ else ++ con_reg |= TIMER_FLAG_MASK_SRC(flags) == TIMER_FLAG_EXT_SRC ? GPTU_CON_SRC_EG_SET(1) : GPTU_CON_SRC_EG_SET(0); ++ con_reg |= TIMER_FLAG_MASK_SYNC(flags) == TIMER_FLAG_UNSYNC ? GPTU_CON_SYNC_SET(0) : GPTU_CON_SYNC_SET(1); ++ con_reg |= TIMER_FLAG_MASK_INVERT(flags) == TIMER_FLAG_REAL ? GPTU_CON_INV_SET(0) : GPTU_CON_INV_SET(1); ++ con_reg |= TIMER_FLAG_MASK_SIZE(flags) == TIMER_FLAG_16BIT ? GPTU_CON_EXT_SET(0) : GPTU_CON_EXT_SET(1); ++ con_reg |= TIMER_FLAG_MASK_STOP(flags) == TIMER_FLAG_ONCE ? GPTU_CON_STP_SET(1) : GPTU_CON_STP_SET(0); ++ con_reg |= TIMER_FLAG_MASK_TYPE(flags) == TIMER_FLAG_TIMER ? GPTU_CON_CNT_SET(0) : GPTU_CON_CNT_SET(1); ++ con_reg |= TIMER_FLAG_MASK_DIR(flags) == TIMER_FLAG_UP ? 
GPTU_CON_DIR_SET(1) : GPTU_CON_DIR_SET(0); ++ ++ timer_dev.timer[timer - FIRST_TIMER].flag = flags; ++ if (TIMER_FLAG_MASK_SIZE(flags) != TIMER_FLAG_16BIT) ++ timer_dev.timer[timer - FIRST_TIMER + 1].flag = flags; ++ ++ n = timer >> 1; ++ X = timer & 0x01; ++ ++ *LQ_GPTU_CON(n, X) = con_reg; ++ smp_wmb(); ++ printk(KERN_INFO "[%s]: counter%d oflags %#x, nflags %#x, GPTU_CON %#x\n", __func__, timer, oflag, flags, *LQ_GPTU_CON(n, X)); ++ mutex_unlock(&timer_dev.gptu_mutex); ++ return 0; ++} ++EXPORT_SYMBOL(lq_reset_counter_flags); ++ ++int lq_get_count_value(unsigned int timer, unsigned long *value) ++{ ++ unsigned int flag; ++ unsigned int mask; ++ int n, X; ++ ++ if (!timer_dev.f_gptu_on) ++ return -EINVAL; ++ ++ if (timer < FIRST_TIMER ++ || timer >= FIRST_TIMER + timer_dev.number_of_timers) ++ return -EINVAL; ++ ++ mutex_lock(&timer_dev.gptu_mutex); ++ ++ flag = timer_dev.timer[timer - FIRST_TIMER].flag; ++ if (TIMER_FLAG_MASK_SIZE(flag) != TIMER_FLAG_16BIT) ++ timer &= ~0x01; ++ ++ mask = (TIMER_FLAG_MASK_SIZE(flag) == TIMER_FLAG_16BIT ? 1 : 3) << timer; ++ if (((timer_dev.occupation & mask) ^ mask)) { ++ mutex_unlock(&timer_dev.gptu_mutex); ++ return -EINVAL; ++ } ++ ++ n = timer >> 1; ++ X = timer & 0x01; ++ ++ *value = *LQ_GPTU_COUNT(n, X); ++ ++ mutex_unlock(&timer_dev.gptu_mutex); ++ ++ return 0; ++} ++EXPORT_SYMBOL(lq_get_count_value); ++ ++u32 lq_cal_divider(unsigned long freq) ++{ ++ u64 module_freq, fpi = lq_get_fpi_bus_clock(2); ++ u32 clock_divider = 1; ++ module_freq = fpi * 1000; ++ do_div(module_freq, clock_divider * freq); ++ return module_freq; ++} ++EXPORT_SYMBOL(lq_cal_divider); ++ ++int lq_set_timer(unsigned int timer, unsigned int freq, int is_cyclic, ++ int is_ext_src, unsigned int handle_flag, unsigned long arg1, ++ unsigned long arg2) ++{ ++ unsigned long divider; ++ unsigned int flag; ++ ++ divider = lq_cal_divider(freq); ++ if (divider == 0) ++ return -EINVAL; ++ flag = ((divider & ~0xFFFF) ? TIMER_FLAG_32BIT : TIMER_FLAG_16BIT) ++ | (is_cyclic ? TIMER_FLAG_CYCLIC : TIMER_FLAG_ONCE) ++ | (is_ext_src ? 
TIMER_FLAG_EXT_SRC : TIMER_FLAG_INT_SRC) ++ | TIMER_FLAG_TIMER | TIMER_FLAG_DOWN ++ | TIMER_FLAG_MASK_HANDLE(handle_flag); ++ ++ printk(KERN_INFO "lq_set_timer(%d, %d), divider = %lu\n", ++ timer, freq, divider); ++ return lq_request_timer(timer, flag, divider, arg1, arg2); ++} ++EXPORT_SYMBOL(lq_set_timer); ++ ++int lq_set_counter(unsigned int timer, unsigned int flag, u32 reload, ++ unsigned long arg1, unsigned long arg2) ++{ ++ printk(KERN_INFO "lq_set_counter(%d, %#x, %d)\n", timer, flag, reload); ++ return lq_request_timer(timer, flag, reload, arg1, arg2); ++} ++EXPORT_SYMBOL(lq_set_counter); ++ ++static int gptu_ioctl(struct inode *inode, struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ int ret; ++ struct gptu_ioctl_param param; ++ ++ if (!access_ok(VERIFY_READ, arg, sizeof(struct gptu_ioctl_param))) ++ return -EFAULT; ++ copy_from_user(¶m, (void *) arg, sizeof(param)); ++ ++ if ((((cmd == GPTU_REQUEST_TIMER || cmd == GPTU_SET_TIMER ++ || GPTU_SET_COUNTER) && param.timer < 2) ++ || cmd == GPTU_GET_COUNT_VALUE || cmd == GPTU_CALCULATE_DIVIDER) ++ && !access_ok(VERIFY_WRITE, arg, ++ sizeof(struct gptu_ioctl_param))) ++ return -EFAULT; ++ ++ switch (cmd) { ++ case GPTU_REQUEST_TIMER: ++ ret = lq_request_timer(param.timer, param.flag, param.value, ++ (unsigned long) param.pid, ++ (unsigned long) param.sig); ++ if (ret > 0) { ++ copy_to_user(&((struct gptu_ioctl_param *) arg)-> ++ timer, &ret, sizeof(&ret)); ++ ret = 0; ++ } ++ break; ++ case GPTU_FREE_TIMER: ++ ret = lq_free_timer(param.timer); ++ break; ++ case GPTU_START_TIMER: ++ ret = lq_start_timer(param.timer, param.flag); ++ break; ++ case GPTU_STOP_TIMER: ++ ret = lq_stop_timer(param.timer); ++ break; ++ case GPTU_GET_COUNT_VALUE: ++ ret = lq_get_count_value(param.timer, ¶m.value); ++ if (!ret) ++ copy_to_user(&((struct gptu_ioctl_param *) arg)-> ++ value, ¶m.value, ++ sizeof(param.value)); ++ break; ++ case GPTU_CALCULATE_DIVIDER: ++ param.value = lq_cal_divider(param.value); ++ if (param.value == 0) ++ ret = -EINVAL; ++ else { ++ copy_to_user(&((struct gptu_ioctl_param *) arg)-> ++ value, ¶m.value, ++ sizeof(param.value)); ++ ret = 0; ++ } ++ break; ++ case GPTU_SET_TIMER: ++ ret = lq_set_timer(param.timer, param.value, ++ TIMER_FLAG_MASK_STOP(param.flag) != ++ TIMER_FLAG_ONCE ? 1 : 0, ++ TIMER_FLAG_MASK_SRC(param.flag) == ++ TIMER_FLAG_EXT_SRC ? 1 : 0, ++ TIMER_FLAG_MASK_HANDLE(param.flag) == ++ TIMER_FLAG_SIGNAL ? 
TIMER_FLAG_SIGNAL : ++ TIMER_FLAG_NO_HANDLE, ++ (unsigned long) param.pid, ++ (unsigned long) param.sig); ++ if (ret > 0) { ++ copy_to_user(&((struct gptu_ioctl_param *) arg)-> ++ timer, &ret, sizeof(&ret)); ++ ret = 0; ++ } ++ break; ++ case GPTU_SET_COUNTER: ++ lq_set_counter(param.timer, param.flag, param.value, 0, 0); ++ if (ret > 0) { ++ copy_to_user(&((struct gptu_ioctl_param *) arg)-> ++ timer, &ret, sizeof(&ret)); ++ ret = 0; ++ } ++ break; ++ default: ++ ret = -ENOTTY; ++ } ++ ++ return ret; ++} ++ ++static int gptu_open(struct inode *inode, struct file *file) ++{ ++ return 0; ++} ++ ++static int gptu_release(struct inode *inode, struct file *file) ++{ ++ return 0; ++} ++ ++int __init lq_gptu_init(void) ++{ ++ int ret; ++ unsigned int i; ++ ++ lq_w32(0, LQ_GPTU_IRNEN); ++ lq_w32(0xfff, LQ_GPTU_IRNCR); ++ ++ memset(&timer_dev, 0, sizeof(timer_dev)); ++ mutex_init(&timer_dev.gptu_mutex); ++ ++ lq_enable_gptu(); ++ timer_dev.number_of_timers = GPTU_ID_CFG * 2; ++ lq_disable_gptu(); ++ if (timer_dev.number_of_timers > MAX_NUM_OF_32BIT_TIMER_BLOCKS * 2) ++ timer_dev.number_of_timers = MAX_NUM_OF_32BIT_TIMER_BLOCKS * 2; ++ printk(KERN_INFO "gptu: totally %d 16-bit timers/counters\n", timer_dev.number_of_timers); ++ ++ ret = misc_register(&gptu_miscdev); ++ if (ret) { ++ printk(KERN_ERR "gptu: can't misc_register, get error %d\n", -ret); ++ return ret; ++ } else { ++ printk(KERN_INFO "gptu: misc_register on minor %d\n", gptu_miscdev.minor); ++ } ++ ++ for (i = 0; i < timer_dev.number_of_timers; i++) { ++ ret = request_irq(TIMER_INTERRUPT + i, timer_irq_handler, IRQF_TIMER, gptu_miscdev.name, &timer_dev.timer[i]); ++ if (ret) { ++ for (; i >= 0; i--) ++ free_irq(TIMER_INTERRUPT + i, &timer_dev.timer[i]); ++ misc_deregister(&gptu_miscdev); ++ printk(KERN_ERR "gptu: failed in requesting irq (%d), get error %d\n", i, -ret); ++ return ret; ++ } else { ++ timer_dev.timer[i].irq = TIMER_INTERRUPT + i; ++ disable_irq(timer_dev.timer[i].irq); ++ printk(KERN_INFO "gptu: succeeded to request irq %d\n", timer_dev.timer[i].irq); ++ } ++ } ++ ++ return 0; ++} ++ ++void __exit lq_gptu_exit(void) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < timer_dev.number_of_timers; i++) { ++ if (timer_dev.timer[i].f_irq_on) ++ disable_irq(timer_dev.timer[i].irq); ++ free_irq(timer_dev.timer[i].irq, &timer_dev.timer[i]); ++ } ++ lq_disable_gptu(); ++ misc_deregister(&gptu_miscdev); ++} ++ ++module_init(lq_gptu_init); ++module_exit(lq_gptu_exit); +--- /dev/null ++++ b/arch/mips/lantiq/xway/timer.h +@@ -0,0 +1,155 @@ ++#ifndef __DANUBE_GPTU_DEV_H__2005_07_26__10_19__ ++#define __DANUBE_GPTU_DEV_H__2005_07_26__10_19__ ++ ++ ++/****************************************************************************** ++ Copyright (c) 2002, Infineon Technologies. All rights reserved. ++ ++ No Warranty ++ Because the program is licensed free of charge, there is no warranty for ++ the program, to the extent permitted by applicable law. Except when ++ otherwise stated in writing the copyright holders and/or other parties ++ provide the program "as is" without warranty of any kind, either ++ expressed or implied, including, but not limited to, the implied ++ warranties of merchantability and fitness for a particular purpose. The ++ entire risk as to the quality and performance of the program is with ++ you. should the program prove defective, you assume the cost of all ++ necessary servicing, repair or correction. 
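Other kernel code uses the exported API above rather than the character device. A hedged sketch of a cyclic callback timer follows; TIMER2A, the 100 Hz rate and the my_* names are illustrative, while the callback convention (arg1 is the function, arg2 its argument) follows lq_request_timer() and timer_irq_handler().

static void my_tick(unsigned long arg)
{
        /* runs from timer_irq_handler() each time TIMER2A expires */
}

static int __init my_timer_init(void)
{
        int ret;

        /* the frequency argument is in 0.001 Hz units, so 100 Hz -> 100000 */
        ret = lq_set_timer(TIMER2A, 100 * 1000, 1 /* cyclic */, 0 /* internal clock */,
                           TIMER_FLAG_CALLBACK_IN_IRQ,
                           (unsigned long)my_tick, 0 /* argument passed to my_tick() */);
        if (ret < 0)
                return ret;
        return lq_start_timer(TIMER2A, 0 /* fresh start, not a resume */);
}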
++ ++ In no event unless required by applicable law or agreed to in writing ++ will any copyright holder, or any other party who may modify and/or ++ redistribute the program as permitted above, be liable to you for ++ damages, including any general, special, incidental or consequential ++ damages arising out of the use or inability to use the program ++ (including but not limited to loss of data or data being rendered ++ inaccurate or losses sustained by you or third parties or a failure of ++ the program to operate with any other programs), even if such holder or ++ other party has been advised of the possibility of such damages. ++******************************************************************************/ ++ ++ ++/* ++ * #################################### ++ * Definition ++ * #################################### ++ */ ++ ++/* ++ * Available Timer/Counter Index ++ */ ++#define TIMER(n, X) (n * 2 + (X ? 1 : 0)) ++#define TIMER_ANY 0x00 ++#define TIMER1A TIMER(1, 0) ++#define TIMER1B TIMER(1, 1) ++#define TIMER2A TIMER(2, 0) ++#define TIMER2B TIMER(2, 1) ++#define TIMER3A TIMER(3, 0) ++#define TIMER3B TIMER(3, 1) ++ ++/* ++ * Flag of Timer/Counter ++ * These flags specify the way in which timer is configured. ++ */ ++/* Bit size of timer/counter. */ ++#define TIMER_FLAG_16BIT 0x0000 ++#define TIMER_FLAG_32BIT 0x0001 ++/* Switch between timer and counter. */ ++#define TIMER_FLAG_TIMER 0x0000 ++#define TIMER_FLAG_COUNTER 0x0002 ++/* Stop or continue when overflowing/underflowing. */ ++#define TIMER_FLAG_ONCE 0x0000 ++#define TIMER_FLAG_CYCLIC 0x0004 ++/* Count up or counter down. */ ++#define TIMER_FLAG_UP 0x0000 ++#define TIMER_FLAG_DOWN 0x0008 ++/* Count on specific level or edge. */ ++#define TIMER_FLAG_HIGH_LEVEL_SENSITIVE 0x0000 ++#define TIMER_FLAG_LOW_LEVEL_SENSITIVE 0x0040 ++#define TIMER_FLAG_RISE_EDGE 0x0010 ++#define TIMER_FLAG_FALL_EDGE 0x0020 ++#define TIMER_FLAG_ANY_EDGE 0x0030 ++/* Signal is syncronous to module clock or not. */ ++#define TIMER_FLAG_UNSYNC 0x0000 ++#define TIMER_FLAG_SYNC 0x0080 ++/* Different interrupt handle type. */ ++#define TIMER_FLAG_NO_HANDLE 0x0000 ++#if defined(__KERNEL__) ++ #define TIMER_FLAG_CALLBACK_IN_IRQ 0x0100 ++#endif // defined(__KERNEL__) ++#define TIMER_FLAG_SIGNAL 0x0300 ++/* Internal clock source or external clock source */ ++#define TIMER_FLAG_INT_SRC 0x0000 ++#define TIMER_FLAG_EXT_SRC 0x1000 ++ ++ ++/* ++ * ioctl Command ++ */ ++#define GPTU_REQUEST_TIMER 0x01 /* General method to setup timer/counter. */ ++#define GPTU_FREE_TIMER 0x02 /* Free timer/counter. */ ++#define GPTU_START_TIMER 0x03 /* Start or resume timer/counter. */ ++#define GPTU_STOP_TIMER 0x04 /* Suspend timer/counter. */ ++#define GPTU_GET_COUNT_VALUE 0x05 /* Get current count value. */ ++#define GPTU_CALCULATE_DIVIDER 0x06 /* Calculate timer divider from given freq.*/ ++#define GPTU_SET_TIMER 0x07 /* Simplified method to setup timer. */ ++#define GPTU_SET_COUNTER 0x08 /* Simplified method to setup counter. */ ++ ++/* ++ * Data Type Used to Call ioctl ++ */ ++struct gptu_ioctl_param { ++ unsigned int timer; /* In command GPTU_REQUEST_TIMER, GPTU_SET_TIMER, and * ++ * GPTU_SET_COUNTER, this field is ID of expected * ++ * timer/counter. If it's zero, a timer/counter would * ++ * be dynamically allocated and ID would be stored in * ++ * this field. * ++ * In command GPTU_GET_COUNT_VALUE, this field is * ++ * ignored. * ++ * In other command, this field is ID of timer/counter * ++ * allocated. 
*/ ++ unsigned int flag; /* In command GPTU_REQUEST_TIMER, GPTU_SET_TIMER, and * ++ * GPTU_SET_COUNTER, this field contains flags to * ++ * specify how to configure timer/counter. * ++ * In command GPTU_START_TIMER, zero indicate start * ++ * and non-zero indicate resume timer/counter. * ++ * In other command, this field is ignored. */ ++ unsigned long value; /* In command GPTU_REQUEST_TIMER, this field contains * ++ * init/reload value. * ++ * In command GPTU_SET_TIMER, this field contains * ++ * frequency (0.001Hz) of timer. * ++ * In command GPTU_GET_COUNT_VALUE, current count * ++ * value would be stored in this field. * ++ * In command GPTU_CALCULATE_DIVIDER, this field * ++ * contains frequency wanted, and after calculation, * ++ * divider would be stored in this field to overwrite * ++ * the frequency. * ++ * In other command, this field is ignored. */ ++ int pid; /* In command GPTU_REQUEST_TIMER and GPTU_SET_TIMER, * ++ * if signal is required, this field contains process * ++ * ID to which signal would be sent. * ++ * In other command, this field is ignored. */ ++ int sig; /* In command GPTU_REQUEST_TIMER and GPTU_SET_TIMER, * ++ * if signal is required, this field contains signal * ++ * number which would be sent. * ++ * In other command, this field is ignored. */ ++}; ++ ++/* ++ * #################################### ++ * Data Type ++ * #################################### ++ */ ++typedef void (*timer_callback)(unsigned long arg); ++ ++extern int ifxmips_request_timer(unsigned int, unsigned int, unsigned long, unsigned long, unsigned long); ++extern int ifxmips_free_timer(unsigned int); ++extern int ifxmips_start_timer(unsigned int, int); ++extern int ifxmips_stop_timer(unsigned int); ++extern int ifxmips_reset_counter_flags(u32 timer, u32 flags); ++extern int ifxmips_get_count_value(unsigned int, unsigned long *); ++extern u32 ifxmips_cal_divider(unsigned long); ++extern int ifxmips_set_timer(unsigned int, unsigned int, int, int, unsigned int, unsigned long, unsigned long); ++extern int ifxmips_set_counter(unsigned int timer, unsigned int flag, ++ u32 reload, unsigned long arg1, unsigned long arg2); ++ ++#endif /* __DANUBE_GPTU_DEV_H__2005_07_26__10_19__ */ +--- /dev/null ++++ b/arch/mips/lantiq/xway/Makefile +@@ -0,0 +1,5 @@ ++obj-y := pmu.o prom.o dma.o timer.o reset.o clk-xway.o ++obj-y += gpio.o gpio_ebu.o gpio_leds.o devices.o ++obj-$(CONFIG_LANTIQ_MACH_EASY50812) += mach-easy50812.o ++obj-$(CONFIG_LANTIQ_MACH_EASY50712) += mach-easy50712.o ++obj-$(CONFIG_LANTIQ_MACH_EASY4010) += mach-easy4010.o +--- /dev/null ++++ b/arch/mips/lantiq/xway/clk-xway.c +@@ -0,0 +1,219 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. 
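From userspace the same functionality is reachable through the /dev/gptu misc device registered by timer.c, using the command codes and struct gptu_ioctl_param shown above. A hedged sketch, assuming the GPTU_* commands, TIMER_FLAG_SIGNAL and the struct are made available to a userspace build; the 1 Hz rate and SIGUSR1 are illustrative.

#include <fcntl.h>
#include <unistd.h>
#include <signal.h>
#include <sys/ioctl.h>
/* assumes struct gptu_ioctl_param, GPTU_* and TIMER_FLAG_* are exported to
 * userspace, e.g. via a sanitised copy of lantiq_timer.h */

static void on_tick(int signo)
{
        /* one GPTU expiry per second lands here */
}

int main(void)
{
        struct gptu_ioctl_param p = {
                .timer = 0,                     /* 0 = let the driver pick a free timer */
                .flag  = TIMER_FLAG_SIGNAL,     /* deliver a signal on expiry */
                .value = 1000,                  /* frequency in 0.001 Hz units -> 1 Hz */
                .pid   = getpid(),
                .sig   = SIGUSR1,
        };
        int fd = open("/dev/gptu", O_RDWR);

        if (fd < 0)
                return 1;
        signal(SIGUSR1, on_tick);
        if (ioctl(fd, GPTU_SET_TIMER, &p) == 0) {       /* p.timer now holds the timer id */
                p.flag = 0;                             /* 0 = start, non-zero = resume */
                ioctl(fd, GPTU_START_TIMER, &p);
        }
        pause();                                        /* wait for the first SIGUSR1 */
        close(fd);
        return 0;
}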
++ * ++ * Copyright (C) 2007 Xu Liang, infineon ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#include <linux/io.h> ++#include <linux/module.h> ++#include <linux/init.h> ++#include <linux/clk.h> ++ ++#include <asm/time.h> ++#include <asm/irq.h> ++#include <asm/div64.h> ++ ++#include <xway.h> ++ ++static unsigned int lq_ram_clocks[] = {CLOCK_167M, CLOCK_133M, CLOCK_111M, CLOCK_83M }; ++#define DDR_HZ lq_ram_clocks[lq_r32(LQ_CGU_SYS) & 0x3] ++ ++#define BASIC_FREQUENCY_1 35328000 ++#define BASIC_FREQUENCY_2 36000000 ++#define BASIS_REQUENCY_USB 12000000 ++ ++#define GET_BITS(x, msb, lsb) (((x) & ((1 << ((msb) + 1)) - 1)) >> (lsb)) ++ ++#define CGU_PLL0_PHASE_DIVIDER_ENABLE (lq_r32(LQ_CGU_PLL0_CFG) & (1 << 31)) ++#define CGU_PLL0_BYPASS (lq_r32(LQ_CGU_PLL0_CFG) & (1 << 30)) ++#define CGU_PLL0_CFG_DSMSEL (lq_r32(LQ_CGU_PLL0_CFG) & (1 << 28)) ++#define CGU_PLL0_CFG_FRAC_EN (lq_r32(LQ_CGU_PLL0_CFG) & (1 << 27)) ++#define CGU_PLL1_SRC (lq_r32(LQ_CGU_PLL1_CFG) & (1 << 31)) ++#define CGU_PLL2_PHASE_DIVIDER_ENABLE (lq_r32(LQ_CGU_PLL2_CFG) & (1 << 20)) ++#define CGU_SYS_FPI_SEL (1 << 6) ++#define CGU_SYS_DDR_SEL 0x3 ++#define CGU_PLL0_SRC (1 << 29) ++ ++#define CGU_PLL0_CFG_PLLK GET_BITS(*LQ_CGU_PLL0_CFG, 26, 17) ++#define CGU_PLL0_CFG_PLLN GET_BITS(*LQ_CGU_PLL0_CFG, 12, 6) ++#define CGU_PLL0_CFG_PLLM GET_BITS(*LQ_CGU_PLL0_CFG, 5, 2) ++#define CGU_PLL2_SRC GET_BITS(*LQ_CGU_PLL2_CFG, 18, 17) ++#define CGU_PLL2_CFG_INPUT_DIV GET_BITS(*LQ_CGU_PLL2_CFG, 16, 13) ++ ++#define LQ_GPTU_GPT_CLC ((u32 *)(LQ_GPTU_BASE_ADDR + 0x0000)) ++#define LQ_CGU_PLL0_CFG ((u32 *)(LQ_CGU_BASE_ADDR + 0x0004)) ++#define LQ_CGU_PLL1_CFG ((u32 *)(LQ_CGU_BASE_ADDR + 0x0008)) ++#define LQ_CGU_PLL2_CFG ((u32 *)(LQ_CGU_BASE_ADDR + 0x000C)) ++#define LQ_CGU_SYS ((u32 *)(LQ_CGU_BASE_ADDR + 0x0010)) ++#define LQ_CGU_UPDATE ((u32 *)(LQ_CGU_BASE_ADDR + 0x0014)) ++#define LQ_CGU_IF_CLK ((u32 *)(LQ_CGU_BASE_ADDR + 0x0018)) ++#define LQ_CGU_OSC_CON ((u32 *)(LQ_CGU_BASE_ADDR + 0x001C)) ++#define LQ_CGU_SMD ((u32 *)(LQ_CGU_BASE_ADDR + 0x0020)) ++#define LQ_CGU_CT1SR ((u32 *)(LQ_CGU_BASE_ADDR + 0x0028)) ++#define LQ_CGU_CT2SR ((u32 *)(LQ_CGU_BASE_ADDR + 0x002C)) ++#define LQ_CGU_PCMCR ((u32 *)(LQ_CGU_BASE_ADDR + 0x0030)) ++#define LQ_CGU_PCI_CR ((u32 *)(LQ_CGU_BASE_ADDR + 0x0034)) ++#define LQ_CGU_PD_PC ((u32 *)(LQ_CGU_BASE_ADDR + 0x0038)) ++#define LQ_CGU_FMR ((u32 *)(LQ_CGU_BASE_ADDR + 0x003C)) ++ ++static unsigned int lq_get_pll0_fdiv(void); ++ ++static inline unsigned int ++get_input_clock(int pll) ++{ ++ switch (pll) { ++ case 0: ++ if (lq_r32(LQ_CGU_PLL0_CFG) & CGU_PLL0_SRC) ++ return BASIS_REQUENCY_USB; ++ else if (CGU_PLL0_PHASE_DIVIDER_ENABLE) ++ return BASIC_FREQUENCY_1; ++ else ++ return BASIC_FREQUENCY_2; ++ case 1: ++ if (CGU_PLL1_SRC) ++ return BASIS_REQUENCY_USB; ++ else if (CGU_PLL0_PHASE_DIVIDER_ENABLE) ++ return BASIC_FREQUENCY_1; ++ else ++ return BASIC_FREQUENCY_2; ++ case 2: ++ switch (CGU_PLL2_SRC) { ++ case 0: ++ return lq_get_pll0_fdiv(); ++ case 1: ++ return CGU_PLL2_PHASE_DIVIDER_ENABLE ? 
++ BASIC_FREQUENCY_1 : ++ BASIC_FREQUENCY_2; ++ case 2: ++ return BASIS_REQUENCY_USB; ++ } ++ default: ++ return 0; ++ } ++} ++ ++static inline unsigned int ++cal_dsm(int pll, unsigned int num, unsigned int den) ++{ ++ u64 res, clock = get_input_clock(pll); ++ res = num * clock; ++ do_div(res, den); ++ return res; ++} ++ ++static inline unsigned int ++mash_dsm(int pll, unsigned int M, unsigned int N, unsigned int K) ++{ ++ unsigned int num = ((N + 1) << 10) + K; ++ unsigned int den = (M + 1) << 10; ++ return cal_dsm(pll, num, den); ++} ++ ++static inline unsigned int ++ssff_dsm_1(int pll, unsigned int M, unsigned int N, unsigned int K) ++{ ++ unsigned int num = ((N + 1) << 11) + K + 512; ++ unsigned int den = (M + 1) << 11; ++ return cal_dsm(pll, num, den); ++} ++ ++static inline unsigned int ++ssff_dsm_2(int pll, unsigned int M, unsigned int N, unsigned int K) ++{ ++ unsigned int num = K >= 512 ? ++ ((N + 1) << 12) + K - 512 : ((N + 1) << 12) + K + 3584; ++ unsigned int den = (M + 1) << 12; ++ return cal_dsm(pll, num, den); ++} ++ ++static inline unsigned int ++dsm(int pll, unsigned int M, unsigned int N, unsigned int K, ++ unsigned int dsmsel, unsigned int phase_div_en) ++{ ++ if (!dsmsel) ++ return mash_dsm(pll, M, N, K); ++ else if (!phase_div_en) ++ return mash_dsm(pll, M, N, K); ++ else ++ return ssff_dsm_2(pll, M, N, K); ++} ++ ++static inline unsigned int ++lq_get_pll0_fosc(void) ++{ ++ if (CGU_PLL0_BYPASS) ++ return get_input_clock(0); ++ else ++ return !CGU_PLL0_CFG_FRAC_EN ++ ? dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN, 0, CGU_PLL0_CFG_DSMSEL, ++ CGU_PLL0_PHASE_DIVIDER_ENABLE) ++ : dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN, CGU_PLL0_CFG_PLLK, ++ CGU_PLL0_CFG_DSMSEL, CGU_PLL0_PHASE_DIVIDER_ENABLE); ++} ++ ++static unsigned int ++lq_get_pll0_fdiv(void) ++{ ++ unsigned int div = CGU_PLL2_CFG_INPUT_DIV + 1; ++ return (lq_get_pll0_fosc() + (div >> 1)) / div; ++} ++ ++unsigned int ++lq_get_io_region_clock(void) ++{ ++ unsigned int ret = lq_get_pll0_fosc(); ++ switch (lq_r32(LQ_CGU_PLL2_CFG) & CGU_SYS_DDR_SEL) { ++ default: ++ case 0: ++ return (ret + 1) / 2; ++ case 1: ++ return (ret * 2 + 2) / 5; ++ case 2: ++ return (ret + 1) / 3; ++ case 3: ++ return (ret + 2) / 4; ++ } ++} ++EXPORT_SYMBOL(lq_get_io_region_clock); ++ ++unsigned int ++lq_get_fpi_bus_clock(int fpi) ++{ ++ unsigned int ret = lq_get_io_region_clock(); ++ if ((fpi == 2) && (lq_r32(LQ_CGU_SYS) & CGU_SYS_FPI_SEL)) ++ ret >>= 1; ++ return ret; ++} ++EXPORT_SYMBOL(lq_get_fpi_bus_clock); ++ ++unsigned int ++lq_get_cpu_hz(void) ++{ ++ switch (lq_r32(LQ_CGU_SYS) & 0xc) ++ { ++ case 0: ++ return CLOCK_333M; ++ case 4: ++ return DDR_HZ; ++ case 8: ++ return DDR_HZ << 1; ++ default: ++ return DDR_HZ >> 1; ++ } ++} ++EXPORT_SYMBOL(lq_get_cpu_hz); ++ ++unsigned int ++lq_get_fpi_hz(void) ++{ ++ unsigned int ddr_clock = DDR_HZ; ++ if (lq_r32(LQ_CGU_SYS) & 0x40) ++ return ddr_clock >> 1; ++ return ddr_clock; ++} ++EXPORT_SYMBOL(lq_get_fpi_hz); ++ ++ +--- /dev/null ++++ b/arch/mips/lantiq/xway/gpio.c +@@ -0,0 +1,203 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. 
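The clock getters above are what on-chip peripheral drivers use to derive divider settings from the current DDR/FPI configuration. A small hedged illustration; the UART math and the function name are made up, only lq_get_fpi_hz() comes from this file.

/* illustrative only: compute a 16x-oversampling UART divisor from the FPI clock */
static unsigned int my_uart_divisor(unsigned int baud)
{
        unsigned int fpi = lq_get_fpi_hz();     /* FPI bus clock in Hz */

        return (fpi + 8 * baud) / (16 * baud);  /* rounded to nearest */
}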
++ * ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#include <linux/slab.h> ++#include <linux/module.h> ++#include <linux/platform_device.h> ++#include <linux/gpio.h> ++ ++#include <lantiq.h> ++ ++#define LQ_GPIO0_BASE_ADDR 0x1E100B10 ++#define LQ_GPIO1_BASE_ADDR 0x1E100B40 ++#define LQ_GPIO_SIZE 0x30 ++ ++#define LQ_GPIO_OUT 0x00 ++#define LQ_GPIO_IN 0x04 ++#define LQ_GPIO_DIR 0x08 ++#define LQ_GPIO_ALTSEL0 0x0C ++#define LQ_GPIO_ALTSEL1 0x10 ++#define LQ_GPIO_OD 0x14 ++ ++#define PINS_PER_PORT 16 ++ ++#define lq_gpio_getbit(m, r, p) !!(lq_r32(m + r) & (1 << p)) ++#define lq_gpio_setbit(m, r, p) lq_w32_mask(0, (1 << p), m + r) ++#define lq_gpio_clearbit(m, r, p) lq_w32_mask((1 << p), 0, m + r) ++ ++struct lq_gpio ++{ ++ void __iomem *membase; ++ struct gpio_chip chip; ++}; ++ ++int ++gpio_to_irq(unsigned int gpio) ++{ ++ return -EINVAL; ++} ++EXPORT_SYMBOL(gpio_to_irq); ++ ++int ++lq_gpio_setconfig(unsigned int pin, unsigned int reg, unsigned int val) ++{ ++ void __iomem *membase = (void*)KSEG1ADDR(LQ_GPIO0_BASE_ADDR); ++ if(pin >= (2 * PINS_PER_PORT)) ++ return -EINVAL; ++ if(pin >= PINS_PER_PORT) ++ { ++ pin -= PINS_PER_PORT; ++ membase += LQ_GPIO_SIZE; ++ } ++ if(val) ++ lq_w32_mask(0, (1 << pin), membase + reg); ++ else ++ lq_w32_mask((1 << pin), 0, membase + reg); ++ return 0; ++} ++EXPORT_SYMBOL(lq_gpio_setconfig); ++ ++int ++lq_gpio_request(unsigned int pin, unsigned int alt0, ++ unsigned int alt1, unsigned int dir, const char *name) ++{ ++ void __iomem *membase = (void*)KSEG1ADDR(LQ_GPIO0_BASE_ADDR); ++ if(pin >= (2 * PINS_PER_PORT)) ++ return -EINVAL; ++ if(gpio_request(pin, name)) ++ { ++ printk("failed to register %s gpio\n", name); ++ return -EBUSY; ++ } ++ gpio_direction_output(pin, dir); ++ if(pin >= PINS_PER_PORT) ++ { ++ pin -= PINS_PER_PORT; ++ membase += LQ_GPIO_SIZE; ++ } ++ if(alt0) ++ lq_gpio_setbit(membase, LQ_GPIO_ALTSEL0, pin); ++ else ++ lq_gpio_clearbit(membase, LQ_GPIO_ALTSEL0, pin); ++ if(alt1) ++ lq_gpio_setbit(membase, LQ_GPIO_ALTSEL1, pin); ++ else ++ lq_gpio_clearbit(membase, LQ_GPIO_ALTSEL1, pin); ++ return 0; ++} ++EXPORT_SYMBOL(lq_gpio_request); ++ ++static void ++lq_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) ++{ ++ struct lq_gpio *lq_gpio = container_of(chip, struct lq_gpio, chip); ++ if(value) ++ lq_gpio_setbit(lq_gpio->membase, LQ_GPIO_OUT, offset); ++ else ++ lq_gpio_clearbit(lq_gpio->membase, LQ_GPIO_OUT, offset); ++} ++ ++static int ++lq_gpio_get(struct gpio_chip *chip, unsigned int offset) ++{ ++ struct lq_gpio *lq_gpio = container_of(chip, struct lq_gpio, chip); ++ return lq_gpio_getbit(lq_gpio->membase, LQ_GPIO_IN, offset); ++} ++ ++static int ++lq_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) ++{ ++ struct lq_gpio *lq_gpio = container_of(chip, struct lq_gpio, chip); ++ lq_gpio_clearbit(lq_gpio->membase, LQ_GPIO_OD, offset); ++ lq_gpio_clearbit(lq_gpio->membase, LQ_GPIO_DIR, offset); ++ return 0; ++} ++ ++static int ++lq_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) ++{ ++ struct lq_gpio *lq_gpio = container_of(chip, struct lq_gpio, chip); ++ lq_gpio_setbit(lq_gpio->membase, LQ_GPIO_OD, offset); ++ lq_gpio_setbit(lq_gpio->membase, LQ_GPIO_DIR, offset); ++ lq_gpio_set(chip, offset, value); ++ return 0; ++} ++ ++static int ++lq_gpio_req(struct gpio_chip *chip, unsigned offset) ++{ ++ struct lq_gpio *lq_gpio = container_of(chip, struct lq_gpio, chip); ++ lq_gpio_clearbit(lq_gpio->membase, LQ_GPIO_ALTSEL0, offset); ++ lq_gpio_clearbit(lq_gpio->membase, 
LQ_GPIO_ALTSEL1, offset); ++ return 0; ++} ++ ++static int ++lq_gpio_probe(struct platform_device *pdev) ++{ ++ struct lq_gpio *lq_gpio = kzalloc(sizeof(struct lq_gpio), GFP_KERNEL); ++ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ int ret = 0; ++ if(!res) ++ { ++ ret = -ENOENT; ++ goto err_free; ++ } ++ res = request_mem_region(res->start, resource_size(res), ++ dev_name(&pdev->dev)); ++ if(!res) ++ { ++ ret = -EBUSY; ++ goto err_free; ++ } ++ lq_gpio->membase = ioremap_nocache(res->start, resource_size(res)); ++ if(!lq_gpio->membase) ++ { ++ ret = -ENOMEM; ++ goto err_release_mem_region; ++ } ++ lq_gpio->chip.label = "lq_gpio"; ++ lq_gpio->chip.direction_input = lq_gpio_direction_input; ++ lq_gpio->chip.direction_output = lq_gpio_direction_output; ++ lq_gpio->chip.get = lq_gpio_get; ++ lq_gpio->chip.set = lq_gpio_set; ++ lq_gpio->chip.request = lq_gpio_req; ++ lq_gpio->chip.base = PINS_PER_PORT * pdev->id; ++ lq_gpio->chip.ngpio = PINS_PER_PORT; ++ platform_set_drvdata(pdev, lq_gpio); ++ ret = gpiochip_add(&lq_gpio->chip); ++ if(!ret) ++ return 0; ++ ++ iounmap(lq_gpio->membase); ++err_release_mem_region: ++ release_mem_region(res->start, resource_size(res)); ++err_free: ++ kfree(lq_gpio); ++ return ret; ++} ++ ++static struct platform_driver ++lq_gpio_driver = { ++ .probe = lq_gpio_probe, ++ .driver = { ++ .name = "lq_gpio", ++ .owner = THIS_MODULE, ++ }, ++}; ++ ++int __init ++lq_gpio_init(void) ++{ ++ int ret = platform_driver_register(&lq_gpio_driver); ++ if(ret) ++ printk(KERN_INFO "lq_gpio : Error registering platfom driver!"); ++ return ret; ++} ++ ++arch_initcall(lq_gpio_init); +--- /dev/null ++++ b/arch/mips/lantiq/xway/reset.c +@@ -0,0 +1,53 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ * ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#include <linux/init.h> ++#include <linux/io.h> ++#include <linux/pm.h> ++#include <asm/reboot.h> ++ ++#include <xway.h> ++ ++#define LQ_RCU_RST ((u32 *)(LQ_RCU_BASE_ADDR + 0x0010)) ++#define LQ_RCU_RST_ALL 0x40000000 ++ ++static void ++lq_machine_restart(char *command) ++{ ++ printk(KERN_NOTICE "System restart\n"); ++ local_irq_disable(); ++ lq_w32(lq_r32(LQ_RCU_RST) | LQ_RCU_RST_ALL, LQ_RCU_RST); ++ for(;;); ++} ++ ++static void ++lq_machine_halt(void) ++{ ++ printk(KERN_NOTICE "System halted.\n"); ++ local_irq_disable(); ++ for(;;); ++} ++ ++static void ++lq_machine_power_off(void) ++{ ++ printk(KERN_NOTICE "Please turn off the power now.\n"); ++ local_irq_disable(); ++ for(;;); ++} ++ ++static int __init ++mips_reboot_setup(void) ++{ ++ _machine_restart = lq_machine_restart; ++ _machine_halt = lq_machine_halt; ++ pm_power_off = lq_machine_power_off; ++ return 0; ++} ++ ++arch_initcall(mips_reboot_setup); diff --git a/target/linux/lantiq/patches/105-header_xway.patch b/target/linux/lantiq/patches/105-header_xway.patch new file mode 100644 index 0000000000..f41e335a6c --- /dev/null +++ b/target/linux/lantiq/patches/105-header_xway.patch @@ -0,0 +1,565 @@ +--- /dev/null ++++ b/arch/mips/include/asm/mach-lantiq/xway/irq.h +@@ -0,0 +1,18 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. 
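A minimal sketch (not part of the patch) of how board code might call the GPIO helpers defined in gpio.c above and declared in xway.h; the pin number and label are placeholders. lq_gpio_request() already performs gpio_request() and gpio_direction_output() internally, so the pin can be driven through the normal gpiolib API afterwards.

#include <linux/init.h>
#include <linux/gpio.h>

#include <xway.h>

static int __init example_board_gpio_init(void)
{
        int err;

        /* pin 5 and "example-led" are placeholders; alt0 = alt1 = 0 keeps the
         * pin in plain GPIO mode, and dir is handed to
         * gpio_direction_output() as the initial output level */
        err = lq_gpio_request(5, 0, 0, 1, "example-led");
        if (err)
                return err;

        /* lq_gpio_request() already called gpio_request(), so standard
         * gpiolib calls work from here on */
        gpio_set_value(5, 0);
        return 0;
}

arch_initcall(example_board_gpio_init);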
++ * ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#ifndef __LANTIQ_IRQ_H ++#define __LANTIQ_IRQ_H ++ ++#include <xway_irq.h> ++ ++#define NR_IRQS 256 ++ ++#include_next <irq.h> ++ ++#endif +--- /dev/null ++++ b/arch/mips/include/asm/mach-lantiq/lantiq_timer.h +@@ -0,0 +1,155 @@ ++#ifndef __DANUBE_GPTU_DEV_H__2005_07_26__10_19__ ++#define __DANUBE_GPTU_DEV_H__2005_07_26__10_19__ ++ ++ ++/****************************************************************************** ++ Copyright (c) 2002, Infineon Technologies. All rights reserved. ++ ++ No Warranty ++ Because the program is licensed free of charge, there is no warranty for ++ the program, to the extent permitted by applicable law. Except when ++ otherwise stated in writing the copyright holders and/or other parties ++ provide the program "as is" without warranty of any kind, either ++ expressed or implied, including, but not limited to, the implied ++ warranties of merchantability and fitness for a particular purpose. The ++ entire risk as to the quality and performance of the program is with ++ you. should the program prove defective, you assume the cost of all ++ necessary servicing, repair or correction. ++ ++ In no event unless required by applicable law or agreed to in writing ++ will any copyright holder, or any other party who may modify and/or ++ redistribute the program as permitted above, be liable to you for ++ damages, including any general, special, incidental or consequential ++ damages arising out of the use or inability to use the program ++ (including but not limited to loss of data or data being rendered ++ inaccurate or losses sustained by you or third parties or a failure of ++ the program to operate with any other programs), even if such holder or ++ other party has been advised of the possibility of such damages. ++******************************************************************************/ ++ ++ ++/* ++ * #################################### ++ * Definition ++ * #################################### ++ */ ++ ++/* ++ * Available Timer/Counter Index ++ */ ++#define TIMER(n, X) (n * 2 + (X ? 1 : 0)) ++#define TIMER_ANY 0x00 ++#define TIMER1A TIMER(1, 0) ++#define TIMER1B TIMER(1, 1) ++#define TIMER2A TIMER(2, 0) ++#define TIMER2B TIMER(2, 1) ++#define TIMER3A TIMER(3, 0) ++#define TIMER3B TIMER(3, 1) ++ ++/* ++ * Flag of Timer/Counter ++ * These flags specify the way in which timer is configured. ++ */ ++/* Bit size of timer/counter. */ ++#define TIMER_FLAG_16BIT 0x0000 ++#define TIMER_FLAG_32BIT 0x0001 ++/* Switch between timer and counter. */ ++#define TIMER_FLAG_TIMER 0x0000 ++#define TIMER_FLAG_COUNTER 0x0002 ++/* Stop or continue when overflowing/underflowing. */ ++#define TIMER_FLAG_ONCE 0x0000 ++#define TIMER_FLAG_CYCLIC 0x0004 ++/* Count up or counter down. */ ++#define TIMER_FLAG_UP 0x0000 ++#define TIMER_FLAG_DOWN 0x0008 ++/* Count on specific level or edge. */ ++#define TIMER_FLAG_HIGH_LEVEL_SENSITIVE 0x0000 ++#define TIMER_FLAG_LOW_LEVEL_SENSITIVE 0x0040 ++#define TIMER_FLAG_RISE_EDGE 0x0010 ++#define TIMER_FLAG_FALL_EDGE 0x0020 ++#define TIMER_FLAG_ANY_EDGE 0x0030 ++/* Signal is syncronous to module clock or not. */ ++#define TIMER_FLAG_UNSYNC 0x0000 ++#define TIMER_FLAG_SYNC 0x0080 ++/* Different interrupt handle type. 
*/ ++#define TIMER_FLAG_NO_HANDLE 0x0000 ++#if defined(__KERNEL__) ++ #define TIMER_FLAG_CALLBACK_IN_IRQ 0x0100 ++#endif // defined(__KERNEL__) ++#define TIMER_FLAG_SIGNAL 0x0300 ++/* Internal clock source or external clock source */ ++#define TIMER_FLAG_INT_SRC 0x0000 ++#define TIMER_FLAG_EXT_SRC 0x1000 ++ ++ ++/* ++ * ioctl Command ++ */ ++#define GPTU_REQUEST_TIMER 0x01 /* General method to setup timer/counter. */ ++#define GPTU_FREE_TIMER 0x02 /* Free timer/counter. */ ++#define GPTU_START_TIMER 0x03 /* Start or resume timer/counter. */ ++#define GPTU_STOP_TIMER 0x04 /* Suspend timer/counter. */ ++#define GPTU_GET_COUNT_VALUE 0x05 /* Get current count value. */ ++#define GPTU_CALCULATE_DIVIDER 0x06 /* Calculate timer divider from given freq.*/ ++#define GPTU_SET_TIMER 0x07 /* Simplified method to setup timer. */ ++#define GPTU_SET_COUNTER 0x08 /* Simplified method to setup counter. */ ++ ++/* ++ * Data Type Used to Call ioctl ++ */ ++struct gptu_ioctl_param { ++ unsigned int timer; /* In command GPTU_REQUEST_TIMER, GPTU_SET_TIMER, and * ++ * GPTU_SET_COUNTER, this field is ID of expected * ++ * timer/counter. If it's zero, a timer/counter would * ++ * be dynamically allocated and ID would be stored in * ++ * this field. * ++ * In command GPTU_GET_COUNT_VALUE, this field is * ++ * ignored. * ++ * In other command, this field is ID of timer/counter * ++ * allocated. */ ++ unsigned int flag; /* In command GPTU_REQUEST_TIMER, GPTU_SET_TIMER, and * ++ * GPTU_SET_COUNTER, this field contains flags to * ++ * specify how to configure timer/counter. * ++ * In command GPTU_START_TIMER, zero indicate start * ++ * and non-zero indicate resume timer/counter. * ++ * In other command, this field is ignored. */ ++ unsigned long value; /* In command GPTU_REQUEST_TIMER, this field contains * ++ * init/reload value. * ++ * In command GPTU_SET_TIMER, this field contains * ++ * frequency (0.001Hz) of timer. * ++ * In command GPTU_GET_COUNT_VALUE, current count * ++ * value would be stored in this field. * ++ * In command GPTU_CALCULATE_DIVIDER, this field * ++ * contains frequency wanted, and after calculation, * ++ * divider would be stored in this field to overwrite * ++ * the frequency. * ++ * In other command, this field is ignored. */ ++ int pid; /* In command GPTU_REQUEST_TIMER and GPTU_SET_TIMER, * ++ * if signal is required, this field contains process * ++ * ID to which signal would be sent. * ++ * In other command, this field is ignored. */ ++ int sig; /* In command GPTU_REQUEST_TIMER and GPTU_SET_TIMER, * ++ * if signal is required, this field contains signal * ++ * number which would be sent. * ++ * In other command, this field is ignored. 
*/ ++}; ++ ++/* ++ * #################################### ++ * Data Type ++ * #################################### ++ */ ++typedef void (*timer_callback)(unsigned long arg); ++ ++extern int ifxmips_request_timer(unsigned int, unsigned int, unsigned long, unsigned long, unsigned long); ++extern int ifxmips_free_timer(unsigned int); ++extern int ifxmips_start_timer(unsigned int, int); ++extern int ifxmips_stop_timer(unsigned int); ++extern int ifxmips_reset_counter_flags(u32 timer, u32 flags); ++extern int ifxmips_get_count_value(unsigned int, unsigned long *); ++extern u32 ifxmips_cal_divider(unsigned long); ++extern int ifxmips_set_timer(unsigned int, unsigned int, int, int, unsigned int, unsigned long, unsigned long); ++extern int ifxmips_set_counter(unsigned int timer, unsigned int flag, ++ u32 reload, unsigned long arg1, unsigned long arg2); ++ ++#endif /* __DANUBE_GPTU_DEV_H__2005_07_26__10_19__ */ +--- /dev/null ++++ b/arch/mips/include/asm/mach-lantiq/xway/xway.h +@@ -0,0 +1,121 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ * ++ * Copyright (C) 2005 infineon ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#ifdef CONFIG_SOC_LANTIQ_XWAY ++ ++#ifndef _LQ_XWAY_H__ ++#define _LQ_XWAY_H__ ++ ++#include <lantiq.h> ++ ++/* request a non-gpio and set the PIO config */ ++extern int lq_gpio_request(unsigned int pin, unsigned int alt0, ++ unsigned int alt1, unsigned int dir, const char *name); ++extern int lq_gpio_setconfig(unsigned int pin, unsigned int reg, unsigned int val); ++ ++extern void lq_pmu_enable(unsigned int module); ++extern void lq_pmu_disable(unsigned int module); ++ ++extern unsigned int lq_get_fpi_bus_clock(int bus); ++ ++#define BOARD_SYSTEM_TYPE "LANTIQ" ++ ++/*------------ Chip IDs */ ++#define SOC_ID_DANUBE1 0x129 ++#define SOC_ID_DANUBE2 0x12B ++#define SOC_ID_TWINPASS 0x12D ++#define SOC_ID_ARX188 0x16C ++#define SOC_ID_ARX168 0x16D ++#define SOC_ID_ARX182 0x16F ++ ++/*------------ SoC Types */ ++#define SOC_TYPE_DANUBE 0x01 ++#define SOC_TYPE_TWINPASS 0x02 ++#define SOC_TYPE_AR9 0x03 ++ ++/*------------ ASC0/1 */ ++#define LQ_ASC0_BASE 0x1E100400 ++#define LQ_ASC1_BASE 0x1E100C00 ++#define LQ_ASC_SIZE 0x400 ++ ++/*------------ RCU */ ++#define LQ_RCU_BASE_ADDR 0xBF203000 ++ ++/*------------ GPTU */ ++#define LQ_GPTU_BASE_ADDR 0xB8000300 ++ ++/*------------ EBU */ ++#define LQ_EBU_GPIO_START 0x14000000 ++#define LQ_EBU_GPIO_SIZE 0x1000 ++ ++#define LQ_EBU_BASE_ADDR 0xBE105300 ++ ++#define LQ_EBU_BUSCON0 ((u32 *)(LQ_EBU_BASE_ADDR + 0x0060)) ++#define LQ_EBU_PCC_CON ((u32 *)(LQ_EBU_BASE_ADDR + 0x0090)) ++#define LQ_EBU_PCC_IEN ((u32 *)(LQ_EBU_BASE_ADDR + 0x00A4)) ++#define LQ_EBU_PCC_ISTAT ((u32 *)(LQ_EBU_BASE_ADDR + 0x00A0)) ++#define LQ_EBU_BUSCON1 ((u32 *)(LQ_EBU_BASE_ADDR + 0x0064)) ++#define LQ_EBU_ADDRSEL1 ((u32 *)(LQ_EBU_BASE_ADDR + 0x0024)) ++ ++#define EBU_WRDIS 0x80000000 ++ ++/*------------ CGU */ ++#define LQ_CGU_BASE_ADDR (KSEG1 + 0x1F103000) ++ ++/*------------ PMU */ ++#define LQ_PMU_BASE_ADDR (KSEG1 + 0x1F102000) ++ ++#define PMU_DMA 0x0020 ++#define PMU_USB 0x8041 ++#define PMU_LED 0x0800 ++#define PMU_GPT 0x1000 ++#define PMU_PPE 0x2000 ++#define PMU_FPI 0x4000 ++#define PMU_SWITCH 0x10000000 ++ ++/*------------ ETOP */ ++#define LQ_PPE32_BASE_ADDR 0xBE180000 ++#define LQ_PPE32_SIZE 0x40000 ++ ++/*------------ DMA */ ++#define LQ_DMA_BASE_ADDR 0xBE104100 ++ ++/*------------ PCI */ 
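The block comments above describe the GPTU ioctl interface in detail, but the in-kernel API is only declared, never shown in use. The following hedged sketch (not part of the patch) outlines how a driver might call it; the reload value, the two trailing arguments of ifxmips_request_timer(), the second argument of ifxmips_start_timer() and the 0-on-success convention are assumptions inferred from the comments, since the prototypes leave their parameters unnamed.

#include <linux/kernel.h>

#include <lantiq_timer.h>

static void example_gptu_usage(void)
{
        unsigned long count = 0;
        unsigned int flags = TIMER_FLAG_32BIT | TIMER_FLAG_TIMER |
                             TIMER_FLAG_CYCLIC | TIMER_FLAG_DOWN |
                             TIMER_FLAG_INT_SRC | TIMER_FLAG_NO_HANDLE;

        /* 0x100000 as the reload value and the two trailing 0 arguments are
         * guesses; the header does not document these parameters */
        if (ifxmips_request_timer(TIMER1A, flags, 0x100000, 0, 0))
                return;

        /* per the GPTU_START_TIMER note above: 0 = start, non-zero = resume */
        ifxmips_start_timer(TIMER1A, 0);

        if (!ifxmips_get_count_value(TIMER1A, &count))
                pr_info("TIMER1A count: %lu\n", count);

        ifxmips_stop_timer(TIMER1A);
        ifxmips_free_timer(TIMER1A);
}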
++#define PCI_CR_PR_BASE_ADDR (KSEG1 + 0x1E105400) ++#define PCI_CS_PR_BASE_ADDR (KSEG1 + 0x17000000) ++ ++/*------------ WDT */ ++#define LQ_WDT_BASE 0x1F880000 ++#define LQ_WDT_SIZE 0x400 ++ ++/*------------ Serial To Parallel conversion */ ++#define LQ_STP_BASE 0x1E100BB0 ++#define LQ_STP_SIZE 0x40 ++ ++/*------------ GPIO */ ++#define LQ_GPIO0_BASE_ADDR 0x1E100B10 ++#define LQ_GPIO1_BASE_ADDR 0x1E100B40 ++#define LQ_GPIO_SIZE 0x30 ++ ++/*------------ SSC */ ++#define LQ_SSC_BASE_ADDR (KSEG1 + 0x1e100800) ++ ++/*------------ MEI */ ++#define LQ_MEI_BASE_ADDR (KSEG1 + 0x1E116000) ++ ++/*------------ DEU */ ++#define LQ_DEU_BASE (KSEG1 + 0x1E103100) ++ ++/*------------ MPS */ ++#define LQ_MPS_BASE_ADDR (KSEG1 + 0x1F107000) ++#define LQ_MPS_CHIPID ((u32 *)(LQ_MPS_BASE_ADDR + 0x0344)) ++ ++#endif ++ ++#endif +--- /dev/null ++++ b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h +@@ -0,0 +1,144 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * Copyright (C) 2005 infineon ++ * Copyright (C) 2007 John Crispin <blogic@openwrt.org> ++ * ++ */ ++#ifndef _LQ_DMA_H__ ++#define _LQ_DMA_H__ ++ ++#define RCV_INT 1 ++#define TX_BUF_FULL_INT 2 ++#define TRANSMIT_CPT_INT 4 ++#define LQ_DMA_CH_ON 1 ++#define LQ_DMA_CH_OFF 0 ++#define LQ_DMA_CH_DEFAULT_WEIGHT 100 ++ ++enum attr_t{ ++ TX = 0, ++ RX = 1, ++ RESERVED = 2, ++ DEFAULT = 3, ++}; ++ ++#define DMA_OWN 1 ++#define CPU_OWN 0 ++#define DMA_MAJOR 250 ++ ++#define DMA_DESC_OWN_CPU 0x0 ++#define DMA_DESC_OWN_DMA 0x80000000 ++#define DMA_DESC_CPT_SET 0x40000000 ++#define DMA_DESC_SOP_SET 0x20000000 ++#define DMA_DESC_EOP_SET 0x10000000 ++ ++#define MISCFG_MASK 0x40 ++#define RDERR_MASK 0x20 ++#define CHOFF_MASK 0x10 ++#define DESCPT_MASK 0x8 ++#define DUR_MASK 0x4 ++#define EOP_MASK 0x2 ++ ++#define DMA_DROP_MASK (1<<31) ++ ++#define LQ_DMA_RX -1 ++#define LQ_DMA_TX 1 ++ ++struct dma_chan_map { ++ const char *dev_name; ++ enum attr_t dir; ++ int pri; ++ int irq; ++ int rel_chan_no; ++}; ++ ++#ifdef CONFIG_CPU_LITTLE_ENDIAN ++struct rx_desc { ++ u32 data_length:16; ++ volatile u32 reserved:7; ++ volatile u32 byte_offset:2; ++ volatile u32 Burst_length_offset:3; ++ volatile u32 EoP:1; ++ volatile u32 Res:1; ++ volatile u32 C:1; ++ volatile u32 OWN:1; ++ volatile u32 Data_Pointer; /* fixme: should be 28 bits here */ ++}; ++ ++struct tx_desc { ++ volatile u32 data_length:16; ++ volatile u32 reserved1:7; ++ volatile u32 byte_offset:5; ++ volatile u32 EoP:1; ++ volatile u32 SoP:1; ++ volatile u32 C:1; ++ volatile u32 OWN:1; ++ volatile u32 Data_Pointer; /* fixme: should be 28 bits here */ ++}; ++#else /* BIG */ ++struct rx_desc { ++ union { ++ struct { ++ volatile u32 OWN:1; ++ volatile u32 C:1; ++ volatile u32 SoP:1; ++ volatile u32 EoP:1; ++ volatile u32 Burst_length_offset:3; ++ volatile u32 byte_offset:2; ++ volatile u32 reserve:7; ++ volatile u32 data_length:16; ++ } field; ++ volatile u32 word; ++ } status; ++ volatile u32 
Data_Pointer; ++}; ++ ++struct tx_desc { ++ union { ++ struct { ++ volatile u32 OWN:1; ++ volatile u32 C:1; ++ volatile u32 SoP:1; ++ volatile u32 EoP:1; ++ volatile u32 byte_offset:5; ++ volatile u32 reserved:7; ++ volatile u32 data_length:16; ++ } field; ++ volatile u32 word; ++ } status; ++ volatile u32 Data_Pointer; ++}; ++#endif /* ENDIAN */ ++ ++struct dma_channel_info { ++ /* relative channel number */ ++ int rel_chan_no; ++ /* class for this channel for QoS */ ++ int pri; ++ /* specify byte_offset */ ++ int byte_offset; ++ /* direction */ ++ int dir; ++ /* irq number */ ++ int irq; ++ /* descriptor parameter */ ++ int desc_base; ++ int desc_len; ++ int curr_desc; ++ int prev_desc; /* only used if it is a tx channel*/ ++ /* weight setting for WFQ algorithm*/ ++ int weight; ++ int default_weight; ++ int packet_size; ++ int burst_len; ++ /* on or off of this channel */ ++ int control; ++ /* optional information for the upper layer devices */ ++#if defined(CONFIG_LQ_ETHERNET_D2) || defined(CONFIG_LQ_PPA) ++ void *opt[64]; ++#else ++ void *opt[25]; ++#endif ++ /* Pointer to the peripheral device who is using this channel */ ++ void *dma_dev; ++ /* channel operations */ ++ void (*open)(struct dma_channel_info *pCh); ++ void (*close)(struct dma_channel_info *pCh); ++ void (*reset)(struct dma_channel_info *pCh); ++ void (*enable_irq)(struct dma_channel_info *pCh); ++ void (*disable_irq)(struct dma_channel_info *pCh); ++}; ++ ++struct dma_device_info { ++ /* device name of this peripheral */ ++ char device_name[15]; ++ int reserved; ++ int tx_burst_len; ++ int rx_burst_len; ++ int default_weight; ++ int current_tx_chan; ++ int current_rx_chan; ++ int num_tx_chan; ++ int num_rx_chan; ++ int max_rx_chan_num; ++ int max_tx_chan_num; ++ struct dma_channel_info *tx_chan[20]; ++ struct dma_channel_info *rx_chan[20]; ++ /*functions, optional*/ ++ u8 *(*buffer_alloc)(int len, int *offset, void **opt); ++ void (*buffer_free)(u8 *dataptr, void *opt); ++ int (*intr_handler)(struct dma_device_info *info, int status); ++ void *priv; /* used by peripheral driver only */ ++}; ++ ++struct dma_device_info *dma_device_reserve(char *dev_name); ++void dma_device_release(struct dma_device_info *dev); ++void dma_device_register(struct dma_device_info *info); ++void dma_device_unregister(struct dma_device_info *info); ++int dma_device_read(struct dma_device_info *info, u8 **dataptr, void **opt); ++int dma_device_write(struct dma_device_info *info, u8 *dataptr, int len, ++ void *opt); ++ ++#endif ++ +--- /dev/null ++++ b/arch/mips/include/asm/mach-lantiq/xway/xway_irq.h +@@ -0,0 +1,62 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. 
++ * ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#ifndef _LANTIQ_XWAY_IRQ_H__ ++#define _LANTIQ_XWAY_IRQ_H__ ++ ++#define INT_NUM_IRQ0 8 ++#define INT_NUM_IM0_IRL0 (INT_NUM_IRQ0 + 0) ++#define INT_NUM_IM1_IRL0 (INT_NUM_IRQ0 + 32) ++#define INT_NUM_IM2_IRL0 (INT_NUM_IRQ0 + 64) ++#define INT_NUM_IM3_IRL0 (INT_NUM_IRQ0 + 96) ++#define INT_NUM_IM4_IRL0 (INT_NUM_IRQ0 + 128) ++#define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0) ++ ++#define LQ_ASC_TIR(x) (INT_NUM_IM3_IRL0 + (x * 7)) ++#define LQ_ASC_RIR(x) (INT_NUM_IM3_IRL0 + (x * 7) + 2) ++#define LQ_ASC_EIR(x) (INT_NUM_IM3_IRL0 + (x * 7) + 3) ++ ++#define LQ_SSC_TIR (INT_NUM_IM0_IRL0 + 15) ++#define LQ_SSC_RIR (INT_NUM_IM0_IRL0 + 14) ++#define LQ_SSC_EIR (INT_NUM_IM0_IRL0 + 16) ++ ++#define LQ_MEI_DYING_GASP_INT (INT_NUM_IM1_IRL0 + 21) ++#define LQ_MEI_INT (INT_NUM_IM1_IRL0 + 23) ++ ++#define LQ_TIMER6_INT (INT_NUM_IM1_IRL0 + 23) ++#define LQ_USB_INT (INT_NUM_IM1_IRL0 + 22) ++#define LQ_USB_OC_INT (INT_NUM_IM4_IRL0 + 23) ++ ++#define MIPS_CPU_TIMER_IRQ 7 ++ ++#define LQ_DMA_CH0_INT (INT_NUM_IM2_IRL0) ++#define LQ_DMA_CH1_INT (INT_NUM_IM2_IRL0 + 1) ++#define LQ_DMA_CH2_INT (INT_NUM_IM2_IRL0 + 2) ++#define LQ_DMA_CH3_INT (INT_NUM_IM2_IRL0 + 3) ++#define LQ_DMA_CH4_INT (INT_NUM_IM2_IRL0 + 4) ++#define LQ_DMA_CH5_INT (INT_NUM_IM2_IRL0 + 5) ++#define LQ_DMA_CH6_INT (INT_NUM_IM2_IRL0 + 6) ++#define LQ_DMA_CH7_INT (INT_NUM_IM2_IRL0 + 7) ++#define LQ_DMA_CH8_INT (INT_NUM_IM2_IRL0 + 8) ++#define LQ_DMA_CH9_INT (INT_NUM_IM2_IRL0 + 9) ++#define LQ_DMA_CH10_INT (INT_NUM_IM2_IRL0 + 10) ++#define LQ_DMA_CH11_INT (INT_NUM_IM2_IRL0 + 11) ++#define LQ_DMA_CH12_INT (INT_NUM_IM2_IRL0 + 25) ++#define LQ_DMA_CH13_INT (INT_NUM_IM2_IRL0 + 26) ++#define LQ_DMA_CH14_INT (INT_NUM_IM2_IRL0 + 27) ++#define LQ_DMA_CH15_INT (INT_NUM_IM2_IRL0 + 28) ++#define LQ_DMA_CH16_INT (INT_NUM_IM2_IRL0 + 29) ++#define LQ_DMA_CH17_INT (INT_NUM_IM2_IRL0 + 30) ++#define LQ_DMA_CH18_INT (INT_NUM_IM2_IRL0 + 16) ++#define LQ_DMA_CH19_INT (INT_NUM_IM2_IRL0 + 21) ++ ++#define LQ_PPE_MBOX_INT (INT_NUM_IM2_IRL0 + 24) ++ ++#define INT_NUM_IM4_IRL14 (INT_NUM_IM4_IRL0 + 14) ++ ++#endif diff --git a/target/linux/lantiq/patches/106-early_printk.patch b/target/linux/lantiq/patches/106-early_printk.patch new file mode 100644 index 0000000000..34e0bdf4d2 --- /dev/null +++ b/target/linux/lantiq/patches/106-early_printk.patch @@ -0,0 +1,93 @@ +--- a/arch/mips/lantiq/Kconfig ++++ b/arch/mips/lantiq/Kconfig +@@ -33,4 +33,19 @@ endchoice + source "arch/mips/lantiq/falcon/Kconfig" + source "arch/mips/lantiq/xway/Kconfig" + ++if EARLY_PRINTK ++choice ++ prompt "Early printk port" ++ default LANTIQ_PROM_ASC1 ++ help ++ Choose which serial port is used, until the console driver is loaded ++ ++config LANTIQ_PROM_ASC0 ++ bool "ASC0" ++ ++config LANTIQ_PROM_ASC1 ++ bool "ASC1" ++endchoice ++endif ++ + endif +--- /dev/null ++++ b/arch/mips/lantiq/early_printk.c +@@ -0,0 +1,68 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. 
++ * ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#include <linux/init.h> ++#include <linux/cpu.h> ++ ++#include <lantiq.h> ++ ++#ifdef CONFIG_SOC_LANTIQ_XWAY ++#include <xway.h> ++#ifdef CONFIG_LANTIQ_PROM_ASC0 ++#define LQ_ASC_BASE KSEG1ADDR(LQ_ASC0_BASE) ++#else ++#define LQ_ASC_BASE KSEG1ADDR(LQ_ASC1_BASE) ++#endif ++ ++#elif CONFIG_SOC_LANTIQ_FALCON ++#include <falcon/gpon_reg_base.h> ++#ifdef CONFIG_LANTIQ_PROM_ASC0 ++#define LQ_ASC_BASE GPON_ASC0_BASE ++#else ++#define LQ_ASC_BASE GPON_ASC1_BASE ++#endif ++ ++#endif ++ ++#define ASC_BUF 1024 ++#define LQ_ASC_FSTAT 0x0048 ++#define LQ_ASC_TBUF 0x0020 ++#define TXMASK 0x3F00 ++#define TXOFFSET 8 ++ ++static char buf[ASC_BUF]; ++ ++void ++prom_putchar(char c) ++{ ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ while ((lq_r32((u32 *)(LQ_ASC_BASE + LQ_ASC_FSTAT)) & TXMASK) >> TXOFFSET); ++ ++ if (c == '\n') ++ lq_w32('\r', (u32 *)(LQ_ASC_BASE + LQ_ASC_TBUF)); ++ lq_w32(c, (u32 *)(LQ_ASC_BASE + LQ_ASC_TBUF)); ++ local_irq_restore(flags); ++} ++ ++void ++early_printf(const char *fmt, ...) ++{ ++ va_list args; ++ int l; ++ char *p, *buf_end; ++ ++ va_start(args, fmt); ++ l = vsnprintf(buf, ASC_BUF, fmt, args); ++ va_end(args); ++ buf_end = buf + l; ++ ++ for (p = buf; p < buf_end; p++) ++ prom_putchar(*p); ++} diff --git a/target/linux/lantiq/patches/110-machine.patch b/target/linux/lantiq/patches/110-machine.patch new file mode 100644 index 0000000000..9d0b7b71c8 --- /dev/null +++ b/target/linux/lantiq/patches/110-machine.patch @@ -0,0 +1,55 @@ +--- a/arch/mips/lantiq/setup.c ++++ b/arch/mips/lantiq/setup.c +@@ -13,7 +13,8 @@ + #include <linux/ioport.h> + + #include <lantiq.h> +-#include <lantiq_regs.h> ++ ++#include <machine.h> + + void __init + plat_mem_setup(void) +@@ -46,3 +47,25 @@ plat_mem_setup(void) + memsize *= 1024 * 1024; + add_memory_region(0x00000000, memsize, BOOT_MEM_RAM); + } ++ ++static int __init ++lq_machine_setup(void) ++{ ++ mips_machine_setup(); ++ return 0; ++} ++ ++static void __init ++mach_generic_init(void) ++{ ++} ++ ++MIPS_MACHINE(LANTIQ_MACH_GENERIC, ++ "Generic", ++ "Generic", ++ mach_generic_init); ++ ++arch_initcall(lq_machine_setup); ++ ++/* for backward compatibility, define "board=" as alias for "machtype=" */ ++__setup("board=", mips_machtype_setup); +--- /dev/null ++++ b/arch/mips/include/asm/mach-lantiq/machine.h +@@ -0,0 +1,14 @@ ++#include <asm/mips_machine.h> ++ ++enum lantiq_mach_type { ++ LANTIQ_MACH_GENERIC, ++ ++ /* FALCON */ ++ LANTIQ_MACH_EASY98000, /* Falcon Eval Board, NOR Flash */ ++ LANTIQ_MACH_EASY98020, /* Falcon Reference Board */ ++ ++ /* XWAY */ ++ LANTIQ_MACH_EASY4010, /* Twinpass evalkit */ ++ LANTIQ_MACH_EASY50712, /* Danube evalkit */ ++ LANTIQ_MACH_EASY50812, /* AR9 eval board */ ++}; diff --git a/target/linux/lantiq/patches/200-serial.patch b/target/linux/lantiq/patches/200-serial.patch new file mode 100644 index 0000000000..a13022786b --- /dev/null +++ b/target/linux/lantiq/patches/200-serial.patch @@ -0,0 +1,799 @@ +--- a/drivers/serial/Kconfig ++++ b/drivers/serial/Kconfig +@@ -1397,6 +1397,14 @@ + help + Support for Console on the NWP serial ports. 
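Looking back at 110-machine.patch above: it only registers the generic machine. As a sketch of how a concrete board file would plug into the same mechanism (not part of the patch; the EASY50712 strings and the empty init body are placeholders modelled directly on the MIPS_MACHINE() call in setup.c):

#include <linux/init.h>

#include <machine.h>

static void __init mach_easy50712_init(void)
{
        /* board-specific devices (flash map, ethernet, LEDs, ...) would be
         * registered here */
}

MIPS_MACHINE(LANTIQ_MACH_EASY50712,
             "EASY50712",
             "EASY50712 - Danube evalkit",
             mach_easy50712_init);

Such a board would then be selected on the kernel command line via machtype= (or the board= alias that setup.c adds for backward compatibility).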
+ ++config SERIAL_LANTIQ ++ bool "Lantiq serial driver" ++ depends on LANTIQ ++ select SERIAL_CORE ++ select SERIAL_CORE_CONSOLE ++ help ++ Driver for the Lantiq SoC ASC hardware ++ + config SERIAL_QE + tristate "Freescale QUICC Engine serial port support" + depends on QUICC_ENGINE +--- a/drivers/serial/Makefile ++++ b/drivers/serial/Makefile +@@ -84,3 +84,4 @@ + obj-$(CONFIG_SERIAL_GRLIB_GAISLER_APBUART) += apbuart.o + obj-$(CONFIG_SERIAL_ALTERA_JTAGUART) += altera_jtaguart.o + obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o ++obj-$(CONFIG_SERIAL_LANTIQ) += lantiq.o +--- /dev/null ++++ b/drivers/serial/lantiq.c +@@ -0,0 +1,772 @@ ++/* ++ * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ * Copyright (C) 2004 Infineon IFAP DC COM CPE ++ * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org> ++ * Copyright (C) 2007 John Crispin <blogic@openwrt.org> ++ * Copyright (C) 2010 Thomas Langer, Lantiq Deutschland ++ */ ++ ++#include <linux/slab.h> ++#include <linux/module.h> ++#include <linux/ioport.h> ++#include <linux/init.h> ++#include <linux/console.h> ++#include <linux/sysrq.h> ++#include <linux/device.h> ++#include <linux/tty.h> ++#include <linux/tty_flip.h> ++#include <linux/serial_core.h> ++#include <linux/serial.h> ++#include <linux/platform_device.h> ++#include <linux/io.h> ++#include <linux/clk.h> ++ ++#define lq_r32(reg) __raw_readl(reg) ++#define lq_r8(reg) __raw_readb(reg) ++#define lq_w32(val, reg) __raw_writel(val, reg) ++#define lq_w8(val, reg) __raw_writeb(val, reg) ++#define lq_w32_mask(clear, set, reg) lq_w32((lq_r32(reg) & ~(clear)) | (set), reg) ++ ++#define PORT_IFXMIPSASC 111 ++#define MAXPORTS 2 ++ ++#define UART_DUMMY_UER_RX 1 ++ ++#define DRVNAME "lq_asc" ++ ++#ifdef __BIG_ENDIAN ++#define IFXMIPS_ASC_TBUF (0x0020 + 3) ++#define IFXMIPS_ASC_RBUF (0x0024 + 3) ++#else ++#define IFXMIPS_ASC_TBUF 0x0020 ++#define IFXMIPS_ASC_RBUF 0x0024 ++#endif ++ ++#define IFXMIPS_ASC_FSTAT 0x0048 ++#define IFXMIPS_ASC_WHBSTATE 0x0018 ++#define IFXMIPS_ASC_STATE 0x0014 ++#define IFXMIPS_ASC_IRNCR 0x00F8 ++#define IFXMIPS_ASC_CLC 0x0000 ++#define IFXMIPS_ASC_ID 0x0008 ++#define IFXMIPS_ASC_PISEL 0x0004 ++#define IFXMIPS_ASC_TXFCON 0x0044 ++#define IFXMIPS_ASC_RXFCON 0x0040 ++#define IFXMIPS_ASC_CON 0x0010 ++#define IFXMIPS_ASC_BG 0x0050 ++#define IFXMIPS_ASC_IRNREN 0x00F4 ++ ++#define ASC_IRNREN_TX 0x1 ++#define ASC_IRNREN_RX 0x2 ++#define ASC_IRNREN_ERR 0x4 ++#define ASC_IRNREN_TX_BUF 0x8 ++#define ASC_IRNCR_TIR 0x1 ++#define ASC_IRNCR_RIR 0x2 ++#define ASC_IRNCR_EIR 0x4 ++ ++#define ASCOPT_CSIZE 0x3 ++#define ASCOPT_CS7 0x1 ++#define ASCOPT_CS8 0x2 ++#define ASCOPT_PARENB 0x4 ++#define ASCOPT_STOPB 0x8 ++#define ASCOPT_PARODD 0x0 ++#define ASCOPT_CREAD 0x20 ++#define TXFIFO_FL 1 ++#define RXFIFO_FL 1 ++#define ASCCLC_DISS 0x2 ++#define ASCCLC_RMCMASK 0x0000FF00 ++#define ASCCLC_RMCOFFSET 8 ++#define 
ASCCON_M_8ASYNC 0x0 ++#define ASCCON_M_7ASYNC 0x2 ++#define ASCCON_ODD 0x00000020 ++#define ASCCON_STP 0x00000080 ++#define ASCCON_BRS 0x00000100 ++#define ASCCON_FDE 0x00000200 ++#define ASCCON_R 0x00008000 ++#define ASCCON_FEN 0x00020000 ++#define ASCCON_ROEN 0x00080000 ++#define ASCCON_TOEN 0x00100000 ++#define ASCSTATE_PE 0x00010000 ++#define ASCSTATE_FE 0x00020000 ++#define ASCSTATE_ROE 0x00080000 ++#define ASCSTATE_ANY (ASCSTATE_ROE|ASCSTATE_PE|ASCSTATE_FE) ++#define ASCWHBSTATE_CLRREN 0x00000001 ++#define ASCWHBSTATE_SETREN 0x00000002 ++#define ASCWHBSTATE_CLRPE 0x00000004 ++#define ASCWHBSTATE_CLRFE 0x00000008 ++#define ASCWHBSTATE_CLRROE 0x00000020 ++#define ASCTXFCON_TXFEN 0x0001 ++#define ASCTXFCON_TXFFLU 0x0002 ++#define ASCTXFCON_TXFITLMASK 0x3F00 ++#define ASCTXFCON_TXFITLOFF 8 ++#define ASCRXFCON_RXFEN 0x0001 ++#define ASCRXFCON_RXFFLU 0x0002 ++#define ASCRXFCON_RXFITLMASK 0x3F00 ++#define ASCRXFCON_RXFITLOFF 8 ++#define ASCFSTAT_RXFFLMASK 0x003F ++#define ASCFSTAT_TXFFLMASK 0x3F00 ++#define ASCFSTAT_TXFFLOFF 8 ++#define ASCFSTAT_RXFREEMASK 0x003F0000 ++#define ASCFSTAT_RXFREEOFF 16 ++#define ASCFSTAT_TXFREEMASK 0x3F000000 ++#define ASCFSTAT_TXFREEOFF 24 ++ ++static void lqasc_tx_chars(struct uart_port *port); ++extern void prom_printf(const char *fmt, ...); ++static struct lq_uart_port *lqasc_port[2]; ++static struct uart_driver lqasc_reg; ++ ++struct lq_uart_port { ++ struct uart_port port; ++ struct clk *clk; ++ unsigned int tx_irq; ++ unsigned int rx_irq; ++ unsigned int err_irq; ++}; ++ ++static inline struct ++lq_uart_port *to_lq_uart_port(struct uart_port *port) ++{ ++ return container_of(port, struct lq_uart_port, port); ++} ++ ++static void ++lqasc_stop_tx(struct uart_port *port) ++{ ++ return; ++} ++ ++static void ++lqasc_start_tx(struct uart_port *port) ++{ ++ unsigned long flags; ++ local_irq_save(flags); ++ lqasc_tx_chars(port); ++ local_irq_restore(flags); ++ return; ++} ++ ++static void ++lqasc_stop_rx(struct uart_port *port) ++{ ++ lq_w32(ASCWHBSTATE_CLRREN, port->membase + IFXMIPS_ASC_WHBSTATE); ++} ++ ++static void ++lqasc_enable_ms(struct uart_port *port) ++{ ++} ++ ++static void ++lqasc_rx_chars(struct uart_port *port) ++{ ++ struct tty_struct *tty = port->state->port.tty; ++ unsigned int ch = 0, rsr = 0, fifocnt; ++ ++ fifocnt = lq_r32(port->membase + IFXMIPS_ASC_FSTAT) & ASCFSTAT_RXFFLMASK; ++ while (fifocnt--) { ++ u8 flag = TTY_NORMAL; ++ ch = lq_r8(port->membase + IFXMIPS_ASC_RBUF); ++ rsr = (lq_r32(port->membase + IFXMIPS_ASC_STATE) ++ & ASCSTATE_ANY) | UART_DUMMY_UER_RX; ++ tty_flip_buffer_push(tty); ++ port->icount.rx++; ++ ++ /* ++ * Note that the error handling code is ++ * out of the main execution path ++ */ ++ if (rsr & ASCSTATE_ANY) { ++ if (rsr & ASCSTATE_PE) { ++ port->icount.parity++; ++ lq_w32_mask(0, ASCWHBSTATE_CLRPE, ++ port->membase + IFXMIPS_ASC_WHBSTATE); ++ } else if (rsr & ASCSTATE_FE) { ++ port->icount.frame++; ++ lq_w32_mask(0, ASCWHBSTATE_CLRFE, ++ port->membase + IFXMIPS_ASC_WHBSTATE); ++ } ++ if (rsr & ASCSTATE_ROE) { ++ port->icount.overrun++; ++ lq_w32_mask(0, ASCWHBSTATE_CLRROE, ++ port->membase + IFXMIPS_ASC_WHBSTATE); ++ } ++ ++ rsr &= port->read_status_mask; ++ ++ if (rsr & ASCSTATE_PE) ++ flag = TTY_PARITY; ++ else if (rsr & ASCSTATE_FE) ++ flag = TTY_FRAME; ++ } ++ ++ if ((rsr & port->ignore_status_mask) == 0) ++ tty_insert_flip_char(tty, ch, flag); ++ ++ if (rsr & ASCSTATE_ROE) ++ /* ++ * Overrun is special, since it's reported ++ * immediately, and doesn't affect the current ++ * character ++ */ ++ 
tty_insert_flip_char(tty, 0, TTY_OVERRUN); ++ } ++ if (ch != 0) ++ tty_flip_buffer_push(tty); ++ return; ++} ++ ++static void ++lqasc_tx_chars(struct uart_port *port) ++{ ++ struct circ_buf *xmit = &port->state->xmit; ++ if (uart_tx_stopped(port)) { ++ lqasc_stop_tx(port); ++ return; ++ } ++ ++ while (((lq_r32(port->membase + IFXMIPS_ASC_FSTAT) & ++ ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF) != 0) { ++ if (port->x_char) { ++ lq_w8(port->x_char, port->membase + IFXMIPS_ASC_TBUF); ++ port->icount.tx++; ++ port->x_char = 0; ++ continue; ++ } ++ ++ if (uart_circ_empty(xmit)) ++ break; ++ ++ lq_w8(port->state->xmit.buf[port->state->xmit.tail], ++ port->membase + IFXMIPS_ASC_TBUF); ++ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); ++ port->icount.tx++; ++ } ++ ++ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) ++ uart_write_wakeup(port); ++} ++ ++static irqreturn_t ++lqasc_tx_int(int irq, void *_port) ++{ ++ struct uart_port *port = (struct uart_port *)_port; ++ lq_w32(ASC_IRNCR_TIR, port->membase + IFXMIPS_ASC_IRNCR); ++ lqasc_start_tx(port); ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t ++lqasc_err_int(int irq, void *_port) ++{ ++ struct uart_port *port = (struct uart_port *)_port; ++ /* clear any pending interrupts */ ++ lq_w32_mask(0, ASCWHBSTATE_CLRPE | ASCWHBSTATE_CLRFE | ASCWHBSTATE_CLRROE, ++ port->membase + IFXMIPS_ASC_WHBSTATE); ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t ++lqasc_rx_int(int irq, void *_port) ++{ ++ struct uart_port *port = (struct uart_port *)_port; ++ lq_w32(ASC_IRNCR_RIR, port->membase + IFXMIPS_ASC_IRNCR); ++ lqasc_rx_chars(port); ++ return IRQ_HANDLED; ++} ++ ++static unsigned int ++lqasc_tx_empty(struct uart_port *port) ++{ ++ int status; ++ status = lq_r32(port->membase + IFXMIPS_ASC_FSTAT) & ASCFSTAT_TXFFLMASK; ++ return status ? 
0 : TIOCSER_TEMT; ++} ++ ++static unsigned int ++lqasc_get_mctrl(struct uart_port *port) ++{ ++ return TIOCM_CTS | TIOCM_CAR | TIOCM_DSR; ++} ++ ++static void ++lqasc_set_mctrl(struct uart_port *port, u_int mctrl) ++{ ++} ++ ++static void ++lqasc_break_ctl(struct uart_port *port, int break_state) ++{ ++} ++ ++static int ++lqasc_startup(struct uart_port *port) ++{ ++ struct lq_uart_port *ifx_port = to_lq_uart_port(port); ++ int retval; ++ ++ port->uartclk = clk_get_rate(ifx_port->clk); ++ ++ lq_w32_mask(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET), ++ port->membase + IFXMIPS_ASC_CLC); ++ ++ lq_w32(0, port->membase + IFXMIPS_ASC_PISEL); ++ lq_w32( ++ ((TXFIFO_FL << ASCTXFCON_TXFITLOFF) & ASCTXFCON_TXFITLMASK) | ++ ASCTXFCON_TXFEN | ASCTXFCON_TXFFLU, ++ port->membase + IFXMIPS_ASC_TXFCON); ++ lq_w32( ++ ((RXFIFO_FL << ASCRXFCON_RXFITLOFF) & ASCRXFCON_RXFITLMASK) ++ | ASCRXFCON_RXFEN | ASCRXFCON_RXFFLU, ++ port->membase + IFXMIPS_ASC_RXFCON); ++ /* make sure other settings are written to hardware before setting enable bits */ ++ wmb(); ++ lq_w32_mask(0, ASCCON_M_8ASYNC | ASCCON_FEN | ASCCON_TOEN | ++ ASCCON_ROEN, port->membase + IFXMIPS_ASC_CON); ++ ++ retval = request_irq(ifx_port->tx_irq, lqasc_tx_int, ++ IRQF_DISABLED, "asc_tx", port); ++ if (retval) { ++ pr_err("failed to request lqasc_tx_int\n"); ++ return retval; ++ } ++ ++ retval = request_irq(ifx_port->rx_irq, lqasc_rx_int, ++ IRQF_DISABLED, "asc_rx", port); ++ if (retval) { ++ pr_err("failed to request lqasc_rx_int\n"); ++ goto err1; ++ } ++ ++ retval = request_irq(ifx_port->err_irq, lqasc_err_int, ++ IRQF_DISABLED, "asc_err", port); ++ if (retval) { ++ pr_err("failed to request lqasc_err_int\n"); ++ goto err2; ++ } ++ ++ lq_w32(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX, ++ port->membase + IFXMIPS_ASC_IRNREN); ++ return 0; ++ ++err2: ++ free_irq(ifx_port->rx_irq, port); ++err1: ++ free_irq(ifx_port->tx_irq, port); ++ return retval; ++} ++ ++static void ++lqasc_shutdown(struct uart_port *port) ++{ ++ struct lq_uart_port *ifx_port = to_lq_uart_port(port); ++ free_irq(ifx_port->tx_irq, port); ++ free_irq(ifx_port->rx_irq, port); ++ free_irq(ifx_port->err_irq, port); ++ ++ lq_w32(0, port->membase + IFXMIPS_ASC_CON); ++ lq_w32_mask(ASCRXFCON_RXFEN, ASCRXFCON_RXFFLU, ++ port->membase + IFXMIPS_ASC_RXFCON); ++ lq_w32_mask(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU, ++ port->membase + IFXMIPS_ASC_TXFCON); ++} ++ ++static void ++lqasc_set_termios(struct uart_port *port, ++ struct ktermios *new, struct ktermios *old) ++{ ++ unsigned int cflag; ++ unsigned int iflag; ++ unsigned int quot; ++ unsigned int baud; ++ unsigned int con = 0; ++ unsigned long flags; ++ ++ cflag = new->c_cflag; ++ iflag = new->c_iflag; ++ ++ switch (cflag & CSIZE) { ++ case CS7: ++ con = ASCCON_M_7ASYNC; ++ break; ++ ++ case CS5: ++ case CS6: ++ default: ++ con = ASCCON_M_8ASYNC; ++ break; ++ } ++ ++ if (cflag & CSTOPB) ++ con |= ASCCON_STP; ++ ++ if (cflag & PARENB) { ++ if (!(cflag & PARODD)) ++ con &= ~ASCCON_ODD; ++ else ++ con |= ASCCON_ODD; ++ } ++ ++ port->read_status_mask = ASCSTATE_ROE; ++ if (iflag & INPCK) ++ port->read_status_mask |= ASCSTATE_FE | ASCSTATE_PE; ++ ++ port->ignore_status_mask = 0; ++ if (iflag & IGNPAR) ++ port->ignore_status_mask |= ASCSTATE_FE | ASCSTATE_PE; ++ ++ if (iflag & IGNBRK) { ++ /* ++ * If we're ignoring parity and break indicators, ++ * ignore overruns too (for real raw support). 
++ */ ++ if (iflag & IGNPAR) ++ port->ignore_status_mask |= ASCSTATE_ROE; ++ } ++ ++ if ((cflag & CREAD) == 0) ++ port->ignore_status_mask |= UART_DUMMY_UER_RX; ++ ++ /* set error signals - framing, parity and overrun, enable receiver */ ++ con |= ASCCON_FEN | ASCCON_TOEN | ASCCON_ROEN; ++ ++ local_irq_save(flags); ++ ++ /* set up CON */ ++ lq_w32_mask(0, con, port->membase + IFXMIPS_ASC_CON); ++ ++ /* Set baud rate - take a divider of 2 into account */ ++ baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16); ++ quot = uart_get_divisor(port, baud); ++ quot = quot / 2 - 1; ++ ++ /* disable the baudrate generator */ ++ lq_w32_mask(ASCCON_R, 0, port->membase + IFXMIPS_ASC_CON); ++ ++ /* make sure the fractional divider is off */ ++ lq_w32_mask(ASCCON_FDE, 0, port->membase + IFXMIPS_ASC_CON); ++ ++ /* set up to use divisor of 2 */ ++ lq_w32_mask(ASCCON_BRS, 0, port->membase + IFXMIPS_ASC_CON); ++ ++ /* now we can write the new baudrate into the register */ ++ lq_w32(quot, port->membase + IFXMIPS_ASC_BG); ++ ++ /* turn the baudrate generator back on */ ++ lq_w32_mask(0, ASCCON_R, port->membase + IFXMIPS_ASC_CON); ++ ++ /* enable rx */ ++ lq_w32(ASCWHBSTATE_SETREN, port->membase + IFXMIPS_ASC_WHBSTATE); ++ ++ local_irq_restore(flags); ++} ++ ++static const char* ++lqasc_type(struct uart_port *port) ++{ ++ if (port->type == PORT_IFXMIPSASC) ++ return DRVNAME; ++ else ++ return NULL; ++} ++ ++static void ++lqasc_release_port(struct uart_port *port) ++{ ++ if (port->flags & UPF_IOREMAP) { ++ iounmap(port->membase); ++ port->membase = NULL; ++ } ++} ++ ++static int ++lqasc_request_port(struct uart_port *port) ++{ ++ struct platform_device *pdev = to_platform_device(port->dev); ++ struct resource *mmres; ++ int size; ++ ++ mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!mmres) ++ return -ENODEV; ++ size = resource_size(mmres); ++ ++ if (port->flags & UPF_IOREMAP) { ++ port->membase = ioremap_nocache(port->mapbase, size); ++ if (port->membase == NULL) ++ return -ENOMEM; ++ } ++ return 0; ++} ++ ++static void ++lqasc_config_port(struct uart_port *port, int flags) ++{ ++ if (flags & UART_CONFIG_TYPE) { ++ port->type = PORT_IFXMIPSASC; ++ lqasc_request_port(port); ++ } ++} ++ ++static int ++lqasc_verify_port(struct uart_port *port, ++ struct serial_struct *ser) ++{ ++ int ret = 0; ++ if (ser->type != PORT_UNKNOWN && ser->type != PORT_IFXMIPSASC) ++ ret = -EINVAL; ++ if (ser->irq < 0 || ser->irq >= NR_IRQS) ++ ret = -EINVAL; ++ if (ser->baud_base < 9600) ++ ret = -EINVAL; ++ return ret; ++} ++ ++static struct uart_ops lqasc_pops = { ++ .tx_empty = lqasc_tx_empty, ++ .set_mctrl = lqasc_set_mctrl, ++ .get_mctrl = lqasc_get_mctrl, ++ .stop_tx = lqasc_stop_tx, ++ .start_tx = lqasc_start_tx, ++ .stop_rx = lqasc_stop_rx, ++ .enable_ms = lqasc_enable_ms, ++ .break_ctl = lqasc_break_ctl, ++ .startup = lqasc_startup, ++ .shutdown = lqasc_shutdown, ++ .set_termios = lqasc_set_termios, ++ .type = lqasc_type, ++ .release_port = lqasc_release_port, ++ .request_port = lqasc_request_port, ++ .config_port = lqasc_config_port, ++ .verify_port = lqasc_verify_port, ++}; ++ ++static void ++lqasc_console_putchar(struct uart_port *port, int ch) ++{ ++ int fifofree; ++ ++ if (!port->membase) ++ return; ++ ++ do { ++ fifofree = (lq_r32(port->membase + IFXMIPS_ASC_FSTAT) ++ & ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF; ++ } while (fifofree == 0); ++ lq_w8(ch, port->membase + IFXMIPS_ASC_TBUF); ++} ++ ++ ++static void ++lqasc_console_write(struct console *co, const char *s, u_int count) ++{ ++ struct 
lq_uart_port *ifx_port; ++ struct uart_port *port; ++ unsigned long flags; ++ ++ if (co->index >= MAXPORTS) ++ return; ++ ++ ifx_port = lqasc_port[co->index]; ++ if (!ifx_port) ++ return; ++ ++ port = &ifx_port->port; ++ ++ local_irq_save(flags); ++ uart_console_write(port, s, count, lqasc_console_putchar); ++ local_irq_restore(flags); ++} ++ ++static int __init ++lqasc_console_setup(struct console *co, char *options) ++{ ++ struct lq_uart_port *ifx_port; ++ struct uart_port *port; ++ int baud = 115200; ++ int bits = 8; ++ int parity = 'n'; ++ int flow = 'n'; ++ ++ if (co->index >= MAXPORTS) ++ return -ENODEV; ++ ++ ifx_port = lqasc_port[co->index]; ++ if (!ifx_port) ++ return -ENODEV; ++ ++ port = &ifx_port->port; ++ ++ port->uartclk = clk_get_rate(ifx_port->clk); ++ ++ if (options) ++ uart_parse_options(options, &baud, &parity, &bits, &flow); ++ return uart_set_options(port, co, baud, parity, bits, flow); ++} ++ ++static struct console lqasc_console = { ++ .name = "ttyS", ++ .write = lqasc_console_write, ++ .device = uart_console_device, ++ .setup = lqasc_console_setup, ++ .flags = CON_PRINTBUFFER, ++ .index = -1, ++ .data = &lqasc_reg, ++}; ++ ++static int __init ++lqasc_console_init(void) ++{ ++ register_console(&lqasc_console); ++ return 0; ++} ++console_initcall(lqasc_console_init); ++ ++static struct uart_driver lqasc_reg = { ++ .owner = THIS_MODULE, ++ .driver_name = DRVNAME, ++ .dev_name = "ttyS", ++ .major = TTY_MAJOR, ++ .minor = 64, ++ .nr = MAXPORTS, ++ .cons = &lqasc_console, ++}; ++ ++static int __devinit ++lqasc_probe(struct platform_device *pdev) ++{ ++ struct lq_uart_port *ifx_port; ++ struct uart_port *port; ++ struct resource *mmres, *irqres; ++ int tx_irq, rx_irq, err_irq; ++ struct clk *clk; ++ int ret; ++ ++ mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0); ++ if (!mmres || !irqres) ++ return -ENODEV; ++ ++ if (pdev->id >= MAXPORTS) ++ return -EBUSY; ++ ++ if (lqasc_port[pdev->id] != NULL) ++ return -EBUSY; ++ ++ clk = clk_get(&pdev->dev, "fpi"); ++ if (IS_ERR(clk)) { ++ pr_err("failed to get fpi clk\n"); ++ return -ENOENT; ++ } ++ ++ tx_irq = platform_get_irq_byname(pdev, "tx"); ++ if (tx_irq < 0) { ++ /* without named resources: assume standard irq scheme */ ++ tx_irq = irqres->start; ++ rx_irq = irqres->start+2; ++ err_irq = irqres->start+3; ++ } else { ++ /* other irqs must be named also! 
*/ ++ rx_irq = platform_get_irq_byname(pdev, "rx"); ++ err_irq = platform_get_irq_byname(pdev, "err"); ++ if ((rx_irq < 0) | (err_irq < 0)) ++ return -ENODEV; ++ } ++ ++ ifx_port = kzalloc(sizeof(struct lq_uart_port), GFP_KERNEL); ++ if (!ifx_port) ++ return -ENOMEM; ++ ++ port = &ifx_port->port; ++ ++ port->iotype = SERIAL_IO_MEM; ++ port->flags = ASYNC_BOOT_AUTOCONF | UPF_IOREMAP; ++ port->ops = &lqasc_pops; ++ port->fifosize = 16; ++ port->type = PORT_IFXMIPSASC, ++ port->line = pdev->id; ++ port->dev = &pdev->dev; ++ ++ port->irq = tx_irq; /* unused, just to be backward-compatibe */ ++ port->mapbase = mmres->start; ++ ++ ifx_port->clk = clk; ++ ++ ifx_port->tx_irq = tx_irq; ++ ifx_port->rx_irq = rx_irq; ++ ifx_port->err_irq = err_irq; ++ ++ lqasc_port[pdev->id] = ifx_port; ++ platform_set_drvdata(pdev, ifx_port); ++ ++ ret = uart_add_one_port(&lqasc_reg, port); ++ ++ return ret; ++} ++ ++static int __devexit ++lqasc_remove(struct platform_device *pdev) ++{ ++ struct lq_uart_port *ifx_port = platform_get_drvdata(pdev); ++ int ret; ++ ++ clk_put(ifx_port->clk); ++ platform_set_drvdata(pdev, NULL); ++ lqasc_port[pdev->id] = NULL; ++ ret = uart_remove_one_port(&lqasc_reg, &ifx_port->port); ++ kfree(ifx_port); ++ ++ return 0; ++} ++ ++static struct platform_driver lqasc_driver = { ++ .probe = lqasc_probe, ++ .remove = __devexit_p(lqasc_remove), ++ ++ .driver = { ++ .name = DRVNAME, ++ .owner = THIS_MODULE, ++ }, ++}; ++ ++int __init ++init_lqasc(void) ++{ ++ int ret; ++ ++ ret = uart_register_driver(&lqasc_reg); ++ if (ret != 0) ++ return ret; ++ ++ ret = platform_driver_register(&lqasc_driver); ++ if (ret != 0) ++ uart_unregister_driver(&lqasc_reg); ++ ++ return ret; ++} ++ ++void __exit ++exit_lqasc(void) ++{ ++ platform_driver_unregister(&lqasc_driver); ++ uart_unregister_driver(&lqasc_reg); ++} ++ ++module_init(init_lqasc); ++module_exit(exit_lqasc); ++ ++MODULE_DESCRIPTION("Lantiq serial port driver"); ++MODULE_LICENSE("GPL"); diff --git a/target/linux/lantiq/patches/210-nor.patch b/target/linux/lantiq/patches/210-nor.patch new file mode 100644 index 0000000000..510ac6bdf3 --- /dev/null +++ b/target/linux/lantiq/patches/210-nor.patch @@ -0,0 +1,245 @@ +--- a/drivers/mtd/maps/Kconfig ++++ b/drivers/mtd/maps/Kconfig +@@ -251,6 +251,12 @@ + help + Support for flash chips on NETtel/SecureEdge/SnapGear boards. + ++config MTD_LANTIQ ++ bool "Lantiq SoC NOR support" ++ depends on LANTIQ && MTD_PARTITIONS ++ help ++ Support for NOR flsh chips on Lantiq SoC ++ + config MTD_DILNETPC + tristate "CFI Flash device mapped on DIL/Net PC" + depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN +--- a/drivers/mtd/maps/Makefile ++++ b/drivers/mtd/maps/Makefile +@@ -59,3 +59,4 @@ + obj-$(CONFIG_MTD_RBTX4939) += rbtx4939-flash.o + obj-$(CONFIG_MTD_VMU) += vmu-flash.o + obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o ++obj-$(CONFIG_MTD_LANTIQ) += lantiq.o +--- /dev/null ++++ b/drivers/mtd/maps/lantiq.c +@@ -0,0 +1,169 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. 
++ * ++ * Copyright (C) 2004 Liu Peng Infineon IFAP DC COM CPE ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#include <linux/module.h> ++#include <linux/types.h> ++#include <linux/kernel.h> ++#include <linux/io.h> ++#include <linux/init.h> ++#include <linux/mtd/mtd.h> ++#include <linux/mtd/map.h> ++#include <linux/mtd/partitions.h> ++#include <linux/mtd/cfi.h> ++#include <linux/magic.h> ++#include <linux/platform_device.h> ++#include <linux/mtd/physmap.h> ++ ++#include <lantiq.h> ++#include <lantiq_platform.h> ++ ++static map_word ++lq_read16(struct map_info *map, unsigned long adr) ++{ ++ unsigned long flags; ++ map_word temp; ++ spin_lock_irqsave(&ebu_lock, flags); ++ adr ^= 2; ++ temp.x[0] = *((__u16 *)(map->virt + adr)); ++ spin_unlock_irqrestore(&ebu_lock, flags); ++ return temp; ++} ++ ++static void ++lq_write16(struct map_info *map, map_word d, unsigned long adr) ++{ ++ unsigned long flags; ++ spin_lock_irqsave(&ebu_lock, flags); ++ adr ^= 2; ++ *((__u16 *)(map->virt + adr)) = d.x[0]; ++ spin_unlock_irqrestore(&ebu_lock, flags); ++} ++ ++void ++lq_copy_from(struct map_info *map, void *to, ++ unsigned long from, ssize_t len) ++{ ++ unsigned char *p; ++ unsigned char *to_8; ++ unsigned long flags; ++ spin_lock_irqsave(&ebu_lock, flags); ++ from = (unsigned long)(from + map->virt); ++ p = (unsigned char *) from; ++ to_8 = (unsigned char *) to; ++ while (len--) ++ *to_8++ = *p++; ++ spin_unlock_irqrestore(&ebu_lock, flags); ++} ++ ++void ++lq_copy_to(struct map_info *map, unsigned long to, ++ const void *from, ssize_t len) ++{ ++ unsigned char *p = (unsigned char *)from; ++ unsigned char *to_8; ++ unsigned long flags; ++ spin_lock_irqsave(&ebu_lock, flags); ++ to += (unsigned long) map->virt; ++ to_8 = (unsigned char *)to; ++ while (len--) ++ *p++ = *to_8++; ++ spin_unlock_irqrestore(&ebu_lock, flags); ++} ++ ++static const char *part_probe_types[] = { "cmdlinepart", NULL }; ++ ++static struct map_info lq_map = { ++ .name = "lq_nor", ++ .bankwidth = 2, ++ .read = lq_read16, ++ .write = lq_write16, ++ .copy_from = lq_copy_from, ++ .copy_to = lq_copy_to, ++}; ++ ++static int ++lq_mtd_probe(struct platform_device *pdev) ++{ ++ struct physmap_flash_data *lq_mtd_data = ++ (struct physmap_flash_data*) dev_get_platdata(&pdev->dev); ++ struct mtd_info *lq_mtd = NULL; ++ struct mtd_partition *parts = NULL; ++ struct resource *res = 0; ++ int nr_parts = 0; ++ ++#ifdef CONFIG_SOC_LANTIQ_XWAY ++ lq_w32(lq_r32(LQ_EBU_BUSCON0) & ~EBU_WRDIS, LQ_EBU_BUSCON0); ++#endif ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if(!res) ++ { ++ dev_err(&pdev->dev, "failed to get memory resource"); ++ return -ENOENT; ++ } ++ res = request_mem_region(res->start, resource_size(res), ++ dev_name(&pdev->dev)); ++ if(!res) ++ { ++ dev_err(&pdev->dev, "failed to request mem resource"); ++ return -EBUSY; ++ } ++ ++ lq_map.phys = res->start; ++ lq_map.size = resource_size(res); ++ lq_map.virt = ioremap_nocache(lq_map.phys, lq_map.size); ++ ++ if (!lq_map.virt ) { ++ dev_err(&pdev->dev, "failed to ioremap!\n"); ++ return -EIO; ++ } ++ ++ lq_mtd = (struct mtd_info *) do_map_probe("cfi_probe", &lq_map); ++ if (!lq_mtd) { ++ iounmap(lq_map.virt); ++ dev_err(&pdev->dev, "probing failed\n"); ++ return -ENXIO; ++ } ++ ++ lq_mtd->owner = THIS_MODULE; ++ ++ nr_parts = parse_mtd_partitions(lq_mtd, part_probe_types, &parts, 0); ++ if (nr_parts > 0) { ++ dev_info(&pdev->dev, "using %d partitions from cmdline", nr_parts); ++ } else { ++ nr_parts = lq_mtd_data->nr_parts; ++ parts = 
lq_mtd_data->parts; ++ } ++ ++ add_mtd_partitions(lq_mtd, parts, nr_parts); ++ return 0; ++} ++ ++static struct platform_driver lq_mtd_driver = { ++ .probe = lq_mtd_probe, ++ .driver = { ++ .name = "lq_nor", ++ .owner = THIS_MODULE, ++ }, ++}; ++ ++int __init ++init_lq_mtd(void) ++{ ++ int ret = platform_driver_register(&lq_mtd_driver); ++ if (ret) ++ printk(KERN_INFO "lq_nor: error registering platfom driver"); ++ return ret; ++} ++ ++module_init(init_lq_mtd); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); ++MODULE_DESCRIPTION("Lantiq SoC NOR"); +--- a/drivers/mtd/chips/cfi_cmdset_0001.c ++++ b/drivers/mtd/chips/cfi_cmdset_0001.c +@@ -41,7 +41,11 @@ + /* #define CMDSET0001_DISABLE_WRITE_SUSPEND */ + + // debugging, turns off buffer write mode if set to 1 +-#define FORCE_WORD_WRITE 0 ++#ifdef CONFIG_LANTIQ ++# define FORCE_WORD_WRITE 1 ++#else ++# define FORCE_WORD_WRITE 0 ++#endif + + /* Intel chips */ + #define I82802AB 0x00ad +@@ -1491,6 +1495,9 @@ + int ret=0; + + adr += chip->start; ++#ifdef CONFIG_LANTIQ ++ adr ^= 2; ++#endif + + switch (mode) { + case FL_WRITING: +--- a/drivers/mtd/chips/cfi_cmdset_0002.c ++++ b/drivers/mtd/chips/cfi_cmdset_0002.c +@@ -40,7 +40,11 @@ + #include <linux/mtd/xip.h> + + #define AMD_BOOTLOC_BUG +-#define FORCE_WORD_WRITE 0 ++#ifdef CONFIG_LANTIQ ++# define FORCE_WORD_WRITE 1 ++#else ++# define FORCE_WORD_WRITE 0 ++#endif + + #define MAX_WORD_RETRIES 3 + +@@ -1156,6 +1160,10 @@ + + adr += chip->start; + ++#ifdef CONFIG_LANTIQ ++ adr ^= 2; ++#endif ++ + mutex_lock(&chip->mutex); + ret = get_chip(map, chip, adr, FL_WRITING); + if (ret) { diff --git a/target/linux/lantiq/patches/211-nor_split.patch b/target/linux/lantiq/patches/211-nor_split.patch new file mode 100644 index 0000000000..2f77681b8c --- /dev/null +++ b/target/linux/lantiq/patches/211-nor_split.patch @@ -0,0 +1,99 @@ +--- a/drivers/mtd/maps/lantiq.c ++++ b/drivers/mtd/maps/lantiq.c +@@ -24,6 +24,10 @@ + #include <lantiq.h> + #include <lantiq_platform.h> + ++#ifdef CONFIG_SOC_LANTIQ_XWAY ++#include <xway.h> ++#endif ++ + static map_word + lq_read16(struct map_info *map, unsigned long adr) + { +@@ -77,6 +81,75 @@ lq_copy_to(struct map_info *map, unsigne + spin_unlock_irqrestore(&ebu_lock, flags); + } + ++static unsigned long ++find_uImage_size(struct map_info *map, unsigned long offset) ++{ ++#define UBOOT_MAGIC 0x56190527 ++ unsigned long magic; ++ unsigned long temp; ++ map->copy_from(map, &magic, offset, 4); ++ if (le32_to_cpu(magic) != UBOOT_MAGIC) ++ return 0; ++ map->copy_from(map, &temp, offset + 12, 4); ++ return temp + 0x40; ++} ++ ++static int ++detect_squashfs_partition(struct map_info *map, unsigned long offset) ++{ ++ unsigned long temp; ++ map->copy_from(map, &temp, offset, 4); ++ return le32_to_cpu(temp) == SQUASHFS_MAGIC; ++} ++ ++static struct mtd_partition split_partitions[] = { ++ { ++ .name = "kernel", ++ .offset = 0x0, ++ .size = 0x0, ++ }, { ++ .name = "rootfs", ++ .offset = 0x0, ++ .size = 0x0, ++ }, ++}; ++ ++static int ++mtd_split_linux(struct map_info *map, struct mtd_info *mtd, ++ struct mtd_partition *parts, int nr_parts) ++{ ++ int base_part = 0; ++ int i; ++ for (i = 0; i < nr_parts && !base_part; i++) { ++ if(!strcmp("linux", parts[i].name)) ++ base_part = i; ++ } ++ if (!base_part) ++ return 0; ++ split_partitions[0].size = find_uImage_size(map, parts[base_part].offset); ++ if (!split_partitions[0].size) { ++ printk(KERN_INFO "lq_nor: no uImage found in linux partition"); ++ return -1; ++ } ++ if (!detect_squashfs_partition(map, ++ 
parts[base_part].offset + split_partitions[0].size)) { ++ split_partitions[0].size &= ~(mtd->erasesize - 1); ++ split_partitions[0].size += mtd->erasesize; ++ } ++ split_partitions[0].offset = parts[base_part].offset; ++ split_partitions[1].offset = ++ parts[base_part].offset + split_partitions[0].size; ++ split_partitions[1].size = ++ parts[base_part].size - split_partitions[0].size; ++ ++ base_part++; ++ add_mtd_partitions(mtd, parts, base_part); ++ add_mtd_partitions(mtd, split_partitions, 2); ++ if(nr_parts != base_part) ++ add_mtd_partitions(mtd, &parts[base_part], nr_parts - base_part); ++ return nr_parts + 2; ++} ++ + static const char *part_probe_types[] = { "cmdlinepart", NULL }; + + static struct map_info lq_map = { +@@ -142,7 +215,8 @@ lq_mtd_probe(struct platform_device *pde + parts = lq_mtd_data->parts; + } + +- add_mtd_partitions(lq_mtd, parts, nr_parts); ++ if (!mtd_split_linux(&lq_map, lq_mtd, parts, nr_parts)) ++ add_mtd_partitions(lq_mtd, parts, nr_parts); + return 0; + } + diff --git a/target/linux/lantiq/patches/230-xway_etop.patch b/target/linux/lantiq/patches/230-xway_etop.patch new file mode 100644 index 0000000000..28955cf9ad --- /dev/null +++ b/target/linux/lantiq/patches/230-xway_etop.patch @@ -0,0 +1,580 @@ +--- a/drivers/net/Kconfig ++++ b/drivers/net/Kconfig +@@ -343,6 +343,12 @@ config MACB + + source "drivers/net/arm/Kconfig" + ++config LANTIQ_ETOP ++ tristate "Lantiq SoC ETOP driver" ++ depends on SOC_LANTIQ_XWAY ++ help ++ Support for the MII0 inside the Lantiq SoC ++ + config AX88796 + tristate "ASIX AX88796 NE2000 clone support" + depends on ARM || MIPS || SUPERH +--- a/drivers/net/Makefile ++++ b/drivers/net/Makefile +@@ -204,6 +204,7 @@ obj-$(CONFIG_SNI_82596) += sni_82596.o + obj-$(CONFIG_MVME16x_NET) += 82596.o + obj-$(CONFIG_BVME6000_NET) += 82596.o + obj-$(CONFIG_SC92031) += sc92031.o ++obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o + + # This is also a 82596 and should probably be merged + obj-$(CONFIG_LP486E) += lp486e.o +--- /dev/null ++++ b/drivers/net/lantiq_etop.c +@@ -0,0 +1,552 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 
++ * ++ * Copyright (C) 2005 Wu Qi Ming <Qi-Ming.Wu@infineon.com> ++ * Copyright (C) 2008 John Crispin <blogic@openwrt.org> ++ */ ++ ++#include <linux/kernel.h> ++#include <linux/slab.h> ++#include <linux/errno.h> ++#include <linux/types.h> ++#include <linux/interrupt.h> ++#include <linux/uaccess.h> ++#include <linux/in.h> ++#include <linux/netdevice.h> ++#include <linux/etherdevice.h> ++#include <linux/phy.h> ++#include <linux/ip.h> ++#include <linux/tcp.h> ++#include <linux/skbuff.h> ++#include <linux/mm.h> ++#include <linux/platform_device.h> ++#include <linux/ethtool.h> ++#include <linux/init.h> ++#include <linux/delay.h> ++ ++#include <asm/checksum.h> ++ ++#include <xway.h> ++#include <xway_dma.h> ++#include <lantiq_platform.h> ++ ++#define ETHERNET_PACKET_DMA_BUFFER_SIZE 0x600 ++#define LQ_PPE32_MEM_MAP ((u32 *)(LQ_PPE32_BASE_ADDR + 0x10000)) ++#define LQ_PPE32_SRST ((u32 *)(LQ_PPE32_BASE_ADDR + 0x10080)) ++ ++/* mdio access */ ++#define LQ_PPE32_MDIO_CFG ((u32 *)(LQ_PPE32_BASE_ADDR + 0x11800)) ++#define LQ_PPE32_MDIO_ACC ((u32 *)(LQ_PPE32_BASE_ADDR + 0x11804)) ++ ++#define MDIO_ACC_REQUEST 0x80000000 ++#define MDIO_ACC_READ 0x40000000 ++#define MDIO_ACC_ADDR_MASK 0x1f ++#define MDIO_ACC_ADDR_OFFSET 0x15 ++#define MDIO_ACC_REG_MASK 0x1f ++#define MDIO_ACC_REG_OFFSET 0x10 ++#define MDIO_ACC_VAL_MASK 0xffff ++ ++/* configuration */ ++#define LQ_PPE32_CFG ((u32 *)(LQ_PPE32_MEM_MAP + 0x1808)) ++ ++#define PPE32_MII_MASK 0xfffffffc ++#define PPE32_MII_NORMAL 0x8 ++#define PPE32_MII_REVERSE 0xe ++ ++/* packet length */ ++#define LQ_PPE32_IG_PLEN_CTRL ((u32 *)(LQ_PPE32_MEM_MAP + 0x1820)) ++ ++#define PPE32_PLEN_OVER 0x5ee ++#define PPE32_PLEN_UNDER 0x400000 ++ ++/* enet */ ++#define LQ_PPE32_ENET_MAC_CFG ((u32 *)(LQ_PPE32_MEM_MAP + 0x1840)) ++ ++#define PPE32_CGEN 0x800 ++ ++struct lq_mii_priv { ++ struct net_device_stats stats; ++ struct dma_device_info *dma_device; ++ struct sk_buff *skb; ++ ++ struct mii_bus *mii_bus; ++ struct phy_device *phydev; ++ int oldlink, oldspeed, oldduplex; ++}; ++ ++static struct net_device *lq_etop_dev; ++static unsigned char mac_addr[MAX_ADDR_LEN]; ++ ++static int lq_mdiobus_write(struct mii_bus *bus, int phy_addr, ++ int phy_reg, u16 phy_data) ++{ ++ u32 val = MDIO_ACC_REQUEST | ++ ((phy_addr & MDIO_ACC_ADDR_MASK) << MDIO_ACC_ADDR_OFFSET) | ++ ((phy_reg & MDIO_ACC_REG_MASK) << MDIO_ACC_REG_OFFSET) | ++ phy_data; ++ ++ while (lq_r32(LQ_PPE32_MDIO_ACC) & MDIO_ACC_REQUEST) ++ ; ++ lq_w32(val, LQ_PPE32_MDIO_ACC); ++ ++ return 0; ++} ++ ++static int lq_mdiobus_read(struct mii_bus *bus, int phy_addr, int phy_reg) ++{ ++ u32 val = MDIO_ACC_REQUEST | MDIO_ACC_READ | ++ ((phy_addr & MDIO_ACC_ADDR_MASK) << MDIO_ACC_ADDR_OFFSET) | ++ ((phy_reg & MDIO_ACC_REG_MASK) << MDIO_ACC_REG_OFFSET); ++ ++ while (lq_r32(LQ_PPE32_MDIO_ACC) & MDIO_ACC_REQUEST) ++ ; ++ lq_w32(val, LQ_PPE32_MDIO_ACC); ++ while (lq_r32(LQ_PPE32_MDIO_ACC) & MDIO_ACC_REQUEST) ++ ; ++ val = lq_r32(LQ_PPE32_MDIO_ACC) & MDIO_ACC_VAL_MASK; ++ return val; ++} ++ ++int lq_mii_open(struct net_device *dev) ++{ ++ struct lq_mii_priv *priv = (struct lq_mii_priv *)netdev_priv(dev); ++ struct dma_device_info *dma_dev = priv->dma_device; ++ int i; ++ ++ for (i = 0; i < dma_dev->max_rx_chan_num; i++) { ++ if ((dma_dev->rx_chan[i])->control == LQ_DMA_CH_ON) ++ (dma_dev->rx_chan[i])->open(dma_dev->rx_chan[i]); ++ } ++ netif_start_queue(dev); ++ return 0; ++} ++ ++int lq_mii_release(struct net_device *dev) ++{ ++ struct lq_mii_priv *priv = (struct lq_mii_priv *)netdev_priv(dev); ++ struct dma_device_info *dma_dev = 
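To make the MDIO_ACC encoding used by lq_mdiobus_read() and lq_mdiobus_write() above concrete, this is the word built for a read of PHY address 0, register 2 (MII_PHYSID1), using only the macros from this patch:

/*   MDIO_ACC_REQUEST | MDIO_ACC_READ                              = 0xC0000000
 * | (0 & MDIO_ACC_ADDR_MASK) << 0x15   (bits 25:21, PHY address)
 * | (2 & MDIO_ACC_REG_MASK)  << 0x10   (bits 20:16, register)     = 0x00020000
 *                                                          total  = 0xC0020000
 * The REQUEST bit is polled until the hardware clears it, and the low
 * 16 bits (MDIO_ACC_VAL_MASK) then hold the returned register value.
 */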
priv->dma_device; ++ int i; ++ ++ for (i = 0; i < dma_dev->max_rx_chan_num; i++) ++ dma_dev->rx_chan[i]->close(dma_dev->rx_chan[i]); ++ netif_stop_queue(dev); ++ return 0; ++} ++ ++int lq_mii_hw_receive(struct net_device *dev, struct dma_device_info *dma_dev) ++{ ++ struct lq_mii_priv *priv = (struct lq_mii_priv *)netdev_priv(dev); ++ unsigned char *buf = NULL; ++ struct sk_buff *skb = NULL; ++ int len = 0; ++ ++ len = dma_device_read(dma_dev, &buf, (void **)&skb); ++ ++ if (len >= ETHERNET_PACKET_DMA_BUFFER_SIZE) { ++ printk(KERN_INFO "lq_etop: packet too large %d\n", len); ++ goto lq_mii_hw_receive_err_exit; ++ } ++ ++ /* remove CRC */ ++ len -= 4; ++ if (skb == NULL) { ++ printk(KERN_INFO "lq_etop: cannot restore pointer\n"); ++ goto lq_mii_hw_receive_err_exit; ++ } ++ ++ if (len > (skb->end - skb->tail)) { ++ printk(KERN_INFO "lq_etop: BUG, len:%d end:%p tail:%p\n", ++ (len+4), skb->end, skb->tail); ++ goto lq_mii_hw_receive_err_exit; ++ } ++ ++ skb_put(skb, len); ++ skb->dev = dev; ++ skb->protocol = eth_type_trans(skb, dev); ++ netif_rx(skb); ++ ++ priv->stats.rx_packets++; ++ priv->stats.rx_bytes += len; ++ return 0; ++ ++lq_mii_hw_receive_err_exit: ++ if (len == 0) { ++ if (skb) ++ dev_kfree_skb_any(skb); ++ priv->stats.rx_errors++; ++ priv->stats.rx_dropped++; ++ return -EIO; ++ } else { ++ return len; ++ } ++} ++ ++int lq_mii_hw_tx(char *buf, int len, struct net_device *dev) ++{ ++ int ret = 0; ++ struct lq_mii_priv *priv = netdev_priv(dev); ++ struct dma_device_info *dma_dev = priv->dma_device; ++ ret = dma_device_write(dma_dev, buf, len, priv->skb); ++ return ret; ++} ++ ++int lq_mii_tx(struct sk_buff *skb, struct net_device *dev) ++{ ++ int len; ++ char *data; ++ struct lq_mii_priv *priv = netdev_priv(dev); ++ struct dma_device_info *dma_dev = priv->dma_device; ++ ++ len = skb->len < ETH_ZLEN ? 
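lq_mii_tx() clamps the DMA length to ETH_ZLEN (60 bytes, the minimum Ethernet frame length without the FCS) for short frames, but only the length is stretched; the padding bytes are whatever happens to sit behind skb->data. A common alternative, sketched here only for illustration and not part of this patch, is to zero-pad explicitly:

/* hedged sketch: explicit zero padding of short frames */
static int pad_short_frame(struct sk_buff *skb)
{
	if (skb->len >= ETH_ZLEN)
		return 0;
	/* skb_padto() zero-fills the tail up to ETH_ZLEN and frees the
	 * skb itself when the buffer cannot be grown */
	return skb_padto(skb, ETH_ZLEN);
}

The transmit handler would then pass ETH_ZLEN to the DMA for such frames, as the existing clamp already does.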
ETH_ZLEN : skb->len; ++ data = skb->data; ++ priv->skb = skb; ++ dev->trans_start = jiffies; ++ /* TODO: we got more than 1 dma channel, ++ so we should do something intelligent here to select one */ ++ dma_dev->current_tx_chan = 0; ++ ++ wmb(); ++ ++ if (lq_mii_hw_tx(data, len, dev) != len) { ++ dev_kfree_skb_any(skb); ++ priv->stats.tx_errors++; ++ priv->stats.tx_dropped++; ++ } else { ++ priv->stats.tx_packets++; ++ priv->stats.tx_bytes += len; ++ } ++ ++ return 0; ++} ++ ++void lq_mii_tx_timeout(struct net_device *dev) ++{ ++ int i; ++ struct lq_mii_priv *priv = (struct lq_mii_priv *)netdev_priv(dev); ++ ++ priv->stats.tx_errors++; ++ for (i = 0; i < priv->dma_device->max_tx_chan_num; i++) ++ priv->dma_device->tx_chan[i]->disable_irq(priv->dma_device->tx_chan[i]); ++ netif_wake_queue(dev); ++ return; ++} ++ ++int dma_intr_handler(struct dma_device_info *dma_dev, int status) ++{ ++ int i; ++ ++ switch (status) { ++ case RCV_INT: ++ lq_mii_hw_receive(lq_etop_dev, dma_dev); ++ break; ++ ++ case TX_BUF_FULL_INT: ++ printk(KERN_INFO "lq_etop: tx buffer full\n"); ++ netif_stop_queue(lq_etop_dev); ++ for (i = 0; i < dma_dev->max_tx_chan_num; i++) { ++ if ((dma_dev->tx_chan[i])->control == LQ_DMA_CH_ON) ++ dma_dev->tx_chan[i]->enable_irq(dma_dev->tx_chan[i]); ++ } ++ break; ++ ++ case TRANSMIT_CPT_INT: ++ for (i = 0; i < dma_dev->max_tx_chan_num; i++) ++ dma_dev->tx_chan[i]->disable_irq(dma_dev->tx_chan[i]); ++ ++ netif_wake_queue(lq_etop_dev); ++ break; ++ } ++ ++ return 0; ++} ++ ++unsigned char *lq_etop_dma_buffer_alloc(int len, int *byte_offset, void **opt) ++{ ++ unsigned char *buffer = NULL; ++ struct sk_buff *skb = NULL; ++ ++ skb = dev_alloc_skb(ETHERNET_PACKET_DMA_BUFFER_SIZE); ++ if (skb == NULL) ++ return NULL; ++ ++ buffer = (unsigned char *)(skb->data); ++ skb_reserve(skb, 2); ++ *(int *)opt = (int)skb; ++ *byte_offset = 2; ++ ++ return buffer; ++} ++ ++void lq_etop_dma_buffer_free(unsigned char *dataptr, void *opt) ++{ ++ struct sk_buff *skb = NULL; ++ ++ if (opt == NULL) { ++ kfree(dataptr); ++ } else { ++ skb = (struct sk_buff *)opt; ++ dev_kfree_skb_any(skb); ++ } ++} ++ ++static void ++lq_adjust_link(struct net_device *dev) ++{ ++ struct lq_mii_priv *priv = netdev_priv(dev); ++ struct phy_device *phydev = priv->phydev; ++ int new_state = 0; ++ ++ /* Did anything change? */ ++ if (priv->oldlink != phydev->link || ++ priv->oldduplex != phydev->duplex || ++ priv->oldspeed != phydev->speed) { ++ /* Yes, so update status and mark as changed */ ++ new_state = 1; ++ priv->oldduplex = phydev->duplex; ++ priv->oldspeed = phydev->speed; ++ priv->oldlink = phydev->link; ++ } ++ ++ /* If link status changed, show new status */ ++ if (new_state) ++ phy_print_status(phydev); ++} ++ ++static int mii_probe(struct net_device *dev) ++{ ++ struct lq_mii_priv *priv = netdev_priv(dev); ++ struct phy_device *phydev = NULL; ++ int phy_addr; ++ ++ priv->oldlink = 0; ++ priv->oldspeed = 0; ++ priv->oldduplex = -1; ++ ++ /* find the first (lowest address) PHY on the current MAC's MII bus */ ++ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { ++ if (priv->mii_bus->phy_map[phy_addr]) { ++ phydev = priv->mii_bus->phy_map[phy_addr]; ++ break; /* break out with first one found */ ++ } ++ } ++ ++ if (!phydev) { ++ printk (KERN_ERR "%s: no PHY found\n", dev->name); ++ return -ENODEV; ++ } ++ ++ /* now we are supposed to have a proper phydev, to attach to... 
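A short note on lq_etop_dma_buffer_alloc() above: the pointer handed back to the DMA core is skb->data as it was before the skb_reserve(skb, 2), while *byte_offset = 2 apparently tells the DMA code to deposit the frame two bytes into that buffer, i.e. where skb->data points after the reserve. With a 14-byte Ethernet header this is the usual NET_IP_ALIGN trick that lands the IP header on a 4-byte boundary.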
*/ ++ BUG_ON(!phydev); ++ BUG_ON(phydev->attached_dev); ++ ++ phydev = phy_connect(dev, dev_name(&phydev->dev), &lq_adjust_link, ++ 0, PHY_INTERFACE_MODE_MII); ++ ++ if (IS_ERR(phydev)) { ++ printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); ++ return PTR_ERR(phydev); ++ } ++ ++ /* mask with MAC supported features */ ++ phydev->supported &= (SUPPORTED_10baseT_Half ++ | SUPPORTED_10baseT_Full ++ | SUPPORTED_100baseT_Half ++ | SUPPORTED_100baseT_Full ++ | SUPPORTED_Autoneg ++ /* | SUPPORTED_Pause | SUPPORTED_Asym_Pause */ ++ | SUPPORTED_MII ++ | SUPPORTED_TP); ++ ++ phydev->advertising = phydev->supported; ++ ++ priv->phydev = phydev; ++ ++ printk(KERN_INFO "%s: attached PHY driver [%s] " ++ "(mii_bus:phy_addr=%s, irq=%d)\n", ++ dev->name, phydev->drv->name, dev_name(&phydev->dev), phydev->irq); ++ ++ return 0; ++} ++ ++ ++static int lq_mii_dev_init(struct net_device *dev) ++{ ++ int i; ++ struct lq_mii_priv *priv = (struct lq_mii_priv *)netdev_priv(dev); ++ ether_setup(dev); ++ dev->watchdog_timeo = 10 * HZ; ++ dev->mtu = 1500; ++ memset(priv, 0, sizeof(struct lq_mii_priv)); ++ priv->dma_device = dma_device_reserve("PPE"); ++ if (!priv->dma_device) { ++ BUG(); ++ return -ENODEV; ++ } ++ priv->dma_device->buffer_alloc = &lq_etop_dma_buffer_alloc; ++ priv->dma_device->buffer_free = &lq_etop_dma_buffer_free; ++ priv->dma_device->intr_handler = &dma_intr_handler; ++ priv->dma_device->max_rx_chan_num = 4; ++ ++ for (i = 0; i < priv->dma_device->max_rx_chan_num; i++) { ++ priv->dma_device->rx_chan[i]->packet_size = ETHERNET_PACKET_DMA_BUFFER_SIZE; ++ priv->dma_device->rx_chan[i]->control = LQ_DMA_CH_ON; ++ } ++ ++ for (i = 0; i < priv->dma_device->max_tx_chan_num; i++) ++ if (i == 0) ++ priv->dma_device->tx_chan[i]->control = LQ_DMA_CH_ON; ++ else ++ priv->dma_device->tx_chan[i]->control = LQ_DMA_CH_OFF; ++ ++ dma_device_register(priv->dma_device); ++ ++ printk(KERN_INFO "%s: using mac=", dev->name); ++ for (i = 0; i < 6; i++) { ++ dev->dev_addr[i] = mac_addr[i]; ++ printk("%02X%c", dev->dev_addr[i], (i == 5) ? 
('\n') : (':')); ++ } ++ ++ priv->mii_bus = mdiobus_alloc(); ++ if (priv->mii_bus == NULL) ++ return -ENOMEM; ++ ++ priv->mii_bus->priv = dev; ++ priv->mii_bus->read = lq_mdiobus_read; ++ priv->mii_bus->write = lq_mdiobus_write; ++ priv->mii_bus->name = "lq_mii"; ++ snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0); ++ priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); ++ for(i = 0; i < PHY_MAX_ADDR; ++i) ++ priv->mii_bus->irq[i] = PHY_POLL; ++ ++ mdiobus_register(priv->mii_bus); ++ ++ return mii_probe(dev); ++} ++ ++static void lq_mii_chip_init(int mode) ++{ ++ lq_pmu_enable(PMU_DMA); ++ lq_pmu_enable(PMU_PPE); ++ ++ if (mode == REV_MII_MODE) ++ lq_w32_mask(PPE32_MII_MASK, PPE32_MII_REVERSE, LQ_PPE32_CFG); ++ else if (mode == MII_MODE) ++ lq_w32_mask(PPE32_MII_MASK, PPE32_MII_NORMAL, LQ_PPE32_CFG); ++ lq_w32(PPE32_PLEN_UNDER | PPE32_PLEN_OVER, LQ_PPE32_IG_PLEN_CTRL); ++ lq_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG); ++ wmb(); ++} ++ ++static int lq_mii_eth_mac_addr(struct net_device *dev, void *p) ++{ ++ int retcode; ++ ++ retcode = eth_mac_addr(dev, p); ++ ++ if (retcode) ++ return retcode; ++ ++ // set rx_addr for unicast filter ++ lq_w32(((dev->dev_addr[0]<<24)|(dev->dev_addr[1]<<16)|(dev->dev_addr[2]<< 8)|dev->dev_addr[3]), (u32*)(LQ_PPE32_BASE_ADDR|(0x461b<<2))); ++ lq_w32(((dev->dev_addr[4]<<24)|(dev->dev_addr[5]<<16)), (u32*)(LQ_PPE32_BASE_ADDR|(0x461c<<2))); ++ ++ return 0; ++} ++ ++static void lq_mii_set_rx_mode (struct net_device *dev) ++{ ++ // rx_mode promisc: unset unicast filter ++ if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) ++ lq_w32(lq_r32((u32*)(LQ_PPE32_BASE_ADDR|(0x4614<<2))) & ~(1<<28), (u32*)(LQ_PPE32_BASE_ADDR|(0x4614<<2))); ++ // enable unicast filter ++ else ++ lq_w32(lq_r32((u32*)(LQ_PPE32_BASE_ADDR|(0x4614<<2))) | (1<<28), (u32*)(LQ_PPE32_BASE_ADDR|(0x4614<<2))); ++} ++ ++static const struct net_device_ops lq_eth_netdev_ops = { ++ .ndo_init = lq_mii_dev_init, ++ .ndo_open = lq_mii_open, ++ .ndo_stop = lq_mii_release, ++ .ndo_start_xmit = lq_mii_tx, ++ .ndo_tx_timeout = lq_mii_tx_timeout, ++ .ndo_change_mtu = eth_change_mtu, ++ .ndo_set_mac_address = lq_mii_eth_mac_addr, ++ .ndo_validate_addr = eth_validate_addr, ++ .ndo_set_multicast_list = lq_mii_set_rx_mode, ++}; ++ ++static int ++lq_mii_probe(struct platform_device *dev) ++{ ++ int result = 0; ++ struct lq_eth_data *eth = (struct lq_eth_data*)dev->dev.platform_data; ++ lq_etop_dev = alloc_etherdev(sizeof(struct lq_mii_priv)); ++ lq_etop_dev->netdev_ops = &lq_eth_netdev_ops; ++ memcpy(mac_addr, eth->mac, 6); ++ strcpy(lq_etop_dev->name, "eth%d"); ++ lq_mii_chip_init(eth->mii_mode); ++ result = register_netdev(lq_etop_dev); ++ if (result) { ++ printk(KERN_INFO "lq_etop: error %i registering device \"%s\"\n", result, lq_etop_dev->name); ++ goto out; ++ } ++ ++ printk(KERN_INFO "lq_etop: driver loaded!\n"); ++ ++out: ++ return result; ++} ++ ++static int lq_mii_remove(struct platform_device *dev) ++{ ++ struct lq_mii_priv *priv = (struct lq_mii_priv *)netdev_priv(lq_etop_dev); ++ ++ printk(KERN_INFO "lq_etop: lq_etop cleanup\n"); ++ ++ dma_device_unregister(priv->dma_device); ++ dma_device_release(priv->dma_device); ++ kfree(priv->dma_device); ++ unregister_netdev(lq_etop_dev); ++ return 0; ++} ++ ++static struct platform_driver lq_mii_driver = { ++ .probe = lq_mii_probe, ++ .remove = lq_mii_remove, ++ .driver = { ++ .name = "lq_etop", ++ .owner = THIS_MODULE, ++ }, ++}; ++ ++int __init lq_mii_init(void) ++{ ++ int ret = platform_driver_register(&lq_mii_driver); ++ if (ret) ++ 
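lq_mii_probe() above takes the MAC address and MII mode from dev->dev.platform_data, so the driver only binds once board code registers a matching "lq_etop" platform device. A minimal, purely illustrative sketch of that board side, assuming struct lq_eth_data carries a six-byte mac[] array and an int mii_mode (only their use in lq_mii_probe() and lq_mii_chip_init() is visible here; everything else is made up for the example):

static struct lq_eth_data board_eth_data = {
	.mii_mode	= MII_MODE,	/* or REV_MII_MODE, see lq_mii_chip_init() */
	.mac		= { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
};

static struct platform_device board_eth_dev = {
	.name	= "lq_etop",		/* must match lq_mii_driver.driver.name */
	.id	= -1,
	.dev	= {
		.platform_data = &board_eth_data,
	},
};

/* in the board setup code: platform_device_register(&board_eth_dev); */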
printk(KERN_INFO "lq_etop: Error registering platfom driver!"); ++ return ret; ++} ++ ++static void __exit lq_mii_cleanup(void) ++{ ++ platform_driver_unregister(&lq_mii_driver); ++} ++ ++module_init(lq_mii_init); ++module_exit(lq_mii_cleanup); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); ++MODULE_DESCRIPTION("ethernet driver for IFXMIPS boards"); diff --git a/target/linux/lantiq/patches/250-watchdog.patch b/target/linux/lantiq/patches/250-watchdog.patch new file mode 100644 index 0000000000..f86ff71868 --- /dev/null +++ b/target/linux/lantiq/patches/250-watchdog.patch @@ -0,0 +1,246 @@ +--- a/drivers/watchdog/Kconfig ++++ b/drivers/watchdog/Kconfig +@@ -840,6 +840,12 @@ config TXX9_WDT + help + Hardware driver for the built-in watchdog timer on TXx9 MIPS SoCs. + ++config LANTIQ_WDT ++ bool "Lantiq SoC watchdog" ++ depends on LANTIQ ++ help ++ Hardware driver for the Lantiq SoC Watchdog Timer. ++ + # PARISC Architecture + + # POWERPC Architecture +--- a/drivers/watchdog/Makefile ++++ b/drivers/watchdog/Makefile +@@ -112,6 +112,7 @@ obj-$(CONFIG_PNX833X_WDT) += pnx833x_wdt + obj-$(CONFIG_SIBYTE_WDOG) += sb_wdog.o + obj-$(CONFIG_AR7_WDT) += ar7_wdt.o + obj-$(CONFIG_TXX9_WDT) += txx9wdt.o ++obj-$(CONFIG_LANTIQ_WDT) += lantiq_wdt.o + + # PARISC Architecture + +--- /dev/null ++++ b/drivers/watchdog/lantiq_wdt.c +@@ -0,0 +1,218 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ * ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ * Based on EP93xx wdt driver ++ */ ++ ++#include <linux/module.h> ++#include <linux/fs.h> ++#include <linux/miscdevice.h> ++#include <linux/miscdevice.h> ++#include <linux/watchdog.h> ++#include <linux/platform_device.h> ++#include <linux/uaccess.h> ++#include <linux/clk.h> ++ ++#include <lantiq.h> ++ ++#define LQ_WDT_PW1 0x00BE0000 ++#define LQ_WDT_PW2 0x00DC0000 ++ ++#define LQ_BIU_WDT_CR 0x3F0 ++#define LQ_BIU_WDT_SR 0x3F8 ++ ++#ifndef CONFIG_WATCHDOG_NOWAYOUT ++static int wdt_ok_to_close; ++#endif ++ ++static int wdt_timeout = 30; ++static __iomem void *wdt_membase = NULL; ++static unsigned long io_region_clk = 0; ++ ++static int ++lq_wdt_enable(unsigned int timeout) ++{ ++/* printk("%s:%s[%d] %08X\n", ++ __FILE__, __func__, __LINE__, ++ lq_r32(wdt_membase + LQ_BIU_WDT_SR)); ++ if(!lq_r32(wdt_membase + LQ_BIU_WDT_SR)) ++ { ++*/ lq_w32(LQ_WDT_PW1, wdt_membase + LQ_BIU_WDT_CR); ++ lq_w32(LQ_WDT_PW2 | ++ (0x3 << 26) | /* PWL */ ++ (0x3 << 24) | /* CLKDIV */ ++ (0x1 << 31) | /* enable */ ++ ((timeout * (io_region_clk / 0x40000)) + 0x1000), /* reload */ ++ wdt_membase + LQ_BIU_WDT_CR); ++// } ++ return 0; ++} ++ ++static void ++lq_wdt_disable(void) ++{ ++#ifndef CONFIG_WATCHDOG_NOWAYOUT ++ wdt_ok_to_close = 0; ++#endif ++ lq_w32(LQ_WDT_PW1, wdt_membase + LQ_BIU_WDT_CR); ++ lq_w32(LQ_WDT_PW2, wdt_membase+ LQ_BIU_WDT_CR); ++} ++ ++static ssize_t ++lq_wdt_write(struct file *file, const char __user *data, ++ size_t len, loff_t *ppos) ++{ ++ size_t i; ++ ++ if (!len) ++ return 0; ++ ++#ifndef CONFIG_WATCHDOG_NOWAYOUT ++ for (i = 0; i != len; i++) { ++ char c; ++ if (get_user(c, data + i)) ++ return -EFAULT; ++ if (c == 'V') ++ wdt_ok_to_close = 1; ++ } ++#endif ++ lq_wdt_enable(wdt_timeout); ++ return len; ++} ++ ++static struct watchdog_info ident = { ++ .options = WDIOF_MAGICCLOSE, ++ .identity = "lq_wdt", ++}; ++ ++static int ++lq_wdt_ioctl(struct inode *inode, struct file 
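The character device implemented by lq_wdt_write() above and the ioctl/open/release handlers that follow speaks the standard /dev/watchdog protocol, so an ordinary userspace keepalive loop is enough to exercise it. A sketch using only the generic watchdog API (not part of this patch):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int i, timeout = 30;
	int fd = open("/dev/watchdog", O_WRONLY);	/* arms the timer, see lq_wdt_open() */

	if (fd < 0)
		return 1;
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);		/* stored in wdt_timeout */
	for (i = 0; i < 6; i++) {			/* feed for about a minute */
		ioctl(fd, WDIOC_KEEPALIVE, 0);		/* re-arms via lq_wdt_enable() */
		sleep(10);
	}
	write(fd, "V", 1);	/* magic close, honoured unless CONFIG_WATCHDOG_NOWAYOUT */
	close(fd);		/* lq_wdt_release() then disables the timer */
	return 0;
}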
*file, ++ unsigned int cmd, unsigned long arg) ++{ ++ int ret = -ENOTTY; ++ ++ switch (cmd) { ++ case WDIOC_GETSUPPORT: ++ ret = copy_to_user((struct watchdog_info __user *)arg, &ident, ++ sizeof(ident)) ? -EFAULT : 0; ++ break; ++ ++ case WDIOC_GETTIMEOUT: ++ ret = put_user(wdt_timeout, (int __user *)arg); ++ break; ++ ++ case WDIOC_SETTIMEOUT: ++ ret = get_user(wdt_timeout, (int __user *)arg); ++ break; ++ ++ case WDIOC_KEEPALIVE: ++ lq_wdt_enable(wdt_timeout); ++ ret = 0; ++ break; ++ } ++ return ret; ++} ++ ++static int ++lq_wdt_open(struct inode *inode, struct file *file) ++{ ++ lq_wdt_enable(wdt_timeout); ++ return nonseekable_open(inode, file); ++} ++ ++static int ++lq_wdt_release(struct inode *inode, struct file *file) ++{ ++#ifndef CONFIG_WATCHDOG_NOWAYOUT ++ if (wdt_ok_to_close) ++ lq_wdt_disable(); ++ else ++#endif ++ printk(KERN_ERR "lq_wdt: watchdog closed without warning," ++ " rebooting system\n"); ++ return 0; ++} ++ ++static const struct file_operations lq_wdt_fops = { ++ .owner = THIS_MODULE, ++ .write = lq_wdt_write, ++ .ioctl = lq_wdt_ioctl, ++ .open = lq_wdt_open, ++ .release = lq_wdt_release, ++}; ++ ++static struct miscdevice lq_wdt_miscdev = { ++ .minor = WATCHDOG_MINOR, ++ .name = "watchdog", ++ .fops = &lq_wdt_fops, ++}; ++ ++static int ++lq_wdt_probe(struct platform_device *pdev) ++{ ++ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ struct clk *clk; ++ int ret = 0; ++ if(!res) ++ return -ENOENT; ++ res = request_mem_region(res->start, resource_size(res), ++ dev_name(&pdev->dev)); ++ if(!res) ++ return -EBUSY; ++ wdt_membase = ioremap_nocache(res->start, resource_size(res)); ++ if(!wdt_membase) ++ { ++ ret = -ENOMEM; ++ goto err_release_mem_region; ++ } ++ clk = clk_get(&pdev->dev, "io"); ++ io_region_clk = clk_get_rate(clk);; ++ ret = misc_register(&lq_wdt_miscdev); ++ if(!ret) ++ return 0; ++ ++ iounmap(wdt_membase); ++err_release_mem_region: ++ release_mem_region(res->start, resource_size(res)); ++ return ret; ++} ++ ++static int ++lq_wdt_remove(struct platform_device *dev) ++{ ++ lq_wdt_disable(); ++ misc_deregister(&lq_wdt_miscdev); ++ return 0; ++} ++ ++static struct platform_driver lq_wdt_driver = { ++ .probe = lq_wdt_probe, ++ .remove = lq_wdt_remove, ++ .driver = { ++ .name = "lq_wdt", ++ .owner = THIS_MODULE, ++ }, ++}; ++ ++static int __init ++init_lq_wdt(void) ++{ ++ return platform_driver_register(&lq_wdt_driver); ++} ++ ++static void __exit ++exit_lq_wdt(void) ++{ ++ platform_driver_unregister(&lq_wdt_driver); ++} ++ ++module_init(init_lq_wdt); ++module_exit(exit_lq_wdt); ++ ++MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); ++MODULE_DESCRIPTION("ifxmips Watchdog"); ++MODULE_LICENSE("GPL"); ++MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/target/linux/lantiq/patches/260-pci.patch b/target/linux/lantiq/patches/260-pci.patch new file mode 100644 index 0000000000..d086b25e8b --- /dev/null +++ b/target/linux/lantiq/patches/260-pci.patch @@ -0,0 +1,436 @@ +--- a/arch/mips/pci/Makefile ++++ b/arch/mips/pci/Makefile +@@ -55,6 +55,7 @@ obj-$(CONFIG_ZAO_CAPCELLA) += fixup-capc + obj-$(CONFIG_WR_PPMC) += fixup-wrppmc.o + obj-$(CONFIG_MIKROTIK_RB532) += pci-rc32434.o ops-rc32434.o fixup-rc32434.o + obj-$(CONFIG_CPU_CAVIUM_OCTEON) += pci-octeon.o pcie-octeon.o ++obj-$(CONFIG_LANTIQ) += pci-lantiq.o ops-lantiq.o + + ifdef CONFIG_PCI_MSI + obj-$(CONFIG_CPU_CAVIUM_OCTEON) += msi-octeon.o +--- /dev/null ++++ b/arch/mips/pci/ops-lantiq.c +@@ -0,0 +1,127 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify 
it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ * ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#include <linux/types.h> ++#include <linux/pci.h> ++#include <linux/kernel.h> ++#include <linux/init.h> ++#include <linux/delay.h> ++#include <linux/mm.h> ++#include <asm/addrspace.h> ++#include <linux/vmalloc.h> ++ ++#include <xway.h> ++ ++#define LQ_PCI_CFG_BUSNUM_SHF 16 ++#define LQ_PCI_CFG_DEVNUM_SHF 11 ++#define LQ_PCI_CFG_FUNNUM_SHF 8 ++ ++#define PCI_ACCESS_READ 0 ++#define PCI_ACCESS_WRITE 1 ++ ++extern u32 lq_pci_mapped_cfg; ++ ++static int ++lq_pci_config_access(unsigned char access_type, ++ struct pci_bus *bus, unsigned int devfn, unsigned int where, u32 *data) ++{ ++ unsigned long cfg_base; ++ unsigned long flags; ++ ++ u32 temp; ++ ++ /* we support slot from 0 to 15 */ ++ /* dev_fn 0&0x68 (AD29) is ifxmips itself */ ++ if ((bus->number != 0) || ((devfn & 0xf8) > 0x78) ++ || ((devfn & 0xf8) == 0) || ((devfn & 0xf8) == 0x68)) ++ return 1; ++ ++ spin_lock_irqsave(&ebu_lock, flags); ++ ++ cfg_base = lq_pci_mapped_cfg; ++ cfg_base |= (bus->number << LQ_PCI_CFG_BUSNUM_SHF) | (devfn << ++ LQ_PCI_CFG_FUNNUM_SHF) | (where & ~0x3); ++ ++ /* Perform access */ ++ if (access_type == PCI_ACCESS_WRITE) ++ { ++#ifdef CONFIG_SWAP_IO_SPACE ++ lq_w32(swab32(*data), ((u32*)cfg_base)); ++#else ++ lq_w32(*data, ((u32*)cfg_base)); ++#endif ++ } else { ++ *data = lq_r32(((u32*)(cfg_base))); ++#ifdef CONFIG_SWAP_IO_SPACE ++ *data = swab32(*data); ++#endif ++ } ++ wmb(); ++ ++ /* clean possible Master abort */ ++ cfg_base = (lq_pci_mapped_cfg | (0x0 << LQ_PCI_CFG_FUNNUM_SHF)) + 4; ++ temp = lq_r32(((u32*)(cfg_base))); ++#ifdef CONFIG_SWAP_IO_SPACE ++ temp = swab32 (temp); ++#endif ++ cfg_base = (lq_pci_mapped_cfg | (0x68 << LQ_PCI_CFG_FUNNUM_SHF)) + 4; ++ lq_w32(temp, ((u32*)cfg_base)); ++ ++ spin_unlock_irqrestore(&ebu_lock, flags); ++ ++ if (((*data) == 0xffffffff) && (access_type == PCI_ACCESS_READ)) ++ return 1; ++ ++ return 0; ++} ++ ++int ++lq_pci_read_config_dword(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 * val) ++{ ++ u32 data = 0; ++ ++ if (lq_pci_config_access(PCI_ACCESS_READ, bus, devfn, where, &data)) ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ ++ if (size == 1) ++ *val = (data >> ((where & 3) << 3)) & 0xff; ++ else if (size == 2) ++ *val = (data >> ((where & 3) << 3)) & 0xffff; ++ else ++ *val = data; ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++int ++lq_pci_write_config_dword(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 val) ++{ ++ u32 data = 0; ++ ++ if (size == 4) ++ { ++ data = val; ++ } else { ++ if (lq_pci_config_access(PCI_ACCESS_READ, bus, devfn, where, &data)) ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ ++ if (size == 1) ++ data = (data & ~(0xff << ((where & 3) << 3))) | ++ (val << ((where & 3) << 3)); ++ else if (size == 2) ++ data = (data & ~(0xffff << ((where & 3) << 3))) | ++ (val << ((where & 3) << 3)); ++ } ++ ++ if (lq_pci_config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data)) ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ ++ return PCIBIOS_SUCCESSFUL; ++} +--- /dev/null ++++ b/arch/mips/pci/pci-lantiq.c +@@ -0,0 +1,293 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. 
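Two details of lq_pci_config_access() in ops-lantiq.c above are worth spelling out. First, devfn already carries the device number in its upper five bits, so shifting the whole devfn by LQ_PCI_CFG_FUNNUM_SHF (8) places the device at bits 15:11 (LQ_PCI_CFG_DEVNUM_SHF) and the function at bits 10:8. Second, byte and halfword accesses are emulated with a read-modify-write of the containing dword. A worked example with assumed values:

/* bus 0, device 14 (AD30, the mini PCI slot), function 0, register 0x3c:
 *   devfn    = (14 << 3) | 0 = 0x70
 *   cfg_base = lq_pci_mapped_cfg | (0 << 16) | (0x70 << 8) | (0x3c & ~0x3)
 *            = lq_pci_mapped_cfg + 0x703c
 * writing one byte 0xab at where = 0x3d (size == 1):
 *   shift = (0x3d & 3) << 3 = 8
 *   data  = (data & ~(0xff << 8)) | (0xab << 8)
 */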
++ * ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#include <linux/types.h> ++#include <linux/pci.h> ++#include <linux/kernel.h> ++#include <linux/init.h> ++#include <linux/delay.h> ++#include <linux/mm.h> ++#include <linux/vmalloc.h> ++#include <linux/platform_device.h> ++ ++#include <asm/gpio.h> ++#include <asm/addrspace.h> ++ ++#include <xway.h> ++#include <xway_irq.h> ++#include <lantiq_platform.h> ++ ++#define LQ_PCI_CFG_BASE 0x17000000 ++#define LQ_PCI_CFG_SIZE 0x00008000 ++#define LQ_PCI_MEM_BASE 0x18000000 ++#define LQ_PCI_MEM_SIZE 0x02000000 ++#define LQ_PCI_IO_BASE 0x1AE00000 ++#define LQ_PCI_IO_SIZE 0x00200000 ++ ++#define PCI_CR_FCI_ADDR_MAP0 ((u32 *)(PCI_CR_PR_BASE_ADDR + 0x00C0)) ++#define PCI_CR_FCI_ADDR_MAP1 ((u32 *)(PCI_CR_PR_BASE_ADDR + 0x00C4)) ++#define PCI_CR_FCI_ADDR_MAP2 ((u32 *)(PCI_CR_PR_BASE_ADDR + 0x00C8)) ++#define PCI_CR_FCI_ADDR_MAP3 ((u32 *)(PCI_CR_PR_BASE_ADDR + 0x00CC)) ++#define PCI_CR_FCI_ADDR_MAP4 ((u32 *)(PCI_CR_PR_BASE_ADDR + 0x00D0)) ++#define PCI_CR_FCI_ADDR_MAP5 ((u32 *)(PCI_CR_PR_BASE_ADDR + 0x00D4)) ++#define PCI_CR_FCI_ADDR_MAP6 ((u32 *)(PCI_CR_PR_BASE_ADDR + 0x00D8)) ++#define PCI_CR_FCI_ADDR_MAP7 ((u32 *)(PCI_CR_PR_BASE_ADDR + 0x00DC)) ++#define PCI_CR_CLK_CTRL ((u32 *)(PCI_CR_PR_BASE_ADDR + 0x0000)) ++#define PCI_CR_PCI_MOD ((u32 *)(PCI_CR_PR_BASE_ADDR + 0x0030)) ++#define PCI_CR_PC_ARB ((u32 *)(PCI_CR_PR_BASE_ADDR + 0x0080)) ++#define PCI_CR_FCI_ADDR_MAP11hg ((u32 *)(PCI_CR_PR_BASE_ADDR + 0x00E4)) ++#define PCI_CR_BAR11MASK ((u32 *)(PCI_CR_PR_BASE_ADDR + 0x0044)) ++#define PCI_CR_BAR12MASK ((u32 *)(PCI_CR_PR_BASE_ADDR + 0x0048)) ++#define PCI_CR_BAR13MASK ((u32 *)(PCI_CR_PR_BASE_ADDR + 0x004C)) ++#define PCI_CS_BASE_ADDR1 ((u32 *)(PCI_CS_PR_BASE_ADDR + 0x0010)) ++#define PCI_CR_PCI_ADDR_MAP11 ((u32 *)(PCI_CR_PR_BASE_ADDR + 0x0064)) ++#define PCI_CR_FCI_BURST_LENGTH ((u32 *)(PCI_CR_PR_BASE_ADDR + 0x00E8)) ++#define PCI_CR_PCI_EOI ((u32 *)(PCI_CR_PR_BASE_ADDR + 0x002C)) ++ ++ ++#define PCI_CS_STS_CMD ((u32 *)(PCI_CS_PR_BASE_ADDR + 0x0004)) ++ ++#define PCI_MASTER0_REQ_MASK_2BITS 8 ++#define PCI_MASTER1_REQ_MASK_2BITS 10 ++#define PCI_MASTER2_REQ_MASK_2BITS 12 ++#define INTERNAL_ARB_ENABLE_BIT 0 ++ ++#define LQ_CGU_IFCCR ((u32 *)(LQ_CGU_BASE_ADDR + 0x0018)) ++#define LQ_CGU_PCICR ((u32 *)(LQ_CGU_BASE_ADDR + 0x0034)) ++ ++extern int lq_pci_read_config_dword(struct pci_bus *bus, ++ unsigned int devfn, int where, int size, u32 *val); ++extern int lq_pci_write_config_dword(struct pci_bus *bus, ++ unsigned int devfn, int where, int size, u32 val); ++ ++u32 lq_pci_mapped_cfg; ++ ++/* Since the PCI REQ pins can be reused for other functionality, make it possible ++ to exclude those from interpretation by the PCI controller */ ++static int lq_pci_req_mask = 0xf; ++ ++struct pci_ops lq_pci_ops = ++{ ++ .read = lq_pci_read_config_dword, ++ .write = lq_pci_write_config_dword ++}; ++ ++static struct resource pci_io_resource = ++{ ++ .name = "pci io space", ++ .start = LQ_PCI_IO_BASE, ++ .end = LQ_PCI_IO_BASE + LQ_PCI_IO_SIZE - 1, ++ .flags = IORESOURCE_IO ++}; ++ ++static struct resource pci_mem_resource = ++{ ++ .name = "pci memory space", ++ .start = LQ_PCI_MEM_BASE, ++ .end = LQ_PCI_MEM_BASE + LQ_PCI_MEM_SIZE - 1, ++ .flags = IORESOURCE_MEM ++}; ++ ++static struct pci_controller lq_pci_controller = ++{ ++ .pci_ops = &lq_pci_ops, ++ .mem_resource = &pci_mem_resource, ++ .mem_offset = 0x00000000UL, ++ .io_resource = &pci_io_resource, ++ .io_offset = 0x00000000UL, ++}; ++ ++int ++pcibios_plat_dev_init(struct pci_dev *dev) ++{ ++ u8 
pin; ++ ++ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); ++ switch(pin) ++ { ++ case 0: ++ break; ++ case 1: ++ //falling edge level triggered:0x4, low level:0xc, rising edge:0x2 ++ lq_w32(lq_r32(LQ_EBU_PCC_CON) | 0xc, LQ_EBU_PCC_CON); ++ lq_w32(lq_r32(LQ_EBU_PCC_IEN) | 0x10, LQ_EBU_PCC_IEN); ++ break; ++ case 2: ++ case 3: ++ case 4: ++ printk ("WARNING: interrupt pin %d not supported yet!\n", pin); ++ default: ++ printk ("WARNING: invalid interrupt pin %d\n", pin); ++ return 1; ++ } ++ return 0; ++} ++ ++static u32 ++lq_calc_bar11mask(void) ++{ ++ u32 mem, bar11mask; ++ ++ /* BAR11MASK value depends on available memory on system. */ ++ mem = num_physpages * PAGE_SIZE; ++ bar11mask = (0x0ffffff0 & ~((1 << (fls(mem) -1)) -1)) | 8; ++ ++ return bar11mask; ++} ++ ++static void ++lq_pci_setup_clk(int external_clock) ++{ ++ /* set clock to 33Mhz */ ++ lq_w32(lq_r32(LQ_CGU_IFCCR) & ~0xf00000, LQ_CGU_IFCCR); ++ lq_w32(lq_r32(LQ_CGU_IFCCR) | 0x800000, LQ_CGU_IFCCR); ++ if (external_clock) ++ { ++ lq_w32(lq_r32(LQ_CGU_IFCCR) & ~(1 << 16), LQ_CGU_IFCCR); ++ lq_w32((1 << 30), LQ_CGU_PCICR); ++ } else { ++ lq_w32(lq_r32(LQ_CGU_IFCCR) | (1 << 16), LQ_CGU_IFCCR); ++ lq_w32((1 << 31) | (1 << 30), LQ_CGU_PCICR); ++ } ++} ++ ++static void ++lq_pci_setup_gpio(void) ++{ ++ /* PCI reset line is gpio driven */ ++ lq_gpio_request(21, 0, 0, 1, "pci-reset"); ++ ++ /* PCI_REQ line */ ++ lq_gpio_request(29, 1, 0, 0, "pci-req"); ++ ++ /* PCI_GNT line */ ++ lq_gpio_request(30, 1, 0, 1, "pci-gnt"); ++} ++ ++static int __init ++lq_pci_startup(void) ++{ ++ u32 temp_buffer; ++ ++ /* setup pci clock and gpis used by pci */ ++ lq_pci_setup_gpio(); ++ ++ /* enable auto-switching between PCI and EBU */ ++ lq_w32(0xa, PCI_CR_CLK_CTRL); ++ ++ /* busy, i.e. configuration is not done, PCI access has to be retried */ ++ lq_w32(lq_r32(PCI_CR_PCI_MOD) & ~(1 << 24), PCI_CR_PCI_MOD); ++ wmb (); ++ /* BUS Master/IO/MEM access */ ++ lq_w32(lq_r32(PCI_CS_STS_CMD) | 7, PCI_CS_STS_CMD); ++ ++ /* enable external 2 PCI masters */ ++ temp_buffer = lq_r32(PCI_CR_PC_ARB); ++ temp_buffer &= (~(lq_pci_req_mask << 16)); ++ /* enable internal arbiter */ ++ temp_buffer |= (1 << INTERNAL_ARB_ENABLE_BIT); ++ /* enable internal PCI master reqest */ ++ temp_buffer &= (~(3 << PCI_MASTER0_REQ_MASK_2BITS)); ++ ++ /* enable EBU request */ ++ temp_buffer &= (~(3 << PCI_MASTER1_REQ_MASK_2BITS)); ++ ++ /* enable all external masters request */ ++ temp_buffer &= (~(3 << PCI_MASTER2_REQ_MASK_2BITS)); ++ lq_w32(temp_buffer, PCI_CR_PC_ARB); ++ wmb (); ++ ++ /* setup BAR memory regions */ ++ lq_w32(0x18000000, PCI_CR_FCI_ADDR_MAP0); ++ lq_w32(0x18400000, PCI_CR_FCI_ADDR_MAP1); ++ lq_w32(0x18800000, PCI_CR_FCI_ADDR_MAP2); ++ lq_w32(0x18c00000, PCI_CR_FCI_ADDR_MAP3); ++ lq_w32(0x19000000, PCI_CR_FCI_ADDR_MAP4); ++ lq_w32(0x19400000, PCI_CR_FCI_ADDR_MAP5); ++ lq_w32(0x19800000, PCI_CR_FCI_ADDR_MAP6); ++ lq_w32(0x19c00000, PCI_CR_FCI_ADDR_MAP7); ++ lq_w32(0x1ae00000, PCI_CR_FCI_ADDR_MAP11hg); ++ lq_w32(lq_calc_bar11mask(), PCI_CR_BAR11MASK); ++ lq_w32(0, PCI_CR_PCI_ADDR_MAP11); ++ lq_w32(0, PCI_CS_BASE_ADDR1); ++#ifdef CONFIG_SWAP_IO_SPACE ++ /* both TX and RX endian swap are enabled */ ++ lq_w32(lq_r32(PCI_CR_PCI_EOI) | 3, PCI_CR_PCI_EOI); ++ wmb (); ++#endif ++ /*TODO: disable BAR2 & BAR3 - why was this in the origianl infineon code */ ++ lq_w32(lq_r32(PCI_CR_BAR12MASK) | 0x80000000, PCI_CR_BAR12MASK); ++ lq_w32(lq_r32(PCI_CR_BAR13MASK) | 0x80000000, PCI_CR_BAR13MASK); ++ /*use 8 dw burst length */ ++ lq_w32(0x303, PCI_CR_FCI_BURST_LENGTH); ++ 
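For a feel of what lq_calc_bar11mask() above produces, here is the computation for an assumed 32 MB of system RAM:

/*   mem                =  32 MB          =  0x02000000
 *   fls(mem)           =  26, so 1 << 25 =  0x02000000
 *   ~(0x02000000 - 1)                    =  0xfe000000
 *   & 0x0ffffff0                         =  0x0e000000
 *   | 8                                  =  0x0e000008  ->  PCI_CR_BAR11MASK
 */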
lq_w32(lq_r32(PCI_CR_PCI_MOD) | (1 << 24), PCI_CR_PCI_MOD); ++ wmb(); ++ ++ /* toggle reset pin */ ++ __gpio_set_value(21, 0); ++ wmb(); ++ mdelay(1); ++ __gpio_set_value(21, 1); ++ return 0; ++} ++ ++int __init ++pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin){ ++ switch(slot) ++ { ++ case 13: ++ /* IDSEL = AD29 --> USB Host Controller */ ++ return (INT_NUM_IM1_IRL0 + 17); ++ case 14: ++ /* IDSEL = AD30 --> mini PCI connector */ ++ return (INT_NUM_IM0_IRL0 + 22); ++ default: ++ printk("lq_pci: no IRQ found for slot %d, pin %d\n", slot, pin); ++ return 0; ++ } ++} ++ ++static int ++lq_pci_probe(struct platform_device *pdev) ++{ ++ struct lq_pci_data *lq_pci_data = (struct lq_pci_data*) pdev->dev.platform_data; ++ extern int pci_probe_only; ++ ++ pci_probe_only = 0; ++ lq_pci_req_mask = lq_pci_data->req_mask; ++ lq_pci_setup_clk(lq_pci_data->clock); ++ ++ lq_pci_startup(); ++ lq_pci_mapped_cfg = ++ (u32)ioremap_nocache(LQ_PCI_CFG_BASE, LQ_PCI_CFG_BASE); ++ lq_pci_controller.io_map_base = ++ (unsigned long)ioremap(LQ_PCI_IO_BASE, LQ_PCI_IO_SIZE - 1); ++ ++ register_pci_controller(&lq_pci_controller); ++ return 0; ++} ++ ++static struct platform_driver ++lq_pci_driver = { ++ .probe = lq_pci_probe, ++ .driver = { ++ .name = "lq_pci", ++ .owner = THIS_MODULE, ++ }, ++}; ++ ++int __init ++pcibios_init(void) ++{ ++ int ret = platform_driver_register(&lq_pci_driver); ++ if(ret) ++ printk(KERN_INFO "lq_pci: Error registering platfom driver!"); ++ return ret; ++} ++ ++arch_initcall(pcibios_init); diff --git a/target/linux/lantiq/patches/270-crypto.patch b/target/linux/lantiq/patches/270-crypto.patch new file mode 100644 index 0000000000..e6ee88585c --- /dev/null +++ b/target/linux/lantiq/patches/270-crypto.patch @@ -0,0 +1,6209 @@ +--- a/drivers/crypto/Kconfig ++++ b/drivers/crypto/Kconfig +@@ -243,4 +243,75 @@ + OMAP processors have SHA1/MD5 hw accelerator. Select this if you + want to use the OMAP module for SHA1/MD5 algorithms. + ++config CRYPTO_DEV_LANTIQ ++ bool "Support for Lantiq crypto engine" ++ select CRYPTO_ALGAPI ++ default y ++ help ++ Will support Lantiq crypto hardware ++ If you are unsure, say M. ++ ++menuconfig CRYPTO_DEV_LANTIQ_DES ++ bool "Lantiq crypto hardware for DES algorithm" ++ depends on CRYPTO_DEV_LANTIQ ++ select CRYPTO_BLKCIPHER ++ default y ++ help ++ Use crypto hardware for DES/3DES algorithm. ++ If unsure say N. ++ ++menuconfig CRYPTO_DEV_LANTIQ_AES ++ bool "Lantiq crypto hardware for AES algorithm" ++ depends on CRYPTO_DEV_LANTIQ ++ select CRYPTO_BLKCIPHER ++ default y ++ help ++ Use crypto hardware for AES algorithm. ++ If unsure say N. ++ ++menuconfig CRYPTO_DEV_LANTIQ_ARC4 ++ bool "Lantiq crypto hardware for ARC4 algorithm" ++ depends on (CRYPTO_DEV_LANTIQ && IFXMIPS_AR9) ++ select CRYPTO_BLKCIPHER ++ default y ++ help ++ Use crypto hardware for ARC4 algorithm. ++ If unsure say N. ++ ++menuconfig CRYPTO_DEV_LANTIQ_MD5 ++ bool "Lantiq crypto hardware for MD5 algorithm" ++ depends on CRYPTO_DEV_LANTIQ ++ select CRYPTO_BLKCIPHER ++ default y ++ help ++ Use crypto hardware for MD5 algorithm. ++ If unsure say N. ++ ++menuconfig CRYPTO_DEV_LANTIQ_SHA1 ++ bool "Lantiq crypto hardware for SHA1 algorithm" ++ depends on CRYPTO_DEV_LANTIQ ++ select CRYPTO_BLKCIPHER ++ default y ++ help ++ Use crypto hardware for SHA1 algorithm. ++ If unsure say N. 
++ ++menuconfig CRYPTO_DEV_LANTIQ_SHA1_HMAC ++ bool "Lantiq crypto hardware for SHA1_HMAC algorithm" ++ depends on (CRYPTO_DEV_LANTIQ && IFXMIPS_AR9) ++ select CRYPTO_BLKCIPHER ++ default y ++ help ++ Use crypto hardware for SHA1_HMAC algorithm. ++ If unsure say N. ++ ++menuconfig CRYPTO_DEV_LANTIQ_MD5_HMAC ++ bool "Lantiq crypto hardware for MD5_HMAC algorithms" ++ depends on (CRYPTO_DEV_LANTIQ && IFXMIPS_AR9) ++ select CRYPTO_BLKCIPHER ++ default y ++ help ++ Use crypto hardware for MD5_HMAC algorithm. ++ If unsure say N. ++ + endif # CRYPTO_HW +--- /dev/null ++++ b/drivers/crypto/lantiq/Makefile +@@ -0,0 +1,11 @@ ++obj-$(CONFIG_CRYPTO_DEV_LANTIQ) += deu.o ++obj-$(CONFIG_CRYPTO_DEV_LANTIQ) += deu_falcon.o ++obj-$(CONFIG_CRYPTO_DEV_LANTIQ) += deu_danube.o ++obj-$(CONFIG_CRYPTO_DEV_LANTIQ) += deu_ar9.o ++obj-$(CONFIG_CRYPTO_DEV_LANTIQ_DES) += des.o ++obj-$(CONFIG_CRYPTO_DEV_LANTIQ_AES) += aes.o ++obj-$(CONFIG_CRYPTO_DEV_LANTIQ_ARC4) += arc4.o ++obj-$(CONFIG_CRYPTO_DEV_LANTIQ_SHA1) += sha1.o ++obj-$(CONFIG_CRYPTO_DEV_LANTIQ_SHA1_HMAC) += sha1_hmac.o ++obj-$(CONFIG_CRYPTO_DEV_LANTIQ_MD5) += md5.o ++obj-$(CONFIG_CRYPTO_DEV_LANTIQ_MD5_HMAC) += md5_hmac.o +--- /dev/null ++++ b/drivers/crypto/lantiq/aes.c +@@ -0,0 +1,1029 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 
++ * ++ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com> ++ * Copyright (C) 2009 Mohammad Firdaus ++ */ ++ ++/** ++ \defgroup LQ_DEU LQ_DEU_DRIVERS ++ \ingroup API ++ \brief Lantiq DEU driver module ++*/ ++ ++/** ++ \file aes.c ++ \ingroup LQ_DEU ++ \brief AES Encryption Driver main file ++*/ ++ ++/** ++ \defgroup LQ_AES_FUNCTIONS LQ_AES_FUNCTIONS ++ \ingroup LQ_DEU ++ \brief Lantiq AES driver Functions ++*/ ++ ++#include <linux/version.h> ++#include <linux/module.h> ++#include <linux/init.h> ++#include <linux/types.h> ++#include <linux/errno.h> ++#include <linux/crypto.h> ++#include <linux/interrupt.h> ++#include <linux/delay.h> ++#include <asm/byteorder.h> ++#include <crypto/algapi.h> ++#include "deu.h" ++ ++#ifdef CONFIG_CRYPTO_DEV_DMA ++# include "deu_dma.h" ++#endif ++ ++static spinlock_t cipher_lock; ++ ++/* Definition of constants */ ++ ++#define AES_MIN_KEY_SIZE 16 ++#define AES_MAX_KEY_SIZE 32 ++#define AES_BLOCK_SIZE 16 ++#define CTR_RFC3686_NONCE_SIZE 4 ++#define CTR_RFC3686_IV_SIZE 8 ++#define CTR_RFC3686_MAX_KEY_SIZE (AES_MAX_KEY_SIZE \ ++ + CTR_RFC3686_NONCE_SIZE) ++ ++struct aes_ctx { ++ int key_length; ++ u32 buf[AES_MAX_KEY_SIZE]; ++ u8 nonce[CTR_RFC3686_NONCE_SIZE]; ++}; ++ ++/** \fn int aes_set_key(struct crypto_tfm *tfm, const uint8_t *in_key, unsigned int key_len) ++ * \ingroup LQ_AES_FUNCTIONS ++ * \brief sets the AES keys ++ * \param tfm linux crypto algo transform ++ * \param in_key input key ++ * \param key_len key lengths of 16, 24 and 32 bytes supported ++ * \return -EINVAL - bad key length, 0 - SUCCESS ++*/ ++static int aes_set_key(struct crypto_tfm *tfm, ++ const u8 *in_key, ++ unsigned int key_len) ++{ ++ struct aes_ctx *ctx = crypto_tfm_ctx(tfm); ++ u32 *flags = &tfm->crt_flags; ++ ++ DPRINTF(0, "ctx @%p, key_len %d\n", ctx, key_len); ++ ++ if (key_len != 16 && key_len != 24 && key_len != 32) { ++ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; ++ return -EINVAL; ++ } ++ ++ ctx->key_length = key_len; ++ memcpy((u8 *)(ctx->buf), in_key, key_len); ++ ++ return 0; ++} ++ ++#ifndef CONFIG_CRYPTO_DEV_DMA ++/** \fn void deu_aes(void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, size_t nbytes, int encdec, int mode) ++ * \ingroup LQ_AES_FUNCTIONS ++ * \brief main interface to AES hardware ++ * \param ctx_arg crypto algo context ++ * \param out_arg output bytestream ++ * \param in_arg input bytestream ++ * \param iv_arg initialization vector ++ * \param nbytes length of bytestream ++ * \param encdec 1 for encrypt; 0 for decrypt ++ * \param mode operation mode such as ebc, cbc, ctr ++ * ++*/ ++static void deu_aes(void *ctx_arg, ++ u8 *out_arg, ++ const u8 *in_arg, ++ u8 *iv_arg, ++ size_t nbytes, ++ int encdec, ++ int mode) ++#else ++ ++/** \fn void deu_aes_core(void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, size_t nbytes, int encdec, int mode) ++ * \ingroup LQ_AES_FUNCTIONS ++ * \brief main interface to AES hardware ++ * \param ctx_arg crypto algo context ++ * \param out_arg output bytestream ++ * \param in_arg input bytestream ++ * \param iv_arg initialization vector ++ * \param nbytes length of bytestream ++ * \param encdec 1 for encrypt; 0 for decrypt ++ * \param mode operation mode such as ebc, cbc, ctr ++ * ++*/ ++static void deu_aes_core(void *ctx_arg, ++ u8 *out_arg, ++ const u8 *in_arg, ++ u8 *iv_arg, ++ size_t nbytes, ++ int encdec, ++ int mode) ++#endif ++ ++{ ++ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ ++ volatile struct deu_aes *aes = (volatile struct deu_aes *)AES_START; ++ struct aes_ctx *ctx = (struct aes_ctx 
*)ctx_arg; ++ u32 *in_key = ctx->buf; ++ ulong flag; ++ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ ++ int key_len = ctx->key_length; ++ ++#ifndef CONFIG_CRYPTO_DEV_DMA ++ int i = 0; ++ int byte_cnt = nbytes; ++#else ++ volatile struct deu_dma *dma = (struct deu_dma *)LQ_DEU_DMA_CON; ++ struct dma_device_info *dma_device = lq_deu[0].dma_device; ++ /* struct deu_drv_priv *deu_priv = ++ * (struct deu_drv_priv *)dma_device->priv; */ ++ int wlen = 0; ++ u32 *outcopy = NULL; ++ u32 *dword_mem_aligned_in = NULL; ++ ++# ifdef CONFIG_CRYPTO_DEV_POLL_DMA ++ u32 timeout = 0; ++ u32 *out_dma = NULL; ++# endif ++#endif ++ ++ DPRINTF(0, "ctx @%p, mode %d, encdec %d\n", ctx, mode, encdec); ++ ++ CRTCL_SECT_START; ++ ++ /* 128, 192 or 256 bit key length */ ++ aes->ctrl.K = key_len / 8 - 2; ++ if (key_len == 128 / 8) { ++ aes->K3R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 0)); ++ aes->K2R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 1)); ++ aes->K1R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 2)); ++ aes->K0R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 3)); ++ } ++ else if (key_len == 192 / 8) { ++ aes->K5R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 0)); ++ aes->K4R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 1)); ++ aes->K3R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 2)); ++ aes->K2R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 3)); ++ aes->K1R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 4)); ++ aes->K0R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 5)); ++ } ++ else if (key_len == 256 / 8) { ++ aes->K7R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 0)); ++ aes->K6R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 1)); ++ aes->K5R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 2)); ++ aes->K4R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 3)); ++ aes->K3R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 4)); ++ aes->K2R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 5)); ++ aes->K1R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 6)); ++ aes->K0R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 7)); ++ } ++ else { ++ CRTCL_SECT_END; ++ return; /* -EINVAL; */ ++ } ++ ++ /* let HW pre-process DEcryption key in any case (even if ++ ENcryption is used). Key Valid (KV) bit is then only ++ checked in decryption routine! 
*/ ++ aes->ctrl.PNK = 1; ++ ++#ifdef CONFIG_CRYPTO_DEV_DMA ++ while (aes->ctrl.BUS) { ++ /* this will not take long */ ++ } ++ AES_DMA_MISC_CONFIG(); ++#endif ++ ++ aes->ctrl.E_D = !encdec; /* encryption */ ++ aes->ctrl.O = mode; /* 0 ECB 1 CBC 2 OFB 3 CFB 4 CTR */ ++ aes->ctrl.SM = 1; /* start after writing input register */ ++ aes->ctrl.DAU = 0; /* Disable Automatic Update of init ++ vector */ ++ aes->ctrl.ARS = 1; /* Autostart Select - write to IHR */ ++ ++ /* aes->ctrl.F = 128; */ /* default; only for CFB and OFB modes; ++ change only for ++ customer-specific apps */ ++ if (mode > 0) { ++ aes->IV3R = DEU_ENDIAN_SWAP(*(u32 *)iv_arg); ++ aes->IV2R = DEU_ENDIAN_SWAP(*((u32 *)iv_arg + 1)); ++ aes->IV1R = DEU_ENDIAN_SWAP(*((u32 *)iv_arg + 2)); ++ aes->IV0R = DEU_ENDIAN_SWAP(*((u32 *)iv_arg + 3)); ++ }; ++ ++#ifndef CONFIG_CRYPTO_DEV_DMA ++ i = 0; ++ while (byte_cnt >= 16) { ++ aes->ID3R = INPUT_ENDIAN_SWAP(*((u32 *)in_arg + (i * 4) + 0)); ++ aes->ID2R = INPUT_ENDIAN_SWAP(*((u32 *)in_arg + (i * 4) + 1)); ++ aes->ID1R = INPUT_ENDIAN_SWAP(*((u32 *)in_arg + (i * 4) + 2)); ++ /* start crypto */ ++ aes->ID0R = INPUT_ENDIAN_SWAP(*((u32 *)in_arg + (i * 4) + 3)); ++ ++ while (aes->ctrl.BUS) { ++ /* this will not take long */ ++ } ++ ++ *((volatile u32 *)out_arg + (i * 4) + 0) = aes->OD3R; ++ *((volatile u32 *)out_arg + (i * 4) + 1) = aes->OD2R; ++ *((volatile u32 *)out_arg + (i * 4) + 2) = aes->OD1R; ++ *((volatile u32 *)out_arg + (i * 4) + 3) = aes->OD0R; ++ ++ i++; ++ byte_cnt -= 16; ++ } ++#else /* dma */ ++ /* Prepare Rx buf length used in dma psuedo interrupt */ ++ /* deu_priv->deu_rx_buf = out_arg; */ ++ /* deu_priv->deu_rx_len = nbytes; */ ++ ++ /* memory alignment issue */ ++ dword_mem_aligned_in = (u32 *)DEU_DWORD_REORDERING(in_arg, ++ aes_buff_in, ++ BUFFER_IN, nbytes); ++ ++ dma->ctrl.ALGO = 1; /* AES */ ++ dma->ctrl.BS = 0; ++ aes->ctrl.DAU = 0; ++ dma->ctrl.EN = 1; ++ ++ while (aes->ctrl.BUS) { ++ /* wait for AES to be ready */ ++ }; ++ ++ wlen = dma_device_write(dma_device, (u8 *)dword_mem_aligned_in, ++ nbytes, NULL); ++ if (wlen != nbytes) { ++ dma->ctrl.EN = 0; ++ CRTCL_SECT_END; ++ printk(KERN_ERR "[%s %s %d]: dma_device_write fail!\n", ++ __FILE__, __func__, __LINE__); ++ return; /* -EINVAL; */ ++ } ++ ++ WAIT_AES_DMA_READY(); ++ ++# ifdef CONFIG_CRYPTO_DEV_POLL_DMA ++ outcopy = (u32 *)DEU_DWORD_REORDERING(out_arg, aes_buff_out, ++ BUFFER_OUT, nbytes); ++ ++ /* polling DMA rx channel */ ++ while ((dma_device_read(dma_device, (u8 **)&out_dma, NULL)) == 0) { ++ timeout++; ++ ++ if (timeout >= 333000) { ++ dma->ctrl.EN = 0; ++ CRTCL_SECT_END; ++ printk (KERN_ERR "[%s %s %d]: timeout!!\n", ++ __FILE__, __func__, __LINE__); ++ return; /* -EINVAL; */ ++ } ++ } ++ ++ WAIT_AES_DMA_READY(); ++ ++ AES_MEMORY_COPY(outcopy, out_dma, out_arg, nbytes); ++ ++# else /* not working at the moment.. 
*/ ++ CRTCL_SECT_END; ++ ++ /* sleep and wait for Rx finished */ ++ DEU_WAIT_EVENT(deu_priv->deu_thread_wait, DEU_EVENT, ++ deu_priv->deu_event_flags); ++ ++ CRTCL_SECT_START; ++# endif ++ ++#endif /* dma */ ++ ++ /* tc.chen : copy iv_arg back */ ++ if (mode > 0) { ++ *((u32 *)iv_arg) = DEU_ENDIAN_SWAP(*((u32 *)iv_arg)); ++ *((u32 *)iv_arg + 1) = DEU_ENDIAN_SWAP(*((u32 *)iv_arg + 1)); ++ *((u32 *)iv_arg + 2) = DEU_ENDIAN_SWAP(*((u32 *)iv_arg + 2)); ++ *((u32 *)iv_arg + 3) = DEU_ENDIAN_SWAP(*((u32 *)iv_arg + 3)); ++ } ++ ++ CRTCL_SECT_END; ++} ++ ++/** \fn int ctr_rfc3686_aes_set_key(struct crypto_tfm *tfm, const uint8_t *in_key, unsigned int key_len) ++ * \ingroup LQ_AES_FUNCTIONS ++ * \brief sets RFC3686 key ++ * \param tfm linux crypto algo transform ++ * \param in_key input key ++ * \param key_len key lengths of 20, 28 and 36 bytes supported; last 4 bytes is nonce ++ * \return 0 - SUCCESS ++ * -EINVAL - bad key length ++*/ ++static int ctr_rfc3686_aes_set_key(struct crypto_tfm *tfm, ++ const uint8_t *in_key, ++ unsigned int key_len) ++{ ++ struct aes_ctx *ctx = crypto_tfm_ctx(tfm); ++ u32 *flags = &tfm->crt_flags; ++ ++ memcpy(ctx->nonce, in_key + (key_len - CTR_RFC3686_NONCE_SIZE), ++ CTR_RFC3686_NONCE_SIZE); ++ ++ key_len -= CTR_RFC3686_NONCE_SIZE; /* remove 4 bytes of nonce */ ++ ++ if (key_len != 16 && key_len != 24 && key_len != 32) { ++ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; ++ return -EINVAL; ++ } ++ ++ ctx->key_length = key_len; ++ ++ memcpy((u8 *)(ctx->buf), in_key, key_len); ++ ++ return 0; ++} ++ ++/** \fn void deu_aes(void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, u32 nbytes, int encdec, int mode) ++ * \ingroup LQ_AES_FUNCTIONS ++ * \brief main interface with DEU hardware in DMA mode ++ * \param ctx_arg crypto algo context ++ * \param out_arg output bytestream ++ * \param in_arg input bytestream ++ * \param iv_arg initialization vector ++ * \param nbytes length of bytestream ++ * \param encdec 1 for encrypt; 0 for decrypt ++ * \param mode operation mode such as ebc, cbc, ctr ++*/ ++ ++#ifdef CONFIG_CRYPTO_DEV_DMA ++static void deu_aes(void *ctx_arg, ++ u8 *out_arg, ++ const u8 *in_arg, ++ u8 *iv_arg, ++ u32 nbytes, ++ int encdec, ++ int mode) ++{ ++ u32 remain = nbytes; ++ u32 inc; ++ ++ while (remain > 0) { ++ if (remain >= DEU_MAX_PACKET_SIZE) ++ inc = DEU_MAX_PACKET_SIZE; ++ else ++ inc = remain; ++ ++ remain -= inc; ++ ++ deu_aes_core(ctx_arg, out_arg, in_arg, iv_arg, inc, encdec, ++ mode); ++ ++ out_arg += inc; ++ in_arg += inc; ++ } ++} ++#endif ++ ++/* definitions from linux/include/crypto.h: ++#define CRYPTO_TFM_MODE_ECB 0x00000001 ++#define CRYPTO_TFM_MODE_CBC 0x00000002 ++#define CRYPTO_TFM_MODE_CFB 0x00000004 ++#define CRYPTO_TFM_MODE_CTR 0x00000008 ++#define CRYPTO_TFM_MODE_OFB 0x00000010 ++but hardware definition: 0 ECB 1 CBC 2 OFB 3 CFB 4 CTR */ ++ ++/** \fn void deu_aes_ecb(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace) ++ * \ingroup LQ_AES_FUNCTIONS ++ * \brief sets AES hardware to ECB mode ++ * \param ctx crypto algo context ++ * \param dst output bytestream ++ * \param src input bytestream ++ * \param iv initialization vector ++ * \param nbytes length of bytestream ++ * \param encdec 1 for encrypt; 0 for decrypt ++ * \param inplace not used ++*/ ++static void deu_aes_ecb(void *ctx, ++ uint8_t *dst, ++ const uint8_t *src, ++ uint8_t *iv, ++ size_t nbytes, ++ int encdec, ++ int inplace) ++{ ++ deu_aes(ctx, dst, src, NULL, nbytes, encdec, 0); ++} ++ ++/** \fn void deu_aes_cbc(void *ctx, uint8_t *dst, 
const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace) ++ * \ingroup LQ_AES_FUNCTIONS ++ * \brief sets AES hardware to CBC mode ++ * \param ctx crypto algo context ++ * \param dst output bytestream ++ * \param src input bytestream ++ * \param iv initialization vector ++ * \param nbytes length of bytestream ++ * \param encdec 1 for encrypt; 0 for decrypt ++ * \param inplace not used ++*/ ++static void deu_aes_cbc(void *ctx, ++ uint8_t *dst, ++ const uint8_t *src, ++ uint8_t *iv, ++ size_t nbytes, ++ int encdec, ++ int inplace) ++{ ++ deu_aes(ctx, dst, src, iv, nbytes, encdec, 1); ++} ++ ++#if 0 ++/** \fn void deu_aes_ofb(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace) ++ * \ingroup LQ_AES_FUNCTIONS ++ * \brief sets AES hardware to OFB mode ++ * \param ctx crypto algo context ++ * \param dst output bytestream ++ * \param src input bytestream ++ * \param iv initialization vector ++ * \param nbytes length of bytestream ++ * \param encdec 1 for encrypt; 0 for decrypt ++ * \param inplace not used ++*/ ++static void deu_aes_ofb(void *ctx, ++ uint8_t *dst, ++ const uint8_t *src, ++ uint8_t *iv, ++ size_t nbytes, ++ int encdec, ++ int inplace) ++{ ++ deu_aes(ctx, dst, src, iv, nbytes, encdec, 2); ++} ++ ++/** \fn void deu_aes_cfb(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace) ++ * \ingroup LQ_AES_FUNCTIONS ++ * \brief sets AES hardware to CFB mode ++ * \param ctx crypto algo context ++ * \param dst output bytestream ++ * \param src input bytestream ++ * \param iv initialization vector ++ * \param nbytes length of bytestream ++ * \param encdec 1 for encrypt; 0 for decrypt ++ * \param inplace not used ++*/ ++static void deu_aes_cfb(void *ctx, ++ uint8_t *dst, ++ const uint8_t *src, ++ uint8_t *iv, ++ size_t nbytes, ++ int encdec, ++ int inplace) ++{ ++ deu_aes(ctx, dst, src, iv, nbytes, encdec, 3); ++} ++#endif ++ ++/** \fn void deu_aes_ctr(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace) ++ * \ingroup LQ_AES_FUNCTIONS ++ * \brief sets AES hardware to CTR mode ++ * \param ctx crypto algo context ++ * \param dst output bytestream ++ * \param src input bytestream ++ * \param iv initialization vector ++ * \param nbytes length of bytestream ++ * \param encdec 1 for encrypt; 0 for decrypt ++ * \param inplace not used ++*/ ++static void deu_aes_ctr(void *ctx, ++ uint8_t *dst, ++ const uint8_t *src, ++ uint8_t *iv, ++ size_t nbytes, ++ int encdec, ++ int inplace) ++{ ++ deu_aes(ctx, dst, src, iv, nbytes, encdec, 4); ++} ++ ++/** \fn void aes_encrypt(struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in) ++ * \ingroup LQ_AES_FUNCTIONS ++ * \brief encrypt AES_BLOCK_SIZE of data ++ * \param tfm linux crypto algo transform ++ * \param out output bytestream ++ * \param in input bytestream ++*/ ++static void aes_encrypt(struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in) ++{ ++ struct aes_ctx *ctx = crypto_tfm_ctx(tfm); ++ deu_aes(ctx, out, in, NULL, AES_BLOCK_SIZE, CRYPTO_DIR_ENCRYPT, 0); ++} ++ ++/** \fn void aes_decrypt(struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in) ++ * \ingroup LQ_AES_FUNCTIONS ++ * \brief decrypt AES_BLOCK_SIZE of data ++ * \param tfm linux crypto algo transform ++ * \param out output bytestream ++ * \param in input bytestream ++*/ ++static void aes_decrypt(struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in) ++{ ++ struct aes_ctx *ctx = crypto_tfm_ctx(tfm); ++ deu_aes(ctx, out, in, NULL, AES_BLOCK_SIZE, 
CRYPTO_DIR_DECRYPT, 0); ++} ++ ++/* ++ * \brief AES function mappings ++*/ ++static struct crypto_alg aes_alg = { ++ .cra_name = "aes", ++ .cra_driver_name = "lq_deu-aes", ++ .cra_flags = CRYPTO_ALG_TYPE_CIPHER, ++ .cra_blocksize = AES_BLOCK_SIZE, ++ .cra_ctxsize = sizeof(struct aes_ctx), ++ .cra_module = THIS_MODULE, ++ .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), ++ .cra_u = { ++ .cipher = { ++ .cia_min_keysize = AES_MIN_KEY_SIZE, ++ .cia_max_keysize = AES_MAX_KEY_SIZE, ++ .cia_setkey = aes_set_key, ++ .cia_encrypt = aes_encrypt, ++ .cia_decrypt = aes_decrypt, ++ } ++ } ++}; ++ ++/** \fn int ecb_aes_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) ++ * \ingroup LQ_AES_FUNCTIONS ++ * \brief ECB AES encrypt using linux crypto blkcipher ++ * \param desc blkcipher descriptor ++ * \param dst output scatterlist ++ * \param src input scatterlist ++ * \param nbytes data size in bytes ++ * \return err ++*/ ++static int ecb_aes_encrypt(struct blkcipher_desc *desc, ++ struct scatterlist *dst, ++ struct scatterlist *src, ++ unsigned int nbytes) ++{ ++ struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ++ struct blkcipher_walk walk; ++ int err; ++ ++ blkcipher_walk_init(&walk, dst, src, nbytes); ++ err = blkcipher_walk_virt(desc, &walk); ++ ++ while ((nbytes = walk.nbytes)) { ++ nbytes -= (nbytes % AES_BLOCK_SIZE); ++ deu_aes_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr, ++ NULL, nbytes, CRYPTO_DIR_ENCRYPT, 0); ++ nbytes &= AES_BLOCK_SIZE - 1; ++ err = blkcipher_walk_done(desc, &walk, nbytes); ++ } ++ ++ return err; ++} ++ ++/** \fn int ecb_aes_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) ++ * \ingroup LQ_AES_FUNCTIONS ++ * \brief ECB AES decrypt using linux crypto blkcipher ++ * \param desc blkcipher descriptor ++ * \param dst output scatterlist ++ * \param src input scatterlist ++ * \param nbytes data size in bytes ++ * \return err ++*/ ++static int ecb_aes_decrypt(struct blkcipher_desc *desc, ++ struct scatterlist *dst, ++ struct scatterlist *src, ++ unsigned int nbytes) ++{ ++ struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ++ struct blkcipher_walk walk; ++ int err; ++ ++ blkcipher_walk_init(&walk, dst, src, nbytes); ++ err = blkcipher_walk_virt(desc, &walk); ++ ++ while ((nbytes = walk.nbytes)) { ++ nbytes -= (nbytes % AES_BLOCK_SIZE); ++ deu_aes_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr, ++ NULL, nbytes, CRYPTO_DIR_DECRYPT, 0); ++ nbytes &= AES_BLOCK_SIZE - 1; ++ err = blkcipher_walk_done(desc, &walk, nbytes); ++ } ++ ++ return err; ++} ++ ++/* ++ * \brief AES function mappings ++*/ ++static struct crypto_alg ecb_aes_alg = { ++ .cra_name = "ecb(aes)", ++ .cra_driver_name = "lq_deu-ecb(aes)", ++ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, ++ .cra_blocksize = AES_BLOCK_SIZE, ++ .cra_ctxsize = sizeof(struct aes_ctx), ++ .cra_type = &crypto_blkcipher_type, ++ .cra_module = THIS_MODULE, ++ .cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list), ++ .cra_u = { ++ .blkcipher = { ++ .min_keysize = AES_MIN_KEY_SIZE, ++ .max_keysize = AES_MAX_KEY_SIZE, ++ .setkey = aes_set_key, ++ .encrypt = ecb_aes_encrypt, ++ .decrypt = ecb_aes_decrypt, ++ } ++ } ++}; ++ ++/** \fn int cbc_aes_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) ++ * \ingroup LQ_AES_FUNCTIONS ++ * \brief CBC AES encrypt using linux crypto blkcipher ++ * \param desc blkcipher descriptor ++ * \param dst output scatterlist ++ * \param src input scatterlist 
++ * \param nbytes data size in bytes ++ * \return err ++*/ ++static int cbc_aes_encrypt(struct blkcipher_desc *desc, ++ struct scatterlist *dst, ++ struct scatterlist *src, ++ unsigned int nbytes) ++{ ++ struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ++ struct blkcipher_walk walk; ++ int err; ++ ++ blkcipher_walk_init(&walk, dst, src, nbytes); ++ err = blkcipher_walk_virt(desc, &walk); ++ ++ while ((nbytes = walk.nbytes)) { ++ u8 *iv = walk.iv; ++ nbytes -= (nbytes % AES_BLOCK_SIZE); ++ deu_aes_cbc(ctx, walk.dst.virt.addr, walk.src.virt.addr, ++ iv, nbytes, CRYPTO_DIR_ENCRYPT, 0); ++ nbytes &= AES_BLOCK_SIZE - 1; ++ err = blkcipher_walk_done(desc, &walk, nbytes); ++ } ++ ++ return err; ++} ++ ++/** \fn int cbc_aes_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) ++ * \ingroup LQ_AES_FUNCTIONS ++ * \brief CBC AES decrypt using linux crypto blkcipher ++ * \param desc blkcipher descriptor ++ * \param dst output scatterlist ++ * \param src input scatterlist ++ * \param nbytes data size in bytes ++ * \return err ++*/ ++static int cbc_aes_decrypt(struct blkcipher_desc *desc, ++ struct scatterlist *dst, ++ struct scatterlist *src, ++ unsigned int nbytes) ++{ ++ struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ++ struct blkcipher_walk walk; ++ int err; ++ ++ blkcipher_walk_init(&walk, dst, src, nbytes); ++ err = blkcipher_walk_virt(desc, &walk); ++ ++ while ((nbytes = walk.nbytes)) { ++ u8 *iv = walk.iv; ++ nbytes -= (nbytes % AES_BLOCK_SIZE); ++ deu_aes_cbc(ctx, walk.dst.virt.addr, walk.src.virt.addr, ++ iv, nbytes, CRYPTO_DIR_DECRYPT, 0); ++ nbytes &= AES_BLOCK_SIZE - 1; ++ err = blkcipher_walk_done(desc, &walk, nbytes); ++ } ++ ++ return err; ++} ++ ++/* ++ * \brief AES function mappings ++*/ ++static struct crypto_alg cbc_aes_alg = { ++ .cra_name = "cbc(aes)", ++ .cra_driver_name = "lq_deu-cbc(aes)", ++ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, ++ .cra_blocksize = AES_BLOCK_SIZE, ++ .cra_ctxsize = sizeof(struct aes_ctx), ++ .cra_type = &crypto_blkcipher_type, ++ .cra_module = THIS_MODULE, ++ .cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list), ++ .cra_u = { ++ .blkcipher = { ++ .min_keysize = AES_MIN_KEY_SIZE, ++ .max_keysize = AES_MAX_KEY_SIZE, ++ .ivsize = AES_BLOCK_SIZE, ++ .setkey = aes_set_key, ++ .encrypt = cbc_aes_encrypt, ++ .decrypt = cbc_aes_decrypt, ++ } ++ } ++}; ++ ++/** \fn int ctr_basic_aes_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) ++ * \ingroup LQ_AES_FUNCTIONS ++ * \brief Counter mode AES encrypt using linux crypto blkcipher ++ * \param desc blkcipher descriptor ++ * \param dst output scatterlist ++ * \param src input scatterlist ++ * \param nbytes data size in bytes ++ * \return err ++*/ ++static int ctr_basic_aes_encrypt(struct blkcipher_desc *desc, ++ struct scatterlist *dst, ++ struct scatterlist *src, ++ unsigned int nbytes) ++{ ++ struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ++ struct blkcipher_walk walk; ++ int err; ++ ++ blkcipher_walk_init(&walk, dst, src, nbytes); ++ err = blkcipher_walk_virt(desc, &walk); ++ ++ while ((nbytes = walk.nbytes)) { ++ u8 *iv = walk.iv; ++ nbytes -= (nbytes % AES_BLOCK_SIZE); ++ deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr, ++ iv, nbytes, CRYPTO_DIR_ENCRYPT, 0); ++ nbytes &= AES_BLOCK_SIZE - 1; ++ err = blkcipher_walk_done(desc, &walk, nbytes); ++ } ++ ++ return err; ++} ++ ++/** \fn int ctr_basic_aes_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist 
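Because cbc_aes_alg above registers under the generic name "cbc(aes)" (driver name "lq_deu-cbc(aes)"), other kernel code reaches the engine through the usual blkcipher API rather than by calling the deu_* functions directly. A minimal in-kernel sketch of that path (error handling trimmed; everything outside this patch is the standard 2.6-era crypto API, and the hardware path is only taken when the crypto core selects the lq_deu implementation):

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int demo_cbc_aes(u8 *key, u8 *iv, u8 *buf, unsigned int len)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;

	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc.tfm = tfm;
	desc.flags = 0;
	crypto_blkcipher_setkey(tfm, key, 16);		/* lands in aes_set_key() */
	crypto_blkcipher_set_iv(tfm, iv, 16);
	sg_init_one(&sg, buf, len);			/* len: multiple of AES_BLOCK_SIZE */
	crypto_blkcipher_encrypt(&desc, &sg, &sg, len);	/* cbc_aes_encrypt() above */

	crypto_blkcipher_free(tfm);
	return 0;
}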
*src, unsigned int nbytes) ++ * \ingroup LQ_AES_FUNCTIONS ++ * \brief Counter mode AES decrypt using linux crypto blkcipher ++ * \param desc blkcipher descriptor ++ * \param dst output scatterlist ++ * \param src input scatterlist ++ * \param nbytes data size in bytes ++ * \return err ++*/ ++static int ctr_basic_aes_decrypt(struct blkcipher_desc *desc, ++ struct scatterlist *dst, ++ struct scatterlist *src, ++ unsigned int nbytes) ++{ ++ struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ++ struct blkcipher_walk walk; ++ int err; ++ ++ blkcipher_walk_init(&walk, dst, src, nbytes); ++ err = blkcipher_walk_virt(desc, &walk); ++ ++ while ((nbytes = walk.nbytes)) { ++ u8 *iv = walk.iv; ++ nbytes -= (nbytes % AES_BLOCK_SIZE); ++ deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr, ++ iv, nbytes, CRYPTO_DIR_DECRYPT, 0); ++ nbytes &= AES_BLOCK_SIZE - 1; ++ err = blkcipher_walk_done(desc, &walk, nbytes); ++ } ++ ++ return err; ++} ++ ++/* ++ * \brief AES function mappings ++*/ ++static struct crypto_alg ctr_basic_aes_alg = { ++ .cra_name = "ctr(aes)", ++ .cra_driver_name = "lq_deu-ctr(aes)", ++ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, ++ .cra_blocksize = AES_BLOCK_SIZE, ++ .cra_ctxsize = sizeof(struct aes_ctx), ++ .cra_type = &crypto_blkcipher_type, ++ .cra_module = THIS_MODULE, ++ .cra_list = LIST_HEAD_INIT(ctr_basic_aes_alg.cra_list), ++ .cra_u = { ++ .blkcipher = { ++ .min_keysize = AES_MIN_KEY_SIZE, ++ .max_keysize = AES_MAX_KEY_SIZE, ++ .ivsize = AES_BLOCK_SIZE, ++ .setkey = aes_set_key, ++ .encrypt = ctr_basic_aes_encrypt, ++ .decrypt = ctr_basic_aes_decrypt, ++ } ++ } ++}; ++ ++/** \fn int ctr_rfc3686_aes_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) ++ * \ingroup LQ_AES_FUNCTIONS ++ * \brief Counter mode AES (rfc3686) encrypt using linux crypto blkcipher ++ * \param desc blkcipher descriptor ++ * \param dst output scatterlist ++ * \param src input scatterlist ++ * \param nbytes data size in bytes ++ * \return err ++*/ ++static int ctr_rfc3686_aes_encrypt(struct blkcipher_desc *desc, ++ struct scatterlist *dst, ++ struct scatterlist *src, ++ unsigned int nbytes) ++{ ++ struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ++ struct blkcipher_walk walk; ++ int err; ++ u8 rfc3686_iv[16]; ++ ++ blkcipher_walk_init(&walk, dst, src, nbytes); ++ err = blkcipher_walk_virt(desc, &walk); ++ ++ /* set up counter block */ ++ memcpy(rfc3686_iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE); ++ memcpy(rfc3686_iv + CTR_RFC3686_NONCE_SIZE, walk.iv, ++ CTR_RFC3686_IV_SIZE); ++ ++ /* initialize counter portion of counter block */ ++ *(__be32 *)(rfc3686_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) = ++ cpu_to_be32(1); ++ ++ while ((nbytes = walk.nbytes)) { ++ nbytes -= (nbytes % AES_BLOCK_SIZE); ++ deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr, ++ rfc3686_iv, nbytes, CRYPTO_DIR_ENCRYPT, 0); ++ nbytes &= AES_BLOCK_SIZE - 1; ++ err = blkcipher_walk_done(desc, &walk, nbytes); ++ } ++ ++ return err; ++} ++ ++/** \fn int ctr_rfc3686_aes_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) ++ * \ingroup LQ_AES_FUNCTIONS ++ * \brief Counter mode AES (rfc3686) decrypt using linux crypto blkcipher ++ * \param desc blkcipher descriptor ++ * \param dst output scatterlist ++ * \param src input scatterlist ++ * \param nbytes data size in bytes ++ * \return err ++*/ ++static int ctr_rfc3686_aes_decrypt(struct blkcipher_desc *desc, ++ struct scatterlist *dst, ++ struct scatterlist *src, ++ 
unsigned int nbytes) ++{ ++ struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ++ struct blkcipher_walk walk; ++ int err; ++ u8 rfc3686_iv[16]; ++ ++ blkcipher_walk_init(&walk, dst, src, nbytes); ++ err = blkcipher_walk_virt(desc, &walk); ++ ++ /* set up counter block */ ++ memcpy(rfc3686_iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE); ++ memcpy(rfc3686_iv + CTR_RFC3686_NONCE_SIZE, walk.iv, ++ CTR_RFC3686_IV_SIZE); ++ ++ /* initialize counter portion of counter block */ ++ *(__be32 *)(rfc3686_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) = ++ cpu_to_be32(1); ++ ++ while ((nbytes = walk.nbytes)) { ++ nbytes -= (nbytes % AES_BLOCK_SIZE); ++ deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr, ++ rfc3686_iv, nbytes, CRYPTO_DIR_DECRYPT, 0); ++ nbytes &= AES_BLOCK_SIZE - 1; ++ err = blkcipher_walk_done(desc, &walk, nbytes); ++ } ++ ++ return err; ++} ++ ++/* ++ * \brief AES function mappings ++*/ ++static struct crypto_alg ctr_rfc3686_aes_alg = { ++ .cra_name = "rfc3686(ctr(aes))", ++ .cra_driver_name = "lq_deu-ctr-rfc3686(aes)", ++ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, ++ .cra_blocksize = AES_BLOCK_SIZE, ++ .cra_ctxsize = sizeof(struct aes_ctx), ++ .cra_type = &crypto_blkcipher_type, ++ .cra_module = THIS_MODULE, ++ .cra_list = LIST_HEAD_INIT(ctr_rfc3686_aes_alg.cra_list), ++ .cra_u = { ++ .blkcipher = { ++ .min_keysize = AES_MIN_KEY_SIZE, ++ .max_keysize = CTR_RFC3686_MAX_KEY_SIZE, ++ .ivsize = CTR_RFC3686_IV_SIZE, ++ .setkey = ctr_rfc3686_aes_set_key, ++ .encrypt = ctr_rfc3686_aes_encrypt, ++ .decrypt = ctr_rfc3686_aes_decrypt, ++ } ++ } ++}; ++ ++/** \fn int lq_deu_init_aes (void) ++ * \ingroup LQ_AES_FUNCTIONS ++ * \brief function to initialize AES driver ++ * \return ret ++*/ ++int lq_deu_init_aes(void) ++{ ++ int ret; ++ ++ if ((ret = crypto_register_alg(&aes_alg))) ++ goto aes_err; ++ ++ if ((ret = crypto_register_alg(&ecb_aes_alg))) ++ goto ecb_aes_err; ++ ++ if ((ret = crypto_register_alg(&cbc_aes_alg))) ++ goto cbc_aes_err; ++ ++ if ((ret = crypto_register_alg(&ctr_basic_aes_alg))) ++ goto ctr_basic_aes_err; ++ ++ if ((ret = crypto_register_alg(&ctr_rfc3686_aes_alg))) ++ goto ctr_rfc3686_aes_err; ++ ++ deu_aes_chip_init(); ++ ++ CRTCL_SECT_INIT; ++ ++#ifdef CONFIG_CRYPTO_DEV_DMA ++ if (ALLOCATE_MEMORY(BUFFER_IN, AES_ALGO) < 0) { ++ printk(KERN_ERR "[%s %s %d]: malloc memory fail!\n", ++ __FILE__, __func__, __LINE__); ++ goto ctr_rfc3686_aes_err; ++ } ++ if (ALLOCATE_MEMORY(BUFFER_OUT, AES_ALGO) < 0) { ++ printk(KERN_ERR "[%s %s %d]: malloc memory fail!\n", ++ __FILE__, __func__, __LINE__); ++ goto ctr_rfc3686_aes_err; ++ } ++#endif ++ ++ printk(KERN_NOTICE "Lantiq DEU AES initialized%s.\n", ++ disable_deudma ? 
"" : " (DMA)"); ++ return ret; ++ ++ctr_rfc3686_aes_err: ++ crypto_unregister_alg(&ctr_rfc3686_aes_alg); ++ printk(KERN_ERR "Lantiq ctr_rfc3686_aes initialization failed!\n"); ++ return ret; ++ctr_basic_aes_err: ++ crypto_unregister_alg(&ctr_basic_aes_alg); ++ printk(KERN_ERR "Lantiq ctr_basic_aes initialization failed!\n"); ++ return ret; ++cbc_aes_err: ++ crypto_unregister_alg(&cbc_aes_alg); ++ printk(KERN_ERR "Lantiq cbc_aes initialization failed!\n"); ++ return ret; ++ecb_aes_err: ++ crypto_unregister_alg(&ecb_aes_alg); ++ printk(KERN_ERR "Lantiq aes initialization failed!\n"); ++ return ret; ++aes_err: ++ printk(KERN_ERR "Lantiq DEU AES initialization failed!\n"); ++ return ret; ++} ++ ++/** \fn void lq_deu_fini_aes(void) ++ * \ingroup LQ_AES_FUNCTIONS ++ * \brief unregister aes driver ++*/ ++void lq_deu_fini_aes(void) ++{ ++ crypto_unregister_alg(&aes_alg); ++ crypto_unregister_alg(&ecb_aes_alg); ++ crypto_unregister_alg(&cbc_aes_alg); ++ crypto_unregister_alg(&ctr_basic_aes_alg); ++ crypto_unregister_alg(&ctr_rfc3686_aes_alg); ++ ++#ifdef CONFIG_CRYPTO_DEV_DMA ++ FREE_MEMORY(aes_buff_in); ++ FREE_MEMORY(aes_buff_out); ++#endif ++} +--- /dev/null ++++ b/drivers/crypto/lantiq/arc4.c +@@ -0,0 +1,397 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 
++ * ++ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com> ++ * Copyright (C) 2009 Mohammad Firdaus ++ */ ++ ++/** ++ \defgroup LQ_DEU LQ_DEU_DRIVERS ++ \ingroup API ++ \brief Lantiq DEU driver module ++*/ ++ ++/** ++ \file arc4.c ++ \ingroup LQ_DEU ++ \brief ARC4 encryption DEU driver file ++*/ ++ ++/** ++ \defgroup LQ_ARC4_FUNCTIONS LQ_ARC4_FUNCTIONS ++ \ingroup LQ_DEU ++ \brief Lantiq DEU driver functions ++*/ ++ ++#include <linux/version.h> ++#include <linux/module.h> ++#include <linux/init.h> ++#include <linux/types.h> ++#include <linux/errno.h> ++#include <linux/crypto.h> ++#include <crypto/algapi.h> ++#include <linux/interrupt.h> ++#include <asm/byteorder.h> ++#include <linux/delay.h> ++ ++#ifdef CONFIG_SOL_LANTIQ_XWAY ++ ++#include "deu.h" ++ ++#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA ++ ++static spinlock_t cipher_lock; ++ ++/* Preprocessor declerations */ ++#define ARC4_MIN_KEY_SIZE 1 ++/* #define ARC4_MAX_KEY_SIZE 256 */ ++#define ARC4_MAX_KEY_SIZE 16 ++#define ARC4_BLOCK_SIZE 1 ++ ++/* ++ * \brief arc4 private structure ++*/ ++struct arc4_ctx { ++ int key_length; ++ u8 buf[120]; ++}; ++ ++/** \fn static void deu_arc4(void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, u32 nbytes, int encdec, int mode) ++ \ingroup LQ_ARC4_FUNCTIONS ++ \brief main interface to AES hardware ++ \param ctx_arg crypto algo context ++ \param out_arg output bytestream ++ \param in_arg input bytestream ++ \param iv_arg initialization vector ++ \param nbytes length of bytestream ++ \param encdec 1 for encrypt; 0 for decrypt ++ \param mode operation mode such as ebc, cbc, ctr ++*/ ++static void deu_arc4(void *ctx_arg, ++ u8 *out_arg, ++ const u8 *in_arg, ++ u8 *iv_arg, ++ u32 nbytes, ++ int encdec, ++ int mode) ++{ ++ volatile struct deu_arc4 *arc4 = (struct deu_arc4 *) ARC4_START; ++ int i = 0; ++ ulong flag; ++ ++#if 1 /* need to handle nbytes not multiple of 16 */ ++ volatile u32 tmp_array32[4]; ++ volatile u8 *tmp_ptr8; ++ int remaining_bytes, j; ++#endif ++ ++ CRTCL_SECT_START; ++ ++ arc4->IDLEN = nbytes; ++ ++#if 1 ++ while (i < nbytes) { ++ arc4->ID3R = *((u32 *) in_arg + (i>>2) + 0); ++ arc4->ID2R = *((u32 *) in_arg + (i>>2) + 1); ++ arc4->ID1R = *((u32 *) in_arg + (i>>2) + 2); ++ arc4->ID0R = *((u32 *) in_arg + (i>>2) + 3); ++ ++ arc4->ctrl.GO = 1; ++ ++ while (arc4->ctrl.BUS) { ++ /* this will not take long */ } ++ ++#if 1 ++ /* need to handle nbytes not multiple of 16 */ ++ tmp_array32[0] = arc4->OD3R; ++ tmp_array32[1] = arc4->OD2R; ++ tmp_array32[2] = arc4->OD1R; ++ tmp_array32[3] = arc4->OD0R; ++ ++ remaining_bytes = nbytes - i; ++ if (remaining_bytes > 16) ++ remaining_bytes = 16; ++ ++ tmp_ptr8 = (u8 *)&tmp_array32[0]; ++ for (j = 0; j < remaining_bytes; j++) ++ *out_arg++ = *tmp_ptr8++; ++#else ++ *((u32 *) out_arg + (i>>2) + 0) = arc4->OD3R; ++ *((u32 *) out_arg + (i>>2) + 1) = arc4->OD2R; ++ *((u32 *) out_arg + (i>>2) + 2) = arc4->OD1R; ++ *((u32 *) out_arg + (i>>2) + 3) = arc4->OD0R; ++#endif ++ ++ i += 16; ++ } ++#else /* dma */ ++ ++#endif /* dma */ ++ ++ CRTCL_SECT_END; ++} ++ ++/** \fn arc4_chip_init(void) ++ \ingroup LQ_ARC4_FUNCTIONS ++ \brief initialize arc4 hardware ++*/ ++static void arc4_chip_init(void) ++{ ++ /* do nothing */ ++} ++ ++/** \fn static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) ++ \ingroup LQ_ARC4_FUNCTIONS ++ \brief sets ARC4 key ++ \param tfm linux crypto algo transform ++ \param in_key input key ++ \param key_len key lengths less than or equal to 16 bytes supported ++*/ ++static int arc4_set_key(struct 
crypto_tfm *tfm, ++ const u8 *inkey, ++ unsigned int key_len) ++{ ++ /* struct arc4_ctx *ctx = crypto_tfm_ctx(tfm); */ ++ volatile struct deu_arc4 *arc4 = (struct deu_arc4 *) ARC4_START; ++ ++ u32 *in_key = (u32 *)inkey; ++ ++ /* must program all bits at one go?!!! */ ++#if 1 ++ /* #ifndef CONFIG_CRYPTO_DEV_VR9_DMA */ ++ *LQ_ARC4_CON = ( (1<<31) | ((key_len - 1)<<27) | (1<<26) | (3<<16) ); ++ /* NDC=1,ENDI=1,GO=0,KSAE=1,SM=0 */ ++ ++ arc4->K3R = *((u32 *) in_key + 0); ++ arc4->K2R = *((u32 *) in_key + 1); ++ arc4->K1R = *((u32 *) in_key + 2); ++ arc4->K0R = *((u32 *) in_key + 3); ++#else /* dma */ ++ *AMAZONS_ARC4_CON = ( (1<<31) | ((key_len - 1)<<27) | (1<<26) | (3<<16) | (1<<4) ); ++ /* NDC=1,ENDI=1,GO=0,KSAE=1,SM=1 */ ++ ++ arc4->K3R = *((u32 *) in_key + 0); ++ arc4->K2R = *((u32 *) in_key + 1); ++ arc4->K1R = *((u32 *) in_key + 2); ++ arc4->K0R = *((u32 *) in_key + 3); ++ ++#if 0 ++ arc4->K3R = deu_endian_swap(*((u32 *) in_key + 0)); ++ arc4->K2R = deu_endian_swap(*((u32 *) in_key + 1)); ++ arc4->K1R = deu_endian_swap(*((u32 *) in_key + 2)); ++ arc4->K0R = deu_endian_swap(*((u32 *) in_key + 3)); ++#endif ++ ++#endif ++ ++#if 0 /* arc4 is a ugly state machine, KSAE can only be set once per session */ ++ ctx->key_length = key_len; ++ ++ memcpy((u8 *)(ctx->buf), in_key, key_len); ++#endif ++ ++ return 0; ++} ++ ++/** \fn static void deu_arc4_ecb(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace) ++ \ingroup LQ_ARC4_FUNCTIONS ++ \brief sets ARC4 hardware to ECB mode ++ \param ctx crypto algo context ++ \param dst output bytestream ++ \param src input bytestream ++ \param iv initialization vector ++ \param nbytes length of bytestream ++ \param encdec 1 for encrypt; 0 for decrypt ++ \param inplace not used ++*/ ++static void deu_arc4_ecb(void *ctx, ++ uint8_t *dst, ++ const uint8_t *src, ++ uint8_t *iv, ++ size_t nbytes, ++ int encdec, ++ int inplace) ++{ ++ deu_arc4(ctx, dst, src, NULL, nbytes, encdec, 0); ++} ++ ++/** \fn static void arc4_crypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) ++ \ingroup LQ_ARC4_FUNCTIONS ++ \brief encrypt/decrypt ARC4_BLOCK_SIZE of data ++ \param tfm linux crypto algo transform ++ \param out output bytestream ++ \param in input bytestream ++*/ ++static void arc4_crypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) ++{ ++ struct arc4_ctx *ctx = crypto_tfm_ctx(tfm); ++ ++ deu_arc4(ctx, out, in, NULL, ARC4_BLOCK_SIZE, ++ CRYPTO_DIR_DECRYPT, CRYPTO_TFM_MODE_ECB); ++} ++ ++/* ++ * \brief ARC4 function mappings ++*/ ++static struct crypto_alg arc4_alg = { ++ .cra_name = "arc4", ++ .cra_driver_name = "lq_deu-arc4", ++ .cra_flags = CRYPTO_ALG_TYPE_CIPHER, ++ .cra_blocksize = ARC4_BLOCK_SIZE, ++ .cra_ctxsize = sizeof(struct arc4_ctx), ++ .cra_module = THIS_MODULE, ++ .cra_list = LIST_HEAD_INIT(arc4_alg.cra_list), ++ .cra_u = { ++ .cipher = { ++ .cia_min_keysize = ARC4_MIN_KEY_SIZE, ++ .cia_max_keysize = ARC4_MAX_KEY_SIZE, ++ .cia_setkey = arc4_set_key, ++ .cia_encrypt = arc4_crypt, ++ .cia_decrypt = arc4_crypt, ++ } ++ } ++}; ++ ++/** \fn static int ecb_arc4_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) ++ \ingroup LQ_ARC4_FUNCTIONS ++ \brief ECB ARC4 encrypt using linux crypto blkcipher ++ \param desc blkcipher descriptor ++ \param dst output scatterlist ++ \param src input scatterlist ++ \param nbytes data size in bytes ++*/ ++static int ecb_arc4_encrypt(struct blkcipher_desc *desc, ++ struct scatterlist *dst, ++ struct scatterlist *src, ++ unsigned int 
nbytes) ++{ ++ struct arc4_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ++ struct blkcipher_walk walk; ++ int err; ++ ++ DPRINTF(1, "\n"); ++ blkcipher_walk_init(&walk, dst, src, nbytes); ++ err = blkcipher_walk_virt(desc, &walk); ++ ++ while ((nbytes = walk.nbytes)) { ++ deu_arc4_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr, ++ NULL, nbytes, CRYPTO_DIR_ENCRYPT, 0); ++ nbytes &= ARC4_BLOCK_SIZE - 1; ++ err = blkcipher_walk_done(desc, &walk, nbytes); ++ } ++ ++ return err; ++} ++ ++/** \fn static int ecb_arc4_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) ++ \ingroup LQ_ARC4_FUNCTIONS ++ \brief ECB ARC4 decrypt using linux crypto blkcipher ++ \param desc blkcipher descriptor ++ \param dst output scatterlist ++ \param src input scatterlist ++ \param nbytes data size in bytes ++*/ ++static int ecb_arc4_decrypt(struct blkcipher_desc *desc, ++ struct scatterlist *dst, ++ struct scatterlist *src, ++ unsigned int nbytes) ++{ ++ struct arc4_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ++ struct blkcipher_walk walk; ++ int err; ++ ++ DPRINTF(1, "\n"); ++ blkcipher_walk_init(&walk, dst, src, nbytes); ++ err = blkcipher_walk_virt(desc, &walk); ++ ++ while ((nbytes = walk.nbytes)) { ++ deu_arc4_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr, ++ NULL, nbytes, CRYPTO_DIR_DECRYPT, 0); ++ nbytes &= ARC4_BLOCK_SIZE - 1; ++ err = blkcipher_walk_done(desc, &walk, nbytes); ++ } ++ ++ return err; ++} ++ ++/* ++ * \brief ARC4 function mappings ++*/ ++static struct crypto_alg ecb_arc4_alg = { ++ .cra_name = "ecb(arc4)", ++ .cra_driver_name = "lq_deu-ecb(arc4)", ++ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, ++ .cra_blocksize = ARC4_BLOCK_SIZE, ++ .cra_ctxsize = sizeof(struct arc4_ctx), ++ .cra_type = &crypto_blkcipher_type, ++ .cra_module = THIS_MODULE, ++ .cra_list = LIST_HEAD_INIT(ecb_arc4_alg.cra_list), ++ .cra_u = { ++ .blkcipher = { ++ .min_keysize = ARC4_MIN_KEY_SIZE, ++ .max_keysize = ARC4_MAX_KEY_SIZE, ++ .setkey = arc4_set_key, ++ .encrypt = ecb_arc4_encrypt, ++ .decrypt = ecb_arc4_decrypt, ++ } ++ } ++}; ++ ++/** \fn int lq_deu_init_arc4(void) ++ \ingroup LQ_ARC4_FUNCTIONS ++ \brief initialize arc4 driver ++*/ ++int lq_deu_init_arc4(void) ++{ ++ int ret; ++ ++ if ((ret = crypto_register_alg(&arc4_alg))) ++ goto arc4_err; ++ ++ if ((ret = crypto_register_alg(&ecb_arc4_alg))) ++ goto ecb_arc4_err; ++ ++ arc4_chip_init(); ++ ++ CRTCL_SECT_INIT; ++ ++ printk(KERN_NOTICE "Lantiq DEU ARC4 initialized %s.\n", ++ disable_deudma ? "" : " (DMA)"); ++ return ret; ++ ++arc4_err: ++ crypto_unregister_alg(&arc4_alg); ++ printk(KERN_ERR "Lantiq arc4 initialization failed!\n"); ++ return ret; ++ecb_arc4_err: ++ crypto_unregister_alg(&ecb_arc4_alg); ++ printk(KERN_ERR "Lantiq ecb_arc4 initialization failed!\n"); ++ ++ return ret; ++} ++ ++/** \fn void lq_deu_fini_arc4(void) ++ \ingroup LQ_ARC4_FUNCTIONS ++ \brief unregister arc4 driver ++*/ ++void lq_deu_fini_arc4(void) ++{ ++ crypto_unregister_alg(&arc4_alg); ++ crypto_unregister_alg(&ecb_arc4_alg); ++} ++ ++#endif ++ ++#endif +--- /dev/null ++++ b/drivers/crypto/lantiq/des.c +@@ -0,0 +1,929 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. 
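The single-block "arc4" cipher registered above has a block size of 1 and a key capped at 16 bytes by the K0R..K3R register interface (unlike the 256-byte maximum of the software arc4), so a hypothetical caller would use the plain cipher API rather than the blkcipher one; a sketch only, with illustrative names:

#include <linux/crypto.h>
#include <linux/err.h>

/* illustrative only: push one byte through the DEU ARC4 engine */
static int demo_arc4_byte(const u8 *key, unsigned int keylen, u8 *dst, const u8 *src)
{
	/* "lq_deu-arc4" requests this driver by name; plain "arc4" picks the
	 * highest-priority provider, which may be the software implementation */
	struct crypto_cipher *tfm = crypto_alloc_cipher("lq_deu-arc4", 0, 0);
	int err;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, keylen);	/* keylen <= 16 here */
	if (!err)
		crypto_cipher_encrypt_one(tfm, dst, src);	/* one 1-byte block */

	crypto_free_cipher(tfm);
	return err;
}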
++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com> ++ * Copyright (C) 2009 Mohammad Firdaus ++ */ ++ ++/** ++ \defgroup LQ_DEU LQ_DEU_DRIVERS ++ \ingroup API ++ \brief Lantiq DEU driver ++*/ ++ ++/** ++ \file des.c ++ \ingroup LQ_DEU ++ \brief DES encryption DEU driver file ++*/ ++ ++/** ++ \defgroup LQ_DES_FUNCTIONS LQ_DES_FUNCTIONS ++ \ingroup LQ_DEU ++ \brief Lantiq DES Encryption functions ++*/ ++ ++#include <linux/version.h> ++#include <linux/module.h> ++#include <linux/init.h> ++#include <linux/types.h> ++#include <linux/errno.h> ++#include <linux/crypto.h> ++#include <linux/interrupt.h> ++#include <linux/delay.h> ++#include <asm/byteorder.h> ++#include <crypto/algapi.h> ++ ++#ifdef CONFIG_SOL_LANTIQ_XWAY ++ ++#include "deu.h" ++ ++#ifdef CONFIG_CRYPTO_DEV_DMA ++# include "deu_dma.h" ++#endif ++ ++static spinlock_t cipher_lock; ++ ++/* Preprocessor declarations */ ++#define DES_KEY_SIZE 8 ++#define DES_EXPKEY_WORDS 32 ++#define DES_BLOCK_SIZE 8 ++#define DES3_EDE_KEY_SIZE (3 * DES_KEY_SIZE) ++#define DES3_EDE_EXPKEY_WORDS (3 * DES_EXPKEY_WORDS) ++#define DES3_EDE_BLOCK_SIZE DES_BLOCK_SIZE ++ ++struct des_ctx { ++ int controlr_M; ++ int key_length; ++ u8 iv[DES_BLOCK_SIZE]; ++ u32 expkey[DES3_EDE_EXPKEY_WORDS]; ++}; ++ ++/** \fn int des_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len) ++ * \ingroup LQ_DES_FUNCTIONS ++ * \brief sets DES key ++ * \param tfm linux crypto algo transform ++ * \param key input key ++ * \param key_len key length ++*/ ++static int des_setkey(struct crypto_tfm *tfm, ++ const u8 *key, ++ unsigned int key_len) ++{ ++ struct des_ctx *ctx = crypto_tfm_ctx(tfm); ++ ++ DPRINTF(0, "ctx @%p, key_len %d %d\n", ctx, key_len); ++ ++ ctx->controlr_M = 0; /* des */ ++ ctx->key_length = key_len; ++ ++ memcpy((u8 *)(ctx->expkey), key, key_len); ++ ++ return 0; ++} ++ ++#ifndef CONFIG_CRYPTO_DEV_LANTIQ_DMA ++/** \fn void deu_des(void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, u32 nbytes, int encdec, int mode) ++ * \ingroup LQ_DES_FUNCTIONS ++ * \brief main interface to DES hardware ++ * \param ctx_arg crypto algo context ++ * \param out_arg output bytestream ++ * \param in_arg input bytestream ++ * \param iv_arg initialization vector ++ * \param nbytes length of bytestream ++ * \param encdec 1 for encrypt; 0 for decrypt ++ * \param mode operation mode such as ebc, cbc ++*/ ++ ++static void deu_des(void *ctx_arg, ++ u8 *out_arg, ++ const u8 *in_arg, ++ u8 *iv_arg, ++ u32 nbytes, ++ int encdec, ++ int mode) ++#else ++/** \fn void deu_des_core(void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, u32 nbytes, int encdec, int mode) ++ * \ingroup LQ_DES_FUNCTIONS ++ * \brief main interface to DES hardware ++ * \param ctx_arg crypto algo context ++ * \param out_arg output bytestream ++ * \param in_arg input bytestream ++ * \param iv_arg initialization vector ++ * \param nbytes length of bytestream ++ * \param encdec 1 for encrypt; 0 for decrypt ++ * \param mode operation mode such as ebc, cbc ++*/ ++static void deu_des_core(void *ctx_arg, ++ u8 *out_arg, ++ const 
u8 *in_arg, ++ u8 *iv_arg, ++ u32 nbytes, ++ int encdec, ++ int mode) ++#endif ++{ ++ volatile struct deu_des *des = (struct deu_des *) DES_3DES_START; ++ struct des_ctx *dctx = ctx_arg; ++ u32 *key = dctx->expkey; ++ ulong flag; ++ ++#ifndef CONFIG_CRYPTO_DEV_LANTIQ_DMA ++ int i = 0; ++ int nblocks = 0; ++#else ++ volatile struct deu_dma *dma = (struct deu_dma *) LQ_DEU_DMA_CON; ++ struct dma_device_info *dma_device = lq_deu[0].dma_device; ++ /* struct deu_drv_priv *deu_priv = ++ * (struct deu_drv_priv *)dma_device->priv; */ ++ int wlen = 0; ++ u32 *outcopy = NULL; ++ u32 *dword_mem_aligned_in = NULL; ++ ++#ifdef CONFIG_CRYPTO_DEV_LANTIQ_POLL_DMA ++ u32 timeout = 0; ++ u32 *out_dma = NULL; ++#endif ++ ++#endif ++ ++ DPRINTF(0, "ctx @%p, mode %d, encdec %d\n", dctx, mode, encdec); ++ ++ CRTCL_SECT_START; ++ ++ des->ctrl.E_D = !encdec; /* encryption */ ++ des->ctrl.O = mode; /* 0 ECB, 1 CBC, 2 OFB, 3 CFB, 4 CTR */ ++ des->ctrl.SM = 1; /* start after writing input register */ ++ des->ctrl.DAU = 0; /* Disable Automatic Update of init vect */ ++ des->ctrl.ARS = 1; /* Autostart Select - write to IHR */ ++ ++ des->ctrl.M = dctx->controlr_M; ++ /* write keys */ ++ if (dctx->controlr_M == 0) { ++ /* DES mode */ ++ des->K1HR = DEU_ENDIAN_SWAP(*((u32 *) key + 0)); ++ des->K1LR = DEU_ENDIAN_SWAP(*((u32 *) key + 1)); ++#ifdef CRYPTO_DEBUG ++ printk("key1: %x\n", (*((u32 *) key + 0))); ++ printk("key2: %x\n", (*((u32 *) key + 1))); ++#endif ++ } else { ++ /* 3DES mode (EDE-x) */ ++ switch (dctx->key_length) { ++ case 24: ++ des->K3HR = DEU_ENDIAN_SWAP(*((u32 *) key + 4)); ++ des->K3LR = DEU_ENDIAN_SWAP(*((u32 *) key + 5)); ++ /* no break; */ ++ case 16: ++ des->K2HR = DEU_ENDIAN_SWAP(*((u32 *) key + 2)); ++ des->K2LR = DEU_ENDIAN_SWAP(*((u32 *) key + 3)); ++ /* no break; */ ++ case 8: ++ des->K1HR = DEU_ENDIAN_SWAP(*((u32 *) key + 0)); ++ des->K1LR = DEU_ENDIAN_SWAP(*((u32 *) key + 1)); ++ break; ++ default: ++ CRTCL_SECT_END; ++ return; ++ } ++ } ++ ++ /* write init vector (not required for ECB mode) */ ++ if (mode > 0) { ++ des->IVHR = DEU_ENDIAN_SWAP(*(u32 *) iv_arg); ++ des->IVLR = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 1)); ++ } ++ ++#ifndef CONFIG_CRYPTO_DEV_LANTIQ_DMA ++ nblocks = nbytes / 4; ++ ++ for (i = 0; i < nblocks; i += 2) { ++ /* wait for busy bit to clear */ ++ ++ /*--- Workaround --------------------------------------------- ++ do a dummy read to the busy flag because it is not raised ++ early enough in CFB/OFB 3DES modes */ ++#ifdef CRYPTO_DEBUG ++ printk("ihr: %x\n", (*((u32 *) in_arg + i))); ++ printk("ilr: %x\n", (*((u32 *) in_arg + 1 + i))); ++#endif ++ des->IHR = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + i)); ++ /* start crypto */ ++ des->ILR = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + 1 + i)); ++ ++ while (des->ctrl.BUS) { ++ /* this will not take long */ ++ } ++ ++ *((u32 *) out_arg + 0 + i) = des->OHR; ++ *((u32 *) out_arg + 1 + i) = des->OLR; ++ ++#ifdef CRYPTO_DEBUG ++ printk("ohr: %x\n", (*((u32 *) out_arg + i))); ++ printk("olr: %x\n", (*((u32 *) out_arg + 1 + i))); ++#endif ++ } ++ ++#else /* dma mode */ ++ ++ /* Prepare Rx buf length used in dma psuedo interrupt */ ++ /* deu_priv->deu_rx_buf = out_arg; */ ++ /* deu_priv->deu_rx_len = nbytes; */ ++ ++ /* memory alignment issue */ ++ dword_mem_aligned_in = (u32 *) DEU_DWORD_REORDERING(in_arg, des_buff_in, ++ BUFFER_IN, nbytes); ++ ++ dma->ctrl.ALGO = 0; /* DES */ ++ des->ctrl.DAU = 0; ++ dma->ctrl.BS = 0; ++ dma->ctrl.EN = 1; ++ ++ while (des->ctrl.BUS) { ++ /* wait for AES to be ready */ ++ }; ++ ++ wlen = dma_device_write(dma_device, 
(u8 *) dword_mem_aligned_in, nbytes, ++ NULL); ++ if (wlen != nbytes) { ++ dma->ctrl.EN = 0; ++ CRTCL_SECT_END; ++ printk(KERN_ERR "[%s %s %d]: dma_device_write fail!\n", ++ __FILE__, __func__, __LINE__); ++ return; /* -EINVAL; */ ++ } ++ ++ WAIT_DES_DMA_READY(); ++ ++#ifdef CONFIG_CRYPTO_DEV_LANTIQ_POLL_DMA ++ outcopy = (u32 *) DEU_DWORD_REORDERING(out_arg, des_buff_out, ++ BUFFER_OUT, nbytes); ++ ++ /* polling DMA rx channel */ ++ while ((dma_device_read(dma_device, (u8 **) &out_dma, NULL)) == 0) { ++ timeout++; ++ ++ if (timeout >= 333000) { ++ dma->ctrl.EN = 0; ++ CRTCL_SECT_END; ++ printk(KERN_ERR "[%s %s %d]: timeout!!\n", ++ __FILE__, __func__, __LINE__); ++ return; /* -EINVAL; */ ++ } ++ } ++ ++ WAIT_DES_DMA_READY(); ++ ++ DES_MEMORY_COPY(outcopy, out_dma, out_arg, nbytes); ++#else ++ CRTCL_SECT_END; /* Sleep and wait for Rx finished */ ++ DEU_WAIT_EVENT(deu_priv->deu_thread_wait, DEU_EVENT, ++ deu_priv->deu_event_flags); ++ CRTCL_SECT_START; ++#endif ++ ++#endif /* dma mode */ ++ ++ if (mode > 0) { ++ *(u32 *) iv_arg = DEU_ENDIAN_SWAP(des->IVHR); ++ *((u32 *) iv_arg + 1) = DEU_ENDIAN_SWAP(des->IVLR); ++ }; ++ ++ CRTCL_SECT_END; ++} ++ ++/* definitions from linux/include/crypto.h: ++#define CRYPTO_TFM_MODE_ECB 0x00000001 ++#define CRYPTO_TFM_MODE_CBC 0x00000002 ++#define CRYPTO_TFM_MODE_CFB 0x00000004 ++#define CRYPTO_TFM_MODE_CTR 0x00000008 ++#define CRYPTO_TFM_MODE_OFB 0x00000010 ++but hardware definition: 0 ECB 1 CBC 2 OFB 3 CFB 4 CTR */ ++ ++/** \fn void deu_des(void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, u32 nbytes, int encdec, int mode) ++ * \ingroup LQ_DES_FUNCTIONS ++ * \brief main interface to DES hardware ++ * \param ctx_arg crypto algo context ++ * \param out_arg output bytestream ++ * \param in_arg input bytestream ++ * \param iv_arg initialization vector ++ * \param nbytes length of bytestream ++ * \param encdec 1 for encrypt; 0 for decrypt ++ * \param mode operation mode such as ebc, cbc ++*/ ++ ++#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA ++static void deu_des(void *ctx_arg, ++ u8 *out_arg, ++ const u8 *in_arg, ++ u8 *iv_arg, ++ u32 nbytes, ++ int encdec, ++ int mode) ++{ ++ u32 remain = nbytes; ++ u32 inc; ++ ++ DPRINTF(0, "\n"); ++ ++ while (remain > 0) { ++ if (remain >= DEU_MAX_PACKET_SIZE) ++ inc = DEU_MAX_PACKET_SIZE; ++ else ++ inc = remain; ++ ++ remain -= inc; ++ ++ deu_des_core(ctx_arg, out_arg, in_arg, iv_arg, inc, encdec, ++ mode); ++ ++ out_arg += inc; ++ in_arg += inc; ++ } ++} ++#endif ++ ++/** \fn void deu_des_ecb(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace) ++ * \ingroup LQ_DES_FUNCTIONS ++ * \brief sets DES hardware to ECB mode ++ * \param ctx crypto algo context ++ * \param dst output bytestream ++ * \param src input bytestream ++ * \param iv initialization vector ++ * \param nbytes length of bytestream ++ * \param encdec 1 for encrypt; 0 for decrypt ++ * \param inplace not used ++*/ ++ ++static void deu_des_ecb(void *ctx, ++ uint8_t *dst, ++ const uint8_t *src, ++ uint8_t *iv, ++ size_t nbytes, ++ int encdec, ++ int inplace) ++{ ++ DPRINTF(0, "ctx @%p\n", ctx); ++ deu_des(ctx, dst, src, NULL, nbytes, encdec, 0); ++} ++ ++/** \fn void deu_des_cbc(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace) ++ * \ingroup LQ_DES_FUNCTIONS ++ * \brief sets DES hardware to CBC mode ++ * \param ctx crypto algo context ++ * \param dst output bytestream ++ * \param src input bytestream ++ * \param iv initialization vector ++ * \param nbytes length of 
bytestream ++ * \param encdec 1 for encrypt; 0 for decrypt ++ * \param inplace not used ++*/ ++static void deu_des_cbc(void *ctx, ++ uint8_t *dst, ++ const uint8_t *src, ++ uint8_t *iv, ++ size_t nbytes, ++ int encdec, ++ int inplace) ++{ ++ DPRINTF(0, "ctx @%p\n", ctx); ++ deu_des(ctx, dst, src, iv, nbytes, encdec, 1); ++} ++ ++#if 0 ++/** \fn void deu_des_ofb(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace) ++ * \ingroup LQ_DES_FUNCTIONS ++ * \brief sets DES hardware to OFB mode ++ * \param ctx crypto algo context ++ * \param dst output bytestream ++ * \param src input bytestream ++ * \param iv initialization vector ++ * \param nbytes length of bytestream ++ * \param encdec 1 for encrypt; 0 for decrypt ++ * \param inplace not used ++*/ ++static void deu_des_ofb(void *ctx, ++ uint8_t *dst, ++ const uint8_t *src, ++ uint8_t *iv, ++ size_t nbytes, ++ int encdec, ++ int inplace) ++{ ++ DPRINTF(0, "ctx @%p\n", ctx); ++ deu_des(ctx, dst, src, iv, nbytes, encdec, 2); ++} ++ ++/** \fn void deu_des_cfb(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace) ++ \ingroup LQ_DES_FUNCTIONS ++ \brief sets DES hardware to CFB mode ++ \param ctx crypto algo context ++ \param dst output bytestream ++ \param src input bytestream ++ \param iv initialization vector ++ \param nbytes length of bytestream ++ \param encdec 1 for encrypt; 0 for decrypt ++ \param inplace not used ++*/ ++static void deu_des_cfb(void *ctx, ++ uint8_t *dst, ++ const uint8_t *src, ++ uint8_t *iv, ++ size_t nbytes, ++ int encdec, ++ int inplace) ++{ ++ DPRINTF(0, "ctx @%p\n", ctx); ++ deu_des(ctx, dst, src, iv, nbytes, encdec, 3); ++} ++ ++/** \fn void deu_des_ctr(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace) ++ * \ingroup LQ_DES_FUNCTIONS ++ * \brief sets DES hardware to CTR mode ++ * \param ctx crypto algo context ++ * \param dst output bytestream ++ * \param src input bytestream ++ * \param iv initialization vector ++ * \param nbytes length of bytestream ++ * \param encdec 1 for encrypt; 0 for decrypt ++ * \param inplace not used ++*/ ++static void deu_des_ctr(void *ctx, ++ uint8_t *dst, ++ const uint8_t *src, ++ uint8_t *iv, ++ size_t nbytes, ++ int encdec, ++ int inplace) ++{ ++ DPRINTF(0, "ctx @%p\n", ctx); ++ deu_des(ctx, dst, src, iv, nbytes, encdec, 4); ++} ++#endif ++ ++/** \fn void des_encrypt(struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in) ++ * \ingroup LQ_DES_FUNCTIONS ++ * \brief encrypt DES_BLOCK_SIZE of data ++ * \param tfm linux crypto algo transform ++ * \param out output bytestream ++ * \param in input bytestream ++*/ ++static void des_encrypt(struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in) ++{ ++ struct des_ctx *ctx = crypto_tfm_ctx(tfm); ++ DPRINTF(0, "ctx @%p\n", ctx); ++ deu_des(ctx, out, in, NULL, DES_BLOCK_SIZE, CRYPTO_DIR_ENCRYPT, 0); ++} ++ ++/** \fn void des_decrypt(struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in) ++ * \ingroup LQ_DES_FUNCTIONS ++ * \brief encrypt DES_BLOCK_SIZE of data ++ * \param tfm linux crypto algo transform ++ * \param out output bytestream ++ * \param in input bytestream ++*/ ++static void des_decrypt(struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in) ++{ ++ struct des_ctx *ctx = crypto_tfm_ctx(tfm); ++ DPRINTF(0, "ctx @%p\n", ctx); ++ deu_des(ctx, out, in, NULL, DES_BLOCK_SIZE, CRYPTO_DIR_DECRYPT, 0); ++} ++ ++/* ++ * \brief RFC2451: ++ * ++ * For DES-EDE3, there is no known need to reject weak or ++ * 
complementation keys. Any weakness is obviated by the use of ++ * multiple keys. ++ * ++ * However, if the first two or last two independent 64-bit keys are ++ * equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the ++ * same as DES. Implementers MUST reject keys that exhibit this ++ * property. ++ * ++ */ ++ ++/** \fn int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) ++ * \ingroup LQ_DES_FUNCTIONS ++ * \brief sets 3DES key ++ * \param tfm linux crypto algo transform ++ * \param key input key ++ * \param keylen key length ++*/ ++static int des3_ede_setkey(struct crypto_tfm *tfm, ++ const u8 *key, ++ unsigned int key_len) ++{ ++ struct des_ctx *ctx = crypto_tfm_ctx(tfm); ++ ++ DPRINTF(0, "ctx @%p, key_len %d\n", ctx, key_len); ++ ++ ctx->controlr_M = key_len / 8 + 1; /* 3DES EDE1 / EDE2 / EDE3 Mode */ ++ ctx->key_length = key_len; ++ ++ memcpy((u8 *)(ctx->expkey), key, key_len); ++ ++ return 0; ++} ++ ++/* ++ * \brief DES function mappings ++*/ ++static struct crypto_alg des_alg = { ++ .cra_name = "des", ++ .cra_driver_name = "lq_deu-des", ++ .cra_flags = CRYPTO_ALG_TYPE_CIPHER, ++ .cra_blocksize = DES_BLOCK_SIZE, ++ .cra_ctxsize = sizeof(struct des_ctx), ++ .cra_module = THIS_MODULE, ++ .cra_alignmask = 3, ++ .cra_list = LIST_HEAD_INIT(des_alg.cra_list), ++ .cra_u = { ++ .cipher = { ++ .cia_min_keysize = DES_KEY_SIZE, ++ .cia_max_keysize = DES_KEY_SIZE, ++ .cia_setkey = des_setkey, ++ .cia_encrypt = des_encrypt, ++ .cia_decrypt = des_decrypt ++ } ++ } ++}; ++ ++/* ++ * \brief DES function mappings ++*/ ++static struct crypto_alg des3_ede_alg = { ++ .cra_name = "des3_ede", ++ .cra_driver_name = "lq_deu-des3_ede", ++ .cra_flags = CRYPTO_ALG_TYPE_CIPHER, ++ .cra_blocksize = DES_BLOCK_SIZE, ++ .cra_ctxsize = sizeof(struct des_ctx), ++ .cra_module = THIS_MODULE, ++ .cra_alignmask = 3, ++ .cra_list = LIST_HEAD_INIT(des3_ede_alg.cra_list), ++ .cra_u = { ++ .cipher = { ++ .cia_min_keysize = DES_KEY_SIZE, ++ .cia_max_keysize = DES_KEY_SIZE, ++ .cia_setkey = des3_ede_setkey, ++ .cia_encrypt = des_encrypt, ++ .cia_decrypt = des_decrypt ++ } ++ } ++}; ++ ++/** \fn int ecb_des_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) ++ * \ingroup LQ_DES_FUNCTIONS ++ * \brief ECB DES encrypt using linux crypto blkcipher ++ * \param desc blkcipher descriptor ++ * \param dst output scatterlist ++ * \param src input scatterlist ++ * \param nbytes data size in bytes ++*/ ++static int ecb_des_encrypt(struct blkcipher_desc *desc, ++ struct scatterlist *dst, ++ struct scatterlist *src, ++ unsigned int nbytes) ++{ ++ struct des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ++ struct blkcipher_walk walk; ++ int err; ++ ++ DPRINTF(0, "ctx @%p\n", ctx); ++ ++ blkcipher_walk_init(&walk, dst, src, nbytes); ++ err = blkcipher_walk_virt(desc, &walk); ++ ++ while ((nbytes = walk.nbytes)) { ++ nbytes -= (nbytes % DES_BLOCK_SIZE); ++ deu_des_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr, ++ NULL, nbytes, CRYPTO_DIR_ENCRYPT, 0); ++ nbytes &= DES_BLOCK_SIZE - 1; ++ err = blkcipher_walk_done(desc, &walk, nbytes); ++ } ++ ++ return err; ++} ++ ++/** \fn int ecb_des_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) ++ * \ingroup LQ_DES_FUNCTIONS ++ * \brief ECB DES decrypt using linux crypto blkcipher ++ * \param desc blkcipher descriptor ++ * \param dst output scatterlist ++ * \param src input scatterlist ++ * \param nbytes data size in bytes ++ * \return err ++*/ ++static 
int ecb_des_decrypt(struct blkcipher_desc *desc, ++ struct scatterlist *dst, ++ struct scatterlist *src, ++ unsigned int nbytes) ++{ ++ struct des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ++ struct blkcipher_walk walk; ++ int err; ++ ++ DPRINTF(0, "ctx @%p\n", ctx); ++ ++ blkcipher_walk_init(&walk, dst, src, nbytes); ++ err = blkcipher_walk_virt(desc, &walk); ++ ++ while ((nbytes = walk.nbytes)) { ++ nbytes -= (nbytes % DES_BLOCK_SIZE); ++ deu_des_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr, ++ NULL, nbytes, CRYPTO_DIR_DECRYPT, 0); ++ nbytes &= DES_BLOCK_SIZE - 1; ++ err = blkcipher_walk_done(desc, &walk, nbytes); ++ } ++ ++ return err; ++} ++ ++/* ++ * \brief DES function mappings ++*/ ++static struct crypto_alg ecb_des_alg = { ++ .cra_name = "ecb(des)", ++ .cra_driver_name = "lq_deu-ecb(des)", ++ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, ++ .cra_blocksize = DES_BLOCK_SIZE, ++ .cra_ctxsize = sizeof(struct des_ctx), ++ .cra_type = &crypto_blkcipher_type, ++ .cra_module = THIS_MODULE, ++ .cra_list = LIST_HEAD_INIT(ecb_des_alg.cra_list), ++ .cra_u = { ++ .blkcipher = { ++ .min_keysize = DES_KEY_SIZE, ++ .max_keysize = DES_KEY_SIZE, ++ .setkey = des_setkey, ++ .encrypt = ecb_des_encrypt, ++ .decrypt = ecb_des_decrypt, ++ } ++ } ++}; ++ ++/* ++ * \brief DES function mappings ++*/ ++static struct crypto_alg ecb_des3_ede_alg = { ++ .cra_name = "ecb(des3_ede)", ++ .cra_driver_name = "lq_deu-ecb(des3_ede)", ++ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ .cra_ctxsize = sizeof(struct des_ctx), ++ .cra_type = &crypto_blkcipher_type, ++ .cra_module = THIS_MODULE, ++ .cra_list = LIST_HEAD_INIT(ecb_des3_ede_alg.cra_list), ++ .cra_u = { ++ .blkcipher = { ++ .min_keysize = DES3_EDE_KEY_SIZE, ++ .max_keysize = DES3_EDE_KEY_SIZE, ++ .setkey = des3_ede_setkey, ++ .encrypt = ecb_des_encrypt, ++ .decrypt = ecb_des_decrypt, ++ } ++ } ++}; ++ ++/** \fn int cbc_des_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) ++ * \ingroup LQ_DES_FUNCTIONS ++ * \brief CBC DES encrypt using linux crypto blkcipher ++ * \param desc blkcipher descriptor ++ * \param dst output scatterlist ++ * \param src input scatterlist ++ * \param nbytes data size in bytes ++ * \return err ++*/ ++static int cbc_des_encrypt(struct blkcipher_desc *desc, ++ struct scatterlist *dst, ++ struct scatterlist *src, ++ unsigned int nbytes) ++{ ++ struct des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ++ struct blkcipher_walk walk; ++ int err; ++ ++ DPRINTF(0, "ctx @%p\n", ctx); ++ ++ blkcipher_walk_init(&walk, dst, src, nbytes); ++ err = blkcipher_walk_virt(desc, &walk); ++ ++ while ((nbytes = walk.nbytes)) { ++ u8 *iv = walk.iv; ++ /* printk("iv = %08x\n", *(u32 *)iv); */ ++ nbytes -= (nbytes % DES_BLOCK_SIZE); ++ deu_des_cbc(ctx, walk.dst.virt.addr, walk.src.virt.addr, ++ iv, nbytes, CRYPTO_DIR_ENCRYPT, 0); ++ nbytes &= DES_BLOCK_SIZE - 1; ++ err = blkcipher_walk_done(desc, &walk, nbytes); ++ } ++ ++ return err; ++} ++ ++/** \fn int cbc_des_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) ++ * \ingroup LQ_DES_FUNCTIONS ++ * \brief CBC DES decrypt using linux crypto blkcipher ++ * \param desc blkcipher descriptor ++ * \param dst output scatterlist ++ * \param src input scatterlist ++ * \param nbytes data size in bytes ++ * \return err ++*/ ++static int cbc_des_decrypt(struct blkcipher_desc *desc, ++ struct scatterlist *dst, ++ struct scatterlist *src, ++ unsigned int nbytes) ++{ ++ struct 
des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ++ struct blkcipher_walk walk; ++ int err; ++ ++ DPRINTF(0, "ctx @%p\n", ctx); ++ ++ blkcipher_walk_init(&walk, dst, src, nbytes); ++ err = blkcipher_walk_virt(desc, &walk); ++ ++ while ((nbytes = walk.nbytes)) { ++ u8 *iv = walk.iv; ++ /* printk("iv = %08x\n", *(u32 *)iv); */ ++ nbytes -= (nbytes % DES_BLOCK_SIZE); ++ deu_des_cbc(ctx, walk.dst.virt.addr, walk.src.virt.addr, ++ iv, nbytes, CRYPTO_DIR_DECRYPT, 0); ++ nbytes &= DES_BLOCK_SIZE - 1; ++ err = blkcipher_walk_done(desc, &walk, nbytes); ++ } ++ ++ return err; ++} ++ ++/* ++ * \brief DES function mappings ++*/ ++static struct crypto_alg cbc_des_alg = { ++ .cra_name = "cbc(des)", ++ .cra_driver_name = "lq_deu-cbc(des)", ++ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, ++ .cra_blocksize = DES_BLOCK_SIZE, ++ .cra_ctxsize = sizeof(struct des_ctx), ++ .cra_type = &crypto_blkcipher_type, ++ .cra_module = THIS_MODULE, ++ .cra_list = LIST_HEAD_INIT(cbc_des_alg.cra_list), ++ .cra_u = { ++ .blkcipher = { ++ .min_keysize = DES_KEY_SIZE, ++ .max_keysize = DES_KEY_SIZE, ++ .ivsize = DES_BLOCK_SIZE, ++ .setkey = des_setkey, ++ .encrypt = cbc_des_encrypt, ++ .decrypt = cbc_des_decrypt, ++ } ++ } ++}; ++ ++/* ++ * \brief DES function mappings ++*/ ++static struct crypto_alg cbc_des3_ede_alg = { ++ .cra_name = "cbc(des3_ede)", ++ .cra_driver_name = "lq_deu-cbc(des3_ede)", ++ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, ++ .cra_blocksize = DES3_EDE_BLOCK_SIZE, ++ .cra_ctxsize = sizeof(struct des_ctx), ++ .cra_type = &crypto_blkcipher_type, ++ .cra_module = THIS_MODULE, ++ .cra_list = LIST_HEAD_INIT(cbc_des3_ede_alg.cra_list), ++ .cra_u = { ++ .blkcipher = { ++ .min_keysize = DES3_EDE_KEY_SIZE, ++ .max_keysize = DES3_EDE_KEY_SIZE, ++ .ivsize = DES_BLOCK_SIZE, ++ .setkey = des3_ede_setkey, ++ .encrypt = cbc_des_encrypt, ++ .decrypt = cbc_des_decrypt, ++ } ++ } ++}; ++ ++/** \fn int lq_deu_init_des(void) ++ * \ingroup LQ_DES_FUNCTIONS ++ * \brief initialize des driver ++*/ ++int lq_deu_init_des(void) ++{ ++ int ret = 0; ++ ++ ret = crypto_register_alg(&des_alg); ++ if (ret < 0) ++ goto des_err; ++ ++ ret = crypto_register_alg(&ecb_des_alg); ++ if (ret < 0) ++ goto ecb_des_err; ++ ++ ret = crypto_register_alg(&cbc_des_alg); ++ if (ret < 0) ++ goto cbc_des_err; ++ ++ ret = crypto_register_alg(&des3_ede_alg); ++ if (ret < 0) ++ goto des3_ede_err; ++ ++ ret = crypto_register_alg(&ecb_des3_ede_alg); ++ if (ret < 0) ++ goto ecb_des3_ede_err; ++ ++ ret = crypto_register_alg(&cbc_des3_ede_alg); ++ if (ret < 0) ++ goto cbc_des3_ede_err; ++ ++ deu_des_chip_init(); ++ ++ CRTCL_SECT_INIT; ++ ++#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA ++ if (ALLOCATE_MEMORY(BUFFER_IN, DES_ALGO) < 0) { ++ printk(KERN_ERR "[%s %s %d]: malloc memory fail!\n", ++ __FILE__, __func__, __LINE__); ++ goto cbc_des3_ede_err; ++ } ++ if (ALLOCATE_MEMORY(BUFFER_OUT, DES_ALGO) < 0) { ++ printk(KERN_ERR "[%s %s %d]: malloc memory fail!\n", ++ __FILE__, __func__, __LINE__); ++ goto cbc_des3_ede_err; ++ } ++#endif ++ ++ printk(KERN_NOTICE "Lantiq DEU DES initialized%s.\n", ++ disable_deudma ? 
"" : " (DMA)"); ++ return ret; ++ ++des_err: ++ crypto_unregister_alg(&des_alg); ++ printk(KERN_ERR "Lantiq des initialization failed!\n"); ++ ++ return ret; ++ ++ecb_des_err: ++ crypto_unregister_alg(&ecb_des_alg); ++ printk(KERN_ERR "Lantiq ecb_des initialization failed!\n"); ++ ++ return ret; ++ ++cbc_des_err: ++ crypto_unregister_alg(&cbc_des_alg); ++ printk(KERN_ERR "Lantiq cbc_des initialization failed!\n"); ++ ++ return ret; ++ ++des3_ede_err: ++ crypto_unregister_alg(&des3_ede_alg); ++ printk(KERN_ERR "Lantiq des3_ede initialization failed!\n"); ++ ++ return ret; ++ ++ecb_des3_ede_err: ++ crypto_unregister_alg(&ecb_des3_ede_alg); ++ printk(KERN_ERR "Lantiq ecb_des3_ede initialization failed!\n"); ++ ++ return ret; ++ ++cbc_des3_ede_err: ++ crypto_unregister_alg(&cbc_des3_ede_alg); ++ printk(KERN_ERR "Lantiq cbc_des3_ede initialization failed!\n"); ++ ++ return ret; ++} ++ ++/** \fn void lq_deu_fini_des(void) ++ * \ingroup LQ_DES_FUNCTIONS ++ * \brief unregister des driver ++*/ ++void lq_deu_fini_des(void) ++{ ++ crypto_unregister_alg(&des_alg); ++ crypto_unregister_alg(&ecb_des_alg); ++ crypto_unregister_alg(&cbc_des_alg); ++ crypto_unregister_alg(&des3_ede_alg); ++ crypto_unregister_alg(&ecb_des3_ede_alg); ++ crypto_unregister_alg(&cbc_des3_ede_alg); ++ ++#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA ++ FREE_MEMORY(des_buff_in); ++ FREE_MEMORY(des_buff_out); ++#endif /* CONFIG_CRYPTO_DEV_LANTIQ_DMA_DANUBE */ ++} ++ ++#endif +--- /dev/null ++++ b/drivers/crypto/lantiq/deu.c +@@ -0,0 +1,195 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 
++ * ++ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com> ++ * Copyright (C) 2009 Mohammad Firdaus ++ */ ++ ++/** ++ \defgroup LQ_DEU LQ_DEU_DRIVERS ++ \ingroup API ++ \brief Lantiq DEU driver module ++*/ ++ ++/** ++ \file deu.c ++ \ingroup LQ_DEU ++ \brief main DEU driver file ++*/ ++ ++/** ++ \defgroup LQ_DEU_FUNCTIONS LQ_DEU_FUNCTIONS ++ \ingroup LQ_DEU ++ \brief Lantiq DEU functions ++*/ ++ ++#include <linux/version.h> ++#if defined(CONFIG_MODVERSIONS) ++#define MODVERSIONS ++#include <linux/modversions.h> ++#endif ++#include <linux/module.h> ++#include <linux/init.h> ++#include <linux/types.h> ++#include <linux/errno.h> ++#include <linux/crypto.h> ++#include <linux/proc_fs.h> ++#include <linux/fs.h> /* Stuff about file systems that we need */ ++#include <asm/byteorder.h> ++ ++#if 0 ++#ifdef CONFIG_SOC_LANTIQ_XWAY ++# include <lq_pmu.h> ++#endif ++#endif ++ ++#include "deu.h" ++ ++struct lq_crypto_priv lq_crypto_ops; ++ ++#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA ++int disable_deudma = 0; ++#else ++int disable_deudma = 1; ++#endif /* CONFIG_CRYPTO_DEV_LANTIQ_DMA */ ++ ++#ifdef CRYPTO_DEBUG ++char deu_debug_level = 3; ++#endif ++ ++#ifdef CONFIG_CRYPTO_DEV_LANTIQ_MODULE ++# define STATIC static ++#else ++# define STATIC ++#endif ++ ++/** \fn static int lq_deu_init(void) ++ * \ingroup LQ_DEU_FUNCTIONS ++ * \brief link all modules that have been selected in kernel config for Lantiq HW crypto support ++ * \return ret ++*/ ++int lq_deu_init(void) ++{ ++ int ret = -ENOSYS; ++ u32 config; ++ ++ printk(KERN_INFO "Lantiq crypto hardware driver version %s\n", ++ LQ_DEU_DRV_VERSION); ++ ++ config = deu_chip_init(); ++ ++#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA ++ deu_dma_init(); ++#endif ++ ++#if defined(CONFIG_CRYPTO_DEV_LANTIQ_AES) ++ if(config & LQ_DEU_ID_AES) { ++ if ((ret = lq_deu_init_aes())) { ++ printk(KERN_ERR "Lantiq AES initialization failed!\n"); ++ } ++ } else { ++ printk(KERN_ERR "Lantiq AES not supported!\n"); ++ } ++#endif ++ ++#ifdef CONFIG_SOL_LANTIQ_XWAY ++#if defined(CONFIG_CRYPTO_DEV_LANTIQ_DES) ++ if(config & LQ_DEU_ID_DES) { ++ if ((ret = lq_deu_init_des())) { ++ printk(KERN_ERR "Lantiq DES initialization failed!\n"); ++ } ++ } else { ++ printk(KERN_ERR "Lantiq DES not supported!\n"); ++ } ++#endif ++#if defined(CONFIG_CRYPTO_DEV_LANTIQ_ARC4) && defined(CONFIG_CRYPTO_DEV_LANTIQ_DMA) ++ if ((ret = lq_deu_init_arc4())) { ++ printk(KERN_ERR "Lantiq ARC4 initialization failed!\n"); ++ } ++#endif ++#endif ++ ++#if defined(CONFIG_CRYPTO_DEV_LANTIQ_SHA1) ++ if(config & LQ_DEU_ID_HASH) { ++ if ((ret = lq_deu_init_sha1())) { ++ printk(KERN_ERR "Lantiq SHA1 initialization failed!\n"); ++ } ++ } else { ++ printk(KERN_ERR "Lantiq SHA1 not supported!\n"); ++ } ++#endif ++#if defined(CONFIG_CRYPTO_DEV_LANTIQ_MD5) ++ if(config & LQ_DEU_ID_HASH) { ++ if ((ret = lq_deu_init_md5())) { ++ printk(KERN_ERR "Lantiq MD5 initialization failed!\n"); ++ } ++ } else { ++ printk(KERN_ERR "Lantiq MD5 not supported!\n"); ++ } ++#endif ++#if defined(CONFIG_CRYPTO_DEV_LANTIQ_SHA1_HMAC) ++ if ((ret = lq_deu_init_sha1_hmac())) { ++ printk(KERN_ERR "Lantiq SHA1_HMAC initialization failed!\n"); ++ } ++#endif ++#if defined(CONFIG_CRYPTO_DEV_LANTIQ_MD5_HMAC) ++ if ((ret = lq_deu_init_md5_hmac())) { ++ printk(KERN_ERR "Lantiq MD5_HMAC initialization failed!\n"); ++ } ++#endif ++ return ret; ++} ++ ++/** \fn static void lq_deu_fini(void) ++ * \ingroup LQ_DEU_FUNCTIONS ++ * \brief remove the loaded crypto algorithms ++*/ ++void lq_deu_exit(void) ++{ ++#if defined(CONFIG_CRYPTO_DEV_LANTIQ_AES) ++ lq_deu_fini_aes(); 
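++ /* the fini calls below follow the same per-algorithm order as lq_deu_init():
++  * each sub-module simply unregisters the crypto_algs it registered, and the
++  * DEU DMA channel is torn down last */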
++#endif ++#ifdef CONFIG_SOL_LANTIQ_XWAY ++#if defined(CONFIG_CRYPTO_DEV_LANTIQ_DES) ++ lq_deu_fini_des(); ++#endif ++#if defined(CONFIG_CRYPTO_DEV_LANTIQ_ARC4) \ ++ && defined(CONFIG_CRYPTO_DEV_LANTIQ_DMA) ++ lq_deu_fini_arc4(); ++#endif ++#endif ++#if defined(CONFIG_CRYPTO_DEV_LANTIQ_SHA1) ++ lq_deu_fini_sha1(); ++#endif ++#if defined(CONFIG_CRYPTO_DEV_LANTIQ_MD5) ++ lq_deu_fini_md5(); ++#endif ++#if defined(CONFIG_CRYPTO_DEV_LANTIQ_SHA1_HMAC) ++ lq_deu_fini_sha1_hmac(); ++#endif ++#if defined(CONFIG_CRYPTO_DEV_LANTIQ_MD5_HMAC) ++ lq_deu_fini_md5_hmac(); ++#endif ++ ++ printk("DEU has exited successfully\n"); ++ ++#if defined(CONFIG_CRYPTO_DEV_LANTIQ_DMA) ++ deu_dma_exit(); ++ printk("DMA has deregistered successfully\n"); ++#endif ++} ++ ++EXPORT_SYMBOL(lq_deu_init); ++EXPORT_SYMBOL(lq_deu_exit); +--- /dev/null ++++ b/drivers/crypto/lantiq/deu.h +@@ -0,0 +1,248 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com> ++ * Copyright (C) 2009 Mohammad Firdaus ++ */ ++ ++/** ++ \defgroup LQ_DEU LQ_DEU_DRIVERS ++ \ingroup API ++ \brief Lantiq DEU driver module ++*/ ++ ++/** ++ \file deu.h ++ \brief Main DEU driver header file ++*/ ++ ++/** ++ \defgroup LQ_DEU_DEFINITIONS LQ_DEU_DEFINITIONS ++ \ingroup LQ_DEU ++ \brief Lantiq DEU definitions ++*/ ++ ++ ++#ifndef DEU_H ++#define DEU_H ++ ++#undef CRYPTO_DEBUG ++ ++#define LQ_DEU_DRV_VERSION "1.0.1" ++ ++#if defined(CONFIG_LANTIQ_DANUBE) ++# include "deu_danube.h" ++#elif defined(CONFIG_LANTIQ_AR9) ++# include "deu_ar9.h" ++#elif defined(CONFIG_SOC_LANTIQ_FALCON) ++# include "deu_falcon.h" ++#else ++//# error "Unknown platform" ++# include "deu_danube.h" ++#endif ++ ++struct lq_crypto_priv { ++#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA ++ u32 *des_buff_in; ++ u32 *des_buff_out; ++ u32 *aes_buff_in; ++ u32 *aes_buff_out; ++ ++ int (*dma_init)(void); ++ void (*dma_exit)(void); ++ u32 (*dma_align)(const u8 *, u32 *, int, int); ++ void (*aes_dma_memcpy)(u32 *, u32 *, u8 *, int); ++ void (*des_dma_memcpy)(u32 *, u32 *, u8 *, int); ++ int (*aes_dma_malloc)(int); ++ int (*des_dma_malloc)(int); ++ void (*dma_free)(u32 *); ++#endif ++ ++ u32 (*endian_swap)(u32); ++ u32 (*input_swap)(u32); ++ void (*aes_chip_init)(void); ++ void (*des_chip_init)(void); ++ u32 (*chip_init)(void); ++}; ++ ++extern struct lq_crypto_priv lq_crypto_ops; ++ ++#define LQ_DEU_ALIGNMENT 16 ++ ++#define PFX "lq_deu: " ++ ++#define LQ_DEU_CRA_PRIORITY 300 ++#define LQ_DEU_COMPOSITE_PRIORITY 400 ++ ++#define CRYPTO_DIR_ENCRYPT 1 ++#define CRYPTO_DIR_DECRYPT 0 ++ ++#define CRTCL_SECT_INIT spin_lock_init(&cipher_lock) ++#define CRTCL_SECT_START spin_lock_irqsave(&cipher_lock, flag) ++#define CRTCL_SECT_END spin_unlock_irqrestore(&cipher_lock, flag) ++ ++#define LQ_DEU_ID_REV 0x00001F ++#define LQ_DEU_ID_ID 0x00FF00 ++#define LQ_DEU_ID_DMA 0x010000 
++#define LQ_DEU_ID_HASH 0x020000 ++#define LQ_DEU_ID_AES 0x040000 ++#define LQ_DEU_ID_3DES 0x080000 ++#define LQ_DEU_ID_DES 0x100000 ++ ++extern int disable_deudma; ++ ++int lq_deu_init(void); ++void lq_deu_exit(void); ++ ++int lq_deu_init_des(void); ++int lq_deu_init_aes(void); ++int lq_deu_init_arc4(void); ++int lq_deu_init_sha1(void); ++int lq_deu_init_md5(void); ++int lq_deu_init_sha1_hmac(void); ++int lq_deu_init_md5_hmac(void); ++ ++void lq_deu_fini_des(void); ++void lq_deu_fini_aes(void); ++void lq_deu_fini_arc4(void); ++void lq_deu_fini_sha1(void); ++void lq_deu_fini_md5(void); ++void lq_deu_fini_sha1_hmac(void); ++void lq_deu_fini_md5_hmac(void); ++ ++/* board specific functions */ ++/* { */ ++static inline u32 deu_chip_init(void) ++{ ++ return lq_crypto_ops.chip_init(); ++} ++ ++static inline void deu_des_chip_init(void) ++{ ++ lq_crypto_ops.des_chip_init(); ++} ++ ++static inline void deu_aes_chip_init(void) ++{ ++ lq_crypto_ops.aes_chip_init(); ++} ++ ++static inline u32 deu_input_swap(u32 input) ++{ ++ return lq_crypto_ops.input_swap(input); ++} ++ ++static inline u32 deu_endian_swap(u32 input) ++{ ++ return lq_crypto_ops.endian_swap(input); ++} ++ ++#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA ++static inline int deu_aes_dma_malloc(int value) ++{ ++ return lq_crypto_ops.aes_dma_malloc(value); ++} ++ ++static inline int deu_des_dma_malloc(int value) ++{ ++ return lq_crypto_ops.des_dma_malloc(value); ++} ++ ++static inline u32 *deu_dma_align(const u8 *arg, ++ u32 *buff_alloc, ++ int in_out, ++ int nbytes) ++{ ++ return lq_crypto_ops.dma_align(arg, buff_alloc, in_out, nbytes); ++} ++ ++static inline void deu_aes_dma_memcpy(u32 *outcopy, ++ u32 *out_dma, ++ u8 *out_arg, ++ int nbytes) ++{ ++ lq_crypto_ops.aes_dma_memcpy(outcopy, out_dma, out_arg, nbytes); ++} ++ ++static inline void deu_des_dma_memcpy(u32 *outcopy, ++ u32 *out_dma, ++ u8 *out_arg, ++ int nbytes) ++{ ++ lq_crypto_ops.des_dma_memcpy(outcopy, out_dma, out_arg, nbytes); ++} ++ ++static inline void deu_dma_free(u32 *addr) ++{ ++ lq_crypto_ops.dma_free(addr); ++} ++ ++static inline int deu_dma_init(void) ++{ ++ lq_crypto_ops.dma_init(); ++} ++ ++static inline void deu_dma_exit(void) ++{ ++ lq_crypto_ops.dma_exit(); ++} ++#endif ++ ++/* } */ ++ ++#define DEU_WAKELIST_INIT(queue) \ ++ init_waitqueue_head(&queue) ++ ++#define DEU_WAIT_EVENT_TIMEOUT(queue, event, flags, timeout) \ ++ do { \ ++ wait_event_interruptible_timeout((queue), \ ++ test_bit((event), \ ++ &(flags)), (timeout)); \ ++ clear_bit((event), &(flags)); \ ++ }while (0) ++ ++ ++#define DEU_WAKEUP_EVENT(queue, event, flags) \ ++ do { \ ++ set_bit((event), &(flags)); \ ++ wake_up_interruptible(&(queue)); \ ++ }while (0) ++ ++#define DEU_WAIT_EVENT(queue, event, flags) \ ++ do { \ ++ wait_event_interruptible(queue, \ ++ test_bit((event), &(flags))); \ ++ clear_bit((event), &(flags)); \ ++ }while (0) ++ ++struct deu_drv_priv { ++ wait_queue_head_t deu_thread_wait; ++#define DEU_EVENT 1 ++ volatile long deu_event_flags; ++ u8 *deu_rx_buf; ++ u32 deu_rx_len; ++}; ++ ++#ifdef CRYPTO_DEBUG ++extern char deu_debug_level; ++# define DPRINTF(level, format, args...) \ ++ if (level < deu_debug_level) \ ++ printk(KERN_INFO "[%s %s %d]: " format, \ ++ __FILE__, __func__, __LINE__, ##args) ++#else ++# define DPRINTF(level, format, args...) 
do { } while(0) ++#endif ++ ++#endif /* DEU_H */ +--- /dev/null ++++ b/drivers/crypto/lantiq/deu_ar9.c +@@ -0,0 +1,327 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com> ++ * Copyright (C) 2009 Mohammad Firdaus ++ */ ++ ++#include <linux/module.h> ++#include <linux/init.h> ++#include <linux/types.h> ++#include <linux/errno.h> ++#include <asm/io.h> /* dma_cache_inv */ ++#include <linux/platform_device.h> ++ ++#ifdef CONFIG_SOC_LANTIQ_XWAY ++ ++#include "deu.h" ++ ++/** ++ \defgroup LQ_DEU LQ_DEU_DRIVERS ++ \ingroup API ++ \brief Lantiq DEU driver module ++*/ ++ ++/** ++ \file deu_ar9.c ++ \brief Lantiq DEU board specific driver file for ar9 ++*/ ++ ++/** ++ \defgroup BOARD_SPECIFIC_FUNCTIONS LQ_BOARD_SPECIFIC_FUNCTIONS ++ \ingroup LQ_DEU ++ \brief board specific functions ++*/ ++ ++#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA ++struct lq_deu_device lq_deu[1]; ++ ++static u8 *g_dma_page_ptr = NULL; ++static u8 *g_dma_block = NULL; ++static u8 *g_dma_block2 = NULL; ++ ++/** \fn int dma_init(void) ++ * \ingroup BOARD_SPECIFIC_FUNCTIONS ++ * \brief Initialize DMA for DEU usage. 
DMA specific registers are ++ * intialized here, including a pointer to the device, memory ++ * space for the device and DEU-DMA descriptors ++ * \return -1: fail, 0: SUCCESS ++*/ ++static int dma_init(void) ++{ ++ volatile struct deu_dma *dma = (struct deu_dma *) LQ_DEU_DMA_CON; ++ struct dma_device_info *dma_device = NULL; ++ int i = 0; ++ ++ struct dma_device_info *deu_dma_device_ptr; ++ ++ /* get one free page and share between g_dma_block and g_dma_block2 */ ++ printk("PAGE_SIZE = %ld\n", PAGE_SIZE); ++ /* need 16-byte alignment memory block */ ++ g_dma_page_ptr = (u8 *)__get_free_page(GFP_KERNEL); ++ /* need 16-byte alignment memory block */ ++ g_dma_block = g_dma_page_ptr; ++ /* need 16-byte alignment memory block */ ++ g_dma_block2 = (u8 *)(g_dma_page_ptr + (PAGE_SIZE >> 1)); ++ ++ /* deu_dma_priv_init(); */ ++ ++ deu_dma_device_ptr = dma_device_reserve("DEU"); ++ if (!deu_dma_device_ptr) { ++ printk("DEU: reserve DMA fail!\n"); ++ return -1; ++ } ++ lq_deu[0].dma_device = deu_dma_device_ptr; ++ ++ dma_device = deu_dma_device_ptr; ++ /* dma_device->priv = &deu_dma_priv; */ ++ dma_device->buffer_alloc = &deu_dma_buffer_alloc; ++ dma_device->buffer_free = &deu_dma_buffer_free; ++ dma_device->intr_handler = &deu_dma_intr_handler; ++ ++ dma_device->tx_endianness_mode = LQ_DMA_ENDIAN_TYPE3; ++ dma_device->rx_endianness_mode = LQ_DMA_ENDIAN_TYPE3; ++ dma_device->port_num = 1; ++ dma_device->tx_burst_len = 2; ++ dma_device->rx_burst_len = 2; ++ dma_device->max_rx_chan_num = 1; ++ dma_device->max_tx_chan_num = 1; ++ dma_device->port_packet_drop_enable = 0; ++ ++ for (i = 0; i < dma_device->max_rx_chan_num; i++) { ++ dma_device->rx_chan[i]->packet_size = DEU_MAX_PACKET_SIZE; ++ dma_device->rx_chan[i]->desc_len = 1; ++ dma_device->rx_chan[i]->control = LQ_DMA_CH_ON; ++ dma_device->rx_chan[i]->byte_offset = 0; ++ dma_device->rx_chan[i]->chan_poll_enable = 1; ++ } ++ ++ for (i = 0; i < dma_device->max_tx_chan_num; i++) { ++ dma_device->tx_chan[i]->control = LQ_DMA_CH_ON; ++ dma_device->tx_chan[i]->desc_len = 1; ++ dma_device->tx_chan[i]->chan_poll_enable = 1; ++ } ++ ++ dma_device->current_tx_chan = 0; ++ dma_device->current_rx_chan = 0; ++ ++ i = dma_device_register(dma_device); ++ for (i = 0; i < dma_device->max_rx_chan_num; i++) { ++ (dma_device->rx_chan[i])->open(dma_device->rx_chan[i]); ++ } ++ ++ dma->ctrl.BS = 0; ++ dma->ctrl.RXCLS = 0; ++ dma->ctrl.EN = 1; ++ ++ return 0; ++} ++ ++/** \fn u32 *dma_align(const u8 *arg, u32 *buffer_alloc, int in_buff, int nbytes) ++ * \ingroup BOARD_SPECIFIC_FUNCTIONS ++ * \brief Not used for AR9 ++ * \param arg Pointer to the input / output memory address ++ * \param buffer_alloc A pointer to the buffer ++ * \param in_buff Input (if == 1) or Output (if == 0) buffer ++ * \param nbytes Number of bytes of data ++*/ ++static u32 *dma_align(const u8 *arg, u32 *buffer_alloc, int in_buff, int nbytes) ++{ ++ return (u32 *) arg; ++} ++ ++/** \fn void aes_dma_memcpy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes) ++ * \ingroup BOARD_SPECIFIC_FUNCTIONS ++ * \brief copy the DMA data to the memory address space for AES ++ * \param outcopy Not used ++ * \param out_dma A pointer to the memory address that stores the DMA data ++ * \param out_arg The pointer to the memory address that needs to be copied to] ++ * \param nbytes Number of bytes of data ++*/ ++static void aes_dma_memcpy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes) ++{ ++ memcpy(out_arg, out_dma, nbytes); ++} ++ ++/** \fn void des_dma_memcpy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes) 
++ * \ingroup BOARD_SPECIFIC_FUNCTIONS ++ * \brief copy the DMA data to the memory address space for DES ++ * \param outcopy Not used ++ * \param out_dma A pointer to the memory address that stores the DMA data ++ * \param out_arg The pointer to the memory address that needs to be copied to] ++ * \param nbytes Number of bytes of data ++*/ ++static void des_dma_memcpy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes) ++{ ++ memcpy(out_arg, out_dma, nbytes); ++} ++ ++/** \fn dma_exit(void) ++ * \ingroup BOARD_SPECIFIC_FUNCTIONS ++ * \brief unregister dma devices after exit ++*/ ++static void dma_exit(void) ++{ ++ if (g_dma_page_ptr) ++ free_page((u32) g_dma_page_ptr); ++ dma_device_release(lq_deu[0].dma_device); ++ dma_device_unregister(lq_deu[0].dma_device); ++} ++#endif /* CONFIG_CRYPTO_DEV_LANTIQ_DMA */ ++ ++/** \fn u32 endian_swap(u32 input) ++ * \ingroup BOARD_SPECIFIC_FUNCTIONS ++ * \brief Swap data given to the function ++ * \param input Data input to be swapped ++ * \return either the swapped data or the input data depending on whether it is in DMA mode or FPI mode ++*/ ++static u32 endian_swap(u32 input) ++{ ++#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA ++ u8 *ptr = (u8 *)&input; ++ return ((ptr[3] << 24) | (ptr[2] << 16) | (ptr[1] << 8) | ptr[0]); ++#else ++ return input; ++#endif ++} ++ ++/** \fn u32 input_swap(u32 input) ++ * \ingroup BOARD_SPECIFIC_FUNCTIONS ++ * \brief Not used ++ * \return input ++*/ ++static u32 input_swap(u32 input) ++{ ++ return input; ++} ++ ++/** \fn void aes_chip_init(void) ++ * \ingroup BOARD_SPECIFIC_FUNCTIONS ++ * \brief initialize AES hardware ++*/ ++static void aes_chip_init(void) ++{ ++ volatile struct deu_aes *aes = (struct deu_aes *) AES_START; ++ ++ aes->ctrl.SM = 1; ++#ifndef CONFIG_CRYPTO_DEV_LANTIQ_DMA ++ aes->ctrl.ARS = 1; ++#else ++ aes->ctrl.NDC = 1; /* to write to ENDI */ ++ asm("sync"); ++ aes->ctrl.ENDI = 0; ++ asm("sync"); ++ aes->ctrl.ARS = 0; /* 0 for dma */ ++ asm("sync"); ++#endif ++} ++ ++/** \fn void des_chip_init(void) ++ * \ingroup BOARD_SPECIFIC_FUNCTIONS ++ * \brief initialize DES hardware ++*/ ++static void des_chip_init(void) ++{ ++ volatile struct deu_des *des = (struct deu_des *) DES_3DES_START; ++ ++#ifndef CONFIG_CRYPTO_DEV_LANTIQ_DMA ++ /* start crypto engine with write to ILR */ ++ des->ctrl.SM = 1; ++ asm("sync"); ++ des->ctrl.ARS = 1; ++#else ++ des->ctrl.SM = 1; ++ des->ctrl.NDC = 1; ++ asm("sync"); ++ des->ctrl.ENDI = 0; ++ asm("sync"); ++ des->ctrl.ARS = 0; /* 0 for dma */ ++ ++#endif ++} ++ ++static u32 chip_init(void) ++{ ++ volatile struct deu_clk_ctrl *clc = (struct deu_clk_ctrl *) LQ_DEU_CLK; ++ ++#if 0 ++ lq_pmu_enable(1<<20); ++#endif ++ ++ clc->FSOE = 0; ++ clc->SBWE = 0; ++ clc->SPEN = 0; ++ clc->SBWE = 0; ++ clc->DISS = 0; ++ clc->DISR = 0; ++ ++ return *LQ_DEU_ID; ++} ++ ++static int lq_crypto_probe(struct platform_device *pdev) ++{ ++#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA ++ lq_crypto_ops.dma_init = dma_init; ++ lq_crypto_ops.dma_exit = dma_exit; ++ lq_crypto_ops.aes_dma_memcpy = aes_dma_memcpy; ++ lq_crypto_ops.des_dma_memcpy = des_dma_memcpy; ++ lq_crypto_ops.aes_dma_malloc = aes_dma_malloc; ++ lq_crypto_ops.des_dma_malloc = des_dma_malloc; ++ lq_crypto_ops.dma_align = dma_align; ++ lq_crypto_ops.dma_free = dma_free; ++#endif ++ ++ lq_crypto_ops.endian_swap = endian_swap; ++ lq_crypto_ops.input_swap = input_swap; ++ lq_crypto_ops.aes_chip_init = aes_chip_init; ++ lq_crypto_ops.des_chip_init = des_chip_init; ++ lq_crypto_ops.chip_init = chip_init; ++ ++ printk("lq_ar9_deu: driver loaded!\n"); ++ ++ 
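	/*
	 * The generic DEU core reaches the board hooks filled in above only
	 * through the deu_*() inline wrappers from deu.h, e.g. (sketch):
	 *
	 *   u32 id = deu_chip_init();     dispatches to lq_crypto_ops.chip_init()
	 *   deu_aes_chip_init();          dispatches to lq_crypto_ops.aes_chip_init()
	 *
	 * so the ops table must be fully populated before lq_deu_init() below runs.
	 */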
lq_deu_init(); ++ ++ return 0; ++} ++ ++static int lq_crypto_remove(struct platform_device *pdev) ++{ ++ lq_deu_exit(); ++ ++ return 0; ++} ++ ++static struct platform_driver lq_crypto = { ++ .probe = lq_crypto_probe, ++ .remove = lq_crypto_remove, ++ .driver = { ++ .owner = THIS_MODULE, ++ .name = "lq_ar9_deu" ++ } ++}; ++ ++static int __init lq_crypto_init(void) ++{ ++ return platform_driver_register(&lq_crypto); ++} ++module_init(lq_crypto_init); ++ ++static void __exit lq_crypto_exit(void) ++{ ++ platform_driver_unregister(&lq_crypto); ++} ++module_exit(lq_crypto_exit); ++ ++#endif +--- /dev/null ++++ b/drivers/crypto/lantiq/deu_ar9.h +@@ -0,0 +1,291 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com> ++ * Copyright (C) 2009 Mohammad Firdaus / Infineon Technologies ++ */ ++ ++/** ++ \defgroup LQ_DEU LQ_DEU_DRIVERS ++ \ingroup API ++ \brief DEU driver module ++*/ ++ ++/** ++ \defgroup LQ_DEU_DEFINITIONS LQ_DEU_DEFINITIONS ++ \ingroup LQ_DEU ++ \brief Lantiq DEU definitions ++*/ ++ ++/** ++ \file deu_ar9.h ++ \brief DEU driver header file ++*/ ++ ++ ++#ifndef DEU_AR9_H ++#define DEU_AR9_H ++ ++#define LQ_DEU_BASE_ADDR (KSEG1 | 0x1E103100) ++#define LQ_DEU_CLK ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x0000)) ++#define LQ_DEU_ID ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x0008)) ++#define LQ_DES_CON ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x0010)) ++#define LQ_AES_CON ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x0050)) ++#define LQ_HASH_CON ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x00B0)) ++#define LQ_ARC4_CON ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x0100)) ++ ++#define ARC4_START LQ_ARC4_CON ++#define DES_3DES_START LQ_DES_CON ++#define HASH_START LQ_HASH_CON ++#define AES_START LQ_AES_CON ++ ++#ifdef CONFIG_CRYPTO_DEV_DMA ++# include "deu_dma.h" ++# define DEU_DWORD_REORDERING(ptr, buffer, in_out, bytes) \ ++ deu_dma_align(ptr, buffer, in_out, bytes) ++# define AES_MEMORY_COPY(outcopy, out_dma, out_arg, nbytes) \ ++ deu_aes_dma_memcpy(outcopy, out_dma, out_arg, nbytes) ++# define DES_MEMORY_COPY(outcopy, out_dma, out_arg, nbytes) \ ++ deu_des_dma_memcpy(outcopy, out_dma, out_arg, nbytes) ++# define BUFFER_IN 1 ++# define BUFFER_OUT 0 ++# define AES_ALGO 1 ++# define DES_ALGO 0 ++# define ALLOCATE_MEMORY(val, type) 1 ++# define FREE_MEMORY(buff) ++extern struct lq_deu_device lq_deu[1]; ++#endif /* CONFIG_CRYPTO_DEV_DMA */ ++ ++/* SHA CONSTANTS */ ++#define HASH_CON_VALUE 0x0700002C ++ ++#define INPUT_ENDIAN_SWAP(input) deu_input_swap(input) ++#define DEU_ENDIAN_SWAP(input) deu_endian_swap(input) ++#define DELAY_PERIOD 10 ++#define FIND_DEU_CHIP_VERSION chip_version() ++ ++#define WAIT_AES_DMA_READY() \ ++ do { \ ++ int i; \ ++ volatile struct deu_dma *dma = \ ++ (struct deu_dma *) LQ_DEU_DMA_CON; \ ++ volatile struct deu_aes *aes = \ ++ (volatile struct deu_aes 
*) AES_START; \ ++ for (i = 0; i < 10; i++) \ ++ udelay(DELAY_PERIOD); \ ++ while (dma->ctrl.BSY) {}; \ ++ while (aes->ctrl.BUS) {}; \ ++ } while (0) ++ ++#define WAIT_DES_DMA_READY() \ ++ do { \ ++ int i; \ ++ volatile struct deu_dma *dma = \ ++ (struct deu_dma *) LQ_DEU_DMA_CON; \ ++ volatile struct deu_des *des = \ ++ (struct deu_des *) DES_3DES_START; \ ++ for (i = 0; i < 10; i++) \ ++ udelay(DELAY_PERIOD); \ ++ while (dma->ctrl.BSY) {}; \ ++ while (des->ctrl.BUS) {}; \ ++ } while (0) ++ ++#define AES_DMA_MISC_CONFIG() \ ++ do { \ ++ volatile struct deu_aes *aes = \ ++ (volatile struct deu_aes *) AES_START; \ ++ aes->ctrl.KRE = 1; \ ++ aes->ctrl.GO = 1; \ ++ } while(0) ++ ++#define SHA_HASH_INIT \ ++ do { \ ++ volatile struct deu_hash *hash = \ ++ (struct deu_hash *) HASH_START; \ ++ hash->ctrl.SM = 1; \ ++ hash->ctrl.ALGO = 0; \ ++ hash->ctrl.INIT = 1; \ ++ } while(0) ++ ++/* DEU Common Structures for AR9*/ ++ ++struct deu_clk_ctrl { ++ u32 Res:26; ++ u32 FSOE:1; ++ u32 SBWE:1; ++ u32 EDIS:1; ++ u32 SPEN:1; ++ u32 DISS:1; ++ u32 DISR:1; ++}; ++ ++struct deu_des { ++ struct deu_des_ctrl { /* 10h */ ++ u32 KRE:1; ++ u32 reserved1:5; ++ u32 GO:1; ++ u32 STP:1; ++ u32 Res2:6; ++ u32 NDC:1; ++ u32 ENDI:1; ++ u32 Res3:2; ++ u32 F:3; ++ u32 O:3; ++ u32 BUS:1; ++ u32 DAU:1; ++ u32 ARS:1; ++ u32 SM:1; ++ u32 E_D:1; ++ u32 M:3; ++ } ctrl; ++ ++ u32 IHR; /* 14h */ ++ u32 ILR; /* 18h */ ++ u32 K1HR; /* 1c */ ++ u32 K1LR; ++ u32 K2HR; ++ u32 K2LR; ++ u32 K3HR; ++ u32 K3LR; /* 30h */ ++ u32 IVHR; /* 34h */ ++ u32 IVLR; /* 38 */ ++ u32 OHR; /* 3c */ ++ u32 OLR; /* 40 */ ++}; ++ ++struct deu_aes { ++ struct deu_aes_ctrl { ++ u32 KRE:1; ++ u32 reserved1:4; ++ u32 PNK:1; ++ u32 GO:1; ++ u32 STP:1; ++ u32 reserved2:6; ++ u32 NDC:1; ++ u32 ENDI:1; ++ u32 reserved3:2; ++ u32 F:3; /* fbs */ ++ u32 O:3; /* om */ ++ u32 BUS:1; /* bsy */ ++ u32 DAU:1; ++ u32 ARS:1; ++ u32 SM:1; ++ u32 E_D:1; ++ u32 KV:1; ++ u32 K:2; /* KL */ ++ } ctrl; ++ ++ u32 ID3R; /* 80h */ ++ u32 ID2R; /* 84h */ ++ u32 ID1R; /* 88h */ ++ u32 ID0R; /* 8Ch */ ++ u32 K7R; /* 90h */ ++ u32 K6R; /* 94h */ ++ u32 K5R; /* 98h */ ++ u32 K4R; /* 9Ch */ ++ u32 K3R; /* A0h */ ++ u32 K2R; /* A4h */ ++ u32 K1R; /* A8h */ ++ u32 K0R; /* ACh */ ++ u32 IV3R; /* B0h */ ++ u32 IV2R; /* B4h */ ++ u32 IV1R; /* B8h */ ++ u32 IV0R; /* BCh */ ++ u32 OD3R; /* D4h */ ++ u32 OD2R; /* D8h */ ++ u32 OD1R; /* DCh */ ++ u32 OD0R; /* E0h */ ++}; ++ ++struct deu_arc4 { ++ struct arc4_controlr { ++ u32 KRE:1; ++ u32 KLEN:4; ++ u32 KSAE:1; ++ u32 GO:1; ++ u32 STP:1; ++ u32 reserved1:6; ++ u32 NDC:1; ++ u32 ENDI:1; ++ u32 reserved2:8; ++ u32 BUS:1; /* bsy */ ++ u32 reserved3:1; ++ u32 ARS:1; ++ u32 SM:1; ++ u32 reserved4:4; ++ } ctrl; ++ ++ u32 K3R; /* 104h */ ++ u32 K2R; /* 108h */ ++ u32 K1R; /* 10Ch */ ++ u32 K0R; /* 110h */ ++ u32 IDLEN; /* 114h */ ++ u32 ID3R; /* 118h */ ++ u32 ID2R; /* 11Ch */ ++ u32 ID1R; /* 120h */ ++ u32 ID0R; /* 124h */ ++ u32 OD3R; /* 128h */ ++ u32 OD2R; /* 12Ch */ ++ u32 OD1R; /* 130h */ ++ u32 OD0R; /* 134h */ ++}; ++ ++struct deu_hash { ++ struct deu_hash_ctrl { ++ u32 reserved1:5; ++ u32 KHS:1; ++ u32 GO:1; ++ u32 INIT:1; ++ u32 reserved2:6; ++ u32 NDC:1; ++ u32 ENDI:1; ++ u32 reserved3:7; ++ u32 DGRY:1; ++ u32 BSY:1; ++ u32 reserved4:1; ++ u32 IRCL:1; ++ u32 SM:1; ++ u32 KYUE:1; ++ u32 HMEN:1; ++ u32 SSEN:1; ++ u32 ALGO:1; ++ } ctrl; ++ ++ u32 MR; /* B4h */ ++ u32 D1R; /* B8h */ ++ u32 D2R; /* BCh */ ++ u32 D3R; /* C0h */ ++ u32 D4R; /* C4h */ ++ u32 D5R; /* C8h */ ++ u32 dummy; /* CCh */ ++ u32 KIDX; /* D0h */ ++ u32 KEY; /* D4h */ ++ 
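	/* DBN below holds the number of 64-byte message blocks to run in
	 * HMAC mode; md5_hmac_final() later in this patch writes its
	 * buffered block count (mctx->dbn) here before replaying the
	 * blocks through the engine. */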
u32 DBN; /* D8h */ ++}; ++ ++struct deu_dma { ++ struct deu_dma_ctrl { ++ u32 reserved1:22; ++ u32 BS:2; ++ u32 BSY:1; ++ u32 reserved2:1; ++ u32 ALGO:2; ++ u32 RXCLS:2; ++ u32 reserved3:1; ++ u32 EN:1; ++ } ctrl; ++}; ++ ++#endif /* DEU_AR9_H */ +--- /dev/null ++++ b/drivers/crypto/lantiq/deu_danube.c +@@ -0,0 +1,484 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com> ++ * Copyright (C) 2009 Mohammad Firdaus ++ */ ++ ++#include <linux/module.h> ++#include <linux/init.h> ++#include <linux/types.h> ++#include <linux/errno.h> ++#include <asm/io.h> /* dma_cache_inv */ ++#include <linux/platform_device.h> ++ ++#ifdef CONFIG_SOC_LANTIQ_XWAY ++ ++#include "deu.h" ++ ++/** ++ \defgroup LQ_DEU LQ_DEU_DRIVERS ++ \ingroup API ++ \brief DEU driver module ++*/ ++ ++/** ++ \file deu_danube.c ++ \ingroup LQ_DEU ++ \brief board specific DEU driver file for danube ++*/ ++ ++/** ++ \defgroup BOARD_SPECIFIC_FUNCTIONS LQ_BOARD_SPECIFIC_FUNCTIONS ++ \ingroup LQ_DEU ++ \brief board specific DEU functions ++*/ ++ ++static int danube_pre_1_4; ++ ++#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA ++u32 *des_buff_in = NULL; ++u32 *des_buff_out = NULL; ++u32 *aes_buff_in = NULL; ++u32 *aes_buff_out = NULL; ++ ++struct lq_deu_device lq_deu[1]; ++ ++static u8 *g_dma_page_ptr = NULL; ++static u8 *g_dma_block = NULL; ++static u8 *g_dma_block2 = NULL; ++ ++/** \fn int dma_init(void) ++ * \ingroup BOARD_SPECIFIC_FUNCTIONS ++ * \brief Initialize DMA for DEU usage. 
DMA specific registers are ++ * intialized here, including a pointer to the device, memory ++ * space for the device and DEU-DMA descriptors ++ * \return -1 if fail, otherwise return 0 ++*/ ++static int dma_init(void) ++{ ++ struct dma_device_info *dma_device = NULL; ++ int i = 0; ++ volatile struct deu_dma *dma = (struct deu_dma *) LQ_DEU_DMA_CON; ++ struct dma_device_info *deu_dma_device_ptr; ++ ++ /* get one free page and share between g_dma_block and g_dma_block2 */ ++ printk("PAGE_SIZE = %ld\n", PAGE_SIZE); ++ /* need 16-byte alignment memory block */ ++ g_dma_page_ptr = (u8 *)__get_free_page(GFP_KERNEL); ++ /* need 16-byte alignment memory block */ ++ g_dma_block = g_dma_page_ptr; ++ /* need 16-byte alignment memory block */ ++ g_dma_block2 = (u8 *)(g_dma_page_ptr + (PAGE_SIZE >> 1)); ++ ++ deu_dma_device_ptr = dma_device_reserve("DEU"); ++ if (!deu_dma_device_ptr) { ++ printk("DEU: reserve DMA fail!\n"); ++ return -1; ++ } ++ lq_deu[0].dma_device = deu_dma_device_ptr; ++ dma_device = deu_dma_device_ptr; ++ /* dma_device->priv = &deu_dma_priv; */ ++ dma_device->buffer_alloc = &deu_dma_buffer_alloc; ++ dma_device->buffer_free = &deu_dma_buffer_free; ++ dma_device->intr_handler = &deu_dma_intr_handler; ++ dma_device->tx_endianness_mode = LQ_DMA_ENDIAN_TYPE3; ++ dma_device->rx_endianness_mode = LQ_DMA_ENDIAN_TYPE3; ++ dma_device->port_num = 1; ++ dma_device->tx_burst_len = 4; ++ dma_device->max_rx_chan_num = 1; ++ dma_device->max_tx_chan_num = 1; ++ dma_device->port_packet_drop_enable = 0; ++ ++ for (i = 0; i < dma_device->max_rx_chan_num; i++) { ++ dma_device->rx_chan[i]->packet_size = DEU_MAX_PACKET_SIZE; ++ dma_device->rx_chan[i]->desc_len = 1; ++ dma_device->rx_chan[i]->control = LQ_DMA_CH_ON; ++ dma_device->rx_chan[i]->byte_offset = 0; ++ dma_device->rx_chan[i]->chan_poll_enable = 1; ++ ++ } ++ ++ for (i = 0; i < dma_device->max_tx_chan_num; i++) { ++ dma_device->tx_chan[i]->control = LQ_DMA_CH_ON; ++ dma_device->tx_chan[i]->desc_len = 1; ++ dma_device->tx_chan[i]->chan_poll_enable = 1; ++ } ++ ++ dma_device->current_tx_chan = 0; ++ dma_device->current_rx_chan = 0; ++ ++ dma_device_register(dma_device); ++ for (i = 0; i < dma_device->max_rx_chan_num; i++) { ++ (dma_device->rx_chan[i])->open(dma_device->rx_chan[i]); ++ } ++ ++ dma->ctrl.BS = 0; ++ dma->ctrl.RXCLS = 0; ++ dma->ctrl.EN = 1; ++ ++ ++ *LQ_DMA_PS = 1; ++ ++ /* DANUBE PRE 1.4 SOFTWARE FIX */ ++ if (danube_pre_1_4) ++ *LQ_DMA_PCTRL = 0x14; ++ else ++ *LQ_DMA_PCTRL = 0xF14; ++ ++ return 0; ++} ++ ++/** \fn u32 *dma_align(const u8 *arg, u32 *buffer_alloc, int in_buff, int nbytes) ++ * \ingroup BOARD_SPECIFIC_FUNCTIONS ++ * \brief A fix to align mis-aligned address for Danube version 1.3 chips ++ * which has memory alignment issues. 
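 *        On pre-1.4 silicon, input pointers that are not 16-byte aligned
 *        ((u32)arg & 0xF) and output pointers that are not 4-byte aligned
 *        ((u32)arg & 0x3) are redirected to the kmalloc'd bounce buffer
 *        passed in buffer_alloc; on 1.4 and later the pointer is returned
 *        unchanged.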
++ * \param arg Pointer to the input / output memory address ++ * \param buffer_alloc A pointer to the buffer ++ * \param in_buff Input (if == 1) or Output (if == 0) buffer ++ * \param nbytes Number of bytes of data ++ * \return returns arg: if address is aligned, buffer_alloc: if memory address is not aligned ++*/ ++static u32 *dma_align(const u8 *arg, u32 *buffer_alloc, int in_buff, int nbytes) ++{ ++ if (danube_pre_1_4) { ++ /* for input buffer */ ++ if (in_buff) { ++ if (((u32) arg) & 0xF) { ++ memcpy(buffer_alloc, arg, nbytes); ++ return (u32 *) buffer_alloc; ++ } else { ++ return (u32 *) arg; ++ } ++ } ++ else { ++ /* for output buffer */ ++ if (((u32) arg) & 0x3) ++ return buffer_alloc; ++ else ++ return (u32 *) arg; ++ } ++ } ++ ++ return (u32 *) arg; ++} ++ ++/** \fn void aes_dma_memcpy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes) ++ * \ingroup BOARD_SPECIFIC_FUNCTIONS ++ * \brief copy the DMA data to the memory address space for AES. The swaping ++ * of the 4 bytes is done only for Danube version 1.3 (FIX). Otherwise, ++ * it is a direct memory copy to out_arg pointer ++ * \param outcopy Pointer to the address to store swapped copy ++ * \param out_dma A pointer to the memory address that stores the DMA data ++ * \param out_arg The pointer to the memory address that needs to be copied to ++ * \param nbytes Number of bytes of data ++*/ ++static void aes_dma_memcpy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes) ++{ ++ int i = 0; ++ int x = 0; ++ ++ /* DANUBE PRE 1.4 SOFTWARE FIX */ ++ if (danube_pre_1_4) { ++ for (i = 0; i < (nbytes / 4); i++) { ++ x = i ^ 0x3; ++ outcopy[i] = out_dma[x]; ++ ++ } ++ if (((u32) out_arg) & 0x3) { ++ memcpy((u8 *)out_arg, outcopy, nbytes); ++ } ++ } else { ++ memcpy(out_arg, out_dma, nbytes); ++ } ++} ++ ++/** \fn void des_dma_memcpy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes) ++ * \ingroup BOARD_SPECIFIC_FUNCTIONS ++ * \brief copy the DMA data to the memory address space for DES. The swaping ++ * of the 4 bytes is done only for Danube version 1.3 (FIX). 
Otherwise, ++ * it is a direct memory copy to out_arg pointer ++ * \param outcopy Pointer to the address to store swapped copy ++ * \param out_dma A pointer to the memory address that stores the DMA data ++ * \param out_arg The pointer to the memory address that needs to be copied to ++ * \param nbytes Number of bytes of data ++*/ ++static void des_dma_memcpy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes) ++{ ++ int i = 0; ++ int x = 0; ++ ++ /* DANUBE PRE 1.4 SOFTWARE FIX */ ++ if (danube_pre_1_4) { ++ for (i = 0; i < (nbytes / 4); i++) { ++ x = i ^ 1; ++ outcopy[i] = out_dma[x]; ++ ++ } ++ if (((u32) out_arg) & 0x3) { ++ memcpy((u8 *)out_arg, outcopy, nbytes); ++ } ++ } else { ++ memcpy(out_arg, out_dma, nbytes); ++ } ++} ++ ++/** \fn int des_dma_malloc(int value) ++ * \ingroup BOARD_SPECIFIC_FUNCTIONS ++ * \brief allocates memory to the necessary memory input/output buffer ++ * location, used during the DES algorithm DMA transfer (memory ++ * alignment issues) ++ * \param value value determinds whether the calling of the function is for a ++ * input buffer or for an output buffer memory allocation ++*/ ++static int des_dma_malloc(int value) ++{ ++ if (danube_pre_1_4) { ++ if (value == BUFFER_IN) { ++ des_buff_in = kmalloc(DEU_MAX_PACKET_SIZE, GFP_ATOMIC); ++ if (!des_buff_in) ++ return -1; ++ else ++ return 0; ++ } ++ else { ++ des_buff_out = kmalloc(DEU_MAX_PACKET_SIZE, GFP_ATOMIC); ++ if (!des_buff_out) ++ return -1; ++ else ++ return 0; ++ } ++ } else { ++ return 0; ++ } ++} ++ ++/** \fn int aes_dma_malloc(int value) ++ * \ingroup BOARD_SPECIFIC_FUNCTIONS ++ * \brief allocates memory to the necessary memory input/output buffer ++ * location, used during the AES algorithm DMA transfer (memory ++ * alignment issues) ++ * \param value value determinds whether the calling of the function is for a ++ * input buffer or for an output buffer memory allocation ++*/ ++static int aes_dma_malloc(int value) ++{ ++ if (danube_pre_1_4) { ++ if (value == BUFFER_IN) { ++ aes_buff_in = kmalloc(DEU_MAX_PACKET_SIZE, GFP_ATOMIC); ++ if (!aes_buff_in) ++ return -1; ++ else ++ return 0; ++ } ++ else { ++ aes_buff_out = kmalloc(DEU_MAX_PACKET_SIZE, GFP_ATOMIC); ++ if (!aes_buff_out) ++ return -1; ++ else ++ return 0; ++ } ++ } else { ++ return 0; ++ } ++} ++ ++/** \fn void dma_free(u32 *addr) ++ * \ingroup BOARD_SPECIFIC_FUNCTIONS ++ * \brief frees previously allocated memory ++ * \param addr memory address of the buffer that needs to be freed ++*/ ++static void dma_free(u32 *addr) ++{ ++ if (addr) ++ kfree(addr); ++ return; ++} ++ ++/** \fn dma_exit(void) ++ * \ingroup BOARD_SPECIFIC_FUNCTIONS ++ * \brief unregister dma devices after exit ++*/ ++static void dma_exit(void) ++{ ++ if (g_dma_page_ptr) ++ free_page((u32) g_dma_page_ptr); ++ dma_device_release(lq_deu[0].dma_device); ++ dma_device_unregister(lq_deu[0].dma_device); ++} ++#endif /* CONFIG_CRYPTO_DEV_LANTIQ_DMA */ ++ ++/** \fn u32 endian_swap(u32 input) ++ * \ingroup BOARD_SPECIFIC_FUNCTIONS ++ * \brief function is not used ++ * \param input Data input to be swapped ++ * \return input ++*/ ++static u32 endian_swap(u32 input) ++{ ++ return input; ++} ++ ++/** \fn u32 input_swap(u32 input) ++ * \ingroup BOARD_SPECIFIC_FUNCTIONS ++ * \brief Swap the input data if the current chip is Danube version ++ * 1.4 and do nothing to the data if the current chip is ++ * Danube version 1.3 ++ * \param input data that needs to be swapped ++ * \return input or swapped input ++*/ ++static u32 input_swap(u32 input) ++{ ++ if (!danube_pre_1_4) { ++ u8 *ptr = 
(u8 *)&input; ++ return ((ptr[3] << 24) | (ptr[2] << 16) | (ptr[1] << 8) | ptr[0]); ++ } else { ++ return input; ++ } ++} ++ ++/** \fn void aes_chip_init(void) ++ * \ingroup BOARD_SPECIFIC_FUNCTIONS ++ * \brief initialize AES hardware ++*/ ++static void aes_chip_init(void) ++{ ++ volatile struct deu_aes *aes = (struct deu_aes *) AES_START; ++ ++#ifndef CONFIG_CRYPTO_DEV_LANTIQ_DMA ++ /* start crypto engine with write to ILR */ ++ aes->ctrl.SM = 1; ++ aes->ctrl.ARS = 1; ++#else ++ aes->ctrl.SM = 1; ++ aes->ctrl.ARS = 1; /* 0 for dma */ ++#endif ++} ++ ++/** \fn void des_chip_init(void) ++ * \ingroup BOARD_SPECIFIC_FUNCTIONS ++ * \brief initialize DES hardware ++*/ ++static void des_chip_init(void) ++{ ++ volatile struct deu_des *des = (struct deu_des *) DES_3DES_START; ++ ++#ifndef CONFIG_CRYPTO_DEV_LANTIQ_DMA ++ /* start crypto engine with write to ILR */ ++ des->ctrl.SM = 1; ++ des->ctrl.ARS = 1; ++#else ++ des->ctrl.SM = 1; ++ des->ctrl.ARS = 1; /* 0 for dma */ ++#endif ++} ++ ++/** \fn void deu_chip_version(void) ++ * \ingroup LQ_DES_FUNCTIONS ++ * \brief To find the version of the chip by looking at the chip ID ++ * \param danube_pre_1_4 (sets to 1 if Chip is Danube less than v1.4) ++*/ ++static void deu_chip_version(void) ++{ ++ /* DANUBE PRE 1.4 SOFTWARE FIX */ ++ int chip_id = 0; ++ chip_id = *LQ_MPS_CHIPID; ++ chip_id >>= 28; ++ ++ if (chip_id >= 4) { ++ danube_pre_1_4 = 0; ++ printk("Danube Chip ver. 1.4 detected. \n"); ++ } ++ else { ++ danube_pre_1_4 = 1; ++ printk("Danube Chip ver. 1.3 or below detected. \n"); ++ } ++} ++ ++static u32 chip_init(void) ++{ ++ volatile struct deu_clk_ctrl *clc = (struct deu_clk_ctrl *) LQ_DEU_CLK; ++ ++#if 0 ++ lq_pmu_enable(1<<20); ++#endif ++ ++ deu_chip_version(); ++ ++ clc->FSOE = 0; ++ clc->SBWE = 0; ++ clc->SPEN = 0; ++ clc->SBWE = 0; ++ clc->DISS = 0; ++ clc->DISR = 0; ++ ++ return *LQ_DEU_ID; ++} ++ ++static int lq_crypto_probe(struct platform_device *pdev) ++{ ++#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA ++ lq_crypto_ops.dma_init = dma_init; ++ lq_crypto_ops.dma_exit = dma_exit; ++ lq_crypto_ops.aes_dma_memcpy = aes_dma_memcpy; ++ lq_crypto_ops.des_dma_memcpy = des_dma_memcpy; ++ lq_crypto_ops.aes_dma_malloc = aes_dma_malloc; ++ lq_crypto_ops.des_dma_malloc = des_dma_malloc; ++ lq_crypto_ops.dma_align = dma_align; ++ lq_crypto_ops.dma_free = dma_free; ++#endif ++ ++ lq_crypto_ops.endian_swap = endian_swap; ++ lq_crypto_ops.input_swap = input_swap; ++ lq_crypto_ops.aes_chip_init = aes_chip_init; ++ lq_crypto_ops.des_chip_init = des_chip_init; ++ lq_crypto_ops.chip_init = chip_init; ++ ++ printk("lq_danube_deu: driver loaded!\n"); ++ ++ lq_deu_init(); ++ ++ return 0; ++} ++ ++static int lq_crypto_remove(struct platform_device *pdev) ++{ ++ lq_deu_exit(); ++ ++ return 0; ++} ++ ++static struct platform_driver lq_crypto = { ++ .probe = lq_crypto_probe, ++ .remove = lq_crypto_remove, ++ .driver = { ++ .owner = THIS_MODULE, ++ .name = "lq_danube_deu" ++ } ++}; ++ ++static int __init lq_crypto_init(void) ++{ ++ return platform_driver_register(&lq_crypto); ++} ++module_init(lq_crypto_init); ++ ++static void __exit lq_crypto_exit(void) ++{ ++ platform_driver_unregister(&lq_crypto); ++} ++module_exit(lq_crypto_exit); ++ ++#endif +--- /dev/null ++++ b/drivers/crypto/lantiq/deu_danube.h +@@ -0,0 +1,255 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later 
version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com> ++ * Copyright (C) 2009 Mohammad Firdaus / Infineon Technologies ++ */ ++ ++/** ++ \defgroup LQ_DEU LQ_DEU_DRIVERS ++ \ingroup API ++ \brief DEU driver module ++*/ ++ ++/** ++ \file deu_danube.h ++ \brief board specific driver header file for danube ++*/ ++ ++/** ++ \defgroup BOARD_SPECIFIC_FUNCTIONS LQ_BOARD_SPECIFIC_FUNCTIONS ++ \ingroup LQ_DEU ++ \brief board specific DEU header files ++*/ ++ ++#ifndef DEU_DANUBE_H ++#define DEU_DANUBE_H ++ ++#define LQ_DEU_BASE_ADDR (KSEG1 | 0x1E103100) ++#define LQ_DEU_CLK ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x0000)) ++#define LQ_DEU_ID ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x0008)) ++#define LQ_DES_CON ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x0010)) ++#define LQ_AES_CON ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x0050)) ++#define LQ_HASH_CON ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x00B0)) ++#define LQ_ARC4_CON ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x0100)) ++ ++#define ARC4_START LQ_ARC4_CON ++#define DES_3DES_START LQ_DES_CON ++#define HASH_START LQ_HASH_CON ++#define AES_START LQ_AES_CON ++ ++#define LQ_MPS (KSEG1 | 0x1F107000) ++#define LQ_MPS_CHIPID ((volatile u32*)(LQ_MPS + 0x0344)) ++#define LQ_MPS_CHIPID_VERSION_GET(value) (((value) >> 28) & 0xF) ++#define LQ_MPS_CHIPID_VERSION_SET(value) (((value) & 0xF) << 28) ++#define LQ_MPS_CHIPID_PARTNUM_GET(value) (((value) >> 12) & 0xFFFF) ++#define LQ_MPS_CHIPID_PARTNUM_SET(value) (((value) & 0xFFFF) << 12) ++#define LQ_MPS_CHIPID_MANID_GET(value) (((value) >> 1) & 0x7FF) ++#define LQ_MPS_CHIPID_MANID_SET(value) (((value) & 0x7FF) << 1) ++ ++#ifdef CONFIG_CRYPTO_DEV_DMA ++# define DEU_DWORD_REORDERING(ptr, buffer, in_out, bytes) \ ++ deu_dma_align(ptr, buffer, in_out, bytes) ++# define AES_MEMORY_COPY(outcopy, out_dma, out_arg, nbytes) \ ++ deu_aes_dma_memcpy(outcopy, out_dma, out_arg, nbytes) ++# define DES_MEMORY_COPY(outcopy, out_dma, out_arg, nbytes) \ ++ deu_des_dma_memcpy(outcopy, out_dma, out_arg, nbytes) ++# define BUFFER_IN 1 ++# define BUFFER_OUT 0 ++# define DELAY_PERIOD 9 ++# define AES_ALGO 1 ++# define DES_ALGO 0 ++# define FREE_MEMORY(buff) deu_dma_free(buff) ++# define ALLOCATE_MEMORY(val, type) type ? 
\ ++ deu_aes_dma_malloc(val) : \ ++ deu_des_dma_malloc(val) ++#endif /* CONFIG_CRYPTO_DEV_DMA */ ++ ++#define INPUT_ENDIAN_SWAP(input) deu_input_swap(input) ++#define DEU_ENDIAN_SWAP(input) deu_endian_swap(input) ++#define AES_DMA_MISC_CONFIG() ++ ++#define WAIT_AES_DMA_READY() \ ++ do { \ ++ int i; \ ++ volatile struct deu_dma *dma = \ ++ (struct deu_dma *) LQ_DEU_DMA_CON; \ ++ volatile struct deu_aes *aes = \ ++ (volatile struct deu_aes *) AES_START; \ ++ for (i = 0; i < 10; i++) \ ++ udelay(DELAY_PERIOD); \ ++ while (dma->ctrl.BSY) {}; \ ++ while (aes->ctrl.BUS) {}; \ ++ } while (0) ++ ++#define WAIT_DES_DMA_READY() \ ++ do { \ ++ int i; \ ++ volatile struct deu_dma *dma = \ ++ (struct deu_dma *) LQ_DEU_DMA_CON; \ ++ volatile struct deu_des *des = \ ++ (struct deu_des *) DES_3DES_START; \ ++ for (i = 0; i < 10; i++) \ ++ udelay(DELAY_PERIOD); \ ++ while (dma->ctrl.BSY) {}; \ ++ while (des->ctrl.BUS) {}; \ ++ } while (0) ++ ++#define SHA_HASH_INIT \ ++ do { \ ++ volatile struct deu_hash *hash = \ ++ (struct deu_hash *) HASH_START; \ ++ hash->ctrl.SM = 1; \ ++ hash->ctrl.ALGO = 0; \ ++ hash->ctrl.INIT = 1; \ ++ } while(0) ++ ++/* DEU STRUCTURES */ ++ ++struct deu_clk_ctrl { ++ u32 Res:26; ++ u32 FSOE:1; ++ u32 SBWE:1; ++ u32 EDIS:1; ++ u32 SPEN:1; ++ u32 DISS:1; ++ u32 DISR:1; ++}; ++ ++struct deu_des { ++ struct deu_des_ctrl { ++ u32 KRE:1; ++ u32 reserved1:5; ++ u32 GO:1; ++ u32 STP:1; ++ u32 Res2:6; ++ u32 NDC:1; ++ u32 ENDI:1; ++ u32 Res3:2; ++ u32 F:3; ++ u32 O:3; ++ u32 BUS:1; ++ u32 DAU:1; ++ u32 ARS:1; ++ u32 SM:1; ++ u32 E_D:1; ++ u32 M:3; ++ } ctrl; ++ ++ u32 IHR; ++ u32 ILR; ++ u32 K1HR; ++ u32 K1LR; ++ u32 K2HR; ++ u32 K2LR; ++ u32 K3HR; ++ u32 K3LR; ++ u32 IVHR; ++ u32 IVLR; ++ u32 OHR; ++ u32 OLR; ++}; ++ ++struct deu_aes { ++ struct deu_aes_ctrl { ++ u32 KRE:1; ++ u32 reserved1:4; ++ u32 PNK:1; ++ u32 GO:1; ++ u32 STP:1; ++ u32 reserved2:6; ++ u32 NDC:1; ++ u32 ENDI:1; ++ u32 reserved3:2; ++ u32 F:3; /* fbs */ ++ u32 O:3; /* om */ ++ u32 BUS:1; /* bsy */ ++ u32 DAU:1; ++ u32 ARS:1; ++ u32 SM:1; ++ u32 E_D:1; ++ u32 KV:1; ++ u32 K:2; /* KL */ ++ } ctrl; ++ ++ u32 ID3R; /* 80h */ ++ u32 ID2R; /* 84h */ ++ u32 ID1R; /* 88h */ ++ u32 ID0R; /* 8Ch */ ++ u32 K7R; /* 90h */ ++ u32 K6R; /* 94h */ ++ u32 K5R; /* 98h */ ++ u32 K4R; /* 9Ch */ ++ u32 K3R; /* A0h */ ++ u32 K2R; /* A4h */ ++ u32 K1R; /* A8h */ ++ u32 K0R; /* ACh */ ++ u32 IV3R; /* B0h */ ++ u32 IV2R; /* B4h */ ++ u32 IV1R; /* B8h */ ++ u32 IV0R; /* BCh */ ++ u32 OD3R; /* D4h */ ++ u32 OD2R; /* D8h */ ++ u32 OD1R; /* DCh */ ++ u32 OD0R; /* E0h */ ++}; ++ ++struct deu_hash { ++ struct deu_hash_ctrl { ++ u32 reserved1:5; ++ u32 KHS:1; ++ u32 GO:1; ++ u32 INIT:1; ++ u32 reserved2:6; ++ u32 NDC:1; ++ u32 ENDI:1; ++ u32 reserved3:7; ++ u32 DGRY:1; ++ u32 BSY:1; ++ u32 reserved4:1; ++ u32 IRCL:1; ++ u32 SM:1; ++ u32 KYUE:1; ++ u32 HMEN:1; ++ u32 SSEN:1; ++ u32 ALGO:1; ++ } ctrl; ++ ++ u32 MR; /* B4h */ ++ u32 D1R; /* B8h */ ++ u32 D2R; /* BCh */ ++ u32 D3R; /* C0h */ ++ u32 D4R; /* C4h */ ++ u32 D5R; /* C8h */ ++ u32 dummy; /* CCh */ ++ u32 KIDX; /* D0h */ ++ u32 KEY; /* D4h */ ++ u32 DBN; /* D8h */ ++}; ++ ++struct deu_dma { ++ struct deu_dma_ctrl { ++ u32 reserved1:22; ++ u32 BS:2; ++ u32 BSY:1; ++ u32 reserved2:1; ++ u32 ALGO:2; ++ u32 RXCLS:2; ++ u32 reserved3:1; ++ u32 EN:1; ++ } ctrl; ++}; ++ ++#endif /* DEU_DANUBE_H */ +--- /dev/null ++++ b/drivers/crypto/lantiq/deu_dma.c +@@ -0,0 +1,147 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public 
License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com> ++ * Copyright (C) 2009 Mohammad Firdaus ++ */ ++ ++/** ++ \defgroup LQ_DEU LQ_DEU_DRIVERS ++ \ingroup LQ_API ++ \brief Lantiq DEU driver module ++*/ ++ ++/** ++ \file deu_dma.c ++ \ingroup LQ_DEU ++ \brief DMA DEU driver file ++*/ ++ ++/** ++ \defgroup LQ_DMA_FUNCTIONS LQ_DMA_FUNCTIONS ++ \ingroup LQ_DEU ++ \brief DMA DEU driver functions ++*/ ++ ++#include <linux/module.h> ++#include <linux/init.h> ++#include <linux/types.h> ++#include <linux/errno.h> ++#include <linux/delay.h> ++#include <asm/io.h> ++#include "deu.h" ++#include "deu_dma.h" ++ ++/* extern struct deu_drv_priv deu_dma_priv; */ ++ ++/** \fn int deu_dma_intr_handler(struct dma_device_info *dma_dev, int status) ++ * \ingroup LQ_DMA_FUNCTIONS ++ * \brief callback function for DEU DMA interrupt ++ * \param dma_dev dma device ++ * \param status not used ++*/ ++int deu_dma_intr_handler(struct dma_device_info *dma_dev, int status) ++{ ++#if 0 ++ int len = 0; ++ while (len <= 20000) { len++; } ++ u8 *buf; ++ int len = 0; ++ ++ struct deu_drv_priv *deu_priv = (struct deu_drv_priv *)dma_dev->priv; ++ /* printk("status:%d \n",status); */ ++ switch(status) { ++ case RCV_INT: ++ len = dma_device_read(dma_dev, (u8 **)&buf, NULL); ++ if ( len != deu_priv->deu_rx_len) { ++ printk(KERN_ERR "%s packet length %d is not " ++ "equal to expect %d\n", ++ __func__, len, deu_priv->deu_rx_len); ++ return -1; ++ } ++ memcpy(deu_priv->deu_rx_buf, buf, deu_priv->deu_rx_len); ++ /* Reset for next usage */ ++ deu_priv->deu_rx_buf = NULL; ++ deu_priv->deu_rx_len = 0; ++ DEU_WAKEUP_EVENT(deu_priv->deu_thread_wait, DEU_EVENT, ++ deu_priv->deu_event_flags); ++ break; ++ case TX_BUF_FULL_INT: ++ /* delay for buffer to be cleared */ ++ while (len <= 20000) { len++; } ++ break; ++ ++ case TRANSMIT_CPT_INT: ++ break; ++ default: ++ break; ++ } ++#endif ++ return 0; ++} ++ ++extern u8 *g_dma_block; ++extern u8 *g_dma_block2; ++ ++/** \fn u8 *deu_dma_buffer_alloc(int len, int *byte_offset, void **opt) ++ * \ingroup LQ_DMA_FUNCTIONS ++ * \brief callback function for allocating buffers for dma receive descriptors ++ * \param len not used ++ * \param byte_offset dma byte offset ++ * \param *opt not used ++ * ++*/ ++u8 *deu_dma_buffer_alloc(int len, int *byte_offset, void **opt) ++{ ++ u8 *swap = NULL; ++ ++ /* dma-core needs at least 2 blocks of memory */ ++ swap = g_dma_block; ++ g_dma_block = g_dma_block2; ++ g_dma_block2 = swap; ++ ++ /* dma_cache_wback_inv((unsigned long) g_dma_block,(PAGE_SIZE >> 1)); */ ++ *byte_offset = 0; ++ ++ return g_dma_block; ++} ++ ++/** \fn int deu_dma_buffer_free(u8 * dataptr, void *opt) ++ * \ingroup LQ_DMA_FUNCTIONS ++ * \brief callback function for freeing dma transmit descriptors ++ * \param dataptr data pointer to be freed ++ * \param opt not used ++*/ ++int deu_dma_buffer_free(u8 *dataptr, void *opt) ++{ ++#if 0 ++ printk("Trying to free memory buffer\n"); ++ 
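	/* Note: deu_dma_buffer_alloc() above never allocates anything -- it
	 * just ping-pongs between the two halves of the single DMA page
	 * (g_dma_block / g_dma_block2) -- so there is nothing to release
	 * here and this branch stays compiled out. */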
if (dataptr == NULL && opt == NULL) ++ return 0; ++ else if (opt == NULL) { ++ kfree(dataptr); ++ return 1; ++ } ++ else if (dataptr == NULL) { ++ kfree(opt); ++ return 1; ++ } ++ else { ++ kfree(opt); ++ kfree(dataptr); ++ } ++#endif ++ return 0; ++} +--- /dev/null ++++ b/drivers/crypto/lantiq/deu_dma.h +@@ -0,0 +1,78 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com> ++ * Copyright (C) 2009 Mohammad Firdaus ++ */ ++ ++/** ++ \addtogroup LQ_DEU LQ_DEU_DRIVERS ++ \ingroup API ++ \brief Lantiq DEU driver module ++*/ ++ ++/** ++ \file deu_dma.h ++ \ingroup LQ_DEU ++ \brief DMA DEU driver header file ++*/ ++ ++#ifndef DEU_DMA_H ++#define DEU_DMA_H ++ ++#include <linux/init.h> ++#include <linux/module.h> ++#include <linux/mm.h> ++#include <linux/crypto.h> ++#include <asm/scatterlist.h> ++#include <asm/byteorder.h> ++#include <linux/skbuff.h> ++#include <linux/netdevice.h> ++ ++#include <asm/ifx/irq.h> ++#include <asm/ifx/ifx_dma_core.h> ++#ifndef CONFIG_CRYPTO_DEV_POLL_DMA ++# define CONFIG_CRYPTO_DEV_POLL_DMA ++#endif ++ ++/* must match the size of memory block allocated for ++ * g_dma_block and g_dma_block2 */ ++#define DEU_MAX_PACKET_SIZE (PAGE_SIZE >> 1) ++ ++struct lq_deu_device { ++ struct dma_device_info *dma_device; ++ u8 *dst; ++ u8 *src; ++ int len; ++ int dst_count; ++ int src_count; ++ int recv_count; ++ int packet_size; ++ int packet_num; ++ wait_queue_t wait; ++}; ++ ++extern struct lq_deu_device lq_deu[1]; ++ ++extern int deu_dma_intr_handler(struct dma_device_info *, int); ++extern u8 *deu_dma_buffer_alloc(int, int *, void **); ++extern int deu_dma_buffer_free(u8 *, void *); ++extern void deu_dma_inactivate_poll(struct dma_device_info* dma_dev); ++extern void deu_dma_activate_poll(struct dma_device_info* dma_dev); ++extern struct dma_device_info* deu_dma_reserve(struct dma_device_info** ++ dma_device); ++extern int deu_dma_release(struct dma_device_info** dma_device); ++ ++#endif /* IFMIPS_DEU_DMA_H */ +--- /dev/null ++++ b/drivers/crypto/lantiq/md5.c +@@ -0,0 +1,285 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 
++ * ++ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com> ++ * Copyright (C) 2009 Mohammad Firdaus ++ */ ++ ++/** ++ \defgroup LQ_DEU LQ_DEU_DRIVERS ++ \ingroup API ++ \brief Lantiq DEU driver module ++*/ ++ ++/** ++ \file md5.c ++ \ingroup LQ_DEU ++ \brief MD5 encryption DEU driver file ++*/ ++ ++/** ++ \defgroup LQ_MD5_FUNCTIONS LQ_MD5_FUNCTIONS ++ \ingroup LQ_DEU ++ \brief Lantiq DEU MD5 functions ++*/ ++ ++#include <crypto/internal/hash.h> ++#include <linux/init.h> ++#include <linux/module.h> ++#include <linux/string.h> ++#include <linux/crypto.h> ++#include <linux/types.h> ++#include <asm/byteorder.h> ++#include "deu.h" ++ ++#define MD5_DIGEST_SIZE 16 ++#define MD5_HMAC_BLOCK_SIZE 64 ++#define MD5_BLOCK_WORDS 16 ++#define MD5_HASH_WORDS 4 ++ ++static spinlock_t cipher_lock; ++ ++struct md5_ctx { ++ u32 hash[MD5_HASH_WORDS]; ++ u32 block[MD5_BLOCK_WORDS]; ++ u64 byte_count; ++}; ++ ++/** \fn static u32 md5_endian_swap(u32 input) ++ * \ingroup LQ_MD5_FUNCTIONS ++ * \brief perform dword level endian swap ++ * \param input value of dword that requires to be swapped ++*/ ++static u32 md5_endian_swap(u32 input) ++{ ++ u8 *ptr = (u8 *)&input; ++ ++ return ((ptr[3] << 24) | (ptr[2] << 16) | (ptr[1] << 8) | ptr[0]); ++} ++ ++/** \fn static void md5_transform(u32 *hash, u32 const *in) ++ * \ingroup LQ_MD5_FUNCTIONS ++ * \brief main interface to md5 hardware ++ * \param hash current hash value ++ * \param in 64-byte block of input ++*/ ++static void md5_transform(u32 *hash, u32 const *in) ++{ ++ int i; ++ volatile struct deu_hash *hashs = (struct deu_hash *) HASH_START; ++ ulong flag; ++ ++ CRTCL_SECT_START; ++ ++ for (i = 0; i < 16; i++) { ++ hashs->MR = md5_endian_swap(in[i]); ++ }; ++ ++ /* wait for processing */ ++ while (hashs->ctrl.BSY) { ++ /* this will not take long */ ++ } ++ ++ CRTCL_SECT_END; ++} ++ ++/** \fn static inline void md5_transform_helper(struct md5_ctx *ctx) ++ * \ingroup LQ_MD5_FUNCTIONS ++ * \brief interfacing function for md5_transform() ++ * \param ctx crypto context ++*/ ++static inline void md5_transform_helper(struct md5_ctx *ctx) ++{ ++ /* le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32)); */ ++ md5_transform(ctx->hash, ctx->block); ++} ++ ++/** \fn static void md5_init(struct crypto_tfm *tfm) ++ * \ingroup LQ_MD5_FUNCTIONS ++ * \brief initialize md5 hardware ++ * \param tfm linux crypto algo transform ++*/ ++static int md5_init(struct shash_desc *desc) ++{ ++ struct md5_ctx *mctx = shash_desc_ctx(desc); ++ volatile struct deu_hash *hash = (struct deu_hash *) HASH_START; ++ ++ hash->ctrl.SM = 1; ++ hash->ctrl.ALGO = 1; /* 1 = md5 0 = sha1 */ ++ hash->ctrl.INIT = 1; /* Initialize the hash operation by writing ++ a '1' to the INIT bit. 
*/ ++ ++ mctx->byte_count = 0; ++ ++ return 0; ++} ++ ++/** \fn static void md5_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) ++ * \ingroup LQ_MD5_FUNCTIONS ++ * \brief on-the-fly md5 computation ++ * \param tfm linux crypto algo transform ++ * \param data input data ++ * \param len size of input data ++*/ ++static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len) ++{ ++ struct md5_ctx *mctx = shash_desc_ctx(desc); ++ const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); ++ ++ mctx->byte_count += len; ++ ++ if (avail > len) { ++ memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), ++ data, len); ++ return 0; ++ } ++ ++ memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), ++ data, avail); ++ ++ md5_transform_helper(mctx); ++ data += avail; ++ len -= avail; ++ ++ while (len >= sizeof(mctx->block)) { ++ memcpy(mctx->block, data, sizeof(mctx->block)); ++ md5_transform_helper(mctx); ++ data += sizeof(mctx->block); ++ len -= sizeof(mctx->block); ++ } ++ ++ memcpy(mctx->block, data, len); ++ ++ return 0; ++} ++ ++/** \fn static void md5_final(struct crypto_tfm *tfm, u8 *out) ++ * \ingroup LQ_MD5_FUNCTIONS ++ * \brief compute final md5 value ++ * \param tfm linux crypto algo transform ++ * \param out final md5 output value ++*/ ++static int md5_final(struct shash_desc *desc, u8 *out) ++{ ++ struct md5_ctx *mctx = shash_desc_ctx(desc); ++ const unsigned int offset = mctx->byte_count & 0x3f; ++ char *p = (char *)mctx->block + offset; ++ int padding = 56 - (offset + 1); ++ volatile struct deu_hash *hashs = (struct deu_hash *) HASH_START; ++ unsigned long flag; ++ ++ *p++ = 0x80; ++ if (padding < 0) { ++ memset(p, 0x00, padding + sizeof (u64)); ++ md5_transform_helper(mctx); ++ p = (char *)mctx->block; ++ padding = 56; ++ } ++ ++ memset(p, 0, padding); ++ mctx->block[14] = md5_endian_swap(mctx->byte_count << 3); ++ mctx->block[15] = md5_endian_swap(mctx->byte_count >> 29); ++ ++#if 0 ++ le32_to_cpu_array(mctx->block, (sizeof(mctx->block) - ++ sizeof(u64)) / sizeof(u32)); ++#endif ++ ++ md5_transform(mctx->hash, mctx->block); ++ ++ CRTCL_SECT_START; ++ ++ *((u32 *) out + 0) = md5_endian_swap(hashs->D1R); ++ *((u32 *) out + 1) = md5_endian_swap(hashs->D2R); ++ *((u32 *) out + 2) = md5_endian_swap(hashs->D3R); ++ *((u32 *) out + 3) = md5_endian_swap(hashs->D4R); ++ ++ CRTCL_SECT_END; ++ ++ /* Wipe context */ ++ memset(mctx, 0, sizeof(*mctx)); ++ ++ return 0; ++} ++ ++static int md5_export(struct shash_desc *desc, void *out) ++{ ++ struct md5_ctx *sctx = shash_desc_ctx(desc); ++ ++ memcpy(out, sctx, sizeof(*sctx)); ++ return 0; ++} ++ ++static int md5_import(struct shash_desc *desc, const void *in) ++{ ++ struct md5_ctx *sctx = shash_desc_ctx(desc); ++ ++ memcpy(sctx, in, sizeof(*sctx)); ++ return 0; ++} ++ ++/* ++ * \brief MD5 function mappings ++*/ ++static struct shash_alg md5_alg = { ++ .digestsize = MD5_DIGEST_SIZE, ++ .init = md5_init, ++ .update = md5_update, ++ .final = md5_final, ++ .export = md5_export, ++ .import = md5_import, ++ .descsize = sizeof(struct md5_ctx), ++ .statesize = sizeof(struct md5_ctx), ++ .base = { ++ .cra_name = "md5", ++ .cra_driver_name = "lq_deu-md5", ++ .cra_flags = CRYPTO_ALG_TYPE_SHASH, ++ .cra_blocksize = MD5_HMAC_BLOCK_SIZE, ++ .cra_module = THIS_MODULE, ++ } ++}; ++ ++/** \fn int lq_deu_init_md5(void) ++ * \ingroup LQ_MD5_FUNCTIONS ++ * \brief initialize md5 driver ++*/ ++int lq_deu_init_md5(void) ++{ ++ int ret; ++ ++ if ((ret = crypto_register_shash(&md5_alg))) ++ goto md5_err; ++ ++ 
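	/*
	 * Once registration succeeds the engine-backed hash is reached
	 * through the normal shash API, roughly (sketch, error handling
	 * omitted):
	 *
	 *   struct crypto_shash *tfm = crypto_alloc_shash("md5", 0, 0);
	 *   ... crypto_shash_init() / _update() / _final() on a descriptor
	 *       sized for struct md5_ctx ...
	 *   crypto_free_shash(tfm);
	 *
	 * "lq_deu-md5" (cra_driver_name) selects this implementation
	 * explicitly.
	 */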
CRTCL_SECT_INIT; ++ ++ printk(KERN_NOTICE "Lantiq DEU MD5 initialized%s.\n", ++ disable_deudma ? "" : " (DMA)"); ++ return ret; ++ ++md5_err: ++ printk(KERN_ERR "Lantiq DEU MD5 initialization failed!\n"); ++ return ret; ++} ++ ++/** \fn void lq_deu_fini_md5(void) ++ * \ingroup LQ_MD5_FUNCTIONS ++ * \brief unregister md5 driver ++*/ ++ ++void lq_deu_fini_md5(void) ++{ ++ crypto_unregister_shash(&md5_alg); ++} ++ +--- /dev/null ++++ b/drivers/crypto/lantiq/md5_hmac.c +@@ -0,0 +1,329 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com> ++ * Copyright (C) 2009 Mohammad Firdaus ++ */ ++ ++/** ++ \defgroup LQ_DEU LQ_DEU_DRIVERS ++ \ingroup API ++ \brief Lantiq DEU driver module ++*/ ++ ++/** ++ \file md5_hmac.c ++ \ingroup LQ_DEU ++ \brief MD5-HMAC encryption DEU driver file ++*/ ++ ++/** ++ \defgroup LQ_MD5_HMAC_FUNCTIONS LQ_MD5_HMAC_FUNCTIONS ++ \ingroup LQ_DEU ++ \brief Lantiq md5-hmac driver functions ++*/ ++ ++#include <crypto/internal/hash.h> ++#include <linux/init.h> ++#include <linux/module.h> ++#include <linux/string.h> ++#include <linux/crypto.h> ++#include <linux/types.h> ++#include <asm/byteorder.h> ++#include "deu.h" ++ ++#define MD5_DIGEST_SIZE 16 ++#define MD5_HMAC_BLOCK_SIZE 64 ++#define MD5_BLOCK_WORDS 16 ++#define MD5_HASH_WORDS 4 ++#define MD5_HMAC_DBN_TEMP_SIZE 1024 /* size in dword, ++ needed for dbn workaround */ ++ ++static spinlock_t cipher_lock; ++ ++struct md5_hmac_ctx { ++ u32 hash[MD5_HASH_WORDS]; ++ u32 block[MD5_BLOCK_WORDS]; ++ u64 byte_count; ++ u32 dbn; ++ u32 temp[MD5_HMAC_DBN_TEMP_SIZE]; ++}; ++ ++/** \fn static u32 md5_endian_swap(u32 input) ++ * \ingroup LQ_MD5_HMAC_FUNCTIONS ++ * \brief perform dword level endian swap ++ * \param input value of dword that requires to be swapped ++*/ ++static u32 md5_endian_swap(u32 input) ++{ ++ u8 *ptr = (u8 *)&input; ++ ++ return ((ptr[3] << 24) | (ptr[2] << 16) | (ptr[1] << 8) | ptr[0]); ++} ++ ++/** \fn static void md5_hmac_transform(struct crypto_tfm *tfm, u32 const *in) ++ * \ingroup LQ_MD5_HMAC_FUNCTIONS ++ * \brief save input block to context ++ * \param tfm linux crypto algo transform ++ * \param in 64-byte block of input ++*/ ++static void md5_hmac_transform(struct shash_desc *desc, u32 const *in) ++{ ++ struct md5_hmac_ctx *mctx = shash_desc_ctx(desc); ++ ++ memcpy(&mctx->temp[mctx->dbn<<4], in, 64); /* dbn workaround */ ++ mctx->dbn += 1; ++ ++ if ( (mctx->dbn<<4) > MD5_HMAC_DBN_TEMP_SIZE ) ++ { ++ printk("MD5_HMAC_DBN_TEMP_SIZE exceeded\n"); ++ } ++} ++ ++/** \fn int md5_hmac_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) ++ * \ingroup LQ_MD5_HMAC_FUNCTIONS ++ * \brief sets md5 hmac key ++ * \param tfm linux crypto algo transform ++ * \param key input key ++ * \param keylen key length greater than 64 bytes IS NOT SUPPORTED ++*/ ++static int 
md5_hmac_setkey(struct crypto_shash *tfm, ++ const u8 *key, ++ unsigned int keylen) ++{ ++ volatile struct deu_hash *hash = (struct deu_hash *) HASH_START; ++ int i, j; ++ u32 *in_key = (u32 *)key; ++ ++ hash->KIDX = 0x80000000; /* reset all 16 words of the key to '0' */ ++ asm("sync"); ++ ++ j = 0; ++ for (i = 0; i < keylen; i+=4) ++ { ++ hash->KIDX = j; ++ asm("sync"); ++ hash->KEY = *((u32 *) in_key + j); ++ j++; ++ } ++ ++ return 0; ++} ++ ++/** \fn void md5_hmac_init(struct crypto_tfm *tfm) ++ * \ingroup LQ_MD5_HMAC_FUNCTIONS ++ * \brief initialize md5 hmac context ++ * \param tfm linux crypto algo transform ++*/ ++static int md5_hmac_init(struct shash_desc *desc) ++{ ++ struct md5_hmac_ctx *mctx = shash_desc_ctx(desc); ++ ++ memset(mctx, 0, sizeof(struct md5_hmac_ctx)); ++ mctx->dbn = 0; /* dbn workaround */ ++ return 0; ++} ++ ++/** \fn void md5_hmac_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) ++ * \ingroup LQ_MD5_HMAC_FUNCTIONS ++ * \brief on-the-fly md5 hmac computation ++ * \param tfm linux crypto algo transform ++ * \param data input data ++ * \param len size of input data ++*/ ++static int md5_hmac_update(struct shash_desc *desc, ++ const u8 *data, ++ unsigned int len) ++{ ++ struct md5_hmac_ctx *mctx = shash_desc_ctx(desc); ++ const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); ++ ++ mctx->byte_count += len; ++ ++ if (avail > len) { ++ memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), ++ data, len); ++ return 0; ++ } ++ ++ memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), ++ data, avail); ++ ++ md5_hmac_transform(desc, mctx->block); ++ data += avail; ++ len -= avail; ++ ++ while (len >= sizeof(mctx->block)) { ++ memcpy(mctx->block, data, sizeof(mctx->block)); ++ md5_hmac_transform(desc, mctx->block); ++ data += sizeof(mctx->block); ++ len -= sizeof(mctx->block); ++ } ++ ++ memcpy(mctx->block, data, len); ++ ++ return 0; ++} ++ ++/** \fn void md5_hmac_final(struct crypto_tfm *tfm, u8 *out) ++ * \ingroup LQ_MD5_HMAC_FUNCTIONS ++ * \brief compute final md5 hmac value ++ * \param tfm linux crypto algo transform ++ * \param out final md5 hmac output value ++*/ ++static int md5_hmac_final(struct shash_desc *desc, u8 *out) ++{ ++ struct md5_hmac_ctx *mctx = shash_desc_ctx(desc); ++ const unsigned int offset = mctx->byte_count & 0x3f; ++ char *p = (char *)mctx->block + offset; ++ int padding = 56 - (offset + 1); ++ volatile struct deu_hash *hashs = (struct deu_hash *) HASH_START; ++ u32 flag; ++ int i = 0; ++ int dbn; ++ u32 *in = &mctx->temp[0]; ++ ++ *p++ = 0x80; ++ if (padding < 0) { ++ memset(p, 0x00, padding + sizeof (u64)); ++ md5_hmac_transform(desc, mctx->block); ++ p = (char *)mctx->block; ++ padding = 56; ++ } ++ ++ memset(p, 0, padding); ++ /* need to add 512 bit of the IPAD operation */ ++ mctx->block[14] = md5_endian_swap((mctx->byte_count + 64) << 3); ++ mctx->block[15] = 0x00000000; ++ ++ md5_hmac_transform(desc, mctx->block); ++ ++ CRTCL_SECT_START; ++ ++ printk("dbn = %d\n", mctx->dbn); ++ hashs->DBN = mctx->dbn; ++ ++ /* khs, go, init, ndc, endi, kyue, hmen, md5 */ ++ *LQ_HASH_CON = 0x0703002D; ++ ++ /* wait for processing */ ++ while (hashs->ctrl.BSY) { ++ /* this will not take long */ ++ } ++ ++ for (dbn = 0; dbn < mctx->dbn; dbn++) ++ { ++ for (i = 0; i < 16; i++) { ++ hashs->MR = in[i]; ++ }; ++ ++ hashs->ctrl.GO = 1; ++ asm("sync"); ++ ++ /* wait for processing */ ++ while (hashs->ctrl.BSY) { ++ /* this will not take long */ ++ } ++ ++ in += 16; ++ } ++ ++#if 1 ++ /* wait for digest ready */ ++ while (! 
hashs->ctrl.DGRY) { ++ /* this will not take long */ ++ } ++#endif ++ ++ *((u32 *) out + 0) = hashs->D1R; ++ *((u32 *) out + 1) = hashs->D2R; ++ *((u32 *) out + 2) = hashs->D3R; ++ *((u32 *) out + 3) = hashs->D4R; ++ *((u32 *) out + 4) = hashs->D5R; ++ ++ CRTCL_SECT_END; ++ ++ return 0; ++} ++ ++static int md5_hmac_export(struct shash_desc *desc, void *out) ++{ ++ struct md5_hmac_ctx *sctx = shash_desc_ctx(desc); ++ ++ memcpy(out, sctx, sizeof(*sctx)); ++ return 0; ++} ++ ++static int md5_hmac_import(struct shash_desc *desc, const void *in) ++{ ++ struct md5_hmac_ctx *sctx = shash_desc_ctx(desc); ++ ++ memcpy(sctx, in, sizeof(*sctx)); ++ return 0; ++} ++ ++/* ++ * \brief MD5_HMAC function mappings ++*/ ++static struct shash_alg md5_hmac_alg = { ++ .digestsize = MD5_DIGEST_SIZE, ++ .init = md5_hmac_init, ++ .update = md5_hmac_update, ++ .final = md5_hmac_final, ++ .setkey = md5_hmac_setkey, ++ .export = md5_hmac_export, ++ .import = md5_hmac_import, ++ .descsize = sizeof(struct md5_hmac_ctx), ++ .statesize = sizeof(struct md5_hmac_ctx), ++ .base = { ++ .cra_name = "hmac(md5)", ++ .cra_driver_name = "lq_deu-md5_hmac", ++ .cra_flags = CRYPTO_ALG_TYPE_SHASH, ++ .cra_blocksize = MD5_HMAC_BLOCK_SIZE, ++ .cra_module = THIS_MODULE, ++ } ++}; ++ ++/** \fn int lq_deu_init_md5_hmac(void) ++ * \ingroup LQ_MD5_HMAC_FUNCTIONS ++ * \brief initialize md5 hmac driver ++*/ ++int lq_deu_init_md5_hmac(void) ++{ ++ int ret; ++ ++ if ((ret = crypto_register_shash(&md5_hmac_alg))) ++ goto md5_hmac_err; ++ ++ CRTCL_SECT_INIT; ++ ++ printk(KERN_NOTICE "Lantiq DEU MD5_HMAC initialized%s.\n", ++ disable_deudma ? "" : " (DMA)"); ++ return ret; ++ ++md5_hmac_err: ++ printk(KERN_ERR "Lantiq DEU MD5_HMAC initialization failed!\n"); ++ return ret; ++} ++ ++/** \fn void lq_deu_fini_md5_hmac(void) ++ * \ingroup LQ_MD5_HMAC_FUNCTIONS ++ * \brief unregister md5 hmac driver ++*/ ++void lq_deu_fini_md5_hmac(void) ++{ ++ crypto_unregister_shash(&md5_hmac_alg); ++} ++ +--- /dev/null ++++ b/drivers/crypto/lantiq/sha1.c +@@ -0,0 +1,262 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 
++ * ++ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com> ++ * Copyright (C) 2009 Mohammad Firdaus ++ */ ++ ++/** ++ \defgroup LQ_DEU LQ_DEU_DRIVERS ++ \ingroup API ++ \brief Lantiq DEU driver module ++*/ ++ ++/** ++ \file sha1.c ++ \ingroup LQ_DEU ++ \brief SHA1 encryption DEU driver file ++*/ ++ ++/** ++ \defgroup LQ_SHA1_FUNCTIONS LQ_SHA1_FUNCTIONS ++ \ingroup LQ_DEU ++ \brief Lantiq DEU sha1 functions ++*/ ++ ++ ++#include <crypto/internal/hash.h> ++#include <linux/init.h> ++#include <linux/module.h> ++#include <linux/mm.h> ++#include <linux/crypto.h> ++#include <linux/cryptohash.h> ++#include <crypto/sha.h> ++#include <linux/types.h> ++#include <asm/scatterlist.h> ++#include <asm/byteorder.h> ++#include "deu.h" ++ ++#define SHA1_DIGEST_SIZE 20 ++#define SHA1_HMAC_BLOCK_SIZE 64 ++ ++static spinlock_t cipher_lock; ++ ++/* ++ * \brief SHA1 private structure ++*/ ++struct sha1_ctx { ++ u64 count; ++ u32 state[5]; ++ u8 buffer[64]; ++}; ++ ++/** \fn static void sha1_transform(u32 *state, const u32 *in) ++ * \ingroup LQ_SHA1_FUNCTIONS ++ * \brief main interface to sha1 hardware ++ * \param state current state ++ * \param in 64-byte block of input ++*/ ++static void sha1_transform(u32 *state, const u32 *in) ++{ ++ int i = 0; ++ volatile struct deu_hash *hashs = (struct deu_hash *) HASH_START; ++ unsigned long flag; ++ ++ CRTCL_SECT_START; ++ ++ for (i = 0; i < 16; i++) { ++ hashs->MR = in[i]; ++ }; ++ ++ /* wait for processing */ ++ while (hashs->ctrl.BSY) { ++ /* this will not take long */ ++ } ++ ++ CRTCL_SECT_END; ++} ++ ++/** \fn static void sha1_init(struct crypto_tfm *tfm) ++ * \ingroup LQ_SHA1_FUNCTIONS ++ * \brief initialize sha1 hardware ++ * \param tfm linux crypto algo transform ++*/ ++static int sha1_init(struct shash_desc *desc) ++{ ++ struct sha1_ctx *sctx = shash_desc_ctx(desc); ++ ++ SHA_HASH_INIT; ++ ++ sctx->count = 0; ++ ++ return 0; ++} ++ ++/** \fn static void sha1_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) ++ * \ingroup LQ_SHA1_FUNCTIONS ++ * \brief on-the-fly sha1 computation ++ * \param tfm linux crypto algo transform ++ * \param data input data ++ * \param len size of input data ++*/ ++static int sha1_update(struct shash_desc *desc, const u8 *data, unsigned int len) ++{ ++ struct sha1_ctx *sctx = shash_desc_ctx(desc); ++ unsigned int i, j; ++ ++ j = (sctx->count >> 3) & 0x3f; ++ sctx->count += len << 3; ++ ++ if ((j + len) > 63) { ++ memcpy(&sctx->buffer[j], data, (i = 64 - j)); ++ sha1_transform(sctx->state, (const u32 *)sctx->buffer); ++ for (; i + 63 < len; i += 64) { ++ sha1_transform(sctx->state, (const u32 *)&data[i]); ++ } ++ ++ j = 0; ++ } else { ++ i = 0; ++ } ++ ++ memcpy(&sctx->buffer[j], &data[i], len - i); ++ ++ return 0; ++} ++ ++/** \fn static void sha1_final(struct crypto_tfm *tfm, u8 *out) ++ * \ingroup LQ_SHA1_FUNCTIONS ++ * \brief compute final sha1 value ++ * \param tfm linux crypto algo transform ++ * \param out final md5 output value ++*/ ++static int sha1_final(struct shash_desc *desc, u8 *out) ++{ ++ struct sha1_ctx *sctx = shash_desc_ctx(desc); ++ u32 index, padlen; ++ u64 t; ++ u8 bits[8] = { 0, }; ++ static const u8 padding[64] = { 0x80, }; ++ volatile struct deu_hash *hashs = (struct deu_hash *) HASH_START; ++ ulong flag; ++ ++ t = sctx->count; ++ bits[7] = 0xff & t; ++ t >>= 8; ++ bits[6] = 0xff & t; ++ t >>= 8; ++ bits[5] = 0xff & t; ++ t >>= 8; ++ bits[4] = 0xff & t; ++ t >>= 8; ++ bits[3] = 0xff & t; ++ t >>= 8; ++ bits[2] = 0xff & t; ++ t >>= 8; ++ bits[1] = 0xff & t; ++ t >>= 8; ++ bits[0] = 0xff & t; 
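++	/* sctx->count holds the total message length in bits; the bytes above
++	 * serialize it big-endian so it can be appended after the padding below */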
++ ++ /* Pad out to 56 mod 64 */ ++ index = (sctx->count >> 3) & 0x3f; ++ padlen = (index < 56) ? (56 - index) : ((64 + 56) - index); ++ sha1_update(desc, padding, padlen); ++ ++ /* Append length */ ++ sha1_update(desc, bits, sizeof bits); ++ ++ CRTCL_SECT_START; ++ ++ *((u32 *) out + 0) = hashs->D1R; ++ *((u32 *) out + 1) = hashs->D2R; ++ *((u32 *) out + 2) = hashs->D3R; ++ *((u32 *) out + 3) = hashs->D4R; ++ *((u32 *) out + 4) = hashs->D5R; ++ ++ CRTCL_SECT_END; ++ ++ /* Wipe context*/ ++ memset(sctx, 0, sizeof *sctx); ++ ++ return 0; ++} ++ ++static int sha1_export(struct shash_desc *desc, void *out) ++{ ++ struct sha1_ctx *sctx = shash_desc_ctx(desc); ++ ++ memcpy(out, sctx, sizeof(*sctx)); ++ return 0; ++} ++ ++static int sha1_import(struct shash_desc *desc, const void *in) ++{ ++ struct sha1_ctx *sctx = shash_desc_ctx(desc); ++ ++ memcpy(sctx, in, sizeof(*sctx)); ++ return 0; ++} ++ ++/* ++ * \brief SHA1 function mappings ++*/ ++static struct shash_alg deu_sha1_alg = { ++ .digestsize = SHA1_DIGEST_SIZE, ++ .init = sha1_init, ++ .update = sha1_update, ++ .final = sha1_final, ++ .export = sha1_export, ++ .import = sha1_import, ++ .descsize = sizeof(struct sha1_ctx), ++ .statesize = sizeof(struct sha1_ctx), ++ .base = { ++ .cra_name = "sha1", ++ .cra_driver_name = "lq_deu-sha1", ++ .cra_flags = CRYPTO_ALG_TYPE_SHASH, ++ .cra_blocksize = SHA1_HMAC_BLOCK_SIZE, ++ .cra_module = THIS_MODULE, ++ } ++}; ++ ++/** \fn int lq_deu_init_sha1(void) ++ * \ingroup LQ_SHA1_FUNCTIONS ++ * \brief initialize sha1 driver ++*/ ++int lq_deu_init_sha1(void) ++{ ++ int ret; ++ ++ if ((ret = crypto_register_shash(&deu_sha1_alg))) ++ goto sha1_err; ++ ++ CRTCL_SECT_INIT; ++ ++ printk(KERN_NOTICE "Lantiq DEU SHA1 initialized%s.\n", ++ disable_deudma ? "" : " (DMA)"); ++ return ret; ++ ++sha1_err: ++ printk(KERN_ERR "Lantiq DEU SHA1 initialization failed!\n"); ++ return ret; ++} ++ ++/** \fn void lq_deu_fini_sha1(void) ++ * \ingroup LQ_SHA1_FUNCTIONS ++ * \brief unregister sha1 driver ++*/ ++void lq_deu_fini_sha1(void) ++{ ++ crypto_unregister_shash(&deu_sha1_alg); ++} +--- /dev/null ++++ b/drivers/crypto/lantiq/sha1_hmac.c +@@ -0,0 +1,325 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 
++ * ++ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com> ++ * Copyright (C) 2009 Mohammad Firdaus ++ */ ++ ++/** ++ \defgroup LQ_DEU LQ_DEU_DRIVERS ++ \ingroup API ++ \brief Lantiq DEU driver module ++*/ ++ ++/** ++ \file sha1_hmac.c ++ \ingroup LQ_DEU ++ \brief SHA1-HMAC DEU driver file ++*/ ++ ++/** ++ \defgroup LQ_SHA1_HMAC_FUNCTIONS LQ_SHA1_HMAC_FUNCTIONS ++ \ingroup LQ_DEU ++ \brief Lantiq sha1 hmac functions ++*/ ++ ++ ++#include <crypto/internal/hash.h> ++#include <linux/init.h> ++#include <linux/module.h> ++#include <linux/mm.h> ++#include <linux/crypto.h> ++#include <linux/cryptohash.h> ++#include <linux/types.h> ++#include <asm/scatterlist.h> ++#include <asm/byteorder.h> ++#include <linux/delay.h> ++#include "deu.h" ++ ++#ifdef CONFIG_CRYPTO_DEV_LANTIQ_SHA1_HMAC ++ ++#define SHA1_DIGEST_SIZE 20 ++#define SHA1_HMAC_BLOCK_SIZE 64 ++/* size in dword, needed for dbn workaround */ ++#define SHA1_HMAC_DBN_TEMP_SIZE 1024 ++ ++static spinlock_t cipher_lock; ++ ++struct sha1_hmac_ctx { ++ u64 count; ++ u32 state[5]; ++ u8 buffer[64]; ++ u32 dbn; ++ u32 temp[SHA1_HMAC_DBN_TEMP_SIZE]; ++}; ++ ++/** \fn static void sha1_hmac_transform(struct crypto_tfm *tfm, u32 const *in) ++ * \ingroup LQ_SHA1_HMAC_FUNCTIONS ++ * \brief save input block to context ++ * \param tfm linux crypto algo transform ++ * \param in 64-byte block of input ++*/ ++static void sha1_hmac_transform(struct shash_desc *desc, u32 const *in) ++{ ++ struct sha1_hmac_ctx *sctx = shash_desc_ctx(desc); ++ ++ memcpy(&sctx->temp[sctx->dbn<<4], in, 64); /* dbn workaround */ ++ sctx->dbn += 1; ++ ++ if ((sctx->dbn<<4) > SHA1_HMAC_DBN_TEMP_SIZE) { ++ printk("SHA1_HMAC_DBN_TEMP_SIZE exceeded\n"); ++ } ++} ++ ++/** \fn int sha1_hmac_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) ++ * \ingroup LQ_SHA1_HMAC_FUNCTIONS ++ * \brief sets sha1 hmac key ++ * \param tfm linux crypto algo transform ++ * \param key input key ++ * \param keylen key length greater than 64 bytes IS NOT SUPPORTED ++*/ ++static int sha1_hmac_setkey(struct crypto_shash *tfm, ++ const u8 *key, ++ unsigned int keylen) ++{ ++ volatile struct deu_hash *hash = (struct deu_hash *) HASH_START; ++ int i, j; ++ u32 *in_key = (u32 *)key; ++ ++ hash->KIDX = 0x80000000; /* reset all 16 words of the key to '0' */ ++ asm("sync"); ++ ++ j = 0; ++ for (i = 0; i < keylen; i+=4) ++ { ++ hash->KIDX = j; ++ asm("sync"); ++ hash->KEY = *((u32 *) in_key + j); ++ j++; ++ } ++ ++ return 0; ++} ++ ++static int sha1_hmac_export(struct shash_desc *desc, void *out) ++{ ++ struct sha1_hmac_ctx *sctx = shash_desc_ctx(desc); ++ ++ memcpy(out, sctx, sizeof(*sctx)); ++ return 0; ++} ++ ++static int sha1_hmac_import(struct shash_desc *desc, const void *in) ++{ ++ struct sha1_hmac_ctx *sctx = shash_desc_ctx(desc); ++ ++ memcpy(sctx, in, sizeof(*sctx)); ++ return 0; ++} ++ ++/** \fn void sha1_hmac_init(struct crypto_tfm *tfm) ++ * \ingroup LQ_SHA1_HMAC_FUNCTIONS ++ * \brief initialize sha1 hmac context ++ * \param tfm linux crypto algo transform ++*/ ++static int sha1_hmac_init(struct shash_desc *desc) ++{ ++ struct sha1_hmac_ctx *sctx = shash_desc_ctx(desc); ++ ++ memset(sctx, 0, sizeof(struct sha1_hmac_ctx)); ++ sctx->dbn = 0; /* dbn workaround */ ++ ++ return 0; ++} ++ ++/** \fn static void sha1_hmac_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) ++ * \ingroup LQ_SHA1_HMAC_FUNCTIONS ++ * \brief on-the-fly sha1 hmac computation ++ * \param tfm linux crypto algo transform ++ * \param data input data ++ * \param len size of input data ++*/ ++static int 
sha1_hmac_update(struct shash_desc *desc, const u8 *data, ++ unsigned int len) ++{ ++ struct sha1_hmac_ctx *sctx = shash_desc_ctx(desc); ++ unsigned int i, j; ++ ++ j = (sctx->count >> 3) & 0x3f; ++ sctx->count += len << 3; ++ /* printk("sctx->count = %d\n", (sctx->count >> 3)); */ ++ ++ if ((j + len) > 63) { ++ memcpy(&sctx->buffer[j], data, (i = 64 - j)); ++ sha1_hmac_transform(desc, (const u32 *)sctx->buffer); ++ for (; i + 63 < len; i += 64) { ++ sha1_hmac_transform(desc, (const u32 *)&data[i]); ++ } ++ ++ j = 0; ++ } else { ++ i = 0; ++ } ++ ++ memcpy(&sctx->buffer[j], &data[i], len - i); ++ ++ return 0; ++} ++ ++/** \fn static void sha1_hmac_final(struct crypto_tfm *tfm, u8 *out) ++ * \ingroup LQ_SHA1_HMAC_FUNCTIONS ++ * \brief ompute final sha1 hmac value ++ * \param tfm linux crypto algo transform ++ * \param out final sha1 hmac output value ++*/ ++static int sha1_hmac_final(struct shash_desc *desc, u8 *out) ++{ ++ struct sha1_hmac_ctx *sctx = shash_desc_ctx(desc); ++ u32 index, padlen; ++ u64 t; ++ u8 bits[8] = { 0, }; ++ static const u8 padding[64] = { 0x80, }; ++ volatile struct deu_hash *hashs = (struct deu_hash *) HASH_START; ++ ulong flag; ++ int i = 0; ++ int dbn; ++ u32 *in = &sctx->temp[0]; ++ ++ t = sctx->count + 512; /* need to add 512 bit of the IPAD operation */ ++ bits[7] = 0xff & t; ++ t >>= 8; ++ bits[6] = 0xff & t; ++ t >>= 8; ++ bits[5] = 0xff & t; ++ t >>= 8; ++ bits[4] = 0xff & t; ++ t >>= 8; ++ bits[3] = 0xff & t; ++ t >>= 8; ++ bits[2] = 0xff & t; ++ t >>= 8; ++ bits[1] = 0xff & t; ++ t >>= 8; ++ bits[0] = 0xff & t; ++ ++ /* Pad out to 56 mod 64 */ ++ index = (sctx->count >> 3) & 0x3f; ++ padlen = (index < 56) ? (56 - index) : ((64 + 56) - index); ++ sha1_hmac_update(desc, padding, padlen); ++ ++ /* Append length */ ++ sha1_hmac_update(desc, bits, sizeof bits); ++ ++ CRTCL_SECT_START; ++ ++ hashs->DBN = sctx->dbn; ++ ++ /* for vr9 change, ENDI = 1 */ ++ *LQ_HASH_CON = HASH_CON_VALUE; ++ ++ /* wait for processing */ ++ while (hashs->ctrl.BSY) { ++ /* this will not take long */ ++ } ++ ++ for (dbn = 0; dbn < sctx->dbn; dbn++) ++ { ++ for (i = 0; i < 16; i++) { ++ hashs->MR = in[i]; ++ }; ++ ++ hashs->ctrl.GO = 1; ++ asm("sync"); ++ ++ /* wait for processing */ ++ while (hashs->ctrl.BSY) { ++ /* this will not take long */ ++ } ++ ++ in += 16; ++ ++ return 0; ++ } ++ ++#if 1 ++ /* wait for digest ready */ ++ while (! 
hashs->ctrl.DGRY) { ++ /* this will not take long */ ++ } ++#endif ++ ++ *((u32 *) out + 0) = hashs->D1R; ++ *((u32 *) out + 1) = hashs->D2R; ++ *((u32 *) out + 2) = hashs->D3R; ++ *((u32 *) out + 3) = hashs->D4R; ++ *((u32 *) out + 4) = hashs->D5R; ++ ++ CRTCL_SECT_END; ++} ++ ++/* ++ * \brief SHA1-HMAC function mappings ++*/ ++static struct shash_alg sha1_hmac_alg = { ++ .digestsize = SHA1_DIGEST_SIZE, ++ .init = sha1_hmac_init, ++ .update = sha1_hmac_update, ++ .final = sha1_hmac_final, ++ .export = sha1_hmac_export, ++ .import = sha1_hmac_import, ++ .setkey = sha1_hmac_setkey, ++ .descsize = sizeof(struct sha1_hmac_ctx), ++ .statesize = sizeof(struct sha1_hmac_ctx), ++ .base = { ++ .cra_name = "hmac(sha1)", ++ .cra_driver_name = "lq_deu-sha1_hmac", ++ .cra_flags = CRYPTO_ALG_TYPE_SHASH, ++ .cra_blocksize = SHA1_HMAC_BLOCK_SIZE, ++ .cra_module = THIS_MODULE, ++ } ++}; ++ ++/** \fn int lq_deu_init_sha1_hmac(void) ++ * \ingroup LQ_SHA1_HMAC_FUNCTIONS ++ * \brief initialize sha1 hmac driver ++*/ ++int lq_deu_init_sha1_hmac(void) ++{ ++ int ret; ++ ++ if ((ret = crypto_register_shash(&sha1_hmac_alg))) ++ goto sha1_err; ++ ++ CRTCL_SECT_INIT; ++ ++ printk(KERN_NOTICE "Lantiq DEU SHA1_HMAC initialized%s.\n", ++ disable_deudma ? "" : " (DMA)"); ++ return ret; ++ ++sha1_err: ++ printk(KERN_ERR "Lantiq DEU SHA1_HMAC initialization failed!\n"); ++ return ret; ++} ++ ++/** \fn void lq_deu_fini_sha1_hmac(void) ++ * \ingroup LQ_SHA1_HMAC_FUNCTIONS ++ * \brief unregister sha1 hmac driver ++*/ ++void lq_deu_fini_sha1_hmac(void) ++{ ++ crypto_unregister_shash(&sha1_hmac_alg); ++} ++ ++#endif +--- /dev/null ++++ b/drivers/crypto/lantiq/deu_falcon.c +@@ -0,0 +1,163 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 
++ * ++ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com> ++ * Copyright (C) 2009 Mohammad Firdaus ++ */ ++ ++#include <linux/module.h> ++#include <linux/init.h> ++#include <linux/types.h> ++#include <linux/errno.h> ++#include <asm/io.h> /* dma_cache_inv */ ++#include <linux/platform_device.h> ++ ++#ifdef CONFIG_SOC_LANTIQ_FALCON ++ ++#include "deu.h" ++ ++/** ++ \defgroup LQ_DEU LQ_DEU_DRIVERS ++ \ingroup API ++ \brief Lantiq DEU driver module ++*/ ++ ++/** ++ \file deu_falcon.c ++ \brief Lantiq DEU board specific driver file for ar9 ++*/ ++ ++/** ++ \defgroup BOARD_SPECIFIC_FUNCTIONS LQ_BOARD_SPECIFIC_FUNCTIONS ++ \ingroup LQ_DEU ++ \brief board specific functions ++*/ ++ ++#include <falcon/gpon_reg_base.h> ++#include <falcon/sys1_reg.h> ++#include <falcon/status_reg.h> ++#include <falcon/sysctrl.h> ++ ++#define reg_r32(reg) __raw_readl(reg) ++#define reg_w32(val, reg) __raw_writel(val, reg) ++#define reg_w32_mask(clear, set, reg) reg_w32((reg_r32(reg) & ~(clear)) | (set), reg) ++ ++static gpon_sys1_t * const sys1 = (gpon_sys1_t *)GPON_SYS1_BASE; ++static gpon_status_t * const status = (gpon_status_t *)GPON_STATUS_BASE; ++ ++/** \fn u32 endian_swap(u32 input) ++ * \ingroup BOARD_SPECIFIC_FUNCTIONS ++ * \brief Swap data given to the function ++ * \param input Data input to be swapped ++ * \return either the swapped data or the input data depending on whether it is in DMA mode or FPI mode ++*/ ++static u32 endian_swap(u32 input) ++{ ++ return input; ++} ++ ++/** \fn u32 input_swap(u32 input) ++ * \ingroup BOARD_SPECIFIC_FUNCTIONS ++ * \brief Not used ++ * \return input ++*/ ++static u32 input_swap(u32 input) ++{ ++ return input; ++} ++ ++/** \fn void aes_chip_init(void) ++ * \ingroup BOARD_SPECIFIC_FUNCTIONS ++ * \brief initialize AES hardware ++*/ ++static void aes_chip_init(void) ++{ ++ volatile struct deu_aes *aes = (struct deu_aes *) AES_START; ++ ++ aes->ctrl.SM = 1; ++ aes->ctrl.ARS = 1; ++} ++ ++/** \fn void des_chip_init(void) ++ * \ingroup BOARD_SPECIFIC_FUNCTIONS ++ * \brief initialize DES hardware ++*/ ++static void des_chip_init(void) ++{ ++} ++ ++static u32 chip_init(void) ++{ ++ sys1_hw_clk_enable(CLKEN_SHA1_SET | CLKEN_AES_SET); ++ sys1_hw_activate(ACT_SHA1_SET | ACT_AES_SET); ++ ++ return LQ_DEU_ID_AES | LQ_DEU_ID_HASH; ++} ++ ++static int lq_crypto_probe(struct platform_device *pdev) ++{ ++#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA ++ lq_crypto_ops.dma_init = NULL; ++ lq_crypto_ops.dma_exit = NULL; ++ lq_crypto_ops.aes_dma_memcpy = NULL; ++ lq_crypto_ops.des_dma_memcpy = NULL; ++ lq_crypto_ops.aes_dma_malloc = NULL; ++ lq_crypto_ops.des_dma_malloc = NULL; ++ lq_crypto_ops.dma_align = NULL; ++ lq_crypto_ops.dma_free = NULL; ++#endif ++ ++ lq_crypto_ops.endian_swap = endian_swap; ++ lq_crypto_ops.input_swap = input_swap; ++ lq_crypto_ops.aes_chip_init = aes_chip_init; ++ lq_crypto_ops.des_chip_init = des_chip_init; ++ lq_crypto_ops.chip_init = chip_init; ++ ++ printk("lq_falcon_deu: driver loaded!\n"); ++ ++ lq_deu_init(); ++ ++ return 0; ++} ++ ++static int lq_crypto_remove(struct platform_device *pdev) ++{ ++ lq_deu_exit(); ++ ++ return 0; ++} ++ ++static struct platform_driver lq_crypto = { ++ .probe = lq_crypto_probe, ++ .remove = lq_crypto_remove, ++ .driver = { ++ .owner = THIS_MODULE, ++ .name = "lq_falcon_deu" ++ } ++}; ++ ++static int __init lq_crypto_init(void) ++{ ++ return platform_driver_register(&lq_crypto); ++} ++module_init(lq_crypto_init); ++ ++static void __exit lq_crypto_exit(void) ++{ ++ platform_driver_unregister(&lq_crypto); ++} 
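++/* module init/exit only (un)register the "lq_falcon_deu" platform driver;
++ * the DEU hardware itself is brought up from lq_crypto_probe() via lq_deu_init() */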
++module_exit(lq_crypto_exit); ++ ++#endif +--- /dev/null ++++ b/drivers/crypto/lantiq/deu_falcon.h +@@ -0,0 +1,281 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com> ++ * Copyright (C) 2009 Mohammad Firdaus / Infineon Technologies ++ */ ++ ++/** ++ \defgroup LQ_DEU LQ_DEU_DRIVERS ++ \ingroup API ++ \brief DEU driver module ++*/ ++ ++/** ++ \defgroup LQ_DEU_DEFINITIONS LQ_DEU_DEFINITIONS ++ \ingroup LQ_DEU ++ \brief Lantiq DEU definitions ++*/ ++ ++/** ++ \file deu_falcon.h ++ \brief DEU driver header file ++*/ ++ ++ ++#ifndef DEU_FALCON_H ++#define DEU_FALCON_H ++ ++#define HASH_START 0xbd008100 ++#define AES_START 0xbd008000 ++ ++#ifdef CONFIG_CRYPTO_DEV_DMA ++# include "deu_dma.h" ++# define DEU_DWORD_REORDERING(ptr, buffer, in_out, bytes) \ ++ deu_dma_align(ptr, buffer, in_out, bytes) ++# define AES_MEMORY_COPY(outcopy, out_dma, out_arg, nbytes) \ ++ deu_aes_dma_memcpy(outcopy, out_dma, out_arg, nbytes) ++# define DES_MEMORY_COPY(outcopy, out_dma, out_arg, nbytes) \ ++ deu_des_dma_memcpy(outcopy, out_dma, out_arg, nbytes) ++# define BUFFER_IN 1 ++# define BUFFER_OUT 0 ++# define AES_ALGO 1 ++# define DES_ALGO 0 ++# define ALLOCATE_MEMORY(val, type) 1 ++# define FREE_MEMORY(buff) ++extern struct lq_deu_device lq_deu[1]; ++#endif /* CONFIG_CRYPTO_DEV_DMA */ ++ ++/* SHA CONSTANTS */ ++#define HASH_CON_VALUE 0x0700002C ++ ++#define INPUT_ENDIAN_SWAP(input) deu_input_swap(input) ++#define DEU_ENDIAN_SWAP(input) deu_endian_swap(input) ++#define DELAY_PERIOD 10 ++#define FIND_DEU_CHIP_VERSION chip_version() ++ ++#define WAIT_AES_DMA_READY() \ ++ do { \ ++ int i; \ ++ volatile struct deu_dma *dma = \ ++ (struct deu_dma *) LQ_DEU_DMA_CON; \ ++ volatile struct deu_aes *aes = \ ++ (volatile struct deu_aes *) AES_START; \ ++ for (i = 0; i < 10; i++) \ ++ udelay(DELAY_PERIOD); \ ++ while (dma->ctrl.BSY) {}; \ ++ while (aes->ctrl.BUS) {}; \ ++ } while (0) ++ ++#define WAIT_DES_DMA_READY() \ ++ do { \ ++ int i; \ ++ volatile struct deu_dma *dma = \ ++ (struct deu_dma *) LQ_DEU_DMA_CON; \ ++ volatile struct deu_des *des = \ ++ (struct deu_des *) DES_3DES_START; \ ++ for (i = 0; i < 10; i++) \ ++ udelay(DELAY_PERIOD); \ ++ while (dma->ctrl.BSY) {}; \ ++ while (des->ctrl.BUS) {}; \ ++ } while (0) ++ ++#define AES_DMA_MISC_CONFIG() \ ++ do { \ ++ volatile struct deu_aes *aes = \ ++ (volatile struct deu_aes *) AES_START; \ ++ aes->ctrl.KRE = 1; \ ++ aes->ctrl.GO = 1; \ ++ } while(0) ++ ++#define SHA_HASH_INIT \ ++ do { \ ++ volatile struct deu_hash *hash = \ ++ (struct deu_hash *) HASH_START; \ ++ hash->ctrl.SM = 1; \ ++ hash->ctrl.ALGO = 0; \ ++ hash->ctrl.INIT = 1; \ ++ } while(0) ++ ++/* DEU Common Structures for Falcon*/ ++ ++struct deu_clk_ctrl { ++ u32 Res:26; ++ u32 FSOE:1; ++ u32 SBWE:1; ++ u32 EDIS:1; ++ u32 SPEN:1; ++ u32 DISS:1; ++ u32 DISR:1; 
++}; ++ ++struct deu_des { ++ struct deu_des_ctrl { /* 10h */ ++ u32 KRE:1; ++ u32 reserved1:5; ++ u32 GO:1; ++ u32 STP:1; ++ u32 Res2:6; ++ u32 NDC:1; ++ u32 ENDI:1; ++ u32 Res3:2; ++ u32 F:3; ++ u32 O:3; ++ u32 BUS:1; ++ u32 DAU:1; ++ u32 ARS:1; ++ u32 SM:1; ++ u32 E_D:1; ++ u32 M:3; ++ } ctrl; ++ ++ u32 IHR; /* 14h */ ++ u32 ILR; /* 18h */ ++ u32 K1HR; /* 1c */ ++ u32 K1LR; ++ u32 K2HR; ++ u32 K2LR; ++ u32 K3HR; ++ u32 K3LR; /* 30h */ ++ u32 IVHR; /* 34h */ ++ u32 IVLR; /* 38 */ ++ u32 OHR; /* 3c */ ++ u32 OLR; /* 40 */ ++}; ++ ++struct deu_aes { ++ struct deu_aes_ctrl { ++ u32 KRE:1; ++ u32 reserved1:4; ++ u32 PNK:1; ++ u32 GO:1; ++ u32 STP:1; ++ u32 reserved2:6; ++ u32 NDC:1; ++ u32 ENDI:1; ++ u32 reserved3:2; ++ u32 F:3; /* fbs */ ++ u32 O:3; /* om */ ++ u32 BUS:1; /* bsy */ ++ u32 DAU:1; ++ u32 ARS:1; ++ u32 SM:1; ++ u32 E_D:1; ++ u32 KV:1; ++ u32 K:2; /* KL */ ++ } ctrl; ++ ++ u32 ID3R; /* 80h */ ++ u32 ID2R; /* 84h */ ++ u32 ID1R; /* 88h */ ++ u32 ID0R; /* 8Ch */ ++ u32 K7R; /* 90h */ ++ u32 K6R; /* 94h */ ++ u32 K5R; /* 98h */ ++ u32 K4R; /* 9Ch */ ++ u32 K3R; /* A0h */ ++ u32 K2R; /* A4h */ ++ u32 K1R; /* A8h */ ++ u32 K0R; /* ACh */ ++ u32 IV3R; /* B0h */ ++ u32 IV2R; /* B4h */ ++ u32 IV1R; /* B8h */ ++ u32 IV0R; /* BCh */ ++ u32 OD3R; /* D4h */ ++ u32 OD2R; /* D8h */ ++ u32 OD1R; /* DCh */ ++ u32 OD0R; /* E0h */ ++}; ++ ++struct deu_arc4 { ++ struct arc4_controlr { ++ u32 KRE:1; ++ u32 KLEN:4; ++ u32 KSAE:1; ++ u32 GO:1; ++ u32 STP:1; ++ u32 reserved1:6; ++ u32 NDC:1; ++ u32 ENDI:1; ++ u32 reserved2:8; ++ u32 BUS:1; /* bsy */ ++ u32 reserved3:1; ++ u32 ARS:1; ++ u32 SM:1; ++ u32 reserved4:4; ++ } ctrl; ++ ++ u32 K3R; /* 104h */ ++ u32 K2R; /* 108h */ ++ u32 K1R; /* 10Ch */ ++ u32 K0R; /* 110h */ ++ u32 IDLEN; /* 114h */ ++ u32 ID3R; /* 118h */ ++ u32 ID2R; /* 11Ch */ ++ u32 ID1R; /* 120h */ ++ u32 ID0R; /* 124h */ ++ u32 OD3R; /* 128h */ ++ u32 OD2R; /* 12Ch */ ++ u32 OD1R; /* 130h */ ++ u32 OD0R; /* 134h */ ++}; ++ ++struct deu_hash { ++ struct deu_hash_ctrl { ++ u32 reserved1:5; ++ u32 KHS:1; ++ u32 GO:1; ++ u32 INIT:1; ++ u32 reserved2:6; ++ u32 NDC:1; ++ u32 ENDI:1; ++ u32 reserved3:7; ++ u32 DGRY:1; ++ u32 BSY:1; ++ u32 reserved4:1; ++ u32 IRCL:1; ++ u32 SM:1; ++ u32 KYUE:1; ++ u32 HMEN:1; ++ u32 SSEN:1; ++ u32 ALGO:1; ++ } ctrl; ++ ++ u32 MR; /* B4h */ ++ u32 D1R; /* B8h */ ++ u32 D2R; /* BCh */ ++ u32 D3R; /* C0h */ ++ u32 D4R; /* C4h */ ++ u32 D5R; /* C8h */ ++ u32 dummy; /* CCh */ ++ u32 KIDX; /* D0h */ ++ u32 KEY; /* D4h */ ++ u32 DBN; /* D8h */ ++}; ++ ++struct deu_dma { ++ struct deu_dma_ctrl { ++ u32 reserved1:22; ++ u32 BS:2; ++ u32 BSY:1; ++ u32 reserved2:1; ++ u32 ALGO:2; ++ u32 RXCLS:2; ++ u32 reserved3:1; ++ u32 EN:1; ++ } ctrl; ++}; ++ ++#endif /* DEU_FALCON_H */ +--- a/arch/mips/lantiq/xway/devices.c ++++ b/arch/mips/lantiq/xway/devices.c +@@ -277,3 +277,9 @@ + break; + } + } ++ ++void __init ++lq_register_crypto(const char *name) ++{ ++ platform_device_register_simple(name, 0, 0, 0); ++} +--- a/arch/mips/lantiq/xway/devices.h ++++ b/arch/mips/lantiq/xway/devices.h +@@ -21,5 +21,6 @@ + extern void __init lq_register_wdt(void); + extern void __init lq_register_ethernet(struct lq_eth_data *eth); + extern void __init lq_register_asc(int port); ++extern void __init lq_register_crypto(const char *name); + + #endif +--- a/arch/mips/lantiq/xway/mach-easy50712.c ++++ b/arch/mips/lantiq/xway/mach-easy50712.c +@@ -72,6 +72,7 @@ + lq_register_wdt(); + lq_register_pci(&lq_pci_data); + lq_register_ethernet(&lq_eth_data); ++ lq_register_crypto("lq_danube_deu"); + } + + 
MIPS_MACHINE(LANTIQ_MACH_EASY50712, +--- a/arch/mips/lantiq/xway/mach-easy50812.c ++++ b/arch/mips/lantiq/xway/mach-easy50812.c +@@ -71,6 +71,7 @@ + lq_register_wdt(); + lq_register_pci(&lq_pci_data); + lq_register_ethernet(&lq_eth_data); ++ lq_register_crypto("lq_ar9_deu"); + } + + MIPS_MACHINE(LANTIQ_MACH_EASY50812, diff --git a/target/linux/lantiq/patches/300-udp_redirect.patch b/target/linux/lantiq/patches/300-udp_redirect.patch new file mode 100644 index 0000000000..7425a3108f --- /dev/null +++ b/target/linux/lantiq/patches/300-udp_redirect.patch @@ -0,0 +1,346 @@ +--- /dev/null ++++ b/include/linux/udp_redirect.h +@@ -0,0 +1,57 @@ ++#ifndef _UDP_REDIRECT_H ++#define _UDP_REDIRECT_H ++ ++/****************************************************************************** ++ ++ Copyright (c) 2006 ++ Infineon Technologies AG ++ Am Campeon 1-12; 81726 Munich, Germany ++ ++ THE DELIVERY OF THIS SOFTWARE AS WELL AS THE HEREBY GRANTED NON-EXCLUSIVE, ++ WORLDWIDE LICENSE TO USE, COPY, MODIFY, DISTRIBUTE AND SUBLICENSE THIS ++ SOFTWARE IS FREE OF CHARGE. ++ ++ THE LICENSED SOFTWARE IS PROVIDED "AS IS" AND INFINEON EXPRESSLY DISCLAIMS ++ ALL REPRESENTATIONS AND WARRANTIES, WHETHER EXPRESS OR IMPLIED, INCLUDING ++ WITHOUT LIMITATION, WARRANTIES OR REPRESENTATIONS OF WORKMANSHIP, ++ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, DURABILITY, THAT THE ++ OPERATING OF THE LICENSED SOFTWARE WILL BE ERROR FREE OR FREE OF ANY THIRD ++ PARTY CLAIMS, INCLUDING WITHOUT LIMITATION CLAIMS OF THIRD PARTY INTELLECTUAL ++ PROPERTY INFRINGEMENT. ++ ++ EXCEPT FOR ANY LIABILITY DUE TO WILFUL ACTS OR GROSS NEGLIGENCE AND EXCEPT ++ FOR ANY PERSONAL INJURY INFINEON SHALL IN NO EVENT BE LIABLE FOR ANY CLAIM ++ OR DAMAGES OF ANY KIND, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ DEALINGS IN THE SOFTWARE. ++ ++******************************************************************************/ ++ ++/* ============================= */ ++/* Includes */ ++/* ============================= */ ++#ifndef _LINUX_TYPES_H ++#include <linux/types.h> ++#endif ++ ++ ++/* ============================= */ ++/* Definitions */ ++/* ============================= */ ++#define UDP_REDIRECT_MAGIC (void*)0x55445052L ++ ++ ++/* ============================= */ ++/* Global variable declaration */ ++/* ============================= */ ++extern int (*udp_do_redirect_fn)(struct sock *sk, struct sk_buff *skb); ++extern int (*udpredirect_getfrag_fn)(void *p, char * to, ++ int offset, int fraglen, int odd, ++ struct sk_buff *skb); ++/* ============================= */ ++/* Global function declaration */ ++/* ============================= */ ++ ++extern int udpredirect_getfrag(void *p, char * to, int offset, ++ int fraglen, int odd, struct sk_buff *skb); ++#endif +--- /dev/null ++++ b/net/ipv4/udp_redirect_symb.c +@@ -0,0 +1,186 @@ ++/****************************************************************************** ++ ++ Copyright (c) 2006 ++ Infineon Technologies AG ++ Am Campeon 1-12; 81726 Munich, Germany ++ ++ THE DELIVERY OF THIS SOFTWARE AS WELL AS THE HEREBY GRANTED NON-EXCLUSIVE, ++ WORLDWIDE LICENSE TO USE, COPY, MODIFY, DISTRIBUTE AND SUBLICENSE THIS ++ SOFTWARE IS FREE OF CHARGE. 
++ ++ THE LICENSED SOFTWARE IS PROVIDED "AS IS" AND INFINEON EXPRESSLY DISCLAIMS ++ ALL REPRESENTATIONS AND WARRANTIES, WHETHER EXPRESS OR IMPLIED, INCLUDING ++ WITHOUT LIMITATION, WARRANTIES OR REPRESENTATIONS OF WORKMANSHIP, ++ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, DURABILITY, THAT THE ++ OPERATING OF THE LICENSED SOFTWARE WILL BE ERROR FREE OR FREE OF ANY THIRD ++ PARTY CLAIMS, INCLUDING WITHOUT LIMITATION CLAIMS OF THIRD PARTY INTELLECTUAL ++ PROPERTY INFRINGEMENT. ++ ++ EXCEPT FOR ANY LIABILITY DUE TO WILFUL ACTS OR GROSS NEGLIGENCE AND EXCEPT ++ FOR ANY PERSONAL INJURY INFINEON SHALL IN NO EVENT BE LIABLE FOR ANY CLAIM ++ OR DAMAGES OF ANY KIND, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ DEALINGS IN THE SOFTWARE. ++ ++******************************************************************************/ ++#if defined(CONFIG_IFX_UDP_REDIRECT) || defined(CONFIG_IFX_UDP_REDIRECT_MODULE) ++/* ============================= */ ++/* Includes */ ++/* ============================= */ ++#include <net/checksum.h> ++#include <net/udp.h> ++#include <linux/module.h> ++#include <linux/skbuff.h> ++#include <linux/udp_redirect.h> ++ ++/* ============================= */ ++/* Global variable definition */ ++/* ============================= */ ++int (*udpredirect_getfrag_fn) (void *p, char * to, int offset, ++ int fraglen, int odd, struct sk_buff *skb) = NULL; ++int (*udp_do_redirect_fn)(struct sock *sk, struct sk_buff *skb) = NULL; ++ ++/* ============================= */ ++/* Local type definitions */ ++/* ============================= */ ++struct udpfakehdr ++{ ++ struct udphdr uh; ++ u32 saddr; ++ u32 daddr; ++ struct iovec *iov; ++ u32 wcheck; ++}; ++ ++/* ============================= */ ++/* Local function declaration */ ++/* ============================= */ ++static int udpredirect_csum_partial_copy_fromiovecend(unsigned char *kdata, ++ struct iovec *iov, int offset, unsigned int len, __wsum *csump); ++ ++static int udpredirect_memcpy_fromiovecend(unsigned char *kdata, struct iovec *iov, int offset, ++ int len); ++ ++/* ============================= */ ++/* Global function definition */ ++/* ============================= */ ++ ++/* ++ Copy of udp_getfrag() from udp.c ++ This function exists because no copy_from_user() is needed for udpredirect. ++*/ ++ ++int ++udpredirect_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) ++{ ++ struct iovec *iov = from; ++ ++ if (skb->ip_summed == CHECKSUM_PARTIAL) { ++ if (udpredirect_memcpy_fromiovecend(to, iov, offset, len) < 0) ++ return -EFAULT; ++ } else { ++ __wsum csum = 0; ++ if (udpredirect_csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0) ++ return -EFAULT; ++ skb->csum = csum_block_add(skb->csum, csum, odd); ++ } ++ return 0; ++} ++ ++static int udpredirect_memcpy_fromiovecend(unsigned char *kdata, struct iovec *iov, int offset, ++ int len) ++{ ++ /* Skip over the finished iovecs */ ++ while (offset >= iov->iov_len) { ++ offset -= iov->iov_len; ++ iov++; ++ } ++ ++ while (len > 0) { ++ u8 __user *base = iov->iov_base + offset; ++ int copy = min_t(unsigned int, len, iov->iov_len - offset); ++ ++ offset = 0; ++ memcpy(kdata, base, copy); ++ len -= copy; ++ kdata += copy; ++ iov++; ++ } ++ ++ return 0; ++} ++ ++/* ++ Copy of csum_partial_copy_fromiovecend() from iovec.c ++ This function exists because no copy_from_user() is needed for udpredirect. 
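++ The iovec data is already in kernel space here, so plain memcpy() and
++ csum_partial_copy_nocheck() are sufficient.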
++*/ ++ ++int udpredirect_csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov, ++ int offset, unsigned int len, __wsum *csump) ++{ ++ __wsum csum = *csump; ++ int partial_cnt = 0, err = 0; ++ ++ /* Skip over the finished iovecs */ ++ while (offset >= iov->iov_len) { ++ offset -= iov->iov_len; ++ iov++; ++ } ++ ++ while (len > 0) { ++ u8 __user *base = iov->iov_base + offset; ++ int copy = min_t(unsigned int, len, iov->iov_len - offset); ++ ++ offset = 0; ++ ++ /* There is a remnant from previous iov. */ ++ if (partial_cnt) { ++ int par_len = 4 - partial_cnt; ++ ++ /* iov component is too short ... */ ++ if (par_len > copy) { ++ memcpy(kdata, base, copy); ++ kdata += copy; ++ base += copy; ++ partial_cnt += copy; ++ len -= copy; ++ iov++; ++ if (len) ++ continue; ++ *csump = csum_partial(kdata - partial_cnt, ++ partial_cnt, csum); ++ goto out; ++ } ++ memcpy(kdata, base, par_len); ++ csum = csum_partial(kdata - partial_cnt, 4, csum); ++ kdata += par_len; ++ base += par_len; ++ copy -= par_len; ++ len -= par_len; ++ partial_cnt = 0; ++ } ++ ++ if (len > copy) { ++ partial_cnt = copy % 4; ++ if (partial_cnt) { ++ copy -= partial_cnt; ++ memcpy(kdata + copy, base + copy, partial_cnt); ++ } ++ } ++ ++ if (copy) { ++ csum = csum_partial_copy_nocheck(base, kdata, copy, csum); ++ } ++ len -= copy + partial_cnt; ++ kdata += copy + partial_cnt; ++ iov++; ++ } ++ *csump = csum; ++out: ++ return err; ++} ++ ++EXPORT_SYMBOL(udpredirect_getfrag); ++EXPORT_SYMBOL(udp_do_redirect_fn); ++EXPORT_SYMBOL(udpredirect_getfrag_fn); ++#endif /* CONFIG_IFX_UDP_REDIRECT* */ +--- a/net/ipv4/Makefile ++++ b/net/ipv4/Makefile +@@ -14,6 +14,9 @@ obj-y := route.o inetpeer.o protocol + inet_fragment.o + + obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o ++ifneq ($(CONFIG_IFX_UDP_REDIRECT),) ++obj-$(CONFIG_IFX_UDP_REDIRECT) += udp_redirect_symb.o ++endif + obj-$(CONFIG_IP_FIB_HASH) += fib_hash.o + obj-$(CONFIG_IP_FIB_TRIE) += fib_trie.o + obj-$(CONFIG_PROC_FS) += proc.o +--- a/net/ipv4/udp.c ++++ b/net/ipv4/udp.c +@@ -106,6 +106,10 @@ + #include <net/xfrm.h> + #include "udp_impl.h" + ++#if defined(CONFIG_IFX_UDP_REDIRECT) || defined(CONFIG_IFX_UDP_REDIRECT_MODULE) ++#include <linux/udp_redirect.h> ++#endif ++ + struct udp_table udp_table __read_mostly; + EXPORT_SYMBOL(udp_table); + +@@ -782,7 +786,7 @@ int udp_sendmsg(struct kiocb *iocb, stru + u8 tos; + int err, is_udplite = IS_UDPLITE(sk); + int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; +- int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); ++ int (*getfrag)(void *, char *, int, int, int, struct sk_buff *) = NULL; + + if (len > 0xFFFF) + return -EMSGSIZE; +@@ -944,6 +948,12 @@ back_from_confirm: + + do_append_data: + up->len += ulen; ++ /* UDPREDIRECT */ ++#if defined(CONFIG_IFX_UDP_REDIRECT) || defined(CONFIG_IFX_UDP_REDIRECT_MODULE) ++ if(udpredirect_getfrag_fn && sk->sk_user_data == UDP_REDIRECT_MAGIC) ++ getfrag = udpredirect_getfrag_fn; ++ else ++#endif /* IFX_UDP_REDIRECT */ + getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; + err = ip_append_data(sk, getfrag, msg->msg_iov, ulen, + sizeof(struct udphdr), &ipc, &rt, +@@ -1518,6 +1528,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, + struct rtable *rt = skb_rtable(skb); + __be32 saddr, daddr; + struct net *net = dev_net(skb->dev); ++ int ret = 0; + + /* + * Validate the packet. 
+@@ -1550,7 +1561,16 @@ int __udp4_lib_rcv(struct sk_buff *skb, + sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); + + if (sk != NULL) { +- int ret = udp_queue_rcv_skb(sk, skb); ++ /* UDPREDIRECT */ ++#if defined(CONFIG_IFX_UDP_REDIRECT) || defined(CONFIG_IFX_UDP_REDIRECT_MODULE) ++ if(udp_do_redirect_fn && sk->sk_user_data == UDP_REDIRECT_MAGIC) ++ { ++ udp_do_redirect_fn(sk,skb); ++ kfree_skb(skb); ++ return(0); ++ } ++#endif ++ ret = udp_queue_rcv_skb(sk, skb); + sock_put(sk); + + /* a return value > 0 means to resubmit the input, but +@@ -1845,7 +1865,7 @@ struct proto udp_prot = { + #endif + }; + EXPORT_SYMBOL(udp_prot); +- ++EXPORT_SYMBOL(udp_rcv); + /* ------------------------------------------------------------------------ */ + #ifdef CONFIG_PROC_FS + +--- a/net/Kconfig ++++ b/net/Kconfig +@@ -72,6 +72,12 @@ config INET + + Short answer: say Y. + ++config IFX_UDP_REDIRECT ++ bool "IFX Kernel Packet Interface for UDP redirection" ++ help ++ You can say Y here if you want to use hooks from kernel for ++ UDP redirection. ++ + if INET + source "net/ipv4/Kconfig" + source "net/ipv6/Kconfig" diff --git a/target/linux/lantiq/patches/310-atm_hack.patch b/target/linux/lantiq/patches/310-atm_hack.patch new file mode 100644 index 0000000000..55e75ce546 --- /dev/null +++ b/target/linux/lantiq/patches/310-atm_hack.patch @@ -0,0 +1,42 @@ +--- a/arch/mips/mm/cache.c ++++ b/arch/mips/mm/cache.c +@@ -52,6 +52,8 @@ + void (*_dma_cache_inv)(unsigned long start, unsigned long size); + + EXPORT_SYMBOL(_dma_cache_wback_inv); ++EXPORT_SYMBOL(_dma_cache_wback); ++EXPORT_SYMBOL(_dma_cache_inv); + + #endif /* CONFIG_DMA_NONCOHERENT */ + +--- a/net/atm/proc.c ++++ b/net/atm/proc.c +@@ -153,7 +153,7 @@ + static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc) + { + static const char *const class_name[] = { +- "off", "UBR", "CBR", "VBR", "ABR"}; ++ "off","UBR","CBR","NTR-VBR","ABR","ANY","RT-VBR","UBR+","GFR"}; + static const char *const aal_name[] = { + "---", "1", "2", "3/4", /* 0- 3 */ + "???", "5", "???", "???", /* 4- 7 */ +--- a/net/atm/common.c ++++ b/net/atm/common.c +@@ -60,11 +60,17 @@ + write_unlock_irq(&vcc_sklist_lock); + } + ++struct sk_buff* (*ifx_atm_alloc_tx)(struct atm_vcc *, unsigned int) = NULL; ++EXPORT_SYMBOL(ifx_atm_alloc_tx); ++ + static struct sk_buff *alloc_tx(struct atm_vcc *vcc, unsigned int size) + { + struct sk_buff *skb; + struct sock *sk = sk_atm(vcc); + ++ if (ifx_atm_alloc_tx != NULL) ++ return ifx_atm_alloc_tx(vcc, size); ++ + if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) { + pr_debug("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n", + sk_wmem_alloc_get(sk), size, sk->sk_sndbuf); diff --git a/target/linux/lantiq/patches/400-mach-arv45xx.patch b/target/linux/lantiq/patches/400-mach-arv45xx.patch new file mode 100644 index 0000000000..643a0f7ae1 --- /dev/null +++ b/target/linux/lantiq/patches/400-mach-arv45xx.patch @@ -0,0 +1,211 @@ +--- a/arch/mips/include/asm/mach-lantiq/machine.h ++++ b/arch/mips/include/asm/mach-lantiq/machine.h +@@ -11,4 +11,7 @@ + LANTIQ_MACH_EASY4010, /* Twinpass evalkit */ + LANTIQ_MACH_EASY50712, /* Danube evalkit */ + LANTIQ_MACH_EASY50812, /* AR9 eval board */ ++ LANTIQ_MACH_ARV4518, /* Airties WAV-221, SMC-7908A-ISP */ ++ LANTIQ_MACH_ARV452, /* Airties WAV-281, Arcor EasyboxA800 */ ++ LANTIQ_MACH_ARV4525, /* Speedport W502V */ + }; +--- a/arch/mips/lantiq/xway/Kconfig ++++ b/arch/mips/lantiq/xway/Kconfig +@@ -14,6 +14,10 @@ + bool "Easy4010" + default y + ++config LANTIQ_MACH_ARV45XX ++ bool "ARV45XX" ++ default y 
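++	help
++	  Support for the ARV45xx family of xDSL CPEs (Airties WAV-221/WAV-281,
++	  SMC-7908A-ISP, Arcor EasyboxA800, Speedport W502V).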
++ + endmenu + + endif +--- a/arch/mips/lantiq/xway/Makefile ++++ b/arch/mips/lantiq/xway/Makefile +@@ -3,3 +3,4 @@ + obj-$(CONFIG_LANTIQ_MACH_EASY50812) += mach-easy50812.o + obj-$(CONFIG_LANTIQ_MACH_EASY50712) += mach-easy50712.o + obj-$(CONFIG_LANTIQ_MACH_EASY4010) += mach-easy4010.o ++obj-$(CONFIG_LANTIQ_MACH_ARV45XX) += mach-arv45xx.o +--- /dev/null ++++ b/arch/mips/lantiq/xway/mach-arv45xx.c +@@ -0,0 +1,178 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ * ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#include <linux/init.h> ++#include <linux/platform_device.h> ++#include <linux/leds.h> ++#include <linux/gpio.h> ++#include <linux/gpio_buttons.h> ++#include <linux/mtd/mtd.h> ++#include <linux/mtd/partitions.h> ++#include <linux/mtd/physmap.h> ++#include <linux/input.h> ++#include <linux/etherdevice.h> ++ ++#include <machine.h> ++ ++#include <xway.h> ++#include <lantiq_platform.h> ++ ++#include "devices.h" ++ ++#define ARV452_LATCH_SWITCH (1 << 10) ++ ++#ifdef CONFIG_MTD_PARTITIONS ++static struct mtd_partition arv45xx_partitions[] = ++{ ++ { ++ .name = "uboot", ++ .offset = 0x0, ++ .size = 0x20000, ++ }, ++ { ++ .name = "uboot_env", ++ .offset = 0x20000, ++ .size = 0x10000, ++ }, ++ { ++ .name = "linux", ++ .offset = 0x30000, ++ .size = 0x3c0000, ++ }, ++ { ++ .name = "board_config", ++ .offset = 0x3f0000, ++ .size = 0x10000, ++ }, ++}; ++#endif ++ ++static struct physmap_flash_data arv45xx_flash_data = { ++#ifdef CONFIG_MTD_PARTITIONS ++ .nr_parts = ARRAY_SIZE(arv45xx_partitions), ++ .parts = arv45xx_partitions, ++#endif ++}; ++ ++static struct lq_pci_data lq_pci_data = { ++ .clock = PCI_CLOCK_EXT, ++ .req_mask = 0xf, ++}; ++ ++static struct lq_eth_data lq_eth_data = { ++ .mii_mode = REV_MII_MODE, ++ .mac = "\xff\xff\xff\xff\xff\xff", ++}; ++ ++static struct gpio_led ++arv4518_leds_gpio[] __initdata = { ++ { .name = "soc:blue:power", .gpio = 3, .active_low = 1, }, ++ { .name = "soc:blue:adsl", .gpio = 4, .active_low = 1, }, ++ { .name = "soc:blue:internet", .gpio = 5, .active_low = 1, }, ++ { .name = "soc:red:power", .gpio = 6, .active_low = 1, }, ++ { .name = "soc:yello:wps", .gpio = 7, .active_low = 1, }, ++ { .name = "soc:red:wps", .gpio = 9, .active_low = 1, }, ++ { .name = "soc:blue:voip", .gpio = 32, .active_low = 1, }, ++ { .name = "soc:blue:fxs1", .gpio = 33, .active_low = 1, }, ++ { .name = "soc:blue:fxs2", .gpio = 34, .active_low = 1, }, ++ { .name = "soc:blue:fxo", .gpio = 35, .active_low = 1, }, ++ { .name = "soc:blue:voice", .gpio = 36, .active_low = 1, }, ++ { .name = "soc:blue:usb", .gpio = 37, .active_low = 1, }, ++ { .name = "soc:blue:wlan", .gpio = 38, .active_low = 1, }, ++}; ++ ++static struct gpio_led ++arv452_leds_gpio[] __initdata = { ++ { .name = "soc:blue:power", .gpio = 3, .active_low = 1, }, ++ { .name = "soc:blue:adsl", .gpio = 4, .active_low = 1, }, ++ { .name = "soc:blue:internet", .gpio = 5, .active_low = 1, }, ++ { .name = "soc:red:power", .gpio = 6, .active_low = 1, }, ++ { .name = "soc:yello:wps", .gpio = 7, .active_low = 1, }, ++ { .name = "soc:red:wps", .gpio = 9, .active_low = 1, }, ++ { .name = "soc:blue:voip", .gpio = 32, .active_low = 1, }, ++ { .name = "soc:blue:fxs1", .gpio = 33, .active_low = 1, }, ++ { .name = "soc:blue:fxs2", .gpio = 34, .active_low = 1, }, ++ { .name = "soc:blue:fxo", .gpio = 35, .active_low = 1, }, ++ { .name = "soc:blue:voice", .gpio = 36, 
.active_low = 1, }, ++ { .name = "soc:blue:usb", .gpio = 37, .active_low = 1, }, ++ { .name = "soc:blue:wlan", .gpio = 38, .active_low = 1, }, ++}; ++ ++static struct gpio_led arv4525_leds_gpio[] __initdata = { ++ { .name = "soc:green:festnetz", .gpio = 4, .active_low = 1, }, ++ { .name = "soc:green:internet", .gpio = 5, .active_low = 1, }, ++ { .name = "soc:green:dsl", .gpio = 6, .active_low = 1, }, ++ { .name = "soc:green:wlan", .gpio = 8, .active_low = 1, }, ++ { .name = "soc:green:online", .gpio = 9, .active_low = 1, }, ++}; ++ ++static void ++arv45xx_register_ethernet(void) ++{ ++#define ARV45XX_BRN_MAC 0x3f0016 ++ memcpy_fromio(lq_eth_data.mac, ++ (void *)KSEG1ADDR(LQ_FLASH_START + ARV45XX_BRN_MAC), 6); ++ lq_register_ethernet(&lq_eth_data); ++} ++ ++static void __init ++arv4518_init(void) ++{ ++ lq_register_gpio(); ++ lq_register_gpio_ebu(0); ++ lq_register_gpio_leds(arv4518_leds_gpio, ARRAY_SIZE(arv4518_leds_gpio)); ++ lq_register_asc(0); ++ lq_register_asc(1); ++ lq_register_nor(&arv45xx_flash_data); ++ lq_register_pci(&lq_pci_data); ++ lq_register_wdt(); ++ arv45xx_register_ethernet(); ++} ++ ++MIPS_MACHINE(LANTIQ_MACH_ARV4518, ++ "ARV4518", ++ "ARV4518 - SMC7908A-ISP", ++ arv4518_init); ++ ++static void __init ++arv452_init(void) ++{ ++ lq_register_gpio(); ++ lq_register_gpio_ebu(ARV452_LATCH_SWITCH); ++ lq_register_gpio_leds(arv452_leds_gpio, ARRAY_SIZE(arv452_leds_gpio)); ++ lq_register_asc(0); ++ lq_register_asc(1); ++ lq_register_nor(&arv45xx_flash_data); ++ lq_register_pci(&lq_pci_data); ++ lq_register_wdt(); ++ arv45xx_register_ethernet(); ++} ++ ++MIPS_MACHINE(LANTIQ_MACH_ARV452, ++ "ARV452", ++ "ARV452 - Airties WAV-281, Arcor A800", ++ arv452_init); ++ ++static void __init ++arv4525_init(void) ++{ ++ lq_register_gpio(); ++ lq_register_gpio_leds(arv4525_leds_gpio, ARRAY_SIZE(arv4525_leds_gpio)); ++ lq_register_asc(0); ++ lq_register_asc(1); ++ lq_register_nor(&arv45xx_flash_data); ++ lq_register_pci(&lq_pci_data); ++ lq_register_wdt(); ++ lq_eth_data.mii_mode = MII_MODE; ++ arv45xx_register_ethernet(); ++} ++ ++MIPS_MACHINE(LANTIQ_MACH_ARV4525, ++ "ARV4525", ++ "ARV4525 - Speedport W502V", ++ arv4525_init); diff --git a/target/linux/lantiq/patches/700-dwc_otg.patch b/target/linux/lantiq/patches/700-dwc_otg.patch new file mode 100644 index 0000000000..9176770426 --- /dev/null +++ b/target/linux/lantiq/patches/700-dwc_otg.patch @@ -0,0 +1,15693 @@ +--- a/drivers/usb/Kconfig ++++ b/drivers/usb/Kconfig +@@ -111,6 +111,8 @@ + + source "drivers/usb/host/Kconfig" + ++source "drivers/usb/dwc_otg/Kconfig" ++ + source "drivers/usb/musb/Kconfig" + + source "drivers/usb/class/Kconfig" +--- a/drivers/usb/Makefile ++++ b/drivers/usb/Makefile +@@ -27,6 +27,8 @@ + + obj-$(CONFIG_USB_WUSB) += wusbcore/ + ++obj-$(CONFIG_DWC_OTG) += dwc_otg/ ++ + obj-$(CONFIG_USB_ACM) += class/ + obj-$(CONFIG_USB_PRINTER) += class/ + obj-$(CONFIG_USB_WDM) += class/ +--- /dev/null ++++ b/drivers/usb/dwc_otg/Kconfig +@@ -0,0 +1,37 @@ ++config DWC_OTG ++ tristate "Synopsis DWC_OTG support" ++ depends on USB ++ help ++ This driver supports Synopsis DWC_OTG IP core ++ embebbed on many SOCs (ralink, infineon, etc) ++ ++choice ++ prompt "USB Operation Mode" ++ depends on DWC_OTG ++ default DWC_OTG_HOST_ONLY ++ ++config DWC_OTG_HOST_ONLY ++ bool "HOST ONLY MODE" ++ depends on DWC_OTG ++ ++config DWC_OTG_DEVICE_ONLY ++ bool "DEVICE ONLY MODE" ++ depends on DWC_OTG ++endchoice ++ ++choice ++ prompt "Platform" ++ depends on DWC_OTG ++ default DWC_OTG_LANTIQ ++ ++config DWC_OTG_LANTIQ ++ bool "Lantiq" ++ depends on 
LANTIQ ++ help ++ Danube USB Host Controller ++ platform support ++endchoice ++ ++config DWC_OTG_DEBUG ++ bool "Enable debug mode" ++ depends on DWC_OTG +--- /dev/null ++++ b/drivers/usb/dwc_otg/Makefile +@@ -0,0 +1,39 @@ ++# ++# Makefile for DWC_otg Highspeed USB controller driver ++# ++ ++ifeq ($(CONFIG_DWC_OTG_DEBUG),y) ++EXTRA_CFLAGS += -DDEBUG ++endif ++ ++# Use one of the following flags to compile the software in host-only or ++# device-only mode based on the configuration selected by the user ++ifeq ($(CONFIG_DWC_OTG_HOST_ONLY),y) ++ EXTRA_CFLAGS += -DDWC_OTG_HOST_ONLY -DDWC_HOST_ONLY ++ EXTRA_CFLAGS += -DDWC_OTG_EN_ISOC -DDWC_EN_ISOC ++else ifeq ($(CONFIG_DWC_OTG_DEVICE_ONLY),y) ++ EXTRA_CFLAGS += -DDWC_OTG_DEVICE_ONLY ++else ++ EXTRA_CFLAGS += -DDWC_OTG_MODE ++endif ++ ++# EXTRA_CFLAGS += -DDWC_HS_ELECT_TST ++# EXTRA_CFLAGS += -DDWC_OTG_EXT_CHG_PUMP ++ ++ifeq ($(CONFIG_DWC_OTG_LANTIQ),y) ++ EXTRA_CFLAGS += -Dlinux -D__LINUX__ -DDWC_OTG_IFX -DDWC_OTG_HOST_ONLY -DDWC_HOST_ONLY -D__KERNEL__ ++endif ++ifeq ($(CONFIG_DWC_OTG_LANTIQ),m) ++ EXTRA_CFLAGS += -Dlinux -D__LINUX__ -DDWC_OTG_IFX -DDWC_HOST_ONLY -DMODULE -D__KERNEL__ -DDEBUG ++endif ++ ++obj-$(CONFIG_DWC_OTG) := dwc_otg.o ++dwc_otg-objs := dwc_otg_hcd.o dwc_otg_hcd_intr.o dwc_otg_hcd_queue.o ++#dwc_otg-objs += dwc_otg_pcd.o dwc_otg_pcd_intr.o ++dwc_otg-objs += dwc_otg_attr.o ++dwc_otg-objs += dwc_otg_cil.o dwc_otg_cil_intr.o ++dwc_otg-objs += dwc_otg_ifx.o ++dwc_otg-objs += dwc_otg_driver.o ++ ++#obj-$(CONFIG_DWC_OTG_IFX) := dwc_otg_ifx.o ++#dwc_otg_ifx-objs := dwc_otg_ifx.o +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_attr.c +@@ -0,0 +1,802 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_attr.c $ ++ * $Revision: 1.1.1.1 $ ++ * $Date: 2009-04-17 06:15:34 $ ++ * $Change: 537387 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++/** @file ++ * ++ * The diagnostic interface will provide access to the controller for ++ * bringing up the hardware and testing. The Linux driver attributes ++ * feature will be used to provide the Linux Diagnostic ++ * Interface. These attributes are accessed through sysfs. ++ */ ++ ++/** @page "Linux Module Attributes" ++ * ++ * The Linux module attributes feature is used to provide the Linux ++ * Diagnostic Interface. These attributes are accessed through sysfs. ++ * The diagnostic interface will provide access to the controller for ++ * bringing up the hardware and testing. ++ ++ ++ The following table shows the attributes. ++ <table> ++ <tr> ++ <td><b> Name</b></td> ++ <td><b> Description</b></td> ++ <td><b> Access</b></td> ++ </tr> ++ ++ <tr> ++ <td> mode </td> ++ <td> Returns the current mode: 0 for device mode, 1 for host mode</td> ++ <td> Read</td> ++ </tr> ++ ++ <tr> ++ <td> hnpcapable </td> ++ <td> Gets or sets the "HNP-capable" bit in the Core USB Configuraton Register. ++ Read returns the current value.</td> ++ <td> Read/Write</td> ++ </tr> ++ ++ <tr> ++ <td> srpcapable </td> ++ <td> Gets or sets the "SRP-capable" bit in the Core USB Configuraton Register. ++ Read returns the current value.</td> ++ <td> Read/Write</td> ++ </tr> ++ ++ <tr> ++ <td> hnp </td> ++ <td> Initiates the Host Negotiation Protocol. Read returns the status.</td> ++ <td> Read/Write</td> ++ </tr> ++ ++ <tr> ++ <td> srp </td> ++ <td> Initiates the Session Request Protocol. 
Read returns the status.</td> ++ <td> Read/Write</td> ++ </tr> ++ ++ <tr> ++ <td> buspower </td> ++ <td> Gets or sets the Power State of the bus (0 - Off or 1 - On)</td> ++ <td> Read/Write</td> ++ </tr> ++ ++ <tr> ++ <td> bussuspend </td> ++ <td> Suspends the USB bus.</td> ++ <td> Read/Write</td> ++ </tr> ++ ++ <tr> ++ <td> busconnected </td> ++ <td> Gets the connection status of the bus</td> ++ <td> Read</td> ++ </tr> ++ ++ <tr> ++ <td> gotgctl </td> ++ <td> Gets or sets the Core Control Status Register.</td> ++ <td> Read/Write</td> ++ </tr> ++ ++ <tr> ++ <td> gusbcfg </td> ++ <td> Gets or sets the Core USB Configuration Register</td> ++ <td> Read/Write</td> ++ </tr> ++ ++ <tr> ++ <td> grxfsiz </td> ++ <td> Gets or sets the Receive FIFO Size Register</td> ++ <td> Read/Write</td> ++ </tr> ++ ++ <tr> ++ <td> gnptxfsiz </td> ++ <td> Gets or sets the non-periodic Transmit Size Register</td> ++ <td> Read/Write</td> ++ </tr> ++ ++ <tr> ++ <td> gpvndctl </td> ++ <td> Gets or sets the PHY Vendor Control Register</td> ++ <td> Read/Write</td> ++ </tr> ++ ++ <tr> ++ <td> ggpio </td> ++ <td> Gets the value in the lower 16-bits of the General Purpose IO Register ++ or sets the upper 16 bits.</td> ++ <td> Read/Write</td> ++ </tr> ++ ++ <tr> ++ <td> guid </td> ++ <td> Gets or sets the value of the User ID Register</td> ++ <td> Read/Write</td> ++ </tr> ++ ++ <tr> ++ <td> gsnpsid </td> ++ <td> Gets the value of the Synopsys ID Regester</td> ++ <td> Read</td> ++ </tr> ++ ++ <tr> ++ <td> devspeed </td> ++ <td> Gets or sets the device speed setting in the DCFG register</td> ++ <td> Read/Write</td> ++ </tr> ++ ++ <tr> ++ <td> enumspeed </td> ++ <td> Gets the device enumeration Speed.</td> ++ <td> Read</td> ++ </tr> ++ ++ <tr> ++ <td> hptxfsiz </td> ++ <td> Gets the value of the Host Periodic Transmit FIFO</td> ++ <td> Read</td> ++ </tr> ++ ++ <tr> ++ <td> hprt0 </td> ++ <td> Gets or sets the value in the Host Port Control and Status Register</td> ++ <td> Read/Write</td> ++ </tr> ++ ++ <tr> ++ <td> regoffset </td> ++ <td> Sets the register offset for the next Register Access</td> ++ <td> Read/Write</td> ++ </tr> ++ ++ <tr> ++ <td> regvalue </td> ++ <td> Gets or sets the value of the register at the offset in the regoffset attribute.</td> ++ <td> Read/Write</td> ++ </tr> ++ ++ <tr> ++ <td> remote_wakeup </td> ++ <td> On read, shows the status of Remote Wakeup. On write, initiates a remote ++ wakeup of the host. When bit 0 is 1 and Remote Wakeup is enabled, the Remote ++ Wakeup signalling bit in the Device Control Register is set for 1 ++ milli-second.</td> ++ <td> Read/Write</td> ++ </tr> ++ ++ <tr> ++ <td> regdump </td> ++ <td> Dumps the contents of core registers.</td> ++ <td> Read</td> ++ </tr> ++ ++ <tr> ++ <td> hcddump </td> ++ <td> Dumps the current HCD state.</td> ++ <td> Read</td> ++ </tr> ++ ++ <tr> ++ <td> hcd_frrem </td> ++ <td> Shows the average value of the Frame Remaining ++ field in the Host Frame Number/Frame Remaining register when an SOF interrupt ++ occurs. This can be used to determine the average interrupt latency. Also ++ shows the average Frame Remaining value for start_transfer and the "a" and ++ "b" sample points. The "a" and "b" sample points may be used during debugging ++ bto determine how long it takes to execute a section of the HCD code.</td> ++ <td> Read</td> ++ </tr> ++ ++ <tr> ++ <td> rd_reg_test </td> ++ <td> Displays the time required to read the GNPTXFSIZ register many times ++ (the output shows the number of times the register is read). 
++ <td> Read</td> ++ </tr> ++ ++ <tr> ++ <td> wr_reg_test </td> ++ <td> Displays the time required to write the GNPTXFSIZ register many times ++ (the output shows the number of times the register is written). ++ <td> Read</td> ++ </tr> ++ ++ </table> ++ ++ Example usage: ++ To get the current mode: ++ cat /sys/devices/lm0/mode ++ ++ To power down the USB: ++ echo 0 > /sys/devices/lm0/buspower ++ */ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/moduleparam.h> ++#include <linux/init.h> ++#include <linux/device.h> ++#include <linux/errno.h> ++#include <linux/types.h> ++#include <linux/stat.h> /* permission constants */ ++ ++#include <asm/io.h> ++ ++#include "dwc_otg_plat.h" ++#include "dwc_otg_attr.h" ++#include "dwc_otg_driver.h" ++// #include "dwc_otg_pcd.h" ++#include "dwc_otg_hcd.h" ++ ++// 20070316, winder added. ++#ifndef SZ_256K ++#define SZ_256K 0x00040000 ++#endif ++ ++/* ++ * MACROs for defining sysfs attribute ++ */ ++#define DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++static ssize_t _otg_attr_name_##_show (struct device *_dev, struct device_attribute *attr, char *buf) \ ++{ \ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);\ ++ uint32_t val; \ ++ val = dwc_read_reg32 (_addr_); \ ++ val = (val & (_mask_)) >> _shift_; \ ++ return sprintf (buf, "%s = 0x%x\n", _string_, val); \ ++} ++#define DWC_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++static ssize_t _otg_attr_name_##_store (struct device *_dev, struct device_attribute *attr, const char *buf, size_t count) \ ++{ \ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);\ ++ uint32_t set = simple_strtoul(buf, NULL, 16); \ ++ uint32_t clear = set; \ ++ clear = ((~clear) << _shift_) & _mask_; \ ++ set = (set << _shift_) & _mask_; \ ++ dev_dbg(_dev, "Storing Address=0x%08x Set=0x%08x Clear=0x%08x\n", (uint32_t)_addr_, set, clear); \ ++ dwc_modify_reg32(_addr_, clear, set); \ ++ return count; \ ++} ++ ++#define DWC_OTG_DEVICE_ATTR_BITFIELD_RW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++DWC_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++DEVICE_ATTR(_otg_attr_name_,0644,_otg_attr_name_##_show,_otg_attr_name_##_store); ++ ++#define DWC_OTG_DEVICE_ATTR_BITFIELD_RO(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++DEVICE_ATTR(_otg_attr_name_,0444,_otg_attr_name_##_show,NULL); ++ ++/* ++ * MACROs for defining sysfs attribute for 32-bit registers ++ */ ++#define DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \ ++static ssize_t _otg_attr_name_##_show (struct device *_dev, struct device_attribute *attr, char *buf) \ ++{ \ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);\ ++ uint32_t val; \ ++ val = dwc_read_reg32 (_addr_); \ ++ return sprintf (buf, "%s = 0x%08x\n", _string_, val); \ ++} ++#define DWC_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_,_addr_,_string_) \ ++static ssize_t _otg_attr_name_##_store (struct device *_dev, struct device_attribute *attr, const char *buf, size_t count) \ ++{ \ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);\ ++ uint32_t val = simple_strtoul(buf, NULL, 16); \ ++ dev_dbg(_dev, "Storing Address=0x%08x Val=0x%08x\n", (uint32_t)_addr_, val); \ ++ dwc_write_reg32(_addr_, val); \ ++ return count; \ ++} ++ ++#define 
DWC_OTG_DEVICE_ATTR_REG32_RW(_otg_attr_name_,_addr_,_string_) \ ++DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \ ++DWC_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_,_addr_,_string_) \ ++DEVICE_ATTR(_otg_attr_name_,0644,_otg_attr_name_##_show,_otg_attr_name_##_store); ++ ++#define DWC_OTG_DEVICE_ATTR_REG32_RO(_otg_attr_name_,_addr_,_string_) \ ++DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \ ++DEVICE_ATTR(_otg_attr_name_,0444,_otg_attr_name_##_show,NULL); ++ ++ ++/** @name Functions for Show/Store of Attributes */ ++/**@{*/ ++ ++/** ++ * Show the register offset of the Register Access. ++ */ ++static ssize_t regoffset_show( struct device *_dev, struct device_attribute *attr, char *buf) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ return snprintf(buf, sizeof("0xFFFFFFFF\n")+1,"0x%08x\n", otg_dev->reg_offset); ++} ++ ++/** ++ * Set the register offset for the next Register Access Read/Write ++ */ ++static ssize_t regoffset_store( struct device *_dev, struct device_attribute *attr, const char *buf, ++ size_t count ) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ uint32_t offset = simple_strtoul(buf, NULL, 16); ++ //dev_dbg(_dev, "Offset=0x%08x\n", offset); ++ if (offset < SZ_256K ) { ++ otg_dev->reg_offset = offset; ++ } ++ else { ++ dev_err( _dev, "invalid offset\n" ); ++ } ++ ++ return count; ++} ++DEVICE_ATTR(regoffset, S_IRUGO|S_IWUSR, regoffset_show, regoffset_store); ++ ++/** ++ * Show the value of the register at the offset in the reg_offset ++ * attribute. ++ */ ++static ssize_t regvalue_show( struct device *_dev, struct device_attribute *attr, char *buf) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ uint32_t val; ++ volatile uint32_t *addr; ++ ++ if (otg_dev->reg_offset != 0xFFFFFFFF && 0 != otg_dev->base) { ++ /* Calculate the address */ ++ addr = (uint32_t*)(otg_dev->reg_offset + ++ (uint8_t*)otg_dev->base); ++ //dev_dbg(_dev, "@0x%08x\n", (unsigned)addr); ++ val = dwc_read_reg32( addr ); ++ return snprintf(buf, sizeof("Reg@0xFFFFFFFF = 0xFFFFFFFF\n")+1, ++ "Reg@0x%06x = 0x%08x\n", ++ otg_dev->reg_offset, val); ++ } ++ else { ++ dev_err(_dev, "Invalid offset (0x%0x)\n", ++ otg_dev->reg_offset); ++ return sprintf(buf, "invalid offset\n" ); ++ } ++} ++ ++/** ++ * Store the value in the register at the offset in the reg_offset ++ * attribute. 
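Together, the regoffset and regvalue attributes implemented here form a two-step peek/poke interface: write an offset (bounded by SZ_256K), then read or write the 32-bit value at that offset. A minimal userspace sketch of that sequence, reusing the /sys/devices/lm0 path from the example usage above; the 0x40 offset for GSNPSID is an assumption about the usual DWC_otg register map, not something stated in this patch:

    /* peek_gsnpsid.c - read one core register through regoffset/regvalue.
     * The lm0 path comes from the example usage above; the 0x40 offset is
     * an assumed GSNPSID location, for illustration only. */
    #include <stdio.h>

    int main(void)
    {
        FILE *f;
        char line[64];

        /* Step 1: select the register offset (hex, as parsed by simple_strtoul). */
        f = fopen("/sys/devices/lm0/regoffset", "w");
        if (!f) { perror("regoffset"); return 1; }
        fprintf(f, "0x40\n");
        fclose(f);

        /* Step 2: read the value back; the driver prints "Reg@0x... = 0x...". */
        f = fopen("/sys/devices/lm0/regvalue", "r");
        if (!f) { perror("regvalue"); return 1; }
        if (fgets(line, sizeof(line), f))
            fputs(line, stdout);
        fclose(f);
        return 0;
    }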
++ * ++ */ ++static ssize_t regvalue_store( struct device *_dev, struct device_attribute *attr, const char *buf, ++ size_t count ) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ volatile uint32_t * addr; ++ uint32_t val = simple_strtoul(buf, NULL, 16); ++ //dev_dbg(_dev, "Offset=0x%08x Val=0x%08x\n", otg_dev->reg_offset, val); ++ if (otg_dev->reg_offset != 0xFFFFFFFF && 0 != otg_dev->base) { ++ /* Calculate the address */ ++ addr = (uint32_t*)(otg_dev->reg_offset + ++ (uint8_t*)otg_dev->base); ++ //dev_dbg(_dev, "@0x%08x\n", (unsigned)addr); ++ dwc_write_reg32( addr, val ); ++ } ++ else { ++ dev_err(_dev, "Invalid Register Offset (0x%08x)\n", ++ otg_dev->reg_offset); ++ } ++ return count; ++} ++DEVICE_ATTR(regvalue, S_IRUGO|S_IWUSR, regvalue_show, regvalue_store); ++ ++/* ++ * Attributes ++ */ ++DWC_OTG_DEVICE_ATTR_BITFIELD_RO(mode,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<20),20,"Mode"); ++DWC_OTG_DEVICE_ATTR_BITFIELD_RW(hnpcapable,&(otg_dev->core_if->core_global_regs->gusbcfg),(1<<9),9,"Mode"); ++DWC_OTG_DEVICE_ATTR_BITFIELD_RW(srpcapable,&(otg_dev->core_if->core_global_regs->gusbcfg),(1<<8),8,"Mode"); ++ ++//DWC_OTG_DEVICE_ATTR_BITFIELD_RW(buspower,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<8),8,"Mode"); ++//DWC_OTG_DEVICE_ATTR_BITFIELD_RW(bussuspend,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<8),8,"Mode"); ++DWC_OTG_DEVICE_ATTR_BITFIELD_RO(busconnected,otg_dev->core_if->host_if->hprt0,0x01,0,"Bus Connected"); ++ ++DWC_OTG_DEVICE_ATTR_REG32_RW(gotgctl,&(otg_dev->core_if->core_global_regs->gotgctl),"GOTGCTL"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(gusbcfg,&(otg_dev->core_if->core_global_regs->gusbcfg),"GUSBCFG"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(grxfsiz,&(otg_dev->core_if->core_global_regs->grxfsiz),"GRXFSIZ"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(gnptxfsiz,&(otg_dev->core_if->core_global_regs->gnptxfsiz),"GNPTXFSIZ"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(gpvndctl,&(otg_dev->core_if->core_global_regs->gpvndctl),"GPVNDCTL"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(ggpio,&(otg_dev->core_if->core_global_regs->ggpio),"GGPIO"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(guid,&(otg_dev->core_if->core_global_regs->guid),"GUID"); ++DWC_OTG_DEVICE_ATTR_REG32_RO(gsnpsid,&(otg_dev->core_if->core_global_regs->gsnpsid),"GSNPSID"); ++DWC_OTG_DEVICE_ATTR_BITFIELD_RW(devspeed,&(otg_dev->core_if->dev_if->dev_global_regs->dcfg),0x3,0,"Device Speed"); ++DWC_OTG_DEVICE_ATTR_BITFIELD_RO(enumspeed,&(otg_dev->core_if->dev_if->dev_global_regs->dsts),0x6,1,"Device Enumeration Speed"); ++ ++DWC_OTG_DEVICE_ATTR_REG32_RO(hptxfsiz,&(otg_dev->core_if->core_global_regs->hptxfsiz),"HPTXFSIZ"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(hprt0,otg_dev->core_if->host_if->hprt0,"HPRT0"); ++ ++ ++/** ++ * @todo Add code to initiate the HNP. 
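The DWC_OTG_DEVICE_ATTR_BITFIELD_* instantiations above all expand to the same mask-and-shift handlers: show extracts (reg & mask) >> shift, and store shifts the user value into place and hands a clear/set pair to dwc_modify_reg32. A standalone sketch of that arithmetic, with no hardware access and a made-up register value, purely to make the math visible:

    #include <stdio.h>
    #include <stdint.h>

    /* Same arithmetic the generated show/store handlers perform for a field
     * such as hnpcapable (GUSBCFG bit 9): mask = 1 << 9, shift = 9. */
    static uint32_t field_show(uint32_t reg, uint32_t mask, int shift)
    {
        return (reg & mask) >> shift;
    }

    static uint32_t field_store(uint32_t reg, uint32_t mask, int shift, uint32_t val)
    {
        uint32_t set   = (val << shift) & mask;
        uint32_t clear = ((~val) << shift) & mask;
        /* dwc_modify_reg32(addr, clear, set) behaves as (reg & ~clear) | set */
        return (reg & ~clear) | set;
    }

    int main(void)
    {
        uint32_t gusbcfg = 0x00001600;                 /* invented register value */
        printf("hnpcapable = %u\n", field_show(gusbcfg, 1u << 9, 9));
        gusbcfg = field_store(gusbcfg, 1u << 9, 9, 0); /* clear the capability bit */
        printf("gusbcfg    = 0x%08x\n", gusbcfg);
        return 0;
    }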
++ */ ++/** ++ * Show the HNP status bit ++ */ ++static ssize_t hnp_show( struct device *_dev, struct device_attribute *attr, char *buf) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ gotgctl_data_t val; ++ val.d32 = dwc_read_reg32 (&(otg_dev->core_if->core_global_regs->gotgctl)); ++ return sprintf (buf, "HstNegScs = 0x%x\n", val.b.hstnegscs); ++} ++ ++/** ++ * Set the HNP Request bit ++ */ ++static ssize_t hnp_store( struct device *_dev, struct device_attribute *attr, const char *buf, ++ size_t count ) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ uint32_t in = simple_strtoul(buf, NULL, 16); ++ uint32_t *addr = (uint32_t *)&(otg_dev->core_if->core_global_regs->gotgctl); ++ gotgctl_data_t mem; ++ mem.d32 = dwc_read_reg32(addr); ++ mem.b.hnpreq = in; ++ dev_dbg(_dev, "Storing Address=0x%08x Data=0x%08x\n", (uint32_t)addr, mem.d32); ++ dwc_write_reg32(addr, mem.d32); ++ return count; ++} ++DEVICE_ATTR(hnp, 0644, hnp_show, hnp_store); ++ ++/** ++ * @todo Add code to initiate the SRP. ++ */ ++/** ++ * Show the SRP status bit ++ */ ++static ssize_t srp_show( struct device *_dev, struct device_attribute *attr, char *buf) ++{ ++#ifndef DWC_HOST_ONLY ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ gotgctl_data_t val; ++ val.d32 = dwc_read_reg32 (&(otg_dev->core_if->core_global_regs->gotgctl)); ++ return sprintf (buf, "SesReqScs = 0x%x\n", val.b.sesreqscs); ++#else ++ return sprintf(buf, "Host Only Mode!\n"); ++#endif ++} ++ ++/** ++ * Set the SRP Request bit ++ */ ++static ssize_t srp_store( struct device *_dev, struct device_attribute *attr, const char *buf, ++ size_t count ) ++{ ++#ifndef DWC_HOST_ONLY ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ dwc_otg_pcd_initiate_srp(otg_dev->pcd); ++#endif ++ return count; ++} ++DEVICE_ATTR(srp, 0644, srp_show, srp_store); ++ ++/** ++ * @todo Need to do more for power on/off? ++ */ ++/** ++ * Show the Bus Power status ++ */ ++static ssize_t buspower_show( struct device *_dev, struct device_attribute *attr, char *buf) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ hprt0_data_t val; ++ val.d32 = dwc_read_reg32 (otg_dev->core_if->host_if->hprt0); ++ return sprintf (buf, "Bus Power = 0x%x\n", val.b.prtpwr); ++} ++ ++ ++/** ++ * Set the Bus Power status ++ */ ++static ssize_t buspower_store( struct device *_dev, struct device_attribute *attr, const char *buf, ++ size_t count ) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ uint32_t on = simple_strtoul(buf, NULL, 16); ++ uint32_t *addr = (uint32_t *)otg_dev->core_if->host_if->hprt0; ++ hprt0_data_t mem; ++ ++ mem.d32 = dwc_read_reg32(addr); ++ mem.b.prtpwr = on; ++ ++ //dev_dbg(_dev, "Storing Address=0x%08x Data=0x%08x\n", (uint32_t)addr, mem.d32); ++ dwc_write_reg32(addr, mem.d32); ++ ++ return count; ++} ++DEVICE_ATTR(buspower, 0644, buspower_show, buspower_store); ++ ++/** ++ * @todo Need to do more for suspend? 
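The hnp, srp and buspower handlers above all follow the same union-based register pattern used throughout this file: types such as gotgctl_data_t and hprt0_data_t pair a raw .d32 word with a .b bitfield view, so a store handler can read .d32, change one field and write .d32 back. A minimal illustration of the idea; the field names and widths below are placeholders, not the real HPRT0 layout:

    #include <stdio.h>
    #include <stdint.h>

    /* Same shape as hprt0_data_t: raw word plus named fields.  The fields
     * here are invented for the sketch. */
    typedef union demo_reg {
        uint32_t d32;
        struct {
            unsigned prtconnsts : 1;   /* hypothetical bit 0 */
            unsigned prtpwr     : 1;   /* hypothetical bit 1 */
            unsigned reserved   : 30;
        } b;
    } demo_reg_t;

    int main(void)
    {
        demo_reg_t val;
        val.d32 = 0x00000001;     /* pretend this came from dwc_read_reg32() */
        val.b.prtpwr = 1;         /* modify one field, leave the rest untouched */
        printf("d32 = 0x%08x\n", val.d32);  /* 0x00000003 with LSB-first bitfields */
        return 0;
    }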
++ */ ++/** ++ * Show the Bus Suspend status ++ */ ++static ssize_t bussuspend_show( struct device *_dev, struct device_attribute *attr, char *buf) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ hprt0_data_t val; ++ val.d32 = dwc_read_reg32 (otg_dev->core_if->host_if->hprt0); ++ return sprintf (buf, "Bus Suspend = 0x%x\n", val.b.prtsusp); ++} ++ ++/** ++ * Set the Bus Suspend status ++ */ ++static ssize_t bussuspend_store( struct device *_dev, struct device_attribute *attr, const char *buf, ++ size_t count ) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ uint32_t in = simple_strtoul(buf, NULL, 16); ++ uint32_t *addr = (uint32_t *)otg_dev->core_if->host_if->hprt0; ++ hprt0_data_t mem; ++ mem.d32 = dwc_read_reg32(addr); ++ mem.b.prtsusp = in; ++ dev_dbg(_dev, "Storing Address=0x%08x Data=0x%08x\n", (uint32_t)addr, mem.d32); ++ dwc_write_reg32(addr, mem.d32); ++ return count; ++} ++DEVICE_ATTR(bussuspend, 0644, bussuspend_show, bussuspend_store); ++ ++/** ++ * Show the status of Remote Wakeup. ++ */ ++static ssize_t remote_wakeup_show( struct device *_dev, struct device_attribute *attr, char *buf) ++{ ++#ifndef DWC_HOST_ONLY ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ dctl_data_t val; ++ val.d32 = dwc_read_reg32( &otg_dev->core_if->dev_if->dev_global_regs->dctl); ++ return sprintf( buf, "Remote Wakeup = %d Enabled = %d\n", ++ val.b.rmtwkupsig, otg_dev->pcd->remote_wakeup_enable); ++#else ++ return sprintf(buf, "Host Only Mode!\n"); ++#endif ++} ++ ++/** ++ * Initiate a remote wakeup of the host. The Device control register ++ * Remote Wakeup Signal bit is written if the PCD Remote wakeup enable ++ * flag is set. ++ * ++ */ ++static ssize_t remote_wakeup_store( struct device *_dev, struct device_attribute *attr, const char *buf, ++ size_t count ) ++{ ++#ifndef DWC_HOST_ONLY ++ uint32_t val = simple_strtoul(buf, NULL, 16); ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ if (val&1) { ++ dwc_otg_pcd_remote_wakeup(otg_dev->pcd, 1); ++ } ++ else { ++ dwc_otg_pcd_remote_wakeup(otg_dev->pcd, 0); ++ } ++#endif ++ return count; ++} ++DEVICE_ATTR(remote_wakeup, S_IRUGO|S_IWUSR, remote_wakeup_show, ++ remote_wakeup_store); ++ ++/** ++ * Dump global registers and either host or device registers (depending on the ++ * current mode of the core). ++ */ ++static ssize_t regdump_show( struct device *_dev, struct device_attribute *attr, char *buf) ++{ ++#ifdef DEBUG ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ printk("%s otg_dev=0x%p\n", __FUNCTION__, otg_dev); ++ ++ dwc_otg_dump_global_registers( otg_dev->core_if); ++ if (dwc_otg_is_host_mode(otg_dev->core_if)) { ++ dwc_otg_dump_host_registers( otg_dev->core_if); ++ } else { ++ dwc_otg_dump_dev_registers( otg_dev->core_if); ++ } ++#endif ++ ++ return sprintf( buf, "Register Dump\n" ); ++} ++ ++DEVICE_ATTR(regdump, S_IRUGO|S_IWUSR, regdump_show, 0); ++ ++/** ++ * Dump the current hcd state. ++ */ ++static ssize_t hcddump_show( struct device *_dev, struct device_attribute *attr, char *buf) ++{ ++#ifndef DWC_DEVICE_ONLY ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ dwc_otg_hcd_dump_state(otg_dev->hcd); ++#endif ++ return sprintf( buf, "HCD Dump\n" ); ++} ++ ++DEVICE_ATTR(hcddump, S_IRUGO|S_IWUSR, hcddump_show, 0); ++ ++/** ++ * Dump the average frame remaining at SOF. This can be used to ++ * determine average interrupt latency. Frame remaining is also shown for ++ * start transfer and two additional sample points. 
++ */ ++static ssize_t hcd_frrem_show( struct device *_dev, struct device_attribute *attr, char *buf) ++{ ++#ifndef DWC_DEVICE_ONLY ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ dwc_otg_hcd_dump_frrem(otg_dev->hcd); ++#endif ++ return sprintf( buf, "HCD Dump Frame Remaining\n" ); ++} ++ ++DEVICE_ATTR(hcd_frrem, S_IRUGO|S_IWUSR, hcd_frrem_show, 0); ++ ++/** ++ * Displays the time required to read the GNPTXFSIZ register many times (the ++ * output shows the number of times the register is read). ++ */ ++#define RW_REG_COUNT 10000000 ++#define MSEC_PER_JIFFIE 1000/HZ ++static ssize_t rd_reg_test_show( struct device *_dev, struct device_attribute *attr, char *buf) ++{ ++ int i; ++ int time; ++ int start_jiffies; ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ ++ printk("HZ %d, MSEC_PER_JIFFIE %d, loops_per_jiffy %lu\n", ++ HZ, MSEC_PER_JIFFIE, loops_per_jiffy); ++ start_jiffies = jiffies; ++ for (i = 0; i < RW_REG_COUNT; i++) { ++ dwc_read_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz); ++ } ++ time = jiffies - start_jiffies; ++ return sprintf( buf, "Time to read GNPTXFSIZ reg %d times: %d msecs (%d jiffies)\n", ++ RW_REG_COUNT, time * MSEC_PER_JIFFIE, time ); ++} ++ ++DEVICE_ATTR(rd_reg_test, S_IRUGO|S_IWUSR, rd_reg_test_show, 0); ++ ++/** ++ * Displays the time required to write the GNPTXFSIZ register many times (the ++ * output shows the number of times the register is written). ++ */ ++static ssize_t wr_reg_test_show( struct device *_dev, struct device_attribute *attr, char *buf) ++{ ++ int i; ++ int time; ++ int start_jiffies; ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ uint32_t reg_val; ++ ++ printk("HZ %d, MSEC_PER_JIFFIE %d, loops_per_jiffy %lu\n", ++ HZ, MSEC_PER_JIFFIE, loops_per_jiffy); ++ reg_val = dwc_read_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz); ++ start_jiffies = jiffies; ++ for (i = 0; i < RW_REG_COUNT; i++) { ++ dwc_write_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz, reg_val); ++ } ++ time = jiffies - start_jiffies; ++ return sprintf( buf, "Time to write GNPTXFSIZ reg %d times: %d msecs (%d jiffies)\n", ++ RW_REG_COUNT, time * MSEC_PER_JIFFIE, time); ++} ++ ++DEVICE_ATTR(wr_reg_test, S_IRUGO|S_IWUSR, wr_reg_test_show, 0); ++/**@}*/ ++ ++/** ++ * Create the device files ++ */ ++void dwc_otg_attr_create (struct device *_dev) ++{ ++ int retval; ++ ++ retval = device_create_file(_dev, &dev_attr_regoffset); ++ retval += device_create_file(_dev, &dev_attr_regvalue); ++ retval += device_create_file(_dev, &dev_attr_mode); ++ retval += device_create_file(_dev, &dev_attr_hnpcapable); ++ retval += device_create_file(_dev, &dev_attr_srpcapable); ++ retval += device_create_file(_dev, &dev_attr_hnp); ++ retval += device_create_file(_dev, &dev_attr_srp); ++ retval += device_create_file(_dev, &dev_attr_buspower); ++ retval += device_create_file(_dev, &dev_attr_bussuspend); ++ retval += device_create_file(_dev, &dev_attr_busconnected); ++ retval += device_create_file(_dev, &dev_attr_gotgctl); ++ retval += device_create_file(_dev, &dev_attr_gusbcfg); ++ retval += device_create_file(_dev, &dev_attr_grxfsiz); ++ retval += device_create_file(_dev, &dev_attr_gnptxfsiz); ++ retval += device_create_file(_dev, &dev_attr_gpvndctl); ++ retval += device_create_file(_dev, &dev_attr_ggpio); ++ retval += device_create_file(_dev, &dev_attr_guid); ++ retval += device_create_file(_dev, &dev_attr_gsnpsid); ++ retval += device_create_file(_dev, &dev_attr_devspeed); ++ retval += device_create_file(_dev, &dev_attr_enumspeed); ++ retval += 
device_create_file(_dev, &dev_attr_hptxfsiz); ++ retval += device_create_file(_dev, &dev_attr_hprt0); ++ retval += device_create_file(_dev, &dev_attr_remote_wakeup); ++ retval += device_create_file(_dev, &dev_attr_regdump); ++ retval += device_create_file(_dev, &dev_attr_hcddump); ++ retval += device_create_file(_dev, &dev_attr_hcd_frrem); ++ retval += device_create_file(_dev, &dev_attr_rd_reg_test); ++ retval += device_create_file(_dev, &dev_attr_wr_reg_test); ++ ++ if(retval != 0) ++ { ++ DWC_PRINT("cannot create sysfs device files.\n"); ++ // DWC_PRINT("killing own sysfs device files!\n"); ++ dwc_otg_attr_remove(_dev); ++ } ++} ++ ++/** ++ * Remove the device files ++ */ ++void dwc_otg_attr_remove (struct device *_dev) ++{ ++ device_remove_file(_dev, &dev_attr_regoffset); ++ device_remove_file(_dev, &dev_attr_regvalue); ++ device_remove_file(_dev, &dev_attr_mode); ++ device_remove_file(_dev, &dev_attr_hnpcapable); ++ device_remove_file(_dev, &dev_attr_srpcapable); ++ device_remove_file(_dev, &dev_attr_hnp); ++ device_remove_file(_dev, &dev_attr_srp); ++ device_remove_file(_dev, &dev_attr_buspower); ++ device_remove_file(_dev, &dev_attr_bussuspend); ++ device_remove_file(_dev, &dev_attr_busconnected); ++ device_remove_file(_dev, &dev_attr_gotgctl); ++ device_remove_file(_dev, &dev_attr_gusbcfg); ++ device_remove_file(_dev, &dev_attr_grxfsiz); ++ device_remove_file(_dev, &dev_attr_gnptxfsiz); ++ device_remove_file(_dev, &dev_attr_gpvndctl); ++ device_remove_file(_dev, &dev_attr_ggpio); ++ device_remove_file(_dev, &dev_attr_guid); ++ device_remove_file(_dev, &dev_attr_gsnpsid); ++ device_remove_file(_dev, &dev_attr_devspeed); ++ device_remove_file(_dev, &dev_attr_enumspeed); ++ device_remove_file(_dev, &dev_attr_hptxfsiz); ++ device_remove_file(_dev, &dev_attr_hprt0); ++ device_remove_file(_dev, &dev_attr_remote_wakeup); ++ device_remove_file(_dev, &dev_attr_regdump); ++ device_remove_file(_dev, &dev_attr_hcddump); ++ device_remove_file(_dev, &dev_attr_hcd_frrem); ++ device_remove_file(_dev, &dev_attr_rd_reg_test); ++ device_remove_file(_dev, &dev_attr_wr_reg_test); ++} +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_attr.h +@@ -0,0 +1,67 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_attr.h $ ++ * $Revision: 1.1.1.1 $ ++ * $Date: 2009-04-17 06:15:34 $ ++ * $Change: 510275 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. 
++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++#if !defined(__DWC_OTG_ATTR_H__) ++#define __DWC_OTG_ATTR_H__ ++ ++/** @file ++ * This file contains the interface to the Linux device attributes. ++ */ ++extern struct device_attribute dev_attr_regoffset; ++extern struct device_attribute dev_attr_regvalue; ++ ++extern struct device_attribute dev_attr_mode; ++extern struct device_attribute dev_attr_hnpcapable; ++extern struct device_attribute dev_attr_srpcapable; ++extern struct device_attribute dev_attr_hnp; ++extern struct device_attribute dev_attr_srp; ++extern struct device_attribute dev_attr_buspower; ++extern struct device_attribute dev_attr_bussuspend; ++extern struct device_attribute dev_attr_busconnected; ++extern struct device_attribute dev_attr_gotgctl; ++extern struct device_attribute dev_attr_gusbcfg; ++extern struct device_attribute dev_attr_grxfsiz; ++extern struct device_attribute dev_attr_gnptxfsiz; ++extern struct device_attribute dev_attr_gpvndctl; ++extern struct device_attribute dev_attr_ggpio; ++extern struct device_attribute dev_attr_guid; ++extern struct device_attribute dev_attr_gsnpsid; ++extern struct device_attribute dev_attr_devspeed; ++extern struct device_attribute dev_attr_enumspeed; ++extern struct device_attribute dev_attr_hptxfsiz; ++extern struct device_attribute dev_attr_hprt0; ++ ++void dwc_otg_attr_create (struct device *_dev); ++void dwc_otg_attr_remove (struct device *_dev); ++ ++#endif +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_cil.c +@@ -0,0 +1,3025 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_cil.c $ ++ * $Revision: 1.1.1.1 $ ++ * $Date: 2009-04-17 06:15:34 $ ++ * $Change: 631780 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. 
++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++/** @file ++ * ++ * The Core Interface Layer provides basic services for accessing and ++ * managing the DWC_otg hardware. These services are used by both the ++ * Host Controller Driver and the Peripheral Controller Driver. ++ * ++ * The CIL manages the memory map for the core so that the HCD and PCD ++ * don't have to do this separately. It also handles basic tasks like ++ * reading/writing the registers and data FIFOs in the controller. ++ * Some of the data access functions provide encapsulation of several ++ * operations required to perform a task, such as writing multiple ++ * registers to start a transfer. Finally, the CIL performs basic ++ * services that are not specific to either the host or device modes ++ * of operation. These services include management of the OTG Host ++ * Negotiation Protocol (HNP) and Session Request Protocol (SRP). A ++ * Diagnostic API is also provided to allow testing of the controller ++ * hardware. ++ * ++ * The Core Interface Layer has the following requirements: ++ * - Provides basic controller operations. ++ * - Minimal use of OS services. ++ * - The OS services used will be abstracted by using inline functions ++ * or macros. ++ * ++ */ ++#include <asm/unaligned.h> ++ ++#ifdef DEBUG ++#include <linux/jiffies.h> ++#endif ++ ++#include "dwc_otg_plat.h" ++ ++#include "dwc_otg_regs.h" ++#include "dwc_otg_cil.h" ++ ++/** ++ * This function is called to initialize the DWC_otg CSR data ++ * structures. The register addresses in the device and host ++ * structures are initialized from the base address supplied by the ++ * caller. The calling function must make the OS calls to get the ++ * base address of the DWC_otg controller registers. The core_params ++ * argument holds the parameters that specify how the core should be ++ * configured. 
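In other words, the caller maps the controller's register window itself and hands the virtual base address plus a filled-in parameter block to dwc_otg_cil_init(). A hedged probe-time sketch; the physical base address, window size, parameter block and the assumption that dwc_otg_cil.h exposes the prototype are all placeholders for illustration, not values taken from this patch:

    /* Probe-time sketch only: DEMO_OTG_PHYS_BASE/DEMO_OTG_REG_SIZE and the
     * statically allocated parameter block are assumptions. */
    #include <asm/io.h>
    #include "dwc_otg_cil.h"

    #define DEMO_OTG_PHYS_BASE 0x1e101000UL    /* placeholder physical address */
    #define DEMO_OTG_REG_SIZE  0x00040000UL    /* 256 KB register window */

    static dwc_otg_core_params_t demo_params;  /* normally filled from module params */

    static dwc_otg_core_if_t *demo_cil_bringup(void)
    {
        uint32_t *base = (uint32_t *)ioremap(DEMO_OTG_PHYS_BASE, DEMO_OTG_REG_SIZE);

        if (!base)
            return NULL;

        /* dwc_otg_cil_init() only carves this base into per-block register
         * pointers; mapping (and later iounmap) stays with the caller. */
        return dwc_otg_cil_init(base, &demo_params);
    }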
++ * ++ * @param[in] _reg_base_addr Base address of DWC_otg core registers ++ * @param[in] _core_params Pointer to the core configuration parameters ++ * ++ */ ++dwc_otg_core_if_t *dwc_otg_cil_init(const uint32_t *_reg_base_addr, ++ dwc_otg_core_params_t *_core_params) ++{ ++ dwc_otg_core_if_t *core_if = 0; ++ dwc_otg_dev_if_t *dev_if = 0; ++ dwc_otg_host_if_t *host_if = 0; ++ uint8_t *reg_base = (uint8_t *)_reg_base_addr; ++ int i = 0; ++ ++ DWC_DEBUGPL(DBG_CILV, "%s(%p,%p)\n", __func__, _reg_base_addr, _core_params); ++ ++ core_if = kmalloc( sizeof(dwc_otg_core_if_t), GFP_KERNEL); ++ if (core_if == 0) { ++ DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_core_if_t failed\n"); ++ return 0; ++ } ++ memset(core_if, 0, sizeof(dwc_otg_core_if_t)); ++ ++ core_if->core_params = _core_params; ++ core_if->core_global_regs = (dwc_otg_core_global_regs_t *)reg_base; ++ /* ++ * Allocate the Device Mode structures. ++ */ ++ dev_if = kmalloc( sizeof(dwc_otg_dev_if_t), GFP_KERNEL); ++ if (dev_if == 0) { ++ DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_dev_if_t failed\n"); ++ kfree( core_if ); ++ return 0; ++ } ++ ++ dev_if->dev_global_regs = ++ (dwc_otg_device_global_regs_t *)(reg_base + DWC_DEV_GLOBAL_REG_OFFSET); ++ ++ for (i=0; i<MAX_EPS_CHANNELS; i++) { ++ dev_if->in_ep_regs[i] = (dwc_otg_dev_in_ep_regs_t *) ++ (reg_base + DWC_DEV_IN_EP_REG_OFFSET + ++ (i * DWC_EP_REG_OFFSET)); ++ ++ dev_if->out_ep_regs[i] = (dwc_otg_dev_out_ep_regs_t *) ++ (reg_base + DWC_DEV_OUT_EP_REG_OFFSET + ++ (i * DWC_EP_REG_OFFSET)); ++ DWC_DEBUGPL(DBG_CILV, "in_ep_regs[%d]->diepctl=%p\n", ++ i, &dev_if->in_ep_regs[i]->diepctl); ++ DWC_DEBUGPL(DBG_CILV, "out_ep_regs[%d]->doepctl=%p\n", ++ i, &dev_if->out_ep_regs[i]->doepctl); ++ } ++ dev_if->speed = 0; // unknown ++ //dev_if->num_eps = MAX_EPS_CHANNELS; ++ //dev_if->num_perio_eps = 0; ++ ++ core_if->dev_if = dev_if; ++ /* ++ * Allocate the Host Mode structures. ++ */ ++ host_if = kmalloc( sizeof(dwc_otg_host_if_t), GFP_KERNEL); ++ if (host_if == 0) { ++ DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_host_if_t failed\n"); ++ kfree( dev_if ); ++ kfree( core_if ); ++ return 0; ++ } ++ ++ host_if->host_global_regs = (dwc_otg_host_global_regs_t *) ++ (reg_base + DWC_OTG_HOST_GLOBAL_REG_OFFSET); ++ host_if->hprt0 = (uint32_t*)(reg_base + DWC_OTG_HOST_PORT_REGS_OFFSET); ++ for (i=0; i<MAX_EPS_CHANNELS; i++) { ++ host_if->hc_regs[i] = (dwc_otg_hc_regs_t *) ++ (reg_base + DWC_OTG_HOST_CHAN_REGS_OFFSET + ++ (i * DWC_OTG_CHAN_REGS_OFFSET)); ++ DWC_DEBUGPL(DBG_CILV, "hc_reg[%d]->hcchar=%p\n", ++ i, &host_if->hc_regs[i]->hcchar); ++ } ++ host_if->num_host_channels = MAX_EPS_CHANNELS; ++ core_if->host_if = host_if; ++ ++ for (i=0; i<MAX_EPS_CHANNELS; i++) { ++ core_if->data_fifo[i] = ++ (uint32_t *)(reg_base + DWC_OTG_DATA_FIFO_OFFSET + ++ (i * DWC_OTG_DATA_FIFO_SIZE)); ++ DWC_DEBUGPL(DBG_CILV, "data_fifo[%d]=0x%08x\n", ++ i, (unsigned)core_if->data_fifo[i]); ++ } // for loop. ++ ++ core_if->pcgcctl = (uint32_t*)(reg_base + DWC_OTG_PCGCCTL_OFFSET); ++ ++ /* ++ * Store the contents of the hardware configuration registers here for ++ * easy access later. 
++ */ ++ core_if->hwcfg1.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg1); ++ core_if->hwcfg2.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg2); ++ core_if->hwcfg3.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg3); ++ core_if->hwcfg4.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg4); ++ ++ DWC_DEBUGPL(DBG_CILV,"hwcfg1=%08x\n",core_if->hwcfg1.d32); ++ DWC_DEBUGPL(DBG_CILV,"hwcfg2=%08x\n",core_if->hwcfg2.d32); ++ DWC_DEBUGPL(DBG_CILV,"hwcfg3=%08x\n",core_if->hwcfg3.d32); ++ DWC_DEBUGPL(DBG_CILV,"hwcfg4=%08x\n",core_if->hwcfg4.d32); ++ ++ ++ DWC_DEBUGPL(DBG_CILV,"op_mode=%0x\n",core_if->hwcfg2.b.op_mode); ++ DWC_DEBUGPL(DBG_CILV,"arch=%0x\n",core_if->hwcfg2.b.architecture); ++ DWC_DEBUGPL(DBG_CILV,"num_dev_ep=%d\n",core_if->hwcfg2.b.num_dev_ep); ++ DWC_DEBUGPL(DBG_CILV,"num_host_chan=%d\n",core_if->hwcfg2.b.num_host_chan); ++ DWC_DEBUGPL(DBG_CILV,"nonperio_tx_q_depth=0x%0x\n",core_if->hwcfg2.b.nonperio_tx_q_depth); ++ DWC_DEBUGPL(DBG_CILV,"host_perio_tx_q_depth=0x%0x\n",core_if->hwcfg2.b.host_perio_tx_q_depth); ++ DWC_DEBUGPL(DBG_CILV,"dev_token_q_depth=0x%0x\n",core_if->hwcfg2.b.dev_token_q_depth); ++ ++ DWC_DEBUGPL(DBG_CILV,"Total FIFO SZ=%d\n", core_if->hwcfg3.b.dfifo_depth); ++ DWC_DEBUGPL(DBG_CILV,"xfer_size_cntr_width=%0x\n", core_if->hwcfg3.b.xfer_size_cntr_width); ++ ++ /* ++ * Set the SRP sucess bit for FS-I2c ++ */ ++ core_if->srp_success = 0; ++ core_if->srp_timer_started = 0; ++ ++ return core_if; ++} ++/** ++ * This function frees the structures allocated by dwc_otg_cil_init(). ++ * ++ * @param[in] _core_if The core interface pointer returned from ++ * dwc_otg_cil_init(). ++ * ++ */ ++void dwc_otg_cil_remove( dwc_otg_core_if_t *_core_if ) ++{ ++ /* Disable all interrupts */ ++ dwc_modify_reg32( &_core_if->core_global_regs->gahbcfg, 1, 0); ++ dwc_write_reg32( &_core_if->core_global_regs->gintmsk, 0); ++ ++ if ( _core_if->dev_if ) { ++ kfree( _core_if->dev_if ); ++ } ++ if ( _core_if->host_if ) { ++ kfree( _core_if->host_if ); ++ } ++ kfree( _core_if ); ++} ++ ++/** ++ * This function enables the controller's Global Interrupt in the AHB Config ++ * register. ++ * ++ * @param[in] _core_if Programming view of DWC_otg controller. ++ */ ++extern void dwc_otg_enable_global_interrupts( dwc_otg_core_if_t *_core_if ) ++{ ++ gahbcfg_data_t ahbcfg = { .d32 = 0}; ++ ahbcfg.b.glblintrmsk = 1; /* Enable interrupts */ ++ dwc_modify_reg32(&_core_if->core_global_regs->gahbcfg, 0, ahbcfg.d32); ++} ++/** ++ * This function disables the controller's Global Interrupt in the AHB Config ++ * register. ++ * ++ * @param[in] _core_if Programming view of DWC_otg controller. ++ */ ++extern void dwc_otg_disable_global_interrupts( dwc_otg_core_if_t *_core_if ) ++{ ++ gahbcfg_data_t ahbcfg = { .d32 = 0}; ++ ahbcfg.b.glblintrmsk = 1; /* Enable interrupts */ ++ dwc_modify_reg32(&_core_if->core_global_regs->gahbcfg, ahbcfg.d32, 0); ++} ++ ++/** ++ * This function initializes the commmon interrupts, used in both ++ * device and host modes. ++ * ++ * @param[in] _core_if Programming view of the DWC_otg controller ++ * ++ */ ++static void dwc_otg_enable_common_interrupts(dwc_otg_core_if_t *_core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = ++ _core_if->core_global_regs; ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ /* Clear any pending OTG Interrupts */ ++ dwc_write_reg32( &global_regs->gotgint, 0xFFFFFFFF); ++ /* Clear any pending interrupts */ ++ dwc_write_reg32( &global_regs->gintsts, 0xFFFFFFFF); ++ /* ++ * Enable the interrupts in the GINTMSK. 
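The enable/disable pair above is the clearest illustration of the dwc_modify_reg32(addr, clear_mask, set_mask) convention used throughout the driver: enabling passes the bit in the set mask, disabling passes the same bit in the clear mask. A sketch of the read-modify-write this implies, stated as an assumption consistent with every caller in this file:

    #include <stdio.h>
    #include <stdint.h>

    /* Presumed semantics of dwc_modify_reg32(): clear the bits in clear_mask,
     * then set the bits in set_mask, in one read-modify-write cycle. */
    static uint32_t modify_reg32(uint32_t old, uint32_t clear_mask, uint32_t set_mask)
    {
        return (old & ~clear_mask) | set_mask;
    }

    int main(void)
    {
        uint32_t gahbcfg = 0x00000000;
        uint32_t glblintrmsk = 1u << 0;   /* GAHBCFG.GlblIntrMsk, bit 0 as used above */

        gahbcfg = modify_reg32(gahbcfg, 0, glblintrmsk);    /* enable  */
        printf("enabled:  0x%08x\n", gahbcfg);
        gahbcfg = modify_reg32(gahbcfg, glblintrmsk, 0);    /* disable */
        printf("disabled: 0x%08x\n", gahbcfg);
        return 0;
    }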
++ */ ++ intr_mask.b.modemismatch = 1; ++ intr_mask.b.otgintr = 1; ++ if (!_core_if->dma_enable) { ++ intr_mask.b.rxstsqlvl = 1; ++ } ++ intr_mask.b.conidstschng = 1; ++ intr_mask.b.wkupintr = 1; ++ intr_mask.b.disconnect = 1; ++ intr_mask.b.usbsuspend = 1; ++ intr_mask.b.sessreqintr = 1; ++ dwc_write_reg32( &global_regs->gintmsk, intr_mask.d32); ++} ++ ++/** ++ * Initializes the FSLSPClkSel field of the HCFG register depending on the PHY ++ * type. ++ */ ++static void init_fslspclksel(dwc_otg_core_if_t *_core_if) ++{ ++ uint32_t val; ++ hcfg_data_t hcfg; ++ ++ if (((_core_if->hwcfg2.b.hs_phy_type == 2) && ++ (_core_if->hwcfg2.b.fs_phy_type == 1) && ++ (_core_if->core_params->ulpi_fs_ls)) || ++ (_core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) ++ { ++ /* Full speed PHY */ ++ val = DWC_HCFG_48_MHZ; ++ } else { ++ /* High speed PHY running at full speed or high speed */ ++ val = DWC_HCFG_30_60_MHZ; ++ } ++ ++ DWC_DEBUGPL(DBG_CIL, "Initializing HCFG.FSLSPClkSel to 0x%1x\n", val); ++ hcfg.d32 = dwc_read_reg32(&_core_if->host_if->host_global_regs->hcfg); ++ hcfg.b.fslspclksel = val; ++ dwc_write_reg32(&_core_if->host_if->host_global_regs->hcfg, hcfg.d32); ++} ++ ++/** ++ * Initializes the DevSpd field of the DCFG register depending on the PHY type ++ * and the enumeration speed of the device. ++ */ ++static void init_devspd(dwc_otg_core_if_t *_core_if) ++{ ++ uint32_t val; ++ dcfg_data_t dcfg; ++ ++ if (((_core_if->hwcfg2.b.hs_phy_type == 2) && ++ (_core_if->hwcfg2.b.fs_phy_type == 1) && ++ (_core_if->core_params->ulpi_fs_ls)) || ++ (_core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) ++ { ++ /* Full speed PHY */ ++ val = 0x3; ++ } else if (_core_if->core_params->speed == DWC_SPEED_PARAM_FULL) { ++ /* High speed PHY running at full speed */ ++ val = 0x1; ++ } else { ++ /* High speed PHY running at high speed */ ++ val = 0x0; ++ } ++ ++ DWC_DEBUGPL(DBG_CIL, "Initializing DCFG.DevSpd to 0x%1x\n", val); ++ dcfg.d32 = dwc_read_reg32(&_core_if->dev_if->dev_global_regs->dcfg); ++ dcfg.b.devspd = val; ++ dwc_write_reg32(&_core_if->dev_if->dev_global_regs->dcfg, dcfg.d32); ++} ++ ++/** ++ * This function calculates the number of IN EPS ++ * using GHWCFG1 and GHWCFG2 registers values ++ * ++ * @param _pcd the pcd structure. ++ */ ++static uint32_t calc_num_in_eps(dwc_otg_core_if_t * _core_if) ++{ ++ uint32_t num_in_eps = 0; ++ uint32_t num_eps = _core_if->hwcfg2.b.num_dev_ep; ++ uint32_t hwcfg1 = _core_if->hwcfg1.d32 >> 2; ++ uint32_t num_tx_fifos = _core_if->hwcfg4.b.num_in_eps; ++ int i; ++ for (i = 0; i < num_eps; ++i) { ++ if (!(hwcfg1 & 0x1)) ++ num_in_eps++; ++ hwcfg1 >>= 2; ++ } ++ if (_core_if->hwcfg4.b.ded_fifo_en) { ++ num_in_eps = (num_in_eps > num_tx_fifos) ? num_tx_fifos : num_in_eps; ++ } ++ return num_in_eps; ++} ++ ++ ++/** ++ * This function calculates the number of OUT EPS ++ * using GHWCFG1 and GHWCFG2 registers values ++ * ++ * @param _pcd the pcd structure. ++ */ ++static uint32_t calc_num_out_eps(dwc_otg_core_if_t * _core_if) ++{ ++ uint32_t num_out_eps = 0; ++ uint32_t num_eps = _core_if->hwcfg2.b.num_dev_ep; ++ uint32_t hwcfg1 = _core_if->hwcfg1.d32 >> 2; ++ int i; ++ for (i = 0; i < num_eps; ++i) { ++ if (!(hwcfg1 & 0x2)) ++ num_out_eps++; ++ hwcfg1 >>= 2; ++ } ++ return num_out_eps; ++} ++/** ++ * This function initializes the DWC_otg controller registers and ++ * prepares the core for device mode or host mode operation. 
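The two calc_num_*_eps() helpers above walk GHWCFG1 two bits at a time, skipping the first field (the `>> 2`) and counting an IN endpoint when bit 0 of a field is clear and an OUT endpoint when bit 1 is clear, with the IN count optionally clamped to the number of dedicated Tx FIFOs. A standalone restatement of that walk with an invented GHWCFG1 value, purely to make the loop visible:

    #include <stdio.h>
    #include <stdint.h>

    /* Mirrors calc_num_in_eps()/calc_num_out_eps(): one 2-bit field per
     * endpoint, field 0 skipped, bit0 clear => counted as IN, bit1 clear
     * => counted as OUT.  The hwcfg1 and num_eps values are invented. */
    int main(void)
    {
        uint32_t hwcfg1  = 0x00000264 >> 2;   /* skip endpoint 0's field */
        int      num_eps = 4;                 /* the driver takes this from GHWCFG2 */
        int      num_in = 0, num_out = 0, i;

        for (i = 0; i < num_eps; i++) {
            if (!(hwcfg1 & 0x1))
                num_in++;
            if (!(hwcfg1 & 0x2))
                num_out++;
            hwcfg1 >>= 2;
        }
        printf("IN eps: %d, OUT eps: %d\n", num_in, num_out);
        return 0;
    }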
++ * ++ * @param _core_if Programming view of the DWC_otg controller ++ * ++ */ ++void dwc_otg_core_init(dwc_otg_core_if_t *_core_if) ++{ ++ dwc_otg_core_global_regs_t * global_regs = _core_if->core_global_regs; ++ dwc_otg_dev_if_t *dev_if = _core_if->dev_if; ++ int i = 0; ++ gahbcfg_data_t ahbcfg = { .d32 = 0}; ++ gusbcfg_data_t usbcfg = { .d32 = 0 }; ++ gi2cctl_data_t i2cctl = {.d32 = 0}; ++ ++ DWC_DEBUGPL(DBG_CILV, "dwc_otg_core_init(%p)\n",_core_if); ++ ++ /* Common Initialization */ ++ ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ DWC_DEBUGPL(DBG_CIL, "USB config register: 0x%08x\n", usbcfg.d32); ++ ++ /* Program the ULPI External VBUS bit if needed */ ++ //usbcfg.b.ulpi_ext_vbus_drv = 1; ++ //usbcfg.b.ulpi_ext_vbus_drv = 0; ++ usbcfg.b.ulpi_ext_vbus_drv = ++ (_core_if->core_params->phy_ulpi_ext_vbus == DWC_PHY_ULPI_EXTERNAL_VBUS) ? 1 : 0; ++ ++ /* Set external TS Dline pulsing */ ++ usbcfg.b.term_sel_dl_pulse = (_core_if->core_params->ts_dline == 1) ? 1 : 0; ++ dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32); ++ ++ /* Reset the Controller */ ++ dwc_otg_core_reset( _core_if ); ++ ++ /* Initialize parameters from Hardware configuration registers. */ ++#if 0 ++ dev_if->num_eps = _core_if->hwcfg2.b.num_dev_ep; ++ dev_if->num_perio_eps = _core_if->hwcfg4.b.num_dev_perio_in_ep; ++#else ++ dev_if->num_in_eps = calc_num_in_eps(_core_if); ++ dev_if->num_out_eps = calc_num_out_eps(_core_if); ++#endif ++ DWC_DEBUGPL(DBG_CIL, "num_dev_perio_in_ep=%d\n", ++ _core_if->hwcfg4.b.num_dev_perio_in_ep); ++ DWC_DEBUGPL(DBG_CIL, "Is power optimization enabled? %s\n", ++ _core_if->hwcfg4.b.power_optimiz ? "Yes" : "No"); ++ DWC_DEBUGPL(DBG_CIL, "vbus_valid filter enabled? %s\n", ++ _core_if->hwcfg4.b.vbus_valid_filt_en ? "Yes" : "No"); ++ DWC_DEBUGPL(DBG_CIL, "iddig filter enabled? %s\n", ++ _core_if->hwcfg4.b.iddig_filt_en ? "Yes" : "No"); ++ ++ DWC_DEBUGPL(DBG_CIL, "num_dev_perio_in_ep=%d\n",_core_if->hwcfg4.b.num_dev_perio_in_ep); ++ for (i=0; i < _core_if->hwcfg4.b.num_dev_perio_in_ep; i++) { ++ dev_if->perio_tx_fifo_size[i] = ++ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16; ++ DWC_DEBUGPL(DBG_CIL, "Periodic Tx FIFO SZ #%d=0x%0x\n", i, ++ dev_if->perio_tx_fifo_size[i]); ++ } ++ for (i = 0; i < _core_if->hwcfg4.b.num_in_eps; i++) { ++ dev_if->tx_fifo_size[i] = ++ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16; ++ DWC_DEBUGPL(DBG_CIL, "Tx FIFO SZ #%d=0x%0x\n", i, ++ dev_if->perio_tx_fifo_size[i]); ++ } ++ ++ _core_if->total_fifo_size = _core_if->hwcfg3.b.dfifo_depth; ++ _core_if->rx_fifo_size = dwc_read_reg32(&global_regs->grxfsiz); ++ _core_if->nperio_tx_fifo_size = dwc_read_reg32(&global_regs->gnptxfsiz) >> 16; ++ ++ DWC_DEBUGPL(DBG_CIL, "Total FIFO SZ=%d\n", _core_if->total_fifo_size); ++ DWC_DEBUGPL(DBG_CIL, "Rx FIFO SZ=%d\n", _core_if->rx_fifo_size); ++ DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO SZ=%d\n", _core_if->nperio_tx_fifo_size); ++ ++ /* This programming sequence needs to happen in FS mode before any other ++ * programming occurs */ ++ if ((_core_if->core_params->speed == DWC_SPEED_PARAM_FULL) && ++ (_core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) { ++ /* If FS mode with FS PHY */ ++ ++ /* core_init() is now called on every switch so only call the ++ * following for the first time through. 
*/ ++ if (!_core_if->phy_init_done) { ++ _core_if->phy_init_done = 1; ++ DWC_DEBUGPL(DBG_CIL, "FS_PHY detected\n"); ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ usbcfg.b.physel = 1; ++ dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32); ++ ++ /* Reset after a PHY select */ ++ dwc_otg_core_reset( _core_if ); ++ } ++ ++ /* Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also ++ * do this on HNP Dev/Host mode switches (done in dev_init and ++ * host_init). */ ++ if (dwc_otg_is_host_mode(_core_if)) { ++ DWC_DEBUGPL(DBG_CIL, "host mode\n"); ++ init_fslspclksel(_core_if); ++ } else { ++ DWC_DEBUGPL(DBG_CIL, "device mode\n"); ++ init_devspd(_core_if); ++ } ++ ++ if (_core_if->core_params->i2c_enable) { ++ DWC_DEBUGPL(DBG_CIL, "FS_PHY Enabling I2c\n"); ++ /* Program GUSBCFG.OtgUtmifsSel to I2C */ ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ usbcfg.b.otgutmifssel = 1; ++ dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32); ++ ++ /* Program GI2CCTL.I2CEn */ ++ i2cctl.d32 = dwc_read_reg32(&global_regs->gi2cctl); ++ i2cctl.b.i2cdevaddr = 1; ++ i2cctl.b.i2cen = 0; ++ dwc_write_reg32 (&global_regs->gi2cctl, i2cctl.d32); ++ i2cctl.b.i2cen = 1; ++ dwc_write_reg32 (&global_regs->gi2cctl, i2cctl.d32); ++ } ++ ++ } /* endif speed == DWC_SPEED_PARAM_FULL */ ++ else { ++ /* High speed PHY. */ ++ if (!_core_if->phy_init_done) { ++ _core_if->phy_init_done = 1; ++ DWC_DEBUGPL(DBG_CIL, "High spped PHY\n"); ++ /* HS PHY parameters. These parameters are preserved ++ * during soft reset so only program the first time. Do ++ * a soft reset immediately after setting phyif. */ ++ usbcfg.b.ulpi_utmi_sel = _core_if->core_params->phy_type; ++ if (usbcfg.b.ulpi_utmi_sel == 2) { // winder ++ DWC_DEBUGPL(DBG_CIL, "ULPI\n"); ++ /* ULPI interface */ ++ usbcfg.b.phyif = 0; ++ usbcfg.b.ddrsel = _core_if->core_params->phy_ulpi_ddr; ++ } else { ++ /* UTMI+ interface */ ++ if (_core_if->core_params->phy_utmi_width == 16) { ++ usbcfg.b.phyif = 1; ++ DWC_DEBUGPL(DBG_CIL, "UTMI+ 16\n"); ++ } else { ++ DWC_DEBUGPL(DBG_CIL, "UTMI+ 8\n"); ++ usbcfg.b.phyif = 0; ++ } ++ } ++ dwc_write_reg32( &global_regs->gusbcfg, usbcfg.d32); ++ ++ /* Reset after setting the PHY parameters */ ++ dwc_otg_core_reset( _core_if ); ++ } ++ } ++ ++ if ((_core_if->hwcfg2.b.hs_phy_type == 2) && ++ (_core_if->hwcfg2.b.fs_phy_type == 1) && ++ (_core_if->core_params->ulpi_fs_ls)) ++ { ++ DWC_DEBUGPL(DBG_CIL, "Setting ULPI FSLS\n"); ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ usbcfg.b.ulpi_fsls = 1; ++ usbcfg.b.ulpi_clk_sus_m = 1; ++ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); ++ } else { ++ DWC_DEBUGPL(DBG_CIL, "Setting ULPI FSLS=0\n"); ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ usbcfg.b.ulpi_fsls = 0; ++ usbcfg.b.ulpi_clk_sus_m = 0; ++ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); ++ } ++ ++ /* Program the GAHBCFG Register.*/ ++ switch (_core_if->hwcfg2.b.architecture){ ++ ++ case DWC_SLAVE_ONLY_ARCH: ++ DWC_DEBUGPL(DBG_CIL, "Slave Only Mode\n"); ++ ahbcfg.b.nptxfemplvl_txfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY; ++ ahbcfg.b.ptxfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY; ++ _core_if->dma_enable = 0; ++ break; ++ ++ case DWC_EXT_DMA_ARCH: ++ DWC_DEBUGPL(DBG_CIL, "External DMA Mode\n"); ++ ahbcfg.b.hburstlen = _core_if->core_params->dma_burst_size; ++ _core_if->dma_enable = (_core_if->core_params->dma_enable != 0); ++ break; ++ ++ case DWC_INT_DMA_ARCH: ++ DWC_DEBUGPL(DBG_CIL, "Internal DMA Mode\n"); ++ //ahbcfg.b.hburstlen = DWC_GAHBCFG_INT_DMA_BURST_INCR; ++ ahbcfg.b.hburstlen = 
DWC_GAHBCFG_INT_DMA_BURST_INCR4; ++ _core_if->dma_enable = (_core_if->core_params->dma_enable != 0); ++ break; ++ } ++ ahbcfg.b.dmaenable = _core_if->dma_enable; ++ dwc_write_reg32(&global_regs->gahbcfg, ahbcfg.d32); ++ _core_if->en_multiple_tx_fifo = _core_if->hwcfg4.b.ded_fifo_en; ++ ++ /* ++ * Program the GUSBCFG register. ++ */ ++ usbcfg.d32 = dwc_read_reg32( &global_regs->gusbcfg ); ++ ++ switch (_core_if->hwcfg2.b.op_mode) { ++ case DWC_MODE_HNP_SRP_CAPABLE: ++ usbcfg.b.hnpcap = (_core_if->core_params->otg_cap == ++ DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE); ++ usbcfg.b.srpcap = (_core_if->core_params->otg_cap != ++ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE); ++ break; ++ ++ case DWC_MODE_SRP_ONLY_CAPABLE: ++ usbcfg.b.hnpcap = 0; ++ usbcfg.b.srpcap = (_core_if->core_params->otg_cap != ++ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE); ++ break; ++ ++ case DWC_MODE_NO_HNP_SRP_CAPABLE: ++ usbcfg.b.hnpcap = 0; ++ usbcfg.b.srpcap = 0; ++ break; ++ ++ case DWC_MODE_SRP_CAPABLE_DEVICE: ++ usbcfg.b.hnpcap = 0; ++ usbcfg.b.srpcap = (_core_if->core_params->otg_cap != ++ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE); ++ break; ++ ++ case DWC_MODE_NO_SRP_CAPABLE_DEVICE: ++ usbcfg.b.hnpcap = 0; ++ usbcfg.b.srpcap = 0; ++ break; ++ ++ case DWC_MODE_SRP_CAPABLE_HOST: ++ usbcfg.b.hnpcap = 0; ++ usbcfg.b.srpcap = (_core_if->core_params->otg_cap != ++ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE); ++ break; ++ ++ case DWC_MODE_NO_SRP_CAPABLE_HOST: ++ usbcfg.b.hnpcap = 0; ++ usbcfg.b.srpcap = 0; ++ break; ++ } ++ ++ dwc_write_reg32( &global_regs->gusbcfg, usbcfg.d32); ++ ++ /* Enable common interrupts */ ++ dwc_otg_enable_common_interrupts( _core_if ); ++ ++ /* Do device or host intialization based on mode during PCD ++ * and HCD initialization */ ++ if (dwc_otg_is_host_mode( _core_if )) { ++ DWC_DEBUGPL(DBG_ANY, "Host Mode\n" ); ++ _core_if->op_state = A_HOST; ++ } else { ++ DWC_DEBUGPL(DBG_ANY, "Device Mode\n" ); ++ _core_if->op_state = B_PERIPHERAL; ++#ifdef DWC_DEVICE_ONLY ++ dwc_otg_core_dev_init( _core_if ); ++#endif ++ } ++} ++ ++ ++/** ++ * This function enables the Device mode interrupts. ++ * ++ * @param _core_if Programming view of DWC_otg controller ++ */ ++void dwc_otg_enable_device_interrupts(dwc_otg_core_if_t *_core_if) ++{ ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ dwc_otg_core_global_regs_t * global_regs = _core_if->core_global_regs; ++ ++ DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__); ++ ++ /* Disable all interrupts. */ ++ dwc_write_reg32( &global_regs->gintmsk, 0); ++ ++ /* Clear any pending interrupts */ ++ dwc_write_reg32( &global_regs->gintsts, 0xFFFFFFFF); ++ ++ /* Enable the common interrupts */ ++ dwc_otg_enable_common_interrupts( _core_if ); ++ ++ /* Enable interrupts */ ++ intr_mask.b.usbreset = 1; ++ intr_mask.b.enumdone = 1; ++ //intr_mask.b.epmismatch = 1; ++ intr_mask.b.inepintr = 1; ++ intr_mask.b.outepintr = 1; ++ intr_mask.b.erlysuspend = 1; ++ if (_core_if->en_multiple_tx_fifo == 0) { ++ intr_mask.b.epmismatch = 1; ++ } ++ ++ /** @todo NGS: Should this be a module parameter? */ ++ intr_mask.b.isooutdrop = 1; ++ intr_mask.b.eopframe = 1; ++ intr_mask.b.incomplisoin = 1; ++ intr_mask.b.incomplisoout = 1; ++ ++ dwc_modify_reg32( &global_regs->gintmsk, intr_mask.d32, intr_mask.d32); ++ ++ DWC_DEBUGPL(DBG_CIL, "%s() gintmsk=%0x\n", __func__, ++ dwc_read_reg32( &global_regs->gintmsk)); ++} ++ ++/** ++ * This function initializes the DWC_otg controller registers for ++ * device mode. 
++ * ++ * @param _core_if Programming view of DWC_otg controller ++ * ++ */ ++void dwc_otg_core_dev_init(dwc_otg_core_if_t *_core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = ++ _core_if->core_global_regs; ++ dwc_otg_dev_if_t *dev_if = _core_if->dev_if; ++ dwc_otg_core_params_t *params = _core_if->core_params; ++ dcfg_data_t dcfg = {.d32 = 0}; ++ grstctl_t resetctl = { .d32=0 }; ++ int i; ++ uint32_t rx_fifo_size; ++ fifosize_data_t nptxfifosize; ++ fifosize_data_t txfifosize; ++ dthrctl_data_t dthrctl; ++ ++ fifosize_data_t ptxfifosize; ++ ++ /* Restart the Phy Clock */ ++ dwc_write_reg32(_core_if->pcgcctl, 0); ++ ++ /* Device configuration register */ ++ init_devspd(_core_if); ++ dcfg.d32 = dwc_read_reg32( &dev_if->dev_global_regs->dcfg); ++ dcfg.b.perfrint = DWC_DCFG_FRAME_INTERVAL_80; ++ dwc_write_reg32( &dev_if->dev_global_regs->dcfg, dcfg.d32 ); ++ ++ /* Configure data FIFO sizes */ ++ if ( _core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo ) { ++ ++ DWC_DEBUGPL(DBG_CIL, "Total FIFO Size=%d\n", _core_if->total_fifo_size); ++ DWC_DEBUGPL(DBG_CIL, "Rx FIFO Size=%d\n", params->dev_rx_fifo_size); ++ DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO Size=%d\n", params->dev_nperio_tx_fifo_size); ++ ++ /* Rx FIFO */ ++ DWC_DEBUGPL(DBG_CIL, "initial grxfsiz=%08x\n", ++ dwc_read_reg32(&global_regs->grxfsiz)); ++ rx_fifo_size = params->dev_rx_fifo_size; ++ dwc_write_reg32( &global_regs->grxfsiz, rx_fifo_size ); ++ DWC_DEBUGPL(DBG_CIL, "new grxfsiz=%08x\n", ++ dwc_read_reg32(&global_regs->grxfsiz)); ++ ++ /** Set Periodic Tx FIFO Mask all bits 0 */ ++ _core_if->p_tx_msk = 0; ++ ++ /** Set Tx FIFO Mask all bits 0 */ ++ _core_if->tx_msk = 0; ++ if (_core_if->en_multiple_tx_fifo == 0) { ++ /* Non-periodic Tx FIFO */ ++ DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n", ++ dwc_read_reg32(&global_regs->gnptxfsiz)); ++ nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size; ++ nptxfifosize.b.startaddr = params->dev_rx_fifo_size; ++ dwc_write_reg32( &global_regs->gnptxfsiz, nptxfifosize.d32 ); ++ DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n", ++ dwc_read_reg32(&global_regs->gnptxfsiz)); ++ ++ ++ /**@todo NGS: Fix Periodic FIFO Sizing! */ ++ /* ++ * Periodic Tx FIFOs These FIFOs are numbered from 1 to 15. ++ * Indexes of the FIFO size module parameters in the ++ * dev_perio_tx_fifo_size array and the FIFO size registers in ++ * the dptxfsiz array run from 0 to 14. ++ */ ++ /** @todo Finish debug of this */ ++ ptxfifosize.b.startaddr = ++ nptxfifosize.b.startaddr + nptxfifosize.b.depth; ++ for (i = 0; i < _core_if->hwcfg4.b.num_dev_perio_in_ep;i++) { ++ ptxfifosize.b.depth = params->dev_perio_tx_fifo_size[i]; ++ DWC_DEBUGPL(DBG_CIL,"initial dptxfsiz_dieptxf[%d]=%08x\n", ++ i,dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i])); ++ dwc_write_reg32(&global_regs->dptxfsiz_dieptxf[i],ptxfifosize.d32); ++ DWC_DEBUGPL(DBG_CIL,"new dptxfsiz_dieptxf[%d]=%08x\n", ++ i,dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i])); ++ ptxfifosize.b.startaddr += ptxfifosize.b.depth; ++ } ++ } else { ++ ++ /* ++ * Tx FIFOs These FIFOs are numbered from 1 to 15. ++ * Indexes of the FIFO size module parameters in the ++ * dev_tx_fifo_size array and the FIFO size registers in ++ * the dptxfsiz_dieptxf array run from 0 to 14. 
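The FIFO programming in this function relies on the packing used by fifosize_data_t: the start address in the low 16 bits and the depth in the high 16 bits, which is why the probe code reads sizes with `>> 16` and why each FIFO's start address is the previous start plus the previous depth. A small sketch of carving the shared FIFO RAM that way; the depths are example numbers, not the driver's defaults:

    #include <stdio.h>
    #include <stdint.h>

    /* GRXFSIZ takes a plain depth; GNPTXFSIZ and the dptxfsiz_dieptxf entries
     * pack start address (low 16 bits) and depth (high 16 bits), matching
     * fifosize_data_t. */
    static uint32_t pack_fifosize(uint16_t startaddr, uint16_t depth)
    {
        return ((uint32_t)depth << 16) | startaddr;
    }

    int main(void)
    {
        uint16_t rx_depth  = 1024;   /* example depths, in 32-bit words */
        uint16_t np_depth  = 1024;
        uint16_t ptx_depth = 768;

        uint32_t gnptxfsiz = pack_fifosize(rx_depth, np_depth);
        uint32_t dptxfsiz1 = pack_fifosize(rx_depth + np_depth, ptx_depth);

        printf("GNPTXFSIZ = 0x%08x (depth %u)\n", gnptxfsiz, gnptxfsiz >> 16);
        printf("DPTXFSIZ1 = 0x%08x (start %u)\n", dptxfsiz1, dptxfsiz1 & 0xffff);
        return 0;
    }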
++ */ ++ ++ /* Non-periodic Tx FIFO */ ++ DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n", ++ dwc_read_reg32(&global_regs->gnptxfsiz)); ++ nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size; ++ nptxfifosize.b.startaddr = params->dev_rx_fifo_size; ++ dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32); ++ DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n", ++ dwc_read_reg32(&global_regs->gnptxfsiz)); ++ txfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth; ++ for (i = 1;i < _core_if->hwcfg4.b.num_dev_perio_in_ep;i++) { ++ txfifosize.b.depth = params->dev_tx_fifo_size[i]; ++ DWC_DEBUGPL(DBG_CIL,"initial dptxfsiz_dieptxf[%d]=%08x\n", ++ i,dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i])); ++ dwc_write_reg32(&global_regs->dptxfsiz_dieptxf[i - 1],txfifosize.d32); ++ DWC_DEBUGPL(DBG_CIL,"new dptxfsiz_dieptxf[%d]=%08x\n", ++ i,dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i-1])); ++ txfifosize.b.startaddr += txfifosize.b.depth; ++ } ++ } ++ } ++ /* Flush the FIFOs */ ++ dwc_otg_flush_tx_fifo(_core_if, 0x10); /* all Tx FIFOs */ ++ dwc_otg_flush_rx_fifo(_core_if); ++ ++ /* Flush the Learning Queue. */ ++ resetctl.b.intknqflsh = 1; ++ dwc_write_reg32( &_core_if->core_global_regs->grstctl, resetctl.d32); ++ ++ /* Clear all pending Device Interrupts */ ++ dwc_write_reg32( &dev_if->dev_global_regs->diepmsk, 0 ); ++ dwc_write_reg32( &dev_if->dev_global_regs->doepmsk, 0 ); ++ dwc_write_reg32( &dev_if->dev_global_regs->daint, 0xFFFFFFFF ); ++ dwc_write_reg32( &dev_if->dev_global_regs->daintmsk, 0 ); ++ ++ for (i = 0; i <= dev_if->num_in_eps; i++) { ++ depctl_data_t depctl; ++ depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl); ++ if (depctl.b.epena) { ++ depctl.d32 = 0; ++ depctl.b.epdis = 1; ++ depctl.b.snak = 1; ++ } else { ++ depctl.d32 = 0; ++ } ++ dwc_write_reg32( &dev_if->in_ep_regs[i]->diepctl, depctl.d32); ++ ++ dwc_write_reg32(&dev_if->in_ep_regs[i]->dieptsiz, 0); ++ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepdma, 0); ++ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepint, 0xFF); ++ } ++ for (i = 0; i <= dev_if->num_out_eps; i++) { ++ depctl_data_t depctl; ++ depctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doepctl); ++ if (depctl.b.epena) { ++ depctl.d32 = 0; ++ depctl.b.epdis = 1; ++ depctl.b.snak = 1; ++ } else { ++ depctl.d32 = 0; ++ } ++ dwc_write_reg32( &dev_if->out_ep_regs[i]->doepctl, depctl.d32); ++ ++ //dwc_write_reg32( &dev_if->in_ep_regs[i]->dieptsiz, 0); ++ dwc_write_reg32( &dev_if->out_ep_regs[i]->doeptsiz, 0); ++ //dwc_write_reg32( &dev_if->in_ep_regs[i]->diepdma, 0); ++ dwc_write_reg32( &dev_if->out_ep_regs[i]->doepdma, 0); ++ //dwc_write_reg32( &dev_if->in_ep_regs[i]->diepint, 0xFF); ++ dwc_write_reg32( &dev_if->out_ep_regs[i]->doepint, 0xFF); ++ } ++ ++ if (_core_if->en_multiple_tx_fifo && _core_if->dma_enable) { ++ dev_if->non_iso_tx_thr_en = _core_if->core_params->thr_ctl & 0x1; ++ dev_if->iso_tx_thr_en = (_core_if->core_params->thr_ctl >> 1) & 0x1; ++ dev_if->rx_thr_en = (_core_if->core_params->thr_ctl >> 2) & 0x1; ++ dev_if->rx_thr_length = _core_if->core_params->rx_thr_length; ++ dev_if->tx_thr_length = _core_if->core_params->tx_thr_length; ++ dthrctl.d32 = 0; ++ dthrctl.b.non_iso_thr_en = dev_if->non_iso_tx_thr_en; ++ dthrctl.b.iso_thr_en = dev_if->iso_tx_thr_en; ++ dthrctl.b.tx_thr_len = dev_if->tx_thr_length; ++ dthrctl.b.rx_thr_en = dev_if->rx_thr_en; ++ dthrctl.b.rx_thr_len = dev_if->rx_thr_length; ++ dwc_write_reg32(&dev_if->dev_global_regs->dtknqr3_dthrctl,dthrctl.d32); ++ DWC_DEBUGPL(DBG_CIL, "Non ISO Tx Thr - %d\nISO Tx Thr - 
%d\n" ++ "Rx Thr - %d\nTx Thr Len - %d\nRx Thr Len - %d\n", ++ dthrctl.b.non_iso_thr_en, dthrctl.b.iso_thr_en, ++ dthrctl.b.rx_thr_en, dthrctl.b.tx_thr_len, ++ dthrctl.b.rx_thr_len); ++ } ++ dwc_otg_enable_device_interrupts( _core_if ); ++ { ++ diepmsk_data_t msk = {.d32 = 0}; ++ msk.b.txfifoundrn = 1; ++ dwc_modify_reg32(&dev_if->dev_global_regs->diepmsk, msk.d32,msk.d32); ++} ++} ++ ++/** ++ * This function enables the Host mode interrupts. ++ * ++ * @param _core_if Programming view of DWC_otg controller ++ */ ++void dwc_otg_enable_host_interrupts(dwc_otg_core_if_t *_core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = _core_if->core_global_regs; ++ gintmsk_data_t intr_mask = {.d32 = 0}; ++ ++ DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__); ++ ++ /* Disable all interrupts. */ ++ dwc_write_reg32(&global_regs->gintmsk, 0); ++ ++ /* Clear any pending interrupts. */ ++ dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF); ++ ++ /* Enable the common interrupts */ ++ dwc_otg_enable_common_interrupts(_core_if); ++ ++ /* ++ * Enable host mode interrupts without disturbing common ++ * interrupts. ++ */ ++ intr_mask.b.sofintr = 1; ++ intr_mask.b.portintr = 1; ++ intr_mask.b.hcintr = 1; ++ ++ //dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32); ++ //dwc_modify_reg32(&global_regs->gintmsk, 0, intr_mask.d32); ++ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32); ++} ++ ++/** ++ * This function disables the Host Mode interrupts. ++ * ++ * @param _core_if Programming view of DWC_otg controller ++ */ ++void dwc_otg_disable_host_interrupts(dwc_otg_core_if_t *_core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = ++ _core_if->core_global_regs; ++ gintmsk_data_t intr_mask = {.d32 = 0}; ++ ++ DWC_DEBUGPL(DBG_CILV, "%s()\n", __func__); ++ ++ /* ++ * Disable host mode interrupts without disturbing common ++ * interrupts. ++ */ ++ intr_mask.b.sofintr = 1; ++ intr_mask.b.portintr = 1; ++ intr_mask.b.hcintr = 1; ++ intr_mask.b.ptxfempty = 1; ++ intr_mask.b.nptxfempty = 1; ++ ++ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0); ++} ++ ++#if 1 ++/* currently not used, keep it here as if needed later */ ++static int phy_read(dwc_otg_core_if_t * _core_if, int addr) ++{ ++ u32 val; ++ int timeout = 10; ++ ++ dwc_write_reg32(&_core_if->core_global_regs->gpvndctl, ++ 0x02000000 | (addr << 16)); ++ val = dwc_read_reg32(&_core_if->core_global_regs->gpvndctl); ++ while (((val & 0x08000000) == 0) && (timeout--)) { ++ udelay(1000); ++ val = dwc_read_reg32(&_core_if->core_global_regs->gpvndctl); ++ } ++ val = dwc_read_reg32(&_core_if->core_global_regs->gpvndctl); ++ printk("%s: addr=%02x regval=%02x\n", __func__, addr, val & 0x000000ff); ++ ++ return 0; ++} ++#endif ++ ++/** ++ * This function initializes the DWC_otg controller registers for ++ * host mode. ++ * ++ * This function flushes the Tx and Rx FIFOs and it flushes any entries in the ++ * request queues. Host channels are reset to ensure that they are ready for ++ * performing transfers. 
++ * ++ * @param _core_if Programming view of DWC_otg controller ++ * ++ */ ++void dwc_otg_core_host_init(dwc_otg_core_if_t *_core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = _core_if->core_global_regs; ++ dwc_otg_host_if_t *host_if = _core_if->host_if; ++ dwc_otg_core_params_t *params = _core_if->core_params; ++ hprt0_data_t hprt0 = {.d32 = 0}; ++ fifosize_data_t nptxfifosize; ++ fifosize_data_t ptxfifosize; ++ int i; ++ hcchar_data_t hcchar; ++ hcfg_data_t hcfg; ++ dwc_otg_hc_regs_t *hc_regs; ++ int num_channels; ++ gotgctl_data_t gotgctl = {.d32 = 0}; ++ ++ DWC_DEBUGPL(DBG_CILV,"%s(%p)\n", __func__, _core_if); ++ ++ /* Restart the Phy Clock */ ++ dwc_write_reg32(_core_if->pcgcctl, 0); ++ ++ /* Initialize Host Configuration Register */ ++ init_fslspclksel(_core_if); ++ if (_core_if->core_params->speed == DWC_SPEED_PARAM_FULL) { ++ hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg); ++ hcfg.b.fslssupp = 1; ++ dwc_write_reg32(&host_if->host_global_regs->hcfg, hcfg.d32); ++ } ++ ++ /* Configure data FIFO sizes */ ++ if (_core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) { ++ DWC_DEBUGPL(DBG_CIL,"Total FIFO Size=%d\n", _core_if->total_fifo_size); ++ DWC_DEBUGPL(DBG_CIL,"Rx FIFO Size=%d\n", params->host_rx_fifo_size); ++ DWC_DEBUGPL(DBG_CIL,"NP Tx FIFO Size=%d\n", params->host_nperio_tx_fifo_size); ++ DWC_DEBUGPL(DBG_CIL,"P Tx FIFO Size=%d\n", params->host_perio_tx_fifo_size); ++ ++ /* Rx FIFO */ ++ DWC_DEBUGPL(DBG_CIL,"initial grxfsiz=%08x\n", dwc_read_reg32(&global_regs->grxfsiz)); ++ dwc_write_reg32(&global_regs->grxfsiz, params->host_rx_fifo_size); ++ DWC_DEBUGPL(DBG_CIL,"new grxfsiz=%08x\n", dwc_read_reg32(&global_regs->grxfsiz)); ++ ++ /* Non-periodic Tx FIFO */ ++ DWC_DEBUGPL(DBG_CIL,"initial gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz)); ++ nptxfifosize.b.depth = params->host_nperio_tx_fifo_size; ++ nptxfifosize.b.startaddr = params->host_rx_fifo_size; ++ dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32); ++ DWC_DEBUGPL(DBG_CIL,"new gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz)); ++ ++ /* Periodic Tx FIFO */ ++ DWC_DEBUGPL(DBG_CIL,"initial hptxfsiz=%08x\n", dwc_read_reg32(&global_regs->hptxfsiz)); ++ ptxfifosize.b.depth = params->host_perio_tx_fifo_size; ++ ptxfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth; ++ dwc_write_reg32(&global_regs->hptxfsiz, ptxfifosize.d32); ++ DWC_DEBUGPL(DBG_CIL,"new hptxfsiz=%08x\n", dwc_read_reg32(&global_regs->hptxfsiz)); ++ } ++ ++ /* Clear Host Set HNP Enable in the OTG Control Register */ ++ gotgctl.b.hstsethnpen = 1; ++ dwc_modify_reg32( &global_regs->gotgctl, gotgctl.d32, 0); ++ ++ /* Make sure the FIFOs are flushed. */ ++ dwc_otg_flush_tx_fifo(_core_if, 0x10 /* all Tx FIFOs */); ++ dwc_otg_flush_rx_fifo(_core_if); ++ ++ /* Flush out any leftover queued requests. */ ++ num_channels = _core_if->core_params->host_channels; ++ for (i = 0; i < num_channels; i++) { ++ hc_regs = _core_if->host_if->hc_regs[i]; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.chen = 0; ++ hcchar.b.chdis = 1; ++ hcchar.b.epdir = 0; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ } ++ ++ /* Halt all channels to put them into a known state. 
*/ ++ for (i = 0; i < num_channels; i++) { ++ int count = 0; ++ hc_regs = _core_if->host_if->hc_regs[i]; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.chen = 1; ++ hcchar.b.chdis = 1; ++ hcchar.b.epdir = 0; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ DWC_DEBUGPL(DBG_HCDV, "%s: Halt channel %d\n", __func__, i); ++ do { ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (++count > 200) { ++ DWC_ERROR("%s: Unable to clear halt on channel %d\n", ++ __func__, i); ++ break; ++ } ++ udelay(100); ++ } while (hcchar.b.chen); ++ } ++ ++ /* Turn on the vbus power. */ ++ DWC_PRINT("Init: Port Power? op_state=%d\n", _core_if->op_state); ++ if (_core_if->op_state == A_HOST){ ++ hprt0.d32 = dwc_otg_read_hprt0(_core_if); ++ DWC_PRINT("Init: Power Port (%d)\n", hprt0.b.prtpwr); ++ if (hprt0.b.prtpwr == 0 ) { ++ hprt0.b.prtpwr = 1; ++ dwc_write_reg32(host_if->hprt0, hprt0.d32); ++ } ++ } ++ ++ dwc_otg_enable_host_interrupts( _core_if ); ++} ++ ++/** ++ * Prepares a host channel for transferring packets to/from a specific ++ * endpoint. The HCCHARn register is set up with the characteristics specified ++ * in _hc. Host channel interrupts that may need to be serviced while this ++ * transfer is in progress are enabled. ++ * ++ * @param _core_if Programming view of DWC_otg controller ++ * @param _hc Information needed to initialize the host channel ++ */ ++void dwc_otg_hc_init(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc) ++{ ++ uint32_t intr_enable; ++ hcintmsk_data_t hc_intr_mask; ++ gintmsk_data_t gintmsk = {.d32 = 0}; ++ hcchar_data_t hcchar; ++ hcsplt_data_t hcsplt; ++ ++ uint8_t hc_num = _hc->hc_num; ++ dwc_otg_host_if_t *host_if = _core_if->host_if; ++ dwc_otg_hc_regs_t *hc_regs = host_if->hc_regs[hc_num]; ++ ++ /* Clear old interrupt conditions for this host channel. */ ++ hc_intr_mask.d32 = 0xFFFFFFFF; ++ hc_intr_mask.b.reserved = 0; ++ dwc_write_reg32(&hc_regs->hcint, hc_intr_mask.d32); ++ ++ /* Enable channel interrupts required for this transfer. 
*/ ++ hc_intr_mask.d32 = 0; ++ hc_intr_mask.b.chhltd = 1; ++ if (_core_if->dma_enable) { ++ hc_intr_mask.b.ahberr = 1; ++ if (_hc->error_state && !_hc->do_split && ++ _hc->ep_type != DWC_OTG_EP_TYPE_ISOC) { ++ hc_intr_mask.b.ack = 1; ++ if (_hc->ep_is_in) { ++ hc_intr_mask.b.datatglerr = 1; ++ if (_hc->ep_type != DWC_OTG_EP_TYPE_INTR) { ++ hc_intr_mask.b.nak = 1; ++ } ++ } ++ } ++ } else { ++ switch (_hc->ep_type) { ++ case DWC_OTG_EP_TYPE_CONTROL: ++ case DWC_OTG_EP_TYPE_BULK: ++ hc_intr_mask.b.xfercompl = 1; ++ hc_intr_mask.b.stall = 1; ++ hc_intr_mask.b.xacterr = 1; ++ hc_intr_mask.b.datatglerr = 1; ++ if (_hc->ep_is_in) { ++ hc_intr_mask.b.bblerr = 1; ++ } else { ++ hc_intr_mask.b.nak = 1; ++ hc_intr_mask.b.nyet = 1; ++ if (_hc->do_ping) { ++ hc_intr_mask.b.ack = 1; ++ } ++ } ++ ++ if (_hc->do_split) { ++ hc_intr_mask.b.nak = 1; ++ if (_hc->complete_split) { ++ hc_intr_mask.b.nyet = 1; ++ } ++ else { ++ hc_intr_mask.b.ack = 1; ++ } ++ } ++ ++ if (_hc->error_state) { ++ hc_intr_mask.b.ack = 1; ++ } ++ break; ++ case DWC_OTG_EP_TYPE_INTR: ++ hc_intr_mask.b.xfercompl = 1; ++ hc_intr_mask.b.nak = 1; ++ hc_intr_mask.b.stall = 1; ++ hc_intr_mask.b.xacterr = 1; ++ hc_intr_mask.b.datatglerr = 1; ++ hc_intr_mask.b.frmovrun = 1; ++ ++ if (_hc->ep_is_in) { ++ hc_intr_mask.b.bblerr = 1; ++ } ++ if (_hc->error_state) { ++ hc_intr_mask.b.ack = 1; ++ } ++ if (_hc->do_split) { ++ if (_hc->complete_split) { ++ hc_intr_mask.b.nyet = 1; ++ } ++ else { ++ hc_intr_mask.b.ack = 1; ++ } ++ } ++ break; ++ case DWC_OTG_EP_TYPE_ISOC: ++ hc_intr_mask.b.xfercompl = 1; ++ hc_intr_mask.b.frmovrun = 1; ++ hc_intr_mask.b.ack = 1; ++ ++ if (_hc->ep_is_in) { ++ hc_intr_mask.b.xacterr = 1; ++ hc_intr_mask.b.bblerr = 1; ++ } ++ break; ++ } ++ } ++ dwc_write_reg32(&hc_regs->hcintmsk, hc_intr_mask.d32); ++ ++ /* Enable the top level host channel interrupt. */ ++ intr_enable = (1 << hc_num); ++ dwc_modify_reg32(&host_if->host_global_regs->haintmsk, 0, intr_enable); ++ ++ /* Make sure host channel interrupts are enabled. */ ++ gintmsk.b.hcintr = 1; ++ dwc_modify_reg32(&_core_if->core_global_regs->gintmsk, 0, gintmsk.d32); ++ ++ /* ++ * Program the HCCHARn register with the endpoint characteristics for ++ * the current transfer. ++ */ ++ hcchar.d32 = 0; ++ hcchar.b.devaddr = _hc->dev_addr; ++ hcchar.b.epnum = _hc->ep_num; ++ hcchar.b.epdir = _hc->ep_is_in; ++ hcchar.b.lspddev = (_hc->speed == DWC_OTG_EP_SPEED_LOW); ++ hcchar.b.eptype = _hc->ep_type; ++ hcchar.b.mps = _hc->max_packet; ++ ++ dwc_write_reg32(&host_if->hc_regs[hc_num]->hcchar, hcchar.d32); ++ ++ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _hc->hc_num); ++ DWC_DEBUGPL(DBG_HCDV, " Dev Addr: %d\n", hcchar.b.devaddr); ++ DWC_DEBUGPL(DBG_HCDV, " Ep Num: %d\n", hcchar.b.epnum); ++ DWC_DEBUGPL(DBG_HCDV, " Is In: %d\n", hcchar.b.epdir); ++ DWC_DEBUGPL(DBG_HCDV, " Is Low Speed: %d\n", hcchar.b.lspddev); ++ DWC_DEBUGPL(DBG_HCDV, " Ep Type: %d\n", hcchar.b.eptype); ++ DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps); ++ DWC_DEBUGPL(DBG_HCDV, " Multi Cnt: %d\n", hcchar.b.multicnt); ++ ++ /* ++ * Program the HCSPLIT register for SPLITs ++ */ ++ hcsplt.d32 = 0; ++ if (_hc->do_split) { ++ DWC_DEBUGPL(DBG_HCDV, "Programming HC %d with split --> %s\n", _hc->hc_num, ++ _hc->complete_split ? 
"CSPLIT" : "SSPLIT"); ++ hcsplt.b.compsplt = _hc->complete_split; ++ hcsplt.b.xactpos = _hc->xact_pos; ++ hcsplt.b.hubaddr = _hc->hub_addr; ++ hcsplt.b.prtaddr = _hc->port_addr; ++ DWC_DEBUGPL(DBG_HCDV, " comp split %d\n", _hc->complete_split); ++ DWC_DEBUGPL(DBG_HCDV, " xact pos %d\n", _hc->xact_pos); ++ DWC_DEBUGPL(DBG_HCDV, " hub addr %d\n", _hc->hub_addr); ++ DWC_DEBUGPL(DBG_HCDV, " port addr %d\n", _hc->port_addr); ++ DWC_DEBUGPL(DBG_HCDV, " is_in %d\n", _hc->ep_is_in); ++ DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps); ++ DWC_DEBUGPL(DBG_HCDV, " xferlen: %d\n", _hc->xfer_len); ++ } ++ dwc_write_reg32(&host_if->hc_regs[hc_num]->hcsplt, hcsplt.d32); ++ ++} ++ ++/** ++ * Attempts to halt a host channel. This function should only be called in ++ * Slave mode or to abort a transfer in either Slave mode or DMA mode. Under ++ * normal circumstances in DMA mode, the controller halts the channel when the ++ * transfer is complete or a condition occurs that requires application ++ * intervention. ++ * ++ * In slave mode, checks for a free request queue entry, then sets the Channel ++ * Enable and Channel Disable bits of the Host Channel Characteristics ++ * register of the specified channel to intiate the halt. If there is no free ++ * request queue entry, sets only the Channel Disable bit of the HCCHARn ++ * register to flush requests for this channel. In the latter case, sets a ++ * flag to indicate that the host channel needs to be halted when a request ++ * queue slot is open. ++ * ++ * In DMA mode, always sets the Channel Enable and Channel Disable bits of the ++ * HCCHARn register. The controller ensures there is space in the request ++ * queue before submitting the halt request. ++ * ++ * Some time may elapse before the core flushes any posted requests for this ++ * host channel and halts. The Channel Halted interrupt handler completes the ++ * deactivation of the host channel. ++ * ++ * @param _core_if Controller register interface. ++ * @param _hc Host channel to halt. ++ * @param _halt_status Reason for halting the channel. ++ */ ++void dwc_otg_hc_halt(dwc_otg_core_if_t *_core_if, ++ dwc_hc_t *_hc, ++ dwc_otg_halt_status_e _halt_status) ++{ ++ gnptxsts_data_t nptxsts; ++ hptxsts_data_t hptxsts; ++ hcchar_data_t hcchar; ++ dwc_otg_hc_regs_t *hc_regs; ++ dwc_otg_core_global_regs_t *global_regs; ++ dwc_otg_host_global_regs_t *host_global_regs; ++ ++ hc_regs = _core_if->host_if->hc_regs[_hc->hc_num]; ++ global_regs = _core_if->core_global_regs; ++ host_global_regs = _core_if->host_if->host_global_regs; ++ ++ WARN_ON(_halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS); ++ ++ if (_halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE || ++ _halt_status == DWC_OTG_HC_XFER_AHB_ERR) { ++ /* ++ * Disable all channel interrupts except Ch Halted. The QTD ++ * and QH state associated with this transfer has been cleared ++ * (in the case of URB_DEQUEUE), so the channel needs to be ++ * shut down carefully to prevent crashes. ++ */ ++ hcintmsk_data_t hcintmsk; ++ hcintmsk.d32 = 0; ++ hcintmsk.b.chhltd = 1; ++ dwc_write_reg32(&hc_regs->hcintmsk, hcintmsk.d32); ++ ++ /* ++ * Make sure no other interrupts besides halt are currently ++ * pending. Handling another interrupt could cause a crash due ++ * to the QTD and QH state. ++ */ ++ dwc_write_reg32(&hc_regs->hcint, ~hcintmsk.d32); ++ ++ /* ++ * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR ++ * even if the channel was already halted for some other ++ * reason. 
++ */ ++ _hc->halt_status = _halt_status; ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chen == 0) { ++ /* ++ * The channel is either already halted or it hasn't ++ * started yet. In DMA mode, the transfer may halt if ++ * it finishes normally or a condition occurs that ++ * requires driver intervention. Don't want to halt ++ * the channel again. In either Slave or DMA mode, ++ * it's possible that the transfer has been assigned ++ * to a channel, but not started yet when an URB is ++ * dequeued. Don't want to halt a channel that hasn't ++ * started yet. ++ */ ++ return; ++ } ++ } ++ ++ if (_hc->halt_pending) { ++ /* ++ * A halt has already been issued for this channel. This might ++ * happen when a transfer is aborted by a higher level in ++ * the stack. ++ */ ++#ifdef DEBUG ++ DWC_PRINT("*** %s: Channel %d, _hc->halt_pending already set ***\n", ++ __func__, _hc->hc_num); ++ ++/* dwc_otg_dump_global_registers(_core_if); */ ++/* dwc_otg_dump_host_registers(_core_if); */ ++#endif ++ return; ++ } ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.chen = 1; ++ hcchar.b.chdis = 1; ++ ++ if (!_core_if->dma_enable) { ++ /* Check for space in the request queue to issue the halt. */ ++ if (_hc->ep_type == DWC_OTG_EP_TYPE_CONTROL || ++ _hc->ep_type == DWC_OTG_EP_TYPE_BULK) { ++ nptxsts.d32 = dwc_read_reg32(&global_regs->gnptxsts); ++ if (nptxsts.b.nptxqspcavail == 0) { ++ hcchar.b.chen = 0; ++ } ++ } else { ++ hptxsts.d32 = dwc_read_reg32(&host_global_regs->hptxsts); ++ if ((hptxsts.b.ptxqspcavail == 0) || (_core_if->queuing_high_bandwidth)) { ++ hcchar.b.chen = 0; ++ } ++ } ++ } ++ ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ ++ _hc->halt_status = _halt_status; ++ ++ if (hcchar.b.chen) { ++ _hc->halt_pending = 1; ++ _hc->halt_on_queue = 0; ++ } else { ++ _hc->halt_on_queue = 1; ++ } ++ ++ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _hc->hc_num); ++ DWC_DEBUGPL(DBG_HCDV, " hcchar: 0x%08x\n", hcchar.d32); ++ DWC_DEBUGPL(DBG_HCDV, " halt_pending: %d\n", _hc->halt_pending); ++ DWC_DEBUGPL(DBG_HCDV, " halt_on_queue: %d\n", _hc->halt_on_queue); ++ DWC_DEBUGPL(DBG_HCDV, " halt_status: %d\n", _hc->halt_status); ++ ++ return; ++} ++ ++/** ++ * Clears the transfer state for a host channel. This function is normally ++ * called after a transfer is done and the host channel is being released. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ * @param _hc Identifies the host channel to clean up. ++ */ ++void dwc_otg_hc_cleanup(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc) ++{ ++ dwc_otg_hc_regs_t *hc_regs; ++ ++ _hc->xfer_started = 0; ++ ++ /* ++ * Clear channel interrupt enables and any unhandled channel interrupt ++ * conditions. ++ */ ++ hc_regs = _core_if->host_if->hc_regs[_hc->hc_num]; ++ dwc_write_reg32(&hc_regs->hcintmsk, 0); ++ dwc_write_reg32(&hc_regs->hcint, 0xFFFFFFFF); ++ ++#ifdef DEBUG ++ del_timer(&_core_if->hc_xfer_timer[_hc->hc_num]); ++ { ++ hcchar_data_t hcchar; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chdis) { ++ DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n", ++ __func__, _hc->hc_num, hcchar.d32); ++ } ++ } ++#endif ++} ++ ++/** ++ * Sets the channel property that indicates in which frame a periodic transfer ++ * should occur. This is always set to the _next_ frame. This function has no ++ * effect on non-periodic transfers. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ * @param _hc Identifies the host channel to set up and its properties. 
++ * @param _hcchar Current value of the HCCHAR register for the specified host ++ * channel. ++ */ ++static inline void hc_set_even_odd_frame(dwc_otg_core_if_t *_core_if, ++ dwc_hc_t *_hc, ++ hcchar_data_t *_hcchar) ++{ ++ if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ _hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ hfnum_data_t hfnum; ++ hfnum.d32 = dwc_read_reg32(&_core_if->host_if->host_global_regs->hfnum); ++ /* 1 if _next_ frame is odd, 0 if it's even */ ++ _hcchar->b.oddfrm = (hfnum.b.frnum & 0x1) ? 0 : 1; ++#ifdef DEBUG ++ if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR && _hc->do_split && !_hc->complete_split) { ++ switch (hfnum.b.frnum & 0x7) { ++ case 7: ++ _core_if->hfnum_7_samples++; ++ _core_if->hfnum_7_frrem_accum += hfnum.b.frrem; ++ break; ++ case 0: ++ _core_if->hfnum_0_samples++; ++ _core_if->hfnum_0_frrem_accum += hfnum.b.frrem; ++ break; ++ default: ++ _core_if->hfnum_other_samples++; ++ _core_if->hfnum_other_frrem_accum += hfnum.b.frrem; ++ break; ++ } ++ } ++#endif ++ } ++} ++ ++#ifdef DEBUG ++static void hc_xfer_timeout(unsigned long _ptr) ++{ ++ hc_xfer_info_t *xfer_info = (hc_xfer_info_t *)_ptr; ++ int hc_num = xfer_info->hc->hc_num; ++ DWC_WARN("%s: timeout on channel %d\n", __func__, hc_num); ++ DWC_WARN(" start_hcchar_val 0x%08x\n", xfer_info->core_if->start_hcchar_val[hc_num]); ++} ++#endif ++ ++/* ++ * This function does the setup for a data transfer for a host channel and ++ * starts the transfer. May be called in either Slave mode or DMA mode. In ++ * Slave mode, the caller must ensure that there is sufficient space in the ++ * request queue and Tx Data FIFO. ++ * ++ * For an OUT transfer in Slave mode, it loads a data packet into the ++ * appropriate FIFO. If necessary, additional data packets will be loaded in ++ * the Host ISR. ++ * ++ * For an IN transfer in Slave mode, a data packet is requested. The data ++ * packets are unloaded from the Rx FIFO in the Host ISR. If necessary, ++ * additional data packets are requested in the Host ISR. ++ * ++ * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ ++ * register along with a packet count of 1 and the channel is enabled. This ++ * causes a single PING transaction to occur. Other fields in HCTSIZ are ++ * simply set to 0 since no data transfer occurs in this case. ++ * ++ * For a PING transfer in DMA mode, the HCTSIZ register is initialized with ++ * all the information required to perform the subsequent data transfer. In ++ * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the ++ * controller performs the entire PING protocol, then starts the data ++ * transfer. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ * @param _hc Information needed to initialize the host channel. The xfer_len ++ * value may be reduced to accommodate the max widths of the XferSize and ++ * PktCnt fields in the HCTSIZn register. The multi_count value may be changed ++ * to reflect the final xfer_len value. 
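For orientation only (an editorial sketch, not part of the patch): the host-channel CIL calls documented in this hunk are typically driven by the HCD layer in roughly this order for a single transfer:

static void example_start_one_transfer(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
{
        /* program HCCHARn/HCSPLTn and unmask the per-channel interrupts */
        dwc_otg_hc_init(core_if, hc);
        /* program HCTSIZn (and HCDMAn in DMA mode), then enable the channel */
        dwc_otg_hc_start_transfer(core_if, hc);
        /* in Slave mode the ISR later calls dwc_otg_hc_continue_transfer()
         * until it returns 0, and dwc_otg_hc_halt() if the URB is dequeued */
}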
++ */ ++void dwc_otg_hc_start_transfer(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc) ++{ ++ hcchar_data_t hcchar; ++ hctsiz_data_t hctsiz; ++ uint16_t num_packets; ++ uint32_t max_hc_xfer_size = _core_if->core_params->max_transfer_size; ++ uint16_t max_hc_pkt_count = _core_if->core_params->max_packet_count; ++ dwc_otg_hc_regs_t *hc_regs = _core_if->host_if->hc_regs[_hc->hc_num]; ++ ++ hctsiz.d32 = 0; ++ ++ if (_hc->do_ping) { ++ if (!_core_if->dma_enable) { ++ dwc_otg_hc_do_ping(_core_if, _hc); ++ _hc->xfer_started = 1; ++ return; ++ } else { ++ hctsiz.b.dopng = 1; ++ } ++ } ++ ++ if (_hc->do_split) { ++ num_packets = 1; ++ ++ if (_hc->complete_split && !_hc->ep_is_in) { ++ /* For CSPLIT OUT Transfer, set the size to 0 so the ++ * core doesn't expect any data written to the FIFO */ ++ _hc->xfer_len = 0; ++ } else if (_hc->ep_is_in || (_hc->xfer_len > _hc->max_packet)) { ++ _hc->xfer_len = _hc->max_packet; ++ } else if (!_hc->ep_is_in && (_hc->xfer_len > 188)) { ++ _hc->xfer_len = 188; ++ } ++ ++ hctsiz.b.xfersize = _hc->xfer_len; ++ } else { ++ /* ++ * Ensure that the transfer length and packet count will fit ++ * in the widths allocated for them in the HCTSIZn register. ++ */ ++ if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ _hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ /* ++ * Make sure the transfer size is no larger than one ++ * (micro)frame's worth of data. (A check was done ++ * when the periodic transfer was accepted to ensure ++ * that a (micro)frame's worth of data can be ++ * programmed into a channel.) ++ */ ++ uint32_t max_periodic_len = _hc->multi_count * _hc->max_packet; ++ if (_hc->xfer_len > max_periodic_len) { ++ _hc->xfer_len = max_periodic_len; ++ } else { ++ } ++ } else if (_hc->xfer_len > max_hc_xfer_size) { ++ /* Make sure that xfer_len is a multiple of max packet size. */ ++ _hc->xfer_len = max_hc_xfer_size - _hc->max_packet + 1; ++ } ++ ++ if (_hc->xfer_len > 0) { ++ num_packets = (_hc->xfer_len + _hc->max_packet - 1) / _hc->max_packet; ++ if (num_packets > max_hc_pkt_count) { ++ num_packets = max_hc_pkt_count; ++ _hc->xfer_len = num_packets * _hc->max_packet; ++ } ++ } else { ++ /* Need 1 packet for transfer length of 0. */ ++ num_packets = 1; ++ } ++ ++ if (_hc->ep_is_in) { ++ /* Always program an integral # of max packets for IN transfers. */ ++ _hc->xfer_len = num_packets * _hc->max_packet; ++ } ++ ++ if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ _hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ /* ++ * Make sure that the multi_count field matches the ++ * actual transfer length. ++ */ ++ _hc->multi_count = num_packets; ++ ++ } ++ ++ if (_hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ /* Set up the initial PID for the transfer. 
*/ ++ if (_hc->speed == DWC_OTG_EP_SPEED_HIGH) { ++ if (_hc->ep_is_in) { ++ if (_hc->multi_count == 1) { ++ _hc->data_pid_start = DWC_OTG_HC_PID_DATA0; ++ } else if (_hc->multi_count == 2) { ++ _hc->data_pid_start = DWC_OTG_HC_PID_DATA1; ++ } else { ++ _hc->data_pid_start = DWC_OTG_HC_PID_DATA2; ++ } ++ } else { ++ if (_hc->multi_count == 1) { ++ _hc->data_pid_start = DWC_OTG_HC_PID_DATA0; ++ } else { ++ _hc->data_pid_start = DWC_OTG_HC_PID_MDATA; ++ } ++ } ++ } else { ++ _hc->data_pid_start = DWC_OTG_HC_PID_DATA0; ++ } ++ } ++ ++ hctsiz.b.xfersize = _hc->xfer_len; ++ } ++ ++ _hc->start_pkt_count = num_packets; ++ hctsiz.b.pktcnt = num_packets; ++ hctsiz.b.pid = _hc->data_pid_start; ++ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); ++ ++ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _hc->hc_num); ++ DWC_DEBUGPL(DBG_HCDV, " Xfer Size: %d\n", hctsiz.b.xfersize); ++ DWC_DEBUGPL(DBG_HCDV, " Num Pkts: %d\n", hctsiz.b.pktcnt); ++ DWC_DEBUGPL(DBG_HCDV, " Start PID: %d\n", hctsiz.b.pid); ++ ++ if (_core_if->dma_enable) { ++#ifdef DEBUG ++if(((uint32_t)_hc->xfer_buff)%4) ++printk("dwc_otg_hc_start_transfer _hc->xfer_buff not 4 byte alignment\n"); ++#endif ++ dwc_write_reg32(&hc_regs->hcdma, (uint32_t)_hc->xfer_buff); ++ } ++ ++ /* Start the split */ ++ if (_hc->do_split) { ++ hcsplt_data_t hcsplt; ++ hcsplt.d32 = dwc_read_reg32 (&hc_regs->hcsplt); ++ hcsplt.b.spltena = 1; ++ dwc_write_reg32(&hc_regs->hcsplt, hcsplt.d32); ++ } ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.multicnt = _hc->multi_count; ++ hc_set_even_odd_frame(_core_if, _hc, &hcchar); ++#ifdef DEBUG ++ _core_if->start_hcchar_val[_hc->hc_num] = hcchar.d32; ++ if (hcchar.b.chdis) { ++ DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n", ++ __func__, _hc->hc_num, hcchar.d32); ++ } ++#endif ++ ++ /* Set host channel enable after all other setup is complete. */ ++ hcchar.b.chen = 1; ++ hcchar.b.chdis = 0; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ ++ _hc->xfer_started = 1; ++ _hc->requests++; ++ ++ if (!_core_if->dma_enable && !_hc->ep_is_in && _hc->xfer_len > 0) { ++ /* Load OUT packet into the appropriate Tx FIFO. */ ++ dwc_otg_hc_write_packet(_core_if, _hc); ++ } ++ ++#ifdef DEBUG ++ /* Start a timer for this transfer. */ ++ _core_if->hc_xfer_timer[_hc->hc_num].function = hc_xfer_timeout; ++ _core_if->hc_xfer_info[_hc->hc_num].core_if = _core_if; ++ _core_if->hc_xfer_info[_hc->hc_num].hc = _hc; ++ _core_if->hc_xfer_timer[_hc->hc_num].data = (unsigned long)(&_core_if->hc_xfer_info[_hc->hc_num]); ++ _core_if->hc_xfer_timer[_hc->hc_num].expires = jiffies + (HZ*10); ++ add_timer(&_core_if->hc_xfer_timer[_hc->hc_num]); ++#endif ++} ++ ++/** ++ * This function continues a data transfer that was started by previous call ++ * to <code>dwc_otg_hc_start_transfer</code>. The caller must ensure there is ++ * sufficient space in the request queue and Tx Data FIFO. This function ++ * should only be called in Slave mode. In DMA mode, the controller acts ++ * autonomously to complete transfers programmed to a host channel. ++ * ++ * For an OUT transfer, a new data packet is loaded into the appropriate FIFO ++ * if there is any data remaining to be queued. For an IN transfer, another ++ * data packet is always requested. For the SETUP phase of a control transfer, ++ * this function does nothing. ++ * ++ * @return 1 if a new request is queued, 0 if no more requests are required ++ * for this transfer. 
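A hedged illustration of how the return value documented above might be consumed; the active[]/nactive bookkeeping is a hypothetical HCD detail, not driver API:

static void example_queue_more_requests(dwc_otg_core_if_t *core_if,
                                        dwc_hc_t **active, int nactive)
{
        int ch;

        for (ch = 0; ch < nactive; ch++) {
                if (active[ch] && active[ch]->xfer_started &&
                    dwc_otg_hc_continue_transfer(core_if, active[ch]) == 0)
                        active[ch] = NULL;      /* nothing more to queue */
        }
}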
++ */ ++int dwc_otg_hc_continue_transfer(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc) ++{ ++ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _hc->hc_num); ++ ++ if (_hc->do_split) { ++ /* SPLITs always queue just once per channel */ ++ return 0; ++ } else if (_hc->data_pid_start == DWC_OTG_HC_PID_SETUP) { ++ /* SETUPs are queued only once since they can't be NAKed. */ ++ return 0; ++ } else if (_hc->ep_is_in) { ++ /* ++ * Always queue another request for other IN transfers. If ++ * back-to-back INs are issued and NAKs are received for both, ++ * the driver may still be processing the first NAK when the ++ * second NAK is received. When the interrupt handler clears ++ * the NAK interrupt for the first NAK, the second NAK will ++ * not be seen. So we can't depend on the NAK interrupt ++ * handler to requeue a NAKed request. Instead, IN requests ++ * are issued each time this function is called. When the ++ * transfer completes, the extra requests for the channel will ++ * be flushed. ++ */ ++ hcchar_data_t hcchar; ++ dwc_otg_hc_regs_t *hc_regs = _core_if->host_if->hc_regs[_hc->hc_num]; ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hc_set_even_odd_frame(_core_if, _hc, &hcchar); ++ hcchar.b.chen = 1; ++ hcchar.b.chdis = 0; ++ DWC_DEBUGPL(DBG_HCDV, " IN xfer: hcchar = 0x%08x\n", hcchar.d32); ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ _hc->requests++; ++ return 1; ++ } else { ++ /* OUT transfers. */ ++ if (_hc->xfer_count < _hc->xfer_len) { ++ if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ _hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ hcchar_data_t hcchar; ++ dwc_otg_hc_regs_t *hc_regs; ++ hc_regs = _core_if->host_if->hc_regs[_hc->hc_num]; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hc_set_even_odd_frame(_core_if, _hc, &hcchar); ++ } ++ ++ /* Load OUT packet into the appropriate Tx FIFO. */ ++ dwc_otg_hc_write_packet(_core_if, _hc); ++ _hc->requests++; ++ return 1; ++ } else { ++ return 0; ++ } ++ } ++} ++ ++/** ++ * Starts a PING transfer. This function should only be called in Slave mode. ++ * The Do Ping bit is set in the HCTSIZ register, then the channel is enabled. ++ */ ++void dwc_otg_hc_do_ping(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc) ++{ ++ hcchar_data_t hcchar; ++ hctsiz_data_t hctsiz; ++ dwc_otg_hc_regs_t *hc_regs = _core_if->host_if->hc_regs[_hc->hc_num]; ++ ++ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _hc->hc_num); ++ ++ hctsiz.d32 = 0; ++ hctsiz.b.dopng = 1; ++ hctsiz.b.pktcnt = 1; ++ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.chen = 1; ++ hcchar.b.chdis = 0; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++} ++ ++/* ++ * This function writes a packet into the Tx FIFO associated with the Host ++ * Channel. For a channel associated with a non-periodic EP, the non-periodic ++ * Tx FIFO is written. For a channel associated with a periodic EP, the ++ * periodic Tx FIFO is written. This function should only be called in Slave ++ * mode. ++ * ++ * Upon return the xfer_buff and xfer_count fields in _hc are incremented by ++ * then number of bytes written to the Tx FIFO. 
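Sketch of the Slave-mode pattern this packet-write helper supports (illustrative only; a real TxFIFO-empty handler would also check FIFO space and walk its own transaction queue):

static void example_nptxfempty_handler(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
{
        gnptxsts_data_t txsts;

        txsts.d32 = dwc_read_reg32(&core_if->core_global_regs->gnptxsts);
        while (txsts.b.nptxqspcavail > 0 && hc->xfer_count < hc->xfer_len) {
                dwc_otg_hc_write_packet(core_if, hc);   /* one packet per call */
                txsts.d32 = dwc_read_reg32(&core_if->core_global_regs->gnptxsts);
        }
}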
++ */ ++void dwc_otg_hc_write_packet(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc) ++{ ++ uint32_t i; ++ uint32_t remaining_count; ++ uint32_t byte_count; ++ uint32_t dword_count; ++ ++ uint32_t *data_buff = (uint32_t *)(_hc->xfer_buff); ++ uint32_t *data_fifo = _core_if->data_fifo[_hc->hc_num]; ++ ++ remaining_count = _hc->xfer_len - _hc->xfer_count; ++ if (remaining_count > _hc->max_packet) { ++ byte_count = _hc->max_packet; ++ } else { ++ byte_count = remaining_count; ++ } ++ ++ dword_count = (byte_count + 3) / 4; ++ ++ if ((((unsigned long)data_buff) & 0x3) == 0) { ++ /* xfer_buff is DWORD aligned. */ ++ for (i = 0; i < dword_count; i++, data_buff++) { ++ dwc_write_reg32(data_fifo, *data_buff); ++ } ++ } else { ++ /* xfer_buff is not DWORD aligned. */ ++ for (i = 0; i < dword_count; i++, data_buff++) { ++ dwc_write_reg32(data_fifo, get_unaligned(data_buff)); ++ } ++ } ++ ++ _hc->xfer_count += byte_count; ++ _hc->xfer_buff += byte_count; ++} ++ ++/** ++ * Gets the current USB frame number. This is the frame number from the last ++ * SOF packet. ++ */ ++uint32_t dwc_otg_get_frame_number(dwc_otg_core_if_t *_core_if) ++{ ++ dsts_data_t dsts; ++ dsts.d32 = dwc_read_reg32(&_core_if->dev_if->dev_global_regs->dsts); ++ ++ /* read current frame/microfreme number from DSTS register */ ++ return dsts.b.soffn; ++} ++ ++/** ++ * This function reads a setup packet from the Rx FIFO into the destination ++ * buffer. This function is called from the Rx Status Queue Level (RxStsQLvl) ++ * Interrupt routine when a SETUP packet has been received in Slave mode. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ * @param _dest Destination buffer for packet data. ++ */ ++void dwc_otg_read_setup_packet(dwc_otg_core_if_t *_core_if, uint32_t *_dest) ++{ ++ /* Get the 8 bytes of a setup transaction data */ ++ ++ /* Pop 2 DWORDS off the receive data FIFO into memory */ ++ _dest[0] = dwc_read_reg32(_core_if->data_fifo[0]); ++ _dest[1] = dwc_read_reg32(_core_if->data_fifo[0]); ++ //_dest[0] = dwc_read_datafifo32(_core_if->data_fifo[0]); ++ //_dest[1] = dwc_read_datafifo32(_core_if->data_fifo[0]); ++} ++ ++ ++/** ++ * This function enables EP0 OUT to receive SETUP packets and configures EP0 ++ * IN for transmitting packets. It is normally called when the ++ * "Enumeration Done" interrupt occurs. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ * @param _ep The EP0 data. 
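Illustrative only (not part of the patch): how a Slave-mode RxStsQLvl handler might fetch the 8 setup bytes with dwc_otg_read_setup_packet(), defined just above:

static void example_fetch_setup(dwc_otg_core_if_t *core_if)
{
        uint32_t setup[2];      /* bmRequestType/bRequest/wValue, wIndex/wLength */

        dwc_otg_read_setup_packet(core_if, setup);
        /* setup[0] and setup[1] now hold the two raw setup words */
}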
++ */ ++void dwc_otg_ep0_activate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep) ++{ ++ dwc_otg_dev_if_t *dev_if = _core_if->dev_if; ++ dsts_data_t dsts; ++ depctl_data_t diepctl; ++ depctl_data_t doepctl; ++ dctl_data_t dctl ={.d32=0}; ++ ++ /* Read the Device Status and Endpoint 0 Control registers */ ++ dsts.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dsts); ++ diepctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl); ++ doepctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl); ++ ++ /* Set the MPS of the IN EP based on the enumeration speed */ ++ switch (dsts.b.enumspd) { ++ case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ: ++ case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ: ++ case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ: ++ diepctl.b.mps = DWC_DEP0CTL_MPS_64; ++ break; ++ case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ: ++ diepctl.b.mps = DWC_DEP0CTL_MPS_8; ++ break; ++ } ++ ++ dwc_write_reg32(&dev_if->in_ep_regs[0]->diepctl, diepctl.d32); ++ ++ /* Enable OUT EP for receive */ ++ doepctl.b.epena = 1; ++ dwc_write_reg32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32); ++ ++#ifdef VERBOSE ++ DWC_DEBUGPL(DBG_PCDV,"doepctl0=%0x\n", ++ dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl)); ++ DWC_DEBUGPL(DBG_PCDV,"diepctl0=%0x\n", ++ dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl)); ++#endif ++ dctl.b.cgnpinnak = 1; ++ dwc_modify_reg32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32); ++ DWC_DEBUGPL(DBG_PCDV,"dctl=%0x\n", ++ dwc_read_reg32(&dev_if->dev_global_regs->dctl)); ++} ++ ++/** ++ * This function activates an EP. The Device EP control register for ++ * the EP is configured as defined in the ep structure. Note: This ++ * function is not used for EP0. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ * @param _ep The EP to activate. ++ */ ++void dwc_otg_ep_activate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep) ++{ ++ dwc_otg_dev_if_t *dev_if = _core_if->dev_if; ++ depctl_data_t depctl; ++ volatile uint32_t *addr; ++ daint_data_t daintmsk = {.d32=0}; ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s() EP%d-%s\n", __func__, _ep->num, ++ (_ep->is_in?"IN":"OUT")); ++ ++ /* Read DEPCTLn register */ ++ if (_ep->is_in == 1) { ++ addr = &dev_if->in_ep_regs[_ep->num]->diepctl; ++ daintmsk.ep.in = 1<<_ep->num; ++ } else { ++ addr = &dev_if->out_ep_regs[_ep->num]->doepctl; ++ daintmsk.ep.out = 1<<_ep->num; ++ } ++ ++ /* If the EP is already active don't change the EP Control ++ * register. */ ++ depctl.d32 = dwc_read_reg32(addr); ++ if (!depctl.b.usbactep) { ++ depctl.b.mps = _ep->maxpacket; ++ depctl.b.eptype = _ep->type; ++ depctl.b.txfnum = _ep->tx_fifo_num; ++ ++ if (_ep->type == DWC_OTG_EP_TYPE_ISOC) { ++ depctl.b.setd0pid = 1; // ??? ++ } else { ++ depctl.b.setd0pid = 1; ++ } ++ depctl.b.usbactep = 1; ++ ++ dwc_write_reg32(addr, depctl.d32); ++ DWC_DEBUGPL(DBG_PCDV,"DEPCTL=%08x\n", dwc_read_reg32(addr)); ++ } ++ ++ ++ /* Enable the Interrupt for this EP */ ++ dwc_modify_reg32(&dev_if->dev_global_regs->daintmsk, ++ 0, daintmsk.d32); ++ DWC_DEBUGPL(DBG_PCDV,"DAINTMSK=%0x\n", ++ dwc_read_reg32(&dev_if->dev_global_regs->daintmsk)); ++ _ep->stall_clear_flag = 0; ++ return; ++} ++ ++/** ++ * This function deactivates an EP. This is done by clearing the USB Active ++ * EP bit in the Device EP control register. Note: This function is not used ++ * for EP0. EP0 cannot be deactivated. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ * @param _ep The EP to deactivate. 
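A minimal sketch of the PCD-level enable path around these two calls (editorial, not part of the patch; the field values are illustrative, and dwc_otg_ep_deactivate() is the matching call on disable):

static void example_enable_bulk_in_ep(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
{
        ep->num = 1;                    /* e.g. EP1 IN */
        ep->is_in = 1;
        ep->type = DWC_OTG_EP_TYPE_BULK;
        ep->maxpacket = 512;
        ep->tx_fifo_num = 0;            /* shared non-periodic TxFIFO */

        dwc_otg_ep_activate(core_if, ep);       /* sets USBActEP, unmasks DAINT bit */
}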
++ */
++void dwc_otg_ep_deactivate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep)
++{
++ depctl_data_t depctl ={.d32 = 0};
++ volatile uint32_t *addr;
++ daint_data_t daintmsk = {.d32=0};
++
++ /* Read DEPCTLn register */
++ if (_ep->is_in == 1) {
++ addr = &_core_if->dev_if->in_ep_regs[_ep->num]->diepctl;
++ daintmsk.ep.in = 1<<_ep->num;
++ } else {
++ addr = &_core_if->dev_if->out_ep_regs[_ep->num]->doepctl;
++ daintmsk.ep.out = 1<<_ep->num;
++ }
++
++ depctl.b.usbactep = 0;
++ dwc_write_reg32(addr, depctl.d32);
++
++ /* Disable the Interrupt for this EP */
++ dwc_modify_reg32(&_core_if->dev_if->dev_global_regs->daintmsk,
++ daintmsk.d32, 0);
++
++ return;
++}
++
++/**
++ * This function does the setup for a data transfer for an EP and
++ * starts the transfer. For an IN transfer, the packets will be
++ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers,
++ * the packets are unloaded from the Rx FIFO in the ISR.
++ *
++ * @param _core_if Programming view of DWC_otg controller.
++ * @param _ep The EP to start the transfer on.
++ */
++void dwc_otg_ep_start_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep)
++{
++ /** @todo Refactor this function to check that the transfer size
++ * count value does not exceed the number of bits in the Transfer
++ * count register. */
++ depctl_data_t depctl;
++ deptsiz_data_t deptsiz;
++ gintmsk_data_t intr_mask = { .d32 = 0};
++
++#ifdef CHECK_PACKET_COUNTER_WIDTH
++ const uint32_t MAX_XFER_SIZE =
++ _core_if->core_params->max_transfer_size;
++ const uint32_t MAX_PKT_COUNT =
++ _core_if->core_params->max_packet_count;
++ uint32_t num_packets;
++ uint32_t transfer_len;
++ dwc_otg_dev_out_ep_regs_t *out_regs =
++ _core_if->dev_if->out_ep_regs[_ep->num];
++ dwc_otg_dev_in_ep_regs_t *in_regs =
++ _core_if->dev_if->in_ep_regs[_ep->num];
++ gnptxsts_data_t txstatus;
++
++ int lvl = SET_DEBUG_LEVEL(DBG_PCD);
++
++
++ DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d "
++ "xfer_buff=%p start_xfer_buff=%p\n",
++ _ep->num, (_ep->is_in?"IN":"OUT"), _ep->xfer_len,
++ _ep->xfer_count, _ep->xfer_buff, _ep->start_xfer_buff);
++
++ transfer_len = _ep->xfer_len - _ep->xfer_count;
++ if (transfer_len > MAX_XFER_SIZE) {
++ transfer_len = MAX_XFER_SIZE;
++ }
++ if (transfer_len == 0) {
++ num_packets = 1;
++ /* For an OUT EP to receive a zero-length packet, set the
++ * transfer size to maxpacket size. */
++ if (!_ep->is_in) {
++ transfer_len = _ep->maxpacket;
++ }
++ } else {
++ num_packets =
++ (transfer_len + _ep->maxpacket - 1) / _ep->maxpacket;
++ if (num_packets > MAX_PKT_COUNT) {
++ num_packets = MAX_PKT_COUNT;
++ }
++ }
++ DWC_DEBUGPL(DBG_PCD, "transfer_len=%d #pckt=%d\n", transfer_len,
++ num_packets);
++
++ deptsiz.b.xfersize = transfer_len;
++ deptsiz.b.pktcnt = num_packets;
++
++ /* IN endpoint */
++ if (_ep->is_in == 1) {
++ depctl.d32 = dwc_read_reg32(&in_regs->diepctl);
++ } else {/* OUT endpoint */
++ depctl.d32 = dwc_read_reg32(&out_regs->doepctl);
++ }
++
++ /* EP enable, IN data in FIFO */
++ depctl.b.cnak = 1;
++ depctl.b.epena = 1;
++ /* IN endpoint */
++ if (_ep->is_in == 1) {
++ txstatus.d32 =
++ dwc_read_reg32(&_core_if->core_global_regs->gnptxsts);
++ if (txstatus.b.nptxqspcavail == 0) {
++ DWC_DEBUGPL(DBG_ANY, "TX Queue Full (0x%0x)\n",
++ txstatus.d32);
++ return;
++ }
++ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
++ dwc_write_reg32(&in_regs->diepctl, depctl.d32);
++ /**
++ * Enable the Non-Periodic Tx FIFO empty interrupt, the
++ * data will be written into the fifo by the ISR.
++ */ ++ if (_core_if->dma_enable) { ++ dwc_write_reg32(&in_regs->diepdma, (uint32_t) _ep->xfer_buff); ++ } else { ++ if (_core_if->en_multiple_tx_fifo == 0) { ++ intr_mask.b.nptxfempty = 1; ++ dwc_modify_reg32( &_core_if->core_global_regs->gintsts, ++ intr_mask.d32, 0); ++ dwc_modify_reg32( &_core_if->core_global_regs->gintmsk, ++ intr_mask.d32, intr_mask.d32); ++ } else { ++ /* Enable the Tx FIFO Empty Interrupt for this EP */ ++ if (_ep->xfer_len > 0 && ++ _ep->type != DWC_OTG_EP_TYPE_ISOC) { ++ uint32_t fifoemptymsk = 0; ++ fifoemptymsk = (0x1 << _ep->num); ++ dwc_modify_reg32(&_core_if->dev_if->dev_global_regs-> ++ dtknqr4_fifoemptymsk,0, fifoemptymsk); ++ } ++ } ++ } ++ } else { /* OUT endpoint */ ++ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); ++ dwc_write_reg32(&out_regs->doepctl, depctl.d32); ++ if (_core_if->dma_enable) { ++ dwc_write_reg32(&out_regs->doepdma,(uint32_t) _ep->xfer_buff); ++ } ++ } ++ DWC_DEBUGPL(DBG_PCD, "DOEPCTL=%08x DOEPTSIZ=%08x\n", ++ dwc_read_reg32(&out_regs->doepctl), ++ dwc_read_reg32(&out_regs->doeptsiz)); ++ DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n", ++ dwc_read_reg32(&_core_if->dev_if->dev_global_regs->daintmsk), ++ dwc_read_reg32(&_core_if->core_global_regs->gintmsk)); ++ ++ SET_DEBUG_LEVEL(lvl); ++#endif ++ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__); ++ ++ DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d " ++ "xfer_buff=%p start_xfer_buff=%p\n", ++ _ep->num, (_ep->is_in?"IN":"OUT"), _ep->xfer_len, ++ _ep->xfer_count, _ep->xfer_buff, _ep->start_xfer_buff); ++ ++ /* IN endpoint */ ++ if (_ep->is_in == 1) { ++ dwc_otg_dev_in_ep_regs_t * in_regs = _core_if->dev_if->in_ep_regs[_ep->num]; ++ gnptxsts_data_t gtxstatus; ++ gtxstatus.d32 = dwc_read_reg32(&_core_if->core_global_regs->gnptxsts); ++ if (_core_if->en_multiple_tx_fifo == 0 && ++ gtxstatus.b.nptxqspcavail == 0) { ++#ifdef DEBUG ++ DWC_PRINT("TX Queue Full (0x%0x)\n", gtxstatus.d32); ++#endif ++ //return; ++ MDELAY(100); //james ++ } ++ ++ depctl.d32 = dwc_read_reg32(&(in_regs->diepctl)); ++ deptsiz.d32 = dwc_read_reg32(&(in_regs->dieptsiz)); ++ ++ /* Zero Length Packet? */ ++ if (_ep->xfer_len == 0) { ++ deptsiz.b.xfersize = 0; ++ deptsiz.b.pktcnt = 1; ++ } else { ++ ++ /* Program the transfer size and packet count ++ * as follows: xfersize = N * maxpacket + ++ * short_packet pktcnt = N + (short_packet ++ * exist ? 1 : 0) ++ */ ++ deptsiz.b.xfersize = _ep->xfer_len; ++ deptsiz.b.pktcnt = (_ep->xfer_len - 1 + _ep->maxpacket) / _ep->maxpacket; ++ } ++ ++ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); ++ ++ /* Write the DMA register */ ++ if (_core_if->dma_enable) { ++#if 1 // winder ++ dma_cache_wback_inv((unsigned long) _ep->xfer_buff, _ep->xfer_len); // winder ++ dwc_write_reg32 (&(in_regs->diepdma), ++ CPHYSADDR((uint32_t)_ep->xfer_buff)); // winder ++#else ++ dwc_write_reg32 (&(in_regs->diepdma), ++ (uint32_t)_ep->dma_addr); ++#endif ++ } else { ++ if (_ep->type != DWC_OTG_EP_TYPE_ISOC) { ++ /** ++ * Enable the Non-Periodic Tx FIFO empty interrupt, ++ * or the Tx FIFO epmty interrupt in dedicated Tx FIFO mode, ++ * the data will be written into the fifo by the ISR. 
++ */ ++ if (_core_if->en_multiple_tx_fifo == 0) { ++ intr_mask.b.nptxfempty = 1; ++ dwc_modify_reg32( &_core_if->core_global_regs->gintsts, ++ intr_mask.d32, 0); ++ dwc_modify_reg32( &_core_if->core_global_regs->gintmsk, ++ intr_mask.d32, intr_mask.d32); ++ } else { ++ /* Enable the Tx FIFO Empty Interrupt for this EP */ ++ if (_ep->xfer_len > 0) { ++ uint32_t fifoemptymsk = 0; ++ fifoemptymsk = 1 << _ep->num; ++ dwc_modify_reg32(&_core_if->dev_if->dev_global_regs-> ++ dtknqr4_fifoemptymsk,0,fifoemptymsk); ++ } ++ } ++ } ++ } ++ ++ /* EP enable, IN data in FIFO */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ dwc_write_reg32(&in_regs->diepctl, depctl.d32); ++ ++ if (_core_if->dma_enable) { ++ depctl.d32 = dwc_read_reg32 (&_core_if->dev_if->in_ep_regs[0]->diepctl); ++ depctl.b.nextep = _ep->num; ++ dwc_write_reg32 (&_core_if->dev_if->in_ep_regs[0]->diepctl, depctl.d32); ++ ++ } ++ } else { ++ /* OUT endpoint */ ++ dwc_otg_dev_out_ep_regs_t * out_regs = _core_if->dev_if->out_ep_regs[_ep->num]; ++ ++ depctl.d32 = dwc_read_reg32(&(out_regs->doepctl)); ++ deptsiz.d32 = dwc_read_reg32(&(out_regs->doeptsiz)); ++ ++ /* Program the transfer size and packet count as follows: ++ * ++ * pktcnt = N ++ * xfersize = N * maxpacket ++ */ ++ if (_ep->xfer_len == 0) { ++ /* Zero Length Packet */ ++ deptsiz.b.xfersize = _ep->maxpacket; ++ deptsiz.b.pktcnt = 1; ++ } else { ++ deptsiz.b.pktcnt = (_ep->xfer_len + (_ep->maxpacket - 1)) / _ep->maxpacket; ++ deptsiz.b.xfersize = deptsiz.b.pktcnt * _ep->maxpacket; ++ } ++ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); ++ ++ DWC_DEBUGPL(DBG_PCDV, "ep%d xfersize=%d pktcnt=%d\n", ++ _ep->num, deptsiz.b.xfersize, deptsiz.b.pktcnt); ++ ++ if (_core_if->dma_enable) { ++#if 1 // winder ++ dwc_write_reg32 (&(out_regs->doepdma), ++ CPHYSADDR((uint32_t)_ep->xfer_buff)); // winder ++#else ++ dwc_write_reg32 (&(out_regs->doepdma), ++ (uint32_t)_ep->dma_addr); ++#endif ++ } ++ ++ if (_ep->type == DWC_OTG_EP_TYPE_ISOC) { ++ /** @todo NGS: dpid is read-only. Use setd0pid ++ * or setd1pid. */ ++ if (_ep->even_odd_frame) { ++ depctl.b.setd1pid = 1; ++ } else { ++ depctl.b.setd0pid = 1; ++ } ++ } ++ ++ /* EP enable */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ ++ dwc_write_reg32(&out_regs->doepctl, depctl.d32); ++ ++ DWC_DEBUGPL(DBG_PCD, "DOEPCTL=%08x DOEPTSIZ=%08x\n", ++ dwc_read_reg32(&out_regs->doepctl), ++ dwc_read_reg32(&out_regs->doeptsiz)); ++ DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n", ++ dwc_read_reg32(&_core_if->dev_if->dev_global_regs->daintmsk), ++ dwc_read_reg32(&_core_if->core_global_regs->gintmsk)); ++ } ++} ++ ++ ++/** ++ * This function does the setup for a data transfer for EP0 and starts ++ * the transfer. For an IN transfer, the packets will be loaded into ++ * the appropriate Tx FIFO in the ISR. For OUT transfers, the packets are ++ * unloaded from the Rx FIFO in the ISR. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ * @param _ep The EP0 data. 
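As a concrete check of the DOEPTSIZ programming used in these start-transfer routines (editorial example): a 1000-byte OUT transfer on an endpoint with a 64-byte max packet size gives pktcnt = (1000 + 63) / 64 = 16 and xfersize = 16 * 64 = 1024; the programmed transfer size is always a whole number of max-size packets, and the short final packet terminates the transfer early.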
++ */ ++void dwc_otg_ep0_start_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep) ++{ ++ volatile depctl_data_t depctl; ++ volatile deptsiz0_data_t deptsiz; ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ ++ DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d " ++ "xfer_buff=%p start_xfer_buff=%p total_len=%d\n", ++ _ep->num, (_ep->is_in?"IN":"OUT"), _ep->xfer_len, ++ _ep->xfer_count, _ep->xfer_buff, _ep->start_xfer_buff, ++ _ep->total_len); ++ _ep->total_len = _ep->xfer_len; ++ ++ /* IN endpoint */ ++ if (_ep->is_in == 1) { ++ dwc_otg_dev_in_ep_regs_t * in_regs = _core_if->dev_if->in_ep_regs[0]; ++ gnptxsts_data_t gtxstatus; ++ gtxstatus.d32 = dwc_read_reg32(&_core_if->core_global_regs->gnptxsts); ++ if (_core_if->en_multiple_tx_fifo == 0 && ++ gtxstatus.b.nptxqspcavail == 0) { ++#ifdef DEBUG ++ deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz); ++ DWC_DEBUGPL(DBG_PCD,"DIEPCTL0=%0x\n", ++ dwc_read_reg32(&in_regs->diepctl)); ++ DWC_DEBUGPL(DBG_PCD, "DIEPTSIZ0=%0x (sz=%d, pcnt=%d)\n", ++ deptsiz.d32, deptsiz.b.xfersize,deptsiz.b.pktcnt); ++ DWC_PRINT("TX Queue or FIFO Full (0x%0x)\n", gtxstatus.d32); ++#endif /* */ ++ printk("TX Queue or FIFO Full!!!!\n"); // test-only ++ //return; ++ MDELAY(100); //james ++ } ++ ++ depctl.d32 = dwc_read_reg32(&in_regs->diepctl); ++ deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz); ++ ++ /* Zero Length Packet? */ ++ if (_ep->xfer_len == 0) { ++ deptsiz.b.xfersize = 0; ++ deptsiz.b.pktcnt = 1; ++ } else { ++ /* Program the transfer size and packet count ++ * as follows: xfersize = N * maxpacket + ++ * short_packet pktcnt = N + (short_packet ++ * exist ? 1 : 0) ++ */ ++ if (_ep->xfer_len > _ep->maxpacket) { ++ _ep->xfer_len = _ep->maxpacket; ++ deptsiz.b.xfersize = _ep->maxpacket; ++ } ++ else { ++ deptsiz.b.xfersize = _ep->xfer_len; ++ } ++ deptsiz.b.pktcnt = 1; ++ ++ } ++ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); ++ DWC_DEBUGPL(DBG_PCDV, "IN len=%d xfersize=%d pktcnt=%d [%08x]\n", ++ _ep->xfer_len, deptsiz.b.xfersize,deptsiz.b.pktcnt, deptsiz.d32); ++ ++ /* Write the DMA register */ ++ if (_core_if->dma_enable) { ++ dwc_write_reg32(&(in_regs->diepdma), (uint32_t) _ep->dma_addr); ++ } ++ ++ /* EP enable, IN data in FIFO */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ dwc_write_reg32(&in_regs->diepctl, depctl.d32); ++ ++ /** ++ * Enable the Non-Periodic Tx FIFO empty interrupt, the ++ * data will be written into the fifo by the ISR. 
++ */ ++ if (!_core_if->dma_enable) { ++ if (_core_if->en_multiple_tx_fifo == 0) { ++ intr_mask.b.nptxfempty = 1; ++ dwc_modify_reg32(&_core_if->core_global_regs->gintsts, intr_mask.d32, 0); ++ dwc_modify_reg32(&_core_if->core_global_regs->gintmsk, intr_mask.d32, ++ intr_mask.d32); ++ } else { ++ /* Enable the Tx FIFO Empty Interrupt for this EP */ ++ if (_ep->xfer_len > 0) { ++ uint32_t fifoemptymsk = 0; ++ fifoemptymsk |= 1 << _ep->num; ++ dwc_modify_reg32(&_core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, ++ 0, fifoemptymsk); ++ } ++ ++ } ++ } ++ } else { ++ /* OUT endpoint */ ++ dwc_otg_dev_out_ep_regs_t * out_regs = _core_if->dev_if->out_ep_regs[_ep->num]; ++ ++ depctl.d32 = dwc_read_reg32(&out_regs->doepctl); ++ deptsiz.d32 = dwc_read_reg32(&out_regs->doeptsiz); ++ ++ /* Program the transfer size and packet count as follows: ++ * xfersize = N * (maxpacket + 4 - (maxpacket % 4)) ++ * pktcnt = N */ ++ if (_ep->xfer_len == 0) { ++ /* Zero Length Packet */ ++ deptsiz.b.xfersize = _ep->maxpacket; ++ deptsiz.b.pktcnt = 1; ++ } else { ++ deptsiz.b.pktcnt = (_ep->xfer_len + (_ep->maxpacket - 1)) / _ep->maxpacket; ++ deptsiz.b.xfersize = deptsiz.b.pktcnt * _ep->maxpacket; ++ } ++ ++ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); ++ DWC_DEBUGPL(DBG_PCDV, "len=%d xfersize=%d pktcnt=%d\n", ++ _ep->xfer_len, deptsiz.b.xfersize,deptsiz.b.pktcnt); ++ ++ if (_core_if->dma_enable) { ++ dwc_write_reg32(&(out_regs->doepdma), (uint32_t) _ep->dma_addr); ++ } ++ ++ /* EP enable */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ dwc_write_reg32 (&(out_regs->doepctl), depctl.d32); ++ } ++} ++ ++/** ++ * This function continues control IN transfers started by ++ * dwc_otg_ep0_start_transfer, when the transfer does not fit in a ++ * single packet. NOTE: The DIEPCTL0/DOEPCTL0 registers only have one ++ * bit for the packet count. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ * @param _ep The EP0 data. ++ */ ++void dwc_otg_ep0_continue_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep) ++{ ++ depctl_data_t depctl; ++ deptsiz0_data_t deptsiz; ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ ++ if (_ep->is_in == 1) { ++ dwc_otg_dev_in_ep_regs_t *in_regs = ++ _core_if->dev_if->in_ep_regs[0]; ++ gnptxsts_data_t tx_status = {.d32 = 0}; ++ ++ tx_status.d32 = dwc_read_reg32( &_core_if->core_global_regs->gnptxsts ); ++ /** @todo Should there be check for room in the Tx ++ * Status Queue. If not remove the code above this comment. */ ++ ++ depctl.d32 = dwc_read_reg32(&in_regs->diepctl); ++ deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz); ++ ++ /* Program the transfer size and packet count ++ * as follows: xfersize = N * maxpacket + ++ * short_packet pktcnt = N + (short_packet ++ * exist ? 1 : 0) ++ */ ++ deptsiz.b.xfersize = (_ep->total_len - _ep->xfer_count) > _ep->maxpacket ? 
_ep->maxpacket : ++ (_ep->total_len - _ep->xfer_count); ++ deptsiz.b.pktcnt = 1; ++ _ep->xfer_len += deptsiz.b.xfersize; ++ ++ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); ++ DWC_DEBUGPL(DBG_PCDV, "IN len=%d xfersize=%d pktcnt=%d [%08x]\n", ++ _ep->xfer_len, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32); ++ ++ /* Write the DMA register */ ++ if (_core_if->hwcfg2.b.architecture == DWC_INT_DMA_ARCH) { ++ dwc_write_reg32 (&(in_regs->diepdma), ++ CPHYSADDR((uint32_t)_ep->dma_addr)); // winder ++ } ++ ++ /* EP enable, IN data in FIFO */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ dwc_write_reg32(&in_regs->diepctl, depctl.d32); ++ ++ /** ++ * Enable the Non-Periodic Tx FIFO empty interrupt, the ++ * data will be written into the fifo by the ISR. ++ */ ++ if (!_core_if->dma_enable) { ++ /* First clear it from GINTSTS */ ++ intr_mask.b.nptxfempty = 1; ++ dwc_write_reg32( &_core_if->core_global_regs->gintsts, ++ intr_mask.d32 ); ++ ++ dwc_modify_reg32( &_core_if->core_global_regs->gintmsk, ++ intr_mask.d32, intr_mask.d32); ++ } ++ ++ } ++ ++} ++ ++#ifdef DEBUG ++void dump_msg(const u8 *buf, unsigned int length) ++{ ++ unsigned int start, num, i; ++ char line[52], *p; ++ ++ if (length >= 512) ++ return; ++ start = 0; ++ while (length > 0) { ++ num = min(length, 16u); ++ p = line; ++ for (i = 0; i < num; ++i) { ++ if (i == 8) ++ *p++ = ' '; ++ sprintf(p, " %02x", buf[i]); ++ p += 3; ++ } ++ *p = 0; ++ DWC_PRINT( "%6x: %s\n", start, line); ++ buf += num; ++ start += num; ++ length -= num; ++ } ++} ++#else ++static inline void dump_msg(const u8 *buf, unsigned int length) ++{ ++} ++#endif ++ ++/** ++ * This function writes a packet into the Tx FIFO associated with the ++ * EP. For non-periodic EPs the non-periodic Tx FIFO is written. For ++ * periodic EPs the periodic Tx FIFO associated with the EP is written ++ * with all packets for the next micro-frame. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ * @param _ep The EP to write packet for. ++ * @param _dma Indicates if DMA is being used. ++ */ ++void dwc_otg_ep_write_packet(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep, int _dma) ++{ ++ /** ++ * The buffer is padded to DWORD on a per packet basis in ++ * slave/dma mode if the MPS is not DWORD aligned. The last ++ * packet, if short, is also padded to a multiple of DWORD. ++ * ++ * ep->xfer_buff always starts DWORD aligned in memory and is a ++ * multiple of DWORD in length ++ * ++ * ep->xfer_len can be any number of bytes ++ * ++ * ep->xfer_count is a multiple of ep->maxpacket until the last ++ * packet ++ * ++ * FIFO access is DWORD */ ++ ++ uint32_t i; ++ uint32_t byte_count; ++ uint32_t dword_count; ++ uint32_t *fifo; ++ uint32_t *data_buff = (uint32_t *)_ep->xfer_buff; ++ ++ //DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p)\n", __func__, _core_if, _ep); ++ if (_ep->xfer_count >= _ep->xfer_len) { ++ DWC_WARN("%s() No data for EP%d!!!\n", __func__, _ep->num); ++ return; ++ } ++ ++ /* Find the byte length of the packet either short packet or MPS */ ++ if ((_ep->xfer_len - _ep->xfer_count) < _ep->maxpacket) { ++ byte_count = _ep->xfer_len - _ep->xfer_count; ++ } ++ else { ++ byte_count = _ep->maxpacket; ++ } ++ ++ /* Find the DWORD length, padded by extra bytes as neccessary if MPS ++ * is not a multiple of DWORD */ ++ dword_count = (byte_count + 3) / 4; ++ ++#ifdef VERBOSE ++ dump_msg(_ep->xfer_buff, byte_count); ++#endif ++ if (_ep->type == DWC_OTG_EP_TYPE_ISOC) { ++ /**@todo NGS Where are the Periodic Tx FIFO addresses ++ * intialized? What should this be? 
*/ ++ fifo = _core_if->data_fifo[_ep->tx_fifo_num]; ++ } else { ++ fifo = _core_if->data_fifo[_ep->num]; ++ } ++ ++ DWC_DEBUGPL((DBG_PCDV|DBG_CILV), "fifo=%p buff=%p *p=%08x bc=%d\n", ++ fifo, data_buff, *data_buff, byte_count); ++ ++ ++ if (!_dma) { ++ for (i=0; i<dword_count; i++, data_buff++) { ++ dwc_write_reg32( fifo, *data_buff ); ++ } ++ } ++ ++ _ep->xfer_count += byte_count; ++ _ep->xfer_buff += byte_count; ++#if 1 // winder, why do we need this?? ++ _ep->dma_addr += byte_count; ++#endif ++} ++ ++/** ++ * Set the EP STALL. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ * @param _ep The EP to set the stall on. ++ */ ++void dwc_otg_ep_set_stall(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep) ++{ ++ depctl_data_t depctl; ++ volatile uint32_t *depctl_addr; ++ ++ DWC_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, _ep->num, ++ (_ep->is_in?"IN":"OUT")); ++ ++ if (_ep->is_in == 1) { ++ depctl_addr = &(_core_if->dev_if->in_ep_regs[_ep->num]->diepctl); ++ depctl.d32 = dwc_read_reg32(depctl_addr); ++ ++ /* set the disable and stall bits */ ++ if (depctl.b.epena) { ++ depctl.b.epdis = 1; ++ } ++ depctl.b.stall = 1; ++ dwc_write_reg32(depctl_addr, depctl.d32); ++ ++ } else { ++ depctl_addr = &(_core_if->dev_if->out_ep_regs[_ep->num]->doepctl); ++ depctl.d32 = dwc_read_reg32(depctl_addr); ++ ++ /* set the stall bit */ ++ depctl.b.stall = 1; ++ dwc_write_reg32(depctl_addr, depctl.d32); ++ } ++ DWC_DEBUGPL(DBG_PCD,"DEPCTL=%0x\n",dwc_read_reg32(depctl_addr)); ++ return; ++} ++ ++/** ++ * Clear the EP STALL. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ * @param _ep The EP to clear stall from. ++ */ ++void dwc_otg_ep_clear_stall(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep) ++{ ++ depctl_data_t depctl; ++ volatile uint32_t *depctl_addr; ++ ++ DWC_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, _ep->num, ++ (_ep->is_in?"IN":"OUT")); ++ ++ if (_ep->is_in == 1) { ++ depctl_addr = &(_core_if->dev_if->in_ep_regs[_ep->num]->diepctl); ++ } else { ++ depctl_addr = &(_core_if->dev_if->out_ep_regs[_ep->num]->doepctl); ++ } ++ ++ depctl.d32 = dwc_read_reg32(depctl_addr); ++ ++ /* clear the stall bits */ ++ depctl.b.stall = 0; ++ ++ /* ++ * USB Spec 9.4.5: For endpoints using data toggle, regardless ++ * of whether an endpoint has the Halt feature set, a ++ * ClearFeature(ENDPOINT_HALT) request always results in the ++ * data toggle being reinitialized to DATA0. ++ */ ++ if (_ep->type == DWC_OTG_EP_TYPE_INTR || ++ _ep->type == DWC_OTG_EP_TYPE_BULK) { ++ depctl.b.setd0pid = 1; /* DATA0 */ ++ } ++ ++ dwc_write_reg32(depctl_addr, depctl.d32); ++ DWC_DEBUGPL(DBG_PCD,"DEPCTL=%0x\n",dwc_read_reg32(depctl_addr)); ++ return; ++} ++ ++/** ++ * This function reads a packet from the Rx FIFO into the destination ++ * buffer. To read SETUP data use dwc_otg_read_setup_packet. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ * @param _dest Destination buffer for the packet. ++ * @param _bytes Number of bytes to copy to the destination. ++ */ ++void dwc_otg_read_packet(dwc_otg_core_if_t *_core_if, ++ uint8_t *_dest, ++ uint16_t _bytes) ++{ ++ int i; ++ int word_count = (_bytes + 3) / 4; ++ ++ volatile uint32_t *fifo = _core_if->data_fifo[0]; ++ uint32_t *data_buff = (uint32_t *)_dest; ++ ++ /** ++ * @todo Account for the case where _dest is not dword aligned. This ++ * requires reading data from the FIFO into a uint32_t temp buffer, ++ * then moving it into the data buffer. 
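A hedged sketch of what the @todo above describes (not part of the patch; it relies only on the kernel's memcpy and min_t helpers):

static void example_read_packet_unaligned(dwc_otg_core_if_t *core_if,
                                          uint8_t *dest, uint16_t bytes)
{
        volatile uint32_t *fifo = core_if->data_fifo[0];
        uint32_t tmp;
        int i;

        for (i = 0; i < (bytes + 3) / 4; i++) {
                tmp = dwc_read_reg32(fifo);
                /* copy 4 bytes, or whatever remains for the last word */
                memcpy(dest + 4 * i, &tmp, min_t(int, 4, bytes - 4 * i));
        }
}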
++ */ ++ ++ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p,%d)\n", __func__, ++ _core_if, _dest, _bytes); ++ ++ for (i=0; i<word_count; i++, data_buff++) { ++ *data_buff = dwc_read_reg32(fifo); ++ } ++ ++ return; ++} ++ ++ ++#ifdef DEBUG ++/** ++ * This functions reads the device registers and prints them ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ */ ++void dwc_otg_dump_dev_registers(dwc_otg_core_if_t *_core_if) ++{ ++ int i; ++ volatile uint32_t *addr; ++ ++ DWC_PRINT("Device Global Registers\n"); ++ addr=&_core_if->dev_if->dev_global_regs->dcfg; ++ DWC_PRINT("DCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->dev_if->dev_global_regs->dctl; ++ DWC_PRINT("DCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->dev_if->dev_global_regs->dsts; ++ DWC_PRINT("DSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->dev_if->dev_global_regs->diepmsk; ++ DWC_PRINT("DIEPMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->dev_if->dev_global_regs->doepmsk; ++ DWC_PRINT("DOEPMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->dev_if->dev_global_regs->daint; ++ DWC_PRINT("DAINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->dev_if->dev_global_regs->dtknqr1; ++ DWC_PRINT("DTKNQR1 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ if (_core_if->hwcfg2.b.dev_token_q_depth > 6) { ++ addr=&_core_if->dev_if->dev_global_regs->dtknqr2; ++ DWC_PRINT("DTKNQR2 @0x%08X : 0x%08X\n", ++ (uint32_t)addr,dwc_read_reg32(addr)); ++ } ++ ++ addr=&_core_if->dev_if->dev_global_regs->dvbusdis; ++ DWC_PRINT("DVBUSID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ ++ addr=&_core_if->dev_if->dev_global_regs->dvbuspulse; ++ DWC_PRINT("DVBUSPULSE @0x%08X : 0x%08X\n", ++ (uint32_t)addr,dwc_read_reg32(addr)); ++ ++ if (_core_if->hwcfg2.b.dev_token_q_depth > 14) { ++ addr = &_core_if->dev_if->dev_global_regs->dtknqr3_dthrctl; ++ DWC_PRINT("DTKNQR3 @0x%08X : 0x%08X\n", ++ (uint32_t)addr, dwc_read_reg32(addr)); ++ } ++ ++ if (_core_if->hwcfg2.b.dev_token_q_depth > 22) { ++ addr = &_core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk; ++ DWC_PRINT("DTKNQR4 @0x%08X : 0x%08X\n", (uint32_t) addr, ++ dwc_read_reg32(addr)); ++ } ++ for (i = 0; i <= _core_if->dev_if->num_in_eps; i++) { ++ DWC_PRINT("Device IN EP %d Registers\n", i); ++ addr=&_core_if->dev_if->in_ep_regs[i]->diepctl; ++ DWC_PRINT("DIEPCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->dev_if->in_ep_regs[i]->diepint; ++ DWC_PRINT("DIEPINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->dev_if->in_ep_regs[i]->dieptsiz; ++ DWC_PRINT("DIETSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->dev_if->in_ep_regs[i]->diepdma; ++ DWC_PRINT("DIEPDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ ++addr = &_core_if->dev_if->in_ep_regs[i]->dtxfsts; ++ DWC_PRINT("DTXFSTS @0x%08X : 0x%08X\n", (uint32_t) addr, ++ dwc_read_reg32(addr)); ++ } ++ for (i = 0; i <= _core_if->dev_if->num_out_eps; i++) { ++ DWC_PRINT("Device OUT EP %d Registers\n", i); ++ addr=&_core_if->dev_if->out_ep_regs[i]->doepctl; ++ DWC_PRINT("DOEPCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->dev_if->out_ep_regs[i]->doepfn; ++ DWC_PRINT("DOEPFN @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->dev_if->out_ep_regs[i]->doepint; ++ DWC_PRINT("DOEPINT 
@0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->dev_if->out_ep_regs[i]->doeptsiz; ++ DWC_PRINT("DOETSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->dev_if->out_ep_regs[i]->doepdma; ++ DWC_PRINT("DOEPDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ } ++ return; ++} ++ ++/** ++ * This function reads the host registers and prints them ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ */ ++void dwc_otg_dump_host_registers(dwc_otg_core_if_t *_core_if) ++{ ++ int i; ++ volatile uint32_t *addr; ++ ++ DWC_PRINT("Host Global Registers\n"); ++ addr=&_core_if->host_if->host_global_regs->hcfg; ++ DWC_PRINT("HCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->host_if->host_global_regs->hfir; ++ DWC_PRINT("HFIR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->host_if->host_global_regs->hfnum; ++ DWC_PRINT("HFNUM @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->host_if->host_global_regs->hptxsts; ++ DWC_PRINT("HPTXSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->host_if->host_global_regs->haint; ++ DWC_PRINT("HAINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->host_if->host_global_regs->haintmsk; ++ DWC_PRINT("HAINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=_core_if->host_if->hprt0; ++ DWC_PRINT("HPRT0 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ ++ for (i=0; i<_core_if->core_params->host_channels; i++) { ++ DWC_PRINT("Host Channel %d Specific Registers\n", i); ++ addr=&_core_if->host_if->hc_regs[i]->hcchar; ++ DWC_PRINT("HCCHAR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->host_if->hc_regs[i]->hcsplt; ++ DWC_PRINT("HCSPLT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->host_if->hc_regs[i]->hcint; ++ DWC_PRINT("HCINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->host_if->hc_regs[i]->hcintmsk; ++ DWC_PRINT("HCINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->host_if->hc_regs[i]->hctsiz; ++ DWC_PRINT("HCTSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->host_if->hc_regs[i]->hcdma; ++ DWC_PRINT("HCDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ ++ } ++ return; ++} ++ ++/** ++ * This function reads the core global registers and prints them ++ * ++ * @param _core_if Programming view of DWC_otg controller. 
++ */ ++void dwc_otg_dump_global_registers(dwc_otg_core_if_t *_core_if) ++{ ++ int i; ++ volatile uint32_t *addr; ++ ++ DWC_PRINT("Core Global Registers\n"); ++ addr=&_core_if->core_global_regs->gotgctl; ++ DWC_PRINT("GOTGCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->core_global_regs->gotgint; ++ DWC_PRINT("GOTGINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->core_global_regs->gahbcfg; ++ DWC_PRINT("GAHBCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->core_global_regs->gusbcfg; ++ DWC_PRINT("GUSBCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->core_global_regs->grstctl; ++ DWC_PRINT("GRSTCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->core_global_regs->gintsts; ++ DWC_PRINT("GINTSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->core_global_regs->gintmsk; ++ DWC_PRINT("GINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->core_global_regs->grxstsr; ++ DWC_PRINT("GRXSTSR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ //addr=&_core_if->core_global_regs->grxstsp; ++ //DWC_PRINT("GRXSTSP @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->core_global_regs->grxfsiz; ++ DWC_PRINT("GRXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->core_global_regs->gnptxfsiz; ++ DWC_PRINT("GNPTXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->core_global_regs->gnptxsts; ++ DWC_PRINT("GNPTXSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->core_global_regs->gi2cctl; ++ DWC_PRINT("GI2CCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->core_global_regs->gpvndctl; ++ DWC_PRINT("GPVNDCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->core_global_regs->ggpio; ++ DWC_PRINT("GGPIO @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->core_global_regs->guid; ++ DWC_PRINT("GUID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->core_global_regs->gsnpsid; ++ DWC_PRINT("GSNPSID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->core_global_regs->ghwcfg1; ++ DWC_PRINT("GHWCFG1 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->core_global_regs->ghwcfg2; ++ DWC_PRINT("GHWCFG2 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->core_global_regs->ghwcfg3; ++ DWC_PRINT("GHWCFG3 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->core_global_regs->ghwcfg4; ++ DWC_PRINT("GHWCFG4 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&_core_if->core_global_regs->hptxfsiz; ++ DWC_PRINT("HPTXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ ++ for (i=0; i<_core_if->hwcfg4.b.num_dev_perio_in_ep; i++) { ++ addr=&_core_if->core_global_regs->dptxfsiz_dieptxf[i]; ++ DWC_PRINT("DPTXFSIZ[%d] @0x%08X : 0x%08X\n",i,(uint32_t)addr,dwc_read_reg32(addr)); ++ } ++ ++} ++#endif ++ ++/** ++ * Flush a Tx FIFO. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ * @param _num Tx FIFO to flush. 
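[Editorial note] The three DEBUG-only dump routines above (device, host and core-global registers) are independent of one another. A minimal sketch of how a diagnostic path might combine them; dwc_otg_is_host_mode() is declared further down in dwc_otg_cil.h, and the wrapper name here is purely illustrative, not part of the patch:

    #ifdef DEBUG
    /* Illustrative wrapper only -- not part of the driver sources. */
    static void dwc_otg_dump_all(dwc_otg_core_if_t *core_if)
    {
            /* Core-global registers are meaningful in either mode. */
            dwc_otg_dump_global_registers(core_if);

            if (dwc_otg_is_host_mode(core_if))
                    dwc_otg_dump_host_registers(core_if);
            else
                    dwc_otg_dump_dev_registers(core_if);
    }
    #endif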
++ */ ++extern void dwc_otg_flush_tx_fifo( dwc_otg_core_if_t *_core_if, ++ const int _num ) ++{ ++ dwc_otg_core_global_regs_t *global_regs = _core_if->core_global_regs; ++ volatile grstctl_t greset = { .d32 = 0}; ++ int count = 0; ++ ++ DWC_DEBUGPL((DBG_CIL|DBG_PCDV), "Flush Tx FIFO %d\n", _num); ++ ++ greset.b.txfflsh = 1; ++ greset.b.txfnum = _num; ++ dwc_write_reg32( &global_regs->grstctl, greset.d32 ); ++ ++ do { ++ greset.d32 = dwc_read_reg32( &global_regs->grstctl); ++ if (++count > 10000){ ++ DWC_WARN("%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n", ++ __func__, greset.d32, ++ dwc_read_reg32( &global_regs->gnptxsts)); ++ break; ++ } ++ ++ udelay(1); ++ } while (greset.b.txfflsh == 1); ++ /* Wait for 3 PHY Clocks*/ ++ UDELAY(1); ++} ++ ++/** ++ * Flush Rx FIFO. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ */ ++extern void dwc_otg_flush_rx_fifo( dwc_otg_core_if_t *_core_if ) ++{ ++ dwc_otg_core_global_regs_t *global_regs = _core_if->core_global_regs; ++ volatile grstctl_t greset = { .d32 = 0}; ++ int count = 0; ++ ++ DWC_DEBUGPL((DBG_CIL|DBG_PCDV), "%s\n", __func__); ++ /* ++ * ++ */ ++ greset.b.rxfflsh = 1; ++ dwc_write_reg32( &global_regs->grstctl, greset.d32 ); ++ ++ do { ++ greset.d32 = dwc_read_reg32( &global_regs->grstctl); ++ if (++count > 10000){ ++ DWC_WARN("%s() HANG! GRSTCTL=%0x\n", __func__, ++ greset.d32); ++ break; ++ } ++ } while (greset.b.rxfflsh == 1); ++ /* Wait for 3 PHY Clocks*/ ++ UDELAY(1); ++} ++ ++/** ++ * Do core a soft reset of the core. Be careful with this because it ++ * resets all the internal state machines of the core. ++ */ ++ ++void dwc_otg_core_reset(dwc_otg_core_if_t *_core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = _core_if->core_global_regs; ++ volatile grstctl_t greset = { .d32 = 0}; ++ int count = 0; ++ ++ DWC_DEBUGPL(DBG_CILV, "%s\n", __func__); ++ /* Wait for AHB master IDLE state. */ ++ do { ++ UDELAY(10); ++ greset.d32 = dwc_read_reg32( &global_regs->grstctl); ++ if (++count > 100000){ ++ DWC_WARN("%s() HANG! AHB Idle GRSTCTL=%0x %x\n", __func__, ++ greset.d32, greset.b.ahbidle); ++ return; ++ } ++ } while (greset.b.ahbidle == 0); ++ ++// winder add. ++#if 1 ++ /* Note: Actually, I don't exactly why we need to put delay here. */ ++ MDELAY(100); ++#endif ++ /* Core Soft Reset */ ++ count = 0; ++ greset.b.csftrst = 1; ++ dwc_write_reg32( &global_regs->grstctl, greset.d32 ); ++// winder add. ++#if 1 ++ /* Note: Actually, I don't exactly why we need to put delay here. */ ++ MDELAY(100); ++#endif ++ do { ++ greset.d32 = dwc_read_reg32( &global_regs->grstctl); ++ if (++count > 10000){ ++ DWC_WARN("%s() HANG! Soft Reset GRSTCTL=%0x\n", __func__, ++ greset.d32); ++ break; ++ } ++ udelay(1); ++ } while (greset.b.csftrst == 1); ++ /* Wait for 3 PHY Clocks*/ ++ //DWC_PRINT("100ms\n"); ++ MDELAY(100); ++} ++ ++ ++ ++/** ++ * Register HCD callbacks. The callbacks are used to start and stop ++ * the HCD for interrupt processing. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ * @param _cb the HCD callback structure. ++ * @param _p pointer to be passed to callback function (usb_hcd*). ++ */ ++extern void dwc_otg_cil_register_hcd_callbacks( dwc_otg_core_if_t *_core_if, ++ dwc_otg_cil_callbacks_t *_cb, ++ void *_p) ++{ ++ _core_if->hcd_cb = _cb; ++ _cb->p = _p; ++} ++ ++/** ++ * Register PCD callbacks. The callbacks are used to start and stop ++ * the PCD for interrupt processing. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ * @param _cb the PCD callback structure. 
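[Editorial note] The callback registration helpers above only store a structure pointer and an opaque context pointer; the CIL later invokes those hooks from its common interrupt handlers. A hedged sketch of how a host controller driver might use them; the handler names are placeholders, and dwc_otg_cil_callbacks_t itself is defined in dwc_otg_cil.h below:

    /* Placeholder handlers -- names are illustrative, not from the patch. */
    static int my_hcd_start(void *p)
    {
            /* e.g. resume the host schedule for the usb_hcd passed as 'p' */
            return 0;
    }

    static int my_hcd_stop(void *p)
    {
            /* e.g. halt the host schedule */
            return 0;
    }

    static dwc_otg_cil_callbacks_t my_hcd_cil_callbacks = {
            .start = my_hcd_start,
            .stop  = my_hcd_stop,
    };

    /* During HCD initialisation ('core_if' and 'hcd' assumed in scope): */
    dwc_otg_cil_register_hcd_callbacks(core_if, &my_hcd_cil_callbacks, hcd);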
++ * @param _p pointer to be passed to callback function (pcd*). ++ */ ++extern void dwc_otg_cil_register_pcd_callbacks( dwc_otg_core_if_t *_core_if, ++ dwc_otg_cil_callbacks_t *_cb, ++ void *_p) ++{ ++ _core_if->pcd_cb = _cb; ++ _cb->p = _p; ++} ++ +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_cil.h +@@ -0,0 +1,911 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_cil.h $ ++ * $Revision: 1.1.1.1 $ ++ * $Date: 2009-04-17 06:15:34 $ ++ * $Change: 631780 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++#if !defined(__DWC_CIL_H__) ++#define __DWC_CIL_H__ ++ ++#include "dwc_otg_plat.h" ++ ++#include "dwc_otg_regs.h" ++#ifdef DEBUG ++#include "linux/timer.h" ++#endif ++ ++/* the OTG capabilities. */ ++#define DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE 0 ++#define DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE 1 ++#define DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE 2 ++/* the maximum speed of operation in host and device mode. */ ++#define DWC_SPEED_PARAM_HIGH 0 ++#define DWC_SPEED_PARAM_FULL 1 ++/* the PHY clock rate in low power mode when connected to a ++ * Low Speed device in host mode. */ ++#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0 ++#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1 ++/* the type of PHY interface to use. */ ++#define DWC_PHY_TYPE_PARAM_FS 0 ++#define DWC_PHY_TYPE_PARAM_UTMI 1 ++#define DWC_PHY_TYPE_PARAM_ULPI 2 ++/* whether to use the internal or external supply to ++ * drive the vbus with a ULPI phy. */ ++#define DWC_PHY_ULPI_INTERNAL_VBUS 0 ++#define DWC_PHY_ULPI_EXTERNAL_VBUS 1 ++/* EP type. */ ++ ++/** ++ * @file ++ * This file contains the interface to the Core Interface Layer. 
++ */ ++ ++/** ++ * The <code>dwc_ep</code> structure represents the state of a single ++ * endpoint when acting in device mode. It contains the data items ++ * needed for an endpoint to be activated and transfer packets. ++ */ ++typedef struct dwc_ep { ++ /** EP number used for register address lookup */ ++ uint8_t num; ++ /** EP direction 0 = OUT */ ++ unsigned is_in : 1; ++ /** EP active. */ ++ unsigned active : 1; ++ ++ /** Periodic Tx FIFO # for IN EPs For INTR EP set to 0 to use non-periodic Tx FIFO ++ If dedicated Tx FIFOs are enabled for all IN Eps - Tx FIFO # FOR IN EPs*/ ++ unsigned tx_fifo_num : 4; ++ /** EP type: 0 - Control, 1 - ISOC, 2 - BULK, 3 - INTR */ ++ unsigned type : 2; ++#define DWC_OTG_EP_TYPE_CONTROL 0 ++#define DWC_OTG_EP_TYPE_ISOC 1 ++#define DWC_OTG_EP_TYPE_BULK 2 ++#define DWC_OTG_EP_TYPE_INTR 3 ++ ++ /** DATA start PID for INTR and BULK EP */ ++ unsigned data_pid_start : 1; ++ /** Frame (even/odd) for ISOC EP */ ++ unsigned even_odd_frame : 1; ++ /** Max Packet bytes */ ++ unsigned maxpacket : 11; ++ ++ /** @name Transfer state */ ++ /** @{ */ ++ ++ /** ++ * Pointer to the beginning of the transfer buffer -- do not modify ++ * during transfer. ++ */ ++ ++ uint32_t dma_addr; ++ ++ uint8_t *start_xfer_buff; ++ /** pointer to the transfer buffer */ ++ uint8_t *xfer_buff; ++ /** Number of bytes to transfer */ ++ unsigned xfer_len : 19; ++ /** Number of bytes transferred. */ ++ unsigned xfer_count : 19; ++ /** Sent ZLP */ ++ unsigned sent_zlp : 1; ++ /** Total len for control transfer */ ++ unsigned total_len : 19; ++ ++ /** stall clear flag */ ++ unsigned stall_clear_flag : 1; ++ ++ /** @} */ ++} dwc_ep_t; ++ ++/* ++ * Reasons for halting a host channel. ++ */ ++typedef enum dwc_otg_halt_status { ++ DWC_OTG_HC_XFER_NO_HALT_STATUS, ++ DWC_OTG_HC_XFER_COMPLETE, ++ DWC_OTG_HC_XFER_URB_COMPLETE, ++ DWC_OTG_HC_XFER_ACK, ++ DWC_OTG_HC_XFER_NAK, ++ DWC_OTG_HC_XFER_NYET, ++ DWC_OTG_HC_XFER_STALL, ++ DWC_OTG_HC_XFER_XACT_ERR, ++ DWC_OTG_HC_XFER_FRAME_OVERRUN, ++ DWC_OTG_HC_XFER_BABBLE_ERR, ++ DWC_OTG_HC_XFER_DATA_TOGGLE_ERR, ++ DWC_OTG_HC_XFER_AHB_ERR, ++ DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE, ++ DWC_OTG_HC_XFER_URB_DEQUEUE ++} dwc_otg_halt_status_e; ++ ++/** ++ * Host channel descriptor. This structure represents the state of a single ++ * host channel when acting in host mode. It contains the data items needed to ++ * transfer packets to an endpoint via a host channel. ++ */ ++typedef struct dwc_hc { ++ /** Host channel number used for register address lookup */ ++ uint8_t hc_num; ++ ++ /** Device to access */ ++ unsigned dev_addr : 7; ++ ++ /** EP to access */ ++ unsigned ep_num : 4; ++ ++ /** EP direction. 0: OUT, 1: IN */ ++ unsigned ep_is_in : 1; ++ ++ /** ++ * EP speed. ++ * One of the following values: ++ * - DWC_OTG_EP_SPEED_LOW ++ * - DWC_OTG_EP_SPEED_FULL ++ * - DWC_OTG_EP_SPEED_HIGH ++ */ ++ unsigned speed : 2; ++#define DWC_OTG_EP_SPEED_LOW 0 ++#define DWC_OTG_EP_SPEED_FULL 1 ++#define DWC_OTG_EP_SPEED_HIGH 2 ++ ++ /** ++ * Endpoint type. ++ * One of the following values: ++ * - DWC_OTG_EP_TYPE_CONTROL: 0 ++ * - DWC_OTG_EP_TYPE_ISOC: 1 ++ * - DWC_OTG_EP_TYPE_BULK: 2 ++ * - DWC_OTG_EP_TYPE_INTR: 3 ++ */ ++ unsigned ep_type : 2; ++ ++ /** Max packet size in bytes */ ++ unsigned max_packet : 11; ++ ++ /** ++ * PID for initial transaction. 
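[Editorial note] As a point of reference for the dwc_ep bit-field layout above, a hedged example of filling the structure for a bulk IN endpoint. The buffer and length variables are assumptions, and the real PCD code (not part of this file) keeps the dwc_ep_t inside its own endpoint bookkeeping rather than on the stack:

    /* Illustrative only: a bulk IN endpoint, EP1, 512-byte max packet. */
    dwc_ep_t ep = {0};

    ep.num        = 1;
    ep.is_in      = 1;                       /* IN direction */
    ep.active     = 1;
    ep.type       = DWC_OTG_EP_TYPE_BULK;
    ep.maxpacket  = 512;
    ep.start_xfer_buff = buf;                /* 'buf' and 'len' assumed to exist */
    ep.xfer_buff  = buf;
    ep.xfer_len   = len;
    ep.xfer_count = 0;

    dwc_otg_ep_activate(core_if, &ep);       /* declared later in this header */
    dwc_otg_ep_start_transfer(core_if, &ep);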
++ * 0: DATA0,<br> ++ * 1: DATA2,<br> ++ * 2: DATA1,<br> ++ * 3: MDATA (non-Control EP), ++ * SETUP (Control EP) ++ */ ++ unsigned data_pid_start : 2; ++#define DWC_OTG_HC_PID_DATA0 0 ++#define DWC_OTG_HC_PID_DATA2 1 ++#define DWC_OTG_HC_PID_DATA1 2 ++#define DWC_OTG_HC_PID_MDATA 3 ++#define DWC_OTG_HC_PID_SETUP 3 ++ ++ /** Number of periodic transactions per (micro)frame */ ++ unsigned multi_count: 2; ++ ++ /** @name Transfer State */ ++ /** @{ */ ++ ++ /** Pointer to the current transfer buffer position. */ ++ uint8_t *xfer_buff; ++ /** Total number of bytes to transfer. */ ++ uint32_t xfer_len; ++ /** Number of bytes transferred so far. */ ++ uint32_t xfer_count; ++ /** Packet count at start of transfer.*/ ++ uint16_t start_pkt_count; ++ ++ /** ++ * Flag to indicate whether the transfer has been started. Set to 1 if ++ * it has been started, 0 otherwise. ++ */ ++ uint8_t xfer_started; ++ ++ /** ++ * Set to 1 to indicate that a PING request should be issued on this ++ * channel. If 0, process normally. ++ */ ++ uint8_t do_ping; ++ ++ /** ++ * Set to 1 to indicate that the error count for this transaction is ++ * non-zero. Set to 0 if the error count is 0. ++ */ ++ uint8_t error_state; ++ ++ /** ++ * Set to 1 to indicate that this channel should be halted the next ++ * time a request is queued for the channel. This is necessary in ++ * slave mode if no request queue space is available when an attempt ++ * is made to halt the channel. ++ */ ++ uint8_t halt_on_queue; ++ ++ /** ++ * Set to 1 if the host channel has been halted, but the core is not ++ * finished flushing queued requests. Otherwise 0. ++ */ ++ uint8_t halt_pending; ++ ++ /** ++ * Reason for halting the host channel. ++ */ ++ dwc_otg_halt_status_e halt_status; ++ ++ /* ++ * Split settings for the host channel ++ */ ++ uint8_t do_split; /**< Enable split for the channel */ ++ uint8_t complete_split; /**< Enable complete split */ ++ uint8_t hub_addr; /**< Address of high speed hub */ ++ ++ uint8_t port_addr; /**< Port of the low/full speed device */ ++ /** Split transaction position ++ * One of the following values: ++ * - DWC_HCSPLIT_XACTPOS_MID ++ * - DWC_HCSPLIT_XACTPOS_BEGIN ++ * - DWC_HCSPLIT_XACTPOS_END ++ * - DWC_HCSPLIT_XACTPOS_ALL */ ++ uint8_t xact_pos; ++ ++ /** Set when the host channel does a short read. */ ++ uint8_t short_read; ++ ++ /** ++ * Number of requests issued for this channel since it was assigned to ++ * the current transfer (not counting PINGs). ++ */ ++ uint8_t requests; ++ ++ /** ++ * Queue Head for the transfer being processed by this channel. ++ */ ++ struct dwc_otg_qh *qh; ++ ++ /** @} */ ++ ++ /** Entry in list of host channels. */ ++ struct list_head hc_list_entry; ++} dwc_hc_t; ++ ++/** ++ * The following parameters may be specified when starting the module. These ++ * parameters define how the DWC_otg controller should be configured. ++ * Parameter values are passed to the CIL initialization function ++ * dwc_otg_cil_init. ++ */ ++ ++typedef struct dwc_otg_core_params ++{ ++ int32_t opt; ++//#define dwc_param_opt_default 1 ++ /** ++ * Specifies the OTG capabilities. The driver will automatically ++ * detect the value for this parameter if none is specified. 
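[Editorial note] The halt-related fields of dwc_hc_t above (xfer_started, halt_pending, halt_on_queue, halt_status) work together with dwc_otg_hc_halt(), declared further down in this header. A brief, hedged sketch of the URB-dequeue case; the guard condition is an assumption about typical HCD bookkeeping:

    /* Sketch: an HCD dequeuing an URB asks the CIL to halt the channel.
     * 'core_if' and 'hc' are assumed to come from the HCD's channel state. */
    if (hc->xfer_started && !hc->halt_pending)
            dwc_otg_hc_halt(core_if, hc, DWC_OTG_HC_XFER_URB_DEQUEUE);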
++ * 0 - HNP and SRP capable (default) ++ * 1 - SRP Only capable ++ * 2 - No HNP/SRP capable ++ */ ++ int32_t otg_cap; ++#define DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE 0 ++#define DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE 1 ++#define DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE 2 ++//#define dwc_param_otg_cap_default DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE ++ /** ++ * Specifies whether to use slave or DMA mode for accessing the data ++ * FIFOs. The driver will automatically detect the value for this ++ * parameter if none is specified. ++ * 0 - Slave ++ * 1 - DMA (default, if available) ++ */ ++ int32_t dma_enable; ++//#define dwc_param_dma_enable_default 1 ++ /** The DMA Burst size (applicable only for External DMA ++ * Mode). 1, 4, 8 16, 32, 64, 128, 256 (default 32) ++ */ ++ int32_t dma_burst_size; /* Translate this to GAHBCFG values */ ++//#define dwc_param_dma_burst_size_default 32 ++ /** ++ * Specifies the maximum speed of operation in host and device mode. ++ * The actual speed depends on the speed of the attached device and ++ * the value of phy_type. The actual speed depends on the speed of the ++ * attached device. ++ * 0 - High Speed (default) ++ * 1 - Full Speed ++ */ ++ int32_t speed; ++//#define dwc_param_speed_default 0 ++#define DWC_SPEED_PARAM_HIGH 0 ++#define DWC_SPEED_PARAM_FULL 1 ++ ++ /** Specifies whether low power mode is supported when attached ++ * to a Full Speed or Low Speed device in host mode. ++ * 0 - Don't support low power mode (default) ++ * 1 - Support low power mode ++ */ ++ int32_t host_support_fs_ls_low_power; ++//#define dwc_param_host_support_fs_ls_low_power_default 0 ++ /** Specifies the PHY clock rate in low power mode when connected to a ++ * Low Speed device in host mode. This parameter is applicable only if ++ * HOST_SUPPORT_FS_LS_LOW_POWER is enabled. If PHY_TYPE is set to FS ++ * then defaults to 6 MHZ otherwise 48 MHZ. ++ * ++ * 0 - 48 MHz ++ * 1 - 6 MHz ++ */ ++ int32_t host_ls_low_power_phy_clk; ++//#define dwc_param_host_ls_low_power_phy_clk_default 0 ++#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0 ++#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1 ++ /** ++ * 0 - Use cC FIFO size parameters ++ * 1 - Allow dynamic FIFO sizing (default) ++ */ ++ int32_t enable_dynamic_fifo; ++//#define dwc_param_enable_dynamic_fifo_default 1 ++ /** Total number of 4-byte words in the data FIFO memory. This ++ * memory includes the Rx FIFO, non-periodic Tx FIFO, and periodic ++ * Tx FIFOs. ++ * 32 to 32768 (default 8192) ++ * Note: The total FIFO memory depth in the FPGA configuration is 8192. ++ */ ++ int32_t data_fifo_size; ++//#define dwc_param_data_fifo_size_default 8192 ++ /** Number of 4-byte words in the Rx FIFO in device mode when dynamic ++ * FIFO sizing is enabled. ++ * 16 to 32768 (default 1064) ++ */ ++ int32_t dev_rx_fifo_size; ++//#define dwc_param_dev_rx_fifo_size_default 1064 ++ /** Number of 4-byte words in the non-periodic Tx FIFO in device mode ++ * when dynamic FIFO sizing is enabled. ++ * 16 to 32768 (default 1024) ++ */ ++ int32_t dev_nperio_tx_fifo_size; ++//#define dwc_param_dev_nperio_tx_fifo_size_default 1024 ++ /** Number of 4-byte words in each of the periodic Tx FIFOs in device ++ * mode when dynamic FIFO sizing is enabled. ++ * 4 to 768 (default 256) ++ */ ++ uint32_t dev_perio_tx_fifo_size[MAX_PERIO_FIFOS]; ++//#define dwc_param_dev_perio_tx_fifo_size_default 256 ++ /** Number of 4-byte words in the Rx FIFO in host mode when dynamic ++ * FIFO sizing is enabled. 
++ * 16 to 32768 (default 1024) ++ */ ++ int32_t host_rx_fifo_size; ++//#define dwc_param_host_rx_fifo_size_default 1024 ++ /** Number of 4-byte words in the non-periodic Tx FIFO in host mode ++ * when Dynamic FIFO sizing is enabled in the core. ++ * 16 to 32768 (default 1024) ++ */ ++ int32_t host_nperio_tx_fifo_size; ++//#define dwc_param_host_nperio_tx_fifo_size_default 1024 ++ /** Number of 4-byte words in the host periodic Tx FIFO when dynamic ++ * FIFO sizing is enabled. ++ * 16 to 32768 (default 1024) ++ */ ++ int32_t host_perio_tx_fifo_size; ++//#define dwc_param_host_perio_tx_fifo_size_default 1024 ++ /** The maximum transfer size supported in bytes. ++ * 2047 to 65,535 (default 65,535) ++ */ ++ int32_t max_transfer_size; ++//#define dwc_param_max_transfer_size_default 65535 ++ /** The maximum number of packets in a transfer. ++ * 15 to 511 (default 511) ++ */ ++ int32_t max_packet_count; ++//#define dwc_param_max_packet_count_default 511 ++ /** The number of host channel registers to use. ++ * 1 to 16 (default 12) ++ * Note: The FPGA configuration supports a maximum of 12 host channels. ++ */ ++ int32_t host_channels; ++//#define dwc_param_host_channels_default 12 ++ /** The number of endpoints in addition to EP0 available for device ++ * mode operations. ++ * 1 to 15 (default 6 IN and OUT) ++ * Note: The FPGA configuration supports a maximum of 6 IN and OUT ++ * endpoints in addition to EP0. ++ */ ++ int32_t dev_endpoints; ++//#define dwc_param_dev_endpoints_default 6 ++ /** ++ * Specifies the type of PHY interface to use. By default, the driver ++ * will automatically detect the phy_type. ++ * ++ * 0 - Full Speed PHY ++ * 1 - UTMI+ (default) ++ * 2 - ULPI ++ */ ++ int32_t phy_type; ++#define DWC_PHY_TYPE_PARAM_FS 0 ++#define DWC_PHY_TYPE_PARAM_UTMI 1 ++#define DWC_PHY_TYPE_PARAM_ULPI 2 ++//#define dwc_param_phy_type_default DWC_PHY_TYPE_PARAM_UTMI ++ /** ++ * Specifies the UTMI+ Data Width. This parameter is ++ * applicable for a PHY_TYPE of UTMI+ or ULPI. (For a ULPI ++ * PHY_TYPE, this parameter indicates the data width between ++ * the MAC and the ULPI Wrapper.) Also, this parameter is ++ * applicable only if the OTG_HSPHY_WIDTH cC parameter was set ++ * to "8 and 16 bits", meaning that the core has been ++ * configured to work at either data path width. ++ * ++ * 8 or 16 bits (default 16) ++ */ ++ int32_t phy_utmi_width; ++//#define dwc_param_phy_utmi_width_default 16 ++ /** ++ * Specifies whether the ULPI operates at double or single ++ * data rate. This parameter is only applicable if PHY_TYPE is ++ * ULPI. ++ * ++ * 0 - single data rate ULPI interface with 8 bit wide data ++ * bus (default) ++ * 1 - double data rate ULPI interface with 4 bit wide data ++ * bus ++ */ ++ int32_t phy_ulpi_ddr; ++//#define dwc_param_phy_ulpi_ddr_default 0 ++ /** ++ * Specifies whether to use the internal or external supply to ++ * drive the vbus with a ULPI phy. ++ */ ++ int32_t phy_ulpi_ext_vbus; ++#define DWC_PHY_ULPI_INTERNAL_VBUS 0 ++#define DWC_PHY_ULPI_EXTERNAL_VBUS 1 ++//#define dwc_param_phy_ulpi_ext_vbus_default DWC_PHY_ULPI_INTERNAL_VBUS ++ /** ++ * Specifies whether to use the I2Cinterface for full speed PHY. This ++ * parameter is only applicable if PHY_TYPE is FS. 
++ * 0 - No (default) ++ * 1 - Yes ++ */ ++ int32_t i2c_enable; ++//#define dwc_param_i2c_enable_default 0 ++ ++ int32_t ulpi_fs_ls; ++//#define dwc_param_ulpi_fs_ls_default 0 ++ ++ int32_t ts_dline; ++//#define dwc_param_ts_dline_default 0 ++ ++ /** ++ * Specifies whether dedicated transmit FIFOs are ++ * enabled for non periodic IN endpoints in device mode ++ * 0 - No ++ * 1 - Yes ++ */ ++ int32_t en_multiple_tx_fifo; ++#define dwc_param_en_multiple_tx_fifo_default 1 ++ ++ /** Number of 4-byte words in each of the Tx FIFOs in device ++ * mode when dynamic FIFO sizing is enabled. ++ * 4 to 768 (default 256) ++ */ ++ uint32_t dev_tx_fifo_size[MAX_TX_FIFOS]; ++#define dwc_param_dev_tx_fifo_size_default 256 ++ ++ /** Thresholding enable flag- ++ * bit 0 - enable non-ISO Tx thresholding ++ * bit 1 - enable ISO Tx thresholding ++ * bit 2 - enable Rx thresholding ++ */ ++ uint32_t thr_ctl; ++#define dwc_param_thr_ctl_default 0 ++ ++ /** Thresholding length for Tx ++ * FIFOs in 32 bit DWORDs ++ */ ++ uint32_t tx_thr_length; ++#define dwc_param_tx_thr_length_default 64 ++ ++ /** Thresholding length for Rx ++ * FIFOs in 32 bit DWORDs ++ */ ++ uint32_t rx_thr_length; ++#define dwc_param_rx_thr_length_default 64 ++} dwc_otg_core_params_t; ++ ++#ifdef DEBUG ++struct dwc_otg_core_if; ++typedef struct hc_xfer_info ++{ ++ struct dwc_otg_core_if *core_if; ++ dwc_hc_t *hc; ++} hc_xfer_info_t; ++#endif ++ ++/** ++ * The <code>dwc_otg_core_if</code> structure contains information needed to manage ++ * the DWC_otg controller acting in either host or device mode. It ++ * represents the programming view of the controller as a whole. ++ */ ++typedef struct dwc_otg_core_if ++{ ++ /** Parameters that define how the core should be configured.*/ ++ dwc_otg_core_params_t *core_params; ++ ++ /** Core Global registers starting at offset 000h. */ ++ dwc_otg_core_global_regs_t *core_global_regs; ++ ++ /** Device-specific information */ ++ dwc_otg_dev_if_t *dev_if; ++ /** Host-specific information */ ++ dwc_otg_host_if_t *host_if; ++ ++ /* ++ * Set to 1 if the core PHY interface bits in USBCFG have been ++ * initialized. ++ */ ++ uint8_t phy_init_done; ++ ++ /* ++ * SRP Success flag, set by srp success interrupt in FS I2C mode ++ */ ++ uint8_t srp_success; ++ uint8_t srp_timer_started; ++ ++ /* Common configuration information */ ++ /** Power and Clock Gating Control Register */ ++ volatile uint32_t *pcgcctl; ++#define DWC_OTG_PCGCCTL_OFFSET 0xE00 ++ ++ /** Push/pop addresses for endpoints or host channels.*/ ++ uint32_t *data_fifo[MAX_EPS_CHANNELS]; ++#define DWC_OTG_DATA_FIFO_OFFSET 0x1000 ++#define DWC_OTG_DATA_FIFO_SIZE 0x1000 ++ ++ /** Total RAM for FIFOs (Bytes) */ ++ uint16_t total_fifo_size; ++ /** Size of Rx FIFO (Bytes) */ ++ uint16_t rx_fifo_size; ++ /** Size of Non-periodic Tx FIFO (Bytes) */ ++ uint16_t nperio_tx_fifo_size; ++ ++ /** 1 if DMA is enabled, 0 otherwise. */ ++ uint8_t dma_enable; ++ ++ /** 1 if dedicated Tx FIFOs are enabled, 0 otherwise. */ ++ uint8_t en_multiple_tx_fifo; ++ ++ /** Set to 1 if multiple packets of a high-bandwidth transfer is in ++ * process of being queued */ ++ uint8_t queuing_high_bandwidth; ++ ++ /** Hardware Configuration -- stored here for convenience.*/ ++ hwcfg1_data_t hwcfg1; ++ hwcfg2_data_t hwcfg2; ++ hwcfg3_data_t hwcfg3; ++ hwcfg4_data_t hwcfg4; ++ ++ /** The operational State, during transations ++ * (a_host>>a_peripherial and b_device=>b_host) this may not ++ * match the core but allows the software to determine ++ * transitions. 
++ */ ++ uint8_t op_state; ++ ++ /** ++ * Set to 1 if the HCD needs to be restarted on a session request ++ * interrupt. This is required if no connector ID status change has ++ * occurred since the HCD was last disconnected. ++ */ ++ uint8_t restart_hcd_on_session_req; ++ ++ /** HCD callbacks */ ++ /** A-Device is a_host */ ++#define A_HOST (1) ++ /** A-Device is a_suspend */ ++#define A_SUSPEND (2) ++ /** A-Device is a_peripherial */ ++#define A_PERIPHERAL (3) ++ /** B-Device is operating as a Peripheral. */ ++#define B_PERIPHERAL (4) ++ /** B-Device is operating as a Host. */ ++#define B_HOST (5) ++ ++ /** HCD callbacks */ ++ struct dwc_otg_cil_callbacks *hcd_cb; ++ /** PCD callbacks */ ++ struct dwc_otg_cil_callbacks *pcd_cb; ++ ++ /** Device mode Periodic Tx FIFO Mask */ ++ uint32_t p_tx_msk; ++ /** Device mode Periodic Tx FIFO Mask */ ++ uint32_t tx_msk; ++ ++#ifdef DEBUG ++ uint32_t start_hcchar_val[MAX_EPS_CHANNELS]; ++ ++ hc_xfer_info_t hc_xfer_info[MAX_EPS_CHANNELS]; ++ struct timer_list hc_xfer_timer[MAX_EPS_CHANNELS]; ++ ++#if 1 // winder ++ uint32_t hfnum_7_samples; ++ uint32_t hfnum_7_frrem_accum; ++ uint32_t hfnum_0_samples; ++ uint32_t hfnum_0_frrem_accum; ++ uint32_t hfnum_other_samples; ++ uint32_t hfnum_other_frrem_accum; ++#else ++ uint32_t hfnum_7_samples; ++ uint64_t hfnum_7_frrem_accum; ++ uint32_t hfnum_0_samples; ++ uint64_t hfnum_0_frrem_accum; ++ uint32_t hfnum_other_samples; ++ uint64_t hfnum_other_frrem_accum; ++#endif ++ resource_size_t phys_addr; /* Added to support PLB DMA : phys-virt mapping */ ++#endif ++ ++} dwc_otg_core_if_t; ++ ++/* ++ * The following functions support initialization of the CIL driver component ++ * and the DWC_otg controller. ++ */ ++extern dwc_otg_core_if_t *dwc_otg_cil_init(const uint32_t *_reg_base_addr, ++ dwc_otg_core_params_t *_core_params); ++extern void dwc_otg_cil_remove(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_core_init(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_core_host_init(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_core_dev_init(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_enable_global_interrupts( dwc_otg_core_if_t *_core_if ); ++extern void dwc_otg_disable_global_interrupts( dwc_otg_core_if_t *_core_if ); ++ ++/** @name Device CIL Functions ++ * The following functions support managing the DWC_otg controller in device ++ * mode. 
++ */ ++/**@{*/ ++extern void dwc_otg_wakeup(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_read_setup_packet (dwc_otg_core_if_t *_core_if, uint32_t *_dest); ++extern uint32_t dwc_otg_get_frame_number(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_ep0_activate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep_activate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep_deactivate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep_start_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep0_start_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep0_continue_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep_write_packet(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep, int _dma); ++extern void dwc_otg_ep_set_stall(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep_clear_stall(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_enable_device_interrupts(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_dump_dev_registers(dwc_otg_core_if_t *_core_if); ++/**@}*/ ++ ++/** @name Host CIL Functions ++ * The following functions support managing the DWC_otg controller in host ++ * mode. ++ */ ++/**@{*/ ++extern void dwc_otg_hc_init(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); ++extern void dwc_otg_hc_halt(dwc_otg_core_if_t *_core_if, ++ dwc_hc_t *_hc, ++ dwc_otg_halt_status_e _halt_status); ++extern void dwc_otg_hc_cleanup(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); ++extern void dwc_otg_hc_start_transfer(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); ++extern int dwc_otg_hc_continue_transfer(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); ++extern void dwc_otg_hc_do_ping(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); ++extern void dwc_otg_hc_write_packet(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); ++extern void dwc_otg_enable_host_interrupts(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_disable_host_interrupts(dwc_otg_core_if_t *_core_if); ++ ++/** ++ * This function Reads HPRT0 in preparation to modify. It keeps the ++ * WC bits 0 so that if they are read as 1, they won't clear when you ++ * write it back ++ */ ++static inline uint32_t dwc_otg_read_hprt0(dwc_otg_core_if_t *_core_if) ++{ ++ hprt0_data_t hprt0; ++ hprt0.d32 = dwc_read_reg32(_core_if->host_if->hprt0); ++ hprt0.b.prtena = 0; ++ hprt0.b.prtconndet = 0; ++ hprt0.b.prtenchng = 0; ++ hprt0.b.prtovrcurrchng = 0; ++ return hprt0.d32; ++} ++ ++extern void dwc_otg_dump_host_registers(dwc_otg_core_if_t *_core_if); ++/**@}*/ ++ ++/** @name Common CIL Functions ++ * The following functions support managing the DWC_otg controller in either ++ * device or host mode. ++ */ ++/**@{*/ ++ ++extern void dwc_otg_read_packet(dwc_otg_core_if_t *core_if, ++ uint8_t *dest, ++ uint16_t bytes); ++ ++extern void dwc_otg_dump_global_registers(dwc_otg_core_if_t *_core_if); ++ ++extern void dwc_otg_flush_tx_fifo( dwc_otg_core_if_t *_core_if, ++ const int _num ); ++extern void dwc_otg_flush_rx_fifo( dwc_otg_core_if_t *_core_if ); ++extern void dwc_otg_core_reset( dwc_otg_core_if_t *_core_if ); ++ ++#define NP_TXFIFO_EMPTY -1 ++#define MAX_NP_TXREQUEST_Q_SLOTS 8 ++/** ++ * This function returns the endpoint number of the request at ++ * the top of non-periodic TX FIFO, or -1 if the request FIFO is ++ * empty. 
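[Editorial note] dwc_otg_read_hprt0() above exists because several HPRT0 bits are write-one-to-clear; writing the register back unmodified would acknowledge port events by accident. A hedged sketch of the intended read-modify-write pattern, mirroring the port-power code in dwc_otg_cil_intr.c further down in this patch:

    hprt0_data_t hprt0;

    /* The WC bits come back zeroed, so the write only changes what we set. */
    hprt0.d32 = dwc_otg_read_hprt0(core_if);      /* 'core_if' assumed in scope */
    hprt0.b.prtpwr = 1;                           /* turn on port power */
    dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);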
++ */ ++static inline int dwc_otg_top_nptxfifo_epnum(dwc_otg_core_if_t *_core_if) { ++ gnptxsts_data_t txstatus = {.d32 = 0}; ++ ++ txstatus.d32 = dwc_read_reg32(&_core_if->core_global_regs->gnptxsts); ++ return (txstatus.b.nptxqspcavail == MAX_NP_TXREQUEST_Q_SLOTS ? ++ -1 : txstatus.b.nptxqtop_chnep); ++} ++/** ++ * This function returns the Core Interrupt register. ++ */ ++static inline uint32_t dwc_otg_read_core_intr(dwc_otg_core_if_t *_core_if) { ++ return (dwc_read_reg32(&_core_if->core_global_regs->gintsts) & ++ dwc_read_reg32(&_core_if->core_global_regs->gintmsk)); ++} ++ ++/** ++ * This function returns the OTG Interrupt register. ++ */ ++static inline uint32_t dwc_otg_read_otg_intr (dwc_otg_core_if_t *_core_if) { ++ return (dwc_read_reg32 (&_core_if->core_global_regs->gotgint)); ++} ++ ++/** ++ * This function reads the Device All Endpoints Interrupt register and ++ * returns the IN endpoint interrupt bits. ++ */ ++static inline uint32_t dwc_otg_read_dev_all_in_ep_intr(dwc_otg_core_if_t *_core_if) { ++ uint32_t v; ++ v = dwc_read_reg32(&_core_if->dev_if->dev_global_regs->daint) & ++ dwc_read_reg32(&_core_if->dev_if->dev_global_regs->daintmsk); ++ return (v & 0xffff); ++ ++} ++ ++/** ++ * This function reads the Device All Endpoints Interrupt register and ++ * returns the OUT endpoint interrupt bits. ++ */ ++static inline uint32_t dwc_otg_read_dev_all_out_ep_intr(dwc_otg_core_if_t *_core_if) { ++ uint32_t v; ++ v = dwc_read_reg32(&_core_if->dev_if->dev_global_regs->daint) & ++ dwc_read_reg32(&_core_if->dev_if->dev_global_regs->daintmsk); ++ return ((v & 0xffff0000) >> 16); ++} ++ ++/** ++ * This function returns the Device IN EP Interrupt register ++ */ ++static inline uint32_t dwc_otg_read_dev_in_ep_intr(dwc_otg_core_if_t *_core_if, ++ dwc_ep_t *_ep) ++{ ++ dwc_otg_dev_if_t *dev_if = _core_if->dev_if; ++ uint32_t v, msk, emp; ++ msk = dwc_read_reg32(&dev_if->dev_global_regs->diepmsk); ++ emp = dwc_read_reg32(&dev_if->dev_global_regs->dtknqr4_fifoemptymsk); ++ msk |= ((emp >> _ep->num) & 0x1) << 7; ++ v = dwc_read_reg32(&dev_if->in_ep_regs[_ep->num]->diepint) & msk; ++/* ++ dwc_otg_dev_if_t *dev_if = _core_if->dev_if; ++ uint32_t v; ++ v = dwc_read_reg32(&dev_if->in_ep_regs[_ep->num]->diepint) & ++ dwc_read_reg32(&dev_if->dev_global_regs->diepmsk); ++*/ ++ return v; ++} ++/** ++ * This function returns the Device OUT EP Interrupt register ++ */ ++static inline uint32_t dwc_otg_read_dev_out_ep_intr(dwc_otg_core_if_t *_core_if, ++ dwc_ep_t *_ep) ++{ ++ dwc_otg_dev_if_t *dev_if = _core_if->dev_if; ++ uint32_t v; ++ v = dwc_read_reg32( &dev_if->out_ep_regs[_ep->num]->doepint) & ++ dwc_read_reg32(&dev_if->dev_global_regs->doepmsk); ++ return v; ++} ++ ++/** ++ * This function returns the Host All Channel Interrupt register ++ */ ++static inline uint32_t dwc_otg_read_host_all_channels_intr (dwc_otg_core_if_t *_core_if) ++{ ++ return (dwc_read_reg32 (&_core_if->host_if->host_global_regs->haint)); ++} ++ ++static inline uint32_t dwc_otg_read_host_channel_intr (dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc) ++{ ++ return (dwc_read_reg32 (&_core_if->host_if->hc_regs[_hc->hc_num]->hcint)); ++} ++ ++ ++/** ++ * This function returns the mode of the operation, host or device. 
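[Editorial note] The DAINT helpers above fold the mask register in before returning, so a caller sees only unmasked, pending endpoints. A minimal sketch of how a device-mode interrupt path might walk the IN-endpoint bits; the loop body is left as a comment because the PCD side is not part of this header:

    uint32_t ep_intr = dwc_otg_read_dev_all_in_ep_intr(core_if);  /* 'core_if' assumed */
    int epnum;

    for (epnum = 0; ep_intr; epnum++, ep_intr >>= 1) {
            if (ep_intr & 0x1) {
                    /* EP 'epnum' has a pending, unmasked IN interrupt; the PCD
                     * would now read DIEPINTn via dwc_otg_read_dev_in_ep_intr()
                     * and service it. */
            }
    }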
++ * ++ * @return 0 - Device Mode, 1 - Host Mode ++ */ ++static inline uint32_t dwc_otg_mode(dwc_otg_core_if_t *_core_if) { ++ return (dwc_read_reg32( &_core_if->core_global_regs->gintsts ) & 0x1); ++} ++ ++static inline uint8_t dwc_otg_is_device_mode(dwc_otg_core_if_t *_core_if) ++{ ++ return (dwc_otg_mode(_core_if) != DWC_HOST_MODE); ++} ++static inline uint8_t dwc_otg_is_host_mode(dwc_otg_core_if_t *_core_if) ++{ ++ return (dwc_otg_mode(_core_if) == DWC_HOST_MODE); ++} ++ ++extern int32_t dwc_otg_handle_common_intr( dwc_otg_core_if_t *_core_if ); ++ ++ ++/**@}*/ ++ ++/** ++ * DWC_otg CIL callback structure. This structure allows the HCD and ++ * PCD to register functions used for starting and stopping the PCD ++ * and HCD for role change on for a DRD. ++ */ ++typedef struct dwc_otg_cil_callbacks ++{ ++ /** Start function for role change */ ++ int (*start) (void *_p); ++ /** Stop Function for role change */ ++ int (*stop) (void *_p); ++ /** Disconnect Function for role change */ ++ int (*disconnect) (void *_p); ++ /** Resume/Remote wakeup Function */ ++ int (*resume_wakeup) (void *_p); ++ /** Suspend function */ ++ int (*suspend) (void *_p); ++ /** Session Start (SRP) */ ++ int (*session_start) (void *_p); ++ /** Pointer passed to start() and stop() */ ++ void *p; ++} dwc_otg_cil_callbacks_t; ++ ++ ++ ++extern void dwc_otg_cil_register_pcd_callbacks( dwc_otg_core_if_t *_core_if, ++ dwc_otg_cil_callbacks_t *_cb, ++ void *_p); ++extern void dwc_otg_cil_register_hcd_callbacks( dwc_otg_core_if_t *_core_if, ++ dwc_otg_cil_callbacks_t *_cb, ++ void *_p); ++ ++ ++#endif +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_cil_ifx.h +@@ -0,0 +1,58 @@ ++/****************************************************************************** ++** ++** FILE NAME : dwc_otg_cil_ifx.h ++** PROJECT : Twinpass/Danube ++** MODULES : DWC OTG USB ++** ++** DATE : 07 Sep. 2007 ++** AUTHOR : Sung Winder ++** DESCRIPTION : Default param value. ++** COPYRIGHT : Copyright (c) 2007 ++** Infineon Technologies AG ++** 2F, No.2, Li-Hsin Rd., Hsinchu Science Park, ++** Hsin-chu City, 300 Taiwan. ++** ++** This program is free software; you can redistribute it and/or modify ++** it under the terms of the GNU General Public License as published by ++** the Free Software Foundation; either version 2 of the License, or ++** (at your option) any later version. 
++** ++** HISTORY ++** $Date $Author $Comment ++** 12 April 2007 Sung Winder Initiate Version ++*******************************************************************************/ ++#if !defined(__DWC_OTG_CIL_IFX_H__) ++#define __DWC_OTG_CIL_IFX_H__ ++ ++/* ================ Default param value ================== */ ++#define dwc_param_opt_default 1 ++#define dwc_param_otg_cap_default DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE ++#define dwc_param_dma_enable_default 1 ++#define dwc_param_dma_burst_size_default 32 ++#define dwc_param_speed_default DWC_SPEED_PARAM_HIGH ++#define dwc_param_host_support_fs_ls_low_power_default 0 ++#define dwc_param_host_ls_low_power_phy_clk_default DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ ++#define dwc_param_enable_dynamic_fifo_default 1 ++#define dwc_param_data_fifo_size_default 2048 ++#define dwc_param_dev_rx_fifo_size_default 1024 ++#define dwc_param_dev_nperio_tx_fifo_size_default 1024 ++#define dwc_param_dev_perio_tx_fifo_size_default 768 ++#define dwc_param_host_rx_fifo_size_default 640 ++#define dwc_param_host_nperio_tx_fifo_size_default 640 ++#define dwc_param_host_perio_tx_fifo_size_default 768 ++#define dwc_param_max_transfer_size_default 65535 ++#define dwc_param_max_packet_count_default 511 ++#define dwc_param_host_channels_default 16 ++#define dwc_param_dev_endpoints_default 6 ++#define dwc_param_phy_type_default DWC_PHY_TYPE_PARAM_UTMI ++#define dwc_param_phy_utmi_width_default 16 ++#define dwc_param_phy_ulpi_ddr_default 0 ++#define dwc_param_phy_ulpi_ext_vbus_default DWC_PHY_ULPI_INTERNAL_VBUS ++#define dwc_param_i2c_enable_default 0 ++#define dwc_param_ulpi_fs_ls_default 0 ++#define dwc_param_ts_dline_default 0 ++ ++/* ======================================================= */ ++ ++#endif // __DWC_OTG_CIL_IFX_H__ ++ +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_cil_intr.c +@@ -0,0 +1,708 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_cil_intr.c $ ++ * $Revision: 1.1.1.1 $ ++ * $Date: 2009-04-17 06:15:34 $ ++ * $Change: 553126 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. 
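[Editorial note] The dwc_param_*_default macros in dwc_otg_cil_ifx.h above are plain constants; nothing in that header applies them. A hedged sketch of how platform glue might feed them into dwc_otg_cil_init(), which is declared in dwc_otg_cil.h. Only a few representative fields are shown, and IFX_USB_IOMEM_BASE is a placeholder for the SoC's OTG register base, not a name from the patch:

    static dwc_otg_core_params_t ifx_usb_params = {
            .otg_cap                  = dwc_param_otg_cap_default,
            .dma_enable               = dwc_param_dma_enable_default,
            .speed                    = dwc_param_speed_default,
            .enable_dynamic_fifo      = dwc_param_enable_dynamic_fifo_default,
            .host_rx_fifo_size        = dwc_param_host_rx_fifo_size_default,
            .host_nperio_tx_fifo_size = dwc_param_host_nperio_tx_fifo_size_default,
            .host_perio_tx_fifo_size  = dwc_param_host_perio_tx_fifo_size_default,
            .host_channels            = dwc_param_host_channels_default,
            .phy_type                 = dwc_param_phy_type_default,
            .phy_utmi_width           = dwc_param_phy_utmi_width_default,
    };

    /* IFX_USB_IOMEM_BASE is an assumed constant for the register base. */
    dwc_otg_core_if_t *core_if =
            dwc_otg_cil_init((uint32_t *)IFX_USB_IOMEM_BASE, &ifx_usb_params);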
IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++/** @file ++ * ++ * The Core Interface Layer provides basic services for accessing and ++ * managing the DWC_otg hardware. These services are used by both the ++ * Host Controller Driver and the Peripheral Controller Driver. ++ * ++ * This file contains the Common Interrupt handlers. ++ */ ++#include "dwc_otg_plat.h" ++#include "dwc_otg_regs.h" ++#include "dwc_otg_cil.h" ++ ++#ifdef DEBUG ++inline const char *op_state_str( dwc_otg_core_if_t *_core_if ) ++{ ++ return (_core_if->op_state==A_HOST?"a_host": ++ (_core_if->op_state==A_SUSPEND?"a_suspend": ++ (_core_if->op_state==A_PERIPHERAL?"a_peripheral": ++ (_core_if->op_state==B_PERIPHERAL?"b_peripheral": ++ (_core_if->op_state==B_HOST?"b_host": ++ "unknown"))))); ++} ++#endif ++ ++/** This function will log a debug message ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ */ ++int32_t dwc_otg_handle_mode_mismatch_intr (dwc_otg_core_if_t *_core_if) ++{ ++ gintsts_data_t gintsts; ++ DWC_WARN("Mode Mismatch Interrupt: currently in %s mode\n", ++ dwc_otg_mode(_core_if) ? "Host" : "Device"); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.modemismatch = 1; ++ dwc_write_reg32 (&_core_if->core_global_regs->gintsts, gintsts.d32); ++ return 1; ++} ++ ++/** Start the HCD. Helper function for using the HCD callbacks. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ */ ++static inline void hcd_start( dwc_otg_core_if_t *_core_if ) ++{ ++ if (_core_if->hcd_cb && _core_if->hcd_cb->start) { ++ _core_if->hcd_cb->start( _core_if->hcd_cb->p ); ++ } ++} ++/** Stop the HCD. Helper function for using the HCD callbacks. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ */ ++static inline void hcd_stop( dwc_otg_core_if_t *_core_if ) ++{ ++ if (_core_if->hcd_cb && _core_if->hcd_cb->stop) { ++ _core_if->hcd_cb->stop( _core_if->hcd_cb->p ); ++ } ++} ++/** Disconnect the HCD. Helper function for using the HCD callbacks. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ */ ++static inline void hcd_disconnect( dwc_otg_core_if_t *_core_if ) ++{ ++ if (_core_if->hcd_cb && _core_if->hcd_cb->disconnect) { ++ _core_if->hcd_cb->disconnect( _core_if->hcd_cb->p ); ++ } ++} ++/** Inform the HCD the a New Session has begun. Helper function for ++ * using the HCD callbacks. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ */ ++static inline void hcd_session_start( dwc_otg_core_if_t *_core_if ) ++{ ++ if (_core_if->hcd_cb && _core_if->hcd_cb->session_start) { ++ _core_if->hcd_cb->session_start( _core_if->hcd_cb->p ); ++ } ++} ++ ++/** Start the PCD. Helper function for using the PCD callbacks. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ */ ++static inline void pcd_start( dwc_otg_core_if_t *_core_if ) ++{ ++ if (_core_if->pcd_cb && _core_if->pcd_cb->start ) { ++ _core_if->pcd_cb->start( _core_if->pcd_cb->p ); ++ } ++} ++/** Stop the PCD. 
Helper function for using the PCD callbacks. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ */ ++static inline void pcd_stop( dwc_otg_core_if_t *_core_if ) ++{ ++ if (_core_if->pcd_cb && _core_if->pcd_cb->stop ) { ++ _core_if->pcd_cb->stop( _core_if->pcd_cb->p ); ++ } ++} ++/** Suspend the PCD. Helper function for using the PCD callbacks. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ */ ++static inline void pcd_suspend( dwc_otg_core_if_t *_core_if ) ++{ ++ if (_core_if->pcd_cb && _core_if->pcd_cb->suspend ) { ++ _core_if->pcd_cb->suspend( _core_if->pcd_cb->p ); ++ } ++} ++/** Resume the PCD. Helper function for using the PCD callbacks. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ */ ++static inline void pcd_resume( dwc_otg_core_if_t *_core_if ) ++{ ++ if (_core_if->pcd_cb && _core_if->pcd_cb->resume_wakeup ) { ++ _core_if->pcd_cb->resume_wakeup( _core_if->pcd_cb->p ); ++ } ++} ++ ++/** ++ * This function handles the OTG Interrupts. It reads the OTG ++ * Interrupt Register (GOTGINT) to determine what interrupt has ++ * occurred. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ */ ++int32_t dwc_otg_handle_otg_intr(dwc_otg_core_if_t *_core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = ++ _core_if->core_global_regs; ++ gotgint_data_t gotgint; ++ gotgctl_data_t gotgctl; ++ gintmsk_data_t gintmsk; ++ ++ gotgint.d32 = dwc_read_reg32( &global_regs->gotgint); ++ gotgctl.d32 = dwc_read_reg32( &global_regs->gotgctl); ++ DWC_DEBUGPL(DBG_CIL, "++OTG Interrupt gotgint=%0x [%s]\n", gotgint.d32, ++ op_state_str(_core_if)); ++ //DWC_DEBUGPL(DBG_CIL, "gotgctl=%08x\n", gotgctl.d32 ); ++ ++ if (gotgint.b.sesenddet) { ++ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " ++ "Session End Detected++ (%s)\n", ++ op_state_str(_core_if)); ++ gotgctl.d32 = dwc_read_reg32( &global_regs->gotgctl); ++ ++ if (_core_if->op_state == B_HOST) { ++ pcd_start( _core_if ); ++ _core_if->op_state = B_PERIPHERAL; ++ } else { ++ /* If not B_HOST and Device HNP still set. HNP ++ * Did not succeed!*/ ++ if (gotgctl.b.devhnpen) { ++ DWC_DEBUGPL(DBG_ANY, "Session End Detected\n"); ++ DWC_ERROR( "Device Not Connected/Responding!\n" ); ++ } ++ ++ /* If Session End Detected the B-Cable has ++ * been disconnected. */ ++ /* Reset PCD and Gadget driver to a ++ * clean state. */ ++ pcd_stop(_core_if); ++ } ++ gotgctl.d32 = 0; ++ gotgctl.b.devhnpen = 1; ++ dwc_modify_reg32( &global_regs->gotgctl, ++ gotgctl.d32, 0); ++ } ++ if (gotgint.b.sesreqsucstschng) { ++ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " ++ "Session Reqeust Success Status Change++\n"); ++ gotgctl.d32 = dwc_read_reg32( &global_regs->gotgctl); ++ if (gotgctl.b.sesreqscs) { ++ if ((_core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) && ++ (_core_if->core_params->i2c_enable)) { ++ _core_if->srp_success = 1; ++ } ++ else { ++ pcd_resume( _core_if ); ++ /* Clear Session Request */ ++ gotgctl.d32 = 0; ++ gotgctl.b.sesreq = 1; ++ dwc_modify_reg32( &global_regs->gotgctl, ++ gotgctl.d32, 0); ++ } ++ } ++ } ++ if (gotgint.b.hstnegsucstschng) { ++ /* Print statements during the HNP interrupt handling ++ * can cause it to fail.*/ ++ gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl); ++ if (gotgctl.b.hstnegscs) { ++ if (dwc_otg_is_host_mode(_core_if) ) { ++ _core_if->op_state = B_HOST; ++ /* ++ * Need to disable SOF interrupt immediately. ++ * When switching from device to host, the PCD ++ * interrupt handler won't handle the ++ * interrupt if host mode is already set. 
The ++ * HCD interrupt handler won't get called if ++ * the HCD state is HALT. This means that the ++ * interrupt does not get handled and Linux ++ * complains loudly. ++ */ ++ gintmsk.d32 = 0; ++ gintmsk.b.sofintr = 1; ++ dwc_modify_reg32(&global_regs->gintmsk, ++ gintmsk.d32, 0); ++ pcd_stop(_core_if); ++ /* ++ * Initialize the Core for Host mode. ++ */ ++ hcd_start( _core_if ); ++ _core_if->op_state = B_HOST; ++ } ++ } else { ++ gotgctl.d32 = 0; ++ gotgctl.b.hnpreq = 1; ++ gotgctl.b.devhnpen = 1; ++ dwc_modify_reg32( &global_regs->gotgctl, ++ gotgctl.d32, 0); ++ DWC_DEBUGPL( DBG_ANY, "HNP Failed\n"); ++ DWC_ERROR( "Device Not Connected/Responding\n" ); ++ } ++ } ++ if (gotgint.b.hstnegdet) { ++ /* The disconnect interrupt is set at the same time as ++ * Host Negotiation Detected. During the mode ++ * switch all interrupts are cleared so the disconnect ++ * interrupt handler will not get executed. ++ */ ++ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " ++ "Host Negotiation Detected++ (%s)\n", ++ (dwc_otg_is_host_mode(_core_if)?"Host":"Device")); ++ if (dwc_otg_is_device_mode(_core_if)){ ++ DWC_DEBUGPL(DBG_ANY, "a_suspend->a_peripheral (%d)\n",_core_if->op_state); ++ hcd_disconnect( _core_if ); ++ pcd_start( _core_if ); ++ _core_if->op_state = A_PERIPHERAL; ++ } else { ++ /* ++ * Need to disable SOF interrupt immediately. When ++ * switching from device to host, the PCD interrupt ++ * handler won't handle the interrupt if host mode is ++ * already set. The HCD interrupt handler won't get ++ * called if the HCD state is HALT. This means that ++ * the interrupt does not get handled and Linux ++ * complains loudly. ++ */ ++ gintmsk.d32 = 0; ++ gintmsk.b.sofintr = 1; ++ dwc_modify_reg32(&global_regs->gintmsk, ++ gintmsk.d32, 0); ++ pcd_stop( _core_if ); ++ hcd_start( _core_if ); ++ _core_if->op_state = A_HOST; ++ } ++ } ++ if (gotgint.b.adevtoutchng) { ++ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " ++ "A-Device Timeout Change++\n"); ++ } ++ if (gotgint.b.debdone) { ++ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " ++ "Debounce Done++\n"); ++ } ++ ++ /* Clear GOTGINT */ ++ dwc_write_reg32 (&_core_if->core_global_regs->gotgint, gotgint.d32); ++ ++ return 1; ++} ++ ++/** ++ * This function handles the Connector ID Status Change Interrupt. It ++ * reads the OTG Interrupt Register (GOTCTL) to determine whether this ++ * is a Device to Host Mode transition or a Host Mode to Device ++ * Transition. ++ * ++ * This only occurs when the cable is connected/removed from the PHY ++ * connector. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ */ ++int32_t dwc_otg_handle_conn_id_status_change_intr(dwc_otg_core_if_t *_core_if) ++{ ++ uint32_t count = 0; ++ ++ gintsts_data_t gintsts = { .d32 = 0 }; ++ gintmsk_data_t gintmsk = { .d32 = 0 }; ++ gotgctl_data_t gotgctl = { .d32 = 0 }; ++ ++ /* ++ * Need to disable SOF interrupt immediately. If switching from device ++ * to host, the PCD interrupt handler won't handle the interrupt if ++ * host mode is already set. The HCD interrupt handler won't get ++ * called if the HCD state is HALT. This means that the interrupt does ++ * not get handled and Linux complains loudly. 
++ */ ++ gintmsk.b.sofintr = 1; ++ dwc_modify_reg32(&_core_if->core_global_regs->gintmsk, gintmsk.d32, 0); ++ ++ DWC_DEBUGPL(DBG_CIL, " ++Connector ID Status Change Interrupt++ (%s)\n", ++ (dwc_otg_is_host_mode(_core_if)?"Host":"Device")); ++ gotgctl.d32 = dwc_read_reg32(&_core_if->core_global_regs->gotgctl); ++ DWC_DEBUGPL(DBG_CIL, "gotgctl=%0x\n", gotgctl.d32); ++ DWC_DEBUGPL(DBG_CIL, "gotgctl.b.conidsts=%d\n", gotgctl.b.conidsts); ++ ++ /* B-Device connector (Device Mode) */ ++ if (gotgctl.b.conidsts) { ++ /* Wait for switch to device mode. */ ++ while (!dwc_otg_is_device_mode(_core_if) ){ ++ DWC_PRINT("Waiting for Peripheral Mode, Mode=%s\n", ++ (dwc_otg_is_host_mode(_core_if)?"Host":"Peripheral")); ++ MDELAY(100); ++ if (++count > 10000) *(uint32_t*)NULL=0; ++ } ++ _core_if->op_state = B_PERIPHERAL; ++ dwc_otg_core_init(_core_if); ++ dwc_otg_enable_global_interrupts(_core_if); ++ pcd_start( _core_if ); ++ } else { ++ /* A-Device connector (Host Mode) */ ++ while (!dwc_otg_is_host_mode(_core_if) ) { ++ DWC_PRINT("Waiting for Host Mode, Mode=%s\n", ++ (dwc_otg_is_host_mode(_core_if)?"Host":"Peripheral")); ++ MDELAY(100); ++ if (++count > 10000) *(uint32_t*)NULL=0; ++ } ++ _core_if->op_state = A_HOST; ++ /* ++ * Initialize the Core for Host mode. ++ */ ++ dwc_otg_core_init(_core_if); ++ dwc_otg_enable_global_interrupts(_core_if); ++ hcd_start( _core_if ); ++ } ++ ++ /* Set flag and clear interrupt */ ++ gintsts.b.conidstschng = 1; ++ dwc_write_reg32 (&_core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * This interrupt indicates that a device is initiating the Session ++ * Request Protocol to request the host to turn on bus power so a new ++ * session can begin. The handler responds by turning on bus power. If ++ * the DWC_otg controller is in low power mode, the handler brings the ++ * controller out of low power mode before turning on bus power. ++ * ++ * @param _core_if Programming view of DWC_otg controller. ++ */ ++int32_t dwc_otg_handle_session_req_intr( dwc_otg_core_if_t *_core_if ) ++{ ++#ifndef DWC_HOST_ONLY // winder ++ hprt0_data_t hprt0; ++#endif ++ gintsts_data_t gintsts; ++ ++#ifndef DWC_HOST_ONLY ++ DWC_DEBUGPL(DBG_ANY, "++Session Request Interrupt++\n"); ++ ++ if (dwc_otg_is_device_mode(_core_if) ) { ++ DWC_PRINT("SRP: Device mode\n"); ++ } else { ++ DWC_PRINT("SRP: Host mode\n"); ++ ++ /* Turn on the port power bit. */ ++ hprt0.d32 = dwc_otg_read_hprt0( _core_if ); ++ hprt0.b.prtpwr = 1; ++ dwc_write_reg32(_core_if->host_if->hprt0, hprt0.d32); ++ ++ /* Start the Connection timer. So a message can be displayed ++ * if connect does not occur within 10 seconds. */ ++ hcd_session_start( _core_if ); ++ } ++#endif ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.sessreqintr = 1; ++ dwc_write_reg32 (&_core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * This interrupt indicates that the DWC_otg controller has detected a ++ * resume or remote wakeup sequence. If the DWC_otg controller is in ++ * low power mode, the handler must brings the controller out of low ++ * power mode. The controller automatically begins resume ++ * signaling. The handler schedules a time to stop resume signaling. 
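[Editorial note] Each handler in dwc_otg_cil_intr.c acknowledges its source the same way: a gintsts value with only the handled bit set is written back, which clears that bit without disturbing the others (the status bits clear on writing 1). The pattern, extracted from the session-request handler above for emphasis:

    /* Acknowledgement idiom used throughout this file. */
    gintsts_data_t ack = { .d32 = 0 };

    ack.b.sessreqintr = 1;    /* or wkupintr, disconnect, conidstschng, ... */
    dwc_write_reg32(&core_if->core_global_regs->gintsts, ack.d32);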
++ */ ++int32_t dwc_otg_handle_wakeup_detected_intr( dwc_otg_core_if_t *_core_if ) ++{ ++ gintsts_data_t gintsts; ++ ++ DWC_DEBUGPL(DBG_ANY, "++Resume and Remote Wakeup Detected Interrupt++\n"); ++ ++ if (dwc_otg_is_device_mode(_core_if) ) { ++ dctl_data_t dctl = {.d32=0}; ++ DWC_DEBUGPL(DBG_PCD, "DSTS=0x%0x\n", ++ dwc_read_reg32( &_core_if->dev_if->dev_global_regs->dsts)); ++#ifdef PARTIAL_POWER_DOWN ++ if (_core_if->hwcfg4.b.power_optimiz) { ++ pcgcctl_data_t power = {.d32=0}; ++ ++ power.d32 = dwc_read_reg32( _core_if->pcgcctl ); ++ DWC_DEBUGPL(DBG_CIL, "PCGCCTL=%0x\n", power.d32); ++ ++ power.b.stoppclk = 0; ++ dwc_write_reg32( _core_if->pcgcctl, power.d32); ++ ++ power.b.pwrclmp = 0; ++ dwc_write_reg32( _core_if->pcgcctl, power.d32); ++ ++ power.b.rstpdwnmodule = 0; ++ dwc_write_reg32( _core_if->pcgcctl, power.d32); ++ } ++#endif ++ /* Clear the Remote Wakeup Signalling */ ++ dctl.b.rmtwkupsig = 1; ++ dwc_modify_reg32( &_core_if->dev_if->dev_global_regs->dctl, ++ dctl.d32, 0 ); ++ ++ if (_core_if->pcd_cb && _core_if->pcd_cb->resume_wakeup) { ++ _core_if->pcd_cb->resume_wakeup( _core_if->pcd_cb->p ); ++ } ++ ++ } else { ++ /* ++ * Clear the Resume after 70ms. (Need 20 ms minimum. Use 70 ms ++ * so that OPT tests pass with all PHYs). ++ */ ++ hprt0_data_t hprt0 = {.d32=0}; ++ pcgcctl_data_t pcgcctl = {.d32=0}; ++ /* Restart the Phy Clock */ ++ pcgcctl.b.stoppclk = 1; ++ dwc_modify_reg32(_core_if->pcgcctl, pcgcctl.d32, 0); ++ UDELAY(10); ++ ++ /* Now wait for 70 ms. */ ++ hprt0.d32 = dwc_otg_read_hprt0( _core_if ); ++ DWC_DEBUGPL(DBG_ANY,"Resume: HPRT0=%0x\n", hprt0.d32); ++ MDELAY(70); ++ hprt0.b.prtres = 0; /* Resume */ ++ dwc_write_reg32(_core_if->host_if->hprt0, hprt0.d32); ++ DWC_DEBUGPL(DBG_ANY,"Clear Resume: HPRT0=%0x\n", dwc_read_reg32(_core_if->host_if->hprt0)); ++ } ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.wkupintr = 1; ++ dwc_write_reg32 (&_core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * This interrupt indicates that a device has been disconnected from ++ * the root port. ++ */ ++int32_t dwc_otg_handle_disconnect_intr( dwc_otg_core_if_t *_core_if) ++{ ++ gintsts_data_t gintsts; ++ ++ DWC_DEBUGPL(DBG_ANY, "++Disconnect Detected Interrupt++ (%s) %s\n", ++ (dwc_otg_is_host_mode(_core_if)?"Host":"Device"), ++ op_state_str(_core_if)); ++ ++/** @todo Consolidate this if statement. */ ++#ifndef DWC_HOST_ONLY ++ if (_core_if->op_state == B_HOST) { ++ /* If in device mode Disconnect and stop the HCD, then ++ * start the PCD. */ ++ hcd_disconnect( _core_if ); ++ pcd_start( _core_if ); ++ _core_if->op_state = B_PERIPHERAL; ++ } else if (dwc_otg_is_device_mode(_core_if)) { ++ gotgctl_data_t gotgctl = { .d32 = 0 }; ++ gotgctl.d32 = dwc_read_reg32(&_core_if->core_global_regs->gotgctl); ++ if (gotgctl.b.hstsethnpen==1) { ++ /* Do nothing, if HNP in process the OTG ++ * interrupt "Host Negotiation Detected" ++ * interrupt will do the mode switch. ++ */ ++ } else if (gotgctl.b.devhnpen == 0) { ++ /* If in device mode Disconnect and stop the HCD, then ++ * start the PCD. */ ++ hcd_disconnect( _core_if ); ++ pcd_start( _core_if ); ++ _core_if->op_state = B_PERIPHERAL; ++ } else { ++ DWC_DEBUGPL(DBG_ANY,"!a_peripheral && !devhnpen\n"); ++ } ++ } else { ++ if (_core_if->op_state == A_HOST) { ++ /* A-Cable still connected but device disconnected. */ ++ hcd_disconnect( _core_if ); ++ } ++ } ++#endif ++/* Without OTG, we should use the disconnect function!? winder added.*/ ++#if 1 // NO OTG, so host only!! 
++ hcd_disconnect( _core_if ); ++#endif ++ ++ gintsts.d32 = 0; ++ gintsts.b.disconnect = 1; ++ dwc_write_reg32 (&_core_if->core_global_regs->gintsts, gintsts.d32); ++ return 1; ++} ++/** ++ * This interrupt indicates that SUSPEND state has been detected on ++ * the USB. ++ * ++ * For HNP the USB Suspend interrupt signals the change from ++ * "a_peripheral" to "a_host". ++ * ++ * When power management is enabled the core will be put in low power ++ * mode. ++ */ ++int32_t dwc_otg_handle_usb_suspend_intr(dwc_otg_core_if_t *_core_if ) ++{ ++ dsts_data_t dsts; ++ gintsts_data_t gintsts; ++ ++ //805141:<IFTW-fchang>.removed DWC_DEBUGPL(DBG_ANY,"USB SUSPEND\n"); ++ ++ if (dwc_otg_is_device_mode( _core_if ) ) { ++ /* Check the Device status register to determine if the Suspend ++ * state is active. */ ++ dsts.d32 = dwc_read_reg32( &_core_if->dev_if->dev_global_regs->dsts); ++ DWC_DEBUGPL(DBG_PCD, "DSTS=0x%0x\n", dsts.d32); ++ DWC_DEBUGPL(DBG_PCD, "DSTS.Suspend Status=%d " ++ "HWCFG4.power Optimize=%d\n", ++ dsts.b.suspsts, _core_if->hwcfg4.b.power_optimiz); ++ ++ ++#ifdef PARTIAL_POWER_DOWN ++/** @todo Add a module parameter for power management. */ ++ ++ if (dsts.b.suspsts && _core_if->hwcfg4.b.power_optimiz) { ++ pcgcctl_data_t power = {.d32=0}; ++ DWC_DEBUGPL(DBG_CIL, "suspend\n"); ++ ++ power.b.pwrclmp = 1; ++ dwc_write_reg32( _core_if->pcgcctl, power.d32); ++ ++ power.b.rstpdwnmodule = 1; ++ dwc_modify_reg32( _core_if->pcgcctl, 0, power.d32); ++ ++ power.b.stoppclk = 1; ++ dwc_modify_reg32( _core_if->pcgcctl, 0, power.d32); ++ ++ } else { ++ DWC_DEBUGPL(DBG_ANY,"disconnect?\n"); ++ } ++#endif ++ /* PCD callback for suspend. */ ++ pcd_suspend(_core_if); ++ } else { ++ if (_core_if->op_state == A_PERIPHERAL) { ++ DWC_DEBUGPL(DBG_ANY,"a_peripheral->a_host\n"); ++ /* Clear the a_peripheral flag, back to a_host. */ ++ pcd_stop( _core_if ); ++ hcd_start( _core_if ); ++ _core_if->op_state = A_HOST; ++ } ++ } ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.usbsuspend = 1; ++ dwc_write_reg32( &_core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++ ++/** ++ * This function returns the Core Interrupt register. ++ */ ++static inline uint32_t dwc_otg_read_common_intr(dwc_otg_core_if_t *_core_if) ++{ ++ gintsts_data_t gintsts; ++ gintmsk_data_t gintmsk; ++ gintmsk_data_t gintmsk_common = {.d32=0}; ++ gintmsk_common.b.wkupintr = 1; ++ gintmsk_common.b.sessreqintr = 1; ++ gintmsk_common.b.conidstschng = 1; ++ gintmsk_common.b.otgintr = 1; ++ gintmsk_common.b.modemismatch = 1; ++ gintmsk_common.b.disconnect = 1; ++ gintmsk_common.b.usbsuspend = 1; ++ /** @todo: The port interrupt occurs while in device ++ * mode. Added code to CIL to clear the interrupt for now! ++ */ ++ gintmsk_common.b.portintr = 1; ++ ++ gintsts.d32 = dwc_read_reg32(&_core_if->core_global_regs->gintsts); ++ gintmsk.d32 = dwc_read_reg32(&_core_if->core_global_regs->gintmsk); ++#ifdef DEBUG ++ /* if any common interrupts set */ ++ if (gintsts.d32 & gintmsk_common.d32) { ++ DWC_DEBUGPL(DBG_ANY, "gintsts=%08x gintmsk=%08x\n", ++ gintsts.d32, gintmsk.d32); ++ } ++#endif ++ ++ return ((gintsts.d32 & gintmsk.d32 ) & gintmsk_common.d32); ++ ++} ++ ++/** ++ * Common interrupt handler. ++ * ++ * The common interrupts are those that occur in both Host and Device mode. ++ * This handler handles the following interrupts: ++ * - Mode Mismatch Interrupt ++ * - Disconnect Interrupt ++ * - OTG Interrupt ++ * - Connector ID Status Change Interrupt ++ * - Session Request Interrupt. 
++ * - Resume / Remote Wakeup Detected Interrupt. ++ * ++ */ ++extern int32_t dwc_otg_handle_common_intr( dwc_otg_core_if_t *_core_if ) ++{ ++ int retval = 0; ++ gintsts_data_t gintsts; ++ ++ gintsts.d32 = dwc_otg_read_common_intr(_core_if); ++ ++ if (gintsts.b.modemismatch) { ++ retval |= dwc_otg_handle_mode_mismatch_intr( _core_if ); ++ } ++ if (gintsts.b.otgintr) { ++ retval |= dwc_otg_handle_otg_intr( _core_if ); ++ } ++ if (gintsts.b.conidstschng) { ++ retval |= dwc_otg_handle_conn_id_status_change_intr( _core_if ); ++ } ++ if (gintsts.b.disconnect) { ++ retval |= dwc_otg_handle_disconnect_intr( _core_if ); ++ } ++ if (gintsts.b.sessreqintr) { ++ retval |= dwc_otg_handle_session_req_intr( _core_if ); ++ } ++ if (gintsts.b.wkupintr) { ++ retval |= dwc_otg_handle_wakeup_detected_intr( _core_if ); ++ } ++ if (gintsts.b.usbsuspend) { ++ retval |= dwc_otg_handle_usb_suspend_intr( _core_if ); ++ } ++ if (gintsts.b.portintr && dwc_otg_is_device_mode(_core_if)) { ++ /* The port interrupt occurs while in device mode with HPRT0 ++ * Port Enable/Disable. ++ */ ++ gintsts.d32 = 0; ++ gintsts.b.portintr = 1; ++ dwc_write_reg32(&_core_if->core_global_regs->gintsts, ++ gintsts.d32); ++ retval |= 1; ++ ++ } ++ return retval; ++} +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_driver.c +@@ -0,0 +1,1264 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_driver.c $ ++ * $Revision: 1.1.1.1 $ ++ * $Date: 2009-04-17 06:15:34 $ ++ * $Change: 631780 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++/** @file ++ * The dwc_otg_driver module provides the initialization and cleanup entry ++ * points for the DWC_otg driver. 
This module will be dynamically installed ++ * after Linux is booted using the insmod command. When the module is ++ * installed, the dwc_otg_init function is called. When the module is ++ * removed (using rmmod), the dwc_otg_cleanup function is called. ++ * ++ * This module also defines a data structure for the dwc_otg_driver, which is ++ * used in conjunction with the standard ARM lm_device structure. These ++ * structures allow the OTG driver to comply with the standard Linux driver ++ * model in which devices and drivers are registered with a bus driver. This ++ * has the benefit that Linux can expose attributes of the driver and device ++ * in its special sysfs file system. Users can then read or write files in ++ * this file system to perform diagnostics on the driver components or the ++ * device. ++ */ ++ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/moduleparam.h> ++#include <linux/init.h> ++ ++#include <linux/device.h> ++#include <linux/platform_device.h> ++ ++#include <linux/errno.h> ++#include <linux/types.h> ++#include <linux/stat.h> /* permission constants */ ++#include <linux/irq.h> ++#include <asm/io.h> ++ ++#include "dwc_otg_plat.h" ++#include "dwc_otg_attr.h" ++#include "dwc_otg_driver.h" ++#include "dwc_otg_cil.h" ++#include "dwc_otg_cil_ifx.h" ++ ++// #include "dwc_otg_pcd.h" // device ++#include "dwc_otg_hcd.h" // host ++ ++#include "dwc_otg_ifx.h" // for Infineon platform specific. ++ ++#define DWC_DRIVER_VERSION "2.60a 22-NOV-2006" ++#define DWC_DRIVER_DESC "HS OTG USB Controller driver" ++ ++const char dwc_driver_name[] = "dwc_otg"; ++ ++static unsigned long dwc_iomem_base = IFX_USB_IOMEM_BASE; ++int dwc_irq = LQ_USB_INT; ++//int dwc_irq = 54; ++//int dwc_irq = IFXMIPS_USB_OC_INT; ++ ++extern int ifx_usb_hc_init(unsigned long base_addr, int irq); ++extern void ifx_usb_hc_remove(void); ++ ++/*-------------------------------------------------------------------------*/ ++/* Encapsulate the module parameter settings */ ++ ++static dwc_otg_core_params_t dwc_otg_module_params = { ++ .opt = -1, ++ .otg_cap = -1, ++ .dma_enable = -1, ++ .dma_burst_size = -1, ++ .speed = -1, ++ .host_support_fs_ls_low_power = -1, ++ .host_ls_low_power_phy_clk = -1, ++ .enable_dynamic_fifo = -1, ++ .data_fifo_size = -1, ++ .dev_rx_fifo_size = -1, ++ .dev_nperio_tx_fifo_size = -1, ++ .dev_perio_tx_fifo_size = /* dev_perio_tx_fifo_size_1 */ {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, /* 15 */ ++ .host_rx_fifo_size = -1, ++ .host_nperio_tx_fifo_size = -1, ++ .host_perio_tx_fifo_size = -1, ++ .max_transfer_size = -1, ++ .max_packet_count = -1, ++ .host_channels = -1, ++ .dev_endpoints = -1, ++ .phy_type = -1, ++ .phy_utmi_width = -1, ++ .phy_ulpi_ddr = -1, ++ .phy_ulpi_ext_vbus = -1, ++ .i2c_enable = -1, ++ .ulpi_fs_ls = -1, ++ .ts_dline = -1, ++ .en_multiple_tx_fifo = -1, ++ .dev_tx_fifo_size = { /* dev_tx_fifo_size */ ++ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 ++ }, /* 15 */ ++ .thr_ctl = -1, ++ .tx_thr_length = -1, ++ .rx_thr_length = -1, ++}; ++ ++/** ++ * This function shows the Driver Version. ++ */ ++static ssize_t version_show(struct device_driver *dev, char *buf) ++{ ++ return snprintf(buf, sizeof(DWC_DRIVER_VERSION)+2,"%s\n", ++ DWC_DRIVER_VERSION); ++} ++static DRIVER_ATTR(version, S_IRUGO, version_show, NULL); ++ ++/** ++ * Global Debug Level Mask. ++ */ ++uint32_t g_dbg_lvl = 0xff; /* OFF */ ++ ++/** ++ * This function shows the driver Debug Level. 
++ */
++static ssize_t dbg_level_show(struct device_driver *_drv, char *_buf)
++{
++ return sprintf(_buf, "0x%0x\n", g_dbg_lvl);
++}
++/**
++ * This function stores the driver Debug Level.
++ */
++static ssize_t dbg_level_store(struct device_driver *_drv, const char *_buf,
++ size_t _count)
++{
++ g_dbg_lvl = simple_strtoul(_buf, NULL, 16);
++ return _count;
++}
++static DRIVER_ATTR(debuglevel, S_IRUGO|S_IWUSR, dbg_level_show, dbg_level_store);
++
++/**
++ * This function is called during module initialization to verify that
++ * the module parameters are in a valid state.
++ */
++static int check_parameters(dwc_otg_core_if_t *core_if)
++{
++ int i;
++ int retval = 0;
++
++/* Checks if the parameter is outside of its valid range of values */
++#define DWC_OTG_PARAM_TEST(_param_,_low_,_high_) \
++ ((dwc_otg_module_params._param_ < (_low_)) || \
++ (dwc_otg_module_params._param_ > (_high_)))
++
++/* If the parameter has been set by the user, check that the parameter value is
++ * within the valid range of values. If not, report a module error. */
++#define DWC_OTG_PARAM_ERR(_param_,_low_,_high_,_string_) \
++ do { \
++ if (dwc_otg_module_params._param_ != -1) { \
++ if (DWC_OTG_PARAM_TEST(_param_,(_low_),(_high_))) { \
++ DWC_ERROR("`%d' invalid for parameter `%s'\n", \
++ dwc_otg_module_params._param_, _string_); \
++ dwc_otg_module_params._param_ = dwc_param_##_param_##_default; \
++ retval ++; \
++ } \
++ } \
++ } while (0)
++
++ DWC_OTG_PARAM_ERR(opt,0,1,"opt");
++ DWC_OTG_PARAM_ERR(otg_cap,0,2,"otg_cap");
++ DWC_OTG_PARAM_ERR(dma_enable,0,1,"dma_enable");
++ DWC_OTG_PARAM_ERR(speed,0,1,"speed");
++ DWC_OTG_PARAM_ERR(host_support_fs_ls_low_power,0,1,"host_support_fs_ls_low_power");
++ DWC_OTG_PARAM_ERR(host_ls_low_power_phy_clk,0,1,"host_ls_low_power_phy_clk");
++ DWC_OTG_PARAM_ERR(enable_dynamic_fifo,0,1,"enable_dynamic_fifo");
++ DWC_OTG_PARAM_ERR(data_fifo_size,32,32768,"data_fifo_size");
++ DWC_OTG_PARAM_ERR(dev_rx_fifo_size,16,32768,"dev_rx_fifo_size");
++ DWC_OTG_PARAM_ERR(dev_nperio_tx_fifo_size,16,32768,"dev_nperio_tx_fifo_size");
++ DWC_OTG_PARAM_ERR(host_rx_fifo_size,16,32768,"host_rx_fifo_size");
++ DWC_OTG_PARAM_ERR(host_nperio_tx_fifo_size,16,32768,"host_nperio_tx_fifo_size");
++ DWC_OTG_PARAM_ERR(host_perio_tx_fifo_size,16,32768,"host_perio_tx_fifo_size");
++ DWC_OTG_PARAM_ERR(max_transfer_size,2047,524288,"max_transfer_size");
++ DWC_OTG_PARAM_ERR(max_packet_count,15,511,"max_packet_count");
++ DWC_OTG_PARAM_ERR(host_channels,1,16,"host_channels");
++ DWC_OTG_PARAM_ERR(dev_endpoints,1,15,"dev_endpoints");
++ DWC_OTG_PARAM_ERR(phy_type,0,2,"phy_type");
++ DWC_OTG_PARAM_ERR(phy_ulpi_ddr,0,1,"phy_ulpi_ddr");
++ DWC_OTG_PARAM_ERR(phy_ulpi_ext_vbus,0,1,"phy_ulpi_ext_vbus");
++ DWC_OTG_PARAM_ERR(i2c_enable,0,1,"i2c_enable");
++ DWC_OTG_PARAM_ERR(ulpi_fs_ls,0,1,"ulpi_fs_ls");
++ DWC_OTG_PARAM_ERR(ts_dline,0,1,"ts_dline");
++
++ if (dwc_otg_module_params.dma_burst_size != -1) {
++ if (DWC_OTG_PARAM_TEST(dma_burst_size,1,1) &&
++ DWC_OTG_PARAM_TEST(dma_burst_size,4,4) &&
++ DWC_OTG_PARAM_TEST(dma_burst_size,8,8) &&
++ DWC_OTG_PARAM_TEST(dma_burst_size,16,16) &&
++ DWC_OTG_PARAM_TEST(dma_burst_size,32,32) &&
++ DWC_OTG_PARAM_TEST(dma_burst_size,64,64) &&
++ DWC_OTG_PARAM_TEST(dma_burst_size,128,128) &&
++ DWC_OTG_PARAM_TEST(dma_burst_size,256,256))
++ {
++ DWC_ERROR("`%d' invalid for parameter `dma_burst_size'\n",
++ dwc_otg_module_params.dma_burst_size);
++ dwc_otg_module_params.dma_burst_size = 32;
++ retval ++;
++ }
++ }
++
++ if (dwc_otg_module_params.phy_utmi_width != -1) {
++ if (DWC_OTG_PARAM_TEST(phy_utmi_width,8,8) &&
++ DWC_OTG_PARAM_TEST(phy_utmi_width,16,16))
++ {
++ DWC_ERROR("`%d' invalid for parameter `phy_utmi_width'\n",
++ dwc_otg_module_params.phy_utmi_width);
++ //dwc_otg_module_params.phy_utmi_width = 16;
++ dwc_otg_module_params.phy_utmi_width = 8;
++ retval ++;
++ }
++ }
++
++ for (i=0; i<15; i++) {
++ /** @todo should be like above */
++ //DWC_OTG_PARAM_ERR(dev_perio_tx_fifo_size[i],4,768,"dev_perio_tx_fifo_size");
++ if (dwc_otg_module_params.dev_perio_tx_fifo_size[i] != -1) {
++ if (DWC_OTG_PARAM_TEST(dev_perio_tx_fifo_size[i],4,768)) {
++ DWC_ERROR("`%d' invalid for parameter `%s_%d'\n",
++ dwc_otg_module_params.dev_perio_tx_fifo_size[i], "dev_perio_tx_fifo_size", i);
++ dwc_otg_module_params.dev_perio_tx_fifo_size[i] = dwc_param_dev_perio_tx_fifo_size_default;
++ retval ++;
++ }
++ }
++ }
++
++ DWC_OTG_PARAM_ERR(en_multiple_tx_fifo, 0, 1, "en_multiple_tx_fifo");
++ for (i = 0; i < 15; i++) {
++ /** @todo should be like above */
++ //DWC_OTG_PARAM_ERR(dev_tx_fifo_size[i],4,768,"dev_tx_fifo_size");
++ if (dwc_otg_module_params.dev_tx_fifo_size[i] != -1) {
++ if (DWC_OTG_PARAM_TEST(dev_tx_fifo_size[i], 4, 768)) {
++ DWC_ERROR("`%d' invalid for parameter `%s_%d'\n",
++ dwc_otg_module_params.dev_tx_fifo_size[i],
++ "dev_tx_fifo_size", i);
++ dwc_otg_module_params.dev_tx_fifo_size[i] =
++ dwc_param_dev_tx_fifo_size_default;
++ retval++;
++ }
++ }
++ }
++ DWC_OTG_PARAM_ERR(thr_ctl, 0, 7, "thr_ctl");
++ DWC_OTG_PARAM_ERR(tx_thr_length, 8, 128, "tx_thr_length");
++ DWC_OTG_PARAM_ERR(rx_thr_length, 8, 128, "rx_thr_length");
++
++ /* At this point, all module parameters that have been set by the user
++ * are valid, and those that have not are left unset. Now set their
++ * default values and/or check the parameters against the hardware
++ * configurations of the OTG core. */
++
++
++
++/* This sets the parameter to the default value if it has not been set by the
++ * user */
++#define DWC_OTG_PARAM_SET_DEFAULT(_param_) \
++ ({ \
++ int changed = 1; \
++ if (dwc_otg_module_params._param_ == -1) { \
++ changed = 0; \
++ dwc_otg_module_params._param_ = dwc_param_##_param_##_default; \
++ } \
++ changed; \
++ })
++
++/* This checks the parameter against the hardware configuration to see if it is
++ * valid. It is possible that the default value could be invalid. In this
++ * case, it will report a module error if the user touched the parameter.
++ * Otherwise it will adjust the value without any error. */
++#define DWC_OTG_PARAM_CHECK_VALID(_param_,_str_,_is_valid_,_set_valid_) \
++ ({ \
++ int changed = DWC_OTG_PARAM_SET_DEFAULT(_param_); \
++ int error = 0; \
++ if (!(_is_valid_)) { \
++ if (changed) { \
++ DWC_ERROR("`%d' invalid for parameter `%s'. 
Check HW configuration.\n", dwc_otg_module_params._param_,_str_); \ ++ error = 1; \ ++ } \ ++ dwc_otg_module_params._param_ = (_set_valid_); \ ++ } \ ++ error; \ ++ }) ++ ++ /* OTG Cap */ ++ retval += DWC_OTG_PARAM_CHECK_VALID(otg_cap,"otg_cap", ++ ({ ++ int valid; ++ valid = 1; ++ switch (dwc_otg_module_params.otg_cap) { ++ case DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE: ++ if (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG) valid = 0; ++ break; ++ case DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE: ++ if ((core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG) && ++ (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG) && ++ (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) && ++ (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) ++ { ++ valid = 0; ++ } ++ break; ++ case DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE: ++ /* always valid */ ++ break; ++ } ++ valid; ++ }), ++ (((core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG) || ++ (core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG) || ++ (core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) || ++ (core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) ? ++ DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE : ++ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(dma_enable,"dma_enable", ++ ((dwc_otg_module_params.dma_enable == 1) && (core_if->hwcfg2.b.architecture == 0)) ? 0 : 1, ++ 0); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(opt,"opt", ++ 1, ++ 0); ++ ++ DWC_OTG_PARAM_SET_DEFAULT(dma_burst_size); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(host_support_fs_ls_low_power, ++ "host_support_fs_ls_low_power", ++ 1, 0); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(enable_dynamic_fifo, ++ "enable_dynamic_fifo", ++ ((dwc_otg_module_params.enable_dynamic_fifo == 0) || ++ (core_if->hwcfg2.b.dynamic_fifo == 1)), 0); ++ ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(data_fifo_size, ++ "data_fifo_size", ++ (dwc_otg_module_params.data_fifo_size <= core_if->hwcfg3.b.dfifo_depth), ++ core_if->hwcfg3.b.dfifo_depth); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(dev_rx_fifo_size, ++ "dev_rx_fifo_size", ++ (dwc_otg_module_params.dev_rx_fifo_size <= dwc_read_reg32(&core_if->core_global_regs->grxfsiz)), ++ dwc_read_reg32(&core_if->core_global_regs->grxfsiz)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(dev_nperio_tx_fifo_size, ++ "dev_nperio_tx_fifo_size", ++ (dwc_otg_module_params.dev_nperio_tx_fifo_size <= (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)), ++ (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(host_rx_fifo_size, ++ "host_rx_fifo_size", ++ (dwc_otg_module_params.host_rx_fifo_size <= dwc_read_reg32(&core_if->core_global_regs->grxfsiz)), ++ dwc_read_reg32(&core_if->core_global_regs->grxfsiz)); ++ ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(host_nperio_tx_fifo_size, ++ "host_nperio_tx_fifo_size", ++ (dwc_otg_module_params.host_nperio_tx_fifo_size <= (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)), ++ (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(host_perio_tx_fifo_size, ++ "host_perio_tx_fifo_size", ++ (dwc_otg_module_params.host_perio_tx_fifo_size <= ((dwc_read_reg32(&core_if->core_global_regs->hptxfsiz) >> 16))), ++ ((dwc_read_reg32(&core_if->core_global_regs->hptxfsiz) >> 16))); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(max_transfer_size, ++ "max_transfer_size", ++ 
(dwc_otg_module_params.max_transfer_size < (1 << (core_if->hwcfg3.b.xfer_size_cntr_width + 11))), ++ ((1 << (core_if->hwcfg3.b.xfer_size_cntr_width + 11)) - 1)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(max_packet_count, ++ "max_packet_count", ++ (dwc_otg_module_params.max_packet_count < (1 << (core_if->hwcfg3.b.packet_size_cntr_width + 4))), ++ ((1 << (core_if->hwcfg3.b.packet_size_cntr_width + 4)) - 1)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(host_channels, ++ "host_channels", ++ (dwc_otg_module_params.host_channels <= (core_if->hwcfg2.b.num_host_chan + 1)), ++ (core_if->hwcfg2.b.num_host_chan + 1)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(dev_endpoints, ++ "dev_endpoints", ++ (dwc_otg_module_params.dev_endpoints <= (core_if->hwcfg2.b.num_dev_ep)), ++ core_if->hwcfg2.b.num_dev_ep); ++ ++/* ++ * Define the following to disable the FS PHY Hardware checking. This is for ++ * internal testing only. ++ * ++ * #define NO_FS_PHY_HW_CHECKS ++ */ ++ ++#ifdef NO_FS_PHY_HW_CHECKS ++ retval += DWC_OTG_PARAM_CHECK_VALID(phy_type, ++ "phy_type", 1, 0); ++#else ++ retval += DWC_OTG_PARAM_CHECK_VALID(phy_type, ++ "phy_type", ++ ({ ++ int valid = 0; ++ if ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_UTMI) && ++ ((core_if->hwcfg2.b.hs_phy_type == 1) || ++ (core_if->hwcfg2.b.hs_phy_type == 3))) ++ { ++ valid = 1; ++ } ++ else if ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_ULPI) && ++ ((core_if->hwcfg2.b.hs_phy_type == 2) || ++ (core_if->hwcfg2.b.hs_phy_type == 3))) ++ { ++ valid = 1; ++ } ++ else if ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) && ++ (core_if->hwcfg2.b.fs_phy_type == 1)) ++ { ++ valid = 1; ++ } ++ valid; ++ }), ++ ({ ++ int set = DWC_PHY_TYPE_PARAM_FS; ++ if (core_if->hwcfg2.b.hs_phy_type) { ++ if ((core_if->hwcfg2.b.hs_phy_type == 3) || ++ (core_if->hwcfg2.b.hs_phy_type == 1)) { ++ set = DWC_PHY_TYPE_PARAM_UTMI; ++ } ++ else { ++ set = DWC_PHY_TYPE_PARAM_ULPI; ++ } ++ } ++ set; ++ })); ++#endif ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(speed,"speed", ++ (dwc_otg_module_params.speed == 0) && (dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) ? 0 : 1, ++ dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS ? 1 : 0); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(host_ls_low_power_phy_clk, ++ "host_ls_low_power_phy_clk", ++ ((dwc_otg_module_params.host_ls_low_power_phy_clk == DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ) && (dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) ? 0 : 1), ++ ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) ? DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ : DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ)); ++ ++ DWC_OTG_PARAM_SET_DEFAULT(phy_ulpi_ddr); ++ DWC_OTG_PARAM_SET_DEFAULT(phy_ulpi_ext_vbus); ++ DWC_OTG_PARAM_SET_DEFAULT(phy_utmi_width); ++ DWC_OTG_PARAM_SET_DEFAULT(ulpi_fs_ls); ++ DWC_OTG_PARAM_SET_DEFAULT(ts_dline); ++ ++#ifdef NO_FS_PHY_HW_CHECKS ++ retval += DWC_OTG_PARAM_CHECK_VALID(i2c_enable, ++ "i2c_enable", 1, 0); ++#else ++ retval += DWC_OTG_PARAM_CHECK_VALID(i2c_enable, ++ "i2c_enable", ++ (dwc_otg_module_params.i2c_enable == 1) && (core_if->hwcfg3.b.i2c == 0) ? 
0 : 1, ++ 0); ++#endif ++ ++ for (i=0; i<16; i++) { ++ ++ int changed = 1; ++ int error = 0; ++ ++ if (dwc_otg_module_params.dev_perio_tx_fifo_size[i] == -1) { ++ changed = 0; ++ dwc_otg_module_params.dev_perio_tx_fifo_size[i] = dwc_param_dev_perio_tx_fifo_size_default; ++ } ++ if (!(dwc_otg_module_params.dev_perio_tx_fifo_size[i] <= (dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i])))) { ++ if (changed) { ++ DWC_ERROR("`%d' invalid for parameter `dev_perio_fifo_size_%d'. Check HW configuration.\n", dwc_otg_module_params.dev_perio_tx_fifo_size[i],i); ++ error = 1; ++ } ++ dwc_otg_module_params.dev_perio_tx_fifo_size[i] = dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i]); ++ } ++ retval += error; ++ } ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(en_multiple_tx_fifo, ++ "en_multiple_tx_fifo", ++ ((dwc_otg_module_params.en_multiple_tx_fifo == 1) && ++ (core_if->hwcfg4.b.ded_fifo_en == 0)) ? 0 : 1, 0); ++ ++ for (i = 0; i < 16; i++) { ++ int changed = 1; ++ int error = 0; ++ if (dwc_otg_module_params.dev_tx_fifo_size[i] == -1) { ++ changed = 0; ++ dwc_otg_module_params.dev_tx_fifo_size[i] = ++ dwc_param_dev_tx_fifo_size_default; ++ } ++ if (!(dwc_otg_module_params.dev_tx_fifo_size[i] <= ++ (dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i])))) { ++ if (changed) { ++ DWC_ERROR("%d' invalid for parameter `dev_perio_fifo_size_%d'." ++ "Check HW configuration.\n",dwc_otg_module_params.dev_tx_fifo_size[i],i); ++ error = 1; ++ } ++ dwc_otg_module_params.dev_tx_fifo_size[i] = ++ dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i]); ++ } ++ retval += error; ++ } ++ DWC_OTG_PARAM_SET_DEFAULT(thr_ctl); ++ DWC_OTG_PARAM_SET_DEFAULT(tx_thr_length); ++ DWC_OTG_PARAM_SET_DEFAULT(rx_thr_length); ++ return retval; ++} // check_parameters ++ ++ ++/** ++ * This function is the top level interrupt handler for the Common ++ * (Device and host modes) interrupts. ++ */ ++static irqreturn_t dwc_otg_common_irq(int _irq, void *_dev) ++{ ++ dwc_otg_device_t *otg_dev = _dev; ++ int32_t retval = IRQ_NONE; ++ ++ retval = dwc_otg_handle_common_intr( otg_dev->core_if ); ++ ++ mask_and_ack_ifx_irq (_irq); ++ ++ return IRQ_RETVAL(retval); ++} ++ ++ ++/** ++ * This function is called when a DWC_OTG device is unregistered with the ++ * dwc_otg_driver. This happens, for example, when the rmmod command is ++ * executed. The device may or may not be electrically present. If it is ++ * present, the driver stops device processing. Any resources used on behalf ++ * of this device are freed. ++ * ++ * @return ++ */ ++static int ++dwc_otg_driver_remove(struct platform_device *_dev) ++{ ++ //dwc_otg_device_t *otg_dev = dev_get_drvdata(&_dev->dev); ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(_dev); ++ ++ DWC_DEBUGPL(DBG_ANY, "%s(%p)\n", __func__, _dev); ++ ++ if (otg_dev == NULL) { ++ /* Memory allocation for the dwc_otg_device failed. */ ++ return 0; ++ } ++ ++ /* ++ * Free the IRQ ++ */ ++ if (otg_dev->common_irq_installed) { ++ free_irq( otg_dev->irq, otg_dev ); ++ } ++ ++#ifndef DWC_DEVICE_ONLY ++ if (otg_dev->hcd != NULL) { ++ dwc_otg_hcd_remove(&_dev->dev); ++ } ++#endif ++ printk("after removehcd\n"); ++ ++// Note: Integrate HOST and DEVICE(Gadget) is not planned yet. 
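[Editor's note, not part of the patch] The check_parameters() routine above hides its policy behind GCC statement-expression macros (DWC_OTG_PARAM_TEST, DWC_OTG_PARAM_ERR, DWC_OTG_PARAM_SET_DEFAULT, DWC_OTG_PARAM_CHECK_VALID), whose control flow is easy to lose in the backslash continuations. The following minimal stand-alone sketch models the same validate-or-fall-back-to-default idea with plain functions; struct param and check_param are invented for the illustration and do not exist in the driver.

#include <stdio.h>

/* One module parameter: -1 means "not set by the user". */
struct param {
	const char *name;
	int value;
	int def;        /* default used when unset or invalid */
	int low, high;  /* valid range */
};

/* Mirrors DWC_OTG_PARAM_ERR: an unset value is silently defaulted; a user-supplied
 * value outside [low, high] is reported, counted and replaced by the default. */
static int check_param(struct param *p)
{
	if (p->value == -1) {
		p->value = p->def;
		return 0;
	}
	if (p->value < p->low || p->value > p->high) {
		fprintf(stderr, "`%d' invalid for parameter `%s'\n", p->value, p->name);
		p->value = p->def;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct param host_channels = { "host_channels", 42, 12, 1, 16 };
	int errors = check_param(&host_channels);

	printf("host_channels=%d errors=%d\n", host_channels.value, errors);
	return 0;
}

Built as an ordinary user-space program this reports the out-of-range value and then prints "host_channels=12 errors=1", which is the behaviour the macros implement inside check_parameters(): every violation is logged, corrected, and added to retval, and a non-zero retval makes the probe routine below bail out with -EINVAL.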
++#ifndef DWC_HOST_ONLY ++ if (otg_dev->pcd != NULL) { ++ dwc_otg_pcd_remove(otg_dev); ++ } ++#endif ++ if (otg_dev->core_if != NULL) { ++ dwc_otg_cil_remove( otg_dev->core_if ); ++ } ++ printk("after removecil\n"); ++ ++ /* ++ * Remove the device attributes ++ */ ++ dwc_otg_attr_remove(&_dev->dev); ++ printk("after removeattr\n"); ++ ++ /* ++ * Return the memory. ++ */ ++ if (otg_dev->base != NULL) { ++ iounmap(otg_dev->base); ++ } ++ if (otg_dev->phys_addr != 0) { ++ release_mem_region(otg_dev->phys_addr, otg_dev->base_len); ++ } ++ kfree(otg_dev); ++ ++ /* ++ * Clear the drvdata pointer. ++ */ ++ //dev_set_drvdata(&_dev->dev, 0); ++ platform_set_drvdata(_dev, 0); ++ return 0; ++} ++ ++/** ++ * This function is called when an DWC_OTG device is bound to a ++ * dwc_otg_driver. It creates the driver components required to ++ * control the device (CIL, HCD, and PCD) and it initializes the ++ * device. The driver components are stored in a dwc_otg_device ++ * structure. A reference to the dwc_otg_device is saved in the ++ * lm_device. This allows the driver to access the dwc_otg_device ++ * structure on subsequent calls to driver methods for this device. ++ * ++ * @return ++ */ ++static int __devinit ++dwc_otg_driver_probe(struct platform_device *_dev) ++{ ++ int retval = 0; ++ dwc_otg_device_t *dwc_otg_device; ++ int32_t snpsid; ++ struct resource *res; ++ gusbcfg_data_t usbcfg = {.d32 = 0}; ++ ++ dev_dbg(&_dev->dev, "dwc_otg_driver_probe (%p)\n", _dev); ++ ++ dwc_otg_device = kmalloc(sizeof(dwc_otg_device_t), GFP_KERNEL); ++ if (dwc_otg_device == 0) { ++ dev_err(&_dev->dev, "kmalloc of dwc_otg_device failed\n"); ++ retval = -ENOMEM; ++ goto fail; ++ } ++ memset(dwc_otg_device, 0, sizeof(*dwc_otg_device)); ++ dwc_otg_device->reg_offset = 0xFFFFFFFF; ++ ++ /* ++ * Retrieve the memory and IRQ resources. ++ */ ++ dwc_otg_device->irq = platform_get_irq(_dev, 0); ++ if (dwc_otg_device->irq == 0) { ++ dev_err(&_dev->dev, "no device irq\n"); ++ retval = -ENODEV; ++ goto fail; ++ } ++ dev_dbg(&_dev->dev, "OTG - device irq: %d\n", dwc_otg_device->irq); ++ res = platform_get_resource(_dev, IORESOURCE_MEM, 0); ++ if (res == NULL) { ++ dev_err(&_dev->dev, "no CSR address\n"); ++ retval = -ENODEV; ++ goto fail; ++ } ++ dev_dbg(&_dev->dev, "OTG - ioresource_mem start0x%08x: end:0x%08x\n", ++ (unsigned)res->start, (unsigned)res->end); ++ dwc_otg_device->phys_addr = res->start; ++ dwc_otg_device->base_len = res->end - res->start + 1; ++ if (request_mem_region(dwc_otg_device->phys_addr, dwc_otg_device->base_len, ++ dwc_driver_name) == NULL) { ++ dev_err(&_dev->dev, "request_mem_region failed\n"); ++ retval = -EBUSY; ++ goto fail; ++ } ++ ++ /* ++ * Map the DWC_otg Core memory into virtual address space. ++ */ ++ dwc_otg_device->base = ioremap_nocache(dwc_otg_device->phys_addr, dwc_otg_device->base_len); ++ if (dwc_otg_device->base == NULL) { ++ dev_err(&_dev->dev, "ioremap() failed\n"); ++ retval = -ENOMEM; ++ goto fail; ++ } ++ dev_dbg(&_dev->dev, "mapped base=0x%08x\n", (unsigned)dwc_otg_device->base); ++ ++ /* ++ * Attempt to ensure this device is really a DWC_otg Controller. ++ * Read and verify the SNPSID register contents. The value should be ++ * 0x45F42XXX, which corresponds to "OT2", as in "OTG version 2.XX". 
++ */ ++ snpsid = dwc_read_reg32((uint32_t *)((uint8_t *)dwc_otg_device->base + 0x40)); ++ if ((snpsid & 0xFFFFF000) != 0x4F542000) { ++ dev_err(&_dev->dev, "Bad value for SNPSID: 0x%08x\n", snpsid); ++ retval = -EINVAL; ++ goto fail; ++ } ++ ++ /* ++ * Initialize driver data to point to the global DWC_otg ++ * Device structure. ++ */ ++ platform_set_drvdata(_dev, dwc_otg_device); ++ dev_dbg(&_dev->dev, "dwc_otg_device=0x%p\n", dwc_otg_device); ++ dwc_otg_device->core_if = dwc_otg_cil_init( dwc_otg_device->base, &dwc_otg_module_params); ++ if (dwc_otg_device->core_if == 0) { ++ dev_err(&_dev->dev, "CIL initialization failed!\n"); ++ retval = -ENOMEM; ++ goto fail; ++ } ++ ++ /* ++ * Validate parameter values. ++ */ ++ if (check_parameters(dwc_otg_device->core_if) != 0) { ++ retval = -EINVAL; ++ goto fail; ++ } ++ ++ /* Added for PLB DMA phys virt mapping */ ++ //dwc_otg_device->core_if->phys_addr = dwc_otg_device->phys_addr; ++ /* ++ * Create Device Attributes in sysfs ++ */ ++ dwc_otg_attr_create (&_dev->dev); ++ ++ /* ++ * Disable the global interrupt until all the interrupt ++ * handlers are installed. ++ */ ++ dwc_otg_disable_global_interrupts( dwc_otg_device->core_if ); ++ /* ++ * Install the interrupt handler for the common interrupts before ++ * enabling common interrupts in core_init below. ++ */ ++ DWC_DEBUGPL( DBG_CIL, "registering (common) handler for irq%d\n", dwc_otg_device->irq); ++ ++ retval = request_irq((unsigned int)dwc_otg_device->irq, dwc_otg_common_irq, ++ //SA_INTERRUPT|SA_SHIRQ, "dwc_otg", (void *)dwc_otg_device ); ++ IRQF_SHARED, "dwc_otg", (void *)dwc_otg_device ); ++ //IRQF_DISABLED, "dwc_otg", (void *)dwc_otg_device ); ++ if (retval != 0) { ++ DWC_ERROR("request of irq%d failed retval: %d\n", dwc_otg_device->irq, retval); ++ retval = -EBUSY; ++ goto fail; ++ } else { ++ dwc_otg_device->common_irq_installed = 1; ++ } ++ ++ /* ++ * Initialize the DWC_otg core. ++ */ ++ dwc_otg_core_init( dwc_otg_device->core_if ); ++ ++ ++#ifndef DWC_HOST_ONLY // otg device mode. (gadget.) ++ /* ++ * Initialize the PCD ++ */ ++ retval = dwc_otg_pcd_init(dwc_otg_device); ++ if (retval != 0) { ++ DWC_ERROR("dwc_otg_pcd_init failed\n"); ++ dwc_otg_device->pcd = NULL; ++ goto fail; ++ } ++#endif // DWC_HOST_ONLY ++ ++#ifndef DWC_DEVICE_ONLY // otg host mode. (HCD) ++ /* ++ * Initialize the HCD ++ */ ++#if 1 /*fscz*/ ++ /* force_host_mode */ ++ usbcfg.d32 = dwc_read_reg32(&dwc_otg_device->core_if->core_global_regs ->gusbcfg); ++ usbcfg.b.force_host_mode = 1; ++ dwc_write_reg32(&dwc_otg_device->core_if->core_global_regs ->gusbcfg, usbcfg.d32); ++#endif ++ retval = dwc_otg_hcd_init(&_dev->dev, dwc_otg_device); ++ if (retval != 0) { ++ DWC_ERROR("dwc_otg_hcd_init failed\n"); ++ dwc_otg_device->hcd = NULL; ++ goto fail; ++ } ++#endif // DWC_DEVICE_ONLY ++ ++ /* ++ * Enable the global interrupt after all the interrupt ++ * handlers are installed. ++ */ ++ dwc_otg_enable_global_interrupts( dwc_otg_device->core_if ); ++#if 0 /*fscz*/ ++ usbcfg.d32 = dwc_read_reg32(&dwc_otg_device->core_if->core_global_regs ->gusbcfg); ++ usbcfg.b.force_host_mode = 0; ++ dwc_write_reg32(&dwc_otg_device->core_if->core_global_regs ->gusbcfg, usbcfg.d32); ++#endif ++ ++ ++ return 0; ++ ++fail: ++ dwc_otg_driver_remove(_dev); ++ return retval; ++} ++ ++/** ++ * This structure defines the methods to be called by a bus driver ++ * during the lifecycle of a device on that bus. Both drivers and ++ * devices are registered with a bus driver. 
The bus driver matches ++ * devices to drivers based on information in the device and driver ++ * structures. ++ * ++ * The probe function is called when the bus driver matches a device ++ * to this driver. The remove function is called when a device is ++ * unregistered with the bus driver. ++ */ ++struct platform_driver dwc_otg_driver = { ++ .probe = dwc_otg_driver_probe, ++ .remove = dwc_otg_driver_remove, ++// .suspend = dwc_otg_driver_suspend, ++// .resume = dwc_otg_driver_resume, ++ .driver = { ++ .name = dwc_driver_name, ++ .owner = THIS_MODULE, ++ }, ++}; ++EXPORT_SYMBOL(dwc_otg_driver); ++ ++/** ++ * This function is called when the dwc_otg_driver is installed with the ++ * insmod command. It registers the dwc_otg_driver structure with the ++ * appropriate bus driver. This will cause the dwc_otg_driver_probe function ++ * to be called. In addition, the bus driver will automatically expose ++ * attributes defined for the device and driver in the special sysfs file ++ * system. ++ * ++ * @return ++ */ ++static int __init dwc_otg_init(void) ++{ ++ int retval = 0; ++ ++ printk(KERN_INFO "%s: version %s\n", dwc_driver_name, DWC_DRIVER_VERSION); ++ ++ // ifxmips setup ++ retval = ifx_usb_hc_init(dwc_iomem_base, dwc_irq); ++ if (retval < 0) ++ { ++ printk(KERN_ERR "%s retval=%d\n", __func__, retval); ++ return retval; ++ } ++ dwc_otg_power_on(); // ifx only!! ++ ++ ++ retval = platform_driver_register(&dwc_otg_driver); ++ ++ if (retval < 0) { ++ printk(KERN_ERR "%s retval=%d\n", __func__, retval); ++ goto error1; ++ } ++ ++ retval = driver_create_file(&dwc_otg_driver.driver, &driver_attr_version); ++ if (retval < 0) ++ { ++ printk(KERN_ERR "%s retval=%d\n", __func__, retval); ++ goto error2; ++ } ++ retval = driver_create_file(&dwc_otg_driver.driver, &driver_attr_debuglevel); ++ if (retval < 0) ++ { ++ printk(KERN_ERR "%s retval=%d\n", __func__, retval); ++ goto error3; ++ } ++ return retval; ++ ++ ++error3: ++ driver_remove_file(&dwc_otg_driver.driver, &driver_attr_version); ++error2: ++ driver_unregister(&dwc_otg_driver.driver); ++error1: ++ ifx_usb_hc_remove(); ++ return retval; ++} ++module_init(dwc_otg_init); ++ ++/** ++ * This function is called when the driver is removed from the kernel ++ * with the rmmod command. The driver unregisters itself with its bus ++ * driver. 
++ * ++ */ ++static void __exit dwc_otg_cleanup(void) ++{ ++ printk(KERN_DEBUG "dwc_otg_cleanup()\n"); ++ ++ driver_remove_file(&dwc_otg_driver.driver, &driver_attr_debuglevel); ++ driver_remove_file(&dwc_otg_driver.driver, &driver_attr_version); ++ ++ platform_driver_unregister(&dwc_otg_driver); ++ ifx_usb_hc_remove(); ++ ++ printk(KERN_INFO "%s module removed\n", dwc_driver_name); ++} ++module_exit(dwc_otg_cleanup); ++ ++MODULE_DESCRIPTION(DWC_DRIVER_DESC); ++MODULE_AUTHOR("Synopsys Inc."); ++MODULE_LICENSE("GPL"); ++ ++module_param_named(otg_cap, dwc_otg_module_params.otg_cap, int, 0444); ++MODULE_PARM_DESC(otg_cap, "OTG Capabilities 0=HNP&SRP 1=SRP Only 2=None"); ++module_param_named(opt, dwc_otg_module_params.opt, int, 0444); ++MODULE_PARM_DESC(opt, "OPT Mode"); ++module_param_named(dma_enable, dwc_otg_module_params.dma_enable, int, 0444); ++MODULE_PARM_DESC(dma_enable, "DMA Mode 0=Slave 1=DMA enabled"); ++module_param_named(dma_burst_size, dwc_otg_module_params.dma_burst_size, int, 0444); ++MODULE_PARM_DESC(dma_burst_size, "DMA Burst Size 1, 4, 8, 16, 32, 64, 128, 256"); ++module_param_named(speed, dwc_otg_module_params.speed, int, 0444); ++MODULE_PARM_DESC(speed, "Speed 0=High Speed 1=Full Speed"); ++module_param_named(host_support_fs_ls_low_power, dwc_otg_module_params.host_support_fs_ls_low_power, int, 0444); ++MODULE_PARM_DESC(host_support_fs_ls_low_power, "Support Low Power w/FS or LS 0=Support 1=Don't Support"); ++module_param_named(host_ls_low_power_phy_clk, dwc_otg_module_params.host_ls_low_power_phy_clk, int, 0444); ++MODULE_PARM_DESC(host_ls_low_power_phy_clk, "Low Speed Low Power Clock 0=48Mhz 1=6Mhz"); ++module_param_named(enable_dynamic_fifo, dwc_otg_module_params.enable_dynamic_fifo, int, 0444); ++MODULE_PARM_DESC(enable_dynamic_fifo, "0=cC Setting 1=Allow Dynamic Sizing"); ++module_param_named(data_fifo_size, dwc_otg_module_params.data_fifo_size, int, 0444); ++MODULE_PARM_DESC(data_fifo_size, "Total number of words in the data FIFO memory 32-32768"); ++module_param_named(dev_rx_fifo_size, dwc_otg_module_params.dev_rx_fifo_size, int, 0444); ++MODULE_PARM_DESC(dev_rx_fifo_size, "Number of words in the Rx FIFO 16-32768"); ++module_param_named(dev_nperio_tx_fifo_size, dwc_otg_module_params.dev_nperio_tx_fifo_size, int, 0444); ++MODULE_PARM_DESC(dev_nperio_tx_fifo_size, "Number of words in the non-periodic Tx FIFO 16-32768"); ++module_param_named(dev_perio_tx_fifo_size_1, dwc_otg_module_params.dev_perio_tx_fifo_size[0], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_1, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_2, dwc_otg_module_params.dev_perio_tx_fifo_size[1], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_2, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_3, dwc_otg_module_params.dev_perio_tx_fifo_size[2], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_3, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_4, dwc_otg_module_params.dev_perio_tx_fifo_size[3], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_4, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_5, dwc_otg_module_params.dev_perio_tx_fifo_size[4], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_5, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_6, dwc_otg_module_params.dev_perio_tx_fifo_size[5], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_6, 
"Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_7, dwc_otg_module_params.dev_perio_tx_fifo_size[6], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_7, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_8, dwc_otg_module_params.dev_perio_tx_fifo_size[7], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_8, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_9, dwc_otg_module_params.dev_perio_tx_fifo_size[8], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_9, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_10, dwc_otg_module_params.dev_perio_tx_fifo_size[9], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_10, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_11, dwc_otg_module_params.dev_perio_tx_fifo_size[10], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_11, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_12, dwc_otg_module_params.dev_perio_tx_fifo_size[11], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_12, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_13, dwc_otg_module_params.dev_perio_tx_fifo_size[12], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_13, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_14, dwc_otg_module_params.dev_perio_tx_fifo_size[13], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_14, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_15, dwc_otg_module_params.dev_perio_tx_fifo_size[14], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_15, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(host_rx_fifo_size, dwc_otg_module_params.host_rx_fifo_size, int, 0444); ++MODULE_PARM_DESC(host_rx_fifo_size, "Number of words in the Rx FIFO 16-32768"); ++module_param_named(host_nperio_tx_fifo_size, dwc_otg_module_params.host_nperio_tx_fifo_size, int, 0444); ++MODULE_PARM_DESC(host_nperio_tx_fifo_size, "Number of words in the non-periodic Tx FIFO 16-32768"); ++module_param_named(host_perio_tx_fifo_size, dwc_otg_module_params.host_perio_tx_fifo_size, int, 0444); ++MODULE_PARM_DESC(host_perio_tx_fifo_size, "Number of words in the host periodic Tx FIFO 16-32768"); ++module_param_named(max_transfer_size, dwc_otg_module_params.max_transfer_size, int, 0444); ++/** @todo Set the max to 512K, modify checks */ ++MODULE_PARM_DESC(max_transfer_size, "The maximum transfer size supported in bytes 2047-65535"); ++module_param_named(max_packet_count, dwc_otg_module_params.max_packet_count, int, 0444); ++MODULE_PARM_DESC(max_packet_count, "The maximum number of packets in a transfer 15-511"); ++module_param_named(host_channels, dwc_otg_module_params.host_channels, int, 0444); ++MODULE_PARM_DESC(host_channels, "The number of host channel registers to use 1-16"); ++module_param_named(dev_endpoints, dwc_otg_module_params.dev_endpoints, int, 0444); ++MODULE_PARM_DESC(dev_endpoints, "The number of endpoints in addition to EP0 available for device mode 1-15"); ++module_param_named(phy_type, dwc_otg_module_params.phy_type, int, 0444); ++MODULE_PARM_DESC(phy_type, "0=Reserved 1=UTMI+ 2=ULPI"); ++module_param_named(phy_utmi_width, dwc_otg_module_params.phy_utmi_width, int, 0444); ++MODULE_PARM_DESC(phy_utmi_width, "Specifies the UTMI+ 
Data Width 8 or 16 bits"); ++module_param_named(phy_ulpi_ddr, dwc_otg_module_params.phy_ulpi_ddr, int, 0444); ++MODULE_PARM_DESC(phy_ulpi_ddr, "ULPI at double or single data rate 0=Single 1=Double"); ++module_param_named(phy_ulpi_ext_vbus, dwc_otg_module_params.phy_ulpi_ext_vbus, int, 0444); ++MODULE_PARM_DESC(phy_ulpi_ext_vbus, "ULPI PHY using internal or external vbus 0=Internal"); ++module_param_named(i2c_enable, dwc_otg_module_params.i2c_enable, int, 0444); ++MODULE_PARM_DESC(i2c_enable, "FS PHY Interface"); ++module_param_named(ulpi_fs_ls, dwc_otg_module_params.ulpi_fs_ls, int, 0444); ++MODULE_PARM_DESC(ulpi_fs_ls, "ULPI PHY FS/LS mode only"); ++module_param_named(ts_dline, dwc_otg_module_params.ts_dline, int, 0444); ++MODULE_PARM_DESC(ts_dline, "Term select Dline pulsing for all PHYs"); ++module_param_named(debug, g_dbg_lvl, int, 0444); ++MODULE_PARM_DESC(debug, "0"); ++module_param_named(en_multiple_tx_fifo, ++ dwc_otg_module_params.en_multiple_tx_fifo, int, 0444); ++MODULE_PARM_DESC(en_multiple_tx_fifo, ++ "Dedicated Non Periodic Tx FIFOs 0=disabled 1=enabled"); ++module_param_named(dev_tx_fifo_size_1, ++ dwc_otg_module_params.dev_tx_fifo_size[0], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_1, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_2, ++ dwc_otg_module_params.dev_tx_fifo_size[1], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_2, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_3, ++ dwc_otg_module_params.dev_tx_fifo_size[2], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_3, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_4, ++ dwc_otg_module_params.dev_tx_fifo_size[3], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_4, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_5, ++ dwc_otg_module_params.dev_tx_fifo_size[4], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_5, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_6, ++ dwc_otg_module_params.dev_tx_fifo_size[5], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_6, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_7, ++ dwc_otg_module_params.dev_tx_fifo_size[6], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_7, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_8, ++ dwc_otg_module_params.dev_tx_fifo_size[7], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_8, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_9, ++ dwc_otg_module_params.dev_tx_fifo_size[8], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_9, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_10, ++ dwc_otg_module_params.dev_tx_fifo_size[9], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_10, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_11, ++ dwc_otg_module_params.dev_tx_fifo_size[10], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_11, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_12, ++ dwc_otg_module_params.dev_tx_fifo_size[11], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_12, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_13, ++ dwc_otg_module_params.dev_tx_fifo_size[12], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_13, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_14, ++ dwc_otg_module_params.dev_tx_fifo_size[13], int, 0444); 
++MODULE_PARM_DESC(dev_tx_fifo_size_14, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_15, ++ dwc_otg_module_params.dev_tx_fifo_size[14], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_15, "Number of words in the Tx FIFO 4-768"); ++module_param_named(thr_ctl, dwc_otg_module_params.thr_ctl, int, 0444); ++MODULE_PARM_DESC(thr_ctl, "Thresholding enable flag bit" ++ "0 - non ISO Tx thr., 1 - ISO Tx thr., 2 - Rx thr.- bit 0=disabled 1=enabled"); ++module_param_named(tx_thr_length, dwc_otg_module_params.tx_thr_length, int, 0444); ++MODULE_PARM_DESC(tx_thr_length, "Tx Threshold length in 32 bit DWORDs"); ++module_param_named(rx_thr_length, dwc_otg_module_params.rx_thr_length, int, 0444); ++MODULE_PARM_DESC(rx_thr_length, "Rx Threshold length in 32 bit DWORDs"); ++module_param_named (iomem_base, dwc_iomem_base, ulong, 0444); ++MODULE_PARM_DESC (dwc_iomem_base, "The base address of the DWC_OTG register."); ++module_param_named (irq, dwc_irq, int, 0444); ++MODULE_PARM_DESC (dwc_irq, "The interrupt number"); ++ ++/** @page "Module Parameters" ++ * ++ * The following parameters may be specified when starting the module. ++ * These parameters define how the DWC_otg controller should be ++ * configured. Parameter values are passed to the CIL initialization ++ * function dwc_otg_cil_init ++ * ++ * Example: <code>modprobe dwc_otg speed=1 otg_cap=1</code> ++ * ++ ++ <table> ++ <tr><td>Parameter Name</td><td>Meaning</td></tr> ++ ++ <tr> ++ <td>otg_cap</td> ++ <td>Specifies the OTG capabilities. The driver will automatically detect the ++ value for this parameter if none is specified. ++ - 0: HNP and SRP capable (default, if available) ++ - 1: SRP Only capable ++ - 2: No HNP/SRP capable ++ </td></tr> ++ ++ <tr> ++ <td>dma_enable</td> ++ <td>Specifies whether to use slave or DMA mode for accessing the data FIFOs. ++ The driver will automatically detect the value for this parameter if none is ++ specified. ++ - 0: Slave ++ - 1: DMA (default, if available) ++ </td></tr> ++ ++ <tr> ++ <td>dma_burst_size</td> ++ <td>The DMA Burst size (applicable only for External DMA Mode). ++ - Values: 1, 4, 8 16, 32, 64, 128, 256 (default 32) ++ </td></tr> ++ ++ <tr> ++ <td>speed</td> ++ <td>Specifies the maximum speed of operation in host and device mode. The ++ actual speed depends on the speed of the attached device and the value of ++ phy_type. ++ - 0: High Speed (default) ++ - 1: Full Speed ++ </td></tr> ++ ++ <tr> ++ <td>host_support_fs_ls_low_power</td> ++ <td>Specifies whether low power mode is supported when attached to a Full ++ Speed or Low Speed device in host mode. ++ - 0: Don't support low power mode (default) ++ - 1: Support low power mode ++ </td></tr> ++ ++ <tr> ++ <td>host_ls_low_power_phy_clk</td> ++ <td>Specifies the PHY clock rate in low power mode when connected to a Low ++ Speed device in host mode. This parameter is applicable only if ++ HOST_SUPPORT_FS_LS_LOW_POWER is enabled. ++ - 0: 48 MHz (default) ++ - 1: 6 MHz ++ </td></tr> ++ ++ <tr> ++ <td>enable_dynamic_fifo</td> ++ <td> Specifies whether FIFOs may be resized by the driver software. ++ - 0: Use cC FIFO size parameters ++ - 1: Allow dynamic FIFO sizing (default) ++ </td></tr> ++ ++ <tr> ++ <td>data_fifo_size</td> ++ <td>Total number of 4-byte words in the data FIFO memory. This memory ++ includes the Rx FIFO, non-periodic Tx FIFO, and periodic Tx FIFOs. ++ - Values: 32 to 32768 (default 8192) ++ ++ Note: The total FIFO memory depth in the FPGA configuration is 8192. 
++ </td></tr> ++ ++ <tr> ++ <td>dev_rx_fifo_size</td> ++ <td>Number of 4-byte words in the Rx FIFO in device mode when dynamic ++ FIFO sizing is enabled. ++ - Values: 16 to 32768 (default 1064) ++ </td></tr> ++ ++ <tr> ++ <td>dev_nperio_tx_fifo_size</td> ++ <td>Number of 4-byte words in the non-periodic Tx FIFO in device mode when ++ dynamic FIFO sizing is enabled. ++ - Values: 16 to 32768 (default 1024) ++ </td></tr> ++ ++ <tr> ++ <td>dev_perio_tx_fifo_size_n (n = 1 to 15)</td> ++ <td>Number of 4-byte words in each of the periodic Tx FIFOs in device mode ++ when dynamic FIFO sizing is enabled. ++ - Values: 4 to 768 (default 256) ++ </td></tr> ++ ++ <tr> ++ <td>host_rx_fifo_size</td> ++ <td>Number of 4-byte words in the Rx FIFO in host mode when dynamic FIFO ++ sizing is enabled. ++ - Values: 16 to 32768 (default 1024) ++ </td></tr> ++ ++ <tr> ++ <td>host_nperio_tx_fifo_size</td> ++ <td>Number of 4-byte words in the non-periodic Tx FIFO in host mode when ++ dynamic FIFO sizing is enabled in the core. ++ - Values: 16 to 32768 (default 1024) ++ </td></tr> ++ ++ <tr> ++ <td>host_perio_tx_fifo_size</td> ++ <td>Number of 4-byte words in the host periodic Tx FIFO when dynamic FIFO ++ sizing is enabled. ++ - Values: 16 to 32768 (default 1024) ++ </td></tr> ++ ++ <tr> ++ <td>max_transfer_size</td> ++ <td>The maximum transfer size supported in bytes. ++ - Values: 2047 to 65,535 (default 65,535) ++ </td></tr> ++ ++ <tr> ++ <td>max_packet_count</td> ++ <td>The maximum number of packets in a transfer. ++ - Values: 15 to 511 (default 511) ++ </td></tr> ++ ++ <tr> ++ <td>host_channels</td> ++ <td>The number of host channel registers to use. ++ - Values: 1 to 16 (default 12) ++ ++ Note: The FPGA configuration supports a maximum of 12 host channels. ++ </td></tr> ++ ++ <tr> ++ <td>dev_endpoints</td> ++ <td>The number of endpoints in addition to EP0 available for device mode ++ operations. ++ - Values: 1 to 15 (default 6 IN and OUT) ++ ++ Note: The FPGA configuration supports a maximum of 6 IN and OUT endpoints in ++ addition to EP0. ++ </td></tr> ++ ++ <tr> ++ <td>phy_type</td> ++ <td>Specifies the type of PHY interface to use. By default, the driver will ++ automatically detect the phy_type. ++ - 0: Full Speed ++ - 1: UTMI+ (default, if available) ++ - 2: ULPI ++ </td></tr> ++ ++ <tr> ++ <td>phy_utmi_width</td> ++ <td>Specifies the UTMI+ Data Width. This parameter is applicable for a ++ phy_type of UTMI+. Also, this parameter is applicable only if the ++ OTG_HSPHY_WIDTH cC parameter was set to "8 and 16 bits", meaning that the ++ core has been configured to work at either data path width. ++ - Values: 8 or 16 bits (default 16) ++ </td></tr> ++ ++ <tr> ++ <td>phy_ulpi_ddr</td> ++ <td>Specifies whether the ULPI operates at double or single data rate. This ++ parameter is only applicable if phy_type is ULPI. ++ - 0: single data rate ULPI interface with 8 bit wide data bus (default) ++ - 1: double data rate ULPI interface with 4 bit wide data bus ++ </td></tr> ++ ++ <tr> ++ <td>i2c_enable</td> ++ <td>Specifies whether to use the I2C interface for full speed PHY. This ++ parameter is only applicable if PHY_TYPE is FS. ++ - 0: Disabled (default) ++ - 1: Enabled ++ </td></tr> ++ ++ <tr> ++ <td>otg_en_multiple_tx_fifo</td> ++ <td>Specifies whether dedicatedto tx fifos are enabled for non periodic IN EPs. ++ The driver will automatically detect the value for this parameter if none is ++ specified. 
++ - 0: Disabled ++ - 1: Enabled (default, if available) ++ </td></tr> ++ ++ <tr> ++ <td>dev_tx_fifo_size_n (n = 1 to 15)</td> ++ <td>Number of 4-byte words in each of the Tx FIFOs in device mode ++ when dynamic FIFO sizing is enabled. ++ - Values: 4 to 768 (default 256) ++ </td></tr> ++ ++*/ +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_driver.h +@@ -0,0 +1,84 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_driver.h $ ++ * $Revision: 1.1.1.1 $ ++ * $Date: 2009-04-17 06:15:34 $ ++ * $Change: 510275 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++#if !defined(__DWC_OTG_DRIVER_H__) ++#define __DWC_OTG_DRIVER_H__ ++ ++/** @file ++ * This file contains the interface to the Linux driver. ++ */ ++#include "dwc_otg_cil.h" ++ ++/* Type declarations */ ++struct dwc_otg_pcd; ++struct dwc_otg_hcd; ++ ++/** ++ * This structure is a wrapper that encapsulates the driver components used to ++ * manage a single DWC_otg controller. ++ */ ++typedef struct dwc_otg_device ++{ ++ /** Base address returned from ioremap() */ ++ void *base; ++ ++ /** Pointer to the core interface structure. */ ++ dwc_otg_core_if_t *core_if; ++ ++ /** Register offset for Diagnostic API.*/ ++ uint32_t reg_offset; ++ ++ /** Pointer to the PCD structure. */ ++ struct dwc_otg_pcd *pcd; ++ ++ /** Pointer to the HCD structure. */ ++ struct dwc_otg_hcd *hcd; ++ ++ /** Flag to indicate whether the common IRQ handler is installed. */ ++ uint8_t common_irq_installed; ++ ++ /** Interrupt request number. */ ++ unsigned int irq; ++ ++ /** Physical address of Control and Status registers, used by ++ * release_mem_region(). 
++ */ ++ resource_size_t phys_addr; ++ ++ /** Length of memory region, used by release_mem_region(). */ ++ unsigned long base_len; ++} dwc_otg_device_t; ++ ++//#define dev_dbg(fake, format, arg...) printk(KERN_CRIT __FILE__ ":%d: " format "\n" , __LINE__, ## arg) ++ ++#endif +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_hcd.c +@@ -0,0 +1,2870 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_hcd.c $ ++ * $Revision: 1.1.1.1 $ ++ * $Date: 2009-04-17 06:15:34 $ ++ * $Change: 631780 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++#ifndef DWC_DEVICE_ONLY ++ ++/** ++ * @file ++ * ++ * This file contains the implementation of the HCD. In Linux, the HCD ++ * implements the hc_driver API. ++ */ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/moduleparam.h> ++#include <linux/init.h> ++ ++#include <linux/device.h> ++ ++#include <linux/errno.h> ++#include <linux/list.h> ++#include <linux/interrupt.h> ++#include <linux/string.h> ++ ++#include <linux/dma-mapping.h> ++ ++#include "dwc_otg_driver.h" ++#include "dwc_otg_hcd.h" ++#include "dwc_otg_regs.h" ++ ++#include <asm/irq.h> ++#include "dwc_otg_ifx.h" // for Infineon platform specific. 
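[Editor's note, not part of the patch] Registers such as GINTSTS and GINTMSK are accessed throughout this driver through unions (gintsts_data_t, gintmsk_data_t, hprt0_data_t, ...) that pair a raw .d32 word with a .b bitfield view, and dwc_otg_handle_common_intr() earlier in the patch ANDs the status word with both the interrupt mask and a fixed "common interrupts" mask before dispatching. The stand-alone sketch below reproduces that masking and dispatch pattern; demo_gintsts_t and its field layout are invented for the illustration and are not the real register layout.

#include <stdint.h>
#include <stdio.h>

/* Toy model of the d32/bitfield union idiom used by gintsts_data_t and friends. */
typedef union demo_gintsts {
	uint32_t d32;
	struct {
		uint32_t modemismatch : 1;
		uint32_t otgintr      : 1;
		uint32_t conidstschng : 1;
		uint32_t wkupintr     : 1;
		uint32_t reserved     : 28;
	} b;
} demo_gintsts_t;

int main(void)
{
	demo_gintsts_t status = { .d32 = 0 };
	demo_gintsts_t mask   = { .d32 = 0 };
	demo_gintsts_t common = { .d32 = 0 };

	status.b.conidstschng = 1;  /* the core latched these two bits */
	status.b.wkupintr = 1;
	mask.b.conidstschng = 1;    /* but only this one is unmasked */
	common.b.conidstschng = 1;  /* both count as "common" interrupts */
	common.b.wkupintr = 1;

	demo_gintsts_t pending = { .d32 = status.d32 & mask.d32 & common.d32 };

	if (pending.b.conidstschng)
		printf("dispatch: connector ID status change handler\n");
	if (pending.b.wkupintr)
		printf("dispatch: wakeup handler\n");  /* masked, so not reached */
	return 0;
}

The same idiom, setting one field through the .b view and then reading or writing the whole register through .d32, is what the handlers above and below use for gotgctl, hprt0, pcgcctl and the host-channel registers.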
++extern atomic_t release_later; ++ ++static u64 dma_mask = DMA_BIT_MASK(32); ++ ++static const char dwc_otg_hcd_name [] = "dwc_otg_hcd"; ++static const struct hc_driver dwc_otg_hc_driver = ++{ ++ .description = dwc_otg_hcd_name, ++ .product_desc = "DWC OTG Controller", ++ .hcd_priv_size = sizeof(dwc_otg_hcd_t), ++ .irq = dwc_otg_hcd_irq, ++ .flags = HCD_MEMORY | HCD_USB2, ++ //.reset = ++ .start = dwc_otg_hcd_start, ++ //.suspend = ++ //.resume = ++ .stop = dwc_otg_hcd_stop, ++ .urb_enqueue = dwc_otg_hcd_urb_enqueue, ++ .urb_dequeue = dwc_otg_hcd_urb_dequeue, ++ .endpoint_disable = dwc_otg_hcd_endpoint_disable, ++ .get_frame_number = dwc_otg_hcd_get_frame_number, ++ .hub_status_data = dwc_otg_hcd_hub_status_data, ++ .hub_control = dwc_otg_hcd_hub_control, ++ //.hub_suspend = ++ //.hub_resume = ++}; ++ ++ ++/** ++ * Work queue function for starting the HCD when A-Cable is connected. ++ * The dwc_otg_hcd_start() must be called in a process context. ++ */ ++static void hcd_start_func(struct work_struct *work) ++{ ++ struct dwc_otg_hcd *priv = ++ container_of(work, struct dwc_otg_hcd, start_work); ++ struct usb_hcd *usb_hcd = (struct usb_hcd *)priv->_p; ++ DWC_DEBUGPL(DBG_HCDV, "%s() %p\n", __func__, usb_hcd); ++ if (usb_hcd) { ++ dwc_otg_hcd_start(usb_hcd); ++ } ++} ++ ++ ++/** ++ * HCD Callback function for starting the HCD when A-Cable is ++ * connected. ++ * ++ * @param _p void pointer to the <code>struct usb_hcd</code> ++ */ ++static int32_t dwc_otg_hcd_start_cb(void *_p) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(_p); ++ dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if; ++ hprt0_data_t hprt0; ++ if (core_if->op_state == B_HOST) { ++ /* ++ * Reset the port. During a HNP mode switch the reset ++ * needs to occur within 1ms and have a duration of at ++ * least 50ms. ++ */ ++ hprt0.d32 = dwc_otg_read_hprt0 (core_if); ++ hprt0.b.prtrst = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ ((struct usb_hcd *)_p)->self.is_b_host = 1; ++ } else { ++ ((struct usb_hcd *)_p)->self.is_b_host = 0; ++ } ++ /* Need to start the HCD in a non-interrupt context. */ ++ INIT_WORK(&dwc_otg_hcd->start_work, hcd_start_func); ++ dwc_otg_hcd->_p = _p; ++ schedule_work(&dwc_otg_hcd->start_work); ++ return 1; ++} ++ ++ ++/** ++ * HCD Callback function for stopping the HCD. ++ * ++ * @param _p void pointer to the <code>struct usb_hcd</code> ++ */ ++static int32_t dwc_otg_hcd_stop_cb( void *_p ) ++{ ++ struct usb_hcd *usb_hcd = (struct usb_hcd *)_p; ++ DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, _p); ++ dwc_otg_hcd_stop( usb_hcd ); ++ return 1; ++} ++static void del_xfer_timers(dwc_otg_hcd_t *_hcd) ++{ ++#ifdef DEBUG ++ int i; ++ int num_channels = _hcd->core_if->core_params->host_channels; ++ for (i = 0; i < num_channels; i++) { ++ del_timer(&_hcd->core_if->hc_xfer_timer[i]); ++ } ++#endif /* */ ++} ++ ++static void del_timers(dwc_otg_hcd_t *_hcd) ++{ ++ del_xfer_timers(_hcd); ++ del_timer(&_hcd->conn_timer); ++} ++ ++/** ++ * Processes all the URBs in a single list of QHs. Completes them with ++ * -ETIMEDOUT and frees the QTD. 
++ */ ++static void kill_urbs_in_qh_list(dwc_otg_hcd_t * _hcd, ++ struct list_head *_qh_list) ++{ ++ struct list_head *qh_item; ++ dwc_otg_qh_t *qh; ++ struct list_head *qtd_item; ++ dwc_otg_qtd_t *qtd; ++ ++ list_for_each(qh_item, _qh_list) { ++ qh = list_entry(qh_item, dwc_otg_qh_t, qh_list_entry); ++ for (qtd_item = qh->qtd_list.next; qtd_item != &qh->qtd_list; ++ qtd_item = qh->qtd_list.next) { ++ qtd = list_entry(qtd_item, dwc_otg_qtd_t, qtd_list_entry); ++ if (qtd->urb != NULL) { ++ dwc_otg_hcd_complete_urb(_hcd, qtd->urb,-ETIMEDOUT); ++ } ++ dwc_otg_hcd_qtd_remove_and_free(qtd); ++ } ++ } ++} ++ ++/** ++ * Responds with an error status of ETIMEDOUT to all URBs in the non-periodic ++ * and periodic schedules. The QTD associated with each URB is removed from ++ * the schedule and freed. This function may be called when a disconnect is ++ * detected or when the HCD is being stopped. ++ */ ++static void kill_all_urbs(dwc_otg_hcd_t *_hcd) ++{ ++ kill_urbs_in_qh_list(_hcd, &_hcd->non_periodic_sched_deferred); ++ kill_urbs_in_qh_list(_hcd, &_hcd->non_periodic_sched_inactive); ++ kill_urbs_in_qh_list(_hcd, &_hcd->non_periodic_sched_active); ++ kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_inactive); ++ kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_ready); ++ kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_assigned); ++ kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_queued); ++} ++ ++/** ++ * HCD Callback function for disconnect of the HCD. ++ * ++ * @param _p void pointer to the <code>struct usb_hcd</code> ++ */ ++static int32_t dwc_otg_hcd_disconnect_cb( void *_p ) ++{ ++ gintsts_data_t intr; ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd (_p); ++ ++ DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, _p); ++ ++ /* ++ * Set status flags for the hub driver. ++ */ ++ dwc_otg_hcd->flags.b.port_connect_status_change = 1; ++ dwc_otg_hcd->flags.b.port_connect_status = 0; ++ ++ /* ++ * Shutdown any transfers in process by clearing the Tx FIFO Empty ++ * interrupt mask and status bits and disabling subsequent host ++ * channel interrupts. ++ */ ++ intr.d32 = 0; ++ intr.b.nptxfempty = 1; ++ intr.b.ptxfempty = 1; ++ intr.b.hcintr = 1; ++ dwc_modify_reg32 (&dwc_otg_hcd->core_if->core_global_regs->gintmsk, intr.d32, 0); ++ dwc_modify_reg32 (&dwc_otg_hcd->core_if->core_global_regs->gintsts, intr.d32, 0); ++ ++ del_timers(dwc_otg_hcd); ++ ++ /* ++ * Turn off the vbus power only if the core has transitioned to device ++ * mode. If still in host mode, need to keep power on to detect a ++ * reconnection. ++ */ ++ if (dwc_otg_is_device_mode(dwc_otg_hcd->core_if)) { ++ if (dwc_otg_hcd->core_if->op_state != A_SUSPEND) { ++ hprt0_data_t hprt0 = { .d32=0 }; ++ DWC_PRINT("Disconnect: PortPower off\n"); ++ hprt0.b.prtpwr = 0; ++ dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0.d32); ++ } ++ ++ dwc_otg_disable_host_interrupts( dwc_otg_hcd->core_if ); ++ } ++ ++ /* Respond with an error status to all URBs in the schedule. */ ++ kill_all_urbs(dwc_otg_hcd); ++ ++ if (dwc_otg_is_host_mode(dwc_otg_hcd->core_if)) { ++ /* Clean up any host channels that were in use. */ ++ int num_channels; ++ int i; ++ dwc_hc_t *channel; ++ dwc_otg_hc_regs_t *hc_regs; ++ hcchar_data_t hcchar; ++ ++ num_channels = dwc_otg_hcd->core_if->core_params->host_channels; ++ ++ if (!dwc_otg_hcd->core_if->dma_enable) { ++ /* Flush out any channel requests in slave mode. 
*/
++ for (i = 0; i < num_channels; i++) {
++ channel = dwc_otg_hcd->hc_ptr_array[i];
++ if (list_empty(&channel->hc_list_entry)) {
++ hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[i];
++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
++ if (hcchar.b.chen) {
++ hcchar.b.chen = 0;
++ hcchar.b.chdis = 1;
++ hcchar.b.epdir = 0;
++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
++ }
++ }
++ }
++ }
++
++ for (i = 0; i < num_channels; i++) {
++ channel = dwc_otg_hcd->hc_ptr_array[i];
++ if (list_empty(&channel->hc_list_entry)) {
++ hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[i];
++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
++ if (hcchar.b.chen) {
++ /* Halt the channel. */
++ hcchar.b.chdis = 1;
++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
++ }
++
++ dwc_otg_hc_cleanup(dwc_otg_hcd->core_if, channel);
++ list_add_tail(&channel->hc_list_entry,
++ &dwc_otg_hcd->free_hc_list);
++ }
++ }
++ }
++
++ /* A disconnect will end the session so the B-Device is no
++ * longer a B-host. */
++ ((struct usb_hcd *)_p)->self.is_b_host = 0;
++
++ return 1;
++}
++
++/**
++ * Connection timeout function. An OTG host is required to display a
++ * message if the device does not connect within 10 seconds.
++ */
++void dwc_otg_hcd_connect_timeout( unsigned long _ptr )
++{
++ DWC_DEBUGPL(DBG_HCDV, "%s(%x)\n", __func__, (int)_ptr);
++ DWC_PRINT( "Connect Timeout\n");
++ DWC_ERROR( "Device Not Connected/Responding\n" );
++}
++
++/**
++ * Start the connection timer. An OTG host is required to display a
++ * message if the device does not connect within 10 seconds. The
++ * timer is deleted if a port connect interrupt occurs before the
++ * timer expires.
++ */
++static void dwc_otg_hcd_start_connect_timer( dwc_otg_hcd_t *_hcd)
++{
++ init_timer( &_hcd->conn_timer );
++ _hcd->conn_timer.function = dwc_otg_hcd_connect_timeout;
++ _hcd->conn_timer.data = (unsigned long)0;
++ _hcd->conn_timer.expires = jiffies + (HZ*10);
++ add_timer( &_hcd->conn_timer );
++}
++
++/**
++ * HCD Callback function for session start of the HCD.
++ *
++ * @param _p void pointer to the <code>struct usb_hcd</code>
++ */
++static int32_t dwc_otg_hcd_session_start_cb( void *_p )
++{
++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd (_p);
++ DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, _p);
++ dwc_otg_hcd_start_connect_timer( dwc_otg_hcd );
++ return 1;
++}
++
++/**
++ * HCD Callback structure for handling mode switching.
++ */
++static dwc_otg_cil_callbacks_t hcd_cil_callbacks = {
++ .start = dwc_otg_hcd_start_cb,
++ .stop = dwc_otg_hcd_stop_cb,
++ .disconnect = dwc_otg_hcd_disconnect_cb,
++ .session_start = dwc_otg_hcd_session_start_cb,
++ .p = 0,
++};
++
++
++/**
++ * Reset tasklet function
++ */
++static void reset_tasklet_func (unsigned long data)
++{
++ dwc_otg_hcd_t *dwc_otg_hcd = (dwc_otg_hcd_t*)data;
++ dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if;
++ hprt0_data_t hprt0;
++
++ DWC_DEBUGPL(DBG_HCDV, "USB RESET tasklet called\n");
++
++ hprt0.d32 = dwc_otg_read_hprt0 (core_if);
++ hprt0.b.prtrst = 1;
++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
++ mdelay (60);
++
++ hprt0.b.prtrst = 0;
++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
++ dwc_otg_hcd->flags.b.port_reset_change = 1;
++
++ return;
++}
++
++static struct tasklet_struct reset_tasklet = {
++ .next = NULL,
++ .state = 0,
++ .count = ATOMIC_INIT(0),
++ .func = reset_tasklet_func,
++ .data = 0,
++};
++
++/**
++ * Initializes the HCD. This function allocates memory for and initializes the
++ * static parts of the usb_hcd and dwc_otg_hcd structures.
It also registers the ++ * USB bus with the core and calls the hc_driver->start() function. It returns ++ * a negative error on failure. ++ */ ++int init_hcd_usecs(dwc_otg_hcd_t *_hcd); ++ ++int __devinit dwc_otg_hcd_init(struct device *_dev, dwc_otg_device_t * dwc_otg_device) ++{ ++ struct usb_hcd *hcd = NULL; ++ dwc_otg_hcd_t *dwc_otg_hcd = NULL; ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ ++ int num_channels; ++ int i; ++ dwc_hc_t *channel; ++ ++ int retval = 0; ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD INIT\n"); ++ ++ /* ++ * Allocate memory for the base HCD plus the DWC OTG HCD. ++ * Initialize the base HCD. ++ */ ++ hcd = usb_create_hcd(&dwc_otg_hc_driver, _dev, dev_name(_dev)); ++ if (hcd == NULL) { ++ retval = -ENOMEM; ++ goto error1; ++ } ++ dev_set_drvdata(_dev, dwc_otg_device); /* fscz restore */ ++ hcd->regs = otg_dev->base; ++ hcd->rsrc_start = (int)otg_dev->base; ++ ++ hcd->self.otg_port = 1; ++ ++ /* Initialize the DWC OTG HCD. */ ++ dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ dwc_otg_hcd->core_if = otg_dev->core_if; ++ otg_dev->hcd = dwc_otg_hcd; ++ ++ /* Register the HCD CIL Callbacks */ ++ dwc_otg_cil_register_hcd_callbacks(otg_dev->core_if, ++ &hcd_cil_callbacks, hcd); ++ ++ /* Initialize the non-periodic schedule. */ ++ INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_inactive); ++ INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_active); ++ INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_deferred); ++ ++ /* Initialize the periodic schedule. */ ++ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_inactive); ++ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_ready); ++ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_assigned); ++ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_queued); ++ ++ /* ++ * Create a host channel descriptor for each host channel implemented ++ * in the controller. Initialize the channel descriptor array. ++ */ ++ INIT_LIST_HEAD(&dwc_otg_hcd->free_hc_list); ++ num_channels = dwc_otg_hcd->core_if->core_params->host_channels; ++ for (i = 0; i < num_channels; i++) { ++ channel = kmalloc(sizeof(dwc_hc_t), GFP_KERNEL); ++ if (channel == NULL) { ++ retval = -ENOMEM; ++ DWC_ERROR("%s: host channel allocation failed\n", __func__); ++ goto error2; ++ } ++ memset(channel, 0, sizeof(dwc_hc_t)); ++ channel->hc_num = i; ++ dwc_otg_hcd->hc_ptr_array[i] = channel; ++#ifdef DEBUG ++ init_timer(&dwc_otg_hcd->core_if->hc_xfer_timer[i]); ++#endif ++ ++ DWC_DEBUGPL(DBG_HCDV, "HCD Added channel #%d, hc=%p\n", i, channel); ++ } ++ ++ /* Initialize the Connection timeout timer. */ ++ init_timer( &dwc_otg_hcd->conn_timer ); ++ ++ /* Initialize reset tasklet. */ ++ reset_tasklet.data = (unsigned long) dwc_otg_hcd; ++ dwc_otg_hcd->reset_tasklet = &reset_tasklet; ++ ++ /* Set device flags indicating whether the HCD supports DMA. */ ++ if (otg_dev->core_if->dma_enable) { ++ DWC_PRINT("Using DMA mode\n"); ++ //_dev->dma_mask = (void *)~0; ++ //_dev->coherent_dma_mask = ~0; ++ _dev->dma_mask = &dma_mask; ++ _dev->coherent_dma_mask = DMA_BIT_MASK(32); ++ } else { ++ DWC_PRINT("Using Slave mode\n"); ++ _dev->dma_mask = (void *)0; ++ _dev->coherent_dma_mask = 0; ++ } ++ ++ init_hcd_usecs(dwc_otg_hcd); ++ /* ++ * Finish generic HCD initialization and start the HCD. This function ++ * allocates the DMA buffer pool, registers the USB bus, requests the ++ * IRQ line, and calls dwc_otg_hcd_start method. ++ */ ++ retval = usb_add_hcd(hcd, otg_dev->irq, IRQF_SHARED); ++ if (retval < 0) { ++ goto error2; ++ } ++ ++ /* ++ * Allocate space for storing data on status transactions. 
Normally no ++ * data is sent, but this space acts as a bit bucket. This must be ++ * done after usb_add_hcd since that function allocates the DMA buffer ++ * pool. ++ */ ++ if (otg_dev->core_if->dma_enable) { ++ dwc_otg_hcd->status_buf = ++ dma_alloc_coherent(_dev, ++ DWC_OTG_HCD_STATUS_BUF_SIZE, ++ &dwc_otg_hcd->status_buf_dma, ++ GFP_KERNEL | GFP_DMA); ++ } else { ++ dwc_otg_hcd->status_buf = kmalloc(DWC_OTG_HCD_STATUS_BUF_SIZE, ++ GFP_KERNEL); ++ } ++ if (dwc_otg_hcd->status_buf == NULL) { ++ retval = -ENOMEM; ++ DWC_ERROR("%s: status_buf allocation failed\n", __func__); ++ goto error3; ++ } ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Initialized HCD, bus=%s, usbbus=%d\n", ++ dev_name(_dev), hcd->self.busnum); ++ ++ return 0; ++ ++ /* Error conditions */ ++error3: ++ usb_remove_hcd(hcd); ++error2: ++ dwc_otg_hcd_free(hcd); ++ usb_put_hcd(hcd); ++error1: ++ return retval; ++} ++ ++/** ++ * Removes the HCD. ++ * Frees memory and resources associated with the HCD and deregisters the bus. ++ */ ++void dwc_otg_hcd_remove(struct device *_dev) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ dwc_otg_hcd_t *dwc_otg_hcd = otg_dev->hcd; ++ struct usb_hcd *hcd = dwc_otg_hcd_to_hcd(dwc_otg_hcd); ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD REMOVE\n"); ++ ++ /* Turn off all interrupts */ ++ dwc_write_reg32 (&dwc_otg_hcd->core_if->core_global_regs->gintmsk, 0); ++ dwc_modify_reg32 (&dwc_otg_hcd->core_if->core_global_regs->gahbcfg, 1, 0); ++ ++ usb_remove_hcd(hcd); ++ ++ dwc_otg_hcd_free(hcd); ++ ++ usb_put_hcd(hcd); ++ ++ return; ++} ++ ++ ++/* ========================================================================= ++ * Linux HC Driver Functions ++ * ========================================================================= */ ++ ++/** ++ * Initializes dynamic portions of the DWC_otg HCD state. ++ */ ++static void hcd_reinit(dwc_otg_hcd_t *_hcd) ++{ ++ struct list_head *item; ++ int num_channels; ++ int i; ++ dwc_hc_t *channel; ++ ++ _hcd->flags.d32 = 0; ++ ++ _hcd->non_periodic_qh_ptr = &_hcd->non_periodic_sched_active; ++ _hcd->available_host_channels = _hcd->core_if->core_params->host_channels; ++ ++ /* ++ * Put all channels in the free channel list and clean up channel ++ * states. ++ */ ++ item = _hcd->free_hc_list.next; ++ while (item != &_hcd->free_hc_list) { ++ list_del(item); ++ item = _hcd->free_hc_list.next; ++ } ++ num_channels = _hcd->core_if->core_params->host_channels; ++ for (i = 0; i < num_channels; i++) { ++ channel = _hcd->hc_ptr_array[i]; ++ list_add_tail(&channel->hc_list_entry, &_hcd->free_hc_list); ++ dwc_otg_hc_cleanup(_hcd->core_if, channel); ++ } ++ ++ /* Initialize the DWC core for host mode operation. */ ++ dwc_otg_core_host_init(_hcd->core_if); ++} ++ ++/** Initializes the DWC_otg controller and its root hub and prepares it for host ++ * mode operation. Activates the root port. Returns 0 on success and a negative ++ * error code on failure. */ ++int dwc_otg_hcd_start(struct usb_hcd *_hcd) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd (_hcd); ++ dwc_otg_core_if_t * core_if = dwc_otg_hcd->core_if; ++ struct usb_bus *bus; ++ ++ // int retval; ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD START\n"); ++ ++ bus = hcd_to_bus(_hcd); ++ ++ /* Initialize the bus state. If the core is in Device Mode ++ * HALT the USB bus and return. 
*/ ++ if (dwc_otg_is_device_mode (core_if)) { ++ _hcd->state = HC_STATE_HALT; ++ return 0; ++ } ++ _hcd->state = HC_STATE_RUNNING; ++ ++ /* Initialize and connect root hub if one is not already attached */ ++ if (bus->root_hub) { ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Has Root Hub\n"); ++ /* Inform the HUB driver to resume. */ ++ usb_hcd_resume_root_hub(_hcd); ++ } ++ else { ++#if 0 ++ struct usb_device *udev; ++ udev = usb_alloc_dev(NULL, bus, 0); ++ if (!udev) { ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Error udev alloc\n"); ++ return -ENODEV; ++ } ++ udev->speed = USB_SPEED_HIGH; ++ /* Not needed - VJ ++ if ((retval = usb_hcd_register_root_hub(udev, _hcd)) != 0) { ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Error registering %d\n", retval); ++ return -ENODEV; ++ } ++ */ ++#else ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Error udev alloc\n"); ++#endif ++ } ++ ++ hcd_reinit(dwc_otg_hcd); ++ ++ return 0; ++} ++ ++static void qh_list_free(dwc_otg_hcd_t *_hcd, struct list_head *_qh_list) ++{ ++ struct list_head *item; ++ dwc_otg_qh_t *qh; ++ ++ if (_qh_list->next == NULL) { ++ /* The list hasn't been initialized yet. */ ++ return; ++ } ++ ++ /* Ensure there are no QTDs or URBs left. */ ++ kill_urbs_in_qh_list(_hcd, _qh_list); ++ ++ for (item = _qh_list->next; item != _qh_list; item = _qh_list->next) { ++ qh = list_entry(item, dwc_otg_qh_t, qh_list_entry); ++ dwc_otg_hcd_qh_remove_and_free(_hcd, qh); ++ } ++} ++ ++/** ++ * Halts the DWC_otg host mode operations in a clean manner. USB transfers are ++ * stopped. ++ */ ++void dwc_otg_hcd_stop(struct usb_hcd *_hcd) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd (_hcd); ++ hprt0_data_t hprt0 = { .d32=0 }; ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD STOP\n"); ++ ++ /* Turn off all host-specific interrupts. */ ++ dwc_otg_disable_host_interrupts( dwc_otg_hcd->core_if ); ++ ++ /* ++ * The root hub should be disconnected before this function is called. ++ * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue) ++ * and the QH lists (via ..._hcd_endpoint_disable). ++ */ ++ ++ /* Turn off the vbus power */ ++ DWC_PRINT("PortPower off\n"); ++ hprt0.b.prtpwr = 0; ++ dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0.d32); ++ ++ return; ++} ++ ++ ++/** Returns the current frame number. */ ++int dwc_otg_hcd_get_frame_number(struct usb_hcd *_hcd) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd); ++ hfnum_data_t hfnum; ++ ++ hfnum.d32 = dwc_read_reg32(&dwc_otg_hcd->core_if-> ++ host_if->host_global_regs->hfnum); ++ ++#ifdef DEBUG_SOF ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD GET FRAME NUMBER %d\n", hfnum.b.frnum); ++#endif ++ return hfnum.b.frnum; ++} ++ ++/** ++ * Frees secondary storage associated with the dwc_otg_hcd structure contained ++ * in the struct usb_hcd field. ++ */ ++void dwc_otg_hcd_free(struct usb_hcd *_hcd) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd); ++ int i; ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD FREE\n"); ++ ++ del_timers(dwc_otg_hcd); ++ ++ /* Free memory for QH/QTD lists */ ++ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_inactive); ++ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_deferred); ++ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_active); ++ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_inactive); ++ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_ready); ++ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_assigned); ++ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_queued); ++ ++ /* Free memory for the host channels. 
*/ ++ for (i = 0; i < MAX_EPS_CHANNELS; i++) { ++ dwc_hc_t *hc = dwc_otg_hcd->hc_ptr_array[i]; ++ if (hc != NULL) { ++ DWC_DEBUGPL(DBG_HCDV, "HCD Free channel #%i, hc=%p\n", i, hc); ++ kfree(hc); ++ } ++ } ++ ++ if (dwc_otg_hcd->core_if->dma_enable) { ++ if (dwc_otg_hcd->status_buf_dma) { ++ dma_free_coherent(_hcd->self.controller, ++ DWC_OTG_HCD_STATUS_BUF_SIZE, ++ dwc_otg_hcd->status_buf, ++ dwc_otg_hcd->status_buf_dma); ++ } ++ } else if (dwc_otg_hcd->status_buf != NULL) { ++ kfree(dwc_otg_hcd->status_buf); ++ } ++ ++ return; ++} ++ ++ ++#ifdef DEBUG ++static void dump_urb_info(struct urb *_urb, char* _fn_name) ++{ ++ DWC_PRINT("%s, urb %p\n", _fn_name, _urb); ++ DWC_PRINT(" Device address: %d\n", usb_pipedevice(_urb->pipe)); ++ DWC_PRINT(" Endpoint: %d, %s\n", usb_pipeendpoint(_urb->pipe), ++ (usb_pipein(_urb->pipe) ? "IN" : "OUT")); ++ DWC_PRINT(" Endpoint type: %s\n", ++ ({char *pipetype; ++ switch (usb_pipetype(_urb->pipe)) { ++ case PIPE_CONTROL: pipetype = "CONTROL"; break; ++ case PIPE_BULK: pipetype = "BULK"; break; ++ case PIPE_INTERRUPT: pipetype = "INTERRUPT"; break; ++ case PIPE_ISOCHRONOUS: pipetype = "ISOCHRONOUS"; break; ++ default: pipetype = "UNKNOWN"; break; ++ }; pipetype;})); ++ DWC_PRINT(" Speed: %s\n", ++ ({char *speed; ++ switch (_urb->dev->speed) { ++ case USB_SPEED_HIGH: speed = "HIGH"; break; ++ case USB_SPEED_FULL: speed = "FULL"; break; ++ case USB_SPEED_LOW: speed = "LOW"; break; ++ default: speed = "UNKNOWN"; break; ++ }; speed;})); ++ DWC_PRINT(" Max packet size: %d\n", ++ usb_maxpacket(_urb->dev, _urb->pipe, usb_pipeout(_urb->pipe))); ++ DWC_PRINT(" Data buffer length: %d\n", _urb->transfer_buffer_length); ++ DWC_PRINT(" Transfer buffer: %p, Transfer DMA: %p\n", ++ _urb->transfer_buffer, (void *)_urb->transfer_dma); ++ DWC_PRINT(" Setup buffer: %p, Setup DMA: %p\n", ++ _urb->setup_packet, (void *)_urb->setup_dma); ++ DWC_PRINT(" Interval: %d\n", _urb->interval); ++ if (usb_pipetype(_urb->pipe) == PIPE_ISOCHRONOUS) { ++ int i; ++ for (i = 0; i < _urb->number_of_packets; i++) { ++ DWC_PRINT(" ISO Desc %d:\n", i); ++ DWC_PRINT(" offset: %d, length %d\n", ++ _urb->iso_frame_desc[i].offset, ++ _urb->iso_frame_desc[i].length); ++ } ++ } ++} ++ ++static void dump_channel_info(dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *qh) ++{ ++ if (qh->channel != NULL) { ++ dwc_hc_t *hc = qh->channel; ++ struct list_head *item; ++ dwc_otg_qh_t *qh_item; ++ int num_channels = _hcd->core_if->core_params->host_channels; ++ int i; ++ ++ dwc_otg_hc_regs_t *hc_regs; ++ hcchar_data_t hcchar; ++ hcsplt_data_t hcsplt; ++ hctsiz_data_t hctsiz; ++ uint32_t hcdma; ++ ++ hc_regs = _hcd->core_if->host_if->hc_regs[hc->hc_num]; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt); ++ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); ++ hcdma = dwc_read_reg32(&hc_regs->hcdma); ++ ++ DWC_PRINT(" Assigned to channel %p:\n", hc); ++ DWC_PRINT(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32); ++ DWC_PRINT(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma); ++ DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n", ++ hc->dev_addr, hc->ep_num, hc->ep_is_in); ++ DWC_PRINT(" ep_type: %d\n", hc->ep_type); ++ DWC_PRINT(" max_packet: %d\n", hc->max_packet); ++ DWC_PRINT(" data_pid_start: %d\n", hc->data_pid_start); ++ DWC_PRINT(" xfer_started: %d\n", hc->xfer_started); ++ DWC_PRINT(" halt_status: %d\n", hc->halt_status); ++ DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buff); ++ DWC_PRINT(" xfer_len: %d\n", hc->xfer_len); ++ DWC_PRINT(" qh: %p\n", hc->qh); ++ 
DWC_PRINT(" NP inactive sched:\n");
++ list_for_each(item, &_hcd->non_periodic_sched_inactive) {
++ qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry);
++ DWC_PRINT(" %p\n", qh_item);
++ } DWC_PRINT(" NP deferred sched:\n");
++ list_for_each(item, &_hcd->non_periodic_sched_deferred) {
++ qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry);
++ DWC_PRINT(" %p\n", qh_item);
++ } DWC_PRINT(" NP active sched:\n");
++ list_for_each(item, &_hcd->non_periodic_sched_active) {
++ qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry);
++ DWC_PRINT(" %p\n", qh_item);
++ } DWC_PRINT(" Channels: \n");
++ for (i = 0; i < num_channels; i++) {
++ dwc_hc_t *hc = _hcd->hc_ptr_array[i];
++ DWC_PRINT(" %2d: %p\n", i, hc);
++ }
++ }
++}
++#endif // DEBUG
++
++/** Starts processing a USB transfer request specified by a USB Request Block
++ * (URB). mem_flags indicates the type of memory allocation to use while
++ * processing this URB. */
++int dwc_otg_hcd_urb_enqueue(struct usb_hcd *_hcd,
++ struct urb *_urb,
++ gfp_t _mem_flags)
++{
++ unsigned long flags;
++ int retval;
++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd (_hcd);
++ dwc_otg_qtd_t *qtd;
++
++ local_irq_save(flags);
++ retval = usb_hcd_link_urb_to_ep(_hcd, _urb);
++ if (retval) {
++ local_irq_restore(flags);
++ return retval;
++ }
++#ifdef DEBUG
++ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
++ dump_urb_info(_urb, "dwc_otg_hcd_urb_enqueue");
++ }
++#endif // DEBUG
++ if (!dwc_otg_hcd->flags.b.port_connect_status) {
++ /* No longer connected. */
++ local_irq_restore(flags);
++ return -ENODEV;
++ }
++
++ qtd = dwc_otg_hcd_qtd_create (_urb);
++ if (qtd == NULL) {
++ local_irq_restore(flags);
++ DWC_ERROR("DWC OTG HCD URB Enqueue failed creating QTD\n");
++ return -ENOMEM;
++ }
++
++ retval = dwc_otg_hcd_qtd_add (qtd, dwc_otg_hcd);
++ if (retval < 0) {
++ DWC_ERROR("DWC OTG HCD URB Enqueue failed adding QTD. "
++ "Error status %d\n", retval);
++ dwc_otg_hcd_qtd_free(qtd);
++ }
++
++ local_irq_restore (flags);
++ return retval;
++}
++
++/** Aborts/cancels a USB transfer request. Always returns 0 to indicate
++ * success. */
++int dwc_otg_hcd_urb_dequeue(struct usb_hcd *_hcd, struct urb *_urb, int _status)
++{
++ unsigned long flags;
++ dwc_otg_hcd_t *dwc_otg_hcd;
++ dwc_otg_qtd_t *urb_qtd;
++ dwc_otg_qh_t *qh;
++ int retval;
++ //struct usb_host_endpoint *_ep = NULL;
++
++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Dequeue\n");
++
++ local_irq_save(flags);
++
++ retval = usb_hcd_check_unlink_urb(_hcd, _urb, _status);
++ if (retval) {
++ local_irq_restore(flags);
++ return retval;
++ }
++
++ dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
++ urb_qtd = (dwc_otg_qtd_t *)_urb->hcpriv;
++ if (urb_qtd == NULL) {
++ printk("urb_qtd is NULL for _urb %08x\n",(unsigned)_urb);
++ goto done;
++ }
++ qh = (dwc_otg_qh_t *) urb_qtd->qtd_qh_ptr;
++ if (qh == NULL) {
++ goto done;
++ }
++
++#ifdef DEBUG
++ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
++ dump_urb_info(_urb, "dwc_otg_hcd_urb_dequeue");
++ if (urb_qtd == qh->qtd_in_process) {
++ dump_channel_info(dwc_otg_hcd, qh);
++ }
++ }
++#endif // DEBUG
++
++ if (urb_qtd == qh->qtd_in_process) {
++ /* The QTD is in process (it has been assigned to a channel). */
++
++ if (dwc_otg_hcd->flags.b.port_connect_status) {
++ /*
++ * If still connected (i.e. in host mode), halt the
++ * channel so it can be used for other transfers. If
++ * no longer connected, the host registers can't be
++ * written to halt the channel since the core is in
++ * device mode.
++ */ ++ dwc_otg_hc_halt(dwc_otg_hcd->core_if, qh->channel, ++ DWC_OTG_HC_XFER_URB_DEQUEUE); ++ } ++ } ++ ++ /* ++ * Free the QTD and clean up the associated QH. Leave the QH in the ++ * schedule if it has any remaining QTDs. ++ */ ++ dwc_otg_hcd_qtd_remove_and_free(urb_qtd); ++ if (urb_qtd == qh->qtd_in_process) { ++ dwc_otg_hcd_qh_deactivate(dwc_otg_hcd, qh, 0); ++ qh->channel = NULL; ++ qh->qtd_in_process = NULL; ++ } else if (list_empty(&qh->qtd_list)) { ++ dwc_otg_hcd_qh_remove(dwc_otg_hcd, qh); ++ } ++ ++done: ++ local_irq_restore(flags); ++ _urb->hcpriv = NULL; ++ ++ /* Higher layer software sets URB status. */ ++ usb_hcd_unlink_urb_from_ep(_hcd, _urb); ++ usb_hcd_giveback_urb(_hcd, _urb, _status); ++ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) { ++ DWC_PRINT("Called usb_hcd_giveback_urb()\n"); ++ DWC_PRINT(" urb->status = %d\n", _urb->status); ++ } ++ ++ return 0; ++} ++ ++ ++/** Frees resources in the DWC_otg controller related to a given endpoint. Also ++ * clears state in the HCD related to the endpoint. Any URBs for the endpoint ++ * must already be dequeued. */ ++void dwc_otg_hcd_endpoint_disable(struct usb_hcd *_hcd, ++ struct usb_host_endpoint *_ep) ++ ++{ ++ dwc_otg_qh_t *qh; ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd); ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD EP DISABLE: _bEndpointAddress=0x%02x, " ++ "endpoint=%d\n", _ep->desc.bEndpointAddress, ++ dwc_ep_addr_to_endpoint(_ep->desc.bEndpointAddress)); ++ ++ qh = (dwc_otg_qh_t *)(_ep->hcpriv); ++ if (qh != NULL) { ++#ifdef DEBUG ++ /** Check that the QTD list is really empty */ ++ if (!list_empty(&qh->qtd_list)) { ++ DWC_WARN("DWC OTG HCD EP DISABLE:" ++ " QTD List for this endpoint is not empty\n"); ++ } ++#endif // DEBUG ++ ++ dwc_otg_hcd_qh_remove_and_free(dwc_otg_hcd, qh); ++ _ep->hcpriv = NULL; ++ } ++ ++ return; ++} ++extern int dwc_irq; ++/** Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE if ++ * there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid ++ * interrupt. ++ * ++ * This function is called by the USB core when an interrupt occurs */ ++irqreturn_t dwc_otg_hcd_irq(struct usb_hcd *_hcd) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd (_hcd); ++ ++ mask_and_ack_ifx_irq (dwc_irq); ++ return IRQ_RETVAL(dwc_otg_hcd_handle_intr(dwc_otg_hcd)); ++} ++ ++/** Creates Status Change bitmap for the root hub and root port. The bitmap is ++ * returned in buf. Bit 0 is the status change indicator for the root hub. Bit 1 ++ * is the status change indicator for the single root port. Returns 1 if either ++ * change indicator is 1, otherwise returns 0. 
*/ ++int dwc_otg_hcd_hub_status_data(struct usb_hcd *_hcd, char *_buf) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd (_hcd); ++ ++ _buf[0] = 0; ++ _buf[0] |= (dwc_otg_hcd->flags.b.port_connect_status_change || ++ dwc_otg_hcd->flags.b.port_reset_change || ++ dwc_otg_hcd->flags.b.port_enable_change || ++ dwc_otg_hcd->flags.b.port_suspend_change || ++ dwc_otg_hcd->flags.b.port_over_current_change) << 1; ++ ++#ifdef DEBUG ++ if (_buf[0]) { ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB STATUS DATA:" ++ " Root port status changed\n"); ++ DWC_DEBUGPL(DBG_HCDV, " port_connect_status_change: %d\n", ++ dwc_otg_hcd->flags.b.port_connect_status_change); ++ DWC_DEBUGPL(DBG_HCDV, " port_reset_change: %d\n", ++ dwc_otg_hcd->flags.b.port_reset_change); ++ DWC_DEBUGPL(DBG_HCDV, " port_enable_change: %d\n", ++ dwc_otg_hcd->flags.b.port_enable_change); ++ DWC_DEBUGPL(DBG_HCDV, " port_suspend_change: %d\n", ++ dwc_otg_hcd->flags.b.port_suspend_change); ++ DWC_DEBUGPL(DBG_HCDV, " port_over_current_change: %d\n", ++ dwc_otg_hcd->flags.b.port_over_current_change); ++ } ++#endif // DEBUG ++ return (_buf[0] != 0); ++} ++ ++#ifdef DWC_HS_ELECT_TST ++/* ++ * Quick and dirty hack to implement the HS Electrical Test ++ * SINGLE_STEP_GET_DEVICE_DESCRIPTOR feature. ++ * ++ * This code was copied from our userspace app "hset". It sends a ++ * Get Device Descriptor control sequence in two parts, first the ++ * Setup packet by itself, followed some time later by the In and ++ * Ack packets. Rather than trying to figure out how to add this ++ * functionality to the normal driver code, we just hijack the ++ * hardware, using these two function to drive the hardware ++ * directly. ++ */ ++ ++dwc_otg_core_global_regs_t *global_regs; ++dwc_otg_host_global_regs_t *hc_global_regs; ++dwc_otg_hc_regs_t *hc_regs; ++uint32_t *data_fifo; ++ ++static void do_setup(void) ++{ ++ gintsts_data_t gintsts; ++ hctsiz_data_t hctsiz; ++ hcchar_data_t hcchar; ++ haint_data_t haint; ++ hcint_data_t hcint; ++ ++ /* Enable HAINTs */ ++ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001); ++ ++ /* Enable HCINTs */ ++ dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* ++ * Send Setup packet (Get Device Descriptor) ++ */ ++ ++ /* Make sure channel is disabled */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chen) { ++ //fprintf(stderr, "Channel already enabled 1, HCCHAR = %08x\n", hcchar.d32); ++ hcchar.b.chdis = 1; ++ // hcchar.b.chen = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ //sleep(1); ++ MDELAY(1000); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* 
Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //if (hcchar.b.chen) { ++ // fprintf(stderr, "** Channel _still_ enabled 1, HCCHAR = %08x **\n", hcchar.d32); ++ //} ++ } ++ ++ /* Set HCTSIZ */ ++ hctsiz.d32 = 0; ++ hctsiz.b.xfersize = 8; ++ hctsiz.b.pktcnt = 1; ++ hctsiz.b.pid = DWC_OTG_HC_PID_SETUP; ++ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); ++ ++ /* Set HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL; ++ hcchar.b.epdir = 0; ++ hcchar.b.epnum = 0; ++ hcchar.b.mps = 8; ++ hcchar.b.chen = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ ++ /* Fill FIFO with Setup data for Get Device Descriptor */ ++ data_fifo = (uint32_t *)((char *)global_regs + 0x1000); ++ dwc_write_reg32(data_fifo++, 0x01000680); ++ dwc_write_reg32(data_fifo++, 0x00080000); ++ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "Waiting for HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Wait for host channel interrupt */ ++ do { ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ } while (gintsts.b.hcintr == 0); ++ ++ //fprintf(stderr, "Got HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Disable HCINTs */ ++ dwc_write_reg32(&hc_regs->hcintmsk, 0x0000); ++ ++ /* Disable HAINTs */ ++ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++} ++ ++static void do_in_ack(void) ++{ ++ gintsts_data_t gintsts; ++ hctsiz_data_t hctsiz; ++ hcchar_data_t hcchar; ++ haint_data_t haint; ++ hcint_data_t hcint; ++ host_grxsts_data_t grxsts; ++ ++ /* Enable HAINTs */ ++ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001); ++ ++ /* Enable HCINTs */ ++ dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ 
++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* ++ * Receive Control In packet ++ */ ++ ++ /* Make sure channel is disabled */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chen) { ++ //fprintf(stderr, "Channel already enabled 2, HCCHAR = %08x\n", hcchar.d32); ++ hcchar.b.chdis = 1; ++ hcchar.b.chen = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ //sleep(1); ++ MDELAY(1000); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //if (hcchar.b.chen) { ++ // fprintf(stderr, "** Channel _still_ enabled 2, HCCHAR = %08x **\n", hcchar.d32); ++ //} ++ } ++ ++ /* Set HCTSIZ */ ++ hctsiz.d32 = 0; ++ hctsiz.b.xfersize = 8; ++ hctsiz.b.pktcnt = 1; ++ hctsiz.b.pid = DWC_OTG_HC_PID_DATA1; ++ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); ++ ++ /* Set HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL; ++ hcchar.b.epdir = 1; ++ hcchar.b.epnum = 0; ++ hcchar.b.mps = 8; ++ hcchar.b.chen = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "Waiting for RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Wait for receive status queue interrupt */ ++ do { ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ } while (gintsts.b.rxstsqlvl == 0); ++ ++ //fprintf(stderr, "Got RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Read RXSTS */ ++ grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp); ++ //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32); ++ ++ /* Clear RXSTSQLVL in GINTSTS */ ++ gintsts.d32 = 0; ++ gintsts.b.rxstsqlvl = 1; ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ switch (grxsts.b.pktsts) { ++ case DWC_GRXSTS_PKTSTS_IN: ++ /* Read the data into the host buffer */ ++ if (grxsts.b.bcnt > 0) { ++ int i; ++ int word_count = (grxsts.b.bcnt + 3) / 4; ++ ++ data_fifo = (uint32_t *)((char *)global_regs + 0x1000); ++ ++ for (i = 0; i < word_count; i++) { ++ (void)dwc_read_reg32(data_fifo++); ++ } ++ } ++ ++ //fprintf(stderr, "Received %u bytes\n", (unsigned)grxsts.b.bcnt); ++ break; ++ ++ default: ++ //fprintf(stderr, "** Unexpected GRXSTS packet status 1 **\n"); ++ break; ++ } ++ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "Waiting for RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Wait for receive status queue interrupt */ ++ do { ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ } while 
(gintsts.b.rxstsqlvl == 0); ++ ++ //fprintf(stderr, "Got RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Read RXSTS */ ++ grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp); ++ //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32); ++ ++ /* Clear RXSTSQLVL in GINTSTS */ ++ gintsts.d32 = 0; ++ gintsts.b.rxstsqlvl = 1; ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ switch (grxsts.b.pktsts) { ++ case DWC_GRXSTS_PKTSTS_IN_XFER_COMP: ++ break; ++ ++ default: ++ //fprintf(stderr, "** Unexpected GRXSTS packet status 2 **\n"); ++ break; ++ } ++ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "Waiting for HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Wait for host channel interrupt */ ++ do { ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ } while (gintsts.b.hcintr == 0); ++ ++ //fprintf(stderr, "Got HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ // usleep(100000); ++ // mdelay(100); ++ MDELAY(1); ++ ++ /* ++ * Send handshake packet ++ */ ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* Make sure channel is disabled */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chen) { ++ //fprintf(stderr, "Channel already enabled 3, HCCHAR = %08x\n", hcchar.d32); ++ hcchar.b.chdis = 1; ++ hcchar.b.chen = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ //sleep(1); ++ MDELAY(1000); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ 
dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //if (hcchar.b.chen) { ++ // fprintf(stderr, "** Channel _still_ enabled 3, HCCHAR = %08x **\n", hcchar.d32); ++ //} ++ } ++ ++ /* Set HCTSIZ */ ++ hctsiz.d32 = 0; ++ hctsiz.b.xfersize = 0; ++ hctsiz.b.pktcnt = 1; ++ hctsiz.b.pid = DWC_OTG_HC_PID_DATA1; ++ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); ++ ++ /* Set HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL; ++ hcchar.b.epdir = 0; ++ hcchar.b.epnum = 0; ++ hcchar.b.mps = 8; ++ hcchar.b.chen = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "Waiting for HCINTR intr 3, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Wait for host channel interrupt */ ++ do { ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ } while (gintsts.b.hcintr == 0); ++ ++ //fprintf(stderr, "Got HCINTR intr 3, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Disable HCINTs */ ++ dwc_write_reg32(&hc_regs->hcintmsk, 0x0000); ++ ++ /* Disable HAINTs */ ++ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++} ++#endif /* DWC_HS_ELECT_TST */ ++ ++/** Handles hub class-specific requests.*/ ++int dwc_otg_hcd_hub_control(struct usb_hcd *_hcd, ++ u16 _typeReq, ++ u16 _wValue, ++ u16 _wIndex, ++ char *_buf, ++ u16 _wLength) ++{ ++ int retval = 0; ++ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd (_hcd); ++ dwc_otg_core_if_t *core_if = hcd_to_dwc_otg_hcd (_hcd)->core_if; ++ struct usb_hub_descriptor *desc; ++ hprt0_data_t hprt0 = {.d32 = 0}; ++ ++ uint32_t port_status; ++ ++ switch (_typeReq) { ++ case ClearHubFeature: ++ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearHubFeature 0x%x\n", _wValue); ++ switch (_wValue) { ++ case C_HUB_LOCAL_POWER: ++ case C_HUB_OVER_CURRENT: ++ /* Nothing required here */ ++ break; ++ default: ++ retval = -EINVAL; ++ DWC_ERROR ("DWC OTG HCD - " ++ "ClearHubFeature request %xh unknown\n", _wValue); ++ } ++ break; ++ case ClearPortFeature: ++ if (!_wIndex || _wIndex > 1) ++ goto error; ++ ++ switch (_wValue) { ++ case USB_PORT_FEAT_ENABLE: ++ DWC_DEBUGPL (DBG_ANY, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_ENABLE\n"); ++ hprt0.d32 = dwc_otg_read_hprt0 (core_if); ++ hprt0.b.prtena = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ break; ++ case USB_PORT_FEAT_SUSPEND: ++ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_SUSPEND\n"); ++ hprt0.d32 = dwc_otg_read_hprt0 (core_if); ++ hprt0.b.prtres = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ /* Clear Resume bit */ ++ mdelay (100); ++ hprt0.b.prtres = 0; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ break; ++ 
case USB_PORT_FEAT_POWER:
++ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
++ "ClearPortFeature USB_PORT_FEAT_POWER\n");
++ hprt0.d32 = dwc_otg_read_hprt0 (core_if);
++ hprt0.b.prtpwr = 0;
++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
++ break;
++ case USB_PORT_FEAT_INDICATOR:
++ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
++ "ClearPortFeature USB_PORT_FEAT_INDICATOR\n");
++ /* Port indicator not supported */
++ break;
++ case USB_PORT_FEAT_C_CONNECTION:
++ /* Clears the driver's internal connect status change
++ * flag */
++ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
++ "ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n");
++ dwc_otg_hcd->flags.b.port_connect_status_change = 0;
++ break;
++ case USB_PORT_FEAT_C_RESET:
++ /* Clears the driver's internal Port Reset Change
++ * flag */
++ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
++ "ClearPortFeature USB_PORT_FEAT_C_RESET\n");
++ dwc_otg_hcd->flags.b.port_reset_change = 0;
++ break;
++ case USB_PORT_FEAT_C_ENABLE:
++ /* Clears the driver's internal Port
++ * Enable/Disable Change flag */
++ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
++ "ClearPortFeature USB_PORT_FEAT_C_ENABLE\n");
++ dwc_otg_hcd->flags.b.port_enable_change = 0;
++ break;
++ case USB_PORT_FEAT_C_SUSPEND:
++ /* Clears the driver's internal Port Suspend
++ * Change flag, which is set when resume signaling on
++ * the host port is complete */
++ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
++ "ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n");
++ dwc_otg_hcd->flags.b.port_suspend_change = 0;
++ break;
++ case USB_PORT_FEAT_C_OVER_CURRENT:
++ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
++ "ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n");
++ dwc_otg_hcd->flags.b.port_over_current_change = 0;
++ break;
++ default:
++ retval = -EINVAL;
++ DWC_ERROR ("DWC OTG HCD - "
++ "ClearPortFeature request %xh "
++ "unknown or unsupported\n", _wValue);
++ }
++ break;
++ case GetHubDescriptor:
++ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
++ "GetHubDescriptor\n");
++ desc = (struct usb_hub_descriptor *)_buf;
++ desc->bDescLength = 9;
++ desc->bDescriptorType = 0x29;
++ desc->bNbrPorts = 1;
++ desc->wHubCharacteristics = 0x08;
++ desc->bPwrOn2PwrGood = 1;
++ desc->bHubContrCurrent = 0;
++ desc->bitmap[0] = 0;
++ desc->bitmap[1] = 0xff;
++ break;
++ case GetHubStatus:
++ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
++ "GetHubStatus\n");
++ memset (_buf, 0, 4);
++ break;
++ case GetPortStatus:
++ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
++ "GetPortStatus\n");
++
++ if (!_wIndex || _wIndex > 1)
++ goto error;
++
++ port_status = 0;
++
++ if (dwc_otg_hcd->flags.b.port_connect_status_change)
++ port_status |= (1 << USB_PORT_FEAT_C_CONNECTION);
++
++ if (dwc_otg_hcd->flags.b.port_enable_change)
++ port_status |= (1 << USB_PORT_FEAT_C_ENABLE);
++
++ if (dwc_otg_hcd->flags.b.port_suspend_change)
++ port_status |= (1 << USB_PORT_FEAT_C_SUSPEND);
++
++ if (dwc_otg_hcd->flags.b.port_reset_change)
++ port_status |= (1 << USB_PORT_FEAT_C_RESET);
++
++ if (dwc_otg_hcd->flags.b.port_over_current_change) {
++ DWC_ERROR("Device Not Supported\n");
++ port_status |= (1 << USB_PORT_FEAT_C_OVER_CURRENT);
++ }
++
++ if (!dwc_otg_hcd->flags.b.port_connect_status) {
++ printk("DISCONNECTED PORT\n");
++ /*
++ * The port is disconnected, which means the core is
++ * either in device mode or it soon will be. Just
++ * return 0's for the remainder of the port status
++ * since the port register can't be read if the core
++ * is in device mode.
++ */
++#if 1 // winder.
++ *((u32 *) _buf) = cpu_to_le32(port_status);
++#else
++ *((__le32 *) _buf) = cpu_to_le32(port_status);
++#endif
++ break;
++ }
++
++ hprt0.d32 = dwc_read_reg32(core_if->host_if->hprt0);
++ DWC_DEBUGPL(DBG_HCDV, " HPRT0: 0x%08x\n", hprt0.d32);
++
++ if (hprt0.b.prtconnsts)
++ port_status |= (1 << USB_PORT_FEAT_CONNECTION);
++
++ if (hprt0.b.prtena)
++ port_status |= (1 << USB_PORT_FEAT_ENABLE);
++
++ if (hprt0.b.prtsusp)
++ port_status |= (1 << USB_PORT_FEAT_SUSPEND);
++
++ if (hprt0.b.prtovrcurract)
++ port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT);
++
++ if (hprt0.b.prtrst)
++ port_status |= (1 << USB_PORT_FEAT_RESET);
++
++ if (hprt0.b.prtpwr)
++ port_status |= (1 << USB_PORT_FEAT_POWER);
++
++ if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED)
++ port_status |= USB_PORT_STAT_HIGH_SPEED;
++
++ else if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED)
++ port_status |= (1 << USB_PORT_FEAT_LOWSPEED);
++
++ if (hprt0.b.prttstctl)
++ port_status |= (1 << USB_PORT_FEAT_TEST);
++
++ /* USB_PORT_FEAT_INDICATOR unsupported always 0 */
++#if 1 // winder.
++ *((u32 *) _buf) = cpu_to_le32(port_status);
++#else
++ *((__le32 *) _buf) = cpu_to_le32(port_status);
++#endif
++
++ break;
++ case SetHubFeature:
++ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
++ "SetHubFeature\n");
++ /* No HUB features supported */
++ break;
++ case SetPortFeature:
++ if (_wValue != USB_PORT_FEAT_TEST && (!_wIndex || _wIndex > 1))
++ goto error;
++
++ if (!dwc_otg_hcd->flags.b.port_connect_status) {
++ /*
++ * The port is disconnected, which means the core is
++ * either in device mode or it soon will be. Just
++ * return without doing anything since the port
++ * register can't be written if the core is in device
++ * mode.
++ */
++ break;
++ }
++
++ switch (_wValue) {
++ case USB_PORT_FEAT_SUSPEND:
++ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
++ "SetPortFeature - USB_PORT_FEAT_SUSPEND\n");
++ if (_hcd->self.otg_port == _wIndex
++ && _hcd->self.b_hnp_enable) {
++ gotgctl_data_t gotgctl = {.d32=0};
++ gotgctl.b.hstsethnpen = 1;
++ dwc_modify_reg32(&core_if->core_global_regs->
++ gotgctl, 0, gotgctl.d32);
++ core_if->op_state = A_SUSPEND;
++ }
++ hprt0.d32 = dwc_otg_read_hprt0 (core_if);
++ hprt0.b.prtsusp = 1;
++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
++ //DWC_PRINT( "SUSPEND: HPRT0=%0x\n", hprt0.d32);
++ /* Suspend the Phy Clock */
++ {
++ pcgcctl_data_t pcgcctl = {.d32=0};
++ pcgcctl.b.stoppclk = 1;
++ dwc_write_reg32(core_if->pcgcctl, pcgcctl.d32);
++ }
++
++ /* For HNP the bus must be suspended for at least 200ms.*/
++ if (_hcd->self.b_hnp_enable) {
++ mdelay(200);
++ //DWC_PRINT( "SUSPEND: wait complete! (%d)\n", _hcd->state);
++ }
++ break;
++ case USB_PORT_FEAT_POWER:
++ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
++ "SetPortFeature - USB_PORT_FEAT_POWER\n");
++ hprt0.d32 = dwc_otg_read_hprt0 (core_if);
++ hprt0.b.prtpwr = 1;
++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
++ break;
++ case USB_PORT_FEAT_RESET:
++ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
++ "SetPortFeature - USB_PORT_FEAT_RESET\n");
++ hprt0.d32 = dwc_otg_read_hprt0 (core_if);
++ /* TODO: Is this for OTG protocol??
++ * We should remove OTG totally for the Danube system.
++ * But, in the future, maybe we need this.
++ */ ++#if 1 // winder ++ hprt0.b.prtrst = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++#else ++ /* When B-Host the Port reset bit is set in ++ * the Start HCD Callback function, so that ++ * the reset is started within 1ms of the HNP ++ * success interrupt. */ ++ if (!_hcd->self.is_b_host) { ++ hprt0.b.prtrst = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ } ++#endif ++ /* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */ ++ MDELAY (60); ++ hprt0.b.prtrst = 0; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ break; ++ ++#ifdef DWC_HS_ELECT_TST ++ case USB_PORT_FEAT_TEST: ++ { ++ uint32_t t; ++ gintmsk_data_t gintmsk; ++ ++ t = (_wIndex >> 8); /* MSB wIndex USB */ ++ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "SetPortFeature - USB_PORT_FEAT_TEST %d\n", t); ++ printk("USB_PORT_FEAT_TEST %d\n", t); ++ if (t < 6) { ++ hprt0.d32 = dwc_otg_read_hprt0 (core_if); ++ hprt0.b.prttstctl = t; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ } else { ++ /* Setup global vars with reg addresses (quick and ++ * dirty hack, should be cleaned up) ++ */ ++ global_regs = core_if->core_global_regs; ++ hc_global_regs = core_if->host_if->host_global_regs; ++ hc_regs = (dwc_otg_hc_regs_t *)((char *)global_regs + 0x500); ++ data_fifo = (uint32_t *)((char *)global_regs + 0x1000); ++ ++ if (t == 6) { /* HS_HOST_PORT_SUSPEND_RESUME */ ++ /* Save current interrupt mask */ ++ gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk); ++ ++ /* Disable all interrupts while we muck with ++ * the hardware directly ++ */ ++ dwc_write_reg32(&global_regs->gintmsk, 0); ++ ++ /* 15 second delay per the test spec */ ++ mdelay(15000); ++ ++ /* Drive suspend on the root port */ ++ hprt0.d32 = dwc_otg_read_hprt0 (core_if); ++ hprt0.b.prtsusp = 1; ++ hprt0.b.prtres = 0; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ ++ /* 15 second delay per the test spec */ ++ mdelay(15000); ++ ++ /* Drive resume on the root port */ ++ hprt0.d32 = dwc_otg_read_hprt0 (core_if); ++ hprt0.b.prtsusp = 0; ++ hprt0.b.prtres = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ mdelay(100); ++ ++ /* Clear the resume bit */ ++ hprt0.b.prtres = 0; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ ++ /* Restore interrupts */ ++ dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32); ++ } else if (t == 7) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR setup */ ++ /* Save current interrupt mask */ ++ gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk); ++ ++ /* Disable all interrupts while we muck with ++ * the hardware directly ++ */ ++ dwc_write_reg32(&global_regs->gintmsk, 0); ++ ++ /* 15 second delay per the test spec */ ++ mdelay(15000); ++ ++ /* Send the Setup packet */ ++ do_setup(); ++ ++ /* 15 second delay so nothing else happens for awhile */ ++ mdelay(15000); ++ ++ /* Restore interrupts */ ++ dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32); ++ } else if (t == 8) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR execute */ ++ /* Save current interrupt mask */ ++ gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk); ++ ++ /* Disable all interrupts while we muck with ++ * the hardware directly ++ */ ++ dwc_write_reg32(&global_regs->gintmsk, 0); ++ ++ /* Send the Setup packet */ ++ do_setup(); ++ ++ /* 15 second delay so nothing else happens for awhile */ ++ mdelay(15000); ++ ++ /* Send the In and Ack packets */ ++ do_in_ack(); ++ ++ /* 15 second delay so nothing else happens for awhile */ ++ mdelay(15000); ++ ++ /* Restore interrupts */ ++ dwc_write_reg32(&global_regs->gintmsk, 
gintmsk.d32); ++ } ++ } ++ break; ++ } ++#endif /* DWC_HS_ELECT_TST */ ++ ++ case USB_PORT_FEAT_INDICATOR: ++ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "SetPortFeature - USB_PORT_FEAT_INDICATOR\n"); ++ /* Not supported */ ++ break; ++ default: ++ retval = -EINVAL; ++ DWC_ERROR ("DWC OTG HCD - " ++ "SetPortFeature request %xh " ++ "unknown or unsupported\n", _wValue); ++ break; ++ } ++ break; ++ default: ++error: ++ retval = -EINVAL; ++ DWC_WARN ("DWC OTG HCD - " ++ "Unknown hub control request type or invalid typeReq: %xh wIndex: %xh wValue: %xh\n", ++ _typeReq, _wIndex, _wValue); ++ break; ++ } ++ ++ return retval; ++} ++ ++ ++/** ++ * Assigns transactions from a QTD to a free host channel and initializes the ++ * host channel to perform the transactions. The host channel is removed from ++ * the free list. ++ * ++ * @param _hcd The HCD state structure. ++ * @param _qh Transactions from the first QTD for this QH are selected and ++ * assigned to a free host channel. ++ */ ++static void assign_and_init_hc(dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh) ++{ ++ dwc_hc_t *hc; ++ dwc_otg_qtd_t *qtd; ++ struct urb *urb; ++ ++ DWC_DEBUGPL(DBG_HCDV, "%s(%p,%p)\n", __func__, _hcd, _qh); ++ ++ hc = list_entry(_hcd->free_hc_list.next, dwc_hc_t, hc_list_entry); ++ ++ /* Remove the host channel from the free list. */ ++ list_del_init(&hc->hc_list_entry); ++ ++ qtd = list_entry(_qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); ++ urb = qtd->urb; ++ _qh->channel = hc; ++ _qh->qtd_in_process = qtd; ++ ++ /* ++ * Use usb_pipedevice to determine device address. This address is ++ * 0 before the SET_ADDRESS command and the correct address afterward. ++ */ ++ hc->dev_addr = usb_pipedevice(urb->pipe); ++ hc->ep_num = usb_pipeendpoint(urb->pipe); ++ ++ if (urb->dev->speed == USB_SPEED_LOW) { ++ hc->speed = DWC_OTG_EP_SPEED_LOW; ++ } else if (urb->dev->speed == USB_SPEED_FULL) { ++ hc->speed = DWC_OTG_EP_SPEED_FULL; ++ } else { ++ hc->speed = DWC_OTG_EP_SPEED_HIGH; ++ } ++ hc->max_packet = dwc_max_packet(_qh->maxp); ++ ++ hc->xfer_started = 0; ++ hc->halt_status = DWC_OTG_HC_XFER_NO_HALT_STATUS; ++ hc->error_state = (qtd->error_count > 0); ++ hc->halt_on_queue = 0; ++ hc->halt_pending = 0; ++ hc->requests = 0; ++ ++ /* ++ * The following values may be modified in the transfer type section ++ * below. The xfer_len value may be reduced when the transfer is ++ * started to accommodate the max widths of the XferSize and PktCnt ++ * fields in the HCTSIZn register. 
++ */ ++ hc->do_ping = _qh->ping_state; ++ hc->ep_is_in = (usb_pipein(urb->pipe) != 0); ++ hc->data_pid_start = _qh->data_toggle; ++ hc->multi_count = 1; ++ ++ if (_hcd->core_if->dma_enable) { ++ hc->xfer_buff = (uint8_t *)(u32)urb->transfer_dma + urb->actual_length; ++ } else { ++ hc->xfer_buff = (uint8_t *)urb->transfer_buffer + urb->actual_length; ++ } ++ hc->xfer_len = urb->transfer_buffer_length - urb->actual_length; ++ hc->xfer_count = 0; ++ ++ /* ++ * Set the split attributes ++ */ ++ hc->do_split = 0; ++ if (_qh->do_split) { ++ hc->do_split = 1; ++ hc->xact_pos = qtd->isoc_split_pos; ++ hc->complete_split = qtd->complete_split; ++ hc->hub_addr = urb->dev->tt->hub->devnum; ++ hc->port_addr = urb->dev->ttport; ++ } ++ ++ switch (usb_pipetype(urb->pipe)) { ++ case PIPE_CONTROL: ++ hc->ep_type = DWC_OTG_EP_TYPE_CONTROL; ++ switch (qtd->control_phase) { ++ case DWC_OTG_CONTROL_SETUP: ++ DWC_DEBUGPL(DBG_HCDV, " Control setup transaction\n"); ++ hc->do_ping = 0; ++ hc->ep_is_in = 0; ++ hc->data_pid_start = DWC_OTG_HC_PID_SETUP; ++ if (_hcd->core_if->dma_enable) { ++ hc->xfer_buff = (uint8_t *)(u32)urb->setup_dma; ++ } else { ++ hc->xfer_buff = (uint8_t *)urb->setup_packet; ++ } ++ hc->xfer_len = 8; ++ break; ++ case DWC_OTG_CONTROL_DATA: ++ DWC_DEBUGPL(DBG_HCDV, " Control data transaction\n"); ++ hc->data_pid_start = qtd->data_toggle; ++ break; ++ case DWC_OTG_CONTROL_STATUS: ++ /* ++ * Direction is opposite of data direction or IN if no ++ * data. ++ */ ++ DWC_DEBUGPL(DBG_HCDV, " Control status transaction\n"); ++ if (urb->transfer_buffer_length == 0) { ++ hc->ep_is_in = 1; ++ } else { ++ hc->ep_is_in = (usb_pipein(urb->pipe) != USB_DIR_IN); ++ } ++ if (hc->ep_is_in) { ++ hc->do_ping = 0; ++ } ++ hc->data_pid_start = DWC_OTG_HC_PID_DATA1; ++ hc->xfer_len = 0; ++ if (_hcd->core_if->dma_enable) { ++ hc->xfer_buff = (uint8_t *)_hcd->status_buf_dma; ++ } else { ++ hc->xfer_buff = (uint8_t *)_hcd->status_buf; ++ } ++ break; ++ } ++ break; ++ case PIPE_BULK: ++ hc->ep_type = DWC_OTG_EP_TYPE_BULK; ++ break; ++ case PIPE_INTERRUPT: ++ hc->ep_type = DWC_OTG_EP_TYPE_INTR; ++ break; ++ case PIPE_ISOCHRONOUS: ++ { ++ struct usb_iso_packet_descriptor *frame_desc; ++ frame_desc = &urb->iso_frame_desc[qtd->isoc_frame_index]; ++ hc->ep_type = DWC_OTG_EP_TYPE_ISOC; ++ if (_hcd->core_if->dma_enable) { ++ hc->xfer_buff = (uint8_t *)(u32)urb->transfer_dma; ++ } else { ++ hc->xfer_buff = (uint8_t *)urb->transfer_buffer; ++ } ++ hc->xfer_buff += frame_desc->offset + qtd->isoc_split_offset; ++ hc->xfer_len = frame_desc->length - qtd->isoc_split_offset; ++ ++ if (hc->xact_pos == DWC_HCSPLIT_XACTPOS_ALL) { ++ if (hc->xfer_len <= 188) { ++ hc->xact_pos = DWC_HCSPLIT_XACTPOS_ALL; ++ } ++ else { ++ hc->xact_pos = DWC_HCSPLIT_XACTPOS_BEGIN; ++ } ++ } ++ } ++ break; ++ } ++ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ /* ++ * This value may be modified when the transfer is started to ++ * reflect the actual transfer length. 
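The control-transfer handling above gives the status stage the opposite direction of the data stage, or IN when there is no data stage at all, and it fixes the status-stage PID at DATA1. A compact sketch of that direction rule; status_stage_is_in() is a hypothetical helper written only to make the rule explicit, not part of the driver:

#include <stdbool.h>
#include <stdio.h>

/* Returns true if the status stage of a control transfer is IN.
 * data_len is the length of the data stage, data_stage_in its direction. */
static bool status_stage_is_in(unsigned data_len, bool data_stage_in)
{
    if (data_len == 0)
        return true;        /* no data stage: status is always IN */
    return !data_stage_in;  /* otherwise: opposite of the data stage */
}

int main(void)
{
    printf("GET_DESCRIPTOR (IN data): status %s\n",
           status_stage_is_in(18, true) ? "IN" : "OUT");
    printf("SET_ADDRESS (no data):    status %s\n",
           status_stage_is_in(0, false) ? "IN" : "OUT");
    printf("OUT data stage:           status %s\n",
           status_stage_is_in(8, false) ? "IN" : "OUT");
    return 0;
}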
++ */ ++ hc->multi_count = dwc_hb_mult(_qh->maxp); ++ } ++ ++ dwc_otg_hc_init(_hcd->core_if, hc); ++ hc->qh = _qh; ++} ++#define DEBUG_HOST_CHANNELS ++#ifdef DEBUG_HOST_CHANNELS ++static int last_sel_trans_num_per_scheduled = 0; ++module_param(last_sel_trans_num_per_scheduled, int, 0444); ++ ++static int last_sel_trans_num_nonper_scheduled = 0; ++module_param(last_sel_trans_num_nonper_scheduled, int, 0444); ++ ++static int last_sel_trans_num_avail_hc_at_start = 0; ++module_param(last_sel_trans_num_avail_hc_at_start, int, 0444); ++ ++static int last_sel_trans_num_avail_hc_at_end = 0; ++module_param(last_sel_trans_num_avail_hc_at_end, int, 0444); ++#endif /* DEBUG_HOST_CHANNELS */ ++ ++/** ++ * This function selects transactions from the HCD transfer schedule and ++ * assigns them to available host channels. It is called from HCD interrupt ++ * handler functions. ++ * ++ * @param _hcd The HCD state structure. ++ * ++ * @return The types of new transactions that were assigned to host channels. ++ */ ++dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *_hcd) ++{ ++ struct list_head *qh_ptr; ++ dwc_otg_qh_t *qh; ++ int num_channels; ++ unsigned long flags; ++ dwc_otg_transaction_type_e ret_val = DWC_OTG_TRANSACTION_NONE; ++ ++#ifdef DEBUG_SOF ++ DWC_DEBUGPL(DBG_HCD, " Select Transactions\n"); ++#endif /* */ ++ ++#ifdef DEBUG_HOST_CHANNELS ++ last_sel_trans_num_per_scheduled = 0; ++ last_sel_trans_num_nonper_scheduled = 0; ++ last_sel_trans_num_avail_hc_at_start = _hcd->available_host_channels; ++#endif /* DEBUG_HOST_CHANNELS */ ++ ++ /* Process entries in the periodic ready list. */ ++ num_channels = _hcd->core_if->core_params->host_channels; ++ qh_ptr = _hcd->periodic_sched_ready.next; ++ while (qh_ptr != &_hcd->periodic_sched_ready ++ && !list_empty(&_hcd->free_hc_list)) { ++ ++ // Make sure we leave one channel for non periodic transactions. ++ local_irq_save(flags); ++ if (_hcd->available_host_channels <= 1) { ++ local_irq_restore(flags); ++ break; ++ } ++ _hcd->available_host_channels--; ++ local_irq_restore(flags); ++#ifdef DEBUG_HOST_CHANNELS ++ last_sel_trans_num_per_scheduled++; ++#endif /* DEBUG_HOST_CHANNELS */ ++ ++ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry); ++ assign_and_init_hc(_hcd, qh); ++ ++ /* ++ * Move the QH from the periodic ready schedule to the ++ * periodic assigned schedule. ++ */ ++ qh_ptr = qh_ptr->next; ++ local_irq_save(flags); ++ list_move(&qh->qh_list_entry, &_hcd->periodic_sched_assigned); ++ local_irq_restore(flags); ++ ret_val = DWC_OTG_TRANSACTION_PERIODIC; ++ } ++ ++ /* ++ * Process entries in the deferred portion of the non-periodic list. ++ * A NAK put them here and, at the right time, they need to be ++ * placed on the sched_inactive list. ++ */ ++ qh_ptr = _hcd->non_periodic_sched_deferred.next; ++ while (qh_ptr != &_hcd->non_periodic_sched_deferred) { ++ uint16_t frame_number = ++ dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(_hcd)); ++ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry); ++ qh_ptr = qh_ptr->next; ++ ++ if (dwc_frame_num_le(qh->sched_frame, frame_number)) { ++ // NAK did this ++ /* ++ * Move the QH from the non periodic deferred schedule to ++ * the non periodic inactive schedule. ++ */ ++ local_irq_save(flags); ++ list_move(&qh->qh_list_entry, ++ &_hcd->non_periodic_sched_inactive); ++ local_irq_restore(flags); ++ } ++ } ++ ++ /* ++ * Process entries in the inactive portion of the non-periodic ++ * schedule. Some free host channels may not be used if they are ++ * reserved for periodic transfers. 
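dwc_otg_hcd_select_transactions() above deliberately stops assigning periodic QHs once only one host channel remains free, so non-periodic traffic can never be starved. Stripped of the locking and list handling, the reservation logic amounts to the loop below; all of the counts are made up for the demonstration:

#include <stdio.h>

int main(void)
{
    int available = 4;      /* free host channels, hypothetical */
    int periodic_ready = 6; /* periodic QHs ready this frame */
    int nonperiodic = 3;    /* non-periodic QHs waiting */
    int assigned_per = 0, assigned_nonper = 0;

    /* Periodic pass: keep at least one channel in reserve. */
    while (periodic_ready > 0 && available > 1) {
        available--;
        periodic_ready--;
        assigned_per++;
    }

    /* Non-periodic pass: may use every remaining channel. */
    while (nonperiodic > 0 && available > 0) {
        available--;
        nonperiodic--;
        assigned_nonper++;
    }

    printf("periodic assigned=%d non-periodic assigned=%d channels left=%d\n",
           assigned_per, assigned_nonper, available);
    return 0;
}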
++ */ ++ qh_ptr = _hcd->non_periodic_sched_inactive.next; ++ num_channels = _hcd->core_if->core_params->host_channels; ++ while (qh_ptr != &_hcd->non_periodic_sched_inactive ++ && !list_empty(&_hcd->free_hc_list)) { ++ ++ local_irq_save(flags); ++ if (_hcd->available_host_channels < 1) { ++ local_irq_restore(flags); ++ break; ++ } ++ _hcd->available_host_channels--; ++ local_irq_restore(flags); ++#ifdef DEBUG_HOST_CHANNELS ++ last_sel_trans_num_nonper_scheduled++; ++#endif /* DEBUG_HOST_CHANNELS */ ++ ++ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry); ++ assign_and_init_hc(_hcd, qh); ++ ++ /* ++ * Move the QH from the non-periodic inactive schedule to the ++ * non-periodic active schedule. ++ */ ++ qh_ptr = qh_ptr->next; ++ local_irq_save(flags); ++ list_move(&qh->qh_list_entry, &_hcd->non_periodic_sched_active); ++ local_irq_restore(flags); ++ ++ if (ret_val == DWC_OTG_TRANSACTION_NONE) { ++ ret_val = DWC_OTG_TRANSACTION_NON_PERIODIC; ++ } else { ++ ret_val = DWC_OTG_TRANSACTION_ALL; ++ } ++ ++ } ++#ifdef DEBUG_HOST_CHANNELS ++ last_sel_trans_num_avail_hc_at_end = _hcd->available_host_channels; ++#endif /* DEBUG_HOST_CHANNELS */ ++ ++ return ret_val; ++} ++ ++/** ++ * Attempts to queue a single transaction request for a host channel ++ * associated with either a periodic or non-periodic transfer. This function ++ * assumes that there is space available in the appropriate request queue. For ++ * an OUT transfer or SETUP transaction in Slave mode, it checks whether space ++ * is available in the appropriate Tx FIFO. ++ * ++ * @param _hcd The HCD state structure. ++ * @param _hc Host channel descriptor associated with either a periodic or ++ * non-periodic transfer. ++ * @param _fifo_dwords_avail Number of DWORDs available in the periodic Tx ++ * FIFO for periodic transfers or the non-periodic Tx FIFO for non-periodic ++ * transfers. ++ * ++ * @return 1 if a request is queued and more requests may be needed to ++ * complete the transfer, 0 if no more requests are required for this ++ * transfer, -1 if there is insufficient space in the Tx FIFO. ++ */ ++static int queue_transaction(dwc_otg_hcd_t *_hcd, ++ dwc_hc_t *_hc, ++ uint16_t _fifo_dwords_avail) ++{ ++ int retval; ++ ++ if (_hcd->core_if->dma_enable) { ++ if (!_hc->xfer_started) { ++ dwc_otg_hc_start_transfer(_hcd->core_if, _hc); ++ _hc->qh->ping_state = 0; ++ } ++ retval = 0; ++ } else if (_hc->halt_pending) { ++ /* Don't queue a request if the channel has been halted. */ ++ retval = 0; ++ } else if (_hc->halt_on_queue) { ++ dwc_otg_hc_halt(_hcd->core_if, _hc, _hc->halt_status); ++ retval = 0; ++ } else if (_hc->do_ping) { ++ if (!_hc->xfer_started) { ++ dwc_otg_hc_start_transfer(_hcd->core_if, _hc); ++ } ++ retval = 0; ++ } else if (!_hc->ep_is_in || ++ _hc->data_pid_start == DWC_OTG_HC_PID_SETUP) { ++ if ((_fifo_dwords_avail * 4) >= _hc->max_packet) { ++ if (!_hc->xfer_started) { ++ dwc_otg_hc_start_transfer(_hcd->core_if, _hc); ++ retval = 1; ++ } else { ++ retval = dwc_otg_hc_continue_transfer(_hcd->core_if, _hc); ++ } ++ } else { ++ retval = -1; ++ } ++ } else { ++ if (!_hc->xfer_started) { ++ dwc_otg_hc_start_transfer(_hcd->core_if, _hc); ++ retval = 1; ++ } else { ++ retval = dwc_otg_hc_continue_transfer(_hcd->core_if, _hc); ++ } ++ } ++ ++ return retval; ++} ++ ++/** ++ * Processes active non-periodic channels and queues transactions for these ++ * channels to the DWC_otg controller. 
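In Slave mode, queue_transaction() above only writes an OUT or SETUP packet when the Tx FIFO has room for a full max-packet payload; the FIFO depth is reported in 32-bit words, hence the multiplication by 4. A small sketch of that check with illustrative numbers only:

#include <stdio.h>

/* Mirrors the Slave-mode space check: FIFO depth is given in DWORDs. */
static int out_packet_fits(unsigned fifo_dwords_avail, unsigned max_packet)
{
    return (fifo_dwords_avail * 4) >= max_packet;
}

int main(void)
{
    printf("512-byte bulk packet, 128 dwords free: %s\n",
           out_packet_fits(128, 512) ? "queue it" : "wait for FIFO-empty IRQ");
    printf("512-byte bulk packet, 100 dwords free: %s\n",
           out_packet_fits(100, 512) ? "queue it" : "wait for FIFO-empty IRQ");
    return 0;
}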
After queueing transactions, the NP Tx ++ * FIFO Empty interrupt is enabled if there are more transactions to queue as ++ * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx ++ * FIFO Empty interrupt is disabled. ++ */ ++static void process_non_periodic_channels(dwc_otg_hcd_t *_hcd) ++{ ++ gnptxsts_data_t tx_status; ++ struct list_head *orig_qh_ptr; ++ dwc_otg_qh_t *qh; ++ int status; ++ int no_queue_space = 0; ++ int no_fifo_space = 0; ++ int more_to_do = 0; ++ ++ dwc_otg_core_global_regs_t *global_regs = _hcd->core_if->core_global_regs; ++ ++ DWC_DEBUGPL(DBG_HCDV, "Queue non-periodic transactions\n"); ++#ifdef DEBUG ++ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts); ++ DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (before queue): %d\n", ++ tx_status.b.nptxqspcavail); ++ DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (before queue): %d\n", ++ tx_status.b.nptxfspcavail); ++#endif ++ /* ++ * Keep track of the starting point. Skip over the start-of-list ++ * entry. ++ */ ++ if (_hcd->non_periodic_qh_ptr == &_hcd->non_periodic_sched_active) { ++ _hcd->non_periodic_qh_ptr = _hcd->non_periodic_qh_ptr->next; ++ } ++ orig_qh_ptr = _hcd->non_periodic_qh_ptr; ++ ++ /* ++ * Process once through the active list or until no more space is ++ * available in the request queue or the Tx FIFO. ++ */ ++ do { ++ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts); ++ if (!_hcd->core_if->dma_enable && tx_status.b.nptxqspcavail == 0) { ++ no_queue_space = 1; ++ break; ++ } ++ ++ qh = list_entry(_hcd->non_periodic_qh_ptr, dwc_otg_qh_t, qh_list_entry); ++ status = queue_transaction(_hcd, qh->channel, tx_status.b.nptxfspcavail); ++ ++ if (status > 0) { ++ more_to_do = 1; ++ } else if (status < 0) { ++ no_fifo_space = 1; ++ break; ++ } ++ ++ /* Advance to next QH, skipping start-of-list entry. */ ++ _hcd->non_periodic_qh_ptr = _hcd->non_periodic_qh_ptr->next; ++ if (_hcd->non_periodic_qh_ptr == &_hcd->non_periodic_sched_active) { ++ _hcd->non_periodic_qh_ptr = _hcd->non_periodic_qh_ptr->next; ++ } ++ ++ } while (_hcd->non_periodic_qh_ptr != orig_qh_ptr); ++ ++ if (!_hcd->core_if->dma_enable) { ++ gintmsk_data_t intr_mask = {.d32 = 0}; ++ intr_mask.b.nptxfempty = 1; ++ ++#ifdef DEBUG ++ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts); ++ DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (after queue): %d\n", ++ tx_status.b.nptxqspcavail); ++ DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (after queue): %d\n", ++ tx_status.b.nptxfspcavail); ++#endif ++ if (more_to_do || no_queue_space || no_fifo_space) { ++ /* ++ * May need to queue more transactions as the request ++ * queue or Tx FIFO empties. Enable the non-periodic ++ * Tx FIFO empty interrupt. (Always use the half-empty ++ * level to ensure that new requests are loaded as ++ * soon as possible.) ++ */ ++ dwc_modify_reg32(&global_regs->gintmsk, 0, intr_mask.d32); ++ } else { ++ /* ++ * Disable the Tx FIFO empty interrupt since there are ++ * no more transactions that need to be queued right ++ * now. This function is called from interrupt ++ * handlers to queue more transactions as transfer ++ * states change. ++ */ ++ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0); ++ } ++ } ++} ++ ++/** ++ * Processes periodic channels for the next frame and queues transactions for ++ * these channels to the DWC_otg controller. After queueing transactions, the ++ * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions ++ * to queue as Periodic Tx FIFO or request queue space becomes available. 
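process_non_periodic_channels() above walks the active list round-robin: it remembers where it started, keeps the cursor in non_periodic_qh_ptr across calls, and always steps over the start-of-list sentinel so every QH gets a fair turn. The same traversal pattern on a tiny hand-rolled circular list is sketched below; this is a simplification with invented names (the driver uses struct list_head) and, like the driver, it assumes the list is non-empty:

#include <stdio.h>

struct node {
    const char *name;       /* NULL marks the list-head sentinel */
    struct node *next;
};

/* Visit every entry once, starting after 'cursor' and skipping the sentinel,
 * then return the new cursor -- the round-robin state kept between calls. */
static struct node *service_round(struct node *head, struct node *cursor)
{
    struct node *start;

    if (cursor == head)
        cursor = cursor->next;      /* never point at the sentinel */
    start = cursor;
    do {
        printf("queue transaction for %s\n", cursor->name);
        cursor = cursor->next;
        if (cursor == head)         /* step over the sentinel */
            cursor = cursor->next;
    } while (cursor != start);
    return cursor;
}

int main(void)
{
    struct node head = { NULL, NULL };
    struct node a = { "qh-A", NULL }, b = { "qh-B", NULL }, c = { "qh-C", NULL };

    head.next = &a; a.next = &b; b.next = &c; c.next = &head;
    service_round(&head, &head);
    return 0;
}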
++ * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled. ++ */ ++static void process_periodic_channels(dwc_otg_hcd_t *_hcd) ++{ ++ hptxsts_data_t tx_status; ++ struct list_head *qh_ptr; ++ dwc_otg_qh_t *qh; ++ int status; ++ int no_queue_space = 0; ++ int no_fifo_space = 0; ++ ++ dwc_otg_host_global_regs_t *host_regs; ++ host_regs = _hcd->core_if->host_if->host_global_regs; ++ ++ DWC_DEBUGPL(DBG_HCDV, "Queue periodic transactions\n"); ++#ifdef DEBUG ++ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts); ++ DWC_DEBUGPL(DBG_HCDV, " P Tx Req Queue Space Avail (before queue): %d\n", ++ tx_status.b.ptxqspcavail); ++ DWC_DEBUGPL(DBG_HCDV, " P Tx FIFO Space Avail (before queue): %d\n", ++ tx_status.b.ptxfspcavail); ++#endif ++ ++ qh_ptr = _hcd->periodic_sched_assigned.next; ++ while (qh_ptr != &_hcd->periodic_sched_assigned) { ++ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts); ++ if (tx_status.b.ptxqspcavail == 0) { ++ no_queue_space = 1; ++ break; ++ } ++ ++ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry); ++ ++ /* ++ * Set a flag if we're queuing high-bandwidth in slave mode. ++ * The flag prevents any halts to get into the request queue in ++ * the middle of multiple high-bandwidth packets getting queued. ++ */ ++ if ((!_hcd->core_if->dma_enable) && ++ (qh->channel->multi_count > 1)) ++ { ++ _hcd->core_if->queuing_high_bandwidth = 1; ++ } ++ ++ status = queue_transaction(_hcd, qh->channel, tx_status.b.ptxfspcavail); ++ if (status < 0) { ++ no_fifo_space = 1; ++ break; ++ } ++ ++ /* ++ * In Slave mode, stay on the current transfer until there is ++ * nothing more to do or the high-bandwidth request count is ++ * reached. In DMA mode, only need to queue one request. The ++ * controller automatically handles multiple packets for ++ * high-bandwidth transfers. ++ */ ++ if (_hcd->core_if->dma_enable || ++ (status == 0 || ++ qh->channel->requests == qh->channel->multi_count)) { ++ qh_ptr = qh_ptr->next; ++ /* ++ * Move the QH from the periodic assigned schedule to ++ * the periodic queued schedule. ++ */ ++ list_move(&qh->qh_list_entry, &_hcd->periodic_sched_queued); ++ ++ /* done queuing high bandwidth */ ++ _hcd->core_if->queuing_high_bandwidth = 0; ++ } ++ } ++ ++ if (!_hcd->core_if->dma_enable) { ++ dwc_otg_core_global_regs_t *global_regs; ++ gintmsk_data_t intr_mask = {.d32 = 0}; ++ ++ global_regs = _hcd->core_if->core_global_regs; ++ intr_mask.b.ptxfempty = 1; ++#ifdef DEBUG ++ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts); ++ DWC_DEBUGPL(DBG_HCDV, " P Tx Req Queue Space Avail (after queue): %d\n", ++ tx_status.b.ptxqspcavail); ++ DWC_DEBUGPL(DBG_HCDV, " P Tx FIFO Space Avail (after queue): %d\n", ++ tx_status.b.ptxfspcavail); ++#endif ++ if (!(list_empty(&_hcd->periodic_sched_assigned)) || ++ no_queue_space || no_fifo_space) { ++ /* ++ * May need to queue more transactions as the request ++ * queue or Tx FIFO empties. Enable the periodic Tx ++ * FIFO empty interrupt. (Always use the half-empty ++ * level to ensure that new requests are loaded as ++ * soon as possible.) ++ */ ++ dwc_modify_reg32(&global_regs->gintmsk, 0, intr_mask.d32); ++ } else { ++ /* ++ * Disable the Tx FIFO empty interrupt since there are ++ * no more transactions that need to be queued right ++ * now. This function is called from interrupt ++ * handlers to queue more transactions as transfer ++ * states change. 
++ */ ++ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0); ++ } ++ } ++} ++ ++/** ++ * This function processes the currently active host channels and queues ++ * transactions for these channels to the DWC_otg controller. It is called ++ * from HCD interrupt handler functions. ++ * ++ * @param _hcd The HCD state structure. ++ * @param _tr_type The type(s) of transactions to queue (non-periodic, ++ * periodic, or both). ++ */ ++void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t *_hcd, ++ dwc_otg_transaction_type_e _tr_type) ++{ ++#ifdef DEBUG_SOF ++ DWC_DEBUGPL(DBG_HCD, "Queue Transactions\n"); ++#endif ++ /* Process host channels associated with periodic transfers. */ ++ if ((_tr_type == DWC_OTG_TRANSACTION_PERIODIC || ++ _tr_type == DWC_OTG_TRANSACTION_ALL) && ++ !list_empty(&_hcd->periodic_sched_assigned)) { ++ ++ process_periodic_channels(_hcd); ++ } ++ ++ /* Process host channels associated with non-periodic transfers. */ ++ if ((_tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC || ++ _tr_type == DWC_OTG_TRANSACTION_ALL)) { ++ if (!list_empty(&_hcd->non_periodic_sched_active)) { ++ process_non_periodic_channels(_hcd); ++ } else { ++ /* ++ * Ensure NP Tx FIFO empty interrupt is disabled when ++ * there are no non-periodic transfers to process. ++ */ ++ gintmsk_data_t gintmsk = {.d32 = 0}; ++ gintmsk.b.nptxfempty = 1; ++ dwc_modify_reg32(&_hcd->core_if->core_global_regs->gintmsk, gintmsk.d32, 0); ++ } ++ } ++} ++ ++/** ++ * Sets the final status of an URB and returns it to the device driver. Any ++ * required cleanup of the URB is performed. ++ */ ++void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t * _hcd, struct urb *_urb, ++ int _status) ++ __releases(_hcd->lock) ++__acquires(_hcd->lock) ++{ ++#ifdef DEBUG ++ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) { ++ DWC_PRINT("%s: urb %p, device %d, ep %d %s, status=%d\n", ++ __func__, _urb, usb_pipedevice(_urb->pipe), ++ usb_pipeendpoint(_urb->pipe), ++ usb_pipein(_urb->pipe) ? "IN" : "OUT", _status); ++ if (usb_pipetype(_urb->pipe) == PIPE_ISOCHRONOUS) { ++ int i; ++ for (i = 0; i < _urb->number_of_packets; i++) { ++ DWC_PRINT(" ISO Desc %d status: %d\n", ++ i, _urb->iso_frame_desc[i].status); ++ } ++ } ++ } ++#endif ++ ++ _urb->status = _status; ++ _urb->hcpriv = NULL; ++ usb_hcd_unlink_urb_from_ep(dwc_otg_hcd_to_hcd(_hcd), _urb); ++ spin_unlock(&_hcd->lock); ++ usb_hcd_giveback_urb(dwc_otg_hcd_to_hcd(_hcd), _urb, _status); ++ spin_lock(&_hcd->lock); ++} ++ ++/* ++ * Returns the Queue Head for an URB. ++ */ ++dwc_otg_qh_t *dwc_urb_to_qh(struct urb *_urb) ++{ ++ struct usb_host_endpoint *ep = dwc_urb_to_endpoint(_urb); ++ return (dwc_otg_qh_t *)ep->hcpriv; ++} ++ ++#ifdef DEBUG ++void dwc_print_setup_data (uint8_t *setup) ++{ ++ int i; ++ if (CHK_DEBUG_LEVEL(DBG_HCD)){ ++ DWC_PRINT("Setup Data = MSB "); ++ for (i=7; i>=0; i--) DWC_PRINT ("%02x ", setup[i]); ++ DWC_PRINT("\n"); ++ DWC_PRINT(" bmRequestType Tranfer = %s\n", (setup[0]&0x80) ? 
"Device-to-Host" : "Host-to-Device"); ++ DWC_PRINT(" bmRequestType Type = "); ++ switch ((setup[0]&0x60) >> 5) { ++ case 0: DWC_PRINT("Standard\n"); break; ++ case 1: DWC_PRINT("Class\n"); break; ++ case 2: DWC_PRINT("Vendor\n"); break; ++ case 3: DWC_PRINT("Reserved\n"); break; ++ } ++ DWC_PRINT(" bmRequestType Recipient = "); ++ switch (setup[0]&0x1f) { ++ case 0: DWC_PRINT("Device\n"); break; ++ case 1: DWC_PRINT("Interface\n"); break; ++ case 2: DWC_PRINT("Endpoint\n"); break; ++ case 3: DWC_PRINT("Other\n"); break; ++ default: DWC_PRINT("Reserved\n"); break; ++ } ++ DWC_PRINT(" bRequest = 0x%0x\n", setup[1]); ++ DWC_PRINT(" wValue = 0x%0x\n", *((uint16_t *)&setup[2])); ++ DWC_PRINT(" wIndex = 0x%0x\n", *((uint16_t *)&setup[4])); ++ DWC_PRINT(" wLength = 0x%0x\n\n", *((uint16_t *)&setup[6])); ++ } ++} ++#endif ++ ++void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t *_hcd) { ++#ifdef DEBUG ++#if 0 ++ DWC_PRINT("Frame remaining at SOF:\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ _hcd->frrem_samples, _hcd->frrem_accum, ++ (_hcd->frrem_samples > 0) ? ++ _hcd->frrem_accum/_hcd->frrem_samples : 0); ++ ++ DWC_PRINT("\n"); ++ DWC_PRINT("Frame remaining at start_transfer (uframe 7):\n"); ++ DWC_PRINT(" samples %u, accum %u, avg %u\n", ++ _hcd->core_if->hfnum_7_samples, _hcd->core_if->hfnum_7_frrem_accum, ++ (_hcd->core_if->hfnum_7_samples > 0) ? ++ _hcd->core_if->hfnum_7_frrem_accum/_hcd->core_if->hfnum_7_samples : 0); ++ DWC_PRINT("Frame remaining at start_transfer (uframe 0):\n"); ++ DWC_PRINT(" samples %u, accum %u, avg %u\n", ++ _hcd->core_if->hfnum_0_samples, _hcd->core_if->hfnum_0_frrem_accum, ++ (_hcd->core_if->hfnum_0_samples > 0) ? ++ _hcd->core_if->hfnum_0_frrem_accum/_hcd->core_if->hfnum_0_samples : 0); ++ DWC_PRINT("Frame remaining at start_transfer (uframe 1-6):\n"); ++ DWC_PRINT(" samples %u, accum %u, avg %u\n", ++ _hcd->core_if->hfnum_other_samples, _hcd->core_if->hfnum_other_frrem_accum, ++ (_hcd->core_if->hfnum_other_samples > 0) ? ++ _hcd->core_if->hfnum_other_frrem_accum/_hcd->core_if->hfnum_other_samples : 0); ++ ++ DWC_PRINT("\n"); ++ DWC_PRINT("Frame remaining at sample point A (uframe 7):\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ _hcd->hfnum_7_samples_a, _hcd->hfnum_7_frrem_accum_a, ++ (_hcd->hfnum_7_samples_a > 0) ? ++ _hcd->hfnum_7_frrem_accum_a/_hcd->hfnum_7_samples_a : 0); ++ DWC_PRINT("Frame remaining at sample point A (uframe 0):\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ _hcd->hfnum_0_samples_a, _hcd->hfnum_0_frrem_accum_a, ++ (_hcd->hfnum_0_samples_a > 0) ? ++ _hcd->hfnum_0_frrem_accum_a/_hcd->hfnum_0_samples_a : 0); ++ DWC_PRINT("Frame remaining at sample point A (uframe 1-6):\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ _hcd->hfnum_other_samples_a, _hcd->hfnum_other_frrem_accum_a, ++ (_hcd->hfnum_other_samples_a > 0) ? ++ _hcd->hfnum_other_frrem_accum_a/_hcd->hfnum_other_samples_a : 0); ++ ++ DWC_PRINT("\n"); ++ DWC_PRINT("Frame remaining at sample point B (uframe 7):\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ _hcd->hfnum_7_samples_b, _hcd->hfnum_7_frrem_accum_b, ++ (_hcd->hfnum_7_samples_b > 0) ? ++ _hcd->hfnum_7_frrem_accum_b/_hcd->hfnum_7_samples_b : 0); ++ DWC_PRINT("Frame remaining at sample point B (uframe 0):\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ _hcd->hfnum_0_samples_b, _hcd->hfnum_0_frrem_accum_b, ++ (_hcd->hfnum_0_samples_b > 0) ? 
++ _hcd->hfnum_0_frrem_accum_b/_hcd->hfnum_0_samples_b : 0); ++ DWC_PRINT("Frame remaining at sample point B (uframe 1-6):\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ _hcd->hfnum_other_samples_b, _hcd->hfnum_other_frrem_accum_b, ++ (_hcd->hfnum_other_samples_b > 0) ? ++ _hcd->hfnum_other_frrem_accum_b/_hcd->hfnum_other_samples_b : 0); ++#endif ++#endif ++} ++ ++void dwc_otg_hcd_dump_state(dwc_otg_hcd_t *_hcd) ++{ ++#ifdef DEBUG ++ int num_channels; ++ int i; ++ gnptxsts_data_t np_tx_status; ++ hptxsts_data_t p_tx_status; ++ ++ num_channels = _hcd->core_if->core_params->host_channels; ++ DWC_PRINT("\n"); ++ DWC_PRINT("************************************************************\n"); ++ DWC_PRINT("HCD State:\n"); ++ DWC_PRINT(" Num channels: %d\n", num_channels); ++ for (i = 0; i < num_channels; i++) { ++ dwc_hc_t *hc = _hcd->hc_ptr_array[i]; ++ DWC_PRINT(" Channel %d:\n", i); ++ DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n", ++ hc->dev_addr, hc->ep_num, hc->ep_is_in); ++ DWC_PRINT(" speed: %d\n", hc->speed); ++ DWC_PRINT(" ep_type: %d\n", hc->ep_type); ++ DWC_PRINT(" max_packet: %d\n", hc->max_packet); ++ DWC_PRINT(" data_pid_start: %d\n", hc->data_pid_start); ++ DWC_PRINT(" multi_count: %d\n", hc->multi_count); ++ DWC_PRINT(" xfer_started: %d\n", hc->xfer_started); ++ DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buff); ++ DWC_PRINT(" xfer_len: %d\n", hc->xfer_len); ++ DWC_PRINT(" xfer_count: %d\n", hc->xfer_count); ++ DWC_PRINT(" halt_on_queue: %d\n", hc->halt_on_queue); ++ DWC_PRINT(" halt_pending: %d\n", hc->halt_pending); ++ DWC_PRINT(" halt_status: %d\n", hc->halt_status); ++ DWC_PRINT(" do_split: %d\n", hc->do_split); ++ DWC_PRINT(" complete_split: %d\n", hc->complete_split); ++ DWC_PRINT(" hub_addr: %d\n", hc->hub_addr); ++ DWC_PRINT(" port_addr: %d\n", hc->port_addr); ++ DWC_PRINT(" xact_pos: %d\n", hc->xact_pos); ++ DWC_PRINT(" requests: %d\n", hc->requests); ++ DWC_PRINT(" qh: %p\n", hc->qh); ++ if (hc->xfer_started) { ++ hfnum_data_t hfnum; ++ hcchar_data_t hcchar; ++ hctsiz_data_t hctsiz; ++ hcint_data_t hcint; ++ hcintmsk_data_t hcintmsk; ++ hfnum.d32 = dwc_read_reg32(&_hcd->core_if->host_if->host_global_regs->hfnum); ++ hcchar.d32 = dwc_read_reg32(&_hcd->core_if->host_if->hc_regs[i]->hcchar); ++ hctsiz.d32 = dwc_read_reg32(&_hcd->core_if->host_if->hc_regs[i]->hctsiz); ++ hcint.d32 = dwc_read_reg32(&_hcd->core_if->host_if->hc_regs[i]->hcint); ++ hcintmsk.d32 = dwc_read_reg32(&_hcd->core_if->host_if->hc_regs[i]->hcintmsk); ++ DWC_PRINT(" hfnum: 0x%08x\n", hfnum.d32); ++ DWC_PRINT(" hcchar: 0x%08x\n", hcchar.d32); ++ DWC_PRINT(" hctsiz: 0x%08x\n", hctsiz.d32); ++ DWC_PRINT(" hcint: 0x%08x\n", hcint.d32); ++ DWC_PRINT(" hcintmsk: 0x%08x\n", hcintmsk.d32); ++ } ++ if (hc->xfer_started && (hc->qh != NULL) && (hc->qh->qtd_in_process != NULL)) { ++ dwc_otg_qtd_t *qtd; ++ struct urb *urb; ++ qtd = hc->qh->qtd_in_process; ++ urb = qtd->urb; ++ DWC_PRINT(" URB Info:\n"); ++ DWC_PRINT(" qtd: %p, urb: %p\n", qtd, urb); ++ if (urb != NULL) { ++ DWC_PRINT(" Dev: %d, EP: %d %s\n", ++ usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe), ++ usb_pipein(urb->pipe) ? 
"IN" : "OUT"); ++ DWC_PRINT(" Max packet size: %d\n", ++ usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))); ++ DWC_PRINT(" transfer_buffer: %p\n", urb->transfer_buffer); ++ DWC_PRINT(" transfer_dma: %p\n", (void *)urb->transfer_dma); ++ DWC_PRINT(" transfer_buffer_length: %d\n", urb->transfer_buffer_length); ++ DWC_PRINT(" actual_length: %d\n", urb->actual_length); ++ } ++ } ++ } ++ //DWC_PRINT(" non_periodic_channels: %d\n", _hcd->non_periodic_channels); ++ //DWC_PRINT(" periodic_channels: %d\n", _hcd->periodic_channels); ++ DWC_PRINT(" available_channels: %d\n", _hcd->available_host_channels); ++ DWC_PRINT(" periodic_usecs: %d\n", _hcd->periodic_usecs); ++ np_tx_status.d32 = dwc_read_reg32(&_hcd->core_if->core_global_regs->gnptxsts); ++ DWC_PRINT(" NP Tx Req Queue Space Avail: %d\n", np_tx_status.b.nptxqspcavail); ++ DWC_PRINT(" NP Tx FIFO Space Avail: %d\n", np_tx_status.b.nptxfspcavail); ++ p_tx_status.d32 = dwc_read_reg32(&_hcd->core_if->host_if->host_global_regs->hptxsts); ++ DWC_PRINT(" P Tx Req Queue Space Avail: %d\n", p_tx_status.b.ptxqspcavail); ++ DWC_PRINT(" P Tx FIFO Space Avail: %d\n", p_tx_status.b.ptxfspcavail); ++ dwc_otg_hcd_dump_frrem(_hcd); ++ dwc_otg_dump_global_registers(_hcd->core_if); ++ dwc_otg_dump_host_registers(_hcd->core_if); ++ DWC_PRINT("************************************************************\n"); ++ DWC_PRINT("\n"); ++#endif ++} ++#endif /* DWC_DEVICE_ONLY */ +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_hcd.h +@@ -0,0 +1,676 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_hcd.h $ ++ * $Revision: 1.1.1.1 $ ++ * $Date: 2009-04-17 06:15:34 $ ++ * $Change: 537387 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. 
++ * ========================================================================== */ ++#ifndef DWC_DEVICE_ONLY ++#if !defined(__DWC_HCD_H__) ++#define __DWC_HCD_H__ ++ ++#include <linux/list.h> ++#include <linux/usb.h> ++#include <linux/usb/hcd.h> ++ ++struct lm_device; ++struct dwc_otg_device; ++ ++#include "dwc_otg_cil.h" ++//#include "dwc_otg_ifx.h" // winder ++ ++ ++/** ++ * @file ++ * ++ * This file contains the structures, constants, and interfaces for ++ * the Host Contoller Driver (HCD). ++ * ++ * The Host Controller Driver (HCD) is responsible for translating requests ++ * from the USB Driver into the appropriate actions on the DWC_otg controller. ++ * It isolates the USBD from the specifics of the controller by providing an ++ * API to the USBD. ++ */ ++ ++/** ++ * Phases for control transfers. ++ */ ++typedef enum dwc_otg_control_phase { ++ DWC_OTG_CONTROL_SETUP, ++ DWC_OTG_CONTROL_DATA, ++ DWC_OTG_CONTROL_STATUS ++} dwc_otg_control_phase_e; ++ ++/** Transaction types. */ ++typedef enum dwc_otg_transaction_type { ++ DWC_OTG_TRANSACTION_NONE, ++ DWC_OTG_TRANSACTION_PERIODIC, ++ DWC_OTG_TRANSACTION_NON_PERIODIC, ++ DWC_OTG_TRANSACTION_ALL ++} dwc_otg_transaction_type_e; ++ ++/** ++ * A Queue Transfer Descriptor (QTD) holds the state of a bulk, control, ++ * interrupt, or isochronous transfer. A single QTD is created for each URB ++ * (of one of these types) submitted to the HCD. The transfer associated with ++ * a QTD may require one or multiple transactions. ++ * ++ * A QTD is linked to a Queue Head, which is entered in either the ++ * non-periodic or periodic schedule for execution. When a QTD is chosen for ++ * execution, some or all of its transactions may be executed. After ++ * execution, the state of the QTD is updated. The QTD may be retired if all ++ * its transactions are complete or if an error occurred. Otherwise, it ++ * remains in the schedule so more transactions can be executed later. ++ */ ++struct dwc_otg_qh; ++typedef struct dwc_otg_qtd { ++ /** ++ * Determines the PID of the next data packet for the data phase of ++ * control transfers. Ignored for other transfer types.<br> ++ * One of the following values: ++ * - DWC_OTG_HC_PID_DATA0 ++ * - DWC_OTG_HC_PID_DATA1 ++ */ ++ uint8_t data_toggle; ++ ++ /** Current phase for control transfers (Setup, Data, or Status). */ ++ dwc_otg_control_phase_e control_phase; ++ ++ /** Keep track of the current split type ++ * for FS/LS endpoints on a HS Hub */ ++ uint8_t complete_split; ++ ++ /** How many bytes transferred during SSPLIT OUT */ ++ uint32_t ssplit_out_xfer_count; ++ ++ /** ++ * Holds the number of bus errors that have occurred for a transaction ++ * within this transfer. ++ */ ++ uint8_t error_count; ++ ++ /** ++ * Index of the next frame descriptor for an isochronous transfer. A ++ * frame descriptor describes the buffer position and length of the ++ * data to be transferred in the next scheduled (micro)frame of an ++ * isochronous transfer. It also holds status for that transaction. ++ * The frame index starts at 0. 
++ */ ++ int isoc_frame_index; ++ ++ /** Position of the ISOC split on full/low speed */ ++ uint8_t isoc_split_pos; ++ ++ /** Position of the ISOC split in the buffer for the current frame */ ++ uint16_t isoc_split_offset; ++ ++ /** URB for this transfer */ ++ struct urb *urb; ++ ++ /** This list of QTDs */ ++ struct list_head qtd_list_entry; ++ ++ /* Field to track the qh pointer */ ++ struct dwc_otg_qh *qtd_qh_ptr; ++} dwc_otg_qtd_t; ++ ++/** ++ * A Queue Head (QH) holds the static characteristics of an endpoint and ++ * maintains a list of transfers (QTDs) for that endpoint. A QH structure may ++ * be entered in either the non-periodic or periodic schedule. ++ */ ++typedef struct dwc_otg_qh { ++ /** ++ * Endpoint type. ++ * One of the following values: ++ * - USB_ENDPOINT_XFER_CONTROL ++ * - USB_ENDPOINT_XFER_ISOC ++ * - USB_ENDPOINT_XFER_BULK ++ * - USB_ENDPOINT_XFER_INT ++ */ ++ uint8_t ep_type; ++ uint8_t ep_is_in; ++ ++ /** wMaxPacketSize Field of Endpoint Descriptor. */ ++ uint16_t maxp; ++ ++ /** ++ * Determines the PID of the next data packet for non-control ++ * transfers. Ignored for control transfers.<br> ++ * One of the following values: ++ * - DWC_OTG_HC_PID_DATA0 ++ * - DWC_OTG_HC_PID_DATA1 ++ */ ++ uint8_t data_toggle; ++ ++ /** Ping state if 1. */ ++ uint8_t ping_state; ++ ++ /** ++ * List of QTDs for this QH. ++ */ ++ struct list_head qtd_list; ++ ++ /** Host channel currently processing transfers for this QH. */ ++ dwc_hc_t *channel; ++ ++ /** QTD currently assigned to a host channel for this QH. */ ++ dwc_otg_qtd_t *qtd_in_process; ++ ++ /** Full/low speed endpoint on high-speed hub requires split. */ ++ uint8_t do_split; ++ ++ /** @name Periodic schedule information */ ++ /** @{ */ ++ ++ /** Bandwidth in microseconds per (micro)frame. */ ++ uint8_t usecs; ++ ++ /** Interval between transfers in (micro)frames. */ ++ uint16_t interval; ++ ++ /** ++ * (micro)frame to initialize a periodic transfer. The transfer ++ * executes in the following (micro)frame. ++ */ ++ uint16_t sched_frame; ++ ++ /** (micro)frame at which last start split was initialized. */ ++ uint16_t start_split_frame; ++ ++ /** @} */ ++ ++ uint16_t speed; ++ uint16_t frame_usecs[8]; ++ /** Entry for QH in either the periodic or non-periodic schedule. */ ++ struct list_head qh_list_entry; ++} dwc_otg_qh_t; ++ ++/** ++ * This structure holds the state of the HCD, including the non-periodic and ++ * periodic schedules. ++ */ ++typedef struct dwc_otg_hcd { ++ spinlock_t lock; ++ ++ /** DWC OTG Core Interface Layer */ ++ dwc_otg_core_if_t *core_if; ++ ++ /** Internal DWC HCD Flags */ ++ volatile union dwc_otg_hcd_internal_flags { ++ uint32_t d32; ++ struct { ++ unsigned port_connect_status_change : 1; ++ unsigned port_connect_status : 1; ++ unsigned port_reset_change : 1; ++ unsigned port_enable_change : 1; ++ unsigned port_suspend_change : 1; ++ unsigned port_over_current_change : 1; ++ unsigned reserved : 27; ++ } b; ++ } flags; ++ ++ /** ++ * Inactive items in the non-periodic schedule. This is a list of ++ * Queue Heads. Transfers associated with these Queue Heads are not ++ * currently assigned to a host channel. ++ */ ++ struct list_head non_periodic_sched_inactive; ++ ++ /** ++ * Deferred items in the non-periodic schedule. This is a list of ++ * Queue Heads. Transfers associated with these Queue Heads are not ++ * currently assigned to a host channel. ++ * When we get an NAK, the QH goes here. 
++ */ ++ struct list_head non_periodic_sched_deferred; ++ ++ /** ++ * Active items in the non-periodic schedule. This is a list of ++ * Queue Heads. Transfers associated with these Queue Heads are ++ * currently assigned to a host channel. ++ */ ++ struct list_head non_periodic_sched_active; ++ ++ /** ++ * Pointer to the next Queue Head to process in the active ++ * non-periodic schedule. ++ */ ++ struct list_head *non_periodic_qh_ptr; ++ ++ /** ++ * Inactive items in the periodic schedule. This is a list of QHs for ++ * periodic transfers that are _not_ scheduled for the next frame. ++ * Each QH in the list has an interval counter that determines when it ++ * needs to be scheduled for execution. This scheduling mechanism ++ * allows only a simple calculation for periodic bandwidth used (i.e. ++ * must assume that all periodic transfers may need to execute in the ++ * same frame). However, it greatly simplifies scheduling and should ++ * be sufficient for the vast majority of OTG hosts, which need to ++ * connect to a small number of peripherals at one time. ++ * ++ * Items move from this list to periodic_sched_ready when the QH ++ * interval counter is 0 at SOF. ++ */ ++ struct list_head periodic_sched_inactive; ++ ++ /** ++ * List of periodic QHs that are ready for execution in the next ++ * frame, but have not yet been assigned to host channels. ++ * ++ * Items move from this list to periodic_sched_assigned as host ++ * channels become available during the current frame. ++ */ ++ struct list_head periodic_sched_ready; ++ ++ /** ++ * List of periodic QHs to be executed in the next frame that are ++ * assigned to host channels. ++ * ++ * Items move from this list to periodic_sched_queued as the ++ * transactions for the QH are queued to the DWC_otg controller. ++ */ ++ struct list_head periodic_sched_assigned; ++ ++ /** ++ * List of periodic QHs that have been queued for execution. ++ * ++ * Items move from this list to either periodic_sched_inactive or ++ * periodic_sched_ready when the channel associated with the transfer ++ * is released. If the interval for the QH is 1, the item moves to ++ * periodic_sched_ready because it must be rescheduled for the next ++ * frame. Otherwise, the item moves to periodic_sched_inactive. ++ */ ++ struct list_head periodic_sched_queued; ++ ++ /** ++ * Total bandwidth claimed so far for periodic transfers. This value ++ * is in microseconds per (micro)frame. The assumption is that all ++ * periodic transfers may occur in the same (micro)frame. ++ */ ++ uint16_t periodic_usecs; ++ ++ /** ++ * Total bandwidth claimed so far for all periodic transfers ++ * in a frame. ++ * This will include a mixture of HS and FS transfers. ++ * Units are microseconds per (micro)frame. ++ * We have a budget per frame and have to schedule ++ * transactions accordingly. ++ * Watch out for the fact that things are actually scheduled for the ++ * "next frame". ++ */ ++ uint16_t frame_usecs[8]; ++ ++ /** ++ * Frame number read from the core at SOF. The value ranges from 0 to ++ * DWC_HFNUM_MAX_FRNUM. ++ */ ++ uint16_t frame_number; ++ ++ /** ++ * Free host channels in the controller. This is a list of ++ * dwc_hc_t items. ++ */ ++ struct list_head free_hc_list; ++ ++ /** ++ * Number of available host channels. ++ */ ++ int available_host_channels; ++ ++ /** ++ * Array of pointers to the host channel descriptors. Allows accessing ++ * a host channel descriptor given the host channel number. This is ++ * useful in interrupt handlers. 
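The frame_usecs[8] array described above gives the HCD a per-microframe bandwidth budget: a periodic transfer is only admitted while the microseconds claimed in some microframe stay under the limit. A toy admission check along those lines; the 100 microsecond budget and the claim_bandwidth() helper are examples for illustration, not the driver's constants or API:

#include <stdio.h>

#define MICROFRAMES   8
#define BUDGET_USECS  100  /* example per-microframe budget, not the driver's value */

/* Reserve 'usecs' in the first microframe that still has room.
 * Returns the microframe index, or -1 if the transfer does not fit. */
static int claim_bandwidth(int frame_usecs[MICROFRAMES], int usecs)
{
    int i;

    for (i = 0; i < MICROFRAMES; i++) {
        if (frame_usecs[i] + usecs <= BUDGET_USECS) {
            frame_usecs[i] += usecs;
            return i;
        }
    }
    return -1;
}

int main(void)
{
    int frame_usecs[MICROFRAMES] = { 90, 40, 0, 0, 0, 0, 0, 0 };
    int slot = claim_bandwidth(frame_usecs, 25);

    if (slot >= 0)
        printf("interrupt endpoint scheduled in microframe %d\n", slot);
    else
        printf("not enough periodic bandwidth\n");
    return 0;
}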
++ */ ++ dwc_hc_t *hc_ptr_array[MAX_EPS_CHANNELS]; ++ ++ /** ++ * Buffer to use for any data received during the status phase of a ++ * control transfer. Normally no data is transferred during the status ++ * phase. This buffer is used as a bit bucket. ++ */ ++ uint8_t *status_buf; ++ ++ /** ++ * DMA address for status_buf. ++ */ ++ dma_addr_t status_buf_dma; ++#define DWC_OTG_HCD_STATUS_BUF_SIZE 64 ++ ++ /** ++ * Structure to allow starting the HCD in a non-interrupt context ++ * during an OTG role change. ++ */ ++ struct work_struct start_work; ++ struct usb_hcd *_p; ++ ++ /** ++ * Connection timer. An OTG host must display a message if the device ++ * does not connect. Started when the VBus power is turned on via ++ * sysfs attribute "buspower". ++ */ ++ struct timer_list conn_timer; ++ ++ /* Tasket to do a reset */ ++ struct tasklet_struct *reset_tasklet; ++ ++#ifdef DEBUG ++ uint32_t frrem_samples; ++ uint64_t frrem_accum; ++ ++ uint32_t hfnum_7_samples_a; ++ uint64_t hfnum_7_frrem_accum_a; ++ uint32_t hfnum_0_samples_a; ++ uint64_t hfnum_0_frrem_accum_a; ++ uint32_t hfnum_other_samples_a; ++ uint64_t hfnum_other_frrem_accum_a; ++ ++ uint32_t hfnum_7_samples_b; ++ uint64_t hfnum_7_frrem_accum_b; ++ uint32_t hfnum_0_samples_b; ++ uint64_t hfnum_0_frrem_accum_b; ++ uint32_t hfnum_other_samples_b; ++ uint64_t hfnum_other_frrem_accum_b; ++#endif ++ ++} dwc_otg_hcd_t; ++ ++/** Gets the dwc_otg_hcd from a struct usb_hcd */ ++static inline dwc_otg_hcd_t *hcd_to_dwc_otg_hcd(struct usb_hcd *hcd) ++{ ++ return (dwc_otg_hcd_t *)(hcd->hcd_priv); ++} ++ ++/** Gets the struct usb_hcd that contains a dwc_otg_hcd_t. */ ++static inline struct usb_hcd *dwc_otg_hcd_to_hcd(dwc_otg_hcd_t *dwc_otg_hcd) ++{ ++ return container_of((void *)dwc_otg_hcd, struct usb_hcd, hcd_priv); ++} ++ ++/** @name HCD Create/Destroy Functions */ ++/** @{ */ ++extern int __devinit dwc_otg_hcd_init(struct device *_dev, dwc_otg_device_t * dwc_otg_device); ++extern void dwc_otg_hcd_remove(struct device *_dev); ++/** @} */ ++ ++/** @name Linux HC Driver API Functions */ ++/** @{ */ ++ ++extern int dwc_otg_hcd_start(struct usb_hcd *hcd); ++extern void dwc_otg_hcd_stop(struct usb_hcd *hcd); ++extern int dwc_otg_hcd_get_frame_number(struct usb_hcd *hcd); ++extern void dwc_otg_hcd_free(struct usb_hcd *hcd); ++ ++extern int dwc_otg_hcd_urb_enqueue(struct usb_hcd *hcd, ++ struct urb *urb, ++ gfp_t mem_flags); ++extern int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd, ++ struct urb *urb, ++ int status); ++extern irqreturn_t dwc_otg_hcd_irq(struct usb_hcd *hcd); ++ ++extern void dwc_otg_hcd_endpoint_disable(struct usb_hcd *hcd, ++ struct usb_host_endpoint *ep); ++ ++extern int dwc_otg_hcd_hub_status_data(struct usb_hcd *hcd, ++ char *buf); ++extern int dwc_otg_hcd_hub_control(struct usb_hcd *hcd, ++ u16 typeReq, ++ u16 wValue, ++ u16 wIndex, ++ char *buf, ++ u16 wLength); ++ ++/** @} */ ++ ++/** @name Transaction Execution Functions */ ++/** @{ */ ++extern dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *_hcd); ++extern void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t *_hcd, ++ dwc_otg_transaction_type_e _tr_type); ++extern void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t *_hcd, struct urb *_urb, ++ int _status); ++/** @} */ ++ ++/** @name Interrupt Handler Functions */ ++/** @{ */ ++extern int32_t dwc_otg_hcd_handle_intr (dwc_otg_hcd_t *_dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_sof_intr (dwc_otg_hcd_t *_dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_rx_status_q_level_intr (dwc_otg_hcd_t *_dwc_otg_hcd); 
++extern int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr (dwc_otg_hcd_t *_dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr (dwc_otg_hcd_t *_dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_incomplete_periodic_intr(dwc_otg_hcd_t *_dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_port_intr (dwc_otg_hcd_t *_dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_conn_id_status_change_intr (dwc_otg_hcd_t *_dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_disconnect_intr (dwc_otg_hcd_t *_dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_hc_intr (dwc_otg_hcd_t *_dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_hc_n_intr (dwc_otg_hcd_t *_dwc_otg_hcd, uint32_t _num); ++extern int32_t dwc_otg_hcd_handle_session_req_intr (dwc_otg_hcd_t *_dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_wakeup_detected_intr (dwc_otg_hcd_t *_dwc_otg_hcd); ++/** @} */ ++ ++ ++/** @name Schedule Queue Functions */ ++/** @{ */ ++ ++/* Implemented in dwc_otg_hcd_queue.c */ ++extern dwc_otg_qh_t *dwc_otg_hcd_qh_create (dwc_otg_hcd_t *_hcd, struct urb *_urb); ++extern void dwc_otg_hcd_qh_init (dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh, struct urb *_urb); ++extern void dwc_otg_hcd_qh_free (dwc_otg_qh_t *_qh); ++extern int dwc_otg_hcd_qh_add (dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh); ++extern void dwc_otg_hcd_qh_remove (dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh); ++extern void dwc_otg_hcd_qh_deactivate (dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh, int sched_csplit); ++extern int dwc_otg_hcd_qh_deferr (dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh, int delay); ++ ++/** Remove and free a QH */ ++static inline void dwc_otg_hcd_qh_remove_and_free (dwc_otg_hcd_t *_hcd, ++ dwc_otg_qh_t *_qh) ++{ ++ dwc_otg_hcd_qh_remove (_hcd, _qh); ++ dwc_otg_hcd_qh_free (_qh); ++} ++ ++/** Allocates memory for a QH structure. ++ * @return Returns the memory allocate or NULL on error. */ ++static inline dwc_otg_qh_t *dwc_otg_hcd_qh_alloc (void) ++{ ++#ifdef _SC_BUILD_ ++ return (dwc_otg_qh_t *) kmalloc (sizeof(dwc_otg_qh_t), GFP_ATOMIC); ++#else ++ return (dwc_otg_qh_t *) kmalloc (sizeof(dwc_otg_qh_t), GFP_KERNEL); ++#endif ++} ++ ++extern dwc_otg_qtd_t *dwc_otg_hcd_qtd_create (struct urb *urb); ++extern void dwc_otg_hcd_qtd_init (dwc_otg_qtd_t *qtd, struct urb *urb); ++extern int dwc_otg_hcd_qtd_add (dwc_otg_qtd_t *qtd, dwc_otg_hcd_t *dwc_otg_hcd); ++ ++/** Allocates memory for a QTD structure. ++ * @return Returns the memory allocate or NULL on error. */ ++static inline dwc_otg_qtd_t *dwc_otg_hcd_qtd_alloc (void) ++{ ++#ifdef _SC_BUILD_ ++ return (dwc_otg_qtd_t *) kmalloc (sizeof(dwc_otg_qtd_t), GFP_ATOMIC); ++#else ++ return (dwc_otg_qtd_t *) kmalloc (sizeof(dwc_otg_qtd_t), GFP_KERNEL); ++#endif ++} ++ ++/** Frees the memory for a QTD structure. QTD should already be removed from ++ * list. ++ * @param[in] _qtd QTD to free.*/ ++static inline void dwc_otg_hcd_qtd_free (dwc_otg_qtd_t *_qtd) ++{ ++ kfree (_qtd); ++} ++ ++/** Removes a QTD from list. ++ * @param[in] _qtd QTD to remove from list. 
*/ ++static inline void dwc_otg_hcd_qtd_remove (dwc_otg_qtd_t *_qtd) ++{ ++ unsigned long flags; ++ local_irq_save (flags); ++ list_del (&_qtd->qtd_list_entry); ++ local_irq_restore (flags); ++} ++ ++/** Remove and free a QTD */ ++static inline void dwc_otg_hcd_qtd_remove_and_free (dwc_otg_qtd_t *_qtd) ++{ ++ dwc_otg_hcd_qtd_remove (_qtd); ++ dwc_otg_hcd_qtd_free (_qtd); ++} ++ ++/** @} */ ++ ++ ++/** @name Internal Functions */ ++/** @{ */ ++dwc_otg_qh_t *dwc_urb_to_qh(struct urb *_urb); ++void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t *_hcd); ++void dwc_otg_hcd_dump_state(dwc_otg_hcd_t *_hcd); ++/** @} */ ++ ++ ++/** Gets the usb_host_endpoint associated with an URB. */ ++static inline struct usb_host_endpoint *dwc_urb_to_endpoint(struct urb *_urb) ++{ ++ struct usb_device *dev = _urb->dev; ++ int ep_num = usb_pipeendpoint(_urb->pipe); ++ if (usb_pipein(_urb->pipe)) ++ return dev->ep_in[ep_num]; ++ else ++ return dev->ep_out[ep_num]; ++} ++ ++/** ++ * Gets the endpoint number from a _bEndpointAddress argument. The endpoint is ++ * qualified with its direction (possible 32 endpoints per device). ++ */ ++#define dwc_ep_addr_to_endpoint(_bEndpointAddress_) \ ++ ((_bEndpointAddress_ & USB_ENDPOINT_NUMBER_MASK) | \ ++ ((_bEndpointAddress_ & USB_DIR_IN) != 0) << 4) ++ ++/** Gets the QH that contains the list_head */ ++#define dwc_list_to_qh(_list_head_ptr_) (container_of(_list_head_ptr_,dwc_otg_qh_t,qh_list_entry)) ++ ++/** Gets the QTD that contains the list_head */ ++#define dwc_list_to_qtd(_list_head_ptr_) (container_of(_list_head_ptr_,dwc_otg_qtd_t,qtd_list_entry)) ++ ++/** Check if QH is non-periodic */ ++#define dwc_qh_is_non_per(_qh_ptr_) ((_qh_ptr_->ep_type == USB_ENDPOINT_XFER_BULK) || \ ++ (_qh_ptr_->ep_type == USB_ENDPOINT_XFER_CONTROL)) ++ ++/** High bandwidth multiplier as encoded in highspeed endpoint descriptors */ ++#define dwc_hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) ++ ++/** Packet size for any kind of endpoint descriptor */ ++#define dwc_max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff) ++ ++/** ++ * Returns true if _frame1 is less than or equal to _frame2. The comparison is ++ * done modulo DWC_HFNUM_MAX_FRNUM. This accounts for the rollover of the ++ * frame number when the max frame number is reached. ++ */ ++static inline int dwc_frame_num_le(uint16_t _frame1, uint16_t _frame2) ++{ ++ return ((_frame2 - _frame1) & DWC_HFNUM_MAX_FRNUM) <= ++ (DWC_HFNUM_MAX_FRNUM >> 1); ++} ++ ++/** ++ * Returns true if _frame1 is greater than _frame2. The comparison is done ++ * modulo DWC_HFNUM_MAX_FRNUM. This accounts for the rollover of the frame ++ * number when the max frame number is reached. ++ */ ++static inline int dwc_frame_num_gt(uint16_t _frame1, uint16_t _frame2) ++{ ++ return (_frame1 != _frame2) && ++ (((_frame1 - _frame2) & DWC_HFNUM_MAX_FRNUM) < ++ (DWC_HFNUM_MAX_FRNUM >> 1)); ++} ++ ++/** ++ * Increments _frame by the amount specified by _inc. The addition is done ++ * modulo DWC_HFNUM_MAX_FRNUM. Returns the incremented value. ++ */ ++static inline uint16_t dwc_frame_num_inc(uint16_t _frame, uint16_t _inc) ++{ ++ return (_frame + _inc) & DWC_HFNUM_MAX_FRNUM; ++} ++ ++static inline uint16_t dwc_full_frame_num (uint16_t _frame) ++{ ++ return ((_frame) & DWC_HFNUM_MAX_FRNUM) >> 3; ++} ++ ++static inline uint16_t dwc_micro_frame_num (uint16_t _frame) ++{ ++ return (_frame) & 0x7; ++} ++ ++#ifdef DEBUG ++/** ++ * Macro to sample the remaining PHY clocks left in the current frame. 
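The frame-number helpers just above compare (micro)frame counters modulo the hardware frame mask, so ordering still works across the rollover back to 0, and dwc_max_packet()/dwc_hb_mult() split a wMaxPacketSize value into the base packet size and the high-bandwidth multiplier. A quick stand-alone check of both, assuming DWC_HFNUM_MAX_FRNUM is the 14-bit all-ones mask (0x3FFF); the sample frame numbers and the 0x1400 endpoint descriptor value are illustrative:

#include <stdint.h>
#include <stdio.h>

#define FRAME_MASK 0x3FFF   /* assumed value of DWC_HFNUM_MAX_FRNUM */

static int frame_num_le(uint16_t f1, uint16_t f2)
{
    return ((uint16_t)(f2 - f1) & FRAME_MASK) <= (FRAME_MASK >> 1);
}

static uint16_t frame_num_inc(uint16_t f, uint16_t inc)
{
    return (f + inc) & FRAME_MASK;
}

int main(void)
{
    uint16_t sched = 0x3FFE;                  /* just before the wrap */
    uint16_t next  = frame_num_inc(sched, 8); /* interval of 8 microframes */
    uint16_t maxp  = 0x1400;                  /* example wMaxPacketSize */

    printf("le(0x3FFE, 0x0003) = %d (0x0003 counts as later, across the wrap)\n",
           frame_num_le(0x3FFE, 0x0003));
    printf("next sched_frame   = 0x%04x\n", next);
    printf("packet size = %u, high-bandwidth mult = %u\n",
           maxp & 0x07ff, 1 + ((maxp >> 11) & 0x03));
    return 0;
}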
This ++ * may be used during debugging to determine the average time it takes to ++ * execute sections of code. There are two possible sample points, "a" and ++ * "b", so the _letter argument must be one of these values. ++ * ++ * To dump the average sample times, read the "hcd_frrem" sysfs attribute. For ++ * example, "cat /sys/devices/lm0/hcd_frrem". ++ */ ++#define dwc_sample_frrem(_hcd, _qh, _letter) \ ++{ \ ++ hfnum_data_t hfnum; \ ++ dwc_otg_qtd_t *qtd; \ ++ qtd = list_entry(_qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); \ ++ if (usb_pipeint(qtd->urb->pipe) && _qh->start_split_frame != 0 && !qtd->complete_split) { \ ++ hfnum.d32 = dwc_read_reg32(&_hcd->core_if->host_if->host_global_regs->hfnum); \ ++ switch (hfnum.b.frnum & 0x7) { \ ++ case 7: \ ++ _hcd->hfnum_7_samples_##_letter++; \ ++ _hcd->hfnum_7_frrem_accum_##_letter += hfnum.b.frrem; \ ++ break; \ ++ case 0: \ ++ _hcd->hfnum_0_samples_##_letter++; \ ++ _hcd->hfnum_0_frrem_accum_##_letter += hfnum.b.frrem; \ ++ break; \ ++ default: \ ++ _hcd->hfnum_other_samples_##_letter++; \ ++ _hcd->hfnum_other_frrem_accum_##_letter += hfnum.b.frrem; \ ++ break; \ ++ } \ ++ } \ ++} ++#else // DEBUG ++#define dwc_sample_frrem(_hcd, _qh, _letter) ++#endif // DEBUG ++#endif // __DWC_HCD_H__ ++#endif /* DWC_DEVICE_ONLY */ +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_hcd_intr.c +@@ -0,0 +1,1841 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_hcd_intr.c $ ++ * $Revision: 1.1.1.1 $ ++ * $Date: 2009-04-17 06:15:34 $ ++ * $Change: 553126 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. 
++ * ========================================================================== */ ++#ifndef DWC_DEVICE_ONLY ++ ++#include "dwc_otg_driver.h" ++#include "dwc_otg_hcd.h" ++#include "dwc_otg_regs.h" ++ ++const int erratum_usb09_patched = 0; ++const int deferral_on = 1; ++const int nak_deferral_delay = 8; ++const int nyet_deferral_delay = 1; ++/** @file ++ * This file contains the implementation of the HCD Interrupt handlers. ++ */ ++ ++/** This function handles interrupts for the HCD. */ ++int32_t dwc_otg_hcd_handle_intr (dwc_otg_hcd_t *_dwc_otg_hcd) ++{ ++ int retval = 0; ++ ++ dwc_otg_core_if_t *core_if = _dwc_otg_hcd->core_if; ++ gintsts_data_t gintsts; ++#ifdef DEBUG ++ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; ++#endif ++ ++ /* Check if HOST Mode */ ++ if (dwc_otg_is_host_mode(core_if)) { ++ gintsts.d32 = dwc_otg_read_core_intr(core_if); ++ if (!gintsts.d32) { ++ return 0; ++ } ++ ++#ifdef DEBUG ++ /* Don't print debug message in the interrupt handler on SOF */ ++# ifndef DEBUG_SOF ++ if (gintsts.d32 != DWC_SOF_INTR_MASK) ++# endif ++ DWC_DEBUGPL (DBG_HCD, "\n"); ++#endif ++ ++#ifdef DEBUG ++# ifndef DEBUG_SOF ++ if (gintsts.d32 != DWC_SOF_INTR_MASK) ++# endif ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n", gintsts.d32); ++#endif ++ ++ if (gintsts.b.sofintr) { ++ retval |= dwc_otg_hcd_handle_sof_intr (_dwc_otg_hcd); ++ } ++ if (gintsts.b.rxstsqlvl) { ++ retval |= dwc_otg_hcd_handle_rx_status_q_level_intr (_dwc_otg_hcd); ++ } ++ if (gintsts.b.nptxfempty) { ++ retval |= dwc_otg_hcd_handle_np_tx_fifo_empty_intr (_dwc_otg_hcd); ++ } ++ if (gintsts.b.i2cintr) { ++ /** @todo Implement i2cintr handler. */ ++ } ++ if (gintsts.b.portintr) { ++ retval |= dwc_otg_hcd_handle_port_intr (_dwc_otg_hcd); ++ } ++ if (gintsts.b.hcintr) { ++ retval |= dwc_otg_hcd_handle_hc_intr (_dwc_otg_hcd); ++ } ++ if (gintsts.b.ptxfempty) { ++ retval |= dwc_otg_hcd_handle_perio_tx_fifo_empty_intr (_dwc_otg_hcd); ++ } ++#ifdef DEBUG ++# ifndef DEBUG_SOF ++ if (gintsts.d32 != DWC_SOF_INTR_MASK) ++# endif ++ { ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Finished Servicing Interrupts\n"); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintsts=0x%08x\n", ++ dwc_read_reg32(&global_regs->gintsts)); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintmsk=0x%08x\n", ++ dwc_read_reg32(&global_regs->gintmsk)); ++ } ++#endif ++ ++#ifdef DEBUG ++# ifndef DEBUG_SOF ++ if (gintsts.d32 != DWC_SOF_INTR_MASK) ++# endif ++ DWC_DEBUGPL (DBG_HCD, "\n"); ++#endif ++ ++ } ++ ++ return retval; ++} ++ ++#ifdef DWC_TRACK_MISSED_SOFS ++#warning Compiling code to track missed SOFs ++#define FRAME_NUM_ARRAY_SIZE 1000 ++/** ++ * This function is for debug only. 
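dwc_otg_hcd_handle_intr() above is a plain dispatcher: read the masked interrupt status once, call one handler per set bit, and OR the handlers' return values together so the caller knows whether anything was serviced. The shape of that loop, reduced to a few handlers; the bit positions below are illustrative stand-ins, the real ones come from the gintsts register layout:

#include <stdint.h>
#include <stdio.h>

#define INTR_SOF       (1u << 3)   /* illustrative bit assignments */
#define INTR_RXSTSQLVL (1u << 4)
#define INTR_PORT      (1u << 24)

static int handle_sof(void)  { printf("SOF handled\n");              return 1; }
static int handle_rxq(void)  { printf("Rx status queue handled\n");  return 1; }
static int handle_port(void) { printf("port interrupt handled\n");   return 1; }

static int dispatch(uint32_t gintsts)
{
    int retval = 0;

    if (gintsts & INTR_SOF)
        retval |= handle_sof();
    if (gintsts & INTR_RXSTSQLVL)
        retval |= handle_rxq();
    if (gintsts & INTR_PORT)
        retval |= handle_port();
    return retval;   /* nonzero if at least one source was serviced */
}

int main(void)
{
    printf("retval = %d\n", dispatch(INTR_SOF | INTR_PORT));
    return 0;
}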
++ */ ++static inline void track_missed_sofs(uint16_t _curr_frame_number) { ++ static uint16_t frame_num_array[FRAME_NUM_ARRAY_SIZE]; ++ static uint16_t last_frame_num_array[FRAME_NUM_ARRAY_SIZE]; ++ static int frame_num_idx = 0; ++ static uint16_t last_frame_num = DWC_HFNUM_MAX_FRNUM; ++ static int dumped_frame_num_array = 0; ++ ++ if (frame_num_idx < FRAME_NUM_ARRAY_SIZE) { ++ if ((((last_frame_num + 1) & DWC_HFNUM_MAX_FRNUM) != _curr_frame_number)) { ++ frame_num_array[frame_num_idx] = _curr_frame_number; ++ last_frame_num_array[frame_num_idx++] = last_frame_num; ++ } ++ } else if (!dumped_frame_num_array) { ++ int i; ++ printk(KERN_EMERG USB_DWC "Frame Last Frame\n"); ++ printk(KERN_EMERG USB_DWC "----- ----------\n"); ++ for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) { ++ printk(KERN_EMERG USB_DWC "0x%04x 0x%04x\n", ++ frame_num_array[i], last_frame_num_array[i]); ++ } ++ dumped_frame_num_array = 1; ++ } ++ last_frame_num = _curr_frame_number; ++} ++#endif ++ ++/** ++ * Handles the start-of-frame interrupt in host mode. Non-periodic ++ * transactions may be queued to the DWC_otg controller for the current ++ * (micro)frame. Periodic transactions may be queued to the controller for the ++ * next (micro)frame. ++ */ ++int32_t dwc_otg_hcd_handle_sof_intr (dwc_otg_hcd_t *_hcd) ++{ ++ hfnum_data_t hfnum; ++ struct list_head *qh_entry; ++ dwc_otg_qh_t *qh; ++ dwc_otg_transaction_type_e tr_type; ++ gintsts_data_t gintsts = {.d32 = 0}; ++ ++ hfnum.d32 = dwc_read_reg32(&_hcd->core_if->host_if->host_global_regs->hfnum); ++ ++#ifdef DEBUG_SOF ++ DWC_DEBUGPL(DBG_HCD, "--Start of Frame Interrupt--\n"); ++#endif ++ ++ _hcd->frame_number = hfnum.b.frnum; ++ ++#ifdef DEBUG ++ _hcd->frrem_accum += hfnum.b.frrem; ++ _hcd->frrem_samples++; ++#endif ++ ++#ifdef DWC_TRACK_MISSED_SOFS ++ track_missed_sofs(_hcd->frame_number); ++#endif ++ ++ /* Determine whether any periodic QHs should be executed. */ ++ qh_entry = _hcd->periodic_sched_inactive.next; ++ while (qh_entry != &_hcd->periodic_sched_inactive) { ++ qh = list_entry(qh_entry, dwc_otg_qh_t, qh_list_entry); ++ qh_entry = qh_entry->next; ++ if (dwc_frame_num_le(qh->sched_frame, _hcd->frame_number)) { ++ /* ++ * Move QH to the ready list to be executed next ++ * (micro)frame. ++ */ ++ list_move(&qh->qh_list_entry, &_hcd->periodic_sched_ready); ++ } ++ } ++ ++ tr_type = dwc_otg_hcd_select_transactions(_hcd); ++ if (tr_type != DWC_OTG_TRANSACTION_NONE) { ++ dwc_otg_hcd_queue_transactions(_hcd, tr_type); ++ } ++ ++ /* Clear interrupt */ ++ gintsts.b.sofintr = 1; ++ dwc_write_reg32(&_hcd->core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++/** Handles the Rx Status Queue Level Interrupt, which indicates that there is at ++ * least one packet in the Rx FIFO. The packets are moved from the FIFO to ++ * memory if the DWC_otg controller is operating in Slave mode. 
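
The SOF handler above decides whether each inactive periodic QH is due by comparing qh->sched_frame with the current frame number via dwc_frame_num_le(), which has to tolerate the (micro)frame counter wrapping. Below is a standalone sketch of wrap-aware frame arithmetic, assuming a 14-bit counter (0x3FFF); these are simplified stand-ins for the driver's helpers, not copies of them.

#include <stdint.h>
#include <assert.h>

#define FRNUM_MAX 0x3FFF    /* assumed 14-bit (micro)frame counter */

/* frame + inc, modulo the counter width */
static uint16_t frame_num_inc(uint16_t frame, uint16_t inc)
{
    return (uint16_t)((frame + inc) & FRNUM_MAX);
}

/*
 * "a <= b" in modular arithmetic: true when b is at most half the
 * counter range ahead of a, so a b that has just wrapped still
 * counts as "later or equal".
 */
static int frame_num_le(uint16_t a, uint16_t b)
{
    return ((b - a) & FRNUM_MAX) <= (FRNUM_MAX >> 1);
}

int main(void)
{
    assert(frame_num_le(100, 100));          /* equal          */
    assert(frame_num_le(100, 105));          /* a little later */
    assert(!frame_num_le(105, 100));         /* earlier        */
    assert(frame_num_le(0x3FFE, 2));         /* later, wrapped */
    assert(frame_num_inc(0x3FFF, 1) == 0);   /* wrap on inc    */
    return 0;
}
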
*/ ++int32_t dwc_otg_hcd_handle_rx_status_q_level_intr (dwc_otg_hcd_t *_dwc_otg_hcd) ++{ ++ host_grxsts_data_t grxsts; ++ dwc_hc_t *hc = NULL; ++ ++ DWC_DEBUGPL(DBG_HCD, "--RxStsQ Level Interrupt--\n"); ++ ++ grxsts.d32 = dwc_read_reg32(&_dwc_otg_hcd->core_if->core_global_regs->grxstsp); ++ ++ hc = _dwc_otg_hcd->hc_ptr_array[grxsts.b.chnum]; ++ ++ /* Packet Status */ ++ DWC_DEBUGPL(DBG_HCDV, " Ch num = %d\n", grxsts.b.chnum); ++ DWC_DEBUGPL(DBG_HCDV, " Count = %d\n", grxsts.b.bcnt); ++ DWC_DEBUGPL(DBG_HCDV, " DPID = %d, hc.dpid = %d\n", grxsts.b.dpid, hc->data_pid_start); ++ DWC_DEBUGPL(DBG_HCDV, " PStatus = %d\n", grxsts.b.pktsts); ++ ++ switch (grxsts.b.pktsts) { ++ case DWC_GRXSTS_PKTSTS_IN: ++ /* Read the data into the host buffer. */ ++ if (grxsts.b.bcnt > 0) { ++ dwc_otg_read_packet(_dwc_otg_hcd->core_if, ++ hc->xfer_buff, ++ grxsts.b.bcnt); ++ ++ /* Update the HC fields for the next packet received. */ ++ hc->xfer_count += grxsts.b.bcnt; ++ hc->xfer_buff += grxsts.b.bcnt; ++ } ++ ++ case DWC_GRXSTS_PKTSTS_IN_XFER_COMP: ++ case DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR: ++ case DWC_GRXSTS_PKTSTS_CH_HALTED: ++ /* Handled in interrupt, just ignore data */ ++ break; ++ default: ++ DWC_ERROR ("RX_STS_Q Interrupt: Unknown status %d\n", grxsts.b.pktsts); ++ break; ++ } ++ ++ return 1; ++} ++ ++/** This interrupt occurs when the non-periodic Tx FIFO is half-empty. More ++ * data packets may be written to the FIFO for OUT transfers. More requests ++ * may be written to the non-periodic request queue for IN transfers. This ++ * interrupt is enabled only in Slave mode. */ ++int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr (dwc_otg_hcd_t *_dwc_otg_hcd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Non-Periodic TxFIFO Empty Interrupt--\n"); ++ dwc_otg_hcd_queue_transactions(_dwc_otg_hcd, ++ DWC_OTG_TRANSACTION_NON_PERIODIC); ++ return 1; ++} ++ ++/** This interrupt occurs when the periodic Tx FIFO is half-empty. More data ++ * packets may be written to the FIFO for OUT transfers. More requests may be ++ * written to the periodic request queue for IN transfers. This interrupt is ++ * enabled only in Slave mode. */ ++int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr (dwc_otg_hcd_t *_dwc_otg_hcd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Periodic TxFIFO Empty Interrupt--\n"); ++ dwc_otg_hcd_queue_transactions(_dwc_otg_hcd, ++ DWC_OTG_TRANSACTION_PERIODIC); ++ return 1; ++} ++ ++/** There are multiple conditions that can cause a port interrupt. This function ++ * determines which interrupt conditions have occurred and handles them ++ * appropriately. */ ++int32_t dwc_otg_hcd_handle_port_intr (dwc_otg_hcd_t *_dwc_otg_hcd) ++{ ++ int retval = 0; ++ hprt0_data_t hprt0; ++ hprt0_data_t hprt0_modify; ++ ++ hprt0.d32 = dwc_read_reg32(_dwc_otg_hcd->core_if->host_if->hprt0); ++ hprt0_modify.d32 = dwc_read_reg32(_dwc_otg_hcd->core_if->host_if->hprt0); ++ ++ /* Clear appropriate bits in HPRT0 to clear the interrupt bit in ++ * GINTSTS */ ++ ++ hprt0_modify.b.prtena = 0; ++ hprt0_modify.b.prtconndet = 0; ++ hprt0_modify.b.prtenchng = 0; ++ hprt0_modify.b.prtovrcurrchng = 0; ++ ++ /* Port Connect Detected ++ * Set flag and clear if detected */ ++ if (hprt0.b.prtconndet) { ++ DWC_DEBUGPL(DBG_HCD, "--Port Interrupt HPRT0=0x%08x " ++ "Port Connect Detected--\n", hprt0.d32); ++ _dwc_otg_hcd->flags.b.port_connect_status_change = 1; ++ _dwc_otg_hcd->flags.b.port_connect_status = 1; ++ hprt0_modify.b.prtconndet = 1; ++ ++ /* B-Device has connected, Delete the connection timer. 
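
dwc_otg_hcd_handle_rx_status_q_level_intr() above pops one status word from GRXSTSP and pulls the channel number, byte count and packet status out of it before deciding whether to drain the FIFO. A standalone sketch of decoding such a packed status word with shift/mask helpers follows; the field offsets and status codes are invented for illustration, the real layout is defined in dwc_otg_regs.h.

#include <stdint.h>
#include <stdio.h>

/* Illustrative field positions in a packed RX status word. */
#define RXSTS_CHNUM_SHIFT   0
#define RXSTS_CHNUM_MASK    0x0F
#define RXSTS_BCNT_SHIFT    4
#define RXSTS_BCNT_MASK     0x7FF
#define RXSTS_PKTSTS_SHIFT  17
#define RXSTS_PKTSTS_MASK   0x0F

enum rx_pktsts {                 /* illustrative status codes */
    PKTSTS_IN           = 2,
    PKTSTS_IN_XFER_COMP = 3,
    PKTSTS_CH_HALTED    = 7,
};

struct rx_status {
    unsigned chnum;
    unsigned bcnt;
    unsigned pktsts;
};

static struct rx_status decode_rx_status(uint32_t w)
{
    struct rx_status s = {
        .chnum  = (w >> RXSTS_CHNUM_SHIFT)  & RXSTS_CHNUM_MASK,
        .bcnt   = (w >> RXSTS_BCNT_SHIFT)   & RXSTS_BCNT_MASK,
        .pktsts = (w >> RXSTS_PKTSTS_SHIFT) & RXSTS_PKTSTS_MASK,
    };
    return s;
}

int main(void)
{
    /* channel 3, 64 bytes, "IN data received" */
    uint32_t word = (3u << RXSTS_CHNUM_SHIFT) |
                    (64u << RXSTS_BCNT_SHIFT) |
                    ((uint32_t)PKTSTS_IN << RXSTS_PKTSTS_SHIFT);
    struct rx_status s = decode_rx_status(word);

    if (s.pktsts == PKTSTS_IN && s.bcnt > 0)
        printf("read %u bytes from channel %u\n", s.bcnt, s.chnum);
    return 0;
}
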
*/ ++ del_timer( &_dwc_otg_hcd->conn_timer ); ++ ++ /* The Hub driver asserts a reset when it sees port connect ++ * status change flag */ ++ retval |= 1; ++ } ++ ++ /* Port Enable Changed ++ * Clear if detected - Set internal flag if disabled */ ++ if (hprt0.b.prtenchng) { ++ DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x " ++ "Port Enable Changed--\n", hprt0.d32); ++ hprt0_modify.b.prtenchng = 1; ++ if (hprt0.b.prtena == 1) { ++ int do_reset = 0; ++ dwc_otg_core_params_t *params = _dwc_otg_hcd->core_if->core_params; ++ dwc_otg_core_global_regs_t *global_regs = _dwc_otg_hcd->core_if->core_global_regs; ++ dwc_otg_host_if_t *host_if = _dwc_otg_hcd->core_if->host_if; ++ ++ /* Check if we need to adjust the PHY clock speed for ++ * low power and adjust it */ ++ if (params->host_support_fs_ls_low_power) ++ { ++ gusbcfg_data_t usbcfg; ++ ++ usbcfg.d32 = dwc_read_reg32 (&global_regs->gusbcfg); ++ ++ if ((hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED) || ++ (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_FULL_SPEED)) ++ { ++ /* ++ * Low power ++ */ ++ hcfg_data_t hcfg; ++ if (usbcfg.b.phylpwrclksel == 0) { ++ /* Set PHY low power clock select for FS/LS devices */ ++ usbcfg.b.phylpwrclksel = 1; ++ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); ++ do_reset = 1; ++ } ++ ++ hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg); ++ ++ if ((hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED) && ++ (params->host_ls_low_power_phy_clk == ++ DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) ++ { ++ /* 6 MHZ */ ++ DWC_DEBUGPL(DBG_CIL, "FS_PHY programming HCFG to 6 MHz (Low Power)\n"); ++ if (hcfg.b.fslspclksel != DWC_HCFG_6_MHZ) { ++ hcfg.b.fslspclksel = DWC_HCFG_6_MHZ; ++ dwc_write_reg32(&host_if->host_global_regs->hcfg, ++ hcfg.d32); ++ do_reset = 1; ++ } ++ } ++ else { ++ /* 48 MHZ */ ++ DWC_DEBUGPL(DBG_CIL, "FS_PHY programming HCFG to 48 MHz ()\n"); ++ if (hcfg.b.fslspclksel != DWC_HCFG_48_MHZ) { ++ hcfg.b.fslspclksel = DWC_HCFG_48_MHZ; ++ dwc_write_reg32(&host_if->host_global_regs->hcfg, ++ hcfg.d32); ++ do_reset = 1; ++ } ++ } ++ } ++ else { ++ /* ++ * Not low power ++ */ ++ if (usbcfg.b.phylpwrclksel == 1) { ++ usbcfg.b.phylpwrclksel = 0; ++ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); ++ do_reset = 1; ++ } ++ } ++ ++ if (do_reset) { ++ tasklet_schedule(_dwc_otg_hcd->reset_tasklet); ++ } ++ } ++ ++ if (!do_reset) { ++ /* Port has been enabled set the reset change flag */ ++ _dwc_otg_hcd->flags.b.port_reset_change = 1; ++ } ++ ++ } else { ++ _dwc_otg_hcd->flags.b.port_enable_change = 1; ++ } ++ retval |= 1; ++ } ++ ++ /** Overcurrent Change Interrupt */ ++ if (hprt0.b.prtovrcurrchng) { ++ DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x " ++ "Port Overcurrent Changed--\n", hprt0.d32); ++ _dwc_otg_hcd->flags.b.port_over_current_change = 1; ++ hprt0_modify.b.prtovrcurrchng = 1; ++ retval |= 1; ++ } ++ ++ /* Clear Port Interrupts */ ++ dwc_write_reg32(_dwc_otg_hcd->core_if->host_if->hprt0, hprt0_modify.d32); ++ ++ return retval; ++} ++ ++ ++/** This interrupt indicates that one or more host channels has a pending ++ * interrupt. There are multiple conditions that can cause each host channel ++ * interrupt. This function determines which conditions have occurred for each ++ * host channel interrupt and handles them appropriately. 
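
The port interrupt handler above builds hprt0_modify from the live HPRT0 value with prtena and all of the write-1-to-clear change bits zeroed first, then sets only the bit for the condition it is actually acknowledging, so the final write-back cannot clear other pending changes or knock the port out. A standalone sketch of that W1C pattern over a fake register follows; the bit positions and the W1C emulation are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Illustrative port register bits; the "change" bits are write-1-to-clear. */
#define PRT_CONNDET      (1u << 1)   /* connect detected (W1C)            */
#define PRT_ENA          (1u << 2)   /* writing 1 here disables the port  */
#define PRT_ENCHNG       (1u << 3)   /* enable changed (W1C)              */
#define PRT_OVRCURRCHNG  (1u << 5)   /* overcurrent changed (W1C)         */

static uint32_t fake_hprt0 = PRT_ENA | PRT_CONNDET;  /* stands in for the MMIO register */

static uint32_t reg_read(void) { return fake_hprt0; }

static void reg_write(uint32_t v)
{
    /* emulate W1C for the change bits, and prtena as "write 1 to disable" */
    fake_hprt0 &= ~(v & (PRT_CONNDET | PRT_ENCHNG | PRT_OVRCURRCHNG));
    if (v & PRT_ENA)
        fake_hprt0 &= ~PRT_ENA;
}

static void ack_port_connect(void)
{
    uint32_t hprt0 = reg_read();
    /* start from the live value, but never write 1 to bits we must not touch */
    uint32_t modify = hprt0 & ~(PRT_ENA | PRT_CONNDET | PRT_ENCHNG | PRT_OVRCURRCHNG);

    if (hprt0 & PRT_CONNDET) {
        puts("port connect detected");
        modify |= PRT_CONNDET;   /* acknowledge exactly this condition */
    }
    reg_write(modify);
}

int main(void)
{
    ack_port_connect();
    printf("hprt0 after ack: 0x%08x (port still enabled: %s)\n",
           fake_hprt0, (fake_hprt0 & PRT_ENA) ? "yes" : "no");
    return 0;
}
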
*/ ++int32_t dwc_otg_hcd_handle_hc_intr (dwc_otg_hcd_t *_dwc_otg_hcd) ++{ ++ int i; ++ int retval = 0; ++ haint_data_t haint; ++ ++ /* Clear appropriate bits in HCINTn to clear the interrupt bit in ++ * GINTSTS */ ++ ++ haint.d32 = dwc_otg_read_host_all_channels_intr(_dwc_otg_hcd->core_if); ++ ++ for (i=0; i<_dwc_otg_hcd->core_if->core_params->host_channels; i++) { ++ if (haint.b2.chint & (1 << i)) { ++ retval |= dwc_otg_hcd_handle_hc_n_intr (_dwc_otg_hcd, i); ++ } ++ } ++ ++ return retval; ++} ++ ++/* Macro used to clear one channel interrupt */ ++#define clear_hc_int(_hc_regs_,_intr_) \ ++do { \ ++ hcint_data_t hcint_clear = {.d32 = 0}; \ ++ hcint_clear.b._intr_ = 1; \ ++ dwc_write_reg32(&((_hc_regs_)->hcint), hcint_clear.d32); \ ++} while (0) ++ ++/* ++ * Macro used to disable one channel interrupt. Channel interrupts are ++ * disabled when the channel is halted or released by the interrupt handler. ++ * There is no need to handle further interrupts of that type until the ++ * channel is re-assigned. In fact, subsequent handling may cause crashes ++ * because the channel structures are cleaned up when the channel is released. ++ */ ++#define disable_hc_int(_hc_regs_,_intr_) \ ++do { \ ++ hcintmsk_data_t hcintmsk = {.d32 = 0}; \ ++ hcintmsk.b._intr_ = 1; \ ++ dwc_modify_reg32(&((_hc_regs_)->hcintmsk), hcintmsk.d32, 0); \ ++} while (0) ++ ++/** ++ * Gets the actual length of a transfer after the transfer halts. _halt_status ++ * holds the reason for the halt. ++ * ++ * For IN transfers where _halt_status is DWC_OTG_HC_XFER_COMPLETE, ++ * *_short_read is set to 1 upon return if less than the requested ++ * number of bytes were transferred. Otherwise, *_short_read is set to 0 upon ++ * return. _short_read may also be NULL on entry, in which case it remains ++ * unchanged. ++ */ ++static uint32_t get_actual_xfer_length(dwc_hc_t *_hc, ++ dwc_otg_hc_regs_t *_hc_regs, ++ dwc_otg_qtd_t *_qtd, ++ dwc_otg_halt_status_e _halt_status, ++ int *_short_read) ++{ ++ hctsiz_data_t hctsiz; ++ uint32_t length; ++ ++ if (_short_read != NULL) { ++ *_short_read = 0; ++ } ++ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz); ++ ++ if (_halt_status == DWC_OTG_HC_XFER_COMPLETE) { ++ if (_hc->ep_is_in) { ++ length = _hc->xfer_len - hctsiz.b.xfersize; ++ if (_short_read != NULL) { ++ *_short_read = (hctsiz.b.xfersize != 0); ++ } ++ } else if (_hc->qh->do_split) { ++ length = _qtd->ssplit_out_xfer_count; ++ } else { ++ length = _hc->xfer_len; ++ } ++ } else { ++ /* ++ * Must use the hctsiz.pktcnt field to determine how much data ++ * has been transferred. This field reflects the number of ++ * packets that have been transferred via the USB. This is ++ * always an integral number of packets if the transfer was ++ * halted before its normal completion. (Can't use the ++ * hctsiz.xfersize field because that reflects the number of ++ * bytes transferred via the AHB, not the USB). ++ */ ++ length = (_hc->start_pkt_count - hctsiz.b.pktcnt) * _hc->max_packet; ++ } ++ ++ return length; ++} ++ ++/** ++ * Updates the state of the URB after a Transfer Complete interrupt on the ++ * host channel. Updates the actual_length field of the URB based on the ++ * number of bytes transferred via the host channel. Sets the URB status ++ * if the data transfer is finished. ++ * ++ * @return 1 if the data transfer specified by the URB is completely finished, ++ * 0 otherwise. 
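
get_actual_xfer_length() above uses two different sources of truth: for a completed IN transfer the byte count is xfer_len minus what is left in HCTSIZ.XFERSIZE, while for a halted transfer only whole packets are meaningful on the bus, so it is (start_pkt_count - pktcnt) * max_packet. The standalone sketch below redoes that arithmetic with plain parameters instead of the driver's register and QTD structures, and leaves the split-OUT special case out.

#include <stdint.h>
#include <assert.h>

enum halt_status { XFER_COMPLETE, XFER_HALTED };   /* simplified */

/*
 * xfer_len:        bytes programmed into the channel
 * xfersize_left:   HCTSIZ.XFERSIZE at halt time (bytes not yet moved over AHB)
 * start_pkt_count: packets programmed into the channel
 * pktcnt_left:     HCTSIZ.PKTCNT at halt time (packets not yet sent on USB)
 * max_packet:      wMaxPacketSize of the endpoint
 */
static uint32_t actual_xfer_length(enum halt_status hs, int ep_is_in,
                                   uint32_t xfer_len, uint32_t xfersize_left,
                                   uint32_t start_pkt_count, uint32_t pktcnt_left,
                                   uint32_t max_packet, int *short_read)
{
    if (short_read)
        *short_read = 0;

    if (hs == XFER_COMPLETE && ep_is_in) {
        if (short_read)
            *short_read = (xfersize_left != 0);  /* device sent less than asked */
        return xfer_len - xfersize_left;
    }
    if (hs == XFER_COMPLETE)
        return xfer_len;                         /* OUT completed: everything went out */

    /* Halted early: only whole packets are meaningful on the bus. */
    return (start_pkt_count - pktcnt_left) * max_packet;
}

int main(void)
{
    int short_read;

    /* IN, asked for 512, 64 bytes left unread: 448 transferred, short */
    assert(actual_xfer_length(XFER_COMPLETE, 1, 512, 64, 8, 0, 64, &short_read) == 448);
    assert(short_read == 1);

    /* halted after 3 of 8 packets of 64 bytes: 192 bytes */
    assert(actual_xfer_length(XFER_HALTED, 0, 512, 100, 8, 5, 64, NULL) == 192);
    return 0;
}
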
++ */ ++static int update_urb_state_xfer_comp(dwc_hc_t *_hc, ++ dwc_otg_hc_regs_t * _hc_regs, struct urb *_urb, ++ dwc_otg_qtd_t * _qtd, int *status) ++{ ++ int xfer_done = 0; ++ int short_read = 0; ++ ++ _urb->actual_length += get_actual_xfer_length(_hc, _hc_regs, _qtd, ++ DWC_OTG_HC_XFER_COMPLETE, ++ &short_read); ++ ++ if (short_read || (_urb->actual_length == _urb->transfer_buffer_length)) { ++ xfer_done = 1; ++ if (short_read && (_urb->transfer_flags & URB_SHORT_NOT_OK)) { ++ *status = -EREMOTEIO; ++ } else { ++ *status = 0; ++ } ++ } ++ ++#ifdef DEBUG ++ { ++ hctsiz_data_t hctsiz; ++ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz); ++ DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n", ++ __func__, (_hc->ep_is_in ? "IN" : "OUT"), _hc->hc_num); ++ DWC_DEBUGPL(DBG_HCDV, " hc->xfer_len %d\n", _hc->xfer_len); ++ DWC_DEBUGPL(DBG_HCDV, " hctsiz.xfersize %d\n", hctsiz.b.xfersize); ++ DWC_DEBUGPL(DBG_HCDV, " urb->transfer_buffer_length %d\n", ++ _urb->transfer_buffer_length); ++ DWC_DEBUGPL(DBG_HCDV, " urb->actual_length %d\n", _urb->actual_length); ++ DWC_DEBUGPL(DBG_HCDV, " short_read %d, xfer_done %d\n", ++ short_read, xfer_done); ++ } ++#endif ++ ++ return xfer_done; ++} ++ ++/* ++ * Save the starting data toggle for the next transfer. The data toggle is ++ * saved in the QH for non-control transfers and it's saved in the QTD for ++ * control transfers. ++ */ ++static void save_data_toggle(dwc_hc_t *_hc, ++ dwc_otg_hc_regs_t *_hc_regs, ++ dwc_otg_qtd_t *_qtd) ++{ ++ hctsiz_data_t hctsiz; ++ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz); ++ ++ if (_hc->ep_type != DWC_OTG_EP_TYPE_CONTROL) { ++ dwc_otg_qh_t *qh = _hc->qh; ++ if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) { ++ qh->data_toggle = DWC_OTG_HC_PID_DATA0; ++ } else { ++ qh->data_toggle = DWC_OTG_HC_PID_DATA1; ++ } ++ } else { ++ if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) { ++ _qtd->data_toggle = DWC_OTG_HC_PID_DATA0; ++ } else { ++ _qtd->data_toggle = DWC_OTG_HC_PID_DATA1; ++ } ++ } ++} ++ ++/** ++ * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic ++ * QHs, removes the QH from the active non-periodic schedule. If any QTDs are ++ * still linked to the QH, the QH is added to the end of the inactive ++ * non-periodic schedule. For periodic QHs, removes the QH from the periodic ++ * schedule if no more QTDs are linked to the QH. ++ */ ++static void deactivate_qh(dwc_otg_hcd_t *_hcd, ++ dwc_otg_qh_t *_qh, ++ int free_qtd) ++{ ++ int continue_split = 0; ++ dwc_otg_qtd_t *qtd; ++ ++ DWC_DEBUGPL(DBG_HCDV, " %s(%p,%p,%d)\n", __func__, _hcd, _qh, free_qtd); ++ ++ qtd = list_entry(_qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); ++ ++ if (qtd->complete_split) { ++ continue_split = 1; ++ } ++ else if ((qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_MID) || ++ (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_END)) ++ { ++ continue_split = 1; ++ } ++ ++ if (free_qtd) { ++ /* ++ * Note that this was previously a call to ++ * dwc_otg_hcd_qtd_remove_and_free(qtd), which frees the qtd. ++ * However, that call frees the qtd memory, and we continue in the ++ * interrupt logic to access it many more times, including writing ++ * to it. With slub debugging on, it is clear that we were writing ++ * to memory we had freed. ++ * Call this instead, and now I have moved the freeing of the memory to ++ * the end of processing this interrupt. 
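
save_data_toggle() above records whether the next transfer on the endpoint must start with DATA0 or DATA1, keeping the toggle in the QH for bulk and interrupt endpoints but in the QTD for control transfers, whose data stage toggles per request. A tiny standalone sketch of that bookkeeping with hypothetical types, not the driver's QH/QTD:

#include <assert.h>

enum pid { PID_DATA0 = 0, PID_DATA1 = 1 };

struct demo_endpoint {
    int is_control;
    enum pid qh_toggle;    /* per-endpoint toggle (bulk/interrupt)    */
    enum pid qtd_toggle;   /* per-request toggle (control data stage) */
};

/* hw_pid is the PID the controller reports for the next packet */
static void save_toggle(struct demo_endpoint *ep, enum pid hw_pid)
{
    if (ep->is_control)
        ep->qtd_toggle = hw_pid;
    else
        ep->qh_toggle = hw_pid;
}

int main(void)
{
    struct demo_endpoint bulk = { .is_control = 0, .qh_toggle = PID_DATA0 };
    struct demo_endpoint ctrl = { .is_control = 1, .qtd_toggle = PID_DATA1 };

    save_toggle(&bulk, PID_DATA1);   /* bulk: remember in the QH     */
    save_toggle(&ctrl, PID_DATA0);   /* control: remember in the QTD */
    assert(bulk.qh_toggle == PID_DATA1);
    assert(ctrl.qtd_toggle == PID_DATA0);
    return 0;
}
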
++ */ ++ //dwc_otg_hcd_qtd_remove_and_free(qtd); ++ dwc_otg_hcd_qtd_remove(qtd); ++ ++ continue_split = 0; ++ } ++ ++ _qh->channel = NULL; ++ _qh->qtd_in_process = NULL; ++ dwc_otg_hcd_qh_deactivate(_hcd, _qh, continue_split); ++} ++ ++/** ++ * Updates the state of an Isochronous URB when the transfer is stopped for ++ * any reason. The fields of the current entry in the frame descriptor array ++ * are set based on the transfer state and the input _halt_status. Completes ++ * the Isochronous URB if all the URB frames have been completed. ++ * ++ * @return DWC_OTG_HC_XFER_COMPLETE if there are more frames remaining to be ++ * transferred in the URB. Otherwise return DWC_OTG_HC_XFER_URB_COMPLETE. ++ */ ++static dwc_otg_halt_status_e ++update_isoc_urb_state(dwc_otg_hcd_t *_hcd, ++ dwc_hc_t *_hc, ++ dwc_otg_hc_regs_t *_hc_regs, ++ dwc_otg_qtd_t *_qtd, ++ dwc_otg_halt_status_e _halt_status) ++{ ++ struct urb *urb = _qtd->urb; ++ dwc_otg_halt_status_e ret_val = _halt_status; ++ struct usb_iso_packet_descriptor *frame_desc; ++ ++ frame_desc = &urb->iso_frame_desc[_qtd->isoc_frame_index]; ++ switch (_halt_status) { ++ case DWC_OTG_HC_XFER_COMPLETE: ++ frame_desc->status = 0; ++ frame_desc->actual_length = ++ get_actual_xfer_length(_hc, _hc_regs, _qtd, ++ _halt_status, NULL); ++ break; ++ case DWC_OTG_HC_XFER_FRAME_OVERRUN: ++ urb->error_count++; ++ if (_hc->ep_is_in) { ++ frame_desc->status = -ENOSR; ++ } else { ++ frame_desc->status = -ECOMM; ++ } ++ frame_desc->actual_length = 0; ++ break; ++ case DWC_OTG_HC_XFER_BABBLE_ERR: ++ urb->error_count++; ++ frame_desc->status = -EOVERFLOW; ++ /* Don't need to update actual_length in this case. */ ++ break; ++ case DWC_OTG_HC_XFER_XACT_ERR: ++ urb->error_count++; ++ frame_desc->status = -EPROTO; ++ frame_desc->actual_length = ++ get_actual_xfer_length(_hc, _hc_regs, _qtd, ++ _halt_status, NULL); ++ default: ++ DWC_ERROR("%s: Unhandled _halt_status (%d)\n", __func__, ++ _halt_status); ++ BUG(); ++ break; ++ } ++ ++ if (++_qtd->isoc_frame_index == urb->number_of_packets) { ++ /* ++ * urb->status is not used for isoc transfers. ++ * The individual frame_desc statuses are used instead. ++ */ ++ dwc_otg_hcd_complete_urb(_hcd, urb, 0); ++ ret_val = DWC_OTG_HC_XFER_URB_COMPLETE; ++ } else { ++ ret_val = DWC_OTG_HC_XFER_COMPLETE; ++ } ++ ++ return ret_val; ++} ++ ++/** ++ * Releases a host channel for use by other transfers. Attempts to select and ++ * queue more transactions since at least one host channel is available. ++ * ++ * @param _hcd The HCD state structure. ++ * @param _hc The host channel to release. ++ * @param _qtd The QTD associated with the host channel. This QTD may be freed ++ * if the transfer is complete or an error has occurred. ++ * @param _halt_status Reason the channel is being released. This status ++ * determines the actions taken by this function. 
++ */ ++static void release_channel(dwc_otg_hcd_t *_hcd, ++ dwc_hc_t *_hc, ++ dwc_otg_qtd_t *_qtd, ++ dwc_otg_halt_status_e _halt_status, ++ int *must_free) ++{ ++ dwc_otg_transaction_type_e tr_type; ++ int free_qtd; ++ dwc_otg_qh_t * _qh; ++ int deact = 1; ++ int retry_delay = 1; ++ unsigned long flags; ++ ++ DWC_DEBUGPL(DBG_HCDV, " %s: channel %d, halt_status %d\n", __func__, ++ _hc->hc_num, _halt_status); ++ ++ switch (_halt_status) { ++ case DWC_OTG_HC_XFER_NYET: ++ case DWC_OTG_HC_XFER_NAK: ++ if (_halt_status == DWC_OTG_HC_XFER_NYET) { ++ retry_delay = nyet_deferral_delay; ++ } else { ++ retry_delay = nak_deferral_delay; ++ } ++ free_qtd = 0; ++ if (deferral_on && _hc->do_split) { ++ _qh = _hc->qh; ++ if (_qh) { ++ deact = dwc_otg_hcd_qh_deferr(_hcd, _qh , retry_delay); ++ } ++ } ++ break; ++ case DWC_OTG_HC_XFER_URB_COMPLETE: ++ free_qtd = 1; ++ break; ++ case DWC_OTG_HC_XFER_AHB_ERR: ++ case DWC_OTG_HC_XFER_STALL: ++ case DWC_OTG_HC_XFER_BABBLE_ERR: ++ free_qtd = 1; ++ break; ++ case DWC_OTG_HC_XFER_XACT_ERR: ++ if (_qtd->error_count >= 3) { ++ DWC_DEBUGPL(DBG_HCDV, " Complete URB with transaction error\n"); ++ free_qtd = 1; ++ //_qtd->urb->status = -EPROTO; ++ dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EPROTO); ++ } else { ++ free_qtd = 0; ++ } ++ break; ++ case DWC_OTG_HC_XFER_URB_DEQUEUE: ++ /* ++ * The QTD has already been removed and the QH has been ++ * deactivated. Don't want to do anything except release the ++ * host channel and try to queue more transfers. ++ */ ++ goto cleanup; ++ case DWC_OTG_HC_XFER_NO_HALT_STATUS: ++ DWC_ERROR("%s: No halt_status, channel %d\n", __func__, _hc->hc_num); ++ free_qtd = 0; ++ break; ++ default: ++ free_qtd = 0; ++ break; ++ } ++ if (free_qtd) { ++ /* Only change must_free to true (do not set to zero here -- it is ++ * pre-initialized to zero). ++ */ ++ *must_free = 1; ++ } ++ if (deact) { ++ deactivate_qh(_hcd, _hc->qh, free_qtd); ++ } ++ cleanup: ++ /* ++ * Release the host channel for use by other transfers. The cleanup ++ * function clears the channel interrupt enables and conditions, so ++ * there's no need to clear the Channel Halted interrupt separately. ++ */ ++ dwc_otg_hc_cleanup(_hcd->core_if, _hc); ++ list_add_tail(&_hc->hc_list_entry, &_hcd->free_hc_list); ++ ++ local_irq_save(flags); ++ _hcd->available_host_channels++; ++ local_irq_restore(flags); ++ /* Try to queue more transfers now that there's a free channel, */ ++ /* unless erratum_usb09_patched is set */ ++ if (!erratum_usb09_patched) { ++ tr_type = dwc_otg_hcd_select_transactions(_hcd); ++ if (tr_type != DWC_OTG_TRANSACTION_NONE) { ++ dwc_otg_hcd_queue_transactions(_hcd, tr_type); ++ } ++ } ++} ++ ++/** ++ * Halts a host channel. If the channel cannot be halted immediately because ++ * the request queue is full, this function ensures that the FIFO empty ++ * interrupt for the appropriate queue is enabled so that the halt request can ++ * be queued when there is space in the request queue. ++ * ++ * This function may also be called in DMA mode. In that case, the channel is ++ * simply released since the core always halts the channel automatically in ++ * DMA mode. ++ */ ++static void halt_channel(dwc_otg_hcd_t *_hcd, ++ dwc_hc_t *_hc, ++ dwc_otg_qtd_t *_qtd, ++ dwc_otg_halt_status_e _halt_status, int *must_free) ++{ ++ if (_hcd->core_if->dma_enable) { ++ release_channel(_hcd, _hc, _qtd, _halt_status, must_free); ++ return; ++ } ++ ++ /* Slave mode processing... 
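
release_channel() above encodes two retry policies: a NAK or NYET on a split transfer defers the QH by nak_deferral_delay or nyet_deferral_delay frames instead of failing it, and a transaction error only completes the URB with -EPROTO once the QTD has accumulated three errors. The standalone sketch below reproduces just that decision logic; the error count is incremented inline here for brevity, whereas the driver does it in handle_hc_xacterr_intr() before calling release_channel().

#include <errno.h>
#include <stdio.h>

static const int nak_deferral_delay  = 8;   /* frames, matching the constants above */
static const int nyet_deferral_delay = 1;

enum halt { HALT_NAK, HALT_NYET, HALT_XACT_ERR, HALT_URB_COMPLETE };

struct demo_qtd { int error_count; };

/* Returns 1 when the QTD is finished (caller frees it), 0 when it should be retried. */
static int release_decision(enum halt why, struct demo_qtd *qtd,
                            int *retry_delay, int *urb_status)
{
    switch (why) {
    case HALT_NAK:
    case HALT_NYET:
        /* split-transfer NAK/NYET: re-schedule the QH after a short delay */
        *retry_delay = (why == HALT_NYET) ? nyet_deferral_delay
                                          : nak_deferral_delay;
        return 0;
    case HALT_XACT_ERR:
        if (++qtd->error_count >= 3) {
            *urb_status = -EPROTO;   /* three strikes: fail the URB */
            return 1;
        }
        return 0;
    case HALT_URB_COMPLETE:
    default:
        return 1;
    }
}

int main(void)
{
    struct demo_qtd qtd = { 0 };
    int delay = 0, status = 0;

    release_decision(HALT_NAK, &qtd, &delay, &status);
    printf("NAK on split: retry in %d frames\n", delay);

    while (!release_decision(HALT_XACT_ERR, &qtd, &delay, &status))
        ;
    printf("gave up after %d transaction errors, URB status %d\n",
           qtd.error_count, status);
    return 0;
}
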
*/ ++ dwc_otg_hc_halt(_hcd->core_if, _hc, _halt_status); ++ ++ if (_hc->halt_on_queue) { ++ gintmsk_data_t gintmsk = {.d32 = 0}; ++ dwc_otg_core_global_regs_t *global_regs; ++ global_regs = _hcd->core_if->core_global_regs; ++ ++ if (_hc->ep_type == DWC_OTG_EP_TYPE_CONTROL || ++ _hc->ep_type == DWC_OTG_EP_TYPE_BULK) { ++ /* ++ * Make sure the Non-periodic Tx FIFO empty interrupt ++ * is enabled so that the non-periodic schedule will ++ * be processed. ++ */ ++ gintmsk.b.nptxfempty = 1; ++ dwc_modify_reg32(&global_regs->gintmsk, 0, gintmsk.d32); ++ } else { ++ /* ++ * Move the QH from the periodic queued schedule to ++ * the periodic assigned schedule. This allows the ++ * halt to be queued when the periodic schedule is ++ * processed. ++ */ ++ list_move(&_hc->qh->qh_list_entry, ++ &_hcd->periodic_sched_assigned); ++ ++ /* ++ * Make sure the Periodic Tx FIFO Empty interrupt is ++ * enabled so that the periodic schedule will be ++ * processed. ++ */ ++ gintmsk.b.ptxfempty = 1; ++ dwc_modify_reg32(&global_regs->gintmsk, 0, gintmsk.d32); ++ } ++ } ++} ++ ++/** ++ * Performs common cleanup for non-periodic transfers after a Transfer ++ * Complete interrupt. This function should be called after any endpoint type ++ * specific handling is finished to release the host channel. ++ */ ++static void complete_non_periodic_xfer(dwc_otg_hcd_t *_hcd, ++ dwc_hc_t *_hc, ++ dwc_otg_hc_regs_t *_hc_regs, ++ dwc_otg_qtd_t *_qtd, ++ dwc_otg_halt_status_e _halt_status, int *must_free) ++{ ++ hcint_data_t hcint; ++ ++ _qtd->error_count = 0; ++ ++ hcint.d32 = dwc_read_reg32(&_hc_regs->hcint); ++ if (hcint.b.nyet) { ++ /* ++ * Got a NYET on the last transaction of the transfer. This ++ * means that the endpoint should be in the PING state at the ++ * beginning of the next transfer. ++ */ ++ _hc->qh->ping_state = 1; ++ clear_hc_int(_hc_regs,nyet); ++ } ++ ++ /* ++ * Always halt and release the host channel to make it available for ++ * more transfers. There may still be more phases for a control ++ * transfer or more data packets for a bulk transfer at this point, ++ * but the host channel is still halted. A channel will be reassigned ++ * to the transfer when the non-periodic schedule is processed after ++ * the channel is released. This allows transactions to be queued ++ * properly via dwc_otg_hcd_queue_transactions, which also enables the ++ * Tx FIFO Empty interrupt if necessary. ++ */ ++ if (_hc->ep_is_in) { ++ /* ++ * IN transfers in Slave mode require an explicit disable to ++ * halt the channel. (In DMA mode, this call simply releases ++ * the channel.) ++ */ ++ halt_channel(_hcd, _hc, _qtd, _halt_status, must_free); ++ } else { ++ /* ++ * The channel is automatically disabled by the core for OUT ++ * transfers in Slave mode. ++ */ ++ release_channel(_hcd, _hc, _qtd, _halt_status, must_free); ++ } ++} ++ ++/** ++ * Performs common cleanup for periodic transfers after a Transfer Complete ++ * interrupt. This function should be called after any endpoint type specific ++ * handling is finished to release the host channel. ++ */ ++static void complete_periodic_xfer(dwc_otg_hcd_t *_hcd, ++ dwc_hc_t *_hc, ++ dwc_otg_hc_regs_t *_hc_regs, ++ dwc_otg_qtd_t *_qtd, ++ dwc_otg_halt_status_e _halt_status, int *must_free) ++{ ++ hctsiz_data_t hctsiz; ++ _qtd->error_count = 0; ++ ++ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz); ++ if (!_hc->ep_is_in || hctsiz.b.pktcnt == 0) { ++ /* Core halts channel in these cases. 
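
When the halt has to be queued, halt_channel() above unmasks the appropriate Tx-FIFO-empty interrupt with dwc_modify_reg32(&global_regs->gintmsk, 0, bits), a clear-then-set read-modify-write helper. A standalone sketch of that helper over a plain variable instead of an MMIO register follows; the bit positions are illustrative only.

#include <stdint.h>
#include <assert.h>

#define GINT_NPTXFEMPTY (1u << 5)    /* illustrative bit positions */
#define GINT_PTXFEMPTY  (1u << 26)

static uint32_t fake_gintmsk;        /* stands in for the MMIO register */

static uint32_t reg_read(volatile uint32_t *reg)              { return *reg; }
static void     reg_write(volatile uint32_t *reg, uint32_t v) { *reg = v; }

/* Clear clear_mask, then set set_mask, in one read-modify-write. */
static void modify_reg32(volatile uint32_t *reg, uint32_t clear_mask, uint32_t set_mask)
{
    reg_write(reg, (reg_read(reg) & ~clear_mask) | set_mask);
}

int main(void)
{
    fake_gintmsk = GINT_PTXFEMPTY;

    /* enable the non-periodic FIFO-empty interrupt, leave the rest alone */
    modify_reg32(&fake_gintmsk, 0, GINT_NPTXFEMPTY);
    assert(fake_gintmsk == (GINT_PTXFEMPTY | GINT_NPTXFEMPTY));

    /* later: mask it again */
    modify_reg32(&fake_gintmsk, GINT_NPTXFEMPTY, 0);
    assert(fake_gintmsk == GINT_PTXFEMPTY);
    return 0;
}
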
*/ ++ release_channel(_hcd, _hc, _qtd, _halt_status, must_free); ++ } else { ++ /* Flush any outstanding requests from the Tx queue. */ ++ halt_channel(_hcd, _hc, _qtd, _halt_status, must_free); ++ } ++} ++ ++/** ++ * Handles a host channel Transfer Complete interrupt. This handler may be ++ * called in either DMA mode or Slave mode. ++ */ ++static int32_t handle_hc_xfercomp_intr(dwc_otg_hcd_t *_hcd, ++ dwc_hc_t *_hc, ++ dwc_otg_hc_regs_t *_hc_regs, ++ dwc_otg_qtd_t *_qtd, int *must_free) ++{ ++ int urb_xfer_done; ++ dwc_otg_halt_status_e halt_status = DWC_OTG_HC_XFER_COMPLETE; ++ struct urb *urb = _qtd->urb; ++ int pipe_type = usb_pipetype(urb->pipe); ++ int status = -EINPROGRESS; ++ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "Transfer Complete--\n", _hc->hc_num); ++ ++ /* ++ * Handle xfer complete on CSPLIT. ++ */ ++ if (_hc->qh->do_split) { ++ _qtd->complete_split = 0; ++ } ++ ++ /* Update the QTD and URB states. */ ++ switch (pipe_type) { ++ case PIPE_CONTROL: ++ switch (_qtd->control_phase) { ++ case DWC_OTG_CONTROL_SETUP: ++ if (urb->transfer_buffer_length > 0) { ++ _qtd->control_phase = DWC_OTG_CONTROL_DATA; ++ } else { ++ _qtd->control_phase = DWC_OTG_CONTROL_STATUS; ++ } ++ DWC_DEBUGPL(DBG_HCDV, " Control setup transaction done\n"); ++ halt_status = DWC_OTG_HC_XFER_COMPLETE; ++ break; ++ case DWC_OTG_CONTROL_DATA: { ++ urb_xfer_done = update_urb_state_xfer_comp(_hc, _hc_regs,urb, _qtd, &status); ++ if (urb_xfer_done) { ++ _qtd->control_phase = DWC_OTG_CONTROL_STATUS; ++ DWC_DEBUGPL(DBG_HCDV, " Control data transfer done\n"); ++ } else { ++ save_data_toggle(_hc, _hc_regs, _qtd); ++ } ++ halt_status = DWC_OTG_HC_XFER_COMPLETE; ++ break; ++ } ++ case DWC_OTG_CONTROL_STATUS: ++ DWC_DEBUGPL(DBG_HCDV, " Control transfer complete\n"); ++ if (status == -EINPROGRESS) { ++ status = 0; ++ } ++ dwc_otg_hcd_complete_urb(_hcd, urb, status); ++ halt_status = DWC_OTG_HC_XFER_URB_COMPLETE; ++ break; ++ } ++ ++ complete_non_periodic_xfer(_hcd, _hc, _hc_regs, _qtd, ++ halt_status, must_free); ++ break; ++ case PIPE_BULK: ++ DWC_DEBUGPL(DBG_HCDV, " Bulk transfer complete\n"); ++ urb_xfer_done = update_urb_state_xfer_comp(_hc, _hc_regs, urb, _qtd, &status); ++ if (urb_xfer_done) { ++ dwc_otg_hcd_complete_urb(_hcd, urb, status); ++ halt_status = DWC_OTG_HC_XFER_URB_COMPLETE; ++ } else { ++ halt_status = DWC_OTG_HC_XFER_COMPLETE; ++ } ++ ++ save_data_toggle(_hc, _hc_regs, _qtd); ++ complete_non_periodic_xfer(_hcd, _hc, _hc_regs, _qtd,halt_status, must_free); ++ break; ++ case PIPE_INTERRUPT: ++ DWC_DEBUGPL(DBG_HCDV, " Interrupt transfer complete\n"); ++ update_urb_state_xfer_comp(_hc, _hc_regs, urb, _qtd, &status); ++ ++ /* ++ * Interrupt URB is done on the first transfer complete ++ * interrupt. ++ */ ++ dwc_otg_hcd_complete_urb(_hcd, urb, status); ++ save_data_toggle(_hc, _hc_regs, _qtd); ++ complete_periodic_xfer(_hcd, _hc, _hc_regs, _qtd, ++ DWC_OTG_HC_XFER_URB_COMPLETE, must_free); ++ break; ++ case PIPE_ISOCHRONOUS: ++ DWC_DEBUGPL(DBG_HCDV, " Isochronous transfer complete\n"); ++ if (_qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_ALL) ++ { ++ halt_status = update_isoc_urb_state(_hcd, _hc, _hc_regs, _qtd, ++ DWC_OTG_HC_XFER_COMPLETE); ++ } ++ complete_periodic_xfer(_hcd, _hc, _hc_regs, _qtd, halt_status, must_free); ++ break; ++ } ++ ++ disable_hc_int(_hc_regs,xfercompl); ++ ++ return 1; ++} ++ ++/** ++ * Handles a host channel STALL interrupt. This handler may be called in ++ * either DMA mode or Slave mode. 
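
For control transfers, the transfer-complete handler above walks the SETUP, DATA, STATUS sequence, skipping the data stage when transfer_buffer_length is zero and completing the URB after the status stage. A standalone sketch of just that phase progression, with no hardware access:

#include <stdio.h>

enum ctrl_phase { PHASE_SETUP, PHASE_DATA, PHASE_STATUS, PHASE_DONE };

/* Advance one phase after a successful transfer-complete for that phase. */
static enum ctrl_phase next_phase(enum ctrl_phase cur, unsigned buffer_len, int data_done)
{
    switch (cur) {
    case PHASE_SETUP:
        /* No data stage when the request carries no payload. */
        return buffer_len ? PHASE_DATA : PHASE_STATUS;
    case PHASE_DATA:
        /* Stay in DATA until the whole buffer has moved. */
        return data_done ? PHASE_STATUS : PHASE_DATA;
    case PHASE_STATUS:
        return PHASE_DONE;           /* the URB is completed here */
    default:
        return PHASE_DONE;
    }
}

int main(void)
{
    static const char *name[] = { "SETUP", "DATA", "STATUS", "DONE" };
    enum ctrl_phase p = PHASE_SETUP;
    int step = 0;

    /* 64-byte control read: SETUP, two data completions, STATUS */
    while (p != PHASE_DONE && step < 10) {
        printf("step %d: %s\n", step++, name[p]);
        p = next_phase(p, 64, step >= 3);
    }
    printf("final: %s\n", name[p]);
    return 0;
}
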
++ */ ++static int32_t handle_hc_stall_intr(dwc_otg_hcd_t *_hcd, ++ dwc_hc_t *_hc, ++ dwc_otg_hc_regs_t *_hc_regs, ++ dwc_otg_qtd_t *_qtd, int *must_free) ++{ ++ struct urb *urb = _qtd->urb; ++ int pipe_type = usb_pipetype(urb->pipe); ++ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "STALL Received--\n", _hc->hc_num); ++ ++ if (pipe_type == PIPE_CONTROL) { ++ dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EPIPE); ++ } ++ ++ if (pipe_type == PIPE_BULK || pipe_type == PIPE_INTERRUPT) { ++ dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EPIPE); ++ /* ++ * USB protocol requires resetting the data toggle for bulk ++ * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT) ++ * setup command is issued to the endpoint. Anticipate the ++ * CLEAR_FEATURE command since a STALL has occurred and reset ++ * the data toggle now. ++ */ ++ _hc->qh->data_toggle = 0; ++ } ++ ++ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_STALL, must_free); ++ disable_hc_int(_hc_regs,stall); ++ ++ return 1; ++} ++ ++/* ++ * Updates the state of the URB when a transfer has been stopped due to an ++ * abnormal condition before the transfer completes. Modifies the ++ * actual_length field of the URB to reflect the number of bytes that have ++ * actually been transferred via the host channel. ++ */ ++static void update_urb_state_xfer_intr(dwc_hc_t *_hc, ++ dwc_otg_hc_regs_t *_hc_regs, ++ struct urb *_urb, ++ dwc_otg_qtd_t *_qtd, ++ dwc_otg_halt_status_e _halt_status) ++{ ++ uint32_t bytes_transferred = get_actual_xfer_length(_hc, _hc_regs, _qtd, ++ _halt_status, NULL); ++ _urb->actual_length += bytes_transferred; ++ ++#ifdef DEBUG ++ { ++ hctsiz_data_t hctsiz; ++ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz); ++ DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n", ++ __func__, (_hc->ep_is_in ? "IN" : "OUT"), _hc->hc_num); ++ DWC_DEBUGPL(DBG_HCDV, " _hc->start_pkt_count %d\n", _hc->start_pkt_count); ++ DWC_DEBUGPL(DBG_HCDV, " hctsiz.pktcnt %d\n", hctsiz.b.pktcnt); ++ DWC_DEBUGPL(DBG_HCDV, " _hc->max_packet %d\n", _hc->max_packet); ++ DWC_DEBUGPL(DBG_HCDV, " bytes_transferred %d\n", bytes_transferred); ++ DWC_DEBUGPL(DBG_HCDV, " _urb->actual_length %d\n", _urb->actual_length); ++ DWC_DEBUGPL(DBG_HCDV, " _urb->transfer_buffer_length %d\n", ++ _urb->transfer_buffer_length); ++ } ++#endif ++} ++ ++/** ++ * Handles a host channel NAK interrupt. This handler may be called in either ++ * DMA mode or Slave mode. ++ */ ++static int32_t handle_hc_nak_intr(dwc_otg_hcd_t *_hcd, ++ dwc_hc_t *_hc, ++ dwc_otg_hc_regs_t *_hc_regs, ++ dwc_otg_qtd_t *_qtd, int *must_free) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "NAK Received--\n", _hc->hc_num); ++ ++ /* ++ * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and ++ * interrupt. Re-start the SSPLIT transfer. ++ */ ++ if (_hc->do_split) { ++ if (_hc->complete_split) { ++ _qtd->error_count = 0; ++ } ++ _qtd->complete_split = 0; ++ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NAK, must_free); ++ goto handle_nak_done; ++ } ++ ++ switch (usb_pipetype(_qtd->urb->pipe)) { ++ case PIPE_CONTROL: ++ case PIPE_BULK: ++ if (_hcd->core_if->dma_enable && _hc->ep_is_in) { ++ /* ++ * NAK interrupts are enabled on bulk/control IN ++ * transfers in DMA mode for the sole purpose of ++ * resetting the error count after a transaction error ++ * occurs. The core will continue transferring data. ++ */ ++ _qtd->error_count = 0; ++ goto handle_nak_done; ++ } ++ ++ /* ++ * NAK interrupts normally occur during OUT transfers in DMA ++ * or Slave mode. 
For IN transfers, more requests will be ++ * queued as request queue space is available. ++ */ ++ _qtd->error_count = 0; ++ ++ if (!_hc->qh->ping_state) { ++ update_urb_state_xfer_intr(_hc, _hc_regs, _qtd->urb, ++ _qtd, DWC_OTG_HC_XFER_NAK); ++ save_data_toggle(_hc, _hc_regs, _qtd); ++ if (_qtd->urb->dev->speed == USB_SPEED_HIGH) { ++ _hc->qh->ping_state = 1; ++ } ++ } ++ ++ /* ++ * Halt the channel so the transfer can be re-started from ++ * the appropriate point or the PING protocol will ++ * start/continue. ++ */ ++ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NAK, must_free); ++ break; ++ case PIPE_INTERRUPT: ++ _qtd->error_count = 0; ++ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NAK, must_free); ++ break; ++ case PIPE_ISOCHRONOUS: ++ /* Should never get called for isochronous transfers. */ ++ BUG(); ++ break; ++ } ++ ++ handle_nak_done: ++ disable_hc_int(_hc_regs,nak); ++ ++ return 1; ++} ++ ++/** ++ * Handles a host channel ACK interrupt. This interrupt is enabled when ++ * performing the PING protocol in Slave mode, when errors occur during ++ * either Slave mode or DMA mode, and during Start Split transactions. ++ */ ++static int32_t handle_hc_ack_intr(dwc_otg_hcd_t *_hcd, ++ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "ACK Received--\n", _hc->hc_num); ++ ++ if (_hc->do_split) { ++ /* ++ * Handle ACK on SSPLIT. ++ * ACK should not occur in CSPLIT. ++ */ ++ if ((!_hc->ep_is_in) && (_hc->data_pid_start != DWC_OTG_HC_PID_SETUP)) { ++ _qtd->ssplit_out_xfer_count = _hc->xfer_len; ++ } ++ if (!(_hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !_hc->ep_is_in)) { ++ /* Don't need complete for isochronous out transfers. */ ++ _qtd->complete_split = 1; ++ } ++ ++ /* ISOC OUT */ ++ if ((_hc->ep_type == DWC_OTG_EP_TYPE_ISOC) && !_hc->ep_is_in) { ++ switch (_hc->xact_pos) { ++ case DWC_HCSPLIT_XACTPOS_ALL: ++ break; ++ case DWC_HCSPLIT_XACTPOS_END: ++ _qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL; ++ _qtd->isoc_split_offset = 0; ++ break; ++ case DWC_HCSPLIT_XACTPOS_BEGIN: ++ case DWC_HCSPLIT_XACTPOS_MID: ++ /* ++ * For BEGIN or MID, calculate the length for ++ * the next microframe to determine the correct ++ * SSPLIT token, either MID or END. ++ */ ++ do { ++ struct usb_iso_packet_descriptor *frame_desc; ++ ++ frame_desc = &_qtd->urb->iso_frame_desc[_qtd->isoc_frame_index]; ++ _qtd->isoc_split_offset += 188; ++ ++ if ((frame_desc->length - _qtd->isoc_split_offset) <= 188) { ++ _qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_END; ++ } ++ else { ++ _qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_MID; ++ } ++ ++ } while(0); ++ break; ++ } ++ } else { ++ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_ACK, must_free); ++ } ++ } else { ++ _qtd->error_count = 0; ++ ++ if (_hc->qh->ping_state) { ++ _hc->qh->ping_state = 0; ++ /* ++ * Halt the channel so the transfer can be re-started ++ * from the appropriate point. This only happens in ++ * Slave mode. In DMA mode, the ping_state is cleared ++ * when the transfer is started because the core ++ * automatically executes the PING, then the transfer. ++ */ ++ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_ACK, must_free); ++ } else { ++ halt_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free); ++ } ++ } ++ ++ /* ++ * If the ACK occurred when _not_ in the PING state, let the channel ++ * continue transferring data after clearing the error count. ++ */ ++ ++ disable_hc_int(_hc_regs,ack); ++ ++ return 1; ++} ++ ++/** ++ * Handles a host channel NYET interrupt. 
This interrupt should only occur on ++ * Bulk and Control OUT endpoints and for complete split transactions. If a ++ * NYET occurs at the same time as a Transfer Complete interrupt, it is ++ * handled in the xfercomp interrupt handler, not here. This handler may be ++ * called in either DMA mode or Slave mode. ++ */ ++static int32_t handle_hc_nyet_intr(dwc_otg_hcd_t *_hcd, ++ dwc_hc_t *_hc, ++ dwc_otg_hc_regs_t *_hc_regs, ++ dwc_otg_qtd_t *_qtd, int *must_free) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "NYET Received--\n", _hc->hc_num); ++ ++ /* ++ * NYET on CSPLIT ++ * re-do the CSPLIT immediately on non-periodic ++ */ ++ if ((_hc->do_split) && (_hc->complete_split)) { ++ if ((_hc->ep_type == DWC_OTG_EP_TYPE_INTR) || ++ (_hc->ep_type == DWC_OTG_EP_TYPE_ISOC)) { ++ int frnum = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(_hcd)); ++ ++ if (dwc_full_frame_num(frnum) != ++ dwc_full_frame_num(_hc->qh->sched_frame)) { ++ /* ++ * No longer in the same full speed frame. ++ * Treat this as a transaction error. ++ */ ++#if 0 ++ /** @todo Fix system performance so this can ++ * be treated as an error. Right now complete ++ * splits cannot be scheduled precisely enough ++ * due to other system activity, so this error ++ * occurs regularly in Slave mode. ++ */ ++ _qtd->error_count++; ++#endif ++ _qtd->complete_split = 0; ++ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_XACT_ERR, must_free); ++ /** @todo add support for isoc release */ ++ goto handle_nyet_done; ++ } ++ } ++ ++ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NYET, must_free); ++ goto handle_nyet_done; ++ } ++ ++ _hc->qh->ping_state = 1; ++ _qtd->error_count = 0; ++ ++ update_urb_state_xfer_intr(_hc, _hc_regs, _qtd->urb, _qtd, ++ DWC_OTG_HC_XFER_NYET); ++ save_data_toggle(_hc, _hc_regs, _qtd); ++ ++ /* ++ * Halt the channel and re-start the transfer so the PING ++ * protocol will start. ++ */ ++ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NYET, must_free); ++ ++handle_nyet_done: ++ disable_hc_int(_hc_regs,nyet); ++ clear_hc_int(_hc_regs, nyet); ++ return 1; ++} ++ ++/** ++ * Handles a host channel babble interrupt. This handler may be called in ++ * either DMA mode or Slave mode. ++ */ ++static int32_t handle_hc_babble_intr(dwc_otg_hcd_t *_hcd, ++ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "Babble Error--\n", _hc->hc_num); ++ if (_hc->ep_type != DWC_OTG_EP_TYPE_ISOC) { ++ dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EOVERFLOW); ++ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_BABBLE_ERR, must_free); ++ } else { ++ dwc_otg_halt_status_e halt_status; ++ halt_status = update_isoc_urb_state(_hcd, _hc, _hc_regs, _qtd, ++ DWC_OTG_HC_XFER_BABBLE_ERR); ++ halt_channel(_hcd, _hc, _qtd, halt_status, must_free); ++ } ++ disable_hc_int(_hc_regs,bblerr); ++ return 1; ++} ++ ++/** ++ * Handles a host channel AHB error interrupt. This handler is only called in ++ * DMA mode. 
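
For ISOC OUT start-splits, handle_hc_ack_intr() above advances isoc_split_offset in 188-byte steps and chooses the next start-split token: MID while more than 188 bytes of the frame payload remain, END otherwise. A standalone sketch of that calculation with simplified types:

#include <stdio.h>

enum xact_pos { POS_ALL, POS_BEGIN, POS_MID, POS_END };

/*
 * A full-speed isochronous frame is carved into at most 188-byte
 * pieces per high-speed microframe when it goes through a TT.
 */
#define SPLIT_CHUNK 188

static enum xact_pos next_split_pos(unsigned frame_len, unsigned *offset)
{
    *offset += SPLIT_CHUNK;
    if (*offset >= frame_len || frame_len - *offset <= SPLIT_CHUNK)
        return POS_END;
    return POS_MID;
}

int main(void)
{
    static const char *name[] = { "ALL", "BEGIN", "MID", "END" };
    unsigned frame_len = 600;    /* example ISO frame payload */
    unsigned offset = 0;
    enum xact_pos pos = POS_BEGIN;

    while (pos != POS_END) {
        pos = next_split_pos(frame_len, &offset);
        printf("offset %u -> next SSPLIT %s\n", offset, name[pos]);
    }
    return 0;
}
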
++ */ ++static int32_t handle_hc_ahberr_intr(dwc_otg_hcd_t *_hcd, ++ dwc_hc_t *_hc, ++ dwc_otg_hc_regs_t *_hc_regs, ++ dwc_otg_qtd_t *_qtd) ++{ ++ hcchar_data_t hcchar; ++ hcsplt_data_t hcsplt; ++ hctsiz_data_t hctsiz; ++ uint32_t hcdma; ++ struct urb *urb = _qtd->urb; ++ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "AHB Error--\n", _hc->hc_num); ++ ++ hcchar.d32 = dwc_read_reg32(&_hc_regs->hcchar); ++ hcsplt.d32 = dwc_read_reg32(&_hc_regs->hcsplt); ++ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz); ++ hcdma = dwc_read_reg32(&_hc_regs->hcdma); ++ ++ DWC_ERROR("AHB ERROR, Channel %d\n", _hc->hc_num); ++ DWC_ERROR(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32); ++ DWC_ERROR(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma); ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Enqueue\n"); ++ DWC_ERROR(" Device address: %d\n", usb_pipedevice(urb->pipe)); ++ DWC_ERROR(" Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe), ++ (usb_pipein(urb->pipe) ? "IN" : "OUT")); ++ DWC_ERROR(" Endpoint type: %s\n", ++ ({char *pipetype; ++ switch (usb_pipetype(urb->pipe)) { ++ case PIPE_CONTROL: pipetype = "CONTROL"; break; ++ case PIPE_BULK: pipetype = "BULK"; break; ++ case PIPE_INTERRUPT: pipetype = "INTERRUPT"; break; ++ case PIPE_ISOCHRONOUS: pipetype = "ISOCHRONOUS"; break; ++ default: pipetype = "UNKNOWN"; break; ++ }; pipetype;})); ++ DWC_ERROR(" Speed: %s\n", ++ ({char *speed; ++ switch (urb->dev->speed) { ++ case USB_SPEED_HIGH: speed = "HIGH"; break; ++ case USB_SPEED_FULL: speed = "FULL"; break; ++ case USB_SPEED_LOW: speed = "LOW"; break; ++ default: speed = "UNKNOWN"; break; ++ }; speed;})); ++ DWC_ERROR(" Max packet size: %d\n", ++ usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))); ++ DWC_ERROR(" Data buffer length: %d\n", urb->transfer_buffer_length); ++ DWC_ERROR(" Transfer buffer: %p, Transfer DMA: %p\n", ++ urb->transfer_buffer, (void *)(u32)urb->transfer_dma); ++ DWC_ERROR(" Setup buffer: %p, Setup DMA: %p\n", ++ urb->setup_packet, (void *)(u32)urb->setup_dma); ++ DWC_ERROR(" Interval: %d\n", urb->interval); ++ ++ dwc_otg_hcd_complete_urb(_hcd, urb, -EIO); ++ ++ /* ++ * Force a channel halt. Don't call halt_channel because that won't ++ * write to the HCCHARn register in DMA mode to force the halt. ++ */ ++ dwc_otg_hc_halt(_hcd->core_if, _hc, DWC_OTG_HC_XFER_AHB_ERR); ++ ++ disable_hc_int(_hc_regs,ahberr); ++ return 1; ++} ++ ++/** ++ * Handles a host channel transaction error interrupt. This handler may be ++ * called in either DMA mode or Slave mode. ++ */ ++static int32_t handle_hc_xacterr_intr(dwc_otg_hcd_t *_hcd, ++ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "Transaction Error--\n", _hc->hc_num); ++ ++ switch (usb_pipetype(_qtd->urb->pipe)) { ++ case PIPE_CONTROL: ++ case PIPE_BULK: ++ _qtd->error_count++; ++ if (!_hc->qh->ping_state) { ++ update_urb_state_xfer_intr(_hc, _hc_regs, _qtd->urb, ++ _qtd, DWC_OTG_HC_XFER_XACT_ERR); ++ save_data_toggle(_hc, _hc_regs, _qtd); ++ if (!_hc->ep_is_in && _qtd->urb->dev->speed == USB_SPEED_HIGH) { ++ _hc->qh->ping_state = 1; ++ } ++ } ++ ++ /* ++ * Halt the channel so the transfer can be re-started from ++ * the appropriate point or the PING protocol will start. 
++ */ ++ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_XACT_ERR, must_free); ++ break; ++ case PIPE_INTERRUPT: ++ _qtd->error_count++; ++ if ((_hc->do_split) && (_hc->complete_split)) { ++ _qtd->complete_split = 0; ++ } ++ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_XACT_ERR, must_free); ++ break; ++ case PIPE_ISOCHRONOUS: ++ { ++ dwc_otg_halt_status_e halt_status; ++ halt_status = update_isoc_urb_state(_hcd, _hc, _hc_regs, _qtd, ++ DWC_OTG_HC_XFER_XACT_ERR); ++ ++ halt_channel(_hcd, _hc, _qtd, halt_status, must_free); ++ } ++ break; ++ } ++ ++ ++ disable_hc_int(_hc_regs,xacterr); ++ ++ return 1; ++} ++ ++/** ++ * Handles a host channel frame overrun interrupt. This handler may be called ++ * in either DMA mode or Slave mode. ++ */ ++static int32_t handle_hc_frmovrun_intr(dwc_otg_hcd_t *_hcd, ++ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "Frame Overrun--\n", _hc->hc_num); ++ ++ switch (usb_pipetype(_qtd->urb->pipe)) { ++ case PIPE_CONTROL: ++ case PIPE_BULK: ++ break; ++ case PIPE_INTERRUPT: ++ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_FRAME_OVERRUN, must_free); ++ break; ++ case PIPE_ISOCHRONOUS: ++ { ++ dwc_otg_halt_status_e halt_status; ++ halt_status = update_isoc_urb_state(_hcd, _hc, _hc_regs, _qtd, ++ DWC_OTG_HC_XFER_FRAME_OVERRUN); ++ ++ halt_channel(_hcd, _hc, _qtd, halt_status, must_free); ++ } ++ break; ++ } ++ ++ disable_hc_int(_hc_regs,frmovrun); ++ ++ return 1; ++} ++ ++/** ++ * Handles a host channel data toggle error interrupt. This handler may be ++ * called in either DMA mode or Slave mode. ++ */ ++static int32_t handle_hc_datatglerr_intr(dwc_otg_hcd_t *_hcd, ++ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "Data Toggle Error--\n", _hc->hc_num); ++ ++ if (_hc->ep_is_in) { ++ _qtd->error_count = 0; ++ } else { ++ DWC_ERROR("Data Toggle Error on OUT transfer," ++ "channel %d\n", _hc->hc_num); ++ } ++ ++ disable_hc_int(_hc_regs,datatglerr); ++ ++ return 1; ++} ++ ++#ifdef DEBUG ++/** ++ * This function is for debug only. It checks that a valid halt status is set ++ * and that HCCHARn.chdis is clear. If there's a problem, corrective action is ++ * taken and a warning is issued. ++ * @return 1 if halt status is ok, 0 otherwise. ++ */ ++static inline int halt_status_ok(dwc_otg_hcd_t *_hcd, ++ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free) ++{ ++ hcchar_data_t hcchar; ++ hctsiz_data_t hctsiz; ++ hcint_data_t hcint; ++ hcintmsk_data_t hcintmsk; ++ hcsplt_data_t hcsplt; ++ ++ if (_hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS) { ++ /* ++ * This code is here only as a check. This condition should ++ * never happen. Ignore the halt if it does occur. 
++ */ ++ hcchar.d32 = dwc_read_reg32(&_hc_regs->hcchar); ++ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz); ++ hcint.d32 = dwc_read_reg32(&_hc_regs->hcint); ++ hcintmsk.d32 = dwc_read_reg32(&_hc_regs->hcintmsk); ++ hcsplt.d32 = dwc_read_reg32(&_hc_regs->hcsplt); ++ DWC_WARN("%s: _hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS, " ++ "channel %d, hcchar 0x%08x, hctsiz 0x%08x, " ++ "hcint 0x%08x, hcintmsk 0x%08x, " ++ "hcsplt 0x%08x, qtd->complete_split %d\n", ++ __func__, _hc->hc_num, hcchar.d32, hctsiz.d32, ++ hcint.d32, hcintmsk.d32, ++ hcsplt.d32, _qtd->complete_split); ++ ++ DWC_WARN("%s: no halt status, channel %d, ignoring interrupt\n", ++ __func__, _hc->hc_num); ++ DWC_WARN("\n"); ++ clear_hc_int(_hc_regs,chhltd); ++ return 0; ++ } ++ ++ /* ++ * This code is here only as a check. hcchar.chdis should ++ * never be set when the halt interrupt occurs. Halt the ++ * channel again if it does occur. ++ */ ++ hcchar.d32 = dwc_read_reg32(&_hc_regs->hcchar); ++ if (hcchar.b.chdis) { ++ DWC_WARN("%s: hcchar.chdis set unexpectedly, " ++ "hcchar 0x%08x, trying to halt again\n", ++ __func__, hcchar.d32); ++ clear_hc_int(_hc_regs,chhltd); ++ _hc->halt_pending = 0; ++ halt_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free); ++ return 0; ++ } ++ ++ return 1; ++} ++#endif ++ ++/** ++ * Handles a host Channel Halted interrupt in DMA mode. This handler ++ * determines the reason the channel halted and proceeds accordingly. ++ */ ++static void handle_hc_chhltd_intr_dma(dwc_otg_hcd_t *_hcd, ++ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free) ++{ ++ hcint_data_t hcint; ++ hcintmsk_data_t hcintmsk; ++ ++ if (_hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE || ++ _hc->halt_status == DWC_OTG_HC_XFER_AHB_ERR) { ++ /* ++ * Just release the channel. A dequeue can happen on a ++ * transfer timeout. In the case of an AHB Error, the channel ++ * was forced to halt because there's no way to gracefully ++ * recover. ++ */ ++ release_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free); ++ return; ++ } ++ ++ /* Read the HCINTn register to determine the cause for the halt. */ ++ hcint.d32 = dwc_read_reg32(&_hc_regs->hcint); ++ hcintmsk.d32 = dwc_read_reg32(&_hc_regs->hcintmsk); ++ ++ if (hcint.b.xfercomp) { ++ /** @todo This is here because of a possible hardware bug. Spec ++ * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT ++ * interrupt w/ACK bit set should occur, but I only see the ++ * XFERCOMP bit, even with it masked out. This is a workaround ++ * for that behavior. Should fix this when hardware is fixed. ++ */ ++ if ((_hc->ep_type == DWC_OTG_EP_TYPE_ISOC) && (!_hc->ep_is_in)) { ++ handle_hc_ack_intr(_hcd, _hc, _hc_regs, _qtd, must_free); ++ } ++ handle_hc_xfercomp_intr(_hcd, _hc, _hc_regs, _qtd, must_free); ++ } else if (hcint.b.stall) { ++ handle_hc_stall_intr(_hcd, _hc, _hc_regs, _qtd, must_free); ++ } else if (hcint.b.xacterr) { ++ /* ++ * Must handle xacterr before nak or ack. Could get a xacterr ++ * at the same time as either of these on a BULK/CONTROL OUT ++ * that started with a PING. The xacterr takes precedence. ++ */ ++ handle_hc_xacterr_intr(_hcd, _hc, _hc_regs, _qtd, must_free); ++ } else if (hcint.b.nyet) { ++ /* ++ * Must handle nyet before nak or ack. Could get a nyet at the ++ * same time as either of those on a BULK/CONTROL OUT that ++ * started with a PING. The nyet takes precedence. 
++ */ ++ handle_hc_nyet_intr(_hcd, _hc, _hc_regs, _qtd, must_free); ++ } else if (hcint.b.bblerr) { ++ handle_hc_babble_intr(_hcd, _hc, _hc_regs, _qtd, must_free); ++ } else if (hcint.b.frmovrun) { ++ handle_hc_frmovrun_intr(_hcd, _hc, _hc_regs, _qtd, must_free); ++ } else if (hcint.b.datatglerr) { ++ handle_hc_datatglerr_intr(_hcd, _hc, _hc_regs, _qtd, must_free); ++ _hc->qh->data_toggle = 0; ++ halt_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free); ++ } else if (hcint.b.nak && !hcintmsk.b.nak) { ++ /* ++ * If nak is not masked, it's because a non-split IN transfer ++ * is in an error state. In that case, the nak is handled by ++ * the nak interrupt handler, not here. Handle nak here for ++ * BULK/CONTROL OUT transfers, which halt on a NAK to allow ++ * rewinding the buffer pointer. ++ */ ++ handle_hc_nak_intr(_hcd, _hc, _hc_regs, _qtd, must_free); ++ } else if (hcint.b.ack && !hcintmsk.b.ack) { ++ /* ++ * If ack is not masked, it's because a non-split IN transfer ++ * is in an error state. In that case, the ack is handled by ++ * the ack interrupt handler, not here. Handle ack here for ++ * split transfers. Start splits halt on ACK. ++ */ ++ handle_hc_ack_intr(_hcd, _hc, _hc_regs, _qtd, must_free); ++ } else { ++ if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ _hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ /* ++ * A periodic transfer halted with no other channel ++ * interrupts set. Assume it was halted by the core ++ * because it could not be completed in its scheduled ++ * (micro)frame. ++ */ ++#ifdef DEBUG ++ DWC_PRINT("%s: Halt channel %d (assume incomplete periodic transfer)\n", ++ __func__, _hc->hc_num); ++#endif /* */ ++ halt_channel(_hcd, _hc, _qtd, ++ DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE, must_free); ++ } else { ++#ifdef DEBUG ++ DWC_ERROR("%s: Channel %d, DMA Mode -- ChHltd set, but reason " ++ "for halting is unknown, nyet %d, hcint 0x%08x, intsts 0x%08x\n", ++ __func__, _hc->hc_num, hcint.b.nyet, hcint.d32, ++ dwc_read_reg32(&_hcd->core_if->core_global_regs->gintsts)); ++#endif ++ halt_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free); ++ } ++ } ++} ++ ++/** ++ * Handles a host channel Channel Halted interrupt. ++ * ++ * In slave mode, this handler is called only when the driver specifically ++ * requests a halt. This occurs during handling other host channel interrupts ++ * (e.g. nak, xacterr, stall, nyet, etc.). ++ * ++ * In DMA mode, this is the interrupt that occurs when the core has finished ++ * processing a transfer on a channel. Other host channel interrupts (except ++ * ahberr) are disabled in DMA mode. 
++ */ ++static int32_t handle_hc_chhltd_intr(dwc_otg_hcd_t *_hcd, ++ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "Channel Halted--\n", _hc->hc_num); ++ ++ if (_hcd->core_if->dma_enable) { ++ handle_hc_chhltd_intr_dma(_hcd, _hc, _hc_regs, _qtd, must_free); ++ } else { ++#ifdef DEBUG ++ if (!halt_status_ok(_hcd, _hc, _hc_regs, _qtd, must_free)) { ++ return 1; ++ } ++#endif /* */ ++ release_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free); ++ } ++ ++ return 1; ++} ++ ++/** Handles interrupt for a specific Host Channel */ ++int32_t dwc_otg_hcd_handle_hc_n_intr (dwc_otg_hcd_t *_dwc_otg_hcd, uint32_t _num) ++{ ++ int must_free = 0; ++ int retval = 0; ++ hcint_data_t hcint; ++ hcintmsk_data_t hcintmsk; ++ dwc_hc_t *hc; ++ dwc_otg_hc_regs_t *hc_regs; ++ dwc_otg_qtd_t *qtd; ++ ++ DWC_DEBUGPL(DBG_HCDV, "--Host Channel Interrupt--, Channel %d\n", _num); ++ ++ hc = _dwc_otg_hcd->hc_ptr_array[_num]; ++ hc_regs = _dwc_otg_hcd->core_if->host_if->hc_regs[_num]; ++ qtd = list_entry(hc->qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); ++ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk); ++ DWC_DEBUGPL(DBG_HCDV, " hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n", ++ hcint.d32, hcintmsk.d32, (hcint.d32 & hcintmsk.d32)); ++ hcint.d32 = hcint.d32 & hcintmsk.d32; ++ ++ if (!_dwc_otg_hcd->core_if->dma_enable) { ++ if ((hcint.b.chhltd) && (hcint.d32 != 0x2)) { ++ hcint.b.chhltd = 0; ++ } ++ } ++ ++ if (hcint.b.xfercomp) { ++ retval |= handle_hc_xfercomp_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free); ++ /* ++ * If NYET occurred at same time as Xfer Complete, the NYET is ++ * handled by the Xfer Complete interrupt handler. Don't want ++ * to call the NYET interrupt handler in this case. ++ */ ++ hcint.b.nyet = 0; ++ } ++ if (hcint.b.chhltd) { ++ retval |= handle_hc_chhltd_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free); ++ } ++ if (hcint.b.ahberr) { ++ retval |= handle_hc_ahberr_intr(_dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.stall) { ++ retval |= handle_hc_stall_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free); ++ } ++ if (hcint.b.nak) { ++ retval |= handle_hc_nak_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free); ++ } ++ if (hcint.b.ack) { ++ retval |= handle_hc_ack_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free); ++ } ++ if (hcint.b.nyet) { ++ retval |= handle_hc_nyet_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free); ++ } ++ if (hcint.b.xacterr) { ++ retval |= handle_hc_xacterr_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free); ++ } ++ if (hcint.b.bblerr) { ++ retval |= handle_hc_babble_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free); ++ } ++ if (hcint.b.frmovrun) { ++ retval |= handle_hc_frmovrun_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free); ++ } ++ if (hcint.b.datatglerr) { ++ retval |= handle_hc_datatglerr_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free); ++ } ++ ++ /* ++ * Logic to free the qtd here, at the end of the hc intr ++ * processing, if the handling of this interrupt determined ++ * that it needs to be freed. ++ */ ++ if (must_free) { ++ /* Free the qtd here now that we are done using it. 
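
dwc_otg_hcd_handle_hc_n_intr() above only dispatches bits that are both pending and unmasked (hcint & hcintmsk), and its sub-handlers never free the QTD directly: they set must_free so the free happens once, after all of them have run, which avoids the use-after-free described in deactivate_qh(). A standalone sketch of that shape with hypothetical handlers and a reduced QTD:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_qtd { int id; };

/* Each sub-handler may request that the QTD be freed, but never frees it itself. */
static int handle_xfercomp(struct demo_qtd *qtd, int *must_free)
{
    printf("xfercomp on qtd %d\n", qtd->id);
    *must_free = 1;              /* URB finished: free after dispatch */
    return 1;
}

static int handle_nak(struct demo_qtd *qtd, int *must_free)
{
    (void)must_free;             /* NAK just retries, the QTD stays */
    printf("nak on qtd %d\n", qtd->id);
    return 1;
}

static int handle_channel_intr(uint32_t hcint, uint32_t hcintmsk, struct demo_qtd *qtd)
{
    uint32_t pending = hcint & hcintmsk;   /* only unmasked conditions */
    int must_free = 0;
    int retval = 0;

    if (pending & (1u << 0))
        retval |= handle_xfercomp(qtd, &must_free);
    if (pending & (1u << 4))
        retval |= handle_nak(qtd, &must_free);

    if (must_free) {             /* safe now: nobody touches the QTD any more */
        printf("freeing qtd %d\n", qtd->id);
        free(qtd);
    }
    return retval;
}

int main(void)
{
    struct demo_qtd *qtd = malloc(sizeof(*qtd));
    if (!qtd)
        return 1;
    qtd->id = 7;
    /* bit 4 (nak) is masked out, so only the xfercomp handler runs */
    return handle_channel_intr(0x11, 0x01, qtd) ? 0 : 1;
}
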
*/ ++ dwc_otg_hcd_qtd_free(qtd); ++ } ++ return retval; ++} ++ ++#endif /* DWC_DEVICE_ONLY */ +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_hcd_queue.c +@@ -0,0 +1,794 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_hcd_queue.c $ ++ * $Revision: 1.1.1.1 $ ++ * $Date: 2009-04-17 06:15:34 $ ++ * $Change: 537387 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++#ifndef DWC_DEVICE_ONLY ++ ++/** ++ * @file ++ * ++ * This file contains the functions to manage Queue Heads and Queue ++ * Transfer Descriptors. ++ */ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/moduleparam.h> ++#include <linux/init.h> ++#include <linux/device.h> ++#include <linux/errno.h> ++#include <linux/list.h> ++#include <linux/interrupt.h> ++#include <linux/string.h> ++ ++#include "dwc_otg_driver.h" ++#include "dwc_otg_hcd.h" ++#include "dwc_otg_regs.h" ++ ++/** ++ * This function allocates and initializes a QH. ++ * ++ * @param _hcd The HCD state structure for the DWC OTG controller. ++ * @param[in] _urb Holds the information about the device/endpoint that we need ++ * to initialize the QH. ++ * ++ * @return Returns pointer to the newly allocated QH, or NULL on error. */ ++dwc_otg_qh_t *dwc_otg_hcd_qh_create (dwc_otg_hcd_t *_hcd, struct urb *_urb) ++{ ++ dwc_otg_qh_t *qh; ++ ++ /* Allocate memory */ ++ /** @todo add memflags argument */ ++ qh = dwc_otg_hcd_qh_alloc (); ++ if (qh == NULL) { ++ return NULL; ++ } ++ ++ dwc_otg_hcd_qh_init (_hcd, qh, _urb); ++ return qh; ++} ++ ++/** Free each QTD in the QH's QTD-list then free the QH. QH should already be ++ * removed from a list. QTD list should already be empty if called from URB ++ * Dequeue. 
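
dwc_otg_hcd_qh_init() further below computes the periodic scheduling parameters once: the first sched_frame is pushed SCHEDULE_SLOP frames into the future, and a full/low-speed endpoint reached through a high-speed root port has its interval scaled from frames to microframes and its start aligned to the end of a frame. The sketch below redoes just that arithmetic with simplified stand-ins for the driver's helpers and QH.

#include <stdint.h>
#include <stdio.h>

#define FRNUM_MAX     0x3FFF   /* assumed frame counter width */
#define SCHEDULE_SLOP 10       /* matches the define above qh_init */

struct demo_qh {
    uint16_t sched_frame;
    unsigned interval;         /* in (micro)frames once initialized */
};

static uint16_t frame_num_inc(uint16_t frame, uint16_t inc)
{
    return (uint16_t)((frame + inc) & FRNUM_MAX);
}

static void init_periodic_schedule(struct demo_qh *qh, uint16_t cur_frame,
                                   unsigned urb_interval_frames,
                                   int root_port_is_hs, int dev_is_fs_or_ls)
{
    /* Start a little in the future so the first transfer is not already late. */
    qh->sched_frame = frame_num_inc(cur_frame, SCHEDULE_SLOP);
    qh->interval = urb_interval_frames;

    if (root_port_is_hs && dev_is_fs_or_ls) {
        /* FS/LS behind a HS port: 8 microframes per frame, and the
         * start-split is aligned to the last microframe of a frame. */
        qh->interval *= 8;
        qh->sched_frame |= 0x7;
    }
}

int main(void)
{
    struct demo_qh qh;

    init_periodic_schedule(&qh, 0x0100, 4, 1, 1);
    printf("sched_frame 0x%04x, interval %u microframes\n",
           qh.sched_frame, qh.interval);
    return 0;
}
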
++ * ++ * @param[in] _qh The QH to free. ++ */ ++void dwc_otg_hcd_qh_free (dwc_otg_qh_t *_qh) ++{ ++ dwc_otg_qtd_t *qtd; ++ struct list_head *pos; ++ unsigned long flags; ++ ++ /* Free each QTD in the QTD list */ ++ local_irq_save (flags); ++ for (pos = _qh->qtd_list.next; ++ pos != &_qh->qtd_list; ++ pos = _qh->qtd_list.next) ++ { ++ list_del (pos); ++ qtd = dwc_list_to_qtd (pos); ++ dwc_otg_hcd_qtd_free (qtd); ++ } ++ local_irq_restore (flags); ++ ++ kfree (_qh); ++ return; ++} ++ ++/** Initializes a QH structure. ++ * ++ * @param[in] _hcd The HCD state structure for the DWC OTG controller. ++ * @param[in] _qh The QH to init. ++ * @param[in] _urb Holds the information about the device/endpoint that we need ++ * to initialize the QH. */ ++#define SCHEDULE_SLOP 10 ++void dwc_otg_hcd_qh_init(dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh, struct urb *_urb) ++{ ++ memset (_qh, 0, sizeof (dwc_otg_qh_t)); ++ ++ /* Initialize QH */ ++ switch (usb_pipetype(_urb->pipe)) { ++ case PIPE_CONTROL: ++ _qh->ep_type = USB_ENDPOINT_XFER_CONTROL; ++ break; ++ case PIPE_BULK: ++ _qh->ep_type = USB_ENDPOINT_XFER_BULK; ++ break; ++ case PIPE_ISOCHRONOUS: ++ _qh->ep_type = USB_ENDPOINT_XFER_ISOC; ++ break; ++ case PIPE_INTERRUPT: ++ _qh->ep_type = USB_ENDPOINT_XFER_INT; ++ break; ++ } ++ ++ _qh->ep_is_in = usb_pipein(_urb->pipe) ? 1 : 0; ++ ++ _qh->data_toggle = DWC_OTG_HC_PID_DATA0; ++ _qh->maxp = usb_maxpacket(_urb->dev, _urb->pipe, !(usb_pipein(_urb->pipe))); ++ INIT_LIST_HEAD(&_qh->qtd_list); ++ INIT_LIST_HEAD(&_qh->qh_list_entry); ++ _qh->channel = NULL; ++ ++ /* FS/LS Enpoint on HS Hub ++ * NOT virtual root hub */ ++ _qh->do_split = 0; ++ _qh->speed = _urb->dev->speed; ++ if (((_urb->dev->speed == USB_SPEED_LOW) || ++ (_urb->dev->speed == USB_SPEED_FULL)) && ++ (_urb->dev->tt) && (_urb->dev->tt->hub) && (_urb->dev->tt->hub->devnum != 1)) { ++ DWC_DEBUGPL(DBG_HCD, "QH init: EP %d: TT found at hub addr %d, for port %d\n", ++ usb_pipeendpoint(_urb->pipe), _urb->dev->tt->hub->devnum, ++ _urb->dev->ttport); ++ _qh->do_split = 1; ++ } ++ ++ if (_qh->ep_type == USB_ENDPOINT_XFER_INT || ++ _qh->ep_type == USB_ENDPOINT_XFER_ISOC) { ++ /* Compute scheduling parameters once and save them. */ ++ hprt0_data_t hprt; ++ ++ /** @todo Account for split transfers in the bus time. */ ++ int bytecount = dwc_hb_mult(_qh->maxp) * dwc_max_packet(_qh->maxp); ++ _qh->usecs = NS_TO_US(usb_calc_bus_time(_urb->dev->speed, ++ usb_pipein(_urb->pipe), ++ (_qh->ep_type == USB_ENDPOINT_XFER_ISOC),bytecount)); ++ ++ /* Start in a slightly future (micro)frame. */ ++ _qh->sched_frame = dwc_frame_num_inc(_hcd->frame_number, SCHEDULE_SLOP); ++ _qh->interval = _urb->interval; ++#if 0 ++ /* Increase interrupt polling rate for debugging. */ ++ if (_qh->ep_type == USB_ENDPOINT_XFER_INT) { ++ _qh->interval = 8; ++ } ++#endif ++ hprt.d32 = dwc_read_reg32(_hcd->core_if->host_if->hprt0); ++ if ((hprt.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED) && ++ ((_urb->dev->speed == USB_SPEED_LOW) || ++ (_urb->dev->speed == USB_SPEED_FULL))) ++ { ++ _qh->interval *= 8; ++ _qh->sched_frame |= 0x7; ++ _qh->start_split_frame = _qh->sched_frame; ++ } ++ } ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD QH Initialized\n"); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - qh = %p\n", _qh); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Device Address = %d\n", ++ _urb->dev->devnum); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Endpoint %d, %s\n", ++ usb_pipeendpoint(_urb->pipe), ++ usb_pipein(_urb->pipe) == USB_DIR_IN ? 
"IN" : "OUT"); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Speed = %s\n", ++ ({ char *speed; switch (_urb->dev->speed) { ++ case USB_SPEED_LOW: speed = "low"; break; ++ case USB_SPEED_FULL: speed = "full"; break; ++ case USB_SPEED_HIGH: speed = "high"; break; ++ default: speed = "?"; break; ++ }; speed;})); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Type = %s\n", ++ ({ char *type; switch (_qh->ep_type) { ++ case USB_ENDPOINT_XFER_ISOC: type = "isochronous"; break; ++ case USB_ENDPOINT_XFER_INT: type = "interrupt"; break; ++ case USB_ENDPOINT_XFER_CONTROL: type = "control"; break; ++ case USB_ENDPOINT_XFER_BULK: type = "bulk"; break; ++ default: type = "?"; break; ++ }; type;})); ++#ifdef DEBUG ++ if (_qh->ep_type == USB_ENDPOINT_XFER_INT) { ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - usecs = %d\n", ++ _qh->usecs); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - interval = %d\n", ++ _qh->interval); ++ } ++#endif ++ ++ return; ++} ++ ++/** ++ * Microframe scheduler ++ * track the total use in hcd->frame_usecs ++ * keep each qh use in qh->frame_usecs ++ * when surrendering the qh then donate the time back ++ */ ++const unsigned short max_uframe_usecs[]={ 100, 100, 100, 100, 100, 100, 30, 0 }; ++ ++/* ++ * called from dwc_otg_hcd.c:dwc_otg_hcd_init ++ */ ++int init_hcd_usecs(dwc_otg_hcd_t *_hcd) ++{ ++ int i; ++ for (i=0; i<8; i++) { ++ _hcd->frame_usecs[i] = max_uframe_usecs[i]; ++ } ++ return 0; ++} ++ ++static int find_single_uframe(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh) ++{ ++ int i; ++ unsigned short utime; ++ int t_left; ++ int ret; ++ int done; ++ ++ ret = -1; ++ utime = _qh->usecs; ++ t_left = utime; ++ i = 0; ++ done = 0; ++ while (done == 0) { ++ /* At the start _hcd->frame_usecs[i] = max_uframe_usecs[i]; */ ++ if (utime <= _hcd->frame_usecs[i]) { ++ _hcd->frame_usecs[i] -= utime; ++ _qh->frame_usecs[i] += utime; ++ t_left -= utime; ++ ret = i; ++ done = 1; ++ return ret; ++ } else { ++ i++; ++ if (i == 8) { ++ done = 1; ++ ret = -1; ++ } ++ } ++ } ++ return ret; ++} ++ ++/* ++ * use this for FS apps that can span multiple uframes ++ */ ++static int find_multi_uframe(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh) ++{ ++ int i; ++ int j; ++ unsigned short utime; ++ int t_left; ++ int ret; ++ int done; ++ unsigned short xtime; ++ ++ ret = -1; ++ utime = _qh->usecs; ++ t_left = utime; ++ i = 0; ++ done = 0; ++loop: ++ while (done == 0) { ++ if(_hcd->frame_usecs[i] <= 0) { ++ i++; ++ if (i == 8) { ++ done = 1; ++ ret = -1; ++ } ++ goto loop; ++ } ++ ++ /* ++ * we need n consequtive slots ++ * so use j as a start slot j plus j+1 must be enough time (for now) ++ */ ++ xtime= _hcd->frame_usecs[i]; ++ for (j = i+1 ; j < 8 ; j++ ) { ++ /* ++ * if we add this frame remaining time to xtime we may ++ * be OK, if not we need to test j for a complete frame ++ */ ++ if ((xtime+_hcd->frame_usecs[j]) < utime) { ++ if (_hcd->frame_usecs[j] < max_uframe_usecs[j]) { ++ j = 8; ++ ret = -1; ++ continue; ++ } ++ } ++ if (xtime >= utime) { ++ ret = i; ++ j = 8; /* stop loop with a good value ret */ ++ continue; ++ } ++ /* add the frame time to x time */ ++ xtime += _hcd->frame_usecs[j]; ++ /* we must have a fully available next frame or break */ ++ if ((xtime < utime) ++ && (_hcd->frame_usecs[j] == max_uframe_usecs[j])) { ++ ret = -1; ++ j = 8; /* stop loop with a bad value ret */ ++ continue; ++ } ++ } ++ if (ret >= 0) { ++ t_left = utime; ++ for (j = i; (t_left>0) && (j < 8); j++ ) { ++ t_left -= _hcd->frame_usecs[j]; ++ if ( t_left <= 0 ) { ++ _qh->frame_usecs[j] += _hcd->frame_usecs[j] + t_left; ++ 
_hcd->frame_usecs[j]= -t_left; ++ ret = i; ++ done = 1; ++ } else { ++ _qh->frame_usecs[j] += _hcd->frame_usecs[j]; ++ _hcd->frame_usecs[j] = 0; ++ } ++ } ++ } else { ++ i++; ++ if (i == 8) { ++ done = 1; ++ ret = -1; ++ } ++ } ++ } ++ return ret; ++} ++ ++static int find_uframe(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh) ++{ ++ int ret; ++ ret = -1; ++ ++ if (_qh->speed == USB_SPEED_HIGH) { ++ /* if this is a hs transaction we need a full frame */ ++ ret = find_single_uframe(_hcd, _qh); ++ } else { ++ /* if this is a fs transaction we may need a sequence of frames */ ++ ret = find_multi_uframe(_hcd, _qh); ++ } ++ return ret; ++} ++ ++/** ++ * Checks that the max transfer size allowed in a host channel is large enough ++ * to handle the maximum data transfer in a single (micro)frame for a periodic ++ * transfer. ++ * ++ * @param _hcd The HCD state structure for the DWC OTG controller. ++ * @param _qh QH for a periodic endpoint. ++ * ++ * @return 0 if successful, negative error code otherwise. ++ */ ++static int check_max_xfer_size(dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh) ++{ ++ int status; ++ uint32_t max_xfer_size; ++ uint32_t max_channel_xfer_size; ++ ++ status = 0; ++ ++ max_xfer_size = dwc_max_packet(_qh->maxp) * dwc_hb_mult(_qh->maxp); ++ max_channel_xfer_size = _hcd->core_if->core_params->max_transfer_size; ++ ++ if (max_xfer_size > max_channel_xfer_size) { ++ DWC_NOTICE("%s: Periodic xfer length %d > " ++ "max xfer length for channel %d\n", ++ __func__, max_xfer_size, max_channel_xfer_size); ++ status = -ENOSPC; ++ } ++ ++ return status; ++} ++ ++/** ++ * Schedules an interrupt or isochronous transfer in the periodic schedule. ++ * ++ * @param _hcd The HCD state structure for the DWC OTG controller. ++ * @param _qh QH for the periodic transfer. The QH should already contain the ++ * scheduling information. ++ * ++ * @return 0 if successful, negative error code otherwise. ++ */ ++static int schedule_periodic(dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh) ++{ ++ int status = 0; ++ ++ int frame; ++ status = find_uframe(_hcd, _qh); ++ frame = -1; ++ if (status == 0) { ++ frame = 7; ++ } else { ++ if (status > 0 ) ++ frame = status-1; ++ } ++ ++ /* Set the new frame up */ ++ if (frame > -1) { ++ _qh->sched_frame &= ~0x7; ++ _qh->sched_frame |= (frame & 7); ++ } ++ ++ if (status != -1 ) ++ status = 0; ++ if (status) { ++ DWC_NOTICE("%s: Insufficient periodic bandwidth for " ++ "periodic transfer.\n", __func__); ++ return status; ++ } ++ ++ status = check_max_xfer_size(_hcd, _qh); ++ if (status) { ++ DWC_NOTICE("%s: Channel max transfer size too small " ++ "for periodic transfer.\n", __func__); ++ return status; ++ } ++ ++ /* Always start in the inactive schedule. */ ++ list_add_tail(&_qh->qh_list_entry, &_hcd->periodic_sched_inactive); ++ ++ ++ /* Update claimed usecs per (micro)frame. */ ++ _hcd->periodic_usecs += _qh->usecs; ++ ++ /* Update average periodic bandwidth claimed and # periodic reqs for usbfs. 
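++	 * Each periodic QH claims usecs/interval of bus time, which is what
++	 * the bandwidth_allocated accounting below reflects.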
*/ ++ hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_allocated += _qh->usecs / _qh->interval; ++ if (_qh->ep_type == USB_ENDPOINT_XFER_INT) { ++ hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_int_reqs++; ++ DWC_DEBUGPL(DBG_HCD, "Scheduled intr: qh %p, usecs %d, period %d\n", ++ _qh, _qh->usecs, _qh->interval); ++ } else { ++ hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_isoc_reqs++; ++ DWC_DEBUGPL(DBG_HCD, "Scheduled isoc: qh %p, usecs %d, period %d\n", ++ _qh, _qh->usecs, _qh->interval); ++ } ++ ++ return status; ++} ++ ++/** ++ * This function adds a QH to either the non periodic or periodic schedule if ++ * it is not already in the schedule. If the QH is already in the schedule, no ++ * action is taken. ++ * ++ * @return 0 if successful, negative error code otherwise. ++ */ ++int dwc_otg_hcd_qh_add (dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh) ++{ ++ unsigned long flags; ++ int status = 0; ++ ++ local_irq_save(flags); ++ ++ if (!list_empty(&_qh->qh_list_entry)) { ++ /* QH already in a schedule. */ ++ goto done; ++ } ++ ++ /* Add the new QH to the appropriate schedule */ ++ if (dwc_qh_is_non_per(_qh)) { ++ /* Always start in the inactive schedule. */ ++ list_add_tail(&_qh->qh_list_entry, &_hcd->non_periodic_sched_inactive); ++ } else { ++ status = schedule_periodic(_hcd, _qh); ++ } ++ ++ done: ++ local_irq_restore(flags); ++ ++ return status; ++} ++ ++/** ++ * This function adds a QH to the non periodic deferred schedule. ++ * ++ * @return 0 if successful, negative error code otherwise. ++ */ ++int dwc_otg_hcd_qh_add_deferred(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh) ++{ ++ unsigned long flags; ++ local_irq_save(flags); ++ if (!list_empty(&_qh->qh_list_entry)) { ++ /* QH already in a schedule. */ ++ goto done; ++ } ++ ++ /* Add the new QH to the non periodic deferred schedule */ ++ if (dwc_qh_is_non_per(_qh)) { ++ list_add_tail(&_qh->qh_list_entry, ++ &_hcd->non_periodic_sched_deferred); ++ } ++done: ++ local_irq_restore(flags); ++ return 0; ++} ++ ++/** ++ * Removes an interrupt or isochronous transfer from the periodic schedule. ++ * ++ * @param _hcd The HCD state structure for the DWC OTG controller. ++ * @param _qh QH for the periodic transfer. ++ */ ++static void deschedule_periodic(dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh) ++{ ++ int i; ++ list_del_init(&_qh->qh_list_entry); ++ ++ ++ /* Update claimed usecs per (micro)frame. */ ++ _hcd->periodic_usecs -= _qh->usecs; ++ ++ for (i = 0; i < 8; i++) { ++ _hcd->frame_usecs[i] += _qh->frame_usecs[i]; ++ _qh->frame_usecs[i] = 0; ++ } ++ /* Update average periodic bandwidth claimed and # periodic reqs for usbfs. */ ++ hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_allocated -= _qh->usecs / _qh->interval; ++ ++ if (_qh->ep_type == USB_ENDPOINT_XFER_INT) { ++ hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_int_reqs--; ++ DWC_DEBUGPL(DBG_HCD, "Descheduled intr: qh %p, usecs %d, period %d\n", ++ _qh, _qh->usecs, _qh->interval); ++ } else { ++ hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_isoc_reqs--; ++ DWC_DEBUGPL(DBG_HCD, "Descheduled isoc: qh %p, usecs %d, period %d\n", ++ _qh, _qh->usecs, _qh->interval); ++ } ++} ++ ++/** ++ * Removes a QH from either the non-periodic or periodic schedule. Memory is ++ * not freed. ++ * ++ * @param[in] _hcd The HCD state structure. ++ * @param[in] _qh QH to remove from schedule. */ ++void dwc_otg_hcd_qh_remove (dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh) ++{ ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ ++ if (list_empty(&_qh->qh_list_entry)) { ++ /* QH is not in a schedule. 
*/ ++ goto done; ++ } ++ ++ if (dwc_qh_is_non_per(_qh)) { ++ if (_hcd->non_periodic_qh_ptr == &_qh->qh_list_entry) { ++ _hcd->non_periodic_qh_ptr = _hcd->non_periodic_qh_ptr->next; ++ } ++ list_del_init(&_qh->qh_list_entry); ++ } else { ++ deschedule_periodic(_hcd, _qh); ++ } ++ ++ done: ++ local_irq_restore(flags); ++} ++ ++/** ++ * Defers a QH. For non-periodic QHs, removes the QH from the active ++ * non-periodic schedule. The QH is added to the deferred non-periodic ++ * schedule if any QTDs are still attached to the QH. ++ */ ++int dwc_otg_hcd_qh_deferr(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh, int delay) ++{ ++ int deact = 1; ++ unsigned long flags; ++ local_irq_save(flags); ++ if (dwc_qh_is_non_per(_qh)) { ++ _qh->sched_frame = ++ dwc_frame_num_inc(_hcd->frame_number, ++ delay); ++ _qh->channel = NULL; ++ _qh->qtd_in_process = NULL; ++ deact = 0; ++ dwc_otg_hcd_qh_remove(_hcd, _qh); ++ if (!list_empty(&_qh->qtd_list)) { ++ /* Add back to deferred non-periodic schedule. */ ++ dwc_otg_hcd_qh_add_deferred(_hcd, _qh); ++ } ++ } ++ local_irq_restore(flags); ++ return deact; ++} ++ ++/** ++ * Deactivates a QH. For non-periodic QHs, removes the QH from the active ++ * non-periodic schedule. The QH is added to the inactive non-periodic ++ * schedule if any QTDs are still attached to the QH. ++ * ++ * For periodic QHs, the QH is removed from the periodic queued schedule. If ++ * there are any QTDs still attached to the QH, the QH is added to either the ++ * periodic inactive schedule or the periodic ready schedule and its next ++ * scheduled frame is calculated. The QH is placed in the ready schedule if ++ * the scheduled frame has been reached already. Otherwise it's placed in the ++ * inactive schedule. If there are no QTDs attached to the QH, the QH is ++ * completely removed from the periodic schedule. ++ */ ++void dwc_otg_hcd_qh_deactivate(dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh, int sched_next_periodic_split) ++{ ++ unsigned long flags; ++ local_irq_save(flags); ++ ++ if (dwc_qh_is_non_per(_qh)) { ++ dwc_otg_hcd_qh_remove(_hcd, _qh); ++ if (!list_empty(&_qh->qtd_list)) { ++ /* Add back to inactive non-periodic schedule. */ ++ dwc_otg_hcd_qh_add(_hcd, _qh); ++ } ++ } else { ++ uint16_t frame_number = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(_hcd)); ++ ++ if (_qh->do_split) { ++ /* Schedule the next continuing periodic split transfer */ ++ if (sched_next_periodic_split) { ++ ++ _qh->sched_frame = frame_number; ++ if (dwc_frame_num_le(frame_number, ++ dwc_frame_num_inc(_qh->start_split_frame, 1))) { ++ /* ++ * Allow one frame to elapse after start ++ * split microframe before scheduling ++ * complete split, but DONT if we are ++ * doing the next start split in the ++ * same frame for an ISOC out. ++ */ ++ if ((_qh->ep_type != USB_ENDPOINT_XFER_ISOC) || (_qh->ep_is_in != 0)) { ++ _qh->sched_frame = dwc_frame_num_inc(_qh->sched_frame, 1); ++ } ++ } ++ } else { ++ _qh->sched_frame = dwc_frame_num_inc(_qh->start_split_frame, ++ _qh->interval); ++ if (dwc_frame_num_le(_qh->sched_frame, frame_number)) { ++ _qh->sched_frame = frame_number; ++ } ++ _qh->sched_frame |= 0x7; ++ _qh->start_split_frame = _qh->sched_frame; ++ } ++ } else { ++ _qh->sched_frame = dwc_frame_num_inc(_qh->sched_frame, _qh->interval); ++ if (dwc_frame_num_le(_qh->sched_frame, frame_number)) { ++ _qh->sched_frame = frame_number; ++ } ++ } ++ ++ if (list_empty(&_qh->qtd_list)) { ++ dwc_otg_hcd_qh_remove(_hcd, _qh); ++ } else { ++ /* ++ * Remove from periodic_sched_queued and move to ++ * appropriate queue. 
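++			 * The QH goes to periodic_sched_ready when its scheduled
++			 * (micro)frame has already passed, otherwise back to
++			 * periodic_sched_inactive.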
++ */ ++ if (dwc_frame_num_le(_qh->sched_frame, frame_number)) { ++ list_move(&_qh->qh_list_entry, ++ &_hcd->periodic_sched_ready); ++ } else { ++ list_move(&_qh->qh_list_entry, ++ &_hcd->periodic_sched_inactive); ++ } ++ } ++ } ++ ++ local_irq_restore(flags); ++} ++ ++/** ++ * This function allocates and initializes a QTD. ++ * ++ * @param[in] _urb The URB to create a QTD from. Each URB-QTD pair will end up ++ * pointing to each other so each pair should have a unique correlation. ++ * ++ * @return Returns pointer to the newly allocated QTD, or NULL on error. */ ++dwc_otg_qtd_t *dwc_otg_hcd_qtd_create (struct urb *_urb) ++{ ++ dwc_otg_qtd_t *qtd; ++ ++ qtd = dwc_otg_hcd_qtd_alloc (); ++ if (qtd == NULL) { ++ return NULL; ++ } ++ ++ dwc_otg_hcd_qtd_init (qtd, _urb); ++ return qtd; ++} ++ ++/** ++ * Initializes a QTD structure. ++ * ++ * @param[in] _qtd The QTD to initialize. ++ * @param[in] _urb The URB to use for initialization. */ ++void dwc_otg_hcd_qtd_init (dwc_otg_qtd_t *_qtd, struct urb *_urb) ++{ ++ memset (_qtd, 0, sizeof (dwc_otg_qtd_t)); ++ _qtd->urb = _urb; ++ if (usb_pipecontrol(_urb->pipe)) { ++ /* ++ * The only time the QTD data toggle is used is on the data ++ * phase of control transfers. This phase always starts with ++ * DATA1. ++ */ ++ _qtd->data_toggle = DWC_OTG_HC_PID_DATA1; ++ _qtd->control_phase = DWC_OTG_CONTROL_SETUP; ++ } ++ ++ /* start split */ ++ _qtd->complete_split = 0; ++ _qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL; ++ _qtd->isoc_split_offset = 0; ++ ++ /* Store the qtd ptr in the urb to reference what QTD. */ ++ _urb->hcpriv = _qtd; ++ return; ++} ++ ++/** ++ * This function adds a QTD to the QTD-list of a QH. It will find the correct ++ * QH to place the QTD into. If it does not find a QH, then it will create a ++ * new QH. If the QH to which the QTD is added is not currently scheduled, it ++ * is placed into the proper schedule based on its EP type. ++ * ++ * @param[in] _qtd The QTD to add ++ * @param[in] _dwc_otg_hcd The DWC HCD structure ++ * ++ * @return 0 if successful, negative error code otherwise. ++ */ ++int dwc_otg_hcd_qtd_add(dwc_otg_qtd_t * _qtd, dwc_otg_hcd_t * _dwc_otg_hcd) ++{ ++ struct usb_host_endpoint *ep; ++ dwc_otg_qh_t *qh; ++ unsigned long flags; ++ int retval = 0; ++ struct urb *urb = _qtd->urb; ++ ++ local_irq_save(flags); ++ ++ /* ++ * Get the QH which holds the QTD-list to insert to. Create QH if it ++ * doesn't exist. ++ */ ++ ep = dwc_urb_to_endpoint(urb); ++ qh = (dwc_otg_qh_t *)ep->hcpriv; ++ if (qh == NULL) { ++ qh = dwc_otg_hcd_qh_create (_dwc_otg_hcd, urb); ++ if (qh == NULL) { ++ retval = -1; ++ goto done; ++ } ++ ep->hcpriv = qh; ++ } ++ ++ _qtd->qtd_qh_ptr = qh; ++ retval = dwc_otg_hcd_qh_add(_dwc_otg_hcd, qh); ++ if (retval == 0) { ++ list_add_tail(&_qtd->qtd_list_entry, &qh->qtd_list); ++ } ++ ++ done: ++ local_irq_restore(flags); ++ return retval; ++} ++ ++#endif /* DWC_DEVICE_ONLY */ +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_ifx.c +@@ -0,0 +1,176 @@ ++/****************************************************************************** ++** ++** FILE NAME : dwc_otg_ifx.c ++** PROJECT : Twinpass/Danube ++** MODULES : DWC OTG USB ++** ++** DATE : 12 Auguest 2007 ++** AUTHOR : Sung Winder ++** DESCRIPTION : Platform specific initialization. ++** COPYRIGHT : Copyright (c) 2007 ++** Infineon Technologies AG ++** 2F, No.2, Li-Hsin Rd., Hsinchu Science Park, ++** Hsin-chu City, 300 Taiwan. 
++** ++** This program is free software; you can redistribute it and/or modify ++** it under the terms of the GNU General Public License as published by ++** the Free Software Foundation; either version 2 of the License, or ++** (at your option) any later version. ++** ++** HISTORY ++** $Date $Author $Comment ++** 12 August 2007 Sung Winder Initiate Version ++*******************************************************************************/ ++#include "dwc_otg_ifx.h" ++ ++#include <linux/platform_device.h> ++#include <linux/kernel.h> ++#include <linux/ioport.h> ++#include <linux/gpio.h> ++ ++#include <asm/io.h> ++//#include <asm/mach-ifxmips/ifxmips.h> ++#include <xway.h> ++ ++#define IFXMIPS_GPIO_BASE_ADDR (0xBE100B00) ++ ++#define IFXMIPS_GPIO_P0_OUT ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0010)) ++#define IFXMIPS_GPIO_P1_OUT ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0040)) ++#define IFXMIPS_GPIO_P0_IN ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0014)) ++#define IFXMIPS_GPIO_P1_IN ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0044)) ++#define IFXMIPS_GPIO_P0_DIR ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0018)) ++#define IFXMIPS_GPIO_P1_DIR ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0048)) ++#define IFXMIPS_GPIO_P0_ALTSEL0 ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x001C)) ++#define IFXMIPS_GPIO_P1_ALTSEL0 ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x004C)) ++#define IFXMIPS_GPIO_P0_ALTSEL1 ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0020)) ++#define IFXMIPS_GPIO_P1_ALTSEL1 ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0050)) ++#define IFXMIPS_GPIO_P0_OD ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0024)) ++#define IFXMIPS_GPIO_P1_OD ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0054)) ++#define IFXMIPS_GPIO_P0_STOFF ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0028)) ++#define IFXMIPS_GPIO_P1_STOFF ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0058)) ++#define IFXMIPS_GPIO_P0_PUDSEL ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x002C)) ++#define IFXMIPS_GPIO_P1_PUDSEL ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x005C)) ++#define IFXMIPS_GPIO_P0_PUDEN ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0030)) ++#define IFXMIPS_GPIO_P1_PUDEN ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0060)) ++ ++ ++extern void lq_enable_irq(unsigned int irq_nr); ++#define writel lq_w32 ++#define readl lq_r32 ++void dwc_otg_power_on (void) ++{ ++ // GPIOs ++ gpio_request(28, "USB_POWER"); ++ gpio_direction_output(28, 1); ++ /* ++ writel(readl(IFXMIPS_GPIO_P0_DIR) | (0x4000), IFXMIPS_GPIO_P0_DIR); ++ writel(readl(IFXMIPS_GPIO_P0_OD) | (0x4000), IFXMIPS_GPIO_P0_OD); ++ writel(readl(IFXMIPS_GPIO_P0_ALTSEL0) & ~(0x4000), IFXMIPS_GPIO_P0_ALTSEL0); ++ writel(readl(IFXMIPS_GPIO_P0_ALTSEL1) & ~(0x4000), IFXMIPS_GPIO_P0_ALTSEL1); ++ writel(readl(IFXMIPS_GPIO_P0_OUT) | (0x4000), IFXMIPS_GPIO_P0_OUT); ++*/ ++/* writel(readl(IFXMIPS_GPIO_P1_DIR) | (0x1000), IFXMIPS_GPIO_P1_DIR); ++ writel(readl(IFXMIPS_GPIO_P1_OD) | (0x1000), IFXMIPS_GPIO_P1_OD); ++ writel(readl(IFXMIPS_GPIO_P1_ALTSEL0) & ~(0x1000), IFXMIPS_GPIO_P1_ALTSEL0); ++ writel(readl(IFXMIPS_GPIO_P1_ALTSEL1) & ~(0x1000), IFXMIPS_GPIO_P1_ALTSEL1); ++ writel(readl(IFXMIPS_GPIO_P1_OUT) | (0x1000), IFXMIPS_GPIO_P1_OUT); ++*/ ++ // clear power ++ //set_bit (0, DANUBE_PMU_PWDCR); ++ //set_bit (6, DANUBE_PMU_PWDCR); ++ writel(readl(DANUBE_PMU_PWDCR) | 0x41, DANUBE_PMU_PWDCR); ++ ++ // set clock gating ++ //set_bit (4, (volatile unsigned long *)DANUBE_CGU_IFCCR); ++ //set_bit (5, (volatile unsigned long *)DANUBE_CGU_IFCCR); ++ writel(readl(DANUBE_CGU_IFCCR) | 0x30, DANUBE_CGU_IFCCR); ++ ++ // set power ++ //clear_bit (0, (volatile unsigned long *)DANUBE_PMU_PWDCR); ++ writel(readl(DANUBE_PMU_PWDCR) & ~0x1, DANUBE_PMU_PWDCR);
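++	// The masked writel()s mirror the commented-out set_bit()/clear_bit()
++	// calls: 0x41 covers PWDCR bits 0 and 6, 0x30 covers IFCCR bits 4 and 5,
++	// and the "set power" stage clears PWDCR bits 0, 6 and 15 to power the
++	// USB core back up.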
++ //clear_bit (6, (volatile unsigned long *)DANUBE_PMU_PWDCR); ++ writel(readl(DANUBE_PMU_PWDCR) & ~0x40, DANUBE_PMU_PWDCR); ++ //clear_bit (15, (volatile unsigned long *)DANUBE_PMU_PWDCR); ++ writel(readl(DANUBE_PMU_PWDCR) & ~0x8000, DANUBE_PMU_PWDCR); ++ //writel(readl(DANUBE_PMU_PWDCR) & ~0x8041, DANUBE_PMU_PWDCR); ++ ++#if 1//defined (DWC_HOST_ONLY) ++ // make the hardware be a host controller (default) ++ //clear_bit (DANUBE_USBCFG_HDSEL_BIT, (volatile unsigned long *)DANUBE_RCU_UBSCFG); ++ writel(readl(DANUBE_RCU_UBSCFG) & ~(1<<DANUBE_USBCFG_HDSEL_BIT), DANUBE_RCU_UBSCFG); ++ ++ //#elif defined (DWC_DEVICE_ONLY) ++ /* set the controller to the device mode */ ++ // set_bit (DANUBE_USBCFG_HDSEL_BIT, (volatile unsigned long *)DANUBE_RCU_UBSCFG); ++#else ++#error "For Danube/Twinpass, it should be HOST or Device Only." ++#endif ++ ++ // set the HC's byte-order to big-endian ++ //set_bit (DANUBE_USBCFG_HOST_END_BIT, (volatile unsigned long *)DANUBE_RCU_UBSCFG); ++ writel(readl(DANUBE_RCU_UBSCFG) | (1<<DANUBE_USBCFG_HOST_END_BIT), DANUBE_RCU_UBSCFG); ++ //clear_bit (DANUBE_USBCFG_SLV_END_BIT, (volatile unsigned long *)DANUBE_RCU_UBSCFG); ++ writel(readl(DANUBE_RCU_UBSCFG) & ~(1<<DANUBE_USBCFG_SLV_END_BIT), DANUBE_RCU_UBSCFG); ++ //writel(0x400, DANUBE_RCU_UBSCFG); ++ ++ // PHY configurations. ++ writel (0x14014, (volatile unsigned long *)0xbe10103c); ++} ++ ++static void release_platform_dev(struct device * dev) ++{ ++ printk("IFX dwc_otg USB platform_dev release\n"); ++ dev->parent = NULL; ++} ++ ++static struct resource resources[] = ++{ ++ [0] = { ++ .name = "dwc_otg_membase", ++ .start = IFX_USB_IOMEM_BASE, ++ .end = IFX_USB_IOMEM_BASE + IFX_USB_IOMEM_SIZE - 1, ++ .flags = IORESOURCE_MEM, ++ }, ++ [1] = { ++ .name = "dwc_otg_irq", ++ .start = IFX_USB_IRQ, ++ .flags = IORESOURCE_IRQ, ++ }, ++}; ++ ++static u64 dwc_dmamask = (u32)0x1fffffff; ++ ++static struct platform_device platform_dev = { ++ .dev = { ++ .release = release_platform_dev, ++ .dma_mask = &dwc_dmamask, ++ }, ++ .resource = resources, ++ .num_resources = ARRAY_SIZE(resources), ++}; ++ ++extern const char dwc_driver_name[]; ++int ifx_usb_hc_init(unsigned long base_addr, int irq) ++{ ++ if (platform_dev.dev.parent) ++ return -EBUSY; ++ ++ /* finish setting up the platform device */ ++ //resources[0].start = base_addr; ++ //resources[0].end = base_addr + SZ_256K; ++ ++ //resources[1].start = irq; ++ ++ /* The driver core will probe for us. We know sl811-hcd has been ++ * initialized already because of the link order dependency. ++ */ ++ platform_dev.name = dwc_driver_name; ++ lq_enable_irq(resources[1].start); ++ ++ return platform_device_register(&platform_dev); ++} ++ ++void ifx_usb_hc_remove(void) ++{ ++ platform_device_unregister(&platform_dev); ++} +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_ifx.h +@@ -0,0 +1,79 @@ ++/****************************************************************************** ++** ++** FILE NAME : dwc_otg_ifx.h ++** PROJECT : Twinpass/Danube ++** MODULES : DWC OTG USB ++** ++** DATE : 12 April 2007 ++** AUTHOR : Sung Winder ++** DESCRIPTION : Platform specific initialization. ++** COPYRIGHT : Copyright (c) 2007 ++** Infineon Technologies AG ++** 2F, No.2, Li-Hsin Rd., Hsinchu Science Park, ++** Hsin-chu City, 300 Taiwan. ++** ++** This program is free software; you can redistribute it and/or modify ++** it under the terms of the GNU General Public License as published by ++** the Free Software Foundation; either version 2 of the License, or ++** (at your option) any later version.
++** ++** HISTORY ++** $Date $Author $Comment ++** 12 April 2007 Sung Winder Initiate Version ++*******************************************************************************/ ++#if !defined(__DWC_OTG_IFX_H__) ++#define __DWC_OTG_IFX_H__ ++ ++#include <irq.h> ++ ++// 20070316, winder added. ++#ifndef SZ_256K ++#define SZ_256K 0x00040000 ++#endif ++ ++extern void dwc_otg_power_on (void); ++ ++/* FIXME: The current Linux-2.6 do not have these header files, but anyway, we need these. */ ++// #include <asm/danube/danube.h> ++// #include <asm/ifx/irq.h> ++ ++/* winder, I used the Danube parameter as default. * ++ * We could change this through module param. */ ++#define IFX_USB_IOMEM_BASE 0x1e101000 ++#define IFX_USB_IOMEM_SIZE SZ_256K ++#define IFX_USB_IRQ LQ_USB_INT ++ ++/** ++ * This function is called to set correct clock gating and power. ++ * For Twinpass/Danube board. ++ */ ++#ifndef DANUBE_RCU_BASE_ADDR ++#define DANUBE_RCU_BASE_ADDR (0xBF203000) ++#endif ++ ++#ifndef DANUBE_CGU ++#define DANUBE_CGU (0xBF103000) ++#endif ++#ifndef DANUBE_CGU_IFCCR ++/***CGU Interface Clock Control Register***/ ++#define DANUBE_CGU_IFCCR ((volatile u32*)(DANUBE_CGU+ 0x0018)) ++#endif ++ ++#ifndef DANUBE_PMU ++#define DANUBE_PMU (KSEG1+0x1F102000) ++#endif ++#ifndef DANUBE_PMU_PWDCR ++/* PMU Power down Control Register */ ++#define DANUBE_PMU_PWDCR ((volatile u32*)(DANUBE_PMU+0x001C)) ++#endif ++ ++ ++#define DANUBE_RCU_UBSCFG ((volatile u32*)(DANUBE_RCU_BASE_ADDR + 0x18)) ++#define DANUBE_USBCFG_HDSEL_BIT 11 // 0:host, 1:device ++#define DANUBE_USBCFG_HOST_END_BIT 10 // 0:little_end, 1:big_end ++#define DANUBE_USBCFG_SLV_END_BIT 9 // 0:little_end, 1:big_end ++ ++extern void lq_mask_and_ack_irq (unsigned int irq_nr); ++#define mask_and_ack_ifx_irq lq_mask_and_ack_irq ++ ++#endif //__DWC_OTG_IFX_H__ +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_plat.h +@@ -0,0 +1,269 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/platform/dwc_otg_plat.h $ ++ * $Revision: 1.1.1.1 $ ++ * $Date: 2009-04-17 06:15:34 $ ++ * $Change: 510301 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++#if !defined(__DWC_OTG_PLAT_H__) ++#define __DWC_OTG_PLAT_H__ ++ ++#include <linux/types.h> ++#include <linux/slab.h> ++#include <linux/list.h> ++#include <linux/delay.h> ++#include <asm/io.h> ++ ++/** ++ * @file ++ * ++ * This file contains the Platform Specific constants, interfaces ++ * (functions and macros) for Linux. ++ * ++ */ ++/*#if !defined(__LINUX__) ++#error "The contents of this file is Linux specific!!!" ++#endif ++*/ ++#include <xway.h> ++#define writel lq_w32 ++#define readl lq_r32 ++ ++/** ++ * Reads the content of a register. ++ * ++ * @param _reg address of register to read. ++ * @return contents of the register. ++ * ++ ++ * Usage:<br> ++ * <code>uint32_t dev_ctl = dwc_read_reg32(&dev_regs->dctl);</code> ++ */ ++static __inline__ uint32_t dwc_read_reg32( volatile uint32_t *_reg) ++{ ++ return readl(_reg); ++}; ++ ++/** ++ * Writes a register with a 32 bit value. ++ * ++ * @param _reg address of register to write. ++ * @param _value to write to _reg. ++ * ++ * Usage:<br> ++ * <code>dwc_write_reg32(&dev_regs->dctl, 0); </code> ++ */ ++static __inline__ void dwc_write_reg32( volatile uint32_t *_reg, const uint32_t _value) ++{ ++ writel( _value, _reg ); ++}; ++ ++/** ++ * This function modifies bit values in a register, using the ++ * algorithm: (reg_contents & ~clear_mask) | set_mask. ++ * ++ * @param _reg address of register to modify. ++ * @param _clear_mask bit mask to be cleared. ++ * @param _set_mask bit mask to be set. ++ * ++ * Usage:<br> ++ * <code> // Clear the SOF Interrupt Mask bit and <br> ++ * // set the OTG Interrupt mask bit, leaving all others as they were. ++ * dwc_modify_reg32(&dev_regs->gintmsk, DWC_SOF_INT, DWC_OTG_INT);</code> ++ */ ++static __inline__ ++ void dwc_modify_reg32( volatile uint32_t *_reg, const uint32_t _clear_mask, const uint32_t _set_mask) ++{ ++ writel( (readl(_reg) & ~_clear_mask) | _set_mask, _reg ); ++}; ++ ++ ++/** ++ * Wrapper for the OS micro-second delay function. ++ * @param[in] _usecs Microseconds of delay ++ */ ++static __inline__ void UDELAY( const uint32_t _usecs ) ++{ ++ udelay( _usecs ); ++} ++ ++/** ++ * Wrapper for the OS milli-second delay function. ++ * @param[in] _msecs milliseconds of delay ++ */ ++static __inline__ void MDELAY( const uint32_t _msecs ) ++{ ++ mdelay( _msecs ); ++} ++ ++/** ++ * Wrapper for the Linux spin_lock. On the ARM (Integrator) ++ * spin_lock() is a nop. ++ * ++ * @param _lock Pointer to the spinlock. ++ */ ++static __inline__ void SPIN_LOCK( spinlock_t *_lock ) ++{ ++ spin_lock(_lock); ++} ++ ++/** ++ * Wrapper for the Linux spin_unlock. On the ARM (Integrator) ++ * spin_unlock() is a nop. ++ * ++ * @param _lock Pointer to the spinlock. ++ */ ++static __inline__ void SPIN_UNLOCK( spinlock_t *_lock ) ++{ ++ spin_unlock(_lock); ++} ++ ++/** ++ * Wrapper (macro) for the Linux spin_lock_irqsave. On the ARM ++ * (Integrator) spin_lock() is a nop. ++ * ++ * @param _l Pointer to the spinlock.
++ * @param _f unsigned long for irq flags storage. ++ */ ++#define SPIN_LOCK_IRQSAVE( _l, _f ) { \ ++ spin_lock_irqsave(_l,_f); \ ++ } ++ ++/** ++ * Wrapper (macro) for the Linux spin_unlock_irqrestore. On the ARM ++ * (Integrator) spin_lock() is a nop. ++ * ++ * @param _l Pointer to the spinlock. ++ * @param _f unsigned long for irq flags storage. ++ */ ++#define SPIN_UNLOCK_IRQRESTORE( _l,_f ) {\ ++ spin_unlock_irqrestore(_l,_f); \ ++ } ++ ++ ++/* ++ * Debugging support vanishes in non-debug builds. ++ */ ++ ++ ++/** ++ * The Debug Level bit-mask variable. ++ */ ++extern uint32_t g_dbg_lvl; ++/** ++ * Set the Debug Level variable. ++ */ ++static inline uint32_t SET_DEBUG_LEVEL( const uint32_t _new ) ++{ ++ uint32_t old = g_dbg_lvl; ++ g_dbg_lvl = _new; ++ return old; ++} ++ ++/** When debug level has the DBG_CIL bit set, display CIL Debug messages. */ ++#define DBG_CIL (0x2) ++/** When debug level has the DBG_CILV bit set, display CIL Verbose debug ++ * messages */ ++#define DBG_CILV (0x20) ++/** When debug level has the DBG_PCD bit set, display PCD (Device) debug ++ * messages */ ++#define DBG_PCD (0x4) ++/** When debug level has the DBG_PCDV set, display PCD (Device) Verbose debug ++ * messages */ ++#define DBG_PCDV (0x40) ++/** When debug level has the DBG_HCD bit set, display Host debug messages */ ++#define DBG_HCD (0x8) ++/** When debug level has the DBG_HCDV bit set, display Verbose Host debug ++ * messages */ ++#define DBG_HCDV (0x80) ++/** When debug level has the DBG_HCD_URB bit set, display enqueued URBs in host ++ * mode. */ ++#define DBG_HCD_URB (0x800) ++ ++/** When debug level has any bit set, display debug messages */ ++#define DBG_ANY (0xFF) ++ ++/** All debug messages off */ ++#define DBG_OFF 0 ++ ++/** Prefix string for DWC_DEBUG print macros. */ ++#define USB_DWC "DWC_otg: " ++ ++/** ++ * Print a debug message when the Global debug level variable contains ++ * the bit defined in <code>lvl</code>. ++ * ++ * @param[in] lvl - Debug level, use one of the DBG_ constants above. ++ * @param[in] x - like printf ++ * ++ * Example:<p> ++ * <code> ++ * DWC_DEBUGPL( DBG_ANY, "%s(%p)\n", __func__, _reg_base_addr); ++ * </code> ++ * <br> ++ * results in:<br> ++ * <code> ++ * usb-DWC_otg: dwc_otg_cil_init(ca867000) ++ * </code> ++ */ ++#ifdef DEBUG ++ ++# define DWC_DEBUGPL(lvl, x...) do{ if ((lvl)&g_dbg_lvl)printk( KERN_DEBUG USB_DWC x ); }while(0) ++# define DWC_DEBUGP(x...) DWC_DEBUGPL(DBG_ANY, x ) ++ ++# define CHK_DEBUG_LEVEL(level) ((level) & g_dbg_lvl) ++ ++#else ++ ++# define DWC_DEBUGPL(lvl, x...) do{}while(0) ++# define DWC_DEBUGP(x...) ++ ++# define CHK_DEBUG_LEVEL(level) (0) ++ ++#endif /*DEBUG*/ ++ ++/** ++ * Print an Error message. ++ */ ++#define DWC_ERROR(x...) printk( KERN_ERR USB_DWC x ) ++/** ++ * Print a Warning message. ++ */ ++#define DWC_WARN(x...) printk( KERN_WARNING USB_DWC x ) ++/** ++ * Print a notice (normal but significant message). ++ */ ++#define DWC_NOTICE(x...) printk( KERN_NOTICE USB_DWC x ) ++/** ++ * Basic message printing. ++ */ ++#define DWC_PRINT(x...) 
printk( KERN_INFO USB_DWC x ) ++ ++#endif ++ +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_regs.h +@@ -0,0 +1,1797 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_regs.h $ ++ * $Revision: 1.1.1.1 $ ++ * $Date: 2009-04-17 06:15:34 $ ++ * $Change: 631780 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++#ifndef __DWC_OTG_REGS_H__ ++#define __DWC_OTG_REGS_H__ ++ ++/** ++ * @file ++ * ++ * This file contains the data structures for accessing the DWC_otg core registers. ++ * ++ * The application interfaces with the HS OTG core by reading from and ++ * writing to the Control and Status Register (CSR) space through the ++ * AHB Slave interface. These registers are 32 bits wide, and the ++ * addresses are 32-bit-block aligned. ++ * CSRs are classified as follows: ++ * - Core Global Registers ++ * - Device Mode Registers ++ * - Device Global Registers ++ * - Device Endpoint Specific Registers ++ * - Host Mode Registers ++ * - Host Global Registers ++ * - Host Port CSRs ++ * - Host Channel Specific Registers ++ * ++ * Only the Core Global registers can be accessed in both Device and ++ * Host modes. When the HS OTG core is operating in one mode, either ++ * Device or Host, the application must not access registers from the ++ * other mode. When the core switches from one mode to another, the ++ * registers in the new mode of operation must be reprogrammed as they ++ * would be after a power-on reset. ++ */ ++ ++/****************************************************************************/ ++/** DWC_otg Core registers . ++ * The dwc_otg_core_global_regs structure defines the size ++ * and relative field offsets for the Core Global registers. 
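++ *
++ * As an illustrative sketch (the <code>global_regs</code> pointer is assumed to
++ * have been mapped to the core's register base by the driver; it is not
++ * defined here), the Synopsys ID of the core could be read with:
++ * <code>uint32_t snpsid = dwc_read_reg32(&global_regs->gsnpsid);</code>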
++ */ ++typedef struct dwc_otg_core_global_regs ++{ ++ /** OTG Control and Status Register. <i>Offset: 000h</i> */ ++ volatile uint32_t gotgctl; ++ /** OTG Interrupt Register. <i>Offset: 004h</i> */ ++ volatile uint32_t gotgint; ++ /**Core AHB Configuration Register. <i>Offset: 008h</i> */ ++ volatile uint32_t gahbcfg; ++#define DWC_GLBINTRMASK 0x0001 ++#define DWC_DMAENABLE 0x0020 ++#define DWC_NPTXEMPTYLVL_EMPTY 0x0080 ++#define DWC_NPTXEMPTYLVL_HALFEMPTY 0x0000 ++#define DWC_PTXEMPTYLVL_EMPTY 0x0100 ++#define DWC_PTXEMPTYLVL_HALFEMPTY 0x0000 ++ ++ ++ /**Core USB Configuration Register. <i>Offset: 00Ch</i> */ ++ volatile uint32_t gusbcfg; ++ /**Core Reset Register. <i>Offset: 010h</i> */ ++ volatile uint32_t grstctl; ++ /**Core Interrupt Register. <i>Offset: 014h</i> */ ++ volatile uint32_t gintsts; ++ /**Core Interrupt Mask Register. <i>Offset: 018h</i> */ ++ volatile uint32_t gintmsk; ++ /**Receive Status Queue Read Register (Read Only). <i>Offset: 01Ch</i> */ ++ volatile uint32_t grxstsr; ++ /**Receive Status Queue Read & POP Register (Read Only). <i>Offset: 020h</i>*/ ++ volatile uint32_t grxstsp; ++ /**Receive FIFO Size Register. <i>Offset: 024h</i> */ ++ volatile uint32_t grxfsiz; ++ /**Non Periodic Transmit FIFO Size Register. <i>Offset: 028h</i> */ ++ volatile uint32_t gnptxfsiz; ++ /**Non Periodic Transmit FIFO/Queue Status Register (Read ++ * Only). <i>Offset: 02Ch</i> */ ++ volatile uint32_t gnptxsts; ++ /**I2C Access Register. <i>Offset: 030h</i> */ ++ volatile uint32_t gi2cctl; ++ /**PHY Vendor Control Register. <i>Offset: 034h</i> */ ++ volatile uint32_t gpvndctl; ++ /**General Purpose Input/Output Register. <i>Offset: 038h</i> */ ++ volatile uint32_t ggpio; ++ /**User ID Register. <i>Offset: 03Ch</i> */ ++ volatile uint32_t guid; ++ /**Synopsys ID Register (Read Only). <i>Offset: 040h</i> */ ++ volatile uint32_t gsnpsid; ++ /**User HW Config1 Register (Read Only). <i>Offset: 044h</i> */ ++ volatile uint32_t ghwcfg1; ++ /**User HW Config2 Register (Read Only). <i>Offset: 048h</i> */ ++ volatile uint32_t ghwcfg2; ++#define DWC_SLAVE_ONLY_ARCH 0 ++#define DWC_EXT_DMA_ARCH 1 ++#define DWC_INT_DMA_ARCH 2 ++ ++#define DWC_MODE_HNP_SRP_CAPABLE 0 ++#define DWC_MODE_SRP_ONLY_CAPABLE 1 ++#define DWC_MODE_NO_HNP_SRP_CAPABLE 2 ++#define DWC_MODE_SRP_CAPABLE_DEVICE 3 ++#define DWC_MODE_NO_SRP_CAPABLE_DEVICE 4 ++#define DWC_MODE_SRP_CAPABLE_HOST 5 ++#define DWC_MODE_NO_SRP_CAPABLE_HOST 6 ++ ++ /**User HW Config3 Register (Read Only). <i>Offset: 04Ch</i> */ ++ volatile uint32_t ghwcfg3; ++ /**User HW Config4 Register (Read Only). <i>Offset: 050h</i>*/ ++ volatile uint32_t ghwcfg4; ++ /** Reserved <i>Offset: 054h-0FFh</i> */ ++ uint32_t reserved[43]; ++ /** Host Periodic Transmit FIFO Size Register. <i>Offset: 100h</i> */ ++ volatile uint32_t hptxfsiz; ++ /** Device Periodic Transmit FIFO#n Register if dedicated fifos are disabled, ++ otherwise Device Transmit FIFO#n Register. ++ * <i>Offset: 104h + (FIFO_Number-1)*04h, 1 <= FIFO Number <= 15 (1<=n<=15).</i> */ ++ //volatile uint32_t dptxfsiz[15]; ++ volatile uint32_t dptxfsiz_dieptxf[15]; ++} dwc_otg_core_global_regs_t; ++ ++/** ++ * This union represents the bit fields of the Core OTG Control ++ * and Status Register (GOTGCTL). Set the bits using the bit ++ * fields then write the <i>d32</i> value to the register. 
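++ *
++ * A minimal read-modify-write sketch, again with an assumed
++ * <code>global_regs</code> pointer:
++ * <code>gotgctl_data_t v;
++ * v.d32 = dwc_read_reg32(&global_regs->gotgctl);
++ * v.b.sesreq = 1;
++ * dwc_write_reg32(&global_regs->gotgctl, v.d32);</code>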
++ */ ++typedef union gotgctl_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned reserved31_21 : 11; ++ unsigned currmod : 1; ++ unsigned bsesvld : 1; ++ unsigned asesvld : 1; ++ unsigned reserved17 : 1; ++ unsigned conidsts : 1; ++ unsigned reserved15_12 : 4; ++ unsigned devhnpen : 1; ++ unsigned hstsethnpen : 1; ++ unsigned hnpreq : 1; ++ unsigned hstnegscs : 1; ++ unsigned reserved7_2 : 6; ++ unsigned sesreq : 1; ++ unsigned sesreqscs : 1; ++ } b; ++} gotgctl_data_t; ++ ++/** ++ * This union represents the bit fields of the Core OTG Interrupt Register ++ * (GOTGINT). Set/clear the bits using the bit fields then write the <i>d32</i> ++ * value to the register. ++ */ ++typedef union gotgint_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Current Mode */ ++ unsigned reserved31_20 : 12; ++ /** Debounce Done */ ++ unsigned debdone : 1; ++ /** A-Device Timeout Change */ ++ unsigned adevtoutchng : 1; ++ /** Host Negotiation Detected */ ++ unsigned hstnegdet : 1; ++ unsigned reserver16_10 : 7; ++ /** Host Negotiation Success Status Change */ ++ unsigned hstnegsucstschng : 1; ++ /** Session Request Success Status Change */ ++ unsigned sesreqsucstschng : 1; ++ unsigned reserved3_7 : 5; ++ /** Session End Detected */ ++ unsigned sesenddet : 1; ++ /** Current Mode */ ++ unsigned reserved1_0 : 2; ++ } b; ++} gotgint_data_t; ++ ++ ++/** ++ * This union represents the bit fields of the Core AHB Configuration ++ * Register (GAHBCFG). Set/clear the bits using the bit fields then ++ * write the <i>d32</i> value to the register. ++ */ ++typedef union gahbcfg_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++#define DWC_GAHBCFG_TXFEMPTYLVL_EMPTY 1 ++#define DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY 0 ++ unsigned reserved9_31 : 23; ++ unsigned ptxfemplvl : 1; ++ unsigned nptxfemplvl_txfemplvl : 1; ++#define DWC_GAHBCFG_DMAENABLE 1 ++ unsigned reserved : 1; ++ unsigned dmaenable : 1; ++#define DWC_GAHBCFG_INT_DMA_BURST_SINGLE 0 ++#define DWC_GAHBCFG_INT_DMA_BURST_INCR 1 ++#define DWC_GAHBCFG_INT_DMA_BURST_INCR4 3 ++#define DWC_GAHBCFG_INT_DMA_BURST_INCR8 5 ++#define DWC_GAHBCFG_INT_DMA_BURST_INCR16 7 ++ unsigned hburstlen : 4; ++ unsigned glblintrmsk : 1; ++#define DWC_GAHBCFG_GLBINT_ENABLE 1 ++ ++ } b; ++} gahbcfg_data_t; ++ ++/** ++ * This union represents the bit fields of the Core USB Configuration ++ * Register (GUSBCFG). Set the bits using the bit fields then write ++ * the <i>d32</i> value to the register. ++ */ ++typedef union gusbcfg_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned corrupt_tx_packet: 1; /*fscz*/ ++ unsigned force_device_mode: 1; ++ unsigned force_host_mode: 1; ++ unsigned reserved23_28 : 6; ++ unsigned term_sel_dl_pulse : 1; ++ unsigned ulpi_int_vbus_indicator : 1; ++ unsigned ulpi_ext_vbus_drv : 1; ++ unsigned ulpi_clk_sus_m : 1; ++ unsigned ulpi_auto_res : 1; ++ unsigned ulpi_fsls : 1; ++ unsigned otgutmifssel : 1; ++ unsigned phylpwrclksel : 1; ++ unsigned nptxfrwnden : 1; ++ unsigned usbtrdtim : 4; ++ unsigned hnpcap : 1; ++ unsigned srpcap : 1; ++ unsigned ddrsel : 1; ++ unsigned physel : 1; ++ unsigned fsintf : 1; ++ unsigned ulpi_utmi_sel : 1; ++ unsigned phyif : 1; ++ unsigned toutcal : 3; ++ } b; ++} gusbcfg_data_t; ++ ++/** ++ * This union represents the bit fields of the Core Reset Register ++ * (GRSTCTL). 
Set/clear the bits using the bit fields then write the ++ * <i>d32</i> value to the register. ++ */ ++typedef union grstctl_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** AHB Master Idle. Indicates the AHB Master State ++ * Machine is in IDLE condition. */ ++ unsigned ahbidle : 1; ++ /** DMA Request Signal. Indicates a DMA request is in ++ * progress. Used for debug purposes. */ ++ unsigned dmareq : 1; ++ /** Reserved */ ++ unsigned reserved29_11 : 19; ++ /** TxFIFO Number (TxFNum) (Device and Host). ++ * ++ * This is the FIFO number which needs to be flushed, ++ * using the TxFIFO Flush bit. This field should not ++ * be changed until the TxFIFO Flush bit is cleared by ++ * the core. ++ * - 0x0 : Non Periodic TxFIFO Flush ++ * - 0x1 : Periodic TxFIFO #1 Flush in device mode ++ * or Periodic TxFIFO in host mode ++ * - 0x2 : Periodic TxFIFO #2 Flush in device mode. ++ * - ... ++ * - 0xF : Periodic TxFIFO #15 Flush in device mode ++ * - 0x10: Flush all the Transmit NonPeriodic and ++ * Transmit Periodic FIFOs in the core ++ */ ++ unsigned txfnum : 5; ++ /** TxFIFO Flush (TxFFlsh) (Device and Host). ++ * ++ * This bit is used to selectively flush a single or ++ * all transmit FIFOs. The application must first ++ * ensure that the core is not in the middle of a ++ * transaction. <p>The application should write into ++ * this bit, only after making sure that neither the ++ * DMA engine is writing into the TxFIFO nor the MAC ++ * is reading the data out of the FIFO. <p>The ++ * application should wait until the core clears this ++ * bit, before performing any operations. This bit ++ * takes 8 clocks (slowest of PHY or AHB clock) ++ * to clear. ++ */ ++ unsigned txfflsh : 1; ++ /** RxFIFO Flush (RxFFlsh) (Device and Host) ++ * ++ * The application can flush the entire Receive FIFO ++ * using this bit. <p>The application must first ++ * ensure that the core is not in the middle of a ++ * transaction. <p>The application should write into ++ * this bit, only after making sure that neither the ++ * DMA engine is reading from the RxFIFO nor the MAC ++ * is writing the data into the FIFO. <p>The ++ * application should wait until the bit is cleared ++ * before performing any other operations. This bit ++ * takes 8 clocks (slowest of PHY or AHB clock) ++ * to clear. ++ */ ++ unsigned rxfflsh : 1; ++ /** In Token Sequence Learning Queue Flush ++ * (INTknQFlsh) (Device Only) ++ */ ++ unsigned intknqflsh : 1; ++ /** Host Frame Counter Reset (Host Only)<br> ++ * ++ * The application can reset the (micro)frame number ++ * counter inside the core, using this bit. When the ++ * (micro)frame counter is reset, the subsequent SOF ++ * sent out by the core, will have a (micro)frame ++ * number of 0. ++ */ ++ unsigned hstfrm : 1; ++ /** Hclk Soft Reset ++ * ++ * The application uses this bit to reset the control logic in ++ * the AHB clock domain. Only AHB clock domain pipelines are ++ * reset. ++ */ ++ unsigned hsftrst : 1; ++ /** Core Soft Reset (CSftRst) (Device and Host) ++ * ++ * The application can flush the control logic in the ++ * entire core using this bit. This bit resets the ++ * pipelines in the AHB Clock domain as well as the ++ * PHY Clock domain. ++ * ++ * The state machines are reset to an IDLE state, the ++ * control bits in the CSRs are cleared, all the ++ * transmit FIFOs and the receive FIFO are flushed. ++ * ++ * The status mask bits that control the generation of ++ * the interrupt are cleared, to clear the ++ * interrupt.
The interrupt status bits are not ++ * cleared, so the application can get the status of ++ * any events that occurred in the core after it has ++ * set this bit. ++ * ++ * Any transactions on the AHB are terminated as soon ++ * as possible following the protocol. Any ++ * transactions on the USB are terminated immediately. ++ * ++ * The configuration settings in the CSRs are ++ * unchanged, so the software doesn't have to ++ * reprogram these registers (Device ++ * Configuration/Host Configuration/Core System ++ * Configuration/Core PHY Configuration). ++ * ++ * The application can write to this bit, any time it ++ * wants to reset the core. This is a self clearing ++ * bit and the core clears this bit after all the ++ * necessary logic is reset in the core, which may ++ * take several clocks, depending on the current state ++ * of the core. ++ */ ++ unsigned csftrst : 1; ++ } b; ++} grstctl_t; ++ ++ ++/** ++ * This union represents the bit fields of the Core Interrupt Mask ++ * Register (GINTMSK). Set/clear the bits using the bit fields then ++ * write the <i>d32</i> value to the register. ++ */ ++typedef union gintmsk_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned wkupintr : 1; ++ unsigned sessreqintr : 1; ++ unsigned disconnect : 1; ++ unsigned conidstschng : 1; ++ unsigned reserved27 : 1; ++ unsigned ptxfempty : 1; ++ unsigned hcintr : 1; ++ unsigned portintr : 1; ++ unsigned reserved22_23 : 2; ++ unsigned incomplisoout : 1; ++ unsigned incomplisoin : 1; ++ unsigned outepintr : 1; ++ unsigned inepintr : 1; ++ unsigned epmismatch : 1; ++ unsigned reserved16 : 1; ++ unsigned eopframe : 1; ++ unsigned isooutdrop : 1; ++ unsigned enumdone : 1; ++ unsigned usbreset : 1; ++ unsigned usbsuspend : 1; ++ unsigned erlysuspend : 1; ++ unsigned i2cintr : 1; ++ unsigned reserved8 : 1; ++ unsigned goutnakeff : 1; ++ unsigned ginnakeff : 1; ++ unsigned nptxfempty : 1; ++ unsigned rxstsqlvl : 1; ++ unsigned sofintr : 1; ++ unsigned otgintr : 1; ++ unsigned modemismatch : 1; ++ unsigned reserved0 : 1; ++ } b; ++} gintmsk_data_t; ++/** ++ * This union represents the bit fields of the Core Interrupt Register ++ * (GINTSTS). Set/clear the bits using the bit fields then write the ++ * <i>d32</i> value to the register. 
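++ *
++ * The handlers in this driver AND the raw status with the current mask before
++ * dispatching (see the HCINT/HCINTMSK handling in the HCD); a comparable
++ * sketch for the global registers, with an assumed <code>global_regs</code>
++ * pointer, would be:
++ * <code>gintsts_data_t gintsts;
++ * gintmsk_data_t gintmsk;
++ * gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
++ * gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk);
++ * gintsts.d32 &= gintmsk.d32;</code>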
++ */ ++typedef union gintsts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++#define DWC_SOF_INTR_MASK 0x0008 ++ /** register bits */ ++ struct ++ { ++#define DWC_HOST_MODE 1 ++ unsigned wkupintr : 1; ++ unsigned sessreqintr : 1; ++ unsigned disconnect : 1; ++ unsigned conidstschng : 1; ++ unsigned reserved27 : 1; ++ unsigned ptxfempty : 1; ++ unsigned hcintr : 1; ++ unsigned portintr : 1; ++ unsigned reserved22_23 : 2; ++ unsigned incomplisoout : 1; ++ unsigned incomplisoin : 1; ++ unsigned outepintr : 1; ++ unsigned inepint: 1; ++ unsigned epmismatch : 1; ++ unsigned intokenrx : 1; ++ unsigned eopframe : 1; ++ unsigned isooutdrop : 1; ++ unsigned enumdone : 1; ++ unsigned usbreset : 1; ++ unsigned usbsuspend : 1; ++ unsigned erlysuspend : 1; ++ unsigned i2cintr : 1; ++ unsigned reserved8 : 1; ++ unsigned goutnakeff : 1; ++ unsigned ginnakeff : 1; ++ unsigned nptxfempty : 1; ++ unsigned rxstsqlvl : 1; ++ unsigned sofintr : 1; ++ unsigned otgintr : 1; ++ unsigned modemismatch : 1; ++ unsigned curmode : 1; ++ } b; ++} gintsts_data_t; ++ ++ ++/** ++ * This union represents the bit fields in the Device Receive Status Read and ++ * Pop Registers (GRXSTSR, GRXSTSP) Read the register into the <i>d32</i> ++ * element then read out the bits using the <i>b</i>it elements. ++ */ ++typedef union device_grxsts_data { ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ unsigned reserved : 7; ++ unsigned fn : 4; ++#define DWC_STS_DATA_UPDT 0x2 // OUT Data Packet ++#define DWC_STS_XFER_COMP 0x3 // OUT Data Transfer Complete ++ ++#define DWC_DSTS_GOUT_NAK 0x1 // Global OUT NAK ++#define DWC_DSTS_SETUP_COMP 0x4 // Setup Phase Complete ++#define DWC_DSTS_SETUP_UPDT 0x6 // SETUP Packet ++ unsigned pktsts : 4; ++ unsigned dpid : 2; ++ unsigned bcnt : 11; ++ unsigned epnum : 4; ++ } b; ++} device_grxsts_data_t; ++ ++/** ++ * This union represents the bit fields in the Host Receive Status Read and ++ * Pop Registers (GRXSTSR, GRXSTSP) Read the register into the <i>d32</i> ++ * element then read out the bits using the <i>b</i>it elements. ++ */ ++typedef union host_grxsts_data { ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ unsigned reserved31_21 : 11; ++#define DWC_GRXSTS_PKTSTS_IN 0x2 ++#define DWC_GRXSTS_PKTSTS_IN_XFER_COMP 0x3 ++#define DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR 0x5 ++#define DWC_GRXSTS_PKTSTS_CH_HALTED 0x7 ++ unsigned pktsts : 4; ++ unsigned dpid : 2; ++ unsigned bcnt : 11; ++ unsigned chnum : 4; ++ } b; ++} host_grxsts_data_t; ++ ++/** ++ * This union represents the bit fields in the FIFO Size Registers (HPTXFSIZ, ++ * GNPTXFSIZ, DPTXFSIZn). Read the register into the <i>d32</i> element then ++ * read out the bits using the <i>b</i>it elements. ++ */ ++typedef union fifosize_data { ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ unsigned depth : 16; ++ unsigned startaddr : 16; ++ } b; ++} fifosize_data_t; ++ ++/** ++ * This union represents the bit fields in the Non-Periodic Transmit ++ * FIFO/Queue Status Register (GNPTXSTS). Read the register into the ++ * <i>d32</i> element then read out the bits using the <i>b</i>it ++ * elements. 
++ */ ++typedef union gnptxsts_data { ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ unsigned reserved : 1; ++ /** Top of the Non-Periodic Transmit Request Queue ++ * - bits 30:27 - Channel/EP Number ++ * - bits 26:25 - Token Type ++ * - bit 24 - Terminate (Last entry for the selected ++ * channel/EP) ++ * - 2'b00 - IN/OUT ++ * - 2'b01 - Zero Length OUT ++ * - 2'b10 - PING/Complete Split ++ * - 2'b11 - Channel Halt ++ ++ */ ++ unsigned nptxqtop_chnep : 4; ++ unsigned nptxqtop_token : 2; ++ unsigned nptxqtop_terminate : 1; ++ unsigned nptxqspcavail : 8; ++ unsigned nptxfspcavail : 16; ++ } b; ++} gnptxsts_data_t; ++ ++/** ++ * This union represents the bit fields in the Transmit ++ * FIFO Status Register (DTXFSTS). Read the register into the ++ * <i>d32</i> element then read out the bits using the <i>b</i>it ++ * elements. ++ */ ++typedef union dtxfsts_data /* fscz */ //* ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ unsigned reserved : 16; ++ unsigned txfspcavail : 16; ++ } b; ++} dtxfsts_data_t; ++ ++/** ++ * This union represents the bit fields in the I2C Control Register ++ * (I2CCTL). Read the register into the <i>d32</i> element then read out the ++ * bits using the <i>b</i>it elements. ++ */ ++typedef union gi2cctl_data { ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ unsigned bsydne : 1; ++ unsigned rw : 1; ++ unsigned reserved : 2; ++ unsigned i2cdevaddr : 2; ++ unsigned i2csuspctl : 1; ++ unsigned ack : 1; ++ unsigned i2cen : 1; ++ unsigned addr : 7; ++ unsigned regaddr : 8; ++ unsigned rwdata : 8; ++ } b; ++} gi2cctl_data_t; ++ ++/** ++ * This union represents the bit fields in the User HW Config1 ++ * Register. Read the register into the <i>d32</i> element then read ++ * out the bits using the <i>b</i>it elements. ++ */ ++typedef union hwcfg1_data { ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ unsigned ep_dir15 : 2; ++ unsigned ep_dir14 : 2; ++ unsigned ep_dir13 : 2; ++ unsigned ep_dir12 : 2; ++ unsigned ep_dir11 : 2; ++ unsigned ep_dir10 : 2; ++ unsigned ep_dir9 : 2; ++ unsigned ep_dir8 : 2; ++ unsigned ep_dir7 : 2; ++ unsigned ep_dir6 : 2; ++ unsigned ep_dir5 : 2; ++ unsigned ep_dir4 : 2; ++ unsigned ep_dir3 : 2; ++ unsigned ep_dir2 : 2; ++ unsigned ep_dir1 : 2; ++ unsigned ep_dir0 : 2; ++ } b; ++} hwcfg1_data_t; ++ ++/** ++ * This union represents the bit fields in the User HW Config2 ++ * Register. Read the register into the <i>d32</i> element then read ++ * out the bits using the <i>b</i>it elements. 
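++ *
++ * GHWCFG2 is read-only; as a sketch (with an assumed <code>global_regs</code>
++ * pointer), a probe routine could read the synthesis options with:
++ * <code>hwcfg2_data_t hwcfg2;
++ * hwcfg2.d32 = dwc_read_reg32(&global_regs->ghwcfg2);
++ * int num_chan = hwcfg2.b.num_host_chan;</code>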
++ */ ++typedef union hwcfg2_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ /* GHWCFG2 */ ++ unsigned reserved31 : 1; ++ unsigned dev_token_q_depth : 5; ++ unsigned host_perio_tx_q_depth : 2; ++ unsigned nonperio_tx_q_depth : 2; ++ unsigned rx_status_q_depth : 2; ++ unsigned dynamic_fifo : 1; ++ unsigned perio_ep_supported : 1; ++ unsigned num_host_chan : 4; ++ unsigned num_dev_ep : 4; ++ unsigned fs_phy_type : 2; ++#define DWC_HWCFG2_HS_PHY_TYPE_NOT_SUPPORTED 0 ++#define DWC_HWCFG2_HS_PHY_TYPE_UTMI 1 ++#define DWC_HWCFG2_HS_PHY_TYPE_ULPI 2 ++#define DWC_HWCFG2_HS_PHY_TYPE_UTMI_ULPI 3 ++ unsigned hs_phy_type : 2; ++ unsigned point2point : 1; ++ unsigned architecture : 2; ++#define DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG 0 ++#define DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG 1 ++#define DWC_HWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE_OTG 2 ++#define DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE 3 ++#define DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE 4 ++#define DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST 5 ++#define DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST 6 ++ unsigned op_mode : 3; ++ } b; ++} hwcfg2_data_t; ++ ++/** ++ * This union represents the bit fields in the User HW Config3 ++ * Register. Read the register into the <i>d32</i> element then read ++ * out the bits using the <i>b</i>it elements. ++ */ ++typedef union hwcfg3_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ /* GHWCFG3 */ ++ unsigned dfifo_depth : 16; ++ unsigned reserved15_13 : 3; ++ unsigned ahb_phy_clock_synch : 1; ++ unsigned synch_reset_type : 1; ++ unsigned optional_features : 1; ++ unsigned vendor_ctrl_if : 1; ++ unsigned i2c : 1; ++ unsigned otg_func : 1; ++ unsigned packet_size_cntr_width : 3; ++ unsigned xfer_size_cntr_width : 4; ++ } b; ++} hwcfg3_data_t; ++ ++/** ++ * This union represents the bit fields in the User HW Config4 ++ * Register. Read the register into the <i>d32</i> element then read ++ * out the bits using the <i>b</i>it elements. ++ */ ++typedef union hwcfg4_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++unsigned reserved31_30 : 2; /* fscz */ ++ unsigned num_in_eps : 4; ++ unsigned ded_fifo_en : 1; ++ ++ unsigned session_end_filt_en : 1; ++ unsigned b_valid_filt_en : 1; ++ unsigned a_valid_filt_en : 1; ++ unsigned vbus_valid_filt_en : 1; ++ unsigned iddig_filt_en : 1; ++ unsigned num_dev_mode_ctrl_ep : 4; ++ unsigned utmi_phy_data_width : 2; ++ unsigned min_ahb_freq : 9; ++ unsigned power_optimiz : 1; ++ unsigned num_dev_perio_in_ep : 4; ++ } b; ++} hwcfg4_data_t; ++ ++//////////////////////////////////////////// ++// Device Registers ++/** ++ * Device Global Registers. <i>Offsets 800h-BFFh</i> ++ * ++ * The following structures define the size and relative field offsets ++ * for the Device Mode Registers. ++ * ++ * <i>These registers are visible only in Device mode and must not be ++ * accessed in Host mode, as the results are unknown.</i> ++ */ ++typedef struct dwc_otg_dev_global_regs ++{ ++ /** Device Configuration Register. <i>Offset 800h</i> */ ++ volatile uint32_t dcfg; ++ /** Device Control Register. <i>Offset: 804h</i> */ ++ volatile uint32_t dctl; ++ /** Device Status Register (Read Only). <i>Offset: 808h</i> */ ++ volatile uint32_t dsts; ++ /** Reserved. <i>Offset: 80Ch</i> */ ++ uint32_t unused; ++ /** Device IN Endpoint Common Interrupt Mask ++ * Register. <i>Offset: 810h</i> */ ++ volatile uint32_t diepmsk; ++ /** Device OUT Endpoint Common Interrupt Mask ++ * Register. 
<i>Offset: 814h</i> */ ++ volatile uint32_t doepmsk; ++ /** Device All Endpoints Interrupt Register. <i>Offset: 818h</i> */ ++ volatile uint32_t daint; ++ /** Device All Endpoints Interrupt Mask Register. <i>Offset: ++ * 81Ch</i> */ ++ volatile uint32_t daintmsk; ++ /** Device IN Token Queue Read Register-1 (Read Only). ++ * <i>Offset: 820h</i> */ ++ volatile uint32_t dtknqr1; ++ /** Device IN Token Queue Read Register-2 (Read Only). ++ * <i>Offset: 824h</i> */ ++ volatile uint32_t dtknqr2; ++ /** Device VBUS discharge Register. <i>Offset: 828h</i> */ ++ volatile uint32_t dvbusdis; ++ /** Device VBUS Pulse Register. <i>Offset: 82Ch</i> */ ++ volatile uint32_t dvbuspulse; ++ /** Device IN Token Queue Read Register-3 (Read Only). ++ * Device Thresholding control register (Read/Write) ++ * <i>Offset: 830h</i> */ ++ volatile uint32_t dtknqr3_dthrctl; ++ /** Device IN Token Queue Read Register-4 (Read Only). / ++ * Device IN EPs empty Inr. Mask Register (Read/Write) ++ * <i>Offset: 834h</i> */ ++ volatile uint32_t dtknqr4_fifoemptymsk; ++} dwc_otg_device_global_regs_t; ++ ++/** ++ * This union represents the bit fields in the Device Configuration ++ * Register. Read the register into the <i>d32</i> member then ++ * set/clear the bits using the <i>b</i>it elements. Write the ++ * <i>d32</i> member to the dcfg register. ++ */ ++typedef union dcfg_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ unsigned reserved31_23 : 9; ++ /** In Endpoint Mis-match count */ ++ unsigned epmscnt : 5; ++ unsigned reserved13_17 : 5; ++ /** Periodic Frame Interval */ ++#define DWC_DCFG_FRAME_INTERVAL_80 0 ++#define DWC_DCFG_FRAME_INTERVAL_85 1 ++#define DWC_DCFG_FRAME_INTERVAL_90 2 ++#define DWC_DCFG_FRAME_INTERVAL_95 3 ++ unsigned perfrint : 2; ++ /** Device Addresses */ ++ unsigned devaddr : 7; ++ unsigned reserved3 : 1; ++ /** Non Zero Length Status OUT Handshake */ ++#define DWC_DCFG_SEND_STALL 1 ++ unsigned nzstsouthshk : 1; ++ /** Device Speed */ ++ unsigned devspd : 2; ++ } b; ++} dcfg_data_t; ++ ++/** ++ * This union represents the bit fields in the Device Control ++ * Register. Read the register into the <i>d32</i> member then ++ * set/clear the bits using the <i>b</i>it elements. ++ */ ++typedef union dctl_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ unsigned reserved : 20; ++ /** Power-On Programming Done */ ++ unsigned pwronprgdone : 1; ++ /** Clear Global OUT NAK */ ++ unsigned cgoutnak : 1; ++ /** Set Global OUT NAK */ ++ unsigned sgoutnak : 1; ++ /** Clear Global Non-Periodic IN NAK */ ++ unsigned cgnpinnak : 1; ++ /** Set Global Non-Periodic IN NAK */ ++ unsigned sgnpinnak : 1; ++ /** Test Control */ ++ unsigned tstctl : 3; ++ /** Global OUT NAK Status */ ++ unsigned goutnaksts : 1; ++ /** Global Non-Periodic IN NAK Status */ ++ unsigned gnpinnaksts : 1; ++ /** Soft Disconnect */ ++ unsigned sftdiscon : 1; ++ /** Remote Wakeup */ ++ unsigned rmtwkupsig : 1; ++ } b; ++} dctl_data_t; ++ ++/** ++ * This union represents the bit fields in the Device Status ++ * Register. Read the register into the <i>d32</i> member then ++ * set/clear the bits using the <i>b</i>it elements. 
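++ *
++ * Illustrative sketch (the accessor and the dev_global_regs pointer are
++ * assumptions for the example; the DWC_DSTS_ENUMSPD_* values and fields
++ * are defined below):
++ *
++ *   dsts_data_t dsts;
++ *   dsts.d32 = dwc_read_reg32(&dev_global_regs->dsts);
++ *   if (dsts.b.enumspd == DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ)
++ *           ep0_mps = 64;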
++ */
++typedef union dsts_data
++{
++    /** raw register data */
++    uint32_t d32;
++    /** register bits */
++    struct {
++        unsigned reserved22_31 : 10;
++        /** Frame or Microframe Number of the received SOF */
++        unsigned soffn : 14;
++        unsigned reserved4_7: 4;
++        /** Erratic Error */
++        unsigned errticerr : 1;
++        /** Enumerated Speed */
++#define DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ 0
++#define DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ 1
++#define DWC_DSTS_ENUMSPD_LS_PHY_6MHZ 2
++#define DWC_DSTS_ENUMSPD_FS_PHY_48MHZ 3
++        unsigned enumspd : 2;
++        /** Suspend Status */
++        unsigned suspsts : 1;
++    } b;
++} dsts_data_t;
++
++
++/**
++ * This union represents the bit fields in the Device IN EP Interrupt
++ * Register and the Device IN EP Common Mask Register.
++ *
++ * - Read the register into the <i>d32</i> member then set/clear the
++ *   bits using the <i>b</i>it elements.
++ */
++typedef union diepint_data
++{
++    /** raw register data */
++    uint32_t d32;
++    /** register bits */
++    struct {
++        unsigned reserved07_31 : 23;
++        unsigned txfifoundrn : 1;
++        /** IN Endpoint HAK Effective mask */
++        unsigned emptyintr : 1;
++        /** IN Endpoint NAK Effective mask */
++        unsigned inepnakeff : 1;
++        /** IN Token Received with EP mismatch mask */
++        unsigned intknepmis : 1;
++        /** IN Token received with TxF Empty mask */
++        unsigned intktxfemp : 1;
++        /** TimeOUT Handshake mask (non-ISOC EPs) */
++        unsigned timeout : 1;
++        /** AHB Error mask */
++        unsigned ahberr : 1;
++        /** Endpoint disable mask */
++        unsigned epdisabled : 1;
++        /** Transfer complete mask */
++        unsigned xfercompl : 1;
++    } b;
++} diepint_data_t;
++/**
++ * This union represents the bit fields in the Device IN EP Common
++ * Interrupt Mask Register.
++ */
++typedef union diepint_data diepmsk_data_t;
++
++/**
++ * This union represents the bit fields in the Device OUT EP Interrupt
++ * Register and the Device OUT EP Common Interrupt Mask Register.
++ *
++ * - Read the register into the <i>d32</i> member then set/clear the
++ *   bits using the <i>b</i>it elements.
++ */
++typedef union doepint_data
++{
++    /** raw register data */
++    uint32_t d32;
++    /** register bits */
++    struct {
++        unsigned reserved04_31 : 27;
++        /** OUT Token Received when Endpoint Disabled */
++        unsigned outtknepdis : 1;
++        /** Setup Phase Done (control EPs) */
++        unsigned setup : 1;
++        /** AHB Error */
++        unsigned ahberr : 1;
++        /** Endpoint disable */
++        unsigned epdisabled : 1;
++        /** Transfer complete */
++        unsigned xfercompl : 1;
++    } b;
++} doepint_data_t;
++/**
++ * This union represents the bit fields in the Device OUT EP Common
++ * Interrupt Mask Register.
++ */
++typedef union doepint_data doepmsk_data_t;
++
++
++/**
++ * This union represents the bit fields in the Device All EP Interrupt
++ * and Mask Registers.
++ * - Read the register into the <i>d32</i> member then set/clear the
++ *   bits using the <i>b</i>it elements.
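++ *
++ * Illustrative read-modify-write sketch for the matching mask register
++ * (the register accessors are assumptions, not defined in this header;
++ * the ep.in view and the daintmsk register are defined in this file):
++ *
++ *   daint_data_t daintmsk;
++ *   daintmsk.d32 = dwc_read_reg32(&dev_global_regs->daintmsk);
++ *   daintmsk.ep.in |= (1 << ep_num);
++ *   dwc_write_reg32(&dev_global_regs->daintmsk, daintmsk.d32);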
++ */ ++typedef union daint_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ /** OUT Endpoint bits */ ++ unsigned out : 16; ++ /** IN Endpoint bits */ ++ unsigned in : 16; ++ } ep; ++ struct { ++ /** OUT Endpoint bits */ ++ unsigned outep15 : 1; ++ unsigned outep14 : 1; ++ unsigned outep13 : 1; ++ unsigned outep12 : 1; ++ unsigned outep11 : 1; ++ unsigned outep10 : 1; ++ unsigned outep9 : 1; ++ unsigned outep8 : 1; ++ unsigned outep7 : 1; ++ unsigned outep6 : 1; ++ unsigned outep5 : 1; ++ unsigned outep4 : 1; ++ unsigned outep3 : 1; ++ unsigned outep2 : 1; ++ unsigned outep1 : 1; ++ unsigned outep0 : 1; ++ /** IN Endpoint bits */ ++ unsigned inep15 : 1; ++ unsigned inep14 : 1; ++ unsigned inep13 : 1; ++ unsigned inep12 : 1; ++ unsigned inep11 : 1; ++ unsigned inep10 : 1; ++ unsigned inep9 : 1; ++ unsigned inep8 : 1; ++ unsigned inep7 : 1; ++ unsigned inep6 : 1; ++ unsigned inep5 : 1; ++ unsigned inep4 : 1; ++ unsigned inep3 : 1; ++ unsigned inep2 : 1; ++ unsigned inep1 : 1; ++ unsigned inep0 : 1; ++ } b; ++} daint_data_t; ++ ++/** ++ * This union represents the bit fields in the Device IN Token Queue ++ * Read Registers. ++ * - Read the register into the <i>d32</i> member. ++ * - READ-ONLY Register ++ */ ++typedef union dtknq1_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ /** EP Numbers of IN Tokens 0 ... 4 */ ++ unsigned epnums0_5 : 24; ++ /** write pointer has wrapped. */ ++ unsigned wrap_bit : 1; ++ /** Reserved */ ++ unsigned reserved05_06 : 2; ++ /** In Token Queue Write Pointer */ ++ unsigned intknwptr : 5; ++ }b; ++} dtknq1_data_t; ++ ++/** ++ * This union represents Threshold control Register ++ * - Read and write the register into the <i>d32</i> member. ++ * - READ-WRITABLE Register ++ */ ++typedef union dthrctl_data //* /*fscz */ ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ /** Reserved */ ++ unsigned reserved26_31 : 6; ++ /** Rx Thr. Length */ ++ unsigned rx_thr_len : 9; ++ /** Rx Thr. Enable */ ++ unsigned rx_thr_en : 1; ++ /** Reserved */ ++ unsigned reserved11_15 : 5; ++ /** Tx Thr. Length */ ++ unsigned tx_thr_len : 9; ++ /** ISO Tx Thr. Enable */ ++ unsigned iso_thr_en : 1; ++ /** non ISO Tx Thr. Enable */ ++ unsigned non_iso_thr_en : 1; ++ ++ }b; ++} dthrctl_data_t; ++ ++/** ++ * Device Logical IN Endpoint-Specific Registers. <i>Offsets ++ * 900h-AFCh</i> ++ * ++ * There will be one set of endpoint registers per logical endpoint ++ * implemented. ++ * ++ * <i>These registers are visible only in Device mode and must not be ++ * accessed in Host mode, as the results are unknown.</i> ++ */ ++typedef struct dwc_otg_dev_in_ep_regs ++{ ++ /** Device IN Endpoint Control Register. <i>Offset:900h + ++ * (ep_num * 20h) + 00h</i> */ ++ volatile uint32_t diepctl; ++ /** Reserved. <i>Offset:900h + (ep_num * 20h) + 04h</i> */ ++ uint32_t reserved04; ++ /** Device IN Endpoint Interrupt Register. <i>Offset:900h + ++ * (ep_num * 20h) + 08h</i> */ ++ volatile uint32_t diepint; ++ /** Reserved. <i>Offset:900h + (ep_num * 20h) + 0Ch</i> */ ++ uint32_t reserved0C; ++ /** Device IN Endpoint Transfer Size ++ * Register. <i>Offset:900h + (ep_num * 20h) + 10h</i> */ ++ volatile uint32_t dieptsiz; ++ /** Device IN Endpoint DMA Address Register. <i>Offset:900h + ++ * (ep_num * 20h) + 14h</i> */ ++ volatile uint32_t diepdma; ++ /** Reserved. <i>Offset:900h + (ep_num * 20h) + 18h - 900h + ++ * (ep_num * 20h) + 1Ch</i>*/ ++ volatile uint32_t dtxfsts; ++ /** Reserved. 
<i>Offset:900h + (ep_num * 20h) + 1Ch - 900h + ++ * (ep_num * 20h) + 1Ch</i>*/ ++ uint32_t reserved18; ++} dwc_otg_dev_in_ep_regs_t; ++ ++/** ++ * Device Logical OUT Endpoint-Specific Registers. <i>Offsets: ++ * B00h-CFCh</i> ++ * ++ * There will be one set of endpoint registers per logical endpoint ++ * implemented. ++ * ++ * <i>These registers are visible only in Device mode and must not be ++ * accessed in Host mode, as the results are unknown.</i> ++ */ ++typedef struct dwc_otg_dev_out_ep_regs ++{ ++ /** Device OUT Endpoint Control Register. <i>Offset:B00h + ++ * (ep_num * 20h) + 00h</i> */ ++ volatile uint32_t doepctl; ++ /** Device OUT Endpoint Frame number Register. <i>Offset: ++ * B00h + (ep_num * 20h) + 04h</i> */ ++ volatile uint32_t doepfn; ++ /** Device OUT Endpoint Interrupt Register. <i>Offset:B00h + ++ * (ep_num * 20h) + 08h</i> */ ++ volatile uint32_t doepint; ++ /** Reserved. <i>Offset:B00h + (ep_num * 20h) + 0Ch</i> */ ++ uint32_t reserved0C; ++ /** Device OUT Endpoint Transfer Size Register. <i>Offset: ++ * B00h + (ep_num * 20h) + 10h</i> */ ++ volatile uint32_t doeptsiz; ++ /** Device OUT Endpoint DMA Address Register. <i>Offset:B00h ++ * + (ep_num * 20h) + 14h</i> */ ++ volatile uint32_t doepdma; ++ /** Reserved. <i>Offset:B00h + (ep_num * 20h) + 18h - B00h + ++ * (ep_num * 20h) + 1Ch</i> */ ++ uint32_t unused[2]; ++} dwc_otg_dev_out_ep_regs_t; ++ ++/** ++ * This union represents the bit fields in the Device EP Control ++ * Register. Read the register into the <i>d32</i> member then ++ * set/clear the bits using the <i>b</i>it elements. ++ */ ++typedef union depctl_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ /** Endpoint Enable */ ++ unsigned epena : 1; ++ /** Endpoint Disable */ ++ unsigned epdis : 1; ++ /** Set DATA1 PID (INTR/Bulk IN and OUT endpoints) ++ * Writing to this field sets the Endpoint DPID (DPID) ++ * field in this register to DATA1 Set Odd ++ * (micro)frame (SetOddFr) (ISO IN and OUT Endpoints) ++ * Writing to this field sets the Even/Odd ++ * (micro)frame (EO_FrNum) field to odd (micro) frame. ++ */ ++ unsigned setd1pid : 1; ++ /** Set DATA0 PID (INTR/Bulk IN and OUT endpoints) ++ * Writing to this field sets the Endpoint DPID (DPID) ++ * field in this register to DATA0. Set Even ++ * (micro)frame (SetEvenFr) (ISO IN and OUT Endpoints) ++ * Writing to this field sets the Even/Odd ++ * (micro)frame (EO_FrNum) field to even (micro) ++ * frame. ++ */ ++ unsigned setd0pid : 1; ++ /** Set NAK */ ++ unsigned snak : 1; ++ /** Clear NAK */ ++ unsigned cnak : 1; ++ /** Tx Fifo Number ++ * IN EPn/IN EP0 ++ * OUT EPn/OUT EP0 - reserved */ ++ unsigned txfnum : 4; ++ /** Stall Handshake */ ++ unsigned stall : 1; ++ /** Snoop Mode ++ * OUT EPn/OUT EP0 ++ * IN EPn/IN EP0 - reserved */ ++ unsigned snp : 1; ++ /** Endpoint Type ++ * 2'b00: Control ++ * 2'b01: Isochronous ++ * 2'b10: Bulk ++ * 2'b11: Interrupt */ ++ unsigned eptype : 2; ++ /** NAK Status */ ++ unsigned naksts : 1; ++ /** Endpoint DPID (INTR/Bulk IN and OUT endpoints) ++ * This field contains the PID of the packet going to ++ * be received or transmitted on this endpoint. The ++ * application should program the PID of the first ++ * packet going to be received or transmitted on this ++ * endpoint , after the endpoint is ++ * activated. Application use the SetD1PID and ++ * SetD0PID fields of this register to program either ++ * D0 or D1 PID. 
++ * ++ * The encoding for this field is ++ * - 0: D0 ++ * - 1: D1 ++ */ ++ unsigned dpid : 1; ++ /** USB Active Endpoint */ ++ unsigned usbactep : 1; ++ /** Next Endpoint ++ * IN EPn/IN EP0 ++ * OUT EPn/OUT EP0 - reserved */ ++ unsigned nextep : 4; ++ /** Maximum Packet Size ++ * IN/OUT EPn ++ * IN/OUT EP0 - 2 bits ++ * 2'b00: 64 Bytes ++ * 2'b01: 32 ++ * 2'b10: 16 ++ * 2'b11: 8 */ ++#define DWC_DEP0CTL_MPS_64 0 ++#define DWC_DEP0CTL_MPS_32 1 ++#define DWC_DEP0CTL_MPS_16 2 ++#define DWC_DEP0CTL_MPS_8 3 ++ unsigned mps : 11; ++ } b; ++} depctl_data_t; ++ ++/** ++ * This union represents the bit fields in the Device EP Transfer ++ * Size Register. Read the register into the <i>d32</i> member then ++ * set/clear the bits using the <i>b</i>it elements. ++ */ ++typedef union deptsiz_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ unsigned reserved : 1; ++ /** Multi Count - Periodic IN endpoints */ ++ unsigned mc : 2; ++ /** Packet Count */ ++ unsigned pktcnt : 10; ++ /** Transfer size */ ++ unsigned xfersize : 19; ++ } b; ++} deptsiz_data_t; ++ ++/** ++ * This union represents the bit fields in the Device EP 0 Transfer ++ * Size Register. Read the register into the <i>d32</i> member then ++ * set/clear the bits using the <i>b</i>it elements. ++ */ ++typedef union deptsiz0_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ unsigned reserved31 : 1; ++ /**Setup Packet Count (DOEPTSIZ0 Only) */ ++ unsigned supcnt : 2; ++ /** Reserved */ ++ unsigned reserved28_20 : 9; ++ /** Packet Count */ ++ unsigned pktcnt : 1; ++ /** Reserved */ ++ unsigned reserved18_7 : 12; ++ /** Transfer size */ ++ unsigned xfersize : 7; ++ } b; ++} deptsiz0_data_t; ++ ++ ++/** Maximum number of Periodic FIFOs */ ++#define MAX_PERIO_FIFOS 15 ++/** Maximum number of TX FIFOs */ ++#define MAX_TX_FIFOS 15 ++/** Maximum number of Endpoints/HostChannels */ ++#define MAX_EPS_CHANNELS 16 ++//#define MAX_EPS_CHANNELS 4 ++ ++/** ++ * The dwc_otg_dev_if structure contains information needed to manage ++ * the DWC_otg controller acting in device mode. It represents the ++ * programming view of the device-specific aspects of the controller. ++ */ ++typedef struct dwc_otg_dev_if { ++ /** Pointer to device Global registers. 
++ * Device Global Registers starting at offset 800h
++ */
++    dwc_otg_device_global_regs_t *dev_global_regs;
++#define DWC_DEV_GLOBAL_REG_OFFSET 0x800
++
++    /**
++     * Device Logical IN Endpoint-Specific Registers 900h-AFCh
++     */
++    dwc_otg_dev_in_ep_regs_t *in_ep_regs[MAX_EPS_CHANNELS];
++#define DWC_DEV_IN_EP_REG_OFFSET 0x900
++#define DWC_EP_REG_OFFSET 0x20
++
++    /** Device Logical OUT Endpoint-Specific Registers B00h-CFCh */
++    dwc_otg_dev_out_ep_regs_t *out_ep_regs[MAX_EPS_CHANNELS];
++#define DWC_DEV_OUT_EP_REG_OFFSET 0xB00
++
++    /* Device configuration information */
++    uint8_t speed;       /**< Device Speed 0: Unknown, 1: LS, 2: FS, 3: HS */
++    //uint8_t num_eps;       /**< Number of EPs range: 0-16 (includes EP0) */
++    //uint8_t num_perio_eps; /**< # of Periodic EP range: 0-15 */
++    /*fscz */
++    uint8_t num_in_eps;  /**< Number of IN (Tx) EPs, range 0-15, excluding EP0 */
++    uint8_t num_out_eps; /**< Number of OUT (Rx) EPs, range 0-15, excluding EP0 */
++
++    /** Size of periodic FIFOs (Bytes) */
++    uint16_t perio_tx_fifo_size[MAX_PERIO_FIFOS];
++
++    /** Size of Tx FIFOs (Bytes) */
++    uint16_t tx_fifo_size[MAX_TX_FIFOS];
++
++    /** Thresholding enable flags and length variables */
++    uint16_t rx_thr_en;
++    uint16_t iso_tx_thr_en;
++    uint16_t non_iso_tx_thr_en;
++
++    uint16_t rx_thr_length;
++    uint16_t tx_thr_length;
++} dwc_otg_dev_if_t;
++
++/**
++ * This union represents the bit fields in the Power and Clock Gating Control
++ * Register. Read the register into the <i>d32</i> member then set/clear the
++ * bits using the <i>b</i>it elements.
++ */
++typedef union pcgcctl_data
++{
++    /** raw register data */
++    uint32_t d32;
++
++    /** register bits */
++    struct {
++        unsigned reserved31_05 : 27;
++        /** PHY Suspended */
++        unsigned physuspended : 1;
++        /** Reset Power Down Modules */
++        unsigned rstpdwnmodule : 1;
++        /** Power Clamp */
++        unsigned pwrclmp : 1;
++        /** Gate Hclk */
++        unsigned gatehclk : 1;
++        /** Stop Pclk */
++        unsigned stoppclk : 1;
++    } b;
++} pcgcctl_data_t;
++
++/////////////////////////////////////////////////
++// Host Mode Register Structures
++//
++/**
++ * The Host Global Registers structure defines the size and relative
++ * field offsets for the Host Mode Global Registers. Host Global
++ * Registers offsets 400h-7FFh.
++ */
++typedef struct dwc_otg_host_global_regs
++{
++    /** Host Configuration Register. <i>Offset: 400h</i> */
++    volatile uint32_t hcfg;
++    /** Host Frame Interval Register. <i>Offset: 404h</i> */
++    volatile uint32_t hfir;
++    /** Host Frame Number / Frame Remaining Register. <i>Offset: 408h</i> */
++    volatile uint32_t hfnum;
++    /** Reserved. <i>Offset: 40Ch</i> */
++    uint32_t reserved40C;
++    /** Host Periodic Transmit FIFO/Queue Status Register. <i>Offset: 410h</i> */
++    volatile uint32_t hptxsts;
++    /** Host All Channels Interrupt Register. <i>Offset: 414h</i> */
++    volatile uint32_t haint;
++    /** Host All Channels Interrupt Mask Register. <i>Offset: 418h</i> */
++    volatile uint32_t haintmsk;
++} dwc_otg_host_global_regs_t;
++
++/**
++ * This union represents the bit fields in the Host Configuration Register.
++ * Read the register into the <i>d32</i> member then set/clear the bits using
++ * the <i>b</i>it elements. Write the <i>d32</i> member to the hcfg register.
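++ *
++ * Illustrative sketch of that pattern (the register accessors are
++ * assumptions; host_global_regs and the DWC_HCFG_* values are defined
++ * in this header):
++ *
++ *   hcfg_data_t hcfg;
++ *   hcfg.d32 = dwc_read_reg32(&host_global_regs->hcfg);
++ *   hcfg.b.fslspclksel = DWC_HCFG_48_MHZ;
++ *   dwc_write_reg32(&host_global_regs->hcfg, hcfg.d32);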
++ */ ++typedef union hcfg_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct { ++ /** Reserved */ ++ //unsigned reserved31_03 : 29; ++ /** FS/LS Only Support */ ++ unsigned fslssupp : 1; ++ /** FS/LS Phy Clock Select */ ++#define DWC_HCFG_30_60_MHZ 0 ++#define DWC_HCFG_48_MHZ 1 ++#define DWC_HCFG_6_MHZ 2 ++ unsigned fslspclksel : 2; ++ } b; ++} hcfg_data_t; ++ ++/** ++ * This union represents the bit fields in the Host Frame Remaing/Number ++ * Register. ++ */ ++typedef union hfir_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct { ++ unsigned reserved : 16; ++ unsigned frint : 16; ++ } b; ++} hfir_data_t; ++ ++/** ++ * This union represents the bit fields in the Host Frame Remaing/Number ++ * Register. ++ */ ++typedef union hfnum_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct { ++ unsigned frrem : 16; ++#define DWC_HFNUM_MAX_FRNUM 0x3FFF ++ unsigned frnum : 16; ++ } b; ++} hfnum_data_t; ++ ++typedef union hptxsts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct { ++ /** Top of the Periodic Transmit Request Queue ++ * - bit 24 - Terminate (last entry for the selected channel) ++ * - bits 26:25 - Token Type ++ * - 2'b00 - Zero length ++ * - 2'b01 - Ping ++ * - 2'b10 - Disable ++ * - bits 30:27 - Channel Number ++ * - bit 31 - Odd/even microframe ++ */ ++ unsigned ptxqtop_odd : 1; ++ unsigned ptxqtop_chnum : 4; ++ unsigned ptxqtop_token : 2; ++ unsigned ptxqtop_terminate : 1; ++ unsigned ptxqspcavail : 8; ++ unsigned ptxfspcavail : 16; ++ } b; ++} hptxsts_data_t; ++ ++/** ++ * This union represents the bit fields in the Host Port Control and Status ++ * Register. Read the register into the <i>d32</i> member then set/clear the ++ * bits using the <i>b</i>it elements. Write the <i>d32</i> member to the ++ * hprt0 register. ++ */ ++typedef union hprt0_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ unsigned reserved19_31 : 13; ++#define DWC_HPRT0_PRTSPD_HIGH_SPEED 0 ++#define DWC_HPRT0_PRTSPD_FULL_SPEED 1 ++#define DWC_HPRT0_PRTSPD_LOW_SPEED 2 ++ unsigned prtspd : 2; ++ unsigned prttstctl : 4; ++ unsigned prtpwr : 1; ++ unsigned prtlnsts : 2; ++ unsigned reserved9 : 1; ++ unsigned prtrst : 1; ++ unsigned prtsusp : 1; ++ unsigned prtres : 1; ++ unsigned prtovrcurrchng : 1; ++ unsigned prtovrcurract : 1; ++ unsigned prtenchng : 1; ++ unsigned prtena : 1; ++ unsigned prtconndet : 1; ++ unsigned prtconnsts : 1; ++ } b; ++} hprt0_data_t; ++ ++/** ++ * This union represents the bit fields in the Host All Interrupt ++ * Register. ++ */ ++typedef union haint_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ unsigned reserved : 16; ++ unsigned ch15 : 1; ++ unsigned ch14 : 1; ++ unsigned ch13 : 1; ++ unsigned ch12 : 1; ++ unsigned ch11 : 1; ++ unsigned ch10 : 1; ++ unsigned ch9 : 1; ++ unsigned ch8 : 1; ++ unsigned ch7 : 1; ++ unsigned ch6 : 1; ++ unsigned ch5 : 1; ++ unsigned ch4 : 1; ++ unsigned ch3 : 1; ++ unsigned ch2 : 1; ++ unsigned ch1 : 1; ++ unsigned ch0 : 1; ++ } b; ++ struct { ++ unsigned reserved : 16; ++ unsigned chint : 16; ++ } b2; ++} haint_data_t; ++ ++/** ++ * This union represents the bit fields in the Host All Interrupt ++ * Register. 
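++ *
++ * Illustrative sketch (register accessors assumed): unmasking one host
++ * channel interrupt through the b2.chint view defined below:
++ *
++ *   haintmsk_data_t haintmsk;
++ *   haintmsk.d32 = dwc_read_reg32(&host_global_regs->haintmsk);
++ *   haintmsk.b2.chint |= (1 << hc_num);
++ *   dwc_write_reg32(&host_global_regs->haintmsk, haintmsk.d32);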
++ */ ++typedef union haintmsk_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ unsigned reserved : 16; ++ unsigned ch15 : 1; ++ unsigned ch14 : 1; ++ unsigned ch13 : 1; ++ unsigned ch12 : 1; ++ unsigned ch11 : 1; ++ unsigned ch10 : 1; ++ unsigned ch9 : 1; ++ unsigned ch8 : 1; ++ unsigned ch7 : 1; ++ unsigned ch6 : 1; ++ unsigned ch5 : 1; ++ unsigned ch4 : 1; ++ unsigned ch3 : 1; ++ unsigned ch2 : 1; ++ unsigned ch1 : 1; ++ unsigned ch0 : 1; ++ } b; ++ struct { ++ unsigned reserved : 16; ++ unsigned chint : 16; ++ } b2; ++} haintmsk_data_t; ++ ++/** ++ * Host Channel Specific Registers. <i>500h-5FCh</i> ++ */ ++typedef struct dwc_otg_hc_regs ++{ ++ /** Host Channel 0 Characteristic Register. <i>Offset: 500h + (chan_num * 20h) + 00h</i> */ ++ volatile uint32_t hcchar; ++ /** Host Channel 0 Split Control Register. <i>Offset: 500h + (chan_num * 20h) + 04h</i> */ ++ volatile uint32_t hcsplt; ++ /** Host Channel 0 Interrupt Register. <i>Offset: 500h + (chan_num * 20h) + 08h</i> */ ++ volatile uint32_t hcint; ++ /** Host Channel 0 Interrupt Mask Register. <i>Offset: 500h + (chan_num * 20h) + 0Ch</i> */ ++ volatile uint32_t hcintmsk; ++ /** Host Channel 0 Transfer Size Register. <i>Offset: 500h + (chan_num * 20h) + 10h</i> */ ++ volatile uint32_t hctsiz; ++ /** Host Channel 0 DMA Address Register. <i>Offset: 500h + (chan_num * 20h) + 14h</i> */ ++ volatile uint32_t hcdma; ++ /** Reserved. <i>Offset: 500h + (chan_num * 20h) + 18h - 500h + (chan_num * 20h) + 1Ch</i> */ ++ uint32_t reserved[2]; ++} dwc_otg_hc_regs_t; ++ ++/** ++ * This union represents the bit fields in the Host Channel Characteristics ++ * Register. Read the register into the <i>d32</i> member then set/clear the ++ * bits using the <i>b</i>it elements. Write the <i>d32</i> member to the ++ * hcchar register. ++ */ ++typedef union hcchar_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct { ++ /** Channel enable */ ++ unsigned chen : 1; ++ /** Channel disable */ ++ unsigned chdis : 1; ++ /** ++ * Frame to transmit periodic transaction. ++ * 0: even, 1: odd ++ */ ++ unsigned oddfrm : 1; ++ /** Device address */ ++ unsigned devaddr : 7; ++ /** Packets per frame for periodic transfers. 0 is reserved. */ ++ unsigned multicnt : 2; ++ /** 0: Control, 1: Isoc, 2: Bulk, 3: Intr */ ++ unsigned eptype : 2; ++ /** 0: Full/high speed device, 1: Low speed device */ ++ unsigned lspddev : 1; ++ unsigned reserved : 1; ++ /** 0: OUT, 1: IN */ ++ unsigned epdir : 1; ++ /** Endpoint number */ ++ unsigned epnum : 4; ++ /** Maximum packet size in bytes */ ++ unsigned mps : 11; ++ } b; ++} hcchar_data_t; ++ ++typedef union hcsplt_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct { ++ /** Split Enble */ ++ unsigned spltena : 1; ++ /** Reserved */ ++ unsigned reserved : 14; ++ /** Do Complete Split */ ++ unsigned compsplt : 1; ++ /** Transaction Position */ ++#define DWC_HCSPLIT_XACTPOS_MID 0 ++#define DWC_HCSPLIT_XACTPOS_END 1 ++#define DWC_HCSPLIT_XACTPOS_BEGIN 2 ++#define DWC_HCSPLIT_XACTPOS_ALL 3 ++ unsigned xactpos : 2; ++ /** Hub Address */ ++ unsigned hubaddr : 7; ++ /** Port Address */ ++ unsigned prtaddr : 7; ++ } b; ++} hcsplt_data_t; ++ ++ ++/** ++ * This union represents the bit fields in the Host All Interrupt ++ * Register. 
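++ *
++ * Illustrative sketch of decoding a halted channel (the hc_regs pointer,
++ * accessor and flag name are assumptions for the example; the fields are
++ * defined below):
++ *
++ *   hcint_data_t hcint;
++ *   hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
++ *   if (hcint.b.chhltd && hcint.b.xfercomp)
++ *           transfer_done = 1;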
++ */ ++typedef union hcint_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ /** Reserved */ ++ unsigned reserved : 21; ++ /** Data Toggle Error */ ++ unsigned datatglerr : 1; ++ /** Frame Overrun */ ++ unsigned frmovrun : 1; ++ /** Babble Error */ ++ unsigned bblerr : 1; ++ /** Transaction Err */ ++ unsigned xacterr : 1; ++ /** NYET Response Received */ ++ unsigned nyet : 1; ++ /** ACK Response Received */ ++ unsigned ack : 1; ++ /** NAK Response Received */ ++ unsigned nak : 1; ++ /** STALL Response Received */ ++ unsigned stall : 1; ++ /** AHB Error */ ++ unsigned ahberr : 1; ++ /** Channel Halted */ ++ unsigned chhltd : 1; ++ /** Transfer Complete */ ++ unsigned xfercomp : 1; ++ } b; ++} hcint_data_t; ++ ++/** ++ * This union represents the bit fields in the Host Channel Transfer Size ++ * Register. Read the register into the <i>d32</i> member then set/clear the ++ * bits using the <i>b</i>it elements. Write the <i>d32</i> member to the ++ * hcchar register. ++ */ ++typedef union hctsiz_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct { ++ /** Do PING protocol when 1 */ ++ unsigned dopng : 1; ++ /** ++ * Packet ID for next data packet ++ * 0: DATA0 ++ * 1: DATA2 ++ * 2: DATA1 ++ * 3: MDATA (non-Control), SETUP (Control) ++ */ ++#define DWC_HCTSIZ_DATA0 0 ++#define DWC_HCTSIZ_DATA1 2 ++#define DWC_HCTSIZ_DATA2 1 ++#define DWC_HCTSIZ_MDATA 3 ++#define DWC_HCTSIZ_SETUP 3 ++ unsigned pid : 2; ++ /** Data packets to transfer */ ++ unsigned pktcnt : 10; ++ /** Total transfer size in bytes */ ++ unsigned xfersize : 19; ++ } b; ++} hctsiz_data_t; ++ ++/** ++ * This union represents the bit fields in the Host Channel Interrupt Mask ++ * Register. Read the register into the <i>d32</i> member then set/clear the ++ * bits using the <i>b</i>it elements. Write the <i>d32</i> member to the ++ * hcintmsk register. ++ */ ++typedef union hcintmsk_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct { ++ unsigned reserved : 21; ++ unsigned datatglerr : 1; ++ unsigned frmovrun : 1; ++ unsigned bblerr : 1; ++ unsigned xacterr : 1; ++ unsigned nyet : 1; ++ unsigned ack : 1; ++ unsigned nak : 1; ++ unsigned stall : 1; ++ unsigned ahberr : 1; ++ unsigned chhltd : 1; ++ unsigned xfercompl : 1; ++ } b; ++} hcintmsk_data_t; ++ ++/** OTG Host Interface Structure. ++ * ++ * The OTG Host Interface Structure structure contains information ++ * needed to manage the DWC_otg controller acting in host mode. It ++ * represents the programming view of the host-specific aspects of the ++ * controller. ++ */ ++typedef struct dwc_otg_host_if { ++ /** Host Global Registers starting at offset 400h.*/ ++ dwc_otg_host_global_regs_t *host_global_regs; ++#define DWC_OTG_HOST_GLOBAL_REG_OFFSET 0x400 ++ ++ /** Host Port 0 Control and Status Register */ ++ volatile uint32_t *hprt0; ++#define DWC_OTG_HOST_PORT_REGS_OFFSET 0x440 ++ ++ ++ /** Host Channel Specific Registers at offsets 500h-5FCh. 
*/
++    dwc_otg_hc_regs_t *hc_regs[MAX_EPS_CHANNELS];
++#define DWC_OTG_HOST_CHAN_REGS_OFFSET 0x500
++#define DWC_OTG_CHAN_REGS_OFFSET 0x20
++
++
++    /* Host configuration information */
++    /** Number of Host Channels (range: 1-16) */
++    uint8_t num_host_channels;
++    /** Periodic EPs supported (0: no, 1: yes) */
++    uint8_t perio_eps_supported;
++    /** Periodic Tx FIFO Size (Only 1 host periodic Tx FIFO) */
++    uint16_t perio_tx_fifo_size;
++
++} dwc_otg_host_if_t;
++
++#endif
+--- a/arch/mips/lantiq/xway/Makefile
++++ b/arch/mips/lantiq/xway/Makefile
+@@ -4,3 +4,4 @@
+ obj-$(CONFIG_LANTIQ_MACH_EASY50712) += mach-easy50712.o
+ obj-$(CONFIG_LANTIQ_MACH_EASY4010) += mach-easy4010.o
+ obj-$(CONFIG_LANTIQ_MACH_ARV45XX) += mach-arv45xx.o
++obj-y += dev-dwc_otg.o
+--- /dev/null
++++ b/arch/mips/lantiq/xway/dev-dwc_otg.c
+@@ -0,0 +1,64 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/string.h>
++#include <linux/mtd/physmap.h>
++#include <linux/kernel.h>
++#include <linux/reboot.h>
++#include <linux/platform_device.h>
++#include <linux/leds.h>
++#include <linux/etherdevice.h>
++#include <linux/reboot.h>
++#include <linux/time.h>
++#include <linux/io.h>
++#include <linux/gpio.h>
++#include <linux/leds.h>
++
++#include <asm/bootinfo.h>
++#include <asm/irq.h>
++
++#include <xway.h>
++#include <xway_irq.h>
++#include <lantiq_platform.h>
++
++static struct resource resources[] =
++{
++    [0] = {
++        .name = "dwc_otg_membase",
++        .start = IFX_USB_IOMEM_BASE,
++        .end = IFX_USB_IOMEM_BASE + IFX_USB_IOMEM_SIZE - 1,
++        .flags = IORESOURCE_MEM,
++    },
++    [1] = {
++        .name = "dwc_otg_irq",
++        .start = IFX_USB_IRQ,
++        .flags = IORESOURCE_IRQ,
++    },
++};
++
++static u64 dwc_dmamask = (u32)0x1fffffff;
++
++static struct platform_device platform_dev = {
++    .name = "dwc_otg",
++    .dev = {
++        .dma_mask = &dwc_dmamask,
++    },
++    .resource = resources,
++    .num_resources = ARRAY_SIZE(resources),
++};
++
++int __init
++xway_register_dwc(int pin)
++{
++    lq_enable_irq(resources[1].start);
++    return platform_device_register(&platform_dev);
++}
+--- /dev/null
++++ b/arch/mips/lantiq/xway/dev-dwc_otg.h
+@@ -0,0 +1,17 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ * ++ * Copyright (C) 2010 John Crispin <blogic@openwrt.org> ++ */ ++ ++#ifndef _LQ_DEV_DWC_H__ ++#define _LQ_DEV_DWC_H__ ++ ++#include <lantiq_platform.h> ++ ++extern void __init xway_register_dwc(int pin); ++ ++#endif +--- a/arch/mips/lantiq/xway/mach-arv45xx.c ++++ b/arch/mips/lantiq/xway/mach-arv45xx.c +@@ -24,6 +24,7 @@ + #include <lantiq_platform.h> + + #include "devices.h" ++#include "dev-dwc_otg.h" + + #define ARV452_LATCH_SWITCH (1 << 10) + +@@ -133,6 +134,7 @@ + lq_register_pci(&lq_pci_data); + lq_register_wdt(); + arv45xx_register_ethernet(); ++ xway_register_dwc(14); + } + + MIPS_MACHINE(LANTIQ_MACH_ARV4518, +@@ -152,6 +154,7 @@ + lq_register_pci(&lq_pci_data); + lq_register_wdt(); + arv45xx_register_ethernet(); ++ xway_register_dwc(28); + } + + MIPS_MACHINE(LANTIQ_MACH_ARV452, diff --git a/target/linux/lantiq/patches/809-mt-vpe.patch b/target/linux/lantiq/patches/809-mt-vpe.patch new file mode 100644 index 0000000000..07312c7747 --- /dev/null +++ b/target/linux/lantiq/patches/809-mt-vpe.patch @@ -0,0 +1,1173 @@ +--- a/arch/mips/Kconfig ++++ b/arch/mips/Kconfig +@@ -1669,6 +1669,28 @@ config MIPS_VPE_LOADER + Includes a loader for loading an elf relocatable object + onto another VPE and running it. + ++config IFX_VPE_EXT ++ bool "IFX APRP Extensions" ++ depends on MIPS_VPE_LOADER ++ default y ++ help ++ IFX included extensions in APRP ++ ++config PERFCTRS ++ bool "34K Performance counters" ++ depends on MIPS_MT && PROC_FS ++ default n ++ help ++ 34K Performance counter through /proc ++ ++config MTSCHED ++ bool "Support mtsched priority configuration for TCs" ++ depends on MIPS_MT && PROC_FS ++ default y ++ help ++ Support for mtsched priority configuration for TCs through ++ /proc/mips/mtsched ++ + config MIPS_MT_SMTC_IM_BACKSTOP + bool "Use per-TC register bits as backstop for inhibited IM bits" + depends on MIPS_MT_SMTC +--- a/arch/mips/include/asm/mipsmtregs.h ++++ b/arch/mips/include/asm/mipsmtregs.h +@@ -28,14 +28,34 @@ + #define read_c0_vpeconf0() __read_32bit_c0_register($1, 2) + #define write_c0_vpeconf0(val) __write_32bit_c0_register($1, 2, val) + ++#define read_c0_vpeconf1() __read_32bit_c0_register($1, 3) ++#define write_c0_vpeconf1(val) __write_32bit_c0_register($1, 3, val) ++ ++#define read_c0_vpeschedule() __read_32bit_c0_register($1, 5) ++#define write_c0_vpeschedule(val) __write_32bit_c0_register($1, 5, val) ++ ++#define read_c0_vpeschefback() __read_32bit_c0_register($1, 6) ++#define write_c0_vpeschefback(val) __write_32bit_c0_register($1, 6, val) ++ ++#define read_c0_vpeopt() __read_32bit_c0_register($1, 7) ++#define write_c0_vpeopt(val) __write_32bit_c0_register($1, 7, val) ++ + #define read_c0_tcstatus() __read_32bit_c0_register($2, 1) + #define write_c0_tcstatus(val) __write_32bit_c0_register($2, 1, val) + + #define read_c0_tcbind() __read_32bit_c0_register($2, 2) ++#define write_c0_tcbind(val) __write_32bit_c0_register($2, 2, val) + + #define read_c0_tccontext() __read_32bit_c0_register($2, 5) + #define write_c0_tccontext(val) __write_32bit_c0_register($2, 5, val) + ++#define read_c0_tcschedule() __read_32bit_c0_register($2, 6) ++#define write_c0_tcschedule(val) __write_32bit_c0_register($2, 6, val) ++ ++#define read_c0_tcschefback() __read_32bit_c0_register($2, 7) ++#define write_c0_tcschefback(val) __write_32bit_c0_register($2, 7, val) ++ ++ + #else /* Assembly */ + /* + * Macros for use in assembly language code +@@ -74,6 +94,8 @@ + #define MVPCONTROL_STLB_SHIFT 2 + #define MVPCONTROL_STLB (_ULCAST_(1) << MVPCONTROL_STLB_SHIFT) + ++#define 
MVPCONTROL_CPA_SHIFT 3 ++#define MVPCONTROL_CPA (_ULCAST_(1) << MVPCONTROL_CPA_SHIFT) + + /* MVPConf0 fields */ + #define MVPCONF0_PTC_SHIFT 0 +@@ -84,6 +106,8 @@ + #define MVPCONF0_TCA ( _ULCAST_(1) << MVPCONF0_TCA_SHIFT) + #define MVPCONF0_PTLBE_SHIFT 16 + #define MVPCONF0_PTLBE (_ULCAST_(0x3ff) << MVPCONF0_PTLBE_SHIFT) ++#define MVPCONF0_PCP_SHIFT 27 ++#define MVPCONF0_PCP (_ULCAST_(1) << MVPCONF0_PCP_SHIFT) + #define MVPCONF0_TLBS_SHIFT 29 + #define MVPCONF0_TLBS (_ULCAST_(1) << MVPCONF0_TLBS_SHIFT) + #define MVPCONF0_M_SHIFT 31 +@@ -121,9 +145,25 @@ + #define VPECONF0_VPA (_ULCAST_(1) << VPECONF0_VPA_SHIFT) + #define VPECONF0_MVP_SHIFT 1 + #define VPECONF0_MVP (_ULCAST_(1) << VPECONF0_MVP_SHIFT) ++#define VPECONF0_ICS_SHIFT 16 ++#define VPECONF0_ICS (_ULCAST_(1) << VPECONF0_ICS_SHIFT) ++#define VPECONF0_DCS_SHIFT 17 ++#define VPECONF0_DCS (_ULCAST_(1) << VPECONF0_DCS_SHIFT) + #define VPECONF0_XTC_SHIFT 21 + #define VPECONF0_XTC (_ULCAST_(0xff) << VPECONF0_XTC_SHIFT) + ++/* VPEOpt fields */ ++#define VPEOPT_DWX_SHIFT 0 ++#define VPEOPT_IWX_SHIFT 8 ++#define VPEOPT_IWX0 ( _ULCAST_(0x1) << VPEOPT_IWX_SHIFT) ++#define VPEOPT_IWX1 ( _ULCAST_(0x2) << VPEOPT_IWX_SHIFT) ++#define VPEOPT_IWX2 ( _ULCAST_(0x4) << VPEOPT_IWX_SHIFT) ++#define VPEOPT_IWX3 ( _ULCAST_(0x8) << VPEOPT_IWX_SHIFT) ++#define VPEOPT_DWX0 ( _ULCAST_(0x1) << VPEOPT_DWX_SHIFT) ++#define VPEOPT_DWX1 ( _ULCAST_(0x2) << VPEOPT_DWX_SHIFT) ++#define VPEOPT_DWX2 ( _ULCAST_(0x4) << VPEOPT_DWX_SHIFT) ++#define VPEOPT_DWX3 ( _ULCAST_(0x8) << VPEOPT_DWX_SHIFT) ++ + /* TCStatus fields (per TC) */ + #define TCSTATUS_TASID (_ULCAST_(0xff)) + #define TCSTATUS_IXMT_SHIFT 10 +@@ -350,6 +390,14 @@ do { \ + #define write_vpe_c0_vpecontrol(val) mttc0(1, 1, val) + #define read_vpe_c0_vpeconf0() mftc0(1, 2) + #define write_vpe_c0_vpeconf0(val) mttc0(1, 2, val) ++#define read_vpe_c0_vpeschedule() mftc0(1, 5) ++#define write_vpe_c0_vpeschedule(val) mttc0(1, 5, val) ++#define read_vpe_c0_vpeschefback() mftc0(1, 6) ++#define write_vpe_c0_vpeschefback(val) mttc0(1, 6, val) ++#define read_vpe_c0_vpeopt() mftc0(1, 7) ++#define write_vpe_c0_vpeopt(val) mttc0(1, 7, val) ++#define read_vpe_c0_wired() mftc0(6, 0) ++#define write_vpe_c0_wired(val) mttc0(6, 0, val) + #define read_vpe_c0_count() mftc0(9, 0) + #define write_vpe_c0_count(val) mttc0(9, 0, val) + #define read_vpe_c0_status() mftc0(12, 0) +@@ -381,6 +429,12 @@ do { \ + #define write_tc_c0_tchalt(val) mttc0(2, 4, val) + #define read_tc_c0_tccontext() mftc0(2, 5) + #define write_tc_c0_tccontext(val) mttc0(2, 5, val) ++#define read_tc_c0_tcschedule() mftc0(2, 6) ++#define write_tc_c0_tcschedule(val) mttc0(2, 6, val) ++#define read_tc_c0_tcschefback() mftc0(2, 7) ++#define write_tc_c0_tcschefback(val) mttc0(2, 7, val) ++#define read_tc_c0_entryhi() mftc0(10, 0) ++#define write_tc_c0_entryhi(val) mttc0(10, 0, val) + + /* GPR */ + #define read_tc_gpr_sp() mftgpr(29) +--- a/arch/mips/kernel/Makefile ++++ b/arch/mips/kernel/Makefile +@@ -84,7 +84,8 @@ obj-$(CONFIG_MIPS32_O32) += binfmt_elfo3 + + obj-$(CONFIG_KGDB) += kgdb.o + obj-$(CONFIG_PROC_FS) += proc.o +- ++obj-$(CONFIG_MTSCHED) += mtsched_proc.o ++obj-$(CONFIG_PERFCTRS) += perf_proc.o + obj-$(CONFIG_64BIT) += cpu-bugs64.o + + obj-$(CONFIG_I8253) += i8253.o +--- a/arch/mips/kernel/mips-mt.c ++++ b/arch/mips/kernel/mips-mt.c +@@ -21,26 +21,95 @@ + #include <asm/cacheflush.h> + + int vpelimit; +- + static int __init maxvpes(char *str) + { + get_option(&str, &vpelimit); +- + return 1; + } +- + __setup("maxvpes=", maxvpes); + + int tclimit; +- + static 
int __init maxtcs(char *str) + { + get_option(&str, &tclimit); ++ return 1; ++} ++__setup("maxtcs=", maxtcs); + ++#ifdef CONFIG_IFX_VPE_EXT ++int stlb; ++static int __init istlbshared(char *str) ++{ ++ get_option(&str, &stlb); + return 1; + } ++__setup("vpe_tlb_shared=", istlbshared); + +-__setup("maxtcs=", maxtcs); ++int vpe0_wired; ++static int __init vpe0wired(char *str) ++{ ++ get_option(&str, &vpe0_wired); ++ return 1; ++} ++__setup("vpe0_wired_tlb_entries=", vpe0wired); ++ ++int vpe1_wired; ++static int __init vpe1wired(char *str) ++{ ++ get_option(&str, &vpe1_wired); ++ return 1; ++} ++__setup("vpe1_wired_tlb_entries=", vpe1wired); ++ ++#ifdef CONFIG_MIPS_MT_SMTC ++extern int nostlb; ++#endif ++void configure_tlb(void) ++{ ++ int vpeflags, tcflags, tlbsiz; ++ unsigned int config1val; ++ vpeflags = dvpe(); ++ tcflags = dmt(); ++ write_c0_vpeconf0((read_c0_vpeconf0() | VPECONF0_MVP)); ++ write_c0_mvpcontrol((read_c0_mvpcontrol() | MVPCONTROL_VPC)); ++ mips_ihb(); ++ //printk("stlb = %d, vpe0_wired = %d vpe1_wired=%d\n", stlb,vpe0_wired, vpe1_wired); ++ if (stlb) { ++ if (!(read_c0_mvpconf0() & MVPCONF0_TLBS)) { ++ emt(tcflags); ++ evpe(vpeflags); ++ return; ++ } ++ ++ write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB); ++ write_c0_wired(vpe0_wired + vpe1_wired); ++ if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) { ++ config1val = read_vpe_c0_config1(); ++ tlbsiz = (((config1val >> 25) & 0x3f) + 1); ++ if (tlbsiz > 64) ++ tlbsiz = 64; ++ cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz; ++ } ++ ++ } ++ else { ++ write_c0_mvpcontrol(read_c0_mvpcontrol() & ~MVPCONTROL_STLB); ++ write_c0_wired(vpe0_wired); ++ } ++ ++ ehb(); ++ write_c0_mvpcontrol((read_c0_mvpcontrol() & ~MVPCONTROL_VPC)); ++ ehb(); ++ local_flush_tlb_all(); ++ ++ printk("Wired TLB entries for Linux read_c0_wired() = %d\n", read_c0_wired()); ++#ifdef CONFIG_MIPS_MT_SMTC ++ nostlb = !stlb; ++#endif ++ emt(tcflags); ++ evpe(vpeflags); ++} ++#endif + + /* + * Dump new MIPS MT state for the core. Does not leave TCs halted. 
+@@ -78,18 +147,18 @@ void mips_mt_regdump(unsigned long mvpct + if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) { + printk(" VPE %d\n", i); + printk(" VPEControl : %08lx\n", +- read_vpe_c0_vpecontrol()); ++ read_vpe_c0_vpecontrol()); + printk(" VPEConf0 : %08lx\n", +- read_vpe_c0_vpeconf0()); ++ read_vpe_c0_vpeconf0()); + printk(" VPE%d.Status : %08lx\n", +- i, read_vpe_c0_status()); ++ i, read_vpe_c0_status()); + printk(" VPE%d.EPC : %08lx %pS\n", +- i, read_vpe_c0_epc(), +- (void *) read_vpe_c0_epc()); ++ i, read_vpe_c0_epc(), ++ (void *) read_vpe_c0_epc()); + printk(" VPE%d.Cause : %08lx\n", +- i, read_vpe_c0_cause()); ++ i, read_vpe_c0_cause()); + printk(" VPE%d.Config7 : %08lx\n", +- i, read_vpe_c0_config7()); ++ i, read_vpe_c0_config7()); + break; /* Next VPE */ + } + } +@@ -287,6 +356,9 @@ void mips_mt_set_cpuoptions(void) + printk("Mapped %ld ITC cells starting at 0x%08x\n", + ((itcblkgrn & 0x7fe00000) >> 20), itc_base); + } ++#ifdef CONFIG_IFX_VPE_EXT ++ configure_tlb(); ++#endif + } + + /* +--- a/arch/mips/kernel/proc.c ++++ b/arch/mips/kernel/proc.c +@@ -7,6 +7,7 @@ + #include <linux/kernel.h> + #include <linux/sched.h> + #include <linux/seq_file.h> ++#include <linux/proc_fs.h> + #include <asm/bootinfo.h> + #include <asm/cpu.h> + #include <asm/cpu-features.h> +@@ -108,3 +109,19 @@ const struct seq_operations cpuinfo_op = + .stop = c_stop, + .show = show_cpuinfo, + }; ++ ++/* ++ * Support for MIPS/local /proc hooks in /proc/mips/ ++ */ ++ ++static struct proc_dir_entry *mips_proc = NULL; ++ ++struct proc_dir_entry *get_mips_proc_dir(void) ++{ ++ /* ++ * This ought not to be preemptable. ++ */ ++ if(mips_proc == NULL) ++ mips_proc = proc_mkdir("mips", NULL); ++ return(mips_proc); ++} +--- a/arch/mips/kernel/smtc.c ++++ b/arch/mips/kernel/smtc.c +@@ -1335,6 +1335,13 @@ void smtc_get_new_mmu_context(struct mm_ + asid = asid_cache(cpu); + + do { ++#ifdef CONFIG_IFX_VPE_EXT ++ /* If TLB is shared between AP and RP (AP is running SMTC), ++ leave out max ASID i.e., ASID_MASK for RP ++ */ ++ if (!nostlb && ((asid & ASID_MASK) == (ASID_MASK - 1))) ++ asid++; ++#endif + if (!((asid += ASID_INC) & ASID_MASK) ) { + if (cpu_has_vtag_icache) + flush_icache_all(); +--- a/arch/mips/kernel/vpe.c ++++ b/arch/mips/kernel/vpe.c +@@ -77,6 +77,58 @@ static struct kspd_notifications kspd_ev + static int kspd_events_reqd; + #endif + ++#ifdef CONFIG_IFX_VPE_EXT ++static int is_sdepgm; ++extern int stlb; ++extern int vpe0_wired; ++extern int vpe1_wired; ++unsigned int vpe1_load_addr; ++ ++static int __init load_address(char *str) ++{ ++ get_option(&str, &vpe1_load_addr); ++ return 1; ++} ++__setup("vpe1_load_addr=", load_address); ++ ++#include <asm/mipsmtregs.h> ++#define write_vpe_c0_wired(val) mttc0(6, 0, val) ++ ++#ifndef COMMAND_LINE_SIZE ++# define COMMAND_LINE_SIZE 512 ++#endif ++ ++char command_line[COMMAND_LINE_SIZE * 2]; ++ ++static unsigned int vpe1_mem; ++static int __init vpe1mem(char *str) ++{ ++ vpe1_mem = memparse(str, &str); ++ return 1; ++} ++__setup("vpe1_mem=", vpe1mem); ++ ++uint32_t vpe1_wdog_ctr; ++static int __init wdog_ctr(char *str) ++{ ++ get_option(&str, &vpe1_wdog_ctr); ++ return 1; ++} ++ ++__setup("vpe1_wdog_ctr_addr=", wdog_ctr); ++EXPORT_SYMBOL(vpe1_wdog_ctr); ++ ++uint32_t vpe1_wdog_timeout; ++static int __init wdog_timeout(char *str) ++{ ++ get_option(&str, &vpe1_wdog_timeout); ++ return 1; ++} ++ ++__setup("vpe1_wdog_timeout=", wdog_timeout); ++EXPORT_SYMBOL(vpe1_wdog_timeout); ++ ++#endif + /* grab the likely amount of memory we will need. 
*/ + #ifdef CONFIG_MIPS_VPE_LOADER_TOM + #define P_SIZE (2 * 1024 * 1024) +@@ -269,6 +321,13 @@ static void *alloc_progmem(unsigned long + void *addr; + + #ifdef CONFIG_MIPS_VPE_LOADER_TOM ++#ifdef CONFIG_IFX_VPE_EXT ++ if (vpe1_load_addr) { ++ memset((void *)vpe1_load_addr, 0, len); ++ return (void *)vpe1_load_addr; ++ } ++#endif ++ + /* + * This means you must tell Linux to use less memory than you + * physically have, for example by passing a mem= boot argument. +@@ -747,6 +806,12 @@ static int vpe_run(struct vpe * v) + } + + /* Write the address we want it to start running from in the TCPC register. */ ++#if defined(CONFIG_IFX_VPE_EXT) && 0 ++ if (stlb) ++ write_vpe_c0_wired(vpe0_wired + vpe1_wired); ++ else ++ write_vpe_c0_wired(vpe1_wired); ++#endif + write_tc_c0_tcrestart((unsigned long)v->__start); + write_tc_c0_tccontext((unsigned long)0); + +@@ -760,6 +825,20 @@ static int vpe_run(struct vpe * v) + + write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H); + ++#if defined(CONFIG_IFX_VPE_EXT) && 0 ++ /* ++ * $a2 & $a3 are used to pass command line parameters to VPE1. $a2 ++ * points to the start of the command line string and $a3 points to ++ * the end of the string. This convention is identical to the Linux ++ * kernel boot parameter passing mechanism. Please note that $a3 is ++ * used to pass physical memory size or 0 in SDE tool kit. So, if you ++ * are passing comand line parameters through $a2 & $a3 SDE programs ++ * don't work as desired. ++ */ ++ mttgpr(6, command_line); ++ mttgpr(7, (command_line + strlen(command_line))); ++ if (is_sdepgm) ++#endif + /* + * The sde-kit passes 'memsize' to __start in $a3, so set something + * here... Or set $a3 to zero and define DFLT_STACK_SIZE and +@@ -834,6 +913,9 @@ static int find_vpe_symbols(struct vpe * + if ( (v->__start == 0) || (v->shared_ptr == NULL)) + return -1; + ++#ifdef CONFIG_IFX_VPE_EXT ++ is_sdepgm = 1; ++#endif + return 0; + } + +@@ -995,6 +1077,15 @@ static int vpe_elfload(struct vpe * v) + (unsigned long)v->load_addr + v->len); + + if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) { ++#ifdef CONFIG_IFX_VPE_EXT ++ if (vpe1_load_addr) { ++ /* Conversion to KSEG1 is required ??? */ ++ v->__start = KSEG1ADDR(vpe1_load_addr); ++ is_sdepgm = 0; ++ return 0; ++ } ++#endif ++ + if (v->__start == 0) { + printk(KERN_WARNING "VPE loader: program does not contain " + "a __start symbol\n"); +@@ -1065,6 +1156,9 @@ static int vpe_open(struct inode *inode, + struct vpe_notifications *not; + struct vpe *v; + int ret; ++#ifdef CONFIG_IFX_VPE_EXT ++ int progsize; ++#endif + + if (minor != iminor(inode)) { + /* assume only 1 device at the moment. */ +@@ -1091,14 +1185,23 @@ static int vpe_open(struct inode *inode, + cleanup_tc(get_tc(tclimit)); + } + ++#ifdef CONFIG_IFX_VPE_EXT ++ progsize = (vpe1_mem != 0) ? vpe1_mem : P_SIZE; ++ //printk("progsize = %x\n", progsize); ++ v->pbuffer = vmalloc(progsize); ++ v->plen = progsize; ++#else + /* this of-course trashes what was there before... 
*/ + v->pbuffer = vmalloc(P_SIZE); + v->plen = P_SIZE; ++#endif + v->load_addr = NULL; + v->len = 0; + ++#if 0 + v->uid = filp->f_cred->fsuid; + v->gid = filp->f_cred->fsgid; ++#endif + + #ifdef CONFIG_MIPS_APSP_KSPD + /* get kspd to tell us when a syscall_exit happens */ +@@ -1351,6 +1454,133 @@ static void kspd_sp_exit( int sp_id) + cleanup_tc(get_tc(sp_id)); + } + #endif ++#ifdef CONFIG_IFX_VPE_EXT ++int32_t vpe1_sw_start(void* sw_start_addr, uint32_t tcmask, uint32_t flags) ++{ ++ enum vpe_state state; ++ struct vpe *v = get_vpe(tclimit); ++ struct vpe_notifications *not; ++ ++ if (tcmask || flags) { ++ printk(KERN_WARNING "Currently tcmask and flags should be 0.\ ++ other values not supported\n"); ++ return -1; ++ } ++ ++ state = xchg(&v->state, VPE_STATE_INUSE); ++ if (state != VPE_STATE_UNUSED) { ++ vpe_stop(v); ++ ++ list_for_each_entry(not, &v->notify, list) { ++ not->stop(tclimit); ++ } ++ } ++ ++ v->__start = (unsigned long)sw_start_addr; ++ is_sdepgm = 0; ++ ++ if (!vpe_run(v)) { ++ printk(KERN_DEBUG "VPE loader: VPE1 running successfully\n"); ++ return 0; ++ } ++ return -1; ++} ++ ++EXPORT_SYMBOL(vpe1_sw_start); ++ ++int32_t vpe1_sw_stop(uint32_t flags) ++{ ++ struct vpe *v = get_vpe(tclimit); ++ ++ if (!vpe_free(v)) { ++ printk(KERN_DEBUG "RP Stopped\n"); ++ return 0; ++ } ++ else ++ return -1; ++} ++ ++EXPORT_SYMBOL(vpe1_sw_stop); ++ ++uint32_t vpe1_get_load_addr (uint32_t flags) ++{ ++ return vpe1_load_addr; ++} ++ ++EXPORT_SYMBOL(vpe1_get_load_addr); ++ ++uint32_t vpe1_get_max_mem (uint32_t flags) ++{ ++ if (!vpe1_mem) ++ return P_SIZE; ++ else ++ return vpe1_mem; ++} ++ ++EXPORT_SYMBOL(vpe1_get_max_mem); ++ ++void* vpe1_get_cmdline_argument(void) ++{ ++ return saved_command_line; ++} ++ ++EXPORT_SYMBOL(vpe1_get_cmdline_argument); ++ ++int32_t vpe1_set_boot_param(char *field, char *value, char flags) ++{ ++ char *ptr, string[64]; ++ int start_off, end_off; ++ if (!field) ++ return -1; ++ strcpy(string, field); ++ if (value) { ++ strcat(string, "="); ++ strcat(string, value); ++ strcat(command_line, " "); ++ strcat(command_line, string); ++ } ++ else { ++ ptr = strstr(command_line, string); ++ if (ptr) { ++ start_off = ptr - command_line; ++ ptr += strlen(string); ++ while ((*ptr != ' ') && (*ptr != '\0')) ++ ptr++; ++ end_off = ptr - command_line; ++ command_line[start_off] = '\0'; ++ strcat (command_line, command_line+end_off); ++ } ++ } ++ return 0; ++} ++ ++EXPORT_SYMBOL(vpe1_set_boot_param); ++ ++int32_t vpe1_get_boot_param(char *field, char **value, char flags) ++{ ++ char *ptr, string[64]; ++ int i = 0; ++ if (!field) ++ return -1; ++ if ((ptr = strstr(command_line, field))) { ++ ptr += strlen(field) + 1; /* including = */ ++ while ((*ptr != ' ') && (*ptr != '\0')) ++ string[i++] = *ptr++; ++ string[i] = '\0'; ++ *value = kmalloc((strlen(string) + 1), GFP_KERNEL); ++ if (*value != NULL) ++ strcpy(*value, string); ++ } ++ else ++ *value = NULL; ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL(vpe1_get_boot_param); ++ ++extern void configure_tlb(void); ++#endif + + static ssize_t store_kill(struct device *dev, struct device_attribute *attr, + const char *buf, size_t len) +@@ -1432,6 +1662,18 @@ static int __init vpe_module_init(void) + printk("VPE loader: not a MIPS MT capable processor\n"); + return -ENODEV; + } ++#ifdef CONFIG_IFX_VPE_EXT ++#ifndef CONFIG_MIPS_MT_SMTC ++ configure_tlb(); ++#endif ++#endif ++ ++#ifndef CONFIG_MIPS_MT_SMTC ++ if (!vpelimit) ++ vpelimit = 1; ++ if (!tclimit) ++ tclimit = 1; ++#endif + + if (vpelimit == 0) { + printk(KERN_WARNING "No VPEs reserved 
for AP/SP, not " +@@ -1476,10 +1718,12 @@ static int __init vpe_module_init(void) + mtflags = dmt(); + vpflags = dvpe(); + ++ back_to_back_c0_hazard(); ++ + /* Put MVPE's into 'configuration state' */ + set_c0_mvpcontrol(MVPCONTROL_VPC); + +- /* dump_mtregs(); */ ++ dump_mtregs(); + + val = read_c0_mvpconf0(); + hw_tcs = (val & MVPCONF0_PTC) + 1; +@@ -1491,6 +1735,7 @@ static int __init vpe_module_init(void) + * reschedule send IPIs or similar we might hang. + */ + clear_c0_mvpcontrol(MVPCONTROL_VPC); ++ back_to_back_c0_hazard(); + evpe(vpflags); + emt(mtflags); + local_irq_restore(flags); +@@ -1516,6 +1761,7 @@ static int __init vpe_module_init(void) + } + + v->ntcs = hw_tcs - tclimit; ++ write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1); + + /* add the tc to the list of this vpe's tc's. */ + list_add(&t->tc, &v->tc); +@@ -1584,6 +1830,7 @@ static int __init vpe_module_init(void) + out_reenable: + /* release config state */ + clear_c0_mvpcontrol(MVPCONTROL_VPC); ++ back_to_back_c0_hazard(); + + evpe(vpflags); + emt(mtflags); +--- /dev/null ++++ b/arch/mips/kernel/mtsched_proc.c +@@ -0,0 +1,279 @@ ++/* ++ * /proc hooks for MIPS MT scheduling policy management for 34K cores ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * Copyright (C) 2006 Mips Technologies, Inc ++ */ ++ ++#include <linux/kernel.h> ++ ++#include <asm/cpu.h> ++#include <asm/processor.h> ++#include <asm/system.h> ++#include <asm/mipsregs.h> ++#include <asm/mipsmtregs.h> ++#include <asm/uaccess.h> ++#include <linux/proc_fs.h> ++ ++static struct proc_dir_entry *mtsched_proc; ++ ++#ifndef CONFIG_MIPS_MT_SMTC ++#define NTCS 2 ++#else ++#define NTCS NR_CPUS ++#endif ++#define NVPES 2 ++ ++int lastvpe = 1; ++int lasttc = 8; ++ ++static int proc_read_mtsched(char *page, char **start, off_t off, ++ int count, int *eof, void *data) ++{ ++ int totalen = 0; ++ int len; ++ ++ int i; ++ int vpe; ++ int mytc; ++ unsigned long flags; ++ unsigned int mtflags; ++ unsigned int haltstate; ++ unsigned int vpes_checked[NVPES]; ++ unsigned int vpeschedule[NVPES]; ++ unsigned int vpeschefback[NVPES]; ++ unsigned int tcschedule[NTCS]; ++ unsigned int tcschefback[NTCS]; ++ ++ /* Dump the state of the MIPS MT scheduling policy manager */ ++ /* Inititalize control state */ ++ for(i = 0; i < NVPES; i++) { ++ vpes_checked[i] = 0; ++ vpeschedule[i] = 0; ++ vpeschefback[i] = 0; ++ } ++ for(i = 0; i < NTCS; i++) { ++ tcschedule[i] = 0; ++ tcschefback[i] = 0; ++ } ++ ++ /* Disable interrupts and multithreaded issue */ ++ local_irq_save(flags); ++ mtflags = dvpe(); ++ ++ /* Then go through the TCs, halt 'em, and extract the values */ ++ mytc = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT; ++ for(i = 0; i < NTCS; i++) { ++ if(i == mytc) { ++ /* No need to halt ourselves! 
*/ ++ tcschedule[i] = read_c0_tcschedule(); ++ tcschefback[i] = read_c0_tcschefback(); ++ /* If VPE bound to TC hasn't been checked, do it */ ++ vpe = read_c0_tcbind() & TCBIND_CURVPE; ++ if(!vpes_checked[vpe]) { ++ vpeschedule[vpe] = read_c0_vpeschedule(); ++ vpeschefback[vpe] = read_c0_vpeschefback(); ++ vpes_checked[vpe] = 1; ++ } ++ } else { ++ settc(i); ++ haltstate = read_tc_c0_tchalt(); ++ write_tc_c0_tchalt(TCHALT_H); ++ mips_ihb(); ++ tcschedule[i] = read_tc_c0_tcschedule(); ++ tcschefback[i] = read_tc_c0_tcschefback(); ++ /* If VPE bound to TC hasn't been checked, do it */ ++ vpe = read_tc_c0_tcbind() & TCBIND_CURVPE; ++ if(!vpes_checked[vpe]) { ++ vpeschedule[vpe] = read_vpe_c0_vpeschedule(); ++ vpeschefback[vpe] = read_vpe_c0_vpeschefback(); ++ vpes_checked[vpe] = 1; ++ } ++ if(!haltstate) write_tc_c0_tchalt(0); ++ } ++ } ++ /* Re-enable MT and interrupts */ ++ evpe(mtflags); ++ local_irq_restore(flags); ++ ++ for(vpe=0; vpe < NVPES; vpe++) { ++ len = sprintf(page, "VPE[%d].VPEschedule = 0x%08x\n", ++ vpe, vpeschedule[vpe]); ++ totalen += len; ++ page += len; ++ len = sprintf(page, "VPE[%d].VPEschefback = 0x%08x\n", ++ vpe, vpeschefback[vpe]); ++ totalen += len; ++ page += len; ++ } ++ for(i=0; i < NTCS; i++) { ++ len = sprintf(page, "TC[%d].TCschedule = 0x%08x\n", ++ i, tcschedule[i]); ++ totalen += len; ++ page += len; ++ len = sprintf(page, "TC[%d].TCschefback = 0x%08x\n", ++ i, tcschefback[i]); ++ totalen += len; ++ page += len; ++ } ++ return totalen; ++} ++ ++/* ++ * Write to perf counter registers based on text input ++ */ ++ ++#define TXTBUFSZ 1024 ++ ++static int proc_write_mtsched(struct file *file, const char *buffer, ++ unsigned long count, void *data) ++{ ++ int len = 0; ++ char mybuf[TXTBUFSZ]; ++ /* At most, we will set up 9 TCs and 2 VPEs, 11 entries in all */ ++ char entity[1]; //, entity1[1]; ++ int number[1]; ++ unsigned long value[1]; ++ int nparsed = 0 , index = 0; ++ unsigned long flags; ++ unsigned int mtflags; ++ unsigned int haltstate; ++ unsigned int tcbindval; ++ ++ if(count >= TXTBUFSZ) len = TXTBUFSZ-1; ++ else len = count; ++ memset(mybuf,0,TXTBUFSZ); ++ if(copy_from_user(mybuf, buffer, len)) return -EFAULT; ++ ++ nparsed = sscanf(mybuf, "%c%d %lx", ++ &entity[0] ,&number[0], &value[0]); ++ ++ /* ++ * Having acquired the inputs, which might have ++ * generated exceptions and preemptions, ++ * program the registers. ++ */ ++ /* Disable interrupts and multithreaded issue */ ++ local_irq_save(flags); ++ mtflags = dvpe(); ++ ++ if(entity[index] == 't' ) { ++ /* Set TCSchedule or TCScheFBack of specified TC */ ++ if(number[index] > NTCS) goto skip; ++ /* If it's our own TC, do it direct */ ++ if(number[index] == ++ ((read_c0_tcbind() & TCBIND_CURTC) ++ >> TCBIND_CURTC_SHIFT)) { ++ if(entity[index] == 't') ++ write_c0_tcschedule(value[index]); ++ else ++ write_c0_tcschefback(value[index]); ++ } else { ++ /* Otherwise, we do it via MTTR */ ++ settc(number[index]); ++ haltstate = read_tc_c0_tchalt(); ++ write_tc_c0_tchalt(TCHALT_H); ++ mips_ihb(); ++ if(entity[index] == 't') ++ write_tc_c0_tcschedule(value[index]); ++ else ++ write_tc_c0_tcschefback(value[index]); ++ mips_ihb(); ++ if(!haltstate) write_tc_c0_tchalt(0); ++ } ++ } else if(entity[index] == 'v') { ++ /* Set VPESchedule of specified VPE */ ++ if(number[index] > NVPES) goto skip; ++ tcbindval = read_c0_tcbind(); ++ /* Are we doing this to our current VPE? 
*/ ++ if((tcbindval & TCBIND_CURVPE) == number[index]) { ++ /* Then life is simple */ ++ write_c0_vpeschedule(value[index]); ++ } else { ++ /* ++ * Bind ourselves to the other VPE long enough ++ * to program the bind value. ++ */ ++ write_c0_tcbind((tcbindval & ~TCBIND_CURVPE) ++ | number[index]); ++ mips_ihb(); ++ write_c0_vpeschedule(value[index]); ++ mips_ihb(); ++ /* Restore previous binding */ ++ write_c0_tcbind(tcbindval); ++ mips_ihb(); ++ } ++ } ++ ++ else if(entity[index] == 'r') { ++ unsigned int vpes_checked[2], vpe ,i , mytc; ++ vpes_checked[0] = vpes_checked[1] = 0; ++ ++ /* Then go through the TCs, halt 'em, and extract the values */ ++ mytc = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT; ++ ++ for(i = 0; i < NTCS; i++) { ++ if(i == mytc) { ++ /* No need to halt ourselves! */ ++ write_c0_vpeschefback(0); ++ write_c0_tcschefback(0); ++ } else { ++ settc(i); ++ haltstate = read_tc_c0_tchalt(); ++ write_tc_c0_tchalt(TCHALT_H); ++ mips_ihb(); ++ write_tc_c0_tcschefback(0); ++ /* If VPE bound to TC hasn't been checked, do it */ ++ vpe = read_tc_c0_tcbind() & TCBIND_CURVPE; ++ if(!vpes_checked[vpe]) { ++ write_vpe_c0_vpeschefback(0); ++ vpes_checked[vpe] = 1; ++ } ++ if(!haltstate) write_tc_c0_tchalt(0); ++ } ++ } ++ } ++ else { ++ printk ("\n Usage : <t/v><0/1> <Hex Value>\n Example : t0 0x01\n"); ++ } ++ ++skip: ++ /* Re-enable MT and interrupts */ ++ evpe(mtflags); ++ local_irq_restore(flags); ++ return (len); ++} ++ ++static int __init init_mtsched_proc(void) ++{ ++ extern struct proc_dir_entry *get_mips_proc_dir(void); ++ struct proc_dir_entry *mips_proc_dir; ++ ++ if (!cpu_has_mipsmt) { ++ printk("mtsched: not a MIPS MT capable processor\n"); ++ return -ENODEV; ++ } ++ ++ mips_proc_dir = get_mips_proc_dir(); ++ ++ mtsched_proc = create_proc_entry("mtsched", 0644, mips_proc_dir); ++ mtsched_proc->read_proc = proc_read_mtsched; ++ mtsched_proc->write_proc = proc_write_mtsched; ++ ++ return 0; ++} ++ ++/* Automagically create the entry */ ++module_init(init_mtsched_proc); +--- /dev/null ++++ b/arch/mips/kernel/perf_proc.c +@@ -0,0 +1,191 @@ ++/* ++ * /proc hooks for CPU performance counter support for SMTC kernel ++ * (and ultimately others) ++ * Copyright (C) 2006 Mips Technologies, Inc ++ */ ++ ++#include <linux/kernel.h> ++ ++#include <asm/cpu.h> ++#include <asm/processor.h> ++#include <asm/system.h> ++#include <asm/mipsregs.h> ++#include <asm/uaccess.h> ++#include <linux/proc_fs.h> ++ ++/* ++ * /proc diagnostic and statistics hooks ++ */ ++ ++ ++/* Internal software-extended event counters */ ++ ++static unsigned long long extencount[4] = {0,0,0,0}; ++ ++static struct proc_dir_entry *perf_proc; ++ ++static int proc_read_perf(char *page, char **start, off_t off, ++ int count, int *eof, void *data) ++{ ++ int totalen = 0; ++ int len; ++ ++ len = sprintf(page, "PerfCnt[0].Ctl : 0x%08x\n", read_c0_perfctrl0()); ++ totalen += len; ++ page += len; ++ len = sprintf(page, "PerfCnt[0].Cnt : %Lu\n", ++ extencount[0] + (unsigned long long)((unsigned)read_c0_perfcntr0())); ++ totalen += len; ++ page += len; ++ len = sprintf(page, "PerfCnt[1].Ctl : 0x%08x\n", read_c0_perfctrl1()); ++ totalen += len; ++ page += len; ++ len = sprintf(page, "PerfCnt[1].Cnt : %Lu\n", ++ extencount[1] + (unsigned long long)((unsigned)read_c0_perfcntr1())); ++ totalen += len; ++ page += len; ++ len = sprintf(page, "PerfCnt[2].Ctl : 0x%08x\n", read_c0_perfctrl2()); ++ totalen += len; ++ page += len; ++ len = sprintf(page, "PerfCnt[2].Cnt : %Lu\n", ++ extencount[2] + (unsigned long 
long)((unsigned)read_c0_perfcntr2())); ++ totalen += len; ++ page += len; ++ len = sprintf(page, "PerfCnt[3].Ctl : 0x%08x\n", read_c0_perfctrl3()); ++ totalen += len; ++ page += len; ++ len = sprintf(page, "PerfCnt[3].Cnt : %Lu\n", ++ extencount[3] + (unsigned long long)((unsigned)read_c0_perfcntr3())); ++ totalen += len; ++ page += len; ++ ++ return totalen; ++} ++ ++/* ++ * Write to perf counter registers based on text input ++ */ ++ ++#define TXTBUFSZ 1024 ++ ++static int proc_write_perf(struct file *file, const char *buffer, ++ unsigned long count, void *data) ++{ ++ int len; ++ int nparsed; ++ int index; ++ char mybuf[TXTBUFSZ]; ++ ++ int which[4]; ++ unsigned long control[4]; ++ long long ctrdata[4]; ++ ++ if(count >= TXTBUFSZ) len = TXTBUFSZ-1; ++ else len = count; ++ memset(mybuf,0,TXTBUFSZ); ++ if(copy_from_user(mybuf, buffer, len)) return -EFAULT; ++ ++ nparsed = sscanf(mybuf, ++ "%d %lx %Ld %d %lx %Ld %d %lx %Ld %d %lx %Ld", ++ &which[0], &control[0], &ctrdata[0], ++ &which[1], &control[1], &ctrdata[1], ++ &which[2], &control[2], &ctrdata[2], ++ &which[3], &control[3], &ctrdata[3]); ++ ++ for(index = 0; nparsed >= 3; index++) { ++ switch (which[index]) { ++ case 0: ++ write_c0_perfctrl0(control[index]); ++ if(ctrdata[index] != -1) { ++ extencount[0] = (unsigned long long)ctrdata[index]; ++ write_c0_perfcntr0((unsigned long)0); ++ } ++ break; ++ case 1: ++ write_c0_perfctrl1(control[index]); ++ if(ctrdata[index] != -1) { ++ extencount[1] = (unsigned long long)ctrdata[index]; ++ write_c0_perfcntr1((unsigned long)0); ++ } ++ break; ++ case 2: ++ write_c0_perfctrl2(control[index]); ++ if(ctrdata[index] != -1) { ++ extencount[2] = (unsigned long long)ctrdata[index]; ++ write_c0_perfcntr2((unsigned long)0); ++ } ++ break; ++ case 3: ++ write_c0_perfctrl3(control[index]); ++ if(ctrdata[index] != -1) { ++ extencount[3] = (unsigned long long)ctrdata[index]; ++ write_c0_perfcntr3((unsigned long)0); ++ } ++ break; ++ } ++ nparsed -= 3; ++ } ++ return (len); ++} ++ ++extern int (*perf_irq)(void); ++ ++/* ++ * Invoked when timer interrupt vector picks up a perf counter overflow ++ */ ++ ++static int perf_proc_irq(void) ++{ ++ unsigned long snapshot; ++ ++ /* ++ * It would be nice to do this as a loop, but we don't have ++ * indirect access to CP0 registers. 
++ */ ++ snapshot = read_c0_perfcntr0(); ++ if ((long)snapshot < 0) { ++ extencount[0] += ++ (unsigned long long)((unsigned)read_c0_perfcntr0()); ++ write_c0_perfcntr0(0); ++ } ++ snapshot = read_c0_perfcntr1(); ++ if ((long)snapshot < 0) { ++ extencount[1] += ++ (unsigned long long)((unsigned)read_c0_perfcntr1()); ++ write_c0_perfcntr1(0); ++ } ++ snapshot = read_c0_perfcntr2(); ++ if ((long)snapshot < 0) { ++ extencount[2] += ++ (unsigned long long)((unsigned)read_c0_perfcntr2()); ++ write_c0_perfcntr2(0); ++ } ++ snapshot = read_c0_perfcntr3(); ++ if ((long)snapshot < 0) { ++ extencount[3] += ++ (unsigned long long)((unsigned)read_c0_perfcntr3()); ++ write_c0_perfcntr3(0); ++ } ++ return 0; ++} ++ ++static int __init init_perf_proc(void) ++{ ++ extern struct proc_dir_entry *get_mips_proc_dir(void); ++ ++ struct proc_dir_entry *mips_proc_dir = get_mips_proc_dir(); ++ ++ write_c0_perfcntr0(0); ++ write_c0_perfcntr1(0); ++ write_c0_perfcntr2(0); ++ write_c0_perfcntr3(0); ++ perf_proc = create_proc_entry("perf", 0644, mips_proc_dir); ++ perf_proc->read_proc = proc_read_perf; ++ perf_proc->write_proc = proc_write_perf; ++ perf_irq = perf_proc_irq; ++ ++ return 0; ++} ++ ++/* Automagically create the entry */ ++module_init(init_perf_proc); diff --git a/target/linux/lantiq/patches/810-ar9-cache-split.patch b/target/linux/lantiq/patches/810-ar9-cache-split.patch new file mode 100644 index 0000000000..9487f6ce43 --- /dev/null +++ b/target/linux/lantiq/patches/810-ar9-cache-split.patch @@ -0,0 +1,301 @@ +--- a/arch/mips/Kconfig ++++ b/arch/mips/Kconfig +@@ -1706,6 +1706,28 @@ + help + IFX included extensions in APRP + ++config IFX_VPE_CACHE_SPLIT ++ bool "IFX Cache Split Ways" ++ depends on IFX_VPE_EXT ++ help ++ IFX extension for reserving (splitting) cache ways among VPEs. You must ++ give kernel command line arguments vpe_icache_shared=0 or ++ vpe_dcache_shared=0 to enable splitting of icache or dcache ++ respectively. Then you can specify which cache ways should be ++ assigned to which VPE. There are total 8 cache ways, 4 each ++ for dcache and icache: dcache_way0, dcache_way1,dcache_way2, ++ dcache_way3 and icache_way0,icache_way1, icache_way2,icache_way3. ++ ++ For example, if you specify vpe_icache_shared=0 and icache_way2=1, ++ then the 3rd icache way will be assigned to VPE0 and denied in VPE1. ++ ++ For icache, software is required to make at least one cache way available ++ for a VPE at all times i.e., one can't assign all the icache ways to one ++ VPE. ++ ++ By default, vpe_dcache_shared and vpe_icache_shared are set to 1 ++ (i.e., both icache and dcache are shared among VPEs) ++ + config PERFCTRS + bool "34K Performance counters" + depends on MIPS_MT && PROC_FS +--- a/arch/mips/kernel/vpe.c ++++ b/arch/mips/kernel/vpe.c +@@ -128,6 +128,13 @@ + EXPORT_SYMBOL(vpe1_wdog_timeout); + + #endif ++ ++#ifdef CONFIG_IFX_VPE_CACHE_SPLIT /* Code for splitting the cache ways among VPEs. */ ++extern int vpe_icache_shared,vpe_dcache_shared; ++extern int icache_way0,icache_way1,icache_way2,icache_way3; ++extern int dcache_way0,dcache_way1,dcache_way2,dcache_way3; ++#endif ++ + /* grab the likely amount of memory we will need. 
*/ + #ifdef CONFIG_MIPS_VPE_LOADER_TOM + #define P_SIZE (2 * 1024 * 1024) +@@ -866,6 +873,65 @@ + /* enable this VPE */ + write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); + ++#ifdef CONFIG_IFX_VPE_CACHE_SPLIT ++ if ( (!vpe_icache_shared) || (!vpe_dcache_shared) ) { ++ ++ /* PCP bit must be 1 to split the cache */ ++ if(read_c0_mvpconf0() & MVPCONF0_PCP) { ++ ++ if ( !vpe_icache_shared ){ ++ write_vpe_c0_vpeconf0((read_vpe_c0_vpeconf0()) & ~VPECONF0_ICS); ++ ++ /* ++ * If any cache way is 1, then that way is denied ++ * in VPE1. Otherwise assign that way to VPE1. ++ */ ++ if (!icache_way0) ++ write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_IWX0 ); ++ else ++ write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_IWX0 ); ++ if (!icache_way1) ++ write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_IWX1 ); ++ else ++ write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_IWX1 ); ++ if (!icache_way2) ++ write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_IWX2 ); ++ else ++ write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_IWX2 ); ++ if (!icache_way3) ++ write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_IWX3 ); ++ else ++ write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_IWX3 ); ++ } ++ ++ if ( !vpe_dcache_shared ) { ++ write_vpe_c0_vpeconf0((read_vpe_c0_vpeconf0()) & ~VPECONF0_DCS); ++ ++ /* ++ * If any cache way is 1, then that way is denied ++ * in VPE1. Otherwise assign that way to VPE1. ++ */ ++ if (!dcache_way0) ++ write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_DWX0 ); ++ else ++ write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_DWX0 ); ++ if (!dcache_way1) ++ write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_DWX1 ); ++ else ++ write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_DWX1 ); ++ if (!dcache_way2) ++ write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_DWX2 ); ++ else ++ write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_DWX2 ); ++ if (!dcache_way3) ++ write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_DWX3 ); ++ else ++ write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_DWX3 ); ++ } ++ } ++ } ++#endif /* endif CONFIG_IFX_VPE_CACHE_SPLIT */ ++ + /* clear out any left overs from a previous program */ + write_vpe_c0_status(0); + write_vpe_c0_cause(0); +--- a/arch/mips/mm/c-r4k.c ++++ b/arch/mips/mm/c-r4k.c +@@ -1348,6 +1348,106 @@ + __setup("coherentio", setcoherentio); + #endif + ++#ifdef CONFIG_IFX_VPE_CACHE_SPLIT /* Code for splitting the cache ways among VPEs. */ ++ ++#include <asm/mipsmtregs.h> ++ ++/* ++ * By default, vpe_icache_shared and vpe_dcache_shared ++ * values are 1 i.e., both icache and dcache are shared ++ * among the VPEs. ++ */ ++ ++int vpe_icache_shared = 1; ++static int __init vpe_icache_shared_val(char *str) ++{ ++ get_option(&str, &vpe_icache_shared); ++ return 1; ++} ++__setup("vpe_icache_shared=", vpe_icache_shared_val); ++EXPORT_SYMBOL(vpe_icache_shared); ++ ++int vpe_dcache_shared = 1; ++static int __init vpe_dcache_shared_val(char *str) ++{ ++ get_option(&str, &vpe_dcache_shared); ++ return 1; ++} ++__setup("vpe_dcache_shared=", vpe_dcache_shared_val); ++EXPORT_SYMBOL(vpe_dcache_shared); ++ ++/* ++ * Software is required to make atleast one icache ++ * way available for a VPE at all times i.e., one ++ * can't assign all the icache ways to one VPE. 
++ */ ++ ++int icache_way0 = 0; ++static int __init icache_way0_val(char *str) ++{ ++ get_option(&str, &icache_way0); ++ return 1; ++} ++__setup("icache_way0=", icache_way0_val); ++ ++int icache_way1 = 0; ++static int __init icache_way1_val(char *str) ++{ ++ get_option(&str, &icache_way1); ++ return 1; ++} ++__setup("icache_way1=", icache_way1_val); ++ ++int icache_way2 = 0; ++static int __init icache_way2_val(char *str) ++{ ++ get_option(&str, &icache_way2); ++ return 1; ++} ++__setup("icache_way2=", icache_way2_val); ++ ++int icache_way3 = 0; ++static int __init icache_way3_val(char *str) ++{ ++ get_option(&str, &icache_way3); ++ return 1; ++} ++__setup("icache_way3=", icache_way3_val); ++ ++int dcache_way0 = 0; ++static int __init dcache_way0_val(char *str) ++{ ++ get_option(&str, &dcache_way0); ++ return 1; ++} ++__setup("dcache_way0=", dcache_way0_val); ++ ++int dcache_way1 = 0; ++static int __init dcache_way1_val(char *str) ++{ ++ get_option(&str, &dcache_way1); ++ return 1; ++} ++__setup("dcache_way1=", dcache_way1_val); ++ ++int dcache_way2 = 0; ++static int __init dcache_way2_val(char *str) ++{ ++ get_option(&str, &dcache_way2); ++ return 1; ++} ++__setup("dcache_way2=", dcache_way2_val); ++ ++int dcache_way3 = 0; ++static int __init dcache_way3_val(char *str) ++{ ++ get_option(&str, &dcache_way3); ++ return 1; ++} ++__setup("dcache_way3=", dcache_way3_val); ++ ++#endif /* endif CONFIG_IFX_VPE_CACHE_SPLIT */ ++ + void __cpuinit r4k_cache_init(void) + { + extern void build_clear_page(void); +@@ -1367,6 +1467,78 @@ + break; + } + ++#ifdef CONFIG_IFX_VPE_CACHE_SPLIT ++ /* ++ * We split the cache ways appropriately among the VPEs ++ * based on cache ways values we received as command line ++ * arguments ++ */ ++ if ( (!vpe_icache_shared) || (!vpe_dcache_shared) ){ ++ ++ /* PCP bit must be 1 to split the cache */ ++ if(read_c0_mvpconf0() & MVPCONF0_PCP) { ++ ++ /* Set CPA bit which enables us to modify VPEOpt register */ ++ write_c0_mvpcontrol((read_c0_mvpcontrol()) | MVPCONTROL_CPA); ++ ++ if ( !vpe_icache_shared ){ ++ write_c0_vpeconf0((read_c0_vpeconf0()) & ~VPECONF0_ICS); ++ /* ++ * If any cache way is 1, then that way is denied ++ * in VPE0. Otherwise assign that way to VPE0. ++ */ ++ printk(KERN_DEBUG "icache is split\n"); ++ printk(KERN_DEBUG "icache_way0=%d icache_way1=%d icache_way2=%d icache_way3=%d\n", ++ icache_way0, icache_way1,icache_way2, icache_way3); ++ if (icache_way0) ++ write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_IWX0 ); ++ else ++ write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_IWX0 ); ++ if (icache_way1) ++ write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_IWX1 ); ++ else ++ write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_IWX1 ); ++ if (icache_way2) ++ write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_IWX2 ); ++ else ++ write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_IWX2 ); ++ if (icache_way3) ++ write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_IWX3 ); ++ else ++ write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_IWX3 ); ++ } ++ ++ if ( !vpe_dcache_shared ) { ++ /* ++ * If any cache way is 1, then that way is denied ++ * in VPE0. Otherwise assign that way to VPE0. 
++ */ ++ printk(KERN_DEBUG "dcache is split\n"); ++ printk(KERN_DEBUG "dcache_way0=%d dcache_way1=%d dcache_way2=%d dcache_way3=%d\n", ++ dcache_way0, dcache_way1, dcache_way2, dcache_way3); ++ write_c0_vpeconf0((read_c0_vpeconf0()) & ~VPECONF0_DCS); ++ if (dcache_way0) ++ write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_DWX0 ); ++ else ++ write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_DWX0 ); ++ if (dcache_way1) ++ write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_DWX1 ); ++ else ++ write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_DWX1 ); ++ if (dcache_way2) ++ write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_DWX2 ); ++ else ++ write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_DWX2 ); ++ if (dcache_way3) ++ write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_DWX3 ); ++ else ++ write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_DWX3 ); ++ } ++ } ++ } ++ ++#endif /* endif CONFIG_IFX_VPE_CACHE_SPLIT */ ++ + probe_pcache(); + setup_scache(); + diff --git a/target/linux/lantiq/patches/900-pci_ath5k_hook.patch b/target/linux/lantiq/patches/900-pci_ath5k_hook.patch new file mode 100644 index 0000000000..f01cadc85e --- /dev/null +++ b/target/linux/lantiq/patches/900-pci_ath5k_hook.patch @@ -0,0 +1,40 @@ +--- /dev/null ++++ b/arch/mips/include/asm/mach-lantiq/pci.h +@@ -0,0 +1,14 @@ ++/* ++ * lantiq SoCs specific PCI definitions ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ */ ++ ++#ifndef __ASM_MACH_LANTIQ_PCI_H ++#define __ASM_MACH_LANTIQ_PCI_H ++ ++extern int (*ifxmips_pci_plat_dev_init)(struct pci_dev *dev); ++ ++#endif +--- a/arch/mips/pci/pci-lantiq.c ++++ b/arch/mips/pci/pci-lantiq.c +@@ -68,6 +68,8 @@ + + u32 lq_pci_mapped_cfg; + ++int (*lqpci_plat_dev_init)(struct pci_dev *dev) = NULL; ++ + /* Since the PCI REQ pins can be reused for other functionality, make it possible + to exclude those from interpretation by the PCI controller */ + static int lq_pci_req_mask = 0xf; +@@ -126,6 +128,10 @@ + printk ("WARNING: invalid interrupt pin %d\n", pin); + return 1; + } ++ ++ if (lqpci_plat_dev_init) ++ return lqpci_plat_dev_init(dev); ++ + return 0; + } + + diff --git a/target/linux/lantiq/xway/config-default b/target/linux/lantiq/xway/config-default new file mode 100644 index 0000000000..46cd6e778b --- /dev/null +++ b/target/linux/lantiq/xway/config-default @@ -0,0 +1,21 @@ +# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set +# CONFIG_CRYPTO_HW is not set +CONFIG_HAVE_IDE=y +CONFIG_HW_HAS_PCI=y +CONFIG_IMAGE_CMDLINE_HACK=y +CONFIG_LANTIQ_ETOP=y +CONFIG_LANTIQ_MACH_ARV45XX=y +CONFIG_LANTIQ_MACH_EASY4010=y +CONFIG_LANTIQ_MACH_EASY50712=y +CONFIG_LANTIQ_MACH_EASY50812=y +# CONFIG_LANTIQ_PROM_ASC0 is not set +CONFIG_LANTIQ_PROM_ASC1=y +CONFIG_LANTIQ_WDT=y +CONFIG_LEDS_GPIO=y +# CONFIG_LOONGSON_MC146818 is not set +CONFIG_LOONGSON_UART_BASE=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_SCSI_MOD=y +CONFIG_SOC_LANTIQ=y +CONFIG_SOC_LANTIQ_XWAY=y +CONFIG_USB_SUPPORT=y diff --git a/target/linux/lantiq/xway/target.mk b/target/linux/lantiq/xway/target.mk new file mode 100644 index 0000000000..e2e2a6f12b --- /dev/null +++ b/target/linux/lantiq/xway/target.mk @@ -0,0 +1,11 @@ +ARCH:=mips +SUBTARGET:=xway +BOARDNAME:=Xway +FEATURES:=squashfs jffs2 atm + +DEFAULT_PACKAGES+=uboot-lantiq-easy50712 kmod-pppoa ppp-mod-pppoa linux-atm atm-tools br2684ctl ifxmips-dsl-api ifxmips-dsl-control ifx-tapidemo + +define Target/Description + Lantiq XWAY (danube/twinpass/ar9) +endef + |
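
The CONFIG_IFX_VPE_EXT block patched into arch/mips/kernel/vpe.c above exports a small firmware-loader API (vpe1_sw_start, vpe1_sw_stop, vpe1_get_load_addr, vpe1_get_max_mem, vpe1_set_boot_param, vpe1_get_boot_param). A minimal sketch of a client module driving that API follows; the module name, the image-copy step and the error handling are illustrative assumptions, not part of this commit, and a real consumer (such as a voice-firmware driver) would have its own image format and handshake.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>

/* exported by vpe.c when CONFIG_IFX_VPE_EXT is set (see patch above) */
extern int32_t vpe1_sw_start(void *sw_start_addr, uint32_t tcmask, uint32_t flags);
extern int32_t vpe1_sw_stop(uint32_t flags);
extern uint32_t vpe1_get_load_addr(uint32_t flags);
extern uint32_t vpe1_get_max_mem(uint32_t flags);

static int __init vpe1_client_init(void)
{
	/* window reserved for the VPE1 image (vpe1_mem bootarg, else P_SIZE) */
	void *load = (void *)vpe1_get_load_addr(0);
	uint32_t max = vpe1_get_max_mem(0);

	pr_info("vpe1 image window at %p, %u bytes\n", load, max);

	/* ... copy a bare-metal image for VPE1 into 'load' here (<= max bytes) ... */

	/* tcmask and flags must currently be 0, see vpe1_sw_start() above */
	return vpe1_sw_start(load, 0, 0) ? -EIO : 0;
}

static void __exit vpe1_client_exit(void)
{
	vpe1_sw_stop(0);	/* stop the RP program and free the VPE */
}

module_init(vpe1_client_init);
module_exit(vpe1_client_exit);
MODULE_LICENSE("GPL");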
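
The new mtsched_proc.c and perf_proc.c files expose write interfaces whose input format is only visible in their sscanf() calls and in the usage hint printed by proc_write_mtsched() ("t0 0x01"). The userspace sketch below exercises both, assuming the entries appear under /proc/mips/ (where get_mips_proc_dir() is expected to place them); the perf control word 0x82 is a placeholder, not a recommended event selection.

#include <stdio.h>

int main(void)
{
	FILE *f;

	/* mtsched format: "<t|v><index> <hex value>", e.g. set TC0's TCSchedule */
	f = fopen("/proc/mips/mtsched", "w");
	if (!f)
		return 1;
	fprintf(f, "t0 0x01\n");
	fclose(f);

	/* perf format: up to four "<counter> <control> <count>" triples;
	 * a count of -1 leaves the software-extended counter untouched */
	f = fopen("/proc/mips/perf", "w");
	if (!f)
		return 1;
	fprintf(f, "0 0x82 0\n");
	fclose(f);

	return 0;
}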
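
The IFX_VPE_CACHE_SPLIT help text in 810-ar9-cache-split.patch describes the cache split entirely in terms of kernel command line arguments. A bootargs fragment of the following shape enables an icache split and pins individual ways; the particular values are only illustrative, and which VPE a given way ends up on follows the per-way VPEOpt logic in vpe.c and c-r4k.c:

    vpe_icache_shared=0 icache_way0=1 icache_way1=0 icache_way2=0 icache_way3=0

As the help text notes, at least one icache way must remain available to each VPE, so not all four ways may be claimed for a single VPE; dcache splitting works the same way via vpe_dcache_shared=0 and the dcache_way0..3 parameters.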