openwrt/target/linux/en75xx/patches-4.9/000-ENHANCE_COMMON_econet_sdk_modification.patch
Matheus Sampaio Queiroga 561eb7c4a8
Some checks failed
Build Kernel / Build all affected Kernels (push) Failing after 1m38s
SOC: Add Econet en75xx
2024-07-25 22:53:26 -03:00

155038 lines
4.5 MiB
Raw Permalink Blame History

This file contains invisible Unicode characters

This file contains invisible Unicode characters that are indistinguishable to humans but may be processed differently by a computer. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

Index: linux-3.18.21/Documentation/zh_CN/HOWTO
===================================================================
--- linux-3.18.21.orig/Documentation/zh_CN/HOWTO 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/Documentation/zh_CN/HOWTO 2018-02-05 13:19:30.000000000 +0800
@@ -1,4 +1,4 @@
-Chinese translated version of Documentation/HOWTO
+Chinese translated version of Documentation/HOWTO
If you have any comment or update to the content, please contact the
original document maintainer directly. However, if you have a problem
Index: linux-3.18.21/Documentation/zh_CN/SubmittingDrivers
===================================================================
--- linux-3.18.21.orig/Documentation/zh_CN/SubmittingDrivers 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/Documentation/zh_CN/SubmittingDrivers 2018-02-05 13:19:30.000000000 +0800
@@ -1,4 +1,4 @@
-Chinese translated version of Documentation/SubmittingDrivers
+Chinese translated version of Documentation/SubmittingDrivers
If you have any comment or update to the content, please contact the
original document maintainer directly. However, if you have a problem
Index: linux-3.18.21/Documentation/zh_CN/email-clients.txt
===================================================================
--- linux-3.18.21.orig/Documentation/zh_CN/email-clients.txt 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/Documentation/zh_CN/email-clients.txt 2018-02-05 13:19:30.000000000 +0800
@@ -1,4 +1,4 @@
-Chinese translated version of Documentation/email-clients.txt
+Chinese translated version of Documentation/email-clients.txt
If you have any comment or update to the content, please contact the
original document maintainer directly. However, if you have a problem
Index: linux-3.18.21/Documentation/zh_CN/sparse.txt
===================================================================
--- linux-3.18.21.orig/Documentation/zh_CN/sparse.txt 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/Documentation/zh_CN/sparse.txt 2018-02-05 13:19:30.000000000 +0800
@@ -1,4 +1,4 @@
-Chinese translated version of Documentation/sparse.txt
+Chinese translated version of Documentation/sparse.txt
If you have any comment or update to the content, please contact the
original document maintainer directly. However, if you have a problem
Index: linux-3.18.21/Makefile
===================================================================
--- linux-3.18.21.orig/Makefile 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/Makefile 2018-02-05 13:19:31.000000000 +0800
@@ -248,7 +248,8 @@
# "make" in the configured kernel build directory always uses that.
# Default value for CROSS_COMPILE is not to prefix executables
# Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
-ARCH ?= $(SUBARCH)
+ARCH ?= mips
+#$(SUBARCH)
CROSS_COMPILE ?= $(CONFIG_CROSS_COMPILE:"%"=%)
# Architecture as present in compile.h
@@ -400,7 +401,7 @@
KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -fno-common \
- -Werror-implicit-function-declaration \
+# -Werror-implicit-function-declaration \
-Wno-format-security \
-std=gnu89
@@ -758,6 +759,10 @@
# conserve stack if available
KBUILD_CFLAGS += $(call cc-option,-fconserve-stack)
+ifneq ($(strip $(TC_CFLAGS)),)
+KBUILD_CFLAGS += $(TC_CFLAGS)
+endif
+
# disallow errors like 'EXPORT_GPL(foo);' with missing header
KBUILD_CFLAGS += $(call cc-option,-Werror=implicit-int)
@@ -921,6 +926,11 @@
$(Q)$(MAKE) $(build)=Documentation
endif
+$(call if_changed,link-vmlinux)
+linux.7z: vmlinux
+# $(STRIP) -o vmlinux.strip vmlinux
+ $(OBJCOPY) -O binary vmlinux linux.bin
+ ../tools/lzma e linux.bin linux.7z
+ ../tools/trx/trx -f linux.7z -o linux.7z.trx -c ../tools/trx/trx_config
# The actual objects are generated when descending,
# make sure no implicit rule kicks in
@@ -1562,9 +1572,9 @@
cmd_rmfiles = rm -f $(rm-files)
# Run depmod only if we have System.map and depmod is executable
-quiet_cmd_depmod = DEPMOD $(KERNELRELEASE)
- cmd_depmod = $(CONFIG_SHELL) $(srctree)/scripts/depmod.sh $(DEPMOD) \
- $(KERNELRELEASE) "$(patsubst y,_,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX))"
+#quiet_cmd_depmod = DEPMOD $(KERNELRELEASE)
+ #cmd_depmod = $(CONFIG_SHELL) $(srctree)/scripts/depmod.sh $(DEPMOD) \
+ #$(KERNELRELEASE) "$(patsubst y,_,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX))"
# Create temporary dir for module support files
# clean it up only when building all modules
Index: linux-3.18.21/add_kernel_config.sh
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/add_kernel_config.sh 2018-02-05 16:43:04.000000000 +0800
@@ -0,0 +1,87 @@
+#!/bin/bash
+
+if [ $# != 2 ] ; then
+ echo "usage: $0 [kernel_config_path] [temp_config_path]"
+ exit 0
+fi
+
+KERNEL_CONFIG=$1
+NEW_KERNEL_CONFIG=$2
+
+if [ "$TCSUPPORT_CZ_GENERAL" != "" ] || [ "$TCSUPPORT_SAMBA" != "" ] || [ "$TCSUPPORT_SAMBA_IPv6" != "" ] || [ "$TCSUPPORT_DMS" != "" ] || [ "$TCSUPPORT_6RD" != "" ] || [ "$TCSUPPORT_IPV6_PRIVACYADDRS" != "" ] || [ "$TCSUPPORT_VPN" != "" ]; then
+
+ if [ "$TCSUPPORT_SAMBA" != "" ] || [ "$TCSUPPORT_SAMBA_IPv6" != "" ];then
+ echo "Add kernel config for Samba"
+ SEDCMD="$SEDCMD -e 's/CONFIG_FAT_DEFAULT_CODEPAGE=437/CONFIG_FAT_DEFAULT_CODEPAGE=936/'"
+ SEDCMD="$SEDCMD -e 's/CONFIG_FAT_DEFAULT_IOCHARSET=\"ascii\"/CONFIG_FAT_DEFAULT_IOCHARSET=\"utf8\"/'"
+ SEDCMD="$SEDCMD -e 's/CONFIG_NLS_DEFAULT=\"iso8859-1\"/CONFIG_NLS_DEFAULT=\"utf8\"/'"
+ SEDCMD="$SEDCMD -e 's/CONFIG_NLS_CODEPAGE_437=m/# CONFIG_NLS_CODEPAGE_437 is not set/'"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_NLS_CODEPAGE_936 is not set/CONFIG_NLS_CODEPAGE_936=m/'"
+ SEDCMD="$SEDCMD -e 's/CONFIG_NLS_ASCII=m/# CONFIG_NLS_ASCII is not set/'"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_NLS_UTF8 is not set/CONFIG_NLS_UTF8=m/'"
+ fi
+
+ if [ "$TCSUPPORT_DMS" != "" ];then
+ echo "Add kernel config for DMS"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_EXT2_FS is not set/CONFIG_EXT2_FS=y\nCONFIG_EXT2_FS_XATTR=y\nCONFIG_EXT2_FS_POSIX_ACL=y\nCONFIG_EXT2_FS_SECURITY=y\nCONFIG_EXT2_FS_XIP=y\nCONFIG_FS_XIP=y/'"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_EXT3_FS is not set/CONFIG_EXT3_FS=y\nCONFIG_EXT3_FS_XATTR=y\nCONFIG_EXT3_FS_POSIX_ACL=y\nCONFIG_EXT3_FS_SECURITY=y/'"
+ SEDCMD="$SEDCMD -e '/# CONFIG_EXT4DEV_FS is not set/a\\CONFIG_JBD=y\n# CONFIG_JBD_DEBUG is not set\nCONFIG_FS_MBCACHE=y'"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_FS_POSIX_ACL is not set/CONFIG_FS_POSIX_ACL=y/'"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_INOTIFY is not set/CONFIG_INOTIFY=y\nCONFIG_INOTIFY_USER=y/'"
+ SEDCMD="$SEDCMD -e 's/CONFIG_FAT_FS=m/CONFIG_FAT_FS=y/'"
+ SEDCMD="$SEDCMD -e 's/CONFIG_VFAT_FS=m/CONFIG_VFAT_FS=y/'"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_NFS_FS is not set/CONFIG_NFS_FS=m\nCONFIG_NFS_V3=y\n# CONFIG_NFS_V3_ACL is not set\n# CONFIG_NFS_V4 is not set\nCONFIG_NFS_DIRECTIO=y/'"
+ SEDCMD="$SEDCMD -e 's/CONFIG_VFAT_FS=m/CONFIG_VFAT_FS=y/'"
+ SEDCMD="$SEDCMD -e '/# CONFIG_NFSD is not set/a\\CONFIG_LOCKD=m\nCONFIG_LOCKD_V4=y\nCONFIG_NFS_COMMON=y\nCONFIG_SUNRPC=m\n# CONFIG_SUNRPC_BIND34 is not set\n# CONFIG_RPCSEC_GSS_KRB5 is not set\n# CONFIG_RPCSEC_GSS_SPKM3 is not set'"
+ fi
+
+ if [ "$TCSUPPORT_6RD" != "" ];then
+ echo "Add kernel config for 6RD"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_IPV6_SIT_6RD is not set/CONFIG_IPV6_SIT_6RD=y/'"
+ fi
+
+ if [ "$TCSUPPORT_USB_3G_DONGLE" != "" ];then
+ echo "Add kernel config for 3G Dongle"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_USB_DEVICEFS is not set/CONFIG_USB_DEVICEFS=y/'"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_USB_DEVICE_CLASS is not set/CONFIG_USB_DEVICE_CLASS=y\n# CONFIG_USB_TEST is not set/'"
+ fi
+
+ if [ "$TCSUPPORT_IPV6_PRIVACYADDRS" != "" ];then
+ echo "Add kernel config for privacy addr"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_IPV6_PRIVACY is not set/CONFIG_IPV6_PRIVACY=y/'"
+ fi
+
+ if [ "$TCSUPPORT_VPN" != "" ];then
+ echo "Add kernel config for vpn tools"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_XFRM_USER is not set/CONFIG_XFRM_USER=y/'"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_XFRM_MIGRATE is not set/CONFIG_XFRM_MIGRATE=y/'"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_NET_KEY is not set/CONFIG_NET_KEY=y\nCONFIG_NET_KEY_MIGRATE=y/'"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_INET_AH is not set/CONFIG_INET_AH=y/'"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_INET_ESP is not set/CONFIG_INET_ESP=y/'"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_INET_IPCOMP is not set/CONFIG_INET_IPCOMP=y/'"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_INET_XFRM_TUNNEL is not set/CONFIG_INET_XFRM_TUNNEL=y/'"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_INET_XFRM_MODE_TRANSPORT is not set/CONFIG_INET_XFRM_MODE_TRANSPORT=y/'"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_INET_XFRM_MODE_TUNNEL is not set/CONFIG_INET_XFRM_MODE_TUNNEL=y/'"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_NET_IPVTI is not set/CONFIG_NET_IPVTI=y/'"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_CRYPTO_HMAC is not set/CONFIG_CRYPTO_HASH=y\nCONFIG_CRYPTO_HMAC=y/'"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_CRYPTO_NULL is not set/CONFIG_CRYPTO_NULL=y/'"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_CRYPTO_SHA1 is not set/CONFIG_CRYPTO_SHA1=y/'"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_CRYPTO_SHA256 is not set/CONFIG_CRYPTO_SHA256=y/'"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_CRYPTO_CBC is not set/CONFIG_CRYPTO_CBC=y/'"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_CRYPTO_DES is not set/CONFIG_CRYPTO_DES=y/'"
+ SEDCMD="$SEDCMD -e 's/# CONFIG_CRYPTO_DEFLATE is not set/CONFIG_CRYPTO_DEFLATE=y/'"
+
+ fi
+
+ if [ "$TCSUPPORT_CZ_GENERAL" != "" ];then
+ SEDCMD="$SEDCMD -e 's/# CONFIG_BRIDGE_EBT_ARP is not set/CONFIG_BRIDGE_EBT_ARP=m/'"
+ fi
+ echo "sed $SEDCMD"
+ gen="sed $SEDCMD $KERNEL_CONFIG"
+ eval $gen > $NEW_KERNEL_CONFIG
+else
+ echo "no need to add kernel option"
+fi
+
+echo "modify kernel config finish"
+
Index: linux-3.18.21/arch/mips/Kbuild
===================================================================
--- linux-3.18.21.orig/arch/mips/Kbuild 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/Kbuild 2018-02-05 13:19:52.000000000 +0800
@@ -1,7 +1,7 @@
# Fail on warnings - also for files referenced in subdirs
# -Werror can be disabled for specific files using:
# CFLAGS_<file.o> := -Wno-error
-subdir-ccflags-y := -Werror
+#subdir-ccflags-y := -Werror
# platform specific definitions
include arch/mips/Kbuild.platforms
Index: linux-3.18.21/arch/mips/Kbuild.platforms
===================================================================
--- linux-3.18.21.orig/arch/mips/Kbuild.platforms 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/Kbuild.platforms 2018-02-05 13:19:52.000000000 +0800
@@ -22,6 +22,7 @@
platforms += pmcs-msp71xx
platforms += pnx833x
platforms += ralink
+platforms += econet
platforms += rb532
platforms += sgi-ip22
platforms += sgi-ip27
Index: linux-3.18.21/arch/mips/Kconfig
===================================================================
--- linux-3.18.21.orig/arch/mips/Kconfig 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/Kconfig 2018-02-05 13:19:52.000000000 +0800
@@ -350,6 +350,74 @@
This enables support for the MIPS Technologies Malta evaluation
board.
+config MIPS_TC3262
+ bool "TrendChip's TC3262 Board"
+ select CPU_MIPSR2_IRQ_VI
+ select CPU_MIPSR2_IRQ_EI
+ select NO_EXCEPT_FILL
+ select BOOT_RAW
+ select CEVT_R4K
+ select CSRC_R4K
+ select DMA_NONCOHERENT
+ select IRQ_CPU
+ select HW_HAS_PCI
+ select SWAP_IO_SPACE
+ select SYS_HAS_CPU_MIPS32_R1
+ select SYS_HAS_CPU_MIPS32_R2
+ select SYS_HAS_CPU_MIPS64_R1
+ select SYS_HAS_EARLY_PRINTK
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_MULTITHREADING
+ select TC3162_ADSL
+ select TC3162_IMEM if !MIPS_EN7516
+ select TC3162_DMEM if !MIPS_EN7516
+ help
+ This enables support for TrendChip's TC3262 based board.
+ board.
+
+config MIPS_TC3182
+ bool "TrendChip's TC3182 Board"
+ depends on MIPS_TC3262
+ help
+ This enables support for TrendChip's TC3182 based board.
+ board.
+
+config MIPS_RT63165
+ bool "Ralink's RT63165 Board"
+ depends on MIPS_TC3262
+ help
+ This enables support for Ralink's RT63165 based board.
+
+config MIPS_RT65168
+ bool "Ralink's RT65168 Board"
+ depends on MIPS_TC3262
+ select RALINK_VDSL
+ help
+ This enables support for Ralink's RT65168 based board.
+
+config MIPS_RT63365
+ bool "Ralink's RT63365 Board"
+ depends on MIPS_TC3262
+ help
+ This enables support for Ralink's RT63365 based board.
+config MIPS_MT7510
+ bool "MediaTek's MT7510 Board"
+ depends on MIPS_TC3262
+ select RALINK_VDSL
+ help
+ This enables support for MediaTek's MT7510 based board.
+
+config MIPS_EN7516
+ bool "EcoNet's EN7516 Board"
+ depends on MIPS_TC3262
+ select MIPS_CPU_SCACHE
+ select SYS_SUPPORTS_MIPS_CMP
+ select IRQ_GIC
+ select WEAK_REORDERING_BEYOND_LLSC
+ help
+ This enables support for EcoNet's EN7516 based board which uses MIPS 1004k CPU.
config MIPS_SEAD3
bool "MIPS SEAD3 board"
select BOOT_ELF32
@@ -1143,6 +1211,32 @@
config BOOT_ELF32
bool
+config MIPS_PATENTFREE
+ bool
+ depends on SYS_HAS_CPU_TC3162
+
+config TC3162_ADSL
+ bool
+ depends on SYS_HAS_CPU_TC3162
+
+config RALINK_VDSL
+ bool
+ depends on SYS_HAS_CPU_TC3162
+
+config TC3162_IMEM
+ bool
+ depends on SYS_HAS_CPU_TC3162
+
+config TC3162_DMEM
+ bool
+ depends on SYS_HAS_CPU_TC3162
+
+config IMEM_SIZE
+ int
+ default "32768" if MIPS_TC3162U || MIPS_TC3182 || MIPS_TC3262
+ default "16384" if MIPS_TC3162
+ depends on TC3162_IMEM
+
config MIPS_L1_CACHE_SHIFT_4
bool
Index: linux-3.18.21/arch/mips/econet/Makefile
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/econet/Makefile 2018-02-05 13:19:53.000000000 +0800
@@ -0,0 +1,27 @@
+obj-y := \
+ prom.o \
+ setup.o \
+ irq.o \
+ tcwdog.o \
+ ex_mdio_api.o \
+ ecnt_traps.o \
+ libcompileoption.o \
+ ecnt_hook/ecnt_hook.o \
+ ecnt_hook/ecnt_hook_nf.o \
+ ecnt_kernel_ext.o
+# tcadsl.o
+# ledcetrl.o
+
+obj-$(CONFIG_MIPS_TC3162) += time.o
+obj-$(CONFIG_MIPS_TC3162U) += time.o
+obj-$(CONFIG_MIPS_TC3262) += time2.o
+ifneq ($(strip $(TCSUPPORT_MIPS_1004K)),)
+obj-$(CONFIG_MIPS_MT_SMP) += malta-amon.o
+else
+obj-$(CONFIG_MIPS_MT_SMP) += malta-smp.o
+endif
+obj-$(CONFIG_MIPS_MT_SMTC) += malta-smtc.o
+obj-$(TCSUPPORT_KPROFILE) += kprofile_hook.o
+ifneq ($(strip $(RTP_RX_SHORTCUT)),)
+obj-$(TCSUPPORT_VOIP) += voip_hook.o
+endif
\ No newline at end of file
Index: linux-3.18.21/arch/mips/econet/Platform
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/econet/Platform 2018-02-05 13:19:53.000000000 +0800
@@ -0,0 +1,11 @@
+#
+# Econet boards
+#
+platform-$(CONFIG_MIPS_TC3262) += econet/ softfloat/
+cflags-$(CONFIG_MIPS_TC3262) += -I$(srctree)/arch/mips/include/asm/tc3162/
+ifneq ($(strip $(TCSUPPORT_FREE_BOOTBASE)),)
+load-$(CONFIG_MIPS_TC3262) += $(KERNEL_START_ADDR)
+else
+load-$(CONFIG_MIPS_TC3262) += 0xffffffff80020000
+endif
+all-$(CONFIG_MIPS_TC3262) := $(COMPRESSION_FNAME).bin
Index: linux-3.18.21/arch/mips/econet/ecnt_hook/ecnt_hook.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/econet/ecnt_hook/ecnt_hook.c 2018-02-05 13:19:53.000000000 +0800
@@ -0,0 +1,475 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation ("EcoNet Software")
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited ("EcoNet") and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS ("ECONET SOFTWARE") RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN "AS IS"
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER'S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include <linux/kernel.h>
+#include <linux/netfilter.h>
+#include <net/protocol.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/wait.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/if.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+#include <ecnt_hook/ecnt_hook.h>
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+
+/************************************************************************
+* E X T E R N A L D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* P U B L I C D A T A
+*************************************************************************
+*/
+struct list_head ecnt_hooks[ECNT_NUM_MAINTYPE][ECNT_MAX_SUBTYPE];
+EXPORT_SYMBOL(ecnt_hooks);
+
+/************************************************************************
+* P R I V A T E D A T A
+*************************************************************************
+*/
+static DEFINE_SPINLOCK(ecnt_hook_lock);
+unsigned int hook_id = 0;
+
+/************************************************************************
+* F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+
+/*****************************************************************************
+//function :
+// ecnt_iterate
+//description :
+// this function is called by __ECNT_HOOK to iterate ecnt hook list
+//input :
+// struct list_head *head, struct list_head **i, struct ecnt_data *in_data
+//return :
+// ECNT_RETURN_DROP 0
+// ECNT_RETURN 2
+// ECNT_CONTINUE 1
+******************************************************************************/
+
+//static inline ecnt_ret_val ecnt_iterate(struct list_head *head, struct list_head **i, struct ecnt_data *in_data)
+static inline ecnt_ret_val ecnt_iterate(struct list_head *head, struct ecnt_hook_ops **elemp, struct ecnt_data *in_data)
+{
+ ecnt_ret_val verdict = ECNT_CONTINUE;
+
+ /*
+ * The caller must not block between calls to this
+ * function because of risk of continuing from deleted element.
+ */
+ //list_for_each_continue_rcu(*i, head) {
+ list_for_each_entry_continue_rcu((*elemp), head, list){
+ if((*elemp)->is_execute){
+ verdict = (*elemp)->hookfn(in_data);
+ if (verdict == ECNT_RETURN_DROP) {
+ (*elemp)->info.drop_num++;
+ return verdict;
+ }
+ if(verdict == ECNT_RETURN)
+ return verdict;
+ }
+ }
+
+ return verdict;
+}
+
+/*****************************************************************************
+//function :
+// __ECNT_HOOK
+//description :
+// kernel hook API
+//input :
+// unsigned int maintype, unsigned int subtype,struct ecnt_data *in_data
+//return :
+// ECNT_HOOK_ERROR -1
+// ECNT_RETURN_DROP 0
+// ECNT_RETURN 2
+// ECNT_CONTINUE 1
+******************************************************************************/
+
+__IMEM ecnt_ret_val __ECNT_HOOK(unsigned int maintype, unsigned int subtype,struct ecnt_data *in_data)
+{
+ ecnt_ret_val ret;
+ struct ecnt_hook_ops *elem;
+
+ if((maintype >= ECNT_NUM_MAINTYPE) || (subtype >= ECNT_MAX_SUBTYPE)){
+ printk("__ECNT_HOOK fail, max maintype %d, max subtype %d\n", ECNT_NUM_MAINTYPE, ECNT_MAX_SUBTYPE);
+ return ECNT_HOOK_ERROR;
+ }
+ if (list_empty(&ecnt_hooks[maintype][subtype])){
+ return ECNT_HOOK_ERROR;
+ }
+
+ /* We may already have this, but read-locks nest anyway */
+ rcu_read_lock();
+
+ elem = list_entry_rcu(&ecnt_hooks[maintype][subtype], struct ecnt_hook_ops, list);
+ /* We may already have this, but read-locks nest anyway */
+ ret = ecnt_iterate(&ecnt_hooks[maintype][subtype], &elem, in_data);
+ rcu_read_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL(__ECNT_HOOK);
+
+/*****************************************************************************
+//function :
+// set_ecnt_hookfn_execute_or_not
+//description :
+// used to control ecnt hook function execute or not
+//input :
+// unsigned int maintype, unsigned int subtype, unsigned int hook_id, unsigned int is_execute
+//return :
+// 0: fail
+// 1: success
+******************************************************************************/
+int set_ecnt_hookfn_execute_or_not(unsigned int maintype, unsigned int subtype, unsigned int hook_id, unsigned int is_execute)
+{
+ struct list_head *pos;
+ if((maintype >= ECNT_NUM_MAINTYPE) || (subtype >= ECNT_MAX_SUBTYPE)){
+ printk("set_ecnt_hookfn_execute_or_not fail, max maintype %d, max subtype %d\n", ECNT_NUM_MAINTYPE, ECNT_MAX_SUBTYPE);
+ return 0;
+ }
+ if(list_empty(&ecnt_hooks[maintype][subtype]))
+ return 0;
+
+ struct ecnt_hook_ops *elem;
+ rcu_read_lock();
+ elem = list_entry_rcu(&ecnt_hooks[maintype][subtype], struct ecnt_hook_ops, list);
+ //list_for_each_continue_rcu(pos, &ecnt_hooks[maintype][subtype]){
+ list_for_each_entry_continue_rcu(elem, &ecnt_hooks[maintype][subtype], list){
+ if(elem->hook_id == hook_id){
+ elem->is_execute = is_execute;
+ rcu_read_unlock();
+ return 1;
+ }
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+EXPORT_SYMBOL(set_ecnt_hookfn_execute_or_not);
+
+/*****************************************************************************
+//function :
+// get_ecnt_hookfn
+//description :
+// used to get hook
+//input :
+// unsigned int maintype, unsigned int subtype, unsigned int hook_id, unsigned int is_execute
+//return :
+// 0: fail
+// 1: success
+******************************************************************************/
+
+int get_ecnt_hookfn(unsigned int maintype, unsigned int subtype){
+ struct list_head *pos;
+ struct ecnt_hook_ops *elem;
+ int index = 1;
+
+ if((maintype >= ECNT_NUM_MAINTYPE) || (subtype >= ECNT_MAX_SUBTYPE)){
+ printk("get_ecnt_hookfn fail, max maintype %d, max subtype %d\n", ECNT_NUM_MAINTYPE, ECNT_MAX_SUBTYPE);
+ return 0;
+ }
+ if(list_empty(&ecnt_hooks[maintype][subtype])){
+ printk("maintype = %d, subtype=%d, 0 hook functions\n",maintype, subtype);
+ return 0;
+ }
+
+ printk("maintype = %d, subtype=%d\n",maintype, subtype);
+ printk("index\t[id]\tis_exe\tpri\tdropnum\tname\n");
+ rcu_read_lock();
+
+ list_for_each(pos, &ecnt_hooks[maintype][subtype]){
+ elem = (struct ecnt_hook_ops *)pos;
+ printk("%d.\t[%d]\t%d\t%d\t%d\t%s\n",
+ index++, elem->hook_id, elem->is_execute, elem->priority,elem->info.drop_num,elem->name);
+
+ }
+ rcu_read_unlock();
+
+ return 1;
+}
+EXPORT_SYMBOL(get_ecnt_hookfn);
+
+/*****************************************************************************
+//function :
+// show_all_ecnt_hookfn
+//description :
+// used to show all of the ecnt hook functions
+//input :
+// void
+//return :
+// 1
+******************************************************************************/
+int show_all_ecnt_hookfn(void){
+ int maintype, subtype;
+ struct list_head *pos;
+ struct ecnt_hook_ops *elem;
+ int index = 1;
+
+ printk("index\t[main-sub]\t[id]\tis_exe\tpri\tdropnum\tname\n");
+ rcu_read_lock();
+ for(maintype = 0; maintype < ECNT_NUM_MAINTYPE; maintype++){
+ for(subtype = 0; subtype < ECNT_MAX_SUBTYPE; subtype++){
+ list_for_each(pos, &ecnt_hooks[maintype][subtype]){
+ elem = (struct ecnt_hook_ops *)pos;
+ printk("%d.\t[%d-%d]\t\t[%d]\t%d\t%d\t%d\t%s\n",
+ index++, maintype, subtype,elem->hook_id, elem->is_execute, elem->priority,elem->info.drop_num,elem->name);
+ }
+ }
+ }
+ rcu_read_unlock();
+
+ return 1;
+}
+EXPORT_SYMBOL(show_all_ecnt_hookfn);
+
+/*****************************************************************************
+//function :
+// ecnt_register_hook
+//description :
+// used to register hook function
+//input :
+// struct ecnt_hook_ops *reg
+//return:
+// ECNT_REGISTER_FAIL -1
+// ECNT_REGISTER_SUCCESS 0
+******************************************************************************/
+int ecnt_register_hook(struct ecnt_hook_ops *reg)
+{
+ struct ecnt_hook_ops *elem;
+
+ if(!reg){
+ printk("ecnt_register_hook fail, reg is NULL\n");
+ return ECNT_REGISTER_FAIL;
+ }
+ if((reg->maintype >= ECNT_NUM_MAINTYPE) || (reg->subtype >= ECNT_MAX_SUBTYPE)){
+ printk("ecnt_register_hook fail, maintype = %d, subtype=%d, out of range\n", reg->maintype, reg->subtype);
+ return ECNT_REGISTER_FAIL;
+ }
+ if(reg->list.next!= NULL){
+ printk("ecnt_register_hook fail, %s already registered\n", reg->name);
+ return ECNT_REGISTER_FAIL;
+ }
+ if(hook_id >= 0xFFFFFFFF){
+ printk("ecnt_register_hook fail, hook_id out of range\n");
+ return ECNT_REGISTER_FAIL;
+ }
+ spin_lock(&ecnt_hook_lock);
+ list_for_each_entry(elem, &ecnt_hooks[reg->maintype][reg->subtype], list) {
+ if (reg->priority < elem->priority)
+ break;
+ }
+ reg->hook_id = ++hook_id;
+ reg->info.drop_num = 0;
+ list_add_rcu(&reg->list, elem->list.prev);
+ spin_unlock(&ecnt_hook_lock);
+
+ return ECNT_REGISTER_SUCCESS;
+}
+EXPORT_SYMBOL(ecnt_register_hook);
+
+/*****************************************************************************
+//function :
+// ecnt_unregister_hook
+//description :
+// used to unregister hook function
+//input :
+// struct ecnt_hook_ops *reg
+//return :
+// void
+******************************************************************************/
+void ecnt_unregister_hook(struct ecnt_hook_ops *reg)
+{
+ if(!reg){
+ printk("ecnt_unregister_hook fail, reg is NULL\n");
+ return;
+ }
+ if(reg->list.prev == LIST_POISON2){
+ printk("%s already unregistered\n", reg->name);
+ return;
+ }
+ spin_lock(&ecnt_hook_lock);
+ list_del_rcu(&reg->list);
+ spin_unlock(&ecnt_hook_lock);
+ synchronize_rcu();
+}
+EXPORT_SYMBOL(ecnt_unregister_hook);
+
+/*****************************************************************************
+//function :
+// ecnt_ops_unregister
+//description :
+// used to unregister hook function
+//input :
+// unsigned int maintype, unsigned int subtype, unsigned int hook_id
+//return :
+// 0: fail
+// 1: success
+******************************************************************************/
+
+int ecnt_ops_unregister(unsigned int maintype, unsigned int subtype, unsigned int hook_id)
+{
+ struct ecnt_hook_ops *elem;
+ if((maintype >= ECNT_NUM_MAINTYPE) || (subtype >= ECNT_MAX_SUBTYPE)){
+ printk("set_ecnt_hookfn_execute_or_not fail, max maintype %d, max subtype %d\n", ECNT_NUM_MAINTYPE, ECNT_MAX_SUBTYPE);
+ return 0;
+ }
+ if(list_empty(&ecnt_hooks[maintype][subtype]))
+ return 0;
+
+ rcu_read_lock();
+ elem = list_entry_rcu(&ecnt_hooks[maintype][subtype], struct ecnt_hook_ops, list);
+ //list_for_each_continue_rcu(pos, &ecnt_hooks[maintype][subtype]){
+ list_for_each_entry_continue_rcu(elem, &ecnt_hooks[maintype][subtype], list){
+ if(elem->hook_id == hook_id){
+ ecnt_unregister_hook(elem);
+ rcu_read_unlock();
+ return 1;
+ }
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+EXPORT_SYMBOL(ecnt_ops_unregister);
+
+/*****************************************************************************
+//function :
+// ecnt_register_hooks
+//description :
+// register ecnt ops
+//input :
+// struct ecnt_hook_ops *reg, unsigned int n
+//return :
+// ECNT_REGISTER_FAIL -1
+// ECNT_REGISTER_SUCCESS 0
+******************************************************************************/
+
+int ecnt_register_hooks(struct ecnt_hook_ops *reg, unsigned int n)
+{
+ unsigned int i;
+ int err = 0;
+
+ for (i = 0; i < n; i++) {
+ err = ecnt_register_hook(&reg[i]);
+ if (err)
+ goto err;
+ }
+ return err;
+
+err:
+ if (i > 0)
+ ecnt_unregister_hooks(reg, i);
+ return err;
+}
+EXPORT_SYMBOL(ecnt_register_hooks);
+
+/*****************************************************************************
+//function :
+// ecnt_unregister_hooks
+//description :
+// unregister ecnt ops
+//input :
+// struct ecnt_hook_ops *reg, unsigned int n
+//return :
+// void
+******************************************************************************/
+
+void ecnt_unregister_hooks(struct ecnt_hook_ops *reg, unsigned int n)
+{
+ unsigned int i;
+
+ for (i = 0; i < n; i++)
+ ecnt_unregister_hook(&reg[i]);
+}
+EXPORT_SYMBOL(ecnt_unregister_hooks);
+
+/*****************************************************************************
+//function :
+// ecnt_hook_init
+//description :
+// used to init ecnt_hooks list head
+//input :
+// void
+//return :
+// void
+******************************************************************************/
+void ecnt_hook_init(void)
+{
+ int i, h;
+ for (i = 0; i < ARRAY_SIZE(ecnt_hooks); i++){
+ for (h = 0; h < ECNT_MAX_SUBTYPE; h++)
+ INIT_LIST_HEAD(&ecnt_hooks[i][h]);
+ }
+
+}
+
Index: linux-3.18.21/arch/mips/econet/ecnt_hook/ecnt_hook_nf.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/econet/ecnt_hook/ecnt_hook_nf.c 2018-02-05 14:20:19.000000000 +0800
@@ -0,0 +1,949 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation ("EcoNet Software")
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited ("EcoNet") and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS ("ECONET SOFTWARE") RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN "AS IS"
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER'S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include <linux/netfilter.h>
+#include <linux/init.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/ip.h>
+#include <linux/inet.h>
+#include <linux/netfilter_bridge.h>
+#include <linux/netfilter_arp.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/ipv6.h>
+#include <ecnt_hook/ecnt_hook_nf.h>
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+
+/************************************************************************
+* E X T E R N A L D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+*************************************************************************
+*/
+unsigned int ipv4_pre_routing_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *));
+unsigned int ipv4_local_in_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *));
+
+unsigned int ipv4_forward_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *));
+
+unsigned int ipv4_local_out_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *));
+
+unsigned int ipv4_post_routing_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *));
+
+
+
+unsigned int ipv6_pre_routing_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *));
+
+unsigned int ipv6_local_in_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *));
+
+unsigned int ipv6_forward_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *));
+
+unsigned int ipv6_local_out_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *));
+
+unsigned int ipv6_post_routing_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *));
+
+unsigned int arp_in_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *));
+unsigned int arp_forward_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *));
+
+unsigned int arp_out_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *));
+
+unsigned int bridge_pre_routing_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *));
+
+unsigned int bridge_local_in_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *));
+
+unsigned int bridge_forward_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *));
+
+unsigned int bridge_local_out_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *));
+unsigned int bridge_post_routing_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *));
+
+/************************************************************************
+* P U B L I C D A T A
+*************************************************************************
+*/
+
+/************************************************************************
+* P R I V A T E D A T A
+*************************************************************************
+*/
+struct nf_hook_ops ipv4_pre_routing_hook_op = {
+ .hook = ipv4_pre_routing_hook_fn,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_PRE_ROUTING,
+ .priority = NF_IP_PRI_FIRST,
+ .owner = THIS_MODULE,
+};
+
+struct nf_hook_ops ipv4_local_in_hook_op = {
+ .hook = ipv4_local_in_hook_fn,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_LOCAL_IN,
+ .priority = NF_IP_PRI_FIRST,
+ .owner = THIS_MODULE,
+};
+
+struct nf_hook_ops ipv4_forward_hook_op = {
+ .hook = ipv4_forward_hook_fn,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_FORWARD,
+ .priority = NF_IP_PRI_FIRST,
+ .owner = THIS_MODULE,
+};
+
+struct nf_hook_ops ipv4_local_out_hook_op = {
+ .hook = ipv4_local_out_hook_fn,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_LOCAL_OUT,
+ .priority = NF_IP_PRI_FIRST,
+ .owner = THIS_MODULE,
+};
+
+struct nf_hook_ops ipv4_post_routing_hook_op = {
+ .hook = ipv4_post_routing_hook_fn,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = NF_IP_PRI_FIRST,
+ .owner = THIS_MODULE,
+};
+
+struct nf_hook_ops ipv6_pre_routing_hook_op = {
+ .hook = ipv6_pre_routing_hook_fn,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_PRE_ROUTING,
+ .priority = NF_IP6_PRI_FIRST,
+ .owner = THIS_MODULE,
+};
+
+struct nf_hook_ops ipv6_local_in_hook_op = {
+ .hook = ipv6_local_in_hook_fn,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_LOCAL_IN,
+ .priority = NF_IP6_PRI_FIRST,
+ .owner = THIS_MODULE,
+};
+
+struct nf_hook_ops ipv6_forward_hook_op = {
+ .hook = ipv6_forward_hook_fn,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_FORWARD,
+ .priority = NF_IP6_PRI_FIRST,
+ .owner = THIS_MODULE,
+};
+
+struct nf_hook_ops ipv6_local_out_hook_op = {
+ .hook = ipv6_local_out_hook_fn,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_LOCAL_OUT,
+ .priority = NF_IP6_PRI_FIRST,
+ .owner = THIS_MODULE,
+};
+
+struct nf_hook_ops ipv6_post_routing_hook_op = {
+ .hook = ipv6_post_routing_hook_fn,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = NF_IP6_PRI_FIRST,
+ .owner = THIS_MODULE,
+};
+
+struct nf_hook_ops arp_in_hook_op = {
+ .hook = arp_in_hook_fn,
+ .pf = NFPROTO_ARP,
+ .hooknum = NF_ARP_IN,
+ .priority = NF_IP_PRI_FIRST,
+ .owner = THIS_MODULE,
+};
+
+struct nf_hook_ops arp_forward_hook_op = {
+ .hook = arp_forward_hook_fn,
+ .pf = NFPROTO_ARP,
+ .hooknum = NF_ARP_FORWARD,
+ .priority = NF_IP_PRI_FIRST,
+ .owner = THIS_MODULE,
+};
+
+struct nf_hook_ops arp_out_hook_op = {
+ .hook = arp_out_hook_fn,
+ .pf = NFPROTO_ARP,
+ .hooknum = NF_ARP_OUT,
+ .priority = NF_IP_PRI_FIRST,
+ .owner = THIS_MODULE,
+};
+
+struct nf_hook_ops bridge_pre_routing_hook_op = {
+ .hook = bridge_pre_routing_hook_fn,
+ .pf = NFPROTO_BRIDGE,
+ .hooknum = NF_BR_PRE_ROUTING,
+ .priority = NF_BR_PRI_FIRST,
+ .owner = THIS_MODULE,
+};
+
+struct nf_hook_ops bridge_local_in_hook_op = {
+ .hook = bridge_local_in_hook_fn,
+ .pf = NFPROTO_BRIDGE,
+ .hooknum = NF_BR_LOCAL_IN,
+ .priority = NF_BR_PRI_FIRST,
+ .owner = THIS_MODULE,
+};
+
+struct nf_hook_ops bridge_forward_hook_op = {
+ .hook = bridge_forward_hook_fn,
+ .pf = NFPROTO_BRIDGE,
+ .hooknum = NF_BR_FORWARD,
+ .priority = NF_BR_PRI_FIRST,
+ .owner = THIS_MODULE,
+};
+
+struct nf_hook_ops bridge_local_out_hook_op = {
+ .hook = bridge_local_out_hook_fn,
+ .pf = NFPROTO_BRIDGE,
+ .hooknum = NF_BR_LOCAL_OUT,
+ .priority = NF_BR_PRI_FIRST,
+ .owner = THIS_MODULE,
+};
+
+struct nf_hook_ops bridge_post_routing_hook_op = {
+ .hook = bridge_post_routing_hook_fn,
+ .pf = NFPROTO_BRIDGE,
+ .hooknum = NF_BR_POST_ROUTING,
+ .priority = NF_BR_PRI_FIRST,
+ .owner = THIS_MODULE,
+};
+
+
+/************************************************************************
+* F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+/*****************************************************************************
+//function :
+// ECNT_NF_HOOK
+//description :
+// called by nf_hook_ops hook functions
+//input :
+// unsigned int maintype, unsigned int subtype,struct sk_buff *skb,
+// const struct net_device *in, const struct net_device *out
+//return :
+// NF_DROP
+// NF_ACCEPT
+// NF_STOP
+******************************************************************************/
+int ECNT_NF_HOOK(unsigned int maintype, unsigned int subtype, struct sk_buff *skb, const struct net_device *in,
+ const struct net_device *out)
+{
+ int ret;
+ struct nf_data_s nf_data;
+
+ nf_data.skb = skb;
+ nf_data.in = in;
+ nf_data.out = out;
+
+ ret = __ECNT_HOOK(maintype, subtype, (struct ecnt_data *)&nf_data);
+ switch(ret){
+ case ECNT_RETURN_DROP:
+ return NF_DROP;
+ case ECNT_CONTINUE:
+ case ECNT_HOOK_ERROR:
+ return NF_ACCEPT;
+ case ECNT_RETURN:
+ return NF_STOP;
+ }
+ return ret;
+}
+
+/*****************************************************************************
+//function :
+// ipv4_pre_routing_hook_fn
+//description :
+// nf_hook_ops hook function
+//input :
+// unsigned int hooknum, struct sk_buff *skb,
+// const struct net_device *in, const struct net_device *out, int(*okfn)(struct sk_buff *)
+//return :
+// NF_DROP
+// NF_ACCEPT
+// NF_STOP
+******************************************************************************/
+unsigned int ipv4_pre_routing_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ return ECNT_NF_HOOK(ECNT_NF_IPV4,ECNT_NF_IP4_PRE_ROUTING,skb,in,out);
+}
+
+/*****************************************************************************
+//function :
+// ipv4_local_in_hook_fn
+//description :
+// nf_hook_ops hook function
+//input :
+// unsigned int hooknum, struct sk_buff *skb,
+// const struct net_device *in, const struct net_device *out, int(*okfn)(struct sk_buff *)
+//return :
+// NF_DROP
+// NF_ACCEPT
+// NF_STOP
+******************************************************************************/
+unsigned int ipv4_local_in_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ return ECNT_NF_HOOK(ECNT_NF_IPV4,ECNT_NF_IP4_LOCAL_IN,skb,in,out);
+}
+
+/*****************************************************************************
+//function :
+// ipv4_forward_hook_fn
+//description :
+// nf_hook_ops hook function
+//input :
+// unsigned int hooknum, struct sk_buff *skb,
+// const struct net_device *in, const struct net_device *out, int(*okfn)(struct sk_buff *)
+//return :
+// NF_DROP
+// NF_ACCEPT
+// NF_STOP
+******************************************************************************/
+unsigned int ipv4_forward_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ return ECNT_NF_HOOK(ECNT_NF_IPV4,ECNT_NF_IP4_FORWARD,skb,in,out);
+}
+
+/*****************************************************************************
+//function :
+// ipv4_local_out_hook_fn
+//description :
+// nf_hook_ops hook function
+//input :
+// unsigned int hooknum, struct sk_buff *skb,
+// const struct net_device *in, const struct net_device *out, int(*okfn)(struct sk_buff *)
+//return :
+// NF_DROP
+// NF_ACCEPT
+// NF_STOP
+******************************************************************************/
+unsigned int ipv4_local_out_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ return ECNT_NF_HOOK(ECNT_NF_IPV4,ECNT_NF_IP4_LOCAL_OUT,skb,in,out);
+}
+
+/*****************************************************************************
+//function :
+// ipv4_post_routing_hook_fn
+//description :
+// nf_hook_ops hook function
+//input :
+// unsigned int hooknum, struct sk_buff *skb,
+// const struct net_device *in, const struct net_device *out, int(*okfn)(struct sk_buff *)
+//return :
+// NF_DROP
+// NF_ACCEPT
+// NF_STOP
+******************************************************************************/
+unsigned int ipv4_post_routing_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ return ECNT_NF_HOOK(ECNT_NF_IPV4,ECNT_NF_IP4_POST_ROUTING,skb,in,out);
+}
+
+/*****************************************************************************
+//function :
+// ipv6_pre_routing_hook_fn
+//description :
+// nf_hook_ops hook function
+//input :
+// unsigned int hooknum, struct sk_buff *skb,
+// const struct net_device *in, const struct net_device *out, int(*okfn)(struct sk_buff *)
+//return :
+// NF_DROP
+// NF_ACCEPT
+// NF_STOP
+******************************************************************************/
+unsigned int ipv6_pre_routing_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ return ECNT_NF_HOOK(ECNT_NF_IPV6,ECNT_NF_IP6_PRE_ROUTING,skb,in,out);
+}
+
+/*****************************************************************************
+//function :
+// ipv6_local_in_hook_fn
+//description :
+// nf_hook_ops hook function
+//input :
+// unsigned int hooknum, struct sk_buff *skb,
+// const struct net_device *in, const struct net_device *out, int(*okfn)(struct sk_buff *)
+//return :
+// NF_DROP
+// NF_ACCEPT
+// NF_STOP
+******************************************************************************/
+unsigned int ipv6_local_in_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ return ECNT_NF_HOOK(ECNT_NF_IPV6,ECNT_NF_IP6_LOCAL_IN,skb,in,out);
+}
+
+/*****************************************************************************
+//function :
+// ipv6_forward_hook_fn
+//description :
+// nf_hook_ops hook function
+//input :
+// unsigned int hooknum, struct sk_buff *skb,
+// const struct net_device *in, const struct net_device *out, int(*okfn)(struct sk_buff *)
+//return :
+// NF_DROP
+// NF_ACCEPT
+// NF_STOP
+******************************************************************************/
+unsigned int ipv6_forward_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ return ECNT_NF_HOOK(ECNT_NF_IPV6,ECNT_NF_IP6_FORWARD,skb,in,out);
+}
+
+/*****************************************************************************
+//function :
+// ipv6_local_out_hook_fn
+//description :
+// nf_hook_ops hook function
+//input :
+// unsigned int hooknum, struct sk_buff *skb,
+// const struct net_device *in, const struct net_device *out, int(*okfn)(struct sk_buff *)
+//return :
+// NF_DROP
+// NF_ACCEPT
+// NF_STOP
+******************************************************************************/
+unsigned int ipv6_local_out_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ return ECNT_NF_HOOK(ECNT_NF_IPV6,ECNT_NF_IP6_LOCAL_OUT,skb,in,out);
+}
+
+/*****************************************************************************
+//function :
+// ipv6_post_routing_hook_fn
+//description :
+// nf_hook_ops hook function
+//input :
+// unsigned int hooknum, struct sk_buff *skb,
+// const struct net_device *in, const struct net_device *out, int(*okfn)(struct sk_buff *)
+//return :
+// NF_DROP
+// NF_ACCEPT
+// NF_STOP
+******************************************************************************/
+unsigned int ipv6_post_routing_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ return ECNT_NF_HOOK(ECNT_NF_IPV6,ECNT_NF_IP6_POST_ROUTING,skb,in,out);
+}
+
+/*****************************************************************************
+//function :
+// arp_in_hook_fn
+//description :
+// nf_hook_ops hook function
+//input :
+// unsigned int hooknum, struct sk_buff *skb,
+// const struct net_device *in, const struct net_device *out, int(*okfn)(struct sk_buff *)
+//return :
+// NF_DROP
+// NF_ACCEPT
+// NF_STOP
+******************************************************************************/
+unsigned int arp_in_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ return ECNT_NF_HOOK(ECNT_NF_ARP,ECNT_NF_ARP_IN,skb,in,out);
+}
+
+/*****************************************************************************
+//function :
+// arp_forward_hook_fn
+//description :
+// nf_hook_ops hook function
+//input :
+// unsigned int hooknum, struct sk_buff *skb,
+// const struct net_device *in, const struct net_device *out, int(*okfn)(struct sk_buff *)
+//return :
+// NF_DROP
+// NF_ACCEPT
+// NF_STOP
+******************************************************************************/
+unsigned int arp_forward_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ return ECNT_NF_HOOK(ECNT_NF_ARP,ECNT_NF_ARP_FORWARD,skb,in,out);
+}
+
+/*****************************************************************************
+//function :
+// arp_out_hook_fn
+//description :
+// nf_hook_ops hook function
+//input :
+// unsigned int hooknum, struct sk_buff *skb,
+// const struct net_device *in, const struct net_device *out, int(*okfn)(struct sk_buff *)
+//return :
+// NF_DROP
+// NF_ACCEPT
+// NF_STOP
+******************************************************************************/
+unsigned int arp_out_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ return ECNT_NF_HOOK(ECNT_NF_ARP,ECNT_NF_ARP_OUT,skb,in,out);
+}
+
+/*****************************************************************************
+//function :
+// bridge_pre_routing_hook_fn
+//description :
+// nf_hook_ops hook function
+//input :
+// unsigned int hooknum, struct sk_buff *skb,
+// const struct net_device *in, const struct net_device *out, int(*okfn)(struct sk_buff *)
+//return :
+// NF_DROP
+// NF_ACCEPT
+// NF_STOP
+******************************************************************************/
+unsigned int bridge_pre_routing_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ return ECNT_NF_HOOK(ECNT_NF_BR,ECNT_NF_BR_PRE_ROUTING,skb,in,out);
+}
+
+/*****************************************************************************
+//function :
+// bridge_local_in_hook_fn
+//description :
+// nf_hook_ops hook function
+//input :
+// unsigned int hooknum, struct sk_buff *skb,
+// const struct net_device *in, const struct net_device *out, int(*okfn)(struct sk_buff *)
+//return :
+// NF_DROP
+// NF_ACCEPT
+// NF_STOP
+******************************************************************************/
+unsigned int bridge_local_in_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ return ECNT_NF_HOOK(ECNT_NF_BR,ECNT_NF_BR_LOCAL_IN,skb,in,out);
+}
+
+/*****************************************************************************
+//function :
+// bridge_forward_hook_fn
+//description :
+// nf_hook_ops hook function
+//input :
+// unsigned int hooknum, struct sk_buff *skb,
+// const struct net_device *in, const struct net_device *out, int(*okfn)(struct sk_buff *)
+//return :
+// NF_DROP
+// NF_ACCEPT
+// NF_STOP
+******************************************************************************/
+unsigned int bridge_forward_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ return ECNT_NF_HOOK(ECNT_NF_BR,ECNT_NF_BR_FORWARD,skb,in,out);
+}
+
+/*****************************************************************************
+//function :
+// bridge_local_out_hook_fn
+//description :
+// nf_hook_ops hook function
+//input :
+// unsigned int hooknum, struct sk_buff *skb,
+// const struct net_device *in, const struct net_device *out, int(*okfn)(struct sk_buff *)
+//return :
+// NF_DROP
+// NF_ACCEPT
+// NF_STOP
+******************************************************************************/
+unsigned int bridge_local_out_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ return ECNT_NF_HOOK(ECNT_NF_BR,ECNT_NF_BR_LOCAL_OUT,skb,in,out);
+}
+
+/*****************************************************************************
+//function :
+// bridge_post_routing_hook_fn
+//description :
+// nf_hook_ops hook function
+//input :
+// unsigned int hooknum, struct sk_buff *skb,
+// const struct net_device *in, const struct net_device *out, int(*okfn)(struct sk_buff *)
+//return :
+// NF_DROP
+// NF_ACCEPT
+// NF_STOP
+******************************************************************************/
+unsigned int bridge_post_routing_hook_fn(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ return ECNT_NF_HOOK(ECNT_NF_BR,ECNT_NF_BR_POST_ROUTING,skb,in,out);
+}
+
+/*****************************************************************************
+//function :
+// nf_register_ipv4_hook
+//description :
+// register IPv4 nf_hook_ops
+//input :
+// void
+//return :
+// -1:fail
+// 0: success
+******************************************************************************/
+int nf_register_ipv4_hook(void)
+{
+ if(nf_register_hook(&ipv4_pre_routing_hook_op)){
+ printk("nf_register_hook() ipv4_pre_routing failed\n");
+ return -1;
+ }
+ if(nf_register_hook(&ipv4_local_in_hook_op)){
+ printk("nf_register_hook() ipv4_local_in_hook_op failed\n");
+ return -1;
+ }
+ if(nf_register_hook(&ipv4_forward_hook_op)){
+ printk("nf_register_hook() ipv4_forward_hook_op failed\n");
+ return -1;
+ }
+ if(nf_register_hook(&ipv4_local_out_hook_op)){
+ printk("nf_register_hook() ipv4_local_out_hook_op failed\n");
+ return -1;
+ }
+ if(nf_register_hook(&ipv4_post_routing_hook_op)){
+ printk("nf_register_hook() ipv4_post_routing_hook_op failed\n");
+ return -1;
+ }
+ return 0;
+}
+
+/*****************************************************************************
+//function :
+// nf_register_ipv6_hook
+//description :
+// register IPv6 nf_hook_ops
+//input :
+// void
+//return :
+// -1:fail
+// 0: success
+******************************************************************************/
+int nf_register_ipv6_hook(void)
+{
+ if(nf_register_hook(&ipv6_pre_routing_hook_op)){
+ printk("nf_register_hook() ipv6_pre_routing failed\n");
+ return -1;
+ }
+ if(nf_register_hook(&ipv6_local_in_hook_op)){
+ printk("nf_register_hook() ipv6_local_in_hook_op failed\n");
+ return -1;
+ }
+ if(nf_register_hook(&ipv6_forward_hook_op)){
+ printk("nf_register_hook() ipv6_forward_hook_op failed\n");
+ return -1;
+ }
+ if(nf_register_hook(&ipv6_local_out_hook_op)){
+ printk("nf_register_hook() ipv6_local_out_hook_op failed\n");
+ return -1;
+ }
+ if(nf_register_hook(&ipv6_post_routing_hook_op)){
+ printk("nf_register_hook() ipv6_post_routing_hook_op failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/*****************************************************************************
+//function :
+// nf_register_bridge_hook
+//description :
+// register Bridge nf_hook_ops
+//input :
+// void
+//return :
+// -1:fail
+// 0: success
+******************************************************************************/
+int nf_register_bridge_hook(void)
+{
+ if(nf_register_hook(&bridge_pre_routing_hook_op)){
+ printk("nf_register_hook() bridge_pre_routing_hook_op failed\n");
+ return -1;
+ }
+ if(nf_register_hook(&bridge_local_in_hook_op)){
+ printk("nf_register_hook() bridge_local_in_hook_op failed\n");
+ return -1;
+ }
+ if(nf_register_hook(&bridge_forward_hook_op)){
+ printk("nf_register_hook() bridge_forward_hook_op failed\n");
+ return -1;
+ }
+ if(nf_register_hook(&bridge_local_out_hook_op)){
+ printk("nf_register_hook() bridge_local_out_hook_op failed\n");
+ return -1;
+ }
+ if(nf_register_hook(&bridge_post_routing_hook_op)){
+ printk("nf_register_hook() bridge_post_routing_hook_op failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/*****************************************************************************
+//function :
+// nf_register_arp_hook
+//description :
+// register ARP nf_hook_ops
+//input :
+// void
+//return :
+// -1:fail
+// 0: success
+******************************************************************************/
+int nf_register_arp_hook(void)
+{
+ if(nf_register_hook(&arp_in_hook_op)){
+ printk("nf_register_hook() arp_in_hook_op failed\n");
+ return -1;
+ }
+ if(nf_register_hook(&arp_forward_hook_op)){
+ printk("nf_register_hook() arp_forward_hook_op failed\n");
+ return -1;
+ }
+ if(nf_register_hook(&arp_out_hook_op)){
+ printk("nf_register_hook() arp_out_hook_op failed\n");
+ return -1;
+ }
+ return 0;
+}
+
+/*****************************************************************************
+//function :
+// ecnt_net_filter_hook_init
+//description :
+// register nf_hook_ops, called by sock_init()
+//input :
+// void
+//return :
+// -1:fail
+// 0: success
+******************************************************************************/
+int ecnt_net_filter_hook_init(void)
+{
+/*
+ if(nf_register_ipv4_hook()){
+ printk("nf_register_ipv4_hook() failed\n");
+ return -1;
+ }
+
+ if(nf_register_ipv6_hook()){
+ printk("nf_register_ipv6_hook() failed\n");
+ return -1;
+ }
+
+ if(nf_register_bridge_hook()){
+ printk("nf_register_bridge_hook() failed\n");
+ return -1;
+ }
+
+ if(nf_register_arp_hook()){
+ printk("nf_register_arp_hook() failed\n");
+ return -1;
+ }
+*/
+ return 0;
+}
+
+
Index: linux-3.18.21/arch/mips/econet/ecnt_kernel_ext.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/econet/ecnt_kernel_ext.c 2018-02-05 14:20:19.000000000 +0800
@@ -0,0 +1,551 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation ("EcoNet Software")
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited ("EcoNet") and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS ("ECONET SOFTWARE") RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN "AS IS"
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER'S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <asm/tc3162/tc3162.h>
+#include <asm/tc3162/TCIfSetQuery_os.h>
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+#define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration, defined in
+ linux2.6.36/include/linux/netdevice.h*/
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+
+/************************************************************************
+* E X T E R N A L D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+*************************************************************************
+*/
+/* add for fix compile error */
+int g_port_reverse_kernel = 0;
+EXPORT_SYMBOL(g_port_reverse_kernel);
+
+int rtsp_hwnat_offload = 0;
+EXPORT_SYMBOL(rtsp_hwnat_offload);
+
+EXPORT_SYMBOL(find_task_by_vpid);
+
+#ifdef TCSUPPORT_USB_HOST_LED
+void (*Usb_Led_Flash_Op_hook)(unsigned int opmode ,unsigned int phyport);
+EXPORT_SYMBOL(Usb_Led_Flash_Op_hook);
+int pre_usb_state[2] = {0, 0};
+EXPORT_SYMBOL(pre_usb_state);
+#endif
+
+
+
+
+
+#ifdef TCSUPPORT_VLAN_TAG
+int (*remove_vtag_hook)(struct sk_buff *skb, struct net_device *dev);
+int (*insert_vtag_hook)(struct sk_buff **pskb);
+//#if !defined(TCSUPPORT_FTP_THROUGHPUT)
+int (*check_vtag_hook)(void);
+//#endif
+int (*get_vtag_hook)(struct net_device *dev, struct sk_buff *skb);
+/*TCSUPPORT_ROUTEPOLICY_PRIOR_PORTBIND*/
+struct net_device* (*tcvlan_get_outdev_hook)(struct sk_buff* skb);
+/*END TCSUPPORT_ROUTEPOLICY_PRIOR_PORTBIND*/
+/*TCSUPPORT_CPU_PERFORMANCE_TEST---start---*/
+int (*get_pvid_by_dev_name_hook)(char *devName);
+int (*get_wan_index_by_vlan_hook)(unsigned char vlanLayer, unsigned short vlanId);
+/*TCSUPPORT_CPU_PERFORMANCE_TEST---end---*/
+EXPORT_SYMBOL(remove_vtag_hook);
+EXPORT_SYMBOL(insert_vtag_hook);
+//#if !defined(TCSUPPORT_FTP_THROUGHPUT)
+EXPORT_SYMBOL(check_vtag_hook);
+//#endif
+EXPORT_SYMBOL(get_vtag_hook);
+/*TCSUPPORT_ROUTEPOLICY_PRIOR_PORTBIND*/
+EXPORT_SYMBOL(tcvlan_get_outdev_hook);
+/*END TCSUPPORT_ROUTEPOLICY_PRIOR_PORTBIND*/
+/*TCSUPPORT_CPU_PERFORMANCE_TEST---start---*/
+EXPORT_SYMBOL(get_pvid_by_dev_name_hook);
+EXPORT_SYMBOL(get_wan_index_by_vlan_hook);
+/*TCSUPPORT_CPU_PERFORMANCE_TEST---end---*/
+#endif
+/*TCSUPPORT_VLAN_PASSTHROUGH*/
+int (*check_vlan_range_hook)(struct net_device *dev, struct sk_buff *skb);
+EXPORT_SYMBOL(check_vlan_range_hook);
+/*END TCSUPPORT_VLAN_PASSTHROUGH*/
+
+int (*xpon_store_upstream_igmp_vlan_tci_hook)(struct sk_buff *skb) = NULL;
+int (*xpon_down_multicast_vlan_handle_hook)(struct sk_buff *skb) = NULL;
+int (*xpon_upstream_vlan_recovery_by_dynlist_hook)(struct sk_buff* skb) = NULL;
+int (*xpon_hgu_down_multicast_access_control_hook)(struct sk_buff* skb) = NULL;
+
+
+EXPORT_SYMBOL(xpon_store_upstream_igmp_vlan_tci_hook);
+EXPORT_SYMBOL(xpon_down_multicast_vlan_handle_hook);
+EXPORT_SYMBOL(xpon_upstream_vlan_recovery_by_dynlist_hook);
+EXPORT_SYMBOL(xpon_hgu_down_multicast_access_control_hook);
+
+//#ifdef CONFIG_SMUX
+#if !defined(TCSUPPORT_CT)
+int (*check_smuxIf_exist_hook)(struct net_device *dev);
+EXPORT_SYMBOL(check_smuxIf_exist_hook);
+#endif
+int (*smux_pkt_recv_hook)(struct sk_buff *skb,
+ struct net_device *dev,
+ struct net_device *rdev);
+EXPORT_SYMBOL(smux_pkt_recv_hook);
+void (*get_wan_index_info_hook)(char* wan_index);
+EXPORT_SYMBOL(get_wan_index_info_hook);
+
+//#endif
+
+int (*portbind_check_bind_lan2)(int bind_index);
+EXPORT_SYMBOL(portbind_check_bind_lan2);
+int (*portbind_check_bind_wantype)(char *landev, int bind_index);
+EXPORT_SYMBOL(portbind_check_bind_wantype);
+
+#ifdef TCSUPPORT_PORTBIND /* CONFIG_PORT_BINDING */
+#if !defined(TCSUPPORT_CT)
+int (*portbind_sw_hook)(void);
+/*TCSUPPORT_ROUTEPOLICY_PRIOR_PORTBIND*/
+int (*portbind_sw_prior_hook)(struct sk_buff *skb);
+struct net_device* (*portbind_get_outdev_by_indev_hook)(unsigned char* indev_name);
+/*END TCSUPPORT_ROUTEPOLICY_PRIOR_PORTBIND*/
+int (*portbind_check_hook)(char *inIf, char *outIf);
+EXPORT_SYMBOL(portbind_sw_hook);
+EXPORT_SYMBOL(portbind_check_hook);
+/*TCSUPPORT_ROUTEPOLICY_PRIOR_PORTBIND*/
+EXPORT_SYMBOL(portbind_sw_prior_hook);
+EXPORT_SYMBOL(portbind_get_outdev_by_indev_hook);
+/*END TCSUPPORT_ROUTEPOLICY_PRIOR_PORTBIND*/
+#endif
+#endif
+
+
+int (*I2CWriterPtr)(unsigned char DevAddr, unsigned char WordAddr, unsigned char* data_value, unsigned char data_len);
+int (*I2CReaderPtr)(unsigned char DevAddr, unsigned char WordAddr, unsigned char* data_value, unsigned char data_len);
+EXPORT_SYMBOL(I2CWriterPtr);
+EXPORT_SYMBOL(I2CReaderPtr);
+
+#if defined(TCSUPPORT_XPON_IGMP)
+int (*xpon_sfu_up_send_multicast_frame_hook)(struct sk_buff *skb, int clone) = NULL;
+
+int (*xpon_sfu_up_multicast_incoming_hook)(struct sk_buff *skb, int clone) = NULL;
+
+int (*xpon_sfu_down_multicast_incoming_hook)(struct sk_buff *skb, int clone) = NULL;
+
+int (*xpon_sfu_up_multicast_vlan_hook)(struct sk_buff *skb, int clone) = NULL;
+
+int (*xpon_sfu_multicast_protocol_hook)(struct sk_buff *skb) = NULL;
+
+int (*xpon_up_igmp_uni_vlan_filter_hook)(struct sk_buff *skb) = NULL;
+
+int (*xpon_up_igmp_ani_vlan_filter_hook)(struct sk_buff *skb) = NULL;
+
+int (*isVlanOperationInMulticastModule_hook)(struct sk_buff *skb) = NULL;
+
+
+EXPORT_SYMBOL(xpon_sfu_up_send_multicast_frame_hook);
+
+EXPORT_SYMBOL(xpon_sfu_up_multicast_incoming_hook);
+
+EXPORT_SYMBOL(xpon_sfu_down_multicast_incoming_hook);
+
+EXPORT_SYMBOL(xpon_sfu_up_multicast_vlan_hook);
+
+EXPORT_SYMBOL(xpon_sfu_multicast_protocol_hook);
+
+EXPORT_SYMBOL(xpon_up_igmp_uni_vlan_filter_hook);
+
+EXPORT_SYMBOL(xpon_up_igmp_ani_vlan_filter_hook);
+
+EXPORT_SYMBOL(isVlanOperationInMulticastModule_hook);
+#endif
+
+#ifdef TCSUPPORT_PON_MAC_FILTER
+int (*pon_check_mac_hook)(struct sk_buff *skb);
+int (*pon_mac_filter_get_mode_hook)(void);
+
+EXPORT_SYMBOL(pon_check_mac_hook);
+EXPORT_SYMBOL(pon_mac_filter_get_mode_hook);
+#endif
+
+#ifdef TCSUPPORT_GPON_MAPPING
+int (*gpon_mapping_hook)(struct sk_buff *pskb);
+int (*xpon_mode_get_hook)(void);
+
+EXPORT_SYMBOL(gpon_mapping_hook);
+EXPORT_SYMBOL(xpon_mode_get_hook);
+
+#if defined(TCSUPPORT_GPON_DOWNSTREAM_MAPPING)
+int (*gpon_downstream_mapping_hook)(struct sk_buff *skb);
+int (*gpon_downstream_mapping_stag_hook)(struct sk_buff *skb);
+EXPORT_SYMBOL(gpon_downstream_mapping_hook);
+EXPORT_SYMBOL(gpon_downstream_mapping_stag_hook);
+#endif
+#endif
+
+#ifdef TCSUPPORT_EPON_MAPPING
+int (*epon_sfu_clsfy_hook)(struct sk_buff *skb, int port);
+int (*epon_mapping_hook)(struct sk_buff *skb);
+EXPORT_SYMBOL(epon_sfu_clsfy_hook);
+EXPORT_SYMBOL(epon_mapping_hook);
+#endif
+
+#ifdef TCSUPPORT_PON_VLAN
+int (*pon_insert_tag_hook)(struct sk_buff **pskb);
+int (*pon_vlan_get_mode_hook)(void);
+int (*pon_store_tag_hook)(struct sk_buff *skb, struct net_device *dev);
+int (*pon_check_vlan_hook)(struct net_device *dev, struct sk_buff *skb);
+int (*pon_check_tpid_hook)(__u16 * buf);
+int (*pon_check_user_group_hook)(struct sk_buff *skb);
+int (*pon_PCP_decode_hook)(struct sk_buff **pskb);
+
+EXPORT_SYMBOL(pon_insert_tag_hook);
+EXPORT_SYMBOL(pon_vlan_get_mode_hook);
+EXPORT_SYMBOL(pon_store_tag_hook);
+EXPORT_SYMBOL(pon_check_vlan_hook);
+EXPORT_SYMBOL(pon_check_tpid_hook);
+EXPORT_SYMBOL(pon_check_user_group_hook);
+EXPORT_SYMBOL(pon_PCP_decode_hook);
+#endif
+
+#if defined(TCSUPPORT_XPON_IGMP)
+int (*xpon_igmp_acl_filter_hook)(struct sk_buff* skb) = NULL;
+EXPORT_SYMBOL(xpon_igmp_acl_filter_hook);
+#endif
+
+#if defined(TCSUPPORT_XPON_IGMP)
+int (*xpon_igmp_ioctl_hook)(unsigned long subcmd,unsigned long argv1,unsigned long argv2);
+EXPORT_SYMBOL(xpon_igmp_ioctl_hook);
+#endif
+
+/*#if defined(TCSUPPORT_XPON_IGMP) && defined(TCSUPPORT_MULTICAST_SPEED)*/
+int (*xpon_hgu_multicast_data_hook)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(xpon_hgu_multicast_data_hook);
+/*#endif*/
+int (*hwnat_multicast_data_hook)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(hwnat_multicast_data_hook);
+
+#if defined(TCSUPPORT_XPON_IGMP)
+int (*xpon_add_delete_port_hook)(struct net_device* dev, int op);
+EXPORT_SYMBOL(xpon_add_delete_port_hook);
+#endif
+
+
+/*for atm ptm*/
+#if defined(TCSUPPORT_CPU_MT7510) || defined(TCSUPPORT_CPU_MT7505) || defined(TCSUPPORT_CPU_EN7512)
+int napi_en = 0;
+EXPORT_SYMBOL(napi_en);
+void (*br2684_config_hook)(int linkMode, int linkType);
+EXPORT_SYMBOL(br2684_config_hook);
+int (*br2684_init_hook)(struct atm_vcc *atmvcc, int encaps) = NULL;
+EXPORT_SYMBOL(br2684_init_hook);
+int (*br2684_push_hook)(struct atm_vcc *atmvcc, struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(br2684_push_hook);
+int (*br2684_xmit_hook)(struct sk_buff *skb, struct net_device *dev, enum br2684_encaps encaps) = NULL;
+EXPORT_SYMBOL(br2684_xmit_hook);
+#endif
+
+#if defined(TCSUPPORT_CPU_MT7510) || defined(TCSUPPORT_CPU_MT7505) || defined(TCSUPPORT_CPU_EN7512)
+extern netdev_tx_t br2684_start_xmit(struct sk_buff *skb, struct net_device *dev);
+
+EXPORT_SYMBOL(br2684_start_xmit);
+#endif
+
+#if defined(TCSUPPORT_CPU_MT7510) || defined(TCSUPPORT_CPU_MT7505) || defined(TCSUPPORT_CPU_EN7512)
+extern int br2684_init(void);
+extern void br2684_exit(void);
+
+EXPORT_SYMBOL(br2684_init);
+EXPORT_SYMBOL(br2684_exit);
+#endif
+
+#if defined(TCSUPPORT_CPU_MT7510) || defined(TCSUPPORT_CPU_MT7505)
+void (*pppoatm_config_hook)(int linkMode, int linkType) = NULL;
+EXPORT_SYMBOL(pppoatm_config_hook);
+
+int (*pppoatm_init_hook)(struct atm_vcc *atmvcc, int encaps) = NULL;
+EXPORT_SYMBOL(pppoatm_init_hook);
+
+int (*pppoatm_push_hook)(struct atm_vcc *atmvcc, struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(pppoatm_push_hook);
+#endif
+
+#if defined(TCSUPPORT_CPU_MT7510) || defined(TCSUPPORT_CPU_MT7505)
+extern int pppoatm_init(void);
+extern void pppoatm_exit(void);
+
+EXPORT_SYMBOL(pppoatm_init);
+EXPORT_SYMBOL(pppoatm_exit);
+#endif
+/*atm ptm end*/
+#ifdef MTK_CRYPTO_DRIVER
+int ip_output(struct sock *sk, struct sk_buff *skb);
+int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
+EXPORT_SYMBOL(ip_output);
+EXPORT_SYMBOL(xfrm_parse_spi);
+#endif
+
+/************************************************************************
+* P U B L I C D A T A
+*************************************************************************
+*/
+
+/************************************************************************
+* P R I V A T E D A T A
+*************************************************************************
+*/
+
+
+/************************************************************************
+* F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+/**
+ * random32 - pseudo random number generator
+ *
+ * A 32 bit pseudo-random number is generated using a fast
+ * algorithm suitable for simulation. This algorithm is NOT
+ * considered safe for cryptographic use.
+ */
+u32 random32(void)
+{
+ return prandom_u32();
+}
+EXPORT_SYMBOL(random32);
+
+#if 1
+struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
+{
+ skb->protocol = htons(ETH_P_8021Q);
+ return vlan_insert_tag(skb, ETH_P_8021Q, vlan_tci);
+}
+#else
+struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
+{
+ struct vlan_ethhdr *veth;
+
+ if (skb_cow_head(skb, VLAN_HLEN) < 0) {
+ kfree_skb(skb);
+ return NULL;
+ }
+ veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
+
+ /* Move the mac addresses to the beginning of the new header. */
+ memmove(skb->data, skb->data + VLAN_HLEN, 2 * VLAN_ETH_ALEN);
+ skb->mac_header -= VLAN_HLEN;
+
+ /* first, the ethernet type */
+ veth->h_vlan_proto = htons(ETH_P_8021Q);
+
+ /* now, the TCI */
+ veth->h_vlan_TCI = htons(vlan_tci);
+
+ skb->protocol = htons(ETH_P_8021Q);
+
+ return skb;
+}
+#endif
+EXPORT_SYMBOL(__vlan_put_tag);
+
+/**
+ * vlan_put_tag - inserts VLAN tag according to device features
+ * @skb: skbuff to tag
+ * @vlan_tci: VLAN TCI to insert
+ *
+ * Assumes skb->dev is the target that will xmit this frame.
+ * Returns a VLAN tagged skb.
+ */
+struct sk_buff *vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
+{
+ if (skb->dev->features & NETIF_F_HW_VLAN_TX) {
+ return __vlan_hwaccel_put_tag(skb, ETH_P_8021Q, vlan_tci);
+ } else {
+ return __vlan_put_tag(skb, vlan_tci);
+ }
+}
+EXPORT_SYMBOL(vlan_put_tag);
+
+#ifdef TCSUPPORT_PON_VLAN
+static DEFINE_SPINLOCK(ptype_lock);
+static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
+static struct list_head ptype_all __read_mostly; /* Taps */
+
+//check if type already in the list.
+//return: 0:yes 1:no
+int pon_check_pack(__u16 type)
+{
+ struct list_head *head;
+ struct packet_type *pt1;
+
+ spin_lock_bh(&ptype_lock);
+ if (type == htons(ETH_P_ALL))
+ head = &ptype_all;
+ else
+ head = &ptype_base[ntohs(type) & 15];
+
+ list_for_each_entry(pt1, head, list)
+ {
+ if (type == pt1->type)
+ {
+ spin_unlock_bh(&ptype_lock);
+ return 0;
+ }
+ }
+ spin_unlock_bh(&ptype_lock);
+ return 1;
+}
+EXPORT_SYMBOL(pon_check_pack);
+#endif
+
+#if defined(TCSUPPORT_PON_VLAN) || defined(TCSUPPORT_VLAN_TPID)
+/*
+Uesd for xPon insert tag.Support set TPID
+*/
+struct sk_buff *__pon_vlan_put_tag(struct sk_buff *skb, u16 tpid,unsigned short vlan_tci)
+{
+ struct vlan_ethhdr *veth;
+
+ if (skb_cow_head(skb, VLAN_HLEN) < 0) {
+ kfree_skb(skb);
+ return NULL;
+ }
+ veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
+
+ /* Move the mac addresses to the beginning of the new header. */
+ memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);
+ skb->mac_header -= VLAN_HLEN;
+
+ /* first, the ethernet type */
+ veth->h_vlan_proto = htons(tpid);
+
+ /* now, the TCI */
+ veth->h_vlan_TCI = htons(vlan_tci);
+
+ skb->protocol = htons(tpid);
+
+ return skb;
+}
+EXPORT_SYMBOL(__pon_vlan_put_tag);
+#endif
+
+struct net *__get_net_ns_by_pid(pid_t pid) {
+ struct net *netns = NULL;
+
+ netns = get_net_ns_by_pid(pid);
+ if(netns == ERR_PTR(-ESRCH))
+ netns = NULL;
+
+ return netns;
+}
+EXPORT_SYMBOL(__get_net_ns_by_pid);
+/**************************************************
+Function: Clear all the SW and HW maintained
+ multicast flow
+Input:
+ N/A
+Return:
+ 0: ok
+**************************************************/
+#if 0
+int igmp_hwnat_clear_flows(void)
+{
+ IGMP_HWNATEntry_t* entry = NULL,*tmp = NULL;
+ struct list_head* hwnat_flow = igmp_hwnat_get_list();
+
+ IGMP_HWNAT_DEBUG("enter");
+
+ spin_lock(&hwnat_lock);
+ #ifdef TCSUPPORT_MULTICAST_SPEED
+ list_for_each_entry_rcu(entry,hwnat_flow,list)
+ #else
+ list_for_each_entry_safe(entry,tmp,hwnat_flow,list)
+ #endif
+ {
+ igmp_hwnat_delete_flow(entry);
+ }
+ spin_unlock(&hwnat_lock);
+ return 0;
+}
+EXPORT_SYMBOL(igmp_hwnat_clear_flows);
+#endif
+
+#if defined(TCSUPPORT_LED_SWITCH_BUTTON) && defined(TCSUPPORT_WLAN)
+#if defined(TCSUPPORT_WLAN_AC)
+int (*hook_wlan_led_action_5g)(int action, int gpio);
+EXPORT_SYMBOL(hook_wlan_led_action_5g);
+#endif
+#endif
+
+#if !defined(TCSUPPORT_FH_JOYMEV2_PON)
+void os_TCIfQuery(unsigned short query_id, void *result1, void *result2)
+{
+ if (adsl_dev_ops == NULL)
+ return;
+
+ adsl_dev_ops->query(query_id, result1, result2);
+}
+EXPORT_SYMBOL(os_TCIfQuery);
+#endif
+int wscStatus_24g = 0;
+EXPORT_SYMBOL(wscStatus_24g);
+int wscStatus_5g = 0;
+EXPORT_SYMBOL(wscStatus_5g);
+int wscTimerRunning_5g = 0;
+EXPORT_SYMBOL(wscTimerRunning_5g);
Index: linux-3.18.21/arch/mips/econet/ecnt_traps.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/econet/ecnt_traps.c 2018-02-05 13:19:53.000000000 +0800
@@ -0,0 +1,433 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation ("EcoNet Software")
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited ("EcoNet") and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS ("ECONET SOFTWARE") RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN "AS IS"
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER'S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/context_tracking.h>
+#include <linux/cpu_pm.h>
+#include <linux/kexec.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include <linux/bootmem.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/kgdb.h>
+#include <linux/kdebug.h>
+#include <linux/kprobes.h>
+#include <linux/notifier.h>
+#include <linux/kdb.h>
+#include <linux/irq.h>
+#include <linux/perf_event.h>
+
+#include <asm/bootinfo.h>
+#include <asm/branch.h>
+#include <asm/break.h>
+#include <asm/cop2.h>
+#include <asm/cpu.h>
+#include <asm/cpu-type.h>
+#include <asm/dsp.h>
+#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
+#include <asm/idle.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/module.h>
+#include <asm/msa.h>
+#include <asm/pgtable.h>
+#include <asm/ptrace.h>
+#include <asm/sections.h>
+#include <asm/tlbdebug.h>
+#include <asm/traps.h>
+#include <asm/uaccess.h>
+#include <asm/watch.h>
+#include <asm/mmu_context.h>
+#include <asm/types.h>
+#include <asm/stacktrace.h>
+#include <asm/uasm.h>
+#include <asm/irq.h>
+
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+#define NMI_STACK_LEN 80
+#define NMI_STACK_MAGIC_NUM 0x5abc2312
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+
+/************************************************************************
+* E X T E R N A L D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+*************************************************************************
+*/
+extern int regs_to_trapnr(struct pt_regs *regs);
+
+
+/************************************************************************
+* P U B L I C D A T A
+*************************************************************************
+*/
+int watchFlag=0;
+EXPORT_SYMBOL(watchFlag);
+
+
+/************************************************************************
+* P R I V A T E D A T A
+*************************************************************************
+*/
+
+
+/************************************************************************
+* F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+void show_raw_backtrace_nmi(unsigned long sp_start, unsigned long stack_len)
+{
+ unsigned long *sp = (unsigned long *)(sp_start & ~3);
+ unsigned long addr, i = 0;
+
+ printk("Call Trace NMI:");
+#ifdef CONFIG_KALLSYMS
+ printk("\n");
+#endif
+ while (i < stack_len) {
+ unsigned long __user *p =
+ (unsigned long __user *)(unsigned long)sp++;
+ if (__get_user(addr, p)) {
+ printk(" (Bad stack address)");
+ break;
+ }
+ if (__kernel_text_address(addr))
+ print_ip_sym(addr);
+
+ i++;
+ }
+ printk("\n");
+}
+
+void show_stack_nmi(void)
+{
+ unsigned int dspram_addr = dspram_base_addr();
+ int i, dspram_data_len=NMI_STACK_LEN;
+ unsigned int *p = (unsigned int *)dspram_addr;
+
+ printk("dspram_addr=0x%x\n", dspram_addr);
+
+ if(*p != NMI_STACK_MAGIC_NUM){
+ printk("No NMI Happen!\n");
+ return;
+ }
+
+ p++;
+ printk("epc : %08lx %pS\n", *p,
+ (void *) (*p));
+ p++;
+ printk("ra : %08lx %pS\n",*p,
+ (void *) (*p));
+
+ p++;
+ printk("Status: %08x ", (uint32_t) (*p));
+ p++;
+ printk("Cause : %08x\n", (*p));
+
+ p++;
+
+ while(dspram_data_len){
+ if(dspram_data_len % 8 == 0)
+ printk("\n ");
+ printk(" %08lx", *p);
+
+ p++;
+ dspram_data_len--;
+ }
+ printk("\n ");
+
+ show_raw_backtrace_nmi(dspram_base_addr(),NMI_STACK_LEN);
+
+
+}
+
+static void show_stacktrace_nmi(struct task_struct *task,
+ const struct pt_regs *regs)
+{
+ const int field = 2 * sizeof(unsigned long);
+ long stackdata;
+ int i;
+ unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
+
+ printk("Stack :");
+ i = 0;
+ while ((unsigned long) sp & (PAGE_SIZE - 1)) {
+ if (i && ((i % (64 / field)) == 0))
+ printk("\n ");
+ if (i > NMI_STACK_LEN-1) {
+ printk(" ...");
+ break;
+ }
+
+ if (__get_user(stackdata, sp++)) {
+ printk(" (Bad stack address)");
+ break;
+ }
+
+ printk(" %0*lx", field, stackdata);
+ i++;
+ }
+ printk("\n");
+ //show_backtrace(task, regs);
+}
+
+static void __show_regs_nmi(const struct pt_regs *regs)
+{
+ const int field = 2 * sizeof(unsigned long);
+ unsigned int cause = regs->cp0_cause;
+ int i;
+
+ printk("Cpu %d\n", smp_processor_id());
+
+ /*
+ * Saved main processor registers
+ */
+ for (i = 0; i < 32; ) {
+ if ((i % 4) == 0)
+ printk("$%2d :", i);
+ if (i == 0)
+ printk(" %0*lx", field, 0UL);
+ else if (i == 26 || i == 27)
+ printk(" %*s", field, "");
+ else
+ printk(" %0*lx", field, regs->regs[i]);
+
+ i++;
+ if ((i % 4) == 0)
+ printk("\n");
+ }
+
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ printk("Acx : %0*lx\n", field, regs->acx);
+#endif
+ printk("Hi : %0*lx\n", field, regs->hi);
+ printk("Lo : %0*lx\n", field, regs->lo);
+
+ /*
+ * Saved cp0 registers
+ */
+ //printk("epc : %0*lx %pS\n", field, regs->cp0_epc,
+ // (void *) regs->cp0_epc);
+ printk("epc : %0*lx\n", field, regs->cp0_epc);
+ printk(" %s\n", print_tainted());
+ //printk("ra : %0*lx %pS\n", field, regs->regs[31],
+ // (void *) regs->regs[31]);
+ printk("ra : %0*lx\n", field, regs->regs[31]);
+
+ printk("Status: %08x ", (uint32_t) regs->cp0_status);
+
+ if (current_cpu_data.isa_level == MIPS_CPU_ISA_II) {
+ if (regs->cp0_status & ST0_KUO)
+ printk("KUo ");
+ if (regs->cp0_status & ST0_IEO)
+ printk("IEo ");
+ if (regs->cp0_status & ST0_KUP)
+ printk("KUp ");
+ if (regs->cp0_status & ST0_IEP)
+ printk("IEp ");
+ if (regs->cp0_status & ST0_KUC)
+ printk("KUc ");
+ if (regs->cp0_status & ST0_IEC)
+ printk("IEc ");
+ } else {
+ if (regs->cp0_status & ST0_KX)
+ printk("KX ");
+ if (regs->cp0_status & ST0_SX)
+ printk("SX ");
+ if (regs->cp0_status & ST0_UX)
+ printk("UX ");
+ switch (regs->cp0_status & ST0_KSU) {
+ case KSU_USER:
+ printk("USER ");
+ break;
+ case KSU_SUPERVISOR:
+ printk("SUPERVISOR ");
+ break;
+ case KSU_KERNEL:
+ printk("KERNEL ");
+ break;
+ default:
+ printk("BAD_MODE ");
+ break;
+ }
+ if (regs->cp0_status & ST0_ERL)
+ printk("ERL ");
+ if (regs->cp0_status & ST0_EXL)
+ printk("EXL ");
+ if (regs->cp0_status & ST0_IE)
+ printk("IE ");
+ }
+ printk("\n");
+
+ printk("Cause : %08x\n", cause);
+
+ cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
+ if (1 <= cause && cause <= 5)
+ printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
+
+ printk("PrId : %08x (%s)\n", read_c0_prid(),
+ cpu_name_string());
+}
+
+void show_registers_nmi(struct pt_regs *regs)
+{
+ const int field = 2 * sizeof(unsigned long);
+
+ __show_regs_nmi(regs);
+ //print_modules();
+ printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
+ current->comm, current->pid, current_thread_info(), current,
+ field, current_thread_info()->tp_value);
+ if (cpu_has_userlocal) {
+ unsigned long tls;
+
+ tls = read_c0_userlocal();
+ if (tls != current_thread_info()->tp_value)
+ printk("*HwTLS: %0*lx\n", field, tls);
+ }
+
+ show_stacktrace_nmi(current, regs);
+ //show_code((unsigned int __user *) regs->cp0_epc);
+ printk("\n");
+}
+
+void nmi_info_store( struct pt_regs *regs)
+{
+ const int field = 2 * sizeof(unsigned long);
+ unsigned int cause = regs->cp0_cause;
+ long stackdata;
+ int i;
+ unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
+
+ /*Store Magic Number*/
+ write_to_dspram(NMI_STACK_MAGIC_NUM);
+
+ /*Store register value*/
+ write_to_dspram(regs->cp0_epc);
+ write_to_dspram(regs->regs[31]); //ra
+ write_to_dspram((uint32_t) regs->cp0_status);
+ write_to_dspram(cause);
+
+ /*Store stack data*/
+ i = 0;
+ while ((unsigned long) sp & (PAGE_SIZE - 1)) {
+ if (i > NMI_STACK_LEN-1) {
+ break;
+ }
+
+ if (__get_user(stackdata, sp++)) {
+ //printk(" (Bad stack address)");
+ break;
+ }
+
+ //printk(" %0*lx", field, stackdata);
+ write_to_dspram(stackdata);
+ i++;
+ }
+}
+
+void __noreturn die_nmi(const char *str, struct pt_regs *regs, spinlock_t *lock)
+{
+ static int die_counter;
+ int sig = SIGSEGV;
+#ifdef CONFIG_MIPS_MT_SMTC
+ unsigned long dvpret = dvpe();
+#endif /* CONFIG_MIPS_MT_SMTC */
+
+ notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV);
+
+ console_verbose();
+ spin_lock_irq(lock);
+ bust_spinlocks(1);
+#ifdef CONFIG_MIPS_MT_SMTC
+ mips_mt_regdump_nmi(dvpret);
+#endif /* CONFIG_MIPS_MT_SMTC */
+
+
+ if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
+ sig = 0;
+
+ printk("%s[#%d]:\n", str, ++die_counter);
+ show_registers_nmi(regs);
+
+ while(1); //waiting for watchdog reboot
+#if 0
+ add_taint(TAINT_DIE);
+ spin_unlock_irq(&die_lock);
+
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+
+ if (panic_on_oops) {
+ printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
+ ssleep(5);
+ panic("Fatal exception");
+ }
+
+ do_exit(sig);
+#endif
+}
+
Index: linux-3.18.21/arch/mips/econet/ex_mdio_api.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/econet/ex_mdio_api.c 2018-02-05 13:19:53.000000000 +0800
@@ -0,0 +1,523 @@
+/*
+ * ex_mdio_api.c
+ *
+ * Created on: 2013/4/22
+ * Author: mtk04880
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/mipsregs.h>
+#include <asm/tc3162/tc3162.h>
+#include <asm/tc3162/ledcetrl.h>
+#include <linux/sched.h>
+
+/*MACRO DEFINITION*/
+//#define DBG 1
+#define MDIO_WRITE (1)
+#define MDIO_READ (0)
+
+#define GPIO_HIGH(x) VPint(CR_GPIO_DATA) |= (1<<(x))
+#define GPIO_LOW(x) VPint(CR_GPIO_DATA) &= ~(1<<(x))
+#define GPIO_VALUE(x) ((VPint(CR_GPIO_DATA))&(1<<(x)))>>(x)
+
+#define GPIO_OEN(x) (x > 15) ? (VPint(CR_GPIO_CTRL1) |= (1<<(x-16)*2)):\
+ (VPint(CR_GPIO_CTRL) |= (1<<((x)*2))); VPint(CR_GPIO_ODRAIN) |= (1<<(x));
+/* input enable */
+#define GPIO_IEN(x) (x > 15) ? (VPint(CR_GPIO_CTRL1) &= ~(0x00000003 << ((x-16)* 2)) ): \
+ (VPint(CR_GPIO_CTRL) &= ~(0x00000003 << ((x)* 2))); VPint(CR_GPIO_ODRAIN) &= ~(0x00000001 << (x));
+
+#define MDC_GPIO_DEF (25)
+#define MDIO_GPIO_DEF (26)
+/*GLOBAL VARIABLE*/
+static int mdc_gpio = MDC_GPIO_DEF;
+static int mdio_gpio = MDIO_GPIO_DEF;
+
+static void word2Bit(uint32 word, uint8 *bit){
+ uint8 i;
+ for (i=0;i<32;i++)
+ {
+ bit[i]=(word & (0x00000001<<i))>>i ;
+ }
+}
+static uint32 bit2Word( uint8 *bit){
+ uint8 i;
+ uint32 word=0;
+ for (i=0;i<32;i++)
+ {
+ word+=(bit[i])<<(31-i) ;
+ }
+
+ return word;
+}
+
+static void HwGpioSetMode(uint8 mode){
+ uint32 word;
+ if(isRT63365)
+		VPint(0xbfb00860) &=~((1<<9)|(1<<12));//RT63365: GPIO 7 and 10 are shared for ethernet LED
+
+ if (mode == MDIO_READ){
+ // set gpio10 as output pin
+ //VPint(CR_GPIO_CTRL) |= (1<<(10*2));
+ //VPint(CR_GPIO_ODRAIN) |= (1<<10);
+#if 0
+ // set gpio7 as input pin
+ VPint(CR_GPIO_ODRAIN) &= ~(1<<7);
+ VPint(CR_GPIO_CTRL) &= ~(1<<(7*2));
+#else
+ GPIO_IEN(mdio_gpio);
+#endif
+ }
+ else if (mode == MDIO_WRITE){
+#if 0
+ // set gpio7/10 as output pin
+ VPint(CR_GPIO_CTRL) |= (1<<(7*2))|(1<<(10*2));
+ VPint(CR_GPIO_ODRAIN) |= (1<<7)|(1<<(10));
+#else
+ GPIO_OEN(mdio_gpio);
+ GPIO_OEN(mdc_gpio);
+#endif
+ }
+}
+
+void exModeMDIOWrite(uint32 reg, uint32 data){
+ uint8 i;
+ uint8 Addr[32]={0};
+ uint8 Data[32]={0};
+ uint8 temp1[32]={0};
+ uint8 temp2[32]={0};
+ //uint32 reg=0x8;
+ //uint32 data=0xb;
+ uint8 mdio_signal_real[116*2] ={0};
+ uint8 mdio_signal[116]={
+ 1,1,1,1,0,1,0,1, //PRE ST OP
+ 1,1, //ADDR 2bit
+ 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,1,1,1, //ADDR 32bit //0xbfb50800//0x7
+ 1,0, //TA
+ 1,1,1,1,1,1,1,1, //byte-enable DATA 8bit
+
+ 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,
+
+ 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0, // DATA 64bit
+ };
+
+ uint8 mdc_signal[116*2+16]={
+ 1,0,1,0,1,0,1,0, //PRE ST OP
+ 1,0, //ADDR 2bit
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0, //ADDR 32bit
+ 1,0, //TA
+ 1,0,1,0,1,0,1,0, //byte-enable DATA 8bit
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0, //byte-enable DATA 64bit
+
+ 1,0,1,0,1,0,1,0, //PRE ST OP
+ 1,0, //ADDR 2bit
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0, //ADDR 32bit
+ 1,0, //TA
+ 1,0,1,0,1,0,1,0, //byte-enable DATA 8bit
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0, //byte-enable DATA 64bit
+
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ };
+#ifdef DBG
+ printk("%s(%d)Entry \n",__func__,__LINE__);
+#endif
+ if((reg<0x7)||(reg>0xff)){
+ reg=reg-0xa0000000;//for 0xb4000000 ->0x14000000
+ }
+ word2Bit(reg,temp1);
+ word2Bit(data,temp2);
+
+ for(i=0;i<32;i++)
+ {
+ Addr[i]=temp1[31-i];
+ Data[i]=temp2[31-i];
+ //printf("reg[%d] = %d\r\n", i, Addr[i]);
+ //printf("data[%d] = %d\r\n", i, Data[i]);
+ }
+
+	if((reg>=0x7)&&(reg<=0xff))//control register space
+ {
+ mdio_signal[8]=0;
+ mdio_signal[9]=0;
+
+ for (i=84;i<116;i++) //Data
+ {
+ mdio_signal[i]=Data[i-84];
+ }
+ }
+ else if(((reg&0xf)==0x0)||((reg&0xf)==0x8))
+ {
+ mdio_signal[8]=1;//high 32bit addr bit[33:32] = 0x10
+ mdio_signal[9]=0;
+
+ mdio_signal[44]=1;//byte-enable DATA 8bit
+ mdio_signal[45]=1;
+ mdio_signal[46]=1;
+ mdio_signal[47]=1;
+ mdio_signal[48]=0;
+ mdio_signal[49]=0;
+ mdio_signal[50]=0;
+ mdio_signal[51]=0;
+
+ for (i=52;i<84;i++) //Data
+ {
+ mdio_signal[i]=Data[i-52];
+ }
+
+
+ }
+ else if(((reg&0xf)==0x4)||((reg&0xf)==0xc))
+ {
+ mdio_signal[8]=0;//low 32bit addr bit[33:32] = 0x01
+ mdio_signal[9]=1;
+
+ mdio_signal[44]=0;//byte-enable DATA 8bit
+ mdio_signal[45]=0;
+ mdio_signal[46]=0;
+ mdio_signal[47]=0;
+ mdio_signal[48]=1;
+ mdio_signal[49]=1;
+ mdio_signal[50]=1;
+ mdio_signal[51]=1;
+
+ for (i=84;i<116;i++) //Data
+ {
+ mdio_signal[i]=Data[i-84];
+ }
+ }
+
+
+
+ for(i=10;i<42;i++)
+ {
+ mdio_signal[i]=Addr[i-10];
+
+ }
+
+ for (i=0;i<116;i++)
+ {
+ mdio_signal_real[i*2]=mdio_signal[i];
+ mdio_signal_real[i*2+1]=mdio_signal[i];
+ //printf("mdio_signal[%d] = %d\r\n", i, mdio_signal[i]);
+ }
+
+ HwGpioSetMode(MDIO_WRITE);//write
+
+ //GPIO_HIGH(mdio_gpio);//let MDIO can be push by MDC
+ //delayDmtSymbols(1);
+
+ for (i=0;i<116*2+16;i++)
+ {
+ if (mdc_signal[i] ==0)
+ {
+ GPIO_LOW(mdc_gpio);
+ //delayDmtSymbols(1);
+ }
+ else if (mdc_signal[i] ==1)
+ {
+ GPIO_HIGH(mdc_gpio);
+ //delayDmtSymbols(1);
+ }
+
+ if(i<116*2)
+ {
+ if (mdio_signal_real[i] ==0)
+ {
+ GPIO_LOW(mdio_gpio);
+ //delayDmtSymbols(1);
+ }
+ else if (mdio_signal_real[i] ==1)
+ {
+ GPIO_HIGH(mdio_gpio);
+ //delayDmtSymbols(1);
+ }
+ }
+ }
+#ifdef DBG
+ printk("%s(%d)Exit \n",__func__,__LINE__);
+#endif
+}
+
+uint32 exModeMDIORead(uint32 reg){
+ uint32 data = 0;
+ uint8 i;
+ uint8 Addr[32]={0};
+ uint8 Data[32]={0};
+ uint8 temp1[32]={0};
+ //uint8 temp2[32]={0};
+ uint8 mdio_signal_real[116*2] ={0};
+
+ uint8 mdio_signal[116]={
+ 1,1,1,1,0,1,1,0, //PRE ST OP
+ 1,1, //ADDR 2bit
+ 1,0,1,1,1,1,1,1,
+ 1,0,1,1,0,1,0,1,
+ 0,0,0,0,1,0,0,0,
+ 0,0,0,0,0,0,0,0, //ADDR 32bit //0xbfb50800 //0xbfb50a00
+ 1,0, //TA
+ 0,0,0,0,0,0,0,1, //byte-enable DATA 8bit
+
+ 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,
+
+ 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,
+ 1,0,1,1,0,1,0,0, //byte-enable DATA 64bit
+ };
+
+ uint8 mdc_signal[116*2+16]={
+ 1,0,1,0,1,0,1,0, //PRE ST OP
+ 1,0, //ADDR 2bit
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0, //ADDR 32bit
+ 1,0, //TA
+ 1,0,1,0,1,0,1,0, //byte-enable DATA 8bit
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0, //byte-enable DATA 64bit
+
+ 1,0,1,0,1,0,1,0, //PRE ST OP
+ 1,0, //ADDR 2bit
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0, //ADDR 32bit
+ 1,0, //TA
+ 1,0,1,0,1,0,1,0, //byte-enable DATA 8bit
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0, //byte-enable DATA 64bit
+
+ 1,0,1,0,1,0,1,0,
+ 1,0,1,0,1,0,1,0,
+ };
+#ifdef DBG
+ printk("%s(%d)Entry \n",__func__,__LINE__);
+#endif
+
+	if((reg>=0x7)&&(reg<=0xff))//control register space
+ {
+ mdio_signal[8]=0;
+ mdio_signal[9]=0;
+ }
+ else if(((reg&0xf)==0x0)||((reg&0xf)==0x8)) //high 32bit addr bit[33:32] = 0x10
+ {
+ mdio_signal[8]=1;
+ mdio_signal[9]=0;
+ reg=reg-0xa0000000;//for 0xb4000000 ->0x14000000
+ }
+ else if(((reg&0xf)==0x4)||((reg&0xf)==0xc))//low 32bit addr bit[33:32] = 0x01
+ {
+ mdio_signal[8]=0;
+ mdio_signal[9]=1;
+ reg=reg-0xa0000000;//for 0xb4000000 ->0x14000000
+ }
+
+
+ word2Bit(reg,temp1);
+
+
+ for(i=0;i<32;i++)
+ {
+ Addr[i]=temp1[31-i];
+ // Data[i]=temp2[31-i];
+ //printf("reg[%d] = %d\r\n", i, Addr[i]);
+ //printf("data[%d] = %d\r\n", i, Data[i]);
+ }
+
+ for(i=10;i<42;i++)
+ {
+ mdio_signal[i]=Addr[i-10];
+
+ }
+
+ for (i=0;i<42;i++)
+ {
+ mdio_signal_real[i*2]=mdio_signal[i];
+ mdio_signal_real[i*2+1]=mdio_signal[i];
+ //printf("mdio_signal0[%d] = %d\r\n", i, mdio_signal[i]);
+ }
+
+ HwGpioSetMode(MDIO_WRITE);//write
+ //GPIO_HIGH(mdio_gpio);//let MDIO can be push by MDC
+ //delayDmtSymbols(1);
+
+ for (i=0;i<116*2+16;i++)
+ {
+ //printk("mdiod:%d ",mdio_signal_real[i]);
+ //if(i % 10 == 0)
+ // printk("\n");
+ if(i<42*2)
+ {
+ if (mdc_signal[i] ==0)
+ {
+ GPIO_LOW(mdc_gpio);
+ //delayDmtSymbols(1);
+ }
+ else if (mdc_signal[i] ==1)
+ {
+ GPIO_HIGH(mdc_gpio);
+ //delayDmtSymbols(1);
+ }
+
+ if (mdio_signal_real[i] ==0)
+ {
+ GPIO_LOW(mdio_gpio);
+ //delayDmtSymbols(1);
+ }
+ else if (mdio_signal_real[i] ==1)
+ {
+ GPIO_HIGH(mdio_gpio);
+ //delayDmtSymbols(1);
+ }
+ }
+ else if (i==42*2)
+ {
+ if (mdc_signal[i] ==0)
+ {
+ GPIO_LOW(mdc_gpio);
+ //delayDmtSymbols(1);
+ }
+ else if (mdc_signal[i] ==1)
+ {
+ GPIO_HIGH(mdc_gpio);
+ //delayDmtSymbols(1);
+ }
+
+ //delayDmtSymbols(2);
+ HwGpioSetMode(MDIO_READ);//read
+ }
+ else
+ {
+ //delayDmtSymbols(1);
+ if (mdc_signal[i] ==0)
+ {
+ GPIO_LOW(mdc_gpio);
+ //delayDmtSymbols(1);
+ }
+ else if (mdc_signal[i] ==1)
+ {
+ GPIO_HIGH(mdc_gpio);
+ //delayDmtSymbols(1);
+ if((i>42*2)&&(i<116*2+2))
+ {
+ mdio_signal_real[i-2] = GPIO_VALUE(mdio_gpio);
+ }
+ }
+
+ }
+ }
+
+ for (i=0;i<116;i++)
+ {
+ mdio_signal[i]=mdio_signal_real[i*2];
+ //printf("mdio_signal1[%d] = %d\r\n", i, mdio_signal[i]);
+ }
+// get data from mdio_signal
+ for(i=0;i<32;i++)
+ {
+		if((reg>=0x7)&&(reg<=0xff))//control register space //data low 32bit
+ {
+ Data[i]=mdio_signal[i+84];
+ }
+ else if(((reg&0xf)==0x0)||((reg&0xf)==0x8)) //high 32bit addr bit[33:32] = 0x10
+ {
+ Data[i]=mdio_signal[i+52];
+ }
+ else if(((reg&0xf)==0x4)||((reg&0xf)==0xc))//low 32bit addr bit[33:32] = 0x01
+ {
+ Data[i]=mdio_signal[i+84];
+ }
+ //printf("Data[%d] = %d\r\n", i, Data[i]);
+ }
+ data=bit2Word(Data);
+#ifdef DBG
+ printk("%s(%d)Exit \n",__func__,__LINE__);
+#endif
+ return data;
+
+}
+
+void exModeMDIOGpioQuery(uint8* mdc_gpio_num,uint8* mdio_gpio_num){
+#ifdef DBG
+ printk("%s(%d) Entry\n",__func__,__LINE__);
+#endif
+ if(mdc_gpio_num && mdio_gpio_num){
+ *mdc_gpio_num = mdc_gpio;
+ *mdio_gpio_num = mdio_gpio;
+#ifdef DBG
+ printk("%s(%d) Exit\n",__func__,__LINE__);
+#endif
+ }
+ else{
+ printk("%s(%d) Err Exit:NULL Ptr\n",__func__,__LINE__);
+ }
+
+}
+
+int exModeMDIOGpioConf(uint8 mdc_gpio_num,uint8 mdio_gpio_num){
+#ifdef DBG
+ printk("%s(%d):mdc/mdio gpio:%d/%d \n",__func__,__LINE__,mdc_gpio_num,mdio_gpio_num);
+#endif
+ if((mdc_gpio_num >0) && (mdio_gpio_num > 0)){
+ mdc_gpio = mdc_gpio_num;
+ mdio_gpio = mdio_gpio_num;
+ return 1;
+ }
+ else{
+ return 0;
+ }
+
+}
+EXPORT_SYMBOL(exModeMDIOGpioQuery);
+EXPORT_SYMBOL(exModeMDIOGpioConf);
+EXPORT_SYMBOL(exModeMDIORead);
+EXPORT_SYMBOL(exModeMDIOWrite);
Index: linux-3.18.21/arch/mips/econet/irq.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/econet/irq.c 2018-02-05 13:19:53.000000000 +0800
@@ -0,0 +1,1067 @@
+/*
+ * Interrupt service routines for Trendchip board
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/mipsregs.h>
+#include <asm/tc3162/tc3162.h>
+#include <linux/sched.h>
+#include <asm/setup.h>
+
+#ifdef CONFIG_MIPS_TC3262
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/kernel_stat.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <asm/mipsmtregs.h>
+#else
+#include <linux/io.h>
+#include <asm/irq_cpu.h>
+#endif
+
+#define PHY_TO_K1(x) (((unsigned int)x) | 0xa0000000)
+
+#if defined (CONFIG_IRQ_GIC)
+#include <asm/gic.h>
+#include <asm/mips-cm.h>
+#include <linux/proc_fs.h>
+#include <ecnt_hook/ecnt_hook_irq_num.h>
+#include <ecnt_hook/ecnt_hook_cpu_interrupt_type.h>
+
+extern unsigned int gic_present;
+
+#ifdef TCSUPPORT_MIPS_1004K
+#define irq_num_hook_name "irq_num_hook"
+#define CPU_INTERRUPT_MAJOR 224
+
+#define TIMER0_INTSRC 30
+#define TIMER1_INTSRC 29
+#define TIMER2_INTSRC 37
+#define TIMER3_INTSRC 36
+
+#define IPI_CALL0_INTSRC 34
+#define IPI_CALL1_INTSRC 61
+#define IPI_CALL2_INTSRC 62
+#define IPI_CALL3_INTSRC 63
+
+#define IPI_RESCHED0_INTSRC 7
+#define IPI_RESCHED1_INTSRC 8
+#define IPI_RESCHED2_INTSRC 12
+#define IPI_RESCHED3_INTSRC 13
+
+#define IRQ_NUM_MAX_VALUE 63
+
+typedef int (*cpu_interrupt_api_op_t)(struct ecnt_cpu_interrupt_data * data);
+
+unsigned int gicVecPlus1_to_intSrc_arr[IRQ_NUM_MAX_VALUE+1];
+ecnt_ret_val ecnt_irq_num_hook(struct ecnt_data *in_data);
+int cpu_interrupt_api_get_irqnum(struct ecnt_cpu_interrupt_data *data);
+int cpu_interrupt_api_show_interrupts(struct ecnt_cpu_interrupt_data *data);
+int cpu_interrupt_api_check_intrName(struct ecnt_cpu_interrupt_data *data);
+static long cpu_interrupt_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
+struct file_operations cpu_interrupt_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = cpu_interrupt_ioctl,
+};
+
+struct ecnt_hook_ops ecnt_irq_num_op = {
+ .name = irq_num_hook_name,
+ .hookfn = ecnt_irq_num_hook,
+ .maintype = ECNT_IRQ_NUM,
+ .is_execute = 1,
+ .subtype = ECNT_DRIVER_API,
+ .priority = 1
+};
+
+/* Warning: same sequence with enum 'IRQ_NUM_HookFunction_t' in ecnt_hook_irq_num.h */
+unsigned char* irq_name_arr[] = {
+ IRQ_NAME_DMT,
+ IRQ_NAME_PCM1,
+ IRQ_NAME_PCM2,
+};
+
+static cpu_interrupt_api_op_t cpu_interrupt_operation[] = {
+ cpu_interrupt_api_get_irqnum,
+ cpu_interrupt_api_show_interrupts,
+ cpu_interrupt_api_check_intrName,
+};
+
+int timers_intSrcNum[NR_CPUS] = {TIMER0_INTSRC, TIMER1_INTSRC, TIMER2_INTSRC, TIMER3_INTSRC};
+int ipi_call_intSrcNum[NR_CPUS] = {IPI_CALL0_INTSRC, IPI_CALL1_INTSRC, IPI_CALL2_INTSRC, IPI_CALL3_INTSRC};
+int ipi_resched_intSrcNum[NR_CPUS] = {IPI_RESCHED0_INTSRC, IPI_RESCHED1_INTSRC, IPI_RESCHED2_INTSRC, IPI_RESCHED3_INTSRC};
+#endif
+
+#define X GIC_UNUSED
+
+/* When vector is changed, the "smp_affinity" settings in rcS may need to be changed */
+static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] =
+{/* cpu, irqNum-1, polarity, triggerType, flags, name, Src fullname */
+ {cpu0, CPU_CM_ERR-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, NULL }, /* 0 CPU Coherence Manager Error */
+ {cpu0, CPU_CM_PCINT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, NULL }, /* 1 CPU CM Perf Cnt overflow */
+ {cpu0, UART_INT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_UART }, /* 2 uart */
+ {X, DRAM_PROTECTION-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_DRAM_PROTECT},/* 3 dram illegal access */
+ {cpu0, TIMER0_INT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_TIMER0}, /* 4 timer 0 */
+ {cpu0, TIMER1_INT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_TIMER1 },/* 5 timer 1 */
+ {cpu0, TIMER2_INT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_TIMER2 },/* 6 timer 2 */
+ {cpu0, IPI_RESCHED_INT0-1, GIC_POL_POS, GIC_TRIG_EDGE, GIC_FLAG_IPI, NULL }, /* 7 ipi resched 0 */
+ {cpu1, IPI_RESCHED_INT1-1, GIC_POL_POS, GIC_TRIG_EDGE, GIC_FLAG_IPI, NULL }, /* 8 ipi resched 1 */
+ {cpu0, TIMER5_INT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_WATCHDOG },/* 9 timer 3 for wdog */
+ {cpu0, GPIO_INT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_GPIO }, /* 10 GPIO */
+ {cpu0, PCM1_INT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_PCM1 }, /* 11 PCM 1 */
+ {cpu2, IPI_RESCHED_INT2-1, GIC_POL_POS, GIC_TRIG_EDGE, GIC_FLAG_IPI, NULL }, /* 12 ipi resched 2 */
+ {cpu3, IPI_RESCHED_INT3-1, GIC_POL_POS, GIC_TRIG_EDGE, GIC_FLAG_IPI, NULL }, /* 13 ipi resched 3 */
+ {cpu0, GDMA_INTR-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_GDMA }, /* 14 GDMA */
+ {cpu0, MAC1_INT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_GIGA_SWITCH},/*15 LAN Giga Switch */
+ {cpu0, UART2_INT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_UART2 }, /* 16 uart 2 */
+ {cpu0, IRQ_RT3XXX_USB-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_USB }, /* 17 USB host */
+ {cpu0, DYINGGASP_INT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_DYING_GASP},/* 18 Dying gasp */
+ {cpu0, DMT_INT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_DMT }, /* 19 xDSL DMT */
+ {X, RESVINT0-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, NULL }, /* 20 reserved */
+ {cpu0, QDMA_LAN0_INTR-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_QDMA_LAN0},/* 21 QDMA LAN 0 */
+ {cpu0, QDMA_WAN0_INTR-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_QDMA_WAN0},/* 22 QDMA WAN 0 */
+ {cpu0, PCIE_0_INT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_PCIE0 }, /* 23 PCIE port 0 */
+ {cpu0, PCIE_A_INT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_PCIE1 }, /* 24 PCIE port 1 */
+ {cpu0, PCIE_SERR_INT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_PCIE_ERR},/* 25 PCIE error */
+ {cpu0, XPON_MAC_INTR-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_XPON_MAC },/* 26 XPON MAC */
+ {cpu0, XPON_PHY_INTR-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_XPON_PHY },/* 27 XPON PHY */
+ {cpu0, CRYPTO_INT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_CRYPTO },/* 28 Crypto engine */
+ {cpu1, SI_TIMER_INT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, NULL }, /* 29 external CPU timer 1 when bfbf0400[1]=1*/
+ {cpu0, SI_TIMER_INT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, NULL }, /* 30 external CPU timer 0 when bfbf0400[0]=1*/
+ {cpu0, BUS_TOUT_INT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_PBUS_TIMEOUT},/* 31 Pbus timeout */
+ {cpu0, PCM2_INT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_PCM2 }, /* 32 PCM 2 */
+ {cpu0, FE_ERR_INTR-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_FE_ERR },/* 33 Frame Engine Error */
+ {cpu0, IPI_CALL_INT0-1, GIC_POL_POS, GIC_TRIG_EDGE, GIC_FLAG_IPI, NULL }, /* 34 ipi call 0 */
+ {cpu0, AUTO_MANUAL_INT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_SPI }, /* 35 SPI */
+ {cpu3, SI_TIMER_INT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, NULL }, /* 36 external CPU timer 3 when bfbe0000[1]=1*/
+ {cpu2, SI_TIMER_INT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, NULL }, /* 37 external CPU timer 2 when bfbe0000[1]=1*/
+ {cpu0, UART3_INT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_UART3 }, /* 38 UART3 */
+ {cpu0, QDMA_LAN1_INTR-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_QDMA_LAN1},/* 39 QDMA LAN 1 */
+ {cpu0, QDMA_LAN2_INTR-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_QDMA_LAN2 },/* 40 QDMA LAN 2 */
+ {cpu0, QDMA_LAN3_INTR-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_QDMA_LAN3 },/* 41 QDMA LAN 3 */
+ {cpu0, QDMA_WAN1_INTR-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_QDMA_WAN1},/* 42 QDMA WAN 1 */
+ {cpu0, QDMA_WAN2_INTR-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_QDMA_WAN2},/* 43 QDMA WAN 2 */
+ {cpu0, QDMA_WAN3_INTR-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_QDMA_WAN3},/* 44 QDMA WAN 3 */
+ {cpu0, UART4_INT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_UART4 }, /* 45 UART 4 */
+ {cpu0, UART5_INT-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_UART5 }, /* 46 UART 5 */
+ {cpu0, HSDMA_INTR-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_HSDMA}, /* 47 High Speed DMA */
+ {cpu0, USB_HOST_2-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_USB2 }, /* 48 USB host 2 (port1) */
+ {cpu0, XSI_MAC_INTR-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_XSI_MAC},/* 49 XFI/HGSMII MAC interface */
+ {cpu0, XSI_PHY_INTR-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_XSI_PHY},/* 50 XFI/HGSMII PHY interface */
+ {cpu0, WOE0_INTR-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_WOE0}, /* 51 WIFI Offload Engine 0 */
+ {cpu0, WOE1_INTR-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_WOE1}, /* 52 WIFI Offload Engine 1 */
+ {cpu0, WDMA0_P0_INTR-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_WDMA0_P0},/* 53 WIFI DMA 0 port 0 */
+ {cpu0, WDMA0_P1_INTR-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_WDMA0_P1},/* 54 WIFI DMA 0 port 1 */
+ {cpu0, WDMA0_WOE_INTR-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_WDMA0_WOE},/* 55 WIFI DMA 0 for WOE */
+ {cpu0, WDMA1_P0_INTR-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_WDMA1_P0},/* 56 WIFI DMA 1 port 0 */
+	{cpu0, WDMA1_P1_INTR-1,   GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_WDMA1_P1},/* 57 WIFI DMA 1 port 1 */
+ {cpu0, WDMA1_WOE_INTR-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_WDMA1_WOE},/* 58 WIFI DMA 1 for WOE */
+ {cpu0, EFUSE_ERR0_INTR-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_EFUSE_ERR0},/* 59 efuse error for not setting key */
+ {cpu0, EFUSE_ERR1_INTR-1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_IPI, IRQ_NAME_EFUSE_ERR1},/* 60 efuse error for prev action not finished */
+ {cpu1, IPI_CALL_INT1-1, GIC_POL_POS, GIC_TRIG_EDGE, GIC_FLAG_IPI, NULL }, /* 61 ipi call 1 */
+ {cpu2, IPI_CALL_INT2-1, GIC_POL_POS, GIC_TRIG_EDGE, GIC_FLAG_IPI, NULL }, /* 62 ipi call 2 */
+ {cpu3, IPI_CALL_INT3-1, GIC_POL_POS, GIC_TRIG_EDGE, GIC_FLAG_IPI, NULL }, /* 63 ipi call 3 */
+
+};
+#undef X
+
+#ifdef TCSUPPORT_MIPS_1004K
+int get_irqNum_by_name(int *irqNumP, char *irqName)
+{
+ int i;
+
+ for (i=0; i<(GIC_NUM_INTRS); i++) {
+ if (gic_intr_map[i].name == NULL)
+ continue;
+
+ if (strncmp(gic_intr_map[i].name, irqName, strlen(gic_intr_map[i].name)) == 0) {
+ *(irqNumP) = gic_intr_map[i].pin+1;
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+void ecnt_register_cpu_interrupt_fops(void)
+{
+ if (register_chrdev(CPU_INTERRUPT_MAJOR, "/dev/cpu_interrupt", &cpu_interrupt_fops) < 0) {
+ printk(KERN_WARNING "cpu_interrupt: can't get major %d\n", CPU_INTERRUPT_MAJOR);
+ }
+ return;
+}
+
+static long cpu_interrupt_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int ret = 0 ;
+ struct ecnt_cpu_interrupt_data data;
+ struct ecnt_cpu_interrupt_data* puser = (struct ecnt_cpu_interrupt_data*)arg;
+
+ if (cmd >= CPU_INTERRUPT_FUNCTION_MAX_NUM) {
+ printk("\nError: cmd:%d is too large. Max is %d\n", cmd, CPU_INTERRUPT_FUNCTION_MAX_NUM);
+ return -1;
+ }
+
+ memset(&data,0,sizeof(struct ecnt_cpu_interrupt_data));
+ copy_from_user(&data, puser ,sizeof(struct ecnt_cpu_interrupt_data));
+ ret = cpu_interrupt_operation[data.function_id](&data);
+ copy_to_user(puser,&data,sizeof(struct ecnt_cpu_interrupt_data));
+
+ return ret;
+}
+
+int cpu_interrupt_api_get_irqnum(struct ecnt_cpu_interrupt_data *data)
+{
+ if (get_irqNum_by_name(&(data->irqNum), data->irqString)==0)
+ data->retValue = 0;
+ else
+ data->retValue = -1;
+
+ return data->retValue;
+}
+
+int cpu_interrupt_api_show_interrupts(struct ecnt_cpu_interrupt_data *data)
+{
+ int i;
+ unsigned int cpu, value;
+
+ printk("\n");
+ printk("hwIntr swIntr CPU Name\n");
+
+ for (i=0; i<(GIC_NUM_INTRS); i++) {
+
+ if (gic_intr_map[i].name == NULL)
+ continue;
+
+ value = GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_VPE_REG_OFF(i, 0));
+
+ for (cpu=0; cpu<4; cpu++) {
+ if ((value>>cpu) & 0x1)
+ break;
+ }
+
+ printk("%d\t%d\t%d\t%s\n", i, gic_intr_map[i].pin+1, cpu, gic_intr_map[i].name);
+ }
+
+ printk("\n");
+
+ data->retValue = 0;
+ return data->retValue;
+}
+
+int cpu_interrupt_api_check_intrName(struct ecnt_cpu_interrupt_data *data)
+{
+ return cpu_interrupt_api_get_irqnum(data);
+}
+
+
+void ecnt_register_irq_num_op(void)
+{
+ if(ecnt_register_hook(&ecnt_irq_num_op)) {
+ printk("ecnt_irq_num_op register fail\n");
+ }
+ return;
+}
+
+ecnt_ret_val ecnt_irq_num_hook(struct ecnt_data *in_data)
+{
+ struct ECNT_IRQ_NUM_Data *irq_num_data = (struct ECNT_IRQ_NUM_Data *)in_data ;
+
+
+ if(irq_num_data->function_id >= IRQ_NUM_FUNCTION_MAX_NUM) {
+ printk("irq_num_data->function_id is %d, exceed max number: %d", irq_num_data->function_id, IRQ_NUM_FUNCTION_MAX_NUM);
+ return ECNT_HOOK_ERROR;
+ }
+
+ if (get_irqNum_by_name(&(irq_num_data->irqNum), irq_name_arr[irq_num_data->function_id])==0)
+ return ECNT_RETURN;
+
+ printk("\n%s can't get correct irq number\n", irq_name_arr[irq_num_data->function_id]);
+ return ECNT_HOOK_ERROR;
+}
+
+void ecnt_register_cpu_interrupts(void) {
+ ecnt_register_irq_num_op();
+ ecnt_register_cpu_interrupt_fops();
+ return;
+}
+EXPORT_SYMBOL(ecnt_register_cpu_interrupts);
+
+#endif /*TCSUPPORT_MIPS_1004K*/
+
+void init_gicVecPlus1_to_intSrc(void)
+{
+ int gicVec, intSrc;
+
+ /*irqVec: 1 ~ 63 -> gicVec: 0 ~ 62*/
+ for (gicVec=0; gicVec<IRQ_NUM_MAX_VALUE; gicVec++) {
+ for (intSrc = 0; intSrc < GIC_NUM_INTRS; intSrc++) {
+ if (gicVec == (gic_intr_map[intSrc].pin)) { /*use GIC vector to find intSrc*/
+ gicVecPlus1_to_intSrc_arr[gicVec+1] = intSrc;
+ break;
+ }
+ }
+ }
+
+ return;
+}
+
+/* translate gic_irq_controller's functions' "gicVec+1" into
+ * intSrc, because GIC uses intSrc number to set its registers */
+unsigned int gicVecPlus1_to_intSrc (unsigned int gicVecPlus1)
+{
+ return gicVecPlus1_to_intSrc_arr[gicVecPlus1];
+}
+EXPORT_SYMBOL(gicVecPlus1_to_intSrc);
+
+static int get_intSrc_by_irqNum(int irq)
+{
+ if (irq == SI_TIMER_INT)
+ irq = timers_intSrcNum[smp_processor_id()];
+ else
+ irq = gicVecPlus1_to_intSrc(irq);
+
+ return irq;
+}
+
+static void ecnt_gic_mask_irq(struct irq_data *d)
+{
+ int irq = (d->irq - gic_irq_base);
+
+ /* disable this interrupt */
+
+ GIC_CLR_INTR_MASK(get_intSrc_by_irqNum(irq));
+}
+
+static void ecnt_gic_unmask_irq(struct irq_data *d)
+{
+ int irq = (d->irq - gic_irq_base);
+
+ /* used to enable int mask during setup_irq */
+
+ GIC_SET_INTR_MASK(get_intSrc_by_irqNum(irq));
+}
+
+void gic_irq_ack(struct irq_data *d)
+{
+ int irq = (d->irq - gic_irq_base);
+
+ irq = get_intSrc_by_irqNum(irq);
+
+ GIC_CLR_INTR_MASK(irq);
+ if (gic_irq_flags[irq] & GIC_TRIG_EDGE)
+ GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq);
+}
+
+void gic_finish_irq(struct irq_data *d)
+{
+ int irq = (d->irq - gic_irq_base);
+
+ /* Enable interrupts. */
+ GIC_SET_INTR_MASK(get_intSrc_by_irqNum(irq));
+}
+
+#ifdef CONFIG_SMP
+static DEFINE_SPINLOCK(gic_lock);
+
+static int ecnt_gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
+ bool force)
+{
+ unsigned int irq = (d->irq - gic_irq_base);
+ cpumask_t tmp = CPU_MASK_NONE;
+ unsigned long flags;
+ int i;
+
+ cpumask_and(&tmp, cpumask, cpu_online_mask);
+ if (cpus_empty(tmp))
+ return -1;
+
+ /* Assumption : cpumask refers to a single CPU */
+ spin_lock_irqsave(&gic_lock, flags);
+
+ /* Re-route this IRQ */
+#ifdef TCSUPPORT_MIPS_1004K
+ GIC_SH_MAP_TO_VPE_SMASK(gicVecPlus1_to_intSrc(irq), first_cpu(tmp));
+#else
+ GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));
+#endif
+#if 0
+ /* Update the pcpu_masks */
+ for (i = 0; i < NR_CPUS; i++)
+ clear_bit(irq, pcpu_masks[i].pcpu_mask);
+ set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
+#endif
+ cpumask_copy(d->affinity, cpumask);
+ spin_unlock_irqrestore(&gic_lock, flags);
+
+ return IRQ_SET_MASK_OK_NOCOPY;
+}
+#endif
+
+/* ecnt_gic_irq_controller is originated from gic_irq_controller in irq-gic.c */
+static struct irq_chip ecnt_gic_irq_controller = {
+ .name = "ECNT MIPS GIC",
+ .irq_ack = gic_irq_ack,
+ .irq_mask = ecnt_gic_mask_irq,
+ .irq_mask_ack = ecnt_gic_mask_irq,
+ .irq_unmask = ecnt_gic_unmask_irq, /*used to enable int mask during setup_irq*/
+ .irq_eoi = gic_finish_irq,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = ecnt_gic_set_affinity,
+#endif
+};
+
+
+void __init gic_platform_init(int irqs, struct irq_chip *irq_controller)
+{
+ int i;
+
+ /*irqVec starts from 1 and ends at 63*/
+ for (i = gic_irq_base+1; i < (gic_irq_base + irqs); i++){
+ irq_set_chip(i, &ecnt_gic_irq_controller);
+ }
+}
+
+#ifdef CONFIG_MIPS_MT_SMP
+static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
+{
+ scheduler_ipi();
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
+{
+ smp_call_function_interrupt();
+ return IRQ_HANDLED;
+}
+
+static struct irqaction irq_resched = {
+ .handler = ipi_resched_interrupt,
+ .flags = IRQF_DISABLED|IRQF_PERCPU,
+ .name = "IPI_resched"
+};
+
+static struct irqaction irq_call = {
+ .handler = ipi_call_interrupt,
+ .flags = IRQF_DISABLED|IRQF_PERCPU,
+ .name = "IPI_call"
+};
+
+unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
+{
+ return ipi_call_intSrcNum[cpu];
+}
+
+unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
+{
+ return ipi_resched_intSrcNum[cpu];
+}
+
+void __init arch_init_ipiirq(int irq, struct irqaction *action)
+{
+ setup_irq(irq, action);
+ irq_set_handler(irq, handle_percpu_irq);
+}
+#endif /* CONFIG_MIPS_MT_SMP */
+#endif /*CONFIG_IRQ_GIC*/
+
+void disable_all_interrupts(void)
+{
+ #ifdef TCSUPPORT_MIPS_1004K
+ VPint(PHY_TO_K1(GIC_BASE_ADDR)+GIC_SH_RMASK_OFS) = 0xffffffff;
+ VPint(PHY_TO_K1(GIC_BASE_ADDR)+GIC_SH_RMASK_OFS+4) = 0xffffffff;
+ #else
+ VPint(CR_INTC_IMR) = 0x0;
+ VPint(CR_INTC_IMR_1) = 0x0;
+ #endif
+}
+EXPORT_SYMBOL(disable_all_interrupts);
+
+void disable_interrupt_by_intSrc(unsigned int intSrc)
+{
+ #ifdef TCSUPPORT_MIPS_1004K
+ GIC_CLR_INTR_MASK(intSrc);
+ #else
+ if (intSrc<=31)
+ VPint(CR_INTC_IMR) &=~(1<<intSrc);
+ else
+ VPint(CR_INTC_IMR_1) &=~(1<<intSrc);
+ #endif
+}
+EXPORT_SYMBOL(disable_interrupt_by_intSrc);
+
+void enable_interrupt_by_intSrc(unsigned int intSrc)
+{
+ #ifdef TCSUPPORT_MIPS_1004K
+ GIC_SET_INTR_MASK(intSrc);
+ #else
+ if (intSrc<=31)
+ VPint(CR_INTC_IMR) |=(1<<intSrc);
+ else
+ VPint(CR_INTC_IMR_1) |=(1<<intSrc);
+ #endif
+}
+EXPORT_SYMBOL(enable_interrupt_by_intSrc);
+
+#define ALLINTS (IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5)
+
+#ifdef CONFIG_MIPS_TC3262
+
+static DEFINE_SPINLOCK(tc3162_irq_lock);
+
+#ifndef TCSUPPORT_MIPS_1004K
+//static inline void unmask_mips_mt_irq(unsigned int irq)
+static inline void unmask_mips_mt_irq(struct irq_data *d)
+{
+ unsigned int irq = d->irq;
+ unsigned int vpflags = dvpe();
+ int cpu_irq = 0;
+
+ if ((irq == SI_SWINT1_INT1) || (irq == SI_SWINT_INT1))
+ cpu_irq = 1;
+
+ set_c0_status(0x100 << cpu_irq);
+ irq_enable_hazard();
+ evpe(vpflags);
+}
+
+//static inline void mask_mips_mt_irq(unsigned int irq)
+static inline void mask_mips_mt_irq(struct irq_data *d)
+{
+ unsigned int irq = d->irq;
+ unsigned int vpflags = dvpe();
+ int cpu_irq = 0;
+
+ if ((irq == SI_SWINT1_INT1) || (irq == SI_SWINT_INT1))
+ cpu_irq = 1;
+
+ clear_c0_status(0x100 << cpu_irq);
+ irq_disable_hazard();
+ evpe(vpflags);
+}
+
+//static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
+static unsigned int mips_mt_cpu_irq_startup(struct irq_data *d)
+{
+ unsigned int irq = d->irq;
+ unsigned int vpflags = dvpe();
+ unsigned long int tmp;
+ int cpu_irq = 0;
+
+ if ((irq == SI_SWINT1_INT1) || (irq == SI_SWINT_INT1))
+ cpu_irq = 1;
+#ifdef TCSUPPORT_MT7510_E1
+ READ_E1(CR_INTC_IMR);
+#endif
+ tmp = regRead32(CR_INTC_IMR);
+ tmp |= (1 << (irq-1));
+
+ if (irq == SI_SWINT_INT0){
+ tmp |= (1 << (SI_SWINT1_INT0-1));
+ }else if (irq == SI_SWINT_INT1){
+ tmp |= (1 << (SI_SWINT1_INT1-1));
+ }
+ regWrite32(CR_INTC_IMR, tmp);
+
+ clear_c0_cause(0x100 << cpu_irq);
+ evpe(vpflags);
+ unmask_mips_mt_irq(d);
+
+ return 0;
+}
+
+/*
+ * While we ack the interrupt interrupts are disabled and thus we don't need
+ * to deal with concurrency issues. Same for mips_cpu_irq_end.
+ */
+//static void mips_mt_cpu_irq_ack(unsigned int irq)
+static void mips_mt_cpu_irq_ack(struct irq_data *d)
+{
+ unsigned int irq = d->irq;
+ unsigned int vpflags = dvpe();
+ int cpu_irq = 0;
+
+ if ((irq == SI_SWINT1_INT1) || (irq == SI_SWINT_INT1))
+ cpu_irq = 1;
+
+ clear_c0_cause(0x100 << cpu_irq);
+ evpe(vpflags);
+ mask_mips_mt_irq(d);
+}
+
+static struct irq_chip mips_mt_cpu_irq_controller = {
+ .name = "MIPS",
+ .irq_startup = mips_mt_cpu_irq_startup,
+ .irq_ack = mips_mt_cpu_irq_ack,
+ .irq_mask = mask_mips_mt_irq,
+ .irq_mask_ack = mips_mt_cpu_irq_ack,
+ .irq_unmask = unmask_mips_mt_irq,
+ .irq_eoi = unmask_mips_mt_irq,
+};
+#endif /* ifndef TCSUPPORT_MIPS_1004K */
+
+#define __BUILD_IRQ_DISPATCH(irq_n) \
+static void __tc3262_irq_dispatch##irq_n(void) \
+{ \
+ do_IRQ(irq_n); \
+}
+
+#define __BUILD_IRQ_DISPATCH_FUNC(irq_n) __tc3262_irq_dispatch##irq_n
+
+/* pre-built 64 irq dispatch function */
+__BUILD_IRQ_DISPATCH(0)
+__BUILD_IRQ_DISPATCH(1)
+__BUILD_IRQ_DISPATCH(2)
+__BUILD_IRQ_DISPATCH(3)
+__BUILD_IRQ_DISPATCH(4)
+__BUILD_IRQ_DISPATCH(5)
+__BUILD_IRQ_DISPATCH(6)
+__BUILD_IRQ_DISPATCH(7)
+__BUILD_IRQ_DISPATCH(8)
+__BUILD_IRQ_DISPATCH(9)
+__BUILD_IRQ_DISPATCH(10)
+__BUILD_IRQ_DISPATCH(11)
+__BUILD_IRQ_DISPATCH(12)
+__BUILD_IRQ_DISPATCH(13)
+__BUILD_IRQ_DISPATCH(14)
+__BUILD_IRQ_DISPATCH(15)
+__BUILD_IRQ_DISPATCH(16)
+__BUILD_IRQ_DISPATCH(17)
+__BUILD_IRQ_DISPATCH(18)
+__BUILD_IRQ_DISPATCH(19)
+__BUILD_IRQ_DISPATCH(20)
+__BUILD_IRQ_DISPATCH(21)
+__BUILD_IRQ_DISPATCH(22)
+__BUILD_IRQ_DISPATCH(23)
+__BUILD_IRQ_DISPATCH(24)
+__BUILD_IRQ_DISPATCH(25)
+__BUILD_IRQ_DISPATCH(26)
+__BUILD_IRQ_DISPATCH(27)
+__BUILD_IRQ_DISPATCH(28)
+__BUILD_IRQ_DISPATCH(29)
+__BUILD_IRQ_DISPATCH(30)
+__BUILD_IRQ_DISPATCH(31)
+__BUILD_IRQ_DISPATCH(32)
+__BUILD_IRQ_DISPATCH(33)
+__BUILD_IRQ_DISPATCH(34)
+__BUILD_IRQ_DISPATCH(35)
+__BUILD_IRQ_DISPATCH(36)
+__BUILD_IRQ_DISPATCH(37)
+__BUILD_IRQ_DISPATCH(38)
+__BUILD_IRQ_DISPATCH(39)
+__BUILD_IRQ_DISPATCH(40)
+__BUILD_IRQ_DISPATCH(41)
+__BUILD_IRQ_DISPATCH(42)
+__BUILD_IRQ_DISPATCH(43)
+__BUILD_IRQ_DISPATCH(44)
+__BUILD_IRQ_DISPATCH(45)
+__BUILD_IRQ_DISPATCH(46)
+__BUILD_IRQ_DISPATCH(47)
+__BUILD_IRQ_DISPATCH(48)
+__BUILD_IRQ_DISPATCH(49)
+__BUILD_IRQ_DISPATCH(50)
+__BUILD_IRQ_DISPATCH(51)
+__BUILD_IRQ_DISPATCH(52)
+__BUILD_IRQ_DISPATCH(53)
+__BUILD_IRQ_DISPATCH(54)
+__BUILD_IRQ_DISPATCH(55)
+__BUILD_IRQ_DISPATCH(56)
+__BUILD_IRQ_DISPATCH(57)
+__BUILD_IRQ_DISPATCH(58)
+__BUILD_IRQ_DISPATCH(59)
+__BUILD_IRQ_DISPATCH(60)
+__BUILD_IRQ_DISPATCH(61)
+__BUILD_IRQ_DISPATCH(62)
+__BUILD_IRQ_DISPATCH(63)
+
+/* register pre-built 64 irq dispatch function */
+static void (*irq_dispatch_tab[])(void) =
+{
+__BUILD_IRQ_DISPATCH_FUNC(0),
+__BUILD_IRQ_DISPATCH_FUNC(1),
+__BUILD_IRQ_DISPATCH_FUNC(2),
+__BUILD_IRQ_DISPATCH_FUNC(3),
+__BUILD_IRQ_DISPATCH_FUNC(4),
+__BUILD_IRQ_DISPATCH_FUNC(5),
+__BUILD_IRQ_DISPATCH_FUNC(6),
+__BUILD_IRQ_DISPATCH_FUNC(7),
+__BUILD_IRQ_DISPATCH_FUNC(8),
+__BUILD_IRQ_DISPATCH_FUNC(9),
+__BUILD_IRQ_DISPATCH_FUNC(10),
+__BUILD_IRQ_DISPATCH_FUNC(11),
+__BUILD_IRQ_DISPATCH_FUNC(12),
+__BUILD_IRQ_DISPATCH_FUNC(13),
+__BUILD_IRQ_DISPATCH_FUNC(14),
+__BUILD_IRQ_DISPATCH_FUNC(15),
+__BUILD_IRQ_DISPATCH_FUNC(16),
+__BUILD_IRQ_DISPATCH_FUNC(17),
+__BUILD_IRQ_DISPATCH_FUNC(18),
+__BUILD_IRQ_DISPATCH_FUNC(19),
+__BUILD_IRQ_DISPATCH_FUNC(20),
+__BUILD_IRQ_DISPATCH_FUNC(21),
+__BUILD_IRQ_DISPATCH_FUNC(22),
+__BUILD_IRQ_DISPATCH_FUNC(23),
+__BUILD_IRQ_DISPATCH_FUNC(24),
+__BUILD_IRQ_DISPATCH_FUNC(25),
+__BUILD_IRQ_DISPATCH_FUNC(26),
+__BUILD_IRQ_DISPATCH_FUNC(27),
+__BUILD_IRQ_DISPATCH_FUNC(28),
+__BUILD_IRQ_DISPATCH_FUNC(29),
+__BUILD_IRQ_DISPATCH_FUNC(30),
+__BUILD_IRQ_DISPATCH_FUNC(31),
+__BUILD_IRQ_DISPATCH_FUNC(32),
+__BUILD_IRQ_DISPATCH_FUNC(33),
+__BUILD_IRQ_DISPATCH_FUNC(34),
+__BUILD_IRQ_DISPATCH_FUNC(35),
+__BUILD_IRQ_DISPATCH_FUNC(36),
+__BUILD_IRQ_DISPATCH_FUNC(37),
+__BUILD_IRQ_DISPATCH_FUNC(38),
+__BUILD_IRQ_DISPATCH_FUNC(39),
+__BUILD_IRQ_DISPATCH_FUNC(40),
+__BUILD_IRQ_DISPATCH_FUNC(41),
+__BUILD_IRQ_DISPATCH_FUNC(42),
+__BUILD_IRQ_DISPATCH_FUNC(43),
+__BUILD_IRQ_DISPATCH_FUNC(44),
+__BUILD_IRQ_DISPATCH_FUNC(45),
+__BUILD_IRQ_DISPATCH_FUNC(46),
+__BUILD_IRQ_DISPATCH_FUNC(47),
+__BUILD_IRQ_DISPATCH_FUNC(48),
+__BUILD_IRQ_DISPATCH_FUNC(49),
+__BUILD_IRQ_DISPATCH_FUNC(50),
+__BUILD_IRQ_DISPATCH_FUNC(51),
+__BUILD_IRQ_DISPATCH_FUNC(52),
+__BUILD_IRQ_DISPATCH_FUNC(53),
+__BUILD_IRQ_DISPATCH_FUNC(54),
+__BUILD_IRQ_DISPATCH_FUNC(55),
+__BUILD_IRQ_DISPATCH_FUNC(56),
+__BUILD_IRQ_DISPATCH_FUNC(57),
+__BUILD_IRQ_DISPATCH_FUNC(58),
+__BUILD_IRQ_DISPATCH_FUNC(59),
+__BUILD_IRQ_DISPATCH_FUNC(60),
+__BUILD_IRQ_DISPATCH_FUNC(61),
+__BUILD_IRQ_DISPATCH_FUNC(62),
+__BUILD_IRQ_DISPATCH_FUNC(63)
+};
+
+#endif
+#ifndef TCSUPPORT_MIPS_1004K
+//__IMEM static inline void unmask_mips_irq(unsigned int irq)
+__IMEM static inline void unmask_mips_irq(struct irq_data *data)
+{
+ //pr_info("\nUNMASK_mips_irq");
+ unsigned int irq = data->irq;
+#ifdef CONFIG_MIPS_TC3262
+ unsigned long flags;
+ unsigned long int tmp;
+ int cpu = smp_processor_id();
+
+ //printk("unmask_mips_irq: 1! irq is %d, \r\n", irq);
+
+ spin_lock_irqsave(&tc3162_irq_lock, flags);
+#ifdef CONFIG_MIPS_MT_SMTC
+ if (cpu_data[cpu].vpe_id != 0) {
+#else
+ if (cpu != 0) {
+#endif
+ if (irq == SI_TIMER_INT)
+ irq = SI_TIMER1_INT;
+ }
+
+ //printk("unmask_mips_irq: 2! irq is %d, \r\n", irq);
+
+ if (irq <= 32)
+ {
+#ifdef TCSUPPORT_MT7510_E1
+ READ_E1(CR_INTC_IMR);
+#endif
+ tmp = regRead32(CR_INTC_IMR);
+ tmp |= (1 << (irq-1));
+ regWrite32(CR_INTC_IMR, tmp);
+
+ //printk("unmask_mips_irq: entered! irq is %d, CR_INTC_IMR %08x write value is [%08x]\r\n", irq, CR_INTC_IMR, tmp);
+ //tmp = regRead32(CR_INTC_IMR);
+ //printk("unmask_mips_irq: entered! irq is %d, REREAD CR_INTC_IMR %08x write value is [%08x]\r\n", irq, CR_INTC_IMR, tmp);
+
+ }else
+ {
+#ifdef TCSUPPORT_MT7510_E1
+ READ_E1(CR_INTC_IMR_1);
+#endif
+ tmp = regRead32(CR_INTC_IMR_1);
+ tmp |= (1 << (irq-33));
+ regWrite32(CR_INTC_IMR_1, tmp);
+ //printk("unmask_mips_irq: entered! irq is %d, CR_INTC_IMR_1 %08x write value is [%08x]\r\n", irq, CR_INTC_IMR_1, tmp);
+ //tmp = regRead32(CR_INTC_IMR_1);
+ //printk("unmask_mips_irq: entered! irq is %d, REREAD CR_INTC_IMR_1 %08x write value is [%08x]\r\n", irq, CR_INTC_IMR_1, tmp);
+
+ }
+ spin_unlock_irqrestore(&tc3162_irq_lock, flags);
+#else
+ VPint(CR_INTC_IMR) |= (1 << irq);
+#endif
+}
+
+//__IMEM static inline void mask_mips_irq(unsigned int irq)
+__IMEM static inline void mask_mips_irq(struct irq_data *data)
+{
+ unsigned int irq = data->irq;
+#ifdef CONFIG_MIPS_TC3262
+ unsigned long flags;
+ unsigned long int tmp;
+ int cpu = smp_processor_id();
+ //printk("mask_mips_irq: 1! irq is %d, \r\n", irq);
+
+ spin_lock_irqsave(&tc3162_irq_lock, flags);
+#ifdef CONFIG_MIPS_MT_SMTC
+ if (cpu_data[cpu].vpe_id != 0) {
+#else
+ if (cpu != 0) {
+#endif
+ if (irq == SI_TIMER_INT)
+ irq = SI_TIMER1_INT;
+ }
+ //printk("mask_mips_irq: 2! irq is %d, \r\n", irq);
+
+ if (irq <= 32){
+#ifdef TCSUPPORT_MT7510_E1
+ READ_E1(CR_INTC_IMR);
+#endif
+ tmp = regRead32(CR_INTC_IMR);
+ tmp &= ~(1 << (irq-1));
+ regWrite32(CR_INTC_IMR, tmp);
+
+ //printk("mask_mips_irq: entered! irq is %d, CR_INTC_IMR %08x write value is [%08x]\r\n", irq, CR_INTC_IMR, tmp);
+ //tmp = regRead32(CR_INTC_IMR);
+ //printk("mask_mips_irq: entered! irq is %d, REREAD CR_INTC_IMR %08x write value is [%08x]\r\n", irq, CR_INTC_IMR, tmp);
+
+ }else{
+#ifdef TCSUPPORT_MT7510_E1
+ READ_E1(CR_INTC_IMR_1);
+#endif
+ tmp = regRead32(CR_INTC_IMR_1);
+ tmp &= ~(1 << (irq-33));
+ regWrite32(CR_INTC_IMR_1, tmp);
+ //printk("mask_mips_irq: entered! irq is %d, CR_INTC_IMR_1 %08x write value is [%08x]\r\n", irq, CR_INTC_IMR_1, tmp);
+ //tmp = regRead32(CR_INTC_IMR_1);
+ //printk("mask_mips_irq: entered! irq is %d, REREAD CR_INTC_IMR_1 %08x write value is [%08x]\r\n", irq, CR_INTC_IMR_1, tmp);
+
+ }
+ spin_unlock_irqrestore(&tc3162_irq_lock, flags);
+#else
+ VPint(CR_INTC_IMR) &= ~(1 << irq);
+#endif
+}
+#endif /* ifndef TCSUPPORT_MIPS_1004K */
+
+void tc3162_enable_irq(unsigned int irq) /* the irq means intSrc+1 in 34K, but means intVec+1 in 1004K */
+{
+#ifdef CONFIG_MIPS_TC3262
+ unsigned long flags;
+ unsigned long int tmp;
+
+ spin_lock_irqsave(&tc3162_irq_lock, flags);
+#ifdef TCSUPPORT_MIPS_1004K
+ GIC_SET_INTR_MASK(get_intSrc_by_irqNum(irq));
+
+#else
+ if (irq <= 32){
+#ifdef TCSUPPORT_MT7510_E1
+ READ_E1(CR_INTC_IMR);
+#endif
+ tmp = regRead32(CR_INTC_IMR);
+ tmp |= (1 << (irq-1));
+ regWrite32(CR_INTC_IMR, tmp);
+ }else{
+#ifdef TCSUPPORT_MT7510_E1
+ READ_E1(CR_INTC_IMR_1);
+#endif
+ tmp = regRead32(CR_INTC_IMR_1);
+ tmp |= (1 << (irq-33));
+ regWrite32(CR_INTC_IMR_1, tmp);
+ }
+#endif
+ spin_unlock_irqrestore(&tc3162_irq_lock, flags);
+#else
+ VPint(CR_INTC_IMR) |= (1 << irq);
+#endif
+}
+EXPORT_SYMBOL(tc3162_enable_irq);
+
+void tc3162_disable_irq(unsigned int irq) /* the irq means intSrc+1 in 34K, but means intVec+1 in 1004K */
+{
+#ifdef CONFIG_MIPS_TC3262
+ unsigned long flags;
+ unsigned long int tmp;
+
+ spin_lock_irqsave(&tc3162_irq_lock, flags);
+#ifdef TCSUPPORT_MIPS_1004K
+ GIC_CLR_INTR_MASK(get_intSrc_by_irqNum(irq));
+
+#else
+ if (irq <= 32){
+#ifdef TCSUPPORT_MT7510_E1
+ READ_E1(CR_INTC_IMR);
+#endif
+ tmp = regRead32(CR_INTC_IMR);
+ tmp &= ~(1 << (irq-1));
+ regWrite32(CR_INTC_IMR, tmp);
+ }else{
+#ifdef TCSUPPORT_MT7510_E1
+ READ_E1(CR_INTC_IMR_1);
+#endif
+ tmp = regRead32(CR_INTC_IMR_1);
+ tmp &= ~(1 << (irq-33));
+ regWrite32(CR_INTC_IMR_1, tmp);
+ }
+#endif
+ spin_unlock_irqrestore(&tc3162_irq_lock, flags);
+#else
+ VPint(CR_INTC_IMR) &= ~(1 << (irq-1));
+#endif
+}
+EXPORT_SYMBOL(tc3162_disable_irq);
+#ifndef TCSUPPORT_MIPS_1004K
+#ifdef CONFIG_MIPS_MT_SMP
+extern int plat_set_irq_affinity(unsigned int irq,
+ const struct cpumask *affinity);
+#endif
+
+static struct irq_chip tc3162_irq_chip = {
+ .name = "MIPS",
+ .irq_ack = mask_mips_irq,
+ .irq_mask = mask_mips_irq,
+ .irq_mask_ack = mask_mips_irq,
+ .irq_unmask = unmask_mips_irq,
+ .irq_eoi = unmask_mips_irq,
+
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+ .irq_set_affinity = plat_set_irq_affinity,
+#else
+#ifdef CONFIG_MIPS_MT_SMP
+ .irq_set_affinity = plat_set_irq_affinity,
+#endif
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
+};
+#endif /* ifndef TCSUPPORT_MIPS_1004K */
+
+extern void vsmp_int_init(void);
+
+void __init arch_init_irq(void)
+{
+ unsigned int i;
+
+ /* Disable all hardware interrupts */
+ clear_c0_status(ST0_IM);
+ clear_c0_cause(CAUSEF_IP);
+
+#ifdef CONFIG_IRQ_GIC
+ if (mips_cm_base) {
+ write_gcr_gic_base(GIC_BASE_ADDR | CM_GCR_GIC_BASE_GICEN_MSK);
+ gic_present = 1;
+ }
+
+ if (gic_present) {
+ #ifdef TCSUPPORT_MIPS_1004K /*do this after everyone has filled the gic_intr_map table*/
+ init_gicVecPlus1_to_intSrc();
+ #endif
+ /* replace all interrupts' irq_chip with gic_irq_controller and set interrupts' properties */
+ gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, gic_intr_map,
+ ARRAY_SIZE(gic_intr_map), MIPS_GIC_IRQ_BASE);
+
+#if defined(CONFIG_MIPS_MT_SMP)
+ set_c0_status(STATUSF_IP7 | STATUSF_IP6 | STATUSF_IP5 | STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2);
+
+ /* set up ipi interrupts */
+ for (i = 0; i < NR_CPUS; i++) {
+ arch_init_ipiirq(IPI_RESCHED_INT0 + i, &irq_resched);
+ arch_init_ipiirq(IPI_CALL_INT0 + i, &irq_call);
+ }
+#else
+ set_c0_status(STATUSF_IP7 | STATUSF_IP6 | STATUSF_IP5 | STATUSF_IP2);
+#endif
+ }
+#endif /*CONFIG_IRQ_GIC*/
+ /* Initialize IRQ action handlers */
+#ifdef TCSUPPORT_MIPS_1004K
+ for (i = 1; i < NR_IRQS; i++) /* irqVec: 1~63 */
+ {
+ /* Note: irq_chip has already been assigned in gic_init() */
+
+ if (i == SI_TIMER_INT) {
+ irq_set_handler(i, handle_percpu_irq);
+ }
+ else if (i >= IPI_RESCHED_INT0) {
+ /* handle_percpu_irq has already been assigned for IPI resched/call interrupts earlier */
+ continue;
+ }
+ else {
+ irq_set_handler(i, handle_level_irq);
+ }
+ }
+
+#else
+ for (i = 0; i < NR_IRQS; i++) {
+#ifdef CONFIG_MIPS_TC3262
+ /*
+ * Only MT is using the software interrupts currently, so we just
+ * leave them uninitialized for other processors.
+ */
+ if (cpu_has_mipsmt) {
+ if ((i == SI_SWINT1_INT0) || (i == SI_SWINT1_INT1) ||
+ (i == SI_SWINT_INT0) || (i == SI_SWINT_INT1)) {
+ irq_set_chip(i, &mips_mt_cpu_irq_controller);
+ continue;
+ }
+ }
+
+ if ((i == SI_TIMER_INT) || (i == SI_TIMER1_INT))
+ irq_set_chip_and_handler(i, &tc3162_irq_chip,
+ handle_percpu_irq);
+ else
+ irq_set_chip_and_handler(i, &tc3162_irq_chip,
+ handle_level_irq);
+#else
+ irq_set_chip_and_handler(i, &tc3162_irq_chip,
+ handle_level_irq);
+#endif
+ }
+#endif /* TCSUPPORT_MIPS_1004K */
+
+#ifdef CONFIG_MIPS_TC3262
+ if (cpu_has_veic || cpu_has_vint) {
+ write_c0_status((read_c0_status() & ~ST0_IM ) |
+ (STATUSF_IP0 | STATUSF_IP1));
+
+ /* register irq dispatch functions */
+ for (i = 0; i < NR_IRQS; i++)
+ set_vi_handler(i, irq_dispatch_tab[i]);
+ } else {
+ change_c0_status(ST0_IM, ALLINTS);
+ }
+#else
+ /* Enable all interrupts */
+ change_c0_status(ST0_IM, ALLINTS);
+#endif
+#ifndef TCSUPPORT_MIPS_1004K /* vsmp_int_init's content has been moved to this file */
+#ifdef CONFIG_MIPS_MT_SMP
+ vsmp_int_init();
+#endif
+#endif
+}
+
+__IMEM asmlinkage void plat_irq_dispatch(void)
+{
+#ifdef CONFIG_MIPS_TC3262
+ int irq = ((read_c0_cause() & ST0_IM) >> 10);
+ pr_info("\nplat_irq_dispatch, irq = %d", irq);
+ do_IRQ(irq);
+#else
+ do_IRQ(VPint(CR_INTC_IVR));
+#endif
+}
+
Index: linux-3.18.21/arch/mips/econet/kprofile_hook.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/econet/kprofile_hook.c 2018-02-05 13:19:53.000000000 +0800
@@ -0,0 +1,18 @@
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+void (*profilingSetupHook)(unsigned int tbl_size) = NULL;
+void (*profilingEventHook)(unsigned int cntr, unsigned int event, unsigned int count,
+ unsigned int kernel, unsigned int user, unsigned int exl) = NULL;
+void (*profilingStartHook)(void) = NULL;
+void (*profilingStopHook)(void) = NULL;
+void (*profilingLogHook)(unsigned int label, unsigned int usr_data) = NULL;
+
+EXPORT_SYMBOL(profilingSetupHook);
+EXPORT_SYMBOL(profilingEventHook);
+EXPORT_SYMBOL(profilingStartHook);
+EXPORT_SYMBOL(profilingStopHook);
+EXPORT_SYMBOL(profilingLogHook);
+
Index: linux-3.18.21/arch/mips/econet/libcompileoption.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/econet/libcompileoption.c 2018-02-05 13:19:53.000000000 +0800
@@ -0,0 +1,2781 @@
+/******************************************************************************/
+/*
+ * Copyright (C) 1994-2014 EcoNet Technologies, Corp.
+ * All Rights Reserved.
+ *
+ * TrendChip Confidential; Need to Know only.
+ * Protected as an unpublished work.
+ *
+ * The computer program listings, specifications and documentation
+ * herein are the property of TrendChip Technologies, Corp. and
+ * shall not be reproduced, copied, disclosed, or used in whole or
+ * in part for any reason without the prior express written permission of
+ * TrendChip Technologies, Corp.
+ */
+/******************************************************************************/
+//#include <stdlib.h>
+#ifdef __KERNEL__
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/bootmem.h>
+#include <linux/blkdev.h>
+
+#include <asm/mipsmtregs.h>
+#include <asm/addrspace.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/time.h>
+#include <asm/traps.h>
+
+#include <linux/libcompileoption.h>
+#else
+#include "libcompileoption.h"
+#endif
+
+CompileOption_Val TCSUPPORT_MANUAL_ETHERNET_PORTMAP_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_MANUAL_ETHERNET_PORTMAP_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_VLAN_TAG_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_VLAN_TAG_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_VLAN_TAG_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_VLAN_TAG_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_TCAPI_ENHANCE_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_TCAPI_ENHANCE_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CUC_C5_2P_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CUC_C5_2P_VAL);
+#endif
+
+#if 0
+CompileOption_Val TCSUPPORT_CDS_VTAG_TRANSPARENT_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CDS_VTAG_TRANSPARENT_VAL);
+#endif
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PON_GD_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PON_GD_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PON_GDV20_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PON_GDV20_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PON_CZ_GDCS_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PON_CZ_GDCS_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PON_CY_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PON_CY_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_ZARLINK_LE89156A_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_ZARLINK_LE89156A_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_ZARLINK_LE89156B_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_ZARLINK_LE89156B_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PON_CY_JX_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PON_CY_JX_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PON_BIND2_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PON_BIND2_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PON_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PON_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PON_JS_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PON_JS_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_FJ_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_FJ_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PON_GX_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PON_GX_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PON_CQ_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PON_CQ_VAL);
+#endif
+
+
+CompileOption_Val TCSUPPORT_CT_UPG_PINGPONG_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_UPG_PINGPONG_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_BACKUPROMFILE_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_BACKUPROMFILE_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_PRODUCTIONLINE_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_PRODUCTIONLINE_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CUC_CWMP_PARAMETER_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CUC_CWMP_PARAMETER_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_NOTDEFAULTROMFILEAREA_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_NOTDEFAULTROMFILEAREA_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_MIDWARE_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_MIDWARE_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CDS_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CDS_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_VLAN_DOT1P_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_VLAN_DOT1P_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_VLAN_PASSTHROUGH_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_VLAN_PASSTHROUGH_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CUC_FJ_SFU_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CUC_FJ_SFU_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_BACKUPROMFILEENCHANCEMENT_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_BACKUPROMFILEENCHANCEMENT_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CUC_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CUC_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CY_PON_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CY_PON_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PHONEAPP_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PHONEAPP_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PON_C9_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PON_C9_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PON_JS_IP_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PON_JS_IP_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_VOIP_CRYPT_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_VOIP_CRYPT_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_VOIP_LED_APPCTRL_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_VOIP_LED_APPCTRL_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_PON_FH_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_PON_FH_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_VOIP_SIP_DNS_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_VOIP_SIP_DNS_VAL);
+#endif
+
+
+CompileOption_Val TCSUPPORT_AUTOBENCH_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_AUTOBENCH_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PON_YN_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PON_YN_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_DEV_ACCESS_TYPE_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_DEV_ACCESS_TYPE_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_ECN_GZ_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_ECN_GZ_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_WANNINDEX_INCREASE_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_WANNINDEX_INCREASE_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_MONITORCOLLECTOR_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_MONITORCOLLECTOR_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PPPOE_EMULATOR_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PPPOE_EMULATOR_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PON_NMG_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PON_NMG_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CY_E8_SFU_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CY_E8_SFU_VAL);
+#endif
+
+CompileOption_Val CT_COM_DEVICEREG_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(CT_COM_DEVICEREG_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_ITMS_CONFIG_AS_DEFAULT_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_ITMS_CONFIG_AS_DEFAULT_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_DMS_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_DMS_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_UPNP_DM_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_UPNP_DM_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_TR69_IP_HOST_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_TR69_IP_HOST_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_PON_IP_HOST_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_PON_IP_HOST_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_C1_MS_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_C1_MS_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_C1_ZY_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_C1_ZY_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PPPINFORM_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PPPINFORM_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_PONMGR_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_PONMGR_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_PMMGR_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_PMMGR_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_WAN_ATM_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_WAN_ATM_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CPU_MT7505_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CPU_MT7505_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_SWQOS_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_SWQOS_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_SIMCARD_SEPARATION_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_SIMCARD_SEPARATION_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_2PORTS_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_2PORTS_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_LAN_VLAN_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_LAN_VLAN_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_LAN_VLAN_RANGE_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_LAN_VLAN_RANGE_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PON_SN_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PON_SN_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_WAN_PTM_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_WAN_PTM_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_C1_CUC_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_C1_CUC_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_SYSLOG_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_SYSLOG_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PORT_BIND_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PORT_BIND_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_GUIACCESSLIMIT_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_GUIACCESSLIMIT_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PPPOEPROXY_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PPPOEPROXY_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_ZIPROMFILE_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_ZIPROMFILE_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_RESERVEAREA_EXTEND_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_RESERVEAREA_EXTEND_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_C7_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_C7_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CY_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CY_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_E8B_ADSL_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_E8B_ADSL_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_WANNODE_MAPPING_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_WANNODE_MAPPING_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_BLOCK_PROCESS_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_BLOCK_PROCESS_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_ROUTEPOLICY_PRIOR_PORTBIND_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_ROUTEPOLICY_PRIOR_PORTBIND_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CPU_MT7510_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CPU_MT7510_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CPU_MT7520_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CPU_MT7520_VAL);
+#endif
+
+CompileOption_Val RA_NAT_EXE_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(RA_NAT_EXE_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_TRUE_LANDING_PAGE_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_TRUE_LANDING_PAGE_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CWMP_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CWMP_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CWMP_NO_HOST_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CWMP_NO_HOST_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_WLAN_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_WLAN_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_INIC_CLIENT_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_INIC_CLIENT_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CZ_GENERAL_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CZ_GENERAL_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_KEYPARA_STORE_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_KEYPARA_STORE_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_VPN_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_VPN_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_MULTI_BOOT_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_MULTI_BOOT_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_START_TRAP_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_START_TRAP_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_SYSLOG_ENHANCE_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_SYSLOG_ENHANCE_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_WLAN_AC_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_WLAN_AC_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CWMP_FAST_GET_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CWMP_FAST_GET_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CD_DDNS_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CD_DDNS_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_WAN_GPON_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_WAN_GPON_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_WAN_EPON_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_WAN_EPON_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_WAN_ETHER_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_WAN_ETHER_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_OMCI_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_OMCI_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_EPON_OAM_CTC_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_EPON_OAM_CTC_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CMDPROMPT_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CMDPROMPT_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_RANDOM_INFORM_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_RANDOM_INFORM_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_SERVICELIST_E8C_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_SERVICELIST_E8C_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_ALARMMONITOR_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_ALARMMONITOR_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CZ_TM_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CZ_TM_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_INFORM_NODE_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_INFORM_NODE_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_LONG_RESETBTN_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_LONG_RESETBTN_VAL);
+#endif
+
+CompileOption_Val CWMP_REDIRECT_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(CWMP_REDIRECT_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PPP_ONDEMAND_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PPP_ONDEMAND_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_CWMP_ZIPROMFILE_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_CWMP_ZIPROMFILE_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_TR69_BIND_PVC_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_TR69_BIND_PVC_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_NO_BOOT_VALUECHANGE_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_NO_BOOT_VALUECHANGE_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CROSS_REBOOT_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CROSS_REBOOT_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_BHARTI_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_BHARTI_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_TTNET_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_TTNET_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CWMP_PRECISE_TIME_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CWMP_PRECISE_TIME_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_C1_NEW_GUI_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_C1_NEW_GUI_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_PROLINE_CMD_ACTION_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_PROLINE_CMD_ACTION_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_MULTISERVICE_ON_WAN_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_MULTISERVICE_ON_WAN_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_UPNP_ENHANCE_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_UPNP_ENHANCE_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_C1_OBM_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_C1_OBM_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_WAN_PTM_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_WAN_PTM_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CD_NEW_GUI_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CD_NEW_GUI_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CCT_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CCT_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CLMP_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CLMP_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_SAGECOM_CWMP_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_SAGECOM_CWMP_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_FW_UPGRADE_DELAY_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_FW_UPGRADE_DELAY_VAL);
+#endif
+
+CompileOption_Val AZTECH_CWMP_REORDER_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(AZTECH_CWMP_REORDER_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CWMP_IOT_STRINGTYPE_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CWMP_IOT_STRINGTYPE_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CWMP_FAULT_RESPONSE_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CWMP_FAULT_RESPONSE_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CWMP_CRPORTCHANGE_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CWMP_CRPORTCHANGE_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_WEB_INTERCEPTION_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_WEB_INTERCEPTION_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_MNT_CONF_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_MNT_CONF_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_FTP_USB_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_FTP_USB_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_FTP_DOWNLOADCLIENT_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_FTP_DOWNLOADCLIENT_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_NEWGUI_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_NEWGUI_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_IPV6_FIREWALL_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_IPV6_FIREWALL_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_IPV6_FIREWALL_RFC2827_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_IPV6_FIREWALL_RFC2827_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_PRODUCTIONLINE_CONTENT_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_PRODUCTIONLINE_CONTENT_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_HWQOS_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_HWQOS_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CHT_RAMENU_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CHT_RAMENU_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_EPON_OAM_LAN_DBG_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_EPON_OAM_LAN_DBG_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_E8B_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_E8B_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_VLAN_BIND_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_VLAN_BIND_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_DSLITE_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_DSLITE_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_TC2031_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_TC2031_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_HWNAT_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_HWNAT_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_HWNAT_WAN_ACCOUNT_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_HWNAT_WAN_ACCOUNT_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CUC_QOS_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CUC_QOS_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_1FXS_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_1FXS_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_2PORTS_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_2PORTS_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PON_C7_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PON_C7_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_USBHOST_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_USBHOST_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_STANDARD_E8C_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_STANDARD_E8C_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_IPV6_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_IPV6_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_WLAN_RT3390_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_WLAN_RT3390_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_WLAN_RT3090_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_WLAN_RT3090_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_WLAN_RT5392_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_WLAN_RT5392_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_WLAN_MULTIDRIVER_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_WLAN_MULTIDRIVER_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_WLAN_MT7592_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_WLAN_MT7592_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_OMCI_ALCATEL_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_OMCI_ALCATEL_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_GPON_MAPPING_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_GPON_MAPPING_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_PON_USER_ISOLATION_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_PON_USER_ISOLATION_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_PON_VLAN_FILTER_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_PON_VLAN_FILTER_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_PON_MAC_FILTER_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_PON_MAC_FILTER_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_XPON_IGMP_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_XPON_IGMP_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_PON_VLAN_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_PON_VLAN_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_OMCI_LAN_DEBUG_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_OMCI_LAN_DEBUG_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_XPON_LED_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_XPON_LED_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_UPSTREAM_VLAN_POLICER_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_UPSTREAM_VLAN_POLICER_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_GPON_DOWNSTREAM_MAPPING_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_GPON_DOWNSTREAM_MAPPING_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_IGMP_SET_GROUP_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_IGMP_SET_GROUP_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CD_WIND_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CD_WIND_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_ARC_CWMP_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_ARC_CWMP_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_BUTTONDETECT_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_BUTTONDETECT_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_ECN_SIP_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_ECN_SIP_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_ECN_MEGACO_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_ECN_MEGACO_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_VOIP_SIP_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_VOIP_SIP_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CHS_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CHS_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_VOIP_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_VOIP_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_SLIC_ZL88801_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_SLIC_ZL88801_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PON_JX_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PON_JX_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_XPON_HAL_API_QOS_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_XPON_HAL_API_QOS_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_XPON_HAL_API_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_XPON_HAL_API_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_XPON_HAL_API_EXT_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_XPON_HAL_API_EXT_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_XPON_PON_QOS_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_XPON_PON_QOS_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_STBMAC_REPORT_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_STBMAC_REPORT_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PMINFORM_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PMINFORM_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PON_HUB_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PON_HUB_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_ADSL_HN_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_ADSL_HN_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PON_SC_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PON_SC_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PON_C9_HUN_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PON_C9_HUN_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_TR143_WGET_DOWNLOAD_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_TR143_WGET_DOWNLOAD_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_TR143_CURL_UPLOAD_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_TR143_CURL_UPLOAD_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_ADSL_TJ_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_ADSL_TJ_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_ADSL_BIND1_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_ADSL_BIND1_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_NAMECHGNOTIFY_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_NAMECHGNOTIFY_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_C5_HEN_SFU_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_C5_HEN_SFU_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_SFU_SX_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_SFU_SX_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_EPON_DUMMY_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_EPON_DUMMY_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_HWNAT_OFFLOAD_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_HWNAT_OFFLOAD_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_DS_HWNAT_OFFLOAD_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_DS_HWNAT_OFFLOAD_VAL);
+#endif
+
+/* Fix for bug OSBNB00044996 (johnson.sun, 2015-04-10) */
+CompileOption_Val TCSUPPORT_CT_PON_SK_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PON_SK_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CMCC_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CMCC_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CMCCV2_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CMCCV2_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CMCC_GANSU_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CMCC_GANSU_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CSC_EEUROPE_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CSC_EEUROPE_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_ITMS_TMOUT_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_ITMS_TMOUT_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_DLF_CTRL_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_DLF_CTRL_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CPU_PERFORMANCE_TEST_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CPU_PERFORMANCE_TEST_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_XPON_IGMP_CHT_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_XPON_IGMP_CHT_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_WAN_UPSTREAM_REMARK_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_WAN_UPSTREAM_REMARK_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_XPON_HAL_API_MCST_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_XPON_HAL_API_MCST_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CPU_EN75XX_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CPU_EN75XX_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_JOYME_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_JOYME_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_JOYME_BANDWIDTH_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_JOYME_BANDWIDTH_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_FH_JOYMEV2_PON_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_FH_JOYMEV2_PON_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_C9_ROST_LED_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_C9_ROST_LED_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_CWMP_WORK_COMPATIBLE_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_CWMP_WORK_COMPATIBLE_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_UPGRADE_NO_REBOOT_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_UPGRADE_NO_REBOOT_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PON_CN_JS_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PON_CN_JS_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CUC_PON_SD_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CUC_PON_SD_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_PON_ROSTELECOM_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_PON_ROSTELECOM_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_XPON_LED_UPGRADE_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_XPON_LED_UPGRADE_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_FWC_ENV_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_FWC_ENV_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_FWC_QOS_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_FWC_QOS_VAL);
+#endif
+
+
+CompileOption_Val TCSUPPORT_FWC_FDB_VLAN_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_FWC_FDB_VLAN_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_FWC_VLAN_TAG_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_FWC_VLAN_TAG_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_FWC_MCST_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_FWC_MCST_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_SWNAT_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_SWNAT_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PON_CZ_GD_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PON_CZ_GD_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CT_PON_CZ_NX_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CT_PON_CZ_NX_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_VNPTT_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_VNPTT_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CCA_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CCA_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_TR64_DIGEST_AUTH_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_TR64_DIGEST_AUTH_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_PORT_ISOLATION_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_PORT_ISOLATION_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_TLS_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_TLS_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_SDN_OVS_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_SDN_OVS_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_FH_SDN_PON_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_FH_SDN_PON_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_CRJO_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_CRJO_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_XPON_HAL_API_NG_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_XPON_HAL_API_NG_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_WLAN_SW_RPS_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_WLAN_SW_RPS_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_IPSEC_PASSTHROUGH_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_IPSEC_PASSTHROUGH_VAL);
+#endif
+
+CompileOption_Val TCSUPPORT_MIPS_1004K_VAL = 0;
+#ifdef __KERNEL__
+EXPORT_SYMBOL(TCSUPPORT_MIPS_1004K_VAL);
+#endif
+
+
+void
+init_compileoption_val(void){
+
+#ifdef TCSUPPORT_CT
+ TCSUPPORT_CT_VAL = 1;
+#else
+ TCSUPPORT_CT_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_VLAN_TAG
+ TCSUPPORT_CT_VLAN_TAG_VAL = 1;
+#else
+ TCSUPPORT_CT_VLAN_TAG_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_VLAN_TAG
+ TCSUPPORT_VLAN_TAG_VAL = 1;
+#else
+ TCSUPPORT_VLAN_TAG_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_TCAPI_ENHANCE
+ TCSUPPORT_TCAPI_ENHANCE_VAL = 1;
+#else
+ TCSUPPORT_TCAPI_ENHANCE_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CUC_C5_2P
+ TCSUPPORT_CUC_C5_2P_VAL = 1;
+#else
+ TCSUPPORT_CUC_C5_2P_VAL = 0;
+#endif
+
+#if 0
+#ifdef TCSUPPORT_CDS_VTAG_TRANSPARENT
+ TCSUPPORT_CDS_VTAG_TRANSPARENT_VAL = 1;
+#else
+ TCSUPPORT_CDS_VTAG_TRANSPARENT_VAL = 0;
+#endif
+#endif
+
+#ifdef TCSUPPORT_CT_PON_GD
+ TCSUPPORT_CT_PON_GD_VAL = 1;
+#else
+ TCSUPPORT_CT_PON_GD_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PON_GDV20
+ TCSUPPORT_CT_PON_GDV20_VAL = 1;
+#else
+ TCSUPPORT_CT_PON_GDV20_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PON_CZ_GDCS
+ TCSUPPORT_CT_PON_CZ_GDCS_VAL = 1;
+#else
+ TCSUPPORT_CT_PON_CZ_GDCS_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PON_CY
+ TCSUPPORT_CT_PON_CY_VAL = 1;
+#else
+ TCSUPPORT_CT_PON_CY_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_ZARLINK_LE89156A
+ TCSUPPORT_ZARLINK_LE89156A_VAL = 1;
+#else
+ TCSUPPORT_ZARLINK_LE89156A_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_ZARLINK_LE89156B
+ TCSUPPORT_ZARLINK_LE89156B_VAL = 1;
+#else
+ TCSUPPORT_ZARLINK_LE89156B_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PON_CY_JX
+ TCSUPPORT_CT_PON_CY_JX_VAL = 1;
+#else
+ TCSUPPORT_CT_PON_CY_JX_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PON_BIND2
+ TCSUPPORT_CT_PON_BIND2_VAL = 1;
+#else
+ TCSUPPORT_CT_PON_BIND2_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PON
+ TCSUPPORT_CT_PON_VAL = 1;
+#else
+ TCSUPPORT_CT_PON_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PON_JS
+ TCSUPPORT_CT_PON_JS_VAL = 1;
+#else
+ TCSUPPORT_CT_PON_JS_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_FJ
+ TCSUPPORT_CT_FJ_VAL = 1;
+#else
+ TCSUPPORT_CT_FJ_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PON_GX
+ TCSUPPORT_CT_PON_GX_VAL = 1;
+#else
+ TCSUPPORT_CT_PON_GX_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PON_CQ
+ TCSUPPORT_CT_PON_CQ_VAL = 1;
+#else
+ TCSUPPORT_CT_PON_CQ_VAL = 0;
+#endif
+
+
+#ifdef TCSUPPORT_CT_UPG_PINGPONG
+ TCSUPPORT_CT_UPG_PINGPONG_VAL = 1;
+#else
+ TCSUPPORT_CT_UPG_PINGPONG_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_BACKUPROMFILE
+ TCSUPPORT_BACKUPROMFILE_VAL = 1;
+#else
+ TCSUPPORT_BACKUPROMFILE_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_PRODUCTIONLINE
+ TCSUPPORT_PRODUCTIONLINE_VAL = 1;
+#else
+ TCSUPPORT_PRODUCTIONLINE_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CUC_CWMP_PARAMETER
+ TCSUPPORT_CUC_CWMP_PARAMETER_VAL = 1;
+#else
+ TCSUPPORT_CUC_CWMP_PARAMETER_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_NOTDEFAULTROMFILEAREA
+ TCSUPPORT_NOTDEFAULTROMFILEAREA_VAL = 1;
+#else
+ TCSUPPORT_NOTDEFAULTROMFILEAREA_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_MIDWARE
+ TCSUPPORT_CT_MIDWARE_VAL = 1;
+#else
+ TCSUPPORT_CT_MIDWARE_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CDS
+ TCSUPPORT_CDS_VAL = 1;
+#else
+ TCSUPPORT_CDS_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_VLAN_DOT1P
+ TCSUPPORT_VLAN_DOT1P_VAL = 1;
+#else
+ TCSUPPORT_VLAN_DOT1P_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_VLAN_PASSTHROUGH
+ TCSUPPORT_VLAN_PASSTHROUGH_VAL = 1;
+#else
+ TCSUPPORT_VLAN_PASSTHROUGH_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CUC_FJ_SFU
+ TCSUPPORT_CUC_FJ_SFU_VAL = 1;
+#else
+ TCSUPPORT_CUC_FJ_SFU_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_BACKUPROMFILEENCHANCEMENT
+ TCSUPPORT_CT_BACKUPROMFILEENCHANCEMENT_VAL = 1;
+#else
+ TCSUPPORT_CT_BACKUPROMFILEENCHANCEMENT_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CUC
+ TCSUPPORT_CUC_VAL = 1;
+#else
+ TCSUPPORT_CUC_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CY_PON
+ TCSUPPORT_CY_PON_VAL = 1;
+#else
+ TCSUPPORT_CY_PON_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PHONEAPP
+ TCSUPPORT_CT_PHONEAPP_VAL = 1;
+#else
+ TCSUPPORT_CT_PHONEAPP_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_AUTOBENCH
+ TCSUPPORT_AUTOBENCH_VAL = 1;
+#else
+ TCSUPPORT_AUTOBENCH_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PON_C9
+ TCSUPPORT_CT_PON_C9_VAL = 1;
+#else
+ TCSUPPORT_CT_PON_C9_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PON_JS_IP
+ TCSUPPORT_CT_PON_JS_IP_VAL = 1;
+#else
+ TCSUPPORT_CT_PON_JS_IP_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_VOIP_LED_APPCTRL
+ TCSUPPORT_VOIP_LED_APPCTRL_VAL = 1;
+#else
+ TCSUPPORT_VOIP_LED_APPCTRL_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_VOIP_CRYPT
+ TCSUPPORT_CT_VOIP_CRYPT_VAL = 1;
+#else
+ TCSUPPORT_CT_VOIP_CRYPT_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_PON_FH
+ TCSUPPORT_PON_FH_VAL = 1;
+#else
+ TCSUPPORT_PON_FH_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_VOIP_SIP_DNS
+ TCSUPPORT_VOIP_SIP_DNS_VAL = 1;
+#else
+ TCSUPPORT_VOIP_SIP_DNS_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_C1_MS
+ TCSUPPORT_C1_MS_VAL = 1;
+#else
+ TCSUPPORT_C1_MS_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_C1_ZY
+ TCSUPPORT_C1_ZY_VAL = 1;
+#else
+ TCSUPPORT_C1_ZY_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_PONMGR
+ TCSUPPORT_PONMGR_VAL = 1;
+#else
+ TCSUPPORT_PONMGR_VAL = 0;
+#endif
+
+
+#ifdef TCSUPPORT_PMMGR
+ TCSUPPORT_PMMGR_VAL = 1;
+#else
+ TCSUPPORT_PMMGR_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_WAN_ATM
+ TCSUPPORT_WAN_ATM_VAL = 1;
+#else
+ TCSUPPORT_WAN_ATM_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CPU_MT7505
+ TCSUPPORT_CPU_MT7505_VAL = 1;
+#else
+ TCSUPPORT_CPU_MT7505_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PON_YN
+ TCSUPPORT_CT_PON_YN_VAL = 1;
+#else
+ TCSUPPORT_CT_PON_YN_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_DEV_ACCESS_TYPE
+ TCSUPPORT_CT_DEV_ACCESS_TYPE_VAL = 1;
+#else
+ TCSUPPORT_CT_DEV_ACCESS_TYPE_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_ECN_GZ
+ TCSUPPORT_CT_ECN_GZ_VAL = 1;
+#else
+ TCSUPPORT_CT_ECN_GZ_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_WANNINDEX_INCREASE
+ TCSUPPORT_CT_WANNINDEX_INCREASE_VAL = 1;
+#else
+ TCSUPPORT_CT_WANNINDEX_INCREASE_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_MONITORCOLLECTOR
+ TCSUPPORT_CT_MONITORCOLLECTOR_VAL = 1;
+#else
+ TCSUPPORT_CT_MONITORCOLLECTOR_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PPPOE_EMULATOR
+ TCSUPPORT_CT_PPPOE_EMULATOR_VAL = 1;
+#else
+ TCSUPPORT_CT_PPPOE_EMULATOR_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PON_NMG
+ TCSUPPORT_CT_PON_NMG_VAL = 1;
+#else
+ TCSUPPORT_CT_PON_NMG_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CY_E8_SFU
+ TCSUPPORT_CY_E8_SFU_VAL = 1;
+#else
+ TCSUPPORT_CY_E8_SFU_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_ITMS_CONFIG_AS_DEFAULT
+ TCSUPPORT_ITMS_CONFIG_AS_DEFAULT_VAL = 1;
+#else
+ TCSUPPORT_ITMS_CONFIG_AS_DEFAULT_VAL = 0;
+#endif
+#ifdef CT_COM_DEVICEREG
+ CT_COM_DEVICEREG_VAL = 1;
+#else
+ CT_COM_DEVICEREG_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_DMS
+ TCSUPPORT_DMS_VAL = 1;
+#else
+ TCSUPPORT_DMS_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_UPNP_DM
+ TCSUPPORT_CT_UPNP_DM_VAL = 1;
+#else
+ TCSUPPORT_CT_UPNP_DM_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_TR69_IP_HOST
+ TCSUPPORT_TR69_IP_HOST_VAL = 1;
+#else
+ TCSUPPORT_TR69_IP_HOST_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_PON_IP_HOST
+ TCSUPPORT_PON_IP_HOST_VAL = 1;
+#else
+ TCSUPPORT_PON_IP_HOST_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PPPINFORM
+ TCSUPPORT_CT_PPPINFORM_VAL = 1;
+#else
+ TCSUPPORT_CT_PPPINFORM_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_SWQOS
+ TCSUPPORT_CT_SWQOS_VAL = 1;
+#else
+ TCSUPPORT_CT_SWQOS_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_SIMCARD_SEPARATION
+ TCSUPPORT_CT_SIMCARD_SEPARATION_VAL = 1;
+#else
+ TCSUPPORT_CT_SIMCARD_SEPARATION_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_2PORTS
+ TCSUPPORT_CT_2PORTS_VAL = 1;
+#else
+ TCSUPPORT_CT_2PORTS_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_LAN_VLAN
+ TCSUPPORT_LAN_VLAN_VAL = 1;
+#else
+ TCSUPPORT_LAN_VLAN_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_LAN_VLAN_RANGE
+ TCSUPPORT_LAN_VLAN_RANGE_VAL = 1;
+#else
+ TCSUPPORT_LAN_VLAN_RANGE_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PON_SN
+ TCSUPPORT_CT_PON_SN_VAL = 1;
+#else
+ TCSUPPORT_CT_PON_SN_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_WAN_PTM
+ TCSUPPORT_CT_WAN_PTM_VAL = 1;
+#else
+ TCSUPPORT_CT_WAN_PTM_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_C1_CUC
+ TCSUPPORT_C1_CUC_VAL = 1;
+#else
+ TCSUPPORT_C1_CUC_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_SYSLOG
+ TCSUPPORT_SYSLOG_VAL = 1;
+#else
+ TCSUPPORT_SYSLOG_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PORT_BIND
+ TCSUPPORT_CT_PORT_BIND_VAL = 1;
+#else
+ TCSUPPORT_CT_PORT_BIND_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_GUIACCESSLIMIT
+ TCSUPPORT_CT_GUIACCESSLIMIT_VAL = 1;
+#else
+ TCSUPPORT_CT_GUIACCESSLIMIT_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PPPOEPROXY
+ TCSUPPORT_CT_PPPOEPROXY_VAL = 1;
+#else
+ TCSUPPORT_CT_PPPOEPROXY_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_ZIPROMFILE
+ TCSUPPORT_CT_ZIPROMFILE_VAL = 1;
+#else
+ TCSUPPORT_CT_ZIPROMFILE_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_RESERVEAREA_EXTEND
+ TCSUPPORT_RESERVEAREA_EXTEND_VAL = 1;
+#else
+ TCSUPPORT_RESERVEAREA_EXTEND_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_C7
+ TCSUPPORT_C7_VAL = 1;
+#else
+ TCSUPPORT_C7_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CY
+ TCSUPPORT_CY_VAL = 1;
+#else
+ TCSUPPORT_CY_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_E8B_ADSL
+ TCSUPPORT_CT_E8B_ADSL_VAL = 1;
+#else
+ TCSUPPORT_CT_E8B_ADSL_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_WANNODE_MAPPING
+ TCSUPPORT_CT_WANNODE_MAPPING_VAL = 1;
+#else
+ TCSUPPORT_CT_WANNODE_MAPPING_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_BLOCK_PROCESS
+ TCSUPPORT_CT_BLOCK_PROCESS_VAL = 1;
+#else
+ TCSUPPORT_CT_BLOCK_PROCESS_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_ROUTEPOLICY_PRIOR_PORTBIND
+ TCSUPPORT_ROUTEPOLICY_PRIOR_PORTBIND_VAL = 1;
+#else
+ TCSUPPORT_ROUTEPOLICY_PRIOR_PORTBIND_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CPU_MT7510
+ TCSUPPORT_CPU_MT7510_VAL = 1;
+#else
+ TCSUPPORT_CPU_MT7510_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CPU_MT7520
+ TCSUPPORT_CPU_MT7520_VAL = 1;
+#else
+ TCSUPPORT_CPU_MT7520_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_TRUE_LANDING_PAGE
+ TCSUPPORT_TRUE_LANDING_PAGE_VAL = 1;
+#else
+ TCSUPPORT_TRUE_LANDING_PAGE_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CWMP
+ TCSUPPORT_CWMP_VAL = 1;
+#else
+ TCSUPPORT_CWMP_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CWMP_NO_HOST
+ TCSUPPORT_CWMP_NO_HOST_VAL = 1;
+#else
+ TCSUPPORT_CWMP_NO_HOST_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_WLAN
+ TCSUPPORT_WLAN_VAL = 1;
+#else
+ TCSUPPORT_WLAN_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_INIC_CLIENT
+ TCSUPPORT_INIC_CLIENT_VAL = 1;
+#else
+ TCSUPPORT_INIC_CLIENT_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CZ_GENERAL
+ TCSUPPORT_CZ_GENERAL_VAL = 1;
+#else
+ TCSUPPORT_CZ_GENERAL_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_KEYPARA_STORE
+ TCSUPPORT_KEYPARA_STORE_VAL = 1;
+#else
+ TCSUPPORT_KEYPARA_STORE_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_VPN
+ TCSUPPORT_VPN_VAL = 1;
+#else
+ TCSUPPORT_VPN_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_MULTI_BOOT
+ TCSUPPORT_MULTI_BOOT_VAL = 1;
+#else
+ TCSUPPORT_MULTI_BOOT_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_START_TRAP
+ TCSUPPORT_START_TRAP_VAL = 1;
+#else
+ TCSUPPORT_START_TRAP_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_SYSLOG_ENHANCE
+ TCSUPPORT_SYSLOG_ENHANCE_VAL = 1;
+#else
+ TCSUPPORT_SYSLOG_ENHANCE_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_WLAN_AC
+ TCSUPPORT_WLAN_AC_VAL = 1;
+#else
+ TCSUPPORT_WLAN_AC_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CWMP_FAST_GET
+ TCSUPPORT_CWMP_FAST_GET_VAL = 1;
+#else
+ TCSUPPORT_CWMP_FAST_GET_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CD_DDNS
+ TCSUPPORT_CD_DDNS_VAL = 1;
+#else
+ TCSUPPORT_CD_DDNS_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_WAN_GPON
+ TCSUPPORT_WAN_GPON_VAL = 1;
+#else
+ TCSUPPORT_WAN_GPON_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_WAN_EPON
+ TCSUPPORT_WAN_EPON_VAL = 1;
+#else
+ TCSUPPORT_WAN_EPON_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_WAN_ETHER
+ TCSUPPORT_WAN_ETHER_VAL = 1;
+#else
+ TCSUPPORT_WAN_ETHER_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_OMCI
+ TCSUPPORT_OMCI_VAL = 1;
+#else
+ TCSUPPORT_OMCI_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_EPON_OAM_CTC
+ TCSUPPORT_EPON_OAM_CTC_VAL = 1;
+#else
+ TCSUPPORT_EPON_OAM_CTC_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CMDPROMPT
+ TCSUPPORT_CMDPROMPT_VAL = 1;
+#else
+ TCSUPPORT_CMDPROMPT_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_RANDOM_INFORM
+ TCSUPPORT_RANDOM_INFORM_VAL = 1;
+#else
+ TCSUPPORT_RANDOM_INFORM_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_SERVICELIST_E8C
+ TCSUPPORT_CT_SERVICELIST_E8C_VAL = 1;
+#else
+ TCSUPPORT_CT_SERVICELIST_E8C_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_ALARMMONITOR
+ TCSUPPORT_CT_ALARMMONITOR_VAL = 1;
+#else
+ TCSUPPORT_CT_ALARMMONITOR_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CZ_TM
+ TCSUPPORT_CZ_TM_VAL = 1;
+#else
+ TCSUPPORT_CZ_TM_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_INFORM_NODE
+ TCSUPPORT_CT_INFORM_NODE_VAL = 1;
+#else
+ TCSUPPORT_CT_INFORM_NODE_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_LONG_RESETBTN
+ TCSUPPORT_CT_LONG_RESETBTN_VAL = 1;
+#else
+ TCSUPPORT_CT_LONG_RESETBTN_VAL = 0;
+#endif
+
+#ifdef CWMP_REDIRECT
+ CWMP_REDIRECT_VAL = 1;
+#else
+ CWMP_REDIRECT_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PPP_ONDEMAND
+ TCSUPPORT_CT_PPP_ONDEMAND_VAL = 1;
+#else
+ TCSUPPORT_CT_PPP_ONDEMAND_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_CWMP_ZIPROMFILE
+ TCSUPPORT_CT_CWMP_ZIPROMFILE_VAL = 1;
+#else
+ TCSUPPORT_CT_CWMP_ZIPROMFILE_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_TR69_BIND_PVC
+ TCSUPPORT_TR69_BIND_PVC_VAL = 1;
+#else
+ TCSUPPORT_TR69_BIND_PVC_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_NO_BOOT_VALUECHANGE
+ TCSUPPORT_NO_BOOT_VALUECHANGE_VAL = 1;
+#else
+ TCSUPPORT_NO_BOOT_VALUECHANGE_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CROSS_REBOOT
+ TCSUPPORT_CROSS_REBOOT_VAL = 1;
+#else
+ TCSUPPORT_CROSS_REBOOT_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_BHARTI
+ TCSUPPORT_BHARTI_VAL = 1;
+#else
+ TCSUPPORT_BHARTI_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_TTNET
+ TCSUPPORT_TTNET_VAL = 1;
+#else
+ TCSUPPORT_TTNET_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CWMP_PRECISE_TIME
+ TCSUPPORT_CWMP_PRECISE_TIME_VAL = 1;
+#else
+ TCSUPPORT_CWMP_PRECISE_TIME_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_C1_NEW_GUI
+ TCSUPPORT_C1_NEW_GUI_VAL = 1;
+#else
+ TCSUPPORT_C1_NEW_GUI_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_PROLINE_CMD_ACTION
+ TCSUPPORT_PROLINE_CMD_ACTION_VAL = 1;
+#else
+ TCSUPPORT_PROLINE_CMD_ACTION_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_MULTISERVICE_ON_WAN
+ TCSUPPORT_MULTISERVICE_ON_WAN_VAL = 1;
+#else
+ TCSUPPORT_MULTISERVICE_ON_WAN_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_UPNP_ENHANCE
+ TCSUPPORT_UPNP_ENHANCE_VAL = 1;
+#else
+ TCSUPPORT_UPNP_ENHANCE_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_C1_OBM
+ TCSUPPORT_C1_OBM_VAL = 1;
+#else
+ TCSUPPORT_C1_OBM_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_WAN_PTM
+ TCSUPPORT_WAN_PTM_VAL = 1;
+#else
+ TCSUPPORT_WAN_PTM_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CD_NEW_GUI
+ TCSUPPORT_CD_NEW_GUI_VAL = 1;
+#else
+ TCSUPPORT_CD_NEW_GUI_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CCT
+ TCSUPPORT_CCT_VAL = 1;
+#else
+ TCSUPPORT_CCT_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CLMP
+ TCSUPPORT_CLMP_VAL = 1;
+#else
+ TCSUPPORT_CLMP_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_SAGECOM_CWMP
+ TCSUPPORT_SAGECOM_CWMP_VAL = 1;
+#else
+ TCSUPPORT_SAGECOM_CWMP_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_FW_UPGRADE_DELAY
+ TCSUPPORT_FW_UPGRADE_DELAY_VAL = 1;
+#else
+ TCSUPPORT_FW_UPGRADE_DELAY_VAL = 0;
+#endif
+
+#ifdef AZTECH_CWMP_REORDER
+ AZTECH_CWMP_REORDER_VAL = 1;
+#else
+ AZTECH_CWMP_REORDER_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CWMP_IOT_STRINGTYPE
+ TCSUPPORT_CWMP_IOT_STRINGTYPE_VAL = 1;
+#else
+ TCSUPPORT_CWMP_IOT_STRINGTYPE_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CWMP_FAULT_RESPONSE
+ TCSUPPORT_CWMP_FAULT_RESPONSE_VAL = 1;
+#else
+ TCSUPPORT_CWMP_FAULT_RESPONSE_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CWMP_CRPORTCHANGE
+ TCSUPPORT_CWMP_CRPORTCHANGE_VAL = 1;
+#else
+ TCSUPPORT_CWMP_CRPORTCHANGE_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_WEB_INTERCEPTION
+ TCSUPPORT_WEB_INTERCEPTION_VAL = 1;
+#else
+ TCSUPPORT_WEB_INTERCEPTION_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_MNT_CONF
+ TCSUPPORT_MNT_CONF_VAL = 1;
+#else
+ TCSUPPORT_MNT_CONF_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_FTP_USB
+ TCSUPPORT_FTP_USB_VAL = 1;
+#else
+ TCSUPPORT_FTP_USB_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_FTP_DOWNLOADCLIENT
+ TCSUPPORT_CT_FTP_DOWNLOADCLIENT_VAL = 1;
+#else
+ TCSUPPORT_CT_FTP_DOWNLOADCLIENT_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_NEWGUI
+ TCSUPPORT_CT_NEWGUI_VAL = 1;
+#else
+ TCSUPPORT_CT_NEWGUI_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_IPV6_FIREWALL
+ TCSUPPORT_IPV6_FIREWALL_VAL = 1;
+#else
+ TCSUPPORT_IPV6_FIREWALL_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_IPV6_FIREWALL_RFC2827
+ TCSUPPORT_IPV6_FIREWALL_RFC2827_VAL = 1;
+#else
+ TCSUPPORT_IPV6_FIREWALL_RFC2827_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_PRODUCTIONLINE_CONTENT
+ TCSUPPORT_PRODUCTIONLINE_CONTENT_VAL = 1;
+#else
+ TCSUPPORT_PRODUCTIONLINE_CONTENT_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_HWQOS
+ TCSUPPORT_CT_HWQOS_VAL = 1;
+#else
+ TCSUPPORT_CT_HWQOS_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CHT_RAMENU
+ TCSUPPORT_CHT_RAMENU_VAL = 1;
+#else
+ TCSUPPORT_CHT_RAMENU_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_EPON_OAM_LAN_DBG
+ TCSUPPORT_EPON_OAM_LAN_DBG_VAL = 1;
+#else
+ TCSUPPORT_EPON_OAM_LAN_DBG_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_E8B
+ TCSUPPORT_E8B_VAL = 1;
+#else
+ TCSUPPORT_E8B_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_VLAN_BIND
+ TCSUPPORT_CT_VLAN_BIND_VAL = 1;
+#else
+ TCSUPPORT_CT_VLAN_BIND_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_DSLITE
+ TCSUPPORT_CT_DSLITE_VAL = 1;
+#else
+ TCSUPPORT_CT_DSLITE_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_TC2031
+ TCSUPPORT_TC2031_VAL = 1;
+#else
+ TCSUPPORT_TC2031_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_HWNAT
+ TCSUPPORT_HWNAT_VAL = 1;
+#else
+ TCSUPPORT_HWNAT_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_HWNAT_WAN_ACCOUNT
+ TCSUPPORT_HWNAT_WAN_ACCOUNT_VAL = 1;
+#else
+ TCSUPPORT_HWNAT_WAN_ACCOUNT_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CUC_QOS
+ TCSUPPORT_CUC_QOS_VAL = 1;
+#else
+ TCSUPPORT_CUC_QOS_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_1FXS
+ TCSUPPORT_CT_1FXS_VAL = 1;
+#else
+ TCSUPPORT_CT_1FXS_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_2PORTS
+ TCSUPPORT_2PORTS_VAL = 1;
+#else
+ TCSUPPORT_2PORTS_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PON_C7
+ TCSUPPORT_CT_PON_C7_VAL = 1;
+#else
+ TCSUPPORT_CT_PON_C7_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_USBHOST
+ TCSUPPORT_USBHOST_VAL = 1;
+#else
+ TCSUPPORT_USBHOST_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_STANDARD_E8C
+ TCSUPPORT_CT_STANDARD_E8C_VAL = 1;
+#else
+ TCSUPPORT_CT_STANDARD_E8C_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_IPV6
+ TCSUPPORT_IPV6_VAL = 1;
+#else
+ TCSUPPORT_IPV6_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_WLAN_RT3390
+ TCSUPPORT_WLAN_RT3390_VAL = 1;
+#else
+ TCSUPPORT_WLAN_RT3390_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_WLAN_RT3090
+ TCSUPPORT_WLAN_RT3090_VAL = 1;
+#else
+ TCSUPPORT_WLAN_RT3090_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_WLAN_RT5392
+ TCSUPPORT_WLAN_RT5392_VAL = 1;
+#else
+ TCSUPPORT_WLAN_RT5392_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_WLAN_MULTIDRIVER
+ TCSUPPORT_WLAN_MULTIDRIVER_VAL = 1;
+#else
+ TCSUPPORT_WLAN_MULTIDRIVER_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_WLAN_MT7592
+ TCSUPPORT_WLAN_MT7592_VAL = 1;
+#else
+ TCSUPPORT_WLAN_MT7592_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_OMCI_ALCATEL
+ TCSUPPORT_OMCI_ALCATEL_VAL = 1;
+#else
+ TCSUPPORT_OMCI_ALCATEL_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_GPON_MAPPING
+ TCSUPPORT_GPON_MAPPING_VAL = 1;
+#else
+ TCSUPPORT_GPON_MAPPING_VAL = 0;
+#endif
+
+#if defined(TCSUPPORT_XPON_HAL_API_QOS)
+ TCSUPPORT_XPON_HAL_API_QOS_VAL=1;
+#else
+ TCSUPPORT_XPON_HAL_API_QOS_VAL=0;
+#endif
+
+#if defined(TCSUPPORT_XPON_HAL_API)
+ TCSUPPORT_XPON_HAL_API_VAL=1;
+#else
+ TCSUPPORT_XPON_HAL_API_VAL=0;
+#endif
+
+#if defined(TCSUPPORT_XPON_HAL_API_EXT)
+ TCSUPPORT_XPON_HAL_API_EXT_VAL=1;
+#else
+ TCSUPPORT_XPON_HAL_API_EXT_VAL=0;
+#endif
+
+
+#ifdef TCSUPPORT_PON_USER_ISOLATION
+ TCSUPPORT_PON_USER_ISOLATION_VAL = 1;
+#else
+ TCSUPPORT_PON_USER_ISOLATION_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_PON_VLAN_FILTER
+ TCSUPPORT_PON_VLAN_FILTER_VAL = 1;
+#else
+ TCSUPPORT_PON_VLAN_FILTER_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_PON_MAC_FILTER
+ TCSUPPORT_PON_MAC_FILTER_VAL = 1;
+#else
+ TCSUPPORT_PON_MAC_FILTER_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_XPON_IGMP
+ TCSUPPORT_XPON_IGMP_VAL = 1;
+#else
+ TCSUPPORT_XPON_IGMP_VAL = 0;
+#endif
+
+#if defined(TCSUPPORT_CT_PON) || defined(TCSUPPORT_XPON_HAL_API_QOS) || defined(TCSUPPORT_XPON_HAL_API_EXT)
+ TCSUPPORT_XPON_PON_QOS_VAL=1;
+#else
+ TCSUPPORT_XPON_PON_QOS_VAL=0;
+#endif
+
+#ifdef TCSUPPORT_PON_VLAN
+ TCSUPPORT_PON_VLAN_VAL = 1;
+#else
+ TCSUPPORT_PON_VLAN_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_OMCI_LAN_DEBUG
+ TCSUPPORT_OMCI_LAN_DEBUG_VAL = 1;
+#else
+ TCSUPPORT_OMCI_LAN_DEBUG_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_XPON_LED
+ TCSUPPORT_XPON_LED_VAL = 1;
+#else
+ TCSUPPORT_XPON_LED_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_UPSTREAM_VLAN_POLICER
+ TCSUPPORT_UPSTREAM_VLAN_POLICER_VAL = 1;
+#else
+ TCSUPPORT_UPSTREAM_VLAN_POLICER_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_GPON_DOWNSTREAM_MAPPING
+ TCSUPPORT_GPON_DOWNSTREAM_MAPPING_VAL = 1;
+#else
+ TCSUPPORT_GPON_DOWNSTREAM_MAPPING_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_IGMP_SET_GROUP
+ TCSUPPORT_IGMP_SET_GROUP_VAL = 1;
+#else
+ TCSUPPORT_IGMP_SET_GROUP_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CD_WIND
+ TCSUPPORT_CD_WIND_VAL = 1;
+#else
+ TCSUPPORT_CD_WIND_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_ARC_CWMP
+ TCSUPPORT_ARC_CWMP_VAL = 1;
+#else
+ TCSUPPORT_ARC_CWMP_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_BUTTONDETECT
+ TCSUPPORT_CT_BUTTONDETECT_VAL = 1;
+#else
+ TCSUPPORT_CT_BUTTONDETECT_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_ECN_SIP
+ TCSUPPORT_ECN_SIP_VAL = 1;
+#else
+ TCSUPPORT_ECN_SIP_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_ECN_MEGACO
+ TCSUPPORT_ECN_MEGACO_VAL = 1;
+#else
+ TCSUPPORT_ECN_MEGACO_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_VOIP_SIP
+ TCSUPPORT_CT_VOIP_SIP_VAL = 1;
+#else
+ TCSUPPORT_CT_VOIP_SIP_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CHS
+ TCSUPPORT_CHS_VAL = 1;
+#else
+ TCSUPPORT_CHS_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_VOIP
+ TCSUPPORT_VOIP_VAL = 1;
+#else
+ TCSUPPORT_VOIP_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_SLIC_ZL88801
+ TCSUPPORT_SLIC_ZL88801_VAL = 1;
+#else
+ TCSUPPORT_SLIC_ZL88801_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PON_JX
+ TCSUPPORT_CT_PON_JX_VAL = 1;
+#else
+ TCSUPPORT_CT_PON_JX_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_STBMAC_REPORT
+ TCSUPPORT_CT_STBMAC_REPORT_VAL = 1;
+#else
+ TCSUPPORT_CT_STBMAC_REPORT_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PMINFORM
+ TCSUPPORT_CT_PMINFORM_VAL = 1;
+#else
+ TCSUPPORT_CT_PMINFORM_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PON_HUB
+ TCSUPPORT_CT_PON_HUB_VAL = 1;
+#else
+ TCSUPPORT_CT_PON_HUB_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PON_SC
+ TCSUPPORT_CT_PON_SC_VAL = 1;
+#else
+ TCSUPPORT_CT_PON_SC_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_ADSL_HN
+ TCSUPPORT_CT_ADSL_HN_VAL = 1;
+#else
+ TCSUPPORT_CT_ADSL_HN_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PON_C9_HUN
+ TCSUPPORT_CT_PON_C9_HUN_VAL = 1;
+#else
+ TCSUPPORT_CT_PON_C9_HUN_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_TR143_WGET_DOWNLOAD
+ TCSUPPORT_CT_TR143_WGET_DOWNLOAD_VAL = 1;
+#else
+ TCSUPPORT_CT_TR143_WGET_DOWNLOAD_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_TR143_CURL_UPLOAD
+ TCSUPPORT_TR143_CURL_UPLOAD_VAL = 1;
+#else
+ TCSUPPORT_TR143_CURL_UPLOAD_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_ADSL_TJ
+ TCSUPPORT_CT_ADSL_TJ_VAL = 1;
+#else
+ TCSUPPORT_CT_ADSL_TJ_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_ADSL_BIND1
+ TCSUPPORT_CT_ADSL_BIND1_VAL = 1;
+#else
+ TCSUPPORT_CT_ADSL_BIND1_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_NAMECHGNOTIFY
+ TCSUPPORT_CT_NAMECHGNOTIFY_VAL = 1;
+#else
+ TCSUPPORT_CT_NAMECHGNOTIFY_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_C5_HEN_SFU
+ TCSUPPORT_CT_C5_HEN_SFU_VAL = 1;
+#else
+ TCSUPPORT_CT_C5_HEN_SFU_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_SFU_SX
+ TCSUPPORT_CT_SFU_SX_VAL = 1;
+#else
+ TCSUPPORT_CT_SFU_SX_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_EPON_DUMMY
+ TCSUPPORT_EPON_DUMMY_VAL = 1;
+#else
+ TCSUPPORT_EPON_DUMMY_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_HWNAT_OFFLOAD
+ TCSUPPORT_CT_HWNAT_OFFLOAD_VAL = 1;
+#else
+ TCSUPPORT_CT_HWNAT_OFFLOAD_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_DS_HWNAT_OFFLOAD
+ TCSUPPORT_DS_HWNAT_OFFLOAD_VAL = 1;
+#else
+ TCSUPPORT_DS_HWNAT_OFFLOAD_VAL = 0;
+#endif
+
+/* fix bug OSBNB00044996 by johnson.sun on 20150410 */
+#ifdef TCSUPPORT_CT_PON_SK
+ TCSUPPORT_CT_PON_SK_VAL = 1;
+#else
+ TCSUPPORT_CT_PON_SK_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CMCC
+ TCSUPPORT_CMCC_VAL = 1;
+#else
+ TCSUPPORT_CMCC_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CMCCV2
+ TCSUPPORT_CMCCV2_VAL = 1;
+#else
+ TCSUPPORT_CMCCV2_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CMCC_GANSU
+ TCSUPPORT_CMCC_GANSU_VAL = 1;
+#else
+ TCSUPPORT_CMCC_GANSU_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CSC_EEUROPE
+ TCSUPPORT_CSC_EEUROPE_VAL = 1;
+#else
+ TCSUPPORT_CSC_EEUROPE_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_ITMS_TMOUT
+ TCSUPPORT_CT_ITMS_TMOUT_VAL = 1;
+#else
+ TCSUPPORT_CT_ITMS_TMOUT_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CPU_PERFORMANCE_TEST
+ TCSUPPORT_CPU_PERFORMANCE_TEST_VAL = 1;
+#else
+ TCSUPPORT_CPU_PERFORMANCE_TEST_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_XPON_IGMP_CHT
+ TCSUPPORT_XPON_IGMP_CHT_VAL = 1;
+#else
+ TCSUPPORT_XPON_IGMP_CHT_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_WAN_UPSTREAM_REMARK
+ TCSUPPORT_WAN_UPSTREAM_REMARK_VAL = 1;
+#else
+ TCSUPPORT_WAN_UPSTREAM_REMARK_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_XPON_HAL_API_MCST
+ TCSUPPORT_XPON_HAL_API_MCST_VAL = 1;
+#else
+ TCSUPPORT_XPON_HAL_API_MCST_VAL = 0;
+#endif
+#if defined(TCSUPPORT_CPU_EN7512) || defined(TCSUPPORT_CPU_EN7521)
+ TCSUPPORT_CPU_EN75XX_VAL = 1;
+#else
+ TCSUPPORT_CPU_EN75XX_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_JOYME
+ TCSUPPORT_CT_JOYME_VAL = 1;
+#else
+ TCSUPPORT_CT_JOYME_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_JOYME_BANDWIDTH
+ TCSUPPORT_CT_JOYME_BANDWIDTH_VAL = 1;
+#else
+ TCSUPPORT_CT_JOYME_BANDWIDTH_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_FH_JOYMEV2_PON
+ TCSUPPORT_FH_JOYMEV2_PON_VAL = 1;
+#else
+ TCSUPPORT_FH_JOYMEV2_PON_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_C9_ROST_LED
+ TCSUPPORT_C9_ROST_LED_VAL = 1;
+#else
+ TCSUPPORT_C9_ROST_LED_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_CWMP_WORK_COMPATIBLE
+ TCSUPPORT_CT_CWMP_WORK_COMPATIBLE_VAL = 1;
+#else
+ TCSUPPORT_CT_CWMP_WORK_COMPATIBLE_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_UPGRADE_NO_REBOOT
+ TCSUPPORT_UPGRADE_NO_REBOOT_VAL = 1;
+#else
+ TCSUPPORT_UPGRADE_NO_REBOOT_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PON_CN_JS
+ TCSUPPORT_CT_PON_CN_JS_VAL = 1;
+#else
+ TCSUPPORT_CT_PON_CN_JS_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CUC_PON_SD
+ TCSUPPORT_CUC_PON_SD_VAL = 1;
+#else
+ TCSUPPORT_CUC_PON_SD_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_PON_ROSTELECOM
+ TCSUPPORT_PON_ROSTELECOM_VAL = 1;
+#else
+ TCSUPPORT_PON_ROSTELECOM_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_XPON_LED_UPGRADE
+ TCSUPPORT_XPON_LED_UPGRADE_VAL = 1;
+#else
+ TCSUPPORT_XPON_LED_UPGRADE_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_FWC_ENV
+ TCSUPPORT_FWC_ENV_VAL = 1;
+#else
+ TCSUPPORT_FWC_ENV_VAL = 0;
+#endif
+
+#if defined(TCSUPPORT_PON_VLAN) || defined(TCSUPPORT_PON_MAC_FILTER) || (defined(TCSUPPORT_GPON_MAPPING) && defined(TCSUPPORT_GPON_DOWNSTREAM_MAPPING))
+ RA_NAT_EXE_VAL = 1;
+#else
+ RA_NAT_EXE_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_DLF_CTRL
+ TCSUPPORT_DLF_CTRL_VAL = 1;
+#else
+ TCSUPPORT_DLF_CTRL_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_FWC_QOS
+ TCSUPPORT_FWC_QOS_VAL = 1;
+#else
+ TCSUPPORT_FWC_QOS_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_FWC_FDB_VLAN
+ TCSUPPORT_FWC_FDB_VLAN_VAL = 1;
+#else
+ TCSUPPORT_FWC_FDB_VLAN_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_FWC_VLAN_TAG
+ TCSUPPORT_FWC_VLAN_TAG_VAL = 1;
+#else
+ TCSUPPORT_FWC_VLAN_TAG_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_FWC_MCST
+ TCSUPPORT_FWC_MCST_VAL = 1;
+#else
+ TCSUPPORT_FWC_MCST_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_SWNAT
+ TCSUPPORT_SWNAT_VAL = 1;
+#else
+ TCSUPPORT_SWNAT_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PON_CZ_GD
+ TCSUPPORT_CT_PON_CZ_GD_VAL = 1;
+#else
+ TCSUPPORT_CT_PON_CZ_GD_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CT_PON_CZ_NX
+ TCSUPPORT_CT_PON_CZ_NX_VAL = 1;
+#else
+ TCSUPPORT_CT_PON_CZ_NX_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_VNPTT
+ TCSUPPORT_VNPTT_VAL = 1;
+#else
+ TCSUPPORT_VNPTT_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CCA
+ TCSUPPORT_CCA_VAL = 1;
+#else
+ TCSUPPORT_CCA_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_TR64_DIGEST_AUTH
+ TCSUPPORT_TR64_DIGEST_AUTH_VAL = 1;
+#else
+ TCSUPPORT_TR64_DIGEST_AUTH_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_PORT_ISOLATION
+ TCSUPPORT_PORT_ISOLATION_VAL = 1;
+#else
+ TCSUPPORT_PORT_ISOLATION_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_TLS
+ TCSUPPORT_TLS_VAL = 1;
+#else
+ TCSUPPORT_TLS_VAL = 0;
+#endif
+#ifdef TCSUPPORT_SDN_OVS
+ TCSUPPORT_SDN_OVS_VAL = 1;
+#else
+ TCSUPPORT_SDN_OVS_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_FH_SDN_PON
+ TCSUPPORT_FH_SDN_PON_VAL = 1;
+#else
+ TCSUPPORT_FH_SDN_PON_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_MANUAL_ETHERNET_PORTMAP
+ TCSUPPORT_MANUAL_ETHERNET_PORTMAP_VAL = 1;
+#else
+ TCSUPPORT_MANUAL_ETHERNET_PORTMAP_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_CRJO
+ TCSUPPORT_CRJO_VAL = 1;
+#else
+ TCSUPPORT_CRJO_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_XPON_HAL_API_NG
+ TCSUPPORT_XPON_HAL_API_NG_VAL = 1;
+#else
+ TCSUPPORT_XPON_HAL_API_NG_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_WLAN_SW_RPS
+ TCSUPPORT_WLAN_SW_RPS_VAL = 1;
+#else
+ TCSUPPORT_WLAN_SW_RPS_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_IPSEC_PASSTHROUGH
+ TCSUPPORT_IPSEC_PASSTHROUGH_VAL = 1;
+#else
+ TCSUPPORT_IPSEC_PASSTHROUGH_VAL = 0;
+#endif
+
+#ifdef TCSUPPORT_MIPS_1004K
+ TCSUPPORT_MIPS_1004K_VAL = 1;
+#else
+ TCSUPPORT_MIPS_1004K_VAL = 0;
+#endif
+
+ return;
+}
+
+
+
Index: linux-3.18.21/arch/mips/econet/malta-amon.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/econet/malta-amon.c 2018-02-05 13:19:53.000000000 +0800
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2007, 2011 MIPS Technologies, Inc.
+ * All rights reserved.
+
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Arbitrary Monitor interface
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+
+#include <asm/addrspace.h>
+#ifdef TCSUPPORT_MIPS_1004K
+#include <cpu/launch.h>
+#else
+#include <asm/mips-boards/launch.h>
+#endif
+#include <asm/mipsmtregs.h>
+
+/*
+ * amon_cpu_avail - check whether @cpu can still be launched.
+ * Returns 1 when the CPU's cpulaunch slot is ready (LAUNCH_FREADY set)
+ * and the CPU has not already been started (LAUNCH_FGO/FGONE clear);
+ * 0 otherwise.
+ */
+int amon_cpu_avail(int cpu)
+{
+#ifdef TCSUPPORT_MIPS_1004K
+	struct cpulaunch *launch = (struct cpulaunch *)CPU_LAUNCH_BASE;
+#else
+	struct cpulaunch *launch = (struct cpulaunch *)CKSEG0ADDR(CPULAUNCH);
+#endif
+	if (cpu < 0 || cpu >= NCPULAUNCH) {
+		printk(KERN_WARNING "avail: cpu%d is out of range\n", cpu);
+		return 0;
+	}
+
+	launch += cpu;
+	if (!(launch->flags & LAUNCH_FREADY)) {
+		printk(KERN_WARNING "avail: cpu%d is not ready\n", cpu);
+		return 0;
+	}
+	if (launch->flags & (LAUNCH_FGO|LAUNCH_FGONE)) {
+		printk(KERN_WARNING "avail: too late.. cpu%d is already gone\n", cpu);
+		return 0;
+	}
+
+	return 1;
+}
+
+/*
+ * amon_cpu_start - start a parked secondary CPU.
+ * Publishes @pc/@sp/@gp/@a0 through the CPU's cpulaunch record, sets
+ * LAUNCH_FGO, and busy-waits until the target acknowledges with
+ * LAUNCH_FGONE.  Does nothing if @cpu is unavailable or is the caller.
+ */
+void amon_cpu_start(int cpu,
+	unsigned long pc, unsigned long sp,
+	unsigned long gp, unsigned long a0)
+{
+	volatile struct cpulaunch *launch =
+	#ifdef TCSUPPORT_MIPS_1004K
+		(struct cpulaunch *)CPU_LAUNCH_BASE;
+	#else
+		(struct cpulaunch *)CKSEG0ADDR(CPULAUNCH);
+	#endif
+
+	if (!amon_cpu_avail(cpu)) {
+		return;
+	}
+	if (cpu == smp_processor_id()) {
+		printk(KERN_INFO "launch: I am cpu%d!\n", cpu);
+		return;
+	}
+	launch += cpu;
+
+	printk(KERN_INFO "launch: starting cpu%d\n", cpu);
+
+	launch->pc = pc;
+	launch->gp = gp;
+	launch->sp = sp;
+	launch->a0 = a0;
+
+	smp_wmb();	/* Target must see parameters before go */
+	launch->flags |= LAUNCH_FGO;
+	smp_wmb();	/* Target must see go before we poll */
+	while ((launch->flags & LAUNCH_FGONE) == 0)
+		;
+	smp_rmb();	/* Target will be updating flags soon */
+	printk(KERN_INFO "launch: cpu%d gone!\n", cpu);
+}
+
Index: linux-3.18.21/arch/mips/econet/malta-smp.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/econet/malta-smp.c 2018-02-05 13:19:53.000000000 +0800
@@ -0,0 +1,204 @@
+/*
+ * Copyright (C) 2000, 2001, 2004 MIPS Technologies, Inc.
+ * Copyright (C) 2001 Ralf Baechle
+ * Copyright (C) 2010 PMC-Sierra, Inc.
+ *
+ * VSMP support for MSP platforms . Derived from malta vsmp support.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ */
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+#include <linux/kernel_stat.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <asm/traps.h>
+#include <asm/irq_cpu.h>
+#include <asm/irq_regs.h>
+#include <asm/setup.h>
+#include <asm/tc3162/tc3162.h>
+
+#ifdef CONFIG_MIPS_MT_SMP
+
+#define MIPS_CPU_IPI_RESCHED_IRQ SI_SWINT_INT0 /* SW int 0 for resched */
+#define MIPS_CPU_IPI_CALL_IRQ SI_SWINT_INT1 /* SW int 1 for call */
+
+
+static void ipi_resched_dispatch(void)
+{
+ //printk("ipi_resched_dispatch\n");
+ do_IRQ(MIPS_CPU_IPI_RESCHED_IRQ);
+}
+
+static void ipi_call_dispatch(void)
+{
+ //printk("ipi_call_dispatch\n");
+ do_IRQ(MIPS_CPU_IPI_CALL_IRQ);
+}
+
+static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
+{
+ scheduler_ipi();
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
+{
+ smp_call_function_interrupt();
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction irq_resched = {
+ .handler = ipi_resched_interrupt,
+ .flags = IRQF_DISABLED | IRQF_PERCPU,
+ .name = "IPI_resched"
+};
+
+static struct irqaction irq_call = {
+ .handler = ipi_call_interrupt,
+ .flags = IRQF_DISABLED | IRQF_PERCPU,
+ .name = "IPI_call"
+};
+
+void __init arch_init_ipiirq(int irq, struct irqaction *action)
+{
+ setup_irq(irq, action);
+ irq_set_handler(irq, handle_percpu_irq);
+}
+
+extern void *set_vi_handler(int n, vi_handler_t addr);
+
+void __init vsmp_int_init(void)
+{
+ //printk("vsmp_int_init\n");
+ set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
+ set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
+ arch_init_ipiirq(MIPS_CPU_IPI_RESCHED_IRQ, &irq_resched);
+ arch_init_ipiirq(MIPS_CPU_IPI_CALL_IRQ, &irq_call);
+
+ VPint(CR_INTC_IMR) |= (1 << (SI_SWINT1_INT0-1));
+ VPint(CR_INTC_IMR) |= (1 << (SI_SWINT1_INT1-1));
+}
+
+//int plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
+int plat_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
+ bool force)
+{
+ unsigned int irq = d->irq;
+ cpumask_t tmask;
+ int cpu = 0;
+ //void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);
+ int irq_vpe0 = 0;
+ int irq_vpe1 = 0;
+ unsigned int offset1, offset2, tmp;
+
+#if 0
+ printk("\r\n\r\n%s affinity origin value is %08x\r\n\r\n", __FUNCTION__, *affinity);
+
+ if(*(unsigned long *)affinity == 0x8)
+ {
+ *(unsigned long *)affinity = 0x2;
+ }
+ else if(*(unsigned long *)affinity == 0xf)
+ {
+ *(unsigned long *)affinity = 0x3;
+ }
+ else
+ {
+ *(unsigned long *)affinity = 0x1;
+ }
+
+ printk("\r\n\r\n%s affinity changed value is %08x\r\n\r\n", __FUNCTION__, *affinity);
+#endif
+
+ /*
+ * On the legacy Malta development board, all I/O interrupts
+ * are routed through the 8259 and combined in a single signal
+ * to the CPU daughterboard, and on the CoreFPGA2/3 34K models,
+ * that signal is brought to IP2 of both VPEs. To avoid racing
+ * concurrent interrupt service events, IP2 is enabled only on
+ * one VPE, by convention VPE0. So long as no bits are ever
+ * cleared in the affinity mask, there will never be any
+ * interrupt forwarding. But as soon as a program or operator
+ * sets affinity for one of the related IRQs, we need to make
+ * sure that we don't ever try to forward across the VPE boundary,
+ * at least not until we engineer a system where the interrupt
+ * _ack() or _end() function can somehow know that it corresponds
+ * to an interrupt taken on another VPE, and perform the appropriate
+ * restoration of Status.IM state using MFTR/MTTR instead of the
+ * normal local behavior. We also ensure that no attempt will
+ * be made to forward to an offline "CPU".
+ */
+
+ cpumask_copy(&tmask, affinity);
+ for_each_cpu(cpu, affinity) {
+ printk("%s: cpu %d \r\n", __FUNCTION__, cpu);
+ if (!cpu_online(cpu))
+ {
+ cpu_clear(cpu, tmask);
+ printk("%s: cpu %d cpu_clear\r\n", __FUNCTION__, cpu);
+
+ }
+ else {
+ printk("%s: cpu %d vpe_id %d \r\n", __FUNCTION__, cpu, cpu_data[cpu].vpe_id);
+#if 0
+ if (cpu_data[cpu].vpe_id == 0)
+ irq_vpe0++;
+ else
+ irq_vpe1++;
+#else
+ if(cpu == 0)
+ irq_vpe0++;
+ else
+ irq_vpe1++;
+#endif
+ }
+
+ }
+ //cpumask_copy(irq_desc[irq].affinity_hint, &tmask);
+
+ printk("%s: irq_vpe0 %d irq_vpe1 %d, irq = %d\r\n", __FUNCTION__, irq_vpe0, irq_vpe1, irq);
+
+
+ /* change IRQ binding to VPE0 or VPE1 */
+ offset1= 32 - irq;
+ offset2 = ((irq - 1) % 4) * 8 + 4;
+ offset1 = (offset1 >> 2) << 2;
+ tmp = regRead32((CR_INTC_IVSR0 + offset1));
+ if (irq_vpe0 >= irq_vpe1)
+ tmp &= ~(1<<offset2);
+ else
+ tmp |= (1<<offset2);
+ regWrite32((CR_INTC_IVSR0 + offset1), tmp);
+ if (cpus_empty(tmask))
+ /*
+ * We could restore a default mask here, but the
+ * runtime code can anyway deal with the null set
+ */
+ printk(KERN_WARNING
+ "IRQ affinity leaves no legal CPU for IRQ %d\n", irq);
+
+ /* Do any generic SMTC IRQ affinity setup */
+ //smtc_set_irq_affinity(irq, tmask);
+ printk("%s: irq num %d \r\n", __FUNCTION__, irq);
+ return 0;
+}
+
+#endif /* CONFIG_MIPS_MT_SMP */
+
Index: linux-3.18.21/arch/mips/econet/malta-smtc.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/econet/malta-smtc.c 2018-02-05 13:19:53.000000000 +0800
@@ -0,0 +1,191 @@
+/*
+ * Malta Platform-specific hooks for SMP operation
+ */
+#include <linux/irq.h>
+#include <linux/init.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/smtc.h>
+#include <asm/smtc_ipi.h>
+#include <asm/tc3162/tc3162.h>
+
+/* VPE/SMP Prototype implements platform interfaces directly */
+
+/*
+ * Cause the specified action to be performed on a targeted "CPU"
+ */
+
+static void msmtc_send_ipi_single(int cpu, unsigned int action)
+{
+ /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
+ smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
+}
+
+static void msmtc_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+{
+ unsigned int i;
+
+ for_each_cpu(i, mask)
+ msmtc_send_ipi_single(i, action);
+}
+
+/*
+ * Post-config but pre-boot cleanup entry point
+ */
+static void __cpuinit msmtc_init_secondary(void)
+{
+ void smtc_init_secondary(void);
+ int myvpe;
+
+ /* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */
+ myvpe = read_c0_tcbind() & TCBIND_CURVPE;
+ if (myvpe != 0) {
+ /* Ideally, this should be done only once per VPE, but... */
+#ifdef CONFIG_MIPS_TC3262
+ clear_c0_status(STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP5 | STATUSF_IP6
+ | STATUSF_IP7);
+ set_c0_status(STATUSF_IP0 | STATUSF_IP1);
+#else
+ clear_c0_status(ST0_IM);
+ set_c0_status((0x100 << cp0_compare_irq)
+ | (0x100 << MIPS_CPU_IPI_IRQ));
+ if (cp0_perfcount_irq >= 0)
+ set_c0_status(0x100 << cp0_perfcount_irq);
+#endif
+ }
+
+ smtc_init_secondary();
+}
+
+/*
+ * Platform "CPU" startup hook
+ */
+static void __cpuinit msmtc_boot_secondary(int cpu, struct task_struct *idle)
+{
+ smtc_boot_secondary(cpu, idle);
+}
+
+extern void tc3162_enable_irq(unsigned int irq);
+
+/*
+ * SMP initialization finalization entry point
+ */
+static void __cpuinit msmtc_smp_finish(void)
+{
+ smtc_smp_finish();
+}
+
+/*
+ * Hook for after all CPUs are online
+ */
+
+static void msmtc_cpus_done(void)
+{
+}
+
+/*
+ * Platform SMP pre-initialization
+ *
+ * As noted above, we can assume a single CPU for now
+ * but it may be multithreaded.
+ */
+
+static void __init msmtc_smp_setup(void)
+{
+ /*
+ * we won't get the definitive value until
+ * we've run smtc_prepare_cpus later, but
+ * we would appear to need an upper bound now.
+ */
+ smp_num_siblings = smtc_build_cpu_map(0);
+}
+
+static void __init msmtc_prepare_cpus(unsigned int max_cpus)
+{
+ smtc_prepare_cpus(max_cpus);
+}
+
+struct plat_smp_ops msmtc_smp_ops = {
+ .send_ipi_single = msmtc_send_ipi_single,
+ .send_ipi_mask = msmtc_send_ipi_mask,
+ .init_secondary = msmtc_init_secondary,
+ .smp_finish = msmtc_smp_finish,
+ .cpus_done = msmtc_cpus_done,
+ .boot_secondary = msmtc_boot_secondary,
+ .smp_setup = msmtc_smp_setup,
+ .prepare_cpus = msmtc_prepare_cpus,
+};
+
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+/*
+ * IRQ affinity hook
+ */
+
+int plat_set_irq_affinity(struct irq_data *data, const struct cpumask *affinity, bool force)
+//int plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
+{
+ unsigned int irq = data->irq;
+ cpumask_t tmask;
+ int cpu = 0;
+ void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);
+ int irq_vpe0 = 0;
+ int irq_vpe1 = 0;
+ unsigned int offset1, offset2, tmp;
+
+ /*
+ * On the legacy Malta development board, all I/O interrupts
+ * are routed through the 8259 and combined in a single signal
+ * to the CPU daughterboard, and on the CoreFPGA2/3 34K models,
+ * that signal is brought to IP2 of both VPEs. To avoid racing
+ * concurrent interrupt service events, IP2 is enabled only on
+ * one VPE, by convention VPE0. So long as no bits are ever
+ * cleared in the affinity mask, there will never be any
+ * interrupt forwarding. But as soon as a program or operator
+ * sets affinity for one of the related IRQs, we need to make
+ * sure that we don't ever try to forward across the VPE boundary,
+ * at least not until we engineer a system where the interrupt
+ * _ack() or _end() function can somehow know that it corresponds
+ * to an interrupt taken on another VPE, and perform the appropriate
+ * restoration of Status.IM state using MFTR/MTTR instead of the
+ * normal local behavior. We also ensure that no attempt will
+ * be made to forward to an offline "CPU".
+ */
+ cpumask_copy(&tmask, affinity);
+ for_each_cpu(cpu, affinity) {
+ if (!cpu_online(cpu))
+ cpu_clear(cpu, tmask);
+ else {
+ if (cpu_data[cpu].vpe_id == 0)
+ irq_vpe0++;
+ else
+ irq_vpe1++;
+ }
+
+ }
+ //printk(KERN_INFO "plat_set_irq_affinity 150, irq = %d\n", irq);
+ // 2.6.36
+ //cpumask_copy(irq_desc[irq].affinity, &tmask);
+ /* change IRQ binding to VPE0 or VPE1 */
+ offset1= 32 - irq;
+ offset2 = ((irq - 1) % 4) * 8 + 4;
+ offset1 = (offset1 >> 2) << 2;
+ tmp = regRead32((CR_INTC_IVSR0 + offset1));
+ if (irq_vpe0 >= irq_vpe1)
+ tmp &= ~(1<<offset2);
+ else
+ tmp |= (1<<offset2);
+ regWrite32((CR_INTC_IVSR0 + offset1), tmp);
+ if (cpus_empty(tmask))
+ /*
+ * We could restore a default mask here, but the
+ * runtime code can anyway deal with the null set
+ */
+ printk(KERN_WARNING
+ "IRQ affinity leaves no legal CPU for IRQ %d\n", irq);
+
+ /* Do any generic SMTC IRQ affinity setup */
+ smtc_set_irq_affinity(irq, tmask);
+ return 0;
+}
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
Index: linux-3.18.21/arch/mips/econet/prom.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/econet/prom.c 2018-02-05 14:20:19.000000000 +0800
@@ -0,0 +1,561 @@
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/bootmem.h>
+#include <linux/blkdev.h>
+
+#include <asm/mipsmtregs.h>
+#include <asm/addrspace.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/time.h>
+#include <asm/tc3162/tc3162.h>
+#include <asm/traps.h>
+
+#ifdef TCSUPPORT_INIC_CLIENT
+#include <linux/mtd/fttdp_inic.h>
+#endif
+#if defined (CONFIG_MIPS_CMP)
+#include <asm/mips-cm.h>
+#include <asm/smp-ops.h>
+#endif
+extern int __imem, __dmem;
+
+#ifdef CONFIG_RCU_STALL_COMMON
+extern int rcu_cpu_stall_suppress __read_mostly;
+#endif
+
+/* frankliao added 20101215 */
+unsigned long flash_base;
+EXPORT_SYMBOL(flash_base);
+unsigned int (*ranand_read_byte)(unsigned long long) = NULL;
+EXPORT_SYMBOL(ranand_read_byte);
+unsigned int (*ranand_read_dword)(unsigned long long) = NULL;
+EXPORT_SYMBOL(ranand_read_dword);
+
+#ifdef CONFIG_MIPS_TC3262
+unsigned char io_swap_noneed=0;
+EXPORT_SYMBOL(io_swap_noneed);
+#endif
+
+static void tc3162_component_setup(void)
+{
+ unsigned int controlReg;
+ unsigned long flags;
+
+ /* setup bus timeout value */
+ VPint(CR_AHB_AACS) = 0xffff;
+
+ /* reset hwnat */
+ if (isRT65168) {
+ /* table reset */
+ VPint(0xbfbe0024) = 0x0;
+ VPint(0xbfbe0024) = 0xffff;
+
+ /* hwnat swreset */
+ VPint(0xbfbe0000) = (1<<1);
+ }
+
+#ifdef CONFIG_CPU_TC3162
+#ifdef CONFIG_TC3162_IMEM
+ /* setup imem start address */
+ VPint(CR_IMEM) = CPHYSADDR(&__imem);
+
+ /* clear internal imem */
+ local_irq_save(flags);
+ controlReg = read_c0_cctl();
+ write_c0_cctl(controlReg & ~CCTL_IMEMOFF);
+ write_c0_cctl(controlReg | CCTL_IMEMOFF);
+ write_c0_cctl(controlReg);
+ local_irq_restore(flags);
+
+ /* refill internal imem */
+ local_irq_save(flags);
+ controlReg = read_c0_cctl();
+ write_c0_cctl(controlReg & ~CCTL_IMEMFILL4);
+ write_c0_cctl(controlReg | CCTL_IMEMFILL4);
+ write_c0_cctl(controlReg);
+ local_irq_restore(flags);
+
+ printk("Enable IMEM addr=%x\n", CPHYSADDR(&__imem));
+#endif
+
+#ifdef CONFIG_TC3162_DMEM
+ /* setup dmem start address */
+ VPint(CR_DMEM) = CPHYSADDR(&__dmem);
+
+ memcpy((void *) 0xa0001000, (void *) KSEG1ADDR(&__dmem), 0x800);
+
+ /* clear internal dmem */
+ local_irq_save(flags);
+ controlReg = read_c0_cctl();
+ write_c0_cctl(controlReg & ~CCTL_DMEMOFF);
+ write_c0_cctl(controlReg | CCTL_DMEMOFF);
+ write_c0_cctl(controlReg);
+ local_irq_restore(flags);
+
+ /* internal dmem on */
+ local_irq_save(flags);
+ controlReg = read_c0_cctl();
+ write_c0_cctl(controlReg & ~CCTL_DMEMON);
+ write_c0_cctl(controlReg | CCTL_DMEMON);
+ write_c0_cctl(controlReg);
+ local_irq_restore(flags);
+
+ printk("Enable DMEM addr=%x\n", CPHYSADDR(&__dmem));
+
+ memcpy((void *) KSEG1ADDR(&__dmem), (void *) 0xa0001000, 0x800);
+#endif
+#endif
+}
+
+/* frankliao added 20101215 */
+void flash_init(void)
+{
+
+ if ((IS_NANDFLASH) && (isRT63165 || isRT63365 || isMT751020 || isEN751221 || isEN751627 || isEN7580)) {
+ flash_base = 0x0;
+ } else {
+ if(isMT751020 || isMT7505 || isEN751221 || isEN751627 || isEN7580){
+ #ifdef TCSUPPORT_INIC_CLIENT
+ /* To use last INIC_CLIENT_RAM_SIMU_OFFSET size of RAM as flash */
+ flash_base = 0xA0000000 + (0x800000 * (1 << (((VPint(0xbfb0008c) >> 13) & 0x7) - 1))- INIC_CLIENT_RAM_SIMU_MAX_SIZE);
+ #else
+ flash_base = 0xbc000000;
+ #endif
+ } else if (isTC3162U || isRT63260 || isRT65168 || isTC3182 || isRT63165 || isRT63365) {
+ flash_base = 0xb0000000;
+ } else {
+ flash_base = 0xbfc00000;
+ }
+ printk("%s: flash_base:%x \n",__func__,flash_base);
+ }
+}
+
+const char *get_system_type(void)
+{
+#ifdef CONFIG_MIPS_TC3262
+ if( isEN751627){
+ io_swap_noneed = 1;
+ return "EcoNet EN751627 SOC";
+ }else if(isEN7580){
+ io_swap_noneed = 1;
+ return "EcoNet EN7580 SOC";
+ }else if(isEN751221){
+ io_swap_noneed = 1;
+ return "EcoNet EN751221 SOC";
+ }else if (isTC3182)
+ return "TrendChip TC3182 SOC";
+ else if (isRT65168)
+ return "Ralink RT65168 SOC";
+ else if (isRT63165){
+ io_swap_noneed = 1;
+ return "Ralink RT63165 SOC";
+ } else if (isRT63365) {
+ io_swap_noneed = 1;
+#ifdef TCSUPPORT_DYING_GASP
+ if(!isRT63368){
+ //gpio 4 is share pin for rt63365.
+ VPint(0xbfb00860) &= ~(1<<13);//disable port 4 led when use rt63365.
+ }
+#endif
+ return "Ralink RT63365 SOC";
+ }else if (isMT751020){
+ io_swap_noneed = 1;
+ return "Ralink MT751020 SOC";
+ }else if (isMT7505){
+ io_swap_noneed = 1;
+ return "Ralink MT7505 SOC";
+ }
+ else
+ return "TrendChip TC3169 SOC";
+#else
+ if (isRT63260)
+ return "Ralink RT63260 SOC";
+ else if (isTC3162U)
+ return "TrendChip TC3162U SOC";
+ else if (isTC3162L5P5)
+ return "TrendChip TC3162L5/P5 SOC";
+ else if (isTC3162L4P4)
+ return "TrendChip TC3162L4/P4 SOC";
+ else if (isTC3162L3P3)
+ return "TrendChip TC3162L2F/P2F";
+ else if (isTC3162L2P2)
+ return "TrendChip TC3162L2/P2";
+ else
+ return "TrendChip TC3162";
+#endif
+}
+
+extern struct plat_smp_ops msmtc_smp_ops;
+#define VECTORSPACING 0x100 /* for EI/VI mode */
+
+
+void __init mips_nmi_setup (void)
+{
+ void *base;
+ extern char except_vec_nmi;
+ #if 0
+ base = cpu_has_veic ?
+ (void *)(CAC_BASE + 0xa80) :
+ (void *)(CAC_BASE + 0x380);
+ #endif
+
+ base = cpu_has_veic ?
+ (void *)(ebase + 0x200 + VECTORSPACING*64) :
+ (void *)(ebase + 0x380);
+
+ printk("nmi base is %x\n",base);
+
+ //Fill the NMI_Handler address in a register, which is a R/W register
+ //start.S will read it, then jump to NMI_Handler address
+ VPint(0xbfb00244) = base;
+
+ memcpy(base, &except_vec_nmi, 0x80);
+ flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
+}
+
+void cpu_dma_round_robin(uint8 mode)
+{
+ uint32 reg_value=0;
+ reg_value = VPint(ARB_CFG);
+ if(mode == ENABLE){
+ reg_value |= ROUND_ROBIN_ENABLE;
+ } else {
+ reg_value &= ROUND_ROBIN_DISBALE;
+ }
+ VPint(ARB_CFG) = reg_value;
+}
+
+
+void __init prom_init(void)
+{
+ unsigned long memsize;
+ unsigned char samt;
+ unsigned long col;
+ unsigned long row;
+ unsigned int qdma_res = 0;
+
+#ifdef CONFIG_RCU_STALL_COMMON
+ rcu_cpu_stall_suppress =1;
+#endif
+
+
+
+
+ /* frankliao added 20101222 */
+ flash_init();
+
+#ifdef CONFIG_MIPS_TC3262
+
+#ifdef TCSUPPORT_IS_FH_PON
+#if !defined(TCSUPPORT_FH_JOYMEV2_PON)
+ strcat(arcs_cmdline, "rootfstype=jffs2 ro init=/etc/preinit.sh");
+#endif
+#endif
+
+ if (isRT63165 || isRT63365 || isMT751020 || isMT7505 || isEN751221 ) {
+ /* enable external sync */
+ strcat(arcs_cmdline, " es=1");
+
+#ifndef CONFIG_SMP
+ /* enable external sync */
+ {
+ /* when kernel is UP, set ES=1. Otherwise, set in mips_mt_set_cpuoptions */
+ unsigned int oconfig7 = read_c0_config7();
+ unsigned int nconfig7 = oconfig7;
+
+ nconfig7 |= (1 << 8);
+
+ __asm__ __volatile("sync");
+ write_c0_config7(nconfig7);
+ ehb();
+ printk("Config7: 0x%08x\n", read_c0_config7());
+ }
+#endif
+ }
+
+ if(isMT751020){
+ memsize = 0x800000 * (1 << (((VPint(0xbfb0008c) >> 13) & 0x7) - 1));
+ if(memsize >= (448<<20)){
+ memsize = (448<<20);
+ }
+ printk("memsize:%dMB\n", (memsize>>20));
+ }
+ else if(isMT7505){
+ if(isFPGA)
+ memsize = 0x800000 * (1 << (((VPint(0xbfb0008c) >> 13) & 0x7) - 1));
+ else{
+ if(VPint(CR_AHB_HWCONF) & (1<<10))
+ {
+ /* DDR1 */
+ memsize = 0x4000000 / (1 << ((VPint(CR_AHB_HWCONF) >> 11) & 0x3));
+ }
+ else{
+ /* DDR2 */
+				if(!((VPint(CR_AHB_HWCONF) >> 11) & 0x3))
+					memsize = 256 * 0x100000;
+				else if(((VPint(CR_AHB_HWCONF) >> 11) & 0x3) == 0x1)	/* was a separate if: dangling else below clobbered the 256MB/32MB cases with 64MB */
+					memsize = 32 * 0x100000;
+				else if(((VPint(CR_AHB_HWCONF) >> 11) & 0x3) == 0x2)
+					memsize = 128 * 0x100000;
+				else
+					memsize = 64 * 0x100000;
+ }
+ }
+ printk("memsize:%dMB\n", (memsize>>20));
+
+ }else if(isEN751221 || isEN751627 || isEN7580){
+ memsize = GET_DRAM_SIZE;
+ if(memsize>512)
+ memsize = 512;
+#if (TCSUPPORT_QDMA_WAN_DSCP_NUM == 8)
+ qdma_res = 16;
+#else
+ qdma_res = 8;
+#endif
+ if(memsize == 512){
+ memsize = 448-qdma_res;
+ }
+#if !defined(TCSUPPORT_SLM_EN) ||(TCSUPPORT_QDMA_WAN_DSCP_NUM == 8)
+ else{
+ memsize = memsize-qdma_res;
+ }
+#endif
+ memsize = memsize << 20;
+ printk("memsize:%dMB\n", (memsize>>20));
+ }else if (isRT63165 || isRT63365) {
+ /* DDR */
+ if (VPint(CR_AHB_HWCONF) & (1<<25)) {
+ memsize = 0x800000 * (1 << (((VPint(CR_DMC_DDR_CFG1) >> 18) & 0x7) - 1));
+
+ /* SDRAM */
+ } else {
+ unsigned long sdram_cfg1;
+
+ /* calculate SDRAM size */
+ sdram_cfg1 = VPint(0xbfb20004);
+ row = 11 + ((sdram_cfg1>>16) & 0x3);
+ col = 8 + ((sdram_cfg1>>20) & 0x3);
+ /* 4 bands and 16 bit width */
+ memsize = (1 << row) * (1 << col) * 4 * 2;
+ }
+ } else {
+ memsize = 0x800000 * (1 << (((VPint(CR_DMC_CTL1) >> 18) & 0x7) - 1));
+ }
+#else
+ /* calculate SDRAM size */
+ samt = VPchar(CR_DMC_SAMT);
+ row = 8 + (samt & 0x3);
+ col = 11 + ((samt>>2) & 0x3);
+ /* 4 bands and 16 bit width */
+ memsize = (1 << row) * (1 << col) * 4 * 2;
+#endif
+
+ printk("%s prom init\n", get_system_type());
+
+ tc3162_component_setup();
+
+ #ifdef TCSUPPORT_INIC_CLIENT
+ add_memory_region(0 + 0x20000, memsize - 0x20000 - INIC_CLIENT_RAM_SIMU_MAX_SIZE, BOOT_MEM_RAM);
+ #else
+ add_memory_region(0 + 0x20000, memsize - 0x20000, BOOT_MEM_RAM);
+ #endif
+ if (isMT751020 || isMT7505 || isEN751221 || isEN751627 || isEN7580) {
+ board_nmi_handler_setup = mips_nmi_setup;
+ }
+
+ //mips_machgroup = MACH_GROUP_TRENDCHIP;
+ //mips_machtype = MACH_TRENDCHIP_TC3162;
+ /*set CPU DMA RR */
+ if (isMT751020 || isEN751221 || isEN751627 || isEN7580) {
+ cpu_dma_round_robin(ENABLE);
+ }
+#if defined (CONFIG_MIPS_CMP)
+ /* Early detection of CMP support */
+ mips_cm_probe();
+
+ if (register_cmp_smp_ops())
+ printk("\n\nError: register_cmp_smp_ops failed due to CM being absent !!!\n\n");
+
+#else
+#ifdef CONFIG_MIPS_MT_SMP
+ extern struct plat_smp_ops vsmp_smp_ops;
+ register_smp_ops(&vsmp_smp_ops);
+#endif
+#endif
+#ifdef CONFIG_MIPS_MT_SMTC
+ register_smp_ops(&msmtc_smp_ops);
+#endif
+}
+
+void __init prom_free_prom_memory(void)
+{
+ /* We do not have any memory to free */
+}
+
+int prom_putchar(char data)
+{
+ while (!(LSR_INDICATOR & LSR_THRE))
+ ;
+ VPchar(CR_UART_THR) = data;
+ return 1;
+}
+EXPORT_SYMBOL(prom_putchar);
+
+char prom_getchar(void)
+{
+ while (!(LSR_INDICATOR & LSR_RECEIVED_DATA_READY))
+ ;
+ return VPchar(CR_UART_RBR);
+}
+
+static char ppbuf[1024];
+
+void
+prom_write(const char *buf, unsigned int n)
+{
+ char ch;
+
+ while (n != 0) {
+ --n;
+ if ((ch = *buf++) == '\n')
+ prom_putchar('\r');
+ prom_putchar(ch);
+ }
+}
+EXPORT_SYMBOL(prom_write);
+
+void
+prom_printf(const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i = vscnprintf(ppbuf, sizeof(ppbuf), fmt, args);
+ va_end(args);
+
+ prom_write(ppbuf, i);
+}
+EXPORT_SYMBOL(prom_printf);
+
+#ifdef CONFIG_KGDB
+static unsigned long uclk_65000[13]={
+ 357500, // uclk 5.5 Baud Rate 115200
+ 175500, // uclk 2.7 Baud Rate 57600
+ 119808, // uclk 1.8432 Baud Rate 38400
+ 89856, // uclk 1.3824 Baud Rate 28800
+ 59904, // uclk 0.9216 Baud Rate 19200
+ 44928, // uclk 0.6912 Baud Rate 14400
+ 29952, // uclk 0.4608 Baud Rate 9600
+ 14976, // uclk 0.2304 Baud Rate 4800
+ 7488, // uclk 0.1152 Baud Rate 2400
+ 3744, // uclk 0.0576 Baud Rate 1200
+ 1872, // uclk 0.0288 Baud Rate 600
+ 936, // uclk 0.0144 Baud Rate 300
+ 343 // uclk 0.00528 Baud Rate 110
+};
+
+static void hsuartInit(void)	/* one-time init of the high-speed UART used as the KGDB console */
+{
+	unsigned long div_x,div_y;
+	unsigned long word;
+	unsigned long tmp;
+
+	tmp = VPint(CR_GPIO_CTRL);
+	tmp &= ~0x0fa30000;
+	tmp |= 0x0fa30000;
+	VPint(CR_GPIO_CTRL) = tmp;	// set GPIO pin 13 & pin 12 are alternative outputs, GPIO pin 11 & pin 10 are alternative inputs
+	tmp = VPint(CR_GPIO_ODRAIN);
+	tmp &= ~0x00003000;
+	tmp |= 0x00003000;
+	VPint (CR_GPIO_ODRAIN) = tmp;	// set GPIO output enable
+
+// Set FIFO control enable, reset RFIFO, TFIFO, 16550 mode, watermark=0x00 (1 byte)
+	VPchar(CR_HSUART_FCR) = UART_FCR|UART_WATERMARK;
+
+// Set modem control to 0
+	VPchar(CR_HSUART_MCR) = UART_MCR;
+
+// Disable IRDA, Disable Power Saving Mode, RTS , CTS flow control
+	VPchar(CR_HSUART_MISCC) = UART_MISCC;
+
+	/* access the baudrate divider (set DLAB) */
+	VPchar(CR_HSUART_LCR) = UART_BRD_ACCESS;
+
+	div_y = UART_XYD_Y;
+	div_x = (unsigned int)(uclk_65000[0]/SYS_HCLK)*2;
+	word = (div_x<<16)|div_y;
+	VPint(CR_HSUART_XYD) = word;
+
+/* Set Baud Rate Divisor to 3*16 */
+	VPchar(CR_HSUART_BRDL) = UART_BRDL;
+	VPchar(CR_HSUART_BRDH) = UART_BRDH;
+
+/* Set DLAB = 0, clength = 8, stop =1, no parity check */
+	VPchar(CR_HSUART_LCR) = UART_LCR;
+
+// Set interrupt enable: enable Tx, Rx and Line status interrupts
+	VPchar(CR_HSUART_IER) = UART_IER;
+}
+
+static int hsuartInitialized = 0;
+
+int putDebugChar(char c)
+{
+ if (!hsuartInitialized) {
+ hsuartInit();
+ hsuartInitialized = 1;
+ }
+
+ while (!(VPchar(CR_HSUART_LSR) & LSR_THRE))
+ ;
+ VPchar(CR_HSUART_THR) = c;
+
+ return 1;
+}
+
+char getDebugChar(void)
+{
+ if (!hsuartInitialized) {
+ hsuartInit();
+ hsuartInitialized = 1;
+ }
+
+ while (!(VPchar(CR_HSUART_LSR) & LSR_RECEIVED_DATA_READY))
+ ;
+ return VPchar(CR_HSUART_RBR);
+}
+#endif
+#if defined(TCSUPPORT_DYING_GASP) && (defined(CONFIG_MIPS_RT65168) || defined(CONFIG_MIPS_RT63365))
+__IMEM
+void dying_gasp_setup_mem_cpu(void){
+#ifdef CONFIG_MIPS_RT65168
+ VPint(0xbfb20000) |= (1<<12); //set ddr to self refresh mode.
+ VPint(0xbfb000c0) &= ~((1<<5)|(1<<6)|(1<<7));//CPU divide to 32 and ram divide to 3
+ VPint(0xbfb000c0) |= (1<<3)|(1<<4)|(1<<5)|(1<<7);
+#endif
+#ifdef CONFIG_MIPS_RT63365
+#if defined(TCSUPPORT_CPU_MT7510)|| defined(TCSUPPORT_CPU_MT7520) || defined(TCSUPPORT_CPU_MT7505) || defined(TCSUPPORT_CPU_EN7512)
+ VPint(0xbfb00044) = 1; //Enable DDR Self Refresh Mode
+ VPint(0xbfb20004) &= ~(1<<15);
+ VPint(0xbfb200e4) &= ~(1<<2);
+ VPint(0xbfb00074) |= (1<<4);
+ VPint(0xbfb00040) |= (1<<0); // reset ddr device
+#else
+ VPint(0xbfb00040) |= (1<<0); // reset ddr device
+ //do not kill CPU because we need do watchdog interrupt
+ //kill CPU
+ //VPint(0xbfb001c8) |= (1<<24); // bypass pll 2 700M
+ //VPint(0xbfb001cc) |= (1<<24); // bypass pll 2 665M
+ //VPint(0xbfb001d0) |= (1<<24); // bypass pll 2 500
+#endif
+#endif
+ if (cpu_wait)
+ (*cpu_wait)();
+}
+EXPORT_SYMBOL(dying_gasp_setup_mem_cpu);
+#endif
+
+
Index: linux-3.18.21/arch/mips/econet/setup.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/econet/setup.c 2018-02-05 14:20:19.000000000 +0800
@@ -0,0 +1,306 @@
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+
+#include <asm/mips-boards/prom.h>
+#include <asm/reboot.h>
+#include <asm/time.h>
+#include <asm/tc3162/tc3162.h>
+#include <asm/tc3162/TCIfSetQuery_os.h>
+
+#include <ecnt_hook/ecnt_hook_pon_phy.h>
+
+#include <linux/rtnetlink.h>
+#include <linux/netdevice.h>
+
+extern void tc3162_time_init(void);
+static void tc3162_machine_restart(char *command);
+static void tc3162_machine_halt(void);
+static void tc3162_machine_power_off(void);
+
+void (*back_to_prom)(void) = (void (*)(void))0xbfc00000;
+
+extern void timerSet(uint32 timer_no, uint32 timerTime, uint32 enable, uint32 mode, uint32 halt);
+extern void timer_WatchDogConfigure(uint8 tick_enable, uint8 watchdog_enable);
+
+#ifdef CONFIG_TC3162_ADSL
+adsldev_ops *adsl_dev_ops = NULL;
+EXPORT_SYMBOL(adsl_dev_ops);
+#ifdef TCSUPPORT_BONDING
+adsldev_ops *adsl_dev_ops_slave = NULL;
+EXPORT_SYMBOL(adsl_dev_ops_slave);
+#endif
+#if defined(CONFIG_RALINK_VDSL)
+
+vdsldev_ops *vdsl_dev_ops = NULL;
+EXPORT_SYMBOL(vdsl_dev_ops);
+#ifdef TCSUPPORT_BONDING
+vdsldev_ops *vdsl_dev_ops_slave = NULL;
+EXPORT_SYMBOL(vdsl_dev_ops_slave);
+#endif
+#endif
+#endif
+
+static void hw_reset(void)
+{
+#ifdef CONFIG_TC3162_ADSL
+ /* stop adsl */
+ if (adsl_dev_ops)
+ adsl_dev_ops->set(ADSL_SET_DMT_CLOSE, NULL, NULL);
+#endif
+
+#if defined(CONFIG_MIPS_TC3262) && defined(TCSUPPORT_POWERSAVE_ENABLE)
+ if(isRT63365){
+ VPint(CR_AHB_CLK) |= 0x57e1;//restore ahb clk to default value
+ }
+
+#endif
+ /* stop each module dma task */
+ disable_all_interrupts();
+
+ VPint(CR_TIMER_CTL) = 0x0;
+
+ /* stop mac dma */
+#ifndef CONFIG_MIPS_TC3262
+ VPint(CR_MAC_MACCR) = 0;
+#endif
+
+ if (isRT63165)
+ goto watchdog_reset;
+
+ /* stop atm sar dma */
+ TSARM_GFR &= ~((1 << 1) | (1 << 0));
+
+ /* reset USB */
+ /* reset USB DMA */
+ VPint(CR_USB_SYS_CTRL_REG) |= (1 << 31);
+ /* reset USB SIE */
+ VPint(CR_USB_DEV_CTRL_REG) |= (1 << 30);
+ mdelay(5);
+
+ /* restore USB SIE */
+ VPint(CR_USB_DEV_CTRL_REG) &= ~(1 << 30);
+ mdelay(5);
+ VPint(CR_USB_SYS_CTRL_REG) &= ~(1 << 31);
+
+#ifdef CONFIG_MIPS_TC3162U
+ /*stop pcie*/
+ VPint(CR_AHB_PCIC) &= 0x9fffffff;
+ /*reset usb 2.0 device*/
+ /*stop interrupt*/
+ VPint(CR_USB20_INTR_ENABLE_REG) = 0x0;
+ /*do usb reset*/
+ VPint(CR_USB20_SYS_CTRL_REG) |= (1 << 31);
+ mdelay(1);
+ VPint(CR_USB20_SYS_CTRL_REG) &= ~(1 << 31);
+ /*sw disconnect*/
+ VPint(CR_USB20_DEV_CTRL_REG) |= (1 << 31);
+#endif
+
+watchdog_reset:
+ /* watchdog reset */
+//#ifdef CONFIG_MIPS_TC3262
+#if defined(TCSUPPORT_WLAN_MT7592_PCIE) && defined(TCSUPPORT_CPU_MT7520)
+ printk("0xbfb00834=0xeff88ce0 \n");
+ printk("0xbfb00380 reset phy \n");
+ disable_all_interrupts();
+
+ VPint(CR_TIMER_CTL) = 0x0;
+#endif
+ timerSet(5, 10 * TIMERTICKS_10MS, ENABLE, TIMER_TOGGLEMODE, TIMER_HALTDISABLE);
+ timer_WatchDogConfigure(ENABLE, ENABLE);
+#if defined(TCSUPPORT_WLAN_MT7592_PCIE) && defined(TCSUPPORT_CPU_MT7520)
+ VPint(0xbfbd0040) = 0x0;
+ mdelay(10);
+ VPint(0xbfb00834) = 0xeff88ce0;
+ VPint(0xbfb00380) &= ~(1<<25);
+ udelay(1) ;
+ VPint(0xbfb00380) |= (1<<25);
+#endif
+#if defined(TCSUPPORT_CPU_MT7505) || defined(TCSUPPORT_CPU_MT7510)
+ VPint(CR_DRAMC_CONF) &= ~(0x1<<2);
+#endif
+
+ while (1);
+//#endif
+}
+
+#define RAND_MAX 32767
+/*_____________________________________________________________________________
+** function name: reverseLong
+** descriptions:
+** Reverse the bytes ordering of the input value.
+**
+** parameters:
+** ul: Specify the 4 bytes value that you want to reverse the ordering.
+**
+** global:
+** None
+**
+** return:
+** Reverse value.
+**
+** call:
+** None
+**
+** revision:
+** 1. Here 2008/08/24
+**____________________________________________________________________________
+*/
+static uint32
+reverseLong(
+ uint32 ul
+)
+{
+ uint8 z[4];
+
+ z[3] = *((uint8 *)&ul + 0);
+ z[2] = *((uint8 *)&ul + 1);
+ z[1] = *((uint8 *)&ul + 2);
+ z[0] = *((uint8 *)&ul + 3);
+
+ return *((uint32 *)z);
+}/*end reverseLong*/
+
+/*_____________________________________________________________________________
+** function name: scramble
+** descriptions:
+** Scramble the input 32bits value.
+**
+** parameters:
+** None
+**
+** global:
+** None
+**
+** return:
+** After Scramble the value
+**
+** call:
+** reverseLong
+**
+** revision:
+** 1. Here 2008/08/24
+**____________________________________________________________________________
+*/
+static uint32
+scramble(uint32 checkCode)
+{
+ uint32 a[6];
+
+ a[1] = (checkCode & 0x0000001F) << 0x0C;
+ a[2] = (checkCode & 0x03E003E0) << 0x01;
+ a[3] = (checkCode & 0xF8000400) >> 0x0A;
+ a[4] = (checkCode & 0x0000F800) << 0x10;
+ a[5] = (checkCode & 0x041F0000) >> 0x0F;
+ checkCode = a[1] + a[2] + a[3] + a[4] + a[5];
+
+ /* ICQ's check code is little-endian. Change the endian style */
+ checkCode = reverseLong(checkCode);
+
+ return checkCode;
+}/*end scramble*/
+
+/*_____________________________________________________________________________
+** function name: rand
+** descriptions:
+** Random value generation.
+**
+** parameters:
+** None
+**
+** global:
+** None
+**
+** return:
+** Random value
+**
+** call:
+** timerVlrGet
+**
+** revision:
+** 1. Here 2008/08/24
+**____________________________________________________________________________
+*/
+uint32
+rand(void){
+	uint32 vlr;
+	timerVlrGet(1, vlr);	/* seed from free-running timer 1 current-value register */
+	vlr = scramble(vlr);	/* scramble() is pure; its return value was previously discarded */
+	return (vlr & RAND_MAX);
+}/*end rand*/
+EXPORT_SYMBOL(rand);
+
+
+
+#if defined(TCSUPPORT_WAN_GPON) || defined(TCSUPPORT_WAN_EPON)
+/* Disable xPON PHY TX power, to prevent abnormal optical Tx while soft-rebooting */
+void xpon_phy_tx_disable(void)
+{
+	xpon_phy_api_data_t api_data;
+	int arg = PHY_DISABLE;
+
+	api_data.api_type = XPON_PHY_API_TYPE_SET ;
+	api_data.cmd_id = PON_SET_PHY_TRANS_POWER_SWITCH ;
+	api_data.data = & arg ;
+	printk("Disable xpon phy Tx power\n");
+	__ECNT_HOOK(ECNT_XPON_PHY, ECNT_XPON_PHY_API, (struct ecnt_data * )(&api_data) ) ;
+}
+#endif
+
+static void tc3162_machine_restart(char *command)
+{
+ printk("Machine restart ... \n");
+
+#if defined(TCSUPPORT_WAN_GPON) || defined(TCSUPPORT_WAN_EPON)
+ xpon_phy_tx_disable();
+#endif
+ hw_reset();
+ back_to_prom();
+}
+
+static void tc3162_machine_halt(void)
+{
+ printk("Machine halted ... \n");
+ hw_reset();
+ while (1);
+}
+
+
+void (*board_time_init)(void);
+static void tc3162_machine_power_off(void)
+{
+ printk("Machine poweroff ... \n");
+ hw_reset();
+ while (1);
+}
+
+static int panic_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ tc3162_machine_restart(NULL);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block panic_block = {
+ .notifier_call = panic_event,
+};
+
+void __init plat_mem_setup(void)
+{
+ _machine_restart = tc3162_machine_restart;
+ _machine_halt = tc3162_machine_halt;
+ pm_power_off = tc3162_machine_power_off;
+
+ /*below merge from 2.6.22.15*/
+ board_time_init = tc3162_time_init;
+
+ atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
+}
+
+int irq_set_affinity_ex(unsigned int irq, const struct cpumask *cpumask)
+{
+ return irq_set_affinity(irq,cpumask);
+}
+EXPORT_SYMBOL(irq_set_affinity_ex);
Index: linux-3.18.21/arch/mips/econet/tcwdog.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/econet/tcwdog.c 2018-02-05 13:19:53.000000000 +0800
@@ -0,0 +1,438 @@
+/*
+** $Id: tcwdog.c,v 1.2 2010/12/09 13:18:35 xmdai_nj Exp $
+*/
+/************************************************************************
+ *
+ * Copyright (C) 2006 Trendchip Technologies, Corp.
+ * All Rights Reserved.
+ *
+ * Trendchip Confidential; Need to Know only.
+ * Protected as an unpublished work.
+ *
+ * The computer program listings, specifications and documentation
+ * herein are the property of Trendchip Technologies, Co. and shall
+ * not be reproduced, copied, disclosed, or used in whole or in part
+ * for any reason without the prior express written permission of
+ * Trendchip Technologeis, Co.
+ *
+ *************************************************************************/
+/*
+** $Log: tcwdog.c,v $
+** Revision 1.5 2011/09/23 02:04:50 shnwind
+** Add rt63365 support
+**
+** Revision 1.4 2011/07/07 07:55:51 shnwind
+** RT63260 & RT63260 auto_bench support
+**
+** Revision 1.3 2011/06/03 02:04:23 lino
+** add RT65168 support
+**
+** Revision 1.2 2010/12/09 13:18:35 xmdai_nj
+** #7955:When doing upgrade firmware in web page, it can not reboot.
+**
+** Revision 1.1.1.1 2010/04/09 09:39:13 feiyan
+** New TC Linux Make Flow Trunk
+**
+** Revision 1.1.1.1 2009/12/17 01:43:39 josephxu
+** 20091217, from Hinchu ,with VoIP
+**
+** Revision 1.1.1.1 2007/04/12 09:42:02 ian
+** TCLinuxTurnkey2007
+**
+** Revision 1.2 2006/07/06 07:24:23 lino
+** update copyright year
+**
+** Revision 1.1.1.1 2005/11/02 05:45:19 lino
+** no message
+**
+*/
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/miscdevice.h>
+//#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <asm/tc3162/tc3162.h>
+
+#include <asm/tc3162/TCIfSetQuery_os.h>
+#include <linux/delay.h>
+
+#ifdef CONFIG_WATCHDOG_NOWAYOUT
+static int nowayout = 1;
+#else
+static int nowayout = 0;
+#endif
+
+#if 0
+MODULE_PARM(nowayout,"i");
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
+#endif
+extern unsigned int cpu_timer_loss[4];
+
+static int watchdog_enabled = 0;
+
+extern void timer_Configure(uint8 timer_no, uint8 timer_enable, uint8 timer_mode, uint8 timer_halt);
+extern void timerSet(uint32 timer_no, uint32 timerTime, uint32 enable, uint32 mode, uint32 halt);
+
+extern void timer_WatchDogConfigure(uint8 tick_enable, uint8 watchdog_enable);
+
+static spinlock_t protect_lock;
+#define PPE_DFP_CPORT 0xbfb50e48
+#define PPE_TB_CFG 0xbfb50e1c
+
+void
+attack_protect_set(int active, int mode){
+ unsigned int value=0;
+ unsigned int val_1=0;
+	unsigned long flags=0;	/* was unsigned int: spin_lock_irqsave() stores IRQ state in an unsigned long */
+
+ spin_lock_irqsave(&protect_lock, flags);
+
+ /*active : o , off, 1 on. mode: 1 wan protect, 2, lan protect, 0, not set*/
+ if(active == 1){
+ if(mode == 1){
+ /*disable keep alive*/
+ value = regRead32(PPE_TB_CFG);
+ value &= ~(0x3000);
+ regWrite32(PPE_TB_CFG, value);
+ /*default port 2 to drop*/
+ value = regRead32(PPE_DFP_CPORT);
+ value &= ~(0x700);
+ value |= 0x700;
+ regWrite32(PPE_DFP_CPORT, value);
+ }
+ else if(mode == 2){
+ /*disable keep alive*/
+ value = regRead32(PPE_TB_CFG);
+ value &= ~(0x3000);
+ regWrite32(PPE_TB_CFG, value);
+
+ /*default port 1 to drop*/
+ value = regRead32(PPE_DFP_CPORT);
+ value &= ~(0x070);
+ value |= 0x070;
+ regWrite32(PPE_DFP_CPORT, value);
+ }
+ else{
+ ;/*not settings*/
+ }
+ }
+ else{
+ /*restore normal settings*/
+ if(mode == 1){
+ val_1 = regRead32(PPE_DFP_CPORT);
+ if((val_1 & 0x70)== 0x0){ /*make sure the keep alive is setting by qdma driver*/
+ /*restore the keep alive*/
+ value = regRead32(PPE_TB_CFG);
+ value &= ~(0x3000);
+ value |= 0x3000;
+ regWrite32(PPE_TB_CFG, value);
+ }
+ /*restore p2 to qdma port*/
+ value = val_1;
+ value &= ~(0x700);
+ value |= 0x500;
+ regWrite32(PPE_DFP_CPORT, value);
+ }
+ else if(mode == 2){
+ val_1 = regRead32(PPE_DFP_CPORT);
+ if((val_1 & 0x700) == 0x500){ /*make sure the keep alive is setting by femac driver*/
+ /*restore the keep alive*/
+ value = regRead32(PPE_TB_CFG);
+ value &= ~(0x3000);
+ value |= 0x3000;
+ regWrite32(PPE_TB_CFG, value);
+ }
+ /*restore p1 to cpu port*/
+ value = val_1;
+ value &= ~(0x70);
+ regWrite32(PPE_DFP_CPORT, value);
+ }
+ else{
+ ;/*not settings*/
+ }
+ }
+ spin_unlock_irqrestore(&protect_lock, flags);
+
+}
+EXPORT_SYMBOL(attack_protect_set);
+
+void watchDogReset(void)
+{
+#ifdef CONFIG_TC3162_ADSL
+ /* stop adsl */
+ if (adsl_dev_ops)
+ adsl_dev_ops->set(ADSL_SET_DMT_CLOSE, NULL, NULL);
+#endif
+
+#if defined(CONFIG_MIPS_TC3262) && defined(TCSUPPORT_POWERSAVE_ENABLE)
+ if(isRT63365)
+ VPint(CR_AHB_CLK) |= 0x57e1;//restore ahb clk to default value
+#endif
+
+#if defined(TCSUPPORT_CPU_MT7505) || defined(TCSUPPORT_CPU_MT7510)
+ disable_all_interrupts();
+
+ VPint(CR_TIMER_CTL) = 0x0;
+#endif
+
+#if defined(TCSUPPORT_WLAN_MT7592_PCIE) && defined(TCSUPPORT_CPU_MT7520)
+ printk("0xbfb00834=0xeff88ce0 \n");
+ printk("0xbfb00380 reset phy \n");
+ disable_all_interrupts();
+
+ VPint(CR_TIMER_CTL) = 0x0;
+#endif
+
+/*watchdog reset*/
+ timerSet(5, 10 * TIMERTICKS_10MS, ENABLE, TIMER_TOGGLEMODE, TIMER_HALTDISABLE);
+ timer_WatchDogConfigure(ENABLE, ENABLE);
+
+#if defined(TCSUPPORT_WLAN_MT7592_PCIE) && defined(TCSUPPORT_CPU_MT7520)
+ VPint(0xbfbd0040) = 0x0;
+ mdelay(10);
+ VPint(0xbfb00834) = 0xeff88ce0;
+ VPint(0xbfb00380) &= ~(1<<25);
+ udelay(1) ;
+ VPint(0xbfb00380) |= (1<<25);
+#endif
+
+#if defined(TCSUPPORT_CPU_MT7505) || defined(TCSUPPORT_CPU_MT7510)
+ VPint(CR_DRAMC_CONF) &= ~(0x1<<2);
+#endif
+
+ timerSet(5, TIMERTICKS_1MS, ENABLE, TIMER_TOGGLEMODE, TIMER_HALTDISABLE);
+ while(1);
+// _machine_restart();
+}
+
+void tc3162wdog_kick(void)
+{
+ /* Because kernel 3.18 don't support the type before 63365*/
+ regWrite32(CR_WDOG_RLD, 0x1);
+}
+EXPORT_SYMBOL(tc3162wdog_kick);
+
+/* handle open device */
+
+static int tc3162wdog_open(struct inode *inode, struct file *file)
+{
+ /* Allow only one person to hold it open */
+ if(watchdog_enabled)
+ return -EBUSY;
+ if (nowayout) {
+ printk("Watchdog cannot be stopped once started. \n");
+ }
+
+ timerSet(5, 2000 * TIMERTICKS_10MS, ENABLE, TIMER_TOGGLEMODE, TIMER_HALTDISABLE);
+ timer_WatchDogConfigure(ENABLE, ENABLE);
+
+ watchdog_enabled=1;
+ printk("TC3162 hardware watchdog initialized\n");
+ return 0;
+}
+
+static int tc3162wdog_release(struct inode *inode, struct file *file)
+{
+ /*
+ * Shut off the watchdog
+ * Lock it in if it's a module and we set nowayout
+ */
+ if (nowayout) {
+ printk(KERN_CRIT "Watchdog cannot be stopped once started! \n");
+ } else {
+ /* Stop watchdog timer */
+ timer_WatchDogConfigure(DISABLE, DISABLE);
+
+ watchdog_enabled = 0;
+ printk("TC3162 hardware watchdog stopped\n");
+ }
+ return 0;
+}
+
+static ssize_t tc3162wdog_write(struct file *file, const char *data, size_t len, loff_t *ppos)
+{
+ if (len) {
+ tc3162wdog_kick();
+ }
+ return len;
+}
+
+static struct file_operations tc3162wdog_fops = {
+ owner: THIS_MODULE,
+ write: tc3162wdog_write,
+ open: tc3162wdog_open,
+ release: tc3162wdog_release,
+};
+
+static struct miscdevice tc3162wdog_miscdev = {
+ minor: WATCHDOG_MINOR,
+ name: "watchdog",
+ fops: &tc3162wdog_fops,
+};
+
+static int watchdog_reset_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data){
+
+ return 0;
+}
+static int watchdog_reset_write_proc(struct file *file, const char *buffer,
+ unsigned long count, void *data){
+ watchDogReset();
+ return 0;
+}
+
+extern int is_nmi_enable(void);
+extern void set_nmi_enable(uint8 nmi_enable);
+extern void show_stack_nmi(void);
+
+
+static int nmi_enable_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data){
+
+ int len;
+
+ len = sprintf(page, is_nmi_enable() ? "1\n" : "0\n");
+ len -= off;
+ *start = page + off;
+
+ if (len > count)
+ len = count;
+ else
+ *eof = 1;
+
+ if (len < 0)
+ len = 0;
+
+ return len;
+}
+static int nmi_enable_write_proc(struct file *file, const char *buffer,
+ unsigned long count, void *data){
+ char val_string[3];
+ uint8 enable;
+
+ if (count > sizeof(val_string) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(val_string, buffer, count))
+ return -EFAULT;
+
+ val_string[count] = '\0';
+
+ if(val_string[0] == 'd'){
+ show_stack_nmi();
+ }
+ else{
+
+ enable = simple_strtol(val_string, NULL, 10);
+ set_nmi_enable(enable);
+ }
+
+ return count;
+}
+
+static int timer_interrupt_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len, i;
+
+ len = 0;
+ for(i = 0; i < 4; i++) {
+ len += sprintf(page + len, "CPU%d: timer lose d'%08u\n", i, cpu_timer_loss[i]);
+ }
+
+ len -= off;
+ *start = page + off;
+
+ if (len > count)
+ len = count;
+ else
+ *eof = 1;
+
+ if (len < 0)
+ len = 0;
+
+ return len;
+}
+
+static int timer_interrupt_write_proc(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ char val_string[64], cmd[64] ,subcmd[64];
+ uint action;
+ int i;
+
+ memset(val_string, 0, (sizeof(val_string)));
+ memset(cmd, 0, (sizeof(cmd)));
+ memset(subcmd, 0, (sizeof(subcmd)));
+
+ if (count > sizeof(val_string) - 1)
+ return -EINVAL ;
+
+ if (copy_from_user(val_string, buffer, count))
+ return -EFAULT ;
+
+ sscanf(val_string, "%s %s %x", cmd, subcmd, &action) ;
+
+ if(!strcmp(cmd, "reset")) {
+ for(i = 0; i < 4; i++) {
+ cpu_timer_loss[i] = 0;
+ }
+ } else if(!strcmp(cmd, "Usage")) {
+ printk("reset\n");
+ }
+
+ return count ;
+}
+
+struct proc_dir_entry *timer_procdir=NULL;
+
+static int __init tc3162_watchdog_init(void)
+{
+ struct proc_dir_entry *watchdog_proc=NULL;
+ struct proc_dir_entry *timer_proc=NULL;
+ watchdog_proc = create_proc_entry("watchdog_reset", 0, NULL);
+ watchdog_proc->read_proc = watchdog_reset_read_proc;
+ watchdog_proc->write_proc = watchdog_reset_write_proc;
+ if (isMT751020 || isMT7505 || isEN751221 || isEN751627||isEN7580) {
+ watchdog_proc = create_proc_entry("nmi_enable", 0, NULL);
+ watchdog_proc->read_proc = nmi_enable_read_proc;
+ watchdog_proc->write_proc = nmi_enable_write_proc;
+
+ }
+
+ spin_lock_init(&protect_lock);
+
+
+ misc_register(&tc3162wdog_miscdev);
+ if(timer_procdir == NULL) {
+ timer_procdir = proc_mkdir("timer", NULL);
+ if(timer_procdir != NULL) {
+ timer_proc = create_proc_entry("interrupt", 0, timer_procdir);
+ if(timer_proc != NULL) {
+ timer_proc->read_proc = timer_interrupt_read_proc;
+ timer_proc->write_proc = timer_interrupt_write_proc;
+ } else {
+ printk("\ncreate timer_interrupt proc fail.\n");
+ }
+ } else {
+ printk("\ncreate timer_interrupt proc fail.\n");
+ }
+ }
+ printk("TC3162 hardware watchdog module loaded.\n");
+ return 0;
+}
+static void __exit tc3162_watchdog_exit(void)
+{
+ misc_deregister(&tc3162wdog_miscdev);
+ remove_proc_entry("interrupt",timer_procdir);
+ remove_proc_entry("timer",NULL);
+
+}
+
+module_init(tc3162_watchdog_init);
+module_exit(tc3162_watchdog_exit);
+
Index: linux-3.18.21/arch/mips/econet/time.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/econet/time.c 2018-02-05 13:19:53.000000000 +0800
@@ -0,0 +1,87 @@
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/param.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/types.h>
+
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/mipsregs.h>
+#include <asm/sections.h>
+#include <asm/time.h>
+#include <asm/tc3162/tc3162.h>
+
+void
+timer_Configure(
+ uint8 timer_no,
+ uint8 timer_enable,
+ uint8 timer_mode,
+ uint8 timer_halt
+)
+{
+ uint32 word,word1;
+ word = VPint(CR_TIMER_CTL);
+ word1 = (timer_enable << timer_no)|(timer_mode << (timer_no + 8))|(timer_halt << (timer_no + 26));
+ word |= word1;
+ VPint (CR_TIMER_CTL)=word;
+}
+
+void
+timerSet(
+	uint32 timer_no,
+	uint32 timerTime,	/* presumably in ms (callers pass TIMERTICKS_* multiples) — TODO confirm units */
+	uint32 enable,
+	uint32 mode,
+	uint32 halt
+)
+{
+	uint32 word;
+
+	word = (timerTime * SYS_HCLK) * 1000 /2;	/* convert to timer ticks; timer appears to count at SYS_HCLK/2 — confirm */
+	timerLdvSet(timer_no,word);	/* program the timer load value */
+	timerCtlSet(timer_no,enable,mode,halt);	/* apply enable/mode/halt bits in CR_TIMER_CTL */
+}
+
+void
+timer_WatchDogConfigure (
+ uint8 tick_enable,
+ uint8 watchdog_enable
+)
+{
+ uint32 word;
+ word = VPint(CR_TIMER_CTL);
+ word &= 0xfdffffdf;
+ word |= ( tick_enable << 5)|(watchdog_enable<<25);
+ VPint (CR_TIMER_CTL)=word;
+}
+
+static void tc3162_timer_ack(void)
+{
+ uint32 word;
+
+ word = VPint(CR_TIMER_CTL);
+ word &= 0xffc0ffff;
+ word |= 0x00020000;
+ VPint(CR_TIMER_CTL) = word;
+}
+
+void __init tc3162_time_init(void)
+{
+ //mips_timer_state = tc3162_timer_state;
+ mips_timer_ack = tc3162_timer_ack;
+
+ timerSet(1, TIMERTICKS_10MS, ENABLE, TIMER_TOGGLEMODE, TIMER_HALTDISABLE);
+}
+
+void __init plat_timer_setup(struct irqaction *irq)
+{
+ setup_irq(TIMER1_INT, irq);
+}
Index: linux-3.18.21/arch/mips/econet/time2.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/econet/time2.c 2018-02-05 14:20:19.000000000 +0800
@@ -0,0 +1,599 @@
+/*
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Setting up the clock on the MIPS boards.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel_stat.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/hardirq.h>
+#include <asm/irq.h>
+#include <asm/div64.h>
+#include <asm/cpu.h>
+#include <asm/time.h>
+#include <asm/setup.h>
+
+#include <asm/tc3162/tc3162.h>
+#include <asm/tc3162/TCIfSetQuery_os.h>
+#ifdef TCSUPPORT_MIPS_1004K
+#include <asm/gic.h>
+#endif
+
+unsigned long cpu_khz;
+
+static int mips_cpu_timer_irq;
+extern int cp0_perfcount_irq;
+extern void smtc_timer_broadcast(int);
+static unsigned long cycles_per_jiffy __read_mostly;
+unsigned int cpu_timer_loss[4];
+#ifdef TCSUPPORT_MIPS_1004K
+static unsigned int expirelo[4];
+static const unsigned int cputmr_cnt[] = {CR_CPUTMR_CNT0, CR_CPUTMR_CNT1, CR_CPUTMR_CNT2, CR_CPUTMR_CNT3};
+static const unsigned int cputmr_cmr[] = {CR_CPUTMR_CMR0, CR_CPUTMR_CMR1, CR_CPUTMR_CMR2, CR_CPUTMR_CMR3};
+
+extern int timers_intSrcNum[NR_CPUS];
+#else
+static unsigned int expirelo[2];
+static const unsigned int cputmr_cnt[] = {CR_CPUTMR_CNT0, CR_CPUTMR_CNT1};
+static const unsigned int cputmr_cmr[] = {CR_CPUTMR_CMR0, CR_CPUTMR_CMR1};
+#endif
+
+static int vpe1_timer_installed = 0;
+#ifdef TCSUPPORT_BONDING
+unsigned long int bondaddr;
+EXPORT_SYMBOL(bondaddr);
+static char tag;
+#endif
+#ifdef CONFIG_PCI
+extern int pcieRegInitConfig(void);
+extern void pcieReset(void);
+extern void setahbstat(int val);
+#endif
+
+static void delay1ms(int ms)
+{
+ volatile uint32 timer_now, timer_last;
+ volatile uint32 tick_acc;
+ uint32 one_tick_unit = SYS_HCLK * 500;//1 * SYS_HCLK * 1000 / 2
+ volatile uint32 tick_wait = ms * one_tick_unit;
+ volatile uint32 timer1_ldv = regRead32(CR_TIMER1_LDV);
+
+ tick_acc = 0;
+ timer_last = regRead32(CR_TIMER1_VLR);
+ do {
+ timer_now = regRead32(CR_TIMER1_VLR);
+ if (timer_last >= timer_now)
+ tick_acc += timer_last - timer_now;
+ else
+ tick_acc += timer1_ldv - timer_now + timer_last;
+ timer_last = timer_now;
+ } while (tick_acc < tick_wait);
+}
+
+void delay1us(int period, int number)
+{
+ volatile unsigned int timer_now, timer_last;
+ volatile unsigned int tick_acc;
+ unsigned int one_tick_unit = SYS_HCLK * 500; // 500/100 = 1ms /100 = 10us
+ volatile unsigned int tick_wait = number * one_tick_unit / 10; //caculate 10 us delay wait
+ volatile unsigned int timer1_ldv = regRead32(CR_TIMER1_LDV);
+ int same_count = 0;
+ tick_acc = 0;
+ timer_last = regRead32(CR_TIMER1_VLR);
+ do {
+ timer_now = regRead32(CR_TIMER1_VLR);
+ if(timer_last == timer_now)
+ {
+ same_count++;
+ }
+ if(same_count >= period)
+ {
+ printk("delay1us: dead loop, break;\r\n");
+ return;
+ }
+ if (timer_last >= timer_now)
+ tick_acc += timer_last - timer_now;
+ else
+ tick_acc += timer1_ldv - timer_now + timer_last;
+ timer_last = timer_now;
+ } while (tick_acc < tick_wait);
+}
+
+EXPORT_SYMBOL(delay1us);
+
+
+void
+timer_Configure(
+ uint8 timer_no,
+ uint8 timer_enable,
+ uint8 timer_mode,
+ uint8 timer_halt
+)
+{
+ uint32 word,word1;
+
+ word = regRead32(CR_TIMER_CTL);
+ word1 = (timer_enable << timer_no)|(timer_mode << (timer_no + 8))|(timer_halt << (timer_no + 26));
+ word |= word1;
+ regWrite32(CR_TIMER_CTL, word);
+}
+
+void
+timerSet(
+ uint32 timer_no,
+ uint32 timerTime,
+ uint32 enable,
+ uint32 mode,
+ uint32 halt
+)
+{
+ uint32 word;
+
+ /* when SYS_HCLK is large, it will cause overflow. The calculation will be wrong */
+ /* word = (timerTime * SYS_HCLK) * 1000 / 2; */
+ word = (timerTime * SYS_HCLK) * 500;
+ timerLdvSet(timer_no,word);
+ timerCtlSet(timer_no,enable,mode,halt);
+}
+
+void
+timer_WatchDogConfigure (
+ uint8 tick_enable,
+ uint8 watchdog_enable
+)
+{
+ uint32 word;
+
+ word = regRead32(CR_TIMER_CTL);
+ word &= 0xfdffffdf;
+ word |= ( tick_enable << 5)|(watchdog_enable<<25);
+ regWrite32(CR_TIMER_CTL, word);
+}
+
+int
+is_nmi_enable(void)
+{
+ uint32 word = regRead32(CR_AHB_NMI_CONF);
+
+ if(word & 0x3)
+ return 1;
+ else
+ return 0;
+
+}
+
+void
+set_nmi_enable(uint8 nmi_enable){
+ uint32 word;
+ /*Config NMI0*/
+ word = regRead32(CR_INTC_NMI0IMR0);
+ if(nmi_enable)
+ word |= 0x200;
+ else
+ word &= ~0x200;
+ regWrite32(CR_INTC_NMI0IMR0, word);
+
+ #if 0
+ /*Config NMI1*/
+ word = regRead32(CR_INTC_NMI1IMR0);
+ if(nmi_enable)
+ word |= 0x200;
+ else
+ word &= ~0x200;
+ regWrite32(CR_INTC_NMI1IMR0, word);
+ #endif
+
+}
+
+#if defined(TCSUPPORT_DYING_GASP)
+EXPORT_SYMBOL(timerSet);
+EXPORT_SYMBOL(timer_WatchDogConfigure);
+#endif
+#define get_current_vpe() \
+ ((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)
+
+extern void tc3162_enable_irq(unsigned int irq);
+
+static void mips_timer_dispatch(void)
+{
+ //pr_info("\nmips_timer_dispatch, Status= %08x", read_c0_status());
+ do_IRQ(SI_TIMER_INT);
+}
+
+static void mips_perf_dispatch(void)
+{
+ do_IRQ(cp0_perfcount_irq);
+}
+
+extern int (*perf_irq)(void);
+
+/*
+ * Estimate CPU frequency. Sets mips_hpt_frequency as a side-effect
+ */
+static unsigned int __init estimate_cpu_frequency(void)
+{
+ unsigned int count;
+ unsigned long flags;
+ unsigned int start;
+
+ local_irq_save(flags);
+
+ /* Start r4k counter. */
+ start = read_c0_count();
+
+ /* delay 1 second */
+ delay1ms(1000);
+
+ count = read_c0_count() - start;
+
+ /* restore interrupts */
+ local_irq_restore(flags);
+
+ count += 5000; /* round */
+ count -= count%10000;
+
+ mips_hpt_frequency = count;
+
+ /* on 34K, 2 cycles per count */
+ count *= 2;
+
+ return count;
+}
+
+irqreturn_t mips_perf_interrupt(int irq, void *dev_id)
+{
+ return perf_irq();
+}
+
+static struct irqaction perf_irqaction = {
+ .handler = mips_perf_interrupt,
+ .flags = IRQF_DISABLED | IRQF_PERCPU,
+ .name = "performance",
+};
+extern struct clocksource clocksource_mips;
+
+#if defined(TCSUPPORT_DYING_GASP) && (defined(CONFIG_MIPS_RT63365) && !(defined(TCSUPPORT_CPU_MT7510)||defined(TCSUPPORT_CPU_MT7520) || defined(TCSUPPORT_CPU_MT7505)))
+irqreturn_t real_watchdog_timer_interrupt(int irq, void *dev_id)
+#else
+irqreturn_t watchdog_timer_interrupt(int irq, void *dev_id)
+#endif
+{
+ uint32 word;
+
+ word = regRead32(CR_TIMER_CTL);
+ word &= 0xffc0ffff;
+ word |= 0x00200000;
+ regWrite32(CR_TIMER_CTL, word);
+
+ /* The KERN_ALERT will stop printk ring buffer mode.
+ * This is used for flush ring buffer message to console.
+ */
+ printk(KERN_ALERT "watchdog timer interrupt\n");
+
+#ifdef CONFIG_TC3162_ADSL
+ /* stop adsl */
+ if (adsl_dev_ops)
+ adsl_dev_ops->set(ADSL_SET_DMT_CLOSE, NULL, NULL);
+#endif
+
+#if defined(CONFIG_MIPS_TC3262) && defined(TCSUPPORT_POWERSAVE_ENABLE)
+ if(isRT63365){
+ word = regRead32(CR_AHB_CLK);
+ word |= 0x57e1;//restore ahb clk to default value
+ regWrite32(CR_AHB_CLK, word);
+ }
+#endif
+ dump_stack();
+
+#if defined(TCSUPPORT_CPU_MT7505) || defined(TCSUPPORT_CPU_MT7510)
+ word = regRead32(CR_DRAMC_CONF);
+ word &= ~(0x1<<2);
+ regWrite32(CR_DRAMC_CONF, word);
+#endif
+
+ return IRQ_HANDLED;
+}
+//only 63365 need another watchdog function in IMEM
+#if defined(TCSUPPORT_DYING_GASP) && (defined(CONFIG_MIPS_RT63365) && !(defined(TCSUPPORT_CPU_MT7510)||defined(TCSUPPORT_CPU_MT7520)||defined(TCSUPPORT_CPU_MT7505)))
+__IMEM
+irqreturn_t watchdog_timer_interrupt(int irq, void *dev_id){
+
+ unsigned int word;
+ word = regRead32(0xbfb00834);
+ word &= ~(1<<18);//enable spi
+ regWrite32(0xbfb00834, word);
+ word = regRead32(0xbfb00040);
+ word &= ~(1<<0); //enable ddr device
+ regWrite32(0xbfb00040, word);
+
+ return real_watchdog_timer_interrupt(irq, dev_id);
+}
+#endif
+static struct irqaction watchdog_timer_irqaction = {
+ .handler = watchdog_timer_interrupt,
+ .flags = IRQF_DISABLED ,
+ .name = "watchdog",
+};
+
+static void watchdog_timer_dispatch(void)
+{
+ do_IRQ(TIMER5_INT);
+}
+
+/************************************************************************
+* B U S T I M E O U T I N T E R R U P T
+*************************************************************************
+*/
+
+irqreturn_t bus_timeout_interrupt(int irq, void *dev_id)
+{
+	uint32 reg;
+	uint32 addr;
+
+	/* read to clear interrupt */
+	if(isMT751020 || isMT7505 || isEN751221 || isEN751627||isEN7580)
+	{
+		if(isMT7505 || isEN751221 || isEN751627||isEN7580){
+			regWrite32(CR_PRATIR, 1);
+		}
+		else
+			regWrite32(CR_PRATIR, 0);
+		addr = regRead32(CR_ERR_ADDR);
+		addr &= ~((1U << 30) | (1U << 31));	/* 1U: shifting 1 into bit 31 of a signed int is UB */
+		printk("bus timeout interrupt ERR ADDR=%08x\n", addr);	/* addr is uint32: %x, not %lx */
+		dump_stack();
+
+#if 0//def CONFIG_PCI
+		if(addr >= 0x1fb80000 && addr <= 0x1fb80064)
+		{
+			pcieReset();
+			pcieRegInitConfig();
+			setahbstat(1);
+		}
+#endif
+	}
+	else
+	{
+		reg = regRead32(CR_PRATIR);
+		reg &= ~((1U << 30) | (1U << 31));	/* 1U: avoid signed-overflow UB on bit 31 */
+		printk("bus timeout interrupt ERR ADDR=%08x\n", reg);	/* reg is uint32: %x, not %lx */
+		dump_stack();
+
+#ifdef CONFIG_PCI
+		pcieReset();
+		pcieRegInitConfig();
+		setahbstat(1);
+#endif
+	}
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction bus_timeout_irqaction = {
+ .handler = bus_timeout_interrupt,
+ .flags = IRQF_DISABLED ,
+ .name = "bus timeout",
+};
+
+static void bus_timeout_dispatch(void)
+{
+ do_IRQ(BUS_TOUT_INT);
+}
+
+unsigned int __cpuinit get_c0_compare_int(void)
+{
+#ifdef TCSUPPORT_MIPS_1004K
+ int i;
+
+ /* 1. this function will return back to r4k_clockevent_init in arch/mips/kernel/cevt-r4k.c,
+ * then setup_irq will be called
+ * 2. during setup_irq, ecnt_gic_unmask_irq in arch/mips/econet/irq.c will be called to
+ * enable GIC mask for four extermal cpu timers
+ * 3. only cpu0 will use setup_irq to register the timer interupt with irqNum as
+ * SI_TIMER_INT and all cpus shared the irqNum (and ISR) */
+ mips_cpu_timer_irq = SI_TIMER_INT;
+
+ /* enable interrupt masks for external cpu timer 1/2/3.
+ * external cpu timer 0's interrupt mask will be enabled later in setup_irq */
+ if (smp_processor_id() > 0) {
+ GIC_SET_INTR_MASK(timers_intSrcNum[smp_processor_id()]);
+ }
+#else
+ if ((get_current_vpe()) && !vpe1_timer_installed) {
+ tc3162_enable_irq(SI_TIMER1_INT);
+ vpe1_timer_installed++;
+ }
+
+ if (vpe1_timer_installed == 0) {
+ if (cpu_has_veic)
+ set_vi_handler(SI_TIMER_INT, mips_timer_dispatch);
+ }
+ mips_cpu_timer_irq = SI_TIMER_INT;
+#endif
+
+ return mips_cpu_timer_irq;
+}
+static cycle_t cputmr_hpt_read(void)
+{
+ return regRead32(cputmr_cnt[0]);
+}
+static void __init cputmr_hpt_timer_init(void)
+{
+ unsigned int tmp, i, j=2;
+
+#ifdef TCSUPPORT_MIPS_1004K
+ j=4;
+#endif
+
+ for (i=0; i<j; i++)
+ regWrite32(cputmr_cnt[i], 0x0);
+
+ expirelo[0] = cycles_per_jiffy;
+ for (i=1; i<j; i++)
+ expirelo[i] = expirelo[0];
+
+ for (i=0; i<j; i++)
+ regWrite32(cputmr_cmr[i], expirelo[i]);
+
+ tmp = regRead32(CR_CPUTMR_CTL);
+ tmp |= (1<<1)|(1<<0);
+ regWrite32(CR_CPUTMR_CTL, tmp);
+#ifdef TCSUPPORT_MIPS_1004K
+ /* after enable external CPU timers 3 & 2, intSrc 36,37 serve for them */
+ tmp = regRead32(CR_CPUTMR_23_CTL);
+ tmp |= (1<<1)|(1<<0);
+ regWrite32(CR_CPUTMR_23_CTL, tmp);
+#endif
+}
+static void cputmr_timer_ack(void)
+{
+ int cpu=0;
+ int vpe=0;
+
+ /*As kernel3.18.21,there is a one - to - one relationship between cpu and vpe,and cpu_data[cpu].vpe_id is not correct,so don't use it*/
+#ifdef CONFIG_MIPS_MT_SMP
+ cpu = smp_processor_id();
+ vpe = cpu;
+#endif
+
+#ifndef TCSUPPORT_MIPS_1004K
+#if defined(MIPS_CPS) || defined(MIPS_CMP)
+ cpu = smp_processor_id();
+ vpe = cpu_data[cpu].vpe_id;
+#endif
+#endif
+ /* Ack this timer interrupt and set the next one. */
+ expirelo[vpe] += cycles_per_jiffy;
+
+ /* Check to see if we have missed any timer interrupts. */
+ while (unlikely((regRead32(cputmr_cnt[vpe]) - expirelo[vpe]) < 0x7fffffff)) {
+ /* missed_timer_count++; */
+ expirelo[vpe] += cycles_per_jiffy;
+ cpu_timer_loss[cpu]++;
+ }
+ /* update CR_CPUTMR_CMR */
+ regWrite32(cputmr_cmr[vpe], expirelo[vpe]);
+#ifdef TCSUPPORT_BONDING
+ if (isMT751020) {
+ if(bondaddr != 0) {
+ if(tag==0) {
+ tag = 1;
+ regWrite32(bondaddr, 1);
+ }else {
+ tag = 0;
+ regWrite32(bondaddr, 0);
+ }
+ }
+ }
+#endif
+}
+
+void ecnt_mips_time_ack(int cpu){
+
+ if (isRT63165 || isRT63365 || isMT751020 || isEN751221 || isEN751627||isEN7580)
+ {
+ mips_timer_ack();
+ }
+}
+void __init tc3162_time_init(void)
+{
+
+ pr_info("\r\ntc3162_time_init: Init bus timeout and watchdog\r\n");
+ timerSet(1, TIMERTICKS_10MS, ENABLE, TIMER_TOGGLEMODE, TIMER_HALTDISABLE);
+
+ if (isRT63165 || isRT63365 || isMT751020 || isMT7505 || isEN751221 || isEN751627||isEN7580) {
+ /* watchdog timer */
+ /* set count down 3 seconds to issue interrupt */
+ regWrite32(CR_WDOG_THSLD, ((3 * TIMERTICKS_1S * SYS_HCLK) * 500)); // (3 * TIMERTICKS_1S * SYS_HCLK) * 1000 / 2
+ if (cpu_has_vint)
+ set_vi_handler(TIMER5_INT, watchdog_timer_dispatch);
+#ifdef CONFIG_MIPS_MT_SMTC
+ setup_irq_smtc(TIMER5_INT, &watchdog_timer_irqaction, 0x0);
+#else
+ setup_irq(TIMER5_INT, &watchdog_timer_irqaction);
+#endif
+
+ /* setup bus timeout interrupt */
+ //VPint(CR_MON_TMR) |= ((1<<30) | (0xff));
+ if(isMT751020 || isMT7505 || isEN751221 || isEN751627||isEN7580)
+ {
+ regWrite32(CR_MON_TMR, 0xcfffffff);
+ if(isMT7505 || isEN751221 || isEN751627||isEN7580)
+ regWrite32(CR_BUSTIMEOUT_SWITCH, 0xffffffff);
+ else
+ regWrite32(CR_BUSTIMEOUT_SWITCH, 0xfdbfffff);//switch off usb phy(bit22/bit25) control because hw issue
+ }
+
+ if (cpu_has_vint)
+ set_vi_handler(BUS_TOUT_INT, bus_timeout_dispatch);
+#ifdef CONFIG_MIPS_MT_SMTC
+ setup_irq_smtc(BUS_TOUT_INT, &bus_timeout_irqaction, 0x0);
+#else
+ setup_irq(BUS_TOUT_INT, &bus_timeout_irqaction);
+#endif
+ }
+
+}
+
+void (*mips_timer_ack)(void);
+void __init plat_time_init(void)
+{
+ unsigned int est_freq = 0;
+
+
+ if(board_time_init)
+ board_time_init();
+ timerSet(1, TIMERTICKS_10MS, ENABLE, TIMER_TOGGLEMODE, TIMER_HALTDISABLE);
+
+ est_freq = estimate_cpu_frequency ();
+
+ printk("CPU frequency %d.%02d MHz\n", est_freq/1000000,
+ (est_freq%1000000)*100/1000000);
+
+ cpu_khz = est_freq / 1000;
+
+ if (isRT63165 || isRT63365 || isMT751020 || isMT7505 ||isEN751221 || isEN751627||isEN7580) {
+
+ /* enable CPU external timer */
+ clocksource_mips.read = cputmr_hpt_read;
+ mips_hpt_frequency = CPUTMR_CLK;
+
+ mips_timer_ack = cputmr_timer_ack;
+
+ printk("plat_time_init: Entered, mips_timer_ack ptr is [%p]\r\n", mips_timer_ack);
+
+ /* Calculate cache parameters. */
+ cycles_per_jiffy =
+ (mips_hpt_frequency + HZ / 2) / HZ;
+
+ cputmr_hpt_timer_init();
+
+ printk(" Using %u.%03u MHz high precision timer.\n",
+ ((mips_hpt_frequency + 500) / 1000) / 1000,
+ ((mips_hpt_frequency + 500) / 1000) % 1000);
+ }
+}
+
Index: linux-3.18.21/arch/mips/econet/voip_hook.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/econet/voip_hook.c 2018-02-05 13:19:53.000000000 +0800
@@ -0,0 +1,7 @@
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+unsigned int* (*recv_rtp_src_port_get_hook)(void) = NULL;
+EXPORT_SYMBOL(recv_rtp_src_port_get_hook);
Index: linux-3.18.21/arch/mips/include/asm/gic.h
===================================================================
--- linux-3.18.21.orig/arch/mips/include/asm/gic.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/include/asm/gic.h 2018-02-05 13:19:53.000000000 +0800
@@ -15,6 +15,9 @@
#include <linux/threads.h>
#include <irq.h>
+/* TCSUPPORT_MIPS_1004K */
+#include <asm/tc3162/tc3162.h>
+/* end of TCSUPPORT_MIPS_1004K */
#undef GICISBYTELITTLEENDIAN
@@ -318,6 +321,9 @@
unsigned int polarity; /* Polarity : +/- */
unsigned int trigtype; /* Trigger : Edge/Levl */
unsigned int flags; /* Misc flags */
+#ifdef TCSUPPORT_MIPS_1004K
+ unsigned char *name;
+#endif
#define GIC_FLAG_TRANSPARENT 0x01
};
@@ -340,15 +346,24 @@
#define GIC_CPU_INT4 4 /* . */
#define GIC_CPU_INT5 5 /* Core Interrupt 7 */
-/* Local GIC interrupts. */
#define GIC_INT_TMR (GIC_CPU_INT5)
#define GIC_INT_PERFCTR (GIC_CPU_INT5)
+/* Local GIC interrupts. */
+#ifdef TCSUPPORT_MIPS_1004K
+/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
+#define GIC_PIN_TO_VEC_OFFSET (1)
+
+/* define GIC_CPU_TO_VEC_OFFSET as GIC_PIN_TO_VEC_OFFSET, so that pin_offset in irq-gic.c can be 0 */
+#define GIC_CPU_TO_VEC_OFFSET (GIC_PIN_TO_VEC_OFFSET)
+
+#else
/* Add 2 to convert non-EIC hardware interrupt to EIC vector number. */
#define GIC_CPU_TO_VEC_OFFSET (2)
/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
#define GIC_PIN_TO_VEC_OFFSET (1)
+#endif
#include <linux/clocksource.h>
#include <linux/irq.h>
Index: linux-3.18.21/arch/mips/include/asm/io.h
===================================================================
--- linux-3.18.21.orig/arch/mips/include/asm/io.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/include/asm/io.h 2018-02-05 13:19:53.000000000 +0800
@@ -308,6 +308,80 @@
#define war_octeon_io_reorder_wmb() do { } while (0)
#endif
+#if defined(WIFI_MODULE) && defined(CONFIG_MIPS_TC3262)
+extern void ahbErrChk(void);
+
+#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \
+ \
+static inline void pfx##write##bwlq(type val, \
+ volatile void __iomem *mem) \
+{ \
+ volatile type *__mem; \
+ type __val; \
+ \
+ war_octeon_io_reorder_wmb(); \
+ \
+ __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
+ \
+ __val = pfx##ioswab##bwlq(__mem, val); \
+ \
+ if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
+ *__mem = __val; \
+ else if (cpu_has_64bits) { \
+ unsigned long __flags; \
+ type __tmp; \
+ \
+ if (irq) \
+ local_irq_save(__flags); \
+ __asm__ __volatile__( \
+ ".set mips3" "\t\t# __writeq""\n\t" \
+ "dsll32 %L0, %L0, 0" "\n\t" \
+ "dsrl32 %L0, %L0, 0" "\n\t" \
+ "dsll32 %M0, %M0, 0" "\n\t" \
+ "or %L0, %L0, %M0" "\n\t" \
+ "sd %L0, %2" "\n\t" \
+ ".set mips0" "\n" \
+ : "=r" (__tmp) \
+ : "0" (__val), "m" (*__mem)); \
+ if (irq) \
+ local_irq_restore(__flags); \
+ } else \
+ BUG(); \
+} \
+ \
+static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
+{ \
+ volatile type *__mem; \
+ type __val; \
+ \
+ __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
+ \
+ if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
+ {ahbErrChk();__val = *__mem;} \
+ else if (cpu_has_64bits) { \
+ unsigned long __flags; \
+ \
+ if (irq) \
+ local_irq_save(__flags); \
+ __asm__ __volatile__( \
+ ".set mips3" "\t\t# __readq" "\n\t" \
+ "ld %L0, %1" "\n\t" \
+ "dsra32 %M0, %L0, 0" "\n\t" \
+ "sll %L0, %L0, 0" "\n\t" \
+ ".set mips0" "\n" \
+ : "=r" (__val) \
+ : "m" (*__mem)); \
+ if (irq) \
+ local_irq_restore(__flags); \
+ } else { \
+ __val = 0; \
+ BUG(); \
+ } \
+ \
+ return pfx##ioswab##bwlq(__mem, __val); \
+}
+
+#else
#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \
\
static inline void pfx##write##bwlq(type val, \
@@ -331,7 +405,7 @@
if (irq) \
local_irq_save(__flags); \
__asm__ __volatile__( \
- ".set arch=r4000" "\t\t# __writeq""\n\t" \
+ ".set mips3" "\t\t# __writeq""\n\t" \
"dsll32 %L0, %L0, 0" "\n\t" \
"dsrl32 %L0, %L0, 0" "\n\t" \
"dsll32 %M0, %M0, 0" "\n\t" \
@@ -361,7 +435,7 @@
if (irq) \
local_irq_save(__flags); \
__asm__ __volatile__( \
- ".set arch=r4000" "\t\t# __readq" "\n\t" \
+ ".set mips3" "\t\t# __readq" "\n\t" \
"ld %L0, %1" "\n\t" \
"dsra32 %M0, %L0, 0" "\n\t" \
"sll %L0, %L0, 0" "\n\t" \
@@ -377,6 +451,7 @@
\
return pfx##ioswab##bwlq(__mem, __val); \
}
+#endif
#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow) \
\
Index: linux-3.18.21/arch/mips/include/asm/mach-generic/mangle-port.h
===================================================================
--- linux-3.18.21.orig/arch/mips/include/asm/mach-generic/mangle-port.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/include/asm/mach-generic/mangle-port.h 2018-02-05 13:19:53.000000000 +0800
@@ -8,6 +8,10 @@
#ifndef __ASM_MACH_GENERIC_MANGLE_PORT_H
#define __ASM_MACH_GENERIC_MANGLE_PORT_H
+#if defined(CONFIG_MIPS_TC3262)
+extern unsigned char io_swap_noneed;
+#endif
+
#define __swizzle_addr_b(port) (port)
#define __swizzle_addr_w(port) (port)
#define __swizzle_addr_l(port) (port)
@@ -29,13 +33,22 @@
# define ioswabb(a, x) (x)
# define __mem_ioswabb(a, x) (x)
+//auto Choose SWAP function according to platform;
+#if defined(CONFIG_MIPS_TC3262)
+# define ioswabw(a,x) (io_swap_noneed ? (x) : le16_to_cpu(x) )
+# define __mem_ioswabw(a,x) (io_swap_noneed ? cpu_to_le16(x) : (x) )
+# define ioswabl(a,x) (io_swap_noneed ? (x) : le32_to_cpu(x) )
+# define __mem_ioswabl(a,x) (io_swap_noneed ? cpu_to_le32(x) : (x) )
+# define ioswabq(a,x) (io_swap_noneed ? (x) : le64_to_cpu(x) )
+# define __mem_ioswabq(a,x) (io_swap_noneed ? cpu_to_le32(x) : (x) )
+#else
# define ioswabw(a, x) le16_to_cpu(x)
# define __mem_ioswabw(a, x) (x)
# define ioswabl(a, x) le32_to_cpu(x)
# define __mem_ioswabl(a, x) (x)
# define ioswabq(a, x) le64_to_cpu(x)
# define __mem_ioswabq(a, x) (x)
-
+#endif
#else
# define ioswabb(a, x) (x)
Index: linux-3.18.21/arch/mips/include/asm/mips-boards/prom.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/include/asm/mips-boards/prom.h 2018-02-05 13:19:54.000000000 +0800
@@ -0,0 +1,47 @@
+/*
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ * ########################################################################
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ *
+ * MIPS boards bootprom interface for the Linux kernel.
+ *
+ */
+
+#ifndef _MIPS_PROM_H
+#define _MIPS_PROM_H
+
+extern char *prom_getcmdline(void);
+extern char *prom_getenv(char *name);
+extern void prom_init_cmdline(void);
+extern void prom_meminit(void);
+extern void prom_fixup_mem_map(unsigned long start_mem, unsigned long end_mem);
+extern void mips_display_message(const char *str);
+extern void mips_display_word(unsigned int num);
+extern void mips_scroll_message(void);
+extern int get_ethernet_addr(char *ethernet_addr);
+
+/* Memory descriptor management. */
+#define PROM_MAX_PMEMBLOCKS 32
+struct prom_pmemblock {
+ unsigned long base; /* Within KSEG0. */
+ unsigned int size; /* In bytes. */
+ unsigned int type; /* free or prom memory */
+};
+
+#endif /* !(_MIPS_PROM_H) */
Index: linux-3.18.21/arch/mips/include/asm/mipsregs.h
===================================================================
--- linux-3.18.21.orig/arch/mips/include/asm/mipsregs.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/include/asm/mipsregs.h 2018-02-05 13:19:54.000000000 +0800
@@ -112,6 +112,27 @@
#define CP0_TX39_CACHE $7
/*
+ * TrendChip cache control register
+ */
+#define CP0_CCTL $20 /* Lexra Cache Control Register */
+
+/*
+ * Lexra Cache Control Register fields
+ */
+#define CCTL_DINVAL 0x00000001
+#define CCTL_IINVAL 0x00000002
+#define CCTL_ILOCK 0x0000000c
+#define CCTL_IRAMFILL4 0x00000010
+#define CCTL_IRAMOFF 0x00000020
+
+#define CCTL_IMEMFILL4 0x00000010
+#define CCTL_IMEMOFF 0x00000020
+#define CCTL_DWB 0x00000100
+#define CCTL_DWBINVAL 0x00000200
+#define CCTL_DMEMON 0x00000400
+#define CCTL_DMEMOFF 0x00000800
+
+/*
* Coprocessor 1 (FPU) register names
*/
#define CP1_REVISION $0
@@ -994,6 +1015,10 @@
local_irq_restore(__flags); \
} while (0)
+/* TrendChip cache control register */
+#define read_c0_cctl() __read_32bit_c0_register($20, 0)
+#define write_c0_cctl(val) __write_32bit_c0_register($20, 0, val)
+
#define read_c0_index() __read_32bit_c0_register($0, 0)
#define write_c0_index(val) __write_32bit_c0_register($0, 0, val)
@@ -1205,6 +1230,9 @@
#define read_c0_taglo() __read_32bit_c0_register($28, 0)
#define write_c0_taglo(val) __write_32bit_c0_register($28, 0, val)
+#define read_c0_idatalo() __read_32bit_c0_register($28, 1)
+#define write_c0_idatalo(val) __write_32bit_c0_register($28, 1, val)
+
#define read_c0_dtaglo() __read_32bit_c0_register($28, 2)
#define write_c0_dtaglo(val) __write_32bit_c0_register($28, 2, val)
@@ -1217,6 +1245,9 @@
#define read_c0_taghi() __read_32bit_c0_register($29, 0)
#define write_c0_taghi(val) __write_32bit_c0_register($29, 0, val)
+#define read_c0_idatahi() __read_32bit_c0_register($29, 1)
+#define write_c0_idatahi(val) __write_32bit_c0_register($29, 1, val)
+
#define read_c0_errorepc() __read_ulong_c0_register($30, 0)
#define write_c0_errorepc(val) __write_ulong_c0_register($30, 0, val)
Index: linux-3.18.21/arch/mips/include/asm/spram.h
===================================================================
--- linux-3.18.21.orig/arch/mips/include/asm/spram.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/include/asm/spram.h 2018-02-05 13:19:54.000000000 +0800
@@ -7,4 +7,8 @@
static inline void spram_config(void) { };
#endif /* CONFIG_CPU_MIPSR2 */
+extern int is_sram_addr(void *p);
+extern void *alloc_sram(int n);
+extern void free_sram(void *p, int n);
+
#endif /* _MIPS_SPRAM_H */
Index: linux-3.18.21/arch/mips/include/asm/tc3162/TCIfSetQuery_os.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/include/asm/tc3162/TCIfSetQuery_os.h 2018-02-05 14:20:40.000000000 +0800
@@ -0,0 +1,288 @@
+//******************************************************************************
+//
+// Copyright (C) 2003 TrendChip Technologies Corp.
+//
+// Module name
+// -----------
+// TCIfSetQuery.h
+//
+// Abstract
+// --------
+// This file contains the set and query id definition with ZyNOS
+//
+// Modification History
+// --------------------
+// Date : 2003/5/27 Jason
+// Rev. : 001
+// Modify :
+//
+//*****************************************************************************
+// $Id: TCIfSetQuery_os.h,v 1.1.1.1 2010/04/09 09:39:21 feiyan Exp $
+// $Log: TCIfSetQuery_os.h,v $
+// Revision 1.1.1.1 2010/04/09 09:39:21 feiyan
+// New TC Linux Make Flow Trunk
+//
+// Revision 1.2 2010/02/03 10:14:31 yuren_nj
+// [Enhancement]#4788 Add tr69 parameters.
+//
+// Revision 1.1.1.1 2009/12/17 01:42:47 josephxu
+// 20091217, from Hinchu ,with VoIP
+//
+// Revision 1.1.1.1 2007/04/12 09:42:03 ian
+// TCLinuxTurnkey2007
+//
+// Revision 1.2 2006/07/06 04:12:45 lino
+// add kernel module support
+//
+// Revision 1.1.1.1 2005/11/02 05:45:38 lino
+// no message
+//
+// Revision 1.2 2005/08/19 14:36:59 jasonlin
+// Merge Huawei's code into new main trunk
+//
+// Revision 1.1.1.1 2005/03/30 14:04:23 jasonlin
+// Import Linos source code
+//
+// Revision 1.8 2005/03/02 06:57:12 jasonlin
+// add extra query function ID for CHINA_NM
+//
+// Revision 1.7 2004/11/10 06:42:04 jasonlin
+// Add CI command "wan adsl opencmf adsl2|adsl2plus"
+//
+// Revision 1.5 2004/11/10 05:40:52 wheellenni
+// wheellenni_931110
+// compatible for the previous firmware
+//
+// Revision 1.4 2004/11/09 08:51:15 wheellenni
+// 931109_wheellenni
+// Add adsl2/adsl2plus to CI command "wan adsl opencmd xxxx"
+// "adsl2" only support G.992.3 and "adsl2plus" support G.992.5
+//
+// Revision 1.3 2004/10/04 12:46:58 bright
+// bright_931004
+//
+// Revision 1.2 2004/09/14 08:50:21 bright
+// bright_930914
+//
+// Revision 1.1.1.1 2004/08/17 07:06:46 jeffrey
+// Import framework code from V3.0.0.
+//
+// Revision 1.1.1.1 2003/08/22 06:19:18 jasonlin
+// no message
+//
+//*****************************************************************************
+
+#ifndef _TCIFSETQUERY_H
+#define _TCIFSETQUERY_H
+
+// ADSL Query ID list Jason_920527
+#define ADSL_QUERY_MODESET 0x0000
+#define ADSL_QUERY_MODE 0x0001
+#define ADSL_QUERY_STATUS 0x0002
+#define ADSL_QUERY_SW_VER 0x0003
+#define ADSL_QUERY_VENDOR_ID 0x0004
+#define ADSL_QUERY_NEAR_OP_DATA 0x0005
+#define ADSL_QUERY_FAR_OP_DATA 0x0006
+#define ADSL_QUERY_CH_OP_DATA 0x0007
+#define ADSL_QUERY_DEFECT_CNT 0x0008
+#define ADSL_QUERY_DEFECT 0x0009
+#define ADSL_QUERY_NEAR_ITUID 0x000a
+#define ADSL_QUERY_FAR_ITUID 0x000b
+#define ADSL_QUERY_WATCHDOG 0x000c
+#define ADSL_QUERY_AUTOLINK 0x000d
+#define ADSL_QUERY_CELL_CNT 0x000e
+#define ADSL_QUERY_TX_GAIN 0x000f
+#define ADSL_QUERY_TX_FILTER_TYPE 0x0010
+#define ADSL_QUERY_TX_MEDLEY_TONE 0x0011
+#define ADSL_QUERY_SNR_OFFSET 0x0012
+#define ADSL_QUERY_MIN_SNR_M 0x0013
+#define ADSL_QUERY_FW_VER 0x0014
+#define ADSL_QUERY_FW_REV 0x0015
+#define ADSL_QUERY_TDM_FDM_VER 0x0016
+#define ADSL_QUERY_RTS_LOG_SIZE_STR 0x0017
+#define ADSL_QUERY_RTS_ADDR_STR 0x0018
+#define ADSL_QUERY_RTS_LOG_SIZE 0x0019
+#define ADSL_QUERY_HW_VER 0x001a
+#define ADSL_QUERY_DGASP_CNTR 0x001b
+#define ADSL_QUERY_SHOW_DIAG 0x001c
+//#define ADSL_QUERY_CUSTOMER 0x001d //bright_930914_1
+#define ADSL_QUERY_BUILD_VERSION 0x001d //bright_930914_1
+#define ADSL_QUERY_ANNEX 0x001e //julia_070424
+#define ADSL_QUERY_TR69_WLAN_DSL_INTERFACE_Config 0x001f //jf_070522
+#define ADSL_QUERY_PMS_PARAM 0x0020 //cheng_070727
+#define ADSL_QUERY_TR69_WLAN_DSL_DIAGNOSTIC 0x0021 //dyma_071017
+#define ADSL_QUERY_SHOW_STATE_PARAM 0x0022 //jmxu_071219
+#define ADSL_QUERY_LINERATE 0x0023 //haotang_080416
+#define ADSL_QUERY_TR098_WAN_DSL_INTERFACE_CONFIG 0x0024 //rmzha_080527
+#define ADSL_QUERY_PM_STATE 0x0025 //Roger_090206
+#define ADSL_QUERY_BITSWAP_ONOFF 0x0026//Roger_090206
+
+#define ADSL_QUERY_LAST_DROP_REASON 0x002a //whliu_090924
+
+#ifdef DADI //Julia_051117
+#define ADSL_QUERY_ATTAIN_RATE 0x0027 //Julia_051117
+#endif
+#define ADSL_QUERY_CELL_CNT1 0x002b //zzma_091020
+#define ADSL_QUERY_RX_BEAR_TPSTC_TYPE 0x002c
+
+#if defined(TCSUPPORT_CWMP_VDSL)
+#define VDSL_QUERY_TR098_DSL_INTERFACE_CONFIG 0x2001
+#endif
+
+#define ADSL_QUERY_AFE_DMT_LPBK 0x0033
+#define ADSL_QUERY_AFE_R_W 0x0034//Roger120402
+
+#define TPSTC_DISABLE 0x00
+#define TPSTC_ATM_TC 0x01
+#define TPSTC_PTM_TC_64_65B 0x02
+#define TPSTC_PTM_TC_HDLC 0x03
+
+#define ADSL_QUERY_SRA_ONOFF 0x0028 //Roger_090206
+#define ADSL_QUERY_PM_ONOFF 0x0029 //Roger_090206
+
+#define ADSL_SET_MODE_A43 0x1000
+#define ADSL_SET_MODE_LITE 0x1001
+#define ADSL_SET_MODE_MULTIMODE 0x1002
+#define ADSL_SET_MODE_ANSI 0x1003
+#define ADSL_SET_MODE_ADSL2 0x1004 // wheellenni_931110
+#define ADSL_SET_MODE_ADSL2PLUS 0x1005 // wheellenni_931110
+#define ADSL_SET_MODE_AUTO_SENSE_GDMT_FIRST 0x1006 //yyfeng_050719
+#define ADSL_SET_MODE_AUTO_SENSE_T1413_FIRST 0x1007 //yyfeng_050719
+#define ADSL_SET_MODE_GDMT_OR_LITE 0x1008 //yyfeng_050719
+#define ADSL_SET_MODE_AUTO_SENSE_T1413_OFF 0x1009 //yyfeng_070210
+#define ADSL_SET_MODE_AUTO_SENSE_ADSL2_OFF 0x100a //yyfeng_070210
+#define ADSL_SET_MODE_ADSL1_MULTI 0x100b //yyfeng_070210
+#define ADSL_SET_MODE_ADSL2_MULTI 0x100c //yyfeng_070210
+#define ADSL_SET_MODE_ADSL2PLUS_T1413 0x100d // Ryan_961128
+#define ADSL_SET_MODE_VDSL2 0x100e
+#if defined(TCSUPPORT_CWMP_VDSL)
+#define ADSL_SET_MODE_ADSL2PLUS_MULTI 0x100f
+#else
+#endif
+#define ADSL_SET_R_VENDOR_ID 0x1010
+#define ADSL_SET_TX_GAIN 0x1011
+#define ADSL_SET_TX_FILTER_TYPE 0x1012
+#define ADSL_SET_TX_MEDLEY_TONE 0x1013
+#define ADSL_SET_SNR_OFFSET 0x1014
+#define ADSL_SET_MIN_SNR_M 0x1015
+#define ADSL_SET_RTS_LOG_STOP 0x0016
+#define ADSL_SET_RTS_LOG_RESUME 0x0017
+#define ADSL_SET_RTS_LOG_FREE 0x0018
+#define ADSL_SET_R_SOFT_RESET 0x0019
+#define ADSL_SET_DMT_CLOSE 0x001a
+#define ADSL_SET_INT_MASK0 0x001b
+#define ADSL_SET_DGASP_CNTR_ZERO 0x001c // Gilb_920901_1
+#define ADSL_SET_CONSOLE_DATA 0x001d // Jason_930315
+#define ADSL_SET_TRELLIS 0x1100 //yyfeng_050719
+#define ADSL_SET_BITSWAP 0x1101 //yyfeng_050719
+#define ADSL_SET_PRINT 0x1102 //jmxu_071219
+#define ADSL_SET_SRA 0X1103 //Roger_090206
+#define ADSL_SET_PM 0X1104 //Roger_090206
+#define ADSL_SET_LDM 0X1105 //Roger_090206
+#define ADSL_SET_DMT_DYING_GASP 0x3000
+/*//yyfeng_060510_1
+#define ADSL_SET_ANNEX_M 0x1102 //yyfeng_060424
+#define ADSL_SET_ANNEX 0x1103 //yyfeng_060425
+*/
+
+#if defined(TCSUPPORT_CPU_MT7510) && defined(TCSUPPORT_BONDING)
+#define VDSL2_QUERY_BONDING_BACP_SUPPORT 0x2003
+#define TCIF_SET_BONDING_BACP_SUPPORT 0x3001
+#define BONDING_OFF_BACP_OFF 0x0
+#define BONDING_ON_BACP_OFF 0x1
+#define BONDING_OFF_BACP_ON 0x2
+#define BONDING_ON_BACP_ON 0x3
+#endif
+
+
+//julia_070424
+#define ME_CMD_ADSL_ANNEXA 0x21
+#define ME_CMD_ADSL_ANNEXB 0x22
+#define ME_CMD_ADSL_ANNEXI 0x23
+#define ME_CMD_ADSL_ANNEXJ 0x24
+#define ME_CMD_ADSL_ANNEXM 0x25
+#define ME_CMD_ADSL_ANNEXL 0x26
+//julia_070424
+
+#define ADSL_MODEM_STATE_DOWN 0x00
+#define ADSL_MODEM_STATE_WAIT_INIT 0x08
+#define ADSL_MODEM_STATE_INIT 0x10
+#define ADSL_MODEM_STATE_UP 0x20
+
+#define ME_CMD_ADSL_SELFTEST1 0x01
+#define ME_CMD_ADSL_SELFTEST2 0x02
+#define ME_CMD_ADSL_OPEN 0x03
+#define ME_CMD_ADSL_CLOSE 0x04
+/*
+#define ME_CMD_ADSL_ANSI 0x05
+#define ME_CMD_ADSL_OPEN_GDMT 0x06
+#define ME_CMD_ADSL_OPEN_GLITE 0x07
+#define ME_CMD_ADSL_OPEN_MULTIMODE 0x08
+//xzwang 041103
+#define ME_CMD_ADSL_OPEN_GDMT_GLITE 0x0B
+*/
+//Jason_930706
+#define ME_CMD_ADSL2 0x09
+#define ME_CMD_ADSL2PLUS 0x0A
+#define ME_CMD_VDSL2 0x0B
+//Sam 20140305
+#define ME_CMD_GVECTOR 0x0C
+#define ME_CMD_GVECTOR_GINP 0x0D
+#define ME_CMD_VDSL2_GINP 0x0E
+#define ME_CMD_ADSL2_GINP 0x0F
+#define ME_CMD_ADSL2PLUS_GINP 0x10
+
+//xzwang
+#define ME_CMD_ADSL_ANSI 0x05
+#define ME_CMD_ADSL_OPEN_GDMT 0x06
+#define ME_CMD_ADSL_OPEN_GLITE 0x07
+#define ME_CMD_ADSL_OPEN_MULTIMODE 0x08
+#define ME_CMD_ADSL_GDMT_OR_LITE 0x91 //yyfeng_041015_14
+#define ME_CMD_ADSL_AUTO_GDMT_FIRST 0x92
+#define ME_CMD_ADSL_AUTO_T1413_FIRST 0x93
+
+void TCIfQuery( unsigned short query_id, void *result1, void *result2 );
+void TCIfSet( unsigned short set_id, void *value1, void *value2 );
+
+typedef struct {
+ void (*query)(unsigned short query_id, void *result1, void *result2);
+ void (*set)(unsigned short set_id, void *value1, void *value2);
+
+ void (*rts_rcv)(struct sk_buff *skb);
+
+ int (*rts_cmd)(int argc,char *argv[],void *p);
+ int (*dmt_cmd)(int argc,char *argv[],void *p);
+ int (*dmt2_cmd)(int argc,char *argv[],void *p);
+ int (*hw_cmd)(int argc,char *argv[],void *p);
+ int (*sw_cmd)(int argc,char *argv[],void *p);
+ int (*ghs_cmd)(int argc,char *argv[],void *p);
+ int (*tcif_cmd)(int argc,char *argv[],void *p);
+} adsldev_ops;
+
+extern adsldev_ops *adsl_dev_ops;
+void adsl_dev_ops_register(adsldev_ops *ops);
+void adsl_dev_ops_deregister(void);
+#if defined(TCSUPPORT_BONDING)
+extern adsldev_ops *adsl_dev_ops_slave;
+void adsl_dev_ops_register_slave(adsldev_ops *ops);
+void adsl_dev_ops_deregister_slave(void);
+#endif
+
+#if defined(CONFIG_RALINK_VDSL)
+typedef struct {
+ int (*vdsl2_cmd)(int argc,char *argv[],void *p);
+} vdsldev_ops;
+
+extern vdsldev_ops *vdsl_dev_ops;
+void vdsl_dev_ops_register(vdsldev_ops *ops);
+void vdsl_dev_ops_deregister(void);
+
+#if defined(TCSUPPORT_BONDING)
+extern vdsldev_ops *vdsl_dev_ops_slave;
+void vdsl_dev_ops_register_slave(vdsldev_ops *ops);
+void vdsl_dev_ops_deregister_slave(void);
+#endif
+
+#endif
+
+#endif
Index: linux-3.18.21/arch/mips/include/asm/tc3162/cmdparse.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/include/asm/tc3162/cmdparse.h 2018-02-05 13:19:54.000000000 +0800
@@ -0,0 +1,32 @@
+#ifndef _CMDPARSE_H_
+#define _CMDPARSE_H_
+/************************************************************************
+ *
+ * Copyright (C) 2006 Trendchip Technologies, Corp.
+ * All Rights Reserved.
+ *
+ * Trendchip Confidential; Need to Know only.
+ * Protected as an unpublished work.
+ *
+ * The computer program listings, specifications and documentation
+ * herein are the property of Trendchip Technologies, Co. and shall
+ * not be reproduced, copied, disclosed, or used in whole or in part
+ * for any reason without the prior express written permission of
+ * Trendchip Technologies, Co.
+ *
+ *************************************************************************/
+
+typedef struct {
+ char *name;
+ int (*func)(int argc,char *argv[],void *p);
+ int flags;
+ int argcmin;
+ char *argc_errmsg;
+} cmds_t;
+
+extern int cmd_reg_add(char *cmd_name, cmds_t *cmds_p);
+extern int cmd_register(cmds_t *cmds_p);
+extern int cmd_unregister(char *name);
+extern int subcmd(const cmds_t tab[], int argc, char *argv[], void *p);
+
+#endif /* _CMDPARSE_ */
Index: linux-3.18.21/arch/mips/include/asm/tc3162/cpu-feature-overrides.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/include/asm/tc3162/cpu-feature-overrides.h 2018-02-05 13:19:54.000000000 +0800
@@ -0,0 +1,68 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003, 2004 Chris Dearman
+ * Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
+ */
+#ifndef __ASM_MACH_RALINK_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_RALINK_CPU_FEATURE_OVERRIDES_H
+
+
+/*
+ * CPU feature overrides for MIPS boards
+ */
+#ifdef CONFIG_CPU_MIPS32
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_4k_cache 1
+/* #define cpu_has_fpu ? */
+/* #define cpu_has_32fpr ? */
+#define cpu_has_counter 1
+/* #define cpu_has_watch ? */
+#define cpu_has_divec 1
+#define cpu_has_vce 0
+/* #define cpu_has_cache_cdex_p ? */
+/* #define cpu_has_cache_cdex_s ? */
+/* #define cpu_has_prefetch ? */
+#define cpu_has_mcheck 1
+/* #define cpu_has_ejtag ? */
+#define cpu_has_llsc 1
+/* #define cpu_has_vtag_icache ? */
+/* #define cpu_has_dc_aliases ? */
+/* #define cpu_has_ic_fills_f_dc ? */
+#define cpu_has_nofpuex 0
+/* #define cpu_has_64bits ? */
+/* #define cpu_has_64bit_zero_reg ? */
+/* #define cpu_has_inclusive_pcaches ? */
+#define cpu_icache_snoops_remote_store 1
+#endif
+
+#ifdef CONFIG_CPU_MIPS64
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_4k_cache 1
+/* #define cpu_has_fpu ? */
+/* #define cpu_has_32fpr ? */
+#define cpu_has_counter 1
+/* #define cpu_has_watch ? */
+#define cpu_has_divec 1
+#define cpu_has_vce 0
+/* #define cpu_has_cache_cdex_p ? */
+/* #define cpu_has_cache_cdex_s ? */
+/* #define cpu_has_prefetch ? */
+#define cpu_has_mcheck 1
+/* #define cpu_has_ejtag ? */
+#define cpu_has_llsc 1
+/* #define cpu_has_vtag_icache ? */
+/* #define cpu_has_dc_aliases ? */
+/* #define cpu_has_ic_fills_f_dc ? */
+#define cpu_has_nofpuex 0
+/* #define cpu_has_64bits ? */
+/* #define cpu_has_64bit_zero_reg ? */
+/* #define cpu_has_inclusive_pcaches ? */
+#define cpu_icache_snoops_remote_store 1
+#endif
+
+#endif /* __ASM_MACH_MIPS_CPU_FEATURE_OVERRIDES_H */
Index: linux-3.18.21/arch/mips/include/asm/tc3162/ecnt_traps.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/include/asm/tc3162/ecnt_traps.h 2018-02-05 13:19:54.000000000 +0800
@@ -0,0 +1,8 @@
+#ifndef __ECNT_TRAPS_H
+#define __ECNT_TRAPS_H
+
+extern int watchFlag;
+extern void nmi_info_store( struct pt_regs *regs);
+extern void __noreturn die_nmi(const char *str, struct pt_regs *regs, spinlock_t *lock);
+
+#endif
Index: linux-3.18.21/arch/mips/include/asm/tc3162/int_source.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/include/asm/tc3162/int_source.h 2018-02-05 13:19:54.000000000 +0800
@@ -0,0 +1,101 @@
+/************************************************************************
+ *
+ * Copyright (C) 2006 Trendchip Technologies, Corp.
+ * All Rights Reserved.
+ *
+ * Trendchip Confidential; Need to Know only.
+ * Protected as an unpublished work.
+ *
+ * The computer program listings, specifications and documentation
+ * herein are the property of Trendchip Technologies, Co. and shall
+ * not be reproduced, copied, disclosed, or used in whole or in part
+ * for any reason without the prior express written permission of
+ * Trendchip Technologies, Co.
+ *
+ *************************************************************************/
+
+#ifndef _INT_SOURCE_H_
+#define _INT_SOURCE_H_
+
+enum
+interrupt_source
+ {
+#if defined(CONFIG_MIPS_TC3262) && defined(TCSUPPORT_CT)
+ DUMMY_INT,
+#endif
+ UART_INT, //0 IPL10
+#if defined(CONFIG_MIPS_TC3262) && defined(TCSUPPORT_CT)
+ PTM_B0_INT, //1
+ SI_SWINT1_INT0, //2
+ SI_SWINT1_INT1, //3
+#else
+ RTC_ALARM_INT, //1 IPL29
+ RTC_TICK_INT, //2 IPL31
+ RESERVED0, //3 IPL30
+#endif
+ TIMER0_INT, //4 IPL1
+ TIMER1_INT, //5 IPL5
+ TIMER2_INT, //6 IPL6
+#if defined(CONFIG_MIPS_TC3262) && defined(TCSUPPORT_CT)
+ SI_SWINT_INT0, //7
+ SI_SWINT_INT1, //8
+#else
+ TIMER3_INT, //7 IPL7
+ TIMER4_INT, //8 IPL8
+#endif
+ TIMER5_INT, //9 IPL9
+ GPIO_INT, //10 IPL11
+#if defined(CONFIG_MIPS_TC3262) && defined(TCSUPPORT_CT)
+ RESERVED1, //11 IPL20
+ SI_PC1_INT, //12
+ SI_PC_INT, //13
+#else
+ PCIE_A_INT, //11 IPL20
+ PCIE_SERR_INT, //12 IPL21
+ RESERVED3, //13 IPL22
+#endif
+ APB_DMA0_INT, //14 IPL12
+#if defined(CONFIG_MIPS_TC3262) && defined(TCSUPPORT_CT)
+ MAC1_INT, //15 IPL13
+#else
+ APB_DMA1_INT, //15 IPL13
+#endif
+ HSUART_INT, //16 IPL23
+#if defined(CONFIG_MIPS_TC3262) && defined(TCSUPPORT_CT)
+
+ PTM_B1_INT, //17
+#else
+ RESERVED5, //17 IPL24
+#endif
+ DYINGGASP_INT, //18 IPL25
+ DMT_INT, //19 IPL26
+#if defined(CONFIG_MIPS_TC3262) && defined(TCSUPPORT_CT)
+ USB20_INT,
+#else
+ ARBITER_ERR_INT,//20 IPL0
+#endif
+ MAC_INT, //21 IPL3
+ SAR_INT, //22 IPL2
+#if defined(CONFIG_MIPS_TC3262) && defined(TCSUPPORT_CT)
+ USB11_INT,
+#else
+ USB_INT, //23 IPL14
+#endif
+ PCI_A_INT, //24
+ PCI_B_INT, //25
+// RESERVED8, //24 IPL27
+// RESERVED9, //25 IPL28
+ XSLV0_INT, //26 IPL15
+ XSLV1_INT, //27 IPL16
+ XSLV2_INT, //28 IPL17
+#if defined(CONFIG_MIPS_TC3262) && defined(TCSUPPORT_CT)
+ SI_TIMER1_INT, //29
+ SI_TIMER_INT, //30
+#else
+ XAPB0_INT, //29 IPL18
+ XAPB1_INT, //30 IPL19
+#endif
+ SWR_INT //31 IPL4
+ };
+
+#endif /* _INT_SOURCE_H_ */
Index: linux-3.18.21/arch/mips/include/asm/tc3162/irq.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/include/asm/tc3162/irq.h 2018-02-05 13:19:54.000000000 +0800
@@ -0,0 +1,13 @@
+#ifndef __ASM_MACH_MIPS_IRQ_H
+#define __ASM_MACH_MIPS_IRQ_H
+
+
+#ifdef CONFIG_MIPS_TC3262
+#define NR_IRQS 64
+#else
+#define NR_IRQS 32
+#endif
+
+#include_next <irq.h>
+
+#endif /* __ASM_MACH_MIPS_IRQ_H */
Index: linux-3.18.21/arch/mips/include/asm/tc3162/kprofile_hook.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/include/asm/tc3162/kprofile_hook.h 2018-02-05 13:19:54.000000000 +0800
@@ -0,0 +1,42 @@
+#ifndef __KPROFILE_HOOK_H
+#define __KPROFILE_HOOK_H
+
+extern void (*profilingSetupHook)(unsigned int tbl_size);
+extern void (*profilingEventHook)(unsigned int cntr, unsigned int event, unsigned int count,
+ unsigned int kernel, unsigned int user, unsigned int exl);
+extern void (*profilingStartHook)(void);
+extern void (*profilingStopHook)(void);
+extern void (*profilingLogHook)(unsigned int label, unsigned int usr_data);
+
+static inline void kprofileSetup(unsigned int tbl_size)
+{
+ if (profilingSetupHook)
+ profilingSetupHook(tbl_size);
+}
+
+static inline void kprofileEvent(unsigned int cntr, unsigned int event, unsigned int count,
+ unsigned int kernel, unsigned int user, unsigned int exl)
+{
+ if (profilingEventHook)
+ profilingEventHook(cntr, event, count, kernel, user, exl);
+}
+
+static inline void kprofileStart(void)
+{
+ if (profilingStartHook)
+ profilingStartHook();
+}
+
+static inline void kprofileStop(void)
+{
+ if (profilingStopHook)
+ profilingStopHook();
+}
+
+static inline void kprofileLog(unsigned int label, unsigned int usr_data)
+{
+ if (profilingLogHook)
+ profilingLogHook(label, usr_data);
+}
+
+#endif
Index: linux-3.18.21/arch/mips/include/asm/tc3162/ledcetrl.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/include/asm/tc3162/ledcetrl.h 2018-02-05 14:20:40.000000000 +0800
@@ -0,0 +1,341 @@
+/*
+** $Id: ledcetrl.h,v 1.4 2011/01/07 04:05:20 pork Exp $
+*/
+/************************************************************************
+ *
+ * Copyright (C) 2006 Trendchip Technologies, Corp.
+ * All Rights Reserved.
+ *
+ * Trendchip Confidential; Need to Know only.
+ * Protected as an unpublished work.
+ *
+ * The computer program listings, specifications and documentation
+ * herein are the property of Trendchip Technologies, Co. and shall
+ * not be reproduced, copied, disclosed, or used in whole or in part
+ * for any reason without the prior express written permission of
+ * Trendchip Technologies, Co.
+ *
+ *************************************************************************/
+/*
+** $Log: ledcetrl.h,v $
+** Revision 1.4 2011/01/07 04:05:20 pork
+** commit TDI layer with Zarlink 2S1O
+**
+** Revision 1.3 2010/10/15 05:02:10 xmdai_nj
+** #7017:add function for wlan button(main trunk)
+**
+** Revision 1.2 2010/06/11 00:31:06 xhshi
+** #5964 support dare's new led and key requests
+**
+** Revision 1.1.1.1 2010/04/09 09:39:21 feiyan
+** New TC Linux Make Flow Trunk
+**
+** Revision 1.3 2010/03/22 14:15:16 yzwang_nj
+** [Bug#5128] Implement TR068 LED
+**
+** Revision 1.2 2010/03/19 07:16:59 xyyou
+** # 5213 Add RT3390 11n Webpage
+** Support RT3390 AutoChannel feature
+** Support RT3390 WMM feature
+** # 5214 Support RT3390 wps feature
+** Add wps led spec&not spec
+**
+** Revision 1.1.1.1 2009/12/17 01:42:47 josephxu
+** 20091217, from Hinchu ,with VoIP
+**
+** Revision 1.1.1.1 2007/04/12 09:42:03 ian
+** TCLinuxTurnkey2007
+**
+** Revision 1.2 2006/07/06 07:24:57 lino
+** update copyright year
+**
+** Revision 1.1.1.1 2005/11/02 05:45:38 lino
+** no message
+**
+** Revision 1.3 2004/10/12 08:33:45 lino
+** add 10M/100M ethernet led define
+**
+*/
+
+#ifndef _LEDCETRL_H
+#define _LEDCETRL_H
+
+/*__________________________________________________________________________
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% constant definition
+%%________________________________________________________________________*/
+
+
+
+#ifdef TCSUPPORT_GPIO_ECM
+#define LED_TYPE_MAX_NO 16
+ //define led flash period parameter
+#define MAX_LED_FLASH_PERIOD 128
+#define LED_FLASH_PERIOD_LOW 1
+#define LED_FLASH_PERIOD_HIGH 0
+#define MAX_LED_FLASH_MAP 8
+ //define serial gpio control status
+#define SLED_STATUS_REDAY 0x1<<31
+ // define led interface type
+#define LED_SERIAL 1
+#define LED_PARALLEL 0
+#endif
+
+#ifdef TCSUPPORT_USB_HOST_LED
+#define USBPHYPORT1 1
+#define USBPHYPORT2 2
+
+#define USB_DISCONNECT 1
+#define USB_CONNECT 2
+#define USB_BLINK 3
+#define USB_DEFAULT 4
+#endif
+
+#ifdef TCSUPPORT_WLAN_LED_BY_SW
+#define WLAN_DISCONNECT 0
+#define WLAN_CONNECT 1
+#define WLAN_BLINK 2
+#define WLAN_DEFAULT 3
+#endif
+// define led mode
+#define LED_MODE_NOT_USED 0
+#define LED_MODE_ONOFF 1
+#define LED_MODE_BLINK 2
+#define LED_MODE_NOACT 3
+#define LED_MODE_PULSE 4
+/*20090811pork modified: add for sys reset gpio*/
+#define LED_MODE_INPUT 5
+#define LED_MODE_MASK 0xf
+#define LED_MODE_BICOLOR 0x10
+
+// define led blink speed
+#define LED_SPEED_FAST 0
+#define LED_SPEED_MED 1
+#define LED_SPEED_SLOW 2
+#define LED_SPEED_VERY_SLOW 3
+#ifdef TCSUPPORT_WLAN_LED_BY_SW
+#define LED_SPEED_FASTEST 4
+#endif
+// define software led no
+#define LED_BASE 0
+
+#define LED_DSL_STATUS (LED_BASE + 0)
+#define LED_DSL_ACT_STATUS (LED_BASE + 1)
+#define LED_DSL_NOACT_STATUS (LED_BASE + 2)
+#define LED_DSL_IDLE_STATUS (LED_BASE + 3)
+#define LED_DSL_HANDSHAKE_STATUS (LED_BASE + 4)
+#define LED_DSL_TRAINING_STATUS (LED_BASE + 5)
+#define LED_DSL_SHOWTIME_STATUS (LED_BASE + 6)
+
+#define LED_PPP_STATUS (LED_BASE + 7)
+#define LED_PPP_ACT_STATUS (LED_BASE + 8)
+#define LED_PPP_NOACT_STATUS (LED_BASE + 9)
+
+#define LED_USB_STATUS (LED_BASE + 10)
+#define LED_USB_ACT_STATUS (LED_BASE + 11)
+#define LED_USB_NOACT_STATUS (LED_BASE + 12)
+
+#define LED_WLAN_STATUS (LED_BASE + 13)
+#define LED_WLAN_ACT_STATUS (LED_BASE + 14)
+#define LED_WLAN_NOACT_STATUS (LED_BASE + 15)
+
+#define LED_ETHER_STATUS (LED_BASE + 16)
+#define LED_ETHER_ACT_STATUS (LED_BASE + 17)
+#define LED_ETHER_NOACT_STATUS (LED_BASE + 18)
+
+#define LED_FLASH_STATUS (LED_BASE + 19)
+#define LED_SYS_BOOT_STATUS (LED_BASE + 20)
+#define LED_SYS_STATUS (LED_BASE + 21)
+#define LED_SYS_INIT_STATUS (LED_BASE + 22)
+
+#define LED_ETHER_10M_STATUS (LED_BASE + 23)
+#define LED_ETHER_10M_ACT_STATUS (LED_BASE + 24)
+#define LED_ETHER_10M_NOACT_STATUS (LED_BASE + 25)
+
+#define LED_ETHER_100M_STATUS (LED_BASE + 26)
+#define LED_ETHER_100M_ACT_STATUS (LED_BASE + 27)
+#define LED_ETHER_100M_NOACT_STATUS (LED_BASE + 28)
+
+#define LED_SIM_CFG_STATUS (LED_BASE + 29)
+#define LED_SIM_CFG_ACT_STATUS (LED_BASE + 30)
+
+#define LED_LAN_RESET (LED_BASE + 31)
+//add for TR068 compliance
+#define LED_INTERNET_STATUS (LED_BASE + 33)
+#define LED_INTERNET_ACT_STATUS (LED_BASE + 34)
+#define LED_INTERNET_NOACT_STATUS (LED_BASE + 35)
+#define LED_INTERNET_TRYING_STATUS (LED_BASE + 38)
+#define LED_TR68_PWR_BOOTING (LED_BASE + 40)
+#define LED_TR68_PWR_BOOTED (LED_BASE + 41)
+#define LED_PHY_TX_POWER_DISABLE (LED_BASE + 42)
+#define LED_WLAN_WPS_STATUS (LED_BASE + 43)
+#define LED_WLAN_WPS_ACT_STATUS (LED_BASE + 44)
+#define LED_WLAN_WPS_NOACT_STATUS (LED_BASE + 45)
+
+#define LED_VOIP_SLIC1_RESET (LED_BASE + 46)
+#define LED_VOIP_SLIC2_RESET (LED_BASE + 47)
+#define LED_DSL_ALARM (LED_BASE + 48)
+#define LED_VOIP_HOOK1_STATUS (LED_BASE + 49)
+#define LED_VOIP_HOOK2_STATUS (LED_BASE + 50)
+
+#define LED_SIM_STATUS (LED_BASE + 51)
+#define LED_SIM_ACT_STATUS (LED_BASE + 52)
+#define LED_SIM_NOACT_STATUS (LED_BASE + 53)
+
+
+#ifdef TCSUPPORT_USB_HOST_LED
+#if defined(TCSUPPORT_CPU_MT7510) || defined(TCSUPPORT_CPU_MT7520) || defined(TCSUPPORT_CPU_MT7505)
+#define LED_USB2_STATUS (LED_BASE + 54)
+#define LED_USB2_ACT_STATUS (LED_BASE + 55)
+#endif
+#endif
+
+#define GPIO_SYS_RESET (LED_BASE + 56)
+
+#define LED_WLAN_RADIO (LED_BASE + 57)
+#define GPIO_WLAN_WPS (LED_BASE + 58)
+
+//add by brian for gpio additional request
+#define LED_PWR_FLASH (LED_BASE + 59)
+
+
+#define LED_PWR_USB (LED_BASE + 61)
+
+#define LED_LED_SWITCH (LED_BASE + 62)
+
+/*20090811 pork added: fit for linos LED setting*/
+#define LED_LAN1_RESET (LED_BASE + 63)
+
+#ifdef TCSUPPORT_XPON_LED
+#define LED_XPON_STATUS (LED_BASE + 64)
+#define LED_XPON_TRYING_STATUS (LED_BASE + 65)
+
+#define LED_XPON_LOS_ON_STATUS (LED_BASE + 66)
+#define LED_XPON_LOS_STATUS (LED_BASE + 67)
+
+#define LED_ETHER_PORT1_STATUS (LED_BASE + 68)
+#define LED_ETHER_PORT1_ACT_STATUS (LED_BASE + 69)
+#define LED_ETHER_PORT2_STATUS (LED_BASE + 70)
+#define LED_ETHER_PORT2_ACT_STATUS (LED_BASE + 71)
+#define LED_ETHER_PORT3_STATUS (LED_BASE + 72)
+#define LED_ETHER_PORT3_ACT_STATUS (LED_BASE + 73)
+#define LED_ETHER_PORT4_STATUS (LED_BASE + 74)
+#define LED_ETHER_PORT4_ACT_STATUS (LED_BASE + 75)
+#endif
+
+#define LED_VOIP_REG_STATUS (LED_BASE + 101)
+#define LED_PHY_VCC_DISABLE (LED_BASE + 102)
+
+#if defined(TCSUPPORT_CPU_EN7516)||defined(TCSUPPORT_CPU_EN7527)
+#define GPIO_WLAN_WPS_5G (LED_BASE + 104)
+#define LED_WLAN_RADIO_5G (LED_BASE + 105)
+#endif
+
+#define mtkled_read_reg_word(reg) regRead32(reg)
+#define mtkled_write_reg_word(reg, wdata) regWrite32(reg, wdata)
+/*__________________________________________________________________________
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% function declaration
+%%________________________________________________________________________*/
+
+void ledInit(void);
+void ledSysInitOn(void);
+void ledSysInitOff(void);
+void ledTurnOn(uint8 led_no);
+void ledTurnOff(uint8 led_no);
+uint8 ledGetMode(uint8 led_no);
+void ledSetMode(uint8 led_no, uint8 mode);
+uint8 ledGetGpio(uint8 led_no);
+void led_oen(uint8 led_no);
+void led_ien(uint8 led_no);
+//#define ledTurnOn(led_no)
+//#define ledTurnOff(led_no)
+int exModeMDIOGpioConf(uint8 mdc_gpio_num,uint8 mdio_gpio_num);
+void exModeMDIOGpioQuery(uint8 * mdc_gpio_num,uint8 * mdio_gpio_num);
+uint32 exModeMDIORead(uint32 reg);
+void exModeMDIOWrite(uint32 reg, uint32 data);
+
+void ledMtnInit(void);
+
+int ledTaskInit(void);
+void ledTask(void);
+
+#ifdef TCSUPPORT_XPON_HAL_API_EXT
+
+#define WLAN_LED_OFF 0
+#define WLAN_LED_ON 1
+#define WLAN_LED_OEN 2
+#define WLAN_LED_IEN 3
+#define WLAN_LED_RECOVER 4
+
+int led_wififh(int action, int num, int b);
+int get_led_mode_data(int led_gpio);
+int get_led_datafh(int num);
+
+/* get the ctrl value of gpio num x */
+#define MTK_LED_GET_GPIO_CTRL(x) get_led_mode_data(x)
+
+/* get the data value of gpio num x */
+#define MTK_LED_GET_GPIO_DATA(x) get_led_datafh(x)
+
+/* gpio num x to write value 1 */
+#define MTK_DO_LED_OFF(x) do { \
+ if(!(led_wififh(WLAN_LED_OFF, x, 0))){ \
+ if(x > 31){ \
+ regWrite32(CR_GPIO_DATA1,regRead32(CR_GPIO_DATA1)|(1<<(x-32))); \
+ }else{ \
+ regWrite32(CR_GPIO_DATA,regRead32(CR_GPIO_DATA)|(1<<x)); \
+ } \
+ } \
+ } while (0)
+/* gpio num x to write value 0 */
+#define MTK_DO_LED_ON(x) do { \
+ if(!(led_wififh(WLAN_LED_ON, x, 0))){ \
+ if(x > 31){ \
+ regWrite32(CR_GPIO_DATA1,regRead32(CR_GPIO_DATA1)& ~(1<<(x-32))); \
+ }else{ \
+ regWrite32(CR_GPIO_DATA,regRead32(CR_GPIO_DATA)& ~(1<<x)); \
+ } \
+ } \
+ } while (0)
+/* gpio num x set to output enable */
+#define MTK_LED_OEN(x) do { if(!(led_wififh(WLAN_LED_OEN, x, 0))){ \
+ if(x > 31){ \
+ if(x > 47){ \
+ regWrite32(CR_GPIO_CTRL3,regRead32(CR_GPIO_CTRL3)|(1<<((x-48)*2))); \
+ }else{ \
+ regWrite32(CR_GPIO_CTRL2,regRead32(CR_GPIO_CTRL2)|(1<<((x-32)*2))); \
+ } \
+ regWrite32(CR_GPIO_ODRAIN1,regRead32(CR_GPIO_ODRAIN1)|(1<<(x-32))); \
+ } \
+ else{ \
+ if(x > 15){ \
+ regWrite32(CR_GPIO_CTRL1,regRead32(CR_GPIO_CTRL1)|(1<<((x-16)*2))); \
+ }else{ \
+ regWrite32(CR_GPIO_CTRL,regRead32(CR_GPIO_CTRL)|(1<<(x*2))); \
+ } \
+ regWrite32(CR_GPIO_ODRAIN,regRead32(CR_GPIO_ODRAIN)|(1<<(x))); \
+ }\
+ }\
+ } while(0)
+/* gpio num x set to input enable */
+#define MTK_LED_IEN(x) do { if(!(led_wififh(WLAN_LED_IEN, x, 0))){ \
+ if(x > 31){ \
+ if(x > 47) \
+ regWrite32(CR_GPIO_CTRL3,regRead32(CR_GPIO_CTRL3)&~(0x00000003 << ((x-48)* 2))); \
+ else \
+ regWrite32(CR_GPIO_CTRL2,regRead32(CR_GPIO_CTRL2)&~(0x00000003 << ((x-32)* 2))); \
+ regWrite32(CR_GPIO_ODRAIN1,regRead32(CR_GPIO_ODRAIN1)&~(0x00000001 << (x-32))); \
+ } \
+ else{ \
+ if(x > 15) \
+ regWrite32(CR_GPIO_CTRL1,regRead32(CR_GPIO_CTRL1)&~(0x00000003 << ((x-16)* 2))); \
+ else \
+ regWrite32(CR_GPIO_CTRL,regRead32(CR_GPIO_CTRL)&~(0x00000003 << (x* 2))); \
+ regWrite32(CR_GPIO_ODRAIN,regRead32(CR_GPIO_ODRAIN)&~(0x00000001 << x)); \
+ } \
+ }\
+ } while(0)
+
+#endif
+#endif
Index: linux-3.18.21/arch/mips/include/asm/tc3162/tc3162.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/include/asm/tc3162/tc3162.h 2018-02-05 13:19:54.000000000 +0800
@@ -0,0 +1,1445 @@
+/*
+** $Id: tc3162.h,v 1.7 2011/01/07 06:05:58 pork Exp $
+*/
+/************************************************************************
+ *
+ * Copyright (C) 2006 Trendchip Technologies, Corp.
+ * All Rights Reserved.
+ *
+ * Trendchip Confidential; Need to Know only.
+ * Protected as an unpublished work.
+ *
+ * The computer program listings, specifications and documentation
+ * herein are the property of Trendchip Technologies, Co. and shall
+ * not be reproduced, copied, disclosed, or used in whole or in part
+ * for any reason without the prior express written permission of
+ * Trendchip Technologies, Co.
+ *
+ *************************************************************************/
+/*
+** $Log: tc3162.h,v $
+** Revision 1.7 2011/01/07 06:05:58 pork
+** add the definition of INT16,INT32,SINT15,SINT7
+**
+** Revision 1.6 2010/09/20 07:08:02 shnwind
+** decrease nf_conntrack buffer size
+**
+** Revision 1.5 2010/09/03 16:43:07 here
+** [Enhance] TC3182 GMAC Driver supports TC-Console & WAN2LAN function & update the tc3182 dmt version (3.12.8.83)
+**
+** Revision 1.4 2010/09/02 07:04:50 here
+** [Enhance] Support TC3162U/TC3182 Auto-Bench
+**
+** Revision 1.3 2010/08/30 07:53:02 lino
+** add power saving mode kernel module support
+**
+** Revision 1.2 2010/06/05 05:40:29 lino
+** add tc3182 asic board support
+**
+** Revision 1.1.1.1 2010/04/09 09:39:21 feiyan
+** New TC Linux Make Flow Trunk
+**
+** Revision 1.4 2010/01/14 10:56:42 shnwind
+** recommit
+**
+** Revision 1.3 2010/01/14 08:00:10 shnwind
+** add TC3182 support
+**
+** Revision 1.2 2010/01/10 15:27:26 here
+** [Enhancement]TC3162U MAC EEE is operated at 100M-FD, SAR interface is according to the SAR_CLK to calculate atm rate.
+**
+** Revision 1.1.1.1 2009/12/17 01:42:47 josephxu
+** 20091217, from Hinchu ,with VoIP
+**
+** Revision 1.2 2006/07/06 07:24:57 lino
+** update copyright year
+**
+** Revision 1.1.1.1 2005/11/02 05:45:38 lino
+** no message
+**
+** Revision 1.5 2005/09/27 08:01:38 bread.hsu
+** adding IMEM support for Tc3162L2
+**
+** Revision 1.4 2005/09/14 11:06:20 bread.hsu
+** new definition for TC3162L2
+**
+** Revision 1.3 2005/06/17 16:26:16 jasonlin
+** Remove redundant code to gain extra 100K bytes free memory.
+** Add "CODE_REDUCTION" definition to switch
+**
+** Revision 1.2 2005/06/14 10:02:01 jasonlin
+** Merge TC3162L2 source code into new main trunk
+**
+** Revision 1.1.1.1 2005/03/30 14:04:22 jasonlin
+** Import Linos source code
+**
+** Revision 1.4 2004/11/15 03:43:17 lino
+** rename ATM SAR max packet length register
+**
+** Revision 1.3 2004/09/01 13:15:47 lino
+** fixed when pc shutdown, system will reboot
+**
+** Revision 1.2 2004/08/27 12:16:37 lino
+** change SYS_HCLK to 96Mhz
+**
+** Revision 1.1 2004/07/02 08:03:04 lino
+** tc3160 and tc3162 code merge
+**
+*/
+
+#ifndef _TC3162_H_
+#define _TC3162_H_
+
+#ifdef CONFIG_MIPS_TC3262
+#include "./tc3182_int_source.h"
+#else
+#include "./int_source.h" /*shnwind add*/
+#endif
+
+
+#ifndef INT32
+#define INT32
+typedef signed long int int32; /* 32-bit signed integer */
+#endif
+
+#ifndef SINT31
+#define SINT31
+typedef signed long int sint31; /* 32-bit signed integer */
+#endif
+
+#ifndef UINT32
+#define UINT32
+typedef unsigned long int uint32; /* 32-bit unsigned integer */
+#endif
+
+#ifndef SINT15
+#define SINT15
+typedef signed short sint15; /* 16-bit signed integer */
+#endif
+
+#ifndef INT16
+#define INT16
+typedef signed short int int16; /* 16-bit signed integer */
+#endif
+
+#ifndef UINT16
+#define UINT16
+typedef unsigned short uint16; /* 16-bit unsigned integer */
+#endif
+
+#ifndef SINT7
+#define SINT7
+typedef signed char sint7; /* 8-bit signed integer */
+#endif
+
+
+#ifndef UINT8
+#define UINT8
+typedef unsigned char uint8; /* 8-bit unsigned integer */
+#endif
+
+#ifndef VPint
+#define VPint *(volatile unsigned long int *)
+#endif
+#ifndef VPshort
+#define VPshort *(volatile unsigned short *)
+#endif
+#ifndef VPchar
+#define VPchar *(volatile unsigned char *)
+#endif
+#ifdef TCSUPPORT_MT7510_E1
+static inline unsigned long int regRead32(uint32 reg) \
+{ \
+ uint32 tmp; \
+ tmp = VPint((reg & 0xf) + 0xbfb003a0); \
+ __asm__ __volatile("sync"); \
+ tmp = VPint(reg); \
+ __asm__ __volatile("sync"); \
+ return tmp; \
+}
+static inline void regWrite32(uint32 reg, uint32 vlaue) \
+{ \
+ VPint(reg) = vlaue; \
+ __asm__ __volatile("sync"); \
+}
+
+#else
+static inline uint32 regRead32(uint32 reg) \
+{ \
+ return VPint(reg); \
+}
+static inline void regWrite32(uint32 reg, uint32 vlaue) \
+{ \
+ VPint(reg) = vlaue; \
+}
+#endif
+static inline unsigned long int regReadPhy32(uint32 reg) \
+{ \
+ uint32 tmp; \
+ tmp = VPint(reg); \
+ tmp = VPint(reg); \
+ return tmp; \
+}
+
+#ifdef CONFIG_CPU_TC3162
+#define TC3162L2 1
+#endif
+
+#define isTC3162L2P2 ((((unsigned char)(regRead32(0xbfb0008c)>>12)&0xff)!=0)&&(((regRead32(0xbfb00064)&0xffffffff))==0x00000000)?1:0)
+#define isTC3162L3P3 ((((unsigned char)(regRead32(0xbfb0008c)>>12)&0xff)==7)&&(((regRead32(0xbfb00064)&0xffffffff))==0x00000000)?1:0)
+#define isTC3162L4P4 ((((unsigned char)(regRead32(0xbfb0008c)>>12)&0xff)==8)&&(((regRead32(0xbfb00064)&0xffffffff))==0x00000000)?1:0)
+#define isTC3162L5P5E2 ((((unsigned char)(regRead32(0xbfb0008c)>>12)&0xff)==0xa)&&(((regRead32(0xbfb00064)&0xffffffff))==0x00000000)?1:0)
+#define isTC3162L5P5E3 ((((unsigned char)(regRead32(0xbfb0008c)>>12)&0xff)==0xb)&&(((regRead32(0xbfb00064)&0xffffffff))==0x00000000)?1:0)
+#define isTC3162L5P5 (isTC3162L5P5E2 || isTC3162L5P5E3)
+#define isTC3162U ((((unsigned char)(regRead32(0xbfb0008c)>>12)&0xff)==0x10)&&(((regRead32(0xbfb00064)&0xffffffff))==0x00000000)?1:0)
+#define isRT63260 ((((unsigned char)(regRead32(0xbfb0008c)>>12)&0xff)==0x20)&&(((regRead32(0xbfb00064)&0xffffffff))==0x00000000)?1:0)
+
+#define isTC3169 (((regRead32(0xbfb00064)&0xffff0000))==0x00000000)
+#define isTC3182 (((regRead32(0xbfb00064)&0xffff0000))==0x00010000)
+#define isRT65168 (((regRead32(0xbfb00064)&0xffff0000))==0x00020000)
+#define isRT63165 (((regRead32(0xbfb00064)&0xffff0000))==0x00030000)
+#define isRT63365 (((regRead32(0xbfb00064)&0xffff0000))==0x00040000)
+#define isRT63368 (isRT63365 ? ((((regRead32(0xbfb0008c)>>8) & 0x3) == 0x3) ? 1 : 0): 0)
+#define isRT62806 (((gswPbusRead(0x7ffc))&0xffff0000)==0x28060000)
+
+#define isENP_MOD (((regRead32(0xBFBF8214)&0x8)==0x8)?(((regRead32(0xBFBF8214)&0x20)==0x20)?1:0):(((regRead32(0xBFBF8214)&0x2)==0x2)?1:0))
+#define isENS_MOD (((regRead32(0xBFBF8214)&0x8)==0x8)?((regRead32(0xBFBF8214)&0x40)==0x40):((regRead32(0xBFBF8214)&0x4)==0x4))
+#define isMT751020 (((regRead32(0xbfb00064)&0xffff0000))==0x00050000)
+#define isMT7505 (((regRead32(0xbfb00064)&0xffff0000))==0x00060000)
+#define isEN7526c (((VPint(0xbfb00064)&0xffff0000))==0x00080000)
+#define isEN751221 ((((VPint(0xbfb00064)&0xffff0000))==0x00070000) || isEN7526c)
+#define isEN751627 (((VPint(0xbfb00064)&0xffff0000))==0x00090000)
+#define isEN7580 (((VPint(0xbfb00064)&0xffff0000))==0x000A0000)
+#define isMT7510 (isMT751020 && ((regRead32(0xbfb000f8)&0x3)==0) && (isENP_MOD))
+#define isMT7511 (isMT751020 && ((regRead32(0xbfb000f8)&0x3)==0)&& (!isENP_MOD))
+#define isMT7520 (((regRead32(0xbfb000f8)&0x3)==0x2) && isMT751020 && (isENP_MOD) && (!isENS_MOD))
+#define isMT7520G (((regRead32(0xbfb000f8)&0x3)==0x3) && isMT751020 && (isENP_MOD))
+#define isMT7525 (((regRead32(0xbfb000f8)&0x3)==0x2) && isMT751020 && (!isENP_MOD) && (!isENS_MOD))
+#define isMT7525G (((regRead32(0xbfb000f8)&0x3)==0x3) && isMT751020 && (!isENP_MOD))
+#define isMT7520S (((regRead32(0xbfb000f8)&0x3)==0x2) && isMT751020 && (isENS_MOD))
+#define isMT7520E2E3 ((regRead32(0xbfb00064) & 0xFF) <= 0x2 && (isMT7520 || isMT7520G || isMT7525 || isMT7525G || isMT7520S))
+#define isEPONFWID (((regRead32(0xbfb00064) & 0xF) == 0x3) && ((regRead32(0xbfb5fffc) & 0xF) >= 0x1) && (isMT7520 || isMT7520G || isMT7525 || isMT7525G || isMT7520S))
+
+
+#define isEN751221FPGA ((regRead32(0xBFB0008C)&(1<<29)) ? 0 : 1) //used for 7512/7521
+#define isGenernalFPGA ((regRead32(0xBFB0008C)&(1<<31)) ? 1 : 0) //used for 63365/751020
+#define isGenernalFPGA_2 (((VPint(CR_AHB_SSTR) & 0x1) == 0) ? 1 : 0) /*used for EN7526c and later version*/
+#if defined(TCSUPPORT_CPU_EN7516) || defined(TCSUPPORT_CPU_EN7527) || defined(TCSUPPORT_CPU_EN7580)
+#define isFPGA GET_IS_FPGA
+#define SYNC_TYPE4() __asm__ volatile ("sync 0x4")
+#else
+#define isFPGA (isEN7526c ? isGenernalFPGA_2 : (isEN751221 ? isEN751221FPGA : isGenernalFPGA) )
+#endif
+
+#define isEN751627QFP (((regRead32(0xbfa20174) & 0x8000) == 0x8000) ? 1 : 0)
+
+#define EFUSE_VERIFY_DATA0 (0xBFBF8214)
+#define EFUSE_PKG_INGORE_BITE0_MASK (0x3C)
+#if defined(TCSUPPORT_CPU_EN7516) || defined(TCSUPPORT_CPU_EN7527)
+#define EFUSE_PKG_MASK (0xC0000)
+#define EFUSE_REMARK_BIT (1<<0)
+#define EFUSE_PKG_REMARK_SHITF 2
+#else
+#define EFUSE_PKG_MASK (0x3F)
+#define EFUSE_REMARK_BIT (1<<6)
+#define EFUSE_PKG_REMARK_SHITF 7
+#endif
+
+
+#define EFUSE_EN7527H (0x0)
+#define EFUSE_EN7527G (0x0)
+#define EFUSE_EN7516G (0x80000)
+#define EFUSE_EN7526F (0x0)
+#define EFUSE_EN7521F (0x10)
+#define EFUSE_EN7521S (0x20)
+#define EFUSE_EN7512 (0x4)
+#define EFUSE_EN7526D (0x1)
+#define EFUSE_EN7513 (0x5)
+#define EFUSE_EN7526G (0x2)
+#define EFUSE_EN7521G (0x12)
+#define EFUSE_EN7513G (0x6)
+#define EFUSE_EN7586 (0xA)
+#define EFUSE_EN7586 (0xA)
+
+#define isEN7527H ((isEN751627 && isEN751627QFP) && ( (regRead32(EFUSE_VERIFY_DATA0)&EFUSE_REMARK_BIT)? \
+ (((regRead32(EFUSE_VERIFY_DATA0)>>EFUSE_PKG_REMARK_SHITF)& EFUSE_PKG_MASK)== EFUSE_EN7527H): \
+ ((regRead32(EFUSE_VERIFY_DATA0)&EFUSE_PKG_MASK)==EFUSE_EN7527H)))
+#define isEN7527G ((isEN751627 && !isEN751627QFP) && ( (regRead32(EFUSE_VERIFY_DATA0)&EFUSE_REMARK_BIT)? \
+ (((regRead32(EFUSE_VERIFY_DATA0)>>EFUSE_PKG_REMARK_SHITF)& EFUSE_PKG_MASK)== EFUSE_EN7527G): \
+ ((regRead32(EFUSE_VERIFY_DATA0)&EFUSE_PKG_MASK)==EFUSE_EN7527G)))
+#define isEN7516G (isEN751627 && ( (regRead32(EFUSE_VERIFY_DATA0)&EFUSE_REMARK_BIT)? \
+ (((regRead32(EFUSE_VERIFY_DATA0)>>EFUSE_PKG_REMARK_SHITF)& EFUSE_PKG_MASK)== EFUSE_EN7516G): \
+ ((regRead32(EFUSE_VERIFY_DATA0)&EFUSE_PKG_MASK)==EFUSE_EN7516G)))
+#define isEN7526F (isEN7526c? \
+ ((regRead32(EFUSE_VERIFY_DATA0)&EFUSE_REMARK_BIT)? \
+ (((regRead32(EFUSE_VERIFY_DATA0)>>EFUSE_PKG_REMARK_SHITF)& EFUSE_PKG_INGORE_BITE0_MASK)== EFUSE_EN7526F): \
+ ((regRead32(EFUSE_VERIFY_DATA0)&EFUSE_PKG_INGORE_BITE0_MASK)==EFUSE_EN7526F)): \
+ (isEN751221 && ( (regRead32(EFUSE_VERIFY_DATA0)&EFUSE_REMARK_BIT)? \
+ (((regRead32(EFUSE_VERIFY_DATA0)>>EFUSE_PKG_REMARK_SHITF)& EFUSE_PKG_MASK)== EFUSE_EN7526F): \
+ ((regRead32(EFUSE_VERIFY_DATA0)&EFUSE_PKG_MASK)==EFUSE_EN7526F))))
+ #define isEN7526FP (isEN7526c & isEN7526F & (((VPint(0xBFA201EC) & (1<<10)))==(1<<10)))
+#define isEN7521F (isEN7526c? \
+ ((regRead32(EFUSE_VERIFY_DATA0)&EFUSE_REMARK_BIT)? \
+ (((regRead32(EFUSE_VERIFY_DATA0)>>EFUSE_PKG_REMARK_SHITF)& EFUSE_PKG_INGORE_BITE0_MASK)== EFUSE_EN7521F): \
+ ((regRead32(EFUSE_VERIFY_DATA0)&EFUSE_PKG_INGORE_BITE0_MASK)==EFUSE_EN7521F)): \
+ (isEN751221 && ( (regRead32(EFUSE_VERIFY_DATA0)&EFUSE_REMARK_BIT)? \
+ (((regRead32(EFUSE_VERIFY_DATA0)>>EFUSE_PKG_REMARK_SHITF)& EFUSE_PKG_MASK)== EFUSE_EN7521F): \
+ ((regRead32(EFUSE_VERIFY_DATA0)&EFUSE_PKG_MASK)==EFUSE_EN7521F))))
+#define isEN7521S (isEN7526c? \
+ ((regRead32(EFUSE_VERIFY_DATA0)&EFUSE_REMARK_BIT)? \
+ (((regRead32(EFUSE_VERIFY_DATA0)>>EFUSE_PKG_REMARK_SHITF)& EFUSE_PKG_INGORE_BITE0_MASK)== EFUSE_EN7521S): \
+ ((regRead32(EFUSE_VERIFY_DATA0)&EFUSE_PKG_INGORE_BITE0_MASK)==EFUSE_EN7521S)): \
+ (isEN751221 && ( (regRead32(EFUSE_VERIFY_DATA0)&EFUSE_REMARK_BIT)? \
+ (((regRead32(EFUSE_VERIFY_DATA0)>>EFUSE_PKG_REMARK_SHITF)& EFUSE_PKG_MASK)== EFUSE_EN7521S): \
+ ((regRead32(EFUSE_VERIFY_DATA0)&EFUSE_PKG_MASK)==EFUSE_EN7521S))))
+#define isEN7512 (isEN751221 && ( (regRead32(EFUSE_VERIFY_DATA0)&EFUSE_REMARK_BIT)? \
+ (((regRead32(EFUSE_VERIFY_DATA0)>>EFUSE_PKG_REMARK_SHITF)& EFUSE_PKG_MASK)== EFUSE_EN7512): \
+ ((regRead32(EFUSE_VERIFY_DATA0)&EFUSE_PKG_MASK)==EFUSE_EN7512)))
+#define isEN7526D (isEN751221 && ( (regRead32(EFUSE_VERIFY_DATA0)&EFUSE_REMARK_BIT)? \
+ (((regRead32(EFUSE_VERIFY_DATA0)>>EFUSE_PKG_REMARK_SHITF)& EFUSE_PKG_MASK)== EFUSE_EN7526D): \
+ ((regRead32(EFUSE_VERIFY_DATA0)&EFUSE_PKG_MASK)==EFUSE_EN7526D)))
+#define isEN7513 (isEN751221 && ( (regRead32(EFUSE_VERIFY_DATA0)&EFUSE_REMARK_BIT)? \
+ (((regRead32(EFUSE_VERIFY_DATA0)>>EFUSE_PKG_REMARK_SHITF)& EFUSE_PKG_MASK)== EFUSE_EN7513): \
+ ((regRead32(EFUSE_VERIFY_DATA0)&EFUSE_PKG_MASK)==EFUSE_EN7513)))
+#define isEN7526G (isEN751221 && ( (regRead32(EFUSE_VERIFY_DATA0)&EFUSE_REMARK_BIT)? \
+ (((regRead32(EFUSE_VERIFY_DATA0)>>EFUSE_PKG_REMARK_SHITF)& EFUSE_PKG_MASK)== EFUSE_EN7526G): \
+ ((regRead32(EFUSE_VERIFY_DATA0)&EFUSE_PKG_MASK)==EFUSE_EN7526G)))
+#define isEN7521G (isEN751221 && ( (regRead32(EFUSE_VERIFY_DATA0)&EFUSE_REMARK_BIT)? \
+ (((regRead32(EFUSE_VERIFY_DATA0)>>EFUSE_PKG_REMARK_SHITF)& EFUSE_PKG_MASK)== EFUSE_EN7521G): \
+ ((regRead32(EFUSE_VERIFY_DATA0)&EFUSE_PKG_MASK)==EFUSE_EN7521G)))
+#define isEN7513G (isEN751221 && ( (regRead32(EFUSE_VERIFY_DATA0)&EFUSE_REMARK_BIT)? \
+ (((regRead32(EFUSE_VERIFY_DATA0)>>EFUSE_PKG_REMARK_SHITF)& EFUSE_PKG_MASK)== EFUSE_EN7513G): \
+ ((regRead32(EFUSE_VERIFY_DATA0)&EFUSE_PKG_MASK)==EFUSE_EN7513G)))
+#define isEN7586 (isEN751221 && ( (regRead32(EFUSE_VERIFY_DATA0)&EFUSE_REMARK_BIT)? \
+ (((regRead32(EFUSE_VERIFY_DATA0)>>EFUSE_PKG_REMARK_SHITF)& EFUSE_PKG_MASK)== EFUSE_EN7586): \
+ ((regRead32(EFUSE_VERIFY_DATA0)&EFUSE_PKG_MASK)==EFUSE_EN7586)))
+
+
+#define EFUSE_DDR3_BIT (1<<23)
+#define EFUSE_DDR3_REMARK_BIT (1<<24)
+#define EFUSE_IS_DDR3 ( (regRead32(EFUSE_VERIFY_DATA0)&EFUSE_REMARK_BIT)? \
+ ((regRead32(EFUSE_VERIFY_DATA0)& EFUSE_DDR3_REMARK_BIT)): \
+ ((regRead32(EFUSE_VERIFY_DATA0)& EFUSE_DDR3_BIT)))
+
+#define REG_SAVE_INFO 0xBFB00284
+#define GET_REG_SAVE_INFO_POINT ((volatile SYS_GLOBAL_PARM_T *)REG_SAVE_INFO)
+
+typedef union {
+ struct {
+ uint32 resv : 8 ;
+ uint32 isCtrlEcc : 1 ;
+ uint32 isFpga : 1 ;
+ uint32 sys_clk : 10; /* bus clock can support up to 1024MHz */
+ uint32 dram_size : 12; /* DRAM size can support up to 2048MB */
+ } raw ;
+ uint32 word;
+} SYS_GLOBAL_PARM_T ;
+
+#define GET_DRAM_SIZE (GET_REG_SAVE_INFO_POINT->raw.dram_size)
+#define SET_DRAM_SIZE(x) (GET_DRAM_SIZE = (x & 0xFFF))
+#define GET_SYS_CLK (GET_REG_SAVE_INFO_POINT->raw.sys_clk)
+#define SET_SYS_CLK(x) (GET_SYS_CLK = (x & 0x3FF))
+#define GET_IS_FPGA (GET_REG_SAVE_INFO_POINT->raw.isFpga)
+#define SET_IS_FPGA(x) (GET_IS_FPGA = (x & 0x1))
+#define GET_IS_SPI_CONTROLLER_ECC (GET_REG_SAVE_INFO_POINT->raw.isCtrlEcc)
+#define SET_IS_SPI_CONTROLLER_ECC(x) (GET_IS_SPI_CONTROLLER_ECC = (x & 0x1))
+
+
+#ifdef TCSUPPORT_MT7510_E1
+#define READ_E1(x) do{if (VPint(x) == 0)printk("Error Reg %x\n",x);}while(0)
+#endif
+
+#if defined(TCSUPPORT_CPU_MT7510) || defined(TCSUPPORT_CPU_MT7520)
+#define JUDGE_SWITCH_SCENARIO_BY_751020_SUBMODEL
+#ifdef JUDGE_SWITCH_SCENARIO_BY_751020_SUBMODEL
+#define MT751020_SUBMODEL_REG (0xbfb000f8)
+
+#define ExistExtMT7530 (isMT7520G || isMT7525G)
+#define DefaultUseExtMT7530 (isMT7520G || isMT7525G)
+#define LanPortIntMT7530 (isMT7510 || isMT7511 || isMT7520 || isMT7525)
+#define LanPortExtMT7530 (isMT7520G || isMT7525G)
+#endif
+#endif
+
+#define isMT7530 (((regRead32(0xbfb58000+0x7ffc)&0xffff0000))==0x75300000)
+//#define isMT7530FPGA (((gswPbusRead(0x7ffc))&0xffff0000)==0x75300000)
+#define isMT7530ext (((gswPbusRead(0x7ffc))&0xffff0000)==0x75300000)
+
+#ifdef TC3162L2
+#define RT63260_SYS_HCLK ((12*(((regRead32(0xbfb000b0))&0x1ff)+1)/(((regRead32(0xbfb000b0)>>9)&0x1f)+1))/5)
+#define TC3162U_SYS_HCLK (3*(((regRead32(0xbfb000b0)>>16)&0x1ff)+1)/(((regRead32(0xbfb000b0)>>25)&0x1f)+1))
+#define SYS_HCLK (isRT63260 ? RT63260_SYS_HCLK : (isTC3162U ? TC3162U_SYS_HCLK : 133))
+#endif
+
+#ifdef CONFIG_MIPS_TC3262
+/* RT63165 ASIC */
+/* FPGA is 25Mhz, ASIC LQFP128 is 166.67Mhz, others are 200Mhz */
+#define RT63165_SYS_HCLK (regRead32(0xbfb0008c)&(1<<31) ? 25 : (regRead32(0xbfb0008c)&(1<<9) ? (200) : (16667/100)))
+/* RT63365 ASIC */
+/* FPGA is 25/32Mhz
+ * ASIC RT6856/RT63368: DDR(0): 233.33, DDR(1): 175, SDR: 140
+ * RT6855/RT63365: DDR(0): 166.67, DDR(1): 125, SDR: 140 */
+#define RT63365_SYS_HCLK (regRead32(0xbfb0008c)&(1<<31) ? (25) : (regRead32(0xbfb0008c)&(1<<9) ? (regRead32(0xbfb0008c)&(1<<25) ? (regRead32(0xbfb0008c)&(1<<26) ? (175) : (23333/100)) : (140)) : (regRead32(0xbfb0008c)&(1<<25) ? (regRead32(0xbfb0008c)&(1<<26) ? (125) : (16667/100)) : (140))))
+#define MT751020_CPU_CLK (((regRead32(0xbfb0008c) >> 0x9) & 0x3) == 0x0) ? (750) : (((regRead32(0xbfb0008c) >> 0x9) & 0x3) == 0x1) ? (650) : (((regRead32(0xbfb0008c) >> 0x9) & 0x3) == 0x2) ? (500) : (250)
+#define MT751020_SYS_HCLK ((regRead32(0xbfb0008c)&(1<<31)) ? (32) : ((((MT751020_CPU_CLK)* 100) / ((regRead32(0xbfb000f8) >> 0x3) & 0x7)) / 100))
+#define MT7505_SYS_HCLK ((regRead32(0xbfb0008c)&(1<<31)) ? (32) : (135)) //MT7505 CPU clock is 540
+#define EN7512_SYS_HCLK ((isFPGA) ? (32) : (GET_SYS_CLK)) //ASIC Clock need Check
+
+#if defined(TCSUPPORT_CPU_EN7516) || defined(TCSUPPORT_CPU_EN7527) || defined(TCSUPPORT_CPU_EN7580)
+#define SYS_HCLK (GET_SYS_CLK)
+#else
+#define SYS_HCLK (isEN751221?EN7512_SYS_HCLK:(isMT7505? MT7505_SYS_HCLK:(isMT751020 ? MT751020_SYS_HCLK : (isRT63365 ? RT63365_SYS_HCLK : (isRT63165 ? RT63165_SYS_HCLK : (isRT65168 ? (1024/10) : (isTC3182 ? (1024/10) : (3*((VPint(0xbfb00058)>>16)+1)/(((VPint(0xbfb00058)&0x1f)+1)<<1)))))))))
+#endif
+#endif
+
+#define SAR_CLK (SYS_HCLK)/(4.0) //more accurate if 4.0 not 4
+
+/* define CPU timer clock, FPGA is 50Mhz, ASIC is 250Mhz */
+#define CPUTMR_CLK (isFPGA ? (50000000) : ((isEN751221 || isEN751627||isEN7580) ? (200000000) : (isMT7505 ? (100000000) : (isMT751020 ? (800000000/3) :(250000000)))))
+
+#if defined(TCSUPPORT_CPU_MT7510) || defined(TCSUPPORT_CPU_MT7520)
+#define DSPRAM_BASE 0x9dff8000
+#else
+#define DSPRAM_BASE 0x9c000000
+#endif
+
+#define ENABLE 1
+#define DISABLE 0
+
+#define WAN2LAN_CH_ID (1<<31)
+
+#define tc_inb(offset) (*(volatile unsigned char *)(offset))
+#define tc_inw(offset) (*(volatile unsigned short *)(offset))
+#define tc_inl(offset) (*(volatile unsigned long *)(offset))
+
+#define tc_outb(offset,val) (*(volatile unsigned char *)(offset) = val)
+#define tc_outw(offset,val) (*(volatile unsigned short *)(offset) = val)
+#define tc_outl(offset,val) (*(volatile unsigned long *)(offset) = val)
+
+#if defined(TCSUPPORT_CPU_EN7512) || defined(TCSUPPORT_CPU_EN7521)
+#define IS_SPIFLASH ((~(VPint(0xBFA10114))) & 0x2)
+#define IS_NANDFLASH (VPint(0xBFA10114) & 0x2)
+#ifdef TCSUPPORT_SPI_CONTROLLER_ECC
+#define isSpiControllerECC (GET_IS_SPI_CONTROLLER_ECC)
+#else
+#define isSpiControllerECC (0)
+#endif
+#define isSpiNandAndCtrlECC (IS_NANDFLASH && isSpiControllerECC)
+
+#else
+#if defined(TCSUPPORT_CPU_MT7505)
+#define IS_SPIFLASH 1
+#define IS_NANDFLASH 0
+#else
+#define IS_SPIFLASH ((regRead32(CR_AHB_SSR) & (1<<20)) || !(regRead32(CR_AHB_HWCONF) & 0x1))
+#define IS_NANDFLASH (regRead32(CR_AHB_HWCONF) & 0x1)
+#endif
+#endif
+#define NF_CONNTRACK_BUF_SIZE 4096
+/*****************************
+ * RBUS CORE Module Registers *
+ *****************************/
+#define ARB_CFG 0xBFA00008
+#define ROUND_ROBIN_ENABLE (1<<30)
+#define ROUND_ROBIN_DISBALE ~(1<<30)
+
+
+
+/*****************************
+ * DMC Module Registers *
+ *****************************/
+
+#define CR_DMC_BASE 0xBFB20000
+#define CR_DMC_SRT (0x00 | CR_DMC_BASE)
+#define CR_DMC_STC (0x01 | CR_DMC_BASE)
+#define CR_DMC_SAMT (0x02 | CR_DMC_BASE)
+#define CR_DMC_SCR (0x03 | CR_DMC_BASE)
+
+/* RT63165 specific */
+/* DDR self refresh control register */
+#define CR_DMC_DDR_SR (0x18 | CR_DMC_BASE)
+/* DDR self refresh target count */
+#define CR_DMC_DDR_SR_CNT (0x1c | CR_DMC_BASE)
+#define CR_DMC_DDR_CFG0 (0x40 | CR_DMC_BASE)
+#define CR_DMC_DDR_CFG1 (0x44 | CR_DMC_BASE)
+#define CR_DMC_DDR_CFG2 (0x48 | CR_DMC_BASE)
+#define CR_DMC_DDR_CFG3 (0x4c | CR_DMC_BASE)
+#define CR_DMC_DDR_CFG4 (0x50 | CR_DMC_BASE)
+#define CR_DMC_DDR_CFG8 (0x60 | CR_DMC_BASE)
+#define CR_DMC_DDR_CFG9 (0x64 | CR_DMC_BASE)
+
+#define CR_DMC_CTL0 (0x70 | CR_DMC_BASE)
+#define CR_DMC_CTL1 (0x74 | CR_DMC_BASE)
+#define CR_DMC_CTL2 (0x78 | CR_DMC_BASE)
+#define CR_DMC_CTL3 (0x7c | CR_DMC_BASE)
+#define CR_DMC_CTL4 (0x80 | CR_DMC_BASE)
+
+#define CR_DMC_DCSR (0xb0 | CR_DMC_BASE)
+
+#define CR_DMC_ISPCFGR (0xc0 | CR_DMC_BASE)
+#define CR_DMC_DSPCFGR (0xc4 | CR_DMC_BASE)
+
+/* MT7510 */
+#define CR_DRAMC_CONF1 (0x04 | CR_DMC_BASE)
+#define CR_DRAMC_PADCTL4 (0xe4 | CR_DMC_BASE)
+
+/*****************************
+ * GDMA Module Registers *
+ *****************************/
+
+#define CR_GDMA_BASE 0xBFB30000
+#define CR_GDMA_DCSA (0x00 | CR_GDMA_BASE)
+#define CR_GDMA_DCDA (0x04 | CR_GDMA_BASE)
+#define CR_GDMA_DCBT (0x08 | CR_GDMA_BASE)
+#define CR_GDMA_DCBL (0x0a | CR_GDMA_BASE)
+#define CR_GDMA_DCC (0x0c | CR_GDMA_BASE)
+#define CR_GDMA_DCS (0x0e | CR_GDMA_BASE)
+#define CR_GDMA_DCKSUM (0x10 | CR_GDMA_BASE)
+
+/*****************************
+ * SPI Module Registers *
+ *****************************/
+
+#define CR_SPI_BASE 0xBFBC0000
+#define CR_SPI_CTL (0x00 | CR_SPI_BASE)
+#define CR_SPI_OPCODE (0x04 | CR_SPI_BASE)
+#define CR_SPI_DATA (0x08 | CR_SPI_BASE)
+
+/*****************************
+ * Ethernet Module Registers *
+ *****************************/
+
+#define CR_MAC_BASE 0xBFB50000
+#define CR_MAC_ISR (0x00 | CR_MAC_BASE)// --- Interrupt Status Register ---
+#define CR_MAC_IMR (0x04 | CR_MAC_BASE)// --- Interrupt Mask Register ---
+#define CR_MAC_MADR (0x08 | CR_MAC_BASE)// --- MAC Address Register [47:32] ---
+#define CR_MAC_LADR (0x0c | CR_MAC_BASE)// --- MAC Address Register [31:0] ---
+#define CR_MAC_EEE (0x10 | CR_MAC_BASE)
+#if defined(TC3162L2) || defined(CONFIG_MIPS_TC3262)
+// None
+#else
+ #define CR_MAC_MAHT0 (0x10 | CR_MAC_BASE)// --- MAC Hash Table Address Register [31:0] ---
+ #define CR_MAC_MAHT1 (0x14 | CR_MAC_BASE)// --- MAC Hash Table Address Register [31:0] ---
+#endif
+#define CR_MAC_TXPD (0x18 | CR_MAC_BASE)// --- Transmit Poll Demand Register ---
+#define CR_MAC_RXPD (0x1c | CR_MAC_BASE)// --- Receive Poll Demand Register ---
+#define CR_MAC_TXR_BADR (0x20 | CR_MAC_BASE)// --- Transmit Ring Base Address Register ---
+#define CR_MAC_RXR_BADR (0x24 | CR_MAC_BASE)// --- Receive Ring Base Address Register ---
+#define CR_MAC_ITC (0x28 | CR_MAC_BASE)// --- Interrupt Timer Control Register ---
+#if defined(TC3162L2) || defined(CONFIG_MIPS_TC3262)
+ #define CR_MAC_TXR_SIZE (0x2c | CR_MAC_BASE)// --- Transmit Ring Size Register ---
+ #define CR_MAC_RXR_SIZE (0x30 | CR_MAC_BASE)// --- Receive Ring Size Register ---
+ #define CR_MAC_RXR_SWIDX (0x34 | CR_MAC_BASE)// --- Receive Ring Software Index Register ---
+#else
+#define CR_MAC_APTC (0x2c | CR_MAC_BASE)// --- Automatic Polling Timer Control Register ---
+#define CR_MAC_DBLAC (0x30 | CR_MAC_BASE)// --- DMA Burst Length and Arbitration Control Register ---
+#endif
+#if defined(TC3162L2) || defined(CONFIG_MIPS_TC3262)
+ #define CR_MAC_TXDESP_SIZE (0x38 | CR_MAC_BASE)// --- Transmit Descriptor Size Register ---
+ #define CR_MAC_RXDESP_SIZE (0x3c | CR_MAC_BASE)// --- Receive Descriptor Size Register ---
+#else
+ #define CR_MAC_TXDESCP_ADR (0x38 | CR_MAC_BASE)// --- Current Transmit Descriptor Address Register ---
+ #define CR_MAC_RXDESCP_ADR (0x3c | CR_MAC_BASE)// --- Current Receive Descriptor Address Register ---
+#endif
+#if defined(TC3162L2) || defined(CONFIG_MIPS_TC3262)
+ #define CR_MAC_PRIORITY_CFG (0x50 | CR_MAC_BASE)// --- Priority Configuration Register ---
+ #define CR_MAC_VLAN_CFG (0x54 | CR_MAC_BASE)// --- VLAN Configuration Register ---
+ #define CR_MAC_TOS0_CFG (0x58 | CR_MAC_BASE)// --- TOS 0 Configuration Register ---
+ #define CR_MAC_TOS1_CFG (0x5c | CR_MAC_BASE)// --- TOS 1 Configuration Register ---
+ #define CR_MAC_TOS2_CFG (0x60 | CR_MAC_BASE)// --- TOS 2 Configuration Register ---
+ #define CR_MAC_TOS3_CFG (0x64 | CR_MAC_BASE)// --- TOS 3 Configuration Register ---
+ #define CR_MAC_TCP_CFG (0x68 | CR_MAC_BASE)// --- TCP Configuration Register ---
+ #define CR_MAC_SWTAG_CFG (0x6c | CR_MAC_BASE)// --- Software Tagging Configuration Register ---
+ #define CR_MAC_PMBL_CYC_NUM (0x70 | CR_MAC_BASE)// --- Preamble Cycle Number Register ---
+ #define CR_MAC_FCTL_CYC_NUM (0x74 | CR_MAC_BASE)// --- Flow Control Cycle Number Register ---
+ #define CR_MAC_JAM_CYC_NUM (0x78 | CR_MAC_BASE)// --- JAM Cycle Number Register ---
+ #define CR_MAC_DEFER_VAL (0x7c | CR_MAC_BASE)// --- Defer Value Register ---
+ #define CR_MAC_RANDOM_POLY (0x80 | CR_MAC_BASE)// --- Random Polynomial Register ---
+#else
+// None
+#endif
+#define CR_MAC_MACCR (0x88 | CR_MAC_BASE)// --- MAC Control Register ---
+#define CR_MAC_MACSR (0x8c | CR_MAC_BASE)// --- MAC Status Register ---
+#define CR_MAC_PHYCR (0x90 | CR_MAC_BASE)// --- PHY Control Register ---
+#define CR_MAC_PHYWDATA (0x94 | CR_MAC_BASE)// --- PHY Write Data Register ---
+#define CR_MAC_FCR (0x98 | CR_MAC_BASE)// --- Flow Control Register ---
+#define CR_MAC_BPR (0x9c | CR_MAC_BASE)// --- Back Pressure Register ---
+#if defined(TC3162L2) || defined(CONFIG_MIPS_TC3262)
+#define CR_MAC_DESP_IDX (0xc4 | CR_MAC_BASE)// --- Current Tx/Rx Descriptor Index ---
+#endif
+#define CR_MAC_WOLCR (0xa0 | CR_MAC_BASE)// --- Wake-On-LAN Control Register ---
+#define CR_MAC_WOLSR (0xa4 | CR_MAC_BASE)// --- Wake-On-LAN Status Register ---
+#define CR_MAC_WFCRC (0xa8 | CR_MAC_BASE)// --- Wake-up Frame CRC Register ---
+#define CR_MAC_WFBM1 (0xb0 | CR_MAC_BASE)// --- Wake-up Frame Byte Mask 1st Double Word Register ---
+#define CR_MAC_WFBM2 (0xb4 | CR_MAC_BASE)// --- Wake-up Frame Byte Mask 2nd Double Word Register ---
+#define CR_MAC_WFBM3 (0xb8 | CR_MAC_BASE)// --- Wake-up Frame Byte Mask 3rd Double Word Register ---
+#define CR_MAC_WFBM4 (0xbc | CR_MAC_BASE)// --- Wake-up Frame Byte Mask 4th Double Word Register ---
+#define CR_MAC_DMA_FSM (0xc8 | CR_MAC_BASE)// --- DMA State Machine
+#define CR_MAC_TM (0xcc | CR_MAC_BASE)// --- Test Mode Register ---
+#define CR_MAC_XMPG_CNT (0xdc | CR_MAC_BASE)// --- XM and PG Counter Register ---
+#if defined(TC3162L2) || defined(CONFIG_MIPS_TC3262)
+#define CR_MAC_RUNT_TLCC_CNT (0xe0 | CR_MAC_BASE)// --- Receive Runt and Transmit Late Collision Packet Counter Register ---
+#define CR_MAC_RCRC_RLONG_CNT (0xe4 | CR_MAC_BASE)// --- Receive CRC Error and Long Packet Counter Register ---
+#define CR_MAC_RLOSS_RCOL_CNT (0xe8 | CR_MAC_BASE)// --- Receive Packet Loss and Receive Collision Counter Register ---
+#else
+#define CR_MAC_RUNT_LCOL_CNT (0xe0 | CR_MAC_BASE)// --- Runt and Late Collision Packet Counter Register ---
+#define CR_MAC_CRC_LONG_CNT (0xe4 | CR_MAC_BASE)// --- CRC and Long Packet Counter Register ---
+#define CR_MAC_LOSS_COL_CNT (0xe8 | CR_MAC_BASE)// --- Receive Packet Loss and Receive Collision Counter Register ---
+#endif
+#define CR_MAC_BROADCAST_CNT (0xec | CR_MAC_BASE)// --- Receive Broadcast Counter Register ---
+#define CR_MAC_MULTICAST_CNT (0xf0 | CR_MAC_BASE)// --- Receive Multicast Counter Register ---
+#define CR_MAC_RX_CNT (0xf4 | CR_MAC_BASE)// --- Receive Good Packet Counter Register ---
+#define CR_MAC_TX_CNT (0xf8 | CR_MAC_BASE)// --- Transmit Good Packet Counter Register ---
+
+/*************************
+ * UART Module Registers *
+ *************************/
+#ifdef __BIG_ENDIAN
+#define CR_UART_OFFSET (0x03)
+#else
+#define CR_UART_OFFSET (0x0)
+#endif
+
+#define CR_UART_BASE 0xBFBF0000
+#define CR_UART_RBR (0x00+CR_UART_BASE+CR_UART_OFFSET)
+#define CR_UART_THR (0x00+CR_UART_BASE+CR_UART_OFFSET)
+#define CR_UART_IER (0x04+CR_UART_BASE+CR_UART_OFFSET)
+#define CR_UART_IIR (0x08+CR_UART_BASE+CR_UART_OFFSET)
+#define CR_UART_FCR (0x08+CR_UART_BASE+CR_UART_OFFSET)
+#define CR_UART_LCR (0x0c+CR_UART_BASE+CR_UART_OFFSET)
+#define CR_UART_MCR (0x10+CR_UART_BASE+CR_UART_OFFSET)
+#define CR_UART_LSR (0x14+CR_UART_BASE+CR_UART_OFFSET)
+#define CR_UART_MSR (0x18+CR_UART_BASE+CR_UART_OFFSET)
+#define CR_UART_SCR (0x1c+CR_UART_BASE+CR_UART_OFFSET)
+#define CR_UART_BRDL (0x00+CR_UART_BASE+CR_UART_OFFSET)
+#define CR_UART_BRDH (0x04+CR_UART_BASE+CR_UART_OFFSET)
+#define CR_UART_WORDA (0x20+CR_UART_BASE+0x00)
+#define CR_UART_HWORDA (0x28+CR_UART_BASE+0x00)
+#define CR_UART_MISCC (0x24+CR_UART_BASE+CR_UART_OFFSET)
+#define CR_UART_XYD (0x2c+CR_UART_BASE)
+
+#define UART_BRD_ACCESS 0x80
+#define UART_XYD_Y 65000
+#define UART_UCLK_115200 0
+#define UART_UCLK_57600 1
+#define UART_UCLK_38400 2
+#define UART_UCLK_28800 3
+#define UART_UCLK_19200 4
+#define UART_UCLK_14400 5
+#define UART_UCLK_9600 6
+#define UART_UCLK_4800 7
+#define UART_UCLK_2400 8
+#define UART_UCLK_1200 9
+#define UART_UCLK_600 10
+#define UART_UCLK_300 11
+#define UART_UCLK_110 12
+#define UART_BRDL 0x03
+#define UART_BRDH 0x00
+#define UART_LCR 0x03
+#define UART_FCR 0x0f
+#define UART_WATERMARK (0x0<<6)
+#define UART_MCR 0x0
+#define UART_MISCC 0x0
+//#define UART_IER 0x07
+//#define UART_IER 0x05
+#define UART_IER 0x01
+
+#define IER_RECEIVED_DATA_INTERRUPT_ENABLE 0x01
+#define IER_THRE_INTERRUPT_ENABLE 0x02
+#define IER_LINE_STATUS_INTERRUPT_ENABLE 0x04
+
+#define IIR_INDICATOR VPchar(CR_UART_IIR)
+#define IIR_RECEIVED_LINE_STATUS 0x06
+#define IIR_RECEIVED_DATA_AVAILABLE 0x04
+#define IIR_RECEIVER_IDLE_TRIGGER 0x0C
+#define IIR_TRANSMITTED_REGISTER_EMPTY 0x02
+#define LSR_INDICATOR VPchar(CR_UART_LSR)
+#define LSR_RECEIVED_DATA_READY 0x01
+#define LSR_OVERRUN 0x02
+#define LSR_PARITY_ERROR 0x04
+#define LSR_FRAME_ERROR 0x08
+#define LSR_BREAK 0x10
+#define LSR_THRE 0x20
+#define LSR_THE 0x40
+#define LSR_RFIFO_FLAG 0x80
+
+#define uartTxIntOn() VPchar(CR_UART_IER) |= IER_THRE_INTERRUPT_ENABLE
+#define uartTxIntOff() VPchar(CR_UART_IER) &= ~IER_THRE_INTERRUPT_ENABLE
+#define uartRxIntOn() VPchar(CR_UART_IER) |= IER_RECEIVED_DATA_INTERRUPT_ENABLE
+#define uartRxIntOff() VPchar(CR_UART_IER) &= ~IER_RECEIVED_DATA_INTERRUPT_ENABLE
+
+/*************************
+ * UART2 Module Registers *
+ *************************/
+#ifdef TCSUPPORT_UART2
+#define CR_UART2_BASE 0xBFBF0300
+#if defined(TCSUPPORT_CPU_EN7527) || defined(TCSUPPORT_CPU_EN7516) || defined(TCSUPPORT_CPU_EN7580)
+#define CR_UART3_BASE 0xBFBE1000
+#if defined(TCSUPPORT_CPU_EN7580)
+#define CR_UART4_BASE 0xBFBF0600
+#define CR_UART5_BASE 0xBFBF0700
+#endif
+#endif
+#define CR_UART2_RBR 0x00
+#define CR_UART2_THR 0x00
+#define CR_UART2_IER 0x04
+#define CR_UART2_IIR 0x08
+#define CR_UART2_FCR 0x08
+#define CR_UART2_LCR 0x0c
+#define CR_UART2_MCR 0x10
+#define CR_UART2_LSR 0x14
+#define CR_UART2_MSR 0x18
+#define CR_UART2_SCR 0x1c
+#define CR_UART2_BRDL 0x00
+#define CR_UART2_BRDH 0x04
+#define CR_UART2_WORDA 0x20
+#define CR_UART2_HWORDA 0x28
+#define CR_UART2_MISCC 0x24
+#define CR_UART2_XYD 0x2c
+
+#define UART_HWFC_ENABLE (1 << 0)
+#define UART_HWFC_DISABLE (0 << 0)
+#define UART_DEBUG (1 << 1)
+
+#define UART_DPRINT_MSG() { \
+ if(port->unused1 & UART_DEBUG) \
+ printk("[UART debug] iobase = %08x, function : %s\n", port->iobase, __func__); \
+ }
+#endif
+
+/*************************
+ * HSUART Module Registers *
+ *************************/
+#define CR_HSUART_BASE 0xBFBF0300
+#define CR_HSUART_RBR (0x00+CR_HSUART_BASE+CR_UART_OFFSET)
+#define CR_HSUART_THR (0x00+CR_HSUART_BASE+CR_UART_OFFSET)
+#define CR_HSUART_IER (0x04+CR_HSUART_BASE+CR_UART_OFFSET)
+#define CR_HSUART_IIR (0x08+CR_HSUART_BASE+CR_UART_OFFSET)
+#define CR_HSUART_FCR (0x08+CR_HSUART_BASE+CR_UART_OFFSET)
+#define CR_HSUART_LCR (0x0c+CR_HSUART_BASE+CR_UART_OFFSET)
+#define CR_HSUART_MCR (0x10+CR_HSUART_BASE+CR_UART_OFFSET)
+#define CR_HSUART_LSR (0x14+CR_HSUART_BASE+CR_UART_OFFSET)
+#define CR_HSUART_MSR (0x18+CR_HSUART_BASE+CR_UART_OFFSET)
+#define CR_HSUART_SCR (0x1c+CR_HSUART_BASE+CR_UART_OFFSET)
+#define CR_HSUART_BRDL (0x00+CR_HSUART_BASE+CR_UART_OFFSET)
+#define CR_HSUART_BRDH (0x04+CR_HSUART_BASE+CR_UART_OFFSET)
+#define CR_HSUART_WORDA (0x20+CR_HSUART_BASE+0x00)
+#define CR_HSUART_HWORDA (0x28+CR_HSUART_BASE+0x00)
+#define CR_HSUART_MISCC (0x24+CR_HSUART_BASE+CR_UART_OFFSET)
+#define CR_HSUART_XYD (0x2c+CR_HSUART_BASE)
+
+/**********************************
+ * Interrupt Controller Registers *
+ **********************************/
+#define CR_INTC_BASE 0xBFB40000
+ // --- Interrupt Type Register ---
+#define CR_INTC_ITR (CR_INTC_BASE+0x0000)
+ // --- Interrupt Mask Register ---
+#define CR_INTC_IMR (CR_INTC_BASE+0x0004)
+ // --- Interrupt Pending Register ---
+#define CR_INTC_IPR (CR_INTC_BASE+0x0008)
+ // --- Interrupt Set Register ---
+#define CR_INTC_ISR (CR_INTC_BASE+0x000c)
+ // --- Interrupt Priority Register 0 ---
+#define CR_INTC_IPR0 (CR_INTC_BASE+0x0010)
+ // --- Interrupt Priority Register 1 ---
+#define CR_INTC_IPR1 (CR_INTC_BASE+0x0014)
+ // --- Interrupt Priority Register 2 ---
+#define CR_INTC_IPR2 (CR_INTC_BASE+0x0018)
+ // --- Interrupt Priority Register 3 ---
+#define CR_INTC_IPR3 (CR_INTC_BASE+0x001c)
+ // --- Interrupt Priority Register 4 ---
+#define CR_INTC_IPR4 (CR_INTC_BASE+0x0020)
+ // --- Interrupt Priority Register 5 ---
+#define CR_INTC_IPR5 (CR_INTC_BASE+0x0024)
+ // --- Interrupt Priority Register 6 ---
+#define CR_INTC_IPR6 (CR_INTC_BASE+0x0028)
+ // --- Interrupt Priority Register 7 ---
+#define CR_INTC_IPR7 (CR_INTC_BASE+0x002c)
+	// --- Interrupt Vector Register ---
+#ifdef CONFIG_MIPS_TC3262
+ // --- Interrupt VPE and SRS Register 0 ---
+#define CR_INTC_IVSR0 (CR_INTC_BASE+0x0030)
+ // --- Interrupt VPE and SRS Register 1 ---
+#define CR_INTC_IVSR1 (CR_INTC_BASE+0x0034)
+ // --- Interrupt VPE and SRS Register 2 ---
+#define CR_INTC_IVSR2 (CR_INTC_BASE+0x0038)
+ // --- Interrupt VPE and SRS Register 3 ---
+#define CR_INTC_IVSR3 (CR_INTC_BASE+0x003c)
+ // --- Interrupt VPE and SRS Register 4 ---
+#define CR_INTC_IVSR4 (CR_INTC_BASE+0x0040)
+ // --- Interrupt VPE and SRS Register 5 ---
+#define CR_INTC_IVSR5 (CR_INTC_BASE+0x0044)
+ // --- Interrupt VPE and SRS Register 6 ---
+#define CR_INTC_IVSR6 (CR_INTC_BASE+0x0048)
+ // --- Interrupt VPE and SRS Register 7 ---
+#define CR_INTC_IVSR7 (CR_INTC_BASE+0x004c)
+	// --- Interrupt Vector Register ---
+#define CR_INTC_IVR (CR_INTC_BASE+0x0050)
+
+/* RT63165 */
+ // --- Interrupt Mask Register ---
+#define CR_INTC_IMR_1 (CR_INTC_BASE+0x0050)
+ // --- Interrupt Pending Register ---
+#define CR_INTC_IPR_1 (CR_INTC_BASE+0x0054)
+ // --- Interrupt Priority Register 8 ---
+#define CR_INTC_IPSR8 (CR_INTC_BASE+0x0058)
+ // --- Interrupt Priority Register 9 ---
+#define CR_INTC_IPSR9 (CR_INTC_BASE+0x005c)
+ // --- Interrupt VPE and SRS Register 8 ---
+#define CR_INTC_IVSR8 (CR_INTC_BASE+0x0060)
+ // --- Interrupt VPE and SRS Register 9 ---
+#define CR_INTC_IVSR9 (CR_INTC_BASE+0x0064)
+
+/*MT7510*/
+#define CR_INTC_NMI0IMR0 (CR_INTC_BASE+0x0080)
+#define CR_INTC_NMI1IMR0 (CR_INTC_BASE+0x0088)
+
+
+
+
+#else
+	// --- Interrupt Vector Register ---
+#define CR_INTC_IVR (CR_INTC_BASE+0x0030)
+#endif
+
+#if 0
+/*move this to the int_source.h because the definition of unsigned long int...etc has been defined in type.h.
+ The ralink driver uses type.h and needs this interrupt_source structure so move this interrupt_source structure
+ to int_source.h. shnwind */
+enum
+interrupt_source
+ {
+#ifdef CONFIG_MIPS_TC3262
+ DUMMY_INT,
+#endif
+ UART_INT, //0 IPL10
+ RTC_ALARM_INT, //1 IPL29
+#ifdef CONFIG_MIPS_TC3262
+ SI_SWINT1_INT0, //2
+ SI_SWINT1_INT1, //3
+#else
+ RTC_TICK_INT, //2 IPL31
+ RESERVED0, //3 IPL30
+#endif
+ TIMER0_INT, //4 IPL1
+ TIMER1_INT, //5 IPL5
+ TIMER2_INT, //6 IPL6
+#ifdef CONFIG_MIPS_TC3262
+ SI_SWINT_INT0, //7
+ SI_SWINT_INT1, //8
+#else
+ TIMER3_INT, //7 IPL7
+ TIMER4_INT, //8 IPL8
+#endif
+ TIMER5_INT, //9 IPL9
+ GPIO_INT, //10 IPL11
+#ifdef CONFIG_MIPS_TC3262
+ RESERVED1, //11 IPL20
+ SI_PC1_INT, //12
+ SI_PC_INT, //13
+#else
+ PCIE_A_INT, //11 IPL20
+ PCIE_SERR_INT, //12 IPL21
+ RESERVED3, //13 IPL22
+#endif
+ APB_DMA0_INT, //14 IPL12
+ APB_DMA1_INT, //15 IPL13
+#ifdef CONFIG_MIPS_TC3262
+ HSUART_INT, //16 IPL23
+#else
+ RESERVED4, //16 IPL23
+#endif
+ RESERVED5, //17 IPL24
+ DYINGGASP_INT, //18 IPL25
+ DMT_INT, //19 IPL26
+ ARBITER_ERR_INT,//20 IPL0
+ MAC_INT, //21 IPL3
+ SAR_INT, //22 IPL2
+ USB_INT, //23 IPL14
+ PCI_A_INT, //24
+ PCI_B_INT, //25
+// RESERVED8, //24 IPL27
+// RESERVED9, //25 IPL28
+ XSLV0_INT, //26 IPL15
+ XSLV1_INT, //27 IPL16
+ XSLV2_INT, //28 IPL17
+#ifdef CONFIG_MIPS_TC3262
+ SI_TIMER1_INT, //29
+ SI_TIMER_INT, //30
+#else
+ XAPB0_INT, //29 IPL18
+ XAPB1_INT, //30 IPL19
+#endif
+ SWR_INT //31 IPL4
+ };
+#endif
+
+enum
+interrupt_priority
+{
+ IPL0, IPL1, IPL2, IPL3, IPL4,
+ IPL5, IPL6, IPL7, IPL8, IPL9,
+ IPL10, IPL11, IPL12, IPL13, IPL14,
+ IPL15, IPL16, IPL17, IPL18, IPL19,
+ IPL20, IPL21, IPL22, IPL23, IPL24,
+ IPL25, IPL26, IPL27, IPL28, IPL29,
+ IPL30, IPL31
+};
+
+/**************************
+ * Timer Module Registers *
+ **************************/
+#define CR_TIMER_BASE 0xBFBF0100
+#define CR_TIMER_CTL (CR_TIMER_BASE + 0x00)
+#define CR_TIMER0_LDV (CR_TIMER_BASE + 0x04)
+#define CR_TIMER0_VLR (CR_TIMER_BASE + 0x08)
+#define CR_TIMER1_LDV (CR_TIMER_BASE + 0x0C)
+#define CR_TIMER1_VLR (CR_TIMER_BASE + 0x10)
+#define CR_TIMER2_LDV (CR_TIMER_BASE + 0x14)
+#define CR_TIMER2_VLR (CR_TIMER_BASE + 0x18)
+#define CR_TIMER3_LDV (CR_TIMER_BASE + 0x1C)
+#define CR_TIMER3_VLR (CR_TIMER_BASE + 0x20)
+#define CR_TIMER4_LDV (CR_TIMER_BASE + 0x24)
+#define CR_TIMER4_VLR (CR_TIMER_BASE + 0x28)
+#define CR_TIMER5_LDV (CR_TIMER_BASE + 0x2C)
+#define CR_TIMER5_VLR (CR_TIMER_BASE + 0x30)
+/* new watchdog design */
+#define CR_WDOG_THSLD (CR_TIMER_BASE + 0x34)
+#define CR_WDOG_RLD (CR_TIMER_BASE + 0x38)
+
+#define TIMER_ENABLE 1
+#define TIMER_DISABLE 0
+#define TIMER_TOGGLEMODE 1
+#define TIMER_INTERVALMODE 0
+#define TIMER_TICKENABLE 1
+#define TIMER_TICKDISABLE 0
+#define TIMER_WDENABLE 1
+#define TIMER_WDDISABLE 0
+#define TIMER_HALTENABLE 1
+#define TIMER_HALTDISABLE 0
+
+#define TIMERTICKS_1MS 1
+#define TIMERTICKS_10MS 10 // set timer ticks as 10 ms
+#define TIMERTICKS_100MS 100
+#define TIMERTICKS_1S 1000
+#define TIMERTICKS_10S 10000
+
+#define timerCtlSet(timer_no, timer_enable, timer_mode,timer_halt) timer_Configure(timer_no, timer_enable, timer_mode, timer_halt)
+#define timerWdSet(tick_enable, watchdog_enable) timer_WatchDogConfigure(tick_enable,watchdog_enable)
+#define timerLdvSet(timer_no,val) *(volatile uint32 *)(CR_TIMER0_LDV+timer_no*0x08) = (val)
+#define timerVlrGet(timer_no,val) (val)=*(volatile uint32 *)(CR_TIMER0_VLR+timer_no*0x08)
+
+/**************************
+ * Timer Module Registers *
+ **************************/
+#define CR_CPUTMR_BASE 0xBFBF0400
+#define CR_CPUTMR_CTL (CR_CPUTMR_BASE + 0x00)
+#define CR_CPUTMR_CMR0 (CR_CPUTMR_BASE + 0x04)
+#define CR_CPUTMR_CNT0 (CR_CPUTMR_BASE + 0x08)
+#define CR_CPUTMR_CMR1 (CR_CPUTMR_BASE + 0x0c)
+#define CR_CPUTMR_CNT1 (CR_CPUTMR_BASE + 0x10)
+/* TCSUPPORT_MIPS_1004K */
+#define CR_CPUTMR_23_BASE 0xBFBE0000
+#define CR_CPUTMR_23_CTL (CR_CPUTMR_23_BASE + 0x00)
+#define CR_CPUTMR_CMR2 (CR_CPUTMR_23_BASE + 0x04)
+#define CR_CPUTMR_CNT2 (CR_CPUTMR_23_BASE + 0x08)
+#define CR_CPUTMR_CMR3 (CR_CPUTMR_23_BASE + 0x0c)
+#define CR_CPUTMR_CNT3 (CR_CPUTMR_23_BASE + 0x10)
+/* end of TCSUPPORT_MIPS_1004K */
+
+/*************************
+ * GPIO Module Registers *
+ *************************/
+#define CR_GPIO_BASE 0xBFBF0200
+#define CR_GPIO_CTRL (CR_GPIO_BASE + 0x00)
+#define CR_GPIO_DATA (CR_GPIO_BASE + 0x04)
+#define CR_GPIO_INTS (CR_GPIO_BASE + 0x08)
+#define CR_GPIO_EDET (CR_GPIO_BASE + 0x0C)
+#define CR_GPIO_LDET (CR_GPIO_BASE + 0x10)
+#define CR_GPIO_ODRAIN (CR_GPIO_BASE + 0x14)
+#define CR_GPIO_CTRL1 (CR_GPIO_BASE + 0x20)
+#ifdef TCSUPPORT_GPIO_ECM
+#define CR_SGPIO_DATA (CR_GPIO_BASE + 0x24)
+#define CR_SGPIO_CDIV (CR_GPIO_BASE + 0x28)
+#define CR_SGPIO_CDLY (CR_GPIO_BASE + 0x2C)
+#define CR_SGPIO_MODE (CR_GPIO_BASE + 0x30)
+#define CR_GPIO_FLAMOD (CR_GPIO_BASE + 0x34)
+#define CR_GPIO_IMME (CR_GPIO_BASE + 0x38)
+#define CR_GPIO_FLAP0 (CR_GPIO_BASE + 0x3C)
+#define CR_GPIO_FLAP1 (CR_GPIO_BASE + 0x40)
+#define CR_GPIO_FLAP2 (CR_GPIO_BASE + 0x44)
+#define CR_GPIO_FLAP3 (CR_GPIO_BASE + 0x48)
+#define CR_GPIO_FMAP0 (CR_GPIO_BASE + 0x4C)
+#define CR_GPIO_FMAP1 (CR_GPIO_BASE + 0x50)
+#define CR_SGPIO_FMAP0 (CR_GPIO_BASE + 0x54)
+#define CR_SGPIO_FMAP1 (CR_GPIO_BASE + 0x58)
+#define CR_SGPIO_FMAP2 (CR_GPIO_BASE + 0x5C)
+#define CR_GPIO_TYPE 0xBFB000B0
+#endif
+/* MT7510 */
+#define CR_GPIO_CTRL2 (CR_GPIO_BASE + 0x60)
+#define CR_GPIO_CTRL3 (CR_GPIO_BASE + 0x64)
+#define CR_GPIO_FLAMOD_EXT (CR_GPIO_BASE + 0x68)
+#define CR_GPIO_DATA1 (CR_GPIO_BASE + 0x70)
+#define CR_GPIO_ODRAIN1 (CR_GPIO_BASE + 0x78)
+
+#define GPIO_IN 0x0
+#define GPIO_OUT 0x1
+#define GPIO_ALT_IN 0x2
+#define GPIO_ALT_OUT 0x3
+
+#define GPIO_E_DIS 0x0
+#define GPIO_E_RISE 0x1
+#define GPIO_E_FALL 0x2
+#define GPIO_E_BOTH 0x3
+
+#define GPIO_L_DIS 0x0
+#define GPIO_L_HIGH 0x1
+#define GPIO_L_LOW 0x2
+#define GPIO_L_BOTH 0x3
+
+/*****************************
+ * Arbiter/Decoder Registers *
+ *****************************/
+#define CR_AHB_BASE 0xBFB00000
+#define CR_AHB_AACS (CR_AHB_BASE + 0x00)
+#define CR_AHB_ABEM (CR_AHB_BASE + 0x08)
+#define CR_AHB_ABEA (CR_AHB_BASE + 0x0C)
+#define CR_AHB_DMB0 (CR_AHB_BASE + 0x10)
+#define CR_AHB_DMB1 (CR_AHB_BASE + 0x14)
+#define CR_AHB_DMB2 (CR_AHB_BASE + 0x18)
+#define CR_AHB_DMB3 (CR_AHB_BASE + 0x1C)
+#define CR_AHB_SMB0 (CR_AHB_BASE + 0x20)
+#define CR_AHB_SMB1 (CR_AHB_BASE + 0x24)
+#define CR_AHB_SMB2 (CR_AHB_BASE + 0x28)
+#define CR_AHB_SMB3 (CR_AHB_BASE + 0x2C)
+#define CR_AHB_SMB4 (CR_AHB_BASE + 0x30)
+#define CR_AHB_SMB5 (CR_AHB_BASE + 0x34)
+
+/* RT63165 */
+#define CR_ERR_ADDR (CR_AHB_BASE + 0x3c)
+#define CR_PRATIR (CR_AHB_BASE + 0x58)
+#define CR_MON_TMR (CR_AHB_BASE + 0x60)
+
+/*MT7510*/
+#define CR_AHB_NMI_CONF (CR_AHB_BASE + 0x50)
+
+#define CR_AHB_PMCR (CR_AHB_BASE + 0x80)
+#define CR_AHB_DMTCR (CR_AHB_BASE + 0x84)
+#define CR_AHB_PCIC (CR_AHB_BASE + 0x88)
+#if defined(TCSUPPORT_CPU_EN7516)||defined(TCSUPPORT_CPU_EN7527) || defined(TCSUPPORT_CPU_EN7580)
+#define CR_AHB_HWCONF (isFPGA ? 0xBFA2FF28 : 0xBFA20174)
+#else
+#define CR_AHB_HWCONF (CR_AHB_BASE + 0x8C)
+#endif
+#define CR_AHB_SSR (CR_AHB_BASE + 0x90)
+#define CR_AHB_SSTR (CR_AHB_BASE + 0x9C)
+#if defined(TC3162L2) || defined(CONFIG_MIPS_TC3262)
+#define CR_IMEM (CR_AHB_BASE + 0x9C)
+#define CR_DMEM (CR_AHB_BASE + 0xA0)
+#endif
+/* RT63365 */
+#define CR_CRCC_REG (CR_AHB_BASE + 0xA0)
+#define CR_AHB_UHCR (CR_AHB_BASE + 0xA8)
+#define CR_AHB_ABMR (CR_AHB_BASE + 0xB8)
+#define CR_CKGEN_CONF (CR_AHB_BASE + 0xC0)
+#define CR_PSMCR (CR_AHB_BASE + 0xCC)
+#define CR_PSMDR (CR_AHB_BASE + 0xD0)
+#define CR_PSMMR (CR_AHB_BASE + 0xD0)
+
+/* RT63165 */
+#define CR_SRAM (CR_AHB_BASE + 0xF4)
+#define CR_AHB_HWCONF2 (CR_AHB_BASE + 0xF8)
+
+/* RT63365 */
+#define CR_AHB_CLK (CR_AHB_BASE + 0x1c0)
+#define CR_CLK_CFG (CR_AHB_BASE + 0x82c)
+#define CR_RSTCTRL2 (CR_AHB_BASE + 0x834)
+#define CR_GPIO_SHR (CR_AHB_BASE + 0x860)
+
+#define CR_BUSTIMEOUT_SWITCH (CR_AHB_BASE + 0x92c)
+/*************************************************
+ * SRAM/FLASH/ROM Controller Operation Registers *
+ *************************************************/
+#define CR_SMC_BASE 0xBFB10000
+#define CR_SMC_BCR0 (CR_SMC_BASE + 0x00)
+#define CR_SMC_BCR1 (CR_SMC_BASE + 0x04)
+#define CR_SMC_BCR2 (CR_SMC_BASE + 0x08)
+#define CR_SMC_BCR3 (CR_SMC_BASE + 0x0C)
+#define CR_SMC_BCR4 (CR_SMC_BASE + 0x10)
+#define CR_SMC_BCR5 (CR_SMC_BASE + 0x14)
+
+/************************************************
+ * System Control Unit            *
+ ************************************************/
+ /* MT7510 */
+#define CR_SCU_BASE 0xbfb00000
+#define CR_DRAMC_HW_SREF_CONF (0x44 | CR_SCU_BASE)
+#define CR_DRAMC_CONF (0x74 | CR_SCU_BASE)
+
+/*****************************
+ * Clock Generator Registers *
+ *****************************/
+
+/****************************
+ * USB Module Registers *
+ ****************************/
+
+#define CR_USB_BASE 0xBFB70000
+
+ // --- System Control Register ---
+#define CR_USB_SYS_CTRL_REG (0x00 | CR_USB_BASE)
+
+ // --- Device Control Register ---
+#define CR_USB_DEV_CTRL_REG (0x04 | CR_USB_BASE)
+
+ // --- Interrupt Status Register ---
+#define CR_USB_INTR_STATUS_REG (0x08 | CR_USB_BASE)
+
+ // --- Interrupt Mask Register ---
+#define CR_USB_INTR_MASK_REG (0x0c | CR_USB_BASE)
+
+ // --- Control Endpoint I/O Mode Control Register ---
+#define CR_USB_CTRL_ENDP_IO_CTRL_REG (0x10 | CR_USB_BASE)
+
+ // --- Control Endpoint I/O Mode OUT Transfer Data Register #00 ---
+#define CR_USB_CTRL_ENDP_IO_OUT_REG0 (0x18 | CR_USB_BASE)
+
+ // --- Control Endpoint I/O Mode OUT Transfer Data Register #01 ---
+#define CR_USB_CTRL_ENDP_IO_OUT_REG1 (0x1c | CR_USB_BASE)
+
+ // --- Control Endpoint I/O Mode IN Transfer Data Register #00 ---
+#define CR_USB_CTRL_ENDP_IO_IN_REG0 (0x20 | CR_USB_BASE)
+
+ // --- Control Endpoint I/O Mode IN Transfer Data Register #01 ---
+#define CR_USB_CTRL_ENDP_IO_IN_REG1 (0x24 | CR_USB_BASE)
+
+ // --- Interrupt IN Endpoint Control Register ---
+#define CR_USB_INTR_IN_ENDP_CTRL_REG (0x30 | CR_USB_BASE)
+
+ // --- Interrupt IN Endpoint IN Transfer Data Register #00 ---
+#define CR_USB_INTR_IN_ENDP_IN_REG0 (0x38 | CR_USB_BASE)
+
+ // --- Interrupt IN Endpoint IN Transfer Data Register #01 ---
+#define CR_USB_INTR_IN_ENDP_IN_REG1 (0x3c | CR_USB_BASE)
+
+ // --- Bulk/ISO OUT Descriptor Pointer Register ---
+#define CR_USB_BULKISO_OUT_DESCP_BASE_REG (0x40 | CR_USB_BASE)
+
+ // --- Bulk/ISO IN Descriptor Pointer Register ---
+#define CR_USB_BULKISO_IN_DESCP_BASE_REG (0x44 | CR_USB_BASE)
+
+ // --- Bulk/ISO IN/OUT Endpoint Number Register ---
+#define CR_USB_BULKISO_INOUT_ENDP_NUM_REG (0x48 | CR_USB_BASE)
+
+ // --- Bulk/ISO Endpoint DMA Control Register ---
+#define CR_USB_BULKISO_ENDP_DMA_CTRL_REG (0x4c | CR_USB_BASE)
+
+ // --- Bulk/ISO Endpoint DMA Configuration Register ---
+#define CR_USB_BULKISO_ENDP_DMA_CONF_REG (0x50 | CR_USB_BASE)
+
+ // --- ISO Endpoint Transfer Delimiter Register #00 ---
+#define CR_USB_ISO_ENDP_DELIMITER_REG0 (0x58 | CR_USB_BASE)
+
+ // --- ISO Endpoint Transfer Delimiter Register #01 ---
+#define CR_USB_ISO_ENDP_DELIMITER_REG1 (0x5c | CR_USB_BASE)
+
+ // --- Vendor ID Register ---
+#define CR_USB_VENDOR_ID_REG (0x68 | CR_USB_BASE)
+
+ // --- Product ID Register ---
+#define CR_USB_PRODUCT_ID_REG (0x6c | CR_USB_BASE)
+
+/*************************
+ * HOST BRIDGE Registers *
+ * ***********************/
+#define HOST_BRIDGE_BASE 0xBFB80000
+#define CR_CFG_ADDR_REG (HOST_BRIDGE_BASE+0x0020)
+#define CR_CFG_DATA_REG (HOST_BRIDGE_BASE+0x0024)
+/****************************
+ * ATM SAR Module Registers *
+ ****************************/
+#define TSCONTROL_BASE 0xBFB00000
+#define TSARM_REGISTER_BASE (TSCONTROL_BASE + 0x00060000)
+
+/* ----- General configuration registers ----- */
+
+/* ----- Reset And Identify register ----- */
+#define TSARM_RAI VPint(TSARM_REGISTER_BASE + 0x0000)
+/* ----- General Configuration register ----- */
+#define TSARM_GFR VPint(TSARM_REGISTER_BASE + 0x0004)
+/* ----- Traffic Scheduler Timer Base Counter register ----- */
+#define TSARM_TSTBR VPint(TSARM_REGISTER_BASE + 0x0008)
+/* ----- Receive Maximum Packet Length register ----- */
+#define TSARM_RMPLR VPint(TSARM_REGISTER_BASE + 0x000c)
+#if defined(TC3162L2) || defined(CONFIG_MIPS_TC3262)
+//Transmit Priority 0/1 Data Buffer Control and Status Register
+#define TSARM_TXDBCSR_P01 VPint(TSARM_REGISTER_BASE + 0x0010)
+#else
+/* ----- TX Data Buffer Control and Status register ----- */
+#define TSARM_TXDBCSR VPint(TSARM_REGISTER_BASE + 0x0010)
+#endif
+/* ----- TX OAM Buffer Control and Status register ----- */
+#define TSARM_TXMBCSR VPint(TSARM_REGISTER_BASE + 0x0014)
+/* ----- RX Data Buffer Control and Status register ----- */
+#define TSARM_RXDBCSR VPint(TSARM_REGISTER_BASE + 0x0018)
+/* ----- RX OAM Buffer Control and Status register ----- */
+#define TSARM_RXMBCSR VPint(TSARM_REGISTER_BASE + 0x001c)
+/* ----- Last IRQ Status register ----- */
+#define TSARM_LIRQ VPint(TSARM_REGISTER_BASE + 0x0020)
+/* ----- IRQ Queue Base Address register ----- */
+#define TSARM_IRQBA VPint(TSARM_REGISTER_BASE + 0x0024)
+/* ----- IRQ Queue Entry Length register ----- */
+#define TSARM_IRQLEN VPint(TSARM_REGISTER_BASE + 0x0028)
+/* ----- IRQ Head Indication register ----- */
+#define TSARM_IRQH VPint(TSARM_REGISTER_BASE + 0x002c)
+/* ----- Clear IRQ Entry register ----- */
+#define TSARM_IRQC VPint(TSARM_REGISTER_BASE + 0x0030)
+#if defined(TC3162L2) || defined(CONFIG_MIPS_TC3262)
+//Traffic Scheduler Line Rate Counter Register
+#define TSARM_TXSLRC VPint(TSARM_REGISTER_BASE + 0x0034)
+//Transmit Priority 2/3 Data Buffer Control and Status Register
+#define TSARM_TXDBCSR_P23 VPint(TSARM_REGISTER_BASE + 0x0038)
+#endif
+
+/* ----- VC IRQ Mask register ----- */
+#define TSARM_IRQM_BASE (TSARM_REGISTER_BASE + 0x0040)
+#define TSARM_IRQM(vc) VPint(TSARM_IRQM_BASE + (vc * 4))
+#define TSARM_IRQMCC VPint(TSARM_IRQM_BASE + 0x0040)
+#if defined(TC3162L2) || defined(CONFIG_MIPS_TC3262)
+#define TSARM_IRQ_QUE_THRE VPint(TSARM_REGISTER_BASE + 0x0084) //IRQ Queue Threshold Register
+#define TSARM_IRQ_TIMEOUT_CTRL VPint(TSARM_REGISTER_BASE + 0x0088) //IRQ Timeout Control Register
+#endif
+
+/* ----- VC Configuration register ----- */
+#define TSARM_VCCR_BASE (TSARM_REGISTER_BASE + 0x0100)
+#define TSARM_VCCR(vc) VPint(TSARM_VCCR_BASE + (vc * 4))
+#define TSARM_CCCR VPint(TSARM_VCCR_BASE + 0x0040)
+#if defined(TC3162L2) || defined(CONFIG_MIPS_TC3262)
+/* ----- DMA WRR Configuration Register (DMA_WRR_WEIT) (for TC3162L4) ----- */
+#define TSARM_DMAWRRCR VPint(TSARM_REGISTER_BASE + 0x0150)
+#endif
+/* ----- Transmit Buffer Descriptor register ----- */
+#define TSARM_TXDCBDA_BASE (TSARM_REGISTER_BASE + 0x0200)
+#define TSARM_TXDCBDA(vc) VPint(TSARM_TXDCBDA_BASE + (vc * 4))
+#define TSARM_TXMCBDA_BASE (TSARM_REGISTER_BASE + 0x0240)
+#define TSARM_TXMCBDA(vc) VPint(TSARM_TXMCBDA_BASE + (vc * 4))
+
+#if defined(TC3162L2) || defined(CONFIG_MIPS_TC3262)
+#define TSARM_CC_TX_BD_BASE VPint(TSARM_REGISTER_BASE + 0x0228) //Control Channel Transmit BD Base Address 0x228
+#define TSARM_CC_TX_BD_MNG_BASE VPint(TSARM_REGISTER_BASE + 0x0268) //Control Channel Transmit BD Management Base
+#define TSARM_VC_TX_BD_PRIORITY01_BASE (TSARM_REGISTER_BASE + 0x0280)
+#define TSARM_VC_TX_BD_PRIORITY01(vc) VPint(TSARM_VC_TX_BD_PRIORITY01_BASE + vc * 4) //VC0 Transmit BD Data Priority 0/1 Base 280
+#define TSARM_VC_TX_BD_PRIORITY23_BASE (TSARM_REGISTER_BASE + 0x02c0)
+#define TSARM_VC_TX_BD_PRIORITY23(vc)	VPint(TSARM_VC_TX_BD_PRIORITY23_BASE + vc * 4)	//VC0 Transmit BD Data Priority 2/3 Base 2c0
+#else
+#define TSARM_TXCCBDA VPint(TSARM_REGISTER_BASE + 0x0280)
+#endif
+
+/* ----- Receive Buffer Descriptor register ----- */
+#define TSARM_RXDCBDA_BASE (TSARM_REGISTER_BASE + 0x0300)
+#define TSARM_RXDCBDA(vc) VPint(TSARM_RXDCBDA_BASE + (vc * 4))
+#define TSARM_RXMCBDA_BASE (TSARM_REGISTER_BASE + 0x0340)
+#define TSARM_RXMCBDA(vc) VPint(TSARM_RXMCBDA_BASE + (vc * 4))
+
+#if defined(TC3162L2) || defined(CONFIG_MIPS_TC3262)
+#define TSARM_CC_RX_BD_BASE VPint(TSARM_REGISTER_BASE + 0x328) //Control Channel Receive BD Base Address 0x328
+#define TSARM_CC_RX_BD_MNG_BASE VPint(TSARM_REGISTER_BASE + 0x368) //Control Channel Receive BD Management Base 0x368
+#define TSARM_VC_RX_DATA_BASE (TSARM_REGISTER_BASE + 0x380)
+#define TSARM_VC_RX_DATA(vc) VPint(TSARM_VC_RX_DATA_BASE + vc * 4) //VC0 Receive BD Data Base 0x380
+#else
+#define TSARM_RXCCBDA VPint(TSARM_REGISTER_BASE + 0x0380)
+#endif
+
+/* ----- Traffic Scheduler register ----- */
+#define TSARM_PCR_BASE (TSARM_REGISTER_BASE + 0x0400)
+#define TSARM_PCR(vc) VPint(TSARM_PCR_BASE + (vc * 4))
+#define TSARM_SCR_BASE (TSARM_REGISTER_BASE + 0x0440)
+#define TSARM_SCR(vc) VPint(TSARM_SCR_BASE + (vc * 4))
+#define TSARM_MBSTP_BASE (TSARM_REGISTER_BASE + 0x0480)
+#define TSARM_MBSTP(vc) VPint(TSARM_MBSTP_BASE + (vc * 4))
+
+#if defined(TC3162L2) || defined(CONFIG_MIPS_TC3262)
+#define TSARM_MAX_FRAME_SIZE_BASE (TSARM_REGISTER_BASE + 0x04c0)
+#define TSARM_MAX_FRAME_SIZE(vc) VPint(TSARM_MAX_FRAME_SIZE_BASE + (vc * 4))
+/* define for TC3162L4 */
+#define TSARM_TRAFFIC_SHAPER_WEIGHT_BASE (TSARM_REGISTER_BASE + 0x0500)
+#define TSARM_TRAFFIC_SHAPER_WEIGHT(vc) VPint(TSARM_TRAFFIC_SHAPER_WEIGHT_BASE + (vc * 4))
+#else
+/* ----- Receive Timeout register ----- */
+#define TSARM_RTOCNT_BASE (TSARM_REGISTER_BASE + 0x0500)
+#define TSARM_RTOCNT(vc) VPint(TSARM_RTOCNT_BASE + (vc * 4))
+#endif
+
+/* ----- TX Statistic Counter register ----- */
+#define TSARM_TDCNT_BASE (TSARM_REGISTER_BASE + 0x0600)
+#define TSARM_TDCNT(vc) VPint(TSARM_TDCNT_BASE + (vc * 4))
+#define TSARM_TDCNTCC VPint(TSARM_TDCNT_BASE + 0x0040)
+
+/* ----- RX Statistic Counter register ----- */
+#define TSARM_RDCNT_BASE (TSARM_REGISTER_BASE + 0x0700)
+#define TSARM_RDCNT(vc) VPint(TSARM_RDCNT_BASE + (vc * 4))
+#define TSARM_RDCNTCC VPint(TSARM_RDCNT_BASE + 0x0040)
+#define TSARM_MISCNT VPint(TSARM_RDCNT_BASE + 0x0044)
+
+#if defined(TC3162L2) || defined(CONFIG_MIPS_TC3262)
+#define TSARM_MPOA_GCR VPint(TSARM_REGISTER_BASE + 0x0800) //MPOA global control register
+#define TSARM_VC_MPOA_CTRL_BASE (TSARM_REGISTER_BASE + 0x0810) //VC0 ~9 MPOA Control register
+#define TSARM_VC_MPOA_CTRL(vc) VPint(TSARM_VC_MPOA_CTRL_BASE + vc * 4)
+#define TSARM_MPOA_HFIV11 VPint(TSARM_REGISTER_BASE + 0x0850) //MPOA header Field1 Insertion Value1
+#define TSARM_MPOA_HFIV12 VPint(TSARM_REGISTER_BASE + 0x0854) //MPOA header Field1 Insertion Value2
+#define TSARM_MPOA_HFIV13	VPint(TSARM_REGISTER_BASE + 0x0858)	//MPOA header Field1 Insertion Value3
+#define TSARM_MPOA_HFIV21	VPint(TSARM_REGISTER_BASE + 0x0860)	//MPOA header Field2 Insertion Value1
+#define TSARM_MPOA_HFIV22 VPint(TSARM_REGISTER_BASE + 0x0864) //MPOA header Field2 Insertion Value2
+#define TSARM_MPOA_HFIV23	VPint(TSARM_REGISTER_BASE + 0x0868)	//MPOA header Field2 Insertion Value3
+#define TSARM_MPOA_HFIV31 VPint(TSARM_REGISTER_BASE + 0x0870) //MPOA header Field3 Insertion Value1
+#define TSARM_MPOA_HFIV32 VPint(TSARM_REGISTER_BASE + 0x0874) //MPOA header Field3 Insertion Value2
+#define TSARM_MPOA_HFIV33 VPint(TSARM_REGISTER_BASE + 0x0878) //MPOA header Field3 Insertion Value3
+#define TSARM_MPOA_HFIV41 VPint(TSARM_REGISTER_BASE + 0x0880) //MPOA header Field4 Insertion Value1
+#define TSARM_MPOA_HFIV42 VPint(TSARM_REGISTER_BASE + 0x0884) //MPOA header Field4 Insertion Value2
+#define TSARM_MPOA_HFIV43	VPint(TSARM_REGISTER_BASE + 0x0888)	//MPOA header Field4 Insertion Value3
+#endif
+
+/**************************
+ * USB Module Registers *
+ **************************/
+
+#define LA_DEBUG_TRIGGER(addr,val) VPint(0xbfc00000+addr) = val
+/**************************
+* USB 2.0 device Register *
+**************************/
+#define CR_USB20_BASE 0xBFB70000
+
+// --- System Control Register ---
+#define CR_USB20_SYS_CTRL_REG (0x00 | CR_USB20_BASE)
+
+// --- Device Control Register ---
+#define CR_USB20_DEV_CTRL_REG (0x04 | CR_USB20_BASE)
+
+// --- Interrupt Status Register ---
+#define CR_USB20_INTR_STATUS_REG (0x0c | CR_USB20_BASE)
+
+// --- Interrupt ENABLE Register ---
+#define CR_USB20_INTR_ENABLE_REG (0x10 | CR_USB20_BASE)
+
+// --- Interrupt Timing Control Register ---
+#define CR_USB20_INTR_TIMING_CONTROL_REG (0x14 | CR_USB20_BASE)
+
+// --- SETUP Receive Data Buffer Register ---
+#define CR_USB20_SETUP_BASE_REG (0x20 | CR_USB20_BASE)
+
+// --- Control OUT Receive Data Buffer Pointer Register ---
+#define CR_USB20_CONTROL_OUT_BASE_REG (0x24 | CR_USB20_BASE)
+
+// --- Control IN Receive Data Buffer Pointer Register ---
+#define CR_USB20_CONTROL_IN_BASE_REG (0x28 | CR_USB20_BASE)
+
+// --- Control IN Receive Data Buffer Pointer Register ---
+#define CR_USB20_CONTROL_CONF_REG (0x2c | CR_USB20_BASE)
+
+// --- CONTROL Endpoint DMA Transfer Control Register ---
+#define CR_USB20R_CONTROL_EP_DMA_CTRL_REG (0x30 | CR_USB20_BASE)
+
+// --- CONTROL Endpoint DMA Transfer Status Register ---
+#define CR_USB20_CONTROL_EP_DMA_STATUS_REG (0x34 | CR_USB20_BASE)
+
+// --- INTERRUPT IN Transmit Data Buffer Pointer Register ---
+#define CR_USB20_INTERRUPT_IN_BASE_REG (0x38 | CR_USB20_BASE)
+
+// --- INTERRUPT IN Configuration Register ---
+#define CR_USB20_INTERRUPT_IN_CONF_REG (0x3c | CR_USB20_BASE)
+
+// --- INTERRUPT IN Endpoint DMA Transfer Control Register ---
+#define CR_USB20_INTERRUPT_IN_DMA_CTRL_REG (0x40 | CR_USB20_BASE)
+
+// --- INTERRUPT IN Endpoint DMA Transfer Status Register ---
+#define CR_USB20_INTERRUPT_IN_EP_DMA_STATUS_REG (0x44 | CR_USB20_BASE)
+
+// --- Bulk/Ctrl/Intr IN/OUT Underrun/Overrun Error Counter Register ---
+#define CR_USB20_STATUS_COUNT_REG (0x48 | CR_USB20_BASE)
+
+// --- BULK OUT Endpoint Transfer DMA Polling Demand Control Register ---
+#define CR_USB20_BULK_OUT_DMA_POLLING_REG (0x60 | CR_USB20_BASE)
+
+// --- BULK IN Endpoint Transfer DMA Polling Demand Control Register ---
+#define CR_USB20_BULK_IN_DMA_POLLING_REG (0x64 | CR_USB20_BASE)
+
+// --- Bulk OUT Endpoint Transfer Descriptor Base Address Register ---
+#define CR_USB20_BULK_OUT_DESC_BASE_REG (0x68 | CR_USB20_BASE)
+
+// --- Bulk IN Endpoint Transfer Descriptor Base Address Register ---
+#define CR_USB20_BULK_IN_DESC_BASE_REG (0x6c | CR_USB20_BASE)
+
+// --- Bulk OUT/IN Endpoint Transfer Descriptor Ring Size/Offset Register ---
+#define CR_USB20_BULK_DESC_SIZE_OFFSET_REG (0x70 | CR_USB20_BASE)
+
+// --- Bulk OUT/IN Endpoint Configuration Register ---
+#define CR_USB20_BULK_EP_CONF_REG (0x74 | CR_USB20_BASE)
+
+// --- Bulk OUT/IN Endpoint DMA Transfer Control Register ---
+#define CR_USB20_BULK_EP_DMA_CTRL_REG (0x78 | CR_USB20_BASE)
+
+// --- Bulk OUT/IN Endpoint DMA Transfer Status Register ---
+#define CR_USB20_BULK_EP_DMA_STATUS_REG (0x7c | CR_USB20_BASE)
+
+// --- UDC Setup Command Address Register ---
+#define CR_USB20_UDC_SETUP_COMMAND_ADDR_REG (0x80 | CR_USB20_BASE)
+
+// --- UDC Control Endpoint Information Register ---
+#define CR_USB20_UDC_CTRL_EP_INFO_REG (0x84 | CR_USB20_BASE)
+
+// --- UDC BULK IN Endpoint Information Register ---
+#define CR_USB20_UDC_BULK_IN_EP_INFO_REG (0x88 | CR_USB20_BASE)
+
+// --- UDC BULK OUT Endpoint Information Register ---
+#define CR_USB20_UDC_BULK_OUT_EP_INFO_REG (0x8c | CR_USB20_BASE)
+
+// --- UDC INTERRUPT IN Endpoint Information Register ---
+#define CR_USB20_UDC_INTERRUPT_IN_EP_INFO_REG (0x90 | CR_USB20_BASE)
+
+/* TCSUPPORT_MIPS_1004K */
+/* GCR */
+#define GCMP_BASE_ADDR 0x1f8e0000
+#define GCMP_ADDRSPACE_SZ (32 * 1024)
+/* GIC */
+#define GIC_BASE_ADDR 0x1f8c0000
+#define GIC_ADDRSPACE_SZ (128 * 1024)
+
+#define MIPS_GIC_IRQ_BASE (MIPS_CPU_IRQ_BASE)
+
+#define GIC_FLAG_IPI 0x02
+#define GIC_NUM_INTRS (56 + NR_CPUS * 2)
+
+extern unsigned int gicVecPlus1_to_intSrc (unsigned int gicVecPlus1);
+
+enum cpu_num {
+ cpu0, cpu1, cpu2, cpu3
+};
+/* end of TCSUPPORT_MIPS_1004K */
+
+#if 0
+#define TCSUPPORT_CPU_MT7510_FAKE
+
+#ifdef TCSUPPORT_CPU_MT7510
+#undef DEFAULT_USE_EXT_SWIC
+#endif
+#ifdef TCSUPPORT_CPU_MT7520
+#define DEFAULT_USE_EXT_SWIC
+#endif
+#if defined(TCSUPPORT_CPU_RT65168) || defined(TCSUPPORT_CPU_TC3182) // for early MT7530 FPGA verification only
+#define DEFAULT_USE_EXT_SWIC
+#endif
+
+#if defined(TCSUPPORT_CPU_MT7510) || defined(TCSUPPORT_CPU_MT7520) || defined(TCSUPPORT_CPU_MT7520E) || defined(TCSUPPORT_CPU_MT751x20G25G) || defined(TCSUPPORT_CPU_MT7511G) || defined(TCSUPPORT_CPU_MT752025)
+//#define EXT_SWITCH_PHY_CONNECT_INT_MDIO
+
+#ifdef EXT_SWITCH_PHY_CONNECT_INT_MDIO
+#define EXT_SWITCH_PHY_ADDR_OFFSET (0)
+#define INT_SWITCH_PHY_ADDR_OFFSET (8)
+
+#endif //EXT_SWITCH_PHY_CONNECT_INT_MDIO
+
+#endif
+#endif
+#endif /* _TC3162_H_ */
Index: linux-3.18.21/arch/mips/include/asm/tc3162/tc3182_int_source.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/include/asm/tc3162/tc3182_int_source.h 2018-02-05 13:19:54.000000000 +0800
@@ -0,0 +1,137 @@
+/************************************************************************
+ *
+ * Copyright (C) 2006 Trendchip Technologies, Corp.
+ * All Rights Reserved.
+ *
+ * Trendchip Confidential; Need to Know only.
+ * Protected as an unpublished work.
+ *
+ * The computer program listings, specifications and documentation
+ * herein are the property of Trendchip Technologies, Co. and shall
+ * not be reproduced, copied, disclosed, or used in whole or in part
+ * for any reason without the prior express written permission of
+ * Trendchip Technologies, Co.
+ *
+ *************************************************************************/
+
+#ifndef _INT_SOURCE_H_
+#define _INT_SOURCE_H_
+
+enum
+interrupt_source
+{
+#ifdef TCSUPPORT_MIPS_1004K
+ /* intrName irqNum Source fullName */
+ DUMMY=0, /* n/a n/a dummy */
+ UART5_INT, /* 1 46 UART5 */
+ UART4_INT, /* 2 45 UART4 */
+ UART3_INT, /* 3 38 UART3 */
+ UART2_INT, /* 4 16 UART2 */
+ UART_INT, /* 5 2 UART */
+ GPIO_INT, /* 6 10 GPIO */
+ GDMA_INTR, /* 7 14 GDMA */
+ CRYPTO_INT, /* 8 28 Crypto engine */
+ USB_HOST_2, /* 9 48 USB host 2 (port1) */
+ IRQ_RT3XXX_USB, /* 10 17 USB host (port0)*/
+ HSDMA_INTR, /* 11 47 High Speed DMA */
+ WDMA1_WOE_INTR, /* 12 58 WIFI DMA 1 for WOE */
+ WDMA1_P1_INTR, /* 13 57 WIFI DMA 1 port 1 */
+ WDMA1_P0_INTR, /* 14 56 WIFI DMA 1 port 0 */
+ WDMA0_WOE_INTR, /* 15 55 WIFI DMA 1 for WOE */
+ WDMA0_P1_INTR, /* 16 54 WIFI DMA 1 port 1 */
+ WDMA0_P0_INTR, /* 17 53 WIFI DMA 1 port 0 */
+ WOE1_INTR, /* 18 52 WIFI Offload Engine 1 */
+ WOE0_INTR, /* 19 51 WIFI Offload Engine 0 */
+ PCIE_A_INT, /* 20 24 PCIE port 1 */
+ PCIE_0_INT, /* 21 23 PCIE port 0 */
+ MAC1_INT, /* 22 15 Giga Switch */
+ XSI_PHY_INTR, /* 23 50 XFI/HGSMII PHY interface */
+ XSI_MAC_INTR, /* 24 49 XFI/HGSMII MAC interface */
+ QDMA_LAN3_INTR, /* 25 41 QDMA LAN 3 */
+ QDMA_LAN2_INTR, /* 26 40 QDMA LAN 2 */
+ QDMA_LAN1_INTR, /* 27 39 QDMA LAN 1 */
+ QDMA_LAN0_INTR, /* 28 21 QDMA LAN 0 */
+ QDMA_WAN3_INTR, /* 29 44 QDMA WAN 3 */
+ QDMA_WAN2_INTR, /* 30 43 QDMA WAN 2 */
+ QDMA_WAN1_INTR, /* 31 42 QDMA WAN 1 */
+ QDMA_WAN0_INTR, /* 32 22 QDMA WAN 0 */
+ TIMER2_INT, /* 33 6 timer 2 */
+ TIMER1_INT, /* 34 5 timer 1 */
+ TIMER0_INT, /* 35 4 timer 0 */
+ PCM2_INT, /* 36 32 PCM 2 */
+ PCM1_INT, /* 37 11 PCM 1 */
+ XPON_PHY_INTR, /* 38 27 XPON PHY */
+ XPON_MAC_INTR, /* 39 26 XPON MAC */
+ DMT_INT, /* 40 19 xDSL DMT */
+ DYINGGASP_INT, /* 41 18 Dying gasp */
+ CPU_CM_PCINT, /* 42 1 CPU CM Perf Cnt overflow */
+ CPU_CM_ERR, /* 43 0 CPU Coherence Manager Error */
+ FE_ERR_INTR, /* 44 33 Frame Engine Error */
+ EFUSE_ERR1_INTR,/* 45 60 efuse error for prev action not finished */
+ EFUSE_ERR0_INTR,/* 46 59 efuse error for not setting key */
+ AUTO_MANUAL_INT,/* 47 35 SPI Controller Error */
+ PCIE_SERR_INT, /* 48 25 PCIE error */
+ DRAM_PROTECTION,/* 49 3 dram illegal access*/
+ BUS_TOUT_INT, /* 50 31 Pbus timeout */
+ TIMER5_INT, /* 51 9 timer 3 (watchdog) */
+ SI_TIMER_INT, /* 52 30/29/37/36 external CPU timers 0/1/2/3 */
+ RESVINT0, /* 53 n/a n/a */
+ RESVINT1, /* 54 n/a n/a */
+ RESVINT2, /* 55 n/a n/a */
+ IPI_RESCHED_INT0,/* 56 7 ipi resched 0 */
+ IPI_RESCHED_INT1,/* 57 8 ipi resched 1 */
+ IPI_RESCHED_INT2,/* 58 12 ipi resched 2 */
+ IPI_RESCHED_INT3,/* 59 13 ipi resched 3 */
+ IPI_CALL_INT0, /* 60 34 ipi call 0 */
+	IPI_CALL_INT1,	/* 61		61	ipi call 1 */
+	IPI_CALL_INT2,	/* 62		62	ipi call 2 */
+	IPI_CALL_INT3,	/* 63		63	ipi call 3 */
+
+#else
+ /* name IntSrc */
+ DUMMY_INT,
+ UART_INT, //0 IPL10
+ PTM_B0_INT, //1
+ SI_SWINT1_INT0, //2
+ SI_SWINT1_INT1, //3
+ TIMER0_INT, //4 IPL1
+ TIMER1_INT, //5 IPL5
+ TIMER2_INT, //6 IPL6
+ SI_SWINT_INT0, //7
+ SI_SWINT_INT1, //8
+ TIMER5_INT, //9 IPL9
+ GPIO_INT, //10 IPL11
+ PCM1_INT, //11 IPL20
+ SI_PC1_INT, //12
+ SI_PC_INT, //13
+ APB_DMA0_INT, //14 IPL12
+ MAC1_INT, //15 IPL13
+ HSUART_INT, //16 IPL23
+ #if !defined(CONFIG_MIPS_RT63365)
+ RESERVED2, //17
+ #else
+ IRQ_RT3XXX_USB, //17 IPL24
+ #endif
+ DYINGGASP_INT, //18 IPL25
+ DMT_INT, //19 IPL26
+ USB20_INT, //20
+ MAC_INT, //21 IPL3
+ SAR_INT, //22 IPL2
+ USB11_INT,
+ PCIE_A_INT, //24
+ PCIE_SERR_INT, //25
+ PTM_B1_INT, //26 IPL15
+ XSLV1_INT, //27 IPL16
+ USB_INT, //28 IPL17
+ SI_TIMER1_INT, //29
+ SI_TIMER_INT, //30
+ SWR_INT, //31 IPL4
+ BUS_TOUT_INT, //32
+ PCM2_INT, //33
+ RESERVE_B_INT, //34
+ RESERVE_C_INT, //35
+ AUTO_MANUAL_INT //36
+#endif
+};
+
+#endif /* _INT_SOURCE_H_ */
Index: linux-3.18.21/arch/mips/include/asm/tc3162/voip_hook.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/include/asm/tc3162/voip_hook.h 2018-02-05 13:19:54.000000000 +0800
@@ -0,0 +1,5 @@
+#ifndef __VOIP_HOOK_H
+#define __VOIP_HOOK_H
+
+extern unsigned int* (*recv_rtp_src_port_get_hook)(void);
+#endif
Index: linux-3.18.21/arch/mips/include/asm/tc3162/war.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/include/asm/tc3162/war.h 2018-02-05 13:19:54.000000000 +0800
@@ -0,0 +1,25 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
+ */
+#ifndef __ASM_MIPS_MACH_RALINK_WAR_H
+#define __ASM_MIPS_MACH_RALINK_WAR_H
+
+#define R4600_V1_INDEX_ICACHEOP_WAR 0
+#define R4600_V1_HIT_CACHEOP_WAR 0
+#define R4600_V2_HIT_CACHEOP_WAR 0
+#define R5432_CP0_INTERRUPT_WAR 0
+#define BCM1250_M3_WAR 0
+#define SIBYTE_1956_WAR 0
+#define MIPS4K_ICACHE_REFILL_WAR 1
+#define MIPS_CACHE_SYNC_WAR 1
+#define TX49XX_ICACHE_INDEX_INV_WAR 0
+#define RM9000_CDEX_SMP_WAR 0
+#define ICACHE_REFILLS_WORKAROUND_WAR 1
+#define R10000_LLSC_WAR 0
+#define MIPS34K_MISSED_ITLB_WAR 0
+
+#endif /* __ASM_MIPS_MACH_RALINK_WAR_H */
Index: linux-3.18.21/arch/mips/include/asm/thread_info.h
===================================================================
--- linux-3.18.21.orig/arch/mips/include/asm/thread_info.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/include/asm/thread_info.h 2018-02-05 13:19:54.000000000 +0800
@@ -69,8 +69,12 @@
/* thread information allocation */
#if defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_32BIT)
+#if defined(CONFIG_CPU_TC3162) || defined(CONFIG_MIPS_TC3262)
+#define THREAD_SIZE_ORDER (2)
+#else
#define THREAD_SIZE_ORDER (1)
#endif
+#endif
#if defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_64BIT)
#define THREAD_SIZE_ORDER (2)
#endif
Index: linux-3.18.21/arch/mips/include/asm/time.h
===================================================================
--- linux-3.18.21.orig/arch/mips/include/asm/time.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/include/asm/time.h 2018-02-05 13:19:54.000000000 +0800
@@ -22,6 +22,14 @@
extern spinlock_t rtc_lock;
/*
+ * Timer interrupt functions.
+ * mips_timer_state is needed for high precision timer calibration.
+ * mips_timer_ack may be NULL if the interrupt is self-recoverable.
+ */
+extern void (*board_time_init)(void);
+extern void (*mips_timer_ack)(void);
+
+/*
* RTC ops. By default, they point to weak no-op RTC functions.
* rtc_mips_set_time - reverse the above translation and set time to RTC.
* rtc_mips_set_mmss - similar to rtc_set_time, but only min and sec need
Index: linux-3.18.21/arch/mips/kernel/cevt-r4k.c
===================================================================
--- linux-3.18.21.orig/arch/mips/kernel/cevt-r4k.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/kernel/cevt-r4k.c 2018-02-05 13:19:55.000000000 +0800
@@ -15,6 +15,7 @@
#include <asm/time.h>
#include <asm/cevt-r4k.h>
#include <asm/gic.h>
+#include <asm/tc3162/tc3162.h>
static int mips_next_event(unsigned long delta,
struct clock_event_device *evt)
@@ -53,6 +54,8 @@
if (handle_perf_irq(r2))
goto out;
+ ecnt_mips_time_ack(cpu);
+
/*
* The same applies to performance counter interrupts. But with the
* above we now know that the reason we got here must be a timer
@@ -164,9 +167,12 @@
if (!cpu_has_counter || !mips_hpt_frequency)
return -ENXIO;
+#ifndef TCSUPPORT_MIPS_1004K
+/* don't need to check c0_compare_int_usable because we use external CPU timers.
+ * Note: system will hang if checking c0_compare_int_usable */
if (!c0_compare_int_usable())
return -ENXIO;
-
+#endif
/*
* With vectored interrupts things are getting platform specific.
* get_c0_compare_int is a hook to allow a platform to return the
@@ -204,6 +210,7 @@
cp0_timer_irq_installed = 1;
setup_irq(irq, &c0_compare_irqaction);
+ printk("r4k_clockevent_init: setup_irq OK, irq is [%d]\r\n", irq);
return 0;
}
Index: linux-3.18.21/arch/mips/kernel/csrc-r4k.c
===================================================================
--- linux-3.18.21.orig/arch/mips/kernel/csrc-r4k.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/kernel/csrc-r4k.c 2018-02-05 13:19:55.000000000 +0800
@@ -15,7 +15,8 @@
return read_c0_count();
}
-static struct clocksource clocksource_mips = {
+//static
+struct clocksource clocksource_mips = {
.name = "MIPS",
.read = c0_hpt_read,
.mask = CLOCKSOURCE_MASK(32),
Index: linux-3.18.21/arch/mips/kernel/irq.c
===================================================================
--- linux-3.18.21.orig/arch/mips/kernel/irq.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/kernel/irq.c 2018-02-05 13:19:55.000000000 +0800
@@ -137,11 +137,12 @@
* SMP cross-CPU interrupts have their own specific
* handlers).
*/
-void __irq_entry do_IRQ(unsigned int irq)
+__IMEM void __irq_entry do_IRQ(unsigned int irq)
{
irq_enter();
check_stack_overflow();
generic_handle_irq(irq);
irq_exit();
}
+EXPORT_SYMBOL(do_IRQ);
Index: linux-3.18.21/arch/mips/kernel/mips-mt-fpaff.c
===================================================================
--- linux-3.18.21.orig/arch/mips/kernel/mips-mt-fpaff.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/kernel/mips-mt-fpaff.c 2018-02-05 13:19:55.000000000 +0800
@@ -73,6 +73,27 @@
if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
return -EFAULT;
+#ifndef TCSUPPORT_MIPS_1004K /* for 34K CPU only */
+#ifdef CONFIG_MIPS_MT_SMP
+ printk("\r\n\r\n%s new_mask origin value is %08x\r\n\r\n", __FUNCTION__, *new_mask);
+ if(*(unsigned long *)new_mask == 0x8)
+ {
+ *(unsigned long *)new_mask = 0x2;
+ }
+ else if(*(unsigned long *)new_mask == 0xf)
+ {
+ *(unsigned long *)new_mask = 0x3;
+ }
+ else
+ {
+ *(unsigned long *)new_mask = 0x1;
+ }
+
+ printk("\r\n\r\n%s new_mask changed value is %08x\r\n\r\n", __FUNCTION__, *new_mask);
+#endif
+
+#endif
+
get_online_cpus();
rcu_read_lock();
Index: linux-3.18.21/arch/mips/kernel/mips-mt.c
===================================================================
--- linux-3.18.21.orig/arch/mips/kernel/mips-mt.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/kernel/mips-mt.c 2018-02-05 13:19:55.000000000 +0800
@@ -30,7 +30,11 @@
__setup("maxvpes=", maxvpes);
-int tclimit;
+#ifndef CONFIG_MIPS_MT_SMTC
+int tclimit = 2;
+#else
+int tclimit = 4;
+#endif
static int __init maxtcs(char *str)
{
@@ -118,12 +122,107 @@
local_irq_restore(flags);
}
+/*
+ * Dump new MIPS MT state for the core. Does not leave TCs halted.
+ * Takes an argument which taken to be a pre-call MVPControl value.
+ */
+
+void mips_mt_regdump_nmi(unsigned long mvpctl)
+{
+ unsigned long flags;
+ unsigned long vpflags;
+ unsigned long mvpconf0;
+ int nvpe;
+ int ntc;
+ int i;
+ int tc;
+ unsigned long haltval;
+ unsigned long tcstatval;
+#ifdef CONFIG_MIPS_MT_SMTC
+ void smtc_soft_dump(void);
+#endif /* CONFIG_MIPT_MT_SMTC */
+
+ local_irq_save(flags);
+ vpflags = dvpe();
+ printk("=== MIPS MT State Dump ===\n");
+ printk("-- Global State --\n");
+ printk(" MVPControl Passed: %08lx\n", mvpctl);
+ printk(" MVPControl Read: %08lx\n", vpflags);
+ printk(" MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0()));
+ nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+ ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
+ printk("-- per-VPE State --\n");
+ for (i = 0; i < nvpe; i++) {
+ for (tc = 0; tc < ntc; tc++) {
+ settc(tc);
+ if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
+ printk(" VPE %d\n", i);
+ printk(" VPEControl : %08lx\n",
+ read_vpe_c0_vpecontrol());
+ printk(" VPEConf0 : %08lx\n",
+ read_vpe_c0_vpeconf0());
+ printk(" VPE%d.Status : %08lx\n",
+ i, read_vpe_c0_status());
+ //printk(" VPE%d.EPC : %08lx %pS\n",
+ // i, read_vpe_c0_epc(),
+ // (void *) read_vpe_c0_epc());
+ printk(" VPE%d.EPC : %08lx\n",
+ i, read_vpe_c0_epc());
+ printk(" VPE%d.Cause : %08lx\n",
+ i, read_vpe_c0_cause());
+ printk(" VPE%d.Config7 : %08lx\n",
+ i, read_vpe_c0_config7());
+ break; /* Next VPE */
+ }
+ }
+ }
+ printk("-- per-TC State --\n");
+ for (tc = 0; tc < ntc; tc++) {
+ settc(tc);
+ if (read_tc_c0_tcbind() == read_c0_tcbind()) {
+ /* Are we dumping ourself? */
+ haltval = 0; /* Then we're not halted, and mustn't be */
+ tcstatval = flags; /* And pre-dump TCStatus is flags */
+ printk(" TC %d (current TC with VPE EPC above)\n", tc);
+ } else {
+ haltval = read_tc_c0_tchalt();
+ write_tc_c0_tchalt(1);
+ tcstatval = read_tc_c0_tcstatus();
+ printk(" TC %d\n", tc);
+ }
+ printk(" TCStatus : %08lx\n", tcstatval);
+ printk(" TCBind : %08lx\n", read_tc_c0_tcbind());
+ //printk(" TCRestart : %08lx %pS\n",
+ // read_tc_c0_tcrestart(), (void *) read_tc_c0_tcrestart());
+ printk(" TCRestart : %08lx\n",
+ read_tc_c0_tcrestart());
+ printk(" TCHalt : %08lx\n", haltval);
+ printk(" TCContext : %08lx\n", read_tc_c0_tccontext());
+ if (!haltval)
+ write_tc_c0_tchalt(0);
+ }
+#ifdef CONFIG_MIPS_MT_SMTC
+ smtc_soft_dump();
+#endif /* CONFIG_MIPT_MT_SMTC */
+ printk("===========================\n");
+ evpe(vpflags);
+ local_irq_restore(flags);
+}
+
+static int mt_opt_es;
static int mt_opt_norps;
static int mt_opt_rpsctl = -1;
static int mt_opt_nblsu = -1;
static int mt_opt_forceconfig7;
static int mt_opt_config7 = -1;
+static int __init es_set(char *str)
+{
+ get_option(&str, &mt_opt_es);
+ return 1;
+}
+__setup("es=", es_set);
+
static int __init rps_disable(char *s)
{
mt_opt_norps = 1;
@@ -202,6 +301,13 @@
unsigned int oconfig7 = read_c0_config7();
unsigned int nconfig7 = oconfig7;
+ if (mt_opt_es >= 0) {
+ printk("34K sync es set to %d.\n", mt_opt_es);
+ if (mt_opt_es)
+ nconfig7 |= (1 << 8);
+ else
+ nconfig7 &= ~(1 << 8);
+ }
if (mt_opt_norps) {
printk("\"norps\" option deprecated: use \"rpsctl=\"\n");
}
Index: linux-3.18.21/arch/mips/kernel/mips_ksyms.c
===================================================================
--- linux-3.18.21.orig/arch/mips/kernel/mips_ksyms.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/kernel/mips_ksyms.c 2018-02-05 13:19:55.000000000 +0800
@@ -50,6 +50,7 @@
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(kernel_thread);
/*
* Functions that operate on entire pages. Mostly used by memory management.
*/
Index: linux-3.18.21/arch/mips/kernel/smp-cmp.c
===================================================================
--- linux-3.18.21.orig/arch/mips/kernel/smp-cmp.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/kernel/smp-cmp.c 2018-02-05 13:19:55.000000000 +0800
@@ -44,9 +44,19 @@
struct cpuinfo_mips *c __maybe_unused = &current_cpu_data;
/* Assume GIC is present */
+#ifdef TCSUPPORT_MIPS_1004K
+ if(cpu_has_veic) {
+ /* When EIC is enabled, this field is the encoded (0:63) value of the current IPL.
+ An interrupt will be signaled only if the present IPL is higher than this value. */
+ change_c0_status(ST0_IM, 0);
+ } else {
+ /* enable all interrupts' mask */
+ change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7);
+ }
+#else
change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 |
STATUSF_IP7);
-
+#endif
/* Enable per-cpu interrupts: platform specific */
#ifdef CONFIG_MIPS_MT_SMP
Index: linux-3.18.21/arch/mips/kernel/smp-gic.c
===================================================================
--- linux-3.18.21.orig/arch/mips/kernel/smp-gic.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/kernel/smp-gic.c 2018-02-05 13:19:55.000000000 +0800
@@ -20,6 +20,7 @@
void gic_send_ipi_single(int cpu, unsigned int action)
{
+#ifdef TCSUPPORT_MIPS_1004K
unsigned long flags;
unsigned int intr;
unsigned int core = cpu_data[cpu].core;
@@ -53,6 +54,7 @@
}
local_irq_restore(flags);
+#endif
}
void gic_send_ipi_mask(const struct cpumask *mask, unsigned int action)
Index: linux-3.18.21/arch/mips/kernel/smp-mt.c
===================================================================
--- linux-3.18.21.orig/arch/mips/kernel/smp-mt.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/kernel/smp-mt.c 2018-02-05 13:19:55.000000000 +0800
@@ -158,6 +158,10 @@
static void vsmp_init_secondary(void)
{
+#ifdef CONFIG_MIPS_TC3262
+ write_c0_status((read_c0_status() & ~ST0_IM ) |
+ (STATUSF_IP0 | STATUSF_IP1));
+#else
#ifdef CONFIG_IRQ_GIC
/* This is Malta specific: IPI,performance and timer interrupts */
if (gic_present)
@@ -167,6 +171,7 @@
#endif
change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
STATUSF_IP6 | STATUSF_IP7);
+#endif
}
static void vsmp_smp_finish(void)
Index: linux-3.18.21/arch/mips/kernel/spram.c
===================================================================
--- linux-3.18.21.orig/arch/mips/kernel/spram.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/kernel/spram.c 2018-02-05 13:19:55.000000000 +0800
@@ -16,6 +16,19 @@
#include <asm/mipsregs.h>
#include <asm/r4kcache.h>
#include <asm/hazards.h>
+#include <linux/module.h>
+#include <asm/cpu.h>
+#ifdef CONFIG_MIPS_TC3262
+#include <asm/tc3162/tc3162.h>
+extern int __imem, __dmem;
+#endif
+static char *sram_allocp = NULL;
+static int sram_size = 0;
+static int sram_free = 0;
+
+static char *dspram_p = NULL;
+static int dspram_used_size = 0;
+static int dspram_max_size = 0x1000; //4K
/*
* These definitions are correct for the 24K/34K/74K SPRAM sample
@@ -174,6 +187,7 @@
if (strcmp(type, "DSPRAM") == 0) {
unsigned int *vp = (unsigned int *)(CKSEG1 | pa);
unsigned int v;
+ if(!isMT751020 && !isMT7505 && !isEN751221){
#define TDAT 0x5a5aa5a5
vp[0] = TDAT;
vp[1] = ~TDAT;
@@ -189,12 +203,172 @@
printk(KERN_ERR "vp=%p wrote=%08x got=%08x\n",
vp+1, ~TDAT, v);
}
+#ifdef CONFIG_MIPS_TC3262
+ if (enabled) {
+ if(isMT751020 || isMT7505 || isEN751221){
+ dspram_max_size = size;
+ }
+ else{
+ sram_allocp = (char *) vp;
+ sram_size = sram_free = size;
+ }
+ }
+#endif
+ }
pr_info("%s%d: PA=%08x,Size=%08x%s\n",
type, i, pa, size, enabled ? ",enabled" : "");
offset += 2 * SPRAM_TAG_STRIDE;
}
}
+
+/*****************************************************************************/
+int is_sram_addr(void *p)
+{
+ if ((CKSEG1ADDR(p) & 0xffffc000) == (CKSEG1ADDR(DSPRAM_BASE) & 0xffffc000))
+ return 1;
+ else
+ return 0;
+}
+EXPORT_SYMBOL(is_sram_addr);
+
+void *alloc_sram(int n)
+{
+ if (sram_allocp == NULL)
+ return NULL;
+
+ if (sram_free >= n) {
+ sram_free -= n;
+ sram_allocp += n;
+ printk("alloc_sram p=%p free=%04x\n", sram_allocp, sram_free);
+ return sram_allocp - n;
+ } else
+ return NULL;
+}
+EXPORT_SYMBOL(alloc_sram);
+
+void free_sram(void *p, int n)
+{
+ if (sram_allocp == (p+n)) {
+ sram_free += n;
+ sram_allocp -= n;
+ }
+ printk("free_sram p=%p free=%04x\n", sram_allocp, sram_free);
+}
+EXPORT_SYMBOL(free_sram);
+
+void write_to_dspram(long data)
+{
+ if(dspram_p == NULL || dspram_max_size == 0)
+ return;
+
+
+ *(long *)dspram_p = data;
+ dspram_p += sizeof(long);
+
+ dspram_used_size += sizeof(long);
+
+ if(dspram_used_size >= dspram_max_size){
+ dspram_p = (char *)(DSPRAM_BASE);
+ dspram_used_size = 0;
+ }
+}
+
+unsigned int dspram_base_addr(void)
+{
+ return DSPRAM_BASE;
+}
+
+#define MIPS34K_Index_Store_Data_I 0x0c
+
+static void ispram_store_data(unsigned int offset, unsigned int datalo, unsigned int datahi)
+{
+ unsigned int errctl;
+
+ /* enable SPRAM tag access */
+ errctl = bis_c0_errctl(ERRCTL_SPRAM);
+ ehb();
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ write_c0_idatalo(datahi);
+ ehb();
+
+ write_c0_idatahi(datalo);
+ ehb();
+#else
+ write_c0_idatalo(datalo);
+ ehb();
+
+ write_c0_idatahi(datahi);
+ ehb();
+#endif
+
+ cache_op(MIPS34K_Index_Store_Data_I, CKSEG0|offset);
+ ehb();
+
+ write_c0_errctl(errctl);
+ ehb();
+}
+
+void ispram_fill(void)
+{
+ unsigned int pa, size, tag0, tag1;
+ unsigned int offset;
+ unsigned int datalo, datahi;
+
+ tag0 = ispram_load_tag(0);
+ tag1 = ispram_load_tag(0+SPRAM_TAG_STRIDE);
+
+ pa = tag0 & SPRAM_TAG0_PA_MASK;
+ size = tag1 & SPRAM_TAG1_SIZE_MASK;
+
+ if (size == 0)
+ return;
+
+ for (offset = 0; offset < size; offset += 8) {
+ datalo = *(unsigned int *) (PHYS_TO_K0(pa + offset));
+ datahi = *(unsigned int *) (PHYS_TO_K0(pa + offset + 4));
+ ispram_store_data(offset, datalo, datahi);
+ }
+}
+
+void ispram_refill(void)
+{
+ /*
+ * probe_spram("ISPRAM", CPHYSADDR(&__imem), &ispram_load_tag, &ispram_store_tag);
+ */
+ if (read_c0_config() & (1<<24))
+ ispram_fill();
+}
+
+static inline void ecnt_spram_config(unsigned int config0)
+{
+#ifdef CONFIG_TC3162_IMEM
+ if (config0 & (1<<24)) {
+ probe_spram("ISPRAM", CPHYSADDR(&__imem),
+ &ispram_load_tag, &ispram_store_tag);
+ ispram_fill();
+ if (!isRT63165 && !isRT63365 && !isMT751020 && !isMT7505 && !isEN751221)
+ VPint(CR_DMC_ISPCFGR) = (CPHYSADDR(&__imem) & 0xfffff000) | (1<<8) | (0x7);
+ }
+#endif
+#ifdef CONFIG_TC3162_DMEM
+ if (config0 & (1<<23)) {
+ if(isMT751020){
+ probe_spram("DSPRAM", CPHYSADDR(DSPRAM_BASE),
+ &dspram_load_tag, &dspram_store_tag);
+ dspram_p = (char *)(DSPRAM_BASE);
+ }
+ else{
+ probe_spram("DSPRAM", CPHYSADDR(DSPRAM_BASE),
+ &dspram_load_tag, &dspram_store_tag);
+ VPint(CR_DMC_DSPCFGR) = (CPHYSADDR(DSPRAM_BASE) & 0xfffff000) | (1<<8) | (0x7);
+ }
+ }
+#endif
+}
+/*****************************************************************************/
+
void spram_config(void)
{
unsigned int config0;
@@ -210,6 +384,9 @@
case CPU_P5600:
config0 = read_c0_config();
/* FIXME: addresses are Malta specific */
+#ifdef CONFIG_MIPS_TC3262
+ ecnt_spram_config(config0);
+#else
if (config0 & (1<<24)) {
probe_spram("ISPRAM", 0x1c000000,
&ispram_load_tag, &ispram_store_tag);
@@ -217,5 +394,6 @@
if (config0 & (1<<23))
probe_spram("DSPRAM", 0x1c100000,
&dspram_load_tag, &dspram_store_tag);
+#endif
}
}
Index: linux-3.18.21/arch/mips/kernel/traps.c
===================================================================
--- linux-3.18.21.orig/arch/mips/kernel/traps.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/kernel/traps.c 2018-02-05 13:19:55.000000000 +0800
@@ -61,6 +61,7 @@
#include <asm/types.h>
#include <asm/stacktrace.h>
#include <asm/uasm.h>
+#include <asm/tc3162/ecnt_traps.h>
extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
@@ -368,7 +369,7 @@
set_fs(old_fs);
}
-static int regs_to_trapnr(struct pt_regs *regs)
+int regs_to_trapnr(struct pt_regs *regs)
{
return (regs->cp0_cause >> 2) & 0x1f;
}
@@ -1358,6 +1359,15 @@
* Clear WP (bit 22) bit of cause register so we don't loop
* forever.
*/
+ if(watchFlag){
+ dump_tlb_all();
+ show_registers(regs);
+ //show_regs(regs);
+ //compute_return_epc(regs);
+ panic("Caught WATCH exception - probably caused by stack overflow\n");
+ return;
+ }
+ else{
cause = read_c0_cause();
cause &= ~(1 << 22);
write_c0_cause(cause);
@@ -1377,6 +1387,7 @@
}
exception_exit(prev_state);
}
+}
asmlinkage void do_mcheck(struct pt_regs *regs)
{
@@ -1706,10 +1717,12 @@
raw_notifier_call_chain(&nmi_chain, 0, regs);
bust_spinlocks(1);
+ nmi_info_store(regs);
snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
smp_processor_id(), regs->cp0_epc);
regs->cp0_epc = read_c0_errorepc();
- die(str, regs);
+ //die(str, regs);
+ die_nmi(str, regs,&die_lock);
}
#define VECTORSPACING 0x100 /* for EI/VI mode */
@@ -1864,6 +1877,7 @@
{
return set_vi_srs_handler(n, addr, 0);
}
+EXPORT_SYMBOL(set_vi_handler);
extern void tlb_init(void);
Index: linux-3.18.21/arch/mips/kernel/vmlinux.lds.S
===================================================================
--- linux-3.18.21.orig/arch/mips/kernel/vmlinux.lds.S 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/kernel/vmlinux.lds.S 2018-02-05 13:19:55.000000000 +0800
@@ -13,6 +13,10 @@
#undef mips
#define mips mips
+#if defined(TCSUPPORT_CPU_EN7512) || defined(TCSUPPORT_CPU_EN7521)
+#undef CONFIG_IMEM_SIZE
+#define CONFIG_IMEM_SIZE 65536
+#endif
OUTPUT_ARCH(mips)
ENTRY(kernel_entry)
PHDRS {
@@ -60,7 +64,21 @@
*(.fixup)
*(.gnu.warning)
} :text = 0
+
+#ifdef CONFIG_TC3162_IMEM
+#ifdef CONFIG_CPU_TC3162
+ . = ALIGN(16384);
+#else
+ . = ALIGN(32768);
+#endif
+ __imem = . ;
+ .imem_text : { *(.imem_text) }
+ _imem_end = .;
+ _etext = (__imem + CONFIG_IMEM_SIZE) > . ? (__imem + CONFIG_IMEM_SIZE) : .;
+ . = _etext;
+#else
_etext = .; /* End of text section */
+#endif
EXCEPTION_TABLE(16)
Index: linux-3.18.21/arch/mips/mm/c-r4k.c
===================================================================
--- linux-3.18.21.orig/arch/mips/mm/c-r4k.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/mm/c-r4k.c 2018-02-05 13:19:56.000000000 +0800
@@ -700,7 +700,7 @@
#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
-static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
+__IMEM static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
/* Catch bad driver code */
BUG_ON(size == 0);
@@ -733,7 +733,7 @@
__sync();
}
-static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
+__IMEM static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
/* Catch bad driver code */
BUG_ON(size == 0);
Index: linux-3.18.21/arch/mips/mm/cache.c
===================================================================
--- linux-3.18.21.orig/arch/mips/mm/cache.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/mm/cache.c 2018-02-05 13:19:56.000000000 +0800
@@ -57,6 +57,7 @@
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);
+EXPORT_SYMBOL(_dma_cache_inv);
EXPORT_SYMBOL(_dma_cache_wback_inv);
#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
Index: linux-3.18.21/arch/mips/mm/tlbex.c
===================================================================
--- linux-3.18.21.orig/arch/mips/mm/tlbex.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/mm/tlbex.c 2018-02-05 13:19:56.000000000 +0800
@@ -1243,7 +1243,7 @@
* unused TLB refill exception.
*/
#define MIPS64_REFILL_INSNS 32
-
+u32 workaround_painc;
static void build_r4000_tlb_refill_handler(void)
{
u32 *p = tlb_handler;
@@ -1300,6 +1300,7 @@
build_tlb_write_entry(&p, &l, &r, tlb_random);
uasm_l_leave(&l, p);
uasm_i_eret(&p); /* return from trap */
+ workaround_painc = p;//no hang
}
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
uasm_l_tlb_huge_update(&l, p);
Index: linux-3.18.21/arch/mips/pci/Makefile
===================================================================
--- linux-3.18.21.orig/arch/mips/pci/Makefile 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/arch/mips/pci/Makefile 2018-02-05 13:19:56.000000000 +0800
@@ -18,6 +18,18 @@
obj-$(CONFIG_BCM47XX) += pci-bcm47xx.o
obj-$(CONFIG_BCM63XX) += pci-bcm63xx.o fixup-bcm63xx.o \
ops-bcm63xx.o
+
+obj-$(CONFIG_MIPS_TC3162) += ops-tc3162.o pci-tc3162.o fixup-tc3162.o
+ifdef CONFIG_MIPS_TC3262
+EXTRA_CFLAGS += -DPCIE_PCI_COEXIT
+endif
+obj-$(CONFIG_MIPS_TC3262) += ops-tc3162u.o pci-tc3162u.o fixup-tc3162u.o pcie-phy.o
+obj-$(CONFIG_MIPS_TC3262) += ops-tc3162.o pci-tc3162.o fixup-tc3162.o pci-7512api.o
+ifdef CONFIG_MIPS_TC3162U
+EXTRA_CFLAGS += -DPCIE_PCI_COEXIT
+endif
+obj-$(CONFIG_MIPS_TC3162U) += ops-tc3162u.o pci-tc3162u.o fixup-tc3162u.o pcie-phy.o
+obj-$(CONFIG_MIPS_TC3162U) += ops-tc3162.o pci-tc3162.o fixup-tc3162.o
obj-$(CONFIG_MIPS_ALCHEMY) += pci-alchemy.o
obj-$(CONFIG_SOC_AR71XX) += pci-ar71xx.o
obj-$(CONFIG_PCI_AR724X) += pci-ar724x.o
Index: linux-3.18.21/arch/mips/pci/fixup-tc3162.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/pci/fixup-tc3162.c 2018-02-05 13:19:56.000000000 +0800
@@ -0,0 +1,73 @@
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <asm/tc3162/tc3162.h>
+
+static char irq_tab_tc3162[] __initdata = {
+#ifndef CONFIG_MIPS_TC3262
+ [1] = PCI_A_INT,
+ [2] = PCI_B_INT,
+#endif
+};
+
+int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+#ifndef PCIE_PCI_COEXIT
+ if (slot <= 0)
+ return -1;
+#else
+
+ if (isEN751221 || isEN751627){
+ if (slot == 0) {
+ #ifdef TCSUPPORT_MIPS_1004K
+ return PCIE_0_INT;
+ #else
+ return USB11_INT;
+ #endif
+ }
+ else if(slot == 1)
+ return PCIE_A_INT;
+ }
+
+ if (slot == 0){
+ return PCIE_A_INT;
+ }
+#ifdef CONFIG_MIPS_TC3262
+ else if((isRT63365 || isMT751020 || isMT7505) && (slot == 1)){
+ #ifdef TCSUPPORT_MIPS_1004K
+ return PCIE_0_INT;
+ #else
+ return USB11_INT; //rt63365 use original usb 11 INT number
+ #endif
+ }
+#endif
+#endif
+ return irq_tab_tc3162[slot];
+}
+
+static void tc3162_pci_fixup(struct pci_dev *dev)
+{
+ /* setup COMMAND register */
+ pci_write_config_word(dev, PCI_COMMAND,
+ (PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER));
+
+ /* setup CACHE_LINE_SIZE register */
+ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 0x8);
+
+ /* setup LATENCY_TIMER register */
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
+
+ /* setup BARS */
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0);
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, 0x1FBA0000);
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_2, 0x1FBB0000);
+}
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TRENDCHIP, PCI_DEVICE_ID_TC3162,
+ tc3162_pci_fixup);
+
Index: linux-3.18.21/arch/mips/pci/fixup-tc3162u.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/pci/fixup-tc3162u.c 2018-02-05 13:19:56.000000000 +0800
@@ -0,0 +1,408 @@
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <asm/tc3162/tc3162.h>
+#include <linux/delay.h>
+#if defined(TCSUPPORT_CPU_MT7505) || defined(TCSUPPORT_CPU_EN7512) || defined(TCSUPPORT_CPU_EN7521)
+#define isRC0_LINKUP ((regRead32(0xbfb80050) & 0x2) ? 1 : 0)
+#define isRC1_LINKUP ((regRead32(0xbfb80050) & 0x4) ? 1 : 0)
+#else
+#define isRC0_LINKUP ((regRead32(0xbfb82050) & 0x1) ? 1 : 0)
+#define isRC1_LINKUP ((regRead32(0xbfb83050) & 0x1) ? 1 : 0)
+#endif
+
+
+unsigned long int pcie_read_config_word(unsigned char type, unsigned char bus, unsigned char devnum, unsigned int regnum);
+int pcie_write_config_word(unsigned char type, unsigned char bus, unsigned char devnum, unsigned int regnum, unsigned long int value);
+
+extern int mt7512_pcie_is_slave(void);
+extern int dual_band_support;
+
+#ifndef PCIE_PCI_COEXIT
+/*
+static char irq_tab_tc3162_pcie[] __initdata = {
+ //[1] = PCI_A_INT,
+ //[2] = PCI_B_INT,
+ [0] = PCIE_A_INT,//krammer
+};
+*/
+int __init pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ //if (slot <= 0)
+#if 0
+ if (slot < 0)
+ return -1;
+#endif
+ //return irq_tab_tc3162[slot];
+ return PCIE_A_INT;
+}
+#endif
+
+
+static void tc3162_pcie_fixup(struct pci_dev *dev)
+{
+ /* setup COMMAND register */
+ pci_write_config_word(dev, PCI_COMMAND,
+ (PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER));
+
+ /* setup CACHE_LINE_SIZE register */
+ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 0x8);
+
+ /* setup LATENCY_TIMER register */
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
+
+ /* setup BARS */
+// pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0);
+// pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, 0x1FBA0000);
+// pci_write_config_dword(dev, PCI_BASE_ADDRESS_2, 0x1FBB0000);
+}
+static void tc3162_pcie_fixup_ra63165(struct pci_dev *dev)
+{
+ uint32 tmp;
+#if defined(TCSUPPORT_BONDING)
+ int i;
+#endif
+
+ /* setup COMMAND register */
+ pci_write_config_word(dev, PCI_COMMAND,
+ (PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER));
+
+#if defined(TCSUPPORT_BONDING)
+ //Enable slave RC ECRC count . //bus1, dev1
+ regWrite32(0xbfb80020, 0x1080118);
+ tmp = regRead32(0xbfb80024);
+ regWrite32(0xbfb80024, (tmp | (1<<8)));
+
+ //config PCIe RC/EP VC mapping
+ //set bus0, dev0, fun0, reg154 (setup VC0)
+ regWrite32(0xbfb80020, 0x154);
+ regWrite32(0xbfb80024, 0X80000001);
+ //set bus0, dev0, fun0, reg160 (setup VC1)
+ regWrite32(0xbfb80020, 0x160);
+ regWrite32(0xbfb80024, 0X81000002);
+ //set bus1, dev0, fun0, reg154 (setup VC0)
+ regWrite32(0xbfb80020, 0x1000154);
+ regWrite32(0xbfb80024, 0X80000001);
+ //set bus1, dev0, fun0, reg160 (setup VC1)
+ regWrite32(0xbfb80020, 0x1000160);
+ regWrite32(0xbfb80024, 0X81000002);
+
+ //config slave chip EP MSI
+ regWrite32(0xbfb80020, 0x1000050);
+ tmp = regRead32(0xbfb80024);
+ regWrite32(0xbfb80020, 0x1000050);
+ regWrite32(0xbfb80024, (tmp | 0x510000));
+ regWrite32(0xbfb80020, 0x1000054);
+ regWrite32(0xbfb80024, 0x20af1000);
+ regWrite32(0xbfb80020, 0x100005c);
+ regWrite32(0xbfb80024, 0x0);
+
+ //setup RC0 MSI address reg
+ regWrite32(0xbfb82090, 0x20af1000);
+
+ //setup RC0 Pbus/Rbus VC mapping
+ regWrite32(0xbfb82094, 0x1);
+ regWrite32(0xbfb83094, 0x0);
+
+ //wait RC0 VC1 set up OK
+ for(i=0 ; i<1000 ; i++){
+ mdelay(1);
+ regWrite32(0xbfb80020, 0x164);
+ if((regRead32(0xbfb80024) & (1<<17)) == 0){
+ break;
+ }
+ }
+#endif
+
+ //pci-e interrupt enable_dma
+ if(isRT63365 || isMT751020 || isMT7505){
+#if defined(TCSUPPORT_BONDING)
+ if((regRead32(0xbfb82050) & 0x1) != 0){
+ /* slave dmt */
+ tmp = regRead32(0xbfb8000c);
+ regWrite32(0xbfb8000c, (tmp | (1<<23)));
+ /* slave gdma */
+ tmp = regRead32(0xbfb8000c);
+ regWrite32(0xbfb8000c, (tmp | (1<<25)));
+
+ if(regRead32(0xbfb80050) == 1){
+ /* wifi 0 (slave)*/
+ tmp = regRead32(0xbfb8000c);
+ regWrite32(0xbfb8000c, (tmp | (1<<22)));
+ }
+ }
+#else
+ //if((regRead32(0xbfb82050) & 0x1) != 0){
+ if(isRC0_LINKUP){
+ tmp = regRead32(0xbfb8000c);
+ regWrite32(0xbfb8000c, (tmp | (1<<20)));
+ }
+#endif
+ //second band
+ if(dual_band_support){
+ if((regRead32(0xbfb83050) & 0x1) != 0){
+ if(isMT751020 || isMT7505){
+ tmp = regRead32(0xbfb8000c);
+ regWrite32(0xbfb8000c, (tmp | (1<<26)));
+ }else{
+ tmp = regRead32(0xbfb8000c);
+ regWrite32(0xbfb8000c, (tmp | (1<<21)));
+ }
+ }
+ }
+ }else{
+ tmp = regRead32(0xbfb8100c);
+ regWrite32(0xbfb8100c, (tmp | (1<<20)));
+ }
+
+ if(isMT751020 || isMT7505){
+ pcie_timeout_disable();
+ aer_config(1);
+ }
+}
+
+#ifndef PCIE_PCI_COEXIT
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
+#endif
+
+
+int mt7512_pcie_get_pos(char bus,char dev)
+{
+ unsigned int val,pos;
+
+ val = pcie_read_config_word(0,bus,dev,0x34);
+ pos = val&0xff;
+ while(pos && pos != 0xff)
+ {
+ val = pcie_read_config_word(0,bus,dev,pos);
+ if ( (val&0xff) == 0x10)
+ return pos;
+ pos = (val >> 0x08) & 0xff;
+ }
+ return 0;
+}
+
+int mt7512_pcie_rc0_retrain(void)
+{
+ unsigned int pos = 0, ppos = 0,bus;
+ unsigned int linkcap, plinkcap,plinksta;
+
+ ppos = mt7512_pcie_get_pos(0,0);//bus0 dev0 rc0
+ if (mt7512_pcie_is_slave()==1)
+ return;
+ else
+ bus = 1; //bus1 dev0
+
+ pos = mt7512_pcie_get_pos(bus,0); //bus1 dev0
+
+ if (pos <0x40 || ppos < 0x40)
+ return 0;
+
+ plinkcap = pcie_read_config_word(0,0,0,ppos+0x0c);//bus0 dev0 rc0
+ linkcap = pcie_read_config_word(0,bus,0,pos+0x0c);//bus1 dev0 ep0
+
+ printk("\n mt7512_pcie_rc0_retrain: %x = %08x %x = %08x",pos,linkcap,ppos,plinkcap);
+
+ if ((linkcap&0x0f)== 1 || (plinkcap&0x0f)==1)
+ return 0;
+
+ plinksta = pcie_read_config_word(0,0,0,ppos+0x10);//bus0 dev0 rc0
+ if( ((plinksta>>16)&0x0f) == (plinkcap&0x0f))
+ return 0;
+
+ plinksta = pcie_read_config_word(0,0,0,ppos+0x10);//bus0 dev0 rc0
+ plinksta |= 0x20;
+ pcie_write_config_word(0,0,0,ppos+0x10,plinksta);//bus0 dev0 rc0
+
+ mdelay(1000);
+
+ plinksta = pcie_read_config_word(0,0,0,ppos+0x10);//bus0 dev0 rc0
+
+ printk("\nRC0 Link Traing Result: %08x",plinksta);//rc0
+
+ return 1;
+}
+
+
+
+int mt7512_pcie_rc1_retrain(void)
+{
+ unsigned int pos = 0, ppos = 0,bus;
+ unsigned int linkcap, plinkcap,plinksta;
+
+ ppos = mt7512_pcie_get_pos(0,1);
+ if (mt7512_pcie_is_slave()==1)
+ bus = 1;
+ else
+ bus = 2;
+
+ pos = mt7512_pcie_get_pos(bus,0);
+
+ if (pos <0x40 || ppos < 0x40)
+ return 0;
+
+ plinkcap = pcie_read_config_word(0,0,1,ppos+0x0c);
+ linkcap = pcie_read_config_word(0,bus,0,pos+0x0c);
+
+ printk("\n mt7512_pcie_rc1_retrain: %x = %08x %x = %08x",pos,linkcap,ppos,plinkcap);
+
+ if ((linkcap&0x0f)== 1 || (plinkcap&0x0f)==1)
+ return 0;
+
+ plinksta = pcie_read_config_word(0,0,1,ppos+0x10);
+ if( ((plinksta>>16)&0x0f) == (plinkcap&0x0f))
+ return 0;
+
+ plinksta = pcie_read_config_word(0,0,1,ppos+0x10);
+ plinksta |= 0x20;
+ pcie_write_config_word(0,0,1,ppos+0x10,plinksta);
+
+ mdelay(1000);
+
+ plinksta = pcie_read_config_word(0,0,1,ppos+0x10);
+
+ printk("\nRC1 Link Traing Result: %08x",plinksta);
+
+ return 1;
+}
+
+
+void mt7512_pcie_fixup(void)
+{
+ unsigned int val = 0,tmp = 0 ,i = 0;
+
+ if (isRC0_LINKUP)
+ {
+
+ val = pcie_read_config_word(0,0,0,0x20);
+ tmp = ((val&0xffff)<<16);
+ val = (val&0xffff0000) + 0x100000;
+ val = val - tmp;
+ i = 0;
+ while(i < 32)
+ {
+ if((1<<i) >= val)
+ break;
+ i++;
+ }
+ regWrite32(0xbfb81438,tmp | i); //config RC0 to EP Addr window
+ mdelay(1);
+ regWrite32(0xbfb81448,0x80); //enable EP to RC0 access
+ printk("\n mt7512_pcie_fixup: 0x1438 = %x ",tmp | i);
+
+ mt7512_pcie_rc0_retrain();
+ }
+
+ if (isRC1_LINKUP)
+ {
+
+ val = pcie_read_config_word(0,0,1,0x20);
+ tmp = ((val&0xffff)<<16);
+ val = (val&0xffff0000) + 0x100000;
+ val = val - tmp;
+ i = 0;
+ while(i < 32)
+ {
+ if((1<<i) >= val)
+ break;
+ i++;
+ }
+ regWrite32(0xbfb83438,tmp | i); //config RC1 to EP Addr window
+ mdelay(1);
+ regWrite32(0xbfb83448,0x80); //enable EP to RC1 access
+ printk("\n mt7512_pcie_fixup: 0x3438 = %x ",tmp | i);
+
+ mt7512_pcie_rc1_retrain();
+
+ }
+
+ return ;
+}
+
+EXPORT_SYMBOL(mt7512_pcie_fixup);
+
+
+
+void mt7512_pcie_fixup_rc0(struct pci_dev *dev)
+{
+
+ unsigned int val = 0,tmp = 0 ,i = 0;
+
+ if (isRC0_LINKUP)
+ {
+ val = pcie_read_config_word(0,0,0,0x20);
+ tmp = ((val&0xffff)<<16);
+ val = (val&0xffff0000) + 0x100000;
+ val = val - tmp;
+ i = 0;
+
+ while(i < 32)
+ {
+ if((1<<i) >= val)
+ break;
+ i++;
+ }
+
+ /* config RC1 to EP Addr window */
+ regWrite32(0xbfb81438,tmp | i);
+ mdelay(1);
+
+ /* enable EP to RC1 access */
+ regWrite32(0xbfb81448,0x80);
+ printk("\n mt7512_pcie_fixup: 0x1438 = %x ",tmp | i);
+
+ mt7512_pcie_rc0_retrain();
+ }
+ return;
+}
+
+
+void mt7512_pcie_fixup_rc1(struct pci_dev *dev)
+{
+
+ unsigned int val = 0,tmp = 0 ,i = 0;
+
+
+ if (isRC1_LINKUP)
+ {
+ val = pcie_read_config_word(0,0,1,0x20);
+ tmp = ((val&0xffff)<<16);
+ val = (val&0xffff0000) + 0x100000;
+ val = val - tmp;
+ i = 0;
+ while(i < 32)
+ {
+ if((1<<i) >= val)
+ break;
+ i++;
+ }
+
+ /* config RC1 to EP Addr window */
+ regWrite32(0xbfb83438,tmp | i);
+ mdelay(1);
+
+ /* enable EP to RC1 access */
+ regWrite32(0xbfb83448,0x80);
+ printk("\n mt7512_pcie_fixup: 0x3438 = %x ",tmp | i);
+
+ mt7512_pcie_rc1_retrain();
+ }
+ return;
+
+}
+
+
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SIS, PCI_DEVICE_ID_SIS,
+ tc3162_pcie_fixup);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_RT, PCI_DEVICE_ID_RT,
+ tc3162_pcie_fixup_ra63165);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MTK, PCI_DEVICE_ID_MTK,
+ tc3162_pcie_fixup_ra63165);
+
+DECLARE_PCI_FIXUP_FINAL(0x14c3,0x0810,mt7512_pcie_fixup_rc0);
+DECLARE_PCI_FIXUP_FINAL(0x14c3,0x0811,mt7512_pcie_fixup_rc1);
+
Index: linux-3.18.21/arch/mips/pci/ops-tc3162.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/pci/ops-tc3162.c 2018-02-05 13:19:56.000000000 +0800
@@ -0,0 +1,136 @@
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <asm/addrspace.h>
+#include <asm/tc3162/tc3162.h>
+
+#define PCI_CONFIG_ADDR 0x1fb80cf8
+#define PCI_CONFIG_DATA 0x1fb80cfc
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define DBG(f, a...) printk(f, ## a )
+#else
+#define DBG(f, a...) do {} while (0)
+#endif
+
+#define PCI_ENABLE 0x80000000
+
+static DEFINE_SPINLOCK(pci_lock);	/* SPIN_LOCK_UNLOCKED was removed in 2.6.39; DEFINE_SPINLOCK is the 3.18 idiom */
+
+/* -------------------------------------------------------------------------*/
+
+static inline void write_cfgaddr(u32 addr)
+{
+ __raw_writel((addr | PCI_ENABLE),
+ (void __iomem *)(KSEG1ADDR(PCI_CONFIG_ADDR)));
+}
+
+static inline void write_cfgdata(u32 data)
+{
+ __raw_writel(data, (void __iomem *)KSEG1ADDR(PCI_CONFIG_DATA));
+}
+
+static inline u32 read_cfgdata(void)
+{
+ return __raw_readl((void __iomem *)KSEG1ADDR(PCI_CONFIG_DATA));
+}
+
+static inline u32 mkaddr(struct pci_bus *bus, unsigned int devfn, int where)
+{
+#ifndef PCIE_PCI_COEXIT
+ return (((bus->number & 0xFF) << 16) | ((devfn & 0xFF) << 8) | \
+ (where & 0xFC));
+#else
+ return (((bus->number & 0) << 16) | ((devfn & 0xFF) << 8) | \
+ (where & 0xFC));
+#endif
+}
+
+/* -------------------------------------------------------------------------*/
+
+static int tc3162_pcibios_read(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *val)
+{
+ unsigned long flags;
+ u32 data;
+
+ spin_lock_irqsave(&pci_lock, flags);
+
+ write_cfgaddr(mkaddr(bus,devfn,where));
+ data = read_cfgdata();
+
+ DBG("PCI: cfg_read %02u.%02u.%01u/%02X:%01d, cfg:0x%08X",
+ bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn),
+ where, size, data);
+
+ switch (size) {
+ case 1:
+ if (where & 1)
+ data >>= 8;
+ if (where & 2)
+ data >>= 16;
+ data &= 0xFF;
+ break;
+ case 2:
+ if (where & 2)
+ data >>= 16;
+ data &= 0xFFFF;
+ break;
+ }
+
+ *val = data;
+ DBG(", 0x%08X returned\n", data);
+
+ spin_unlock_irqrestore(&pci_lock, flags);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int tc3162_pcibios_write(struct pci_bus *bus, unsigned int devfn, int where,
+	int size, u32 val)
+{
+	unsigned long flags;
+	u32 data;
+	int s;
+
+	spin_lock_irqsave(&pci_lock, flags);
+
+	/* read-modify-write: config space is only dword-addressable */
+	write_cfgaddr(mkaddr(bus,devfn,where));
+	data = read_cfgdata();
+
+	DBG("PCI: cfg_write %02u.%02u.%01u/%02X:%01d, cfg:0x%08X",
+		bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn),
+		where, size, data);
+
+	switch (size) {
+	case 1:
+		s = ((where & 3) << 3);
+		data &= ~(0xFF << s);
+		data |= ((val & 0xFF) << s);
+		break;
+	case 2:
+		s = ((where & 2) << 3);	/* 16-bit write at offset 2 shifts by 16; "<< 4" gave an undefined 32-bit shift (cf. PCIe ops fix) */
+		data &= ~(0xFFFF << s);
+		data |= ((val & 0xFFFF) << s);
+		break;
+	case 4:
+		data = val;
+		break;
+	}
+
+	write_cfgdata(data);
+	DBG(", 0x%08X written\n", data);
+
+	spin_unlock_irqrestore(&pci_lock, flags);
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops tc3162_pci_ops = {
+ .read = tc3162_pcibios_read,
+ .write = tc3162_pcibios_write
+};
Index: linux-3.18.21/arch/mips/pci/ops-tc3162u.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/pci/ops-tc3162u.c 2018-02-05 13:19:56.000000000 +0800
@@ -0,0 +1,230 @@
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <asm/addrspace.h>
+#include <asm/tc3162/tc3162.h>
+
+uint32 pcie_config_addr = 0x1fb81cf8;
+uint32 pcie_config_data = 0x1fb81cfc;
+
+#undef DEBUG
+
+//#define DEBUG
+#ifdef DEBUG
+#define DBG(f, a...) printk(f, ## a )
+#else
+#define DBG(f, a...) do {} while (0)
+#endif
+
+#define PCIE_ENABLE 0x80000000
+
+DEFINE_SPINLOCK(pcie_lock);	/* SPIN_LOCK_UNLOCKED was removed in 2.6.39; DEFINE_SPINLOCK is the 3.18 idiom */
+
+/* -------------------------------------------------------------------------*/
+
+static inline void write_cfgaddr(u32 addr)
+{
+ __raw_writel(addr,
+ (void __iomem *)(KSEG1ADDR(pcie_config_addr)));
+// __raw_writel((addr | PCI_ENABLE),
+// (void __iomem *)(KSEG1ADDR(PCI_CONFIG_ADDR)));
+}
+
+static inline void write_cfgdata(u32 data)
+{
+ __raw_writel(data, (void __iomem *)KSEG1ADDR(pcie_config_data));
+}
+
+static inline u32 read_cfgdata(void)
+{
+ u32 tmp;
+
+ if(isRT63365)
+ tmp = __raw_readl((void __iomem *)KSEG1ADDR(pcie_config_data));
+
+ return __raw_readl((void __iomem *)KSEG1ADDR(pcie_config_data));
+}
+
+static inline u32 mkaddr(struct pci_bus *bus, unsigned int devfn, int where)
+{
+ u32 type;
+
+ if(isRT63165 || isRT63365 || isMT751020 || isMT7505 || isEN751221 || isEN751627){
+ return (((bus->number & 0xFF) << 24) | ((devfn & 0xFF) << 16) |\
+ (where & 0xFFC));
+ }else{
+ type=(bus->number & 0xFF)?PCIE_ENABLE:0;
+
+ return (type | ((bus->number & 0xFF) << 20) | ((devfn & 0xFF) << 12) | \
+ (where & 0xFFC));
+ }
+}
+
+/* -------------------------------------------------------------------------*/
+extern unsigned long int pcie_read_config_word_extend(unsigned char bus,unsigned char dev,unsigned char func,unsigned int reg);
+extern int pcie_write_config_word_extend(unsigned char bus, unsigned char dev,unsigned char func, unsigned int reg, unsigned long int value);
+
+static int mt7512_pciebios_read(struct pci_bus *bus, unsigned int devfn, int where,int size, u32 *val)
+{
+ unsigned long flags;
+ u32 data,addr;
+
+/* spin_lock_irqsave(&pcie_lock, flags);*/
+
+ addr = mkaddr(bus,devfn,where);
+ data = pcie_read_config_word_extend((addr>>24)&0xff,(addr>>19)&0x1f,(addr>>16)&0x7,addr&0xffc);
+
+/* printk("\n pcie_read: bus = %d, dev = %d func = %d, reg = %x, val = %x",(addr>>24)&0xff,(addr>>19)&0x1f,(addr>>16)&0x7,addr&0xffc,data); */
+
+ switch (size) {
+ case 1:
+ if (where & 1)
+ data >>= 8;
+ if (where & 2)
+ data >>= 16;
+ data &= 0xFF;
+ break;
+ case 2:
+ if (where & 2)
+ data >>= 16;
+ data &= 0xFFFF;
+ break;
+ }
+ *val = data;
+/* spin_unlock_irqrestore(&pcie_lock, flags);*/
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+
+static int mt7512_pciebios_write(struct pci_bus *bus, unsigned int devfn, int where,int size, u32 val)
+{
+ unsigned long flags;
+ u32 data,addr;
+ int s;
+
+
+ addr= mkaddr(bus,devfn,where);
+ data = pcie_read_config_word_extend((addr>>24)&0xff,(addr>>19)&0x1f,(addr>>16)&0x7,addr&0xffc);
+
+ switch (size) {
+ case 1:
+ s = ((where & 3) << 3);
+ data &= ~(0xFF << s);
+ data |= ((val & 0xFF) << s);
+ break;
+ case 2:
+ s = ((where & 2) << 3);
+ data &= ~(0xFFFF << s);
+ data |= ((val & 0xFFFF) << s);
+ break;
+ case 4:
+ data = val;
+ break;
+ }
+/* printk("\n pcie_write: bus = %d, dev = %d func = %d, reg = %x, val = %x",(addr>>24)&0xff,(addr>>19)&0x1f,(addr>>16)&0x7,addr&0xffc,data);*/
+
+ pcie_write_config_word_extend((addr>>24)&0xff,(addr>>19)&0x1f,(addr>>16)&0x7,addr&0xffc,data);
+ return PCIBIOS_SUCCESSFUL;
+
+
+}
+
+static int tc3162_pciebios_read(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *val)
+{
+ unsigned long flags;
+ u32 data;
+
+ spin_lock_irqsave(&pcie_lock, flags);
+
+ if(isRT63165){
+ if((devfn & 0xFF) != 0){
+ *val = 0xffffffff;
+ spin_unlock_irqrestore(&pcie_lock, flags);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ }
+
+ write_cfgaddr(mkaddr(bus,devfn,where));
+ data = read_cfgdata();
+
+ DBG("PCIE: cfg_read %02u.%02u.%01u/%02X:%01d, cfg:0x%08X",
+ bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn),
+ where, size, data);
+
+ switch (size) {
+ case 1:
+ if (where & 1)
+ data >>= 8;
+ if (where & 2)
+ data >>= 16;
+ data &= 0xFF;
+ break;
+ case 2:
+ if (where & 2)
+ data >>= 16;
+ data &= 0xFFFF;
+ break;
+ }
+
+ *val = data;
+ DBG(", 0x%08X returned\n", data);
+
+ spin_unlock_irqrestore(&pcie_lock, flags);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int tc3162_pciebios_write(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 val)
+{
+ unsigned long flags;
+ u32 data;
+ int s;
+
+ spin_lock_irqsave(&pcie_lock, flags);
+
+ write_cfgaddr(mkaddr(bus,devfn,where));
+ data = read_cfgdata();
+
+ DBG("PCIE: cfg_write %02u.%02u.%01u/%02X:%01d,%08x, cfg:0x%08X",
+ bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn),
+ where, size, val, data);
+
+ switch (size) {
+ case 1:
+ s = ((where & 3) << 3);
+ data &= ~(0xFF << s);
+ data |= ((val & 0xFF) << s);
+ break;
+ case 2:
+ //s = ((where & 2) << 4);
+ s = ((where & 2) << 3);//krammer try
+ data &= ~(0xFFFF << s);
+ data |= ((val & 0xFFFF) << s);
+ break;
+ case 4:
+ data = val;
+ break;
+ }
+
+ write_cfgdata(data);
+ DBG(", 0x%08X written\n", data);
+
+ spin_unlock_irqrestore(&pcie_lock, flags);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops mt7512_pcie_ops = {
+ .read = mt7512_pciebios_read,
+ .write = mt7512_pciebios_write
+};
+
+struct pci_ops tc3162_pcie_ops = {
+ .read = tc3162_pciebios_read,
+ .write = tc3162_pciebios_write
+};
Index: linux-3.18.21/arch/mips/pci/pci-7512api.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/pci/pci-7512api.c 2018-02-05 13:19:56.000000000 +0800
@@ -0,0 +1,685 @@
+
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <asm/tc3162/tc3162.h>
+#include <linux/proc_fs.h>
+#include <linux/io.h>
+#include <ecnt_hook/ecnt_hook_pcie.h>
+
+#if (defined(TCSUPPORT_CPU_EN7516) ||defined(TCSUPPORT_CPU_EN7527)) && defined(TCSUPPORT_AUTOBENCH)
+/*==pcie slt test=============================================*/
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/spinlock.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/timer.h>
+/*==pcie slt test=============================================*/
+#endif
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+#if defined(TCSUPPORT_CPU_EN7512) || defined(TCSUPPORT_CPU_EN7521)
+
+#if defined(TCSUPPORT_CPU_MT7505) || defined(TCSUPPORT_CPU_EN7512) || defined(TCSUPPORT_CPU_EN7521)
+#define isRC0_LINKUP ((regRead32(0xbfb80050) & 0x2) ? 1 : 0)
+#define isRC1_LINKUP ((regRead32(0xbfb80050) & 0x4) ? 1 : 0)
+#else
+#define isRC0_LINKUP ((regRead32(0xbfb82050) & 0x1) ? 1 : 0)
+#define isRC1_LINKUP ((regRead32(0xbfb83050) & 0x1) ? 1 : 0)
+#endif
+
+#define PCIE_MAJOR 225
+#define PCIE_CNT_NUMBER 6
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+typedef int (*pcie_api_op_t)(struct ecnt_pcie_data * data);
+enum {
+ PCIE_DEV_RC0 = 0,
+ PCIE_DEV_RC1,
+ PCIE_DEV_EP0,
+ PCIE_DEV_EP1,
+};
+
+/************************************************************************
+* E X T E R N A L D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+*************************************************************************
+*/
+
+#if (defined(TCSUPPORT_CPU_EN7516) ||defined(TCSUPPORT_CPU_EN7527)) && defined(TCSUPPORT_AUTOBENCH)
+/*==pcie slt test=============================================*/
+#define tc_outl(offset,val) (*(volatile unsigned long *)(offset) = val)
+#define tc_inl(offset) (*(volatile unsigned long *)(offset))
+
+#define RG_RC0_ECRC_CNT 0xBFB80054
+#define RG_RC1_ECRC_CNT 0xBFB80058
+
+#define RG_RC0_TLPCRC 0xBFB801D8
+#define RG_RC0_DLLPCRC 0xBFB801DC
+#define RG_RC0_RPL_TimeOut 0xBFB801E0
+#define RG_RC0_RPL_Rollover 0xBFB801E4
+#define RG_RC0_CPLERR 0xBFB801E8
+
+#define RG_RC1_TLPCRC 0xBFB801EC
+#define RG_RC1_DLLPCRC 0xBFB801F0
+#define RG_RC1_RPL_TimeOut 0xBFB801F4
+#define RG_RC1_RPL_Rollover 0xBFB801F8
+#define RG_RC1_CPLERR 0xBFB801FC
+
+int pcie_751627_slt_test(void){
+ unsigned long val;
+ unsigned int count, temp;
+ printk("PCIe testing!!\n");
+
+
+ if(!isRC0_LINKUP){
+ printk(" PCIE(RC0) link down\n");
+ return 1;
+ }
+
+ val = tc_inl(RG_RC0_ECRC_CNT);
+ if (val != 0){
+ printk(" PCIE have RC0 ECRC (0x%x):%ld\n", RG_RC0_ECRC_CNT, val);
+ return 1;
+ }
+ val = tc_inl(RG_RC0_TLPCRC);
+ if (val != 0){
+ printk(" PCIE have RC0 TLPCRC (0x%x):%ld\n", RG_RC0_TLPCRC, val);
+ return 1;
+ }
+ val = tc_inl(RG_RC0_DLLPCRC);
+ if (val != 0){
+ printk(" PCIE have RC0 DLLPCRC (0x%x):%ld\n", RG_RC0_DLLPCRC, val);
+ return 1;
+ }
+ val = tc_inl(RG_RC0_RPL_TimeOut);
+ if (val != 0){
+ printk(" PCIE have RC0 RPL TimeOut (0x%x):%ld\n", RG_RC0_RPL_TimeOut, val);
+ return 1;
+ }
+ val = tc_inl(RG_RC0_RPL_Rollover);
+ if (val != 0){
+ printk(" PCIE have RC0 RPL Rollover (0x%x):%ld\n", RG_RC0_RPL_Rollover, val);
+ return 1;
+ }
+ val = tc_inl(RG_RC0_CPLERR);
+ if (val != 0){
+ printk(" PCIE have RC0 CPLERR (0x%x):%ld\n", RG_RC0_CPLERR, val);
+ return 1;
+ }
+
+
+ if(isEN7513 || isEN7513G || isEN7526D || isEN7526G || isEN751627 ){
+ if(!isRC1_LINKUP){
+ printk(" PCIE(RC1) link down\n");
+ return 1;
+ }
+ val = tc_inl(RG_RC1_ECRC_CNT);
+ if (val != 0){
+ printk(" PCIE have RC1 ECRC (0x%x):%ld\n", RG_RC1_ECRC_CNT, val);
+ return 1;
+ }
+ val = tc_inl(RG_RC1_TLPCRC);
+ if (val != 0){
+ printk(" PCIE have RC1 TLPCRC (0x%x):%ld\n", RG_RC1_TLPCRC, val);
+ return 1;
+ }
+ val = tc_inl(RG_RC1_DLLPCRC);
+ if (val != 0){
+ printk(" PCIE have RC1 DLLPCRC (0x%x):%ld\n", RG_RC1_DLLPCRC, val);
+ return 1;
+ }
+ val = tc_inl(RG_RC1_RPL_TimeOut);
+ if (val != 0){
+ printk(" PCIE have RC1 RPL TimeOut (0x%x):%ld\n", RG_RC1_RPL_TimeOut, val);
+ return 1;
+ }
+ val = tc_inl(RG_RC1_RPL_Rollover);
+ if (val != 0){
+ printk(" PCIE have RC1 RPL Rollover (0x%x):%ld\n", RG_RC1_RPL_Rollover, val);
+ return 1;
+ }
+ val = tc_inl(RG_RC1_CPLERR);
+ if (val != 0){
+ printk(" PCIE have RC1 CPLERR (0x%x):%ld\n", RG_RC1_CPLERR, val);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+/*==pcie slt test=============================================*/
+#endif
+
+int pcie_write_config_word(unsigned char type, unsigned char bus, unsigned char devnum, unsigned int regnum, unsigned long int value);
+unsigned long pcie_read_config_word(unsigned char type, unsigned char bus, unsigned char devnum, unsigned int regnum);
+int pcie_api_get_confreg(struct ecnt_pcie_data *data);
+int pcie_api_set_confreg(struct ecnt_pcie_data *data);
+int pcie_api_get_aspm(struct ecnt_pcie_data *data);
+int pcie_api_set_aspm(struct ecnt_pcie_data *data);
+int pcie_api_get_speed(struct ecnt_pcie_data *data);
+int pcie_api_set_speed(struct ecnt_pcie_data *data);
+int pcie_api_get_count(struct ecnt_pcie_data *data);
+int pcie_api_get_linkstate(struct ecnt_pcie_data *data);
+int pcie_function_autobench_loopback(struct ecnt_pcie_data *data);
+
+ecnt_ret_val ecnt_pcie_api_hook(struct ecnt_data *in_data);
+static long pcie_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
+
+/************************************************************************
+* P U B L I C D A T A
+*************************************************************************
+*/
+
+struct ecnt_hook_ops ecnt_pcie_api_op = {
+ .name = "pcie_api_hook",
+ .is_execute = 1,
+ .hookfn = ecnt_pcie_api_hook,
+ .maintype = ECNT_PCIE,
+ .subtype = ECNT_PCIE_API,
+ .priority = 1
+};
+
+struct file_operations pcie_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = pcie_ioctl,
+};
+
+
+/************************************************************************
+* P R I V A T E D A T A
+*************************************************************************
+*/
+static DEFINE_SPINLOCK(pcie_api_lock);	/* SPIN_LOCK_UNLOCKED was removed in 2.6.39; DEFINE_SPINLOCK is the 3.18 idiom */
+
+static unsigned int pcie_err_reg[2][PCIE_CNT_NUMBER] =
+{
+ {0xbfb80054,0xbfb801d8,0xbfb801dc,0xbfb801e0,0xbfb801e4,0xbfb801e8},
+ {0xbfb80058,0xbfb801ec,0xbfb801f0,0xbfb801f4,0xbfb801f8,0xbfb801fc}
+};
+
+static pcie_api_op_t pcie_operation[] = {
+ pcie_api_get_confreg,
+ pcie_api_set_confreg,
+ pcie_api_get_aspm,
+ pcie_api_set_aspm,
+ pcie_api_get_speed,
+ pcie_api_set_speed,
+ pcie_api_get_count,
+ pcie_api_get_linkstate,
+ pcie_function_autobench_loopback
+};
+
+/************************************************************************
+* F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+
+
+
+int pcie_write_config_word_hw(unsigned char bus, unsigned char dev,unsigned char func, unsigned int reg, unsigned long int value)
+{
+ return pcie_write_config_word(0,bus,dev,reg,value);
+}
+
+unsigned int pcie_read_config_word_hw(unsigned char bus,unsigned char dev,unsigned char func ,unsigned int reg)
+{
+ return pcie_read_config_word(0,bus,dev,reg);
+}
+
+static int get_cap_pos(char bus,char dev,char func, char id)
+{
+ unsigned int val,pos;
+
+ val = pcie_read_config_word_hw(bus,dev,func,0x34);
+ pos = val&0xff;
+ while(pos && pos != 0xff)
+ {
+ val = pcie_read_config_word_hw(bus,dev,func,pos);
+ if ( (val&0xff) == id)
+ return pos;
+ pos = (val >> 0x08) & 0xff;
+ }
+ return 0;
+}
+
+
+static int pcie_get_confreg(int idx,int offset)
+{
+ unsigned char bus,dev,func;
+ unsigned int val;
+
+ if (offset >= 0x1000 || offset < 0)
+ return -1;
+
+ if (isRC0_LINKUP == 0 && idx == PCIE_DEV_EP0)
+ return -1;
+
+ if (isRC1_LINKUP == 0 && idx == PCIE_DEV_EP1)
+ return -1;
+
+ if (idx == PCIE_DEV_RC0){
+ bus = 0; dev = 0; func = 0;
+ }else if (idx == PCIE_DEV_RC1){
+ bus = 0; dev = 1; func = 0;
+ }else if (idx == PCIE_DEV_EP0){
+ bus = 1; dev = 0; func = 0;
+ }else if (idx == PCIE_DEV_EP1){
+ bus = 2;dev = 0; func = 0;
+ }else{
+ return -1;
+ }
+
+ offset &= 0xffc;
+
+ val = pcie_read_config_word_hw(bus,dev,func,offset);
+
+ return val;
+}
+
+
+
+static int pcie_set_confreg(int idx,int offset,unsigned int val)
+{
+ unsigned char bus,dev,func;
+
+ if (offset >= 0x1000 || offset < 0)
+ return -1;
+
+ if (isRC0_LINKUP == 0 && idx == PCIE_DEV_EP0)
+ return -1;
+
+ if (isRC1_LINKUP == 0 && idx == PCIE_DEV_EP1)
+ return -1;
+
+ if (idx == PCIE_DEV_RC0){
+ bus = 0; dev = 0; func = 0;
+ }else if (idx == PCIE_DEV_RC1){
+ bus = 0; dev = 1; func = 0;
+ }else if (idx == PCIE_DEV_EP0){
+ bus = 1; dev = 0; func = 0;
+ }else if (idx == PCIE_DEV_EP1){
+ bus = 2;dev = 0; func = 0;
+ }else{
+ return -1;
+ }
+
+
+ offset &= 0xffc;
+
+ pcie_write_config_word_hw(bus,dev,func,offset,val);
+
+ return 0;
+}
+
+
+
+static int pcie_set_aspm_ext(char bus,char dev,char func,unsigned int val)
+{
+ unsigned int pos = 0,value = 0;
+
+ pos = get_cap_pos(bus,dev,func,0x10);
+
+ if (pos < 0x40)
+ return -1;
+
+ if (val){
+ value = pcie_read_config_word_hw(bus,dev,func,pos+12);
+ value &= 0x0c00;
+ value = value >> 10;
+ if ((val > value) || ((val & value) == 0))
+ return -1;
+ }
+
+ value = pcie_read_config_word_hw(bus,dev,func,pos+16);
+
+ value &= 0xfffffffc;
+
+ value |= val;
+
+ pcie_write_config_word_hw(bus,dev,func,pos+16,value);
+
+ return 0;
+}
+
+static int pcie_set_aspm(int idx,int sw)
+{
+ sw &= 0x3;
+
+ if (idx == PCIE_DEV_RC0)
+ return pcie_set_aspm_ext(0,0,0,sw);
+
+ if (idx == PCIE_DEV_RC1)
+ return pcie_set_aspm_ext(0,1,0,sw);
+
+ if (idx == PCIE_DEV_EP0 && isRC0_LINKUP)
+ return pcie_set_aspm_ext(1,0,0,sw);
+
+ if (idx == PCIE_DEV_EP1 && isRC1_LINKUP)
+ return pcie_set_aspm_ext(2,0,0,sw);
+
+ return -1;
+}
+
+static int pcie_get_aspm_ext(char bus,char dev,char func)
+{
+ unsigned int pos = 0,value = 0;
+
+ pos = get_cap_pos(bus,dev,func,0x10);
+
+ if (pos < 0x40)
+ return -1;
+
+ value = pcie_read_config_word_hw(bus,dev,func,pos+16);
+
+ value &= 0x3;
+
+ return value;
+}
+
+static int pcie_get_aspm(int idx)
+{
+ if (idx == PCIE_DEV_RC0)
+ return pcie_get_aspm_ext(0,0,0);
+
+ if (idx == PCIE_DEV_RC1)
+ return pcie_get_aspm_ext(0,1,0);
+
+ if (idx == PCIE_DEV_EP0 && isRC0_LINKUP)
+ return pcie_get_aspm_ext(1,0,0);
+
+ if (idx == PCIE_DEV_EP1 && isRC1_LINKUP)
+ return pcie_get_aspm_ext(2,0,0);
+
+ return -1;
+}
+
+
+static int pcie_set_speed_ext(char bus,char dev,char func,unsigned int val)
+{
+ unsigned int pos = 0,value = 0;
+
+ pos = get_cap_pos(bus,dev,func,0x10);
+ if (pos < 0x40)
+ return -1;
+
+ value = pcie_read_config_word_hw(bus,dev,func,pos+0x30);
+ value &= (~0x0f);
+ value |= val ;
+ pcie_write_config_word_hw(bus,dev,func,pos+0x30,value);
+ return 0;
+}
+
+static int pcie_set_speed(int idx,unsigned int mode)
+{
+ unsigned int pos = 0,val = 0,dev ,bus;
+
+ if (idx != PCIE_DEV_EP0 && idx != PCIE_DEV_EP1)
+ return -1;
+
+ if (isRC0_LINKUP == 0 && idx == PCIE_DEV_EP0)
+ return -1;
+
+ if (isRC1_LINKUP == 0 && idx == PCIE_DEV_EP1)
+ return -1;
+
+ if (idx == PCIE_DEV_EP0)
+ {
+ dev = 0;
+ bus = 1;
+ }
+ else
+ {
+ dev = 1;
+ bus = 2;
+ }
+
+ mode &= 0x3;
+
+ pos = get_cap_pos(0,dev,0,0x10);
+ if (pos < 0x40)
+ return -1;
+
+ val = pcie_read_config_word_hw(0,dev,0,pos+0x0c);
+ if ((val&0x0f) < mode)
+ return -1;
+
+ pos = get_cap_pos(bus,0,0,0x10);
+ if (pos < 0x40)
+ return -1;
+
+ val = pcie_read_config_word_hw(bus,0,0,pos+0x0c);
+ if ((val&0x0f) < mode)
+ return -1;
+
+ pcie_set_speed_ext(0,dev,0,mode);
+ pcie_set_speed_ext(bus,0,0,mode);
+
+ val = pcie_read_config_word_hw(0,dev,0,pos+0x10);
+ val |= (1 << 5);
+ pcie_write_config_word_hw(0,dev,0,pos+0x10,val);
+ mdelay(100);
+
+ return 0;
+}
+
+
+static int pcie_get_speed(int idx)
+{
+ unsigned int pos = 0,val = 0,dev = 0;
+
+ if (idx != PCIE_DEV_EP0 && idx != PCIE_DEV_EP1)
+ return -1;
+
+ if (isRC0_LINKUP == 0 && idx == PCIE_DEV_EP0)
+ return -1;
+
+ if (isRC1_LINKUP == 0 && idx == PCIE_DEV_EP1)
+ return -1;
+
+
+ if (idx == PCIE_DEV_EP0)
+ dev = 0;
+ else
+ dev = 1;
+
+ pos = get_cap_pos(0,dev,0,0x10);
+ if (pos < 0x40)
+ return -1;
+
+ val = pcie_read_config_word_hw(0,dev,0,pos+0x10);
+ val = (val >> 16) & 0x0f ;
+
+ return val;
+}
+
+
+static int pcie_get_count(int idx, struct ecnt_pcie_count_data* pcnt)
+{
+ int i;
+
+ if (idx != PCIE_DEV_RC0 && idx != PCIE_DEV_RC1)
+ return -1;
+
+ for(i=0; i < PCIE_CNT_NUMBER; i++){
+ pcnt->err[i] = regRead32(pcie_err_reg[idx][i]);
+ }
+
+ return 0;
+}
+
+static int pcie_get_linkstate(int idx)
+{
+ if (idx == PCIE_DEV_RC0)
+ return isRC0_LINKUP;
+
+ if (idx == PCIE_DEV_RC1)
+ return isRC1_LINKUP;
+
+ return -1;
+}
+
+int pcie_api_get_confreg(struct ecnt_pcie_data *data)
+{
+ int idx = data->idx;
+ int off = data->conf.off;
+ data->retValue = pcie_get_confreg(idx,off);
+ return 0;
+}
+
+int pcie_api_set_confreg(struct ecnt_pcie_data *data)
+{
+ int idx = data->idx;
+ unsigned int off = data->conf.off;
+ unsigned int val = data->conf.val;
+ data->retValue = pcie_set_confreg(idx,off,val);
+ return 0;
+}
+
+int pcie_api_get_aspm(struct ecnt_pcie_data *data)
+{
+ int idx = data->idx;
+ data->retValue = pcie_get_aspm(idx);
+ return 0;
+}
+
+int pcie_api_set_aspm(struct ecnt_pcie_data *data)
+{
+ int idx = data->idx;
+ unsigned int val = data->conf.val;
+ data->retValue = pcie_set_aspm(idx,val);
+ return 0;
+}
+
+int pcie_api_get_speed(struct ecnt_pcie_data *data)
+{
+ int idx = data->idx;
+ data->retValue = pcie_get_speed(idx);
+ return 0;
+}
+
+int pcie_api_set_speed(struct ecnt_pcie_data *data)
+{
+ int idx = data->idx;
+ unsigned int val = data->conf.val;
+ data->retValue = pcie_set_speed(idx,val);
+ return 0;
+}
+
+int pcie_api_get_count(struct ecnt_pcie_data *data)
+{
+ int idx = data->idx;
+ data->retValue = pcie_get_count(idx, &data->cnt);
+ return 0;
+}
+
+ int pcie_api_get_linkstate(struct ecnt_pcie_data *data)
+{
+ int idx = data->idx;
+ data->retValue = pcie_get_linkstate(idx);
+ return 0;
+}
+
+ int pcie_function_autobench_loopback(struct ecnt_pcie_data *data)
+ {
+#if (defined(TCSUPPORT_CPU_EN7516) ||defined(TCSUPPORT_CPU_EN7527)) && defined(TCSUPPORT_AUTOBENCH)
+ data->retValue = pcie_751627_slt_test();
+#endif
+
+ return 0;
+ }
+
+ecnt_ret_val ecnt_pcie_api_hook(struct ecnt_data *in_data)
+{
+ struct ecnt_pcie_data *data = (struct ecnt_pcie_data *)in_data ;
+
+ if(data->function_id >= PCIE_FUNCTION_MAX_NUM) {
+ printk("pcie data->function_id is %d, exceed max number: %d", data->function_id, PCIE_FUNCTION_MAX_NUM);
+ return ECNT_HOOK_ERROR;
+ }
+
+ spin_lock(&pcie_api_lock);
+ pcie_operation[data->function_id](data) ;
+ spin_unlock(&pcie_api_lock);
+
+ return ECNT_CONTINUE;
+}
+
+static long pcie_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int ret = 0 ;
+	struct ecnt_pcie_data data;
+	struct ecnt_pcie_data* puser = (struct ecnt_pcie_data*)arg;
+
+	if (cmd >= PCIE_FUNCTION_MAX_NUM)
+		return -1;
+	memset(&data,0,sizeof(struct ecnt_pcie_data));
+	if (copy_from_user(&data, puser ,sizeof(struct ecnt_pcie_data)))
+		return -EFAULT;	/* was ignored: would dispatch on a zeroed struct */
+	if (data.function_id >= PCIE_FUNCTION_MAX_NUM)	/* untrusted index into pcie_operation[]; hook path checks it, ioctl path did not */
+		return -1;
+	spin_lock(&pcie_api_lock);
+	ret = pcie_operation[data.function_id](&data);
+	spin_unlock(&pcie_api_lock);
+	/* propagate copy failure instead of silently returning stale user data */
+	return copy_to_user(puser,&data,sizeof(struct ecnt_pcie_data)) ? -EFAULT : ret;
+}
+
+int pcie_api_init(void)
+{
+ int ret;
+
+ if(ecnt_register_hook(&ecnt_pcie_api_op)){
+ printk("pcie ecnt_dev_fe_api_op register fail\n");
+ return 0;
+ }
+
+ ret = register_chrdev(PCIE_MAJOR, "/dev/pcie", &pcie_fops);
+
+ if (ret < 0) {
+ printk(KERN_WARNING "pcie: can't get major %d\n", PCIE_MAJOR);
+ return ret;
+ }
+
+ return 0;
+}
+
+#else
+
+int pcie_api_init(void)
+{
+ return 0;
+}
+
+#endif
+
+
Index: linux-3.18.21/arch/mips/pci/pci-tc3162.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/pci/pci-tc3162.c 2018-02-05 13:19:56.000000000 +0800
@@ -0,0 +1,118 @@
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <asm/tc3162/tc3162.h>
+
+
+#ifdef CONFIG_MIPS_TC3262
+#define PCI_COMMAND_WINDOW 0xBFB80CF8
+#endif
+static struct resource tc3162_pci_io_resource = {
+ .name = "pci IO space",
+ .start = 0x1FB90000,
+ .end = 0x1FB9FFFF,
+ .flags = IORESOURCE_IO
+};
+
+static struct resource tc3162_pci_mem_resource = {
+ .name = "pci memory space",
+ .start = 0x1FBA0000,
+ .end = 0x1FBCFFFF,
+ .flags = IORESOURCE_MEM
+};
+
+extern struct pci_ops tc3162_pci_ops;
+
+struct pci_controller tc3162_controller = {
+ .pci_ops = &tc3162_pci_ops,
+ .io_resource = &tc3162_pci_io_resource,
+ .mem_resource = &tc3162_pci_mem_resource,
+};
+
+static __init int tc3162_pci_init(void)
+{
+ int pci_bios;
+ unsigned long tmp;
+
+ if(isRT63365 || isMT751020 || isMT7505 || isEN751221 || isEN751627)
+ return -1;
+
+#ifndef CONFIG_MIPS_TC3262
+ pci_bios = regRead32(CR_AHB_HWCONF) & (1<<8);
+
+ printk(KERN_INFO "tc3162: system has %sPCI BIOS\n",
+ pci_bios ? "" : "no ");
+ if (pci_bios == 0)
+ return -1;
+#endif
+ tmp = regRead32(CR_AHB_PCIC);
+ regWrite32(CR_AHB_PCIC, (tmp & ~(1<<31)));
+ mdelay(100);
+ tmp = regRead32(CR_AHB_PCIC);
+ regWrite32(CR_AHB_PCIC, (tmp | (1<<31)));
+ mdelay(300);
+
+ /* PCI memory byte swap enable */
+ /*
+ tmp = regRead32(CR_AHB_PCIC);
+ regWrite32(CR_AHB_PCIC, (tmp | ((1<<24) | (1<<25))) );
+ */
+
+#ifdef CONFIG_MIPS_TC3262
+
+ /*read pci enable bit from PCI bridge command window to check pci support.
+ shnwind*/
+ regWrite32(PCI_COMMAND_WINDOW, (1<<31));
+ pci_bios = regRead32(PCI_COMMAND_WINDOW);
+
+ printk(KERN_INFO "system has %sPCI BIOS\n",pci_bios ? "" : "no ");
+ if (pci_bios == 0){
+ return -1;
+ }
+#endif
+ /* Set I/O resource limits. */
+ ioport_resource.end = 0x1fffffff;
+ iomem_resource.end = 0xffffffff;
+
+if(isRT63165)
+{
+/* rt63165's PCI bridge has additional config registers
+ * which can be direct-accessed, such as the first 3
+ * registers shown below
+ */
+ //Disable PCI IO SWAP.
+
+ tmp = regRead32(0xbfb000ec);
+ tmp &= ~(1<<9);
+ regWrite32(0xbfb000ec, tmp);
+
+ /* configure USB Host Control Register to
+ do byte swaping in HW --Trey */
+ regWrite32(0xbfb000a8, 0x00000060);
+ mdelay(10);
+ /* set space of PCI base address
+ up to 256M --Trey*/
+ regWrite32(0xbfb80010, 0x0fff0001);
+ /* configure PCIArbitor Control Register to
+ set priority scheme --Trey*/
+ regWrite32(0xbfb80080, 0x00000079);
+
+ //set base address of PCI
+ regWrite32(0xbfb80cf8, 0x80000410);
+ regWrite32(0xbfb80cfc, 0x00000000);
+
+ //enable PCI's master, memory functions
+ regWrite32(0xbfb80cf8, 0x80000404);
+ regWrite32(0xbfb80cfc, 0xa4800016);
+
+ //set PCI's latency-timer, cache-line-size
+ regWrite32(0xbfb80cf8, 0x8000040c);
+ regWrite32(0xbfb80cfc, 0x00002008);
+}
+ register_pci_controller(&tc3162_controller);
+ return 0;
+}
+
+arch_initcall(tc3162_pci_init);
Index: linux-3.18.21/arch/mips/pci/pci-tc3162u.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/pci/pci-tc3162u.c 2018-02-05 14:20:19.000000000 +0800
@@ -0,0 +1,3392 @@
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <asm/tc3162/tc3162.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <asm/uaccess.h>
+//#define PCIE_DEBUG 1
+//#define ESD_DEBUG 1
+//#define PRE_CONFIG_DEBUG 1
+#ifdef TCSUPPORT_WLAN_MT7592_PCIE
+#define MCU_CFG_BASE 0x2000
+#define MCU_PCIE_REMAP_1 (MCU_CFG_BASE + 0x500)
+#define REMAP_1_OFFSET_MASK (0x3ffff)
+#define GET_REMAP_1_OFFSET(p) (((p) & REMAP_1_OFFSET_MASK))
+#define REMAP_1_BASE_MASK (0x3fff << 18)
+#define GET_REMAP_1_BASE(p) (((p) & REMAP_1_BASE_MASK) >> 18)
+#define MCU_PCIE_REMAP_2 (MCU_CFG_BASE + 0x504)
+#define REMAP_2_OFFSET_MASK (0x7ffff)
+#define GET_REMAP_2_OFFSET(p) (((p) & REMAP_2_OFFSET_MASK))
+#define REMAP_2_BASE_MASK (0x1fff << 19)
+#define GET_REMAP_2_BASE(p) (((p) & REMAP_2_BASE_MASK) >> 19)
+#endif
+
+#define PCIE_CONFIG_ADDR 0xbfb81cf8
+#define PCIE_CONFIG_DATA 0xbfb81cfc
+#define AHB_BUS_TIMEOUT_ERR (1<<25)
+#define AHB_BUS_ADDR_ERR (1<<24)
+
+#define NIC3090_PCIe_DEVICE_ID 0x3090 // 1T/1R miniCard
+#define NIC3091_PCIe_DEVICE_ID 0x3091 // 1T/2R miniCard
+#define NIC3092_PCIe_DEVICE_ID 0x3092 // 2T/2R miniCard
+#define NIC3390_PCIe_DEVICE_ID 0x3390 // 1T/1R miniCard
+#define NIC5390_PCIe_DEVICE_ID 0x5390
+#define NIC539F_PCIe_DEVICE_ID 0x539F
+#define NIC5392_PCIe_DEVICE_ID 0x5392
+#define NIC5592_PCIe_DEVICE_ID 0x5592
+#define NIC3593_PCIe_DEVICE_ID 0x3593
+#define PCIE60901_DEVICE_ID 0x7510
+#define NIC7603_PCIe_DEVICE_ID 0x7603
+
+#define NIC_PCI_VENDOR_ID 0x1814
+#define MTK_PCI_VENDOR_ID 0x14c3
+
+#define PCI_DEVICE_MEM1 0xbf700000
+#define PCIE_TYPE_RC 0x0
+#define PCIE_TYPE_DEV 0x1
+
+#define PCIE_BUS_0 0x0
+#define PCIE_BUS_1 0x1
+#define PCIE_BUS_2 0x2
+#define PCIE_BUS_3 0x3
+#define PCIE_DEV_0 0x0
+#define PCIE_DEV_1 0x1
+#define PCIE_BUS_RC 0x0
+#define PCIE_BUS_DEV 0x1
+#define PCIE_DEVNUM_0 0x0
+#define PCIE_DEVNUM_1 0x1
+#define PCIE_RESET_CON_BUSTIMEOUT 0
+#define PCIE_RESET_CON_PCIEERROR 1
+#define PCIE_SAVE_BUS_NUM 4
+#define PCIE_SAVE_DEVICE_NUM 2
+#define ENABLE 1
+#define DISABLE 0
+
+#define PHYSADDR(a) ((unsigned int)(a)&0x1fffffff)
+
+#if defined(TCSUPPORT_CPU_MT7505) || defined(TCSUPPORT_CPU_EN7512) || defined(TCSUPPORT_CPU_EN7521)
+#define isRC0_LINKUP ((regRead32(0xbfb80050) & 0x2) ? 1 : 0)
+#define isRC1_LINKUP ((regRead32(0xbfb80050) & 0x4) ? 1 : 0)
+#else
+#define isRC0_LINKUP ((regRead32(0xbfb82050) & 0x1) ? 1 : 0)
+#define isRC1_LINKUP ((regRead32(0xbfb83050) & 0x1) ? 1 : 0)
+
+#define isSLAVERC_LINKUP ((regRead32(0xbfb80050) & 0x1) ? 1 : 0)
+
+#endif
+
+struct pci_config_s{
+ int reg;
+ unsigned long int value;
+};
+
+#ifdef TCSUPPORT_WLAN_MT7592_PCIE
+struct pci_linkpath_s{
+ int rcbusnum;
+ int rcdevnum;
+ int epbusnum;
+ int epdevnum;
+};
+
+struct pci_probeinfo_s{
+ char rcindex;
+ char flag;
+ struct pci_linkpath_s num[2];
+};
+
+struct pci_access_s{
+ int busnum;
+ int devnum;
+ int reg;
+ unsigned long int value;
+};
+#endif
+#if defined(TCSUPPORT_BONDING)
+#define MAX_REG_CONFIG 3
+struct reg_config_s{
+ unsigned int reg;
+ unsigned int value;
+};
+#endif
+
+/**************************Global Values***************************/
+
+#if defined(TCSUPPORT_BONDING)
+unsigned long slaveVirBaseAddr = 0;
+struct reg_config_s reg_config_temp[MAX_REG_CONFIG];
+#endif
+extern uint32 pcie_config_addr;
+extern uint32 pcie_config_data;
+
+static int ahb_status=0;
+static int pcie_soft_patch=1;
+static int wifiDeviceId = 0;
+int dual_band_support = 0;
+#ifdef TCSUPPORT_RT63368_PCIE_RECOVER
+static char pcie_reset_condiction = PCIE_RESET_CON_PCIEERROR;/*change default value*/
+#else
+static char pcie_reset_condiction = PCIE_RESET_CON_BUSTIMEOUT;
+#endif
+#ifdef TCSUPPORT_RT63368_PCIE_RECOVER
+static int pcie_error_detect_count = 1;/*when pcie error count is beyond pcie_error_detect_count, do pcie reset*/
+#endif
+static int pcie_reset_count = 0;
+static char rc0_is_exist=0, rc1_is_exist=0;
+static DEFINE_SPINLOCK(pcie_esd_lock);
+struct pci_config_s *pcie_config_temp[PCIE_SAVE_BUS_NUM][PCIE_SAVE_DEVICE_NUM];//save 4 bus and 2 dev for each bus
+
+#ifdef TCSUPPORT_WLAN_MT7592_PCIE
+#define MAX_PRE_CONFIG_NUM 20
+
+enum
+{
+ MODE0RCBASEADDR = 0,
+ MODE1RCBASEADDR = 2,
+ MODE2RCBASEADDR = 4,
+};
+
+struct pci_config_s pcie_config_table_WiFi_RC0_Special[] =
+{
+ //single wifi
+ //wifi mode :(dual band)
+ //linebond mode:(only master wifi)
+ {0x18,0x00010100},
+ {-1,0},
+ //linebond mode:(only slave wifi,dualband)
+ {0x18,0x00020100},
+ {0x18,0x00020201},
+ {-1,0}
+};
+
+struct pci_config_s pcie_config_table_WiFi_RC1_Special[] =
+{
+ //single wifi
+ {0x18,0x00010100},
+ {-1,0},
+ //wifi mode :(dual band)
+ //linebond mode:(only master wifi)
+ {0x18,0x00020200},
+ {-1,0},
+ //linebond mode:(dualband)
+ {0x18,0x00020100},
+ {0x18,0x00030300},
+ {-1,0}
+};
+
+#ifdef TCSUPPORT_BONDING
+struct pci_config_s pcie_config_table_EP[] =
+{
+ {0x04,0x00100006},{0x10,0x21000000},
+ {-1,0}
+};
+
+struct pci_config_s pcie_config_table_WiFi_RC_General[] =
+{
+ //first RC1 and then RC0
+ {0x04,0x00100007},{0x20,0x21002100},
+ {-1,0},
+ {0x04,0x00100007},{0x20,0x21702000},
+ {-1,0}
+};
+struct pci_config_s pcie_config_table_WiFi_RC_Slave_General[] =
+{
+ //first RC1 and then RC0
+ {0x04,0x00100007},{0x20,0x21702100},
+ {-1,0},
+ {0x04,0x00100007},{0x20,0x21702000},
+ {-1,0}
+};
+
+struct pci_config_s pcie_config_table_WiFi_RC_Master_General[] =
+{
+ //first RC1 and then RC0
+ {0x04,0x00100007},{0x20,0x21002100},
+ {-1,0},
+ {0x04,0x00100007},{0x20,0x20002000},
+ {-1,0}
+};
+
+struct pci_config_s pcie_config_table_EP_DMT[] =
+{
+ {0x04,0x00100007},{0x10,0x20000000},
+ {-1,0}
+};
+
+#else
+struct pci_config_s pcie_config_table_EP[] =
+{
+ {0x04,0x00100006},{0x10,0x20000000},
+ {-1,0}
+};
+
+struct pci_config_s pcie_config_table_WiFi_RC_General[] =
+{
+ {0x04,0x00100007},{0x20,0x20002000},
+ {-1,0}
+};
+
+#endif
+
+struct pci_access_s pcie_config_table_temp[MAX_PRE_CONFIG_NUM];
+
+#endif
+
+/**************************function definition***************************/
+static void err_status_dump(void);
+static void err_status_clear(void);
+
+static void aer_status_dump(void);
+extern int pciePhyInit(void);
+extern void pcie_phy_force_mode_en(char enable, char rc_num);
+void aer_config(int aerEnable);
+void pcieResetRC0(void);
+void pcieResetRC1(void);
+int pcie_timeout_disable(void);
+static void aer_status_clear(void);
+unsigned long int pcie_read_config_word(unsigned char type, unsigned char bus, unsigned char devnum, unsigned int regnum);
+int pcie_write_config_word(unsigned char type, unsigned char bus, unsigned char devnum, unsigned int regnum, unsigned long int value);
+int pcie_write_config_byte(unsigned char type, unsigned char bus, unsigned char devnum, unsigned int regnum, unsigned char value);
+int pcie_reset_handler(char rc_num);
+struct pci_config_s *save_pcie_config(char bus, char dev);
+void restore_pcie_config(char bus, char dev, struct pci_config_s* restore_p);
+
+
+
+static struct resource tc3162_pcie_io_resource = {
+ .name = "pcie IO space",
+#ifdef CONFIG_MIPS_TC3162U
+ .start = 0x1FBD0000,
+ .end = 0x1FBEFFFF,
+#endif
+#ifdef CONFIG_MIPS_TC3262
+ .start = 0x1F600000,
+ .end = 0x1F61FFFF,
+#endif
+ .flags = IORESOURCE_IO
+};
+
+static struct resource tc3162_pcie_mem_resource = {
+ .name = "pcie memory space",
+ .start = 0x1F700000,
+ .end = 0x1F8FFFFF,
+ .flags = IORESOURCE_MEM
+};
+
+extern struct pci_ops tc3162_pcie_ops;
+
+struct pci_controller tc3162_pcie_controller = {
+ .pci_ops = &tc3162_pcie_ops,
+ .io_resource = &tc3162_pcie_io_resource,
+ .mem_resource = &tc3162_pcie_mem_resource,
+};
+
+extern struct pci_ops mt7512_pcie_ops;
+struct pci_controller mt7512_pcie_controller = {
+ .pci_ops = &mt7512_pcie_ops,
+ .io_resource = &tc3162_pcie_io_resource,
+ .mem_resource = &tc3162_pcie_mem_resource,
+};
+
+#ifdef TCSUPPORT_WLAN_MT7592_PCIE
+unsigned long wifi_csr_addr = 0;
+#ifdef TCSUPPORT_BONDING
+unsigned long mappingaddr = 0;
+#endif
+#define RTMP_IO_READ32(_A, _R, _pV) \
+{ \
+ (*_pV = readl((void *)((_A) + (_R)))); \
+}
+
+#define RTMP_IO_WRITE32(_A, _R, _V) \
+{ \
+ writel((_V), (void *)((_A) + (_R)));\
+}
+
+void GlobalCRWrite(unsigned char* baseAddr, unsigned long Offset, unsigned long Value)
+{
+ unsigned long RemapBase, RemapOffset;
+ unsigned long RestoreValue;
+
+ RTMP_IO_READ32(baseAddr, MCU_PCIE_REMAP_1, &RestoreValue);
+ RemapBase = GET_REMAP_1_BASE(Offset) << 18;
+ RemapOffset = GET_REMAP_1_OFFSET(Offset);
+ RTMP_IO_WRITE32(baseAddr, MCU_PCIE_REMAP_1, RemapBase);
+ RTMP_IO_WRITE32(baseAddr, 0x40000 + RemapOffset, Value);
+
+ RTMP_IO_WRITE32(baseAddr, MCU_PCIE_REMAP_1, RestoreValue);
+}
+
+void GlobalCRRead(unsigned char* baseAddr, unsigned long Offset, unsigned long *Value)
+{
+ unsigned long RemapBase, RemapOffset;
+ unsigned long RestoreValue;
+
+ RTMP_IO_READ32(baseAddr, MCU_PCIE_REMAP_1, &RestoreValue);
+ RemapBase = GET_REMAP_1_BASE(Offset) << 18;
+ RemapOffset = GET_REMAP_1_OFFSET(Offset);
+ RTMP_IO_WRITE32(baseAddr, MCU_PCIE_REMAP_1, RemapBase);
+ RTMP_IO_READ32(baseAddr, 0x40000 + RemapOffset, Value);
+
+ RTMP_IO_WRITE32(baseAddr, MCU_PCIE_REMAP_1, RestoreValue);
+}
+
+void RecoverInitialValue(void)
+{
+ struct pci_access_s * config_p = &pcie_config_table_temp[0];
+ int i = 0;
+#ifdef PRE_CONFIG_DEBUG
+ printk("\r\n====RecoverInitValue====\r\n");
+#endif
+ while((i < MAX_PRE_CONFIG_NUM) && ((*(config_p + (i))).reg != 0)){
+#ifdef PRE_CONFIG_DEBUG
+ printk("[write]pre config bus %x dev %x reg %x value %lx \n", (*(config_p + i)).busnum,(*(config_p + i)).devnum,(*(config_p + i)).reg, (*(config_p + i)).value);
+#endif
+ pcie_write_config_word(PCIE_TYPE_DEV, (*(config_p + i)).busnum, (*(config_p + i)).devnum, (*(config_p + i)).reg, (*(config_p + i)).value);
+ memset((config_p + (i)),0,sizeof(struct pci_access_s ));
+ i++;
+ }
+}
+
+/* Configure the RC first, then read the EP info; if the WiFi chip ID is 0x7603, configure the EP. */
+int RegConfigFunc(struct pci_probeinfo_s* pre_info, struct pci_config_s* pre_config_RC_S)
+{
+ int i = 0,j = 0,k = 0,m = 0;
+ int wifiDevice;
+ int rcnum = pre_info->rcindex;
+ int rcbusindex = pre_info->num[0].rcbusnum;
+ int rcdevindex = pre_info->num[0].rcdevnum;
+ int epbusindex = pre_info->num[0].epbusnum;
+ int epdevindex = pre_info->num[0].epdevnum;
+ struct pci_config_s* pre_config_RC_G = &pcie_config_table_WiFi_RC_General[0];
+ struct pci_config_s* pre_config_EP = &pcie_config_table_EP[0];
+ #ifdef TCSUPPORT_BONDING
+ struct pci_config_s* pre_config_EP_DMT;
+ #endif
+	/*step1: config RC special register to let data pass from RC to EP*/
+ while((*(pre_config_RC_S + (i))).reg != -1){
+ if(i >= MAX_PRE_CONFIG_NUM)
+ {
+ #ifdef PRE_CONFIG_DEBUG
+ printk("\r\n pcie_config_table_temp is not enough");
+ #endif
+ RecoverInitialValue();
+ i = 0;
+ }
+ pcie_config_table_temp[i].busnum = rcbusindex;
+ pcie_config_table_temp[i].devnum = rcdevindex;
+ pcie_config_table_temp[i].reg = (*(pre_config_RC_S + i)).reg;
+ pcie_config_table_temp[i].value = pcie_read_config_word(PCIE_TYPE_DEV, rcbusindex,rcdevindex, (*(pre_config_RC_S + i)).reg);
+
+ pcie_write_config_word(PCIE_TYPE_DEV, rcbusindex, rcdevindex, (*(pre_config_RC_S + i)).reg, (*(pre_config_RC_S + i)).value);
+#ifdef PRE_CONFIG_DEBUG
+ printk("[Config RC]pre config bus %x dev %x reg %x value %lx \n", rcbusindex,rcdevindex,(*(pre_config_RC_S + i)).reg, (*(pre_config_RC_S + i)).value);
+#endif
+ i++;
+#ifdef TCSUPPORT_BONDING
+ if(--rcnum > 0)
+ {
+ rcbusindex = pre_info->num[i].rcbusnum;
+ rcdevindex = pre_info->num[i].rcdevnum;
+ }
+#endif
+ }
+
+ /*step2: read EP CHIP info,if not 7603,just return*/
+ wifiDevice = pcie_read_config_word(PCIE_TYPE_DEV, epbusindex,epdevindex, 0) >> 16;
+#ifdef PRE_CONFIG_DEBUG
+ printk("[read device id]wifi config bus %x dev %x value %lx \n",epbusindex,epdevindex, pcie_read_config_word(PCIE_TYPE_DEV, epbusindex, epdevindex, 0));
+#endif
+ if(0x7603 != wifiDevice)
+ {
+ RecoverInitialValue();
+ #ifdef PRE_CONFIG_DEBUG
+ printk("\r\n not 7592 wifi,so just return");
+ #endif
+ return -1;
+ }
+
+ /*step3: config RC general register*/
+ #ifdef TCSUPPORT_BONDING
+ if(1 == pre_info->flag)
+ pre_config_RC_G = &pcie_config_table_WiFi_RC_Slave_General[0];
+ else if(2 == pre_info->flag)
+ pre_config_RC_G = &pcie_config_table_WiFi_RC_Master_General[0];
+ #endif
+
+ m = pre_info->rcindex;
+ while(--m >= 0)
+ {
+ while((*(pre_config_RC_G + (k))).reg != -1){
+ if(i >= MAX_PRE_CONFIG_NUM)
+ {
+ #ifdef PRE_CONFIG_DEBUG
+ printk("\r\n pcie_config_table_temp is not enough");
+ #endif
+ RecoverInitialValue();
+ i = 0;
+ }
+ pcie_config_table_temp[i].busnum = rcbusindex;
+ pcie_config_table_temp[i].devnum = rcdevindex;
+ pcie_config_table_temp[i].reg = (*(pre_config_RC_G + k)).reg;
+ pcie_config_table_temp[i].value = pcie_read_config_word(PCIE_TYPE_DEV, rcbusindex,rcdevindex, (*(pre_config_RC_G + k)).reg);
+
+ pcie_write_config_word(PCIE_TYPE_DEV, rcbusindex, rcdevindex, (*(pre_config_RC_G + k)).reg, (*(pre_config_RC_G + k)).value);
+#ifdef PRE_CONFIG_DEBUG
+ printk("[Config RC]pre config bus %x dev %x reg %x value %lx \n", rcbusindex,rcdevindex,(*(pre_config_RC_G + k)).reg, (*(pre_config_RC_G + k)).value);
+#endif
+ k++;
+ i++;
+ }
+ #ifdef TCSUPPORT_BONDING
+ if(m > 0)
+ {
+ k++;
+ rcbusindex = pre_info->num[m-1].rcbusnum;
+ rcdevindex = pre_info->num[m-1].rcdevnum;
+ }
+ #endif
+ };
+
+#ifdef TCSUPPORT_BONDING
+ if(1 == pre_info->flag)
+ {
+ pre_config_EP_DMT = &pcie_config_table_EP_DMT[0];
+ m = 0;
+ /*step4: config EP dmt general register,only for linebond dual band and 7592 in slave ep*/
+ while((*(pre_config_EP_DMT + (m))).reg != -1){
+ if(i >= MAX_PRE_CONFIG_NUM)
+ {
+ #ifdef PRE_CONFIG_DEBUG
+ printk("\r\n pcie_config_table_temp is not enough");
+ #endif
+ RecoverInitialValue();
+ i = 0;
+ }
+ pcie_config_table_temp[i].busnum = 1;
+ pcie_config_table_temp[i].devnum = 0;
+ pcie_config_table_temp[i].reg = (*(pre_config_EP_DMT + m)).reg;
+ pcie_config_table_temp[i].value = pcie_read_config_word(PCIE_TYPE_DEV, 1, 0, (*(pre_config_EP_DMT + m)).reg);
+
+ pcie_write_config_word(PCIE_TYPE_DEV, 1, 0, (*(pre_config_EP_DMT + m)).reg, (*(pre_config_EP_DMT + m)).value);
+#ifdef PRE_CONFIG_DEBUG
+ printk("[Config EP]pre config bus %x dev %x reg %x value %lx \n", 1,0,(*(pre_config_EP_DMT + m)).reg, (*(pre_config_EP_DMT + m)).value);
+#endif
+ i++;
+ m++;
+ }
+ mappingaddr = 0x20000000;
+ }
+ else
+ {
+ mappingaddr = 0x21000000;
+ }
+#endif
+
+ /*step5: config EP general register*/
+ while((*(pre_config_EP + (j))).reg != -1){
+ if(i >= MAX_PRE_CONFIG_NUM)
+ {
+ #ifdef PRE_CONFIG_DEBUG
+ printk("\r\n pcie_config_table_temp is not enough");
+ #endif
+ RecoverInitialValue();
+ i = 0;
+ }
+
+ pcie_config_table_temp[i].busnum = epbusindex;
+ pcie_config_table_temp[i].devnum = epdevindex;
+ pcie_config_table_temp[i].reg = (*(pre_config_EP + j)).reg;
+ pcie_config_table_temp[i].value = pcie_read_config_word(PCIE_TYPE_DEV, epbusindex, epdevindex, (*(pre_config_EP + j)).reg);
+
+ pcie_write_config_word(PCIE_TYPE_DEV, epbusindex, epdevindex, (*(pre_config_EP + j)).reg, (*(pre_config_EP + j)).value);
+#ifdef PRE_CONFIG_DEBUG
+ printk("[Config EP]pre config bus %x dev %x reg %x value %lx \n", epbusindex,epdevindex,(*(pre_config_EP + j)).reg, (*(pre_config_EP + j)).value);
+#endif
+ i++;
+ j++;
+ }
+
+
+#ifdef PRE_CONFIG_DEBUG
+ printk("\r\nsave total index is %d",i);
+#endif
+
+ return 0;
+}
+
+int pciePreConfigReg(void)
+{
+ int preconfigflag = -1;
+ int preconfigtemp = -1;
+ int rctempflag = 0;
+ struct pci_probeinfo_s preinfo;
+ struct pci_config_s* pre_config_RC_S;
+ /*
+ preconfigflag
+ 0:single wifi(include dual band with only one wifi)
+	1:wifi mode((dual band) and (linebond mode with only master wifi))
+ 2:linebond mode(only slave wifi)
+ 3:linebond mode(dual band)
+ */
+ memset(&preinfo,0,sizeof(struct pci_probeinfo_s));
+ if(isMT751020 || isRT63368 || isEN751221 || isEN751627)
+ {
+ if(dual_band_support)
+ {
+ #ifdef TCSUPPORT_BONDING
+ rctempflag = (isRC0_LINKUP)|(isRC1_LINKUP<<1)|(isSLAVERC_LINKUP<<2);
+ #else
+ rctempflag = (isRC0_LINKUP)|(isRC1_LINKUP<<1);
+ #endif
+ switch (rctempflag)
+ {
+ #ifdef TCSUPPORT_BONDING
+ case 3:
+ //master wifi
+ preconfigflag = 1;
+ break;
+ case 5:
+ //slave wifi
+ preconfigflag = 2;
+ break;
+ case 7:
+ //dual band
+ preconfigflag = 3;
+ break;
+ #else
+ case 1:
+ case 2:
+ //one wifi
+ preconfigflag = 0;
+ break;
+ case 3:
+ //dual band
+ preconfigflag = 1;
+ break;
+
+ #endif
+ default:
+ break;
+ }
+ }
+ else
+ {
+ if(isRC0_LINKUP)
+ preconfigflag = 0;
+ }
+ }
+ else if(isMT7505 || isRT63365)
+ {
+ if(isRC0_LINKUP)
+ preconfigflag = 0;
+ }
+
+
+#ifdef PRE_CONFIG_DEBUG
+ printk("preconfigflag =%d\n",preconfigflag);
+#endif
+ preinfo.rcindex = 1;
+ switch (preconfigflag)
+ {
+ case 0:
+ preinfo.num[0].rcbusnum = PCIE_BUS_0;
+ preinfo.num[0].rcdevnum = PCIE_DEV_0;
+ preinfo.num[0].epbusnum = PCIE_BUS_1;
+ preinfo.num[0].epdevnum = PCIE_DEV_0;
+ pre_config_RC_S = &pcie_config_table_WiFi_RC0_Special[MODE0RCBASEADDR];
+ preconfigtemp = RegConfigFunc(&preinfo,pre_config_RC_S);
+ break;
+ case 1:
+ #ifndef TCSUPPORT_BONDING
+ preinfo.num[0].rcbusnum = PCIE_BUS_0;
+ preinfo.num[0].rcdevnum = PCIE_DEV_0;
+ preinfo.num[0].epbusnum = PCIE_BUS_1;
+ preinfo.num[0].epdevnum = PCIE_DEV_0;
+ pre_config_RC_S = &pcie_config_table_WiFi_RC0_Special[MODE0RCBASEADDR];
+ preconfigtemp = RegConfigFunc(&preinfo,pre_config_RC_S);
+ if(!preconfigtemp)
+ return;
+ #endif
+ preinfo.num[0].rcbusnum = PCIE_BUS_0;
+ preinfo.num[0].rcdevnum = PCIE_DEV_1;
+ preinfo.num[0].epbusnum = PCIE_BUS_2;
+ preinfo.num[0].epdevnum = PCIE_DEV_0;
+ pre_config_RC_S = &pcie_config_table_WiFi_RC1_Special[MODE1RCBASEADDR];
+ preconfigtemp = RegConfigFunc(&preinfo,pre_config_RC_S);
+ break;
+ #ifdef TCSUPPORT_BONDING
+ case 2:
+ preinfo.rcindex = 2;
+ preinfo.flag = 1;
+ preinfo.num[0].rcbusnum = PCIE_BUS_0;
+ preinfo.num[0].rcdevnum = PCIE_DEV_0;
+ preinfo.num[1].rcbusnum = PCIE_BUS_1;
+ preinfo.num[1].rcdevnum = PCIE_DEV_1;
+ preinfo.num[0].epbusnum = PCIE_BUS_2;
+ preinfo.num[0].epdevnum = PCIE_DEV_0;
+ pre_config_RC_S = &pcie_config_table_WiFi_RC0_Special[MODE1RCBASEADDR];
+ preconfigtemp = RegConfigFunc(&preinfo,pre_config_RC_S);
+ break;
+ case 3:
+ preinfo.flag = 1;
+ preinfo.rcindex = 2;
+ preinfo.num[0].rcbusnum = PCIE_BUS_0;
+ preinfo.num[0].rcdevnum = PCIE_DEV_0;
+ preinfo.num[1].rcbusnum = PCIE_BUS_1;
+ preinfo.num[1].rcdevnum = PCIE_DEV_1;
+ preinfo.num[0].epbusnum = PCIE_BUS_2;
+ preinfo.num[0].epdevnum = PCIE_DEV_0;
+ pre_config_RC_S = &pcie_config_table_WiFi_RC0_Special[MODE1RCBASEADDR];
+ preconfigtemp = RegConfigFunc(&preinfo,pre_config_RC_S);
+ if(!preconfigtemp)
+ return;
+
+ preinfo.flag = 2;
+ preinfo.num[0].rcbusnum = PCIE_BUS_0;
+ preinfo.num[0].rcdevnum = PCIE_DEV_0;
+ preinfo.num[1].rcbusnum = PCIE_BUS_0;
+ preinfo.num[1].rcdevnum = PCIE_DEV_1;
+ preinfo.num[0].epbusnum = PCIE_BUS_3;
+ preinfo.num[0].epdevnum = PCIE_DEV_0;
+ pre_config_RC_S = &pcie_config_table_WiFi_RC1_Special[MODE2RCBASEADDR];
+ preconfigtemp = RegConfigFunc(&preinfo,pre_config_RC_S);
+ break;
+ #endif
+ default:
+ #ifdef PRE_CONFIG_DEBUG
+ printk("\r\nflag not support:%d",preconfigflag);
+ #endif
+ preconfigtemp = -2;
+ break;
+ }
+
+#ifdef PRE_CONFIG_DEBUG
+ printk("\r\nreturn preconfigflag =%d\n",preconfigflag);
+#endif
+
+ return preconfigtemp;
+}
+
+void SetLDO(void)
+{
+
+ unsigned int Value1 = 0;
+ unsigned int Value2 = 0;
+ int cnt = 10;
+ unsigned char *baseAddr = (unsigned char *)wifi_csr_addr;
+
+ while(cnt--){
+ GlobalCRWrite(baseAddr, 0x50012498, 0x5);
+ GlobalCRRead(baseAddr, 0x50012498, &Value1);
+ if(Value1 == 0x5) //Setting success
+ break;
+ }
+
+ if(Value1 != 0x5){
+ printk("Wifi CR 0x%x value setting fail(Value=0x%x)\n", 0x50012498, Value1);
+ }
+
+ while(cnt--){
+ GlobalCRWrite(baseAddr, 0x50012040, 0x80);
+ GlobalCRRead(baseAddr, 0x50012040, &Value2);
+ if(Value2 == 0x80) //Setting success
+ break;
+ }
+
+ if(Value2 != 0x80){
+ printk("Wifi CR 0x%x value setting fail(Value=0x%x)\n", 0x50012040, Value2);
+ }
+
+ printk("CR 0x%x value is 0x%x\n", 0x50012498, Value1);
+ printk("CR 0x%x value is 0x%x\n", 0x50012040, Value2);
+}
+
+void preConfigLDO(void)
+{
+ int i,preconfigflag;
+ for(i = 0;i < MAX_PRE_CONFIG_NUM; i++)
+ {
+ memset(&pcie_config_table_temp[i],0,sizeof(struct pci_access_s ));
+ }
+ preconfigflag = pciePreConfigReg();
+ if(!preconfigflag)
+ {
+#ifdef TCSUPPORT_BONDING
+ wifi_csr_addr = (unsigned long)ioremap(mappingaddr,0x100000);
+#else
+ wifi_csr_addr = (unsigned long)ioremap(0x20000000,0x100000);
+#endif
+ SetLDO();
+ RecoverInitialValue();
+ iounmap((char *)wifi_csr_addr);
+ }
+}
+#endif
+
+struct pci_config_s *save_pcie_config(char bus, char dev){
+ int reco_reg[] = {0x4, 0xc, 0x10, 0x18, 0x1c, 0x20, 0x24, 0x30, 0x3c, 0x50, 0x54, 0x5c, 0x154, 0x160, -1};
+ int i, num_reg;
+ struct pci_config_s * ret_p;
+#ifdef ESD_DEBUG
+ printk("save pcie config bus %d dev %d\n",bus,dev);
+#endif
+ i = 0;
+ num_reg = 0;
+
+ if(pcie_read_config_word(PCIE_TYPE_RC, bus, dev, 0) == 0xffffffff)//means no device
+ return NULL;
+
+	while(reco_reg[num_reg++] != -1){}; // calculate total reg_num
+ ret_p = (struct pci_config_s *)kzalloc(sizeof(struct pci_config_s) * num_reg ,GFP_KERNEL);
+ if(ret_p == NULL)
+ return NULL;
+ for(i=0; i<num_reg; i++){
+ (*(ret_p + i)).reg = reco_reg[i];
+ if(reco_reg[i] != -1){
+ (*(ret_p + i)).value= pcie_read_config_word(PCIE_TYPE_RC, bus, dev, reco_reg[i]);
+#ifdef ESD_DEBUG
+ printk("reg %x value %lx\n",(*(ret_p + i)).reg,(*(ret_p + i)).value);
+#endif
+ }
+ }
+ return ret_p;
+}
+void restore_pcie_config(char bus, char dev, struct pci_config_s* restore_p){
+ int i;
+#ifdef ESD_DEBUG
+ printk("restore pcie config bus %d dev %d\n", bus, dev);
+#endif
+ if(restore_p == NULL){
+ printk("Recover BUS %d DEV %d Faill !!\n",bus,dev);
+ return;
+ }
+ i = 0;
+ while((*(restore_p + (i))).reg != -1){
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, (*(restore_p + i)).reg, (*(restore_p + i)).value);
+#ifdef ESD_DEBUG
+ printk("restore reg %x value %lx \n", (*(restore_p + i)).reg, (*(restore_p + i)).value);
+#endif
+ i++;
+ }
+
+}
+void pcie_int_enable(char enable, char rc_num){
+ unsigned long int tmp;
+
+ if(enable == ENABLE){
+ if(rc_num == 0){//Enable Interrupt
+ #ifdef TCSUPPORT_RT63368_PCIE_RECOVER
+ if(isMT751020 || isMT7505 || isRT63368){
+ #else
+ if(isMT751020 || isMT7505 || isEN751221 || isEN751627){
+ #endif
+ tmp = regRead32(0xbfb8000c);
+#if defined(TCSUPPORT_BONDING)
+ /* slave dmt, error interrupt, slave gdma */
+ tmp |= ((1<<23) | (1<<24) | (1<<25));
+#else
+ tmp |= (1<<20);
+#endif
+ regWrite32(0xbfb8000c, tmp);
+ }
+ }else{//only need rc0 or 1
+ if(isMT751020 || isEN751221 || isEN751627){
+ tmp = regRead32(0xbfb8000c);
+ tmp |= (1<<26);
+ regWrite32(0xbfb8000c, tmp);
+ }
+ #ifdef TCSUPPORT_RT63368_PCIE_RECOVER
+ else if(isRT63368){
+ tmp = regRead32(0xbfb8000c);
+ tmp |= (1<<21);
+ regWrite32(0xbfb8000c, tmp);
+ }
+ #endif
+ }
+ }else{//Disable Interrupt
+ if(rc_num == 0){
+ #ifdef TCSUPPORT_RT63368_PCIE_RECOVER
+ if(isMT751020 || isMT7505 || isRT63368){
+ #else
+ if(isMT751020 || isMT7505 || isEN751221 || isEN751627){
+ #endif
+ tmp = regRead32(0xbfb8000c);
+#if defined(TCSUPPORT_BONDING)
+ /* slave dmt, error interrupt, slave gdma */
+ tmp &= ~((1<<23) | (1<<24) | (1<<25));
+#else
+ tmp &= ~(1<<20);
+#endif
+ regWrite32(0xbfb8000c, tmp);
+ }
+ }else{
+ if(isMT751020 || isEN751221 || isEN751627){
+ tmp = regRead32(0xbfb8000c);
+ tmp &= ~(1<<26);
+ regWrite32(0xbfb8000c, tmp);
+ }
+ #ifdef TCSUPPORT_RT63368_PCIE_RECOVER
+ else if(isRT63368){
+ tmp = regRead32(0xbfb8000c);
+ tmp &= ~(1<<21);
+ regWrite32(0xbfb8000c, tmp);
+ }
+ #endif
+ }
+ }
+}
+
+int pcie_reset_handler(char rc_num){
+ int wifi_dev_bus_id = 1;
+ #if defined(TCSUPPORT_BONDING)
+ int i;
+ #endif
+
+ if((rc_num == 0) || (rc_num == 2)){
+ pcie_reset_count ++;
+ aer_config(0);
+ pcieResetRC0();
+ if(isRC0_LINKUP){
+ restore_pcie_config(PCIE_BUS_0, PCIE_DEV_0, pcie_config_temp[PCIE_BUS_0][PCIE_DEV_0]);
+ restore_pcie_config(PCIE_BUS_1, PCIE_DEV_0, pcie_config_temp[PCIE_BUS_1][PCIE_DEV_0]);
+
+#ifdef TCSUPPORT_BONDING
+#ifdef TCSUPPORT_CPU_EN7512
+
+ printk(" pcie_reset_handler function is not implement in chip 7512.");
+#else
+ if(isSLAVERC_LINKUP){
+ wifi_dev_bus_id++;
+ restore_pcie_config(PCIE_BUS_1, PCIE_DEV_1, pcie_config_temp[PCIE_BUS_1][PCIE_DEV_1]);//slave RC
+ restore_pcie_config(wifi_dev_bus_id, PCIE_DEV_0, pcie_config_temp[wifi_dev_bus_id][PCIE_DEV_0]);//slave WIFI
+ }else{
+ wifi_dev_bus_id = -1; //no wifi
+ }
+
+ for (i = 0; i < MAX_REG_CONFIG; i++)
+ regWrite32(reg_config_temp[i].reg, reg_config_temp[i].value);
+#endif /*TCSUPPORT_CPU_EN7512*/
+#endif/*TCSUPPORT_BONDING*/
+ if(wifi_dev_bus_id != -1){
+ wifiDeviceId = pcie_read_config_word(PCIE_TYPE_DEV, wifi_dev_bus_id, PCIE_DEV_0, 0) >> 16;
+#ifdef ESD_DEBUG
+ printk("wifi DeviceID %x\n",wifiDeviceId);
+#endif
+ }
+ pcie_int_enable(ENABLE, 0);
+ pcie_timeout_disable();
+ aer_status_clear();
+ aer_config(1);
+ }else{
+ printk("RC0 Dead\n");
+ return -1;
+ }
+
+ #if defined(TCSUPPORT_CPU_RT63368) ||defined(TCSUPPORT_CPU_MT7520) || defined(TCSUPPORT_CPU_MT7510)|| defined(TCSUPPORT_CPU_MT7505)
+ #if 0/*def TCSUPPORT_WLAN_MT7592_PCIE*/
+ if(0x7603 == wifiDeviceId)
+ preConfigLDO();
+ #endif
+ #endif
+
+ }else if(rc_num == 1){
+ pcie_reset_count ++;
+ aer_config(0);
+ pcieResetRC1();
+ if(isRC1_LINKUP){
+ restore_pcie_config(PCIE_BUS_0, PCIE_DEV_1, pcie_config_temp[PCIE_BUS_0][PCIE_DEV_1]);
+ if(isRC0_LINKUP){
+ wifi_dev_bus_id++;
+
+#ifdef TCSUPPORT_BONDING
+#ifdef TCSUPPORT_CPU_EN7512
+ /*pcie_reset_handler function is not implement in chip 7512.*/
+#else
+ if(isSLAVERC_LINKUP){
+ wifi_dev_bus_id++;
+ restore_pcie_config(wifi_dev_bus_id, PCIE_DEV_0, pcie_config_temp[wifi_dev_bus_id][PCIE_DEV_0]);
+ }else
+#endif /*TCSUPPORT_CPU_EN7512*/
+#endif /*TCSUPPORT_BONDING*/
+ {
+ restore_pcie_config(wifi_dev_bus_id, PCIE_DEV_0, pcie_config_temp[wifi_dev_bus_id][PCIE_DEV_0]);
+ }
+ }else{
+ restore_pcie_config(wifi_dev_bus_id, PCIE_DEV_0, pcie_config_temp[wifi_dev_bus_id][PCIE_DEV_0]);
+ }
+ if(wifi_dev_bus_id != -1){
+ wifiDeviceId = pcie_read_config_word(PCIE_TYPE_DEV, wifi_dev_bus_id, PCIE_DEV_0, 0) >> 16;
+#ifdef ESD_DEBUG
+ printk("wifi DeviceID %x\n",wifiDeviceId);
+#endif
+ }
+ pcie_int_enable(ENABLE, 1);
+ pcie_timeout_disable();
+ aer_status_clear();
+ aer_config(1);
+ }else{
+ printk("RC1 Dead\n");
+
+ return -1;
+ }
+ #if defined(TCSUPPORT_CPU_RT63368) ||defined(TCSUPPORT_CPU_MT7520) || defined(TCSUPPORT_CPU_MT7510)|| defined(TCSUPPORT_CPU_MT7505)
+ #if 0/*def TCSUPPORT_WLAN_MT7592_PCIE*/
+ if(0x7603 == wifiDeviceId)
+ preConfigLDO();
+ #endif
+ #endif
+
+ }else{
+ printk("RC ID %d Fail\n",rc_num);
+ return -1;
+ }
+
+
+ return 0;
+}
+
+#ifdef TCSUPPORT_RT63368_PCIE_RECOVER
+/*______________________________________________________________________________
+**function name:pcie_reset_handler_o
+**
+**description:
+* 63368 reset handler
+**parameters:
+* none
+**global:
+* none
+**return:
+* 0:success
+* -1:fail
+**call:
+* start_cc()
+**revision:
+* Brian.shi
+**____________________________________________________________________________*/
+int pcie_reset_handler_o()
+{
+ int wifi_dev_bus_id = 1;
+ int ret = 0;
+ prom_printf("\r\n======pcie_reset_handler_o=======");
+ pcieReset();
+ if(isRC0_LINKUP){
+ restore_pcie_config(PCIE_BUS_0, PCIE_DEV_0, pcie_config_temp[PCIE_BUS_0][PCIE_DEV_0]);
+ restore_pcie_config(PCIE_BUS_1, PCIE_DEV_0, pcie_config_temp[PCIE_BUS_1][PCIE_DEV_0]);
+ if(wifi_dev_bus_id != -1){
+ wifiDeviceId = pcie_read_config_word(PCIE_TYPE_DEV, wifi_dev_bus_id, PCIE_DEV_0, 0) >> 16;
+#ifdef ESD_DEBUG
+ prom_printf("wifi DeviceID %x\n",wifiDeviceId);
+#endif
+ }
+ pcie_int_enable(ENABLE, 0);
+ pcie_timeout_disable();
+ }else{
+ printk("RC0 Dead\n");
+ ret += 1;
+ }
+
+ if(isRC1_LINKUP){
+ if(isRC0_LINKUP)
+ {
+ wifi_dev_bus_id++;
+ restore_pcie_config(PCIE_BUS_0, PCIE_DEV_1, pcie_config_temp[PCIE_BUS_0][PCIE_DEV_1]);
+
+ }else
+ {
+ restore_pcie_config(PCIE_BUS_0, PCIE_DEV_0, pcie_config_temp[PCIE_BUS_0][PCIE_DEV_0]);
+ }
+ restore_pcie_config(wifi_dev_bus_id, PCIE_DEV_0, pcie_config_temp[wifi_dev_bus_id][PCIE_DEV_0]);
+
+ if(wifi_dev_bus_id != -1){
+ wifiDeviceId = pcie_read_config_word(PCIE_TYPE_DEV, wifi_dev_bus_id, PCIE_DEV_0, 0) >> 16;
+#ifdef ESD_DEBUG
+ prom_printf("wifi 5g DeviceID %x\n",wifiDeviceId);
+#endif
+ }
+ pcie_int_enable(ENABLE, 1);
+ pcie_timeout_disable();
+ }else{
+ printk("RC1 Dead\n");
+ ret += 1;
+ }
+
+ if(ret > 1)
+ return -1;
+
+ return 0;
+}
+#endif
+
+void pcie_check(char force){
+ uint32 val, addr;
+ unsigned long flags;
+ char need_reset = 0;
+
+ if(pcie_soft_patch == 0){
+ return;
+ }
+
+ spin_lock_irqsave(&pcie_esd_lock, flags);
+ if(pcie_reset_condiction == PCIE_RESET_CON_BUSTIMEOUT){
+ //Read Config 0 for RC0/RC1 to generate Bustime out
+ if(force == 0){
+ if(rc0_is_exist){
+ val = regRead32(0xbfb82034);
+ }
+ if(rc1_is_exist){
+ val = regRead32(0xbfb83034);
+ }
+ if(regRead32(CR_PRATIR)){
+ //bus timeout happened
+ addr = regRead32(CR_ERR_ADDR);
+ if(((addr >= 0x1fb80000) && (addr <= 0x1fb83fff))
+ || ((addr >= 0x20000000) && (addr <= 0x2fffffff)) )
+ {
+ if(isMT7505 || isEN751221 || isEN751627){
+ regWrite32(CR_PRATIR, 1);
+ }
+ else
+ {
+ regWrite32(CR_PRATIR, 0);
+ }
+ printk("check bustimeout and reset %lx\n",addr);
+ need_reset = 1;
+ }
+ }
+ }else{
+ addr = regRead32(CR_ERR_ADDR);
+ if(((addr >= 0x1fb80000) && (addr <= 0x1fb83fff))
+ || ((addr >= 0x20000000) && (addr <= 0x2fffffff)) )
+ {
+ if(isMT7505 || isEN751221 || isEN751627){
+ regWrite32(CR_PRATIR, 1);
+ }
+ else
+ {
+ regWrite32(CR_PRATIR, 0);
+ }
+ printk("bustimeout int and reset %lx\n",addr);
+ need_reset = 1;
+ }
+ }
+
+ if(need_reset){
+ if(rc0_is_exist){
+ if(pcie_reset_handler(0) == 0){
+ ahb_status = 1;
+ }
+ }
+ if(rc1_is_exist){
+ if(pcie_reset_handler(1) == 0){
+ ahb_status = 1;
+ }
+ }
+ }
+ }else if(pcie_reset_condiction == PCIE_RESET_CON_PCIEERROR){
+#ifdef TCSUPPORT_RT63368_PCIE_RECOVER
+ /*use pcie_reset_handler_o() do reset(63368 reset rc0 & rc1 using one register one bit)*/
+ if(pcie_reset_handler_o() == 0){
+ ahb_status = 1;
+ }
+#else
+ val = regRead32(0xbfb80060);
+ if((val & (1<<1)) != 0){
+ regWrite32(0xbfb82070, 1); //clear interrupt status
+ regWrite32(0xbfb82070, 0); //set interrupt status
+
+ if(pcie_reset_handler(0) == 0){
+ ahb_status = 1;
+ }
+ }
+ if((val & (1<<2)) != 0){
+ regWrite32(0xbfb83070, 1); //clear interrupt status
+ regWrite32(0xbfb83070, 0); //set interrupt status
+
+ if(pcie_reset_handler(1) == 0){
+ ahb_status = 1;
+ }
+
+ }
+#if defined(TCSUPPORT_BONDING)
+ if((val & (1<<0)) != 0){
+ if(slaveVirBaseAddr == 0){
+ spin_unlock_irqrestore(&pcie_esd_lock, flags);
+ printk("No slaveVirBaseAddr\n");
+ return;
+ }
+ regWrite32(slaveVirBaseAddr + 0xb83070, 1); //clear interrupt status
+ regWrite32(slaveVirBaseAddr + 0xb83070, 0); //set interrupt status
+
+ if(pcie_reset_handler(2) == 0){
+ ahb_status = 1;
+ }
+ }
+#endif
+#endif
+ }
+ spin_unlock_irqrestore(&pcie_esd_lock, flags);
+ return;
+}
+
+void setahbstat(int val){
+ ahb_status = val;
+}
+EXPORT_SYMBOL(setahbstat);
+
+void pcieReset(void){
+ int i;
+ unsigned long tmp;
+
+ if(isRT63165){
+ return;
+ }
+ if(isRT63365){
+ //disable interrupt
+ if(dual_band_support){
+ tmp = regRead32(0xbfb8000c);
+ regWrite32(0xbfb8000c, (tmp & (~((1<<20) | (1<<21)) )));
+ }else{
+ tmp = regRead32(0xbfb8000c);
+ regWrite32(0xbfb8000c, (tmp & ~(1<<20)));
+ }
+ mdelay(5);
+ //PCI-E reset
+ if (isFPGA) {
+ //FPGA mode
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp & ~(1<<26)));
+ mdelay(1);
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp | (1<<26)));
+ mdelay(1);
+ }else{
+			//rt63368: enabling pci-e port1 while port1 has no power would cause a hang. shnwind.
+ if(dual_band_support){
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp & (~((1<<26) | (1<<27) | (1<<29)))));
+ mdelay(1);
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp | ((1<<26) | (1<<27) | (1<<29))));
+ #ifdef MT7592
+ mdelay(100);
+ #else
+ mdelay(1);
+ #endif
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp & (~((1<<26) | (1<<27) | (1<<29)))));
+ mdelay(1);
+ }else{
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp & (~((1<<26) | (1<<29)))));
+ mdelay(1);
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp | ((1<<26) | (1<<29))));
+ #ifdef MT7592
+ mdelay(100);
+ #else
+ mdelay(1);
+ #endif
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp & (~((1<<26) | (1<<29)))));
+ mdelay(1);
+ }
+ }
+
+ tmp = regRead32(0xbfb80000);
+ regWrite32(0xbfb80000, (tmp | (1<<1)));
+ mdelay(1);
+ tmp = regRead32(0xbfb80000);
+ regWrite32(0xbfb80000, (tmp & ~(1<<1)));
+ mdelay(1);
+ //wait device link up
+ for(i=0 ; i<1000 ; i++){
+ mdelay(1);
+ if(dual_band_support){
+#ifdef TCSUPPORT_RT63368_PCIE_RECOVER
+ /*need to dualband all up*/
+ if(isRC0_LINKUP && isRC1_LINKUP){
+#else
+ if(isRC0_LINKUP || isRC1_LINKUP){
+#endif
+ break;
+ }
+ }else{
+ if(isRC0_LINKUP){
+ break;
+ }
+ }
+ }
+ if(i == 1000){
+ printk("PCI-E RC can not link up\n");
+ return ;
+ }
+ if(dual_band_support){
+ if(isRC0_LINKUP){
+ //config PCI-E RC
+ regWrite32(0xbfb82010, 0xffff0001); //disable support BAR0
+
+ //change class PCI-PCI Bridge
+ regWrite32(0xbfb82034, 0x06040001);
+ }else{
+ //disable port 0
+ tmp = regRead32(0xbfb00088);
+ regWrite32(0xbfb00088, (tmp & ~(1<<23)));
+ mdelay(1);
+ }
+
+ if(isRC1_LINKUP){
+ //config PCI-E RC
+ regWrite32(0xbfb83010, 0xffff0001); //disable support BAR0
+
+ //change class PCI-PCI Bridge
+ regWrite32(0xbfb83034, 0x06040001);
+ }else{
+ //disable port 1
+ tmp = regRead32(0xbfb00088);
+ regWrite32(0xbfb00088, (tmp & ~(1<<22)));
+ mdelay(1);
+ }
+ }else{
+ //config PCI-E RC
+ regWrite32(0xbfb82010, 0xffff0001); //disable support BAR0
+
+ //change class PCI-PCI Bridge
+ regWrite32(0xbfb82034, 0x06040001);
+ }
+
+ //Enable CRC count .
+ regWrite32(KSEG1ADDR(pcie_config_addr), 0x118);
+ tmp = regRead32(KSEG1ADDR(pcie_config_data));
+ regWrite32(KSEG1ADDR(pcie_config_data), (tmp | (1<<8)));
+
+ }else{
+ tmp = regRead32(CR_AHB_PCIC);
+ regWrite32(CR_AHB_PCIC, (tmp & ~(1<<29)));
+ mdelay(5);
+ tmp = regRead32(CR_AHB_PCIC);
+ regWrite32(CR_AHB_PCIC, (tmp & ~(1<<30)));
+ mdelay(5);
+ tmp = regRead32(CR_AHB_PCIC);
+ regWrite32(CR_AHB_PCIC, (tmp | (1<<29)));
+ mdelay(5);
+ tmp = regRead32(CR_AHB_PCIC);
+ regWrite32(CR_AHB_PCIC, (tmp | (1<<30)));
+ mdelay(5);
+ /*force link up, workaround the pcie hardware problems.*/
+ if(isTC3162U){
+ regWrite32(KSEG1ADDR(pcie_config_addr), 0x40);
+ regWrite32(KSEG1ADDR(pcie_config_data), 0x20);
+ }
+ }
+}
+
+EXPORT_SYMBOL(pcieReset);
+
+
+void pcieResetRC0(void){
+ unsigned long tmp;
+
+ if(!isMT751020 && !isMT7505 && !isEN751221 && !isEN751627){
+ return;
+ }
+
+ pcie_int_enable(DISABLE, 0);
+ mdelay(5);
+
+ //PCI-E reset
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp & (~(1<<26))));
+ mdelay(1);
+ regWrite32(0xbfb00834, (tmp | (1<<26)));
+#ifdef MT7592
+ mdelay(100);
+#else
+ mdelay(1);
+#endif
+ regWrite32(0xbfb00834, (tmp & (~(1<<26))));
+ mdelay(1);
+
+#if defined(TCSUPPORT_BONDING)
+ //printk("reset slave chip \n");
+ tmp = regRead32(0xbfbf0214);
+ regWrite32(0xbfbf0214, (tmp | (1<<6)));
+ tmp = regRead32(0xbfbf0234);
+ regWrite32(0xbfbf0234, (tmp & ~(1<<6)));
+ tmp = regRead32(0xbfbf021c);
+ regWrite32(0xbfbf021c, (tmp & (~((1<<12) | (1<<13)))));
+ tmp = regRead32(0xbfbf0200);
+ regWrite32(0xbfbf0200, (tmp & ~(1<<13)));
+ tmp = regRead32(0xbfbf0200);
+ regWrite32(0xbfbf0200, (tmp | (1<<12)));
+ if (isFPGA) {
+ //printk("isFPGA \n");
+ tmp = regRead32(0xbfbf0204);
+ regWrite32(0xbfbf0204, (tmp | (1<<6)));
+ mdelay(1);
+ tmp = regRead32(0xbfbf0204);
+ regWrite32(0xbfbf0204, (tmp & ~(1<<6)));
+ }else{
+ tmp = regRead32(0xbfbf0204);
+ regWrite32(0xbfbf0204, (tmp & ~(1<<6)));
+ mdelay(1);
+ tmp = regRead32(0xbfbf0204);
+ regWrite32(0xbfbf0204, (tmp | (1<<6)));
+ }
+ tmp = regRead32(0xbfbf0214);
+ regWrite32(0xbfbf0214, (tmp & ~(1<<6)));
+
+ //disable VC1
+ regWrite32(0xbfb80020, 0x160);
+ regWrite32(0xbfb80024, 0x0);
+#endif
+ if(isMT7505 || isEN751221 || isEN751627)
+ {
+ tmp = regRead32(0xbfb00088);
+ regWrite32(0xbfb00088, (tmp | ((1<<29) | (1<<26))));
+ }
+ else
+ {
+ tmp = regRead32(0xbfb80000);
+ regWrite32(0xbfb80000, (tmp & (~((1<<1) | (1<<2)))));
+ }
+ //wait device link up
+ mdelay(250);
+ //printk("#isRC0_LINKUP=%x \n", isRC0_LINKUP);
+#if defined(TCSUPPORT_BONDING)
+ //printk("isSLAVERC_LINKUP=%x \n", isSLAVERC_LINKUP);
+#endif
+
+ if(isRC0_LINKUP){
+ //config PCI-E RC
+ regWrite32(0xbfb82010, 0xffff0001); //disable support BAR0
+
+ //change class PCI-PCI Bridge
+ regWrite32(0xbfb82034, 0x06040001);
+
+ //Enable CRC count .
+ regWrite32(KSEG1ADDR(pcie_config_addr), 0x118);
+ tmp = regRead32(KSEG1ADDR(pcie_config_data));
+ regWrite32(KSEG1ADDR(pcie_config_data), (tmp | (1<<8)));
+ pcie_phy_force_mode_en(ENABLE, 0);
+ }else{
+ //disable port 0
+ tmp = regRead32(0xbfb00088);
+ regWrite32(0xbfb00088, (tmp & ~(1<<23)));
+ mdelay(1);
+ }
+
+#if defined(TCSUPPORT_BONDING) && !defined(TCSUPPORT_CPU_EN7512)
+ if(isSLAVERC_LINKUP){
+ pcie_phy_force_mode_en(ENABLE, 2);
+ }
+ else{
+ //disable slave RC
+ tmp = regRead32(0xbfb00088);
+ regWrite32(0xbfb00088, (tmp & ~(1<<24)));
+ mdelay(1);
+ }
+#endif
+
+
+ return;
+}
+EXPORT_SYMBOL(pcieResetRC0);
+
+void pcieResetRC1(void){
+ unsigned long tmp;
+
+ if(!isMT751020){
+ return;
+ }
+
+ pcie_int_enable(DISABLE, 1);
+ mdelay(5);
+
+ //PCI-E reset
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp & ~(1<<27)));
+ mdelay(1);
+ regWrite32(0xbfb00834, (tmp | (1<<27)));
+#ifdef MT7592
+ mdelay(100);
+#else
+ mdelay(1);
+#endif
+ regWrite32(0xbfb00834, (tmp & ~(1<<27)));
+ mdelay(1);
+
+ tmp = regRead32(0xbfb80000);
+ regWrite32(0xbfb80000, (tmp & (~((1<<1) | (1<<2)))));
+ //wait device link up
+ mdelay(250);
+ //printk("isRC1_LINKUP=%d \n", isRC1_LINKUP);;
+ if(isRC1_LINKUP){
+ //config PCI-E RC
+ regWrite32(0xbfb83010, 0xffff0001); //disable support BAR0
+
+ //change class PCI-PCI Bridge
+ regWrite32(0xbfb83034, 0x06040001);
+
+ //Enable RC1 ECRC count . //bus0, dev1
+ regWrite32(KSEG1ADDR(pcie_config_addr), 0x80118);
+ tmp = regRead32(KSEG1ADDR(pcie_config_data));
+ regWrite32(KSEG1ADDR(pcie_config_data), (tmp | (1<<8)));
+ pcie_phy_force_mode_en(ENABLE, 1);
+ }else{
+ //disable port 1
+ tmp = regRead32(0xbfb00088);
+ regWrite32(0xbfb00088, (tmp & ~(1<<22)));
+ mdelay(1);
+ }
+
+ return;
+}
+
+
+extern spinlock_t pcie_lock;
+
+int mt7512_pcie_is_slave(void)
+{
+ if (isFPGA)
+ return (regRead32(0xbfb0008c) & (1 << 30)) == (1 << 30)?1:0;
+
+ return (regRead32(0xbfb0008c) & (1 << 5)) == 0?1:0;
+}
+
+static int get_rc_port(unsigned char bus,unsigned char dev)
+{
+ int rc = 2;
+
+ if (mt7512_pcie_is_slave())
+ {
+ if ((bus == 0) && (dev < 2))
+ rc = dev ;
+ else if ((bus == 1) && (dev == 0) && isRC1_LINKUP)
+ rc = 1 ;
+ }
+ else
+ {
+ if ((bus == 0) && (dev < 2))
+ {
+ rc = dev;
+ }
+ else if ((bus == 1) && (dev == 0) && isRC0_LINKUP)
+ {
+ rc = 0;
+ }
+ else if ((bus == 2) && (dev == 0) && isRC1_LINKUP)
+ {
+ rc = 1;
+ }
+ }
+ return rc;
+}
+
+int pcie_write_config_word_extend(unsigned char bus, unsigned char dev,unsigned char func, unsigned int reg, unsigned long int value)
+{
+ unsigned int val,rc,offset = 0;
+ unsigned long flags;
+
+ rc = get_rc_port(bus,dev);
+
+ if (rc == 0){
+ offset = 0x1000;
+ }else if(rc == 1){
+ offset = 0x3000;
+ }else{
+ printk("pcie_write err: rc = %d",rc);
+ return 0;
+ }
+
+ spin_lock_irqsave(&pcie_lock, flags);
+
+	/*fmt=2|type=4|length=1 */
+ val = (2 << 29) | (4 << 24) | 1;
+
+ /* write TLP Header offset 0-3 */
+ regWrite32(0xbfb80460+offset, val);
+
+ /*write requester ID */
+ val = (rc<<19) | 0x070f;
+
+ /*write TLP Header offset 4-7 */
+ regWrite32(0xbfb80464+offset, val);
+
+ val = (bus << 24) | (dev << 19) |(func << 16) | reg;
+
+ /*write TLP Header offset 8-11 */
+ regWrite32(0xbfb80468+offset, val);
+
+ /*write TLP data */
+ regWrite32(0xbfb80470+offset, value);
+
+	/*start TLP Request */
+ regWrite32(0xbfb80488+offset, 1);
+
+ mdelay(1);
+
+ val = 0;
+
+ /*polling TLP Request status */
+ while((val++)<10)
+ {
+ /*TLP Request finished or timeout */
+ if ((regRead32(0xbfb80488+offset)&0x1)==0)
+ break;
+ mdelay(1);
+ }
+
+ spin_unlock_irqrestore(&pcie_lock, flags);
+
+ if (val==10)
+ printk("\nPCIE Write Err: bus = %d, dev = %d Reg = %d",bus,dev,reg);
+
+ return 0;
+
+}
+EXPORT_SYMBOL(pcie_write_config_word_extend);
+
+
+unsigned int pcie_read_config_word_extend(unsigned char bus,unsigned char dev,unsigned char func ,unsigned int reg)
+{
+ unsigned long flags;
+ unsigned int val,rc, offset = 0;
+ if (mt7512_pcie_is_slave()){
+ if ((bus == 0) && (dev == 0) && (reg == 0))
+ return 0xffffffff;
+ }
+
+ rc = get_rc_port(bus,dev);
+
+
+ if (rc == 0){
+ offset = 0x1000;
+ }else if(rc == 1){
+ offset = 0x3000;
+ }else{
+ return 0xffffffff;
+ }
+ spin_lock_irqsave(&pcie_lock, flags);
+
+	/* initialize the data reg */
+ regWrite32(0xbfb8048c+offset, 0xffffffff);
+
+	/*fmt=0|type=4|length=1 */
+ val = (4 << 24) | 1;
+
+ /*write TLP Header offset 0-3 */
+ regWrite32(0xbfb80460+offset, val);
+
+ /*write requester ID */
+ val = (rc << 19) | 0x070f;
+
+ /*write TLP Header offset 4-7*/
+ regWrite32(0xbfb80464+offset, val);
+
+ val = (bus << 24) | (dev << 19) | (func << 16) | reg;
+
+ /*write TLP Header offset 8-11*/
+ regWrite32(0xbfb80468+offset, val);
+
+	/*start TLP Request*/
+ regWrite32(0xbfb80488+offset, 1);
+
+ mdelay(1);
+
+ val = 0;
+
+ /*polling TLP Request status */
+ while((val++)<10)
+ {
+ /*TLP Request finished or timeout*/
+ if ((regRead32(0xbfb80488+offset)&0x1)==0)
+ break;
+ mdelay(1);
+ }
+
+ if (val==10)
+ {
+ printk("\n pcie_read_timeout: bus = %d, dev = %d, func = %d,reg = %d val = %x",bus,dev,func,reg,0xffffffff);
+ return 0xffffffff;
+ }
+
+ /*return the data from data reg*/
+ val = regRead32(0xbfb8048c+offset);
+
+ spin_unlock_irqrestore(&pcie_lock, flags);
+
+ return val;
+}
+
+EXPORT_SYMBOL(pcie_read_config_word_extend);
+
+
+void mt7512_pcie_reset(void)
+{
+ unsigned int tmp;
+
+ /* enabled PCIe port 1 */
+ tmp = regRead32(0xbfb00088);
+ regWrite32(0xbfb00088, (tmp | (1<<22)));
+ mdelay(1);
+
+ /*first reset to default*/
+ if(mt7512_pcie_is_slave() == 0)
+ {
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp & (~((1<<26) | (1<<27) | (1<<29)))));
+ mdelay(1);
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp | ((1<<26) | (1<<27) | (1<<29))));
+ mdelay(100);
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp & (~((1<<26) | (1<<27) | (1<<29)))));
+ mdelay(5);
+ }
+ else
+ {
+
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp & (~((1<<27) ))));
+ mdelay(1);
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp | ((1<<27) )));
+ mdelay(100);
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp & (~((1<<27) ))));
+ mdelay(5);
+
+ }
+ /*release device*/
+ if(mt7512_pcie_is_slave() == 0)
+ {
+ tmp = regRead32(0xbfb00088);
+ regWrite32(0xbfb00088, (tmp & (~((1<<29) | (1<<26)))));
+ mdelay(1);
+ tmp = regRead32(0xbfb00088);
+ regWrite32(0xbfb00088, (tmp | ((1<<29) | (1<<26))));
+ }
+ else
+ {
+
+ tmp = regRead32(0xbfb00088);
+ regWrite32(0xbfb00088, (tmp & (~((1<<29)))));
+ mdelay(1);
+ tmp = regRead32(0xbfb00088);
+ regWrite32(0xbfb00088, (tmp | ((1<<29))));
+
+ }
+
+ /*wait link up*/
+ mdelay(250);
+
+
+ if (mt7512_pcie_is_slave() == 0)
+ {
+ /*change RC0 class to pci-pci class*/
+ regWrite32(0xbfb81104, 0x06040001);
+ mdelay(1);
+
+ /*set pcie host mode*/
+ regWrite32(0xbfb81000, 0x804201);
+ mdelay(1);
+ }
+ /*change RC1 class to pci-pci class */
+ regWrite32(0xbfb83104, 0x06040001);
+ mdelay(1);
+
+ /*set pcie host mode*/
+ regWrite32(0xbfb83000, 0x804201);
+ mdelay(1);
+
+ return ;
+}
+
+EXPORT_SYMBOL(mt7512_pcie_reset);
+
+
+void mt7512_pcie_set_mac(void)
+{
+ unsigned int tmp;
+
+ if(isRC0_LINKUP && (mt7512_pcie_is_slave() == 0) ){
+
+ /*disable MSI interrupt*/
+ tmp = regRead32(0xbfb8111c);
+ regWrite32(0xbfb8111c, tmp & (~(1<<5)));
+ mdelay(1);
+
+ /*Enable Interrupt*/
+ tmp = regRead32(0xbfb81420);
+ regWrite32(0xbfb81420, tmp & (~(1<<16)));
+ mdelay(1);
+
+ /* Enable PCIE Error Interrupt */
+ tmp = regRead32(0xbfb80040);
+ regWrite32(0xbfb80040, tmp | 0x03);
+ mdelay(1);
+
+ }
+
+ if(isRC1_LINKUP){
+ /*disable MSI interrupt*/
+ tmp = regRead32(0xbfb8311c);
+ regWrite32(0xbfb8311c, tmp & (~(1<<5)));
+ mdelay(1);
+
+ /*Enable Interrupt*/
+ tmp = regRead32(0xbfb83420);
+ regWrite32(0xbfb83420, tmp & (~(1<<16)));
+ mdelay(1);
+
+ /*Enable PCIE Error Interrupt */
+ tmp = regRead32(0xbfb80040);
+ regWrite32(0xbfb80040, tmp | 0x0c);
+ mdelay(1);
+
+ }
+
+ return;
+}
+EXPORT_SYMBOL(mt7512_pcie_set_mac);
+
+int pcie_write_config_word(unsigned char type, unsigned char bus, unsigned char devnum, unsigned int regnum, unsigned long int value)
+{
+ if (isEN751221 || isEN751627)
+ return pcie_write_config_word_extend(bus,devnum,0,regnum,value);
+
+ if(isRT63165 || isRT63365 || isMT751020 || isMT7505){
+ regWrite32(KSEG1ADDR(pcie_config_addr), (bus<<24 |devnum<<19|regnum));
+ }else{
+ regWrite32(KSEG1ADDR(pcie_config_addr), (type<<31|bus<<20 |devnum<<15|regnum));
+ }
+ regWrite32(KSEG1ADDR(pcie_config_data), value);
+
+ return 0;
+}
+int pcie_write_config_byte(unsigned char type, unsigned char bus, unsigned char devnum, unsigned int regnum, unsigned char value)
+{
+ if(isRT63165 || isRT63365 || isMT751020 || isMT7505){
+ regWrite32(KSEG1ADDR(pcie_config_addr), (bus<<24 |devnum<<19|regnum));
+ }else{
+ regWrite32(KSEG1ADDR(pcie_config_addr), (type<<31|bus<<20 |devnum<<15|regnum));
+ }
+ regWrite32(KSEG1ADDR(pcie_config_data), value);
+
+ return 0;
+}
+unsigned long int pcie_read_config_word(unsigned char type, unsigned char bus, unsigned char devnum, unsigned int regnum)
+{
+
+ if (isEN751221 || isEN751627)
+ return pcie_read_config_word_extend(bus,devnum,0,regnum);
+
+ if(isRT63165 || isRT63365 || isMT751020 || isMT7505){
+ regWrite32(KSEG1ADDR(pcie_config_addr), (bus<<24 |devnum<<19|regnum));
+ }else{
+ regWrite32(KSEG1ADDR(pcie_config_addr), (type<<31|bus<<20|devnum<<15|regnum));
+ }
+ return regRead32(KSEG1ADDR(pcie_config_data));
+}
+#if (defined(TCSUPPORT_POWERSAVE_ENABLE) && (defined(TCSUPPORT_CPU_MT7510) || defined(TCSUPPORT_CPU_MT7520) || defined(TCSUPPORT_CPU_MT7505)))
+EXPORT_SYMBOL(pcie_write_config_word);
+EXPORT_SYMBOL(pcie_read_config_word);
+#endif
+int pcieRegInitConfig(void)
+{
+ unsigned int reg1_val, reg2_val;
+ unsigned int reg_val = 0;
+ int i = 0;
+ int slot;
+ int pci_device_exist = 0;
+ unsigned long tmp;
+
+ if(isRT63165){
+ return 0;
+ }
+
+ /* PCIe init module */
+ /* reset PCIe module */
+ /*
+ * From: TC/Li Fengbo
+ * To: 'krammer' ; 'Marshall Yen \
+ * Cc: 'Liu, Shuenn-Ren' ; 'Graham Fan\
+ * Sent: Friday, May 22, 2009 2:49 PM
+ * Subject: new pof for software reboot
+ *
+ * Dear both,
+ * I have generated a new pof for software reboot, the pof file name is
+ * software_reboot_20090522.pof
+ * It has been transported to Hsingchu, please help to check it
+ * Software Reset User Guide:
+ * After power on, there are two steps to active PCIe System
+	 * 1 Wait for minimum 50us, Write "1" to bit 29 of Register bfb0_0088, then
+	 * 2 Wait for minimum 3.5us, write "1" to bit 30 of Register bfb0_0088
+ *
+ * Before do software reboot,
+	 * 1 Write "0" to bit 29 and bit 30 of Register bfb0_0088
+ * Then reset for PCIE system is completed, you can reboot system
+	 * Don't forget to release PCIe reset
+	 * 2 Wait for minimum 50us , Write "1" to bit 29 of bfb0_0088, then
+	 * 3 Wait for minimum 3.5us, write "1" to bit 30 of bfb0_0088
+ *
+ * Best regards
+ * Fengbo Li
+ *
+ */
+ /* pcie fixup start */
+ /* setup COMMAND register */
+ pcie_write_config_word(PCIE_TYPE_RC, PCIE_BUS_RC, PCIE_DEVNUM_0, 0x04, 0x00100007);
+
+ /* setup CACHE_LINE_SIZE register */
+ if(isRT63365){
+ pcie_write_config_word(PCIE_TYPE_RC, PCIE_BUS_RC, PCIE_DEVNUM_0, 0x0c, 0x00010000);
+ }else{
+ pcie_write_config_word(PCIE_TYPE_RC, PCIE_BUS_RC, PCIE_DEVNUM_0, 0x0c/*PCI_CACHE_LINE_SIZE*/, 0x00000008);//duoduo_20090701
+ }
+ /* setup LATENCY_TIMER register */
+ /* pcie fixup end */
+ /*setup secondary bus number*/
+ /*setup subordinate bus number*/
+ pcie_write_config_word(PCIE_TYPE_RC, PCIE_BUS_RC, PCIE_DEVNUM_0, 0x18, 0x00010100);
+ /*setup I/O Base register*/
+ if(isRT63365){
+ pcie_write_config_word(PCIE_TYPE_RC, PCIE_BUS_RC, PCIE_DEVNUM_0, 0x30, 0x0);
+ }else{
+ pcie_write_config_word(PCIE_TYPE_RC, PCIE_BUS_RC, PCIE_DEVNUM_0, 0x30, 0x0000FFFF);
+ }
+ pcie_write_config_word(PCIE_TYPE_RC, PCIE_BUS_RC, PCIE_DEVNUM_0, 0x1C, 0x000000F0);
+ /*setup memory base register*/
+ if(isRT63365){
+ pcie_write_config_word(PCIE_TYPE_RC, PCIE_BUS_RC, PCIE_DEVNUM_0, 0x20, 0x20002000);
+ }else{
+ pcie_write_config_word(PCIE_TYPE_RC, PCIE_BUS_RC, PCIE_DEVNUM_0, 0x20, 0x1F701F70);
+ }
+ /*setup prefetchable memory base register */
+ pcie_write_config_word(PCIE_TYPE_RC, PCIE_BUS_RC, PCIE_DEVNUM_0, 0x24, 0x0000FFF0);
+ /*setup I/O Base upper 16 bits register*/
+ /*setup interrupt line register*/
+ /*setup bridge control*/
+ if(isRT63365){
+ pcie_write_config_word(PCIE_TYPE_RC, PCIE_BUS_RC, PCIE_DEVNUM_0, 0x3C, 0x00040119);
+ }else{
+ pcie_write_config_word(PCIE_TYPE_RC, PCIE_BUS_RC, PCIE_DEVNUM_0, 0x3C, 0x0004010B);
+ }
+
+ /* pci register 0x10 config needed or not? Linos for L2H will configure it */
+ if(isRT63365){
+ for(i = 0; i < 10; i++){
+ reg1_val = pcie_read_config_word(PCIE_TYPE_DEV, PCIE_BUS_DEV, PCIE_DEVNUM_0, 0x0);
+ mdelay(1);
+ //reg2_val = pcie_read_config_word(PCIE_TYPE_DEV, PCIE_BUS_DEV, PCIE_DEVNUM_1, 0x0);
+ //mdelay(1);
+ }
+ reg2_val = 0xffffffff;
+ //Enable Interrupt
+ if(isRC0_LINKUP){
+ tmp = regRead32(0xbfb8000c);
+ regWrite32(0xbfb8000c, (tmp | (1<<20)));
+ }
+ //second band
+ if(dual_band_support){
+ if(isRC1_LINKUP){
+ tmp = regRead32(0xbfb8000c);
+ regWrite32(0xbfb8000c, (tmp | (1<<21)));
+ }
+ }
+ }else{
+ do
+ {
+ mdelay(30);
+ reg_val = pcie_read_config_word(PCIE_TYPE_RC, PCIE_BUS_RC, PCIE_DEVNUM_0, 0xe0);
+ i++;
+ }
+ while((reg_val & 0x03f00000) != 0x00100000 && i <= 10);//check the if the dev has been link up
+ for(i = 0; i < 10; i++){
+ reg1_val = pcie_read_config_word(PCIE_TYPE_DEV, PCIE_BUS_DEV, PCIE_DEVNUM_0, 0x0);
+ mdelay(1);
+ reg2_val = pcie_read_config_word(PCIE_TYPE_DEV, PCIE_BUS_DEV, PCIE_DEVNUM_1, 0x0);
+ mdelay(1);
+ }
+ }
+ if( (reg1_val != 0xffffffff) &&
+ ( (reg1_val == ((NIC3090_PCIe_DEVICE_ID <<16) |NIC_PCI_VENDOR_ID)) //duoduo_20090702
+ || (reg1_val == ((NIC3091_PCIe_DEVICE_ID <<16) |NIC_PCI_VENDOR_ID))
+ || (reg1_val == ((NIC3092_PCIe_DEVICE_ID <<16) |NIC_PCI_VENDOR_ID))
+ || (reg1_val == ((NIC3390_PCIe_DEVICE_ID <<16) |NIC_PCI_VENDOR_ID))
+ || (reg1_val == ((NIC5390_PCIe_DEVICE_ID <<16) |NIC_PCI_VENDOR_ID))//xyyou_20101111
+ || (reg1_val == ((NIC539F_PCIe_DEVICE_ID <<16) |NIC_PCI_VENDOR_ID))
+ || (reg1_val == ((NIC7603_PCIe_DEVICE_ID <<16) |MTK_PCI_VENDOR_ID))
+ || (reg1_val == ((NIC5392_PCIe_DEVICE_ID <<16) |NIC_PCI_VENDOR_ID))) ){//xyyou wait to do
+ pcie_write_config_word(PCIE_TYPE_DEV, PCIE_BUS_DEV, PCIE_DEVNUM_0, 0x04, 0x00100006);
+ if(isRT63365)
+ pcie_write_config_word(PCIE_TYPE_DEV, PCIE_BUS_DEV, PCIE_DEVNUM_0, 0x10, 0x20000000);
+ else
+ pcie_write_config_word(PCIE_TYPE_DEV, PCIE_BUS_DEV, PCIE_DEVNUM_0, 0x10, PHYSADDR(PCI_DEVICE_MEM1));
+ pcie_write_config_word(PCIE_TYPE_DEV, PCIE_BUS_DEV, PCIE_DEVNUM_0, 0x14, 0);
+ pcie_write_config_word(PCIE_TYPE_DEV, PCIE_BUS_DEV, PCIE_DEVNUM_0, 0x18, 0);
+ pcie_write_config_word(PCIE_TYPE_DEV, PCIE_BUS_DEV, PCIE_DEVNUM_0, 0x1C, 0);
+ pcie_write_config_word(PCIE_TYPE_DEV, PCIE_BUS_DEV, PCIE_DEVNUM_0, 0x20, 0);
+ pcie_write_config_word(PCIE_TYPE_DEV, PCIE_BUS_DEV, PCIE_DEVNUM_0, 0x24, 0);
+ pcie_write_config_word(PCIE_TYPE_DEV, PCIE_BUS_DEV, PCIE_DEVNUM_0, 0x30, 0);
+ if(isRT63365)
+ pcie_write_config_word(PCIE_TYPE_DEV, PCIE_BUS_DEV, PCIE_DEVNUM_0, 0x3C, 0x00000119);
+ else
+ pcie_write_config_word(PCIE_TYPE_DEV, PCIE_BUS_DEV, PCIE_DEVNUM_0, 0x3C, 0x0000010B);
+
+ slot = PCIE_DEVNUM_0;
+ pci_device_exist++;
+ }
+ else if( (reg2_val != 0xffffffff) &&
+ ( (reg2_val == ((NIC3090_PCIe_DEVICE_ID <<16) |NIC_PCI_VENDOR_ID))
+ || (reg2_val == ((NIC3091_PCIe_DEVICE_ID <<16) |NIC_PCI_VENDOR_ID))
+ || (reg2_val == ((NIC3092_PCIe_DEVICE_ID <<16) |NIC_PCI_VENDOR_ID))
+ || (reg1_val == ((NIC3390_PCIe_DEVICE_ID <<16) |NIC_PCI_VENDOR_ID))
+ || (reg1_val == ((NIC5390_PCIe_DEVICE_ID <<16) |NIC_PCI_VENDOR_ID))
+ || (reg1_val == ((NIC539F_PCIe_DEVICE_ID <<16) |NIC_PCI_VENDOR_ID))
+ || (reg1_val == ((NIC7603_PCIe_DEVICE_ID <<16) |MTK_PCI_VENDOR_ID))
+ || (reg1_val == ((NIC5392_PCIe_DEVICE_ID <<16) |NIC_PCI_VENDOR_ID))) ){
+ slot=PCIE_DEVNUM_1;
+ pci_device_exist++;
+ }
+ else{
+ printk("no_pci_found error case\n");
+ return -1;
+ }
+ return slot;
+}
+EXPORT_SYMBOL(pcieRegInitConfig);
+
+
+
+void pcie_init(void)
+{
+ int i, j;
+#if defined(TCSUPPORT_BONDING)
+ unsigned int regs[] = {0xbfb8200c, 0xbfb82090, 0xbfb82094};
+#endif
+
+ /*63368 use new recover mechanism ,so need to save pcie info*/
+#ifdef TCSUPPORT_RT63368_PCIE_RECOVER
+ if(isRT63365 && !isRT63368){
+#else
+ if(isRT63365){
+#endif
+ return;
+ }
+
+#ifdef TCSUPPORT_RT63368_PCIE_RECOVER
+ if(isMT751020 || isMT7505 || isRT63368){
+#else
+ if(isMT751020 || isMT7505 || isEN751221 || isEN751627){
+#endif
+ //Save all configuration
+ for(i=0; i<PCIE_SAVE_BUS_NUM;i++){
+ for(j=0;j<PCIE_SAVE_DEVICE_NUM;j++){
+ if(pcie_config_temp[i][j] == NULL){
+ pcie_config_temp[i][j] = save_pcie_config(i, j);
+ }
+ }
+ }
+ #if defined(TCSUPPORT_BONDING)
+ for (i = 0; i < MAX_REG_CONFIG; i++)
+ {
+ reg_config_temp[i].reg = regs[i];
+ reg_config_temp[i].value = regRead32(regs[i]);
+ }
+ #endif
+ pcie_soft_patch = ENABLE;
+ }else{
+ pcieReset();
+ pcieRegInitConfig();
+ }
+}
+EXPORT_SYMBOL(pcie_init);
+
+int pcie_timeout_disable(void)
+{
+ unsigned int val = 0x10;
+ unsigned int reg = 0x98;
+ unsigned int dev = 0x0;
+ unsigned int bus;
+
+ bus = 0x1;
+ if(isRC0_LINKUP){
+ //disable RC0's device timeout
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, reg, val);
+ }
+
+#if defined(TCSUPPORT_BONDING) && !defined(TCSUPPORT_CPU_EN7512)
+ if(isSLAVERC_LINKUP){
+ bus += 1;
+
+ //disable slave RC's device timeout
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, reg, val);
+ }
+#endif
+
+ if(isRC1_LINKUP){
+ bus += 1;
+
+ //disable RC1's device timeout
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, reg, val);
+ }
+
+ return 0;
+}
+
+#if defined(TCSUPPORT_BONDING)
+void pcie_virBaseAddr_set(unsigned long addr)
+{
+ slaveVirBaseAddr = addr;
+}
+EXPORT_SYMBOL(pcie_virBaseAddr_set);
+#endif
+
+
+void aer_config(int aerEnable)
+{
+ unsigned int advRcErrReg = 0x12c, advRcErrVal = 0x7, disAdvRcErr = 0x0;
+ unsigned int briCtrlReg = 0x3c, briCtrlVal = 0x60119, disBriCtrl = 0x40119;
+ unsigned int devCtrlReg = 0x78, devCtrlVal = 0x201f, disDevCtrl = 0x2010;
+ unsigned int dev = 0x0;
+ unsigned int bus = 0x0;
+
+
+ if(aerEnable){ // Enable AER
+ // configure RC
+ bus = 0x0;
+ dev = 0x0;
+ if(isRC0_LINKUP){
+ //Enable RC0 interrupt
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, advRcErrReg, advRcErrVal);
+ regWrite32(KSEG1ADDR(pcie_config_data), advRcErrVal);
+ //Enable RC0 error report
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, briCtrlReg, briCtrlVal);
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, devCtrlReg, devCtrlVal);
+ }
+ if(isRC1_LINKUP){
+ dev += 1;
+
+ //Enable RC1 interrupt
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, advRcErrReg, advRcErrVal);
+
+ //Enable RC1 error report
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, briCtrlReg, briCtrlVal);
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, devCtrlReg, devCtrlVal);
+ }
+
+ // configure device
+ dev = 0x0;
+ if(isRC0_LINKUP){
+ bus += 1;
+
+ //Enable EP0 error report
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, devCtrlReg, devCtrlVal);
+ }
+
+#if defined(TCSUPPORT_BONDING) && !defined(TCSUPPORT_CPU_EN7512)
+ // configure slave RC & device
+ if(isSLAVERC_LINKUP){
+ dev += 1;
+
+ //Enable slave RC interrupt
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, advRcErrReg, advRcErrVal);
+
+ //Enable slave RC error report
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, briCtrlReg, briCtrlVal);
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, devCtrlReg, devCtrlVal);
+
+ dev = 0x0;
+ bus += 1;
+ //Enable slave wifi error report
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, devCtrlReg, devCtrlVal);
+ }
+#endif
+
+ // configure device
+ dev = 0x0;
+ if(isRC1_LINKUP){
+ bus += 1;
+
+ //Enable EP1 error report
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, devCtrlReg, devCtrlVal);
+ }
+
+ }else{ // Disable AER
+ // configure RC
+ bus = 0x0;
+ dev = 0x0;
+ if(isRC0_LINKUP){
+ //Disable RC0 interrupt
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, advRcErrReg, disAdvRcErr);
+
+ //Disable RC0 error report
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, briCtrlReg, disBriCtrl);
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, devCtrlReg, disDevCtrl);
+
+ }
+ if(isRC1_LINKUP){
+ dev = 0x1;
+
+ //Disable RC1 interrupt
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, advRcErrReg, disAdvRcErr);
+
+ //Disable RC1 error report
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, briCtrlReg, disBriCtrl);
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, devCtrlReg, disDevCtrl);
+
+ }
+
+#if defined(TCSUPPORT_BONDING) && !defined(TCSUPPORT_CPU_EN7512)
+ if(isSLAVERC_LINKUP){
+ bus += 1;
+ dev = 0x1;
+
+ //Disable slave RC interrupt
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, advRcErrReg, disAdvRcErr);
+
+ //Disable slave RC interrupt
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, briCtrlReg, disBriCtrl);
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, devCtrlReg, disDevCtrl);
+
+ }
+#endif
+ }
+}
+
+
+static void err_status_dump(void)
+{
+ unsigned int DevStatusReg = 0x88;
+ unsigned int dev = 0x0;
+ unsigned int bus = 0x0;
+ unsigned long int errStatus;
+
+
+ if(isRC0_LINKUP){
+ printk("### RC0 ###\n");
+ //check pcie capability structure-device status register
+ errStatus = pcie_read_config_word_extend(bus,dev,0,DevStatusReg);
+ printk("device status register:0x%lx\n",errStatus);
+ if(0x10000==errStatus&0x10000)
+ printk("correctable error detected ! \n");
+ if(0x20000==errStatus&0x20000)
+ printk("non-fatal error detected ! \n");
+ if(0x40000==errStatus&0x40000)
+ printk("fatal error detected ! \n");
+ if(0x80000==errStatus&0x80000)
+ printk("unsupported request detected ! \n");
+
+ //check error counter
+ printk("ECRC Error: 0x%x \n",(unsigned int)regRead32(0xbfb80054));
+ printk("TLP CRC Error: 0x%x \n",(unsigned int)regRead32(0xbfb801d8));
+ printk("DLLP CRC Error: 0x%x \n",(unsigned int)regRead32(0xbfb801dc));
+ printk("Replay Timeout: 0x%x \n",(unsigned int)regRead32(0xbfb801e0));
+ printk("Replay Rollover: 0x%x \n",(unsigned int)regRead32(0xbfb801e4));
+ printk("CPL Unexpected: 0x%x \n",(unsigned int)regRead32(0xbfb801e8));
+
+ }
+
+ if(isRC1_LINKUP){
+ bus = 0x0;
+ dev = 0x1;
+
+ printk("### RC1 ###\n");
+ //check pcie capability structure-device status register
+ errStatus = pcie_read_config_word_extend(bus,dev,0,DevStatusReg);
+ printk("device status register:0x%lx\n",errStatus);
+ if(0x10000==errStatus&0x10000)
+ printk("correctable error detected ! \n");
+ if(0x20000==errStatus&0x20000)
+ printk("non-fatal error detected ! \n");
+ if(0x40000==errStatus&0x40000)
+ printk("fatal error detected ! \n");
+ if(0x80000==errStatus&0x80000)
+ printk("unsupported request detected ! \n");
+
+ //check error counter
+ printk("ECRC Error: 0x%x \n",(unsigned int)regRead32(0xbfb80058));
+ printk("TLP CRC Error: 0x%x \n",(unsigned int)regRead32(0xbfb801ec));
+ printk("DLLP CRC Error: 0x%x \n",(unsigned int)regRead32(0xbfb801f0));
+ printk("Replay Timeout: 0x%x \n",(unsigned int)regRead32(0xbfb801f4));
+ printk("Replay Rollover: 0x%x \n",(unsigned int)regRead32(0xbfb801f8));
+ printk("CPL Unexpected: 0x%x \n",(unsigned int)regRead32(0xbfb801fc));
+
+ }
+}
+
+static void err_status_clear(void)
+{
+ unsigned int DevStatusReg = 0x88;
+
+ unsigned int dev = 0x0;
+ unsigned int bus = 0x0;
+ unsigned long int errStatus;
+
+ if(isRC0_LINKUP){
+ //printk("RC0\n");
+ //clear error status
+ errStatus = pcie_read_config_word_extend(bus,dev,0,DevStatusReg);
+ pcie_write_config_word_extend(bus,dev,0,DevStatusReg,errStatus&0x0f);
+ }
+
+ if(isRC1_LINKUP){
+ bus = 0x0;
+ dev = 0x1;
+
+ //printk("RC1\n");
+ //clear error status
+ errStatus = pcie_read_config_word_extend(bus,dev,0,DevStatusReg);
+ pcie_write_config_word_extend(bus,dev,0,DevStatusReg,errStatus&0x0f);
+
+ }
+
+}
+
+static void aer_status_dump(void)
+{
+ unsigned int rootErrReg = 0x130;
+ unsigned int errSrcIdReg = 0x134;
+ unsigned int unCorErrReg = 0x104;
+ unsigned int corErrReg = 0x110;
+ unsigned int dev = 0x0;
+ unsigned int bus = 0x0;
+
+
+ if(isRC0_LINKUP){
+ printk("### RC0 ###\n");
+ //check root error status
+ printk("root error status=0x%lx\n", pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, rootErrReg));
+ //check error source ID
+ printk("error source ID=0x%lx\n", pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, errSrcIdReg));
+ //check uncorrectable error status
+ printk("UN-correctable status=0x%lx\n", pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, unCorErrReg));
+ //check correctable error status
+ printk("Correctable status=0x%lx\n", pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, corErrReg));
+ //check error counter
+ printk("RC0 LCRC counter: 0x%x RC0 ECRC counter: 0x%x\n",(unsigned int)regRead32(0xbfb82060),(unsigned int)regRead32(0xbfb82064));
+ printk("EP correctable counter: 0x%x EP non-fatal counter: 0x%x \n\n",(unsigned int)regRead32(0xbfb82068),(unsigned int)regRead32(0xbfb8206c));
+ }
+
+ if(isRC1_LINKUP){
+ bus = 0x0;
+ dev = 0x1;
+
+ printk("### RC1 ###\n");
+ //check root error status
+ printk("root error status=0x%lx\n", pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, rootErrReg));
+ //check error source ID
+ printk("error source ID=0x%lx\n", pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, errSrcIdReg));
+ //check uncorrectable error status
+ printk("UN-correctable status=0x%lx\n", pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, unCorErrReg));
+ //check correctable error status
+ printk("Correctable status=0x%lx\n", pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, corErrReg));
+ //check error counter
+ printk("RC1 LCRC counter: 0x%x RC1 ECRC counter: 0x%x\n",(unsigned int)regRead32(0xbfb83060),(unsigned int)regRead32(0xbfb83064));
+ printk("EP correctable counter: 0x%x EP non-fatal counter: 0x%x \n\n",(unsigned int)regRead32(0xbfb83068),(unsigned int)regRead32(0xbfb8306c));
+ }
+#if defined(TCSUPPORT_BONDING) && !defined(TCSUPPORT_CPU_EN7512)
+
+ if(isSLAVERC_LINKUP){
+ bus = 0x0;
+ if(isRC0_LINKUP){
+ bus += 1;
+ }
+ dev = 0x1;
+
+ printk("### slave RC ###\n");
+ //check root error status
+ printk("root error status=0x%lx\n", pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, rootErrReg));
+ //check error source ID
+ printk("error source ID=0x%lx\n", pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, errSrcIdReg));
+ //check uncorrectable error status
+ printk("UN-correctable status=0x%lx\n", pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, unCorErrReg));
+ //check correctable error status
+ printk("Correctable status=0x%lx\n", pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, corErrReg));
+ //check error counter
+ if(slaveVirBaseAddr != 0){
+ printk("slaveRC LCRC counter: 0x%x slaveRC ECRC counter: 0x%x\n",(unsigned int)regRead32(slaveVirBaseAddr + 0xb83060),(unsigned int)regRead32(slaveVirBaseAddr + 0xb83064));
+ printk("EP correctable counter: 0x%x EP non-fatal counter: 0x%x \n\n",(unsigned int)regRead32(slaveVirBaseAddr + 0xb83068),(unsigned int)regRead32(slaveVirBaseAddr + 0xb8306c));
+ }
+ }
+#endif
+}
+
+static void aer_status_clear(void)
+{
+ unsigned int unCorErrReg = 0x104;
+ unsigned int corErrReg = 0x110;
+ unsigned int rootErrReg = 0x130;
+ unsigned int dev = 0x0;
+ unsigned int bus = 0x0;
+ unsigned long int errStatus;
+
+ if(isRC0_LINKUP){
+ //printk("RC0\n");
+ //clear UN-correctable error status
+ errStatus = pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, unCorErrReg);
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, unCorErrReg, errStatus);
+		//printk("Clear UN-correctable status=0x%lx\n", pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, unCorErrReg));
+ //clear correctable error status
+ errStatus = pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, corErrReg);
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, corErrReg, errStatus);
+ //printk("Clear Correctable status=0x%lx\n", pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, corErrReg));
+ //clear root error status
+ errStatus = pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, rootErrReg);
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, rootErrReg, errStatus);
+ //printk("Clear root error status=0x%lx\n", pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, rootErrReg));
+ }
+
+ if(isRC1_LINKUP){
+ bus = 0x0;
+ dev = 0x1;
+
+ //printk("RC1\n");
+ //clear UN-correctable error status
+ errStatus = pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, unCorErrReg);
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, unCorErrReg, errStatus);
+		//printk("Clear UN-correctable status=0x%lx\n", pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, unCorErrReg));
+ //clear correctable error status
+ errStatus = pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, corErrReg);
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, corErrReg, errStatus);
+ //printk("Clear Correctable status=0x%lx\n", pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, corErrReg));
+ //clear root error status
+ errStatus = pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, rootErrReg);
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, rootErrReg, errStatus);
+ //printk("Clear root error status=0x%lx\n", pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, rootErrReg));
+ }
+
+#if defined(TCSUPPORT_BONDING) && !defined(TCSUPPORT_CPU_EN7512)
+ if(isSLAVERC_LINKUP){
+ bus = 0x0;
+ if(isRC0_LINKUP){
+ bus += 1;
+ }
+ dev = 0x1;
+
+ //printk("slave RC\n");
+ //clear UN-correctable error status
+ errStatus = pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, unCorErrReg);
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, unCorErrReg, errStatus);
+		//printk("Clear UN-correctable status=0x%lx\n", pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, unCorErrReg));
+ //clear correctable error status
+ errStatus = pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, corErrReg);
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, corErrReg, errStatus);
+ //printk("Clear Correctable status=0x%lx\n", pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, corErrReg));
+ //clear root error status
+ errStatus = pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, rootErrReg);
+ pcie_write_config_word(PCIE_TYPE_DEV, bus, dev, rootErrReg, errStatus);
+ //printk("Clear root error status=0x%lx\n", pcie_read_config_word(PCIE_TYPE_DEV, bus, dev, rootErrReg));
+ }
+#endif
+}
+
+
+void
+ahbErrChk(void){
+ register uint32 status=0;
+ unsigned long flags;
+
+ if(pcie_soft_patch == 0){
+ return;
+ }
+
+ if(isRT63365 || isMT7505 || isEN751221 || isEN751627)
+ return;
+
+
+ if(isMT751020 || isMT7505){
+ pcie_check(0);
+ }else
+ {
+ status=regRead32(CR_AHB_AACS);
+ if((status & AHB_BUS_TIMEOUT_ERR)||(status & AHB_BUS_ADDR_ERR)){
+ printk("CR_AHB_AACS:0x%08lx\n", status);
+ printk("CR_AHB_ABEM:0x%08lx\n", regRead32(CR_AHB_ABEM));
+ printk("CR_AHB_ABEA:0x%08lx\n", regRead32(CR_AHB_ABEA));
+ local_irq_save(flags);
+ ahb_status=1;
+ pcieReset();
+ pcieRegInitConfig();
+ local_irq_restore(flags);
+ }
+ }
+
+
+}
+EXPORT_SYMBOL(ahbErrChk);
+
+#ifdef CONFIG_MIPS_TC3162U
+static irqreturn_t ahbErrIsr(int irq, void *dev){
+ ahbErrChk();
+ return IRQ_HANDLED;
+}
+#endif
+
+void chkAhbErr(int force){
+ uint32 val=0;
+ unsigned long flags;
+ uint32 lcrc, ecrc;
+
+ if(pcie_soft_patch == 0){
+ return;
+ }
+
+ if(isTC3162U){
+ local_irq_save(flags);
+ /*check the pcie bus crc error counter*/
+ val= pcie_read_config_word(PCIE_TYPE_RC, PCIE_BUS_RC, PCIE_DEVNUM_0, 0x54);
+ if((val!=0x0) || (force==0x1) ){
+ /*Reset pcie and refill pcie-registers*/
+ pcieReset();
+ pcieRegInitConfig();
+ ahb_status = 1;
+ }
+ local_irq_restore(flags);
+ }
+
+ else if(isRT63365){
+ local_irq_save(flags);
+ /*check the pcie bus crc error counter*/
+ lcrc = regRead32(0xbfb82060);
+ ecrc = regRead32(0xbfb82064);
+ if((lcrc != 0xdeadbeaf) || (ecrc != 0xdeadbeaf)){
+ if((lcrc != 0x0) || (ecrc != 0x0) || (force == 0x1)){
+ #ifdef TCSUPPORT_RT63368_PCIE_RECOVER
+ /*add a new mechanism*/
+ if((lcrc+ecrc) >= pcie_error_detect_count)
+ {
+ printk("PCI-E L-crc %lx E-crc %lx!!\n", lcrc, ecrc);
+ pcie_reset_condiction = PCIE_RESET_CON_PCIEERROR;
+ ahb_status = 1;
+ }
+ #else
+ printk("RC0 PCI-E L-crc %lx E-crc %lx!!\n", lcrc, ecrc);
+ /*Reset pcie and refill pcie-registers*/
+ pcieReset();
+ pcieRegInitConfig();
+ ahb_status = 1;
+ #endif
+
+ }
+ }
+ local_irq_restore(flags);
+ }
+
+
+}
+EXPORT_SYMBOL(chkAhbErr);
+
+static int ahb_status_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+ len = sprintf(page, "%d %d %d", pcie_soft_patch, ahb_status, wifiDeviceId);
+ len -= off;
+ *start = page + off;
+
+ if (len > count)
+ len = count;
+ else
+ *eof = 1;
+
+ if (len < 0)
+ len = 0;
+
+ chkAhbErr(0);
+ return len;
+}
+
+static int ahb_status_write_proc(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ char val_string[8];
+ int val=0;
+ unsigned long flags;
+
+ if (count > sizeof(val_string) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(val_string, buffer, count))
+ return -EFAULT;
+
+ val_string[count] = '\0';
+ if(sscanf(val_string,"%d %d", &pcie_soft_patch, &val)!=2){
+ printk("usage: <onoff> <type>\n");
+ return count;
+ }
+ if(val==0x2){
+ /*Reset pcie and refill pcie-registers*/
+ local_irq_save(flags);
+ #ifdef TCSUPPORT_RT63368_PCIE_RECOVER
+ /*63368 use new mechanism*/
+ if(isMT751020 || isMT7505 || isRT63368){
+ #else
+ if(isMT751020 || isMT7505 || isEN751221 || isEN751627){
+ #endif
+ pcie_check(0);
+ }else{
+ pcieReset();
+ pcieRegInitConfig();
+ }
+ local_irq_restore(flags);
+ }
+ if (val == 0) /*Disable wifi interface down to up*/{
+ ahb_status = 0;
+ wifiDeviceId = 0;
+ }else{
+ ahb_status = 1;
+ }
+ return count;
+}
+
+
+
+static int pcie_rcnum_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+
+
+ return 0;
+}
+
+static int pcie_rcnum_write_proc(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ char val_string[8];
+ int reset_rc = -1;
+ unsigned long flags;
+
+ if (count > sizeof(val_string) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(val_string, buffer, count))
+ return -EFAULT;
+
+ val_string[count] = '\0';
+ if(sscanf(val_string,"%d", &reset_rc)!=1){
+ printk("usage: <RC reset Number> \n");
+ return count;
+ }
+
+ if(reset_rc != -1){
+ printk("reset and recover start RC %d\n",reset_rc);
+ spin_lock_irqsave(&pcie_esd_lock, flags);
+ pcie_reset_handler(reset_rc);
+ spin_unlock_irqrestore(&pcie_esd_lock, flags);
+
+ ahb_status = 1;
+ }
+
+ return count;
+}
+static int pcie_reset_con_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+	len = sprintf(page, "current pcie_reset_con %d", pcie_reset_condiction);
+ len -= off;
+ *start = page + off;
+
+ if (len > count)
+ len = count;
+ else
+ *eof = 1;
+
+ if (len < 0)
+ len = 0;
+
+ return len;
+}
+
+static int pcie_reset_con_write_proc(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ char val_string[8];
+ int value;
+
+ if (count > sizeof(val_string) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(val_string, buffer, count))
+ return -EFAULT;
+
+ val_string[count] = '\0';
+ if(sscanf(val_string,"%d", &value)!=1){
+ printk("usage: <pcie_reset_condiction 0:bustime out 1:pcie error> \n");
+ return count;
+ }
+ pcie_reset_condiction = (char)value;
+ return count;
+}
+static int pcie_reset_count_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+ len = sprintf(page, "pcie_reset_count %d\n", pcie_reset_count);
+ len -= off;
+ *start = page + off;
+
+ if (len > count)
+ len = count;
+ else
+ *eof = 1;
+
+ if (len < 0)
+ len = 0;
+
+ return len;
+}
+
+
+static int err_status_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ err_status_dump();
+
+ return 0;
+}
+
+static int err_status_write_proc(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ char val_string[5];
+ int clear=0;
+
+ if (count > sizeof(val_string) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(val_string, buffer, count))
+ return -EFAULT;
+
+ val_string[count] = '\0';
+ sscanf(val_string,"%d", &clear);
+
+ if(clear == 1){
+ err_status_dump();
+ err_status_clear();
+ }
+
+ return count;
+}
+
+
+
+static int aer_status_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ aer_status_dump();
+
+ return 0;
+}
+
+static int aer_status_write_proc(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ char val_string[5];
+ int clear=0;
+
+ if (count > sizeof(val_string) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(val_string, buffer, count))
+ return -EFAULT;
+
+ val_string[count] = '\0';
+ sscanf(val_string,"%d", &clear);
+
+ if(clear == 1){
+ aer_status_dump();
+ aer_status_clear();
+ }
+
+ return count;
+}
+
+#ifdef TCSUPPORT_RT63368_PCIE_RECOVER
+/*______________________________________________________________________________
+**function name:pcie_error_detect_count_read_proc
+**
+**description:
+* get error detect count
+**parameters:
+* char *page
+* char **start
+* off_t off
+* int count
+* int *eof
+* void *data
+**global:
+* pcie_error_detect_count
+**return:
+* len
+**call:
+*
+**revision:
+* Brian.shi
+**____________________________________________________________________________*/
+
+static int pcie_error_detect_count_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+	len = sprintf(page, "current pcie_error_detect_count %d", pcie_error_detect_count);
+ len -= off;
+ *start = page + off;
+
+ if (len > count)
+ len = count;
+ else
+ *eof = 1;
+
+ if (len < 0)
+ len = 0;
+
+ return len;
+}
+/*______________________________________________________________________________
+**function name:pcie_error_detect_count_write_proc
+**
+**description:
+* set error detect count
+**parameters:
+* struct file *file
+* const char *buffer
+* unsigned long count
+* void *data
+*
+*
+**global:
+* pcie_error_detect_count
+**return:
+* count
+**call:
+*
+**revision:
+* Brian.shi
+**____________________________________________________________________________*/
+
+static int pcie_error_detect_count_write_proc(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ char val_string[8];
+ int value;
+
+ if (count > sizeof(val_string) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(val_string, buffer, count))
+ return -EFAULT;
+
+ val_string[count] = '\0';
+ if(sscanf(val_string,"%d", &value)!=1){
+ printk("usage: <count:overflow will do pcie reset> \n");
+ return count;
+ }
+ pcie_error_detect_count = (char)value;
+ return count;
+}
+
+/*______________________________________________________________________________
+**function name:ahb_status_value_count_read_proc
+**
+**description:
+* get ahb status
+**parameters:
+* char *page
+* char **start
+* off_t off
+* int count
+* int *eof
+* void *data
+**global:
+* ahb_status
+**return:
+* len
+**call:
+*
+**revision:
+* Brian.shi
+**____________________________________________________________________________*/
+
+static int ahb_status_value_count_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+	len = sprintf(page, "current ahb_status %d", ahb_status);
+ len -= off;
+ *start = page + off;
+
+ if (len > count)
+ len = count;
+ else
+ *eof = 1;
+
+ if (len < 0)
+ len = 0;
+
+ return len;
+}
+
+/*______________________________________________________________________________
+**function name:ahb_status_value_count_write_proc
+**
+**description:
+* set ahb status
+**parameters:
+* struct file *file
+* const char *buffer
+* unsigned long count
+* void *data
+*
+*
+**global:
+* ahb_status
+**return:
+* count
+**call:
+*
+**revision:
+* Brian.shi
+**____________________________________________________________________________*/
+
+static int ahb_status_value_count_write_proc(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ char val_string[8];
+ int value;
+
+ if (count > sizeof(val_string) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(val_string, buffer, count))
+ return -EFAULT;
+
+ val_string[count] = '\0';
+ if(sscanf(val_string,"%d", &value)!=1){
+ printk("usage: <count:overflow will do pcie reset> \n");
+ return count;
+ }
+ ahb_status = (char)value;
+ return count;
+}
+#endif
+
+
+#if defined(TCSUPPORT_BONDING)
+static void reset_slave_chip(void){
+ uint32 tmp;
+
+ tmp = regRead32(CR_GPIO_CTRL);
+ tmp &= ~(0x3 << (6<<1));
+ tmp |= (0x1 << (6<<1));
+ regWrite32(CR_GPIO_CTRL, tmp);
+ tmp = regRead32(CR_GPIO_ODRAIN);
+ regWrite32(CR_GPIO_ODRAIN, (tmp | (1<<6)));
+ tmp = regRead32(CR_GPIO_DATA);
+ regWrite32(CR_GPIO_DATA, (tmp | (1<<6)));
+ udelay(1);
+ regWrite32(CR_GPIO_DATA, (tmp & (~(1<<6))));
+ udelay(1);
+ regWrite32(CR_GPIO_DATA, (tmp | (1<<6)));
+ udelay(1);
+
+}
+#endif
+
+int pcie_api_init(void);
+static __init int tc3162_pcie_init(void)
+{
+ struct proc_dir_entry *ahb_status_proc;
+ struct proc_dir_entry *aer_status_proc;
+ int i;
+ uint32 tmp;
+
+#ifdef TCSUPPORT_DMT_CO
+ return 0;
+#endif
+
+ if(isMT7505 || isEN751221 || isEN751627){
+ /*before reset host,need to pull device low*/
+ tmp = regRead32(0xbfb00088);
+ regWrite32(0xbfb00088, (tmp & (~((1<<29) | (1<<26)))));
+ mdelay(1);
+ }
+#ifdef TCSUPPORT_MT7510_E1
+ if(isMT751020){
+ #ifndef TCSUPPORT_MT7520_PCIE
+ if (!isFPGA)
+ return -1;
+ #endif
+ /* enabled PCIe port 0,1 */
+ tmp = regRead32(0xbfb00088);
+ regWrite32(0xbfb00088, (tmp | (0x1<<22)));
+ mdelay(1);
+ }
+#endif
+ if (!isFPGA){
+#if defined(TCSUPPORT_BONDING)
+ printk("slave chip reset\n");
+ reset_slave_chip();
+#endif
+ pciePhyInit();
+ }
+ if (isEN751221 || isEN751627){
+
+ if(isEN751221) {
+ printk("EN751221 ");
+ } else if(isEN751627) {
+ printk("EN751627 ");
+ }
+ printk("pcie_init\n");
+
+ tc3162_pcie_mem_resource.start = 0x20000000;
+ tc3162_pcie_mem_resource.end = 0x2FFFFFFF;
+
+ mt7512_pcie_reset();
+
+ printk("check pcie link up status: \n");
+ printk("isRC0_LINKUP=%d \n",isRC0_LINKUP);
+ printk("isRC1_LINKUP=%d \n",isRC1_LINKUP);
+ if((!isRC0_LINKUP) && (!isRC1_LINKUP)){
+ printk("PCI-E RC0 & RC1 can not link up\n");
+ return -1;
+ }
+ mt7512_pcie_set_mac();
+
+ }else if(isRT63365 || isMT751020 || isMT7505){
+ if(isMT751020){
+ dual_band_support = 1;
+ printk("MT7510_pcie_init\n");
+ }
+ else if(isRT63368){
+#if defined(TCSUPPORT_DUAL_WLAN)
+		//RT63368: enabling PCI-e port1 when port1 has no power will cause a hang. shnwind.
+ dual_band_support = 1;
+#else
+ dual_band_support = 0;
+#endif
+ printk("RT63368_pcie_init:%d\n",dual_band_support);
+ } else if(isMT7505){
+ printk("MT7505_pcie_init\n");
+ } else{
+ printk("RT63365_pcie_init\n");
+ }
+
+ //change memory mapping affress.
+ tc3162_pcie_mem_resource.start = 0x20000000;
+ tc3162_pcie_mem_resource.end = 0x2FFFFFFF;
+
+ //change pcie addr and data window.
+ pcie_config_addr = 0x1fb80020;
+ pcie_config_data = 0x1fb80024;
+
+ //PCI-E reset
+ if(isMT751020 || isMT7505){
+ /* enabled PCIe port 1 */
+ tmp = regRead32(0xbfb00088);
+ regWrite32(0xbfb00088, (tmp | (1<<22)));
+ mdelay(1);
+#if !defined(TCSUPPORT_BONDING) && !defined(TCSUPPORT_CPU_MT7505)
+#if !defined(TCSUPPORT_XPON_HAL_API_EXT)
+ if (!isFPGA)
+#endif
+ {
+ //set GPIO share scheme reg for PERST output
+ tmp = regRead32(0xbfb00860);
+ regWrite32(0xbfb00860, (tmp | ((1<<19) | (1<<20))));
+ }
+#endif
+ /*first reset to default*/
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp & (~((1<<26) | (1<<27) | (1<<29)))));
+ mdelay(1);
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp | ((1<<26) | (1<<27) | (1<<29))));
+ #ifdef MT7592
+ mdelay(100);
+ #else
+ mdelay(1);
+ #endif
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp & (~((1<<26) | (1<<27) | (1<<29)))));
+ mdelay(1);
+ }else{
+ if (isFPGA) {
+ //FPGA mode
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp & ~(1<<26)));
+ mdelay(1);
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp | (1<<26)));
+ mdelay(1);
+ }else{
+ if(dual_band_support){
+ /* enabled PCIe port 1 */
+ tmp = regRead32(0xbfb00088);
+ regWrite32(0xbfb00088, (tmp | (1<<22)));
+ mdelay(1);
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp & (~((1<<26) | (1<<27) | (1<<29)))));
+ mdelay(1);
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp | ((1<<26) | (1<<27) | (1<<29))));
+ #ifdef MT7592
+ mdelay(100);
+ #else
+ mdelay(1);
+ #endif
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp & (~((1<<26) | (1<<27) | (1<<29)))));
+ mdelay(1);
+ }else{
+ /* disabled PCIe port 1 */
+ tmp = regRead32(0xbfb00088);
+ regWrite32(0xbfb00088, (tmp & ~(1<<22)));
+ mdelay(1);
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp & (~((1<<26) | (1<<29)))));
+ mdelay(1);
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp | ((1<<26) | (1<<29))));
+ #ifdef MT7592
+ mdelay(100);
+ #else
+ mdelay(1);
+ #endif
+ tmp = regRead32(0xbfb00834);
+ regWrite32(0xbfb00834, (tmp & (~((1<<26) | (1<<29)))));
+ mdelay(1);
+ }
+ }
+ }
+ if(isMT751020 || isMT7505){
+ //relese pcie device reset
+#ifndef TCSUPPORT_CPU_MT7505
+ tmp = regRead32(0xbfb80000);
+ regWrite32(0xbfb80000, (tmp & (~((1<<1) | (1<<2)))));
+ mdelay(1);
+#else
+ tmp = regRead32(0xbfb00088);
+ regWrite32(0xbfb00088, (tmp | ((1<<29) | (1<<26))));
+ mdelay(1);
+#endif
+ }else{
+ tmp = regRead32(0xbfb80000);
+ regWrite32(0xbfb80000, (tmp | (1<<1)));
+ mdelay(1);
+ tmp = regRead32(0xbfb80000);
+ regWrite32(0xbfb80000, (tmp & ~(1<<1)));
+ mdelay(1);
+ }
+
+ if (isRT63365 && (isFPGA)) {
+ //FPGA mode
+ tmp = regRead32(0xbfbc0028);
+ regWrite32(0xbfbc0028, 0x60068880);
+ regWrite32(0xbfbc0004, 0x08000002);
+ regWrite32(0xbfbc0008, 0x00000700);
+ regWrite32(0xbfbc0000, 0x00160106);
+ regWrite32(0xbfbc0028, tmp);
+ }
+
+ //wait device link up
+ mdelay(250);
+ printk("check pcie link up status: \n");
+ printk("isRC0_LINKUP=%d \n",isRC0_LINKUP);
+ if(isMT751020){
+ printk("isRC1_LINKUP=%d \n",isRC1_LINKUP);
+
+#if defined(TCSUPPORT_BONDING) && !defined(TCSUPPORT_CPU_EN7512)
+ printk("isSLAVERC_LINKUP=%d \n",isSLAVERC_LINKUP);
+#endif
+ if((!isRC0_LINKUP) && (!isRC1_LINKUP)){
+ printk("PCI-E RC0 & RC1 can not link up\n");
+ return -1;
+ }
+ }else{
+ if(dual_band_support){
+ printk("isRC1_LINKUP=%d \n",isRC1_LINKUP);
+ if((!isRC0_LINKUP) && (!isRC1_LINKUP)){
+ printk("PCI-E RC can not link up\n");
+ return -1;
+ }
+ }else{
+ if(!isRC0_LINKUP){
+ printk("PCI-E RC can not link up\n");
+ return -1;
+ }
+ }
+ }
+
+
+ if(isMT751020 || isMT7505){
+ pcie_soft_patch = DISABLE; //Enable after temp all config
+ if(isRC0_LINKUP){
+ //config PCI-E RC
+ regWrite32(0xbfb82010, 0xffff0001); //disable support BAR0
+
+ //change class PCI-PCI Bridge
+ regWrite32(0xbfb82034, 0x06040001);
+
+ //Enable CRC count .
+ regWrite32(KSEG1ADDR(pcie_config_addr), 0x118);
+ tmp = regRead32(KSEG1ADDR(pcie_config_data));
+ regWrite32(KSEG1ADDR(pcie_config_data), (tmp | (1<<8)));
+ pcie_phy_force_mode_en(ENABLE, 0);
+ rc0_is_exist = 1;
+ }else{
+ //disable port 0
+ tmp = regRead32(0xbfb00088);
+ regWrite32(0xbfb00088, (tmp & ~(1<<23)));
+ mdelay(1);
+ }
+
+ if(isRC1_LINKUP){
+ //config PCI-E RC
+ regWrite32(0xbfb83010, 0xffff0001); //disable support BAR0
+
+ //change class PCI-PCI Bridge
+ regWrite32(0xbfb83034, 0x06040001);
+
+ //Enable RC1 ECRC count . //bus0, dev1
+ regWrite32(KSEG1ADDR(pcie_config_addr), 0x80118);
+ tmp = regRead32(KSEG1ADDR(pcie_config_data));
+ regWrite32(KSEG1ADDR(pcie_config_data), (tmp | (1<<8)));
+ pcie_phy_force_mode_en(ENABLE, 1);
+ rc1_is_exist = 1;
+ }else{
+ //disable port 1
+ tmp = regRead32(0xbfb00088);
+ regWrite32(0xbfb00088, (tmp & ~(1<<22)));
+ mdelay(1);
+ }
+
+#if defined(TCSUPPORT_BONDING) && !defined(TCSUPPORT_CPU_EN7512)
+ if(isSLAVERC_LINKUP){
+ pcie_phy_force_mode_en(ENABLE, 2);
+ }else{
+ //disable slave RC
+ tmp = regRead32(0xbfb00088);
+ regWrite32(0xbfb00088, (tmp & ~(1<<24)));
+ mdelay(1);
+ }
+#endif
+ }else{
+ if(dual_band_support){
+ if(isRC0_LINKUP){
+ //config PCI-E RC
+ regWrite32(0xbfb82010, 0xffff0001); //disable support BAR0
+
+ //change class PCI-PCI Bridge
+ regWrite32(0xbfb82034, 0x06040001);
+
+ //Enable CRC count .
+ regWrite32(KSEG1ADDR(pcie_config_addr), 0x118);
+ tmp = regRead32(KSEG1ADDR(pcie_config_data));
+ regWrite32(KSEG1ADDR(pcie_config_data), (tmp | (1<<8)));
+ }else{
+ //disable port 0
+ tmp = regRead32(0xbfb00088);
+ regWrite32(0xbfb00088, (tmp & ~(1<<23)));
+ mdelay(1);
+ }
+
+ if(isRC1_LINKUP){
+ //config PCI-E RC
+ regWrite32(0xbfb83010, 0xffff0001); //disable support BAR0
+
+ //change class PCI-PCI Bridge
+ regWrite32(0xbfb83034, 0x06040001);
+
+ //Enable RC1 ECRC count . //bus0, dev1
+ regWrite32(KSEG1ADDR(pcie_config_addr), 0x80118);
+ tmp = regRead32(KSEG1ADDR(pcie_config_data));
+ regWrite32(KSEG1ADDR(pcie_config_data), (tmp | (1<<8)));
+ }else{
+ //disable port 1
+ tmp = regRead32(0xbfb00088);
+ regWrite32(0xbfb00088, (tmp & ~(1<<22)));
+ mdelay(1);
+ }
+ }else{
+ //config PCI-E RC
+ regWrite32(0xbfb82010, 0xffff0001); //disable support BAR0
+
+ //change class PCI-PCI Bridge
+ regWrite32(0xbfb82034, 0x06040001);
+
+ //Enable CRC count .
+ regWrite32(KSEG1ADDR(pcie_config_addr), 0x118);
+ tmp = regRead32(KSEG1ADDR(pcie_config_data));
+ regWrite32(KSEG1ADDR(pcie_config_data), (tmp | (1<<8)));
+ }
+ }
+#if defined(TCSUPPORT_CPU_RT63368) ||defined(TCSUPPORT_CPU_MT7520) || defined(TCSUPPORT_CPU_MT7510)|| defined(TCSUPPORT_CPU_MT7505)
+#ifdef TCSUPPORT_WLAN_MT7592_PCIE
+ preConfigLDO();
+#endif
+#endif
+ }
+ else if(isRT63165){
+
+ printk("RT63165_pcie_init\n");
+ tmp = regRead32(CR_AHB_PCIC);
+ regWrite32(CR_AHB_PCIC, (tmp | (1<<29)));
+ mdelay(1);
+ tmp = regRead32(CR_AHB_PCIC);
+ regWrite32(CR_AHB_PCIC, (tmp | (1<<30)));
+ mdelay(1);
+ //wait device link up
+ for(i=0 ; i<1000 ; i++){
+ mdelay(1);
+ if((regRead32(0xbfb81050) & 0x1) != 0){
+ break;
+ }
+ }
+ if(i == 1000){
+ printk("PCI-E RC can not link up\n");
+ return -1;
+ }
+ regWrite32(0xbfb81cf8, 0x0);
+ if((regRead32(0xbfb81cfc) & 0xffff) == 0xffff){
+ printk("No PCI-E device found\n");
+ return -1;
+ }
+
+ //config PCI-E RC
+ regWrite32(0xbfb81010, 0xffff0001); //not support BAR0
+ //check has device or not
+ regWrite32(0xbfb81034, 0x06040001); //change class PCI-PCI Bridge
+ //set pci-e burst size
+ //regWrite32(0xbfb81060, 0x3);
+ //Enable CRC count .
+ regWrite32(KSEG1ADDR(pcie_config_addr), 0x118);
+ tmp = regRead32(KSEG1ADDR(pcie_config_data));
+ regWrite32(KSEG1ADDR(pcie_config_data), (tmp | (1<<8)));
+ }else{
+ printk("tc3162_pcie_init\n");
+#ifdef CONFIG_MIPS_TC3262
+ regWrite32(0xbfb000b8, 0x00000001);
+#endif
+
+#if defined(CONFIG_MIPS_TC3162U) || defined(CONFIG_MIPS_TC3262)
+ /*pcie relate clock setting*/
+ tmp = regRead32(CR_AHB_SSR);
+ //tmp &= ~(1<<0 | 1<<2 | 1<<3 | 1<<4);
+ //tmp |= (1<<0 | 1<<2 | 1<<3 | 1<<4);
+ /*use internal clock,*/
+ tmp &= ~(1<<0 | 1<<2 | 1<<3);
+ tmp |= (1<<0 | 1<<2 | 1<<3);
+ regWrite32(CR_AHB_SSR, tmp);
+ mdelay(1);
+#endif
+
+ //tmp = regRead32(CR_AHB_PCIC);
+ //regWrite32(CR_AHB_PCIC), (tmp & ~(1<<29)));
+ //mdelay(5);
+ //tmp = regRead32(CR_AHB_PCIC);
+ //regWrite32(CR_AHB_PCIC), (tmp & ~(1<<30)));
+ //mdelay(5);
+
+ tmp = regRead32(CR_AHB_PCIC);
+ regWrite32(CR_AHB_PCIC, (tmp | (1<<29)));
+ mdelay(5);
+ tmp = regRead32(CR_AHB_PCIC);
+ regWrite32(CR_AHB_PCIC, (tmp | (1<<30)));
+ mdelay(5);
+
+#ifdef CONFIG_MIPS_TC3162U
+ /*work arround for pcie link up*/
+ regWrite32(PCIE_CONFIG_ADDR, 0x40);
+ regWrite32(PCIE_CONFIG_DATA, 0x20);
+#endif
+ /* PCI memory byte swap enable */
+ /*
+ tmp = regRead32(CR_AHB_PCIC);
+ regWrite32(CR_AHB_PCIC, (tmp | ((1<<24) | (1<<25))));
+ */
+ }
+
+ /* Set I/O resource limits. */
+ ioport_resource.end = 0x1fffffff;
+ iomem_resource.end = 0xffffffff;
+
+ if (isEN751221 || isEN751627)
+ {
+ register_pci_controller(&mt7512_pcie_controller);
+ pcie_api_init();
+ }
+ else
+ register_pci_controller(&tc3162_pcie_controller);
+
+#ifdef CONFIG_MIPS_TC3162U
+ /*Add AHB error monitor check*/
+ if(request_irq(ARBITER_ERR_INT, ahbErrIsr, 0, "AHB ERR", ahbErrIsr) != 0) {
+ printk("request ARBITER err isr error.\n");
+ }
+#endif
+ /*create a proc to check wifi dead or not*/
+ ahb_status_proc = create_proc_entry("tc3162/ahb_status", 0, NULL);
+ ahb_status_proc->read_proc = ahb_status_read_proc;
+ ahb_status_proc->write_proc = ahb_status_write_proc;
+
+#ifdef TCSUPPORT_RT63368_PCIE_RECOVER
+/*add two proc file,ahb_status_value only for test*/
+if(isRT63368){
+ /*create a proc to check AER*/
+ aer_status_proc = create_proc_entry("tc3162/pcie_error_detect_count", 0, NULL);
+ if(aer_status_proc != NULL){
+ aer_status_proc->read_proc = pcie_error_detect_count_read_proc;
+ aer_status_proc->write_proc = pcie_error_detect_count_write_proc;
+ }
+ //for test
+ /*create a proc to check AER*/
+ aer_status_proc = create_proc_entry("tc3162/ahb_status_value", 0, NULL);
+ if(aer_status_proc != NULL){
+ aer_status_proc->read_proc = ahb_status_value_count_read_proc;
+ aer_status_proc->write_proc = ahb_status_value_count_write_proc;
+ }
+ }
+#endif
+
+ /*------------create a proc to check ERR--------------------------------*/
+ if(isEN751221 || isEN751627){
+ aer_status_proc = create_proc_entry("tc3162/pcie_err_status", 0, NULL);
+ if(aer_status_proc != NULL){
+ aer_status_proc->read_proc = err_status_read_proc;
+ aer_status_proc->write_proc = err_status_write_proc;
+ }
+ }
+ /*-------------------------------------------------------------------*/
+
+ if(isMT751020 || isMT7505){
+
+ /*create a proc to check AER*/
+ aer_status_proc = create_proc_entry("tc3162/aer_status", 0, NULL);
+ if(aer_status_proc != NULL){
+ aer_status_proc->read_proc = aer_status_read_proc;
+ aer_status_proc->write_proc = aer_status_write_proc;
+ }
+ aer_status_proc = create_proc_entry("tc3162/pcie_reset", 0, NULL);
+ if(aer_status_proc != NULL){
+ aer_status_proc->read_proc = pcie_rcnum_read_proc;
+ aer_status_proc->write_proc = pcie_rcnum_write_proc;
+ }
+ aer_status_proc = create_proc_entry("tc3162/pcie_reset_cond", 0, NULL);
+ if(aer_status_proc != NULL){
+ aer_status_proc->read_proc = pcie_reset_con_read_proc;
+ aer_status_proc->write_proc = pcie_reset_con_write_proc;
+ }
+ create_proc_read_entry("tc3162/pcie_reset_count", 0, NULL, pcie_reset_count_read_proc, NULL);
+
+ }
+ return 0;
+}
+
+arch_initcall(tc3162_pcie_init);
Index: linux-3.18.21/arch/mips/pci/pcie-phy.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/pci/pcie-phy.c 2018-02-05 13:19:56.000000000 +0800
@@ -0,0 +1,473 @@
+#include <asm/tc3162/tc3162.h>
+#include <linux/delay.h>
+
+//#define PCIE_PHY_DEBUG 1
+
+#define ENABLE 1
+#define DISABLE 0
+
+#if defined(TCSUPPORT_BONDING)
+extern unsigned long slaveVirBaseAddr;
+#endif
+
+
+#if 0 //for FPGA auto scan timing
+#define PCIe_AUTO_SCAN_TIMING 1
+#define PCIE_7510_FPGA_TIMING_PHY_P0 12
+#define PCIE_7510_FPGA_TIMING_PHY_P1 13
+#define PHY_PORT0 0
+#define PHY_PORT1 1
+
+static int pcie_reset_link(check_addr)
+{
+ int i;
+ uint32 tmp;
+ //PCI-E reset
+ /* enabled PCIe port 1 */
+ VPint(0xbfb00088) |= (1<<22);
+ mdelay(1);
+ /*first reset to default*/
+ VPint(0xbfb00834) &= ~((1<<26) | (1<<27) | (1<<29));
+ mdelay(1);
+ VPint(0xbfb00834) |= ((1<<26) | (1<<27) | (1<<29));
+ mdelay(1);
+ VPint(0xbfb00834) &= ~((1<<26) | (1<<27) | (1<<29));
+ mdelay(1);
+
+	//release pcie device reset
+ VPint(0xbfb80000) &= ~((1<<1) | (1<<2));
+ mdelay(1);
+
+ //start to check pcie if link up
+ //wait device link up
+ for(i=1 ; i<601 ; i++){
+ mdelay(1);
+ if((VPint(check_addr) & 0x1) != 0){
+ break;
+ }
+ if((i%100)==0){//reset device PCIe MAC if not link up every 100ms biker_20120816
+ //pcie reset again
+ VPint(0xbfb80000) |= ((1<<1) | (1<<2));
+ mdelay(1);
+
+ VPint(0xbfb80000) &= ~((1<<1) | (1<<2));
+ mdelay(1);
+ }
+ }
+ if(i == 601)
+ return -1;
+ else
+ return 0;
+
+}
+#endif
+
+void pcie_phy_force_mode_en(char enable, char rc_num){
+
+ if(isFPGA)
+ return;
+
+ //printk("pcie_phy_force_mode_en %x %x \n",enable,rc_num);
+	if(enable == ENABLE) //assert the PHY force-mode bits for this RC
+ {
+ switch (rc_num){
+ case 0:
+ regWrite32(0xbfaf202c, 0x1010);
+ regWrite32(0xbfaf202c, 0x1090);
+ regWrite32(0xbfaf202c, 0x1010);
+ break;
+ case 1:
+ regWrite32(0xbfaf212c, 0x1010);
+ regWrite32(0xbfaf212c, 0x1090);
+ regWrite32(0xbfaf212c, 0x1010);
+ break;
+#if defined(TCSUPPORT_BONDING)
+
+ case 2: //external chip use for bonding
+ if(slaveVirBaseAddr != 0){
+ regWrite32(slaveVirBaseAddr + 0xaf202c, 0x1010);
+ regWrite32(slaveVirBaseAddr + 0xaf202c, 0x1090);
+ regWrite32(slaveVirBaseAddr + 0xaf202c, 0x1010);
+
+ regWrite32(slaveVirBaseAddr + 0xaf212c, 0x1010);
+ regWrite32(slaveVirBaseAddr + 0xaf212c, 0x1090);
+ regWrite32(slaveVirBaseAddr + 0xaf212c, 0x1010);
+ }
+ break;
+#endif
+ default:
+ printk("RC number %d Error !! \n",rc_num);
+ break;
+ }
+
+ }
+ else
+ {
+ switch (rc_num){
+ case 0:
+ regWrite32(0xbfaf202c, 0x0);
+ regWrite32(0xbfaf202c, 0x80);
+ regWrite32(0xbfaf202c, 0x0);
+ break;
+ case 1:
+ regWrite32(0xbfaf212c, 0x0);
+ regWrite32(0xbfaf212c, 0x80);
+ regWrite32(0xbfaf212c, 0x0);
+ break;
+#if defined(TCSUPPORT_BONDING)
+
+ case 2: //external
+ if(slaveVirBaseAddr != 0){
+ regWrite32(slaveVirBaseAddr + 0xaf202c, 0x0);
+ regWrite32(slaveVirBaseAddr + 0xaf202c, 0x80);
+ regWrite32(slaveVirBaseAddr + 0xaf202c, 0x0);
+
+ regWrite32(slaveVirBaseAddr + 0xaf212c, 0x0);
+ regWrite32(slaveVirBaseAddr + 0xaf212c, 0x80);
+ regWrite32(slaveVirBaseAddr + 0xaf212c, 0x0);
+ }
+ break;
+#endif
+ default:
+ printk("RC number %d Error !! \n",rc_num);
+ break;
+ }
+ }
+}
+ void init_60901(void)
+{
+ uint32 tmp;
+ #ifdef PCIE_PHY_DEBUG
+ printk("biker1_pcie_phy_init debug message: master init \n");
+ printk("debug: read address 0xbfaf24a0 (380389); value is: %lx\n",VPint(0xbfaf24a0));
+ printk("debug: read address 0xbfaf24a8 (E80E8); value is: %lx\n",VPint(0xbfaf24a8));
+ #endif
+ //init Master
+ //init 20MHz or 25MHz
+ tmp = (regRead32(0xbfb0008c)&(0x3 <<22))>>22;
+ if (tmp == 0x1 )
+ {
+ //xtal is 20MHz
+ #ifdef PCIE_PHY_DEBUG
+ printk("xtal is 20MHz \n");
+ #endif
+
+ //I2C 70 0xA8[11:00] 0x74 RW RG_PE1_H_LCDDS_SSC_DELTA //Improve SSC deviation
+ //tmp = regRead32(0xbfaf24a8);
+ regWrite32(0xbfaf24a8, 0x740074);
+ //VPint(0xbfaf24a8) = (VPint(0xbfaf24a8) & (~0xfff )) | (0x74);
+ mdelay(1);
+ //I2C 70 0xA8[23:16] 0x74 RW RG_PE1_H_LCDDS_SSC_DELTA1 //Improve SSC deviation
+ //VPint(0xbfaf24a8) = (VPint(0xbfaf24a8) & (~(0xfff <<16))) | (0x74<<16);
+ //I2C 70 0xA0[19:19] 0x00 RW RG_PE1_H_LCDDS_SSC_EN //Disable SSC
+ tmp = regRead32(0xbfaf24a0);
+ regWrite32(0xbfaf24a0, tmp & (~(0x1 <<19)) );
+ //VPint(0xbfaf24a0) = VPint(0xbfaf24a0) & (~(0x1 <<19)) ;
+ mdelay(1);
+ //I2C 70 0xA0[19:19] 0x01 RW RG_PE1_H_LCDDS_SSC_EN //Enable SSC
+ tmp = regRead32(0xbfaf24a0);
+ regWrite32(0xbfaf24a0, tmp | (0x1<<19) );
+ //VPint(0xbfaf24a0) = VPint(0xbfaf24a0) | (0x1<<19);
+ mdelay(1);
+ }else if(tmp == 0x0 )
+ {
+ //xtal is 25MHz
+ #ifdef PCIE_PHY_DEBUG
+ printk("xtal is 25MHz \n");
+ #endif
+// I2C 70 0xFC[31:24] 0x00 RW //Change bank address to 0x00
+// I2C 70 0x00[04:04] 0x01 RW rg_pe1_frc_phy_en //Force Port 0 disable control
+ regWrite32(0xbfaf2000, (regRead32(0xbfaf2000) &(~(0x1 <<4)) ) | (0x1<<4) );
+ mdelay(1);
+// I2C 70 0xFC[31:24] 0x01 RW //Change bank address to 0x01
+// I2C 70 0x00[04:04] 0x01 RW rg_pe1_frc_phy_en //Force Port 1 disable control
+ regWrite32(0xbfaf2100, (regRead32(0xbfaf2100) &(~(0x1 <<4)) ) | (0x1<<4) );
+ mdelay(1);
+
+// I2C 70 0xFC[31:24] 0x00 RW //Change bank address to 0x00
+// I2C 70 0x00[05:05] 0x00 RW rg_pe1_phy_en //Port 0 disable
+ regWrite32(0xbfaf2000, (regRead32(0xbfaf2000) &(~(0x1 <<5)) ) );
+ mdelay(1);
+
+// I2C 70 0xFC[31:24] 0x01 RW //Change bank address to 0x01
+// I2C 70 0x00[05:05] 0x00 RW rg_pe1_phy_en //Port 1 disable
+ regWrite32(0xbfaf2100, (regRead32(0xbfaf2100) &(~(0x1 <<5)) ) );
+ mdelay(1);
+
+// I2C 70 0xFC[31:24] 0x04 RW //Change bank address to 0x04
+// I2C 70 0x00[08:08] 0x01 RW rg_pe1_frc_h_xtal_type //
+ regWrite32(0xbfaf2400, (regRead32(0xbfaf2400) &(~(0x1 <<8)) ) | (0x1<<8) );
+ mdelay(1);
+
+// I2C 70 0x00[10:09] 0x00 RW rg_pe1_h_xtal_type //
+ regWrite32(0xbfaf2400, (regRead32(0xbfaf2400) &(~(0x3 <<9)) ) );
+ mdelay(1);
+
+// I2C 70 0x90[15:12] 0x0A RW RG_PE1_H_PLL_IR //
+ regWrite32(0xbfaf2490, (regRead32(0xbfaf2490) &(~(0xf <<12)) ) | (0xA<<12) );
+ mdelay(1);
+
+// I2C 70 0xAC[18:16] 0x01 RW RG_PE1_H_PLL_BR //
+ regWrite32(0xbfaf24AC, (regRead32(0xbfaf24AC) &(~(0x7 <<16)) ) | (0x1<<16) );
+ mdelay(1);
+
+// I2C 70 0x9C[30:16] 0x1900 RW RG_PE1_H_LCDDS_PCW_NCPO //
+ regWrite32(0xbfaf249C, (regRead32(0xbfaf249C) &(~(0x7fff <<16)) ) | (0x1900<<16) );
+ mdelay(1);
+
+// I2C 70 0x9C[31:31] 0x00 RW RG_PE1_H_LCDDS_PCW_NCPO_CHG //
+ regWrite32(0xbfaf249C, (regRead32(0xbfaf249C) &(~(0x1 <<31)) ) );
+ mdelay(1);
+
+// I2C 70 0x9C[31:31] 0x01 RW RG_PE1_H_LCDDS_PCW_NCPO_CHG //
+ regWrite32(0xbfaf249C, regRead32(0xbfaf249C) | (0x1<<31) );
+ mdelay(1);
+
+// I2C 70 0xA4[15:00] 0x018D RW RG_PE1_H_LCDDS_SSC_PRD //
+ regWrite32(0xbfaf24A4, (regRead32(0xbfaf24A4) &(~(0xffff )) ) | (0x18D) );
+ mdelay(1);
+
+// I2C 70 0xA8[11:00] 0x004A RW RG_PE1_H_LCDDS_SSC_DELTA //
+ regWrite32(0xbfaf24A8, (regRead32(0xbfaf24A8) &(~(0xfff )) ) | (0x4A) );
+ mdelay(1);
+
+// I2C 70 0xA8[27:16] 0x004A RW RG_PE1_H_LCDDS_SSC_DELTA1 //
+ regWrite32(0xbfaf24A8, (regRead32(0xbfaf24A8) &(~(0xfff <<16)) ) | (0x4A<<16) );
+ mdelay(1);
+
+// I2C 70 0x00[12:12] 0x01 RW rg_pe1_mstck_25m //value of mstck_25m when force mode enable
+ regWrite32(0xbfaf2400, (regRead32(0xbfaf2400) ) | (0x1<<12) );
+ mdelay(1);
+
+// I2C 70 0x00[11:11] 0x01 RW rg_pe1_frc_mstck_25m //force mode enable of mstck_25m
+ regWrite32(0xbfaf2400, (regRead32(0xbfaf2400) ) | (0x1<<11) );
+ mdelay(1);
+
+// I2C 70 0xFC[31:24] 0x00 RW //Change bank address to 0x00
+// I2C 70 0x00[05:05] 0x01 RW rg_pe1_phy_en //Port 0 enable
+ regWrite32(0xbfaf2000, (regRead32(0xbfaf2000) |(0x1 <<5) ) );
+ mdelay(1);
+// I2C 70 0xFC[31:24] 0x01 RW //Change bank address to 0x01
+// I2C 70 0x00[05:05] 0x01 RW rg_pe1_phy_en //Port 1 enable
+ regWrite32(0xbfaf2100, (regRead32(0xbfaf2100) |(0x1 <<5) ));
+ mdelay(1);
+
+// I2C 70 0xFC[31:24] 0x00 RW //Change bank address to 0x00
+// I2C 70 0x00[04:04] 0x00 RW rg_pe1_frc_phy_en //Force Port 0 disable control
+ //regWrite32(0xbfaf2000, (regRead32(0xbfaf2000) &(~(0x1 <<4)) ));
+ //mdelay(1);
+
+// I2C 70 0xFC[31:24] 0x01 RW //Change bank address to 0x01
+// I2C 70 0x00[04:04] 0x00 RW rg_pe1_frc_phy_en //Force Port 1 disable control
+ //regWrite32(0xbfaf2100, (regRead32(0xbfaf2100) &(~(0x1 <<4)) ));
+ //mdelay(1);
+
+// I2C 70 0xFC[31:24] 0x00 RW //Change bank address to 0x00
+
+ }
+ #ifdef PCIE_PHY_DEBUG
+ printk("pcie_phy_init debug message:\n");
+ printk("debug: read address 0xbfaf24a0 (380389); value is: %lx\n",VPint(0xbfaf24a0));
+ printk("debug: read address 0xbfaf24a8 (740074); value is: %lx\n",VPint(0xbfaf24a8));
+ #endif
+
+ //init Slave
+
+}
+
+#if 0 //for FPGA auto scan timing
+uint8 pcie_60901_auto_scan(uint8 PHY_PORT, uint8 win_range, uint8 win_loop, uint32 check_addr)
+{
+ uint8 win_init = 0;
+ uint8 win_start = 0;
+ uint8 win_len = 0;
+ uint8 max_win_start = 0;
+ uint8 max_win_len = 0;
+ uint8 i = 0;
+ uint8 timing = 0;
+
+ printk(">>>>>Start to scan PCIe timing setting<<<<<\n");
+ for (i=win_init; i<win_range; i++){
+ if (PHY_PORT == PHY_PORT0)
+ init_60901((uint32) (i%win_loop),0);
+ else
+ init_60901(0,(uint32) (i%win_loop));
+ //printk("timing setting: %2d ",i);
+ if (pcie_reset_link(check_addr) == 0){
+ win_len++;
+ if(win_len > max_win_len){
+ max_win_len = win_len;
+ max_win_start = win_start;
+ }
+ printk("Link up! \n");
+ }else{
+ win_len = 0;
+ win_start = i + 1;
+ printk("Not link \n");
+ }
+ }
+ timing = (uint8) ((max_win_start%win_loop) + ((max_win_len%win_loop)>>1));
+ printk("The pcie timing window start at : %d \n",max_win_start);
+ printk("Window length is : %d \n",max_win_len);
+ printk("The chosen PCIe: %d \n ",timing);
+ return timing;
+}
+#endif
+
+ void init_7505(void)
+{
+ //For WCN ch14 de-sence issue, default Disable SSC, bit 28 => 1
+ printk("Disable PCIe SSC\n");
+ regWrite32(0xbfaf2414, (regRead32(0xbfaf2414) |(0x1 <<28) ));
+
+ //change MSTSET from 1 to 5;
+ mdelay(1);
+ regWrite32(0xbfaf2498, ((regRead32(0xbfaf2498) &(~0xff) )|(0x5) ));
+
+ //decrease PLL BW to improve jitter; by biker_20140721; add 25MHz_20140815
+ mdelay(1);
+ regWrite32(0xbfaf2400, ((regRead32(0xbfaf2400) &(~(0x1<<8)) )|(0x1<<8) ));
+ mdelay(1);
+ regWrite32(0xbfaf2400, ((regRead32(0xbfaf2400) &(~(0x3<<9)) )|(0x0<<9) ));
+ mdelay(1);
+ regWrite32(0xbfaf2000, ((regRead32(0xbfaf2000) &(~(0x1<<4)) )|(0x1<<4) ));
+ mdelay(1);
+ regWrite32(0xbfaf2000, ((regRead32(0xbfaf2000) &(~(0x1<<5)) )|(0x0<<5) ));
+ mdelay(1);
+ regWrite32(0xbfaf24ac, ((regRead32(0xbfaf24ac) &(~(0x7<<16)) )|(0x3<<16) ));
+ mdelay(1);
+ regWrite32(0xbfaf24bc, ((regRead32(0xbfaf24bc) &(~(0xff<<24)) )|(0x64<<24) ));
+ mdelay(1);
+ regWrite32(0xbfaf2490, ((regRead32(0xbfaf2490) &(~(0xf<<12)) )|(0xa<<12) ));
+ mdelay(1);
+ regWrite32(0xbfaf2490, ((regRead32(0xbfaf2490) &(~(0x3<<6)) )|(0x0<<6) ));
+ mdelay(1);
+ regWrite32(0xbfaf24c0, ((regRead32(0xbfaf24c0) &(~(0xffffffff<<0)) )|(0x19000000<<0) ));
+ mdelay(1);
+ regWrite32(0xbfaf24a4, ((regRead32(0xbfaf24a4) &(~(0xffff<<0)) )|(0x18d<<0) ));
+ mdelay(1);
+ regWrite32(0xbfaf24a8, ((regRead32(0xbfaf24a8) &(~(0xfff<<0)) )|(0x4a<<0) ));
+ mdelay(1);
+ regWrite32(0xbfaf24a8, ((regRead32(0xbfaf24a8) &(~(0xfff<<16)) )|(0x4a<<16) ));
+ mdelay(1);
+ regWrite32(0xbfaf2000, ((regRead32(0xbfaf2000) &(~(0x1<<5)) )|(0x1<<5) ));
+ mdelay(1);
+ regWrite32(0xbfaf2000, ((regRead32(0xbfaf2000) &(~(0x1<<4)) )|(0x0<<4) ));
+ mdelay(1);
+
+
+ /*MT7505 input crystall is 40Mhz*/
+ //Fix frequence setting bug when hardware trap is 40MHz XTAL ;
+#if 0 //Already done in bootloader
+ if(VPint(CR_AHB_HWCONF)&(1<<1)){
+ VPint(0xbfaf2400) = (VPint(0xbfaf2400) | 0x100); /*force mode enable xtal type*/
+ }
+#endif
+
+}
+
+void init_751627(void)
+{
+ printk("PCIe 7516 PHY init \n");
+}
+void init_751221(void)
+{
+
+ //LCDDS_CLK_PH_INV
+ regWrite32(0xbfaf24a0, ((regRead32(0xbfaf24a0) &(~(0x1<<5)) )|(0x1<<5) ));
+ mdelay(1);
+
+ //7522 (7526c)
+ // EN7526F use gen1 port
+ // EN7521S/F no PCIe port, disable gen1 port
+
+ //7512
+ // EN7526F use gen1 port, disable gen2 port
+ // EN7521S/F no PCIe port, disable gen1 & gen2 port
+
+ if (isEN7526c) //7522 PHY setting
+ {
+ //empty
+ if(isEN7521S || isEN7521F) // Disable gen1 port, Biker_20170807
+ {
+ //disable gen1 port
+ printk("7522 - 21S/F disable PCIe gen1 port \n");
+ regWrite32(0xbfaf2000, ((regRead32(0xbfaf2000) )|(0x1<<4) )); //rg_pe1_frc_phy_en
+ mdelay(1);
+ }
+ }else // 7512, 7526 PHY setting
+ {
+ //Patch TxDetRx Timing for 7512 E1, from DR 20160421, Biker_20160516
+ regWrite32(0xbfac0a28, ((regRead32(0xbfac0a28) &(~(0x1ff<<9)) )|(0x10<<9) ));//rg_ssusb_rxdet_stb2_set[8:0]
+ mdelay(1);
+ regWrite32(0xbfac0a2c, ((regRead32(0xbfac0a2c) &(~0x1ff) )|0x10 ));//rg_ssusb_rxdet_stb2_set_p3[8:0]
+ mdelay(1);
+
+
+ if(isEN7512 || isEN7526F) //Disable gen2 port, Biker_20160516
+ {
+ //7512 is QFP IC, use gen1 port only, disable gen2 port for COC test
+ printk("7512 - 26F disable PCIe gen2 port \n");
+ regWrite32(0xbfac030c, (regRead32(0xbfac030c) |(0x1 <<31) )); //PHY IP_SW_RESET
+ mdelay(1);
+ }
+
+
+ if(isEN7521S || isEN7521F) // Disable gen1 & gen2 port, Biker_20160516
+ {
+ printk("7512 - 21S/F disable PCIe gen1 & gen2 port \n");
+ //disable gen2 port
+ regWrite32(0xbfac030c, (regRead32(0xbfac030c) |(0x1 <<31) )); //PHY IP_SW_RESET
+ mdelay(1);
+
+ //disable gen1 port
+ regWrite32(0xbfaf2000, ((regRead32(0xbfaf2000) )|(0x1<<4) )); //rg_pe1_frc_phy_en
+ mdelay(1);
+ }
+ }
+}
+
+int pciePhyInit(void){
+
+#if 0 //for FPGA auto scan timing
+#ifdef PCIe_AUTO_SCAN_TIMING
+ timing_p0 = pcie_60901_auto_scan(PHY_PORT0,36,18,0xbfb83050);
+ timing_p1 = pcie_60901_auto_scan(PHY_PORT1,36,18,0xbfb82050);
+#endif
+#endif
+ if(isFPGA) {
+
+ printk("PCIe PHY init for FPGA \n");
+
+ }else if(isEN751627) {
+
+ init_751627();
+ printk("PCIe driver version: 751627.1.20170815\n");
+
+ }else if(isEN751221) {
+
+ init_751221();
+ printk("PCIe driver version: 751221.5.20170807\n");
+
+ }else if(isMT7505) {
+
+ init_7505();
+ printk("PCIe driver version: 7505\n");
+
+ }else if(isMT751020){
+#if 0 //for FPGA auto scan timing
+#ifdef PCIe_AUTO_SCAN_TIMING
+ timing_p0 = pcie_60901_auto_scan(PHY_PORT0,36,18,0xbfb83050);
+ timing_p1 = pcie_60901_auto_scan(PHY_PORT1,36,18,0xbfb82050);
+#endif
+#endif
+ init_60901();
+ printk("PCIe driver version: 751020\n");
+
+ }else {
+ printk("Unknown chip ID for PCIe phy setting \n");
+ }
+
+ return 0;
+}
+
Index: linux-3.18.21/arch/mips/softfloat/Makefile
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/softfloat/Makefile 2018-02-05 13:19:56.000000000 +0800
@@ -0,0 +1,62 @@
+#
+# Makefile for MIPS-specific library files..
+#
+obj-y := softfloat.o
+
+# soft-float
+FPBIT_FUNCS = _pack_sf _unpack_sf _addsub_sf _mul_sf _div_sf \
+ _fpcmp_parts_sf _compare_sf _eq_sf _ne_sf _gt_sf _ge_sf \
+ _lt_sf _le_sf _unord_sf _si_to_sf _sf_to_si _negate_sf _make_sf \
+ _sf_to_df _sf_to_tf _thenan_sf _sf_to_usi _usi_to_sf
+
+DPBIT_FUNCS = _pack_df _unpack_df _addsub_df _mul_df _div_df \
+ _fpcmp_parts_df _compare_df _eq_df _ne_df _gt_df _ge_df \
+ _lt_df _le_df _unord_df _si_to_df _df_to_si _negate_df _make_df \
+ _df_to_sf _df_to_tf _thenan_df _df_to_usi _usi_to_df
+
+#LIB2FUNCS_1 = _muldi3 _negdi2 _lshrdi3 _ashldi3 _ashrdi3 _ffsdi2 _clz \
+# _cmpdi2 _ucmpdi2 _floatdidf _floatdisf _fixunsdfsi _fixunssfsi \
+# _fixunsdfdi _fixdfdi _fixunssfdi _fixsfdi _fixxfdi _fixunsxfdi
+LIB2FUNCS_1 = _muldi3 _negdi2 _ffsdi2 _clz \
+ _floatdidf _floatundidf _floatdisf _fixunsdfsi _fixunssfsi \
+ _fixunsdfdi _fixdfdi _fixunssfdi _fixsfdi _fixxfdi _fixunsxfdi
+
+
+LIB2FUNCS_2 = _floatdixf _fixunsxfsi _fixtfdi _fixunstfdi _floatditf \
+ _trampoline _absvsi2 _absvdi2 _addvsi3 \
+ _addvdi3 _subvsi3 _subvdi3 _mulvsi3 _mulvdi3 _negvsi2 _negvdi2
+# These might cause a divide overflow trap and so are compiled with
+# unwinder info.
+LIB2_DIVMOD_FUNCS = _divdi3 _moddi3 _udivdi3 _umoddi3 _udiv_w_sdiv _udivmoddi4
+
+DP_OBJS := $(patsubst %,%.o, $(DPBIT_FUNCS))
+FP_OBJS := $(patsubst %,%.o, $(FPBIT_FUNCS))
+GCC_OBJS := $(patsubst %,%.o, $(LIB2FUNCS_1)) $(patsubst %,%.o, $(LIB2FUNCS_2)) $(patsubst %,%.o, $(LIB2_DIVMOD_FUNCS))
+softfloat-objs := $(DP_OBJS) $(FP_OBJS) $(GCC_OBJS)
+clean-files := dp-bits.c fp-bits.c
+# end softfloat
+
+# soft-float
+$(addprefix $(obj)/,$(patsubst %,%.o, $(DPBIT_FUNCS))): $(obj)/dp-bits.c
+ $(CC) $(c_flags) -c $(NOSTDINC_FLAGS) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -DFINE_GRAINED_LIBRARIES -DL$(notdir $(patsubst %.o,%, $@)) $(obj)/dp-bits.c -o $@
+
+$(addprefix $(obj)/,$(patsubst %,%.o, $(FPBIT_FUNCS))): $(obj)/fp-bits.c
+ $(CC) $(c_flags) -c $(NOSTDINC_FLAGS) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -DFINE_GRAINED_LIBRARIES -DL$(notdir $(patsubst %.o,%, $@)) $(obj)/fp-bits.c -o $@
+
+$(addprefix $(obj)/,$(patsubst %,%.o, $(LIB2FUNCS_1))): $(obj)/libgcc2.c
+ $(CC) $(c_flags) -c $(NOSTDINC_FLAGS) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -DFINE_GRAINED_LIBRARIES -DL$(notdir $(patsubst %.o,%, $@)) $(obj)/libgcc2.c -o $@
+
+$(addprefix $(obj)/,$(patsubst %,%.o, $(LIB2FUNCS_2))): $(obj)/libgcc2.c
+ $(CC) $(c_flags) -c $(NOSTDINC_FLAGS) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -DFINE_GRAINED_LIBRARIES -DL$(notdir $(patsubst %.o,%, $@)) $(obj)/libgcc2.c -o $@
+$(addprefix $(obj)/,$(patsubst %,%.o, $(LIB2_DIVMOD_FUNCS))): $(obj)/libgcc2.c
+ $(CC) $(c_flags) -c $(NOSTDINC_FLAGS) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -DFINE_GRAINED_LIBRARIES -DL$(notdir $(patsubst %.o,%, $@)) $(obj)/libgcc2.c -o $@
+$(obj)/dp-bits.c: $(src)/fp-bit.c
+ cp $(src)/fp-bit.c $(obj)/dp-bits.c
+
+$(obj)/fp-bits.c: $(src)/fp-bit.c
+ -echo '#define FLOAT' > $@
+ cat $< >> $@
+
+# end softfloat
+
+
Index: linux-3.18.21/arch/mips/softfloat/fp-bit.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/softfloat/fp-bit.c 2018-02-05 13:19:56.000000000 +0800
@@ -0,0 +1,1643 @@
+/* This is a software floating point library which can be used
+ for targets without hardware floating point.
+ Copyright (C) 1994, 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003
+ Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file with other programs, and to distribute
+those programs without any restriction coming from the use of this
+file. (The General Public License restrictions do apply in other
+respects; for example, they cover modification of the file, and
+distribution when not linked into another program.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+/* This implements IEEE 754 format arithmetic, but does not provide a
+ mechanism for setting the rounding mode, or for generating or handling
+ exceptions.
+
+ The original code by Steve Chamberlain, hacked by Mark Eichin and Jim
+ Wilson, all of Cygnus Support. */
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+/* The intended way to use this file is to make two copies, add `#define FLOAT'
+ to one copy, then compile both copies and add them to libgcc.a. */
+#ifndef CONFIG_MIPS_TC3262
+#define US_SOFTWARE_GOFAST
+#endif
+#include "tconfig.h"
+#include "fp-bit.h"
+
+/* The following macros can be defined to change the behavior of this file:
+ FLOAT: Implement a `float', aka SFmode, fp library. If this is not
+ defined, then this file implements a `double', aka DFmode, fp library.
+ FLOAT_ONLY: Used with FLOAT, to implement a `float' only library, i.e.
+ don't include float->double conversion which requires the double library.
+ This is useful only for machines which can't support doubles, e.g. some
+ 8-bit processors.
+ CMPtype: Specify the type that floating point compares should return.
+ This defaults to SItype, aka int.
+ US_SOFTWARE_GOFAST: This makes all entry points use the same names as the
+ US Software goFast library.
+ _DEBUG_BITFLOAT: This makes debugging the code a little easier, by adding
+ two integers to the FLO_union_type.
+ NO_DENORMALS: Disable handling of denormals.
+ NO_NANS: Disable nan and infinity handling
+ SMALL_MACHINE: Useful when operations on QIs and HIs are faster
+ than on an SI */
+
+/* We don't currently support extended floats (long doubles) on machines
+ without hardware to deal with them.
+
+ These stubs are just to keep the linker from complaining about unresolved
+ references which can be pulled in from libio & libstdc++, even if the
+ user isn't using long doubles. However, they may generate an unresolved
+ external to abort if abort is not used by the function, and the stubs
+ are referenced from within libc, since libgcc goes before and after the
+ system library. */
+
+#ifdef DECLARE_LIBRARY_RENAMES
+ DECLARE_LIBRARY_RENAMES
+#endif
+
+#ifdef EXTENDED_FLOAT_STUBS
+extern void abort (void);
+void __extendsfxf2 (void) { abort(); }
+void __extenddfxf2 (void) { abort(); }
+void __truncxfdf2 (void) { abort(); }
+void __truncxfsf2 (void) { abort(); }
+void __fixxfsi (void) { abort(); }
+void __floatsixf (void) { abort(); }
+void __addxf3 (void) { abort(); }
+void __subxf3 (void) { abort(); }
+void __mulxf3 (void) { abort(); }
+void __divxf3 (void) { abort(); }
+void __negxf2 (void) { abort(); }
+void __eqxf2 (void) { abort(); }
+void __nexf2 (void) { abort(); }
+void __gtxf2 (void) { abort(); }
+void __gexf2 (void) { abort(); }
+void __lexf2 (void) { abort(); }
+void __ltxf2 (void) { abort(); }
+
+void __extendsftf2 (void) { abort(); }
+void __extenddftf2 (void) { abort(); }
+void __trunctfdf2 (void) { abort(); }
+void __trunctfsf2 (void) { abort(); }
+void __fixtfsi (void) { abort(); }
+void __floatsitf (void) { abort(); }
+void __addtf3 (void) { abort(); }
+void __subtf3 (void) { abort(); }
+void __multf3 (void) { abort(); }
+void __divtf3 (void) { abort(); }
+void __negtf2 (void) { abort(); }
+void __eqtf2 (void) { abort(); }
+void __netf2 (void) { abort(); }
+void __gttf2 (void) { abort(); }
+void __getf2 (void) { abort(); }
+void __letf2 (void) { abort(); }
+void __lttf2 (void) { abort(); }
+#else /* !EXTENDED_FLOAT_STUBS, rest of file */
+
+/* IEEE "special" number predicates */
+
+#ifdef NO_NANS
+
+#define nan() 0
+#define isnan(x) 0
+#define isinf(x) 0
+#else
+
+#if defined L_thenan_sf
+const fp_number_type __thenan_sf = { CLASS_SNAN, 0, 0, {(fractype) 0} };
+#elif defined L_thenan_df
+const fp_number_type __thenan_df = { CLASS_SNAN, 0, 0, {(fractype) 0} };
+#elif defined L_thenan_tf
+const fp_number_type __thenan_tf = { CLASS_SNAN, 0, 0, {(fractype) 0} };
+#elif defined TFLOAT
+extern const fp_number_type __thenan_tf;
+#elif defined FLOAT
+extern const fp_number_type __thenan_sf;
+#else
+extern const fp_number_type __thenan_df;
+#endif
+
+INLINE
+static fp_number_type *
+nan (void)
+{
+ /* Discard the const qualifier... */
+#ifdef TFLOAT
+ return (fp_number_type *) (& __thenan_tf);
+#elif defined FLOAT
+ return (fp_number_type *) (& __thenan_sf);
+#else
+ return (fp_number_type *) (& __thenan_df);
+#endif
+}
+
+INLINE
+static int
+isnan ( fp_number_type * x)
+{
+ return x->class == CLASS_SNAN || x->class == CLASS_QNAN;
+}
+
+INLINE
+static int
+isinf ( fp_number_type * x)
+{
+ return x->class == CLASS_INFINITY;
+}
+
+#endif /* NO_NANS */
+
+INLINE
+static int
+iszero ( fp_number_type * x)
+{
+ return x->class == CLASS_ZERO;
+}
+
+INLINE
+static void
+flip_sign ( fp_number_type * x)
+{
+ x->sign = !x->sign;
+}
+
+extern FLO_type pack_d ( fp_number_type * );
+
+#if defined(L_pack_df) || defined(L_pack_sf) || defined(L_pack_tf)
+FLO_type
+pack_d ( fp_number_type * src)
+{
+ FLO_union_type dst;
+ fractype fraction = src->fraction.ll; /* wasn't unsigned before? */
+ int sign = src->sign;
+ int exp = 0;
+
+ if (LARGEST_EXPONENT_IS_NORMAL (FRAC_NBITS) && (isnan (src) || isinf (src)))
+ {
+ /* We can't represent these values accurately. By using the
+ largest possible magnitude, we guarantee that the conversion
+ of infinity is at least as big as any finite number. */
+ exp = EXPMAX;
+ fraction = ((fractype) 1 << FRACBITS) - 1;
+ }
+ else if (isnan (src))
+ {
+ exp = EXPMAX;
+ if (src->class == CLASS_QNAN || 1)
+ {
+ fraction |= QUIET_NAN;
+ }
+ }
+ else if (isinf (src))
+ {
+ exp = EXPMAX;
+ fraction = 0;
+ }
+ else if (iszero (src))
+ {
+ exp = 0;
+ fraction = 0;
+ }
+ else if (fraction == 0)
+ {
+ exp = 0;
+ }
+ else
+ {
+ if (src->normal_exp < NORMAL_EXPMIN)
+ {
+#ifdef NO_DENORMALS
+ /* Go straight to a zero representation if denormals are not
+ supported. The denormal handling would be harmless but
+	     isn't necessary.  */
+ exp = 0;
+ fraction = 0;
+#else /* NO_DENORMALS */
+ /* This number's exponent is too low to fit into the bits
+ available in the number, so we'll store 0 in the exponent and
+ shift the fraction to the right to make up for it. */
+
+ int shift = NORMAL_EXPMIN - src->normal_exp;
+
+ exp = 0;
+
+ if (shift > FRAC_NBITS - NGARDS)
+ {
+	      /* No point shifting, since it's more than 64 out.  */
+ fraction = 0;
+ }
+ else
+ {
+ int lowbit = (fraction & (((fractype)1 << shift) - 1)) ? 1 : 0;
+ fraction = (fraction >> shift) | lowbit;
+ }
+ if ((fraction & GARDMASK) == GARDMSB)
+ {
+ if ((fraction & (1 << NGARDS)))
+ fraction += GARDROUND + 1;
+ }
+ else
+ {
+ /* Add to the guards to round up. */
+ fraction += GARDROUND;
+ }
+ /* Perhaps the rounding means we now need to change the
+ exponent, because the fraction is no longer denormal. */
+ if (fraction >= IMPLICIT_1)
+ {
+ exp += 1;
+ }
+ fraction >>= NGARDS;
+#endif /* NO_DENORMALS */
+ }
+ else if (!LARGEST_EXPONENT_IS_NORMAL (FRAC_NBITS)
+ && src->normal_exp > EXPBIAS)
+ {
+ exp = EXPMAX;
+ fraction = 0;
+ }
+ else
+ {
+ exp = src->normal_exp + EXPBIAS;
+ if (!ROUND_TOWARDS_ZERO)
+ {
+	      /* If the guard bits are all zero except the first, then we're
+ half way between two numbers, choose the one which makes the
+ lsb of the answer 0. */
+ if ((fraction & GARDMASK) == GARDMSB)
+ {
+ if (fraction & (1 << NGARDS))
+ fraction += GARDROUND + 1;
+ }
+ else
+ {
+ /* Add a one to the guards to round up */
+ fraction += GARDROUND;
+ }
+ if (fraction >= IMPLICIT_2)
+ {
+ fraction >>= 1;
+ exp += 1;
+ }
+ }
+ fraction >>= NGARDS;
+
+ if (LARGEST_EXPONENT_IS_NORMAL (FRAC_NBITS) && exp > EXPMAX)
+ {
+ /* Saturate on overflow. */
+ exp = EXPMAX;
+ fraction = ((fractype) 1 << FRACBITS) - 1;
+ }
+ }
+ }
+
+ /* We previously used bitfields to store the number, but this doesn't
+ handle little/big endian systems conveniently, so use shifts and
+ masks */
+#ifdef FLOAT_BIT_ORDER_MISMATCH
+ dst.bits.fraction = fraction;
+ dst.bits.exp = exp;
+ dst.bits.sign = sign;
+#else
+# if defined TFLOAT && defined HALFFRACBITS
+ {
+ halffractype high, low;
+
+ high = (fraction >> (FRACBITS - HALFFRACBITS));
+ high &= (((fractype)1) << HALFFRACBITS) - 1;
+ high |= ((fractype) (exp & ((1 << EXPBITS) - 1))) << HALFFRACBITS;
+ high |= ((fractype) (sign & 1)) << (HALFFRACBITS | EXPBITS);
+
+ low = (halffractype)fraction &
+ ((((halffractype)1) << (FRACBITS - HALFFRACBITS)) - 1);
+
+ if (exp == EXPMAX || exp == 0 || low == 0)
+ low = 0;
+ else
+ {
+ exp -= HALFFRACBITS + 1;
+
+ while (exp > 0
+ && low < ((halffractype)1 << HALFFRACBITS))
+ {
+ low <<= 1;
+ exp--;
+ }
+
+ if (exp <= 0)
+ {
+ halffractype roundmsb, round;
+
+ exp = -exp + 1;
+
+ roundmsb = (1 << (exp - 1));
+ round = low & ((roundmsb << 1) - 1);
+
+ low >>= exp;
+ exp = 0;
+
+ if (round > roundmsb || (round == roundmsb && (low & 1)))
+ {
+ low++;
+ if (low >= ((halffractype)1 << HALFFRACBITS))
+ /* We don't shift left, since it has just become the
+ smallest normal number, whose implicit 1 bit is
+ now indicated by the non-zero exponent. */
+ exp++;
+ }
+ }
+
+ low &= ((halffractype)1 << HALFFRACBITS) - 1;
+ low |= ((fractype) (exp & ((1 << EXPBITS) - 1))) << HALFFRACBITS;
+ low |= ((fractype) (sign & 1)) << (HALFFRACBITS | EXPBITS);
+ }
+
+ dst.value_raw = (((fractype) high) << HALFSHIFT) | low;
+ }
+# else
+ dst.value_raw = fraction & ((((fractype)1) << FRACBITS) - (fractype)1);
+ dst.value_raw |= ((fractype) (exp & ((1 << EXPBITS) - 1))) << FRACBITS;
+ dst.value_raw |= ((fractype) (sign & 1)) << (FRACBITS | EXPBITS);
+# endif
+#endif
+
+#if defined(FLOAT_WORD_ORDER_MISMATCH) && !defined(FLOAT)
+#ifdef TFLOAT
+ {
+ qrtrfractype tmp1 = dst.words[0];
+ qrtrfractype tmp2 = dst.words[1];
+ dst.words[0] = dst.words[3];
+ dst.words[1] = dst.words[2];
+ dst.words[2] = tmp2;
+ dst.words[3] = tmp1;
+ }
+#else
+ {
+ halffractype tmp = dst.words[0];
+ dst.words[0] = dst.words[1];
+ dst.words[1] = tmp;
+ }
+#endif
+#endif
+
+ return dst.value;
+}
+#endif
+
+#if defined(L_unpack_df) || defined(L_unpack_sf) || defined(L_unpack_tf)
+void
+unpack_d (FLO_union_type * src, fp_number_type * dst)
+{
+ /* We previously used bitfields to store the number, but this doesn't
+ handle little/big endian systems conveniently, so use shifts and
+ masks */
+ fractype fraction;
+ int exp;
+ int sign;
+
+#if defined(FLOAT_WORD_ORDER_MISMATCH) && !defined(FLOAT)
+ FLO_union_type swapped;
+
+#ifdef TFLOAT
+ swapped.words[0] = src->words[3];
+ swapped.words[1] = src->words[2];
+ swapped.words[2] = src->words[1];
+ swapped.words[3] = src->words[0];
+#else
+ swapped.words[0] = src->words[1];
+ swapped.words[1] = src->words[0];
+#endif
+ src = &swapped;
+#endif
+
+#ifdef FLOAT_BIT_ORDER_MISMATCH
+ fraction = src->bits.fraction;
+ exp = src->bits.exp;
+ sign = src->bits.sign;
+#else
+# if defined TFLOAT && defined HALFFRACBITS
+ {
+ halffractype high, low;
+
+ high = src->value_raw >> HALFSHIFT;
+ low = src->value_raw & (((fractype)1 << HALFSHIFT) - 1);
+
+ fraction = high & ((((fractype)1) << HALFFRACBITS) - 1);
+ fraction <<= FRACBITS - HALFFRACBITS;
+ exp = ((int)(high >> HALFFRACBITS)) & ((1 << EXPBITS) - 1);
+ sign = ((int)(high >> (((HALFFRACBITS + EXPBITS))))) & 1;
+
+ if (exp != EXPMAX && exp != 0 && low != 0)
+ {
+ int lowexp = ((int)(low >> HALFFRACBITS)) & ((1 << EXPBITS) - 1);
+ int lowsign = ((int)(low >> (((HALFFRACBITS + EXPBITS))))) & 1;
+ int shift;
+ fractype xlow;
+
+ xlow = low & ((((fractype)1) << HALFFRACBITS) - 1);
+ if (lowexp)
+ xlow |= (((halffractype)1) << HALFFRACBITS);
+ else
+ lowexp = 1;
+ shift = (FRACBITS - HALFFRACBITS) - (exp - lowexp);
+ if (shift > 0)
+ xlow <<= shift;
+ else if (shift < 0)
+ xlow >>= -shift;
+ if (sign == lowsign)
+ fraction += xlow;
+ else
+ fraction -= xlow;
+ }
+ }
+# else
+ fraction = src->value_raw & ((((fractype)1) << FRACBITS) - 1);
+ exp = ((int)(src->value_raw >> FRACBITS)) & ((1 << EXPBITS) - 1);
+ sign = ((int)(src->value_raw >> (FRACBITS + EXPBITS))) & 1;
+# endif
+#endif
+
+ dst->sign = sign;
+ if (exp == 0)
+ {
+ /* Hmm. Looks like 0 */
+ if (fraction == 0
+#ifdef NO_DENORMALS
+ || 1
+#endif
+ )
+ {
+ /* tastes like zero */
+ dst->class = CLASS_ZERO;
+ }
+ else
+ {
+ /* Zero exponent with nonzero fraction - it's denormalized,
+ so there isn't a leading implicit one - we'll shift it so
+ it gets one. */
+ dst->normal_exp = exp - EXPBIAS + 1;
+ fraction <<= NGARDS;
+
+ dst->class = CLASS_NUMBER;
+#if 1
+ while (fraction < IMPLICIT_1)
+ {
+ fraction <<= 1;
+ dst->normal_exp--;
+ }
+#endif
+ dst->fraction.ll = fraction;
+ }
+ }
+ else if (!LARGEST_EXPONENT_IS_NORMAL (FRAC_NBITS) && exp == EXPMAX)
+ {
+ /* Huge exponent*/
+ if (fraction == 0)
+ {
+ /* Attached to a zero fraction - means infinity */
+ dst->class = CLASS_INFINITY;
+ }
+ else
+ {
+ /* Nonzero fraction, means nan */
+ if (fraction & QUIET_NAN)
+ {
+ dst->class = CLASS_QNAN;
+ }
+ else
+ {
+ dst->class = CLASS_SNAN;
+ }
+ /* Keep the fraction part as the nan number */
+ dst->fraction.ll = fraction;
+ }
+ }
+ else
+ {
+ /* Nothing strange about this number */
+ dst->normal_exp = exp - EXPBIAS;
+ dst->class = CLASS_NUMBER;
+ dst->fraction.ll = (fraction << NGARDS) | IMPLICIT_1;
+ }
+}
+#endif /* L_unpack_df || L_unpack_sf */
+
+#if defined(L_addsub_sf) || defined(L_addsub_df) || defined(L_addsub_tf)
+static fp_number_type *
+_fpadd_parts (fp_number_type * a,
+ fp_number_type * b,
+ fp_number_type * tmp)
+{
+ intfrac tfraction;
+
+ /* Put commonly used fields in local variables. */
+ int a_normal_exp;
+ int b_normal_exp;
+ fractype a_fraction;
+ fractype b_fraction;
+
+ if (isnan (a))
+ {
+ return a;
+ }
+ if (isnan (b))
+ {
+ return b;
+ }
+ if (isinf (a))
+ {
+ /* Adding infinities with opposite signs yields a NaN. */
+ if (isinf (b) && a->sign != b->sign)
+ return nan ();
+ return a;
+ }
+ if (isinf (b))
+ {
+ return b;
+ }
+ if (iszero (b))
+ {
+ if (iszero (a))
+ {
+ *tmp = *a;
+ tmp->sign = a->sign & b->sign;
+ return tmp;
+ }
+ return a;
+ }
+ if (iszero (a))
+ {
+ return b;
+ }
+
+ /* Got two numbers. shift the smaller and increment the exponent till
+ they're the same */
+ {
+ int diff;
+
+ a_normal_exp = a->normal_exp;
+ b_normal_exp = b->normal_exp;
+ a_fraction = a->fraction.ll;
+ b_fraction = b->fraction.ll;
+
+ diff = a_normal_exp - b_normal_exp;
+
+ if (diff < 0)
+ diff = -diff;
+ if (diff < FRAC_NBITS)
+ {
+ /* ??? This does shifts one bit at a time. Optimize. */
+ while (a_normal_exp > b_normal_exp)
+ {
+ b_normal_exp++;
+ LSHIFT (b_fraction);
+ }
+ while (b_normal_exp > a_normal_exp)
+ {
+ a_normal_exp++;
+ LSHIFT (a_fraction);
+ }
+ }
+ else
+ {
+ /* Somethings's up.. choose the biggest */
+ if (a_normal_exp > b_normal_exp)
+ {
+ b_normal_exp = a_normal_exp;
+ b_fraction = 0;
+ }
+ else
+ {
+ a_normal_exp = b_normal_exp;
+ a_fraction = 0;
+ }
+ }
+ }
+
+ if (a->sign != b->sign)
+ {
+ if (a->sign)
+ {
+ tfraction = -a_fraction + b_fraction;
+ }
+ else
+ {
+ tfraction = a_fraction - b_fraction;
+ }
+ if (tfraction >= 0)
+ {
+ tmp->sign = 0;
+ tmp->normal_exp = a_normal_exp;
+ tmp->fraction.ll = tfraction;
+ }
+ else
+ {
+ tmp->sign = 1;
+ tmp->normal_exp = a_normal_exp;
+ tmp->fraction.ll = -tfraction;
+ }
+ /* and renormalize it */
+
+ while (tmp->fraction.ll < IMPLICIT_1 && tmp->fraction.ll)
+ {
+ tmp->fraction.ll <<= 1;
+ tmp->normal_exp--;
+ }
+ }
+ else
+ {
+ tmp->sign = a->sign;
+ tmp->normal_exp = a_normal_exp;
+ tmp->fraction.ll = a_fraction + b_fraction;
+ }
+ tmp->class = CLASS_NUMBER;
+ /* Now the fraction is added, we have to shift down to renormalize the
+ number */
+
+ if (tmp->fraction.ll >= IMPLICIT_2)
+ {
+ LSHIFT (tmp->fraction.ll);
+ tmp->normal_exp++;
+ }
+ return tmp;
+
+}
+
+FLO_type
+add (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ fp_number_type tmp;
+ fp_number_type *res;
+ FLO_union_type au, bu;
+
+ au.value = arg_a;
+ bu.value = arg_b;
+
+ unpack_d (&au, &a);
+ unpack_d (&bu, &b);
+
+ res = _fpadd_parts (&a, &b, &tmp);
+
+ return pack_d (res);
+}
+EXPORT_SYMBOL(add);
+
+FLO_type
+sub (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ fp_number_type tmp;
+ fp_number_type *res;
+ FLO_union_type au, bu;
+
+ au.value = arg_a;
+ bu.value = arg_b;
+
+ unpack_d (&au, &a);
+ unpack_d (&bu, &b);
+
+ b.sign ^= 1;
+
+ res = _fpadd_parts (&a, &b, &tmp);
+
+ return pack_d (res);
+}
+EXPORT_SYMBOL(sub);
+#endif /* L_addsub_sf || L_addsub_df */
+
+#if defined(L_mul_sf) || defined(L_mul_df) || defined(L_mul_tf)
+static inline __attribute__ ((__always_inline__)) fp_number_type *
+_fpmul_parts ( fp_number_type * a,
+ fp_number_type * b,
+ fp_number_type * tmp)
+{
+ fractype low = 0;
+ fractype high = 0;
+
+ if (isnan (a))
+ {
+ a->sign = a->sign != b->sign;
+ return a;
+ }
+ if (isnan (b))
+ {
+ b->sign = a->sign != b->sign;
+ return b;
+ }
+ if (isinf (a))
+ {
+ if (iszero (b))
+ return nan ();
+ a->sign = a->sign != b->sign;
+ return a;
+ }
+ if (isinf (b))
+ {
+ if (iszero (a))
+ {
+ return nan ();
+ }
+ b->sign = a->sign != b->sign;
+ return b;
+ }
+ if (iszero (a))
+ {
+ a->sign = a->sign != b->sign;
+ return a;
+ }
+ if (iszero (b))
+ {
+ b->sign = a->sign != b->sign;
+ return b;
+ }
+
+ /* Calculate the mantissa by multiplying both numbers to get a
+ twice-as-wide number. */
+ {
+#if defined(NO_DI_MODE) || defined(TFLOAT)
+ {
+ fractype x = a->fraction.ll;
+ fractype ylow = b->fraction.ll;
+ fractype yhigh = 0;
+ int bit;
+
+ /* ??? This does multiplies one bit at a time. Optimize. */
+ for (bit = 0; bit < FRAC_NBITS; bit++)
+ {
+ int carry;
+
+ if (x & 1)
+ {
+ carry = (low += ylow) < ylow;
+ high += yhigh + carry;
+ }
+ yhigh <<= 1;
+ if (ylow & FRACHIGH)
+ {
+ yhigh |= 1;
+ }
+ ylow <<= 1;
+ x >>= 1;
+ }
+ }
+#elif defined(FLOAT)
+ /* Multiplying two USIs to get a UDI, we're safe. */
+ {
+ UDItype answer = (UDItype)a->fraction.ll * (UDItype)b->fraction.ll;
+
+ high = answer >> BITS_PER_SI;
+ low = answer;
+ }
+#else
+ /* fractype is DImode, but we need the result to be twice as wide.
+ Assuming a widening multiply from DImode to TImode is not
+ available, build one by hand. */
+ {
+ USItype nl = a->fraction.ll;
+ USItype nh = a->fraction.ll >> BITS_PER_SI;
+ USItype ml = b->fraction.ll;
+ USItype mh = b->fraction.ll >> BITS_PER_SI;
+ UDItype pp_ll = (UDItype) ml * nl;
+ UDItype pp_hl = (UDItype) mh * nl;
+ UDItype pp_lh = (UDItype) ml * nh;
+ UDItype pp_hh = (UDItype) mh * nh;
+ UDItype res2 = 0;
+ UDItype res0 = 0;
+ UDItype ps_hh__ = pp_hl + pp_lh;
+ if (ps_hh__ < pp_hl)
+ res2 += (UDItype)1 << BITS_PER_SI;
+ pp_hl = (UDItype)(USItype)ps_hh__ << BITS_PER_SI;
+ res0 = pp_ll + pp_hl;
+ if (res0 < pp_ll)
+ res2++;
+ res2 += (ps_hh__ >> BITS_PER_SI) + pp_hh;
+ high = res2;
+ low = res0;
+ }
+#endif
+ }
+
+ tmp->normal_exp = a->normal_exp + b->normal_exp
+ + FRAC_NBITS - (FRACBITS + NGARDS);
+ tmp->sign = a->sign != b->sign;
+ while (high >= IMPLICIT_2)
+ {
+ tmp->normal_exp++;
+ if (high & 1)
+ {
+ low >>= 1;
+ low |= FRACHIGH;
+ }
+ high >>= 1;
+ }
+ while (high < IMPLICIT_1)
+ {
+ tmp->normal_exp--;
+
+ high <<= 1;
+ if (low & FRACHIGH)
+ high |= 1;
+ low <<= 1;
+ }
+ /* rounding is tricky. if we only round if it won't make us round later. */
+#if 0
+ if (low & FRACHIGH2)
+ {
+ if (((high & GARDMASK) != GARDMSB)
+ && (((high + 1) & GARDMASK) == GARDMSB))
+ {
+ /* don't round, it gets done again later. */
+ }
+ else
+ {
+ high++;
+ }
+ }
+#endif
+ if (!ROUND_TOWARDS_ZERO && (high & GARDMASK) == GARDMSB)
+ {
+ if (high & (1 << NGARDS))
+ {
+ /* half way, so round to even */
+ high += GARDROUND + 1;
+ }
+ else if (low)
+ {
+ /* but we really weren't half way */
+ high += GARDROUND + 1;
+ }
+ }
+ tmp->fraction.ll = high;
+ tmp->class = CLASS_NUMBER;
+ return tmp;
+}
+
+FLO_type
+multiply (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ fp_number_type tmp;
+ fp_number_type *res;
+ FLO_union_type au, bu;
+
+ au.value = arg_a;
+ bu.value = arg_b;
+
+ unpack_d (&au, &a);
+ unpack_d (&bu, &b);
+
+ res = _fpmul_parts (&a, &b, &tmp);
+
+ return pack_d (res);
+}
+EXPORT_SYMBOL(multiply);
+#endif /* L_mul_sf || L_mul_df */
+
+#if defined(L_div_sf) || defined(L_div_df) || defined(L_div_tf)
+static inline __attribute__ ((__always_inline__)) fp_number_type *
+_fpdiv_parts (fp_number_type * a,
+ fp_number_type * b)
+{
+ fractype bit;
+ fractype numerator;
+ fractype denominator;
+ fractype quotient;
+
+ if (isnan (a))
+ {
+ return a;
+ }
+ if (isnan (b))
+ {
+ return b;
+ }
+
+ a->sign = a->sign ^ b->sign;
+
+ if (isinf (a) || iszero (a))
+ {
+ if (a->class == b->class)
+ return nan ();
+ return a;
+ }
+
+ if (isinf (b))
+ {
+ a->fraction.ll = 0;
+ a->normal_exp = 0;
+ return a;
+ }
+ if (iszero (b))
+ {
+ a->class = CLASS_INFINITY;
+ return a;
+ }
+
+ /* Calculate the mantissa by multiplying both 64bit numbers to get a
+ 128 bit number */
+ {
+ /* quotient =
+ ( numerator / denominator) * 2^(numerator exponent - denominator exponent)
+ */
+
+ a->normal_exp = a->normal_exp - b->normal_exp;
+ numerator = a->fraction.ll;
+ denominator = b->fraction.ll;
+
+ if (numerator < denominator)
+ {
+ /* Fraction will be less than 1.0 */
+ numerator *= 2;
+ a->normal_exp--;
+ }
+ bit = IMPLICIT_1;
+ quotient = 0;
+ /* ??? Does divide one bit at a time. Optimize. */
+ while (bit)
+ {
+ if (numerator >= denominator)
+ {
+ quotient |= bit;
+ numerator -= denominator;
+ }
+ bit >>= 1;
+ numerator *= 2;
+ }
+
+ if (!ROUND_TOWARDS_ZERO && (quotient & GARDMASK) == GARDMSB)
+ {
+ if (quotient & (1 << NGARDS))
+ {
+ /* half way, so round to even */
+ quotient += GARDROUND + 1;
+ }
+ else if (numerator)
+ {
+ /* but we really weren't half way, more bits exist */
+ quotient += GARDROUND + 1;
+ }
+ }
+
+ a->fraction.ll = quotient;
+ return (a);
+ }
+}
+
+FLO_type
+divide (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ fp_number_type *res;
+ FLO_union_type au, bu;
+
+ au.value = arg_a;
+ bu.value = arg_b;
+
+ unpack_d (&au, &a);
+ unpack_d (&bu, &b);
+
+ res = _fpdiv_parts (&a, &b);
+
+ return pack_d (res);
+}
+EXPORT_SYMBOL(divide);
+#endif /* L_div_sf || L_div_df */
+
+#if defined(L_fpcmp_parts_sf) || defined(L_fpcmp_parts_df) \
+ || defined(L_fpcmp_parts_tf)
+/* according to the demo, fpcmp returns a comparison with 0... thus
+ a<b -> -1
+ a==b -> 0
+ a>b -> +1
+ */
+
+int
+__fpcmp_parts (fp_number_type * a, fp_number_type * b)
+{
+#if 0
+ /* either nan -> unordered. Must be checked outside of this routine. */
+ if (isnan (a) && isnan (b))
+ {
+ return 1; /* still unordered! */
+ }
+#endif
+
+ if (isnan (a) || isnan (b))
+ {
+ return 1; /* how to indicate unordered compare? */
+ }
+ if (isinf (a) && isinf (b))
+ {
+ /* +inf > -inf, but +inf != +inf */
+ /* b \a| +inf(0)| -inf(1)
+ ______\+--------+--------
+ +inf(0)| a==b(0)| a<b(-1)
+ -------+--------+--------
+ -inf(1)| a>b(1) | a==b(0)
+ -------+--------+--------
+ So since unordered must be nonzero, just line up the columns...
+ */
+ return b->sign - a->sign;
+ }
+ /* but not both... */
+ if (isinf (a))
+ {
+ return a->sign ? -1 : 1;
+ }
+ if (isinf (b))
+ {
+ return b->sign ? 1 : -1;
+ }
+ if (iszero (a) && iszero (b))
+ {
+ return 0;
+ }
+ if (iszero (a))
+ {
+ return b->sign ? 1 : -1;
+ }
+ if (iszero (b))
+ {
+ return a->sign ? -1 : 1;
+ }
+ /* now both are "normal". */
+ if (a->sign != b->sign)
+ {
+ /* opposite signs */
+ return a->sign ? -1 : 1;
+ }
+ /* same sign; exponents? */
+ if (a->normal_exp > b->normal_exp)
+ {
+ return a->sign ? -1 : 1;
+ }
+ if (a->normal_exp < b->normal_exp)
+ {
+ return a->sign ? 1 : -1;
+ }
+ /* same exponents; check size. */
+ if (a->fraction.ll > b->fraction.ll)
+ {
+ return a->sign ? -1 : 1;
+ }
+ if (a->fraction.ll < b->fraction.ll)
+ {
+ return a->sign ? 1 : -1;
+ }
+ /* after all that, they're equal. */
+ return 0;
+}
+#endif
+
+#if defined(L_compare_sf) || defined(L_compare_df) || defined(L_compoare_tf)
+CMPtype
+compare (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ FLO_union_type au, bu;
+
+ au.value = arg_a;
+ bu.value = arg_b;
+
+ unpack_d (&au, &a);
+ unpack_d (&bu, &b);
+
+ return __fpcmp_parts (&a, &b);
+}
+EXPORT_SYMBOL(compare);
+#endif /* L_compare_sf || L_compare_df */
+
+#ifndef US_SOFTWARE_GOFAST
+
+/* These should be optimized for their specific tasks someday. */
+
+#if defined(L_eq_sf) || defined(L_eq_df) || defined(L_eq_tf)
+CMPtype
+_eq_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ FLO_union_type au, bu;
+
+ au.value = arg_a;
+ bu.value = arg_b;
+
+ unpack_d (&au, &a);
+ unpack_d (&bu, &b);
+
+ if (isnan (&a) || isnan (&b))
+ return 1; /* false, truth == 0 */
+
+ return __fpcmp_parts (&a, &b) ;
+}
+EXPORT_SYMBOL(_eq_f2);
+#endif /* L_eq_sf || L_eq_df */
+
+#if defined(L_ne_sf) || defined(L_ne_df) || defined(L_ne_tf)
+CMPtype
+_ne_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ FLO_union_type au, bu;
+
+ au.value = arg_a;
+ bu.value = arg_b;
+
+ unpack_d (&au, &a);
+ unpack_d (&bu, &b);
+
+ if (isnan (&a) || isnan (&b))
+ return 1; /* true, truth != 0 */
+
+ return __fpcmp_parts (&a, &b) ;
+}
+EXPORT_SYMBOL(_ne_f2);
+#endif /* L_ne_sf || L_ne_df */
+
+#if defined(L_gt_sf) || defined(L_gt_df) || defined(L_gt_tf)
+CMPtype
+_gt_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ FLO_union_type au, bu;
+
+ au.value = arg_a;
+ bu.value = arg_b;
+
+ unpack_d (&au, &a);
+ unpack_d (&bu, &b);
+
+ if (isnan (&a) || isnan (&b))
+ return -1; /* false, truth > 0 */
+
+ return __fpcmp_parts (&a, &b);
+}
+EXPORT_SYMBOL(_gt_f2);
+#endif /* L_gt_sf || L_gt_df */
+
+#if defined(L_ge_sf) || defined(L_ge_df) || defined(L_ge_tf)
+CMPtype
+_ge_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ FLO_union_type au, bu;
+
+ au.value = arg_a;
+ bu.value = arg_b;
+
+ unpack_d (&au, &a);
+ unpack_d (&bu, &b);
+
+ if (isnan (&a) || isnan (&b))
+ return -1; /* false, truth >= 0 */
+ return __fpcmp_parts (&a, &b) ;
+}
+EXPORT_SYMBOL(_ge_f2);
+#endif /* L_ge_sf || L_ge_df */
+
+#if defined(L_lt_sf) || defined(L_lt_df) || defined(L_lt_tf)
+CMPtype
+_lt_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ FLO_union_type au, bu;
+
+ au.value = arg_a;
+ bu.value = arg_b;
+
+ unpack_d (&au, &a);
+ unpack_d (&bu, &b);
+
+ if (isnan (&a) || isnan (&b))
+ return 1; /* false, truth < 0 */
+
+ return __fpcmp_parts (&a, &b);
+}
+EXPORT_SYMBOL(_lt_f2);
+#endif /* L_lt_sf || L_lt_df */
+
+#if defined(L_le_sf) || defined(L_le_df) || defined(L_le_tf)
+CMPtype
+_le_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ FLO_union_type au, bu;
+
+ au.value = arg_a;
+ bu.value = arg_b;
+
+ unpack_d (&au, &a);
+ unpack_d (&bu, &b);
+
+ if (isnan (&a) || isnan (&b))
+ return 1; /* false, truth <= 0 */
+
+ return __fpcmp_parts (&a, &b) ;
+}
+EXPORT_SYMBOL(_le_f2);
+#endif /* L_le_sf || L_le_df */
+
+#endif /* ! US_SOFTWARE_GOFAST */
+
+#if defined(L_unord_sf) || defined(L_unord_df) || defined(L_unord_tf)
+CMPtype
+_unord_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ FLO_union_type au, bu;
+
+ au.value = arg_a;
+ bu.value = arg_b;
+
+ unpack_d (&au, &a);
+ unpack_d (&bu, &b);
+
+ return (isnan (&a) || isnan (&b));
+}
+EXPORT_SYMBOL(_unord_f2);
+#endif /* L_unord_sf || L_unord_df */
+
+#if defined(L_si_to_sf) || defined(L_si_to_df) || defined(L_si_to_tf)
+FLO_type
+si_to_float (SItype arg_a)
+{
+ fp_number_type in;
+
+ in.class = CLASS_NUMBER;
+ in.sign = arg_a < 0;
+ if (!arg_a)
+ {
+ in.class = CLASS_ZERO;
+ }
+ else
+ {
+ in.normal_exp = FRACBITS + NGARDS;
+ if (in.sign)
+ {
+ /* Special case for minint, since there is no +ve integer
+ representation for it */
+ if (arg_a == (- MAX_SI_INT - 1))
+ {
+ return (FLO_type)(- MAX_SI_INT - 1);
+ }
+ in.fraction.ll = (-arg_a);
+ }
+ else
+ in.fraction.ll = arg_a;
+
+ while (in.fraction.ll < ((fractype)1 << (FRACBITS + NGARDS)))
+ {
+ in.fraction.ll <<= 1;
+ in.normal_exp -= 1;
+ }
+ }
+ return pack_d (&in);
+}
+EXPORT_SYMBOL(si_to_float);
+#endif /* L_si_to_sf || L_si_to_df */
+
+#if defined(L_usi_to_sf) || defined(L_usi_to_df) || defined(L_usi_to_tf)
+FLO_type
+usi_to_float (USItype arg_a)
+{
+ fp_number_type in;
+
+ in.sign = 0;
+ if (!arg_a)
+ {
+ in.class = CLASS_ZERO;
+ }
+ else
+ {
+ in.class = CLASS_NUMBER;
+ in.normal_exp = FRACBITS + NGARDS;
+ in.fraction.ll = arg_a;
+
+ while (in.fraction.ll > ((fractype)1 << (FRACBITS + NGARDS)))
+ {
+ in.fraction.ll >>= 1;
+ in.normal_exp += 1;
+ }
+ while (in.fraction.ll < ((fractype)1 << (FRACBITS + NGARDS)))
+ {
+ in.fraction.ll <<= 1;
+ in.normal_exp -= 1;
+ }
+ }
+ return pack_d (&in);
+}
+EXPORT_SYMBOL(usi_to_float);
+#endif
+
+#if defined(L_sf_to_si) || defined(L_df_to_si) || defined(L_tf_to_si)
+SItype
+float_to_si (FLO_type arg_a)
+{
+ fp_number_type a;
+ SItype tmp;
+ FLO_union_type au;
+
+ au.value = arg_a;
+ unpack_d (&au, &a);
+
+ if (iszero (&a))
+ return 0;
+ if (isnan (&a))
+ return 0;
+ /* get reasonable MAX_SI_INT... */
+ if (isinf (&a))
+ return a.sign ? (-MAX_SI_INT)-1 : MAX_SI_INT;
+ /* it is a number, but a small one */
+ if (a.normal_exp < 0)
+ return 0;
+ if (a.normal_exp > BITS_PER_SI - 2)
+ return a.sign ? (-MAX_SI_INT)-1 : MAX_SI_INT;
+ tmp = a.fraction.ll >> ((FRACBITS + NGARDS) - a.normal_exp);
+ return a.sign ? (-tmp) : (tmp);
+}
+EXPORT_SYMBOL(float_to_si);
+#endif /* L_sf_to_si || L_df_to_si */
+
+#if defined(L_sf_to_usi) || defined(L_df_to_usi) || defined(L_tf_to_usi)
+#if defined US_SOFTWARE_GOFAST || defined(L_tf_to_usi)
+/* While libgcc2.c defines its own __fixunssfsi and __fixunsdfsi routines,
+ we also define them for GOFAST because the ones in libgcc2.c have the
+ wrong names and I'd rather define these here and keep GOFAST CYG-LOC's
+ out of libgcc2.c. We can't define these here if not GOFAST because then
+ there'd be duplicate copies. */
+
+USItype
+float_to_usi (FLO_type arg_a)
+{
+ fp_number_type a;
+ FLO_union_type au;
+
+ au.value = arg_a;
+ unpack_d (&au, &a);
+
+ if (iszero (&a))
+ return 0;
+ if (isnan (&a))
+ return 0;
+ /* it is a negative number */
+ if (a.sign)
+ return 0;
+ /* get reasonable MAX_USI_INT... */
+ if (isinf (&a))
+ return MAX_USI_INT;
+ /* it is a number, but a small one */
+ if (a.normal_exp < 0)
+ return 0;
+ if (a.normal_exp > BITS_PER_SI - 1)
+ return MAX_USI_INT;
+ else if (a.normal_exp > (FRACBITS + NGARDS))
+ return a.fraction.ll << (a.normal_exp - (FRACBITS + NGARDS));
+ else
+ return a.fraction.ll >> ((FRACBITS + NGARDS) - a.normal_exp);
+}
+EXPORT_SYMBOL(float_to_usi);
+#endif /* US_SOFTWARE_GOFAST */
+#endif /* L_sf_to_usi || L_df_to_usi */
+
+#if defined(L_negate_sf) || defined(L_negate_df) || defined(L_negate_tf)
+FLO_type
+negate (FLO_type arg_a)
+{
+ fp_number_type a;
+ FLO_union_type au;
+
+ au.value = arg_a;
+ unpack_d (&au, &a);
+
+ flip_sign (&a);
+ return pack_d (&a);
+}
+EXPORT_SYMBOL(negate);
+#endif /* L_negate_sf || L_negate_df */
+
+#ifdef FLOAT
+
+#if defined(L_make_sf)
+SFtype
+__make_fp(fp_class_type class,
+ unsigned int sign,
+ int exp,
+ USItype frac)
+{
+ fp_number_type in;
+
+ in.class = class;
+ in.sign = sign;
+ in.normal_exp = exp;
+ in.fraction.ll = frac;
+ return pack_d (&in);
+}
+#endif /* L_make_sf */
+
+#ifndef FLOAT_ONLY
+
+/* This enables one to build an fp library that supports float but not double.
+ Otherwise, we would get an undefined reference to __make_dp.
+ This is needed for some 8-bit ports that can't handle well values that
+ are 8-bytes in size, so we just don't support double for them at all. */
+
+#if defined(L_sf_to_df)
+DFtype
+sf_to_df (SFtype arg_a)
+{
+ fp_number_type in;
+ FLO_union_type au;
+
+ au.value = arg_a;
+ unpack_d (&au, &in);
+
+ return __make_dp (in.class, in.sign, in.normal_exp,
+ ((UDItype) in.fraction.ll) << F_D_BITOFF);
+}
+EXPORT_SYMBOL(sf_to_df);
+#endif /* L_sf_to_df */
+
+#if defined(L_sf_to_tf) && defined(TMODES)
+TFtype
+sf_to_tf (SFtype arg_a)
+{
+ fp_number_type in;
+ FLO_union_type au;
+
+ au.value = arg_a;
+ unpack_d (&au, &in);
+
+ return __make_tp (in.class, in.sign, in.normal_exp,
+ ((UTItype) in.fraction.ll) << F_T_BITOFF);
+}
+EXPORT_SYMBOL(sf_to_tf);
+#endif /* L_sf_to_df */
+
+#endif /* ! FLOAT_ONLY */
+#endif /* FLOAT */
+
+#ifndef FLOAT
+
+extern SFtype __make_fp (fp_class_type, unsigned int, int, USItype);
+
+#if defined(L_make_df)
+DFtype
+__make_dp (fp_class_type class, unsigned int sign, int exp, UDItype frac)
+{
+ fp_number_type in;
+
+ in.class = class;
+ in.sign = sign;
+ in.normal_exp = exp;
+ in.fraction.ll = frac;
+ return pack_d (&in);
+}
+#endif /* L_make_df */
+
+#if defined(L_df_to_sf)
+SFtype
+df_to_sf (DFtype arg_a)
+{
+ fp_number_type in;
+ USItype sffrac;
+ FLO_union_type au;
+
+ au.value = arg_a;
+ unpack_d (&au, &in);
+
+ sffrac = in.fraction.ll >> F_D_BITOFF;
+
+ /* We set the lowest guard bit in SFFRAC if we discarded any non
+ zero bits. */
+ if ((in.fraction.ll & (((USItype) 1 << F_D_BITOFF) - 1)) != 0)
+ sffrac |= 1;
+
+ return __make_fp (in.class, in.sign, in.normal_exp, sffrac);
+}
+EXPORT_SYMBOL(df_to_sf);
+#endif /* L_df_to_sf */
+
+#if defined(L_df_to_tf) && defined(TMODES) \
+ && !defined(FLOAT) && !defined(TFLOAT)
+TFtype
+df_to_tf (DFtype arg_a)
+{
+ fp_number_type in;
+ FLO_union_type au;
+
+ au.value = arg_a;
+ unpack_d (&au, &in);
+
+ return __make_tp (in.class, in.sign, in.normal_exp,
+ ((UTItype) in.fraction.ll) << D_T_BITOFF);
+}
+EXPORT_SYMBOL(df_to_tf);
+#endif /* L_sf_to_df */
+
+#ifdef TFLOAT
+#if defined(L_make_tf)
+TFtype
+__make_tp(fp_class_type class,
+ unsigned int sign,
+ int exp,
+ UTItype frac)
+{
+ fp_number_type in;
+
+ in.class = class;
+ in.sign = sign;
+ in.normal_exp = exp;
+ in.fraction.ll = frac;
+ return pack_d (&in);
+}
+#endif /* L_make_tf */
+
+#if defined(L_tf_to_df)
+DFtype
+tf_to_df (TFtype arg_a)
+{
+ fp_number_type in;
+ UDItype sffrac;
+ FLO_union_type au;
+
+ au.value = arg_a;
+ unpack_d (&au, &in);
+
+ sffrac = in.fraction.ll >> D_T_BITOFF;
+
+ /* We set the lowest guard bit in SFFRAC if we discarded any non
+ zero bits. */
+ if ((in.fraction.ll & (((UTItype) 1 << D_T_BITOFF) - 1)) != 0)
+ sffrac |= 1;
+
+ return __make_dp (in.class, in.sign, in.normal_exp, sffrac);
+}
+EXPORT_SYMBOL(tf_to_df);
+#endif /* L_tf_to_df */
+
+#if defined(L_tf_to_sf)
+SFtype
+tf_to_sf (TFtype arg_a)
+{
+ fp_number_type in;
+ USItype sffrac;
+ FLO_union_type au;
+
+ au.value = arg_a;
+ unpack_d (&au, &in);
+
+ sffrac = in.fraction.ll >> F_T_BITOFF;
+
+ /* We set the lowest guard bit in SFFRAC if we discarded any non
+ zero bits. */
+ if ((in.fraction.ll & (((UTItype) 1 << F_T_BITOFF) - 1)) != 0)
+ sffrac |= 1;
+
+ return __make_fp (in.class, in.sign, in.normal_exp, sffrac);
+}
+EXPORT_SYMBOL(tf_to_sf);
+#endif /* L_tf_to_sf */
+#endif /* TFLOAT */
+
+#endif /* ! FLOAT */
+#endif /* !EXTENDED_FLOAT_STUBS */
+
Index: linux-3.18.21/arch/mips/softfloat/fp-bit.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/softfloat/fp-bit.h 2018-02-05 13:19:56.000000000 +0800
@@ -0,0 +1,539 @@
+/* Header file for fp-bit.c. */
+/* Copyright (C) 2000, 2002, 2003
+ Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+#ifndef GCC_FP_BIT_H
+#define GCC_FP_BIT_H
+
+/* Defining FINE_GRAINED_LIBRARIES allows one to select which routines
+ from this file are compiled via additional -D options.
+
+ This avoids the need to pull in the entire fp emulation library
+ when only a small number of functions are needed.
+
+ If FINE_GRAINED_LIBRARIES is not defined, then compile every
+ suitable routine. */
+#ifndef FINE_GRAINED_LIBRARIES
+#define L_pack_df
+#define L_unpack_df
+#define L_pack_sf
+#define L_unpack_sf
+#define L_addsub_sf
+#define L_addsub_df
+#define L_mul_sf
+#define L_mul_df
+#define L_div_sf
+#define L_div_df
+#define L_fpcmp_parts_sf
+#define L_fpcmp_parts_df
+#define L_compare_sf
+#define L_compare_df
+#define L_eq_sf
+#define L_eq_df
+#define L_ne_sf
+#define L_ne_df
+#define L_gt_sf
+#define L_gt_df
+#define L_ge_sf
+#define L_ge_df
+#define L_lt_sf
+#define L_lt_df
+#define L_le_sf
+#define L_le_df
+#define L_unord_sf
+#define L_unord_df
+#define L_usi_to_sf
+#define L_usi_to_df
+#define L_si_to_sf
+#define L_si_to_df
+#define L_sf_to_si
+#define L_df_to_si
+#define L_f_to_usi
+#define L_df_to_usi
+#define L_negate_sf
+#define L_negate_df
+#define L_make_sf
+#define L_make_df
+#define L_sf_to_df
+#define L_df_to_sf
+#ifdef FLOAT
+#define L_thenan_sf
+#else
+#define L_thenan_df
+#endif
+#endif /* ! FINE_GRAINED_LIBRARIES */
+
+#if __LDBL_MANT_DIG__ == 113 || __LDBL_MANT_DIG__ == 106
+# define TMODES
+#endif
+
+typedef float SFtype __attribute__ ((mode (SF)));
+typedef float DFtype __attribute__ ((mode (DF)));
+#ifdef TMODES
+typedef float TFtype __attribute__ ((mode (TF)));
+#endif
+
+typedef int HItype __attribute__ ((mode (HI)));
+typedef int SItype __attribute__ ((mode (SI)));
+typedef int DItype __attribute__ ((mode (DI)));
+#ifdef TMODES
+typedef int TItype __attribute__ ((mode (TI)));
+#endif
+
+/* The type of the result of a fp compare */
+#ifndef CMPtype
+#define CMPtype SItype
+#endif
+
+typedef unsigned int UHItype __attribute__ ((mode (HI)));
+typedef unsigned int USItype __attribute__ ((mode (SI)));
+typedef unsigned int UDItype __attribute__ ((mode (DI)));
+#ifdef TMODES
+typedef unsigned int UTItype __attribute__ ((mode (TI)));
+#endif
+
+#define MAX_USI_INT (~(USItype)0)
+#define MAX_SI_INT ((SItype) (MAX_USI_INT >> 1))
+#define BITS_PER_SI (4 * BITS_PER_UNIT)
+#ifdef TMODES
+#define MAX_UDI_INT (~(UDItype)0)
+#define MAX_DI_INT ((DItype) (MAX_UDI_INT >> 1))
+#define BITS_PER_DI (8 * BITS_PER_UNIT)
+#endif
+
+#ifdef FLOAT_ONLY
+#define NO_DI_MODE
+#endif
+
+#ifdef TFLOAT
+# ifndef TMODES
+# error "TFLOAT requires long double to have 113 bits of mantissa"
+# endif
+
+# define PREFIXFPDP tp
+# define PREFIXSFDF tf
+# define NGARDS 10L /* Is this right? */
+# define GARDROUND 0x1ff
+# define GARDMASK 0x3ff
+# define GARDMSB 0x200
+# define FRAC_NBITS 128
+
+# if __LDBL_MANT_DIG__ == 113 /* IEEE quad */
+# define EXPBITS 15
+# define EXPBIAS 16383
+# define EXPMAX (0x7fff)
+# define QUIET_NAN ((TItype)0x8 << 108)
+# define FRACHIGH ((TItype)0x8 << 124)
+# define FRACHIGH2 ((TItype)0xc << 124)
+# define FRACBITS 112
+# endif
+
+# if __LDBL_MANT_DIG__ == 106 /* IBM extended (double+double) */
+# define EXPBITS 11
+# define EXPBIAS 1023
+# define EXPMAX (0x7ff)
+# define QUIET_NAN ((TItype)0x8 << (48 + 64))
+# define FRACHIGH ((TItype)0x8 << 124)
+# define FRACHIGH2 ((TItype)0xc << 124)
+# define FRACBITS 105
+# define HALFFRACBITS 52
+# define HALFSHIFT 64
+# endif
+
+# define pack_d __pack_t
+# define unpack_d __unpack_t
+# define __fpcmp_parts __fpcmp_parts_t
+ typedef UTItype fractype;
+ typedef UDItype halffractype;
+ typedef USItype qrtrfractype;
+#define qrtrfractype qrtrfractype
+ typedef TFtype FLO_type;
+ typedef TItype intfrac;
+#elif defined FLOAT
+# define NGARDS 7L
+# define GARDROUND 0x3f
+# define GARDMASK 0x7f
+# define GARDMSB 0x40
+# define EXPBITS 8
+# define EXPBIAS 127
+# define FRACBITS 23
+# define EXPMAX (0xff)
+# define QUIET_NAN 0x100000L
+# define FRAC_NBITS 32
+# define FRACHIGH 0x80000000L
+# define FRACHIGH2 0xc0000000L
+# define pack_d __pack_f
+# define unpack_d __unpack_f
+# define __fpcmp_parts __fpcmp_parts_f
+ typedef USItype fractype;
+ typedef UHItype halffractype;
+ typedef SFtype FLO_type;
+ typedef SItype intfrac;
+
+#else
+# define PREFIXFPDP dp
+# define PREFIXSFDF df
+# define NGARDS 8L
+# define GARDROUND 0x7f
+# define GARDMASK 0xff
+# define GARDMSB 0x80
+# define EXPBITS 11
+# define EXPBIAS 1023
+# define FRACBITS 52
+# define EXPMAX (0x7ff)
+# define QUIET_NAN 0x8000000000000LL
+# define FRAC_NBITS 64
+# define FRACHIGH 0x8000000000000000LL
+# define FRACHIGH2 0xc000000000000000LL
+# define pack_d __pack_d
+# define unpack_d __unpack_d
+# define __fpcmp_parts __fpcmp_parts_d
+ typedef UDItype fractype;
+ typedef USItype halffractype;
+ typedef DFtype FLO_type;
+ typedef DItype intfrac;
+#endif /* FLOAT */
+
+#ifdef US_SOFTWARE_GOFAST
+# ifdef TFLOAT
+# error "GOFAST TFmode not supported"
+# elif defined FLOAT
+# define add fpadd
+# define sub fpsub
+# define multiply fpmul
+# define divide fpdiv
+# define compare fpcmp
+# define _unord_f2 __unordsf2
+# define usi_to_float __floatunsisf
+# define si_to_float sitofp
+# define float_to_si fptosi
+# define float_to_usi fptoui
+# define negate __negsf2
+# define sf_to_df fptodp
+# define sf_to_tf __extendsftf2
+# else
+# define add dpadd
+# define sub dpsub
+# define multiply dpmul
+# define divide dpdiv
+# define compare dpcmp
+# define _unord_f2 __unorddf2
+# define usi_to_float __floatunsidf
+# define si_to_float litodp
+# define float_to_si dptoli
+# define float_to_usi dptoul
+# define negate __negdf2
+# define df_to_sf dptofp
+# define df_to_tf __extenddftf2
+# endif /* FLOAT */
+#else
+# ifdef TFLOAT
+# define add __addtf3
+# define sub __subtf3
+# define multiply __multf3
+# define divide __divtf3
+# define compare __cmptf2
+# define _eq_f2 __eqtf2
+# define _ne_f2 __netf2
+# define _gt_f2 __gttf2
+# define _ge_f2 __getf2
+# define _lt_f2 __lttf2
+# define _le_f2 __letf2
+# define _unord_f2 __unordtf2
+# define usi_to_float __floatunsitf
+# define si_to_float __floatsitf
+# define float_to_si __fixtfsi
+# define float_to_usi __fixunstfsi
+# define negate __negtf2
+# define tf_to_sf __trunctfsf2
+# define tf_to_df __trunctfdf2
+# elif defined FLOAT
+# define add __addsf3
+# define sub __subsf3
+# define multiply __mulsf3
+# define divide __divsf3
+# define compare __cmpsf2
+# define _eq_f2 __eqsf2
+# define _ne_f2 __nesf2
+# define _gt_f2 __gtsf2
+# define _ge_f2 __gesf2
+# define _lt_f2 __ltsf2
+# define _le_f2 __lesf2
+# define _unord_f2 __unordsf2
+# define usi_to_float __floatunsisf
+# define si_to_float __floatsisf
+# define float_to_si __fixsfsi
+# define float_to_usi __fixunssfsi
+# define negate __negsf2
+# define sf_to_df __extendsfdf2
+# define sf_to_tf __extendsftf2
+# else
+# define add __adddf3
+# define sub __subdf3
+# define multiply __muldf3
+# define divide __divdf3
+# define compare __cmpdf2
+# define _eq_f2 __eqdf2
+# define _ne_f2 __nedf2
+# define _gt_f2 __gtdf2
+# define _ge_f2 __gedf2
+# define _lt_f2 __ltdf2
+# define _le_f2 __ledf2
+# define _unord_f2 __unorddf2
+# define usi_to_float __floatunsidf
+# define si_to_float __floatsidf
+# define float_to_si __fixdfsi
+# define float_to_usi __fixunsdfsi
+# define negate __negdf2
+# define df_to_sf __truncdfsf2
+# define df_to_tf __extenddftf2
+# endif /* FLOAT */
+#endif /* US_SOFTWARE_GOFAST */
+
+#ifndef INLINE
+#define INLINE __inline__
+#endif
+
+/* Preserve the sticky-bit when shifting fractions to the right. */
+#define LSHIFT(a) { a = (a & 1) | (a >> 1); }
+
+/* numeric parameters */
+/* F_D_BITOFF is the number of bits offset between the MSB of the mantissa
+ of a float and of a double. Assumes there are only two float types.
+ (double::FRAC_BITS+double::NGARDS-(float::FRAC_BITS-float::NGARDS))
+ */
+#define F_D_BITOFF (52+8-(23+7))
+
+#ifdef TMODES
+# define F_T_BITOFF (__LDBL_MANT_DIG__-1+10-(23+7))
+# define D_T_BITOFF (__LDBL_MANT_DIG__-1+10-(52+8))
+#endif
+
+
+#define NORMAL_EXPMIN (-(EXPBIAS)+1)
+#define IMPLICIT_1 ((fractype)1<<(FRACBITS+NGARDS))
+#define IMPLICIT_2 ((fractype)1<<(FRACBITS+1+NGARDS))
+
+/* common types */
+
+typedef enum
+{
+ CLASS_SNAN,
+ CLASS_QNAN,
+ CLASS_ZERO,
+ CLASS_NUMBER,
+ CLASS_INFINITY
+} fp_class_type;
+
+typedef struct
+{
+#ifdef SMALL_MACHINE
+ char class;
+ unsigned char sign;
+ short normal_exp;
+#else
+ fp_class_type class;
+ unsigned int sign;
+ int normal_exp;
+#endif
+
+ union
+ {
+ fractype ll;
+ halffractype l[2];
+ } fraction;
+} fp_number_type;
+
+typedef union
+{
+ FLO_type value;
+ fractype value_raw;
+
+#ifndef FLOAT
+# ifdef qrtrfractype
+ qrtrfractype qwords[4];
+# else
+ halffractype words[2];
+# endif
+#endif
+
+#ifdef FLOAT_BIT_ORDER_MISMATCH
+ struct
+ {
+ fractype fraction:FRACBITS __attribute__ ((packed));
+ unsigned int exp:EXPBITS __attribute__ ((packed));
+ unsigned int sign:1 __attribute__ ((packed));
+ }
+ bits;
+#endif
+
+#ifdef _DEBUG_BITFLOAT
+ struct
+ {
+ unsigned int sign:1 __attribute__ ((packed));
+ unsigned int exp:EXPBITS __attribute__ ((packed));
+ fractype fraction:FRACBITS __attribute__ ((packed));
+ }
+ bits_big_endian;
+
+ struct
+ {
+ fractype fraction:FRACBITS __attribute__ ((packed));
+ unsigned int exp:EXPBITS __attribute__ ((packed));
+ unsigned int sign:1 __attribute__ ((packed));
+ }
+ bits_little_endian;
+#endif
+}
+FLO_union_type;
+
+/* Prototypes */
+
+#if defined(L_pack_df) || defined(L_pack_sf) || defined(L_pack_tf)
+extern FLO_type pack_d (fp_number_type *);
+#endif
+
+extern void unpack_d (FLO_union_type *, fp_number_type *);
+
+#if defined(L_addsub_sf) || defined(L_addsub_df) || defined(L_addsub_tf)
+extern FLO_type add (FLO_type, FLO_type);
+extern FLO_type sub (FLO_type, FLO_type);
+#endif
+
+#if defined(L_mul_sf) || defined(L_mul_df) || defined(L_mul_tf)
+extern FLO_type multiply (FLO_type, FLO_type);
+#endif
+
+#if defined(L_div_sf) || defined(L_div_df) || defined(L_div_tf)
+extern FLO_type divide (FLO_type, FLO_type);
+#endif
+
+extern int __fpcmp_parts (fp_number_type *, fp_number_type *);
+
+#if defined(L_compare_sf) || defined(L_compare_df) || defined(L_compare_tf)
+extern CMPtype compare (FLO_type, FLO_type);
+#endif
+
+#ifndef US_SOFTWARE_GOFAST
+
+#if defined(L_eq_sf) || defined(L_eq_df) || defined(L_eq_tf)
+extern CMPtype _eq_f2 (FLO_type, FLO_type);
+#endif
+
+#if defined(L_ne_sf) || defined(L_ne_df) || defined(L_ne_tf)
+extern CMPtype _ne_f2 (FLO_type, FLO_type);
+#endif
+
+#if defined(L_gt_sf) || defined(L_gt_df) || defined(L_gt_tf)
+extern CMPtype _gt_f2 (FLO_type, FLO_type);
+#endif
+
+#if defined(L_ge_sf) || defined(L_ge_df) || defined(L_ge_tf)
+extern CMPtype _ge_f2 (FLO_type, FLO_type);
+#endif
+
+#if defined(L_lt_sf) || defined(L_lt_df) || defined(L_lt_tf)
+extern CMPtype _lt_f2 (FLO_type, FLO_type);
+#endif
+
+#if defined(L_le_sf) || defined(L_le_df) || defined(L_le_tf)
+extern CMPtype _le_f2 (FLO_type, FLO_type);
+#endif
+
+#if defined(L_unord_sf) || defined(L_unord_df) || defined(L_unord_tf)
+extern CMPtype _unord_f2 (FLO_type, FLO_type);
+#endif
+
+#endif /* ! US_SOFTWARE_GOFAST */
+
+#if defined(L_si_to_sf) || defined(L_si_to_df) || defined(L_si_to_tf)
+extern FLO_type si_to_float (SItype);
+#endif
+
+#if defined(L_sf_to_si) || defined(L_df_to_si) || defined(L_tf_to_si)
+extern SItype float_to_si (FLO_type);
+#endif
+
+#if defined(L_sf_to_usi) || defined(L_df_to_usi) || defined(L_tf_to_usi)
+#if defined(US_SOFTWARE_GOFAST) || defined(L_tf_to_usi)
+extern USItype float_to_usi (FLO_type);
+#endif
+#endif
+
+#if defined(L_usi_to_sf) || defined(L_usi_to_df) || defined(L_usi_to_tf)
+extern FLO_type usi_to_float (USItype);
+#endif
+
+#if defined(L_negate_sf) || defined(L_negate_df) || defined(L_negate_tf)
+extern FLO_type negate (FLO_type);
+#endif
+
+#ifdef FLOAT
+#if defined(L_make_sf)
+extern SFtype __make_fp (fp_class_type, unsigned int, int, USItype);
+#endif
+#ifndef FLOAT_ONLY
+extern DFtype __make_dp (fp_class_type, unsigned int, int, UDItype);
+#if defined(L_sf_to_df)
+extern DFtype sf_to_df (SFtype);
+#endif
+#if defined(L_sf_to_tf) && defined(TMODES)
+extern TFtype sf_to_tf (SFtype);
+#endif
+#endif /* ! FLOAT_ONLY */
+#endif /* FLOAT */
+
+#ifndef FLOAT
+extern SFtype __make_fp (fp_class_type, unsigned int, int, USItype);
+#if defined(L_make_df)
+extern DFtype __make_dp (fp_class_type, unsigned int, int, UDItype);
+#endif
+#if defined(L_df_to_sf)
+extern SFtype df_to_sf (DFtype);
+#endif
+#if defined(L_df_to_tf) && defined(TMODES)
+extern TFtype df_to_tf (DFtype);
+#endif
+#endif /* ! FLOAT */
+
+#ifdef TMODES
+extern DFtype __make_dp (fp_class_type, unsigned int, int, UDItype);
+extern TFtype __make_tp (fp_class_type, unsigned int, int, UTItype);
+#ifdef TFLOAT
+#if defined(L_tf_to_sf)
+extern SFtype tf_to_sf (TFtype);
+#endif
+#if defined(L_tf_to_df)
+extern DFtype tf_to_df (TFtype);
+#endif
+#if defined(L_di_to_tf)
+extern TFtype di_to_df (DItype);
+#endif
+#endif /* TFLOAT */
+#endif /* TMODES */
+
+#endif /* ! GCC_FP_BIT_H */
Index: linux-3.18.21/arch/mips/softfloat/libgcc2.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/softfloat/libgcc2.c 2018-02-05 13:19:56.000000000 +0800
@@ -0,0 +1,2036 @@
+/* More subroutines needed by GCC output code on some machines. */
+/* Compile this one with gcc. */
+/* Copyright (C) 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+ 2000, 2001, 2002 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
+
+/* It is incorrect to include config.h here, because this file is being
+ compiled for the target, and hence definitions concerning only the host
+ do not apply. */
+//#include <linux/config.h>
+//#include <linux/slab.h>
+//#include <linux/module.h>
+//#ifdef CONFIG_KMOD
+//#include <linux/kmod.h>
+//#endif
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "tconfig.h"
+//#include "tsystem.h"
+#include "stddef.h"
+#include "float.h"
+/* Don't use `fancy_abort' here even if config.h says to use it. */
+#ifdef abort
+#undef abort
+#endif
+
+#define abort() printk("libgcc2 abort.")
+
+#include "libgcc2.h"
+
+#ifdef DECLARE_LIBRARY_RENAMES
+ DECLARE_LIBRARY_RENAMES
+#endif
+
+#if defined (L_negdi2)
+DWtype
+__negdi2 (DWtype u)
+{
+ DWunion w;
+ DWunion uu;
+
+ uu.ll = u;
+
+ w.s.low = -uu.s.low;
+ w.s.high = -uu.s.high - ((UWtype) w.s.low > 0);
+
+ return w.ll;
+}
+#endif
+
+#ifdef L_addvsi3
+Wtype
+__addvsi3 (Wtype a, Wtype b)
+{
+ Wtype w;
+
+ w = a + b;
+
+ if (b >= 0 ? w < a : w > a)
+ abort ();
+
+ return w;
+}
+#endif
+
+#ifdef L_addvdi3
+DWtype
+__addvdi3 (DWtype a, DWtype b)
+{
+ DWtype w;
+
+ w = a + b;
+
+ if (b >= 0 ? w < a : w > a)
+ abort ();
+
+ return w;
+}
+#endif
+
+#ifdef L_subvsi3
+Wtype
+__subvsi3 (Wtype a, Wtype b)
+{
+#ifdef L_addvsi3
+ return __addvsi3 (a, (-b));
+#else
+ DWtype w;
+
+ w = a - b;
+
+ if (b >= 0 ? w > a : w < a)
+ abort ();
+
+ return w;
+#endif
+}
+#endif
+
+#ifdef L_subvdi3
+DWtype
+__subvdi3 (DWtype a, DWtype b)
+{
+#ifdef L_addvdi3
+ return (a, (-b));
+#else
+ DWtype w;
+
+ w = a - b;
+
+ if (b >= 0 ? w > a : w < a)
+ abort ();
+
+ return w;
+#endif
+}
+#endif
+
+#ifdef L_mulvsi3
+Wtype
+__mulvsi3 (Wtype a, Wtype b)
+{
+ DWtype w;
+
+ w = a * b;
+
+ if (((a >= 0) == (b >= 0)) ? w < 0 : w > 0)
+ abort ();
+
+ return w;
+}
+#endif
+
+#ifdef L_negvsi2
+Wtype
+__negvsi2 (Wtype a)
+{
+ Wtype w;
+
+ w = -a;
+
+ if (a >= 0 ? w > 0 : w < 0)
+ abort ();
+
+ return w;
+}
+#endif
+
+#ifdef L_negvdi2
+DWtype
+__negvdi2 (DWtype a)
+{
+ DWtype w;
+
+ w = -a;
+
+ if (a >= 0 ? w > 0 : w < 0)
+ abort ();
+
+ return w;
+}
+#endif
+
+#ifdef L_absvsi2
+Wtype
+__absvsi2 (Wtype a)
+{
+ Wtype w = a;
+
+ if (a < 0)
+#ifdef L_negvsi2
+ w = __negvsi2 (a);
+#else
+ w = -a;
+
+ if (w < 0)
+ abort ();
+#endif
+
+ return w;
+}
+#endif
+
+#ifdef L_absvdi2
+DWtype
+__absvdi2 (DWtype a)
+{
+ DWtype w = a;
+
+ if (a < 0)
+#ifdef L_negvsi2
+ w = __negvsi2 (a);
+#else
+ w = -a;
+
+ if (w < 0)
+ abort ();
+#endif
+
+ return w;
+}
+#endif
+
+#ifdef L_mulvdi3
+DWtype
+__mulvdi3 (DWtype u, DWtype v)
+{
+ DWtype w;
+
+ w = u * v;
+
+ if (((u >= 0) == (v >= 0)) ? w < 0 : w > 0)
+ abort ();
+
+ return w;
+}
+#endif
+
+
+/* Unless shift functions are defined with full ANSI prototypes,
+ parameter b will be promoted to int if word_type is smaller than an int. */
+#ifdef L_lshrdi3
+DWtype
+__lshrdi3 (DWtype u, word_type b)
+{
+ DWunion w;
+ word_type bm;
+ DWunion uu;
+
+ if (b == 0)
+ return u;
+
+ uu.ll = u;
+
+ bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
+ if (bm <= 0)
+ {
+ w.s.high = 0;
+ w.s.low = (UWtype) uu.s.high >> -bm;
+ }
+ else
+ {
+ UWtype carries = (UWtype) uu.s.high << bm;
+
+ w.s.high = (UWtype) uu.s.high >> b;
+ w.s.low = ((UWtype) uu.s.low >> b) | carries;
+ }
+
+ return w.ll;
+}
+#endif
+
+#ifdef L_ashldi3
+DWtype
+__ashldi3 (DWtype u, word_type b)
+{
+ DWunion w;
+ word_type bm;
+ DWunion uu;
+
+ if (b == 0)
+ return u;
+
+ uu.ll = u;
+
+ bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
+ if (bm <= 0)
+ {
+ w.s.low = 0;
+ w.s.high = (UWtype) uu.s.low << -bm;
+ }
+ else
+ {
+ UWtype carries = (UWtype) uu.s.low >> bm;
+
+ w.s.low = (UWtype) uu.s.low << b;
+ w.s.high = ((UWtype) uu.s.high << b) | carries;
+ }
+
+ return w.ll;
+}
+#endif
+
+#ifdef L_ashrdi3
+DWtype
+__ashrdi3 (DWtype u, word_type b)
+{
+ DWunion w;
+ word_type bm;
+ DWunion uu;
+
+ if (b == 0)
+ return u;
+
+ uu.ll = u;
+
+ bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
+ if (bm <= 0)
+ {
+ /* w.s.high = 1..1 or 0..0 */
+ w.s.high = uu.s.high >> (sizeof (Wtype) * BITS_PER_UNIT - 1);
+ w.s.low = uu.s.high >> -bm;
+ }
+ else
+ {
+ UWtype carries = (UWtype) uu.s.high << bm;
+
+ w.s.high = uu.s.high >> b;
+ w.s.low = ((UWtype) uu.s.low >> b) | carries;
+ }
+
+ return w.ll;
+}
+#endif
+
+#ifdef L_ffsdi2
+DWtype
+__ffsdi2 (DWtype u)
+{
+ DWunion uu;
+ UWtype word, count, add;
+
+ uu.ll = u;
+ if (uu.s.low != 0)
+ word = uu.s.low, add = 0;
+ else if (uu.s.high != 0)
+ word = uu.s.high, add = BITS_PER_UNIT * sizeof (Wtype);
+ else
+ return 0;
+
+ count_trailing_zeros (count, word);
+ return count + add + 1;
+}
+#endif
+
+#ifdef L_muldi3
+DWtype
+__muldi3 (DWtype u, DWtype v)
+{
+ DWunion w;
+ DWunion uu, vv;
+
+ uu.ll = u,
+ vv.ll = v;
+
+ w.ll = __umulsidi3 (uu.s.low, vv.s.low);
+ w.s.high += ((UWtype) uu.s.low * (UWtype) vv.s.high
+ + (UWtype) uu.s.high * (UWtype) vv.s.low);
+
+ return w.ll;
+}
+#endif
+
+#if (defined (L_udivdi3) || defined (L_divdi3) || \
+ defined (L_umoddi3) || defined (L_moddi3))
+#if defined (sdiv_qrnnd)
+#define L_udiv_w_sdiv
+#endif
+#endif
+
+#ifdef L_udiv_w_sdiv
+#if defined (sdiv_qrnnd)
+#if (defined (L_udivdi3) || defined (L_divdi3) || \
+ defined (L_umoddi3) || defined (L_moddi3))
+static inline __attribute__ ((__always_inline__))
+#endif
+UWtype
+__udiv_w_sdiv (UWtype *rp, UWtype a1, UWtype a0, UWtype d)
+{
+ UWtype q, r;
+ UWtype c0, c1, b1;
+
+ if ((Wtype) d >= 0)
+ {
+ if (a1 < d - a1 - (a0 >> (W_TYPE_SIZE - 1)))
+ {
+ /* dividend, divisor, and quotient are nonnegative */
+ sdiv_qrnnd (q, r, a1, a0, d);
+ }
+ else
+ {
+ /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */
+ sub_ddmmss (c1, c0, a1, a0, d >> 1, d << (W_TYPE_SIZE - 1));
+ /* Divide (c1*2^32 + c0) by d */
+ sdiv_qrnnd (q, r, c1, c0, d);
+ /* Add 2^31 to quotient */
+ q += (UWtype) 1 << (W_TYPE_SIZE - 1);
+ }
+ }
+ else
+ {
+ b1 = d >> 1; /* d/2, between 2^30 and 2^31 - 1 */
+ c1 = a1 >> 1; /* A/2 */
+ c0 = (a1 << (W_TYPE_SIZE - 1)) + (a0 >> 1);
+
+ if (a1 < b1) /* A < 2^32*b1, so A/2 < 2^31*b1 */
+ {
+ sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
+
+ r = 2*r + (a0 & 1); /* Remainder from A/(2*b1) */
+ if ((d & 1) != 0)
+ {
+ if (r >= q)
+ r = r - q;
+ else if (q - r <= d)
+ {
+ r = r - q + d;
+ q--;
+ }
+ else
+ {
+ r = r - q + 2*d;
+ q -= 2;
+ }
+ }
+ }
+ else if (c1 < b1) /* So 2^31 <= (A/2)/b1 < 2^32 */
+ {
+ c1 = (b1 - 1) - c1;
+ c0 = ~c0; /* logical NOT */
+
+ sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
+
+ q = ~q; /* (A/2)/b1 */
+ r = (b1 - 1) - r;
+
+ r = 2*r + (a0 & 1); /* A/(2*b1) */
+
+ if ((d & 1) != 0)
+ {
+ if (r >= q)
+ r = r - q;
+ else if (q - r <= d)
+ {
+ r = r - q + d;
+ q--;
+ }
+ else
+ {
+ r = r - q + 2*d;
+ q -= 2;
+ }
+ }
+ }
+ else /* Implies c1 = b1 */
+ { /* Hence a1 = d - 1 = 2*b1 - 1 */
+ if (a0 >= -d)
+ {
+ q = -1;
+ r = a0 + d;
+ }
+ else
+ {
+ q = -2;
+ r = a0 + 2*d;
+ }
+ }
+ }
+
+ *rp = r;
+ return q;
+}
+#else
+/* If sdiv_qrnnd doesn't exist, define dummy __udiv_w_sdiv. */
+UWtype
+__udiv_w_sdiv (UWtype *rp __attribute__ ((__unused__)),
+ UWtype a1 __attribute__ ((__unused__)),
+ UWtype a0 __attribute__ ((__unused__)),
+ UWtype d __attribute__ ((__unused__)))
+{
+ return 0;
+}
+#endif
+#endif
+
+#if (defined (L_udivdi3) || defined (L_divdi3) || \
+ defined (L_umoddi3) || defined (L_moddi3))
+#define L_udivmoddi4
+#endif
+
+#ifdef L_clz
+const UQItype __clz_tab[] =
+{
+ 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+ 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+};
+#endif
+
+#ifdef L_udivmoddi4
+
+#if (defined (L_udivdi3) || defined (L_divdi3) || \
+ defined (L_umoddi3) || defined (L_moddi3))
+static inline __attribute__ ((__always_inline__))
+#endif
+UDWtype
+__udivmoddi4 (UDWtype n, UDWtype d, UDWtype *rp)
+{
+ DWunion ww;
+ DWunion nn, dd;
+ DWunion rr;
+ UWtype d0, d1, n0, n1, n2;
+ UWtype q0, q1;
+ UWtype b, bm;
+
+ nn.ll = n;
+ dd.ll = d;
+
+ d0 = dd.s.low;
+ d1 = dd.s.high;
+ n0 = nn.s.low;
+ n1 = nn.s.high;
+
+#if !UDIV_NEEDS_NORMALIZATION
+ if (d1 == 0)
+ {
+ if (d0 > n1)
+ {
+ /* 0q = nn / 0D */
+
+ udiv_qrnnd (q0, n0, n1, n0, d0);
+ q1 = 0;
+
+ /* Remainder in n0. */
+ }
+ else
+ {
+ /* qq = NN / 0d */
+
+ if (d0 == 0)
+ d0 = 1 / d0; /* Divide intentionally by zero. */
+
+ udiv_qrnnd (q1, n1, 0, n1, d0);
+ udiv_qrnnd (q0, n0, n1, n0, d0);
+
+ /* Remainder in n0. */
+ }
+
+ if (rp != 0)
+ {
+ rr.s.low = n0;
+ rr.s.high = 0;
+ *rp = rr.ll;
+ }
+ }
+
+#else /* UDIV_NEEDS_NORMALIZATION */
+
+ if (d1 == 0)
+ {
+ if (d0 > n1)
+ {
+ /* 0q = nn / 0D */
+
+ count_leading_zeros (bm, d0);
+
+ if (bm != 0)
+ {
+ /* Normalize, i.e. make the most significant bit of the
+ denominator set. */
+
+ d0 = d0 << bm;
+ n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
+ n0 = n0 << bm;
+ }
+
+ udiv_qrnnd (q0, n0, n1, n0, d0);
+ q1 = 0;
+
+ /* Remainder in n0 >> bm. */
+ }
+ else
+ {
+ /* qq = NN / 0d */
+
+ if (d0 == 0)
+ d0 = 1 / d0; /* Divide intentionally by zero. */
+
+ count_leading_zeros (bm, d0);
+
+ if (bm == 0)
+ {
+ /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
+ conclude (the most significant bit of n1 is set) /\ (the
+ leading quotient digit q1 = 1).
+
+ This special case is necessary, not an optimization.
+ (Shifts counts of W_TYPE_SIZE are undefined.) */
+
+ n1 -= d0;
+ q1 = 1;
+ }
+ else
+ {
+ /* Normalize. */
+
+ b = W_TYPE_SIZE - bm;
+
+ d0 = d0 << bm;
+ n2 = n1 >> b;
+ n1 = (n1 << bm) | (n0 >> b);
+ n0 = n0 << bm;
+
+ udiv_qrnnd (q1, n1, n2, n1, d0);
+ }
+
+ /* n1 != d0... */
+
+ udiv_qrnnd (q0, n0, n1, n0, d0);
+
+ /* Remainder in n0 >> bm. */
+ }
+
+ if (rp != 0)
+ {
+ rr.s.low = n0 >> bm;
+ rr.s.high = 0;
+ *rp = rr.ll;
+ }
+ }
+#endif /* UDIV_NEEDS_NORMALIZATION */
+
+ else
+ {
+ if (d1 > n1)
+ {
+ /* 00 = nn / DD */
+
+ q0 = 0;
+ q1 = 0;
+
+ /* Remainder in n1n0. */
+ if (rp != 0)
+ {
+ rr.s.low = n0;
+ rr.s.high = n1;
+ *rp = rr.ll;
+ }
+ }
+ else
+ {
+ /* 0q = NN / dd */
+
+ count_leading_zeros (bm, d1);
+ if (bm == 0)
+ {
+ /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
+ conclude (the most significant bit of n1 is set) /\ (the
+ quotient digit q0 = 0 or 1).
+
+ This special case is necessary, not an optimization. */
+
+ /* The condition on the next line takes advantage of that
+ n1 >= d1 (true due to program flow). */
+ if (n1 > d1 || n0 >= d0)
+ {
+ q0 = 1;
+ sub_ddmmss (n1, n0, n1, n0, d1, d0);
+ }
+ else
+ q0 = 0;
+
+ q1 = 0;
+
+ if (rp != 0)
+ {
+ rr.s.low = n0;
+ rr.s.high = n1;
+ *rp = rr.ll;
+ }
+ }
+ else
+ {
+ UWtype m1, m0;
+ /* Normalize. */
+
+ b = W_TYPE_SIZE - bm;
+
+ d1 = (d1 << bm) | (d0 >> b);
+ d0 = d0 << bm;
+ n2 = n1 >> b;
+ n1 = (n1 << bm) | (n0 >> b);
+ n0 = n0 << bm;
+
+ udiv_qrnnd (q0, n1, n2, n1, d1);
+ umul_ppmm (m1, m0, q0, d0);
+
+ if (m1 > n1 || (m1 == n1 && m0 > n0))
+ {
+ q0--;
+ sub_ddmmss (m1, m0, m1, m0, d1, d0);
+ }
+
+ q1 = 0;
+
+ /* Remainder in (n1n0 - m1m0) >> bm. */
+ if (rp != 0)
+ {
+ sub_ddmmss (n1, n0, n1, n0, m1, m0);
+ rr.s.low = (n1 << b) | (n0 >> bm);
+ rr.s.high = n1 >> bm;
+ *rp = rr.ll;
+ }
+ }
+ }
+ }
+
+ ww.s.low = q0;
+ ww.s.high = q1;
+ return ww.ll;
+}
+#endif
+
+#ifdef L_divdi3
+DWtype
+__divdi3 (DWtype u, DWtype v)
+{
+ word_type c = 0;
+ DWunion uu, vv;
+ DWtype w;
+
+ uu.ll = u;
+ vv.ll = v;
+
+ if (uu.s.high < 0)
+ c = ~c,
+ uu.ll = -uu.ll;
+ if (vv.s.high < 0)
+ c = ~c,
+ vv.ll = -vv.ll;
+
+ w = __udivmoddi4 (uu.ll, vv.ll, (UDWtype *) 0);
+ if (c)
+ w = -w;
+
+ return w;
+}
+#endif
+
+#ifdef L_moddi3
+DWtype
+__moddi3 (DWtype u, DWtype v)
+{
+ word_type c = 0;
+ DWunion uu, vv;
+ DWtype w;
+
+ uu.ll = u;
+ vv.ll = v;
+
+ if (uu.s.high < 0)
+ c = ~c,
+ uu.ll = -uu.ll;
+ if (vv.s.high < 0)
+ vv.ll = -vv.ll;
+
+ (void) __udivmoddi4 (uu.ll, vv.ll, &w);
+ if (c)
+ w = -w;
+
+ return w;
+}
+#endif
+
+#ifdef L_umoddi3
+UDWtype
+__umoddi3 (UDWtype u, UDWtype v)
+{
+ UDWtype w;
+
+ (void) __udivmoddi4 (u, v, &w);
+
+ return w;
+}
+#endif
+
+#ifdef L_udivdi3
+UDWtype
+__udivdi3 (UDWtype n, UDWtype d)
+{
+ return __udivmoddi4 (n, d, (UDWtype *) 0);
+}
+#endif
+
+#ifdef L_cmpdi2
+word_type
+__cmpdi2 (DWtype a, DWtype b)
+{
+ DWunion au, bu;
+
+ au.ll = a, bu.ll = b;
+
+ if (au.s.high < bu.s.high)
+ return 0;
+ else if (au.s.high > bu.s.high)
+ return 2;
+ if ((UWtype) au.s.low < (UWtype) bu.s.low)
+ return 0;
+ else if ((UWtype) au.s.low > (UWtype) bu.s.low)
+ return 2;
+ return 1;
+}
+#endif
+
+#ifdef L_ucmpdi2
+word_type
+__ucmpdi2 (DWtype a, DWtype b)
+{
+ DWunion au, bu;
+
+ au.ll = a, bu.ll = b;
+
+ if ((UWtype) au.s.high < (UWtype) bu.s.high)
+ return 0;
+ else if ((UWtype) au.s.high > (UWtype) bu.s.high)
+ return 2;
+ if ((UWtype) au.s.low < (UWtype) bu.s.low)
+ return 0;
+ else if ((UWtype) au.s.low > (UWtype) bu.s.low)
+ return 2;
+ return 1;
+}
+#endif
+
+#if defined(L_fixunstfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
+#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
+#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
+
+DWtype
+__fixunstfDI (TFtype a)
+{
+ TFtype b;
+ UDWtype v;
+
+ if (a < 0)
+ return 0;
+
+ /* Compute high word of result, as a flonum. */
+ b = (a / HIGH_WORD_COEFF);
+ /* Convert that to fixed (but not to DWtype!),
+ and shift it into the high word. */
+ v = (UWtype) b;
+ v <<= WORD_SIZE;
+ /* Remove high part from the TFtype, leaving the low part as flonum. */
+ a -= (TFtype)v;
+ /* Convert that to fixed (but not to DWtype!) and add it in.
+ Sometimes A comes out negative. This is significant, since
+ A has more bits than a long int does. */
+ if (a < 0)
+ v -= (UWtype) (- a);
+ else
+ v += (UWtype) a;
+ return v;
+}
+#endif
+
+#if defined(L_fixtfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
+DWtype
+__fixtfdi (TFtype a)
+{
+ if (a < 0)
+ return - __fixunstfDI (-a);
+ return __fixunstfDI (a);
+}
+#endif
+
+#if defined(L_fixunsxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
+#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
+#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
+
+DWtype
+__fixunsxfDI (XFtype a)
+{
+ XFtype b;
+ UDWtype v;
+
+ if (a < 0)
+ return 0;
+
+ /* Compute high word of result, as a flonum. */
+ b = (a / HIGH_WORD_COEFF);
+ /* Convert that to fixed (but not to DWtype!),
+ and shift it into the high word. */
+ v = (UWtype) b;
+ v <<= WORD_SIZE;
+ /* Remove high part from the XFtype, leaving the low part as flonum. */
+ a -= (XFtype)v;
+ /* Convert that to fixed (but not to DWtype!) and add it in.
+ Sometimes A comes out negative. This is significant, since
+ A has more bits than a long int does. */
+ if (a < 0)
+ v -= (UWtype) (- a);
+ else
+ v += (UWtype) a;
+ return v;
+}
+#endif
+
+#if defined(L_fixxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
+DWtype
+__fixxfdi (XFtype a)
+{
+ if (a < 0)
+ return - __fixunsxfDI (-a);
+ return __fixunsxfDI (a);
+}
+#endif
+
+#ifdef L_fixunsdfdi
+#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
+#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
+
+DWtype
+__fixunsdfDI (DFtype a)
+{
+ DFtype b;
+ UDWtype v;
+
+ if (a < 0)
+ return 0;
+
+ /* Compute high word of result, as a flonum. */
+ b = (a / HIGH_WORD_COEFF);
+ /* Convert that to fixed (but not to DWtype!),
+ and shift it into the high word. */
+ v = (UWtype) b;
+ v <<= WORD_SIZE;
+ /* Remove high part from the DFtype, leaving the low part as flonum. */
+ a -= (DFtype)v;
+ /* Convert that to fixed (but not to DWtype!) and add it in.
+ Sometimes A comes out negative. This is significant, since
+ A has more bits than a long int does. */
+ if (a < 0)
+ v -= (UWtype) (- a);
+ else
+ v += (UWtype) a;
+ return v;
+}
+#endif
+
+#ifdef L_fixdfdi
+DWtype
+__fixdfdi (DFtype a)
+{
+ if (a < 0)
+ return - __fixunsdfDI (-a);
+ return __fixunsdfDI (a);
+}
+#endif
+
+#ifdef L_fixunssfdi
+#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
+#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
+
+DWtype
+__fixunssfDI (SFtype original_a)
+{
+ /* Convert the SFtype to a DFtype, because that is surely not going
+ to lose any bits. Some day someone else can write a faster version
+ that avoids converting to DFtype, and verify it really works right. */
+ DFtype a = original_a;
+ DFtype b;
+ UDWtype v;
+
+ if (a < 0)
+ return 0;
+
+ /* Compute high word of result, as a flonum. */
+ b = (a / HIGH_WORD_COEFF);
+ /* Convert that to fixed (but not to DWtype!),
+ and shift it into the high word. */
+ v = (UWtype) b;
+ v <<= WORD_SIZE;
+ /* Remove high part from the DFtype, leaving the low part as flonum. */
+ a -= (DFtype) v;
+ /* Convert that to fixed (but not to DWtype!) and add it in.
+ Sometimes A comes out negative. This is significant, since
+ A has more bits than a long int does. */
+ if (a < 0)
+ v -= (UWtype) (- a);
+ else
+ v += (UWtype) a;
+ return v;
+}
+#endif
+
+#ifdef L_fixsfdi
+DWtype
+__fixsfdi (SFtype a)
+{
+ if (a < 0)
+ return - __fixunssfDI (-a);
+ return __fixunssfDI (a);
+}
+#endif
+
+#if defined(L_floatdixf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
+#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
+#define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
+#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
+
+XFtype
+__floatdixf (DWtype u)
+{
+ XFtype d;
+
+ d = (Wtype) (u >> WORD_SIZE);
+ d *= HIGH_HALFWORD_COEFF;
+ d *= HIGH_HALFWORD_COEFF;
+ d += (UWtype) (u & (HIGH_WORD_COEFF - 1));
+
+ return d;
+}
+#endif
+
+#if defined(L_floatditf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
+#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
+#define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
+#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
+
+TFtype
+__floatditf (DWtype u)
+{
+ TFtype d;
+
+ d = (Wtype) (u >> WORD_SIZE);
+ d *= HIGH_HALFWORD_COEFF;
+ d *= HIGH_HALFWORD_COEFF;
+ d += (UWtype) (u & (HIGH_WORD_COEFF - 1));
+
+ return d;
+}
+#endif
+
+#ifdef L_floatdidf
+#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
+#define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
+#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
+
+DFtype
+__floatdidf (DWtype u)
+{
+ DFtype d;
+
+ d = (Wtype) (u >> WORD_SIZE);
+ d *= HIGH_HALFWORD_COEFF;
+ d *= HIGH_HALFWORD_COEFF;
+ d += (UWtype) (u & (HIGH_WORD_COEFF - 1));
+
+ return d;
+}
+#endif
+
+#ifdef L_floatundidf
+#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
+#define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
+#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
+
+DFtype
+__floatundidf (UDWtype u)
+{
+ DFtype d;
+
+ d = (UWtype) (u >> WORD_SIZE);
+ d *= HIGH_HALFWORD_COEFF;
+ d *= HIGH_HALFWORD_COEFF;
+ d += (UWtype) (u & (HIGH_WORD_COEFF - 1));
+
+ return d;
+}
+#endif
+
+#ifdef L_floatdisf
+#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
+#define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
+#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
+
+#define DI_SIZE (sizeof (DWtype) * BITS_PER_UNIT)
+#define DF_SIZE DBL_MANT_DIG
+#define SF_SIZE FLT_MANT_DIG
+
+SFtype
+__floatdisf (DWtype u)
+{
+ /* Do the calculation in DFmode
+ so that we don't lose any of the precision of the high word
+ while multiplying it. */
+ DFtype f;
+
+ /* Protect against double-rounding error.
+ Represent any low-order bits, that might be truncated in DFmode,
+ by a bit that won't be lost. The bit can go in anywhere below the
+ rounding position of the SFmode. A fixed mask and bit position
+ handles all usual configurations. It doesn't handle the case
+ of 128-bit DImode, however. */
+ if (DF_SIZE < DI_SIZE
+ && DF_SIZE > (DI_SIZE - DF_SIZE + SF_SIZE))
+ {
+#define REP_BIT ((UDWtype) 1 << (DI_SIZE - DF_SIZE))
+ if (! (- ((DWtype) 1 << DF_SIZE) < u
+ && u < ((DWtype) 1 << DF_SIZE)))
+ {
+ if ((UDWtype) u & (REP_BIT - 1))
+ {
+ u &= ~ (REP_BIT - 1);
+ u |= REP_BIT;
+ }
+ }
+ }
+ f = (Wtype) (u >> WORD_SIZE);
+ f *= HIGH_HALFWORD_COEFF;
+ f *= HIGH_HALFWORD_COEFF;
+ f += (UWtype) (u & (HIGH_WORD_COEFF - 1));
+
+ return (SFtype) f;
+}
+#endif
+
+#if defined(L_fixunsxfsi) && LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
+/* Reenable the normal types, in case limits.h needs them. */
+#undef char
+#undef short
+#undef int
+#undef long
+#undef unsigned
+#undef float
+#undef double
+#undef MIN
+#undef MAX
+#include <limits.h>
+
+UWtype
+__fixunsxfSI (XFtype a)
+{
+ if (a >= - (DFtype) Wtype_MIN)
+ return (Wtype) (a + Wtype_MIN) - Wtype_MIN;
+ return (Wtype) a;
+}
+#endif
+
+#ifdef L_fixunsdfsi
+/* Reenable the normal types, in case limits.h needs them. */
+#undef char
+#undef short
+#undef int
+#undef long
+#undef unsigned
+#undef float
+#undef double
+#undef MIN
+#undef MAX
+//#include <limits.h>
+
+UWtype
+__fixunsdfSI (DFtype a)
+{
+ if (a >= - (DFtype) Wtype_MIN)
+ return (Wtype) (a + Wtype_MIN) - Wtype_MIN;
+ return (Wtype) a;
+}
+EXPORT_SYMBOL(__fixunsdfSI);
+#endif
+
+#ifdef L_fixunssfsi
+/* Reenable the normal types, in case limits.h needs them. */
+#undef char
+#undef short
+#undef int
+#undef long
+#undef unsigned
+#undef float
+#undef double
+#undef MIN
+#undef MAX
+//#include <limits.h>
+
+UWtype
+__fixunssfSI (SFtype a)
+{
+ if (a >= - (SFtype) Wtype_MIN)
+ return (Wtype) (a + Wtype_MIN) - Wtype_MIN;
+ return (Wtype) a;
+}
+EXPORT_SYMBOL(__fixunssfSI);
+#endif
+
+/* From here on down, the routines use normal data types. */
+
+#define SItype bogus_type
+#define USItype bogus_type
+#define DItype bogus_type
+#define UDItype bogus_type
+#define SFtype bogus_type
+#define DFtype bogus_type
+#undef Wtype
+#undef UWtype
+#undef HWtype
+#undef UHWtype
+#undef DWtype
+#undef UDWtype
+
+#undef char
+#undef short
+#undef int
+#undef long
+#undef unsigned
+#undef float
+#undef double
+
+#ifdef L_divdi3
+EXPORT_SYMBOL(__divdi3);
+#endif
+#ifdef L_moddi3
+EXPORT_SYMBOL(__moddi3);
+#endif
+#ifdef L_udivdi3
+EXPORT_SYMBOL(__udivdi3);
+#endif
+#ifdef L__gcc_bcmp
+
+/* Like bcmp except the sign is meaningful.
+ Result is negative if S1 is less than S2,
+ positive if S1 is greater, 0 if S1 and S2 are equal. */
+
+int
+__gcc_bcmp (const unsigned char *s1, const unsigned char *s2, size_t size)
+{
+ while (size > 0)
+ {
+ unsigned char c1 = *s1++, c2 = *s2++;
+ if (c1 != c2)
+ return c1 - c2;
+ size--;
+ }
+ return 0;
+}
+
+#endif
+
+/* __eprintf used to be used by GCC's private version of <assert.h>.
+ We no longer provide that header, but this routine remains in libgcc.a
+ for binary backward compatibility. Note that it is not included in
+ the shared version of libgcc. */
+#ifdef L_eprintf
+#ifndef inhibit_libc
+
+#undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
+#include <stdio.h>
+
+void
+__eprintf (const char *string, const char *expression,
+ unsigned int line, const char *filename)
+{
+ fprintf (stderr, string, expression, line, filename);
+ fflush (stderr);
+ abort ();
+}
+
+#endif
+#endif
+
+#ifdef L_bb
+
+struct bb_function_info {
+ long checksum;
+ int arc_count;
+ const char *name;
+};
+
+/* Structure emitted by --profile-arcs */
+struct bb
+{
+ long zero_word;
+ const char *filename;
+ gcov_type *counts;
+ long ncounts;
+ struct bb *next;
+
+ /* Older GCC's did not emit these fields. */
+ long sizeof_bb;
+ struct bb_function_info *function_infos;
+};
+
+#ifndef inhibit_libc
+
+/* Arc profile dumper. Requires atexit and stdio. */
+
+#undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
+#include <stdio.h>
+
+#include "gcov-io.h"
+#include <string.h>
+#ifdef TARGET_HAS_F_SETLKW
+#include <fcntl.h>
+#include <errno.h>
+#endif
+
+/* Chain of per-object file bb structures. */
+static struct bb *bb_head;
+
+/* Dump the coverage counts. We merge with existing counts when
+ possible, to avoid growing the .da files ad infinitum. */
+
+void
+__bb_exit_func (void)
+{
+ struct bb *ptr;
+ int i;
+ gcov_type program_sum = 0;
+ gcov_type program_max = 0;
+ long program_arcs = 0;
+ gcov_type merged_sum = 0;
+ gcov_type merged_max = 0;
+ long merged_arcs = 0;
+
+#if defined (TARGET_HAS_F_SETLKW)
+ struct flock s_flock;
+
+ s_flock.l_type = F_WRLCK;
+ s_flock.l_whence = SEEK_SET;
+ s_flock.l_start = 0;
+ s_flock.l_len = 0; /* Until EOF. */
+ s_flock.l_pid = getpid ();
+#endif
+
+ /* Non-merged stats for this program. */
+ for (ptr = bb_head; ptr; ptr = ptr->next)
+ {
+ for (i = 0; i < ptr->ncounts; i++)
+ {
+ program_sum += ptr->counts[i];
+
+ if (ptr->counts[i] > program_max)
+ program_max = ptr->counts[i];
+ }
+ program_arcs += ptr->ncounts;
+ }
+
+ for (ptr = bb_head; ptr; ptr = ptr->next)
+ {
+ FILE *da_file;
+ gcov_type object_max = 0;
+ gcov_type object_sum = 0;
+ long object_functions = 0;
+ int merging = 0;
+ int error = 0;
+ struct bb_function_info *fn_info;
+ gcov_type *count_ptr;
+
+ /* Open for modification */
+ da_file = fopen (ptr->filename, "r+b");
+
+ if (da_file)
+ merging = 1;
+ else
+ {
+ /* Try for appending */
+ da_file = fopen (ptr->filename, "ab");
+ /* Some old systems might not allow the 'b' mode modifier.
+ Therefore, try to open without it. This can lead to a
+ race condition so that when you delete and re-create the
+ file, the file might be opened in text mode, but then,
+ you shouldn't delete the file in the first place. */
+ if (!da_file)
+ da_file = fopen (ptr->filename, "a");
+ }
+
+ if (!da_file)
+ {
+ fprintf (stderr, "arc profiling: Can't open output file %s.\n",
+ ptr->filename);
+ ptr->filename = 0;
+ continue;
+ }
+
+#if defined (TARGET_HAS_F_SETLKW)
+ /* After a fork, another process might try to read and/or write
+	 the same file simultaneously.  So if we can, lock the file to
+ avoid race conditions. */
+ while (fcntl (fileno (da_file), F_SETLKW, &s_flock)
+ && errno == EINTR)
+ continue;
+#endif
+ for (fn_info = ptr->function_infos; fn_info->arc_count != -1; fn_info++)
+ object_functions++;
+
+ if (merging)
+ {
+ /* Merge data from file. */
+ long tmp_long;
+ gcov_type tmp_gcov;
+
+ if (/* magic */
+ (__read_long (&tmp_long, da_file, 4) || tmp_long != -123l)
+ /* functions in object file. */
+ || (__read_long (&tmp_long, da_file, 4)
+ || tmp_long != object_functions)
+ /* extension block, skipped */
+ || (__read_long (&tmp_long, da_file, 4)
+ || fseek (da_file, tmp_long, SEEK_CUR)))
+ {
+ read_error:;
+ fprintf (stderr, "arc profiling: Error merging output file %s.\n",
+ ptr->filename);
+ clearerr (da_file);
+ }
+ else
+ {
+ /* Merge execution counts for each function. */
+ count_ptr = ptr->counts;
+
+ for (fn_info = ptr->function_infos; fn_info->arc_count != -1;
+ fn_info++)
+ {
+ if (/* function name delim */
+ (__read_long (&tmp_long, da_file, 4)
+ || tmp_long != -1)
+ /* function name length */
+ || (__read_long (&tmp_long, da_file, 4)
+ || tmp_long != (long) strlen (fn_info->name))
+ /* skip string */
+ || fseek (da_file, ((tmp_long + 1) + 3) & ~3, SEEK_CUR)
+ /* function name delim */
+ || (__read_long (&tmp_long, da_file, 4)
+ || tmp_long != -1))
+ goto read_error;
+
+ if (/* function checksum */
+ (__read_long (&tmp_long, da_file, 4)
+ || tmp_long != fn_info->checksum)
+ /* arc count */
+ || (__read_long (&tmp_long, da_file, 4)
+ || tmp_long != fn_info->arc_count))
+ goto read_error;
+
+ for (i = fn_info->arc_count; i > 0; i--, count_ptr++)
+ if (__read_gcov_type (&tmp_gcov, da_file, 8))
+ goto read_error;
+ else
+ *count_ptr += tmp_gcov;
+ }
+ }
+ fseek (da_file, 0, SEEK_SET);
+ }
+
+ /* Calculate the per-object statistics. */
+ for (i = 0; i < ptr->ncounts; i++)
+ {
+ object_sum += ptr->counts[i];
+
+ if (ptr->counts[i] > object_max)
+ object_max = ptr->counts[i];
+ }
+ merged_sum += object_sum;
+ if (merged_max < object_max)
+ merged_max = object_max;
+ merged_arcs += ptr->ncounts;
+
+ /* Write out the data. */
+ if (/* magic */
+ __write_long (-123, da_file, 4)
+ /* number of functions in object file. */
+ || __write_long (object_functions, da_file, 4)
+ /* length of extra data in bytes. */
+ || __write_long ((4 + 8 + 8) + (4 + 8 + 8), da_file, 4)
+
+ /* whole program statistics. If merging write per-object
+ now, rewrite later */
+ /* number of instrumented arcs. */
+ || __write_long (merging ? ptr->ncounts : program_arcs, da_file, 4)
+ /* sum of counters. */
+ || __write_gcov_type (merging ? object_sum : program_sum, da_file, 8)
+ /* maximal counter. */
+ || __write_gcov_type (merging ? object_max : program_max, da_file, 8)
+
+ /* per-object statistics. */
+ /* number of counters. */
+ || __write_long (ptr->ncounts, da_file, 4)
+ /* sum of counters. */
+ || __write_gcov_type (object_sum, da_file, 8)
+ /* maximal counter. */
+ || __write_gcov_type (object_max, da_file, 8))
+ {
+ write_error:;
+ fprintf (stderr, "arc profiling: Error writing output file %s.\n",
+ ptr->filename);
+ error = 1;
+ }
+ else
+ {
+ /* Write execution counts for each function. */
+ count_ptr = ptr->counts;
+
+ for (fn_info = ptr->function_infos; fn_info->arc_count != -1;
+ fn_info++)
+ {
+ if (__write_gcov_string (fn_info->name,
+ strlen (fn_info->name), da_file, -1)
+ || __write_long (fn_info->checksum, da_file, 4)
+ || __write_long (fn_info->arc_count, da_file, 4))
+ goto write_error;
+
+ for (i = fn_info->arc_count; i > 0; i--, count_ptr++)
+ if (__write_gcov_type (*count_ptr, da_file, 8))
+ goto write_error; /* RIP Edsger Dijkstra */
+ }
+ }
+
+ if (fclose (da_file))
+ {
+ fprintf (stderr, "arc profiling: Error closing output file %s.\n",
+ ptr->filename);
+ error = 1;
+ }
+ if (error || !merging)
+ ptr->filename = 0;
+ }
+
+ /* Update whole program statistics. */
+ for (ptr = bb_head; ptr; ptr = ptr->next)
+ if (ptr->filename)
+ {
+ FILE *da_file;
+
+ da_file = fopen (ptr->filename, "r+b");
+ if (!da_file)
+ {
+ fprintf (stderr, "arc profiling: Cannot reopen %s.\n",
+ ptr->filename);
+ continue;
+ }
+
+#if defined (TARGET_HAS_F_SETLKW)
+ while (fcntl (fileno (da_file), F_SETLKW, &s_flock)
+ && errno == EINTR)
+ continue;
+#endif
+
+ if (fseek (da_file, 4 * 3, SEEK_SET)
+ /* number of instrumented arcs. */
+ || __write_long (merged_arcs, da_file, 4)
+ /* sum of counters. */
+ || __write_gcov_type (merged_sum, da_file, 8)
+ /* maximal counter. */
+ || __write_gcov_type (merged_max, da_file, 8))
+ fprintf (stderr, "arc profiling: Error updating program header %s.\n",
+ ptr->filename);
+ if (fclose (da_file))
+ fprintf (stderr, "arc profiling: Error reclosing %s\n",
+ ptr->filename);
+ }
+}
+
+/* Add a new object file onto the bb chain. Invoked automatically
+ when running an object file's global ctors. */
+
+void
+__bb_init_func (struct bb *blocks)
+{
+ if (blocks->zero_word)
+ return;
+
+ /* Initialize destructor and per-thread data. */
+ if (!bb_head)
+ atexit (__bb_exit_func);
+
+ /* Set up linked list. */
+ blocks->zero_word = 1;
+ blocks->next = bb_head;
+ bb_head = blocks;
+}
+
+/* Called before fork or exec - write out profile information gathered so
+ far and reset it to zero. This avoids duplication or loss of the
+ profile information gathered so far. */
+
+void
+__bb_fork_func (void)
+{
+ struct bb *ptr;
+
+ __bb_exit_func ();
+ for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
+ {
+ long i;
+ for (i = ptr->ncounts - 1; i >= 0; i--)
+ ptr->counts[i] = 0;
+ }
+}
+
+#endif /* not inhibit_libc */
+#endif /* L_bb */
+
+#ifdef L_clear_cache
+/* Clear part of an instruction cache. */
+
+#define INSN_CACHE_PLANE_SIZE (INSN_CACHE_SIZE / INSN_CACHE_DEPTH)
+
+void
+__clear_cache (char *beg __attribute__((__unused__)),
+ char *end __attribute__((__unused__)))
+{
+#ifdef CLEAR_INSN_CACHE
+ CLEAR_INSN_CACHE (beg, end);
+#else
+#ifdef INSN_CACHE_SIZE
+ static char array[INSN_CACHE_SIZE + INSN_CACHE_PLANE_SIZE + INSN_CACHE_LINE_WIDTH];
+ static int initialized;
+ int offset;
+ void *start_addr;
+ void *end_addr;
+ typedef (*function_ptr) (void);
+
+#if (INSN_CACHE_SIZE / INSN_CACHE_LINE_WIDTH) < 16
+ /* It's cheaper to clear the whole cache.
+ Put in a series of jump instructions so that calling the beginning
+ of the cache will clear the whole thing. */
+
+ if (! initialized)
+ {
+ int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
+ & -INSN_CACHE_LINE_WIDTH);
+ int end_ptr = ptr + INSN_CACHE_SIZE;
+
+ while (ptr < end_ptr)
+ {
+ *(INSTRUCTION_TYPE *)ptr
+ = JUMP_AHEAD_INSTRUCTION + INSN_CACHE_LINE_WIDTH;
+ ptr += INSN_CACHE_LINE_WIDTH;
+ }
+ *(INSTRUCTION_TYPE *) (ptr - INSN_CACHE_LINE_WIDTH) = RETURN_INSTRUCTION;
+
+ initialized = 1;
+ }
+
+ /* Call the beginning of the sequence. */
+ (((function_ptr) (((int) array + INSN_CACHE_LINE_WIDTH - 1)
+ & -INSN_CACHE_LINE_WIDTH))
+ ());
+
+#else /* Cache is large. */
+
+ if (! initialized)
+ {
+ int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
+ & -INSN_CACHE_LINE_WIDTH);
+
+ while (ptr < (int) array + sizeof array)
+ {
+ *(INSTRUCTION_TYPE *)ptr = RETURN_INSTRUCTION;
+ ptr += INSN_CACHE_LINE_WIDTH;
+ }
+
+ initialized = 1;
+ }
+
+ /* Find the location in array that occupies the same cache line as BEG. */
+
+ offset = ((int) beg & -INSN_CACHE_LINE_WIDTH) & (INSN_CACHE_PLANE_SIZE - 1);
+ start_addr = (((int) (array + INSN_CACHE_PLANE_SIZE - 1)
+ & -INSN_CACHE_PLANE_SIZE)
+ + offset);
+
+ /* Compute the cache alignment of the place to stop clearing. */
+#if 0 /* This is not needed for gcc's purposes. */
+ /* If the block to clear is bigger than a cache plane,
+ we clear the entire cache, and OFFSET is already correct. */
+ if (end < beg + INSN_CACHE_PLANE_SIZE)
+#endif
+ offset = (((int) (end + INSN_CACHE_LINE_WIDTH - 1)
+ & -INSN_CACHE_LINE_WIDTH)
+ & (INSN_CACHE_PLANE_SIZE - 1));
+
+#if INSN_CACHE_DEPTH > 1
+ end_addr = (start_addr & -INSN_CACHE_PLANE_SIZE) + offset;
+ if (end_addr <= start_addr)
+ end_addr += INSN_CACHE_PLANE_SIZE;
+
+ for (plane = 0; plane < INSN_CACHE_DEPTH; plane++)
+ {
+ int addr = start_addr + plane * INSN_CACHE_PLANE_SIZE;
+ int stop = end_addr + plane * INSN_CACHE_PLANE_SIZE;
+
+ while (addr != stop)
+ {
+ /* Call the return instruction at ADDR. */
+ ((function_ptr) addr) ();
+
+ addr += INSN_CACHE_LINE_WIDTH;
+ }
+ }
+#else /* just one plane */
+ do
+ {
+ /* Call the return instruction at START_ADDR. */
+ ((function_ptr) start_addr) ();
+
+ start_addr += INSN_CACHE_LINE_WIDTH;
+ }
+ while ((start_addr % INSN_CACHE_SIZE) != offset);
+#endif /* just one plane */
+#endif /* Cache is large */
+#endif /* Cache exists */
+#endif /* CLEAR_INSN_CACHE */
+}
+
+#endif /* L_clear_cache */
+
+#ifdef L_trampoline
+
+/* Jump to a trampoline, loading the static chain address. */
+
+#if defined(WINNT) && ! defined(__CYGWIN__) && ! defined (_UWIN)
+
+long
+getpagesize (void)
+{
+#ifdef _ALPHA_
+ return 8192;
+#else
+ return 4096;
+#endif
+}
+
+#ifdef __i386__
+extern int VirtualProtect (char *, int, int, int *) __attribute__((stdcall));
+#endif
+
+int
+mprotect (char *addr, int len, int prot)
+{
+ int np, op;
+
+ if (prot == 7)
+ np = 0x40;
+ else if (prot == 5)
+ np = 0x20;
+ else if (prot == 4)
+ np = 0x10;
+ else if (prot == 3)
+ np = 0x04;
+ else if (prot == 1)
+ np = 0x02;
+ else if (prot == 0)
+ np = 0x01;
+
+ if (VirtualProtect (addr, len, np, &op))
+ return 0;
+ else
+ return -1;
+}
+
+#endif /* WINNT && ! __CYGWIN__ && ! _UWIN */
+
+#ifdef TRANSFER_FROM_TRAMPOLINE
+TRANSFER_FROM_TRAMPOLINE
+#endif
+
+#ifdef __sysV68__
+
+#include <sys/signal.h>
+#include <errno.h>
+
+/* Motorola forgot to put memctl.o in the libp version of libc881.a,
+ so define it here, because we need it in __clear_insn_cache below */
+/* On older versions of this OS, no memctl or MCT_TEXT are defined;
+ hence we enable this stuff only if MCT_TEXT is #define'd. */
+
+#ifdef MCT_TEXT
+asm("\n\
+ global memctl\n\
+memctl:\n\
+ movq &75,%d0\n\
+ trap &0\n\
+ bcc.b noerror\n\
+ jmp cerror%\n\
+noerror:\n\
+ movq &0,%d0\n\
+ rts");
+#endif
+
+/* Clear instruction cache so we can call trampolines on stack.
+ This is called from FINALIZE_TRAMPOLINE in mot3300.h. */
+
+void
+__clear_insn_cache (void)
+{
+#ifdef MCT_TEXT
+ int save_errno;
+
+ /* Preserve errno, because users would be surprised to have
+ errno changing without explicitly calling any system-call. */
+ save_errno = errno;
+
+ /* Keep it simple : memctl (MCT_TEXT) always fully clears the insn cache.
+ No need to use an address derived from _start or %sp, as 0 works also. */
+ memctl(0, 4096, MCT_TEXT);
+ errno = save_errno;
+#endif
+}
+
+#endif /* __sysV68__ */
+#endif /* L_trampoline */
+
+#ifndef __CYGWIN__
+#ifdef L__main
+
+#include "gbl-ctors.h"
+/* Some systems use __main in a way incompatible with its use in gcc, in these
+ cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
+ give the same symbol without quotes for an alternative entry point. You
+ must define both, or neither. */
+#ifndef NAME__MAIN
+#define NAME__MAIN "__main"
+#define SYMBOL__MAIN __main
+#endif
+
+#ifdef INIT_SECTION_ASM_OP
+#undef HAS_INIT_SECTION
+#define HAS_INIT_SECTION
+#endif
+
+#if !defined (HAS_INIT_SECTION) || !defined (OBJECT_FORMAT_ELF)
+
+/* Some ELF crosses use crtstuff.c to provide __CTOR_LIST__, but use this
+ code to run constructors. In that case, we need to handle EH here, too. */
+
+#ifdef EH_FRAME_SECTION_NAME
+#include "unwind-dw2-fde.h"
+extern unsigned char __EH_FRAME_BEGIN__[];
+#endif
+
+/* Run all the global destructors on exit from the program. */
+
+void
+__do_global_dtors (void)
+{
+#ifdef DO_GLOBAL_DTORS_BODY
+ DO_GLOBAL_DTORS_BODY;
+#else
+ static func_ptr *p = __DTOR_LIST__ + 1;
+ while (*p)
+ {
+ p++;
+ (*(p-1)) ();
+ }
+#endif
+#if defined (EH_FRAME_SECTION_NAME) && !defined (HAS_INIT_SECTION)
+ {
+ static int completed = 0;
+ if (! completed)
+ {
+ completed = 1;
+ __deregister_frame_info (__EH_FRAME_BEGIN__);
+ }
+ }
+#endif
+}
+#endif
+
+#ifndef HAS_INIT_SECTION
+/* Run all the global constructors on entry to the program. */
+
+void
+__do_global_ctors (void)
+{
+#ifdef EH_FRAME_SECTION_NAME
+ {
+ static struct object object;
+ __register_frame_info (__EH_FRAME_BEGIN__, &object);
+ }
+#endif
+ DO_GLOBAL_CTORS_BODY;
+ atexit (__do_global_dtors);
+}
+#endif /* no HAS_INIT_SECTION */
+
+#if !defined (HAS_INIT_SECTION) || defined (INVOKE__main)
+/* Subroutine called automatically by `main'.
+ Compiling a global function named `main'
+ produces an automatic call to this function at the beginning.
+
+ For many systems, this routine calls __do_global_ctors.
+ For systems which support a .init section we use the .init section
+ to run __do_global_ctors, so we need not do anything here. */
+
+void
+SYMBOL__MAIN ()
+{
+ /* Support recursive calls to `main': run initializers just once. */
+ static int initialized;
+ if (! initialized)
+ {
+ initialized = 1;
+ __do_global_ctors ();
+ }
+}
+#endif /* no HAS_INIT_SECTION or INVOKE__main */
+
+#endif /* L__main */
+#endif /* __CYGWIN__ */
+
+#ifdef L_ctors
+
+#include "gbl-ctors.h"
+
+/* Provide default definitions for the lists of constructors and
+ destructors, so that we don't get linker errors. These symbols are
+ intentionally bss symbols, so that gld and/or collect will provide
+ the right values. */
+
+/* We declare the lists here with two elements each,
+ so that they are valid empty lists if no other definition is loaded.
+
+ If we are using the old "set" extensions to have the gnu linker
+ collect ctors and dtors, then we __CTOR_LIST__ and __DTOR_LIST__
+ must be in the bss/common section.
+
+ Long term no port should use those extensions. But many still do. */
+#if !defined(INIT_SECTION_ASM_OP) && !defined(CTOR_LISTS_DEFINED_EXTERNALLY)
+#if defined (TARGET_ASM_CONSTRUCTOR) || defined (USE_COLLECT2)
+func_ptr __CTOR_LIST__[2] = {0, 0};
+func_ptr __DTOR_LIST__[2] = {0, 0};
+#else
+func_ptr __CTOR_LIST__[2];
+func_ptr __DTOR_LIST__[2];
+#endif
+#endif /* no INIT_SECTION_ASM_OP and not CTOR_LISTS_DEFINED_EXTERNALLY */
+#endif /* L_ctors */
+
+#ifdef L_exit
+
+#include "gbl-ctors.h"
+
+#ifdef NEED_ATEXIT
+
+#ifndef ON_EXIT
+
+# include <errno.h>
+
+static func_ptr *atexit_chain = 0;
+static long atexit_chain_length = 0;
+static volatile long last_atexit_chain_slot = -1;
+
+int
+atexit (func_ptr func)
+{
+ if (++last_atexit_chain_slot == atexit_chain_length)
+ {
+ atexit_chain_length += 32;
+ if (atexit_chain)
+ atexit_chain = (func_ptr *) realloc (atexit_chain, atexit_chain_length
+ * sizeof (func_ptr));
+ else
+ atexit_chain = (func_ptr *) malloc (atexit_chain_length
+ * sizeof (func_ptr));
+ if (! atexit_chain)
+ {
+ atexit_chain_length = 0;
+ last_atexit_chain_slot = -1;
+ errno = ENOMEM;
+ return (-1);
+ }
+ }
+ atexit_chain[last_atexit_chain_slot] = func;
+ return (0);
+}
+
+extern void _cleanup (void);
+extern void _exit (int) __attribute__ ((__noreturn__));
+
+void
+exit (int status)
+{
+ if (atexit_chain)
+ {
+ for ( ; last_atexit_chain_slot-- >= 0; )
+ {
+ (*atexit_chain[last_atexit_chain_slot + 1]) ();
+ atexit_chain[last_atexit_chain_slot + 1] = 0;
+ }
+ free (atexit_chain);
+ atexit_chain = 0;
+ }
+#ifdef EXIT_BODY
+ EXIT_BODY;
+#else
+ _cleanup ();
+#endif
+ _exit (status);
+}
+
+#else /* ON_EXIT */
+
+/* Simple; we just need a wrapper for ON_EXIT. */
+int
+atexit (func_ptr func)
+{
+ return ON_EXIT (func);
+}
+
+#endif /* ON_EXIT */
+#endif /* NEED_ATEXIT */
+
+#endif /* L_exit */
Index: linux-3.18.21/arch/mips/softfloat/libgcc2.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/softfloat/libgcc2.h 2018-02-05 13:19:56.000000000 +0800
@@ -0,0 +1,314 @@
+/* Header file for libgcc2.c. */
+/* Copyright (C) 2000, 2001
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+
+#ifndef GCC_LIBGCC2_H
+#define GCC_LIBGCC2_H
+
+extern int __gcc_bcmp (const unsigned char *, const unsigned char *, size_t);
+extern void __clear_cache (char *, char *);
+extern void __eprintf (const char *, const char *, unsigned int, const char *)
+ __attribute__ ((__noreturn__));
+
+struct bb;
+extern void __bb_exit_func (void);
+extern void __bb_init_func (struct bb *);
+extern void __bb_fork_func (void);
+
+#if LONG_TYPE_SIZE == GCOV_TYPE_SIZE
+typedef long gcov_type;
+#else
+typedef long long gcov_type;
+#endif
+
+extern gcov_type *__bb_find_arc_counters (void);
+
+struct exception_descriptor;
+extern short int __get_eh_table_language (struct exception_descriptor *);
+extern short int __get_eh_table_version (struct exception_descriptor *);
+
+/* Permit the tm.h file to select the endianness to use just for this
+ file. This is used when the endianness is determined when the
+ compiler is run. */
+
+#ifndef LIBGCC2_WORDS_BIG_ENDIAN
+#define LIBGCC2_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN
+#endif
+
+#ifndef LIBGCC2_LONG_DOUBLE_TYPE_SIZE
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE LONG_DOUBLE_TYPE_SIZE
+#endif
+
+#ifndef MIN_UNITS_PER_WORD
+#define MIN_UNITS_PER_WORD UNITS_PER_WORD
+#endif
+
+/* In the first part of this file, we are interfacing to calls generated
+ by the compiler itself. These calls pass values into these routines
+ which have very specific modes (rather than very specific types), and
+ these compiler-generated calls also expect any return values to have
+ very specific modes (rather than very specific types). Thus, we need
+ to avoid using regular C language type names in this part of the file
+ because the sizes for those types can be configured to be anything.
+ Instead we use the following special type names. */
+
+typedef int QItype __attribute__ ((mode (QI)));
+typedef unsigned int UQItype __attribute__ ((mode (QI)));
+typedef int HItype __attribute__ ((mode (HI)));
+typedef unsigned int UHItype __attribute__ ((mode (HI)));
+#if MIN_UNITS_PER_WORD > 1
+/* These typedefs are usually forbidden on dsp's with UNITS_PER_WORD 1 */
+typedef int SItype __attribute__ ((mode (SI)));
+typedef unsigned int USItype __attribute__ ((mode (SI)));
+#if LONG_LONG_TYPE_SIZE > 32
+/* These typedefs are usually forbidden on archs with UNITS_PER_WORD 2 */
+typedef int DItype __attribute__ ((mode (DI)));
+typedef unsigned int UDItype __attribute__ ((mode (DI)));
+#if MIN_UNITS_PER_WORD > 4
+/* These typedefs are usually forbidden on archs with UNITS_PER_WORD 4 */
+typedef int TItype __attribute__ ((mode (TI)));
+typedef unsigned int UTItype __attribute__ ((mode (TI)));
+#endif
+#endif
+#endif
+
+#if BITS_PER_UNIT == 8
+
+typedef float SFtype __attribute__ ((mode (SF)));
+typedef float DFtype __attribute__ ((mode (DF)));
+
+#if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
+typedef float XFtype __attribute__ ((mode (XF)));
+#endif
+#if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128
+typedef float TFtype __attribute__ ((mode (TF)));
+#endif
+
+#else /* BITS_PER_UNIT != 8 */
+
+/* On dsp's there are usually qf/hf/tqf modes used instead of the above.
+ For now we don't support them in libgcc2.c. */
+
+#undef L_fixdfdi
+#undef L_fixsfdi
+#undef L_fixtfdi
+#undef L_fixunsdfdi
+#undef L_fixunsdfsi
+#undef L_fixunssfdi
+#undef L_fixunssfsi
+#undef L_fixunstfdi
+#undef L_fixunsxfdi
+#undef L_fixunsxfsi
+#undef L_fixxfdi
+#undef L_floatdidf
+#undef L_floatdisf
+#undef L_floatditf
+#undef L_floatdixf
+
+#endif /* BITS_PER_UNIT != 8 */
+
+typedef int word_type __attribute__ ((mode (__word__)));
+
+/* Make sure that we don't accidentally use any normal C language built-in
+ type names in the first part of this file. Instead we want to use *only*
+ the type names defined above. The following macro definitions insure
+ that if we *do* accidentally use some normal C language built-in type name,
+ we will get a syntax error. */
+
+#define char bogus_type
+#define short bogus_type
+#define int bogus_type
+#define long bogus_type
+#define unsigned bogus_type
+#define float bogus_type
+#define double bogus_type
+
+#if MIN_UNITS_PER_WORD > 4
+#define W_TYPE_SIZE (8 * BITS_PER_UNIT)
+#define Wtype DItype
+#define UWtype UDItype
+#define HWtype DItype
+#define UHWtype UDItype
+#define DWtype TItype
+#define UDWtype UTItype
+#define __NW(a,b) __ ## a ## di ## b
+#define __NDW(a,b) __ ## a ## ti ## b
+#elif MIN_UNITS_PER_WORD > 2 \
+ || (MIN_UNITS_PER_WORD > 1 && LONG_LONG_TYPE_SIZE > 32)
+#define W_TYPE_SIZE (4 * BITS_PER_UNIT)
+#define Wtype SItype
+#define UWtype USItype
+#define HWtype SItype
+#define UHWtype USItype
+#define DWtype DItype
+#define UDWtype UDItype
+#define __NW(a,b) __ ## a ## si ## b
+#define __NDW(a,b) __ ## a ## di ## b
+#elif MIN_UNITS_PER_WORD > 1
+#define W_TYPE_SIZE (2 * BITS_PER_UNIT)
+#define Wtype HItype
+#define UWtype UHItype
+#define HWtype HItype
+#define UHWtype UHItype
+#define DWtype SItype
+#define UDWtype USItype
+#define __NW(a,b) __ ## a ## hi ## b
+#define __NDW(a,b) __ ## a ## si ## b
+#else
+#define W_TYPE_SIZE BITS_PER_UNIT
+#define Wtype QItype
+#define UWtype UQItype
+#define HWtype QItype
+#define UHWtype UQItype
+#define DWtype HItype
+#define UDWtype UHItype
+#define __NW(a,b) __ ## a ## qi ## b
+#define __NDW(a,b) __ ## a ## hi ## b
+#endif
+
+#define Wtype_MAX ((Wtype)(((UWtype)1 << (W_TYPE_SIZE - 1)) - 1))
+#define Wtype_MIN (- Wtype_MAX - 1)
+
+#define __muldi3 __NDW(mul,3)
+#define __divdi3 __NDW(div,3)
+#define __udivdi3 __NDW(udiv,3)
+#define __moddi3 __NDW(mod,3)
+#define __umoddi3 __NDW(umod,3)
+#define __negdi2 __NDW(neg,2)
+#define __lshrdi3 __NDW(lshr,3)
+#define __ashldi3 __NDW(ashl,3)
+#define __ashrdi3 __NDW(ashr,3)
+#define __ffsdi2 __NDW(ffs,2)
+#define __cmpdi2 __NDW(cmp,2)
+#define __ucmpdi2 __NDW(ucmp,2)
+#define __udivmoddi4 __NDW(udivmod,4)
+#define __fixunstfDI __NDW(fixunstf,)
+#define __fixtfdi __NDW(fixtf,)
+#define __fixunsxfDI __NDW(fixunsxf,)
+#define __fixxfdi __NDW(fixxf,)
+#define __fixunsdfDI __NDW(fixunsdf,)
+#define __fixdfdi __NDW(fixdf,)
+#define __fixunssfDI __NDW(fixunssf,)
+#define __fixsfdi __NDW(fixsf,)
+#define __floatdixf __NDW(float,xf)
+#define __floatditf __NDW(float,tf)
+#define __floatdidf __NDW(float,df)
+#define __floatdisf __NDW(float,sf)
+#define __fixunsxfSI __NW(fixunsxf,)
+#define __fixunstfSI __NW(fixunstf,)
+#define __fixunsdfSI __NW(fixunsdf,)
+#define __fixunssfSI __NW(fixunssf,)
+
+extern DWtype __muldi3 (DWtype, DWtype);
+extern DWtype __divdi3 (DWtype, DWtype);
+extern UDWtype __udivdi3 (UDWtype, UDWtype);
+extern UDWtype __umoddi3 (UDWtype, UDWtype);
+extern DWtype __moddi3 (DWtype, DWtype);
+
+/* __udivmoddi4 is static inline when building other libgcc2 portions. */
+#if (!defined (L_udivdi3) && !defined (L_divdi3) && \
+ !defined (L_umoddi3) && !defined (L_moddi3))
+extern UDWtype __udivmoddi4 (UDWtype, UDWtype, UDWtype *);
+#endif
+
+/* __negdi2 is static inline when building other libgcc2 portions. */
+#if !defined(L_divdi3) && !defined(L_moddi3)
+extern DWtype __negdi2 (DWtype);
+#endif
+
+extern DWtype __lshrdi3 (DWtype, word_type);
+extern DWtype __ashldi3 (DWtype, word_type);
+extern DWtype __ashrdi3 (DWtype, word_type);
+extern DWtype __ffsdi2 (DWtype);
+
+/* __udiv_w_sdiv is static inline when building other libgcc2 portions. */
+#if (!defined(L_udivdi3) && !defined(L_divdi3) && \
+ !defined(L_umoddi3) && !defined(L_moddi3))
+extern UWtype __udiv_w_sdiv (UWtype *, UWtype, UWtype, UWtype);
+#endif
+
+extern word_type __cmpdi2 (DWtype, DWtype);
+extern word_type __ucmpdi2 (DWtype, DWtype);
+
+extern Wtype __absvsi2 (Wtype);
+extern DWtype __absvdi2 (DWtype);
+extern Wtype __addvsi3 (Wtype, Wtype);
+extern DWtype __addvdi3 (DWtype, DWtype);
+extern Wtype __subvsi3 (Wtype, Wtype);
+extern DWtype __subvdi3 (DWtype, DWtype);
+extern Wtype __mulvsi3 (Wtype, Wtype);
+extern DWtype __mulvdi3 (DWtype, DWtype);
+extern Wtype __negvsi2 (Wtype);
+extern DWtype __negvdi2 (DWtype);
+
+#if BITS_PER_UNIT == 8
+extern DWtype __fixdfdi (DFtype);
+extern DWtype __fixsfdi (SFtype);
+extern DFtype __floatdidf (DWtype);
+extern SFtype __floatdisf (DWtype);
+extern UWtype __fixunsdfSI (DFtype);
+extern UWtype __fixunssfSI (SFtype);
+extern DWtype __fixunsdfDI (DFtype);
+extern DWtype __fixunssfDI (SFtype);
+
+#if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
+extern DWtype __fixxfdi (XFtype);
+extern DWtype __fixunsxfDI (XFtype);
+extern XFtype __floatdixf (DWtype);
+extern UWtype __fixunsxfSI (XFtype);
+#endif
+
+#if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128
+extern DWtype __fixunstfDI (TFtype);
+extern DWtype __fixtfdi (TFtype);
+extern TFtype __floatditf (DWtype);
+#endif
+#endif /* BITS_PER_UNIT == 8 */
+
+/* DWstructs are pairs of Wtype values in the order determined by
+ LIBGCC2_WORDS_BIG_ENDIAN. */
+
+#if LIBGCC2_WORDS_BIG_ENDIAN
+ struct DWstruct {Wtype high, low;};
+#else
+ struct DWstruct {Wtype low, high;};
+#endif
+
+/* We need this union to unpack/pack DImode values, since we don't have
+ any arithmetic yet. Incoming DImode parameters are stored into the
+ `ll' field, and the unpacked result is read from the struct `s'. */
+
+typedef union
+{
+ struct DWstruct s;
+ DWtype ll;
+} DWunion;
+
+#include "longlong.h"
+
+#endif /* ! GCC_LIBGCC2_H */
Index: linux-3.18.21/arch/mips/softfloat/longlong.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/softfloat/longlong.h 2018-02-05 13:19:56.000000000 +0800
@@ -0,0 +1,1346 @@
+/* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
+ Copyright (C) 1991, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000
+ Free Software Foundation, Inc.
+
+ This definition file is free software; you can redistribute it
+ and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2, or (at your option) any later version.
+
+ This definition file is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+/* You have to define the following before including this file:
+
+ UWtype -- An unsigned type, default type for operations (typically a "word")
+ UHWtype -- An unsigned type, at least half the size of UWtype.
+ UDWtype -- An unsigned type, at least twice as large a UWtype
+ W_TYPE_SIZE -- size in bits of UWtype
+
+ UQItype -- Unsigned 8 bit type.
+ SItype, USItype -- Signed and unsigned 32 bit types.
+ DItype, UDItype -- Signed and unsigned 64 bit types.
+
+ On a 32 bit machine UWtype should typically be USItype;
+ on a 64 bit machine, UWtype should typically be UDItype.
+*/
+
+#define __BITS4 (W_TYPE_SIZE / 4)
+#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
+#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
+#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
+
+#ifndef W_TYPE_SIZE
+#define W_TYPE_SIZE 32
+#define UWtype USItype
+#define UHWtype USItype
+#define UDWtype UDItype
+#endif
+
+/* Define auxiliary asm macros.
+
+ 1) umul_ppmm(high_prod, low_prod, multipler, multiplicand) multiplies two
+ UWtype integers MULTIPLER and MULTIPLICAND, and generates a two UWtype
+ word product in HIGH_PROD and LOW_PROD.
+
+ 2) __umulsidi3(a,b) multiplies two UWtype integers A and B, and returns a
+ UDWtype product. This is just a variant of umul_ppmm.
+
+ 3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
+ denominator) divides a UDWtype, composed by the UWtype integers
+ HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient
+ in QUOTIENT and the remainder in REMAINDER. HIGH_NUMERATOR must be less
+ than DENOMINATOR for correct operation. If, in addition, the most
+ significant bit of DENOMINATOR must be 1, then the pre-processor symbol
+ UDIV_NEEDS_NORMALIZATION is defined to 1.
+
+ 4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
+ denominator). Like udiv_qrnnd but the numbers are signed. The quotient
+ is rounded towards 0.
+
+ 5) count_leading_zeros(count, x) counts the number of zero-bits from the
+ msb to the first nonzero bit in the UWtype X. This is the number of
+ steps X needs to be shifted left to set the msb. Undefined for X == 0,
+ unless the symbol COUNT_LEADING_ZEROS_0 is defined to some value.
+
+ 6) count_trailing_zeros(count, x) like count_leading_zeros, but counts
+ from the least significant end.
+
+ 7) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
+ high_addend_2, low_addend_2) adds two UWtype integers, composed by
+ HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2
+ respectively. The result is placed in HIGH_SUM and LOW_SUM. Overflow
+ (i.e. carry out) is not stored anywhere, and is lost.
+
+ 8) sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend,
+ high_subtrahend, low_subtrahend) subtracts two two-word UWtype integers,
+ composed by HIGH_MINUEND_1 and LOW_MINUEND_1, and HIGH_SUBTRAHEND_2 and
+ LOW_SUBTRAHEND_2 respectively. The result is placed in HIGH_DIFFERENCE
+ and LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere,
+ and is lost.
+
+ If any of these macros are left undefined for a particular CPU,
+ C macros are used. */
+
+/* The CPUs come in alphabetical order below.
+
+ Please add support for more CPUs here, or improve the current support
+ for the CPUs below!
+ (E.g. WE32100, IBM360.) */
+
+#if defined (__GNUC__) && !defined (NO_ASM)
+
+/* We sometimes need to clobber "cc" with gcc2, but that would not be
+ understood by gcc1. Use cpp to avoid major code duplication. */
+#if __GNUC__ < 2
+#define __CLOBBER_CC
+#define __AND_CLOBBER_CC
+#else /* __GNUC__ >= 2 */
+#define __CLOBBER_CC : "cc"
+#define __AND_CLOBBER_CC , "cc"
+#endif /* __GNUC__ < 2 */
+
+#if defined (__alpha) && W_TYPE_SIZE == 64
+#define umul_ppmm(ph, pl, m0, m1) \
+ do { \
+ UDItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("umulh %r1,%2,%0" \
+ : "=r" ((UDItype) ph) \
+ : "%rJ" (__m0), \
+ "rI" (__m1)); \
+ (pl) = __m0 * __m1; \
+ } while (0)
+#define UMUL_TIME 46
+#ifndef LONGLONG_STANDALONE
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ do { UDItype __r; \
+ (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \
+ (r) = __r; \
+ } while (0)
+extern UDItype __udiv_qrnnd (UDItype *, UDItype, UDItype, UDItype);
+#define UDIV_TIME 220
+#endif /* LONGLONG_STANDALONE */
+#ifdef __alpha_cix__
+#define count_leading_zeros(COUNT,X) \
+ __asm__("ctlz %1,%0" : "=r"(COUNT) : "r"(X))
+#define count_trailing_zeros(COUNT,X) \
+ __asm__("cttz %1,%0" : "=r"(COUNT) : "r"(X))
+#define COUNT_LEADING_ZEROS_0 64
+#else
+extern const UQItype __clz_tab[];
+#define count_leading_zeros(COUNT,X) \
+ do { \
+ UDItype __xr = (X), __t, __a; \
+ __asm__("cmpbge $31,%1,%0" : "=r"(__t) : "r"(__xr)); \
+ __a = __clz_tab[__t ^ 0xff] - 1; \
+ __asm__("extbl %1,%2,%0" : "=r"(__t) : "r"(__xr), "r"(__a)); \
+ (COUNT) = 64 - (__clz_tab[__t] + __a*8); \
+ } while (0)
+#define count_trailing_zeros(COUNT,X) \
+ do { \
+ UDItype __xr = (X), __t, __a; \
+ __asm__("cmpbge $31,%1,%0" : "=r"(__t) : "r"(__xr)); \
+ __t = ~__t & -~__t; \
+ __a = ((__t & 0xCC) != 0) * 2; \
+ __a += ((__t & 0xF0) != 0) * 4; \
+ __a += ((__t & 0xAA) != 0); \
+ __asm__("extbl %1,%2,%0" : "=r"(__t) : "r"(__xr), "r"(__a)); \
+ __a <<= 3; \
+ __t &= -__t; \
+ __a += ((__t & 0xCC) != 0) * 2; \
+ __a += ((__t & 0xF0) != 0) * 4; \
+ __a += ((__t & 0xAA) != 0); \
+ (COUNT) = __a; \
+ } while (0)
+#endif /* __alpha_cix__ */
+#endif /* __alpha */
+
+#if defined (__arc__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add.f %1, %4, %5\n\tadc %0, %2, %3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%r" ((USItype) (ah)), \
+ "rIJ" ((USItype) (bh)), \
+ "%r" ((USItype) (al)), \
+ "rIJ" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub.f %1, %4, %5\n\tsbc %0, %2, %3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "r" ((USItype) (ah)), \
+ "rIJ" ((USItype) (bh)), \
+ "r" ((USItype) (al)), \
+ "rIJ" ((USItype) (bl)))
+/* Call libgcc routine. */
+#define umul_ppmm(w1, w0, u, v) \
+do { \
+ DWunion __w; \
+ __w.ll = __umulsidi3 (u, v); \
+ w1 = __w.s.high; \
+ w0 = __w.s.low; \
+} while (0)
+#define __umulsidi3 __umulsidi3
+UDItype __umulsidi3 (USItype, USItype);
+#endif
+
+#if defined (__arm__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("adds %1, %4, %5\n\tadc %0, %2, %3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%r" ((USItype) (ah)), \
+ "rI" ((USItype) (bh)), \
+ "%r" ((USItype) (al)), \
+ "rI" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subs %1, %4, %5\n\tsbc %0, %2, %3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "r" ((USItype) (ah)), \
+ "rI" ((USItype) (bh)), \
+ "r" ((USItype) (al)), \
+ "rI" ((USItype) (bl)))
+#define umul_ppmm(xh, xl, a, b) \
+{register USItype __t0, __t1, __t2; \
+ __asm__ ("%@ Inlined umul_ppmm\n" \
+ " mov %2, %5, lsr #16\n" \
+ " mov %0, %6, lsr #16\n" \
+ " bic %3, %5, %2, lsl #16\n" \
+ " bic %4, %6, %0, lsl #16\n" \
+ " mul %1, %3, %4\n" \
+ " mul %4, %2, %4\n" \
+ " mul %3, %0, %3\n" \
+ " mul %0, %2, %0\n" \
+ " adds %3, %4, %3\n" \
+ " addcs %0, %0, #65536\n" \
+ " adds %1, %1, %3, lsl #16\n" \
+ " adc %0, %0, %3, lsr #16" \
+ : "=&r" ((USItype) (xh)), \
+ "=r" ((USItype) (xl)), \
+ "=&r" (__t0), "=&r" (__t1), "=r" (__t2) \
+ : "r" ((USItype) (a)), \
+ "r" ((USItype) (b)));}
+#define UMUL_TIME 20
+#define UDIV_TIME 100
+#endif /* __arm__ */
+
+#if defined (__hppa) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add %4,%5,%1\n\taddc %2,%3,%0" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%rM" ((USItype) (ah)), \
+ "rM" ((USItype) (bh)), \
+ "%rM" ((USItype) (al)), \
+ "rM" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub %4,%5,%1\n\tsubb %2,%3,%0" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "rM" ((USItype) (ah)), \
+ "rM" ((USItype) (bh)), \
+ "rM" ((USItype) (al)), \
+ "rM" ((USItype) (bl)))
+#if defined (_PA_RISC1_1)
+#define umul_ppmm(w1, w0, u, v) \
+ do { \
+ union \
+ { \
+ UDItype __f; \
+ struct {USItype __w1, __w0;} __w1w0; \
+ } __t; \
+ __asm__ ("xmpyu %1,%2,%0" \
+ : "=x" (__t.__f) \
+ : "x" ((USItype) (u)), \
+ "x" ((USItype) (v))); \
+ (w1) = __t.__w1w0.__w1; \
+ (w0) = __t.__w1w0.__w0; \
+ } while (0)
+#define UMUL_TIME 8
+#else
+#define UMUL_TIME 30
+#endif
+#define UDIV_TIME 40
+#define count_leading_zeros(count, x) \
+ do { \
+ USItype __tmp; \
+ __asm__ ( \
+ "ldi 1,%0\n" \
+" extru,= %1,15,16,%%r0 ; Bits 31..16 zero?\n" \
+" extru,tr %1,15,16,%1 ; No. Shift down, skip add.\n"\
+" ldo 16(%0),%0 ; Yes. Perform add.\n" \
+" extru,= %1,23,8,%%r0 ; Bits 15..8 zero?\n" \
+" extru,tr %1,23,8,%1 ; No. Shift down, skip add.\n"\
+" ldo 8(%0),%0 ; Yes. Perform add.\n" \
+" extru,= %1,27,4,%%r0 ; Bits 7..4 zero?\n" \
+" extru,tr %1,27,4,%1 ; No. Shift down, skip add.\n"\
+" ldo 4(%0),%0 ; Yes. Perform add.\n" \
+" extru,= %1,29,2,%%r0 ; Bits 3..2 zero?\n" \
+" extru,tr %1,29,2,%1 ; No. Shift down, skip add.\n"\
+" ldo 2(%0),%0 ; Yes. Perform add.\n" \
+" extru %1,30,1,%1 ; Extract bit 1.\n" \
+" sub %0,%1,%0 ; Subtract it.\n" \
+ : "=r" (count), "=r" (__tmp) : "1" (x)); \
+ } while (0)
+#endif
+
+#if (defined (__i370__) || defined (__s390__) || defined (__mvs__)) && W_TYPE_SIZE == 32
+#define smul_ppmm(xh, xl, m0, m1) \
+ do { \
+ union {DItype __ll; \
+ struct {USItype __h, __l;} __i; \
+ } __x; \
+ __asm__ ("lr %N0,%1\n\tmr %0,%2" \
+ : "=&r" (__x.__ll) \
+ : "r" (m0), "r" (m1)); \
+ (xh) = __x.__i.__h; (xl) = __x.__i.__l; \
+ } while (0)
+#define sdiv_qrnnd(q, r, n1, n0, d) \
+ do { \
+ union {DItype __ll; \
+ struct {USItype __h, __l;} __i; \
+ } __x; \
+ __x.__i.__h = n1; __x.__i.__l = n0; \
+ __asm__ ("dr %0,%2" \
+ : "=r" (__x.__ll) \
+ : "0" (__x.__ll), "r" (d)); \
+ (q) = __x.__i.__l; (r) = __x.__i.__h; \
+ } while (0)
+#endif
+
+#if (defined (__i386__) || defined (__i486__)) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addl %5,%1\n\tadcl %3,%0" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%0" ((USItype) (ah)), \
+ "g" ((USItype) (bh)), \
+ "%1" ((USItype) (al)), \
+ "g" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subl %5,%1\n\tsbbl %3,%0" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "0" ((USItype) (ah)), \
+ "g" ((USItype) (bh)), \
+ "1" ((USItype) (al)), \
+ "g" ((USItype) (bl)))
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("mull %3" \
+ : "=a" ((USItype) (w0)), \
+ "=d" ((USItype) (w1)) \
+ : "%0" ((USItype) (u)), \
+ "rm" ((USItype) (v)))
+#define udiv_qrnnd(q, r, n1, n0, dv) \
+ __asm__ ("divl %4" \
+ : "=a" ((USItype) (q)), \
+ "=d" ((USItype) (r)) \
+ : "0" ((USItype) (n0)), \
+ "1" ((USItype) (n1)), \
+ "rm" ((USItype) (dv)))
+#define count_leading_zeros(count, x) \
+ do { \
+ USItype __cbtmp; \
+ __asm__ ("bsrl %1,%0" \
+ : "=r" (__cbtmp) : "rm" ((USItype) (x))); \
+ (count) = __cbtmp ^ 31; \
+ } while (0)
+#define count_trailing_zeros(count, x) \
+ __asm__ ("bsfl %1,%0" : "=r" (count) : "rm" ((USItype)(x)))
+#define UMUL_TIME 40
+#define UDIV_TIME 40
+#endif /* 80x86 */
+
+#if defined (__i960__) && W_TYPE_SIZE == 32
+#define umul_ppmm(w1, w0, u, v) \
+ ({union {UDItype __ll; \
+ struct {USItype __l, __h;} __i; \
+ } __xx; \
+ __asm__ ("emul %2,%1,%0" \
+ : "=d" (__xx.__ll) \
+ : "%dI" ((USItype) (u)), \
+ "dI" ((USItype) (v))); \
+ (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
+#define __umulsidi3(u, v) \
+ ({UDItype __w; \
+ __asm__ ("emul %2,%1,%0" \
+ : "=d" (__w) \
+ : "%dI" ((USItype) (u)), \
+ "dI" ((USItype) (v))); \
+ __w; })
+#endif /* __i960__ */
+
+#if defined (__M32R__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ /* The cmp clears the condition bit. */ \
+ __asm__ ("cmp %0,%0\n\taddx %%5,%1\n\taddx %%3,%0" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%0" ((USItype) (ah)), \
+ "r" ((USItype) (bh)), \
+ "%1" ((USItype) (al)), \
+ "r" ((USItype) (bl)) \
+ : "cbit")
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ /* The cmp clears the condition bit. */ \
+ __asm__ ("cmp %0,%0\n\tsubx %5,%1\n\tsubx %3,%0" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "0" ((USItype) (ah)), \
+ "r" ((USItype) (bh)), \
+ "1" ((USItype) (al)), \
+ "r" ((USItype) (bl)) \
+ : "cbit")
+#endif /* __M32R__ */
+
+#if defined (__mc68000__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add%.l %5,%1\n\taddx%.l %3,%0" \
+ : "=d" ((USItype) (sh)), \
+ "=&d" ((USItype) (sl)) \
+ : "%0" ((USItype) (ah)), \
+ "d" ((USItype) (bh)), \
+ "%1" ((USItype) (al)), \
+ "g" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub%.l %5,%1\n\tsubx%.l %3,%0" \
+ : "=d" ((USItype) (sh)), \
+ "=&d" ((USItype) (sl)) \
+ : "0" ((USItype) (ah)), \
+ "d" ((USItype) (bh)), \
+ "1" ((USItype) (al)), \
+ "g" ((USItype) (bl)))
+
+/* The '020, '030, '040 and CPU32 have 32x32->64 and 64/32->32q-32r. */
+#if defined (__mc68020__) || defined(mc68020) \
+ || defined(__mc68030__) || defined(mc68030) \
+ || defined(__mc68040__) || defined(mc68040) \
+ || defined(__mcpu32__) || defined(mcpu32)
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("mulu%.l %3,%1:%0" \
+ : "=d" ((USItype) (w0)), \
+ "=d" ((USItype) (w1)) \
+ : "%0" ((USItype) (u)), \
+ "dmi" ((USItype) (v)))
+#define UMUL_TIME 45
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("divu%.l %4,%1:%0" \
+ : "=d" ((USItype) (q)), \
+ "=d" ((USItype) (r)) \
+ : "0" ((USItype) (n0)), \
+ "1" ((USItype) (n1)), \
+ "dmi" ((USItype) (d)))
+#define UDIV_TIME 90
+#define sdiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("divs%.l %4,%1:%0" \
+ : "=d" ((USItype) (q)), \
+ "=d" ((USItype) (r)) \
+ : "0" ((USItype) (n0)), \
+ "1" ((USItype) (n1)), \
+ "dmi" ((USItype) (d)))
+
+#else /* not mc68020 */
+#if !defined(__mcf5200__)
+/* %/ inserts REGISTER_PREFIX, %# inserts IMMEDIATE_PREFIX. */
+#define umul_ppmm(xh, xl, a, b) \
+ __asm__ ("| Inlined umul_ppmm\n" \
+ " move%.l %2,%/d0\n" \
+ " move%.l %3,%/d1\n" \
+ " move%.l %/d0,%/d2\n" \
+ " swap %/d0\n" \
+ " move%.l %/d1,%/d3\n" \
+ " swap %/d1\n" \
+ " move%.w %/d2,%/d4\n" \
+ " mulu %/d3,%/d4\n" \
+ " mulu %/d1,%/d2\n" \
+ " mulu %/d0,%/d3\n" \
+ " mulu %/d0,%/d1\n" \
+ " move%.l %/d4,%/d0\n" \
+ " eor%.w %/d0,%/d0\n" \
+ " swap %/d0\n" \
+ " add%.l %/d0,%/d2\n" \
+ " add%.l %/d3,%/d2\n" \
+ " jcc 1f\n" \
+ " add%.l %#65536,%/d1\n" \
+ "1: swap %/d2\n" \
+ " moveq %#0,%/d0\n" \
+ " move%.w %/d2,%/d0\n" \
+ " move%.w %/d4,%/d2\n" \
+ " move%.l %/d2,%1\n" \
+ " add%.l %/d1,%/d0\n" \
+ " move%.l %/d0,%0" \
+ : "=g" ((USItype) (xh)), \
+ "=g" ((USItype) (xl)) \
+ : "g" ((USItype) (a)), \
+ "g" ((USItype) (b)) \
+ : "d0", "d1", "d2", "d3", "d4")
+#define UMUL_TIME 100
+#define UDIV_TIME 400
+#endif /* not mcf5200 */
+#endif /* not mc68020 */
+
+/* The '020, '030, '040 and '060 have bitfield insns. */
+#if defined (__mc68020__) || defined(mc68020) \
+ || defined(__mc68030__) || defined(mc68030) \
+ || defined(__mc68040__) || defined(mc68040) \
+ || defined(__mc68060__) || defined(mc68060)
+#define count_leading_zeros(count, x) \
+ __asm__ ("bfffo %1{%b2:%b2},%0" \
+ : "=d" ((USItype) (count)) \
+ : "od" ((USItype) (x)), "n" (0))
+#endif
+#endif /* mc68000 */
+
+#if defined (__m88000__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addu.co %1,%r4,%r5\n\taddu.ci %0,%r2,%r3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%rJ" ((USItype) (ah)), \
+ "rJ" ((USItype) (bh)), \
+ "%rJ" ((USItype) (al)), \
+ "rJ" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subu.co %1,%r4,%r5\n\tsubu.ci %0,%r2,%r3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "rJ" ((USItype) (ah)), \
+ "rJ" ((USItype) (bh)), \
+ "rJ" ((USItype) (al)), \
+ "rJ" ((USItype) (bl)))
+#define count_leading_zeros(count, x) \
+ do { \
+ USItype __cbtmp; \
+ __asm__ ("ff1 %0,%1" \
+ : "=r" (__cbtmp) \
+ : "r" ((USItype) (x))); \
+ (count) = __cbtmp ^ 31; \
+ } while (0)
+#define COUNT_LEADING_ZEROS_0 63 /* sic */
+#if defined (__mc88110__)
+#define umul_ppmm(wh, wl, u, v) \
+ do { \
+ union {UDItype __ll; \
+ struct {USItype __h, __l;} __i; \
+ } __xx; \
+ __asm__ ("mulu.d %0,%1,%2" \
+ : "=r" (__xx.__ll) \
+ : "r" ((USItype) (u)), \
+ "r" ((USItype) (v))); \
+ (wh) = __xx.__i.__h; \
+ (wl) = __xx.__i.__l; \
+ } while (0)
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ ({union {UDItype __ll; \
+ struct {USItype __h, __l;} __i; \
+ } __xx; \
+ USItype __q; \
+ __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
+ __asm__ ("divu.d %0,%1,%2" \
+ : "=r" (__q) \
+ : "r" (__xx.__ll), \
+ "r" ((USItype) (d))); \
+ (r) = (n0) - __q * (d); (q) = __q; })
+#define UMUL_TIME 5
+#define UDIV_TIME 25
+#else
+#define UMUL_TIME 17
+#define UDIV_TIME 150
+#endif /* __mc88110__ */
+#endif /* __m88000__ */
+
+#if defined (__mips__) && W_TYPE_SIZE == 32
+#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4
+#define umul_ppmm(w1, w0, u, v) \
+do { \
+ UDItype __ll = (UDItype)(u) * (v); \
+ w1 = __ll >> 32; \
+ w0 = __ll; \
+} while (0)
+#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("multu %2,%3" \
+ : "=l" ((USItype) (w0)), \
+ "=h" ((USItype) (w1)) \
+ : "d" ((USItype) (u)), \
+ "d" ((USItype)(v)))
+#else
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("multu %2,%3\n" \
+ "mflo %0\n" \
+ "mfhi %1" \
+ : "=d" ((USItype)(w0)), \
+ "=d" ((USItype)(w1)) \
+ : "d" ((USItype)(u)), \
+ "d" ((USItype)(v)))
+#endif
+#define UMUL_TIME 10
+#define UDIV_TIME 100
+#endif /* __mips__ */
+
+#if defined (__ns32000__) && W_TYPE_SIZE == 32
+#define umul_ppmm(w1, w0, u, v) \
+ ({union {UDItype __ll; \
+ struct {USItype __l, __h;} __i; \
+ } __xx; \
+ __asm__ ("meid %2,%0" \
+ : "=g" (__xx.__ll) \
+ : "%0" ((USItype) (u)), \
+ "g" ((USItype) (v))); \
+ (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
+#define __umulsidi3(u, v) \
+ ({UDItype __w; \
+ __asm__ ("meid %2,%0" \
+ : "=g" (__w) \
+ : "%0" ((USItype) (u)), \
+ "g" ((USItype) (v))); \
+ __w; })
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ ({union {UDItype __ll; \
+ struct {USItype __l, __h;} __i; \
+ } __xx; \
+ __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
+ __asm__ ("deid %2,%0" \
+ : "=g" (__xx.__ll) \
+ : "0" (__xx.__ll), \
+ "g" ((USItype) (d))); \
+ (r) = __xx.__i.__l; (q) = __xx.__i.__h; })
+#define count_trailing_zeros(count,x) \
+ do { \
+ __asm__ ("ffsd %2,%0" \
+ : "=r" ((USItype) (count)) \
+ : "0" ((USItype) 0), \
+ "r" ((USItype) (x))); \
+ } while (0)
+#endif /* __ns32000__ */
+
+/* FIXME: We should test _IBMR2 here when we add assembly support for the
+ system vendor compilers.
+ FIXME: What's needed for gcc PowerPC VxWorks? __vxworks__ is not good
+ enough, since that hits ARM and m68k too. */
+#if (defined (_ARCH_PPC) /* AIX */ \
+ || defined (_ARCH_PWR) /* AIX */ \
+ || defined (_ARCH_COM) /* AIX */ \
+ || defined (__powerpc__) /* gcc */ \
+ || defined (__POWERPC__) /* BEOS */ \
+ || defined (__ppc__) /* Darwin */ \
+ || defined (PPC) /* GNU/Linux, SysV */ \
+ ) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ do { \
+ if (__builtin_constant_p (bh) && (bh) == 0) \
+ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
+ else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
+ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
+ else \
+ __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
+ : "=r" (sh), "=&r" (sl) \
+ : "%r" (ah), "r" (bh), "%r" (al), "rI" (bl)); \
+ } while (0)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ do { \
+ if (__builtin_constant_p (ah) && (ah) == 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
+ else if (__builtin_constant_p (ah) && (ah) == ~(USItype) 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
+ else if (__builtin_constant_p (bh) && (bh) == 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
+ else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
+ else \
+ __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
+ : "=r" (sh), "=&r" (sl) \
+ : "r" (ah), "r" (bh), "rI" (al), "r" (bl)); \
+ } while (0)
+#define count_leading_zeros(count, x) \
+ __asm__ ("{cntlz|cntlzw} %0,%1" : "=r" (count) : "r" (x))
+#define COUNT_LEADING_ZEROS_0 32
+#if defined (_ARCH_PPC) || defined (__powerpc__) || defined (__POWERPC__) \
+ || defined (__ppc__) || defined (PPC) || defined (__vxworks__)
+#define umul_ppmm(ph, pl, m0, m1) \
+ do { \
+ USItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("mulhwu %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1)); \
+ (pl) = __m0 * __m1; \
+ } while (0)
+#define UMUL_TIME 15
+#define smul_ppmm(ph, pl, m0, m1) \
+ do { \
+ SItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("mulhw %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1)); \
+ (pl) = __m0 * __m1; \
+ } while (0)
+#define SMUL_TIME 14
+#define UDIV_TIME 120
+#elif defined (_ARCH_PWR)
+#define UMUL_TIME 8
+#define smul_ppmm(xh, xl, m0, m1) \
+ __asm__ ("mul %0,%2,%3" : "=r" (xh), "=q" (xl) : "r" (m0), "r" (m1))
+#define SMUL_TIME 4
+#define sdiv_qrnnd(q, r, nh, nl, d) \
+ __asm__ ("div %0,%2,%4" : "=r" (q), "=q" (r) : "r" (nh), "1" (nl), "r" (d))
+#define UDIV_TIME 100
+#endif
+#endif /* 32-bit POWER architecture variants. */
+
+/* We should test _IBMR2 here when we add assembly support for the system
+ vendor compilers. */
+#if (defined (_ARCH_PPC64) || defined (__powerpc64__)) && W_TYPE_SIZE == 64
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ do { \
+ if (__builtin_constant_p (bh) && (bh) == 0) \
+ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
+ else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0) \
+ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
+ else \
+ __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
+ : "=r" (sh), "=&r" (sl) \
+ : "%r" (ah), "r" (bh), "%r" (al), "rI" (bl)); \
+ } while (0)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ do { \
+ if (__builtin_constant_p (ah) && (ah) == 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
+ else if (__builtin_constant_p (ah) && (ah) == ~(UDItype) 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
+ else if (__builtin_constant_p (bh) && (bh) == 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
+ else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
+ else \
+ __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
+ : "=r" (sh), "=&r" (sl) \
+ : "r" (ah), "r" (bh), "rI" (al), "r" (bl)); \
+ } while (0)
+#define count_leading_zeros(count, x) \
+ __asm__ ("cntlzd %0,%1" : "=r" (count) : "r" (x))
+#define COUNT_LEADING_ZEROS_0 64
+#define umul_ppmm(ph, pl, m0, m1) \
+ do { \
+ UDItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("mulhdu %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1)); \
+ (pl) = __m0 * __m1; \
+ } while (0)
+#define UMUL_TIME 15
+#define smul_ppmm(ph, pl, m0, m1) \
+ do { \
+ DItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("mulhd %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1)); \
+ (pl) = __m0 * __m1; \
+ } while (0)
+#define SMUL_TIME 14 /* ??? */
+#define UDIV_TIME 120 /* ??? */
+#endif /* 64-bit PowerPC. */
+
+#if defined (__ibm032__) /* RT/ROMP */ && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("a %1,%5\n\tae %0,%3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%0" ((USItype) (ah)), \
+ "r" ((USItype) (bh)), \
+ "%1" ((USItype) (al)), \
+ "r" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("s %1,%5\n\tse %0,%3" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "0" ((USItype) (ah)), \
+ "r" ((USItype) (bh)), \
+ "1" ((USItype) (al)), \
+ "r" ((USItype) (bl)))
+#define umul_ppmm(ph, pl, m0, m1) \
+ do { \
+ USItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ( \
+ "s r2,r2\n" \
+" mts r10,%2\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" cas %0,r2,r0\n" \
+" mfs r10,%1" \
+ : "=r" ((USItype) (ph)), \
+ "=r" ((USItype) (pl)) \
+ : "%r" (__m0), \
+ "r" (__m1) \
+ : "r2"); \
+ (ph) += ((((SItype) __m0 >> 31) & __m1) \
+ + (((SItype) __m1 >> 31) & __m0)); \
+ } while (0)
+#define UMUL_TIME 20
+#define UDIV_TIME 200
+#define count_leading_zeros(count, x) \
+ do { \
+ if ((x) >= 0x10000) \
+ __asm__ ("clz %0,%1" \
+ : "=r" ((USItype) (count)) \
+ : "r" ((USItype) (x) >> 16)); \
+ else \
+ { \
+ __asm__ ("clz %0,%1" \
+ : "=r" ((USItype) (count)) \
+ : "r" ((USItype) (x))); \
+ (count) += 16; \
+ } \
+ } while (0)
+#endif
+
+#if defined (__sh2__) && W_TYPE_SIZE == 32
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ( \
+ "dmulu.l %2,%3\n\tsts macl,%1\n\tsts mach,%0" \
+ : "=r" ((USItype)(w1)), \
+ "=r" ((USItype)(w0)) \
+ : "r" ((USItype)(u)), \
+ "r" ((USItype)(v)) \
+ : "macl", "mach")
+#define UMUL_TIME 5
+#endif
+
+#if defined (__SH5__) && __SHMEDIA__ && W_TYPE_SIZE == 32
+#define __umulsidi3(u,v) ((UDItype)(USItype)u*(USItype)v)
+#define count_leading_zeros(count, x) \
+ do \
+ { \
+ UDItype x_ = (USItype)(x); \
+ SItype c_; \
+ \
+ __asm__ ("nsb %1, %0" : "=r" (c_) : "r" (x_)); \
+ (count) = c_ - 31; \
+ } \
+ while (0)
+#define COUNT_LEADING_ZEROS_0 32
+#endif
+
+#if defined (__sparc__) && !defined (__arch64__) && !defined (__sparcv9) \
+ && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addcc %r4,%5,%1\n\taddx %r2,%3,%0" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "%rJ" ((USItype) (ah)), \
+ "rI" ((USItype) (bh)), \
+ "%rJ" ((USItype) (al)), \
+ "rI" ((USItype) (bl)) \
+ __CLOBBER_CC)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subcc %r4,%5,%1\n\tsubx %r2,%3,%0" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "rJ" ((USItype) (ah)), \
+ "rI" ((USItype) (bh)), \
+ "rJ" ((USItype) (al)), \
+ "rI" ((USItype) (bl)) \
+ __CLOBBER_CC)
+#if defined (__sparc_v8__)
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("umul %2,%3,%1;rd %%y,%0" \
+ : "=r" ((USItype) (w1)), \
+ "=r" ((USItype) (w0)) \
+ : "r" ((USItype) (u)), \
+ "r" ((USItype) (v)))
+#define udiv_qrnnd(__q, __r, __n1, __n0, __d) \
+ __asm__ ("mov %2,%%y;nop;nop;nop;udiv %3,%4,%0;umul %0,%4,%1;sub %3,%1,%1"\
+ : "=&r" ((USItype) (__q)), \
+ "=&r" ((USItype) (__r)) \
+ : "r" ((USItype) (__n1)), \
+ "r" ((USItype) (__n0)), \
+ "r" ((USItype) (__d)))
+#else
+#if defined (__sparclite__)
+/* This has hardware multiply but not divide. It also has two additional
+ instructions scan (ffs from high bit) and divscc. */
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("umul %2,%3,%1;rd %%y,%0" \
+ : "=r" ((USItype) (w1)), \
+ "=r" ((USItype) (w0)) \
+ : "r" ((USItype) (u)), \
+ "r" ((USItype) (v)))
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("! Inlined udiv_qrnnd\n" \
+" wr %%g0,%2,%%y ! Not a delayed write for sparclite\n" \
+" tst %%g0\n" \
+" divscc %3,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%0\n" \
+" rd %%y,%1\n" \
+" bl,a 1f\n" \
+" add %1,%4,%1\n" \
+"1: ! End of inline udiv_qrnnd" \
+ : "=r" ((USItype) (q)), \
+ "=r" ((USItype) (r)) \
+ : "r" ((USItype) (n1)), \
+ "r" ((USItype) (n0)), \
+ "rI" ((USItype) (d)) \
+ : "g1" __AND_CLOBBER_CC)
+#define UDIV_TIME 37
+#define count_leading_zeros(count, x) \
+ do { \
+ __asm__ ("scan %1,1,%0" \
+ : "=r" ((USItype) (count)) \
+ : "r" ((USItype) (x))); \
+ } while (0)
+/* Early sparclites return 63 for an argument of 0, but they warn that future
+ implementations might change this. Therefore, leave COUNT_LEADING_ZEROS_0
+ undefined. */
+#else
+/* SPARC without integer multiplication and divide instructions.
+ (i.e. at least Sun4/20,40,60,65,75,110,260,280,330,360,380,470,490) */
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("! Inlined umul_ppmm\n" \
+" wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr\n"\
+" sra %3,31,%%o5 ! Don't move this insn\n" \
+" and %2,%%o5,%%o5 ! Don't move this insn\n" \
+" andcc %%g0,0,%%g1 ! Don't move this insn\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,0,%%g1\n" \
+" add %%g1,%%o5,%0\n" \
+" rd %%y,%1" \
+ : "=r" ((USItype) (w1)), \
+ "=r" ((USItype) (w0)) \
+ : "%rI" ((USItype) (u)), \
+ "r" ((USItype) (v)) \
+ : "g1", "o5" __AND_CLOBBER_CC)
+#define UMUL_TIME 39 /* 39 instructions */
+/* It's quite necessary to add this much assembler for the sparc.
+ The default udiv_qrnnd (in C) is more than 10 times slower! */
+#define udiv_qrnnd(__q, __r, __n1, __n0, __d) \
+ __asm__ ("! Inlined udiv_qrnnd\n" \
+" mov 32,%%g1\n" \
+" subcc %1,%2,%%g0\n" \
+"1: bcs 5f\n" \
+" addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb\n" \
+" sub %1,%2,%1 ! this kills msb of n\n" \
+" addx %1,%1,%1 ! so this can't give carry\n" \
+" subcc %%g1,1,%%g1\n" \
+"2: bne 1b\n" \
+" subcc %1,%2,%%g0\n" \
+" bcs 3f\n" \
+" addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb\n" \
+" b 3f\n" \
+" sub %1,%2,%1 ! this kills msb of n\n" \
+"4: sub %1,%2,%1\n" \
+"5: addxcc %1,%1,%1\n" \
+" bcc 2b\n" \
+" subcc %%g1,1,%%g1\n" \
+"! Got carry from n. Subtract next step to cancel this carry.\n" \
+" bne 4b\n" \
+" addcc %0,%0,%0 ! shift n1n0 and a 0-bit in lsb\n" \
+" sub %1,%2,%1\n" \
+"3: xnor %0,0,%0\n" \
+" ! End of inline udiv_qrnnd" \
+ : "=&r" ((USItype) (__q)), \
+ "=&r" ((USItype) (__r)) \
+ : "r" ((USItype) (__d)), \
+ "1" ((USItype) (__n1)), \
+ "0" ((USItype) (__n0)) : "g1" __AND_CLOBBER_CC)
+#define UDIV_TIME (3+7*32) /* 7 instructions/iteration. 32 iterations. */
+#endif /* __sparclite__ */
+#endif /* __sparc_v8__ */
+#endif /* sparc32 */
+
+#if ((defined (__sparc__) && defined (__arch64__)) || defined (__sparcv9)) \
+ && W_TYPE_SIZE == 64
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addcc %r4,%5,%1\n\t" \
+ "add %r2,%3,%0\n\t" \
+ "bcs,a,pn %%xcc, 1f\n\t" \
+ "add %0, 1, %0\n" \
+ "1:" \
+ : "=r" ((UDItype)(sh)), \
+ "=&r" ((UDItype)(sl)) \
+ : "%rJ" ((UDItype)(ah)), \
+ "rI" ((UDItype)(bh)), \
+ "%rJ" ((UDItype)(al)), \
+ "rI" ((UDItype)(bl)) \
+ __CLOBBER_CC)
+
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subcc %r4,%5,%1\n\t" \
+ "sub %r2,%3,%0\n\t" \
+ "bcs,a,pn %%xcc, 1f\n\t" \
+ "sub %0, 1, %0\n\t" \
+ "1:" \
+ : "=r" ((UDItype)(sh)), \
+ "=&r" ((UDItype)(sl)) \
+ : "rJ" ((UDItype)(ah)), \
+ "rI" ((UDItype)(bh)), \
+ "rJ" ((UDItype)(al)), \
+ "rI" ((UDItype)(bl)) \
+ __CLOBBER_CC)
+
+#define umul_ppmm(wh, wl, u, v) \
+ do { \
+ UDItype tmp1, tmp2, tmp3, tmp4; \
+ __asm__ __volatile__ ( \
+ "srl %7,0,%3\n\t" \
+ "mulx %3,%6,%1\n\t" \
+ "srlx %6,32,%2\n\t" \
+ "mulx %2,%3,%4\n\t" \
+ "sllx %4,32,%5\n\t" \
+ "srl %6,0,%3\n\t" \
+ "sub %1,%5,%5\n\t" \
+ "srlx %5,32,%5\n\t" \
+ "addcc %4,%5,%4\n\t" \
+ "srlx %7,32,%5\n\t" \
+ "mulx %3,%5,%3\n\t" \
+ "mulx %2,%5,%5\n\t" \
+ "sethi %%hi(0x80000000),%2\n\t" \
+ "addcc %4,%3,%4\n\t" \
+ "srlx %4,32,%4\n\t" \
+ "add %2,%2,%2\n\t" \
+ "movcc %%xcc,%%g0,%2\n\t" \
+ "addcc %5,%4,%5\n\t" \
+ "sllx %3,32,%3\n\t" \
+ "add %1,%3,%1\n\t" \
+ "add %5,%2,%0" \
+ : "=r" ((UDItype)(wh)), \
+ "=&r" ((UDItype)(wl)), \
+ "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4) \
+ : "r" ((UDItype)(u)), \
+ "r" ((UDItype)(v)) \
+ __CLOBBER_CC); \
+ } while (0)
+#define UMUL_TIME 96
+#define UDIV_TIME 230
+#endif /* sparc64 */
+
+#if defined (__vax__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addl2 %5,%1\n\tadwc %3,%0" \
+ : "=g" ((USItype) (sh)), \
+ "=&g" ((USItype) (sl)) \
+ : "%0" ((USItype) (ah)), \
+ "g" ((USItype) (bh)), \
+ "%1" ((USItype) (al)), \
+ "g" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subl2 %5,%1\n\tsbwc %3,%0" \
+ : "=g" ((USItype) (sh)), \
+ "=&g" ((USItype) (sl)) \
+ : "0" ((USItype) (ah)), \
+ "g" ((USItype) (bh)), \
+ "1" ((USItype) (al)), \
+ "g" ((USItype) (bl)))
+#define umul_ppmm(xh, xl, m0, m1) \
+ do { \
+ union { \
+ UDItype __ll; \
+ struct {USItype __l, __h;} __i; \
+ } __xx; \
+ USItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("emul %1,%2,$0,%0" \
+ : "=r" (__xx.__ll) \
+ : "g" (__m0), \
+ "g" (__m1)); \
+ (xh) = __xx.__i.__h; \
+ (xl) = __xx.__i.__l; \
+ (xh) += ((((SItype) __m0 >> 31) & __m1) \
+ + (((SItype) __m1 >> 31) & __m0)); \
+ } while (0)
+#define sdiv_qrnnd(q, r, n1, n0, d) \
+ do { \
+ union {DItype __ll; \
+ struct {SItype __l, __h;} __i; \
+ } __xx; \
+ __xx.__i.__h = n1; __xx.__i.__l = n0; \
+ __asm__ ("ediv %3,%2,%0,%1" \
+ : "=g" (q), "=g" (r) \
+ : "g" (__xx.__ll), "g" (d)); \
+ } while (0)
+#endif /* __vax__ */
+
+#if defined (__z8000__) && W_TYPE_SIZE == 16
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add %H1,%H5\n\tadc %H0,%H3" \
+ : "=r" ((unsigned int)(sh)), \
+ "=&r" ((unsigned int)(sl)) \
+ : "%0" ((unsigned int)(ah)), \
+ "r" ((unsigned int)(bh)), \
+ "%1" ((unsigned int)(al)), \
+ "rQR" ((unsigned int)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub %H1,%H5\n\tsbc %H0,%H3" \
+ : "=r" ((unsigned int)(sh)), \
+ "=&r" ((unsigned int)(sl)) \
+ : "0" ((unsigned int)(ah)), \
+ "r" ((unsigned int)(bh)), \
+ "1" ((unsigned int)(al)), \
+ "rQR" ((unsigned int)(bl)))
+#define umul_ppmm(xh, xl, m0, m1) \
+ do { \
+ union {long int __ll; \
+ struct {unsigned int __h, __l;} __i; \
+ } __xx; \
+ unsigned int __m0 = (m0), __m1 = (m1); \
+ __asm__ ("mult %S0,%H3" \
+ : "=r" (__xx.__i.__h), \
+ "=r" (__xx.__i.__l) \
+ : "%1" (__m0), \
+ "rQR" (__m1)); \
+ (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \
+ (xh) += ((((signed int) __m0 >> 15) & __m1) \
+ + (((signed int) __m1 >> 15) & __m0)); \
+ } while (0)
+#endif /* __z8000__ */
+
+#endif /* __GNUC__ */
+
+/* If this machine has no inline assembler, use C macros. */
+
+#if !defined (add_ssaaaa)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ do { \
+ UWtype __x; \
+ __x = (al) + (bl); \
+ (sh) = (ah) + (bh) + (__x < (al)); \
+ (sl) = __x; \
+ } while (0)
+#endif
+
+#if !defined (sub_ddmmss)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ do { \
+ UWtype __x; \
+ __x = (al) - (bl); \
+ (sh) = (ah) - (bh) - (__x > (al)); \
+ (sl) = __x; \
+ } while (0)
+#endif
+
+/* If we lack umul_ppmm but have smul_ppmm, define umul_ppmm in terms of
+ smul_ppmm. */
+#if !defined (umul_ppmm) && defined (smul_ppmm)
+#define umul_ppmm(w1, w0, u, v) \
+ do { \
+ UWtype __w1; \
+ UWtype __xm0 = (u), __xm1 = (v); \
+ smul_ppmm (__w1, w0, __xm0, __xm1); \
+ (w1) = __w1 + (-(__xm0 >> (W_TYPE_SIZE - 1)) & __xm1) \
+ + (-(__xm1 >> (W_TYPE_SIZE - 1)) & __xm0); \
+ } while (0)
+#endif
+
+/* If we still don't have umul_ppmm, define it using plain C. */
+#if !defined (umul_ppmm)
+#define umul_ppmm(w1, w0, u, v) \
+ do { \
+ UWtype __x0, __x1, __x2, __x3; \
+ UHWtype __ul, __vl, __uh, __vh; \
+ \
+ __ul = __ll_lowpart (u); \
+ __uh = __ll_highpart (u); \
+ __vl = __ll_lowpart (v); \
+ __vh = __ll_highpart (v); \
+ \
+ __x0 = (UWtype) __ul * __vl; \
+ __x1 = (UWtype) __ul * __vh; \
+ __x2 = (UWtype) __uh * __vl; \
+ __x3 = (UWtype) __uh * __vh; \
+ \
+ __x1 += __ll_highpart (__x0);/* this can't give carry */ \
+ __x1 += __x2; /* but this indeed can */ \
+ if (__x1 < __x2) /* did we get it? */ \
+ __x3 += __ll_B; /* yes, add it in the proper pos. */ \
+ \
+ (w1) = __x3 + __ll_highpart (__x1); \
+ (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
+ } while (0)
+#endif
+
+#if !defined (__umulsidi3)
+#define __umulsidi3(u, v) \
+ ({DWunion __w; \
+ umul_ppmm (__w.s.high, __w.s.low, u, v); \
+ __w.ll; })
+#endif
+
+/* Define this unconditionally, so it can be used for debugging. */
+#define __udiv_qrnnd_c(q, r, n1, n0, d) \
+ do { \
+ UWtype __d1, __d0, __q1, __q0; \
+ UWtype __r1, __r0, __m; \
+ __d1 = __ll_highpart (d); \
+ __d0 = __ll_lowpart (d); \
+ \
+ __r1 = (n1) % __d1; \
+ __q1 = (n1) / __d1; \
+ __m = (UWtype) __q1 * __d0; \
+ __r1 = __r1 * __ll_B | __ll_highpart (n0); \
+ if (__r1 < __m) \
+ { \
+ __q1--, __r1 += (d); \
+ if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
+ if (__r1 < __m) \
+ __q1--, __r1 += (d); \
+ } \
+ __r1 -= __m; \
+ \
+ __r0 = __r1 % __d1; \
+ __q0 = __r1 / __d1; \
+ __m = (UWtype) __q0 * __d0; \
+ __r0 = __r0 * __ll_B | __ll_lowpart (n0); \
+ if (__r0 < __m) \
+ { \
+ __q0--, __r0 += (d); \
+ if (__r0 >= (d)) \
+ if (__r0 < __m) \
+ __q0--, __r0 += (d); \
+ } \
+ __r0 -= __m; \
+ \
+ (q) = (UWtype) __q1 * __ll_B | __q0; \
+ (r) = __r0; \
+ } while (0)
+
+/* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through
+ __udiv_w_sdiv (defined in libgcc or elsewhere). */
+#if !defined (udiv_qrnnd) && defined (sdiv_qrnnd)
+#define udiv_qrnnd(q, r, nh, nl, d) \
+ do { \
+ USItype __r; \
+ (q) = __udiv_w_sdiv (&__r, nh, nl, d); \
+ (r) = __r; \
+ } while (0)
+#endif
+
+/* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c. */
+#if !defined (udiv_qrnnd)
+#define UDIV_NEEDS_NORMALIZATION 1
+#define udiv_qrnnd __udiv_qrnnd_c
+#endif
+
+#if !defined (count_leading_zeros)
+extern const UQItype __clz_tab[];
+#define count_leading_zeros(count, x) \
+ do { \
+ UWtype __xr = (x); \
+ UWtype __a; \
+ \
+ if (W_TYPE_SIZE <= 32) \
+ { \
+ __a = __xr < ((UWtype)1<<2*__BITS4) \
+ ? (__xr < ((UWtype)1<<__BITS4) ? 0 : __BITS4) \
+ : (__xr < ((UWtype)1<<3*__BITS4) ? 2*__BITS4 : 3*__BITS4); \
+ } \
+ else \
+ { \
+ for (__a = W_TYPE_SIZE - 8; __a > 0; __a -= 8) \
+ if (((__xr >> __a) & 0xff) != 0) \
+ break; \
+ } \
+ \
+ (count) = W_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a); \
+ } while (0)
+#define COUNT_LEADING_ZEROS_0 W_TYPE_SIZE
+#endif
+
+#if !defined (count_trailing_zeros)
+/* Define count_trailing_zeros using count_leading_zeros. The latter might be
+ defined in asm, but if it is not, the C version above is good enough. */
+#define count_trailing_zeros(count, x) \
+ do { \
+ UWtype __ctz_x = (x); \
+ UWtype __ctz_c; \
+ count_leading_zeros (__ctz_c, __ctz_x & -__ctz_x); \
+ (count) = W_TYPE_SIZE - 1 - __ctz_c; \
+ } while (0)
+#endif
+
+#ifndef UDIV_NEEDS_NORMALIZATION
+#define UDIV_NEEDS_NORMALIZATION 0
+#endif
Index: linux-3.18.21/arch/mips/softfloat/tconfig.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/arch/mips/softfloat/tconfig.h 2018-02-05 13:19:56.000000000 +0800
@@ -0,0 +1,26 @@
+#define WORDS_BIG_ENDIAN 1
+#define BYTES_BIG_ENDIAN 1
+
+#define UNITS_PER_WORD 4
+#define MIN_UNITS_PER_WORD 4
+
+#define MIN_UNITS_PER_WORD 4
+#define LONG_TYPE_SIZE 32
+
+#define LONG_LONG_TYPE_SIZE 64
+
+#define BITS_PER_UNIT 8
+#define ROUND_TOWARDS_ZERO 0
+#ifndef LARGEST_EXPONENT_IS_NORMAL
+#define LARGEST_EXPONENT_IS_NORMAL(SIZE) 0
+#define BITS_PER_WORD (BITS_PER_UNIT * UNITS_PER_WORD)
+
+#if (defined _ABIN32 && _MIPS_SIM == _ABIN32) \
+ || (defined _ABI64 && _MIPS_SIM == _ABI64)
+# define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 128
+# else
+# define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 64
+# endif
+
+/*typedef unsigned int size_t;*/
+#endif
Index: linux-3.18.21/drivers/mtd/Kconfig
===================================================================
--- linux-3.18.21.orig/drivers/mtd/Kconfig 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/mtd/Kconfig 2018-02-05 13:20:41.000000000 +0800
@@ -325,4 +325,7 @@
source "drivers/mtd/ubi/Kconfig"
+source "drivers/mtd/econet/Kconfig"
+
+source "drivers/mtd/mtk/Kconfig"
endif # MTD
Index: linux-3.18.21/drivers/mtd/Makefile
===================================================================
--- linux-3.18.21.orig/drivers/mtd/Makefile 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/mtd/Makefile 2018-02-05 13:20:41.000000000 +0800
@@ -30,7 +30,7 @@
nftl-objs := nftlcore.o nftlmount.o
inftl-objs := inftlcore.o inftlmount.o
-obj-y += chips/ lpddr/ maps/ devices/ nand/ onenand/ tests/
+obj-y += chips/ mtk/ econet/ lpddr/ maps/ devices/ nand/ onenand/ tests/
obj-$(CONFIG_MTD_SPI_NOR) += spi-nor/
obj-$(CONFIG_MTD_UBI) += ubi/
Index: linux-3.18.21/drivers/mtd/chips/Kconfig
===================================================================
--- linux-3.18.21.orig/drivers/mtd/chips/Kconfig 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/mtd/chips/Kconfig 2018-02-05 13:20:41.000000000 +0800
@@ -200,6 +200,12 @@
config MTD_CFI_UTIL
tristate
+config MTD_SPIFLASH
+ tristate "Support for SPIFLASH on TC3162/TC3262"
+ depends on (MIPS_TC3162 || MIPS_TC3162U || MIPS_TC3262)
+ help
+ This option enables basic support for SPIFLASH on TC3162 chip.
+
config MTD_RAM
tristate "Support for RAM chips in bus mapping"
help
Index: linux-3.18.21/drivers/mtd/chips/Makefile
===================================================================
--- linux-3.18.21.orig/drivers/mtd/chips/Makefile 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/mtd/chips/Makefile 2018-02-05 13:20:41.000000000 +0800
@@ -10,6 +10,30 @@
obj-$(CONFIG_MTD_CFI_INTELEXT) += cfi_cmdset_0001.o
obj-$(CONFIG_MTD_GEN_PROBE) += gen_probe.o
obj-$(CONFIG_MTD_JEDECPROBE) += jedec_probe.o
+ifneq ($(TCSUPPORT_NEW_SPIFLASH),)
+obj-$(CONFIG_MTD_SPIFLASH) += newspiflash.o
+else
+obj-$(CONFIG_MTD_SPIFLASH) += spiflash_tc3162.o
+endif
+ifneq ($(strip $(TCSUPPORT_CPU_EN7512) $(TCSUPPORT_CPU_EN7521)),)
+ifneq ($(strip $(TCSUPPORT_SPI_CONTROLLER_ECC)),)
+obj-y += ../../../../kernel_ext/drivers/mtd/chips/spi_nand_flash_table.o
+obj-y += ../../../../kernel_ext/drivers/mtd/chips/spi_nand_flash.o
+obj-y += ../../../../kernel_ext/drivers/mtd/chips/spi_controller.o
+obj-y += ../../../../kernel_ext/drivers/mtd/chips/spi_ecc.o
+obj-y += ../../../../kernel_ext/drivers/mtd/chips/spi_nfi.o
+ifneq ($(strip $(TCSUPPORT_CPU_EN7516) $(TCSUPPORT_CPU_EN7527)),)
+ifneq ($(strip $(TCSUPPORT_AUTOBENCH)),)
+obj-y += ../../../../kernel_ext/drivers/mtd/chips/flash_test.o
+endif
+endif
+else
+obj-y += spi_nand_flash.o
+obj-y += spi_controller.o
+obj-y += spi_ecc.o
+obj-y += spi_nfi.o
+endif
+endif
obj-$(CONFIG_MTD_RAM) += map_ram.o
obj-$(CONFIG_MTD_ROM) += map_rom.o
obj-$(CONFIG_MTD_ABSENT) += map_absent.o
Index: linux-3.18.21/drivers/mtd/chips/newspiflash.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/chips/newspiflash.c 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,2846 @@
+/*
+ * SPIFLASH support for TC3162
+ */
+
+/*
+ * MTD driver for the SPI Flash Memory support.
+ *
+ * Copyright (c) 2005-2006 Atheros Communications Inc.
+ * Copyright (C) 2006-2007 FON Technology, SL.
+ * Copyright (C) 2006-2007 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2006-2007 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+*/
+#include <linux/proc_fs.h>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/semaphore.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/gen_probe.h>
+#include <asm/delay.h>
+#include <asm/io.h>
+#include <asm/tc3162/tc3162.h>
+#include "newspiflash.h"
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+
+/* debugging */
+/* #define SPIFLASH_DEBUG */
+//#define TC_SOC2
+
+#define MIN(a,b) ((a) < (b) ? (a) : (b))
+#define FALSE 0
+#define TRUE 1
+
+#define SPI_NOR_TEST "driver/spi_nor_test"
+
+#define ID_MASK 0xffff
+
+#define MANUFACTURER_ID(id) ((id >> 16) & ID_MASK)
+#define DEVICE_ID(id) (id & ID_MASK)
+
+#define SIZE_64KiB 0x10000
+#define SIZE_64MiB 0x4000000
+#define SIZE_32MiB 0x2000000
+#define SIZE_16MiB 0x1000000
+#define SIZE_8MiB 0x800000
+#define SIZE_4MiB 0x400000
+#define SIZE_2MiB 0x200000
+
+/* Manufacturers */
+#define MANUFACTURER_ST 0x0020
+#define MANUFACTURER_WINBOND 0x00ef
+#define MANUFACTURER_SST 0x00bf
+#define MANUFACTURER_MXIC 0x00c2
+#define MANUFACTURER_SPANSION 0x0001
+#define MANUFACTURER_EON 0x001c
+#define MANUFACTURER_NUMONYX 0x0020
+#define MANUFACTURER_GIGADEVICE 0x00c8
+#define MANUFACTURER_ESMT 0x008c
+
+/* ST */
+#define M25P16 0x2015
+#define M25P32 0x2016
+#define M25P64 0x2017
+
+/* Winbond */
+#define W25X16 0x3015
+#define W25X32 0x3016
+#define W25X64 0x3017
+#define W25X128 0x3018
+#define W25X256 0x3019
+
+#define W25Q16 0x4015
+#define W25Q32 0x4016
+#define W25Q64 0x4017
+#define W25Q128 0x4018
+#define W25Q256 0x4019
+
+/* SST */
+#define SST25VF032B 0x254a
+
+/* MXIC */
+#define MX25L16 0x2015
+#define MX25L3205D 0x2016
+#define MX25L6405D 0x2017
+#define MX25L12805D 0x2018
+#define MX25L25635E 0x2019
+
+/* SPANSION */
+#define S25FL016A 0x0214
+#define S25FL032A 0x0215
+#define S25FL064A 0x0216
+#define S25FL128P 0x2018
+#define S25FL256S 0x0219
+
+#define S25FL208K 0x4014
+#define S25FL116K 0x4015
+#define S25FL216K 0x4015
+#define S25FL127S 0x2018
+
+/* MICRON(NUMONYX) */
+#define N25Q064 0xba17
+#define N25Q128 0xba18
+#define N25Q256 0xba19
+
+/* EON */
+#define EN25Q64 0x3017
+
+/* GIGA */
+#define GD25Q16 0x4015
+#define GD25Q32 0x4016
+#define GD25Q64 0x4017
+#define GD25Q128B 0x4018
+#define GD25Q256C 0x4019
+
+/* ESMT */
+#define F25L16PA 0x2115
+#define F25L32QA 0x4116
+#define F25L64QA 0x4117
+
+struct mtd_info *spi_nor_mtd;
+static DEFINE_SPINLOCK(spinorLock);
+static DEFINE_SPINLOCK(spiLock);
+
+//#if defined(TC_SOC2)
+DECLARE_MUTEX(SPI_SEM);//Make sure all related SPI operations are atomic
+//DECLARE_MUTEX_LOCKED(SPI_SEM);//Make sure all related SPI operations are atomic
+//#endif
+
+struct spi_flash_info {
+ const u16 mfr_id;
+ const u16 dev_id;
+ const u16 extra_id;
+ const char *name;
+ const int DeviceSize;
+ const int EraseSize;
+};
+
+struct spi_chip_info {
+ struct spi_flash_info *flash;
+ void (*destroy)(struct spi_chip_info *chip_info);
+
+ u32 (*read)(struct map_info *map, u32 from, u32 to, u32 size);
+ u32 (*read_manual)(struct mtd_info *mtd, unsigned long from, unsigned char *buf, unsigned long len);
+ u32 (*write)(struct mtd_info *mtd, u32 from, u32 to, u32 size);
+ u32 (*erase)(struct mtd_info *mtd, u32 addr);
+};
+
+struct _SPI_NOR_FLASH_RW_TEST_T {
+ u32 times;
+ u32 block_idx;
+};
+
+static struct _SPI_NOR_FLASH_RW_TEST_T rw_test_param;
+
+static int
+spiflash_manual_read_internal (struct mtd_info *mtd, unsigned long from, unsigned char *buf, unsigned long len, unsigned char read_type);
+
+static wait_queue_head_t spiflash_wq;
+static spinlock_t spiflash_mutex;
+static int spiflash_state;
+
+static __u8 byte_program_mode = 0;
+static __u8 four_byte_mode = 0;
+static __u8 is_W25Q256 = 0;
+static __u8 is_MX25L256 = 0;
+static __u8 is_S25FL256S = 0;
+static __u8 is_N25Q256 = 0;
+static __u8 is_SPI_32MB = 0;
+static __u8 epcheck_flag = 0;
+
+void send_single_opcode_cmd(uint8 op_code)
+{
+ SEND_CSL_CMD;
+ SEND_OPFIFO_WRITE_CMD(OP_OUTS, OP_REPEAT_ONE);
+ SEND_DFIFO_WRITE_CMD(op_code);
+ SEND_CSH_CMD;
+}
+
+void sf_manual_begin(void)
+{
+ /* disable interrupt */
+ /*local_irq_disable(); */
+ WARN_ON(in_interrupt());
+ spin_lock(&spiLock);
+
+ /* disable READ_IDLE_EN */
+ SEND_READ_IDLE_DIS_CMD;
+ /* wait until auto read status is IDLE */
+ while(ReadReg(SF_RDCTL_FSM)) ;
+ /* auto mode -> manual mode */
+ SEND_AUTO2MANUAL_CMD;
+}
+
+void sf_manual_end(void)
+{
+ /* manual mode -> auto mode */
+ SEND_MANUAL2AUTO_CMD;
+ /* enable READ_IDLE_EN */
+ SEND_READ_IDLE_EN_CMD;
+
+ /* enable interrupt */
+ /*local_irq_enable();*/
+ spin_unlock(&spiLock);
+}
+
+void sf_manual_wren_begin(void)
+{
+ sf_manual_begin();
+ SEND_WREN_CMD;
+}
+
+void sf_manual_wren_end(void)
+{
+ uint8 reg_sr1 = 0;
+ /* wait until WIP bit is idle */
+ SEND_WAIT_WIP_IDLE_INTERNAL_CMD(reg_sr1);
+
+ sf_manual_end();
+}
+
+#if defined(TCSUPPORT_NEW_SPIFLASH_DEBUG)
+#define PP_MAX (256)
+#define SE_MAX (4096)
+#define SE_CNT (16)
+#define SEED_MAX (0x100)
+
+#define AUTOREAD_TEST (0)
+#define MANUALREAD_TEST (1)
+#define MUXREAD_TEST (2)
+
+#define SIZE_32MiB 0x2000000
+#define AUTOREAD_TEST_MAX (4)
+#define MANUALREAD_TEST_MAX (4)
+#define MUXREAD_TEST_MAX (8)
+
+#define SFC_CLOCK_MAX 150
+
+#define TP_MAX (10)
+uint8 TestPattern[TP_MAX] =
+{
+ {0xA5},
+ {0x5A},
+ {0xFF},
+ {0x00},
+ {0x55},
+ {0xAA},
+ {0x96},
+ {0x69},
+ {0xCC},
+ {0x33}
+};
+
+uint8 TestMode_TEXT[3][12] =
+{
+ {"AutoMode"},
+ {"ManualMode"},
+ {"MuxMode"},
+};
+
+uint8 ReadMode_TEXT[4][20] =
+{
+ {"ReadData"},
+ {"FastRead"},
+ {"FastRead_DualOut"},
+ {"FastRead_DualIO"},
+};
+
+uint8 MuxMode_TEXT[8][25] =
+{
+ {"Auto_ReadData"},
+ {"Auto_FastRead"},
+ {"Auto_FastRead_DualOut"},
+ {"Auto_FastRead_DualIO"},
+ {"Manual_ReadData"},
+ {"Manual_FastRead"},
+ {"Manual_FastRead_DualOut"},
+ {"Manual_FastRead_DualIO"},
+};
+
+uint8 clk_rate=0;
+uint8 CLKRate_TEXT[3][12] =
+{
+ {"25MHZ"},
+ {"50MHZ"},
+ {"Changing"},
+};
+
+uint8 Address3B4B_TEXT[3][40] =
+{
+ {"4Bytes access all 32MB flash"},
+ {"3Bytes access front 16MB flash"},
+ {"3Bytes access, and use 13/0C/3C/BC"},
+};
+
+uint32 Round_Value = 0;
+uint16 AutoRead_Value = 0;
+uint16 ManualRead_Value = 0;
+uint16 MuxRead_Value = 0;
+uint16 isProgram_Value = 0;
+uint16 CLKRate_Value = 0;
+uint16 Exit_Value = 0;
+uint16 Address3B4B_Value = 0;
+uint32 TestPattern_Value = 0;
+uint32 StartAddress_Value = 0;
+uint16 ReadIdle_Value = 0;
+uint16 Tn_Value = 0;
+uint16 Tn_cnt = 0;
+uint16 TnMappingTab[6];
+
+uint16 AutoRead_cnt = 0;
+uint16 ManualRead_cnt = 0;
+uint16 MuxRead_cnt = 0;
+uint16 AutoMappingTab[4];
+uint16 ManualMappingTab[4];
+uint16 MuxMappingTab[8];
+uint32 spi_task2_en=0;
+uint16 Addr3B4B_Flag=0;
+
+uint32 getTime(void)
+{
+ return (jiffies * 10);
+}
+
+void exit_4Byte_mode(void)
+{
+ if(is_S25FL256S){
+ SEND_WRITE_REGISTER_CMD(SF_OP_WR_BANKREG, SF_BANKREG_3B);
+	SEND_WRDIS_CMD; // spansion bank register does not require a preceding WREN command, so the WREN bit must be cleared manually here.
+ } else if(is_N25Q256){
+ SEND_EXIT_4BYTES_MODE_WREN_CMD; //for N25Q256, to enter/exit 4Byte address mode, the WREN bit must be set to 1 first.
+ } else {
+ SEND_EXIT_4BYTES_MODE_CMD;
+ }
+ Addr3B4B_Flag = AUTO_3BMODE;
+}
+#endif
+
+void enter_4Byte_mode(void)
+{
+ if(is_S25FL256S){
+ SEND_WRITE_REGISTER_CMD(SF_OP_WR_BANKREG, SF_BANKREG_4B);
+	SEND_WRDIS_CMD; // spansion bank register does not require a preceding WREN command, so the WREN bit must be cleared manually here.
+ } else if(is_N25Q256){
+ SEND_ENTER_4BYTES_MODE_WREN_CMD; //for N25Q256, to enter/exit 4Byte address mode, the WREN bit must be set to 1 first.
+ }else{
+ SEND_ENTER_4BYTES_MODE_CMD;
+ }
+#ifdef TCSUPPORT_NEW_SPIFLASH_DEBUG
+ Addr3B4B_Flag = AUTO_4BMODE;
+#endif
+}
+
+static void dumpCell_buffer(u8* buf, u32 len, u32 from){
+ u32 i=0;
+
+ from &= 0x03FFFFFF;
+ from |= 0xBC000000;
+ printk("\r\n\t\t00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F");
+ for(i=0; i<len; i++){
+ if((i&15)==0){
+ printk("\r\n0x%08X:\t", from+i);
+ }
+ printk("%02x ", *(buf+i));
+ }
+ printk("\r\n");
+}/*end dumpCell_buffer*/
+
+#if 0
+void enter_dualIO_enable_mode(void)
+{
+ u8 reg_value = 0;
+ if(is_N25Q256) {
+ SEND_READ_REGISTER_CMD(SF_OP_RD_ENHANCED_CFGREG, reg_value);
+ reg_value &= 0x3F;
+ reg_value |= 0x80;
+ //SEND_WRITE_REGISTER_CMD(SF_OP_WR_ENHANCED_CFGREG, reg_value);
+ }
+}
+#endif
+
+/* Probe SPI flash device
+ * Function returns 0 for failure.
+ * and flashconfig_tbl array index for success.
+ */
+static u32
+spiflash_read_id (void)
+{
+ u32 flash_id=0;
+ u8 id[3]={0};
+ u8 cur_mode=0;
+
+ WARN_ON(in_interrupt());
+#if defined(TC_SOC2) && defined(CONFIG_MIPS_TC3262)
+ if(down_Manual_interruptible(&SPI_SEM))
+ return -ERESTARTSYS;
+#else
+	down_Normal_interruptible(&SPI_SEM); /*This is an empty function*/
+ spin_lock(&spinorLock);
+#endif
+
+ SEND_RD_ID_CMD(id);
+
+ flash_id = id[0] << 16 | id[1] << 8 | id[2];
+
+#if defined(TC_SOC2) && defined(CONFIG_MIPS_TC3262)
+ up_Manual(&SPI_SEM);
+#else
+	up_Normal(&SPI_SEM); /*This is an empty function*/
+ spin_unlock(&spinorLock);
+#endif
+
+ return flash_id;
+}
+
+static u32
+spiflash_erase (struct mtd_info *mtd, u32 addr)
+{
+ u32 i = 0;
+ u8 cur_mode=0;
+ u8 mode=0;
+ unsigned char *rd_buf=NULL;
+ u32 destAddr = addr;
+#ifdef TCSUPPORT_NEW_SPIFLASH_DEBUG
+ u32 begtime=0, endtime=0, passtime=0;
+ u32 begtick=0, endtick=0;
+ unsigned long dest_addr = addr;
+ uint16 cur3B4B_Flag = Addr3B4B_Flag;
+#endif
+
+ WARN_ON(in_interrupt());
+ /* sanity checks */
+ if (addr + mtd->erasesize > mtd->size) return (-1);
+
+#ifndef TCSUPPORT_NEW_SPIFLASH_DEBUG
+ printk("\nerase addr=%x size=%x", addr, mtd->erasesize);
+#endif
+
+ if(mtd->erasesize == SIZE_64KiB){
+
+#if defined(TC_SOC2) && defined(CONFIG_MIPS_TC3262)
+ if(down_Manual_interruptible(&SPI_SEM))
+ return -ERESTARTSYS;
+#else
+	down_Normal_interruptible(&SPI_SEM); /*This is an empty function*/
+ spin_lock(&spinorLock);
+#endif
+
+#ifdef TCSUPPORT_NEW_SPIFLASH_DEBUG
+ if(mtd->size >= SIZE_32MiB) {
+ if((Address3B4B_Value != 0) && ((dest_addr>>24) != 0)) {
+ if(Addr3B4B_Flag == AUTO_3BMODE) {
+ enter_4Byte_mode();
+ }
+ }
+ }
+ begtime = getTime();
+ begtick = VPint(CR_TIMER1_VLR);
+
+ if(mtd->size >= SIZE_32MiB) {
+ if((Address3B4B_Value == 0) || ((Address3B4B_Value != 0) && ((dest_addr>>24) != 0)))
+ {
+ //printk("\nErase::mode=1, addr is 0x%08x", addr);
+ mode = 1;
+ } else {
+ if(Addr3B4B_Flag == AUTO_4BMODE) {
+ exit_4Byte_mode();
+ }
+ if(is_W25Q256 == 1) {
+ SEND_WRITE_REGISTER_CMD(SF_OP_WR_STATUSEX, 0);
+ }
+ mode = 0;
+ }
+ } else {
+ mode = 0;
+ }
+#else
+ if(mtd->size >= SIZE_32MiB){
+ //printk("\nErase::mode=1, mtd->size is %X\n", mtd->size);
+ mode = 1;
+ } else {
+ mode = 0;
+ }
+#endif
+ SEND_SECTOR_ERASE_CMD(addr, mode);
+
+#ifdef TCSUPPORT_NEW_SPIFLASH_DEBUG
+ endtick = VPint(CR_TIMER1_VLR);
+ endtime = getTime();
+
+ if(endtime < begtime)
+ passtime = 0xffffffff - begtime + endtime;
+ else
+ passtime = endtime - begtime;
+ passtime = (passtime/10) * VPint(CR_TIMER1_LDV)+ begtick - endtick;
+ printk("\nerase addr=%x size=%x passtime is %lu", addr, mtd->erasesize, passtime);
+
+ if(mtd->size >= SIZE_32MiB) {
+ if((cur3B4B_Flag == AUTO_4BMODE) && (Addr3B4B_Flag == AUTO_3BMODE)) {
+ enter_4Byte_mode();
+ } else if((cur3B4B_Flag == AUTO_3BMODE) && (Addr3B4B_Flag == AUTO_4BMODE)) {
+ exit_4Byte_mode();
+ }
+ }
+#endif
+
+#if defined(TC_SOC2) && defined(CONFIG_MIPS_TC3262)
+ up_Manual(&SPI_SEM);
+#else
+	up_Normal(&SPI_SEM); /*This is an empty function*/
+ spin_unlock(&spinorLock);
+#endif
+
+ if(epcheck_flag == 1) {
+ printk("\nerase check start");
+ rd_buf = kmalloc(sizeof(char) * SIZE_64KiB, GFP_KERNEL);
+ spiflash_manual_read_internal(mtd, destAddr, rd_buf, SIZE_64KiB, READ_DATA);
+ for(i=0; i<SIZE_64KiB; i++) {
+ if( rd_buf[i] != 0xFF) {
+ printk("\nErase fail at Address %X::index is %d, value is %X, should be %X, reg 0xBFA10000 is 0x%08x, clk_rate is %s\n", destAddr+i, i, rd_buf[i], 0xFF, SF_READ_MODE_VALUE, ((SF_CLK_CHANGE_VALUE==0)?("25MHZ"):("50MHZ")));
+ dumpCell_buffer((u8 *)(rd_buf), SIZE_64KiB, destAddr);
+ break;
+ }
+ }
+ kfree(rd_buf);
+ }
+ } else {
+ return (-1);
+ }
+
+ return (0);
+}
+
+/* wait until the flash chip is ready and grab a lock */
+static int spiflash_wait_ready(int state)
+{
+ DECLARE_WAITQUEUE(wait, current);
+
+retry:
+ spin_lock_bh(&spiflash_mutex);
+ if (spiflash_state != FL_READY) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&spiflash_wq, &wait);
+ spin_unlock_bh(&spiflash_mutex);
+ schedule();
+ remove_wait_queue(&spiflash_wq, &wait);
+
+ if(signal_pending(current))
+ return 0;
+
+ goto retry;
+ }
+ spiflash_state = state;
+
+ return 1;
+}
+
+static inline void spiflash_done(void)
+{
+ spiflash_state = FL_READY;
+ spin_unlock_bh(&spiflash_mutex);
+ wake_up(&spiflash_wq);
+}
+
+static int
+spiflash_manual_read_internal (struct mtd_info *mtd, unsigned long from, unsigned char *buf, unsigned long len, unsigned char read_type)
+{
+ int mode=0;
+ u8 cur_mode=0;
+ unsigned long dest_addr = from;
+#ifdef TCSUPPORT_NEW_SPIFLASH_DEBUG
+ uint16 cur3B4B_Flag = Addr3B4B_Flag;
+#endif
+
+ WARN_ON(in_interrupt());
+#ifdef SPIFLASH_DEBUG
+ printk ("spiflash_read (from = 0x%08x, len = %d)\n", from, len);
+#endif
+
+ /* sanity checks */
+ if (!len) return (0);
+ if (from + len > mtd->size) return (-1);
+
+
+#if defined(TC_SOC2) && defined(CONFIG_MIPS_TC3262)
+ if(down_Manual_interruptible(&SPI_SEM))
+ return -ERESTARTSYS;
+#else
+	down_Normal_interruptible(&SPI_SEM); /*This is an empty function*/
+ spin_lock(&spinorLock);
+#endif
+
+#ifdef TCSUPPORT_NEW_SPIFLASH_DEBUG
+ if((Address3B4B_Value == 2) && (mtd->size >= SIZE_32MiB) && (is_MX25L256 == 0) && ((dest_addr>>24) != 0)) {
+ if(Addr3B4B_Flag == AUTO_3BMODE) {
+ enter_4Byte_mode();
+ }
+ switch(read_type){
+ case READ_DATA:
+ SEND_4BYTES_READ_CMD(from, buf, len);
+ break;
+
+ case FAST_READ:
+ SEND_4BYTES_FASTREAD_CMD(from, buf, len);
+ break;
+
+ case FAST_READ_DUALOUT:
+ SEND_4BYTES_FASTREAD_DUALOUT_CMD(from, buf, len);
+ break;
+
+ case FAST_READ_DUALIO:
+ SEND_4BYTES_FASTREAD_DUALIO_CMD(from, buf, len);
+ break;
+
+ default:
+ printk("spiflash_read: not support this mode\n");
+ break;
+ }
+
+ if(cur3B4B_Flag == AUTO_3BMODE) {
+ exit_4Byte_mode();
+ }
+ } else {
+ if(mtd->size >= SIZE_32MiB) {
+ if((Address3B4B_Value != 0) && ((dest_addr>>24) != 0)) {
+ if(Addr3B4B_Flag == AUTO_3BMODE) {
+ enter_4Byte_mode();
+ }
+ }
+
+ if((Address3B4B_Value == 0) || ((Address3B4B_Value != 0) && ((dest_addr>>24) != 0))) {
+ mode = 1;
+ } else {
+ if(Addr3B4B_Flag == AUTO_4BMODE) {
+ exit_4Byte_mode();
+ }
+ if(is_W25Q256 == 1) {
+ SEND_WRITE_REGISTER_CMD(SF_OP_WR_STATUSEX, 0);
+ }
+ mode = 0;
+ }
+ } else {
+ mode = 0;
+ }
+#else
+ if(mtd->size >= SIZE_32MiB) {
+ mode = 1;
+ //printk("\nRead::mode=1, mtd->size is %X\n", mtd->size);
+ } else {
+ mode = 0;
+ }
+#endif
+
+ switch(read_type){
+ case READ_DATA:
+ SEND_READ_CMD(from, buf, len, mode);
+ break;
+
+ case FAST_READ:
+ SEND_FASTREAD_CMD(from, buf, len, mode);
+ break;
+
+ case FAST_READ_DUALOUT:
+ SEND_FASTREAD_DUALOUT_CMD(from, buf, len, mode);
+ break;
+
+ case FAST_READ_DUALIO:
+ SEND_FASTREAD_DUALIO_CMD(from, buf, len, mode);
+ break;
+
+ default:
+ printk("spiflash_read: not support this mode\n");
+ break;
+ }
+#ifdef TCSUPPORT_NEW_SPIFLASH_DEBUG
+ if(mtd->size >= SIZE_32MiB) {
+ if((cur3B4B_Flag == AUTO_4BMODE) && (Addr3B4B_Flag == AUTO_3BMODE)) {
+ enter_4Byte_mode();
+ } else if((cur3B4B_Flag == AUTO_3BMODE) && (Addr3B4B_Flag == AUTO_4BMODE)) {
+ exit_4Byte_mode();
+ }
+ }
+ }
+#endif
+
+#if defined(TC_SOC2) && defined(CONFIG_MIPS_TC3262)
+ up_Manual(&SPI_SEM);
+#else
+	up_Normal(&SPI_SEM); /*This is an empty function*/
+ spin_unlock(&spinorLock);
+#endif
+
+ return (0);
+}
+
+
+static int
+spiflash_manual_read_data(struct mtd_info *mtd, unsigned long from, unsigned char *buf, unsigned long len)
+{
+ spiflash_manual_read_internal(mtd, from, buf, len, READ_DATA);
+ return (0);
+}
+
+static int
+spiflash_manual_fast_read(struct mtd_info *mtd, unsigned long from, unsigned char *buf, unsigned long len)
+{
+ spiflash_manual_read_internal(mtd, from, buf, len, FAST_READ);
+ return (0);
+}
+
+static int
+spiflash_manual_fast_read_dualOut(struct mtd_info *mtd, unsigned long from, unsigned char *buf, unsigned long len)
+{
+ spiflash_manual_read_internal(mtd, from, buf, len, FAST_READ_DUALOUT);
+ return (0);
+}
+
+static int
+spiflash_manual_fast_read_dualIO(struct mtd_info *mtd, unsigned long from, unsigned char *buf, unsigned long len)
+{
+ spiflash_manual_read_internal(mtd, from, buf, len, FAST_READ_DUALIO);
+ return (0);
+}
+
+static u32
+spiflash_read (struct map_info *map, u32 from, u32 to, u32 size)
+{
+ u8 cur_mode=0;
+#ifdef TCSUPPORT_NEW_SPIFLASH_DEBUG
+ uint16 cur3B4B_Flag = Addr3B4B_Flag;
+#endif
+
+ WARN_ON(in_interrupt());
+#if defined(TC_SOC2) && defined(CONFIG_MIPS_TC3262)
+ if(down_Auto_interruptible(&SPI_SEM))
+ return -ERESTARTSYS;
+#else
+	down_Normal_interruptible(&SPI_SEM); /*This is an empty function*/
+ spin_lock(&spinorLock);
+#endif
+
+#ifdef TCSUPPORT_NEW_SPIFLASH_DEBUG
+ if(is_SPI_32MB) {
+ if((Address3B4B_Value == 0) || ((Address3B4B_Value != 0) && ((from>>24) != 0)))
+ {
+ if(Addr3B4B_Flag == AUTO_3BMODE) {
+ enter_4Byte_mode();
+ }
+ SEND_AUTO_4B_CMD;
+ } else {
+ if(Addr3B4B_Flag == AUTO_4BMODE) {
+ exit_4Byte_mode();
+ }
+ SEND_AUTO_3B_CMD;
+ if(is_W25Q256 == 1) {
+ SEND_WRITE_REGISTER_CMD(SF_OP_WR_STATUSEX, 0);
+ }
+ }
+ }
+#endif
+
+ memcpy4((unsigned char*)to, (unsigned char*)(map->virt + from), size);
+
+#ifdef TCSUPPORT_NEW_SPIFLASH_DEBUG
+ if(is_SPI_32MB) {
+ if((cur3B4B_Flag == AUTO_4BMODE) && (Addr3B4B_Flag == AUTO_3BMODE)) {
+ enter_4Byte_mode();
+ SEND_AUTO_4B_CMD;
+ } else if((cur3B4B_Flag == AUTO_3BMODE) && (Addr3B4B_Flag == AUTO_4BMODE)) {
+ exit_4Byte_mode();
+ SEND_AUTO_3B_CMD;
+ }
+ }
+#endif
+#if defined(TC_SOC2) && defined(CONFIG_MIPS_TC3262)
+ up_Auto(&SPI_SEM);
+#else
+	up_Normal(&SPI_SEM); /*This is an empty function*/
+ spin_unlock(&spinorLock);
+#endif
+ return (0);
+}
+
+static u32
+spiflash_write (struct mtd_info *mtd, u32 from, u32 to, u32 len)
+{	/* Program `len` bytes from RAM address `from` into flash offset `to` in page-sized chunks; returns 0, or -1 on range error. */
+	u8 cur_mode=0;	/* NOTE(review): unused in this function */
+	u8 mode=0;	/* 1 = 4-byte-address page program, 0 = 3-byte */
+	static int cnt=0;	/* progress-dot counter, persists across calls */
+	u32 i=0;
+	u32 pp_len = 0;	/* bytes still to program */
+	u32 pp_cur_len = 0;	/* bytes programmed by the current page-program command */
+	unsigned long tmp_len = 0;
+	u32 destAddr = to;	/* original destination, kept for the optional verify pass */
+#ifdef TCSUPPORT_NEW_SPIFLASH_DEBUG
+	u32 last_addr = 0;
+	u32 begtime=0, endtime=0, passtime=0;
+	u32 begtick=0, endtick=0;
+	uint16 cur3B4B_Flag = Addr3B4B_Flag;	/* remember 3B/4B addressing mode so it can be restored */
+#endif
+	unsigned char *buf=NULL;
+	unsigned char *rd_buf=NULL;
+
+	WARN_ON(in_interrupt());	/* may block on SPI_SEM; must not run in IRQ context */
+	buf = (unsigned char *) from;	/* `from` is actually a RAM pointer smuggled through a u32 */
+
+	/* sanity checks */
+	if (!len) return (0);
+	if (to + len > mtd->size) return (-1);
+
+	//printk("program from %x to %x\n", to, to+len);
+
+#if defined(TC_SOC2) && defined(CONFIG_MIPS_TC3262)
+	if(down_Manual_interruptible(&SPI_SEM))
+		return -ERESTARTSYS;
+#else
+	down_Normal_interruptible(&SPI_SEM); /* no-op stub; the spinlock below is the real lock */
+	spin_lock(&spinorLock);
+#endif
+
+	pp_len = len;
+
+
+#ifdef TCSUPPORT_NEW_SPIFLASH_DEBUG
+	//last_addr = to + len;
+	last_addr = to + SF_PP_MAX*8;
+	if(mtd->size >= SIZE_32MiB) {
+		if((Address3B4B_Value != 0) && ((last_addr>>24) != 0))
+		{	/* programmed region reaches above 16MB: switch to 4-byte addressing */
+			if(Addr3B4B_Flag == AUTO_3BMODE) {
+				enter_4Byte_mode();
+			}
+		}
+	}
+#endif
+
+	if(mtd->size >= SIZE_32MiB) {
+		mode = 1;
+	} else {
+		mode = 0;
+	}
+
+	while(pp_len > 0) {
+		cnt++;
+		if ((cnt & 0x3f) == 0)
+			printk(".");	/* progress indicator every 64 pages */
+
+		if((to & (SF_PP_MAX-1)) != 0) {	/* first chunk: only up to the next page boundary */
+			pp_cur_len = MIN((SF_PP_MAX - (to % SF_PP_MAX)), pp_len);
+		}
+		else {
+			pp_cur_len = MIN(SF_PP_MAX, pp_len);
+		}
+		pp_len -= pp_cur_len;
+
+		//SEND_WREN_CMD;
+#ifdef TCSUPPORT_NEW_SPIFLASH_DEBUG
+		begtime = getTime();
+		begtick = VPint(CR_TIMER1_VLR);
+		if(mtd->size >= SIZE_32MiB) {
+			if((Address3B4B_Value == 0) || ((Address3B4B_Value != 0) && ((last_addr>>24) != 0)))
+			{
+				//printk("\nProgram::mode=1, mtd->size is %X\n", mtd->size);
+				mode = 1;
+			} else {
+				if(Addr3B4B_Flag == AUTO_4BMODE) {
+					exit_4Byte_mode();
+				}
+				if(is_W25Q256 == 1) {
+					SEND_WRITE_REGISTER_CMD(SF_OP_WR_STATUSEX, 0);	/* W25Q256 quirk: clear extended-address register in 3B mode */
+				}
+				mode = 0;
+			}
+		} else {
+			mode = 0;
+		}
+#endif
+		tmp_len = pp_cur_len;
+		SEND_PAGE_PROGRAM_CMD(to, buf, pp_cur_len, mode);	/* NOTE(review): `buf` is never advanced here, so presumably the macro advances it -- verify macro definition */
+
+#ifdef TCSUPPORT_NEW_SPIFLASH_DEBUG
+		endtick = VPint(CR_TIMER1_VLR);
+		endtime = getTime();
+		if((to & 0xFFFFFF)>>12 == 0xFFF) {	/* log timing for pages in the last 4KB of each 16MB window */
+			if(endtime < begtime)
+				passtime = 0xffffffff - begtime + endtime;	/* tick counter wrapped */
+			else
+				passtime = endtime - begtime;
+			passtime = (passtime/10) * VPint(CR_TIMER1_LDV)+ begtick - endtick;
+			printk("\nto is 0x%08x, last_addr is 0x%08x, pp_cur_len is 0x%08x", to, last_addr, pp_cur_len);
+			printk("\nprogram addr=%x size=%x passtime is %lu ", to, pp_cur_len, passtime);
+		}
+#endif
+		to += tmp_len;
+	}
+#ifdef TCSUPPORT_NEW_SPIFLASH_DEBUG
+	if(mtd->size >= SIZE_32MiB) {	/* restore the addressing mode that was active on entry */
+		if((cur3B4B_Flag == AUTO_4BMODE) && (Addr3B4B_Flag == AUTO_3BMODE)) {
+			enter_4Byte_mode();
+		} else if((cur3B4B_Flag == AUTO_3BMODE) && (Addr3B4B_Flag == AUTO_4BMODE)) {
+			exit_4Byte_mode();
+		}
+	}
+#endif
+
+#if defined(TC_SOC2) && defined(CONFIG_MIPS_TC3262)
+	up_Manual(&SPI_SEM);
+#else
+	up_Normal(&SPI_SEM); /* no-op stub; pairs with the spinlock */
+	spin_unlock(&spinorLock);
+#endif
+
+	if(epcheck_flag == 1) {	/* optional read-back verification of the programmed range */
+		buf = (unsigned char *) from;
+		rd_buf = kmalloc(sizeof(char) * len, GFP_KERNEL);	/* NOTE(review): allocation result is not NULL-checked before use */
+		spiflash_manual_read_internal(mtd, destAddr, rd_buf, len, READ_DATA);
+		for(i=0; i<len; i++) {
+			if( rd_buf[i] != buf[i]) {
+				printk("\nProgram fail at Address %X::index is %d, value is %X, should be %X, reg 0xBFA10000 is 0x%08x, clk_rate is %s\n", destAddr+i, i, rd_buf[i], buf[i], SF_READ_MODE_VALUE, ((SF_CLK_CHANGE_VALUE==0)?("25MHZ"):("50MHZ")));
+				dumpCell_buffer((u8 *)(rd_buf), len, destAddr);
+				break;	/* report only the first mismatch */
+			}
+		}
+		kfree(rd_buf);
+	}
+
+	return (0);
+}
+
+static struct spi_flash_info flash_tables[] = {	/* supported SPI NOR parts, matched by JEDEC (mfr_id, dev_id); names are display-only */
+	{
+		mfr_id: MANUFACTURER_ST,
+		dev_id: M25P64,
+		name: "ST M25P64",
+		DeviceSize: SIZE_8MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_ST,
+		dev_id: M25P32,
+		name: "ST M25P32",
+		DeviceSize: SIZE_4MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_ST,
+		dev_id: M25P16,
+		name: "ST M25P16",
+		DeviceSize: SIZE_2MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_WINBOND,
+		dev_id: W25X256,
+		name: "Winbond W25X256",
+		DeviceSize: SIZE_32MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_WINBOND,
+		dev_id: W25X128,
+		name: "Winbond W25X128",
+		DeviceSize: SIZE_16MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_WINBOND,
+		dev_id: W25X64,
+		name: "Winbond W25X64",
+		DeviceSize: SIZE_8MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_WINBOND,
+		dev_id: W25X32,
+		name: "Winbond W25X32",
+		DeviceSize: SIZE_4MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_WINBOND,
+		dev_id: W25X16,
+		name: "Winbond W25X16",
+		DeviceSize: SIZE_2MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_WINBOND,
+		dev_id: W25Q256,
+		name: "Winbond W25Q256",
+		DeviceSize: SIZE_32MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_WINBOND,
+		dev_id: W25Q128,
+		name: "Winbond W25Q128",
+		DeviceSize: SIZE_16MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_WINBOND,
+		dev_id: W25Q64,
+		name: "Winbond W25Q64",
+		DeviceSize: SIZE_8MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_WINBOND,
+		dev_id: W25Q32,
+		name: "Winbond W25Q32",
+		DeviceSize: SIZE_4MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_WINBOND,
+		dev_id: W25Q16,
+		name: "Winbond W25Q16",
+		DeviceSize: SIZE_2MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_SST,
+		dev_id: SST25VF032B,
+		name: "SST 25VF032B",
+		DeviceSize: SIZE_4MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_MXIC,
+		dev_id: MX25L16,
+		name: "MX25L16",
+		DeviceSize: SIZE_2MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_MXIC,
+		dev_id: MX25L3205D,
+		name: "MX25L3205D",
+		DeviceSize: SIZE_4MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_MXIC,
+		dev_id: MX25L6405D,
+		name: "MX25L6405D",
+		DeviceSize: SIZE_8MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_MXIC,
+		dev_id: MX25L12805D,
+		name: "MX25L12805D",
+		DeviceSize: SIZE_16MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_MXIC,
+		dev_id: MX25L25635E,
+		name: "MX25L25635E",
+		DeviceSize: SIZE_32MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_SPANSION,
+		dev_id: S25FL064A,
+		name: "S25FL064A",
+		DeviceSize: SIZE_8MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_SPANSION,
+		dev_id: S25FL032A,
+		name: "S25FL032A",
+		DeviceSize: SIZE_4MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_SPANSION,
+		dev_id: S25FL016A,
+		name: "S25FL016A",
+		DeviceSize: SIZE_2MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_SPANSION,
+		dev_id: S25FL128P,
+		name: "S25FL128P",
+		DeviceSize: SIZE_16MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_SPANSION,
+		dev_id: S25FL256S,
+		name: "Spansion S25FL256S",	/* fix: was misspelled "Spasion" */
+		DeviceSize: SIZE_32MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_EON,
+		dev_id: EN25Q64,
+		name: "EN25Q64",
+		DeviceSize: SIZE_8MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_NUMONYX,
+		dev_id: N25Q064,
+		name: "N25Q064",
+		DeviceSize: SIZE_8MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_NUMONYX,
+		dev_id: N25Q128,
+		name: "N25Q128",
+		DeviceSize: SIZE_16MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_NUMONYX,
+		dev_id: N25Q256,
+		name: "N25Q256",
+		DeviceSize: SIZE_32MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_GIGADEVICE,
+		dev_id: GD25Q16,
+		name: "GD25Q16",
+		DeviceSize: SIZE_2MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_GIGADEVICE,
+		dev_id: GD25Q32,
+		name: "GD25Q32",
+		DeviceSize: SIZE_4MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_GIGADEVICE,
+		dev_id: GD25Q64,
+		name: "GD25Q64",
+		DeviceSize: SIZE_8MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_GIGADEVICE,
+		dev_id: GD25Q128B,
+		name: "GD25Q128B",
+		DeviceSize: SIZE_16MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_GIGADEVICE,
+		dev_id: GD25Q256C,
+		name: "GD25Q256C",
+		DeviceSize: SIZE_32MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_ESMT,
+		dev_id: F25L16PA,
+		name: "F25L16PA",
+		DeviceSize: SIZE_2MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_ESMT,
+		dev_id: F25L32QA,
+		name: "F25L32QA",
+		DeviceSize: SIZE_4MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_ESMT,
+		dev_id: F25L64QA,
+		name: "F25L64QA",
+		DeviceSize: SIZE_8MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_SPANSION,
+		dev_id: S25FL208K,
+		name: "S25FL208K",
+		DeviceSize: SIZE_2MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_SPANSION,
+		dev_id: S25FL116K,
+		name: "S25FL116K",
+		DeviceSize: SIZE_2MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_SPANSION,
+		dev_id: S25FL216K,
+		name: "S25FL216K",
+		DeviceSize: SIZE_2MiB,
+		EraseSize: SIZE_64KiB,
+	},
+	{
+		mfr_id: MANUFACTURER_SPANSION,
+		dev_id: S25FL127S,
+		name: "S25FL127S",
+		DeviceSize: SIZE_16MiB,
+		EraseSize: SIZE_64KiB,
+	},
+};
+
+static struct spi_chip_info *spiflash_tc3162_setup(struct map_info *map)
+{	/* Allocate and zero a spi_chip_info; returns NULL on allocation failure. `map` is unused here. */
+	struct spi_chip_info *chip_info=NULL;
+
+	chip_info = kmalloc(sizeof(*chip_info), GFP_KERNEL);
+	if (!chip_info) {
+		printk(KERN_WARNING "Failed to allocate memory for chip_info\n");
+		return NULL;
+	}
+
+	memset(chip_info, 0, sizeof(struct spi_chip_info));	/* kmalloc+memset; kzalloc would be equivalent */
+
+	return chip_info;
+}
+
+static void spiflash_tc3162_destroy(struct spi_chip_info *chip_info)
+{	/* Counterpart of spiflash_tc3162_setup(): free the chip_info (NULL-safe). */
+	if (chip_info) {
+		kfree(chip_info);
+	}
+}
+
+struct spi_chip_info *spiflash_probe_tc3162(struct map_info *map)
+{	/* Probe the JEDEC ID, match it against flash_tables, set chip flags and ops; returns NULL when no table entry matches or on error. */
+	int i=0;
+	struct spi_chip_info *chip_info = NULL;
+	unsigned long flash_id=0;
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+	if(down_Manual_interruptible(&SPI_SEM))
+		return NULL;	/* fix: was `return -ERESTARTSYS;`, invalid for a pointer-returning function */
+
+#if 0
+	*((__u32 *)(CR_SPI_BASE | SPI_FLASH_MM)) = reg0x28;
+#endif
+#endif
+#if 0
+	spin_lock_init(&spiflash_mutex);
+	init_waitqueue_head(&spiflash_wq);
+	spiflash_state = FL_READY;
+#endif
+
+	flash_id = spiflash_read_id();	/* JEDEC manufacturer + device id */
+
+	for (i=0; i < ARRAY_SIZE(flash_tables); i++) {
+		if ((MANUFACTURER_ID(flash_id) == flash_tables[i].mfr_id) &&
+			(DEVICE_ID(flash_id) == flash_tables[i].dev_id)) {
+			if ( (MANUFACTURER_ID(flash_id) == MANUFACTURER_WINBOND) && (DEVICE_ID(flash_id) == W25Q256) ) {
+				is_W25Q256 = 1;	/* per-part quirk flags consumed by read/write paths */
+			}
+			if ( (MANUFACTURER_ID(flash_id) == MANUFACTURER_MXIC) && (DEVICE_ID(flash_id) == MX25L25635E) ) {
+				is_MX25L256 = 1;
+			}
+			if ( (MANUFACTURER_ID(flash_id) == MANUFACTURER_SPANSION) && (DEVICE_ID(flash_id) == S25FL256S) ) {
+				is_S25FL256S = 1;
+			}
+			if ( (MANUFACTURER_ID(flash_id) == MANUFACTURER_NUMONYX) && (DEVICE_ID(flash_id) == N25Q256) ) {
+				is_N25Q256 = 1;
+			}
+			if ( flash_tables[i].DeviceSize == SIZE_32MiB) {
+				is_SPI_32MB = 1;	/* enables 3B/4B addressing management */
+			}
+			chip_info = spiflash_tc3162_setup(map);
+			if (chip_info) {
+				chip_info->flash = &flash_tables[i];
+				chip_info->destroy = spiflash_tc3162_destroy;
+
+				chip_info->read = spiflash_read;
+				chip_info->read_manual = spiflash_manual_fast_read;
+				chip_info->write = spiflash_write;
+				chip_info->erase = spiflash_erase;
+			}
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+			up_Manual(&SPI_SEM);
+#endif
+			return chip_info;
+		}
+	}
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+	up_Manual(&SPI_SEM);
+#endif
+	return NULL;	/* unknown flash id */
+}
+
+int mtd_spiflash_erase(struct mtd_info *mtd, struct erase_info *instr)
+{	/* MTD erase callback: validate alignment/range, erase block by block, then mark MTD_ERASE_DONE and invoke the callback. */
+	struct map_info *map = mtd->priv;
+	struct spi_chip_info *chip_info = (struct spi_chip_info *)map->fldrv_priv;
+	unsigned long adr=0;
+	unsigned long len=0;
+	int ret = 0;
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+	if(down_Manual_interruptible(&SPI_SEM))
+		return -ERESTARTSYS;
+#if 0
+	*((__u32 *)(CR_SPI_BASE | SPI_FLASH_MM)) = reg0x28;
+#endif
+#endif
+	if (!chip_info->erase){	/* no erase op wired up by the probe */
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+		up_Manual(&SPI_SEM);
+#endif
+		return -EOPNOTSUPP;
+	}
+
+	if (instr->addr & (mtd->erasesize - 1)){	/* start must be erase-block aligned */
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+		up_Manual(&SPI_SEM);
+#endif
+		return -EINVAL;
+	}
+
+	if (instr->len & (mtd->erasesize -1)){	/* length must be a whole number of erase blocks */
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+		up_Manual(&SPI_SEM);
+#endif
+		return -EINVAL;
+	}
+
+	if ((instr->len + instr->addr) > mtd->size){	/* must stay inside the device */
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+		up_Manual(&SPI_SEM);
+#endif
+		return -EINVAL;
+	}
+
+	adr = instr->addr;
+	len = instr->len;
+
+	while (len) {	/* erase one block per iteration */
+#if 0
+		if (!spiflash_wait_ready(FL_ERASING)){
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+			up_Manual(&SPI_SEM);
+#endif
+			return -EINTR;
+		}
+#endif
+		ret = chip_info->erase(mtd, adr);
+		// spiflash_done();
+		if (ret){	/* abort on the first failing block */
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+			up_Manual(&SPI_SEM);
+#endif
+			return ret;
+		}
+
+		adr += mtd->erasesize;
+		len -= mtd->erasesize;
+	}
+
+	instr->state = MTD_ERASE_DONE;
+	if (instr->callback)
+		instr->callback(instr);	/* notify the MTD core/completion waiter */
+
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+	up_Manual(&SPI_SEM);
+#endif
+	return 0;
+}
+
+int mtd_spiflash_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
+{	/* MTD read callback: delegates to the chip's manual-mode read; on success sets *retlen = len. */
+	struct map_info *map = mtd->priv;
+	struct spi_chip_info *chip_info = (struct spi_chip_info *)map->fldrv_priv;
+	int ret = 0;
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+	if(down_Manual_interruptible(&SPI_SEM))
+		return -ERESTARTSYS;
+#if 0
+	*((__u32 *)(CR_SPI_BASE | SPI_FLASH_MM)) = reg0x28;
+#endif
+#endif
+	//if (!chip_info->read){
+	if (!chip_info->read_manual){	/* manual read is used instead of the memory-mapped auto read */
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+		up_Manual(&SPI_SEM);
+#endif
+		return -EOPNOTSUPP;
+	}
+
+	//ret = chip_info->read(map, from, (u32)buf, len);
+	ret = chip_info->read_manual(mtd, from, buf, len);
+	if (ret){
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+		up_Manual(&SPI_SEM);
+#endif
+		return ret;
+	}
+
+	if(retlen)
+		(*retlen) = len;	/* the manual read transfers the whole request or fails */
+
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+	up_Manual(&SPI_SEM);
+#endif
+	return 0;
+}
+
+int mtd_spiflash_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
+{	/* MTD write callback: delegates to the chip's write op; on success sets *retlen = len. */
+	struct map_info *map = mtd->priv;
+	struct spi_chip_info *chip_info = (struct spi_chip_info *)map->fldrv_priv;
+	int ret = 0;
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+	if(down_Manual_interruptible(&SPI_SEM))
+		return -ERESTARTSYS;
+
+#if 0
+	*((__u32 *)(CR_SPI_BASE | SPI_FLASH_MM)) = reg0x28;
+#endif
+#endif
+	if (!chip_info->write){
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+		up_Manual(&SPI_SEM);
+#endif
+		return -EOPNOTSUPP;
+	}
+#if 0
+	if (!spiflash_wait_ready(FL_WRITING)){
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+		up_Manual(&SPI_SEM);
+#endif
+		return -EINTR;
+	}
+#endif
+	ret = chip_info->write(mtd, (u32)buf, to, len);	/* note: spiflash_write() takes (buf, dest) as (from, to) u32s */
+	//spiflash_done();
+	if (ret){
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+		up_Manual(&SPI_SEM);
+#endif
+		return ret;
+	}
+
+	if (retlen)
+		(*retlen) = len;
+
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+	up_Manual(&SPI_SEM);
+#endif
+	return 0;
+}
+
+unsigned char ReadSPIByte(unsigned long index)
+{	/* Read a single byte from SPI flash offset `index`; read status is not propagated. */
+	unsigned char buf[2] = {0};
+	size_t retlen;
+
+	index &= 0x03FFFFFF;	/* clamp to the 64MB flash address window */
+
+	mtd_spiflash_read(spi_nor_mtd, (loff_t)index, 1, &retlen, buf);
+
+	return buf[0];
+}
+
+unsigned long ReadSPIDWord(unsigned long index)
+{	/* Read a 32-bit word (device byte order) from SPI flash offset `index`; read status is not propagated. */
+	unsigned long dword = 0;
+	size_t retlen;
+
+	index &= 0x03FFFFFF;	/* clamp to the 64MB flash address window */
+
+	mtd_spiflash_read(spi_nor_mtd, (loff_t)index, 4, &retlen, (u_char *)&dword);	/* fix: explicit cast; was passing `unsigned long *` where `u_char *` is expected */
+
+	return dword;
+}
+EXPORT_SYMBOL(ReadSPIByte);
+EXPORT_SYMBOL(ReadSPIDWord);
+
+void sfc_AutoManualInterrupt_handler(void)
+{	/* Reports and clears the SFC "auto and manual access collided" interrupt. NOTE(review): signature does not match irq_handler_t expected by request_irq() -- verify the cast at registration. */
+	if(ReadReg(SF_INTERRUPT) == 1) {
+		printk("\n>>> %d: Auto Mode and Manual Mode access in the same time, Reg(0xbfb40054) is 0x%08x\n", AUTO_MANUAL_INT, VPint(0xbfb40054));
+		WriteReg(SF_INTERRUPT, 1);	/* writing 1 back clears the pending bit (presumably W1C -- confirm with datasheet) */
+	} else {
+		printk("\n>>> %d: Strange!! should not come here\n", AUTO_MANUAL_INT);	/* IRQ fired but status bit not set */
+	}
+}
+
+#ifdef TCSUPPORT_DUAL_IMAGE_ENHANCE
+int offset = 0;
+#endif
+
+static struct mtd_info *spiflash_probe(struct map_info *map)
+{	/* mtd_chip_driver probe: identify the chip, build the mtd_info, wire the MTD ops, publish spi_nor_mtd; NULL on failure. */
+	struct spi_chip_info *chip_info = NULL;
+	struct mtd_info *mtd=NULL;
+
+	chip_info = spiflash_probe_tc3162(map);
+	if (!chip_info)
+		return NULL;
+
+	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
+	if (!mtd) {
+		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
+		return NULL;	/* NOTE(review): chip_info allocated above is leaked on this path */
+	}
+
+	mtd->priv = map;
+	mtd->type = MTD_NORFLASH;
+	mtd->_erase = mtd_spiflash_erase;
+	mtd->_write = mtd_spiflash_write;
+	mtd->_read = mtd_spiflash_read;
+	mtd->flags = MTD_CAP_NORFLASH;
+	mtd->name = map->name;
+	mtd->writesize = 1;	/* byte-writable NOR */
+	mtd->owner = THIS_MODULE;
+
+	mtd->size = chip_info->flash->DeviceSize;
+	mtd->erasesize = chip_info->flash->EraseSize;
+
+	map->fldrv_priv = chip_info;	/* MTD callbacks fetch chip_info back through the map */
+
+	printk(KERN_INFO "%s: Found SPIFLASH %dMiB %s\n",
+	       map->name, chip_info->flash->DeviceSize/(1024*1024), chip_info->flash->name);
+#ifdef TCSUPPORT_DUAL_IMAGE_ENHANCE
+	offset = chip_info->flash->DeviceSize/2;	/* second image starts at the midpoint of the device */
+#endif
+
+	spi_nor_mtd = mtd;	/* global handle used by ReadSPIByte/ReadSPIDWord and the test kthreads */
+
+	return mtd;
+}
+
+static void spiflash_destroy(struct mtd_info *mtd)
+{	/* mtd_chip_driver destroy hook: release the per-chip state created at probe time (mtd itself is freed by the caller). */
+	struct map_info *map = (struct map_info *)mtd->priv;
+	struct spi_chip_info *chip_info = (struct spi_chip_info *)map->fldrv_priv;
+
+	if (chip_info->destroy) {
+		chip_info->destroy(chip_info);
+	}
+}
+
+static struct mtd_chip_driver spiflash_chipdrv = {	/* registered with the MTD core in spiflash_probe_init() */
+	.probe		= spiflash_probe,
+	.destroy	= spiflash_destroy,
+	.name		= "spiflash_probe",
+	.module		= THIS_MODULE
+};
+
+#if defined(TCSUPPORT_SUPPORT_FLASH)
+static int read_proc_support_flash(char *page, char **start, off_t off,
+		int count, int *eof, void *data)
+{	/* Legacy /proc read handler: list supported flash parts >=4MiB as "<name> <jedec-id> <MiB>" lines. */
+	int len;
+	unsigned int flash_id;
+	int index_flash;
+	char *buf_proc = NULL;	/* 4KB scratch buffer for the whole listing */
+	char buf_line[200];
+	char buf_name[100];
+	char *buf_replace = NULL;
+	int total_len=0;
+
+
+	buf_proc = kmalloc(4*1024, GFP_KERNEL);
+	if (!buf_proc)
+	{
+		printk(KERN_WARNING "Failed to allocate memory for /proc/tc3162/support_flash\n");
+		return 0;
+	}
+
+	memset(buf_proc,0,4*1024);
+
+	for(index_flash=0; index_flash < sizeof(flash_tables)/sizeof(struct spi_flash_info); index_flash++)
+	{
+		strcpy(buf_name,flash_tables[index_flash].name);//replace whitespace with '#'
+		while( (buf_replace=strstr(buf_name, " "))!=NULL)
+			*buf_replace='#';
+
+		if(flash_tables[index_flash].DeviceSize/0x100000 <4)
+			continue;	/* parts smaller than 4MiB are not listed */
+
+		flash_id = (flash_tables[index_flash].mfr_id <<16) | ( flash_tables[index_flash].dev_id & 0xffff);
+
+		sprintf(buf_line,"%s %#x %d \n",buf_name , flash_id,
+			flash_tables[index_flash].DeviceSize/0x100000);
+
+		total_len += strlen(buf_line);
+		if(total_len>4*1024)	/* NOTE(review): == 4096 still passes, then strcat's NUL lands one past the buffer -- verify */
+			break;
+		strcat(buf_proc,buf_line);
+	}
+
+	len = sprintf(page, "%s", buf_proc);
+
+	len -= off;	/* standard legacy-procfs partial-read bookkeeping */
+	*start = page + off;
+
+	if (len > count)
+		len = count;
+	else
+		*eof = 1;
+
+	if (len < 0)
+		len = 0;
+
+	kfree(buf_proc);
+	return len;
+}
+#endif
+
+static int write_test(void *arg)
+{	/* Stress kthread: repeatedly erase a block, program 64 random bytes, read back byte-wise and compare. */
+	struct _SPI_NOR_FLASH_RW_TEST_T param;
+	u8 buf[64], read_buf[64];
+	int i;
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+	int cpu = smp_processor_id();
+	int vpe = cpu_data[cpu].vpe_id;
+#else
+	int cpu = 0;
+	int vpe = 0;
+#endif
+
+	memcpy(&param, arg, sizeof(struct _SPI_NOR_FLASH_RW_TEST_T));	/* snapshot shared params (also used by read_test) */
+	printk("write_test: run at vpe:%d, cpu:%d\n", vpe, cpu);
+	printk("write_test: times=%d, block_idx=%d\n", param.times, param.block_idx);
+
+	while (!kthread_should_stop() && param.times > 0) {
+		if(param.times % 10 == 0)
+			printk("write_test:%d\n", param.times);
+		msleep(1);	/* yield between iterations */
+		param.times--;
+
+		get_random_bytes(buf, sizeof(buf));	/* fresh random pattern each round */
+
+		spiflash_erase(spi_nor_mtd, param.block_idx * spi_nor_mtd->erasesize);
+		spiflash_write(spi_nor_mtd, buf, param.block_idx * spi_nor_mtd->erasesize, sizeof(buf));
+		for(i = 0; i < sizeof(read_buf); i++) {
+			read_buf[i] = ReadSPIByte((param.block_idx * spi_nor_mtd->erasesize) + i);
+		}
+
+		if(memcmp(buf, read_buf, sizeof(buf)) != 0) {
+			printk("write fail\n");
+			return -1;	/* stop the thread on the first mismatch */
+		}
+	}
+
+	printk("write done\n");
+
+	return 0;
+}
+
+static int read_test(void *arg)
+{	/* Stress kthread: repeatedly read 64 bytes from the test block to race against write_test on another CPU. */
+	struct _SPI_NOR_FLASH_RW_TEST_T param;
+	u8 buf[64];
+	int i;
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+	int cpu = smp_processor_id();
+	int vpe = cpu_data[cpu].vpe_id;
+#else
+	int cpu = 0;
+	int vpe = 0;
+#endif
+
+	memcpy(&param, arg, sizeof(struct _SPI_NOR_FLASH_RW_TEST_T));	/* snapshot shared params */
+	printk("read_test: run at vpe:%d, cpu:%d\n", vpe, cpu);
+	printk("read_test: times=%d, block_idx=%d\n", param.times, param.block_idx);
+
+	memset(buf, 0xaa, sizeof(buf));
+
+	while (!kthread_should_stop() && param.times > 0) {
+		if(param.times % 10 == 0)
+			printk("read_test:%d\n", param.times);
+		msleep(1);	/* yield between iterations */
+		param.times--;
+		for(i = 0; i < sizeof(buf); i++) {
+			buf[i] = ReadSPIByte((param.block_idx * spi_nor_mtd->erasesize) + i);	/* data not checked -- only exercises the read path */
+		}
+	}
+
+	printk("read done\n");
+
+	return 0;
+}
+
+static int spi_nor_proc_test_write(struct file* file, const char* buffer,
+		unsigned long count, void *data)
+{	/* /proc write handler: parses "rw_test <times> <block_idx>" and spawns CPU-bound writer/reader stress kthreads. */
+	char buf[64], cmd[32];
+	u32 arg1, arg2;
+	struct task_struct *thread;
+	u32 idx;
+	unsigned int cpu;
+
+	if (count >= sizeof(buf) || copy_from_user(buf, buffer, count)) {	/* fix: bound `count` so buf[count]='\0' below cannot overflow the 64-byte stack buffer */
+		return -EFAULT;
+	}
+
+	buf[count] = '\0';
+
+	sscanf(buf, "%31s %u %u", cmd, &arg1, &arg2) ;	/* fix: width limit matches cmd[32]; %u matches the u32 arguments */
+
+	printk("cmd:%s, arg1=%u, arg2=%u\n", cmd, arg1, arg2);
+
+	if (!strcmp(cmd, "rw_test")) {
+		rw_test_param.times = arg1;
+		rw_test_param.block_idx = arg2;
+
+		thread = kthread_create(write_test, (void *)&rw_test_param, "write_test");
+		kthread_bind(thread, 3);	/* NOTE(review): hard-coded CPU ids assume >=4 online CPUs */
+		wake_up_process(thread);
+		thread = kthread_create(read_test, (void *)&rw_test_param, "read_test");
+		kthread_bind(thread, 2);
+		wake_up_process(thread);
+	} else {
+		printk("input not defined.\n");
+	}
+
+	return count;
+
+}
+
+#if defined(TCSUPPORT_CPU_EN7512)||defined(TCSUPPORT_CPU_EN7521)
+/*------------------------------------------------------------------------------------
+ * FUNCTION: spi_nor_set_clock_speed( u32 clock_factor)
+ * PURPOSE : To set SPI clock.
+ * clock_factor = 0
+ * EN7512: SPI clock = 500MHz / 40 = 12.5MHz
+ * EN7522: SPI clock = 400MHz / 40 = 10MHz
+ * clock_factor > 0
+ * EN7512: SPI clock = 500MHz / (clock_factor * 2)
+ * EN7522: SPI clock = 400MHz / (clock_factor * 2)
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : clock_factor - The SPI clock divider.
+ * RETURN : NONE.
+ * NOTES :
+ * MODIFICTION HISTORY:
+ *
+ *------------------------------------------------------------------------------------
+ */
+void spi_nor_set_clock_speed( u32 clk)
+{	/* Program the SFC clock divider for a target `clk` in MHz (see the banner comment above); no-op message on FPGA. NOTE(review): clk==0 would divide by zero -- callers pass 25. */
+	u32 val;
+	u32 dividend;	/* SoC source clock in MHz */
+	u32 clock_factor;
+
+	if(!isFPGA) {
+		if(isEN7526c || isEN751627||isEN7580) {
+			dividend = 400;	/* 400MHz source on these SoCs */
+		} else {
+			dividend = 500;
+		}
+
+		clock_factor = (dividend / (clk * 2));	/* SPI clock = dividend / (clock_factor * 2) */
+
+		val = VPint(SF_CLK_CHANGE);
+		val &= 0xffff0000;	/* clear divider and enable fields first */
+		VPint(SF_CLK_CHANGE) = val;
+
+		val |= (((clock_factor) << 8)|1);	/* new divider plus enable bit */
+		VPint(SF_CLK_CHANGE) = val;
+
+		printk("Set SPI Clock to %u Mhz\n", (dividend / (clock_factor * 2)));
+	} else {
+		printk("FPGA can not Set SPI Clock, FPGA SPI Clock is fixed to 25 Mhz\n");
+	}
+}
+#endif
+
+
+static int __init spiflash_probe_init(void)
+{	/* Module init: identify the flash, set addressing mode/clock, hook the collision IRQ, register the chip driver and proc entries. */
+	int index=0;
+	unsigned long flash_id=0;
+	int spi_controller_clk;
+	struct proc_dir_entry *entry;
+#if defined(TCSUPPORT_SUPPORT_FLASH)
+	create_proc_read_entry("tc3162/support_flash", 0, NULL, read_proc_support_flash, NULL);	/* legacy procfs API (pre-3.10 style) */
+#endif
+
+	printk("\nSPI Flash Init Start\n");
+	/* enable SF_MAXMUX_SEL */
+	WriteReg(SF_MACMUX_SEL, MANUAL_MACMUXSEL);
+
+	flash_id = spiflash_read_id();
+
+	for (index=0; index < ARRAY_SIZE(flash_tables); index++) {	/* same quirk-flag detection as spiflash_probe_tc3162() */
+		if ((MANUFACTURER_ID(flash_id) == flash_tables[index].mfr_id) &&
+			(DEVICE_ID(flash_id) == flash_tables[index].dev_id)) {
+			if ( (MANUFACTURER_ID(flash_id) == MANUFACTURER_WINBOND) && (DEVICE_ID(flash_id) == W25Q256) ) {
+				is_W25Q256 = 1;
+			}
+			if ( (MANUFACTURER_ID(flash_id) == MANUFACTURER_MXIC) && (DEVICE_ID(flash_id) == MX25L25635E) ) {
+				is_MX25L256 = 1;
+			}
+			if ( (MANUFACTURER_ID(flash_id) == MANUFACTURER_SPANSION) && (DEVICE_ID(flash_id) == S25FL256S) ) {
+				is_S25FL256S = 1;
+			}
+			if ( (MANUFACTURER_ID(flash_id) == MANUFACTURER_NUMONYX) && (DEVICE_ID(flash_id) == N25Q256) ) {
+				is_N25Q256 = 1;
+			}
+			if ( flash_tables[index].DeviceSize == SIZE_32MiB) {
+				is_SPI_32MB = 1;
+			}
+			break;
+		}
+	}
+
+	if (index == ARRAY_SIZE(flash_tables)) {
+		printk ("Found no SPI flash device\n");
+		return -1;
+	}
+
+	if(flash_tables[index].DeviceSize >= SIZE_32MiB){	/* >=32MiB parts need 4-byte addressing for the upper half */
+		//enter_dualIO_enable_mode();
+		enter_4Byte_mode();
+		SEND_AUTO_4B_CMD;
+	} else {
+		SEND_AUTO_3B_CMD;
+	}
+	SEND_AUTO_READ_CMD;
+
+	SEND_WRITE_REGISTER_CMD(SF_OP_WR_STATUS1, SF_SR1_DEFAULT);	/* restore default status register (clear block protection) */
+
+	// set SFC Clock to 25MHZ
+#if defined(TCSUPPORT_CPU_EN7512)||defined(TCSUPPORT_CPU_EN7521)
+	spi_controller_clk = 25;
+	spi_nor_set_clock_speed(spi_controller_clk);
+#else
+	WriteReg(SF_CLK_CHANGE, 6);	/* divider value for 25MHz on other SoCs */
+#endif
+	// set Tn to 1
+	WriteReg(SF_SI_CK_SEL, 9);
+
+	printk("\nSPI Flash Init Finish\n");
+
+	VPint(SF_INTERRUPT_EN) |= AUTO_MANUAL_INTR_EN;	/* enable auto/manual collision interrupt */
+	if (request_irq(AUTO_MANUAL_INT, sfc_AutoManualInterrupt_handler, 0, "SFC_AutoManual", NULL) != 0) {	/* NOTE(review): handler is not irq_handler_t-shaped -- verify */
+		printk("can not allocate IRQ for auto_manual_interrupt.\n");
+		return -1;
+	}
+	#ifndef TCSUPPORT_MIPS_1004K
+	VPint(CR_INTC_IMR_1) |= (0x01 << (AUTO_MANUAL_INT-33));	/* unmask in the legacy interrupt controller */
+	#endif
+
+	register_mtd_chip_driver(&spiflash_chipdrv);
+
+	entry = create_proc_entry(SPI_NOR_TEST, 0666, NULL);	/* created unconditionally (removal in exit is under TCSUPPORT_SUPPORT_FLASH only) */
+	if (entry == NULL)
+	{
+		printk("SPI NOR unable to create /proc entry\n");
+		return -ENOMEM;
+	}
+	entry->write_proc = spi_nor_proc_test_write;
+	return 0;
+}
+
+static void __exit spiflash_probe_exit(void)
+{	/* Module teardown: remove proc entries and unregister the chip driver. */
+	remove_proc_entry(SPI_NOR_TEST, NULL);	/* fix: created unconditionally in init, so remove unconditionally (was inside the ifdef) */
+#if defined(TCSUPPORT_SUPPORT_FLASH)
+	remove_proc_entry("tc3162/support_flash", NULL);
+#endif
+
+	unregister_mtd_chip_driver(&spiflash_chipdrv);
+}
+
+#if defined(TCSUPPORT_NEW_SPIFLASH_DEBUG)
+static void SF_Test_Helper(void){	/* Print usage text for the `sftest` debug command (no side effects). */
+#if 1
+	printk("\nusage: \tsftest [Round] [AutoRD:0~15] [ManualRD:0~15] [MuxRD:0~1] [isProg:0~1] [CLK Rate:0~31] [Exit:0~1] [3B4B:0~2] [Pattern] [StartAddr] [RDIdle] [Tn]\n");
+#else
+	printk("\nusage: \tsftest [Round] [AutoRD:0~15] [ManualRD:0~15] [MuxRD:0~1] [isProg:0~1] [CLK Rate:0~2] [Exit:0~1] [3B4B:0~2] [Pattern] [StartAddr] [RDIdle] [Tn]\n");
+#endif
+	printk("Round: \t\t0~65535 means test round. DEFAULT VALUE is 10\n");
+	printk("AutoRD: \t0~15 is b'0000~1111, bit3 means Auto_singleRead, bit2 means Auto_FastRead, bit1 means Auto_FastDualOutput, \n\t\tbit0 means Auto_FastDualIO. DEFAULT VALUE is 8\n");
+	printk("ManualRD: \t0~15 is b'0000~1111, bit3 means Manual_singleRead, bit2 means Manual_FastRead, bit1 means Manual_FastDualOutput, \n\t\tbit0 means Manual_FastDualIO. DEFAULT VALUE is 0\n");
+	printk("MuxRD: \t\t0 means disable MuxRead, 1 means enable MuxRead. DEFAULT VALUE is 0\n");
+	printk("isProg: \t0 means disable Program, 1 means enable Program. DEFAULT VALUE is 0\n");
+#if 1
+	printk("CLK Rate: \t0 means 15MHZ, 1 means (150/1)MHZ, 2 means (150/2)MHZ, 3 means (150/3)MHZ, etc... DEFAULT VALUE is 0\n");
+#else
+	printk("CLK Rate: \t0 means 25MHZ, 1 means 50MHZ, 2 means CLK Rate change on the fly. DEFAULT VALUE is 0\n");
+#endif
+	printk("Exit: \t\t0 means continue test while error happens, 1 means exit test while error happens. DEFAULT VALUE is 0\n");
+	printk("3B4B: \t\t0 means 4Bytes access all 32MB, 1 means 3Bytes access front 16MB, 2 means uses 13/0C/3C/BC. DEFAULT VALUE is 0\n");
+	printk("TestPattern: \t0:normal test, 1:A5, 2:5A, 3:FF, 4:00, 5:55, 6:AA, 7:96, 8:69, 9:CC, 10:33, 11:Random. DEFAULT VALUE is 0\n");
+	printk("StartAddr: \tDEFAULT VALUE is 0xF00000\n\n");
+	printk("RDIdle: \t0: Disable Read IDLE, 1: Enable Read IDLE. DEFAULT VALUE is 0\n");
+	printk("Tn: \t0~15 is b'000000~111111, the value of bit0~bit5 means T0~T5 enable or desable. DEFAULT VALUE is 0\n");
+}
+
+unsigned long sf_rand_ascend(void)
+{	/* Deterministic "random" source: returns 1, 2, 3, ... across calls (static counter). */
+	static unsigned long rand_asc=0;
+	rand_asc++;
+	return rand_asc;
+}
+unsigned long sf_rand_descend(void)
+{	/* Deterministic "random" source: counts down from 0 with unsigned wrap-around (0xffffffff, 0xfffffffe, ...). */
+	static unsigned long rand_des=0;
+	rand_des--;
+	return rand_des;
+}
+unsigned long sf_rand(void)
+{	/* Tiny linear-congruential PRNG, range [0, 65535], fixed seed 0. */
+	static unsigned long rand_item=0;
+	rand_item = (rand_item * 123 + 59) % 65536;
+	return rand_item;
+}
+
+void spiflash_Tn_change(void)
+{	/* Cycle through the configured Tn timing settings, one step per call (round-robin over TnMappingTab). */
+	static int Tn_cur_index = 0;
+	SEND_TN_CMD(TnMappingTab[(Tn_cur_index++)%Tn_cnt]);
+}
+
+#if 1
+void spiflash_clk_change(int mode)
+{	/* Set the SFC clock register directly; `mode` is the raw divider/register value (presumably -- confirm with SoC datasheet). */
+	WriteReg(SF_CLK_CHANGE, mode);
+}
+
+void spiflash_clk_change_25MHZ(void)
+{	/* Divider value 6 selects the 25MHz rate. */
+	spiflash_clk_change(6);
+}
+
+void spiflash_clk_change_50MHZ(void)
+{	/* Divider value 3 selects the 50MHz rate. */
+	spiflash_clk_change(3);
+}
+#else
+void spiflash_clk_change(int mode)
+{	/* Disabled variant: toggle 25/50MHz via controller commands and track clk_rate. */
+	if(mode & 0x1) {
+		SEND_CLK_25MHZ_CMD;
+		clk_rate = 0;
+	}
+	else {
+		SEND_CLK_50MHZ_CMD;
+		clk_rate = 1;
+	}
+}
+
+void spiflash_clk_change_25MHZ()
+{
+	SEND_CLK_25MHZ_CMD;
+}
+
+void spiflash_clk_change_50MHZ()
+{
+	SEND_CLK_50MHZ_CMD;
+}
+#endif
+
+int spiflash_autoread_test(struct mtd_info *mtd,
+	unsigned long from, unsigned char *buf, unsigned long len, unsigned long *retlen, u32 mode)
+{	/* Debug helper: pick an auto-read opcode from AutoMappingTab[mode], report it via *retlen, then do a memory-mapped read. */
+	mode %= AutoRead_cnt;
+	*retlen = AutoMappingTab[mode];	/* tells the caller which read variant was exercised */
+	if(Tn_cnt >= 2)
+		spiflash_Tn_change();	/* also rotate Tn timing when several are configured */
+#if 0
+	if(CLKRate_Value == 2)
+		spiflash_clk_change(mode);
+#endif
+	switch(AutoMappingTab[mode]) {
+	case 0:
+		SEND_AUTO_READ_CMD;
+		break;
+	case 1:
+		SEND_AUTO_FASTREAD_CMD;
+		break;
+	case 2:
+		SEND_AUTO_FASTREAD_DUALOUT_CMD;
+		break;
+	case 3:
+		SEND_AUTO_FASTREAD_DUALIO_CMD;
+		break;
+	}
+	return spiflash_read(mtd->priv, from, (u32)buf, len);
+}
+
+int spiflash_manualread_test(struct mtd_info *mtd,
+	unsigned long from, unsigned char *buf, unsigned long len, unsigned long *retlen, u32 mode)
+{	/* Debug helper: dispatch to one of the four manual read variants per ManualMappingTab[mode]; *retlen reports the variant. */
+	mode %= ManualRead_cnt;
+	*retlen = ManualMappingTab[mode];
+	if(Tn_cnt >= 2)
+		spiflash_Tn_change();	/* also rotate Tn timing when several are configured */
+#if 0
+	if(CLKRate_Value == 2)
+		spiflash_clk_change(mode);
+#endif
+	switch(ManualMappingTab[mode]) {
+	case 0:
+		return spiflash_manual_read_data(mtd, from, buf, len);
+	case 1:
+		return spiflash_manual_fast_read(mtd, from, buf, len);
+	case 2:
+		return spiflash_manual_fast_read_dualOut(mtd, from, buf, len);
+	case 3:
+		return spiflash_manual_fast_read_dualIO(mtd, from, buf, len);
+	}
+}	/* NOTE(review): falls off without a return if the table holds a value outside 0-3 (compiler warning) */
+
+int spiflash_muxread_test(struct mtd_info *mtd,
+	unsigned long from, unsigned char *buf, unsigned long len, unsigned long *retlen, u32 mode)
+{	/* Debug helper: randomly mix auto (cases 0-3, fall through to memory-mapped read) and manual (cases 4-7) read variants; the passed `mode` is ignored and re-rolled. */
+	mode = sf_rand() % MuxRead_cnt;
+	*retlen = MuxMappingTab[mode];	/* tells the caller which variant was exercised */
+	if(Tn_cnt >= 2)
+		spiflash_Tn_change();	/* also rotate Tn timing when several are configured */
+#if 0
+	if(CLKRate_Value == 2)
+		spiflash_clk_change(mode);
+#endif
+	switch(MuxMappingTab[mode]) {
+	case 0:
+		SEND_AUTO_READ_CMD;
+		break;
+	case 1:
+		SEND_AUTO_FASTREAD_CMD;
+		break;
+	case 2:
+		SEND_AUTO_FASTREAD_DUALOUT_CMD;
+		break;
+	case 3:
+		SEND_AUTO_FASTREAD_DUALIO_CMD;
+		break;
+	case 4:
+		return spiflash_manual_read_data(mtd, from, buf, len);
+	case 5:
+		return spiflash_manual_fast_read(mtd, from, buf, len);
+	case 6:
+		return spiflash_manual_fast_read_dualOut(mtd, from, buf, len);
+	case 7:
+		return spiflash_manual_fast_read_dualIO(mtd, from, buf, len);
+	}
+	return spiflash_read(mtd->priv, from, (u32)buf, len);	/* auto cases (0-3) fall through to the memory-mapped read */
+}
+
+
+int sf_basic_test(u32 round, u32 readtest)	/* run one erase/verify(/program/verify) sweep over [StartAddress_Value, flash end) */
+{
+	unsigned long retlen=0;
+	unsigned long erasesize=0;
+	unsigned long size=0;
+	unsigned long cur_se_off=0;
+	unsigned long cur_pp_off=0;
+	unsigned long start_addr=0;
+	unsigned char se_buf[SE_MAX];
+	unsigned char pp_buf[PP_MAX];
+	unsigned char seed=0;
+	int cur_round=0;
+	int i=0, j=0;
+	int index=0;
+	u32 read_mode=0;
+	u32 begtime=0, endtime=0, passtime=0;
+	struct erase_info instr;
+	struct map_info *map=NULL;
+	struct mtd_info *mtd=NULL;
+
+	map = kzalloc(sizeof(struct map_info), GFP_KERNEL);
+	if (!map) {
+		printk(KERN_WARNING "Failed to allocate memory for MAP\n");
+		return -ENOMEM;	/* fix: was "return NULL" -- type error in an int-returning function */
+	}
+	map->virt = 0xBC000000;
+
+	/* check flash ID */
+	mtd = spiflash_probe(map);
+	if (!mtd) {
+		printk ("\n########spiflash ID read error\n");
+		kzfree(map); return -ENOMEM; }	/* fix: bail out -- mtd is dereferenced right below */
+
+	erasesize = mtd->erasesize;
+	size = mtd->size;
+	start_addr = StartAddress_Value;
+
+	cur_round = round % (TP_MAX+1);
+	//read_mode = round % 5;
+	if(readtest == AUTOREAD_TEST)
+		printk("\n\n\n>>>>>>> Round%d Test::current test is AUTOREAD_TEST >>>>>>\n", round);
+	else if(readtest == MANUALREAD_TEST)
+		printk("\n\n\n>>>>>>> Round%d Test::current test is MANUALREAD_TEST >>>>>>\n", round);
+	else if(readtest == MUXREAD_TEST)
+		printk("\n\n\n>>>>>>> Round%d Test::current test is MUXREAD_TEST >>>>>>\n", round);
+
+#if 1
+	begtime = getTime();
+#endif
+	for(cur_se_off = start_addr; (cur_se_off + erasesize) <= size; cur_se_off += erasesize) {
+		/* erase one block, the size is 64KB */
+		instr.addr = cur_se_off;
+		instr.len = erasesize; instr.callback = NULL;	/* fix: instr is on the stack -- clear callback (as sf_task2_test does) */
+		tc3162wdog_kick();
+		mtd_spiflash_erase(mtd, &instr);
+		//printk("\n\n Erase flash from %X to %X\n", cur_se_off, cur_se_off + erasesize);
+
+		/* check the content of the block */
+		for(j=0; j<SE_CNT; j++){
+			//mtd_spiflash_read(mtd, cur_se_off+j*SE_MAX, SE_MAX, &retlen, se_buf);
+			if(readtest == AUTOREAD_TEST)
+				spiflash_autoread_test(mtd, cur_se_off+j*SE_MAX, se_buf, SE_MAX, &retlen, (++read_mode));
+			else if(readtest == MANUALREAD_TEST)
+				spiflash_manualread_test(mtd, cur_se_off+j*SE_MAX, se_buf, SE_MAX, &retlen, (++read_mode));
+			else if(readtest == MUXREAD_TEST)
+				spiflash_muxread_test(mtd, cur_se_off+j*SE_MAX, se_buf, SE_MAX, &retlen, (++read_mode));
+			for(i=0; i<SE_MAX; i++) {
+				if(se_buf[i] != 0xFF) {	/* erased flash must read back all-ones */
+					if(readtest == MUXREAD_TEST)
+						printk("\n####Erase fail at Address %X::index is %d, value is %X, should be %X, ReadMode is %s::%s, reg 0xBFA10000 is 0x%08x, clk_rate is %s\n", cur_se_off+j*SE_MAX+i, i, se_buf[i], 0xFF, TestMode_TEXT[readtest], MuxMode_TEXT[retlen & 0x7], SF_READ_MODE_VALUE, CLKRate_TEXT[clk_rate]);
+					else
+						printk("\n####Erase fail at Address %X::index is %d, value is %X, should be %X, ReadMode is %s::%s, reg 0xBFA10000 is 0x%08x, clk_rate is %s\n", cur_se_off+j*SE_MAX+i, i, se_buf[i], 0xFF, TestMode_TEXT[readtest], ReadMode_TEXT[retlen & 0x3], SF_READ_MODE_VALUE, CLKRate_TEXT[clk_rate]);
+					dumpCell_buffer((u8 *)(se_buf), SE_MAX, cur_se_off+j*SE_MAX);
+					if(Exit_Value == 1)
+						goto erase_fail;
+					else
+						break;
+				}
+			}
+		}
+
+		if(isProgram_Value == 1) {
+			for(cur_pp_off = cur_se_off; (cur_pp_off + PP_MAX) <= (cur_se_off + erasesize); cur_pp_off += PP_MAX) {
+				/* program 256Bytes of current blcok */
+				if(TestPattern_Value == 0) {
+					if(cur_round == TP_MAX) {	/* last round of "normal" mode: pseudo-random pattern */
+						seed = sf_rand() % SEED_MAX;
+						for(i=0; i<PP_MAX; i++) {
+							pp_buf[i] = (seed+i) % SEED_MAX;
+						}
+					} else {
+						for(i=0; i<PP_MAX; i++) {
+							pp_buf[i] = TestPattern[cur_round];
+						}
+					}
+				} else if(TestPattern_Value == 11) {	/* "increment digital" pattern */
+					seed = sf_rand() % SEED_MAX;
+					for(i=0; i<PP_MAX; i++) {
+						pp_buf[i] = (seed+i) % SEED_MAX;
+					}
+				} else {
+					for(i=0; i<PP_MAX; i++) {
+						pp_buf[i] = TestPattern[TestPattern_Value-1];
+					}
+				}
+				mtd_spiflash_write(mtd, cur_pp_off, PP_MAX, &retlen, pp_buf);
+				//printk("\n	Program flash from %X to %X", cur_pp_off, cur_pp_off + PP_MAX);
+
+				/* check the content of current 256 Bytes */
+				//mtd_spiflash_read(mtd, cur_pp_off, PP_MAX, &retlen, pp_buf);
+				if(readtest == AUTOREAD_TEST)
+					spiflash_autoread_test(mtd, cur_pp_off, pp_buf, PP_MAX, &retlen, (++read_mode));
+				else if(readtest == MANUALREAD_TEST)
+					spiflash_manualread_test(mtd, cur_pp_off, pp_buf, PP_MAX, &retlen, (++read_mode));
+				else if(readtest == MUXREAD_TEST)
+					spiflash_muxread_test(mtd, cur_pp_off, pp_buf, PP_MAX, &retlen, (++read_mode));
+				for(i=0; i<PP_MAX; i++) {	/* expected value mirrors the pattern selection above */
+					if( pp_buf[i] != ((TestPattern_Value==0)?((cur_round == TP_MAX)?((seed+i) % SEED_MAX):(TestPattern[cur_round])):((TestPattern_Value==11)?((seed+i) % SEED_MAX):(TestPattern[TestPattern_Value-1]))) ) {
+						if(readtest == MUXREAD_TEST)
+							printk("\n####Program fail at Address %X::index is %d, value is %X, should be %X, ReadMode is %s::%s, reg 0xBFA10000 is 0x%08x, clk_rate is %s\n", cur_pp_off+i, i, pp_buf[i], ((TestPattern_Value==0)?((cur_round == TP_MAX)?((seed+i) % SEED_MAX):(TestPattern[cur_round])):((TestPattern_Value==11)?((seed+i) % SEED_MAX):(TestPattern[TestPattern_Value-1]))), TestMode_TEXT[readtest], MuxMode_TEXT[retlen & 0x7], SF_READ_MODE_VALUE, CLKRate_TEXT[clk_rate]);
+						else
+							printk("\n####Program fail at Address %X::index is %d, value is %X, should be %X, ReadMode is %s::%s, reg 0xBFA10000 is 0x%08x, clk_rate is %s\n", cur_pp_off+i, i, pp_buf[i], ((TestPattern_Value==0)?((cur_round == TP_MAX)?((seed+i) % SEED_MAX):(TestPattern[cur_round])):((TestPattern_Value==11)?((seed+i) % SEED_MAX):(TestPattern[TestPattern_Value-1]))), TestMode_TEXT[readtest], ReadMode_TEXT[retlen & 0x3], SF_READ_MODE_VALUE, CLKRate_TEXT[clk_rate]);
+						dumpCell_buffer((u8 *)(pp_buf), PP_MAX, cur_pp_off);
+						if(Exit_Value == 1)
+							goto program_fail;
+						else
+							break;
+					}
+				}
+			}
+		}
+	}
+#if 1
+	endtime = getTime();
+	if(endtime < begtime)	/* timer wrapped */
+		passtime = 0xffffffff - begtime + endtime;
+	else
+		passtime = endtime - begtime;
+	printk("\n>>>>>>> Round%d Test Finish::passtime is %ums\n", round, passtime);	/* fix: %u for u32 (was %lu) */
+	printk("\nCurrent Time is %d:%d:%d\n", endtime/3600000, (endtime/60000)%60, (endtime/1000)%60);
+#endif
+
+	kzfree(map->fldrv_priv);
+	kzfree(map);
+	kzfree(mtd);
+
+	return 0;
+
+erase_fail:
+	return -1;
+
+program_fail:
+	return -2;
+}
+
+int sf_manual_test(u32 round)	/* one round of the erase/program sweep using manual (FIFO) reads */
+{
+	return sf_basic_test(round, MANUALREAD_TEST);
+}
+
+int sf_auto_test(u32 round)	/* one round of the erase/program sweep using memory-mapped auto reads */
+{
+	return sf_basic_test(round, AUTOREAD_TEST);
+}
+
+int sf_mux_test(u32 round)	/* one round of the sweep mixing auto and manual read modes */
+{
+	return sf_basic_test(round, MUXREAD_TEST);
+}
+
+int sf_basic_test_entry(int argc, char *argv[])	/* CLI entry: parse test options into the globals, then run the configured sweeps */
+{
+	int i=0;
+	u32 round = 0;
+	u32 pattern = 0;
+	int result = 0;
+	struct map_info *map=NULL;
+	struct mtd_info *mtd=NULL;
+
+	clk_rate = 0;	/* reset every option to its default before parsing */
+	Round_Value = 10;
+	AutoRead_Value = 8;
+	ManualRead_Value = 0;
+	MuxRead_Value = 0;
+	isProgram_Value = 0;
+	CLKRate_Value = 0;
+	Exit_Value = 0;
+	Address3B4B_Value = 0;
+	TestPattern_Value = 0;
+	StartAddress_Value = 0xF00000;
+	ReadIdle_Value = 0;
+	Tn_Value = 0;
+	Tn_cnt = 0;
+	TnMappingTab[0] = 0;
+
+	AutoRead_cnt = 0;
+	ManualRead_cnt = 0;
+	MuxRead_cnt = 0;
+
+	AutoRead_cnt = 1;
+	AutoMappingTab[0] = 0;
+
+	if(argc == 1){
+		SF_Test_Helper();
+		return -1;
+	}
+
+	if(argc >= 2){
+		Round_Value = simple_strtoul(argv[1], NULL, 10);
+		printk("Round is %lu\n", Round_Value);
+	}
+	if(argc >= 3){	/* argv[2]: auto-read mode bitmask, bit3..0 = single/fast/dual-out/dual-IO */
+		AutoRead_Value = simple_strtoul(argv[2], NULL, 10);
+		if(AutoRead_Value > 15) {
+			printk("\nError: AutoRead Value must be [0~15]!\n");
+			SF_Test_Helper();
+			return -3;
+		}
+		else {
+			AutoRead_cnt = 0;
+			MuxRead_cnt = 0;
+			printk("AutoRead Value is %u, ", AutoRead_Value);
+			if(AutoRead_Value == 0)
+				printk("no Auto Read Test\n");
+			else {
+				printk("including ");
+				if(AutoRead_Value & 0x8) {
+					printk("Single Read, ");
+					AutoMappingTab[AutoRead_cnt++] = 0;
+					MuxMappingTab[MuxRead_cnt++] = 0;
+				}
+				if(AutoRead_Value & 0x4) {
+					printk("Fast Read, ");
+					AutoMappingTab[AutoRead_cnt++] = 1;
+					MuxMappingTab[MuxRead_cnt++] = 1;
+				}
+				if(AutoRead_Value & 0x2) {
+					printk("Fast Read Dual Output, ");
+					AutoMappingTab[AutoRead_cnt++] = 2;
+					MuxMappingTab[MuxRead_cnt++] = 2;
+				}
+				if(AutoRead_Value & 0x1) {
+					printk("Fast Read Dual IO, ");
+					AutoMappingTab[AutoRead_cnt++] = 3;
+					MuxMappingTab[MuxRead_cnt++] = 3;
+				}
+				printk("\n");
+			}
+		}
+	}
+	if(argc >= 4){	/* argv[3]: manual-read mode bitmask; mux entries 4..7 are the manual variants */
+		ManualRead_Value = simple_strtoul(argv[3], NULL, 10);
+		if(ManualRead_Value > 15) {
+			printk("\nError: ManualRead Value must be [0~15]!\n");
+			SF_Test_Helper();
+			return -4;
+		}
+		else {
+			ManualRead_cnt = 0;
+			printk("ManualRead Value is %u, ", ManualRead_Value);
+			if(ManualRead_Value == 0)
+				printk("no Manual Read Test\n");
+			else {
+				printk("including ");
+				if(ManualRead_Value & 0x8) {
+					printk("Single Read, ");
+					ManualMappingTab[ManualRead_cnt++] = 0;
+					MuxMappingTab[MuxRead_cnt++] = 4;
+				}
+				if(ManualRead_Value & 0x4) {
+					printk("Fast Read, ");
+					ManualMappingTab[ManualRead_cnt++] = 1;
+					MuxMappingTab[MuxRead_cnt++] = 5;
+				}
+				if(ManualRead_Value & 0x2) {
+					printk("Fast Read Dual Output, ");
+					ManualMappingTab[ManualRead_cnt++] = 2;
+					MuxMappingTab[MuxRead_cnt++] = 6;
+				}
+				if(ManualRead_Value & 0x1) {
+					printk("Fast Read Dual IO, ");
+					ManualMappingTab[ManualRead_cnt++] = 3;
+					MuxMappingTab[MuxRead_cnt++] = 7;
+				}
+				printk("\n");
+			}
+		}
+	}
+	if(argc >= 5){	/* argv[4]: mixed auto/manual test enable */
+		MuxRead_Value = simple_strtoul(argv[4], NULL, 10);
+		if(MuxRead_Value > 1) {
+			printk("\nError: MuxRead Value must be [0~1]!\n");
+			SF_Test_Helper();
+			return -5;
+		}
+		else
+			printk("MuxRead is %s\n", ((MuxRead_Value==1)?"enable":"disable"));
+	}
+	if(argc >= 6){	/* argv[5]: also program+verify each erased block */
+		isProgram_Value = simple_strtoul(argv[5], NULL, 10);
+		if(isProgram_Value > 1) {
+			printk("\nError: isProgram Value must be [0~1]!\n");
+			SF_Test_Helper();
+			return -6;
+		}
+		else
+			printk("Program is %s\n", ((isProgram_Value==1)?"enable":"disable"));
+	}
+#if 1
+	if(argc >= 7){	/* argv[6]: clock divider; 0 falls back to divider 10 */
+		CLKRate_Value = simple_strtoul(argv[6], NULL, 10);
+		if(CLKRate_Value > 31) {
+			printk("\nError: CLK Rate Value must be [0~31]!\n");
+			SF_Test_Helper();
+			return -7;
+		}
+		else {
+			if(CLKRate_Value == 0)
+				CLKRate_Value = 10;
+			printk("CLK Rate is %dMHZ\n", (SFC_CLOCK_MAX / CLKRate_Value));
+		}
+	}
+#else
+	if(argc >= 7){
+		CLKRate_Value = simple_strtoul(argv[6], NULL, 10);
+		if(CLKRate_Value > 2) {
+			printk("\nError: CLK Rate Value must be [0~2]!\n");
+			SF_Test_Helper();
+			return -7;
+		}
+		else
+			printk("CLK Rate is %s\n", CLKRate_TEXT[CLKRate_Value]);
+	}
+#endif
+	if(argc >= 8){	/* argv[7]: stop on first miscompare */
+		Exit_Value = simple_strtoul(argv[7], NULL, 10);
+		if(Exit_Value > 1) {
+			printk("\nError: Exit Value must be [0~1]!\n");
+			SF_Test_Helper();
+			return -8;
+		}
+		else
+			printk("Exit while error is %s\n", ((Exit_Value==1)?"enable":"disable"));
+	}
+	if(argc >= 9){	/* argv[8]: 3-byte vs 4-byte addressing for >=32MB parts */
+		Address3B4B_Value = simple_strtoul(argv[8], NULL, 10);
+		if(Address3B4B_Value > 2) {
+			printk("\nError: 3B4B Value must be [0~2]!\n");
+			SF_Test_Helper();
+			return -9;
+		}
+		else
+			printk("The way to access 32MB flash is %s\n", Address3B4B_TEXT[Address3B4B_Value]);
+	}
+	if(argc >= 10){	/* argv[9]: 0=normal cycle, 1..10=fixed pattern, 11=incrementing */
+		TestPattern_Value = simple_strtoul(argv[9], NULL, 10);
+		if(TestPattern_Value > 11) {
+			printk("\nError: Pattern Value must be [0~11]!\n");
+			SF_Test_Helper();
+			return -10;
+		}
+		else {
+			if(TestPattern_Value == 0)
+				printk("TestPattern is Normal Test\n");
+			else if(TestPattern_Value == 11)
+				printk("TestPattern is Increment Digital\n");
+			else
+				printk("TestPattern is %X\n", TestPattern[TestPattern_Value-1]);
+		}
+	}
+	if(argc >= 11){	/* argv[10]: hex start address, 64KB-aligned, min 0x20000 */
+		StartAddress_Value = simple_strtoul(argv[10], NULL, 16);
+		StartAddress_Value &= 0xFFFF0000;
+		if(StartAddress_Value < 0x20000) {
+			StartAddress_Value = 0x20000;
+		}
+		printk("Test Start Address 0x%08x\n", StartAddress_Value);
+	}
+
+	if(argc >= 12){	/* argv[11]: read-idle enable */
+		ReadIdle_Value = simple_strtoul(argv[11], NULL, 10);
+		if(ReadIdle_Value > 1) {
+			printk("\nError: RDIdle Value must be [0~1]!\n");
+			SF_Test_Helper();
+			return -11;
+		}
+		else
+			printk("Read Idle is %s\n", ((ReadIdle_Value==1)?"enable":"disable"));
+	}
+	if(argc >= 13){	/* argv[12]: Tn switching bitmask, bits 0..5 select T0..T5 */
+		Tn_Value = simple_strtoul(argv[12], NULL, 10);
+		if(Tn_Value > 0x3F) {
+			printk("\nError: Tn Value must be [0~0x3F]!\n");
+			SF_Test_Helper();
+			return -12;
+		}
+		else {
+			Tn_cnt = 0;
+			if(Tn_Value == 0)
+				printk("Tn Switching Disable\n");
+			else if((Tn_Value & (Tn_Value-1)) == 0) {	/* power of two: a single Tn, nothing to switch between */
+				Tn_cnt = 1;
+				printk("Tn Switching unnecessary\n");
+			} else {
+				if(Tn_Value & 0x1) {
+					printk("T0 ");
+					TnMappingTab[Tn_cnt++] = 0;
+				}
+				if(Tn_Value & 0x2) {
+					printk("T1 ");
+					TnMappingTab[Tn_cnt++] = 1;
+				}
+				if(Tn_Value & 0x4) {
+					printk("T2 ");
+					TnMappingTab[Tn_cnt++] = 2;
+				}
+				if(Tn_Value & 0x8) {
+					printk("T3 ");
+					TnMappingTab[Tn_cnt++] = 3;
+				}
+				if(Tn_Value & 0x10) {
+					printk("T4 ");
+					TnMappingTab[Tn_cnt++] = 4;
+				}
+				if(Tn_Value & 0x20) {
+					printk("T5 ");
+					TnMappingTab[Tn_cnt++] = 5;
+				}
+				printk("is Switching now!\n");
+			}
+		}
+	}
+
+#if 1
+	spiflash_clk_change(CLKRate_Value);
+	clk_rate = 0;
+#else
+	if(CLKRate_Value == 0) {
+		clk_rate = 0;
+		spiflash_clk_change_25MHZ();
+	} else if(CLKRate_Value == 1) {
+		clk_rate = 1;
+		spiflash_clk_change_50MHZ();
+	}
+#endif
+
+	map = kzalloc(sizeof(struct map_info), GFP_KERNEL);
+	if (!map) {
+		printk(KERN_WARNING "Failed to allocate memory for MAP\n");
+		return -ENOMEM;	/* fix: was "return NULL" -- type error in an int-returning function */
+	}
+	map->virt = 0xBC000000;
+
+	/* check flash ID */
+	mtd = spiflash_probe(map);
+	if (!mtd) {
+		printk ("\n########spiflash ID read error\n");
+		kzfree(map); return -ENOMEM; }	/* fix: bail out -- mtd->size is dereferenced right below */
+
+	if(mtd->size >= SIZE_32MiB){
+		if(Address3B4B_Value == 0) {
+			enter_4Byte_mode();
+			SEND_AUTO_4B_CMD;
+			printk("\n>>>come in 4Bytes Mode\n");
+		} else {
+			exit_4Byte_mode();
+			SEND_AUTO_3B_CMD;
+			printk("\n>>>exit 4Bytes Mode\n");
+		}
+	}
+
+	printk("\nAutoRead_cnt is %u, ManualRead_cnt is %u, MuxRead_cnt is %u\n", AutoRead_cnt, ManualRead_cnt, MuxRead_cnt);
+	for(i=0; i<AutoRead_cnt; i++)
+		printk("AutoRead[%d] is %u\n", i, AutoMappingTab[i]);
+	for(i=0; i<ManualRead_cnt; i++)
+		printk("ManualRead[%d] is %u\n", i, ManualMappingTab[i]);
+	for(i=0; i<MuxRead_cnt; i++)
+		printk("MuxRead[%d] is %u\n", i, MuxMappingTab[i]);
+
+	printk("\n>>>>>>>>>>>> SPI FLASH Test Begin!!!\n");
+	printk("\nVPint(CR_TIMER1_LDV) is 0x%08X\n", VPint(CR_TIMER1_LDV));
+	//VPint(CR_TIMER1_LDV) = 0xFFFFFFFF;
+	printk("\nVPint(CR_TIMER1_LDV) is 0x%08X\n", VPint(CR_TIMER1_LDV));
+
+#if 0
+	sf_regDef_test();
+	for(pattern=0; pattern<16; pattern++)
+		sf_regRW_test(pattern);
+	sf_regRW_test(0xFFFFFFFF);
+#endif
+
+	for(round = 0; round < Round_Value; round++) {
+		if(AutoRead_Value != 0) {
+			result = sf_auto_test(round);
+			if((result != 0) && (Exit_Value == 1))
+				return -13;
+		}
+		if(ManualRead_Value != 0) {
+			result = sf_manual_test(round);
+			if((result != 0) && (Exit_Value == 1))
+				return -14;
+		}
+		if(MuxRead_Value != 0) {
+			result = sf_mux_test(round);
+			if((result != 0) && (Exit_Value == 1))
+				return -15;
+		}
+	}
+
+	kzfree(map->fldrv_priv);
+	kzfree(map);
+	kzfree(mtd);
+	return 0;
+}
+
+struct map_info *map2=NULL;	/* lazily initialised on the first sf_task2_test() call */
+struct mtd_info *mtd2=NULL;	/* probed flash descriptor used by the task-2 test */
+unsigned long cur_se_off2=0x00600000;	/* current erase offset; advances each call, wraps at max_size */
+unsigned long max_size=0x00700000;	/* exclusive upper bound of the task-2 test window */
+int sf_task2_test(void)	/* background stress task: erase, verify and re-program one block per call */
+{
+	unsigned long retlen=0;
+	unsigned char se_buf[SE_MAX];
+	int i=0, j=0;
+	uint32 read_mode=0;
+	struct erase_info instr;
+	static unsigned long erasesize=0;
+	static unsigned long size=0;
+	static int first_flag = 0;
+	static unsigned char pp_data = 1;	/* fill byte; bumped after every full sweep of the window */
+	unsigned char reg_sr1=0;
+	unsigned long cur_pp_off=0;
+	unsigned char pp_buf[PP_MAX];
+
+	if(first_flag == 0) {	/* one-time lazy init of map2/mtd2 */
+		printk("\nTASK2::Init map2 & mtd2");
+		map2 = kzalloc(sizeof(struct map_info), GFP_KERNEL);
+		if (!map2) {
+			printk(KERN_WARNING "\nTASK2::Failed to allocate memory for MAP\n");
+			return -ENOMEM;	/* fix: was "return NULL" -- type error in an int-returning function */
+		}
+		map2->virt = 0xBC000000;
+
+		/* check flash ID */
+		mtd2 = spiflash_probe(map2);
+		if (!mtd2) {
+			printk ("\n########TASK::spiflash ID read error\n");
+			kzfree(map2); map2 = NULL; return -ENOMEM; }	/* fix: bail out -- mtd2 is dereferenced below; retried next call */
+		first_flag = 1;
+
+		erasesize = mtd2->erasesize;
+		size = mtd2->size;
+	}
+
+	instr.addr = cur_se_off2;
+	instr.len = erasesize;
+	instr.callback = NULL;
+	mtd_spiflash_erase(mtd2, &instr);
+
+	for(j=0; j<SE_CNT; j++){
+		spiflash_autoread_test(mtd2, cur_se_off2+j*SE_MAX, se_buf, SE_MAX, &retlen, (++read_mode));
+		for(i=0; i<SE_MAX; i++) {
+			if(se_buf[i] != 0xFF) {	/* erased flash must read back all-ones */
+				SEND_READ_REGISTER_CMD(SF_OP_RD_STATUS1, reg_sr1);
+				printk("\n##########TASK2::Erase fail at Address %X::index is %d, value is %X, should be %X, read_mode is %d, reg_sr1 is 0x%04x\n", cur_se_off2+j*SE_MAX+i, i, se_buf[i], 0xFF, AutoMappingTab[read_mode%AutoRead_cnt], reg_sr1);
+				dumpCell_buffer((u8 *)(se_buf), SE_MAX, cur_se_off2+j*SE_MAX);
+				goto e_fail;
+			}
+		}
+	}
+
+	for(i=0; i<PP_MAX; i++) {
+		pp_buf[i] = pp_data;
+	}
+	for(cur_pp_off = cur_se_off2; (cur_pp_off + PP_MAX) <= (cur_se_off2 + erasesize); cur_pp_off += PP_MAX) {
+		mtd_spiflash_write(mtd2, cur_pp_off, PP_MAX, &retlen, pp_buf);
+	}
+
+e_fail:
+	cur_se_off2 += erasesize;
+	if(cur_se_off2 >= max_size) {	/* wrap back to the window start and change the fill byte */
+		cur_se_off2 = 0x00600000;
+		pp_data++;
+	}
+	printk ("\nTASK2::finished___cur_se_off2 is 0x%08x\n", cur_se_off2);
+
+#if 0
+	spiflash_destroy(mtd2);
+	kzfree(map2);
+	kzfree(mtd2);
+#endif
+	return 0;
+}
+
+int sf_handler_test_entry(int argc, char *argv[])	/* CLI: read the JEDEC ID "count" times via the manual FIFOs */
+{
+	u32 i=0;
+	u32 count=10;
+	u32 autoMode=0;
+	u8 data[3]={0};
+	u8 to[257]={0};
+	u32 from = 0;
+
+	if(argc >= 2){
+		count = simple_strtoul(argv[1], NULL, 10);
+		printk("count is %u\n", count);	/* fix: %u for u32 (was %lu) */
+	}
+	if(argc >= 3){
+		autoMode = simple_strtoul(argv[2], NULL, 10);
+		printk("autoMode is %u\n", autoMode);	/* fix: %u for u32 (was %lu) */
+	}
+
+	for(i=0; i<count; i++) {
+		SEND_AUTO2MANUAL_CMD;
+		SEND_CSL_CMD;
+		SEND_OPFIFO_WRITE_CMD(OP_OS2IS, OP_REPEAT_ONE);
+		SEND_DFIFO_WRITE_CMD(SF_OP_RD_ID);
+		SEND_OPFIFO_WRITE_CMD(OP_INS, OP_REPEAT_THREE);
+		if(autoMode != 0) {	/* NOTE(review): auto-read while a manual op is in flight -- presumably a contention test; confirm */
+			memcpy((char *)to, (char *)(0xbc000000 + from), 0x100);
+		}
+		SEND_DFIFO_READ_CMD(*(data));
+		SEND_DFIFO_READ_CMD(*((data)+1));
+		SEND_DFIFO_READ_CMD(*((data)+2));
+		SEND_CSH_CMD;
+		SEND_MANUAL2AUTO_CMD;
+		printk("Flash ID is %x:%x:%x\n", data[0], data[1], data[2]);
+	}
+
+	return 0;
+}
+
+int sf_task2_test_entry(int argc, char *argv[])	/* CLI: set the task-2 background test enable flag */
+{
+	if(argc>=2){
+		spi_task2_en = simple_strtoul(argv[1], NULL, 10);	/* argv[1]: 0 = off, non-zero = on */
+	}
+	printk("test2_flag is %lu\n", spi_task2_en);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(sf_task2_test_entry);
+EXPORT_SYMBOL(spi_task2_en);
+EXPORT_SYMBOL(sf_task2_test);
+EXPORT_SYMBOL(sf_basic_test_entry);
+EXPORT_SYMBOL(sf_handler_test_entry);
+EXPORT_SYMBOL(spiflash_clk_change);
+
+#endif
+int sf_EPCheck_entry(int argc, char *argv[])	/* CLI: toggle erase/program verify checking */
+{
+	if(argc >= 2){
+		epcheck_flag = simple_strtoul(argv[1], NULL, 10);	/* argv[1]: 0 = disable, 1 = enable */
+		if(epcheck_flag > 1) {
+			printk("\nError: EPCheck Flag must be [0~1]!\n");
+			return -1;	/* NOTE(review): epcheck_flag keeps the rejected value here -- confirm this is intended */
+		}
+	}
+	printk("epcheck is %s\n", ((epcheck_flag==1)?"enable":"disable"));
+	return 0;
+}
+int sf_EPCheck_enable(void)	/* force erase/program verify checking on */
+{
+	epcheck_flag = 1;
+	return 0;	/* always succeeds */
+}
+int sf_EPCheck_disable(void)	/* force erase/program verify checking off */
+{
+	epcheck_flag = 0;
+	return 0;	/* always succeeds */
+}
+#ifdef TCSUPPORT_AUTOBENCH
+int sf_autobench_check(void)	/* factory self-check: ID read, erase+verify, program+verify of one block */
+{
+	int i=0;
+	unsigned long flash_id=0;
+	unsigned long retlen=0;
+	unsigned long erasesize=0;
+	unsigned long start_addr=0;
+	unsigned char se_buf[SE_MAX]={0};
+	unsigned char pp_buf[PP_MAX]={0};
+	unsigned char rd_buf[PP_MAX]={0};
+	unsigned char seed=0;
+	struct erase_info instr;
+
+
+	struct map_info *map=NULL;
+	struct mtd_info *mtd=NULL;
+
+	map = kzalloc(sizeof(struct map_info), GFP_KERNEL);
+	if (!map) {
+		printk(KERN_WARNING "Failed to allocate memory for MAP\n");
+		return -ENOMEM;	/* fix: was "return NULL" -- type error in an int-returning function */
+	}
+	map->virt = 0xBC000000;
+
+	/* check flash ID */
+	mtd = spiflash_probe(map);
+	if (!mtd) {
+		printk ("\n########spiflash ID read error\n");
+		kzfree(map); return -1; }	/* fix: bail out -- mtd is dereferenced right below */
+
+	erasesize = mtd->erasesize;
+	start_addr = 0x700000;
+
+	/* Check Flash ID */
+	flash_id = spiflash_read_id();
+	for (i=0; i < ARRAY_SIZE(flash_tables); i++) {
+		if ((MANUFACTURER_ID(flash_id) == flash_tables[i].mfr_id) &&
+			(DEVICE_ID(flash_id) == flash_tables[i].dev_id)) {
+			printk("mfr id: %x \n", MANUFACTURER_ID(flash_id));
+			printk("dev id: %x\n", DEVICE_ID(flash_id));
+
+			break;
+		}
+	}
+
+	if(i == ARRAY_SIZE(flash_tables)) {	/* ID not in the supported table */
+		goto read_id_fail;
+	}
+
+	/* Erase and Check */
+	//flash_erase(start_addr, erasesize);
+	spiflash_erase(mtd, start_addr);
+	//instr.addr = start_addr;
+	//instr.len = erasesize;
+	//tc3162wdog_kick();
+	//mtd_spiflash_erase(mtd, &instr);
+
+	//SEND_AUTO_READ_CMD;
+
+	//spiflash_autoread_test(mtd, start_addr, se_buf, SE_MAX, &retlen, (++read_mode));
+
+
+	spiflash_read(mtd->priv, start_addr, se_buf, SE_MAX);
+	//spiflash_read_internal(&mtd, start_addr, SE_MAX, &retlen, se_buf);
+	for(i=0; i<SE_MAX; i++) {
+		if(se_buf[i] != 0xFF) {	/* erased flash must read back all-ones */
+			printk("\nAuto Read fail at Address %X::index is %d, value is %X, should be %X, curLen is %d\n", start_addr+i, i, se_buf[i], 0xFF, SE_MAX);
+			goto erase_fail;
+		}
+	}
+
+	/* Program and Check */
+	seed = sf_rand() % SEED_MAX;
+	for(i=0; i<PP_MAX; i++) {	/* fix: bound was SEED_MAX; pp_buf[] holds PP_MAX bytes (matches sf_basic_test) */
+		pp_buf[i] = (seed+i) % SEED_MAX;
+	}
+	//flash_write(start_addr, PP_MAX, &retlen, pp_buf);
+	//spiflash_write(start_addr, PP_MAX, &retlen, pp_buf);
+	spiflash_write(mtd, pp_buf, start_addr, PP_MAX);
+	//SEND_AUTO_READ_CMD;
+	spiflash_read(mtd->priv, start_addr, rd_buf, PP_MAX);
+	//spiflash_man_read_data(&mtd, start_addr, PP_MAX, &retlen, rd_buf);
+	for(i=0; i<PP_MAX; i++) {
+		if(rd_buf[i] != pp_buf[i]) {
+			printk("\nRead fail at Address %X::index is %d, value is %X, should be %X, curLen is %d\n", start_addr+i, i, rd_buf[i], pp_buf[i], PP_MAX);
+			goto program_fail;
+		}
+	}
+
+	return 0;
+
+read_id_fail:
+erase_fail:
+program_fail:
+
+	return -1;
+
+}
+EXPORT_SYMBOL(sf_autobench_check);
+#endif
+EXPORT_SYMBOL(sf_EPCheck_entry);
+EXPORT_SYMBOL(sf_EPCheck_enable);
+EXPORT_SYMBOL(sf_EPCheck_disable);
+
+module_init(spiflash_probe_init);
+module_exit(spiflash_probe_exit);
+/*#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)*/
+EXPORT_SYMBOL(SPI_SEM);
+/*#endif*/
+
Index: linux-3.18.21/drivers/mtd/chips/newspiflash.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/chips/newspiflash.h 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,938 @@
+/*
+ * SPIFLASH support for TC3162
+ */
+
+/*
+ * SPI Flash Memory support header file.
+ *
+ * Copyright (c) 2005, Atheros Communications Inc.
+ * Copyright (C) 2006 FON Technology, SL.
+ * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __NEWSPIFLASH_H
+#define __NEWSPIFLASH_H
+
+#define SF_BASE 0xBFA10000
+#define SF_READ_MODE (SF_BASE + 0x0000)
+#define SF_READ_IDLE_EN (SF_BASE + 0x0004)
+#define SF_SIDLY (SF_BASE + 0x0008)
+#define SF_CSHEXT (SF_BASE + 0x000C)
+#define SF_CSLEXT (SF_BASE + 0x0010)
+#define SF_MTX_MODE_TOG (SF_BASE + 0x0014)
+#define SF_RDCTL_FSM (SF_BASE + 0x0018)
+#define SF_MACMUX_SEL (SF_BASE + 0x001C)
+#define SF_MANUAL_EN (SF_BASE + 0x0020)
+#define SF_MANUAL_OPFIFO_EMPTY (SF_BASE + 0x0024)
+#define SF_MANUAL_OPFIFO_WDATA (SF_BASE + 0x0028)
+#define SF_MANUAL_OPFIFO_FULL (SF_BASE + 0x002C)
+#define SF_MANUAL_OPFIFO_WR (SF_BASE + 0x0030)
+#define SF_MANUAL_DFIFO_FULL (SF_BASE + 0x0034)
+#define SF_MANUAL_DFIFO_WDATA (SF_BASE + 0x0038)
+#define SF_MANUAL_DFIFO_EMPTY (SF_BASE + 0x003C)
+#define SF_MANUAL_DFIFO_RD (SF_BASE + 0x0040)
+#define SF_MANUAL_DFIFO_RDATA (SF_BASE + 0x0044)
+#define SF_DUMMY (SF_BASE + 0x0080)
+#define SF_ADDR_3B4B (SF_BASE + 0x0084)
+#define SF_PROBE_SEL (SF_BASE + 0x0088)
+#define SF_CFG3B4B_EN (SF_BASE + 0x008C)
+#define SF_INTERRUPT (SF_BASE + 0x0090)
+#define SF_INTERRUPT_EN (SF_BASE + 0x0094)
+#define SF_SI_CK_SEL (SF_BASE + 0x009C)
+#if defined(TCSUPPORT_CPU_EN7516)||defined(TCSUPPORT_CPU_EN7527)
+#define SF_CLK_CHANGE (0xBFA2011C)
+#elif defined(TCSUPPORT_CPU_EN7512)||defined(TCSUPPORT_CPU_EN7521)
+#define SF_CLK_CHANGE (0xBFA200CC)
+#else
+#define SF_CLK_CHANGE (0xBFA20068)
+#endif
+
+#define SF_READ_MODE_VALUE VPint(SF_READ_MODE)
+#define SF_READ_IDLE_EN_VALUE VPint(SF_READ_IDLE_EN)
+#define SF_SIDLY_VALUE VPint(SF_SIDLY)
+#define SF_CSHEXT_VALUE VPint(SF_CSHEXT)
+#define SF_CSLEXT_VALUE VPint(SF_CSLEXT)
+#define SF_MTX_MODE_TOG_VALUE VPint(SF_MTX_MODE_TOG)
+#define SF_RDCTL_FSM_VALUE VPint(SF_RDCTL_FSM)
+#define SF_MACMUX_SEL_VALUE VPint(SF_MACMUX_SEL)
+#define SF_MANUAL_EN_VALUE VPint(SF_MANUAL_EN)
+#define SF_MANUAL_OPFIFO_EMPTY_VALUE VPint(SF_MANUAL_OPFIFO_EMPTY)
+#define SF_MANUAL_OPFIFO_WDATA_VALUE VPint(SF_MANUAL_OPFIFO_WDATA)
+#define SF_MANUAL_OPFIFO_FULL_VALUE VPint(SF_MANUAL_OPFIFO_FULL)
+#define SF_MANUAL_OPFIFO_WR_VALUE VPint(SF_MANUAL_OPFIFO_WR)
+#define SF_MANUAL_DFIFO_FULL_VALUE VPint(SF_MANUAL_DFIFO_FULL)
+#define SF_MANUAL_DFIFO_WDATA_VALUE VPint(SF_MANUAL_DFIFO_WDATA)
+#define SF_MANUAL_DFIFO_EMPTY_VALUE VPint(SF_MANUAL_DFIFO_EMPTY)
+#define SF_MANUAL_DFIFO_RD_VALUE VPint(SF_MANUAL_DFIFO_RD)
+#define SF_MANUAL_DFIFO_RDATA_VALUE VPint(SF_MANUAL_DFIFO_RDATA)
+#define SF_DUMMY_VALUE VPint(SF_DUMMY)
+#define SF_ADDR_3B4B_VALUE VPint(SF_ADDR_3B4B)
+#define SF_PROBE_SEL_VALUE VPint(SF_PROBE_SEL)
+#define SF_CFG3B4B_EN_VALUE VPint(SF_CFG3B4B_EN)
+#define SF_INTERRUPT_VALUE VPint(SF_INTERRUPT)
+#define SF_INTERRUPT_EN_VALUE VPint(SF_INTERRUPT_EN)
+#define SF_SI_CK_SEL_VALUE VPint(SF_SI_CK_SEL)
+#define SF_CLK_CHANGE_VALUE VPint(SF_CLK_CHANGE)
+
+#define SF_OP_WR_ENABLE 0x06 /* Write Enable */
+#define SF_OP_WR_DISABLE 0x04 /* Write Disable */
+#define SF_OP_RD_STATUS1 0x05 /* Read Status 1 */
+#define SF_OP_WR_STATUS1 0x01 /* Write Status 1 */
+#define SF_OP_RD_STATUS2 0x35 /* Read Status 2 */
+#define SF_OP_WR_STATUS2 0x31 /* Write Status 2 */
+#define SF_OP_RD_STATUS3 0x15 /* Read Status 3 */
+#define SF_OP_WR_STATUS3 0x11 /* Write Status 3 */
+#define SF_OP_RD_STATUSEX 0xC8 /* Read extend Status */ // only for W25Q256 & N25Q256
+#define SF_OP_WR_STATUSEX 0xC5 /* Write extend Status */ // only for W25Q256 & N25Q256
+#define SF_OP_RD_BANKREG 0x16 /* Read Bank Register */ // only for S25FL256S
+#define SF_OP_WR_BANKREG 0x17 /* Write Bank Register */ // only for S25FL256S
+#define SF_OP_RD_ENHANCED_CFGREG 0x65 /* Read Enhanced Register */ // only for N25Q256
+#define SF_OP_WR_ENHANCED_CFGREG 0x61 /* Write Enhanced Register */ // only for N25Q256
+#define SF_OP_RD_VOLATILE_CFGREG 0x85 /* Read Volatile Register */ // only for N25Q256
+#define SF_OP_WR_VOLATILE_CFGREG 0x81 /* Write Volatile Register */ // only for N25Q256
+#define SF_OP_RD_NONVOLATILE_CFGREG 0xB5 /* Read Non-Volatile Register */ // only for N25Q256
+#define SF_OP_WR_NONVOLATILE_CFGREG 0xB1 /* Write Non-Volatile Register */ // only for N25Q256
+#define SF_OP_RD_DATA 0x03 /* Read Data */
+#define SF_OP_FAST_RD_DATA 0x0B /* Fast Read Data */
+#define SF_OP_FAST_RD_DUAL_OUT 0x3B /* Fast Read Data Dual Output */
+#define SF_OP_FAST_RD_DUAL_IO 0xBB /* Fast Read Data Dual I/O */
+#define SF_OP_PAGE_PROGRAM 0x02 /* Page Program */
+#define SF_OP_SECTOR_ERASE 0xD8 /* Sector Erase */
+#define SF_OP_BULK_ERASE 0xC7 /* Bulk Erase */
+#define SF_OP_4BYTES_RD_DATA 0x13 /* Read Data in 4Bytes Address */
+#define SF_OP_4BYTES_FAST_RD_DATA 0x0C /* Fast Read Data in 4Bytes Address */
+#define SF_OP_4BYTES_FAST_RD_DUAL_OUT 0x3C /* Fast Read Data Dual Output in 4Bytes Address */
+#define SF_OP_4BYTES_FAST_RD_DUAL_IO 0xBC /* Fast Read Data Dual I/O in 4Bytes Address */
+#define SF_OP_4BYTES_PAGE_PROGRAM 0x12 /* Page Program in 4Bytes Address */
+#define SF_OP_4BYTES_SECTOR_ERASE 0xDC /* Sector Erase in 4Bytes Address */
+#define SF_OP_ENTER_4BYTES_MODE 0xB7 /* enter 4bytes address mode */
+#define SF_OP_EXIT_4BYTES_MODE 0xE9 /* exit 4bytes address mode */
+#define SF_OP_POWER_DOWN 0xB9 /* Power Down to reduce power */
+#define SF_OP_RELEASE_POWER_DOWN 0xAB /* release power down */
+#define SF_OP_RD_ID 0x9F /* Read JEDEC ID */
+
+
+#define SF_STATUS_WIP 0x01 /* Write-In-Progress */
+#define SF_STATUS_WEL 0x02 /* Write Enable Latch */
+#define SF_STATUS_BP0 0x04 /* Block Protect 0 */
+#define SF_STATUS_BP1 0x08 /* Block Protect 1 */
+#define SF_STATUS_BP2 0x10 /* Block Protect 2 */
+#define SF_STATUS_SRWD 0x80 /* Status Register Write Disable */
+
+#define SF_STATUS_QE 0x02 /* Quad Enable */
+
+
+#define OP_CSH 0x00
+#define OP_CSL 0x01
+#define OP_CK 0x02
+#define OP_OUTS 0x08
+#define OP_OUTD 0x09
+#define OP_OUTQ 0x0A
+#define OP_INS 0x0C
+#define OP_INS0 0x0D
+#define OP_IND 0x0E
+#define OP_INQ 0x0F
+#define OP_OS2IS 0x10
+#define OP_OS2ID 0x11
+#define OP_OS2IQ 0x12
+#define OP_OD2IS 0x13
+#define OP_OD2ID 0x14
+#define OP_OD2IQ 0x15
+#define OP_OQ2IS 0x16
+#define OP_OQ2ID 0x17
+#define OP_OQ2IQ 0x18
+#define OP_OSNIS 0x19
+#define OP_ODNID 0x1A
+
+#define OP_SHIFT (9)
+#define OP_REPEAT_ONE (1)
+#define OP_REPEAT_TWO (2)
+#define OP_REPEAT_THREE (3)
+#define OP_REPEAT_FOUR (4)
+#define OP_REPEAT_FIVE (5)
+
+#define OP_ENABLE (1)
+#define OP_DISABLE (0)
+#define OP_CMD_MASK (0x1f)
+#define OP_LEN_MASK (0x1ff)
+#define SF_RD_MAX (0x100)//(0x1ff)
+#define SF_PP_MAX (0x100)
+
+#define DDATA_ENABLE (1)
+#define DDATA_DISABLE (0)
+#define DDATA_MASK (0xff)
+
+#define SI_SEL_MASK (0x7)
+
+#define DUMMY_DATA 0x0
+
+#define AUTO_MTXMODE 0x0
+#define AUTO_MANUALEN 0x0
+#define MANUAL_MTXMODE 0x9
+#define MANUAL_MANUALEN 0x1
+#define MANUAL_MACMUXSEL 0x1
+
+#define READ_DATA 0x0
+#define FAST_READ 0x1
+#define FAST_READ_DUALOUT 0x2
+#define FAST_READ_DUALIO 0x3
+
+#define CFG3B4B_DIS (0)
+#define CFG3B4B_EN (1)
+#define AUTO_3BMODE (0)
+#define AUTO_4BMODE (1)
+#define RD_IDLE_DIS (0)
+#define RD_IDLE_EN (1)
+#define CUR_AUTO_MODE (0)
+#define CUR_MANUAL_MODE (1)
+
+
+#define EXTSYNC_RFIFO_UNF_INTR_EN (0x100)
+#define EXTSYNC_RFIFO_OVF_INTR_EN (0x80)
+#define EXTSYNC_TPFIFO_UNF_INTR_EN (0x40)
+#define EXTSYNC_TPFIFO_OVF_INTR_EN (0x20)
+#define MANUAL_DFIFO_UNF_INTR_EN (0x10)
+#define MANUAL_DFIFO_OVF_INTR_EN (0x8)
+#define MANUAL_OPFIFO_UNF_INTR_EN (0x4)
+#define MANUAL_OPFIFO_OVF_INTR_EN (0x2)
+#define AUTO_MANUAL_INTR_EN (0x1)
+
+#define AUTO_READ (0)
+#define AUTO_FAST_READ (1)
+#define AUTO_FAST_READ_DUALOUT (2)
+#define AUTO_FAST_READ_DUALIO (3)
+
+#define SF_SR1_DEFAULT 0x0
+#define SF_SR2_DEFAULT 0x0
+#define SF_BANKREG_3B 0x0
+#define SF_BANKREG_4B 0x80
+
+#define WriteReg(reg, data) (VPint(reg) = data)
+#define ReadReg(reg) (VPint(reg))
+#define bReadReg(reg, mask) (VPint(reg) & mask)
+
+#define wLoWord(data) ((data) & 0xffff)
+#define wHiWord(data) (((data)>>16) & 0xffff)
+#define bLoByte(data) ((data) & 0xff)
+#define bHiByte(data) (((data)>>8) & 0xff)
+
+// Send change sf clk to 25MHZ command
+#define SEND_CLK_25MHZ_CMD \
+ { \
+ /* change sf clk to 25MHZ */ \
+ WriteReg(SF_CLK_CHANGE, 6); \
+ }
+
+// Send change sf clk to 50MHZ command
+#define SEND_CLK_50MHZ_CMD \
+ { \
+ /* change sf clk to 50MHZ */ \
+ WriteReg(SF_CLK_CHANGE, 3); \
+ }
+
+// Send opfifo write command
+#define SEND_OPFIFO_WRITE_CMD(op_cmd, op_len) \
+ { \
+ /* write op_cmd to register OPFIFO_WDATA */ \
+ WriteReg(SF_MANUAL_OPFIFO_WDATA, ((((op_cmd) & OP_CMD_MASK) << OP_SHIFT) | ((op_len) & OP_LEN_MASK))); \
+ /* wait until opfifo is not full */ \
+ while(ReadReg(SF_MANUAL_OPFIFO_FULL)) ; \
+ /* enable write from register OPFIFO_WDATA to opfifo */ \
+ WriteReg(SF_MANUAL_OPFIFO_WR, OP_ENABLE); \
+ /* wait until opfifo is empty */ \
+ while(!ReadReg(SF_MANUAL_OPFIFO_EMPTY)); \
+ }
+
+// Send dfifo write command
+// Parameters:
+// data: type is char
+#define SEND_DFIFO_WRITE_CMD(data) \
+ { \
+ /* wait until dfifo is not full */ \
+ while(ReadReg(SF_MANUAL_DFIFO_FULL)) ; \
+ /* write data to register DFIFO_WDATA */ \
+ WriteReg(SF_MANUAL_DFIFO_WDATA, ((data) & DDATA_MASK)); \
+ }
+
+// Send dfifo read command
+// Parameters:
+// data: type is char
+#define SEND_DFIFO_READ_CMD(data) \
+ { \
+ /* wait until dfifo is not empty */ \
+ while(ReadReg(SF_MANUAL_DFIFO_EMPTY)) ; \
+ /* read from dfifo to register DFIFO_RDATA */ \
+ (data) = ReadReg(SF_MANUAL_DFIFO_RDATA); \
+ /* enable register DFIFO_RD to read next byte */ \
+ WriteReg(SF_MANUAL_DFIFO_RD, DDATA_ENABLE); \
+ }
+
+// Send manual mode begin command
+#define SEND_MANUAL_BEGIN_CMD \
+ { \
+ sf_manual_begin(); \
+ }
+
+// Send manual mode end command
+#define SEND_MANUAL_END_CMD \
+ { \
+ sf_manual_end(); \
+ }
+
+// Send manual mode begin command
+#define SEND_MANUAL_WREN_BEGIN_CMD \
+ { \
+ sf_manual_wren_begin(); \
+ }
+
+// Send manual mode end command
+#define SEND_MANUAL_WREN_END_CMD \
+ { \
+ sf_manual_wren_end(); \
+ }
+
+// Send CSL command
+#define SEND_CSL_CMD \
+ { \
+ SEND_OPFIFO_WRITE_CMD(OP_CSL, OP_REPEAT_ONE) \
+ SEND_OPFIFO_WRITE_CMD(OP_CSL, OP_REPEAT_ONE) \
+ }
+
+// Send CSH command
+#define SEND_CSH_CMD \
+ { \
+ SEND_OPFIFO_WRITE_CMD(OP_CSH, OP_REPEAT_ONE) \
+ SEND_OPFIFO_WRITE_CMD(OP_CK, OP_REPEAT_FIVE) \
+ }
+
+// Send write enable command
+#define SEND_WREN_CMD \
+ { \
+ send_single_opcode_cmd(SF_OP_WR_ENABLE); \
+ }
+
+// Send write disable command
+#define SEND_WRDIS_CMD \
+ { \
+ SEND_MANUAL_BEGIN_CMD \
+ send_single_opcode_cmd(SF_OP_WR_DISABLE); \
+ SEND_MANUAL_END_CMD \
+ }
+
+// Send read register internal command
+// Parameters:
+// data: type is char
+#define SEND_READ_REGISTER_INTERNAL_CMD(op_cmd, data) \
+ { \
+ SEND_CSL_CMD \
+ /* write op_cmd */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OS2IS, OP_REPEAT_ONE) \
+ SEND_DFIFO_WRITE_CMD(op_cmd) \
+ /* read register */ \
+ SEND_OPFIFO_WRITE_CMD(OP_INS, OP_REPEAT_ONE) \
+ SEND_DFIFO_READ_CMD(data) \
+ SEND_CSH_CMD \
+ }
+
+// Send general read register command
+// Parameters:
+// data: type is char
+#define SEND_READ_REGISTER_CMD(op_cmd, data) \
+ { \
+ SEND_MANUAL_BEGIN_CMD \
+ SEND_READ_REGISTER_INTERNAL_CMD(op_cmd, data) \
+ SEND_MANUAL_END_CMD \
+ }
+
+// Send write register internal command
+// Parameters:
+// data: type is char
+#define SEND_WRITE_REGISTER_INTERNAL_CMD(op_cmd, data) \
+ { \
+ SEND_CSL_CMD \
+ /* write op_cmd */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OUTS, OP_REPEAT_ONE) \
+ SEND_DFIFO_WRITE_CMD(op_cmd) \
+ /* write register */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OUTS, OP_REPEAT_ONE) \
+ SEND_DFIFO_WRITE_CMD(data) \
+ SEND_CSH_CMD \
+ }
+
+// Send general write register command
+// Parameters:
+// data: type is char
+#define SEND_WRITE_REGISTER_CMD(op_cmd, data) \
+ { \
+ SEND_MANUAL_WREN_BEGIN_CMD \
+ SEND_WRITE_REGISTER_INTERNAL_CMD(op_cmd, data) \
+ SEND_MANUAL_WREN_END_CMD \
+ }
+
+// Send wait WIP bit idle command
+// Parameters:
+// reg_val: type is char
+#define SEND_WAIT_WIP_IDLE_INTERNAL_CMD(reg_val) \
+ { \
+ do { \
+ SEND_READ_REGISTER_INTERNAL_CMD(SF_OP_RD_STATUS1, reg_val) \
+ } while (reg_val & SF_STATUS_WIP); \
+ }
+
+// Send read data command
+// Parameters:
+// dAddr: 3bytes or 4bytes length, depending on the value of mode
+// data: type is char*
+// len: the value is [1, FlashSize-1]
+// mode: 1 means 4bytes, 0 means 3bytes
+#define SEND_READ_CMD(dAddr, data, len, mode) \
+ { \
+ SEND_MANUAL_BEGIN_CMD \
+ SEND_CSL_CMD \
+ /* write op_cmd 0x03 */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OUTS, OP_REPEAT_ONE) \
+ SEND_DFIFO_WRITE_CMD(SF_OP_RD_DATA) \
+ /* write address */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OUTS, ((mode)?0x03:0x02)) \
+ if(mode){ \
+ SEND_DFIFO_WRITE_CMD(bHiByte(wHiWord(dAddr))) \
+ } \
+ SEND_DFIFO_WRITE_CMD(bLoByte(wHiWord(dAddr))) \
+ SEND_DFIFO_WRITE_CMD(bHiByte(wLoWord(dAddr))) \
+ SEND_OPFIFO_WRITE_CMD(OP_OS2IS, OP_REPEAT_ONE) \
+ SEND_DFIFO_WRITE_CMD(bLoByte(wLoWord(dAddr))) \
+ /* read data */ \
+ while((len) > 0) { \
+ if((len) % SF_RD_MAX) { \
+ SEND_OPFIFO_WRITE_CMD(OP_INS, ((len)%SF_RD_MAX)) \
+ do \
+ { \
+ SEND_DFIFO_READ_CMD(*((data)++)) \
+ } while ((--(len)) % SF_RD_MAX); \
+ } else { \
+ SEND_OPFIFO_WRITE_CMD(OP_INS, SF_RD_MAX) \
+ do \
+ { \
+ SEND_DFIFO_READ_CMD(*((data)++)) \
+ } while ((--(len)) % SF_RD_MAX); \
+ } \
+ } \
+ SEND_CSH_CMD \
+ SEND_MANUAL_END_CMD \
+ }
+
+// Send fast read command
+// Parameters:
+// dAddr: 3bytes or 4bytes length, depending on the value of mode
+// data: type is char*
+// len: the value is [1, FlashSize-1]
+// mode: 1 means 4bytes, 0 means 3bytes
+#define SEND_FASTREAD_CMD(dAddr, data, len, mode) \
+ { \
+ SEND_MANUAL_BEGIN_CMD \
+ SEND_CSL_CMD \
+ /* write op_cmd 0x0B */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OUTS, OP_REPEAT_ONE) \
+ SEND_DFIFO_WRITE_CMD(SF_OP_FAST_RD_DATA) \
+ /* write address */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OUTS, ((mode)?0x04:0x03)) \
+ if(mode){ \
+ SEND_DFIFO_WRITE_CMD(bHiByte(wHiWord(dAddr))) \
+ } \
+ SEND_DFIFO_WRITE_CMD(bLoByte(wHiWord(dAddr))) \
+ SEND_DFIFO_WRITE_CMD(bHiByte(wLoWord(dAddr))) \
+ SEND_DFIFO_WRITE_CMD(bLoByte(wLoWord(dAddr))) \
+ /* write dummy data */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OS2IS, OP_REPEAT_ONE) \
+ SEND_DFIFO_WRITE_CMD(DUMMY_DATA) \
+ /* read data */ \
+ while((len) > 0) { \
+ if((len) % SF_RD_MAX) { \
+ SEND_OPFIFO_WRITE_CMD(OP_INS, ((len)%SF_RD_MAX)) \
+ do \
+ { \
+ SEND_DFIFO_READ_CMD(*((data)++)) \
+ } while ((--(len)) % SF_RD_MAX); \
+ } else { \
+ SEND_OPFIFO_WRITE_CMD(OP_INS, SF_RD_MAX) \
+ do \
+ { \
+ SEND_DFIFO_READ_CMD(*((data)++)) \
+ } while ((--(len)) % SF_RD_MAX); \
+ } \
+ } \
+ SEND_CSH_CMD \
+ SEND_MANUAL_END_CMD \
+ }
+
+// Send fast read dual output command
+// Parameters:
+// dAddr: 3bytes or 4bytes length, depending on the value of mode
+// data: type is char*
+// len: the value is [1, FlashSize-1]
+// mode: 1 means 4bytes, 0 means 3bytes
+#define SEND_FASTREAD_DUALOUT_CMD(dAddr, data, len, mode) \
+ { \
+ SEND_MANUAL_BEGIN_CMD \
+ SEND_CSL_CMD \
+ /* write op_cmd 0x3B */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OUTS, OP_REPEAT_ONE) \
+ SEND_DFIFO_WRITE_CMD(SF_OP_FAST_RD_DUAL_OUT) \
+ /* write address */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OUTS, ((mode)?0x04:0x03)) \
+ if(mode){ \
+ SEND_DFIFO_WRITE_CMD(bHiByte(wHiWord(dAddr))) \
+ } \
+ SEND_DFIFO_WRITE_CMD(bLoByte(wHiWord(dAddr))) \
+ SEND_DFIFO_WRITE_CMD(bHiByte(wLoWord(dAddr))) \
+ SEND_DFIFO_WRITE_CMD(bLoByte(wLoWord(dAddr))) \
+ /* write dummy data */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OS2ID, OP_REPEAT_ONE) \
+ SEND_DFIFO_WRITE_CMD(DUMMY_DATA) \
+ /* read data */ \
+ while((len) > 0) { \
+ if((len) % SF_RD_MAX) { \
+ SEND_OPFIFO_WRITE_CMD(OP_IND, ((len)%SF_RD_MAX)) \
+ do \
+ { \
+ SEND_DFIFO_READ_CMD(*((data)++)) \
+ } while ((--(len)) % SF_RD_MAX); \
+ } else { \
+ SEND_OPFIFO_WRITE_CMD(OP_IND, SF_RD_MAX) \
+ do \
+ { \
+ SEND_DFIFO_READ_CMD(*((data)++)) \
+ } while ((--(len)) % SF_RD_MAX); \
+ } \
+ } \
+ SEND_CSH_CMD \
+ SEND_MANUAL_END_CMD \
+ }
+
+// Send fast read dual I/O command
+// Parameters:
+// dAddr: 3bytes or 4bytes length, depending on the value of mode
+// data: type is char*
+// len: the value is [1, FlashSize-1]
+// mode: 1 means 4bytes, 0 means 3bytes
+#define SEND_FASTREAD_DUALIO_CMD(dAddr, data, len, mode) \
+ { \
+ SEND_MANUAL_BEGIN_CMD \
+ SEND_CSL_CMD \
+ /* write op_cmd 0xBB */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OUTS, OP_REPEAT_ONE) \
+ SEND_DFIFO_WRITE_CMD(SF_OP_FAST_RD_DUAL_IO) \
+ /* write address */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OUTD, ((mode)?0x04:0x03)) \
+ if(mode){ \
+ SEND_DFIFO_WRITE_CMD(bHiByte(wHiWord(dAddr))) \
+ } \
+ SEND_DFIFO_WRITE_CMD(bLoByte(wHiWord(dAddr))) \
+ SEND_DFIFO_WRITE_CMD(bHiByte(wLoWord(dAddr))) \
+ SEND_DFIFO_WRITE_CMD(bLoByte(wLoWord(dAddr))) \
+ /* write dummy data */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OD2ID, OP_REPEAT_ONE) \
+ SEND_DFIFO_WRITE_CMD(DUMMY_DATA) \
+ /* read data */ \
+ while((len) > 0) { \
+ if((len) % SF_RD_MAX) { \
+ SEND_OPFIFO_WRITE_CMD(OP_IND, ((len)%SF_RD_MAX)) \
+ do \
+ { \
+ SEND_DFIFO_READ_CMD(*((data)++)) \
+ } while ((--(len)) % SF_RD_MAX); \
+ } else { \
+ SEND_OPFIFO_WRITE_CMD(OP_IND, SF_RD_MAX) \
+ do \
+ { \
+ SEND_DFIFO_READ_CMD(*((data)++)) \
+ } while ((--(len)) % SF_RD_MAX); \
+ } \
+ } \
+ SEND_CSH_CMD \
+ SEND_MANUAL_END_CMD \
+ }
+
+// Send page program command
+// Parameters:
+// dAddr: 3bytes or 4bytes length, depending on the value of mode
+// data: type is char*
+// len: the value is [1, 256]
+// mode: 1 means 4bytes, 0 means 3bytes
+#define SEND_PAGE_PROGRAM_CMD(dAddr, data, len, mode) \
+ { \
+ SEND_MANUAL_WREN_BEGIN_CMD \
+ SEND_CSL_CMD \
+ /* write op_cmd 0x02 */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OUTS, OP_REPEAT_ONE) \
+ SEND_DFIFO_WRITE_CMD(SF_OP_PAGE_PROGRAM) \
+ /* write address */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OUTS, ((mode)?0x04:0x03)) \
+ if(mode){ \
+ SEND_DFIFO_WRITE_CMD(bHiByte(wHiWord(dAddr))) \
+ } \
+ SEND_DFIFO_WRITE_CMD(bLoByte(wHiWord(dAddr))) \
+ SEND_DFIFO_WRITE_CMD(bHiByte(wLoWord(dAddr))) \
+ SEND_DFIFO_WRITE_CMD(bLoByte(wLoWord(dAddr))) \
+ /* page program */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OUTS, (len)) \
+ do \
+ { \
+ SEND_DFIFO_WRITE_CMD(*((data)++)) \
+ } while ((--(len)) > 0); \
+ SEND_CSH_CMD \
+ SEND_MANUAL_WREN_END_CMD \
+ }
+
+// Send sector erase command
+// Parameters:
+// dAddr: 3bytes or 4bytes length, depending on the value of mode
+// mode: 1 means 4bytes, 0 means 3bytes
+#define SEND_SECTOR_ERASE_CMD(dAddr, mode) \
+ { \
+ SEND_MANUAL_WREN_BEGIN_CMD \
+ SEND_CSL_CMD \
+ /* write op_cmd 0xD8 */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OUTS, OP_REPEAT_ONE) \
+ SEND_DFIFO_WRITE_CMD(SF_OP_SECTOR_ERASE) \
+ /* write address */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OUTS, ((mode)?0x04:0x03)) \
+ if(mode){ \
+ SEND_DFIFO_WRITE_CMD(bHiByte(wHiWord(dAddr))) \
+ } \
+ SEND_DFIFO_WRITE_CMD(bLoByte(wHiWord(dAddr))) \
+ SEND_DFIFO_WRITE_CMD(bHiByte(wLoWord(dAddr))) \
+ SEND_DFIFO_WRITE_CMD(bLoByte(wLoWord(dAddr))) \
+ SEND_CSH_CMD \
+ SEND_MANUAL_WREN_END_CMD \
+ }
+
+// Send bulk erase command
+#define SEND_BULK_ERASE_CMD \
+ { \
+ SEND_MANUAL_WREN_BEGIN_CMD \
+ send_single_opcode_cmd(SF_OP_BULK_ERASE); \
+ SEND_MANUAL_WREN_END_CMD \
+ }
+
+// Send 4-Bytes read data command
+// Parameters:
+// dAddr: 4bytes length
+// data: type is char*
+// len: the value is [1, FlashSize-1]
+#define SEND_4BYTES_READ_CMD(dAddr, data, len) \
+ { \
+ SEND_MANUAL_BEGIN_CMD \
+ SEND_CSL_CMD \
+ /* write op_cmd 0x13 */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OUTS, OP_REPEAT_ONE) \
+ SEND_DFIFO_WRITE_CMD(SF_OP_4BYTES_RD_DATA) \
+ /* write address */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OUTS, OP_REPEAT_THREE) \
+ SEND_DFIFO_WRITE_CMD(bHiByte(wHiWord(dAddr))) \
+ SEND_DFIFO_WRITE_CMD(bLoByte(wHiWord(dAddr))) \
+ SEND_DFIFO_WRITE_CMD(bHiByte(wLoWord(dAddr))) \
+ SEND_OPFIFO_WRITE_CMD(OP_OS2IS, OP_REPEAT_ONE) \
+ SEND_DFIFO_WRITE_CMD(bLoByte(wLoWord(dAddr))) \
+ /* read data */ \
+ while((len) > 0) { \
+ if((len) % SF_RD_MAX) { \
+ SEND_OPFIFO_WRITE_CMD(OP_INS, ((len)%SF_RD_MAX)) \
+ do \
+ { \
+ SEND_DFIFO_READ_CMD(*((data)++)) \
+ } while ((--(len)) % SF_RD_MAX); \
+ } else { \
+ SEND_OPFIFO_WRITE_CMD(OP_INS, SF_RD_MAX) \
+ do \
+ { \
+ SEND_DFIFO_READ_CMD(*((data)++)) \
+ } while ((--(len)) % SF_RD_MAX); \
+ } \
+ } \
+ SEND_CSH_CMD \
+ SEND_MANUAL_END_CMD \
+ }
+
+// Send 4-Bytes fast read command
+// Parameters:
+// dAddr: 4bytes length
+// data: type is char*
+// len: the value is [1, FlashSize-1]
+#define SEND_4BYTES_FASTREAD_CMD(dAddr, data, len) \
+ { \
+ SEND_MANUAL_BEGIN_CMD \
+ SEND_CSL_CMD \
+ /* write op_cmd 0x0C */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OUTS, OP_REPEAT_ONE) \
+ SEND_DFIFO_WRITE_CMD(SF_OP_4BYTES_FAST_RD_DATA) \
+ /* write address */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OUTS, OP_REPEAT_FOUR) \
+ SEND_DFIFO_WRITE_CMD(bHiByte(wHiWord(dAddr))) \
+ SEND_DFIFO_WRITE_CMD(bLoByte(wHiWord(dAddr))) \
+ SEND_DFIFO_WRITE_CMD(bHiByte(wLoWord(dAddr))) \
+ SEND_DFIFO_WRITE_CMD(bLoByte(wLoWord(dAddr))) \
+ /* write dummy data */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OS2IS, OP_REPEAT_ONE) \
+ SEND_DFIFO_WRITE_CMD(DUMMY_DATA) \
+ /* read data */ \
+ while((len) > 0) { \
+ if((len) % SF_RD_MAX) { \
+ SEND_OPFIFO_WRITE_CMD(OP_INS, ((len)%SF_RD_MAX)) \
+ do \
+ { \
+ SEND_DFIFO_READ_CMD(*((data)++)) \
+ } while ((--(len)) % SF_RD_MAX); \
+ } else { \
+ SEND_OPFIFO_WRITE_CMD(OP_INS, SF_RD_MAX) \
+ do \
+ { \
+ SEND_DFIFO_READ_CMD(*((data)++)) \
+ } while ((--(len)) % SF_RD_MAX); \
+ } \
+ } \
+ SEND_CSH_CMD \
+ SEND_MANUAL_END_CMD \
+ }
+
+// Send 4-Bytes fast read dual output command
+// Parameters:
+// dAddr: 4bytes length
+// data: type is char*
+// len: the value is [1, FlashSize-1]
+#define SEND_4BYTES_FASTREAD_DUALOUT_CMD(dAddr, data, len) \
+ { \
+ SEND_MANUAL_BEGIN_CMD \
+ SEND_CSL_CMD \
+ /* write op_cmd 0x3C */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OUTS, OP_REPEAT_ONE) \
+ SEND_DFIFO_WRITE_CMD(SF_OP_4BYTES_FAST_RD_DUAL_OUT) \
+ /* write address */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OUTS, OP_REPEAT_FOUR) \
+ SEND_DFIFO_WRITE_CMD(bHiByte(wHiWord(dAddr))) \
+ SEND_DFIFO_WRITE_CMD(bLoByte(wHiWord(dAddr))) \
+ SEND_DFIFO_WRITE_CMD(bHiByte(wLoWord(dAddr))) \
+ SEND_DFIFO_WRITE_CMD(bLoByte(wLoWord(dAddr))) \
+ /* write dummy data */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OS2ID, OP_REPEAT_ONE) \
+ SEND_DFIFO_WRITE_CMD(DUMMY_DATA) \
+ /* read data */ \
+ while((len) > 0) { \
+ if((len) % SF_RD_MAX) { \
+ SEND_OPFIFO_WRITE_CMD(OP_IND, ((len)%SF_RD_MAX)) \
+ do \
+ { \
+ SEND_DFIFO_READ_CMD(*((data)++)) \
+ } while ((--(len)) % SF_RD_MAX); \
+ } else { \
+ SEND_OPFIFO_WRITE_CMD(OP_IND, SF_RD_MAX) \
+ do \
+ { \
+ SEND_DFIFO_READ_CMD(*((data)++)) \
+ } while ((--(len)) % SF_RD_MAX); \
+ } \
+ } \
+ SEND_CSH_CMD \
+ SEND_MANUAL_END_CMD \
+ }
+
+// Send 4-Bytes fast read dual I/O command
+// Parameters:
+// dAddr: 4bytes length
+// data: type is char*
+// len: the value is [1, FlashSize-1]
+#define SEND_4BYTES_FASTREAD_DUALIO_CMD(dAddr, data, len) \
+ { \
+ SEND_MANUAL_BEGIN_CMD \
+ SEND_CSL_CMD \
+ /* write op_cmd 0xBC */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OUTS, OP_REPEAT_ONE) \
+ SEND_DFIFO_WRITE_CMD(SF_OP_4BYTES_FAST_RD_DUAL_IO) \
+ /* write address */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OUTD, OP_REPEAT_FOUR) \
+ SEND_DFIFO_WRITE_CMD(bHiByte(wHiWord(dAddr))) \
+ SEND_DFIFO_WRITE_CMD(bLoByte(wHiWord(dAddr))) \
+ SEND_DFIFO_WRITE_CMD(bHiByte(wLoWord(dAddr))) \
+ SEND_DFIFO_WRITE_CMD(bLoByte(wLoWord(dAddr))) \
+ /* write dummy data */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OD2ID, OP_REPEAT_ONE) \
+ SEND_DFIFO_WRITE_CMD(DUMMY_DATA) \
+ /* read data */ \
+ while((len) > 0) { \
+ if((len) % SF_RD_MAX) { \
+ SEND_OPFIFO_WRITE_CMD(OP_IND, ((len)%SF_RD_MAX)) \
+ do \
+ { \
+ SEND_DFIFO_READ_CMD(*((data)++)) \
+ } while ((--(len)) % SF_RD_MAX); \
+ } else { \
+ SEND_OPFIFO_WRITE_CMD(OP_IND, SF_RD_MAX) \
+ do \
+ { \
+ SEND_DFIFO_READ_CMD(*((data)++)) \
+ } while ((--(len)) % SF_RD_MAX); \
+ } \
+ } \
+ SEND_CSH_CMD \
+ SEND_MANUAL_END_CMD \
+ }
+
+// Send enter 4bytes address mode command
+#define SEND_ENTER_4BYTES_MODE_CMD \
+ { \
+ SEND_MANUAL_BEGIN_CMD \
+ send_single_opcode_cmd(SF_OP_ENTER_4BYTES_MODE); \
+ SEND_MANUAL_END_CMD \
+ }
+
+// Send exit 4bytes address mode command
+#define SEND_EXIT_4BYTES_MODE_CMD \
+ { \
+ SEND_MANUAL_BEGIN_CMD \
+ send_single_opcode_cmd(SF_OP_EXIT_4BYTES_MODE); \
+ SEND_MANUAL_END_CMD \
+ }
+
+// Send enter 4bytes address mode command with Write Enable
+#define SEND_ENTER_4BYTES_MODE_WREN_CMD \
+ { \
+ SEND_MANUAL_WREN_BEGIN_CMD \
+ send_single_opcode_cmd(SF_OP_ENTER_4BYTES_MODE); \
+ SEND_MANUAL_WREN_END_CMD \
+ }
+
+// Send exit 4bytes address mode command with Write Enable
+#define SEND_EXIT_4BYTES_MODE_WREN_CMD \
+ { \
+ SEND_MANUAL_WREN_BEGIN_CMD \
+ send_single_opcode_cmd(SF_OP_EXIT_4BYTES_MODE); \
+ SEND_MANUAL_WREN_END_CMD \
+ }
+
+
+// Send power down command
+#define SEND_POWER_DOWN_CMD \
+ { \
+ SEND_MANUAL_BEGIN_CMD \
+ send_single_opcode_cmd(SF_OP_POWER_DOWN); \
+ SEND_MANUAL_END_CMD \
+ }
+
+// Send release power down command
+#define SEND_RELEASE_POWER_DOWN_CMD \
+ { \
+ SEND_MANUAL_BEGIN_CMD \
+ send_single_opcode_cmd(SF_OP_RELEASE_POWER_DOWN); \
+ SEND_MANUAL_END_CMD \
+ }
+
+// Send read jedec id command
+// Parameters:
+// data: type is char[3]
+#define SEND_RD_ID_CMD(data) \
+ { \
+ SEND_MANUAL_BEGIN_CMD \
+ SEND_CSL_CMD \
+ /* write op_cmd 0x9F */ \
+ SEND_OPFIFO_WRITE_CMD(OP_OS2IS, OP_REPEAT_ONE) \
+ SEND_DFIFO_WRITE_CMD(SF_OP_RD_ID) \
+ /* read data */ \
+ SEND_OPFIFO_WRITE_CMD(OP_INS, OP_REPEAT_THREE) \
+ SEND_DFIFO_READ_CMD(*(data)) \
+ SEND_DFIFO_READ_CMD(*((data)+1)) \
+ SEND_DFIFO_READ_CMD(*((data)+2)) \
+ SEND_CSH_CMD \
+ SEND_MANUAL_END_CMD \
+ }
+
+// change from auto mode 2 manual mode
+#define SEND_AUTO2MANUAL_CMD \
+ { \
+ /* set 9 to SF_MTX_MODE_TOG */ \
+ WriteReg(SF_MTX_MODE_TOG, MANUAL_MTXMODE); \
+ /* enable Manual mode */ \
+ WriteReg(SF_MANUAL_EN, MANUAL_MANUALEN); \
+ }
+
+// change from manual mode 2 auto mode
+#define SEND_MANUAL2AUTO_CMD \
+ { \
+ /* set 0 to SF_MTX_MODE_TOG */ \
+ WriteReg(SF_MTX_MODE_TOG, AUTO_MTXMODE); \
+ /* enable auto mode */ \
+ WriteReg(SF_MANUAL_EN, AUTO_MANUALEN); \
+ }
+
+// change auto mode to 3Byte address
+#define SEND_AUTO_3B_CMD \
+ { \
+ /* enable auto address change */ \
+ WriteReg(SF_CFG3B4B_EN, CFG3B4B_EN); \
+ /* auto address is 3Byte */ \
+ WriteReg(SF_ADDR_3B4B, AUTO_3BMODE); \
+ }
+
+// change Tn Value
+#define SEND_TN_CMD(data) \
+ { \
+ /* set data to SF_SI_CK_SEL */ \
+ WriteReg(SF_SI_CK_SEL, ((data) & SI_SEL_MASK)); \
+ }
+
+// change auto mode to 4Byte address
+#define SEND_AUTO_4B_CMD \
+ { \
+ /* enable auto address change */ \
+ WriteReg(SF_CFG3B4B_EN, CFG3B4B_EN); \
+ /* auto address is 4Byte */ \
+ WriteReg(SF_ADDR_3B4B, AUTO_4BMODE); \
+ }
+
+// Enable Auto Mode Read Idle Capability
+#define SEND_READ_IDLE_EN_CMD \
+ { \
+ /* Enable Read IDLE */ \
+ WriteReg(SF_READ_IDLE_EN, RD_IDLE_EN); \
+ }
+
+// Disable Auto Mode Read Idle Capability
+#define SEND_READ_IDLE_DIS_CMD \
+ { \
+ /* Disable Read IDLE */ \
+ WriteReg(SF_READ_IDLE_EN, RD_IDLE_DIS); \
+ }
+
+// change auto mode to Read
+#define SEND_AUTO_READ_CMD \
+ { \
+ /* auto mode is Read */ \
+ WriteReg(SF_READ_MODE, AUTO_READ); \
+ }
+
+// change auto mode to Fast Read
+#define SEND_AUTO_FASTREAD_CMD \
+ { \
+ /* auto mode is Fast Read */ \
+ WriteReg(SF_READ_MODE, AUTO_FAST_READ); \
+ }
+
+// change auto mode to Fast Read Dual Output
+#define SEND_AUTO_FASTREAD_DUALOUT_CMD \
+ { \
+ /* auto mode is Fast Read Dual Output */ \
+ WriteReg(SF_READ_MODE, AUTO_FAST_READ_DUALOUT); \
+ }
+
+// change auto mode to Fast Read Dual IO
+#define SEND_AUTO_FASTREAD_DUALIO_CMD \
+ { \
+ /* auto mode is Fast Read Dual IO */ \
+ WriteReg(SF_READ_MODE, AUTO_FAST_READ_DUALIO); \
+ }
+#endif
+
Index: linux-3.18.21/drivers/mtd/chips/spi_controller.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/chips/spi_controller.c 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,677 @@
+/***************************************************************************************
+ * Copyright(c) 2014 ECONET Incorporation All rights reserved.
+ *
+ * This is unpublished proprietary source code of ECONET Incorporation
+ *
+ * The copyright notice above does not evidence any actual or intended
+ * publication of such source code.
+ ***************************************************************************************
+ */
+
+/*======================================================================================
+ * MODULE NAME: spi
+ * FILE NAME: spi_controller.c
+ * DATE: 2014/12/16
+ * VERSION: 1.00
+ * PURPOSE: To Provide SPI Controller Access interface.
+ * NOTES:
+ *
+ * AUTHOR : Chuck Kuo REVIEWED by
+ *
+ * FUNCTIONS
+ *
+ * SPI_CONTROLLER_Enable_Manual_Mode To provide interface for Enable SPI Controller Manual Mode.
+ * SPI_CONTROLLER_Write_One_Byte To provide interface for write one byte to SPI bus.
+ * SPI_CONTROLLER_Write_NByte To provide interface for write N bytes to SPI bus.
+ * SPI_CONTROLLER_Read_NByte To provide interface for read N bytes from SPI bus.
+ * SPI_CONTROLLER_Chip_Select_Low To provide interface for set chip select low in SPI bus.
+ * SPI_CONTROLLER_Chip_Select_High To provide interface for set chip select high in SPI bus.
+ *
+ * DEPENDENCIES
+ *
+ * * $History: $
+ * MODIFICTION HISTORY:
+ * Version 1.00 - Date 2014/12/16 By Chuck Kuo
+ * ** This is the first versoin for creating to support the functions of
+ * current module.
+ *
+ *======================================================================================
+ */
+
+/* INCLUDE FILE DECLARATIONS --------------------------------------------------------- */
+#include "spi_controller.h"
+
+#include <asm/types.h>
+#include <asm/io.h>
+#include <asm/tc3162/tc3162.h>
+#include <asm/types.h>
+#include <asm/string.h>
+
+/* NAMING CONSTANT DECLARATIONS ------------------------------------------------------ */
+
+/* SPI Controller Register Definition */
+#define _SPI_CONTROLLER_REGS_BASE 0xBFA10000
+#define _SPI_CONTROLLER_REGS_READ_MODE (_SPI_CONTROLLER_REGS_BASE + 0x0000)
+#define _SPI_CONTROLLER_REGS_READ_IDLE_EN (_SPI_CONTROLLER_REGS_BASE + 0x0004)
+#define _SPI_CONTROLLER_REGS_SIDLY (_SPI_CONTROLLER_REGS_BASE + 0x0008)
+#define _SPI_CONTROLLER_REGS_CSHEXT (_SPI_CONTROLLER_REGS_BASE + 0x000C)
+#define _SPI_CONTROLLER_REGS_CSLEXT (_SPI_CONTROLLER_REGS_BASE + 0x0010)
+#define _SPI_CONTROLLER_REGS_MTX_MODE_TOG (_SPI_CONTROLLER_REGS_BASE + 0x0014)
+#define _SPI_CONTROLLER_REGS_RDCTL_FSM (_SPI_CONTROLLER_REGS_BASE + 0x0018)
+#define _SPI_CONTROLLER_REGS_MACMUX_SEL (_SPI_CONTROLLER_REGS_BASE + 0x001C)
+#define _SPI_CONTROLLER_REGS_MANUAL_EN (_SPI_CONTROLLER_REGS_BASE + 0x0020)
+#define _SPI_CONTROLLER_REGS_MANUAL_OPFIFO_EMPTY (_SPI_CONTROLLER_REGS_BASE + 0x0024)
+#define _SPI_CONTROLLER_REGS_MANUAL_OPFIFO_WDATA (_SPI_CONTROLLER_REGS_BASE + 0x0028)
+#define _SPI_CONTROLLER_REGS_MANUAL_OPFIFO_FULL (_SPI_CONTROLLER_REGS_BASE + 0x002C)
+#define _SPI_CONTROLLER_REGS_MANUAL_OPFIFO_WR (_SPI_CONTROLLER_REGS_BASE + 0x0030)
+#define _SPI_CONTROLLER_REGS_MANUAL_DFIFO_FULL (_SPI_CONTROLLER_REGS_BASE + 0x0034)
+#define _SPI_CONTROLLER_REGS_MANUAL_DFIFO_WDATA (_SPI_CONTROLLER_REGS_BASE + 0x0038)
+#define _SPI_CONTROLLER_REGS_MANUAL_DFIFO_EMPTY (_SPI_CONTROLLER_REGS_BASE + 0x003C)
+#define _SPI_CONTROLLER_REGS_MANUAL_DFIFO_RD (_SPI_CONTROLLER_REGS_BASE + 0x0040)
+#define _SPI_CONTROLLER_REGS_MANUAL_DFIFO_RDATA (_SPI_CONTROLLER_REGS_BASE + 0x0044)
+#define _SPI_CONTROLLER_REGS_DUMMY (_SPI_CONTROLLER_REGS_BASE + 0x0080)
+#define _SPI_CONTROLLER_REGS_PROBE_SEL (_SPI_CONTROLLER_REGS_BASE + 0x0088)
+#define _SPI_CONTROLLER_REGS_INTERRUPT (_SPI_CONTROLLER_REGS_BASE + 0x0090)
+#define _SPI_CONTROLLER_REGS_INTERRUPT_EN (_SPI_CONTROLLER_REGS_BASE + 0x0094)
+#define _SPI_CONTROLLER_REGS_SI_CK_SEL (_SPI_CONTROLLER_REGS_BASE + 0x009C)
+#define _SPI_CONTROLLER_REGS_SW_CFGNANDADDR_VAL (_SPI_CONTROLLER_REGS_BASE + 0x010C)
+#define _SPI_CONTROLLER_REGS_SW_CFGNANDADDR_EN (_SPI_CONTROLLER_REGS_BASE + 0x0110)
+#define _SPI_CONTROLLER_REGS_NFI2SPI_EN (_SPI_CONTROLLER_REGS_BASE + 0x0130)
+
+/* Register Value Definition */
+#define _SPI_CONTROLLER_VAL_OP_CSH (0x00)
+#define _SPI_CONTROLLER_VAL_OP_CSL (0x01)
+#define _SPI_CONTROLLER_VAL_OP_CK (0x02)
+#define _SPI_CONTROLLER_VAL_OP_OUTS (0x08)
+#define _SPI_CONTROLLER_VAL_OP_OUTD (0x09)
+#define _SPI_CONTROLLER_VAL_OP_OUTQ (0x0A)
+#define _SPI_CONTROLLER_VAL_OP_INS (0x0C)
+#define _SPI_CONTROLLER_VAL_OP_INS0 (0x0D)
+#define _SPI_CONTROLLER_VAL_OP_IND (0x0E)
+#define _SPI_CONTROLLER_VAL_OP_INQ (0x0F)
+#define _SPI_CONTROLLER_VAL_OP_OS2IS (0x10)
+#define _SPI_CONTROLLER_VAL_OP_OS2ID (0x11)
+#define _SPI_CONTROLLER_VAL_OP_OS2IQ (0x12)
+#define _SPI_CONTROLLER_VAL_OP_OD2IS (0x13)
+#define _SPI_CONTROLLER_VAL_OP_OD2ID (0x14)
+#define _SPI_CONTROLLER_VAL_OP_OD2IQ (0x15)
+#define _SPI_CONTROLLER_VAL_OP_OQ2IS (0x16)
+#define _SPI_CONTROLLER_VAL_OP_OQ2ID (0x17)
+#define _SPI_CONTROLLER_VAL_OP_OQ2IQ (0x18)
+#define _SPI_CONTROLLER_VAL_OP_OSNIS (0x19)
+#define _SPI_CONTROLLER_VAL_OP_ODNID (0x1A)
+#define _SPI_CONTROLLER_VAL_OP_LEN_MAX (0x1ff)
+#define _SPI_CONTROLLER_VAL_OP_LEN_ONE (1)
+#define _SPI_CONTROLLER_VAL_OP_LEN_TWO (2)
+#define _SPI_CONTROLLER_VAL_OP_LEN_THREE (3)
+#define _SPI_CONTROLLER_VAL_OP_LEN_FOUR (4)
+#define _SPI_CONTROLLER_VAL_OP_LEN_FIVE (5)
+#define _SPI_CONTROLLER_VAL_OP_CMD_MASK (0x1f)
+#define _SPI_CONTROLLER_VAL_OP_LEN_MASK (0x1ff)
+#define _SPI_CONTROLLER_VAL_OP_SHIFT (0x9)
+#define _SPI_CONTROLLER_VAL_OP_ENABLE (0x1)
+#define _SPI_CONTROLLER_VAL_DFIFO_MASK (0xff)
+#define _SPI_CONTROLLER_VAL_READ_IDLE_DISABLE (0x0)
+#define _SPI_CONTROLLER_VAL_MANUAL_MTXMODE (0x9)
+#define _SPI_CONTROLLER_VAL_MANUAL_MANUALEN (0x1)
+#define _SPI_CONTROLLER_VAL_DDATA_ENABLE (0x1)
+#define _SPI_CONTROLLER_VAL_AUTO_MTXMODE (0x0)
+#define _SPI_CONTROLLER_VAL_MANUAL_MANUALDISABLE (0x0)
+#define _SPI_CONTROLLER_VAL_NFI2SPI_ENABLE (1)
+#define _SPI_CONTROLLER_VAL_NFI2SPI_DISABLE (0)
+
+
+#define _SPI_CONTROLLER_CHECK_TIMES (10000)
+
+
+/* MACRO DECLARATIONS ---------------------------------------------------------------- */
+#ifndef VPint
+#define VPint *(volatile unsigned long int *)
+#endif
+
+#define WriteReg(reg, data) (VPint(reg) = data)
+#define ReadReg(reg) (VPint(reg))
+#define bReadReg(reg, mask) (VPint(reg) & mask)
+
+#define _SPI_CONTROLLER_PRINTF printk
+#define _SPI_CONTROLLER_DEBUG_PRINTF spi_controller_debug_printf
+#define _SPI_CONTROLLER_DEBUG_PRINTF_ARRAY spi_controller_debug_printf_array
+#define _SPI_CONTROLLER_GET_CONF_PTR &(_spi_controller_conf_t)
+#define _SPI_CONTROLLER_MEMCPY memcpy
+
+/* TYPE DECLARATIONS ----------------------------------------------------------------- */
+
+/* STATIC VARIABLE DECLARATIONS ------------------------------------------------------ */
+u8 _SPI_CONTROLLER_DEBUG_FLAG= 0; /* For control printf debug message or not */
+SPI_CONTROLLER_CONF_T _spi_controller_conf_t;
+
+/* LOCAL SUBPROGRAM BODIES------------------------------------------------------------ */
+void spi_controller_debug_printf_array (u8 *ptr_data, u32 len)
+{
+#if 0
+ u32 idx;
+
+ for(idx=0 ; idx<len; idx++)
+ {
+ _SPI_CONTROLLER_DEBUG_PRINTF("0x%x ", *(ptr_data+idx));
+ }
+#endif
+}
+static void spi_controller_debug_printf( char *fmt, ... )
+{
+ if( _SPI_CONTROLLER_DEBUG_FLAG == 1 )
+ {
+ unsigned char str_buf[100];
+ va_list argptr;
+ int cnt;
+
+ va_start(argptr, fmt);
+ cnt = vsprintf(str_buf, fmt, argptr);
+ va_end(argptr);
+
+ prom_printf("%s", str_buf);
+ }
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_CONTROLLER_RTN_T spi_controller_set_opfifo( u8 op_cmd,
+ * u32 op_len )
+ * PURPOSE : To setup SPI Controller opfifo.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : op_cmd - The op_cmd variable of this function.
+ * op_len - The op_len variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/16 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_CONTROLLER_RTN_T spi_controller_set_opfifo(u8 op_cmd, u32 op_len)
+{
+ u32 check_idx;
+ SPI_CONTROLLER_RTN_T rtn_status = SPI_CONTROLLER_RTN_NO_ERROR;
+
+ _SPI_CONTROLLER_DEBUG_PRINTF("spi_controller_set_opfifo: set op_cmd =0x%x, op_len=0x%x\n", op_cmd, op_len);
+
+ /* 1. Write op_cmd to register OPFIFO_WDATA */
+ WriteReg( _SPI_CONTROLLER_REGS_MANUAL_OPFIFO_WDATA, ((((op_cmd) & _SPI_CONTROLLER_VAL_OP_CMD_MASK) << _SPI_CONTROLLER_VAL_OP_SHIFT) | ((op_len) & _SPI_CONTROLLER_VAL_OP_LEN_MASK)));
+
+ /* 2. Wait until opfifo is not full */
+ while(ReadReg( _SPI_CONTROLLER_REGS_MANUAL_OPFIFO_FULL ));
+
+ /* 3. Enable write from register OPFIFO_WDATA to opfifo */
+ WriteReg( _SPI_CONTROLLER_REGS_MANUAL_OPFIFO_WR, _SPI_CONTROLLER_VAL_OP_ENABLE);
+
+ /* 4. Wait until opfifo is empty */
+ while(!ReadReg( _SPI_CONTROLLER_REGS_MANUAL_OPFIFO_EMPTY ));
+
+ return(rtn_status);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_CONTROLLER_RTN_T spi_controller_read_data_fifo( u8 *ptr_rtn_data,
+ * u32 data_len )
+ * PURPOSE : To read data from the SPI Controller data FIFO.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : data_len - The data_len variable of this function.
+ * OUTPUT: ptr_rtn_data - The ptr_rtn_data variable of this function.
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/16 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_CONTROLLER_RTN_T spi_controller_read_data_fifo( u8 *ptr_rtn_data, u32 data_len)
+{
+ u32 idx;
+ SPI_CONTROLLER_RTN_T rtn_status = SPI_CONTROLLER_RTN_NO_ERROR;
+
+ for( idx =0 ; idx<data_len ; idx ++)
+ {
+ /* 1. wait until dfifo is not empty */
+ while(ReadReg( _SPI_CONTROLLER_REGS_MANUAL_DFIFO_EMPTY ));
+
+ /* 2. read from dfifo to register DFIFO_RDATA */
+
+ *(ptr_rtn_data+idx) = (ReadReg( _SPI_CONTROLLER_REGS_MANUAL_DFIFO_RDATA )) &_SPI_CONTROLLER_VAL_DFIFO_MASK;
+
+ _SPI_CONTROLLER_DEBUG_PRINTF(" spi_controller_read_data_fifo : read_data = 0x%x\n", *(ptr_rtn_data+idx));
+ /* 3. enable register DFIFO_RD to read next byte */
+ WriteReg( _SPI_CONTROLLER_REGS_MANUAL_DFIFO_RD, _SPI_CONTROLLER_VAL_DDATA_ENABLE);
+ }
+
+ return(rtn_status);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_CONTROLLER_RTN_T spi_controller_write_data_fifo( u8 *ptr_data,
+ * u32 data_len )
+ * PURPOSE : To write data to the SPI Controller data FIFO.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : ptr_data - The data variable of this function.
+ * data_len - The data_len variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/16 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_CONTROLLER_RTN_T spi_controller_write_data_fifo(u8 *ptr_data, u32 data_len)
+{
+ u32 idx;
+ SPI_CONTROLLER_RTN_T rtn_status = SPI_CONTROLLER_RTN_NO_ERROR;
+
+ _SPI_CONTROLLER_DEBUG_PRINTF("spi_controller_write_data_fifo : len=0x%x, data: 0x%x\n", data_len, *ptr_data);
+ _SPI_CONTROLLER_DEBUG_PRINTF_ARRAY(ptr_data, data_len);
+
+ for( idx =0 ; idx<data_len ; idx++)
+ {
+ /* 1. Wait until dfifo is not full */
+ while(ReadReg( _SPI_CONTROLLER_REGS_MANUAL_DFIFO_FULL ));
+
+ /* 2. Write data to register DFIFO_WDATA */
+ WriteReg( _SPI_CONTROLLER_REGS_MANUAL_DFIFO_WDATA, ((*(ptr_data+idx)) & _SPI_CONTROLLER_VAL_DFIFO_MASK));
+
+
+ _SPI_CONTROLLER_DEBUG_PRINTF(" spi_controller_write_data_fifo: write data =0x%x\n", ((*(ptr_data+idx)) & _SPI_CONTROLLER_VAL_DFIFO_MASK));
+
+
+ /* 3. Wait until dfifo is not full */
+ while(ReadReg( _SPI_CONTROLLER_REGS_MANUAL_DFIFO_FULL ));
+
+ }
+
+ return(rtn_status);
+}
+
+/* EXPORTED SUBPROGRAM BODIES -------------------------------------------------------- */
+SPI_CONTROLLER_RTN_T SPI_CONTROLLER_Set_Configure( SPI_CONTROLLER_CONF_T *ptr_spi_controller_conf_t )
+{
+ SPI_CONTROLLER_CONF_T *ptr_spi_conf_t;
+
+ ptr_spi_conf_t = _SPI_CONTROLLER_GET_CONF_PTR;
+
+ /* Store new setting */
+ _SPI_CONTROLLER_MEMCPY(ptr_spi_conf_t, ptr_spi_controller_conf_t, sizeof(SPI_CONTROLLER_CONF_T) );
+
+ /* Setting Mode */
+ if( (ptr_spi_conf_t->mode) == SPI_CONTROLLER_MODE_AUTO )
+ {
+ _SPI_CONTROLLER_DEBUG_PRINTF("SPI_CONTROLLER_Set_Configure: AUTO Mode\n");
+ /* Switch out DMA circuit */
+ if(isEN7526c) {
+ WriteReg( _SPI_CONTROLLER_REGS_NFI2SPI_EN, _SPI_CONTROLLER_VAL_NFI2SPI_DISABLE);
+ }
+
+ /* manual mode -> auto mode */
+ /*Set 0 to SF_MTX_MODE_TOG */
+ WriteReg( _SPI_CONTROLLER_REGS_MTX_MODE_TOG, _SPI_CONTROLLER_VAL_AUTO_MTXMODE);
+
+ /*Enable Auto Mode */
+ WriteReg( _SPI_CONTROLLER_REGS_MANUAL_EN, _SPI_CONTROLLER_VAL_MANUAL_MANUALDISABLE);
+ }
+ if( (ptr_spi_conf_t->mode) == SPI_CONTROLLER_MODE_MANUAL)
+ {
+ _SPI_CONTROLLER_DEBUG_PRINTF("SPI_CONTROLLER_Set_Configure: Manual Mode\n");
+
+ /* Switch out DMA circuit */
+ if(isEN7526c) {
+ WriteReg( _SPI_CONTROLLER_REGS_NFI2SPI_EN, _SPI_CONTROLLER_VAL_NFI2SPI_DISABLE);
+ }
+
+ /* disable read_idle_enable */
+ WriteReg( _SPI_CONTROLLER_REGS_READ_IDLE_EN , _SPI_CONTROLLER_VAL_READ_IDLE_DISABLE);
+
+ /*wait until auto read status is IDLE */
+ while(ReadReg( _SPI_CONTROLLER_REGS_RDCTL_FSM ));
+
+ /* auto mode -> manual mode */
+ /*Set 9 to SF_MTX_MODE_TOG */
+ WriteReg( _SPI_CONTROLLER_REGS_MTX_MODE_TOG, _SPI_CONTROLLER_VAL_MANUAL_MTXMODE);
+
+ /*Enable Manual Mode */
+ WriteReg( _SPI_CONTROLLER_REGS_MANUAL_EN, _SPI_CONTROLLER_VAL_MANUAL_MANUALEN);
+ }
+ if( (ptr_spi_conf_t->mode) == SPI_CONTROLLER_MODE_DMA)
+ {
+ _SPI_CONTROLLER_DEBUG_PRINTF("SPI_CONTROLLER_Set_Configure: DMA Mode\n");
+
+ /* Switch into DMA circuit */
+ if(isEN7526c) {
+ WriteReg( _SPI_CONTROLLER_REGS_NFI2SPI_EN, _SPI_CONTROLLER_VAL_NFI2SPI_ENABLE);
+ }
+
+ /* manual mode -> auto mode */
+ /*Set 0 to SF_MTX_MODE_TOG */
+ WriteReg( _SPI_CONTROLLER_REGS_MTX_MODE_TOG, _SPI_CONTROLLER_VAL_AUTO_MTXMODE);
+
+ /*Enable Auto Mode */
+ WriteReg( _SPI_CONTROLLER_REGS_MANUAL_EN, _SPI_CONTROLLER_VAL_MANUAL_MANUALDISABLE);
+
+ }
+
+ /* Set dummy byte number */
+ WriteReg(_SPI_CONTROLLER_REGS_DUMMY, (ptr_spi_conf_t->dummy_byte_num) );
+
+ return (SPI_CONTROLLER_RTN_NO_ERROR);
+}
+
+
+SPI_CONTROLLER_RTN_T SPI_CONTROLLER_Get_Configure( SPI_CONTROLLER_CONF_T *ptr_rtn_spi_controller_conf_t )
+{
+
+ SPI_CONTROLLER_CONF_T *ptr_spi_controller_conf_info_t;
+
+ ptr_spi_controller_conf_info_t = _SPI_CONTROLLER_GET_CONF_PTR;
+ _SPI_CONTROLLER_MEMCPY( ptr_rtn_spi_controller_conf_t, ptr_spi_controller_conf_info_t, sizeof(SPI_CONTROLLER_CONF_T) );
+
+ return (SPI_CONTROLLER_RTN_NO_ERROR);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: SPI_CONTROLLER_RTN_T SPI_CONTROLLER_Enable_Manual_Mode( void )
+ * PURPOSE : To provide interface for enable SPI Controller Manual Mode Enable.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/16 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+SPI_CONTROLLER_RTN_T SPI_CONTROLLER_Enable_Manual_Mode( void )
+{
+	SPI_CONTROLLER_RTN_T rtn_status = SPI_CONTROLLER_RTN_NO_ERROR;
+	SPI_CONTROLLER_CONF_T spi_conf_t;
+
+	/* Switch the controller into manual mode with one dummy byte. */
+	spi_conf_t.dummy_byte_num = 1 ;
+	spi_conf_t.mode = SPI_CONTROLLER_MODE_MANUAL;
+	SPI_CONTROLLER_Set_Configure(&spi_conf_t); /* NOTE(review): return status of Set_Configure is ignored here */
+
+	return (rtn_status);
+}
+
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: SPI_CONTROLLER_RTN_T SPI_CONTROLLER_Write_One_Byte( u8 data )
+ * PURPOSE : To provide interface for write one byte to SPI bus.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : data - The data variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/16 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+SPI_CONTROLLER_RTN_T SPI_CONTROLLER_Write_One_Byte( u8 data )
+{
+	SPI_CONTROLLER_RTN_T rtn_status = SPI_CONTROLLER_RTN_NO_ERROR;
+
+	_SPI_CONTROLLER_DEBUG_PRINTF("SPI_CONTROLLER_Write_One_Byte : data=0x%x\n", data);
+
+	/* 1. Queue a single-byte serial output (OUTS) opcode to the controller */
+	spi_controller_set_opfifo( _SPI_CONTROLLER_VAL_OP_OUTS, _SPI_CONTROLLER_VAL_OP_LEN_ONE);
+
+	/* 2. Push the data byte into the controller's write-data FIFO */
+	spi_controller_write_data_fifo( &data, _SPI_CONTROLLER_VAL_OP_LEN_ONE);
+
+	return (rtn_status);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: SPI_CONTROLLER_RTN_T SPI_CONTROLLER_WRITE_NBYTES( u8 *ptr_data,
+ * u32 len,
+ * SPI_CONTROLLER_SPEED_T speed )
+ * PURPOSE : To provide interface for write N bytes to SPI bus.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : ptr_data - The data variable of this function.
+ * len - The len variable of this function.
+ * speed - The speed variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/16 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+SPI_CONTROLLER_RTN_T SPI_CONTROLLER_Write_NByte( u8 *ptr_data, u32 len, SPI_CONTROLLER_SPEED_T speed )
+{
+	u8 op_cmd;
+	u32 data_len, remain_len;
+	SPI_CONTROLLER_RTN_T rtn_status = SPI_CONTROLLER_RTN_NO_ERROR;
+
+	_SPI_CONTROLLER_DEBUG_PRINTF("SPI_CONTROLLER_Write_NByte: len=0x%x\n", len );
+	_SPI_CONTROLLER_DEBUG_PRINTF_ARRAY(ptr_data, len);
+
+	/* 1. Map the requested bus width (single/dual/quad) onto the output opcode */
+	switch( speed ) /* NOTE(review): no default case - op_cmd stays uninitialized for an out-of-range speed; confirm callers */
+	{
+		case SPI_CONTROLLER_SPEED_SINGLE :
+			op_cmd = _SPI_CONTROLLER_VAL_OP_OUTS;
+			break;
+
+		case SPI_CONTROLLER_SPEED_DUAL :
+			op_cmd = _SPI_CONTROLLER_VAL_OP_OUTD;
+			break;
+
+		case SPI_CONTROLLER_SPEED_QUAD :
+			op_cmd = _SPI_CONTROLLER_VAL_OP_OUTQ;
+			break;
+	}
+
+	remain_len = len;
+	while (remain_len > 0)
+	{
+		if( remain_len > _SPI_CONTROLLER_VAL_OP_LEN_MAX ) /* the controller processes at most _SPI_CONTROLLER_VAL_OP_LEN_MAX bytes per opcode */
+		{
+			data_len = _SPI_CONTROLLER_VAL_OP_LEN_MAX;
+		}
+		else
+		{
+			data_len = remain_len;
+		}
+		/* 2. Queue the output opcode for this chunk */
+		spi_controller_set_opfifo( op_cmd, data_len);
+
+		/* 3. Copy this chunk into the controller's write-data FIFO */
+		spi_controller_write_data_fifo( &ptr_data[len - remain_len], data_len );
+
+		remain_len -= data_len;
+	}
+
+	return (rtn_status);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: SPI_CONTROLLER_RTN_T SPI_CONTROLLER_READ_NBYTES( u8 *ptr_rtn_data,
+ * u8 len,
+ * SPI_CONTROLLER_SPEED_T speed )
+ * PURPOSE : To provide interface for read N bytes from SPI bus.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : len - The len variable of this function.
+ * speed - The speed variable of this function.
+ * OUTPUT: ptr_rtn_data - The ptr_rtn_data variable of this function.
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/16 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+SPI_CONTROLLER_RTN_T SPI_CONTROLLER_Read_NByte(u8 *ptr_rtn_data, u32 len, SPI_CONTROLLER_SPEED_T speed)
+{
+	u8 op_cmd;
+	u32 data_len, remain_len;
+	SPI_CONTROLLER_RTN_T rtn_status = SPI_CONTROLLER_RTN_NO_ERROR;
+
+	_SPI_CONTROLLER_DEBUG_PRINTF("SPI_CONTROLLER_Read_NByte : \n");
+
+	/* 1. Map the requested bus width (single/dual/quad) onto the input opcode */
+	switch( speed ) /* NOTE(review): no default case - op_cmd stays uninitialized for an out-of-range speed; confirm callers */
+	{
+		case SPI_CONTROLLER_SPEED_SINGLE :
+			op_cmd = _SPI_CONTROLLER_VAL_OP_INS;
+			break;
+
+		case SPI_CONTROLLER_SPEED_DUAL :
+			op_cmd = _SPI_CONTROLLER_VAL_OP_IND;
+			break;
+
+		case SPI_CONTROLLER_SPEED_QUAD :
+			op_cmd = _SPI_CONTROLLER_VAL_OP_INQ;
+			break;
+	}
+
+	remain_len = len;
+	while (remain_len > 0)
+	{
+		if( remain_len > _SPI_CONTROLLER_VAL_OP_LEN_MAX ) /* the controller processes at most _SPI_CONTROLLER_VAL_OP_LEN_MAX bytes per opcode */
+		{
+			data_len = _SPI_CONTROLLER_VAL_OP_LEN_MAX;
+		}
+		else
+		{
+			data_len = remain_len;
+		}
+		/* 2. Queue the input opcode for this chunk */
+		spi_controller_set_opfifo( op_cmd, data_len);
+
+		/* 3. Drain this chunk from the controller's read-data FIFO */
+		spi_controller_read_data_fifo( &ptr_rtn_data[len - remain_len], data_len );
+
+		remain_len -= data_len;
+	}
+
+	return (rtn_status);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: SPI_CONTROLLER_RTN_T SPI_CONTROLLER_Chip_Select_Low( void )
+ * PURPOSE : To provide interface for set chip select low in SPI bus.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/16 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+SPI_CONTROLLER_RTN_T SPI_CONTROLLER_Chip_Select_Low(void)
+{
+	SPI_CONTROLLER_RTN_T rtn_status = SPI_CONTROLLER_RTN_NO_ERROR;
+
+	spi_controller_set_opfifo( _SPI_CONTROLLER_VAL_OP_CSL, _SPI_CONTROLLER_VAL_OP_LEN_ONE); /* queue chip-select-low opcode */
+	spi_controller_set_opfifo( _SPI_CONTROLLER_VAL_OP_CSL, _SPI_CONTROLLER_VAL_OP_LEN_ONE); /* NOTE(review): CSL queued twice - presumably intentional (setup timing); confirm */
+
+	return (rtn_status);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: SPI_CONTROLLER_RTN_T SPI_CONTROLLER_Chip_Select_High( void )
+ * PURPOSE : To provide interface for set chip select high in SPI bus.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/16 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+SPI_CONTROLLER_RTN_T SPI_CONTROLLER_Chip_Select_High(void)
+{
+	SPI_CONTROLLER_RTN_T rtn_status = SPI_CONTROLLER_RTN_NO_ERROR;
+
+	spi_controller_set_opfifo( _SPI_CONTROLLER_VAL_OP_CSH, _SPI_CONTROLLER_VAL_OP_LEN_ONE); /* queue chip-select-high opcode */
+	spi_controller_set_opfifo( _SPI_CONTROLLER_VAL_OP_CK, _SPI_CONTROLLER_VAL_OP_LEN_FIVE); /* queue CK opcode, length 5 - presumably trailing clocks after deassert; confirm */
+
+	return (rtn_status);
+}
+
+void SPI_CONTROLLER_DEBUG_ENABLE( void )
+{
+	_SPI_CONTROLLER_DEBUG_FLAG = 1; /* turn on debug printf output for this driver */
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: void SPI_CONTROLLER_DEBUG_DISABLE( void )
+ * PURPOSE : To disable the printf debug messages of the SPI controller driver.
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2015/01/20 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+void SPI_CONTROLLER_DEBUG_DISABLE( void )
+{
+	_SPI_CONTROLLER_DEBUG_FLAG = 0; /* turn off debug printf output for this driver */
+}
+
+/* End of [spi_controller.c] package */
Index: linux-3.18.21/drivers/mtd/chips/spi_controller.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/chips/spi_controller.h 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,234 @@
+ /***************************************************************************************
+ * Copyright(c) 2014 ECONET Incorporation All rights reserved.
+ *
+ * This is unpublished proprietary source code of ECONET Incorporation
+ *
+ * The copyright notice above does not evidence any actual or intended
+ * publication of such source code.
+ ***************************************************************************************
+ */
+
+/*======================================================================================
+ * MODULE NAME: spi
+ * FILE NAME: spi_controller.h
+ * DATE: 2014/12/16
+ * VERSION: 1.00
+ * PURPOSE: To Provide SPI Controller Access interface.
+ * NOTES:
+ *
+ * AUTHOR : Chuck Kuo REVIEWED by
+ *
+ * FUNCTIONS
+ *
+ * SPI_CONTROLLER_Enable_Manual_Mode To provide interface for Enable SPI Controller Manual Mode.
+ * SPI_CONTROLLER_Write_One_Byte To provide interface for write one byte to SPI bus.
+ * SPI_CONTROLLER_Write_NByte To provide interface for write N bytes to SPI bus.
+ * SPI_CONTROLLER_Read_NByte To provide interface for read N bytes from SPI bus.
+ * SPI_CONTROLLER_Chip_Select_Low To provide interface for set chip select low in SPI bus.
+ * SPI_CONTROLLER_Chip_Select_High To provide interface for set chip select high in SPI bus.
+ *
+ * DEPENDENCIES
+ *
+ * * $History: $
+ * MODIFICATION HISTORY:
+ * Version 1.00 - Date 2014/12/16 By Chuck Kuo
+ * ** This is the first version created to support the functions of
+ * current module.
+ *
+ *======================================================================================
+ */
+
+#ifndef __SPI_CONTROLLER_H__
+ #define __SPI_CONTROLLER_H__
+
+/* INCLUDE FILE DECLARATIONS --------------------------------------------------------- */
+#include <asm/types.h>
+
+/* NAMING CONSTANT DECLARATIONS ------------------------------------------------------ */
+#define SPI_CONTROLLER_REGS_STRAP 0xBFA10114
+
+/* MACRO DECLARATIONS ---------------------------------------------------------------- */
+
+/* TYPE DECLARATIONS ----------------------------------------------------------------- */
+typedef enum{
+ SPI_CONTROLLER_SPEED_SINGLE =0,
+ SPI_CONTROLLER_SPEED_DUAL,
+ SPI_CONTROLLER_SPEED_QUAD
+
+} SPI_CONTROLLER_SPEED_T;
+
+typedef enum{
+ SPI_CONTROLLER_RTN_NO_ERROR =0,
+ SPI_CONTROLLER_RTN_SET_OPFIFO_ERROR,
+ SPI_CONTROLLER_RTN_READ_DATAPFIFO_ERROR,
+ SPI_CONTROLLER_RTN_WRITE_DATAPFIFO_ERROR,
+
+ SPI_CONTROLLER_RTN_DEF_NO
+} SPI_CONTROLLER_RTN_T;
+
+
+typedef enum{
+ SPI_CONTROLLER_MODE_AUTO=0,
+ SPI_CONTROLLER_MODE_MANUAL,
+ SPI_CONTROLLER_MODE_DMA,
+ SPI_CONTROLLER_MODE_NO
+} SPI_CONTROLLER_MODE_T;
+
+typedef struct SPI_CONTROLLER_CONFIGURE
+{
+ SPI_CONTROLLER_MODE_T mode;
+ u32 dummy_byte_num;
+} SPI_CONTROLLER_CONF_T;
+/* EXPORTED SUBPROGRAM SPECIFICATION ------------------------------------------------- */
+SPI_CONTROLLER_RTN_T SPI_CONTROLLER_Set_Configure( SPI_CONTROLLER_CONF_T *ptr_spi_controller_conf_t );
+SPI_CONTROLLER_RTN_T SPI_CONTROLLER_Get_Configure( SPI_CONTROLLER_CONF_T *ptr_rtn_spi_controller_conf_t );
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: SPI_CONTROLLER_RTN_T SPI_CONTROLLER_Enable_Manual_Mode( void )
+ * PURPOSE : To provide interface for enable SPI Controller Manual Mode Enable.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/16 by Chuck Kuo - The first revision for this function.
+ *------------------------------------------------------------------------------------
+ */
+SPI_CONTROLLER_RTN_T SPI_CONTROLLER_Enable_Manual_Mode( void );
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: SPI_CONTROLLER_RTN_T SPI_CONTROLLER_Write_One_Byte( u8 data )
+ * PURPOSE : To provide interface for write one byte to SPI bus.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : data - The data variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/16 by Chuck Kuo - The first revision for this function.
+ *------------------------------------------------------------------------------------
+ */
+SPI_CONTROLLER_RTN_T SPI_CONTROLLER_Write_One_Byte( u8 data );
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: SPI_CONTROLLER_RTN_T SPI_CONTROLLER_WRITE_NBYTES( u8 *ptr_data,
+ * u32 len,
+ * SPI_CONTROLLER_SPEED_T speed )
+ * PURPOSE : To provide interface for write N bytes to SPI bus.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : ptr_data - The data variable of this function.
+ * len - The len variable of this function.
+ * speed - The speed variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/16 by Chuck Kuo - The first revision for this function.
+ *------------------------------------------------------------------------------------
+ */
+SPI_CONTROLLER_RTN_T SPI_CONTROLLER_Write_NByte( u8 *ptr_data,
+ u32 len,
+ SPI_CONTROLLER_SPEED_T speed );
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: SPI_CONTROLLER_RTN_T SPI_CONTROLLER_READ_NBYTES( u8 *ptr_rtn_data,
+ * u8 len,
+ * SPI_CONTROLLER_SPEED_T speed )
+ * PURPOSE : To provide interface for read N bytes from SPI bus.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : len - The len variable of this function.
+ * speed - The speed variable of this function.
+ * OUTPUT: ptr_rtn_data - The ptr_rtn_data variable of this function.
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/16 by Chuck Kuo - The first revision for this function.
+ *------------------------------------------------------------------------------------
+ */
+SPI_CONTROLLER_RTN_T SPI_CONTROLLER_Read_NByte( u8 *ptr_rtn_data,
+ u32 len,
+ SPI_CONTROLLER_SPEED_T speed );
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: SPI_CONTROLLER_RTN_T SPI_CONTROLLER_Chip_Select_Low( void )
+ * PURPOSE : To provide interface for set chip select low in SPI bus.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/16 by Chuck Kuo - The first revision for this function.
+ *------------------------------------------------------------------------------------
+ */
+SPI_CONTROLLER_RTN_T SPI_CONTROLLER_Chip_Select_Low( void );
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: SPI_CONTROLLER_RTN_T SPI_CONTROLLER_Chip_Select_High( void )
+ * PURPOSE : To provide interface for set chip select high in SPI bus.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/16 by Chuck Kuo - The first revision for this function.
+ *------------------------------------------------------------------------------------
+ */
+SPI_CONTROLLER_RTN_T SPI_CONTROLLER_Chip_Select_High( void );
+
+void SPI_CONTROLLER_DEBUG_ENABLE( void );
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: void SPI_CONTROLLER_DEBUG_DISABLE( void )
+ * PURPOSE : To disable the printf debug messages of the SPI controller driver.
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2015/01/20 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+void SPI_CONTROLLER_DEBUG_DISABLE( void );
+
+#endif /* ifndef __SPI_CONTROLLER_H__ */
+/* End of [spi_controller.h] package */
+
Index: linux-3.18.21/drivers/mtd/chips/spi_ecc.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/chips/spi_ecc.c 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,583 @@
+#include "spi_ecc.h"
+#include <asm/io.h>
+#include <asm/tc3162/tc3162.h>
+#include <stdarg.h>
+#include <asm/string.h>
+
+/* MACRO DECLARATIONS ---------------------------------------------------------------- */
+
+/*******************************************************************************
+ * ECC Register Definition
+ *******************************************************************************/
+#define _SPI_ECC_REGS_BASE 0xBFA12000
+#define _SPI_ECC_REGS_ECCCON (_SPI_ECC_REGS_BASE + 0x0000)
+#define _SPI_ECC_REGS_ENCCNFG (_SPI_ECC_REGS_BASE + 0x0004)
+#define _SPI_ECC_REGS_ENCDIADDR (_SPI_ECC_REGS_BASE + 0x0008)
+#define _SPI_ECC_REGS_ENCIDLE (_SPI_ECC_REGS_BASE + 0x000C)
+#define _SPI_ECC_REGS_ENCPAR0 (_SPI_ECC_REGS_BASE + 0x0010)
+#define _SPI_ECC_REGS_ENCPAR1 (_SPI_ECC_REGS_BASE + 0x0014)
+#define _SPI_ECC_REGS_ENCPAR2 (_SPI_ECC_REGS_BASE + 0x0018)
+#define _SPI_ECC_REGS_ENCPAR3 (_SPI_ECC_REGS_BASE + 0x001C)
+#define _SPI_ECC_REGS_ENCPAR4 (_SPI_ECC_REGS_BASE + 0x0020)
+#define _SPI_ECC_REGS_ENCPAR5 (_SPI_ECC_REGS_BASE + 0x0024)
+#define _SPI_ECC_REGS_ENCPAR6 (_SPI_ECC_REGS_BASE + 0x0028)
+#define _SPI_ECC_REGS_ENCSTA (_SPI_ECC_REGS_BASE + 0x002C)
+#define _SPI_ECC_REGS_ENCIRQEN (_SPI_ECC_REGS_BASE + 0x0030)
+#define _SPI_ECC_REGS_ENCIRQSTA (_SPI_ECC_REGS_BASE + 0x0034)
+#define _SPI_ECC_REGS_PIO_DIRDY (_SPI_ECC_REGS_BASE + 0x0080)
+#define _SPI_ECC_REGS_PIO_DI (_SPI_ECC_REGS_BASE + 0x0084)
+#define _SPI_ECC_REGS_DECCON (_SPI_ECC_REGS_BASE + 0x0100)
+#define _SPI_ECC_REGS_DECCNFG (_SPI_ECC_REGS_BASE + 0x0104)
+#define _SPI_ECC_REGS_DECDIADDR (_SPI_ECC_REGS_BASE + 0x0108)
+#define _SPI_ECC_REGS_DECIDLE (_SPI_ECC_REGS_BASE + 0x010C)
+#define _SPI_ECC_REGS_DECFER (_SPI_ECC_REGS_BASE + 0x0110)
+#define _SPI_ECC_REGS_DECNUM0 (_SPI_ECC_REGS_BASE + 0x0114)
+#define _SPI_ECC_REGS_DECNUM1 (_SPI_ECC_REGS_BASE + 0x0118)
+#define _SPI_ECC_REGS_DECDONE (_SPI_ECC_REGS_BASE + 0x011C)
+#define _SPI_ECC_REGS_DECEL0 (_SPI_ECC_REGS_BASE + 0x0120)
+#define _SPI_ECC_REGS_DECEL1 (_SPI_ECC_REGS_BASE + 0x0124)
+#define _SPI_ECC_REGS_DECEL2 (_SPI_ECC_REGS_BASE + 0x0128)
+#define _SPI_ECC_REGS_DECEL3 (_SPI_ECC_REGS_BASE + 0x012C)
+#define _SPI_ECC_REGS_DECEL4 (_SPI_ECC_REGS_BASE + 0x0130)
+#define _SPI_ECC_REGS_DECEL5 (_SPI_ECC_REGS_BASE + 0x0134)
+#define _SPI_ECC_REGS_DECEL6 (_SPI_ECC_REGS_BASE + 0x0138)
+#define _SPI_ECC_REGS_DECEL7 (_SPI_ECC_REGS_BASE + 0x013C)
+#define _SPI_ECC_REGS_DECIRQEN (_SPI_ECC_REGS_BASE + 0x0140)
+#define _SPI_ECC_REGS_DECIRQSTA (_SPI_ECC_REGS_BASE + 0x0144)
+#define _SPI_ECC_REGS_DECFSM (_SPI_ECC_REGS_BASE + 0x014C)
+
+
+/*******************************************************************************
+ * ECC Register Field Definition
+ *******************************************************************************/
+
+/* ECC_ENCCON */
+#define _SPI_ECC_REGS_ECCCON_ENABLE (0x1)
+
+/* ECC_ENCCNFG */
+#define _SPI_ECC_REGS_ENCCNFG_ENCMS_MASK (0x1FF80000)
+#define _SPI_ECC_REGS_ENCCNFG_ENCMS_SHIFT (19)
+
+#define _SPI_ECC_REGS_ENCCNFG_ENCMODE_MASK (0x00000030)
+#define _SPI_ECC_REGS_ENCCNFG_ENCMODE_SHIFT (4)
+#define _SPI_ECC_REGS_ENCCNFG_ENCMODE_NFIMODE (0x01)
+
+#define _SPI_ECC_REGS_ENCCNFG_ENCTNUM_MASK (0x00000007)
+#define _SPI_ECC_REGS_ENCCNFG_ENCTNUM_SHIFT (0)
+#define _SPI_ECC_REGS_ENCCNFG_ENCTNUM_4BITS (0)
+#define _SPI_ECC_REGS_ENCCNFG_ENCTNUM_6BITS (1)
+#define _SPI_ECC_REGS_ENCCNFG_ENCTNUM_8BITS (2)
+#define _SPI_ECC_REGS_ENCCNFG_ENCTNUM_10BITS (3)
+#define _SPI_ECC_REGS_ENCCNFG_ENCTNUM_12BITS (4)
+#define _SPI_ECC_REGS_ENCCNFG_ENCTNUM_14BITS (5)
+#define _SPI_ECC_REGS_ENCCNFG_ENCTNUM_16BITS (6)
+
+/* ECC_ENCIDLE */
+#define _SPI_ECC_REGS_ENCIDLE_STAT_PROCESSING (0)
+#define _SPI_ECC_REGS_ENCIDLE_STAT_IDLE (1)
+
+/* ECC_ENCODE_IRQEN*/
+#define _SPI_ECC_REGS_ENCIRQEN_IRQEN (0x1)
+
+/*ECC_ENCODE_IRQSTATUS */
+#define _SPI_ECC_REGS_ENCIRQSTA_PROCESSING (0)
+#define _SPI_ECC_REGS_ENCIRQSTA_DONE (1)
+
+
+
+/* ECC_DECCON */
+#define _SPI_ECC_REGS_DECCON_ENABLE (0x1)
+
+/* ECC_DECCNFG */
+#define _SPI_ECC_REGS_DECCNFG_DECMS_MASK (0x1FFF0000)
+#define _SPI_ECC_REGS_DECCNFG_DECMS_SHIFT (16)
+
+#define _SPI_ECC_REGS_DECCNFG_DECCON_MASK (0x00003000)
+#define _SPI_ECC_REGS_DECCNFG_DECCON_SHIFT (12)
+#define _SPI_ECC_REGS_DECCNFG_DECCON_VALUE (0x3)
+
+#define _SPI_ECC_REGS_DECCNFG_DECMODE_MASK (0x00000030)
+#define _SPI_ECC_REGS_DECCNFG_DECMODE_SHIFT (4)
+#define _SPI_ECC_REGS_DECCNFG_DECMODE_NFIMODE (0x01)
+
+#define _SPI_ECC_REGS_DECCNFG_DECEMPTY_MASK (0x80000000)
+#define _SPI_ECC_REGS_DECCNFG_DECEMPTY_SHIFT (31)
+#define _SPI_ECC_REGS_DECCNFG_DECEMPTY_VALUE (0x1)
+
+
+
+#define _SPI_ECC_REGS_DECCNFG_DECTNUM_MASK (0x00000007)
+#define _SPI_ECC_REGS_DECCNFG_DECTNUM_SHIFT (0)
+#define _SPI_ECC_REGS_DECCNFG_DECTNUM_4BITS (0)
+#define _SPI_ECC_REGS_DECCNFG_DECTNUM_6BITS (1)
+#define _SPI_ECC_REGS_DECCNFG_DECTNUM_8BITS (2)
+#define _SPI_ECC_REGS_DECCNFG_DECTNUM_10BITS (3)
+#define _SPI_ECC_REGS_DECCNFG_DECTNUM_12BITS (4)
+#define _SPI_ECC_REGS_DECCNFG_DECTNUM_14BITS (5)
+#define _SPI_ECC_REGS_DECCNFG_DECTNUM_16BITS (6)
+
+/* ECC_DECIDLE */
+#define _SPI_ECC_REGS_DECIDLE_STAT_PROCESSING (0)
+#define _SPI_ECC_REGS_DECIDLE_STAT_IDLE (1)
+
+/* ECC_DECODE_IRQEN*/
+#define _SPI_ECC_REGS_DECIRQEN_IRQEN (0x1)
+
+/*ECC_DECODE_IRQSTATUS */
+#define _SPI_ECC_REGS_DECIRQSTA_PROCESSING (0)
+#define _SPI_ECC_REGS_DECIRQSTA_DONE (1)
+
+
+/* ECC_DECODE NUM0 */
+#define _SPI_ECC_REGS_DECNUM0_ERRNUM0_MASK (0x1F)
+#define _SPI_ECC_REGS_DECNUM0_ERRNUM0_SHIFT (0)
+
+#define _SPI_ECC_REGS_DECNUM0_ERRNUM1_MASK (0x3E0)
+#define _SPI_ECC_REGS_DECNUM0_ERRNUM1_SHIFT (5)
+
+#define _SPI_ECC_REGS_DECNUM0_ERRNUM2_MASK (0x7C00)
+#define _SPI_ECC_REGS_DECNUM0_ERRNUM2_SHIFT (10)
+
+#define _SPI_ECC_REGS_DECNUM0_ERRNUM3_MASK (0x000F8000)
+#define _SPI_ECC_REGS_DECNUM0_ERRNUM3_SHIFT (15)
+
+/* ECC_DECODE NUM1 */
+#define _SPI_ECC_REGS_DECNUM1_ERRNUM4_MASK (0x1F)
+#define _SPI_ECC_REGS_DECNUM1_ERRNUM4_SHIFT (0)
+
+#define _SPI_ECC_REGS_DECNUM1_ERRNUM5_MASK (0x3E0)
+#define _SPI_ECC_REGS_DECNUM1_ERRNUM5_SHIFT (5)
+
+#define _SPI_ECC_REGS_DECNUM1_ERRNUM6_MASK (0x7C00)
+#define _SPI_ECC_REGS_DECNUM1_ERRNUM6_SHIFT (10)
+
+#define _SPI_ECC_REGS_DECNUM1_ERRNUM7_MASK (0x000F8000)
+#define _SPI_ECC_REGS_DECNUM1_ERRNUM7_SHIFT (15)
+
+#define _SPI_ECC_UNCORRECTABLE_VALUE (0x1F)
+
+
+#define READ_REGISTER_UINT32(reg) \
+ (*(volatile unsigned int * const)(reg))
+
+#define WRITE_REGISTER_UINT32(reg, val) \
+ (*(volatile unsigned int * const)(reg)) = (val)
+
+#define INREG32(x) READ_REGISTER_UINT32((unsigned int *)((void*)(x)))
+#define OUTREG32(x, y) WRITE_REGISTER_UINT32((unsigned int *)((void*)(x)), (unsigned int )(y))
+#define SETREG32(x, y) OUTREG32(x, INREG32(x)|(y))
+#define CLRREG32(x, y) OUTREG32(x, INREG32(x)&~(y))
+#define MASKREG32(x, y, z) OUTREG32(x, (INREG32(x)&~(y))|(z))
+
+#define _SPI_ECC_REG8_READ(addr) INREG32(addr)
+#define _SPI_ECC_REG8_WRITE(addr, data) OUTREG32(addr, data)
+#define _SPI_ECC_REG8_SETBITS(addr, data) SETREG32(addr, data)
+#define _SPI_ECC_REG8_CLRBITS(addr, data) CLRREG32(addr, data)
+#define _SPI_ECC_REG8_SETMASKBITS(addr, mask, data) MASKREG32(addr, mask, data)
+
+#define _SPI_ECC_REG16_READ(addr) INREG32(addr)
+#define _SPI_ECC_REG16_WRITE(addr, data) OUTREG32(addr, data)
+#define _SPI_ECC_REG16_SETBITS(addr, data) SETREG32(addr, data)
+#define _SPI_ECC_REG16_CLRBITS(addr, data) CLRREG32(addr, data)
+#define _SPI_ECC_REG16_SETMASKBITS(addr, mask, data) MASKREG32(addr, mask, data)
+
+#define _SPI_ECC_REG32_READ(addr) INREG32(addr)
+#define _SPI_ECC_REG32_WRITE(addr, data) OUTREG32(addr, data)
+#define _SPI_ECC_REG32_SETBITS(addr, data) SETREG32(addr, data)
+#define _SPI_ECC_REG32_CLRBITS(addr, data) CLRREG32(addr, data)
+#define _SPI_ECC_REG32_SETMASKBITS(addr, mask, data) MASKREG32(addr, mask, data)
+
+
+#define _SPI_ECC_GET_ENCODE_INFO_PTR &(_spi_ecc_encode_conf_info_t)
+#define _SPI_ECC_GET_DECODE_INFO_PTR &(_spi_ecc_decode_conf_info_t)
+
+#define _SPI_ECC_PRINTF printk
+#define _SPI_ECC_DEBUG_PRINTF spi_ecc_debug_printf
+#define _SPI_ECC_MEMCPY memcpy
+
+/* TYPE DECLARATIONS ----------------------------------------------------------------- */
+
+
+/* STATIC VARIABLE DECLARATIONS ------------------------------------------------------ */
+SPI_ECC_ENCODE_CONF_T _spi_ecc_encode_conf_info_t;
+SPI_ECC_DECODE_CONF_T _spi_ecc_decode_conf_info_t;
+u8 _SPI_ECC_DEBUG_FLAG = 0; /* For control printf debug message or not */
+
+/* LOCAL SUBPROGRAM BODIES------------------------------------------------------------ */
+static void spi_ecc_debug_printf( char *fmt, ... ) /* emit a formatted debug message via printk when _SPI_ECC_DEBUG_FLAG is set */
+{
+	if( _SPI_ECC_DEBUG_FLAG == 1 )
+	{
+		char str_buf[100]; /* char (not unsigned char) matches vsnprintf()/printk("%s") */
+		va_list argptr;
+		int cnt;
+
+		va_start(argptr, fmt);
+		cnt = vsnprintf(str_buf, sizeof(str_buf), fmt, argptr); /* bounded: vsprintf could overflow str_buf on a long message */
+		va_end(argptr);
+
+		printk("%s", str_buf);
+	}
+}
+
+/* EXPORTED SUBPROGRAM BODIES -------------------------------------------------------- */
+SPI_ECC_RTN_T SPI_ECC_Regs_Dump( void )
+{
+	u32 idx;
+
+	for(idx = _SPI_ECC_REGS_BASE ; idx <= _SPI_ECC_REGS_DECFSM ; idx +=4) /* print every ECC register (4-byte stride) up to DECFSM */
+	{
+		_SPI_ECC_PRINTF("reg(0x%x) = 0x%x\n", idx, _SPI_ECC_REG32_READ(idx) );
+	}
+
+	return (SPI_ECC_RTN_NO_ERROR);
+}
+
+SPI_ECC_RTN_T SPI_ECC_Encode_Check_Idle( SPI_ECC_ENCODE_STATUS_T *prt_rtn_encode_status_t )
+{
+	if( _SPI_ECC_REG16_READ(_SPI_ECC_REGS_ENCIDLE) == _SPI_ECC_REGS_ENCIDLE_STAT_PROCESSING ) /* encoder still busy */
+	{
+		*prt_rtn_encode_status_t = SPI_ECC_ENCODE_STATUS_PROCESSING ;
+	}
+	if( _SPI_ECC_REG16_READ(_SPI_ECC_REGS_ENCIDLE) == _SPI_ECC_REGS_ENCIDLE_STAT_IDLE ) /* NOTE(review): register read twice; output untouched if neither value matches */
+	{
+		*prt_rtn_encode_status_t = SPI_ECC_ENCODE_STATUS_IDLE ;
+	}
+
+	return (SPI_ECC_RTN_NO_ERROR);
+}
+
+SPI_ECC_RTN_T SPI_ECC_Encode_Check_Done( SPI_ECC_ENCODE_STATUS_T *prt_rtn_encode_status_t )
+{
+	if( _SPI_ECC_REG16_READ(_SPI_ECC_REGS_ENCIRQSTA) == _SPI_ECC_REGS_ENCIRQSTA_PROCESSING ) /* encode still in progress */
+	{
+		*prt_rtn_encode_status_t = SPI_ECC_ENCODE_STATUS_PROCESSING ;
+	}
+	if( _SPI_ECC_REG16_READ(_SPI_ECC_REGS_ENCIRQSTA) == _SPI_ECC_REGS_ENCIRQSTA_DONE ) /* NOTE(review): register read twice; output untouched if neither value matches */
+	{
+		*prt_rtn_encode_status_t = SPI_ECC_ENCODE_STATUS_DONE ;
+	}
+
+	return (SPI_ECC_RTN_NO_ERROR);
+}
+
+SPI_ECC_RTN_T SPI_ECC_Encode_Get_Configure( SPI_ECC_ENCODE_CONF_T *ptr_rtn_encode_conf_t )
+{
+	SPI_ECC_ENCODE_CONF_T * encode_conf_t;
+
+	encode_conf_t = _SPI_ECC_GET_ENCODE_INFO_PTR; /* module-level cached encode configuration */
+
+	/* Copy the cached configuration into the caller-supplied structure. */
+	_SPI_ECC_MEMCPY(ptr_rtn_encode_conf_t, encode_conf_t, sizeof(SPI_ECC_ENCODE_CONF_T));
+
+	return (SPI_ECC_RTN_NO_ERROR);
+}
+
+SPI_ECC_RTN_T SPI_ECC_Encode_Set_Configure( SPI_ECC_ENCODE_CONF_T *ptr_encode_conf_t )
+{
+	/* Cache the caller's encode configuration, then program ENCCNFG to match it. */
+	SPI_ECC_ENCODE_CONF_T *spi_ecc_encode_info_t;
+
+	/* Store the new setting in the module-level copy (read back via SPI_ECC_Encode_Get_Configure) */
+	spi_ecc_encode_info_t = _SPI_ECC_GET_ENCODE_INFO_PTR;
+	_SPI_ECC_MEMCPY(spi_ecc_encode_info_t, ptr_encode_conf_t, sizeof(SPI_ECC_ENCODE_CONF_T));
+
+	/* Program the encode block (message) size field of ENCCNFG */
+	_SPI_ECC_REG32_SETMASKBITS( _SPI_ECC_REGS_ENCCNFG, _SPI_ECC_REGS_ENCCNFG_ENCMS_MASK, \
+		(ptr_encode_conf_t-> encode_block_size) << _SPI_ECC_REGS_ENCCNFG_ENCMS_SHIFT );
+
+	/* Program the ECC correction ability (T number) field of ENCCNFG */
+	if( (ptr_encode_conf_t->encode_ecc_abiliry) == SPI_ECC_ENCODE_ABILITY_4BITS )
+	{
+		_SPI_ECC_REG32_SETMASKBITS(_SPI_ECC_REGS_ENCCNFG, _SPI_ECC_REGS_ENCCNFG_ENCTNUM_MASK, \
+			_SPI_ECC_REGS_ENCCNFG_ENCTNUM_4BITS << _SPI_ECC_REGS_ENCCNFG_ENCTNUM_SHIFT );
+	}
+	if( (ptr_encode_conf_t->encode_ecc_abiliry) == SPI_ECC_ENCODE_ABILITY_6BITS )
+	{
+		_SPI_ECC_REG32_SETMASKBITS(_SPI_ECC_REGS_ENCCNFG, _SPI_ECC_REGS_ENCCNFG_ENCTNUM_MASK, \
+			_SPI_ECC_REGS_ENCCNFG_ENCTNUM_6BITS << _SPI_ECC_REGS_ENCCNFG_ENCTNUM_SHIFT );
+	}
+	if( (ptr_encode_conf_t->encode_ecc_abiliry) == SPI_ECC_ENCODE_ABILITY_8BITS )
+	{
+		_SPI_ECC_REG32_SETMASKBITS(_SPI_ECC_REGS_ENCCNFG, _SPI_ECC_REGS_ENCCNFG_ENCTNUM_MASK, \
+			_SPI_ECC_REGS_ENCCNFG_ENCTNUM_8BITS << _SPI_ECC_REGS_ENCCNFG_ENCTNUM_SHIFT );
+	}
+	if( (ptr_encode_conf_t->encode_ecc_abiliry) == SPI_ECC_ENCODE_ABILITY_10BITS )
+	{
+		_SPI_ECC_REG32_SETMASKBITS(_SPI_ECC_REGS_ENCCNFG, _SPI_ECC_REGS_ENCCNFG_ENCTNUM_MASK, \
+			_SPI_ECC_REGS_ENCCNFG_ENCTNUM_10BITS << _SPI_ECC_REGS_ENCCNFG_ENCTNUM_SHIFT );
+	}
+	if( (ptr_encode_conf_t->encode_ecc_abiliry) == SPI_ECC_ENCODE_ABILITY_12BITS )
+	{
+		_SPI_ECC_REG32_SETMASKBITS(_SPI_ECC_REGS_ENCCNFG, _SPI_ECC_REGS_ENCCNFG_ENCTNUM_MASK, \
+			_SPI_ECC_REGS_ENCCNFG_ENCTNUM_12BITS << _SPI_ECC_REGS_ENCCNFG_ENCTNUM_SHIFT );
+	}
+	if( (ptr_encode_conf_t->encode_ecc_abiliry) == SPI_ECC_ENCODE_ABILITY_14BITS )
+	{
+		_SPI_ECC_REG32_SETMASKBITS(_SPI_ECC_REGS_ENCCNFG, _SPI_ECC_REGS_ENCCNFG_ENCTNUM_MASK, \
+			_SPI_ECC_REGS_ENCCNFG_ENCTNUM_14BITS << _SPI_ECC_REGS_ENCCNFG_ENCTNUM_SHIFT );
+	}
+	if( (ptr_encode_conf_t->encode_ecc_abiliry) == SPI_ECC_ENCODE_ABILITY_16BITS )
+	{
+		_SPI_ECC_REG32_SETMASKBITS(_SPI_ECC_REGS_ENCCNFG, _SPI_ECC_REGS_ENCCNFG_ENCTNUM_MASK, \
+			_SPI_ECC_REGS_ENCCNFG_ENCTNUM_16BITS << _SPI_ECC_REGS_ENCCNFG_ENCTNUM_SHIFT );
+	}
+
+	return (SPI_ECC_RTN_NO_ERROR);
+}
+
+SPI_ECC_RTN_T SPI_ECC_Encode_Enable( void)
+{
+	_SPI_ECC_REG16_SETBITS(_SPI_ECC_REGS_ECCCON, _SPI_ECC_REGS_ECCCON_ENABLE); /* set the ECC encoder enable bit in ECCCON */
+	_SPI_ECC_DEBUG_PRINTF("SPI_ECC_Encode_Enable : encode enable reg(0x%x) = 0x%x\n", _SPI_ECC_REGS_ECCCON, _SPI_ECC_REG16_READ(_SPI_ECC_REGS_ECCCON) ); /* fixed: log was tagged with the wrong function name */
+
+	return (SPI_ECC_RTN_NO_ERROR);
+}
+
+SPI_ECC_RTN_T SPI_ECC_Encode_Disable( void)
+{
+	_SPI_ECC_REG16_CLRBITS(_SPI_ECC_REGS_ECCCON, _SPI_ECC_REGS_ECCCON_ENABLE); /* clear the ECC encoder enable bit in ECCCON */
+	_SPI_ECC_DEBUG_PRINTF("SPI_ECC_Encode_Disable : encode disable reg(0x%x) = 0x%x\n", _SPI_ECC_REGS_ECCCON, _SPI_ECC_REG16_READ(_SPI_ECC_REGS_ECCCON) ); /* fixed: log was tagged with the wrong function name */
+	return (SPI_ECC_RTN_NO_ERROR);
+}
+
+SPI_ECC_RTN_T SPI_ECC_Encode_Init( void )
+{
+	/* Select NFI mode for the ECC encoder */
+	_SPI_ECC_REG32_SETMASKBITS( _SPI_ECC_REGS_ENCCNFG, _SPI_ECC_REGS_ENCCNFG_ENCMODE_MASK, \
+		_SPI_ECC_REGS_ENCCNFG_ENCMODE_NFIMODE << _SPI_ECC_REGS_ENCCNFG_ENCMODE_SHIFT );
+
+	/* Enable the encoder IRQ function (ENCIRQSTA is polled by SPI_ECC_Encode_Check_Done) */
+	_SPI_ECC_REG16_SETBITS( _SPI_ECC_REGS_ENCIRQEN, _SPI_ECC_REGS_ENCIRQEN_IRQEN);
+
+	return (SPI_ECC_RTN_NO_ERROR);
+
+}
+
+/*******************************************************************************************************/
+/*******************************************************************************************************/
+
+SPI_ECC_RTN_T SPI_ECC_Decode_Check_Idle( SPI_ECC_DECODE_STATUS_T *prt_rtn_decode_status_t )
+{
+	if( _SPI_ECC_REG16_READ(_SPI_ECC_REGS_DECIDLE) == _SPI_ECC_REGS_DECIDLE_STAT_PROCESSING ) /* decoder still busy */
+	{
+		*prt_rtn_decode_status_t = SPI_ECC_DECODE_STATUS_PROCESSING ;
+	}
+	if( _SPI_ECC_REG16_READ(_SPI_ECC_REGS_DECIDLE) == _SPI_ECC_REGS_DECIDLE_STAT_IDLE ) /* NOTE(review): register read twice; output untouched if neither value matches */
+	{
+		*prt_rtn_decode_status_t = SPI_ECC_DECODE_STATUS_IDLE ;
+	}
+
+	return (SPI_ECC_RTN_NO_ERROR);
+}
+
+SPI_ECC_RTN_T SPI_ECC_Decode_Check_Done( SPI_ECC_DECODE_STATUS_T *prt_rtn_decode_status_t )
+{
+	u32 ret_val = 0;
+
+	ret_val = _SPI_ECC_REG16_READ(_SPI_ECC_REGS_DECIRQSTA); /* decoder IRQ status; any non-zero value is treated as "done" */
+
+	if( ret_val != 0 ) /* NOTE(review): *prt_rtn_decode_status_t is left untouched while decode is still running */
+	{
+		_SPI_ECC_DEBUG_PRINTF("SPI_ECC_Decode_Check_Done : decode done, ret_val = 0x%x\n", ret_val);
+		*prt_rtn_decode_status_t = SPI_ECC_DECODE_STATUS_DONE ;
+	}
+
+	return (SPI_ECC_RTN_NO_ERROR);
+}
+
+SPI_ECC_RTN_T SPI_ECC_DECODE_Check_Correction_Status( void )
+{
+
+ u32 dec_err_number_reg0;
+ u32 dec_err_number_reg1;
+ SPI_ECC_RTN_T rtn_status = SPI_ECC_RTN_NO_ERROR;
+
+
+ dec_err_number_reg0 = _SPI_ECC_REG32_READ(_SPI_ECC_REGS_DECNUM0);
+
+	/* Sector 0 can be correctable or not */
+ if( ((dec_err_number_reg0 & _SPI_ECC_REGS_DECNUM0_ERRNUM0_MASK ) \
+ >> _SPI_ECC_REGS_DECNUM0_ERRNUM0_SHIFT) == _SPI_ECC_UNCORRECTABLE_VALUE )
+ {
+ _SPI_ECC_PRINTF("SPI_ECC_DECODE_Check_Correction_Status : sector 0 uncorrectable.\n");
+ rtn_status = SPI_ECC_RTN_CORRECTION_ERROR;
+ }
+
+	/* Sector 1 can be correctable or not */
+ if( ((dec_err_number_reg0 & _SPI_ECC_REGS_DECNUM0_ERRNUM1_MASK ) \
+ >> _SPI_ECC_REGS_DECNUM0_ERRNUM1_SHIFT) == _SPI_ECC_UNCORRECTABLE_VALUE )
+ {
+ _SPI_ECC_PRINTF("SPI_ECC_DECODE_Check_Correction_Status : sector 1 uncorrectable.\n");
+ rtn_status = SPI_ECC_RTN_CORRECTION_ERROR;
+ }
+
+	/* Sector 2 can be correctable or not */
+ if( ((dec_err_number_reg0 & _SPI_ECC_REGS_DECNUM0_ERRNUM2_MASK ) \
+ >> _SPI_ECC_REGS_DECNUM0_ERRNUM2_SHIFT) == _SPI_ECC_UNCORRECTABLE_VALUE )
+ {
+ _SPI_ECC_PRINTF("SPI_ECC_DECODE_Check_Correction_Status : sector 2 uncorrectable.\n");
+ rtn_status = SPI_ECC_RTN_CORRECTION_ERROR;
+ }
+
+	/* Sector 3 can be correctable or not */
+ if( ((dec_err_number_reg0 & _SPI_ECC_REGS_DECNUM0_ERRNUM3_MASK ) \
+ >> _SPI_ECC_REGS_DECNUM0_ERRNUM3_SHIFT) == _SPI_ECC_UNCORRECTABLE_VALUE )
+ {
+ _SPI_ECC_PRINTF("SPI_ECC_DECODE_Check_Correction_Status : sector 3 uncorrectable.\n");
+ rtn_status = SPI_ECC_RTN_CORRECTION_ERROR;
+ }
+
+
+ dec_err_number_reg1 = _SPI_ECC_REG32_READ(_SPI_ECC_REGS_DECNUM1);
+
+	/* Sector 4 can be correctable or not */
+ if( ((dec_err_number_reg1 & _SPI_ECC_REGS_DECNUM1_ERRNUM4_MASK ) \
+ >> _SPI_ECC_REGS_DECNUM1_ERRNUM4_SHIFT) == _SPI_ECC_UNCORRECTABLE_VALUE )
+ {
+ _SPI_ECC_PRINTF("SPI_ECC_DECODE_Check_Correction_Status : sector 4 uncorrectable.\n");
+ rtn_status = SPI_ECC_RTN_CORRECTION_ERROR;
+ }
+	/* Sector 5 can be correctable or not */
+ if( ((dec_err_number_reg1 & _SPI_ECC_REGS_DECNUM1_ERRNUM5_MASK ) \
+ >> _SPI_ECC_REGS_DECNUM1_ERRNUM5_SHIFT) == _SPI_ECC_UNCORRECTABLE_VALUE )
+ {
+ _SPI_ECC_PRINTF("SPI_ECC_DECODE_Check_Correction_Status : sector 5 uncorrectable.\n");
+ rtn_status = SPI_ECC_RTN_CORRECTION_ERROR;
+ }
+	/* Sector 6 can be correctable or not */
+ if( ((dec_err_number_reg1 & _SPI_ECC_REGS_DECNUM1_ERRNUM6_MASK ) \
+ >> _SPI_ECC_REGS_DECNUM1_ERRNUM6_SHIFT) == _SPI_ECC_UNCORRECTABLE_VALUE )
+ {
+ _SPI_ECC_PRINTF("SPI_ECC_DECODE_Check_Correction_Status : sector 6 uncorrectable.\n");
+ rtn_status = SPI_ECC_RTN_CORRECTION_ERROR;
+ }
+	/* Sector 7 can be correctable or not */
+ if( ((dec_err_number_reg1 & _SPI_ECC_REGS_DECNUM1_ERRNUM7_MASK ) \
+ >> _SPI_ECC_REGS_DECNUM1_ERRNUM7_SHIFT) == _SPI_ECC_UNCORRECTABLE_VALUE )
+ {
+ _SPI_ECC_PRINTF("SPI_ECC_DECODE_Check_Correction_Status : sector 7 uncorrectable.\n");
+ rtn_status = SPI_ECC_RTN_CORRECTION_ERROR;
+ }
+
+ return ( rtn_status );
+
+}
+
+
+SPI_ECC_RTN_T SPI_ECC_Decode_Get_Configure( SPI_ECC_DECODE_CONF_T *ptr_rtn_decode_conf_t )
+{
+ SPI_ECC_DECODE_CONF_T *decode_conf_t;
+
+ decode_conf_t = _SPI_ECC_GET_DECODE_INFO_PTR;
+
+
+ _SPI_ECC_MEMCPY(ptr_rtn_decode_conf_t, decode_conf_t, sizeof(SPI_ECC_DECODE_CONF_T));
+
+
+ return (SPI_ECC_RTN_NO_ERROR);
+}
+
+
+SPI_ECC_RTN_T SPI_ECC_Decode_Set_Configure( SPI_ECC_DECODE_CONF_T *ptr_decode_conf_t )
+{
+
+ SPI_ECC_DECODE_CONF_T *spi_ecc_decode_info_t;
+
+ /* Store new setting */
+ spi_ecc_decode_info_t = _SPI_ECC_GET_DECODE_INFO_PTR;
+ _SPI_ECC_MEMCPY(spi_ecc_decode_info_t, ptr_decode_conf_t, sizeof(SPI_ECC_DECODE_CONF_T));
+
+
+ /* Set Block size */
+ _SPI_ECC_REG32_SETMASKBITS( _SPI_ECC_REGS_DECCNFG, _SPI_ECC_REGS_DECCNFG_DECMS_MASK, \
+ (ptr_decode_conf_t-> decode_block_size) << _SPI_ECC_REGS_DECCNFG_DECMS_SHIFT );
+
+ /* Set ECC Ability */
+ if( (ptr_decode_conf_t->decode_ecc_abiliry) == SPI_ECC_DECODE_ABILITY_4BITS )
+ {
+ _SPI_ECC_REG32_SETMASKBITS(_SPI_ECC_REGS_DECCNFG, _SPI_ECC_REGS_DECCNFG_DECTNUM_MASK, \
+ _SPI_ECC_REGS_DECCNFG_DECTNUM_4BITS << _SPI_ECC_REGS_DECCNFG_DECTNUM_SHIFT );
+ }
+ if( (ptr_decode_conf_t->decode_ecc_abiliry) == SPI_ECC_DECODE_ABILITY_6BITS )
+ {
+ _SPI_ECC_REG32_SETMASKBITS(_SPI_ECC_REGS_DECCNFG, _SPI_ECC_REGS_DECCNFG_DECTNUM_MASK, \
+ _SPI_ECC_REGS_DECCNFG_DECTNUM_6BITS << _SPI_ECC_REGS_DECCNFG_DECTNUM_SHIFT );
+ }
+ if( (ptr_decode_conf_t->decode_ecc_abiliry) == SPI_ECC_DECODE_ABILITY_8BITS )
+ {
+ _SPI_ECC_REG32_SETMASKBITS(_SPI_ECC_REGS_DECCNFG, _SPI_ECC_REGS_DECCNFG_DECTNUM_MASK, \
+ _SPI_ECC_REGS_DECCNFG_DECTNUM_8BITS << _SPI_ECC_REGS_DECCNFG_DECTNUM_SHIFT );
+ }
+ if( (ptr_decode_conf_t->decode_ecc_abiliry) == SPI_ECC_DECODE_ABILITY_10BITS )
+ {
+ _SPI_ECC_REG32_SETMASKBITS(_SPI_ECC_REGS_DECCNFG, _SPI_ECC_REGS_DECCNFG_DECTNUM_MASK, \
+ _SPI_ECC_REGS_DECCNFG_DECTNUM_10BITS << _SPI_ECC_REGS_DECCNFG_DECTNUM_SHIFT );
+ }
+ if( (ptr_decode_conf_t->decode_ecc_abiliry) == SPI_ECC_DECODE_ABILITY_12BITS )
+ {
+ _SPI_ECC_REG32_SETMASKBITS(_SPI_ECC_REGS_DECCNFG, _SPI_ECC_REGS_DECCNFG_DECTNUM_MASK, \
+ _SPI_ECC_REGS_DECCNFG_DECTNUM_12BITS << _SPI_ECC_REGS_DECCNFG_DECTNUM_SHIFT );
+ }
+ if( (ptr_decode_conf_t->decode_ecc_abiliry) == SPI_ECC_DECODE_ABILITY_14BITS )
+ {
+ _SPI_ECC_REG32_SETMASKBITS(_SPI_ECC_REGS_DECCNFG, _SPI_ECC_REGS_DECCNFG_DECTNUM_MASK, \
+ _SPI_ECC_REGS_DECCNFG_DECTNUM_14BITS << _SPI_ECC_REGS_DECCNFG_DECTNUM_SHIFT );
+ }
+ if( (ptr_decode_conf_t->decode_ecc_abiliry) == SPI_ECC_DECODE_ABILITY_16BITS )
+ {
+ _SPI_ECC_REG32_SETMASKBITS(_SPI_ECC_REGS_DECCNFG, _SPI_ECC_REGS_DECCNFG_DECTNUM_MASK, \
+ _SPI_ECC_REGS_DECCNFG_DECTNUM_16BITS << _SPI_ECC_REGS_DECCNFG_DECTNUM_SHIFT );
+ }
+
+ return (SPI_ECC_RTN_NO_ERROR);
+}
+
+
+SPI_ECC_RTN_T SPI_ECC_Decode_Enable( void)
+{
+ _SPI_ECC_REG16_SETBITS(_SPI_ECC_REGS_DECCON, _SPI_ECC_REGS_DECCON_ENABLE);
+ return (SPI_ECC_RTN_NO_ERROR);
+}
+
+SPI_ECC_RTN_T SPI_ECC_Decode_Disable( void)
+{
+ _SPI_ECC_REG16_CLRBITS(_SPI_ECC_REGS_DECCON, _SPI_ECC_REGS_DECCON_ENABLE);
+ return (SPI_ECC_RTN_NO_ERROR);
+}
+
+
+SPI_ECC_RTN_T SPI_ECC_Decode_Init( void )
+{
+
+ /* Set Decode Mode as NFI mode */
+ _SPI_ECC_REG32_SETMASKBITS( _SPI_ECC_REGS_DECCNFG, _SPI_ECC_REGS_DECCNFG_DECMODE_MASK, \
+ _SPI_ECC_REGS_DECCNFG_DECMODE_NFIMODE << _SPI_ECC_REGS_DECCNFG_DECMODE_SHIFT );
+
+	/* Set Decode Mode to have the ignore-empty-data function */
+ _SPI_ECC_REG32_SETMASKBITS( _SPI_ECC_REGS_DECCNFG, _SPI_ECC_REGS_DECCNFG_DECEMPTY_MASK, \
+ _SPI_ECC_REGS_DECCNFG_DECEMPTY_VALUE << _SPI_ECC_REGS_DECCNFG_DECEMPTY_SHIFT );
+
+
+	/* Set Decode to its most powerful ability */
+ _SPI_ECC_REG32_SETMASKBITS( _SPI_ECC_REGS_DECCNFG, _SPI_ECC_REGS_DECCNFG_DECCON_MASK, \
+ _SPI_ECC_REGS_DECCNFG_DECCON_VALUE << _SPI_ECC_REGS_DECCNFG_DECCON_SHIFT );
+
+ /* Enable Decoder IRQ function */
+ _SPI_ECC_REG16_SETBITS( _SPI_ECC_REGS_DECIRQEN, _SPI_ECC_REGS_DECIRQEN_IRQEN);
+
+
+ return (SPI_ECC_RTN_NO_ERROR);
+}
+
+void SPI_ECC_DEBUG_ENABLE( void )
+{
+ _SPI_ECC_DEBUG_FLAG = 1;
+}
+
+void SPI_ECC_DEBUG_DISABLE( void )
+{
+ _SPI_ECC_DEBUG_FLAG = 0;
+}
+
+
Index: linux-3.18.21/drivers/mtd/chips/spi_ecc.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/chips/spi_ecc.h 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,105 @@
+
+#include <asm/types.h>
+
+typedef enum
+{
+ SPI_ECC_ENCODE_DISABLE=0,
+ SPI_ECC_ENCODE_ENABLE
+} SPI_ECC_ENCODE_T;
+
+typedef enum
+{
+ SPI_ECC_ENCODE_ABILITY_4BITS = 4,
+ SPI_ECC_ENCODE_ABILITY_6BITS = 6,
+ SPI_ECC_ENCODE_ABILITY_8BITS = 8,
+ SPI_ECC_ENCODE_ABILITY_10BITS = 10,
+ SPI_ECC_ENCODE_ABILITY_12BITS = 12,
+ SPI_ECC_ENCODE_ABILITY_14BITS = 14,
+ SPI_ECC_ENCODE_ABILITY_16BITS = 16,
+} SPI_ECC_ENCODE_ABILITY_T;
+
+typedef enum
+{
+ SPI_ECC_ENCODE_STATUS_IDLE=0,
+ SPI_ECC_ENCODE_STATUS_PROCESSING,
+ SPI_ECC_ENCODE_STATUS_DONE,
+} SPI_ECC_ENCODE_STATUS_T;
+
+
+typedef struct SPI_ECC_ENCODE_CONF
+{
+ SPI_ECC_ENCODE_T encode_en; /* enable encode or not */
+ u32 encode_block_size; /* encode block size */
+	SPI_ECC_ENCODE_ABILITY_T	encode_ecc_abiliry;	/* Indicate the ECC correction ability */
+
+} SPI_ECC_ENCODE_CONF_T;
+
+
+typedef enum
+{
+ SPI_ECC_DECODE_DISABLE=0,
+ SPI_ECC_DECODE_ENABLE
+} SPI_ECC_DECODE_T;
+
+typedef enum
+{
+ SPI_ECC_DECODE_ABILITY_4BITS = 4,
+ SPI_ECC_DECODE_ABILITY_6BITS = 6,
+ SPI_ECC_DECODE_ABILITY_8BITS = 8,
+ SPI_ECC_DECODE_ABILITY_10BITS = 10,
+ SPI_ECC_DECODE_ABILITY_12BITS = 12,
+ SPI_ECC_DECODE_ABILITY_14BITS = 14,
+ SPI_ECC_DECODE_ABILITY_16BITS = 16,
+} SPI_ECC_DECODE_ABILITY_T;
+
+
+typedef struct SPI_ECC_DECODE_CONF
+{
+ SPI_ECC_DECODE_T decode_en; /* enable decode or not */
+ u32 decode_block_size; /* decode block size */
+	SPI_ECC_DECODE_ABILITY_T	decode_ecc_abiliry;	/* Indicate the ECC correction ability */
+
+} SPI_ECC_DECODE_CONF_T;
+
+
+typedef enum
+{
+ SPI_ECC_DECODE_STATUS_IDLE=0,
+ SPI_ECC_DECODE_STATUS_PROCESSING,
+ SPI_ECC_DECODE_STATUS_DONE,
+ SPI_ECC_DECODE_STATUS_TIMEOUT
+} SPI_ECC_DECODE_STATUS_T;
+
+typedef enum
+{
+ SPI_ECC_DECODE_CORRECTION_FAIL=0,
+ SPI_ECC_DECODE_CORRECTION_OK,
+
+} SPI_ECC_DECODE_CORRECTION_T;
+
+
+typedef enum{
+ SPI_ECC_RTN_NO_ERROR =0,
+ SPI_ECC_RTN_CORRECTION_ERROR,
+
+ SPI_ECC_RTN_DEF_NO
+} SPI_ECC_RTN_T;
+
+SPI_ECC_RTN_T SPI_ECC_Regs_Dump( void );
+SPI_ECC_RTN_T SPI_ECC_Encode_Check_Idle( SPI_ECC_ENCODE_STATUS_T *prt_rtn_encode_status_t );
+SPI_ECC_RTN_T SPI_ECC_Encode_Check_Done( SPI_ECC_ENCODE_STATUS_T *prt_rtn_encode_status_t );
+SPI_ECC_RTN_T SPI_ECC_Encode_Get_Configure( SPI_ECC_ENCODE_CONF_T *ptr_rtn_encode_conf_t );
+SPI_ECC_RTN_T SPI_ECC_Encode_Set_Configure( SPI_ECC_ENCODE_CONF_T *ptr_encode_conf_t );
+SPI_ECC_RTN_T SPI_ECC_Encode_Enable( void);
+SPI_ECC_RTN_T SPI_ECC_Encode_Disable( void);
+SPI_ECC_RTN_T SPI_ECC_Encode_Init( void );
+SPI_ECC_RTN_T SPI_ECC_Decode_Check_Idle( SPI_ECC_DECODE_STATUS_T *prt_rtn_decode_status_t );
+SPI_ECC_RTN_T SPI_ECC_Decode_Check_Done( SPI_ECC_DECODE_STATUS_T *prt_rtn_decode_status_t );
+SPI_ECC_RTN_T SPI_ECC_DECODE_Check_Correction_Status( void );
+SPI_ECC_RTN_T SPI_ECC_Decode_Get_Configure( SPI_ECC_DECODE_CONF_T *ptr_rtn_decode_conf_t );
+SPI_ECC_RTN_T SPI_ECC_Decode_Set_Configure( SPI_ECC_DECODE_CONF_T *ptr_decode_conf_t );
+SPI_ECC_RTN_T SPI_ECC_Decode_Enable( void);
+SPI_ECC_RTN_T SPI_ECC_Decode_Disable( void);
+SPI_ECC_RTN_T SPI_ECC_Decode_Init( void );
+void SPI_ECC_DEBUG_ENABLE( void );
+void SPI_ECC_DEBUG_DISABLE( void );
\ No newline at end of file
Index: linux-3.18.21/drivers/mtd/chips/spi_nand_flash.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/chips/spi_nand_flash.c 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,6136 @@
+/***************************************************************************************
+ * Copyright(c) 2014 ECONET Incorporation All rights reserved.
+ *
+ * This is unpublished proprietary source code of ECONET Networks Incorporation
+ *
+ * The copyright notice above does not evidence any actual or intended
+ * publication of such source code.
+ ***************************************************************************************
+ */
+
+/*======================================================================================
+ * MODULE NAME: spi
+ * FILE NAME: spi_nand_flash.c
+ * DATE: 2014/11/21
+ * VERSION: 1.00
+ * PURPOSE: To Provide SPI NAND Access interface.
+ * NOTES:
+ *
+ * AUTHOR : Chuck Kuo REVIEWED by
+ *
+ * FUNCTIONS
+ *
+ * SPI_NAND_Flash_Init To provide interface for SPI NAND init.
+ * SPI_NAND_Flash_Write_Nbyte To provide interface for Write N Bytes into SPI NAND Flash.
+ * SPI_NAND_Flash_Read_Byte To provide interface for read 1 Bytes from SPI NAND Flash.
+ * SPI_NAND_Flash_Read_DWord To provide interface for read Double Word from SPI NAND Flash.
+ * SPI_NAND_Flash_Read_NByte To provide interface for Read N Bytes from SPI NAND Flash.
+ * SPI_NAND_Flash_Erase To provide interface for Erase SPI NAND Flash.
+ *
+ * DEPENDENCIES
+ *
+ * * $History: $
+ * MODIFICATION HISTORY:
+ * Version 1.00 - Date 2014/11/21 By Chuck Kuo
+ * ** This is the first version, created to support the functions of
+ * current module.
+ *
+ *======================================================================================
+ */
+
+/* INCLUDE FILE DECLARATIONS --------------------------------------------------------- */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/semaphore.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/gen_probe.h>
+
+#include "spi_nand_flash.h"
+#include "spi_controller.h"
+#include <asm/io.h>
+#include <asm/tc3162/tc3162.h>
+#include <stdarg.h>
+
+#include "spi_nfi.h"
+#include "spi_ecc.h"
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/dma-mapping.h>
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
+#include <linux/proc_fs.h>
+#include <linux/time.h>
+
+#include <asm/io.h>
+#include <asm/cacheflush.h>
+#include <asm/uaccess.h>
+
+#include <linux/miscdevice.h>
+#include <linux/scatterlist.h>
+
+#include <linux/kthread.h>
+
+#include "../econet/bmt.h"
+
+
+/* Added for TCM used */
+//#include <asm/system.h>
+
+#include <linux/mtd/map.h>
+#include <asm/tc3162/tc3162.h>
+
+#if defined (TCSUPPORT_GPON_DUAL_IMAGE) || defined (TCSUPPORT_EPON_DUAL_IMAGE)
+#include "flash_layout/tc_partition.h"
+#endif
+
+#ifdef TCSUPPORT_NAND_BMT
+#define SLAVE_IMAGE_OFFSET 0xf00000
+extern int nand_logic_size;
+#endif
+
+
+DECLARE_MUTEX(SPI_NAND_SEM); /* Make sure all related SPI NAND operations are atomic */
+
+struct spi_chip_info {
+ struct spi_flash_info *flash;
+ void (*destroy)(struct spi_chip_info *chip_info);
+
+ u32 (*read)(struct map_info *map, u32 from, u32 to, u32 size);
+ u32 (*read_manual)(struct mtd_info *mtd, unsigned long from, unsigned char *buf, unsigned long len);
+ u32 (*write)(struct mtd_info *mtd, u32 from, u32 to, u32 size);
+ u32 (*erase)(struct mtd_info *mtd, u32 addr);
+};
+
+extern unsigned int (*ranand_read_byte)(unsigned long long);
+extern unsigned int (*ranand_read_dword)(unsigned long long);
+
+
+/* NAMING CONSTANT DECLARATIONS ------------------------------------------------------ */
+
+/* SPI NAND Command Set */
+#define _SPI_NAND_OP_GET_FEATURE 0x0F /* Get Feature */
+#define _SPI_NAND_OP_SET_FEATURE 0x1F /* Set Feature */
+#define _SPI_NAND_OP_PAGE_READ 0x13 /* Load page data into cache of SPI NAND chip */
+#define _SPI_NAND_OP_READ_FROM_CACHE_SINGLE 0x03 /* Read data from cache of SPI NAND chip, single speed*/
+#define _SPI_NAND_OP_READ_FROM_CACHE_DUAL 0x3B /* Read data from cache of SPI NAND chip, dual speed*/
+#define _SPI_NAND_OP_READ_FROM_CACHE_QUAD 0x6B /* Read data from cache of SPI NAND chip, quad speed*/
+#define _SPI_NAND_OP_WRITE_ENABLE 0x06 /* Enable write data to SPI NAND chip */
+#define _SPI_NAND_OP_WRITE_DISABLE			0x04		/* Resetting the Write Enable Latch (WEL) */
+#define _SPI_NAND_OP_PROGRAM_LOAD_SINGLE 0x02 /* Write data into cache of SPI NAND chip with cache reset, single speed */
+#define _SPI_NAND_OP_PROGRAM_LOAD_QUAD 0x32 /* Write data into cache of SPI NAND chip with cache reset, quad speed */
+#define _SPI_NAND_OP_PROGRAM_LOAD_RAMDOM_SINGLE 0x84 /* Write data into cache of SPI NAND chip, single speed */
+#define _SPI_NAND_OP_PROGRAM_LOAD_RAMDON_QUAD 0x34 /* Write data into cache of SPI NAND chip, quad speed */
+
+#define _SPI_NAND_OP_PROGRAM_EXECUTE 0x10 /* Write data from cache into SPI NAND chip */
+#define _SPI_NAND_OP_READ_ID 0x9F /* Read Manufacture ID and Device ID */
+#define _SPI_NAND_OP_BLOCK_ERASE 0xD8 /* Erase Block */
+#define _SPI_NAND_OP_RESET 0xFF /* Reset */
+#define _SPI_NAND_OP_DIE_SELECT 0xC2 /* Die Select */
+
+/* SPI NAND register address of command set */
+#define _SPI_NAND_ADDR_ECC 0x90 /* Address of ECC Config */
+#define _SPI_NAND_ADDR_PROTECTION 0xA0 /* Address of protection */
+#define _SPI_NAND_ADDR_FEATURE 0xB0 /* Address of feature */
+#define _SPI_NAND_ADDR_STATUS 0xC0 /* Address of status */
+#define _SPI_NAND_ADDR_FEATURE_4 0xD0 /* Address of status 4 */
+#define _SPI_NAND_ADDR_STATUS_5 0xE0 /* Address of status 5 */
+#define _SPI_NAND_ADDR_MANUFACTURE_ID 0x00 /* Address of Manufacture ID */
+#define _SPI_NAND_ADDR_DEVICE_ID 0x01 /* Address of Device ID */
+
+/* SPI NAND value of register address of command set */
+#define _SPI_NAND_VAL_DISABLE_PROTECTION 0x0 /* Value for disable write protection */
+#define _SPI_NAND_VAL_ENABLE_PROTECTION 0x38 /* Value for enable write protection */
+#define _SPI_NAND_VAL_OIP				0x1		/* OIP = Operation In Progress */
+#define _SPI_NAND_VAL_ERASE_FAIL 0x4 /* E_FAIL = Erase Fail */
+#define _SPI_NAND_VAL_PROGRAM_FAIL 0x8 /* P_FAIL = Program Fail */
+
+
+
+/* SPI NAND Size Define */
+#define _SPI_NAND_PAGE_SIZE_512 0x0200
+#define _SPI_NAND_PAGE_SIZE_2KBYTE 0x0800
+#define _SPI_NAND_PAGE_SIZE_4KBYTE 0x1000
+#define _SPI_NAND_OOB_SIZE_64BYTE 0x40
+#define _SPI_NAND_OOB_SIZE_120BYTE 0x78
+#define _SPI_NAND_OOB_SIZE_128BYTE 0x80
+#define _SPI_NAND_OOB_SIZE_256BYTE 0x100
+#define _SPI_NAND_BLOCK_SIZE_128KBYTE 0x20000
+#define _SPI_NAND_BLOCK_SIZE_256KBYTE 0x40000
+#define _SPI_NAND_CHIP_SIZE_512MBIT 0x04000000
+#define _SPI_NAND_CHIP_SIZE_1GBIT 0x08000000
+#define _SPI_NAND_CHIP_SIZE_2GBIT 0x10000000
+#define _SPI_NAND_CHIP_SIZE_4GBIT 0x20000000
+
+/* SPI NAND Manufacturers ID */
+#define _SPI_NAND_MANUFACTURER_ID_GIGADEVICE 0xC8
+#define _SPI_NAND_MANUFACTURER_ID_WINBOND 0xEF
+#define _SPI_NAND_MANUFACTURER_ID_ESMT 0xC8
+#define _SPI_NAND_MANUFACTURER_ID_MXIC 0xC2
+#define _SPI_NAND_MANUFACTURER_ID_ZENTEL 0xC8
+#define _SPI_NAND_MANUFACTURER_ID_ETRON 0xD5
+#define _SPI_NAND_MANUFACTURER_ID_TOSHIBA 0x98
+#define _SPI_NAND_MANUFACTURER_ID_MICRON 0x2C
+#define _SPI_NAND_MANUFACTURER_ID_HEYANG 0xC9
+#define _SPI_NAND_MANUFACTURER_ID_XTX 0xA1
+#define _SPI_NAND_MANUFACTURER_ID_ATO 0x9B
+#define _SPI_NAND_MANUFACTURER_ID_FM 0xA1
+
+
+/* SPI NAND Device ID */
+#define _SPI_NAND_DEVICE_ID_GD5F1GQ4UAYIG 0xF1
+#define _SPI_NAND_DEVICE_ID_GD5F1GQ4UBYIG 0xD1
+#define _SPI_NAND_DEVICE_ID_GD5F1GQ4UCYIG 0xB1
+#define _SPI_NAND_DEVICE_ID_GD5F2GQ4UBYIG 0xD2
+#define _SPI_NAND_DEVICE_ID_GD5F2GQ4UCYIG 0xB2
+#define _SPI_NAND_DEVICE_ID_GD5F4GQ4UBYIG 0xD4
+#define _SPI_NAND_DEVICE_ID_GD5F4GQ4UCYIG 0xB4
+#define _SPI_NAND_DEVICE_ID_F50L512M41A 0x20
+#define _SPI_NAND_DEVICE_ID_F50L1G41A0 0x21
+#define _SPI_NAND_DEVICE_ID_F50L1G41LB 0x01
+#define _SPI_NAND_DEVICE_ID_F50L2G41LB 0x0A
+#define _SPI_NAND_DEVICE_ID_W25N01GV 0xAA
+#define _SPI_NAND_DEVICE_ID_W25M02GV 0xAB
+#define _SPI_NAND_DEVICE_ID_MXIC35LF1GE4AB 0x12
+#define _SPI_NAND_DEVICE_ID_MXIC35LF2GE4AB 0x22
+#define _SPI_NAND_DEVICE_ID_MXIC35LF4GE4AB 0x32
+#define _SPI_NAND_DEVICE_ID_A5U12A21ASC 0x20
+#define _SPI_NAND_DEVICE_ID_A5U1GA21BWS 0x21
+#define _SPI_NAND_DEVICE_ID_EM73C044SNB 0x11
+#define _SPI_NAND_DEVICE_ID_EM73C044SNA 0x19
+#define _SPI_NAND_DEVICE_ID_EM73D044SNA 0x12
+#define _SPI_NAND_DEVICE_ID_EM73D044SNC 0x0A
+#define _SPI_NAND_DEVICE_ID_EM73E044SNA 0x03
+#define _SPI_NAND_DEVICE_ID_TC58CVG0S3H 0xC2
+#define _SPI_NAND_DEVICE_ID_TC58CVG1S3H 0xCB
+#define _SPI_NAND_DEVICE_ID_TC58CVG2S0H 0xCD
+#define _SPI_NAND_DEVICE_ID_MT29F1G01 0x14
+#define _SPI_NAND_DEVICE_ID_MT29F2G01 0x24
+#define _SPI_NAND_DEVICE_ID_MT29F4G01 0x36
+#define _SPI_NAND_DEVICE_ID_HYF1GQ4UAACAE 0x51
+#define _SPI_NAND_DEVICE_ID_HYF2GQ4UAACAE 0x52
+#define _SPI_NAND_DEVICE_ID_PN26G01AWSIUG 0xE1
+#define _SPI_NAND_DEVICE_ID_PN26G02AWSIUG 0xE2
+#define _SPI_NAND_DEVICE_ID_ATO25D1GA 0x12
+#define _SPI_NAND_DEVICE_ID_FM25G01B 0xD1
+#define _SPI_NAND_DEVICE_ID_FM25G02B 0xD2
+
+
+
+
+/* Others Define */
+#define _SPI_NAND_LEN_ONE_BYTE (1)
+#define _SPI_NAND_LEN_TWO_BYTE (2)
+#define _SPI_NAND_LEN_THREE_BYTE (3)
+#define _SPI_NAND_BLOCK_ROW_ADDRESS_OFFSET (6)
+
+#define _SPI_NAND_PAGE_SIZE 4096
+#define _SPI_NAND_OOB_SIZE 256
+#define _SPI_NAND_CACHE_SIZE (_SPI_NAND_PAGE_SIZE+_SPI_NAND_OOB_SIZE)
+
+#define EMPTY_DATA (0)
+#define NONE_EMPTY_DATA (1)
+#define EMPTY_OOB (0)
+#define NONE_EMPTY_OOB (1)
+
+#define _SPI_FREQUENCY_ADJUST_REG 0xBFA200CC
+
+#define LINUX_USE_OOB_START_OFFSET 4
+#define MAX_LINUX_USE_OOB_SIZE 26
+#define MAX_USE_OOB_SIZE (LINUX_USE_OOB_START_OFFSET + MAX_LINUX_USE_OOB_SIZE + 2)
+
+
+#define MIN(a,b) ((a) < (b) ? (a) : (b))
+
+#define K0_TO_K1(x) (((uint32)x) | 0xa0000000)
+#define K1_TO_PHY(x) (((uint32)x) & 0x1fffffff)
+
+/* MACRO DECLARATIONS ---------------------------------------------------------------- */
+#define _SPI_NAND_BLOCK_ALIGNED_CHECK( __addr__,__block_size__) ((__addr__) & ( __block_size__ - 1))
+#define _SPI_NAND_GET_DEVICE_INFO_PTR &(_current_flash_info_t)
+
+
+static DEFINE_SPINLOCK(spinandLock);
+
+
+
+/* Porting Replacement */
+#define _SPI_NAND_SEMAPHORE_LOCK() spin_lock_irqsave(&spinandLock, spinand_spinlock_flags) /* Disable interrupt */
+#define _SPI_NAND_SEMAPHORE_UNLOCK() spin_unlock_irqrestore(&spinandLock, spinand_spinlock_flags) /* Enable interrupt */
+#define _SPI_NAND_PRINTF printk
+#define _SPI_NAND_DEBUG_PRINTF spi_nand_flash_debug_printf
+#define _SPI_NAND_DEBUG_PRINTF_ARRAY spi_nand_flash_debug_printf_array
+#define _SPI_NAND_ENABLE_MANUAL_MODE SPI_CONTROLLER_Enable_Manual_Mode
+#define _SPI_NAND_WRITE_ONE_BYTE SPI_CONTROLLER_Write_One_Byte
+#define _SPI_NAND_WRITE_NBYTE SPI_CONTROLLER_Write_NByte
+#define _SPI_NAND_READ_NBYTE SPI_CONTROLLER_Read_NByte
+#define _SPI_NAND_READ_CHIP_SELECT_HIGH SPI_CONTROLLER_Chip_Select_High
+#define _SPI_NAND_READ_CHIP_SELECT_LOW SPI_CONTROLLER_Chip_Select_Low
+
+
+struct ra_nand_chip ra;
+struct nand_info flashInfo;
+unsigned char _plane_select_bit=0;
+unsigned char _die_id = 0;
+
+#define _SPI_NFI_CHECK_ECC_DONE_MAX_TIMES (1000000)
+#define CACHE_LINE_SIZE (32)
+u8 *dma_read_page;
+u8 tmp_dma_read_page[_SPI_NAND_CACHE_SIZE + CACHE_LINE_SIZE];
+u8 *dma_write_page;
+u8 tmp_dma_write_page[_SPI_NAND_CACHE_SIZE + CACHE_LINE_SIZE];
+
+unsigned long read_times =0;
+unsigned char _ondie_ecc_flag=1;	/* On-die ECC : [ToDo : Init this flag based on different chips ?] */
+unsigned char _spi_dma_mode=0;
+struct mtd_info *spi_nand_mtd;
+
+#define IOMUX_CONTROL1 (0xBFA20104)
+
+/* TYPE DECLARATIONS ----------------------------------------------------------------- */
+struct _SPI_NAND_FLASH_ID_T {
+ u8 mfr_id;
+ u8 dev_id;
+};
+
+struct _SPI_NAND_FLASH_RW_TEST_T {
+ u32 times;
+ u32 block_idx;
+};
+
+/* STATIC VARIABLE DECLARATIONS ------------------------------------------------------ */
+#if defined(TCSUPPORT_NAND_BMT) && !defined(LZMA_IMG)
+
+static int bmt_pool_size = 0;
+static bmt_struct *g_bmt = NULL;
+static init_bbt_struct *g_bbt = NULL;
+#endif
+
+static struct _SPI_NAND_FLASH_RW_TEST_T rw_test_param;
+
+#define UBIFS_BLANK_PAGE_FIXUP
+
+#ifdef UBIFS_BLANK_PAGE_FIXUP
+typedef enum{
+ SPI_NAND_FLASH_UBIFS_BLANK_PAGE_ECC_MATCH = 0,
+ SPI_NAND_FLASH_UBIFS_BLANK_PAGE_ECC_MISMATCH,
+} UBIFS_BLANK_PAGE_ECC_T;
+
+typedef enum{
+ SPI_NAND_FLASH_UBIFS_BLANK_PAGE_FIXUP_SUCCESS = 0,
+ SPI_NAND_FLASH_UBIFS_BLANK_PAGE_FIXUP_FAIL,
+} UBIFS_BLANK_PAGE_FIXUP_T;
+
+static SPI_NAND_FLASH_RTN_T spi_nand_write_page(u32 page_number,
+ u32 data_offset,
+ u8 *ptr_data,
+ u32 data_len,
+ u32 oob_offset,
+ u8 *ptr_oob ,
+ u32 oob_len,
+ SPI_NAND_FLASH_WRITE_SPEED_MODE_T speed_mode );
+
+#endif
+
+
+SPI_NAND_FLASH_DEBUG_LEVEL_T _SPI_NAND_DEBUG_LEVEL = SPI_NAND_FLASH_DEBUG_LEVEL_0;
+int _SPI_NAND_TEST_FLAG = 0;
+
+
+u8 _SPI_NAND_DEBUG_FLAG= 0; /* For control printf debug message or not */
+u32 _current_page_num = 0xFFFFFFFF;
+u32 _spare_area_factor = 32;
+u8 _current_cache_page[_SPI_NAND_CACHE_SIZE];
+u8 _current_cache_page_data[_SPI_NAND_PAGE_SIZE];
+u8 _current_cache_page_oob[_SPI_NAND_OOB_SIZE];
+u8 _current_cache_page_oob_mapping[_SPI_NAND_OOB_SIZE];
+
+
+struct SPI_NAND_FLASH_INFO_T _current_flash_info_t; /* Store the current flash information */
+
+
+struct spi_nand_flash_ooblayout ooblayout_esmt = {
+ .oobsize = 36,
+ .oobfree = {{0,1}, {8,8}, {16,1}, {24,8}, {32,1}, {40,8}, {48,1}, {56,8} }
+};
+
+/* only use user meta data with ECC protected */
+struct spi_nand_flash_ooblayout ooblayout_esmt_41lb = {
+ .oobsize = 20,
+ .oobfree = {{0,4}, {4,4}, {20,4}, {36,4}, {52,4}}
+};
+
+struct spi_nand_flash_ooblayout ooblayout_mxic = {
+ .oobsize = 64,
+ .oobfree = {{0,64}}
+};
+
+struct spi_nand_flash_ooblayout ooblayout_winbond = {
+ .oobsize = 32,
+ .oobfree = {{0,8}, {16,8}, {32,8}, {48,8} }
+};
+
+struct spi_nand_flash_ooblayout ooblayout_gigadevice_a = {
+ .oobsize = 48,
+ .oobfree = {{0,12}, {16,12}, {32,12}, {48,12} }
+};
+
+struct spi_nand_flash_ooblayout ooblayout_gigadevice_128 = {
+ .oobsize = 64,
+ .oobfree = {{0,64}}
+};
+
+struct spi_nand_flash_ooblayout ooblayout_gigadevice_256 = {
+ .oobsize = 128,
+ .oobfree = {{0,128}}
+};
+
+struct spi_nand_flash_ooblayout ooblayout_zentel = {
+ .oobsize = 36,
+ .oobfree = {{0,1}, {8,8}, {16,1}, {24,8}, {32,1}, {40,8}, {48,1}, {56,8} }
+};
+
+struct spi_nand_flash_ooblayout ooblayout_etron_73C044SNB = {
+ .oobsize = 64,
+ .oobfree = {{0,16}, {30,16}, {60,16}, {90,16}}
+};
+
+struct spi_nand_flash_ooblayout ooblayout_etron_73D044SNA = {
+ .oobsize = 72,
+ .oobfree = {{0,18}, {32,18}, {64,18}, {96,18}}
+};
+
+/* only use user meta data with ECC protected */
+struct spi_nand_flash_ooblayout ooblayout_etron_73D044SNC = {
+ .oobsize = 64,
+ .oobfree = {{0,16}, {30,16}, {60,16}, {90,16}}
+};
+
+struct spi_nand_flash_ooblayout ooblayout_etron_73E044SNA = {
+ .oobsize = 144,
+ .oobfree = {{0,18}, {32,18}, {64,18}, {96,18}, {128,18}, {160,18}, {192,18}, {224,18}}
+};
+
+struct spi_nand_flash_ooblayout ooblayout_toshiba_128 = {
+ .oobsize = 64,
+ .oobfree = {{0,64}}
+};
+
+struct spi_nand_flash_ooblayout ooblayout_toshiba_256 = {
+ .oobsize = 128,
+ .oobfree = {{0,128}}
+};
+
+struct spi_nand_flash_ooblayout ooblayout_micron = {
+ .oobsize = 64,
+ .oobfree = {{0,64}}
+};
+
+struct spi_nand_flash_ooblayout ooblayout_heyang = {
+ .oobsize = 32,
+ .oobfree = {{0,8}, {32,8}, {64,8}, {96,8}}
+};
+
+struct spi_nand_flash_ooblayout ooblayout_xtx = {
+ .oobsize = 76,
+ .oobfree = {{0,6}, {19,2}, {34,2}, {49,2}, {64,64}}
+};
+
+/* only use user meta data with ECC protected */
+struct spi_nand_flash_ooblayout ooblayout_ato = {
+ .oobsize = 64,
+ .oobfree = {{0,64}}
+};
+
+struct spi_nand_flash_ooblayout ooblayout_fm = {
+ .oobsize = 64,
+ .oobfree = {{0,64}}
+};
+
+struct spi_nand_flash_ooblayout ooblayout_spi_controller_ecc_64 = {
+ .oobsize = 32,
+ .oobfree = {{0,8}, {16,8}, {32,8}, {48,8}}
+};
+
+struct spi_nand_flash_ooblayout ooblayout_spi_controller_ecc_128 = {
+ .oobsize = 96,
+ .oobfree = {{0,8}, {16,8}, {32,8}, {48,8}, {64,64}}
+};
+
+struct spi_nand_flash_ooblayout ooblayout_spi_controller_ecc_256 = {
+ .oobsize = 224,
+ .oobfree = {{0,8}, {16,8}, {32,8}, {48,8}, {64,192}}
+};
+
+
+/*****************************[ Notice]******************************/
+/* If a new spi nand chip has a page size of more than 4KB, or an oob size of more than 256 bytes, then */
+/* it will need to adjust the #define of _SPI_NAND_PAGE_SIZE and _SPI_NAND_OOB_SIZE */
+/*****************************[ Notice]******************************/
+
+static const struct SPI_NAND_FLASH_INFO_T spi_nand_flash_tables[] = {
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_GIGADEVICE,
+ dev_id: _SPI_NAND_DEVICE_ID_GD5F1GQ4UAYIG,
+ ptr_name: "_SPI_NAND_DEVICE_ID_GD5F1GQ4UA",
+ device_size: _SPI_NAND_CHIP_SIZE_1GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_64BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_gigadevice_a,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_GIGADEVICE,
+ dev_id: _SPI_NAND_DEVICE_ID_GD5F1GQ4UBYIG,
+ ptr_name: "_SPI_NAND_DEVICE_ID_GD5F1GQ4UB",
+ device_size: _SPI_NAND_CHIP_SIZE_1GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_128BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_gigadevice_128,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_GIGADEVICE,
+ dev_id: _SPI_NAND_DEVICE_ID_GD5F1GQ4UCYIG,
+ ptr_name: "_SPI_NAND_DEVICE_ID_GD5F1GQ4UC",
+ device_size: _SPI_NAND_CHIP_SIZE_1GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_128BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_PREPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_gigadevice_128,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_GIGADEVICE,
+ dev_id: _SPI_NAND_DEVICE_ID_GD5F2GQ4UBYIG,
+ ptr_name: "_SPI_NAND_DEVICE_ID_GD5F2GQ4UB",
+ device_size: _SPI_NAND_CHIP_SIZE_2GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_128BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_gigadevice_128,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_GIGADEVICE,
+ dev_id: _SPI_NAND_DEVICE_ID_GD5F2GQ4UCYIG,
+ ptr_name: "_SPI_NAND_DEVICE_ID_GD5F2GQ4UC",
+ device_size: _SPI_NAND_CHIP_SIZE_2GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_128BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_PREPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_gigadevice_128,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_GIGADEVICE,
+ dev_id: _SPI_NAND_DEVICE_ID_GD5F4GQ4UBYIG,
+ ptr_name: "_SPI_NAND_DEVICE_ID_GD5F4GQ4UB",
+ device_size: _SPI_NAND_CHIP_SIZE_4GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_4KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_256BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_256KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_gigadevice_256,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_GIGADEVICE,
+ dev_id: _SPI_NAND_DEVICE_ID_GD5F4GQ4UCYIG,
+ ptr_name: "_SPI_NAND_DEVICE_ID_GD5F4GQ4UC",
+ device_size: _SPI_NAND_CHIP_SIZE_4GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_4KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_256BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_256KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_PREPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_gigadevice_256,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_ESMT,
+ dev_id: _SPI_NAND_DEVICE_ID_F50L512M41A,
+ ptr_name: "_SPI_NAND_DEVICE_ID_F50L512",
+ device_size: _SPI_NAND_CHIP_SIZE_512MBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_64BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_esmt,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_ESMT,
+ dev_id: _SPI_NAND_DEVICE_ID_F50L1G41A0,
+ ptr_name: "_SPI_NAND_DEVICE_ID_F50L1G",
+ device_size: _SPI_NAND_CHIP_SIZE_1GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_64BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_esmt,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_ESMT,
+ dev_id: _SPI_NAND_DEVICE_ID_F50L1G41LB,
+ ptr_name: "_SPI_NAND_DEVICE_ID_F50L1G41LB",
+ device_size: _SPI_NAND_CHIP_SIZE_1GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_64BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_esmt_41lb,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_ESMT,
+ dev_id: _SPI_NAND_DEVICE_ID_F50L2G41LB,
+ ptr_name: "_SPI_NAND_DEVICE_ID_F50L2G41LB",
+ device_size: _SPI_NAND_CHIP_SIZE_2GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_64BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_esmt_41lb,
+ feature: SPI_NAND_FLASH_DIE_SELECT_1_HAVE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_WINBOND,
+ dev_id: _SPI_NAND_DEVICE_ID_W25N01GV,
+ ptr_name: "_SPI_NAND_DEVICE_ID_W25N01G",
+ device_size: _SPI_NAND_CHIP_SIZE_1GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_64BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_winbond,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_WINBOND,
+ dev_id: _SPI_NAND_DEVICE_ID_W25M02GV,
+ ptr_name: "_SPI_NAND_DEVICE_ID_W25M02G",
+ device_size: _SPI_NAND_CHIP_SIZE_2GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_64BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_winbond,
+ feature: SPI_NAND_FLASH_DIE_SELECT_1_HAVE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_MXIC,
+ dev_id: _SPI_NAND_DEVICE_ID_MXIC35LF1GE4AB,
+ ptr_name: "_SPI_NAND_DEVICE_ID_MXIC35LF1G",
+ device_size: _SPI_NAND_CHIP_SIZE_1GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_64BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_mxic,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_MXIC,
+ dev_id: _SPI_NAND_DEVICE_ID_MXIC35LF2GE4AB,
+ ptr_name: "_SPI_NAND_DEVICE_ID_MXIC35LF2G",
+ device_size: _SPI_NAND_CHIP_SIZE_2GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_64BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_mxic,
+ feature: SPI_NAND_FLASH_PLANE_SELECT_HAVE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_ZENTEL,
+ dev_id: _SPI_NAND_DEVICE_ID_A5U12A21ASC,
+ ptr_name: "_SPI_NAND_DEVICE_ID_A5U12A21ASC",
+ device_size: _SPI_NAND_CHIP_SIZE_512MBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_64BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_zentel,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_ZENTEL,
+ dev_id: _SPI_NAND_DEVICE_ID_A5U1GA21BWS,
+ ptr_name: "_SPI_NAND_DEVICE_ID_A5U1GA21BWS",
+ device_size: _SPI_NAND_CHIP_SIZE_1GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_64BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_zentel,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_ETRON,
+ dev_id: _SPI_NAND_DEVICE_ID_EM73C044SNB,
+ ptr_name: "_SPI_NAND_DEVICE_ID_EM73C044SNB",
+ device_size: _SPI_NAND_CHIP_SIZE_1GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_128BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_etron_73C044SNB,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_ETRON,
+ dev_id: _SPI_NAND_DEVICE_ID_EM73D044SNA,
+ ptr_name: "_SPI_NAND_DEVICE_ID_EM73D044SNA",
+ device_size: _SPI_NAND_CHIP_SIZE_2GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_128BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_etron_73D044SNA,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_ETRON,
+ dev_id: _SPI_NAND_DEVICE_ID_EM73D044SNC,
+ ptr_name: "_SPI_NAND_DEVICE_ID_EM73D044SNC",
+ device_size: _SPI_NAND_CHIP_SIZE_2GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_128BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_etron_73D044SNC,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_ETRON,
+ dev_id: _SPI_NAND_DEVICE_ID_EM73E044SNA,
+ ptr_name: "_SPI_NAND_DEVICE_ID_EM73E044SNA",
+ device_size: _SPI_NAND_CHIP_SIZE_4GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_4KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_256BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_256KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_etron_73E044SNA,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_TOSHIBA,
+ dev_id: _SPI_NAND_DEVICE_ID_TC58CVG0S3H,
+ ptr_name: "_SPI_NAND_DEVICE_ID_TC58CVG0S3H",
+ device_size: _SPI_NAND_CHIP_SIZE_1GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_64BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_toshiba_128,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_TOSHIBA,
+ dev_id: _SPI_NAND_DEVICE_ID_TC58CVG1S3H,
+ ptr_name: "_SPI_NAND_DEVICE_ID_TC58CVG1S3H",
+ device_size: _SPI_NAND_CHIP_SIZE_2GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_64BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_toshiba_128,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_TOSHIBA,
+ dev_id: _SPI_NAND_DEVICE_ID_TC58CVG2S0H,
+ ptr_name: "_SPI_NAND_DEVICE_ID_TC58CVG2S0H",
+ device_size: _SPI_NAND_CHIP_SIZE_4GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_4KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_128BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_256KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_toshiba_256,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_MICRON,
+ dev_id: _SPI_NAND_DEVICE_ID_MT29F1G01,
+ ptr_name: "_SPI_NAND_DEVICE_ID_MT29F1G01",
+ device_size: _SPI_NAND_CHIP_SIZE_1GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_128BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_micron,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_MICRON,
+ dev_id: _SPI_NAND_DEVICE_ID_MT29F2G01,
+ ptr_name: "_SPI_NAND_DEVICE_ID_MT29F2G01",
+ device_size: _SPI_NAND_CHIP_SIZE_2GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_128BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_micron,
+ feature: SPI_NAND_FLASH_PLANE_SELECT_HAVE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_MICRON,
+ dev_id: _SPI_NAND_DEVICE_ID_MT29F4G01,
+ ptr_name: "_SPI_NAND_DEVICE_ID_MT29F4G01",
+ device_size: _SPI_NAND_CHIP_SIZE_4GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_128BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_micron,
+ feature: SPI_NAND_FLASH_PLANE_SELECT_HAVE | SPI_NAND_FLASH_DIE_SELECT_2_HAVE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_HEYANG,
+ dev_id: _SPI_NAND_DEVICE_ID_HYF1GQ4UAACAE,
+ ptr_name: "_SPI_NAND_DEVICE_ID_HYF1GQ4UAACAE",
+ device_size: _SPI_NAND_CHIP_SIZE_1GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_128BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_heyang,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_HEYANG,
+ dev_id: _SPI_NAND_DEVICE_ID_HYF2GQ4UAACAE,
+ ptr_name: "_SPI_NAND_DEVICE_ID_HYF2GQ4UAACAE",
+ device_size: _SPI_NAND_CHIP_SIZE_2GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_128BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_heyang,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_XTX,
+ dev_id: _SPI_NAND_DEVICE_ID_PN26G01AWSIUG,
+ ptr_name: "_SPI_NAND_DEVICE_ID_PN26G01AWSIUG",
+ device_size: _SPI_NAND_CHIP_SIZE_1GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_128BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_xtx,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_XTX,
+ dev_id: _SPI_NAND_DEVICE_ID_PN26G02AWSIUG,
+ ptr_name: "_SPI_NAND_DEVICE_ID_PN26G02AWSIUG",
+ device_size: _SPI_NAND_CHIP_SIZE_2GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_128BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_xtx,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_ATO,
+ dev_id: _SPI_NAND_DEVICE_ID_ATO25D1GA,
+ ptr_name: "_SPI_NAND_DEVICE_ID_ATO25D1GA",
+ device_size: _SPI_NAND_CHIP_SIZE_1GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_64BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_SINGLE,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_ato,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_FM,
+ dev_id: _SPI_NAND_DEVICE_ID_FM25G01B,
+ ptr_name: "_SPI_NAND_DEVICE_ID_FM25G01B",
+ device_size: _SPI_NAND_CHIP_SIZE_1GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_128BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_fm,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+
+ {
+ mfr_id: _SPI_NAND_MANUFACTURER_ID_FM,
+ dev_id: _SPI_NAND_DEVICE_ID_FM25G02B,
+ ptr_name: "_SPI_NAND_DEVICE_ID_FM25G02B",
+ device_size: _SPI_NAND_CHIP_SIZE_2GBIT,
+ page_size: _SPI_NAND_PAGE_SIZE_2KBYTE,
+ oob_size: _SPI_NAND_OOB_SIZE_128BYTE,
+ erase_size: _SPI_NAND_BLOCK_SIZE_128KBYTE,
+ dummy_mode: SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+ read_mode: SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ write_mode: SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE,
+ oob_free_layout : &ooblayout_fm,
+ feature: SPI_NAND_FLASH_FEATURE_NONE,
+ },
+};
+
+/* LOCAL SUBPROGRAM BODIES------------------------------------------------------------ */
+static int generic_ffs(int x)
+{
+	int r = 1;	/* 1-based result index; ffs(1) == 1 by convention */
+
+	if (!x)
+		return 0;	/* no bit set: ffs(0) is defined as 0 */
+	if (!(x & 0xffff)) {	/* low 16 bits clear -> lowest set bit is in the upper half */
+		x >>= 16;
+		r += 16;
+	}
+	if (!(x & 0xff)) {	/* binary search: narrow by 8, then 4, 2, 1 */
+		x >>= 8;
+		r += 8;
+	}
+	if (!(x & 0xf)) {
+		x >>= 4;
+		r += 4;
+	}
+	if (!(x & 3)) {
+		x >>= 2;
+		r += 2;
+	}
+	if (!(x & 1)) {
+		x >>= 1;
+		r += 1;
+	}
+	return r;	/* position (1..32) of the least-significant set bit */
+}
+
+static void spi_nand_flash_debug_printf( SPI_NAND_FLASH_DEBUG_LEVEL_T DEBUG_LEVEL, char *fmt, ... )
+{
+	if( _SPI_NAND_DEBUG_LEVEL >= DEBUG_LEVEL )
+	{
+		/* Format into a bounded buffer; vsnprintf cannot overflow it the way vsprintf could. */
+		char str_buf[100];
+		va_list argptr;
+		int cnt;
+
+		va_start(argptr, fmt);
+		cnt = vsnprintf(str_buf, sizeof(str_buf), fmt, argptr);	/* cnt kept for debugging; output is truncated if >= 100 */
+		va_end(argptr);
+
+		_SPI_NAND_PRINTF("%s", str_buf);
+	}
+}
+
+static void spi_nand_flash_debug_printf_array( SPI_NAND_FLASH_DEBUG_LEVEL_T DEBUG_LEVEL, char *buf, u32 len )
+{
+	/* Hex-dump 'len' bytes of 'buf', 8 bytes per row, when verbosity allows it. */
+	u32 idx_for_debug;
+
+	if( _SPI_NAND_DEBUG_LEVEL >= DEBUG_LEVEL )
+	{
+
+		for(idx_for_debug=0; idx_for_debug< len; idx_for_debug++)
+		{
+			if( ((idx_for_debug) %8 == 0) )	/* start a new row every 8 bytes, prefixed with the offset */
+			{
+				_SPI_NAND_PRINTF("\n%04x: ", (idx_for_debug));
+			}
+			_SPI_NAND_PRINTF("%02x ", buf[idx_for_debug]);
+		}
+		_SPI_NAND_PRINTF("\n");
+	}
+
+}
+
+static SPI_NAND_FLASH_RTN_T spi_nand_protocol_reset( void )
+{
+	SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+	/* 1. Chip Select low */
+	_SPI_NAND_READ_CHIP_SELECT_LOW();
+
+	/* 2. Send FFh opcode (Reset) */
+	_SPI_NAND_WRITE_ONE_BYTE( _SPI_NAND_OP_RESET );	/* FFh; the old code sent SET_FEATURE (1Fh) and never reset the die */
+
+	/* 3. Chip Select High */
+	_SPI_NAND_READ_CHIP_SELECT_HIGH();
+
+	_SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand_protocol_reset\n");
+
+	return (rtn_status);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_NAND_FLASH_RTN_T spi_nand_protocol_set_feature( u8 addr, u8 data )
+ * PURPOSE : To implement the SPI nand protocol for set status register.
+ * AUTHOR :
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : addr - register address
+ * data - The new value to write into that register.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2017/5/26.
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_protocol_set_feature( u8 addr, u8 data )
+{
+	SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+	/* 1. Chip Select low */
+	_SPI_NAND_READ_CHIP_SELECT_LOW();
+
+	/* 2. Send 1Fh opcode (Set Feature) */
+	_SPI_NAND_WRITE_ONE_BYTE( _SPI_NAND_OP_SET_FEATURE );
+
+	/* 3. Offset addr */
+	_SPI_NAND_WRITE_ONE_BYTE( addr );
+
+	/* 4. Write new setting */
+	_SPI_NAND_WRITE_ONE_BYTE( data );
+
+	/* 5. Chip Select High */
+	_SPI_NAND_READ_CHIP_SELECT_HIGH();
+
+	_SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand_protocol_set_feature %x: val=0x%x\n", addr, data);
+
+	return (rtn_status);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_NAND_FLASH_RTN_T spi_nand_protocol_get_feature( u8 addr, u8 *ptr_rtn_data )
+ * PURPOSE : To implement the SPI nand protocol for get status register.
+ * AUTHOR :
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : addr - register address
+ * OUTPUT: ptr_rtn_data - A pointer receiving the register value read back.
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2017/5/26.
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_protocol_get_feature( u8 addr, u8 *ptr_rtn_data )
+{
+	SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+	/* 1. Chip Select low */
+	_SPI_NAND_READ_CHIP_SELECT_LOW();
+
+	/* 2. Send 0Fh opcode (Get Feature) */
+	_SPI_NAND_WRITE_ONE_BYTE( _SPI_NAND_OP_GET_FEATURE );
+
+	/* 3. Offset addr */
+	_SPI_NAND_WRITE_ONE_BYTE( addr );
+
+	/* 4. Read 1 byte data */
+	_SPI_NAND_READ_NBYTE( ptr_rtn_data, _SPI_NAND_LEN_ONE_BYTE, SPI_CONTROLLER_SPEED_SINGLE);
+
+	/* 5. Chip Select High */
+	_SPI_NAND_READ_CHIP_SELECT_HIGH();
+
+	_SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand_protocol_get_feature %x: val=0x%x\n", addr, *ptr_rtn_data);
+
+	return (rtn_status);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_NAND_FLASH_RTN_T spi_nand_protocol_set_status_reg_1( u8 protection )
+ * PURPOSE : To implement the SPI nand protocol for set status register 1.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : protection - The protection variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/17 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_protocol_set_status_reg_1( u8 protection )
+{
+	/* Offset addr of protection (0xA0) */
+	return spi_nand_protocol_set_feature(_SPI_NAND_ADDR_PROTECTION, protection);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_NAND_FLASH_RTN_T spi_nand_protocol_get_status_reg_1( u8 *ptr_rtn_protection )
+ * PURPOSE : To implement the SPI nand protocol for get status register 1.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: ptr_rtn_protection - A pointer to the ptr_rtn_protection variable.
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/17 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_protocol_get_status_reg_1( u8 *ptr_rtn_protection )
+{
+	/* Offset addr of protection (0xA0) */
+	return spi_nand_protocol_get_feature(_SPI_NAND_ADDR_PROTECTION, ptr_rtn_protection);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_NAND_FLASH_RTN_T spi_nand_protocol_set_status_reg_2( u8 feature )
+ * PURPOSE : To implement the SPI nand protocol for set status register 2.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : feature - The feature variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2015/01/21 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_protocol_set_status_reg_2( u8 feature )
+{
+	/* Offset addr of feature (0xB0) */
+	return spi_nand_protocol_set_feature(_SPI_NAND_ADDR_FEATURE, feature);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_NAND_FLASH_RTN_T spi_nand_protocol_get_status_reg_2( u8 *ptr_rtn_feature )
+ * PURPOSE : To implement the SPI nand protocol for get status register 2.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: ptr_rtn_feature - A pointer to the ptr_rtn_feature variable.
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2015/01/21 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_protocol_get_status_reg_2( u8 *ptr_rtn_feature )
+{
+	/* Offset addr of feature (0xB0) */
+	return spi_nand_protocol_get_feature(_SPI_NAND_ADDR_FEATURE, ptr_rtn_feature);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: SPI_NAND_FLASH_RTN_T spi_nand_protocol_get_status_reg_3( u8 *ptr_rtn_status )
+ * PURPOSE : To implement the SPI nand protocol for get status register 3.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: ptr_rtn_status - A pointer receiving the status register value.
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/16 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_protocol_get_status_reg_3( u8 *ptr_rtn_status )
+{
+	/* Offset addr of status (0xC0) */
+	return spi_nand_protocol_get_feature(_SPI_NAND_ADDR_STATUS, ptr_rtn_status);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_NAND_FLASH_RTN_T spi_nand_protocol_set_status_reg_4( u8 feature )
+ * PURPOSE : To implement the SPI nand protocol for set status register 4.
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : feature - The feature variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_protocol_set_status_reg_4( u8 feature )
+{
+	/* Offset addr of feature 4 (0xD0) */
+	return spi_nand_protocol_set_feature(_SPI_NAND_ADDR_FEATURE_4, feature);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_NAND_FLASH_RTN_T spi_nand_protocol_get_status_reg_4( u8 *ptr_rtn_status )
+ * PURPOSE : To implement the SPI nand protocol for get status register 4.
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: ptr_rtn_status - A pointer to the ptr_rtn_status variable.
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_protocol_get_status_reg_4( u8 *ptr_rtn_status )
+{
+	/* Offset addr of feature 4 (0xD0) */
+	return spi_nand_protocol_get_feature(_SPI_NAND_ADDR_FEATURE_4, ptr_rtn_status);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_NAND_FLASH_RTN_T spi_nand_protocol_get_status_reg_5( u8 *ptr_rtn_status )
+ * PURPOSE : To implement the SPI nand protocol for get status register 5.
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: ptr_rtn_status - A pointer to the ptr_rtn_status variable.
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_protocol_get_status_reg_5( u8 *ptr_rtn_status )
+{
+	/* Offset addr of status 5 (0xE0) */
+	return spi_nand_protocol_get_feature(_SPI_NAND_ADDR_STATUS_5, ptr_rtn_status);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_NAND_FLASH_RTN_T spi_nand_protocol_write_enable( void )
+ * PURPOSE : To implement the SPI nand protocol for write enable.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/17 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_protocol_write_enable( void )
+{
+	SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+	/* 1. Chip Select Low */
+	_SPI_NAND_READ_CHIP_SELECT_LOW();
+
+	/* 2. Write op_cmd 0x06 (Write Enable, WREN) */
+	_SPI_NAND_WRITE_ONE_BYTE( _SPI_NAND_OP_WRITE_ENABLE );
+
+	/* 3. Chip Select High */
+	_SPI_NAND_READ_CHIP_SELECT_HIGH();
+
+	return (rtn_status);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_NAND_FLASH_RTN_T spi_nand_protocol_write_disable( void )
+ * PURPOSE : To implement the SPI nand protocol for write disable.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/17 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_protocol_write_disable( void )
+{
+	SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+	/* 1. Chip Select Low */
+	_SPI_NAND_READ_CHIP_SELECT_LOW();
+
+	/* 2. Write op_cmd 0x04 (Write Disable, WRDI) */
+	_SPI_NAND_WRITE_ONE_BYTE( _SPI_NAND_OP_WRITE_DISABLE );
+
+	/* 3. Chip Select High */
+	_SPI_NAND_READ_CHIP_SELECT_HIGH();
+
+	return (rtn_status);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_NAND_FLASH_RTN_T spi_nand_protocol_block_erase( u32 block_idx )
+ * PURPOSE : To implement the SPI nand protocol for block erase.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : block_idx - The block_idx variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/17 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_protocol_block_erase( u32 block_idx )
+{
+	SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+	/* 1. Chip Select Low */
+	_SPI_NAND_READ_CHIP_SELECT_LOW();
+
+	/* 2. Write op_cmd 0xD8 (Block Erase) */
+	_SPI_NAND_WRITE_ONE_BYTE( _SPI_NAND_OP_BLOCK_ERASE );
+
+	/* 3. Write block number as a 24-bit row address, MSB first */
+	block_idx = block_idx << _SPI_NAND_BLOCK_ROW_ADDRESS_OFFSET; /*Row Address format in SPI NAND chip */
+
+	_SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand_protocol_block_erase : block idx =0x%x\n", block_idx);
+
+	_SPI_NAND_WRITE_ONE_BYTE( (block_idx >> 16) & 0xff ); /* row address [23:16]; upper bits are dummy on small parts */
+	_SPI_NAND_WRITE_ONE_BYTE( (block_idx >> 8) & 0xff );
+	_SPI_NAND_WRITE_ONE_BYTE( block_idx & 0xff );
+
+	/* 4. Chip Select High */
+	_SPI_NAND_READ_CHIP_SELECT_HIGH();
+
+	return (rtn_status);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_NAND_FLASH_RTN_T spi_nand_protocol_read_id( struct _SPI_NAND_FLASH_ID_T *ptr_rtn_flash_id )
+ * PURPOSE : To implement the SPI nand protocol for read id.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: ptr_rtn_flash_id - filled with the manufacturer and device IDs read back.
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/12 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_protocol_read_id ( struct _SPI_NAND_FLASH_ID_T * ptr_rtn_flash_id )
+{
+	SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+	/* 1. Chip Select Low */
+	_SPI_NAND_READ_CHIP_SELECT_LOW();
+
+	/* 2. Write op_cmd 0x9F (Read ID) */
+	_SPI_NAND_WRITE_ONE_BYTE( _SPI_NAND_OP_READ_ID );
+
+	/* 3. Write Address Byte (0x00) */
+	_SPI_NAND_WRITE_ONE_BYTE( _SPI_NAND_ADDR_MANUFACTURE_ID );
+
+	/* 4. Read data (Manufacture ID and Device ID) */
+	_SPI_NAND_READ_NBYTE( &(ptr_rtn_flash_id->mfr_id), _SPI_NAND_LEN_ONE_BYTE, SPI_CONTROLLER_SPEED_SINGLE);
+	_SPI_NAND_READ_NBYTE( &(ptr_rtn_flash_id->dev_id), _SPI_NAND_LEN_ONE_BYTE, SPI_CONTROLLER_SPEED_SINGLE);
+
+	/* 5. Chip Select High */
+	_SPI_NAND_READ_CHIP_SELECT_HIGH();
+
+	_SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand_protocol_read_id : mfr_id=0x%x, dev_id=0x%x\n", ptr_rtn_flash_id->mfr_id, ptr_rtn_flash_id->dev_id);
+
+	return (rtn_status);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_NAND_FLASH_RTN_T spi_nand_protocol_read_id_2( struct _SPI_NAND_FLASH_ID_T *ptr_rtn_flash_id )
+ * PURPOSE : To implement the SPI nand protocol for read id (variant without the
+ *           address byte, for chips that return the ID immediately after 0x9F).
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: ptr_rtn_flash_id - filled with the manufacturer and device IDs read back.
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/12 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_protocol_read_id_2 ( struct _SPI_NAND_FLASH_ID_T *ptr_rtn_flash_id )
+{
+	SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+	/* 1. Chip Select Low */
+	_SPI_NAND_READ_CHIP_SELECT_LOW();
+
+	/* 2. Write op_cmd 0x9F (Read ID) */
+	_SPI_NAND_WRITE_ONE_BYTE( _SPI_NAND_OP_READ_ID );
+
+	/* 3. Read data (Manufacture ID and Device ID) */
+	_SPI_NAND_READ_NBYTE( &(ptr_rtn_flash_id->mfr_id), _SPI_NAND_LEN_ONE_BYTE, SPI_CONTROLLER_SPEED_SINGLE);
+	_SPI_NAND_READ_NBYTE( &(ptr_rtn_flash_id->dev_id), _SPI_NAND_LEN_ONE_BYTE, SPI_CONTROLLER_SPEED_SINGLE);
+
+	/* 4. Chip Select High */
+	_SPI_NAND_READ_CHIP_SELECT_HIGH();
+
+	_SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand_protocol_read_id_2 : mfr_id=0x%x, dev_id=0x%x\n", ptr_rtn_flash_id->mfr_id, ptr_rtn_flash_id->dev_id);
+
+	return (rtn_status);
+}
+
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_NAND_FLASH_RTN_T spi_nand_protocol_page_read( u32 page_number )
+ * PURPOSE : To implement the SPI nand protocol for page read.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : page_number - The page_number variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/17 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_protocol_page_read ( u32 page_number )
+{
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+
+ /* 1. Chip Select low */
+ _SPI_NAND_READ_CHIP_SELECT_LOW();
+
+ /* 2. Send 13h opcode */
+ _SPI_NAND_WRITE_ONE_BYTE( _SPI_NAND_OP_PAGE_READ );
+
+ /* 3. Send page number */
+ _SPI_NAND_WRITE_ONE_BYTE( ((page_number >> 16 ) & 0xff) );
+ _SPI_NAND_WRITE_ONE_BYTE( ((page_number >> 8 ) & 0xff) );
+ _SPI_NAND_WRITE_ONE_BYTE( ((page_number) & 0xff) );
+
+ /* 4. Chip Select High */
+ _SPI_NAND_READ_CHIP_SELECT_HIGH();
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand_load_page_into_cache: value=0x%x\n", ((page_number ) & 0xff) );
+
+ return (rtn_status);
+
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_NAND_FLASH_RTN_T spi_nand_protocol_read_from_cache( u32 data_offset,
+ * u32 len,
+ * u8 *ptr_rtn_buf,
+ * u32 read_mode,
+ * SPI_NAND_FLASH_READ_DUMMY_BYTE_T dummy_mode)
+ * PURPOSE : To implement the SPI nand protocol for read from cache with single speed.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : data_offset - The data_offset variable of this function.
+ * len - The len variable of this function.
+ * OUTPUT: ptr_rtn_buf - A pointer to the ptr_rtn_buf variable.
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_protocol_read_from_cache( u32 data_offset,
+ u32 len,
+ u8 *ptr_rtn_buf,
+ u32 read_mode,
+ SPI_NAND_FLASH_READ_DUMMY_BYTE_T dummy_mode )
+{
+
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ /* 1. Chip Select low */
+ _SPI_NAND_READ_CHIP_SELECT_LOW();
+
+ /* 2. Send opcode */
+ switch (read_mode)
+ {
+ /* 03h */
+ case SPI_NAND_FLASH_READ_SPEED_MODE_SINGLE:
+ _SPI_NAND_WRITE_ONE_BYTE( _SPI_NAND_OP_READ_FROM_CACHE_SINGLE );
+ break;
+
+ /* 3Bh */
+ case SPI_NAND_FLASH_READ_SPEED_MODE_DUAL:
+ _SPI_NAND_WRITE_ONE_BYTE( _SPI_NAND_OP_READ_FROM_CACHE_DUAL );
+ break;
+
+ /* 6Bh */
+ case SPI_NAND_FLASH_READ_SPEED_MODE_QUAD:
+ _SPI_NAND_WRITE_ONE_BYTE( _SPI_NAND_OP_READ_FROM_CACHE_QUAD );
+ break;
+
+ default:
+ break;
+ }
+
+ /* 3. Send data_offset addr */
+ if( dummy_mode == SPI_NAND_FLASH_READ_DUMMY_BYTE_PREPEND )
+ {
+ _SPI_NAND_WRITE_ONE_BYTE( (0xff) ); /* dummy byte */
+ }
+
+ if( ((ptr_dev_info_t->feature) & SPI_NAND_FLASH_PLANE_SELECT_HAVE) )
+ {
+ if( _plane_select_bit == 0)
+ {
+ _SPI_NAND_WRITE_ONE_BYTE( ((data_offset >> 8 ) &(0xef)) );
+ }
+ if( _plane_select_bit == 1)
+ {
+ _SPI_NAND_WRITE_ONE_BYTE( ((data_offset >> 8 ) | (0x10)) );
+ }
+ }
+ else
+ {
+ _SPI_NAND_WRITE_ONE_BYTE( ((data_offset >> 8 ) &(0xff)) );
+ }
+
+ _SPI_NAND_WRITE_ONE_BYTE( ((data_offset ) &(0xff)) );
+
+ if( dummy_mode == SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND )
+ {
+ _SPI_NAND_WRITE_ONE_BYTE( (0xff) ); /* dummy byte */
+ }
+
+ if( dummy_mode == SPI_NAND_FLASH_READ_DUMMY_BYTE_PREPEND &&
+ ((read_mode == SPI_NAND_FLASH_READ_SPEED_MODE_DUAL) || (read_mode == SPI_NAND_FLASH_READ_SPEED_MODE_QUAD)))
+ {
+ _SPI_NAND_WRITE_ONE_BYTE(0xff); /* for dual/quad read dummy byte */
+ }
+
+ /* 4. Read n byte (len) data */
+ switch (read_mode)
+ {
+ case SPI_NAND_FLASH_READ_SPEED_MODE_SINGLE:
+ _SPI_NAND_READ_NBYTE( ptr_rtn_buf, len, SPI_CONTROLLER_SPEED_SINGLE);
+ break;
+
+ case SPI_NAND_FLASH_READ_SPEED_MODE_DUAL:
+ _SPI_NAND_READ_NBYTE( ptr_rtn_buf, len, SPI_CONTROLLER_SPEED_DUAL);
+ break;
+
+ case SPI_NAND_FLASH_READ_SPEED_MODE_QUAD:
+ _SPI_NAND_READ_NBYTE( ptr_rtn_buf, len, SPI_CONTROLLER_SPEED_QUAD);
+ break;
+
+ default:
+ break;
+ }
+
+ /* 5. Chip Select High */
+ _SPI_NAND_READ_CHIP_SELECT_HIGH();
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand_protocol_read_from_cache : data_offset=0x%x, buf=0x%x\n", data_offset, ptr_rtn_buf);
+
+ return (rtn_status);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_NAND_FLASH_RTN_T spi_nand_protocol_program_load( u32 addr,
+ * u8 *ptr_data,
+ * u32 len,
+ * u32 write_mode)
+ * PURPOSE : To implement the SPI nand protocol for program load, with single speed.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : addr - The addr variable of this function.
+ * ptr_data - A pointer to the ptr_data variable.
+ * len - The len variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_protocol_program_load ( u32 addr,
+ u8 *ptr_data,
+ u32 len,
+ u32 write_mode)
+{
+
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand_protocol_program_load: addr=0x%x, len=0x%x\n", addr, len );
+
+ /* 1. Chip Select low */
+ _SPI_NAND_READ_CHIP_SELECT_LOW();
+
+ /* 2. Send opcode */
+ switch (write_mode)
+ {
+ /* 02h */
+ case SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE:
+ _SPI_NAND_WRITE_ONE_BYTE( _SPI_NAND_OP_PROGRAM_LOAD_SINGLE );
+ break;
+
+ /* 32h */
+ case SPI_NAND_FLASH_WRITE_SPEED_MODE_QUAD:
+ _SPI_NAND_WRITE_ONE_BYTE( _SPI_NAND_OP_PROGRAM_LOAD_QUAD );
+ break;
+
+ default:
+ break;
+ }
+
+ /* 3. Send address offset */
+ if( ((ptr_dev_info_t->feature) & SPI_NAND_FLASH_PLANE_SELECT_HAVE) )
+ {
+ if( _plane_select_bit == 0)
+ {
+ _SPI_NAND_WRITE_ONE_BYTE( ((addr >> 8 ) & (0xef)) );
+ }
+ if( _plane_select_bit == 1)
+ {
+ _SPI_NAND_WRITE_ONE_BYTE( ((addr >> 8 ) | (0x10)) );
+ }
+ }
+ else
+ {
+ _SPI_NAND_WRITE_ONE_BYTE( ((addr >> 8 ) & (0xff)) );
+ }
+
+ _SPI_NAND_WRITE_ONE_BYTE( ((addr) & (0xff)) );
+
+ /* 4. Send data */
+ switch (write_mode)
+ {
+ case SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE:
+ _SPI_NAND_WRITE_NBYTE( ptr_data, len, SPI_CONTROLLER_SPEED_SINGLE);
+ break;
+
+ case SPI_NAND_FLASH_WRITE_SPEED_MODE_QUAD:
+ _SPI_NAND_WRITE_NBYTE( ptr_data, len, SPI_CONTROLLER_SPEED_QUAD);
+ break;
+
+ default:
+ break;
+ }
+
+ /* 5. Chip Select High */
+ _SPI_NAND_READ_CHIP_SELECT_HIGH();
+
+ return (rtn_status);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_NAND_FLASH_RTN_T spi_nand_protocol_program_load_random( u32 addr,
+ * u8 *ptr_data,
+ * u32 len,
+ * u32 write_mode)
+ * PURPOSE : To implement the SPI nand protocol for program load random, with single speed.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : addr - The addr variable of this function.
+ * ptr_data - A pointer to the ptr_data variable.
+ * len - The len variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/17 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_protocol_program_load_random ( u32 addr,
+ u8 *ptr_data,
+ u32 len,
+ u32 write_mode)
+{
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand_protocol_program_load_random: addr=0x%x, len=0x%x\n", addr, len );
+
+ /* 1. Chip Select low */
+ _SPI_NAND_READ_CHIP_SELECT_LOW();
+
+ /* 2. Send opcode */
+ switch (write_mode)
+ {
+ /* 84 */
+ case SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE:
+ _SPI_NAND_WRITE_ONE_BYTE( _SPI_NAND_OP_PROGRAM_LOAD_RAMDOM_SINGLE );
+ break;
+
+ /* 34h */
+ case SPI_NAND_FLASH_WRITE_SPEED_MODE_QUAD:
+ _SPI_NAND_WRITE_ONE_BYTE( _SPI_NAND_OP_PROGRAM_LOAD_RAMDON_QUAD );
+ break;
+
+ default:
+ break;
+ }
+
+ /* 3. Send address offset */
+ _SPI_NAND_WRITE_ONE_BYTE( ((addr >> 8 ) & 0xff) );
+ _SPI_NAND_WRITE_ONE_BYTE( ((addr) & 0xff) );
+
+ /* 4. Send data */
+ switch (write_mode)
+ {
+ case SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE:
+ _SPI_NAND_WRITE_NBYTE( ptr_data, len, SPI_CONTROLLER_SPEED_SINGLE);
+ break;
+
+ case SPI_NAND_FLASH_WRITE_SPEED_MODE_QUAD:
+ _SPI_NAND_WRITE_NBYTE( ptr_data, len, SPI_CONTROLLER_SPEED_QUAD);
+ break;
+
+ default:
+ break;
+ }
+
+ /* 5. Chip Select High */
+ _SPI_NAND_READ_CHIP_SELECT_HIGH();
+
+ return (rtn_status);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_NAND_FLASH_RTN_T spi_nand_protocol_program_execute( u32 addr )
+ * PURPOSE : To implement the SPI nand protocol for program execute.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : addr - The addr variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/17 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_protocol_program_execute ( u32 addr )
+{
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand_protocol_program_execute: addr=0x%x\n", addr);
+
+ /* 1. Chip Select low */
+ _SPI_NAND_READ_CHIP_SELECT_LOW();
+
+ /* 2. Send 10h opcode */
+ _SPI_NAND_WRITE_ONE_BYTE( _SPI_NAND_OP_PROGRAM_EXECUTE );
+
+ /* 3. Send address offset */
+ _SPI_NAND_WRITE_ONE_BYTE( ((addr >> 16 ) & 0xff) );
+ _SPI_NAND_WRITE_ONE_BYTE( ((addr >> 8 ) & 0xff) );
+ _SPI_NAND_WRITE_ONE_BYTE( ((addr) & 0xff) );
+
+ /* 4. Chip Select High */
+ _SPI_NAND_READ_CHIP_SELECT_HIGH();
+
+ return (rtn_status);
+}
+
+static SPI_NAND_FLASH_RTN_T spi_nand_protocol_die_select_1( u8 die_id)
+{
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+ /* 1. Chip Select low */
+ _SPI_NAND_READ_CHIP_SELECT_LOW();
+
+ /* 2. Send C2h opcode (Die Select) */
+ _SPI_NAND_WRITE_ONE_BYTE( _SPI_NAND_OP_DIE_SELECT );
+
+ /* 3. Send Die ID */
+ _SPI_NAND_WRITE_ONE_BYTE( die_id );
+
+ /* 5. Chip Select High */
+ _SPI_NAND_READ_CHIP_SELECT_HIGH();
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand_protocol_die_select_1\n");
+
+ return (rtn_status);
+}
+
+static SPI_NAND_FLASH_RTN_T spi_nand_protocol_die_select_2( u8 die_id)
+{
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+ unsigned char feature;
+
+ rtn_status = spi_nand_protocol_get_status_reg_4(&feature);
+ if(rtn_status != SPI_NAND_FLASH_RTN_NO_ERROR) {
+ _SPI_NAND_PRINTF("spi_nand_protocol_die_select_2 get die select fail.\n");
+ return (rtn_status);
+ }
+
+ if(die_id == 0) {
+ feature &= ~(0x40);
+ } else {
+ feature |= 0x40;
+ }
+ rtn_status = spi_nand_protocol_set_status_reg_4(feature);
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand_protocol_die_select_2\n");
+
+ return (rtn_status);
+}
+
+static void spi_nand_select_die ( u32 page_number )
+{
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ u8 die_id = 0;
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ if(((ptr_dev_info_t->feature) & SPI_NAND_FLASH_DIE_SELECT_1_HAVE)) {
+ /* single die = 1024blocks * 64pages */
+ die_id = ((page_number >> 16) & 0xff);
+
+ if (_die_id != die_id)
+ {
+ _die_id = die_id;
+ spi_nand_protocol_die_select_1(die_id);
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_2, "spi_nand_protocol_die_select_1: die_id=0x%x\n", die_id);
+ }
+ } else if( ((ptr_dev_info_t->feature) & SPI_NAND_FLASH_DIE_SELECT_2_HAVE) ) {
+ /* single die = 2plans * 1024blocks * 64pages */
+ die_id = ((page_number >> 17) & 0xff);
+
+ if (_die_id != die_id)
+ {
+ _die_id = die_id;
+ spi_nand_protocol_die_select_2(die_id);
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_2, "spi_nand_protocol_die_select_2: die_id=0x%x\n", die_id);
+ }
+ }
+}
+
+static SPI_NAND_FLASH_RTN_T ecc_fail_check( u32 page_number )
+{
+ u8 status;
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ spi_nand_protocol_get_status_reg_3( &status);
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "ecc_fail_check: status=0x%x\n", status);
+
+ if((ptr_dev_info_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_GIGADEVICE) &&
+ ((ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_GD5F1GQ4UAYIG) ||
+ (ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_GD5F1GQ4UBYIG) ||
+ (ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_GD5F1GQ4UCYIG) ||
+ (ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_GD5F2GQ4UBYIG) ||
+ (ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_GD5F2GQ4UCYIG) ||
+ (ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_GD5F4GQ4UBYIG) ||
+ (ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_GD5F4GQ4UCYIG)))
+ {
+ if((ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_GD5F1GQ4UAYIG) ||
+ (ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_GD5F1GQ4UBYIG) ||
+ (ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_GD5F2GQ4UBYIG) ||
+ (ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_GD5F4GQ4UBYIG))
+ {
+ if(((status & 0x30) >> 4) == 0x2)
+ {
+ rtn_status = SPI_NAND_FLASH_RTN_DETECTED_BAD_BLOCK;
+ }
+ }
+
+ if((ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_GD5F1GQ4UCYIG) ||
+ (ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_GD5F2GQ4UCYIG) ||
+ (ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_GD5F4GQ4UCYIG))
+ {
+ if(((status & 0x70) >> 4) == 0x7)
+ {
+ rtn_status = SPI_NAND_FLASH_RTN_DETECTED_BAD_BLOCK;
+ }
+ }
+ }
+
+ else if(ptr_dev_info_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_MXIC)
+ {
+ if(((status & 0x30) >> 4) == 0x2)
+ {
+ rtn_status = SPI_NAND_FLASH_RTN_DETECTED_BAD_BLOCK;
+ }
+ }
+
+ else if(ptr_dev_info_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_WINBOND)
+ {
+ if(((status & 0x30) >> 4) == 0x2)
+ {
+ rtn_status = SPI_NAND_FLASH_RTN_DETECTED_BAD_BLOCK;
+ }
+ }
+
+ else if((ptr_dev_info_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_ESMT) &&
+ ((ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_F50L512M41A) ||
+ (ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_F50L1G41A0) ||
+ (ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_F50L1G41LB) ||
+ (ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_F50L2G41LB)))
+ {
+ if(((status & 0x30) >> 4) == 0x2)
+ {
+ rtn_status = SPI_NAND_FLASH_RTN_DETECTED_BAD_BLOCK;
+ }
+ }
+ else if((ptr_dev_info_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_ZENTEL) &&
+ ((ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_A5U12A21ASC) ||
+ (ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_A5U1GA21BWS)))
+ {
+ if(((status & 0x30) >> 4) == 0x2)
+ {
+ rtn_status = SPI_NAND_FLASH_RTN_DETECTED_BAD_BLOCK;
+ }
+ }
+ else if(ptr_dev_info_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_ETRON)
+ {
+ if(((status & 0x30) >> 4) == 0x2)
+ {
+ rtn_status = SPI_NAND_FLASH_RTN_DETECTED_BAD_BLOCK;
+ }
+ }
+ else if(ptr_dev_info_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_TOSHIBA)
+ {
+ if(((status & 0x30) >> 4) == 0x2)
+ {
+ rtn_status = SPI_NAND_FLASH_RTN_DETECTED_BAD_BLOCK;
+ }
+ }
+ else if(ptr_dev_info_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_MICRON)
+ {
+ if(((status & 0x70) >> 4) == 0x2)
+ {
+ rtn_status = SPI_NAND_FLASH_RTN_DETECTED_BAD_BLOCK;
+ }
+ }
+ else if(ptr_dev_info_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_HEYANG)
+ {
+ if(((status & 0x30) >> 4) == 0x2)
+ {
+ rtn_status = SPI_NAND_FLASH_RTN_DETECTED_BAD_BLOCK;
+ }
+ }
+ else if(((ptr_dev_info_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_XTX) && (ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_PN26G01AWSIUG)) ||
+ ((ptr_dev_info_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_XTX) && (ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_PN26G02AWSIUG)))
+ {
+ if(((status & 0x30) >> 4) == 0x2)
+ {
+ rtn_status = SPI_NAND_FLASH_RTN_DETECTED_BAD_BLOCK;
+ }
+ }
+ else if(((ptr_dev_info_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_FM) && (ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_FM25G01B)) ||
+ ((ptr_dev_info_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_FM) && (ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_FM25G02B)))
+ {
+ if(((status & 0x70) >> 4) == 0x7)
+ {
+ rtn_status = SPI_NAND_FLASH_RTN_DETECTED_BAD_BLOCK;
+ }
+ }
+
+ else if(rtn_status == SPI_NAND_FLASH_RTN_DETECTED_BAD_BLOCK)
+ {
+ _SPI_NAND_PRINTF("[spinand_ecc_fail_check] : ECC cannot recover detected !, page=0x%x\n", page_number);
+ }
+
+ return (rtn_status);
+}
+
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_NAND_FLASH_RTN_T spi_nand_load_page_into_cache( u32 page_number )
+ * PURPOSE : To load page into SPI NAND chip.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : page_number - The page_number variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/16 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_load_page_into_cache( u32 page_number)
+{
+ u8 status;
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand_load_page_into_cache: page number =0x%x\n", page_number);
+
+ if( _current_page_num == page_number )
+ {
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand_load_page_into_cache: page number == _current_page_num\n");
+ }
+ else
+ {
+ spi_nand_select_die ( page_number );
+
+ spi_nand_protocol_page_read ( page_number );
+
+ /* Checking status for load page/erase/program complete */
+ do{
+ spi_nand_protocol_get_status_reg_3( &status);
+ }while( status & _SPI_NAND_VAL_OIP) ;
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand_load_page_into_cache : status =0x%x\n", status);
+
+ if(_spi_dma_mode == 0) {
+ rtn_status = ecc_fail_check(page_number);
+ }
+ }
+
+ return (rtn_status);
+
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: spi_nand_set_clock_speed( u32 clk)
+ * PURPOSE : To set SPI clock.
+ * clock_factor = 0
+ * EN7512: SPI clock = 500MHz / 40 = 12.5MHz
+ * EN7522: SPI clock = 400MHz / 40 = 10MHz
+ * clock_factor > 0
+ * EN7512: SPI clock = 500MHz / (clock_factor * 2)
+ * EN7522: SPI clock = 400MHz / (clock_factor * 2)
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : clk - The target SPI clock in MHz (used to derive the clock divider).
+ * RETURN : NONE.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_set_clock_speed( u32 clk)
+{
+ u32 val;
+ u32 dividend;
+ u32 clock_factor;
+
+ if(!isFPGA) {
+ if(isEN7526c || isEN751627||isEN7580) {
+ dividend = 400;
+ } else {
+ dividend = 500;
+ }
+
+ clock_factor = (dividend / (clk * 2));
+
+ val = VPint(_SPI_FREQUENCY_ADJUST_REG);
+ val &= 0xffff0000;
+ VPint(_SPI_FREQUENCY_ADJUST_REG) = val;
+
+ val |= (((clock_factor) << 8)|1);
+ VPint(_SPI_FREQUENCY_ADJUST_REG) = val;
+
+ _SPI_NAND_PRINTF("Set SPI Clock to %u Mhz\n", (dividend / (clock_factor * 2)));
+ } else {
+ _SPI_NAND_PRINTF("FPGA can not Set SPI Clock, FPGA SPI Clock is fixed to 25 Mhz\n");
+ }
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_NAND_FLASH_RTN_T spi_nand_block_aligned_check( u32 addr,
+ * u32 len )
+ * PURPOSE : To check block align.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : addr - The addr variable of this function.
+ * len - The len variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/15 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_block_aligned_check( u32 addr,
+ u32 len )
+{
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR ;
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "SPI_NAND_BLOCK_ALIGNED_CHECK_check: addr=0x%x, len=0x%x, block size =0x%x \n", addr, len, (ptr_dev_info_t->erase_size));
+
+ if (_SPI_NAND_BLOCK_ALIGNED_CHECK(len, (ptr_dev_info_t->erase_size)))
+ {
+ len = ( (len/ptr_dev_info_t->erase_size) + 1) * (ptr_dev_info_t->erase_size);
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand__SPI_NAND_BLOCK_ALIGNED_CHECK_check: erase block aligned first check OK, addr:%x len:%x\n", addr, len, (ptr_dev_info_t->erase_size));
+ }
+
+ if (_SPI_NAND_BLOCK_ALIGNED_CHECK(addr, (ptr_dev_info_t->erase_size)) || _SPI_NAND_BLOCK_ALIGNED_CHECK(len, (ptr_dev_info_t->erase_size)) )
+ {
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand__SPI_NAND_BLOCK_ALIGNED_CHECK_check: erase block not aligned, addr:0x%x len:0x%x, blocksize:0x%x\n", addr, len, (ptr_dev_info_t->erase_size));
+ rtn_status = SPI_NAND_FLASH_RTN_ALIGNED_CHECK_FAIL;
+ }
+
+ return (rtn_status);
+}
+
+static SPI_NAND_FLASH_RTN_T spi_nand_erase_block ( u32 block_index)
+{
+
+ u8 status;
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+ spi_nand_select_die ( (block_index << _SPI_NAND_BLOCK_ROW_ADDRESS_OFFSET) );
+
+ /* 2.2 Enable write_to flash */
+ spi_nand_protocol_write_enable();
+
+ /* 2.3 Erasing one block */
+ spi_nand_protocol_block_erase( block_index );
+
+ /* 2.4 Checking status for erase complete */
+ do{
+ spi_nand_protocol_get_status_reg_3( &status);
+ }while( status & _SPI_NAND_VAL_OIP) ;
+
+ /* 2.5 Disable write_flash */
+ spi_nand_protocol_write_disable();
+
+ /* 2.6 Check Erase Fail Bit */
+ if( status & _SPI_NAND_VAL_ERASE_FAIL )
+ {
+ _SPI_NAND_PRINTF("spi_nand_erase_block : erase block fail, block=0x%x, status=0x%x\n", block_index, status);
+ rtn_status = SPI_NAND_FLASH_RTN_ERASE_FAIL;
+ }
+
+ return rtn_status;
+}
+
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_NAND_FLASH_RTN_T spi_nand_erase_internal( u32 addr,
+ * u32 len )
+ * PURPOSE : To erase flash internally.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : addr - The addr variable of this function.
+ * len - The size variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/15 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_erase_internal( u32 addr,
+ u32 len )
+{
+ u32 block_index = 0;
+ u32 erase_len = 0;
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+ unsigned long spinand_spinlock_flags;
+
+#if defined(TCSUPPORT_NAND_BMT) && !defined(LZMA_IMG)
+
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ unsigned short phy_block_bbt;
+ u32 logical_block, physical_block;
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+#endif
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "\nspi_nand_erase_internal (in): addr=0x%x, len=0x%x\n", addr, len );
+
+ _SPI_NAND_SEMAPHORE_LOCK();
+
+ SPI_CONTROLLER_CONF_T spi_conf_t;
+
+ /* Switch to manual mode*/
+ _SPI_NAND_ENABLE_MANUAL_MODE();
+
+ SPI_NAND_Flash_Clear_Read_Cache_Data();
+
+ /* 1. Check the address and len must aligned to NAND Flash block size */
+ if( spi_nand_block_aligned_check( addr, len) == SPI_NAND_FLASH_RTN_NO_ERROR)
+ {
+ /* 2. Erase block one by one */
+ while( erase_len < len )
+ {
+ /* 2.1 Caculate Block index */
+ block_index = (addr/(_current_flash_info_t.erase_size));
+#if defined(TCSUPPORT_NAND_BMT) && !defined(LZMA_IMG)
+ logical_block = block_index;
+ physical_block = get_mapping_block_index(logical_block, &phy_block_bbt);
+ if( physical_block != logical_block)
+ {
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "Bad Block Mapping, from %d block to %d block\n", logical_block, physical_block);
+ }
+ block_index = physical_block;
+#endif
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand_erase_internal: addr=0x%x, len=0x%x, block_idx=0x%x\n", addr, len, block_index );
+
+
+ rtn_status = spi_nand_erase_block(block_index);
+
+ /* 2.6 Check Erase Fail Bit */
+ if(rtn_status != SPI_NAND_FLASH_RTN_NO_ERROR)
+ {
+#if defined(TCSUPPORT_NAND_BMT) && !defined(LZMA_IMG)
+ if (update_bmt(addr,UPDATE_ERASE_FAIL, NULL, NULL))
+ {
+ _SPI_NAND_PRINTF("Erase fail at block: %d, update BMT success\n", addr/(_current_flash_info_t.erase_size));
+ rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+ }
+ else
+ {
+ _SPI_NAND_PRINTF("Erase fail at block: %d, update BMT fail\n", addr/(_current_flash_info_t.erase_size));
+ rtn_status = SPI_NAND_FLASH_RTN_ERASE_FAIL;
+ break;
+ }
+#else
+ _SPI_NAND_PRINTF("spi_nand_erase_internal : Erase Fail at addr=0x%x, len=0x%x, block_idx=0x%x\n", addr, len, block_index);
+ rtn_status = SPI_NAND_FLASH_RTN_ERASE_FAIL;
+ break;
+
+#endif
+ }
+
+ /* 2.7 Erase next block if needed */
+ addr += _current_flash_info_t.erase_size;
+ erase_len += _current_flash_info_t.erase_size;
+
+ }
+ }
+ else
+ {
+ rtn_status = SPI_NAND_FLASH_RTN_ALIGNED_CHECK_FAIL;
+ }
+
+ _SPI_NAND_SEMAPHORE_UNLOCK();
+
+ return (rtn_status);
+}
+
+
+
+
+static SPI_NAND_FLASH_RTN_T spi_nand_read_page (u32 page_number, SPI_NAND_FLASH_READ_SPEED_MODE_T speed_mode)
+{
+
+ u32 idx=0;
+ u32 i, j;
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+ struct spi_nand_flash_oobfree *ptr_oob_entry_idx;
+ u16 read_addr;
+ SPI_NFI_MISC_SPEDD_CONTROL_T dma_speed_mode;
+ u32 read_cmd;
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ SPI_NFI_CONF_T spi_nfi_conf_t;
+ SPI_ECC_DECODE_CONF_T spi_ecc_decode_conf_t;
+ u32 check_cnt;
+
+ SPI_ECC_DECODE_STATUS_T decode_status_t;
+ SPI_CONTROLLER_CONF_T spi_conf_t;
+ u32 offset1, offset2, offset3, dma_sec_size;
+
+ /* read from read_addr index in the page */
+ read_addr = 0;
+
+ /* Switch to manual mode*/
+ _SPI_NAND_ENABLE_MANUAL_MODE();
+
+ /* 1. Load Page into cache of NAND Flash Chip */
+ if( spi_nand_load_page_into_cache(page_number) == SPI_NAND_FLASH_RTN_DETECTED_BAD_BLOCK )
+ {
+ _SPI_NAND_PRINTF("spi_nand_read_page: Bad Block, ECC cannot recovery detecte, page=0x%x\n", page_number);
+ rtn_status = SPI_NAND_FLASH_RTN_DETECTED_BAD_BLOCK;
+ }
+
+ /* 2. Read whole data from cache of NAND Flash Chip */
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand_read_page: curren_page_num=0x%x, page_number=0x%x\n", _current_page_num, page_number);
+
+ /* No matter what status, we must read the cache data to dram */
+ if( (_current_page_num != page_number) )
+ {
+ memset(_current_cache_page, 0x0, sizeof(_current_cache_page));
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_2, "spi_nand_read_page: before read, _current_cache_page:\n");
+ _SPI_NAND_DEBUG_PRINTF_ARRAY(SPI_NAND_FLASH_DEBUG_LEVEL_2, &_current_cache_page[0], _SPI_NAND_CACHE_SIZE);
+
+ if( ((ptr_dev_info_t->feature) & SPI_NAND_FLASH_PLANE_SELECT_HAVE) )
+ {
+ _plane_select_bit = ((page_number >> 6)& (0x1));
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1,"spi_nand_read_page: plane select = 0x%x\n", _plane_select_bit);
+ }
+
+ if( _spi_dma_mode ==1 )
+ {
+ SPI_CONTROLLER_Get_Configure(&spi_conf_t);
+ spi_conf_t.dummy_byte_num = 0 ;
+ spi_conf_t.mode = SPI_CONTROLLER_MODE_DMA;
+ SPI_CONTROLLER_Set_Configure(&spi_conf_t);
+
+ /* Reset NFI statemachie is neccessary */
+ SPI_NFI_Reset();
+
+ SPI_NFI_Get_Configure(&spi_nfi_conf_t);
+ SPI_NFI_Set_Configure(&spi_nfi_conf_t);
+
+ SPI_ECC_Decode_Get_Configure(&spi_ecc_decode_conf_t);
+ SPI_ECC_Decode_Set_Configure(&spi_ecc_decode_conf_t);
+
+ if( spi_nfi_conf_t.hw_ecc_t == SPI_NFI_CON_HW_ECC_Enable )
+ {
+ SPI_ECC_Decode_Get_Configure(&spi_ecc_decode_conf_t);
+
+ if( spi_ecc_decode_conf_t.decode_en == SPI_ECC_DECODE_ENABLE )
+ {
+ (spi_ecc_decode_conf_t.decode_block_size) = (((spi_nfi_conf_t.fdm_ecc_num) + 512)*8) + ((spi_ecc_decode_conf_t.decode_ecc_abiliry) *13);
+
+ SPI_ECC_Decode_Set_Configure(&spi_ecc_decode_conf_t);
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand_read_page: decode block size=0x%x, ecc_num=0x%x, ecc_ab=0x%x\n", VPint(0xBFA12104), (spi_nfi_conf_t.fdm_ecc_num), (spi_ecc_decode_conf_t.decode_ecc_abiliry));
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand_read_page : SPI_ECC_Decode_Enable \n");
+
+ SPI_ECC_Decode_Disable();
+ SPI_ECC_Encode_Disable();
+ SPI_ECC_Decode_Enable();
+ }
+ }
+
+ /* Set plane select */
+ if(((ptr_dev_info_t->feature) & SPI_NAND_FLASH_PLANE_SELECT_HAVE)) {
+ if( _plane_select_bit == 0) {
+ read_addr &= ~(0x1000);
+ } else if( _plane_select_bit == 1) {
+ read_addr |= (0x1000);
+ }
+ }
+
+ switch (speed_mode)
+ {
+ case SPI_NAND_FLASH_READ_SPEED_MODE_SINGLE:
+ dma_speed_mode = SPI_NFI_MISC_CONTROL_X1;
+ read_cmd = _SPI_NAND_OP_READ_FROM_CACHE_SINGLE;
+ break;
+
+ case SPI_NAND_FLASH_READ_SPEED_MODE_DUAL:
+ dma_speed_mode = SPI_NFI_MISC_CONTROL_X2;
+ read_cmd = _SPI_NAND_OP_READ_FROM_CACHE_DUAL;
+ break;
+
+ case SPI_NAND_FLASH_READ_SPEED_MODE_QUAD:
+ dma_speed_mode = SPI_NFI_MISC_CONTROL_X4;
+ read_cmd = _SPI_NAND_OP_READ_FROM_CACHE_QUAD;
+ break;
+
+ default:
+ _SPI_NAND_PRINTF("[Error] Read DMA : read speed setting error:%d!\n", speed_mode);
+ dma_speed_mode = SPI_NFI_MISC_CONTROL_X1;
+ read_cmd = _SPI_NAND_OP_READ_FROM_CACHE_SINGLE;
+ break;
+ }
+
+ rtn_status = SPI_NFI_Read_SPI_NAND_Page(dma_speed_mode, read_cmd, read_addr, K1_TO_PHY(&dma_read_page[0]));
+
+ if( spi_nfi_conf_t.hw_ecc_t == SPI_NFI_CON_HW_ECC_Enable )
+ {
+ if( spi_ecc_decode_conf_t.decode_en == SPI_ECC_DECODE_ENABLE )
+ {
+ /* Check Decode done or not */
+ for( check_cnt = 0 ; check_cnt < _SPI_NFI_CHECK_ECC_DONE_MAX_TIMES ; check_cnt ++)
+ {
+ SPI_ECC_Decode_Check_Done (&decode_status_t);
+
+ if( decode_status_t == SPI_ECC_DECODE_STATUS_DONE )
+ {
+ break;
+ }
+ }
+ if(check_cnt == _SPI_NFI_CHECK_ECC_DONE_MAX_TIMES)
+ {
+ _SPI_NAND_PRINTF("[Error] Read ECC : Check Decode Done Timeout ! \n");
+
+ SPI_NAND_Flash_Clear_Read_Cache_Data();
+
+ /* return somthing ? */
+ rtn_status = SPI_ECC_DECODE_STATUS_TIMEOUT;
+ }
+
+ if( SPI_ECC_DECODE_Check_Correction_Status() == SPI_ECC_RTN_CORRECTION_ERROR )
+ {
+ _SPI_NAND_PRINTF("[Error] Read ECC : ECC Fail! page:0x%x\n", page_number);
+ SPI_NAND_Flash_Clear_Read_Cache_Data();
+
+ /* Switch to manual mode*/
+ _SPI_NAND_ENABLE_MANUAL_MODE();
+
+ /* return somthing ?*/
+ rtn_status = SPI_NAND_FLASH_RTN_DETECTED_BAD_BLOCK;
+ }
+ }
+ }
+
+ if( ( spi_nfi_conf_t.auto_fdm_t) == SPI_NFI_CON_AUTO_FDM_Disable ) {
+ memcpy(&_current_cache_page[0], K0_TO_K1(&dma_read_page[0]), ptr_dev_info_t->page_size + ptr_dev_info_t->oob_size);
+ } else {
+ memcpy(&_current_cache_page[0], K0_TO_K1(&dma_read_page[0]), ptr_dev_info_t->page_size);
+ }
+
+ /* Switch to manual mode*/
+ _SPI_NAND_ENABLE_MANUAL_MODE();
+ }
+ else
+ {
+ spi_nand_protocol_read_from_cache(read_addr, ((ptr_dev_info_t->page_size)+(ptr_dev_info_t->oob_size)), &_current_cache_page[0], speed_mode, ptr_dev_info_t->dummy_mode );
+ }
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_2, "spi_nand_read_page: after read, _current_cache_page:\n");
+ _SPI_NAND_DEBUG_PRINTF_ARRAY(SPI_NAND_FLASH_DEBUG_LEVEL_2, &_current_cache_page[0], _SPI_NAND_CACHE_SIZE);
+
+ /* Divide read page into data segment and oob segment */
+ if( _spi_dma_mode ==1 )
+ {
+ if( ( spi_nfi_conf_t.auto_fdm_t) == SPI_NFI_CON_AUTO_FDM_Disable )
+ {
+ offset1 = 0;
+ offset2 = 0;
+ offset3 = 0;
+ dma_sec_size = ((spi_nfi_conf_t.page_size_t)/ (spi_nfi_conf_t.sec_num));
+ for(i =0 ; i< (spi_nfi_conf_t.sec_num) ; i++ )
+ {
+ memcpy( &_current_cache_page_data[offset1], &_current_cache_page[offset2], dma_sec_size );
+ memcpy( &_current_cache_page_oob[offset3], &_current_cache_page[offset2+dma_sec_size], (spi_nfi_conf_t.spare_size_t) );
+ offset1 += dma_sec_size;
+ offset2 += (dma_sec_size+ (spi_nfi_conf_t.spare_size_t));
+ offset3 += (spi_nfi_conf_t.spare_size_t);
+ }
+ }
+ else /* Auto FDM enable : Data and oob alternate , Data inside DRAM , oob inside NFI register */
+ {
+ memcpy( &_current_cache_page_data[0], &_current_cache_page[0], (ptr_dev_info_t->page_size) );
+ SPI_NFI_Read_SPI_NAND_FDM( &_current_cache_page_oob[0], (ptr_dev_info_t->oob_size));
+ }
+
+ memcpy( &_current_cache_page_oob_mapping[0], &_current_cache_page_oob[0], ptr_dev_info_t->oob_size);
+ }
+ else
+ {
+ memcpy( &_current_cache_page_data[0], &_current_cache_page[0], (ptr_dev_info_t->page_size) );
+ memcpy( &_current_cache_page_oob[0], &_current_cache_page[(ptr_dev_info_t->page_size)], (ptr_dev_info_t->oob_size) );
+
+ idx = 0;
+ ptr_oob_entry_idx = (struct spi_nand_flash_oobfree*) &( (ptr_dev_info_t->oob_free_layout)->oobfree );
+
+		if( _ondie_ecc_flag == 1) /* When OnDie ECC is enabled, mapping the oob area is necessary */
+ {
+ /* Transter oob area from physical offset into logical offset */
+ for( i=0; (i<SPI_NAND_FLASH_OOB_FREE_ENTRY_MAX) && (ptr_oob_entry_idx[i].len) && (idx< ((ptr_dev_info_t->oob_free_layout)->oobsize)) ; i++)
+ {
+ for(j=0; (j< (ptr_oob_entry_idx[i].len)) && (idx<(ptr_dev_info_t->oob_free_layout->oobsize)) ; j++)
+ {
+ /* _SPI_NAND_PRINTF("i=%d , j=%d, len=%d, idx=%d, size=%d\n", i, j,(ptr_oob_entry_idx[i].len), idx, (ptr_dev_info_t->oob_free_layout->oobsize) ); */
+ _current_cache_page_oob_mapping[idx] = _current_cache_page_oob[(ptr_oob_entry_idx[i].offset)+j];
+ idx++;
+ }
+ }
+ }
+ else
+ {
+ memcpy( &_current_cache_page_oob_mapping[0], &_current_cache_page_oob[0], (ptr_dev_info_t->oob_size) );
+ }
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_2, "spi_nand_read_page: _current_cache_page:\n");
+ _SPI_NAND_DEBUG_PRINTF_ARRAY(SPI_NAND_FLASH_DEBUG_LEVEL_2, &_current_cache_page[0], ((ptr_dev_info_t->page_size)+(ptr_dev_info_t->oob_size)));
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_2, "spi_nand_read_page: _current_cache_page_oob:\n");
+ _SPI_NAND_DEBUG_PRINTF_ARRAY(SPI_NAND_FLASH_DEBUG_LEVEL_2, &_current_cache_page_oob[0], (ptr_dev_info_t->oob_size));
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_2, "spi_nand_read_page: _current_cache_page_oob_mapping:\n");
+ _SPI_NAND_DEBUG_PRINTF_ARRAY(SPI_NAND_FLASH_DEBUG_LEVEL_2, &_current_cache_page_oob_mapping[0], (ptr_dev_info_t->oob_size));
+ }
+ _current_page_num = page_number;
+ }
+
+ return rtn_status;
+}
+
+static SPI_NAND_FLASH_RTN_T spi_nand_dma_program_load(u32 addr, u32 oob_len, SPI_NAND_FLASH_WRITE_SPEED_MODE_T speed_mode)
+{
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+
+ SPI_CONTROLLER_CONF_T spi_conf_t;
+ SPI_NFI_CONF_T spi_nfi_conf_t;
+ SPI_ECC_ENCODE_CONF_T spi_ecc_encode_conf_t;
+ SPI_ECC_DECODE_CONF_T spi_ecc_decode_conf_t;
+ u32 offset1, offset2, offset3, dma_sec_size;
+ u32 i=0;
+ SPI_NAND_FLASH_RTN_T rtn_status;
+ u16 write_addr;
+ u32 write_cmd;
+ SPI_NFI_MISC_SPEDD_CONTROL_T dma_speed_mode;
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ /* Set plane select */
+ if(((ptr_dev_info_t->feature) & SPI_NAND_FLASH_PLANE_SELECT_HAVE)) {
+ if( _plane_select_bit == 0) {
+			write_addr &= ~(0x1000); /* NOTE(review): write_addr is used uninitialized here — only the else branch below assigns it from addr */
+ } else if( _plane_select_bit == 1) {
+ write_addr |= (0x1000);
+ }
+ } else {
+ write_addr = addr;
+ }
+
+ switch (speed_mode)
+ {
+ case SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE:
+ dma_speed_mode = SPI_NFI_MISC_CONTROL_X1;
+ write_cmd = _SPI_NAND_OP_PROGRAM_LOAD_SINGLE;
+ break;
+
+ case SPI_NAND_FLASH_WRITE_SPEED_MODE_QUAD:
+ dma_speed_mode = SPI_NFI_MISC_CONTROL_X4;
+ write_cmd = _SPI_NAND_OP_PROGRAM_LOAD_QUAD;
+ break;
+
+ default:
+ _SPI_NAND_PRINTF("[Error] Write DMA : write speed setting error:%d!\n", speed_mode);
+ dma_speed_mode = SPI_NFI_MISC_CONTROL_X1;
+ write_cmd = _SPI_NAND_OP_PROGRAM_LOAD_SINGLE;
+ break;
+ }
+
+ SPI_CONTROLLER_Get_Configure(&spi_conf_t);
+ spi_conf_t.dummy_byte_num = 0 ;
+ spi_conf_t.mode = SPI_CONTROLLER_MODE_DMA;
+ SPI_CONTROLLER_Set_Configure(&spi_conf_t);
+
+	/* Resetting the NFI state machine is necessary */
+ SPI_NFI_Reset();
+
+ SPI_NFI_Get_Configure(&spi_nfi_conf_t);
+ SPI_NFI_Set_Configure(&spi_nfi_conf_t);
+
+ SPI_ECC_Encode_Get_Configure(&spi_ecc_encode_conf_t);
+ SPI_ECC_Encode_Set_Configure(&spi_ecc_encode_conf_t);
+
+ if( spi_nfi_conf_t.hw_ecc_t == SPI_NFI_CON_HW_ECC_Enable )
+ {
+ SPI_ECC_Encode_Get_Configure(&spi_ecc_encode_conf_t);
+
+ if( spi_ecc_encode_conf_t.encode_en == SPI_ECC_ENCODE_ENABLE )
+ {
+ (spi_ecc_encode_conf_t.encode_block_size) = ((spi_nfi_conf_t.fdm_ecc_num) + 512) ;
+
+ SPI_ECC_Encode_Set_Configure(&spi_ecc_encode_conf_t);
+
+ SPI_ECC_Encode_Disable();
+ SPI_ECC_Decode_Disable();
+ SPI_ECC_Encode_Enable();
+ }
+ }
+
+ /*Set oob */
+ if( oob_len > 0 ) /* Write OOB */
+ {
+ if( ( spi_nfi_conf_t.auto_fdm_t) == SPI_NFI_CON_AUTO_FDM_Disable ) /* Data and oob alternate */
+ {
+ offset1 = 0;
+ offset2 = 0;
+ offset3 = 0;
+ dma_sec_size = ((spi_nfi_conf_t.page_size_t)/ (spi_nfi_conf_t.sec_num));
+
+ for(i =0 ; i< (spi_nfi_conf_t.sec_num) ; i++ )
+ {
+ memcpy( &_current_cache_page[offset2], &_current_cache_page_data[offset1], dma_sec_size );
+ memcpy( &_current_cache_page[offset2+dma_sec_size], &_current_cache_page_oob[offset3] , (spi_nfi_conf_t.spare_size_t) );
+ offset1 += dma_sec_size;
+ offset2 += (dma_sec_size+ (spi_nfi_conf_t.spare_size_t));
+ offset3 += (spi_nfi_conf_t.fdm_num);
+ }
+
+ /* Set data */
+ memcpy(K0_TO_K1(&dma_write_page[0]), &_current_cache_page[0], ptr_dev_info_t->page_size + ptr_dev_info_t->oob_size);
+ }
+ else /* Data inside DRAM , oob inside NFI register */
+ {
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand_dma_program_load: set fdm\n");
+ SPI_NFI_Write_SPI_NAND_FDM(&_current_cache_page_oob[0], oob_len);
+
+ /* Set data */
+ memcpy(K0_TO_K1(&dma_write_page[0]), &_current_cache_page[0], ptr_dev_info_t->page_size);
+ }
+ }
+
+ rtn_status = SPI_NFI_Write_SPI_NAND_page(dma_speed_mode, write_cmd, write_addr, K1_TO_PHY(&dma_write_page[0]));
+
+ /* Switch to manual mode*/
+ _SPI_NAND_ENABLE_MANUAL_MODE();
+
+ return rtn_status;
+}
+
+#ifdef UBIFS_BLANK_PAGE_FIXUP
+UBIFS_BLANK_PAGE_ECC_T check_blank_page(u32 page_number)
+{
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ u32 block, page_per_block;
+ u8 ecc_parity_0[8] = {0}, i = 0;
+ SPI_NAND_FLASH_DEBUG_LEVEL_T ubiDbgLv = SPI_NAND_FLASH_DEBUG_LEVEL_2;
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ if (ptr_dev_info_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_WINBOND) {
+ if((ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_W25N01GV) ||
+ (ptr_dev_info_t->dev_id == _SPI_NAND_DEVICE_ID_W25M02GV)) {
+ /* for winbond, if data has been written blank,
+ * the ECC parity is all 0.
+ */
+ memset(ecc_parity_0, 0x0, sizeof(ecc_parity_0));
+
+ for(i = 0; i < 4; i++) {
+ if(memcmp(ecc_parity_0, &_current_cache_page_oob[i * 16 + 8], sizeof(ecc_parity_0)) == 0) {
+ page_per_block = (ptr_dev_info_t->erase_size / ptr_dev_info_t->page_size);
+ block = page_number / page_per_block;
+
+ _SPI_NAND_DEBUG_PRINTF(ubiDbgLv, "page 0x%x detected ECC parity 0 at block:0x%x page offset:0x%x.\n", page_number, block, page_number % page_per_block);
+ _SPI_NAND_DEBUG_PRINTF_ARRAY(SPI_NAND_FLASH_DEBUG_LEVEL_2, &_current_cache_page[0], ((ptr_dev_info_t->page_size)+(ptr_dev_info_t->oob_size)));
+ return SPI_NAND_FLASH_UBIFS_BLANK_PAGE_ECC_MISMATCH; /* blank sector*/
+ }
+ }
+ }
+
+ return SPI_NAND_FLASH_UBIFS_BLANK_PAGE_ECC_MATCH; /* Good Page*/
+ } else {
+ return SPI_NAND_FLASH_UBIFS_BLANK_PAGE_ECC_MATCH; /* Good Page*/
+ }
+}
+
+SPI_NAND_FLASH_RTN_T store_block(u32 block, u8 *block_buf)
+{
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ int i;
+ u32 page_per_block;
+ u32 start_page;
+ SPI_NAND_FLASH_RTN_T rtn_status;
+ SPI_NAND_FLASH_DEBUG_LEVEL_T ubiDbgLv = SPI_NAND_FLASH_DEBUG_LEVEL_2;
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ page_per_block = (ptr_dev_info_t->erase_size / ptr_dev_info_t->page_size);
+ start_page = block * page_per_block;
+
+ // read all pages in the block
+ for(i = 0; i < page_per_block; i++) {
+ rtn_status = spi_nand_read_page(i + start_page, SPI_NAND_FLASH_READ_SPEED_MODE_SINGLE);
+ if(rtn_status == SPI_NAND_FLASH_RTN_NO_ERROR) {
+ memcpy(block_buf + i * (ptr_dev_info_t->page_size + ptr_dev_info_t->oob_size),
+ _current_cache_page,
+ ptr_dev_info_t->page_size + ptr_dev_info_t->oob_size);
+
+ _SPI_NAND_DEBUG_PRINTF(ubiDbgLv, "store_block: block:0x%x page offset:0x%x\n", block, i);
+ _SPI_NAND_DEBUG_PRINTF_ARRAY(SPI_NAND_FLASH_DEBUG_LEVEL_2, &_current_cache_page[0], ((ptr_dev_info_t->page_size)+(ptr_dev_info_t->oob_size)));
+ }
+ else
+ {
+ _SPI_NAND_PRINTF("%s: fix blank page 0x%x read error\n",__func__, start_page+i);
+ return rtn_status;
+ }
+ }
+
+ return SPI_NAND_FLASH_RTN_NO_ERROR;
+}
+
+SPI_NAND_FLASH_RTN_T restore_block(u32 block, u8 *block_buf, u32 page_number)
+{
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ u32 i, j, k, idx;
+ u32 page_per_block;
+ u32 start_page;
+ SPI_NAND_FLASH_RTN_T rtn_status;
+ int isBlankData, isBlankOOB;
+ u8 *page_buf = NULL;
+ struct spi_nand_flash_oobfree *ptr_oob_entry_idx;
+ SPI_NAND_FLASH_DEBUG_LEVEL_T ubiDbgLv = SPI_NAND_FLASH_DEBUG_LEVEL_2;
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ page_per_block = (ptr_dev_info_t->erase_size / ptr_dev_info_t->page_size);
+ start_page = block * page_per_block;
+
+ // read all pages in the block
+ for(i = 0; i < page_per_block; i++) {
+ if((i + start_page) == page_number) {
+ _SPI_NAND_DEBUG_PRINTF(ubiDbgLv, "restore_block: skip source page block:0x%x page offset:0x%x\n", block, i);
+ continue;
+ }
+
+ page_buf = (block_buf + i * (ptr_dev_info_t->page_size + ptr_dev_info_t->oob_size));
+
+ isBlankData = 1;
+ for(idx = 0; idx < ptr_dev_info_t->page_size; idx++) {
+ if(page_buf[idx] != 0xFF) {
+ isBlankData = 0;
+ break;
+ }
+ }
+
+ if(isBlankData == 1) {
+ isBlankOOB = 1;
+ ptr_oob_entry_idx = (struct spi_nand_flash_oobfree*) &( ptr_dev_info_t->oob_free_layout->oobfree );
+
+ idx = 0;
+ for(k = 0; (k < SPI_NAND_FLASH_OOB_FREE_ENTRY_MAX) && (ptr_oob_entry_idx[k].len) && idx < ptr_dev_info_t->oob_free_layout->oobsize; k++)
+ {
+ for(j = 0; j < ptr_oob_entry_idx[k].len && idx < ptr_dev_info_t->oob_free_layout->oobsize; j++)
+ {
+ if(page_buf[ptr_dev_info_t->page_size + (ptr_oob_entry_idx[k].offset) + j] != 0xFF) {
+ isBlankData = 0;
+ k = SPI_NAND_FLASH_OOB_FREE_ENTRY_MAX;
+ break;
+ }
+ idx++;
+ }
+ }
+ }
+
+ if((isBlankData == 1) && (isBlankOOB == 1)) {
+ _SPI_NAND_DEBUG_PRINTF(ubiDbgLv, "restore_block: skip blank page block:0x%x page offset:0x%x\n", block, i);
+ continue;
+ }
+
+ rtn_status = spi_nand_write_page(i + start_page,
+ 0,
+ (block_buf + i * (ptr_dev_info_t->page_size + ptr_dev_info_t->oob_size)),
+ ptr_dev_info_t->page_size,
+ 0,
+ (block_buf + ptr_dev_info_t->page_size + i * (ptr_dev_info_t->page_size + ptr_dev_info_t->oob_size)),
+ ptr_dev_info_t->oob_size,
+ SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE);
+
+ _SPI_NAND_DEBUG_PRINTF(ubiDbgLv, "restore_block: block:0x%x page offset:0x%x\n", block, i);
+
+ if(rtn_status == SPI_NAND_FLASH_RTN_NO_ERROR) {
+ _SPI_NAND_DEBUG_PRINTF(ubiDbgLv, "fixed page %x\n", start_page + i);
+ } else {
+ _SPI_NAND_PRINTF("%s: fix_ecc_0 0x%x write error \n",__func__, start_page+i);
+ return rtn_status;
+ }
+ }
+
+ return SPI_NAND_FLASH_RTN_NO_ERROR;
+}
+
+
+UBIFS_BLANK_PAGE_FIXUP_T fix_blank_page(u32 page_number)
+{
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ u8 *block_buf;
+ u32 page_per_block;
+ u32 block;
+ SPI_NAND_FLASH_RTN_T rtn_status;
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ page_per_block = (ptr_dev_info_t->erase_size / ptr_dev_info_t->page_size);
+ block = page_number / page_per_block;
+
+ block_buf = (u8 *) kmalloc((ptr_dev_info_t->page_size + ptr_dev_info_t->oob_size) * page_per_block, GFP_KERNEL);
+ if(!block_buf) {
+ _SPI_NAND_PRINTF("%s:can not allocate buffer\n", __func__);
+ return SPI_NAND_FLASH_UBIFS_BLANK_PAGE_FIXUP_FAIL;
+ }
+ memset(block_buf, 0xff, (ptr_dev_info_t->page_size + ptr_dev_info_t->oob_size) * page_per_block);
+
+ /* store block */
+ rtn_status = store_block(block, block_buf);
+ if(rtn_status != SPI_NAND_FLASH_RTN_NO_ERROR) {
+ kfree(block_buf);
+ return SPI_NAND_FLASH_UBIFS_BLANK_PAGE_FIXUP_FAIL;
+ }
+
+ /* erase block */
+ rtn_status = spi_nand_erase_block(block);
+ if(rtn_status != SPI_NAND_FLASH_RTN_NO_ERROR) {
+ kfree(block_buf);
+ return SPI_NAND_FLASH_UBIFS_BLANK_PAGE_FIXUP_FAIL;
+ }
+
+ /* restore block except page_number */
+ rtn_status = restore_block(block, block_buf, page_number);
+ if(rtn_status != SPI_NAND_FLASH_RTN_NO_ERROR) {
+ kfree(block_buf);
+ return SPI_NAND_FLASH_UBIFS_BLANK_PAGE_FIXUP_FAIL;
+ }
+
+ kfree(block_buf);
+ return SPI_NAND_FLASH_UBIFS_BLANK_PAGE_FIXUP_SUCCESS;
+}
+#endif
+
+static SPI_NAND_FLASH_RTN_T spi_nand_write_page( u32 page_number,
+ u32 data_offset,
+ u8 *ptr_data,
+ u32 data_len,
+ u32 oob_offset,
+ u8 *ptr_oob ,
+ u32 oob_len,
+ SPI_NAND_FLASH_WRITE_SPEED_MODE_T speed_mode )
+{
+
+ u8 status, status_2;
+ u32 i=0,j=0,idx=0;
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ struct spi_nand_flash_oobfree *ptr_oob_entry_idx;
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+ u16 write_addr;
+ int data_content, oob_content;
+ SPI_NAND_FLASH_DEBUG_LEVEL_T ubiDbgLv = SPI_NAND_FLASH_DEBUG_LEVEL_2;
+ static int isUbifsBlankPageFix = 0;
+
+ /* write to write_addr index in the page */
+ write_addr = 0;
+
+ /* Switch to manual mode*/
+ _SPI_NAND_ENABLE_MANUAL_MODE();
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+#ifdef UBIFS_BLANK_PAGE_FIXUP
+ if(isUbifsBlankPageFix == 0) {
+ /* Read Current page data to software cache buffer */
+ spi_nand_read_page(page_number, speed_mode);
+
+ if(check_blank_page(page_number) == SPI_NAND_FLASH_UBIFS_BLANK_PAGE_ECC_MISMATCH) {
+ isUbifsBlankPageFix = 1;
+ _SPI_NAND_DEBUG_PRINTF(ubiDbgLv, "UBIFS_BLANK_PAGE_FIXUP, page:0x%x\n", page_number);
+ fix_blank_page(page_number);
+ isUbifsBlankPageFix = 0;
+
+ /* Read Current page data to software cache buffer */
+ spi_nand_read_page(page_number, speed_mode);
+ }
+ }
+#else
+ /* Read Current page data to software cache buffer */
+ spi_nand_read_page(page_number, speed_mode);
+#endif
+
+	/* Rewrite the software cache buffer */
+ if(data_len > 0)
+ {
+ memcpy( &_current_cache_page_data[data_offset], &ptr_data[0], data_len );
+ }
+
+ if( oob_len > 0 ) /* Write OOB */
+ {
+ if( _spi_dma_mode ==1 )
+ {
+ memcpy( &_current_cache_page_oob[0], &ptr_oob[0], oob_len );
+ }
+ else
+ {
+			if(_ondie_ecc_flag == 1) /* When OnDie ECC is enabled, mapping the oob area is necessary */
+ {
+ ptr_oob_entry_idx = (struct spi_nand_flash_oobfree*) &( ptr_dev_info_t->oob_free_layout->oobfree );
+
+ for( i=0; (i<SPI_NAND_FLASH_OOB_FREE_ENTRY_MAX) && (ptr_oob_entry_idx[i].len) && ((idx< (ptr_dev_info_t->oob_free_layout->oobsize)) && (idx < oob_len)) ; i++)
+ {
+ for(j=0; (j< (ptr_oob_entry_idx[i].len)) && (idx<(ptr_dev_info_t->oob_free_layout->oobsize)) && ((idx< (ptr_dev_info_t->oob_free_layout->oobsize)) && (idx < oob_len)) ; j++)
+ {
+
+ _current_cache_page_oob[(ptr_oob_entry_idx[i].offset)+j] &= ptr_oob[idx];
+ idx++;
+ }
+ }
+ }
+ else
+ {
+ memcpy( &_current_cache_page_oob[0], &ptr_oob[0], oob_len);
+ }
+ }
+ }
+
+ memcpy( &_current_cache_page[0], &_current_cache_page_data[0], ptr_dev_info_t->page_size );
+ memcpy( &_current_cache_page[ptr_dev_info_t->page_size], &_current_cache_page_oob[0], ptr_dev_info_t->oob_size );
+
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_2, "spi_nand_write_page: page=0x%x, data_offset=0x%x, date_len=0x%x, oob_offset=0x%x, oob_len=0x%x\n", page_number, data_offset, data_len, oob_offset, oob_len);
+ _SPI_NAND_DEBUG_PRINTF_ARRAY(SPI_NAND_FLASH_DEBUG_LEVEL_2, &_current_cache_page[0], ((ptr_dev_info_t->page_size) + (ptr_dev_info_t->oob_size)));
+
+ if( ((ptr_dev_info_t->feature) & SPI_NAND_FLASH_PLANE_SELECT_HAVE) )
+ {
+ _plane_select_bit = ((page_number >> 6) & (0x1));
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_2, "spi_nand_write_page: _plane_select_bit=0x%x\n", _plane_select_bit );
+
+ }
+
+ spi_nand_select_die ( page_number );
+
+	/* Different manufacturers have different program flows and settings */
+ if( ((ptr_dev_info_t->mfr_id) == _SPI_NAND_MANUFACTURER_ID_GIGADEVICE) ||
+ ((ptr_dev_info_t->mfr_id) == _SPI_NAND_MANUFACTURER_ID_XTX) ||
+ ((ptr_dev_info_t->mfr_id) == _SPI_NAND_MANUFACTURER_ID_FM) )
+ {
+ if( _spi_dma_mode ==1 )
+ {
+ rtn_status = spi_nand_dma_program_load(write_addr, oob_len, speed_mode);
+ if(rtn_status != SPI_NAND_FLASH_RTN_NO_ERROR) {
+ SPI_NAND_Flash_Clear_Read_Cache_Data();
+ /* Switch to manual mode*/
+ _SPI_NAND_ENABLE_MANUAL_MODE();
+ return (rtn_status);
+ }
+ }
+ else
+ {
+ spi_nand_protocol_program_load(write_addr, &_current_cache_page[0], ((ptr_dev_info_t->page_size) + (ptr_dev_info_t->oob_size)), speed_mode);
+ }
+
+ /* Enable write_to flash */
+ spi_nand_protocol_write_enable();
+ }
+ else
+ {
+ /* Enable write_to flash */
+ spi_nand_protocol_write_enable();
+
+ if( _spi_dma_mode ==1 )
+ {
+ rtn_status = spi_nand_dma_program_load(write_addr, oob_len, speed_mode);
+ if(rtn_status != SPI_NAND_FLASH_RTN_NO_ERROR) {
+ SPI_NAND_Flash_Clear_Read_Cache_Data();
+ /* Switch to manual mode*/
+ _SPI_NAND_ENABLE_MANUAL_MODE();
+ return (rtn_status);
+ }
+ }
+ else
+ {
+			/* Program data into buffer of SPI NAND chip */
+ spi_nand_protocol_program_load(write_addr, &_current_cache_page[0], ((ptr_dev_info_t->page_size) + (ptr_dev_info_t->oob_size)), speed_mode);
+ }
+ }
+
+ /* Execute program data into SPI NAND chip */
+ spi_nand_protocol_program_execute ( page_number );
+
+ /* Checking status for erase complete */
+ do{
+ spi_nand_protocol_get_status_reg_3( &status);
+ }while( status & _SPI_NAND_VAL_OIP) ;
+
+	/* Disable write_flash */
+ spi_nand_protocol_write_disable();
+
+ spi_nand_protocol_get_status_reg_1( &status_2);
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spi_nand_write_page]: status 1 = 0x%x, status 3 =0x%x\n", status_2, status);
+
+ /* Check Program Fail Bit */
+ if( status & _SPI_NAND_VAL_PROGRAM_FAIL )
+ {
+ _SPI_NAND_PRINTF("spi_nand_write_page : Program Fail at addr_offset =0x%x, page_number=0x%x, status=0x%x\n", data_offset, page_number, status);
+ rtn_status = SPI_NAND_FLASH_RTN_PROGRAM_FAIL;
+ }
+
+
+ return (rtn_status);
+
+}
+
+
+
+
+static SPI_NAND_FLASH_RTN_T spi_nand_write_page_internal( u32 page_number,
+ u32 data_offset,
+ u8 *ptr_data,
+ u32 data_len,
+ u32 oob_offset,
+ u8 *ptr_oob ,
+ u32 oob_len,
+ SPI_NAND_FLASH_WRITE_SPEED_MODE_T speed_mode )
+{
+
+ u32 addr_offset;
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+ unsigned long spinand_spinlock_flags;
+
+#if defined(TCSUPPORT_NAND_BMT) && !defined(LZMA_IMG)
+ unsigned short phy_block_bbt;
+ u32 logical_block, physical_block;
+ u32 page_offset_in_block;
+#endif
+
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spi_nand_write_page_internal]: enter\n");
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ _SPI_NAND_SEMAPHORE_LOCK();
+
+ SPI_NAND_Flash_Clear_Read_Cache_Data();
+
+#if defined(TCSUPPORT_NAND_BMT) && !defined(LZMA_IMG)
+ page_offset_in_block = ((page_number * (ptr_dev_info_t->page_size))%(ptr_dev_info_t->erase_size))/(ptr_dev_info_t->page_size);
+ logical_block = ((page_number * (ptr_dev_info_t->page_size))/(ptr_dev_info_t->erase_size)) ;
+ physical_block = get_mapping_block_index(logical_block, &phy_block_bbt);
+
+ if( physical_block != logical_block)
+ {
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "Bad Block Mapping, from %d block to %d block\n", logical_block, physical_block);
+ }
+
+ page_number = (page_offset_in_block)+((physical_block*(ptr_dev_info_t->erase_size))/(ptr_dev_info_t->page_size));
+#endif
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spi_nand_write_page_internal]: page_number = 0x%x\n", page_number);
+
+#if defined(TCSUPPORT_NAND_BMT) && !defined(LZMA_IMG)
+ if(block_is_in_bmt_region(physical_block))
+ {
+ memcpy(ptr_oob + OOB_INDEX_OFFSET, &phy_block_bbt, OOB_INDEX_SIZE);
+ }
+
+ if(_SPI_NAND_TEST_FLAG == 0)
+ {
+
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_2, "spi_nand_write_page_internal: data_offset=0x%x, date_len=0x%x, oob_offset=0x%x, oob_len=0x%x\n", data_offset, data_len, oob_offset, oob_len);
+ _SPI_NAND_DEBUG_PRINTF_ARRAY(SPI_NAND_FLASH_DEBUG_LEVEL_2, &ptr_oob[0], oob_len);
+
+		rtn_status = spi_nand_write_page(page_number, addr_offset, ptr_data, data_len, 0, ptr_oob, MAX_USE_OOB_SIZE , speed_mode); /* NOTE(review): addr_offset is never initialized anywhere in this function */
+ }
+ else
+ {
+ rtn_status = SPI_NAND_FLASH_RTN_PROGRAM_FAIL;
+ }
+
+ if(rtn_status != SPI_NAND_FLASH_RTN_NO_ERROR)
+ {
+ _SPI_NAND_PRINTF("write fail at page: 0x%x \n", page_number);
+ if (update_bmt(page_number * (ptr_dev_info_t->page_size), UPDATE_WRITE_FAIL, ptr_data, ptr_oob))
+ {
+ _SPI_NAND_PRINTF("Update BMT success\n");
+ rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+ }
+ else
+ {
+ _SPI_NAND_PRINTF("Update BMT fail\n");
+ }
+ }
+#else
+	rtn_status = spi_nand_write_page(page_number, addr_offset, ptr_data, data_len, 0, ptr_oob, MAX_USE_OOB_SIZE , speed_mode); /* NOTE(review): addr_offset is never initialized anywhere in this function */
+#endif
+
+
+ SPI_NAND_Flash_Clear_Read_Cache_Data();
+
+ _SPI_NAND_SEMAPHORE_UNLOCK();
+
+ return (rtn_status);
+}
+
+
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_NAND_FLASH_RTN_T spi_nand_write_internal( u32 dst_addr,
+ * u32 len,
+ * u32 *ptr_rtn_len,
+ * u8* ptr_buf )
+ * PURPOSE : To write flash internally.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : dst_addr - The dst_addr variable of this function.
+ * len - The len variable of this function.
+ * ptr_buf - A pointer to the ptr_buf variable.
+ * OUTPUT: ptr_rtn_len - A pointer to the ptr_rtn_len variable.
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/19 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+ static SPI_NAND_FLASH_RTN_T spi_nand_write_internal( u32 dst_addr,
+ u32 len,
+ u32 *ptr_rtn_len,
+ u8* ptr_buf,
+ SPI_NAND_FLASH_WRITE_SPEED_MODE_T speed_mode )
+{
+ u32 remain_len, write_addr, data_len, page_number, physical_dst_addr;
+ u32 addr_offset;
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+ unsigned long spinand_spinlock_flags;
+
+#if defined(TCSUPPORT_NAND_BMT) && !defined(LZMA_IMG)
+ unsigned short phy_block_bbt;
+ unsigned long addr_offset_in_block;
+ u32 logical_block, physical_block;
+ u8 oob_buf[_SPI_NAND_OOB_SIZE]={0xff};
+#endif
+
+
+ *ptr_rtn_len = 0;
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+ remain_len = len;
+ write_addr = dst_addr;
+
+ _SPI_NAND_SEMAPHORE_LOCK();
+
+ SPI_NAND_Flash_Clear_Read_Cache_Data();
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand_write_internal: remain_len =0x%x\n", remain_len);
+
+ while( remain_len > 0 )
+ {
+ physical_dst_addr = write_addr;
+
+#if defined(TCSUPPORT_NAND_BMT) && !defined(LZMA_IMG)
+ memset(oob_buf, 0xff, _SPI_NAND_OOB_SIZE);
+ addr_offset_in_block = (write_addr %(ptr_dev_info_t->erase_size) );
+ logical_block = (write_addr / (ptr_dev_info_t->erase_size));
+ physical_block = get_mapping_block_index(logical_block, &phy_block_bbt);
+ physical_dst_addr = (physical_block * (ptr_dev_info_t->erase_size))+ addr_offset_in_block;
+
+ if( physical_block != logical_block)
+ {
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "Bad Block Mapping, from %d block to %d block\n", logical_block, physical_block);
+ }
+#endif
+
+		/* Calculate page number */
+ addr_offset = (physical_dst_addr % (ptr_dev_info_t->page_size));
+ page_number = (physical_dst_addr / (ptr_dev_info_t->page_size));
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "\nspi_nand_write_internal: addr_offset =0x%x, page_number=0x%x, remain_len=0x%x, page_size=0x%x\n", addr_offset, page_number, remain_len,(ptr_dev_info_t->page_size) );
+ if( ((addr_offset + remain_len ) > (ptr_dev_info_t->page_size)) ) /* data cross over than 1 page range */
+ {
+ data_len = ((ptr_dev_info_t->page_size) - addr_offset);
+ }
+ else
+ {
+ data_len = remain_len;
+ }
+
+#if defined(TCSUPPORT_NAND_BMT) && !defined(LZMA_IMG)
+ if(block_is_in_bmt_region(physical_block))
+ {
+ memcpy(oob_buf + OOB_INDEX_OFFSET, &phy_block_bbt, OOB_INDEX_SIZE);
+ }
+
+ if(_SPI_NAND_TEST_FLAG == 0)
+ {
+ rtn_status = spi_nand_write_page(page_number, addr_offset, &(ptr_buf[len - remain_len]), data_len, 0, &oob_buf[0], ptr_dev_info_t->oob_size , speed_mode);
+ }
+ else
+ {
+ rtn_status = SPI_NAND_FLASH_RTN_PROGRAM_FAIL;
+ }
+
+ if(rtn_status != SPI_NAND_FLASH_RTN_NO_ERROR)
+ {
+ _SPI_NAND_PRINTF("write fail at page: %d \n", page_number);
+ if (update_bmt(page_number * (ptr_dev_info_t->page_size), UPDATE_WRITE_FAIL, &(ptr_buf[len - remain_len]), oob_buf))
+ {
+ _SPI_NAND_PRINTF("Update BMT success\n");
+ rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+ }
+ else
+ {
+ _SPI_NAND_PRINTF("Update BMT fail\n");
+ _SPI_NAND_SEMAPHORE_UNLOCK();
+ return -1;
+ }
+ }
+#else
+ rtn_status = spi_nand_write_page(page_number, addr_offset, &(ptr_buf[len - remain_len]), data_len, 0, NULL, 0 , speed_mode);
+#endif
+
+		/* 8. Write remaining data if necessary */
+ write_addr += data_len;
+ remain_len -= data_len;
+		ptr_rtn_len += data_len; /* NOTE(review): this advances the pointer, not the output value — should likely be *ptr_rtn_len += data_len */
+
+ }
+
+ _SPI_NAND_SEMAPHORE_UNLOCK();
+
+ return (rtn_status);
+}
+
+static SPI_NAND_FLASH_RTN_T spi_nand_read_page_internal ( u32 page_number,
+ SPI_NAND_FLASH_READ_SPEED_MODE_T speed_mode )
+{
+ u32 logical_block, physical_block;
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+ unsigned long spinand_spinlock_flags;
+
+#if defined(TCSUPPORT_NAND_BMT) && !defined(LZMA_IMG)
+ unsigned short phy_block_bbt;
+ u32 page_offset_in_block;
+#endif
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+
+ _SPI_NAND_SEMAPHORE_LOCK();
+
+#if defined(TCSUPPORT_NAND_BMT) && !defined(LZMA_IMG)
+ page_offset_in_block = (((page_number * (ptr_dev_info_t->page_size))%(ptr_dev_info_t->erase_size))/ (ptr_dev_info_t->page_size));
+ logical_block = ((page_number * (ptr_dev_info_t->page_size))/(ptr_dev_info_t->erase_size)) ;
+ physical_block = get_mapping_block_index(logical_block, &phy_block_bbt);
+
+ if( physical_block != logical_block)
+ {
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "Bad Block Mapping, from %d block to %d block\n", logical_block, physical_block);
+ }
+
+ page_number = (page_offset_in_block)+ ((physical_block*(ptr_dev_info_t->erase_size))/(ptr_dev_info_t->page_size));
+#endif
+
+
+ rtn_status = spi_nand_read_page(page_number, speed_mode);
+
+ _SPI_NAND_SEMAPHORE_UNLOCK();
+
+ return (rtn_status);
+
+}
+
+
+
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_NAND_FLASH_RTN_T spi_nand_read_internal( u32 addr,
+ * u32 len,
+ * u8 *ptr_rtn_buf )
+ * PURPOSE : To read flash internally.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : addr - The addr variable of this function.
+ * len - The len variable of this function.
+ * OUTPUT: ptr_rtn_buf - A pointer to the ptr_rtn_buf variable.
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/19 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_read_internal ( unsigned long long addr,
+ u32 len,
+ u8 *ptr_rtn_buf,
+ SPI_NAND_FLASH_READ_SPEED_MODE_T speed_mode )
+{
+ u32 page_number, data_offset;
+ u32 read_addr, physical_read_addr, remain_len, logical_block, physical_block;
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+ unsigned long spinand_spinlock_flags;
+
+#if defined(TCSUPPORT_NAND_BMT) && !defined(LZMA_IMG)
+ unsigned short phy_block_bbt;
+ unsigned long addr_offset_in_block;
+#endif
+
+ if( (0xbc000000 <= addr) && (addr<= 0xbfffffff) ) /* Reserver address area for system */
+ {
+ if( (addr & 0xbfc00000) == 0xbfc00000)
+ {
+ addr &= 0x003fffff;
+ }
+ else
+ {
+ addr &= 0x03ffffff;
+ }
+ }
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+ read_addr = addr;
+ remain_len = len;
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "\nspi_nand_read_internal : addr=0x%lx, len=0x%x\n", addr, len );
+
+ _SPI_NAND_SEMAPHORE_LOCK();
+
+ while(remain_len > 0)
+ {
+ physical_read_addr = read_addr;
+
+#if defined(TCSUPPORT_NAND_BMT) && !defined(LZMA_IMG)
+ addr_offset_in_block = (read_addr %(ptr_dev_info_t->erase_size) );
+ logical_block = (read_addr / (ptr_dev_info_t->erase_size));
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1,"read_addr=0x%x, erase size =0x%x, logical_block =0x%x\n", read_addr, (ptr_dev_info_t->erase_size), logical_block);
+ physical_block = get_mapping_block_index(logical_block, &phy_block_bbt);
+ physical_read_addr = (physical_block * (ptr_dev_info_t->erase_size))+ addr_offset_in_block;
+
+ if( physical_block != logical_block)
+ {
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "Bad Block Mapping, from %d block to %d block\n", logical_block, physical_block);
+ }
+#endif
+
+		/* Calculate page number */
+ data_offset = (physical_read_addr % (ptr_dev_info_t->page_size));
+ page_number = (physical_read_addr / (ptr_dev_info_t->page_size));
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand_read_internal: read_addr=0x%x, page_number=0x%x, data_offset=0x%x\n", physical_read_addr, page_number, data_offset);
+
+ spi_nand_read_page(page_number, speed_mode);
+
+ /* 3. Retrieve the request data */
+ if( (data_offset + remain_len) < ptr_dev_info_t->page_size )
+ {
+ memcpy( &ptr_rtn_buf[len - remain_len], &_current_cache_page_data[data_offset], (sizeof(unsigned char)*remain_len) );
+ remain_len =0;
+
+ }
+ else
+ {
+ memcpy( &ptr_rtn_buf[len - remain_len], &_current_cache_page_data[data_offset], (sizeof(unsigned char)*(ptr_dev_info_t->page_size - data_offset)));
+ remain_len -= (ptr_dev_info_t->page_size - data_offset);
+ read_addr += (ptr_dev_info_t->page_size - data_offset);
+ }
+ }
+ _SPI_NAND_SEMAPHORE_UNLOCK();
+
+ return (rtn_status);
+}
+
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static void spi_nand_manufacute_init( struct SPI_NAND_FLASH_INFO_T *ptr_device_t )
+ * PURPOSE : To init SPI NAND Flash chip
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: None.
+ * RETURN : None.
+ * NOTES :
+ * MODIFICTION HISTORY:
+ * Date 2015/05/15 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+static void spi_nand_manufacute_init( struct SPI_NAND_FLASH_INFO_T *ptr_device_t )
+{
+ unsigned char feature;
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1,"SPI NAND Chip Init : Unlock all block and Enable Quad Mode\n");
+
+ if(((ptr_device_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_GIGADEVICE) && (ptr_device_t->dev_id == _SPI_NAND_DEVICE_ID_GD5F1GQ4UAYIG)) ||
+ ((ptr_device_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_GIGADEVICE) && (ptr_device_t->dev_id == _SPI_NAND_DEVICE_ID_GD5F1GQ4UBYIG)) ||
+ ((ptr_device_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_GIGADEVICE) && (ptr_device_t->dev_id == _SPI_NAND_DEVICE_ID_GD5F1GQ4UCYIG)) ||
+ ((ptr_device_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_GIGADEVICE) && (ptr_device_t->dev_id == _SPI_NAND_DEVICE_ID_GD5F2GQ4UBYIG)) ||
+ ((ptr_device_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_GIGADEVICE) && (ptr_device_t->dev_id == _SPI_NAND_DEVICE_ID_GD5F2GQ4UCYIG)) ||
+ ((ptr_device_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_GIGADEVICE) && (ptr_device_t->dev_id == _SPI_NAND_DEVICE_ID_GD5F4GQ4UBYIG)) ||
+ ((ptr_device_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_GIGADEVICE) && (ptr_device_t->dev_id == _SPI_NAND_DEVICE_ID_GD5F4GQ4UCYIG)))
+ {
+ /* 1. Unlock All block */
+ spi_nand_protocol_get_status_reg_1(&feature);
+ feature &= 0xC1;
+ spi_nand_protocol_set_status_reg_1(feature);
+
+ spi_nand_protocol_get_status_reg_1(&feature);
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "After Unlock all block setup, the status register1 =0x%x\n", feature);
+
+
+		/* 2. Enable Quad mode */
+ spi_nand_protocol_get_status_reg_2(&feature);
+ feature |= 0x1;
+ spi_nand_protocol_set_status_reg_2(feature);
+
+ spi_nand_protocol_get_status_reg_2(&feature);
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "After enable qual mode setup, the status register2 =0x%x\n", feature);
+ }
+ else if((ptr_device_t->mfr_id) == _SPI_NAND_MANUFACTURER_ID_MXIC)
+ {
+ /* 1. Unlock All block */
+ spi_nand_protocol_get_status_reg_1(&feature);
+ feature &= 0xC1;
+ spi_nand_protocol_set_status_reg_1(feature);
+
+ spi_nand_protocol_get_status_reg_1(&feature);
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "After Unlock all block setup, the status register1 =0x%x\n", feature);
+
+
+		/* 2. Enable Quad mode */
+ spi_nand_protocol_get_status_reg_2(&feature);
+ feature |= 0x1;
+ spi_nand_protocol_set_status_reg_2(feature);
+
+ spi_nand_protocol_get_status_reg_2(&feature);
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "After enable qual mode setup, the status register2 =0x%x\n", feature);
+ }
+ else if( (ptr_device_t->mfr_id) == _SPI_NAND_MANUFACTURER_ID_WINBOND)
+ {
+ if(((ptr_device_t->feature) & SPI_NAND_FLASH_DIE_SELECT_1_HAVE)) {
+ _die_id = 0x00;
+ spi_nand_protocol_die_select_1(_die_id);
+ }
+
+		/* Enable to modify the status register 1 */
+ feature = 0x58;
+ spi_nand_protocol_set_status_reg_2(feature);
+
+		/* Unlock all block and Enable Quad mode */
+ feature = 0x81;
+ spi_nand_protocol_set_status_reg_1(feature);
+
+		/* Disable to modify the status register 1 */
+ feature = 0x18;
+ spi_nand_protocol_set_status_reg_2(feature);
+
+ spi_nand_protocol_get_status_reg_1(&feature);
+
+ /* Unlock all block for Die_1 */
+ if(((ptr_device_t->feature) & SPI_NAND_FLASH_DIE_SELECT_1_HAVE))
+ {
+ _die_id = 0x01;
+ spi_nand_protocol_die_select_1(_die_id);
+
+			/* Enable to modify the status register 1 */
+ feature = 0x58;
+ spi_nand_protocol_set_status_reg_2(feature);
+
+			/* Unlock all block and Enable Quad mode */
+ feature = 0x81;
+ spi_nand_protocol_set_status_reg_1(feature);
+
+			/* Disable to modify the status register 1 */
+ feature = 0x18;
+ spi_nand_protocol_set_status_reg_2(feature);
+
+ spi_nand_protocol_get_status_reg_1(&feature);
+ }
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "After Unlock all block setup, the status register1 =0x%x\n", feature);
+ }
+ else if(((ptr_device_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_ESMT) && (ptr_device_t->dev_id == _SPI_NAND_DEVICE_ID_F50L512M41A)) ||
+ ((ptr_device_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_ESMT) && (ptr_device_t->dev_id == _SPI_NAND_DEVICE_ID_F50L1G41A0)))
+ {
+ /* 1. Unlock All block */
+ spi_nand_protocol_get_status_reg_1(&feature);
+ feature &= 0xC7;
+ spi_nand_protocol_set_status_reg_1(feature);
+
+ spi_nand_protocol_get_status_reg_1(&feature);
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "After Unlock all block setup, the status register1 =0x%x\n", feature);
+ }
+ else if(((ptr_device_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_ESMT) && (ptr_device_t->dev_id == _SPI_NAND_DEVICE_ID_F50L1G41LB)) ||
+ ((ptr_device_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_ESMT) && (ptr_device_t->dev_id == _SPI_NAND_DEVICE_ID_F50L2G41LB)))
+ {
+ if(((ptr_device_t->feature) & SPI_NAND_FLASH_DIE_SELECT_1_HAVE))
+ {
+ _die_id = 0x00;
+ spi_nand_protocol_die_select_1(_die_id);
+ }
+
+ /* 1. Unlock All block */
+ feature = 0x83;
+ spi_nand_protocol_set_status_reg_1(feature);
+
+ spi_nand_protocol_get_status_reg_1(&feature);
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "After Unlock all block setup, the status register1 =0x%x\n", feature);
+
+
+ /* Unlock all block for Die_1 */
+ if(((ptr_device_t->feature) & SPI_NAND_FLASH_DIE_SELECT_1_HAVE))
+ {
+ _die_id = 0x01;
+ spi_nand_protocol_die_select_1(_die_id);
+
+ /* 1. Unlock All block */
+ feature = 0x83;
+ spi_nand_protocol_set_status_reg_1(feature);
+
+ spi_nand_protocol_get_status_reg_1(&feature);
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "After Unlock all block setup, the die %d status register1 =0x%x\n", _die_id, feature);
+ }
+ }
+ else if(((ptr_device_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_ZENTEL) && (ptr_device_t->dev_id == _SPI_NAND_DEVICE_ID_A5U12A21ASC)) ||
+ ((ptr_device_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_ZENTEL) && (ptr_device_t->dev_id == _SPI_NAND_DEVICE_ID_A5U1GA21BWS)))
+ {
+ /* 1. Unlock All block */
+ spi_nand_protocol_get_status_reg_1(&feature);
+ feature &= 0xC7;
+ spi_nand_protocol_set_status_reg_1(feature);
+
+ spi_nand_protocol_get_status_reg_1(&feature);
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "After Unlock all block setup, the status register1 =0x%x\n", feature);
+ }
+ else if( (ptr_device_t->mfr_id) == _SPI_NAND_MANUFACTURER_ID_ETRON)
+ {
+ /* 1. Unlock All block */
+ spi_nand_protocol_get_status_reg_1(&feature);
+ feature &= 0xC1;
+ spi_nand_protocol_set_status_reg_1(feature);
+
+ spi_nand_protocol_get_status_reg_1(&feature);
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "After Unlock all block setup, the status register1 =0x%x\n", feature);
+
+		/* 2. Enable Quad mode */
+ spi_nand_protocol_get_status_reg_2(&feature);
+ feature |= 0x1;
+ spi_nand_protocol_set_status_reg_2(feature);
+
+ spi_nand_protocol_get_status_reg_2(&feature);
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "After enable qual mode setup, the status register2 =0x%x\n", feature);
+ }
+ else if( (ptr_device_t->mfr_id) == _SPI_NAND_MANUFACTURER_ID_TOSHIBA)
+ {
+ /* 1. Unlock All block */
+ spi_nand_protocol_get_status_reg_1(&feature);
+ feature &= 0xC7;
+ spi_nand_protocol_set_status_reg_1(feature);
+
+ spi_nand_protocol_get_status_reg_1(&feature);
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1,"After Unlock all block setup, the status register1 =0x%x\n", feature);
+ }
+
+ else if( (ptr_device_t->mfr_id) == _SPI_NAND_MANUFACTURER_ID_MICRON)
+ {
+ if(((ptr_device_t->feature) & SPI_NAND_FLASH_DIE_SELECT_2_HAVE)) {
+ _die_id = 0x00;
+ spi_nand_protocol_die_select_2(_die_id);
+ }
+
+ /* 1. Unlock All block */
+ spi_nand_protocol_get_status_reg_1(&feature);
+ feature &= 0x83;
+ spi_nand_protocol_set_status_reg_1(feature);
+
+ spi_nand_protocol_get_status_reg_1(&feature);
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1,"After Unlock all block setup, the status register1 =0x%x\n", feature);
+
+ /* Unlock all block for Die_1 */
+ if( ((ptr_device_t->feature) & SPI_NAND_FLASH_DIE_SELECT_2_HAVE) )
+ {
+ _die_id = 0x01;
+ spi_nand_protocol_die_select_2(_die_id);
+
+ /* 1. Unlock All block */
+ spi_nand_protocol_get_status_reg_1(&feature);
+ feature &= 0x83;
+ spi_nand_protocol_set_status_reg_1(feature);
+
+ spi_nand_protocol_get_status_reg_1(&feature);
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1,"After Unlock all block setup, the die %d status register1 =0x%x\n", _die_id, feature);
+ }
+ }
+ else if( (ptr_device_t->mfr_id) == _SPI_NAND_MANUFACTURER_ID_HEYANG)
+ {
+ /* 1. Unlock All block */
+ spi_nand_protocol_get_status_reg_1(&feature);
+ feature &= 0xC7;
+ spi_nand_protocol_set_status_reg_1(feature);
+
+		/* 2. Enable Quad mode */
+ spi_nand_protocol_get_status_reg_2(&feature);
+ feature |= 0x1;
+ spi_nand_protocol_set_status_reg_2(feature);
+ }
+ else if(((ptr_device_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_XTX) && (ptr_device_t->dev_id == _SPI_NAND_DEVICE_ID_PN26G01AWSIUG)) ||
+ ((ptr_device_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_XTX) && (ptr_device_t->dev_id == _SPI_NAND_DEVICE_ID_PN26G02AWSIUG)))
+ {
+ /* 1. Unlock All block */
+ spi_nand_protocol_get_status_reg_1(&feature);
+ feature &= 0xC7;
+ spi_nand_protocol_set_status_reg_1(feature);
+
+		/* 2. Enable Quad mode */
+ spi_nand_protocol_get_status_reg_2(&feature);
+ feature |= 0x1;
+ spi_nand_protocol_set_status_reg_2(feature);
+ }
+ else if( (ptr_device_t->mfr_id) == _SPI_NAND_MANUFACTURER_ID_ATO)
+ {
+ /* 1. Unlock All block */
+ spi_nand_protocol_get_status_reg_1(&feature);
+ feature &= 0xC7;
+ spi_nand_protocol_set_status_reg_1(feature);
+ }
+ else if(((ptr_device_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_FM) && (ptr_device_t->dev_id == _SPI_NAND_DEVICE_ID_FM25G01B)) ||
+ ((ptr_device_t->mfr_id == _SPI_NAND_MANUFACTURER_ID_FM) && (ptr_device_t->dev_id == _SPI_NAND_DEVICE_ID_FM25G02B)))
+ {
+ /* 1. Unlock All block */
+ spi_nand_protocol_get_status_reg_1(&feature);
+ feature &= 0xC7;
+ spi_nand_protocol_set_status_reg_1(feature);
+
+		/* 2. Enable Quad mode */
+ spi_nand_protocol_get_status_reg_2(&feature);
+ feature |= 0x1;
+ spi_nand_protocol_set_status_reg_2(feature);
+ }
+ else
+ {
+ /* 1. Unlock All block */
+ spi_nand_protocol_get_status_reg_1(&feature);
+ feature &= 0xC1;
+ spi_nand_protocol_set_status_reg_1(feature);
+
+ spi_nand_protocol_get_status_reg_1(&feature);
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "After Unlock all block setup, the status register1 =0x%x\n", feature);
+
+
+		/* 2. Enable Quad mode */
+ spi_nand_protocol_get_status_reg_2(&feature);
+ feature |= 0x1;
+ spi_nand_protocol_set_status_reg_2(feature);
+
+ spi_nand_protocol_get_status_reg_2(&feature);
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "After enable qual mode setup, the status register2 =0x%x\n", feature);
+ }
+
+}
+
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: static SPI_NAND_FLASH_RTN_T spi_nand_probe( struct SPI_NAND_FLASH_INFO_T *ptr_rtn_device_t )
+ * PURPOSE : To probe SPI NAND flash id.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: rtn_index - The rtn_index variable of this function.
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/12 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+static SPI_NAND_FLASH_RTN_T spi_nand_probe( struct SPI_NAND_FLASH_INFO_T * ptr_rtn_device_t )
+{
+ u32 i=0;
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_PROBE_ERROR;
+ unsigned long spinand_spinlock_flags;
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand_probe: start \n");
+
+
+ /* Protocol for read id */
+ _SPI_NAND_SEMAPHORE_LOCK();
+ spi_nand_protocol_read_id( (struct SPI_NAND_FLASH_INFO_T *)ptr_rtn_device_t );
+ _SPI_NAND_SEMAPHORE_UNLOCK();
+
+ for ( i=0 ; i < (sizeof(spi_nand_flash_tables)/sizeof(struct SPI_NAND_FLASH_INFO_T)) ; i++)
+ {
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1,"spi_nand_probe: table[%d]: mfr_id=0x%x, dev_id=0x%x\n", i, spi_nand_flash_tables[i].mfr_id, spi_nand_flash_tables[i].dev_id );
+
+ if ( ( (ptr_rtn_device_t->mfr_id) == spi_nand_flash_tables[i].mfr_id) &&
+ ( (ptr_rtn_device_t->dev_id) == spi_nand_flash_tables[i].dev_id) )
+ {
+ ptr_rtn_device_t->device_size = spi_nand_flash_tables[i].device_size;
+ ptr_rtn_device_t->erase_size = spi_nand_flash_tables[i].erase_size;
+ ptr_rtn_device_t->page_size = spi_nand_flash_tables[i].page_size;
+ ptr_rtn_device_t->oob_size = spi_nand_flash_tables[i].oob_size;
+ ptr_rtn_device_t->dummy_mode = spi_nand_flash_tables[i].dummy_mode;
+ ptr_rtn_device_t->read_mode = spi_nand_flash_tables[i].read_mode;
+ ptr_rtn_device_t->write_mode = spi_nand_flash_tables[i].write_mode;
+ memcpy( &(ptr_rtn_device_t->ptr_name) , &(spi_nand_flash_tables[i].ptr_name), sizeof(ptr_rtn_device_t->ptr_name));
+ if(isSpiNandAndCtrlECC) {
+ if(ptr_rtn_device_t->oob_size == _SPI_NAND_OOB_SIZE_64BYTE) {
+ memcpy( &(ptr_rtn_device_t->oob_free_layout) , &(ooblayout_spi_controller_ecc_64), sizeof(ptr_rtn_device_t->oob_free_layout));
+ } else if(ptr_rtn_device_t->oob_size == _SPI_NAND_OOB_SIZE_128BYTE) {
+ memcpy( &(ptr_rtn_device_t->oob_free_layout) , &(ooblayout_spi_controller_ecc_128), sizeof(ptr_rtn_device_t->oob_free_layout));
+ } else if(ptr_rtn_device_t->oob_size == _SPI_NAND_OOB_SIZE_256BYTE) {
+ memcpy( &(ptr_rtn_device_t->oob_free_layout) , &(ooblayout_spi_controller_ecc_256), sizeof(ptr_rtn_device_t->oob_free_layout));
+ }
+ } else {
+ memcpy( &(ptr_rtn_device_t->oob_free_layout) , &(spi_nand_flash_tables[i].oob_free_layout), sizeof(ptr_rtn_device_t->oob_free_layout));
+ }
+ ptr_rtn_device_t->feature = spi_nand_flash_tables[i].feature;
+ rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+ break;
+ }
+ }
+
+ if ( rtn_status != SPI_NAND_FLASH_RTN_NO_ERROR )
+ {
+		/* Another protocol for read id (For example, the GigaDevice SPI NAND chip for Type C) */
+ _SPI_NAND_SEMAPHORE_LOCK();
+ spi_nand_protocol_read_id_2( (struct SPI_NAND_FLASH_INFO_T *)ptr_rtn_device_t );
+ _SPI_NAND_SEMAPHORE_UNLOCK();
+
+ for ( i=0 ; i < (sizeof(spi_nand_flash_tables)/sizeof(struct SPI_NAND_FLASH_INFO_T)) ; i++)
+ {
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1,"spi_nand_probe: table[%d]: mfr_id=0x%x, dev_id=0x%x\n", i, spi_nand_flash_tables[i].mfr_id, spi_nand_flash_tables[i].dev_id );
+
+ if ( ( (ptr_rtn_device_t->mfr_id) == spi_nand_flash_tables[i].mfr_id) &&
+ ( (ptr_rtn_device_t->dev_id) == spi_nand_flash_tables[i].dev_id) )
+ {
+ ptr_rtn_device_t->device_size = spi_nand_flash_tables[i].device_size;
+ ptr_rtn_device_t->erase_size = spi_nand_flash_tables[i].erase_size;
+ ptr_rtn_device_t->page_size = spi_nand_flash_tables[i].page_size;
+ ptr_rtn_device_t->oob_size = spi_nand_flash_tables[i].oob_size;
+ ptr_rtn_device_t->dummy_mode = spi_nand_flash_tables[i].dummy_mode;
+ ptr_rtn_device_t->read_mode = spi_nand_flash_tables[i].read_mode;
+ ptr_rtn_device_t->write_mode = spi_nand_flash_tables[i].write_mode;
+ memcpy( &(ptr_rtn_device_t->ptr_name) , &(spi_nand_flash_tables[i].ptr_name), sizeof(ptr_rtn_device_t->ptr_name));
+ if(isSpiNandAndCtrlECC) {
+ if(ptr_rtn_device_t->oob_size == _SPI_NAND_OOB_SIZE_64BYTE) {
+ memcpy( &(ptr_rtn_device_t->oob_free_layout) , &(ooblayout_spi_controller_ecc_64), sizeof(ptr_rtn_device_t->oob_free_layout));
+ } else if(ptr_rtn_device_t->oob_size == _SPI_NAND_OOB_SIZE_128BYTE) {
+ memcpy( &(ptr_rtn_device_t->oob_free_layout) , &(ooblayout_spi_controller_ecc_128), sizeof(ptr_rtn_device_t->oob_free_layout));
+ } else if(ptr_rtn_device_t->oob_size == _SPI_NAND_OOB_SIZE_256BYTE) {
+ memcpy( &(ptr_rtn_device_t->oob_free_layout) , &(ooblayout_spi_controller_ecc_256), sizeof(ptr_rtn_device_t->oob_free_layout));
+ }
+ } else {
+ memcpy( &(ptr_rtn_device_t->oob_free_layout) , &(spi_nand_flash_tables[i].oob_free_layout), sizeof(ptr_rtn_device_t->oob_free_layout));
+ }
+ ptr_rtn_device_t->feature = spi_nand_flash_tables[i].feature;
+
+ rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+ break;
+ }
+ }
+ }
+
+ _SPI_NAND_PRINTF("spi_nand_probe: mfr_id=0x%x, dev_id=0x%x\n", ptr_rtn_device_t->mfr_id, ptr_rtn_device_t->dev_id);
+
+ if(rtn_status == SPI_NAND_FLASH_RTN_NO_ERROR)
+ {
+ _SPI_NAND_SEMAPHORE_LOCK();
+ spi_nand_manufacute_init(ptr_rtn_device_t);
+ _SPI_NAND_SEMAPHORE_UNLOCK();
+
+ if((ptr_rtn_device_t->write_mode == SPI_NAND_FLASH_WRITE_SPEED_MODE_QUAD) ||
+ (ptr_rtn_device_t->read_mode == SPI_NAND_FLASH_READ_SPEED_MODE_QUAD)) {
+ VPint(IOMUX_CONTROL1) |= (1 << 19);
+ VPint(IOMUX_CONTROL1) &= ~((1 << 18) | (1 << 11) | (1 << 8) | (1 << 7) | (1 << 3));
+ }
+ }
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spi_nand_probe: end \n");
+
+ return (rtn_status);
+}
+
+/* EXPORTED SUBPROGRAM BODIES -------------------------------------------------------- */
+
+
+u32 reservearea_size = 0;
+
+#if defined(TCSUPPORT_NAND_BMT) && !defined(LZMA_IMG)
+#define BMT_BAD_BLOCK_INDEX_OFFSET (1)
+#define POOL_GOOD_BLOCK_PERCENT 8/100
+#define MAX_BMT_SIZE_PERCENTAGE 1/10
+
+#if defined(TCSUPPORT_CT_PON)
+#define MAX_BMT_SIZE_PERCENTAGE_CT 1/8
+#endif
+
+extern int nand_flash_avalable_size;
+
+
+int en7512_nand_exec_read_page(u32 page, u8* date, u8* oob)
+{
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+ unsigned long spinand_spinlock_flags;
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ rtn_status = spi_nand_read_page(page, ptr_dev_info_t->read_mode);
+
+ if(rtn_status == SPI_NAND_FLASH_RTN_NO_ERROR)
+ {
+ /* Get data segment and oob segment */
+ memcpy( date, &_current_cache_page_data[0], ptr_dev_info_t->page_size );
+ memcpy( oob, &_current_cache_page_oob_mapping[0], ptr_dev_info_t->oob_size );
+
+ return 0;
+ }
+ else
+ {
+ _SPI_NAND_PRINTF( "en7512_nand_exec_read_page: read error, page=0x%x\n", page);
+ return -1;
+ }
+}
+int en7512_nand_check_block_bad(u32 offset, u32 bmt_block)
+{
+ u32 page_number;
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ u8 def_bad_block_ECC[28];
+ unsigned long spinand_spinlock_flags;
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+ SPI_NFI_CONF_T spi_nfi_conf_t;
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+	if( (0xbc000000 <= offset) && (offset<= 0xbfffffff)) /* Reserved address area for system */
+ {
+ if( (offset & 0xbfc00000) == 0xbfc00000)
+ {
+ offset &= 0x003fffff;
+ }
+ else
+ {
+ offset &= 0x03ffffff;
+ }
+ }
+
+	/* Calculate page number */
+ page_number = (offset / (ptr_dev_info_t->page_size));
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "en7512_nand_check_block_bad: read_addr=0x%x, page_number=0x%x\n", offset, page_number);
+
+ SPI_NAND_Flash_Clear_Read_Cache_Data();
+
+ rtn_status = spi_nand_read_page(page_number, ptr_dev_info_t->read_mode);
+
+ if (rtn_status != SPI_NAND_FLASH_RTN_NO_ERROR)
+ {
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "en7512_nand_check_block_bad return error, block:%d\n", offset/ptr_dev_info_t->erase_size);
+ return 1;
+ }
+
+ if(bmt_block){
+ if(_current_cache_page_oob_mapping[BMT_BAD_BLOCK_INDEX_OFFSET] != 0xff){
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "Bad block detected at page_addr 0x%x, oob_buf[%d] is 0x%x\n", page_number, BMT_BAD_BLOCK_INDEX_OFFSET,_current_cache_page_oob_mapping[BMT_BAD_BLOCK_INDEX_OFFSET]);
+ return 1;
+ }
+ }
+ else
+ {
+ if(_current_cache_page_oob_mapping[0] != 0xff)
+ {
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "Bad block detected at page_addr 0x%x, oob_buf[0] is 0x%x\n", page_number, _current_cache_page_oob_mapping[0]);
+ return 1;
+ }
+ }
+
+ if(_spi_dma_mode == 1) {
+ /* This is for check default bad block.
+ * When DMA read with all ECC parity equal to 0xFF,
+ * this will not generate read ECC error. So, it must
+ * close DMA to check first OOB byte.
+ */
+ SPI_NAND_Flash_Set_DmaMode(0);
+ SPI_NAND_Flash_Clear_Read_Cache_Data();
+ rtn_status = spi_nand_read_page(page_number, ptr_dev_info_t->read_mode);
+ SPI_NAND_Flash_Set_DmaMode(1);
+ SPI_NAND_Flash_Clear_Read_Cache_Data();
+
+ if (rtn_status != SPI_NAND_FLASH_RTN_NO_ERROR) {
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "en7512_nand_check_block_bad return error, block:%d\n", offset/ptr_dev_info_t->erase_size);
+ SPI_NAND_Flash_Set_DmaMode(1);
+ return 1;
+ }
+
+ memset(def_bad_block_ECC, 0xFF, sizeof(def_bad_block_ECC));
+ SPI_NFI_Get_Configure(&spi_nfi_conf_t);
+
+ if(memcmp(def_bad_block_ECC, &(_current_cache_page_oob_mapping[8]), (spi_nfi_conf_t.spare_size_t - spi_nfi_conf_t.fdm_num)) == 0) {
+ if(bmt_block) {
+ if(_current_cache_page_oob_mapping[BMT_BAD_BLOCK_INDEX_OFFSET] != 0xff) {
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "DMA close, Bad block detected at page_addr 0x%x, oob_buf[%d] is 0x%x\n", page_number, BMT_BAD_BLOCK_INDEX_OFFSET,_current_cache_page_oob_mapping[BMT_BAD_BLOCK_INDEX_OFFSET]);
+ return 1;
+ }
+ } else {
+ if(_current_cache_page_oob_mapping[0] != 0xff) {
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "DMA close, Bad block detected at page_addr 0x%x, oob_buf[0] is 0x%x\n", page_number, _current_cache_page_oob_mapping[0]);
+ return 1;
+ }
+ }
+ }
+ }
+
+ return 0; /* Good Block*/
+}
+int en7512_nand_erase(u32 offset)
+{
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+ unsigned long spinand_spinlock_flags;
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "en7512_nand_erase: offset =0x%x, erase_size=0x%x\n", offset, (ptr_dev_info_t->erase_size));
+
+ SPI_NAND_Flash_Clear_Read_Cache_Data();
+
+ rtn_status = spi_nand_erase_block((offset / (ptr_dev_info_t->erase_size)));
+
+ if(rtn_status == SPI_NAND_FLASH_RTN_NO_ERROR)
+ {
+ return 0;
+ }
+ else
+ {
+ _SPI_NAND_PRINTF("en7512_nand_erase : Fail \n");
+ return -1;
+ }
+}
+int en7512_nand_mark_badblock(u32 offset, u32 bmt_block)
+{
+
+ u32 page_number;
+ u8 buf[8];
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+ unsigned long spinand_spinlock_flags;
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+	/* Calculate page number */
+ page_number = (offset / (ptr_dev_info_t->page_size));
+
+
+ memset(buf, 0xFF, 8);
+ if(bmt_block)
+ {
+ buf[BMT_BAD_BLOCK_INDEX_OFFSET] = 0;
+ }
+ else
+ {
+ buf[0] = 0;
+ }
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_2, "en7512_nand_mark_badblock: buf info:\n");
+ _SPI_NAND_DEBUG_PRINTF_ARRAY(SPI_NAND_FLASH_DEBUG_LEVEL_2, &buf[0], 8);
+
+ _SPI_NAND_PRINTF("en7512_nand_mark_badblock: page_num=0x%x\n", page_number);
+
+ rtn_status = spi_nand_write_page(page_number, 0, NULL, 0, 0, &buf[0], 8, ptr_dev_info_t->write_mode);
+
+ if( rtn_status == SPI_NAND_FLASH_RTN_NO_ERROR)
+ {
+ return 0;
+ }
+ else
+ {
+ return -1;
+ }
+
+
+}
+int en7512_nand_exec_write_page(u32 page, u8 *dat, u8 *oob)
+{
+
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ unsigned long spinand_spinlock_flags;
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "en7512_nand_exec_write_page: page=0x%x\n", page);
+
+ rtn_status = spi_nand_write_page(page, 0, dat, ptr_dev_info_t->page_size, 0, oob, ptr_dev_info_t->oob_size , ptr_dev_info_t->write_mode);
+
+ if( rtn_status == SPI_NAND_FLASH_RTN_NO_ERROR)
+ {
+ return 0;
+ }
+ else
+ {
+ return -1;
+ }
+}
+
+
+u32 maximum_bmt_block_count = 0;
+int calc_bmt_pool_size(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd->priv;
+ int chip_size = nand->chipsize;
+ int block_size = 1 << nand->phys_erase_shift;
+ int total_block = chip_size / block_size;
+ int last_block = total_block - 1;
+
+ u16 valid_block_num = 0;
+ u16 need_valid_block_num = total_block * POOL_GOOD_BLOCK_PERCENT;
+
+ _SPI_NAND_PRINTF("calc_bmt_pool_size : need_valid_block_num=0x%x, total_block=0x%x\n",need_valid_block_num, total_block);
+
+ maximum_bmt_block_count = total_block * MAX_BMT_SIZE_PERCENTAGE;
+
+#if defined(TCSUPPORT_CT_PON)
+ maximum_bmt_block_count = total_block * MAX_BMT_SIZE_PERCENTAGE_CT;
+#endif
+
+ for(;last_block > 0; --last_block)
+ {
+ if(en7512_nand_check_block_bad(last_block * block_size, BAD_BLOCK_RAW))
+ {
+ continue;
+ }
+ else
+ {
+ valid_block_num++;
+ if(valid_block_num == need_valid_block_num)
+ {
+ break;
+ }
+ }
+ }
+
+ return (total_block - last_block);
+}
+
+
+
+#endif
+
+
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: SPI_NAND_FLASH_RTN_T SPI_NAND_Flash_Init( long rom_base )
+ * PURPOSE : To provide interface for SPI NAND init.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : rom_base - The rom_base variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/12 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+SPI_NAND_FLASH_RTN_T SPI_NAND_Flash_Init(u32 rom_base)
+{
+ unsigned long val;
+ SPI_NFI_CONF_T spi_nfi_conf_t;
+ SPI_ECC_ENCODE_CONF_T encode_conf_t;
+ SPI_ECC_DECODE_CONF_T decode_conf_t;
+ int sec_num;
+
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_PROBE_ERROR;
+
+ /* 1. set SFC Clock to 50MHZ */
+ spi_nand_set_clock_speed(50);
+
+ /* 2. Enable Manual Mode */
+ _SPI_NAND_ENABLE_MANUAL_MODE();
+
+	_SPI_NAND_PRINTF("SPI_NAND_Flash_init: before init, mtx_mode_tog=0x%lx, manual_en=0x%lx\n", VPint(0xbfa10014), VPint(0xbfa10020) );
+
+ /* 3. Probe flash information */
+ if ( spi_nand_probe( &_current_flash_info_t) != SPI_NAND_FLASH_RTN_NO_ERROR )
+ {
+ _SPI_NAND_PRINTF("SPI NAND Flash Detected Error !\n");
+ }
+ else
+ {
+ /* for 32bytes alignment */
+ dma_read_page = tmp_dma_read_page + (CACHE_LINE_SIZE - (((u32)tmp_dma_read_page) % CACHE_LINE_SIZE));
+ dma_write_page = tmp_dma_write_page + (CACHE_LINE_SIZE - (((u32)tmp_dma_write_page) % CACHE_LINE_SIZE));
+ /* flush cache_page */
+ dma_cache_inv(dma_read_page, _SPI_NAND_CACHE_SIZE);
+ dma_cache_inv(dma_write_page, _SPI_NAND_CACHE_SIZE);
+
+ if(isSpiNandAndCtrlECC) {
+ /* Init DMA */
+ SPI_NFI_Init();
+ SPI_NFI_Get_Configure(&spi_nfi_conf_t);
+ spi_nfi_conf_t.auto_fdm_t = SPI_NFI_CON_AUTO_FDM_Enable;
+ spi_nfi_conf_t.hw_ecc_t = SPI_NFI_CON_HW_ECC_Enable;
+ spi_nfi_conf_t.dma_burst_t = SPI_NFI_CON_DMA_BURST_Enable;
+ spi_nfi_conf_t.fdm_num = 8;
+ spi_nfi_conf_t.fdm_ecc_num = 8;
+ spi_nfi_conf_t.spare_size_t = SPI_NFI_CONF_SPARE_SIZE_16BYTE;
+ spi_nfi_conf_t.page_size_t = _current_flash_info_t.page_size;
+ if(_current_flash_info_t.page_size == _SPI_NAND_PAGE_SIZE_2KBYTE) {
+ sec_num = 4;
+ } else if(_current_flash_info_t.page_size == _SPI_NAND_PAGE_SIZE_4KBYTE) {
+ sec_num = 8;
+ } else {
+ sec_num = 1;
+ }
+ spi_nfi_conf_t.sec_num = sec_num;
+ spi_nfi_conf_t.cus_sec_size_en_t = SPI_NFI_CONF_CUS_SEC_SIZE_Disable;
+ spi_nfi_conf_t.sec_size = 0;
+ SPI_NFI_Set_Configure(&spi_nfi_conf_t);
+
+ /* Set controller to DMA mode */
+ SPI_NAND_Flash_Set_DmaMode(1);
+
+ /* Init Decode, Encode */
+ SPI_ECC_Encode_Init();
+ SPI_ECC_Decode_Init();
+
+ /* Setup Encode */
+ SPI_ECC_Encode_Get_Configure(&encode_conf_t);
+ encode_conf_t.encode_en = SPI_ECC_ENCODE_ENABLE;
+ encode_conf_t.encode_ecc_abiliry = SPI_ECC_ENCODE_ABILITY_4BITS;
+ encode_conf_t.encode_block_size = 512 + spi_nfi_conf_t.fdm_ecc_num;
+ SPI_ECC_Encode_Set_Configure(&encode_conf_t);
+
+ /* Setup Decode */
+ SPI_ECC_Decode_Get_Configure(&decode_conf_t);
+ decode_conf_t.decode_en = SPI_ECC_DECODE_ENABLE;
+ decode_conf_t.decode_ecc_abiliry = SPI_ECC_DECODE_ABILITY_4BITS;
+ decode_conf_t.decode_block_size = (((spi_nfi_conf_t.fdm_ecc_num) + 512) * 8) + ((decode_conf_t.decode_ecc_abiliry) * 13);
+ SPI_ECC_Decode_Set_Configure(&decode_conf_t);
+ }
+ _SPI_NAND_PRINTF("Detected SPI NAND Flash : %s, Flash Size=0x%x\n", _current_flash_info_t.ptr_name, _current_flash_info_t.device_size);
+ rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+ }
+
+ return (rtn_status);
+}
+
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: SPI_NAND_FLASH_RTN_T SPI_NAND_Flash_Get_Flash_Info( struct SPI_NAND_FLASH_INFO_T *ptr_rtn_into_t )
+ * PURPOSE : To get system current flash info.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: ptr_rtn_into_t - A pointer to the structure of the ptr_rtn_into_t variable.
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2015/01/14 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+SPI_NAND_FLASH_RTN_T SPI_NAND_Flash_Get_Flash_Info( struct SPI_NAND_FLASH_INFO_T *ptr_rtn_into_t)
+{
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ memcpy( ptr_rtn_into_t, ptr_dev_info_t, sizeof(struct SPI_NAND_FLASH_INFO_T) );
+
+ return (rtn_status);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: SPI_NAND_FLASH_RTN_T SPI_NAND_Flash_Write_Nbyte( u32 dst_addr,
+ * u32 len,
+ * u32 *ptr_rtn_len,
+ * u8* ptr_buf )
+ * PURPOSE : To provide interface for Write N Bytes into SPI NAND Flash.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : dst_addr - The dst_addr variable of this function.
+ * len - The len variable of this function.
+ * buf - The buf variable of this function.
+ * OUTPUT: rtn_len - The rtn_len variable of this function.
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/15 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+SPI_NAND_FLASH_RTN_T SPI_NAND_Flash_Write_Nbyte( u32 dst_addr,
+ u32 len,
+ u32 *ptr_rtn_len,
+ u8 *ptr_buf,
+ SPI_NAND_FLASH_WRITE_SPEED_MODE_T speed_node )
+{
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+ rtn_status = spi_nand_write_internal(dst_addr, len, ptr_rtn_len, ptr_buf, speed_node);
+
+ *ptr_rtn_len = len ; /* Chuck Kuo, tmp modify */
+
+ return (rtn_status);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: int SPI_NAND_Flash_Read_NByte( long addr,
+ * long len,
+ * long *retlen,
+ * char *buf )
+ * PURPOSE : To provide interface for Read N Bytes from SPI NAND Flash.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : addr - The addr variable of this function.
+ * len - The len variable of this function.
+ * retlen - The retlen variable of this function.
+ * buf - The buf variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/12 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+u32 SPI_NAND_Flash_Read_NByte( unsigned long long addr,
+ u32 len,
+ u32 *retlen,
+ u8 *buf,
+ SPI_NAND_FLASH_READ_SPEED_MODE_T speed_mode)
+{
+
+ return spi_nand_read_internal(addr, len, buf, speed_mode);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: SPI_NAND_FLASH_RTN_T SPI_NAND_Flash_Erase( u32 dst_addr,
+ * u32 len )
+ * PURPOSE : To provide interface for Erase SPI NAND Flash.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : dst_addr - The dst_addr variable of this function.
+ * len - The len variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/17 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+SPI_NAND_FLASH_RTN_T SPI_NAND_Flash_Erase( u32 dst_addr,
+ u32 len )
+{
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+ rtn_status = spi_nand_erase_internal(dst_addr, len);
+
+ return (rtn_status);
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: unsigned char SPI_NAND_Flash_Read_Byte( unsigned long long addr )
+ * PURPOSE : To provide interface for read 1 Bytes from SPI NAND Flash.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : addr - The addr variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/12 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+unsigned char SPI_NAND_Flash_Read_Byte(unsigned long long addr)
+{
+ unsigned char ch;
+ size_t retlen;
+ int ret=0;
+
+ ret = spi_nand_mtd->_read(spi_nand_mtd, (loff_t)addr, 1, &retlen, &ch);
+ if(ret != 0)
+ {
+ _SPI_NAND_PRINTF("Error : SPI_NAND_Flash_Read_Byte , read 0x%llx error\n", addr);
+ return -1;
+ }
+
+ return ch;
+
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: unsigned long SPI_NAND_Flash_Read_DWord( unsigned long long addr )
+ * PURPOSE : To provide interface for read Double Word from SPI NAND Flash.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : addr - The addr variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/12 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+unsigned long SPI_NAND_Flash_Read_DWord(unsigned long long addr)
+{
+
+ unsigned long dword;
+ size_t retlen;
+ int ret = 0;
+
+ ret = spi_nand_mtd->_read( spi_nand_mtd, (loff_t)addr, 4, &retlen, &dword);
+
+ if (ret != 0)
+ {
+ _SPI_NAND_PRINTF("Error : SPI_NAND_Flash_Read_DWord , read 0x%llx error\n", addr);
+ return -1;
+ }
+
+ return dword;
+
+
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: void SPI_NAND_DEBUG_ENABLE( void )
+ * PURPOSE : To enable to printf debug message of SPI NAND driver.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2015/01/20 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+void SPI_NAND_DEBUG_ENABLE( void )
+{
+ _SPI_NAND_DEBUG_FLAG = 1;
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: void SPI_NAND_DEBUG_DISABLE( void )
+ * PURPOSE : To disable to printf debug message of SPI NAND driver.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2015/01/20 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+void SPI_NAND_DEBUG_DISABLE( void )
+{
+ _SPI_NAND_DEBUG_FLAG = 0;
+}
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: void SPI_NAND_Flash_Clear_Read_Cache_Data( void )
+ * PURPOSE : To clear the cache data for read.
+ * (The next time to read data will get data from flash chip certainly.)
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2015/01/21 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+void SPI_NAND_Flash_Clear_Read_Cache_Data( void )
+{
+ _current_page_num = 0xFFFFFFFF;
+}
+
+
+/***********************************************************************************/
+/***********************************************************************************/
+/***** Modify for SPI NAND linux kernel driver below *******************************/
+/***********************************************************************************/
+/***********************************************************************************/
+/***********************************************************************************/
+
+
+/* NAND driver */
+int ra_nand_init(void)
+{
+ return 0;
+}
+
+void ra_nand_remove(void)
+{
+
+}
+
+
+/*****************************************************************************************/
+/*****************************************************************************************/
+/* Porting */
+/*****************************************************************************************/
+/*****************************************************************************************/
+
+#define LINUX_DEBUG_FLAG 0
+
+/* feature/ status reg */
+#define REG_BLOCK_LOCK 0xa0
+#define REG_OTP 0xb0
+#define REG_STATUS 0xc0/* timing */
+
+/* status */
+#define STATUS_OIP_MASK 0x01
+#define STATUS_READY (0 << 0)
+#define STATUS_BUSY (1 << 0)
+
+#define STATUS_E_FAIL_MASK 0x04
+#define STATUS_E_FAIL (1 << 2)
+
+#define STATUS_P_FAIL_MASK 0x08
+#define STATUS_P_FAIL (1 << 3)
+
+#define STATUS_ECC_MASK 0x30
+#define STATUS_ECC_1BIT_CORRECTED (1 << 4)
+#define STATUS_ECC_ERROR (2 << 4)
+#define STATUS_ECC_RESERVED (3 << 4)
+
+/*ECC enable defines*/
+#define OTP_ECC_MASK 0x10
+#define OTP_ECC_OFF 0
+#define OTP_ECC_ON 1
+
+#define ECC_DISABLED
+#define ECC_IN_NAND
+#define ECC_SOFT
+
+#define SPI_NAND_PROCNAME "driver/spi_nand_debug"
+#define SPI_NAND_TEST "driver/spi_nand_test"
+
+
+
+
+#define BUFSIZE (2 * 2048)
+#define CACHE_BUF 2112
+
+
+#define CONFIG_MTD_SPINAND_ONDIEECC 1
+
+
+#ifdef CONFIG_MTD_SPINAND_ONDIEECC
+static int enable_hw_ecc;
+static int enable_read_hw_ecc;
+
+#if 0
+static struct nand_ecclayout spinand_oob_64 = {
+ .eccbytes = 16,
+ .eccpos = {
+ 12,13,14,15,
+ 28,29,30,31,
+ 44,45,46,47,
+ 60,61,62,63 },
+ .oobavail = 48,
+ .oobfree = {
+ {.offset = 2,
+ .length = 10},
+ {.offset = 16,
+ .length = 12},
+ {.offset = 32,
+ .length = 12},
+ {.offset = 48,
+ .length = 12},
+ }
+};
+#endif
+static struct nand_ecclayout spinand_oob_64 = {
+ .eccbytes = 0,
+ .eccpos = {},
+ .oobavail = MAX_LINUX_USE_OOB_SIZE,
+ .oobfree = {
+ {.offset = 0,
+ .length = MAX_LINUX_USE_OOB_SIZE}
+ }
+};
+
+
+#endif
+
+static inline struct spinand_state *mtd_to_state(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = (struct nand_chip *)mtd->priv;
+ struct spinand_info *info = (struct spinand_info *)chip->priv;
+ struct spinand_state *state = (struct spinand_state *)info->priv;
+
+ return state;
+}
+
+
+/*
+ * spinand_read_id- Read SPI Nand ID
+ * Description:
+ * Read ID: read two ID bytes from the SPI Nand device
+ */
+static int spinand_read_id(struct spi_device *spi_nand, u8 *id)
+{
+ struct _SPI_NAND_FLASH_ID_T flash_id;
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+
+ (id[0])= ptr_dev_info_t->mfr_id;
+ (id[1])= ptr_dev_info_t->dev_id;
+#if 0
+ _SPI_NAND_SEMAPHORE_LOCK();
+ spi_nand_protocol_read_id(&flash_id);
+ _SPI_NAND_SEMAPHORE_UNLOCK();
+
+ (id[0])= flash_id.mfr_id;
+ (id[1])= flash_id.dev_id;
+#endif
+
+ return 0;
+}
+
+void SPI_NAND_Flash_Set_DmaMode( u32 input )
+{
+ _spi_dma_mode = input;
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "SPI_NAND_Flash_Set_DmaMode : dma_mode =%d\n", _spi_dma_mode);
+}
+
+void SPI_NAND_Flash_Get_DmaMode( u32 *val )
+{
+ *val = _spi_dma_mode;
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "SPI_NAND_Flash_Get_DmaMode : dma_mode =%d\n", _spi_dma_mode);
+}
+
+/*
+ * spinand_read_status- send command 0xf to the SPI Nand status register
+ * Description:
+ * After read, write, or erase, the Nand device is expected to set the
+ * busy status.
+ * This function is to allow reading the status of the command: read,
+ * write, and erase.
+ * Once the status turns to be ready, the other status bits also are
+ * valid status bits.
+ */
+static int spinand_read_status(struct spi_device *spi_nand, uint8_t *status)
+{
+ return spi_nand_protocol_get_status_reg_3(status);
+}
+
+#define MAX_WAIT_JIFFIES (40 * HZ)
+static int wait_till_ready(struct spi_device *spi_nand)
+{
+ unsigned long deadline;
+ int retval;
+ u8 stat = 0;
+
+ deadline = jiffies + MAX_WAIT_JIFFIES;
+ do {
+ retval = spinand_read_status(spi_nand, &stat);
+ if (retval < 0)
+ return -1;
+ else if (!(stat & 0x1))
+ break;
+
+ cond_resched();
+ } while (!time_after_eq(jiffies, deadline));
+
+ if ((stat & 0x1) == 0)
+ return 0;
+
+ return -1;
+}
+/**
+ * spinand_get_otp- send command 0xf to read the SPI Nand OTP register
+ * Description:
+ * There is one bit( bit 0x10 ) to set or to clear the internal ECC.
+ * Enable chip internal ECC, set the bit to 1
+ * Disable chip internal ECC, clear the bit to 0
+ */
+static int spinand_get_otp(struct spi_device *spi_nand, u8 *otp)
+{
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ if( ((ptr_dev_info_t->mfr_id) == _SPI_NAND_MANUFACTURER_ID_XTX) ||
+ ((ptr_dev_info_t->mfr_id) == _SPI_NAND_MANUFACTURER_ID_FM) )
+ {
+ return spi_nand_protocol_get_feature(_SPI_NAND_ADDR_ECC, otp);
+ }
+ else
+ {
+ return spi_nand_protocol_get_status_reg_2(otp);
+ }
+}
+
+/**
+ * spinand_set_otp- send command 0x1f to write the SPI Nand OTP register
+ * Description:
+ * There is one bit( bit 0x10 ) to set or to clear the internal ECC.
+ * Enable chip internal ECC, set the bit to 1
+ * Disable chip internal ECC, clear the bit to 0
+ */
+static int spinand_set_otp(struct spi_device *spi_nand, u8 *otp)
+{
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ if( ((ptr_dev_info_t->mfr_id) == _SPI_NAND_MANUFACTURER_ID_XTX) ||
+ ((ptr_dev_info_t->mfr_id) == _SPI_NAND_MANUFACTURER_ID_FM) )
+ {
+ return spi_nand_protocol_set_feature(_SPI_NAND_ADDR_ECC, *otp);
+ }
+ else
+ {
+ return spi_nand_protocol_set_status_reg_2(*otp);
+ }
+}
+
+#ifdef CONFIG_MTD_SPINAND_ONDIEECC
+/**
+ * spinand_enable_ecc- send command 0x1f to write the SPI Nand OTP register
+ * Description:
+ * There is one bit( bit 0x10 ) to set or to clear the internal ECC.
+ * Enable chip internal ECC, set the bit to 1
+ * Disable chip internal ECC, clear the bit to 0
+ */
+static int spinand_enable_ecc(struct spi_device *spi_nand)
+{
+ int retval;
+ u8 otp = 0;
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;u8 die_num;
+ int i;
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ if(((ptr_dev_info_t->feature) & SPI_NAND_FLASH_DIE_SELECT_1_HAVE)) {
+ die_num = (ptr_dev_info_t->device_size / ptr_dev_info_t->page_size) >> 16;
+
+ for(i = 0; i < die_num; i++) {
+ spi_nand_protocol_die_select_1(i);
+
+ retval = spinand_get_otp(spi_nand, &otp);
+ if (retval < 0) {
+ return retval;
+ }
+
+ if ((otp & OTP_ECC_MASK) == OTP_ECC_MASK) {
+ continue;
+ }
+ otp |= OTP_ECC_MASK;
+ retval = spinand_set_otp(spi_nand, &otp);
+ if (retval < 0) {
+ return retval;
+ }
+ }
+ } else if(((ptr_dev_info_t->feature) & SPI_NAND_FLASH_DIE_SELECT_2_HAVE)) {
+ die_num = (ptr_dev_info_t->device_size / ptr_dev_info_t->page_size) >> 17;
+
+ for(i = 0; i < die_num; i++) {
+ spi_nand_protocol_die_select_2(i);
+
+ retval = spinand_get_otp(spi_nand, &otp);
+ if (retval < 0) {
+ return retval;
+ }
+
+ if ((otp & OTP_ECC_MASK) == OTP_ECC_MASK) {
+ continue;
+ }
+ otp |= OTP_ECC_MASK;
+ retval = spinand_set_otp(spi_nand, &otp);
+ if (retval < 0) {
+ return retval;
+ }
+ }
+ } else {
+ retval = spinand_get_otp(spi_nand, &otp);
+ if (retval < 0) {
+ return retval;
+ }
+
+ if ((otp & OTP_ECC_MASK) == OTP_ECC_MASK) {
+ _ondie_ecc_flag = 1;
+ return 0;
+ }
+ otp |= OTP_ECC_MASK;
+ retval = spinand_set_otp(spi_nand, &otp);
+ if (retval < 0) {
+ return retval;
+ }
+ }
+
+ _ondie_ecc_flag = 1;
+
+ return spinand_get_otp(spi_nand, &otp);
+}
+#endif
+
+static int spinand_disable_ecc(struct spi_device *spi_nand)
+{
+ int retval;
+ u8 otp = 0;
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ u8 die_num;
+ int i;
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ if(((ptr_dev_info_t->feature) & SPI_NAND_FLASH_DIE_SELECT_1_HAVE)) {
+ die_num = (ptr_dev_info_t->device_size / ptr_dev_info_t->page_size) >> 16;
+
+ for(i = 0; i < die_num; i++) {
+ spi_nand_protocol_die_select_1(i);
+
+ retval = spinand_get_otp(spi_nand, &otp);
+ if (retval < 0) {
+ return retval;
+ }
+
+ if ((otp & OTP_ECC_MASK) == OTP_ECC_MASK) {
+ otp &= ~OTP_ECC_MASK;
+ retval = spinand_set_otp(spi_nand, &otp);
+ if (retval < 0) {
+ return retval;
+ }
+ }
+ }
+ } else if(((ptr_dev_info_t->feature) & SPI_NAND_FLASH_DIE_SELECT_2_HAVE)) {
+ die_num = (ptr_dev_info_t->device_size / ptr_dev_info_t->page_size) >> 17;
+
+ for(i = 0; i < die_num; i++) {
+ spi_nand_protocol_die_select_2(i);
+
+ retval = spinand_get_otp(spi_nand, &otp);
+ if (retval < 0) {
+ return retval;
+ }
+
+ if ((otp & OTP_ECC_MASK) == OTP_ECC_MASK) {
+ otp &= ~OTP_ECC_MASK;
+ retval = spinand_set_otp(spi_nand, &otp);
+ if (retval < 0) {
+ return retval;
+ }
+ }
+ }
+ } else {
+ retval = spinand_get_otp(spi_nand, &otp);
+ if (retval < 0)
+ return retval;
+
+ if ((otp & OTP_ECC_MASK) == OTP_ECC_MASK) {
+ otp &= ~OTP_ECC_MASK;
+ retval = spinand_set_otp(spi_nand, &otp);
+ if (retval < 0) {
+ return retval;
+ }
+ _ondie_ecc_flag = 0;
+ return spinand_get_otp(spi_nand, &otp);
+ }
+ }
+
+ _ondie_ecc_flag = 0;
+
+ return 0;
+}
+
+/**
+ * spinand_write_enable- send command 0x06 to enable write or erase the
+ * Nand cells
+ * Description:
+ * Before write and erase the Nand cells, the write enable has to be set.
+ * After the write or erase, the write enable bit is automatically
+ * cleared (status register bit 2)
+ * Set the bit 2 of the status register has the same effect
+ */
+static int spinand_write_enable(struct spi_device *spi_nand)
+{
+ return spi_nand_protocol_write_enable();
+}
+
+static int spinand_read_page_to_cache(struct spi_device *spi_nand, u32 page_id)
+{
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "spinand_read_page_to_cache: page_idx=0x%x\n", page_id);
+ return spi_nand_protocol_page_read( (u32)page_id);
+}
+
+/*
+ * spinand_read_from_cache- send command 0x03 to read out the data from the
+ * cache register(2112 bytes max)
+ * Description:
+ * The read can specify 1 to 2112 bytes of data read at the corresponding
+ * locations.
+ * No tRd delay.
+ */
+static int spinand_read_from_cache(struct spi_device *spi_nand, u32 page_id,
+ u32 byte_id, u32 len, u8 *rbuf)
+{
+
+ unsigned int ret;
+ u8 status;
+
+ spi_nand_protocol_read_from_cache(byte_id, len, rbuf, SPI_NAND_FLASH_READ_SPEED_MODE_SINGLE, SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND);
+
+ while (1) {
+ ret = spinand_read_status(spi_nand, &status);
+ if (ret < 0) {
+ _SPI_NAND_PRINTF("err %d read status register\n", ret);
+ return ret;
+ }
+
+ if ((status & STATUS_OIP_MASK) == STATUS_READY) {
+ break;
+ }
+ }
+
+ return 0; /* Chuck , tmp return success any way */
+}
+
+/*
+ * spinand_read_page-to read a page with:
+ * @page_id: the physical page number
+ * @offset: the location from 0 to 2111
+ * @len: number of bytes to read
+ * @rbuf: read buffer to hold @len bytes
+ *
+ * Description:
+ * The read includes two commands to the Nand: 0x13 and 0x03 commands
+ * Poll to read status to wait for tRD time.
+ */
+static int spinand_read_page(struct spi_device *spi_nand, u32 page_id,
+ u32 offset, u32 len, u8 *rbuf)
+{
+
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ rtn_status = spi_nand_read_internal((((ptr_dev_info_t->page_size)*page_id)+offset), len, rbuf, ptr_dev_info_t->read_mode);
+
+ if(rtn_status == SPI_NAND_FLASH_RTN_NO_ERROR)
+ {
+ return 0;
+ }
+ else
+ {
+ _SPI_NAND_PRINTF("spinand_read_page, error\n");
+ return -1;
+ }
+
+}
+
+/*
+ * spinand_program_data_to_cache--to write a page to cache with:
+ * @byte_id: the location to write to the cache
+ * @len: number of bytes to write
+ * @rbuf: read buffer to hold @len bytes
+ *
+ * Description:
+ * The write command used here is 0x84--indicating that the cache is
+ * not cleared first.
+ * Since it is writing the data to cache, there is no tPROG time.
+ */
+static int spinand_program_data_to_cache(struct spi_device *spi_nand,
+ u32 page_id, u32 byte_id, u32 len, u8 *wbuf)
+{
+ return spi_nand_protocol_program_load(byte_id, wbuf, len, SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE);
+}
+
+/**
+ * spinand_program_execute--to write a page from cache to the Nand array with
+ * @page_id: the physical page location to write the page.
+ *
+ * Description:
+ * The write command used here is 0x10--indicating the cache is writing to
+ * the Nand array.
+ * Need to wait for tPROG time to finish the transaction.
+ */
+static int spinand_program_execute(struct spi_device *spi_nand, u32 page_id)
+{
+ return spi_nand_protocol_program_execute(page_id);
+}
+
+/**
+ * spinand_program_page--to write a page with:
+ * @page_id: the physical page location to write the page.
+ * @offset: the location from the cache starting from 0 to 2111
+ * @len: the number of bytes to write
+ * @wbuf: the buffer to hold the number of bytes
+ *
+ * Description:
+ * The commands used here are 0x06, 0x84, and 0x10--indicating that
+ * the write enable is first sent, the write cache command, and the
+ * write execute command.
+ * Poll to wait for the tPROG time to finish the transaction.
+ */
+static int spinand_program_page(struct mtd_info *mtd,
+ u32 page_id, u32 offset, u32 len, u8 *buf)
+{
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_program_page]: enter, page=0x%x\n", page_id);
+
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ struct spinand_state *state = mtd_to_state(mtd);
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_2, "spinand_program_page: _current_cache_page_oob_mapping: state->oob_buf_len=0x%x, state->oob_buf=\n", (state->oob_buf_len));
+ _SPI_NAND_DEBUG_PRINTF_ARRAY(SPI_NAND_FLASH_DEBUG_LEVEL_2, &(state->oob_buf[0]), (state->oob_buf_len));
+
+ rtn_status = spi_nand_write_page_internal(page_id, (state->buf_idx), &state->buf[(state->buf_idx)], (state->buf_len), 0, (&state->oob_buf[0]), (state->oob_buf_len), ptr_dev_info_t->write_mode);
+
+ if( rtn_status == SPI_NAND_FLASH_RTN_NO_ERROR)
+ {
+ return 0;
+ }
+ else
+ {
+ _SPI_NAND_PRINTF("spinand_program_page, error\n");
+ return -1;
+ }
+
+}
+
+/**
+ * spinand_erase_block_erase--to erase a page with:
+ * @block_id: the physical block location to erase.
+ *
+ * Description:
+ * The command used here is 0xd8--indicating an erase command to erase
+ * one block--64 pages
+ * Need to wait for tERS.
+ */
+static int spinand_erase_block_erase(struct spi_device *spi_nand, u32 block_id)
+{
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1,"[spinand_erase_block_erase]: enter, block id=0x%x \n", block_id);
+ return spi_nand_protocol_block_erase(block_id);
+}
+
+/**
+ * spinand_erase_block--to erase a page with:
+ * @block_id: the physical block location to erase.
+ *
+ * Description:
+ * The commands used here are 0x06 and 0xd8--indicating an erase
+ * command to erase one block--64 pages
+ * It will first to enable the write enable bit (0x06 command),
+ * and then send the 0xd8 erase command
+ *   Poll to wait for the tERS time to complete the transaction.
+ */
+static int spinand_erase_block(struct spi_device *spi_nand, u32 block_id)
+{
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1,"[spinand_erase_block]: enter, block id=0x%x \n", block_id);
+
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ rtn_status = spi_nand_erase_internal( ((block_id)*(ptr_dev_info_t->erase_size)) , (ptr_dev_info_t->erase_size) );
+
+ if(rtn_status == SPI_NAND_FLASH_RTN_NO_ERROR)
+ {
+ return 0;
+ }
+ else
+ {
+ _SPI_NAND_PRINTF("spinand_erase_block, error\n");
+ return -1;
+ }
+
+}
+
+
+void spinand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_write_buf]: enter \n");
+ int min_oob_size;
+ struct spinand_state *state = mtd_to_state(mtd);
+
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ if( (state->col) >= (mtd->writesize)) /* Write OOB area */
+ {
+ min_oob_size = MIN(len, (MAX_LINUX_USE_OOB_SIZE - ((state->col) - (mtd->writesize))));
+ memcpy( &(state->oob_buf)[((state->col)-(mtd->writesize))+LINUX_USE_OOB_START_OFFSET], buf, min_oob_size);
+ state->col += min_oob_size;
+ state->oob_buf_len = min_oob_size;
+ }
+ else /* Write Data area */
+ {
+ memcpy( &(state->buf)[state->buf_idx], buf, len);
+ state->col += len;
+ state->buf_len += len;
+ }
+
+}
+
+void spinand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct spinand_state *state = mtd_to_state(mtd);
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_read_buf]: enter, len=0x%x, offset=0x%x\n", len, state->buf_idx);
+
+ if( ((state->command) == NAND_CMD_READID) ||
+ ((state->command) == NAND_CMD_STATUS) )
+ {
+ memcpy(buf, &state->buf, len);
+ }
+ else
+ {
+ if( (state->buf_idx) < ( ptr_dev_info_t->page_size )) /* Read data area */
+ {
+ memcpy(buf, &_current_cache_page_data[state->buf_idx], len);
+ }
+ else /* Read oob area */
+ {
+ /* dump_stack(); */
+ memcpy(buf, &_current_cache_page_oob_mapping[ ((state->buf_idx)-(ptr_dev_info_t->page_size))+ LINUX_USE_OOB_START_OFFSET], len);
+ }
+ }
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_2, "spinand_read_buf : idx=0x%x, len=0x%x\n", (state->buf_idx), len);
+ _SPI_NAND_DEBUG_PRINTF_ARRAY(SPI_NAND_FLASH_DEBUG_LEVEL_2, &buf[0], len);
+
+ state->buf_idx += len;
+
+}
+
+
+#ifdef CONFIG_MTD_SPINAND_ONDIEECC
+static int spinand_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf, int oob_required)
+{
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_write_page_hwecc]: enter \n");
+
+ spinand_write_buf(mtd, buf, mtd->writesize);
+ spinand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_2, "spinand_write_page_hwecc: data=\n");
+ _SPI_NAND_DEBUG_PRINTF_ARRAY(SPI_NAND_FLASH_DEBUG_LEVEL_2, &buf[0], mtd->writesize);
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_2, "spinand_write_page_hwecc: oob=\n");
+ _SPI_NAND_DEBUG_PRINTF_ARRAY(SPI_NAND_FLASH_DEBUG_LEVEL_2, &chip->oob_poi[0], mtd->oobsize);
+ return 0;
+
+}
+
+static int spinand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ u32 idx;
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ unsigned long spinand_spinlock_flags;
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_read_page_hwecc]: enter, page=0x%x \n", page);
+
+
+ _SPI_NAND_SEMAPHORE_LOCK();
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ if(buf==NULL)
+ {
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_read_page_hwecc]buf is NULL\n");
+ }
+
+ memcpy(buf, &_current_cache_page_data[0], (ptr_dev_info_t->page_size));
+ memcpy((chip->oob_poi), &_current_cache_page_oob_mapping[LINUX_USE_OOB_START_OFFSET], MAX_LINUX_USE_OOB_SIZE);
+
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_2, "spinand_read_page_hwecc: data:\n");
+ _SPI_NAND_DEBUG_PRINTF_ARRAY(SPI_NAND_FLASH_DEBUG_LEVEL_2, &buf[0], mtd->writesize);
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_2, "spinand_read_page_hwecc: oob:\n");
+ _SPI_NAND_DEBUG_PRINTF_ARRAY(SPI_NAND_FLASH_DEBUG_LEVEL_2, &chip->oob_poi[0], mtd->oobsize);
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_2, "spinand_read_page_hwecc: _current_cache_page_oob_mapping:\n");
+ _SPI_NAND_DEBUG_PRINTF_ARRAY(SPI_NAND_FLASH_DEBUG_LEVEL_2, &_current_cache_page_oob_mapping[0], mtd->oobsize);
+
+ _SPI_NAND_SEMAPHORE_UNLOCK();
+
+ return 0;
+
+}
+
+#endif
+
+
+static int spinand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const u8 *buf, int page, int cached, int raw)
+{
+
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ struct spinand_state *state = mtd_to_state(mtd);
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_write_page]: enter, page=0x%x \n", page);
+
+ memset((state->oob_buf), 0xff, MAX_USE_OOB_SIZE);
+ memcpy(&(state->oob_buf)[LINUX_USE_OOB_START_OFFSET], chip->oob_poi, MAX_LINUX_USE_OOB_SIZE);
+
+ rtn_status = spi_nand_write_page_internal(page, 0, buf, (ptr_dev_info_t->page_size), 0, (state->oob_buf), MAX_LINUX_USE_OOB_SIZE, ptr_dev_info_t->write_mode);
+ if(rtn_status == SPI_NAND_FLASH_RTN_NO_ERROR )
+ {
+ return 0;
+ }
+ else
+ {
+ return -EIO;
+ }
+}
+
+
+static int spinand_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
+{
+
+ struct spinand_state *state = mtd_to_state(mtd);
+ SPI_NAND_FLASH_RTN_T rtn_status = SPI_NAND_FLASH_RTN_NO_ERROR;
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_write_oob]: enter, page=0x%x \n", page);
+
+ memset(state->oob_buf, 0xff, MAX_USE_OOB_SIZE);
+ memcpy(&(state->oob_buf)[LINUX_USE_OOB_START_OFFSET], chip->oob_poi, MAX_LINUX_USE_OOB_SIZE);
+
+ rtn_status = spi_nand_write_page_internal(page, 0, NULL, 0, 0, (&state->oob_buf[0]), MAX_LINUX_USE_OOB_SIZE, ptr_dev_info_t->write_mode);
+
+ if(rtn_status == SPI_NAND_FLASH_RTN_NO_ERROR )
+ {
+ return 0;
+ }
+ else
+ {
+ return -EIO;
+ }
+
+}
+
+
+static int spinand_read_oob(struct mtd_info *mtd,struct nand_chip *chip, int page, int sndcmd)
+{
+ return 0;
+
+}
+
+
+static int spinand_block_markbad(struct mtd_info *mtd, loff_t offset)
+{
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_block_markbad]: enter , offset=0x%x\n", offset);
+
+ return 0;
+
+}
+
+
+static int spinand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
+{
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_block_bad]: enter \n");
+
+ return 0;
+}
+
+
+
+
+static void spinand_select_chip(struct mtd_info *mtd, int dev)
+{
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_select_chip]: enter \n");
+}
+
+static int spinand_dev_ready(struct mtd_info *mtd)
+{
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_dev_ready]: enter \n");
+ return 1;
+}
+
+static void spinand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_enable_hwecc]: enter \n");
+}
+
+
+static int spinand_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_correct_data]: enter \n");
+ return 0;
+}
+
+
+
+
+static int spinand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
+{
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_calculate_ecc]: enter \n");
+ return 0;
+}
+
+
+static uint8_t spinand_read_byte(struct mtd_info *mtd)
+{
+
+ struct spinand_state *state = mtd_to_state(mtd);
+ u8 data;
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_read_byte]: enter \n");
+
+ data = state->buf[state->buf_idx];
+ state->buf_idx++;
+ return data;
+}
+
+
+static int spinand_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+
+
+ struct spinand_info *info = (struct spinand_info *)chip->priv;
+
+ unsigned long timeo = jiffies;
+ int retval, state = chip->state;
+ u8 status;
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_wait]: enter \n");
+
+ if (state == FL_ERASING)
+ timeo += (HZ * 400) / 1000;
+ else
+ timeo += (HZ * 20) / 1000;
+
+ while (time_before(jiffies, timeo)) {
+ retval = spinand_read_status(info->spi, &status);
+ if ((status & STATUS_OIP_MASK) == STATUS_READY)
+ return 0;
+
+ cond_resched();
+ }
+ return 0;
+}
+
+
+/*
+ * spinand_reset- send RESET command "0xff" to the Nand device.
+ */
+static void spinand_reset(struct spi_device *spi_nand)
+{
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_reset]: enter \n");
+
+ spi_nand_protocol_reset();
+}
+
+
+static void spinand_cmdfunc(struct mtd_info *mtd, unsigned int command, int column, int page)
+{
+
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+
+ struct nand_chip *chip = (struct nand_chip *)mtd->priv;
+ struct spinand_info *info = (struct spinand_info *)chip->priv;
+ struct spinand_state *state = (struct spinand_state *)info->priv;
+ u16 block_id;
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ state->command = command;
+
+ switch (command) {
+ /*
+ * READ0 - read in first 0x800 bytes
+ */
+ case NAND_CMD_READ1:
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_cmdfunc]: NAND_CMD_READ1 \n");
+ case NAND_CMD_READ0:
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_cmdfunc]: NAND_CMD_READ0 \n");
+
+ state->buf_idx = column;
+ spi_nand_read_page_internal(page, ptr_dev_info_t->read_mode);
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1,"[spinand_cmdfunc]: NAND_CMD_READ0/1, End\n");
+
+ break;
+ /* READOOB reads only the OOB because no ECC is performed. */
+ case NAND_CMD_READOOB:
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_cmdfunc]: NAND_CMD_READOOB, page=0x%x \n", page);
+ state->buf_idx = column + (ptr_dev_info_t->page_size);
+ spi_nand_read_page_internal(page, ptr_dev_info_t->read_mode);
+
+ break;
+ case NAND_CMD_RNDOUT:
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_cmdfunc]: NAND_CMD_RNDOUT \n");
+ state->buf_idx = column;
+ break;
+ case NAND_CMD_READID:
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_cmdfunc]: NAND_CMD_READID \n");
+ state->buf_idx = 0;
+ spinand_read_id(info->spi, (u8 *)state->buf);
+ break;
+ /* ERASE1 stores the block and page address */
+ case NAND_CMD_ERASE1:
+ block_id = page /((mtd->erasesize)/(mtd->writesize));
+
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_cmdfunc]: NAND_CMD_ERASE1 \n");
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "erasesize=0x%x, writesiez=0x%x, page=0x%x, block_idx=0x%x\n", (mtd->erasesize), (mtd->writesize), page, block_id);
+ spinand_erase_block(info->spi, block_id);
+ break;
+ /* ERASE2 uses the block and page address from ERASE1 */
+ case NAND_CMD_ERASE2:
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1,"[spinand_cmdfunc]: NAND_CMD_ERASE2 \n");
+ break;
+ /* SEQIN sets up the addr buffer and all registers except the length */
+ case NAND_CMD_SEQIN:
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_cmdfunc]: NAND_CMD_SEQIN \n");
+ state->col = column;
+ state->row = page;
+ state->buf_idx = column;
+ state->buf_len = 0;
+ state->oob_buf_len = 0 ;
+ memset(state->buf, 0xff, BUFSIZE);
+ memset(state->oob_buf, 0xff, MAX_USE_OOB_SIZE);
+ break;
+ /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
+ case NAND_CMD_PAGEPROG:
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_cmdfunc]: NAND_CMD_PAGEPROG \n");
+ spinand_program_page(mtd, state->row, state->col,
+ state->buf_idx, state->buf);
+ break;
+ case NAND_CMD_STATUS:
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_cmdfunc]: NAND_CMD_STATUS \n");
+ spinand_get_otp(info->spi, state->buf);
+ if (!(state->buf[0] & 0x80))
+ state->buf[0] = 0x80;
+ state->buf_idx = 0;
+ break;
+ /* RESET command */
+ case NAND_CMD_RESET:
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1, "[spinand_cmdfunc]: NAND_CMD_RESET \n");
+ if (wait_till_ready(info->spi))
+ printk("WAIT timedout!!!\n");
+ /* a minimum of 250us must elapse before issuing RESET cmd*/
+ udelay(250);
+ spinand_reset(info->spi);
+ break;
+ default:
+ _SPI_NAND_DEBUG_PRINTF(SPI_NAND_FLASH_DEBUG_LEVEL_1,"[spinand_cmdfunc]: Unknown CMD: 0x%x\n", command);
+ }
+}
+
+
+struct nand_flash_dev spi_nand_flash_ids[] = {
+ {NULL, 0, 0, 0, 0, 0},
+ {NULL,}
+};
+
+
+static void free_allcate_memory(struct mtd_info *mtd)
+{
+
+
+ _SPI_NAND_PRINTF("SPI NAND : free_allcate_memory");
+
+ if( ((struct spinand_info *)(((struct nand_chip *)(mtd->priv))->priv))->spi )
+ {
+ kfree( ((struct spinand_info *)(((struct nand_chip *)(mtd->priv))->priv))->spi );
+ }
+
+ if( ((struct spinand_info *)(((struct nand_chip *)(mtd->priv))->priv)) )
+ {
+ kfree( ((struct spinand_info *)(((struct nand_chip *)(mtd->priv))->priv)) );
+ }
+
+ if( (((struct nand_chip *)(mtd->priv))->priv) )
+ {
+ kfree ( (((struct nand_chip *)(mtd->priv))->priv) ) ;
+ }
+
+ if((mtd->priv) )
+ {
+ kfree((mtd->priv));
+ }
+
+ if(mtd)
+ {
+ kfree(mtd);
+ }
+
+}
+
+
+static int spi_nand_setup(u32 *ptr_rtn_mtd_address)
+{
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ struct spinand_info *info;
+ struct spinand_state *state;
+ struct spi_device *spi_nand;
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ int ret;
+ unsigned long spinand_spinlock_flags;
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ _SPI_NAND_PRINTF("[spi_nand_setup] : Enter \n");
+
+ /* 1. Allocate neccessary struct memory ,and assigned related pointer */
+ info = kzalloc(sizeof(struct spinand_info),GFP_KERNEL);
+ if (!info)
+ {
+ _SPI_NAND_PRINTF("spi_nand_setup: allocate info structure error! \n");
+ return -ENOMEM;
+ }
+
+ /* Chuck Kuo, temp assign. Other function will pass it, but we wil not use it in functions. */
+ spi_nand = kzalloc(sizeof(struct spinand_info),GFP_KERNEL);
+ if (!spi_nand)
+ {
+ _SPI_NAND_PRINTF("spi_nand_setup: allocate spi_nand structure error! \n");
+ return -ENOMEM;
+ }
+
+ info->spi = spi_nand;
+
+ state = kzalloc(sizeof(struct spinand_state),GFP_KERNEL);
+ if (!state)
+ {
+ _SPI_NAND_PRINTF("spi_nand_setup: allocate state structure error! \n");
+ return -ENOMEM;
+ }
+
+ info->priv = state;
+ state->buf_idx = 0;
+ state->buf = kzalloc( BUFSIZE, GFP_KERNEL); /* Data buffer */
+ state->oob_buf = kzalloc( MAX_USE_OOB_SIZE, GFP_KERNEL); /* OOB buffer */
+ if (!state->buf)
+ {
+ _SPI_NAND_PRINTF("spi_nand_setup: allocate data buf error! \n");
+ return -ENOMEM;
+ }
+ if (!state->oob_buf)
+ {
+ _SPI_NAND_PRINTF("spi_nand_setup: allocate oob buf error! \n");
+ return -ENOMEM;
+ }
+
+ chip = kzalloc(sizeof(struct nand_chip),GFP_KERNEL);
+ if (!chip)
+ {
+ _SPI_NAND_PRINTF("spi_nand_setup: allocate chip structure error! \n");
+ return -ENOMEM;
+ }
+
+ chip->priv = info;
+ chip->read_byte = spinand_read_byte;
+ chip->read_buf = spinand_read_buf;
+ chip->write_buf = spinand_write_buf;
+ chip->waitfunc = spinand_wait;
+ chip->options |= NAND_CACHEPRG;
+ chip->select_chip = spinand_select_chip;
+ chip->dev_ready = spinand_dev_ready;
+ chip->cmdfunc = spinand_cmdfunc;
+#ifdef CONFIG_MTD_SPINAND_ONDIEECC
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = 0x200;
+ chip->ecc.bytes = 0x4;
+ chip->ecc.steps = 0x4;
+ chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
+#if 0
+ chip->ecc.strength = 1;
+#endif
+ chip->ecc.layout = &spinand_oob_64;
+ chip->ecc.read_page = spinand_read_page_hwecc;
+ chip->ecc.write_page = spinand_write_page_hwecc;
+ if(_spi_dma_mode == 1) {
+ /* Disable OnDie ECC */
+ if (spinand_disable_ecc(spi_nand) < 0)
+ pr_info("%s: disable ecc failed!\n", __func__);
+ } else {
+ /* Enable OnDie ECC */
+ if (spinand_enable_ecc(spi_nand) < 0)
+ pr_info("%s: enable ecc failed!\n", __func__);
+ }
+#else
+ chip->ecc.mode = NAND_ECC_SOFT;
+ if (spinand_disable_ecc(spi_nand) < 0)
+ pr_info("%s: disable ecc failed!\n", __func__);
+#endif
+
+
+ chip->options |= NAND_NO_SUBPAGE_WRITE; /* Chip does not allow subpage writes. */
+ chip->options |= NAND_SKIP_BBTSCAN; /*To skips the bbt scan during initialization. */
+ /* For BMT, we need to revise driver architecture */
+#if 0
+ //chip->write_page = spinand_write_page;
+ //chip->ecc.read_oob = spinand_read_oob;
+#endif
+ chip->ecc.write_oob = spinand_write_oob;
+ chip->block_markbad = spinand_block_markbad; /* tmp null */
+ chip->block_bad = spinand_block_bad; /* tmp null */
+ chip->ecc.calculate = spinand_calculate_ecc;
+ chip->ecc.correct = spinand_correct_data;
+ chip->ecc.hwctl = spinand_enable_hwecc;
+
+ mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
+ if (!mtd)
+ {
+ _SPI_NAND_PRINTF("spi_nand_setup: allocate mtd error! \n");
+ return -ENOMEM;
+ }
+
+ spi_nand_mtd = mtd;
+ mtd->priv = chip;
+ mtd->name = "EN7512-SPI_NAND";
+ mtd->owner = THIS_MODULE;
+ mtd->oobsize = MAX_LINUX_USE_OOB_SIZE;
+
+ spi_nand_flash_ids[0].name = ptr_dev_info_t->ptr_name;
+ spi_nand_flash_ids[0].dev_id = ptr_dev_info_t->dev_id;
+ spi_nand_flash_ids[0].pagesize = ptr_dev_info_t->page_size;
+ spi_nand_flash_ids[0].chipsize = ((ptr_dev_info_t->device_size)>>20);
+ spi_nand_flash_ids[0].erasesize = ptr_dev_info_t->erase_size;
+ spi_nand_flash_ids[0].options = 0;
+
+ ret = nand_scan_ident(mtd, 1, spi_nand_flash_ids);
+ if (!ret)
+ {
+ _SPI_NAND_PRINTF("nand_scan_ident ok\n");
+ ret = nand_scan_tail(mtd);
+ _SPI_NAND_PRINTF("[spi_nand_setup]: chip size = 0x%llx, erase_shift=0x%x\n", chip->chipsize, chip->phys_erase_shift);
+ }
+ else
+ {
+ _SPI_NAND_PRINTF("nand_scan_ident fail\n");
+ return -ENOMEM;
+ }
+
+
+#if defined(TCSUPPORT_NAND_BMT) && !defined(LZMA_IMG)
+
+ bmt_pool_size = calc_bmt_pool_size(mtd);
+ if(bmt_pool_size > maximum_bmt_block_count)
+ {
+ _SPI_NAND_PRINTF("Error : bmt pool size: %d > maximum size %d\n", bmt_pool_size, maximum_bmt_block_count);
+ _SPI_NAND_PRINTF("Error: init bmt failed \n");
+ return -1;
+ }
+
+ if(bmt_pool_size > MAX_BMT_SIZE) {
+ bmt_pool_size = MAX_BMT_SIZE;
+ }
+
+ _SPI_NAND_PRINTF("bmt pool size: %d \n", bmt_pool_size);
+
+ if (!g_bmt)
+ {
+ if ( !(g_bmt = init_bmt(mtd, bmt_pool_size)) )
+ {
+ _SPI_NAND_PRINTF("Error: init bmt failed \n");
+ return -1;
+ }
+ }
+
+ if (!g_bbt)
+ {
+ if ( !(g_bbt = start_init_bbt()) )
+ {
+ _SPI_NAND_PRINTF("Error: init bbt failed \n");
+ return -1;
+ }
+ }
+
+ if(write_bbt_or_bmt_to_flash() != 0)
+ {
+ _SPI_NAND_PRINTF("Error: save bbt or bmt to nand failed \n");
+ return -1;
+ }
+
+ if(create_badblock_table_by_bbt())
+ {
+ _SPI_NAND_PRINTF("Error: create bad block table failed \n");
+ return -1;
+ }
+
+ _SPI_NAND_PRINTF("BMT & BBT Init Success \n");
+
+#ifdef TCSUPPORT_CT_PON
+ nand_flash_avalable_size = chip->chipsize - (chip->chipsize * MAX_BMT_SIZE_PERCENTAGE_CT);
+ mtd->size = nand_flash_avalable_size;
+#else
+ mtd->size = nand_logic_size;
+#endif
+
+#endif
+
+ ranand_read_byte = SPI_NAND_Flash_Read_Byte;
+ ranand_read_dword = SPI_NAND_Flash_Read_DWord;
+
+
+ *ptr_rtn_mtd_address = mtd;
+
+ return 0;
+
+
+}
+
+
+static int spi_nand_proc_read(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+
+ int len;
+
+ if (off > 0)
+ {
+ return 0;
+ }
+
+ len = sprintf(page, "SPI NAND DEBUG LEVEL=%d, _SPI_NAND_TEST_FLAG=%d\n", _SPI_NAND_DEBUG_LEVEL, _SPI_NAND_TEST_FLAG);
+
+ return len;
+
+}
+
+static int spi_nand_proc_write(struct file* file, const char* buffer,
+ unsigned long count, void *data)
+{
+
+
+ char buf[16];
+
+ int len = count;
+
+ if (copy_from_user(buf, buffer, len))
+ {
+ return -EFAULT;
+ }
+
+ buf[len] = '\0';
+
+ _SPI_NAND_PRINTF("len = 0x%x, buf[0]=%c, buf[1]=%c\n", len , buf[0], buf[1]);
+
+
+ if (buf[0] == '0')
+ {
+ _SPI_NAND_PRINTF("Set SPI NAND DEBUG LEVLE to %d\n", SPI_NAND_FLASH_DEBUG_LEVEL_0);
+ _SPI_NAND_DEBUG_LEVEL = SPI_NAND_FLASH_DEBUG_LEVEL_0;
+ }
+ else if (buf[0] == '1')
+ {
+ _SPI_NAND_PRINTF("Set SPI NAND DEBUG LEVLE to %d\n", SPI_NAND_FLASH_DEBUG_LEVEL_1);
+ _SPI_NAND_DEBUG_LEVEL = SPI_NAND_FLASH_DEBUG_LEVEL_1;
+ }
+ else if (buf[0] == '2')
+ {
+ _SPI_NAND_PRINTF("Set SPI NAND DEBUG LEVLE to %d\n", SPI_NAND_FLASH_DEBUG_LEVEL_2);
+ _SPI_NAND_DEBUG_LEVEL = SPI_NAND_FLASH_DEBUG_LEVEL_2;
+ }
+ else
+ {
+ _SPI_NAND_PRINTF("DEBUG LEVEL only up to %d\n", (SPI_NAND_FLASH_DEBUG_LEVEL_DEF_NO -1 ));
+ }
+
+ if(buf[1] == '0')
+ {
+ _SPI_NAND_TEST_FLAG = 0;
+ _SPI_NAND_PRINTF("Set _SPI_NAND_TEST_FLAG to %d\n", _SPI_NAND_TEST_FLAG);
+ }
+ if(buf[1] == '1')
+ {
+ _SPI_NAND_TEST_FLAG = 1;
+ _SPI_NAND_PRINTF("Set _SPI_NAND_TEST_FLAG to %d\n", _SPI_NAND_TEST_FLAG);
+ }
+
+ return len;
+
+}
+
+static int write_test(void *arg)
+{
+ struct _SPI_NAND_FLASH_RW_TEST_T param;
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ u32 ptr_rtn_len;
+ u8 buf[64], read_buf[64];
+ int i;
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+ int cpu = smp_processor_id();
+ int vpe = cpu_data[cpu].vpe_id;
+#else
+ int cpu = 0;
+ int vpe = 0;
+#endif
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ memcpy(&param, arg, sizeof(struct _SPI_NAND_FLASH_RW_TEST_T));
+ _SPI_NAND_PRINTF("write_test: run at vpe:%d, cpu:%d\n", vpe, cpu);
+ _SPI_NAND_PRINTF("write_test: times=%d, block_idx=%d\n", param.times, param.block_idx);
+
+ while (!kthread_should_stop() && param.times > 0) {
+ if(param.times % 10 == 0)
+ printk("write_test:%d\n", param.times);
+ msleep(1);
+ param.times--;
+ get_random_bytes(buf, sizeof(buf));
+ SPI_NAND_Flash_Erase(param.block_idx * ptr_dev_info_t->erase_size, sizeof(buf));
+ SPI_NAND_Flash_Write_Nbyte(param.block_idx * ptr_dev_info_t->erase_size, sizeof(buf), &ptr_rtn_len, buf, ptr_dev_info_t->write_mode);
+ SPI_NAND_Flash_Read_NByte(param.block_idx * ptr_dev_info_t->erase_size, sizeof(read_buf), &ptr_rtn_len, read_buf, ptr_dev_info_t->read_mode);
+
+ if(memcmp(buf, read_buf, sizeof(buf)) != 0) {
+ _SPI_NAND_PRINTF("write fail\n");
+ return -1;
+ }
+ }
+
+ _SPI_NAND_PRINTF("write done\n");
+
+ return 0;
+}
+
+static int read_test(void *arg)
+{
+ struct _SPI_NAND_FLASH_RW_TEST_T param;
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ u32 ptr_rtn_len;
+ u8 buf[64];
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+ int cpu = smp_processor_id();
+ int vpe = cpu_data[cpu].vpe_id;
+#else
+ int cpu = 0;
+ int vpe = 0;
+#endif
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ memcpy(&param, arg, sizeof(struct _SPI_NAND_FLASH_RW_TEST_T));
+ _SPI_NAND_PRINTF("read_test: run at vpe:%d, cpu:%d\n", vpe, cpu);
+ _SPI_NAND_PRINTF("read_test: times=%d, block_idx=%d\n", param.times, param.block_idx);
+
+ memset(buf, 0xaa, sizeof(buf));
+
+ while (!kthread_should_stop() && param.times > 0) {
+ if(param.times % 10 == 0)
+ _SPI_NAND_PRINTF("read_test:%d\n", param.times);
+ msleep(1);
+ param.times--;
+ SPI_NAND_Flash_Read_NByte(param.block_idx * ptr_dev_info_t->erase_size, sizeof(buf), &ptr_rtn_len, buf, ptr_dev_info_t->read_mode);
+ }
+
+ _SPI_NAND_PRINTF("read done\n");
+
+ return 0;
+}
+
+static int spi_nand_proc_test_write(struct file* file, const char* buffer,
+ unsigned long count, void *data)
+{
+ char buf[64], cmd[32];
+ u32 arg1, arg2;
+ struct task_struct *thread;
+ u32 ptr_rtn_len;
+ u32 idx;
+ struct SPI_NAND_FLASH_INFO_T *ptr_dev_info_t;
+ unsigned int cpu;
+
+ ptr_dev_info_t = _SPI_NAND_GET_DEVICE_INFO_PTR;
+
+ if (copy_from_user(buf, buffer, count)) {
+ return -EFAULT;
+ }
+
+ buf[count] = '\0';
+
+ sscanf(buf, "%s %d %d", cmd, &arg1, &arg2) ;
+
+ _SPI_NAND_PRINTF("cmd:%s, arg1=%u, arg2=%u\n", cmd, arg1, arg2);
+
+ if (!strcmp(cmd, "rw_test")) {
+ rw_test_param.times = arg1;
+ rw_test_param.block_idx = arg2;
+
+ thread = kthread_create(write_test, (void *)&rw_test_param, "write_test");
+ kthread_bind(thread, 0);
+ wake_up_process(thread);
+ thread = kthread_create(read_test, (void *)&rw_test_param, "read_test");
+ kthread_bind(thread, 1);
+ wake_up_process(thread);
+ } else if (!strcmp(cmd, "read")) {
+ SPI_NAND_Flash_Read_NByte(arg1, ptr_dev_info_t->page_size, &ptr_rtn_len, _current_cache_page_data, ptr_dev_info_t->read_mode);
+ printk("read data:\n");
+ for(idx = 0; idx < ptr_dev_info_t->page_size; idx++) {
+ if((idx % 8 == 0) && (idx != 0)) {
+ printk(" ");
+ }
+ if(idx % 16 == 0) {
+ printk("\n%08x: ", idx);
+ }
+ printk("%02x ", _current_cache_page_data[idx]);
+ }
+ printk("\n\n");
+ } else if (!strcmp(cmd, "erase")) {
+ SPI_NAND_Flash_Erase(arg1 * ptr_dev_info_t->erase_size, ptr_dev_info_t->erase_size);
+ } else {
+ _SPI_NAND_PRINTF("input not defined.\n");
+ }
+
+ return count;
+
+}
+
+
+static struct mtd_info *spi_nand_probe_kernel(struct map_info *map)
+{
+
+ u32 mtd_address;
+ int rtn_status;
+
+ _SPI_NAND_PRINTF("EN7512 mtd init: spi nand probe enter\n");
+
+ rtn_status = spi_nand_setup(&mtd_address);
+
+ if(rtn_status == 0 ) /* Probe without error */
+ {
+ return ((struct mtd_info * )(mtd_address));
+ }
+ else
+ {
+ free_allcate_memory( (struct mtd_info * )(mtd_address) );
+ _SPI_NAND_PRINTF("[spi_nand_probe_kernel] probe fail !\n");
+ return NULL;
+ }
+
+}
+
+static void spi_nand_destroy_kernel(struct mtd_info *mtd)
+{
+ free_allcate_memory(mtd);
+}
+
+static struct mtd_chip_driver spi_nand_chipdrv = {
+ .probe = spi_nand_probe_kernel,
+ .destroy = spi_nand_destroy_kernel,
+ .name = "nandflash_probe",
+ .module = THIS_MODULE
+};
+
+static int __init linux_spi_nand_flash_init(void)
+{
+
+ struct proc_dir_entry *entry;
+
+ _SPI_NAND_PRINTF("IS_SPIFLASH=0x%x, IS_NANDFLASH=0x%x, (0xBFA10114)=0x%lx)\n", (unsigned int)IS_SPIFLASH, (unsigned int)IS_NANDFLASH, VPint(0xBFA10114));
+
+ if(IS_SPIFLASH){ /* For boot from SPI NOR, then mount NAND as a MTD partition */
+ _SPI_NAND_PRINTF("[linux_spi_nand_flash_init] spi nor flash\n");
+ return -1;
+ }
+ else{
+ SPI_NAND_Flash_Init(0);
+
+ _SPI_NAND_PRINTF("spi nand flash\n");
+ register_mtd_chip_driver(&spi_nand_chipdrv);
+
+
+ entry = create_proc_entry(SPI_NAND_PROCNAME, 0666, NULL);
+ if (entry == NULL)
+ {
+ _SPI_NAND_PRINTF("SPI NAND unable to create /proc entry\n");
+ return -ENOMEM;
+ }
+ entry->read_proc = spi_nand_proc_read;
+ entry->write_proc = spi_nand_proc_write;
+
+ entry = create_proc_entry(SPI_NAND_TEST, 0666, NULL);
+ if (entry == NULL)
+ {
+ _SPI_NAND_PRINTF("SPI NAND unable to create /proc entry\n");
+ return -ENOMEM;
+ }
+ entry->write_proc = spi_nand_proc_test_write;
+
+
+ return 0;
+ }
+
+}
+
+static void __init linux_spi_nand_flash_exit(void)
+{
+ if(IS_SPIFLASH){
+ }
+ else
+ {
+ unregister_mtd_chip_driver(&spi_nand_chipdrv);
+
+ remove_proc_entry(SPI_NAND_PROCNAME, NULL);
+ remove_proc_entry(SPI_NAND_TEST, NULL);
+ }
+}
+
+module_init(linux_spi_nand_flash_init);
+module_exit(linux_spi_nand_flash_exit);
+
+
+/* End of [spi_nand_flash.c] package */
Index: linux-3.18.21/drivers/mtd/chips/spi_nand_flash.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/chips/spi_nand_flash.h 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,428 @@
+ /***************************************************************************************
+ * Copyright(c) 2014 ECONET Incorporation All rights reserved.
+ *
+ * This is unpublished proprietary source code of ECONET Incorporation
+ *
+ * The copyright notice above does not evidence any actual or intended
+ * publication of such source code.
+ ***************************************************************************************
+ */
+
+/*======================================================================================
+ * MODULE NAME: spi
+ * FILE NAME: spi_nand_flash.h
+ * DATE: 2014/11/21
+ * VERSION: 1.00
+ * PURPOSE: To Provide SPI NAND Access interface.
+ * NOTES:
+ *
+ * AUTHOR : Chuck Kuo REVIEWED by
+ *
+ * FUNCTIONS
+ * SPI_NAND_Flash_Init To provide interface for SPI NAND init.
+ * SPI_NAND_Flash_Get_Flash_Info To get system current flash info.
+ * SPI_NAND_Flash_Write_Nbyte To provide interface for Write N Bytes into SPI NAND Flash.
+ * SPI_NAND_Flash_Read_NByte To provide interface for Read N Bytes from SPI NAND Flash.
+ * SPI_NAND_Flash_Erase To provide interface for Erase SPI NAND Flash.
+ * SPI_NAND_Flash_Read_Byte To provide interface for read 1 Bytes from SPI NAND Flash.
+ * SPI_NAND_Flash_Read_DWord To provide interface for read Double Word from SPI NAND Flash.
+ *
+ * DEPENDENCIES
+ *
+ * * $History: $
+ * MODIFICATION HISTORY:
+ * Version 1.00 - Date 2014/11/21 By Chuck Kuo
+ * ** This is the first version, created to support the functions of
+ * current module.
+ *
+ *======================================================================================
+ */
+
+#ifndef __SPI_NAND_FLASH_H__
+ #define __SPI_NAND_FLASH_H__
+
+/* INCLUDE FILE DECLARATIONS --------------------------------------------------------- */
+//#include "asm/system.h"
+#include "asm/types.h"
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+
+/* MACRO DECLARATIONS ---------------------------------------------------------------- */
+#define SPI_NAND_FLASH_OOB_FREE_ENTRY_MAX 32
+
+/* TYPE DECLARATIONS ----------------------------------------------------------------- */
+typedef enum{
+ SPI_NAND_FLASH_READ_DUMMY_BYTE_PREPEND,
+ SPI_NAND_FLASH_READ_DUMMY_BYTE_APPEND,
+
+ SPI_NAND_FLASH_READ_DUMMY_BYTE_DEF_NO
+
+} SPI_NAND_FLASH_READ_DUMMY_BYTE_T;
+
+typedef enum{
+ SPI_NAND_FLASH_RTN_NO_ERROR =0,
+ SPI_NAND_FLASH_RTN_PROBE_ERROR,
+ SPI_NAND_FLASH_RTN_ALIGNED_CHECK_FAIL,
+ SPI_NAND_FLASH_RTN_DETECTED_BAD_BLOCK,
+ SPI_NAND_FLASH_RTN_ERASE_FAIL,
+ SPI_NAND_FLASH_RTN_PROGRAM_FAIL,
+
+
+ SPI_NAND_FLASH_RTN_DEF_NO
+} SPI_NAND_FLASH_RTN_T;
+
+typedef enum{
+ SPI_NAND_FLASH_READ_SPEED_MODE_SINGLE =0,
+ SPI_NAND_FLASH_READ_SPEED_MODE_DUAL,
+ SPI_NAND_FLASH_READ_SPEED_MODE_QUAD,
+
+ SPI_NAND_FLASH_READ_SPEED_MODE_DEF_NO
+} SPI_NAND_FLASH_READ_SPEED_MODE_T;
+
+
+typedef enum{
+ SPI_NAND_FLASH_WRITE_SPEED_MODE_SINGLE =0,
+ SPI_NAND_FLASH_WRITE_SPEED_MODE_QUAD,
+
+ SPI_NAND_FLASH_WRITE_SPEED_MODE_DEF_NO
+} SPI_NAND_FLASH_WRITE_SPEED_MODE_T;
+
+
+
+typedef enum{
+ SPI_NAND_FLASH_DEBUG_LEVEL_0 =0,
+ SPI_NAND_FLASH_DEBUG_LEVEL_1,
+ SPI_NAND_FLASH_DEBUG_LEVEL_2,
+
+ SPI_NAND_FLASH_DEBUG_LEVEL_DEF_NO
+} SPI_NAND_FLASH_DEBUG_LEVEL_T;
+
+#if 0
+typedef enum{
+ SPI_NAND_FLASH_PLANE_SELECT_NONE =0,
+ SPI_NAND_FLASH_PLANE_SELECT_HAVE,
+
+ SPI_NAND_FLASH_PLANE_SELECT_DEF_NO
+
+} SPI_NAND_FLASH_PLANE_SELECT_T;
+#endif
+
+/* Bitwise */
+#define SPI_NAND_FLASH_FEATURE_NONE ( 0x00 )
+#define SPI_NAND_FLASH_PLANE_SELECT_HAVE ( 0x01 )
+#define SPI_NAND_FLASH_DIE_SELECT_1_HAVE ( 0x01 << 1 )
+#define SPI_NAND_FLASH_DIE_SELECT_2_HAVE ( 0x01 << 2 )
+
+struct spi_nand_flash_oobfree{
+ unsigned long offset;
+ unsigned long len;
+};
+
+struct spi_nand_flash_ooblayout
+{ unsigned long oobsize;
+ struct spi_nand_flash_oobfree oobfree[SPI_NAND_FLASH_OOB_FREE_ENTRY_MAX];
+};
+
+
+struct SPI_NAND_FLASH_INFO_T {
+ const u8 mfr_id;
+ const u8 dev_id;
+ const u8 *ptr_name;
+ u32 device_size; /* Flash total Size */
+ u32 page_size; /* Page Size */
+ u32 erase_size; /* Block Size */
+ u32 oob_size; /* Spare Area (OOB) Size */
+ SPI_NAND_FLASH_READ_DUMMY_BYTE_T dummy_mode;
+ u32 read_mode;
+ u32 write_mode;
+ struct spi_nand_flash_ooblayout *oob_free_layout;
+ u32 feature;
+#if 0
+ SPI_NAND_FLASH_PLANE_SELECT_T plane_select;
+#endif
+};
+
+struct nand_info {
+ int mfr_id;
+ int dev_id;
+ char *name;
+ int numchips;
+ int chip_shift;
+ int page_shift;
+ int erase_shift;
+ int oob_shift;
+ int badblockpos;
+ int opcode_type;
+};
+
+#if 1
+struct ra_nand_chip {
+ struct nand_info *flash;
+};
+#endif
+
+struct spinand_info {
+ struct nand_ecclayout *ecclayout;
+ struct spi_device *spi;
+ void *priv;
+};
+
+struct spinand_state {
+ uint32_t col;
+ uint32_t row;
+ int buf_idx;
+ u8 *buf;
+ uint32_t buf_len;
+ int oob_idx;
+ u8 *oob_buf;
+ uint32_t oob_buf_len;
+ uint32_t command;
+};
+
+struct en7512_spinand_host
+{
+ struct nand_chip nand_chip;
+ struct mtd_info mtd;
+ struct nand_ecclayout *ecclayout;
+ struct spinand_state state;
+ void *priv;
+};
+
+
+/* EXPORTED SUBPROGRAM SPECIFICATION ------------------------------------------------- */
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: SPI_NAND_FLASH_RTN_T SPI_NAND_Flash_Init( long rom_base )
+ * PURPOSE : To provide interface for SPI NAND init.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : rom_base - The rom_base variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/12 by Chuck Kuo - The first revision for this function.
+ *
+
+ *------------------------------------------------------------------------------------
+ */
+SPI_NAND_FLASH_RTN_T SPI_NAND_Flash_Init( u32 rom_base );
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: SPI_NAND_FLASH_RTN_T SPI_NAND_Flash_Get_Flash_Info( struct SPI_NAND_FLASH_INFO_T *ptr_rtn_into_t )
+ * PURPOSE : To get system current flash info.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: ptr_rtn_into_t - A pointer to the structure of the ptr_rtn_into_t variable.
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2015/01/14 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+SPI_NAND_FLASH_RTN_T SPI_NAND_Flash_Get_Flash_Info( struct SPI_NAND_FLASH_INFO_T *ptr_rtn_into_t);
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: SPI_NAND_FLASH_RTN_T SPI_NAND_Flash_Write_Nbyte( u32 dst_addr,
+ * u32 len,
+ * u32 *ptr_rtn_len,
+ * u8* ptr_buf )
+ * PURPOSE : To provide interface for Write N Bytes into SPI NAND Flash.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : dst_addr - The dst_addr variable of this function.
+ * len - The len variable of this function.
+ * buf - The buf variable of this function.
+ * OUTPUT: rtn_len - The rtn_len variable of this function.
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/15 by Chuck Kuo - The first revision for this function.
+ *
+
+ *------------------------------------------------------------------------------------
+ */
+SPI_NAND_FLASH_RTN_T SPI_NAND_Flash_Write_Nbyte( u32 dst_addr,
+ u32 len,
+ u32 *ptr_rtn_len,
+ u8 *ptr_buf,
+ SPI_NAND_FLASH_WRITE_SPEED_MODE_T speed_mode );
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: int SPI_NAND_Flash_Read_NByte( long addr,
+ * long len,
+ * long *retlen,
+ * char *buf )
+ * PURPOSE : To provide interface for Read N Bytes from SPI NAND Flash.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : addr - The addr variable of this function.
+ * len - The len variable of this function.
+ * retlen - The retlen variable of this function.
+ * buf - The buf variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/12 by Chuck Kuo - The first revision for this function.
+ *
+
+ *------------------------------------------------------------------------------------
+ */
+u32 SPI_NAND_Flash_Read_NByte( unsigned long long addr,
+ u32 len,
+ u32 *retlen,
+ u8 *buf,
+ SPI_NAND_FLASH_READ_SPEED_MODE_T speed_mode );
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: SPI_NAND_FLASH_RTN_T SPI_NAND_Flash_Erase( u32 dst_addr,
+ * u32 len )
+ * PURPOSE : To provide interface for Erase SPI NAND Flash.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : dst_addr - The dst_addr variable of this function.
+ * len - The len variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/17 by Chuck Kuo - The first revision for this function.
+ *
+
+ *------------------------------------------------------------------------------------
+ */
+SPI_NAND_FLASH_RTN_T SPI_NAND_Flash_Erase( u32 dst_addr,
+ u32 len );
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: char SPI_NAND_Flash_Read_Byte( long addr )
+ * PURPOSE : To provide interface for read 1 Bytes from SPI NAND Flash.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : addr - The addr variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/12 by Chuck Kuo - The first revision for this function.
+ *
+
+ *------------------------------------------------------------------------------------
+ */
+unsigned char SPI_NAND_Flash_Read_Byte( unsigned long long addr );
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: long SPI_NAND_Flash_Read_DWord( long addr )
+ * PURPOSE : To provide interface for read Double Word from SPI NAND Flash.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : addr - The addr variable of this function.
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2014/12/12 by Chuck Kuo - The first revision for this function.
+ *
+
+ *------------------------------------------------------------------------------------
+ */
+unsigned long SPI_NAND_Flash_Read_DWord( unsigned long long addr );
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: void SPI_NAND_DEBUG_ENABLE( void )
+ * PURPOSE : To enable to printf debug message of SPI NAND driver.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2015/01/20 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+void SPI_NAND_DEBUG_ENABLE( void );
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: void SPI_NAND_DEBUG_DISABLE( void )
+ * PURPOSE : To disable to printf debug message of SPI NAND driver.
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2015/01/20 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+void SPI_NAND_DEBUG_DISABLE( void );
+
+/*------------------------------------------------------------------------------------
+ * FUNCTION: void SPI_NAND_Flash_Clear_Read_Cache_Data( void )
+ * PURPOSE : To clear the cache data for read.
+ * (The next time to read data will get data from flash chip certainly.)
+ * AUTHOR : Chuck Kuo
+ * CALLED BY
+ * -
+ * CALLS
+ * -
+ * PARAMs :
+ * INPUT : None
+ * OUTPUT: None
+ * RETURN : SPI_RTN_NO_ERROR - Successful. Otherwise - Failed.
+ * NOTES :
+ * MODIFICATION HISTORY:
+ * Date 2015/01/21 by Chuck Kuo - The first revision for this function.
+ *
+ *------------------------------------------------------------------------------------
+ */
+void SPI_NAND_Flash_Clear_Read_Cache_Data( void );
+
+void SPI_NAND_Flash_Set_DmaMode( u32 input );
+void SPI_NAND_Flash_Get_DmaMode( u32 *val );
+
+#endif /* ifndef __SPI_NAND_FLASH_H__ */
+/* End of [spi_nand_flash.h] package */
+
Index: linux-3.18.21/drivers/mtd/chips/spi_nfi.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/chips/spi_nfi.c 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,818 @@
+/***************************************************************************************
+ * Copyright(c) 2016 ECONET Incorporation All rights reserved.
+ *
+ * This is unpublished proprietary source code of ECONET Incorporation
+ *
+ * The copyright notice above does not evidence any actual or intended
+ * publication of such source code.
+ ***************************************************************************************
+ */
+
+/*======================================================================================
+ * MODULE NAME: spi
+ * FILE NAME: spi_nfi.c
+ * DATE: 2016/03/18
+ * VERSION: 1.00
+ * PURPOSE: To Provide SPI NFI(DMA) Access Interface.
+ * NOTES:
+ *
+ * AUTHOR : Chuck Kuo REVIEWED by
+ *
+ * FUNCTIONS
+ *
+ * DEPENDENCIES
+ *
+ * * $History: $
+ * MODIFICATION HISTORY:
+ * Version 1.00 - Date 2016/03/18 By Chuck Kuo
+ * ** This is the first version created to support the functions of
+ * current module.
+ *
+ *======================================================================================
+ */
+
+
+/* INCLUDE FILE DECLARATIONS --------------------------------------------------------- */
+#include "spi_nfi.h"
+#include <asm/io.h>
+#include <asm/tc3162/tc3162.h>
+#include <stdarg.h>
+#include <asm/string.h>
+#include <linux/delay.h>
+
+/* NAMING CONSTANT DECLARATIONS ------------------------------------------------------ */
+
+
+/*******************************************************************************
+ * NFI Register Definition
+ *******************************************************************************/
+#define _SPI_NFI_REGS_BASE 0xBFA11000
+#define _SPI_NFI_REGS_CNFG (_SPI_NFI_REGS_BASE + 0x0000)
+#define _SPI_NFI_REGS_PAGEFMT (_SPI_NFI_REGS_BASE + 0x0004)
+#define _SPI_NFI_REGS_CON (_SPI_NFI_REGS_BASE + 0x0008)
+#define _SPI_NFI_REGS_INTR_EN (_SPI_NFI_REGS_BASE + 0x0010)
+#define _SPI_NFI_REGS_INTR (_SPI_NFI_REGS_BASE + 0x0014)
+#define _SPI_NFI_REGS_CMD (_SPI_NFI_REGS_BASE + 0x0020)
+#define _SPI_NFI_REGS_STA (_SPI_NFI_REGS_BASE + 0x0060)
+#define _SPI_NFI_REGS_FIFOSTA (_SPI_NFI_REGS_BASE + 0x0064)
+#define _SPI_NFI_REGS_STRADDR (_SPI_NFI_REGS_BASE + 0x0080)
+#define _SPI_NFI_REGS_FDM0L (_SPI_NFI_REGS_BASE + 0x00A0)
+#define _SPI_NFI_REGS_FDM0M (_SPI_NFI_REGS_BASE + 0x00A4)
+#define _SPI_NFI_REGS_FDM7L (_SPI_NFI_REGS_BASE + 0x00D8)
+#define _SPI_NFI_REGS_FDM7M (_SPI_NFI_REGS_BASE + 0x00DC)
+#define _SPI_NFI_REGS_FIFODATA0 (_SPI_NFI_REGS_BASE + 0x0190)
+#define _SPI_NFI_REGS_FIFODATA1 (_SPI_NFI_REGS_BASE + 0x0194)
+#define _SPI_NFI_REGS_FIFODATA2 (_SPI_NFI_REGS_BASE + 0x0198)
+#define _SPI_NFI_REGS_FIFODATA3 (_SPI_NFI_REGS_BASE + 0x019C)
+#define _SPI_NFI_REGS_MASTERSTA (_SPI_NFI_REGS_BASE + 0x0224)
+#define _SPI_NFI_REGS_SECCUS_SIZE (_SPI_NFI_REGS_BASE + 0x022C)
+#define _SPI_NFI_REGS_RD_CTL2 (_SPI_NFI_REGS_BASE + 0x0510)
+#define _SPI_NFI_REGS_RD_CTL3 (_SPI_NFI_REGS_BASE + 0x0514)
+#define _SPI_NFI_REGS_PG_CTL1 (_SPI_NFI_REGS_BASE + 0x0524)
+#define _SPI_NFI_REGS_PG_CTL2 (_SPI_NFI_REGS_BASE + 0x0528)
+#define _SPI_NFI_REGS_NOR_PROG_ADDR (_SPI_NFI_REGS_BASE + 0x052C)
+#define _SPI_NFI_REGS_NOR_RD_ADDR (_SPI_NFI_REGS_BASE + 0x0534)
+#define _SPI_NFI_REGS_SNF_MISC_CTL (_SPI_NFI_REGS_BASE + 0x0538)
+#define _SPI_NFI_REGS_SNF_MISC_CTL2 (_SPI_NFI_REGS_BASE + 0x053C)
+#define _SPI_NFI_REGS_SNF_STA_CTL1 (_SPI_NFI_REGS_BASE + 0x0550)
+#define _SPI_NFI_REGS_SNF_STA_CTL2 (_SPI_NFI_REGS_BASE + 0x0554)
+
+
+/*******************************************************************************
+ * NFI Register Field Definition
+ *******************************************************************************/
+
+/* NFI_CNFG */
+#define _SPI_NFI_REGS_CNFG_AHB (0x0001)
+#define _SPI_NFI_REGS_CNFG_READ_EN (0x0002)
+#define _SPI_NFI_REGS_CNFG_DMA_BURST_EN (0x0004)
+#define _SPI_NFI_REGS_CNFG_HW_ECC_EN (0x0100)
+#define _SPI_NFI_REGS_CNFG_AUTO_FMT_EN (0x0200)
+
+#define _SPI_NFI_REGS_CONF_OP_PRGM (3)
+#define _SPI_NFI_REGS_CONF_OP_READ (6)
+#define _SPI_NFI_REGS_CONF_OP_MASK (0x7000)
+#define _SPI_NFI_REGS_CONF_OP_SHIFT (12)
+
+#define _SPI_NFI_REGS_CNFG_DMA_RD_SWAP_MASK (0x0004)
+#define _SPI_NFI_REGS_CNFG_DMA_WR_SWAP_MASK (0x0008)
+#define _SPI_NFI_REGS_CNFG_DMA_RD_SWAP_SHIFT (0x0003)
+#define _SPI_NFI_REGS_CNFG_DMA_WR_SWAP_SHIFT (0x0004)
+
+/* NFI_PAGEFMT */
+#define _SPI_NFI_REGS_PAGEFMT_PAGE_512 (0x0000)
+#define _SPI_NFI_REGS_PAGEFMT_PAGE_2K (0x0001)
+#define _SPI_NFI_REGS_PAGEFMT_PAGE_4K (0x0002)
+#define _SPI_NFI_REGS_PAGEFMT_PAGE_MASK (0x0003)
+#define _SPI_NFI_REGS_PAGEFMT_PAGE_SHIFT (0x0000)
+
+#define _SPI_NFI_REGS_PAGEFMT_SPARE_16 (0x0000)
+#define _SPI_NFI_REGS_PAGEFMT_SPARE_26 (0x0001)
+#define _SPI_NFI_REGS_PAGEFMT_SPARE_27 (0x0002)
+#define _SPI_NFI_REGS_PAGEFMT_SPARE_28 (0x0003)
+#define _SPI_NFI_REGS_PAGEFMT_SPARE_MASK (0x0030)
+#define _SPI_NFI_REGS_PAGEFMT_SPARE_SHIFT (4)
+
+#define _SPI_NFI_REGS_PAGEFMT_FDM_MASK (0x0F00)
+#define _SPI_NFI_REGS_PAGEFMT_FDM_SHIFT (8)
+#define _SPI_NFI_REGS_PAGEFMT_FDM_ECC_MASK (0xF000)
+#define _SPI_NFI_REGS_PAGEFMT_FDM_ECC_SHIFT (12)
+
+#define _SPI_NFI_REGS_PPAGEFMT_SPARE_16 (0x0000)
+#define _SPI_NFI_REGS_PPAGEFMT_SPARE_26 (0x0001)
+#define _SPI_NFI_REGS_PPAGEFMT_SPARE_27 (0x0002)
+#define _SPI_NFI_REGS_PPAGEFMT_SPARE_28 (0x0003)
+#define _SPI_NFI_REGS_PPAGEFMT_SPARE_MASK (0x0030)
+#define _SPI_NFI_REGS_PPAGEFMT_SPARE_SHIFT (4)
+
+/* NFI_CON */
+#define _SPI_NFI_REGS_CON_SEC_MASK (0xF000)
+#define _SPI_NFI_REGS_CON_WR_TRIG (0x0200)
+#define _SPI_NFI_REGS_CON_RD_TRIG (0x0100)
+#define _SPI_NFI_REGS_CON_SEC_SHIFT (12)
+#define _SPI_NFI_REGS_CON_RESET_VALUE (0x3)
+
+/* NFI_INTR_EN */
+#define _SPI_NFI_REGS_INTR_EN_AHB_DONE_EN (0x0040)
+
+/* NFI_REGS_INTR */
+#define _SPI_NFI_REGS_INTR_AHB_DONE_CHECK (0x0040)
+
+/* NFI_SECCUS_SIZE */
+#define _SPI_NFI_REGS_SECCUS_SIZE_EN (0x00010000)
+#define _SPI_NFI_REGS_SECCUS_SIZE_MASK (0x00001FFF)
+#define _SPI_NFI_REGS_SECCUS_SIZE_SHIFT (0)
+
+/* NFI_SNF_MISC_CTL */
+#define _SPI_NFI_REGS_SNF_MISC_CTL_DATA_RW_MODE_SHIFT (16)
+
+/* NFI_SNF_MISC_CTL2 */
+#define _SPI_NFI_REGS_SNF_MISC_CTL2_WR_MASK (0x1FFF0000)
+#define _SPI_NFI_REGS_SNF_MISC_CTL2_WR_SHIFT (16)
+#define _SPI_NFI_REGS_SNF_MISC_CTL2_RD_MASK (0x00001FFF)
+#define _SPI_NFI_REGS_SNF_MISC_CTL2_RD_SHIFT (0)
+
+/* NFI_REGS_CMD */
+#define _SPI_NFI_REGS_CMD_READ_VALUE (0x00)
+#define _SPI_NFI_REGS_CMD_WRITE_VALUE (0x80)
+
+
+/* NFI_REGS_PG_CTL1 */
+#define _SPI_NFI_REGS_PG_CTL1_SHIFT (8)
+
+
+/* SNF_STA_CTL1 */
+#define _SPI_NFI_REGS_LOAD_TO_CACHE_DONE (0x04000000)
+#define _SPI_NFI_REGS_READ_FROM_CACHE_DONE (0x02000000)
+
+
+
+/* MACRO DECLARATIONS ---------------------------------------------------------------- */
+
+#define READ_REGISTER_UINT32(reg) \
+ (*(volatile unsigned int * const)(reg))
+
+#define WRITE_REGISTER_UINT32(reg, val) \
+ (*(volatile unsigned int * const)(reg)) = (val)
+
+#define INREG32(x) READ_REGISTER_UINT32((unsigned int *)((void*)(x)))
+#define OUTREG32(x, y) WRITE_REGISTER_UINT32((unsigned int *)((void*)(x)), (unsigned int )(y))
+#define SETREG32(x, y) OUTREG32(x, INREG32(x)|(y))
+#define CLRREG32(x, y) OUTREG32(x, INREG32(x)&~(y))
+#define MASKREG32(x, y, z) OUTREG32(x, (INREG32(x)&~(y))|(z))
+
+#define _SPI_NFI_REG8_READ(addr) INREG32(addr)
+#define _SPI_NFI_REG8_WRITE(addr, data) OUTREG32(addr, data)
+#define _SPI_NFI_REG8_SETBITS(addr, data) SETREG32(addr, data)
+#define _SPI_NFI_REG8_CLRBITS(addr, data) CLRREG32(addr, data)
+#define _SPI_NFI_REG8_SETMASKBITS(addr, mask, data) MASKREG32(addr, mask, data)
+
+#define _SPI_NFI_REG16_READ(addr) INREG32(addr)
+#define _SPI_NFI_REG16_WRITE(addr, data) OUTREG32(addr, data)
+#define _SPI_NFI_REG16_SETBITS(addr, data) SETREG32(addr, data)
+#define _SPI_NFI_REG16_CLRBITS(addr, data) CLRREG32(addr, data)
+#define _SPI_NFI_REG16_SETMASKBITS(addr, mask, data) MASKREG32(addr, mask, data)
+
+#define _SPI_NFI_REG32_READ(addr) INREG32(addr)
+#define _SPI_NFI_REG32_WRITE(addr, data) OUTREG32(addr, data)
+#define _SPI_NFI_REG32_SETBITS(addr, data) SETREG32(addr, data)
+#define _SPI_NFI_REG32_CLRBITS(addr, data) CLRREG32(addr, data)
+#define _SPI_NFI_REG32_SETMASKBITS(addr, mask, data) MASKREG32(addr, mask, data)
+
+#define _SPI_NFI_GET_CONF_PTR &(_spi_nfi_conf_info_t)
+#define _SPI_NFI_GET_FDM_PTR &(_spi_nfi_fdm_value)
+#define _SPI_NFI_SET_FDM_PTR &(_spi_nfi_fdm_value)
+#define _SPI_NFI_DATA_SIZE_WITH_ECC (512)
+#define _SPI_NFI_CHECK_DONE_MAX_TIMES (1000000)
+
+#define _SPI_NFI_PRINTF printk
+#define _SPI_NFI_DEBUG_PRINTF spi_nfi_debug_printf
+
+#define _SPI_NFI_MEMCPY memcpy
+#define _SPI_NFI_MEMSET memset
+#define _SPI_NFI_MAX_FDM_NUMBER (64)
+#define _SPI_NFI_MAX_FDM_PER_SEC (8)
+
+
+/* TYPE DECLARATIONS ----------------------------------------------------------------- */
+
+/* STATIC VARIABLE DECLARATIONS ------------------------------------------------------ */
+SPI_NFI_CONF_T _spi_nfi_conf_info_t;
+u8 _spi_nfi_fdm_value[_SPI_NFI_MAX_FDM_NUMBER];
+u8 _SPI_NFI_DEBUG_FLAG = 0; /* For control printf debug message or not */
+
+/* LOCAL SUBPROGRAM BODIES------------------------------------------------------------ */
+static void spi_nfi_debug_printf( char *fmt, ... )
+{
+ if( _SPI_NFI_DEBUG_FLAG == 1 )
+ {
+ unsigned char str_buf[100];
+ va_list argptr;
+ int cnt;
+
+ va_start(argptr, fmt);
+ cnt = vsprintf(str_buf, fmt, argptr);
+ va_end(argptr);
+
+ printk("%s", str_buf);
+ }
+}
+
+SPI_NFI_RTN_T spi_nfi_get_fdm_from_register( void )
+{
+ u32 idx, i, j, reg_addr, val;
+ u8 *fdm_value;
+ SPI_NFI_CONF_T *spi_nfi_conf_info_t;
+ u8 spi_nfi_mapping_fdm_value[_SPI_NFI_MAX_FDM_NUMBER];
+
+ fdm_value = _SPI_NFI_GET_FDM_PTR;
+ spi_nfi_conf_info_t = _SPI_NFI_GET_CONF_PTR;
+
+ _SPI_NFI_MEMSET(spi_nfi_mapping_fdm_value, 0xff, _SPI_NFI_MAX_FDM_NUMBER);
+ _SPI_NFI_MEMSET(fdm_value, 0xff, _SPI_NFI_MAX_FDM_NUMBER);
+
+ idx = 0;
+ for( reg_addr = _SPI_NFI_REGS_FDM0L ; reg_addr <= _SPI_NFI_REGS_FDM7M ; reg_addr+=4 )
+ {
+ val = _SPI_NFI_REG32_READ(reg_addr);
+ spi_nfi_mapping_fdm_value[idx++] = ( val & 0xFF) ;
+ spi_nfi_mapping_fdm_value[idx++] = ((val >> 8) & 0xFF) ;
+ spi_nfi_mapping_fdm_value[idx++] = ((val >> 16) & 0xFF) ;
+ spi_nfi_mapping_fdm_value[idx++] = ((val >> 24) & 0xFF) ;
+ }
+
+ j=0;
+ for(idx=0 ; idx< (spi_nfi_conf_info_t->sec_num) ; idx++)
+ {
+ for(i =0; i< (spi_nfi_conf_info_t->fdm_num); i++)
+ {
+ fdm_value[j] = spi_nfi_mapping_fdm_value[(idx*_SPI_NFI_MAX_FDM_PER_SEC)+i];
+ j++;
+ }
+ }
+
+ return (SPI_NFI_RTN_NO_ERROR);
+}
+
+
+SPI_NFI_RTN_T spi_nfi_set_fdm_into_register( void )
+{
+ u32 idx, i,j, reg_addr, val;
+ u8 *fdm_value;
+ SPI_NFI_CONF_T *spi_nfi_conf_info_t;
+ u8 spi_nfi_mapping_fdm_value[_SPI_NFI_MAX_FDM_NUMBER];
+
+ fdm_value = _SPI_NFI_GET_FDM_PTR;
+ spi_nfi_conf_info_t = _SPI_NFI_GET_CONF_PTR;
+
+ _SPI_NFI_MEMSET(spi_nfi_mapping_fdm_value, 0xff, _SPI_NFI_MAX_FDM_NUMBER);
+
+ j=0;
+ for(idx=0 ; idx< (spi_nfi_conf_info_t->sec_num) ; idx++)
+ {
+ for(i =0; i< (spi_nfi_conf_info_t->fdm_num); i++)
+ {
+ spi_nfi_mapping_fdm_value[(idx*_SPI_NFI_MAX_FDM_PER_SEC)+i] = fdm_value[j];
+ j++;
+ }
+ }
+
+
+ idx = 0;
+ for( reg_addr = _SPI_NFI_REGS_FDM0L ; reg_addr <= _SPI_NFI_REGS_FDM7M ; reg_addr+=4 )
+ {
+ val = 0;
+
+ val |= (spi_nfi_mapping_fdm_value[idx++] & (0xFF));
+ val |= ((spi_nfi_mapping_fdm_value[idx++] & (0xFF)) << 8);
+ val |= ((spi_nfi_mapping_fdm_value[idx++] & (0xFF)) << 16);
+ val |= ((spi_nfi_mapping_fdm_value[idx++] & (0xFF)) << 24);
+
+ _SPI_NFI_REG32_WRITE(reg_addr, val);
+
+ _SPI_NFI_DEBUG_PRINTF("spi_nfi_set_fdm_into_register : reg(0x%x)=0x%x\n", reg_addr, _SPI_NFI_REG32_READ(reg_addr));
+ }
+
+ return (SPI_NFI_RTN_NO_ERROR);
+}
+
+
+
+/* EXPORTED SUBPROGRAM BODIES -------------------------------------------------------- */
+SPI_NFI_RTN_T SPI_NFI_Regs_Dump( void )
+{
+ u32 idx;
+
+ for(idx = _SPI_NFI_REGS_BASE ; idx <= _SPI_NFI_REGS_SNF_STA_CTL2 ; idx +=4)
+ {
+ _SPI_NFI_PRINTF("reg(0x%x) = 0x%x\n", idx, _SPI_NFI_REG32_READ(idx) );
+ }
+
+ return (SPI_NFI_RTN_NO_ERROR);
+}
+
+SPI_NFI_RTN_T SPI_NFI_Read_SPI_NAND_FDM(u8 *ptr_rtn_oob, u32 oob_len)
+{
+ u8 *fdm_value;
+ u32 idx;
+
+ spi_nfi_get_fdm_from_register();
+ fdm_value = _SPI_NFI_GET_FDM_PTR;
+
+ _SPI_NFI_MEMCPY(ptr_rtn_oob, fdm_value, oob_len);
+
+ return (SPI_NFI_RTN_NO_ERROR);
+
+}
+
+SPI_NFI_RTN_T SPI_NFI_Write_SPI_NAND_FDM(u8 *ptr_oob, u32 oob_len)
+{
+ u8 *fdm_value;
+ u32 idx;
+
+ fdm_value = _SPI_NFI_GET_FDM_PTR;
+
+ if( oob_len > _SPI_NFI_MAX_FDM_NUMBER )
+ {
+ _SPI_NFI_MEMCPY(fdm_value, ptr_oob, _SPI_NFI_MAX_FDM_NUMBER);
+ }
+ else
+ {
+ _SPI_NFI_MEMCPY(fdm_value, ptr_oob, oob_len);
+ }
+
+ spi_nfi_set_fdm_into_register();
+
+ return (SPI_NFI_RTN_NO_ERROR);
+
+}
+
+SPI_NFI_RTN_T SPI_NFI_Read_SPI_NAND_Page(SPI_NFI_MISC_SPEDD_CONTROL_T speed_mode, u32 read_cmd, u16 read_addr, u32 *prt_rtn_data)
+{
+ u32 check_cnt;
+ SPI_NFI_CONF_T *spi_nfi_conf_info_t;
+ SPI_NFI_RTN_T rtn_status = SPI_NFI_RTN_NO_ERROR;
+
+
+ spi_nfi_conf_info_t = _SPI_NFI_GET_CONF_PTR;
+
+ /* Set DMA destination address */
+ _SPI_NFI_REG32_WRITE( _SPI_NFI_REGS_STRADDR, prt_rtn_data);
+
+ /* Set Read length */
+ if( (spi_nfi_conf_info_t-> cus_sec_size_en_t) == SPI_NFI_CONF_CUS_SEC_SIZE_Disable )
+ {
+ _SPI_NFI_REG32_SETMASKBITS(_SPI_NFI_REGS_SNF_MISC_CTL2, _SPI_NFI_REGS_SNF_MISC_CTL2_RD_MASK, \
+ ((_SPI_NFI_DATA_SIZE_WITH_ECC + (spi_nfi_conf_info_t->spare_size_t)) * (spi_nfi_conf_info_t->sec_num))<< _SPI_NFI_REGS_SNF_MISC_CTL2_RD_SHIFT );
+ }
+ if( (spi_nfi_conf_info_t-> cus_sec_size_en_t) == SPI_NFI_CONF_CUS_SEC_SIZE_Enable )
+ {
+ _SPI_NFI_REG32_SETMASKBITS(_SPI_NFI_REGS_SNF_MISC_CTL2, _SPI_NFI_REGS_SNF_MISC_CTL2_RD_MASK, \
+ ((spi_nfi_conf_info_t->sec_size) * (spi_nfi_conf_info_t->sec_num))<< _SPI_NFI_REGS_SNF_MISC_CTL2_RD_SHIFT );
+ }
+
+ /* Set Read Command */
+ _SPI_NFI_REG32_WRITE( _SPI_NFI_REGS_RD_CTL2, (read_cmd & 0xFF));
+
+ /* Set Read mode */
+ _SPI_NFI_REG32_WRITE(_SPI_NFI_REGS_SNF_MISC_CTL, (speed_mode << _SPI_NFI_REGS_SNF_MISC_CTL_DATA_RW_MODE_SHIFT));
+
+ /* Set Read Address (Note : Controller will use following register, depend on the Hardware TRAP of SPI NAND/SPI NOR )*/
+ _SPI_NFI_REG32_WRITE( _SPI_NFI_REGS_RD_CTL3, read_addr); /* Set Address into SPI NAND address register*/
+
+ /* Set NFI Read */
+ _SPI_NFI_REG16_SETMASKBITS(_SPI_NFI_REGS_CNFG, _SPI_NFI_REGS_CONF_OP_MASK, \
+ (_SPI_NFI_REGS_CONF_OP_READ << _SPI_NFI_REGS_CONF_OP_SHIFT ));
+ _SPI_NFI_REG16_SETBITS(_SPI_NFI_REGS_CNFG, _SPI_NFI_REGS_CNFG_READ_EN);
+ _SPI_NFI_REG16_SETBITS(_SPI_NFI_REGS_CNFG, _SPI_NFI_REGS_CNFG_AHB);
+
+
+ _SPI_NFI_REG16_WRITE( _SPI_NFI_REGS_CMD, _SPI_NFI_REGS_CMD_READ_VALUE);
+
+
+ /* Trigger DMA read active*/
+ SPI_NFI_TRIGGER(SPI_NFI_CON_DMA_TRIGGER_READ);
+
+ /* Check read from cache done or not */
+ for( check_cnt = 0 ; check_cnt < _SPI_NFI_CHECK_DONE_MAX_TIMES ; check_cnt ++)
+ {
+ if( (_SPI_NFI_REG16_READ(_SPI_NFI_REGS_SNF_STA_CTL1)& (_SPI_NFI_REGS_READ_FROM_CACHE_DONE)) != 0 )
+ {
+			/* Clearing this bit is necessary for the NFI state machine */
+ _SPI_NFI_REG32_SETBITS(_SPI_NFI_REGS_SNF_STA_CTL1, _SPI_NFI_REGS_READ_FROM_CACHE_DONE);
+ break;
+ }
+ }
+ if(check_cnt == _SPI_NFI_CHECK_DONE_MAX_TIMES)
+ {
+ _SPI_NFI_PRINTF("[Error] Read DMA : Check READ FROM CACHE Done Timeout ! \n");
+ rtn_status = SPI_NFI_RTN_READ_FROM_CACHE_DONE_TIMEOUT;
+ }
+
+
+ /* Check DMA done or not */
+ for( check_cnt = 0 ; check_cnt < _SPI_NFI_CHECK_DONE_MAX_TIMES ; check_cnt ++)
+ {
+ if( (_SPI_NFI_REG16_READ(_SPI_NFI_REGS_INTR)& (_SPI_NFI_REGS_INTR_AHB_DONE_CHECK)) != 0 )
+ {
+ break;
+ }
+ }
+ if(check_cnt == _SPI_NFI_CHECK_DONE_MAX_TIMES)
+ {
+ _SPI_NFI_PRINTF("[Error] Read DMA : Check AHB Done Timeout ! \n");
+ rtn_status = SPI_NFI_RTN_CHECK_AHB_DONE_TIMEOUT;
+ }
+
+	/* NOTE(review): short delay so DMA-read data from the controller has presumably reached DRAM - confirm with HW */
+ udelay(1);
+
+ return (rtn_status);
+}
+
+SPI_NFI_RTN_T SPI_NFI_Write_SPI_NAND_page(SPI_NFI_MISC_SPEDD_CONTROL_T speed_mode, u32 write_cmd, u16 write_addr, u32 *prt_data)
+{
+
+ volatile u32 check_cnt;
+ SPI_NFI_CONF_T *spi_nfi_conf_info_t;
+ SPI_NFI_RTN_T rtn_status = SPI_NFI_RTN_NO_ERROR;
+
+ _SPI_NFI_DEBUG_PRINTF("SPI_NFI_Write_SPI_NAND_page : enter, speed_mode=%d, write_cmd=0x%x, write_addr=0x%x, prt_data=0x%x\n", speed_mode, write_cmd, write_addr, prt_data);
+
+
+ spi_nfi_conf_info_t = _SPI_NFI_GET_CONF_PTR;
+
+ /* Set DMA destination address */
+ _SPI_NFI_REG32_WRITE( _SPI_NFI_REGS_STRADDR, prt_data);
+
+ _SPI_NFI_DEBUG_PRINTF("SPI_NFI_Write_SPI_NAND_page: _SPI_NFI_REGS_STRADDR=0x%x\n", _SPI_NFI_REG32_READ(_SPI_NFI_REGS_STRADDR));
+ _SPI_NFI_DEBUG_PRINTF("SPI_NFI_Write_SPI_NAND_page\n");
+
+ /* Set Write length */
+ if( (spi_nfi_conf_info_t-> cus_sec_size_en_t) == SPI_NFI_CONF_CUS_SEC_SIZE_Disable )
+ {
+ _SPI_NFI_REG32_SETMASKBITS(_SPI_NFI_REGS_SNF_MISC_CTL2, _SPI_NFI_REGS_SNF_MISC_CTL2_WR_MASK, \
+ ((_SPI_NFI_DATA_SIZE_WITH_ECC + (spi_nfi_conf_info_t->spare_size_t)) * (spi_nfi_conf_info_t->sec_num))<< _SPI_NFI_REGS_SNF_MISC_CTL2_WR_SHIFT );
+ }
+ if( (spi_nfi_conf_info_t-> cus_sec_size_en_t) == SPI_NFI_CONF_CUS_SEC_SIZE_Enable )
+ {
+ _SPI_NFI_REG32_SETMASKBITS(_SPI_NFI_REGS_SNF_MISC_CTL2, _SPI_NFI_REGS_SNF_MISC_CTL2_WR_MASK, \
+ ((spi_nfi_conf_info_t->sec_size) * (spi_nfi_conf_info_t->sec_num))<< _SPI_NFI_REGS_SNF_MISC_CTL2_WR_SHIFT );
+ }
+
+ /* Set Write Command */
+ _SPI_NFI_REG32_WRITE( _SPI_NFI_REGS_PG_CTL1, ((write_cmd & 0xFF) << _SPI_NFI_REGS_PG_CTL1_SHIFT));
+
+ /* Set Write mode */
+ _SPI_NFI_REG32_WRITE(_SPI_NFI_REGS_SNF_MISC_CTL, (speed_mode << _SPI_NFI_REGS_SNF_MISC_CTL_DATA_RW_MODE_SHIFT));
+
+ /* Set Write Address (Note : Controller will use following register, depend on the Hardware TRAP of SPI NAND/SPI NOR )*/
+ _SPI_NFI_REG32_WRITE( _SPI_NFI_REGS_PG_CTL2, write_addr); /* Set Address into SPI NAND address register*/
+
+ /* Set NFI Write */
+ _SPI_NFI_REG16_CLRBITS(_SPI_NFI_REGS_CNFG, _SPI_NFI_REGS_CNFG_READ_EN);
+ _SPI_NFI_REG16_SETMASKBITS(_SPI_NFI_REGS_CNFG, _SPI_NFI_REGS_CONF_OP_MASK, \
+ (_SPI_NFI_REGS_CONF_OP_PRGM << _SPI_NFI_REGS_CONF_OP_SHIFT ));
+
+ _SPI_NFI_REG16_SETBITS(_SPI_NFI_REGS_CNFG, _SPI_NFI_REGS_CNFG_AHB);
+ _SPI_NFI_REG16_WRITE( _SPI_NFI_REGS_CMD, _SPI_NFI_REGS_CMD_WRITE_VALUE);
+
+
+ /* Trigger DMA write active*/
+ SPI_NFI_TRIGGER(SPI_NFI_CON_DMA_TRIGGER_WRITE);
+
+	/* NOTE(review): short delay so DMA data from the controller has presumably reached DRAM - confirm with HW */
+ udelay(1);
+
+ /* Check DMA done or not */
+ for( check_cnt = 0 ; check_cnt < _SPI_NFI_CHECK_DONE_MAX_TIMES ; check_cnt ++)
+ {
+ if( (_SPI_NFI_REG16_READ(_SPI_NFI_REGS_INTR)& (_SPI_NFI_REGS_INTR_AHB_DONE_CHECK)) != 0 )
+ {
+ break;
+ }
+ }
+ if(check_cnt == _SPI_NFI_CHECK_DONE_MAX_TIMES)
+ {
+ _SPI_NFI_PRINTF("[Error] Write DMA : Check AHB Done Timeout ! \n");
+ rtn_status = SPI_NFI_RTN_CHECK_AHB_DONE_TIMEOUT;
+ }
+
+
+ /* Check load to cache done or not */
+ for( check_cnt = 0 ; check_cnt < _SPI_NFI_CHECK_DONE_MAX_TIMES ; check_cnt ++)
+ {
+ if( (_SPI_NFI_REG16_READ(_SPI_NFI_REGS_SNF_STA_CTL1)& (_SPI_NFI_REGS_LOAD_TO_CACHE_DONE)) != 0 )
+ {
+			/* Clearing this bit is necessary for the NFI state machine */
+ _SPI_NFI_REG32_SETBITS(_SPI_NFI_REGS_SNF_STA_CTL1, _SPI_NFI_REGS_LOAD_TO_CACHE_DONE);
+ break;
+ }
+ }
+ if(check_cnt == _SPI_NFI_CHECK_DONE_MAX_TIMES)
+ {
+ _SPI_NFI_PRINTF("[Error] Write DMA : Check LOAD TO CACHE Done Timeout ! \n");
+ rtn_status = SPI_NFI_RTN_LOAD_TO_CACHE_DONE_TIMEOUT;
+ }
+
+
+ _SPI_NFI_DEBUG_PRINTF("SPI_NFI_Write_SPI_NAND_page : exit \n");
+
+ return (rtn_status);
+
+}
+
+SPI_NFI_RTN_T SPI_NFI_Read_SPI_NOR(u8 opcode, u16 read_addr, u32 *prt_rtn_data)
+{
+	u32 check_cnt;
+	SPI_NFI_CONF_T *spi_nfi_conf_info_t;
+	SPI_NFI_RTN_T rtn_status = SPI_NFI_RTN_NO_ERROR;
+
+
+	spi_nfi_conf_info_t = _SPI_NFI_GET_CONF_PTR;
+
+	/* Set Read length (fix: shift with SNF_MISC_CTL2_RD_SHIFT as in SPI_NFI_Read_SPI_NAND_Page, not CON_SEC_SHIFT) */
+	if( (spi_nfi_conf_info_t-> cus_sec_size_en_t) == SPI_NFI_CONF_CUS_SEC_SIZE_Disable )
+	{
+		_SPI_NFI_REG32_SETMASKBITS(_SPI_NFI_REGS_SNF_MISC_CTL2, _SPI_NFI_REGS_SNF_MISC_CTL2_RD_MASK, \
+			((_SPI_NFI_DATA_SIZE_WITH_ECC + (spi_nfi_conf_info_t->spare_size_t)) * (spi_nfi_conf_info_t->sec_num))<< _SPI_NFI_REGS_SNF_MISC_CTL2_RD_SHIFT );
+	}
+	if( (spi_nfi_conf_info_t-> cus_sec_size_en_t) == SPI_NFI_CONF_CUS_SEC_SIZE_Enable )
+	{
+		_SPI_NFI_REG32_SETMASKBITS(_SPI_NFI_REGS_SNF_MISC_CTL2, _SPI_NFI_REGS_SNF_MISC_CTL2_RD_MASK, \
+			((spi_nfi_conf_info_t->sec_size) * (spi_nfi_conf_info_t->sec_num))<< _SPI_NFI_REGS_SNF_MISC_CTL2_RD_SHIFT );
+	}
+
+	/* Set Read Command */
+	_SPI_NFI_REG32_WRITE( _SPI_NFI_REGS_RD_CTL2, (u32) opcode);
+
+	/* Set Read Address (Note : Controller will use following register, depend on the Hardware TRAP of SPI NAND/SPI NOR )*/
+	_SPI_NFI_REG32_WRITE( _SPI_NFI_REGS_NOR_RD_ADDR, read_addr);	/* Set Address into SPI NOR address register*/
+
+	/* Reset the NFI state machine and flush the FIFO */
+	_SPI_NFI_REG16_SETBITS( _SPI_NFI_REGS_CON, _SPI_NFI_REGS_CON_RESET_VALUE);
+
+	/* Set NFI Read */
+	_SPI_NFI_REG16_WRITE( _SPI_NFI_REGS_CMD, _SPI_NFI_REGS_CMD_READ_VALUE);
+
+	/* Set DMA destination address */
+	_SPI_NFI_REG32_WRITE( _SPI_NFI_REGS_STRADDR, prt_rtn_data);
+
+	/* Trigger DMA read active*/
+	_SPI_NFI_REG16_CLRBITS( _SPI_NFI_REGS_CON, _SPI_NFI_REGS_CON_RD_TRIG);
+	/* NOTE(review): is a short delay needed between clearing and re-setting the trigger? - confirm with HW */
+	_SPI_NFI_REG16_SETBITS( _SPI_NFI_REGS_CON, _SPI_NFI_REGS_CON_RD_TRIG);
+
+	/* Check DMA done or not */
+	for( check_cnt = 0 ; check_cnt < _SPI_NFI_CHECK_DONE_MAX_TIMES ; check_cnt ++)
+	{
+		if( (_SPI_NFI_REG16_READ(_SPI_NFI_REGS_INTR)& (_SPI_NFI_REGS_INTR_AHB_DONE_CHECK)) != 0 )
+		{
+			break;
+		}
+	}
+	if(check_cnt == _SPI_NFI_CHECK_DONE_MAX_TIMES)
+	{
+		_SPI_NFI_PRINTF("[Error] Read DMA : Check AHB Done Timeout ! \n");
+		rtn_status = SPI_NFI_RTN_CHECK_AHB_DONE_TIMEOUT;
+	}
+
+	return (rtn_status);
+}
+
+SPI_NFI_RTN_T SPI_NFI_Write_SPI_NOR(u8 opcode, u16 write_addr, u32 *prt_data)
+{
+
+	u32 check_cnt;
+	SPI_NFI_CONF_T *spi_nfi_conf_info_t;
+	SPI_NFI_RTN_T rtn_status = SPI_NFI_RTN_NO_ERROR;
+	spi_nfi_conf_info_t = _SPI_NFI_GET_CONF_PTR;	/* fix: pointer was dereferenced below while uninitialized */
+	/* Set Write length (fix: use the WR mask/shift as in SPI_NFI_Write_SPI_NAND_page, not RD mask/CON_SEC_SHIFT) */
+	if( (spi_nfi_conf_info_t-> cus_sec_size_en_t) == SPI_NFI_CONF_CUS_SEC_SIZE_Disable )
+	{
+		_SPI_NFI_REG32_SETMASKBITS(_SPI_NFI_REGS_SNF_MISC_CTL2, _SPI_NFI_REGS_SNF_MISC_CTL2_WR_MASK, \
+			((_SPI_NFI_DATA_SIZE_WITH_ECC + (spi_nfi_conf_info_t->spare_size_t)) * (spi_nfi_conf_info_t->sec_num))<< _SPI_NFI_REGS_SNF_MISC_CTL2_WR_SHIFT );
+	}
+	if( (spi_nfi_conf_info_t-> cus_sec_size_en_t) == SPI_NFI_CONF_CUS_SEC_SIZE_Enable )
+	{
+		_SPI_NFI_REG32_SETMASKBITS(_SPI_NFI_REGS_SNF_MISC_CTL2, _SPI_NFI_REGS_SNF_MISC_CTL2_WR_MASK, \
+			((spi_nfi_conf_info_t->sec_size) * (spi_nfi_conf_info_t->sec_num))<< _SPI_NFI_REGS_SNF_MISC_CTL2_WR_SHIFT );
+	}
+
+	/* Set Write Command */
+	_SPI_NFI_REG32_WRITE( _SPI_NFI_REGS_PG_CTL1, ((u32) opcode) << _SPI_NFI_REGS_PG_CTL1_SHIFT);
+
+	/* Set Write Address (Note : Controller will use following register, depend on the Hardware TRAP of SPI NAND/SPI NOR )*/
+	_SPI_NFI_REG32_WRITE( _SPI_NFI_REGS_NOR_PROG_ADDR, write_addr);	/* Set Address into SPI NOR address register*/
+
+	/* Reset the NFI state machine and flush the FIFO */
+	_SPI_NFI_REG16_SETBITS( _SPI_NFI_REGS_CON, _SPI_NFI_REGS_CON_RESET_VALUE);
+
+	/* Set NFI Write */
+	_SPI_NFI_REG16_WRITE( _SPI_NFI_REGS_CMD, _SPI_NFI_REGS_CMD_WRITE_VALUE);
+
+	/* Set DMA destination address */
+	_SPI_NFI_REG32_WRITE( _SPI_NFI_REGS_STRADDR, prt_data);
+
+	/* Trigger DMA write active*/
+	_SPI_NFI_REG16_CLRBITS( _SPI_NFI_REGS_CON, _SPI_NFI_REGS_CON_WR_TRIG);
+	/* NOTE(review): is a short delay needed between clearing and re-setting the trigger? - confirm with HW */
+	_SPI_NFI_REG16_SETBITS( _SPI_NFI_REGS_CON, _SPI_NFI_REGS_CON_WR_TRIG);
+
+	/* Check DMA done or not */
+	for( check_cnt = 0 ; check_cnt < _SPI_NFI_CHECK_DONE_MAX_TIMES ; check_cnt ++)
+	{
+		if( (_SPI_NFI_REG16_READ(_SPI_NFI_REGS_INTR)& (_SPI_NFI_REGS_INTR_AHB_DONE_CHECK)) != 0 )
+		{
+			break;
+		}
+	}
+	if(check_cnt == _SPI_NFI_CHECK_DONE_MAX_TIMES)
+	{
+		_SPI_NFI_PRINTF("[Error] Write DMA : Check AHB Done Timeout ! \n");
+		rtn_status = SPI_NFI_RTN_CHECK_AHB_DONE_TIMEOUT;
+	}
+
+
+	return (rtn_status);
+
+}
+
+SPI_NFI_RTN_T SPI_NFI_Get_Configure( SPI_NFI_CONF_T *ptr_rtn_nfi_conf_t )
+{
+ SPI_NFI_CONF_T *ptr_spi_nfi_conf_info_t;
+
+ ptr_spi_nfi_conf_info_t = _SPI_NFI_GET_CONF_PTR;
+ _SPI_NFI_MEMCPY(ptr_rtn_nfi_conf_t, ptr_spi_nfi_conf_info_t, sizeof(SPI_NFI_CONF_T));
+
+ return (SPI_NFI_RTN_NO_ERROR);
+}
+
+
+SPI_NFI_RTN_T SPI_NFI_Set_Configure( SPI_NFI_CONF_T *ptr_nfi_conf_t )
+{
+	SPI_NFI_CONF_T *ptr_spi_nfi_conf_info_t;
+
+	/* Store new setting */
+	ptr_spi_nfi_conf_info_t = _SPI_NFI_GET_CONF_PTR;
+	_SPI_NFI_MEMCPY(ptr_spi_nfi_conf_info_t, ptr_nfi_conf_t, sizeof(SPI_NFI_CONF_T));
+
+
+	_SPI_NFI_DEBUG_PRINTF("SPI_NFI_Set_Configure: hw_ecc_t= 0x%x\n", ptr_nfi_conf_t->hw_ecc_t );
+
+	/* Set Auto FDM */
+	if( (ptr_nfi_conf_t->auto_fdm_t) == SPI_NFI_CON_AUTO_FDM_Disable )
+	{
+		_SPI_NFI_REG16_CLRBITS(_SPI_NFI_REGS_CNFG, _SPI_NFI_REGS_CNFG_AUTO_FMT_EN);
+	}
+	if( (ptr_nfi_conf_t->auto_fdm_t) == SPI_NFI_CON_AUTO_FDM_Enable )
+	{
+		_SPI_NFI_REG16_SETBITS(_SPI_NFI_REGS_CNFG, _SPI_NFI_REGS_CNFG_AUTO_FMT_EN);
+	}
+
+	/* Set Hardware ECC */
+	if( (ptr_nfi_conf_t->hw_ecc_t) == SPI_NFI_CON_HW_ECC_Disable )
+	{
+		_SPI_NFI_REG16_CLRBITS(_SPI_NFI_REGS_CNFG, _SPI_NFI_REGS_CNFG_HW_ECC_EN);
+	}
+	if( (ptr_nfi_conf_t->hw_ecc_t) == SPI_NFI_CON_HW_ECC_Enable )
+	{
+		_SPI_NFI_REG16_SETBITS(_SPI_NFI_REGS_CNFG, _SPI_NFI_REGS_CNFG_HW_ECC_EN);
+	}
+
+	/* Set DMA BURST */
+	if( (ptr_nfi_conf_t->dma_burst_t) == SPI_NFI_CON_DMA_BURST_Disable )
+	{
+		_SPI_NFI_REG16_CLRBITS(_SPI_NFI_REGS_CNFG, _SPI_NFI_REGS_CNFG_DMA_BURST_EN);
+	}
+	if( (ptr_nfi_conf_t->dma_burst_t) == SPI_NFI_CON_DMA_BURST_Enable )
+	{
+		_SPI_NFI_REG16_SETBITS(_SPI_NFI_REGS_CNFG, _SPI_NFI_REGS_CNFG_DMA_BURST_EN);
+	}
+
+	/* Set FDM Number */
+	_SPI_NFI_REG16_SETMASKBITS(_SPI_NFI_REGS_PAGEFMT, _SPI_NFI_REGS_PAGEFMT_FDM_MASK, \
+		(ptr_nfi_conf_t->fdm_num)<< _SPI_NFI_REGS_PAGEFMT_FDM_SHIFT );
+
+	/* Set FDM ECC Number */
+	_SPI_NFI_REG16_SETMASKBITS(_SPI_NFI_REGS_PAGEFMT, _SPI_NFI_REGS_PAGEFMT_FDM_ECC_MASK, \
+		(ptr_nfi_conf_t->fdm_ecc_num)<< _SPI_NFI_REGS_PAGEFMT_FDM_ECC_SHIFT );
+
+	/* Set SPARE Size */
+	if( (ptr_nfi_conf_t->spare_size_t) == SPI_NFI_CONF_SPARE_SIZE_16BYTE )
+	{
+		_SPI_NFI_REG16_SETMASKBITS(_SPI_NFI_REGS_PAGEFMT, _SPI_NFI_REGS_PAGEFMT_SPARE_MASK, \
+			_SPI_NFI_REGS_PAGEFMT_SPARE_16 << _SPI_NFI_REGS_PAGEFMT_SPARE_SHIFT );
+	}
+	if( (ptr_nfi_conf_t->spare_size_t) == SPI_NFI_CONF_SPARE_SIZE_26BYTE )
+	{
+		_SPI_NFI_REG16_SETMASKBITS(_SPI_NFI_REGS_PAGEFMT, _SPI_NFI_REGS_PAGEFMT_SPARE_MASK, \
+			_SPI_NFI_REGS_PAGEFMT_SPARE_26 << _SPI_NFI_REGS_PAGEFMT_SPARE_SHIFT );
+	}
+	if( (ptr_nfi_conf_t->spare_size_t) == SPI_NFI_CONF_SPARE_SIZE_27BYTE )
+	{
+		_SPI_NFI_REG16_SETMASKBITS(_SPI_NFI_REGS_PAGEFMT, _SPI_NFI_REGS_PAGEFMT_SPARE_MASK, \
+			_SPI_NFI_REGS_PAGEFMT_SPARE_27 << _SPI_NFI_REGS_PAGEFMT_SPARE_SHIFT );
+	}
+	if( (ptr_nfi_conf_t->spare_size_t) == SPI_NFI_CONF_SPARE_SIZE_28BYTE )
+	{
+		_SPI_NFI_REG16_SETMASKBITS(_SPI_NFI_REGS_PAGEFMT, _SPI_NFI_REGS_PAGEFMT_SPARE_MASK, \
+			_SPI_NFI_REGS_PAGEFMT_SPARE_28 << _SPI_NFI_REGS_PAGEFMT_SPARE_SHIFT );
+	}
+
+	/* Set PAGE Size */
+	if( (ptr_nfi_conf_t->page_size_t) == SPI_NFI_CONF_PAGE_SIZE_512BYTE )
+	{
+		_SPI_NFI_REG16_SETMASKBITS(_SPI_NFI_REGS_PAGEFMT, _SPI_NFI_REGS_PAGEFMT_PAGE_MASK, \
+			_SPI_NFI_REGS_PAGEFMT_PAGE_512 << _SPI_NFI_REGS_PAGEFMT_PAGE_SHIFT );
+	}
+	if( (ptr_nfi_conf_t->page_size_t) == SPI_NFI_CONF_PAGE_SIZE_2KBYTE )
+	{
+		_SPI_NFI_REG16_SETMASKBITS(_SPI_NFI_REGS_PAGEFMT, _SPI_NFI_REGS_PAGEFMT_PAGE_MASK, \
+			_SPI_NFI_REGS_PAGEFMT_PAGE_2K << _SPI_NFI_REGS_PAGEFMT_PAGE_SHIFT );
+	}
+	if( (ptr_nfi_conf_t->page_size_t) == SPI_NFI_CONF_PAGE_SIZE_4KBYTE )
+	{
+		_SPI_NFI_REG16_SETMASKBITS(_SPI_NFI_REGS_PAGEFMT, _SPI_NFI_REGS_PAGEFMT_PAGE_MASK, \
+			_SPI_NFI_REGS_PAGEFMT_PAGE_4K << _SPI_NFI_REGS_PAGEFMT_PAGE_SHIFT );
+	}
+
+	/* Set sector number */
+	_SPI_NFI_REG16_SETMASKBITS(_SPI_NFI_REGS_CON, _SPI_NFI_REGS_CON_SEC_MASK, \
+		(ptr_nfi_conf_t->sec_num)<< _SPI_NFI_REGS_CON_SEC_SHIFT );
+
+	/* Enable Customer setting sector size or not */
+	if( (ptr_nfi_conf_t->cus_sec_size_en_t) == SPI_NFI_CONF_CUS_SEC_SIZE_Disable )
+	{
+		_SPI_NFI_REG32_CLRBITS(_SPI_NFI_REGS_SECCUS_SIZE, _SPI_NFI_REGS_SECCUS_SIZE_EN);
+	}
+	if( (ptr_nfi_conf_t->cus_sec_size_en_t) == SPI_NFI_CONF_CUS_SEC_SIZE_Enable )
+	{
+		_SPI_NFI_REG32_SETBITS(_SPI_NFI_REGS_SECCUS_SIZE, _SPI_NFI_REGS_SECCUS_SIZE_EN);
+	}
+	/* Set Customer sector size */
+	_SPI_NFI_REG32_SETMASKBITS(_SPI_NFI_REGS_SECCUS_SIZE, _SPI_NFI_REGS_SECCUS_SIZE_MASK, \
+		(ptr_nfi_conf_t->sec_size)<< _SPI_NFI_REGS_SECCUS_SIZE_SHIFT );
+	return (SPI_NFI_RTN_NO_ERROR);	/* fix: non-void function previously fell off the end (undefined return value) */
+}
+
+void SPI_NFI_Reset( void )
+{
+	/* Reset the NFI state machine and flush the FIFO */
+ _SPI_NFI_REG16_WRITE( _SPI_NFI_REGS_CON, _SPI_NFI_REGS_CON_RESET_VALUE);
+}
+
+SPI_NFI_RTN_T SPI_NFI_Init( void )
+{
+	_SPI_NFI_REG16_SETBITS( _SPI_NFI_REGS_INTR_EN, _SPI_NFI_REGS_INTR_EN_AHB_DONE_EN);	/* Enable AHB Done Interrupt Function */
+	return (SPI_NFI_RTN_NO_ERROR);	/* fix: non-void function previously fell off the end (undefined return value) */
+}
+
+void SPI_NFI_DEBUG_ENABLE( void )
+{
+ _SPI_NFI_DEBUG_FLAG = 1;
+}
+
+void SPI_NFI_DEBUG_DISABLE( void )
+{
+ _SPI_NFI_DEBUG_FLAG = 0;
+}
+
+/* Trigger DMA read active*/
+void SPI_NFI_TRIGGER(SPI_NFI_CONF_DMA_TRIGGER_T rw)
+{
+ if(rw == SPI_NFI_CON_DMA_TRIGGER_READ) {
+ _SPI_NFI_REG16_CLRBITS( _SPI_NFI_REGS_CON, _SPI_NFI_REGS_CON_RD_TRIG);
+ _SPI_NFI_REG16_SETBITS( _SPI_NFI_REGS_CON, _SPI_NFI_REGS_CON_RD_TRIG);
+ } else {
+ _SPI_NFI_REG16_CLRBITS( _SPI_NFI_REGS_CON, _SPI_NFI_REGS_CON_WR_TRIG);
+ _SPI_NFI_REG16_SETBITS( _SPI_NFI_REGS_CON, _SPI_NFI_REGS_CON_WR_TRIG);
+ }
+}
+
+/* Set DMA(flash -> SRAM) byte swap*/
+void SPI_NFI_DMA_RD_BYTE_SWAP(SPI_NFI_CONF_DMA_RD_BYTE_SWAP_T enable)
+{
+ _SPI_NFI_REG16_SETMASKBITS(_SPI_NFI_REGS_CNFG, _SPI_NFI_REGS_CNFG_DMA_RD_SWAP_MASK, enable << _SPI_NFI_REGS_CNFG_DMA_RD_SWAP_SHIFT);
+}
+
Index: linux-3.18.21/drivers/mtd/chips/spi_nfi.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/chips/spi_nfi.h 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,156 @@
+ /***************************************************************************************
+ * Copyright(c) 2016 ECONET Incorporation All rights reserved.
+ *
+ * This is unpublished proprietary source code of ECONET Incorporation
+ *
+ * The copyright notice above does not evidence any actual or intended
+ * publication of such source code.
+ ***************************************************************************************
+ */
+
+ /*======================================================================================
+ * MODULE NAME: spi
+ * FILE NAME: spi_nfi.h
+ * DATE: 2016/03/18
+ * VERSION: 1.00
+ * PURPOSE: To Provide SPI NFI(DMA) Access Interface.
+ * NOTES:
+ *
+ * AUTHOR : Chuck Kuo REVIEWED by
+ *
+ * FUNCTIONS
+ *
+ * DEPENDENCIES
+ *
+ * * $History: $
+ * MODIFICATION HISTORY:
+ * Version 1.00 - Date 2016/03/18 By Chuck Kuo
+ * ** This is the first version created to support the functions of
+ * current module.
+ *
+ *======================================================================================
+ */
+
+#ifndef __SPI_NFI_H__
+ #define __SPI_NFI_H__
+
+
+/* INCLUDE FILE DECLARATIONS --------------------------------------------------------- */
+#include <asm/types.h>
+
+/* TYPE DECLARATIONS ----------------------------------------------------------------- */
+typedef enum
+{
+ SPI_NFI_CON_AUTO_FDM_Disable=0,
+ SPI_NFI_CON_AUTO_FDM_Enable,
+} SPI_NFI_CONF_AUTO_FDM_T;
+
+typedef enum
+{
+ SPI_NFI_CON_HW_ECC_Disable=0,
+ SPI_NFI_CON_HW_ECC_Enable,
+} SPI_NFI_CONF_HW_ECC_T;
+
+typedef enum
+{
+ SPI_NFI_CON_DMA_TRIGGER_READ=0,
+ SPI_NFI_CON_DMA_TRIGGER_WRITE,
+} SPI_NFI_CONF_DMA_TRIGGER_T;
+
+typedef enum
+{
+ SPI_NFI_CON_DMA_BURST_Disable=0,
+ SPI_NFI_CON_DMA_BURST_Enable,
+} SPI_NFI_CONF_DMA_BURST_T;
+
+typedef enum
+{
+ SPI_NFI_CONF_SPARE_SIZE_16BYTE = 16,
+ SPI_NFI_CONF_SPARE_SIZE_26BYTE = 26,
+ SPI_NFI_CONF_SPARE_SIZE_27BYTE = 27,
+ SPI_NFI_CONF_SPARE_SIZE_28BYTE = 28,
+} SPI_NFI_CONF_SPARE_SIZE_T;
+
+typedef enum
+{
+ SPI_NFI_CONF_PAGE_SIZE_512BYTE = 512,
+ SPI_NFI_CONF_PAGE_SIZE_2KBYTE = 2048,
+ SPI_NFI_CONF_PAGE_SIZE_4KBYTE = 4096,
+} SPI_NFI_CONF_PAGE_SIZE_T;
+
+typedef enum
+{
+ SPI_NFI_CONF_CUS_SEC_SIZE_Disable=0,
+ SPI_NFI_CONF_CUS_SEC_SIZE_Enable,
+} SPI_NFI_CONF_CUS_SEC_SIZE_T;
+
+
+typedef enum
+{
+ SPI_NFI_MISC_CONTROL_X1 = 0x000,
+ SPI_NFI_MISC_CONTROL_X2 = 0x001,
+ SPI_NFI_MISC_CONTROL_X4 = 0x010,
+ SPI_NFI_MISC_CONTROL_DUAL_IO = 0x101,
+ SPI_NFI_MISC_CONTROL_QUAD_IO = 0x110,
+
+} SPI_NFI_MISC_SPEDD_CONTROL_T;
+
+typedef struct SPI_NFI_CONFIGURE
+{
+ SPI_NFI_CONF_AUTO_FDM_T auto_fdm_t; /* auto padding oob behind data, or not */
+ SPI_NFI_CONF_HW_ECC_T hw_ecc_t; /* enable hw ecc or not */
+ SPI_NFI_CONF_DMA_BURST_T dma_burst_t; /* dma burst */
+ u8 fdm_num; /* value range : 0 ~ 8 */
+ u8 fdm_ecc_num; /* value range : 0 ~ 8 */
+ /* fdm byte under ecc protection */
+ SPI_NFI_CONF_SPARE_SIZE_T spare_size_t; /* spare size of eache sector */
+ SPI_NFI_CONF_PAGE_SIZE_T page_size_t; /* page size (not incluing oob size) */
+ u8 sec_num; /* number of sector */
+ /* value range : 1 ~ 8 */
+ SPI_NFI_CONF_CUS_SEC_SIZE_T cus_sec_size_en_t; /* To apply user define sector size or not */
+ /* Disable : sector size = 512 bytes,
+ and ECC function will work */
+ /* Enable : user define sector size,
+ and ECC function will not work */
+ u32 sec_size; /* Only work if cus_sec_size_en is enable */
+ /* value range : 1 ~ 8187 */
+ SPI_NFI_MISC_SPEDD_CONTROL_T speed_t;
+
+} SPI_NFI_CONF_T;
+
+
+
+typedef enum{
+ SPI_NFI_RTN_NO_ERROR =0,
+ SPI_NFI_RTN_CHECK_AHB_DONE_TIMEOUT,
+ SPI_NFI_RTN_LOAD_TO_CACHE_DONE_TIMEOUT,
+ SPI_NFI_RTN_READ_FROM_CACHE_DONE_TIMEOUT,
+
+ SPI_NFI_RTN_DEF_NO
+} SPI_NFI_RTN_T;
+
+typedef enum{
+ SPI_NFI_CONF_DMA_RD_BYTE_SWAP_DISABLE =0,
+ SPI_NFI_CONF_DMA_RD_BYTE_SWAP_ENABLE
+} SPI_NFI_CONF_DMA_RD_BYTE_SWAP_T;
+
+/* EXPORTED SUBPROGRAM SPECIFICATION ------------------------------------------------- */
+
+SPI_NFI_RTN_T SPI_NFI_Regs_Dump( void );
+SPI_NFI_RTN_T SPI_NFI_Read_SPI_NAND_FDM(u8 *ptr_rtn_oob, u32 oob_len);
+SPI_NFI_RTN_T SPI_NFI_Write_SPI_NAND_FDM(u8 *ptr_oob, u32 oob_len);
+SPI_NFI_RTN_T SPI_NFI_Read_SPI_NAND_Page(SPI_NFI_MISC_SPEDD_CONTROL_T speed_mode, u32 read_cmd, u16 read_addr, u32 *prt_rtn_data);
+SPI_NFI_RTN_T SPI_NFI_Write_SPI_NAND_page(SPI_NFI_MISC_SPEDD_CONTROL_T speed_mode, u32 write_cmd, u16 write_addr, u32 *prt_data);
+SPI_NFI_RTN_T SPI_NFI_Read_SPI_NOR(u8 opcode, u16 read_addr, u32 *prt_rtn_data);
+SPI_NFI_RTN_T SPI_NFI_Write_SPI_NOR(u8 opcode, u16 write_addr, u32 *prt_data);
+SPI_NFI_RTN_T SPI_NFI_Get_Configure( SPI_NFI_CONF_T *ptr_rtn_nfi_conf_t );
+SPI_NFI_RTN_T SPI_NFI_Set_Configure( SPI_NFI_CONF_T *ptr_nfi_conf_t );
+void SPI_NFI_Reset( void );
+SPI_NFI_RTN_T SPI_NFI_Init( void );
+void SPI_NFI_DEBUG_ENABLE( void );
+void SPI_NFI_DEBUG_DISABLE( void );
+/* Set DMA(flash -> SRAM) byte swap*/
+void SPI_NFI_DMA_RD_BYTE_SWAP(SPI_NFI_CONF_DMA_RD_BYTE_SWAP_T enable);
+
+#endif /* ifndef __SPI_NFI_H__ */
+/* End of [spi_nfi.h] package */
Index: linux-3.18.21/drivers/mtd/chips/spiflash_tc3162.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/chips/spiflash_tc3162.c 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,1283 @@
+/*
+ * SPIFLASH support for TC3162
+ */
+
+/*
+ * MTD driver for the SPI Flash Memory support.
+ *
+ * Copyright (c) 2005-2006 Atheros Communications Inc.
+ * Copyright (C) 2006-2007 FON Technology, SL.
+ * Copyright (C) 2006-2007 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2006-2007 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+*/
+#include <linux/proc_fs.h>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/semaphore.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/gen_probe.h>
+#include <asm/delay.h>
+#include <asm/io.h>
+#include <asm/tc3162/tc3162.h>
+#include "spiflash_tc3162.h"
+#include <linux/kthread.h>
+
+
+
+/* debugging */
+/* #define SPIFLASH_DEBUG */
+#define TC_SOC
+
+#define MIN(a,b) ((a) < (b) ? (a) : (b))
+#define FALSE 0
+#define TRUE 1
+
+#define ID_MASK 0xffff
+
+#define MANUFACTURER_ID(id) ((id >> 16) & ID_MASK)
+#define DEVICE_ID(id) (id & ID_MASK)
+
+#define SIZE_64KiB 0x10000
+#define SIZE_64MiB 0x4000000
+#define SIZE_32MiB 0x2000000
+#define SIZE_16MiB 0x1000000
+#define SIZE_8MiB 0x800000
+#define SIZE_4MiB 0x400000
+#define SIZE_2MiB 0x200000
+
+/* Manufacturers */
+#define MANUFACTURER_ST 0x0020
+#define MANUFACTURER_WINBOND 0x00ef
+#define MANUFACTURER_SST 0x00bf
+#define MANUFACTURER_MXIC 0x00c2
+#define MANUFACTURER_SPANSION 0x0001
+#define MANUFACTURER_EON 0x001c
+#define MANUFACTURER_NUMONYX 0x0020
+
+/* GD */
+#define MANUFACTURER_GIGADEVICE 0x00c8
+#define GD25Q64 0x4017
+#define GD25Q128 0x4018
+
+/* ST */
+#define M25P16 0x2015
+#define M25P32 0x2016
+#define M25P64 0x2017
+
+/* Winbond */
+#define W25X16 0x3015
+#define W25X32 0x3016
+#define W25X64 0x3017
+#define W25X128 0x3017
+
+#define W25Q16 0x4015
+#define W25Q32 0x4016
+#define W25Q64 0x4017
+#define W25Q128 0x4018
+
+/* SST */
+#define SST25VF032B 0x254a
+
+/* MXIC */
+#define MX25L3205D 0x2016
+#define MX25L6405D 0x2017
+#define MX25L12805D 0x2018
+#define MX25L25635E 0x2019
+
+/* SPANSION */
+#define S25FL016A 0x0214
+#define S25FL032A 0x0215
+#define S25FL064A 0x0216
+#define S25FL128P 0x2018
+#define N25Q064 0xba17
+
+/* EON */
+#define EN25Q64 0x3017
+
+#if defined(TC_SOC)
+static __u32 reg0x28;
+DECLARE_MUTEX(SPI_SEM);//Make sure all related SPI operations are atomic
+#define SPI_REG_BASE 0xbfbc0000
+#define SPI_REG_MASTER 0xbfbc0028
+#define SPI_REG_SPACE_CR 0xbfbc003c
+#define SPI_REG_MOREBUF 0xbfbc002c
+#define SPI_FLASH_DATA2 0x0C
+#define SPI_FLASH_DATA3 0x10
+#define SPI_FLASH_DATA4 0x14
+#define SPI_FLASH_DATA5 0x18
+#define SPI_FLASH_DATA6 0x1C
+#define SPI_FLASH_DATA7 0x20
+#define SPI_FLASH_DATA8 0x24
+#endif
+static __u32 spiflash_regread32(int reg);
+static void spiflash_regwrite32(int reg, __u32 data);
+static __u32 spiflash_sendcmd (int op);
+
+struct spi_flash_info {
+ const u16 mfr_id;
+ const u16 dev_id;
+ const u16 extra_id;
+ const char *name;
+ const int DeviceSize;
+ const int EraseSize;
+};
+
+#if defined(TCSUPPORT_VOIP)
+/*#11542: For voice affected by Flash action issue*/
+typedef struct spiEraseParam_s{
+ struct mtd_info* tmpMtd;
+ struct erase_info* tmpInstr;
+}spiEraseParam_t;
+spiEraseParam_t spiEraseParam;
+static struct timer_list eraseCheck_Timer;
+DECLARE_WAIT_QUEUE_HEAD(spi_erase_wq);
+static struct task_struct *spi_erase_task;
+atomic_t spi_erase_flag = ATOMIC_INIT(0);
+static void spi_erase_alarm_expires(void)
+{
+ atomic_inc(&spi_erase_flag);
+ wake_up_interruptible(&spi_erase_wq);
+}
+#endif
+
+
+struct spi_chip_info {
+ struct spi_flash_info *flash;
+ void (*destroy)(struct spi_chip_info *chip_info);
+
+ u32 (*read)(struct map_info *map, u32 from, u32 to, u32 size);
+ u32 (*write)(struct map_info *map, u32 from, u32 to, u32 size);
+ u32 (*erase)(struct map_info *map, u32 addr);
+};
+
+/* Mapping of generic opcodes to STM serial flash opcodes */
+struct opcodes {
+ __u16 code;
+ __s8 tx_cnt;
+ __s8 rx_cnt;
+} stm_opcodes[] = {
+ {STM_OP_WR_ENABLE, 1, 0},
+ {STM_OP_WR_DISABLE, 1, 0},
+ {STM_OP_RD_STATUS, 1, 1},
+ {STM_OP_WR_STATUS, 2, 0},
+ {STM_OP_RD_DATA, 4, 4},
+ {STM_OP_FAST_RD_DATA, 1, 0},
+ {STM_OP_PAGE_PGRM, 8, 0},
+ {STM_OP_SECTOR_ERASE, 4, 0},
+ {STM_OP_BULK_ERASE, 1, 0},
+ {STM_OP_DEEP_PWRDOWN, 1, 0},
+ {STM_OP_RD_SIG, 4, 1},
+ {STM_OP_RD_ID, 1, 3}
+};
+
+static wait_queue_head_t spiflash_wq;
+static spinlock_t spiflash_mutex;
+static int spiflash_state;
+
+static __u8 byte_program_mode = 0;
+static __u8 four_byte_mode = 0;
+
+static __u32
+spiflash_regread32(int reg)
+{
+ //volatile __u32 *addr = (__u32 *)(CR_SPI_BASE + reg);
+ //return (*addr);
+ return regRead32((__u32 *)(CR_SPI_BASE + reg));
+}
+
+static void
+spiflash_regwrite32(int reg, __u32 data)
+{
+ //volatile __u32 *addr = (__u32 *)(CR_SPI_BASE + reg);
+ regWrite32((__u32 *)(CR_SPI_BASE + reg),data);
+ //*addr = data;
+ return;
+}
+
+static __u32
+spiflash_sendcmd (int op)
+{
+ __u32 reg;
+ __u32 mask;
+ struct opcodes *ptr_opcode;
+
+ ptr_opcode = &stm_opcodes[op];
+
+ do {
+ reg = spiflash_regread32(SPI_FLASH_CTL);
+ } while (reg & SPI_CTL_BUSY);
+
+ spiflash_regwrite32(SPI_FLASH_OPCODE, ptr_opcode->code);
+
+ reg = (reg & ~SPI_CTL_TX_RX_CNT_MASK) | ptr_opcode->tx_cnt |
+ (ptr_opcode->rx_cnt << 4) | SPI_CTL_START;
+
+ spiflash_regwrite32(SPI_FLASH_CTL, reg);
+
+ if (ptr_opcode->rx_cnt > 0) {
+ do {
+ reg = spiflash_regread32(SPI_FLASH_CTL);
+ } while (reg & SPI_CTL_BUSY);
+
+ reg = (__u32) spiflash_regread32(SPI_FLASH_DATA);
+
+ switch (ptr_opcode->rx_cnt) {
+ case 1:
+ mask = 0x000000ff;
+ break;
+ case 2:
+ mask = 0x0000ffff;
+ break;
+ case 3:
+ mask = 0x00ffffff;
+ break;
+ default:
+ mask = 0xffffffff;
+ break;
+ }
+
+ reg &= mask;
+ } else {
+ reg = 0;
+ }
+
+ return reg;
+}
+
+/* Probe SPI flash device
+ * Function returns 0 for failure.
+ * and flashconfig_tbl array index for success.
+ */
+static u32
+spiflash_read_id (void)
+{
+ u32 flash_id;
+
+ flash_id = spiflash_sendcmd(SPI_RD_ID);
+ flash_id = ((flash_id & 0xff) << 16) | (flash_id & 0xff00) | ((flash_id >> 16) & 0xff);
+ return flash_id;
+}
+
+static u32
+spiflash_erase (struct map_info *map, u32 addr)
+{
+ struct opcodes *ptr_opcode;
+ __u32 temp, reg;
+ int finished = FALSE;
+
+ ptr_opcode = &stm_opcodes[SPI_SECTOR_ERASE];
+
+ temp = ((__u32)addr << 8) | (__u32)(ptr_opcode->code);
+ spiflash_sendcmd(SPI_WRITE_ENABLE);
+ do {
+ reg = spiflash_regread32(SPI_FLASH_CTL);
+ } while (reg & SPI_CTL_BUSY);
+
+ spiflash_regwrite32(SPI_FLASH_OPCODE, temp);
+
+ if(four_byte_mode == 1){
+ reg = ((__u32)addr & 0xff000000) |(reg & 0x00ffff00) | (ptr_opcode->tx_cnt+1) | SPI_CTL_START;
+ }else{
+ reg = (reg & ~SPI_CTL_TX_RX_CNT_MASK) | ptr_opcode->tx_cnt | SPI_CTL_START;
+ }
+ spiflash_regwrite32(SPI_FLASH_CTL, reg);
+
+ do {
+ reg = spiflash_sendcmd(SPI_RD_STATUS);
+ if (!(reg & SPI_STATUS_WIP)) {
+ finished = TRUE;
+ }
+ } while (!finished);
+
+ return (0);
+}
+
+#if defined(TCSUPPORT_VOIP)
+static int spiflash_wait_erase_ready(unsigned long data)
+{
+ int ret;
+ unsigned long adr, len;
+ while(!kthread_should_stop())
+ {
+ wait_event_interruptible(spi_erase_wq, (atomic_read(&spi_erase_flag) != 0)); /* replace !=0 with == 1 */
+ atomic_dec(&spi_erase_flag);
+ struct mtd_info* mtd = spiEraseParam.tmpMtd;
+ struct erase_info* instr = spiEraseParam.tmpInstr;
+ struct map_info *map = mtd->priv;
+ struct spi_chip_info *chip_info = (struct spi_chip_info *)map->fldrv_priv;
+ adr = instr->addr;
+ len = instr->len;
+
+
+ while (len) {
+ ret = chip_info->erase(map, adr);
+ adr += mtd->erasesize;
+ len -= mtd->erasesize;
+
+ }
+ up(&SPI_SEM);
+ instr->state = MTD_ERASE_DONE;
+ if (instr->callback)
+ instr->callback(instr);
+ }
+}
+#endif
+
+/* wait until the flash chip is ready and grab a lock */
+static int spiflash_wait_ready(int state)
+{
+ DECLARE_WAITQUEUE(wait, current);
+
+retry:
+ spin_lock_bh(&spiflash_mutex);
+ if (spiflash_state != FL_READY) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&spiflash_wq, &wait);
+ spin_unlock_bh(&spiflash_mutex);
+ schedule();
+ remove_wait_queue(&spiflash_wq, &wait);
+
+ if(signal_pending(current))
+ return 0;
+
+ goto retry;
+ }
+ spiflash_state = state;
+
+ return 1;
+}
+
+static inline void spiflash_done(void)
+{
+ spiflash_state = FL_READY;
+ spin_unlock_bh(&spiflash_mutex);
+ wake_up(&spiflash_wq);
+}
+
+static u32
+spiflash_read (struct map_info *map, u32 from, u32 to, u32 size)
+{
+#ifdef TCSUPPORT_MT7510_E1
+ int i;
+ for(i=0;i<size;i++){
+ *(char *)(to+i) = *(char *)(map->virt + from + i);
+ wmb();
+ if(*(char *)(to+i) == 0){
+ *(char *)(to+i) = *(char *)(map->virt + from + i);
+ wmb();
+ }
+ }
+#else
+ memcpy((char *)to, (char *)(map->virt + from), size);
+#endif
+ return (0);
+}
+
+static u32
+spiflash_write (struct map_info *map, u32 from, u32 to, u32 len)
+{
+ int done = FALSE, page_offset, bytes_left, finished;
+#if defined(TC_SOC)
+ __u32 xact_len, spi_data[8], opcode, reg;
+ __u32 reg_value;
+ unsigned char words, bytes, finalrun, i, j;
+#else
+ __u32 xact_len, spi_data = 0, opcode, reg;
+#endif
+ unsigned char *buf;
+
+ buf = (unsigned char *) from;
+
+ opcode = stm_opcodes[SPI_PAGE_PROGRAM].code;
+ bytes_left = len;
+
+ while (done == FALSE) {
+ if (byte_program_mode)
+ xact_len = MIN(bytes_left, sizeof(__u8));
+ else
+#if defined(TC_SOC)
+ xact_len = MIN(bytes_left, sizeof(__u32)<<3);
+#else
+ xact_len = MIN(bytes_left, sizeof(__u32));
+#endif
+
+ /* 32-bit writes cannot span across a page boundary
+	 * (256 bytes). This type of write requires two page
+ * program operations to handle it correctly. The STM part
+ * will write the overflow data to the beginning of the
+ * current page as opposed to the subsequent page.
+ */
+ page_offset = (to & (STM_PAGE_SIZE - 1)) + xact_len;
+
+ if (page_offset > STM_PAGE_SIZE) {
+ xact_len -= (page_offset - STM_PAGE_SIZE);
+ }
+
+ spiflash_sendcmd(SPI_WRITE_ENABLE);
+
+ do {
+ reg = spiflash_regread32(SPI_FLASH_CTL);
+ } while (reg & SPI_CTL_BUSY);
+
+#if defined(TC_SOC)
+ words = xact_len >> 2;
+// if(!(xact_len % 4))
+ if(!(xact_len & 0x00000003))
+ words--;
+ bytes = 3;
+// finalrun = xact_len % 4 - 1;
+ finalrun = (xact_len & 0x00000003) - 1;
+ if(finalrun == 0xFF)
+ finalrun = 3;
+ for(i = 0; i <= words; i++){
+ spi_data[i] = 0;//Make sure the initial value of spi_data[i] is 0
+ if(i == words)
+ bytes = finalrun;
+ for(j = 0; j <= bytes; j++){
+ spi_data[i] |= (buf[j + (i<<2)] << (j<<3));
+ }
+ }
+#else
+ switch (xact_len) {
+ case 1:
+ spi_data = (__u8)*buf;
+ break;
+ case 2:
+ spi_data = (buf[1] << 8) | buf[0];
+ break;
+ case 3:
+ spi_data = (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ break;
+ case 4:
+ spi_data = (buf[3] << 24) | (buf[2] << 16) |
+ (buf[1] << 8) | buf[0];
+ break;
+ default:
+ printk("spiflash_write: default case\n");
+ break;
+ }
+#endif
+
+#if defined(TC_SOC)
+ if (!byte_program_mode){
+ /*20101119 pork modified for Slic lower SPI speed request*/
+ reg_value = regRead32(SPI_REG_MASTER);
+// VPint(SPI_REG_MASTER) = 0x38804;//Set bit [2] to 1 to enter more buffer mode
+ //VPint(SPI_REG_MASTER) = reg_value | (1 << 2);
+ regWrite32(SPI_REG_MASTER,reg_value|(1<<2));
+	// VPint(SPI_REG_MOREBUF) = 0x20000100;//Set bits [8:0] to 128 (data bit counts) and bits[29:24] to 32(command bit counts)
+ /* write exact data size into flash */
+ if(four_byte_mode == 1){
+ //VPint(SPI_REG_MOREBUF) = 0x28000000|(xact_len<<3);//Set bits [8:0] to data bit counts and bits[29:24] to 40(command bit counts)
+ regWrite32(SPI_REG_MOREBUF,0x28000000|(xact_len<<3));
+ }else{
+ regWrite32(SPI_REG_MOREBUF,0x20000000|(xact_len<<3));
+ //VPint(SPI_REG_MOREBUF) = 0x20000000|(xact_len<<3);//Set bits [8:0] to data bit counts and bits[29:24] to 32(command bit counts)
+ }
+ }
+#endif
+
+#if defined(TC_SOC)
+ spiflash_regwrite32(SPI_FLASH_DATA, spi_data[0]);
+ if (!byte_program_mode){
+ spiflash_regwrite32(SPI_FLASH_DATA2, spi_data[1]);
+ spiflash_regwrite32(SPI_FLASH_DATA3, spi_data[2]);
+ spiflash_regwrite32(SPI_FLASH_DATA4, spi_data[3]);
+ spiflash_regwrite32(SPI_FLASH_DATA5, spi_data[4]);
+ spiflash_regwrite32(SPI_FLASH_DATA6, spi_data[5]);
+ spiflash_regwrite32(SPI_FLASH_DATA7, spi_data[6]);
+ spiflash_regwrite32(SPI_FLASH_DATA8, spi_data[7]);
+ if(four_byte_mode == 1){
+ opcode = ((__u32)to);
+ }else{
+ opcode = (0x02 << 24) | ((__u32)to);
+ }
+ }
+ else
+#else
+ spiflash_regwrite32(SPI_FLASH_DATA, spi_data);
+#endif
+ opcode = (opcode & SPI_OPCODE_MASK) | ((__u32)to << 8);
+
+ spiflash_regwrite32(SPI_FLASH_OPCODE, opcode);
+
+ if(four_byte_mode == 1){
+#if defined(TC_SOC)
+ reg = ((0x02 << 24) | (reg & 0x00ffff00)) | (xact_len + 5) | SPI_CTL_START;
+#else
+ reg = ((__u32)to & 0xff000000 ) | (reg & 0x00ffff00) | (xact_len + 5) | SPI_CTL_START;
+#endif
+ }else{
+ reg = (reg & ~SPI_CTL_TX_RX_CNT_MASK) | (xact_len + 4) | SPI_CTL_START;
+ }
+ spiflash_regwrite32(SPI_FLASH_CTL, reg);
+ finished = FALSE;
+
+#if defined(TC_SOC)
+ if (!byte_program_mode){
+ /*20101119 pork modified for Slic lower SPI speed request*/
+ //while(VPint(SPI_REG_BASE) & 0x10000);//Make sure the bit spi_master_busy is 0 and then continue
+ while(regRead32(SPI_REG_BASE) & 0x10000);//Make sure the bit spi_master_busy is 0 and then continue
+ //VPint(SPI_REG_MOREBUF) = 0x00000000;//Set the default value back
+ regWrite32(SPI_REG_MOREBUF,0x0);//Set the default value back
+// VPint(SPI_REG_MASTER) = 0x38800;//Set the default value back
+ //VPint(SPI_REG_MASTER) = reg_value;
+ regWrite32(SPI_REG_MASTER,reg_value);
+ }
+#endif
+
+ do {
+ udelay(1);
+ reg = spiflash_sendcmd(SPI_RD_STATUS);
+ if (!(reg & SPI_STATUS_WIP)) {
+ finished = TRUE;
+ }
+ } while (!finished);
+
+ bytes_left -= xact_len;
+ to += xact_len;
+ buf += xact_len;
+
+ if (bytes_left == 0) {
+ done = TRUE;
+ }
+ }
+
+ return (0);
+}
+
+static struct spi_flash_info flash_tables[] = {
+ {
+ mfr_id: MANUFACTURER_ST,
+ dev_id: M25P64,
+ name: "ST M25P64",
+ DeviceSize: SIZE_8MiB,
+ EraseSize: SIZE_64KiB,
+ },
+ {
+ mfr_id: MANUFACTURER_ST,
+ dev_id: M25P32,
+ name: "ST M25P32",
+ DeviceSize: SIZE_4MiB,
+ EraseSize: SIZE_64KiB,
+ },
+ {
+ mfr_id: MANUFACTURER_ST,
+ dev_id: M25P16,
+ name: "ST M25P16",
+ DeviceSize: SIZE_2MiB,
+ EraseSize: SIZE_64KiB,
+ },
+ {
+ mfr_id: MANUFACTURER_WINBOND,
+ dev_id: W25X128,
+ name: "Winbond W25X128",
+ DeviceSize: SIZE_16MiB,
+ EraseSize: SIZE_64KiB,
+ },
+ {
+ mfr_id: MANUFACTURER_WINBOND,
+ dev_id: W25X64,
+ name: "Winbond W25X64",
+ DeviceSize: SIZE_8MiB,
+ EraseSize: SIZE_64KiB,
+ },
+ {
+ mfr_id: MANUFACTURER_WINBOND,
+ dev_id: W25X32,
+ name: "Winbond W25X32",
+ DeviceSize: SIZE_4MiB,
+ EraseSize: SIZE_64KiB,
+ },
+ {
+ mfr_id: MANUFACTURER_WINBOND,
+ dev_id: W25X16,
+ name: "Winbond W25X16",
+ DeviceSize: SIZE_2MiB,
+ EraseSize: SIZE_64KiB,
+ },
+ {
+ mfr_id: MANUFACTURER_WINBOND,
+ dev_id: W25Q128,
+ name: "Winbond W25Q128",
+ DeviceSize: SIZE_16MiB,
+ EraseSize: SIZE_64KiB,
+ },
+ {
+ mfr_id: MANUFACTURER_WINBOND,
+ dev_id: W25Q64,
+ name: "Winbond W25Q64",
+ DeviceSize: SIZE_8MiB,
+ EraseSize: SIZE_64KiB,
+ },
+ {
+ mfr_id: MANUFACTURER_WINBOND,
+ dev_id: W25Q32,
+ name: "Winbond W25Q32",
+ DeviceSize: SIZE_4MiB,
+ EraseSize: SIZE_64KiB,
+ },
+ {
+ mfr_id: MANUFACTURER_WINBOND,
+ dev_id: W25Q16,
+ name: "Winbond W25Q16",
+ DeviceSize: SIZE_2MiB,
+ EraseSize: SIZE_64KiB,
+ },
+ {
+ mfr_id: MANUFACTURER_SST,
+ dev_id: SST25VF032B,
+ name: "SST 25VF032B",
+ DeviceSize: SIZE_4MiB,
+ EraseSize: SIZE_64KiB,
+ },
+ {
+ mfr_id: MANUFACTURER_MXIC,
+ dev_id: MX25L3205D,
+ name: "MX25L3205D",
+ DeviceSize: SIZE_4MiB,
+ EraseSize: SIZE_64KiB,
+ },
+ {
+ mfr_id: MANUFACTURER_MXIC,
+ dev_id: MX25L6405D,
+ name: "MX25L6405D",
+ DeviceSize: SIZE_8MiB,
+ EraseSize: SIZE_64KiB,
+ },
+ {
+ mfr_id: MANUFACTURER_MXIC,
+ dev_id: MX25L12805D,
+ name: "MX25L12805D",
+ DeviceSize: SIZE_16MiB,
+ EraseSize: SIZE_64KiB,
+ },
+ {
+ mfr_id: MANUFACTURER_MXIC,
+ dev_id: MX25L25635E,
+ name: "MX25L25635DE",
+ DeviceSize: SIZE_32MiB,
+ EraseSize: SIZE_64KiB,
+ },
+ {
+ mfr_id: MANUFACTURER_SPANSION,
+ dev_id: S25FL064A,
+ name: "S25FL064A",
+ DeviceSize: SIZE_8MiB,
+ EraseSize: SIZE_64KiB,
+ },
+ {
+ mfr_id: MANUFACTURER_SPANSION,
+ dev_id: S25FL032A,
+ name: "S25FL032A",
+ DeviceSize: SIZE_4MiB,
+ EraseSize: SIZE_64KiB,
+ },
+ {
+ mfr_id: MANUFACTURER_SPANSION,
+ dev_id: S25FL016A,
+ name: "S25FL016A",
+ DeviceSize: SIZE_2MiB,
+ EraseSize: SIZE_64KiB,
+ },
+ {
+ mfr_id: MANUFACTURER_SPANSION,
+ dev_id: S25FL128P,
+ name: "S25FL128P",
+ DeviceSize: SIZE_16MiB,
+ EraseSize: SIZE_64KiB,
+ },
+ {
+ mfr_id: MANUFACTURER_EON,
+ dev_id: EN25Q64,
+ name: "EN25Q64",
+ DeviceSize: SIZE_8MiB,
+ EraseSize: SIZE_64KiB,
+ },
+ {
+ mfr_id: MANUFACTURER_NUMONYX,
+ dev_id: N25Q064,
+ name: "N25Q064",
+ DeviceSize: SIZE_8MiB,
+ EraseSize: SIZE_64KiB,
+ },
+ {
+ mfr_id: MANUFACTURER_GIGADEVICE,
+ dev_id: GD25Q128,
+ name: "GD25Q128",
+ DeviceSize: SIZE_16MiB,
+ EraseSize: SIZE_64KiB,
+ //mode: SPI_FAST_RD_QUAD_IO,
+ },
+ {
+ mfr_id: MANUFACTURER_GIGADEVICE,
+ dev_id: GD25Q64,
+ name: "GD25Q64",
+ DeviceSize: SIZE_8MiB,
+ EraseSize: SIZE_64KiB,
+ },
+};
+
+static void spiflash_unlock (void)
+{
+ struct opcodes *ptr_opcode;
+ __u32 reg;
+ __u32 status_reg;
+
+ status_reg = spiflash_sendcmd(SPI_RD_STATUS);
+ ptr_opcode = &stm_opcodes[SPI_WR_STATUS];
+
+ spiflash_sendcmd(SPI_WRITE_ENABLE);
+ do {
+ reg = spiflash_regread32(SPI_FLASH_CTL);
+ } while (reg & SPI_CTL_BUSY);
+
+ status_reg &= ~0x1c;
+ spiflash_regwrite32(SPI_FLASH_DATA, status_reg);
+ spiflash_regwrite32(SPI_FLASH_OPCODE, ptr_opcode->code);
+
+ reg = (reg & ~SPI_CTL_TX_RX_CNT_MASK) | ptr_opcode->tx_cnt | SPI_CTL_START;
+ spiflash_regwrite32(SPI_FLASH_CTL, reg);
+}
+
+static struct spi_chip_info *spiflash_tc3162_setup(struct map_info *map)
+{
+ struct spi_chip_info *chip_info;
+
+ chip_info = kmalloc(sizeof(*chip_info), GFP_KERNEL);
+ if (!chip_info) {
+ printk(KERN_WARNING "Failed to allocate memory for chip_info\n");
+ return NULL;
+ }
+
+ memset(chip_info, 0, sizeof(struct spi_chip_info));
+
+ return chip_info;
+}
+
+static void spiflash_tc3162_destroy(struct spi_chip_info *chip_info)
+{
+ if (chip_info) {
+ kfree(chip_info);
+ }
+}
+
+struct spi_chip_info *spiflash_probe_tc3162(struct map_info *map)
+{
+ int i;
+ struct spi_chip_info *chip_info = NULL;
+ unsigned long flash_id;
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+ if(down_interruptible(&SPI_SEM))
+ return -ERESTARTSYS;
+
+ *((__u32 *)(CR_SPI_BASE | SPI_FLASH_MM)) = reg0x28;
+#endif
+ spin_lock_init(&spiflash_mutex);
+ init_waitqueue_head(&spiflash_wq);
+ spiflash_state = FL_READY;
+
+ flash_id = spiflash_read_id();
+
+ for (i=0; i < ARRAY_SIZE(flash_tables); i++) {
+ if ((MANUFACTURER_ID(flash_id) == flash_tables[i].mfr_id) &&
+ (DEVICE_ID(flash_id) == flash_tables[i].dev_id)) {
+
+ if (MANUFACTURER_ID(flash_id) == MANUFACTURER_SST) {
+ spiflash_unlock();
+ byte_program_mode = 1;
+ }
+
+ if(flash_tables[i].DeviceSize >= SIZE_32MiB){
+ int tmpVal;
+ tmpVal = regRead32(SPI_REG_BASE);
+ tmpVal |=((1<<19)|(1<<20));
+ //VPint(SPI_REG_BASE) |= ((1<<19)|(1<<20));
+ regWrite32(SPI_REG_BASE,tmpVal);
+ four_byte_mode = 1;
+ }
+ chip_info = spiflash_tc3162_setup(map);
+ if (chip_info) {
+ chip_info->flash = &flash_tables[i];
+ chip_info->destroy = spiflash_tc3162_destroy;
+
+ chip_info->read = spiflash_read;
+ chip_info->write = spiflash_write;
+ chip_info->erase = spiflash_erase;
+ }
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+ up(&SPI_SEM);
+#endif
+ return chip_info;
+ }
+ }
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+ up(&SPI_SEM);
+#endif
+ return NULL;
+}
+
+#if defined(TCSUPPORT_VOIP)
+int mtd_spiflash_erase_sp(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct map_info *map = mtd->priv;
+ struct spi_chip_info *chip_info = (struct spi_chip_info *)map->fldrv_priv;
+ unsigned long adr, len;
+ int ret = 0;
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+ if(down_interruptible(&SPI_SEM))
+ return -ERESTARTSYS;
+
+ *((__u32 *)(CR_SPI_BASE | SPI_FLASH_MM)) = reg0x28;
+#endif
+
+ if (instr->addr & (mtd->erasesize - 1)){
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+ up(&SPI_SEM);
+#endif
+ return -EINVAL;
+ }
+
+ if (instr->len & (mtd->erasesize -1)){
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+ up(&SPI_SEM);
+#endif
+ return -EINVAL;
+ }
+ if ((instr->len + instr->addr) > mtd->size){
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+ up(&SPI_SEM);
+#endif
+ return -EINVAL;
+ }
+
+ memset(&spiEraseParam,0,sizeof(spiEraseParam));
+ spiEraseParam.tmpMtd = mtd;
+ spiEraseParam.tmpInstr = instr;
+ spi_erase_alarm_expires();
+ return 0;
+}
+#endif
+
+int mtd_spiflash_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct map_info *map = mtd->priv;
+ struct spi_chip_info *chip_info = (struct spi_chip_info *)map->fldrv_priv;
+ unsigned long adr, len;
+ int ret = 0;
+ __u32 reg = 0;
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+ if(down_interruptible(&SPI_SEM))
+ return -ERESTARTSYS;
+
+#if defined(TCSUPPORT_VOIP)
+ /*63368 Register space clk = 233/(5+2) Mhz*/
+ if(isRT63368){
+ reg0x28 &= (0xf000ffff);
+ reg0x28 |= (0x5 << 16);
+ }else if (isRT65168 || isTC3182) {
+ reg0x28 &= (0xf000ffff);
+ reg0x28 |= (0x1 << 16);
+ /*3182 65168 Register space clk = 102/(1+2) Mhz*/
+ }
+#endif
+
+
+ *((__u32 *)(CR_SPI_BASE | SPI_FLASH_MM)) = reg0x28;
+ do {
+ reg = spiflash_regread32(SPI_FLASH_CTL);
+ } while (reg & SPI_CTL_BUSY);
+#endif
+ if (!chip_info->erase){
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+ up(&SPI_SEM);
+#endif
+ return -EOPNOTSUPP;
+ }
+
+ if (instr->addr & (mtd->erasesize - 1)){
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+ up(&SPI_SEM);
+#endif
+ return -EINVAL;
+ }
+
+ if (instr->len & (mtd->erasesize -1)){
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+ up(&SPI_SEM);
+#endif
+ return -EINVAL;
+ }
+
+ if ((instr->len + instr->addr) > mtd->size){
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+ up(&SPI_SEM);
+#endif
+ return -EINVAL;
+ }
+
+ adr = instr->addr;
+ len = instr->len;
+
+ while (len) {
+ if (!spiflash_wait_ready(FL_ERASING)){
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+ up(&SPI_SEM);
+#endif
+ return -EINTR;
+ }
+ ret = chip_info->erase(map, adr);
+ spiflash_done();
+ if (ret){
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+ up(&SPI_SEM);
+#endif
+ return ret;
+ }
+
+ adr += mtd->erasesize;
+ len -= mtd->erasesize;
+ }
+
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+ up(&SPI_SEM);
+#endif
+
+ instr->state = MTD_ERASE_DONE;
+ if (instr->callback)
+ instr->callback(instr);
+
+ return 0;
+}
+
+int mtd_spiflash_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct spi_chip_info *chip_info = (struct spi_chip_info *)map->fldrv_priv;
+ int ret = 0;
+ int reg = 0;
+ int finished = FALSE;
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+ if(down_interruptible(&SPI_SEM))
+ return -ERESTARTSYS;
+ int breakCount =0;
+ do {
+ breakCount++;
+ udelay(1);
+ reg = spiflash_sendcmd(SPI_RD_STATUS);
+ if(!(reg & SPI_STATUS_WIP)){
+ finished = TRUE;
+ }
+
+ /*workaround solution when flash status is always in WIP*/
+ if(breakCount>1000){
+ break;
+ }
+
+ }while(!finished);
+
+
+ *((__u32 *)(CR_SPI_BASE | SPI_FLASH_MM)) = reg0x28;
+#endif
+ if (!chip_info->read){
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+ up(&SPI_SEM);
+#endif
+ return -EOPNOTSUPP;
+ }
+
+ ret = chip_info->read(map, from, (u32)buf, len);
+ if (ret){
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+ up(&SPI_SEM);
+#endif
+ return ret;
+ }
+
+ if(retlen)
+ (*retlen) = len;
+
+ finished = FALSE;
+ do {
+ udelay(1);
+ reg = spiflash_sendcmd(SPI_RD_STATUS);
+ if(!(reg & SPI_STATUS_WIP)){
+ finished = TRUE;
+ }
+ }while(!finished);
+
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+ up(&SPI_SEM);
+#endif
+ return 0;
+}
+
+int mtd_spiflash_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct spi_chip_info *chip_info = (struct spi_chip_info *)map->fldrv_priv;
+ int ret = 0;
+ __u32 reg = 0;
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+ if(down_interruptible(&SPI_SEM))
+ return -ERESTARTSYS;
+
+
+#if defined(TCSUPPORT_VOIP)
+ /*63368 Register space clk = 233/(5+2) Mhz*/
+ if(isRT63368){
+ reg0x28 &= (0xf000ffff);
+ reg0x28 |= (0x5 << 16);
+ }else if (isRT65168 || isTC3182) {
+ reg0x28 &= (0xf000ffff);
+ reg0x28 |= (0x1 << 16);
+ /*3182 65168 Register space clk = 102/(1+2) Mhz*/
+ }
+#endif
+ *((__u32 *)(CR_SPI_BASE | SPI_FLASH_MM)) = reg0x28;
+ do {
+ reg = spiflash_regread32(SPI_FLASH_CTL);
+ } while (reg & SPI_CTL_BUSY);
+#endif
+ if (!chip_info->write){
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+ up(&SPI_SEM);
+#endif
+ return -EOPNOTSUPP;
+ }
+
+ if (!spiflash_wait_ready(FL_WRITING)){
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+ up(&SPI_SEM);
+#endif
+ return -EINTR;
+ }
+ ret = chip_info->write(map, (u32)buf, to, len);
+ spiflash_done();
+ if (ret){
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+ up(&SPI_SEM);
+#endif
+ return ret;
+ }
+
+ if (retlen)
+ (*retlen) = len;
+
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+ up(&SPI_SEM);
+#endif
+ return 0;
+}
+
+#ifdef TCSUPPORT_DUAL_IMAGE_ENHANCE
+int offset = 0;
+#endif
+
+static struct mtd_info *spiflash_probe(struct map_info *map)
+{
+ struct spi_chip_info *chip_info = NULL;
+ struct mtd_info *mtd;
+
+ chip_info = spiflash_probe_tc3162(map);
+ if (!chip_info)
+ return NULL;
+
+ mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
+ if (!mtd) {
+ printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
+ return NULL;
+ }
+
+ mtd->priv = map;
+ mtd->type = MTD_NORFLASH;
+#if defined(TCSUPPORT_VOIP)
+ mtd->_erase = mtd_spiflash_erase_sp;
+#else
+ mtd->_erase = mtd_spiflash_erase;
+#endif
+
+
+ mtd->_write = mtd_spiflash_write;
+ mtd->_read = mtd_spiflash_read;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->name = map->name;
+ mtd->writesize = 1;
+ mtd->owner = THIS_MODULE;
+
+ mtd->size = chip_info->flash->DeviceSize;
+ mtd->erasesize = chip_info->flash->EraseSize;
+
+ map->fldrv_priv = chip_info;
+
+ printk(KERN_INFO "%s: Found SPIFLASH %dMiB %s\n",
+ map->name, chip_info->flash->DeviceSize/(1024*1024), chip_info->flash->name);
+#ifdef TCSUPPORT_DUAL_IMAGE_ENHANCE
+#ifdef TCSUPPORT_DUAL_IMAGE_8M
+ offset = 0x3e0000;
+#else
+ offset = chip_info->flash->DeviceSize/2;
+#endif
+#endif
+ return mtd;
+}
+
+static void spiflash_destroy(struct mtd_info *mtd)
+{
+ struct map_info *map = (struct map_info *)mtd->priv;
+ struct spi_chip_info *chip_info = (struct spi_chip_info *)map->fldrv_priv;
+
+ if (chip_info->destroy) {
+ chip_info->destroy(chip_info);
+ }
+}
+
+static struct mtd_chip_driver spiflash_chipdrv = {
+ .probe = spiflash_probe,
+ .destroy = spiflash_destroy,
+ .name = "spiflash_probe",
+ .module = THIS_MODULE
+};
+
+#if defined(TCSUPPORT_SUPPORT_FLASH)
+static int read_proc_support_flash(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+ unsigned int flash_id;
+ int index_flash;
+ char *buf_proc = NULL;
+ char buf_line[200];
+ char buf_name[100];
+ char *buf_replace = NULL;
+ int total_len=0;
+
+
+ buf_proc = kmalloc(4*1024, GFP_KERNEL);
+ if (!buf_proc)
+ {
+ printk(KERN_WARNING "Failed to allocate memory for /proc/tc3162/support_flash\n");
+ return 0;
+ }
+
+ memset(buf_proc,0,4*1024);
+
+ for(index_flash=0; index_flash < sizeof(flash_tables)/sizeof(struct spi_flash_info); index_flash++)
+ {
+		strcpy(buf_name,flash_tables[index_flash].name);//replace whitespace with '#'
+ while( (buf_replace=strstr(buf_name, " "))!=NULL)
+ *buf_replace='#';
+
+ if(flash_tables[index_flash].DeviceSize/0x100000 <4)
+ continue;
+
+ flash_id = (flash_tables[index_flash].mfr_id <<16) | ( flash_tables[index_flash].dev_id & 0xffff);
+
+ sprintf(buf_line,"%s %#x %d \n",buf_name , flash_id,
+ flash_tables[index_flash].DeviceSize/0x100000);
+
+ total_len += strlen(buf_line);
+ if(total_len>4*1024)
+ break;
+ strcat(buf_proc,buf_line);
+ }
+
+ len = sprintf(page, "%s", buf_proc);
+
+ len -= off;
+ *start = page + off;
+
+ if (len > count)
+ len = count;
+ else
+ *eof = 1;
+
+ if (len < 0)
+ len = 0;
+
+ kfree(buf_proc);
+ return len;
+}
+#endif
+
+static int __init spiflash_probe_init(void)
+{
+#if defined(TCSUPPORT_SUPPORT_FLASH)
+ create_proc_read_entry("tc3162/support_flash", 0, NULL, read_proc_support_flash, NULL);
+#endif
+
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+
+ if (isRT63365){
+ /*63368 Flash space clk = 233/(5+2) Mhz, 63365 Flash space clk = 166/(2+2) Mhz*/
+ if(isRT63368){
+ //VPint(SPI_REG_SPACE_CR) = (VPint(SPI_REG_SPACE_CR) & 0xfffffff0) | (0x5);
+ regWrite32(SPI_REG_SPACE_CR,(regRead32(SPI_REG_SPACE_CR) & 0xfffffff0) | (0x5));
+ }else{
+ //VPint(SPI_REG_SPACE_CR) = (VPint(SPI_REG_SPACE_CR) & 0xfffffff0) | (0x4);
+ regWrite32(SPI_REG_SPACE_CR,(regRead32(SPI_REG_SPACE_CR) & 0xfffffff0) | (0x4));
+ }
+ }else if (isRT65168 || isTC3182){
+ //VPint(SPI_REG_SPACE_CR) = (VPint(SPI_REG_SPACE_CR) & 0xfffffff0) | (0x1);
+ regWrite32(SPI_REG_SPACE_CR, (regRead32(SPI_REG_SPACE_CR) & 0xfffffff0) | (0x1));
+ /*3182 65168 Flash space clk = 102/(1+2) Mhz*/
+ }
+ reg0x28 = *((__u32 *)(CR_SPI_BASE | SPI_FLASH_MM));
+
+ if (isRT63365){
+ /*63368 Register space clk = 233/(5+2) Mhz, 63365 Register space clk = 166/(4+2) Mhz*/
+ if (isRT63368) {
+ reg0x28 &= (0xf000ffff);
+ reg0x28 |= (0x5 << 16);
+ }else{
+ reg0x28 &= (0xf000ffff);
+ reg0x28 |= (0x4 << 16);
+ }
+ }else if (isRT65168 || isTC3182) {
+ reg0x28 &= (0xf000ffff);
+ reg0x28 |= (0x1 << 16);
+ /*3182 65168 Register space clk = 102/(1+2) Mhz*/
+ }
+ //VPint(SPI_REG_MASTER) = reg0x28;
+ regWrite32(SPI_REG_MASTER, reg0x28);
+#endif
+ register_mtd_chip_driver(&spiflash_chipdrv);
+
+#if defined(TCSUPPORT_VOIP)
+ spi_erase_task = kthread_run(spiflash_wait_erase_ready, NULL, "spi_erase_task");
+ if(IS_ERR(spi_erase_task))
+ {
+ printk("@%s>>%d--spi_erase_task init failed\n", __func__, __LINE__);
+ return -1;
+ }
+#endif
+
+ return 0;
+}
+
+static void __exit spiflash_probe_exit(void)
+{
+#if defined(TCSUPPORT_SUPPORT_FLASH)
+ remove_proc_entry("tc3162/support_flash", NULL);
+#endif
+
+ unregister_mtd_chip_driver(&spiflash_chipdrv);
+}
+
+module_init(spiflash_probe_init);
+module_exit(spiflash_probe_exit);
+#if defined(TC_SOC) && defined(CONFIG_MIPS_TC3262)
+EXPORT_SYMBOL(SPI_SEM);
+#endif
+
Index: linux-3.18.21/drivers/mtd/chips/spiflash_tc3162.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/chips/spiflash_tc3162.h 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,86 @@
+/*
+ * SPIFLASH support for TC3162
+ */
+
+/*
+ * SPI Flash Memory support header file.
+ *
+ * Copyright (c) 2005, Atheros Communications Inc.
+ * Copyright (C) 2006 FON Technology, SL.
+ * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __SPI_TC3162_H
+#define __SPI_TC3162_H
+
+#define STM_PAGE_SIZE 256
+
+#define SPI_WRITE_ENABLE 0
+#define SPI_WRITE_DISABLE 1
+#define SPI_RD_STATUS 2
+#define SPI_WR_STATUS 3
+#define SPI_RD_DATA 4
+#define SPI_FAST_RD_DATA 5
+#define SPI_PAGE_PROGRAM 6
+#define SPI_SECTOR_ERASE 7
+#define SPI_BULK_ERASE 8
+#define SPI_DEEP_PWRDOWN 9
+#define SPI_RD_SIG 10
+#define SPI_RD_ID 11
+#define SPI_MAX_OPCODES 12
+
+#define SFI_WRITE_BUFFER_SIZE 4
+#define SFI_FLASH_ADDR_MASK 0x00ffffff
+
+/*
+ * ST Microelectronics Opcodes for Serial Flash
+ */
+
+#define STM_OP_WR_ENABLE 0x06 /* Write Enable */
+#define STM_OP_WR_DISABLE 0x04 /* Write Disable */
+#define STM_OP_RD_STATUS 0x05 /* Read Status */
+#define STM_OP_WR_STATUS 0x01 /* Write Status */
+#define STM_OP_RD_DATA 0x03 /* Read Data */
+#define STM_OP_FAST_RD_DATA 0x0b /* Fast Read Data */
+#define STM_OP_PAGE_PGRM 0x02 /* Page Program */
+#define STM_OP_SECTOR_ERASE 0xd8 /* Sector Erase */
+#define STM_OP_BULK_ERASE 0xc7 /* Bulk Erase */
+#define STM_OP_DEEP_PWRDOWN 0xb9 /* Deep Power-Down Mode */
+#define STM_OP_RD_SIG 0xab /* Read Electronic Signature */
+#define STM_OP_RD_ID 0x9f /* Read JEDEC ID */
+
+#define STM_STATUS_WIP 0x01 /* Write-In-Progress */
+#define STM_STATUS_WEL 0x02 /* Write Enable Latch */
+#define STM_STATUS_BP0 0x04 /* Block Protect 0 */
+#define STM_STATUS_BP1 0x08 /* Block Protect 1 */
+#define STM_STATUS_BP2 0x10 /* Block Protect 2 */
+#define STM_STATUS_SRWD 0x80 /* Status Register Write Disable */
+
+#define SPI_FLASH_CTL 0x00
+#define SPI_FLASH_OPCODE 0x04
+#define SPI_FLASH_DATA 0x08
+#define SPI_FLASH_MM 0x28
+
+#define SPI_CTL_START 0x00000100
+#define SPI_CTL_BUSY 0x00010000
+#define SPI_CTL_TXCNT_MASK 0x0000000f
+#define SPI_CTL_RXCNT_MASK 0x000000f0
+#define SPI_CTL_TX_RX_CNT_MASK 0x000000ff
+#define SPI_CTL_SIZE_MASK 0x00060000
+
+#define SPI_CTL_CLK_SEL_MASK 0x03000000
+#define SPI_OPCODE_MASK 0x000000ff
+
+#define SPI_STATUS_WIP STM_STATUS_WIP
+
+#ifdef TCSUPPORT_DUAL_IMAGE_ENHANCE
+extern int offset;
+#endif
+
+
+#endif
Index: linux-3.18.21/drivers/mtd/econet/Kconfig
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/econet/Kconfig 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,11 @@
+menu "Ralink Non-volatile Device Drivers"
+ depends on MTD!=n
+
+config MTD_NAND_RALINK
+ tristate "NAND Flash Support"
+ depends on MIPS_RT63365
+ select MTD_PARTITIONS
+ help
+ support Ralink 3052 nand flash controller.
+
+endmenu
Index: linux-3.18.21/drivers/mtd/econet/Makefile
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/econet/Makefile 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,4 @@
+obj-$(CONFIG_MTD_NAND_RALINK) += rt_nand.o
+obj-$(TCSUPPORT_NAND_BMT) += bmt.o
+
+rt_nand-objs = ralink_nand.o nand_verify.o gdma.o
Index: linux-3.18.21/drivers/mtd/econet/bmt.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/econet/bmt.c 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,1186 @@
+
+#include <linux/types.h>
+
+#include "bmt.h"
+
+typedef struct {
+ char signature[3];
+ u8 version;
+ u8 bad_count; // bad block count in pool
+ u8 mapped_count; // mapped block count in pool
+ u8 checksum;
+ u8 reseverd[13];
+} phys_bmt_header;
+
+typedef struct {
+ phys_bmt_header header;
+ bmt_entry table[MAX_BMT_SIZE];
+} phys_bmt_struct;
+
+typedef struct {
+ char signature[3];
+} bmt_oob_data;
+
+typedef struct {
+ char signature[4];
+ u32 checksum;
+ u8 version;
+ u8 badblock_count;
+ u8 reserved[2];
+}init_table_header;
+
+typedef struct {
+ init_table_header header;
+ u16 badblock_table[MAX_RAW_BAD_BLOCK_SIZE];
+}init_table_struct;
+
+static char BBT_SIGNATURE[] = "RAWB";
+#define BBT_SIGNATURE_SIZE (4)
+
+static char MAIN_SIGNATURE[] = "BMT";
+#define SIGNATURE_SIZE (3)
+
+#define MAX_DAT_SIZE 0x1000
+#define MAX_OOB_SIZE 0x100
+#define MAX_RAW_BBT_SIZE 65536
+
+static u8 need_write_bmt_to_nand = 0;
+static u8 need_write_bbt_to_nand = 0;
+
+#define __UBOOT_NAND__
+#if defined(TCSUPPORT_CPU_MT7510)||defined(TCSUPPORT_CPU_MT7520)||defined(TCSUPPORT_CPU_EN7512)||defined(TCSUPPORT_CPU_EN7521)
+#if 0
+#define MSG(args...) printk(args)
+#else
+#define MSG(args...) do{}while(0)
+#endif
+
+
+
+static struct mtd_info *mtd_bmt;
+static struct nand_chip *nand_chip_bmt;
+#define BLOCK_SIZE_BMT (1 << nand_chip_bmt->phys_erase_shift)
+#define PAGE_SIZE_BMT (1 << nand_chip_bmt->page_shift)
+#else
+static struct ra_nand_chip *nand_chip_bmt = NULL;
+
+#define BLOCK_SIZE_BMT (1 << nand_chip_bmt->flash->erase_shift)
+#define PAGE_SIZE_BMT (1 << nand_chip_bmt->flash->page_shift)
+#endif
+
+#define OFFSET(block) ((block) * BLOCK_SIZE_BMT)
+#define PAGE_ADDR(block) ((block) * BLOCK_SIZE_BMT / PAGE_SIZE_BMT)
+
+/*********************************************************************
+* Flash is split into 2 parts, the system part is for normal  *
+* system usage, size is system_block_count, another is replace pool *
+* +-------------------------------------------------+ *
+* | system_block_count | bmt_block_count | *
+* +-------------------------------------------------+ *
+*********************************************************************/
+static u32 total_block_count; // block number in flash
+static u32 system_block_count; // system block number
+static int bmt_block_count; // bmt block number
+
+int nand_logic_size; // logic size
+int nand_flash_avalable_size;
+static int page_per_block; // pages per block
+static int oob_bad_index_offset = OOB_INDEX_OFFSET; // bad index offset in oob
+
+static u32 bmt_block_index = 0; // bmt block index
+static bmt_struct bmt; // dynamic created global bmt table
+static phys_bmt_struct phys_bmt_table; // global physical bmt table
+
+static u32 bbt_block_index = 0; // bbt block index
+static init_bbt_struct init_bbt; // dynamic created global bbt table
+static u16 pBbt[MAX_RAW_BBT_SIZE]; // raw badblock table
+
+static u8 dat_buf[MAX_DAT_SIZE];
+static u8 oob_buf[MAX_OOB_SIZE];
+static bool pool_erased;
+
+/***************************************************************
+*
+* Interface adaptor for preloader/uboot/kernel
+* These interfaces operate on physical address, read/write
+* physical data.
+*
+***************************************************************/
+
+int nand_read_page_bmt(u32 page, u8 *dat, u8 *oob)
+{
+#if defined(TCSUPPORT_CPU_EN7512) || defined(TCSUPPORT_CPU_EN7521)
+ return en7512_nand_exec_read_page(page, dat, oob);
+#else
+#if defined(TCSUPPORT_CPU_MT7510)||defined(TCSUPPORT_CPU_MT7520)
+ return mt6573_nand_exec_read_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob);
+#else
+ return mt6573_nand_exec_read_page(nand_chip_bmt, page, PAGE_SIZE_BMT, dat, oob);
+#endif
+#endif
+}
+
+int nand_block_bad_bmt(u32 offset, u32 bmt_block)
+{
+#if defined(TCSUPPORT_CPU_EN7512) || defined(TCSUPPORT_CPU_EN7521)
+ return en7512_nand_check_block_bad(offset, bmt_block);
+#else
+#if defined(TCSUPPORT_CPU_MT7510)||defined(TCSUPPORT_CPU_MT7520)
+ return mt6573_nand_block_bad_hw(mtd_bmt, offset, bmt_block);
+#else
+ return mt6573_nand_block_bad_hw(nand_chip_bmt, offset, bmt_block);
+#endif
+#endif
+}
+
+int nand_erase_bmt(u32 offset)
+{
+#if defined(TCSUPPORT_CPU_EN7512) || defined(TCSUPPORT_CPU_EN7521)
+ return en7512_nand_erase(offset);
+#else
+#if defined(TCSUPPORT_CPU_MT7510) || defined(TCSUPPORT_CPU_MT7520)
+ int status;
+ status = mt6573_nand_erase_hw(mtd_bmt, offset / PAGE_SIZE_BMT);
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+ else
+ return 0;
+#else
+ return mt6573_nand_erase_hw(nand_chip_bmt, offset / PAGE_SIZE_BMT);
+#endif
+#endif
+
+}
+
+int mark_block_bad_bmt(u32 offset, u32 bmt_block)
+{
+#if defined(TCSUPPORT_CPU_EN7512) || defined(TCSUPPORT_CPU_EN7521)
+ return en7512_nand_mark_badblock(offset, bmt_block);
+#else
+#if defined(TCSUPPORT_CPU_MT7510)||defined(TCSUPPORT_CPU_MT7520)
+ return mt6573_nand_block_markbad_hw(mtd_bmt, offset, bmt_block);
+#else
+ return mt6573_nand_block_markbad_hw(nand_chip_bmt, offset, bmt_block);
+#endif
+#endif
+}
+
+int nand_write_page_bmt(u32 page, u8 *dat, u8 *oob)
+{
+#if defined(TCSUPPORT_CPU_EN7512) || defined(TCSUPPORT_CPU_EN7521)
+ return en7512_nand_exec_write_page(page, dat, oob);
+#else
+#if defined(TCSUPPORT_CPU_MT7510)||defined(TCSUPPORT_CPU_MT7520)
+ return mt6573_nand_exec_write_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob);
+#else
+ return mt6573_nand_exec_write_page(nand_chip_bmt, page, PAGE_SIZE_BMT, dat, oob);
+#endif
+#endif
+}
+
+/***************************************************************
+* *
+* static internal function *
+* *
+***************************************************************/
+static void dump_bmt_info(bmt_struct *bmt)
+{
+ int i;
+
+ MSG("BMT v%d.", bmt->version);
+ MSG("total %d mapping\n", bmt->mapped_count);
+ for (i = 0; i < bmt->mapped_count; i++)
+ {
+ MSG("%d -> %d \n", bmt->table[i].bad_index, bmt->table[i].mapped_index);
+ }
+}
+
+static bool match_bmt_signature(u8 *dat, u8 *oob)
+{
+ if (memcmp(dat + MAIN_SIGNATURE_OFFSET, MAIN_SIGNATURE, SIGNATURE_SIZE))
+ {
+ return false;
+ }
+
+ return true;
+}
+
+static bool match_bbt_signature(u8 *dat, u8 *oob)
+{
+ if (memcmp(dat + BBT_SIGNATURE_OFFSET, BBT_SIGNATURE, BBT_SIGNATURE_SIZE))
+ {
+ return false;
+ }
+
+ return true;
+
+}
+
+static u8 cal_bmt_checksum(phys_bmt_struct *phys_table, int bmt_size)
+{
+ int i;
+ u8 checksum = 0;
+ u8 *dat = (u8 *)phys_table;
+
+ checksum += phys_table->header.version;
+ checksum += phys_table->header.mapped_count;
+
+ dat += sizeof(phys_bmt_header);
+ for (i = 0; i < bmt_size * sizeof(bmt_entry); i++)
+ {
+ checksum += dat[i];
+ }
+
+ return checksum;
+}
+
+static u16 cal_bbt_checksum(init_table_struct *bbt_table)
+{
+ int i;
+ u16 checksum = 0;
+ u8 *dat = (u8*)bbt_table;
+
+ checksum += bbt_table->header.version;
+ checksum += bbt_table->header.badblock_count;
+
+ dat += sizeof(init_table_header);
+
+ for (i = 0; i < sizeof(bbt_table->badblock_table); i++)
+ {
+ checksum += dat[i];
+
+ }
+
+ return checksum;
+
+}
+
+static int is_block_mapped(int index)
+{
+ int i;
+ for (i = 0; i < bmt.mapped_count; i++)
+ {
+ if (index == bmt.table[i].mapped_index)
+ return i;
+ }
+ return -1;
+}
+
+static int is_badblock_raw(u16 index)
+{
+ int i;
+
+ for (i = 0; i < bmt.mapped_count; i++)
+ {
+ if (index == bmt.table[i].bad_index)
+ return 0;
+ }
+ return 1;
+
+}
+
+static bool valid_bmt_data(phys_bmt_struct *phys_table)
+{
+ int i;
+ u8 checksum = cal_bmt_checksum(phys_table, bmt_block_count);
+
+ // checksum correct?
+ if ( phys_table->header.checksum != checksum)
+ {
+ MSG("bmt checksum error \n");
+ return false;
+ }
+
+ // block index correct?
+ for (i = 0; i < phys_table->header.mapped_count; i++)
+ {
+ if (phys_table->table[i].bad_index >= total_block_count ||
+ phys_table->table[i].mapped_index >= total_block_count ||
+ phys_table->table[i].mapped_index < system_block_count)
+ {
+ MSG("bmt block index error \n");
+ return false;
+ }
+ }
+
+ // pass check, valid bmt.
+ MSG("Valid BMT, version v%d\n", phys_table->header.version);
+ return true;
+}
+
+static bool valid_bbt_data(init_table_struct *bbt_table)
+{
+ int i;
+ u16 checksum = cal_bbt_checksum(bbt_table);
+
+ if (bbt_table->header.checksum != checksum)
+ {
+ MSG("BBT Data checksum error: 0x%x 0x%x\n", bbt_table->header.checksum, checksum);
+ return false;
+ }
+
+ MSG("BBT Checksum is: 0x%x\n", bbt_table->header.checksum);
+
+ for (i = 0; i < bbt_table->header.badblock_count; i++)
+ {
+ if (bbt_table->badblock_table[i] >= system_block_count)
+ {
+ MSG("error: badblock_table[%d]: %d \n", i, bbt_table->badblock_table[i]);
+ return false;
+ }
+ }
+
+ MSG("Valid BBT, version v%d\n", bbt_table->header.version);
+ return true;
+
+}
+
+
+static void fill_nand_bmt_buffer(bmt_struct *bmt, u8 *dat, u8 *oob)
+{
+ phys_bmt_struct *phys_bmt = &phys_bmt_table;
+
+ dump_bmt_info(bmt);
+
+ // fill phys_bmt_struct structure with bmt_struct
+ memset(phys_bmt, 0xFF, sizeof(phys_bmt_struct));
+
+ memcpy(phys_bmt->header.signature, MAIN_SIGNATURE, SIGNATURE_SIZE);
+ phys_bmt->header.version = BMT_VERSION;
+ phys_bmt->header.mapped_count = bmt->mapped_count;
+ memcpy(phys_bmt->table, bmt->table, sizeof(bmt_entry) * bmt_block_count);
+
+ phys_bmt->header.checksum = cal_bmt_checksum(phys_bmt, bmt_block_count);
+
+ memcpy(dat + MAIN_SIGNATURE_OFFSET, phys_bmt, sizeof(phys_bmt_struct));
+
+ return;
+}
+
+static void fill_nand_bbt_buffer(init_bbt_struct *bbt, u8 *dat, u8 *oob)
+{
+ init_table_struct init_table;
+
+ memset(&init_table, 0xFF, sizeof(init_table));
+
+ memcpy(init_table.header.signature, BBT_SIGNATURE, BBT_SIGNATURE_SIZE);
+
+ init_table.header.version = BBT_VERSION;
+ init_table.header.badblock_count = bbt->badblock_count;
+
+ memcpy(init_table.badblock_table, bbt->badblock_table, sizeof(bbt->badblock_table));
+
+ init_table.header.checksum = cal_bbt_checksum(&init_table);
+
+ memcpy(dat + BBT_SIGNATURE_OFFSET, &init_table, sizeof(init_table));
+
+ return;
+
+}
+
+// return valid index if found BMT, else return 0
+static int load_bmt_data(int start, int pool_size)
+{
+ int bmt_index = start + pool_size - 1; // find from the end
+ phys_bmt_struct *phys_table = &phys_bmt_table;
+ int i;
+
+ MSG("begin to search BMT from block %d \n", bmt_index);
+
+ for (bmt_index = start + pool_size - 1; bmt_index >= start; bmt_index--)
+ {
+ if (nand_block_bad_bmt(OFFSET(bmt_index), BAD_BLOCK_RAW) || nand_block_bad_bmt(OFFSET(bmt_index), BMT_BADBLOCK_GENERATE_LATER))
+ {
+ MSG("Skip bad block: %d \n", bmt_index);
+ continue;
+ }
+
+ if (nand_read_page_bmt(PAGE_ADDR(bmt_index), dat_buf, oob_buf))
+ {
+ MSG("Error found when read block: %d\n", bmt_index);
+ continue;
+ }
+
+ if (!match_bmt_signature(dat_buf, oob_buf))
+ {
+ continue;
+ }
+
+ MSG("Match bmt signature @ block: %d\n", bmt_index);
+
+ memcpy(phys_table, dat_buf + MAIN_SIGNATURE_OFFSET, sizeof(phys_bmt_struct));
+
+ if (!valid_bmt_data(phys_table))
+ {
+ MSG("BMT data is not correct: %d\n", bmt_index);
+ continue;
+ }
+ else
+ {
+ bmt.mapped_count = phys_table->header.mapped_count;
+ bmt.version = phys_table->header.version;
+ memcpy(bmt.table, phys_table->table, bmt.mapped_count * sizeof(bmt_entry));
+
+ MSG("bmt found at block: %d, mapped block: %d\n", bmt_index, bmt.mapped_count);
+
+ for (i = 0; i < bmt.mapped_count; i++)
+ {
+ if (!nand_block_bad_bmt(OFFSET(bmt.table[i].bad_index), BAD_BLOCK_RAW))
+ {
+ MSG("block %d is not mark bad, should be power lost last time\n", bmt.table[i].bad_index);
+ mark_block_bad_bmt(OFFSET(bmt.table[i].bad_index), BAD_BLOCK_RAW);
+ }
+ }
+
+ return bmt_index;
+ }
+ }
+
+ MSG("bmt not found!\n");
+ return 0;
+}
+
+static int load_bbt_data(int start, int pool_size, init_bbt_struct *init_bbt)
+{
+ int i;
+ int ret = 0;
+
+ int bbt_index = start;
+ init_table_struct init_table;
+
+ for(;bbt_index < (start + pool_size); bbt_index++)
+ {
+
+ if (nand_block_bad_bmt(OFFSET(bbt_index), BAD_BLOCK_RAW) || nand_block_bad_bmt(OFFSET(bbt_index), BMT_BADBLOCK_GENERATE_LATER))
+ {
+ MSG("Skip bad block: %d\n", bbt_index);
+ continue;
+ }
+
+ if (nand_read_page_bmt(PAGE_ADDR(bbt_index), dat_buf, oob_buf))
+ {
+ MSG("Error found when read block %d\n", bbt_index);
+ continue;
+ }
+
+ if (!match_bbt_signature(dat_buf, oob_buf))
+ {
+ continue;
+ }
+
+ MSG("Match bbt signature \n");
+
+ memcpy(&init_table, dat_buf + BBT_SIGNATURE_OFFSET, sizeof(init_table));
+
+ if (!valid_bbt_data(&init_table))
+ {
+ MSG("BBT data is not correct \n");
+ continue;
+ }
+ else
+ {
+ init_bbt->badblock_count = init_table.header.badblock_count;
+ init_bbt->version = init_table.header.version;
+ memcpy(init_bbt->badblock_table, init_table.badblock_table, (init_bbt->badblock_count) * 2);
+
+ MSG("bbt found, bad block count: %d\n", init_bbt->badblock_count);
+
+ for (i = 0; i < init_bbt->badblock_count; i++)
+ {
+ MSG("init_bbt->badblock_table[%d]: %d \n", i, init_bbt->badblock_table[i]);
+ }
+
+ return bbt_index;
+ }
+
+ }
+
+ return ret;
+
+}
+
+
+/*************************************************************************
+* Find an available block and erase. *
+* start_from_end: if true, find available block from end of flash. *
+* else, find from the beginning of the pool *
+* need_erase: if true, all unmapped blocks in the pool will be erased *
+*************************************************************************/
+static int find_available_block(bool start_from_end)
+{
+ int i;
+ int block = system_block_count;
+ int direction;
+ MSG("Try to find_available_block, pool_erase: %d\n", pool_erased);
+
+	// erase all un-mapped blocks in pool when finding available block
+ if (!pool_erased)
+ {
+ for (i = 0; i < bmt_block_count; i++)
+ {
+ if ((block + i) == bmt_block_index)
+ {
+ MSG("Skip bmt block %d \n", block + i);
+ continue;
+ }
+
+ if ((block + i) == bbt_block_index)
+ {
+ MSG("Skip bbt block %d \n", block + i);
+ continue;
+ }
+
+ if (nand_block_bad_bmt(OFFSET(block + i), BAD_BLOCK_RAW) || nand_block_bad_bmt(OFFSET(block + i), BMT_BADBLOCK_GENERATE_LATER))
+ {
+ MSG("Skip bad block %d \n", block + i);
+ continue;
+ }
+
+ if (is_block_mapped(block + i) >= 0)
+ {
+ MSG("Skip mapped block %d \n", block + i);
+ continue;
+ }
+
+ if (nand_erase_bmt(OFFSET(block + i)))
+ {
+ MSG("Erase block %d fail\n", block + i);
+ mark_block_bad_bmt(OFFSET(block + i), BMT_BADBLOCK_GENERATE_LATER);
+ }
+ }
+
+ pool_erased = 1;
+ }
+
+ if (start_from_end)
+ {
+ block = total_block_count - 1;
+ direction = -1;
+ }
+ else
+ {
+ block = system_block_count;
+ direction = 1;
+ }
+
+ for (i = 0; i < bmt_block_count; i++, block += direction)
+ {
+ if (block == bmt_block_index)
+ {
+ MSG("Skip bmt block %d \n", block);
+ continue;
+ }
+
+ if (block == bbt_block_index)
+ {
+ MSG("Skip bbt block %d \n", block);
+ continue;
+ }
+
+ if (nand_block_bad_bmt(OFFSET(block), BAD_BLOCK_RAW) || nand_block_bad_bmt(OFFSET(block), BMT_BADBLOCK_GENERATE_LATER))
+ {
+ MSG("Skip bad block %d \n", block);
+ continue;
+ }
+
+ if (is_block_mapped(block) >= 0)
+ {
+ MSG("Skip mapped block %d \n", block);
+ continue;
+ }
+
+ MSG("Find block %d available\n", block);
+ return block;
+ }
+
+ return 0;
+}
+
+
+static unsigned short get_bad_index_from_oob(u8 *oob_buf)
+{
+ unsigned short index;
+
+ memcpy(&index, oob_buf + oob_bad_index_offset, OOB_INDEX_SIZE);
+
+ return index;
+}
+
+void set_bad_index_to_oob(u8 *oob, u16 index)
+{
+ memcpy(oob + oob_bad_index_offset, &index, sizeof(index));
+}
+
+static int migrate_from_bad(int offset, u8 *write_dat, u8 *write_oob)
+{
+ int page;
+ int error_block = offset / BLOCK_SIZE_BMT;
+ int error_page = (offset / PAGE_SIZE_BMT) % page_per_block;
+ int to_index;
+
+ to_index = find_available_block(false);
+
+
+ if (!to_index)
+ {
+ MSG("Cannot find an available block for BMT\n");
+ return 0;
+ }
+
+ for (page = 0; page < error_page; page++)
+ {
+ nand_read_page_bmt(PAGE_ADDR(error_block) + page, dat_buf, oob_buf);
+
+ if (error_block < system_block_count)
+ {
+ set_bad_index_to_oob(oob_buf, error_block);
+ }
+
+ if (nand_write_page_bmt(PAGE_ADDR(to_index) + page, dat_buf, oob_buf))
+ {
+ MSG("Write page %d fail\n", PAGE_ADDR(to_index) + page);
+ mark_block_bad_bmt(OFFSET(to_index), BMT_BADBLOCK_GENERATE_LATER);
+ return migrate_from_bad(offset, write_dat, write_oob);
+ }
+ }
+
+
+ {
+
+ memset(oob_buf, 0xFF, sizeof(oob_buf));
+ #if defined(TCSUPPORT_CPU_MT7510)||defined(TCSUPPORT_CPU_MT7520)||defined(TCSUPPORT_CPU_EN7512) || defined(TCSUPPORT_CPU_EN7521)
+ memcpy(oob_buf, write_oob, mtd_bmt->oobsize);
+ #else
+ memcpy(oob_buf, write_oob, 1 << nand_chip_bmt->flash->oob_shift);
+ #endif
+
+ if (error_block < system_block_count)
+ set_bad_index_to_oob(oob_buf, error_block); // if error_block is already a mapped block, original mapping index is in OOB.
+
+ if (nand_write_page_bmt(PAGE_ADDR(to_index) + error_page, write_dat, oob_buf))
+ {
+ MSG("Write page %d fail\n", PAGE_ADDR(to_index) + error_page);
+ mark_block_bad_bmt(OFFSET(to_index), BMT_BADBLOCK_GENERATE_LATER);
+ return migrate_from_bad(offset, write_dat, write_oob);
+ }
+ }
+
+ MSG("Migrate from %d to %d done!\n",error_block, to_index);
+
+ return to_index;
+}
+
+static bool write_bmt_to_flash(u8 *dat, u8 *oob)
+{
+ bool need_erase = true;
+ MSG("Try to write BMT\n");
+
+ if (bmt_block_index == 0)
+ {
+ // if we don't have index, we don't need to erase found block as it has been erased in find_available_block()
+ need_erase = false;
+ if ( !(bmt_block_index = find_available_block(true)) )
+ {
+ MSG("Cannot find an available block for BMT\n");
+ return false;
+ }
+ }
+
+ // write bmt to flash
+ if (need_erase)
+ {
+ if (nand_erase_bmt(OFFSET(bmt_block_index)))
+ {
+ MSG("BMT block erase fail, mark bad: 0x%x\n", bmt_block_index);
+ mark_block_bad_bmt(OFFSET(bmt_block_index), BMT_BADBLOCK_GENERATE_LATER);
+
+ bmt_block_index = 0;
+ return write_bmt_to_flash(dat, oob); // recursive call
+ }
+ }
+
+ if ( nand_write_page_bmt(PAGE_ADDR(bmt_block_index), dat, oob) )
+ {
+ MSG("Write BMT data fail \n");
+ mark_block_bad_bmt(OFFSET(bmt_block_index), BMT_BADBLOCK_GENERATE_LATER);
+
+ bmt_block_index = 0;
+ return write_bmt_to_flash(dat, oob); // recursive call
+ }
+
+ MSG("Write BMT to block %d success\n", bmt_block_index);
+ return true;
+}
+
+static bool write_bbt_to_flash(u8 *dat, u8 *oob)
+{
+
+ if ( !(bbt_block_index = find_available_block(false)) )
+ {
+ MSG("Cannot find an available block for BBT\n");
+ return false;
+ }
+
+ if ( nand_write_page_bmt(PAGE_ADDR(bbt_block_index), dat, oob) )
+ {
+ MSG("Write BBT data fail \n");
+ mark_block_bad_bmt(OFFSET(bbt_block_index), BMT_BADBLOCK_GENERATE_LATER);
+
+ bbt_block_index = 0;
+ return write_bbt_to_flash(dat, oob); // recursive call
+ }
+
+ MSG("Write BBT to block %d success\n", bbt_block_index);
+ return true;
+}
+
+int scan_badblock_raw(init_bbt_struct *init_bbt)
+{
+ u16 block = 0;
+ int count = 0;
+
+ for(block = 1; block < system_block_count; block++)
+ {
+ if(nand_block_bad_bmt(OFFSET(block), BAD_BLOCK_RAW))
+ {
+ if(is_badblock_raw(block))
+ {
+ if(count >= MAX_RAW_BAD_BLOCK_SIZE)
+ {
+ return -1;
+ }
+ init_bbt->badblock_table[count] = block;
+ count++;
+ }
+ else
+ {
+ continue;
+ }
+
+ }
+
+ }
+
+ init_bbt->badblock_count = count;
+
+ return 0;
+
+}
+/*******************************************************************
+* Reconstruct bmt, called when found bmt info doesn't match bad
+* block info in flash.
+*
+* Return NULL for failure
+*******************************************************************/
+bmt_struct *reconstruct_bmt(bmt_struct * bmt)
+{
+ int i;
+ int index = system_block_count;
+ unsigned short bad_index;
+
+ // init everything in BMT struct
+ bmt->version = BMT_VERSION;
+ bmt->bad_count = 0;
+ bmt->mapped_count = 0;
+
+ memset(bmt->table, 0, bmt_block_count * sizeof(bmt_entry));
+
+ for (i = 0; i < bmt_block_count; i++, index++)
+ {
+ if (nand_block_bad_bmt(OFFSET(index), BAD_BLOCK_RAW) || nand_block_bad_bmt(OFFSET(index), BMT_BADBLOCK_GENERATE_LATER))
+ {
+ MSG("Skip bad block: %d \n", index);
+ continue;
+ }
+
+ nand_read_page_bmt(PAGE_ADDR(index), dat_buf, oob_buf);
+
+ if ((bad_index = get_bad_index_from_oob(oob_buf)) >= system_block_count)
+ {
+ MSG("get bad index: 0x%x \n", bad_index);
+ if (bad_index != 0xFFFF)
+ MSG("Invalid bad index found in block: %d \n", index);
+ continue;
+ }
+
+ MSG("Block 0x%x is mapped to bad block: 0x%x\n", index, bad_index);
+
+ if (!nand_block_bad_bmt(OFFSET(bad_index), BAD_BLOCK_RAW))
+ {
+ mark_block_bad_bmt(OFFSET(bad_index), BAD_BLOCK_RAW);
+ MSG("block %d is not marked as bad, mark it\n", bad_index);
+ }
+
+ {
+ // add mapping to BMT
+ bmt->table[bmt->mapped_count].bad_index = bad_index;
+ bmt->table[bmt->mapped_count].mapped_index = index;
+ bmt->mapped_count++;
+ }
+
+ MSG("Add mapping: %d -> %d to BMT\n", bad_index, index);
+ }
+
+ MSG("Scan replace pool done, mapped block: %d\n", bmt->mapped_count);
+
+ return bmt;
+}
+
+init_bbt_struct* reconstruct_bbt(init_bbt_struct* init_bbt)
+{
+ init_bbt->version = BBT_VERSION;
+ init_bbt->badblock_count = 0;
+ memset(init_bbt->badblock_table, 0, sizeof(init_bbt->badblock_table));
+
+ if(scan_badblock_raw(init_bbt))
+ {
+ MSG("scan_badblock_raw fail (%d)! \n", BBT_VERSION);
+ return NULL;
+ }
+
+ return init_bbt;
+}
+
+/*******************************************************************
+* [BMT Interface]
+*
+* Description:
+* Init bmt from nand. Reconstruct if not found or data error
+*
+* Parameter:
+* size: size of bmt and replace pool
+*
+* Return:
+* NULL for failure, and a bmt struct for success
+*******************************************************************/
+#if defined(TCSUPPORT_CPU_EN7512)||defined(TCSUPPORT_CPU_EN7521)
+bmt_struct *init_bmt(struct mtd_info *mtd, int size)
+#else
+#if defined(TCSUPPORT_CPU_MT7510)||defined(TCSUPPORT_CPU_MT7520)
+bmt_struct *init_bmt(struct nand_chip *chip, int size)
+#else
+bmt_struct *init_bmt(struct ra_nand_chip *ra, int size)
+#endif
+#endif
+{
+#if defined(TCSUPPORT_CPU_MT7510)||defined(TCSUPPORT_CPU_MT7520)
+
+ struct mt6573_nand_host *host;
+#endif
+#if defined(TCSUPPORT_CPU_EN7512)||defined(TCSUPPORT_CPU_EN7521)
+ struct nand_chip *chip;
+#endif
+
+
+ if (size > 0 && size <= MAX_BMT_SIZE)
+ {
+ MSG("Init bmt table, size: %d\n", size);
+ bmt_block_count = size;
+ }
+ else
+ {
+ MSG("Invalid bmt table size: %d\n", size);
+ return NULL;
+ }
+
+ #if defined(TCSUPPORT_CPU_EN7512)||defined(TCSUPPORT_CPU_EN7521)
+ chip = mtd->priv;
+ nand_chip_bmt = chip;
+ total_block_count = chip->chipsize >> chip->phys_erase_shift;
+ system_block_count = total_block_count - bmt_block_count;
+
+ mtd_bmt = mtd;
+
+ #else
+ #if defined(TCSUPPORT_CPU_MT7510)||defined(TCSUPPORT_CPU_MT7520)
+ nand_chip_bmt = chip;
+ system_block_count = chip->chipsize >> chip->phys_erase_shift;
+ total_block_count = bmt_block_count + system_block_count;
+
+ host = (struct mt6573_nand_host *)chip->priv;
+ mtd_bmt = &host->mtd;
+
+ #else
+ nand_chip_bmt = ra;
+ total_block_count = (1 << ra->flash->chip_shift) / (1 << ra->flash->erase_shift);
+ system_block_count = total_block_count - bmt_block_count;
+ #endif
+ #endif
+
+ page_per_block = BLOCK_SIZE_BMT / PAGE_SIZE_BMT;
+
+ MSG("bmt count: %d, system count: %d\n", bmt_block_count, system_block_count);
+
+ // set this flag, and unmapped block in pool will be erased.
+ pool_erased = 0;
+
+ // alloc size for bmt.
+ memset(bmt.table, 0, size * sizeof(bmt_entry));
+
+ bmt_block_index = load_bmt_data(system_block_count, size);
+
+ // load bmt if exist
+ if (bmt_block_index)
+ {
+ MSG("Load bmt data success @ block %d \n", bmt_block_index);
+ dump_bmt_info(&bmt);
+ return &bmt;
+ }
+ else
+ {
+ MSG("Load bmt data fail! \n");
+
+ if (reconstruct_bmt(&bmt))
+ {
+ need_write_bmt_to_nand = 1;
+ return &bmt;
+ }
+ else
+ return NULL;
+ }
+
+}
+
+init_bbt_struct* start_init_bbt(void)
+{
+ bbt_block_index = load_bbt_data(system_block_count, bmt_block_count, &init_bbt);
+
+ if(bbt_block_index)
+ {
+ MSG("Load bbt data success \n");
+ return &init_bbt;
+ }
+ else
+ {
+
+ MSG("Load bbt data fail! \n");
+ if(reconstruct_bbt(&init_bbt))
+ {
+ need_write_bbt_to_nand = 1;
+ return &init_bbt;
+
+ }
+ else
+ return NULL;
+ }
+
+}
+
+int write_bbt_or_bmt_to_flash(void)
+{
+ if(need_write_bmt_to_nand)
+ {
+ // fill NAND BMT buffer
+ memset(dat_buf, 0xFF, sizeof(dat_buf));
+ memset(oob_buf, 0xFF, sizeof(oob_buf));
+ fill_nand_bmt_buffer(&bmt, dat_buf, oob_buf);
+
+ // write BMT back
+ if (!write_bmt_to_flash(dat_buf, oob_buf))
+ {
+ MSG("save bmt to nand fail! \n");
+ return -1;
+ }
+
+ }
+
+ if(need_write_bbt_to_nand)
+ {
+ // fill NAND BBT buffer
+ memset(dat_buf, 0xFF, sizeof(dat_buf));
+ memset(oob_buf, 0xFF, sizeof(oob_buf));
+ fill_nand_bbt_buffer(&init_bbt, dat_buf, oob_buf);
+
+ // write BBT back
+ if (!write_bbt_to_flash(dat_buf, oob_buf))
+ {
+ MSG("save bbt to nand fail! \n");
+ return -1;
+ }
+ }
+
+ return 0;
+
+}
+
+int create_badblock_table_by_bbt(void)
+{
+ int i = 0, j = 0, k = 0;
+ int badblock_table_size = system_block_count - init_bbt.badblock_count;
+
+ for(i = 0; i < badblock_table_size; i++)
+ {
+ pBbt[i] = i;
+
+ }
+
+ for(i = 0; i < init_bbt.badblock_count; i++)
+ {
+ for(; j < badblock_table_size; j++)
+ {
+ if(pBbt[j] == init_bbt.badblock_table[i])
+ {
+ k = j;
+ break;
+
+ }
+
+ }
+
+ for(; k < badblock_table_size; k++)
+ {
+ pBbt[k]++;
+ }
+
+ }
+#if defined(TCSUPPORT_CPU_MT7510)||defined(TCSUPPORT_CPU_MT7520)||defined(TCSUPPORT_CPU_EN7512) || defined(TCSUPPORT_CPU_EN7521)
+ nand_logic_size = (system_block_count - init_bbt.badblock_count) * (1 << nand_chip_bmt->phys_erase_shift);
+#else
+ nand_logic_size = (system_block_count - init_bbt.badblock_count) * (1 << nand_chip_bmt->flash->erase_shift);
+#endif
+ return 0;
+}
+
+
+/*******************************************************************
+* [BMT Interface]
+*
+* Description:
+* Update BMT.
+*
+* Parameter:
+* offset: update block/page offset.
+* reason: update reason, see update_reason_t for reason.
+* dat/oob: data and oob buffer for write fail.
+*
+* Return:
+* Return true for success, and false for failure.
+*******************************************************************/
+bool update_bmt(u32 offset, update_reason_t reason, u8 *dat, u8 *oob)
+{
+ int map_index;
+ int orig_bad_block = -1;
+ int i;
+ int bad_index = offset / BLOCK_SIZE_BMT;
+
+ if (reason == UPDATE_WRITE_FAIL)
+ {
+ if ( !(map_index = migrate_from_bad(offset, dat, oob)) )
+ {
+ MSG("migrate fail \n");
+ return false;
+ }
+ }
+ else
+ {
+ if ( !(map_index = find_available_block(false)) )
+ {
+ MSG("Cannot find block in pool \n");
+ return false;
+ }
+ }
+
+ // now let's update BMT
+ if (bad_index >= system_block_count) // mapped block become bad, find original bad block
+ {
+ for (i = 0; i < bmt_block_count; i++)
+ {
+ if (bmt.table[i].mapped_index == bad_index)
+ {
+ orig_bad_block = bmt.table[i].bad_index;
+ break;
+ }
+ }
+ MSG("Mapped block becomes bad, orig bad block is %d \n", orig_bad_block);
+
+ bmt.table[i].mapped_index = map_index;
+ }
+ else
+ {
+ bmt.table[bmt.mapped_count].mapped_index = map_index;
+ bmt.table[bmt.mapped_count].bad_index = bad_index;
+ bmt.mapped_count++;
+ }
+
+ memset(dat_buf, 0xFF, sizeof(dat_buf));
+ memset(oob_buf, 0xFF, sizeof(oob_buf));
+ fill_nand_bmt_buffer(&bmt, dat_buf, oob_buf);
+ if (!write_bmt_to_flash(dat_buf, oob_buf))
+ return false;
+
+ if (bad_index >= system_block_count)
+ mark_block_bad_bmt(offset, BMT_BADBLOCK_GENERATE_LATER);
+ else
+ mark_block_bad_bmt(offset, BAD_BLOCK_RAW);
+
+ return true;
+}
+
+/*******************************************************************
+* [BMT Interface]
+*
+* Description:
+* Given an block index, return mapped index if it's mapped, else
+* return given index.
+*
+* Parameter:
+* index: given an block index. This value cannot exceed
+* system_block_count.
+*
+* Return NULL for failure
+*******************************************************************/
+int get_mapping_block_index_by_bmt(int index)
+{
+ int i;
+
+ if (index >= system_block_count)
+ {
+ MSG("BMT Given index exceed: %d > %d \n", index, system_block_count);
+ return index;
+ }
+
+ for (i = 0; i < bmt.mapped_count; i++)
+ {
+ if (bmt.table[i].bad_index == index)
+ {
+ MSG("Redirect %d to %d \n", index, bmt.table[i].mapped_index);
+ return bmt.table[i].mapped_index;
+ }
+ }
+
+ return index;
+}
+
+int get_mapping_block_index_by_bbt(int index)
+{
+
+ if (index >= (system_block_count - init_bbt.badblock_count))
+ {
+ MSG("BBT Given index exceed: %d > %d \n", index, (system_block_count - init_bbt.badblock_count));
+ return index;
+ }
+
+ return pBbt[index];
+
+}
+
+int get_mapping_block_index(int index, u16 *phy_block_bbt)
+{
+ int block;
+
+ block = get_mapping_block_index_by_bbt(index);
+ *phy_block_bbt = block;
+ block = get_mapping_block_index_by_bmt(block);
+
+ return block;
+
+}
+
+int block_is_in_bmt_region(int index)
+{
+ if(index >= system_block_count)
+ return 1;
+ else
+ return 0;
+}
+
Index: linux-3.18.21/drivers/mtd/econet/bmt.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/econet/bmt.h 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,121 @@
+#ifndef __BMT_H__
+#define __BMT_H__
+
+#if defined(TCSUPPORT_CPU_MT7510)||defined(TCSUPPORT_CPU_MT7520)
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#if defined(TCSUPPORT_CPU_EN7512)||defined(TCSUPPORT_CPU_EN7521)
+#ifdef TCSUPPORT_SPI_CONTROLLER_ECC
+#include <spi/spi_nand_flash.h>
+#else
+#include "../chips/spi_nand_flash.h"
+#endif
+#else
+#include "../mtk/mt6573_nand.h"
+#endif
+#else
+#include "ralink_nand.h"
+
+#define bool u8
+#define true 1
+#define false 0
+#endif
+
+#define MAX_RAW_BAD_BLOCK_SIZE (250)
+#define BBT_SIGNATURE_OFFSET (0)
+#define BBT_VERSION 1
+
+#define BAD_BLOCK_RAW (0)
+#define BMT_BADBLOCK_GENERATE_LATER (1)
+
+#if defined(TCSUPPORT_CPU_EN7512)||defined(TCSUPPORT_CPU_EN7521)
+#define MAX_BMT_SIZE (250)
+#define BMT_SIZE_FOR_RESERVE_AREA (0x80)
+#else
+#define MAX_BMT_SIZE (0x80)//(500)
+#endif
+#define BMT_VERSION (1) // initial version
+
+#define MAIN_SIGNATURE_OFFSET (0)
+#define OOB_INDEX_OFFSET (2)
+#define OOB_INDEX_SIZE (2)
+
+#if !defined(TCSUPPORT_CPU_MT7510)&& !defined(TCSUPPORT_CPU_MT7520)
+#if 0
+#define MSG(args...) printk(args)
+#else
+#define MSG(args...) do{}while(0)
+#endif
+#endif
+
+typedef struct _bmt_entry_
+{
+ u16 bad_index; // bad block index
+ u16 mapped_index; // mapping block index in the replace pool
+} bmt_entry;
+
+typedef enum
+{
+ UPDATE_ERASE_FAIL,
+ UPDATE_WRITE_FAIL,
+ UPDATE_UNMAPPED_BLOCK,
+ UPDATE_REASON_COUNT,
+} update_reason_t;
+
+typedef struct {
+ bmt_entry table[MAX_BMT_SIZE];
+ u8 version;
+ u8 mapped_count; // mapped block count in pool
+ u8 bad_count; // bad block count in pool. Not used in V1
+}bmt_struct;
+
+typedef struct {
+ u16 badblock_table[MAX_RAW_BAD_BLOCK_SIZE]; //store bad block raw
+ u8 version;
+ u8 badblock_count;
+ u8 reserved[2];
+}init_bbt_struct;
+
+/***************************************************************
+* *
+* Interface BMT need to use *
+* *
+***************************************************************/
+#if defined(TCSUPPORT_CPU_MT7510)||defined(TCSUPPORT_CPU_MT7520)
+extern int mt6573_nand_exec_read_page(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8* pPageBuf, u8* pFDMBuf);
+extern int mt6573_nand_block_bad_hw(struct mtd_info *mtd, loff_t ofs, u32 bmt_block);
+extern int mt6573_nand_erase_hw(struct mtd_info *mtd, int page);
+extern int mt6573_nand_block_markbad_hw(struct mtd_info *mtd, loff_t offset, u32 bmt_block);
+extern int mt6573_nand_exec_write_page(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8* pPageBuf, u8* pFDMBuf);
+
+#else
+extern int mt6573_nand_exec_read_page(struct ra_nand_chip *ra, int page, u32 page_size, u8 *dat, u8 *oob);
+extern int mt6573_nand_block_bad_hw(struct ra_nand_chip *ra, unsigned long ofs, unsigned long bmt_block);
+extern int mt6573_nand_erase_hw(struct ra_nand_chip *ra, unsigned long page);
+extern int mt6573_nand_block_markbad_hw(struct ra_nand_chip *ra, unsigned long ofs, unsigned long bmt_block);
+extern int mt6573_nand_exec_write_page(struct ra_nand_chip *ra, int page, u32 page_size, u8 *dat, u8 *oob);
+#endif
+/********************************************
+* *
+* Interface for preloader/uboot/kernel *
+* *
+********************************************/
+extern void set_bad_index_to_oob(u8 *oob, u16 index);
+#if defined(TCSUPPORT_CPU_EN7512)||defined(TCSUPPORT_CPU_EN7521)
+bmt_struct *init_bmt(struct mtd_info *mtd, int size);
+#else
+#if defined(TCSUPPORT_CPU_MT7510)||defined(TCSUPPORT_CPU_MT7520)
+bmt_struct *init_bmt(struct nand_chip *chip, int size);
+#else
+extern bmt_struct *init_bmt(struct ra_nand_chip *ra, int size);
+#endif
+#endif
+extern init_bbt_struct* start_init_bbt(void);
+extern int write_bbt_or_bmt_to_flash(void);
+extern int create_badblock_table_by_bbt(void);
+extern bool update_bmt(u32 offset, update_reason_t reason, u8 *dat, u8 *oob);
+extern int get_mapping_block_index_by_bmt(int index);
+extern int get_mapping_block_index_by_bbt(int index);
+extern int get_mapping_block_index(int index, u16 *phy_block_bbt);
+extern int block_is_in_bmt_region(int index);
+#endif
Index: linux-3.18.21/drivers/mtd/econet/gdma.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/econet/gdma.c 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,234 @@
+#if !defined (__UBOOT__)
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#define PHYSADDR(a) virt_to_phys((void*)(a))
+#else
+#include <common.h>
+#define printk printf
+#define PHYSADDR(a) (((unsigned long)(a)) & 0x1fffffff)
+#endif
+#include <asm/io.h>
+//#include "rt_mmap.h"
+#include "gdma.h"
+#include "ralink_nand.h"
+
+#define DMA_CHNUM (0)
+
+
+int _nand_dma_sync(void)
+{
+ //unmask to start dma
+ unsigned long data;
+ int retry = 1000000; //fixme
+
+ data = GDMA_READ_REG(GDMA_CTRL_REG1(DMA_CHNUM));
+ data &= ~( 0x01 << CH_MASK_OFFSET);
+ GDMA_WRITE_REG(GDMA_CTRL_REG1(DMA_CHNUM), data);
+
+#if 0
+#if defined (CONFIG_RALINK_RT3052)
+ // sync status
+ while(!(GDMA_READ_REG(RALINK_GDMAISTS) & (1<<DMA_CHNUM)) && retry--) {
+ ndelay(1);
+ // do nothing
+ }
+ if (!(GDMA_READ_REG(RALINK_GDMAISTS) & (1<<DMA_CHNUM))) {
+ return -1;
+ }
+ GDMA_WRITE_REG(RALINK_GDMAISTS, 1<<DMA_CHNUM);
+#elif defined (CONFIG_RALINK_RT3883) || defined (CONFIG_RALINK_RT3352)
+ while(!(GDMA_READ_REG(RALINK_GDMA_DONEINT) & (1<<DMA_CHNUM)) && retry--) {
+ ndelay(1);
+ }
+ if (!(GDMA_READ_REG(RALINK_GDMA_DONEINT) & (1<<DMA_CHNUM))) {
+ return -1;
+ }
+ GDMA_WRITE_REG(RALINK_GDMA_DONEINT, 1<<DMA_CHNUM);
+#endif
+#endif
+ // frank added
+ while(!(GDMA_READ_REG(RALINK_GDMA_DONEINT) & (1<<DMA_CHNUM)) && retry--) {
+ ndelay(1);
+ }
+ if (!(GDMA_READ_REG(RALINK_GDMA_DONEINT) & (1<<DMA_CHNUM))) {
+ return -1;
+ }
+ GDMA_WRITE_REG(RALINK_GDMA_DONEINT, 1<<DMA_CHNUM);
+
+ return 0;
+}
+
+void _release_dma_buf(void)
+{
+ unsigned long data;
+
+ data = GDMA_READ_REG(GDMA_CTRL_REG(DMA_CHNUM));
+ data &= ~( 0x01 << CH_EBL_OFFSET);
+ GDMA_WRITE_REG(GDMA_CTRL_REG(DMA_CHNUM), data);
+
+}
+
+int _set_gdma_ch(unsigned long dst,
+ unsigned long src, unsigned int len, int burst_size,
+ int soft_mode, int src_req_type, int dst_req_type,
+ int src_burst_mode, int dst_burst_mode)
+{
+ unsigned long data;
+
+ //src
+ GDMA_WRITE_REG(GDMA_SRC_REG(DMA_CHNUM), (src & 0x1fffffff));
+
+ //dst
+ GDMA_WRITE_REG(GDMA_DST_REG(DMA_CHNUM), (dst & 0x1fffffff));
+
+ //control 1,
+ data = 0;
+// data = (0 << CH_UNMASK_INTEBL_OFFSET);
+ data |= ( DMA_CHNUM << NEXT_UNMASK_CH_OFFSET);
+ data |= ( (soft_mode == 0) << CH_MASK_OFFSET);
+#if 0
+#if defined (CONFIG_RALINK_RT3883) || defined (CONFIG_RALINK_RT3352)
+ data |= (src_req_type << SRC_DMA_REQ_OFFSET);
+ data |= (dst_req_type << DST_DMA_REQ_OFFSET);
+#endif
+#endif
+
+ // frank added
+ data |= (src_req_type << SRC_DMA_REQ_OFFSET);
+ data |= (dst_req_type << DST_DMA_REQ_OFFSET);
+
+ GDMA_WRITE_REG(GDMA_CTRL_REG1(DMA_CHNUM), data);
+
+ // control
+ data = (len << TRANS_CNT_OFFSET);
+#if 0
+#ifdef CONFIG_RALINK_RT3052
+ data |= (src_req_type << SRC_DMA_REQ_OFFSET);
+ data |= (dst_req_type << DST_DMA_REQ_OFFSET);
+#endif
+#endif
+
+ data |= (src_burst_mode << SRC_BRST_MODE_OFFSET);
+ data |= (dst_burst_mode << DST_BRST_MODE_OFFSET);
+ data |= (burst_size << BRST_SIZE_OFFSET);
+
+// data |= (0 << INT_EBL_OFFSET);
+
+ data |= ((soft_mode != 0) << MODE_SEL_OFFSET);
+ data |= (0x01<<CH_EBL_OFFSET);
+ GDMA_WRITE_REG(GDMA_CTRL_REG(DMA_CHNUM), data);
+
+ return 1;
+}
+
+
+int _ra_nand_prepare_dma_pull(unsigned long dst, int len)
+{
+ _set_gdma_ch(PHYSADDR(dst), NFC_DATA, len,
+ BURST_SIZE_4B, HW_MODE, DMA_NAND_REQ, DMA_REQMEM,
+ TRN_FIX, TRN_INC);
+ return 0;
+}
+
+#if 0
+int _ra_nor_dma_pull(char *dst, char *src, int len)
+{
+ int ret = 0;
+
+ //fixme, take care about alignment issues
+ while (len > 0) {
+ int size = (len > ((1<<16) - 4)) ? ((1<<16) - 4) : len; // limitation is DMA buffer
+ // set GDMA
+ _set_gdma_ch(PHYSADDR(dst), PHYSADDR(src), size,
+ BURST_SIZE_32B, SW_MODE, DMA_REQMEM, DMA_REQMEM,
+ TRN_INC, TRN_INC);
+
+ // start and wait dma done
+ if (_nand_dma_sync()) {
+ printk("%s: gdma: fail, dst:%lx, len:%x \n", __func__, dst, len);
+ ret = -1;
+ }
+
+ // disable dma
+ _release_dma_buf();
+
+ len -= size;
+ dst += size;
+ src += size;
+ }
+ return ret;
+
+
+}
+#endif
+
+
+// this is "data moving" from nand to memory.
+int _ra_nand_dma_pull(unsigned long dst, int len)
+{
+ int ret =0;
+
+#if !defined (__UBOOT__)
+ dma_cache_inv(dst, len);
+#endif
+
+#if defined (__UBOOT__)
+ flush_cache(dst, len);
+#endif
+
+ // set GDMA
+ _set_gdma_ch(PHYSADDR(dst), NFC_DATA, len,
+ BURST_SIZE_4B, HW_MODE, DMA_NAND_REQ, DMA_REQMEM,
+ TRN_FIX, TRN_INC);
+
+ // start and wait dma done
+ if (_nand_dma_sync()) {
+ printk("%s: gdma: fail, dst:%lx, len:%x \n", __func__, dst, len);
+ ret = -1;
+ }
+
+ // disable dma
+ _release_dma_buf();
+
+
+ return ret;
+}
+
+// this is data moving from memory to nand.
+int _ra_nand_dma_push(unsigned long src, int len)
+{
+ int ret = 0;
+
+#if !defined (__UBOOT__) // uboot set kseg0 as noncache
+ dma_cache_wback(src, len);
+#else
+ flush_cache(src, len);
+#endif
+
+ // set GDMA
+ _set_gdma_ch(NFC_DATA, PHYSADDR((void*)src), len,
+ BURST_SIZE_4B, HW_MODE, DMA_REQMEM, DMA_NAND_REQ,
+ TRN_INC, TRN_FIX);
+
+ // start and wait dma done
+ if (_nand_dma_sync()) {
+ printk("%s: gdma: fail, dst:%lx, len:%x \n", __func__, src, len);
+ ret = -1;
+ }
+
+
+ // disable dma
+ _release_dma_buf();
+
+
+ return ret;
+}
+
+#if 0
+void gdma_reset(void)
+{
+ ra_aor(RALINK_SYSCTL_BASE + 0x34, ~(1<<14) , (1<<14));
+ udelay(1);
+ ra_aor(RALINK_SYSCTL_BASE + 0x34, ~(1<<14) , 0);
+}
+#endif
Index: linux-3.18.21/drivers/mtd/econet/gdma.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/econet/gdma.h 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,216 @@
+/*
+ ***************************************************************************
+ * Ralink Tech Inc.
+ * 5F., No.36, Taiyuan St., Jhubei City,
+ * Hsinchu County 302,
+ * Taiwan, R.O.C.
+ *
+ * (c) Copyright, Ralink Technology, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ ***************************************************************************
+ */
+
+#ifndef GDMA_H
+#define GDMA_H
+
+
+
+/*
+ * DEFINITIONS AND MACROS
+ */
+#if 0
+#define MOD_VERSION "0.4"
+
+#if defined (CONFIG_RALINK_RT3052)
+#define MAX_GDMA_CHANNEL 8
+#elif defined (CONFIG_RALINK_RT3883) || defined (CONFIG_RALINK_RT3352)
+#define MAX_GDMA_CHANNEL 16
+#else
+#error Please Choose System Type
+#endif
+#endif
+
+#define RALINK_GDMA_BASE (0xbfb30000)
+#define RALINK_GDMA_CTRL_BASE (RALINK_GDMA_BASE)
+
+
+#if 0
+#if defined (CONFIG_RALINK_RT3052)
+#define RALINK_GDMAISTS (RALINK_GDMA_BASE + 0x80)
+#define RALINK_GDMAGCT (RALINK_GDMA_BASE + 0x88)
+#elif defined (CONFIG_RALINK_RT3883) || defined (CONFIG_RALINK_RT3352)
+#define RALINK_GDMA_UNMASKINT (RALINK_GDMA_BASE + 0x200)
+#define RALINK_GDMA_DONEINT (RALINK_GDMA_BASE + 0x204)
+#define RALINK_GDMA_GCT (RALINK_GDMA_BASE + 0x220)
+#endif
+#endif
+
+// frank added
+#define RALINK_GDMAISTS (RALINK_GDMA_BASE + 0x0)
+#define RALINK_GDMAGCT (RALINK_GDMA_BASE + 0x8)
+#define RALINK_GDMA_UNMASKINT (RALINK_GDMA_BASE + 0x200)
+#define RALINK_GDMA_DONEINT (RALINK_GDMA_BASE + 0x204)
+#define RALINK_GDMA_GCT (RALINK_GDMA_BASE + 0x220)
+
+#if 0
+#define GDMA_READ_REG(addr) le32_to_cpu(*(volatile u32 *)(addr))
+#define GDMA_WRITE_REG(addr, val) *((volatile uint32_t *)(addr)) = cpu_to_le32(val)
+#define GET_GDMA_IP_VER (GDMA_READ_REG(RALINK_GDMA_GCT) & 0x6) >> 1 //GDMA_GCT[2:1]
+#endif
+
+#define GDMA_READ_REG(addr) (*(volatile u32 *)(addr))
+#define GDMA_WRITE_REG(addr, val) *((volatile uint32_t *)(addr)) = (val)
+
+#if 0
+#define RALINK_IRQ_ADDR RALINK_INTCL_BASE
+#define RALINK_REG_INTENA (RALINK_IRQ_ADDR + 0x34)
+#define RALINK_REG_INTDIS (RALINK_IRQ_ADDR + 0x38)
+#endif
+
+/*
+ * 12bytes=GDMA Channel n Source Address(4) +
+ * GDMA Channel n Destination Address(4) +
+ * GDMA Channel n Control Register(4)
+ *
+ */
+#define GDMA_SRC_REG(ch) (RALINK_GDMA_BASE + ch*16)
+#define GDMA_DST_REG(ch) (GDMA_SRC_REG(ch) + 4)
+#define GDMA_CTRL_REG(ch) (GDMA_DST_REG(ch) + 4)
+#define GDMA_CTRL_REG1(ch) (GDMA_CTRL_REG(ch) + 4)
+
+//GDMA Interrupt Status Register
+#if 0
+#if defined (CONFIG_RALINK_RT3052)
+#define UNMASK_INT_STATUS(ch) (ch+16)
+#elif defined (CONFIG_RALINK_RT3883) || defined (CONFIG_RALINK_RT3352)
+#define UNMASK_INT_STATUS(ch) (ch)
+#endif
+#define TXDONE_INT_STATUS(ch) (ch)
+#endif
+
+//Control Reg0
+#define MODE_SEL_OFFSET 0
+#define CH_EBL_OFFSET 1
+#define CH_DONEINT_EBL_OFFSET 2
+#define BRST_SIZE_OFFSET 3
+#define DST_BRST_MODE_OFFSET 6
+#define SRC_BRST_MODE_OFFSET 7
+#define TRANS_CNT_OFFSET 16
+
+//Control Reg1
+#if 0
+#if defined (CONFIG_RALINK_RT3052)
+#define CH_UNMASKINT_EBL_OFFSET 4
+#define NEXT_UNMASK_CH_OFFSET 1
+#elif defined (CONFIG_RALINK_RT3883) || defined (CONFIG_RALINK_RT3352)
+#define CH_UNMASKINT_EBL_OFFSET 1
+#define NEXT_UNMASK_CH_OFFSET 3
+#endif
+#endif
+
+
+#define COHERENT_INT_EBL_OFFSET 2
+#define CH_MASK_OFFSET 0
+// frank added
+#define NEXT_UNMASK_CH_OFFSET 3
+
+#if 0
+#if defined (CONFIG_RALINK_RT3052)
+//Control Reg0
+#define DST_DMA_REQ_OFFSET 8
+#define SRC_DMA_REQ_OFFSET 12
+#elif defined (CONFIG_RALINK_RT3883) || defined (CONFIG_RALINK_RT3352)
+//Control Reg1
+#define DST_DMA_REQ_OFFSET 8
+#define SRC_DMA_REQ_OFFSET 16
+#endif
+#endif
+
+#define DST_DMA_REQ_OFFSET 8
+#define SRC_DMA_REQ_OFFSET 16
+
+//#define GDMA_DEBUG
+#ifdef GDMA_DEBUG
+#define GDMA_PRINT(fmt, args...) printk(KERN_INFO "GDMA: " fmt, ## args)
+#else
+#define GDMA_PRINT(fmt, args...) { }
+#endif
+
+/*
+ * TYPEDEFS AND STRUCTURES
+ */
+
+enum GdmaBusterMode {
+ INC_MODE=0,
+ FIX_MODE=1
+};
+
+enum GdmaBusterSize {
+ BUSTER_SIZE_4B=0, /* 1 transfer */
+ BUSTER_SIZE_8B=1, /* 2 transfer */
+ BUSTER_SIZE_16B=2, /* 4 transfer */
+ BUSTER_SIZE_32B=3, /* 8 transfer */
+ BUSTER_SIZE_64B=4 /* 16 transfer */
+};
+
+enum GdmaDmaReqNum {
+ DMA_NAND_REQ=0,
+ DMA_REQ1=1,
+ DMA_REQ2=2,
+ DMA_REQ3=3,
+ DMA_REQ4=4,
+ DMA_REQ5=5,
+ DMA_REQ6=6,
+ DMA_REQ7=7,
+ DMA_REQ8=8,
+ DMA_REQ9=9,
+ DMA_REQ10=10,
+ DMA_REQ11=11,
+ DMA_REQ12=12,
+ DMA_REQ13=13,
+ DMA_REQ14=14,
+ DMA_REQ15=15,
+ DMA_MEM_REQ=32
+};
+
+
+#define BURST_SIZE_4B 0 /* 1 transfer */
+#define BURST_SIZE_8B 1 /* 2 transfer */
+#define BURST_SIZE_16B 2 /* 4 transfer */
+#define BURST_SIZE_32B 3 /* 8 transfer */
+#define BURST_SIZE_64B 4 /* 16 transfer */
+#define SW_MODE 1
+#define HW_MODE 0
+#define DMA_REQMEM 0x20
+
+#define TRN_FIX 1
+#define TRN_INC 0
+
+
+int _ra_nand_prepare_dma_pull(unsigned long dst, int len);
+int _ra_nand_dma_pull(unsigned long dst, int len);
+int _ra_nand_dma_push(unsigned long src, int len);
+
+
+#endif
Index: linux-3.18.21/drivers/mtd/econet/nand_verify.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/econet/nand_verify.c 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,212 @@
+#include "ralink_nand.h"
+
+#define VPint *(volatile unsigned int *)
+
+typedef struct reg_check_s{
+ char* name; /*Register Name*/
+ #define VC_TYPE (1 << 0)
+ #define RO (1 << 1)
+ #define WO (1 << 2)
+ #define RW (1 << 3)
+ #define NO_DEF (1 << 4)
+ uint8_t type; /*0:VC_TYPE, 1:Read Only, 2: Write Only, 3: No default value*/
+ uint32_t addr; /*Register location*/
+ uint32_t def_value; /*Default value*/
+ uint32_t mask; /*For read/write test*/
+} reg_check_t;
+
+/*_____________________________________________________________________________
+** function name: tsarmRegDefCheck
+** descriptions:
+** SAR registers read/write test.
+** Steps as flows:
+** 1. Write the test pattern into SAR register.
+** 2. Read the value of SAR register.
+** 3. Compare the value of SAR register is fit in with test pattern.
+**
+** parameters:
+** pattern: Test pattern.
+**
+** global:
+** sar_reg
+**
+** return:
+** Success: 0
+** Otherwise: -1
+**
+** call:
+** delay1ms
+**
+** revision:
+** 1. Here 2008/08/15
+**____________________________________________________________________________
+*/
+reg_check_t nand_reg[]=
+{
+ /*Register name, Type, Address, Default Value, Mask*/
+ {"NFC_CTRL", RW, (NFC_BASE + 0x0010), 0x00c82f20, 0x0000fffe},
+ {"NFC_CONF", RW, (NFC_BASE + 0x0014), 0x00000000, 0xfff73ffe},
+ {"NFC_CMD1", RW, (NFC_BASE + 0x0018), 0x00000050, 0x00ffffff},
+ {"NFC_CMD2", RW, (NFC_BASE + 0x001c), 0x00000000, 0x00ffffff},
+ {"NFC_CMD3", RW, (NFC_BASE + 0x0020), 0x00000000, 0x00ffffff},
+ {"NFC_ADDR", RW, (NFC_BASE + 0x0024), 0x00000000, 0xffffffff},
+// {"NFC_DATA", RW, (NFC_BASE + 0x0028), 0x00000000, 0xffffffff},
+ {"NFC_ECC", RO, (NFC_BASE + 0x002c), 0x00000000, 0x00000000},
+ {"NFC_STATUS", RO, (NFC_BASE + 0x0030), 0x2f000004, 0x00000000},
+ {"NFC_INT_EN", RW, (NFC_BASE + 0x0034), 0x00000000, 0x000000ff},
+// {"NFC_INT_ST", RW, (NFC_BASE + 0x0038), 0x00000000, 0x000000ff},
+ {"NFC_ADDR2", RW, (NFC_BASE + 0x003c), 0x00000000, 0x000000ff},
+ {"NFC_ECC2", RO, (NFC_BASE + 0x0040), 0x00000000, 0x00000000},
+ {"NFC_ECC3", RO, (NFC_BASE + 0x0044), 0x00000000, 0x00000000},
+ {"NFC_ECC4", RO, (NFC_BASE + 0x0048), 0x00000000, 0x00000000},
+ {"NFC_ECC_ST2", RO, (NFC_BASE + 0x004c), 0x00000000, 0x00000000},
+ {"NFC_ECC_ST3", RO, (NFC_BASE + 0x0050), 0x00000000, 0x00000000},
+};
+
+
+int
+nand_reg_rwtest(uint32_t pattern)
+{
+ int i=0, j=0;
+ uint32_t befVal=0;
+ uint8_t loop=0;
+ uint32_t reg_addr=0x0;
+ int retval=0;
+ int err=0;
+
+ for (i=1; nand_reg[i].name!=NULL; i++){/*Skip Software reset register(TSARM_RAI)*/
+ loop = 1;
+ for (j=0; j<loop; j++){
+ err=0;
+ reg_addr= nand_reg[i].addr+(j<<2);
+ befVal= (uint32_t)VPint(reg_addr);
+
+ if (nand_reg[i].type & RO) {
+ VPint(reg_addr)=~(VPint(reg_addr));
+ if(VPint(reg_addr) != befVal){
+ retval=-1;
+ err=1;
+ }
+ } else if (nand_reg[i].type & WO) {
+ VPint(reg_addr) = (pattern & nand_reg[i].mask);
+ if(VPint(reg_addr)!=0x00000000){
+ retval=-1;
+ err=1;
+ }
+ } else if (nand_reg[i].type & RW){
+ VPint(reg_addr) = (pattern & nand_reg[i].mask);
+ if (VPint(reg_addr) != (pattern & nand_reg[i].mask)) {
+ retval=-1;
+ err=1;
+ }
+ }
+
+ if (err==1) {
+ printk("Error(j:%d): %s is error, Pattern: 0x%x before:0x%x After:0x%x\n", \
+ j, nand_reg[i].name, (pattern & nand_reg[i].mask), befVal, (uint32_t) VPint(reg_addr));
+ }
+ }
+ }
+ return retval;
+
+}/*nand_reg_rwtest*/
+
+/*_____________________________________________________________________________
+** function name: tsarmRegDefCheck
+** descriptions:
+** Read the value of SAR register and to compare the default value is fit in with data
+** sheet.
+**
+** parameters:
+** None
+**
+** global:
+** sar_reg
+**
+** return:
+** Success: 0
+** Otherwise: -1
+**
+** call:
+** None
+**
+** revision:
+** 1. Here 2008/08/15
+**____________________________________________________________________________
+*/
+int
+nand_reg_defcheck(void)
+{
+ int i = 0, j = 0;
+ uint32_t reg_addr = 0;
+ uint8_t loop = 0;
+ int retval = 0;
+
+ for (i=0; nand_reg[i].name!=NULL; i++) {
+ reg_addr = nand_reg[i].addr;
+ if (nand_reg[i].type & NO_DEF) {
+ /*If the register is no default value, we skip this register.*/
+ continue;
+ } else {
+ loop = 1;
+ }
+
+ for(j=0; j<loop; j++) {
+ if(VPint(reg_addr + (j << 2)) != nand_reg[i].def_value) {
+ printk("Error(j:%d):%s is error, Default: 0x%x Real:0x%x\n", j, nand_reg[i].name, nand_reg[i].def_value, (uint32_t) VPint(reg_addr));
+ retval=-1;
+ }
+ }
+ }
+ return retval;
+} /*end nand_reg_defcheck*/
+
+
+/*_____________________________________________________________________________
+** function name: doTsarmRegCheck
+** descriptions:
+** Verify registers of SAR Modules, It's inclued default value and read/write test.
+**
+** parameters:
+** argc: argument counter
+** argv: argument array strings
+** p:
+**
+** global:
+**
+** return:
+** Success: 0
+** Otherwise: -1
+**
+** call:
+** tsarmReset
+** doTsarmRegCheck
+** tsarmRegRWTest
+**
+** revision:
+** 1. Here 2008/08/15
+**____________________________________________________________________________
+*/
+
+int
+doNandRegCheck(uint32_t type, uint32_t pattern)
+{
+ if(type == REG_DEF_CHK) {
+ if(nand_reg_defcheck() == -1) {
+ printk("NAND Register default value verification is failure!!\r\n");
+ } else{
+ printk("NAND Register default value verification is ok!!\r\n");
+ }
+ } else if(type == REG_RW_CHK) {
+ if(nand_reg_rwtest(pattern) == -1) {
+ printk("NAND Register Read/Write verification is failure!!\r\n");
+ } else {
+ printk("NAND Register Read/Write verification is ok!!\r\n");
+ }
+ } else {
+ printk("Usage:nand reg_check <partten>\n");
+ }
+ return 0;
+}
+
+
Index: linux-3.18.21/drivers/mtd/econet/ralink_nand.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/econet/ralink_nand.c 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,3926 @@
+#if defined (__UBOOT__)
+#include <common.h>
+#include <malloc.h>
+#include <linux/stddef.h>
+#include <linux/mtd/compat.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/mtd-abi.h>
+#include <linux/mtd/partitions.h>
+#include "ralink_nand.h"
+
+#define EIO 5 /* I/O error */
+#define EINVAL 22 /* Invalid argument */
+#define ENOMEM 12 /* Out of memory */
+#define EFAULT 14 /* Out of accessible address space */
+
+#define __devinit
+#define __devexit
+#if 0
+#define NULL_DEFINED( ... ) do{}while(0)
+#define NULL_DEF_RET_1( ... ) (1)
+#define NULL_DEF_RET_0( ... ) (0)
+
+#define HZ 1
+#define schedule_timeout(a) udelay(1000000*(a))
+#define cond_resched() NULL_DEF_RET_0()
+#endif
+
+#else // !defined (__UBOOT__)
+
+#define DEBUG
+#include <linux/device.h>
+#undef DEBUG
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/mtd/partitions.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include <asm/byteorder.h>
+#include <linux/proc_fs.h>
+#include "ralink_nand.h"
+
+#include <linux/mtd/map.h>
+#include <asm/tc3162/tc3162.h>
+#include "gdma.h"
+//#include <linux/mtd/gen_probe.h>
+
+#ifdef TCSUPPORT_NAND_RT63368
+#include "bmt.h"
+#endif
+
+
+#endif// !defined (__UBOOT__)
+
+
+/* frankliao modify 1000 ==> 1000000 */
+#define RETRY_NUMBER 1000000
+#define NFC_BIG_ENDIAN 0x02
+#define NFC_LITTLE_ENDIAN 0x0
+
+/* frankliao modify 20101215 */
+struct mtd_info *ranfc_mtd = NULL;
+struct ra_nand_chip *ra = NULL;
+
+//static int skipbbt = 0;
+static int ranfc_debug = 0;
+static int ranfc_bbt = 1;
+static int ranfc_verify = 1;
+static int ranfc_flags = 0;
+static int ranfc_page = 0;
+static int column_addr_cycle = 0;
+static int row_addr_cycle = 0;
+static int addr_cycle = 0;
+
+#ifdef TCSUPPORT_NAND_RT63368
+#define BMT_BAD_BLOCK_INDEX_OFFSET (1)
+#define POOL_GOOD_BLOCK_PERCENT 8/100
+#define SLAVE_IMAGE_OFFSET 0xf00000
+#define SIZE_2KiB_BYTES (2048)
+#define SIZE_64iB_BYTES (64)
+static int bmt_pool_size = 0;
+static bmt_struct *g_bmt = NULL;
+static init_bbt_struct *g_bbt = NULL;
+extern int nand_logic_size;
+#endif
+
+#if !defined (__UBOOT__)
+module_param(ranfc_debug, int, 0644);
+module_param(ranfc_bbt, int, 0644);
+module_param(ranfc_verify, int, 0644);
+#endif
+
+extern unsigned long flash_base;
+extern unsigned int (*ranand_read_byte)(unsigned long long);
+extern unsigned int (*ranand_read_dword)(unsigned long long);
+/* for test */
+extern void prom_printf(const char *fmt, ...);
+
+#if 0
+#define ra_dbg(args...) do { if (ranfc_debug) printk(args); } while(0)
+#else
+#define ra_dbg(args...)
+#endif
+
+#define CLEAR_INT_STATUS() ra_outl(NFC_INT_ST, ra_inl(NFC_INT_ST))
+#define NFC_TRANS_DONE() (ra_inl(NFC_INT_ST) & INT_ST_ND_DONE)
+
+#ifdef TCSUPPORT_DUAL_IMAGE_ENHANCE
+extern int offset;
+#endif
+
+#ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+static int block_table[1024];
+int nand_erase_next_goodblock(struct ra_nand_chip *ra, int block, unsigned long addr_l);
+int nand_write_next_goodblock(struct ra_nand_chip *ra, int page_u, int page_l);
+int nand_partition_check(int block);
+#endif
+
+#ifdef TCSUPPORT_NAND_RT63368
+int calc_bmt_pool_size(struct ra_nand_chip *ra);
+#endif
+
+static struct mtd_partition rt63165_test_partitions[] = {
+ { /* First partition */
+ name : "NAND Flash", /* Section */
+ size : 0x0, /* Size */
+ offset : 0 /* Offset from start of flash- location 0x0*/
+ },
+};
+
+
+static struct nand_opcode opcode_tables[] = {
+ {
+ type: STANDARD_SMALL_FLASH,
+ read1: 0x00,
+ read2: NONE,
+ readB: 0x01,
+ readoob: 0x50,
+ pageprog1: 0x8000,
+ pageprog2: 0x10,
+ writeoob: 0x8050,
+ erase1: 0x60,
+ erase2: 0xd0,
+ status: 0x70,
+ reset: 0xff,
+ },
+ {
+ type: STANDARD_LARGE_FLASH,
+ read1: 0x00,
+ read2: 0x30,
+ readB: NONE,
+ readoob: 0x00,
+ pageprog1: 0x80,
+ pageprog2: 0x10,
+ writeoob: 0x80,
+ erase1: 0x60,
+ erase2: 0xd0,
+ status: 0x70,
+ reset: 0xff,
+ },
+};
+
+
+static struct nand_info flash_tables[] = {
+ {
+ mfr_id: MANUFACTURER_ST3A,
+ dev_id: ST128W3A,
+ name: "ST NAND128W3A",
+ numchips: (1),
+ chip_shift: SIZE_16MiB_BIT,
+ page_shift: SIZE_512iB_BIT,
+ erase_shift: SIZE_16KiB_BIT,
+ oob_shift: SIZE_16iB_BIT,
+ badblockpos: (4), //512 pagesize bad blk offset --> 4
+ opcode_type: STANDARD_SMALL_FLASH,
+ },
+ {
+ mfr_id: MANUFACTURER_ST3A,
+ dev_id: ST512W3A,
+ name: "ST NAND512W3A",
+ numchips: (1),
+ chip_shift: SIZE_64MiB_BIT,
+ page_shift: SIZE_512iB_BIT,
+ erase_shift: SIZE_16KiB_BIT,
+ oob_shift: SIZE_16iB_BIT,
+ badblockpos: (4), //512 pagesize bad blk offset --> 4
+ opcode_type: STANDARD_SMALL_FLASH,
+ },
+ {
+ mfr_id: MANUFACTURER_ZENTEL,
+ dev_id: A5U1GA31ATS,
+ name: "ZENTEL NAND1GA31ATS",
+ numchips: (1),
+ chip_shift: SIZE_128MiB_BIT,
+ page_shift: SIZE_2KiB_BIT,
+ erase_shift: SIZE_128KiB_BIT,
+ oob_shift: SIZE_64iB_BIT,
+ badblockpos: (51),
+ opcode_type: STANDARD_LARGE_FLASH,
+
+ },
+ {
+ mfr_id: MANUFACTURER_MIRCON,
+ dev_id: MT29F2G08AAD,
+ name: "MICRON NAND2G08AAD",
+ numchips: (1),
+ chip_shift: SIZE_256MiB_BIT,
+ page_shift: SIZE_2KiB_BIT,
+ erase_shift: SIZE_128KiB_BIT,
+ oob_shift: SIZE_64iB_BIT,
+ badblockpos: (51),
+ opcode_type: STANDARD_LARGE_FLASH,
+ },
+ {
+ mfr_id: MANUFACTURER_MIRCON,
+ dev_id: MT29F4G08AAC,
+ name: "MICRON NAND4G08AAC",
+ numchips: (1),
+ chip_shift: SIZE_512MiB_BIT,
+ page_shift: SIZE_2KiB_BIT,
+ erase_shift: SIZE_128KiB_BIT,
+ oob_shift: SIZE_64iB_BIT,
+ badblockpos: (51),
+ opcode_type: STANDARD_LARGE_FLASH,
+ },
+ {
+ mfr_id: MANUFACTURER_SAMSUNG,
+ dev_id: K9F1G08U0D,
+ name: "SAMSUNG K9F1G08U0D",
+ numchips: (1),
+ chip_shift: SIZE_128MiB_BIT,
+ page_shift: SIZE_2KiB_BIT,
+ erase_shift: SIZE_128KiB_BIT,
+ oob_shift: SIZE_64iB_BIT,
+ badblockpos: (0),
+ opcode_type: STANDARD_LARGE_FLASH,
+
+ },
+ {/* SPANSION support */
+ mfr_id: MANUFACTURER_SPANSION,
+ dev_id: S34ML01G1,
+ name: "SPANSION S34ML01G1",
+ numchips: (1),
+ chip_shift: SIZE_128MiB_BIT,
+ page_shift: SIZE_2KiB_BIT,
+ erase_shift: SIZE_128KiB_BIT,
+ oob_shift: SIZE_64iB_BIT,
+ badblockpos: (0),
+ opcode_type: STANDARD_LARGE_FLASH,
+ }
+};
+
+
+static struct nand_ecclayout oob_layout_tables[] = {
+ /* 512iB page size flash */
+ {
+ .eccbytes = SMALL_FLASH_ECC_BYTES,
+ .eccpos = {SMALL_FLASH_ECC_OFFSET, SMALL_FLASH_ECC_OFFSET+1, SMALL_FLASH_ECC_OFFSET+2},
+ .oobfree = {
+ {.offset = 0, .length = 4},
+ {.offset = 8, .length = 8},
+ {.offset = 0, .length = 0}
+ },
+ .oobavail = 12,
+ // 4th byte is bad-block flag.
+ },
+ /* 2K page size flash */
+ {
+ .eccbytes = LARGE_FLASH_ECC_BYTES,
+ #ifdef CONFIG_MIPS_RT63365
+ .eccpos = {
+ LARGE_FLASH_ECC_OFFSET, LARGE_FLASH_ECC_OFFSET+1, LARGE_FLASH_ECC_OFFSET+2,
+ LARGE_FLASH_ECC_OFFSET+16, LARGE_FLASH_ECC_OFFSET+17, LARGE_FLASH_ECC_OFFSET+18,
+ LARGE_FLASH_ECC_OFFSET+32, LARGE_FLASH_ECC_OFFSET+33, LARGE_FLASH_ECC_OFFSET+34,
+ LARGE_FLASH_ECC_OFFSET+48, LARGE_FLASH_ECC_OFFSET+49, LARGE_FLASH_ECC_OFFSET+50
+ },
+ .oobfree = {
+ #ifdef TCSUPPORT_NAND_RT63368
+ {.offset = 4, .length = 1},
+ #else
+ {.offset = 0, .length = 5},
+ #endif
+ {.offset = 8, .length = 13},
+ {.offset = 24, .length = 13},
+ {.offset = 40, .length = 11},
+ {.offset = 52, .length = 1},
+ {.offset = 56, .length = 8}
+ },
+ #else
+ .eccpos = {
+ LARGE_FLASH_ECC_OFFSET, LARGE_FLASH_ECC_OFFSET+1, LARGE_FLASH_ECC_OFFSET+2,
+ LARGE_FLASH_ECC_OFFSET+3, LARGE_FLASH_ECC_OFFSET+4, LARGE_FLASH_ECC_OFFSET+5,
+ LARGE_FLASH_ECC_OFFSET+6, LARGE_FLASH_ECC_OFFSET+7, LARGE_FLASH_ECC_OFFSET+8,
+ LARGE_FLASH_ECC_OFFSET+9, LARGE_FLASH_ECC_OFFSET+10, LARGE_FLASH_ECC_OFFSET+11
+ },
+ .oobfree = {
+ {.offset = 0, .length = 51},
+ {.offset = 0, .length = 0}
+ },
+ #endif
+
+ #ifdef TCSUPPORT_NAND_RT63368
+ .oobavail = 47,
+ #else
+ .oobavail = 51,
+ #endif
+ // 2009th byte is bad-block flag.
+ }
+};
+
+
+/*************************************************************
+ * nfc functions
+ *************************************************************/
+static int nfc_wait_ready(struct ra_nand_chip *ra);
+
+/*
+unsigned int
+nfc_addr_translate(struct ra_nand_chip *ra, unsigned int addr, unsigned int *column, unsigned int *row)
+{
+ unsigned int _col, _row;
+
+ _row = (addr >> ra->flash->page_shift);
+ _col = addr & ((1<<ra->flash->page_shift) - CONFIG_SUBPAGE_BIT);
+
+ if (column)
+ *column = _col;
+ if (row)
+ *row = _row;
+
+ return ((_row) << (column_addr_cycle * 8)) | (_col & ((1<<(column_addr_cycle * 8))-1));
+}*/
+
+
+/**
+ * reset nand chip
+ */
+static int
+nfc_chip_reset(struct ra_nand_chip *ra)
+{
+ int status, cmd1;
+
+// cmd1 = ra->opcode->reset;
+ cmd1 = 0xff;
+ // reset nand flash
+ ra_outl(NFC_CMD1, cmd1);
+// ra_outl(NFC_ADDR, 0xfffffff);
+ ra_outl(NFC_CONF, 0x0101 | (addr_cycle << 16));
+
+ status = nfc_wait_ready(ra); //erase wait 5us
+ if (status & NAND_STATUS_FAIL) {
+ printk("%s: fail \n", __func__);
+ }
+
+ return (int)(status & NAND_STATUS_FAIL);
+}
+
+
+/**
+ * clear NFC and flash chip.
+ */
+static int
+nfc_all_reset(struct ra_nand_chip *ra)
+{
+ long retry;
+
+ ra_dbg("%s: \n", __func__);
+
+ // reset controller
+ ra_outl(NFC_CTRL, ra_inl(NFC_CTRL) | 0x02); //clear data buffer
+ ra_outl(NFC_CTRL, ra_inl(NFC_CTRL) & ~0x02); //clear data buffer
+
+ CLEAR_INT_STATUS();
+
+ retry = RETRY_NUMBER;
+ while ((ra_inl(NFC_INT_ST) & 0x02) != 0x02 && retry--);
+ if (retry <= 0) {
+ printk("nfc_all_reset: clean buffer fail \n");
+ return -1;
+ }
+
+ retry = RETRY_NUMBER;
+ while ((ra_inl(NFC_STATUS) & 0x1) != 0x0 && retry--) { //fixme, controller is busy ?
+ // ndelay(1)
+ // mdelay(1);
+ }
+
+ nfc_chip_reset(ra);
+
+ return 0;
+
+}
+
+
+/** NOTICE: only called by nfc_wait_ready().
+ * @return -1, nfc can not get transction done
+ * @return 0, ok.
+ */
+static int
+_nfc_read_status(struct ra_nand_chip *ra, char *status)
+{
+ unsigned long cmd1, conf;
+ unsigned int endian = 0;
+ int int_st, nfc_st;
+ long retry;
+
+ cmd1 = 0x70;
+// cmd1 = ra->opcode->status;
+ conf = 0x000101 | (1 << 20);
+
+ //fixme, should we check nfc status?
+ CLEAR_INT_STATUS();
+
+ ra_outl(NFC_CMD1, cmd1);
+ ra_outl(NFC_CONF, conf);
+
+ /* FIXME,
+ * 1. since we have no wired ready signal, directly
+ * calling this function is not gurantee to read right status under ready state.
+ * 2. the other side, we can not determine how long to become ready, this timeout retry is nonsense.
+ * 3. SUGGESTION: call nfc_read_status() from nfc_wait_ready(),
+ * that is aware about caller (in sementics) and has snooze plused nfc ND_DONE.
+ */
+ retry = RETRY_NUMBER;
+ do {
+ nfc_st = ra_inl(NFC_STATUS);
+ int_st = ra_inl(NFC_INT_ST);
+// ndelay(1);
+ } while (!(int_st & INT_ST_RX_BUF_RDY) && retry--);
+
+
+ if (!(int_st & INT_ST_RX_BUF_RDY)) {
+ printk("nfc_read_status: NFC fail, int_st(%x), nfc:%x, reset nfc and flash. \n",
+ int_st, nfc_st);
+ nfc_all_reset(ra);
+ *status = NAND_STATUS_FAIL;
+ return -1;
+ }
+ /* frankliao modify 20101001 */
+// *status = (char)(le32_to_cpu(ra_inl(NFC_DATA)) & 0x0ff);
+ // frank modify 20110418
+ // rt63365 modify
+ #ifdef CONFIG_MIPS_RT63365
+ /* frank modify 20110420 for rt63365 big endian*/
+ *status = (char)(((ra_inl(NFC_DATA)) & 0xff000000)>>24);
+ #else
+ /* frankliao modify 20101001 for rt63165 a*/
+ *status = (char)((ra_inl(NFC_DATA)) & 0x0ff);
+ #endif
+
+ return 0;
+}
+
+
+/**
+ * @return !0, chip protect.
+ * @return 0, chip not protected.
+ */
+static int
+nfc_check_wp(struct ra_nand_chip *ra)
+{
+ /* Check the WP bit */
+#if !defined CONFIG_NOT_SUPPORT_WP
+// printk("NFC_CTRL : %x\n", ra_inl(NFC_CTRL));
+// printk("WP BIT : %d\n", ra_inl(NFC_CTRL) & 0x01);
+ return !(ra_inl(NFC_CTRL) & 0x01);
+#else
+ char result = 0;
+ int ret;
+
+ ret = _nfc_read_status(ra, &result);
+ //fixme, if ret < 0
+
+ return !(result & NAND_STATUS_WP);
+#endif
+
+}
+
+
+#if !defined CONFIG_NOT_SUPPORT_RB
+/*
+ * @return !0, chip ready.
+ * @return 0, chip busy.
+ */
+static int
+nfc_device_ready(void)
+{
+ /* Check the ready */
+ return (ra_inl(NFC_STATUS) & 0x04);
+}
+#endif
+
+/* frankliao added 20101015 */
+static int
+nfc_read_id(void)
+{
+ long retry;
+ unsigned int ret_data;
+
+ retry = RETRY_NUMBER;
+ CLEAR_INT_STATUS();
+
+ ra_outl(NFC_CMD1, NAND_CMD1_READID);
+ ra_outl(NFC_CONF, NAND_CONF_READID);
+
+ while (retry > 0) {
+ int int_st;
+ int_st = ra_inl(NFC_INT_ST);
+
+ if (int_st & INT_ST_RX_BUF_RDY) {
+ ret_data = ra_inl(NFC_DATA);
+ break;
+ } else {
+// mdelay(1);
+// ndelay(1);
+ retry--;
+ }
+ }
+
+// printk("READID : %x\n", ret_data);
+
+ if (retry <= 0) {
+ printk("%s: read id fail \n", __func__);
+ return -1;
+ } else {
+ return ret_data;
+ }
+
+}
+
+
+/**
+ * generic function to get data from flash.
+ * @return data length reading from flash.
+ */
+ static int
+_ra_nand_pull_data(unsigned char *buf, int len, int use_gdma)
+{
+ unsigned char *p = buf;
+ long retry;
+
+ // receive data by use_gdma
+ if (use_gdma) {
+ if (_ra_nand_dma_pull((unsigned long)p, len)) {
+ printk("%s: fail \n", __func__);
+ len = -1; //return error
+ }
+ return len;
+ }
+
+ //fixme: retry count size?
+ retry = RETRY_NUMBER;
+ // no gdma
+ while (len > 0) {
+ int int_st = ra_inl(NFC_INT_ST);
+ /* frankliao added 20101004 */
+ if (int_st & INT_ST_RX_BUF_RDY) {
+ unsigned int ret_data;
+ int ret_size;
+#if 1 //!fixme, need optimal by endian
+ ret_data = ra_inl(NFC_DATA);
+ /* frankliao added 20101005 */
+ ra_outl(NFC_INT_ST, INT_ST_RX_BUF_RDY);
+
+ ret_size = sizeof(unsigned long);
+ ret_size = min(ret_size, len);
+ len -= ret_size;
+
+ // rt63365 modify
+ #ifdef CONFIG_MIPS_RT63365
+ *(unsigned int*)p = ret_data;
+ p += ret_size;
+ #else
+ while (ret_size-- > 0) {
+ //nfc is little endian
+ *p++ = ret_data & 0x0ff;
+ // frankliao test 20101213
+// wmb();
+ ret_data >>= 8;
+ }
+ #endif
+#else
+ //optimaize
+#endif
+ } else if (int_st & INT_ST_ND_DONE) {
+// printk("!! done\n");
+ break;
+ } else {
+// ndelay(1);
+ if (retry-- < 0)
+ break;
+ }
+ }
+
+ return (p-buf);
+}
+
+
+/**
+ * generic function to put data into flash.
+ * @return data length writing into flash.
+ */
+static int
+_ra_nand_push_data(unsigned char *buf, int len, int use_gdma)
+{
+ unsigned char *p = buf;
+ long retry;
+
+// printk("LENGTH = %d\n", len);
+ // receive data by use_gdma
+ if (use_gdma) {
+ if (_ra_nand_dma_push((unsigned long)p, len))
+ len = 0;
+ return len;
+ }
+
+ // no gdma frankliao modify old value : 528
+ retry = RETRY_NUMBER;
+ while(len > 0) {
+ int int_st = ra_inl(NFC_INT_ST);
+ if (int_st & INT_ST_TX_BUF_RDY) {
+ unsigned int tx_data = 0;
+ int tx_size;
+
+#if 1 //fixme, need optimaize in words
+ tx_size = min(len, (int)sizeof(unsigned long));
+
+ // rt63365 modify
+ #ifdef CONFIG_MIPS_RT63365
+ tx_data = *(unsigned int*)p;
+ p += tx_size;
+ #else
+ int iter;
+ for (iter = 0; iter < tx_size; iter++) {
+ /* nfc is little endian */
+ tx_data |= ((*p++ & 0xff) << (8*iter));
+ }
+
+ #endif
+
+ /* frankliao mark */
+ ra_outl(NFC_INT_ST, INT_ST_TX_BUF_RDY);
+ ra_outl(NFC_DATA, tx_data);
+
+ len -= tx_size;
+#else
+ //optimaize
+#endif
+ }
+
+ if (int_st & INT_ST_ND_DONE) { // code review tag
+ break;
+ } else {
+ /* frankliao modify 20101006 */
+// ndelay(1);
+ if (retry-- < 0) {
+ ra_dbg("%s p:%p buf:%p \n", __func__, p, buf);
+ break;
+ }
+ }
+ }
+
+ return (int)(p-buf);
+
+}
+
+
+static int
+nfc_select_chip(struct ra_nand_chip *ra, int chipnr)
+{
+#if (CONFIG_NUMCHIPS == 1)
+ if (!(chipnr < CONFIG_NUMCHIPS))
+ return -1;
+ return 0;
+#else
+ BUG();
+#endif
+}
+
+
+/** @return -1: chip_select fail
+ * 0 : both CE and WP==0 are OK
+ * 1 : CE OK and WP==1
+ */
+static int
+nfc_enable_chip(struct ra_nand_chip *ra, loff_t offs, int read_only)
+{
+ /* frankliao fix 20101223 */
+ int chipnr = (int)(offs >> ra->flash->chip_shift);
+
+// ra_dbg("%s: offs:%llx read_only:%x \n", __func__, offs, read_only);
+// ra_dbg("chipnr:%d\n", chipnr);
+
+ chipnr = nfc_select_chip(ra, chipnr);
+ if (chipnr < 0) {
+ printk("%s: chip select error, offs(%llx)\n", __func__, offs);
+ return -1;
+ }
+
+ if (!read_only)
+ return nfc_check_wp(ra);
+
+ return 0;
+}
+
+
+/** wait nand chip becoming ready and return queried status.
+ * @param snooze: sleep time in ms unit before polling device ready.
+ * @return status of nand chip
+ * @return NAND_STATUS_FAIL if something unexpected.
+ */
+static int
+nfc_wait_ready(struct ra_nand_chip *ra)
+{
+ long retry;
+ char status;
+
+#if 0
+ // wait nfc idle,
+ if (snooze_ms == 0)
+ snooze_ms = 1;
+ else
+ schedule_timeout(snooze_ms * HZ / 1000);
+
+ snooze_ms = retry = snooze_ms *1000000 / 100 ;
+#endif
+
+ retry = RETRY_NUMBER;
+ while (!NFC_TRANS_DONE() && retry--) {
+// if (!cond_resched());
+// ndelay(1);
+ }
+
+ if (!NFC_TRANS_DONE()) {
+ printk("nfc_wait_ready: no transaction done \n");
+ return NAND_STATUS_FAIL;
+ }
+
+#if !defined (CONFIG_NOT_SUPPORT_RB)
+ //fixme
+ retry = RETRY_NUMBER;
+ while(!(status = nfc_device_ready()) && retry--) {
+// ndelay(1);
+ }
+
+ if (status == 0) {
+ printk("nfc_wait_ready: no device ready. \n");
+ return NAND_STATUS_FAIL;
+ }
+
+ // frank modify 201104221 for device ready issue (ecc verify occurs
+ retry = RETRY_NUMBER;
+ while (retry--) {
+ _nfc_read_status(ra, &status);
+ if (status & NAND_STATUS_READY)
+ break;
+// ndelay(1);
+ }
+
+// _nfc_read_status(ra, &status);
+
+ return status;
+
+#else
+
+ retry = RETRY_NUMBER;
+ while (retry--) {
+ _nfc_read_status(ra, &status);
+ if (status & NAND_STATUS_READY)
+ break;
+// ndelay(1);
+ }
+ if (retry<0)
+ printk("nfc_wait_ready 2: no device ready, status(%x). \n", status);
+
+ return status;
+
+#endif
+}
+
+
+/**
+ * return 0: erase OK
+ * return -EIO: fail
+ */
+static int
+nfc_erase_block(struct ra_nand_chip *ra, int row_addr)
+{
+ unsigned long cmd1, cmd2, conf;
+ char status;
+
+// cmd1 = 0x60;
+// cmd2 = 0xd0;
+ cmd1 = ra->opcode->erase1;
+ cmd2 = ra->opcode->erase2;
+
+
+ if (ra->flash->page_shift == SIZE_512iB_BIT) {
+ conf = 0x00513 | ((row_addr_cycle)<<16);
+ } else {
+ conf = 0x00503 | ((row_addr_cycle)<<16);
+ }
+
+ ra_dbg("Erase CMD1:%2lx\n", cmd1);
+ ra_dbg("Erase CMD2:%2lx\n", cmd2);
+ ra_dbg("Erase BLOCK:%2x\n", row_addr);
+ ra_dbg("CONFIG:%5lx\n", conf);
+
+ // set NFC
+// printk("%s: cmd1: %lx, cmd2:%lx bus_addr: %x, conf: %lx \n",
+// __func__, cmd1, cmd2, bus_addr, conf);
+
+ //fixme, should we check nfc status?
+ CLEAR_INT_STATUS();
+
+ ra_outl(NFC_CMD1, cmd1);
+ ra_outl(NFC_CMD2, cmd2);
+ ra_outl(NFC_ADDR, row_addr);
+ ra_outl(NFC_CONF, conf);
+
+ status = nfc_wait_ready(ra); //erase wait 3ms
+ if (status & NAND_STATUS_FAIL) {
+ printk("%s: fail \n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+
+}
+
+
+static inline int
+_nfc_write_raw_data(struct ra_nand_chip *ra, int cmd1, int cmd3, unsigned long row_addr,
+ unsigned long column_addr, int conf, unsigned char *buf, int len, int flags)
+{
+ int ret;
+
+ CLEAR_INT_STATUS();
+ ra_dbg("In _nfc_write_raw_data function\n");
+ ra_dbg("NFC_CMD1:%x\n", cmd1);
+ ra_dbg("NFC_CMD3:%x\n", cmd3);
+ ra_dbg("ROW_ADDR:%lx\n", row_addr);
+ ra_dbg("COLUMN_ADDR:%lx\n", column_addr);
+ ra_dbg("NFC_CONF:%x\n", conf);
+
+ ra_outl(NFC_CMD1, cmd1);
+ ra_outl(NFC_CMD3, cmd3);
+
+ if (ra->flash->page_shift == SIZE_2KiB_BIT) {
+ ra_outl(NFC_ADDR, (row_addr << (column_addr_cycle << 3)) | column_addr );
+ ra_outl(NFC_ADDRII, ((row_addr>>(32-(column_addr_cycle << 3)))));
+ } else {
+ ra_outl(NFC_ADDR, (row_addr << (column_addr_cycle << 3)) | column_addr );
+ }
+
+#if 0
+ if (ra->flash->page_shift == SIZE_512iB_BIT) {
+ ra_outl(NFC_ADDR, (unsigned int)bus_addr);
+ } else {
+ ra_outl(NFC_ADDR, (unsigned int)(bus_addr & 0xffffffff));
+ ra_outl(NFC_ADDRII, (unsigned int)((bus_addr>>32) & 0xff));
+ }
+#endif
+
+ ra_outl(NFC_CONF, conf);
+
+ ret = _ra_nand_push_data(buf, len, flags & FLAG_USE_GDMA);
+ if (ret != len) {
+ ra_dbg("%s: ret:%x (%x) \n", __func__, ret, len);
+ return NAND_STATUS_FAIL;
+ }
+
+// ret = nfc_wait_ready(1); //write wait 1ms
+ /* frankliao modify 20101004 */
+ ret = nfc_wait_ready(ra); //write wait 1ms
+ if (ret & NAND_STATUS_FAIL) {
+ printk("%s: fail \n", __func__);
+ return NAND_STATUS_FAIL;
+ }
+
+ return 0;
+}
+
+
+static inline int
+_nfc_read_raw_data(struct ra_nand_chip *ra, int cmd1, int cmd2, unsigned long row_addr,
+ unsigned long column_addr, int conf, unsigned char *buf, int len, int flags)
+{
+ int ret;
+
+ ra_dbg("in _nfc_read_raw_data function\n");
+ ra_dbg("NFC_CMD1: %x\n", cmd1);
+ ra_dbg("cmd1: %x\n", cmd1);
+ ra_dbg("row_addr: %x\n", row_addr);
+ ra_dbg("column_addr: %x\n", column_addr);
+ ra_dbg("NFC_ADDR:%x\n", (row_addr << (column_addr_cycle << 3)) | column_addr );
+ ra_dbg("NFC_ADDRII:%x\n",((row_addr>>(32 - (column_addr_cycle << 3)))));
+ ra_dbg("conf: %x\n", conf);
+ ra_dbg("len : %x\n", len);
+
+ CLEAR_INT_STATUS();
+ ra_outl(NFC_CMD1, cmd1);
+
+#if 0
+ if (ra->flash->page_shift == SIZE_2KiB_BIT) {
+ ra_outl(NFC_CMD2, cmd2);
+ ra_outl(NFC_ADDR, (unsigned int)(bus_addr & 0xffffffff));
+ ra_outl(NFC_ADDRII, (unsigned int)((bus_addr>>32) & 0xff));
+ } else {
+ ra_outl(NFC_ADDR, (unsigned int)bus_addr);
+ }
+#endif
+
+ if (ra->flash->page_shift == SIZE_2KiB_BIT) {
+ ra_outl(NFC_CMD2, cmd2);
+ ra_outl(NFC_ADDR, (row_addr << (column_addr_cycle << 3)) | column_addr );
+ ra_outl(NFC_ADDRII, ((row_addr>>(32 - (column_addr_cycle << 3)))));
+ } else {
+ ra_outl(NFC_ADDR, (row_addr << (column_addr_cycle << 3)) | column_addr );
+ }
+
+ ra_outl(NFC_CONF, conf);
+
+ ret = _ra_nand_pull_data(buf, len, flags & FLAG_USE_GDMA);
+
+// ra_dbg("in _nfc_read_raw_data function\n");
+
+ if (ret != len) {
+ ra_dbg("%s: ret:%x (%x) \n", __func__, ret, len);
+ return NAND_STATUS_FAIL;
+ }
+
+ //FIXME, this section is not necessary
+ // frankliao modify
+ ret = nfc_wait_ready(ra); //wait ready
+// ret = nfc_wait_ready(0); //wait ready
+ if (ret & NAND_STATUS_FAIL) {
+ printk("%s: fail \n", __func__);
+ return NAND_STATUS_FAIL;
+ }
+ return 0;
+}
+
+
+/**
+ * @return !0: fail
+ * @return 0: OK
+ */
+int
+nfc_read_oob(struct ra_nand_chip *ra, int page, unsigned int offs, unsigned char *buf, int len, int flags)
+{
+ unsigned int cmd1 = 0, cmd2 = 0, conf = 0;
+ unsigned long row_addr, column_addr;
+ unsigned int ecc_en;
+ int use_gdma;
+ int status;
+
+ int pages_perblock = 1<<(ra->flash->erase_shift - ra->flash->page_shift);
+ // constrain of nfc read function
+
+#if defined (WORKAROUND_RX_BUF_OV)
+ BUG_ON (len > 60); //problem of rx-buffer overrun
+#endif
+ BUG_ON (offs >> ra->flash->oob_shift); //page boundry
+ BUG_ON ((unsigned int)(((offs + len) >> ra->flash->oob_shift) + page) >
+ ((page + pages_perblock) & ~(pages_perblock-1))); //block boundry
+
+ use_gdma = flags & FLAG_USE_GDMA;
+ ecc_en = flags & FLAG_ECC_EN;
+ row_addr = page;
+ column_addr = offs & ((1 << (column_addr_cycle<<3)) -1);
+// cmd1 = 0x50;
+ if (ra->flash->page_shift == SIZE_512iB_BIT) {
+ cmd1 = ra->opcode->readoob;
+ conf = 0x000141| ((addr_cycle)<<16) | ((len) << 20);
+ } else {
+ cmd1 = ra->opcode->read1;
+ cmd2 = ra->opcode->read2;
+ conf = 0x000511| ((addr_cycle)<<16) | ((len) << 20);
+ column_addr |= (1<<11);
+ }
+
+ if (ecc_en)
+ conf |= (1<<3);
+ if (use_gdma)
+ conf |= (1<<2);
+
+// ra_dbg("%s: cmd1: %x, bus_addr: %llx, conf: %x, len:%x, flag:%x\n",
+// __func__, cmd1, bus_addr, conf, len, flags);
+
+ status = _nfc_read_raw_data(ra, cmd1, cmd2, row_addr, column_addr, conf, buf, len, flags);
+// ra_dbg("in read_oob function\n");
+// ra_dbg("after read raw data\n");
+// ra_dbg("buf : %02x\n", buf[0] & 0xff);
+ if (status & NAND_STATUS_FAIL) {
+ printk("%s: fail\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+
+/**
+ * @return !0: fail
+ * @return 0: OK
+ */
+int
+nfc_write_oob(struct ra_nand_chip *ra, int page, unsigned int offs, unsigned char *buf, int len, int flags)
+{
+ unsigned int cmd1 = 0, cmd3=0, conf = 0;
+// unsigned long long bus_addr = 0;
+ unsigned long row_addr, column_addr;
+ int use_gdma;
+ int status;
+
+ int pages_perblock = 1<<(ra->flash->erase_shift - ra->flash->page_shift);
+ // constrain of nfc read function
+
+ BUG_ON (offs >> ra->flash->oob_shift); //page boundry
+ BUG_ON ((unsigned int)(((offs + len) >> ra->flash->oob_shift) + page) >
+ ((page + pages_perblock) & ~(pages_perblock-1))); //block boundry
+
+ use_gdma = flags & FLAG_USE_GDMA;
+
+ row_addr = page;
+ column_addr = offs & ((1<<(column_addr_cycle<<3)) - 1);
+
+// bus_addr = (((unsigned long long) page << (column_addr_cycle*8)) |
+// ((unsigned long long) offs & ((1<<column_addr_cycle*8) - 1)));
+
+ cmd1 = ra->opcode->writeoob;
+ cmd3 = ra->opcode->pageprog2;
+
+ /* frankliao modify 20100105 */
+ if (ra->flash->page_shift == SIZE_512iB_BIT) {
+ conf = 0x001243 | ((addr_cycle)<<16) | ((len) << 20);
+ } else if (ra->flash->page_shift == SIZE_2KiB_BIT) {
+ conf = 0x001103 | ((addr_cycle)<<16) | ((len) << 20);
+ column_addr |= (1<<11);
+ } else {
+ printk("Undefined Write Page Command\n");
+ return -EIO;
+ }
+
+#if 0
+ /* frankliao added 20101015 */
+ if (cmd1 <= 0xff && cmd1 >= 0x0)
+ conf = 0x001123 | ((addr_cycle)<<16) | ((len) << 20);
+ else if (cmd1 > 0xff && cmd1 <= 0xffff)
+ conf = 0x001223 | ((addr_cycle)<<16) | ((len) << 20);
+ else {
+ printk("Undefine write oob command\n");
+ return -EIO;
+ }
+#endif
+
+ if (use_gdma)
+ conf |= (1<<2);
+
+ // set NFC
+ ra_dbg("%s: cmd1: %x, cmd3: %x row_addr: %lx, column_addr: %lx, conf: %x, len:%x\n",
+ __func__, cmd1, cmd3, row_addr, column_addr, conf, len);
+
+ status = _nfc_write_raw_data(ra, cmd1, cmd3, row_addr, column_addr, conf, buf, len, flags);
+ if (status & NAND_STATUS_FAIL) {
+ printk("%s: fail \n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+
+int nfc_read_page(struct ra_nand_chip *ra, unsigned char *buf, int page, int flags);
+int nfc_write_page(struct ra_nand_chip *ra, unsigned char *buf, int page, int flags);
+
+
+/*nfc_ecc_err_handler
+for 512 byte/page
+ return:
+ ECC_NO_ERR: no error
+ ECC_CODE_ERR: ECC code error
+ ECC_DATA_ERR: more than 1 bit error, un-correctable
+ ECC_ONE_BIT_ERR: 1 bit correctable error
+ ECC_NFC_CONFLICT: software check result conflict with HW check result
+*/
+int nfc_ecc_err_handler(int page_index , unsigned char *ecc_from_oob,
+unsigned char *ecc_from_nfc, unsigned long *error_byte_index, unsigned long *error_bit_index){
+ unsigned long old_ecc = 0;
+ unsigned long new_ecc = 0;
+ unsigned long ecc_rst = 0;
+ int ecc_bit_index = 0;
+ int ecc_bit1_cnt = 0;
+ unsigned long temp = 0;
+
+ memcpy((unsigned char *)&old_ecc + 1 , ecc_from_oob , 3);
+ memcpy((unsigned char *)&new_ecc + 1 , ecc_from_nfc , 3);
+
+
+ ecc_rst = old_ecc^new_ecc;
+
+
+ if(ecc_rst == 0){//no ecc error
+ return ECC_NO_ERR;
+ }else{
+ for(ecc_bit_index = 0; ecc_bit_index< 24; ecc_bit_index++ ){
+ if((ecc_rst&(1<<ecc_bit_index)) != 0){
+ ecc_bit1_cnt++;
+ }
+ }
+ printk("\r\n ecc_rst= 0x%08x, ecc_bit1_cnt=%d ", ecc_rst, ecc_bit1_cnt);
+ if(ecc_bit1_cnt == 1){//ECC code error
+ return ECC_CODE_ERR;
+ }else if(ecc_bit1_cnt != 12){//more than 1 bit error, un-correctable
+ printk("\r\n more than one bit ECC error \r\n");
+ return ECC_DATA_ERR;
+ }else if(ecc_bit1_cnt == 12){// 1 bit correctable error, get error bit
+ temp = ra_inl(NFC_ECC_ST + page_index*4);
+ if(unlikely((temp&0x1) == 0)){
+ printk("\r\n ECC result conflict!! \r\n");
+ return ECC_NFC_CONFLICT;
+ }
+ *error_byte_index = ((temp>>6)&0x1ff);
+ *error_bit_index = ((temp>>2)&0x7);
+ printk("\r\n correctable ECC error error_byte_index=%d, error_bit_index=%d",
+ *error_byte_index, *error_bit_index);
+ return ECC_ONE_BIT_ERR;
+ }
+ }
+ return ECC_NO_ERR;
+}
+
+
+
+#if !defined (WORKAROUND_RX_BUF_OV)
+
+/**
+ * nfc_ecc_verify
+ return value:
+ 0: data OK or data correct OK
+ -1: data ECC fail
+ */
+int nfc_ecc_verify(struct ra_nand_chip *ra, unsigned char *buf, int page, int mode)
+{
+ int ret, i, j, ecc_num;
+ unsigned char *p, *e;
+ unsigned long err_byte_index = 0;
+ unsigned long err_bit_index = 0;
+ int ecc_error_code = ECC_DATA_ERR;
+ int ecc_ret = -1;
+
+
+ /* 512 bytes data has a ecc value */
+ int ecc_bytes, ecc_offset, ecc[4];
+ unsigned char ecc_swap[3] = {0};
+ unsigned char correct_byte = 0;
+
+
+ // printk("%s, page:%x mode:%d\n", __func__, page, mode);
+
+ if (mode == FL_WRITING) {
+ int len = (1 << ra->flash->page_shift) + (1 << ra->flash->oob_shift);
+ int conf;
+
+ if (ra->flash->page_shift == SIZE_512iB_BIT)
+ conf = 0x000141;
+ else
+ conf = 0x000511;
+
+ conf |= ((addr_cycle) << 16) | (len << 20);
+ conf |= (1<<3); //(ecc_en)
+// conf |= (1<<2); // (use_gdma)
+
+ p = ra->readback_buffers;
+
+// ret = nfc_read_page(ra, ra->readback_buffers, page, FLAG_USE_GDMA | FLAG_ECC_EN);
+ ret = nfc_read_page(ra, ra->readback_buffers, page, FLAG_ECC_EN);
+// ret = nfc_read_page(ra, ra->readback_buffers, page, FLAG_NONE);
+ if (ret == 0)
+ goto ecc_check;
+
+ //FIXME, double comfirm
+ printk("%s: read back fail, try again \n",__func__);
+// ret = nfc_read_page(ra, ra->readback_buffers, page, FLAG_USE_GDMA | FLAG_ECC_EN);
+ ret = nfc_read_page(ra, ra->readback_buffers, page, FLAG_ECC_EN);
+// ret = nfc_read_page(ra, ra->readback_buffers, page, FLAG_NONE);
+
+
+ if (ret != 0) {
+ printk("\t%s: read back fail agian \n",__func__);
+ goto bad_block;
+ }
+ } else if (mode == FL_READING) {
+ p = buf;
+ } else {
+ return -2;
+ }
+
+ecc_check:
+ /* frankliao modify 20101018 */
+ p += (1<<ra->flash->page_shift);
+
+ ecc[0] = ra_inl(NFC_ECC);
+
+ if (ecc[0] == 0) {
+ //printk("clean page.\n");
+ return 0;
+ }
+
+ ecc_bytes = ra->oob->eccbytes;
+
+ ecc_offset = ra->oob->eccpos[0];
+
+ /* each ecc register store 3 bytes ecc value */
+ if (ecc_bytes == 12) {
+ ecc[1] = ra_inl(NFC_ECCII);
+ ecc[2] = ra_inl(NFC_ECCIII);
+ ecc[3] = ra_inl(NFC_ECCIV);
+ }
+
+ ecc_num = ecc_bytes/3;
+ for (i=0; i<ecc_num; i++) {
+ e = (unsigned char*)&ecc[i];
+ ecc_swap[0] = *((unsigned char*)&ecc[i]+3);
+ ecc_swap[1] = *((unsigned char*)&ecc[i]+2);
+ ecc_swap[2] = *((unsigned char*)&ecc[i]+1);
+ #if defined (TCSUPPORT_NAND_BADBLOCK_CHECK) || defined (TCSUPPORT_NAND_RT63368)
+ ecc_offset = ra->oob->eccpos[i * 3];
+ #endif
+ err_byte_index = 0;
+ err_bit_index = 0;
+ /* each ecc register store 3 bytes ecc value */
+ ecc_ret = 0;
+ for (j=0; j<3; j++) {
+
+ #ifdef __LITTLE_ENDIAN
+ int eccpos = ecc_offset + j + i*3;
+ #else
+ #if defined (TCSUPPORT_NAND_BADBLOCK_CHECK) || defined (TCSUPPORT_NAND_RT63368)
+ int eccpos = ecc_offset - j + 2;
+ #else
+ int eccpos = ecc_offset - j + 2 + i*3;
+ #endif
+ #endif
+
+ if (*(p + eccpos) != *(e + j + 1)) {
+
+ #ifdef __LITTLE_ENDIAN
+ printk("%s mode:%s, invalid ecc, page: %x read:%x %x %x, ecc:%x \n",
+ __func__, (mode == FL_READING)?"read":"write", page,
+ *(p+ecc_offset), *(p+ecc_offset+1), *(p+ecc_offset+2), ecc[i]);
+ #else
+ printk("%s mode:%s, invalid ecc, page: %x read:%x %x %x, ecc:%x \n",
+ __func__, (mode == FL_READING)?"read":"write", page,
+ *(p+ecc_offset+2), *(p+ecc_offset+1), *(p+ecc_offset), ecc[i]);
+ #endif
+ ecc_ret =-1;
+ break;
+ }
+ }
+ if(ecc_ret == -1){
+ ecc_error_code = nfc_ecc_err_handler(i , p+ecc_offset, ecc_swap, &err_byte_index,
+ &err_bit_index );
+ if(ecc_error_code != ECC_NO_ERR){
+ printk("\r\n ecc_error_code= %d, page=%d ,i=%d", ecc_error_code, page, i);
+ if(ecc_error_code == ECC_ONE_BIT_ERR){
+ //correct the error
+ printk("\r\n err_byte_index= %d, err_bit_index=%d",
+ err_byte_index , err_bit_index);
+ correct_byte = buf[err_byte_index + i*512];
+ if((correct_byte&(1<<err_bit_index)) != 0){
+ correct_byte &= (~(1<<err_bit_index));
+ }else{
+ correct_byte |= (1<<err_bit_index);
+ }
+ buf[err_byte_index + i*512] = correct_byte;
+ ecc_ret = 0;
+ ecc_error_code = ECC_NO_ERR;
+ continue;
+ }
+ return ecc_error_code;
+ }
+ }
+ }
+
+ return 0;
+
+bad_block:
+ return -1;
+}
+
+
+
+#else
+
+void ranfc_dump(void)
+{
+ int i;
+ for (i=0; i<11; i++) {
+ if (i==6)
+ continue;
+ printk("%x: %x \n", NFC_BASE + i*4, ra_inl(NFC_BASE + i*4));
+ }
+}
+
+
+/**
+ * @return 0, ecc OK or corrected.
+ * @return NAND_STATUS_FAIL, ecc fail.
+ */
+
+int
+nfc_ecc_verify(struct ra_nand_chip *ra, unsigned char *buf, int page, int mode)
+{
+ int ret, i;
+ unsigned char *p, *e;
+ int ecc;
+
+ if (ranfc_verify == 0)
+ return 0;
+
+ ra_dbg("%s, page:%x mode:%d\n", __func__, page, mode);
+
+ if (mode == FL_WRITING) { // read back and memcmp
+ ret = nfc_read_page(ra, ra->readback_buffers, page, FLAG_USE_GDMA);
+ if (ret != 0) //double comfirm
+ ret = nfc_read_page(ra, ra->readback_buffers, page, FLAG_USE_GDMA);
+
+ if (ret != 0) {
+ printk("%s: mode:%x read back fail \n", __func__, mode);
+ return -1;
+ }
+ return memcmp(buf, ra->readback_buffers, 1<<ra->flash->page_shift);
+ }
+
+ if (mode == FL_READING) {
+#if 0
+ if (ra->sandbox_page == 0)
+ return 0;
+
+ ret = nfc_write_page(ra, buf, ra->sandbox_page, FLAG_USE_GDMA | FLAG_ECC_EN);
+ if (ret != 0) {
+ printk("%s, fail write sandbox_page \n", __func__);
+ return -1;
+ }
+#else
+ /** @n// code review tag ote:
+ * The following command is actually not 'write' command to drive NFC to write flash.
+ * However, it can make NFC to calculate ECC, that will be used to compare with original ones.
+ * --YT
+ */
+ unsigned int conf = 0x001223 | (addr_cycle<<16) | (0x200 << 20) | (1<<3) | (1<<2);
+ _nfc_write_raw_data(ra, 0xff, 0xff, ra->sandbox_page << ra->flash->page_shift, conf, buf, 0x200, FLAG_USE_GDMA);
+#endif
+
+ ecc = ra_inl(NFC_ECC);
+ if (ecc == 0) //clean page.
+ return 0;
+ e = (unsigned char*)&ecc;
+ p = buf + (1 << ra->flash->page_shift);
+
+ ecc_bytes = ra->oob->eccbytes;
+ ecc_offset = ra->oob->eccpos[0];
+
+ for (i=0; i<ecc_bytes; i++) {
+
+ #ifdef __LITTLE_ENDIAN
+ int eccpos = ecc_offset + i;
+ #else
+ int eccpos = ecc_offset + ecc_bytes - i - 1;
+ #endif
+
+ if (*(p + eccpos) != *(e + i + 1)) {
+
+ #ifdef __LITTLE_ENDIAN
+ printk("%s mode:%s, invalid ecc, page: %x read:%x %x %x, ecc:%x \n",
+ __func__, (mode == FL_READING)?"read":"write", page,
+ *(p+ecc_offset), *(p+ecc_offset+1), *(p+ecc_offset+2), ecc);
+ #else
+ printk("%s mode:%s, invalid ecc, page: %x read:%x %x %x, ecc:%x \n",
+ __func__, (mode == FL_READING)?"read":"write", page,
+ *(p+ecc_offset+2), *(p+ecc_offset+1), *(p+ecc_offset), ecc);
+ #endif
+ return -1;
+ }
+ }
+ return 0;
+ }
+ return -1;
+}
+#endif
+
+
+/**
+ * @return -EIO, writing size is less than a page
+ * @return 0, OK
+ */
+int
+nfc_read_page(struct ra_nand_chip *ra, unsigned char *buf, int page, int flags)
+{
+ unsigned int cmd1 = 0, cmd2 = 0, conf = 0;
+// unsigned long long bus_addr = 0;
+ unsigned long column_addr, row_addr;
+ unsigned int ecc_en;
+ int use_gdma;
+ int pagesize, size, offs;
+ int status = 0;
+
+ use_gdma = flags & FLAG_USE_GDMA;
+ ecc_en = flags & FLAG_ECC_EN;
+
+ page = page & ((1 << ra->flash->chip_shift)-1); // chip boundary
+ pagesize = (1 << ra->flash->page_shift);
+ // write first, then read frankliao mark
+
+ /* frankliao added 20101029, read size make status reg show ecc error*/
+
+ if (ecc_en)
+ size = pagesize + (1 <<ra->flash->oob_shift); //add oobsize
+ else
+ size = pagesize;
+
+ offs = 0;
+
+ while (size > 0) {
+ int len;
+#if defined (WORKAROUND_RX_BUF_OV)
+ len = min(60, size);
+#else
+ len = size;
+#endif
+
+// bus_addr = (((unsigned long long) page << (column_addr_cycle*8)) |
+// ((unsigned long long) offs & ((1<<column_addr_cycle*8)-1)));
+
+ row_addr = page;
+ column_addr = offs & ((1<<(column_addr_cycle<<3))-1);
+
+ if (ra->flash->page_shift == SIZE_512iB_BIT) {
+ /* frankliao added 512 bytes ~ 528 bytes 20101012*/
+
+ if (unlikely((offs & ~((1 << ra->flash->page_shift)-1))))
+ cmd1 = ra->opcode->readoob;
+ /* frankliao added 256 bytes ~ 512 bytes 20101012*/
+ else if (offs & ~((1<<(column_addr_cycle<<3))-1))
+ cmd1 = ra->opcode->readB;
+ else
+ cmd1 = ra->opcode->read1;
+ conf = 0x000141 | ((addr_cycle)<<16) | (len << 20);
+
+ } else if (ra->flash->page_shift == SIZE_2KiB_BIT) {
+ cmd1 = ra->opcode->read1;
+ cmd2 = ra->opcode->read2;
+ conf = 0x000511 | ((addr_cycle)<<16) | (len << 20);
+ }
+ // frankliao
+#if !defined (WORKAROUND_RX_BUF_OV)
+ if (ecc_en)
+ conf |= (1<<3);
+#endif
+ if (use_gdma)
+ conf |= (1<<2);
+
+ status = _nfc_read_raw_data(ra, cmd1, cmd2, row_addr, column_addr, conf, buf+offs, len, flags);
+
+ if (status & NAND_STATUS_FAIL) {
+ printk("%s: fail \n", __func__);
+ return -EIO;
+ }
+
+ offs += len;
+ size -= len;
+ }
+
+ // verify and correct ecc frankliao mark
+ if ((flags & (FLAG_VERIFY | FLAG_ECC_EN)) == (FLAG_VERIFY | FLAG_ECC_EN)) {
+ status = nfc_ecc_verify(ra, buf, page, FL_READING);
+ if (status != 0) {
+ printk("%s: fail, buf:%x, page:%x, flag:%x\n",
+ __func__, (unsigned int)buf, page, flags);
+ return status;
+ }
+ }
+ else {
+ // fix,e not yet support
+ ra->buffers_page = -1; //cached
+ }
+
+ return 0;
+}
+
+
+/**
+ * @return -EIO, fail to write
+ * @return 0, OK
+ */
+int
+nfc_write_page(struct ra_nand_chip *ra, unsigned char *buf, int page, int flags)
+{
+ unsigned int cmd1 = 0, cmd3, conf = 0;
+// unsigned long long bus_addr = 0;
+ unsigned long row_addr = 0;
+ unsigned int ecc_en;
+ int use_gdma;
+ int pagesize;
+ int i;
+ char status;
+// uint8_t *oob = buf + (1 << ra->flash->page_shift);
+
+ use_gdma = flags & FLAG_USE_GDMA;
+ ecc_en = flags & FLAG_ECC_EN;
+// oob[ra->flash->badblockpos] = 0xff; //tag as good block.
+ ra->buffers_page = -1; //cached
+
+ page = page & ((1<<ra->flash->chip_shift)-1); // chip boundary
+ pagesize = (1 << ra->flash->page_shift);
+
+ /* frankliao added 20101029 */
+ if (ecc_en) {
+ // frank added 20110421
+ #if !defined(TCSUPPORT_NAND_BADBLOCK_CHECK) && !defined(TCSUPPORT_NAND_RT63368)
+ memset(ra->buffers + pagesize, 0xff, (1 << ra->flash->oob_shift));
+ #endif
+ pagesize = pagesize + (1 << ra->flash->oob_shift);
+ }
+
+// bus_addr = ((unsigned long long) page << (column_addr_cycle*8)); //write_page always write from offset 0.
+ row_addr = page;
+
+ /* frankliao modify 20101004 */
+ cmd1 = ra->opcode->pageprog1;
+ cmd3 = ra->opcode->pageprog2;
+
+ /* frankliao modify 20100105 */
+ if (ra->flash->page_shift == SIZE_512iB_BIT) {
+ conf = 0x001243 | ((addr_cycle)<<16) | ((pagesize) << 20);
+ } else if (ra->flash->page_shift == SIZE_2KiB_BIT) {
+ conf = 0x001103 | ((addr_cycle)<<16) | ((pagesize) << 20);
+ } else {
+ printk("Undefined Write Page Command\n");
+ return -EIO;
+ }
+
+ ra_dbg("in nfc_write_page function\n");
+ ra_dbg("CMD1:%02x\n", cmd1);
+ ra_dbg("CMD3:%02x\n", cmd3);
+ ra_dbg("CONFIG:%06x\n", conf);
+
+ if (ecc_en)
+ conf |= (1<<3);
+ if (use_gdma)
+ conf |= (1<<2);
+
+ // set NFC
+ ra_dbg("nfc_write_page: cmd1: %x, cmd3: %x, conf: %x, len:%x\n",cmd1, cmd3, conf, pagesize);
+
+ status = _nfc_write_raw_data(ra, cmd1, cmd3, row_addr, 0, conf, buf, pagesize, flags);
+ if (status & NAND_STATUS_FAIL) {
+ printk("%s: fail \n", __func__);
+ return -EIO;
+ }
+
+ if (flags & FLAG_VERIFY) { // verify and correct ecc
+ status = nfc_ecc_verify(ra, buf, page, FL_WRITING);
+ // frank added 20110421
+ #ifdef radbg
+ for (i=0; i<512; i++) {
+ if (ra->readback_buffers[i] != buf[i]) {
+ printk("bytes %d error readbuf: %x buf: %x\n", i, ra->readback_buffers[i], buf[i]);
+ }
+ }
+ #endif
+ if (status != 0) {
+ printk("%s: ecc_verify fail: ret:%x \n", __func__, status);
+ return -EBADMSG;
+ }
+ }
+
+ ra->buffers_page = page; //cached
+ return 0;
+}
+
+
+/*************************************************************
+ * nand internal process
+ *************************************************************/
+
+/**
+ * nand_release_device - [GENERIC] release chip
+ * @mtd: MTD device structure
+ *
+ * Deselect, release chip lock and wake up anyone waiting on the device
+ */
+static void
+nand_release_device(struct ra_nand_chip *ra)
+{
+ /* De-select the NAND device */
+ nfc_select_chip(ra, -1);
+
+ /* Release the controller and the chip */
+ ra->state = FL_READY;
+#if !defined (__UBOOT__)
+ mutex_unlock(ra->controller);
+#endif ///
+}
+
+
+/**
+ * nand_get_device - [GENERIC] Get chip for selected access
+ * @chip: the nand chip descriptor
+ * @mtd: MTD device structure
+ * @new_state: the state which is requested
+ *
+ * Get the device and lock it for exclusive access
+ */
+static int
+nand_get_device(struct ra_nand_chip *ra, int new_state)
+{
+ int ret = 0;
+
+#if !defined (__UBOOT__)
+ ret = mutex_lock_interruptible(ra->controller); // code review tag
+#endif ///
+
+ if (!ret)
+ ra->state = new_state;
+
+ return ret;
+
+}
+
+
+/*************************************************************
+ * nand internal process
+ *************************************************************/
+int
+nand_bbt_get(struct ra_nand_chip *ra, int block)
+{
+ int byte, bits;
+ bits = block * BBTTAG_BITS;
+
+ byte = bits / 8;
+ bits = bits % 8;
+
+ return (ra->bbt[byte] >> bits) & BBTTAG_BITS_MASK;
+}
+
+
+int
+nand_bbt_set(struct ra_nand_chip *ra, int block, int tag)
+{
+ int byte, bits;
+ bits = block * BBTTAG_BITS;
+
+ byte = bits / 8;
+ bits = bits % 8;
+
+ ra->bbt[byte] = (ra->bbt[byte] & ~(BBTTAG_BITS_MASK << bits)) | ((tag & BBTTAG_BITS_MASK) << bits);
+
+ return tag;
+}
+
+
+/**
+ * nand_block_checkbad - [GENERIC] Check if a block is marked bad
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ *
+ * Check, if the block is bad. Either by reading the bad block table or
+ * calling of the scan function.
+ */
+int
+nand_block_checkbad(struct ra_nand_chip *ra, loff_t offs
+#ifdef TCSUPPORT_NAND_RT63368
+, unsigned long bmt_block
+#endif
+)
+{
+ int page, block;
+ int ret = 4;
+ unsigned int tag;
+
+ char *str[]= {"UNK", "RES", "BAD", "GOOD"};
+
+ if (ranfc_bbt == 0)
+ return 0;
+
+ // align with chip
+ offs = offs & (((loff_t)1<<ra->flash->chip_shift) -1);
+
+ page = offs >> ra->flash->page_shift;
+ block = offs >> ra->flash->erase_shift;
+#ifdef TCSUPPORT_NAND_RT63368
+ if(bmt_block == 0){
+#endif
+ tag = nand_bbt_get(ra, block);
+
+ if (tag == BBT_TAG_UNKNOWN) {
+ ret = nfc_read_oob(ra, page, ra->flash->badblockpos, (char*)&tag, 1, FLAG_NONE);
+ /* frankliao added 20101025, 32 - 8 = 24 */
+ tag = tag >> 24;
+ if (ret == 0)
+ tag = ((tag & 0xff) == 0xff) ? BBT_TAG_GOOD : BBT_TAG_BAD;
+ else
+ tag = BBT_TAG_BAD;
+
+ nand_bbt_set(ra, block, tag);
+ }
+
+#if 0
+ ra_dbg("%s offs: %x , ret: %x, tag:%s\n",
+ __func__, (int)offs, ret, str[tag]);
+#endif
+
+ if (tag != BBT_TAG_GOOD) {
+ printk("%s: offs:%llx tag: %s \n", __func__, (loff_t)offs, str[tag]);
+ return 1;
+ } else {
+ return 0;
+ }
+#ifdef TCSUPPORT_NAND_RT63368
+ }
+ else{
+
+ ret = nfc_read_oob(ra, page, BMT_BAD_BLOCK_INDEX_OFFSET, (char*)&tag, 1, FLAG_NONE);
+
+ tag = tag >> 24;
+ if (ret == 0 && ((tag & 0xff) == 0xff))
+ return 0;
+ else
+ return 1;
+
+ }
+#endif
+
+}
+
+
+/**
+ * nand_block_markbad -
+ */
+int
+nand_block_markbad(struct ra_nand_chip *ra, loff_t offs
+#ifdef TCSUPPORT_NAND_RT63368
+, unsigned long bmt_block
+#endif
+)
+{
+ int page, block;
+ int start_page, end_page;
+ int ret = 4;
+ unsigned int tag;
+ char *ecc;
+
+ // align with chip
+ ra_dbg("%s offs: %llx \n", __func__, (loff_t)offs);
+
+ offs = offs & (((loff_t)1<<ra->flash->chip_shift) -1);
+
+// page = offs >> ra->flash->page_shift;
+ block = offs >> ra->flash->erase_shift;
+ start_page = block * (1<<(ra->flash->erase_shift - ra->flash->page_shift));
+ end_page = (block+1) * (1<<(ra->flash->erase_shift - ra->flash->page_shift));
+#ifdef TCSUPPORT_NAND_RT63368
+ if(bmt_block == 0){
+#endif
+
+ tag = nand_bbt_get(ra, block);
+
+ if (tag == BBT_TAG_BAD) {
+ printk("%s: mark repeatedly \n", __func__);
+ return 0;
+ }
+
+#ifdef TCSUPPORT_NAND_RT63368
+ }
+#endif
+ for (page=start_page; page<end_page; page++) {
+ // new tag as bad
+ tag = BBT_TAG_BAD;
+ ret = nfc_read_page(ra, ra->buffers, page, FLAG_ECC_EN);
+ if (ret != 0) {
+ printk("%s: fail to read bad block tag \n", __func__);
+ goto tag_bbt;
+ }
+
+#ifdef TCSUPPORT_NAND_RT63368
+ if(bmt_block)
+ ecc = &ra->buffers[(1<<ra->flash->page_shift) + BMT_BAD_BLOCK_INDEX_OFFSET];
+ else
+#endif
+ ecc = &ra->buffers[(1<<ra->flash->page_shift)+ra->flash->badblockpos];
+
+ if (*ecc == (char)0xff) {
+ //tag into flash
+ *ecc = (char)tag;
+ // ret = nfc_write_page(ra, ra->buffers, page, FLAG_USE_GDMA);
+ ret = nfc_write_page(ra, ra->buffers, page, FLAG_ECC_EN);
+ if (ret) {
+ printk("%s: fail to write bad block tag \n", __func__);
+ break;
+ }
+ }
+ #if defined(TCSUPPORT_NAND_BADBLOCK_CHECK) || defined(TCSUPPORT_NAND_RT63368)
+ break;
+ #endif
+
+ }
+tag_bbt:
+
+ #ifdef TCSUPPORT_NAND_RT63368
+ if(bmt_block == 0){
+ #endif
+ //update bbt
+ nand_bbt_set(ra, block, tag);
+
+ #ifdef TCSUPPORT_NAND_RT63368
+ }
+ #endif
+
+ return 0;
+}
+
+
+#if defined (WORKAROUND_RX_BUF_OV)
+/**
+ * to find a bad block for ecc verify of read_page
+ */
+unsigned int
+nand_bbt_find_sandbox(struct ra_nand_chip *ra)
+{
+ loff_t offs = 0;
+ int chipsize = 1 << ra->flash->chip_shift;
+ int blocksize = 1 << ra->flash->erase_shift;
+
+
+ while (offs < chipsize) {
+ if (nand_block_checkbad(ra, offs)) //scan and verify the unknown tag
+ break;
+ offs += blocksize;
+ }
+
+ if (offs >= chipsize) {
+ offs = chipsize - blocksize;
+ }
+
+ nand_bbt_set(ra, (loff_t)offs >> ra->flash->erase_shift, BBT_TAG_RES); // tag bbt only, instead of update badblockpos of flash.
+ return (offs >> ra->flash->page_shift);
+}
+#endif
+
+
+/**
+ * nand_erase_nand - [Internal] erase block(s)
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ * @allowbbt: allow erasing the bbt area
+ *
+ * Erase one ore more blocks
+ */
+int
+nand_erase_nand(struct ra_nand_chip *ra, struct erase_info *instr)
+{
+ int page, len, status, ret;
+ unsigned long long addr;
+ unsigned int blocksize = 1<<ra->flash->erase_shift;
+
+#ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+ int block;
+ int srcblock;
+ unsigned long srcaddr;
+#endif
+
+#ifdef TCSUPPORT_NAND_RT63368
+ int physical_block;
+ unsigned long logic_addr;
+ unsigned short phy_block_bbt;
+#endif
+
+ ra_dbg("%s: start:%llx, len:%x \n", __func__, instr->addr, (unsigned int)instr->len);
+
+#define BLOCK_ALIGNED(a) ((a) & (blocksize - 1))
+
+ if (BLOCK_ALIGNED(instr->addr) || BLOCK_ALIGNED(instr->len)) {
+ ra_dbg("%s: erase block not aligned, addr:%llx len:%x\n", __func__, instr->addr, instr->len);
+ return -EINVAL;
+ }
+
+ instr->fail_addr = -1;
+
+ len = instr->len;
+ addr = instr->addr; //logic address
+ instr->state = MTD_ERASING;
+
+#ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+ srcblock = addr >> ra->flash->erase_shift;
+ srcaddr = addr;
+
+ addr += (block_table[srcblock] - srcblock) << ra->flash->erase_shift;
+
+ if(nand_partition_check(srcblock)){
+ printk("%s: address over partition size, erase fail \n", __func__);
+ instr->state = MTD_ERASE_FAILED;
+ return -EIO;
+ }
+#endif
+
+#ifdef TCSUPPORT_NAND_RT63368
+ logic_addr = addr; //logic address
+#endif
+
+ while (len) {
+
+#ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+ block = srcaddr >> ra->flash->erase_shift;
+ if(srcblock != block)
+ {
+ srcblock = block;
+ addr = srcaddr + ((block_table[block] - block) << ra->flash->erase_shift);
+
+ if(nand_partition_check(srcblock)){
+ printk("%s: address over partition size, erase fail \n", __func__);
+ instr->state = MTD_ERASE_FAILED;
+ return -EIO;
+ }
+ }
+#endif
+
+#ifdef TCSUPPORT_NAND_RT63368
+ physical_block = get_mapping_block_index(logic_addr >> ra->flash->erase_shift, &phy_block_bbt); //physical block
+ addr = (physical_block << ra->flash->erase_shift); //physical address
+#endif
+
+// page = (int)(addr >> ra->flash->page_shift);
+ page = (int)(addr >> ra->flash->page_shift);
+ ranfc_page = page;
+// block = (int)(addr >> ra->flash->erase_shift);
+// ra_dbg("FrankLiao block %x\n", block);
+
+ /* select device and check wp */
+ if (nfc_enable_chip(ra, addr, 0)) {
+ printk("%s: nand is write protected \n", __func__);
+ printk("NFC_CTRL : %x\n", ra_inl(NFC_CTRL));
+ instr->state = MTD_ERASE_FAILED;
+ goto erase_exit;
+ }
+
+ /*
+	 * Check if we have a bad block; we do not erase bad blocks!
+ */
+ // frankliao mark
+#if 0
+ if (nand_block_checkbad(ra, addr)) {
+ printk(KERN_WARNING "nand_erase: attempt to erase a "
+ "bad block at 0x%llx\n", addr);
+ instr->state = MTD_ERASE_FAILED;
+ goto erase_exit;
+ }
+#endif
+ /*
+ * Invalidate the page cache, if we erase the block which
+ * contains the current cached page
+ */
+ // frankliao mark
+ if (BLOCK_ALIGNED(addr) == BLOCK_ALIGNED(ra->buffers_page << ra->flash->page_shift))
+ ra->buffers_page = -1;
+
+ status = nfc_erase_block(ra, page);
+// status = nfc_erase_block(ra, block);
+
+ /* See if block erase succeeded */
+ if (status) {
+ #ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+
+ nand_erase_next_goodblock(ra, block, addr);
+
+ #elif defined(TCSUPPORT_NAND_RT63368)
+ if (update_bmt(addr,
+ UPDATE_ERASE_FAIL, NULL, NULL))
+ {
+ printk("Erase fail at block, update BMT success\n");
+ }
+ else
+ {
+ printk("Erase fail at block, update BMT fail\n");
+ return -1;
+ }
+
+ #else
+ printk("%s: failed erase, block 0x%08x\n", __func__, page);
+ instr->state = MTD_ERASE_FAILED;
+// instr->fail_addr = (block << ra->flash->erase_shift);
+ instr->fail_addr = (page << ra->flash->page_shift);
+ goto erase_exit;
+ #endif
+
+
+ }
+
+
+ /* Increment page address and decrement length */
+ len -= blocksize;
+ addr += blocksize; //physical address
+
+ #ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+ srcaddr += blocksize;
+ #endif
+
+ #ifdef TCSUPPORT_NAND_RT63368
+ logic_addr += blocksize; //logic address
+ #endif
+
+ }
+ instr->state = MTD_ERASE_DONE;
+
+erase_exit:
+
+ ret = ((instr->state == MTD_ERASE_DONE) ? 0 : -EIO);
+
+#if !defined (__UBOOT__)
+ /* Do call back function */
+ if (!ret)
+ mtd_erase_callback(instr);
+#endif
+
+ if (ret) {
+ nand_bbt_set(ra, addr >> ra->flash->erase_shift, BBT_TAG_BAD);
+ }
+
+ /* Return more or less happy */
+ return ret;
+}
+
+
+static int
+nand_write_oob_buf(struct ra_nand_chip *ra, uint8_t *buf, uint8_t *oob, size_t size,
+ int mode, int ooboffs)
+{
+ size_t oobsize = 1 << ra->flash->oob_shift;
+// uint8_t *buf = ra->buffers + (1<<ra->page_shift);
+ int retsize = 0;
+
+ ra_dbg("%s: size:%x, mode:%x, offs:%x \n", __func__, size, mode, ooboffs);
+
+ switch(mode) {
+ case MTD_OOB_PLACE:
+ case MTD_OOB_RAW:
+ if (ooboffs > oobsize)
+ return -1;
+
+#if 0 //* clear buffer */
+ if (ooboffs || ooboffs+size < oobsize)
+ memset (ra->buffers + oobsize, 0x0ff, 1<<ra->oob_shift);
+#endif
+
+ size = min(size, oobsize - ooboffs);
+ memcpy(buf + ooboffs, oob, size);
+ retsize = size;
+ break;
+
+ case MTD_OOB_AUTO:
+ {
+ struct nand_oobfree *free;
+ uint32_t woffs = ooboffs;
+
+ if (ooboffs > ra->oob->oobavail)
+ return -1;
+
+ /* OOB AUTO does not clear buffer */
+ /* frankliao mark 20101021 */
+
+ for(free = ra->oob->oobfree; free->length && size; free++) {
+ int wlen = free->length - woffs;
+ int bytes = 0;
+
+ /* Write request not from offset 0 ? */
+ if (wlen <= 0) {
+ woffs = -wlen;
+ continue;
+ }
+
+ bytes = min_t(size_t, size, wlen);
+ memcpy (buf + free->offset + woffs, oob, bytes);
+ woffs = 0;
+ oob += bytes;
+ size -= bytes;
+ retsize += bytes;
+ }
+
+ buf += oobsize;
+ break;
+ }
+
+ default:
+ BUG();
+ }
+
+ return retsize;
+}
+
+
+static int
+nand_read_oob_buf(struct ra_nand_chip *ra, uint8_t *oob, size_t size,
+ int mode, int ooboffs)
+{
+ size_t oobsize = 1 << ra->flash->oob_shift;
+ uint8_t *buf = ra->buffers + (1 << ra->flash->page_shift);
+ int retsize=0;
+
+ ra_dbg("%s: size:%x, mode:%x, offs:%x \n", __func__, size, mode, ooboffs);
+
+ switch(mode) {
+ case MTD_OOB_PLACE:
+ case MTD_OOB_RAW:
+ if (ooboffs > oobsize)
+ return -1;
+
+ size = min(size, oobsize - ooboffs);
+ memcpy(oob, buf + ooboffs, size);
+ return size;
+
+ case MTD_OOB_AUTO: {
+ struct nand_oobfree *free;
+ uint32_t woffs = ooboffs;
+
+ if (ooboffs > ra->oob->oobavail)
+ return -1;
+
+ size = min(size, ra->oob->oobavail - ooboffs);
+ for(free = ra->oob->oobfree; free->length && size; free++) {
+ int wlen = free->length - woffs;
+ int bytes = 0;
+
+ /* Write request not from offset 0 ? */
+ if (wlen <= 0) {
+ woffs = -wlen;
+ continue;
+ }
+
+ bytes = min_t(size_t, size, wlen);
+ memcpy(oob, buf + free->offset + woffs, bytes);
+ woffs = 0;
+ oob += bytes;
+ size -= bytes;
+ retsize += bytes;
+ }
+
+
+ return retsize;
+ }
+ default:
+ BUG();
+ }
+
+ return -1;
+}
+
+
+#ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+int nandflash_scan_badblock(void)
+{
+ int i, j, badblock;
+ int addr = 0;
+ int blocksize = 1 << ra->flash->erase_shift;
+ int totalblock = 1024;
+
+
+ for(i = 0; i < totalblock; i++)
+ {
+ block_table[i] = i;
+ }
+
+ for(j = 0; j < totalblock; j++)
+ {
+
+ if(nand_block_checkbad(ra, addr))
+ {
+ badblock = addr/blocksize;
+
+ if(badblock >= TCROMFILE_START && badblock < TCROMFILE_END)
+ {
+
+ for(i = TCROMFILE_START; i < TCROMFILE_END; i++)
+ {
+ if(block_table[i] == badblock)
+ {
+ break;
+ }
+
+ }
+
+ for(; i < TCROMFILE_END; i++)
+ {
+ block_table[i]++;
+ }
+
+ }else if(badblock >= TCLINUX_BLOCK_START && badblock < TCLINUX_BLOCK_END){
+
+ for(i = TCLINUX_BLOCK_START; i < TCLINUX_BLOCK_END; i++)
+ {
+ if(block_table[i] == badblock)
+ {
+ break;
+ }
+
+ }
+
+ for(; i < TCLINUX_BLOCK_END; i++)
+ {
+ block_table[i]++;
+ }
+
+
+
+ }
+ else if(badblock >= TCLINUX_SLAVE_BLOCK_START && badblock < TCLINUX_SLAVE_BLOCK_END){
+
+ for(i = TCLINUX_SLAVE_BLOCK_START; i < TCLINUX_SLAVE_BLOCK_END; i++)
+ {
+ if(block_table[i] == badblock)
+ {
+ break;
+ }
+
+ }
+
+ for(; i < TCLINUX_SLAVE_BLOCK_END; i++)
+ {
+ block_table[i]++;
+ }
+
+
+ }else if(badblock >= TCSYSLOG_START && badblock < TCSYSLOG_END){
+
+ for(i = TCSYSLOG_START; i < TCSYSLOG_END; i++)
+ {
+ if(block_table[i] == badblock)
+ {
+ break;
+ }
+
+ }
+
+ for(; i < TCSYSLOG_END; i++)
+ {
+ block_table[i]++;
+ }
+
+ }else if(badblock >= TCBKROMFILE_START && badblock < TCBKROMFILE_END){
+
+ for(i = TCBKROMFILE_START; i < TCBKROMFILE_END; i++)
+ {
+ if(block_table[i] == badblock)
+ {
+ break;
+ }
+
+ }
+
+ for(; i < TCBKROMFILE_END; i++)
+ {
+ block_table[i]++;
+ }
+
+ }else if(badblock >= TCWIFI_START && badblock < TCWIFI_END){
+
+ for(i = TCWIFI_START; i < TCWIFI_END; i++)
+ {
+ if(block_table[i] == badblock)
+ {
+ break;
+ }
+
+ }
+
+ for(; i < TCWIFI_END; i++)
+ {
+ block_table[i]++;
+ }
+
+ }
+
+ }
+
+ addr += blocksize;
+
+ }
+
+ return 0;
+
+}
+
+
+int nand_partition_check(int block)
+{
+ int ret = 0;
+
+ if(block >= TCROMFILE_START && block < TCROMFILE_END){
+ if(block_table[block] >= TCROMFILE_END){
+ ret = -1;
+ goto done;
+ }
+
+ }else if(block >= TCLINUX_BLOCK_START && block < TCLINUX_BLOCK_END){
+ if(block_table[block] >= TCLINUX_BLOCK_END){
+ ret = -1;
+ goto done;
+ }
+
+ }else if(block >= TCLINUX_SLAVE_BLOCK_START && block < TCLINUX_SLAVE_BLOCK_END){
+ if(block_table[block] >= TCLINUX_SLAVE_BLOCK_END){
+ ret = -1;
+ goto done;
+ }
+
+ }else if(block >= TCSYSLOG_START && block < TCSYSLOG_END){
+ if(block_table[block] >= TCSYSLOG_END){
+ ret = -1;
+ goto done;
+ }
+
+ }else if(block >= TCBKROMFILE_START && block < TCBKROMFILE_END){
+ if(block_table[block] >= TCBKROMFILE_END){
+ ret = -1;
+ goto done;
+ }
+ }else if(block >= TCWIFI_START && block < TCWIFI_END){
+ if(block_table[block] >= TCWIFI_END){
+ ret = -1;
+ goto done;
+ }
+ }
+
+done:
+ return ret;
+
+}
+
+int nand_erase_next_goodblock(struct ra_nand_chip *ra, int block, unsigned long addr_l)
+{
+ unsigned int blocksize = 1 << ra->flash->erase_shift;
+ unsigned int pagesize = 1 << ra->flash->page_shift;
+ unsigned long offset;
+ int page;
+ nand_block_markbad(ra, addr_l);
+
+ block++;
+ offset = block * blocksize;
+ offset += (block_table[block] - block) << ra->flash->erase_shift;
+
+ page = (unsigned long)(offset >> ra->flash->page_shift);
+
+ while(nfc_erase_block(ra, page))
+ {
+
+ nand_block_markbad(ra, offset);
+
+ block++;
+ offset = block * blocksize;
+ offset += (block_table[block] - block) << ra->flash->erase_shift;
+
+ page = (unsigned long)(offset >> ra->flash->page_shift);
+
+ }
+
+ nandflash_scan_badblock();
+
+ return 0;
+
+}
+
+int nand_write_next_goodblock(struct ra_nand_chip *ra, int page_u, int page_l)
+{
+	int src_page, npage, nextblk_startaddr, nextblk_writeaddr, ret, block, addr;
+	int readstart_page, writestart_page, readtotal_page, to_page;
+	uint8_t *pbuf = NULL;
+	uint32_t pagesize = (1 << ra->flash->page_shift);
+	uint32_t blocksize = (1 << ra->flash->erase_shift);
+	uint8_t page_data[pagesize + (1 << ra->flash->oob_shift)];	/* VLA snapshot of the page+oob that failed to program */
+
+	src_page = page_l >> (ra->flash->erase_shift - ra->flash->page_shift);	/* block index holding page_l */
+	src_page = src_page << (ra->flash->erase_shift - ra->flash->page_shift);	/* first page of that block */
+
+	readstart_page = src_page;
+
+	npage = page_l - src_page + 1;	/* pages to relocate, up to and including page_l */
+	readtotal_page = npage;
+
+	block = page_u >> (ra->flash->erase_shift - ra->flash->page_shift);
+	addr = block << ra->flash->erase_shift;
+
+	block++;	/* start trying from the block after page_u's block */
+	addr += blocksize;
+
+	nextblk_startaddr = addr + ((block_table[block] - block) << ra->flash->erase_shift);
+	nextblk_writeaddr = nextblk_startaddr + (page_l - src_page) * pagesize;
+
+	writestart_page = nextblk_startaddr >> ra->flash->page_shift;
+
+	memcpy(page_data, ra->buffers, pagesize + (1 << ra->flash->oob_shift));	/* save the failed page before ra->buffers is reused */
+
+	nfc_erase_block(ra, writestart_page);
+
+	while(readtotal_page > 0)
+	{
+		if(readtotal_page > 1){
+
+			memset(ra->buffers, 0xff, pagesize + (1 << ra->flash->oob_shift));
+
+			ret = nfc_read_page(ra, ra->buffers, readstart_page, 0);
+			if(ret)
+			{
+				ret = nfc_read_page(ra, ra->buffers, readstart_page, 0);	/* retry once before giving up */
+				if(ret)
+					printk("%s: read page fail", __func__);
+
+			}
+			to_page = writestart_page;
+			pbuf = ra->buffers;
+		}
+		else{
+			to_page = nextblk_writeaddr >> ra->flash->page_shift;
+			pbuf = page_data;	/* final page: write the saved failing data */
+
+		}
+
+		ret = nfc_write_page(ra, pbuf, to_page, FLAG_ECC_EN | FLAG_VERIFY);
+		if(ret)
+		{
+			nfc_erase_block(ra, writestart_page);
+			nand_block_markbad(ra, nextblk_startaddr);
+
+			block++;
+			addr += blocksize;
+			nextblk_startaddr = addr + ((block_table[block] - block) << ra->flash->erase_shift);	/* fix: shift before add ('+' binds tighter than '<<'); matches the computation above */
+			writestart_page = nextblk_startaddr >> ra->flash->page_shift;
+			nextblk_writeaddr = nextblk_startaddr + (page_l - src_page) * pagesize;
+			readstart_page = src_page;	/* restart the whole relocation into the new block */
+			readtotal_page = npage;
+			continue;
+
+		}
+		writestart_page++;
+		readstart_page++;
+		readtotal_page--;
+
+	}
+
+	nfc_erase_block(ra, src_page);
+	nand_block_markbad(ra, src_page * pagesize);	/* retire the source block */
+
+	nandflash_scan_badblock();	/* rebuild the logical->physical remap table */
+
+	return to_page;	/* physical page actually written for page_l */
+
+}
+
+#endif
+/**
+ * nand_do_write_ops - [Internal] NAND write with ECC
+ * @ra: NAND chip structure
+ * @to: offset to write to
+ * @ops: oob operations description structure
+ *
+ * NAND write with ECC
+ */
+static int
+nand_do_write_ops(struct ra_nand_chip *ra, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int page;
+ uint32_t datalen = ops->len;
+ uint32_t ooblen = ops->ooblen;
+ uint8_t *oob = ops->oobbuf;
+ uint8_t *data = ops->datbuf;
+
+ int pagesize = (1 << ra->flash->page_shift);
+ int pagemask = (pagesize -1);
+ int oobsize = 1 << ra->flash->oob_shift;
+ int i;
+ #ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+ unsigned int blocksize = (1 << ra->flash->erase_shift);
+ int block;
+ int srcpage;
+ #endif
+
+#ifdef TCSUPPORT_NAND_RT63368
+ int physical_block;
+ int logic_page;
+ unsigned long addr_offset_in_block;
+ unsigned long logic_addr;
+ unsigned short phy_block_bbt;
+ char dat[SIZE_2KiB_BYTES + SIZE_64iB_BYTES];
+#endif
+
+ loff_t addr = to; //logic address
+
+#ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+ int srcblock = addr >> ra->flash->erase_shift;
+ unsigned long srcaddr = addr;
+
+ addr += (block_table[srcblock] - srcblock) << ra->flash->erase_shift;
+
+ if(nand_partition_check(srcblock)){
+ printk("%s: address over partition size, write fail \n", __func__);
+ return -EFAULT;
+ }
+#endif
+
+#ifdef TCSUPPORT_NAND_RT63368
+ logic_addr = addr; //logic address
+#endif
+
+// ra_dbg("%s: to:%x, ops data:%p, oob:%p datalen:%x ooblen:%x, ooboffs:%x oobmode:%x \n",
+// __func__, (unsigned int)to, data, oob, datalen, ooblen, ops->ooboffs, ops->mode);
+
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+
+ /* Invalidate the page cache, when we write to the cached page */
+ ra->buffers_page = -1;
+
+ if (data ==0)
+ datalen = 0;
+
+#if 0
+ // oob sequential (burst) write
+ if (datalen == 0 && ooblen) {
+ int len = ((ooblen + ops->ooboffs) + (ra->oob->oobavail - 1)) / ra->oob->oobavail * oobsize;
+
+ /* select chip, and check if it is write protected */
+ if (nfc_enable_chip(ra, addr, 0))
+ return -EIO;
+
+ //FIXME, need sanity check of block boundary
+ page = (int)((to & (((loff_t)1<<ra->flash->chip_shift)-1)) >> ra->flash->page_shift); //chip boundary
+ ranfc_page = page;
+
+ /* frankliao added 20101029 */
+ if (nand_block_checkbad(ra, addr
+ #ifdef TCSUPPORT_NAND_RT63368
+ ,BAD_BLOCK_RAW
+ #endif
+ )) {
+ printk(KERN_WARNING "nand_do_write_ops: attempt to write a "
+ "bad block at 0x%08x\n", page);
+ return -EFAULT;
+ }
+ // frankliao added 20110304, clear write buffer
+ memset(ra->buffers, 0x0ff, pagesize);
+ //fixme, should we reserve the original content?
+ if (ops->mode == MTD_OOB_AUTO) {
+// nfc_read_oob(ra, page, 0, ra->buffers, len, FLAG_USE_GDMA);
+ nfc_read_oob(ra, page, 0, ra->buffers, len, FLAG_NONE);
+ }
+ //prepare buffers
+ nand_write_oob_buf(ra, ra->buffers, oob, ooblen, ops->mode, ops->ooboffs);
+ // write out buffer to chip
+// nfc_write_oob(ra, page, 0, ra->buffers, len, FLAG_USE_GDMA);
+ nfc_write_oob(ra, page, 0, ra->buffers, len, FLAG_NONE);
+
+ ops->oobretlen = ooblen;
+ ooblen = 0;
+ }
+#endif
+
+ // data sequential (burst) write
+ if (datalen && ooblen == 0) {
+ /* frankliao modify 20101021 */ // code review tag
+// printk("ranfc can not support write_data_burst, since hw-ecc and fifo constraints..\n");
+ }
+
+ // page write
+ while (datalen || ooblen) {
+ int len;
+ int ret;
+ int offs;
+ int ecc_en = 0;
+/*
+ printk("datalen = %d\n", datalen);
+ printk("ooblen = %d\n", ooblen);
+*/
+ ra_dbg("%s : addr:%llx, ops data:%p, oob:%p datalen:%x ooblen:%x, ooboffs:%x \n",
+ __func__, addr, data, oob, datalen, ooblen, ops->ooboffs);
+
+#ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+ block = srcaddr >> ra->flash->erase_shift;
+ if(srcblock != block)
+ {
+ srcblock = block;
+ addr = srcaddr + ((block_table[block] - block) << ra->flash->erase_shift);
+
+ if(nand_partition_check(srcblock)){
+ printk("%s: address over partition size, write fail \n", __func__);
+ return -EFAULT;
+ }
+ }
+#endif
+
+#ifdef TCSUPPORT_NAND_RT63368
+
+ addr_offset_in_block = logic_addr % (1 << ra->flash->erase_shift); //logic address offset
+ physical_block = get_mapping_block_index(logic_addr >> ra->flash->erase_shift, &phy_block_bbt); //physical block
+ addr = (physical_block << ra->flash->erase_shift) + addr_offset_in_block; //physical address offset
+#endif
+
+ page = (int)((addr & (((loff_t)1<<ra->flash->chip_shift)-1)) >> ra->flash->page_shift); //chip boundary
+
+#ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+ srcpage = (int)((srcaddr & ((1<<ra->flash->chip_shift)-1)) >> ra->flash->page_shift);
+#endif
+
+#ifdef TCSUPPORT_NAND_RT63368
+ logic_page = (int)((logic_addr & ((1<<ra->flash->chip_shift)-1)) >> ra->flash->page_shift); //logic page
+#endif
+
+ ranfc_page = page;
+#if 0
+ /* frankliao added 20101029 */
+ if (nand_block_checkbad(ra, addr)) {
+ printk(KERN_WARNING "nand_do_write_ops: attempt to write a "
+ "bad block at 0x%08x\n", page);
+ return -EFAULT;
+ }
+#endif
+ /* select chip, and check if it is write protected */
+ if (nfc_enable_chip(ra, addr, 0))
+ return -EIO;
+ /* frankliao deleted 20101007, have wait time problem */
+/* // oob write
+ if (ops->mode == MTD_OOB_AUTO) {
+ //fixme, this path is not yet varified
+ nfc_read_oob(ra, page, 0, ra->buffers + pagesize, oobsize, FLAG_NONE);
+ }
+*/
+ if (oob && ooblen > 0) {
+ memset(ra->buffers + pagesize, 0xff, oobsize);
+ len = nand_write_oob_buf(ra, ra->buffers + pagesize, oob, ooblen, ops->mode, ops->ooboffs);
+ if (len < 0)
+ return -EINVAL;
+ oob += len;
+ ops->oobretlen += len;
+ ooblen -= len;
+ }
+
+ // data write
+ offs = addr & pagemask;
+ len = min_t(size_t, datalen, pagesize - offs);
+ /* frankliao mark */
+// len = datalen;
+ if (data && len > 0) {
+ /* frankliao modify 20110208, reset buffer */
+// memset(ra->buffers, 0xff, len);
+ #if defined(TCSUPPORT_NAND_BADBLOCK_CHECK) || defined(TCSUPPORT_NAND_RT63368)
+ memset(ra->buffers, 0xff, pagesize + oobsize);
+ #else
+ memset(ra->buffers, 0xff, pagesize);
+ #endif
+			memcpy(ra->buffers + offs, data, len); // we cannot be sure whether ops->buf is DMA-able.
+/*
+ printk("In nand_do_write_ops\n");
+ printk("offset : %d\n", offs);
+ printk("ra->buffer\n");
+ for (i=0; i<pagesize; i++) {
+ printk("%d ", ra->buffers[i]);
+ }
+ printk("\n\n\n");
+
+ printk("data buf\n");
+ for (i=0; i<pagesize; i++) {
+ printk("%d ", data[i]);
+ }
+ printk("\n\n\n");
+*/
+ #ifdef TCSUPPORT_NAND_RT63368
+ if(block_is_in_bmt_region(physical_block))
+ {
+ memcpy(ra->buffers + pagesize + OOB_INDEX_OFFSET, &phy_block_bbt, OOB_INDEX_SIZE);
+ }
+ #endif
+ data += len;
+ datalen -= len;
+ ops->retlen += len;
+ ecc_en = FLAG_ECC_EN;
+ }
+// ret = nfc_write_page(ra, ra->buffers, page, FLAG_VERIFY);
+ /* frankliao modify 20101004 */
+// ret = nfc_write_page(ra, ra->buffers, page, FLAG_USE_GDMA | FLAG_VERIFY |
+// ((ops->mode == MTD_OOB_RAW || ops->mode == MTD_OOB_PLACE) ? 0 : ecc_en ));
+// ret = nfc_write_page(ra, ra->buffers, page, FLAG_VERIFY |
+// ((ops->mode == MTD_OOB_RAW || ops->mode == MTD_OOB_PLACE) ? 0 : ecc_en ));
+
+// ret = nfc_write_page(ra, ra->buffers, page, FLAG_VERIFY | ranfc_flags);
+ #ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+ ranfc_flags = (FLAG_VERIFY | ecc_en);
+ //ranfc_flags = 0;
+ #elif defined(TCSUPPORT_NAND_RT63368)
+ ranfc_flags = (FLAG_VERIFY | ecc_en);
+ #endif
+
+ #ifdef TCSUPPORT_NAND_RT63368
+ if(!(data && len > 0))
+ {
+ ret = nfc_write_oob(ra, page, 0, ra->buffers + pagesize, oobsize, ranfc_flags);
+ if(ret)
+ nfc_read_page(ra, ra->buffers, page, ranfc_flags);
+ }
+ else{
+ #endif
+ ret = nfc_write_page(ra, ra->buffers, page, ranfc_flags);
+ #ifdef TCSUPPORT_NAND_RT63368
+ }
+ #endif
+
+ if (ret) {
+ #ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+ page = nand_write_next_goodblock(ra, srcpage, page);
+ #elif defined(TCSUPPORT_NAND_RT63368)
+ printk("write fail at page: %d \n", page);
+ memcpy(dat, ra->buffers, SIZE_2KiB_BYTES + SIZE_64iB_BYTES);
+ if (update_bmt(page << ra->flash->page_shift,
+ UPDATE_WRITE_FAIL, dat, dat + SIZE_2KiB_BYTES))
+ {
+ printk("Update BMT success\n");
+
+ }
+ else
+ {
+ printk("Update BMT fail\n");
+ return -1;
+ }
+ #else
+ nand_bbt_set(ra, addr >> ra->flash->erase_shift, BBT_TAG_BAD);
+ return ret;
+ #endif
+
+ }
+
+// nand_bbt_set(ra, addr >> ra->flash->erase_shift, BBT_TAG_GOOD);
+
+ addr = (page+1) << ra->flash->page_shift; //physical address
+
+ #ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+ srcaddr = (srcpage+1) << ra->flash->page_shift;
+ #endif
+
+ #ifdef TCSUPPORT_NAND_RT63368
+ logic_addr = (logic_page + 1) << ra->flash->page_shift; //logic address
+ #endif
+ }
+
+ return 0;
+}
+
+/**
+ * nand_do_read_ops - [Internal] Read data with ECC
+ *
+ * @ra: NAND chip structure
+ * @from: offset to read from
+ * @ops: oob ops structure
+ *
+ * Internal function. Called with chip held.
+ */
+static int
+nand_do_read_ops(struct ra_nand_chip *ra, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+
+ int page;
+ uint32_t datalen = ops->len;
+ uint32_t ooblen = ops->ooblen;
+ uint8_t *oob = ops->oobbuf;
+ uint8_t *data = ops->datbuf;
+ int pagesize = (1 << ra->flash->page_shift);
+ int oobsize = (1 << ra->flash->oob_shift);
+ int pagemask = (pagesize -1);
+ loff_t addr = from; //logic address
+
+ #ifdef TCSUPPORT_NAND_RT63368
+ int physical_block;
+ int logic_page;
+ unsigned long addr_offset_in_block;
+ unsigned long logic_addr = addr;
+ unsigned short phy_block_bbt;
+ #endif
+
+ #ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+ int block;
+ unsigned int blocksize = (1 << ra->flash->erase_shift);
+ int srcblock = addr >> ra->flash->erase_shift;
+ unsigned long srcaddr = addr;
+ int srcpage;
+
+ addr += (block_table[srcblock] - srcblock) << ra->flash->erase_shift;
+
+ if(nand_partition_check(srcblock)){
+ printk("%s: address over partition size, read fail \n", __func__);
+ return -EIO;
+ }
+ #endif
+// ra_dbg("READ FROM\n");
+// ra_dbg("read from %llx\n\n", from);
+
+// ra_dbg("%s: addr:%llx, ops data:%p, oob:%p datalen:%x ooblen:%x, ooboffs:%x \n",
+// __func__, (unsigned long long)addr, data, oob, datalen, ooblen, ops->ooboffs);
+
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+
+ if (data == 0)
+ datalen = 0;
+
+ while(datalen || ooblen) {
+ int len, ret, offs;
+
+// ra_dbg("%s : addr:%llx, ops data:%p, oob:%p datalen:%x ooblen:%x, ooboffs:%x \n",
+// __func__, (unsigned long long)addr, data, oob, datalen, ooblen, ops->ooboffs);
+
+ /* select chip */
+ if (nfc_enable_chip(ra, addr, 1) < 0)
+ return -EIO;
+#ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+ block = srcaddr >> ra->flash->erase_shift;
+ if(srcblock != block)
+ {
+ srcblock = block;
+ addr = srcaddr + ((block_table[block] - block) << ra->flash->erase_shift);
+
+ if(nand_partition_check(srcblock)){
+ printk("%s: address over partition size, read fail \n", __func__);
+ return -EIO;
+ }
+ }
+#endif
+
+#ifdef TCSUPPORT_NAND_RT63368
+
+ addr_offset_in_block = logic_addr % (1 << ra->flash->erase_shift); //logic address offset
+ physical_block = get_mapping_block_index(logic_addr >> ra->flash->erase_shift, &phy_block_bbt); //physical block
+ addr = (physical_block << ra->flash->erase_shift) + addr_offset_in_block; //physical address
+
+#endif
+
+ page = (int)((addr & (((loff_t)1<<ra->flash->chip_shift)-1)) >> ra->flash->page_shift);
+
+#ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+ srcpage = (int)((srcaddr & ((1<<ra->flash->chip_shift)-1)) >> ra->flash->page_shift);
+#endif
+
+#ifdef TCSUPPORT_NAND_RT63368
+ logic_page = (int)((logic_addr & ((1<<ra->flash->chip_shift)-1)) >> ra->flash->page_shift); //logic page
+#endif
+
+#if 0
+ /* frankliao added 20101029 */
+ if (nand_block_checkbad(ra, addr)) {
+ printk(KERN_WARNING "nand_do_read_ops: attempt to read a "
+ "bad block at 0x%08x\n", page);
+ return -EFAULT;
+ }
+#endif
+ ranfc_page = page;
+
+// ret = nfc_read_page(ra, ra->buffers, page, FLAG_USE_GDMA | FLAG_VERIFY |
+// ((ops->mode == MTD_OOB_RAW || ops->mode == MTD_OOB_PLACE) ? 0: FLAG_ECC_EN ));
+
+ #if defined(TCSUPPORT_NAND_BADBLOCK_CHECK) || defined(TCSUPPORT_NAND_RT63368)
+ //ranfc_flags = (FLAG_VERIFY | FLAG_ECC_EN);
+ ranfc_flags = (FLAG_ECC_EN | FLAG_VERIFY);
+ #endif
+
+#ifdef TCSUPPORT_NAND_RT63368
+ if(data && len > 0) {
+#endif
+ /* frankliao test delete */
+ ret = nfc_read_page(ra, ra->buffers, page, ranfc_flags);
+ //FIXME, something strange here, some page needs 2 more tries to guarantee read success.
+ if (ret) {
+// printk("read again:\n");
+ /* frankliao modify 20101001 */
+ ret = nfc_read_page(ra, ra->buffers, page, ranfc_flags);
+// ret = nfc_read_page(ra, ra->buffers, page, FLAG_USE_GDMA | FLAG_VERIFY |
+// ((ops->mode == MTD_OOB_RAW || ops->mode == MTD_OOB_PLACE) ? 0: FLAG_ECC_EN ));
+ if (ret) {
+// printk("read again fail \n");
+ #if !defined(TCSUPPORT_NAND_BADBLOCK_CHECK) && !defined(TCSUPPORT_NAND_RT63368)
+ nand_bbt_set(ra, addr >> ra->flash->erase_shift, BBT_TAG_BAD);
+ if ((ret != -EUCLEAN) && (ret != -EBADMSG)) {
+ return ret;
+ } else {
+ /* ecc verification fail, but data need to be returned. */
+ }
+ #else
+ return ret;
+ #endif
+ } else {
+// printk(" read again susccess \n");
+ }
+ }
+
+#ifdef TCSUPPORT_NAND_RT63368
+ }
+#endif
+
+ // oob read
+ if (oob && ooblen > 0) {
+
+#ifdef TCSUPPORT_NAND_RT63368
+ memset(ra->buffers + pagesize, 0xff, oobsize);
+ nfc_read_oob(ra, page, 0, ra->buffers + pagesize, oobsize, FLAG_NONE);
+#endif
+
+ len = nand_read_oob_buf(ra, oob, ooblen, ops->mode, ops->ooboffs);
+ if (len < 0) {
+ printk("nand_read_oob_buf: fail return %x \n", len);
+ return -EINVAL;
+ }
+ oob += len;
+ ops->oobretlen += len;
+ ooblen -= len;
+ }
+
+ // data read
+ offs = addr & pagemask;
+ /* frankliao mark */
+ len = min_t(size_t, datalen, pagesize - offs);
+// len = datalen;
+ if (data && len > 0) {
+			memcpy(data, ra->buffers + offs, len); // we cannot be sure whether ops->buf is DMA-able.
+ data += len;
+ datalen -= len;
+ ops->retlen += len;
+ if (ret) {
+ return ret;
+ }
+ }
+
+// nand_bbt_set(ra, addr >> ra->flash->erase_shift, BBT_TAG_GOOD);
+ // address go further to next page, instead of increasing of length of write. This avoids some special cases wrong.
+ addr = ((loff_t)(page+1) << ra->flash->page_shift); //physical address
+
+ #ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+ srcaddr = (srcpage+1) << ra->flash->page_shift;
+ #endif
+
+ #ifdef TCSUPPORT_NAND_RT63368
+ logic_addr = (logic_page + 1) << ra->flash->page_shift; //logic address
+ #endif
+ }
+
+ return 0;
+// return page;
+}
+
+
+/*
+ * nand_setup - probe the flash ID via nfc_read_id(), match it against
+ * flash_tables/opcode_tables, then carve ra, its page/oob buffers, the
+ * BBT and the mtd_info out of one contiguous DMA-able allocation.
+ */
+static int
+nand_setup(void)
+{
+ int flash_id, i, subpage_bit = 0;
+ unsigned int id_mask, mfr_id, dev_id, nbits, endian = 0;
+ int alloc_size, bbt_size, buffers_size;
+
+ struct nand_info *flash = NULL;
+ struct nand_opcode *opcode = NULL;
+
+ flash_id = nfc_read_id();
+ if (flash_id == -1) {
+ printk("read flash id fail\n");
+ return -1;
+ }
+
+ id_mask = 0xffff;
+ nbits = 16;
+
+ if ((flash_id >> nbits) != 0) {
+ nbits = 8;
+ id_mask = 0xff;
+ }
+
+ // frank added 20110421
+ #ifdef CONFIG_MIPS_RT63365
+ endian = ra_inl(NFC_CTRLII) & 0x02;
+
+ if (endian == NFC_BIG_ENDIAN) {
+ mfr_id = ((flash_id >> 24) & id_mask);
+ dev_id = ((flash_id >> (24-nbits)) & id_mask);
+ } else {
+ mfr_id = (flash_id & id_mask);
+ dev_id = ((flash_id >> nbits) & id_mask);
+ }
+ #else
+ mfr_id = (flash_id & id_mask);
+ dev_id = ((flash_id >> nbits) & id_mask);
+ #endif
+
+// printk("mfr_id : %2x\n", mfr_id);
+// printk("dev_id : %2x\n", dev_id);
+
+ for (i=0; i < ARRAY_SIZE(flash_tables); i++) {
+
+// radbg("index %d\n", i);
+// radbg("mfr_id : %2x\n", flash_tables[i].mfr_id);
+// radbg("dev_id : %2x\n", flash_tables[i].dev_id);
+
+ if ((mfr_id == flash_tables[i].mfr_id) && (dev_id == flash_tables[i].dev_id)) {
+ flash = &flash_tables[i];
+ break;
+ }
+ }
+
+ if (flash == NULL) {
+ printk("Undefined Manufactor ID and Devcie ID\n");
+ return -1;
+ }
+
+
+ if (flash->page_shift == SIZE_512iB_BIT) {
+ subpage_bit = 1;
+ }
+
+ column_addr_cycle = (flash->page_shift - subpage_bit + 7) / 8;
+ row_addr_cycle = (flash->chip_shift - flash->page_shift + 7) / 8;
+ addr_cycle = column_addr_cycle + row_addr_cycle;
+
+ for (i=0; i < ARRAY_SIZE(opcode_tables); i++) {
+ if (flash->opcode_type == opcode_tables[i].type) {
+ opcode = &opcode_tables[i];
+ break;
+ }
+ }
+
+ if (opcode == NULL) {
+ printk("Undefined Opcode\n");
+ return -1;
+ }
+
+//#define ALIGN_32(a) (((unsigned long)(a)+32) & ~31) // code review tag
+#define ALIGN_32(a) (((unsigned long)(a)+31) & ~31) // code review tag
+ buffers_size = ALIGN_32((1<<flash->page_shift) + (1<<flash->oob_shift)); //ra->buffers
+ bbt_size = BBTTAG_BITS * (1<<(flash->chip_shift - flash->erase_shift)) / 8; //ra->bbt
+ bbt_size = ALIGN_32(bbt_size);
+
+ alloc_size = buffers_size + bbt_size;
+ alloc_size += buffers_size; //for ra->readback_buffers
+ alloc_size += sizeof(*ra);
+ alloc_size += sizeof(*ranfc_mtd);
+
+#if !defined (__UBOOT__)
+ ra = (struct ra_nand_chip *)kzalloc(alloc_size, GFP_KERNEL | GFP_DMA);
+#else
+ ra = (struct ra_nand_chip *)malloc(alloc_size);
+#endif
+
+ if (!ra) {
+ printk("%s: mem alloc fail \n", __func__);
+ return -ENOMEM;
+ }
+
+#if defined(__UBOOT__)
+ memset(ra, 0, alloc_size);
+#endif
+
+ printk("%s: alloc %x, at %p , btt(%p, %x), ranfc_mtd:%p\n",
+ __func__ , alloc_size, ra, ra->bbt, bbt_size, ranfc_mtd);
+
+ //dynamic
+ ra->buffers = (char *)((char *)ra + sizeof(*ra));
+ ra->readback_buffers = ra->buffers + buffers_size;
+ ra->bbt = ra->readback_buffers + buffers_size;
+ ranfc_mtd = (struct mtd_info *)(ra->bbt + bbt_size);
+ ra->buffers_page = -1;
+
+ if (flash->page_shift == SIZE_512iB_BIT) {
+ ra->oob = &oob_layout_tables[ STANDARD_SMALL_FLASH ];
+ } else if (flash->page_shift == SIZE_2KiB_BIT) {
+ ra->oob = &oob_layout_tables[ STANDARD_LARGE_FLASH ];
+ } else {
+ printk("Undefined NAND OOB LAYOUT\n");
+ return -1;
+ }
+
+ ra->flash = flash;
+ ra->opcode = opcode;
+
+
+#ifdef TCSUPPORT_NAND_RT63368
+ bmt_pool_size = calc_bmt_pool_size(ra);
+ printk("bmt pool size: %d \n", bmt_pool_size);
+
+ if (!g_bmt)
+ {
+ if ( !(g_bmt = init_bmt(ra, bmt_pool_size)) )
+ {
+ printk("Error: init bmt failed \n");
+ return -1;
+ }
+ }
+
+ if (!g_bbt)
+ {
+ if ( !(g_bbt = start_init_bbt()) )
+ {
+ printk("Error: init bbt failed \n");
+ return -1;
+ }
+ }
+
+ if(write_bbt_or_bmt_to_flash() != 0)
+ {
+ printk("Error: save bbt or bmt to nand failed \n");
+ return -1;
+ }
+
+ if(create_badblock_table_by_bbt())
+ {
+ printk("Error: create bad block table failed \n");
+ return -1;
+ }
+
+#endif
+
+
+ return 0;
+}
+
+
+/************************************************************
+ * the following are mtd necessary interface.
+ ************************************************************/
+
+
+/**
+ * nand_erase - [MTD Interface] erase block(s)
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ *
+ * Erase one or more blocks
+ */
+static int
+ramtd_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ int ret;
+
+ struct ra_nand_chip *ra = (struct ra_nand_chip *)mtd->priv;
+
+ ra_dbg("%s: \n", __func__);
+
+ /* Grab the lock and see if the device is available */
+ nand_get_device(ra, FL_ERASING);
+
+ ret = nand_erase_nand((struct ra_nand_chip *)mtd->priv, instr);
+
+ /* Deselect and wake up anyone waiting on the device */
+ nand_release_device(ra);
+
+ return ret;
+
+}
+
+/**
+ * nand_write - [MTD Interface] NAND write with ECC
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the data to write
+ *
+ * NAND write with ECC
+ */
+static int
+ramtd_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+		 size_t *retlen, const uint8_t *buf)
+{
+	struct ra_nand_chip *ra = mtd->priv;
+	struct mtd_oob_ops ops;
+	int ret;
+	/* frankliao added 20100106, 2K page NAND oobsize */
+//	uint8_t oob_buffer[64];
+
+	ra_dbg("%s: \n", __func__);
+	/* Do not allow writes past end of device */
+	if ((to + len) > mtd->size)
+		return -EINVAL;
+	if (!len)
+		return 0;
+
+	nand_get_device(ra, FL_WRITING);	/* serialize against other flash ops */
+
+	memset(&ops, 0, sizeof(ops));
+
+	ops.len = len;
+
+	ops.datbuf = (uint8_t *)buf;
+
+	ops.oobbuf = NULL;	/* data-only write: no caller-supplied OOB */
+	ops.ooblen = 0;
+
+//	oobsize = (1<<ra->flash->oob_shift);
+
+//	ops.ooblen = ra->oob->oobavail;
+
+	/* frankliao added */
+//	for (i=0; i<oobsize; i++) {
+//		oob_buffer[i] = 0x11;
+//	}
+
+//	ops.oobbuf = oob_buffer;
+
+//	ops.oobbuf = (uint8_t *)(buf + (1 << ra->flash->page_shift));
+	/* for ecc write frankliao added 20101021 */
+//	oobsize = (1<<ra->flash->oob_shift);
+
+	ops.mode = MTD_OOB_AUTO;	/* OOB placed per ecclayout free map */
+
+	ret = nand_do_write_ops(ra, to, &ops);
+
+	/* write to next block, frankliao added 20101029 */
+/*	while (ret != 0) {
+		to += (1 << ra->flash->erase_shift);
+		ret = nand_do_write_ops(ra, to, &ops);
+	} */
+
+	*retlen = ops.retlen;	/* report bytes actually written */
+
+	nand_release_device(ra);
+
+	return ret;
+
+}
+
+/**
+ * nand_read - [MTD Interface] MTD compability function for nand_do_read_ecc
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @len: number of bytes to read
+ * @retlen: pointer to variable to store the number of read bytes
+ * @buf: the databuffer to put data
+ *
+ * Get hold of the chip and call nand_do_read
+ */
+static int
+ramtd_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
+		size_t *retlen, uint8_t *buf)
+{
+
+	struct ra_nand_chip *ra = mtd->priv;
+	int ret;
+//	int oobsize;
+	struct mtd_oob_ops ops;
+	/* frankliao added 20100107 */
+//	unsigned int ecc_en;
+
+	/* frankliao added 20100106, 2K page NAND oobsize */
+//	uint8_t oob_buffer[64];
+
+//	ra_dbg("%s: mtd:%p from:%llx, len:%x, buf:%p \n", __func__, mtd, (unsigned long long)from, len, buf);
+
+	/* Do not allow reads past end of device */
+	if ((from + len) > mtd->size)
+		return -EINVAL;
+	if (!len)
+		return 0;
+
+	nand_get_device(ra, FL_READING);	/* serialize against other flash ops */
+	// frankliao test
+	memset(&ops, 0, sizeof(ops));
+
+	ops.ooblen = 0;	/* data-only read: OOB not returned to caller */
+	ops.oobbuf = NULL;
+
+	/* frankliao added */
+//	ops.ooblen = ra->oob->oobavail;
+
+	ops.len = len;
+	ops.datbuf = buf;
+
+//	ops.oobbuf = NULL;
+
+	/* frankliao modify */
+//	ops.oobbuf = oob_buffer;
+
+	ops.mode = MTD_OOB_AUTO;
+
+	ret = nand_do_read_ops(ra, from, &ops);	/* ECC-corrected read */
+//	for (i=0; i<(int)len; i++) {
+//		if (buf[i] != *(unsigned char*)((unsigned long)(from) + (unsigned long)i + 0xbfc00000))
+//			buf[i] = *(unsigned char *)((unsigned long)(from) + (unsigned long)i + 0xbfc00000);
+//	}
+//	ret = 0;
+//	*retlen = len;
+
+//	oobsize = (1<<ra->flash->oob_shift);
+	/* frankliao added */
+//	for (i=0; i<oobsize; i++) {
+//		printk("oob_buffer[%d] : %x\n", i, oob_buffer[i] & 0xff);
+//	}
+
+	*retlen = ops.retlen;	/* report bytes actually read */
+//	dump_stack();
+
+	/* read from next block, frankliao added 20101029 */
+/*	while (ret != 0) {  // code review tag
+
+		printk("ret = %d\n", ret);
+		printk("read ofs : %x\n", from);
+		from += (1 << ra->flash->erase_shift);
+
+		ret = nand_do_read_ops(ra, from, &ops);
+	} */
+
+/*
+	*retlen = ops.retlen;
+	printk("nand VS SPI\n");
+	printk("from : %llx\n", from);
+	for (i=0; i<(int)len; i++) {
+		if (buf[i] != (*(char *)((unsigned long)(from) + (unsigned long)i + 0xbfc00000) & 0xff) ) {
+			printk("nand : %x\n ", buf[i]);
+			printk("spi %x\n ", (*(char *)((unsigned long)(from) + (unsigned long)i + 0xbfc00000)) & 0xff);
+		}
+	}
+*/
+
+	nand_release_device(ra);
+
+	return ret;
+}
+
+unsigned int ra_nand_read_byte(unsigned long long from)	/* read one byte at @from; NOTE(review): ramtd_nand_read()'s status is ignored, so on read failure the returned byte is whatever was left in ch — confirm callers tolerate this */
+{
+	unsigned char ch;
+	size_t retlen;
+	ramtd_nand_read(ranfc_mtd, (loff_t)from, 1, &retlen, &ch);
+	return ch;
+}
+
+unsigned int ra_nand_read_dword(unsigned long long from)	/* read 4 bytes at @from and pack them big-endian (data[0] = MSB) */
+{
+	unsigned char data[4];
+	unsigned long dword;
+	size_t retlen;
+	int ret, i;
+
+	ret = ramtd_nand_read(ranfc_mtd, (loff_t)from, 4, &retlen, data);
+	if (ret != 0)
+		return -1;	/* NOTE(review): error value is indistinguishable from legitimate data 0xffffffff */
+
+	dword = 0;
+	for (i=0; i<4; i++) {	/* shift-accumulate: bytes enter MSB-first */
+		dword += (unsigned long)data[i];
+		if (i<3)
+			dword <<= 8;
+	}
+	return dword;
+}
+
+/**
+ * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
+ *
+ * NAND read data and/or out-of-band data
+ */
+static int
+ramtd_nand_readoob(struct mtd_info *mtd, loff_t from,
+		   struct mtd_oob_ops *ops)
+{
+	struct ra_nand_chip *ra = mtd->priv;
+	int ret;
+
+	ra_dbg("%s: \n", __func__);
+
+	nand_get_device(ra, FL_READING);	/* caller's @ops is passed straight through; no range check here */
+
+	ret = nand_do_read_ops(ra, from, ops);
+
+	nand_release_device(ra);
+
+	return ret;
+}
+
+/**
+ * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
+ */
+static int
+ramtd_nand_writeoob(struct mtd_info *mtd, loff_t to,
+		    struct mtd_oob_ops *ops)
+{
+	struct ra_nand_chip *ra = mtd->priv;
+	int ret;
+
+	ra_dbg("%s: \n", __func__);
+
+	nand_get_device(ra, FL_WRITING);	/* fix: was FL_READING; this is a write path, match ramtd_nand_write()/markbad() */
+
+	ret = nand_do_write_ops(ra, to, ops);
+
+	nand_release_device(ra);
+
+	return ret;
+}
+
+
+
+/**
+ * nand_block_isbad - [MTD Interface] Check if block at offset is bad
+ * @mtd: MTD device structure
+ * @offs: offset relative to mtd start
+ */
+static int
+ramtd_nand_block_isbad(struct mtd_info *mtd, loff_t offs)
+{
+
+//	ra_dbg("%s: \n", __func__);
+
+	if (offs > mtd->size)
+		return -EINVAL;
+	return 0;	/* NOTE(review): unconditionally reports "good"; the real check below is unreachable — confirm this stub-out is intentional */
+	return nand_block_checkbad((struct ra_nand_chip *)mtd->priv, offs
+	#ifdef TCSUPPORT_NAND_RT63368
+	                                                    , BAD_BLOCK_RAW
+	#endif
+	                                                    );
+}
+
+
+/**
+ * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ */
+static int
+ramtd_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+	int ret;
+	struct ra_nand_chip *ra = mtd->priv;
+
+	return 0;	/* NOTE(review): early return makes this a no-op; the marking code below is unreachable — confirm intentional */
+
+	ra_dbg("%s: \n", __func__);
+
+	nand_get_device(ra, FL_WRITING);
+
+	ret = nand_block_markbad(ra, ofs
+	#ifdef TCSUPPORT_NAND_RT63368
+	                         , BAD_BLOCK_RAW
+	#endif
+	                         );
+
+	nand_release_device(ra);
+	return ret;
+}
+
+
+
+/************************************************************
+ * the init/exit section.
+ */
+
+static int nand_access_page_read_proc(char *buf, char **start, off_t off,
+                                      int count, int *eof, void *data)	/* legacy procfs read_proc: prints last accessed page number */
+{
+	int len;
+
+	len = 0;
+	len += sprintf(buf, "%d\n", ranfc_page);
+
+	len -= off;	/* standard read_proc window arithmetic for partial reads */
+	*start = buf + off;
+
+	if (len > count)
+		len = count;
+	else
+		*eof = 1;	/* everything fit: signal end of file */
+
+	if (len < 0)
+		len = 0;
+
+	return len;
+
+}
+
+
+static int nand_flags_read_proc(char *buf, char **start, off_t off,
+                                int count, int *eof, void *data)	/* legacy procfs read_proc: dump ranfc_flags plus a legend of its bits */
+{
+	int len;
+
+	len = 0;
+	len += sprintf(buf+len, "NAND Flags Status : %d\n", ranfc_flags);
+	len += sprintf(buf+len, "  0 : NONE\n");
+	len += sprintf(buf+len, "  1 : ECC Enable\n");
+	len += sprintf(buf+len, "  2 : GDMA Enable\n");
+	len += sprintf(buf+len, "  4 : Read/Write Verification Enable\n");
+	len += sprintf(buf+len, "-------------------------------------\n");
+
+	len -= off;	/* standard read_proc window arithmetic for partial reads */
+	*start = buf + off;
+
+	if (len > count)
+		len = count;
+	else
+		*eof = 1;	/* everything fit: signal end of file */
+
+	if (len < 0)
+		len = 0;
+
+	return len;
+
+}
+
+
+
+static int nand_flags_write_proc(struct file *file, const char *buffer,
+                                 unsigned long count, void *data) {	/* legacy procfs write_proc: parse a decimal into ranfc_flags and echo the bit meanings */
+	char val_string[8];
+
+	if (count > sizeof(val_string) - 1)	/* reserve one byte for the NUL terminator */
+		return -EINVAL;
+
+	if (copy_from_user(val_string, buffer, count))
+		return -EFAULT;
+
+	val_string[count] = '\0';
+	sscanf(val_string, "%d", &ranfc_flags);	/* ranfc_flags left unchanged if input isn't numeric */
+
+	printk(" nand flag : %d\n", ranfc_flags);
+	printk("  0 : NONE\n");
+	printk("  1 : ECC Enable\n");
+	printk("  2 : GDMA Enable\n");
+	printk("  4 : Read/Write Verification Enable\n");
+	printk("-------------------------------------\n");
+
+	if (ranfc_flags & 0x01) {
+		printk(" ECC Enable\n");
+	}
+
+	if (ranfc_flags & 0x02) {
+		printk(" GDMA Enable\n");
+	}
+
+	if (ranfc_flags & 0x04) {
+		printk(" Read/Write Verification Enable\n");
+	}
+
+	return count;
+}
+
+
+extern int
+doNandRegCheck(uint32_t type, uint32_t pattern);	/* implemented elsewhere: exercises NFC registers */
+
+#define REG_DEF_CHK 1
+#define REG_RW_CHK 2
+
+static int nand_regchk_write_proc(struct file *file, const char *buffer,
+                                  unsigned long count, void *data) {	/* legacy procfs write_proc: run NFC register R/W self-test with several bit patterns */
+
+	char val_string[8];
+	int type;
+
+	if (count > sizeof(val_string) - 1)	/* reserve one byte for the NUL terminator */
+		return -EINVAL;
+
+	if (copy_from_user(val_string, buffer, count))
+		return -EFAULT;
+
+	val_string[count] = '\0';
+	sscanf(val_string, "%d", &type);
+
+	printk(" NAND controller register check\n" );
+	printk(" type 1 : Reg Def Check\n");
+	printk(" type 2 : Reg RW Check\n");	/* fix: "teyp" -> "type" in user-visible output */
+	printk(" -------------------------------------\n");
+
+	if (type == REG_RW_CHK) {
+
+		printk("Input Type : %d\n", REG_RW_CHK);
+
+		printk("Test Pattern 0xffffffff\n");
+		printk("------------------------------------\n");
+		doNandRegCheck(REG_RW_CHK, 0xffffffff);
+
+		printk("Test Pattern 0x55555555\n");
+		printk("------------------------------------\n");
+		doNandRegCheck(REG_RW_CHK, 0x55555555);
+
+		printk("Test Pattern 0x00000000\n");
+		printk("------------------------------------\n");
+		doNandRegCheck(REG_RW_CHK, 0x00000000);
+
+		printk("Test Pattern 0xaaaaaaaa\n");
+		printk("------------------------------------\n");
+		doNandRegCheck(REG_RW_CHK, 0xaaaaaaaa);
+
+		printk("Test Pattern 0xa5a5a5a5\n");
+		printk("------------------------------------\n");
+		doNandRegCheck(REG_RW_CHK, 0xa5a5a5a5);
+
+		printk("Test Pattern 0x5a5a5a5a\n");
+		printk("------------------------------------\n");
+		doNandRegCheck(REG_RW_CHK, 0x5a5a5a5a);
+
+	}
+
+	return count;
+}
+
+#ifdef TCSUPPORT_NAND_RT63368
+int mt6573_nand_erase_hw(struct ra_nand_chip *ra, unsigned long page)	/* BMT hook: erase the block containing @page via the NFC */
+{
+	return nfc_erase_block(ra, page);
+}
+
+int mt6573_nand_exec_write_page(struct ra_nand_chip *ra, int page, u32 page_size, u8 *dat, u8 *oob)	/* BMT hook: assemble data+OOB in ra->buffers and program @page with ECC */
+{
+	memset(ra->buffers, 0xff, page_size + (1 << ra->flash->oob_shift));	/* fix: sizeof(ra->buffers) was pointer size, leaving the buffer uninitialized */
+	memcpy(ra->buffers, dat, page_size);
+	memcpy(ra->buffers + page_size, oob, 1 << ra->flash->oob_shift);
+
+	return nfc_write_page(ra, ra->buffers, page, FLAG_ECC_EN);
+}
+
+int mt6573_nand_exec_read_page(struct ra_nand_chip *ra, int page, u32 page_size, u8 *dat, u8 *oob)	/* BMT hook: ECC-read @page into ra->buffers, split into @dat and @oob */
+{
+	int ret = 0;
+	ret = nfc_read_page(ra, ra->buffers, page, FLAG_ECC_EN | FLAG_VERIFY);
+
+	if(ret)
+	{
+		ret = nfc_read_page(ra, ra->buffers, page, FLAG_ECC_EN | FLAG_VERIFY);	/* retry once before giving up */
+		if(ret)
+		{
+			printk("[%s]: read again fail!", __func__);
+			goto read_fail;	/* leave @dat/@oob untouched on double failure */
+		}
+	}
+
+	memcpy(dat, ra->buffers, page_size);
+	memcpy(oob, ra->buffers + page_size, 1 << ra->flash->oob_shift);
+
+read_fail:
+	return ret;
+
+}
+
+int mt6573_nand_block_markbad_hw(struct ra_nand_chip *ra, unsigned long ofs, unsigned long bmt_block)	/* BMT hook: erase the block at @ofs, then record it as bad */
+{
+	unsigned long page;
+	int block;
+
+	block = ofs >> ra->flash->erase_shift;	/* byte offset -> block index */
+	page = block * (1<<(ra->flash->erase_shift - ra->flash->page_shift));	/* block index -> first page of block */
+
+	nfc_erase_block(ra, page);
+	nand_block_markbad(ra, ofs, bmt_block);
+	return 0;	/* result of markbad is not propagated */
+
+}
+
+int mt6573_nand_block_bad_hw(struct ra_nand_chip *ra, unsigned long ofs, unsigned long bmt_block)	/* BMT hook: thin wrapper around the driver's bad-block check */
+{
+
+	return nand_block_checkbad(ra, ofs, bmt_block);
+
+}
+int calc_bmt_pool_size(struct ra_nand_chip *ra)	/* size the BMT spare pool: count blocks back from chip end until POOL_GOOD_BLOCK_PERCENT of all blocks are good */
+{
+	int chip_size = 1 << ra->flash->chip_shift;
+	int block_size = 1 << ra->flash->erase_shift;
+	int total_block = chip_size / block_size;
+	int last_block = total_block - 1;	/* scan starts at the last block */
+
+	u16 valid_block_num = 0;
+	u16 need_valid_block_num = total_block * POOL_GOOD_BLOCK_PERCENT;
+#if 0
+	printk("need_valid_block_num:%d \n", need_valid_block_num);
+	printk("total block:%d \n", total_block);
+#endif
+	for(;last_block > 0; --last_block)	/* walk backwards; bad blocks don't count toward the quota */
+	{
+		if(nand_block_checkbad(ra, last_block * block_size, BAD_BLOCK_RAW))
+		{
+			continue;
+
+		}
+		else
+		{
+			valid_block_num++;
+			if(valid_block_num == need_valid_block_num)
+			{
+				break;
+			}
+
+		}
+
+	}
+
+	return (total_block - last_block);	/* pool = blocks from last_block (inclusive) to chip end */
+
+}
+#endif
+
+
+static struct mtd_info *nandflash_probe(struct map_info *map)	/* detect the NAND chip, program the NFC, populate the file-scope ranfc_mtd and register partitions/procfs; ra and ranfc_mtd are file-scope — presumably filled by nand_setup(), confirm */
+//int __devinit ra_nand_init(void)
+{
+	int ret, num;
+
+	/* frankliao added for nand flash test */
+	struct proc_dir_entry *nand_flags_proc;
+	struct proc_dir_entry *nand_regchk_proc;
+	struct proc_dir_entry *nand_access_page_proc;
+
+	/* frank */
+	if ( (ra_inl(CR_AHB_BASE+0x64) >> 16) == 0x4){
+		ra_outl(0xbfb00860, ra_inl(0xbfb00860) | (0x00000100));
+	}
+
+
+	// frank modify 20110425
+	ra_outl(NFC_CTRL, ra_inl(NFC_CTRL) | 0x01); //set wp to high
+
+	nfc_all_reset(ra);
+
+	ret = nand_setup();	/* identifies the chip and fills in flash info / opcodes */
+
+	if (ret == -1) {
+		printk("%s: nand flash setup flash info and opcode fail\n", __func__);
+		return NULL;
+	}
+
+#ifndef CONFIG_MIPS_RT63365
+	if (ra->flash->page_shift == SIZE_512iB_BIT) { // RT63165
+		ra_outl(NFC_CTRLII, ra_inl(NFC_CTRLII) | 0x01);
+	}
+#else
+	if (ra->flash->page_shift == SIZE_2KiB_BIT) {	/* controller setup differs for 2K vs 512B pages */
+		ra_outl(NFC_CTRLII, 0x76501);
+	} else if (ra->flash->page_shift == SIZE_512iB_BIT) {
+		ra_outl(NFC_CTRLII, 0x76500);
+	}
+
+	ra_outl(NFC_CTRLII, ra_inl(NFC_CTRLII) | 0x02); //big endian for rt63365
+#endif
+
+//	nfc_all_reset(ra);
+
+//	printk("numchips : %d\n", ra->flash->numchips );
+//	printk("chip_shift : %d\n", ra->flash->chip_shift );
+//	printk("page_shift : %d\n", ra->flash->page_shift );
+//	printk("oob_shift : %d\n", ra->flash->oob_shift );
+//	printk("erase_shift : %d\n", ra->flash->erase_shift );
+//	printk("badblockpos : %d\n", ra->flash->badblockpos );
+
+	//static
+//	ra->numchips = CONFIG_NUMCHIPS;
+//	ra->chip_shift = CONFIG_CHIP_SIZE_BIT;
+//	ra->page_shift = CONFIG_PAGE_SIZE_BIT;
+//	ra->oob_shift = CONFIG_OOBSIZE_PER_PAGE_BIT;
+//	ra->erase_shift = (CONFIG_PAGE_SIZE_BIT + CONFIG_NUMPAGE_PER_BLOCK_BIT);
+//	ra->badblockpos = CONFIG_BAD_BLOCK_POS;
+
+
+	ranfc_mtd->type = MTD_NANDFLASH;
+	ranfc_mtd->flags = MTD_CAP_NANDFLASH;
+#ifdef TCSUPPORT_NAND_RT63368
+	ranfc_mtd->size = nand_logic_size;	/* BMT reserves a pool, so usable size < raw chip size */
+#else
+	ranfc_mtd->size = CONFIG_NUMCHIPS * (1<<ra->flash->chip_shift);
+#endif
+	ranfc_mtd->erasesize = (1<<ra->flash->erase_shift);
+	ranfc_mtd->writesize = (1<<ra->flash->page_shift);
+	ranfc_mtd->oobsize = (1<<ra->flash->oob_shift);
+	ranfc_mtd->oobavail = ra->oob->oobavail;
+	ranfc_mtd->name = "ra_nfc";
+	//ranfc_mtd->index
+	ranfc_mtd->ecclayout = ra->oob;
+	//ranfc_mtd->numberaseregions
+	//ranfc_mtd->eraseregions
+	//ranfc_mtd->bansize
+	ranfc_mtd->erase = ramtd_nand_erase;	/* wire up the MTD interface implemented above */
+	//ranfc_mtd->point
+	//ranfc_mtd->unpoint
+	ranfc_mtd->read = ramtd_nand_read;
+	ranfc_mtd->write = ramtd_nand_write;
+	ranfc_mtd->read_oob = ramtd_nand_readoob;
+	ranfc_mtd->write_oob = ramtd_nand_writeoob;
+	//ranfc_mtd->get_fact_prot_info; ranfc_mtd->read_fact_prot_reg;
+	//ranfc_mtd->get_user_prot_info; ranfc_mtd->read_user_prot_reg;
+	//ranfc_mtd->write_user_prot_reg; ranfc_mtd->lock_user_prot_reg;
+	//ranfc_mtd->writev; ranfc_mtd->sync; ranfc_mtd->lock; ranfc_mtd->unlock; ranfc_mtd->suspend; ranfc_mtd->resume;
+	ranfc_mtd->block_isbad = ramtd_nand_block_isbad;
+	ranfc_mtd->block_markbad = ramtd_nand_block_markbad;
+	//ranfc_mtd->reboot_notifier
+	//ranfc_mtd->ecc_stats;
+	// subpage_sht;
+
+	//ranfc_mtd->get_device; ranfc_mtd->put_device
+	ranfc_mtd->priv = ra;
+	/* frankliao added 20101222 frank*/
+	if (IS_NANDFLASH) {
+		map->fldrv_priv = ra;	/* NOTE(review): map may be NULL when called from ra_nand_init(); IS_NANDFLASH presumably guards that path — confirm */
+	}
+	ranand_read_byte = ra_nand_read_byte;
+	ranand_read_dword = ra_nand_read_dword;
+
+#ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+	nandflash_scan_badblock();
+#endif
+
+#if !defined (__UBOOT__)
+	ranfc_mtd->owner = THIS_MODULE;
+	ra->controller = &ra->hwcontrol;
+	mutex_init(ra->controller);
+
+	/* Register the partitions */
+	if (IS_SPIFLASH) {
+		num = ARRAY_SIZE(rt63165_test_partitions);
+		rt63165_test_partitions[ num-1 ].size = ranfc_mtd->size;	/* last partition stretches to cover the whole device */
+		add_mtd_partitions(ranfc_mtd, rt63165_test_partitions, num);
+	}
+
+	nand_flags_proc = create_proc_entry("nand_flag", 0, NULL);	/* NOTE(review): create_proc_entry() results are dereferenced without NULL checks */
+	nand_flags_proc->read_proc = nand_flags_read_proc;
+	nand_flags_proc->write_proc = nand_flags_write_proc;
+
+	nand_regchk_proc = create_proc_entry("nand_regchk", 0, NULL);
+	nand_regchk_proc->write_proc = nand_regchk_write_proc;
+
+	nand_access_page_proc = create_proc_entry("nand_access_page", 0, NULL);
+	nand_access_page_proc->read_proc = nand_access_page_read_proc;
+
+
+
+	if (IS_NANDFLASH){
+
+	#ifdef TCSUPPORT_DUAL_IMAGE_ENHANCE
+	#ifdef TCSUPPORT_NAND_RT63368
+		offset = SLAVE_IMAGE_OFFSET;
+	#else
+		offset = (1 << ra->flash->chip_shift)/2;	/* slave image lives in the upper half of the chip */
+	#endif
+	#endif
+		return ranfc_mtd;
+	}
+	else
+		return 0;
+#else
+	return ranfc_mtd;
+#endif
+
+}
+
+static void nandflash_destroy(struct mtd_info *mtd)	/* tear down: deregister partitions and free the chip state; @mtd is unused, the file-scope ranfc_mtd is authoritative */
+//static void __devexit ra_nand_remove(void)
+{
+	struct ra_nand_chip *ra;
+
+	if (ranfc_mtd) {
+		ra = (struct ra_nand_chip *)ranfc_mtd->priv;
+
+#if !defined (__UBOOT__)
+		/* Deregister partitions */
+		del_mtd_partitions(ranfc_mtd);
+		kfree(ra);
+#else
+		free(ra);
+#endif
+	}
+}
+
+int __devinit ra_nand_init(void)	/* alternate entry point: probe without a map_info; probe failure is not propagated */
+{
+	nandflash_probe(NULL);
+	return 0;
+}
+
+void __devexit ra_nand_remove(void)	/* alternate exit point matching ra_nand_init() */
+{
+	nandflash_destroy(NULL);
+}
+
+static struct mtd_chip_driver nandflash_chipdrv = {	/* registered with the MTD map layer so do_map_probe("nandflash_probe") finds this driver */
+	.probe		= nandflash_probe,
+	.destroy	= nandflash_destroy,
+	.name		= "nandflash_probe",
+	.module		= THIS_MODULE
+};
+
+
+static int __init nandflash_probe_init(void)	/* module init: make the chip driver available to the map layer */
+{
+	register_mtd_chip_driver(&nandflash_chipdrv);
+	return 0;
+}
+
+static void __exit nandflash_probe_exit(void)	/* module exit: unhook the chip driver */
+{
+	unregister_mtd_chip_driver(&nandflash_chipdrv);
+}
+
+
+#if !defined (__UBOOT__)
+module_init(nandflash_probe_init);
+module_exit(nandflash_probe_exit);
+//rootfs_initcall(ra_nand_init);
+//module_exit(ra_nand_remove);
+
+MODULE_LICENSE("GPL");
+#endif
Index: linux-3.18.21/drivers/mtd/econet/ralink_nand.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/econet/ralink_nand.h 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,289 @@
+#ifndef RT63165_NAND_H
+#define RT63165_NAND_H
+#include <linux/mtd/mtd.h>
+
+#if !defined (__UBOOT__)
+//#include <asm/rt2880/rt_mmap.h>
+#else
+//#include <rt_mmap.h>
+#define EIO 5 /* I/O error */
+#define EINVAL 22 /* Invalid argument */
+#define ENOMEM 12 /* Out of memory */
+#define EFAULT 14 /* Out of accessible address space */
+#define EBADMSG 74 /* Not a data message */
+#define EUCLEAN 117 /* Structure needs cleaning */
+#endif
+
+#define ECC_NO_ERR 0
+#define ECC_ONE_BIT_ERR -6
+#define ECC_DATA_ERR -7
+#define ECC_CODE_ERR -8
+#define ECC_NFC_CONFLICT -9
+
+
+/* frankliao deleted 20100916 */
+//1#include "gdma.h"
+
+#define ra_inl(addr) (*(volatile unsigned int *)(addr))
+#define ra_outl(addr, value) (*(volatile unsigned int *)(addr) = (value))
+#define ra_aor(addr, a_mask, o_value) ra_outl(addr, (ra_inl(addr) & (a_mask)) | (o_value))
+
+/* FOR ID frankliao added 20101012 */
+#define NAND_CMD1_READID 0x90
+#define NAND_CONF_READID 0x410101
+
+/* NAND Reg Verify Type */
+#define REG_DEF_CHK 1
+#define REG_RW_CHK 2
+
+/* Flash Type */
+#define STANDARD_SMALL_FLASH (0)
+#define STANDARD_LARGE_FLASH (1)
+
+#define NONE (-1)
+
+/* Manufacturers */
+#define MANUFACTURER_ST3A 0x20
+#define MANUFACTURER_ST4A 0x0020
+#define MANUFACTURER_MIRCON 0x2c
+#define MANUFACTURER_ZENTEL 0x92
+#define MANUFACTURER_SAMSUNG 0xec
+
+/* SPANSION support */
+#define MANUFACTURER_SPANSION (0x01)
+
+/* ST Device ID */
+#define ST128W3A 0x73
+#define ST512W3A 0x76
+
+/* SPANSION support */
+#define S34ML01G1 (0xf1)
+#define S34ML02G1 (0xda)
+#define S34ML04G1 (0xdc)
+
+/* MICRON Device ID */
+#define MT29F2G08AAD 0xda
+#define MT29F4G08AAC 0xdc
+
+
+/* ZENTEL Device ID */
+#define A5U1GA31ATS 0xf1
+
+/* SAMSUNG Device ID*/
+#define K9F1G08U0D 0xf1
+
+/* SIZE BIT*/
+#define SIZE_512MiB_BIT (29)
+#define SIZE_256MiB_BIT (28)
+#define SIZE_128MiB_BIT (27)
+#define SIZE_64MiB_BIT (26)
+#define SIZE_16MiB_BIT (24)
+#define SIZE_128KiB_BIT (17)
+#define SIZE_16KiB_BIT (14)
+#define SIZE_4KiB_BIT (12)
+#define SIZE_2KiB_BIT (11)
+#define SIZE_512iB_BIT (9)
+#define SIZE_64iB_BIT (6)
+#define SIZE_16iB_BIT (4)
+
+
+/* SIZE BYTE */
+#define SIZE_512M_BYTES (512)
+#define SIZE_2K_BYTES (2048)
+
+#define NFC_BASE (0xbfbe0000)
+#define NFC_CTRL (NFC_BASE + 0x10)
+#define NFC_CONF (NFC_BASE + 0x14)
+#define NFC_CMD1 (NFC_BASE + 0x18)
+#define NFC_CMD2 (NFC_BASE + 0x1c)
+#define NFC_CMD3 (NFC_BASE + 0x20)
+#define NFC_ADDR (NFC_BASE + 0x24)
+#define NFC_DATA (NFC_BASE + 0x28)
+#define NFC_STATUS (NFC_BASE + 0x30)
+#define NFC_INT_EN (NFC_BASE + 0x34)
+#define NFC_INT_ST (NFC_BASE + 0x38)
+
+#ifdef CONFIG_MIPS_RT63165
+
+#define NFC_ECC (NFC_BASE + 0x2c)
+#define NFC_ADDRII (NFC_BASE + 0x3c)
+#define NFC_ECCII (NFC_BASE + 0x40)
+#define NFC_ECCIII (NFC_BASE + 0x44)
+#define NFC_ECCIV (NFC_BASE + 0x48)
+#define NFC_ECC_STII (NFC_BASE + 0x4c)
+#define NFC_ECC_STIII (NFC_BASE + 0x50)
+#define NFC_CTRLII (NFC_BASE + 0x54)
+
+#else
+
+#define NFC_CTRLII (NFC_BASE + 0x3c)
+#define NFC_ECC (NFC_BASE + 0x40)
+#define NFC_ECCII (NFC_BASE + 0x44)
+#define NFC_ECCIII (NFC_BASE + 0x48)
+#define NFC_ECCIV (NFC_BASE + 0x4c)
+#define NFC_ECC_ST (NFC_BASE + 0x50)
+#define NFC_ECC_STII (NFC_BASE + 0x54)
+#define NFC_ECC_STIII (NFC_BASE + 0x58)
+#define NFC_ECC_STIV (NFC_BASE + 0x5c)
+#define NFC_ADDRII (NFC_BASE + 0x60)
+
+#endif
+
+#ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+
+#define TCROMFILE_BLOCK_NUM 13
+#define TCBKROMFILE_BLOCK_NUM 13
+#define TCLINUX_BLOCK_NUM 256
+#define TCSYSLOG_BLOCK_NUM 11
+#define TCWIFI_BLOCK_NUM 2
+
+#define TCROMFILE_START 7
+#define TCROMFILE_END (TCROMFILE_START + TCROMFILE_BLOCK_NUM)
+
+#define TCLINUX_BLOCK_START 20
+#define TCLINUX_BLOCK_END (TCLINUX_BLOCK_START + TCLINUX_BLOCK_NUM)
+
+#define TCLINUX_SLAVE_BLOCK_START 276
+#define TCLINUX_SLAVE_BLOCK_END (TCLINUX_SLAVE_BLOCK_START + TCLINUX_BLOCK_NUM)
+
+#define TCSYSLOG_START 996
+#define TCSYSLOG_END (TCSYSLOG_START + TCSYSLOG_BLOCK_NUM)
+
+#define TCBKROMFILE_START 1007
+#define TCBKROMFILE_END (TCBKROMFILE_START + TCBKROMFILE_BLOCK_NUM)
+
+#define TCWIFI_START 1022
+#define TCWIFI_END (TCWIFI_START + TCWIFI_BLOCK_NUM)
+
+#endif
+
+
+enum _int_stat {
+ INT_ST_ND_DONE = 1<<0,
+ INT_ST_TX_BUF_RDY = 1<<1,
+ INT_ST_RX_BUF_RDY = 1<<2,
+ INT_ST_ECC_ERR = 1<<3,
+ INT_ST_TX_TRAS_ERR = 1<<4,
+ INT_ST_RX_TRAS_ERR = 1<<5,
+ INT_ST_TX_KICK_ERR = 1<<6,
+ INT_ST_RX_KICK_ERR = 1<<7
+};
+
+
+//#define WORKAROUND_RX_BUF_OV 1
+
+//chip
+#define CONFIG_NUMCHIPS 1
+/* frankliao marked, rt63165 has support WP */
+//#define CONFIG_NOT_SUPPORT_WP
+/* frankliao marked, rt63165 has support RB */
+//#define CONFIG_NOT_SUPPORT_RB
+
+#define SMALL_FLASH_ECC_BYTES 3 //! ecc has 3 bytes
+#define SMALL_FLASH_ECC_OFFSET 5 //! ecc starts from offset 5.
+
+#define LARGE_FLASH_ECC_BYTES 12 //! ecc has 12 bytes
+// frank modify 20110415
+#ifdef CONFIG_MIPS_RT63365
+#define LARGE_FLASH_ECC_OFFSET 5 //! ecc starts from offset 5.
+#else
+#define LARGE_FLASH_ECC_OFFSET 52 //! ecc starts from offset 52.
+#endif
+
+/* Status bits */
+#define NAND_STATUS_FAIL 0x01
+#define NAND_STATUS_FAIL_N1 0x02
+#define NAND_STATUS_TRUE_READY 0x20
+#define NAND_STATUS_READY 0x40
+#define NAND_STATUS_WP 0x80
+
+typedef enum {
+ FL_READY,
+ FL_READING,
+ FL_WRITING,
+ FL_ERASING,
+ FL_SYNCING,
+ FL_CACHEDPRG,
+ FL_PM_SUSPENDED,
+} nand_state_t;
+
+/*************************************************************/
+
+typedef enum _ra_flags {
+ FLAG_NONE = 0,
+ FLAG_ECC_EN = (1<<0),
+ FLAG_USE_GDMA = (1<<1),
+ FLAG_VERIFY = (1<<2),
+} RA_FLAGS;
+
+#define BBTTAG_BITS 2
+#define BBTTAG_BITS_MASK ((1<<BBTTAG_BITS) -1)
+enum BBT_TAG {
+ BBT_TAG_UNKNOWN = 0, //2'b01
+ BBT_TAG_GOOD = 3, //2'b11
+ BBT_TAG_BAD = 2, //2'b10
+ BBT_TAG_RES = 1, //2'b01
+};
+
+
+struct nand_opcode {	/* per-flash-type NAND command set (small-page vs large-page opcodes) */
+	const int type;		/* STANDARD_SMALL_FLASH or STANDARD_LARGE_FLASH */
+	const int read1;
+	const int read2;
+	const int readB;
+	const int readoob;
+	const int pageprog1;
+	const int pageprog2;
+	const int writeoob;
+	const int erase1;
+	const int erase2;
+	const int status;
+	const int reset;
+};
+
+
+struct nand_info {	/* static description of one supported NAND part */
+	const int mfr_id;	/* manufacturer ID (first READID byte) */
+	const int dev_id;	/* device ID (second READID byte) */
+	const char *name;
+	const int numchips;
+	const int chip_shift;	/* log2 of chip size in bytes */
+	const int page_shift;	/* log2 of page size */
+	const int erase_shift;	/* log2 of erase-block size */
+	const int oob_shift;	/* log2 of OOB size per page */
+	const int badblockpos;	/* byte offset of the bad-block marker in OOB */
+	const int opcode_type;	/* selects a struct nand_opcode set */
+};
+
+
+struct ra_nand_chip {	/* runtime state of the NAND controller/chip pair */
+
+	struct nand_info *flash;	/* geometry/IDs of the detected part */
+
+#if !defined (__UBOOT__)
+	struct mutex hwcontrol;		/* storage for the device lock */
+	struct mutex *controller;	/* points at hwcontrol once initialized */
+#endif
+
+	struct nand_ecclayout *oob;	/* OOB free/ECC layout exported via mtd */
+	struct nand_opcode *opcode;	/* command set for this flash type */
+	int state;			/* nand_state_t: FL_READY/FL_READING/... */
+	unsigned int buffers_page;	/* page currently cached in buffers */
+	unsigned char *buffers; //[CFG_PAGESIZE + CFG_PAGE_OOBSIZE]; (pointer, NOT an array — sizeof() gives pointer size)
+	unsigned char *readback_buffers;	/* scratch for read-after-write verify */
+	unsigned char *bbt;		/* 2-bit-per-block bad block table */
+#if defined (WORKAROUND_RX_BUF_OV)
+	unsigned int sandbox_page; // steal a page (block) for read ECC verification
+#endif
+
+};
+
+//fixme, gdma api
+int nand_dma_sync(void);
+void release_dma_buf(void);
+int set_gdma_ch(unsigned long dst,
+ unsigned long src, unsigned int len, int burst_size,
+ int soft_mode, int src_req_type, int dst_req_type,
+ int src_burst_mode, int dst_burst_mode);
+
+#endif
Index: linux-3.18.21/drivers/mtd/maps/Kconfig
===================================================================
--- linux-3.18.21.orig/drivers/mtd/maps/Kconfig 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/mtd/maps/Kconfig 2018-02-05 13:20:41.000000000 +0800
@@ -225,6 +225,14 @@
help
Support for flash chips on NETtel/SecureEdge/SnapGear boards.
+config MTD_TC3162
+ tristate "TrendChip TC3162/TC3262 MTD support"
+ #depends on MIPS_TC3162 && MTD_PARTITIONS && MTD_CFI
+ depends on (MIPS_TC3162 || MIPS_TC3162U || MIPS_TC3262)
+ #&& MTD_PARTITIONS
+ help
+ Flash memory access on TrendChip TC3162 Boards
+
config MTD_LANTIQ
tristate "Lantiq SoC NOR support"
depends on LANTIQ
Index: linux-3.18.21/drivers/mtd/maps/Makefile
===================================================================
--- linux-3.18.21.orig/drivers/mtd/maps/Makefile 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/mtd/maps/Makefile 2018-02-05 13:20:41.000000000 +0800
@@ -30,6 +30,7 @@
obj-$(CONFIG_MTD_SCx200_DOCFLASH)+= scx200_docflash.o
obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
obj-$(CONFIG_MTD_PCI) += pci.o
+obj-$(CONFIG_MTD_TC3162) += tc3162-flash.o
obj-$(CONFIG_MTD_IMPA7) += impa7.o
obj-$(CONFIG_MTD_UCLINUX) += uclinux.o
obj-$(CONFIG_MTD_NETtel) += nettel.o
Index: linux-3.18.21/drivers/mtd/maps/tc3162-flash.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/maps/tc3162-flash.c 2018-02-05 14:20:19.000000000 +0800
@@ -0,0 +1,1646 @@
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/root_dev.h>
+#include <linux/kernel.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/vmalloc.h>
+#include <asm/io.h>
+#include <asm/tc3162/tc3162.h>
+#include <linux/version.h>
+#ifdef TCSUPPORT_DUAL_IMAGE_ENHANCE
+#include "../chips/spiflash_tc3162.h"
+#endif
+
+#if defined(TCSUPPORT_INIC_CLIENT) || defined(TCSUPPORT_INIC_HOST)
+#include <linux/mtd/fttdp_inic.h>
+#endif
+
+#if defined (TCSUPPORT_GPON_DUAL_IMAGE) || defined (TCSUPPORT_EPON_DUAL_IMAGE)
+#include "flash_layout/tc_partition.h"
+#endif
+
+#ifdef TCSUPPORT_MTD_PARTITIONS_CMDLINE
+#include <linux/slab.h>
+
+#define STR_LEN 150
+/* special size referring to all the remaining space in a partition */
+#define SIZE_REMAINING UINT_MAX
+#define SIZE_TO_GET (UINT_MAX-1)
+#define OFFSET_CONTINUOUS UINT_MAX
+#define OFFSET_BACK_FORWARD (UINT_MAX-1)
+
+/* error message prefix */
+#define ERRP "mtd: "
+#define TCLINUX "tclinux"
+#define TCLINUX_SLAVE "tclinux_slave"
+#define TCLINUX_REAL_SIZE "tclinux_real_size"
+#define TCLINUX_SLAVE_REAL_SIZE "tclinux_slave_real_size"
+#define RESERVEAREA "reservearea"
+#define KERNEL_PART "kernel"
+#define ROOTFS_PART "rootfs"
+#define KERNEL_SLAVE_PART "kernel_slave"
+#define ROOTFS_SLAVE_PART "rootfs_slave"
+#define BOOTLOADER_PART "bootloader"
+#define ROMFILE_PART "romfile"
+#define BOOTLOADER_PART_STR "0[bootloader],"
+#define ROMFILE_PART_STR "0[romfile],"
+
+extern unsigned long flash_base;
+
+/* mtdpart_setup() parses into here */
+static struct mtd_partition *ecnt_parts;
+static int num_parts = 0;
+static int has_remaining_part_flag = 0;
+static uint64_t tclinux_part_size = 0;
+static uint64_t tclinux_part_offset = OFFSET_CONTINUOUS;
+static int kernel_part_index = -1;
+static int kernel_slave_part_index = -1;
+#endif
+
+#define WINDOW_ADDR 0x1fc00000
+#define WINDOW_SIZE 0x400000
+#define BUSWIDTH 2
+
+#define TRX_LEN 256
+#define KERNEL_PARTITION(a) (a + TRX_LEN) //Include trx header
+#define ROOTFS_PARTITION(a) ((a + 0x10000) & ~(0x10000-1))
+//#define ROOTFS_PARTITION(a) (a)
+
+#ifdef CONFIG_DUAL_IMAGE
+#ifdef TCSUPPORT_FREE_BOOTBASE
+#define FLAG_ADDR (START_ADDR - 1)
+#else
+#define FLAG_ADDR 0x8001ffff
+#endif
+#if !defined(TCSUPPORT_CT_DUAL_IMAGE)
+#if !defined(TCSUPPORT_CUC_DUAL_IMAGE)
+#ifdef TCSUPPORT_NAND_BMT
+#define MAIN_IMAGE_SIZE (0x1000000)//16M for tclinux.bin
+#define SLAVE_IMAGE_OFFSET (0x7E0000)
+#define SLAVE_IMAGE_SIZE (0x3B0000)
+#else
+#define MAIN_IMAGE_SIZE (0x500000)
+#define SLAVE_IMAGE_OFFSET (0x520000)
+#define SLAVE_IMAGE_SIZE (0x400000)
+#endif
+#endif
+#endif
+#endif
+
+#if !defined(TCSUPPORT_CT_OSGI)
+#define JOYME_ADD_JFFS2 (0)
+#define JOYME_ADD_YAFS2 (0)
+#endif
+
+#ifdef TCSUPPORT_JFFS2_BLOCK
+#define BLOCK_FOR_JFFS2 (0x400000)
+#if defined(TCSUPPORT_RESERVEAREA_EXTEND)
+#define JFFS2_OFFSET (0xb90000)
+#else
+#define JFFS2_OFFSET (0xbc0000)
+#endif
+#endif
+
+
+/* frankliao added 20101215 */
+extern int nand_flash_avalable_size;
+extern unsigned long flash_base;
+extern unsigned int (*ranand_read_byte)(unsigned long long);
+extern unsigned int (*ranand_read_dword)(unsigned long long);
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 36)
+extern int __devinit ra_nand_init(void);
+extern void __devinit ra_nand_remove(void);
+#else
+extern int ra_nand_init(void);
+extern void ra_nand_remove(void);
+
+#endif
+
+#ifdef TCSUPPORT_NEW_SPIFLASH
+extern unsigned char ReadSPIByte(unsigned long index);
+extern unsigned long ReadSPIDWord(unsigned long index);
+#else
+#ifdef TCSUPPORT_MT7510_E1
+#define ReadSPIByte(i) (((*((unsigned char*)i))==0) ? (*((unsigned char*)i)): (*((unsigned char*)i)))
+#define ReadSPIDWord(i) (((*((unsigned int*)i))==0) ? (*((unsigned int*)i)): (*((unsigned int*)i)))
+#else
+#define ReadSPIByte(i) (*((unsigned char*)i))
+#define ReadSPIDWord(i) (*((unsigned int*)i))
+#endif
+#endif
+
+#ifdef TCSUPPORT_MT7510_E1
+#define READ_FLASH_BYTE(i) ( (IS_NANDFLASH) ? \
+ ((ranand_read_byte != NULL) ? ranand_read_byte((i)) : -1) \
+ : (ReadSPIByte(i)) )
+
+
+/* for read flash, frankliao added 20101216 */
+#define READ_FLASH_DWORD(i) ( (IS_NANDFLASH) ? \
+ ((ranand_read_dword != NULL) ? ranand_read_dword((i)) : -1) \
+ : (ReadSPIDWord(i)) )
+#else
+/* for read flash, frankliao added 20101216 */
+#define READ_FLASH_DWORD(i) ( (IS_NANDFLASH) ? \
+ ((ranand_read_dword != NULL) ? ranand_read_dword((i)) : -1) \
+ : (ReadSPIDWord(i)) )
+#endif
+ #ifdef TCSUPPORT_MTD_ENCHANCEMENT
+ #if 0
+ #if defined ( TCSUPPORT_RESERVEAREA_1_BLOCK)
+ #define BLOCK_NUM_FOR_RESERVEAREA 1
+ #elif defined(TCSUPPORT_RESERVEAREA_2_BLOCK)
+ #define BLOCK_NUM_FOR_RESERVEAREA 2
+ #elif defined(TCSUPPORT_RESERVEAREA_3_BLOCK)
+ #define BLOCK_NUM_FOR_RESERVEAREA 3
+#else //TCSUPPORT_RESERVEAREA_4_BLOCK
+ #define BLOCK_NUM_FOR_RESERVEAREA 4
+#endif
+#endif
+#if ((TCSUPPORT_RESERVEAREA_BLOCK != 1)&& (TCSUPPORT_RESERVEAREA_BLOCK != 2)&& (TCSUPPORT_RESERVEAREA_BLOCK !=3)&& (TCSUPPORT_RESERVEAREA_BLOCK !=4) && (TCSUPPORT_RESERVEAREA_BLOCK !=5) && (TCSUPPORT_RESERVEAREA_BLOCK !=6) && (TCSUPPORT_RESERVEAREA_BLOCK !=7))
+#define BLOCK_NUM_FOR_RESERVEAREA 4
+#else
+ #define BLOCK_NUM_FOR_RESERVEAREA TCSUPPORT_RESERVEAREA_BLOCK
+#endif
+#endif
+
+static struct mtd_info *tc3162_mtd_info;
+
+static struct map_info tc3162_map = {
+ .name = "tc3162",
+ .size = WINDOW_SIZE,
+ .bankwidth = BUSWIDTH,
+ .phys = WINDOW_ADDR,
+};
+#ifdef TCSUPPORT_SQUASHFS_ADD_YAFFS
+#define SQUASHFS_ADD_YAFFS_SIZE (0x500000 + JOYME_ADD_YAFS2) //5M
+#endif
+#ifndef TCSUPPORT_IS_FH_PON
+static struct mtd_partition tc3162_parts[] = {
+ { /* First partition */
+ name : "bootloader", /* Bootloader section */
+#ifdef TCSUPPORT_BOOTROM_LARGE_SIZE
+ size : 0x00020000, /* Size =128k */
+#else
+ size : 0x00010000, /* Size */
+#endif
+ offset : 0 /* Offset from start of flash- location 0x0*/
+// mask_flags : MTD_WRITEABLE /* This partition is not writable */
+ },
+ { /* Second partition */
+ name : "romfile", /* config filesystem section */
+ size : 0x00010000, /* Size */
+ offset : MTDPART_OFS_APPEND /* Append after bootloader section */
+ },
+ { /* Third partition */
+ name : "kernel", /* Kernel section */
+#ifdef CONFIG_MTD_PURE_BRIDGE
+ size : 0x000a0000, /* Size */
+#else
+ size : 0x000d0000,
+#endif
+ /*
+ * frank modify for nand flash support
+ * for nand flash, romfile partition is put in the last block,
+ * so the kernel partition cannot append after romfile section
+ */
+ offset : 0x20000 /* Append after bootloader section */
+// offset : MTDPART_OFS_APPEND /* Append after bootloader section */
+ },
+ { /* Fourth partition */
+ name : "rootfs", /* Root filesystem section */
+ size : MTDPART_SIZ_FULL, /* Occupy rest of flash */
+ offset : MTDPART_OFS_APPEND /* Append after kernel section */
+ },
+ {
+ name : "tclinux", /* tclinux partition */
+ size : MTDPART_SIZ_FULL, /* Occupy rest of flash */
+ offset : 0x00020000
+ }
+#ifdef CONFIG_DUAL_IMAGE
+ ,
+ {
+ name : "kernel_slave", /* tclinux slave partition */
+#ifdef CONFIG_MTD_PURE_BRIDGE
+ size : 0x000a0000, /* Size */
+#else
+ size : 0x000d0000,
+#endif
+ offset : SLAVE_IMAGE_OFFSET
+ },
+ {
+ name : "rootfs_slave", /* tclinux slave partition */
+ size : MTDPART_SIZ_FULL, /* Occupy rest of flash */
+ offset : MTDPART_OFS_APPEND
+ },
+ {
+ name : "tclinux_slave", /* tclinux slave partition */
+ size : MTDPART_SIZ_FULL, /* Occupy rest of flash */
+ offset : SLAVE_IMAGE_OFFSET
+ }
+#endif
+#ifdef TCSUPPORT_INIC_HOST
+ ,
+ {
+ name : INIC_CLIENT_ROMFILE_NAME,
+ size : INIC_CLIENT_ROMFILE_SIZE,
+ offset : MTDPART_OFS_APPEND
+ }
+#endif
+#ifdef TCSUPPORT_JFFS2_BLOCK
+ ,
+ {
+ name : "jffs2", /*test partition */
+ size : BLOCK_FOR_JFFS2, //MTDPART_SIZ_FULL, /* Occupy rest of flash */
+ offset : JFFS2_OFFSET
+ }
+#endif
+ #ifdef TCSUPPORT_MTD_ENCHANCEMENT
+ ,
+ {
+ name : "reservearea", /*test partition */
+ size : 0x00040000, /* occupy the last 4 blocks */
+ offset : MTDPART_OFS_APPEND
+ }
+#endif
+#if defined(TCSUPPORT_MULTI_BOOT) && !defined(TCSUPPORT_C1_ZY_SFU)
+ ,
+ {
+ name : "romd", /*test partition */
+ size : 0x00010000, /* occupy one block*/
+ offset : MTDPART_OFS_APPEND
+ }
+#endif
+#if !defined(TCSUPPORT_CT_OSGI)
+#if defined(TCSUPPORT_SQUASHFS_ADD_YAFFS)
+ ,
+ {
+ name : "yaffs", /*nand yaffs partition */
+ size : SQUASHFS_ADD_YAFFS_SIZE, /* occupy 5M*/
+ offset : MTDPART_OFS_APPEND
+ }
+#endif
+#endif
+
+};
+#else
+#if !defined(TCSUPPORT_FH_JOYMEV2_PON)
+static struct mtd_partition tc3162_parts[] = {
+ { /* First partition */
+ name : "boot", /* Bootloader section */
+ size : 0x200000, /* Size =128k */
+ offset : 0 /* Offset from start of flash- location 0x0*/
+ },
+ {
+ name : "KernelA",
+ size : 0x00400000 ,
+ offset : 0x00200000
+ },
+
+ {
+ name : "AppA",
+ size : 0x1400000 ,
+ offset : MTDPART_OFS_APPEND
+ },
+
+ {
+ name : "RootfsA",
+ size : 0x1000000 ,
+ offset : MTDPART_OFS_APPEND
+ }
+
+ ,
+ {
+ name : "KernelB",
+ size : 0x00400000 ,
+ offset : MTDPART_OFS_APPEND
+ } ,
+ {
+ name : "AppB",
+ size : 0x1400000 ,
+ offset : MTDPART_OFS_APPEND
+ },
+ {
+ name : "RootfsB",
+ size : 0x1000000 ,
+ offset : MTDPART_OFS_APPEND
+ },
+ {
+ name : "ConfigurationA",
+ size : 0x200000 ,
+ offset : MTDPART_OFS_APPEND
+ },
+ {
+ name : "ConfigurationB",
+ size : 0xa00000 ,
+ offset : MTDPART_OFS_APPEND
+ } ,
+ {
+ name : "UserLocalCT",
+ size : 0x400000 ,
+ offset : MTDPART_OFS_APPEND
+ },
+ {
+ name : "Userdata",
+ size : 0xe00000 ,
+ offset : MTDPART_OFS_APPEND
+ }
+
+};
+#endif
+#endif
+static int tc3162_parts_size = sizeof(tc3162_parts) / sizeof(tc3162_parts[0]);
+
+static struct mtd_info *get_mtd_named(char *name)
+{
+	int i;
+	struct mtd_info *mtd;
+
+	for (i = 0; i < 32; i++) {
+		mtd = get_mtd_device(NULL, i);
+		if (!IS_ERR_OR_NULL(mtd)) {	/* get_mtd_device() returns ERR_PTR on failure, not NULL */
+			if (strcmp(mtd->name, name) == 0)
+				return(mtd);
+			put_mtd_device(mtd);
+		}
+	}
+	return(NULL);
+}
+
+#if defined(TCSUPPORT_CT_DUAL_IMAGE) || defined(TCSUPPORT_CUC_DUAL_IMAGE) || defined(TCSUPPORT_NAND_BMT)
+struct trx_header {
+ unsigned long int magic; /* "HDR0" */
+ unsigned long int kernel_len; /* Length of kernel image */
+ unsigned long int crc32; /* 32-bit CRC from flag_version to end of file */
+ unsigned long int flag_version; /* 0:15 flags, 16:31 version */
+ unsigned long int reserved[12]; /* Reserved field of header */
+};
+#define TRX_MAGIC2 0x32524448 /* "for tclinux" */
+#endif
+
+/******************************************************************************
+ Function: tc3162_map_init
+ Description: It's used to init tc3162_map
+ Input: void
+ Return: 0: Success, -EIO: fail
+******************************************************************************/
+int tc3162_map_init(void){
+#ifdef TCSUPPORT_INIC_CLIENT
+ u_int32_t ram_base;
+#endif
+
+ /*add address mapping on 7510. Pork*/
+ if(isMT751020 || isMT7505 || isEN751221 || isEN751627||isEN7580){
+ uint32 tmpVal;
+ tmpVal = regRead32(0xbfb00038);
+ tmpVal &= 0xffe0e0e0;
+ tmpVal |= 0x80070f00;
+ regWrite32(0xbfb00038,tmpVal);
+ //VPint(0xbfb00038) |= 0x80070F00;
+ #ifdef TCSUPPORT_INIC_CLIENT
+ ram_base = 0x800000 * (1 << (((VPint(0xbfb0008c) >> 13) & 0x7) - 1))- INIC_CLIENT_RAM_SIMU_MAX_SIZE;
+ printk("tc3162: iNIC simulated flash device 0x%08x at 0x%08x\n", INIC_CLIENT_RAM_SIMU_MAX_SIZE, ram_base);
+ tc3162_map.virt = ioremap_nocache(ram_base, INIC_CLIENT_RAM_SIMU_MAX_SIZE);
+ tc3162_map.phys = ram_base;
+ tc3162_map.size = INIC_CLIENT_RAM_SIMU_MAX_SIZE;
+ #else
+ printk("tc3162: flash device 0x%08x at 0x%08x\n", 0x1000000, 0x1c000000);
+ tc3162_map.virt = ioremap_nocache(0x1c000000, 0x1000000);
+ tc3162_map.phys = 0x1c000000;
+ tc3162_map.size = 0x1000000;
+ #endif
+ ioremap_nocache(WINDOW_ADDR, WINDOW_SIZE);
+ }
+ /*add 8M 16M flash support. shnwind*/
+ else if (isTC3162U || isTC3182 || isRT65168 || isRT63165 || isRT63365 || isRT63260){
+// header = (unsigned int *)0xb0020000;
+ /*enable addr bigger than 4M support.*/
+ VPint(0xbfb00038) |= 0x80000000;
+ printk("tc3162: flash device 0x%08x at 0x%08x\n", 0x1000000, 0x10000000);
+ tc3162_map.virt = ioremap_nocache(0x10000000, 0x1000000);
+ tc3162_map.phys = 0x10000000;
+ tc3162_map.size = 0x1000000;
+ ioremap_nocache(WINDOW_ADDR, WINDOW_SIZE);
+ }else{
+
+// header = (unsigned int *)0xbfc20000;
+ printk("tc3162: flash device 0x%08x at 0x%08x\n", WINDOW_SIZE, WINDOW_ADDR);
+ tc3162_map.virt = ioremap_nocache(WINDOW_ADDR, WINDOW_SIZE);
+
+ }
+ if (!tc3162_map.virt) {
+ printk("tc3162: Failed to ioremap\n");
+ return -EIO;
+ }
+
+ simple_map_init(&tc3162_map);
+
+ return 0;
+}
+
+static int tc3162_mtd_info_init(void){
+ #ifdef TCSUPPORT_INIC_CLIENT
+ printk("MT75XX: INIC mode\n");
+ tc3162_mtd_info = do_map_probe("map_ram", &tc3162_map);
+ #else
+ /* check if boot from SPI flash */
+ if (IS_NANDFLASH) {
+ tc3162_mtd_info = do_map_probe("nandflash_probe", &tc3162_map);
+ } else if (IS_SPIFLASH) {
+ tc3162_mtd_info = do_map_probe("spiflash_probe", &tc3162_map);
+ } else {
+ tc3162_mtd_info = do_map_probe("cfi_probe", &tc3162_map);
+ }
+ #endif
+
+ if (!tc3162_mtd_info) {
+ #ifdef TCSUPPORT_INIC_CLIENT
+ printk("iNIC flash fail\n");
+ #endif
+ iounmap(tc3162_map.virt);
+ return -ENXIO;
+ }
+
+ tc3162_mtd_info->owner = THIS_MODULE;
+
+ return 0;
+}
+
+static void tc3162_put_rootfs(void){
+ struct mtd_info *mtd;
+
+#ifdef CONFIG_DUAL_IMAGE
+ char *bufaddr = (char*)FLAG_ADDR;
+ const char flagvalue = 1;//not change!!because we use this flag to judge which image
+ char tmp[8] = {0};
+#endif
+ #ifdef CONFIG_DUAL_IMAGE
+ //read the flag from last block to tell kernel use which image(main or slave)
+ #if 0
+ memcpy(tmp,(char*)(0xb0000000+tc3162_mtd_info->size -sizeof(char)),sizeof(char));
+ #endif
+ memcpy(tmp,(char*)bufaddr,sizeof(char));
+ if(flagvalue == tmp[0])
+ {
+ printk("\r\nrootfs_slave");
+ mtd = get_mtd_named("rootfs_slave");
+ }
+ else
+ {
+ printk("\r\nrootfs");
+ #endif
+ mtd = get_mtd_named("rootfs");
+ #ifdef CONFIG_DUAL_IMAGE
+ }
+ #endif
+ if (mtd) {
+ ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
+ put_mtd_device(mtd);
+ }
+}
+
+#ifndef TCSUPPORT_MTD_PARTITIONS_CMDLINE
+int tc3162_add_partitions(void){
+ unsigned int *header;
+ unsigned int addr;
+ #if defined(TCSUPPORT_BOOTROM_LARGE_SIZE)
+ u_int32_t tclinux_flash_offset = 0x30000;
+ #else
+ u_int32_t tclinux_flash_offset = 0x20000;
+ #endif
+
+ #ifdef TCSUPPORT_SQUASHFS_ADD_YAFFS
+ u_int32_t nand_yaffs_size = SQUASHFS_ADD_YAFFS_SIZE;
+ #else
+ u_int32_t nand_yaffs_size = 0;
+ #endif
+
+ #if defined(CONFIG_DUAL_IMAGE) || defined(TCSUPPORT_MTD_ENCHANCEMENT) || defined(TCSUPPORT_MULTI_BOOT) || defined(TCSUPPORT_NAND_BADBLOCK_CHECK)
+ int i = 0;
+ #endif
+ #ifdef CONFIG_DUAL_IMAGE
+ char *bufaddr = (char*)FLAG_ADDR;
+ const char flagvalue = 1;//not change!!because we use this flag to judge which image
+ char tmp[8] = {0};
+ #endif
+ //#if defined(CONFIG_DUAL_IMAGE) && (defined(TCSUPPORT_MTD_ENCHANCEMENT) ||defined(TCSUPPORT_MULTI_BOOT))
+#if defined(CONFIG_DUAL_IMAGE)
+#ifdef TCSUPPORT_DUAL_IMAGE_ENHANCE
+ u_int32_t tclinux_slave_offset = offset+tclinux_flash_offset;
+#else
+ u_int32_t tclinux_slave_offset = MAIN_IMAGE_SIZE+tclinux_flash_offset;
+#endif
+ u_int32_t tclinux_slave_size = 0;
+#endif
+#if defined(TCSUPPORT_MTD_ENCHANCEMENT) || defined(TCSUPPORT_MULTI_BOOT)
+ u_int32_t tclinux_size = 0;
+#endif
+#if defined(TCSUPPORT_CT_DUAL_IMAGE) || defined(TCSUPPORT_CUC_DUAL_IMAGE) || defined(TCSUPPORT_NAND_BMT)
+ struct trx_header *trx = NULL;
+ char *trx_addr;
+ unsigned int magic;
+#endif
+
+ #ifdef TCSUPPORT_NAND_RT63368
+ header = (unsigned int *)(flash_base + 0x40000);
+ #elif defined(TCSUPPORT_NAND_BADBLOCK_CHECK)
+ header = (unsigned int *)(flash_base + 0x280000);
+ #else
+ /* frankliao added 20101223 */
+ #ifdef TCSUPPORT_INIC_CLIENT
+ header = (unsigned int *)(flash_base + INIC_CLIENT_BOOTLOADER_SIZE + INIC_CLIENT_ROMFILE_SIZE);
+ #else
+ header = (unsigned int *)(flash_base + tclinux_flash_offset);
+ #endif
+ #endif
+
+ #ifdef TCSUPPORT_BOOTROM_LARGE_SIZE
+ if (IS_NANDFLASH) {
+ if(tc3162_mtd_info->erasesize >= 0x20000){
+ /*tclinux offset is 0x80000 for 128k&256k block size*/
+ //tclinux_flash_offset = tc3162_mtd_info->erasesize*2;
+ tclinux_flash_offset = 0x40000*2;
+
+#if defined(CONFIG_DUAL_IMAGE)
+#ifdef TCSUPPORT_DUAL_IMAGE_ENHANCE
+ tclinux_slave_offset = offset+tclinux_flash_offset;
+#else
+ tclinux_slave_offset = MAIN_IMAGE_SIZE+tclinux_flash_offset;
+#endif
+#endif
+ header = (unsigned int *)(flash_base + tclinux_flash_offset);
+ }
+ }
+ #endif
+
+ #if defined(TCSUPPORT_NAND_BADBLOCK_CHECK) || defined(TCSUPPORT_NAND_RT63368)
+ for(i= 0; i < tc3162_parts_size; i++)
+ {
+ if(!strcmp(tc3162_parts[i].name, "bootloader"))
+ {
+ tc3162_parts[i].size = 0x20000;
+ }
+
+ if(!strcmp(tc3162_parts[i].name, "romfile"))
+ {
+ tc3162_parts[i].size = 0x20000;
+ #ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+ tc3162_parts[i].offset = 0xe0000;
+ #else
+ tc3162_parts[i].offset = 0x20000;
+ #endif
+ }
+
+ }
+ #endif
+
+ #ifdef CONFIG_DUAL_IMAGE
+ for(i= 0; i < tc3162_parts_size; i++)
+ {
+ if(!strcmp(tc3162_parts[i].name,"kernel"))
+ {
+ addr = READ_FLASH_DWORD((unsigned long long)((unsigned int)header + 20 * sizeof(unsigned int)));
+ tc3162_parts[i].size = KERNEL_PARTITION( addr );
+// tc3162_parts[i].size = KERNEL_PARTITION(header[20]);
+ addr = READ_FLASH_DWORD((unsigned long long)((unsigned int)header + 21 * sizeof(unsigned int)));
+ tc3162_parts[i+1].size = ROOTFS_PARTITION( addr );
+// tc3162_parts[i+1].size = ROOTFS_PARTITION(header[21]);
+ #if defined(TCSUPPORT_MTD_ENCHANCEMENT) || defined(TCSUPPORT_MULTI_BOOT)
+ tclinux_size = tc3162_parts[i].size+tc3162_parts[i+1].size;
+ #endif
+
+ #ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+ tc3162_parts[i].offset = 0x280000;
+ tc3162_parts[i+2].offset = tc3162_parts[i].offset;
+ #elif defined(TCSUPPORT_NAND_RT63368)
+ tc3162_parts[i].offset = 0x40000;
+ tc3162_parts[i+2].offset = tc3162_parts[i].offset;
+ #endif
+ }
+ if(!strcmp(tc3162_parts[i].name,"kernel_slave"))
+ {
+#ifdef TCSUPPORT_DUAL_IMAGE_ENHANCE
+ unsigned int *header_slave = (unsigned int *)(flash_base + tclinux_flash_offset + offset);
+ tc3162_parts[i].offset = offset + tclinux_flash_offset;
+ tc3162_parts[i+2].offset = offset + tclinux_flash_offset;
+
+#elif defined(TCSUPPORT_NAND_BADBLOCK_CHECK)
+ unsigned int *header_slave = (unsigned int *)(flash_base + 0x2280000);
+ tc3162_parts[i].offset = 0x2280000;
+ tc3162_parts[i+2].offset = tc3162_parts[i].offset;
+#else
+ unsigned int *header_slave = (unsigned int *)(flash_base + tclinux_flash_offset + MAIN_IMAGE_SIZE);
+// unsigned int *header_slave = (unsigned int *)(0xb0020000+0x500000);
+ tc3162_parts[i].offset = MAIN_IMAGE_SIZE + tclinux_flash_offset;
+ tc3162_parts[i+2].offset = MAIN_IMAGE_SIZE + tclinux_flash_offset;
+#endif
+// tc3162_parts[i].size = KERNEL_PARTITION(header_slave[20]);
+// tc3162_parts[i+1].size = ROOTFS_PARTITION(header_slave[21]);
+ addr = READ_FLASH_DWORD((unsigned long long)((unsigned int)header_slave + 20 * sizeof(unsigned int)));
+// tc3162_parts[i].size = KERNEL_PARTITION(header_slave[20]);
+ tc3162_parts[i].size = KERNEL_PARTITION( addr );
+
+ addr = READ_FLASH_DWORD((unsigned long long)((unsigned int)header_slave + 21 * sizeof(unsigned int)));
+ tc3162_parts[i+1].size = ROOTFS_PARTITION( addr );
+// tc3162_parts[i+1].size = ROOTFS_PARTITION(header_slave[21]);
+
+ #if defined(TCSUPPORT_MTD_ENCHANCEMENT) || defined(TCSUPPORT_MULTI_BOOT)
+ tclinux_slave_offset = tc3162_parts[i].offset;
+ tclinux_slave_size = tc3162_parts[i].size + tc3162_parts[i+1].size;
+ #endif
+
+#if defined(TCSUPPORT_CT_DUAL_IMAGE) || defined(TCSUPPORT_CUC_DUAL_IMAGE) || defined(TCSUPPORT_NAND_BMT)
+			trx = (struct trx_header *)header_slave;
+			trx_addr = (char *)&(trx->magic);	/* cast: trx_addr is char *, magic is unsigned long */
+			magic = READ_FLASH_DWORD((unsigned long long)(unsigned int)trx_addr);
+ if(magic !=TRX_MAGIC2){
+ tclinux_slave_offset = SLAVE_IMAGE_OFFSET;
+ tclinux_slave_size = SLAVE_IMAGE_SIZE;
+ tc3162_parts[i].size = 0; //no slave image
+ tc3162_parts[i+1].size = 0; //no slave image
+ }
+#endif
+
+ }
+ if (IS_NANDFLASH) {
+ /* frankliao enhance 20110112, for nand flash romfile */
+ if(!strcmp(tc3162_parts[i].name,"tclinux_slave")) {
+ /* frankliao add 20110112, for 128K block size nand flash romfile */
+ if (tc3162_mtd_info->erasesize >= 0x20000) {
+ #ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+ tc3162_parts[i].size = 0x2000000;
+ #else
+ tc3162_parts[i].size = tc3162_mtd_info->size - nand_yaffs_size - 6*tc3162_mtd_info->erasesize - tclinux_slave_offset;
+ #endif
+ } else {
+ tc3162_parts[i].size = tc3162_mtd_info->size - tclinux_slave_offset;
+ }
+ }
+
+ /*
+ * frankliao enhance 20110112
+ * the tclinux partition start from 0x20000, end at tclinux_slave_offset
+ */
+ if(!strcmp(tc3162_parts[i].name,"tclinux")) {
+ #ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+ tc3162_parts[i].size = 0x2000000;
+ #else
+ tc3162_parts[i].size = tclinux_slave_offset - tclinux_flash_offset;
+ #endif
+ }
+ }
+ }
+ #else
+// tc3162_parts[2].size = KERNEL_PARTITION(header[20]);
+// tc3162_parts[2].size = KERNEL_PARTITION(header[21]);
+ addr = READ_FLASH_DWORD((unsigned long long)((unsigned int)header + 20 * sizeof(unsigned int)));
+ tc3162_parts[2].size = KERNEL_PARTITION( addr );
+ addr = READ_FLASH_DWORD((unsigned long long)((unsigned int)header + 21 * sizeof(unsigned int)));
+ tc3162_parts[3].size = ROOTFS_PARTITION( addr );
+
+ #if defined(TCSUPPORT_NAND_BADBLOCK_CHECK) || defined(TCSUPPORT_NAND_RT63368)
+ #ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+ tc3162_parts[2].offset = 0x280000;
+ #else
+ tc3162_parts[2].offset = 0x40000;
+ #endif
+ tc3162_parts[4].offset = tc3162_parts[2].offset;
+ #endif
+
+ if (IS_NANDFLASH) {
+ /* frankliao added 20110112, for 128K block size NAND Flash */
+ if (tc3162_mtd_info->erasesize >= 0x20000) {
+ #ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+ tc3162_parts[4].size = 0x2000000;
+ #else
+ /* the last block store nand flash romfile */
+ tc3162_parts[4].size = tc3162_mtd_info->size - nand_yaffs_size - 6*tc3162_mtd_info->erasesize - tclinux_flash_offset;
+ #endif
+ }
+ }
+
+ #if defined(TCSUPPORT_MTD_ENCHANCEMENT) || defined(TCSUPPORT_MULTI_BOOT)
+ tclinux_size = tc3162_parts[2].size + tc3162_parts[3].size;
+ #endif
+ #endif
+
+//use last 4 block as reserve area for storing data(for example:syslog,backupromfile,and so on)
+ #ifdef TCSUPPORT_MTD_ENCHANCEMENT
+ for(i= 0; i < tc3162_parts_size; i++)
+ {
+ if(!strcmp(tc3162_parts[i].name,"reservearea"))
+ {
+ /*
+ * frankliao modify 20110112
+ * 64K block size SPI Flash & 128K block size NAND Flash
+ */
+ if (tc3162_mtd_info->erasesize >= 0x10000) {
+ #ifdef TCSUPPORT_NAND_BADBLOCK_CHECK
+ tc3162_parts[i].offset = 0x7c80000;
+ tc3162_parts[i].size = 0x380000;
+ #else
+ #ifdef TCSUPPORT_NAND_FLASH
+#if !defined(TCSUPPORT_CT_PON)
+ tc3162_parts[i].offset = tc3162_mtd_info->size - nand_yaffs_size - BLOCK_NUM_FOR_RESERVEAREA*( tc3162_mtd_info->erasesize);
+ tc3162_parts[i].size = BLOCK_NUM_FOR_RESERVEAREA*(tc3162_mtd_info->erasesize);
+#endif
+ #else
+ tc3162_parts[i].offset = tc3162_mtd_info->size - nand_yaffs_size - BLOCK_NUM_FOR_RESERVEAREA*( tc3162_mtd_info->erasesize);
+ tc3162_parts[i].size = BLOCK_NUM_FOR_RESERVEAREA*(tc3162_mtd_info->erasesize);
+ #endif
+ #endif
+ /* 16 block size NAND Flash */
+ } else {
+ tc3162_parts[i].offset = tc3162_mtd_info->size - nand_yaffs_size - BLOCK_NUM_FOR_RESERVEAREA*(0x10000);
+ tc3162_parts[i].size = BLOCK_NUM_FOR_RESERVEAREA*(0x10000);
+ }
+ }
+
+ #ifndef TCSUPPORT_NAND_BADBLOCK_CHECK
+
+ #ifdef CONFIG_DUAL_IMAGE
+ memcpy(tmp,(char*)bufaddr,sizeof(char));
+ if(flagvalue != tmp[0])//use main image
+ {
+ #endif
+ if(!strcmp(tc3162_parts[i].name,"tclinux"))
+ {
+ #ifdef CONFIG_DUAL_IMAGE
+ tc3162_parts[i].size = tclinux_slave_offset -tclinux_flash_offset ; //reserve the last 4 blocks
+ #else
+
+ /*
+ * frankliao modify 201100112
+ * 64K block size SPI Flash & 128K block size NAND Flash
+ */
+ if (tc3162_mtd_info->erasesize == 0x10000) {
+ tc3162_parts[i].size = tc3162_mtd_info->size - BLOCK_NUM_FOR_RESERVEAREA*( tc3162_mtd_info->erasesize) - tclinux_flash_offset; //reserve the last 4 blocks
+ }
+ else if (tc3162_mtd_info->erasesize >= 0x20000) {
+ tc3162_parts[i].size = tc3162_mtd_info->size - nand_yaffs_size - BLOCK_NUM_FOR_RESERVEAREA*( tc3162_mtd_info->erasesize) - tclinux_flash_offset;
+ /* 16K block size NAND Flash */
+ } else {
+ tc3162_parts[i].size = tc3162_mtd_info->size - BLOCK_NUM_FOR_RESERVEAREA*(0x10000) - tclinux_flash_offset;
+ }
+ #endif
+ if(tclinux_size > tc3162_parts[i].size)
+ {
+ printk("\r\ntclinux size is beyond the limit!!");
+ return -1;
+ }
+ }
+ #ifdef CONFIG_DUAL_IMAGE
+ }
+ if(!strcmp(tc3162_parts[i].name,"tclinux_slave"))
+ {
+ /*
+ * frankliao modify 201100112
+ * 64K block size SPI Flash & 128K block size NAND Flash
+ */
+ if (tc3162_mtd_info->erasesize == 0x10000) {
+ tc3162_parts[i].size = tc3162_mtd_info->size - nand_yaffs_size - BLOCK_NUM_FOR_RESERVEAREA*( tc3162_mtd_info->erasesize) -tclinux_slave_offset; //reserve the last 4 blocks
+ } else if (tc3162_mtd_info->erasesize >= 0x20000) {
+ #ifdef TCSUPPORT_NAND_FLASH
+#if !defined(TCSUPPORT_CT_PON)
+ tc3162_parts[i].size = tc3162_mtd_info->size - nand_yaffs_size - BLOCK_NUM_FOR_RESERVEAREA*( tc3162_mtd_info->erasesize) -tclinux_slave_offset;
+#endif
+ #else
+ tc3162_parts[i].size = tc3162_mtd_info->size - nand_yaffs_size - 6*( tc3162_mtd_info->erasesize) -tclinux_slave_offset;
+ #endif
+ /* 16K block size NAND Flash */
+ } else {
+ tc3162_parts[i].size = tc3162_mtd_info->size -BLOCK_NUM_FOR_RESERVEAREA*(0x10000) - tclinux_slave_offset;
+ }
+			#ifdef TCSUPPORT_JFFS2_BLOCK
+			#ifndef TCSUPPORT_NAND_FLASH
+#if !defined(TCSUPPORT_CT_PON)
+			tc3162_parts[i].size -= BLOCK_FOR_JFFS2;
+#endif
+			#endif
+ #endif
+ if(tclinux_slave_size > tc3162_parts[i].size)
+ {
+ printk("\r\ntclinux_slave size is beyond the limit!!");
+
+#if !defined(TCSUPPORT_CY_PON)
+ // return -1;
+#endif
+ }
+ }
+ #endif
+ #endif
+ #ifdef TCSUPPORT_NAND_FLASH
+ #endif
+ #ifdef TCSUPPORT_SQUASHFS_ADD_YAFFS
+ if(!strcmp(tc3162_parts[i].name,"yaffs"))
+ {
+#if !defined(TCSUPPORT_CT_OSGI)
+ tc3162_parts[i].offset = tc3162_mtd_info->size - nand_yaffs_size;
+#endif
+ }
+ #endif
+
+ }
+ #endif
+
+ #if defined(TCSUPPORT_MULTI_BOOT)
+ for(i= 0; i < tc3162_parts_size; i++)
+ {
+ #if !defined(TCSUPPORT_C1_ZY_SFU)
+ if(!strcmp(tc3162_parts[i].name,"romd"))
+ {
+ if (tc3162_mtd_info->erasesize >= 0x10000) {
+ tc3162_parts[i].offset = tc3162_mtd_info->size -5*( tc3162_mtd_info->erasesize);
+ tc3162_parts[i].size = (tc3162_mtd_info->erasesize);
+ } else {
+ tc3162_parts[i].offset = tc3162_mtd_info->size -5*(0x10000);
+ tc3162_parts[i].size = 0x10000;
+ }
+ }
+ #endif
+
+
+ #ifdef CONFIG_DUAL_IMAGE
+ memcpy(tmp,(char*)bufaddr,sizeof(char));
+ if(flagvalue != tmp[0])//use main image
+ {
+ #endif
+
+ if(!strcmp(tc3162_parts[i].name,"tclinux"))
+ {
+ #ifdef CONFIG_DUAL_IMAGE
+ tc3162_parts[i].size = tclinux_slave_offset -tclinux_flash_offset ;
+ #else
+ if (tc3162_mtd_info->erasesize >= 0x20000) {
+ tc3162_parts[i].size = tc3162_mtd_info->size -6*( tc3162_mtd_info->erasesize) -tclinux_flash_offset;
+ } else if (tc3162_mtd_info->erasesize == 0x10000) {
+ tc3162_parts[i].size = tc3162_mtd_info->size -5*( tc3162_mtd_info->erasesize) -tclinux_flash_offset;
+ } else {
+ tc3162_parts[i].size = tc3162_mtd_info->size -5*(0x10000) -tclinux_flash_offset;
+ }
+ #endif
+
+ if(tclinux_size > tc3162_parts[i].size)
+ {
+ printk("tclinux size is beyond the limit!!\r\n");
+ return -1;
+ }
+ }
+
+
+ #ifdef CONFIG_DUAL_IMAGE
+ }
+ if(flagvalue == tmp[0])//use slave image
+ {
+ if(!strcmp(tc3162_parts[i].name,"tclinux_slave"))
+ {
+ if (tc3162_mtd_info->erasesize >= 0x20000) {
+ tc3162_parts[i].size = tc3162_mtd_info->size -6*tc3162_mtd_info->erasesize -tclinux_slave_offset;
+ } else if (tc3162_mtd_info->erasesize == 0x10000) {
+ tc3162_parts[i].size = tc3162_mtd_info->size -5*tc3162_mtd_info->erasesize -tclinux_slave_offset;
+ } else {
+ tc3162_parts[i].size = tc3162_mtd_info->size -5*(0x10000) -tclinux_slave_offset;
+ }
+ if(tclinux_slave_size > tc3162_parts[i].size)
+ {
+ printk("tclinux_slave size is beyond the limit!!\r\n");
+#if !defined(TCSUPPORT_CY_PON)
+ // return -1;
+#endif
+ }
+ }
+ }
+ #endif
+ }
+#endif
+
+#if !defined(TCSUPPORT_NAND_BADBLOCK_CHECK) && !defined(TCSUPPORT_NAND_RT63368)
+ /*Reset the kernel partition offset*/
+ tc3162_parts[2].offset = tclinux_flash_offset;
+ /*Reset the tclinux partition offset*/
+ tc3162_parts[4].offset = tclinux_flash_offset;
+
+ /* frank added 20110111 for 128K block size NAND Flash*/
+ if (tc3162_mtd_info->erasesize >= 0x20000) {
+ #ifdef TCSUPPORT_BOOTROM_LARGE_SIZE
+ /* reset the bootloader partition size to 0x40000 which is a blocksize of 128K&256k NAND Flash */
+ tc3162_parts[0].size = 0x40000;
+ /*romfile offset is 0x40000 for 128k&256k block size*/
+ //tc3162_parts[1].offset = tc3162_mtd_info->erasesize;
+ tc3162_parts[1].offset = 0x40000;
+
+ /* reset the romfile partition size */
+ tc3162_parts[1].size = 0x40000;
+ #else
+ /* reset the bootloader partition size to 0x20000 which is a blocksize of 128K NAND Flash */
+ tc3162_parts[0].size = tc3162_mtd_info->erasesize;
+ /*
+ * reset the romfile partition offset.
+ * the romfile partition starts from the last block address
+ */
+ tc3162_parts[1].offset = tc3162_mtd_info->size - nand_yaffs_size - 6*tc3162_mtd_info->erasesize;
+ /* reset the romfile partition size */
+ tc3162_parts[1].size = tc3162_mtd_info->erasesize;
+ #endif
+ }
+#endif
+
+#ifdef TCSUPPORT_INIC_HOST
+ for(i = 0; i < tc3162_parts_size; i++) {
+ if(!strcmp(tc3162_parts[i].name,INIC_CLIENT_ROMFILE_NAME)) {
+ tc3162_parts[i - 1].size -= INIC_CLIENT_ROMFILE_SIZE;
+ tc3162_parts[i].offset = tc3162_parts[i - 1].offset + tc3162_parts[i - 1].size;
+ }
+ }
+#endif
+
+
+ add_mtd_partitions(tc3162_mtd_info, tc3162_parts, tc3162_parts_size);
+ return 0;
+}
+#else
+unsigned long long tc3162_memparse(const char *ptr, char **retptr, unsigned int blocksize)
+{
+	char *endptr;	/* local pointer to end of parsed string */
+
+	unsigned long long ret = simple_strtoull(ptr, &endptr, 0);
+
+	switch (*endptr) {
+	case 'G':
+	case 'g':
+		ret <<= 10;	/* intentional fall through: G = M << 10 */
+	case 'M':
+	case 'm':
+		ret <<= 10;	/* intentional fall through: M = K << 10 */
+	case 'K':
+	case 'k':
+		ret <<= 10;
+		endptr++;
+		break;
+	case 'B':
+	case 'b':
+		ret *=blocksize;	/* 'B'/'b' suffix: value is a count of erase blocks */
+		endptr++;
+		break;
+	default:
+		break;
+	}
+
+	if (retptr)
+		*retptr = endptr;
+
+	return ret;
+}
+
+static void tc3162_newpart_set_other_parts(struct mtd_partition *part, char *name, int index, unsigned int blocksize){
+ if(!part || !name){
+ printk("tc3162_newpart_set_other_parts fail, input NULL\n");
+ return;
+ }
+ if(!strcmp(name, TCLINUX)){
+ part[0].name = KERNEL_PART;
+ part[0].size = SIZE_TO_GET;
+ part[0].offset = OFFSET_CONTINUOUS;
+ part[1].name = ROOTFS_PART;
+ part[1].size = SIZE_TO_GET;
+ part[1].offset = OFFSET_CONTINUOUS;
+ kernel_part_index = index;
+ }
+ else if(!strcmp(name, TCLINUX_SLAVE)){
+ part[0].name = KERNEL_SLAVE_PART;
+ part[0].size = SIZE_TO_GET;
+ part[0].offset = OFFSET_CONTINUOUS;
+ part[1].name = ROOTFS_SLAVE_PART;
+ part[1].size = SIZE_TO_GET;
+ part[1].offset = OFFSET_CONTINUOUS;
+ kernel_slave_part_index = index;
+ }
+ else if(!strcmp(name, RESERVEAREA)){
+#ifdef TCSUPPORT_NAND_FLASH
+#endif
+ part->name = RESERVEAREA;
+ part->offset = OFFSET_BACK_FORWARD;
+ part->size = (blocksize*TCSUPPORT_RESERVEAREA_BLOCK);
+ }
+}
+/*
+ * Parse one partition definition for an MTD. Since there can be many
+ * comma separated partition definitions, this function calls itself
+ * recursively until no more partition definitions are found. Nice side
+ * effect: the memory to keep the mtd_partition structs and the names
+ * is allocated upon the last definition being found. At that point the
+ * syntax has been verified ok.
+ */
+static struct mtd_partition * tc3162_newpart(char *s, char **retptr, int *num_parts,
+						int this_part, unsigned char **extra_mem_ptr,
+						int extra_mem_size, unsigned int blocksize)
+{
+	struct mtd_partition *parts;
+	unsigned long size = SIZE_TO_GET;
+	unsigned long offset = OFFSET_CONTINUOUS;
+	char *name;
+	int name_len;
+	unsigned char *extra_mem;
+	char delim;
+	unsigned int mask_flags;
+
+	/* fetch the partition size */
+	if (*s == '-'){
+		if(has_remaining_part_flag == 0){
+			/* assign all remaining space to this partition */
+			size = SIZE_REMAINING;
+			has_remaining_part_flag = 1;
+			s++;
+		}
+		else{
+			printk(KERN_ERR ERRP "no fill-up partitions allowed after already having a fill-up partition\n");
+			return NULL;
+		}
+	}
+
+	else{
+		size = tc3162_memparse(s, &s, blocksize);
+		if(size == 0)
+			size = SIZE_TO_GET;
+		if (size < PAGE_SIZE)
+		{
+			printk(KERN_ERR ERRP "partition size too small (%lx)\n", size);
+			return NULL;
+		}
+	}
+
+	/* fetch partition name and flags */
+	mask_flags = 0; /* this is going to be a regular partition */
+	delim = 0;
+
+	/* now look for name */
+	if (*s == '['){
+		delim = ']';
+	}
+
+	if (delim){
+		char *p;
+
+		name = ++s;
+		p = strchr(name, delim);
+		if (!p)
+		{
+			printk(KERN_ERR ERRP "no closing %c found in partition name\n", delim);
+			return NULL;
+		}
+		name_len = p - name;
+		s = p + 1;
+	}
+	else{
+		name = NULL;
+		name_len = 13; /* Partition_000 */
+	}
+
+	/* record name length for memory allocation later */
+	extra_mem_size += name_len + 1;
+
+	/* offset type is append */
+	if (strncmp(s, "a", 1) == 0){
+		offset = OFFSET_CONTINUOUS;
+		s += 1;
+	}
+
+	/* offset type is back forward*/
+	if (strncmp(s, "end", 3) == 0){
+		offset = OFFSET_BACK_FORWARD;
+		s += 3;
+	}
+
+	/* test if more partitions are following */
+	if (*s == ',')
+	{
+		if(name && !strncmp(name, TCLINUX, name_len)){ /* name is NULL for unnamed partitions; strncmp(NULL,..) would oops */
+			this_part += 2;
+			extra_mem_size +=strlen(KERNEL_PART)+strlen(ROOTFS_PART)+2;
+		}
+		else if(name && !strncmp(name, TCLINUX_SLAVE, name_len)){
+			this_part += 2;
+			extra_mem_size +=strlen(KERNEL_SLAVE_PART)+strlen(ROOTFS_SLAVE_PART)+2;
+		}
+
+		/* more partitions follow, parse them */
+		parts = tc3162_newpart(s + 1, &s, num_parts, this_part + 1,
+				&extra_mem, extra_mem_size, blocksize);
+		if (!parts)
+			return NULL;
+	}
+	else
+	{	/* this is the last partition: allocate space for all */
+		int alloc_size;
+
+		if(name && !strncmp(name, TCLINUX, name_len)){ /* same NULL guard as above */
+			this_part += 2;
+			extra_mem_size +=strlen(KERNEL_PART)+strlen(ROOTFS_PART)+2;
+		}
+		else if(name && !strncmp(name, TCLINUX_SLAVE, name_len)){
+			this_part += 2;
+			extra_mem_size +=strlen(KERNEL_SLAVE_PART)+strlen(ROOTFS_SLAVE_PART)+2;
+		}
+
+		*num_parts = this_part + 2;	/*add reservearea partition*/
+		extra_mem_size += strlen(RESERVEAREA)+1;
+		alloc_size = *num_parts * sizeof(struct mtd_partition) +
+				extra_mem_size;
+		parts = kzalloc(alloc_size, GFP_KERNEL);
+		if (!parts){
+			printk(KERN_ERR ERRP "out of memory\n");
+			return NULL;
+		}
+		extra_mem = (unsigned char *)(parts + *num_parts);
+	}
+	/* enter this partition (offset will be calculated later if it is zero at this point) */
+	parts[this_part].size = size;
+	parts[this_part].offset = offset;
+	parts[this_part].mask_flags = mask_flags;
+	if (name){
+		strlcpy(extra_mem, name, name_len + 1);
+	}
+	else{
+		sprintf(extra_mem, "Partition_%03d", this_part);
+	}
+	parts[this_part].name = extra_mem;
+	extra_mem += name_len + 1;
+	if(!strcmp(parts[this_part].name, TCLINUX)){
+		tc3162_newpart_set_other_parts(&parts[this_part-2],TCLINUX, this_part-2, blocksize);
+	}
+	else if(!strcmp(parts[this_part].name, TCLINUX_SLAVE)){
+		tc3162_newpart_set_other_parts(&parts[this_part-2],TCLINUX_SLAVE, this_part-2, blocksize);
+	}
+	if(this_part == (*num_parts -2)){
+		tc3162_newpart_set_other_parts(&parts[*num_parts -1], RESERVEAREA, *num_parts -1, blocksize);
+	}
+	printk("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n",this_part, parts[this_part].name,
+		parts[this_part].offset, parts[this_part].size,parts[this_part].mask_flags);
+
+	/* return (updated) pointer to extra_mem memory */
+	if (extra_mem_ptr)
+		*extra_mem_ptr = extra_mem;
+
+	/* return (updated) pointer command line string */
+	*retptr = s;
+
+	/* return partition table */
+	return parts;
+}
+
+/******************************************************************************
+ Function:
+ Description: Parse the command line.
+ Input:
+ Return:
+******************************************************************************/
+static int tc3162_mtdpart_setup(char *s, unsigned int blocksize)
+{
+	char cmdline[STR_LEN];
+	unsigned char *extra_mem;
+	char *p;
+
+	if(s == NULL){
+		panic("tc3162_mtdpart_setup(), mtd partition cmdline is NULL\n");
+	}
+
+	if(strlen(s) + strlen(BOOTLOADER_PART_STR)+strlen(ROMFILE_PART_STR) >= STR_LEN){ /* ">=" leaves room for the terminating NUL in cmdline[] */
+		panic("tc3162_mtdpart_setup(), string length outof size\n");
+	}
+
+	tc3162_check_mtdpart_str(cmdline, s);
+
+	p = cmdline;
+	printk("\nparsing <%s>\n", p);
+
+	/*
+	 * parse one mtd. have it reserve memory for the
+	 * struct cmdline_mtd_partition and the mtd-id string.
+	 */
+	ecnt_parts = tc3162_newpart(p,		/* cmdline */
+			&s,		/* out: updated cmdline ptr */
+			&num_parts,	/* out: number of parts */
+			0,		/* first partition */
+			&extra_mem,	/* out: extra mem */
+			0, blocksize);
+	if(!ecnt_parts)
+	{
+		/*
+		 * An error occurred. We're either:
+		 * a) out of memory, or
+		 * b) in the middle of the partition spec
+		 * Either way, this mtd is hosed and we're
+		 * unlikely to succeed in parsing any more
+		 */
+		return 0;
+	}
+
+	return 1;
+}
+
+void tc3162_set_kernel_rootfs_part(void){
+ unsigned int addr;
+ unsigned int *header;
+ unsigned int *header_slave;
+ int i;
+ uint64_t tclinux_real_size = 0;
+ uint64_t tclinux_slave_real_size = 0;
+
+ if(kernel_part_index < 0)
+ return;
+
+ if(tclinux_part_offset != OFFSET_CONTINUOUS){
+ header = (unsigned int *)(flash_base + tclinux_part_offset);
+ }
+ else{
+ panic("tc3162_set_kernel_rootfs_part(), tclinux partition offset error\n");
+ }
+
+ if((tclinux_part_size !=0) && (tclinux_part_size != SIZE_REMAINING)){
+ header_slave = (unsigned int *)(flash_base + tclinux_part_offset + tclinux_part_size);
+ }
+ else{
+ panic("tc3162_set_kernel_rootfs_part(), tclinux partition size error\n");
+ }
+
+
+ addr = READ_FLASH_DWORD((unsigned long long)((unsigned int)header + 20 * sizeof(unsigned int)));
+ ecnt_parts[kernel_part_index].size = KERNEL_PARTITION(addr);
+ addr = READ_FLASH_DWORD((unsigned long long)((unsigned int)header + 21 * sizeof(unsigned int)));
+ tclinux_real_size = ecnt_parts[kernel_part_index].size + addr;
+ ecnt_parts[kernel_part_index+1].size = ROOTFS_PARTITION(addr);
+ ecnt_parts[kernel_part_index+1].offset = ecnt_parts[kernel_part_index].offset + ecnt_parts[kernel_part_index].size;
+
+ if(kernel_slave_part_index > 0){
+ addr = READ_FLASH_DWORD((unsigned long long)((unsigned int)header_slave + 20 * sizeof(unsigned int)));
+ ecnt_parts[kernel_slave_part_index].size = KERNEL_PARTITION(addr);
+ addr = READ_FLASH_DWORD((unsigned long long)((unsigned int)header_slave + 21 * sizeof(unsigned int)));
+ tclinux_slave_real_size = ecnt_parts[kernel_slave_part_index].size + addr;
+ ecnt_parts[kernel_slave_part_index+1].size = ROOTFS_PARTITION(addr);
+ ecnt_parts[kernel_slave_part_index+1].offset = ecnt_parts[kernel_slave_part_index].offset + ecnt_parts[kernel_slave_part_index].size;
+ }
+ /*check whether tclinux/tclinux_slave partition enough*/
+ for(i=0; i <num_parts; i++){
+ if(!strcmp(ecnt_parts[i].name,TCLINUX)){
+ if(ecnt_parts[i].size < tclinux_real_size){
+ printk("tclinux partition size = %llx, real size = %llx\n", ecnt_parts[i].size, tclinux_real_size);
+ printk("tclinux partition size < its real size!!!\n");
+ }
+ }
+ else if(!strcmp(ecnt_parts[i].name,TCLINUX_SLAVE)){
+ if(ecnt_parts[i].size < tclinux_slave_real_size){
+ printk("tclinux_slave partition size = %llx, real size = %llx\n", ecnt_parts[i].size, tclinux_slave_real_size);
+ printk("tclinux_slave partition size < its real size!!!\n");
+ }
+ }
+ }
+}
+
+void tc3162_check_mtdpart_str(char *dst, char *src){
+	int have_bootloader_part = 0;
+	int have_romfile_part = 0;
+	char *bootloader_p = NULL;
+	char *bootloader_end_p = NULL;
+
+	if((bootloader_p = strstr(src, BOOTLOADER_PART)) != NULL){ /* parenthesized: assignment intended, not "==" */
+		have_bootloader_part = 1;
+	}
+	if(strstr(src, ROMFILE_PART)){
+		have_romfile_part = 1;
+	}
+
+	if(have_romfile_part && have_bootloader_part){
+		strcpy(dst, src);
+	}
+	else if(!have_romfile_part && !have_bootloader_part){
+		strcpy(dst, BOOTLOADER_PART_STR);
+		strcat(dst, ROMFILE_PART_STR);
+		strcat(dst, src);
+	}
+	else if(have_romfile_part && !have_bootloader_part){
+		strcpy(dst, BOOTLOADER_PART_STR);
+		strcat(dst, src);
+	}
+	else if(!have_romfile_part && have_bootloader_part){
+		bootloader_end_p = strchr(bootloader_p, ',');
+		if(!bootloader_end_p)
+			panic("cmdline, bootloader partition error!\n");
+		memcpy(dst, src, bootloader_end_p - src+1); dst[bootloader_end_p - src + 1] = '\0'; /* memcpy does not NUL-terminate; strcat below requires it */
+		strcat(dst, ROMFILE_PART_STR);
+		strcat(dst, bootloader_end_p+1);
+	}
+
+}
+
+uint64_t tc3162_get_bootloader_romfile_size(char *name, unsigned int blocksize){
+#ifdef TCSUPPORT_BOOTROM_LARGE_SIZE
+ uint64_t bootloader_size = 0x00020000;
+#else
+ uint64_t bootloader_size = 0x00010000;
+#endif
+ uint64_t romfile_size = 0x00010000;
+#ifdef TCSUPPORT_NAND_FLASH
+ if (IS_NANDFLASH) {
+ blocksize = 0x40000;
+ }
+#endif
+
+ if(!strcmp(name, BOOTLOADER_PART)){
+ if(bootloader_size < blocksize)
+ bootloader_size = blocksize;
+ return bootloader_size;
+ }
+ else if(!strcmp(name, ROMFILE_PART)){
+ if(romfile_size < blocksize)
+ romfile_size = blocksize;
+ return romfile_size;
+ }
+ return SIZE_TO_GET;
+
+}
+/******************************************************************************
+ Function: tc3162_parse_cmdline_partitions
+ Description: It's used to init tc3162_parts by cmdline partiotion string
+ Input:
+ Return:
+******************************************************************************/
+int tc3162_parse_cmdline_partitions(struct mtd_info *master){
+	unsigned long offset;
+	int i;
+	int size_remaining_index = -1;
+	int first_back_forward_index = -1;
+	unsigned long size_remaining_offset_start;
+	unsigned long back_forward_total_size = 0;
+	int is_tclinux_remaining_flag = 0;
+	unsigned int blocksize = master->erasesize;
+	unsigned int total_size = master->size;
+#ifdef TCSUPPORT_NAND_FLASH
+	if (IS_NANDFLASH) {
+	}
+#endif
+	/* parse command line */
+	if(tc3162_mtdpart_setup(TCSUPPORT_PARTITIONS_CMDLINE_STR, blocksize) == 0){
+		printk("tc3162_mtdpart_setup fail\n");
+		return 0;
+	}
+
+	for(i = 0, offset = 0; i < num_parts; i++)
+	{
+		if(ecnt_parts[i].offset == OFFSET_BACK_FORWARD){
+			first_back_forward_index = i;
+			break;
+		}
+		if(ecnt_parts[i].size == SIZE_TO_GET){
+			ecnt_parts[i].offset = offset;
+			if(!strcmp(ecnt_parts[i].name,BOOTLOADER_PART) || !strcmp(ecnt_parts[i].name, ROMFILE_PART))
+				ecnt_parts[i].size = tc3162_get_bootloader_romfile_size(ecnt_parts[i].name, blocksize);
+			else
+				continue;
+		}
+
+		ecnt_parts[i].offset = offset;
+
+		if(ecnt_parts[i].size == SIZE_REMAINING){
+			size_remaining_index = i;
+			size_remaining_offset_start = ecnt_parts[i].offset;
+			if((i+1) == num_parts){
+				ecnt_parts[i].size = total_size - offset;
+			}
+			else{
+				if(!strcmp(ecnt_parts[i].name, TCLINUX)){
+					is_tclinux_remaining_flag = 1;
+				}
+				first_back_forward_index = i+1;
+				break;
+			}
+		}
+		if (offset + ecnt_parts[i].size > total_size)
+		{
+			panic(KERN_WARNING ERRP" part %d: partitioning exceeds flash size, truncating\n", i);
+		}
+		offset +=ecnt_parts[i].size;
+		/*offset align*/
+		offset = ALIGN(offset, master->erasesize);
+		if(!strcmp(ecnt_parts[i].name, TCLINUX)){
+			tclinux_part_offset = ecnt_parts[i].offset;
+			tclinux_part_size = ecnt_parts[i].size;
+		}
+	}
+	/*offset type is back forward*/
+	for(i = (num_parts -1), offset = total_size; (i >=first_back_forward_index) && (i >= 0); i--){ /* "i >= 0" guards against first_back_forward_index == -1 reading ecnt_parts[-1] */
+		ecnt_parts[i].size = ALIGN(ecnt_parts[i].size, master->erasesize);
+		back_forward_total_size += ecnt_parts[i].size;
+		ecnt_parts[i].offset = offset-ecnt_parts[i].size;
+		offset -= ecnt_parts[i].size;
+
+		if (offset + back_forward_total_size > total_size)
+		{
+			panic(KERN_WARNING ERRP"partitioning exceeds flash size, partition index = %d\n",i);
+		}
+	}
+	/*calculate remaining size*/
+	if(size_remaining_index != -1){
+		if(offset < size_remaining_offset_start) /* .size is unsigned 64-bit: the old post-subtraction "< 0" test could never fire */
+			panic(KERN_WARNING ERRP"partition size < 0, index = %d \n", size_remaining_index);
+		ecnt_parts[size_remaining_index].size = offset - size_remaining_offset_start;
+		if(is_tclinux_remaining_flag){
+			tclinux_part_offset = ecnt_parts[size_remaining_index].offset;
+			tclinux_part_size = ecnt_parts[size_remaining_index].size;
+		}
+	}
+
+	tc3162_set_kernel_rootfs_part();
+
+	return num_parts;
+}
+
+#endif
+#ifndef TCSUPPORT_IS_FH_PON
+static int __init tc3162_mtd_init(void)
+{
+ int ret = 0;
+
+ struct mtd_partition *mtd_parts = 0;
+ int mtd_parts_nb = 0;
+
+ if(ret = tc3162_map_init()){
+ printk("tc3162_map_init() fail\n");
+ return ret;
+ }
+ if(ret = tc3162_mtd_info_init()){
+ printk("tc3162_mtd_info_init() fail\n");
+ return ret;
+ }
+#ifdef TCSUPPORT_MTD_PARTITIONS_CMDLINE
+ if((mtd_parts_nb = tc3162_parse_cmdline_partitions(tc3162_mtd_info))<= 0){
+ printk("tc3162_parse_cmdline_partitions() fail\n");
+ return -1;
+ }
+ add_mtd_partitions(tc3162_mtd_info, ecnt_parts, mtd_parts_nb);
+#else
+ if(ret = tc3162_add_partitions()){
+ printk("tc3162_add_partitions() fail\n");
+ return ret;
+ }
+#endif
+
+#ifdef TCSUPPORT_NAND_FLASH
+ if (IS_SPIFLASH) {
+ ra_nand_init();
+ }
+#endif
+ tc3162_put_rootfs();
+
+ return 0;
+}
+#else
+static int __init tc3162_mtd_init(void)
+{
+	struct mtd_info *mtd;
+	unsigned int *header;
+	unsigned int addr;
+
+
+
+	#if defined(CONFIG_DUAL_IMAGE) || defined(TCSUPPORT_MTD_ENCHANCEMENT) || defined(TCSUPPORT_MULTI_BOOT) || defined(TCSUPPORT_NAND_BADBLOCK_CHECK)
+	int i = 0;
+	#endif
+	#ifdef CONFIG_DUAL_IMAGE
+	char *bufaddr = (char*)FLAG_ADDR;
+	const char flagvalue = 1;//not change!!because we use this flag to judge which image
+	char tmp[8] = {0};
+	#endif
+
+
+	/*add address mapping on 7510. Pork*/
+	if(isMT751020 || isMT7505 || isEN751221 || isEN751627){
+		uint32 tmpVal;
+		tmpVal = regRead32(0xbfb00038);
+		tmpVal &= 0xffe0e0e0;
+		tmpVal |= 0x80070f00;
+		regWrite32(0xbfb00038,tmpVal);
+		//VPint(0xbfb00038) |= 0x80070F00;
+	#ifdef TCSUPPORT_INIC_CLIENT
+		ram_base = 0x800000 * (1 << (((VPint(0xbfb0008c) >> 13) & 0x7) - 1))- INIC_CLIENT_RAM_SIMU_MAX_SIZE;
+		printk("tc3162: iNIC simulated flash device 0x%08x at 0x%08x\n", INIC_CLIENT_RAM_SIMU_MAX_SIZE, ram_base);
+		tc3162_map.virt = ioremap_nocache(ram_base, INIC_CLIENT_RAM_SIMU_MAX_SIZE);
+		tc3162_map.phys = ram_base;
+		tc3162_map.size = INIC_CLIENT_RAM_SIMU_MAX_SIZE;
+	#else
+		printk("tc3162: flash device 0x%08x at 0x%08x\n", 0x1000000, 0x1c000000);
+		tc3162_map.virt = ioremap_nocache(0x1c000000, 0x1000000);
+		tc3162_map.phys = 0x1c000000;
+		tc3162_map.size = 0x1000000;
+	#endif
+		ioremap_nocache(WINDOW_ADDR, WINDOW_SIZE);
+	}
+	/*add 8M 16M flash support. shnwind*/
+	else if (isTC3162U || isTC3182 || isRT65168 || isRT63165 || isRT63365 || isRT63260){
+//		header = (unsigned int *)0xb0020000;
+		/*enable addr bigger than 4M support.*/
+		VPint(0xbfb00038) |= 0x80000000;
+		printk("tc3162: flash device 0x%08x at 0x%08x\n", 0x1000000, 0x10000000);
+		tc3162_map.virt = ioremap_nocache(0x10000000, 0x1000000);
+		tc3162_map.phys = 0x10000000;
+		tc3162_map.size = 0x1000000;
+		ioremap_nocache(WINDOW_ADDR, WINDOW_SIZE);
+	}else{
+
+//		header = (unsigned int *)0xbfc20000;
+		printk("tc3162: flash device 0x%08x at 0x%08x\n", WINDOW_SIZE, WINDOW_ADDR);
+		tc3162_map.virt = ioremap_nocache(WINDOW_ADDR, WINDOW_SIZE);
+
+	}
+	if (!tc3162_map.virt) {
+		printk("tc3162: Failed to ioremap\n");
+		return -EIO;
+	}
+
+	simple_map_init(&tc3162_map);
+
+	#ifdef TCSUPPORT_INIC_CLIENT
+	printk("MT75XX: INIC mode\n");
+	tc3162_mtd_info = do_map_probe("map_ram", &tc3162_map);
+	#else
+	/* check if boot from SPI flash */
+	if (IS_NANDFLASH) {
+		tc3162_mtd_info = do_map_probe("nandflash_probe", &tc3162_map);
+	} else if (IS_SPIFLASH) {
+		tc3162_mtd_info = do_map_probe("spiflash_probe", &tc3162_map);
+	} else {
+		tc3162_mtd_info = do_map_probe("cfi_probe", &tc3162_map);
+	}
+	#endif
+
+	if (!tc3162_mtd_info) {
+	#ifdef TCSUPPORT_INIC_CLIENT
+		printk("iNIC flash fail\n");
+	#endif
+		iounmap(tc3162_map.virt);
+		return -ENXIO;
+	}
+
+	tc3162_mtd_info->owner = THIS_MODULE;
+
+	add_mtd_partitions(tc3162_mtd_info, tc3162_parts, tc3162_parts_size);
+
+	#if 1
+	#ifdef TCSUPPORT_NAND_FLASH
+	if (IS_SPIFLASH) {
+		ra_nand_init();
+	}
+	#endif
+	#endif
+	#ifdef CONFIG_DUAL_IMAGE
+	//read the flag from last block to tell kernel use which image(main or slave)
+	#if 0
+	memcpy(tmp,(char*)(0xb0000000+tc3162_mtd_info->size -sizeof(char)),sizeof(char));
+	#endif
+	memcpy(tmp,(char*)bufaddr,sizeof(char));
+	if(flagvalue == tmp[0])
+	{
+		printk("\r\n RootfsB");
+		mtd = get_mtd_named("RootfsB");
+	}
+	else
+	{
+		printk("\r\n RootfsA"); /* was "\r\ ": invalid escape sequence; mirror the RootfsB message above */
+	#endif
+	mtd = get_mtd_named("RootfsA");
+	#ifdef CONFIG_DUAL_IMAGE
+	}
+	#endif
+	if (mtd) {
+		ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
+		put_mtd_device(mtd);
+	}
+
+
+	return 0;
+}
+#endif
+static void __exit tc3162_mtd_cleanup(void)
+{
+ if (tc3162_mtd_info) {
+ del_mtd_partitions(tc3162_mtd_info);
+ map_destroy(tc3162_mtd_info);
+ }
+
+#if 1
+ #ifdef TCSUPPORT_NAND_FLASH
+ if (IS_SPIFLASH) {
+ ra_nand_remove();
+ }
+ #endif
+#endif
+ if (tc3162_map.virt) {
+ iounmap(tc3162_map.virt);
+ tc3162_map.virt = 0;
+ }
+}
+
+module_init(tc3162_mtd_init);
+module_exit(tc3162_mtd_cleanup);
+
Index: linux-3.18.21/drivers/mtd/mtk/Kconfig
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/mtk/Kconfig 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,11 @@
+menu "MTK Nand Flash Interface Device Drivers"
+ depends on MTD!=n
+
+config MTK_MTD_NAND
+ tristate "NAND Flash Support"
+ depends on MIPS_RT63365
+ select MTD_PARTITIONS
+ help
+ support MT7510 Nand flash controller.
+
+endmenu
Index: linux-3.18.21/drivers/mtd/mtk/Makefile
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/mtk/Makefile 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,5 @@
+obj-$(CONFIG_MTK_MTD_NAND) += nand.o
+ifeq ($(TCSUPPORT_NAND_BMT),)
+obj-$(CONFIG_MTK_MTD_NAND) += bmt.o
+endif
+#obj-$(CONFIG_MTK_MTD_NAND) += partition_mt.o
Index: linux-3.18.21/drivers/mtd/mtk/bmt.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/mtk/bmt.c 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,1036 @@
+#include "bmt.h"
+
+#ifdef NAND_BMT
+typedef struct {
+ char signature[3];
+ u8 version;
+ u8 bad_count; // bad block count in pool
+ u8 mapped_count; // mapped block count in pool
+ u8 checksum;
+ u8 reseverd[13];
+} phys_bmt_header;
+
+typedef struct {
+ phys_bmt_header header;
+ bmt_entry table[MAX_BMT_SIZE];
+} phys_bmt_struct;
+
+typedef struct {
+ char signature[3];
+} bmt_oob_data;
+
+static char MAIN_SIGNATURE[] = "BMT";
+static char OOB_SIGNATURE[] = "bmt";
+#define SIGNATURE_SIZE (3)
+
+#define MAX_DAT_SIZE 0x1000
+#define MAX_OOB_SIZE 0x80
+
+#if defined(__PRELOADER_NAND__)
+
+static struct nand_chip *nand_chip_bmt;
+#define BLOCK_SIZE_BMT (nand_chip_bmt->erasesize)
+#define PAGE_SIZE_BMT (nand_chip_bmt->page_size)
+
+#elif defined(__UBOOT_NAND__)
+
+static struct nand_chip *nand_chip_bmt;
+
+#define BLOCK_SIZE_BMT (1 << nand_chip_bmt->phys_erase_shift)
+#define PAGE_SIZE_BMT (1 << nand_chip_bmt->page_shift)
+
+
+#elif defined(__KERNEL_NAND__)
+
+static struct mtd_info *mtd_bmt;
+static struct nand_chip *nand_chip_bmt;
+#define BLOCK_SIZE_BMT (1 << nand_chip_bmt->phys_erase_shift)
+#define PAGE_SIZE_BMT (1 << nand_chip_bmt->page_shift)
+
+#endif
+
+
+#define OFFSET(block) ((block) * BLOCK_SIZE_BMT) //((block) << (mtd->erasesize_shift) + (page) << (mtd->writesize_shift))
+#define PAGE_ADDR(block) ((block) * BLOCK_SIZE_BMT / PAGE_SIZE_BMT)
+
+/*********************************************************************
+* Flash is split into 2 parts, system part is for normal system *
+* system usage, size is system_block_count, another is replace pool *
+* +-------------------------------------------------+ *
+* | system_block_count | bmt_block_count | *
+* +-------------------------------------------------+ *
+*********************************************************************/
+static u32 total_block_count; // block number in flash
+static u32 system_block_count;
+static int bmt_block_count; // bmt table size
+// static int bmt_count; // block used in bmt
+static int page_per_block; // page per count
+static int oob_bad_index_offset = OOB_16B_INDEX_OFFSET; // bad index offset in oob
+
+
+static u32 bmt_block_index; // bmt block index
+static bmt_struct bmt; // dynamic created global bmt table
+
+static u8 dat_buf[MAX_DAT_SIZE];
+static u8 oob_buf[MAX_OOB_SIZE];
+static bool pool_erased;
+static u32 reserve_block;
+
+#define MAX_BROM_RESERVE 20
+static u8 brom_reserve_mapping[MAX_BROM_RESERVE];
+
+/***************************************************************
+*
+* Interface adaptor for preloader/uboot/kernel
+* These interfaces operate on physical address, read/write
+* physical data.
+*
+***************************************************************/
+#if defined(__PRELOADER_NAND__)
+int nand_read_page_bmt(u32 page, u8 *dat, u8 *oob)
+{
+// int offset, start, len, i;
+ return mt6573_nand_read_page_hw(page, dat, oob);
+ //return false;
+/*
+ offset = 0;
+
+ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && nand_oob->oobfree[i].length; i++)
+ {
+ start = nand_oob->oobfree[i].offset;
+ len = nand_oob->oobfree[i].length;
+ memcpy(buf + PAGE_SIZE_BMT + offset, g_nand_spare + start, len);;
+ offset += len;
+ }
+
+ return true;
+*/
+}
+
+bool nand_block_bad_bmt(u32 offset)
+{
+ return nand_block_bad_hw(offset);
+}
+
+bool nand_erase_bmt(u32 offset)
+{
+ return mt6573_nand_erase_hw(offset);
+}
+
+int mark_block_bad_bmt(u32 offset)
+{
+ return mark_block_bad_hw(offset);
+}
+
+bool nand_write_page_bmt(u32 page, u8 *dat, u8 *oob)
+{
+ return mt6573_nand_write_page_hw(page, dat, oob);
+}
+
+#elif defined(__UBOOT_NAND__)
+int nand_read_page_bmt(u32 page, u8 *dat, u8 *oob)
+{
+ return mt6573_nand_exec_read_page_hw(nand_chip_bmt, page, PAGE_SIZE_BMT, dat, oob);
+ // return mt6573_nand_read_page_hw(page, dat, oob);
+}
+
+bool nand_block_bad_bmt(u32 offset)
+{
+ return nand_block_bad_hw(nand_chip_bmt, offset);
+}
+
+// actually uboot should never use the following 3 functions
+bool nand_erase_bmt(u32 offset)
+{
+ return true; // mt6573_nand_erase_hw(offset);
+}
+
+int mark_block_bad_bmt(u32 offset)
+{
+	return 0; /* was bare "return;" — invalid in a function returning int; U-Boot never marks bad blocks */ //mark_block_bad_hw(offset);
+}
+
+bool nand_write_page_bmt(u32 page, u8 *dat, u8 *oob)
+{
+ return true; // mt6573_nand_write_page_hw(page, dat, oob);
+}
+
+#elif defined(__KERNEL_NAND__)
+
+int nand_read_page_bmt(u32 page, u8 *dat, u8 *oob)
+{
+ return mt6573_nand_exec_read_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob);
+}
+
+bool nand_block_bad_bmt(u32 offset)
+{
+ return mt6573_nand_block_bad_hw(mtd_bmt, offset);
+}
+
+bool nand_erase_bmt(u32 offset)
+{
+ int status;
+ if (offset < 0x20000)
+ {
+ MSG(INIT, "erase offset: 0x%x\n", offset);
+ }
+
+ status = mt6573_nand_erase_hw(mtd_bmt, offset / PAGE_SIZE_BMT); // as nand_chip structure doesn't have a erase function defined
+ if (status & NAND_STATUS_FAIL)
+ return false;
+ else
+ return true;
+}
+
+int mark_block_bad_bmt(u32 offset)
+{
+ return mt6573_nand_block_markbad_hw(mtd_bmt, offset); //mark_block_bad_hw(offset);
+}
+
+bool nand_write_page_bmt(u32 page, u8 *dat, u8 *oob)
+{
+ if (mt6573_nand_exec_write_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob))
+ return false;
+ else
+ return true;
+}
+
+#endif
+
+
+
+/***************************************************************
+* *
+* static internal function *
+* *
+***************************************************************/
+static void dump_bmt_info(bmt_struct *bmt)
+{
+ int i;
+
+ MSG(INIT, "BMT v%d. total %d mapping:\n", bmt->version, bmt->mapped_count);
+ for (i = 0; i < bmt->mapped_count; i++)
+ {
+ MSG(INIT, "\t0x%x -> 0x%x\n", bmt->table[i].bad_index, bmt->table[i].mapped_index);
+ }
+}
+
+static bool match_bmt_signature(u8 *dat, u8 *oob)
+{
+ // int i;
+ // char *iter = OOB_SIGNATURE;
+ if (memcmp(dat + MAIN_SIGNATURE_OFFSET, MAIN_SIGNATURE, SIGNATURE_SIZE))
+ {
+ return false;
+ }
+
+ if (memcmp(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE))
+ {
+ MSG(INIT, "main signature match, oob signature doesn't match, but ignore\n");
+ }
+ return true;
+}
+
+static u8 cal_bmt_checksum(phys_bmt_struct *phys_table, int bmt_size)
+{
+ int i;
+ u8 checksum = 0;
+ u8 *dat = (u8 *)phys_table;
+
+ checksum += phys_table->header.version;
+ // checksum += phys_table.header.bad_count;
+ checksum += phys_table->header.mapped_count;
+
+ dat += sizeof(phys_bmt_header);
+ for (i = 0; i < bmt_size * sizeof(bmt_entry); i++)
+ {
+ checksum += dat[i];
+ }
+
+ return checksum;
+}
+
+// return -1 for unmapped block, and bad block index if mapped.
+static int is_block_mapped(int index)
+{
+ int i;
+ for (i = 0; i < bmt.mapped_count; i++)
+ {
+ if (index == bmt.table[i].mapped_index)
+ return i;
+ }
+ return -1;
+}
+
+static bool is_page_used(u8 *dat, u8 *oob)
+{
+ //return ( (oob[OOB_INDEX_OFFSET] != 0xFF) || (oob[OOB_INDEX_OFFSET + 1] != 0xFF) );
+ return ( (oob[oob_bad_index_offset] != 0xFF) || (oob[oob_bad_index_offset + 1] != 0xFF) );
+}
+
+static bool valid_bmt_data(phys_bmt_struct *phys_table)
+{
+ int i;
+ u8 checksum = cal_bmt_checksum(phys_table, bmt_block_count);
+
+ // checksum correct?
+ if ( phys_table->header.checksum != checksum)
+ {
+ MSG(INIT, "BMT Data checksum error: %x %x\n", phys_table->header.checksum, checksum);
+ return false;
+ }
+
+ MSG(INIT, "BMT Checksum is: 0x%x\n", phys_table->header.checksum);
+
+ // block index correct?
+ for (i = 0; i < phys_table->header.mapped_count; i++)
+ {
+ if (phys_table->table[i].bad_index >= total_block_count ||
+ phys_table->table[i].mapped_index >= total_block_count ||
+ phys_table->table[i].mapped_index < system_block_count)
+ {
+ MSG(INIT, "index error: bad_index: %d, mapped_index: %d\n",
+ phys_table->table[i].bad_index, phys_table->table[i].mapped_index);
+ return false;
+ }
+ }
+
+ // pass check, valid bmt.
+ MSG(INIT, "Valid BMT, version v%d\n", phys_table->header.version);
+ return true;
+}
+
+
+static void fill_nand_bmt_buffer(bmt_struct *bmt, u8 *dat, u8 *oob)
+{
+ phys_bmt_struct phys_bmt;
+
+ dump_bmt_info(bmt);
+
+ // fill phys_bmt_struct structure with bmt_struct
+ memset(&phys_bmt, 0xFF, sizeof(phys_bmt));
+
+ memcpy(phys_bmt.header.signature, MAIN_SIGNATURE, SIGNATURE_SIZE);
+ phys_bmt.header.version = BMT_VERSION;
+ // phys_bmt.header.bad_count = bmt->bad_count;
+ phys_bmt.header.mapped_count = bmt->mapped_count;
+ memcpy(phys_bmt.table, bmt->table, sizeof(bmt_entry) * bmt_block_count);
+
+ phys_bmt.header.checksum = cal_bmt_checksum(&phys_bmt, bmt_block_count);
+
+ memcpy(dat + MAIN_SIGNATURE_OFFSET, &phys_bmt, sizeof(phys_bmt));
+ memcpy(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE);
+}
+
+// return valid index if found BMT, else return 0
+static int load_bmt_data(int start, int pool_size)
+{
+ int bmt_index = start + pool_size - 1; // find from the end
+ phys_bmt_struct phys_table;
+ int i;
+
+ MSG(INIT, "[%s]: begin to search BMT from block 0x%x\n", __FUNCTION__, bmt_index);
+
+ for (bmt_index = start + pool_size - 1; bmt_index >= start; bmt_index--)
+ {
+ if (nand_block_bad_bmt(OFFSET(bmt_index)))
+ {
+ MSG(INIT, "Skip bad block: %d\n", bmt_index);
+ continue;
+ }
+
+ if (!nand_read_page_bmt(PAGE_ADDR(bmt_index), dat_buf, oob_buf))
+ {
+ MSG(INIT, "Error found when read block %d\n", bmt_index);
+ continue;
+ }
+
+ if (!match_bmt_signature(dat_buf, oob_buf))
+ {
+ continue;
+ }
+
+ MSG(INIT, "Match bmt signature @ block: 0x%x\n", bmt_index);
+
+ memcpy(&phys_table, dat_buf + MAIN_SIGNATURE_OFFSET, sizeof(phys_table));
+
+ if (!valid_bmt_data(&phys_table))
+ {
+ MSG(INIT, "BMT data is not correct %d\n", bmt_index);
+ continue;
+ }
+ else
+ {
+ bmt.mapped_count = phys_table.header.mapped_count;
+ bmt.version = phys_table.header.version;
+ // bmt.bad_count = phys_table.header.bad_count;
+ memcpy(bmt.table, phys_table.table, bmt.mapped_count * sizeof(bmt_entry));
+
+ MSG(INIT, "bmt found at block: %d, mapped block: %d\n", bmt_index, bmt.mapped_count);
+
+ for (i = 0; i < bmt.mapped_count; i++)
+ {
+ if (!nand_block_bad_bmt(OFFSET(bmt.table[i].bad_index)))
+ {
+ MSG(INIT, "block 0x%x is not mark bad, should be power lost last time\n", bmt.table[i].bad_index);
+ mark_block_bad_bmt(OFFSET(bmt.table[i].bad_index));
+ }
+ }
+
+ return bmt_index;
+ }
+ }
+
+ MSG(INIT, "bmt block not found!\n");
+ return 0;
+}
+
+
+/*************************************************************************
+* Find an available block and erase. *
+* start_from_end: if true, find available block from end of flash. *
+* else, find from the beginning of the pool *
+* need_erase: if true, all unmapped blocks in the pool will be erased *
+*************************************************************************/
+static int find_available_block(bool start_from_end)
+{
+ int i; // , j;
+ int block = system_block_count;
+ int direction;
+ // int avail_index = 0;
+ MSG(INIT, "Try to find_available_block, pool_erase: %d\n", pool_erased);
+
+ // erase all un-mapped blocks in pool when finding avaliable block
+ if (!pool_erased)
+ {
+ MSG(INIT, "Erase all un-mapped blocks in pool\n");
+ for (i = 0; i < bmt_block_count; i++)
+ {
+ if (block == bmt_block_index)
+ {
+ MSG(INIT, "Skip bmt block 0x%x\n", block);
+ continue;
+ }
+
+ if (nand_block_bad_bmt(OFFSET(block + i)))
+ {
+ MSG(INIT, "Skip bad block 0x%x\n", block + i);
+ continue;
+ }
+
+ if (is_block_mapped(block + i) >= 0)
+ {
+ MSG(INIT, "Skip mapped block 0x%x\n", block + i);
+ continue;
+ }
+
+ if (!nand_erase_bmt(OFFSET(block + i)))
+ {
+ MSG(INIT, "Erase block 0x%x failed\n", block + i);
+ mark_block_bad_bmt(OFFSET(block + i));
+ }
+ }
+
+ pool_erased = 1;
+ }
+
+ if (start_from_end)
+ {
+ block = total_block_count - 1;
+ direction = -1;
+ }
+ else
+ {
+ block = system_block_count;
+ direction = 1;
+ }
+
+ for (i = 0; i < bmt_block_count; i++, block += direction)
+ {
+ if (block == bmt_block_index)
+ {
+ MSG(INIT, "Skip bmt block 0x%x\n", block);
+ continue;
+ }
+
+ if (nand_block_bad_bmt(OFFSET(block)))
+ {
+ MSG(INIT, "Skip bad block 0x%x\n", block);
+ continue;
+ }
+
+ if (is_block_mapped(block) >= 0)
+ {
+ MSG(INIT, "Skip mapped block 0x%x\n", block);
+ continue;
+ }
+
+ MSG(INIT, "Find block 0x%x available\n", block);
+ return block;
+ }
+
+ return 0;
+}
+
+
+static unsigned short get_bad_index_from_oob(u8 *oob_buf)
+{
+ unsigned short index;
+
+ //memcpy(&index, oob_buf + OOB_INDEX_OFFSET, OOB_INDEX_SIZE);
+ memcpy(&index, oob_buf + oob_bad_index_offset, OOB_INDEX_SIZE);
+
+ return index;
+}
+
+void set_bad_index_to_oob(u8 *oob, u16 index)
+{
+ //memcpy(oob + OOB_INDEX_OFFSET, &index, sizeof(index));
+ memcpy(oob + oob_bad_index_offset, &index, sizeof(index));
+}
+
+static int find_available_block_reserve(int block)
+{
+	int i;
+
+	for (i = block + 1; i < reserve_block; i++)
+	{
+		if (!nand_block_bad_bmt(OFFSET(i)))
+		{
+			if (nand_erase_bmt(OFFSET(i)))
+			{
+				MSG(INIT, "return 0x%x\n", i);
+				return i;
+			}
+			else
+				mark_block_bad_bmt(OFFSET(i)); /* was mark_block_bad_bmt(i): every other call site passes a byte offset, not a block index */
+		}
+	}
+
+	return 0;
+}
+
+static int migrate_from_bad(int offset, u8 *write_dat, u8 *write_oob)
+{
+ int page;
+ int error_block = offset / BLOCK_SIZE_BMT;
+ int error_page = (offset / PAGE_SIZE_BMT) % page_per_block;
+ int to_index;
+
+ memcpy(oob_buf, write_oob, MAX_OOB_SIZE);
+
+ if (error_block < reserve_block)
+ {
+ to_index = find_available_block_reserve(error_block);
+ }
+ else
+ {
+ to_index = find_available_block(false);
+ }
+
+ if (!to_index)
+ {
+ MSG(INIT, "Cannot find an available block for BMT\n");
+ return 0;
+ }
+
+ { // migrate error page first
+ MSG(INIT, "Write error page: 0x%x\n", error_page);
+ if (!write_dat)
+ {
+ nand_read_page_bmt(PAGE_ADDR(error_block) + error_page, dat_buf, NULL);
+ write_dat = dat_buf;
+ }
+
+ // memcpy(oob_buf, write_oob, MAX_OOB_SIZE);
+
+ if (error_block < system_block_count)
+ set_bad_index_to_oob(oob_buf, error_block); // if error_block is already a mapped block, original mapping index is in OOB.
+
+ if (!nand_write_page_bmt(PAGE_ADDR(to_index) + error_page, write_dat, oob_buf))
+ {
+ MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + error_page);
+ mark_block_bad_bmt(to_index);
+ return migrate_from_bad(offset, write_dat, write_oob);
+ }
+ }
+
+ for (page = 0; page < page_per_block; page++)
+ {
+ if (page != error_page)
+ {
+ nand_read_page_bmt(PAGE_ADDR(error_block) + page, dat_buf, oob_buf);
+ if (is_page_used(dat_buf, oob_buf))
+ {
+ if (error_block < system_block_count)
+ {
+ set_bad_index_to_oob(oob_buf, error_block);
+ }
+ MSG(INIT, "\tmigrate page 0x%x to page 0x%x\n",
+ PAGE_ADDR(error_block) + page, PAGE_ADDR(to_index) + page);
+ if (!nand_write_page_bmt(PAGE_ADDR(to_index) + page, dat_buf, oob_buf))
+ {
+ MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + page);
+ mark_block_bad_bmt(to_index);
+ return migrate_from_bad(offset, write_dat, write_oob);
+ }
+ }
+ }
+ }
+
+ MSG(INIT, "Migrate from 0x%x to 0x%x done!\n", error_block, to_index);
+
+ return to_index;
+}
+
+static bool map_reserve_region(int blocks)
+{
+ int i;
+ int bad_found = 0;
+
+ memset(brom_reserve_mapping, 0, sizeof(brom_reserve_mapping));
+
+ for (i = 0; i < reserve_block; i++)
+ {
+ if (nand_block_bad_bmt(OFFSET(i)))
+ {
+ MSG(INIT, "bad block 0x%x\n", i);
+ brom_reserve_mapping[reserve_block - 1 - bad_found] = i;
+ bad_found++;
+ }
+ else
+ {
+ brom_reserve_mapping[i - bad_found] = i;
+ }
+ }
+
+ for (i = 0; i < reserve_block; i++)
+ MSG(INIT, "reserve[%d]: 0x%x\n", i, brom_reserve_mapping[i]);
+
+ if (bad_found + 3 > reserve_block)
+ return false;
+ else
+ return true;
+}
+
+static bool write_bmt_to_flash(u8 *dat, u8 *oob)
+{
+ bool need_erase = true;
+ MSG(INIT, "Try to write BMT\n");
+
+ if (bmt_block_index == 0)
+ {
+ // if we don't have index, we don't need to erase found block as it has been erased in find_available_block()
+ need_erase = false;
+ if ( !(bmt_block_index = find_available_block(true)) )
+ {
+ MSG(INIT, "Cannot find an available block for BMT\n");
+ return false;
+ }
+ }
+
+ MSG(INIT, "Find BMT block: 0x%x\n", bmt_block_index);
+
+ // write bmt to flash
+ if (need_erase)
+ {
+ if (!nand_erase_bmt(OFFSET(bmt_block_index)))
+ {
+ MSG(INIT, "BMT block erase fail, mark bad: 0x%x\n", bmt_block_index);
+ mark_block_bad_bmt(OFFSET(bmt_block_index));
+ // bmt.bad_count++;
+
+ bmt_block_index = 0;
+ return write_bmt_to_flash(dat, oob); // recursive call
+ }
+ }
+
+ if ( !nand_write_page_bmt(PAGE_ADDR(bmt_block_index), dat, oob) )
+ {
+ MSG(INIT, "Write BMT data fail, need to write again\n");
+ mark_block_bad_bmt(OFFSET(bmt_block_index));
+ // bmt.bad_count++;
+
+ bmt_block_index = 0;
+ return write_bmt_to_flash(dat, oob); // recursive call
+ }
+
+ MSG(INIT, "Write BMT data to block 0x%x success\n", bmt_block_index);
+ return true;
+}
+
+/*******************************************************************
+* Reconstruct the bmt; called when the bmt info that was found does
+* not match the bad-block info in flash.
+*
+* Return: the bmt struct (returned even if writing the BMT back fails)
+*******************************************************************/
+bmt_struct *reconstruct_bmt(bmt_struct * bmt)
+{
+ int i;
+ int index = system_block_count;
+ unsigned short bad_index;
+ int mapped;
+
+ // init everything in BMT struct
+ bmt->version = BMT_VERSION;
+ bmt->bad_count = 0;
+ bmt->mapped_count = 0;
+
+ memset(bmt->table, 0, bmt_block_count * sizeof(bmt_entry));
+
+ for (i = 0; i < bmt_block_count; i++, index++)
+ {
+ if (nand_block_bad_bmt(OFFSET(index)))
+ {
+ MSG(INIT, "Skip bad block: 0x%x\n", index);
+ // bmt->bad_count++;
+ continue;
+ }
+
+ MSG(INIT, "read page: 0x%x\n", PAGE_ADDR(index));
+ nand_read_page_bmt(PAGE_ADDR(index), dat_buf, oob_buf);
+ /* if (mt6573_nand_read_page_hw(PAGE_ADDR(index), dat_buf))
+ {
+ MSG(INIT, "Error when read block %d\n", bmt_block_index);
+ continue;
+ } */
+
+ if ((bad_index = get_bad_index_from_oob(oob_buf)) >= system_block_count)
+ {
+ MSG(INIT, "get bad index: 0x%x\n", bad_index);
+ if (bad_index != 0xFFFF)
+ MSG(INIT, "Invalid bad index found in block 0x%x, bad index 0x%x\n", index, bad_index);
+ continue;
+ }
+
+ MSG(INIT, "Block 0x%x is mapped to bad block: 0x%x\n", index, bad_index);
+
+ if (!nand_block_bad_bmt(OFFSET(bad_index)))
+ {
+ MSG(INIT, "\tbut block 0x%x is not marked as bad, invalid mapping\n", bad_index);
+ continue; // no need to erase here, it will be erased later when trying to write BMT
+ }
+
+
+ if ( (mapped = is_block_mapped(bad_index)) >= 0)
+ {
+ MSG(INIT, "bad block 0x%x is mapped to 0x%x, should be caused by power lost, replace with one\n",
+ bmt->table[mapped].bad_index, bmt->table[mapped].mapped_index);
+ bmt->table[mapped].mapped_index = index; // use new one instead.
+ }
+ else
+ {
+ // add mapping to BMT
+ bmt->table[bmt->mapped_count].bad_index = bad_index;
+ bmt->table[bmt->mapped_count].mapped_index = index;
+ bmt->mapped_count++;
+ }
+
+ MSG(INIT, "Add mapping: 0x%x -> 0x%x to BMT\n", bad_index, index);
+
+ }
+
+ MSG(INIT, "Scan replace pool done, mapped block: %d\n", bmt->mapped_count);
+ // dump_bmt_info(bmt);
+
+ // fill NAND BMT buffer
+ memset(oob_buf, 0xFF, sizeof(oob_buf));
+ fill_nand_bmt_buffer(bmt, dat_buf, oob_buf);
+
+ // write BMT back
+ if (!write_bmt_to_flash(dat_buf, oob_buf))
+ {
+ MSG(INIT, "TRAGEDY: cannot find a place to write BMT!!!!\n");
+ }
+
+ return bmt;
+}
+
+/*******************************************************************
+* [BMT Interface]
+*
+* Description:
+* Set reserved region for bmt; bmt will not use the map mechanism.
+*
+* Parameter:
+* flag: SET_RESERVE_ADD / SET_RESERVE_SET / SET_RESERVE_SUB selects
+* whether to add to, set, or subtract from the reserved count.
+* size: reserve block size.
+*
+* Return:
+*
+*******************************************************************/
+void set_bmt_reserve_region(set_res_flag_t flag, int size)
+{
+ int blocks = size / BLOCK_SIZE_BMT;
+ if (flag == SET_RESERVE_ADD)
+ {
+ reserve_block += blocks;
+ }
+ else if (flag == SET_RESERVE_SET)
+ {
+ reserve_block = blocks;
+ }
+ else if (flag == SET_RESERVE_SUB)
+ {
+ reserve_block -= blocks;
+ }
+ MSG(INIT, "valid range: 0x%x~0x%x\n", reserve_block, system_block_count);
+
+ map_reserve_region(reserve_block);
+}
+#endif
+
+/*******************************************************************
+* [BMT Interface]
+*
+* Description:
+* Init bmt from nand. Reconstruct if not found or data error
+*
+* Parameter:
+* size: size of bmt and replace pool
+*
+* Return:
+* NULL for failure, and a bmt struct for success
+*******************************************************************/
+bmt_struct *init_bmt(struct nand_chip *chip, int size)
+{
+#ifdef NAND_BMT
+
+#if defined(__KERNEL_NAND__)
+ struct mt6573_nand_host *host;
+#endif
+
+ reserve_block = 0;
+
+ if (size > 0 && size < MAX_BMT_SIZE)
+ {
+ MSG(INIT, "Init bmt table, size: %d\n", size);
+ bmt_block_count = size;
+ }
+ else
+ {
+ MSG(INIT, "Invalid bmt table size: %d\n", size);
+ return NULL;
+ }
+
+#if defined(__PRELOADER_NAND__)
+
+ nand_chip_bmt = chip;
+ system_block_count = chip->chipsize / chip->erasesize;
+ total_block_count = bmt_block_count + system_block_count;
+ page_per_block = chip->erasesize / chip->page_size;
+
+#elif defined(__UBOOT_NAND__)
+
+ nand_chip_bmt = chip;
+ system_block_count = chip->chipsize >> chip->phys_erase_shift;
+ total_block_count = bmt_block_count + system_block_count;
+ page_per_block = BLOCK_SIZE_BMT / PAGE_SIZE_BMT;
+
+#elif defined(__KERNEL_NAND__)
+
+ nand_chip_bmt = chip;
+ system_block_count = chip->chipsize >> chip->phys_erase_shift;
+ total_block_count = bmt_block_count + system_block_count;
+ page_per_block = BLOCK_SIZE_BMT / PAGE_SIZE_BMT;
+ host = (struct mt6573_nand_host *)chip->priv;
+ mtd_bmt = &host->mtd;
+
+ if(PAGE_SIZE_BMT == 512){
+ oob_bad_index_offset = OOB_16B_INDEX_OFFSET;
+ }
+ else{
+ oob_bad_index_offset = OOB_INDEX_OFFSET;
+ }
+
+#endif
+
+ MSG(INIT, "bmt count: %d, system count: %d\n", bmt_block_count, system_block_count);
+
+ // set this flag, and unmapped block in pool will be erased.
+ pool_erased = 0;
+
+ // alloc size for bmt.
+ memset(bmt.table, 0, size * sizeof(bmt_entry));
+
+ // load bmt if exist
+ if ((bmt_block_index = load_bmt_data(system_block_count, size)))
+ {
+ MSG(INIT, "Load bmt data success @ block 0x%x\n", bmt_block_index);
+ dump_bmt_info(&bmt);
+ return &bmt;
+ }
+ else
+ {
+ MSG(INIT, "Load bmt data fail, need re-construct!\n");
+#ifndef __UBOOT_NAND__ // BMT is not re-constructed in UBOOT.
+ if (reconstruct_bmt(&bmt))
+ return &bmt;
+ else
+#endif
+ return NULL;
+ }
+
+#else
+ return NULL;
+#endif
+
+}
+
+
+/*******************************************************************
+* [BMT Interface]
+*
+* Description:
+* Update BMT.
+*
+* Parameter:
+* offset: update block/page offset.
+* reason: update reason, see update_reason_t for reason.
+* dat/oob: data and oob buffer for write fail.
+*
+* Return:
+* Return true for success, and false for failure.
+*******************************************************************/
+bool update_bmt(u32 offset, update_reason_t reason, u8 *dat, u8 *oob)
+{
+#ifdef NAND_BMT
+
+ int map_index;
+ int orig_bad_block = -1;
+ // int bmt_update_index;
+ int i;
+ int bad_index = offset / BLOCK_SIZE_BMT;
+
+ if (bad_index < reserve_block)
+ {
+ MSG(INIT, "Update in reserve region\n");
+ if (reason == UPDATE_UNMAPPED_BLOCK)
+ {
+ return false;
+ }
+ else if (reason == UPDATE_WRITE_FAIL)
+ {
+ MSG(INIT, "Write preloader/DSP_BL fail, SD update??\n");
+ if (!(map_index = migrate_from_bad(offset, dat, oob)))
+ {
+ MSG(INIT, "migrate fail\n");
+ return false;
+ }
+ }
+ else if (reason == UPDATE_ERASE_FAIL)
+ {
+ MSG(INIT, "Erase preloader/DSP_BL fail, SD update??\n");
+ if (!(map_index = find_available_block_reserve(bad_index)))
+ return false;
+ }
+
+ mark_block_bad_bmt(offset);
+ return map_reserve_region(reserve_block);
+ }
+
+ if (reason == UPDATE_WRITE_FAIL)
+ {
+ MSG(INIT, "Write fail, need to migrate\n");
+ if ( !(map_index = migrate_from_bad(offset, dat, oob)) )
+ {
+ MSG(INIT, "migrate fail\n");
+ return false;
+ }
+ }
+ else
+ {
+ if ( !(map_index = find_available_block(false)) )
+ {
+ MSG(INIT, "Cannot find block in pool\n");
+ return false;
+ }
+ }
+
+ // now let's update BMT
+ if (bad_index >= system_block_count) // mapped block become bad, find original bad block
+ {
+ for (i = 0; i < bmt_block_count; i++)
+ {
+ if (bmt.table[i].mapped_index == bad_index)
+ {
+ orig_bad_block = bmt.table[i].bad_index;
+ break;
+ }
+ }
+ // bmt.bad_count++;
+ MSG(INIT, "Mapped block becomes bad, orig bad block is 0x%x\n", orig_bad_block);
+
+ bmt.table[i].mapped_index = map_index;
+ }
+ else
+ {
+ bmt.table[bmt.mapped_count].mapped_index = map_index;
+ bmt.table[bmt.mapped_count].bad_index = bad_index;
+ bmt.mapped_count++;
+ }
+
+ memset(oob_buf, 0xFF, sizeof(oob_buf));
+ fill_nand_bmt_buffer(&bmt, dat_buf, oob_buf);
+ if (!write_bmt_to_flash(dat_buf, oob_buf))
+ return false;
+
+ mark_block_bad_bmt(offset);
+#endif
+ return true;
+}
+
+/*******************************************************************
+* [BMT Interface]
+*
+* Description:
+* Given a block index, return the mapped index if it is mapped,
+* otherwise return the given index.
+*
+* Parameter:
+* index: the block index to look up. This value must not exceed
+* system_block_count.
+*
+* Return: the mapped block index, or the given index if unmapped.
+*******************************************************************/
+u16 get_mapping_block_index(int index)
+{
+ int i;
+
+#ifdef NAND_BMT
+ if (index < reserve_block)
+ {
+ MSG(INIT, "return %d for block in reserve region\n", brom_reserve_mapping[index]);
+ return brom_reserve_mapping[index];
+ }
+
+ if (index > system_block_count)
+ {
+ MSG(INIT, "Given index exceed: 0x%x > 0x%x\n", index, system_block_count);
+ return index;
+ }
+
+ for (i = 0; i < bmt.mapped_count; i++)
+ {
+ if (bmt.table[i].bad_index == index)
+ {
+ MSG(INIT, "Redirect 0x%x to 0x%x\n", index, bmt.table[i].mapped_index);
+ return bmt.table[i].mapped_index;
+ }
+ }
+#endif
+ return index;
+}
+
+#ifdef __KERNEL_NAND__
+EXPORT_SYMBOL(init_bmt);
+EXPORT_SYMBOL(update_bmt);
+//EXPORT_SYMBOL(reconstruct_bmt);
+EXPORT_SYMBOL(get_mapping_block_index);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Fei Jiang @ MediaTek");
+MODULE_DESCRIPTION("Block mapping management for MediaTek NAND Flash Driver");
+#endif
Index: linux-3.18.21/drivers/mtd/mtk/bmt.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/mtk/bmt.h 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,110 @@
+#ifndef __BMT_H__
+#define __BMT_H__
+
+// #define __PRELOADER_NAND__
+// #define __UBOOT_NAND__
+#define __KERNEL_NAND__
+
+#if defined(__PRELOADER_NAND__)
+
+#include "nand.h"
+
+#elif defined(__UBOOT_NAND__)
+
+#include <linux/mtd/nand.h>
+#include <asm/arch/mt65xx_typedefs.h>
+#include <asm/arch/mt6573_nand.h>
+
+#elif defined(__KERNEL_NAND__)
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+//#include <mach/mt6573_nand.h>
+#include "mt6573_nand.h"
+
+#endif
+
+#define MAX_BMT_SIZE (0x80)
+#define BMT_VERSION (1) // initial version
+
+#define MAIN_SIGNATURE_OFFSET (0)
+#define OOB_SIGNATURE_OFFSET (1)
+#define OOB_INDEX_OFFSET (29)
+#define OOB_INDEX_SIZE (2)
+#define FAKE_INDEX (0xAAAA)
+#define OOB_16B_INDEX_OFFSET (3)
+
+typedef struct _bmt_entry_
+{
+ u16 bad_index; // bad block index
+ u16 mapped_index; // mapping block index in the replace pool
+} bmt_entry;
+
+typedef enum
+{
+ UPDATE_ERASE_FAIL,
+ UPDATE_WRITE_FAIL,
+ UPDATE_UNMAPPED_BLOCK,
+ UPDATE_REASON_COUNT,
+} update_reason_t;
+
+typedef enum
+{
+ SET_RESERVE_ADD,
+ SET_RESERVE_SET,
+ SET_RESERVE_SUB,
+
+ SET_RESERVE_COUNT,
+} set_res_flag_t;
+
+
+typedef struct {
+ bmt_entry table[MAX_BMT_SIZE];
+ u8 version;
+ u8 mapped_count; // mapped block count in pool
+ u8 bad_count; // bad block count in pool. Not used in V1
+}bmt_struct;
+
+
+/***************************************************************
+* *
+* Interface BMT need to use *
+* *
+***************************************************************/
+#if defined(__PRELOADER_NAND__)
+
+extern int mt6573_nand_read_page_hw(u32 page, u8 *dat, u8 *oob);
+extern bool nand_block_bad_hw(u32 offset);
+extern bool mt6573_nand_erase_hw(u32 offset);
+extern bool mark_block_bad_hw(u32 offset);
+extern int mt6573_nand_write_page_hw(u32 page, u8 *dat, u8 *oob);
+
+#elif defined(__UBOOT_NAND__)
+
+extern bool mt6573_nand_exec_read_page_hw(struct nand_chip *nand, u32 page, u32 page_size, u8 *dat, u8 *oob);
+extern bool nand_block_bad_hw(struct nand_chip *nand, u32 offset);
+
+#elif defined(__KERNEL_NAND__)
+
+extern bool mt6573_nand_exec_read_page(struct mtd_info *mtd, u32 row, u32 page_size, u8 *dat, u8 *oob);
+extern int mt6573_nand_block_bad_hw(struct mtd_info *mtd, loff_t ofs);
+extern int mt6573_nand_erase_hw(struct mtd_info *mtd, int page);
+extern int mt6573_nand_block_markbad_hw(struct mtd_info *mtd, loff_t ofs);
+extern int mt6573_nand_exec_write_page(struct mtd_info *mtd, u32 row, u32 page_size, u8 *dat, u8 *oob);
+
+#endif
+
+
+
+/********************************************
+* *
+* Interface for preloader/uboot/kernel *
+* *
+********************************************/
+void set_bad_index_to_oob(u8 *oob, u16 index);
+bmt_struct *init_bmt(struct nand_chip* nand, int size);
+bool update_bmt(u32 offset, update_reason_t reason, u8 *dat, u8 *oob);
+unsigned short get_mapping_block_index(int index);
+void set_bmt_reserve_region(set_res_flag_t flag, int size);
+
+#endif // #ifndef __BMT_H__
Index: linux-3.18.21/drivers/mtd/mtk/mt6573_nand.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/mtk/mt6573_nand.h 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,472 @@
+#ifndef __MT6573_NAND_H
+#define __MT6573_NAND_H
+
+#include <asm/tc3162/tc3182_int_source.h>
+
+#define NFI_BASE 0xBFBE0000
+#define NFIECC_BASE 0xBFBE1000
+/*******************************************************************************
+ * NFI Register Definition
+ *******************************************************************************/
+
+#define NFI_CNFG_REG16 ((volatile P_U16)(NFI_BASE+0x0000))
+#define NFI_PAGEFMT_REG16 ((volatile P_U16)(NFI_BASE+0x0004))
+#define NFI_CON_REG16 ((volatile P_U16)(NFI_BASE+0x0008))
+#define NFI_ACCCON_REG32 ((volatile P_U32)(NFI_BASE+0x000C))
+#define NFI_INTR_EN_REG16 ((volatile P_U16)(NFI_BASE+0x0010))
+#define NFI_INTR_REG16 ((volatile P_U16)(NFI_BASE+0x0014))
+
+#define NFI_CMD_REG16 ((volatile P_U16)(NFI_BASE+0x0020))
+
+#define NFI_ADDRNOB_REG16 ((volatile P_U16)(NFI_BASE+0x0030))
+#define NFI_COLADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0034))
+#define NFI_ROWADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0038))
+
+#define NFI_STRDATA_REG16 ((volatile P_U16)(NFI_BASE+0x0040))
+
+#define NFI_DATAW_REG32 ((volatile P_U32)(NFI_BASE+0x0050))
+#define NFI_DATAR_REG32 ((volatile P_U32)(NFI_BASE+0x0054))
+#define NFI_PIO_DIRDY_REG16 ((volatile P_U16)(NFI_BASE+0x0058))
+
+#define NFI_STA_REG32 ((volatile P_U32)(NFI_BASE+0x0060))
+#define NFI_FIFOSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0064))
+#define NFI_LOCKSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0068))
+
+#define NFI_ADDRCNTR_REG16 ((volatile P_U16)(NFI_BASE+0x0070))
+
+#define NFI_STRADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0080))
+#define NFI_BYTELEN_REG16 ((volatile P_U16)(NFI_BASE+0x0084))
+
+#define NFI_CSEL_REG16 ((volatile P_U16)(NFI_BASE+0x0090))
+#define NFI_IOCON_REG16 ((volatile P_U16)(NFI_BASE+0x0094))
+
+#define NFI_FDM0L_REG32 ((volatile P_U32)(NFI_BASE+0x00A0))
+#define NFI_FDM0M_REG32 ((volatile P_U32)(NFI_BASE+0x00A4))
+
+#define NFI_LOCK_REG16 ((volatile P_U16)(NFI_BASE+0x0100))
+#define NFI_LOCKCON_REG32 ((volatile P_U32)(NFI_BASE+0x0104))
+#define NFI_LOCKANOB_REG16 ((volatile P_U16)(NFI_BASE+0x0108))
+#define NFI_LOCK00ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0110))
+#define NFI_LOCK00FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0114))
+#define NFI_LOCK01ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0118))
+#define NFI_LOCK01FMT_REG32 ((volatile P_U32)(NFI_BASE+0x011C))
+#define NFI_LOCK02ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0120))
+#define NFI_LOCK02FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0124))
+#define NFI_LOCK03ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0128))
+#define NFI_LOCK03FMT_REG32 ((volatile P_U32)(NFI_BASE+0x012C))
+#define NFI_LOCK04ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0130))
+#define NFI_LOCK04FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0134))
+#define NFI_LOCK05ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0138))
+#define NFI_LOCK05FMT_REG32 ((volatile P_U32)(NFI_BASE+0x013C))
+#define NFI_LOCK06ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0140))
+#define NFI_LOCK06FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0144))
+#define NFI_LOCK07ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0148))
+#define NFI_LOCK07FMT_REG32 ((volatile P_U32)(NFI_BASE+0x014C))
+#define NFI_LOCK08ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0150))
+#define NFI_LOCK08FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0154))
+#define NFI_LOCK09ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0158))
+#define NFI_LOCK09FMT_REG32 ((volatile P_U32)(NFI_BASE+0x015C))
+#define NFI_LOCK10ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0160))
+#define NFI_LOCK10FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0164))
+#define NFI_LOCK11ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0168))
+#define NFI_LOCK11FMT_REG32 ((volatile P_U32)(NFI_BASE+0x016C))
+#define NFI_LOCK12ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0170))
+#define NFI_LOCK12FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0174))
+#define NFI_LOCK13ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0178))
+#define NFI_LOCK13FMT_REG32 ((volatile P_U32)(NFI_BASE+0x017C))
+#define NFI_LOCK14ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0180))
+#define NFI_LOCK14FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0184))
+#define NFI_LOCK15ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0188))
+#define NFI_LOCK15FMT_REG32 ((volatile P_U32)(NFI_BASE+0x018C))
+
+#define NFI_FIFODATA0_REG32 ((volatile P_U32)(NFI_BASE+0x0190))
+#define NFI_FIFODATA1_REG32 ((volatile P_U32)(NFI_BASE+0x0194))
+#define NFI_FIFODATA2_REG32 ((volatile P_U32)(NFI_BASE+0x0198))
+#define NFI_FIFODATA3_REG32 ((volatile P_U32)(NFI_BASE+0x019C))
+#define NFI_MASTERSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0210))
+
+
+/*******************************************************************************
+ * NFI Register Field Definition
+ *******************************************************************************/
+
+/* NFI_CNFG */
+#define CNFG_AHB (0x0001)
+#define CNFG_READ_EN (0x0002)
+#define CNFG_DMA_BURST_EN (0x0004)
+#define CNFG_PIO_BIG_ENDIAN (0x0008)
+#define CNFG_BYTE_RW (0x0040)
+#define CNFG_HW_ECC_EN (0x0100)
+#define CNFG_AUTO_FMT_EN (0x0200)
+#define CNFG_OP_IDLE (0x0000)
+#define CNFG_OP_READ (0x1000)
+#define CNFG_OP_SRD (0x2000)
+#define CNFG_OP_PRGM (0x3000)
+#define CNFG_OP_ERASE (0x4000)
+#define CNFG_OP_RESET (0x5000)
+#define CNFG_OP_CUST (0x6000)
+#define CNFG_OP_MODE_MASK (0x7000)
+#define CNFG_OP_MODE_SHIFT (12)
+
+/* NFI_PAGEFMT */
+#define PAGEFMT_512 (0x0000)
+#define PAGEFMT_2K (0x0001)
+#define PAGEFMT_4K (0x0002)
+
+#define PAGEFMT_PAGE_MASK (0x0003)
+
+#define PAGEFMT_DBYTE_EN (0x0008)
+
+#define PAGEFMT_SPARE_16 (0x0000)
+#define PAGEFMT_SPARE_26 (0x0001)
+#define PAGEFMT_SPARE_27 (0x0002)
+#define PAGEFMT_SPARE_28 (0x0003)
+#define PAGEFMT_SPARE_MASK (0x0030)
+#define PAGEFMT_SPARE_SHIFT (4)
+
+#define PAGEFMT_FDM_MASK (0x0F00)
+#define PAGEFMT_FDM_SHIFT (8)
+
+#define PAGEFMT_FDM_ECC_MASK (0xF000)
+#define PAGEFMT_FDM_ECC_SHIFT (12)
+
+/* NFI_CON */
+#define CON_FIFO_FLUSH (0x0001)
+#define CON_NFI_RST (0x0002)
+#define CON_NFI_SRD (0x0010)
+
+#define CON_NFI_NOB_MASK (0x0060)
+#define CON_NFI_NOB_SHIFT (5)
+
+#define CON_NFI_BRD (0x0100)
+#define CON_NFI_BWR (0x0200)
+
+#define CON_NFI_SEC_MASK (0xF000)
+#define CON_NFI_SEC_SHIFT (12)
+
+/* NFI_ACCCON */
+#define ACCCON_SETTING ()
+
+/* NFI_INTR_EN */
+#define INTR_RD_DONE_EN (0x0001)
+#define INTR_WR_DONE_EN (0x0002)
+#define INTR_RST_DONE_EN (0x0004)
+#define INTR_ERASE_DONE_EN (0x0008)
+#define INTR_BSY_RTN_EN (0x0010)
+#define INTR_ACC_LOCK_EN (0x0020)
+#define INTR_AHB_DONE_EN (0x0040)
+#define INTR_ALL_INTR_DE (0x0000)
+#define INTR_ALL_INTR_EN (0x007F)
+
+/* NFI_INTR */
+#define INTR_RD_DONE (0x0001)
+#define INTR_WR_DONE (0x0002)
+#define INTR_RST_DONE (0x0004)
+#define INTR_ERASE_DONE (0x0008)
+#define INTR_BSY_RTN (0x0010)
+#define INTR_ACC_LOCK (0x0020)
+#define INTR_AHB_DONE (0x0040)
+
+/* NFI_ADDRNOB */
+#define ADDR_COL_NOB_MASK (0x0003)
+#define ADDR_COL_NOB_SHIFT (0)
+#define ADDR_ROW_NOB_MASK (0x0030)
+#define ADDR_ROW_NOB_SHIFT (4)
+
+/* NFI_STA */
+#define STA_READ_EMPTY (0x00001000)
+#define STA_ACC_LOCK (0x00000010)
+#define STA_CMD_STATE (0x00000001)
+#define STA_ADDR_STATE (0x00000002)
+#define STA_DATAR_STATE (0x00000004)
+#define STA_DATAW_STATE (0x00000008)
+
+#define STA_NAND_FSM_MASK (0x1F000000)
+#define STA_NAND_BUSY (0x00000100)
+#define STA_NAND_BUSY_RETURN (0x00000200)
+#define STA_NFI_FSM_MASK (0x000F0000)
+#define STA_NFI_OP_MASK (0x0000000F)
+
+/* NFI_FIFOSTA */
+#define FIFO_RD_EMPTY (0x0040)
+#define FIFO_RD_FULL (0x0080)
+#define FIFO_WR_FULL (0x8000)
+#define FIFO_WR_EMPTY (0x4000)
+#define FIFO_RD_REMAIN(x) (0x1F&(x))
+#define FIFO_WR_REMAIN(x) ((0x1F00&(x))>>8)
+
+/* NFI_ADDRCNTR */
+#define ADDRCNTR_CNTR(x) ((0xF000&(x))>>12)
+#define ADDRCNTR_OFFSET(x) (0x03FF&(x))
+
+/* NFI_LOCK */
+#define NFI_LOCK_ON (0x0001)
+
+/* NFI_LOCKANOB */
+#define PROG_RADD_NOB_MASK (0x7000)
+#define PROG_RADD_NOB_SHIFT (12)
+#define PROG_CADD_NOB_MASK (0x0300)
+#define PROG_CADD_NOB_SHIFT (8)
+#define ERASE_RADD_NOB_MASK (0x0070)
+#define ERASE_RADD_NOB_SHIFT (4)
+#define ERASE_CADD_NOB_MASK (0x0007)
+#define ERASE_CADD_NOB_SHIFT (0)
+
+/*******************************************************************************
+ * ECC Register Definition
+ *******************************************************************************/
+
+#define ECC_ENCCON_REG16 ((volatile P_U16)(NFIECC_BASE+0x0000))
+#define ECC_ENCCNFG_REG32 ((volatile P_U32)(NFIECC_BASE+0x0004))
+#define ECC_ENCDIADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x0008))
+#define ECC_ENCIDLE_REG32 ((volatile P_U32)(NFIECC_BASE+0x000C))
+#define ECC_ENCPAR0_REG32 ((volatile P_U32)(NFIECC_BASE+0x0010))
+#define ECC_ENCPAR1_REG32 ((volatile P_U32)(NFIECC_BASE+0x0014))
+#define ECC_ENCPAR2_REG32 ((volatile P_U32)(NFIECC_BASE+0x0018))
+#define ECC_ENCPAR3_REG32 ((volatile P_U32)(NFIECC_BASE+0x001C))
+#define ECC_ENCPAR4_REG32 ((volatile P_U32)(NFIECC_BASE+0x0020))
+#define ECC_ENCSTA_REG32 ((volatile P_U32)(NFIECC_BASE+0x0024))
+#define ECC_ENCIRQEN_REG16 ((volatile P_U16)(NFIECC_BASE+0x0028))
+#define ECC_ENCIRQSTA_REG16 ((volatile P_U16)(NFIECC_BASE+0x002C))
+
+#define ECC_DECCON_REG16 ((volatile P_U16)(NFIECC_BASE+0x0100))
+#define ECC_DECCNFG_REG32 ((volatile P_U32)(NFIECC_BASE+0x0104))
+#define ECC_DECDIADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x0108))
+#define ECC_DECIDLE_REG16 ((volatile P_U16)(NFIECC_BASE+0x010C))
+#define ECC_DECFER_REG16 ((volatile P_U16)(NFIECC_BASE+0x0110))
+#define ECC_DECENUM0_REG32 ((volatile P_U32)(NFIECC_BASE+0x0114))
+#define ECC_DECENUM1_REG32 ((volatile P_U32)(NFIECC_BASE+0x0118))
+
+#define ECC_DECDONE_REG16 ((volatile P_U16)(NFIECC_BASE+0x011C))
+#define ECC_DECEL0_REG32 ((volatile P_U32)(NFIECC_BASE+0x0120))
+#define ECC_DECEL1_REG32 ((volatile P_U32)(NFIECC_BASE+0x0124))
+#define ECC_DECEL2_REG32 ((volatile P_U32)(NFIECC_BASE+0x0128))
+#define ECC_DECEL3_REG32 ((volatile P_U32)(NFIECC_BASE+0x012C))
+#define ECC_DECEL4_REG32 ((volatile P_U32)(NFIECC_BASE+0x0130))
+#define ECC_DECEL5_REG32 ((volatile P_U32)(NFIECC_BASE+0x0134))
+#define ECC_DECEL6_REG32 ((volatile P_U32)(NFIECC_BASE+0x0138))
+#define ECC_DECEL7_REG32 ((volatile P_U32)(NFIECC_BASE+0x013C))
+#define ECC_DECIRQEN_REG16 ((volatile P_U16)(NFIECC_BASE+0x0140))
+#define ECC_DECIRQSTA_REG16 ((volatile P_U16)(NFIECC_BASE+0x0144))
+#define ECC_FDMADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x0148)) //**
+#define ECC_DECFSM_REG32 ((volatile P_U32)(NFIECC_BASE+0x014C))
+#define ECC_SYNSTA_REG32 ((volatile P_U32)(NFIECC_BASE+0x0150))
+#define ECC_DECNFIDI_REG32 ((volatile P_U32)(NFIECC_BASE+0x0154))
+#define ECC_SYN0_REG32 ((volatile P_U32)(NFIECC_BASE+0x0158))
+
+
+/*******************************************************************************
+ * ECC register definition
+ *******************************************************************************/
+/* ECC_ENCON */
+#define ENC_EN (0x0001)
+#define ENC_DE (0x0000)
+
+/* ECC_ENCCNFG */
+#define ECC_CNFG_ECC4 (0x0000)
+#define ECC_CNFG_ECC6 (0x0001)
+#define ECC_CNFG_ECC8 (0x0002)
+#define ECC_CNFG_ECC10 (0x0003)
+#define ECC_CNFG_ECC12 (0x0004)
+#define ECC_CNFG_ECC_MASK (0x00000007)
+
+#define ENC_CNFG_NFI (0x0010)
+#define ENC_CNFG_MODE_MASK (0x0010)
+
+#define ENC_CNFG_META6 (0x10300000)
+#define ENC_CNFG_META8 (0x10400000)
+
+#define ENC_CNFG_MSG_MASK (0x1FFF0000)
+#define ENC_CNFG_MSG_SHIFT (0x10)
+
+/* ECC_ENCIDLE */
+#define ENC_IDLE (0x0001)
+
+/* ECC_ENCSTA */
+#define STA_FSM (0x001F)
+#define STA_COUNT_PS (0xFF10)
+#define STA_COUNT_MS (0x3FFF0000)
+
+/* ECC_ENCIRQEN */
+#define ENC_IRQEN (0x0001)
+
+/* ECC_ENCIRQSTA */
+#define ENC_IRQSTA (0x0001)
+
+/* ECC_DECCON */
+#define DEC_EN (0x0001)
+#define DEC_DE (0x0000)
+
+/* ECC_ENCCNFG */
+#define DEC_CNFG_ECC4 (0x0000)
+//#define DEC_CNFG_ECC6 (0x0001)
+//#define DEC_CNFG_ECC12 (0x0002)
+#define DEC_CNFG_NFI (0x0010)
+//#define DEC_CNFG_META6 (0x10300000)
+//#define DEC_CNFG_META8 (0x10400000)
+
+#define DEC_CNFG_BURST_EN (0x00100)
+#define DEC_CNFG_FER (0x01000)
+#define DEC_CNFG_EL (0x02000)
+#define DEC_CNFG_CORRECT (0x03000)
+#define DEC_CNFG_TYPE_MASK (0x03000)
+
+#define DEC_CNFG_EMPTY_EN (0x80000000)
+
+#define DEC_CNFG_CODE_MASK (0x1FFF0000)
+#define DEC_CNFG_CODE_SHIFT (0x10)
+
+/* ECC_DECIDLE */
+#define DEC_IDLE (0x0001)
+
+/* ECC_DECFER */
+#define DEC_FER0 (0x0001)
+#define DEC_FER1 (0x0002)
+#define DEC_FER2 (0x0004)
+#define DEC_FER3 (0x0008)
+#define DEC_FER4 (0x0010)
+#define DEC_FER5 (0x0020)
+#define DEC_FER6 (0x0040)
+#define DEC_FER7 (0x0080)
+
+/* ECC_DECENUM */
+#define ERR_NUM0 (0x0000000F)
+#define ERR_NUM1 (0x000000F0)
+#define ERR_NUM2 (0x00000F00)
+#define ERR_NUM3 (0x0000F000)
+#define ERR_NUM4 (0x000F0000)
+#define ERR_NUM5 (0x00F00000)
+#define ERR_NUM6 (0x0F000000)
+#define ERR_NUM7 (0xF0000000)
+
+/* ECC_DECDONE */
+#define DEC_DONE0 (0x0001)
+#define DEC_DONE1 (0x0002)
+#define DEC_DONE2 (0x0004)
+#define DEC_DONE3 (0x0008)
+#define DEC_DONE4 (0x0010)
+#define DEC_DONE5 (0x0020)
+#define DEC_DONE6 (0x0040)
+#define DEC_DONE7 (0x0080)
+
+/* ECC_DECIRQEN */
+#define DEC_IRQEN (0x0001)
+
+/* ECC_DECIRQSTA */
+#define DEC_IRQSTA (0x0001)
+
+#define CHIPVER_ECO_1 (0x8a00)
+#define CHIPVER_ECO_2 (0x8a01)
+
+#if 0
+#define MT6573_NFI_IRQ_LINE NFI_INT
+#define MT6573_NFIECC_IRQ_LINE NFI_ECC_INT
+#else
+#define MT6573_NFI_IRQ_LINE 26
+#define MT6573_NFIECC_IRQ_LINE 27
+
+#endif
+
+//#define NAND_PFM
+
+/*******************************************************************************
+ * Data Structure Definition
+ *******************************************************************************/
+struct mt6573_nand_host
+{
+ struct nand_chip nand_chip;
+ struct mtd_info mtd;
+ struct mt6573_nand_host_hw *hw;
+};
+
+struct mt6573_CMD
+{
+ u32 u4ColAddr;
+ u32 u4RowAddr;
+ u32 u4OOBRowAddr;
+ u8 au1OOB[128];
+ u8* pDataBuf;
+#ifdef NAND_PFM
+ u32 pureReadOOB;
+ u32 pureReadOOBNum;
+#endif
+};
+
+/*
+ * ECC layout control structure. Exported to userspace for
+ * diagnosis and to allow creation of raw images
+struct nand_ecclayout {
+ uint32_t eccbytes;
+ uint32_t eccpos[64];
+ uint32_t oobavail;
+ struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES];
+};
+*/
+#define __DEBUG_NAND 1 /* Debug information on/off */
+
+/* Debug message event */
+#define DBG_EVT_NONE 0x00000000 /* No event */
+#define DBG_EVT_INIT 0x00000001 /* Initial related event */
+#define DBG_EVT_VERIFY 0x00000002 /* Verify buffer related event */
+#define DBG_EVT_PERFORMANCE 0x00000004 /* Performance related event */
+#define DBG_EVT_READ 0x00000008 /* Read related event */
+#define DBG_EVT_WRITE 0x00000010 /* Write related event */
+#define DBG_EVT_ERASE 0x00000020 /* Erase related event */
+#define DBG_EVT_BADBLOCK 0x00000040 /* Badblock related event */
+#define DBG_EVT_POWERCTL 0x00000080 /* Suspend/Resume related event */
+
+#define DBG_EVT_ALL 0xffffffff
+
+#define DBG_EVT_MASK (DBG_EVT_ALL)
+
+#define NFI_DEFAULT_ACCESS_TIMING 0xF3FFFFFF
+#define NFI_CS_NUM 1
+
+//Not support bad block manager in kernel
+//#define NAND_BMT
+
+#if __DEBUG_NAND
+#define MSG(evt, fmt, args...) \
+do { \
+ if ((DBG_EVT_##evt) & DBG_EVT_MASK) { \
+ printk(fmt, ##args); \
+ } \
+} while(0)
+
+#define MSG_FUNC_ENTRY(f) MSG(FUC, "<FUN_ENT>: %s\n", __FUNCTION__)
+#else
+#define MSG(evt, fmt, args...) do{}while(0)
+#define MSG_FUNC_ENTRY(f) do{}while(0)
+#endif
+
+#define RAMDOM_READ 1<<0
+#define CACHE_READ 1<<1
+
+typedef struct
+{
+ u16 id; //deviceid+menuid
+ u32 ext_id;
+ u8 addr_cycle;
+ u8 iowidth;
+ u16 totalsize;
+ u16 blocksize;
+ u16 pagesize;
+ u32 timmingsetting;
+ char devciename[14];
+ u32 advancedmode; //
+}flashdev_info,*pflashdev_info;
+
+/* NAND driver */
+struct mt6573_nand_host_hw {
+ unsigned int nfi_bus_width; /* NFI_BUS_WIDTH */
+ unsigned int nfi_access_timing; /* NFI_ACCESS_TIMING */
+ unsigned int nfi_cs_num; /* NFI_CS_NUM */
+ unsigned int nand_sec_size; /* NAND_SECTOR_SIZE */
+ unsigned int nand_sec_shift; /* NAND_SECTOR_SHIFT */
+ unsigned int nand_ecc_size;
+ unsigned int nand_ecc_bytes;
+ unsigned int nand_ecc_mode;
+};
+extern struct mt6573_nand_host_hw mt6573_nand_hw;
+
+#endif
Index: linux-3.18.21/drivers/mtd/mtk/mt6573_typedefs.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/mtk/mt6573_typedefs.h 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,329 @@
+/*****************************************************************************
+* Copyright Statement:
+* --------------------
+* This software is protected by Copyright and the information contained
+* herein is confidential. The software may not be copied and the information
+* contained herein may not be used or disclosed except with the written
+* permission of MediaTek Inc. (C) 2008
+*
+* BY OPENING THIS FILE, BUYER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
+* THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
+* RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO BUYER ON
+* AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
+* NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
+* SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
+* SUPPLIED WITH THE MEDIATEK SOFTWARE, AND BUYER AGREES TO LOOK ONLY TO SUCH
+* THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. MEDIATEK SHALL ALSO
+* NOT BE RESPONSIBLE FOR ANY MEDIATEK SOFTWARE RELEASES MADE TO BUYER'S
+* SPECIFICATION OR TO CONFORM TO A PARTICULAR STANDARD OR OPEN FORUM.
+*
+* BUYER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND CUMULATIVE
+* LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
+* AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
+* OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY BUYER TO
+* MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
+*
+* THE TRANSACTION CONTEMPLATED HEREUNDER SHALL BE CONSTRUED IN ACCORDANCE
+* WITH THE LAWS OF THE STATE OF CALIFORNIA, USA, EXCLUDING ITS CONFLICT OF
+* LAWS PRINCIPLES. ANY DISPUTES, CONTROVERSIES OR CLAIMS ARISING THEREOF AND
+* RELATED THERETO SHALL BE SETTLED BY ARBITRATION IN SAN FRANCISCO, CA, UNDER
+* THE RULES OF THE INTERNATIONAL CHAMBER OF COMMERCE (ICC).
+*
+*****************************************************************************/
+
+#ifndef _MT6516_TYPEDEFS_H
+#define _MT6516_TYPEDEFS_H
+
+#include <linux/bug.h>
+//#include "mach/sync_write.h"
+
+// ---------------------------------------------------------------------------
+// Basic Type Definitions
+// ---------------------------------------------------------------------------
+
+typedef volatile unsigned char *P_kal_uint8;
+typedef volatile unsigned short *P_kal_uint16;
+typedef volatile unsigned int *P_kal_uint32;
+
+typedef long LONG;
+typedef unsigned char UBYTE;
+typedef short SHORT;
+
+typedef signed char kal_int8;
+typedef signed short kal_int16;
+typedef signed int kal_int32;
+typedef long long kal_int64;
+typedef unsigned char kal_uint8;
+typedef unsigned short kal_uint16;
+typedef unsigned int kal_uint32;
+typedef unsigned long long kal_uint64;
+typedef char kal_char;
+
+typedef unsigned int *UINT32P;
+typedef volatile unsigned short *UINT16P;
+typedef volatile unsigned char *UINT8P;
+typedef unsigned char *U8P;
+
+typedef volatile unsigned char *P_U8;
+typedef volatile signed char *P_S8;
+typedef volatile unsigned short *P_U16;
+typedef volatile signed short *P_S16;
+typedef volatile unsigned int *P_U32;
+typedef volatile signed int *P_S32;
+typedef unsigned long long *P_U64;
+typedef signed long long *P_S64;
+
+typedef unsigned char U8;
+typedef signed char S8;
+typedef unsigned short U16;
+typedef signed short S16;
+typedef unsigned int U32;
+typedef signed int S32;
+typedef unsigned long long U64;
+typedef signed long long S64;
+//typedef unsigned char bool;
+#if 0
+typedef unsigned char UINT8;
+typedef unsigned short UINT16;
+typedef unsigned int UINT32;
+typedef unsigned short USHORT;
+typedef signed char INT8;
+typedef signed short INT16;
+typedef signed int INT32;
+typedef unsigned int DWORD;
+typedef void VOID;
+typedef unsigned char BYTE;
+typedef float FLOAT;
+
+typedef char *LPCSTR;
+typedef short *LPWSTR;
+#endif
+
+// ---------------------------------------------------------------------------
+// Constants
+// ---------------------------------------------------------------------------
+
+#define IMPORT EXTERN
+#ifndef __cplusplus
+ #define EXTERN extern
+#else
+ #define EXTERN extern "C"
+#endif
+#define LOCAL static
+#define GLOBAL
+#define EXPORT GLOBAL
+
+#define EQ ==
+#define NEQ !=
+#define AND &&
+#define OR ||
+#define XOR(A,B) ((!(A) AND (B)) OR ((A) AND !(B)))
+
+#ifndef FALSE
+ #define FALSE (0)
+#endif
+
+#ifndef TRUE
+ #define TRUE (1)
+#endif
+
+#ifndef NULL
+ #define NULL (0)
+#endif
+
+//enum boolean {false, true};
+enum {RX, TX, NONE};
+
+#ifndef BOOL
+typedef unsigned char BOOL;
+#endif
+
+typedef enum {
+ KAL_FALSE = 0,
+ KAL_TRUE = 1,
+} kal_bool;
+
+
+// ---------------------------------------------------------------------------
+// Type Casting
+// ---------------------------------------------------------------------------
+#if 0
+#define AS_INT32(x) (*(INT32 *)((void*)x))
+#define AS_INT16(x) (*(INT16 *)((void*)x))
+#define AS_INT8(x) (*(INT8 *)((void*)x))
+
+#define AS_UINT32(x) (*(UINT32 *)((void*)x))
+#define AS_UINT16(x) (*(UINT16 *)((void*)x))
+#define AS_UINT8(x) (*(UINT8 *)((void*)x))
+#endif
+
+// ---------------------------------------------------------------------------
+// Register Manipulations
+// ---------------------------------------------------------------------------
+
+
+#define READ_REGISTER_UINT32(reg) \
+ (*(volatile unsigned int * const)(reg))
+
+#define WRITE_REGISTER_UINT32(reg, val) \
+ (*(volatile unsigned int * const)(reg)) = (val)
+#if 0
+
+#define READ_REGISTER_UINT16(reg) \
+ (*(volatile unsigned int * const)(reg))
+
+#define WRITE_REGISTER_UINT16(reg, val) \
+ (*(volatile unsigned int * const)(reg)) = (val)
+
+#define READ_REGISTER_UINT8(reg) \
+ (*(volatile UINT8 * const)(reg))
+
+#define WRITE_REGISTER_UINT8(reg, val) \
+ (*(volatile UINT8 * const)(reg)) = (val)
+#endif
+#if 0
+#define READ_REGISTER_UINT32(reg) \
+ (*(volatile UINT32 * const)(reg))
+
+#define WRITE_REGISTER_UINT32(reg, val) \
+ mt65xx_reg_sync_writel(val,reg)
+
+#define READ_REGISTER_UINT16(reg) \
+ (*(volatile UINT16 * const)(reg))
+
+#define WRITE_REGISTER_UINT16(reg, val) \
+ mt65xx_reg_sync_writew(val,reg)
+
+#define READ_REGISTER_UINT8(reg) \
+ (*(volatile UINT8 * const)(reg))
+
+#define WRITE_REGISTER_UINT8(reg, val) \
+ mt65xx_reg_sync_writeb(val,reg)
+#endif
+#if 0
+#define INREG8(x) READ_REGISTER_UINT8((UINT8*)((void*)(x)))
+#define OUTREG8(x, y) WRITE_REGISTER_UINT8((UINT8*)((void*)(x)), (UINT8)(y))
+#define SETREG8(x, y) OUTREG8(x, INREG8(x)|(y))
+#define CLRREG8(x, y) OUTREG8(x, INREG8(x)&~(y))
+#define MASKREG8(x, y, z) OUTREG8(x, (INREG8(x)&~(y))|(z))
+
+#define INREG16(x) READ_REGISTER_UINT16((UINT16*)((void*)(x)))
+#define OUTREG16(x, y) WRITE_REGISTER_UINT16((UINT16*)((void*)(x)),(UINT16)(y))
+#define SETREG16(x, y) OUTREG16(x, INREG16(x)|(y))
+#define CLRREG16(x, y) OUTREG16(x, INREG16(x)&~(y))
+#define MASKREG16(x, y, z) OUTREG16(x, (INREG16(x)&~(y))|(z))
+#endif
+#if 0
+#define INREG32(x) READ_REGISTER_UINT32((unsigned int*)((void*)(x)))
+#define OUTREG32(x, y) WRITE_REGISTER_UINT32((unsigned int*)((void*)(x)), (unsigned int)(y))
+#define SETREG32(x, y) OUTREG32(x, INREG32(x)|(y))
+#define CLRREG32(x, y) OUTREG32(x, INREG32(x)&~(y))
+#define MASKREG32(x, y, z) OUTREG32(x, (INREG32(x)&~(y))|(z))
+#else
+#define INREG32(x) regRead32((int)(x))
+#define OUTREG32(x, y) regWrite32((int)(x),(int)(y))
+#define SETREG32(x, y) regWrite32((int)(x), regRead32((int)(x))|((int)(y)))
+#define CLRREG32(x, y) regWrite32((int)(x), regRead32((int)(x))&~((int)(y)))
+#define MASKREG32(x, y, z) regWrite32((int)(x), (regRead32((int)(x))&~((int)(y)))|((int)(z)))
+
+#endif
+
+#if 0
+#define DRV_Reg8(addr) INREG8((addr+3))
+#define DRV_WriteReg8(addr, data) OUTREG8((addr+3), data)
+#define DRV_SetReg8(addr, data) SETREG8((addr+3), data)
+#define DRV_ClrReg8(addr, data) CLRREG8((addr+3), data)
+
+#define DRV_Reg16(addr) INREG16((addr+2))
+#define DRV_WriteReg16(addr, data) OUTREG16((addr+2), data)
+#define DRV_SetReg16(addr, data) SETREG16((addr+2), data)
+#define DRV_ClrReg16(addr, data) CLRREG16((addr+2), data)
+#else
+#define DRV_Reg8(addr) INREG32(addr)
+#define DRV_WriteReg8(addr, data) OUTREG32(addr, data)
+#define DRV_SetReg8(addr, data) SETREG32(addr, data)
+#define DRV_ClrReg8(addr, data) CLRREG32(addr, data)
+
+#define DRV_Reg16(addr) INREG32(addr)
+#define DRV_WriteReg16(addr, data) OUTREG32(addr, data)
+#define DRV_SetReg16(addr, data) SETREG32(addr, data)
+#define DRV_ClrReg16(addr, data) CLRREG32(addr, data)
+#endif
+
+#define DRV_Reg32(addr) INREG32(addr)
+#define DRV_WriteReg32(addr, data) OUTREG32(addr, data)
+#define DRV_SetReg32(addr, data) SETREG32(addr, data)
+#define DRV_ClrReg32(addr, data) CLRREG32(addr, data)
+
+// !!! DEPRECATED, WILL BE REMOVED LATER !!!
+#define DRV_Reg(addr) DRV_Reg16(addr)
+#define DRV_WriteReg(addr, data) DRV_WriteReg16(addr, data)
+#define DRV_SetReg(addr, data) DRV_SetReg16(addr, data)
+#define DRV_ClrReg(addr, data) DRV_ClrReg16(addr, data)
+
+
+// ---------------------------------------------------------------------------
+// Compiler Time Deduction Macros
+// ---------------------------------------------------------------------------
+
+#define _MASK_OFFSET_1(x, n) ((x) & 0x1) ? (n) :
+#define _MASK_OFFSET_2(x, n) _MASK_OFFSET_1((x), (n)) _MASK_OFFSET_1((x) >> 1, (n) + 1)
+#define _MASK_OFFSET_4(x, n) _MASK_OFFSET_2((x), (n)) _MASK_OFFSET_2((x) >> 2, (n) + 2)
+#define _MASK_OFFSET_8(x, n) _MASK_OFFSET_4((x), (n)) _MASK_OFFSET_4((x) >> 4, (n) + 4)
+#define _MASK_OFFSET_16(x, n) _MASK_OFFSET_8((x), (n)) _MASK_OFFSET_8((x) >> 8, (n) + 8)
+#define _MASK_OFFSET_32(x, n) _MASK_OFFSET_16((x), (n)) _MASK_OFFSET_16((x) >> 16, (n) + 16)
+
+#define MASK_OFFSET_ERROR (0xFFFFFFFF)
+
+#define MASK_OFFSET(x) (_MASK_OFFSET_32(x, 0) MASK_OFFSET_ERROR)
+
+
+// ---------------------------------------------------------------------------
+// Assertions
+// ---------------------------------------------------------------------------
+
+#ifndef ASSERT
+ #define ASSERT(expr) BUG_ON(!(expr))
+#endif
+
+#ifndef NOT_IMPLEMENTED
+ #define NOT_IMPLEMENTED() BUG_ON(1)
+#endif
+
+#define STATIC_ASSERT(pred) STATIC_ASSERT_X(pred, __LINE__)
+#define STATIC_ASSERT_X(pred, line) STATIC_ASSERT_XX(pred, line)
+#define STATIC_ASSERT_XX(pred, line) \
+ extern char assertion_failed_at_##line[(pred) ? 1 : -1]
+
+// ---------------------------------------------------------------------------
+// Resolve Compiler Warnings
+// ---------------------------------------------------------------------------
+
+#define NOT_REFERENCED(x) { (x) = (x); }
+
+
+// ---------------------------------------------------------------------------
+// Utilities
+// ---------------------------------------------------------------------------
+
+#define MAXIMUM(A,B) (((A)>(B))?(A):(B))
+#define MINIMUM(A,B) (((A)<(B))?(A):(B))
+
+#define ARY_SIZE(x) (sizeof((x)) / sizeof((x[0])))
+#define DVT_DELAYMACRO(u4Num) \
+{ \
+ UINT32 u4Count = 0 ; \
+ for (u4Count = 0; u4Count < u4Num; u4Count++ ); \
+} \
+
+#define A68351B 0
+#define B68351B 1
+#define B68351D 2
+#define B68351E 3
+#define UNKNOWN_IC_VERSION 0xFF
+
+
+#endif // _MT6516_TYPEDEFS_H
+
Index: linux-3.18.21/drivers/mtd/mtk/nand.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/mtk/nand.c 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,5972 @@
+/******************************************************************************
+* mt6573_nand.c - MT6573 NAND Flash Device Driver
+ *
+* Copyright 2009-2010 MediaTek Co.,Ltd.
+ *
+* DESCRIPTION:
+* This file provides NAND-related functions to the other drivers
+ *
+* modification history
+* ----------------------------------------
+* v2.0, 11 Feb 2010, mtk02528 written
+* ----------------------------------------
+******************************************************************************/
+
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/dma-mapping.h>
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
+#include <linux/proc_fs.h>
+#include <linux/time.h>
+
+#include <asm/io.h>
+#include <asm/cacheflush.h>
+#include <asm/uaccess.h>
+
+#include <linux/miscdevice.h>
+#include <linux/scatterlist.h>
+#include <linux/version.h>
+
+//#include <mach/mt6573.h>
+// #include <mach/mtk_nand_device.h>
+//#include <mach/dma.h>
+//#include <mach/mt6573_devs.h>
+//#include <mach/mt6573_reg_base.h>
+#include "mt6573_typedefs.h"
+/* Koshi for mt6573 porting */
+//#include <mach/mt6573_pll.h>
+/*JL*/
+//#include <mach/mt6573_nand.h>
+#include "mt6573_nand.h"
+/* Koshi for mt6573 porting */
+#include "../econet/bmt.h"
+//#include "bmt.h"
+// #include <mach/partition.h>
+
+/* Added for TCM used */
+//#include <asm/tcm.h>
+//#include <asm/system.h>
+//#include "partition_define.h"
+#include "nand_devicelist.h"
+//#include "nand_customer.h"
+#include <linux/mtd/map.h>
+#include <asm/tc3162/tc3162.h>
+
+#if defined (TCSUPPORT_GPON_DUAL_IMAGE) || defined (TCSUPPORT_EPON_DUAL_IMAGE)
+#include "flash_layout/tc_partition.h"
+#endif
+
+#define VERSION "v2.0"
+#define MODULE_NAME "# MT6573 NAND #"
+#define PROCNAME "driver/nand"
+#define PROCNAME_ECC "driver/nand_ecc"
+
+
+//#define NAND_ECC_TEST
+//#define USE_AHB_MODE 1
+BOOL g_bUseAHBMode=true;
+//#if USE_AHB_MODE
+BOOL g_bAutoFMT=true;
+BOOL g_bHwEcc=true;
+BOOL g_bOOB_Test=false;
+BOOL g_bReadEraseStatus=false;
+#ifdef NAND_ECC_TEST
+int g_hw_ecc_bit = 4;
+int g_spare_size = 16;
+#endif
+//#else
+//BOOL g_bHwEcc=false;
+//#endif
+#define BMT_BAD_BLOCK_INDEX_OFFSET (1)
+#ifdef TCSUPPORT_NAND_BMT
+#define POOL_GOOD_BLOCK_PERCENT 8/100
+#define SLAVE_IMAGE_OFFSET 0xf00000
+static int bmt_pool_size = 0;
+static bmt_struct *g_bmt = NULL;
+static init_bbt_struct *g_bbt = NULL;
+extern int nand_logic_size;
+extern int nand_flash_avalable_size;
+#endif
+
+#define _MTK_NAND_DUMMY_DRIVER_
+
+#ifndef NAND_OTP_SUPPORT
+#define NAND_OTP_SUPPORT 0
+#endif
+
+#if NAND_OTP_SUPPORT
+
+#define SAMSUNG_OTP_SUPPORT 1
+#define OTP_MAGIC_NUM 0x4E3AF28B
+#define SAMSUNG_OTP_PAGE_NUM 6
+
+static const unsigned int Samsung_OTP_Page[SAMSUNG_OTP_PAGE_NUM] = {0x15, 0x16, 0x17, 0x18, 0x19, 0x1b};
+
+static struct mt6573_otp_config g_mt6573_otp_fuc;
+static spinlock_t g_OTPLock;
+
+#define OTP_MAGIC 'k'
+
+/* NAND OTP IO control number */
+#define OTP_GET_LENGTH _IOW(OTP_MAGIC, 1, int)
+#define OTP_READ _IOW(OTP_MAGIC, 2, int)
+#define OTP_WRITE _IOW(OTP_MAGIC, 3, int)
+
+#define FS_OTP_READ 0
+#define FS_OTP_WRITE 1
+
+/* NAND OTP Error codes */
+#define OTP_SUCCESS 0
+#define OTP_ERROR_OVERSCOPE -1
+#define OTP_ERROR_TIMEOUT -2
+#define OTP_ERROR_BUSY -3
+#define OTP_ERROR_NOMEM -4
+#define OTP_ERROR_RESET -5
+
+#endif
+
+#if NAND_OTP_SUPPORT
+struct mt6573_otp_config
+{
+ u32 (* OTPRead) (u32 PageAddr, void *BufferPtr, void *SparePtr);
+ u32 (* OTPWrite) (u32 PageAddr, void *BufferPtr, void *SparePtr);
+ u32 (* OTPQueryLength) (u32 *Length);
+};
+
+struct otp_ctl
+{
+ unsigned int QLength;
+ unsigned int Offset;
+ unsigned int Length;
+ char *BufferPtr;
+ unsigned int status;
+};
+#endif
+
+#define BLANK_PAGE_FIXUP 1
+
+/*******************************************************************************
+ * Macro definition
+ *******************************************************************************/
+//#define NFI_SET_REG32(reg, value) (DRV_WriteReg32(reg, DRV_Reg32(reg) | (value)))
+//#define NFI_SET_REG16(reg, value) (DRV_WriteReg16(reg, DRV_Reg16(reg) | (value)))
+//#define NFI_CLN_REG32(reg, value) (DRV_WriteReg32(reg, DRV_Reg32(reg) & (~(value))))
+//#define NFI_CLN_REG16(reg, value) (DRV_WriteReg16(reg, DRV_Reg16(reg) & (~(value))))
+
+#define NFI_SET_REG32(reg, value) \
+do { \
+ g_value = (DRV_Reg32(reg) | (value));\
+ DRV_WriteReg32(reg, g_value); \
+} while(0)
+
+#define NFI_SET_REG16(reg, value) \
+do { \
+ g_value = (DRV_Reg16(reg) | (value));\
+ DRV_WriteReg16(reg, g_value); \
+} while(0)
+
+#define NFI_CLN_REG32(reg, value) \
+do { \
+ g_value = (DRV_Reg32(reg) & (~(value)));\
+ DRV_WriteReg32(reg, g_value); \
+} while(0)
+
+#define NFI_CLN_REG16(reg, value) \
+do { \
+ g_value = (DRV_Reg16(reg) & (~(value)));\
+ DRV_WriteReg16(reg, g_value); \
+} while(0)
+
+#define PIO_BIG_ENDIAN (DRV_Reg16(NFI_CNFG_REG16) & CNFG_PIO_BIG_ENDIAN)
+
+
+#define NFI_WAIT_STATE_DONE(state) do{;}while (__raw_readl(NFI_STA_REG32) & state)
+#define NFI_WAIT_TO_READY() do{;}while (!(__raw_readl(NFI_STA_REG32) & STA_BUSY2READY))
+
+
+#define NAND_SECTOR_SIZE (512)
+#define OOB_PER_SECTOR (16)
+#define OOB_AVAI_PER_SECTOR (8)
+
+#ifdef TCSUPPORT_NAND_BMT
+#define BMT_POOL_SIZE (80)
+#else
+#define BMT_POOL_SIZE (0)
+#endif
+
+#define PMT_POOL_SIZE (2)
+
+#define K0_TO_K1(x) (((uint32)x) | 0xA0000000)
+
+/*******************************************************************************
+ * Global Variable Definition
+ *******************************************************************************/
+#ifdef NAND_PFM
+static suseconds_t g_PFM_R = 0;
+static suseconds_t g_PFM_W = 0;
+static suseconds_t g_PFM_E = 0;
+static u32 g_PFM_RNum = 0;
+static u32 g_PFM_RD = 0;
+static u32 g_PFM_WD = 0;
+static struct timeval g_now;
+
+#define PFM_BEGIN(time) \
+do_gettimeofday(&g_now); \
+(time) = g_now;
+
+#define PFM_END_R(time, n) \
+do_gettimeofday(&g_now); \
+g_PFM_R += (g_now.tv_sec * 1000000 + g_now.tv_usec) - (time.tv_sec * 1000000 + time.tv_usec); \
+g_PFM_RNum += 1; \
+g_PFM_RD += n; \
+MSG(PERFORMANCE, "%s - Read PFM: %lu, data: %d, ReadOOB: %d (%d, %d)\n", MODULE_NAME , g_PFM_R, g_PFM_RD, g_kCMD.pureReadOOB, g_kCMD.pureReadOOBNum, g_PFM_RNum);
+
+#define PFM_END_W(time, n) \
+do_gettimeofday(&g_now); \
+g_PFM_W += (g_now.tv_sec * 1000000 + g_now.tv_usec) - (time.tv_sec * 1000000 + time.tv_usec); \
+g_PFM_WD += n; \
+MSG(PERFORMANCE, "%s - Write PFM: %lu, data: %d\n", MODULE_NAME, g_PFM_W, g_PFM_WD);
+
+#define PFM_END_E(time) \
+do_gettimeofday(&g_now); \
+g_PFM_E += (g_now.tv_sec * 1000000 + g_now.tv_usec) - (time.tv_sec * 1000000 + time.tv_usec); \
+MSG(PERFORMANCE, "%s - Erase PFM: %lu\n", MODULE_NAME, g_PFM_E);
+#else
+#define PFM_BEGIN(time)
+#define PFM_END_R(time, n)
+#define PFM_END_W(time, n)
+#define PFM_END_E(time)
+#endif
+
+/* MT6573 NAND Driver */
+struct mt6573_nand_host_hw mt6573_nand_hw = { /* board-level NFI/NAND controller configuration */
+	.nfi_bus_width = 8, /* 8-bit NAND data bus */
+	.nfi_access_timing = NFI_DEFAULT_ACCESS_TIMING,
+	.nfi_cs_num = NFI_CS_NUM, /* one chip select (see NFI_CS_NUM) */
+	.nand_sec_size = 512, /* ECC sector = 512 bytes */
+	.nand_sec_shift = 9, /* log2(512) */
+	.nand_ecc_size = 2048,
+	.nand_ecc_bytes = 32,
+	.nand_ecc_mode = NAND_ECC_NONE, /* MTD soft-ECC off; hardware ECC used instead (see g_bHwEcc) */
+};
+
+#ifdef TCSUPPORT_DUAL_IMAGE_ENHANCE
+extern int offset;
+#endif
+
+
+static struct mtd_partition mt6573_partitions[] = { /* single whole-flash MTD partition */
+	{ /* First partition */
+		name : "NAND Flash", /* Section */
+		size : 0x0, /* Size; 0 here -- presumably filled with the chip size at probe time, confirm */
+		offset : 0 /* Offset from start of flash- location 0x0*/
+	},
+};
+
+
+#define TIMEOUT_1 0x1fff
+#define TIMEOUT_2 0x8ff
+#define TIMEOUT_3 0xffff
+#define TIMEOUT_4 5000 //PIO
+
+#define NFI_ISSUE_COMMAND(cmd, col_addr, row_addr, col_num, row_num) \
+ do { \
+ DRV_WriteReg(NFI_CMD_REG16,cmd);\
+ while (DRV_Reg32(NFI_STA_REG32) & STA_CMD_STATE);\
+ DRV_WriteReg32(NFI_COLADDR_REG32, col_addr);\
+ DRV_WriteReg32(NFI_ROWADDR_REG32, row_addr);\
+ DRV_WriteReg(NFI_ADDRNOB_REG16, col_num | (row_num<<ADDR_ROW_NOB_SHIFT));\
+ while (DRV_Reg32(NFI_STA_REG32) & STA_ADDR_STATE);\
+ }while(0);
+
+//-------------------------------------------------------------------------------
+static struct completion g_comp_AHB_Done;
+static struct mt6573_CMD g_kCMD;
+static u32 g_u4ChipVer;
+bool g_bInitDone;
+static int g_i4Interrupt;
+static bool g_bcmdstatus;
+static u32 g_value = 0;
+static int g_page_size;
+
+static u8 *local_buffer_16_align_r; // 16 byte aligned buffer, for HW issue
+static u8 local_buffer_r[4096+32];
+static u8 *local_buffer_16_align_w; // 16 byte aligned buffer, for HW issue
+static u8 local_buffer_w[4096+32];
+
+extern void nand_release_device(struct mtd_info *mtd);
+//extern int nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state);
+extern int nand_get_device(struct mtd_info *mtd, int new_state);
+
+
+extern unsigned int (*ranand_read_byte)(unsigned long long);
+extern unsigned int (*ranand_read_dword)(unsigned long long);
+
+#ifdef TCSUPPORT_NAND_BMT
+static bmt_struct *g_bmt;
+#endif
+struct mt6573_nand_host *host;
+
+//extern struct mtd_partition g_pasStatic_Partition[] ;
+//extern int part_num;
+#ifdef PMT
+extern void part_init_pmt(struct mtd_info *mtd, u8 *buf);
+extern struct mtd_partition g_exist_Partition[] ;
+#endif
+int manu_id;
+int dev_id;
+
+static u8 local_oob_buf[234];
+
+#ifdef _MTK_NAND_DUMMY_DRIVER_
+int dummy_driver_debug;
+#endif
+
+
+void dump_buf(uint8_t *buf, int len) /* debug aid: hex-dump 'len' bytes from 'buf' to the kernel log, 16 per line */
+{
+	int i;
+	printk("len=%d\n",len); /* announce total length first */
+
+	for(i = 0; i < (len); i++){
+		if (i>0 && !(i%16)) { /* line break after every 16 bytes */
+			printk("\n");
+		}
+		printk("%x ", *buf++);
+	}
+
+	printk("\n");
+}
+
+void nand_enable_clock(void) /* no-op stub: NFI clock ungating call is disabled on this platform */
+{
+	//(void)hwEnableClock(MT65XX_PDN_PERI_NFI, "NAND");
+}
+
+void nand_disable_clock(void) /* no-op stub: NFI clock gating call is disabled on this platform */
+{
+	//(void)hwDisableClock(MT65XX_PDN_PERI_NFI, "NAND");
+}
+
+static struct nand_ecclayout nand_oob_16 = { /* OOB layout for 16-byte spare area (512B-page devices) */
+	.eccbytes = 8,
+	.eccpos = {8, 9, 10, 11, 12, 13, 14, 15}, /* ECC parity occupies the upper half of the spare */
+	.oobfree = {{4,3}, {0, 0}} /* {offset,length}; {0,0} terminates the list */
+};
+
+struct nand_ecclayout nand_oob_64 = { /* OOB layout for 64-byte spare area (2KB-page devices) */
+	.eccbytes = 32,
+	.eccpos = {32, 33, 34, 35, 36, 37, 38, 39, /* ECC parity in bytes 32..63 */
+		40, 41, 42, 43, 44, 45, 46, 47,
+		48, 49, 50, 51, 52, 53, 54, 55,
+		56, 57, 58, 59, 60, 61, 62, 63},
+	.oobfree = {{4, 4}, {9, 7}, {17, 7}, {25, 6}, {0, 0}} /* free {offset,length} runs; {0,0} terminates */
+};
+
+struct nand_ecclayout nand_oob_128 = { /* OOB layout for 128-byte spare area (4KB-page devices) */
+	.eccbytes = 64,
+	.eccpos = { /* ECC parity occupies bytes 64..127 contiguously */
+		64, 65, 66, 67, 68, 69, 70, 71,
+		72, 73, 74, 75, 76, 77, 78, 79,
+		80, 81, 82, 83, 84, 85, 86, 87, /* fix: was "86, 86" -- byte 87 was missing, 86 duplicated */
+		88, 89, 90, 91, 92, 93, 94, 95,
+		96, 97, 98, 99, 100, 101, 102, 103,
+		104, 105, 106, 107, 108, 109, 110, 111,
+		112, 113, 114, 115, 116, 117, 118, 119,
+		120, 121, 122, 123, 124, 125, 126, 127},
+	.oobfree = {{4, 4}, {9, 7}, {17, 7}, {25, 7}, {33, 7}, {41, 7}, {49, 7}, {57, 6}} /* free {offset,length} runs */
+};
+
+flashdev_info devinfo;
+
+#ifdef TCSUPPORT_AUTOBENCH
+flashdev_info *nandDevInfo = &devinfo;
+EXPORT_SYMBOL(nandDevInfo);
+#endif
+
+static void mt6573_nand_configure_fdm(u16 u2FDMSize);
+
+
+void dump_nfi(void) /* debug aid: dump NFI and ECC controller registers; compiled in only when __DEBUG_NAND */
+{
+#if __DEBUG_NAND
+	printk(KERN_INFO "NFI_ACCCON: 0x%x\n", DRV_Reg32(NFI_ACCCON_REG32));
+	printk(KERN_INFO "NFI_PAGEFMT: 0x%x\n", DRV_Reg16(NFI_PAGEFMT_REG16));
+	printk(KERN_INFO "NFI_CNFG: 0x%x\n", DRV_Reg16(NFI_CNFG_REG16));
+	printk(KERN_INFO "NFI_CON: 0x%x\n", DRV_Reg16(NFI_CON_REG16));
+	printk(KERN_INFO "NFI_STRDATA: 0x%x\n", DRV_Reg16(NFI_STRDATA_REG16));
+	printk(KERN_INFO "NFI_ADDRCNTR: 0x%x\n", DRV_Reg16(NFI_ADDRCNTR_REG16));
+	printk(KERN_INFO "NFI_FIFOSTA: 0x%x\n", DRV_Reg16(NFI_FIFOSTA_REG16));
+	printk(KERN_INFO "NFI_ADDRNOB: 0x%x\n", DRV_Reg16(NFI_ADDRNOB_REG16));
+	printk(KERN_INFO "NFI_FDM_0L: 0x%x\n", DRV_Reg32(NFI_FDM0L_REG32));
+	printk(KERN_INFO "NFI_STA: 0x%x\n", DRV_Reg32(NFI_STA_REG32));
+	printk(KERN_INFO "NFI_FDM_0M: 0x%x\n", DRV_Reg32(NFI_FDM0M_REG32));
+	printk(KERN_INFO "NFI_IOCON: 0x%x\n", DRV_Reg16(NFI_IOCON_REG16));
+	printk(KERN_INFO "NFI_BYTELEN: 0x%x\n", DRV_Reg16(NFI_BYTELEN_REG16));
+	printk(KERN_INFO "NFI_COLADDR: 0x%x\n", DRV_Reg32(NFI_COLADDR_REG32));
+	printk(KERN_INFO "NFI_ROWADDR: 0x%x\n", DRV_Reg32(NFI_ROWADDR_REG32));
+	printk(KERN_INFO "ECC_ENCCNFG: 0x%x\n", DRV_Reg32(ECC_ENCCNFG_REG32));
+	printk(KERN_INFO "ECC_ENCCON: 0x%x\n", DRV_Reg16(ECC_ENCCON_REG16));
+	printk(KERN_INFO "ECC_DECCNFG: 0x%x\n", DRV_Reg32(ECC_DECCNFG_REG32));
+	printk(KERN_INFO "ECC_DECCON: 0x%x\n", DRV_Reg16(ECC_DECCON_REG16));
+	printk(KERN_INFO "NFI_CSEL: 0x%x\n", DRV_Reg16(NFI_CSEL_REG16));
+	//ECC
+
+	printk(KERN_INFO "NFI_STRADDR: 0x%x\n", DRV_Reg32(NFI_STRADDR_REG32));
+	printk(KERN_INFO "ECC_DECDIADDR: 0x%x\n", DRV_Reg32(ECC_DECDIADDR_REG32));
+	printk(KERN_INFO "ECC_FDMADDR_REG32: 0x%x\n", DRV_Reg32(ECC_FDMADDR_REG32));
+	printk(KERN_INFO "ECC_DECFSM_REG32: 0x%x\n", DRV_Reg32(ECC_DECFSM_REG32));
+	printk(KERN_INFO "ECC_SYNSTA_REG32: 0x%x\n", DRV_Reg32(ECC_SYNSTA_REG32));
+	printk(KERN_INFO "ECC_DECNFIDI_REG32: 0x%x\n", DRV_Reg32(ECC_DECNFIDI_REG32));
+	printk(KERN_INFO "ECC_SYN0_REG32: 0x%x\n", DRV_Reg32(ECC_SYN0_REG32));
+	// printk(KERN_INFO "NFI clock register: 0x%x: %s\n", DRV_Reg32((volatile u32 *)0x00000000),
+	// (DRV_Reg32((volatile u32 *)0xF0039300) & (1 << 17)) ? "miss" : "OK");
+#endif
+}
+
+
+bool get_device_info(u16 id, u32 ext_id, flashdev_info *pdevinfo) /* look up (id, ext_id) in gen_FlashTable and fill *pdevinfo; true on success */
+{
+	u32 index;
+	for(index=0;gen_FlashTable[index].id!=0;index++) /* table is terminated by an entry with id == 0 */
+	{
+		//if(id==gen_FlashTable[index].id && ext_id == gen_FlashTable[index].ext_id)
+		if (id == gen_FlashTable[index].id)
+		{
+			ext_id = ((gen_FlashTable[index].ext_id&0xFF)==0xFF)?(ext_id|0xFF) : ext_id; /* low ext_id byte 0xFF in the table means "don't care": force a match on that byte */
+			if(ext_id == gen_FlashTable[index].ext_id){
+				pdevinfo->id = gen_FlashTable[index].id;
+				pdevinfo->ext_id = gen_FlashTable[index].ext_id;
+				pdevinfo->blocksize = gen_FlashTable[index].blocksize;
+				pdevinfo->addr_cycle = gen_FlashTable[index].addr_cycle;
+				pdevinfo->iowidth = gen_FlashTable[index].iowidth;
+				pdevinfo->timmingsetting = gen_FlashTable[index].timmingsetting;
+				pdevinfo->advancedmode = gen_FlashTable[index].advancedmode;
+				pdevinfo->pagesize = gen_FlashTable[index].pagesize;
+				pdevinfo->totalsize = gen_FlashTable[index].totalsize;
+				memcpy(pdevinfo->devciename,gen_FlashTable[index].devciename,sizeof(pdevinfo->devciename));
+				printk(KERN_INFO "Device found in MTK table, ID: %x\n", id);
+
+				goto find;
+			}
+		}
+	}
+#if 0
+	for (index = 0; cust_FlashTable[index].id != 0; index++) /* dead code: optional customer-supplied flash table */
+	{
+		if (id == cust_FlashTable[index].id)
+		{
+			pdevinfo->id = cust_FlashTable[index].id;
+			pdevinfo->blocksize = cust_FlashTable[index].blocksize;
+			pdevinfo->addr_cycle = cust_FlashTable[index].addr_cycle;
+			pdevinfo->iowidth = cust_FlashTable[index].iowidth;
+			pdevinfo->timmingsetting = cust_FlashTable[index].timmingsetting;
+			pdevinfo->advancedmode = cust_FlashTable[index].advancedmode;
+			pdevinfo->pagesize = cust_FlashTable[index].pagesize;
+			pdevinfo->totalsize = cust_FlashTable[index].totalsize;
+			memcpy(pdevinfo->devciename, cust_FlashTable[index].devciename, sizeof(pdevinfo->devciename));
+			printk(KERN_INFO "Device found in customer table, ID: %x\n", id);
+
+			goto find;
+		}
+	}
+#endif
+
+find:
+	if(0==pdevinfo->id) /* NOTE(review): miss detection relies on the caller zero-initializing *pdevinfo -- confirm callers do so */
+	{
+		printk(KERN_INFO "Device not found, ID: %x\n", id);
+		return false;
+	}
+	else
+	{
+		return true;
+	}
+}
+/******************************************************************************
+ * mt6573_nand_irq_handler
+ *
+ * DESCRIPTION:
+ * NAND interrupt handler!
+ *
+ * PARAMETERS:
+ * int irq
+ * void *dev_id
+ *
+ * RETURNS:
+ * IRQ_HANDLED : Successfully handle the IRQ
+ *
+ * NOTES:
+ * None
+ *
+ ******************************************************************************/
+/* Modified for TCM used */
+//static __tcmfunc irqreturn_t mt6573_nand_irq_handler(int irqno, void *dev_id)
+static irqreturn_t mt6573_nand_irq_handler(int irqno, void *dev_id) /* NFI interrupt handler: signals AHB DMA completion */
+{
+	u16 u16IntStatus = DRV_Reg16(NFI_INTR_REG16); /* fetch pending interrupt status */
+	(void)irqno;
+
+	if (u16IntStatus & (u16)INTR_AHB_DONE_EN) /* AHB transfer finished */
+	{
+		complete(&g_comp_AHB_Done); /* wake the thread waiting in the data-transfer path */
+	}
+	return IRQ_HANDLED;
+}
+
+/******************************************************************************
+ * ECC_Config
+ *
+ * DESCRIPTION:
+ * Configure HW ECC!
+ *
+ * PARAMETERS:
+ * struct mt6573_nand_host_hw *hw
+ *
+ * RETURNS:
+ * None
+ *
+ * NOTES:
+ * None
+ *
+ ******************************************************************************/
+static int ECC_Config(struct mt6573_nand_host_hw *hw) /* program ECC encoder/decoder for 4-bit ECC; returns 0 on success, -1 on decoder-idle timeout */
+{
+	u32 u4ENCODESize;
+	u32 u4DECODESize;
+
+	u32 timeout = 0xFFFF;
+
+	DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE); /* disable decoder before reconfiguring */
+	do{
+		timeout--;
+		if(timeout == 0){
+			printk("NFI ECC Config: ECC_DECIDLE timeout\n");
+			return -1;
+		}
+	}while (!DRV_Reg16(ECC_DECIDLE_REG16));
+
+	DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE); /* disable encoder; NOTE(review): this idle wait has no timeout, unlike the decoder wait above */
+	do{;}while (!DRV_Reg32(ECC_ENCIDLE_REG32));
+
+	/* setup FDM register base */
+	DRV_WriteReg32(ECC_FDMADDR_REG32, NFI_FDM0L_REG32);
+
+	/* Sector + FDM: encode message size in bits */
+	u4ENCODESize = (hw->nand_sec_size + 8) << 3;
+	/* Sector + FDM + YAFFS2 meta data bits: decode size adds 4 symbols * 13 bits parity */
+	u4DECODESize = ((hw->nand_sec_size + 8) << 3) + 4 * 13;
+
+	/* configure ECC decoder && encoder*/
+	DRV_WriteReg32(ECC_DECCNFG_REG32,
+		ECC_CNFG_ECC4|DEC_CNFG_NFI|DEC_CNFG_EMPTY_EN|DEC_CNFG_BURST_EN |
+		(u4DECODESize << DEC_CNFG_CODE_SHIFT));
+
+	DRV_WriteReg32(ECC_ENCCNFG_REG32,
+		ECC_CNFG_ECC4|ENC_CNFG_NFI|
+		(u4ENCODESize << ENC_CNFG_MSG_SHIFT));
+
+
+//#if USE_AHB_MODE
+	if(g_bUseAHBMode) /* AHB/DMA mode: hardware auto-corrects; PIO mode: report error locations only */
+		NFI_SET_REG32(ECC_DECCNFG_REG32, DEC_CNFG_CORRECT);
+	else
+		NFI_SET_REG32(ECC_DECCNFG_REG32, DEC_CNFG_EL);
+
+	return 0;
+}
+
+#ifdef NAND_ECC_TEST
+static void ECC_Config_All_Bits(int hw_ecc_bit) /* test helper (NAND_ECC_TEST): reprogram ECC strength to hw_ecc_bit bits and resize FDM/spare accordingly */
+{
+	int ecc_bit, ecc_bytes, spare_size, fdm_meta_size;
+	u32 u4ENCODESize;
+	u32 u4DECODESize;
+	u32 ecc_conf;
+
+	if(hw_ecc_bit < 4 || hw_ecc_bit > 16){ /* hardware supports even strengths 4..16 only */
+		printk("hw_ecc_bit is out of range:%d\n", hw_ecc_bit);
+		printk("hw_ecc_bit should be 4,6,8,10,12,14,16\n");
+		return;
+	}
+
+	//spare_size = 16 * (devinfo.pagesize / 512);
+	spare_size = g_spare_size;
+
+	ecc_bit = hw_ecc_bit*13; /* 13 parity bits per correctable bit (BCH over GF(2^13)) -- per register layout above, confirm */
+	ecc_bytes = (ecc_bit + (1<<3)-1)>>3; /* round parity bits up to whole bytes */
+
+	if(ecc_bytes > spare_size)
+	{
+		printk("ecc_bytes:%d > spare_size:%d\n", ecc_bytes, spare_size);
+		return;
+	}
+
+	fdm_meta_size = spare_size - ecc_bytes; /* whatever spare remains after parity becomes FDM metadata */
+	if(fdm_meta_size > 8)
+	{
+		fdm_meta_size = 8; /* FDM is capped at 8 bytes per sector */
+	}
+	printk("ECC_Config_All_Bits:spare_size=%d,ecc_bytes=%d, fdm_meta_size=%d\n",spare_size, ecc_bytes, fdm_meta_size);
+	g_hw_ecc_bit = hw_ecc_bit;
+
+	ecc_conf = (hw_ecc_bit - 4)/2; /* register encoding: 4->0, 6->1, ... 16->6 */
+
+	NFI_CLN_REG32(ECC_DECCNFG_REG32, ECC_CNFG_ECC_MASK);
+	NFI_SET_REG32(ECC_DECCNFG_REG32, ecc_conf);
+
+	NFI_CLN_REG32(ECC_ENCCNFG_REG32, ECC_CNFG_ECC_MASK);
+	NFI_SET_REG32(ECC_ENCCNFG_REG32, ecc_conf);
+
+
+	mt6573_nand_configure_fdm(fdm_meta_size); //FDM SIZE=FDM ECC SIZE
+
+	/*Configure spare size*/
+	NFI_CLN_REG16(NFI_PAGEFMT_REG16, PAGEFMT_SPARE_MASK);
+
+	switch(g_spare_size) /* only 16/26/27/28-byte spare layouts are selectable; others keep the cleared field */
+	{
+		case 16:
+			NFI_SET_REG16(NFI_PAGEFMT_REG16, PAGEFMT_SPARE_16 << PAGEFMT_SPARE_SHIFT);
+			break;
+		case 26:
+			NFI_SET_REG16(NFI_PAGEFMT_REG16, PAGEFMT_SPARE_26 << PAGEFMT_SPARE_SHIFT);
+			break;
+		case 27:
+			NFI_SET_REG16(NFI_PAGEFMT_REG16, PAGEFMT_SPARE_27 << PAGEFMT_SPARE_SHIFT);
+			break;
+		case 28:
+			NFI_SET_REG16(NFI_PAGEFMT_REG16, PAGEFMT_SPARE_28 << PAGEFMT_SPARE_SHIFT);
+			break;
+		default:
+			break;
+	}
+
+	u4ENCODESize = (fdm_meta_size<<3)+(1<<(9+3)); /* message bits = FDM bytes*8 + 512-byte sector*8 */
+
+
+	printk("ECC_Config_All_Bits:u4ENCODESize=%x\n",u4ENCODESize);
+	/* Set-up ECC encode message size = data + FDM_ECC */
+	NFI_CLN_REG32(ECC_ENCCNFG_REG32, ENC_CNFG_MSG_MASK);
+	NFI_SET_REG32(ECC_ENCCNFG_REG32, u4ENCODESize << ENC_CNFG_MSG_SHIFT);
+
+
+	/* Set-up ECC decode message size = ECC message + T*13 */
+	u4DECODESize = u4ENCODESize + ecc_bit;
+	printk("ECC_Config_All_Bits:u4DECODESize=%x\n",u4DECODESize);
+	NFI_CLN_REG32(ECC_DECCNFG_REG32, DEC_CNFG_CODE_MASK);
+	NFI_SET_REG32(ECC_DECCNFG_REG32, u4DECODESize << DEC_CNFG_CODE_SHIFT);
+
+	dump_nfi(); /* dump the resulting register state for the tester */
+	return;
+
+}
+#endif
+
+/******************************************************************************
+ * ECC_Decode_Start
+ *
+ * DESCRIPTION:
+ * HW ECC Decode Start !
+ *
+ * PARAMETERS:
+ * None
+ *
+ * RETURNS:
+ * None
+ *
+ * NOTES:
+ * None
+ *
+ ******************************************************************************/
+static void ECC_Decode_Start(void) /* enable the HW ECC decoder; busy-waits for idle first (NOTE: no timeout) */
+{
+	/* wait for device returning idle */
+	while(!(DRV_Reg16(ECC_DECIDLE_REG16) & DEC_IDLE));
+	DRV_WriteReg16(ECC_DECCON_REG16, DEC_EN);
+}
+
+/******************************************************************************
+ * ECC_Decode_End
+ *
+ * DESCRIPTION:
+ *   Wait for the BCH decode engine to report idle, then disable HW decoding.
+ *
+ * PARAMETERS:
+ *   None
+ *
+ * RETURNS:
+ *   None
+ *
+ * NOTES:
+ *   Busy-waits with no timeout; hangs if the engine never goes idle.
+ *
+ ******************************************************************************/
+static void ECC_Decode_End(void)
+{
+    /* wait for device returning idle */
+    while(!(DRV_Reg16(ECC_DECIDLE_REG16) & DEC_IDLE));
+    DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE);
+}
+
+/******************************************************************************
+ * ECC_Encode_Start
+ *
+ * DESCRIPTION:
+ *   Wait for the BCH encode engine to report idle, then enable HW encoding.
+ *
+ * PARAMETERS:
+ *   None
+ *
+ * RETURNS:
+ *   None
+ *
+ * NOTES:
+ *   Busy-waits with no timeout; memory barrier orders the enable write.
+ *
+ ******************************************************************************/
+static void ECC_Encode_Start(void)
+{
+    /* wait for device returning idle */
+    while(!(DRV_Reg32(ECC_ENCIDLE_REG32) & ENC_IDLE));
+    mb();
+    DRV_WriteReg16(ECC_ENCCON_REG16, ENC_EN);
+}
+
+/******************************************************************************
+ * ECC_Encode_End
+ *
+ * DESCRIPTION:
+ *   Wait for the BCH encode engine to report idle, then disable HW encoding.
+ *
+ * PARAMETERS:
+ *   None
+ *
+ * RETURNS:
+ *   None
+ *
+ * NOTES:
+ *   Busy-waits with no timeout; memory barrier orders the disable write.
+ *
+ ******************************************************************************/
+static void ECC_Encode_End(void)
+{
+    /* wait for device returning idle */
+    while(!(DRV_Reg32(ECC_ENCIDLE_REG32) & ENC_IDLE));
+    mb();
+    DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE);
+}
+
+/******************************************************************************
+ * mt6573_nand_check_bch_error
+ *
+ * DESCRIPTION:
+ *   Wait for BCH decode-done on sector u4SecIndex, then read the per-sector
+ *   error counts and update mtd->ecc_stats (failed/corrected) accordingly.
+ *
+ * PARAMETERS:
+ *   struct mtd_info *mtd
+ *   u8* pDataBuf
+ *   u32 u4SecIndex
+ *   u32 u4PageAddr
+ *
+ * RETURNS:
+ *   true if the page is clean or correctable; false on timeout or an
+ *   uncorrectable sector
+ *
+ * NOTES:
+ ******************************************************************************/
+static bool mt6573_nand_check_bch_error(
+ struct mtd_info *mtd, u8* pDataBuf, u32 u4SecIndex, u32 u4PageAddr)
+{
+ bool bRet = true;
+ u16 u2SectorDoneMask = 1 << u4SecIndex;
+ u32 u4ErrorNumDebug, i, u4ErrNum, u4ErrorNumDebug1;
+ u32 timeout = 0xFFFF;
+ u32 correct_count = 0;
+ // int el;
+//#if !USE_AHB_MODE
+ #if 0
+ u32 au4ErrBitLoc[6];
+ u32 u4ErrByteLoc, u4BitOffset;
+ u32 u4ErrBitLoc1th, u4ErrBitLoc2nd;
+ #endif
+//#endif
+
+ //4 // Wait for Decode Done
+ while (0 == (u2SectorDoneMask & DRV_Reg16(ECC_DECDONE_REG16)))
+ {
+ timeout--;
+ if (0 == timeout)
+ {
+ printk("mt6573_nand_check_bch_error return timeout \n");
+ //dump_nfi();
+ return false;
+ }
+ }
+//#if (USE_AHB_MODE)
+
+ if(g_bUseAHBMode){
+ u4ErrorNumDebug = DRV_Reg32(ECC_DECENUM0_REG32);
+ u4ErrorNumDebug1 = DRV_Reg32(ECC_DECENUM1_REG32);
+ if ((0 != (u4ErrorNumDebug & 0xFFFFF)) || (0 != (u4ErrorNumDebug1 & 0xFFFFF)))
+ {
+ for (i = 0; i <= u4SecIndex; ++i)
+ {
+ if(i<4){
+ u4ErrNum = DRV_Reg32(ECC_DECENUM0_REG32) >> (i*5);
+ }
+ else{
+ u4ErrNum = DRV_Reg32(ECC_DECENUM1_REG32) >> ((i-4)*5);
+ }
+ u4ErrNum &= 0x1F;
+ correct_count += u4ErrNum;
+
+ if (0x1F == u4ErrNum)
+ {
+ mtd->ecc_stats.failed++;
+ bRet = false;
+ printk("UnCorrectable at PageAddr=%d, Sector=%d\n", u4PageAddr, i);
+ }
+ else
+ {
+ if (u4ErrNum)
+ {
+ printk("Correct %d at PageAddr=%d, Sector=%d\n", u4ErrNum, u4PageAddr, i);
+ }
+ }
+ }
+ if ((correct_count > 2) && bRet)
+ {
+ mtd->ecc_stats.corrected++;
+ }
+ else
+ {
+ //printk("Less than 2 bit error, ignore\n");
+ }
+ }
+ }
+ else{
+ #if 0 //correct function for PIO mode need develop
+ /* We will manually correct the error bits in the last sector, not all the sectors of the page!*/
+ memset(au4ErrBitLoc, 0x0, sizeof(au4ErrBitLoc));
+ u4ErrorNumDebug = DRV_Reg32(ECC_DECENUM0_REG32);
+ u4ErrNum = DRV_Reg32(ECC_DECENUM0_REG32) >> (u4SecIndex*5);
+ u4ErrNum &= 0x1F;
+
+ if (u4ErrNum)
+ {
+ if (0x1F == u4ErrNum)
+ {
+ mtd->ecc_stats.failed++;
+ bRet = false;
+ printk("UnCorrectable at PageAddr=%d\n", u4PageAddr);
+ }
+ else
+ {
+ for (i = 0; i < ((u4ErrNum+1)>>1); ++i)
+ {
+ au4ErrBitLoc[i] = DRV_Reg32(ECC_DECEL0_REG32 + i);
+ u4ErrBitLoc1th = au4ErrBitLoc[i] & 0x1FFF;
+
+ if (u4ErrBitLoc1th < 0x1000)
+ {
+ u4ErrByteLoc = u4ErrBitLoc1th/8;
+ u4BitOffset = u4ErrBitLoc1th%8;
+ pDataBuf[u4ErrByteLoc] = pDataBuf[u4ErrByteLoc]^(1<<u4BitOffset);
+ mtd->ecc_stats.corrected++;
+ }
+ else
+ {
+ mtd->ecc_stats.failed++;
+ printk("UnCorrectable ErrLoc=%d\n", au4ErrBitLoc[i]);
+ }
+ u4ErrBitLoc2nd = (au4ErrBitLoc[i] >> 16) & 0x1FFF;
+ if (0 != u4ErrBitLoc2nd)
+ {
+ if (u4ErrBitLoc2nd < 0x1000)
+ {
+ u4ErrByteLoc = u4ErrBitLoc2nd/8;
+ u4BitOffset = u4ErrBitLoc2nd%8;
+ pDataBuf[u4ErrByteLoc] = pDataBuf[u4ErrByteLoc]^(1<<u4BitOffset);
+ mtd->ecc_stats.corrected++;
+ }
+ else
+ {
+ mtd->ecc_stats.failed++;
+ printk("UnCorrectable High ErrLoc=%d\n", au4ErrBitLoc[i]);
+ }
+ }
+ }
+ }
+ if (0 == (DRV_Reg16(ECC_DECFER_REG16) & (1 << u4SecIndex)))
+ {
+ bRet = false;
+ printk("mt6573_nand_check_bch_error return false 1111\n");
+ }
+ }
+ #endif
+ }
+
+ return bRet;
+}
+
+/******************************************************************************
+ * mt6573_nand_RFIFOValidSize
+ *
+ * DESCRIPTION:
+ *   Poll until the NFI read FIFO holds at least u2Size bytes.
+ *
+ * PARAMETERS:
+ *   u16 u2Size
+ *
+ * RETURNS:
+ *   true once the FIFO holds u2Size bytes; false on poll timeout
+ *
+ * NOTES:
+ *   None
+ *
+ ******************************************************************************/
+static bool mt6573_nand_RFIFOValidSize(u16 u2Size)
+{
+    u32 timeout = 0xFFFF;
+    while (FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) < u2Size)
+    {
+        timeout--;
+        if (0 == timeout){
+            return false;
+        }
+    }
+    return true;
+}
+
+/******************************************************************************
+ * mt6573_nand_WFIFOValidSize
+ *
+ * DESCRIPTION:
+ *   Poll until the NFI write FIFO has drained to at most u2Size bytes.
+ *
+ * PARAMETERS:
+ *   u16 u2Size
+ *
+ * RETURNS:
+ *   true once the FIFO level is <= u2Size bytes; false on poll timeout
+ *
+ * NOTES:
+ *   None
+ *
+ ******************************************************************************/
+static bool mt6573_nand_WFIFOValidSize(u16 u2Size)
+{
+    u32 timeout = 0xFFFF;
+    while (FIFO_WR_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) > u2Size)
+    {
+        timeout--;
+        if (0 == timeout)
+        {
+            return false;
+        }
+    }
+    return true;
+}
+
+/******************************************************************************
+ * mt6573_nand_status_ready
+ *
+ * DESCRIPTION:
+ *   Poll NFI_STA until every bit in u4Status has cleared (controller idle
+ *   with respect to those state bits).
+ *
+ * PARAMETERS:
+ *   u32 u4Status
+ *
+ * RETURNS:
+ *   true once all requested status bits are clear; false on poll timeout
+ *
+ * NOTES:
+ *   None
+ *
+ ******************************************************************************/
+static bool mt6573_nand_status_ready(u32 u4Status)
+{
+    u32 timeout = 0xFFFF;
+    while ((DRV_Reg32(NFI_STA_REG32) & u4Status) != 0)
+    {
+        timeout--;
+        if (0 == timeout)
+        {
+            return false;
+        }
+    }
+    return true;
+}
+
+/******************************************************************************
+ * mt6573_nand_reset
+ *
+ * DESCRIPTION:
+ *   Flush the NFI FIFOs and reset the NFI controller state machine,
+ *   waiting for the bus master to drain first if it is busy.
+ *
+ * PARAMETERS:
+ *   None
+ *
+ * RETURNS:
+ *   true if the controller reached idle with empty read/write FIFOs
+ *
+ * NOTES:
+ *   None
+ *
+ ******************************************************************************/
+static bool mt6573_nand_reset(void)
+{
+    // HW recommended reset flow
+    int timeout = 0xFFFF;
+    if (DRV_Reg16(NFI_MASTERSTA_REG16)) // master is busy
+    {
+        mb();
+        DRV_WriteReg16(NFI_CON_REG16, CON_FIFO_FLUSH | CON_NFI_RST);
+        while (DRV_Reg16(NFI_MASTERSTA_REG16))
+        {
+            timeout--;
+            if (!timeout) /* NOTE(review): logs once but keeps spinning; timeout underflows */
+            {
+                MSG(INIT, "Wait for NFI_MASTERSTA timeout\n");
+            }
+        }
+    }
+    /* issue reset operation */
+    mb();
+    DRV_WriteReg16(NFI_CON_REG16, CON_FIFO_FLUSH | CON_NFI_RST);
+
+    return mt6573_nand_status_ready(STA_NFI_FSM_MASK|STA_NAND_BUSY) &&
+        mt6573_nand_RFIFOValidSize(0) &&
+        mt6573_nand_WFIFOValidSize(0);
+}
+
+/******************************************************************************
+ * mt6573_nand_set_mode
+ *
+ * DESCRIPTION:
+ *   Set the operation mode field of NFI_CNFG (read-modify-write).
+ *
+ * PARAMETERS:
+ *   u16 u2OpMode (read/write)
+ *
+ * RETURNS:
+ *   None
+ *
+ * NOTES:
+ *   None
+ *
+ ******************************************************************************/
+static void mt6573_nand_set_mode(u16 u2OpMode)
+{
+    u16 u2Mode = DRV_Reg16(NFI_CNFG_REG16);
+    u2Mode &= ~CNFG_OP_MODE_MASK;
+    u2Mode |= u2OpMode;
+    DRV_WriteReg16(NFI_CNFG_REG16, u2Mode);
+}
+
+/******************************************************************************
+ * mt6573_nand_set_autoformat
+ *
+ * DESCRIPTION:
+ *   Enable or disable the HW auto-format feature; it is only enabled when
+ *   both the global switch g_bAutoFMT and the caller request it.
+ *
+ * PARAMETERS:
+ *   bool bEnable (Enable/Disable)
+ *
+ * RETURNS:
+ *   None
+ *
+ * NOTES:
+ *   None
+ *
+ ******************************************************************************/
+static void mt6573_nand_set_autoformat(bool bEnable)
+{
+    if (g_bAutoFMT && bEnable)
+    {
+        NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AUTO_FMT_EN);
+    }
+    else
+    {
+        NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AUTO_FMT_EN);
+    }
+}
+
+/******************************************************************************
+ * mt6573_nand_configure_fdm
+ *
+ * DESCRIPTION:
+ *   Program both the FDM size and FDM ECC size fields of NFI_PAGEFMT
+ *   to u2FDMSize.
+ *
+ * PARAMETERS:
+ *   u16 u2FDMSize
+ *
+ * RETURNS:
+ *   None
+ *
+ * NOTES:
+ *   None
+ *
+ ******************************************************************************/
+static void mt6573_nand_configure_fdm(u16 u2FDMSize)
+{
+    NFI_CLN_REG16(NFI_PAGEFMT_REG16, PAGEFMT_FDM_MASK | PAGEFMT_FDM_ECC_MASK);
+    NFI_SET_REG16(NFI_PAGEFMT_REG16, u2FDMSize << PAGEFMT_FDM_SHIFT);
+    NFI_SET_REG16(NFI_PAGEFMT_REG16, u2FDMSize << PAGEFMT_FDM_ECC_SHIFT);
+}
+
+/******************************************************************************
+ * mt6573_nand_configure_lock
+ *
+ * DESCRIPTION:
+ *   Program the NFI lock address-cycle counts, and on CHIPVER_ECO_1 parts
+ *   also initialize all 16 lock regions and turn the lock feature on.
+ *
+ * PARAMETERS:
+ *   None
+ *
+ * RETURNS:
+ *   None
+ *
+ * NOTES:
+ *   None
+ *
+ ******************************************************************************/
+static void mt6573_nand_configure_lock(void)
+{
+    u32 u4WriteColNOB = 2;
+    u32 u4WriteRowNOB = 3;
+    u32 u4EraseColNOB = 0;
+    u32 u4EraseRowNOB = 3;
+    DRV_WriteReg16(NFI_LOCKANOB_REG16,
+        (u4WriteColNOB << PROG_CADD_NOB_SHIFT) |
+        (u4WriteRowNOB << PROG_RADD_NOB_SHIFT) |
+        (u4EraseColNOB << ERASE_CADD_NOB_SHIFT) |
+        (u4EraseRowNOB << ERASE_RADD_NOB_SHIFT));
+
+    if (CHIPVER_ECO_1 == g_u4ChipVer)
+    {
+        int i;
+        for (i = 0; i < 16; ++i)
+        {
+            DRV_WriteReg32(NFI_LOCK00ADD_REG32 + (i << 1), 0xFFFFFFFF);
+            DRV_WriteReg32(NFI_LOCK00FMT_REG32 + (i << 1), 0xFFFFFFFF);
+        }
+        //DRV_WriteReg16(NFI_LOCKANOB_REG16, 0x0);
+        DRV_WriteReg32(NFI_LOCKCON_REG32, 0xFFFFFFFF);
+        DRV_WriteReg16(NFI_LOCK_REG16, NFI_LOCK_ON);
+    }
+}
+
+static bool mt6573_nand_pio_ready(void)
+{
+    int count = 0;
+    while ( !(DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) ) /* spin on PIO data-ready flag */
+    {
+        count++;
+        if (count > 0xffff)
+        {
+            printk("PIO_DIRDY timeout\n");
+            return false; /* data never became ready */
+        }
+    }
+
+    return true;
+}
+
+/******************************************************************************
+ * mt6573_nand_set_command
+ *
+ * DESCRIPTION:
+ *   Write one command opcode to the NFI command register and wait for the
+ *   command state machine to finish.
+ *
+ * PARAMETERS:
+ *   u16 command
+ *
+ * RETURNS:
+ *   true once the command state cleared; false on poll timeout
+ *
+ * NOTES:
+ *   None
+ *
+ ******************************************************************************/
+static bool mt6573_nand_set_command(u16 command)
+{
+    /* Write command to device */
+    mb();
+
+    DRV_WriteReg16(NFI_CMD_REG16, command);
+    return mt6573_nand_status_ready(STA_CMD_STATE);
+}
+
+/******************************************************************************
+ * mt6573_nand_set_address
+ *
+ * DESCRIPTION:
+ *   Load the column/row address registers and the number of address cycles,
+ *   then wait for the address state machine to finish.
+ *
+ * PARAMETERS:
+ *   u32 u4ColAddr, u32 u4RowAddr, u16 u2ColNOB, u16 u2RowNOB
+ *
+ * RETURNS:
+ *   true once the address state cleared; false on poll timeout
+ *
+ * NOTES:
+ *   None
+ *
+ ******************************************************************************/
+static bool mt6573_nand_set_address(u32 u4ColAddr, u32 u4RowAddr, u16 u2ColNOB, u16 u2RowNOB)
+{
+    u32 coladdr = u4ColAddr, rowaddr = u4RowAddr;
+    /* fill cycle addr */
+    mb();
+
+    DRV_WriteReg32(NFI_COLADDR_REG32, coladdr);
+    DRV_WriteReg32(NFI_ROWADDR_REG32, rowaddr);
+    DRV_WriteReg16(NFI_ADDRNOB_REG16, u2ColNOB|(u2RowNOB << ADDR_ROW_NOB_SHIFT));
+    return mt6573_nand_status_ready(STA_ADDR_STATE);
+}
+
+/******************************************************************************
+ * mt6573_nand_check_RW_count
+ *
+ * DESCRIPTION:
+ *   Wait until the NFI sector counter reaches the number of 512-byte
+ *   sectors contained in u2WriteSize.
+ *
+ * PARAMETERS:
+ *   u16 u2WriteSize
+ *
+ * RETURNS:
+ *   true once all sectors were transferred; false on poll timeout
+ *
+ * NOTES:
+ *   None
+ *
+ ******************************************************************************/
+static bool mt6573_nand_check_RW_count(u16 u2WriteSize)
+{
+    u32 timeout = 0xFFFF;
+    u16 u2SecNum = u2WriteSize >> 9; /* bytes -> 512B sectors */
+
+    while (ADDRCNTR_CNTR(DRV_Reg16(NFI_ADDRCNTR_REG16)) < u2SecNum)
+    {
+        timeout--;
+        if (0 == timeout)
+        {
+            printk("[%s] timeout\n", __FUNCTION__);
+            return false;
+        }
+    }
+    return true;
+}
+
+/******************************************************************************
+ * mt6573_nand_ready_for_read
+ *
+ * DESCRIPTION:
+ *   Reset the NFI, configure ECC/AHB/auto-format, then issue the read
+ *   command and address cycles for page u4RowAddr.
+ *
+ * PARAMETERS:
+ *   struct nand_chip *nand, u32 u4RowAddr, u32 u4ColAddr, bool full, u8 *buf
+ *
+ * RETURNS:
+ *   true if command and address were accepted; false on any timeout
+ *
+ * NOTES:
+ *   None
+ ******************************************************************************/
+static bool mt6573_nand_ready_for_read(struct nand_chip *nand, u32 u4RowAddr, u32 u4ColAddr, bool full, u8 *buf)
+{
+ /* Reset NFI HW internal state machine and flush NFI in/out FIFO */
+ bool bRet = false;
+ u16 sec_num = 1 << (nand->page_shift - 9);
+ u32 col_addr = u4ColAddr;
+ u32 colnob=2, rownob;
+
+
+ if (nand->options & NAND_BUSWIDTH_16)
+ col_addr /= 2;
+
+ if (!mt6573_nand_reset())
+ {
+ goto cleanup;
+ }
+ if(g_bHwEcc){
+ /* Enable HW ECC */
+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
+ }else{
+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
+ }
+
+ mt6573_nand_set_mode(CNFG_OP_READ);
+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
+ DRV_WriteReg16(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT);
+
+ if (full)
+ {
+ if(g_bUseAHBMode){
+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AHB);
+ DRV_WriteReg32(NFI_STRADDR_REG32, virt_to_phys(buf));
+ }
+ else{
+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
+ }
+
+ if(g_bHwEcc){
+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
+ }else{
+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
+ }
+
+ }
+ else
+ {
+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
+ }
+
+ mt6573_nand_set_autoformat(full);
+ if (full){
+ if(g_bHwEcc){
+ ECC_Decode_Start();
+ }
+ }
+ if((devinfo.pagesize == 512) && (u4ColAddr == devinfo.pagesize)){ // read oob for 512 page size
+ if (!mt6573_nand_set_command(NAND_CMD_READOOB))
+ {
+ goto cleanup;
+ }
+ }
+ else{
+
+ if (!mt6573_nand_set_command(NAND_CMD_READ0))
+ {
+ goto cleanup;
+ }
+ }
+
+ if(devinfo.pagesize == 512)
+ colnob = 1;
+ else
+ colnob = 2;
+
+ rownob=devinfo.addr_cycle - colnob;
+
+ //1 FIXED ME: For Any Kind of AddrCycle
+ if (!mt6573_nand_set_address(col_addr, u4RowAddr, colnob, rownob))
+ {
+ goto cleanup;
+ }
+
+ if(devinfo.pagesize != 512){
+ if (!mt6573_nand_set_command(NAND_CMD_READSTART))
+ {
+ goto cleanup;
+ }
+ }
+
+ if (!mt6573_nand_status_ready(STA_NAND_BUSY))
+ {
+ goto cleanup;
+ }
+
+ bRet = true;
+
+cleanup:
+ return bRet;
+}
+
+/**********************************************************
+Description : send a pointer command and reset the NFI engine
+Input : command - pointer opcode (0x00 main / 0x50 spare on 512B-page parts)
+Output : none
+***********************************************************/
+static void SAL_NFI_Pointer_Operation(u16 command)
+{
+#if 0
+    kal_uint32 reg_val = 0;
+
+    DRV_WriteReg(NFI_CMD, ptr_cmd);
+    while (DRV_Reg32(NFI_STA) & STA_CMD_STATE);
+    reg_val = DRV_Reg(NFI_CON);
+    reg_val |= CON_NFI_RST;
+    DRV_WriteReg(NFI_CON, reg_val);
+#endif
+    mb();
+
+    DRV_WriteReg16(NFI_CMD_REG16, command);
+    mt6573_nand_status_ready(STA_CMD_STATE);
+
+    NFI_SET_REG16(NFI_CON_REG16, CON_NFI_RST);
+
+}
+
+/******************************************************************************
+ * mt6573_nand_ready_for_write
+ *
+ * DESCRIPTION:
+ *   Reset the NFI, configure ECC/AHB/auto-format, then issue the program
+ *   command and address cycles for page u4RowAddr.
+ *
+ * PARAMETERS:
+ *   struct nand_chip *nand, u32 u4RowAddr, u32 col_addr, bool full, u8 *buf
+ *
+ * RETURNS:
+ *   true if command and address were accepted; false on any timeout
+ *
+ * NOTES:
+ *   None
+ ******************************************************************************/
+static bool mt6573_nand_ready_for_write(
+ struct nand_chip *nand, u32 u4RowAddr, u32 col_addr, bool full, u8 *buf)
+{
+ bool bRet = false;
+ u32 sec_num = 1 << (nand->page_shift - 9);
+ u32 colnob=2, rownob;
+
+ if (nand->options & NAND_BUSWIDTH_16)
+ col_addr /= 2;
+
+
+ /* Reset NFI HW internal state machine and flush NFI in/out FIFO */
+ if (!mt6573_nand_reset())
+ {
+ return false;
+ }
+
+
+
+ mt6573_nand_set_mode(CNFG_OP_PRGM);
+
+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
+
+ DRV_WriteReg16(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT);
+
+ if (full)
+ {
+ if(g_bUseAHBMode){
+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AHB);
+ DRV_WriteReg32(NFI_STRADDR_REG32, virt_to_phys(buf));
+ }
+ else{
+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
+ }
+
+ if(g_bHwEcc){
+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
+ }else{
+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
+ }
+ }
+ else
+ {
+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
+ }
+
+ mt6573_nand_set_autoformat(full);
+
+ if (full){
+ if(g_bHwEcc){
+ ECC_Encode_Start();
+ }
+ }
+
+
+ if(devinfo.pagesize == 512){
+ if(col_addr == devinfo.pagesize){ //write oob
+ SAL_NFI_Pointer_Operation(0x50);
+ }
+ else{
+ SAL_NFI_Pointer_Operation(0);
+ }
+ }
+
+
+ if (!mt6573_nand_set_command(NAND_CMD_SEQIN)){
+ goto cleanup;
+ }
+
+ if(devinfo.pagesize == 512)
+ colnob = 1;
+ else
+ colnob = 2;
+
+ rownob=devinfo.addr_cycle - colnob;
+
+ //1 FIXED ME: For Any Kind of AddrCycle
+ if (!mt6573_nand_set_address(col_addr, u4RowAddr, colnob, rownob)){
+ goto cleanup;
+ }
+
+ if (!mt6573_nand_status_ready(STA_NAND_BUSY)){
+ goto cleanup;
+ }
+
+ bRet = true;
+cleanup:
+
+ return bRet;
+}
+
+static bool mt6573_nand_check_dececc_done(u32 u4SecNum)
+{
+    u32 timeout, dec_mask;
+    timeout = 0xffff;
+    dec_mask = (1<<u4SecNum)-1; /* one done bit per sector */
+    while((dec_mask != (DRV_Reg(ECC_DECDONE_REG16) & 0xFF)) && timeout>0){
+        timeout--;
+    }
+    if(timeout == 0){
+        /* not all sector decode-done bits set within the poll budget */
+        /* (debug dump intentionally disabled here) */
+
+        return false;
+    }
+    return true;
+}
+
+/******************************************************************************
+ * mt6573_nand_read_page_data
+ *
+ * DESCRIPTION:
+ *   Transfer page data from the device into the buffer, via the DMA or
+ *   PIO helper below depending on g_bUseAHBMode.
+ *
+ * PARAMETERS:
+ *   u8* pDataBuf, u32 u4Size
+ *
+ * RETURNS:
+ *   true on success; false on transfer timeout
+ *
+ * NOTES:
+ *   None
+ ******************************************************************************/
+static bool mt6573_nand_dma_read_data(struct mtd_info *mtd, u8 *buf, u32 length)
+{
+ int interrupt_en = g_i4Interrupt;
+ int timeout = 0xffff;
+#if 0
+ struct scatterlist sg;
+ enum dma_data_direction dir = DMA_FROM_DEVICE;
+
+ sg_init_one(&sg, buf, length);
+ dma_map_sg(&(mtd->dev), &sg, 1, dir);
+#endif
+
+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
+ // DRV_WriteReg32(NFI_STRADDR_REG32, __virt_to_phys(pDataBuf));
+
+ if ((unsigned int)buf % 16) // TODO: can not use AHB mode here
+ {
+ printk(KERN_INFO "Un-16-aligned address\n");
+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_DMA_BURST_EN);
+ }
+ else
+ {
+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_DMA_BURST_EN);
+ }
+
+ DRV_Reg16(NFI_INTR_REG16);
+ DRV_WriteReg16(NFI_INTR_EN_REG16, INTR_AHB_DONE_EN);
+
+ //dump_nfi();
+ //printk("NFI_STRADDR_REG32=%x\n", DRV_Reg32(NFI_STRADDR_REG32));
+
+ if (interrupt_en)
+ {
+ init_completion(&g_comp_AHB_Done);
+ }
+ //dmac_inv_range(pDataBuf, pDataBuf + u4Size);
+ mb();
+ NFI_SET_REG16(NFI_CON_REG16, CON_NFI_BRD);
+
+ if (interrupt_en)
+ {
+ if (!wait_for_completion_timeout(&g_comp_AHB_Done, 10))
+ {
+ MSG(READ, "wait for completion timeout happened @ [%s]: %d\n", __FUNCTION__, __LINE__);
+ printk( "[%s] wait for completion timeout happened\n", __FUNCTION__);
+ dump_nfi();
+ return false;
+ }
+ while ( (length >> 9) > ((DRV_Reg16(NFI_BYTELEN_REG16) & 0xf000) >> 12) )
+ {
+ timeout--;
+ if (0 == timeout)
+ {
+ printk(KERN_ERR "[%s] poll BYTELEN error\n", __FUNCTION__);
+ return false; //4 // AHB Mode Time Out!
+ }
+ }
+ }
+ else
+ {
+ while (!DRV_Reg16(NFI_INTR_REG16))
+ {
+ timeout--;
+
+ if (0 == timeout)
+ {
+ printk(KERN_ERR "[%s] poll nfi_intr error\n", __FUNCTION__);
+ dump_nfi();
+ return false; //4 // AHB Mode Time Out!
+ }
+ }
+ while ( (length >> 9) > ((DRV_Reg16(NFI_BYTELEN_REG16) & 0xf000) >> 12) )
+ {
+ timeout--;
+
+ if (0 == timeout)
+ {
+ printk( "[%s] poll BYTELEN error\n", __FUNCTION__);
+ dump_nfi();
+ return false; //4 // AHB Mode Time Out!
+ }
+ }
+ }
+
+#if 0
+ dma_unmap_sg(&(mtd->dev), &sg, 1, dir);
+#endif
+
+ return true;
+}
+
+static bool mt6573_nand_mcu_read_data(u8 *buf, u32 length)
+{
+ int timeout = 0xffff;
+ u32 i, sec_num, sec_idx, temp;
+ u32* buf32 = (u32 *)buf;
+ int oob_per_sector = 16;
+#ifdef TESTTIME
+ unsigned long long time1,time2;
+ time1 = sched_clock();
+#endif
+ if ((u32)buf % 4 || length % 4 || oob_per_sector % 4)
+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
+ else
+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
+
+ //DRV_WriteReg32(NFI_STRADDR_REG32, 0);
+ mb();
+ NFI_SET_REG16(NFI_CON_REG16, CON_NFI_BRD);
+
+ #ifdef NAND_ECC_TEST
+ oob_per_sector = g_spare_size;
+ #endif
+ if ((u32)buf % 4 || length % 4 || oob_per_sector % 4)
+ {
+ if(g_bAutoFMT || (length < NAND_SECTOR_SIZE)){
+ for (i = 0; (i < (length))&&(timeout > 0);)
+ {
+ //if (FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) >= 4)
+ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)
+ {
+ if(PIO_BIG_ENDIAN)
+ *buf++ = (u8)(DRV_Reg32(NFI_DATAR_REG32)>>24);
+ else
+ *buf++ = (u8)DRV_Reg32(NFI_DATAR_REG32);
+
+ i++;
+ }
+ else
+ {
+ timeout--;
+ }
+ if (0 == timeout)
+ {
+ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
+ dump_nfi();
+ return false;
+ }
+ }
+ }
+ else{
+ sec_num = length / NAND_SECTOR_SIZE;
+ for(sec_idx = 0 ; sec_idx < sec_num ; sec_idx++)
+ {
+ timeout = 0xFFFF;
+ for (i = 0; (i < (NAND_SECTOR_SIZE))&&(timeout > 0);)
+ {
+ //if (FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) >= 4)
+ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)
+ {
+
+ if(PIO_BIG_ENDIAN)
+ *buf++ = (u8)(DRV_Reg32(NFI_DATAR_REG32)>>24);
+ else
+ *buf++ = (u8)DRV_Reg32(NFI_DATAR_REG32);
+
+ i++;
+ }
+ else
+ {
+ timeout--;
+ }
+ if (0 == timeout)
+ {
+ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
+ dump_nfi();
+ return false;
+ }
+ }
+ for (i = 0; (i < (oob_per_sector))&&(timeout > 0);)
+ {
+ //if (FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) >= 4)
+ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)
+ {
+ if(PIO_BIG_ENDIAN)
+ *buf++ = (u8)(DRV_Reg32(NFI_DATAR_REG32)>>24);
+ else
+ *buf++ = (u8)DRV_Reg32(NFI_DATAR_REG32);
+
+ i++;
+ }
+ else
+ {
+ timeout--;
+ }
+ if (0 == timeout)
+ {
+ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
+ dump_nfi();
+ return false;
+ }
+ }
+ }
+ }
+ }
+ else
+ {
+ if(g_bAutoFMT || (length < NAND_SECTOR_SIZE)){
+ for (i = 0; (i < (length >> 2))&&(timeout > 0);)
+ {
+ //if (FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) >= 4)
+ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)
+ {
+ *buf32++ = DRV_Reg32(NFI_DATAR_REG32);
+ i++;
+ }
+ else
+ {
+ timeout--;
+ }
+ if (0 == timeout)
+ {
+ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
+ dump_nfi();
+ return false;
+ }
+ }
+ }
+ else{
+ sec_num = length / NAND_SECTOR_SIZE;
+ for(sec_idx = 0 ; sec_idx < sec_num ; sec_idx++)
+ {
+ timeout = 0xFFFF;
+ for (i = 0; (i < (NAND_SECTOR_SIZE >> 2))&&(timeout > 0);)
+ {
+ //if (FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) >= 4)
+ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)
+ {
+ *buf32++ = DRV_Reg32(NFI_DATAR_REG32);
+ i++;
+ }
+ else
+ {
+ timeout--;
+ }
+ if (0 == timeout)
+ {
+ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
+ dump_nfi();
+ return false;
+ }
+ }
+ for (i = 0; (i < (oob_per_sector >> 2))&&(timeout > 0);)
+ {
+ //if (FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) >= 4)
+ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)
+ {
+ temp = DRV_Reg32(NFI_DATAR_REG32);
+ i++;
+ }
+ else
+ {
+ timeout--;
+ }
+ if (0 == timeout)
+ {
+ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
+ dump_nfi();
+ return false;
+ }
+ }
+
+ }
+ }
+
+ }
+#ifdef TESTTIME
+ time2 = sched_clock()-time1;
+ if(!readdatatime)
+ {
+ readdatatime=(time2);
+ }
+#endif
+ return true;
+}
+
+static bool mt6573_nand_read_page_data(struct mtd_info *mtd, u8* pDataBuf, u32 u4Size)
+{
+    if(g_bUseAHBMode) /* dispatch: DMA (AHB) path vs. MCU PIO path */
+        return mt6573_nand_dma_read_data(mtd, pDataBuf, u4Size);
+    else
+        /* PIO helper does not need the mtd handle */
+        return mt6573_nand_mcu_read_data(pDataBuf, u4Size);
+
+}
+
+/******************************************************************************
+ * mt6573_nand_write_page_data
+ *
+ * DESCRIPTION:
+ *   Transfer page data from the buffer to the device, via the DMA or
+ *   PIO helper below depending on g_bUseAHBMode.
+ *
+ * PARAMETERS:
+ *   u8* pDataBuf, u32 u4Size
+ *
+ * RETURNS:
+ *   true on success; false on transfer timeout
+ *
+ * NOTES:
+ *   None
+ ******************************************************************************/
+static bool mt6573_nand_dma_write_data(struct mtd_info *mtd, u8 *pDataBuf, u32 u4Size)
+{
+ int i4Interrupt = g_i4Interrupt; //g_i4Interrupt;
+ u32 timeout = 0xFFFF;
+#if 0
+ struct scatterlist sg;
+ enum dma_data_direction dir = DMA_TO_DEVICE;
+ //u16 reg_status = 0;
+ sg_init_one(&sg, pDataBuf, u4Size);
+ dma_map_sg(&(mtd->dev), &sg, 1, dir);
+#endif
+
+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
+ DRV_Reg16(NFI_INTR_REG16);
+ DRV_WriteReg16(NFI_INTR_EN_REG16, 0);
+ // DRV_WriteReg32(NFI_STRADDR_REG32, (u32*)virt_to_phys(pDataBuf));
+
+ if ((unsigned int)pDataBuf % 16) // TODO: can not use AHB mode here
+ {
+ printk(KERN_INFO "Un-16-aligned address\n");
+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_DMA_BURST_EN);
+ }
+ else
+ {
+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_DMA_BURST_EN);
+ }
+
+ if (i4Interrupt)
+ {
+ init_completion(&g_comp_AHB_Done);
+ DRV_Reg16(NFI_INTR_REG16);
+ DRV_WriteReg16(NFI_INTR_EN_REG16, INTR_AHB_DONE_EN);
+ }
+ //dmac_clean_range(pDataBuf, pDataBuf + u4Size);
+ mb();
+ NFI_SET_REG16(NFI_CON_REG16, CON_NFI_BWR);
+
+
+ if (i4Interrupt)
+ {
+ if (!wait_for_completion_timeout(&g_comp_AHB_Done, 10))
+ {
+ MSG(READ, "wait for completion timeout happened @ [%s]: %d\n", __FUNCTION__, __LINE__);
+ printk("wait for completion timeout happened @ [%s]: %d\n", __FUNCTION__, __LINE__);
+ dump_nfi();
+ return false;
+ }
+ // wait_for_completion(&g_comp_AHB_Done);
+ }
+ else
+ {
+ while ( (u4Size >> 9) > ((DRV_Reg16(NFI_BYTELEN_REG16) & 0xf000) >> 12) )
+ {
+ timeout--;
+ if (0 == timeout)
+ {
+ printk( "[%s] poll BYTELEN error\n", __FUNCTION__);
+ return false; //4 // AHB Mode Time Out!
+ }
+ }
+ }
+
+#if 0
+ dma_unmap_sg(&(mtd->dev), &sg, 1, dir);
+#endif
+
+ return true;
+}
+
+static bool mt6573_nand_mcu_write_data(struct mtd_info *mtd, const u8 *buf, u32 length)
+{
+ u32 timeout = 0xFFFF;
+ u32 i, sec_idx, sec_num;
+ u32* pBuf32, *pOOBBuf32 = NULL;
+ int oob_per_sector = 16;
+ u8* pOOBBuf = NULL;
+ u32 data_w;
+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
+ mb();
+ NFI_SET_REG16(NFI_CON_REG16, CON_NFI_BWR);
+ pBuf32 = (u32*)buf;
+ #ifdef NAND_ECC_TEST
+ oob_per_sector = g_spare_size;
+ #endif
+
+ if ((u32)buf % 4 || length % 4 || oob_per_sector % 4)
+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
+ else
+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
+
+ if ((u32)buf % 4 || length % 4 || oob_per_sector % 4)
+ {
+ if(g_bAutoFMT || (length < NAND_SECTOR_SIZE)){
+ for (i = 0; (i < (length))&&(timeout > 0);)
+ {
+ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)
+ {
+ if(PIO_BIG_ENDIAN){
+ data_w = (*buf) << 24;
+ DRV_WriteReg32(NFI_DATAW_REG32, data_w);
+ buf++;
+ }
+ else{
+ DRV_WriteReg32(NFI_DATAW_REG32, *buf++);
+ }
+
+ i++;
+ }
+ else
+ {
+ timeout--;
+ }
+ if (0 == timeout)
+ {
+ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
+ dump_nfi();
+ return false;
+ }
+ }
+ }
+ else{
+ if(g_bOOB_Test){
+ pOOBBuf = local_oob_buf;
+ }
+ sec_num = length / NAND_SECTOR_SIZE;
+ for(sec_idx = 0 ; sec_idx < sec_num ; sec_idx++)
+ {
+ for (i = 0; (i < (NAND_SECTOR_SIZE))&&(timeout > 0);)
+ {
+ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)
+ {
+ if(PIO_BIG_ENDIAN){
+ data_w = (*buf) << 24;
+ DRV_WriteReg32(NFI_DATAW_REG32, data_w);
+ buf++;
+ }
+ else{
+ DRV_WriteReg32(NFI_DATAW_REG32, *buf++);
+ }
+ i++;
+ }
+ else
+ {
+ timeout--;
+ }
+ if (0 == timeout)
+ {
+ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
+ dump_nfi();
+ return false;
+ }
+ }
+ for (i = 0; (i < (oob_per_sector))&&(timeout > 0);)
+ {
+ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)
+ {
+ if(g_bOOB_Test && pOOBBuf != NULL){
+ if(PIO_BIG_ENDIAN){
+ data_w = (*pOOBBuf) << 24;
+ DRV_WriteReg32(NFI_DATAW_REG32, data_w);
+ pOOBBuf++;
+ }
+ else{
+ DRV_WriteReg32(NFI_DATAW_REG32, *pOOBBuf++);
+ }
+ }
+ else{
+ DRV_WriteReg32(NFI_DATAW_REG32, 0xff);
+ }
+ i++;
+ }
+ else
+ {
+ timeout--;
+ }
+ if (0 == timeout)
+ {
+ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
+ dump_nfi();
+ return false;
+ }
+ }
+ }
+
+ }
+ }
+ else
+ {
+ if(g_bAutoFMT || (length < NAND_SECTOR_SIZE)){
+ for (i = 0; (i < (length >> 2)) && (timeout > 0); )
+ {
+ // if (FIFO_WR_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) <= 12)
+ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)
+ {
+ DRV_WriteReg32(NFI_DATAW_REG32, *pBuf32++);
+ i++;
+ }
+ else
+ {
+ timeout--;
+ }
+ if (0 == timeout)
+ {
+ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
+ dump_nfi();
+ return false;
+ }
+ }
+ }
+ else{
+ if(g_bOOB_Test){
+ pOOBBuf32 = (u32*)local_oob_buf;
+ }
+ sec_num = length / NAND_SECTOR_SIZE;
+ for(sec_idx = 0 ; sec_idx < sec_num ; sec_idx++)
+ {
+ timeout = 0xFFFF;
+ for (i = 0; (i < (NAND_SECTOR_SIZE >> 2)) && (timeout > 0); )
+ {
+ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)
+ {
+ DRV_WriteReg32(NFI_DATAW_REG32, *pBuf32++);
+ i++;
+ }
+ else
+ {
+ timeout--;
+ }
+ if (0 == timeout)
+ {
+ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
+ dump_nfi();
+
+ return false;
+ }
+ }
+ for (i = 0; (i < (oob_per_sector >> 2)) && (timeout > 0); )
+ {
+ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)
+ {
+ if(g_bOOB_Test && (pOOBBuf32 != NULL)){
+ DRV_WriteReg32(NFI_DATAW_REG32, *pOOBBuf32++);
+ }
+ else{
+ DRV_WriteReg32(NFI_DATAW_REG32, 0xffffffff);
+ }
+ i++;
+ }
+ else
+ {
+ timeout--;
+ }
+ if (0 == timeout)
+ {
+ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
+ dump_nfi();
+ return false;
+ }
+ }
+ }
+ }
+
+
+
+ }
+
+ return true;
+}
+
+
+static bool mt6573_nand_write_page_data(struct mtd_info *mtd, u8* buf, u32 size)
+{
+    if(g_bUseAHBMode) /* dispatch: DMA (AHB) path vs. MCU PIO path */
+        return mt6573_nand_dma_write_data(mtd, buf, size);
+    else
+        return mt6573_nand_mcu_write_data(mtd, buf, size);
+}
+
+/******************************************************************************
+ * mt6573_nand_read_fdm_data
+ *
+ * DESCRIPTION:
+ *   Copy 8 bytes of FDM (spare/OOB) data per sector out of the NFI FDM
+ *   registers into pDataBuf.
+ *
+ * PARAMETERS:
+ *   u8* pDataBuf, u32 u4SecNum
+ *
+ * RETURNS:
+ *   None
+ *
+ * NOTES:
+ *   pDataBuf must hold at least u4SecNum * 8 bytes; no-op if NULL.
+ *
+ ******************************************************************************/
+static void mt6573_nand_read_fdm_data(u8* pDataBuf, u32 u4SecNum)
+{
+    u32 i;
+    u32* pBuf32 = (u32*)pDataBuf;
+
+    if (pBuf32)
+    {
+        for (i = 0; i < u4SecNum; ++i)
+        {
+            *pBuf32++ = DRV_Reg32(NFI_FDM0L_REG32 + (i<<1));
+            *pBuf32++ = DRV_Reg32(NFI_FDM0M_REG32 + (i<<1));
+            //*pBuf32++ = DRV_Reg32((u32)NFI_FDM0L_REG32 + (i<<3));
+            //*pBuf32++ = DRV_Reg32((u32)NFI_FDM0M_REG32 + (i<<3));
+        }
+    }
+}
+
+/******************************************************************************
+ * mt6573_nand_write_fdm_data
+ *
+ * DESCRIPTION:
+ *   Load 8 bytes of FDM (spare/OOB) data per sector into the NFI FDM
+ *   registers, appending an XOR checksum of the free OOB bytes when any
+ *   of them is not 0xFF.
+ *
+ * PARAMETERS:
+ *   u8* pDataBuf, u32 u4SecNum
+ *
+ * RETURNS:
+ *   None
+ *
+ * NOTES:
+ *   None
+ *
+ ******************************************************************************/
+static u8 fdm_buf[64]; /* staging copy; NOTE(review): caps u4SecNum at 8 -- confirm */
+static void mt6573_nand_write_fdm_data(struct nand_chip *chip, u8* pDataBuf, u32 u4SecNum)
+{
+    u32 i, j;
+    u8 checksum = 0;
+    bool empty = true;
+    struct nand_oobfree *free_entry;
+    u32* pBuf32;
+
+    memcpy(fdm_buf, pDataBuf, u4SecNum * 8);
+
+    /* XOR all bytes of the oobfree regions; checksum lands after the last region */
+    free_entry = chip->ecc.layout->oobfree;
+    for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free_entry[i].length; i++)
+    {
+        for (j = 0; j < free_entry[i].length; j++)
+        {
+            if (pDataBuf[free_entry[i].offset + j] != 0xFF)
+                empty = false;
+            checksum ^= pDataBuf[free_entry[i].offset + j];
+        }
+    }
+
+    if (!empty)
+    {
+        fdm_buf[free_entry[i-1].offset + free_entry[i-1].length] = checksum; /* i >= 1 here: !empty implies the loop ran */
+    }
+
+
+    pBuf32 = (u32*)fdm_buf;
+    for (i = 0; i < u4SecNum; ++i)
+    {
+        DRV_WriteReg32(NFI_FDM0L_REG32 + (i<<1), *pBuf32++);
+        DRV_WriteReg32(NFI_FDM0M_REG32 + (i<<1), *pBuf32++);
+        //DRV_WriteReg32((u32)NFI_FDM0L_REG32 + (i<<3), *pBuf32++);
+        //DRV_WriteReg32((u32)NFI_FDM0M_REG32 + (i<<3), *pBuf32++);
+    }
+}
+
+/******************************************************************************
+ * mt6573_nand_stop_read
+ *
+ * DESCRIPTION:
+ *   Stop read operation !
+ *
+ * PARAMETERS:
+ *   None
+ *
+ * RETURNS:
+ *   None
+ *
+ * NOTES:
+ *   None
+ *
+ ******************************************************************************/
+// Tear down a read transfer: clear the NFI burst-read enable bit, reset the
+// controller, close the hardware ECC decoder when it was in use, and mask
+// all NFI interrupts.
+static void mt6573_nand_stop_read(void)
+{
+	NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BRD);
+	mt6573_nand_reset();
+	if(g_bHwEcc){
+		ECC_Decode_End();
+	}
+	DRV_WriteReg16(NFI_INTR_EN_REG16, 0);
+}
+
+/******************************************************************************
+ * mt6573_nand_stop_write
+ *
+ * DESCRIPTION:
+ *   Stop write operation !
+ *
+ * PARAMETERS:
+ *   None
+ *
+ * RETURNS:
+ *   None
+ *
+ * NOTES:
+ *   None
+ *
+ ******************************************************************************/
+// Tear down a write transfer: clear the NFI burst-write enable bit, close the
+// hardware ECC encoder when it was in use, and mask all NFI interrupts.
+// NOTE(review): unlike mt6573_nand_stop_read() this does NOT reset the
+// controller — presumably intentional so the pending program can complete.
+static void mt6573_nand_stop_write(void)
+{
+	NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BWR);
+	if(g_bHwEcc){
+		ECC_Encode_End();
+	}
+	DRV_WriteReg16(NFI_INTR_EN_REG16, 0);
+}
+
+/******************************************************************************
+ * mt6573_nand_exec_read_page
+ *
+ * DESCRIPTION:
+ *   Read a page data !
+ *
+ * PARAMETERS:
+ *   struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize,
+ *   u8* pPageBuf, u8* pFDMBuf
+ *
+ * RETURNS:
+ *   None
+ *
+ * NOTES:
+ *   None
+ *
+ ******************************************************************************/
+// Read one physical page (row address u4RowAddr) into pPageBuf and its FDM/
+// OOB bytes into pFDMBuf.  The transfer lands in the 16-byte-aligned scratch
+// buffer local_buffer_16_align_r and is memcpy'd out at the end.
+// Returns 0 on success, -1 on any transfer/ECC failure (data may still have
+// been copied to pPageBuf in that case).
+int mt6573_nand_exec_read_page(
+	struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8* pPageBuf, u8* pFDMBuf)
+{
+	u8 *buf;
+	int bRet = 0;
+	struct nand_chip *nand = mtd->priv;
+	// One 512-byte sector per ECC chunk.
+	u32 u4SecNum = u4PageSize >> 9;
+#ifdef NAND_PFM
+	struct timeval pfm_time_read;
+#endif
+	PFM_BEGIN(pfm_time_read);
+
+	buf = local_buffer_16_align_r;
+
+
+	if (mt6573_nand_ready_for_read(nand, u4RowAddr, 0, true, buf))
+	{
+		if (!mt6573_nand_read_page_data(mtd, buf, u4PageSize))
+		{
+			printk("mt6573_nand_read_page_data return false\n");
+			bRet = -1;
+		}
+
+
+
+		if (!mt6573_nand_status_ready(STA_NAND_BUSY))
+		{
+			printk("mt6573_nand_status_ready return false\n");
+			bRet = -1;
+		}
+
+		//dump_buf(local_buffer_16_align, u4PageSize);
+		if(g_bHwEcc){
+			if(!mt6573_nand_check_dececc_done(u4SecNum))
+			{
+				bRet = -1;//ECC Done error
+			}
+		}
+
+		// FDM registers are only valid after the decode above completed.
+		mt6573_nand_read_fdm_data(pFDMBuf, u4SecNum);
+
+		if(g_bHwEcc){
+			if (!mt6573_nand_check_bch_error(mtd, buf, u4SecNum - 1, u4RowAddr))
+			{
+				bRet = -1; //ECC Done error
+			}
+		}
+		mt6573_nand_stop_read();
+	}
+	else
+		printk("mt6573 nand isn't ready for read\n");
+
+	// buf is always local_buffer_16_align_r here, so this copy always runs;
+	// the guard is a leftover from a variant that could read in place.
+	if (buf == local_buffer_16_align_r)
+		memcpy(pPageBuf, buf, u4PageSize);
+
+
+	PFM_END_R(pfm_time_read, u4PageSize + 32);
+	return bRet;
+}
+
+/******************************************************************************
+ * mt6573_nand_exec_write_page
+ *
+ * DESCRIPTION:
+ *   Write a page data !
+ *
+ * PARAMETERS:
+ *   struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize,
+ *   u8* pPageBuf, u8* pFDMBuf
+ *
+ * RETURNS:
+ *   None
+ *
+ * NOTES:
+ *   None
+ *
+ ******************************************************************************/
+// Program one physical page: stage pPageBuf in the aligned scratch buffer,
+// load the FDM registers from pFDMBuf, stream the data out, then issue
+// PAGEPROG and busy-wait for completion.  Returns 0 on success or -EIO when
+// the chip reports a program failure via waitfunc.
+int mt6573_nand_exec_write_page(
+	struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8* pPageBuf, u8* pFDMBuf)
+{
+	struct nand_chip *chip = mtd->priv;
+	// One 512-byte sector per ECC chunk.
+	u32 u4SecNum = u4PageSize >> 9;
+	u8 *buf;
+	u8 status;
+
+	//MSG(WRITE, "mt6573_nand_exec_write_page, page: 0x%x\n", u4RowAddr);
+#ifdef _MTK_NAND_DUMMY_DRIVER_
+	if (dummy_driver_debug)
+	{
+		unsigned long long time = sched_clock();
+		if (!((time * 123 + 59 ) % 32768))
+		{
+			printk(KERN_INFO "[NAND_DUMMY_DRIVER] Simulate write error at page: 0x%x\n", u4RowAddr);
+			return -EIO;
+		}
+	}
+#endif
+
+// NOTE(review): this declaration follows executable statements when
+// _MTK_NAND_DUMMY_DRIVER_ is defined — a C90 declaration-after-statement;
+// harmless under gcc defaults but worth confirming against the build flags.
+#ifdef NAND_PFM
+	struct timeval pfm_time_write;
+#endif
+	PFM_BEGIN(pfm_time_write);
+
+	memcpy(local_buffer_16_align_w, pPageBuf, mtd->writesize);
+	buf = local_buffer_16_align_w;
+
+
+	if (mt6573_nand_ready_for_write(chip, u4RowAddr, 0, true, buf))
+	{
+
+		mt6573_nand_write_fdm_data(chip, pFDMBuf, u4SecNum);
+
+		(void)mt6573_nand_write_page_data(mtd, buf, u4PageSize);
+		(void)mt6573_nand_check_RW_count(u4PageSize);
+		mt6573_nand_stop_write();
+		(void)mt6573_nand_set_command(NAND_CMD_PAGEPROG);
+		// Busy-poll until the device drops its busy indication.
+		while(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY);
+
+
+	}
+	else
+		printk("mt6573 nand isn't ready for write\n");
+
+	PFM_END_W(pfm_time_write, u4PageSize + 32);
+
+	status = chip->waitfunc(mtd, chip);
+
+	if (status & NAND_STATUS_FAIL){
+		return -EIO;
+	}
+	else
+		return 0;
+}
+
+#ifdef BLANK_PAGE_FIXUP
+#define NAND_MAX_OOBSIZE 256
+static int mt6573_nand_read_oob_raw(struct mtd_info *mtd, uint8_t * buf, int page_addr, int len);
+static int mt6573_nand_erase(struct mtd_info *mtd, int page);
+// Detect a "blank page written with ECC": returns 1 when the raw OOB of
+// `page` matches the fixed parity pattern that the 4-bit ECC engine produces
+// for an all-0xFF sector (1b 98 20 26 ff fc 6e 87 in bytes 8..15 of each
+// 16-byte OOB chunk), 0 otherwise.  Only implemented for layouts with
+// 32 ECC bytes; all other layouts conservatively return 0.
+static int check_ecc_0(struct mtd_info *mtd, int page)
+{
+	struct nand_chip *chip = mtd->priv;
+	u8 local_oob[NAND_MAX_OOBSIZE];
+
+	// for 4 bits ecc protection, the all 0xff is 1b 98 20 26 ff fc 6e 87
+	if (chip->ecc.layout->eccbytes == 32)
+	{
+		if (mt6573_nand_read_oob_raw(mtd, local_oob, page, mtd->oobsize) == 0)
+		{
+			int i;
+#if 0
+			printk("check_ecc_0 %x\n", page);
+			for (i = 0; i < 64; i++)
+			{
+				printk("%02X ", local_oob[i]);
+				if (((i+1) & 0x1f) == 0x0)
+					printk("\n");
+			}
+#endif
+			// Walk the first 64 raw OOB bytes; bytes at offsets 8..15 of
+			// every 16-byte chunk must equal the known all-0xFF parity.
+			for (i = 0; i < 64; i++)
+			{
+				switch (i & 0xf)
+				{
+					case 8:
+						if (local_oob[i] != 0x1b)
+							return 0;
+						else
+							break;
+					case 9:
+						if (local_oob[i] != 0x98)
+							return 0;
+						else
+							break;
+					case 10:
+						if (local_oob[i] != 0x20)
+							return 0;
+						else
+							break;
+					case 11:
+						if (local_oob[i] != 0x26)
+							return 0;
+						else
+							break;
+					case 12:
+						if (local_oob[i] != 0xff)
+							return 0;
+						else
+							break;
+					case 13:
+						if (local_oob[i] != 0xfc)
+							return 0;
+						else
+							break;
+					case 14:
+						if (local_oob[i] != 0x6e)
+							return 0;
+						else
+							break;
+					case 15:
+						if (local_oob[i] != 0x87)
+							return 0;
+						else
+							break;
+					default:
+						break;
+				}
+			}
+		}
+	}
+	else
+	{
+		printk("Not support FIX_ECC_0 now\n");
+		return 0;
+	}
+	//printk("clean page with ECC at %x\n", page);
+	return 1;
+}
+
+// Repair a blank page that carries valid ECC parity (see check_ecc_0):
+// read every page of the enclosing block into RAM, verify the flagged page
+// really has all-0xFF main data, erase the block, then re-program only the
+// pages that held real data/OOB content.  Any failure along the way logs and
+// aborts, leaving the flash in whatever state was reached.
+static void fix_ecc_0(struct mtd_info *mtd, int page)
+{
+	struct nand_chip *chip = mtd->priv;
+	u8 *block_buf = NULL;
+	u8 *data_buf = NULL;
+	u8 *oob_buf = NULL;
+	int i = 0, j = 0, data_is_empty = 1, oob_is_empty = 1, status = 0;
+	int page_per_block_shift = chip->phys_erase_shift - chip->page_shift;
+	int page_per_block = 1 << page_per_block_shift;
+	// First page of the block containing `page`.
+	int start_page = (page >> page_per_block_shift) << page_per_block_shift;
+
+	//printk("fix ecc 0 in page %x\n", page);
+	// One contiguous buffer holding (data + oob) for every page of the block.
+	block_buf = (unsigned char *) kmalloc(((mtd->writesize + mtd->oobsize) * page_per_block), GFP_KERNEL);
+	if(!block_buf)
+	{
+		printk("%s:can not allocate buffer\n", __func__);
+		return;
+	}
+	memset(block_buf, 0xff, ((mtd->writesize + mtd->oobsize) * page_per_block));
+
+	// read all pages in the block
+	for(i = 0; i < page_per_block; i++)
+	{
+		data_buf = (block_buf + (i * (mtd->writesize + mtd->oobsize)));
+		oob_buf = (block_buf + (i * (mtd->writesize + mtd->oobsize)) + mtd->writesize);
+		status = mt6573_nand_exec_read_page(mtd, (start_page + i), mtd->writesize, data_buf, oob_buf);
+		if (0 != status)
+		{
+			printk("%s: fix_ecc_0 0x%x read data error\n", __func__, (start_page + i));
+			kfree(block_buf);
+			return;
+		}
+	}
+
+	// Double-check the flagged page's main area is truly blank before
+	// destroying the block.
+	data_buf = (block_buf + ((page - start_page) * (mtd->writesize + mtd->oobsize)));
+	for (i = 0; i < mtd->writesize; i++)
+	{
+		if (data_buf[i] != 0xFF)
+		{
+			data_is_empty = 0;
+			break;
+		}
+	}
+
+	if (!data_is_empty)
+	{
+		printk("%s: fix_ecc_0 0x%x isn't real blank page\n", __func__, page);
+		kfree(block_buf);
+		return;
+	}
+
+	// erase the block
+	status = mt6573_nand_erase(mtd, start_page);
+	if (0 != status)
+	{
+		printk("%s: fix_ecc_0 0x%x erase error\n", __func__, start_page);
+		kfree(block_buf);
+		return;
+	}
+
+	// program, skip all 0xff pages
+	for(i = 0; i < page_per_block; i++)
+	{
+		data_is_empty = 1;
+		data_buf = (block_buf + (i * (mtd->writesize + mtd->oobsize)));
+		oob_buf = (block_buf + (i * (mtd->writesize + mtd->oobsize)) + mtd->writesize);
+		for (j = 0; j < mtd->writesize; j++)
+		{
+			if (data_buf[j] != 0xFF)
+			{
+				data_is_empty = 0;
+				break;
+			}
+		}
+
+		// Only bother scanning the OOB when the main data was empty; if the
+		// data is non-empty the page is rewritten regardless, so the stale
+		// oob_is_empty value cannot suppress a needed write.
+		if (0 != data_is_empty)
+		{
+			oob_is_empty = 1;
+			for (j = 0; j < mtd->oobsize; j++)
+			{
+				if (oob_buf[j] != 0xFF)
+				{
+					oob_is_empty = 0;
+					break;
+				}
+			}
+		}
+
+		if (!data_is_empty || !oob_is_empty)
+		{
+			// write page
+			status = mt6573_nand_exec_write_page(mtd, (start_page + i), mtd->writesize, data_buf, oob_buf);
+			if (0 != status)
+			{
+				printk("%s: fix_ecc_0 0x%x write error \n", __func__, (start_page + i));
+				kfree(block_buf);
+				return;
+			}
+#if 0
+			{
+				printk("fixed page %x\n", start_page + i);
+				printk("data_buf %x\n", mtd->writesize);
+				for (j = 0; j < mtd->writesize; j++)
+				{
+					printk("%02X ", data_buf[j]);
+					if (((j+1) & 0x1f) == 0x0)
+						printk("\n");
+				}
+
+				printk("oob_buf %x\n", mtd->oobsize);
+				for (j = 0; j < mtd->oobsize; j++)
+				{
+					printk("%02X ", oob_buf[j]);
+					if (((j+1) & 0x1f) == 0x0)
+						printk("\n");
+				}
+			}
+#endif
+		}
+	}
+
+	kfree(block_buf);
+	return;
+}
+#endif
+
+/******************************************************************************
+ *
+ * Write a page to a logical address
+ *
+ *****************************************************************************/
+// MTD write_page hook: translate the logical page through the BMT (bad-block
+// mapping table), optionally stash the mapping index into the OOB for BMT
+// pool blocks, run the BLANK_PAGE_FIXUP repair when needed, and finally
+// program the physical page.  On a program failure the BMT is asked to remap
+// the block; only an unsuccessful remap is reported as -EIO.
+// NOTE(review): offset/data_len/oob_required/cached/raw are accepted for the
+// nand_chip->write_page signature but ignored — the full page is written.
+ static int mt6573_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+                uint32_t offset, int data_len, const uint8_t *buf,
+                int oob_required, int page, int cached, int raw)
+#if 0
+static int mt6573_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+				const u8 *buf, int page, int cached, int raw)
+#endif
+{
+	int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
+	int block = page / page_per_block;
+	u16 page_in_block = page % page_per_block;
+	u16 phy_block_bbt;
+	int mapped_block = get_mapping_block_index(block, &phy_block_bbt);
+
+	//printk("mt6573_nand_write_page enter: page addr 0x%x\n", page_in_block + mapped_block * page_per_block);
+	#ifdef TCSUPPORT_NAND_BMT
+	// write bad index into oob
+	#if 0
+	if (mapped_block != block)
+	{
+		set_bad_index_to_oob(chip->oob_poi, block);
+	}
+	else
+	{
+		set_bad_index_to_oob(chip->oob_poi, FAKE_INDEX);
+	}
+	#endif
+	if(block_is_in_bmt_region(mapped_block))
+	{
+		memcpy(chip->oob_poi + OOB_INDEX_OFFSET, &phy_block_bbt, OOB_INDEX_SIZE);
+	}
+	#endif
+
+#ifdef BLANK_PAGE_FIXUP
+	if (check_ecc_0(mtd, (page_in_block + (mapped_block * page_per_block))))
+	{
+		fix_ecc_0(mtd, (page_in_block + (mapped_block * page_per_block)));
+	}
+#endif
+
+	if (mt6573_nand_exec_write_page(mtd, page_in_block + mapped_block * page_per_block, mtd->writesize, (u8 *)buf, chip->oob_poi))
+	{
+		MSG(INIT, "write fail at block: 0x%x, page: 0x%x\n", mapped_block, page_in_block);
+		if (update_bmt((page_in_block + mapped_block * page_per_block) << chip->page_shift,
+				UPDATE_WRITE_FAIL, (u8 *)buf, chip->oob_poi))
+		{
+			MSG(INIT, "Update BMT success\n");
+			return 0;
+		}
+		else
+		{
+			MSG(INIT, "Update BMT fail\n");
+			return -EIO;
+		}
+	}
+
+	return 0;
+}
+
+
+//-------------------------------------------------------------------------------
+/*
+static void mt6573_nand_command_sp(
+ struct mtd_info *mtd, unsigned int command, int column, int page_addr)
+{
+ g_u4ColAddr = column;
+ g_u4RowAddr = page_addr;
+
+ switch(command)
+ {
+ case NAND_CMD_STATUS:
+ break;
+
+ case NAND_CMD_READID:
+ break;
+
+ case NAND_CMD_RESET:
+ break;
+
+ case NAND_CMD_RNDOUT:
+ case NAND_CMD_RNDOUTSTART:
+ case NAND_CMD_RNDIN:
+ case NAND_CMD_CACHEDPROG:
+ case NAND_CMD_STATUS_MULTI:
+ default:
+ break;
+ }
+
+}
+*/
+#ifdef TCSUPPORT_NAND_BMT
+// Translate a logical page number into the physical page number after BMT
+// bad-block remapping (same block-split arithmetic as the read/write paths).
+static int mt6573_nand_erase_mapping_page(struct mtd_info *mtd, int page)
+{
+	// get mapping
+	struct nand_chip *chip = mtd->priv;
+	int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
+	int page_in_block = page % page_per_block;
+	int block = page / page_per_block;
+	u16 phy_block_bbt;
+
+	int mapped_block = get_mapping_block_index(block, &phy_block_bbt);
+
+	return (page_in_block + page_per_block * mapped_block);
+}
+
+
+// React to an erase failure observed at physical page `page`: ask the BMT to
+// remap the block.  Returns 0 when the remap succeeded, NAND_STATUS_FAIL
+// otherwise.  Called from mt6573_nand_read_byte() when an ERASE2 status read
+// reports failure.
+static int mt6573_nand_erase_status(struct mtd_info *mtd, int page)
+{
+	struct nand_chip *chip = mtd->priv;
+
+	if (update_bmt( page << chip->page_shift,
+			UPDATE_ERASE_FAIL, NULL, NULL))
+	{
+		MSG(INIT, "Erase fail at block page addr: 0x%x, update BMT success\n", page);
+		return 0;
+	}
+	else
+	{
+		MSG(INIT, "Erase fail at block page addr: 0x%x, update BMT fail\n", page);
+		return NAND_STATUS_FAIL;
+	}
+
+
+	// NOTE(review): unreachable — both branches above already returned.
+	return 0;
+}
+#endif
+
+/******************************************************************************
+ * mt6573_nand_command_bp
+ *
+ * DESCRIPTION:
+ *   Handle the commands from MTD !
+ *
+ * PARAMETERS:
+ *   struct mtd_info *mtd, unsigned int command, int column, int page_addr
+ *
+ * RETURNS:
+ *   None
+ *
+ * NOTES:
+ *   None
+ *
+ ******************************************************************************/
+// nand_chip->cmdfunc for big-page (and, here, also small-page) devices.
+// Most commands only record state in the global g_kCMD and defer the actual
+// flash access to the read/write-buf hooks; ERASE/STATUS/RESET/READID are
+// driven on the NFI controller immediately.
+static void mt6573_nand_command_bp(struct mtd_info *mtd, unsigned int command,
+                                int column, int page_addr)
+{
+	struct nand_chip* nand = mtd->priv;
+#ifdef NAND_PFM
+	struct timeval pfm_time_erase;
+#endif
+	int timeout;
+	int page_address = page_addr;
+
+
+	//printk("mt6573_nand_command_bp:0x%x, 0x%x, 0x%x\n", command, column, page_addr);
+	switch (command)
+	{
+	case NAND_CMD_SEQIN:
+		// Start of a program sequence: clear the cached OOB/data pointers
+		// and latch the target row/column for the later PAGEPROG.
+		/* Reset g_kCMD */
+		//if (g_kCMD.u4RowAddr != page_addr) {
+			memset(g_kCMD.au1OOB, 0xFF, sizeof(g_kCMD.au1OOB));
+			g_kCMD.pDataBuf = NULL;
+		//}
+		g_kCMD.u4RowAddr = page_addr;
+		g_kCMD.u4ColAddr = column;
+		break;
+
+	case NAND_CMD_PAGEPROG:
+		// Commit the data/OOB captured since SEQIN; skipped when neither a
+		// data buffer was supplied nor any OOB byte was touched.
+		if (g_kCMD.pDataBuf || (0xFF != g_kCMD.au1OOB[0]))
+		{
+			u8* pDataBuf = g_kCMD.pDataBuf ? g_kCMD.pDataBuf : nand->buffers->databuf;
+			mt6573_nand_exec_write_page(mtd, g_kCMD.u4RowAddr, mtd->writesize, pDataBuf, g_kCMD.au1OOB);
+			g_kCMD.u4RowAddr = (u32)-1;
+			g_kCMD.u4OOBRowAddr = (u32)-1;
+		}
+		break;
+
+	case NAND_CMD_READOOB:
+		// Record a column address beyond writesize so read_buf serves OOB.
+		g_kCMD.u4RowAddr = page_addr;
+		g_kCMD.u4ColAddr = column + mtd->writesize;
+		#ifdef NAND_PFM
+		g_kCMD.pureReadOOB = 1;
+		g_kCMD.pureReadOOBNum += 1;
+		#endif
+		break;
+
+	case NAND_CMD_READ0:
+		g_kCMD.u4RowAddr = page_addr;
+		g_kCMD.u4ColAddr = column;
+		#ifdef NAND_PFM
+		g_kCMD.pureReadOOB = 0;
+		#endif
+		break;
+
+	case NAND_CMD_ERASE1:
+		// First half of an erase: map through the BMT (if enabled) and send
+		// ERASE1 plus the row address to the controller.
+		PFM_BEGIN(pfm_time_erase);
+		#ifdef TCSUPPORT_NAND_BMT
+		page_address = mt6573_nand_erase_mapping_page(mtd, page_addr);
+		#endif
+		(void)mt6573_nand_reset();
+		mt6573_nand_set_mode(CNFG_OP_ERASE);
+		(void)mt6573_nand_set_command(NAND_CMD_ERASE1);
+
+		// Small-page parts use one fewer column-address cycle.
+		if(devinfo.pagesize == 512){
+			(void)mt6573_nand_set_address(0,page_address,0,devinfo.addr_cycle-1);
+		}
+		else{
+			(void)mt6573_nand_set_address(0,page_address,0,devinfo.addr_cycle-2);
+		}
+		#ifdef TCSUPPORT_NAND_BMT
+		// Remember the physical page so read_byte can update the BMT if the
+		// subsequent status read reports an erase failure.
+		g_kCMD.u4RowAddr = page_address;
+		#endif
+		break;
+
+	case NAND_CMD_ERASE2:
+		(void)mt6573_nand_set_command(NAND_CMD_ERASE2);
+		// Busy-poll until the erase finishes.
+		while(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY);
+		PFM_END_E(pfm_time_erase);
+		#ifdef TCSUPPORT_NAND_BMT
+		g_bReadEraseStatus = true;
+		#endif
+		break;
+
+	case NAND_CMD_STATUS:
+		// Single-byte status read: byte-wide PIO, no AHB/DMA, no HW ECC.
+		(void)mt6573_nand_reset();
+		NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
+		mt6573_nand_set_mode(CNFG_OP_SRD);
+		mt6573_nand_set_mode(CNFG_READ_EN);
+		NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
+		NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
+		(void)mt6573_nand_set_command(NAND_CMD_STATUS);
+		NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_NOB_MASK);
+		mb();
+		DRV_WriteReg16(NFI_CON_REG16, CON_NFI_SRD|(1 << CON_NFI_NOB_SHIFT));
+		// Tell read_byte to restore AHB/ECC config after delivering the byte.
+		g_bcmdstatus = true;
+		break;
+
+	case NAND_CMD_RESET:
+		(void)mt6573_nand_reset();
+		//mt6573_nand_exec_reset_device();
+		break;
+
+	case NAND_CMD_READID:
+		mt6573_nand_reset();
+		/* Disable HW ECC */
+		NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
+		NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
+
+		#if 1
+		/* Reset NFI state machine */
+		mt6573_nand_reset();
+
+		/* Issue NAND chip reset command for Micron's MCP */
+		NFI_ISSUE_COMMAND(NAND_CMD_RESET, 0, 0, 0, 0);
+
+		// Crude fixed-count delay loop after the device reset.
+		timeout = TIMEOUT_4;
+
+		while(timeout)
+			timeout--;
+		#endif
+
+
+		/* Disable 16-bit I/O */
+		//NFI_CLN_REG16(NFI_PAGEFMT_REG16, PAGEFMT_DBYTE_EN);
+
+		NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN|CNFG_BYTE_RW);
+		(void)mt6573_nand_reset();
+		mt6573_nand_set_mode(CNFG_OP_SRD);
+		(void)mt6573_nand_set_command(NAND_CMD_READID);
+		(void)mt6573_nand_set_address(0,0,1,0);
+		mb();
+		DRV_WriteReg16(NFI_CON_REG16, CON_NFI_SRD);
+		while(DRV_Reg32(NFI_STA_REG32) & STA_DATAR_STATE);
+		break;
+
+	default:
+		BUG();
+		break;
+	}
+ }
+
+/******************************************************************************
+ * mt6573_nand_select_chip
+ *
+ * DESCRIPTION:
+ *   Select a chip !
+ *
+ * PARAMETERS:
+ *   struct mtd_info *mtd, int chip
+ *
+ * RETURNS:
+ *   None
+ *
+ * NOTES:
+ *   None
+ *
+ ******************************************************************************/
+// nand_chip->select_chip hook.  On the very first deselect (chip == -1,
+// before g_bInitDone) it performs one-time setup: programs the NFI page
+// format register from mtd->writesize and installs the cmdfunc.  Afterwards
+// a chip number of 0/1 is written straight to the chip-select register and
+// -1 (deselect) is a no-op.
+static void mt6573_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+
+	if (chip == -1 && false == g_bInitDone)
+	{
+		struct nand_chip *nand = mtd->priv;
+		/* Setup PageFormat */
+		if (4096 == mtd->writesize) {
+			NFI_SET_REG16(NFI_PAGEFMT_REG16, (PAGEFMT_SPARE_16 << PAGEFMT_SPARE_SHIFT) | PAGEFMT_4K);
+			nand->cmdfunc = mt6573_nand_command_bp;
+		} else if (2048 == mtd->writesize) {
+			NFI_SET_REG16(NFI_PAGEFMT_REG16, (PAGEFMT_SPARE_16 << PAGEFMT_SPARE_SHIFT) | PAGEFMT_2K);
+			nand->cmdfunc = mt6573_nand_command_bp;
+		} else if (512 == mtd->writesize) {
+			NFI_SET_REG16(NFI_PAGEFMT_REG16, (PAGEFMT_SPARE_16 << PAGEFMT_SPARE_SHIFT) | PAGEFMT_512);
+			// Small-page parts share the big-page cmdfunc in this driver.
+			//nand->cmdfunc = mt6573_nand_command_sp;
+			nand->cmdfunc = mt6573_nand_command_bp;
+		}
+		g_bInitDone = true;
+	}
+	switch(chip)
+	{
+	case -1:
+		break;
+	case 0:
+	case 1:
+		DRV_WriteReg16(NFI_CSEL_REG16, chip);
+		break;
+	}
+}
+
+/******************************************************************************
+ * mt6573_nand_read_byte
+ *
+ * DESCRIPTION:
+ *   Read a byte of data !
+ *
+ * PARAMETERS:
+ *   struct mtd_info *mtd
+ *
+ * RETURNS:
+ *   None
+ *
+ * NOTES:
+ *   None
+ *
+ ******************************************************************************/
+// nand_chip->read_byte hook: fetch one byte from the NFI PIO data register.
+// After a STATUS command (g_bcmdstatus set by cmdfunc) it also restores the
+// AHB/HW-ECC configuration it had to disable; with TCSUPPORT_NAND_BMT it
+// additionally inspects the byte after an erase to trigger BMT remapping.
+static uint8_t mt6573_nand_read_byte(struct mtd_info *mtd)
+{
+#if 0
+	//while(0 == FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)));
+	/* Check the PIO bit is ready or not */
+	u32 timeout = TIMEOUT_4;
+	uint8_t retval = 0;
+	WAIT_NFI_PIO_READY(timeout);
+
+	retval = DRV_Reg8(NFI_DATAR_REG32);
+	MSG(INIT, "mt6573_nand_read_byte (0x%x)\n", retval);
+
+	if(g_bcmdstatus)
+	{
+		NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AHB);
+		NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
+		g_bcmdstatus = false;
+	}
+
+	return retval;
+#endif
+	uint8_t retval = 0;
+	int value = 0;
+
+	if (!mt6573_nand_pio_ready())
+	{
+		// NOTE(review): on timeout this falls through and still reads the
+		// data register; `retval = false` is only a provisional 0.
+		printk("pio ready timeout\n");
+		retval = false;
+	}
+
+	if(g_bcmdstatus)
+	{
+		// Status byte path: pick the right lane for big-endian PIO, then
+		// undo the STATUS-command register setup (NOB field, AHB, HW ECC).
+		if(PIO_BIG_ENDIAN){
+			value = DRV_Reg8(NFI_DATAR_REG32);
+			retval = (uint8_t)(value >> 24);
+		}
+		else{
+			retval = DRV_Reg8(NFI_DATAR_REG32);
+		}
+		NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_NOB_MASK);
+		mt6573_nand_reset();
+		if(g_bUseAHBMode){
+			NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AHB);
+		}
+
+		if(g_bHwEcc){
+			NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
+		}else{
+			NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
+		}
+		g_bcmdstatus = false;
+	}
+	else{
+		//printk("retval=%x\n",DRV_Reg8(NFI_DATAR_REG32));
+		if(PIO_BIG_ENDIAN){
+			value = DRV_Reg8(NFI_DATAR_REG32);
+			retval = (uint8_t)(value >> 24);
+		}
+		else{
+			retval = DRV_Reg8(NFI_DATAR_REG32);
+		}
+	}
+
+//printk("retval=%x\n",retval);
+	#ifdef TCSUPPORT_NAND_BMT
+	if(g_bReadEraseStatus){
+		// The byte just read is the post-ERASE2 status; on failure let the
+		// BMT remap the block recorded in g_kCMD.u4RowAddr by ERASE1.
+		//printk("mt6573_nand_read_byte:%x\n", g_kCMD.u4RowAddr);
+		if (retval & NAND_STATUS_FAIL){
+			mt6573_nand_erase_status(mtd, g_kCMD.u4RowAddr);
+		}
+		g_bReadEraseStatus = false;
+	}
+	#endif
+	return retval;
+}
+
+/******************************************************************************
+ * mt6573_nand_read_buf
+ *
+ * DESCRIPTION:
+ *   Read NAND data !
+ *
+ * PARAMETERS:
+ *   struct mtd_info *mtd, uint8_t *buf, int len
+ *
+ * RETURNS:
+ *   None
+ *
+ * NOTES:
+ *   None
+ *
+ ******************************************************************************/
+// nand_chip->read_buf hook.  Serves `len` bytes starting at the column
+// recorded in g_kCMD by READ0/READOOB.  Full-page reads from column 0 go
+// straight into `buf`; partial reads and OOB reads go through the chip's
+// databuf / cached au1OOB, re-reading the page only when the cached OOB row
+// (u4OOBRowAddr) does not match.  Advances g_kCMD.u4ColAddr by len.
+static void mt6573_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	struct nand_chip* nand = (struct nand_chip*)mtd->priv;
+	struct mt6573_CMD* pkCMD = &g_kCMD;
+	u32 u4ColAddr = pkCMD->u4ColAddr;
+	u32 u4PageSize = mtd->writesize;
+
+
+	if (u4ColAddr < u4PageSize)
+	{
+		if ((u4ColAddr == 0) && (len >= u4PageSize))
+		{
+			// Whole page requested: read directly into the caller's buffer
+			// and append OOB bytes if the request extends past the page.
+			mt6573_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize,
+					buf, pkCMD->au1OOB);
+			if (len > u4PageSize)
+			{
+				u32 u4Size = min(len - u4PageSize, sizeof(pkCMD->au1OOB));
+				memcpy(buf + u4PageSize, pkCMD->au1OOB, u4Size);
+			}
+		}
+		else
+		{
+			// Partial read: bounce through the chip's page buffer.
+			mt6573_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize,
+					nand->buffers->databuf, pkCMD->au1OOB);
+			memcpy(buf, nand->buffers->databuf + u4ColAddr, len);
+		}
+		pkCMD->u4OOBRowAddr = pkCMD->u4RowAddr;
+	}
+	else
+	{
+		// OOB-only read: refresh the cached OOB if it belongs to another row.
+		u32 u4Offset = u4ColAddr - u4PageSize;
+		u32 u4Size = min(len - u4Offset, sizeof(pkCMD->au1OOB));
+		if (pkCMD->u4OOBRowAddr != pkCMD->u4RowAddr)
+		{
+			mt6573_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize,
+					nand->buffers->databuf, pkCMD->au1OOB);
+			pkCMD->u4OOBRowAddr = pkCMD->u4RowAddr;
+		}
+		memcpy(buf, pkCMD->au1OOB + u4Offset, u4Size);
+	}
+	//dump_buf(buf, len);
+	pkCMD->u4ColAddr += len;
+}
+
+/******************************************************************************
+ * mt6573_nand_write_buf
+ *
+ * DESCRIPTION:
+ *   Write NAND data !
+ *
+ * PARAMETERS:
+ *   struct mtd_info *mtd, const uint8_t *buf, int len
+ *
+ * RETURNS:
+ *   None
+ *
+ * NOTES:
+ *   None
+ *
+ ******************************************************************************/
+// nand_chip->write_buf hook.  Nothing is programmed here: data-area writes
+// merely record the caller's buffer pointer in g_kCMD (PAGEPROG does the real
+// work), while OOB-area writes are AND-merged into the cached au1OOB —
+// mirroring NAND semantics where programming can only clear bits.
+static void mt6573_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+	struct mt6573_CMD* pkCMD = &g_kCMD;
+	u32 u4ColAddr = pkCMD->u4ColAddr;
+	u32 u4PageSize = mtd->writesize;
+	int i4Size, i;
+
+//printk("mt6573_nand_write_buf enter\n");
+
+	if (u4ColAddr >= u4PageSize)
+	{
+		u32 u4Offset = u4ColAddr - u4PageSize;
+		u8* pOOB = pkCMD->au1OOB + u4Offset;
+		i4Size = min(len, (int)(sizeof(pkCMD->au1OOB) - u4Offset));
+
+		for (i = 0; i < i4Size; i++)
+		{
+			// AND, not assign: emulate program-only-clears-bits behaviour.
+			pOOB[i] &= buf[i];
+		}
+	}
+	else
+	{
+		// Data area: just remember the buffer for the PAGEPROG step.
+		pkCMD->pDataBuf = (u8*)buf;
+	}
+
+	pkCMD->u4ColAddr += len;
+}
+
+/******************************************************************************
+ * mt6573_nand_write_page_hwecc
+ *
+ * DESCRIPTION:
+ *   Write NAND data with hardware ecc !
+ *
+ * PARAMETERS:
+ *   struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf
+ *
+ * RETURNS:
+ *   None
+ *
+ * NOTES:
+ *   None
+ *
+ ******************************************************************************/
+// ecc.write_page hook: forward the page data and then the OOB through
+// mt6573_nand_write_buf (which stages them in g_kCMD); the actual program
+// happens when cmdfunc receives NAND_CMD_PAGEPROG.
+static void mt6573_nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf)
+{
+//printk("mt6573_nand_write_page_hwecc enter\n");
+	mt6573_nand_write_buf(mtd, buf, mtd->writesize);
+	mt6573_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+}
+
+/******************************************************************************
+ * mt6573_nand_read_page_hwecc
+ *
+ * DESCRIPTION:
+ *   Read NAND data with hardware ecc !
+ *
+ * PARAMETERS:
+ *   struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf
+ *
+ * RETURNS:
+ *   None
+ *
+ * NOTES:
+ *   None
+ *
+ ******************************************************************************/
+// ecc.read_page hook: when the recorded column is 0, read the whole page
+// (BMT-remapped when TCSUPPORT_NAND_BMT is set) into buf/oob_poi and advance
+// the column past page+OOB so follow-up read_buf calls see nothing left.
+// A non-zero starting column is silently ignored here.  Always returns 0.
+static int mt6573_nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
+{
+#if 0
+	mt6573_nand_read_buf(mtd, buf, mtd->writesize);
+	mt6573_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+#else
+	struct mt6573_CMD* pkCMD = &g_kCMD;
+	u32 u4ColAddr = pkCMD->u4ColAddr;
+	u32 u4PageSize = mtd->writesize;
+	#ifdef TCSUPPORT_NAND_BMT
+	u32 u4RowAddr = pkCMD->u4RowAddr;
+	int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
+	int block = u4RowAddr / page_per_block;
+	u16 page_in_block = u4RowAddr % page_per_block;
+	u16 phy_block_bbt;
+	int mapped_block = get_mapping_block_index(block, &phy_block_bbt);
+
+	//printk("mt6573_nand_read_page_hwecc:page=%x,u4ColAddr=%x,block=%x,mapped_block=%x\n", u4RowAddr, u4ColAddr,block, mapped_block);
+	//if(mapped_block != block)
+	//	printk("detected bad block:%d,mapped_block=%d\n",block, mapped_block);
+
+	if (u4ColAddr == 0)
+	{
+		mt6573_nand_exec_read_page(mtd, page_in_block + mapped_block * page_per_block, u4PageSize, buf, chip->oob_poi);
+		pkCMD->u4ColAddr += u4PageSize + mtd->oobsize;
+	}
+	#else
+
+	if (u4ColAddr == 0)
+	{
+		mt6573_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, buf, chip->oob_poi);
+		pkCMD->u4ColAddr += u4PageSize + mtd->oobsize;
+	}
+	#endif
+#endif
+	return 0;
+}
+#if 0
+/******************************************************************************
+ *
+ * Read a page to a logical address
+ *
+ *****************************************************************************/
+static int mt6573_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, u8 *buf, int page)
+{
+ int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
+ int block = page / page_per_block;
+ u16 page_in_block = page % page_per_block;
+ int mapped_block = get_mapping_block_index(block);
+
+ if (mt6573_nand_exec_read_page(mtd, page_in_block + mapped_block * page_per_block,
+ mtd->writesize, buf, chip->oob_poi))
+ return 0;
+ /* else
+ return -EIO;*/
+ return 0;
+}
+#endif
+/******************************************************************************
+ *
+ * Erase a block at a logical address
+ *
+ *****************************************************************************/
+// Erase the block containing physical page `page` (no BMT translation here —
+// callers pass an already-mapped page).  Returns the chip status word from
+// waitfunc; callers test it against NAND_STATUS_FAIL.
+int mt6573_nand_erase_hw(struct mtd_info *mtd, int page)
+{
+	struct nand_chip *chip = (struct nand_chip *)mtd->priv;
+
+#ifdef _MTK_NAND_DUMMY_DRIVER_
+	if (dummy_driver_debug)
+	{
+		unsigned long long time = sched_clock();
+		if (!((time * 123 + 59 ) % 1024))
+		{
+			printk(KERN_INFO "[NAND_DUMMY_DRIVER] Simulate erase error at page: 0x%x\n", page);
+			return NAND_STATUS_FAIL;
+		}
+	}
+#endif
+
+	chip->erase(mtd, page);
+
+	return chip->waitfunc(mtd, chip);
+}
+
+// Erase a block given a LOGICAL page number: translate through the BMT,
+// erase the physical block, and on failure ask the BMT to remap it.
+// Returns 0 on success (including a successful remap after a failed erase),
+// NAND_STATUS_FAIL when the remap also fails.
+static int mt6573_nand_erase(struct mtd_info *mtd, int page)
+{
+	// get mapping
+	struct nand_chip *chip = mtd->priv;
+	int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
+	int page_in_block = page % page_per_block;
+	int block = page / page_per_block;
+	u16 phy_block_bbt;
+
+	int mapped_block = get_mapping_block_index(block, &phy_block_bbt);
+
+	int status = mt6573_nand_erase_hw(mtd, page_in_block + page_per_block * mapped_block);
+
+	if (status & NAND_STATUS_FAIL)
+	{
+		if (update_bmt( (page_in_block + mapped_block * page_per_block) << chip->page_shift,
+				UPDATE_ERASE_FAIL, NULL, NULL))
+		{
+			MSG(INIT, "Erase fail at block: 0x%x, update BMT success\n", mapped_block);
+			return 0;
+		}
+		else
+		{
+			MSG(INIT, "Erase fail at block: 0x%x, update BMT fail\n", mapped_block);
+			return NAND_STATUS_FAIL;
+		}
+	}
+
+	return 0;
+}
+
+
+
+/******************************************************************************
+ * mt6573_nand_read_multi_page_cache
+ *
+ * description:
+ * read multi page data using cache read
+ *
+ * parameters:
+ * struct mtd_info *mtd, struct nand_chip *chip, int page, struct mtd_oob_ops *ops
+ *
+ * returns:
+ * none
+ *
+ * notes:
+ * only available for nand flash support cache read.
+ * read main data only.
+ *
+ *****************************************************************************/
+#if 0
+static int mt6573_nand_read_multi_page_cache(struct mtd_info *mtd, struct nand_chip *chip,
+ int page, struct mtd_oob_ops *ops)
+{
+ int res = -EIO;
+ int len = ops->len;
+ struct mtd_ecc_stats stat = mtd->ecc_stats;
+ uint8_t *buf = ops->datbuf;
+
+ if (!mt6573_nand_ready_for_read(chip, page, 0, true, buf))
+ return -EIO;
+
+ while (len > 0)
+ {
+ mt6573_nand_set_mode(CNFG_OP_CUST);
+ DRV_WriteReg16(NFI_CON_REG16, 8 << CON_NFI_SEC_SHIFT);
+
+ if (len > mtd->writesize) // remained more than one page
+ {
+ if (!mt6573_nand_set_command(0x31)) // todo: add cache read command
+ goto ret;
+ }
+ else
+ {
+ if (!mt6573_nand_set_command(0x3f)) // last page remained
+ goto ret;
+ }
+
+ mt6573_nand_status_ready(STA_NAND_BUSY);
+
+#ifdef USE_AHB_MODE
+ //if (!mt6573_nand_dma_read_data(buf, mtd->writesize))
+ if (!mt6573_nand_read_page_data(mtd, buf, mtd->writesize))
+ goto ret;
+#else
+ if (!mt6573_nand_mcu_read_data(buf, mtd->writesize))
+ goto ret;
+#endif
+
+ // get ecc error info
+ mt6573_nand_check_bch_error(mtd, buf, 3, page);
+ ECC_Decode_End();
+
+ page++;
+ len -= mtd->writesize;
+ buf += mtd->writesize;
+ ops->retlen += mtd->writesize;
+
+ if (len > 0)
+ {
+ ECC_Decode_Start();
+ mt6573_nand_reset();
+ }
+
+ }
+
+ res = 0;
+
+ret:
+ mt6573_nand_stop_read();
+
+ if (res)
+ return res;
+
+ if (mtd->ecc_stats.failed > stat.failed)
+ {
+ printk(KERN_INFO "ecc fail happened\n");
+ return -EBADMSG;
+ }
+
+ return mtd->ecc_stats.corrected - stat.corrected ? -EUCLEAN: 0;
+}
+#endif
+
+/******************************************************************************
+ * mt6573_nand_read_oob_raw
+ *
+ * DESCRIPTION:
+ *   Read oob data
+ *
+ * PARAMETERS:
+ *   struct mtd_info *mtd, const uint8_t *buf, int addr, int len
+ *
+ * RETURNS:
+ *   None
+ *
+ * NOTES:
+ *   this function read raw oob data out of flash, so need to re-organise
+ *   data format before using.
+ *   len should be times of 8, call this after nand_get_device.
+ *   Should notice, this function read data without ECC protection.
+ *
+ *****************************************************************************/
+// Read up to 128 bytes of RAW (no-ECC) OOB from `page_addr` into buf.
+// Sector-by-sector path (always taken unless the device advertises
+// RAMDOM_READ and len > 16): one full ready_for_read + PIO read per sector.
+// The random-read path issues READ0 once, then hops between sectors with
+// the 05h/E0h random-data-output command pair.
+// Returns 0 on success, -EIO/-EINVAL on failure.
+static int mt6573_nand_read_oob_raw(struct mtd_info *mtd, uint8_t *buf, int page_addr, int len)
+{
+	struct nand_chip *chip = (struct nand_chip *)mtd->priv;
+	u32 col_addr = 0;
+	u32 sector = 0;
+	int res = 0;
+	u32 colnob=2, rawnob;
+	int randomread =0;
+	int read_len = 0;
+
+	if (len > 128 || len % OOB_AVAI_PER_SECTOR || !buf)
+	{
+		printk(KERN_WARNING "[%s] invalid parameter, len: %d, buf: %p\n",
+			__FUNCTION__, len, buf);
+		return -EINVAL;
+	}
+	if(len>16)
+	{
+		randomread=1;
+	}
+	if(!randomread||!(devinfo.advancedmode & RAMDOM_READ))
+	{
+		//Always read from here
+		while (len > 0)
+		{
+			read_len = min(len, OOB_PER_SECTOR);
+			// OOB of sector N sits right after its 512-byte data area in
+			// the raw page layout.
+			col_addr = NAND_SECTOR_SIZE + sector * (NAND_SECTOR_SIZE + OOB_PER_SECTOR); // TODO: Fix this hard-code 16
+
+			if (!mt6573_nand_ready_for_read(chip, page_addr, col_addr, false, NULL))
+			{
+				printk("mt6573_nand_ready_for_read return failed\n");
+				res = -EIO;
+				goto error;
+			}
+			if (!mt6573_nand_mcu_read_data(buf + OOB_PER_SECTOR * sector, read_len)) // TODO: and this 8
+			{
+				printk("mt6573_nand_mcu_read_data return failed\n");
+				res = -EIO;
+				goto error;
+			}
+			mt6573_nand_stop_read();
+			//dump_data(buf + 16 * sector,16);
+			sector++;
+			len -= read_len;
+
+		}
+	}
+	else //should be 64
+	{
+		col_addr = NAND_SECTOR_SIZE;
+		if (chip->options & NAND_BUSWIDTH_16)
+		{
+			// 16-bit parts take word (not byte) column addresses.
+			col_addr /= 2;
+		}
+
+		if (!mt6573_nand_reset())
+		{
+			goto error;
+		}
+
+		// Custom-read mode, single-sector count, PIO, no HW ECC.
+		mt6573_nand_set_mode(0x6000);
+		NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
+		DRV_WriteReg16(NFI_CON_REG16, 4 << CON_NFI_SEC_SHIFT);
+
+
+		NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
+		NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
+
+
+		mt6573_nand_set_autoformat(false);
+
+
+		if (!mt6573_nand_set_command(NAND_CMD_READ0))
+		{
+			goto error;
+		}
+
+		// Small-page devices use a single column-address cycle.
+		if(devinfo.pagesize == 512)
+			colnob = 1;
+		else
+			colnob = 2;
+
+		rawnob=devinfo.addr_cycle - colnob;
+
+		//1 FIXED ME: For Any Kind of AddrCycle
+		if (!mt6573_nand_set_address(col_addr, page_addr, colnob, rawnob))
+		{
+			goto error;
+		}
+
+		if (!mt6573_nand_set_command(NAND_CMD_READSTART))
+		{
+			goto error;
+		}
+		if (!mt6573_nand_status_ready(STA_NAND_BUSY))
+		{
+			goto error;
+		}
+
+
+		// First sector's OOB comes straight after READSTART ...
+		read_len = min(len, OOB_PER_SECTOR);
+		if (!mt6573_nand_mcu_read_data(buf + OOB_PER_SECTOR * sector, read_len)) // TODO: and this 8
+		{
+			printk(KERN_WARNING "mt6573_nand_mcu_read_data return failed first 16\n");
+			res = -EIO;
+			goto error;
+		}
+		sector++;
+		len -= read_len;
+		mt6573_nand_stop_read();
+		// ... remaining sectors are reached with 05h (random data output)
+		// + column address + E0h, avoiding a full page re-read.
+		while(len>0)
+		{
+			read_len = min(len, 16);
+			if (!mt6573_nand_set_command(0x05))
+			{
+				goto error;
+			}
+
+			col_addr = NAND_SECTOR_SIZE + sector * (NAND_SECTOR_SIZE + 16);
+			if (chip->options & NAND_BUSWIDTH_16)
+			{
+				col_addr /= 2;
+			}
+			DRV_WriteReg32(NFI_COLADDR_REG32, col_addr);
+			DRV_WriteReg16(NFI_ADDRNOB_REG16, 2);
+			DRV_WriteReg16(NFI_CON_REG16, 4 << CON_NFI_SEC_SHIFT);
+
+			if( !mt6573_nand_status_ready(STA_ADDR_STATE))
+			{
+				goto error;
+			}
+
+			if (!mt6573_nand_set_command(0xE0))
+			{
+				goto error;
+			}
+			if (!mt6573_nand_status_ready(STA_NAND_BUSY))
+			{
+				goto error;
+			}
+			if (!mt6573_nand_mcu_read_data(buf + OOB_PER_SECTOR * sector, read_len)) // TODO: and this 8
+			{
+				printk(KERN_WARNING "mt6573_nand_mcu_read_data return failed first 16\n");
+				res = -EIO;
+				goto error;
+			}
+			mt6573_nand_stop_read();
+			sector++;
+			len -= read_len;
+		}
+		//dump_data(&testbuf[16],16);
+		//printk(KERN_ERR "\n");
+	}
+error:
+	// NOTE(review): several goto-error paths above reach here without
+	// setting res, so they return 0 (success) — confirm this is intended.
+	NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BRD);
+	return res;
+}
+
+static int mt6573_nand_write_oob_raw(struct mtd_info *mtd, const uint8_t *buf, int page_addr, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ // int i;
+ u32 col_addr = 0;
+ u32 sector = 0;
+ // int res = 0;
+ // u32 colnob=2, rawnob=devinfo.addr_cycle-2;
+ // int randomread =0;
+ int write_len = 0;
+ int status;
+
+#if defined(CONFIG_JFFS2_FS) && defined(TCSUPPORT_NAND_FLASH)
+ return 0;
+#endif
+//printk("mt6573_nand_write_oob_raw enter\n");
+ if (len > 128 || len % OOB_AVAI_PER_SECTOR || !buf)
+ {
+ printk(KERN_WARNING "[%s] invalid parameter, len: %d, buf: %p\n",
+ __FUNCTION__, len, buf);
+ return -EINVAL;
+ }
+
+ while (len > 0)
+ {
+ write_len = min(len, OOB_PER_SECTOR);
+ col_addr = sector * (NAND_SECTOR_SIZE + OOB_PER_SECTOR) + NAND_SECTOR_SIZE;
+ if (!mt6573_nand_ready_for_write(chip, page_addr, col_addr, false, NULL))
+ {
+ return -EIO;
+ }
+
+ if (!mt6573_nand_mcu_write_data(mtd, buf + sector * OOB_PER_SECTOR, write_len))
+ {
+ return -EIO;
+ }
+
+ (void)mt6573_nand_check_RW_count(write_len);
+ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BWR);
+ (void)mt6573_nand_set_command(NAND_CMD_PAGEPROG);
+
+ while(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY);
+
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ {
+ printk(KERN_INFO "status: %d\n", status);
+ return -EIO;
+ }
+
+ len -= write_len;
+ sector++;
+ }
+
+ return 0;
+}
+
+static int mt6573_nand_write_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page)
+{
+ // u8 *buf = chip->oob_poi;
+ int i, iter;
+
+ memcpy(local_oob_buf, chip->oob_poi, mtd->oobsize);
+//printk("mt6573_nand_write_oob_hw enter\n");
+ // copy ecc data
+ if(mtd->oobsize > 16){
+ for (i = 0; i < chip->ecc.layout->eccbytes; i++)
+ {
+ iter = (i / OOB_AVAI_PER_SECTOR) * OOB_PER_SECTOR + OOB_AVAI_PER_SECTOR + i % OOB_AVAI_PER_SECTOR;
+ local_oob_buf[iter] = chip->oob_poi[chip->ecc.layout->eccpos[i]];
+ // chip->oob_poi[chip->ecc.layout->eccpos[i]] = local_oob_buf[iter];
+ }
+
+ // copy FDM data
+ for (i = 0; i < (chip->ecc.layout->eccbytes / OOB_AVAI_PER_SECTOR); i++)
+ {
+ memcpy(&local_oob_buf[i * OOB_PER_SECTOR], &chip->oob_poi[i * OOB_AVAI_PER_SECTOR], OOB_AVAI_PER_SECTOR);
+ }
+ }
+
+ return mt6573_nand_write_oob_raw(mtd, local_oob_buf, page, mtd->oobsize);
+}
+
+static int mt6573_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
+{
+ int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
+ int block = page / page_per_block;
+ u16 page_in_block = page % page_per_block;
+ u16 phy_block_bbt;
+ int mapped_block = get_mapping_block_index(block,&phy_block_bbt);
+ int i, iter;
+
+
+ if(g_bOOB_Test){
+ memcpy(local_oob_buf, chip->oob_poi, mtd->oobsize);
+ if(mtd->oobsize > 16){
+ // copy ecc data
+ for (i = 0; i < chip->ecc.layout->eccbytes; i++)
+ {
+ iter = (i / OOB_AVAI_PER_SECTOR) * OOB_PER_SECTOR + OOB_AVAI_PER_SECTOR + i % OOB_AVAI_PER_SECTOR;
+ local_oob_buf[iter] = chip->oob_poi[chip->ecc.layout->eccpos[i]];
+ // chip->oob_poi[chip->ecc.layout->eccpos[i]] = local_oob_buf[iter];
+ }
+
+ // copy FDM data
+ for (i = 0; i < (chip->ecc.layout->eccbytes / OOB_AVAI_PER_SECTOR); i++)
+ {
+ memcpy(&local_oob_buf[i * OOB_PER_SECTOR], &chip->oob_poi[i * OOB_AVAI_PER_SECTOR], OOB_AVAI_PER_SECTOR);
+ }
+
+ }
+
+ return 0;
+ }
+
+ #ifdef TCSUPPORT_NAND_BMT
+ // write bad index into oob
+ #if 0
+ if (mapped_block != block)
+ {
+ set_bad_index_to_oob(chip->oob_poi, block);
+ }
+ else
+ {
+ set_bad_index_to_oob(chip->oob_poi, FAKE_INDEX);
+ }
+ #endif
+ if(block_is_in_bmt_region(mapped_block))
+ {
+ memcpy(chip->oob_poi + OOB_INDEX_OFFSET, &phy_block_bbt, OOB_INDEX_SIZE);
+ }
+ #endif
+
+ if (mt6573_nand_write_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block /* page */))
+ {
+ MSG(INIT, "write oob fail at block: 0x%x, page: 0x%x\n", mapped_block, page_in_block);
+ if (update_bmt((page_in_block + mapped_block * page_per_block) << chip->page_shift,
+ UPDATE_WRITE_FAIL, NULL, chip->oob_poi))
+ {
+ MSG(INIT, "Update BMT success\n");
+ return 0;
+ }
+ else
+ {
+ MSG(INIT, "Update BMT fail\n");
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+int mt6573_nand_block_markbad_hw(struct mtd_info *mtd, loff_t offset, u32 bmt_block)
+{
+ struct nand_chip *chip = mtd->priv;
+ int block = (int)offset >> chip->phys_erase_shift;
+ int page = block * (1 << (chip->phys_erase_shift - chip->page_shift));
+ int ret;
+ unsigned int bad_flag_offset = 0;
+
+ u8 buf[8];
+ memset(buf, 0xFF, 8);
+ if(bmt_block)
+ bad_flag_offset = BMT_BAD_BLOCK_INDEX_OFFSET;
+ else{
+ if(chip->page_shift == 9) //512B page size
+ bad_flag_offset = 6;
+ else
+ bad_flag_offset = 0;
+ }
+ buf[bad_flag_offset] = 0;
+
+ ret = mt6573_nand_write_oob_raw(mtd, buf, page, 8);
+ return ret;
+}
+
+static int mt6573_nand_block_markbad(struct mtd_info *mtd, loff_t offset)
+{
+ struct nand_chip *chip = mtd->priv;
+ int block = (int)offset >> chip->phys_erase_shift;
+ int mapped_block;
+ int ret;
+ u16 phy_block_bbt;
+
+ nand_get_device(mtd, FL_WRITING);
+
+ mapped_block = get_mapping_block_index(block, &phy_block_bbt);
+ ret = mt6573_nand_block_markbad_hw(mtd, mapped_block << chip->phys_erase_shift, 0);
+
+ nand_release_device(mtd);
+
+ return ret;
+}
+
+int mt6573_nand_read_oob_hw(struct mtd_info *mtd,struct nand_chip *chip, int page)
+{
+ int i;
+ u8 iter = 0;
+#ifdef TESTTIME
+ unsigned long long time1,time2;
+
+ time1 = sched_clock();
+#endif
+
+ if (mt6573_nand_read_oob_raw(mtd, chip->oob_poi, page, mtd->oobsize))
+ {
+ printk( "[%s]mt6573_nand_read_oob_raw return failed\n", __FUNCTION__);
+ return -EIO;
+ }
+ //dump_buf(chip->oob_poi, mtd->oobsize);
+#ifdef TESTTIME
+ time2= sched_clock()-time1;
+ if(!readoobflag)
+ {
+ readoobflag=1;
+ printk(KERN_ERR "[%s] time is %llu",__FUNCTION__,time2);
+ }
+#endif
+ if(mtd->oobsize== 16) //Page size is 512 bytes
+ return 0;
+
+ // adjust to ecc physical layout to memory layout
+ /*********************************************************/
+ /* FDM0 | ECC0 | FDM1 | ECC1 | FDM2 | ECC2 | FDM3 | ECC3 */
+ /* 8B | 8B | 8B | 8B | 8B | 8B | 8B | 8B */
+ /*********************************************************/
+
+
+ memcpy(local_oob_buf, chip->oob_poi, mtd->oobsize);
+
+ // copy ecc data
+ for (i = 0; i < chip->ecc.layout->eccbytes; i++)
+ {
+ iter = (i / OOB_AVAI_PER_SECTOR) * OOB_PER_SECTOR + OOB_AVAI_PER_SECTOR + i % OOB_AVAI_PER_SECTOR;
+ chip->oob_poi[chip->ecc.layout->eccpos[i]] = local_oob_buf[iter];
+ }
+
+ // copy FDM data
+ for (i = 0; i < (chip->ecc.layout->eccbytes / OOB_AVAI_PER_SECTOR); i++)
+ {
+ memcpy(&chip->oob_poi[i * OOB_AVAI_PER_SECTOR], &local_oob_buf[i * OOB_PER_SECTOR], OOB_AVAI_PER_SECTOR);
+ }
+
+ return 0;
+}
+
+static int mt6573_nand_read_oob(struct mtd_info *mtd,struct nand_chip *chip, int page, int sndcmd)
+{
+ int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
+ int block = page / page_per_block;
+ u16 page_in_block = page % page_per_block;
+ u16 phy_block_bbt;
+ int mapped_block = get_mapping_block_index(block, &phy_block_bbt);
+
+ mt6573_nand_read_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block);
+
+ return 0; // the return value is sndcmd
+}
+
+
+int mt6573_nand_block_bad_hw(struct mtd_info *mtd, loff_t ofs, u32 bmt_block)
+{
+ struct nand_chip *chip = (struct nand_chip *)mtd->priv;
+ int page_addr = (int)(ofs >> chip->page_shift);
+ unsigned int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
+ unsigned int bad_flag_offset = 0;
+ unsigned char oob_buf[8];
+
+ page_addr &= ~(page_per_block - 1);
+
+ if (mt6573_nand_read_oob_raw(mtd, oob_buf, page_addr, sizeof(oob_buf)))
+ {
+ printk(KERN_WARNING "mt6573_nand_read_oob_raw return error\n");
+ return 1;
+ }
+
+ if(bmt_block)
+ bad_flag_offset = BMT_BAD_BLOCK_INDEX_OFFSET;
+ else{
+ if(chip->page_shift == 9) //512B page size
+ bad_flag_offset = 6;
+ else
+ bad_flag_offset = 0;
+ }
+
+
+ if( oob_buf[bad_flag_offset] != 0xff){
+ printk(KERN_WARNING "Bad block detected at 0x%x, oob_buf[%d] is 0x%x\n", page_addr,bad_flag_offset, oob_buf[bad_flag_offset]);
+ // dump_nfi();
+ return 1;
+ }
+
+ return 0; // everything is OK, good block
+}
+
+
+static int mt6573_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
+{
+ int chipnr = 0;
+
+ struct nand_chip *chip = (struct nand_chip *)mtd->priv;
+ int block = (int)ofs >> chip->phys_erase_shift;
+ int mapped_block;
+ u16 phy_block_bbt;
+
+ int ret;
+
+ if (getchip) {
+ chipnr = (int)(ofs >> chip->chip_shift);
+ nand_get_device(mtd, FL_READING);
+ /* Select the NAND device */
+ chip->select_chip(mtd, chipnr);
+ }
+
+ mapped_block = get_mapping_block_index(block, &phy_block_bbt);
+
+ ret = mt6573_nand_block_bad_hw(mtd, mapped_block << chip->phys_erase_shift, 0);
+
+ #ifdef TCSUPPORT_NAND_BMT
+ if (ret)
+ {
+ //MSG(INIT, "Unmapped bad block: 0x%x\n", mapped_block);
+ if (update_bmt(mapped_block << chip->phys_erase_shift, UPDATE_UNMAPPED_BLOCK, NULL, NULL))
+ {
+ //MSG(INIT, "Update BMT success\n");
+ ret = 0;
+ }
+ else
+ {
+ //MSG(INIT, "Update BMT fail\n");
+ ret = 1;
+ }
+ }
+ #endif
+
+ if (getchip)
+ {
+ nand_release_device(mtd);
+ }
+
+ return ret;
+}
+
+
+/******************************************************************************
+ * mt6573_nand_verify_buf
+ *
+ * DESCRIPTION:
+ * Verify the NAND write data is correct or not !
+ *
+ * PARAMETERS:
+ * struct mtd_info *mtd, const uint8_t *buf, int len
+ *
+ * RETURNS:
+ * None
+ *
+ * NOTES:
+ * None
+ *
+ ******************************************************************************/
+#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
+
+char gacBuf[4096 + 128];
+
+static int mt6573_nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+#if 1
+ struct nand_chip* chip = (struct nand_chip*)mtd->priv;
+ struct mt6573_CMD* pkCMD = &g_kCMD;
+ u32 u4PageSize = mtd->writesize;
+ u32 *pSrc, *pDst;
+ int i;
+
+ mt6573_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, gacBuf, gacBuf + u4PageSize);
+
+ pSrc = (u32*)buf;
+ pDst = (u32*)gacBuf;
+ len = len/sizeof(u32);
+ for (i = 0; i < len; ++i)
+ {
+ if (*pSrc != *pDst)
+ {
+ MSG(VERIFY, "mt6573_nand_verify_buf page fail at page %d\n", pkCMD->u4RowAddr);
+ return -1;
+ }
+ pSrc++;
+ pDst++;
+ }
+
+ pSrc = (u32*)chip->oob_poi;
+ pDst = (u32*)(gacBuf + u4PageSize);
+
+ if ((pSrc[0] != pDst[0]) || (pSrc[1] != pDst[1]) ||
+ (pSrc[2] != pDst[2]) || (pSrc[3] != pDst[3]) ||
+ (pSrc[4] != pDst[4]) || (pSrc[5] != pDst[5]))
+ // TODO: Ask Designer Why?
+ //(pSrc[6] != pDst[6]) || (pSrc[7] != pDst[7]))
+ {
+ MSG(VERIFY, "mt6573_nand_verify_buf oob fail at page %d\n", pkCMD->u4RowAddr);
+ MSG(VERIFY, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ pSrc[0], pSrc[1], pSrc[2], pSrc[3], pSrc[4], pSrc[5], pSrc[6], pSrc[7]);
+ MSG(VERIFY, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ pDst[0], pDst[1], pDst[2], pDst[3], pDst[4], pDst[5], pDst[6], pDst[7]);
+ return -1;
+ }
+ /*
+ for (i = 0; i < len; ++i) {
+ if (*pSrc != *pDst) {
+ printk(KERN_ERR"mt6573_nand_verify_buf oob fail at page %d\n", g_kCMD.u4RowAddr);
+ return -1;
+ }
+ pSrc++;
+ pDst++;
+ }
+ */
+ //printk(KERN_INFO"mt6573_nand_verify_buf OK at page %d\n", g_kCMD.u4RowAddr);
+
+ return 0;
+#else
+ return 0;
+#endif
+}
+#endif
+
+/******************************************************************************
+ * mt6573_nand_init_hw
+ *
+ * DESCRIPTION:
+ * Initial NAND device hardware component !
+ *
+ * PARAMETERS:
+ * struct mt6573_nand_host *host (Initial setting data)
+ *
+ * RETURNS:
+ * None
+ *
+ * NOTES:
+ * None
+ *
+ ******************************************************************************/
+static int mt6573_nand_init_hw(struct mt6573_nand_host *host)
+{
+ struct mt6573_nand_host_hw *hw = host->hw;
+
+
+ MSG(INIT, "Enable NFI Clock\n");
+ nand_enable_clock();
+
+ g_bInitDone = false;
+ /* Get the HW_VER */
+ //g_u4ChipVer = DRV_Reg32(CONFIG_BASE);
+ g_kCMD.u4OOBRowAddr = (u32)-1;
+
+ /* Set default NFI access timing control */
+ DRV_WriteReg32(NFI_ACCCON_REG32, hw->nfi_access_timing);
+ DRV_WriteReg16(NFI_CNFG_REG16, 0);
+ DRV_WriteReg16(NFI_PAGEFMT_REG16, 0);
+
+ /* Reset the state machine and data FIFO, because flushing FIFO */
+ (void)mt6573_nand_reset();
+
+ /* Set the ECC engine */
+ if(hw->nand_ecc_mode == NAND_ECC_HW)
+ {
+ MSG(INIT, "%s : Use HW ECC\n", MODULE_NAME);
+ if(g_bHwEcc){
+ NFI_SET_REG32(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
+ }
+ if(ECC_Config(host->hw))
+ return -1;
+ mt6573_nand_configure_fdm(8);
+ mt6573_nand_configure_lock();
+ }
+
+ /*set pio mode as big endian, sync as dma mode*/
+ //NFI_SET_REG32(NFI_CNFG_REG16, CNFG_PIO_BIG_ENDIAN);
+ /* Initialize interrupt. Clear interrupt, read clear. */
+ DRV_Reg16(NFI_INTR_REG16);
+
+ /* Interrupt arise when read data or program data to/from AHB is done. */
+ DRV_WriteReg16(NFI_INTR_EN_REG16, 0);
+
+ return 0;
+}
+
+//-------------------------------------------------------------------------------
+static int mt6573_nand_dev_ready(struct mtd_info *mtd)
+{
+ return !(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY);
+}
+
+static void mt6573_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+}
+
+
+static int mt6573_nand_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ return 0;
+}
+
+static int mt6573_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
+{
+ return 0;
+}
+
+
+
+unsigned int ra_nand_read_byte(unsigned long long from)
+{
+ unsigned char ch;
+ size_t retlen;
+ //ramtd_nand_read(ranfc_mtd, (loff_t)from, 1, &retlen, &ch);
+ host->mtd._read(&(host->mtd), (loff_t)from, 1, &retlen, &ch);
+ return ch;
+}
+
+
+unsigned int ra_nand_read_dword(unsigned long long from)
+{
+ unsigned char data[5];
+ unsigned long dword;
+ size_t retlen;
+ int ret = 0, i;
+
+ host->mtd._read(&(host->mtd), (loff_t)from, 4, &retlen, data);
+ if (ret != 0)
+ return -1;
+
+ dword = 0;
+ for (i=0; i<4; i++) {
+ dword += (unsigned long)data[i];
+ if (i<3)
+ dword <<= 8;
+ }
+ return dword;
+}
+
+#ifdef NAND_ECC_TEST
+/******************************************************************************
+ * mt6573_nand_proc_read
+ *
+ * DESCRIPTION:
+ * Read the proc file to get the interrupt scheme setting !
+ *
+ * PARAMETERS:
+ * char *page, char **start, off_t off, int count, int *eof, void *data
+ *
+ * RETURNS:
+ * None
+ *
+ * NOTES:
+ * None
+ *
+ ******************************************************************************/
+static int mt6573_nand_ecc_proc_read(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+ if (off > 0)
+ {
+ return 0;
+ }
+ // return sprintf(page, "Interrupt-Scheme is %d\n", g_i4Interrupt);
+ len = sprintf(page, "hw ecc is %d bit,spare size is %d\n", g_hw_ecc_bit, g_spare_size);
+
+ return len;
+}
+
+int mt6573_nand_ecc_test(int max_errors);
+
+/******************************************************************************
+ * mt6573_nand_proc_write
+ *
+ * DESCRIPTION:
+ * Write the proc file to set the interrupt scheme !
+ *
+ * PARAMETERS:
+ * struct file* file, const char* buffer, unsigned long count, void *data
+ *
+ * RETURNS:
+ * None
+ *
+ * NOTES:
+ * None
+ *
+ ******************************************************************************/
+static int mt6573_nand_ecc_proc_write(struct file* file, const char* buffer,
+ unsigned long count, void *data)
+{
+ char buf[16];
+ int len = count, n;
+
+ if (len >= sizeof(buf))
+ {
+ len = sizeof(buf) - 1;
+ }
+
+ if (copy_from_user(buf, buffer, len))
+ {
+ return -EFAULT;
+ }
+
+ buf[len] = '\0';
+
+ if(buf[0] == 'T'){
+ n = simple_strtol(buf+1, NULL, 10);
+ mt6573_nand_ecc_test(n);
+ }
+ else if(buf[0] == 'S'){
+ n = simple_strtol(buf+1, NULL, 10);
+ if(n == 16 || n == 26 || n == 27 || n == 28){
+ g_spare_size = n;
+ //ECC_Config_All_Bits(g_hw_ecc_bit);
+ }
+ else{
+ printk("spare size must be 16/26/27/28\n");
+ }
+ }
+ else{
+
+ n = simple_strtol(buf, NULL, 10);
+
+ ECC_Config_All_Bits(n);
+ }
+
+ return len;
+}
+#if 1
+
+/******************************************************************************
+ * mt6573_nand_read_oob_raw
+ *
+ * DESCRIPTION:
+ * Read oob data
+ *
+ * PARAMETERS:
+ * struct mtd_info *mtd, const uint8_t *buf, int addr, int len
+ *
+ * RETURNS:
+ * None
+ *
+ * NOTES:
+ * this function read raw oob data out of flash, so need to re-organise
+ * data format before using.
+ * len should be times of 8, call this after nand_get_device.
+ * Should notice, this function read data without ECC protection.
+ *
+ *****************************************************************************/
+static int mt6573_nand_read_oob_raw_more(struct mtd_info *mtd, uint8_t *buf, int page_addr, int len)
+{
+ struct nand_chip *chip = (struct nand_chip *)mtd->priv;
+ u32 col_addr = 0;
+ u32 sector = 0;
+ int res = 0;
+ int randomread =0;
+ int read_len = 0;
+ int oob_per_sector = g_spare_size;
+
+ if (len > 232 || len % OOB_AVAI_PER_SECTOR || !buf)
+ {
+ printk(KERN_WARNING "[%s] invalid parameter, len: %d, buf: %p\n",
+ __FUNCTION__, len, buf);
+ return -EINVAL;
+ }
+ if(len>16)
+ {
+ randomread=1;
+ }
+ /*TOSHIBA TC58NVG3S0F 4K Page size*/
+ /*Spare area is 232 Bytes, oob per sector is 29=232/8*/
+ if(!randomread||!(devinfo.advancedmode & RAMDOM_READ))
+ {
+ //Always read from here
+ while (len > 0)
+ {
+ read_len = min(len, oob_per_sector);
+ col_addr = NAND_SECTOR_SIZE + sector * (NAND_SECTOR_SIZE + oob_per_sector); // TODO: Fix this hard-code 16
+ //printk("mt6573_nand_read_oob_raw_more: page_addr=%d,col_addr=%d, readlen=%d\n", page_addr,col_addr, read_len);
+ if (!mt6573_nand_ready_for_read(chip, page_addr, col_addr, false, NULL))
+ {
+ printk("mt6573_nand_ready_for_read return failed\n");
+ res = -EIO;
+ goto error;
+ }
+ if (!mt6573_nand_mcu_read_data(buf + oob_per_sector * sector, read_len)) // TODO: and this 8
+ {
+ printk("mt6573_nand_mcu_read_data return failed\n");
+ res = -EIO;
+ goto error;
+ }
+ mt6573_nand_stop_read();
+ //dump_data(buf + 16 * sector,16);
+ sector++;
+ len -= read_len;
+
+ }
+ }
+
+
+error:
+ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BRD);
+ return res;
+}
+#if 0
+static int mt6573_nand_write_oob_raw_more(struct mtd_info *mtd, const uint8_t *buf, int page_addr, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ // int i;
+ u32 col_addr = 0;
+ u32 sector = 0;
+ // int res = 0;
+ // u32 colnob=2, rawnob=devinfo.addr_cycle-2;
+ // int randomread =0;
+ int write_len = 0;
+ int status;
+ int oob_per_sector = g_spare_size;
+
+//printk("mt6573_nand_write_oob_raw enter\n");
+ if (len > 232 || len % OOB_AVAI_PER_SECTOR || !buf)
+ {
+ printk(KERN_WARNING "[%s] invalid parameter, len: %d, buf: %p\n",
+ __FUNCTION__, len, buf);
+ return -EINVAL;
+ }
+
+ while (len > 0)
+ {
+ write_len = min(len, oob_per_sector);
+ col_addr = sector * (NAND_SECTOR_SIZE + oob_per_sector) + NAND_SECTOR_SIZE;
+ if (!mt6573_nand_ready_for_write(chip, page_addr, col_addr, false, NULL))
+ {
+ return -EIO;
+ }
+
+ if (!mt6573_nand_mcu_write_data(mtd, buf + sector * oob_per_sector, write_len))
+ {
+ return -EIO;
+ }
+ (void)mt6573_nand_check_RW_count(write_len);
+ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BWR);
+ (void)mt6573_nand_set_command(NAND_CMD_PAGEPROG);
+
+ while(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY);
+
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ {
+ printk(KERN_INFO "status: %d\n", status);
+ return -EIO;
+ }
+
+ len -= write_len;
+ sector++;
+ }
+
+ return 0;
+}
+#endif
+/**********************************************************
+Description : CM_ECC_Invert_Bits
+Input :
+Output : 0
+***********************************************************/
+void CM_ECC_Invert_Bits(unsigned char* buff_ptr, unsigned int bit_pos)
+{
+ unsigned int byte_pos = 0;
+ unsigned char byte_val = 0;
+ unsigned char temp_val = 0;
+ unsigned int invert_bit = 0;
+
+ byte_pos = bit_pos >> 3;
+ invert_bit = bit_pos & ((1<<3)-1);
+ byte_val = buff_ptr[byte_pos];
+ temp_val = byte_val & (1 << invert_bit);
+
+ if(temp_val > 0)
+ byte_val &= ~temp_val;
+ else
+ byte_val |= (1 << invert_bit);
+ buff_ptr[byte_pos] = byte_val;
+
+}
+
+
+
+
+//char oob_buf[234] = {0};
+//char read_buf[4097] = {0};
+
+#define DATA_BUF_SIZE (4096+234+1)
+
+int mt6573_nand_ecc_test(int max_errors)
+{
+ int block_index, page_addr, i, oob_size, error=0;
+ struct nand_chip *chip = (struct nand_chip *)host->mtd.priv;
+ int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
+ int page_size = 1 << chip->page_shift;
+ int block_size = 1 << chip->phys_erase_shift;
+
+ int ecc_error_bit, hw_ecc_bits = g_hw_ecc_bit;
+ int result = 0, sector_num = 0;
+ int max_error_bits = g_hw_ecc_bit;
+
+ unsigned long long offset, len;
+ char *data_buffer = NULL;
+ char *read_buf = NULL;
+
+ char *size_4K_page_data = NULL;
+
+ size_4K_page_data = kmalloc(4097, GFP_KERNEL);
+ if(!size_4K_page_data){
+ printk("alloc read_buf buffer failed!\n");
+ return -1;
+ }
+ memset(size_4K_page_data,0x5a, 4097);
+ read_buf = kmalloc(4097, GFP_KERNEL);
+ if(!read_buf){
+ printk("alloc read_buf buffer failed!\n");
+ return -1;
+ }
+
+ //g_spare_size = 26;
+ if(max_errors){
+ max_error_bits = max_errors;
+ }
+
+
+ printk("page_per_block=%d, page_size=%d, block_size=%d\n", page_per_block, page_size, block_size);
+ //1.Select a random block and erase it
+ //get_random_bytes??
+ //block_index = random() % (chip->chipsize/(1 << chip->phys_erase_shift));
+ block_index = 0;
+
+ printk("block_index=%d\n", block_index);
+
+ mt6573_nand_erase(&(host->mtd), block_index*page_per_block);
+
+
+
+ //2.Write a page data with HW_ECC on
+ g_bAutoFMT = true;
+ g_bHwEcc = true;
+ g_bUseAHBMode = true;
+ g_bOOB_Test = false;
+
+ /*Write to 2nd Page*/
+ page_addr = block_index*page_per_block+1;
+ offset = (unsigned long long)(page_addr*page_size);
+
+ printk("write offset=%llu\n", offset);
+
+ host->mtd.write(&(host->mtd), (loff_t)offset, (size_t)page_size, (size_t *)&len, size_4K_page_data);
+
+
+ //3.Read the page main data and oob data
+ data_buffer = kmalloc(DATA_BUF_SIZE, GFP_KERNEL);
+ if(!data_buffer){
+ printk("alloc data buffer failed!\n");
+ return -1;
+ }
+ memset(data_buffer, 0, DATA_BUF_SIZE);
+ host->mtd.read(&(host->mtd), (loff_t)offset, (size_t)page_size, (size_t *)&len, data_buffer);
+
+ for (i=0; i<page_size; i++) {
+ if (data_buffer[i] != size_4K_page_data[i]) {
+ printk("basic write read test fail, i=%d\n", i);
+ //err++;
+
+#if 0
+ printk("*************RBuf***************\n");
+ dump_buf(data_buffer, page_size);
+ //printk("**************WBuf**************\n");
+ //dump_buf(size_4K_page_data, (int)oob_size);
+ //printk("****************************\n");
+#endif
+ result = -1;
+ goto end;
+ }
+ }
+
+ //read oob...
+ oob_size = (1 << (chip->page_shift - 9)) * g_spare_size;
+
+ mt6573_nand_read_oob_raw_more(&(host->mtd), data_buffer+page_size, page_addr, oob_size);
+
+
+ for(ecc_error_bit = 1; ecc_error_bit <= max_errors; ecc_error_bit++){
+
+ //4.Create the test data with 4~16 bit error for every sector
+ for(sector_num = 0; sector_num < (page_size/NAND_SECTOR_SIZE); sector_num++){
+ CM_ECC_Invert_Bits(data_buffer, sector_num*NAND_SECTOR_SIZE*8+ecc_error_bit*200);
+ }
+ #if 1
+ error =0;
+
+ for (i=0; i<page_size; i++) {
+ if (data_buffer[i] != size_4K_page_data[i]) {
+ error++;
+ }
+ }
+ printk("%d bit error test\n", error);
+ #endif
+
+ //5.Write the test data to the next page in PIO mode and HW_ECC is off
+ g_bAutoFMT = false;
+ g_bHwEcc = false;
+ g_bUseAHBMode = false;
+ g_bOOB_Test = true;
+
+ page_addr++;//Select the next page
+ offset = (unsigned long long)(page_addr*page_size);
+ //mt6573_nand_write_oob_raw_more(&(host->mtd), data_buffer+page_size, page_addr, oob_size);
+
+
+ memcpy(local_oob_buf, data_buffer+page_size, oob_size);
+
+ host->mtd.write(&(host->mtd), (loff_t)offset, (size_t)page_size, (size_t *)&len, data_buffer);
+
+ //6.Read the data from the next page when HW_ECC is on
+ g_bAutoFMT = true;
+ g_bHwEcc = true;
+ g_bUseAHBMode = true;
+ g_bOOB_Test = false;
+
+ memset(read_buf, 0, 4097);
+
+ host->mtd.read(&(host->mtd), (loff_t)offset, (size_t)page_size, (size_t *)&len, read_buf);
+
+ if(ecc_error_bit <= hw_ecc_bits){
+
+ //7.Check whether the error bit be corrected
+ for (i=0; i<page_size; i++) {
+ if (read_buf[i] != size_4K_page_data[i]) {
+ printk("%d bit ecc correct test fail, i=%d\n", ecc_error_bit, i);
+ //err++;
+ result = -1;
+ goto end;
+ }
+ }
+ printk("%d bit error test PASS\n", error);
+ }
+ else{
+ #if 0
+ printk("%d bit error, more than hw ecc bits,ignore\n", error);
+ uncorrect_num = 0;
+ for (i=0; i<page_size; i++) {
+ if (read_buf[i] != size_4K_page_data[i]) {
+ //printk("%d bit ecc correct test fail, i=%d\n", ecc_error_bit, i);
+ uncorrect_num++;
+ }
+ }
+ if(uncorrect_num)
+ printk("%d bit error test: uncorrect_num=%d\n", error, uncorrect_num);
+ else
+ printk("%d bit error test PASS\n", error);
+ #endif
+ }
+ printk("**********************************************\n");
+
+ }
+
+
+end:
+ if(size_4K_page_data){
+ kfree(size_4K_page_data);
+ }
+
+ if(read_buf){
+ kfree(read_buf);
+ }
+ if(data_buffer){
+ kfree(data_buffer);
+ }
+ if(result){
+ printk("NAND ECC Test Fail!!\n");
+ }
+ else{
+ printk("NAND ECC Test PASS!!\n");
+ }
+
+ return result;
+}
+
+#endif
+#endif
+
+
+/******************************************************************************
+ * mt6573_nand_proc_read
+ *
+ * DESCRIPTION:
+ * Read the proc file to get the interrupt scheme setting !
+ *
+ * PARAMETERS:
+ * char *page, char **start, off_t off, int count, int *eof, void *data
+ *
+ * RETURNS:
+ * None
+ *
+ * NOTES:
+ * None
+ *
+ ******************************************************************************/
+static int mt6573_nand_proc_read(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+ int nand_mode = 0;
+ if (off > 0)
+ {
+ return 0;
+ }
+
+ nand_mode = (g_bAutoFMT << 3) | (g_bHwEcc << 2) | (g_i4Interrupt << 1) | g_bUseAHBMode;
+ // return sprintf(page, "Interrupt-Scheme is %d\n", g_i4Interrupt);
+ len = sprintf(page, "%d\n", nand_mode);
+ len += sprintf(page + len, "Name: %s, ID: 0x%x, total size: %dMiB, page size: %d B\n", devinfo.devciename, devinfo.id,
+ devinfo.totalsize, devinfo.pagesize);
+ len += sprintf(page + len, "Current Interrupt is %s, working in %s mode \nHW_ECC is %s, AUTO_FMT is %s \nOOB_Test is %s\n",
+ g_i4Interrupt ? "ON" : "OFF",
+ g_bUseAHBMode? "DMA" : "PIO", g_bHwEcc? "ON" : "OFF", g_bAutoFMT? "ON" : "OFF", g_bOOB_Test? "ON" : "OFF");
+
+
+ return len;
+}
+
+
+/******************************************************************************
+ * mt6573_nand_proc_write
+ *
+ * DESCRIPTION:
+ * Write the proc file to set the interrupt scheme !
+ *
+ * PARAMETERS:
+ * struct file* file, const char* buffer, unsigned long count, void *data
+ *
+ * RETURNS:
+ * None
+ *
+ * NOTES:
+ * None
+ *
+ ******************************************************************************/
+static int mt6573_nand_proc_write(struct file* file, const char* buffer,
+ unsigned long count, void *data)
+{
+ struct mtd_info *mtd = &host->mtd;
+ char buf[16];
+ int len = count, n;
+
+ if (len >= sizeof(buf))
+ {
+ len = sizeof(buf) - 1;
+ }
+
+ if (copy_from_user(buf, buffer, len))
+ {
+ return -EFAULT;
+ }
+
+ buf[len] = '\0';
+ if (buf[0] == 'I')
+ {
+ // sync before switching between polling and interrupt,
+ n = simple_strtol(buf+1, NULL, 10);
+
+ if ((n > 0 && !g_i4Interrupt) ||
+ (n == 0 && g_i4Interrupt))
+ {
+ nand_get_device(mtd, FL_READING);
+
+ g_i4Interrupt = n;
+
+ if (g_i4Interrupt)
+ {
+ DRV_Reg16(NFI_INTR_REG16);
+ enable_irq(MT6573_NFI_IRQ_LINE);
+ }
+ else{
+ disable_irq(MT6573_NFI_IRQ_LINE);
+ }
+
+ nand_release_device(mtd);
+ }
+ }
+
+ if (buf[0] == 'D')
+ {
+#ifdef _MTK_NAND_DUMMY_DRIVER_
+ printk(KERN_INFO "Enable dummy driver\n");
+ dummy_driver_debug = 1;
+#endif
+ }
+
+#ifdef NAND_PFM
+ if (buf[0] == 'P')
+ {
+ /* Reset values */
+ g_PFM_R = 0;
+ g_PFM_W = 0;
+ g_PFM_E = 0;
+ g_PFM_RD = 0;
+ g_PFM_WD = 0;
+ g_kCMD.pureReadOOBNum = 0;
+ }
+#endif
+
+ //AHB/PIO mode Switch
+ if(buf[0] == 'A'){
+ // sync before switching between polling and interrupt,
+ n = simple_strtol(buf+1, NULL, 10);
+
+ if ((n == 1 && !g_bUseAHBMode) ||
+ (n == 0 && g_bUseAHBMode))
+ {
+ nand_get_device(mtd, FL_READING);
+
+ if(n){
+ g_bUseAHBMode = true;
+ }
+ else{
+ g_bUseAHBMode = false;
+ }
+
+
+ nand_release_device(mtd);
+ }
+ }
+
+ //HW_ECC Switch
+ if(buf[0] == 'E'){
+ // sync before switching between polling and interrupt,
+ n = simple_strtol(buf+1, NULL, 10);
+
+ if ((n == 1 && !g_bHwEcc) ||
+ (n == 0 && g_bHwEcc))
+ {
+ nand_get_device(mtd, FL_READING);
+
+ if(n){
+ g_bHwEcc = true;
+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
+ }
+ else{
+ g_bHwEcc = false;
+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
+ }
+
+
+ nand_release_device(mtd);
+ }
+ }
+
+ //AUTO_FMT Switch
+ if(buf[0] == 'F'){
+ // sync before switching between polling and interrupt,
+ n = simple_strtol(buf+1, NULL, 10);
+
+ if ((n == 1 && !g_bAutoFMT) ||
+ (n == 0 && g_bAutoFMT))
+ {
+ nand_get_device(mtd, FL_READING);
+
+ if(n){
+ g_bAutoFMT = true;
+ mt6573_nand_set_autoformat(true);
+ }
+ else{
+ g_bAutoFMT = false;
+ mt6573_nand_set_autoformat(false);
+ }
+
+
+ nand_release_device(mtd);
+ }
+ }
+
+ //OOB Test Switch
+ if(buf[0] == 'O'){
+ // sync before switching between polling and interrupt,
+ n = simple_strtol(buf+1, NULL, 10);
+
+ if ((n == 1 && !g_bOOB_Test) ||
+ (n == 0 && g_bOOB_Test))
+ {
+ nand_get_device(mtd, FL_READING);
+
+ if(n){
+ g_bOOB_Test = true;
+ }
+ else{
+ g_bOOB_Test = false;
+ }
+
+
+ nand_release_device(mtd);
+ }
+ }
+
+ return len;
+}
+#ifdef TCSUPPORT_NAND_BMT
+int calc_bmt_pool_size(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd->priv;
+ int chip_size = nand->chipsize;
+ int block_size = 1 << nand->phys_erase_shift;
+ int total_block = chip_size / block_size;
+ int last_block = total_block - 1;
+
+ u16 valid_block_num = 0;
+ u16 need_valid_block_num = total_block * POOL_GOOD_BLOCK_PERCENT;
+#if 0
+ printk("need_valid_block_num:%d \n", need_valid_block_num);
+ printk("total block:%d \n", total_block);
+#endif
+ for(;last_block > 0; --last_block)
+ {
+ if(mt6573_nand_block_bad_hw(mtd, last_block * block_size, BAD_BLOCK_RAW))
+ {
+ continue;
+
+ }
+ else
+ {
+ valid_block_num++;
+ if(valid_block_num == need_valid_block_num)
+ {
+ break;
+ }
+
+ }
+
+ }
+
+ return (total_block - last_block);
+
+}
+#endif
+
+static int mt6573_nand_setup(struct mt6573_nand_host_hw *hw)
+{
+ //struct mt6573_nand_host_hw *hw;
+ struct mtd_info *mtd;
+ struct nand_chip *nand_chip;
+ int err = 0;
+ int id;
+ u32 ext_id;
+ u8 ext_id1, ext_id2, ext_id3;
+ int num;
+
+/* OSBNB00043636: to Fix NAND DualImage , tclinux related MTD partition can't be programmed issue 20130313 Byron */
+#if defined(TCSUPPORT_DUAL_IMAGE_ENHANCE)
+ u32 erase_unit;
+#endif
+
+ /***********************************/
+ //return -1;
+ /********************************/
+ /* Allocate memory for the device structure (and zero it) */
+ host = kzalloc(sizeof(struct mt6573_nand_host), GFP_KERNEL);
+ if (!host)
+ {
+ MSG(INIT, "mt6573_nand: failed to allocate device structure.\n");
+ return -ENOMEM;
+ }
+
+ /* for 32bytes alignment */
+ local_buffer_16_align_r = local_buffer_r + 32 - ((u32)local_buffer_r % 32);
+ /* flush cache_page */
+ dma_cache_inv(((unsigned long) local_buffer_16_align_r), 4096);
+ /* Un-cache memory */
+ local_buffer_16_align_r = ((u8 *) K0_TO_K1(local_buffer_16_align_r));
+ printk( "Allocate 16 byte aligned buffer_r: %p\n", local_buffer_16_align_r);
+
+ /* for 32bytes alignment */
+ local_buffer_16_align_w = local_buffer_w + 32 - ((u32)local_buffer_w % 32);
+ /* flush cache_page */
+ dma_cache_inv(((unsigned long) local_buffer_16_align_w), 4096);
+ /* Un-cache memory */
+ local_buffer_16_align_w = ((u8 *) K0_TO_K1(local_buffer_16_align_w));
+ printk( "Allocate 16 byte aligned buffer_w: %p\n", local_buffer_16_align_w);
+
+
+ host->hw = hw;
+
+ /* init mtd data structure */
+ nand_chip = &host->nand_chip;
+ nand_chip->priv = host; /* link the private data structures */
+
+ mtd = &host->mtd;
+ mtd->priv = nand_chip;
+ mtd->owner = THIS_MODULE;
+ mtd->name = "MT6573-Nand";
+
+ hw->nand_ecc_mode = NAND_ECC_HW;
+
+ /* Set address of NAND IO lines */
+ nand_chip->IO_ADDR_R = (void __iomem*)NFI_DATAR_REG32;
+ nand_chip->IO_ADDR_W = (void __iomem*)NFI_DATAW_REG32;
+ nand_chip->chip_delay = 20; /* 20us command delay time */
+ nand_chip->ecc.mode = hw->nand_ecc_mode; /* enable ECC */
+
+ nand_chip->read_byte = mt6573_nand_read_byte;
+ nand_chip->read_buf = mt6573_nand_read_buf;
+ nand_chip->write_buf = mt6573_nand_write_buf;
+#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
+ nand_chip->verify_buf = mt6573_nand_verify_buf;
+#endif
+ nand_chip->select_chip = mt6573_nand_select_chip;
+ nand_chip->dev_ready = mt6573_nand_dev_ready;
+ nand_chip->cmdfunc = mt6573_nand_command_bp;
+ nand_chip->ecc.read_page = mt6573_nand_read_page_hwecc;
+ nand_chip->ecc.write_page = mt6573_nand_write_page_hwecc;
+
+
+ nand_chip->ecc.layout = &nand_oob_64;
+ nand_chip->ecc.size = hw->nand_ecc_size; //2048
+ nand_chip->ecc.bytes = hw->nand_ecc_bytes; //32
+ //nand_chip->options = NAND_USE_FLASH_BBT;
+ nand_chip->options = NAND_SKIP_BBTSCAN;
+ //nand_chip->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR;
+ /*
+ BBT_AUTO_REFRESH |
+ NAND_NO_SUBPAGE_WRITE |
+ NAND_NO_AUTOINCR;
+ */
+
+ // For BMT, we need to revise driver architecture
+ nand_chip->write_page = mt6573_nand_write_page;
+ nand_chip->ecc.write_oob = mt6573_nand_write_oob;
+ //nand_chip->read_page = mt6573_nand_read_page;
+ nand_chip->ecc.read_oob = mt6573_nand_read_oob;
+ nand_chip->block_markbad = mt6573_nand_block_markbad; // need to add nand_get_device()/nand_release_device().
+ //nand_chip->erase = mt6573_nand_erase;
+ nand_chip->block_bad = mt6573_nand_block_bad;
+
+ nand_chip->ecc.calculate = mt6573_nand_calculate_ecc;
+ nand_chip->ecc.correct = mt6573_nand_correct_data;
+ nand_chip->ecc.hwctl = mt6573_nand_enable_hwecc;
+
+
+ err = mt6573_nand_init_hw(host);
+ if(err != 0){
+ MSG(INIT, "Init HW fail! \r\n");
+ goto out;
+ }
+ /* Select the device */
+ nand_chip->select_chip(mtd, 0);
+
+ /*
+ * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
+ * after power-up
+ */
+ nand_chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+
+ /* Send the command for reading device ID */
+ nand_chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+
+
+
+ /* Read manufacturer and device IDs */
+ manu_id = nand_chip->read_byte(mtd);
+ dev_id = nand_chip->read_byte(mtd);
+ printk("manu_id=%x, dev_id=%x\n", manu_id , dev_id);
+
+ ext_id1 = nand_chip->read_byte(mtd);
+ ext_id2 = nand_chip->read_byte(mtd);
+ ext_id3 = nand_chip->read_byte(mtd);
+ ext_id = ext_id1 << 16 | ext_id2 << 8 | ext_id3;
+
+ printk("ext_id=%x,\n", ext_id );
+
+ //Check NAND Info
+ // id = (dev_id<<8)|manu_id;
+ id = dev_id | (manu_id << 8);
+ if(!get_device_info(id, ext_id, &devinfo))
+ {
+ MSG(INIT, "Not Support this Device! \r\n");
+ }
+
+ if (devinfo.pagesize == 4096) {
+ nand_chip->ecc.layout = &nand_oob_128;
+ nand_chip->ecc.size = 4096;
+ nand_chip->ecc.bytes = 64;
+ } else if (devinfo.pagesize == 2048) {
+ nand_chip->ecc.layout = &nand_oob_64;
+ nand_chip->ecc.size = 2048;
+ nand_chip->ecc.bytes = 32;
+ } else if (devinfo.pagesize == 512) {
+ nand_chip->ecc.layout = &nand_oob_16;
+ nand_chip->ecc.size = 512;
+ nand_chip->ecc.bytes = 8;
+ }
+ MSG(INIT, "[NAND] pagesz:%d eccsz: %d, oobsz: %d\n",
+ nand_chip->ecc.size, nand_chip->ecc.bytes, sizeof(g_kCMD.au1OOB));
+
+ MSG(INIT, "Support this Device in MTK table! %x \r\n",id);
+ hw->nfi_bus_width = devinfo.iowidth;
+ DRV_WriteReg32(NFI_ACCCON_REG32, devinfo.timmingsetting);
+
+ /* 16-bit bus width */
+ if (hw->nfi_bus_width == 16)
+ {
+ MSG(INIT, "%s : Set the 16-bit I/O settings!\n", MODULE_NAME);
+ nand_chip->options |= NAND_BUSWIDTH_16;
+ }
+ /* register NFI IRQ handler. */
+ err = request_irq(MT6573_NFI_IRQ_LINE, (irq_handler_t)mt6573_nand_irq_handler, 0,
+ "mt6573-nand", NULL);
+ if (0 != err)
+ {
+ MSG(INIT, "%s : Request IRQ fail: err = %d\n", MODULE_NAME, err);
+ goto out;
+ }
+
+ if (g_i4Interrupt)
+ enable_irq(MT6573_NFI_IRQ_LINE);
+ else
+ disable_irq(MT6573_NFI_IRQ_LINE);
+#if 0
+ if (devinfo.advancedmode & CACHE_READ)
+ {
+ nand_chip->ecc.read_multi_page_cache = NULL;
+ // nand_chip->ecc.read_multi_page_cache = mt6573_nand_read_multi_page_cache;
+ // MSG(INIT, "Device %x support cache read \r\n",id);
+ }
+ else
+ nand_chip->ecc.read_multi_page_cache = NULL;
+#endif
+ /* Scan to find existance of the device */
+ if (nand_scan(mtd, hw->nfi_cs_num))
+ {
+ MSG(INIT, "%s : nand_scan fail.\n", MODULE_NAME);
+ err = -ENXIO;
+ goto out;
+ }
+ g_page_size = mtd->writesize;
+ //platform_set_drvdata(pdev, host);
+
+ if (hw->nfi_bus_width == 16)
+ {
+ NFI_SET_REG16(NFI_PAGEFMT_REG16, PAGEFMT_DBYTE_EN);
+ }
+ nand_chip->select_chip(mtd, 0);
+
+ //printk("phys_erase_shift=%d,page_shift=%d,oob size=%d, mtd size=%x, chip size=%x, chip->numchips = %d\n",nand_chip->phys_erase_shift,nand_chip->page_shift, mtd->oobsize,
+ // mtd->size,nand_chip->chipsize, nand_chip->numchips);
+
+ //nand_chip->chipsize -= (BMT_POOL_SIZE) << nand_chip->phys_erase_shift;
+ mtd->size = nand_chip->chipsize;
+
+#ifdef TCSUPPORT_CT_PON
+ nand_flash_avalable_size = nand_chip->chipsize - MAX_BMT_SIZE * 0x20000;
+#endif
+
+ if (IS_NANDFLASH) {
+ //map->fldrv_priv = ra;
+ ranand_read_byte = ra_nand_read_byte;
+ ranand_read_dword = ra_nand_read_dword;
+ }
+
+
+#ifdef TCSUPPORT_NAND_BMT
+ bmt_pool_size = calc_bmt_pool_size(mtd);
+ /* Check bmt_pool_size */
+ if (bmt_pool_size > MAX_BMT_SIZE)
+ {
+ bmt_pool_size = MAX_BMT_SIZE;
+ }
+ printk("bmt pool size: %d \n", bmt_pool_size);
+ nand_chip->chipsize -= (bmt_pool_size) << nand_chip->phys_erase_shift;
+ if (!g_bmt)
+ {
+ if ( !(g_bmt = init_bmt(nand_chip, bmt_pool_size)) )
+ {
+ printk("Error: init bmt failed \n");
+ return -1;
+ }
+ }
+
+ if (!g_bbt)
+ {
+ if ( !(g_bbt = start_init_bbt()) )
+ {
+ printk("Error: init bbt failed \n");
+ return -1;
+ }
+ }
+
+ if(write_bbt_or_bmt_to_flash() != 0)
+ {
+ printk("Error: save bbt or bmt to nand failed \n");
+ return -1;
+ }
+
+ if(create_badblock_table_by_bbt())
+ {
+ printk("Error: create bad block table failed \n");
+ return -1;
+ }
+
+ mtd->size = nand_logic_size;
+#endif
+
+#if 1//def CONFIG_MTD_PARTITIONS
+
+
+ /* Register the partitions */
+ if (IS_SPIFLASH) {
+ num = ARRAY_SIZE(mt6573_partitions);
+ mt6573_partitions[ num-1 ].size = mtd->size;
+ add_mtd_partitions(mtd, mt6573_partitions, num);
+ }
+
+ //err = add_mtd_partitions(mtd, g_pasStatic_Partition, part_num);
+#else
+
+ err = add_mtd_device(mtd);
+
+#endif
+
+#ifdef _MTK_NAND_DUMMY_DRIVER_
+ dummy_driver_debug = 0;
+#endif
+
+ if (IS_NANDFLASH){
+#ifdef TCSUPPORT_DUAL_IMAGE_ENHANCE
+
+ offset = mtd->size/2;
+#if 1
+/* OSBNB00043636: to Fix NAND DualImage , tclinux related MTD partition can't be programmed issue 20130313 Byron */
+ erase_unit=devinfo.blocksize;
+ erase_unit=erase_unit<<10;
+ offset=(offset/(erase_unit)*(erase_unit));
+#else
+ offset = (offset/NAND_FLASH_BLOCK_SIZE)*NAND_FLASH_BLOCK_SIZE;
+#endif
+ printk("nandinit = offset:%x\n", offset);
+
+#endif
+ }
+
+ /* Successfully!! */
+ if (!err)
+ {
+ MSG(INIT, "[mt6573_nand] probe successfully!\n");
+ nand_disable_clock();
+ return err;
+ }
+
+ /* Fail!! */
+out:
+ MSG(INIT, "[NFI] mt6573_nand_probe fail, err = %d!\n", err);
+
+ nand_release(mtd);
+
+ //platform_set_drvdata(pdev, NULL);
+
+ kfree(host);
+
+ nand_disable_clock();
+ return err;
+}
+
+
+/*NAND Dev*/
+/*=======================================================================*/
+/* MT6573 NAND */
+/*=======================================================================*/
+
+#define NFI_base 0x1FBE0000
+#define NFIECC_base 0x1FBE1000
+static struct resource mt6573_resource_nand[] = {
+ {
+ .start = NFI_base,
+ .end = NFI_base + 0x220,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = NFIECC_base,
+ .end = NFIECC_base + 0x150,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MT6573_NFI_IRQ_LINE,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = MT6573_NFIECC_IRQ_LINE,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mt6573_nand_dev = {
+ .name = "mt6573-nand",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(mt6573_resource_nand),
+ .resource = mt6573_resource_nand,
+ .dev = {
+ .platform_data = &mt6573_nand_hw,
+ },
+};
+
+
+int mt6573_nand_dev_register(void)
+{
+ int retval = 0;
+
+ retval = platform_device_register(&mt6573_nand_dev);
+ if (retval != 0) {
+ printk(KERN_ERR "register nand device fail\n");
+ }
+ return retval;
+
+}
+
+
+/******************************************************************************
+ * mt6573_nand_probe
+ *
+ * DESCRIPTION:
+ * register the nand device file operations !
+ *
+ * PARAMETERS:
+ * struct platform_device *pdev : device structure
+ *
+ * RETURNS:
+ * 0 : Success
+ *
+ * NOTES:
+ * None
+ *
+ ******************************************************************************/
+static int mt6573_nand_probe(struct platform_device *pdev)
+{
+ struct mt6573_nand_host_hw *hw;
+ struct resource *res = pdev->resource;
+ int err = 0;
+
+
+
+ hw = (struct mt6573_nand_host_hw*)pdev->dev.platform_data;
+ BUG_ON(!hw);
+
+ if (pdev->num_resources != 4 ||
+ res[0].flags != IORESOURCE_MEM ||
+ res[1].flags != IORESOURCE_MEM ||
+ res[2].flags != IORESOURCE_IRQ ||
+ res[3].flags != IORESOURCE_IRQ)
+ {
+ MSG(INIT, "%s: invalid resource type\n", __FUNCTION__);
+ return -ENODEV;
+ }
+
+ /* Request IO memory */
+ if (!request_mem_region(res[0].start,
+ res[0].end - res[0].start + 1,
+ pdev->name))
+ {
+ return -EBUSY;
+ }
+ if (!request_mem_region(res[1].start,
+ res[1].end - res[1].start + 1,
+ pdev->name))
+ {
+ return -EBUSY;
+ }
+ err = mt6573_nand_setup(hw);
+ if(!err){
+ platform_set_drvdata(pdev, host);
+
+ }
+ else{
+ platform_set_drvdata(pdev, NULL);
+ }
+ return err;
+}
+/******************************************************************************
+ * mt6573_nand_suspend
+ *
+ * DESCRIPTION:
+ * Suspend the nand device!
+ *
+ * PARAMETERS:
+ * struct platform_device *pdev : device structure
+ *
+ * RETURNS:
+ * 0 : Success
+ *
+ * NOTES:
+ * None
+ *
+ ******************************************************************************/
+static int mt6573_nand_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ if (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY)
+ {
+ MSG(POWERCTL, "[NFI] Busy, Suspend Fail !\n");
+ return 1; // BUSY
+ }
+
+ MSG(POWERCTL, "[NFI] Suspend !\n");
+ return 0;
+}
+/******************************************************************************
+ * mt6573_nand_resume
+ *
+ * DESCRIPTION:
+ * Resume the nand device!
+ *
+ * PARAMETERS:
+ * struct platform_device *pdev : device structure
+ *
+ * RETURNS:
+ * 0 : Success
+ *
+ * NOTES:
+ * None
+ *
+ ******************************************************************************/
+static int mt6573_nand_resume(struct platform_device *pdev)
+{
+ MSG(POWERCTL, "[NFI] Resume !\n");
+ return 0;
+}
+/******************************************************************************
+ * mt6573_nand_remove
+ *
+ * DESCRIPTION:
+ * unregister the nand device file operations !
+ *
+ * PARAMETERS:
+ * struct platform_device *pdev : device structure
+ *
+ * RETURNS:
+ * 0 : Success
+ *
+ * NOTES:
+ * None
+ *
+ ******************************************************************************/
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 36)
+static int __devexit mt6573_nand_remove(struct platform_device *pdev)
+#else
+static int mt6573_nand_remove(struct platform_device *pdev)
+#endif
+{
+ struct mt6573_nand_host *host = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = &host->mtd;
+
+ nand_release(mtd);
+
+ kfree(host);
+
+ nand_disable_clock();
+
+ return 0;
+}
+
+/******************************************************************************
+ * NAND OTP operations
+ * ***************************************************************************/
+#if (NAND_OTP_SUPPORT && SAMSUNG_OTP_SUPPORT)
+unsigned int samsung_OTPQueryLength(unsigned int *QLength)
+{
+ *QLength = SAMSUNG_OTP_PAGE_NUM * g_page_size;
+ return 0;
+}
+
+unsigned int samsung_OTPRead(unsigned int PageAddr, void *BufferPtr, void *SparePtr)
+{
+ struct mtd_info *mtd = &host->mtd;
+ unsigned int rowaddr, coladdr;
+ unsigned int u4Size = g_page_size;
+ unsigned int timeout = 0xFFFF;
+ unsigned int bRet;
+ unsigned int sec_num = mtd->writesize >> 9;
+
+ if(PageAddr >= SAMSUNG_OTP_PAGE_NUM)
+ {
+ return OTP_ERROR_OVERSCOPE;
+ }
+
+ /* Col -> Row; LSB first */
+ coladdr = 0x00000000;
+ rowaddr = Samsung_OTP_Page[PageAddr];
+
+ MSG(OTP, "[%s]:(COLADDR) [0x%08x]/(ROWADDR)[0x%08x]\n", __func__, coladdr, rowaddr);
+nand_get_device(mtd, FL_READING);
+ /* Power on NFI HW component. */
+ // nand_enable_clock();
+
+ mt6573_nand_reset();
+ (void)mt6573_nand_set_command(0x30);
+ mt6573_nand_reset();
+ (void)mt6573_nand_set_command(0x65);
+
+ MSG(OTP, "[%s]: Start to read data from OTP area\n", __func__);
+
+ if (!mt6573_nand_reset())
+ {
+ bRet = OTP_ERROR_RESET;
+ goto cleanup;
+ }
+
+ mt6573_nand_set_mode(CNFG_OP_READ);
+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
+ DRV_WriteReg16(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT);
+
+ DRV_WriteReg32(NFI_STRADDR_REG32, __virt_to_phys(BufferPtr));
+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AHB);
+
+ if(g_bHwEcc){
+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
+ }else{
+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
+ }
+ mt6573_nand_set_autoformat(true);
+ if(g_bHwEcc){
+ ECC_Decode_Start();
+ }
+
+ if (!mt6573_nand_set_command(NAND_CMD_READ0))
+ {
+ bRet = OTP_ERROR_BUSY;
+ goto cleanup;
+ }
+
+ if (!mt6573_nand_set_address(coladdr, rowaddr, 2, 3))
+ {
+ bRet = OTP_ERROR_BUSY;
+ goto cleanup;
+ }
+
+ if (!mt6573_nand_set_command(NAND_CMD_READSTART))
+ {
+ bRet = OTP_ERROR_BUSY;
+ goto cleanup;
+ }
+
+ if (!mt6573_nand_status_ready(STA_NAND_BUSY))
+ {
+ bRet = OTP_ERROR_BUSY;
+ goto cleanup;
+ }
+
+ if (!mt6573_nand_read_page_data(mtd, BufferPtr, u4Size))
+ {
+ bRet = OTP_ERROR_BUSY;
+ goto cleanup;
+ }
+
+ if (!mt6573_nand_status_ready(STA_NAND_BUSY))
+ {
+ bRet = OTP_ERROR_BUSY;
+ goto cleanup;
+ }
+
+ mt6573_nand_read_fdm_data(SparePtr, sec_num);
+
+ mt6573_nand_stop_read();
+
+ MSG(OTP, "[%s]: End to read data from OTP area\n", __func__);
+
+ bRet = OTP_SUCCESS;
+
+cleanup:
+
+ mt6573_nand_reset();
+ (void)mt6573_nand_set_command(0xFF);
+ nand_release_device(mtd);
+ return bRet;
+}
+
+unsigned int samsung_OTPWrite(unsigned int PageAddr, void *BufferPtr, void *SparePtr)
+{
+ struct mtd_info *mtd = &host->mtd;
+ unsigned int rowaddr, coladdr;
+ unsigned int u4Size = g_page_size;
+ unsigned int timeout = 0xFFFF;
+ unsigned int bRet;
+ unsigned int sec_num = mtd->writesize >> 9;
+
+ if(PageAddr >= SAMSUNG_OTP_PAGE_NUM)
+ {
+ return OTP_ERROR_OVERSCOPE;
+ }
+
+ /* Col -> Row; LSB first */
+ coladdr = 0x00000000;
+ rowaddr = Samsung_OTP_Page[PageAddr];
+
+ MSG(OTP, "[%s]:(COLADDR) [0x%08x]/(ROWADDR)[0x%08x]\n", __func__, coladdr, rowaddr);
+nand_get_device(mtd, FL_READING);
+ mt6573_nand_reset();
+ (void)mt6573_nand_set_command(0x30);
+ mt6573_nand_reset();
+ (void)mt6573_nand_set_command(0x65);
+
+ MSG(OTP, "[%s]: Start to write data to OTP area\n", __func__);
+
+ if (!mt6573_nand_reset())
+ {
+ bRet = OTP_ERROR_RESET;
+ goto cleanup;
+ }
+
+ mt6573_nand_set_mode(CNFG_OP_PRGM);
+
+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
+
+ DRV_WriteReg16(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT);
+
+ DRV_WriteReg32(NFI_STRADDR_REG32, __virt_to_phys(BufferPtr));
+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AHB);
+
+if(g_bHwEcc){
+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
+}else{
+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
+}
+
+ mt6573_nand_set_autoformat(true);
+
+ ECC_Encode_Start();
+
+ if (!mt6573_nand_set_command(NAND_CMD_SEQIN))
+ {
+ bRet = OTP_ERROR_BUSY;
+ goto cleanup;
+ }
+
+ if (!mt6573_nand_set_address(coladdr, rowaddr, 2, 3))
+ {
+ bRet = OTP_ERROR_BUSY;
+ goto cleanup;
+ }
+
+ if (!mt6573_nand_status_ready(STA_NAND_BUSY))
+ {
+ bRet = OTP_ERROR_BUSY;
+ goto cleanup;
+ }
+
+ mt6573_nand_write_fdm_data((struct nand_chip *)mtd->priv, BufferPtr, sec_num);
+ (void)mt6573_nand_write_page_data(mtd, BufferPtr, u4Size);
+ if(!mt6573_nand_check_RW_count(u4Size))
+ {
+ MSG(OTP, "[%s]: Check RW count timeout !\n", __func__);
+ bRet = OTP_ERROR_TIMEOUT;
+ goto cleanup;
+ }
+
+ mt6573_nand_stop_write();
+ (void)mt6573_nand_set_command(NAND_CMD_PAGEPROG);
+ while(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY);
+
+ bRet = OTP_SUCCESS;
+
+ MSG(OTP, "[%s]: End to write data to OTP area\n", __func__);
+
+cleanup:
+ mt6573_nand_reset();
+ (void)mt6573_nand_set_command(0xFF);
+ nand_release_device(mtd);
+ return bRet;
+}
+#endif
+
+#if NAND_OTP_SUPPORT
+static int mt_otp_open(struct inode *inode, struct file *filp)
+{
+ MSG(OTP, "[%s]:(MAJOR)%d:(MINOR)%d\n", __func__, MAJOR(inode->i_rdev), MINOR(inode->i_rdev));
+ filp->private_data = (int*)OTP_MAGIC_NUM;
+ return 0;
+}
+
+static int mt_otp_release(struct inode *inode, struct file *filp)
+{
+ MSG(OTP, "[%s]:(MAJOR)%d:(MINOR)%d\n", __func__, MAJOR(inode->i_rdev), MINOR(inode->i_rdev));
+ return 0;
+}
+
+static int mt_otp_access(unsigned int access_type, unsigned int offset, void *buff_ptr, unsigned int length, unsigned int *status)
+{
+ unsigned int i = 0, ret = 0;
+ char *BufAddr = (char *)buff_ptr;
+ unsigned int PageAddr, AccessLength=0;
+ int Status = 0;
+
+ static char *p_D_Buff = NULL;
+ char S_Buff[64];
+
+ if (!(p_D_Buff = kmalloc(g_page_size, GFP_KERNEL)))
+ {
+ ret = -ENOMEM;
+ *status = OTP_ERROR_NOMEM;
+ goto exit;
+ }
+
+ MSG(OTP, "[%s]: %s (0x%x) length:(%d bytes) !\n", __func__, access_type?"WRITE":"READ", offset, length);
+
+ while(1)
+ {
+ PageAddr = offset/g_page_size;
+ if(FS_OTP_READ == access_type)
+ {
+ memset(p_D_Buff, 0xff, g_page_size);
+ memset(S_Buff, 0xff, (sizeof(char)*64));
+
+ MSG(OTP, "[%s]: Read Access of page (%d)\n",__func__, PageAddr);
+
+ Status = g_mt6573_otp_fuc.OTPRead(PageAddr, p_D_Buff, &S_Buff);
+ *status = Status;
+
+ if( OTP_SUCCESS != Status)
+ {
+ MSG(OTP, "[%s]: Read status (%d)\n", __func__, Status);
+ break;
+ }
+
+ AccessLength = g_page_size - (offset % g_page_size);
+
+ if(length >= AccessLength)
+ {
+ memcpy(BufAddr, (p_D_Buff+(offset % g_page_size)), AccessLength);
+ }
+ else
+ {
+ //last time
+ memcpy(BufAddr, (p_D_Buff+(offset % g_page_size)), length);
+ }
+ }
+ else if(FS_OTP_WRITE == access_type)
+ {
+ AccessLength = g_page_size - (offset % g_page_size);
+ memset(p_D_Buff, 0xff, g_page_size);
+ memset(S_Buff, 0xff, (sizeof(char)*64));
+
+ if(length >= AccessLength)
+ {
+ memcpy((p_D_Buff+(offset % g_page_size)), BufAddr, AccessLength);
+ }
+ else
+ {
+ //last time
+ memcpy((p_D_Buff+(offset % g_page_size)), BufAddr, length);
+ }
+
+ Status = g_mt6573_otp_fuc.OTPWrite(PageAddr, p_D_Buff, &S_Buff);
+ *status = Status;
+
+ if( OTP_SUCCESS != Status)
+ {
+ MSG(OTP, "[%s]: Write status (%d)\n",__func__, Status);
+ break;
+ }
+ }
+ else
+ {
+ MSG(OTP, "[%s]: Error, not either read nor write operations !\n",__func__);
+ break;
+ }
+
+ offset += AccessLength;
+ BufAddr += AccessLength;
+ if(length <= AccessLength)
+ {
+ length = 0;
+ break;
+ }
+ else
+ {
+ length -= AccessLength;
+ MSG(OTP, "[%s]: Remaining %s (%d) !\n",__func__, access_type?"WRITE":"READ", length);
+ }
+ }
+error:
+ kfree(p_D_Buff);
+exit:
+ return ret;
+}
+
+static long mt_otp_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret = 0, i=0;
+ static char *pbuf = NULL;
+
+ void __user *uarg = (void __user *)arg;
+ struct otp_ctl otpctl;
+
+ /* Lock */
+ spin_lock(&g_OTPLock);
+
+ if (copy_from_user(&otpctl, uarg, sizeof(struct otp_ctl)))
+ {
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ if(false == g_bInitDone)
+ {
+ MSG(OTP, "ERROR: NAND Flash Not initialized !!\n");
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ if (!(pbuf = kmalloc(sizeof(char)*otpctl.Length, GFP_KERNEL)))
+ {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ switch (cmd)
+ {
+ case OTP_GET_LENGTH:
+ MSG(OTP, "OTP IOCTL: OTP_GET_LENGTH\n");
+ g_mt6573_otp_fuc.OTPQueryLength(&otpctl.QLength);
+ otpctl.status = OTP_SUCCESS;
+ MSG(OTP, "OTP IOCTL: The Length is %d\n", otpctl.QLength);
+ break;
+ case OTP_READ:
+ MSG(OTP, "OTP IOCTL: OTP_READ Offset(0x%x), Length(0x%x) \n", otpctl.Offset, otpctl.Length);
+ memset(pbuf, 0xff, sizeof(char)*otpctl.Length);
+
+ mt_otp_access(FS_OTP_READ, otpctl.Offset, pbuf, otpctl.Length, &otpctl.status);
+
+ if (copy_to_user(otpctl.BufferPtr, pbuf, (sizeof(char)*otpctl.Length)))
+ {
+ MSG(OTP, "OTP IOCTL: Copy to user buffer Error !\n");
+ goto error;
+ }
+ break;
+ case OTP_WRITE:
+ MSG(OTP, "OTP IOCTL: OTP_WRITE Offset(0x%x), Length(0x%x) \n", otpctl.Offset, otpctl.Length);
+ if (copy_from_user(pbuf, otpctl.BufferPtr, (sizeof(char)*otpctl.Length)))
+ {
+ MSG(OTP, "OTP IOCTL: Copy from user buffer Error !\n");
+ goto error;
+ }
+ mt_otp_access(FS_OTP_WRITE, otpctl.Offset , pbuf, otpctl.Length, &otpctl.status);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ ret = copy_to_user(uarg, &otpctl, sizeof(struct otp_ctl));
+
+error:
+ kfree(pbuf);
+exit:
+ spin_unlock(&g_OTPLock);
+ return ret;
+}
+
+static struct file_operations nand_otp_fops = {
+ .owner= THIS_MODULE,
+ .ioctl= mt_otp_ioctl,
+ .open= mt_otp_open,
+ .release= mt_otp_release,
+};
+
+static struct miscdevice nand_otp_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "otp",
+ .fops = &nand_otp_fops,
+};
+#endif
+
+static struct mtd_info *nandflash_probe(struct map_info *map)
+//int __devinit ra_nand_init(void)
+{
+ int err = 0;
+
+ printk("tc3162 mtd init: nandflash_probe enter\n");
+
+ err = mt6573_nand_setup(&mt6573_nand_hw);
+ if(!err){
+ return &(host->mtd);
+
+ }
+ else{
+ return NULL;
+
+ }
+ //#ifdef TCSUPPORT_DUAL_IMAGE_ENHANCE
+ // offset = (1 << ra->flash->chip_shift)/2;
+ //#endif
+
+ //return &host->mtd;
+}
+
+static void nandflash_destroy(struct mtd_info *mtd)
+//static void __devexit ra_nand_remove(void)
+{
+ //struct mt6573_nand_host *host = platform_get_drvdata(pdev);
+ if(host){
+ struct mtd_info *mtd = &host->mtd;
+
+ nand_release(mtd);
+
+ kfree(host);
+ }
+
+ nand_disable_clock();
+
+
+}
+
+
+static struct mtd_chip_driver nandflash_chipdrv = {
+ .probe = nandflash_probe,
+ .destroy = nandflash_destroy,
+ .name = "nandflash_probe",
+ .module = THIS_MODULE
+};
+
+
+/******************************************************************************
+Device driver structure
+******************************************************************************/
+static struct platform_driver mt6573_nand_driver = {
+ .probe = mt6573_nand_probe,
+ .remove = mt6573_nand_remove,
+ .suspend = mt6573_nand_suspend,
+ .resume = mt6573_nand_resume,
+ .driver = {
+ .name = "mt6573-nand",
+ .owner = THIS_MODULE,
+ },
+};
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 36)
+int __devinit ra_nand_init(void)
+#else
+int ra_nand_init(void)
+#endif
+{
+ return platform_driver_register(&mt6573_nand_driver);
+}
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 36)
+void __devexit ra_nand_remove(void)
+#else
+void ra_nand_remove(void)
+#endif
+{
+ platform_driver_unregister(&mt6573_nand_driver);
+}
+
+
+/******************************************************************************
+ * mt6573_nand_init
+ *
+ * DESCRIPTION:
+ * Init the device driver !
+ *
+ * PARAMETERS:
+ * None
+ *
+ * RETURNS:
+ * None
+ *
+ * NOTES:
+ * None
+ *
+ ******************************************************************************/
+static int __init mt6573_nand_init(void)
+{
+ struct proc_dir_entry *entry;
+
+//#ifdef CONFIG_MTK_MTD_NAND_INTERRUPT_SCHEME
+ // boot up using polling mode
+ g_i4Interrupt = 0;
+//#else
+// g_i4Interrupt = 0;
+//#endif
+
+ printk("tc3162 mtd init: mt6573_nand_init enter\n");
+
+#if NAND_OTP_SUPPORT
+ int err = 0;
+ MSG(OTP, "OTP: register NAND OTP device ...\n");
+ err = misc_register(&nand_otp_dev);
+ if (unlikely(err))
+ {
+ MSG(OTP, "OTP: failed to register NAND OTP device!\n");
+ return err;
+ }
+ spin_lock_init(&g_OTPLock);
+#endif
+
+#if (NAND_OTP_SUPPORT && SAMSUNG_OTP_SUPPORT)
+ g_mt6573_otp_fuc.OTPQueryLength = samsung_OTPQueryLength;
+ g_mt6573_otp_fuc.OTPRead = samsung_OTPRead;
+ g_mt6573_otp_fuc.OTPWrite = samsung_OTPWrite;
+#endif
+
+
+ entry = create_proc_entry(PROCNAME, 0666, NULL);
+ if (entry == NULL)
+ {
+ MSG(INIT, "MT6573 Nand : unable to create /proc entry\n");
+ return -ENOMEM;
+ }
+ entry->read_proc = mt6573_nand_proc_read;
+ entry->write_proc = mt6573_nand_proc_write;
+
+#ifdef NAND_ECC_TEST
+ entry = create_proc_entry(PROCNAME_ECC, 0666, NULL);
+ if (entry == NULL)
+ {
+ MSG(INIT, "MT6573 Nand : unable to create /proc entry\n");
+ return -ENOMEM;
+ }
+ entry->read_proc = mt6573_nand_ecc_proc_read;
+ entry->write_proc = mt6573_nand_ecc_proc_write;
+ //entry->owner = THIS_MODULE;
+#endif
+ MSG(INIT, "MediaTek MT6573 Nand driver init, version %s\n", VERSION);
+ if(IS_SPIFLASH){ //SPI Flash boot
+ //return platform_driver_register(&mt6573_nand_driver);
+ return mt6573_nand_dev_register();
+ }
+ else{
+ register_mtd_chip_driver(&nandflash_chipdrv);
+ return 0;
+ }
+
+
+}
+
+/******************************************************************************
+ * mt6573_nand_exit
+ *
+ * DESCRIPTION:
+ * Free the device driver !
+ *
+ * PARAMETERS:
+ * None
+ *
+ * RETURNS:
+ * None
+ *
+ * NOTES:
+ * None
+ *
+ ******************************************************************************/
+static void __exit mt6573_nand_exit(void)
+{
+ MSG(INIT, "MediaTek MT6573 Nand driver exit, version %s\n", VERSION);
+
+#if NAND_OTP_SUPPORT
+ misc_deregister(&nand_otp_dev);
+#endif
+
+#ifdef SAMSUNG_OTP_SUPPORT
+ g_mt6573_otp_fuc.OTPQueryLength = NULL;
+ g_mt6573_otp_fuc.OTPRead = NULL;
+ g_mt6573_otp_fuc.OTPWrite = NULL;
+#endif
+ if(IS_SPIFLASH){ //SPI FLASH
+ //platform_driver_unregister(&mt6573_nand_driver);
+ }
+ else{
+ unregister_mtd_chip_driver(&nandflash_chipdrv);
+ }
+ remove_proc_entry(PROCNAME, NULL);
+ #ifdef NAND_ECC_TEST
+ remove_proc_entry(PROCNAME_ECC, NULL);
+ #endif
+}
+
+
+module_init(mt6573_nand_init);
+module_exit(mt6573_nand_exit);
+MODULE_LICENSE("GPL");
Index: linux-3.18.21/drivers/mtd/mtk/nand_devicelist.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/mtd/mtk/nand_devicelist.h 2018-02-05 13:20:41.000000000 +0800
@@ -0,0 +1,108 @@
+/* Copyright Statement:
+ *
+ * This software/firmware and related documentation ("MediaTek Software") are
+ * protected under relevant copyright laws. The information contained herein
+ * is confidential and proprietary to MediaTek Inc. and/or its licensors.
+ * Without the prior written permission of MediaTek inc. and/or its licensors,
+ * any reproduction, modification, use or disclosure of MediaTek Software,
+ * and information contained herein, in whole or in part, shall be strictly prohibited.
+ */
+/* MediaTek Inc. (C) 2010. All rights reserved.
+ *
+ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
+ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
+ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
+ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
+ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
+ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
+ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
+ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
+ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
+ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
+ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
+ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
+ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
+ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
+ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
+ *
+ * The following software/firmware and/or related documentation ("MediaTek Software")
+ * have been modified by MediaTek Inc. All revisions are subject to any receiver's
+ * applicable license agreements with MediaTek Inc.
+ */
+
+
+#if 0
+#define RAMDOM_READ 1<<0 //0x05 0xE0
+#define CACHE_READ 1<<1 //0x31 0x3f
+
+typedef struct
+{
+ u16 id; //deviceid+menuid
+ u8 addr_cycle;
+ u8 iowidth;
+ u16 totalsize;
+ u16 blocksize;
+ u16 pagesize;
+ u32 timmingsetting;
+ char devciename[14];
+ u32 advancedmode; //
+}flashdev_info,*pflashdev_info;
+
+
+
+static const flashdev_info gen_FlashTable[]={
+ //micro
+ {0xAA2C, 5, 8, 256, 128, 2048, 0x01113, "MT29F2G08ABD", 0},
+ {0xB12C, 4, 16, 128, 128, 2048, 0x01113, "MT29F1G16ABC", 0},
+ {0xBA2C, 5, 16, 256, 128, 2048, 0x01113, "MT29F2G16ABD", 0},
+ {0xAC2C, 5, 8, 512, 128, 2048, 0x01113, "MT29F4G08ABC", RAMDOM_READ|CACHE_READ},
+ {0xA12C, 4, 8, 128, 128, 2048, 0x01113, "MT29F1G08ABB", 0},
+ {0xBC2C, 5, 16, 512, 128, 2048, 0x01112, "MT29F4G16ABBDA", RAMDOM_READ|CACHE_READ},
+ //samsung
+ {0xBAEC, 5, 16, 256, 128, 2048, 0x01123, "K522H1GACE", 0},
+ {0xBCEC, 5, 16, 512, 128, 2048, 0x01123, "K524G2GACB", RAMDOM_READ},
+ {0xDAEC, 5, 8, 256, 128, 2048, 0x33222, "K9F2G08U0A", 0},
+ {0xF1EC, 4, 8, 128, 128, 2048, 0x01123, "K9F1G08U0A", RAMDOM_READ},
+ {0xAAEC, 5, 8, 256, 128, 2048, 0x01123, "K9F2G08R0A", RAMDOM_READ},
+ //hynix
+ {0xD3AD, 5, 8, 1024, 256, 2048, 0x44333, "HY27UT088G2A", 0},
+ {0xA1AD, 4, 8, 128, 128, 2048, 0x01123, "H8BCSOPJOMCP", 0},
+ {0xBCAD, 5, 16, 512, 128, 2048, 0x01123, "H8BCSOUNOMCR", 0},
+ {0xBAAD, 5, 16, 256, 128, 2048, 0x01123, "H8BCSOSNOMCR", 0},
+ //toshiba
+ {0x9598, 5, 16, 816, 128, 2048, 0x00113, "TY9C000000CMG", 0},
+ {0x9498, 5, 16, 375, 128, 2048, 0x00113, "TY9C000000CMG", 0},
+ {0xBC98, 5, 16, 512, 128, 2048, 0x02113, "TY58NYG2S8E", RAMDOM_READ},
+ {0xC198, 4, 16, 128, 128, 2048, 0x44333, "TC58NWGOS8C", 0},
+ {0xBA98, 5, 16, 256, 128, 2048, 0x02113, "TC58NYG1S8C", 0},
+ //st-micro
+ {0xBA20, 5, 16, 256, 128, 2048, 0x01123, "ND02CGR4B2DI6", 0},
+ {0xBC20, 5, 16, 512, 128, 2048, 0x01123, "ST04GR4B", 0},
+ {0x0000, 0, 0, 0, 0, 0, 0, "xxxxxxxxxxxxx", 0}
+};
+#endif
+static const flashdev_info gen_FlashTable[]={
+ //st
+ {0x2075, 0x207520 ,4, 8, 32, 16, 512, 0x44333, "NAND256W3A", 0}, //32M,512 Page size
+ //micro
+ {0x2CDA, 0x909504, 5, 8, 256, 128, 2048, 0x44333, "MT29F2G08AAB", 0}, //256M, 2K Page Size
+ //{0x2CDC, 0x909504, 5, 8, 512, 128, 2048, 0x44333, "MT29F4G08AAC", 0}, //512M, 2K Page Size
+ //samsung
+ {0xECDC, 0x109554, 5, 8, 512, 128, 2048, 0x44333, "K9F4G08U0D", 0},//512M, 2K Page Size
+ //toshiba
+ {0x98D1, 0x901576, 4, 8, 128, 128, 2048, 0x44333, "TC58NVG0S3E", 0}, //128M
+ {0x98D3, 0x902676, 5, 8, 1024, 256, 4096, 0x44333, "TC58NVG3S0F", 0}, //1024M
+ //Winbond
+ {0xeff1, 0x809500, 4, 8, 128, 128, 2048, 0x44333, "W29N01GV", 0}, //128M
+ //MXIC
+ {0xc2f1, 0x801dc2, 4, 8, 128, 128, 2048, 0x44333, "MX30LF1G08", 0}, //128M
+ {0xc2f1, 0x809502, 4, 8, 128, 128, 2048, 0x44333, "MX30LF1G18AC", 0}, //128M
+ //ESMT
+ {0x92f1, 0x809540, 4, 8, 128, 128, 2048, 0x44333, "F59L1G81A", 0}, //128M
+
+ {0x0000, 0, 0, 0, 0, 0, 0, 0, "xxxxxxxxxxxxx", 0}
+};
+
Index: linux-3.18.21/drivers/mtd/nand/nand_base.c
===================================================================
--- linux-3.18.21.orig/drivers/mtd/nand/nand_base.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/mtd/nand/nand_base.c 2018-02-05 13:20:41.000000000 +0800
@@ -93,7 +93,7 @@
.length = 78} }
};
-static int nand_get_device(struct mtd_info *mtd, int new_state);
+int nand_get_device(struct mtd_info *mtd, int new_state);
static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops);
@@ -131,7 +131,7 @@
*
* Release chip lock and wake up anyone waiting on the device.
*/
-static void nand_release_device(struct mtd_info *mtd)
+void nand_release_device(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
@@ -803,7 +803,7 @@
*
* Get the device and lock it for exclusive access
*/
-static int
+int
nand_get_device(struct mtd_info *mtd, int new_state)
{
struct nand_chip *chip = mtd->priv;
@@ -2387,7 +2387,10 @@
return 0;
/* Reject writes, which are not page aligned */
+ #if 0
if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
+ #endif
+ if (NOTALIGNED(to)) {
pr_notice("%s: attempt to write non page aligned data\n",
__func__);
return -EINVAL;
@@ -2424,7 +2427,8 @@
int cached = writelen > bytes && page != blockmask;
uint8_t *wbuf = buf;
int use_bufpoi;
- int part_pagewr = (column || writelen < (mtd->writesize - 1));
+ //int part_pagewr = (column || writelen < (mtd->writesize - 1));
+ int part_pagewr = (column || writelen < (mtd->writesize));
if (part_pagewr)
use_bufpoi = 1;
@@ -2461,7 +2465,7 @@
break;
writelen -= bytes;
- if (!writelen)
+ if (writelen<=0)
break;
column = 0;
@@ -3987,10 +3991,12 @@
ecc->write_oob = nand_write_oob_syndrome;
if (mtd->writesize >= ecc->size) {
+ #if 0
if (!ecc->strength) {
pr_warn("Driver must set ecc.strength when using hardware ECC\n");
BUG();
}
+ #endif
break;
}
pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
Index: linux-3.18.21/drivers/net/Kconfig
===================================================================
--- linux-3.18.21.orig/drivers/net/Kconfig 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/net/Kconfig 2018-02-05 13:20:42.000000000 +0800
@@ -26,6 +26,7 @@
if NETDEVICES
config MII
+ default y
tristate
config NET_CORE
Index: linux-3.18.21/drivers/net/ppp/ppp_generic.c
===================================================================
--- linux-3.18.21.orig/drivers/net/ppp/ppp_generic.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/net/ppp/ppp_generic.c 2018-02-05 13:20:46.000000000 +0800
@@ -54,6 +54,8 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <ecnt_hook/ecnt_hook_net.h>
+
#define PPP_VERSION "2.4.2"
/*
@@ -975,6 +977,15 @@
struct ppp *ppp = netdev_priv(dev);
int npi, proto;
unsigned char *pp;
+ struct net_data_s net_data;
+ int ret = -1;
+
+ net_data.pskb = &skb;
+ ret = ECNT_PPP_GENERIC_HOOK(ECNT_NET_PPP_XMIT,&net_data);
+ if (ECNT_RETURN_DROP == ret)
+ {
+ goto outf;
+ }
npi = ethertype_to_npindex(ntohs(skb->protocol));
if (npi < 0)
Index: linux-3.18.21/drivers/net/ppp/pppoe.c
===================================================================
--- linux-3.18.21.orig/drivers/net/ppp/pppoe.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/net/ppp/pppoe.c 2018-02-05 13:20:47.000000000 +0800
@@ -570,7 +570,7 @@
po = pppox_sk(sk);
- if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
+ if(po->pppoe_dev){
dev_put(po->pppoe_dev);
po->pppoe_dev = NULL;
}
Index: linux-3.18.21/drivers/net/wireless/Kconfig
===================================================================
--- linux-3.18.21.orig/drivers/net/wireless/Kconfig 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/net/wireless/Kconfig 2018-02-05 13:20:47.000000000 +0800
@@ -17,6 +17,16 @@
if WLAN
+config RALINK_WIRELESS
+ tristate "Ralink wireless support"
+ depends on PCI
+ select WIRELESS_EXT
+ select WEXT_PRIV
+ select CRC_CCITT
+ select FW_LOADER
+ ---help---
+ Ralink wireless driver support
+
config PCMCIA_RAYCS
tristate "Aviator/Raytheon 2.4GHz wireless support"
depends on PCMCIA
Index: linux-3.18.21/drivers/net/wireless/Makefile
===================================================================
--- linux-3.18.21.orig/drivers/net/wireless/Makefile 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/net/wireless/Makefile 2018-02-05 13:20:47.000000000 +0800
@@ -57,6 +57,12 @@
obj-$(CONFIG_BRCMFMAC) += brcm80211/
obj-$(CONFIG_BRCMSMAC) += brcm80211/
+ifeq ($(TCSUPPORT_WLAN_MT7612_BUILD_IN),1)
+obj-y += ../../../../modules/private/wifi/$(MT7612_MODULES)/
+endif
+ifeq ($(TCSUPPORT_WLAN_MT7615_BUILD_IN),1)
+obj-y += ../../../../modules/private/wifi/$(MT7615_MODULES)/embedded/
+endif
obj-$(CONFIG_CW1200) += cw1200/
obj-$(CONFIG_RSI_91X) += rsi/
Index: linux-3.18.21/drivers/pci/pci.c
===================================================================
--- linux-3.18.21.orig/drivers/pci/pci.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/pci/pci.c 2018-02-05 13:20:52.000000000 +0800
@@ -26,6 +26,7 @@
#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
#include "pci.h"
+#include <asm/tc3162/tc3162.h>
const char *pci_power_names[] = {
"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
@@ -181,6 +182,11 @@
u16 status;
pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
+
+ /* workaround for fixing the the bug (the value of configuration reg 0x100 is wrong)
+ & bonding mode problem (it doesn't to scan bus1/dev1,2.....) on MT7510 */
+ if(isMT751020 || isMT7505)
+ return 0;
if (!(status & PCI_STATUS_CAP_LIST))
return 0;
Index: linux-3.18.21/drivers/scsi/Makefile
===================================================================
--- linux-3.18.21.orig/drivers/scsi/Makefile 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/scsi/Makefile 2018-02-05 13:20:55.000000000 +0800
@@ -172,6 +172,7 @@
hv_storvsc-y := storvsc_drv.o
sd_mod-objs := sd.o
+sd_mod-y += ecnt_scsi.o
sd_mod-$(CONFIG_BLK_DEV_INTEGRITY) += sd_dif.o
sr_mod-objs := sr.o sr_ioctl.o sr_vendor.o
Index: linux-3.18.21/drivers/scsi/ecnt_scsi.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/scsi/ecnt_scsi.c 2018-02-05 13:20:56.000000000 +0800
@@ -0,0 +1,179 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation ("EcoNet Software")
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited ("EcoNet") and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS ("ECONET SOFTWARE") RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN "AS IS"
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER'S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+
+struct mutex signal_mutex;
+struct task_struct *mount_task_p;
+
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+
+/************************************************************************
+* E X T E R N A L D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+*************************************************************************
+*/
+int send_signal_to_app(int signal);
+int init_auto_mount(void);
+
+/************************************************************************
+* P U B L I C D A T A
+*************************************************************************
+*/
+
+/************************************************************************
+* P R I V A T E D A T A
+*************************************************************************
+*/
+
+
+/************************************************************************
+* F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+
+/*---------------------------------------------------------------------
+ ** function name: automount_pid_write_proc
+ *
+ ** description:
+ * 1. receives AutoMount's PID from user space and turns it into
+ * a task_struct pointer which will be used to send signals to
+ * AutoMount later.
+ *
+ ** parameters:
+ * 1. buffer: the pointer pointing to the string received from user
+ * 2. count: the length of the string in buffer
+ *
+ ** global:
+ * 1. mount_task_p: the pointer pointing to the task_struct structure
+ * that is going to be used to send signals to AutoMount
+ *
+ ** return: int
+ *
+ ** call: none
+ *
+ ** revision:
+ * 1. Trey 2010/08/16
+ *---------------------------------------------------------------------*/
+static int automount_pid_write_proc(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ char str_pid[16];
+ pid_t mount_pid = 0;
+ char *cp;
+
+ if (copy_from_user(str_pid, buffer, count)){
+ printk("automount_pid_write_proc failed\n");
+ return -EFAULT;
+ }
+
+ str_pid[count] = '\0';
+
+ /* atoi func */
+ for (cp = str_pid; *cp != '\0'; cp++) {
+ switch (*cp) {
+ case '0' ... '9':
+ mount_pid = 10*mount_pid+(*cp-'0');
+ break;
+ default:
+ break;
+ }
+ }
+ mount_task_p = find_task_by_vpid(mount_pid);
+
+ return 0;
+}
+
+int send_signal_to_app(int signal)
+{
+ if(mount_task_p != NULL)
+ {
+ mutex_lock(&signal_mutex);
+ send_sig(signal, mount_task_p, 0);
+ mutex_unlock(&signal_mutex);
+ }
+ return 0;
+}
+
+int init_auto_mount(void)
+{
+ struct proc_dir_entry *automount_proc;
+
+/* 1. mount_task_p: a pointer which will (later) point to the task_struct
+ * structure that stands for AutoMount's PID. The pointer will be used
+ * by send_sig() to send SIGUSR1 or SIGUSR2 to AutoMount.
+ * 2. signal_mutex: a mutex used to prevent more than one signals are sent
+ * at the same time.
+ * 3. automount_proc: used when AutoMount write its PID to
+ * "tc3162/automount_pid", automount_pid_write_proc() will be
+ * triggered to transform the PID to the task_struct structure which
+ * is pointed by mount_task_p. Then, mount_task_p can be used to send
+ * signals to AutoMount. --Trey 2010/08/17
+ */
+ mount_task_p = NULL;
+ mutex_init(&signal_mutex);
+ automount_proc = create_proc_entry("tc3162/automount_pid", 0, NULL);
+ automount_proc->write_proc = automount_pid_write_proc;
+ return 0;
+}
+
Index: linux-3.18.21/drivers/scsi/sd.c
===================================================================
--- linux-3.18.21.orig/drivers/scsi/sd.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/scsi/sd.c 2018-02-05 13:20:57.000000000 +0800
@@ -2945,6 +2945,7 @@
sdp->removable ? "removable " : "");
scsi_autopm_put_device(sdp);
put_device(&sdkp->dev);
+ send_signal_to_app(SIGUSR1);
}
/**
@@ -3087,7 +3088,7 @@
dev_set_drvdata(dev, NULL);
put_device(&sdkp->dev);
mutex_unlock(&sd_ref_mutex);
-
+ send_signal_to_app(SIGUSR2);
return 0;
}
@@ -3250,6 +3251,7 @@
{
int majors = 0, i, err;
+ init_auto_mount();
SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n"));
for (i = 0; i < SD_MAJORS; i++) {
Index: linux-3.18.21/drivers/tty/serial/Kconfig
===================================================================
--- linux-3.18.21.orig/drivers/tty/serial/Kconfig 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/tty/serial/Kconfig 2018-02-05 13:21:04.000000000 +0800
@@ -1365,6 +1365,11 @@
help
Enable a Altera UART port to be the system console.
+config SERIAL_TC3162
+ bool "TC3162/TC3262 serial port support"
+ select SERIAL_CORE
+ depends on (MIPS_TC3162 || MIPS_TC3162U || MIPS_TC3262)
+
config SERIAL_IFX6X60
tristate "SPI protocol driver for Infineon 6x60 modem (EXPERIMENTAL)"
depends on GPIOLIB && SPI
Index: linux-3.18.21/drivers/tty/serial/Makefile
===================================================================
--- linux-3.18.21.orig/drivers/tty/serial/Makefile 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/tty/serial/Makefile 2018-02-05 13:21:04.000000000 +0800
@@ -79,6 +79,10 @@
obj-$(CONFIG_SERIAL_VT8500) += vt8500_serial.o
obj-$(CONFIG_SERIAL_MRST_MAX3110) += mrst_max3110.o
obj-$(CONFIG_SERIAL_MFD_HSU) += mfd.o
+obj-$(CONFIG_SERIAL_TC3162) += tc3162_uart.o
+ifeq ($(TCSUPPORT_UART2),1)
+obj-y += tc3162_uart2.o
+endif
obj-$(CONFIG_SERIAL_IFX6X60) += ifx6x60.o
obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o
obj-$(CONFIG_SERIAL_MSM_SMD) += msm_smd_tty.o
Index: linux-3.18.21/drivers/tty/serial/tc3162_uart.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/tty/serial/tc3162_uart.c 2018-02-05 13:21:05.000000000 +0800
@@ -0,0 +1,1030 @@
+/*
+ * Serial driver for TC3162 SoC
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/console.h>
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <linux/slab.h>
+
+#include <asm/tc3162/tc3162.h>
+#ifdef TCSUPPORT_UART1_ENHANCE
+#include <linux/proc_fs.h>
+#include <linux/kthread.h>
+#include <asm/errno.h>
+#include <linux/random.h>
+#endif
+
+#define TC3162_NR_PORTS 1
+
+#define TC3162_UART_SIZE 0x30
+
+#define PORT_TC3162 3162
+#ifdef CONFIG_TC3162_ADSL
+void (*send_uart_msg)(char* msg, int len);
+EXPORT_SYMBOL(send_uart_msg);
+static char tuart_buf[1024];
+#endif
+
+#ifdef TCSUPPORT_UART1_ENHANCE
+#define ENABLE (1)
+#define DISABLE (0)
+#define SUCCESS (0)
+#define FALSE (1)
+
+#ifndef TCSUPPORT_UART1_RINGBUFFER_KB_SIZE
+#define TCSUPPORT_UART1_RINGBUFFER_KB_SIZE 4
+#endif
+#define PRINT_BUFFER_INIT_LEN_KB (TCSUPPORT_UART1_RINGBUFFER_KB_SIZE << 10)
+#define INSUFFICIENT_BUF_CNT_THRESHOLD (5)
+
+static unsigned char isRingBufferMode = DISABLE;
+static unsigned char real_state = DISABLE;
+static unsigned char auto_mode = ENABLE;
+static wait_queue_head_t printQueue;
+static struct task_struct *printTask;
+static int printFlag;
+static struct circ_buf ringBuffer;
+static int ringBufferDbg = DISABLE;
+static char buffer[PRINT_BUFFER_INIT_LEN_KB];
+#endif
+
+int tc3162_uart_installed = 0;
+
+#ifdef TCSUPPORT_MT7510_E1
+#define READ_OTHER(x) ((x & 0xc) + 0xbfb003a0)
+#endif
+static void tc3162ser_stop_tx(struct uart_port *port)
+{
+
+ //if(tc3162_uart_installed == 1)
+ //{
+ // printk("tc3162ser_stop_tx: Can not stop tx return\r\n");
+ // return;
+ //}
+#ifdef TCSUPPORT_MT7510_E1
+ unsigned int tmp;
+ tmp = VPint(READ_OTHER(CR_UART_IER));
+ wmb();
+ VPchar(CR_UART_IER) &= ~IER_THRE_INTERRUPT_ENABLE;
+ wmb();
+#else
+ VPchar(CR_UART_IER) &= ~IER_THRE_INTERRUPT_ENABLE;
+#endif
+}
+
+static void tc3162ser_irq_rx(struct uart_port *port)
+{
+ //struct tty_port *tty_port = port->state->port;
+ struct tty_struct *tty = port->state->port.tty;
+ unsigned int ch, flg;
+#ifdef TCSUPPORT_MT7510_E1
+ unsigned int tmp;
+
+ while (1) {
+ tmp = VPint(READ_OTHER(CR_UART_LSR));
+ wmb();
+ if(!(LSR_INDICATOR & LSR_RECEIVED_DATA_READY)){
+ wmb();
+ break;
+ }
+#else
+ while (LSR_INDICATOR & LSR_RECEIVED_DATA_READY) {
+#endif
+ /*
+ * We need to read rds before reading the
+ * character from the fifo
+ */
+#ifdef TCSUPPORT_MT7510_E1
+ tmp = VPint(READ_OTHER(CR_UART_RBR));
+ wmb();
+ ch = VPchar(CR_UART_RBR);
+ wmb();
+#else
+ ch = VPchar(CR_UART_RBR);
+#endif
+ port->icount.rx++;
+ if (tty->port->low_latency){
+ tty_flip_buffer_push(&port->state->port);
+ }
+
+ flg = TTY_NORMAL;
+ tty_insert_flip_char(&port->state->port, ch, flg);
+ }
+ tty_flip_buffer_push(&port->state->port);
+}
+
+static void tc3162ser_irq_tx(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->state->xmit;
+ int count;
+
+#ifdef CONFIG_TC3162_ADSL
+ int len=0;
+ memset(tuart_buf, 0, sizeof(tuart_buf));
+#endif
+ if (port->x_char) {
+ VPchar(CR_UART_THR) = port->x_char;
+#ifdef TCSUPPORT_MT7510_E1
+ wmb();
+#endif
+ port->icount.tx++;
+ port->x_char = 0;
+ return;
+ }
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ tc3162ser_stop_tx(port);
+ return;
+ }
+
+ count = port->fifosize;
+ do {
+ VPchar(CR_UART_THR) =
+ xmit->buf[xmit->tail];
+#ifdef TCSUPPORT_MT7510_E1
+ wmb();
+#endif
+#ifdef CONFIG_TC3162_ADSL
+ if((void *)send_uart_msg){
+ tuart_buf[len] = xmit->buf[xmit->tail];
+ len++;
+ }
+#endif
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (--count > 0);
+#ifdef CONFIG_TC3162_ADSL
+ if((void *)send_uart_msg){
+ send_uart_msg(tuart_buf, len);
+ }
+#endif
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit))
+ tc3162ser_stop_tx(port);
+}
+
+static irqreturn_t tc3162ser_irq(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ uint8 iir;
+#ifdef TCSUPPORT_MT7510_E1
+ unsigned int tmp;
+ tmp = VPint(READ_OTHER(CR_UART_IIR));
+ wmb();
+ iir = IIR_INDICATOR;
+ wmb();
+#else
+ iir = IIR_INDICATOR;
+#endif
+
+ if (((iir & IIR_RECEIVED_DATA_AVAILABLE) == IIR_RECEIVED_DATA_AVAILABLE) ||
+ ((iir & IIR_RECEIVER_IDLE_TRIGGER) == IIR_RECEIVER_IDLE_TRIGGER)) {
+ tc3162ser_irq_rx(port);
+ }
+ if ((iir & IIR_TRANSMITTED_REGISTER_EMPTY) == IIR_TRANSMITTED_REGISTER_EMPTY) {
+ tc3162ser_irq_tx(port);
+ }
+ return IRQ_HANDLED;
+}
+
+static unsigned int tc3162ser_tx_empty(struct uart_port *port)
+{
+#ifdef TCSUPPORT_MT7510_E1
+ unsigned int tmp;
+ tmp = VPint(READ_OTHER(CR_UART_IIR));
+ wmb();
+#endif
+ unsigned int ret;
+
+ ret = (LSR_INDICATOR & LSR_THRE) ? TIOCSER_TEMT : 0;
+#ifdef TCSUPPORT_MT7510_E1
+ wmb();
+#endif
+ return ret;
+}
+
+static void tc3162ser_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+}
+
+static unsigned int tc3162ser_get_mctrl(struct uart_port *port)
+{
+ unsigned int result = 0;
+ return result;
+}
+
+static void tc3162ser_start_tx(struct uart_port *port)
+{
+#ifdef TCSUPPORT_MT7510_E1
+ unsigned int tmp;
+ tmp = VPint(READ_OTHER(CR_UART_IER));
+ wmb();
+ VPchar(CR_UART_IER) |= IER_THRE_INTERRUPT_ENABLE;
+ wmb();
+#else
+ VPchar(CR_UART_IER) |= IER_THRE_INTERRUPT_ENABLE;
+#endif
+}
+
+static void tc3162ser_stop_rx(struct uart_port *port)
+{
+#if 1
+ if(tc3162_uart_installed == 1)
+ {
+ printk("tc3162ser_stop_rx: Can not stop rx return\r\n");
+ return;
+ }
+#endif
+#ifdef TCSUPPORT_MT7510_E1
+ unsigned int tmp;
+ tmp = VPint(READ_OTHER(CR_UART_IER));
+ wmb();
+ VPchar(CR_UART_IER) |= IER_THRE_INTERRUPT_ENABLE;
+ wmb();
+#else
+ VPchar(CR_UART_IER) &= ~IER_RECEIVED_DATA_INTERRUPT_ENABLE;
+#endif
+}
+
+static void tc3162ser_enable_ms(struct uart_port *port)
+{
+}
+
+static void tc3162ser_break_ctl(struct uart_port *port, int break_state)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+#ifdef CONFIG_MIPS_TC3262
+static void tc3162ser_irq_dispatch(void)
+{
+ do_IRQ(UART_INT);
+}
+#endif
+
+
+static int tc3162ser_startup(struct uart_port *port)
+{
+ int ret;
+#if 1
+ if(tc3162_uart_installed != 0)
+ {
+ printk("tc3162ser_startup: Already startup return\r\n");
+ return 0;
+ }
+ //tc3162_uart_installed = 1;
+#endif
+#ifdef CONFIG_MIPS_TC3262
+ if (cpu_has_vint)
+ set_vi_handler(port->irq, tc3162ser_irq_dispatch);
+#endif
+ ret = request_irq(port->irq, tc3162ser_irq, 0, "TC3162 UART", port);
+ if (ret) {
+ printk(KERN_ERR "Couldn't get irq %d ret=%d\n", port->irq, ret);
+ return ret;
+ }
+ printk("%s \n", __FUNCTION__);
+#ifdef TCSUPPORT_MT7510_E1
+ unsigned int tmp;
+ tmp = VPint(READ_OTHER(CR_UART_IER));
+ wmb();
+ VPchar(CR_UART_IER) |= IER_RECEIVED_DATA_INTERRUPT_ENABLE;
+ wmb();
+#else
+ VPchar(CR_UART_IER) |= IER_RECEIVED_DATA_INTERRUPT_ENABLE;
+#endif
+ return 0;
+}
+
+static void tc3162ser_shutdown(struct uart_port *port)
+{
+#if 1
+ if(tc3162_uart_installed == 1)
+ {
+ printk("tc3162ser_shutdown: Can not shut down return\r\n");
+ return;
+ }
+#endif
+#ifdef TCSUPPORT_MT7510_E1
+ unsigned int tmp;
+ tmp = VPint(READ_OTHER(CR_UART_IER));
+ wmb();
+ VPchar(CR_UART_IER) &= ~IER_RECEIVED_DATA_INTERRUPT_ENABLE;
+ wmb();
+#else
+ VPchar(CR_UART_IER) &= ~IER_RECEIVED_DATA_INTERRUPT_ENABLE;
+#endif
+ free_irq(port->irq, port);
+}
+
+static void tc3162ser_set_termios(struct uart_port *port,
+ struct ktermios *termios, struct ktermios *old)
+{
+ unsigned int baud, quot;
+ unsigned long flags;
+
+ termios->c_cflag |= CREAD;
+
+ baud = 115200;
+ quot = uart_get_divisor(port, baud);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /*
+ * Update the per-port timeout.
+ */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ /*
+ * Characters to ignore
+ */
+ port->ignore_status_mask = 0;
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *tc3162ser_type(struct uart_port *port)
+{
+ return port->type == PORT_TC3162 ? "TC3162" : NULL;
+}
+
+static void tc3162ser_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE)
+ port->type = PORT_TC3162;
+}
+
+static void tc3162ser_release_port(struct uart_port *port)
+{
+ release_mem_region(port->iobase, TC3162_UART_SIZE);
+}
+
+static int tc3162ser_request_port(struct uart_port *port)
+{
+ return request_mem_region(port->iobase, TC3162_UART_SIZE,
+ "tc3162-uart") != NULL ? 0 : -EBUSY;
+}
+
+static struct uart_ops tc3162ser_ops = {
+ .tx_empty = tc3162ser_tx_empty,
+ .set_mctrl = tc3162ser_set_mctrl,
+ .get_mctrl = tc3162ser_get_mctrl,
+ .stop_tx = tc3162ser_stop_tx,
+ .start_tx = tc3162ser_start_tx,
+ .stop_rx = tc3162ser_stop_rx,
+ .enable_ms = tc3162ser_enable_ms,
+ .break_ctl = tc3162ser_break_ctl,
+ .startup = tc3162ser_startup,
+ .shutdown = tc3162ser_shutdown,
+ .set_termios = tc3162ser_set_termios,
+ .type = tc3162ser_type,
+ .config_port = tc3162ser_config_port,
+ .release_port = tc3162ser_release_port,
+ .request_port = tc3162ser_request_port,
+};
+
+static void tc3162_console_put(const char c)
+{
+#ifdef TCSUPPORT_MT7510_E1
+ unsigned int tmp;
+ while (1){
+ tmp = VPint(READ_OTHER(CR_UART_IER));
+ wmb();
+ if((LSR_INDICATOR & LSR_THRE)){
+ wmb();
+ break;
+ }
+ }
+ VPchar(CR_UART_THR) = c;
+ wmb();
+#else
+ while (!(LSR_INDICATOR & LSR_THRE))
+ ;
+ VPchar(CR_UART_THR) = c;
+#endif
+}
+
+static void tc3162_console_write(struct console *con, const char *s,
+ unsigned int count)
+{
+#ifdef CONFIG_TC3162_ADSL
+ /*The prink message is hook this funcion.*/
+ if((void *)send_uart_msg){
+ send_uart_msg((char *)s, count);
+ }
+#endif
+#ifdef TCSUPPORT_UART1_ENHANCE
+ /* before enter tc3162_console_write function, kernel has
+ * call spin_lock_irqsave() for prevent printk.
+ */
+
+ if(real_state == ENABLE) {
+ uart_print_task_wakeup(s, count);
+ }
+ else
+#endif
+ {
+ while (count--) {
+ if (*s == '\n')
+ tc3162_console_put('\r');
+ tc3162_console_put(*s);
+ s++;
+ }
+ }
+}
+
+static int tc3162_console_setup(struct console *con, char *options)
+{
+ return 0;
+}
+
+static struct uart_driver tc3162ser_reg;
+
+static struct console tc3162_serconsole = {
+ .name = "ttyS",
+ .write = tc3162_console_write,
+ .device = uart_console_device,
+ .setup = tc3162_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .cflag = B115200 | CS8 | CREAD,
+ .index = -1,
+ .data = &tc3162ser_reg,
+};
+
+static int __init tc3162_console_init(void)
+{
+ register_console(&tc3162_serconsole);
+ return 0;
+}
+
+console_initcall(tc3162_console_init);
+
+static struct uart_port tc3162ser_ports[] = {
+ {
+ .iobase = 0xbfbf0003,
+ .irq = UART_INT,
+ .uartclk = 115200,
+ .fifosize = 1,
+ .ops = &tc3162ser_ops,
+ .line = 0,
+ .flags = ASYNC_BOOT_AUTOCONF,
+ }
+};
+
+static struct uart_driver tc3162ser_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "ttyS",
+ .dev_name = "ttyS",
+ .major = TTY_MAJOR,
+ .minor = 64,
+ .nr = 1,
+};
+
+#ifdef TCSUPPORT_UART1_ENHANCE
+/* The print message uses circular buffer.
+ * A 'head' index - the point at which the producer insert items into the
+ * buffer.
+ * A 'tail' index - the point at which the consumer finds the next item in
+ * the buffer.
+ *
+ * example:
+ * array[2:5] = "abc\n"
+ * -------------------------------
+ * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
+ * --------------------------------
+ * | 0 | 1 | a | b | c | \n | 6 | 7 |
+ * --------------------------------
+ *
+ * tail head
+ *
+ * The tail pointer is equal to the head pointer, the buffer is empty.
+ * The buffer is full when the head pointer is one less than the tail pointer.
+ * The head index is incremented when items are added, and the tail index
+ * when items are removed.
+ * The tail index should never jump the head index, and both indices should
+ * wrapped to 0 when they reach the end of the buffer.
+ */
+
+/******************************************************************************
+ Descriptor: It's used to get message length in ring buffer.
+ Input Args: none.
+ Ret Value: Message length in ring buffer.
+******************************************************************************/
+static int get_ring_buffer_cnt(void)
+{
+ return CIRC_CNT(ringBuffer.head, ringBuffer.tail, PRINT_BUFFER_INIT_LEN_KB);
+}
+
+/******************************************************************************
+ Descriptor: It's used to get free space in ring buffer.
+ Input Args: none.
+ Ret Value: Free space in ring buffer.
+******************************************************************************/
+static int get_ring_buffer_space(void)
+{
+ return CIRC_SPACE(ringBuffer.head, ringBuffer.tail, PRINT_BUFFER_INIT_LEN_KB);
+}
+
+/******************************************************************************
+ Descriptor: It's used to add string to ring buffer, if the ring buffer
+ has enough free space. On the contrary, if the ring buffer has
+ no enough free space, it will show error message to console.
+ Input Args: str: The string pointer.
+ str_len: The string length
+ Ret Value: none.
+******************************************************************************/
+static void add_msg_to_ring_buffer(char *str, int str_len)
+{
+ int offset, start, pos;
+ static int insufficient_buf_cnt = 0;
+ int buf_space = 0;
+ char msg[64] = {0};
+
+ if(ringBufferDbg == ENABLE) {
+ buf_space = get_ring_buffer_space();
+ snprintf(msg, sizeof(msg), "before add(%d) buffer space:%d\n", str_len, buf_space);
+ console_put_string(msg);
+ }
+
+ if(str_len > get_ring_buffer_space()) {
+ /* Has no enough ring buffer space to stroe message */
+ insufficient_buf_cnt++;
+ /* print insufficient ring buffer message */
+ if(insufficient_buf_cnt > INSUFFICIENT_BUF_CNT_THRESHOLD) {
+ console_put_string("insufficient ring buffer.\n");
+ /* clear cnt */
+ insufficient_buf_cnt = 0;
+ }
+ return;
+ }
+
+ /* store log message to buffer */
+ start = ringBuffer.head;
+ for(offset = 0; offset < str_len; offset++) {
+ /* pos = (start + offset) % PRINT_BUFFER_INIT_LEN_KB */
+ pos = (start + offset) & (PRINT_BUFFER_INIT_LEN_KB - 1);
+ ringBuffer.buf[pos] = str[offset];
+ }
+
+ /* shift head pointer */
+ ringBuffer.head = ((ringBuffer.head + str_len) & (PRINT_BUFFER_INIT_LEN_KB - 1));
+
+ if(ringBufferDbg == ENABLE) {
+ buf_space = get_ring_buffer_space();
+ snprintf(msg, sizeof(msg), "after add(%d) buffer space:%d\n", str_len, buf_space);
+ console_put_string(msg);
+ }
+}
+
+/******************************************************************************
+ Descriptor: It's used to print string to console(UART1).
+ Input Args: str: The string pointer.
+ Ret Value: none.
+******************************************************************************/
+void console_put_string(const char *str)
+{
+ while(*str) {
+ if (*str == '\n')
+ tc3162_console_put('\r');
+ tc3162_console_put(*str);
+ str++;
+ }
+}
+
+/******************************************************************************
+ Descriptor: It's used to flush data from ring buffer to console(UART1).
+ Input Args: none.
+ Ret Value: SUCCESS: flush ring buffer to console success.
+ FALSE: flush ring buffer to console failed.
+******************************************************************************/
+static int flush_ring_buffer(void)
+{
+ int offset, start, pos;
+ int ring_buffer_cnt;
+ int buf_space = 0;
+ char msg[64] = {0};
+ unsigned long flags;
+
+ if(ringBufferDbg == ENABLE) {
+ buf_space = get_ring_buffer_space();
+ snprintf(msg, sizeof(msg), "before flush buffer space:%d\n", buf_space);
+ console_put_string(msg);
+ }
+
+ if(isEN7516G) {
+ local_irq_save(flags);
+ }
+
+ ring_buffer_cnt = get_ring_buffer_cnt();
+
+ if(ring_buffer_cnt == 0) {
+ if(isEN7516G) {
+ local_irq_restore(flags);
+ }
+ return SUCCESS;
+ } else if(ring_buffer_cnt < 0) {
+ console_put_string("ring buffer count error.\n");
+ if(isEN7516G) {
+ local_irq_restore(flags);
+ }
+ return FALSE;
+ }
+
+ start = ringBuffer.tail;
+
+ for(offset = 0; offset < ring_buffer_cnt; offset++) {
+ /* pos = (start + offset) % PRINT_BUFFER_INIT_LEN_KB */
+ pos = (start + offset) & (PRINT_BUFFER_INIT_LEN_KB - 1);
+ if (ringBuffer.buf[pos] == '\n')
+ tc3162_console_put('\r');
+ tc3162_console_put(ringBuffer.buf[pos]);
+
+
+ /* shift tail pointer */
+ ringBuffer.tail = ((ringBuffer.tail + 1) & (PRINT_BUFFER_INIT_LEN_KB - 1));
+ }
+
+ if(isEN7516G) {
+ local_irq_restore(flags);
+ }
+
+ if(ringBufferDbg == ENABLE) {
+ buf_space = get_ring_buffer_space();
+ snprintf(msg, sizeof(msg), "after flush buffer space:%d\n", buf_space);
+ console_put_string(msg);
+ }
+
+ return SUCCESS;
+}
+
+/******************************************************************************
+ Descriptor: It's used to flush data from ring buffer to console(UART1).
+ Input Args: data: Never used.
+ Ret Value: 0: Success.
+******************************************************************************/
+static int uart_print_task_wait(void *data)
+{
+ while(!kthread_should_stop()) {
+ wait_event_interruptible(printQueue, (printFlag == ENABLE || kthread_should_stop()));
+ /* clear condition */
+ printFlag = DISABLE;
+
+ /* print debug msg */
+ if(flush_ring_buffer() == FALSE) {
+ /* reset head and tail pointer */
+ ringBuffer.head = 0;
+ ringBuffer.tail = 0;
+ }
+ }
+
+ return 0;
+}
+
+/******************************************************************************
+ Descriptor: It's used to insert string to ring buffer.
+ Input Args: data: Never used.
+ Ret Value: 0: Success.
+******************************************************************************/
+int uart_print_task_wakeup(const char *str, unsigned int str_len)
+{
+ /* store log message to buffer */
+ add_msg_to_ring_buffer(str, str_len);
+
+ if(isRingBufferMode == DISABLE) {
+ /* Disable is called, flush buffer and set real_state to DISABLE */
+ flush_ring_buffer();
+ real_state = DISABLE;
+ /* reset head and tail pointer */
+ ringBuffer.head = 0;
+ ringBuffer.tail = 0;
+ } else {
+ /* wake up task */
+ printFlag = ENABLE;
+ wake_up_interruptible(&printQueue);
+ }
+ return 0;
+}
+
+/******************************************************************************
+ Descriptor: It's used to enable or disable ring buffer mode with flush
+ ring buffer.
+ Input Args: enable: Enable or disable ring buffer mode.
+ flush_buffer: Enable or disable flush ring buffer when disable
+ ring buffer mode.
+ Ret Value: none.
+******************************************************************************/
+void set_ring_buffer_mode(int enable)
+{
+ if(enable == DISABLE) {
+ if(isRingBufferMode == ENABLE) {
+ isRingBufferMode = DISABLE;
+ }
+ } else {
+ if(isRingBufferMode == DISABLE) {
+ isRingBufferMode = ENABLE;
+ real_state = ENABLE;
+ }
+ }
+}
+
+/******************************************************************************
+ Descriptor: It's used to disable ring buffer mode with flush ring buffer.
+ Input Args: none.
+ Ret Value: none.
+******************************************************************************/
+void disable_ring_buffer_mode(void)
+{
+ set_ring_buffer_mode(DISABLE);
+}
+EXPORT_SYMBOL(disable_ring_buffer_mode);
+
+/******************************************************************************
+ Descriptor: It's used to set ring buffer mode in auto mode.
+ Input Args: enalbe : enable/disable ring buffer mode.
+ Ret Value: none.
+******************************************************************************/
+void set_ring_buffer_mode_auto(int enable)
+{
+ /* Set ring buffer mode automatically if auto mode is enable (default enable) */
+ if(auto_mode == ENABLE) {
+ if(enable == DISABLE) {
+ set_ring_buffer_mode(DISABLE);
+ } else {
+ set_ring_buffer_mode(ENABLE);
+ }
+ }
+}
+EXPORT_SYMBOL(set_ring_buffer_mode_auto);
+
+/******************************************************************************
+ Descriptor: It's used to test ring buffer mode. This function will printk a message with random
+ length.
+ Tester uses console information to check whether the
+ ring buffer mode correct or not.
+ Input Args: times: The times of printk.
+ Ret Value: none.
+******************************************************************************/
+static void random_printk(int times)
+{
+ /* printk buffer size are 1024 */
+ const int printk_size = 1024;
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+ char msg_pattern[1023] = {0};
+	char msg_printk[1024] = {0};
+#else
+ char msg_pattern[printk_size - 1] = {0};
+	char msg_printk[printk_size] = {0};
+#endif
+ int printk_len;
+ char msg_printk_len[32] = {0};
+ const int ascii_start = 0x21;
+ const int ascii_end = 0x7E;
+ const int ascii_range = ascii_end - ascii_start + 1;
+ int i;
+
+ if(isRingBufferMode == DISABLE) {
+ console_put_string("ring buffer mode is disabled.");
+ return;
+ }
+
+ /* init msg */
+ for(i = 0; i < sizeof(msg_pattern); i++) {
+ /* assign ASCII from 0x21(!) to 0x7E(~) */
+ msg_pattern[i] = (i % ascii_range) + ascii_start;
+ }
+
+ while(times--) {
+ get_random_bytes(&printk_len, sizeof(printk_len)) ;
+ /* minus '\n' length */
+ printk_len = (printk_len & (printk_size - 1 - strlen("\n")));
+
+ /* show to console for check printk length */
+ /* plus "\r\n" length */
+ snprintf(msg_printk_len, sizeof(msg_printk_len), "times:%d printk len:%d\n", (times + 1), printk_len + strlen("\r\n"));
+ console_put_string(msg_printk_len);
+
+ /* print to console */
+ memcpy(msg_printk, msg_pattern, printk_len);
+ msg_printk[printk_len] = '\n';
+ msg_printk[printk_len + 1] = '\0';
+		printk("%s", msg_printk);
+
+ /* When UART baudrate are 115200,
+ * the costs of printk to console is about 1ms with 12 characters.
+ */
+ msleep((printk_size / 12) + 1);
+ }
+}
+
+static int uart_dbg_msg_read_proc(char *buf, char **start, off_t off, int count, int *eof, void *data)
+{
+ int index = 0;
+
+ index += sprintf(buf + index, "isRingBufferMode : %s\n", ((isRingBufferMode == ENABLE) ? "enable" : "disable"));
+ index += sprintf(buf + index, "ring buffer mode size: %dKB\n", (PRINT_BUFFER_INIT_LEN_KB >> 10));
+ index += sprintf(buf + index, "ringBufferDbg : %s\n", ((ringBufferDbg == ENABLE) ? "enable" : "disable"));
+ index += sprintf(buf + index, "auto mode : %s\n", ((auto_mode == ENABLE) ? "enable" : "disable"));
+
+ index += sprintf(buf + index, "\nusage:\n");
+ index += sprintf(buf + index, "ring_buffer_mode <0:disable, 1:enable>\n");
+ index += sprintf(buf + index, "busy_loop_test\n");
+ index += sprintf(buf + index, "crash_test\n");
+ index += sprintf(buf + index, "ring_buffer_dbg <0:disable, 1:enable>\n");
+ index += sprintf(buf + index, "ring_buffer_test <times>\n");
+ index += sprintf(buf + index, "ring_buffer_stop_test\n");
+
+ *eof = 1;
+
+ return index;
+}
+
+static int uart_dbg_msg_write_proc(struct file *file, const char *buffer, unsigned long count, void *data)
+{
+ char val_string[64], cmd[32];
+ int value, ret ;
+
+ if (count > sizeof(val_string) - 1)
+ return -EINVAL ;
+
+ if (copy_from_user(val_string, buffer, count))
+ return -EFAULT ;
+
+	val_string[count] = '\0'; sscanf(val_string, "%31s %d", cmd, &value) ;
+
+ if(!strcmp(cmd, "ring_buffer_mode")) {
+ /* Disable auto mode once ring buffer is enable/disable manually (command line) */
+ auto_mode = DISABLE;
+ if(value == 0) {
+ set_ring_buffer_mode(DISABLE);
+ } else {
+ set_ring_buffer_mode(ENABLE);
+ }
+ printk("isRingBufferMode:%s\n", ((isRingBufferMode == ENABLE) ? "enable" : "disable"));
+ } else if(!strcmp(cmd, "busy_loop_test")) {
+		/* use for test printk after occurring watchdog interrupt */
+ printk("before busy loop test.\n");
+ while(1);
+ } else if(!strcmp(cmd, "crash_test")) {
+ printk("before crash test.\n");
+ /* use for test printk when read error register crash */
+ regRead32(0x1234567);
+ printk("finished crash test.\n");
+ } else if(!strcmp(cmd, "ring_buffer_dbg")) {
+ if(value == 0) {
+ ringBufferDbg = DISABLE;
+ } else {
+ ringBufferDbg = ENABLE;
+ }
+ printk("ringBufferDbg:%s\n", ((ringBufferDbg == ENABLE) ? "enable" : "disable"));
+ } else if(!strcmp(cmd, "ring_buffer_test")) {
+ random_printk(value);
+ } else if(!strcmp(cmd, "ring_buffer_stop_test")) {
+ printk("before ring buffer stop.\n");
+ set_ring_buffer_mode(DISABLE);
+ printk("finished ring buffer stop.\n");
+ }
+
+ return count ;
+}
+
+/******************************************************************************
+ Descriptor: It's used to deinit ring buffer.
+ Input Args: none.
+ Ret Value: none.
+******************************************************************************/
+void uart_print_buffer_deinit(void)
+{
+ remove_proc_entry("uart/uart", 0);
+ remove_proc_entry("uart", 0);
+
+ if(printTask) {
+ kthread_stop(printTask);
+ }
+
+ if(ringBuffer.buf) {
+ kfree(ringBuffer.buf);
+ }
+}
+#endif
+
+static int __init tc3162ser_init(void)
+{
+ int ret, i;
+#ifdef TCSUPPORT_UART1_ENHANCE
+ struct proc_dir_entry *uart_proc = NULL;
+ struct proc_dir_entry *uart_proc_dir = NULL;
+ int errno;
+ char errMsg[64] = {0};
+#endif
+
+ ret = uart_register_driver(&tc3162ser_reg);
+ if (!ret) {
+ for (i = 0; i < TC3162_NR_PORTS; i++)
+ uart_add_one_port(&tc3162ser_reg, &tc3162ser_ports[i]);
+ }
+
+#ifdef TCSUPPORT_UART1_ENHANCE
+ /* init */
+ init_waitqueue_head(&printQueue);
+ printFlag = DISABLE;
+ ringBuffer.head = 0;
+ ringBuffer.tail = 0;
+
+ /* Initial proc file node */
+ uart_proc_dir = proc_mkdir("uart", NULL);
+
+ if(uart_proc_dir) {
+ uart_proc = create_proc_entry("uart", 0, uart_proc_dir) ;
+ if(uart_proc) {
+ uart_proc->read_proc = uart_dbg_msg_read_proc ;
+ uart_proc->write_proc = uart_dbg_msg_write_proc ;
+ }
+ }
+
+ printTask = kthread_run(uart_print_task_wait, NULL, "uart_print_task");
+ if(IS_ERR(printTask)) {
+ errno = PTR_ERR(printTask);
+ printTask = NULL;
+ snprintf(errMsg, sizeof(errMsg), "uart create kernel thread for ring buffer error:%d.\n", errno);
+ console_put_string(errMsg);
+ remove_proc_entry("uart/uart", 0);
+ remove_proc_entry("uart", 0);
+ } else {
+ ringBuffer.buf = buffer;
+ console_put_string("ECNT ring buffer init success\n");
+ }
+#endif
+
+ return ret;
+}
+
+#ifdef CONFIG_TC3162_ADSL
+/*_____________________________________________________________________________
+** function name: tcconsole_cmd
+** descriptions:
+** This function is used the send command to uart drivers that is used
+** tcconsole utility.
+**
+** parameters:
+** cmd: Specify the command line strings.
+** len: The length of command.
+**
+** global:
+** tc3162ser_ports
+**
+** return:
+** none
+**
+** call:
+** tty_flip_buffer_push
+** tty_insert_flip_char
+**
+** revision:
+** 1. Here 2010/9/23
+**____________________________________________________________________________
+*/
+void
+tcconsole_cmd(char* cmd, int len){
+ struct uart_port *port= &tc3162ser_ports[0];
+ struct tty_struct *tty = port->state->port.tty;
+ unsigned int ch, flg;
+ int i;
+
+ /*Ignore the line feed character*/
+ for(i=0; i<len-1; i++){
+ ch = cmd[i];
+ port->icount.rx++;
+
+ if (tty->port->low_latency)
+ //tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->state->port);
+
+ flg = TTY_NORMAL;
+
+ //tty_insert_flip_char(tty, ch, flg);
+ tty_insert_flip_char(&port->state->port, ch, flg);
+ }
+ //tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&port->state->port);
+}/*end tcconsole_cmd*/
+EXPORT_SYMBOL(tcconsole_cmd);
+#endif
+
+__initcall(tc3162ser_init);
+
Index: linux-3.18.21/drivers/tty/serial/tc3162_uart2.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/tty/serial/tc3162_uart2.c 2018-02-05 13:21:05.000000000 +0800
@@ -0,0 +1,890 @@
+/*
+ * Serial driver for TC3162 SoC
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/console.h>
+
+#include <asm/tc3162/tc3162.h>
+
+#if defined(TCSUPPORT_CPU_EN7580)
+#define TC3162_NR_PORTS 4 //UART2, UART3, UART4, UART5
+#elif defined(TCSUPPORT_CPU_EN7516) || defined(TCSUPPORT_CPU_EN7527)
+#define TC3162_NR_PORTS 2 //UART2, UART3
+#else
+#define TC3162_NR_PORTS 1 //UART2
+#endif
+
+
+#define TC3162_UART_SIZE 0x30
+
+#define PORT_TC3162 3162
+#if 0 // def CONFIG_TC3162_ADSL
+void (*send_uart_msg)(char* msg, int len);
+EXPORT_SYMBOL(send_uart_msg);
+static char tuart_buf[1024];
+#endif
+
+#define UART_BAUDRATE_MIN 110
+#define UART_BAUDRATE_MAX 115200
+#define UART_BAUDRATE 9600
+
+#define UART_BRDL_20M 0x01
+#define UART_BRDH_20M 0x00
+#define UART_CRYSTAL_CLK_20M 20000000
+#define UART_CRYSTAL_CLK_DIV 10
+
+#define UART_IER_MSTS 0x08
+
+#define UART_MCR_RTS 0x02
+#define UART_MCR_LOOP 0x10
+
+#define UART_MSR_CTS 0x10
+
+#define UART_LCR_DLAB 0x80
+#define UART_LCR_BCON 0x40
+#define UART_LCR_SPBEN 0x20
+#define UART_LCR_EOPCON 0x10
+#define UART_LCR_PCEN 0x08
+#define UART_LCR_SB 0x04
+#define UART_LCR_CLEN_MASK 0x03
+#define UART_LCR_CLEN_C8 0x03
+#define UART_LCR_CLEN_C7 0x02
+#define UART_LCR_CLEN_C6 0x01
+#define UART_LCR_CLEN_C5 0x00
+
+#define UART_MISCC_CTSHWFC 0x08
+#define UART_MISCC_RTSHWFC 0x04
+
+#define UART_CFLAG_DEBUG 0x8000
+#define UART_CFLAG_DEVON 0x10000
+
+
+/* crystal clock is 20Mhz */
+/*---------------------
+| uclk_20M | baudrate |
+|---------------------|
+| 59904 | 115200 |
+| 29952 | 57600 |
+| 19968 | 38400 |
+| 14976 | 28800 |
+| 9984 | 19200 |
+| 7488 | 14400 |
+| 4992 | 9600 |
+| 2496 | 4800 |
+| 1248 | 2400 |
+| 624 | 1200 |
+| 312 | 600 |
+| 156 | 300 |
+| 57 | 110 |
+---------------------*/
+static unsigned long tc3162_get_uclk_20M(unsigned int baud)
+{
+ unsigned long uclk_20M = 0;
+ unsigned long long baud_tmp = 0;
+
+ baud_tmp = baud;
+ uclk_20M = (baud_tmp * UART_XYD_Y * (UART_BRDH_20M << 8 | UART_BRDL_20M) * 16) / (UART_CRYSTAL_CLK_20M / UART_CRYSTAL_CLK_DIV);
+
+ return uclk_20M;
+}
+#ifdef TCSUPPORT_MT7510_E1
+#define READ_OTHER(x) ((x & 0xc) + 0xbfb003a0)
+#endif
+static void tc3162ser_stop_tx(struct uart_port *port)
+{
+ UART_DPRINT_MSG();
+#ifdef TCSUPPORT_MT7510_E1
+ unsigned int tmp;
+ tmp = VPint(READ_OTHER(CR_UART2_IER));
+ wmb();
+ VPint(CR_UART2_IER + port->iobase) &= ~IER_THRE_INTERRUPT_ENABLE;
+ wmb();
+#else
+ VPint(CR_UART2_IER + port->iobase) &= ~IER_THRE_INTERRUPT_ENABLE;
+#endif
+}
+
+static void tc3162ser_irq_rx(struct uart_port *port)
+{
+ struct tty_struct *tty = port->state->port.tty;
+ unsigned int ch, flg;
+
+#ifdef TCSUPPORT_MT7510_E1
+ unsigned int tmp;
+
+ while (1) {
+ tmp = VPint(READ_OTHER(CR_UART2_LSR + port->iobase));
+ wmb();
+		if(!(VPint(CR_UART2_LSR + port->iobase) & LSR_RECEIVED_DATA_READY)){
+ wmb();
+ break;
+ }
+#else
+ while ((VPint(CR_UART2_LSR + port->iobase)) & LSR_RECEIVED_DATA_READY) {
+#endif
+ /*
+ * We need to read rds before reading the
+ * character from the fifo
+ */
+#ifdef TCSUPPORT_MT7510_E1
+ tmp = VPint(READ_OTHER(CR_UART2_RBR + port->iobase));
+ wmb();
+ ch = VPint(CR_UART2_RBR + port->iobase);
+ wmb();
+#else
+ ch = VPint(CR_UART2_RBR + port->iobase);
+#endif
+ port->icount.rx++;
+
+ if (tty->port->low_latency)
+ tty_flip_buffer_push(&port->state->port);
+
+ flg = TTY_NORMAL;
+ tty_insert_flip_char(&port->state->port, ch, flg);
+ }
+ tty_flip_buffer_push(&port->state->port);
+}
+
+static void tc3162ser_irq_tx(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->state->xmit;
+ int count;
+
+#if 0 // def CONFIG_TC3162_ADSL
+ int len=0;
+ memset(tuart_buf, 0, sizeof(tuart_buf));
+#endif
+ if (port->x_char) {
+ VPint(CR_UART2_THR + port->iobase) = port->x_char;
+#ifdef TCSUPPORT_MT7510_E1
+ wmb();
+#endif
+ port->icount.tx++;
+ port->x_char = 0;
+ return;
+ }
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ tc3162ser_stop_tx(port);
+ return;
+ }
+
+ count = port->fifosize;
+ do {
+ VPint(CR_UART2_THR + port->iobase) =
+ xmit->buf[xmit->tail];
+#ifdef TCSUPPORT_MT7510_E1
+ wmb();
+#endif
+#if 0 // def CONFIG_TC3162_ADSL
+ if((void *)send_uart_msg){
+ tuart_buf[len] = xmit->buf[xmit->tail];
+ len++;
+ }
+#endif
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (--count > 0);
+#if 0 // def CONFIG_TC3162_ADSL
+ if((void *)send_uart_msg){
+ send_uart_msg(tuart_buf, len);
+ }
+#endif
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit))
+ tc3162ser_stop_tx(port);
+}
+
+static irqreturn_t tc3162ser_irq(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ unsigned int iir;
+
+#ifdef TCSUPPORT_MT7510_E1
+ unsigned int tmp;
+ tmp = VPint(READ_OTHER(CR_UART2_IIR + port->iobase));
+ wmb();
+ iir = VPint(CR_UART2_IIR + port->iobase);
+ wmb();
+#else
+ iir = VPint(CR_UART2_IIR + port->iobase);
+#endif
+
+ if (((iir & IIR_RECEIVED_DATA_AVAILABLE) == IIR_RECEIVED_DATA_AVAILABLE) ||
+ ((iir & IIR_RECEIVER_IDLE_TRIGGER) == IIR_RECEIVER_IDLE_TRIGGER))
+ {
+ tc3162ser_irq_rx(port);
+ }
+ if ((iir & IIR_TRANSMITTED_REGISTER_EMPTY) == IIR_TRANSMITTED_REGISTER_EMPTY)
+ {
+ tc3162ser_irq_tx(port);
+ }
+
+ /* TODO: Handle Modem Status Interrupt */
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int tc3162ser_tx_empty(struct uart_port *port)
+{
+ UART_DPRINT_MSG();
+#ifdef TCSUPPORT_MT7510_E1
+ unsigned int tmp;
+ tmp = VPint(READ_OTHER(CR_UART2_IIR + port->iobase));
+ wmb();
+#endif
+ unsigned int ret;
+
+ ret = ((VPint(CR_UART2_LSR + port->iobase)) & LSR_THRE) ? TIOCSER_TEMT : 0;
+#ifdef TCSUPPORT_MT7510_E1
+ wmb();
+#endif
+ return ret;
+}
+
+static void tc3162ser_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+/* Do nothing, UART flow control is in hardware */
+/* Chip hasn't support modem control yet */
+/* TODO: Just pesudo code. Not yet tested */
+#if 0
+/*=====================================================================*/
+ unsigned char mcr = 0;
+
+ /* Don't need spinlock */
+ mcr = tc_inb(CR_UART2_MCR);
+ wmb();
+
+ if (!(mctrl & TIOCM_RTS))
+ mcr |= UART_MCR_RTS;
+ else
+ mcr &= ~UART_MCR_RTS;
+
+ if (mctrl & TIOCM_LOOP)
+ mcr |= UART_MCR_LOOP;
+ else
+ mcr &= ~UART_MCR_LOOP;
+
+ tc_outb(CR_UART2_MCR, mcr);
+ wmb();
+/*=====================================================================*/
+#endif
+}
+
+static unsigned int tc3162ser_get_mctrl(struct uart_port *port)
+{
+/* Chip hasn't support modem control yet */
+/* TODO: Just pesudo code. Not yet tested */
+#if 0
+/*=====================================================================*/
+ unsigned int mctrl = 0;
+ unsigned char msr = 0;
+
+ /* Don't need spinlock */
+ msr = tc_inb(CR_UART2_MSR);
+ wmb();
+
+ if (msr & UART_MSR_CTS)
+ mctrl |= TIOCM_CTS;
+
+ return mctrl;
+/*=====================================================================*/
+#else
+ /* Always return clear to send, because UART flow control is in hardware */
+ return TIOCM_CTS;
+#endif
+}
+
+static void tc3162ser_start_tx(struct uart_port *port)
+{
+ UART_DPRINT_MSG();
+#ifdef TCSUPPORT_MT7510_E1
+ unsigned int tmp;
+ tmp = VPint(READ_OTHER(CR_UART2_IER + port->iobase));
+ wmb();
+ VPint(CR_UART2_IER + port->iobase) |= IER_THRE_INTERRUPT_ENABLE;
+ wmb();
+#else
+ VPint(CR_UART2_IER + port->iobase) |= IER_THRE_INTERRUPT_ENABLE;
+#endif
+}
+
+static void tc3162ser_stop_rx(struct uart_port *port)
+{
+ UART_DPRINT_MSG();
+#ifdef TCSUPPORT_MT7510_E1
+ unsigned int tmp;
+ tmp = VPint(READ_OTHER(CR_UART2_IER + port->iobase));
+ wmb();
+	VPint(CR_UART2_IER + port->iobase) &= ~IER_RECEIVED_DATA_INTERRUPT_ENABLE;
+ wmb();
+#else
+ VPint(CR_UART2_IER + port->iobase) &= ~IER_RECEIVED_DATA_INTERRUPT_ENABLE;
+#endif
+}
+
+static void tc3162ser_enable_ms(struct uart_port *port)
+{
+/* Chip hasn't support modem status yet */
+/* TODO: Just pesudo code. Not yet tested */
+#if 0
+/*=====================================================================*/
+ unsigned char ier = 0;
+
+ /* Don't need spinlock */
+ ier = tc_inb(CR_UART2_IER);
+ wmb();
+
+ ier |= UART_IER_MSTS;
+ tc_outb(CR_UART2_IER, ier);
+ wmb();
+/*=====================================================================*/
+#endif
+}
+
+static void tc3162ser_break_ctl(struct uart_port *port, int break_state)
+{
+ UART_DPRINT_MSG();
+/* TODO: Just pesudo code. Not yet tested */
+/*=====================================================================*/
+ unsigned long flags, lcr = 0;
+
+ spin_lock_irqsave(&port->lock, flags);
+ lcr = tc_inl(CR_UART2_LCR + port->iobase);
+ wmb();
+ if (break_state)
+ lcr |= UART_LCR_BCON;
+ else
+ lcr &=~UART_LCR_BCON;
+
+ tc_outl(CR_UART2_LCR + port->iobase, lcr);
+ wmb();
+
+ spin_unlock_irqrestore(&port->lock, flags);
+/*=====================================================================*/
+}
+
+
+static int tc3162ser_startup(struct uart_port *port)
+{
+	int ret = -ENODEV; /* default if iobase matches no supported UART */
+ UART_DPRINT_MSG();
+
+ if(port->iobase == CR_UART2_BASE)
+ ret = request_irq(port->irq, tc3162ser_irq, 0, "TC3162 UART2", port);
+#if defined(TCSUPPORT_CPU_EN7516) || defined(TCSUPPORT_CPU_EN7527) || defined(TCSUPPORT_CPU_EN7580)
+ else if(port->iobase == CR_UART3_BASE)
+ ret = request_irq(port->irq, tc3162ser_irq, 0, "TC3162 UART3", port);
+#if defined(TCSUPPORT_CPU_EN7580)
+ else if(port->iobase == CR_UART4_BASE)
+ ret = request_irq(port->irq, tc3162ser_irq, 0, "TC3162 UART4", port);
+ else if(port->iobase == CR_UART5_BASE)
+ ret = request_irq(port->irq, tc3162ser_irq, 0, "TC3162 UART5", port);
+#endif //7580
+#endif //7516 | 7527 | 7580
+
+ if (ret) {
+ printk(KERN_ERR "Couldn't get irq %d ret=%d\n", port->irq, ret);
+ return ret;
+ }
+#ifdef TCSUPPORT_MT7510_E1
+ unsigned int tmp;
+ tmp = VPint(READ_OTHER(CR_UART2_IER + port->iobase));
+ wmb();
+ VPint(CR_UART2_IER + port->iobase) |= IER_RECEIVED_DATA_INTERRUPT_ENABLE;
+ wmb();
+#else
+ VPint(CR_UART2_IER + port->iobase) |= IER_RECEIVED_DATA_INTERRUPT_ENABLE;
+#endif
+ return 0;
+}
+
+static void tc3162ser_shutdown(struct uart_port *port)
+{
+ UART_DPRINT_MSG();
+#ifdef TCSUPPORT_MT7510_E1
+ unsigned int tmp;
+ tmp = VPint(READ_OTHER(CR_UART2_IER + port->iobase));
+ wmb();
+ VPint(CR_UART2_IER + port->iobase) &= ~IER_RECEIVED_DATA_INTERRUPT_ENABLE;
+ wmb();
+#else
+ VPint(CR_UART2_IER + port->iobase) &= ~IER_RECEIVED_DATA_INTERRUPT_ENABLE;
+#endif
+ free_irq(port->irq, port);
+}
+
+static void tc3162ser_set_termios(struct uart_port *port,
+ struct ktermios *termios, struct ktermios *old)
+{
+ UART_DPRINT_MSG();
+ unsigned int baud = 0, reg = 0;
+ unsigned long flags;
+ unsigned long div_x = 0, div_y = 0, word = 0, lcr = 0;
+ unsigned char lcr_tmp = 0, miscc = 0;
+
+ switch (termios->c_cflag & CSIZE)
+ {
+ case CS5:
+ lcr_tmp = UART_LCR_CLEN_C5;
+ break;
+ case CS6:
+ lcr_tmp = UART_LCR_CLEN_C6;
+ break;
+ case CS7:
+ lcr_tmp = UART_LCR_CLEN_C7;
+ break;
+ default:
+ case CS8:
+ lcr_tmp = UART_LCR_CLEN_C8;
+ break;
+ }
+
+ if (termios->c_cflag & CSTOPB)
+ lcr_tmp |= UART_LCR_SB;
+ if (termios->c_cflag & PARENB)
+ lcr_tmp |= UART_LCR_PCEN;
+ if (!(termios->c_cflag & PARODD))
+ lcr_tmp |= UART_LCR_EOPCON;
+#ifdef CMSPAR
+ if (termios->c_cflag & CMSPAR)
+ lcr_tmp |= UART_LCR_SPBEN;
+#endif
+
+ if (termios->c_cflag & CRTSCTS)
+ {
+ if(port->unused1 & UART_HWFC_ENABLE)
+ miscc |= (UART_MISCC_CTSHWFC | UART_MISCC_RTSHWFC);
+ else {
+ termios->c_cflag &= ~CRTSCTS;
+ printk("Chip hasn't support HW flow control yet\n");
+ }
+ }
+
+ if (termios->c_cflag & UART_CFLAG_DEBUG) {
+ port->unused1 |= UART_DEBUG;
+ } else {
+ port->unused1 &= ~UART_DEBUG;
+ }
+
+#if defined(TCSUPPORT_CPU_EN7516) || defined(TCSUPPORT_CPU_EN7527)
+ if (termios->c_cflag & UART_CFLAG_DEVON) {
+ if(!isFPGA) {
+ reg = VPint(0xbfa2015c); //GPIO_SHR_SCH
+ if(port->iobase == CR_UART2_BASE) {
+ reg |= (0x1 << 24) | (0x1 << 25); //Turn on UART2_MODE, UART2_CTSRTS
+ } else if(port->iobase == CR_UART3_BASE) {
+ reg |= (0x1 << 26); //Turn on UART3_MODE
+ }
+ VPint(0xbfa2015c) = reg;
+ }
+ } else {
+ if(!isFPGA) {
+ reg = VPint(0xbfa2015c); //GPIO_SHR_SCH
+ if(port->iobase == CR_UART2_BASE) {
+ reg &= ~((0x1 << 24) | (0x1 << 25)); //Turn off UART2_MODE, UART2_CTSRTS
+ } else if(port->iobase == CR_UART3_BASE) {
+ reg &= ~(0x1 << 26); //Turn off UART3_MODE
+ }
+ VPint(0xbfa2015c) = reg;
+ }
+ }
+#endif
+
+ /*
+ * Ask the core to calculate the divisor for us.
+ */
+ baud = uart_get_baud_rate(port, termios, old, UART_BAUDRATE_MIN, UART_BAUDRATE_MAX);
+
+ div_y = UART_XYD_Y;
+ div_x = tc3162_get_uclk_20M(baud);
+
+ word = ((div_x<<16) | div_y);
+
+ termios->c_cflag |= CREAD;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ tc_outl(CR_UART2_XYD + port->iobase, word);
+ wmb();
+
+ lcr = tc_inl(CR_UART2_LCR + port->iobase);
+ wmb();
+
+ lcr &= ~(UART_LCR_SPBEN | UART_LCR_EOPCON | UART_LCR_PCEN | UART_LCR_SB | UART_LCR_CLEN_MASK);
+ lcr |= lcr_tmp;
+
+ tc_outl(CR_UART2_LCR + port->iobase, lcr);
+ wmb();
+
+ tc_outl(CR_UART2_MISCC + port->iobase, miscc);
+ wmb();
+
+ /*
+ * Update the per-port timeout.
+ */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ /*
+ * Characters to ignore
+ */
+ port->ignore_status_mask = 0;
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *tc3162ser_type(struct uart_port *port)
+{
+ UART_DPRINT_MSG();
+ return port->type == PORT_TC3162 ? "TC3162" : NULL;
+}
+
+static void tc3162ser_config_port(struct uart_port *port, int flags)
+{
+ UART_DPRINT_MSG();
+ if (flags & UART_CONFIG_TYPE)
+ port->type = PORT_TC3162;
+}
+
+static void tc3162ser_release_port(struct uart_port *port)
+{
+ UART_DPRINT_MSG();
+ release_mem_region(port->iobase, TC3162_UART_SIZE);
+}
+
+static int tc3162ser_request_port(struct uart_port *port)
+{
+ UART_DPRINT_MSG();
+ if(port->iobase == CR_UART2_BASE)
+ return request_mem_region(port->iobase, TC3162_UART_SIZE, "tc3162-uart2") != NULL ? 0 : -EBUSY;
+#if defined(TCSUPPORT_CPU_EN7516) || defined(TCSUPPORT_CPU_EN7527) || defined(TCSUPPORT_CPU_EN7580)
+ else if(port->iobase == CR_UART3_BASE)
+ return request_mem_region(port->iobase, TC3162_UART_SIZE, "tc3162-uart3") != NULL ? 0 : -EBUSY;
+#if defined(TCSUPPORT_CPU_EN7580)
+ else if(port->iobase == CR_UART4_BASE)
+ return request_mem_region(port->iobase, TC3162_UART_SIZE, "tc3162-uart4") != NULL ? 0 : -EBUSY;
+ else if(port->iobase == CR_UART5_BASE)
+ return request_mem_region(port->iobase, TC3162_UART_SIZE, "tc3162-uart5") != NULL ? 0 : -EBUSY;
+#endif // 7580
+#endif // 7527 | 7516 | 7580
+	return -EBUSY; /* unknown iobase */ }
+
+
+static struct uart_ops tc3162ser_ops = {
+ .tx_empty = tc3162ser_tx_empty,
+ .set_mctrl = tc3162ser_set_mctrl,
+ .get_mctrl = tc3162ser_get_mctrl,
+ .stop_tx = tc3162ser_stop_tx,
+ .start_tx = tc3162ser_start_tx,
+ .stop_rx = tc3162ser_stop_rx,
+ .enable_ms = tc3162ser_enable_ms,
+ .break_ctl = tc3162ser_break_ctl,
+ .startup = tc3162ser_startup,
+ .shutdown = tc3162ser_shutdown,
+ .set_termios = tc3162ser_set_termios,
+ .type = tc3162ser_type,
+ .config_port = tc3162ser_config_port,
+ .release_port = tc3162ser_release_port,
+ .request_port = tc3162ser_request_port,
+};
+
+#if 0
+static void tc3162_console_put(const char c)
+{
+#ifdef TCSUPPORT_MT7510_E1
+ unsigned int tmp;
+ while (1){
+ tmp = VPint(READ_OTHER(CR_UART2_IER));
+ wmb();
+ if((LSR_INDICATOR2 & LSR_THRE)){
+ wmb();
+ break;
+ }
+ }
+ VPchar(CR_UART2_THR) = c;
+ wmb();
+#else
+ while (!(LSR_INDICATOR2 & LSR_THRE))
+ ;
+ VPchar(CR_UART2_THR) = c;
+#endif
+}
+
+static void tc3162_console_write(struct console *con, const char *s,
+ unsigned int count)
+{
+#if 0 // def CONFIG_TC3162_ADSL
+ /*The prink message is hook this funcion.*/
+ if((void *)send_uart_msg){
+ send_uart_msg((char *)s, count);
+ }
+#endif
+ while (count--) {
+ if (*s == '\n')
+ tc3162_console_put('\r');
+ tc3162_console_put(*s);
+ s++;
+ }
+}
+
+static int tc3162_console_setup(struct console *con, char *options)
+{
+ return 0;
+}
+
+static struct uart_driver tc3162ser_reg;
+
+static struct console tc3162_serconsole = {
+ .name = "ttyS2",
+ .write = tc3162_console_write,
+ .device = uart_console_device,
+ .setup = tc3162_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .cflag = B115200 | CS8 | CREAD,
+ .index = -1,
+ .data = &tc3162ser_reg,
+};
+
+static int __init tc3162_console_init(void)
+{
+ register_console(&tc3162_serconsole);
+ return 0;
+}
+
+console_initcall(tc3162_console_init);
+#endif
+
+static struct uart_port tc3162ser_ports[] = {
+ {
+ .iobase = CR_UART2_BASE,
+ .irq = UART2_INT,
+ .uartclk = 115200,
+ .fifosize = 1,
+ .ops = &tc3162ser_ops,
+ .line = 0,
+ .flags = ASYNC_BOOT_AUTOCONF,
+#if defined(TCSUPPORT_CPU_EN7516) || defined(TCSUPPORT_CPU_EN7527) || defined(TCSUPPORT_CPU_EN7580)
+ .unused1 = UART_HWFC_ENABLE,
+#else
+ .unused1 = UART_HWFC_DISABLE,
+#endif
+ },
+#if defined(TCSUPPORT_CPU_EN7516) || defined(TCSUPPORT_CPU_EN7527) || defined(TCSUPPORT_CPU_EN7580)
+ {
+ .iobase = CR_UART3_BASE,
+ .irq = UART3_INT,
+ .uartclk = 115200,
+ .fifosize = 1,
+ .ops = &tc3162ser_ops,
+ .line = 0,
+ .flags = ASYNC_BOOT_AUTOCONF,
+#if defined(TCSUPPORT_CPU_EN7516) || defined(TCSUPPORT_CPU_EN7527)
+ .unused1 = UART_HWFC_DISABLE,
+#endif
+#if defined(TCSUPPORT_CPU_EN7580)
+ .unused1 = UART_HWFC_ENABLE,
+#endif
+ },
+#if defined(TCSUPPORT_CPU_EN7580)
+ {
+ .iobase = CR_UART4_BASE,
+ .irq = UART4_INT,
+ .uartclk = 115200,
+ .fifosize = 1,
+ .ops = &tc3162ser_ops,
+ .line = 0,
+ .flags = ASYNC_BOOT_AUTOCONF,
+ .unused1 = UART_HWFC_DISABLE,
+ },
+ {
+ .iobase = CR_UART5_BASE,
+ .irq = UART5_INT,
+ .uartclk = 115200,
+ .fifosize = 1,
+ .ops = &tc3162ser_ops,
+ .line = 0,
+ .flags = ASYNC_BOOT_AUTOCONF,
+ .unused1 = UART_HWFC_DISABLE,
+ },
+#endif //7580
+#endif //7527 | 7516 | 7580
+};
+
+static struct uart_driver tc3162ser_reg[] = {
+ {
+ .owner = THIS_MODULE,
+ .driver_name = "ttyS2",
+ .dev_name = "ttyS2",
+ .major = TTY_MAJOR,
+ .minor = 65,
+ .nr = 1,
+ },
+#if defined(TCSUPPORT_CPU_EN7516) || defined(TCSUPPORT_CPU_EN7527) || defined(TCSUPPORT_CPU_EN7580)
+ {
+ .owner = THIS_MODULE,
+ .driver_name = "ttyS3",
+ .dev_name = "ttyS3",
+ .major = TTY_MAJOR,
+ .minor = 66,
+ .nr = 1,
+ },
+#if defined(TCSUPPORT_CPU_EN7580)
+ {
+ .owner = THIS_MODULE,
+ .driver_name = "ttyS4",
+ .dev_name = "ttyS4",
+ .major = TTY_MAJOR,
+ .minor = 67,
+ .nr = 1,
+ },
+ {
+ .owner = THIS_MODULE,
+ .driver_name = "ttyS5",
+ .dev_name = "ttyS5",
+ .major = TTY_MAJOR,
+ .minor = 68,
+ .nr = 1,
+ },
+#endif //7580
+#endif //7527 | 7516 | 7580
+};
+
+static int __init tc3162ser_init(void)
+{
+ int ret, i;
+
+ /* UART2 Initial Start*/
+ unsigned long div_x = 0, div_y = 0;
+ unsigned long word = 0;
+ unsigned int reg = 0;
+ struct uart_port *port;
+
+ if(isEN751221)
+ {
+
+ reg = VPint(0xbfa20104); //GPIO_SHR_SCH
+ reg |= (0x1 << 18); //UART2_MODE
+ reg &= ~((0x01 << 3) | (0x01 << 7)); /* Disable Lan0_LED GE_Led */
+ VPint(0xbfa20104) = reg;
+ }
+ else if (isMT751020)
+ {
+ reg = VPint(0xbfb00860); //GPIO_SHR_SCH
+ reg |= (0x1 << 6); //UART2_MODE
+ reg &= ~(0x01 << 26); /* Disable GSW PHY MDIO MODE */
+ VPint(0xbfb00860) = reg;
+ }
+ else if (isEN751627)
+ {
+ printk("Init UART2 and UART3, still need to trun on for using.");
+ }
+ else
+ {
+ printk("Not support UART2!!!!\n");
+ return -ENODEV;
+ }
+
+ for(i = 0; i < TC3162_NR_PORTS; i++) {
+
+ port = &tc3162ser_ports[i];
+
+ /* Set FIFO control enable, reset RFIFO, TFIFO, 16550 mode, watermark=0x00 (1 byte) */
+ tc_outl(CR_UART2_FCR + port->iobase, UART_FCR|UART_WATERMARK);
+
+ /* Set modem control to 0 */
+ tc_outl(CR_UART2_MCR + port->iobase, UART_MCR);
+
+ /* Disable IRDA, Disable Power Saving Mode, RTS , CTS flow control */
+ tc_outl(CR_UART2_MISCC + port->iobase, UART_MISCC);
+
+ /* Set interrupt Enable to, enable Tx, Rx and Line status */
+ tc_outl(CR_UART2_IER + port->iobase, UART_IER);
+
+ /* access the bardrate divider */
+ tc_outl(CR_UART2_LCR + port->iobase, UART_BRD_ACCESS);
+
+ div_y = UART_XYD_Y;
+
+ div_x = tc3162_get_uclk_20M(UART_BAUDRATE);
+
+ word = (div_x<<16)|div_y;
+ tc_outl(CR_UART2_XYD + port->iobase, word);
+
+ /* Set Baud Rate Divisor to 1*16 */
+ tc_outl(CR_UART2_BRDL + port->iobase, UART_BRDL_20M);
+ tc_outl(CR_UART2_BRDH + port->iobase, UART_BRDH_20M);
+
+ /* Set DLAB = 0, clength = 8, stop =1, no parity check */
+ tc_outl(CR_UART2_LCR + port->iobase, UART_LCR);
+ /* UART2 Initial End */
+
+ ret = uart_register_driver(&tc3162ser_reg[i]);
+ if (!ret)
+ uart_add_one_port(&tc3162ser_reg[i], &tc3162ser_ports[i]);
+ }
+
+ return ret;
+}
+
+#if 0 // def CONFIG_TC3162_ADSL
+/*_____________________________________________________________________________
+** function name: tcconsole_cmd
+** descriptions:
+** This function is used the send command to uart drivers that is used
+** tcconsole utility.
+**
+** parameters:
+** cmd: Specify the command line strings.
+** len: The length of command.
+**
+** global:
+** tc3162ser_ports
+**
+** return:
+** none
+**
+** call:
+** tty_flip_buffer_push
+** tty_insert_flip_char
+**
+** revision:
+** 1. Here 2010/9/23
+**____________________________________________________________________________
+*/
+void
+tcconsole_cmd(char* cmd, int len){
+ struct uart_port *port= &tc3162ser_ports[0];
+ struct tty_struct *tty = port->state->port.tty;
+ unsigned int ch, flg;
+ int i;
+
+ /*Ignore the line feed character*/
+ for(i=0; i<len-1; i++){
+ ch = cmd[i];
+ port->icount.rx++;
+
+ if (tty->low_latency)
+ tty_flip_buffer_push(tty);
+
+ flg = TTY_NORMAL;
+
+ tty_insert_flip_char(tty, ch, flg);
+ }
+ tty_flip_buffer_push(tty);
+}/*end tcconsole_cmd*/
+EXPORT_SYMBOL(tcconsole_cmd);
+#endif
+
+__initcall(tc3162ser_init);
+
Index: linux-3.18.21/drivers/usb/Makefile
===================================================================
--- linux-3.18.21.orig/drivers/usb/Makefile 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/usb/Makefile 2018-02-05 13:21:05.000000000 +0800
@@ -11,6 +11,31 @@
obj-$(CONFIG_USB_MON) += mon/
+ifneq ($(strip $(TCSUPPORT_MUH_TEST)),)
+obj-$(CONFIG_USB_XHCI_HCD) += host/mtk_test/
+else
+ifneq ($(strip $(TCSUPPORT_CPU_EN7580)),)
+MTK_DR_M3U_HCD = ../../../kernel_ext/drivers/usb
+obj-$(CONFIG_PCI) += $(MTK_DR_M3U_HCD)/mu3h/
+obj-$(CONFIG_USB_EHCI_HCD) += $(MTK_DR_M3U_HCD)/mu3h/
+obj-$(CONFIG_USB_ISP116X_HCD) += $(MTK_DR_M3U_HCD)/mu3h/
+obj-$(CONFIG_USB_OHCI_HCD) += $(MTK_DR_M3U_HCD)/mu3h/
+obj-$(CONFIG_USB_UHCI_HCD) += $(MTK_DR_M3U_HCD)/mu3h/
+obj-$(CONFIG_USB_FHCI_HCD) += $(MTK_DR_M3U_HCD)/mu3h/
+obj-$(CONFIG_USB_XHCI_HCD) += $(MTK_DR_M3U_HCD)/mu3h/
+obj-$(CONFIG_USB_SL811_HCD) += $(MTK_DR_M3U_HCD)/mu3h/
+obj-$(CONFIG_USB_ISP1362_HCD) += $(MTK_DR_M3U_HCD)/mu3h/
+obj-$(CONFIG_USB_U132_HCD) += $(MTK_DR_M3U_HCD)/mu3h/
+obj-$(CONFIG_USB_R8A66597_HCD) += $(MTK_DR_M3U_HCD)/mu3h/
+obj-$(CONFIG_USB_HWA_HCD) += $(MTK_DR_M3U_HCD)/mu3h/
+obj-$(CONFIG_USB_ISP1760_HCD) += $(MTK_DR_M3U_HCD)/mu3h/
+obj-$(CONFIG_USB_IMX21_HCD) += $(MTK_DR_M3U_HCD)/mu3h/
+obj-$(CONFIG_USB_FSL_MPH_DR_OF) += $(MTK_DR_M3U_HCD)/mu3h/
+obj-$(CONFIG_USB_FUSBH200_HCD) += $(MTK_DR_M3U_HCD)/mu3h/
+obj-$(CONFIG_USB_FOTG210_HCD) += $(MTK_DR_M3U_HCD)/mu3h/
+obj-$(CONFIG_USB_MAX3421_HCD) += $(MTK_DR_M3U_HCD)/mu3h/
+obj-$(CONFIG_USB) += $(MTK_DR_M3U_HCD)/mu3_phy/
+else
obj-$(CONFIG_PCI) += host/
obj-$(CONFIG_USB_EHCI_HCD) += host/
obj-$(CONFIG_USB_ISP116X_HCD) += host/
@@ -29,6 +54,8 @@
obj-$(CONFIG_USB_FUSBH200_HCD) += host/
obj-$(CONFIG_USB_FOTG210_HCD) += host/
obj-$(CONFIG_USB_MAX3421_HCD) += host/
+endif
+endif
obj-$(CONFIG_USB_C67X00_HCD) += c67x00/
Index: linux-3.18.21/drivers/usb/core/Makefile
===================================================================
--- linux-3.18.21.orig/drivers/usb/core/Makefile 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/usb/core/Makefile 2018-02-05 13:21:05.000000000 +0800
@@ -2,12 +2,14 @@
# Makefile for USB Core files and filesystem
#
-usbcore-y := usb.o hub.o hcd.o urb.o message.o driver.o
+usbcore-y := usb.o ecnt_core.o usbphy.o hub.o hcd.o urb.o message.o driver.o
usbcore-y += config.o file.o buffer.o sysfs.o endpoint.o
usbcore-y += devio.o notify.o generic.o quirks.o devices.o
usbcore-y += port.o
+ifeq ($(strip $(TCSUPPORT_MUH_TEST)),)
usbcore-$(CONFIG_PCI) += hcd-pci.o
+endif
usbcore-$(CONFIG_ACPI) += usb-acpi.o
obj-$(CONFIG_USB) += usbcore.o
Index: linux-3.18.21/drivers/usb/core/devio.c
===================================================================
--- linux-3.18.21.orig/drivers/usb/core/devio.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/usb/core/devio.c 2018-02-05 13:21:05.000000000 +0800
@@ -54,6 +54,7 @@
#include <linux/moduleparam.h>
#include "usb.h"
+#include "ecnt_drivers_usb_core.h"
#define USB_MAXBUS 64
#define USB_DEVICE_MAX (USB_MAXBUS * 128)
@@ -1172,6 +1173,13 @@
ret = checkintf(ps, ret);
if (ret)
return ret;
+
+ /* USB3 Host will ignore the usb_clear_halt requested by up-layer */
+ if (ECNT_RETURN_DROP == ecnt_usb_proc_clearhalt_dev_inline_hook(ps->dev))
+ {
+ return 0;
+ }
+
check_reset_of_active_ep(ps->dev, ep, "CLEAR_HALT");
if (ep & USB_DIR_IN)
pipe = usb_rcvbulkpipe(ps->dev, ep & 0x7f);
Index: linux-3.18.21/drivers/usb/core/ecnt_core.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/core/ecnt_core.c 2018-02-05 13:21:05.000000000 +0800
@@ -0,0 +1,182 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (??EcoNet Software??)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (??EcoNet??) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (??ECONET SOFTWARE??) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN ??AS IS??
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER??S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER??S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include "hub.h"
+#include "ecnt_drivers_usb_core.h"
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+/******************
+usb 2.0 port status definition which is the same with ehci.h.
+ shnwind add 20101012.
+***************/
+#define PORT_RESET (1<<8) /* reset port */
+#define PORT_SUSPEND (1<<7) /* suspend port */
+#define PORT_RESUME (1<<6) /* resume it */
+#define PORT_OCC (1<<5) /* over current change */
+#define PORT_OC (1<<4) /* over current active */
+#define PORT_PEC (1<<3) /* port enable change */
+#define PORT_PE (1<<2) /* port enable */
+#define PORT_CSC (1<<1) /* connect status change */
+#define PORT_CONNECT (1<<0) /* device connected */
+/***********************************/
+#define USB_PORT0_STAT_20_ADDR 0xbfba1064
+#define USB_PORT1_STAT_20_ADDR 0xbfba1068
+
+#define USB_PORT0_STAT_11_ADDR 0xbfba0054
+#define USB_PORT1_STAT_11_ADDR 0xbfba0058
+#define OHCI_USB_CONTROL_ADDR 0xbfba0004
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+
+/************************************************************************
+* E X T E R N A L D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+*************************************************************************
+*/
+int power_saving_mode(int mode);
+
+/************************************************************************
+* P U B L I C D A T A
+*************************************************************************
+*/
+
+/************************************************************************
+* P R I V A T E D A T A
+*************************************************************************
+*/
+
+
+/************************************************************************
+* F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+int power_saving_mode(int mode)
+{
+ unsigned long x;
+
+ if(mode == 0){
+ /* clear bit7,6 of 0xc0000000 (after USB flash is unpluged)
+ * to reset rootHub and SIE of usb1.1, so that power saving
+ * mode can work properly --Trey */
+ if(((readl((void *)USB_PORT0_STAT_20_ADDR) & PORT_CONNECT) == 0)
+ && ((readl((void *)USB_PORT1_STAT_20_ADDR) & PORT_CONNECT) == 0)
+ && ((readl((void *)USB_PORT0_STAT_11_ADDR) & PORT_CONNECT) == 0)
+ && ((readl((void *)USB_PORT1_STAT_11_ADDR) & PORT_CONNECT) == 0)){
+ /* printk("SET USB 11 RESET\n"); */
+ x = readl((void *)OHCI_USB_CONTROL_ADDR);
+ x &= ~((1<<7) | (1<<6));
+ writel(x, (void *)OHCI_USB_CONTROL_ADDR);
+ }
+ }else if(mode == 1){
+ /* set bit7 and clear bit6 of 0xc0000000 (when USB flash is pluged)
+ * to set to normal mode for rootHub and SIE of usb1.1, so that power
+ * saving mode can work properly --Trey */
+ x = readl((void *)OHCI_USB_CONTROL_ADDR);
+ x |= (1<<7);
+ x &= ~(1<<6);
+ writel(x, (void *)OHCI_USB_CONTROL_ADDR);
+ /* printk("SET USB 11 OPERATION\n"); */
+ }
+ return 0;
+}
+
+int ecnt_hub_port_connect_inline_hook
+(struct usb_device *udev)
+{
+#ifdef TCSUPPORT_USB_HOST_LED
+ struct usb_hub *hub = NULL;
+ unsigned char maxchild = 0;
+
+ if(TCSUPPORT_XPON_HAL_API_VAL)
+ {
+ hub = usb_hub_to_struct_hub(udev);
+ if(hub){
+ maxchild = hub->descriptor->bNbrPorts;
+ if(maxchild == 4)
+ hubflag = 1;
+ }
+ if(!strcmp(dev_name(&udev->dev), "1-1.1"))
+ hubflag = 2;
+ }
+ if ( udev && udev->phyportnum )
+ {
+ if(TCSUPPORT_XPON_HAL_API_VAL)
+ {
+ if(udev->phyportnum ==USBPHYPORT1 && (hubflag == 1 || !strcmp(dev_name(&udev->dev), "1-1.2")))
+ return ECNT_CONTINUE;
+
+ if ( udev->phyportnum >= USBPHYPORT1
+ && udev->phyportnum <= USBPHYPORT2 )
+ pre_usb_state[udev->phyportnum - 1] = USB_CONNECT;
+
+ if ( Usb_Led_Flash_Op_hook )
+ Usb_Led_Flash_Op_hook(USB_CONNECT, udev->phyportnum);
+ }
+ else{
+ if ( udev->phyportnum >= USBPHYPORT1
+ && udev->phyportnum <= USBPHYPORT2 )
+ pre_usb_state[udev->phyportnum - 1] = USB_CONNECT;
+
+ if ( Usb_Led_Flash_Op_hook )
+ Usb_Led_Flash_Op_hook(USB_CONNECT, udev->phyportnum);
+ }
+ }
+#endif
+
+ return ECNT_CONTINUE;
+}
Index: linux-3.18.21/drivers/usb/core/ecnt_drivers_usb_core.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/core/ecnt_drivers_usb_core.h 2018-02-05 13:21:05.000000000 +0800
@@ -0,0 +1,107 @@
+#ifndef _LINUX_ECNT_DRIVERS_USB_CORE_H
+#define _LINUX_ECNT_DRIVERS_USB_CORE_H
+#include <linux/kernel.h>
+#include <linux/kmemcheck.h>
+#include <linux/compiler.h>
+#include <linux/time.h>
+#include <linux/bug.h>
+#include <linux/cache.h>
+#include <linux/atomic.h>
+#include <asm/types.h>
+#include <linux/usb.h>
+#include <ecnt_hook/ecnt_hook.h>
+#ifdef TCSUPPORT_USB_HOST_LED
+#include <asm/tc3162/tc3162.h>
+#include <asm/tc3162/ledcetrl.h>
+#endif
+#include <linux/libcompileoption.h>
+
+#ifdef TCSUPPORT_USB_HOST_LED
+extern void (*Usb_Led_Flash_Op_hook)(unsigned int opmode ,unsigned int phyport);
+extern int pre_usb_state[2];
+static int hubflag;
+#endif
+
+static inline int ecnt_usb_disconnect_inline_hook
+(struct usb_device *udev)
+{
+
+#ifdef TCSUPPORT_USB_HOST_LED
+ if ( udev && udev->phyportnum )
+ {
+ if ( udev->phyportnum >= USBPHYPORT1
+ && udev->phyportnum <= USBPHYPORT2 )
+ pre_usb_state[udev->phyportnum - 1] = USB_DISCONNECT;
+
+ if ( Usb_Led_Flash_Op_hook )
+ Usb_Led_Flash_Op_hook(USB_DISCONNECT, udev->phyportnum);
+ }
+#endif
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_usb_submit_urb_inline_hook
+(struct usb_device *udev)
+{
+#ifdef TCSUPPORT_USB_HOST_LED
+ if ( udev && udev->phyportnum)
+ {
+ if(TCSUPPORT_XPON_HAL_API_VAL)
+ {
+ if(udev->phyportnum ==USBPHYPORT1 && (hubflag == 1 || !strcmp(dev_name(&udev->dev), "1-1.2")))
+ return ECNT_CONTINUE;
+
+ if ( Usb_Led_Flash_Op_hook )
+ Usb_Led_Flash_Op_hook(USB_BLINK, udev->phyportnum);
+ }
+ else{
+ if ( udev->phyportnum >= USBPHYPORT1
+ && udev->phyportnum <= USBPHYPORT2 )
+ pre_usb_state[udev->phyportnum - 1] = USB_BLINK;
+
+ if ( Usb_Led_Flash_Op_hook )
+ Usb_Led_Flash_Op_hook(USB_BLINK, udev->phyportnum);
+ }
+ }
+#endif
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_usb_alloc_dev_inline_hook
+(struct usb_device *parent, struct usb_device *dev, unsigned port1)
+{
+ if ( !dev )
+ return ECNT_CONTINUE;
+
+#ifdef TCSUPPORT_USB_HOST_LED
+ if ( unlikely(!parent) )
+ {
+ dev->phyportnum = port1;
+ }
+ else
+ {
+ dev->phyportnum = dev->devpath[0] - 48;
+ }
+#endif
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_usb_proc_clearhalt_dev_inline_hook(struct usb_device *dev)
+{
+ struct usb_hcd *hcd = NULL;
+
+ /* USB3 Host will ignore the usb_clear_halt requested by up-layer */
+ hcd = bus_to_hcd(dev->bus);
+ if (hcd->driver->flags & HCD_USB3)
+ {
+ return ECNT_RETURN_DROP;
+ }
+
+ return ECNT_CONTINUE;
+}
+
+#endif
+
Index: linux-3.18.21/drivers/usb/core/hub.c
===================================================================
--- linux-3.18.21.orig/drivers/usb/core/hub.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/usb/core/hub.c 2018-02-05 13:21:05.000000000 +0800
@@ -32,6 +32,7 @@
#include "hub.h"
#include "otg_whitelist.h"
+#include "ecnt_drivers_usb_core.h"
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
@@ -2126,6 +2127,7 @@
struct usb_hub *hub = NULL;
int port1 = 1;
+ ecnt_usb_disconnect_inline_hook(udev);
/* mark the device as inactive, so any further urb submissions for
* this device (and any of its children) will fail immediately.
* this quiesces everything except pending urbs.
@@ -4611,6 +4613,7 @@
if (hcd->usb_phy && !hdev->parent)
usb_phy_notify_disconnect(hcd->usb_phy, udev->speed);
usb_disconnect(&port_dev->child);
+ power_saving_mode(0);
}
/* We can forget about a "removed" device when there's a physical
@@ -4650,6 +4653,7 @@
goto done;
return;
}
+ power_saving_mode(1);
if (hub_is_superspeed(hub->hdev))
unit_load = 150;
else
@@ -4777,6 +4781,8 @@
if (status)
dev_dbg(hub->intfdev, "%dmA power budget left\n", status);
+ ecnt_hub_port_connect_inline_hook(udev);
+
return;
loop_disable:
Index: linux-3.18.21/drivers/usb/core/urb.c
===================================================================
--- linux-3.18.21.orig/drivers/usb/core/urb.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/usb/core/urb.c 2018-02-05 13:21:05.000000000 +0800
@@ -7,6 +7,7 @@
#include <linux/wait.h>
#include <linux/usb/hcd.h>
#include <linux/scatterlist.h>
+#include "ecnt_drivers_usb_core.h"
#define to_urb(d) container_of(d, struct urb, kref)
@@ -539,6 +540,8 @@
}
}
+ ecnt_usb_submit_urb_inline_hook(dev);
+
return usb_hcd_submit_urb(urb, mem_flags);
}
EXPORT_SYMBOL_GPL(usb_submit_urb);
Index: linux-3.18.21/drivers/usb/core/usb.c
===================================================================
--- linux-3.18.21.orig/drivers/usb/core/usb.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/usb/core/usb.c 2018-02-05 13:21:05.000000000 +0800
@@ -43,6 +43,7 @@
#include <linux/dma-mapping.h>
#include "usb.h"
+#include "ecnt_drivers_usb_core.h"
const char *usbcore_name = "usbcore";
@@ -499,6 +500,7 @@
dev->authorized = usb_hcd->authorized_default;
dev->wusb = usb_bus_is_wusb(bus) ? 1 : 0;
}
+ ecnt_usb_alloc_dev_inline_hook(parent, dev, port1);
return dev;
}
EXPORT_SYMBOL_GPL(usb_alloc_dev);
@@ -1051,6 +1053,7 @@
pr_info("%s: USB support disabled\n", usbcore_name);
return 0;
}
+ ecnt_usb_phy_init();
usb_init_pool_max();
retval = usb_debugfs_init();
Index: linux-3.18.21/drivers/usb/core/usbphy.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/core/usbphy.c 2018-02-05 13:21:05.000000000 +0800
@@ -0,0 +1,304 @@
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h> /* for in_interrupt() */
+#include <linux/kmod.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+
+
+#include <asm/io.h>
+#include <linux/scatterlist.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+
+
+
+#include <asm/tc3162/tc3162.h>
+/* #define USB_PHY_DBG 1 */
+
+void ecnt_usb_phy_init(void);
+
+void usb_tc3182_rt65168_phy_init(void){
+ unsigned long temp;
+
+ /*choose op mode host*/
+ temp = VPint(CR_AHB_SSR);
+ temp &= ~((1<<30) | (1<<29));
+ temp |= ((1<<30) | (1<<29));
+ VPint(CR_AHB_SSR) = temp;
+ mdelay(10);
+ //setup port 0 reset time
+ temp = VPint(0xbfb000a8);
+ temp |= ((1<<9) | (1<<10));
+ VPint(0xbfb000a8) = temp;
+ temp = VPint(0xbfb000ac);
+ temp &= ~((1<<29) | (1<<28));
+ VPint(0xbfb000ac) = temp;
+ //setup port 1 reset time
+ temp = VPint(0xbfb000a8);
+ temp |= ((1<<14) | (1<<13));
+ VPint(0xbfb000a8) = temp;
+ temp = VPint(0xbfb000a8);
+ temp &= ~((1<<12) | (1<<11));
+ VPint(0xbfb000a8) = temp;
+ //select reset mode internal
+ temp = VPint(0xbfb000ac);
+ temp &= ~(1<<27);
+ VPint(0xbfb000ac) = temp;
+ //phy reset
+ temp = VPint(0xbfb000ec);
+ temp &= ~(1<<31);
+ VPint(0xbfb000ec) = temp;
+ mdelay(1);
+ temp |= (1<<31);
+ VPint(0xbfb000ec) = temp;
+ mdelay(6);
+}
+
+void usb_mt751020_phy_init(void){
+
+#ifdef USB_PHY_DBG
+ printk("usb_debug:Init 7510/7520 USB PHY\n");
+//7510 ASIC rtlsim setting:
+//1. PHY Initial setting :
+ printk("usb_debug:PHY initial setting: \n");
+//a. Read usb_phy_top bank (0xBFAF_1800) offset address 0x0000 : read data (0x0048086a)
+ printk("read address BFAF1800 (0x0048086a); value is: %lx\n",VPint(0xbfaf1800));
+ mdelay(1);
+//b. Read usb_phy_top bank (0xBFAF_1800) offset address 0x0010 : read data (0x00015404)
+ printk("read address BFAF1810 (0x00015404); value is: %lx\n",VPint(0xbfaf1810));
+ mdelay(1);
+//c. Read usb_phy_top bank (0xBFAF_1800) offset address 0x0110 : read data (0x00015404)
+ printk("read address BFAF1910 (0x00015404); value is: %lx\n",VPint(0xbfaf1910));
+ mdelay(1);
+//d. Read usb_phy_top bank (0xBFAF_1800) offset address 0x0060 : read data (0x00000402)
+ printk("read address BFAF1860 (0x00000402); value is: %lx\n",VPint(0xbfaf1860));
+ mdelay(1);
+//e. Read usb_phy_top bank (0xBFAF_1800) offset address 0x0160 : read data (0x00000402)
+ printk("read address BFAF1960 (0x00000402); value is: %lx\n",VPint(0xbfaf1960));
+ mdelay(1);
+//f. Write usb_phy_top bank (0xBFAF_1800) offset address 0x0060 : write data (0x00ffff02)
+ VPint(0xbfaf1860) = 0x00ffff02;
+ printk("write address BFAF1860 with value 0x00ffff02\n");
+ mdelay(1);
+//g. Read usb_phy_top bank (0xBFAF_1800) offset address 0x0060 : read data (0x00ffff02)
+ printk("read address BFAF1860 (0x00ffff02); value is: %lx\n",VPint(0xbfaf1860));
+ mdelay(1);
+//h. Write usb_phy_top bank (0xBFAF_1800) offset address 0x0060 : write data (0x00555502)
+ VPint(0xbfaf1860) = 0x00555502;
+ printk("write address BFAF1860 with value 0x00555502\n");
+ mdelay(1);
+//i. Read usb_phy_top bank (0xBFAF_1800) offset address 0x0060 : read data (0x00555502)
+ printk("read address BFAF1860 (0x00555502); value is: %lx\n",VPint(0xbfaf1860));
+ mdelay(1);
+//j. Write usb_phy_top bank (0xBFAF_1800) offset address 0x0060 : write data (0x00aaaa02)
+ VPint(0xbfaf1860) = 0x00aaaa02;
+ printk("write address BFAF1860 with value 0x00aaaa02\n");
+ mdelay(1);
+//k. Read usb_phy_top bank (0xBFAF_1800) offset address 0x0060 : read data (0x00aaaa02)
+ printk("read address BFAF1860 (0x00aaaa02); value is: %lx\n",VPint(0xbfaf1860));
+#endif
+ mdelay(1);
+//l. Write usb_phy_top bank (0xBFAF_1800) offset address 0x0060 : write data (0x00000402)
+ //VPint(0xbfaf1860) = 0x00000402;
+ regWrite32(0xbfaf1860, 0x00000402);
+#ifdef USB_PHY_DBG
+ printk("write address BFAF1860 with value 0x00000402\n");
+#endif
+ mdelay(1);
+//m. Read usb_phy_top bank (0xBFAF_1800) offset address 0x0060 : read data (0x00000402)
+#ifdef USB_PHY_DBG
+ printk("read address BFAF1860 (0x00000402); value is: %x\n",regRead32(0xbfaf1860));
+ mdelay(1);
+#endif
+//n. Write usb_phy_top bank (0xBFAF_1800) offset address 0x0000 : write data (0x0048086a)
+ regWrite32(0xbfaf1800, 0x0048086a);
+#ifdef USB_PHY_DBG
+ printk("write address BFAF1800 with value 0x0048086a\n");
+#endif
+ mdelay(1);
+//o. Write usb_phy_top bank (0xBFAF_1800) offset address 0x0004 : write data (0x4400001c)
+ regWrite32(0xbfaf1804, 0x4400001c);
+#ifdef USB_PHY_DBG
+ printk("write address BFAF1804 with value 0x4400001c\n");
+#endif
+ mdelay(1);
+//p. Write usb_phy_top bank (0xBFAF_1800) offset address 0x001c : write data (0xc0200000)
+ regWrite32(0xbfaf181c, 0xc0200000);
+#ifdef USB_PHY_DBG
+ printk("write address BFAF181c with value 0xc0200000\n");
+#endif
+ mdelay(1);
+//q. Write usb_phy_top bank (0xBFAF_1800) offset address 0x011c : write data (0xc0200000)
+ regWrite32(0xbfaf191c, 0xc0200000);
+#ifdef USB_PHY_DBG
+ printk("write address BFAF191c with value 0xc0200000\n");
+#endif
+ mdelay(1);
+//r. Write usb_phy_top bank (0xBFAF_1800) offset address 0x0068 : write data (0x02000000)
+ regWrite32(0xbfaf1868, 0x02000000);
+#ifdef USB_PHY_DBG
+ printk("write address BFAF1868 with value 0x02000000\n");
+#endif
+ mdelay(1);
+//s. Write usb_phy_top bank (0xBFAF_1800) offset address 0x0168 : write data (0x02000000)
+ regWrite32(0xbfaf1968, 0x02000000);
+#ifdef USB_PHY_DBG
+ printk("write address BFAF1968 with value 0x02000000\n");
+#endif
+ mdelay(1);
+
+//2. Reset usb phy :
+#ifdef USB_PHY_DBG
+ printk("usb_debug:Reset USB PHY: \n");
+#endif
+//a. Write usb_phy_top bank (0xBFAF_1800) offset address 0x0068 : write data (0x02004000)
+ regWrite32(0xbfaf1868, 0x02004000);
+#ifdef USB_PHY_DBG
+ printk("write address BFAF1868 with value 0x02004000\n");
+#endif
+ mdelay(1);
+//b. Read usb_phy_top bank (0xBFAF_1800) offset address 0x0068 : read data (0x02004000)
+#ifdef USB_PHY_DBG
+ printk("read address BFAF1868 (0x02004000); value is: %lx\n",VPint(0xbfaf1868));
+ mdelay(1);
+#endif
+//c. Write usb_phy_top bank (0xBFAF_1800) offset address 0x0168 : write data (0x02004000)
+ regWrite32(0xbfaf1968, 0x02004000);
+#ifdef USB_PHY_DBG
+ printk("write address BFAF1968 with value 0x02004000\n");
+#endif
+ mdelay(1);
+//d. Read usb_phy_top bank (0xBFAF_1800) offset address 0x0168 : read data (0x02004000)
+#ifdef USB_PHY_DBG
+ printk("read address BFAF1968 (0x02004000); value is: %x\n",regRead32(0xbfaf1968));
+ mdelay(1);
+#endif
+//e. Write usb_phy_top bank (0xBFAF_1800) offset address 0x0068 : write data (0x02000000)
+ regWrite32(0xbfaf1868, 0x02000000);
+
+#ifdef USB_PHY_DBG
+ printk("write address BFAF1868 with value 0x02000000\n");
+#endif
+ mdelay(1);
+//f. Read usb_phy_top bank (0xBFAF_1800) offset address 0x0068 : read data (0x02000000)
+#ifdef USB_PHY_DBG
+ printk("read address BFAF1868 (0x02000000); value is: %x\n",regRead32(0xbfaf1868));
+ mdelay(1);
+#endif
+//g. Write usb_phy_top bank (0xBFAF_1800) offset address 0x0168 : write data (0x02000000)
+ regWrite32(0xbfaf1968, 0x02000000);
+#ifdef USB_PHY_DBG
+ printk("write address BFAF1968 with value 0x02000000\n");
+#endif
+ mdelay(1);
+//h. Read usb_phy_top bank (0xBFAF_1800) offset address 0x0168 : read data (0x02000000)
+#ifdef USB_PHY_DBG
+ printk("read address BFAF1968 (0x02000000); value is: %x\n",regRead32(0xbfaf1968));
+ mdelay(1);
+#endif
+
+ regWrite32(0xbfaf186c, 0x00800000);
+
+ if(isMT7505)
+ {
+ //this register is set to enable USB2.0 current
+ regWrite32(0xbfaf1808, regRead32(0xbfaf1808) | (0x1<<3) );
+ mdelay(1);
+ }
+//biker_20130814, add slew rate cal to pass eye diagram test
+//port 0
+ regWrite32(0xbfaf1810, regRead32(0xbfaf1810) | (0x1<<23) );
+ mdelay(1);
+ regWrite32(0xbfaf1f10, regRead32(0xbfaf1f10) | (0x1<<8) );
+ mdelay(1);
+ regWrite32(0xbfaf1f00, regRead32(0xbfaf1f00) | (0x1<<10) );
+ mdelay(1);
+ regWrite32(0xbfaf1f00, regRead32(0xbfaf1f00) | (0x1<<24) );
+ mdelay(1);
+ regWrite32(0xbfaf1f00, regRead32(0xbfaf1f00) & (~(0x1<<24) ) );
+ mdelay(1);
+ regWrite32(0xbfaf1f10, regRead32(0xbfaf1f10) & (~(0x1<<8) ));
+ mdelay(1);
+ regWrite32(0xbfaf1810, (regRead32(0xbfaf1810) &(~(0x7 <<16)) ) | (0x4<<16) );
+ mdelay(1);
+ regWrite32(0xbfaf1810, regRead32(0xbfaf1810) & (~(0x1<<23) ) );
+ mdelay(1);
+
+//port 1
+ regWrite32(0xbfaf1910, regRead32(0xbfaf1910) | (0x1<<23) );
+ mdelay(1);
+ regWrite32(0xbfaf1f10, regRead32(0xbfaf1f10) | (0x1<<8) );
+ mdelay(1);
+ regWrite32(0xbfaf1f00, regRead32(0xbfaf1f00) | (0x1<<10) );
+ mdelay(1);
+ regWrite32(0xbfaf1f00, regRead32(0xbfaf1f00) | (0x1<<24) );
+ mdelay(1);
+ regWrite32(0xbfaf1f00, regRead32(0xbfaf1f00) & (~(0x1<<24) ) );
+ mdelay(1);
+ regWrite32(0xbfaf1f10, regRead32(0xbfaf1f10) & (~(0x1<<8) ));
+ mdelay(1);
+ regWrite32(0xbfaf1910, (regRead32(0xbfaf1910) &(~(0x7 <<16)) ) | (0x4<<16) );
+ mdelay(1);
+ regWrite32(0xbfaf1910, regRead32(0xbfaf1910) & (~(0x1<<23) ) );
+ mdelay(1);
+
+
+
+ return;
+}
+
+#if defined(TCSUPPORT_CPU_MT7505)
+extern int (*I2CWriterPtr)(u8 DevAddr, u8 WordAddr, u8* data_value, u8 data_len);
+extern int (*I2CReaderPtr)(u8 DevAddr, u8 WordAddr, u8* data_value, u8 data_len);
+#define USB_PHY_DEV_ADDR 0x60
+#define USB_CTRL_DEV_ADDR 0x50
+
+void usb_phy_init_I2C(void){
+ u8 u1Value[4] = {0, 0, 0, 0};
+
+ if(!I2CWriterPtr || !I2CReaderPtr)
+ return;
+
+ I2CReaderPtr(USB_PHY_DEV_ADDR, 0x6a, u1Value,4);
+ //config USB phy clock to 30MHZ
+ if(u1Value[0] != 0x00){
+ u1Value[0] = 0x00;
+ u1Value[1] = 0x02;
+ if(I2CWriterPtr(USB_PHY_DEV_ADDR, 0x6a, u1Value, 4));
+ }
+ I2CReaderPtr(USB_CTRL_DEV_ADDR, 0xab, u1Value,4);
+ //config USB driving strength to 0x11, Vpp is 3.3V
+ if(u1Value[0] != 0x03){
+ u1Value[0] = 0x03;
+ if(I2CWriterPtr(USB_CTRL_DEV_ADDR, 0xab, u1Value, 4));
+ }
+
+}
+#endif
+
+void ecnt_usb_phy_init(void){
+ if (!isFPGA){
+ if(isTC3182 || isRT65168){
+ usb_tc3182_rt65168_phy_init();
+ }
+ else if(isMT751020 || isMT7505){
+ usb_mt751020_phy_init();
+ }
+ }
+#if defined(TCSUPPORT_CPU_MT7505)
+ else{
+ usb_phy_init_I2C();
+ }
+#endif
+}
+
Index: linux-3.18.21/drivers/usb/host/Kconfig
===================================================================
--- linux-3.18.21.orig/drivers/usb/host/Kconfig 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/usb/host/Kconfig 2018-02-05 13:21:06.000000000 +0800
@@ -32,7 +32,12 @@
default y
config USB_XHCI_PLATFORM
- tristate
+ bool "xHCI platform"
+ default y
+
+config USB_EN7512_XHCI_HCD
+ bool "ECONET EN7512 xHCI"
+ depends on USB_XHCI_PLATFORM
config USB_XHCI_MVEBU
tristate "xHCI support for Marvell Armada 375/38x"
Index: linux-3.18.21/drivers/usb/host/Makefile
===================================================================
--- linux-3.18.21.orig/drivers/usb/host/Makefile 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/usb/host/Makefile 2018-02-05 13:21:06.000000000 +0800
@@ -24,11 +24,16 @@
xhci-plat-hcd-y += xhci-rcar.o
endif
+ifdef CONFIG_USB_EN7512_XHCI_HCD
+xhci-hcd-y += mtk-phy.o xhci-mtk-scheduler.o xhci-mtk-power.o xhci-mtk.o mtk-phy-ahb.o
+endif
obj-$(CONFIG_USB_WHCI_HCD) += whci/
obj-$(CONFIG_PCI) += pci-quirks.o
+ifndef CONFIG_USB_EN7512_XHCI_HCD
obj-$(CONFIG_USB_XHCI_PCI) += xhci-pci.o
+endif
obj-$(CONFIG_USB_XHCI_PLATFORM) += xhci-plat-hcd.o
obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
@@ -79,3 +84,6 @@
obj-$(CONFIG_USB_FUSBH200_HCD) += fusbh200-hcd.o
obj-$(CONFIG_USB_FOTG210_HCD) += fotg210-hcd.o
obj-$(CONFIG_USB_MAX3421_HCD) += max3421-hcd.o
+ifndef CONFIG_USB_EN7512_XHCI_HCD
+obj-$(CONFIG_MIPS_RT63365) += ehci_ohci.o
+endif
\ No newline at end of file
Index: linux-3.18.21/drivers/usb/host/ehci-hcd.c
===================================================================
--- linux-3.18.21.orig/drivers/usb/host/ehci-hcd.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/usb/host/ehci-hcd.c 2018-02-05 13:21:06.000000000 +0800
@@ -1248,6 +1248,12 @@
#define PLATFORM_DRIVER ehci_fsl_driver
#endif
+
+#if defined (CONFIG_MIPS_RT63365)
+#include "ehci-rt3xxx.c"
+#define PLATFORM_DRIVER rt3xxx_ehci_driver
+#endif
+
#ifdef CONFIG_USB_EHCI_SH
#include "ehci-sh.c"
#define PLATFORM_DRIVER ehci_hcd_sh_driver
Index: linux-3.18.21/drivers/usb/host/ehci-rt3xxx.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/ehci-rt3xxx.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,192 @@
+/*
+ * Ralink 3XXX(3883) EHCI Host Controller Driver
+ *
+ * Author: Ying Yuan Huang <yyhuang@ralinktech.com.tw>
+ * Based on "ehci-fsl.c" by Randy Vinson <rvinson@mvista.com>
+ *
+ * 2009 (c) Ralink Technology, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#include <linux/platform_device.h>
+
+void static inline rt_writel(u32 val, unsigned long reg)
+{
+ *(volatile u32 *)(reg) = val;
+}
+
+static inline u32 rt_readl(unsigned long reg)
+{
+ return (*(volatile u32 *)reg);
+}
+
+static void ehci_port_power (struct ehci_hcd *ehci, int is_on)
+{
+ unsigned port;
+
+ if (!HCS_PPC (ehci->hcs_params))
+ return;
+
+ ehci_dbg (ehci, "...power%s ports...\n", is_on ? "up" : "down");
+ for (port = HCS_N_PORTS (ehci->hcs_params); port > 0; )
+ (void) ehci_hub_control(ehci_to_hcd(ehci),
+ is_on ? SetPortFeature : ClearPortFeature,
+ USB_PORT_FEAT_POWER,
+ port--, NULL, 0);
+ /* Flush those writes */
+ ehci_readl(ehci, &ehci->regs->command);
+ msleep(20);
+}
+
+static int rt3xxx_ehci_init(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int retval = 0;
+
+ /* No USB-PCI space. */
+ ehci->caps = hcd->regs /* + 0x100 */;
+ ehci->regs = hcd->regs /* + 0x100 */ + HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
+ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+
+ ehci_reset(ehci);
+
+ retval = ehci_init(hcd);
+ if (retval)
+ return retval;
+
+ ehci_port_power(ehci, 0);
+
+ return retval;
+}
+
+static const struct hc_driver rt3xxx_ehci_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Ralink EHCI Host Controller",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2,
+ .reset = rt3xxx_ehci_init,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .get_frame_number = ehci_get_frame,
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+#if defined(CONFIG_PM)
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+#endif
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+ .endpoint_reset = ehci_endpoint_reset,
+};
+
+static int rt3xxx_ehci_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ const struct hc_driver *driver = &rt3xxx_ehci_hc_driver;
+ struct resource *res;
+ int irq;
+ int retval;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Found HC with no IRQ.\n");
+ return -ENODEV;
+ }
+ irq = res->start;
+
+ hcd = usb_create_hcd(driver, &pdev->dev, "rt3xxx" /*dev_name(&pdev->dev)*/);
+ if (!hcd) {
+ retval = -ENOMEM;
+ goto fail_create_hcd;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Found HC with no register addr.\n");
+ retval = -ENODEV;
+ goto fail_request_resource;
+ }
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = res->end - res->start + 1;
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, driver->description)) {
+ dev_dbg(&pdev->dev, "controller already in use\n");
+ retval = -EBUSY;
+ goto fail_request_resource;
+ }
+
+ hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
+ if (hcd->regs == NULL) {
+ dev_dbg(&pdev->dev, "error mapping memory\n");
+ retval = -EFAULT;
+ goto fail_ioremap;
+ }
+
+ // reset host controller
+ //rt_usbhost_reset();
+
+ // wake up usb module from power saving mode...
+ //try_wake_up();
+
+ // change port0 to host mode
+ //rt_set_host();
+
+ retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ if (retval)
+ goto fail_add_hcd;
+
+ return retval;
+
+fail_add_hcd:
+ iounmap(hcd->regs);
+fail_ioremap:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+fail_request_resource:
+ usb_put_hcd(hcd);
+fail_create_hcd:
+ dev_err(&pdev->dev, "RT3xxx EHCI init fail. %d\n", retval);
+ return retval;
+}
+
+static int rt3xxx_ehci_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ /* ehci_shutdown() is supposed to be called implicitly in
+ ehci-hcd common code while removing module, but it isn't. */
+ ehci_shutdown(hcd);
+
+ usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+
+ //if(!usb_find_device(0x0, 0x0)) // No any other USB host controller.
+ //try_sleep();
+
+ return 0;
+}
+
+MODULE_ALIAS("rt3xxx-ehci");
+
+static struct platform_driver rt3xxx_ehci_driver = {
+ .probe = rt3xxx_ehci_probe,
+ .remove = rt3xxx_ehci_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .name = "rt3xxx-ehci",
+ },
+};
+
+
Index: linux-3.18.21/drivers/usb/host/ehci_ohci.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/ehci_ohci.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,111 @@
+/**************************************************************************
+ *
+ * BRIEF MODULE DESCRIPTION
+ * EHCI/OHCI init for Ralink RT3xxx
+ *
+ * Copyright 2009 Ralink Inc. (yyhuang@ralinktech.com.tw)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ **************************************************************************
+ * March 2009 YYHuang Initial Release
+ **************************************************************************
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <asm/tc3162/tc3182_int_source.h>
+
+#if defined(CONFIG_MIPS_RT63365)
+//#define IRQ_RT3XXX_USB 18
+static struct resource rt3xxx_ehci_resources[] = {
+ [0] = {
+ .start = 0x1fbb0000,
+ .end = 0x1fbbffff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_RT3XXX_USB,
+ .end = IRQ_RT3XXX_USB,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource rt3xxx_ohci_resources[] = {
+ [0] = {
+ .start = 0x1fba0000,
+ .end = 0x1fbaffff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_RT3XXX_USB,
+ .end = IRQ_RT3XXX_USB,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+
+/*
+ * EHCI/OHCI Host controller.
+ */
+static u64 rt3xxx_ehci_dmamask = ~(u32)0;
+static struct platform_device rt3xxx_ehci_device = {
+ .name = "rt3xxx-ehci",
+ .id = -1,
+ .dev = {
+ .dma_mask = &rt3xxx_ehci_dmamask,
+ .coherent_dma_mask = 0xffffffff,
+ },
+ .num_resources = 2,
+ .resource = rt3xxx_ehci_resources,
+};
+
+static u64 rt3xxx_ohci_dmamask = ~(u32)0;
+static struct platform_device rt3xxx_ohci_device = {
+ .name = "rt3xxx-ohci",
+ .id = -1,
+ .dev = {
+ .dma_mask = &rt3xxx_ohci_dmamask,
+ .coherent_dma_mask = 0xffffffff,
+ },
+ .num_resources = 2,
+ .resource = rt3xxx_ohci_resources,
+};
+
+static struct platform_device *rt3xxx_devices[] __initdata = {
+ &rt3xxx_ehci_device,
+ &rt3xxx_ohci_device,
+};
+
+int __init init_rt3xxx_ehci_ohci(void)
+{
+ printk("RT3xxx EHCI/OHCI init.\n");
+ platform_add_devices(rt3xxx_devices, ARRAY_SIZE(rt3xxx_devices));
+ return 0;
+}
+
+device_initcall(init_rt3xxx_ehci_ohci);
+#endif
+
Index: linux-3.18.21/drivers/usb/host/mtk-phy-7512.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk-phy-7512.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,470 @@
+#include "mtk-phy.h"
+
+#ifdef CONFIG_PROJECT_7512
+#include "mtk-phy-7512.h"
+
+//not used on SoC
+PHY_INT32 phy_init(struct u3phy_info *info){
+	writel(0xC0240008, 0xBFA8081C);//port0
+ writel(0xC0240000, 0xBFA8101C);//port1
+ if(readl(0xbfb0008c)&0x01){
+ U3PhyWriteReg8(0xbfa80c1c, 0x18);
+ U3PhyWriteReg8(0xbfa80c1d, 0x18);
+ U3PhyWriteReg8(0xbfa80c1f, 0x18);
+ U3PhyWriteReg32(0xbfa80c24, 0x18000000);
+ U3PhyWriteReg32(0xbfa80c28, 0x18000000);
+ U3PhyWriteReg32(0xbfa80c30, 0x18000000);
+ U3PhyWriteReg32(0xbfa80c38, 0x004a004a);
+ U3PhyWriteReg8(0xbfa80c3e, 0x4a);
+ U3PhyWriteReg8(0xbfa80c3f, 0x0);
+ U3PhyWriteReg8(0xbfa80c42, 0x48);
+ U3PhyWriteReg8(0xbfa80c43, 0x0);
+ U3PhyWriteReg8(0xbfa80c44, 0x48);
+ U3PhyWriteReg8(0xbfa80c45, 0x0);
+ U3PhyWriteReg8(0xbfa80c48, 0x48);
+ U3PhyWriteReg8(0xbfa80c49, 0x0);
+
+ U3PhyWriteReg8(0xbfa80b24, 0x90);
+ U3PhyWriteReg8(0xbfa80b25, 0x1);
+ U3PhyWriteReg32(0xbfa80b10, 0x1c000000);
+ U3PhyWriteReg8(0xbfa80b0b, 0xe);
+ }
+
+ return PHY_TRUE;
+}
+
+//not used on SoC
+PHY_INT32 phy_change_pipe_phase(struct u3phy_info *info, PHY_INT32 phy_drv, PHY_INT32 pipe_phase){
+ return PHY_TRUE;
+}
+
+//--------------------------------------------------------
+// Function : fgEyeScanHelper_CheckPtInRegion()
+// Description : Check if the test point is in a rectangle region.
+// If it is in the rectangle, also check if this point
+// is on the multiple of deltaX and deltaY.
+// Parameter : strucScanRegion * prEye - the region
+// BYTE bX
+// BYTE bY
+// Return : BYTE - TRUE : This point needs to be tested
+// FALSE: This point will be omitted
+// Note : First check within the rectangle.
+//                Secondly, use modulus to check if the point will be tested.
+//--------------------------------------------------------
+static PHY_INT8 fgEyeScanHelper_CheckPtInRegion(struct strucScanRegion * prEye, PHY_INT8 bX, PHY_INT8 bY)
+{
+ PHY_INT8 fgValid = true;
+
+
+ /// Be careful, the axis origin is on the TOP-LEFT corner.
+ /// Therefore the top-left point has the minimum X and Y
+	/// Bottom-right point is the maximum X and Y
+ if ( (prEye->bX_tl <= bX) && (bX <= prEye->bX_br)
+		&& (prEye->bY_tl <= bY) && (bY <= prEye->bY_br))
+ {
+ // With the region, now check whether or not the input test point is
+ // on the multiples of X and Y
+ // Do not have to worry about negative value, because we have already
+ // check the input bX, and bY is within the region.
+ if ( ((bX - prEye->bX_tl) % (prEye->bDeltaX))
+ || ((bY - prEye->bY_tl) % (prEye->bDeltaY)) )
+ {
+ // if the division will have remainder, that means
+ // the input test point is on the multiples of X and Y
+ fgValid = false;
+ }
+ else
+ {
+ }
+ }
+ else
+ {
+
+ fgValid = false;
+ }
+ return fgValid;
+}
+
+//--------------------------------------------------------
+// Function : EyeScanHelper_RunTest()
+// Description : Enable the test, and wait til it is completed
+// Parameter : None
+// Return : None
+// Note : None
+//--------------------------------------------------------
+static void EyeScanHelper_RunTest(struct u3phy_info *info)
+{
+ DRV_UDELAY(100);
+ // Disable the test
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0)
+ , RG_SSUSB_EQ_EYE_CNT_EN_OFST, RG_SSUSB_EQ_EYE_CNT_EN, 0); //RG_SSUSB_RX_EYE_CNT_EN = 0
+ DRV_UDELAY(100);
+ // Run the test
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0)
+ , RG_SSUSB_EQ_EYE_CNT_EN_OFST, RG_SSUSB_EQ_EYE_CNT_EN, 1); //RG_SSUSB_RX_EYE_CNT_EN = 1
+ DRV_UDELAY(100);
+ // Wait til it's done
+ //RGS_SSUSB_RX_EYE_CNT_RDY
+ while(!U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs->phya_rx_mon5)
+ , RGS_SSUSB_EQ_EYE_CNT_RDY_OFST, RGS_SSUSB_EQ_EYE_CNT_RDY));
+}
+
+//--------------------------------------------------------
+// Function : fgEyeScanHelper_CalNextPoint()
+// Description  : Calculate the test point for the measurement
+// Parameter : None
+// Return : BOOL - TRUE : the next point is within the
+// boundaryof HW limit
+// FALSE: the next point is out of the HW limit
+// Note : The next point is obtained by calculating
+// from the bottom left of the region rectangle
+// and then scanning up until it reaches the upper
+// limit. At this time, the x will increment, and
+// start scanning downwards until the y hits the
+// zero.
+//--------------------------------------------------------
+static PHY_INT8 fgEyeScanHelper_CalNextPoint(void)
+{
+ if ( ((_bYcurr == MAX_Y) && (_eScanDir == SCAN_DN))
+ || ((_bYcurr == MIN_Y) && (_eScanDir == SCAN_UP))
+ )
+ {
+ /// Reaches the limit of Y axis
+ /// Increment X
+ _bXcurr++;
+ _fgXChged = true;
+ _eScanDir = (_eScanDir == SCAN_UP) ? SCAN_DN : SCAN_UP;
+
+ if (_bXcurr > MAX_X)
+ {
+ return false;
+ }
+ }
+ else
+ {
+ _bYcurr = (_eScanDir == SCAN_DN) ? _bYcurr + 1 : _bYcurr - 1;
+ _fgXChged = false;
+ }
+ return PHY_TRUE;
+}
+
+PHY_INT32 eyescan_init(struct u3phy_info *info){
+ //initial PHY setting
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phya_regs->rega)
+ , RG_SSUSB_CDR_EPEN_OFST, RG_SSUSB_CDR_EPEN, 1);
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->phyd_mix3)
+ , RG_SSUSB_FORCE_CDR_PI_PWD_OFST, RG_SSUSB_FORCE_CDR_PI_PWD, 1);
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs->b2_phyd_misc0)
+ , RG_SSUSB_RX_PI_CAL_EN_SEL_OFST, RG_SSUSB_RX_PI_CAL_EN_SEL, 1); //RG_SSUSB_RX_PI_CAL_MANUAL_SEL = 1
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs->b2_phyd_misc0)
+ , RG_SSUSB_RX_PI_CAL_EN_OFST, RG_SSUSB_RX_PI_CAL_EN, 1); //RG_SSUSB_RX_PI_CAL_MANUAL_EN = 1
+ return PHY_TRUE;
+}
+
+PHY_INT32 phy_eyescan(struct u3phy_info *info, PHY_INT32 x_t1, PHY_INT32 y_t1, PHY_INT32 x_br, PHY_INT32 y_br, PHY_INT32 delta_x, PHY_INT32 delta_y
+ , PHY_INT32 eye_cnt, PHY_INT32 num_cnt, PHY_INT32 PI_cal_en, PHY_INT32 num_ignore_cnt){
+ PHY_INT32 cOfst = 0;
+ PHY_UINT8 bIdxX = 0;
+ PHY_UINT8 bIdxY = 0;
+ //PHY_INT8 bCnt = 0;
+ PHY_UINT8 bIdxCycCnt = 0;
+ PHY_INT8 fgValid;
+ PHY_INT8 cX;
+ PHY_INT8 cY;
+ PHY_UINT8 bExtendCnt;
+ PHY_INT8 isContinue;
+ //PHY_INT8 isBreak;
+ PHY_UINT32 wErr0 = 0, wErr1 = 0;
+ //PHY_UINT32 temp;
+
+ PHY_UINT32 pwErrCnt0[CYCLE_COUNT_MAX][ERRCNT_MAX][ERRCNT_MAX];
+ PHY_UINT32 pwErrCnt1[CYCLE_COUNT_MAX][ERRCNT_MAX][ERRCNT_MAX];
+
+ _rEye1.bX_tl = x_t1;
+ _rEye1.bY_tl = y_t1;
+ _rEye1.bX_br = x_br;
+ _rEye1.bY_br = y_br;
+ _rEye1.bDeltaX = delta_x;
+ _rEye1.bDeltaY = delta_y;
+
+ _rEye2.bX_tl = x_t1;
+ _rEye2.bY_tl = y_t1;
+ _rEye2.bX_br = x_br;
+ _rEye2.bY_br = y_br;
+ _rEye2.bDeltaX = delta_x;
+ _rEye2.bDeltaY = delta_y;
+
+ _rTestCycle.wEyeCnt = eye_cnt;
+ _rTestCycle.bNumOfEyeCnt = num_cnt;
+ _rTestCycle.bNumOfIgnoreCnt = num_ignore_cnt;
+ _rTestCycle.bPICalEn = PI_cal_en;
+
+ _bXcurr = 0;
+ _bYcurr = 0;
+ _eScanDir = SCAN_DN;
+ _fgXChged = false;
+
+ printk("x_t1: %x, y_t1: %x, x_br: %x, y_br: %x, delta_x: %x, delta_y: %x, \
+ eye_cnt: %x, num_cnt: %x, PI_cal_en: %x, num_ignore_cnt: %x\n", \
+ x_t1, y_t1, x_br, y_br, delta_x, delta_y, eye_cnt, num_cnt, PI_cal_en, num_ignore_cnt);
+
+ //force SIGDET to OFF
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs->b2_phyd_misc0)
+ , RG_SSUSB_RX_SIGDET_EN_SEL_OFST, RG_SSUSB_RX_SIGDET_EN_SEL, 1); //RG_SSUSB_RX_SIGDET_SEL = 1
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs->b2_phyd_misc0)
+ , RG_SSUSB_RX_SIGDET_EN_OFST, RG_SSUSB_RX_SIGDET_EN, 0); //RG_SSUSB_RX_SIGDET_EN = 0
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye1)
+ , RG_SSUSB_EQ_SIGDET_OFST, RG_SSUSB_EQ_SIGDET, 0); //RG_SSUSB_RX_SIGDET = 0
+
+ // RX_TRI_DET_EN to Disable
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq3)
+ , RG_SSUSB_EQ_TRI_DET_EN_OFST, RG_SSUSB_EQ_TRI_DET_EN, 0); //RG_SSUSB_RX_TRI_DET_EN = 0
+
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0)
+ , RG_SSUSB_EQ_EYE_MON_EN_OFST, RG_SSUSB_EQ_EYE_MON_EN, 1); //RG_SSUSB_EYE_MON_EN = 1
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0)
+ , RG_SSUSB_EQ_EYE_XOFFSET_OFST, RG_SSUSB_EQ_EYE_XOFFSET, 0); //RG_SSUSB_RX_EYE_XOFFSET = 0
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0)
+ , RG_SSUSB_EQ_EYE0_Y_OFST, RG_SSUSB_EQ_EYE0_Y, 0); //RG_SSUSB_RX_EYE0_Y = 0
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0)
+ , RG_SSUSB_EQ_EYE1_Y_OFST, RG_SSUSB_EQ_EYE1_Y, 0); //RG_SSUSB_RX_EYE1_Y = 0
+
+
+ if (PI_cal_en){
+ // PI Calibration
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs->b2_phyd_misc0)
+ , RG_SSUSB_RX_PI_CAL_EN_SEL_OFST, RG_SSUSB_RX_PI_CAL_EN_SEL, 1); //RG_SSUSB_RX_PI_CAL_MANUAL_SEL = 1
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs->b2_phyd_misc0)
+ , RG_SSUSB_RX_PI_CAL_EN_OFST, RG_SSUSB_RX_PI_CAL_EN, 0); //RG_SSUSB_RX_PI_CAL_MANUAL_EN = 0
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs->b2_phyd_misc0)
+ , RG_SSUSB_RX_PI_CAL_EN_OFST, RG_SSUSB_RX_PI_CAL_EN, 1); //RG_SSUSB_RX_PI_CAL_MANUAL_EN = 1
+
+ DRV_UDELAY(20);
+
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs->b2_phyd_misc0)
+ , RG_SSUSB_RX_PI_CAL_EN_OFST, RG_SSUSB_RX_PI_CAL_EN, 0); //RG_SSUSB_RX_PI_CAL_MANUAL_EN = 0
+ _bPIResult = U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs->phya_rx_mon5)
+ , RGS_SSUSB_EQ_PILPO_OFST, RGS_SSUSB_EQ_PILPO); //read RGS_SSUSB_RX_PILPO
+
+ printk(KERN_ERR "PI result: %d\n", _bPIResult);
+ }
+ // Read Initial DAC
+ // Set CYCLE
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye3)
+ ,RG_SSUSB_EQ_EYE_CNT_OFST, RG_SSUSB_EQ_EYE_CNT, eye_cnt); //RG_SSUSB_RX_EYE_CNT
+
+ // Eye Monitor Feature
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye1)
+ , RG_SSUSB_EQ_EYE_MASK_OFST, RG_SSUSB_EQ_EYE_MASK, 0x3ff); //RG_SSUSB_RX_EYE_MASK = 0x3ff
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0)
+ , RG_SSUSB_EQ_EYE_MON_EN_OFST, RG_SSUSB_EQ_EYE_MON_EN, 1); //RG_SSUSB_EYE_MON_EN = 1
+
+ // Move X,Y to the top-left corner
+ for (cOfst = 0; cOfst >= -64; cOfst--)
+ {
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0)
+ ,RG_SSUSB_EQ_EYE_XOFFSET_OFST, RG_SSUSB_EQ_EYE_XOFFSET, cOfst); //RG_SSUSB_RX_EYE_XOFFSET
+ }
+ for (cOfst = 0; cOfst < 64; cOfst++)
+ {
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0)
+ , RG_SSUSB_EQ_EYE0_Y_OFST, RG_SSUSB_EQ_EYE0_Y, cOfst); //RG_SSUSB_RX_EYE0_Y
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0)
+ , RG_SSUSB_EQ_EYE1_Y_OFST, RG_SSUSB_EQ_EYE1_Y, cOfst); //RG_SSUSB_RX_EYE1_Y
+ }
+ //ClearErrorResult
+ for(bIdxCycCnt = 0; bIdxCycCnt < CYCLE_COUNT_MAX; bIdxCycCnt++){
+ for(bIdxX = 0; bIdxX < ERRCNT_MAX; bIdxX++)
+ {
+ for(bIdxY = 0; bIdxY < ERRCNT_MAX; bIdxY++){
+ pwErrCnt0[bIdxCycCnt][bIdxX][bIdxY] = 0;
+ pwErrCnt1[bIdxCycCnt][bIdxX][bIdxY] = 0;
+ }
+ }
+ }
+ isContinue = true;
+ while(isContinue){
+ //printk(KERN_ERR "_bXcurr: %d, _bYcurr: %d\n", _bXcurr, _bYcurr);
+ // The point is within the boundary, then let's check if it is within
+ // the testing region.
+ // The point is only test-able if one of the eye region
+ // includes this point.
+ fgValid = fgEyeScanHelper_CheckPtInRegion(&_rEye1, _bXcurr, _bYcurr)
+ || fgEyeScanHelper_CheckPtInRegion(&_rEye2, _bXcurr, _bYcurr);
+ // Translate bX and bY to 2's complement from where the origin was on the
+ // top left corner.
+ // 0x40 and 0x3F needs a bit of thinking!!!! >"<
+ cX = (_bXcurr ^ 0x40);
+ cY = (_bYcurr ^ 0x3F);
+
+ // Set X if necessary
+ if (_fgXChged == true)
+ {
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0)
+ , RG_SSUSB_EQ_EYE_XOFFSET_OFST, RG_SSUSB_EQ_EYE_XOFFSET, cX); //RG_SSUSB_RX_EYE_XOFFSET
+ }
+ // Set Y
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0)
+ , RG_SSUSB_EQ_EYE0_Y_OFST, RG_SSUSB_EQ_EYE0_Y, cY); //RG_SSUSB_RX_EYE0_Y
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0)
+ , RG_SSUSB_EQ_EYE1_Y_OFST, RG_SSUSB_EQ_EYE1_Y, cY); //RG_SSUSB_RX_EYE1_Y
+
+ /// Test this point!
+ if (fgValid){
+ for (bExtendCnt = 0; bExtendCnt < num_ignore_cnt; bExtendCnt++)
+ {
+ //run test
+ EyeScanHelper_RunTest(info);
+ }
+ for (bExtendCnt = 0; bExtendCnt < num_cnt; bExtendCnt++)
+ {
+ EyeScanHelper_RunTest(info);
+ wErr0 = U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs->phya_rx_mon3)
+ , RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_0_OFST, RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_0);
+ wErr1 = U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs->phya_rx_mon4)
+ , RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_1_OFST, RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_1);
+
+ pwErrCnt0[bExtendCnt][_bXcurr][_bYcurr] = wErr0;
+ pwErrCnt1[bExtendCnt][_bXcurr][_bYcurr] = wErr1;
+
+ //EyeScanHelper_GetResult(&_rRes.pwErrCnt0[bCnt], &_rRes.pwErrCnt1[bCnt]);
+// printk(KERN_ERR "cnt[%d] cur_x,y [0x%x][0x%x], cX,cY [0x%x][0x%x], ErrCnt[%d][%d]\n"
+// , bExtendCnt, _bXcurr, _bYcurr, cX, cY, pwErrCnt0[bExtendCnt][_bXcurr][_bYcurr], pwErrCnt1[bExtendCnt][_bXcurr][_bYcurr]);
+ }
+ //printk(KERN_ERR "cur_x,y [0x%x][0x%x], cX,cY [0x%x][0x%x], ErrCnt[%d][%d]\n", _bXcurr, _bYcurr, cX, cY, pwErrCnt0[0][_bXcurr][_bYcurr], pwErrCnt1[0][_bXcurr][_bYcurr]);
+ }
+ else{
+
+ }
+ if (fgEyeScanHelper_CalNextPoint() == false){
+#if 0
+ printk(KERN_ERR "Xcurr [0x%x] Ycurr [0x%x]\n", _bXcurr, _bYcurr);
+ printk(KERN_ERR "XcurrREG [0x%x] YcurrREG [0x%x]\n", cX, cY);
+#endif
+ printk(KERN_ERR "end of eye scan\n");
+ isContinue = false;
+ }
+ }
+ printk(KERN_ERR "CurX [0x%x] CurY [0x%x]\n"
+ , U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0), RG_SSUSB_EQ_EYE_XOFFSET_OFST, RG_SSUSB_EQ_EYE_XOFFSET)
+ , U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0), RG_SSUSB_EQ_EYE0_Y_OFST, RG_SSUSB_EQ_EYE0_Y));
+
+ // Move X,Y to the top-left corner
+ for (cOfst = 63; cOfst >= 0; cOfst--)
+ {
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0)
+ , RG_SSUSB_EQ_EYE_XOFFSET_OFST, RG_SSUSB_EQ_EYE_XOFFSET, cOfst); //RG_SSUSB_RX_EYE_XOFFSET
+ }
+ for (cOfst = 63; cOfst >= 0; cOfst--)
+ {
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0)
+ , RG_SSUSB_EQ_EYE0_Y_OFST, RG_SSUSB_EQ_EYE0_Y, cOfst);
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0)
+ , RG_SSUSB_EQ_EYE1_Y_OFST, RG_SSUSB_EQ_EYE1_Y, cOfst);
+
+ }
+ printk(KERN_ERR "CurX [0x%x] CurY [0x%x]\n"
+ , U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0), RG_SSUSB_EQ_EYE_XOFFSET_OFST, RG_SSUSB_EQ_EYE_XOFFSET)
+ , U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0), RG_SSUSB_EQ_EYE0_Y_OFST, RG_SSUSB_EQ_EYE0_Y));
+
+ printk(KERN_ERR "PI result: %d\n", _bPIResult);
+ printk(KERN_ERR "pwErrCnt0 addr: 0x%x\n", (PHY_UINT32)pwErrCnt0);
+ printk(KERN_ERR "pwErrCnt1 addr: 0x%x\n", (PHY_UINT32)pwErrCnt1);
+
+ return PHY_TRUE;
+}
+
+//not used on SoC
+PHY_INT32 u2_save_cur_en(struct u3phy_info *info){
+ return PHY_TRUE;
+}
+
+//not used on SoC
+PHY_INT32 u2_save_cur_re(struct u3phy_info *info){
+ return PHY_TRUE;
+}
+
+PHY_INT32 u2_slew_rate_calibration(struct u3phy_info *info){
+ PHY_INT32 i=0;
+ //PHY_INT32 j=0;
+ //PHY_INT8 u1SrCalVal = 0;
+ //PHY_INT8 u1Reg_addr_HSTX_SRCAL_EN;
+ PHY_INT32 fgRet = 0;
+ PHY_INT32 u4FmOut = 0;
+ PHY_INT32 u4Tmp = 0;
+ //PHY_INT32 temp;
+
+ // => RG_USB20_HSTX_SRCAL_EN = 1
+ // enable HS TX SR calibration
+ U3PhyWriteField32(((PHY_UINT32)&info->u2phy_regs->u2phyacr0)
+ , RG_USB20_HSTX_SRCAL_EN_OFST, RG_USB20_HSTX_SRCAL_EN, 0x1);
+ DRV_MSLEEP(1);
+
+ // => RG_FRCK_EN = 1
+ // Enable free run clock
+ U3PhyWriteField32(((PHY_UINT32)&info->sifslv_fm_regs->fmmonr1)
+ , RG_FRCK_EN_OFST, RG_FRCK_EN, 1);
+
+ // MT6290 HS signal quality patch
+ // => RG_CYCLECNT = 400
+ // Setting cyclecnt =400
+ U3PhyWriteField32(((PHY_UINT32)&info->sifslv_fm_regs->fmcr0)
+ , RG_CYCLECNT_OFST, RG_CYCLECNT, 0x400);
+
+ // => RG_FREQDET_EN = 1
+ // Enable frequency meter
+ U3PhyWriteField32(((PHY_UINT32)&info->sifslv_fm_regs->fmcr0)
+ , RG_FREQDET_EN_OFST, RG_FREQDET_EN, 0x1);
+
+ // wait for FM detection done, set 10ms timeout
+ for(i=0; i<10; i++){
+ // => u4FmOut = USB_FM_OUT
+ // read FM_OUT
+ u4FmOut = U3PhyReadReg32(((PHY_UINT32)&info->sifslv_fm_regs->fmmonr0));
+ printk("FM_OUT value: u4FmOut = %d(0x%08X)\n", u4FmOut, u4FmOut);
+
+ // check if FM detection done
+ if (u4FmOut != 0)
+ {
+ fgRet = 0;
+ printk("FM detection done! loop = %d\n", i);
+
+ break;
+ }
+
+ fgRet = 1;
+ DRV_MSLEEP(1);
+ }
+ // => RG_FREQDET_EN = 0
+ // disable frequency meter
+ U3PhyWriteField32(((PHY_UINT32)&info->sifslv_fm_regs->fmcr0)
+ , RG_FREQDET_EN_OFST, RG_FREQDET_EN, 0);
+
+ // => RG_FRCK_EN = 0
+ // disable free run clock
+ U3PhyWriteField32(((PHY_UINT32)&info->sifslv_fm_regs->fmmonr1)
+ , RG_FRCK_EN_OFST, RG_FRCK_EN, 0);
+
+ // => RG_USB20_HSTX_SRCAL_EN = 0
+ // disable HS TX SR calibration
+ U3PhyWriteField32(((PHY_UINT32)&info->u2phy_regs->u2phyacr0)
+ , RG_USB20_HSTX_SRCAL_EN_OFST, RG_USB20_HSTX_SRCAL_EN, 0);
+ DRV_MSLEEP(1);
+
+ if(u4FmOut == 0){
+ U3PhyWriteField32(((PHY_UINT32)&info->u2phy_regs->u2phyacr0)
+ , RG_USB20_HSTX_SRCTRL_OFST, RG_USB20_HSTX_SRCTRL, 0x4);
+
+ fgRet = 1;
+ }
+ else{
+ // set reg = (1024/FM_OUT) * 25 * 0.028 (round to the nearest digits)
+ u4Tmp = (((1024 * 25 * U2_SR_COEF_7512) / u4FmOut) + 500) / 1000;
+ printk("SR calibration value u1SrCalVal = %d\n", (PHY_UINT8)u4Tmp);
+ U3PhyWriteField32(((PHY_UINT32)&info->u2phy_regs->u2phyacr0)
+ , RG_USB20_HSTX_SRCTRL_OFST, RG_USB20_HSTX_SRCTRL, u4Tmp);
+ }
+ return fgRet;
+}
+
+#endif
Index: linux-3.18.21/drivers/usb/host/mtk-phy-7512.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk-phy-7512.h 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,2871 @@
+#ifdef CONFIG_PROJECT_7512
+#ifndef __MTK_PHY_7512_H
+#define __MTK_PHY_7512_H
+
+#define U2_SR_COEF_7512 28
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct u2phy_reg {
+ //0x0
+ PHY_LE32 u2phyac0;
+ PHY_LE32 u2phyac1;
+ PHY_LE32 u2phyac2;
+ PHY_LE32 reserve0;
+ //0x10
+ PHY_LE32 u2phyacr0;
+ PHY_LE32 u2phyacr1;
+ PHY_LE32 u2phyacr2;
+ PHY_LE32 u2phyacr3;
+ //0x20
+ PHY_LE32 u2phyacr4;
+ PHY_LE32 u2phyamon0;
+ PHY_LE32 reserve1[2];
+ //0x30~0x50
+ PHY_LE32 reserve2[12];
+ //0x60
+ PHY_LE32 u2phydcr0;
+ PHY_LE32 u2phydcr1;
+ PHY_LE32 u2phydtm0;
+ PHY_LE32 u2phydtm1;
+ //0x70
+ PHY_LE32 u2phydmon0;
+ PHY_LE32 u2phydmon1;
+ PHY_LE32 u2phydmon2;
+ PHY_LE32 u2phydmon3;
+ //0x80
+ PHY_LE32 u2phybc12c;
+ PHY_LE32 u2phybc12c1;
+ PHY_LE32 reserve3[2];
+ //0x90~0xe0
+ PHY_LE32 reserve4[24];
+ //0xf0
+ PHY_LE32 reserve6[3];
+ PHY_LE32 regfcom;
+};
+
+//U3D_U2PHYAC0
+#define RG_USB20_USBPLL_DIVEN (0x7<<28) //30:28
+#define RG_USB20_USBPLL_CKCTRL (0x3<<26) //27:26
+#define RG_USB20_USBPLL_PREDIV (0x3<<24) //25:24
+#define RG_USB20_USBPLL_FORCE_ON (0x1<<23) //23:23
+#define RG_USB20_USBPLL_FBDIV (0x7f<<16) //22:16
+#define RG_USB20_REF_EN (0x1<<15) //15:15
+#define RG_USB20_INTR_EN (0x1<<14) //14:14
+#define RG_USB20_BG_TRIM (0xf<<8) //11:8
+#define RG_USB20_BG_RBSEL (0x3<<6) //7:6
+#define RG_USB20_BG_RASEL (0x3<<4) //5:4
+#define RG_USB20_BGR_DIV (0x3<<2) //3:2
+#define RG_SIFSLV_CHP_EN (0x1<<1) //1:1
+#define RG_SIFSLV_BGR_EN (0x1<<0) //0:0
+
+//U3D_U2PHYAC1
+#define RG_USB20_VRT_VREF_SEL (0x7<<28) //30:28
+#define RG_USB20_TERM_VREF_SEL (0x7<<24) //26:24
+#define RG_USB20_MPX_SEL (0xff<<16) //23:16
+#define RG_USB20_MPX_OUT_SEL (0x3<<12) //13:12
+#define RG_USB20_TX_PH_ROT_SEL (0x7<<8) //10:8
+#define RG_USB20_USBPLL_ACCEN (0x1<<3) //3:3
+#define RG_USB20_USBPLL_LF (0x1<<2) //2:2
+#define RG_USB20_USBPLL_BR (0x1<<1) //1:1
+#define RG_USB20_USBPLL_BP (0x1<<0) //0:0
+
+//U3D_U2PHYAC2
+#define RG_SIFSLV_MAC_BANDGAP_EN (0x1<<17) //17:17
+#define RG_SIFSLV_MAC_CHOPPER_EN (0x1<<16) //16:16
+#define RG_USB20_CLKREF_REV (0xff<<0) //7:0
+
+//U3D_U2PHYACR0
+#define RG_USB20_ICUSB_EN (0x1<<24) //24:24
+#define RG_USB20_HSTX_SRCAL_EN (0x1<<23) //23:23
+#define RG_USB20_HSTX_SRCTRL (0x7<<16) //18:16
+#define RG_USB20_LS_CR (0x7<<12) //14:12
+#define RG_USB20_FS_CR (0x7<<8) //10:8
+#define RG_USB20_LS_SR (0x7<<4) //6:4
+#define RG_USB20_FS_SR (0x7<<0) //2:0
+
+//U3D_U2PHYACR1
+#define RG_USB20_INIT_SQ_EN_DG (0x3<<28) //29:28
+#define RG_USB20_SQD (0x3<<24) //25:24
+#define RG_USB20_HSTX_TMODE_SEL (0x3<<20) //21:20
+#define RG_USB20_HSTX_TMODE_EN (0x1<<19) //19:19
+#define RG_USB20_PHYD_MONEN (0x1<<18) //18:18
+#define RG_USB20_INLPBK_EN (0x1<<17) //17:17
+#define RG_USB20_CHIRP_EN (0x1<<16) //16:16
+#define RG_USB20_DM_ABIST_SOURCE_EN (0x1<<15) //15:15
+#define RG_USB20_DM_ABIST_SELE (0xf<<8) //11:8
+#define RG_USB20_DP_ABIST_SOURCE_EN (0x1<<7) //7:7
+#define RG_USB20_DP_ABIST_SELE (0xf<<0) //3:0
+
+//U3D_U2PHYACR2
+#define RG_USB20_OTG_ABIST_SELE (0x7<<29) //31:29
+#define RG_USB20_OTG_ABIST_EN (0x1<<28) //28:28
+#define RG_USB20_OTG_VBUSCMP_EN (0x1<<27) //27:27
+#define RG_USB20_OTG_VBUSTH (0x7<<24) //26:24
+#define RG_USB20_DISC_FIT_EN (0x1<<22) //22:22
+#define RG_USB20_DISCD (0x3<<20) //21:20
+#define RG_USB20_DISCTH (0xf<<16) //19:16
+#define RG_USB20_SQCAL_EN (0x1<<15) //15:15
+#define RG_USB20_SQCAL (0xf<<8) //11:8
+#define RG_USB20_SQTH (0xf<<0) //3:0
+
+//U3D_U2PHYACR3
+#define RG_USB20_HSTX_DBIST (0xf<<28) //31:28
+#define RG_USB20_HSTX_BIST_EN (0x1<<26) //26:26
+#define RG_USB20_HSTX_I_EN_MODE (0x3<<24) //25:24
+#define RG_USB20_HSRX_TMODE_EN (0x1<<23) //23:23
+#define RG_USB20_HSRX_BIAS_EN_SEL (0x3<<20) //21:20
+#define RG_USB20_USB11_TMODE_EN (0x1<<19) //19:19
+#define RG_USB20_TMODE_FS_LS_TX_EN (0x1<<18) //18:18
+#define RG_USB20_TMODE_FS_LS_RCV_EN (0x1<<17) //17:17
+#define RG_USB20_TMODE_FS_LS_MODE (0x1<<16) //16:16
+#define RG_USB20_HS_TERM_EN_MODE (0x3<<13) //14:13
+#define RG_USB20_PUPD_BIST_EN (0x1<<12) //12:12
+#define RG_USB20_EN_PU_DM (0x1<<11) //11:11
+#define RG_USB20_EN_PD_DM (0x1<<10) //10:10
+#define RG_USB20_EN_PU_DP (0x1<<9) //9:9
+#define RG_USB20_EN_PD_DP (0x1<<8) //8:8
+#define RG_USB20_PHY_REV (0xff<<0) //7:0
+
+//U3D_U2PHYACR4
+#define RG_USB20_DP_100K_MODE (0x1<<18) //18:18
+#define RG_USB20_DM_100K_EN (0x1<<17) //17:17
+#define USB20_DP_100K_EN (0x1<<16) //16:16
+#define USB20_GPIO_DM_I (0x1<<15) //15:15
+#define USB20_GPIO_DP_I (0x1<<14) //14:14
+#define USB20_GPIO_DM_OE (0x1<<13) //13:13
+#define USB20_GPIO_DP_OE (0x1<<12) //12:12
+#define RG_USB20_GPIO_CTL (0x1<<9) //9:9
+#define USB20_GPIO_MODE (0x1<<8) //8:8
+#define RG_USB20_TX_BIAS_EN (0x1<<5) //5:5
+#define RG_USB20_TX_VCMPDN_EN (0x1<<4) //4:4
+#define RG_USB20_HS_SQ_EN_MODE (0x3<<2) //3:2
+#define RG_USB20_HS_RCV_EN_MODE (0x3<<0) //1:0
+
+//U3D_U2PHYAMON0
+#define RGO_USB20_GPIO_DM_O (0x1<<1) //1:1
+#define RGO_USB20_GPIO_DP_O (0x1<<0) //0:0
+
+//U3D_U2PHYDCR0
+#define RG_USB20_CDR_TST (0x3<<30) //31:30
+#define RG_USB20_GATED_ENB (0x1<<29) //29:29
+#define RG_USB20_TESTMODE (0x3<<26) //27:26
+#define RG_USB20_PLL_STABLE (0x1<<25) //25:25
+#define RG_USB20_PLL_FORCE_ON (0x1<<24) //24:24
+#define RG_USB20_PHYD_RESERVE (0xffff<<8) //23:8
+#define RG_USB20_EBTHRLD (0x1<<7) //7:7
+#define RG_USB20_EARLY_HSTX_I (0x1<<6) //6:6
+#define RG_USB20_TX_TST (0x1<<5) //5:5
+#define RG_USB20_NEGEDGE_ENB (0x1<<4) //4:4
+#define RG_USB20_CDR_FILT (0xf<<0) //3:0
+
+//U3D_U2PHYDCR1
+#define RG_USB20_PROBE_SEL (0xff<<24) //31:24
+#define RG_USB20_DRVVBUS (0x1<<23) //23:23
+#define RG_DEBUG_EN (0x1<<22) //22:22
+#define RG_USB20_OTG_PROBE (0x3<<20) //21:20
+#define RG_USB20_SW_PLLMODE (0x3<<18) //19:18
+#define RG_USB20_BERTH (0x3<<16) //17:16
+#define RG_USB20_LBMODE (0x3<<13) //14:13
+#define RG_USB20_FORCE_TAP (0x1<<12) //12:12
+#define RG_USB20_TAPSEL (0xfff<<0) //11:0
+
+//U3D_U2PHYDTM0
+#define RG_UART_MODE (0x3<<30) //31:30
+#define FORCE_UART_I (0x1<<29) //29:29
+#define FORCE_UART_BIAS_EN (0x1<<28) //28:28
+#define FORCE_UART_TX_OE (0x1<<27) //27:27
+#define FORCE_UART_EN (0x1<<26) //26:26
+#define FORCE_USB_CLKEN (0x1<<25) //25:25
+#define FORCE_DRVVBUS (0x1<<24) //24:24
+#define FORCE_DATAIN (0x1<<23) //23:23
+#define FORCE_TXVALID (0x1<<22) //22:22
+#define FORCE_DM_PULLDOWN (0x1<<21) //21:21
+#define FORCE_DP_PULLDOWN (0x1<<20) //20:20
+#define FORCE_XCVRSEL (0x1<<19) //19:19
+#define FORCE_SUSPENDM (0x1<<18) //18:18
+#define FORCE_TERMSEL (0x1<<17) //17:17
+#define FORCE_OPMODE (0x1<<16) //16:16
+#define UTMI_MUXSEL (0x1<<15) //15:15
+#define RG_RESET (0x1<<14) //14:14
+#define RG_DATAIN (0xf<<10) //13:10
+#define RG_TXVALIDH (0x1<<9) //9:9
+#define RG_TXVALID (0x1<<8) //8:8
+#define RG_DMPULLDOWN (0x1<<7) //7:7
+#define RG_DPPULLDOWN (0x1<<6) //6:6
+#define RG_XCVRSEL (0x3<<4) //5:4
+#define RG_SUSPENDM (0x1<<3) //3:3
+#define RG_TERMSEL (0x1<<2) //2:2
+#define RG_OPMODE (0x3<<0) //1:0
+
+//U3D_U2PHYDTM1
+#define RG_USB20_PRBS7_EN (0x1<<31) //31:31
+#define RG_USB20_PRBS7_BITCNT (0x3f<<24) //29:24
+#define RG_USB20_CLK48M_EN (0x1<<23) //23:23
+#define RG_USB20_CLK60M_EN (0x1<<22) //22:22
+#define RG_UART_I (0x1<<19) //19:19
+#define RG_UART_BIAS_EN (0x1<<18) //18:18
+#define RG_UART_TX_OE (0x1<<17) //17:17
+#define RG_UART_EN (0x1<<16) //16:16
+#define FORCE_VBUSVALID (0x1<<13) //13:13
+#define FORCE_SESSEND (0x1<<12) //12:12
+#define FORCE_BVALID (0x1<<11) //11:11
+#define FORCE_AVALID (0x1<<10) //10:10
+#define FORCE_IDDIG (0x1<<9) //9:9
+#define FORCE_IDPULLUP (0x1<<8) //8:8
+#define RG_VBUSVALID (0x1<<5) //5:5
+#define RG_SESSEND (0x1<<4) //4:4
+#define RG_BVALID (0x1<<3) //3:3
+#define RG_AVALID (0x1<<2) //2:2
+#define RG_IDDIG (0x1<<1) //1:1
+#define RG_IDPULLUP (0x1<<0) //0:0
+
+//U3D_U2PHYDMON0
+#define RG_USB20_PRBS7_BERTH (0xff<<0) //7:0
+
+//U3D_U2PHYDMON1
+#define USB20_UART_O (0x1<<31) //31:31
+#define RGO_USB20_LB_PASS (0x1<<30) //30:30
+#define RGO_USB20_LB_DONE (0x1<<29) //29:29
+#define AD_USB20_BVALID (0x1<<28) //28:28
+#define USB20_IDDIG (0x1<<27) //27:27
+#define AD_USB20_VBUSVALID (0x1<<26) //26:26
+#define AD_USB20_SESSEND (0x1<<25) //25:25
+#define AD_USB20_AVALID (0x1<<24) //24:24
+#define USB20_LINE_STATE (0x3<<22) //23:22
+#define USB20_HST_DISCON (0x1<<21) //21:21
+#define USB20_TX_READY (0x1<<20) //20:20
+#define USB20_RX_ERROR (0x1<<19) //19:19
+#define USB20_RX_ACTIVE (0x1<<18) //18:18
+#define USB20_RX_VALIDH (0x1<<17) //17:17
+#define USB20_RX_VALID (0x1<<16) //16:16
+#define USB20_DATA_OUT (0xffff<<0) //15:0
+
+//U3D_U2PHYDMON2
+#define RGO_TXVALID_CNT (0xff<<24) //31:24
+#define RGO_RXACTIVE_CNT (0xff<<16) //23:16
+#define RGO_USB20_LB_BERCNT (0xff<<8) //15:8
+#define USB20_PROBE_OUT (0xff<<0) //7:0
+
+//U3D_U2PHYDMON3
+#define RGO_USB20_PRBS7_ERRCNT (0xffff<<16) //31:16
+#define RGO_USB20_PRBS7_DONE (0x1<<3) //3:3
+#define RGO_USB20_PRBS7_LOCK (0x1<<2) //2:2
+#define RGO_USB20_PRBS7_PASS (0x1<<1) //1:1
+#define RGO_USB20_PRBS7_PASSTH (0x1<<0) //0:0
+
+//U3D_U2PHYBC12C
+#define RG_SIFSLV_CHGDT_DEGLCH_CNT (0xf<<28) //31:28
+#define RG_SIFSLV_CHGDT_CTRL_CNT (0xf<<24) //27:24
+#define RG_SIFSLV_CHGDT_FORCE_MODE (0x1<<16) //16:16
+#define RG_CHGDT_ISRC_LEV (0x3<<14) //15:14
+#define RG_CHGDT_VDATSRC (0x1<<13) //13:13
+#define RG_CHGDT_BGVREF_SEL (0x7<<10) //12:10
+#define RG_CHGDT_RDVREF_SEL (0x3<<8) //9:8
+#define RG_CHGDT_ISRC_DP (0x1<<7) //7:7
+#define RG_SIFSLV_CHGDT_OPOUT_DM (0x1<<6) //6:6
+#define RG_CHGDT_VDAT_DM (0x1<<5) //5:5
+#define RG_CHGDT_OPOUT_DP (0x1<<4) //4:4
+#define RG_SIFSLV_CHGDT_VDAT_DP (0x1<<3) //3:3
+#define RG_SIFSLV_CHGDT_COMP_EN (0x1<<2) //2:2
+#define RG_SIFSLV_CHGDT_OPDRV_EN (0x1<<1) //1:1
+#define RG_CHGDT_EN (0x1<<0) //0:0
+
+//U3D_U2PHYBC12C1
+#define RG_CHGDT_REV (0xff<<0) //7:0
+
+//U3D_REGFCOM
+#define RG_PAGE (0xff<<24) //31:24
+#define I2C_MODE (0x1<<16) //16:16
+
+
+/* OFFSET */
+
+//U3D_U2PHYAC0
+#define RG_USB20_USBPLL_DIVEN_OFST (28)
+#define RG_USB20_USBPLL_CKCTRL_OFST (26)
+#define RG_USB20_USBPLL_PREDIV_OFST (24)
+#define RG_USB20_USBPLL_FORCE_ON_OFST (23)
+#define RG_USB20_USBPLL_FBDIV_OFST (16)
+#define RG_USB20_REF_EN_OFST (15)
+#define RG_USB20_INTR_EN_OFST (14)
+#define RG_USB20_BG_TRIM_OFST (8)
+#define RG_USB20_BG_RBSEL_OFST (6)
+#define RG_USB20_BG_RASEL_OFST (4)
+#define RG_USB20_BGR_DIV_OFST (2)
+#define RG_SIFSLV_CHP_EN_OFST (1)
+#define RG_SIFSLV_BGR_EN_OFST (0)
+
+//U3D_U2PHYAC1
+#define RG_USB20_VRT_VREF_SEL_OFST (28)
+#define RG_USB20_TERM_VREF_SEL_OFST (24)
+#define RG_USB20_MPX_SEL_OFST (16)
+#define RG_USB20_MPX_OUT_SEL_OFST (12)
+#define RG_USB20_TX_PH_ROT_SEL_OFST (8)
+#define RG_USB20_USBPLL_ACCEN_OFST (3)
+#define RG_USB20_USBPLL_LF_OFST (2)
+#define RG_USB20_USBPLL_BR_OFST (1)
+#define RG_USB20_USBPLL_BP_OFST (0)
+
+//U3D_U2PHYAC2
+#define RG_SIFSLV_MAC_BANDGAP_EN_OFST (17)
+#define RG_SIFSLV_MAC_CHOPPER_EN_OFST (16)
+#define RG_USB20_CLKREF_REV_OFST (0)
+
+//U3D_U2PHYACR0
+#define RG_USB20_ICUSB_EN_OFST (24)
+#define RG_USB20_HSTX_SRCAL_EN_OFST (23)
+#define RG_USB20_HSTX_SRCTRL_OFST (16)
+#define RG_USB20_LS_CR_OFST (12)
+#define RG_USB20_FS_CR_OFST (8)
+#define RG_USB20_LS_SR_OFST (4)
+#define RG_USB20_FS_SR_OFST (0)
+
+//U3D_U2PHYACR1
+#define RG_USB20_INIT_SQ_EN_DG_OFST (28)
+#define RG_USB20_SQD_OFST (24)
+#define RG_USB20_HSTX_TMODE_SEL_OFST (20)
+#define RG_USB20_HSTX_TMODE_EN_OFST (19)
+#define RG_USB20_PHYD_MONEN_OFST (18)
+#define RG_USB20_INLPBK_EN_OFST (17)
+#define RG_USB20_CHIRP_EN_OFST (16)
+#define RG_USB20_DM_ABIST_SOURCE_EN_OFST (15)
+#define RG_USB20_DM_ABIST_SELE_OFST (8)
+#define RG_USB20_DP_ABIST_SOURCE_EN_OFST (7)
+#define RG_USB20_DP_ABIST_SELE_OFST (0)
+
+//U3D_U2PHYACR2
+#define RG_USB20_OTG_ABIST_SELE_OFST (29)
+#define RG_USB20_OTG_ABIST_EN_OFST (28)
+#define RG_USB20_OTG_VBUSCMP_EN_OFST (27)
+#define RG_USB20_OTG_VBUSTH_OFST (24)
+#define RG_USB20_DISC_FIT_EN_OFST (22)
+#define RG_USB20_DISCD_OFST (20)
+#define RG_USB20_DISCTH_OFST (16)
+#define RG_USB20_SQCAL_EN_OFST (15)
+#define RG_USB20_SQCAL_OFST (8)
+#define RG_USB20_SQTH_OFST (0)
+
+//U3D_U2PHYACR3
+#define RG_USB20_HSTX_DBIST_OFST (28)
+#define RG_USB20_HSTX_BIST_EN_OFST (26)
+#define RG_USB20_HSTX_I_EN_MODE_OFST (24)
+#define RG_USB20_HSRX_TMODE_EN_OFST (23)
+#define RG_USB20_HSRX_BIAS_EN_SEL_OFST (20)
+#define RG_USB20_USB11_TMODE_EN_OFST (19)
+#define RG_USB20_TMODE_FS_LS_TX_EN_OFST (18)
+#define RG_USB20_TMODE_FS_LS_RCV_EN_OFST (17)
+#define RG_USB20_TMODE_FS_LS_MODE_OFST (16)
+#define RG_USB20_HS_TERM_EN_MODE_OFST (13)
+#define RG_USB20_PUPD_BIST_EN_OFST (12)
+#define RG_USB20_EN_PU_DM_OFST (11)
+#define RG_USB20_EN_PD_DM_OFST (10)
+#define RG_USB20_EN_PU_DP_OFST (9)
+#define RG_USB20_EN_PD_DP_OFST (8)
+#define RG_USB20_PHY_REV_OFST (0)
+
+//U3D_U2PHYACR4
+#define RG_USB20_DP_100K_MODE_OFST (18)
+#define RG_USB20_DM_100K_EN_OFST (17)
+#define USB20_DP_100K_EN_OFST (16)
+#define USB20_GPIO_DM_I_OFST (15)
+#define USB20_GPIO_DP_I_OFST (14)
+#define USB20_GPIO_DM_OE_OFST (13)
+#define USB20_GPIO_DP_OE_OFST (12)
+#define RG_USB20_GPIO_CTL_OFST (9)
+#define USB20_GPIO_MODE_OFST (8)
+#define RG_USB20_TX_BIAS_EN_OFST (5)
+#define RG_USB20_TX_VCMPDN_EN_OFST (4)
+#define RG_USB20_HS_SQ_EN_MODE_OFST (2)
+#define RG_USB20_HS_RCV_EN_MODE_OFST (0)
+
+//U3D_U2PHYAMON0
+#define RGO_USB20_GPIO_DM_O_OFST (1)
+#define RGO_USB20_GPIO_DP_O_OFST (0)
+
+//U3D_U2PHYDCR0
+#define RG_USB20_CDR_TST_OFST (30)
+#define RG_USB20_GATED_ENB_OFST (29)
+#define RG_USB20_TESTMODE_OFST (26)
+#define RG_USB20_PLL_STABLE_OFST (25)
+#define RG_USB20_PLL_FORCE_ON_OFST (24)
+#define RG_USB20_PHYD_RESERVE_OFST (8)
+#define RG_USB20_EBTHRLD_OFST (7)
+#define RG_USB20_EARLY_HSTX_I_OFST (6)
+#define RG_USB20_TX_TST_OFST (5)
+#define RG_USB20_NEGEDGE_ENB_OFST (4)
+#define RG_USB20_CDR_FILT_OFST (0)
+
+//U3D_U2PHYDCR1
+#define RG_USB20_PROBE_SEL_OFST (24)
+#define RG_USB20_DRVVBUS_OFST (23)
+#define RG_DEBUG_EN_OFST (22)
+#define RG_USB20_OTG_PROBE_OFST (20)
+#define RG_USB20_SW_PLLMODE_OFST (18)
+#define RG_USB20_BERTH_OFST (16)
+#define RG_USB20_LBMODE_OFST (13)
+#define RG_USB20_FORCE_TAP_OFST (12)
+#define RG_USB20_TAPSEL_OFST (0)
+
+//U3D_U2PHYDTM0
+#define RG_UART_MODE_OFST (30)
+#define FORCE_UART_I_OFST (29)
+#define FORCE_UART_BIAS_EN_OFST (28)
+#define FORCE_UART_TX_OE_OFST (27)
+#define FORCE_UART_EN_OFST (26)
+#define FORCE_USB_CLKEN_OFST (25)
+#define FORCE_DRVVBUS_OFST (24)
+#define FORCE_DATAIN_OFST (23)
+#define FORCE_TXVALID_OFST (22)
+#define FORCE_DM_PULLDOWN_OFST (21)
+#define FORCE_DP_PULLDOWN_OFST (20)
+#define FORCE_XCVRSEL_OFST (19)
+#define FORCE_SUSPENDM_OFST (18)
+#define FORCE_TERMSEL_OFST (17)
+#define FORCE_OPMODE_OFST (16)
+#define UTMI_MUXSEL_OFST (15)
+#define RG_RESET_OFST (14)
+#define RG_DATAIN_OFST (10)
+#define RG_TXVALIDH_OFST (9)
+#define RG_TXVALID_OFST (8)
+#define RG_DMPULLDOWN_OFST (7)
+#define RG_DPPULLDOWN_OFST (6)
+#define RG_XCVRSEL_OFST (4)
+#define RG_SUSPENDM_OFST (3)
+#define RG_TERMSEL_OFST (2)
+#define RG_OPMODE_OFST (0)
+
+//U3D_U2PHYDTM1
+#define RG_USB20_PRBS7_EN_OFST (31)
+#define RG_USB20_PRBS7_BITCNT_OFST (24)
+#define RG_USB20_CLK48M_EN_OFST (23)
+#define RG_USB20_CLK60M_EN_OFST (22)
+#define RG_UART_I_OFST (19)
+#define RG_UART_BIAS_EN_OFST (18)
+#define RG_UART_TX_OE_OFST (17)
+#define RG_UART_EN_OFST (16)
+#define FORCE_VBUSVALID_OFST (13)
+#define FORCE_SESSEND_OFST (12)
+#define FORCE_BVALID_OFST (11)
+#define FORCE_AVALID_OFST (10)
+#define FORCE_IDDIG_OFST (9)
+#define FORCE_IDPULLUP_OFST (8)
+#define RG_VBUSVALID_OFST (5)
+#define RG_SESSEND_OFST (4)
+#define RG_BVALID_OFST (3)
+#define RG_AVALID_OFST (2)
+#define RG_IDDIG_OFST (1)
+#define RG_IDPULLUP_OFST (0)
+
+//U3D_U2PHYDMON0
+#define RG_USB20_PRBS7_BERTH_OFST (0)
+
+//U3D_U2PHYDMON1
+#define USB20_UART_O_OFST (31)
+#define RGO_USB20_LB_PASS_OFST (30)
+#define RGO_USB20_LB_DONE_OFST (29)
+#define AD_USB20_BVALID_OFST (28)
+#define USB20_IDDIG_OFST (27)
+#define AD_USB20_VBUSVALID_OFST (26)
+#define AD_USB20_SESSEND_OFST (25)
+#define AD_USB20_AVALID_OFST (24)
+#define USB20_LINE_STATE_OFST (22)
+#define USB20_HST_DISCON_OFST (21)
+#define USB20_TX_READY_OFST (20)
+#define USB20_RX_ERROR_OFST (19)
+#define USB20_RX_ACTIVE_OFST (18)
+#define USB20_RX_VALIDH_OFST (17)
+#define USB20_RX_VALID_OFST (16)
+#define USB20_DATA_OUT_OFST (0)
+
+//U3D_U2PHYDMON2
+#define RGO_TXVALID_CNT_OFST (24)
+#define RGO_RXACTIVE_CNT_OFST (16)
+#define RGO_USB20_LB_BERCNT_OFST (8)
+#define USB20_PROBE_OUT_OFST (0)
+
+//U3D_U2PHYDMON3
+#define RGO_USB20_PRBS7_ERRCNT_OFST (16)
+#define RGO_USB20_PRBS7_DONE_OFST (3)
+#define RGO_USB20_PRBS7_LOCK_OFST (2)
+#define RGO_USB20_PRBS7_PASS_OFST (1)
+#define RGO_USB20_PRBS7_PASSTH_OFST (0)
+
+//U3D_U2PHYBC12C
+#define RG_SIFSLV_CHGDT_DEGLCH_CNT_OFST (28)
+#define RG_SIFSLV_CHGDT_CTRL_CNT_OFST (24)
+#define RG_SIFSLV_CHGDT_FORCE_MODE_OFST (16)
+#define RG_CHGDT_ISRC_LEV_OFST (14)
+#define RG_CHGDT_VDATSRC_OFST (13)
+#define RG_CHGDT_BGVREF_SEL_OFST (10)
+#define RG_CHGDT_RDVREF_SEL_OFST (8)
+#define RG_CHGDT_ISRC_DP_OFST (7)
+#define RG_SIFSLV_CHGDT_OPOUT_DM_OFST (6)
+#define RG_CHGDT_VDAT_DM_OFST (5)
+#define RG_CHGDT_OPOUT_DP_OFST (4)
+#define RG_SIFSLV_CHGDT_VDAT_DP_OFST (3)
+#define RG_SIFSLV_CHGDT_COMP_EN_OFST (2)
+#define RG_SIFSLV_CHGDT_OPDRV_EN_OFST (1)
+#define RG_CHGDT_EN_OFST (0)
+
+//U3D_U2PHYBC12C1
+#define RG_CHGDT_REV_OFST (0)
+
+//U3D_REGFCOM
+#define RG_PAGE_OFST (24)
+#define I2C_MODE_OFST (16)
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct u3phya_reg {
+ //0x0
+ PHY_LE32 reg0;
+ PHY_LE32 reg1;
+ PHY_LE32 reg2;
+ PHY_LE32 reg3;
+ //0x10
+ PHY_LE32 reg4;
+ PHY_LE32 reg5;
+ PHY_LE32 reg6;
+ PHY_LE32 reg7;
+ //0x20
+ PHY_LE32 reg8;
+ PHY_LE32 reg9;
+ PHY_LE32 rega;
+ PHY_LE32 regb;
+ //0x30
+ PHY_LE32 regc;
+ PHY_LE32 regd;
+ PHY_LE32 rege;
+};
+
+//U3D_reg0
+#define RG_SSUSB_BGR_EN (0x1<<31) //31:31
+#define RG_SSUSB_CHPEN (0x1<<30) //30:30
+#define RG_SSUSB_BG_DIV (0x3<<28) //29:28
+#define RG_SSUSB_INTR_EN (0x1<<26) //26:26
+#define RG_SSUSB_MPX_OUT_SEL (0x3<<24) //25:24
+#define RG_SSUSB_MPX_SEL (0xff<<16) //23:16
+#define RG_SSUSB_REF_EN (0x1<<15) //15:15
+#define RG_SSUSB_VRT_VREF_SEL (0xf<<11) //14:11
+#define RG_SSUSB_BG_RASEL (0x3<<9) //10:9
+#define RG_SSUSB_BG_RBSEL (0x3<<7) //8:7
+#define RG_SSUSB_BG_MONEN (0x1<<6) //6:6
+#define RG_PCIE_CLKDRV_OFFSET (0x3<<0) //1:0
+
+//U3D_reg1
+#define RG_PCIE_CLKDRV_SLEW (0x3<<30) //31:30
+#define RG_PCIE_CLKDRV_AMP (0x7<<27) //29:27
+#define RG_SSUSB_XTAL_TST_A2DCK_EN (0x1<<26) //26:26
+#define RG_SSUSB_XTAL_MON_EN (0x1<<25) //25:25
+#define RG_SSUSB_XTAL_HYS (0x1<<24) //24:24
+#define RG_SSUSB_XTAL_TOP_RESERVE (0xffff<<8) //23:8
+#define RG_SSUSB_SYSPLL_RESERVE (0xf<<4) //7:4
+#define RG_SSUSB_SYSPLL_FBSEL (0x3<<2) //3:2
+#define RG_SSUSB_SYSPLL_PREDIV (0x3<<0) //1:0
+
+//U3D_reg2
+#define RG_SSUSB_SYSPLL_LF (0x1<<31) //31:31
+#define RG_SSUSB_SYSPLL_FBDIV (0x7f<<24) //30:24
+#define RG_SSUSB_SYSPLL_POSDIV (0x3<<22) //23:22
+#define RG_SSUSB_SYSPLL_VCO_DIV_SEL (0x1<<21) //21:21
+#define RG_SSUSB_SYSPLL_BLP (0x1<<20) //20:20
+#define RG_SSUSB_SYSPLL_BP (0x1<<19) //19:19
+#define RG_SSUSB_SYSPLL_BR (0x1<<18) //18:18
+#define RG_SSUSB_SYSPLL_BC (0x1<<17) //17:17
+#define RG_SSUSB_SYSPLL_DIVEN (0x7<<14) //16:14
+#define RG_SSUSB_SYSPLL_FPEN (0x1<<13) //13:13
+#define RG_SSUSB_SYSPLL_MONCK_EN (0x1<<12) //12:12
+#define RG_SSUSB_SYSPLL_MONVC_EN (0x1<<11) //11:11
+#define RG_SSUSB_SYSPLL_MONREF_EN (0x1<<10) //10:10
+#define RG_SSUSB_SYSPLL_VOD_EN (0x1<<9) //9:9
+#define RG_SSUSB_SYSPLL_CK_SEL (0x1<<8) //8:8
+
+//U3D_reg3
+#define RG_SSUSB_SYSPLL_TOP_RESERVE (0xffff<<16) //31:16
+
+//U3D_reg4
+#define RG_SSUSB_SYSPLL_PCW_NCPO (0x7fffffff<<1) //31:1
+
+//U3D_reg5
+#define RG_SSUSB_SYSPLL_DDS_PI_C (0x7<<29) //31:29
+#define RG_SSUSB_SYSPLL_DDS_HF_EN (0x1<<28) //28:28
+#define RG_SSUSB_SYSPLL_DDS_PREDIV2 (0x1<<27) //27:27
+#define RG_SSUSB_SYSPLL_DDS_POSTDIV2 (0x1<<26) //26:26
+#define RG_SSUSB_SYSPLL_DDS_PI_PL_EN (0x1<<25) //25:25
+#define RG_SSUSB_SYSPLL_DDS_PI_RST_SEL (0x1<<24) //24:24
+#define RG_SSUSB_SYSPLL_DDS_MONEN (0x1<<23) //23:23
+#define RG_SSUSB_SYSPLL_DDS_LPF_EN (0x1<<22) //22:22
+#define RG_SSUSB_SYSPLL_CLK_PH_INV (0x1<<21) //21:21
+#define RG_SSUSB_SYSPLL_DDS_SEL_EXT (0x1<<20) //20:20
+#define RG_SSUSB_SYSPLL_DDS_DMY (0xffff<<0) //15:0
+
+//U3D_reg6
+#define RG_SSUSB_TX250MCK_INVB (0x1<<31) //31:31
+#define RG_SSUSB_IDRV_ITAILOP_EN (0x1<<30) //30:30
+#define RG_SSUSB_IDRV_CALIB (0x3f<<24) //29:24
+#define RG_SSUSB_TX_R50_FON (0x1<<23) //23:23
+#define RG_SSUSB_TX_SR (0x7<<20) //22:20
+#define RG_SSUSB_TX_EIDLE_CM (0xf<<16) //19:16
+#define RG_SSUSB_RXDET_RSEL (0x3<<14) //15:14
+#define RG_SSUSB_RXDET_VTHSEL (0x3<<12) //13:12
+#define RG_SSUSB_CKMON_EN (0x1<<11) //11:11
+#define RG_SSUSB_CKMON_SEL (0x7<<8) //10:8
+#define RG_SSUSB_TX_VLMON_EN (0x1<<7) //7:7
+#define RG_SSUSB_TX_VLMON_SEL (0x1<<6) //6:6
+#define RG_SSUSB_RXLBTX_EN (0x1<<5) //5:5
+#define RG_SSUSB_TXLBRX_EN (0x1<<4) //4:4
+
+//U3D_reg7
+#define RG_SSUSB_RESERVE (0xfffff<<12) //31:12
+#define RG_SSUSB_PLL_CKCTRL (0x3<<10) //11:10
+#define RG_SSUSB_PLL_POSDIV (0x3<<8) //9:8
+#define RG_SSUSB_PLL_AUTOK_LOAD (0x1<<7) //7:7
+#define RG_SSUSB_PLL_LOAD_RSTB (0x1<<6) //6:6
+#define RG_SSUSB_PLL_EP_EN (0x1<<5) //5:5
+#define RG_SSUSB_PLL_VOD_EN (0x1<<4) //4:4
+#define RG_SSUSB_PLL_V11_EN (0x1<<3) //3:3
+#define RG_SSUSB_PLL_MONREF_EN (0x1<<2) //2:2
+#define RG_SSUSB_PLL_MONCK_EN (0x1<<1) //1:1
+#define RG_SSUSB_PLL_MONVC_EN (0x1<<0) //0:0
+
+//U3D_reg8
+#define RG_SSUSB_PLL_RESERVE (0xffff<<0) //15:0
+
+//U3D_reg9
+#define RG_SSUSB_PLL_DDS_DMY (0xffff<<16) //31:16
+#define RG_SSUSB_PLL_SSC_PRD (0xffff<<0) //15:0
+
+//U3D_regA
+#define RG_SSUSB_PLL_SSC_PHASE_INI (0x1<<31) //31:31
+#define RG_SSUSB_PLL_SSC_TRI_EN (0x1<<30) //30:30
+#define RG_SSUSB_PLL_CLK_PH_INV (0x1<<29) //29:29
+#define RG_SSUSB_PLL_DDS_LPF_EN (0x1<<28) //28:28
+#define RG_SSUSB_PLL_DDS_VADJ (0x7<<21) //23:21
+#define RG_SSUSB_PLL_DDS_MONEN (0x1<<20) //20:20
+#define RG_SSUSB_PLL_DDS_PS_VADJ (0x7<<17) //19:17
+#define RG_SSUSB_PLL_DDS_SEL_EXT (0x1<<16) //16:16
+#define RG_SSUSB_CDR_PD_DIV_BYPASS (0x1<<15) //15:15
+#define RG_SSUSB_CDR_PD_DIV_SEL (0x1<<14) //14:14
+#define RG_SSUSB_CDR_CPBIAS_SEL (0x1<<13) //13:13
+#define RG_SSUSB_CDR_OSCDET_EN (0x1<<12) //12:12
+#define RG_SSUSB_CDR_MONMUX (0x1<<11) //11:11
+#define RG_SSUSB_CDR_CKCTRL (0x3<<9) //10:9
+#define RG_SSUSB_CDR_ACCEN (0x1<<8) //8:8
+#define RG_SSUSB_CDR_BYPASS (0x3<<6) //7:6
+#define RG_SSUSB_CDR_PI_SLEW (0x3<<4) //5:4
+#define RG_SSUSB_CDR_EPEN (0x1<<3) //3:3
+#define RG_SSUSB_CDR_AUTOK_LOAD (0x1<<2) //2:2
+#define RG_SSUSB_CDR_LOAD_RSTB (0x1<<1) //1:1
+#define RG_SSUSB_CDR_MONEN (0x1<<0) //0:0
+
+//U3D_regB
+#define RG_SSUSB_CDR_MONEN_DIG (0x1<<31) //31:31
+#define RG_SSUSB_CDR_REGOD (0x3<<29) //30:29
+#define RG_SSUSB_RX_DAC_EN (0x1<<26) //26:26
+#define RG_SSUSB_RX_DAC_PWD (0x1<<25) //25:25
+#define RG_SSUSB_EQ_CURSEL (0x1<<24) //24:24
+#define RG_SSUSB_RX_DAC_MUX (0x1f<<19) //23:19
+#define RG_SSUSB_RX_R2T_EN (0x1<<18) //18:18
+#define RG_SSUSB_RX_T2R_EN (0x1<<17) //17:17
+#define RG_SSUSB_RX_50_LOWER (0x7<<14) //16:14
+#define RG_SSUSB_RX_50_TAR (0x3<<12) //13:12
+#define RG_SSUSB_RX_SW_CTRL (0xf<<7) //10:7
+#define RG_PCIE_SIGDET_VTH (0x3<<5) //6:5
+#define RG_PCIE_SIGDET_LPF (0x3<<3) //4:3
+#define RG_SSUSB_LFPS_MON_EN (0x1<<2) //2:2
+
+//U3D_regC
+#define RG_SSUSB_RXAFE_DCMON_SEL (0xf<<28) //31:28
+#define RG_SSUSB_CDR_RESERVE (0xff<<16) //23:16
+#define RG_SSUSB_RXAFE_RESERVE (0xff<<8) //15:8
+#define RG_PCIE_RX_RESERVE (0xff<<0) //7:0
+
+//U3D_redD
+#define RGS_SSUSB_CDR_NO_OSC (0x1<<8) //8:8
+#define RGS_SSUSB_RX_DEBUG_RESERVE (0xff<<0) //7:0
+
+//U3D_regE
+#define RG_SSUSB_INT_BIAS_SEL (0x1<<4) //4:4
+#define RG_SSUSB_EXT_BIAS_SEL (0x1<<3) //3:3
+#define RG_SSUSB_RX_P1_ENTRY_PASS (0x1<<2) //2:2
+#define RG_SSUSB_RX_PD_RST (0x1<<1) //1:1
+#define RG_SSUSB_RX_PD_RST_PASS (0x1<<0) //0:0
+
+
+/* OFFSET */
+
+//U3D_reg0
+#define RG_SSUSB_BGR_EN_OFST (31)
+#define RG_SSUSB_CHPEN_OFST (30)
+#define RG_SSUSB_BG_DIV_OFST (28)
+#define RG_SSUSB_INTR_EN_OFST (26)
+#define RG_SSUSB_MPX_OUT_SEL_OFST (24)
+#define RG_SSUSB_MPX_SEL_OFST (16)
+#define RG_SSUSB_REF_EN_OFST (15)
+#define RG_SSUSB_VRT_VREF_SEL_OFST (11)
+#define RG_SSUSB_BG_RASEL_OFST (9)
+#define RG_SSUSB_BG_RBSEL_OFST (7)
+#define RG_SSUSB_BG_MONEN_OFST (6)
+#define RG_PCIE_CLKDRV_OFFSET_OFST (0)
+
+//U3D_reg1
+#define RG_PCIE_CLKDRV_SLEW_OFST (30)
+#define RG_PCIE_CLKDRV_AMP_OFST (27)
+#define RG_SSUSB_XTAL_TST_A2DCK_EN_OFST (26)
+#define RG_SSUSB_XTAL_MON_EN_OFST (25)
+#define RG_SSUSB_XTAL_HYS_OFST (24)
+#define RG_SSUSB_XTAL_TOP_RESERVE_OFST (8)
+#define RG_SSUSB_SYSPLL_RESERVE_OFST (4)
+#define RG_SSUSB_SYSPLL_FBSEL_OFST (2)
+#define RG_SSUSB_SYSPLL_PREDIV_OFST (0)
+
+//U3D_reg2
+#define RG_SSUSB_SYSPLL_LF_OFST (31)
+#define RG_SSUSB_SYSPLL_FBDIV_OFST (24)
+#define RG_SSUSB_SYSPLL_POSDIV_OFST (22)
+#define RG_SSUSB_SYSPLL_VCO_DIV_SEL_OFST (21)
+#define RG_SSUSB_SYSPLL_BLP_OFST (20)
+#define RG_SSUSB_SYSPLL_BP_OFST (19)
+#define RG_SSUSB_SYSPLL_BR_OFST (18)
+#define RG_SSUSB_SYSPLL_BC_OFST (17)
+#define RG_SSUSB_SYSPLL_DIVEN_OFST (14)
+#define RG_SSUSB_SYSPLL_FPEN_OFST (13)
+#define RG_SSUSB_SYSPLL_MONCK_EN_OFST (12)
+#define RG_SSUSB_SYSPLL_MONVC_EN_OFST (11)
+#define RG_SSUSB_SYSPLL_MONREF_EN_OFST (10)
+#define RG_SSUSB_SYSPLL_VOD_EN_OFST (9)
+#define RG_SSUSB_SYSPLL_CK_SEL_OFST (8)
+
+//U3D_reg3
+#define RG_SSUSB_SYSPLL_TOP_RESERVE_OFST (16)
+
+//U3D_reg4
+#define RG_SSUSB_SYSPLL_PCW_NCPO_OFST (1)
+
+//U3D_reg5
+#define RG_SSUSB_SYSPLL_DDS_PI_C_OFST (29)
+#define RG_SSUSB_SYSPLL_DDS_HF_EN_OFST (28)
+#define RG_SSUSB_SYSPLL_DDS_PREDIV2_OFST (27)
+#define RG_SSUSB_SYSPLL_DDS_POSTDIV2_OFST (26)
+#define RG_SSUSB_SYSPLL_DDS_PI_PL_EN_OFST (25)
+#define RG_SSUSB_SYSPLL_DDS_PI_RST_SEL_OFST (24)
+#define RG_SSUSB_SYSPLL_DDS_MONEN_OFST (23)
+#define RG_SSUSB_SYSPLL_DDS_LPF_EN_OFST (22)
+#define RG_SSUSB_SYSPLL_CLK_PH_INV_OFST (21)
+#define RG_SSUSB_SYSPLL_DDS_SEL_EXT_OFST (20)
+#define RG_SSUSB_SYSPLL_DDS_DMY_OFST (0)
+
+//U3D_reg6
+#define RG_SSUSB_TX250MCK_INVB_OFST (31)
+#define RG_SSUSB_IDRV_ITAILOP_EN_OFST (30)
+#define RG_SSUSB_IDRV_CALIB_OFST (24)
+#define RG_SSUSB_TX_R50_FON_OFST (23)
+#define RG_SSUSB_TX_SR_OFST (20)
+#define RG_SSUSB_TX_EIDLE_CM_OFST (16)
+#define RG_SSUSB_RXDET_RSEL_OFST (14)
+#define RG_SSUSB_RXDET_VTHSEL_OFST (12)
+#define RG_SSUSB_CKMON_EN_OFST (11)
+#define RG_SSUSB_CKMON_SEL_OFST (8)
+#define RG_SSUSB_TX_VLMON_EN_OFST (7)
+#define RG_SSUSB_TX_VLMON_SEL_OFST (6)
+#define RG_SSUSB_RXLBTX_EN_OFST (5)
+#define RG_SSUSB_TXLBRX_EN_OFST (4)
+
+//U3D_reg7
+#define RG_SSUSB_RESERVE_OFST (12)
+#define RG_SSUSB_PLL_CKCTRL_OFST (10)
+#define RG_SSUSB_PLL_POSDIV_OFST (8)
+#define RG_SSUSB_PLL_AUTOK_LOAD_OFST (7)
+#define RG_SSUSB_PLL_LOAD_RSTB_OFST (6)
+#define RG_SSUSB_PLL_EP_EN_OFST (5)
+#define RG_SSUSB_PLL_VOD_EN_OFST (4)
+#define RG_SSUSB_PLL_V11_EN_OFST (3)
+#define RG_SSUSB_PLL_MONREF_EN_OFST (2)
+#define RG_SSUSB_PLL_MONCK_EN_OFST (1)
+#define RG_SSUSB_PLL_MONVC_EN_OFST (0)
+
+//U3D_reg8
+#define RG_SSUSB_PLL_RESERVE_OFST (0)
+
+//U3D_reg9
+#define RG_SSUSB_PLL_DDS_DMY_OFST (16)
+#define RG_SSUSB_PLL_SSC_PRD_OFST (0)
+
+//U3D_regA
+#define RG_SSUSB_PLL_SSC_PHASE_INI_OFST (31)
+#define RG_SSUSB_PLL_SSC_TRI_EN_OFST (30)
+#define RG_SSUSB_PLL_CLK_PH_INV_OFST (29)
+#define RG_SSUSB_PLL_DDS_LPF_EN_OFST (28)
+#define RG_SSUSB_PLL_DDS_VADJ_OFST (21)
+#define RG_SSUSB_PLL_DDS_MONEN_OFST (20)
+#define RG_SSUSB_PLL_DDS_PS_VADJ_OFST (17)
+#define RG_SSUSB_PLL_DDS_SEL_EXT_OFST (16)
+#define RG_SSUSB_CDR_PD_DIV_BYPASS_OFST (15)
+#define RG_SSUSB_CDR_PD_DIV_SEL_OFST (14)
+#define RG_SSUSB_CDR_CPBIAS_SEL_OFST (13)
+#define RG_SSUSB_CDR_OSCDET_EN_OFST (12)
+#define RG_SSUSB_CDR_MONMUX_OFST (11)
+#define RG_SSUSB_CDR_CKCTRL_OFST (9)
+#define RG_SSUSB_CDR_ACCEN_OFST (8)
+#define RG_SSUSB_CDR_BYPASS_OFST (6)
+#define RG_SSUSB_CDR_PI_SLEW_OFST (4)
+#define RG_SSUSB_CDR_EPEN_OFST (3)
+#define RG_SSUSB_CDR_AUTOK_LOAD_OFST (2)
+#define RG_SSUSB_CDR_LOAD_RSTB_OFST (1)
+#define RG_SSUSB_CDR_MONEN_OFST (0)
+
+//U3D_regB
+#define RG_SSUSB_CDR_MONEN_DIG_OFST (31)
+#define RG_SSUSB_CDR_REGOD_OFST (29)
+#define RG_SSUSB_RX_DAC_EN_OFST (26)
+#define RG_SSUSB_RX_DAC_PWD_OFST (25)
+#define RG_SSUSB_EQ_CURSEL_OFST (24)
+#define RG_SSUSB_RX_DAC_MUX_OFST (19)
+#define RG_SSUSB_RX_R2T_EN_OFST (18)
+#define RG_SSUSB_RX_T2R_EN_OFST (17)
+#define RG_SSUSB_RX_50_LOWER_OFST (14)
+#define RG_SSUSB_RX_50_TAR_OFST (12)
+#define RG_SSUSB_RX_SW_CTRL_OFST (7)
+#define RG_PCIE_SIGDET_VTH_OFST (5)
+#define RG_PCIE_SIGDET_LPF_OFST (3)
+#define RG_SSUSB_LFPS_MON_EN_OFST (2)
+
+//U3D_regC
+#define RG_SSUSB_RXAFE_DCMON_SEL_OFST (28)
+#define RG_SSUSB_CDR_RESERVE_OFST (16)
+#define RG_SSUSB_RXAFE_RESERVE_OFST (8)
+#define RG_PCIE_RX_RESERVE_OFST (0)
+
+//U3D_redD
+#define RGS_SSUSB_CDR_NO_OSC_OFST (8)
+#define RGS_SSUSB_RX_DEBUG_RESERVE_OFST (0)
+
+//U3D_regE
+#define RG_SSUSB_INT_BIAS_SEL_OFST (4)
+#define RG_SSUSB_EXT_BIAS_SEL_OFST (3)
+#define RG_SSUSB_RX_P1_ENTRY_PASS_OFST (2)
+#define RG_SSUSB_RX_PD_RST_OFST (1)
+#define RG_SSUSB_RX_PD_RST_PASS_OFST (0)
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct u3phya_da_reg {
+ //0x0
+ PHY_LE32 reg0;
+ PHY_LE32 reg1;
+ PHY_LE32 reg4;
+ PHY_LE32 reg5;
+ //0x10
+ PHY_LE32 reg6;
+ PHY_LE32 reg7;
+ PHY_LE32 reg8;
+ PHY_LE32 reg9;
+ //0x20
+ PHY_LE32 reg10;
+ PHY_LE32 reg12;
+ PHY_LE32 reg13;
+ PHY_LE32 reg14;
+ //0x30
+ PHY_LE32 reg15;
+ PHY_LE32 reg16;
+ PHY_LE32 reg19;
+ PHY_LE32 reg20;
+ //0x40
+ PHY_LE32 reg21;
+ PHY_LE32 reg23;
+ PHY_LE32 reg25;
+ PHY_LE32 reg26;
+ //0x50
+ PHY_LE32 reg28;
+ PHY_LE32 reg29;
+ PHY_LE32 reg30;
+ PHY_LE32 reg31;
+ //0x60
+ PHY_LE32 reg32;
+ PHY_LE32 reg33;
+};
+
+//U3D_reg0
+#define RG_PCIE_SPEED_PE2D (0x1<<24) //24:24
+#define RG_PCIE_SPEED_PE2H (0x1<<23) //23:23
+#define RG_PCIE_SPEED_PE1D (0x1<<22) //22:22
+#define RG_PCIE_SPEED_PE1H (0x1<<21) //21:21
+#define RG_PCIE_SPEED_U3 (0x1<<20) //20:20
+#define RG_SSUSB_XTAL_EXT_EN_PE2D (0x3<<18) //19:18
+#define RG_SSUSB_XTAL_EXT_EN_PE2H (0x3<<16) //17:16
+#define RG_SSUSB_XTAL_EXT_EN_PE1D (0x3<<14) //15:14
+#define RG_SSUSB_XTAL_EXT_EN_PE1H (0x3<<12) //13:12
+#define RG_SSUSB_XTAL_EXT_EN_U3 (0x3<<10) //11:10
+#define RG_SSUSB_CDR_REFCK_SEL_PE2D (0x3<<8) //9:8
+#define RG_SSUSB_CDR_REFCK_SEL_PE2H (0x3<<6) //7:6
+#define RG_SSUSB_CDR_REFCK_SEL_PE1D (0x3<<4) //5:4
+#define RG_SSUSB_CDR_REFCK_SEL_PE1H (0x3<<2) //3:2
+#define RG_SSUSB_CDR_REFCK_SEL_U3 (0x3<<0) //1:0
+
+//U3D_reg1
+#define RG_USB20_REFCK_SEL_PE2D (0x1<<30) //30:30
+#define RG_USB20_REFCK_SEL_PE2H (0x1<<29) //29:29
+#define RG_USB20_REFCK_SEL_PE1D (0x1<<28) //28:28
+#define RG_USB20_REFCK_SEL_PE1H (0x1<<27) //27:27
+#define RG_USB20_REFCK_SEL_U3 (0x1<<26) //26:26
+#define RG_PCIE_REFCK_DIV4_PE2D (0x1<<25) //25:25
+#define RG_PCIE_REFCK_DIV4_PE2H (0x1<<24) //24:24
+#define RG_PCIE_REFCK_DIV4_PE1D (0x1<<18) //18:18
+#define RG_PCIE_REFCK_DIV4_PE1H (0x1<<17) //17:17
+#define RG_PCIE_REFCK_DIV4_U3 (0x1<<16) //16:16
+#define RG_PCIE_MODE_PE2D (0x1<<8) //8:8
+#define RG_PCIE_MODE_PE2H (0x1<<3) //3:3
+#define RG_PCIE_MODE_PE1D (0x1<<2) //2:2
+#define RG_PCIE_MODE_PE1H (0x1<<1) //1:1
+#define RG_PCIE_MODE_U3 (0x1<<0) //0:0
+
+//U3D_reg4
+#define RG_SSUSB_PLL_DIVEN_PE2D (0x7<<22) //24:22
+#define RG_SSUSB_PLL_DIVEN_PE2H (0x7<<19) //21:19
+#define RG_SSUSB_PLL_DIVEN_PE1D (0x7<<16) //18:16
+#define RG_SSUSB_PLL_DIVEN_PE1H (0x7<<13) //15:13
+#define RG_SSUSB_PLL_DIVEN_U3 (0x7<<10) //12:10
+#define RG_SSUSB_PLL_BC_PE2D (0x3<<8) //9:8
+#define RG_SSUSB_PLL_BC_PE2H (0x3<<6) //7:6
+#define RG_SSUSB_PLL_BC_PE1D (0x3<<4) //5:4
+#define RG_SSUSB_PLL_BC_PE1H (0x3<<2) //3:2
+#define RG_SSUSB_PLL_BC_U3 (0x3<<0) //1:0
+
+//U3D_reg5
+#define RG_SSUSB_PLL_BR_PE2D (0x7<<27) //29:27
+#define RG_SSUSB_PLL_BR_PE2H (0x7<<24) //26:24
+#define RG_SSUSB_PLL_BR_PE1D (0x7<<21) //23:21
+#define RG_SSUSB_PLL_BR_PE1H (0x7<<18) //20:18
+#define RG_SSUSB_PLL_BR_U3 (0x7<<15) //17:15
+#define RG_SSUSB_PLL_IC_PE2D (0x7<<12) //14:12
+#define RG_SSUSB_PLL_IC_PE2H (0x7<<9) //11:9
+#define RG_SSUSB_PLL_IC_PE1D (0x7<<6) //8:6
+#define RG_SSUSB_PLL_IC_PE1H (0x7<<3) //5:3
+#define RG_SSUSB_PLL_IC_U3 (0x7<<0) //2:0
+
+//U3D_reg6
+#define RG_SSUSB_PLL_IR_PE2D (0xf<<24) //27:24
+#define RG_SSUSB_PLL_IR_PE2H (0xf<<16) //19:16
+#define RG_SSUSB_PLL_IR_PE1D (0xf<<8) //11:8
+#define RG_SSUSB_PLL_IR_PE1H (0xf<<4) //7:4
+#define RG_SSUSB_PLL_IR_U3 (0xf<<0) //3:0
+
+//U3D_reg7
+#define RG_SSUSB_PLL_BP_PE2D (0xf<<24) //27:24
+#define RG_SSUSB_PLL_BP_PE2H (0xf<<16) //19:16
+#define RG_SSUSB_PLL_BP_PE1D (0xf<<8) //11:8
+#define RG_SSUSB_PLL_BP_PE1H (0xf<<4) //7:4
+#define RG_SSUSB_PLL_BP_U3 (0xf<<0) //3:0
+
+//U3D_reg8
+#define RG_SSUSB_PLL_FBKSEL_PE2D (0x3<<24) //25:24
+#define RG_SSUSB_PLL_FBKSEL_PE2H (0x3<<16) //17:16
+#define RG_SSUSB_PLL_FBKSEL_PE1D (0x3<<8) //9:8
+#define RG_SSUSB_PLL_FBKSEL_PE1H (0x3<<2) //3:2
+#define RG_SSUSB_PLL_FBKSEL_U3 (0x3<<0) //1:0
+
+//U3D_reg9
+#define RG_SSUSB_PLL_FBKDIV_PE2H (0x7f<<24) //30:24
+#define RG_SSUSB_PLL_FBKDIV_PE1D (0x7f<<16) //22:16
+#define RG_SSUSB_PLL_FBKDIV_PE1H (0x7f<<8) //14:8
+#define RG_SSUSB_PLL_FBKDIV_U3 (0x7f<<0) //6:0
+
+//U3D_reg10
+#define RG_SSUSB_PLL_PREDIV_PE2D (0x3<<26) //27:26
+#define RG_SSUSB_PLL_PREDIV_PE2H (0x3<<24) //25:24
+#define RG_SSUSB_PLL_PREDIV_PE1D (0x3<<18) //19:18
+#define RG_SSUSB_PLL_PREDIV_PE1H (0x3<<16) //17:16
+#define RG_SSUSB_PLL_PREDIV_U3 (0x3<<8) //9:8
+#define RG_SSUSB_PLL_FBKDIV_PE2D (0x7f<<0) //6:0
+
+//U3D_reg12
+#define RG_SSUSB_PLL_PCW_NCPO_U3 (0x7fffffff<<0) //30:0
+
+//U3D_reg13
+#define RG_SSUSB_PLL_PCW_NCPO_PE1H (0x7fffffff<<0) //30:0
+
+//U3D_reg14
+#define RG_SSUSB_PLL_PCW_NCPO_PE1D (0x7fffffff<<0) //30:0
+
+//U3D_reg15
+#define RG_SSUSB_PLL_PCW_NCPO_PE2H (0x7fffffff<<0) //30:0
+
+//U3D_reg16
+#define RG_SSUSB_PLL_PCW_NCPO_PE2D (0x7fffffff<<0) //30:0
+
+//U3D_reg19
+#define RG_SSUSB_PLL_SSC_DELTA1_PE1H (0xffff<<16) //31:16
+#define RG_SSUSB_PLL_SSC_DELTA1_U3 (0xffff<<0) //15:0
+
+//U3D_reg20
+#define RG_SSUSB_PLL_SSC_DELTA1_PE2H (0xffff<<16) //31:16
+#define RG_SSUSB_PLL_SSC_DELTA1_PE1D (0xffff<<0) //15:0
+
+//U3D_reg21
+#define RG_SSUSB_PLL_SSC_DELTA_U3 (0xffff<<16) //31:16
+#define RG_SSUSB_PLL_SSC_DELTA1_PE2D (0xffff<<0) //15:0
+
+//U3D_reg23
+#define RG_SSUSB_PLL_SSC_DELTA_PE1D (0xffff<<16) //31:16
+#define RG_SSUSB_PLL_SSC_DELTA_PE1H (0xffff<<0) //15:0
+
+//U3D_reg25
+#define RG_SSUSB_PLL_SSC_DELTA_PE2D (0xffff<<16) //31:16
+#define RG_SSUSB_PLL_SSC_DELTA_PE2H (0xffff<<0) //15:0
+
+//U3D_reg26
+#define RG_SSUSB_PLL_REFCKDIV_PE2D (0x1<<25) //25:25
+#define RG_SSUSB_PLL_REFCKDIV_PE2H (0x1<<24) //24:24
+#define RG_SSUSB_PLL_REFCKDIV_PE1D (0x1<<16) //16:16
+#define RG_SSUSB_PLL_REFCKDIV_PE1H (0x1<<8) //8:8
+#define RG_SSUSB_PLL_REFCKDIV_U3 (0x1<<0) //0:0
+
+//U3D_reg28
+#define RG_SSUSB_CDR_BPA_PE2D (0x3<<24) //25:24
+#define RG_SSUSB_CDR_BPA_PE2H (0x3<<16) //17:16
+#define RG_SSUSB_CDR_BPA_PE1D (0x3<<10) //11:10
+#define RG_SSUSB_CDR_BPA_PE1H (0x3<<8) //9:8
+#define RG_SSUSB_CDR_BPA_U3 (0x3<<0) //1:0
+
+//U3D_reg29
+#define RG_SSUSB_CDR_BPB_PE2D (0x7<<24) //26:24
+#define RG_SSUSB_CDR_BPB_PE2H (0x7<<16) //18:16
+#define RG_SSUSB_CDR_BPB_PE1D (0x7<<6) //8:6
+#define RG_SSUSB_CDR_BPB_PE1H (0x7<<3) //5:3
+#define RG_SSUSB_CDR_BPB_U3 (0x7<<0) //2:0
+
+//U3D_reg30
+#define RG_SSUSB_CDR_BR_PE2D (0x7<<24) //26:24
+#define RG_SSUSB_CDR_BR_PE2H (0x7<<16) //18:16
+#define RG_SSUSB_CDR_BR_PE1D (0x7<<6) //8:6
+#define RG_SSUSB_CDR_BR_PE1H (0x7<<3) //5:3
+#define RG_SSUSB_CDR_BR_U3 (0x7<<0) //2:0
+
+//U3D_reg31
+#define RG_SSUSB_CDR_FBDIV_PE2H (0x7f<<24) //30:24
+#define RG_SSUSB_CDR_FBDIV_PE1D (0x7f<<16) //22:16
+#define RG_SSUSB_CDR_FBDIV_PE1H (0x7f<<8) //14:8
+#define RG_SSUSB_CDR_FBDIV_U3 (0x7f<<0) //6:0
+
+//U3D_reg32
+#define RG_SSUSB_EQ_RSTEP1_PE2D (0x3<<30) //31:30
+#define RG_SSUSB_EQ_RSTEP1_PE2H (0x3<<28) //29:28
+#define RG_SSUSB_EQ_RSTEP1_PE1D (0x3<<26) //27:26
+#define RG_SSUSB_EQ_RSTEP1_PE1H (0x3<<24) //25:24
+#define RG_SSUSB_EQ_RSTEP1_U3 (0x3<<22) //23:22
+#define RG_SSUSB_LFPS_DEGLITCH_PE2D (0x3<<20) //21:20
+#define RG_SSUSB_LFPS_DEGLITCH_PE2H (0x3<<18) //19:18
+#define RG_SSUSB_LFPS_DEGLITCH_PE1D (0x3<<16) //17:16
+#define RG_SSUSB_LFPS_DEGLITCH_PE1H (0x3<<14) //15:14
+#define RG_SSUSB_LFPS_DEGLITCH_U3 (0x3<<12) //13:12
+#define RG_SSUSB_CDR_KVSEL_PE2D (0x1<<11) //11:11
+#define RG_SSUSB_CDR_KVSEL_PE2H (0x1<<10) //10:10
+#define RG_SSUSB_CDR_KVSEL_PE1D (0x1<<9) //9:9
+#define RG_SSUSB_CDR_KVSEL_PE1H (0x1<<8) //8:8
+#define RG_SSUSB_CDR_KVSEL_U3 (0x1<<7) //7:7
+#define RG_SSUSB_CDR_FBDIV_PE2D (0x7f<<0) //6:0
+
+//U3D_reg33
+#define RG_SSUSB_RX_CMPWD_PE2D (0x1<<26) //26:26
+#define RG_SSUSB_RX_CMPWD_PE2H (0x1<<25) //25:25
+#define RG_SSUSB_RX_CMPWD_PE1D (0x1<<24) //24:24
+#define RG_SSUSB_RX_CMPWD_PE1H (0x1<<23) //23:23
+#define RG_SSUSB_RX_CMPWD_U3 (0x1<<16) //16:16
+#define RG_SSUSB_EQ_RSTEP2_PE2D (0x3<<8) //9:8
+#define RG_SSUSB_EQ_RSTEP2_PE2H (0x3<<6) //7:6
+#define RG_SSUSB_EQ_RSTEP2_PE1D (0x3<<4) //5:4
+#define RG_SSUSB_EQ_RSTEP2_PE1H (0x3<<2) //3:2
+#define RG_SSUSB_EQ_RSTEP2_U3 (0x3<<0) //1:0
+
+
+/* OFFSET */
+
+//U3D_reg0
+#define RG_PCIE_SPEED_PE2D_OFST (24)
+#define RG_PCIE_SPEED_PE2H_OFST (23)
+#define RG_PCIE_SPEED_PE1D_OFST (22)
+#define RG_PCIE_SPEED_PE1H_OFST (21)
+#define RG_PCIE_SPEED_U3_OFST (20)
+#define RG_SSUSB_XTAL_EXT_EN_PE2D_OFST (18)
+#define RG_SSUSB_XTAL_EXT_EN_PE2H_OFST (16)
+#define RG_SSUSB_XTAL_EXT_EN_PE1D_OFST (14)
+#define RG_SSUSB_XTAL_EXT_EN_PE1H_OFST (12)
+#define RG_SSUSB_XTAL_EXT_EN_U3_OFST (10)
+#define RG_SSUSB_CDR_REFCK_SEL_PE2D_OFST (8)
+#define RG_SSUSB_CDR_REFCK_SEL_PE2H_OFST (6)
+#define RG_SSUSB_CDR_REFCK_SEL_PE1D_OFST (4)
+#define RG_SSUSB_CDR_REFCK_SEL_PE1H_OFST (2)
+#define RG_SSUSB_CDR_REFCK_SEL_U3_OFST (0)
+
+//U3D_reg1
+#define RG_USB20_REFCK_SEL_PE2D_OFST (30)
+#define RG_USB20_REFCK_SEL_PE2H_OFST (29)
+#define RG_USB20_REFCK_SEL_PE1D_OFST (28)
+#define RG_USB20_REFCK_SEL_PE1H_OFST (27)
+#define RG_USB20_REFCK_SEL_U3_OFST (26)
+#define RG_PCIE_REFCK_DIV4_PE2D_OFST (25)
+#define RG_PCIE_REFCK_DIV4_PE2H_OFST (24)
+#define RG_PCIE_REFCK_DIV4_PE1D_OFST (18)
+#define RG_PCIE_REFCK_DIV4_PE1H_OFST (17)
+#define RG_PCIE_REFCK_DIV4_U3_OFST (16)
+#define RG_PCIE_MODE_PE2D_OFST (8)
+#define RG_PCIE_MODE_PE2H_OFST (3)
+#define RG_PCIE_MODE_PE1D_OFST (2)
+#define RG_PCIE_MODE_PE1H_OFST (1)
+#define RG_PCIE_MODE_U3_OFST (0)
+
+//U3D_reg4
+#define RG_SSUSB_PLL_DIVEN_PE2D_OFST (22)
+#define RG_SSUSB_PLL_DIVEN_PE2H_OFST (19)
+#define RG_SSUSB_PLL_DIVEN_PE1D_OFST (16)
+#define RG_SSUSB_PLL_DIVEN_PE1H_OFST (13)
+#define RG_SSUSB_PLL_DIVEN_U3_OFST (10)
+#define RG_SSUSB_PLL_BC_PE2D_OFST (8)
+#define RG_SSUSB_PLL_BC_PE2H_OFST (6)
+#define RG_SSUSB_PLL_BC_PE1D_OFST (4)
+#define RG_SSUSB_PLL_BC_PE1H_OFST (2)
+#define RG_SSUSB_PLL_BC_U3_OFST (0)
+
+//U3D_reg5
+#define RG_SSUSB_PLL_BR_PE2D_OFST (27)
+#define RG_SSUSB_PLL_BR_PE2H_OFST (24)
+#define RG_SSUSB_PLL_BR_PE1D_OFST (21)
+#define RG_SSUSB_PLL_BR_PE1H_OFST (18)
+#define RG_SSUSB_PLL_BR_U3_OFST (15)
+#define RG_SSUSB_PLL_IC_PE2D_OFST (12)
+#define RG_SSUSB_PLL_IC_PE2H_OFST (9)
+#define RG_SSUSB_PLL_IC_PE1D_OFST (6)
+#define RG_SSUSB_PLL_IC_PE1H_OFST (3)
+#define RG_SSUSB_PLL_IC_U3_OFST (0)
+
+//U3D_reg6
+#define RG_SSUSB_PLL_IR_PE2D_OFST (24)
+#define RG_SSUSB_PLL_IR_PE2H_OFST (16)
+#define RG_SSUSB_PLL_IR_PE1D_OFST (8)
+#define RG_SSUSB_PLL_IR_PE1H_OFST (4)
+#define RG_SSUSB_PLL_IR_U3_OFST (0)
+
+//U3D_reg7
+#define RG_SSUSB_PLL_BP_PE2D_OFST (24)
+#define RG_SSUSB_PLL_BP_PE2H_OFST (16)
+#define RG_SSUSB_PLL_BP_PE1D_OFST (8)
+#define RG_SSUSB_PLL_BP_PE1H_OFST (4)
+#define RG_SSUSB_PLL_BP_U3_OFST (0)
+
+//U3D_reg8
+#define RG_SSUSB_PLL_FBKSEL_PE2D_OFST (24)
+#define RG_SSUSB_PLL_FBKSEL_PE2H_OFST (16)
+#define RG_SSUSB_PLL_FBKSEL_PE1D_OFST (8)
+#define RG_SSUSB_PLL_FBKSEL_PE1H_OFST (2)
+#define RG_SSUSB_PLL_FBKSEL_U3_OFST (0)
+
+//U3D_reg9
+#define RG_SSUSB_PLL_FBKDIV_PE2H_OFST (24)
+#define RG_SSUSB_PLL_FBKDIV_PE1D_OFST (16)
+#define RG_SSUSB_PLL_FBKDIV_PE1H_OFST (8)
+#define RG_SSUSB_PLL_FBKDIV_U3_OFST (0)
+
+//U3D_reg10
+#define RG_SSUSB_PLL_PREDIV_PE2D_OFST (26)
+#define RG_SSUSB_PLL_PREDIV_PE2H_OFST (24)
+#define RG_SSUSB_PLL_PREDIV_PE1D_OFST (18)
+#define RG_SSUSB_PLL_PREDIV_PE1H_OFST (16)
+#define RG_SSUSB_PLL_PREDIV_U3_OFST (8)
+#define RG_SSUSB_PLL_FBKDIV_PE2D_OFST (0)
+
+//U3D_reg12
+#define RG_SSUSB_PLL_PCW_NCPO_U3_OFST (0)
+
+//U3D_reg13
+#define RG_SSUSB_PLL_PCW_NCPO_PE1H_OFST (0)
+
+//U3D_reg14
+#define RG_SSUSB_PLL_PCW_NCPO_PE1D_OFST (0)
+
+//U3D_reg15
+#define RG_SSUSB_PLL_PCW_NCPO_PE2H_OFST (0)
+
+//U3D_reg16
+#define RG_SSUSB_PLL_PCW_NCPO_PE2D_OFST (0)
+
+//U3D_reg19
+#define RG_SSUSB_PLL_SSC_DELTA1_PE1H_OFST (16)
+#define RG_SSUSB_PLL_SSC_DELTA1_U3_OFST (0)
+
+//U3D_reg20
+#define RG_SSUSB_PLL_SSC_DELTA1_PE2H_OFST (16)
+#define RG_SSUSB_PLL_SSC_DELTA1_PE1D_OFST (0)
+
+//U3D_reg21
+#define RG_SSUSB_PLL_SSC_DELTA_U3_OFST (16)
+#define RG_SSUSB_PLL_SSC_DELTA1_PE2D_OFST (0)
+
+//U3D_reg23
+#define RG_SSUSB_PLL_SSC_DELTA_PE1D_OFST (16)
+#define RG_SSUSB_PLL_SSC_DELTA_PE1H_OFST (0)
+
+//U3D_reg25
+#define RG_SSUSB_PLL_SSC_DELTA_PE2D_OFST (16)
+#define RG_SSUSB_PLL_SSC_DELTA_PE2H_OFST (0)
+
+//U3D_reg26
+#define RG_SSUSB_PLL_REFCKDIV_PE2D_OFST (25)
+#define RG_SSUSB_PLL_REFCKDIV_PE2H_OFST (24)
+#define RG_SSUSB_PLL_REFCKDIV_PE1D_OFST (16)
+#define RG_SSUSB_PLL_REFCKDIV_PE1H_OFST (8)
+#define RG_SSUSB_PLL_REFCKDIV_U3_OFST (0)
+
+//U3D_reg28
+#define RG_SSUSB_CDR_BPA_PE2D_OFST (24)
+#define RG_SSUSB_CDR_BPA_PE2H_OFST (16)
+#define RG_SSUSB_CDR_BPA_PE1D_OFST (10)
+#define RG_SSUSB_CDR_BPA_PE1H_OFST (8)
+#define RG_SSUSB_CDR_BPA_U3_OFST (0)
+
+//U3D_reg29
+#define RG_SSUSB_CDR_BPB_PE2D_OFST (24)
+#define RG_SSUSB_CDR_BPB_PE2H_OFST (16)
+#define RG_SSUSB_CDR_BPB_PE1D_OFST (6)
+#define RG_SSUSB_CDR_BPB_PE1H_OFST (3)
+#define RG_SSUSB_CDR_BPB_U3_OFST (0)
+
+//U3D_reg30
+#define RG_SSUSB_CDR_BR_PE2D_OFST (24)
+#define RG_SSUSB_CDR_BR_PE2H_OFST (16)
+#define RG_SSUSB_CDR_BR_PE1D_OFST (6)
+#define RG_SSUSB_CDR_BR_PE1H_OFST (3)
+#define RG_SSUSB_CDR_BR_U3_OFST (0)
+
+//U3D_reg31
+#define RG_SSUSB_CDR_FBDIV_PE2H_OFST (24)
+#define RG_SSUSB_CDR_FBDIV_PE1D_OFST (16)
+#define RG_SSUSB_CDR_FBDIV_PE1H_OFST (8)
+#define RG_SSUSB_CDR_FBDIV_U3_OFST (0)
+
+//U3D_reg32
+#define RG_SSUSB_EQ_RSTEP1_PE2D_OFST (30)
+#define RG_SSUSB_EQ_RSTEP1_PE2H_OFST (28)
+#define RG_SSUSB_EQ_RSTEP1_PE1D_OFST (26)
+#define RG_SSUSB_EQ_RSTEP1_PE1H_OFST (24)
+#define RG_SSUSB_EQ_RSTEP1_U3_OFST (22)
+#define RG_SSUSB_LFPS_DEGLITCH_PE2D_OFST (20)
+#define RG_SSUSB_LFPS_DEGLITCH_PE2H_OFST (18)
+#define RG_SSUSB_LFPS_DEGLITCH_PE1D_OFST (16)
+#define RG_SSUSB_LFPS_DEGLITCH_PE1H_OFST (14)
+#define RG_SSUSB_LFPS_DEGLITCH_U3_OFST (12)
+#define RG_SSUSB_CDR_KVSEL_PE2D_OFST (11)
+#define RG_SSUSB_CDR_KVSEL_PE2H_OFST (10)
+#define RG_SSUSB_CDR_KVSEL_PE1D_OFST (9)
+#define RG_SSUSB_CDR_KVSEL_PE1H_OFST (8)
+#define RG_SSUSB_CDR_KVSEL_U3_OFST (7)
+#define RG_SSUSB_CDR_FBDIV_PE2D_OFST (0)
+
+//U3D_reg33
+#define RG_SSUSB_RX_CMPWD_PE2D_OFST (26)
+#define RG_SSUSB_RX_CMPWD_PE2H_OFST (25)
+#define RG_SSUSB_RX_CMPWD_PE1D_OFST (24)
+#define RG_SSUSB_RX_CMPWD_PE1H_OFST (23)
+#define RG_SSUSB_RX_CMPWD_U3_OFST (16)
+#define RG_SSUSB_EQ_RSTEP2_PE2D_OFST (8)
+#define RG_SSUSB_EQ_RSTEP2_PE2H_OFST (6)
+#define RG_SSUSB_EQ_RSTEP2_PE1D_OFST (4)
+#define RG_SSUSB_EQ_RSTEP2_PE1H_OFST (2)
+#define RG_SSUSB_EQ_RSTEP2_U3_OFST (0)
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct u3phyd_reg {
+ //0x0
+ PHY_LE32 phyd_mix0;
+ PHY_LE32 phyd_mix1;
+ PHY_LE32 phyd_lfps0;
+ PHY_LE32 phyd_lfps1;
+ //0x10
+ PHY_LE32 phyd_impcal0;
+ PHY_LE32 phyd_impcal1;
+ PHY_LE32 phyd_txpll0;
+ PHY_LE32 phyd_txpll1;
+ //0x20
+ PHY_LE32 phyd_txpll2;
+ PHY_LE32 phyd_fl0;
+ PHY_LE32 phyd_mix2;
+ PHY_LE32 phyd_rx0;
+ //0x30
+ PHY_LE32 phyd_t2rlb;
+ PHY_LE32 phyd_cppat;
+ PHY_LE32 phyd_mix3;
+ PHY_LE32 phyd_ebufctl;
+ //0x40
+ PHY_LE32 phyd_pipe0;
+ PHY_LE32 phyd_pipe1;
+ PHY_LE32 phyd_mix4;
+ PHY_LE32 phyd_ckgen0;
+ //0x50
+ PHY_LE32 phyd_mix5;
+ PHY_LE32 phyd_reserved;
+ PHY_LE32 phyd_cdr0;
+ PHY_LE32 phyd_cdr1;
+ //0x60
+ PHY_LE32 phyd_pll_0;
+ PHY_LE32 phyd_pll_1;
+ PHY_LE32 phyd_bcn_det_1;
+ PHY_LE32 phyd_bcn_det_2;
+ //0x70
+ PHY_LE32 eq0;
+ PHY_LE32 eq1;
+ PHY_LE32 eq2;
+ PHY_LE32 eq3;
+ //0x80
+ PHY_LE32 eq_eye0;
+ PHY_LE32 eq_eye1;
+ PHY_LE32 eq_eye2;
+ PHY_LE32 eq_dfe0;
+ //0x90
+ PHY_LE32 eq_dfe1;
+ PHY_LE32 eq_dfe2;
+ PHY_LE32 eq_dfe3;
+ PHY_LE32 reserve0;
+ //0xa0
+ PHY_LE32 phyd_mon0;
+ PHY_LE32 phyd_mon1;
+ PHY_LE32 phyd_mon2;
+ PHY_LE32 phyd_mon3;
+ //0xb0
+ PHY_LE32 phyd_mon4;
+ PHY_LE32 phyd_mon5;
+ PHY_LE32 phyd_mon6;
+ PHY_LE32 phyd_mon7;
+ //0xc0
+ PHY_LE32 phya_rx_mon0;
+ PHY_LE32 phya_rx_mon1;
+ PHY_LE32 phya_rx_mon2;
+ PHY_LE32 phya_rx_mon3;
+ //0xd0
+ PHY_LE32 phya_rx_mon4;
+ PHY_LE32 phya_rx_mon5;
+ PHY_LE32 phyd_cppat2;
+ PHY_LE32 eq_eye3;
+ //0xe0
+ PHY_LE32 kband_out;
+ PHY_LE32 kband_out1;
+};
+
+//U3D_PHYD_MIX0
+#define RG_SSUSB_P_P3_TX_NG (0x1<<31) //31:31
+#define RG_SSUSB_TSEQ_EN (0x1<<30) //30:30
+#define RG_SSUSB_TSEQ_POLEN (0x1<<29) //29:29
+#define RG_SSUSB_TSEQ_POL (0x1<<28) //28:28
+#define RG_SSUSB_P_P3_PCLK_NG (0x1<<27) //27:27
+#define RG_SSUSB_TSEQ_TH (0x7<<24) //26:24
+#define RG_SSUSB_PRBS_BERTH (0xff<<16) //23:16
+#define RG_SSUSB_DISABLE_PHY_U2_ON (0x1<<15) //15:15
+#define RG_SSUSB_DISABLE_PHY_U2_OFF (0x1<<14) //14:14
+#define RG_SSUSB_PRBS_EN (0x1<<13) //13:13
+#define RG_SSUSB_BPSLOCK (0x1<<12) //12:12
+#define RG_SSUSB_RTCOMCNT (0xf<<8) //11:8
+#define RG_SSUSB_COMCNT (0xf<<4) //7:4
+#define RG_SSUSB_PRBSEL_CALIB (0xf<<0) //3:0
+
+//U3D_PHYD_MIX1
+#define RG_SSUSB_SLEEP_EN (0x1<<31) //31:31
+#define RG_SSUSB_PRBSEL_PCS (0x7<<28) //30:28
+#define RG_SSUSB_TXLFPS_PRD (0xf<<24) //27:24
+#define RG_SSUSB_P_RX_P0S_CK (0x1<<23) //23:23
+#define RG_SSUSB_P_TX_P0S_CK (0x1<<22) //22:22
+#define RG_SSUSB_PDNCTL (0x3f<<16) //21:16
+#define RG_SSUSB_TX_DRV_EN (0x1<<15) //15:15
+#define RG_SSUSB_TX_DRV_SEL (0x1<<14) //14:14
+#define RG_SSUSB_TX_DRV_DLY (0x3f<<8) //13:8
+#define RG_SSUSB_BERT_EN (0x1<<7) //7:7
+#define RG_SSUSB_SCP_TH (0x7<<4) //6:4
+#define RG_SSUSB_SCP_EN (0x1<<3) //3:3
+#define RG_SSUSB_RXANSIDEC_TEST (0x7<<0) //2:0
+
+//U3D_PHYD_LFPS0
+#define RG_SSUSB_LFPS_PWD (0x1<<30) //30:30
+#define RG_SSUSB_FORCE_LFPS_PWD (0x1<<29) //29:29
+#define RG_SSUSB_RXLFPS_OVF (0x1f<<24) //28:24
+#define RG_SSUSB_P3_ENTRY_SEL (0x1<<23) //23:23
+#define RG_SSUSB_P3_ENTRY (0x1<<22) //22:22
+#define RG_SSUSB_RXLFPS_CDRSEL (0x3<<20) //21:20
+#define RG_SSUSB_RXLFPS_CDRTH (0xf<<16) //19:16
+#define RG_SSUSB_LOCK5G_BLOCK (0x1<<15) //15:15
+#define RG_SSUSB_TFIFO_EXT_D_SEL (0x1<<14) //14:14
+#define RG_SSUSB_TFIFO_NO_EXTEND (0x1<<13) //13:13
+#define RG_SSUSB_RXLFPS_LOB (0x1f<<8) //12:8
+#define RG_SSUSB_TXLFPS_EN (0x1<<7) //7:7
+#define RG_SSUSB_TXLFPS_SEL (0x1<<6) //6:6
+#define RG_SSUSB_RXLFPS_CDRLOCK (0x1<<5) //5:5
+#define RG_SSUSB_RXLFPS_UPB (0x1f<<0) //4:0
+
+//U3D_PHYD_LFPS1
+#define RG_SSUSB_RX_IMP_BIAS (0xf<<28) //31:28
+#define RG_SSUSB_TX_IMP_BIAS (0xf<<24) //27:24
+#define RG_SSUSB_FWAKE_TH (0x3f<<16) //21:16
+#define RG_SSUSB_RXLFPS_UDF (0x1f<<8) //12:8
+#define RG_SSUSB_RXLFPS_P0IDLETH (0xff<<0) //7:0
+
+//U3D_PHYD_IMPCAL0
+#define RG_SSUSB_FORCE_TX_IMPSEL (0x1<<31) //31:31
+#define RG_SSUSB_TX_IMPCAL_EN (0x1<<30) //30:30
+#define RG_SSUSB_FORCE_TX_IMPCAL_EN (0x1<<29) //29:29
+#define RG_SSUSB_TX_IMPSEL (0x1f<<24) //28:24
+#define RG_SSUSB_TX_IMPCAL_CALCYC (0x3f<<16) //21:16
+#define RG_SSUSB_TX_IMPCAL_STBCYC (0x1f<<10) //14:10
+#define RG_SSUSB_TX_IMPCAL_CYCCNT (0x3ff<<0) //9:0
+
+//U3D_PHYD_IMPCAL1
+#define RG_SSUSB_FORCE_RX_IMPSEL (0x1<<31) //31:31
+#define RG_SSUSB_RX_IMPCAL_EN (0x1<<30) //30:30
+#define RG_SSUSB_FORCE_RX_IMPCAL_EN (0x1<<29) //29:29
+#define RG_SSUSB_RX_IMPSEL (0x1f<<24) //28:24
+#define RG_SSUSB_RX_IMPCAL_CALCYC (0x3f<<16) //21:16
+#define RG_SSUSB_RX_IMPCAL_STBCYC (0x1f<<10) //14:10
+#define RG_SSUSB_RX_IMPCAL_CYCCNT (0x3ff<<0) //9:0
+
+//U3D_PHYD_TXPLL0
+#define RG_SSUSB_TXPLL_DDSEN_CYC (0x1f<<27) //31:27
+#define RG_SSUSB_TXPLL_ON (0x1<<26) //26:26
+#define RG_SSUSB_FORCE_TXPLLON (0x1<<25) //25:25
+#define RG_SSUSB_TXPLL_STBCYC (0x1ff<<16) //24:16
+#define RG_SSUSB_TXPLL_NCPOCHG_CYC (0xf<<12) //15:12
+#define RG_SSUSB_TXPLL_NCPOEN_CYC (0x3<<10) //11:10
+#define RG_SSUSB_TXPLL_DDSRSTB_CYC (0x7<<0) //2:0
+
+//U3D_PHYD_TXPLL1
+#define RG_SSUSB_PLL_NCPO_EN (0x1<<31) //31:31
+#define RG_SSUSB_PLL_FIFO_START_MAN (0x1<<30) //30:30
+#define RG_SSUSB_PLL_NCPO_CHG (0x1<<28) //28:28
+#define RG_SSUSB_PLL_DDS_RSTB (0x1<<27) //27:27
+#define RG_SSUSB_PLL_DDS_PWDB (0x1<<26) //26:26
+#define RG_SSUSB_PLL_DDSEN (0x1<<25) //25:25
+#define RG_SSUSB_PLL_AUTOK_VCO (0x1<<24) //24:24
+#define RG_SSUSB_PLL_PWD (0x1<<23) //23:23
+#define RG_SSUSB_RX_AFE_PWD (0x1<<22) //22:22
+#define RG_SSUSB_PLL_TCADJ (0x3f<<16) //21:16
+#define RG_SSUSB_FORCE_CDR_TCADJ (0x1<<15) //15:15
+#define RG_SSUSB_FORCE_CDR_AUTOK_VCO (0x1<<14) //14:14
+#define RG_SSUSB_FORCE_CDR_PWD (0x1<<13) //13:13
+#define RG_SSUSB_FORCE_PLL_NCPO_EN (0x1<<12) //12:12
+#define RG_SSUSB_FORCE_PLL_FIFO_START_MAN (0x1<<11) //11:11
+#define RG_SSUSB_FORCE_PLL_NCPO_CHG (0x1<<9) //9:9
+#define RG_SSUSB_FORCE_PLL_DDS_RSTB (0x1<<8) //8:8
+#define RG_SSUSB_FORCE_PLL_DDS_PWDB (0x1<<7) //7:7
+#define RG_SSUSB_FORCE_PLL_DDSEN (0x1<<6) //6:6
+#define RG_SSUSB_FORCE_PLL_TCADJ (0x1<<5) //5:5
+#define RG_SSUSB_FORCE_PLL_AUTOK_VCO (0x1<<4) //4:4
+#define RG_SSUSB_FORCE_PLL_PWD (0x1<<3) //3:3
+#define RG_SSUSB_FLT_1_DISPERR_B (0x1<<2) //2:2
+
+//U3D_PHYD_TXPLL2
+#define RG_SSUSB_TX_LFPS_EN (0x1<<31) //31:31
+#define RG_SSUSB_FORCE_TX_LFPS_EN (0x1<<30) //30:30
+#define RG_SSUSB_TX_LFPS (0x1<<29) //29:29
+#define RG_SSUSB_FORCE_TX_LFPS (0x1<<28) //28:28
+#define RG_SSUSB_RXPLL_STB (0x1<<27) //27:27
+#define RG_SSUSB_TXPLL_STB (0x1<<26) //26:26
+#define RG_SSUSB_FORCE_RXPLL_STB (0x1<<25) //25:25
+#define RG_SSUSB_FORCE_TXPLL_STB (0x1<<24) //24:24
+#define RG_SSUSB_RXPLL_REFCKSEL (0x1<<16) //16:16
+#define RG_SSUSB_RXPLL_STBMODE (0x1<<11) //11:11
+#define RG_SSUSB_RXPLL_ON (0x1<<10) //10:10
+#define RG_SSUSB_FORCE_RXPLLON (0x1<<9) //9:9
+#define RG_SSUSB_FORCE_RX_AFE_PWD (0x1<<8) //8:8
+#define RG_SSUSB_CDR_AUTOK_VCO (0x1<<7) //7:7
+#define RG_SSUSB_CDR_PWD (0x1<<6) //6:6
+#define RG_SSUSB_CDR_TCADJ (0x3f<<0) //5:0
+
+//U3D_PHYD_FL0
+#define RG_SSUSB_RX_FL_TARGET (0xffff<<16) //31:16
+#define RG_SSUSB_RX_FL_CYCLECNT (0xffff<<0) //15:0
+
+//U3D_PHYD_MIX2
+#define RG_SSUSB_RX_EQ_RST (0x1<<31) //31:31
+#define RG_SSUSB_RX_EQ_RST_SEL (0x1<<30) //30:30
+#define RG_SSUSB_RXVAL_RST (0x1<<29) //29:29
+#define RG_SSUSB_RXVAL_CNT (0x1f<<24) //28:24
+#define RG_SSUSB_CDROS_EN (0x1<<18) //18:18
+#define RG_SSUSB_CDR_LCKOP (0x3<<16) //17:16
+#define RG_SSUSB_RX_FL_LOCKTH (0xf<<8) //11:8
+#define RG_SSUSB_RX_FL_OFFSET (0xff<<0) //7:0
+
+//U3D_PHYD_RX0
+#define RG_SSUSB_T2RLB_BERTH (0xff<<24) //31:24
+#define RG_SSUSB_T2RLB_PAT (0xff<<16) //23:16
+#define RG_SSUSB_T2RLB_EN (0x1<<15) //15:15
+#define RG_SSUSB_T2RLB_BPSCRAMB (0x1<<14) //14:14
+#define RG_SSUSB_T2RLB_SERIAL (0x1<<13) //13:13
+#define RG_SSUSB_T2RLB_MODE (0x3<<11) //12:11
+#define RG_SSUSB_RX_SAOSC_EN (0x1<<10) //10:10
+#define RG_SSUSB_RX_SAOSC_EN_SEL (0x1<<9) //9:9
+#define RG_SSUSB_RX_DFE_OPTION (0x1<<8) //8:8
+#define RG_SSUSB_RX_DFE_EN (0x1<<7) //7:7
+#define RG_SSUSB_RX_DFE_EN_SEL (0x1<<6) //6:6
+#define RG_SSUSB_RX_EQ_EN (0x1<<5) //5:5
+#define RG_SSUSB_RX_EQ_EN_SEL (0x1<<4) //4:4
+#define RG_SSUSB_RX_SAOSC_RST (0x1<<3) //3:3
+#define RG_SSUSB_RX_SAOSC_RST_SEL (0x1<<2) //2:2
+#define RG_SSUSB_RX_DFE_RST (0x1<<1) //1:1
+#define RG_SSUSB_RX_DFE_RST_SEL (0x1<<0) //0:0
+
+//U3D_PHYD_T2RLB
+#define RG_SSUSB_EQTRAIN_CH_MODE (0x1<<28) //28:28
+#define RG_SSUSB_PRB_OUT_CPPAT (0x1<<27) //27:27
+#define RG_SSUSB_BPANSIENC (0x1<<26) //26:26
+#define RG_SSUSB_VALID_EN (0x1<<25) //25:25
+#define RG_SSUSB_EBUF_SRST (0x1<<24) //24:24
+#define RG_SSUSB_K_EMP (0xf<<20) //23:20
+#define RG_SSUSB_K_FUL (0xf<<16) //19:16
+#define RG_SSUSB_T2RLB_BDATRST (0xf<<12) //15:12
+#define RG_SSUSB_P_T2RLB_SKP_EN (0x1<<10) //10:10
+#define RG_SSUSB_T2RLB_PATMODE (0x3<<8) //9:8
+#define RG_SSUSB_T2RLB_TSEQCNT (0xff<<0) //7:0
+
+//U3D_PHYD_CPPAT
+#define RG_SSUSB_CPPAT_PROGRAM_EN (0x1<<24) //24:24
+#define RG_SSUSB_CPPAT_TOZ (0x3<<21) //22:21
+#define RG_SSUSB_CPPAT_PRBS_EN (0x1<<20) //20:20
+#define RG_SSUSB_CPPAT_OUT_TMP2 (0xf<<16) //19:16
+#define RG_SSUSB_CPPAT_OUT_TMP1 (0xff<<8) //15:8
+#define RG_SSUSB_CPPAT_OUT_TMP0 (0xff<<0) //7:0
+
+//U3D_PHYD_MIX3
+#define RG_SSUSB_CDR_TCADJ_MINUS (0x1<<31) //31:31
+#define RG_SSUSB_P_CDROS_EN (0x1<<30) //30:30
+#define RG_SSUSB_P_P2_TX_DRV_DIS (0x1<<28) //28:28
+#define RG_SSUSB_CDR_TCADJ_OFFSET (0x7<<24) //26:24
+#define RG_SSUSB_PLL_TCADJ_MINUS (0x1<<23) //23:23
+#define RG_SSUSB_FORCE_PLL_BIAS_LPF_EN (0x1<<20) //20:20
+#define RG_SSUSB_PLL_BIAS_LPF_EN (0x1<<19) //19:19
+#define RG_SSUSB_PLL_TCADJ_OFFSET (0x7<<16) //18:16
+#define RG_SSUSB_FORCE_PLL_SSCEN (0x1<<15) //15:15
+#define RG_SSUSB_PLL_SSCEN (0x1<<14) //14:14
+#define RG_SSUSB_FORCE_CDR_PI_PWD (0x1<<13) //13:13
+#define RG_SSUSB_CDR_PI_PWD (0x1<<12) //12:12
+#define RG_SSUSB_CDR_PI_MODE (0x1<<11) //11:11
+#define RG_SSUSB_TXPLL_SSCEN_CYC (0x3ff<<0) //9:0
+
+//U3D_PHYD_EBUFCTL
+#define RG_SSUSB_EBUFCTL (0xffffffff<<0) //31:0
+
+//U3D_PHYD_PIPE0
+#define RG_SSUSB_RXTERMINATION (0x1<<30) //30:30
+#define RG_SSUSB_RXEQTRAINING (0x1<<29) //29:29
+#define RG_SSUSB_RXPOLARITY (0x1<<28) //28:28
+#define RG_SSUSB_TXDEEMPH (0x3<<26) //27:26
+#define RG_SSUSB_POWERDOWN (0x3<<24) //25:24
+#define RG_SSUSB_TXONESZEROS (0x1<<23) //23:23
+#define RG_SSUSB_TXELECIDLE (0x1<<22) //22:22
+#define RG_SSUSB_TXDETECTRX (0x1<<21) //21:21
+#define RG_SSUSB_PIPE_SEL (0x1<<20) //20:20
+#define RG_SSUSB_TXDATAK (0xf<<16) //19:16
+#define RG_SSUSB_CDR_STABLE_SEL (0x1<<15) //15:15
+#define RG_SSUSB_CDR_STABLE (0x1<<14) //14:14
+#define RG_SSUSB_CDR_RSTB_SEL (0x1<<13) //13:13
+#define RG_SSUSB_CDR_RSTB (0x1<<12) //12:12
+#define RG_SSUSB_P_ERROR_SEL (0x3<<4) //5:4
+#define RG_SSUSB_TXMARGIN (0x7<<1) //3:1
+#define RG_SSUSB_TXCOMPLIANCE (0x1<<0) //0:0
+
+//U3D_PHYD_PIPE1
+#define RG_SSUSB_TXDATA (0xffffffff<<0) //31:0
+
+//U3D_PHYD_MIX4
+#define RG_SSUSB_CDROS_CNT (0x3f<<24) //29:24
+#define RG_SSUSB_T2RLB_BER_EN (0x1<<16) //16:16
+#define RG_SSUSB_T2RLB_BER_RATE (0xffff<<0) //15:0
+
+//U3D_PHYD_CKGEN0
+#define RG_SSUSB_RFIFO_IMPLAT (0x1<<27) //27:27
+#define RG_SSUSB_TFIFO_PSEL (0x7<<24) //26:24
+#define RG_SSUSB_CKGEN_PSEL (0x3<<8) //9:8
+#define RG_SSUSB_RXCK_INV (0x1<<0) //0:0
+
+//U3D_PHYD_MIX5
+#define RG_SSUSB_PRB_SEL (0xffff<<16) //31:16
+#define RG_SSUSB_RXPLL_STBCYC (0x7ff<<0) //10:0
+
+//U3D_PHYD_RESERVED
+#define RG_SSUSB_PHYD_RESERVE (0xffffffff<<0) //31:0
+//#define RG_SSUSB_RX_SIGDET_SEL (0x1<<11)
+//#define RG_SSUSB_RX_SIGDET_EN (0x1<<12)
+//#define RG_SSUSB_RX_PI_CAL_MANUAL_SEL (0x1<<9)
+//#define RG_SSUSB_RX_PI_CAL_MANUAL_EN (0x1<<10)
+
+//U3D_PHYD_CDR0
+#define RG_SSUSB_CDR_BIC_LTR (0xf<<28) //31:28
+#define RG_SSUSB_CDR_BIC_LTD0 (0xf<<24) //27:24
+#define RG_SSUSB_CDR_BC_LTD1 (0x1f<<16) //20:16
+#define RG_SSUSB_CDR_BC_LTR (0x1f<<8) //12:8
+#define RG_SSUSB_CDR_BC_LTD0 (0x1f<<0) //4:0
+
+//U3D_PHYD_CDR1
+#define RG_SSUSB_CDR_BIR_LTD1 (0x1f<<24) //28:24
+#define RG_SSUSB_CDR_BIR_LTR (0x1f<<16) //20:16
+#define RG_SSUSB_CDR_BIR_LTD0 (0x1f<<8) //12:8
+#define RG_SSUSB_CDR_BW_SEL (0x3<<6) //7:6
+#define RG_SSUSB_CDR_BIC_LTD1 (0xf<<0) //3:0
+
+//U3D_PHYD_PLL_0
+#define RG_SSUSB_FORCE_CDR_BAND_5G (0x1<<28) //28:28
+#define RG_SSUSB_FORCE_CDR_BAND_2P5G (0x1<<27) //27:27
+#define RG_SSUSB_FORCE_PLL_BAND_5G (0x1<<26) //26:26
+#define RG_SSUSB_FORCE_PLL_BAND_2P5G (0x1<<25) //25:25
+#define RG_SSUSB_P_EQ_T_SEL (0x3ff<<15) //24:15
+#define RG_SSUSB_PLL_ISO_EN_CYC (0x3ff<<5) //14:5
+#define RG_SSUSB_PLLBAND_RECAL (0x1<<4) //4:4
+#define RG_SSUSB_PLL_DDS_ISO_EN (0x1<<3) //3:3
+#define RG_SSUSB_FORCE_PLL_DDS_ISO_EN (0x1<<2) //2:2
+#define RG_SSUSB_PLL_DDS_PWR_ON (0x1<<1) //1:1
+#define RG_SSUSB_FORCE_PLL_DDS_PWR_ON (0x1<<0) //0:0
+
+//U3D_PHYD_PLL_1
+#define RG_SSUSB_CDR_BAND_5G (0xff<<24) //31:24
+#define RG_SSUSB_CDR_BAND_2P5G (0xff<<16) //23:16
+#define RG_SSUSB_PLL_BAND_5G (0xff<<8) //15:8
+#define RG_SSUSB_PLL_BAND_2P5G (0xff<<0) //7:0
+
+//U3D_PHYD_BCN_DET_1
+#define RG_SSUSB_P_BCN_OBS_PRD (0xffff<<16) //31:16
+#define RG_SSUSB_U_BCN_OBS_PRD (0xffff<<0) //15:0
+
+//U3D_PHYD_BCN_DET_2
+#define RG_SSUSB_P_BCN_OBS_SEL (0xfff<<16) //27:16
+#define RG_SSUSB_BCN_DET_DIS (0x1<<12) //12:12
+#define RG_SSUSB_U_BCN_OBS_SEL (0xfff<<0) //11:0
+
+//U3D_EQ0
+#define RG_SSUSB_EQ_DLHL_LFI (0x7f<<24) //30:24
+#define RG_SSUSB_EQ_DHHL_LFI (0x7f<<16) //22:16
+#define RG_SSUSB_EQ_DD0HOS_LFI (0x7f<<8) //14:8
+#define RG_SSUSB_EQ_DD0LOS_LFI (0x7f<<0) //6:0
+
+//U3D_EQ1
+#define RG_SSUSB_EQ_DD1HOS_LFI (0x7f<<24) //30:24
+#define RG_SSUSB_EQ_DD1LOS_LFI (0x7f<<16) //22:16
+#define RG_SSUSB_EQ_DE0OS_LFI (0x7f<<8) //14:8
+#define RG_SSUSB_EQ_DE1OS_LFI (0x7f<<0) //6:0
+
+//U3D_EQ2
+#define RG_SSUSB_EQ_DLHLOS_LFI (0x7f<<24) //30:24
+#define RG_SSUSB_EQ_DHHLOS_LFI (0x7f<<16) //22:16
+#define RG_SSUSB_EQ_STOPTIME (0x1<<14) //14:14
+#define RG_SSUSB_EQ_DHHL_LF_SEL (0x7<<11) //13:11
+#define RG_SSUSB_EQ_DSAOS_LF_SEL (0x7<<8) //10:8
+#define RG_SSUSB_EQ_STARTTIME (0x3<<6) //7:6
+#define RG_SSUSB_EQ_DLEQ_LF_SEL (0x7<<3) //5:3
+#define RG_SSUSB_EQ_DLHL_LF_SEL (0x7<<0) //2:0
+
+//U3D_EQ3
+#define RG_SSUSB_EQ_DLEQ_LFI_GEN2 (0xf<<28) //31:28
+#define RG_SSUSB_EQ_DLEQ_LFI_GEN1 (0xf<<24) //27:24
+#define RG_SSUSB_EQ_DEYE0OS_LFI (0x7f<<16) //22:16
+#define RG_SSUSB_EQ_DEYE1OS_LFI (0x7f<<8) //14:8
+#define RG_SSUSB_EQ_TRI_DET_EN (0x1<<7) //7:7
+#define RG_SSUSB_EQ_TRI_DET_TH (0x7f<<0) //6:0
+
+//U3D_EQ_EYE0
+#define RG_SSUSB_EQ_EYE_XOFFSET (0x7f<<25) //31:25
+#define RG_SSUSB_EQ_EYE_MON_EN (0x1<<24) //24:24
+#define RG_SSUSB_EQ_EYE0_Y (0x7f<<16) //22:16
+#define RG_SSUSB_EQ_EYE1_Y (0x7f<<8) //14:8
+#define RG_SSUSB_EQ_PILPO_ROUT (0x1<<7) //7:7
+#define RG_SSUSB_EQ_PI_KPGAIN (0x7<<4) //6:4
+#define RG_SSUSB_EQ_EYE_CNT_EN (0x1<<3) //3:3
+
+//U3D_EQ_EYE1
+#define RG_SSUSB_EQ_SIGDET (0x7f<<24) //30:24
+#define RG_SSUSB_EQ_EYE_MASK (0x3ff<<7) //16:7
+
+//U3D_EQ_EYE2
+#define RG_SSUSB_EQ_RX500M_CK_SEL (0x1<<31) //31:31
+#define RG_SSUSB_EQ_SD_CNT1 (0x3f<<24) //29:24
+#define RG_SSUSB_EQ_ISIFLAG_SEL (0x3<<22) //23:22
+#define RG_SSUSB_EQ_SD_CNT0 (0x3f<<16) //21:16
+
+//U3D_EQ_DFE0
+#define RG_SSUSB_EQ_LEQMAX (0xf<<28) //31:28
+#define RG_SSUSB_EQ_DFEX_EN (0x1<<27) //27:27
+#define RG_SSUSB_EQ_DFEX_LF_SEL (0x7<<24) //26:24
+#define RG_SSUSB_EQ_CHK_EYE_H (0x1<<23) //23:23
+#define RG_SSUSB_EQ_PIEYE_INI (0x7f<<16) //22:16
+#define RG_SSUSB_EQ_PI90_INI (0x7f<<8) //14:8
+#define RG_SSUSB_EQ_PI0_INI (0x7f<<0) //6:0
+
+//U3D_EQ_DFE1
+#define RG_SSUSB_EQ_REV (0xffff<<16) //31:16
+#define RG_SSUSB_EQ_DFEYEN_DUR (0x7<<12) //14:12
+#define RG_SSUSB_EQ_DFEXEN_DUR (0x7<<8) //10:8
+#define RG_SSUSB_EQ_DFEX_RST (0x1<<7) //7:7
+#define RG_SSUSB_EQ_GATED_RXD_B (0x1<<6) //6:6
+#define RG_SSUSB_EQ_PI90CK_SEL (0x3<<4) //5:4
+#define RG_SSUSB_EQ_DFEX_DIS (0x1<<2) //2:2
+#define RG_SSUSB_EQ_DFEYEN_STOP_DIS (0x1<<1) //1:1
+#define RG_SSUSB_EQ_DFEXEN_SEL (0x1<<0) //0:0
+
+//U3D_EQ_DFE2
+#define RG_SSUSB_EQ_MON_SEL (0x1f<<24) //28:24
+#define RG_SSUSB_EQ_LEQOSC_DLYCNT (0x7<<16) //18:16
+#define RG_SSUSB_EQ_DLEQOS_LFI (0x1f<<8) //12:8
+#define RG_SSUSB_EQ_LEQ_STOP_TO (0x3<<0) //1:0
+
+//U3D_EQ_DFE3
+#define RG_SSUSB_EQ_RESERVED (0xffffffff<<0) //31:0
+
+//U3D_PHYD_MON0
+#define RGS_SSUSB_BERT_BERC (0xffff<<16) //31:16
+#define RGS_SSUSB_LFPS (0xf<<12) //15:12
+#define RGS_SSUSB_TRAINDEC (0x7<<8) //10:8
+#define RGS_SSUSB_SCP_PAT (0xff<<0) //7:0
+
+//U3D_PHYD_MON1
+#define RGS_SSUSB_RX_FL_OUT (0xffff<<0) //15:0
+
+//U3D_PHYD_MON2
+#define RGS_SSUSB_T2RLB_ERRCNT (0xffff<<16) //31:16
+#define RGS_SSUSB_RETRACK (0xf<<12) //15:12
+#define RGS_SSUSB_RXPLL_LOCK (0x1<<10) //10:10
+#define RGS_SSUSB_CDR_VCOCAL_CPLT_D (0x1<<9) //9:9
+#define RGS_SSUSB_PLL_VCOCAL_CPLT_D (0x1<<8) //8:8
+#define RGS_SSUSB_PDNCTL (0xff<<0) //7:0
+
+//U3D_PHYD_MON3
+#define RGS_SSUSB_TSEQ_ERRCNT (0xffff<<16) //31:16
+#define RGS_SSUSB_PRBS_ERRCNT (0xffff<<0) //15:0
+
+//U3D_PHYD_MON4
+#define RGS_SSUSB_RX_LSLOCK_CNT (0xf<<24) //27:24
+#define RGS_SSUSB_SCP_DETCNT (0xff<<16) //23:16
+#define RGS_SSUSB_TSEQ_DETCNT (0xffff<<0) //15:0
+
+//U3D_PHYD_MON5
+#define RGS_SSUSB_EBUFMSG (0xffff<<16) //31:16
+#define RGS_SSUSB_BERT_LOCK (0x1<<15) //15:15
+#define RGS_SSUSB_SCP_DET (0x1<<14) //14:14
+#define RGS_SSUSB_TSEQ_DET (0x1<<13) //13:13
+#define RGS_SSUSB_EBUF_UDF (0x1<<12) //12:12
+#define RGS_SSUSB_EBUF_OVF (0x1<<11) //11:11
+#define RGS_SSUSB_PRBS_PASSTH (0x1<<10) //10:10
+#define RGS_SSUSB_PRBS_PASS (0x1<<9) //9:9
+#define RGS_SSUSB_PRBS_LOCK (0x1<<8) //8:8
+#define RGS_SSUSB_T2RLB_ERR (0x1<<6) //6:6
+#define RGS_SSUSB_T2RLB_PASSTH (0x1<<5) //5:5
+#define RGS_SSUSB_T2RLB_PASS (0x1<<4) //4:4
+#define RGS_SSUSB_T2RLB_LOCK (0x1<<3) //3:3
+#define RGS_SSUSB_RX_IMPCAL_DONE (0x1<<2) //2:2
+#define RGS_SSUSB_TX_IMPCAL_DONE (0x1<<1) //1:1
+#define RGS_SSUSB_RXDETECTED (0x1<<0) //0:0
+
+//U3D_PHYD_MON6
+#define RGS_SSUSB_SIGCAL_DONE (0x1<<30) //30:30
+#define RGS_SSUSB_SIGCAL_CAL_OUT (0x1<<29) //29:29
+#define RGS_SSUSB_SIGCAL_OFFSET (0x1f<<24) //28:24
+#define RGS_SSUSB_RX_IMP_SEL (0x1f<<16) //20:16
+#define RGS_SSUSB_TX_IMP_SEL (0x1f<<8) //12:8
+#define RGS_SSUSB_TFIFO_MSG (0xf<<4) //7:4
+#define RGS_SSUSB_RFIFO_MSG (0xf<<0) //3:0
+
+//U3D_PHYD_MON7
+#define RGS_SSUSB_FT_OUT (0xff<<8) //15:8
+#define RGS_SSUSB_PRB_OUT (0xff<<0) //7:0
+
+//U3D_PHYA_RX_MON0
+#define RGS_SSUSB_EQ_DCLEQ (0xf<<24) //27:24
+#define RGS_SSUSB_EQ_DCD0H (0x7f<<16) //22:16
+#define RGS_SSUSB_EQ_DCD0L (0x7f<<8) //14:8
+#define RGS_SSUSB_EQ_DCD1H (0x7f<<0) //6:0
+
+//U3D_PHYA_RX_MON1
+#define RGS_SSUSB_EQ_DCD1L (0x7f<<24) //30:24
+#define RGS_SSUSB_EQ_DCE0 (0x7f<<16) //22:16
+#define RGS_SSUSB_EQ_DCE1 (0x7f<<8) //14:8
+#define RGS_SSUSB_EQ_DCHHL (0x7f<<0) //6:0
+
+//U3D_PHYA_RX_MON2
+#define RGS_SSUSB_EQ_LEQ_STOP (0x1<<31) //31:31
+#define RGS_SSUSB_EQ_DCLHL (0x7f<<24) //30:24
+#define RGS_SSUSB_EQ_STATUS (0xff<<16) //23:16
+#define RGS_SSUSB_EQ_DCEYE0 (0x7f<<8) //14:8
+#define RGS_SSUSB_EQ_DCEYE1 (0x7f<<0) //6:0
+
+//U3D_PHYA_RX_MON3
+#define RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_0 (0xfffff<<0) //19:0
+
+//U3D_PHYA_RX_MON4
+#define RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_1 (0xfffff<<0) //19:0
+
+//U3D_PHYA_RX_MON5
+#define RGS_SSUSB_EQ_DCLEQOS (0x1f<<8) //12:8
+#define RGS_SSUSB_EQ_EYE_CNT_RDY (0x1<<7) //7:7
+#define RGS_SSUSB_EQ_PILPO (0x7f<<0) //6:0
+
+//U3D_PHYD_CPPAT2
+#define RG_SSUSB_CPPAT_OUT_H_TMP2 (0xf<<16) //19:16
+#define RG_SSUSB_CPPAT_OUT_H_TMP1 (0xff<<8) //15:8
+#define RG_SSUSB_CPPAT_OUT_H_TMP0 (0xff<<0) //7:0
+
+//U3D_EQ_EYE3
+#define RG_SSUSB_EQ_LEQ_SHIFT (0x7<<24) //26:24
+#define RG_SSUSB_EQ_EYE_CNT (0xfffff<<0) //19:0
+
+//U3D_KBAND_OUT
+#define RGS_SSUSB_CDR_BAND_5G (0xff<<24) //31:24
+#define RGS_SSUSB_CDR_BAND_2P5G (0xff<<16) //23:16
+#define RGS_SSUSB_PLL_BAND_5G (0xff<<8) //15:8
+#define RGS_SSUSB_PLL_BAND_2P5G (0xff<<0) //7:0
+
+//U3D_KBAND_OUT1
+#define RGS_SSUSB_CDR_VCOCAL_FAIL (0x1<<24) //24:24
+#define RGS_SSUSB_CDR_VCOCAL_STATE (0xff<<16) //23:16
+#define RGS_SSUSB_PLL_VCOCAL_FAIL (0x1<<8) //8:8
+#define RGS_SSUSB_PLL_VCOCAL_STATE (0xff<<0) //7:0
+
+
+/* OFFSET */
+
+//U3D_PHYD_MIX0
+#define RG_SSUSB_P_P3_TX_NG_OFST (31)
+#define RG_SSUSB_TSEQ_EN_OFST (30)
+#define RG_SSUSB_TSEQ_POLEN_OFST (29)
+#define RG_SSUSB_TSEQ_POL_OFST (28)
+#define RG_SSUSB_P_P3_PCLK_NG_OFST (27)
+#define RG_SSUSB_TSEQ_TH_OFST (24)
+#define RG_SSUSB_PRBS_BERTH_OFST (16)
+#define RG_SSUSB_DISABLE_PHY_U2_ON_OFST (15)
+#define RG_SSUSB_DISABLE_PHY_U2_OFF_OFST (14)
+#define RG_SSUSB_PRBS_EN_OFST (13)
+#define RG_SSUSB_BPSLOCK_OFST (12)
+#define RG_SSUSB_RTCOMCNT_OFST (8)
+#define RG_SSUSB_COMCNT_OFST (4)
+#define RG_SSUSB_PRBSEL_CALIB_OFST (0)
+
+//U3D_PHYD_MIX1
+#define RG_SSUSB_SLEEP_EN_OFST (31)
+#define RG_SSUSB_PRBSEL_PCS_OFST (28)
+#define RG_SSUSB_TXLFPS_PRD_OFST (24)
+#define RG_SSUSB_P_RX_P0S_CK_OFST (23)
+#define RG_SSUSB_P_TX_P0S_CK_OFST (22)
+#define RG_SSUSB_PDNCTL_OFST (16)
+#define RG_SSUSB_TX_DRV_EN_OFST (15)
+#define RG_SSUSB_TX_DRV_SEL_OFST (14)
+#define RG_SSUSB_TX_DRV_DLY_OFST (8)
+#define RG_SSUSB_BERT_EN_OFST (7)
+#define RG_SSUSB_SCP_TH_OFST (4)
+#define RG_SSUSB_SCP_EN_OFST (3)
+#define RG_SSUSB_RXANSIDEC_TEST_OFST (0)
+
+//U3D_PHYD_LFPS0
+#define RG_SSUSB_LFPS_PWD_OFST (30)
+#define RG_SSUSB_FORCE_LFPS_PWD_OFST (29)
+#define RG_SSUSB_RXLFPS_OVF_OFST (24)
+#define RG_SSUSB_P3_ENTRY_SEL_OFST (23)
+#define RG_SSUSB_P3_ENTRY_OFST (22)
+#define RG_SSUSB_RXLFPS_CDRSEL_OFST (20)
+#define RG_SSUSB_RXLFPS_CDRTH_OFST (16)
+#define RG_SSUSB_LOCK5G_BLOCK_OFST (15)
+#define RG_SSUSB_TFIFO_EXT_D_SEL_OFST (14)
+#define RG_SSUSB_TFIFO_NO_EXTEND_OFST (13)
+#define RG_SSUSB_RXLFPS_LOB_OFST (8)
+#define RG_SSUSB_TXLFPS_EN_OFST (7)
+#define RG_SSUSB_TXLFPS_SEL_OFST (6)
+#define RG_SSUSB_RXLFPS_CDRLOCK_OFST (5)
+#define RG_SSUSB_RXLFPS_UPB_OFST (0)
+
+//U3D_PHYD_LFPS1
+#define RG_SSUSB_RX_IMP_BIAS_OFST (28)
+#define RG_SSUSB_TX_IMP_BIAS_OFST (24)
+#define RG_SSUSB_FWAKE_TH_OFST (16)
+#define RG_SSUSB_RXLFPS_UDF_OFST (8)
+#define RG_SSUSB_RXLFPS_P0IDLETH_OFST (0)
+
+//U3D_PHYD_IMPCAL0
+#define RG_SSUSB_FORCE_TX_IMPSEL_OFST (31)
+#define RG_SSUSB_TX_IMPCAL_EN_OFST (30)
+#define RG_SSUSB_FORCE_TX_IMPCAL_EN_OFST (29)
+#define RG_SSUSB_TX_IMPSEL_OFST (24)
+#define RG_SSUSB_TX_IMPCAL_CALCYC_OFST (16)
+#define RG_SSUSB_TX_IMPCAL_STBCYC_OFST (10)
+#define RG_SSUSB_TX_IMPCAL_CYCCNT_OFST (0)
+
+//U3D_PHYD_IMPCAL1
+#define RG_SSUSB_FORCE_RX_IMPSEL_OFST (31)
+#define RG_SSUSB_RX_IMPCAL_EN_OFST (30)
+#define RG_SSUSB_FORCE_RX_IMPCAL_EN_OFST (29)
+#define RG_SSUSB_RX_IMPSEL_OFST (24)
+#define RG_SSUSB_RX_IMPCAL_CALCYC_OFST (16)
+#define RG_SSUSB_RX_IMPCAL_STBCYC_OFST (10)
+#define RG_SSUSB_RX_IMPCAL_CYCCNT_OFST (0)
+
+//U3D_PHYD_TXPLL0
+#define RG_SSUSB_TXPLL_DDSEN_CYC_OFST (27)
+#define RG_SSUSB_TXPLL_ON_OFST (26)
+#define RG_SSUSB_FORCE_TXPLLON_OFST (25)
+#define RG_SSUSB_TXPLL_STBCYC_OFST (16)
+#define RG_SSUSB_TXPLL_NCPOCHG_CYC_OFST (12)
+#define RG_SSUSB_TXPLL_NCPOEN_CYC_OFST (10)
+#define RG_SSUSB_TXPLL_DDSRSTB_CYC_OFST (0)
+
+//U3D_PHYD_TXPLL1
+#define RG_SSUSB_PLL_NCPO_EN_OFST (31)
+#define RG_SSUSB_PLL_FIFO_START_MAN_OFST (30)
+#define RG_SSUSB_PLL_NCPO_CHG_OFST (28)
+#define RG_SSUSB_PLL_DDS_RSTB_OFST (27)
+#define RG_SSUSB_PLL_DDS_PWDB_OFST (26)
+#define RG_SSUSB_PLL_DDSEN_OFST (25)
+#define RG_SSUSB_PLL_AUTOK_VCO_OFST (24)
+#define RG_SSUSB_PLL_PWD_OFST (23)
+#define RG_SSUSB_RX_AFE_PWD_OFST (22)
+#define RG_SSUSB_PLL_TCADJ_OFST (16)
+#define RG_SSUSB_FORCE_CDR_TCADJ_OFST (15)
+#define RG_SSUSB_FORCE_CDR_AUTOK_VCO_OFST (14)
+#define RG_SSUSB_FORCE_CDR_PWD_OFST (13)
+#define RG_SSUSB_FORCE_PLL_NCPO_EN_OFST (12)
+#define RG_SSUSB_FORCE_PLL_FIFO_START_MAN_OFST (11)
+#define RG_SSUSB_FORCE_PLL_NCPO_CHG_OFST (9)
+#define RG_SSUSB_FORCE_PLL_DDS_RSTB_OFST (8)
+#define RG_SSUSB_FORCE_PLL_DDS_PWDB_OFST (7)
+#define RG_SSUSB_FORCE_PLL_DDSEN_OFST (6)
+#define RG_SSUSB_FORCE_PLL_TCADJ_OFST (5)
+#define RG_SSUSB_FORCE_PLL_AUTOK_VCO_OFST (4)
+#define RG_SSUSB_FORCE_PLL_PWD_OFST (3)
+#define RG_SSUSB_FLT_1_DISPERR_B_OFST (2)
+
+//U3D_PHYD_TXPLL2
+#define RG_SSUSB_TX_LFPS_EN_OFST (31)
+#define RG_SSUSB_FORCE_TX_LFPS_EN_OFST (30)
+#define RG_SSUSB_TX_LFPS_OFST (29)
+#define RG_SSUSB_FORCE_TX_LFPS_OFST (28)
+#define RG_SSUSB_RXPLL_STB_OFST (27)
+#define RG_SSUSB_TXPLL_STB_OFST (26)
+#define RG_SSUSB_FORCE_RXPLL_STB_OFST (25)
+#define RG_SSUSB_FORCE_TXPLL_STB_OFST (24)
+#define RG_SSUSB_RXPLL_REFCKSEL_OFST (16)
+#define RG_SSUSB_RXPLL_STBMODE_OFST (11)
+#define RG_SSUSB_RXPLL_ON_OFST (10)
+#define RG_SSUSB_FORCE_RXPLLON_OFST (9)
+#define RG_SSUSB_FORCE_RX_AFE_PWD_OFST (8)
+#define RG_SSUSB_CDR_AUTOK_VCO_OFST (7)
+#define RG_SSUSB_CDR_PWD_OFST (6)
+#define RG_SSUSB_CDR_TCADJ_OFST (0)
+
+//U3D_PHYD_FL0
+#define RG_SSUSB_RX_FL_TARGET_OFST (16)
+#define RG_SSUSB_RX_FL_CYCLECNT_OFST (0)
+
+//U3D_PHYD_MIX2
+#define RG_SSUSB_RX_EQ_RST_OFST (31)
+#define RG_SSUSB_RX_EQ_RST_SEL_OFST (30)
+#define RG_SSUSB_RXVAL_RST_OFST (29)
+#define RG_SSUSB_RXVAL_CNT_OFST (24)
+#define RG_SSUSB_CDROS_EN_OFST (18)
+#define RG_SSUSB_CDR_LCKOP_OFST (16)
+#define RG_SSUSB_RX_FL_LOCKTH_OFST (8)
+#define RG_SSUSB_RX_FL_OFFSET_OFST (0)
+
+//U3D_PHYD_RX0
+#define RG_SSUSB_T2RLB_BERTH_OFST (24)
+#define RG_SSUSB_T2RLB_PAT_OFST (16)
+#define RG_SSUSB_T2RLB_EN_OFST (15)
+#define RG_SSUSB_T2RLB_BPSCRAMB_OFST (14)
+#define RG_SSUSB_T2RLB_SERIAL_OFST (13)
+#define RG_SSUSB_T2RLB_MODE_OFST (11)
+#define RG_SSUSB_RX_SAOSC_EN_OFST (10)
+#define RG_SSUSB_RX_SAOSC_EN_SEL_OFST (9)
+#define RG_SSUSB_RX_DFE_OPTION_OFST (8)
+#define RG_SSUSB_RX_DFE_EN_OFST (7)
+#define RG_SSUSB_RX_DFE_EN_SEL_OFST (6)
+#define RG_SSUSB_RX_EQ_EN_OFST (5)
+#define RG_SSUSB_RX_EQ_EN_SEL_OFST (4)
+#define RG_SSUSB_RX_SAOSC_RST_OFST (3)
+#define RG_SSUSB_RX_SAOSC_RST_SEL_OFST (2)
+#define RG_SSUSB_RX_DFE_RST_OFST (1)
+#define RG_SSUSB_RX_DFE_RST_SEL_OFST (0)
+
+//U3D_PHYD_T2RLB
+#define RG_SSUSB_EQTRAIN_CH_MODE_OFST (28)
+#define RG_SSUSB_PRB_OUT_CPPAT_OFST (27)
+#define RG_SSUSB_BPANSIENC_OFST (26)
+#define RG_SSUSB_VALID_EN_OFST (25)
+#define RG_SSUSB_EBUF_SRST_OFST (24)
+#define RG_SSUSB_K_EMP_OFST (20)
+#define RG_SSUSB_K_FUL_OFST (16)
+#define RG_SSUSB_T2RLB_BDATRST_OFST (12)
+#define RG_SSUSB_P_T2RLB_SKP_EN_OFST (10)
+#define RG_SSUSB_T2RLB_PATMODE_OFST (8)
+#define RG_SSUSB_T2RLB_TSEQCNT_OFST (0)
+
+//U3D_PHYD_CPPAT
+#define RG_SSUSB_CPPAT_PROGRAM_EN_OFST (24)
+#define RG_SSUSB_CPPAT_TOZ_OFST (21)
+#define RG_SSUSB_CPPAT_PRBS_EN_OFST (20)
+#define RG_SSUSB_CPPAT_OUT_TMP2_OFST (16)
+#define RG_SSUSB_CPPAT_OUT_TMP1_OFST (8)
+#define RG_SSUSB_CPPAT_OUT_TMP0_OFST (0)
+
+//U3D_PHYD_MIX3
+#define RG_SSUSB_CDR_TCADJ_MINUS_OFST (31)
+#define RG_SSUSB_P_CDROS_EN_OFST (30)
+#define RG_SSUSB_P_P2_TX_DRV_DIS_OFST (28)
+#define RG_SSUSB_CDR_TCADJ_OFFSET_OFST (24)
+#define RG_SSUSB_PLL_TCADJ_MINUS_OFST (23)
+#define RG_SSUSB_FORCE_PLL_BIAS_LPF_EN_OFST (20)
+#define RG_SSUSB_PLL_BIAS_LPF_EN_OFST (19)
+#define RG_SSUSB_PLL_TCADJ_OFFSET_OFST (16)
+#define RG_SSUSB_FORCE_PLL_SSCEN_OFST (15)
+#define RG_SSUSB_PLL_SSCEN_OFST (14)
+#define RG_SSUSB_FORCE_CDR_PI_PWD_OFST (13)
+#define RG_SSUSB_CDR_PI_PWD_OFST (12)
+#define RG_SSUSB_CDR_PI_MODE_OFST (11)
+#define RG_SSUSB_TXPLL_SSCEN_CYC_OFST (0)
+
+//U3D_PHYD_EBUFCTL
+#define RG_SSUSB_EBUFCTL_OFST (0)
+
+//U3D_PHYD_PIPE0
+#define RG_SSUSB_RXTERMINATION_OFST (30)
+#define RG_SSUSB_RXEQTRAINING_OFST (29)
+#define RG_SSUSB_RXPOLARITY_OFST (28)
+#define RG_SSUSB_TXDEEMPH_OFST (26)
+#define RG_SSUSB_POWERDOWN_OFST (24)
+#define RG_SSUSB_TXONESZEROS_OFST (23)
+#define RG_SSUSB_TXELECIDLE_OFST (22)
+#define RG_SSUSB_TXDETECTRX_OFST (21)
+#define RG_SSUSB_PIPE_SEL_OFST (20)
+#define RG_SSUSB_TXDATAK_OFST (16)
+#define RG_SSUSB_CDR_STABLE_SEL_OFST (15)
+#define RG_SSUSB_CDR_STABLE_OFST (14)
+#define RG_SSUSB_CDR_RSTB_SEL_OFST (13)
+#define RG_SSUSB_CDR_RSTB_OFST (12)
+#define RG_SSUSB_P_ERROR_SEL_OFST (4)
+#define RG_SSUSB_TXMARGIN_OFST (1)
+#define RG_SSUSB_TXCOMPLIANCE_OFST (0)
+
+//U3D_PHYD_PIPE1
+#define RG_SSUSB_TXDATA_OFST (0)
+
+//U3D_PHYD_MIX4
+#define RG_SSUSB_CDROS_CNT_OFST (24)
+#define RG_SSUSB_T2RLB_BER_EN_OFST (16)
+#define RG_SSUSB_T2RLB_BER_RATE_OFST (0)
+
+//U3D_PHYD_CKGEN0
+#define RG_SSUSB_RFIFO_IMPLAT_OFST (27)
+#define RG_SSUSB_TFIFO_PSEL_OFST (24)
+#define RG_SSUSB_CKGEN_PSEL_OFST (8)
+#define RG_SSUSB_RXCK_INV_OFST (0)
+
+//U3D_PHYD_MIX5
+#define RG_SSUSB_PRB_SEL_OFST (16)
+#define RG_SSUSB_RXPLL_STBCYC_OFST (0)
+
+//U3D_PHYD_RESERVED
+#define RG_SSUSB_PHYD_RESERVE_OFST (0)
+//#define RG_SSUSB_RX_SIGDET_SEL_OFST (11)
+//#define RG_SSUSB_RX_SIGDET_EN_OFST (12)
+//#define RG_SSUSB_RX_PI_CAL_MANUAL_SEL_OFST (9)
+//#define RG_SSUSB_RX_PI_CAL_MANUAL_EN_OFST (10)
+
+//U3D_PHYD_CDR0
+#define RG_SSUSB_CDR_BIC_LTR_OFST (28)
+#define RG_SSUSB_CDR_BIC_LTD0_OFST (24)
+#define RG_SSUSB_CDR_BC_LTD1_OFST (16)
+#define RG_SSUSB_CDR_BC_LTR_OFST (8)
+#define RG_SSUSB_CDR_BC_LTD0_OFST (0)
+
+//U3D_PHYD_CDR1
+#define RG_SSUSB_CDR_BIR_LTD1_OFST (24)
+#define RG_SSUSB_CDR_BIR_LTR_OFST (16)
+#define RG_SSUSB_CDR_BIR_LTD0_OFST (8)
+#define RG_SSUSB_CDR_BW_SEL_OFST (6)
+#define RG_SSUSB_CDR_BIC_LTD1_OFST (0)
+
+//U3D_PHYD_PLL_0
+#define RG_SSUSB_FORCE_CDR_BAND_5G_OFST (28)
+#define RG_SSUSB_FORCE_CDR_BAND_2P5G_OFST (27)
+#define RG_SSUSB_FORCE_PLL_BAND_5G_OFST (26)
+#define RG_SSUSB_FORCE_PLL_BAND_2P5G_OFST (25)
+#define RG_SSUSB_P_EQ_T_SEL_OFST (15)
+#define RG_SSUSB_PLL_ISO_EN_CYC_OFST (5)
+#define RG_SSUSB_PLLBAND_RECAL_OFST (4)
+#define RG_SSUSB_PLL_DDS_ISO_EN_OFST (3)
+#define RG_SSUSB_FORCE_PLL_DDS_ISO_EN_OFST (2)
+#define RG_SSUSB_PLL_DDS_PWR_ON_OFST (1)
+#define RG_SSUSB_FORCE_PLL_DDS_PWR_ON_OFST (0)
+
+//U3D_PHYD_PLL_1
+#define RG_SSUSB_CDR_BAND_5G_OFST (24)
+#define RG_SSUSB_CDR_BAND_2P5G_OFST (16)
+#define RG_SSUSB_PLL_BAND_5G_OFST (8)
+#define RG_SSUSB_PLL_BAND_2P5G_OFST (0)
+
+//U3D_PHYD_BCN_DET_1
+#define RG_SSUSB_P_BCN_OBS_PRD_OFST (16)
+#define RG_SSUSB_U_BCN_OBS_PRD_OFST (0)
+
+//U3D_PHYD_BCN_DET_2
+#define RG_SSUSB_P_BCN_OBS_SEL_OFST (16)
+#define RG_SSUSB_BCN_DET_DIS_OFST (12)
+#define RG_SSUSB_U_BCN_OBS_SEL_OFST (0)
+
+//U3D_EQ0
+#define RG_SSUSB_EQ_DLHL_LFI_OFST (24)
+#define RG_SSUSB_EQ_DHHL_LFI_OFST (16)
+#define RG_SSUSB_EQ_DD0HOS_LFI_OFST (8)
+#define RG_SSUSB_EQ_DD0LOS_LFI_OFST (0)
+
+//U3D_EQ1
+#define RG_SSUSB_EQ_DD1HOS_LFI_OFST (24)
+#define RG_SSUSB_EQ_DD1LOS_LFI_OFST (16)
+#define RG_SSUSB_EQ_DE0OS_LFI_OFST (8)
+#define RG_SSUSB_EQ_DE1OS_LFI_OFST (0)
+
+//U3D_EQ2
+#define RG_SSUSB_EQ_DLHLOS_LFI_OFST (24)
+#define RG_SSUSB_EQ_DHHLOS_LFI_OFST (16)
+#define RG_SSUSB_EQ_STOPTIME_OFST (14)
+#define RG_SSUSB_EQ_DHHL_LF_SEL_OFST (11)
+#define RG_SSUSB_EQ_DSAOS_LF_SEL_OFST (8)
+#define RG_SSUSB_EQ_STARTTIME_OFST (6)
+#define RG_SSUSB_EQ_DLEQ_LF_SEL_OFST (3)
+#define RG_SSUSB_EQ_DLHL_LF_SEL_OFST (0)
+
+//U3D_EQ3
+#define RG_SSUSB_EQ_DLEQ_LFI_GEN2_OFST (28)
+#define RG_SSUSB_EQ_DLEQ_LFI_GEN1_OFST (24)
+#define RG_SSUSB_EQ_DEYE0OS_LFI_OFST (16)
+#define RG_SSUSB_EQ_DEYE1OS_LFI_OFST (8)
+#define RG_SSUSB_EQ_TRI_DET_EN_OFST (7)
+#define RG_SSUSB_EQ_TRI_DET_TH_OFST (0)
+
+//U3D_EQ_EYE0
+#define RG_SSUSB_EQ_EYE_XOFFSET_OFST (25)
+#define RG_SSUSB_EQ_EYE_MON_EN_OFST (24)
+#define RG_SSUSB_EQ_EYE0_Y_OFST (16)
+#define RG_SSUSB_EQ_EYE1_Y_OFST (8)
+#define RG_SSUSB_EQ_PILPO_ROUT_OFST (7)
+#define RG_SSUSB_EQ_PI_KPGAIN_OFST (4)
+#define RG_SSUSB_EQ_EYE_CNT_EN_OFST (3)
+
+//U3D_EQ_EYE1
+#define RG_SSUSB_EQ_SIGDET_OFST (24)
+#define RG_SSUSB_EQ_EYE_MASK_OFST (7)
+
+//U3D_EQ_EYE2
+#define RG_SSUSB_EQ_RX500M_CK_SEL_OFST (31)
+#define RG_SSUSB_EQ_SD_CNT1_OFST (24)
+#define RG_SSUSB_EQ_ISIFLAG_SEL_OFST (22)
+#define RG_SSUSB_EQ_SD_CNT0_OFST (16)
+
+//U3D_EQ_DFE0
+#define RG_SSUSB_EQ_LEQMAX_OFST (28)
+#define RG_SSUSB_EQ_DFEX_EN_OFST (27)
+#define RG_SSUSB_EQ_DFEX_LF_SEL_OFST (24)
+#define RG_SSUSB_EQ_CHK_EYE_H_OFST (23)
+#define RG_SSUSB_EQ_PIEYE_INI_OFST (16)
+#define RG_SSUSB_EQ_PI90_INI_OFST (8)
+#define RG_SSUSB_EQ_PI0_INI_OFST (0)
+
+//U3D_EQ_DFE1
+#define RG_SSUSB_EQ_REV_OFST (16)
+#define RG_SSUSB_EQ_DFEYEN_DUR_OFST (12)
+#define RG_SSUSB_EQ_DFEXEN_DUR_OFST (8)
+#define RG_SSUSB_EQ_DFEX_RST_OFST (7)
+#define RG_SSUSB_EQ_GATED_RXD_B_OFST (6)
+#define RG_SSUSB_EQ_PI90CK_SEL_OFST (4)
+#define RG_SSUSB_EQ_DFEX_DIS_OFST (2)
+#define RG_SSUSB_EQ_DFEYEN_STOP_DIS_OFST (1)
+#define RG_SSUSB_EQ_DFEXEN_SEL_OFST (0)
+
+//U3D_EQ_DFE2
+#define RG_SSUSB_EQ_MON_SEL_OFST (24)
+#define RG_SSUSB_EQ_LEQOSC_DLYCNT_OFST (16)
+#define RG_SSUSB_EQ_DLEQOS_LFI_OFST (8)
+#define RG_SSUSB_EQ_LEQ_STOP_TO_OFST (0)
+
+//U3D_EQ_DFE3
+#define RG_SSUSB_EQ_RESERVED_OFST (0)
+
+//U3D_PHYD_MON0
+#define RGS_SSUSB_BERT_BERC_OFST (16)
+#define RGS_SSUSB_LFPS_OFST (12)
+#define RGS_SSUSB_TRAINDEC_OFST (8)
+#define RGS_SSUSB_SCP_PAT_OFST (0)
+
+//U3D_PHYD_MON1
+#define RGS_SSUSB_RX_FL_OUT_OFST (0)
+
+//U3D_PHYD_MON2
+#define RGS_SSUSB_T2RLB_ERRCNT_OFST (16)
+#define RGS_SSUSB_RETRACK_OFST (12)
+#define RGS_SSUSB_RXPLL_LOCK_OFST (10)
+#define RGS_SSUSB_CDR_VCOCAL_CPLT_D_OFST (9)
+#define RGS_SSUSB_PLL_VCOCAL_CPLT_D_OFST (8)
+#define RGS_SSUSB_PDNCTL_OFST (0)
+
+//U3D_PHYD_MON3
+#define RGS_SSUSB_TSEQ_ERRCNT_OFST (16)
+#define RGS_SSUSB_PRBS_ERRCNT_OFST (0)
+
+//U3D_PHYD_MON4
+#define RGS_SSUSB_RX_LSLOCK_CNT_OFST (24)
+#define RGS_SSUSB_SCP_DETCNT_OFST (16)
+#define RGS_SSUSB_TSEQ_DETCNT_OFST (0)
+
+//U3D_PHYD_MON5
+#define RGS_SSUSB_EBUFMSG_OFST (16)
+#define RGS_SSUSB_BERT_LOCK_OFST (15)
+#define RGS_SSUSB_SCP_DET_OFST (14)
+#define RGS_SSUSB_TSEQ_DET_OFST (13)
+#define RGS_SSUSB_EBUF_UDF_OFST (12)
+#define RGS_SSUSB_EBUF_OVF_OFST (11)
+#define RGS_SSUSB_PRBS_PASSTH_OFST (10)
+#define RGS_SSUSB_PRBS_PASS_OFST (9)
+#define RGS_SSUSB_PRBS_LOCK_OFST (8)
+#define RGS_SSUSB_T2RLB_ERR_OFST (6)
+#define RGS_SSUSB_T2RLB_PASSTH_OFST (5)
+#define RGS_SSUSB_T2RLB_PASS_OFST (4)
+#define RGS_SSUSB_T2RLB_LOCK_OFST (3)
+#define RGS_SSUSB_RX_IMPCAL_DONE_OFST (2)
+#define RGS_SSUSB_TX_IMPCAL_DONE_OFST (1)
+#define RGS_SSUSB_RXDETECTED_OFST (0)
+
+//U3D_PHYD_MON6
+#define RGS_SSUSB_SIGCAL_DONE_OFST (30)
+#define RGS_SSUSB_SIGCAL_CAL_OUT_OFST (29)
+#define RGS_SSUSB_SIGCAL_OFFSET_OFST (24)
+#define RGS_SSUSB_RX_IMP_SEL_OFST (16)
+#define RGS_SSUSB_TX_IMP_SEL_OFST (8)
+#define RGS_SSUSB_TFIFO_MSG_OFST (4)
+#define RGS_SSUSB_RFIFO_MSG_OFST (0)
+
+//U3D_PHYD_MON7
+#define RGS_SSUSB_FT_OUT_OFST (8)
+#define RGS_SSUSB_PRB_OUT_OFST (0)
+
+//U3D_PHYA_RX_MON0
+#define RGS_SSUSB_EQ_DCLEQ_OFST (24)
+#define RGS_SSUSB_EQ_DCD0H_OFST (16)
+#define RGS_SSUSB_EQ_DCD0L_OFST (8)
+#define RGS_SSUSB_EQ_DCD1H_OFST (0)
+
+//U3D_PHYA_RX_MON1
+#define RGS_SSUSB_EQ_DCD1L_OFST (24)
+#define RGS_SSUSB_EQ_DCE0_OFST (16)
+#define RGS_SSUSB_EQ_DCE1_OFST (8)
+#define RGS_SSUSB_EQ_DCHHL_OFST (0)
+
+//U3D_PHYA_RX_MON2
+#define RGS_SSUSB_EQ_LEQ_STOP_OFST (31)
+#define RGS_SSUSB_EQ_DCLHL_OFST (24)
+#define RGS_SSUSB_EQ_STATUS_OFST (16)
+#define RGS_SSUSB_EQ_DCEYE0_OFST (8)
+#define RGS_SSUSB_EQ_DCEYE1_OFST (0)
+
+//U3D_PHYA_RX_MON3
+#define RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_0_OFST (0)
+
+//U3D_PHYA_RX_MON4
+#define RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_1_OFST (0)
+
+//U3D_PHYA_RX_MON5
+#define RGS_SSUSB_EQ_DCLEQOS_OFST (8)
+#define RGS_SSUSB_EQ_EYE_CNT_RDY_OFST (7)
+#define RGS_SSUSB_EQ_PILPO_OFST (0)
+
+//U3D_PHYD_CPPAT2
+#define RG_SSUSB_CPPAT_OUT_H_TMP2_OFST (16)
+#define RG_SSUSB_CPPAT_OUT_H_TMP1_OFST (8)
+#define RG_SSUSB_CPPAT_OUT_H_TMP0_OFST (0)
+
+//U3D_EQ_EYE3
+#define RG_SSUSB_EQ_LEQ_SHIFT_OFST (24)
+#define RG_SSUSB_EQ_EYE_CNT_OFST (0)
+
+//U3D_KBAND_OUT
+#define RGS_SSUSB_CDR_BAND_5G_OFST (24)
+#define RGS_SSUSB_CDR_BAND_2P5G_OFST (16)
+#define RGS_SSUSB_PLL_BAND_5G_OFST (8)
+#define RGS_SSUSB_PLL_BAND_2P5G_OFST (0)
+
+//U3D_KBAND_OUT1
+#define RGS_SSUSB_CDR_VCOCAL_FAIL_OFST (24)
+#define RGS_SSUSB_CDR_VCOCAL_STATE_OFST (16)
+#define RGS_SSUSB_PLL_VCOCAL_FAIL_OFST (8)
+#define RGS_SSUSB_PLL_VCOCAL_STATE_OFST (0)
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct u3phyd_bank2_reg {	/* USB3 PHYD bank2 register map: one PHY_LE32 per 32-bit register; //0xNN comments mark byte offsets */
+	//0x0
+	PHY_LE32 b2_phyd_top1;
+	PHY_LE32 b2_phyd_top2;
+	PHY_LE32 b2_phyd_top3;
+	PHY_LE32 b2_phyd_top4;
+	//0x10
+	PHY_LE32 b2_phyd_top5;
+	PHY_LE32 b2_phyd_top6;
+	PHY_LE32 b2_phyd_top7;
+	PHY_LE32 b2_phyd_p_sigdet1;
+	//0x20
+	PHY_LE32 b2_phyd_p_sigdet2;
+	PHY_LE32 b2_phyd_p_sigdet_cal1;
+	PHY_LE32 b2_phyd_rxdet1;
+	PHY_LE32 b2_phyd_rxdet2;
+	//0x30
+	PHY_LE32 b2_phyd_misc0;
+	PHY_LE32 b2_phyd_misc2;
+	PHY_LE32 b2_phyd_misc3;
+	PHY_LE32 reserve0;	/* pad: keeps following registers at their hardware offsets */
+	//0x40
+	PHY_LE32 b2_rosc_0;
+	PHY_LE32 b2_rosc_1;
+	PHY_LE32 b2_rosc_2;
+	PHY_LE32 b2_rosc_3;
+	//0x50
+	PHY_LE32 b2_rosc_4;
+	PHY_LE32 b2_rosc_5;
+	PHY_LE32 b2_rosc_6;
+	PHY_LE32 b2_rosc_7;
+	//0x60
+	PHY_LE32 b2_rosc_8;
+	PHY_LE32 b2_rosc_9;
+	PHY_LE32 b2_rosc_a;
+	PHY_LE32 reserve1;	/* pad */
+	//0x70~0xd0
+	PHY_LE32 reserve2[28];	/* pad up to the version/model registers at 0xe0 */
+	//0xe0
+	PHY_LE32 phyd_version;	/* RGS_SSUSB_PHYD_VERSION */
+	PHY_LE32 phyd_model;	/* RGS_SSUSB_PHYD_MODEL */
+};
+
+//U3D_B2_PHYD_TOP1
+#define RG_SSUSB_PCIE2_K_EMP (0xf<<28) //31:28
+#define RG_SSUSB_PCIE2_K_FUL (0xf<<24) //27:24
+#define RG_SSUSB_TX_EIDLE_LP_EN (0x1<<17) //17:17
+#define RG_SSUSB_FORCE_TX_EIDLE_LP_EN (0x1<<16) //16:16
+#define RG_SSUSB_SIGDET_EN (0x1<<15) //15:15
+#define RG_SSUSB_FORCE_SIGDET_EN (0x1<<14) //14:14
+#define RG_SSUSB_CLKRX_EN (0x1<<13) //13:13
+#define RG_SSUSB_FORCE_CLKRX_EN (0x1<<12) //12:12
+#define RG_SSUSB_CLKTX_EN (0x1<<11) //11:11
+#define RG_SSUSB_FORCE_CLKTX_EN (0x1<<10) //10:10
+#define RG_SSUSB_CLK_REQ_N_I (0x1<<9) //9:9
+#define RG_SSUSB_FORCE_CLK_REQ_N_I (0x1<<8) //8:8
+#define RG_SSUSB_RATE (0x1<<6) //6:6
+#define RG_SSUSB_FORCE_RATE (0x1<<5) //5:5
+#define RG_SSUSB_PCIE_MODE_SEL (0x1<<4) //4:4
+#define RG_SSUSB_FORCE_PCIE_MODE_SEL (0x1<<3) //3:3
+#define RG_SSUSB_PHY_MODE (0x3<<1) //2:1
+#define RG_SSUSB_FORCE_PHY_MODE (0x1<<0) //0:0
+
+//U3D_B2_PHYD_TOP2
+#define RG_SSUSB_FORCE_IDRV_6DB (0x1<<30) //30:30
+#define RG_SSUSB_IDRV_6DB (0x3f<<24) //29:24
+#define RG_SSUSB_FORCE_IDEM_3P5DB (0x1<<22) //22:22
+#define RG_SSUSB_IDEM_3P5DB (0x3f<<16) //21:16
+#define RG_SSUSB_FORCE_IDRV_3P5DB (0x1<<14) //14:14
+#define RG_SSUSB_IDRV_3P5DB (0x3f<<8) //13:8
+#define RG_SSUSB_FORCE_IDRV_0DB (0x1<<6) //6:6
+#define RG_SSUSB_IDRV_0DB (0x3f<<0) //5:0
+
+//U3D_B2_PHYD_TOP3
+#define RG_SSUSB_TX_BIASI (0x7<<25) //27:25
+#define RG_SSUSB_FORCE_TX_BIASI_EN (0x1<<24) //24:24
+#define RG_SSUSB_TX_BIASI_EN (0x1<<16) //16:16
+#define RG_SSUSB_FORCE_TX_BIASI (0x1<<13) //13:13
+#define RG_SSUSB_FORCE_IDEM_6DB (0x1<<8) //8:8
+#define RG_SSUSB_IDEM_6DB (0x3f<<0) //5:0
+
+//U3D_B2_PHYD_TOP4
+#define RG_SSUSB_G1_CDR_BIC_LTR (0xf<<28) //31:28
+#define RG_SSUSB_G1_CDR_BIC_LTD0 (0xf<<24) //27:24
+#define RG_SSUSB_G1_CDR_BC_LTD1 (0x1f<<16) //20:16
+#define RG_SSUSB_G1_CDR_BC_LTR (0x1f<<8) //12:8
+#define RG_SSUSB_G1_CDR_BC_LTD0 (0x1f<<0) //4:0
+
+//U3D_B2_PHYD_TOP5
+#define RG_SSUSB_G1_CDR_BIR_LTD1 (0x1f<<24) //28:24
+#define RG_SSUSB_G1_CDR_BIR_LTR (0x1f<<16) //20:16
+#define RG_SSUSB_G1_CDR_BIR_LTD0 (0x1f<<8) //12:8
+#define RG_SSUSB_G1_CDR_BIC_LTD1 (0xf<<0) //3:0
+
+//U3D_B2_PHYD_TOP6
+#define RG_SSUSB_G2_CDR_BIC_LTR (0xf<<28) //31:28
+#define RG_SSUSB_G2_CDR_BIC_LTD0 (0xf<<24) //27:24
+#define RG_SSUSB_G2_CDR_BC_LTD1 (0x1f<<16) //20:16
+#define RG_SSUSB_G2_CDR_BC_LTR (0x1f<<8) //12:8
+#define RG_SSUSB_G2_CDR_BC_LTD0 (0x1f<<0) //4:0
+
+//U3D_B2_PHYD_TOP7
+#define RG_SSUSB_G2_CDR_BIR_LTD1 (0x1f<<24) //28:24
+#define RG_SSUSB_G2_CDR_BIR_LTR (0x1f<<16) //20:16
+#define RG_SSUSB_G2_CDR_BIR_LTD0 (0x1f<<8) //12:8
+#define RG_SSUSB_G2_CDR_BIC_LTD1 (0xf<<0) //3:0
+
+//U3D_B2_PHYD_P_SIGDET1
+#define RG_SSUSB_P_SIGDET_FLT_DIS (0x1<<31) //31:31
+#define RG_SSUSB_P_SIGDET_FLT_G2_DEAST_SEL (0x7f<<24) //30:24
+#define RG_SSUSB_P_SIGDET_FLT_G1_DEAST_SEL (0x7f<<16) //22:16
+#define RG_SSUSB_P_SIGDET_FLT_P2_AST_SEL (0x7f<<8) //14:8
+#define RG_SSUSB_P_SIGDET_FLT_PX_AST_SEL (0x7f<<0) //6:0
+
+//U3D_B2_PHYD_P_SIGDET2
+#define RG_SSUSB_P_SIGDET_RX_VAL_S (0x1<<29) //29:29
+#define RG_SSUSB_P_SIGDET_L0S_DEAS_SEL (0x1<<28) //28:28
+#define RG_SSUSB_P_SIGDET_L0_EXIT_S (0x1<<27) //27:27
+#define RG_SSUSB_P_SIGDET_L0S_EXIT_T_S (0x3<<25) //26:25
+#define RG_SSUSB_P_SIGDET_L0S_EXIT_S (0x1<<24) //24:24
+#define RG_SSUSB_P_SIGDET_L0S_ENTRY_S (0x1<<16) //16:16
+#define RG_SSUSB_P_SIGDET_PRB_SEL (0x1<<10) //10:10
+#define RG_SSUSB_P_SIGDET_BK_SIG_T (0x3<<8) //9:8
+#define RG_SSUSB_P_SIGDET_P2_RXLFPS (0x1<<6) //6:6
+#define RG_SSUSB_P_SIGDET_NON_BK_AD (0x1<<5) //5:5
+#define RG_SSUSB_P_SIGDET_BK_B_RXEQ (0x1<<4) //4:4
+#define RG_SSUSB_P_SIGDET_G2_KO_SEL (0x3<<2) //3:2
+#define RG_SSUSB_P_SIGDET_G1_KO_SEL (0x3<<0) //1:0
+
+//U3D_B2_PHYD_P_SIGDET_CAL1
+#define RG_SSUSB_P_SIGDET_CAL_OFFSET (0x1f<<24) //28:24
+#define RG_SSUSB_P_FORCE_SIGDET_CAL_OFFSET (0x1<<16) //16:16
+#define RG_SSUSB_P_SIGDET_CAL_EN (0x1<<8) //8:8
+#define RG_SSUSB_P_FORCE_SIGDET_CAL_EN (0x1<<3) //3:3
+#define RG_SSUSB_P_SIGDET_FLT_EN (0x1<<2) //2:2
+#define RG_SSUSB_P_SIGDET_SAMPLE_PRD (0x1<<1) //1:1
+#define RG_SSUSB_P_SIGDET_REK (0x1<<0) //0:0
+
+//U3D_B2_PHYD_RXDET1
+#define RG_SSUSB_RXDET_PRB_SEL (0x1<<31) //31:31
+#define RG_SSUSB_FORCE_CMDET (0x1<<30) //30:30
+#define RG_SSUSB_RXDET_EN (0x1<<29) //29:29
+#define RG_SSUSB_FORCE_RXDET_EN (0x1<<28) //28:28
+#define RG_SSUSB_RXDET_K_TWICE (0x1<<27) //27:27
+#define RG_SSUSB_RXDET_STB3_SET (0x1ff<<18) //26:18
+#define RG_SSUSB_RXDET_STB2_SET (0x1ff<<9) //17:9
+#define RG_SSUSB_RXDET_STB1_SET (0x1ff<<0) //8:0
+
+//U3D_B2_PHYD_RXDET2
+#define RG_SSUSB_PHYD_TRAINDEC_FORCE_CGEN (0x1<<31) //31:31
+#define RG_SSUSB_PHYD_BERTLB_FORCE_CGEN (0x1<<30) //30:30
+#define RG_SSUSB_PHYD_T2RLB_FORCE_CGEN (0x1<<29) //29:29
+#define RG_SSUSB_PDN_T_SEL (0x3<<18) //19:18
+#define RG_SSUSB_RXDET_STB3_SET_P3 (0x1ff<<9) //17:9
+#define RG_SSUSB_RXDET_STB2_SET_P3 (0x1ff<<0) //8:0
+
+//U3D_B2_PHYD_MISC0
+#define RG_SSUSB_FORCE_PLL_DDS_HF_EN (0x1<<22) //22:22
+#define RG_SSUSB_PLL_DDS_HF_EN_MAN (0x1<<21) //21:21
+#define RG_SSUSB_RXLFPS_ENTXDRV (0x1<<20) //20:20
+#define RG_SSUSB_RX_FL_UNLOCKTH (0xf<<16) //19:16
+#define RG_SSUSB_LFPS_PSEL (0x1<<15) //15:15
+#define RG_SSUSB_RX_SIGDET_EN (0x1<<14) //14:14
+#define RG_SSUSB_RX_SIGDET_EN_SEL (0x1<<13) //13:13
+#define RG_SSUSB_RX_PI_CAL_EN (0x1<<12) //12:12
+#define RG_SSUSB_RX_PI_CAL_EN_SEL (0x1<<11) //11:11
+#define RG_SSUSB_P3_CLS_CK_SEL (0x1<<10) //10:10
+#define RG_SSUSB_T2RLB_PSEL (0x3<<8) //9:8
+#define RG_SSUSB_PPCTL_PSEL (0x7<<5) //7:5
+#define RG_SSUSB_PHYD_TX_DATA_INV (0x1<<4) //4:4
+#define RG_SSUSB_BERTLB_PSEL (0x3<<2) //3:2
+#define RG_SSUSB_RETRACK_DIS (0x1<<1) //1:1
+#define RG_SSUSB_PPERRCNT_CLR (0x1<<0) //0:0
+
+//U3D_B2_PHYD_MISC2
+#define RG_SSUSB_FRC_PLL_DDS_PREDIV2 (0x1<<31) //31:31
+#define RG_SSUSB_FRC_PLL_DDS_IADJ (0xf<<27) //30:27
+#define RG_SSUSB_P_SIGDET_125FILTER (0x1<<26) //26:26
+#define RG_SSUSB_P_SIGDET_RST_FILTER (0x1<<25) //25:25
+#define RG_SSUSB_P_SIGDET_EID_USE_RAW (0x1<<24) //24:24
+#define RG_SSUSB_P_SIGDET_LTD_USE_RAW (0x1<<23) //23:23
+#define RG_SSUSB_EIDLE_BF_RXDET (0x1<<22) //22:22
+#define RG_SSUSB_EIDLE_LP_STBCYC (0x1ff<<13) //21:13
+#define RG_SSUSB_TX_EIDLE_LP_POSTDLY (0x3f<<7) //12:7
+#define RG_SSUSB_TX_EIDLE_LP_PREDLY (0x3f<<1) //6:1
+#define RG_SSUSB_TX_EIDLE_LP_EN_ADV (0x1<<0) //0:0
+
+//U3D_B2_PHYD_MISC3
+#define RGS_SSUSB_DDS_CALIB_C_STATE (0x7<<16) //18:16
+#define RGS_SSUSB_PPERRCNT (0xffff<<0) //15:0
+
+//U3D_B2_ROSC_0
+#define RG_SSUSB_RING_OSC_CNTEND (0x1ff<<23) //31:23
+#define RG_SSUSB_XTAL_OSC_CNTEND (0x7f<<16) //22:16
+#define RG_SSUSB_RING_OSC_EN (0x1<<3) //3:3
+#define RG_SSUSB_RING_OSC_FORCE_EN (0x1<<2) //2:2
+#define RG_SSUSB_FRC_RING_BYPASS_DET (0x1<<1) //1:1
+#define RG_SSUSB_RING_BYPASS_DET (0x1<<0) //0:0
+
+//U3D_B2_ROSC_1
+#define RG_SSUSB_RING_OSC_FRC_P3 (0x1<<20) //20:20
+#define RG_SSUSB_RING_OSC_P3 (0x1<<19) //19:19
+#define RG_SSUSB_RING_OSC_FRC_RECAL (0x3<<17) //18:17
+#define RG_SSUSB_RING_OSC_RECAL (0x1<<16) //16:16
+#define RG_SSUSB_RING_OSC_SEL (0xff<<8) //15:8
+#define RG_SSUSB_RING_OSC_FRC_SEL (0x1<<0) //0:0
+
+//U3D_B2_ROSC_2
+#define RG_SSUSB_RING_DET_STRCYC2 (0xffff<<16) //31:16
+#define RG_SSUSB_RING_DET_STRCYC1 (0xffff<<0) //15:0
+
+//U3D_B2_ROSC_3
+#define RG_SSUSB_RING_DET_DETWIN1 (0xffff<<16) //31:16
+#define RG_SSUSB_RING_DET_STRCYC3 (0xffff<<0) //15:0
+
+//U3D_B2_ROSC_4
+#define RG_SSUSB_RING_DET_DETWIN3 (0xffff<<16) //31:16
+#define RG_SSUSB_RING_DET_DETWIN2 (0xffff<<0) //15:0
+
+//U3D_B2_ROSC_5
+#define RG_SSUSB_RING_DET_LBOND1 (0xffff<<16) //31:16
+#define RG_SSUSB_RING_DET_UBOND1 (0xffff<<0) //15:0
+
+//U3D_B2_ROSC_6
+#define RG_SSUSB_RING_DET_LBOND2 (0xffff<<16) //31:16
+#define RG_SSUSB_RING_DET_UBOND2 (0xffff<<0) //15:0
+
+//U3D_B2_ROSC_7
+#define RG_SSUSB_RING_DET_LBOND3 (0xffff<<16) //31:16
+#define RG_SSUSB_RING_DET_UBOND3 (0xffff<<0) //15:0
+
+//U3D_B2_ROSC_8
+#define RG_SSUSB_RING_RESERVE (0xffff<<16) //31:16
+#define RG_SSUSB_ROSC_PROB_SEL (0xf<<2) //5:2
+#define RG_SSUSB_RING_FREQMETER_EN (0x1<<1) //1:1
+#define RG_SSUSB_RING_DET_BPS_UBOND (0x1<<0) //0:0
+
+//U3D_B2_ROSC_9
+#define RGS_FM_RING_CNT (0xffff<<16) //31:16
+#define RGS_SSUSB_RING_OSC_STATE (0x3<<10) //11:10
+#define RGS_SSUSB_RING_OSC_STABLE (0x1<<9) //9:9
+#define RGS_SSUSB_RING_OSC_CAL_FAIL (0x1<<8) //8:8
+#define RGS_SSUSB_RING_OSC_CAL (0xff<<0) //7:0
+
+//U3D_B2_ROSC_A
+#define RGS_SSUSB_ROSC_PROB_OUT (0xff<<0) //7:0
+
+//U3D_PHYD_VERSION
+#define RGS_SSUSB_PHYD_VERSION (0xffffffff<<0) //31:0
+
+//U3D_PHYD_MODEL
+#define RGS_SSUSB_PHYD_MODEL (0xffffffff<<0) //31:0
+
+
+/* OFFSET */
+
+//U3D_B2_PHYD_TOP1
+#define RG_SSUSB_PCIE2_K_EMP_OFST (28)
+#define RG_SSUSB_PCIE2_K_FUL_OFST (24)
+#define RG_SSUSB_TX_EIDLE_LP_EN_OFST (17)
+#define RG_SSUSB_FORCE_TX_EIDLE_LP_EN_OFST (16)
+#define RG_SSUSB_SIGDET_EN_OFST (15)
+#define RG_SSUSB_FORCE_SIGDET_EN_OFST (14)
+#define RG_SSUSB_CLKRX_EN_OFST (13)
+#define RG_SSUSB_FORCE_CLKRX_EN_OFST (12)
+#define RG_SSUSB_CLKTX_EN_OFST (11)
+#define RG_SSUSB_FORCE_CLKTX_EN_OFST (10)
+#define RG_SSUSB_CLK_REQ_N_I_OFST (9)
+#define RG_SSUSB_FORCE_CLK_REQ_N_I_OFST (8)
+#define RG_SSUSB_RATE_OFST (6)
+#define RG_SSUSB_FORCE_RATE_OFST (5)
+#define RG_SSUSB_PCIE_MODE_SEL_OFST (4)
+#define RG_SSUSB_FORCE_PCIE_MODE_SEL_OFST (3)
+#define RG_SSUSB_PHY_MODE_OFST (1)
+#define RG_SSUSB_FORCE_PHY_MODE_OFST (0)
+
+//U3D_B2_PHYD_TOP2
+#define RG_SSUSB_FORCE_IDRV_6DB_OFST (30)
+#define RG_SSUSB_IDRV_6DB_OFST (24)
+#define RG_SSUSB_FORCE_IDEM_3P5DB_OFST (22)
+#define RG_SSUSB_IDEM_3P5DB_OFST (16)
+#define RG_SSUSB_FORCE_IDRV_3P5DB_OFST (14)
+#define RG_SSUSB_IDRV_3P5DB_OFST (8)
+#define RG_SSUSB_FORCE_IDRV_0DB_OFST (6)
+#define RG_SSUSB_IDRV_0DB_OFST (0)
+
+//U3D_B2_PHYD_TOP3
+#define RG_SSUSB_TX_BIASI_OFST (25)
+#define RG_SSUSB_FORCE_TX_BIASI_EN_OFST (24)
+#define RG_SSUSB_TX_BIASI_EN_OFST (16)
+#define RG_SSUSB_FORCE_TX_BIASI_OFST (13)
+#define RG_SSUSB_FORCE_IDEM_6DB_OFST (8)
+#define RG_SSUSB_IDEM_6DB_OFST (0)
+
+//U3D_B2_PHYD_TOP4
+#define RG_SSUSB_G1_CDR_BIC_LTR_OFST (28)
+#define RG_SSUSB_G1_CDR_BIC_LTD0_OFST (24)
+#define RG_SSUSB_G1_CDR_BC_LTD1_OFST (16)
+#define RG_SSUSB_G1_CDR_BC_LTR_OFST (8)
+#define RG_SSUSB_G1_CDR_BC_LTD0_OFST (0)
+
+//U3D_B2_PHYD_TOP5
+#define RG_SSUSB_G1_CDR_BIR_LTD1_OFST (24)
+#define RG_SSUSB_G1_CDR_BIR_LTR_OFST (16)
+#define RG_SSUSB_G1_CDR_BIR_LTD0_OFST (8)
+#define RG_SSUSB_G1_CDR_BIC_LTD1_OFST (0)
+
+//U3D_B2_PHYD_TOP6
+#define RG_SSUSB_G2_CDR_BIC_LTR_OFST (28)
+#define RG_SSUSB_G2_CDR_BIC_LTD0_OFST (24)
+#define RG_SSUSB_G2_CDR_BC_LTD1_OFST (16)
+#define RG_SSUSB_G2_CDR_BC_LTR_OFST (8)
+#define RG_SSUSB_G2_CDR_BC_LTD0_OFST (0)
+
+//U3D_B2_PHYD_TOP7
+#define RG_SSUSB_G2_CDR_BIR_LTD1_OFST (24)
+#define RG_SSUSB_G2_CDR_BIR_LTR_OFST (16)
+#define RG_SSUSB_G2_CDR_BIR_LTD0_OFST (8)
+#define RG_SSUSB_G2_CDR_BIC_LTD1_OFST (0)
+
+//U3D_B2_PHYD_P_SIGDET1
+#define RG_SSUSB_P_SIGDET_FLT_DIS_OFST (31)
+#define RG_SSUSB_P_SIGDET_FLT_G2_DEAST_SEL_OFST (24)
+#define RG_SSUSB_P_SIGDET_FLT_G1_DEAST_SEL_OFST (16)
+#define RG_SSUSB_P_SIGDET_FLT_P2_AST_SEL_OFST (8)
+#define RG_SSUSB_P_SIGDET_FLT_PX_AST_SEL_OFST (0)
+
+//U3D_B2_PHYD_P_SIGDET2
+#define RG_SSUSB_P_SIGDET_RX_VAL_S_OFST (29)
+#define RG_SSUSB_P_SIGDET_L0S_DEAS_SEL_OFST (28)
+#define RG_SSUSB_P_SIGDET_L0_EXIT_S_OFST (27)
+#define RG_SSUSB_P_SIGDET_L0S_EXIT_T_S_OFST (25)
+#define RG_SSUSB_P_SIGDET_L0S_EXIT_S_OFST (24)
+#define RG_SSUSB_P_SIGDET_L0S_ENTRY_S_OFST (16)
+#define RG_SSUSB_P_SIGDET_PRB_SEL_OFST (10)
+#define RG_SSUSB_P_SIGDET_BK_SIG_T_OFST (8)
+#define RG_SSUSB_P_SIGDET_P2_RXLFPS_OFST (6)
+#define RG_SSUSB_P_SIGDET_NON_BK_AD_OFST (5)
+#define RG_SSUSB_P_SIGDET_BK_B_RXEQ_OFST (4)
+#define RG_SSUSB_P_SIGDET_G2_KO_SEL_OFST (2)
+#define RG_SSUSB_P_SIGDET_G1_KO_SEL_OFST (0)
+
+//U3D_B2_PHYD_P_SIGDET_CAL1
+#define RG_SSUSB_P_SIGDET_CAL_OFFSET_OFST (24)
+#define RG_SSUSB_P_FORCE_SIGDET_CAL_OFFSET_OFST (16)
+#define RG_SSUSB_P_SIGDET_CAL_EN_OFST (8)
+#define RG_SSUSB_P_FORCE_SIGDET_CAL_EN_OFST (3)
+#define RG_SSUSB_P_SIGDET_FLT_EN_OFST (2)
+#define RG_SSUSB_P_SIGDET_SAMPLE_PRD_OFST (1)
+#define RG_SSUSB_P_SIGDET_REK_OFST (0)
+
+//U3D_B2_PHYD_RXDET1
+#define RG_SSUSB_RXDET_PRB_SEL_OFST (31)
+#define RG_SSUSB_FORCE_CMDET_OFST (30)
+#define RG_SSUSB_RXDET_EN_OFST (29)
+#define RG_SSUSB_FORCE_RXDET_EN_OFST (28)
+#define RG_SSUSB_RXDET_K_TWICE_OFST (27)
+#define RG_SSUSB_RXDET_STB3_SET_OFST (18)
+#define RG_SSUSB_RXDET_STB2_SET_OFST (9)
+#define RG_SSUSB_RXDET_STB1_SET_OFST (0)
+
+//U3D_B2_PHYD_RXDET2
+#define RG_SSUSB_PHYD_TRAINDEC_FORCE_CGEN_OFST (31)
+#define RG_SSUSB_PHYD_BERTLB_FORCE_CGEN_OFST (30)
+#define RG_SSUSB_PHYD_T2RLB_FORCE_CGEN_OFST (29)
+#define RG_SSUSB_PDN_T_SEL_OFST (18)
+#define RG_SSUSB_RXDET_STB3_SET_P3_OFST (9)
+#define RG_SSUSB_RXDET_STB2_SET_P3_OFST (0)
+
+//U3D_B2_PHYD_MISC0
+#define RG_SSUSB_FORCE_PLL_DDS_HF_EN_OFST (22)
+#define RG_SSUSB_PLL_DDS_HF_EN_MAN_OFST (21)
+#define RG_SSUSB_RXLFPS_ENTXDRV_OFST (20)
+#define RG_SSUSB_RX_FL_UNLOCKTH_OFST (16)
+#define RG_SSUSB_LFPS_PSEL_OFST (15)
+#define RG_SSUSB_RX_SIGDET_EN_OFST (14)
+#define RG_SSUSB_RX_SIGDET_EN_SEL_OFST (13)
+#define RG_SSUSB_RX_PI_CAL_EN_OFST (12)
+#define RG_SSUSB_RX_PI_CAL_EN_SEL_OFST (11)
+#define RG_SSUSB_P3_CLS_CK_SEL_OFST (10)
+#define RG_SSUSB_T2RLB_PSEL_OFST (8)
+#define RG_SSUSB_PPCTL_PSEL_OFST (5)
+#define RG_SSUSB_PHYD_TX_DATA_INV_OFST (4)
+#define RG_SSUSB_BERTLB_PSEL_OFST (2)
+#define RG_SSUSB_RETRACK_DIS_OFST (1)
+#define RG_SSUSB_PPERRCNT_CLR_OFST (0)
+
+//U3D_B2_PHYD_MISC2
+#define RG_SSUSB_FRC_PLL_DDS_PREDIV2_OFST (31)
+#define RG_SSUSB_FRC_PLL_DDS_IADJ_OFST (27)
+#define RG_SSUSB_P_SIGDET_125FILTER_OFST (26)
+#define RG_SSUSB_P_SIGDET_RST_FILTER_OFST (25)
+#define RG_SSUSB_P_SIGDET_EID_USE_RAW_OFST (24)
+#define RG_SSUSB_P_SIGDET_LTD_USE_RAW_OFST (23)
+#define RG_SSUSB_EIDLE_BF_RXDET_OFST (22)
+#define RG_SSUSB_EIDLE_LP_STBCYC_OFST (13)
+#define RG_SSUSB_TX_EIDLE_LP_POSTDLY_OFST (7)
+#define RG_SSUSB_TX_EIDLE_LP_PREDLY_OFST (1)
+#define RG_SSUSB_TX_EIDLE_LP_EN_ADV_OFST (0)
+
+//U3D_B2_PHYD_MISC3
+#define RGS_SSUSB_DDS_CALIB_C_STATE_OFST (16)
+#define RGS_SSUSB_PPERRCNT_OFST (0)
+
+//U3D_B2_ROSC_0
+#define RG_SSUSB_RING_OSC_CNTEND_OFST (23)
+#define RG_SSUSB_XTAL_OSC_CNTEND_OFST (16)
+#define RG_SSUSB_RING_OSC_EN_OFST (3)
+#define RG_SSUSB_RING_OSC_FORCE_EN_OFST (2)
+#define RG_SSUSB_FRC_RING_BYPASS_DET_OFST (1)
+#define RG_SSUSB_RING_BYPASS_DET_OFST (0)
+
+//U3D_B2_ROSC_1
+#define RG_SSUSB_RING_OSC_FRC_P3_OFST (20)
+#define RG_SSUSB_RING_OSC_P3_OFST (19)
+#define RG_SSUSB_RING_OSC_FRC_RECAL_OFST (17)
+#define RG_SSUSB_RING_OSC_RECAL_OFST (16)
+#define RG_SSUSB_RING_OSC_SEL_OFST (8)
+#define RG_SSUSB_RING_OSC_FRC_SEL_OFST (0)
+
+//U3D_B2_ROSC_2
+#define RG_SSUSB_RING_DET_STRCYC2_OFST (16)
+#define RG_SSUSB_RING_DET_STRCYC1_OFST (0)
+
+//U3D_B2_ROSC_3
+#define RG_SSUSB_RING_DET_DETWIN1_OFST (16)
+#define RG_SSUSB_RING_DET_STRCYC3_OFST (0)
+
+//U3D_B2_ROSC_4
+#define RG_SSUSB_RING_DET_DETWIN3_OFST (16)
+#define RG_SSUSB_RING_DET_DETWIN2_OFST (0)
+
+//U3D_B2_ROSC_5
+#define RG_SSUSB_RING_DET_LBOND1_OFST (16)
+#define RG_SSUSB_RING_DET_UBOND1_OFST (0)
+
+//U3D_B2_ROSC_6
+#define RG_SSUSB_RING_DET_LBOND2_OFST (16)
+#define RG_SSUSB_RING_DET_UBOND2_OFST (0)
+
+//U3D_B2_ROSC_7
+#define RG_SSUSB_RING_DET_LBOND3_OFST (16)
+#define RG_SSUSB_RING_DET_UBOND3_OFST (0)
+
+//U3D_B2_ROSC_8
+#define RG_SSUSB_RING_RESERVE_OFST (16)
+#define RG_SSUSB_ROSC_PROB_SEL_OFST (2)
+#define RG_SSUSB_RING_FREQMETER_EN_OFST (1)
+#define RG_SSUSB_RING_DET_BPS_UBOND_OFST (0)
+
+//U3D_B2_ROSC_9
+#define RGS_FM_RING_CNT_OFST (16)
+#define RGS_SSUSB_RING_OSC_STATE_OFST (10)
+#define RGS_SSUSB_RING_OSC_STABLE_OFST (9)
+#define RGS_SSUSB_RING_OSC_CAL_FAIL_OFST (8)
+#define RGS_SSUSB_RING_OSC_CAL_OFST (0)
+
+//U3D_B2_ROSC_A
+#define RGS_SSUSB_ROSC_PROB_OUT_OFST (0)
+
+//U3D_PHYD_VERSION
+#define RGS_SSUSB_PHYD_VERSION_OFST (0)
+
+//U3D_PHYD_MODEL
+#define RGS_SSUSB_PHYD_MODEL_OFST (0)
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct sifslv_chip_reg {	/* SIFSLV chip-common register map: consecutive 32-bit registers (PHY_LE32 each) */
+	PHY_LE32 xtalbias;	/* presumably crystal-oscillator bias control (named from register) — verify against datasheet */
+	PHY_LE32 syspll1;	/* presumably system PLL control — verify against datasheet */
+	PHY_LE32 gpio_ctla;	/* GPIO control A: RG_C60802_GPIO_CTLA covers bits 31:0 */
+	PHY_LE32 gpio_ctlb;	/* GPIO control B */
+	PHY_LE32 gpio_ctlc;	/* GPIO control C */
+};
+
+//U3D_GPIO_CTLA
+#define RG_C60802_GPIO_CTLA (0xffffffff<<0) //31:0
+
+//U3D_GPIO_CTLB
+#define RG_C60802_GPIO_CTLB (0xffffffff<<0) //31:0
+
+//U3D_GPIO_CTLC
+#define RG_C60802_GPIO_CTLC (0xffffffff<<0) //31:0
+
+/* OFFSET */
+
+//U3D_GPIO_CTLA
+#define RG_C60802_GPIO_CTLA_OFST (0)
+
+//U3D_GPIO_CTLB
+#define RG_C60802_GPIO_CTLB_OFST (0)
+
+//U3D_GPIO_CTLC
+#define RG_C60802_GPIO_CTLC_OFST (0)
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct sifslv_fm_feg {	/* frequency-meter (FM) register block; NOTE(review): "feg" looks like a typo for "reg", kept for compatibility */
+	//0x0
+	PHY_LE32 fmcr0;	/* FM control 0: RG_LOCKTH / RG_MONCLK_SEL / RG_FM_MODE / RG_FREQDET_EN / RG_CYCLECNT */
+	PHY_LE32 fmcr1;	/* FM control 1: RG_TARGET */
+	PHY_LE32 fmcr2;	/* FM control 2: RG_OFFSET */
+	PHY_LE32 fmmonr0;	/* FM monitor 0: USB_FM_OUT measurement result */
+	//0x10
+	PHY_LE32 fmmonr1;	/* FM monitor 1: RG_FRCK_EN / USBPLL_LOCK / USB_FM_VLD */
+};
+
+//U3D_FMCR0
+#define RG_LOCKTH (0xf<<28) //31:28
+#define RG_MONCLK_SEL (0x3<<26) //27:26
+#define RG_FM_MODE (0x1<<25) //25:25
+#define RG_FREQDET_EN (0x1<<24) //24:24
+#define RG_CYCLECNT (0xffffff<<0) //23:0
+
+//U3D_FMCR1
+#define RG_TARGET (0xffffffff<<0) //31:0
+
+//U3D_FMCR2
+#define RG_OFFSET (0xffffffff<<0) //31:0
+
+//U3D_FMMONR0
+#define USB_FM_OUT (0xffffffff<<0) //31:0
+
+//U3D_FMMONR1
+#define RG_MONCLK_SEL_3 (0x1<<9) //9:9
+#define RG_FRCK_EN (0x1<<8) //8:8
+#define USBPLL_LOCK (0x1<<1) //1:1
+#define USB_FM_VLD (0x1<<0) //0:0
+
+
+/* OFFSET */
+
+//U3D_FMCR0
+#define RG_LOCKTH_OFST (28)
+#define RG_MONCLK_SEL_OFST (26)
+#define RG_FM_MODE_OFST (25)
+#define RG_FREQDET_EN_OFST (24)
+#define RG_CYCLECNT_OFST (0)
+
+//U3D_FMCR1
+#define RG_TARGET_OFST (0)
+
+//U3D_FMCR2
+#define RG_OFFSET_OFST (0)
+
+//U3D_FMMONR0
+#define USB_FM_OUT_OFST (0)
+
+//U3D_FMMONR1
+#define RG_MONCLK_SEL_3_OFST (9)
+#define RG_FRCK_EN_OFST (8)
+#define USBPLL_LOCK_OFST (1)
+#define USB_FM_VLD_OFST (0)
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+PHY_INT32 phy_init(struct u3phy_info *info);
+PHY_INT32 phy_change_pipe_phase(struct u3phy_info *info, PHY_INT32 phy_drv, PHY_INT32 pipe_phase);
+PHY_INT32 eyescan_init(struct u3phy_info *info);
+PHY_INT32 phy_eyescan(struct u3phy_info *info, PHY_INT32 x_t1, PHY_INT32 y_t1, PHY_INT32 x_br, PHY_INT32 y_br, PHY_INT32 delta_x, PHY_INT32 delta_y
+ , PHY_INT32 eye_cnt, PHY_INT32 num_cnt, PHY_INT32 PI_cal_en, PHY_INT32 num_ignore_cnt);
+PHY_INT32 u2_save_cur_en(struct u3phy_info *info);
+PHY_INT32 u2_save_cur_re(struct u3phy_info *info);
+PHY_INT32 u2_slew_rate_calibration(struct u3phy_info *info);
+
+#endif
+#endif
Index: linux-3.18.21/drivers/usb/host/mtk-phy-ahb.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk-phy-ahb.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,58 @@
+#include "mtk-phy.h"
+#ifdef CONFIG_U3D_HAL_SUPPORT
+#include "mu3d_hal_osal.h"
+#endif
+
+#ifdef CONFIG_U3_PHY_AHB_SUPPORT
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#ifndef CONFIG_U3D_HAL_SUPPORT	/* fallback MMIO accessor macros (HAL absent); BUGFIX: parenthesize macro args and os_readl expansion */
+#define os_writel(addr,data) {\
+	(*((volatile PHY_UINT32*)(addr)) = (data));\
+	}
+#define os_readl(addr) (*((volatile PHY_UINT32*)(addr)))
+#define os_writelmsk(addr, data, msk) \
+	{ os_writel(addr, ((os_readl(addr) & ~(msk)) | ((data) & (msk)))); \
+	}
+#define os_setmsk(addr, msk) \
+	{ os_writel(addr, os_readl(addr) | (msk)); \
+	}
+#define os_clrmsk(addr, msk) \
+	{ os_writel(addr, os_readl(addr) & ~(msk)); \
+	}
+/* mask the data with msk first, then AND the whole result with umsk */
+#define os_writelmskumsk(addr, data, msk, umsk) \
+{\
+	os_writel(addr, ((os_readl(addr) & ~(msk)) | ((data) & (msk))) & (umsk));\
+}
+
+#endif
+
+PHY_INT32 U3PhyWriteReg32(PHY_UINT32 addr, PHY_UINT32 data)	/* 32-bit MMIO write to a PHY register; always returns 0 */
+{
+	os_writel(addr, data);
+
+	return 0;
+}
+
+PHY_INT32 U3PhyReadReg32(PHY_UINT32 addr)	/* 32-bit MMIO read of a PHY register */
+{
+	return os_readl(addr);
+}
+
+PHY_INT32 U3PhyWriteReg8(PHY_UINT32 addr, PHY_UINT8 data)	/* byte write emulated as read-modify-write on the word-aligned address; byte lane = addr%4 */
+{
+	os_writelmsk(addr&0xfffffffc, data<<((addr%4)*8), 0xff<<((addr%4)*8));
+
+	return 0;
+}
+
+PHY_INT8 U3PhyReadReg8(PHY_UINT32 addr)	/* byte read: shift lane addr%4 out of the 32-bit word and mask to 8 bits */
+{
+	return ((os_readl(addr)>>((addr%4)*8))&0xff);
+}
+
+#endif
+
Index: linux-3.18.21/drivers/usb/host/mtk-phy.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk-phy.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,406 @@
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <asm/tc3162/tc3162.h>
+#define U3_PHY_LIB
+#include "mtk-phy.h"
+#ifdef CONFIG_C60802_SUPPORT
+#include "mtk-phy-c60802.h"
+#endif
+#ifdef CONFIG_D60802_SUPPORT
+#include "mtk-phy-d60802.h"
+#endif
+#ifdef CONFIG_PROJECT_7662
+#include "mtk-phy-7662.h"
+#endif
+#ifdef CONFIG_PROJECT_5399
+#include "mtk-phy-5399.h"
+#endif
+#ifdef CONFIG_PROJECT_7512
+#include "mtk-phy-7512.h"
+#endif
+#ifdef CONFIG_PROJECT_7628
+#include "mtk-phy-7628.h"
+#endif
+#ifdef CONFIG_C60802_SUPPORT
+static const struct u3phy_operator c60802_operators = {	/* ops dispatch table for the C60802 test-chip PHY */
+	.init = phy_init_c60802,
+	.change_pipe_phase = phy_change_pipe_phase_c60802,
+	.eyescan_init = eyescan_init_c60802,
+	.eyescan = phy_eyescan_c60802,
+	.u2_save_current_entry = u2_save_cur_en_c60802,
+	.u2_save_current_recovery = u2_save_cur_re_c60802,
+	.u2_slew_rate_calibration = u2_slew_rate_calibration_c60802,
+};
+#endif
+#ifdef CONFIG_D60802_SUPPORT
+static const struct u3phy_operator d60802_operators = {	/* ops dispatch table for the D60802 test-chip PHY; save-current hooks left unset */
+	.init = phy_init_d60802,
+	.change_pipe_phase = phy_change_pipe_phase_d60802,
+	.eyescan_init = eyescan_init_d60802,
+	.eyescan = phy_eyescan_d60802,
+	//.u2_save_current_entry = u2_save_cur_en_d60802,
+	//.u2_save_current_recovery = u2_save_cur_re_d60802,
+	.u2_slew_rate_calibration = u2_slew_rate_calibration_d60802,
+};
+#endif
+#ifdef CONFIG_PROJECT_PHY
+static struct u3phy_operator project_operators = {	/* ops dispatch table for the project PHY; functions declared in the PHY header */
+	.init = phy_init,
+	.change_pipe_phase = phy_change_pipe_phase,
+	.eyescan_init = eyescan_init,
+	.eyescan = phy_eyescan,
+	.u2_slew_rate_calibration = u2_slew_rate_calibration,
+};
+#endif
+
+static void setup_25M_PLL(void)	/* program USB PHY PLL registers for a 25 MHz reference clock; values are vendor-supplied magic, meanings undocumented here. FIX: "void static" -> "static void" (storage class first; old order is obsolescent per C11 6.11.5) */
+{
+
+	U3PhyWriteReg8(0xbfa80c1c, 0x18);
+	U3PhyWriteReg8(0xbfa80c1d, 0x18);
+	U3PhyWriteReg8(0xbfa80c1f, 0x18);
+	U3PhyWriteReg32(0xbfa80c24, 0x18000000);
+	U3PhyWriteReg32(0xbfa80c28, 0x18000000);
+	U3PhyWriteReg32(0xbfa80c30, 0x18000000);
+	U3PhyWriteReg32(0xbfa80c38, 0x004a004a);
+	U3PhyWriteReg8(0xbfa80c3e, 0x4a);
+	U3PhyWriteReg8(0xbfa80c3f, 0x0);
+	U3PhyWriteReg8(0xbfa80c42, 0x48);
+	U3PhyWriteReg8(0xbfa80c43, 0x0);
+	U3PhyWriteReg8(0xbfa80c44, 0x48);
+	U3PhyWriteReg8(0xbfa80c45, 0x0);
+	U3PhyWriteReg8(0xbfa80c48, 0x48);
+	U3PhyWriteReg8(0xbfa80c49, 0x0);
+
+	U3PhyWriteReg8(0xbfa80b24, 0x90);
+	U3PhyWriteReg8(0xbfa80b25, 0x1);
+	U3PhyWriteReg32(0xbfa80b10, 0x1c000000);
+	U3PhyWriteReg8(0xbfa80b0b, 0xe);
+	return;
+
+}
+PHY_INT32 u3phy_config_751221(void)	/* per-variant USB port enable for the EN751221 family; returns 1 if any port enabled, 0 if none */
+{
+	if( (isEN751221 && (readl(0xbfb0008c)&0x01)) || isEN7526c )//Biker_0906_7522_always use 25M input clk
+	{
+		setup_25M_PLL();	//Biker_20160516
+	}
+
+
+	// 7512
+	//enable port 0: SSUSB	-isEN7526c (7522), 7526D, 7526G, 7513, 7513G, 7521G, 7586
+	//enable port 1: USB2	-7526F, 7512, 7526D, 7526G, 7513, 7513G, 7521G, 7586
+
+	//7522 (7526c)
+	//enable port 0: 7526F
+	//7521S, 7521F NO USB
+
+	if(isEN7526c){	//Biker_20160906, Add setting for 7522
+
+		if(isEN7526F)	//Biker_20170807
+		{
+			writel(0xC0240008, 0xBFA8081C);	/* enable port0 */
+			printk(KERN_ERR "7522 USB PHY config, enable port0");
+			return 1;
+		}else
+			return 0;	//No port enabled, following driver will not run
+
+
+
+	}// 7512 case
+	else if(isEN7526D || isEN7526G || isEN7513 || isEN7513G || isEN7521G || isEN7586){
+		writel(0xC0240008, 0xBFA8081C);	/* enable port0 */
+		writel(0xC0240000, 0xBFA8101C);	/* enable port1 */
+		printk(KERN_ERR "751221 USB PHY config, enable port0 port1");
+
+		//Patch TxDetRx Timing for 7512 E1, from DR 20160421, Biker_20160516
+		U3PhyWriteReg32(0xbfa80a28, ((U3PhyReadReg32(0xbfa80a28) &(~(0x1ff<<9)) )|(0x10<<9) ));//rg_ssusb_rxdet_stb2_set[8:0]
+		U3PhyWriteReg32(0xbfa80a2c, ((U3PhyReadReg32(0xbfa80a2c) &(~0x1ff) )|0x10 ));//rg_ssusb_rxdet_stb2_set_p3[8:0]
+
+		//Patch LFPS Filter Threshold for 7512 E1, from DR 20160421, Biker_20160516
+		U3PhyWriteReg32 (0xbfa8090c,((U3PhyReadReg32(0xbfa8090c) &(~(0x3f<<16)) )|(0x34<<16) ));//rg_ssusb_fwake_th[5:0]
+
+		return 1;
+
+	}else if(isEN7526F || isEN7512){
+		writel(0xC0241580, 0xBFA8081C);//disable port 0
+		writel(0xC0240000, 0xBFA8101C);//enable port 1
+		printk(KERN_ERR "7512/7526F USB PHY config, enable port1");
+		return 1;
+
+	}else
+		return 0;	/* unknown chip variant: leave ports untouched */
+}
+
+PHY_INT32 u3phy_config_751627(void)	/* EN751627 USB PHY config: set up 25 MHz PLL when needed, then enable both ports; returns 1 */
+{
+	if( (readl(0xbfb0008c)&0x40000) == 0)
+	{
+		setup_25M_PLL();
+		printk(KERN_ERR "USB PLL 25MHz setting\n");
+	}
+
+	writel(0xC0240008, 0xBFA8081C); /* enable port0 */
+	writel(0xC0240000, 0xBFA8101C); /* enable port1 */
+	printk(KERN_ERR "7516 USB PHY config, enable port0 port1\n");
+	return 1;	/* BUGFIX: declared PHY_INT32 but had no return; caller ecnt_u3h_phy_init() stores the result (sibling configs return 1 on success) */
+}
+
+PHY_INT32 u3phy_config_fpga(void){	/* FPGA bring-up stub: no PHY programming implemented */
+
+	/* TODO: add FPGA-specific PHY configuration; currently reports success unconditionally */
+	return 1;
+}
+
+PHY_INT32 U2_Slew_Rate_Calibration(void){	/* calibrate USB2 HS TX slew rate per port via the on-chip frequency meter; 0 = FM measured OK, 1 = FM timeout (default SR used) */
+	PHY_INT32 i=0;
+	PHY_INT32 j=0;
+	PHY_INT32 fgRet = 0;
+	PHY_INT32 u4FmOut = 0;
+	PHY_INT32 u4Tmp = 0;
+	PHY_INT32 U2_PHYA_CR0[U2_port_num]={0xbfa80810, 0xbfa81010};	/* U2 PHYA control register address, one per port */
+
+	for(j=0;j<U2_port_num;j++)
+	{
+		printk(KERN_ERR "port %d u2_slew_rate_calibration\n",j);
+		// => RG_USB20_HSTX_SRCAL_EN = 1
+		// enable HS TX SR calibration
+		U3PhyWriteReg32((void *)(U2_PHYA_CR0[j]), (U3PhyReadReg32((void *)(U2_PHYA_CR0[j]))&(~RG_USB20_HSTX_SRCAL_EN)|((0x1)<<RG_USB20_HSTX_SRCAL_EN_OFST)));	/* NOTE(review): relies on & binding tighter than | */
+		DRV_MSLEEP(1);
+		//printk("%x\n",U2_PHYA_CR0[j]);
+		//printk("%x\n",U3PhyReadReg32((void *)(U2_PHYA_CR0[j])));
+		// => RG_FRCK_EN = 1
+		// Enable free run clock
+		U3PhyWriteReg32(0xbfa80110, (U3PhyReadReg32(0xbfa80110)&(~RG_FRCK_EN)|((0x1)<<RG_FRCK_EN_OFST)));
+		//printk("%x\n",U3PhyReadReg32(0xbfa80110));
+		// => RG_MONCLK_SEL = 0x0/0x1 for port0/port1
+		// Setting MONCLK_SEL
+		U3PhyWriteReg32(0xbfa80100, (U3PhyReadReg32(0xbfa80100)&(~RG_MONCLK_SEL)|((j)<<RG_MONCLK_SEL_OFST)));
+		// => RG_CYCLECNT = 0x400
+		// Setting cyclecnt = 0x400
+		U3PhyWriteReg32(0xbfa80100, (U3PhyReadReg32(0xbfa80100)&(~RG_CYCLECNT)|((0x400)<<RG_CYCLECNT_OFST)));
+		// => RG_FREQDET_EN = 1
+		// Enable frequency meter
+		U3PhyWriteReg32(0xbfa80100, (U3PhyReadReg32(0xbfa80100)&(~RG_FREQDET_EN)|((0x1)<<RG_FREQDET_EN_OFST)));
+		//printk("%x\n",U3PhyReadReg32(0xbfa80100));
+		// wait for FM detection done, set 10ms timeout
+		for(i=0; i<10; i++){
+			u4FmOut = U3PhyReadReg32(0xbfa8010c);
+			// check if FM detection done
+			if (u4FmOut != 0)
+			{
+				// => u4FmOut = USB_FM_OUT
+				// read FM_OUT
+				printk(KERN_ERR "FM_OUT value = %d(0x%08X)\n", u4FmOut, u4FmOut);
+				fgRet = 0;
+				//printk(KERN_ERR "FM detection done! loop = %d\n", i);
+				break;
+			}
+
+			fgRet = 1;
+			DRV_MSLEEP(1);
+		}
+		// => RG_FREQDET_EN = 0
+		// disable frequency meter
+		U3PhyWriteReg32(0xbfa80100, (U3PhyReadReg32(0xbfa80100)&(~RG_FREQDET_EN)|((0x0)<<RG_FREQDET_EN_OFST)));
+
+		// => RG_FRCK_EN = 0
+		// disable free run clock
+		U3PhyWriteReg32(0xbfa80110, (U3PhyReadReg32(0xbfa80110)&(~RG_FRCK_EN)|((0x0)<<RG_FRCK_EN_OFST)));
+
+		// => RG_USB20_HSTX_SRCAL_EN = 0
+		// disable HS TX SR calibration
+		U3PhyWriteReg32((void *)(U2_PHYA_CR0[j]), (U3PhyReadReg32((void *)(U2_PHYA_CR0[j]))&(~RG_USB20_HSTX_SRCAL_EN)|((0x0)<<RG_USB20_HSTX_SRCAL_EN_OFST)));
+		DRV_MSLEEP(1);
+
+		if(u4FmOut == 0){	/* FM timed out: fall back to default slew-rate code 0x4 */
+			U3PhyWriteReg32((void *)(U2_PHYA_CR0[j]), (U3PhyReadReg32((void *)(U2_PHYA_CR0[j]))&(~RG_USB20_HSTX_SRCTRL)|((0x4)<<RG_USB20_HSTX_SRCTRL_OFST)));
+			fgRet = 1;
+		}
+		else{
+			// set reg = (1024/FM_OUT) * REF_CK * U2_SR_COEF (round to the nearest digits)
+			u4Tmp = (((1024 * REF_CK * U2_SR_COEF) / u4FmOut) + 500) / 1000;
+			printk(KERN_ERR "SR calibration value = %d\n", (PHY_UINT8)u4Tmp);
+			U3PhyWriteReg32((void *)(U2_PHYA_CR0[j]), (U3PhyReadReg32((void *)(U2_PHYA_CR0[j]))&(~RG_USB20_HSTX_SRCTRL)|((u4Tmp&0x7)<<RG_USB20_HSTX_SRCTRL_OFST)));
+		}
+	}
+	return fgRet;	/* NOTE(review): reflects only the last port processed, not an AND of all ports */
+}
+
+int ecnt_u3h_phy_init(void)
+{
+ int ret = 0;
+
+ if(isFPGA){
+ ret = u3phy_config_fpga();
+ }else if(isEN751221){
+ ret = u3phy_config_751221();
+ printk(KERN_ERR "USB driver version: 751221.kernel3.3.20170822\n");
+ U2_Slew_Rate_Calibration();
+ }else if(isEN751627){
+ ret = u3phy_config_751627();
+ printk(KERN_ERR "USB driver version: 751627.kernel3.1.20170822\n");
+ U2_Slew_Rate_Calibration();
+ }else{
+ printk(KERN_ERR "**Unknown chip ID for USB driver**\n");
+ }
+
+
+ return ret;
+}
+
+PHY_INT32 u3phy_init(){
+#ifndef CONFIG_PROJECT_PHY
+ PHY_INT32 u3phy_version;
+#endif
+
+ if (u3phy != NULL)
+ return PHY_TRUE;
+
+ u3phy = kmalloc(sizeof(struct u3phy_info), GFP_NOIO);
+ if (u3phy == NULL)
+ return PHY_FALSE;
+
+#if defined (CONFIG_USB_EN7512_XHCI_HCD)
+ u3phy_p1 = kmalloc(sizeof(struct u3phy_info), GFP_NOIO);
+ if (u3phy_p1 == NULL)
+ return PHY_FALSE;
+#endif
+#ifdef CONFIG_U3_PHY_GPIO_SUPPORT
+ u3phy->phyd_version_addr = 0x2000e4;
+#if defined (CONFIG_USB_EN7512_XHCI_HCD)
+ u3phy_p1->phyd_version_addr = 0x2000e4;
+#endif
+#else
+#if defined (CONFIG_RALINK_MT7628)
+ u3phy->phyd_version_addr = U2_PHY_BASE + 0xf0;
+ printk("******MT7628 mtk phy\n");
+#else
+ u3phy->phyd_version_addr = U3_PHYD_B2_BASE + 0xe4;
+#if defined (CONFIG_USB_EN7512_XHCI_HCD)
+ u3phy_p1->phyd_version_addr = U3_PHYD_B2_BASE_P1 + 0xe4;
+#endif
+#endif
+#endif
+
+#ifdef CONFIG_PROJECT_PHY
+ printk("*****run project phy.\n");
+ u3phy->u2phy_regs = (struct u2phy_reg *)U2_PHY_BASE;
+#if !defined (CONFIG_RALINK_MT7628)
+ u3phy->u3phyd_regs = (struct u3phyd_reg *)U3_PHYD_BASE;
+ u3phy->u3phyd_bank2_regs = (struct u3phyd_bank2_reg *)U3_PHYD_B2_BASE;
+ u3phy->u3phya_regs = (struct u3phya_reg *)U3_PHYA_BASE;
+ u3phy->u3phya_da_regs = (struct u3phya_da_reg *)U3_PHYA_DA_BASE;
+ u3phy->sifslv_chip_regs = (struct sifslv_chip_reg *)SIFSLV_CHIP_BASE;
+#endif
+ u3phy->sifslv_fm_regs = (struct sifslv_fm_feg *)SIFSLV_FM_FEG_BASE;
+ u3phy_ops = &project_operators;
+
+#if defined (CONFIG_USB_EN7512_XHCI_HCD)
+ u3phy_p1->u2phy_regs = (struct u2phy_reg *)U2_PHY_BASE_P1;
+ u3phy_p1->u3phyd_regs = (struct u3phyd_reg *)U3_PHYD_BASE_P1;
+ u3phy_p1->u3phyd_bank2_regs = (struct u3phyd_bank2_reg *)U3_PHYD_B2_BASE_P1;
+ u3phy_p1->u3phya_regs = (struct u3phya_reg *)U3_PHYA_BASE_P1;
+ u3phy_p1->u3phya_da_regs = (struct u3phya_da_reg *)U3_PHYA_DA_BASE_P1;
+ u3phy_p1->sifslv_chip_regs = (struct sifslv_chip_reg *)SIFSLV_CHIP_BASE;
+ u3phy_p1->sifslv_fm_regs = (struct sifslv_fm_feg *)SIFSLV_FM_FEG_BASE;
+#endif
+#else
+
+ //parse phy version
+ u3phy_version = U3PhyReadReg32(u3phy->phyd_version_addr);
+ printk(KERN_ERR "phy version: %x\n", u3phy_version);
+ u3phy->phy_version = u3phy_version;
+
+ if(u3phy_version == 0xc60802a){
+ #ifdef CONFIG_C60802_SUPPORT
+ #ifdef CONFIG_U3_PHY_GPIO_SUPPORT
+ u3phy->u2phy_regs_c = 0x0;
+ u3phy->u3phyd_regs_c = 0x100000;
+ u3phy->u3phyd_bank2_regs_c = 0x200000;
+ u3phy->u3phya_regs_c = 0x300000;
+ u3phy->u3phya_da_regs_c = 0x400000;
+ u3phy->sifslv_chip_regs_c = 0x500000;
+ u3phy->sifslv_fm_regs_c = 0xf00000;
+ #else
+ u3phy->u2phy_regs_c = U2_PHY_BASE;
+ u3phy->u3phyd_regs_c = U3_PHYD_BASE;
+ u3phy->u3phyd_bank2_regs_c = U3_PHYD_B2_BASE;
+ u3phy->u3phya_regs_c = U3_PHYA_BASE;
+ u3phy->u3phya_da_regs_c = U3_PHYA_DA_BASE;
+ u3phy->sifslv_chip_regs_c = SIFSLV_CHIP_BASE;
+ u3phy->sifslv_fm_regs_c = SIFSLV_FM_FEG_BASE;
+ #endif
+ u3phy_ops = &c60802_operators;
+ #endif
+ }
+ else if(u3phy_version == 0xd60802a){
+ #ifdef CONFIG_D60802_SUPPORT
+ #ifdef CONFIG_U3_PHY_GPIO_SUPPORT
+ u3phy->u2phy_regs_d = 0x0;
+ u3phy->u3phyd_regs_d = 0x100000;
+ u3phy->u3phyd_bank2_regs_d = 0x200000;
+ u3phy->u3phya_regs_d = 0x300000;
+ u3phy->u3phya_da_regs_d = 0x400000;
+ u3phy->sifslv_chip_regs_d = 0x500000;
+ u3phy->sifslv_fm_regs_d = 0xf00000;
+ #else
+ u3phy->u2phy_regs_d = U2_PHY_BASE;
+ u3phy->u3phyd_regs_d = U3_PHYD_BASE;
+ u3phy->u3phyd_bank2_regs_d = U3_PHYD_B2_BASE;
+ u3phy->u3phya_regs_d = U3_PHYA_BASE;
+ u3phy->u3phya_da_regs_d = U3_PHYA_DA_BASE;
+ u3phy->sifslv_chip_regs_d = SIFSLV_CHIP_BASE;
+ u3phy->sifslv_fm_regs_d = SIFSLV_FM_FEG_BASE;
+ #endif
+ u3phy_ops = &d60802_operators;
+ #endif
+ }
+ else if(u3phy_version == 0xe60802a){
+ #ifdef CONFIG_E60802_SUPPORT
+ #endif
+ }
+ else{
+ printk(KERN_ERR "No match phy version\n");
+ return PHY_FALSE;
+ }
+
+#endif
+
+ return PHY_TRUE;
+}
+
+PHY_INT32 U3PhyWriteField8(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask, PHY_INT32 value){
+ PHY_INT8 cur_value;
+ PHY_INT8 new_value;
+
+ cur_value = U3PhyReadReg8(addr);
+ new_value = (cur_value & (~mask)) | (value << offset);
+ //udelay(i2cdelayus);
+ U3PhyWriteReg8(addr, new_value);
+ return PHY_TRUE;
+}
+
+PHY_INT32 U3PhyWriteField32(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask, PHY_INT32 value){
+ PHY_INT32 cur_value;
+ PHY_INT32 new_value;
+
+ cur_value = U3PhyReadReg32(addr);
+ new_value = (cur_value & (~mask)) | ((value << offset) & mask);
+ U3PhyWriteReg32(addr, new_value);
+ //DRV_MDELAY(100);
+
+ return PHY_TRUE;
+}
+
+PHY_INT32 U3PhyReadField8(PHY_INT32 addr,PHY_INT32 offset,PHY_INT32 mask){
+
+ return ((U3PhyReadReg8(addr) & mask) >> offset);
+}
+
+PHY_INT32 U3PhyReadField32(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask){
+
+ return ((U3PhyReadReg32(addr) & mask) >> offset);
+}
+
Index: linux-3.18.21/drivers/usb/host/mtk-phy.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk-phy.h 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,241 @@
+#ifndef __MTK_PHY_NEW_H
+#define __MTK_PHY_NEW_H
+
+//#define CONFIG_U3D_HAL_SUPPORT
+
+/* include system library */
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+/* Choose PHY R/W implementation */
+//#define CONFIG_U3_PHY_GPIO_SUPPORT //SW I2C implemented by GPIO
+//#define CONFIG_U3_PHY_AHB_SUPPORT //AHB, only on SoC
+
+/* Choose PHY version */
+//Select your project by defining one of the followings
+#define CONFIG_PROJECT_7512 //7621
+#define CONFIG_U3_PHY_AHB_SUPPORT //AHB, only on SoC
+#if defined (CONFIG_RALINK_MT7621)
+#define CONFIG_PROJECT_7621 //7621
+#define CONFIG_U3_PHY_AHB_SUPPORT //AHB, only on SoC
+#elif defined (CONFIG_RALINK_MT7628)
+#define CONFIG_PROJECT_7628 //7628
+#define CONFIG_U3_PHY_AHB_SUPPORT //AHB, only on SoC
+#else
+//#define CONFIG_PROJECT_7662 //7662, 7603
+//#define CONFIG_PROJECT_5399 //5399
+//#define CONFIG_U3_PHY_GPIO_SUPPORT //SW I2C implemented by GPIO
+#endif
+
+#ifndef CONFIG_PROJECT_PHY
+//These are for FPGA. All test chip PHY codes can be compiled at the same time
+//#define CONFIG_C60802_SUPPORT //T40 test chip
+//#define CONFIG_D60802_SUPPORT //T28 test chip
+//#define CONFIG_E60802_SUPPORT //T28HPM test chip
+#endif
+
+/* BASE ADDRESS DEFINE, should define this on ASIC */
+#if defined (CONFIG_PROJECT_7512)
+#define PHY_BASE 0xBFA80000
+#define SIFSLV_FM_FEG_BASE (PHY_BASE+0x100)
+#define SIFSLV_CHIP_BASE (PHY_BASE+0x700)
+#define U2_PHY_BASE (PHY_BASE+0x800)
+#define U3_PHYD_BASE (PHY_BASE+0x900)
+#define U3_PHYD_B2_BASE (PHY_BASE+0xa00)
+#define U3_PHYA_BASE (PHY_BASE+0xb00)
+#define U3_PHYA_DA_BASE (PHY_BASE+0xc00)
+/*EN7512 constant used for u2 slew rate calibration YMC add*/
+#define U2_SR_COEF 28
+#define REF_CK 20
+#define U2_port_num 2
+
+#if defined (CONFIG_USB_EN7512_XHCI_HCD)
+#define SIFSLV_FM_FEG_BASE_P1 (PHY_BASE+0x100)
+#define SIFSLV_CHIP_BASE_P1 (PHY_BASE+0x700)
+#define U2_PHY_BASE_P1 (PHY_BASE+0x1000)
+#define U3_PHYD_BASE_P1 (PHY_BASE+0x1100)
+#define U3_PHYD_B2_BASE_P1 (PHY_BASE+0x1200)
+#define U3_PHYA_BASE_P1 (PHY_BASE+0x1300)
+#define U3_PHYA_DA_BASE_P1 (PHY_BASE+0x1400)
+#endif
+
+#endif
+/*
+
+0x00000100 MODULE ssusb_sifslv_fmreg ssusb_sifslv_fmreg
+0x00000700 MODULE ssusb_sifslv_ippc ssusb_sifslv_ippc
+0x00000800 MODULE ssusb_sifslv_u2phy_com ssusb_sifslv_u2_phy_com_T28
+0x00000900 MODULE ssusb_sifslv_u3phyd ssusb_sifslv_u3phyd_T28
+0x00000a00 MODULE ssusb_sifslv_u3phyd_bank2 ssusb_sifslv_u3phyd_bank2_T28
+0x00000b00 MODULE ssusb_sifslv_u3phya ssusb_sifslv_u3phya_T28
+0x00000c00 MODULE ssusb_sifslv_u3phya_da ssusb_sifslv_u3phya_da_T28
+*/
+
+
+/* TYPE DEFINE */
+typedef unsigned int PHY_UINT32;
+typedef int PHY_INT32;
+typedef unsigned short PHY_UINT16;
+typedef short PHY_INT16;
+typedef unsigned char PHY_UINT8;
+typedef char PHY_INT8;
+
+typedef PHY_UINT32 __bitwise PHY_LE32;
+
+/* CONSTANT DEFINE */
+#define PHY_FALSE 0
+#define PHY_TRUE 1
+
+#define RET_SUCCESS 0
+#define RET_FAIL 1
+
+/* MACRO DEFINE */
+#define DRV_WriteReg32(addr,data) ((*(volatile PHY_UINT32 *)(addr)) = (unsigned long)(data))
+#define DRV_Reg32(addr) (*(volatile PHY_UINT32 *)(addr))
+
+#define DRV_MDELAY mdelay
+#define DRV_MSLEEP msleep
+#define DRV_UDELAY udelay
+#define DRV_USLEEP usleep
+
+/* PHY FUNCTION DEFINE, implemented in platform files, ex. ahb, gpio */
+PHY_INT32 U3PhyWriteReg32(PHY_UINT32 addr, PHY_UINT32 data);
+PHY_INT32 U3PhyReadReg32(PHY_UINT32 addr);
+PHY_INT32 U3PhyWriteReg8(PHY_UINT32 addr, PHY_UINT8 data);
+PHY_INT8 U3PhyReadReg8(PHY_UINT32 addr);
+
+/* PHY GENERAL USAGE FUNC, implemented in mtk-phy.c */
+PHY_INT32 U3PhyWriteField8(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask, PHY_INT32 value);
+PHY_INT32 U3PhyWriteField32(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask, PHY_INT32 value);
+PHY_INT32 U3PhyReadField8(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask);
+PHY_INT32 U3PhyReadField32(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask);
+
+struct u3phy_info {
+ PHY_INT32 phy_version;
+ PHY_INT32 phyd_version_addr;
+
+#ifdef CONFIG_PROJECT_PHY
+ struct u2phy_reg *u2phy_regs;
+ struct u3phya_reg *u3phya_regs;
+ struct u3phya_da_reg *u3phya_da_regs;
+ struct u3phyd_reg *u3phyd_regs;
+ struct u3phyd_bank2_reg *u3phyd_bank2_regs;
+ struct sifslv_chip_reg *sifslv_chip_regs;
+ struct sifslv_fm_feg *sifslv_fm_regs;
+
+ #else
+
+#ifdef CONFIG_C60802_SUPPORT
+ //c60802 regs reference
+ struct u2phy_reg_c *u2phy_regs_c;
+ struct u3phya_reg_c *u3phya_regs_c;
+ struct u3phya_da_reg_c *u3phya_da_regs_c;
+ struct u3phyd_reg_c *u3phyd_regs_c;
+ struct u3phyd_bank2_reg_c *u3phyd_bank2_regs_c;
+ struct sifslv_chip_reg_c *sifslv_chip_regs_c;
+ struct sifslv_fm_feg_c *sifslv_fm_regs_c;
+#endif
+#ifdef CONFIG_D60802_SUPPORT
+ //d60802 regs reference
+ struct u2phy_reg_d *u2phy_regs_d;
+ struct u3phya_reg_d *u3phya_regs_d;
+ struct u3phya_da_reg_d *u3phya_da_regs_d;
+ struct u3phyd_reg_d *u3phyd_regs_d;
+ struct u3phyd_bank2_reg_d *u3phyd_bank2_regs_d;
+ struct sifslv_chip_reg_d *sifslv_chip_regs_d;
+ struct sifslv_fm_feg_d *sifslv_fm_regs_d;
+#endif
+#ifdef CONFIG_E60802_SUPPORT
+ //e60802 regs reference
+ struct u2phy_reg_e *u2phy_regs_e;
+ struct u3phya_reg_e *u3phya_regs_e;
+ struct u3phya_da_reg_e *u3phya_da_regs_e;
+ struct u3phyd_reg_e *u3phyd_regs_e;
+ struct u3phyd_bank2_reg_e *u3phyd_bank2_regs_e;
+ struct sifslv_chip_reg_e *sifslv_chip_regs_e;
+ struct sifslv_fm_feg_e *sifslv_fm_regs_e;
+#endif
+
+ #endif
+};
+
+struct u3phy_operator {
+ PHY_INT32 (*init) (struct u3phy_info *info);
+ PHY_INT32 (*change_pipe_phase) (struct u3phy_info *info, PHY_INT32 phy_drv, PHY_INT32 pipe_phase);
+ PHY_INT32 (*eyescan_init) (struct u3phy_info *info);
+ PHY_INT32 (*eyescan) (struct u3phy_info *info, PHY_INT32 x_t1, PHY_INT32 y_t1, PHY_INT32 x_br, PHY_INT32 y_br, PHY_INT32 delta_x, PHY_INT32 delta_y, PHY_INT32 eye_cnt, PHY_INT32 num_cnt, PHY_INT32 PI_cal_en, PHY_INT32 num_ignore_cnt);
+ PHY_INT32 (*u2_save_current_entry) (struct u3phy_info *info);
+ PHY_INT32 (*u2_save_current_recovery) (struct u3phy_info *info);
+ PHY_INT32 (*u2_slew_rate_calibration) (struct u3phy_info *info);
+};
+
+#ifdef U3_PHY_LIB
+#define AUTOEXT
+#else
+#define AUTOEXT extern
+#endif
+
+AUTOEXT struct u3phy_info *u3phy;
+AUTOEXT struct u3phy_info *u3phy_p1;
+AUTOEXT struct u3phy_operator *u3phy_ops;
+
+/*********eye scan required*********/
+
+#define LO_BYTE(x) ((PHY_UINT8)((x) & 0xFF))
+#define HI_BYTE(x) ((PHY_UINT8)(((x) & 0xFF00) >> 8))
+
+typedef enum
+{
+ SCAN_UP,
+ SCAN_DN
+} enumScanDir;
+
+struct strucScanRegion
+{
+ PHY_INT8 bX_tl;
+ PHY_INT8 bY_tl;
+ PHY_INT8 bX_br;
+ PHY_INT8 bY_br;
+ PHY_INT8 bDeltaX;
+ PHY_INT8 bDeltaY;
+};
+
+struct strucTestCycle
+{
+ PHY_UINT16 wEyeCnt;
+ PHY_INT8 bNumOfEyeCnt;
+ PHY_INT8 bPICalEn;
+ PHY_INT8 bNumOfIgnoreCnt;
+};
+
+#define ERRCNT_MAX 128
+#define CYCLE_COUNT_MAX 15
+
+/// the map resolution is 128 x 128 pts
+#define MAX_X 127
+#define MAX_Y 127
+#define MIN_X 0
+#define MIN_Y 0
+
+PHY_INT32 u3phy_init(void);
+PHY_INT32 mt7628_phy_init(void);
+
+AUTOEXT struct strucScanRegion _rEye1;
+AUTOEXT struct strucScanRegion _rEye2;
+AUTOEXT struct strucTestCycle _rTestCycle;
+AUTOEXT PHY_UINT8 _bXcurr;
+AUTOEXT PHY_UINT8 _bYcurr;
+AUTOEXT enumScanDir _eScanDir;
+AUTOEXT PHY_INT8 _fgXChged;
+AUTOEXT PHY_INT8 _bPIResult;
+/* use local variable instead to save memory use */
+#if 0
+AUTOEXT PHY_UINT32 pwErrCnt0[CYCLE_COUNT_MAX][ERRCNT_MAX][ERRCNT_MAX];
+AUTOEXT PHY_UINT32 pwErrCnt1[CYCLE_COUNT_MAX][ERRCNT_MAX][ERRCNT_MAX];
+#endif
+
+/***********************************/
+#endif
+
Index: linux-3.18.21/drivers/usb/host/mtk_test/Kconfig
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/Kconfig 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,27 @@
+#
+# USB Host Controller Drivers
+#
+comment "USB Host Controller Drivers"
+ depends on USB
+
+config USB_MTK_XHCI_HCD_TEST_DRV
+ tristate "MTK xHCI HCD (USB 3.0) Test Driver"
+ depends on USB
+
+config USB_XHCI_HCD_DEBUGGING
+ bool "Debugging for the xHCI host controller"
+ depends on USB_MTK_XHCI_HCD_TEST_DRV
+ ---help---
+ Say 'Y' to turn on debugging for the xHCI host controller driver.
+ This will spew debugging output, even in interrupt context.
+ This should only be used for debugging xHCI driver bugs.
+
+ If unsure, say N.
+
+config U3_PHY_GPIO_SUPPORT
+ depends on USB_MTK_XHCI_HCD_TEST_DRV
+ bool "U3 PHY GPIO access registers support"
+
+config A60810_SUPPORT
+ depends on USB_MTK_XHCI_HCD_TEST_DRV
+ bool "U3 PHY A60810 support"
Index: linux-3.18.21/drivers/usb/host/mtk_test/Makefile
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/Makefile 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,15 @@
+#
+
+# Makefile for USB Host Controller Drivers
+
+#
+
+ifeq ($(CONFIG_USB_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
+
+EXTRA_CFLAGS += -Wno-error=strict-prototypes
+
+xhci-test-objs := xhci.o xhci-mem.o xhci-platform.o xhci-ring.o xhci-hub.o xhci-dbg.o mtk-protocol.o mtk-test.o mtk-test-lib.o mtk-usb-hcd.o xhci-mtk-scheduler.o xhci-mtk-power.o mtk-phy.o mtk-phy-ahb.o mtk-phy-a60810.o mtk-phy-gpio.o
+
+obj-$(CONFIG_USB_XHCI_HCD) += xhci-test.o
Index: linux-3.18.21/drivers/usb/host/mtk_test/mtk-phy-a60810.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/mtk-phy-a60810.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,544 @@
+#include "mtk-phy.h"
+
+#ifdef CONFIG_A60810_SUPPORT
+#include "mtk-phy-a60810.h"
+
+PHY_INT32 phy_init_a60810(struct u3phy_info *info){
+
+ /* for U2 hs eye diagram */
+ U3PhyWriteField32(((PHY_UINT32)&info->u2phy_regs_a60810->usbphyacr1)
+ ,A60810_RG_USB20_TERM_VREF_SEL_OFST, A60810_RG_USB20_TERM_VREF_SEL, 0x05);
+
+ U3PhyWriteField32(((PHY_UINT32)&info->u2phy_regs_a60810->usbphyacr1)
+ ,A60810_RG_USB20_VRT_VREF_SEL_OFST, A60810_RG_USB20_VRT_VREF_SEL, 0x05);
+
+ /* for U2 sensitivity */
+ U3PhyWriteField32(((PHY_UINT32)&info->u2phy_regs_a60810->usbphyacr6)
+ ,A60810_RG_USB20_SQTH_OFST, A60810_RG_USB20_SQTH, 0x04);
+
+	/* disable ssusb_p3_entry to work around resume from P3 bug */
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs_a60810->phyd_lfps0)
+ ,A60810_RG_SSUSB_P3_ENTRY_OFST, A60810_RG_SSUSB_P3_ENTRY, 0x0);
+
+	/* force disable ssusb_p3_entry to work around resume from P3 bug */
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs_a60810->phyd_lfps0)
+ ,A60810_RG_SSUSB_P3_ENTRY_SEL_OFST, A60810_RG_SSUSB_P3_ENTRY_SEL, 0x01);
+
+ /* fine tune SSC delta1 to let SSC min average ~0ppm */
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phya_da_regs_a60810->reg19)
+ ,A60810_RG_SSUSB_PLL_SSC_DELTA1_U3_OFST, A60810_RG_SSUSB_PLL_SSC_DELTA1_U3
+ , 0x46);
+ /* fine tune SSC delta to let SSC min average ~0ppm */
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phya_da_regs_a60810->reg21)
+ ,A60810_RG_SSUSB_PLL_SSC_DELTA_U3_OFST, A60810_RG_SSUSB_PLL_SSC_DELTA_U3
+ , 0x40);
+ /* Fine tune SYSPLL to improve phase noise */
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phya_da_regs_a60810->reg4)
+ ,A60810_RG_SSUSB_PLL_BC_U3_OFST, A60810_RG_SSUSB_PLL_BC_U3, 0x03);
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phya_da_regs_a60810->reg4)
+ , A60810_RG_SSUSB_PLL_DIVEN_U3_OFST, A60810_RG_SSUSB_PLL_DIVEN_U3, 0x03);
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phya_da_regs_a60810->reg5)
+ , A60810_RG_SSUSB_PLL_IC_U3_OFST, A60810_RG_SSUSB_PLL_IC_U3, 0x01);
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phya_da_regs_a60810->reg5)
+ , A60810_RG_SSUSB_PLL_BR_U3_OFST,A60810_RG_SSUSB_PLL_BR_U3, 0x01);
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phya_da_regs_a60810->reg6)
+ , A60810_RG_SSUSB_PLL_IR_U3_OFST, A60810_RG_SSUSB_PLL_IR_U3, 0x01);
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phya_da_regs_a60810->reg7)
+ , A60810_RG_SSUSB_PLL_BP_U3_OFST, A60810_RG_SSUSB_PLL_BP_U3, 0x0f);
+
+ /* force xtal pwd mode enable */
+ U3PhyWriteField32(((PHY_UINT32)&info->spllc_regs_a60810->u3d_xtalctl_2)
+ ,A60810_RG_SSUSB_FORCE_XTAL_PWD_OFST, A60810_RG_SSUSB_FORCE_XTAL_PWD, 0x01);
+ /* force xtal bias mode enable */
+ U3PhyWriteField32(((PHY_UINT32)&info->spllc_regs_a60810->u3d_xtalctl_2)
+ ,A60810_RG_SSUSB_FORCE_BIAS_PWD_OFST, A60810_RG_SSUSB_FORCE_BIAS_PWD, 0x01);
+ /* force xtal pwd mode enable */
+ U3PhyWriteField32(((PHY_UINT32)&info->spllc_regs_a60810->u3d_xtalctl_2)
+ ,A60810_RG_SSUSB_XTAL_PWD_OFST, A60810_RG_SSUSB_XTAL_PWD, 0x00);
+ /* force xtal pwd mode enable */
+ U3PhyWriteField32(((PHY_UINT32)&info->spllc_regs_a60810->u3d_xtalctl_2)
+ ,A60810_RG_SSUSB_BIAS_PWD_OFST, A60810_RG_SSUSB_BIAS_PWD, 0x00);
+ /* for better LPM BESL value */
+ U3PhyWriteField32(((PHY_UINT32)&info->u2phy_regs_a60810->u2phydcr1)
+ , A60810_RG_USB20_SW_PLLMODE_OFST+1, A60810_RG_USB20_SW_PLLMODE, 0x1);
+
+ return PHY_TRUE;
+}
+
+#define PHY_DRV_SHIFT 3
+#define PHY_PHASE_SHIFT 3
+#define PHY_PHASE_DRV_SHIFT 1
+PHY_INT32 phy_change_pipe_phase_a60810(struct u3phy_info *info, PHY_INT32 phy_drv, PHY_INT32 pipe_phase){
+ PHY_INT32 drv_reg_value;
+ PHY_INT32 phase_reg_value;
+ PHY_INT32 temp;
+
+ drv_reg_value = phy_drv << PHY_DRV_SHIFT;
+ phase_reg_value = (pipe_phase << PHY_PHASE_SHIFT) | (phy_drv << PHY_PHASE_DRV_SHIFT);
+ temp = U3PhyReadReg8(((PHY_UINT32)&info->sifslv_chip_regs_a60810->gpio_ctla)+2);
+ temp &= ~(0x3 << PHY_DRV_SHIFT);
+ temp |= drv_reg_value;
+ U3PhyWriteReg8(((PHY_UINT32)&info->sifslv_chip_regs_a60810->gpio_ctla)+2, temp);
+ temp = U3PhyReadReg8(((PHY_UINT32)&info->sifslv_chip_regs_a60810->gpio_ctla)+3);
+ temp &= ~((0x3 << PHY_PHASE_DRV_SHIFT) | (0x1f << PHY_PHASE_SHIFT));
+ temp |= phase_reg_value;
+ U3PhyWriteReg8(((PHY_UINT32)&info->sifslv_chip_regs_a60810->gpio_ctla)+3, temp);
+ return PHY_TRUE;
+}
+
+/* --------------------------------------------------------
+ * Function : fgEyeScanHelper_CheckPtInRegion()
+ * Description : Check if the test point is in a rectangle region.
+ * If it is in the rectangle, also check if this point
+ * is on the multiple of deltaX and deltaY.
+ * Parameter : strucScanRegion * prEye - the region
+ * BYTE bX
+ * BYTE bY
+ * Return : BYTE - TRUE : This point needs to be tested
+ * FALSE: This point will be omitted
+ * Note : First check within the rectangle.
+ * Secondly, use modulus to check if the point will be tested.
+ * --------------------------------------------------------
+ */
+static PHY_INT8 fgEyeScanHelper_CheckPtInRegion(struct strucScanRegion * prEye, PHY_INT8 bX, PHY_INT8 bY)
+{
+ PHY_INT8 fgValid = true;
+
+ /*
+ * Be careful, the axis origin is on the TOP-LEFT corner.
+ * Therefore the top-left point has the minimum X and Y
+ * Botton-right point is the maximum X and Y
+ */
+ if ( (prEye->bX_tl <= bX) && (bX <= prEye->bX_br)
+ && (prEye->bY_tl <= bY) && (bY <= prEye->bX_br))
+ {
+ /*
+ * With the region, now check whether or not the input test point is
+ * on the multiples of X and Y
+ * Do not have to worry about negative value, because we have already
+ * check the input bX, and bY is within the region.
+ */
+ if ( ((bX - prEye->bX_tl) % (prEye->bDeltaX))
+ || ((bY - prEye->bY_tl) % (prEye->bDeltaY)) )
+ {
+ /*
+ * if the division will have remainder, that means
+			 * the input test point is not on the multiples of X and Y
+ */
+ fgValid = false;
+ }
+ else
+ {
+ }
+ }
+ else
+ {
+
+ fgValid = false;
+ }
+ return fgValid;
+}
+/*
+ * --------------------------------------------------------
+ * Function : EyeScanHelper_RunTest()
+ * Description : Enable the test, and wait til it is completed
+ * Parameter : None
+ * Return : None
+ * Note : None
+ * --------------------------------------------------------
+ */
+static void EyeScanHelper_RunTest(struct u3phy_info *info)
+{
+ /*
+ * Disable the test
+ * RG_SSUSB_RX_EYE_CNT_EN = 0
+ */
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs_a60810->eq_eye0)
+ , A60810_RG_SSUSB_EQ_EYE_CNT_EN_OFST, A60810_RG_SSUSB_EQ_EYE_CNT_EN, 0);
+
+ /*
+ * Run the test
+ * RG_SSUSB_RX_EYE_CNT_EN = 1
+ */
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs_a60810->eq_eye0)
+ , A60810_RG_SSUSB_EQ_EYE_CNT_EN_OFST, A60810_RG_SSUSB_EQ_EYE_CNT_EN, 1);
+
+ /*
+ * Wait til it's done
+ * RGS_SSUSB_RX_EYE_CNT_RDY
+ */
+ while(!U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs_a60810->phya_rx_mon5)
+ , A60810_RGS_SSUSB_EQ_EYE_CNT_RDY_OFST, A60810_RGS_SSUSB_EQ_EYE_CNT_RDY));
+}
+
+/*
+ * --------------------------------------------------------
+ * Function : fgEyeScanHelper_CalNextPoint()
+ * Description : Calculate the test point for the measurement
+ * Parameter : None
+ * Return : BOOL - TRUE : the next point is within the
+ * boundaryof HW limit
+ * FALSE: the next point is out of the HW limit
+ * Note : The next point is obtained by calculating
+ * from the bottom left of the region rectangle
+ * and then scanning up until it reaches the upper
+ * limit. At this time, the x will increment, and
+ * start scanning downwards until the y hits the
+ * zero.
+ * --------------------------------------------------------
+ */
+static PHY_INT8 fgEyeScanHelper_CalNextPoint(void)
+{
+ if ( ((_bYcurr == MAX_Y) && (_eScanDir == SCAN_DN))
+ || ((_bYcurr == MIN_Y) && (_eScanDir == SCAN_UP))
+ )
+ {
+ /*
+ * Reaches the limit of Y axis
+ * Increment X
+ */
+ _bXcurr++;
+ _fgXChged = true;
+ _eScanDir = (_eScanDir == SCAN_UP) ? SCAN_DN : SCAN_UP;
+
+ if (_bXcurr > MAX_X)
+ {
+ return false;
+ }
+ }
+ else
+ {
+ _bYcurr = (_eScanDir == SCAN_DN) ? _bYcurr + 1 : _bYcurr - 1;
+ _fgXChged = false;
+ }
+ return PHY_TRUE;
+}
+
+PHY_INT32 eyescan_init_a60810 (struct u3phy_info *info){
+ /* initial PHY setting */
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phya_regs_a60810->reg9)
+ , A60810_RG_SSUSB_CDR_EPEN_OFST, A60810_RG_SSUSB_CDR_EPEN, 1);
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs_a60810->phyd_mix3)
+ , A60810_RG_SSUSB_FORCE_CDR_PI_PWD_OFST, A60810_RG_SSUSB_FORCE_CDR_PI_PWD, 1);
+
+ return PHY_TRUE;
+}
+
+PHY_INT32 phy_eyescan_a60810(struct u3phy_info *info, PHY_INT32 x_t1, PHY_INT32 y_t1, PHY_INT32 x_br, PHY_INT32 y_br, PHY_INT32 delta_x, PHY_INT32 delta_y
+ , PHY_INT32 eye_cnt, PHY_INT32 num_cnt, PHY_INT32 PI_cal_en, PHY_INT32 num_ignore_cnt){
+ PHY_INT32 cOfst = 0;
+ PHY_UINT8 bIdxX = 0;
+ PHY_UINT8 bIdxY = 0;
+ PHY_UINT8 bIdxCycCnt = 0;
+ PHY_INT8 fgValid;
+ PHY_INT8 cX;
+ PHY_INT8 cY;
+ PHY_UINT8 bExtendCnt;
+ PHY_INT8 isContinue;
+ PHY_UINT32 wErr0 = 0, wErr1 = 0;
+
+ _rEye1.bX_tl = x_t1;
+ _rEye1.bY_tl = y_t1;
+ _rEye1.bX_br = x_br;
+ _rEye1.bY_br = y_br;
+ _rEye1.bDeltaX = delta_x;
+ _rEye1.bDeltaY = delta_y;
+
+ _rEye2.bX_tl = x_t1;
+ _rEye2.bY_tl = y_t1;
+ _rEye2.bX_br = x_br;
+ _rEye2.bY_br = y_br;
+ _rEye2.bDeltaX = delta_x;
+ _rEye2.bDeltaY = delta_y;
+
+ _rTestCycle.wEyeCnt = eye_cnt;
+ _rTestCycle.bNumOfEyeCnt = num_cnt;
+ _rTestCycle.bNumOfIgnoreCnt = num_ignore_cnt;
+ _rTestCycle.bPICalEn = PI_cal_en;
+
+ _bXcurr = 0;
+ _bYcurr = 0;
+ _eScanDir = SCAN_DN;
+ _fgXChged = false;
+
+ printk("x_t1: %x, y_t1: %x, x_br: %x, y_br: %x, delta_x: %x, delta_y: %x, \
+ eye_cnt: %x, num_cnt: %x, PI_cal_en: %x, num_ignore_cnt: %x\n", \
+ x_t1, y_t1, x_br, y_br, delta_x, delta_y, eye_cnt, num_cnt, PI_cal_en, num_ignore_cnt);
+
+ /* force SIGDET to OFF */
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs_a60810->b2_phyd_misc0)
+ , A60810_RG_SSUSB_RX_SIGDET_EN_SEL_OFST, A60810_RG_SSUSB_RX_SIGDET_EN_SEL, 1);
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs_a60810->b2_phyd_misc0)
+ , A60810_RG_SSUSB_RX_SIGDET_EN_OFST, A60810_RG_SSUSB_RX_SIGDET_EN, 0);
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs_a60810->eq_eye1)
+ , A60810_RG_SSUSB_EQ_SIGDET_OFST, A60810_RG_SSUSB_EQ_SIGDET, 0);
+
+ /* RX_TRI_DET_EN to Disable */
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs_a60810->eq3)
+ , A60810_RG_SSUSB_EQ_TRI_DET_EN_OFST, A60810_RG_SSUSB_EQ_TRI_DET_EN, 0);
+
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs_a60810->eq_eye0)
+ , A60810_RG_SSUSB_EQ_EYE_MON_EN_OFST, A60810_RG_SSUSB_EQ_EYE_MON_EN, 1);
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs_a60810->eq_eye0)
+ , A60810_RG_SSUSB_EQ_EYE_XOFFSET_OFST, A60810_RG_SSUSB_EQ_EYE_XOFFSET, 0);
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs_a60810->eq_eye0)
+ , A60810_RG_SSUSB_EQ_EYE0_Y_OFST, A60810_RG_SSUSB_EQ_EYE0_Y, 0);
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs_a60810->eq_eye0)
+ , A60810_RG_SSUSB_EQ_EYE1_Y_OFST, A60810_RG_SSUSB_EQ_EYE1_Y, 0);
+
+
+ if (PI_cal_en){
+ /* PI Calibration */
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs_a60810->b2_phyd_misc0)
+ , A60810_RG_SSUSB_RX_PI_CAL_EN_SEL_OFST, A60810_RG_SSUSB_RX_PI_CAL_EN_SEL, 1);
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs_a60810->b2_phyd_misc0)
+ , A60810_RG_SSUSB_RX_PI_CAL_EN_OFST, A60810_RG_SSUSB_RX_PI_CAL_EN, 0);
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs_a60810->b2_phyd_misc0)
+ , A60810_RG_SSUSB_RX_PI_CAL_EN_OFST, A60810_RG_SSUSB_RX_PI_CAL_EN, 1);
+
+ DRV_UDELAY(20);
+
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs_a60810->b2_phyd_misc0)
+ , A60810_RG_SSUSB_RX_PI_CAL_EN_OFST, A60810_RG_SSUSB_RX_PI_CAL_EN, 0);
+
+ _bPIResult = U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs_a60810->phya_rx_mon5)
+ , A60810_RGS_SSUSB_EQ_PILPO_OFST, A60810_RGS_SSUSB_EQ_PILPO);
+
+ printk(KERN_ERR "PI result: %d\n", _bPIResult);
+ }
+ /*
+ * Read Initial DAC
+ * Set CYCLE
+ */
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs_a60810->eq_eye3)
+ ,A60810_RG_SSUSB_EQ_EYE_CNT_OFST, A60810_RG_SSUSB_EQ_EYE_CNT, eye_cnt);
+ /* Eye Monitor Feature */
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs_a60810->eq_eye1)
+ , A60810_RG_SSUSB_EQ_EYE_MASK_OFST, A60810_RG_SSUSB_EQ_EYE_MASK, 0x3ff);
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs_a60810->eq_eye0)
+ , A60810_RG_SSUSB_EQ_EYE_MON_EN_OFST, A60810_RG_SSUSB_EQ_EYE_MON_EN, 1);
+
+ /* Move X,Y to the top-left corner */
+ for (cOfst = 0; cOfst >= -64; cOfst--)
+ {
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs_a60810->eq_eye0)
+ ,A60810_RG_SSUSB_EQ_EYE_XOFFSET_OFST, A60810_RG_SSUSB_EQ_EYE_XOFFSET, cOfst);
+ }
+ for (cOfst = 0; cOfst < 64; cOfst++)
+ {
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs_a60810->eq_eye0)
+ , A60810_RG_SSUSB_EQ_EYE0_Y_OFST, A60810_RG_SSUSB_EQ_EYE0_Y, cOfst);
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs_a60810->eq_eye0)
+ , A60810_RG_SSUSB_EQ_EYE1_Y_OFST, A60810_RG_SSUSB_EQ_EYE1_Y, cOfst);
+ }
+ /* ClearErrorResult */
+ for(bIdxCycCnt = 0; bIdxCycCnt < CYCLE_COUNT_MAX; bIdxCycCnt++){
+ for(bIdxX = 0; bIdxX < ERRCNT_MAX; bIdxX++)
+ {
+ for(bIdxY = 0; bIdxY < ERRCNT_MAX; bIdxY++){
+ pwErrCnt0[bIdxCycCnt][bIdxX][bIdxY] = 0;
+ pwErrCnt1[bIdxCycCnt][bIdxX][bIdxY] = 0;
+ }
+ }
+ }
+ isContinue = true;
+ while(isContinue){
+ printk(KERN_ERR "_bXcurr: %d, _bYcurr: %d\n", _bXcurr, _bYcurr);
+ /*
+ * The point is within the boundary, then let's check if it is within
+ * the testing region.
+ * The point is only test-able if one of the eye region
+ * includes this point.
+ */
+ fgValid = fgEyeScanHelper_CheckPtInRegion(&_rEye1, _bXcurr, _bYcurr)
+ || fgEyeScanHelper_CheckPtInRegion(&_rEye2, _bXcurr, _bYcurr);
+ /*
+ * Translate bX and bY to 2's complement from where the origin was on the
+ * top left corner.
+ * 0x40 and 0x3F needs a bit of thinking!!!! >"<
+ */
+ cX = (_bXcurr ^ 0x40);
+ cY = (_bYcurr ^ 0x3F);
+
+ /* Set X if necessary */
+ if (_fgXChged == true)
+ {
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs_a60810->eq_eye0)
+ , A60810_RG_SSUSB_EQ_EYE_XOFFSET_OFST, A60810_RG_SSUSB_EQ_EYE_XOFFSET, cX);
+ }
+ /* Set Y */
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs_a60810->eq_eye0)
+ , A60810_RG_SSUSB_EQ_EYE0_Y_OFST, A60810_RG_SSUSB_EQ_EYE0_Y, cY);
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs_a60810->eq_eye0)
+ , A60810_RG_SSUSB_EQ_EYE1_Y_OFST, A60810_RG_SSUSB_EQ_EYE1_Y, cY);
+
+ /* Test this point! */
+ if (fgValid){
+ for (bExtendCnt = 0; bExtendCnt < num_ignore_cnt; bExtendCnt++)
+ {
+ /* run test */
+ EyeScanHelper_RunTest(info);
+ }
+ for (bExtendCnt = 0; bExtendCnt < num_cnt; bExtendCnt++)
+ {
+ EyeScanHelper_RunTest(info);
+ wErr0 = U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs_a60810->phya_rx_mon3)
+ , A60810_RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_0_OFST, A60810_RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_0);
+ wErr1 = U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs_a60810->phya_rx_mon4)
+ , A60810_RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_1_OFST, A60810_RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_1);
+
+ pwErrCnt0[bExtendCnt][_bXcurr][_bYcurr] = wErr0;
+ pwErrCnt1[bExtendCnt][_bXcurr][_bYcurr] = wErr1;
+
+ }
+ }
+ else{
+
+ }
+ if (fgEyeScanHelper_CalNextPoint() == false){
+ printk(KERN_ERR "Xcurr [0x%x] Ycurr [0x%x]\n", _bXcurr, _bYcurr);
+ printk(KERN_ERR "XcurrREG [0x%x] YcurrREG [0x%x]\n", cX, cY);
+ printk(KERN_ERR "end of eye scan\n");
+ isContinue = false;
+ }
+ }
+ printk(KERN_ERR "CurX [0x%x] CurY [0x%x]\n"
+ , U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs_a60810->eq_eye0), A60810_RG_SSUSB_EQ_EYE_XOFFSET_OFST, A60810_RG_SSUSB_EQ_EYE_XOFFSET)
+ , U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs_a60810->eq_eye0), A60810_RG_SSUSB_EQ_EYE0_Y_OFST, A60810_RG_SSUSB_EQ_EYE0_Y));
+
+ /* Move X,Y to the top-left corner */
+ for (cOfst = 63; cOfst >= 0; cOfst--)
+ {
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs_a60810->eq_eye0)
+ , A60810_RG_SSUSB_EQ_EYE_XOFFSET_OFST, A60810_RG_SSUSB_EQ_EYE_XOFFSET, cOfst);
+ }
+ for (cOfst = 63; cOfst >= 0; cOfst--)
+ {
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs_a60810->eq_eye0)
+ , A60810_RG_SSUSB_EQ_EYE0_Y_OFST, A60810_RG_SSUSB_EQ_EYE0_Y, cOfst);
+ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs_a60810->eq_eye0)
+ , A60810_RG_SSUSB_EQ_EYE1_Y_OFST, A60810_RG_SSUSB_EQ_EYE1_Y, cOfst);
+
+ }
+ printk(KERN_ERR "CurX [0x%x] CurY [0x%x]\n"
+ , U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs_a60810->eq_eye0), A60810_RG_SSUSB_EQ_EYE_XOFFSET_OFST, A60810_RG_SSUSB_EQ_EYE_XOFFSET)
+ , U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs_a60810->eq_eye0), A60810_RG_SSUSB_EQ_EYE0_Y_OFST,A60810_RG_SSUSB_EQ_EYE0_Y));
+
+ printk(KERN_ERR "PI result: %d\n", _bPIResult);
+ printk(KERN_ERR "pwErrCnt0 addr: 0x%x\n", (PHY_UINT32)pwErrCnt0);
+ printk(KERN_ERR "pwErrCnt1 addr: 0x%x\n", (PHY_UINT32)pwErrCnt1);
+ return PHY_TRUE;
+}
+
+PHY_INT32 u2_connect_a60810(struct u3phy_info *info){
+ /* for better LPM BESL value */
+ U3PhyWriteField32(((PHY_UINT32)&info->u2phy_regs_a60810->u2phydcr1)
+ , A60810_RG_USB20_SW_PLLMODE_OFST, A60810_RG_USB20_SW_PLLMODE, 0x1);
+ return PHY_TRUE;
+}
+
+PHY_INT32 u2_disconnect_a60810(struct u3phy_info *info){
+ /* for better LPM BESL value */
+ U3PhyWriteField32(((PHY_UINT32)&info->u2phy_regs_a60810->u2phydcr1)
+ , A60810_RG_USB20_SW_PLLMODE_OFST, A60810_RG_USB20_SW_PLLMODE, 0x0);
+ return PHY_TRUE;
+}
+
+PHY_INT32 u2_save_cur_en_a60810(struct u3phy_info *info){
+ return PHY_TRUE;
+}
+
+PHY_INT32 u2_save_cur_re_a60810(struct u3phy_info *info){
+ return PHY_TRUE;
+}
+
+PHY_INT32 u2_slew_rate_calibration_a60810(struct u3phy_info *info){
+ PHY_INT32 i=0;
+ PHY_INT32 fgRet = 0;
+ PHY_INT32 u4FmOut = 0;
+ PHY_INT32 u4Tmp = 0;
+
+ /*
+ * => RG_USB20_HSTX_SRCAL_EN = 1
+ * enable HS TX SR calibration
+ */
+ U3PhyWriteField32(((PHY_UINT32)&info->u2phy_regs_a60810->usbphyacr5)
+ , A60810_RG_USB20_HSTX_SRCAL_EN_OFST, A60810_RG_USB20_HSTX_SRCAL_EN, 1);
+ DRV_MSLEEP(1);
+
+ /*
+ * => RG_FRCK_EN = 1
+ * Enable free run clock
+ */
+ U3PhyWriteField32(((PHY_UINT32)&info->sifslv_fm_regs_a60810->fmmonr1)
+ , A60810_RG_FRCK_EN_OFST, A60810_RG_FRCK_EN, 0x1);
+ /*
+ * => RG_CYCLECNT = 0x400
+ * Setting cyclecnt = 0x400
+ */
+ U3PhyWriteField32(((PHY_UINT32)&info->sifslv_fm_regs_a60810->fmcr0)
+ , A60810_RG_CYCLECNT_OFST, A60810_RG_CYCLECNT, 0x400);
+ /*
+ * => RG_FREQDET_EN = 1
+ * Enable frequency meter
+ */
+ U3PhyWriteField32(((PHY_UINT32)&info->sifslv_fm_regs_a60810->fmcr0)
+ , A60810_RG_FREQDET_EN_OFST, A60810_RG_FREQDET_EN, 0x1);
+
+ /* wait for FM detection done, set 10ms timeout */
+ for(i=0; i<10; i++){
+ /*
+ * => u4FmOut = USB_FM_OUT
+ * read FM_OUT
+ */
+ u4FmOut = U3PhyReadReg32(((PHY_UINT32)&info->sifslv_fm_regs_a60810->fmmonr0));
+ printk("FM_OUT value: u4FmOut = %d(0x%08X)\n", u4FmOut, u4FmOut);
+
+ /* check if FM detection done */
+ if (u4FmOut != 0)
+ {
+ fgRet = 0;
+ printk("FM detection done! loop = %d\n", i);
+
+ break;
+ }
+
+ fgRet = 1;
+ DRV_MSLEEP(1);
+ }
+ /*
+ * => RG_FREQDET_EN = 0
+ * disable frequency meter
+ */
+ U3PhyWriteField32(((PHY_UINT32)&info->sifslv_fm_regs_a60810->fmcr0)
+ , A60810_RG_FREQDET_EN_OFST, A60810_RG_FREQDET_EN, 0);
+ /*
+ * => RG_FRCK_EN = 0
+ * disable free run clock
+ */
+ U3PhyWriteField32(((PHY_UINT32)&info->sifslv_fm_regs_a60810->fmmonr1)
+ , A60810_RG_FRCK_EN_OFST, A60810_RG_FRCK_EN, 0);
+ /*
+ * => RG_USB20_HSTX_SRCAL_EN = 0
+ * disable HS TX SR calibration
+ */
+ U3PhyWriteField32(((PHY_UINT32)&info->u2phy_regs_a60810->usbphyacr5)
+ , A60810_RG_USB20_HSTX_SRCAL_EN_OFST, A60810_RG_USB20_HSTX_SRCAL_EN, 0);
+ DRV_MSLEEP(1);
+
+ if(u4FmOut == 0){
+ U3PhyWriteField32(((PHY_UINT32)&info->u2phy_regs_a60810->usbphyacr5)
+ , A60810_RG_USB20_HSTX_SRCTRL_OFST, A60810_RG_USB20_HSTX_SRCTRL, 0x4);
+
+ fgRet = 1;
+ }
+ else{
+ u4Tmp = (((1024 * REF_CK * U2_SR_COEF_A60810) / u4FmOut) + 500) / 1000;
+ printk("SR calibration value u1SrCalVal = %d\n", (PHY_UINT8)u4Tmp);
+ U3PhyWriteField32(((PHY_UINT32)&info->u2phy_regs_a60810->usbphyacr5)
+ , A60810_RG_USB20_HSTX_SRCTRL_OFST, A60810_RG_USB20_HSTX_SRCTRL, u4Tmp);
+ }
+
+ return fgRet;
+}
+#endif
Index: linux-3.18.21/drivers/usb/host/mtk_test/mtk-phy-a60810.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/mtk-phy-a60810.h 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,3121 @@
+#ifdef CONFIG_A60810_SUPPORT
+#ifndef __MTK_PHY_A60810_H
+#define __MTK_PHY_A60810_H
+
+#define U2_SR_COEF_A60810 26
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct u2phy_reg_a60810 {
+ //0x0
+ PHY_LE32 usbphyacr0;
+ PHY_LE32 usbphyacr1;
+ PHY_LE32 usbphyacr2;
+ PHY_LE32 reserve0;
+ //0x10
+ PHY_LE32 usbphyacr4;
+ PHY_LE32 usbphyacr5;
+ PHY_LE32 usbphyacr6;
+ PHY_LE32 u2phyacr3;
+ //0x20
+ PHY_LE32 u2phyacr4;
+ PHY_LE32 u2phyamon0;
+ PHY_LE32 reserve1[2];
+ //0x30~0x50
+ PHY_LE32 reserve2[12];
+ //0x60
+ PHY_LE32 u2phydcr0;
+ PHY_LE32 u2phydcr1;
+ PHY_LE32 u2phydtm0;
+ PHY_LE32 u2phydtm1;
+ //0x70
+ PHY_LE32 u2phydmon0;
+ PHY_LE32 u2phydmon1;
+ PHY_LE32 u2phydmon2;
+ PHY_LE32 u2phydmon3;
+ //0x80
+ PHY_LE32 u2phybc12c;
+ PHY_LE32 u2phybc12c1;
+ PHY_LE32 reserve3[2];
+ //0x90~0xd0
+ PHY_LE32 reserve4[20];
+ //0xe0
+ PHY_LE32 regfppc;
+ PHY_LE32 reserve5[3];
+ //0xf0
+ PHY_LE32 versionc;
+ PHY_LE32 reserve6[2];
+ PHY_LE32 regfcom;
+};
+
+//U3D_USBPHYACR0
+#define A60810_RG_USB20_MPX_OUT_SEL (0x7<<28) //30:28
+#define A60810_RG_USB20_TX_PH_ROT_SEL (0x7<<24) //26:24
+#define A60810_RG_USB20_PLL_DIVEN (0x7<<20) //22:20
+#define A60810_RG_USB20_PLL_BR (0x1<<18) //18:18
+#define A60810_RG_USB20_PLL_BP (0x1<<17) //17:17
+#define A60810_RG_USB20_PLL_BLP (0x1<<16) //16:16
+#define A60810_RG_USB20_USBPLL_FORCE_ON (0x1<<15) //15:15
+#define A60810_RG_USB20_PLL_FBDIV (0x7f<<8) //14:8
+#define A60810_RG_USB20_PLL_PREDIV (0x3<<6) //7:6
+#define A60810_RG_USB20_INTR_EN (0x1<<5) //5:5
+#define A60810_RG_USB20_REF_EN (0x1<<4) //4:4
+#define A60810_RG_USB20_BGR_DIV (0x3<<2) //3:2
+#define A60810_RG_SIFSLV_CHP_EN (0x1<<1) //1:1
+#define A60810_RG_SIFSLV_BGR_EN (0x1<<0) //0:0
+
+//U3D_USBPHYACR1
+#define A60810_RG_USB20_INTR_CAL (0x1f<<19) //23:19
+#define A60810_RG_USB20_OTG_VBUSTH (0x7<<16) //18:16
+#define A60810_RG_USB20_VRT_VREF_SEL (0x7<<12) //14:12
+#define A60810_RG_USB20_TERM_VREF_SEL (0x7<<8) //10:8
+#define A60810_RG_USB20_MPX_SEL (0xff<<0) //7:0
+
+//U3D_USBPHYACR2
+#define A60810_RG_SIFSLV_MAC_BANDGAP_EN (0x1<<17) //17:17
+#define A60810_RG_SIFSLV_MAC_CHOPPER_EN (0x1<<16) //16:16
+#define A60810_RG_USB20_CLKREF_REV (0xffff<<0) //15:0
+
+//U3D_USBPHYACR4
+#define A60810_RG_USB20_DP_ABIST_SOURCE_EN (0x1<<31) //31:31
+#define A60810_RG_USB20_DP_ABIST_SELE (0xf<<24) //27:24
+#define A60810_RG_USB20_ICUSB_EN (0x1<<16) //16:16
+#define A60810_RG_USB20_LS_CR (0x7<<12) //14:12
+#define A60810_RG_USB20_FS_CR (0x7<<8) //10:8
+#define A60810_RG_USB20_LS_SR (0x7<<4) //6:4
+#define A60810_RG_USB20_FS_SR (0x7<<0) //2:0
+
+//U3D_USBPHYACR5
+#define A60810_RG_USB20_DISC_FIT_EN (0x1<<28) //28:28
+#define A60810_RG_USB20_INIT_SQ_EN_DG (0x3<<26) //27:26
+#define A60810_RG_USB20_HSTX_TMODE_SEL (0x3<<24) //25:24
+#define A60810_RG_USB20_SQD (0x3<<22) //23:22
+#define A60810_RG_USB20_DISCD (0x3<<20) //21:20
+#define A60810_RG_USB20_HSTX_TMODE_EN (0x1<<19) //19:19
+#define A60810_RG_USB20_PHYD_MONEN (0x1<<18) //18:18
+#define A60810_RG_USB20_INLPBK_EN (0x1<<17) //17:17
+#define A60810_RG_USB20_CHIRP_EN (0x1<<16) //16:16
+#define A60810_RG_USB20_HSTX_SRCAL_EN (0x1<<15) //15:15
+#define A60810_RG_USB20_HSTX_SRCTRL (0x7<<12) //14:12
+#define A60810_RG_USB20_HS_100U_U3_EN (0x1<<11) //11:11
+#define A60810_RG_USB20_GBIAS_ENB (0x1<<10) //10:10
+#define A60810_RG_USB20_DM_ABIST_SOURCE_EN (0x1<<7) //7:7
+#define A60810_RG_USB20_DM_ABIST_SELE (0xf<<0) //3:0
+
+//U3D_USBPHYACR6
+#define A60810_RG_USB20_PHY_REV (0xff<<24) //31:24
+#define A60810_RG_USB20_BC11_SW_EN (0x1<<23) //23:23
+#define A60810_RG_USB20_SR_CLK_SEL (0x1<<22) //22:22
+#define A60810_RG_USB20_OTG_VBUSCMP_EN (0x1<<20) //20:20
+#define A60810_RG_USB20_OTG_ABIST_EN (0x1<<19) //19:19
+#define A60810_RG_USB20_OTG_ABIST_SELE (0x7<<16) //18:16
+#define A60810_RG_USB20_HSRX_MMODE_SELE (0x3<<12) //13:12
+#define A60810_RG_USB20_HSRX_BIAS_EN_SEL (0x3<<9) //10:9
+#define A60810_RG_USB20_HSRX_TMODE_EN (0x1<<8) //8:8
+#define A60810_RG_USB20_DISCTH (0xf<<4) //7:4
+#define A60810_RG_USB20_SQTH (0xf<<0) //3:0
+
+//U3D_U2PHYACR3
+#define A60810_RG_USB20_HSTX_DBIST (0xf<<28) //31:28
+#define A60810_RG_USB20_HSTX_BIST_EN (0x1<<26) //26:26
+#define A60810_RG_USB20_HSTX_I_EN_MODE (0x3<<24) //25:24
+#define A60810_RG_USB20_USB11_TMODE_EN (0x1<<19) //19:19
+#define A60810_RG_USB20_TMODE_FS_LS_TX_EN (0x1<<18) //18:18
+#define A60810_RG_USB20_TMODE_FS_LS_RCV_EN (0x1<<17) //17:17
+#define A60810_RG_USB20_TMODE_FS_LS_MODE (0x1<<16) //16:16
+#define A60810_RG_USB20_HS_TERM_EN_MODE (0x3<<13) //14:13
+#define A60810_RG_USB20_PUPD_BIST_EN (0x1<<12) //12:12
+#define A60810_RG_USB20_EN_PU_DM (0x1<<11) //11:11
+#define A60810_RG_USB20_EN_PD_DM (0x1<<10) //10:10
+#define A60810_RG_USB20_EN_PU_DP (0x1<<9) //9:9
+#define A60810_RG_USB20_EN_PD_DP (0x1<<8) //8:8
+
+//U3D_U2PHYACR4
+#define A60810_RG_USB20_DP_100K_MODE (0x1<<18) //18:18
+#define A60810_RG_USB20_DM_100K_EN (0x1<<17) //17:17
+#define A60810_USB20_DP_100K_EN (0x1<<16) //16:16
+#define A60810_USB20_GPIO_DM_I (0x1<<15) //15:15
+#define A60810_USB20_GPIO_DP_I (0x1<<14) //14:14
+#define A60810_USB20_GPIO_DM_OE (0x1<<13) //13:13
+#define A60810_USB20_GPIO_DP_OE (0x1<<12) //12:12
+#define A60810_RG_USB20_GPIO_CTL (0x1<<9) //9:9
+#define A60810_USB20_GPIO_MODE (0x1<<8) //8:8
+#define A60810_RG_USB20_TX_BIAS_EN (0x1<<5) //5:5
+#define A60810_RG_USB20_TX_VCMPDN_EN (0x1<<4) //4:4
+#define A60810_RG_USB20_HS_SQ_EN_MODE (0x3<<2) //3:2
+#define A60810_RG_USB20_HS_RCV_EN_MODE (0x3<<0) //1:0
+
+//U3D_U2PHYAMON0
+#define A60810_RGO_USB20_GPIO_DM_O (0x1<<1) //1:1
+#define A60810_RGO_USB20_GPIO_DP_O (0x1<<0) //0:0
+
+//U3D_U2PHYDCR0
+#define A60810_RG_USB20_CDR_TST (0x3<<30) //31:30
+#define A60810_RG_USB20_GATED_ENB (0x1<<29) //29:29
+#define A60810_RG_USB20_TESTMODE (0x3<<26) //27:26
+#define A60810_RG_SIFSLV_USB20_PLL_STABLE (0x1<<25) //25:25
+#define A60810_RG_SIFSLV_USB20_PLL_FORCE_ON (0x1<<24) //24:24
+#define A60810_RG_USB20_PHYD_RESERVE (0xffff<<8) //23:8
+#define A60810_RG_USB20_EBTHRLD (0x1<<7) //7:7
+#define A60810_RG_USB20_EARLY_HSTX_I (0x1<<6) //6:6
+#define A60810_RG_USB20_TX_TST (0x1<<5) //5:5
+#define A60810_RG_USB20_NEGEDGE_ENB (0x1<<4) //4:4
+#define A60810_RG_USB20_CDR_FILT (0xf<<0) //3:0
+
+//U3D_U2PHYDCR1
+#define A60810_RG_USB20_PROBE_SEL (0xff<<24) //31:24
+#define A60810_RG_USB20_DRVVBUS (0x1<<23) //23:23
+#define A60810_RG_DEBUG_EN (0x1<<22) //22:22
+#define A60810_RG_USB20_OTG_PROBE (0x3<<20) //21:20
+#define A60810_RG_USB20_SW_PLLMODE (0x3<<18) //19:18
+#define A60810_RG_USB20_BERTH (0x3<<16) //17:16
+#define A60810_RG_USB20_LBMODE (0x3<<13) //14:13
+#define A60810_RG_USB20_FORCE_TAP (0x1<<12) //12:12
+#define A60810_RG_USB20_TAPSEL (0xfff<<0) //11:0
+
+//U3D_U2PHYDTM0
+#define A60810_RG_UART_MODE (0x3<<30) //31:30
+#define A60810_FORCE_UART_I (0x1<<29) //29:29
+#define A60810_FORCE_UART_BIAS_EN (0x1<<28) //28:28
+#define A60810_FORCE_UART_TX_OE (0x1<<27) //27:27
+#define A60810_FORCE_UART_EN (0x1<<26) //26:26
+#define A60810_FORCE_USB_CLKEN (0x1<<25) //25:25
+#define A60810_FORCE_DRVVBUS (0x1<<24) //24:24
+#define A60810_FORCE_DATAIN (0x1<<23) //23:23
+#define A60810_FORCE_TXVALID (0x1<<22) //22:22
+#define A60810_FORCE_DM_PULLDOWN (0x1<<21) //21:21
+#define A60810_FORCE_DP_PULLDOWN (0x1<<20) //20:20
+#define A60810_FORCE_XCVRSEL (0x1<<19) //19:19
+#define A60810_FORCE_SUSPENDM (0x1<<18) //18:18
+#define A60810_FORCE_TERMSEL (0x1<<17) //17:17
+#define A60810_FORCE_OPMODE (0x1<<16) //16:16
+#define A60810_UTMI_MUXSEL (0x1<<15) //15:15
+#define A60810_RG_RESET (0x1<<14) //14:14
+#define A60810_RG_DATAIN (0xf<<10) //13:10
+#define A60810_RG_TXVALIDH (0x1<<9) //9:9
+#define A60810_RG_TXVALID (0x1<<8) //8:8
+#define A60810_RG_DMPULLDOWN (0x1<<7) //7:7
+#define A60810_RG_DPPULLDOWN (0x1<<6) //6:6
+#define A60810_RG_XCVRSEL (0x3<<4) //5:4
+#define A60810_RG_SUSPENDM (0x1<<3) //3:3
+#define A60810_RG_TERMSEL (0x1<<2) //2:2
+#define A60810_RG_OPMODE (0x3<<0) //1:0
+
+//U3D_U2PHYDTM1
+#define A60810_RG_USB20_PRBS7_EN (0x1<<31) //31:31
+#define A60810_RG_USB20_PRBS7_BITCNT (0x3f<<24) //29:24
+#define A60810_RG_USB20_CLK48M_EN (0x1<<23) //23:23
+#define A60810_RG_USB20_CLK60M_EN (0x1<<22) //22:22
+#define A60810_RG_UART_I (0x1<<19) //19:19
+#define A60810_RG_UART_BIAS_EN (0x1<<18) //18:18
+#define A60810_RG_UART_TX_OE (0x1<<17) //17:17
+#define A60810_RG_UART_EN (0x1<<16) //16:16
+#define A60810_RG_IP_U2_PORT_POWER (0x1<<15) //15:15
+#define A60810_FORCE_IP_U2_PORT_POWER (0x1<<14) //14:14
+#define A60810_FORCE_VBUSVALID (0x1<<13) //13:13
+#define A60810_FORCE_SESSEND (0x1<<12) //12:12
+#define A60810_FORCE_BVALID (0x1<<11) //11:11
+#define A60810_FORCE_AVALID (0x1<<10) //10:10
+#define A60810_FORCE_IDDIG (0x1<<9) //9:9
+#define A60810_FORCE_IDPULLUP (0x1<<8) //8:8
+#define A60810_RG_VBUSVALID (0x1<<5) //5:5
+#define A60810_RG_SESSEND (0x1<<4) //4:4
+#define A60810_RG_BVALID (0x1<<3) //3:3
+#define A60810_RG_AVALID (0x1<<2) //2:2
+#define A60810_RG_IDDIG (0x1<<1) //1:1
+#define A60810_RG_IDPULLUP (0x1<<0) //0:0
+
+//U3D_U2PHYDMON0
+#define A60810_RG_USB20_PRBS7_BERTH (0xff<<0) //7:0
+
+//U3D_U2PHYDMON1
+#define A60810_USB20_UART_O (0x1<<31) //31:31
+#define A60810_RGO_USB20_LB_PASS (0x1<<30) //30:30
+#define A60810_RGO_USB20_LB_DONE (0x1<<29) //29:29
+#define A60810_AD_USB20_BVALID (0x1<<28) //28:28
+#define A60810_USB20_IDDIG (0x1<<27) //27:27
+#define A60810_AD_USB20_VBUSVALID (0x1<<26) //26:26
+#define A60810_AD_USB20_SESSEND (0x1<<25) //25:25
+#define A60810_AD_USB20_AVALID (0x1<<24) //24:24
+#define A60810_USB20_LINE_STATE (0x3<<22) //23:22
+#define A60810_USB20_HST_DISCON (0x1<<21) //21:21
+#define A60810_USB20_TX_READY (0x1<<20) //20:20
+#define A60810_USB20_RX_ERROR (0x1<<19) //19:19
+#define A60810_USB20_RX_ACTIVE (0x1<<18) //18:18
+#define A60810_USB20_RX_VALIDH (0x1<<17) //17:17
+#define A60810_USB20_RX_VALID (0x1<<16) //16:16
+#define A60810_USB20_DATA_OUT (0xffff<<0) //15:0
+
+//U3D_U2PHYDMON2
+#define A60810_RGO_TXVALID_CNT (0xff<<24) //31:24
+#define A60810_RGO_RXACTIVE_CNT (0xff<<16) //23:16
+#define A60810_RGO_USB20_LB_BERCNT (0xff<<8) //15:8
+#define A60810_USB20_PROBE_OUT (0xff<<0) //7:0
+
+//U3D_U2PHYDMON3
+#define A60810_RGO_USB20_PRBS7_ERRCNT (0xffff<<16) //31:16
+#define A60810_RGO_USB20_PRBS7_DONE (0x1<<3) //3:3
+#define A60810_RGO_USB20_PRBS7_LOCK (0x1<<2) //2:2
+#define A60810_RGO_USB20_PRBS7_PASS (0x1<<1) //1:1
+#define A60810_RGO_USB20_PRBS7_PASSTH (0x1<<0) //0:0
+
+//U3D_U2PHYBC12C
+#define A60810_RG_SIFSLV_CHGDT_DEGLCH_CNT (0xf<<28) //31:28
+#define A60810_RG_SIFSLV_CHGDT_CTRL_CNT (0xf<<24) //27:24
+#define A60810_RG_SIFSLV_CHGDT_FORCE_MODE (0x1<<16) //16:16
+#define A60810_RG_CHGDT_ISRC_LEV (0x3<<14) //15:14
+#define A60810_RG_CHGDT_VDATSRC (0x1<<13) //13:13
+#define A60810_RG_CHGDT_BGVREF_SEL (0x7<<10) //12:10
+#define A60810_RG_CHGDT_RDVREF_SEL (0x3<<8) //9:8
+#define A60810_RG_CHGDT_ISRC_DP (0x1<<7) //7:7
+#define A60810_RG_SIFSLV_CHGDT_OPOUT_DM (0x1<<6) //6:6
+#define A60810_RG_CHGDT_VDAT_DM (0x1<<5) //5:5
+#define A60810_RG_CHGDT_OPOUT_DP (0x1<<4) //4:4
+#define A60810_RG_SIFSLV_CHGDT_VDAT_DP (0x1<<3) //3:3
+#define A60810_RG_SIFSLV_CHGDT_COMP_EN (0x1<<2) //2:2
+#define A60810_RG_SIFSLV_CHGDT_OPDRV_EN (0x1<<1) //1:1
+#define A60810_RG_CHGDT_EN (0x1<<0) //0:0
+
+//U3D_U2PHYBC12C1
+#define A60810_RG_CHGDT_REV (0xff<<0) //7:0
+
+//U3D_REGFPPC
+#define A60810_USB11_OTG_REG (0x1<<4) //4:4
+#define A60810_USB20_OTG_REG (0x1<<3) //3:3
+#define A60810_CHGDT_REG (0x1<<2) //2:2
+#define A60810_USB11_REG (0x1<<1) //1:1
+#define A60810_USB20_REG (0x1<<0) //0:0
+
+//U3D_VERSIONC
+#define A60810_VERSION_CODE_REGFILE (0xff<<24) //31:24
+#define A60810_USB11_VERSION_CODE (0xff<<16) //23:16
+#define A60810_VERSION_CODE_ANA (0xff<<8) //15:8
+#define A60810_VERSION_CODE_DIG (0xff<<0) //7:0
+
+//U3D_REGFCOM
+#define A60810_RG_PAGE (0xff<<24) //31:24
+#define A60810_I2C_MODE (0x1<<16) //16:16
+
+/* OFFSET */
+
+//U3D_USBPHYACR0
+#define A60810_RG_USB20_MPX_OUT_SEL_OFST (28)
+#define A60810_RG_USB20_TX_PH_ROT_SEL_OFST (24)
+#define A60810_RG_USB20_PLL_DIVEN_OFST (20)
+#define A60810_RG_USB20_PLL_BR_OFST (18)
+#define A60810_RG_USB20_PLL_BP_OFST (17)
+#define A60810_RG_USB20_PLL_BLP_OFST (16)
+#define A60810_RG_USB20_USBPLL_FORCE_ON_OFST (15)
+#define A60810_RG_USB20_PLL_FBDIV_OFST (8)
+#define A60810_RG_USB20_PLL_PREDIV_OFST (6)
+#define A60810_RG_USB20_INTR_EN_OFST (5)
+#define A60810_RG_USB20_REF_EN_OFST (4)
+#define A60810_RG_USB20_BGR_DIV_OFST (2)
+#define A60810_RG_SIFSLV_CHP_EN_OFST (1)
+#define A60810_RG_SIFSLV_BGR_EN_OFST (0)
+
+//U3D_USBPHYACR1
+#define A60810_RG_USB20_INTR_CAL_OFST (19)
+#define A60810_RG_USB20_OTG_VBUSTH_OFST (16)
+#define A60810_RG_USB20_VRT_VREF_SEL_OFST (12)
+#define A60810_RG_USB20_TERM_VREF_SEL_OFST (8)
+#define A60810_RG_USB20_MPX_SEL_OFST (0)
+
+//U3D_USBPHYACR2
+#define A60810_RG_SIFSLV_MAC_BANDGAP_EN_OFST (17)
+#define A60810_RG_SIFSLV_MAC_CHOPPER_EN_OFST (16)
+#define A60810_RG_USB20_CLKREF_REV_OFST (0)
+
+//U3D_USBPHYACR4
+#define A60810_RG_USB20_DP_ABIST_SOURCE_EN_OFST (31)
+#define A60810_RG_USB20_DP_ABIST_SELE_OFST (24)
+#define A60810_RG_USB20_ICUSB_EN_OFST (16)
+#define A60810_RG_USB20_LS_CR_OFST (12)
+#define A60810_RG_USB20_FS_CR_OFST (8)
+#define A60810_RG_USB20_LS_SR_OFST (4)
+#define A60810_RG_USB20_FS_SR_OFST (0)
+
+//U3D_USBPHYACR5
+#define A60810_RG_USB20_DISC_FIT_EN_OFST (28)
+#define A60810_RG_USB20_INIT_SQ_EN_DG_OFST (26)
+#define A60810_RG_USB20_HSTX_TMODE_SEL_OFST (24)
+#define A60810_RG_USB20_SQD_OFST (22)
+#define A60810_RG_USB20_DISCD_OFST (20)
+#define A60810_RG_USB20_HSTX_TMODE_EN_OFST (19)
+#define A60810_RG_USB20_PHYD_MONEN_OFST (18)
+#define A60810_RG_USB20_INLPBK_EN_OFST (17)
+#define A60810_RG_USB20_CHIRP_EN_OFST (16)
+#define A60810_RG_USB20_HSTX_SRCAL_EN_OFST (15)
+#define A60810_RG_USB20_HSTX_SRCTRL_OFST (12)
+#define A60810_RG_USB20_HS_100U_U3_EN_OFST (11)
+#define A60810_RG_USB20_GBIAS_ENB_OFST (10)
+#define A60810_RG_USB20_DM_ABIST_SOURCE_EN_OFST (7)
+#define A60810_RG_USB20_DM_ABIST_SELE_OFST (0)
+
+//U3D_USBPHYACR6
+#define A60810_RG_USB20_PHY_REV_OFST (24)
+#define A60810_RG_USB20_BC11_SW_EN_OFST (23)
+#define A60810_RG_USB20_SR_CLK_SEL_OFST (22)
+#define A60810_RG_USB20_OTG_VBUSCMP_EN_OFST (20)
+#define A60810_RG_USB20_OTG_ABIST_EN_OFST (19)
+#define A60810_RG_USB20_OTG_ABIST_SELE_OFST (16)
+#define A60810_RG_USB20_HSRX_MMODE_SELE_OFST (12)
+#define A60810_RG_USB20_HSRX_BIAS_EN_SEL_OFST (9)
+#define A60810_RG_USB20_HSRX_TMODE_EN_OFST (8)
+#define A60810_RG_USB20_DISCTH_OFST (4)
+#define A60810_RG_USB20_SQTH_OFST (0)
+
+//U3D_U2PHYACR3
+#define A60810_RG_USB20_HSTX_DBIST_OFST (28)
+#define A60810_RG_USB20_HSTX_BIST_EN_OFST (26)
+#define A60810_RG_USB20_HSTX_I_EN_MODE_OFST (24)
+#define A60810_RG_USB20_USB11_TMODE_EN_OFST (19)
+#define A60810_RG_USB20_TMODE_FS_LS_TX_EN_OFST (18)
+#define A60810_RG_USB20_TMODE_FS_LS_RCV_EN_OFST (17)
+#define A60810_RG_USB20_TMODE_FS_LS_MODE_OFST (16)
+#define A60810_RG_USB20_HS_TERM_EN_MODE_OFST (13)
+#define A60810_RG_USB20_PUPD_BIST_EN_OFST (12)
+#define A60810_RG_USB20_EN_PU_DM_OFST (11)
+#define A60810_RG_USB20_EN_PD_DM_OFST (10)
+#define A60810_RG_USB20_EN_PU_DP_OFST (9)
+#define A60810_RG_USB20_EN_PD_DP_OFST (8)
+
+//U3D_U2PHYACR4
+#define A60810_RG_USB20_DP_100K_MODE_OFST (18)
+#define A60810_RG_USB20_DM_100K_EN_OFST (17)
+#define A60810_USB20_DP_100K_EN_OFST (16)
+#define A60810_USB20_GPIO_DM_I_OFST (15)
+#define A60810_USB20_GPIO_DP_I_OFST (14)
+#define A60810_USB20_GPIO_DM_OE_OFST (13)
+#define A60810_USB20_GPIO_DP_OE_OFST (12)
+#define A60810_RG_USB20_GPIO_CTL_OFST (9)
+#define A60810_USB20_GPIO_MODE_OFST (8)
+#define A60810_RG_USB20_TX_BIAS_EN_OFST (5)
+#define A60810_RG_USB20_TX_VCMPDN_EN_OFST (4)
+#define A60810_RG_USB20_HS_SQ_EN_MODE_OFST (2)
+#define A60810_RG_USB20_HS_RCV_EN_MODE_OFST (0)
+
+//U3D_U2PHYAMON0
+#define A60810_RGO_USB20_GPIO_DM_O_OFST (1)
+#define A60810_RGO_USB20_GPIO_DP_O_OFST (0)
+
+//U3D_U2PHYDCR0
+#define A60810_RG_USB20_CDR_TST_OFST (30)
+#define A60810_RG_USB20_GATED_ENB_OFST (29)
+#define A60810_RG_USB20_TESTMODE_OFST (26)
+#define A60810_RG_SIFSLV_USB20_PLL_STABLE_OFST (25)
+#define A60810_RG_SIFSLV_USB20_PLL_FORCE_ON_OFST (24)
+#define A60810_RG_USB20_PHYD_RESERVE_OFST (8)
+#define A60810_RG_USB20_EBTHRLD_OFST (7)
+#define A60810_RG_USB20_EARLY_HSTX_I_OFST (6)
+#define A60810_RG_USB20_TX_TST_OFST (5)
+#define A60810_RG_USB20_NEGEDGE_ENB_OFST (4)
+#define A60810_RG_USB20_CDR_FILT_OFST (0)
+
+//U3D_U2PHYDCR1
+#define A60810_RG_USB20_PROBE_SEL_OFST (24)
+#define A60810_RG_USB20_DRVVBUS_OFST (23)
+#define A60810_RG_DEBUG_EN_OFST (22)
+#define A60810_RG_USB20_OTG_PROBE_OFST (20)
+#define A60810_RG_USB20_SW_PLLMODE_OFST (18)
+#define A60810_RG_USB20_BERTH_OFST (16)
+#define A60810_RG_USB20_LBMODE_OFST (13)
+#define A60810_RG_USB20_FORCE_TAP_OFST (12)
+#define A60810_RG_USB20_TAPSEL_OFST (0)
+
+//U3D_U2PHYDTM0
+#define A60810_RG_UART_MODE_OFST (30)
+#define A60810_FORCE_UART_I_OFST (29)
+#define A60810_FORCE_UART_BIAS_EN_OFST (28)
+#define A60810_FORCE_UART_TX_OE_OFST (27)
+#define A60810_FORCE_UART_EN_OFST (26)
+#define A60810_FORCE_USB_CLKEN_OFST (25)
+#define A60810_FORCE_DRVVBUS_OFST (24)
+#define A60810_FORCE_DATAIN_OFST (23)
+#define A60810_FORCE_TXVALID_OFST (22)
+#define A60810_FORCE_DM_PULLDOWN_OFST (21)
+#define A60810_FORCE_DP_PULLDOWN_OFST (20)
+#define A60810_FORCE_XCVRSEL_OFST (19)
+#define A60810_FORCE_SUSPENDM_OFST (18)
+#define A60810_FORCE_TERMSEL_OFST (17)
+#define A60810_FORCE_OPMODE_OFST (16)
+#define A60810_UTMI_MUXSEL_OFST (15)
+#define A60810_RG_RESET_OFST (14)
+#define A60810_RG_DATAIN_OFST (10)
+#define A60810_RG_TXVALIDH_OFST (9)
+#define A60810_RG_TXVALID_OFST (8)
+#define A60810_RG_DMPULLDOWN_OFST (7)
+#define A60810_RG_DPPULLDOWN_OFST (6)
+#define A60810_RG_XCVRSEL_OFST (4)
+#define A60810_RG_SUSPENDM_OFST (3)
+#define A60810_RG_TERMSEL_OFST (2)
+#define A60810_RG_OPMODE_OFST (0)
+
+//U3D_U2PHYDTM1
+#define A60810_RG_USB20_PRBS7_EN_OFST (31)
+#define A60810_RG_USB20_PRBS7_BITCNT_OFST (24)
+#define A60810_RG_USB20_CLK48M_EN_OFST (23)
+#define A60810_RG_USB20_CLK60M_EN_OFST (22)
+#define A60810_RG_UART_I_OFST (19)
+#define A60810_RG_UART_BIAS_EN_OFST (18)
+#define A60810_RG_UART_TX_OE_OFST (17)
+#define A60810_RG_UART_EN_OFST (16)
+#define A60810_RG_IP_U2_PORT_POWER_OFST (15)
+#define A60810_FORCE_IP_U2_PORT_POWER_OFST (14)
+#define A60810_FORCE_VBUSVALID_OFST (13)
+#define A60810_FORCE_SESSEND_OFST (12)
+#define A60810_FORCE_BVALID_OFST (11)
+#define A60810_FORCE_AVALID_OFST (10)
+#define A60810_FORCE_IDDIG_OFST (9)
+#define A60810_FORCE_IDPULLUP_OFST (8)
+#define A60810_RG_VBUSVALID_OFST (5)
+#define A60810_RG_SESSEND_OFST (4)
+#define A60810_RG_BVALID_OFST (3)
+#define A60810_RG_AVALID_OFST (2)
+#define A60810_RG_IDDIG_OFST (1)
+#define A60810_RG_IDPULLUP_OFST (0)
+
+//U3D_U2PHYDMON0
+#define A60810_RG_USB20_PRBS7_BERTH_OFST (0)
+
+//U3D_U2PHYDMON1
+#define A60810_USB20_UART_O_OFST (31)
+#define A60810_RGO_USB20_LB_PASS_OFST (30)
+#define A60810_RGO_USB20_LB_DONE_OFST (29)
+#define A60810_AD_USB20_BVALID_OFST (28)
+#define A60810_USB20_IDDIG_OFST (27)
+#define A60810_AD_USB20_VBUSVALID_OFST (26)
+#define A60810_AD_USB20_SESSEND_OFST (25)
+#define A60810_AD_USB20_AVALID_OFST (24)
+#define A60810_USB20_LINE_STATE_OFST (22)
+#define A60810_USB20_HST_DISCON_OFST (21)
+#define A60810_USB20_TX_READY_OFST (20)
+#define A60810_USB20_RX_ERROR_OFST (19)
+#define A60810_USB20_RX_ACTIVE_OFST (18)
+#define A60810_USB20_RX_VALIDH_OFST (17)
+#define A60810_USB20_RX_VALID_OFST (16)
+#define A60810_USB20_DATA_OUT_OFST (0)
+
+//U3D_U2PHYDMON2
+#define A60810_RGO_TXVALID_CNT_OFST (24)
+#define A60810_RGO_RXACTIVE_CNT_OFST (16)
+#define A60810_RGO_USB20_LB_BERCNT_OFST (8)
+#define A60810_USB20_PROBE_OUT_OFST (0)
+
+//U3D_U2PHYDMON3
+#define A60810_RGO_USB20_PRBS7_ERRCNT_OFST (16)
+#define A60810_RGO_USB20_PRBS7_DONE_OFST (3)
+#define A60810_RGO_USB20_PRBS7_LOCK_OFST (2)
+#define A60810_RGO_USB20_PRBS7_PASS_OFST (1)
+#define A60810_RGO_USB20_PRBS7_PASSTH_OFST (0)
+
+//U3D_U2PHYBC12C
+#define A60810_RG_SIFSLV_CHGDT_DEGLCH_CNT_OFST (28)
+#define A60810_RG_SIFSLV_CHGDT_CTRL_CNT_OFST (24)
+#define A60810_RG_SIFSLV_CHGDT_FORCE_MODE_OFST (16)
+#define A60810_RG_CHGDT_ISRC_LEV_OFST (14)
+#define A60810_RG_CHGDT_VDATSRC_OFST (13)
+#define A60810_RG_CHGDT_BGVREF_SEL_OFST (10)
+#define A60810_RG_CHGDT_RDVREF_SEL_OFST (8)
+#define A60810_RG_CHGDT_ISRC_DP_OFST (7)
+#define A60810_RG_SIFSLV_CHGDT_OPOUT_DM_OFST (6)
+#define A60810_RG_CHGDT_VDAT_DM_OFST (5)
+#define A60810_RG_CHGDT_OPOUT_DP_OFST (4)
+#define A60810_RG_SIFSLV_CHGDT_VDAT_DP_OFST (3)
+#define A60810_RG_SIFSLV_CHGDT_COMP_EN_OFST (2)
+#define A60810_RG_SIFSLV_CHGDT_OPDRV_EN_OFST (1)
+#define A60810_RG_CHGDT_EN_OFST (0)
+
+//U3D_U2PHYBC12C1
+#define A60810_RG_CHGDT_REV_OFST (0)
+
+//U3D_REGFPPC
+#define A60810_USB11_OTG_REG_OFST (4)
+#define A60810_USB20_OTG_REG_OFST (3)
+#define A60810_CHGDT_REG_OFST (2)
+#define A60810_USB11_REG_OFST (1)
+#define A60810_USB20_REG_OFST (0)
+
+//U3D_VERSIONC
+#define A60810_VERSION_CODE_REGFILE_OFST (24)
+#define A60810_USB11_VERSION_CODE_OFST (16)
+#define A60810_VERSION_CODE_ANA_OFST (8)
+#define A60810_VERSION_CODE_DIG_OFST (0)
+
+//U3D_REGFCOM
+#define A60810_RG_PAGE_OFST (24)
+#define A60810_I2C_MODE_OFST (16)
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct u3phya_reg_a60810 {
+ //0x0
+ PHY_LE32 reg0;
+ PHY_LE32 reg1;
+ PHY_LE32 reg2;
+ PHY_LE32 reg3;
+ //0x10
+ PHY_LE32 reg4;
+ PHY_LE32 reg5;
+ PHY_LE32 reg6;
+ PHY_LE32 reg7;
+ //0x20
+ PHY_LE32 reg8;
+ PHY_LE32 reg9;
+ PHY_LE32 rega;
+ PHY_LE32 regb;
+ //0x30
+ PHY_LE32 regc;
+};
+
+//U3D_reg0
+#define A60810_RG_SSUSB_BGR_EN (0x1<<31) //31:31
+#define A60810_RG_SSUSB_CHPEN (0x1<<30) //30:30
+#define A60810_RG_SSUSB_BG_DIV (0x3<<28) //29:28
+#define A60810_RG_SSUSB_INTR_EN (0x1<<26) //26:26
+#define A60810_RG_SSUSB_MPX_EN (0x1<<24) //24:24
+#define A60810_RG_SSUSB_MPX_SEL (0xff<<16) //23:16
+#define A60810_RG_SSUSB_REF_EN (0x1<<15) //15:15
+#define A60810_RG_SSUSB_VRT_VREF_SEL (0xf<<11) //14:11
+#define A60810_RG_SSUSB_BG_MONEN (0x1<<8) //8:8
+#define A60810_RG_SSUSB_INT_BIAS_SEL (0x1<<7) //7:7
+#define A60810_RG_SSUSB_EXT_BIAS_SEL (0x1<<6) //6:6
+#define A60810_RG_PCIE_CLKDRV_OFFSET (0x3<<2) //3:2
+#define A60810_RG_PCIE_CLKDRV_SLEW (0x3<<0) //1:0
+
+//U3D_reg1
+#define A60810_RG_PCIE_CLKDRV_AMP (0x7<<29) //31:29
+#define A60810_RG_SSUSB_XTAL_TST_A2DCK_EN (0x1<<28) //28:28
+#define A60810_RG_SSUSB_XTAL_MON_EN (0x1<<27) //27:27
+#define A60810_RG_SSUSB_XTAL_HYS (0x1<<26) //26:26
+#define A60810_RG_SSUSB_XTAL_TOP_RESERVE (0xffff<<10) //25:10
+#define A60810_RG_SSUSB_SYSPLL_PREDIV (0x3<<8) //9:8
+#define A60810_RG_SSUSB_SYSPLL_POSDIV (0x3<<6) //7:6
+#define A60810_RG_SSUSB_SYSPLL_VCO_DIV_SEL (0x1<<5) //5:5
+#define A60810_RG_SSUSB_SYSPLL_VOD_EN (0x1<<4) //4:4
+#define A60810_RG_SSUSB_SYSPLL_RST_DLY (0x3<<2) //3:2
+#define A60810_RG_SSUSB_SYSPLL_BLP (0x1<<1) //1:1
+#define A60810_RG_SSUSB_SYSPLL_BP (0x1<<0) //0:0
+
+//U3D_reg2
+#define A60810_RG_SSUSB_SYSPLL_BR (0x1<<31) //31:31
+#define A60810_RG_SSUSB_SYSPLL_BC (0x1<<30) //30:30
+#define A60810_RG_SSUSB_SYSPLL_MONCK_EN (0x1<<29) //29:29
+#define A60810_RG_SSUSB_SYSPLL_MONVC_EN (0x1<<28) //28:28
+#define A60810_RG_SSUSB_SYSPLL_MONREF_EN (0x1<<27) //27:27
+#define A60810_RG_SSUSB_SYSPLL_SDM_IFM (0x1<<26) //26:26
+#define A60810_RG_SSUSB_SYSPLL_SDM_OUT (0x1<<25) //25:25
+#define A60810_RG_SSUSB_SYSPLL_BACK_EN (0x1<<24) //24:24
+
+//U3D_reg3
+#define A60810_RG_SSUSB_SYSPLL_FBDIV (0x7fffffff<<1) //31:1
+#define A60810_RG_SSUSB_SYSPLL_HR_EN (0x1<<0) //0:0
+
+//U3D_reg4
+#define A60810_RG_SSUSB_SYSPLL_SDM_DI_EN (0x1<<31) //31:31
+#define A60810_RG_SSUSB_SYSPLL_SDM_DI_LS (0x3<<29) //30:29
+#define A60810_RG_SSUSB_SYSPLL_SDM_ORD (0x3<<27) //28:27
+#define A60810_RG_SSUSB_SYSPLL_SDM_MODE (0x3<<25) //26:25
+#define A60810_RG_SSUSB_SYSPLL_RESERVE (0xff<<17) //24:17
+#define A60810_RG_SSUSB_SYSPLL_TOP_RESERVE (0xffff<<1) //16:1
+
+//U3D_reg5
+#define A60810_RG_SSUSB_TX250MCK_INVB (0x1<<31) //31:31
+#define A60810_RG_SSUSB_IDRV_ITAILOP_EN (0x1<<30) //30:30
+#define A60810_RG_SSUSB_IDRV_CALIB (0x3f<<24) //29:24
+#define A60810_RG_SSUSB_IDEM_BIAS (0xf<<20) //23:20
+#define A60810_RG_SSUSB_TX_R50_FON (0x1<<19) //19:19
+#define A60810_RG_SSUSB_TX_SR (0x7<<16) //18:16
+#define A60810_RG_SSUSB_RXDET_RSEL (0x3<<14) //15:14
+#define A60810_RG_SSUSB_RXDET_UPDN_FORCE (0x1<<13) //13:13
+#define A60810_RG_SSUSB_RXDET_UPDN_SEL (0x1<<12) //12:12
+#define A60810_RG_SSUSB_RXDET_VTHSEL_L (0x3<<10) //11:10
+#define A60810_RG_SSUSB_RXDET_VTHSEL_H (0x3<<8) //9:8
+#define A60810_RG_SSUSB_CKMON_EN (0x1<<7) //7:7
+#define A60810_RG_SSUSB_TX_VLMON_EN (0x1<<6) //6:6
+#define A60810_RG_SSUSB_TX_VLMON_SEL (0x3<<4) //5:4
+#define A60810_RG_SSUSB_CKMON_SEL (0xf<<0) //3:0
+
+//U3D_reg6
+#define A60810_RG_SSUSB_TX_EIDLE_CM (0xf<<28) //31:28
+#define A60810_RG_SSUSB_RXLBTX_EN (0x1<<27) //27:27
+#define A60810_RG_SSUSB_TXLBRX_EN (0x1<<26) //26:26
+#define A60810_RG_SSUSB_RESERVE (0x3ff<<16) //25:16
+#define A60810_RG_SSUSB_PLL_POSDIV (0x3<<14) //15:14
+#define A60810_RG_SSUSB_PLL_AUTOK_LOAD (0x1<<13) //13:13
+#define A60810_RG_SSUSB_PLL_VOD_EN (0x1<<12) //12:12
+#define A60810_RG_SSUSB_PLL_MONREF_EN (0x1<<11) //11:11
+#define A60810_RG_SSUSB_PLL_MONCK_EN (0x1<<10) //10:10
+#define A60810_RG_SSUSB_PLL_MONVC_EN (0x1<<9) //9:9
+#define A60810_RG_SSUSB_PLL_RLH_EN (0x1<<8) //8:8
+#define A60810_RG_SSUSB_PLL_AUTOK_KS (0x3<<6) //7:6
+#define A60810_RG_SSUSB_PLL_AUTOK_KF (0x3<<4) //5:4
+#define A60810_RG_SSUSB_PLL_RST_DLY (0x3<<2) //3:2
+
+//U3D_reg7
+#define A60810_RG_SSUSB_PLL_RESERVE (0xffff<<16) //31:16
+#define A60810_RG_SSUSB_PLL_SSC_PRD (0xffff<<0) //15:0
+
+//U3D_reg8
+#define A60810_RG_SSUSB_PLL_SSC_PHASE_INI (0x1<<31) //31:31
+#define A60810_RG_SSUSB_PLL_SSC_TRI_EN (0x1<<30) //30:30
+#define A60810_RG_SSUSB_PLL_CLK_PH_INV (0x1<<29) //29:29
+#define A60810_RG_SSUSB_PLL_DDS_LPF_EN (0x1<<28) //28:28
+#define A60810_RG_SSUSB_PLL_DDS_RST_SEL (0x1<<27) //27:27
+#define A60810_RG_SSUSB_PLL_DDS_VADJ (0x1<<26) //26:26
+#define A60810_RG_SSUSB_PLL_DDS_MONEN (0x1<<25) //25:25
+#define A60810_RG_SSUSB_PLL_DDS_SEL_EXT (0x1<<24) //24:24
+#define A60810_RG_SSUSB_PLL_DDS_PI_PL_EN (0x1<<23) //23:23
+#define A60810_RG_SSUSB_PLL_DDS_FRAC_MUTE (0x7<<20) //22:20
+#define A60810_RG_SSUSB_PLL_DDS_HF_EN (0x1<<19) //19:19
+#define A60810_RG_SSUSB_PLL_DDS_C (0x7<<16) //18:16
+#define A60810_RG_SSUSB_PLL_DDS_PREDIV2 (0x1<<15) //15:15
+#define A60810_RG_SSUSB_LFPS_LPF (0x3<<13) //14:13
+
+//U3D_reg9
+#define A60810_RG_SSUSB_CDR_PD_DIV_BYPASS (0x1<<31) //31:31
+#define A60810_RG_SSUSB_CDR_PD_DIV_SEL (0x1<<30) //30:30
+#define A60810_RG_SSUSB_CDR_CPBIAS_SEL (0x1<<29) //29:29
+#define A60810_RG_SSUSB_CDR_OSCDET_EN (0x1<<28) //28:28
+#define A60810_RG_SSUSB_CDR_MONMUX (0x1<<27) //27:27
+#define A60810_RG_SSUSB_CDR_RST_DLY (0x3<<25) //26:25
+#define A60810_RG_SSUSB_CDR_RSTB_MANUAL (0x1<<24) //24:24
+#define A60810_RG_SSUSB_CDR_BYPASS (0x3<<22) //23:22
+#define A60810_RG_SSUSB_CDR_PI_SLEW (0x3<<20) //21:20
+#define A60810_RG_SSUSB_CDR_EPEN (0x1<<19) //19:19
+#define A60810_RG_SSUSB_CDR_AUTOK_LOAD (0x1<<18) //18:18
+#define A60810_RG_SSUSB_CDR_MONEN (0x1<<16) //16:16
+#define A60810_RG_SSUSB_CDR_MONEN_DIG (0x1<<15) //15:15
+#define A60810_RG_SSUSB_CDR_REGOD (0x3<<13) //14:13
+#define A60810_RG_SSUSB_CDR_AUTOK_KS (0x3<<11) //12:11
+#define A60810_RG_SSUSB_CDR_AUTOK_KF (0x3<<9) //10:9
+#define A60810_RG_SSUSB_RX_DAC_EN (0x1<<8) //8:8
+#define A60810_RG_SSUSB_RX_DAC_PWD (0x1<<7) //7:7
+#define A60810_RG_SSUSB_EQ_CURSEL (0x1<<6) //6:6
+#define A60810_RG_SSUSB_RX_DAC_MUX (0x1f<<1) //5:1
+#define A60810_RG_SSUSB_RX_R2T_EN (0x1<<0) //0:0
+
+//U3D_regA
+#define A60810_RG_SSUSB_RX_T2R_EN (0x1<<31) //31:31
+#define A60810_RG_SSUSB_RX_50_LOWER (0x7<<28) //30:28
+#define A60810_RG_SSUSB_RX_50_TAR (0x3<<26) //27:26
+#define A60810_RG_SSUSB_RX_SW_CTRL (0xf<<21) //24:21
+#define A60810_RG_PCIE_SIGDET_VTH (0x3<<19) //20:19
+#define A60810_RG_PCIE_SIGDET_LPF (0x3<<17) //18:17
+#define A60810_RG_SSUSB_LFPS_MON_EN (0x1<<16) //16:16
+#define A60810_RG_SSUSB_RXAFE_DCMON_SEL (0xf<<12) //15:12
+#define A60810_RG_SSUSB_RX_P1_ENTRY_PASS (0x1<<11) //11:11
+#define A60810_RG_SSUSB_RX_PD_RST (0x1<<10) //10:10
+#define A60810_RG_SSUSB_RX_PD_RST_PASS (0x1<<9) //9:9
+
+//U3D_regB
+#define A60810_RG_SSUSB_CDR_RESERVE (0xff<<24) //31:24
+#define A60810_RG_SSUSB_RXAFE_RESERVE (0xff<<16) //23:16
+#define A60810_RG_PCIE_RX_RESERVE (0xff<<8) //15:8
+#define A60810_RG_SSUSB_VRT_25M_EN (0x1<<7) //7:7
+#define A60810_RG_SSUSB_RX_PD_PICAL_SWAP (0x1<<6) //6:6
+#define A60810_RG_SSUSB_RX_DAC_MEAS_EN (0x1<<5) //5:5
+#define A60810_RG_SSUSB_MPX_SEL_L0 (0x1<<4) //4:4
+#define A60810_RG_SSUSB_LFPS_SLCOUT_SEL (0x1<<3) //3:3
+#define A60810_RG_SSUSB_LFPS_CMPOUT_SEL (0x1<<2) //2:2
+#define A60810_RG_PCIE_SIGDET_HF (0x3<<0) //1:0
+
+//U3D_regC
+#define A60810_RGS_SSUSB_RX_DEBUG_RESERVE (0xff<<0) //7:0
+
+/* OFFSET */
+
+//U3D_reg0
+#define A60810_RG_SSUSB_BGR_EN_OFST (31)
+#define A60810_RG_SSUSB_CHPEN_OFST (30)
+#define A60810_RG_SSUSB_BG_DIV_OFST (28)
+#define A60810_RG_SSUSB_INTR_EN_OFST (26)
+#define A60810_RG_SSUSB_MPX_EN_OFST (24)
+#define A60810_RG_SSUSB_MPX_SEL_OFST (16)
+#define A60810_RG_SSUSB_REF_EN_OFST (15)
+#define A60810_RG_SSUSB_VRT_VREF_SEL_OFST (11)
+#define A60810_RG_SSUSB_BG_MONEN_OFST (8)
+#define A60810_RG_SSUSB_INT_BIAS_SEL_OFST (7)
+#define A60810_RG_SSUSB_EXT_BIAS_SEL_OFST (6)
+#define A60810_RG_PCIE_CLKDRV_OFFSET_OFST (2)
+#define A60810_RG_PCIE_CLKDRV_SLEW_OFST (0)
+
+//U3D_reg1
+#define A60810_RG_PCIE_CLKDRV_AMP_OFST (29)
+#define A60810_RG_SSUSB_XTAL_TST_A2DCK_EN_OFST (28)
+#define A60810_RG_SSUSB_XTAL_MON_EN_OFST (27)
+#define A60810_RG_SSUSB_XTAL_HYS_OFST (26)
+#define A60810_RG_SSUSB_XTAL_TOP_RESERVE_OFST (10)
+#define A60810_RG_SSUSB_SYSPLL_PREDIV_OFST (8)
+#define A60810_RG_SSUSB_SYSPLL_POSDIV_OFST (6)
+#define A60810_RG_SSUSB_SYSPLL_VCO_DIV_SEL_OFST (5)
+#define A60810_RG_SSUSB_SYSPLL_VOD_EN_OFST (4)
+#define A60810_RG_SSUSB_SYSPLL_RST_DLY_OFST (2)
+#define A60810_RG_SSUSB_SYSPLL_BLP_OFST (1)
+#define A60810_RG_SSUSB_SYSPLL_BP_OFST (0)
+
+//U3D_reg2
+#define A60810_RG_SSUSB_SYSPLL_BR_OFST (31)
+#define A60810_RG_SSUSB_SYSPLL_BC_OFST (30)
+#define A60810_RG_SSUSB_SYSPLL_MONCK_EN_OFST (29)
+#define A60810_RG_SSUSB_SYSPLL_MONVC_EN_OFST (28)
+#define A60810_RG_SSUSB_SYSPLL_MONREF_EN_OFST (27)
+#define A60810_RG_SSUSB_SYSPLL_SDM_IFM_OFST (26)
+#define A60810_RG_SSUSB_SYSPLL_SDM_OUT_OFST (25)
+#define A60810_RG_SSUSB_SYSPLL_BACK_EN_OFST (24)
+
+//U3D_reg3
+#define A60810_RG_SSUSB_SYSPLL_FBDIV_OFST (1)
+#define A60810_RG_SSUSB_SYSPLL_HR_EN_OFST (0)
+
+//U3D_reg4
+#define A60810_RG_SSUSB_SYSPLL_SDM_DI_EN_OFST (31)
+#define A60810_RG_SSUSB_SYSPLL_SDM_DI_LS_OFST (29)
+#define A60810_RG_SSUSB_SYSPLL_SDM_ORD_OFST (27)
+#define A60810_RG_SSUSB_SYSPLL_SDM_MODE_OFST (25)
+#define A60810_RG_SSUSB_SYSPLL_RESERVE_OFST (17)
+#define A60810_RG_SSUSB_SYSPLL_TOP_RESERVE_OFST (1)
+
+//U3D_reg5
+#define A60810_RG_SSUSB_TX250MCK_INVB_OFST (31)
+#define A60810_RG_SSUSB_IDRV_ITAILOP_EN_OFST (30)
+#define A60810_RG_SSUSB_IDRV_CALIB_OFST (24)
+#define A60810_RG_SSUSB_IDEM_BIAS_OFST (20)
+#define A60810_RG_SSUSB_TX_R50_FON_OFST (19)
+#define A60810_RG_SSUSB_TX_SR_OFST (16)
+#define A60810_RG_SSUSB_RXDET_RSEL_OFST (14)
+#define A60810_RG_SSUSB_RXDET_UPDN_FORCE_OFST (13)
+#define A60810_RG_SSUSB_RXDET_UPDN_SEL_OFST (12)
+#define A60810_RG_SSUSB_RXDET_VTHSEL_L_OFST (10)
+#define A60810_RG_SSUSB_RXDET_VTHSEL_H_OFST (8)
+#define A60810_RG_SSUSB_CKMON_EN_OFST (7)
+#define A60810_RG_SSUSB_TX_VLMON_EN_OFST (6)
+#define A60810_RG_SSUSB_TX_VLMON_SEL_OFST (4)
+#define A60810_RG_SSUSB_CKMON_SEL_OFST (0)
+
+//U3D_reg6
+#define A60810_RG_SSUSB_TX_EIDLE_CM_OFST (28)
+#define A60810_RG_SSUSB_RXLBTX_EN_OFST (27)
+#define A60810_RG_SSUSB_TXLBRX_EN_OFST (26)
+#define A60810_RG_SSUSB_RESERVE_OFST (16)
+#define A60810_RG_SSUSB_PLL_POSDIV_OFST (14)
+#define A60810_RG_SSUSB_PLL_AUTOK_LOAD_OFST (13)
+#define A60810_RG_SSUSB_PLL_VOD_EN_OFST (12)
+#define A60810_RG_SSUSB_PLL_MONREF_EN_OFST (11)
+#define A60810_RG_SSUSB_PLL_MONCK_EN_OFST (10)
+#define A60810_RG_SSUSB_PLL_MONVC_EN_OFST (9)
+#define A60810_RG_SSUSB_PLL_RLH_EN_OFST (8)
+#define A60810_RG_SSUSB_PLL_AUTOK_KS_OFST (6)
+#define A60810_RG_SSUSB_PLL_AUTOK_KF_OFST (4)
+#define A60810_RG_SSUSB_PLL_RST_DLY_OFST (2)
+
+//U3D_reg7
+#define A60810_RG_SSUSB_PLL_RESERVE_OFST (16)
+#define A60810_RG_SSUSB_PLL_SSC_PRD_OFST (0)
+
+//U3D_reg8
+#define A60810_RG_SSUSB_PLL_SSC_PHASE_INI_OFST (31)
+#define A60810_RG_SSUSB_PLL_SSC_TRI_EN_OFST (30)
+#define A60810_RG_SSUSB_PLL_CLK_PH_INV_OFST (29)
+#define A60810_RG_SSUSB_PLL_DDS_LPF_EN_OFST (28)
+#define A60810_RG_SSUSB_PLL_DDS_RST_SEL_OFST (27)
+#define A60810_RG_SSUSB_PLL_DDS_VADJ_OFST (26)
+#define A60810_RG_SSUSB_PLL_DDS_MONEN_OFST (25)
+#define A60810_RG_SSUSB_PLL_DDS_SEL_EXT_OFST (24)
+#define A60810_RG_SSUSB_PLL_DDS_PI_PL_EN_OFST (23)
+#define A60810_RG_SSUSB_PLL_DDS_FRAC_MUTE_OFST (20)
+#define A60810_RG_SSUSB_PLL_DDS_HF_EN_OFST (19)
+#define A60810_RG_SSUSB_PLL_DDS_C_OFST (16)
+#define A60810_RG_SSUSB_PLL_DDS_PREDIV2_OFST (15)
+#define A60810_RG_SSUSB_LFPS_LPF_OFST (13)
+
+//U3D_reg9
+#define A60810_RG_SSUSB_CDR_PD_DIV_BYPASS_OFST (31)
+#define A60810_RG_SSUSB_CDR_PD_DIV_SEL_OFST (30)
+#define A60810_RG_SSUSB_CDR_CPBIAS_SEL_OFST (29)
+#define A60810_RG_SSUSB_CDR_OSCDET_EN_OFST (28)
+#define A60810_RG_SSUSB_CDR_MONMUX_OFST (27)
+#define A60810_RG_SSUSB_CDR_RST_DLY_OFST (25)
+#define A60810_RG_SSUSB_CDR_RSTB_MANUAL_OFST (24)
+#define A60810_RG_SSUSB_CDR_BYPASS_OFST (22)
+#define A60810_RG_SSUSB_CDR_PI_SLEW_OFST (20)
+#define A60810_RG_SSUSB_CDR_EPEN_OFST (19)
+#define A60810_RG_SSUSB_CDR_AUTOK_LOAD_OFST (18)
+#define A60810_RG_SSUSB_CDR_MONEN_OFST (16)
+#define A60810_RG_SSUSB_CDR_MONEN_DIG_OFST (15)
+#define A60810_RG_SSUSB_CDR_REGOD_OFST (13)
+#define A60810_RG_SSUSB_CDR_AUTOK_KS_OFST (11)
+#define A60810_RG_SSUSB_CDR_AUTOK_KF_OFST (9)
+#define A60810_RG_SSUSB_RX_DAC_EN_OFST (8)
+#define A60810_RG_SSUSB_RX_DAC_PWD_OFST (7)
+#define A60810_RG_SSUSB_EQ_CURSEL_OFST (6)
+#define A60810_RG_SSUSB_RX_DAC_MUX_OFST (1)
+#define A60810_RG_SSUSB_RX_R2T_EN_OFST (0)
+
+//U3D_regA
+#define A60810_RG_SSUSB_RX_T2R_EN_OFST (31)
+#define A60810_RG_SSUSB_RX_50_LOWER_OFST (28)
+#define A60810_RG_SSUSB_RX_50_TAR_OFST (26)
+#define A60810_RG_SSUSB_RX_SW_CTRL_OFST (21)
+#define A60810_RG_PCIE_SIGDET_VTH_OFST (19)
+#define A60810_RG_PCIE_SIGDET_LPF_OFST (17)
+#define A60810_RG_SSUSB_LFPS_MON_EN_OFST (16)
+#define A60810_RG_SSUSB_RXAFE_DCMON_SEL_OFST (12)
+#define A60810_RG_SSUSB_RX_P1_ENTRY_PASS_OFST (11)
+#define A60810_RG_SSUSB_RX_PD_RST_OFST (10)
+#define A60810_RG_SSUSB_RX_PD_RST_PASS_OFST (9)
+
+//U3D_regB
+#define A60810_RG_SSUSB_CDR_RESERVE_OFST (24)
+#define A60810_RG_SSUSB_RXAFE_RESERVE_OFST (16)
+#define A60810_RG_PCIE_RX_RESERVE_OFST (8)
+#define A60810_RG_SSUSB_VRT_25M_EN_OFST (7)
+#define A60810_RG_SSUSB_RX_PD_PICAL_SWAP_OFST (6)
+#define A60810_RG_SSUSB_RX_DAC_MEAS_EN_OFST (5)
+#define A60810_RG_SSUSB_MPX_SEL_L0_OFST (4)
+#define A60810_RG_SSUSB_LFPS_SLCOUT_SEL_OFST (3)
+#define A60810_RG_SSUSB_LFPS_CMPOUT_SEL_OFST (2)
+#define A60810_RG_PCIE_SIGDET_HF_OFST (0)
+
+//U3D_regC
+#define A60810_RGS_SSUSB_RX_DEBUG_RESERVE_OFST (0)
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct u3phya_da_reg_a60810 {
+ //0x0
+ PHY_LE32 reg0;
+ PHY_LE32 reg1;
+ PHY_LE32 reg4;
+ PHY_LE32 reg5;
+ //0x10
+ PHY_LE32 reg6;
+ PHY_LE32 reg7;
+ PHY_LE32 reg8;
+ PHY_LE32 reg9;
+ //0x20
+ PHY_LE32 reg10;
+ PHY_LE32 reg12;
+ PHY_LE32 reg13;
+ PHY_LE32 reg14;
+ //0x30
+ PHY_LE32 reg15;
+ PHY_LE32 reg16;
+ PHY_LE32 reg19;
+ PHY_LE32 reg20;
+ //0x40
+ PHY_LE32 reg21;
+ PHY_LE32 reg23;
+ PHY_LE32 reg25;
+ PHY_LE32 reg26;
+ //0x50
+ PHY_LE32 reg28;
+ PHY_LE32 reg29;
+ PHY_LE32 reg30;
+ PHY_LE32 reg31;
+ //0x60
+ PHY_LE32 reg32;
+ PHY_LE32 reg33;
+};
+
+//U3D_reg0
+#define A60810_RG_PCIE_SPEED_PE2D (0x1<<24) //24:24
+#define A60810_RG_PCIE_SPEED_PE2H (0x1<<23) //23:23
+#define A60810_RG_PCIE_SPEED_PE1D (0x1<<22) //22:22
+#define A60810_RG_PCIE_SPEED_PE1H (0x1<<21) //21:21
+#define A60810_RG_PCIE_SPEED_U3 (0x1<<20) //20:20
+#define A60810_RG_SSUSB_XTAL_EXT_EN_PE2D (0x3<<18) //19:18
+#define A60810_RG_SSUSB_XTAL_EXT_EN_PE2H (0x3<<16) //17:16
+#define A60810_RG_SSUSB_XTAL_EXT_EN_PE1D (0x3<<14) //15:14
+#define A60810_RG_SSUSB_XTAL_EXT_EN_PE1H (0x3<<12) //13:12
+#define A60810_RG_SSUSB_XTAL_EXT_EN_U3 (0x3<<10) //11:10
+#define A60810_RG_SSUSB_CDR_REFCK_SEL_PE2D (0x3<<8) //9:8
+#define A60810_RG_SSUSB_CDR_REFCK_SEL_PE2H (0x3<<6) //7:6
+#define A60810_RG_SSUSB_CDR_REFCK_SEL_PE1D (0x3<<4) //5:4
+#define A60810_RG_SSUSB_CDR_REFCK_SEL_PE1H (0x3<<2) //3:2
+#define A60810_RG_SSUSB_CDR_REFCK_SEL_U3 (0x3<<0) //1:0
+
+//U3D_reg1
+#define A60810_RG_USB20_REFCK_SEL_PE2D (0x1<<30) //30:30
+#define A60810_RG_USB20_REFCK_SEL_PE2H (0x1<<29) //29:29
+#define A60810_RG_USB20_REFCK_SEL_PE1D (0x1<<28) //28:28
+#define A60810_RG_USB20_REFCK_SEL_PE1H (0x1<<27) //27:27
+#define A60810_RG_USB20_REFCK_SEL_U3 (0x1<<26) //26:26
+#define A60810_RG_PCIE_REFCK_DIV4_PE2D (0x1<<25) //25:25
+#define A60810_RG_PCIE_REFCK_DIV4_PE2H (0x1<<24) //24:24
+#define A60810_RG_PCIE_REFCK_DIV4_PE1D (0x1<<18) //18:18
+#define A60810_RG_PCIE_REFCK_DIV4_PE1H (0x1<<17) //17:17
+#define A60810_RG_PCIE_REFCK_DIV4_U3 (0x1<<16) //16:16
+#define A60810_RG_PCIE_MODE_PE2D (0x1<<8) //8:8
+#define A60810_RG_PCIE_MODE_PE2H (0x1<<3) //3:3
+#define A60810_RG_PCIE_MODE_PE1D (0x1<<2) //2:2
+#define A60810_RG_PCIE_MODE_PE1H (0x1<<1) //1:1
+#define A60810_RG_PCIE_MODE_U3 (0x1<<0) //0:0
+
+//U3D_reg4
+#define A60810_RG_SSUSB_PLL_DIVEN_PE2D (0x7<<22) //24:22
+#define A60810_RG_SSUSB_PLL_DIVEN_PE2H (0x7<<19) //21:19
+#define A60810_RG_SSUSB_PLL_DIVEN_PE1D (0x7<<16) //18:16
+#define A60810_RG_SSUSB_PLL_DIVEN_PE1H (0x7<<13) //15:13
+#define A60810_RG_SSUSB_PLL_DIVEN_U3 (0x7<<10) //12:10
+#define A60810_RG_SSUSB_PLL_BC_PE2D (0x3<<8) //9:8
+#define A60810_RG_SSUSB_PLL_BC_PE2H (0x3<<6) //7:6
+#define A60810_RG_SSUSB_PLL_BC_PE1D (0x3<<4) //5:4
+#define A60810_RG_SSUSB_PLL_BC_PE1H (0x3<<2) //3:2
+#define A60810_RG_SSUSB_PLL_BC_U3 (0x3<<0) //1:0
+
+//U3D_reg5
+#define A60810_RG_SSUSB_PLL_BR_PE2D (0x3<<30) //31:30
+#define A60810_RG_SSUSB_PLL_BR_PE2H (0x3<<28) //29:28
+#define A60810_RG_SSUSB_PLL_BR_PE1D (0x3<<26) //27:26
+#define A60810_RG_SSUSB_PLL_BR_PE1H (0x3<<24) //25:24
+#define A60810_RG_SSUSB_PLL_BR_U3 (0x3<<22) //23:22
+#define A60810_RG_SSUSB_PLL_IC_PE2D (0xf<<16) //19:16
+#define A60810_RG_SSUSB_PLL_IC_PE2H (0xf<<12) //15:12
+#define A60810_RG_SSUSB_PLL_IC_PE1D (0xf<<8) //11:8
+#define A60810_RG_SSUSB_PLL_IC_PE1H (0xf<<4) //7:4
+#define A60810_RG_SSUSB_PLL_IC_U3 (0xf<<0) //3:0
+
+//U3D_reg6
+#define A60810_RG_SSUSB_PLL_IR_PE2D (0xf<<24) //27:24
+#define A60810_RG_SSUSB_PLL_IR_PE2H (0xf<<16) //19:16
+#define A60810_RG_SSUSB_PLL_IR_PE1D (0xf<<8) //11:8
+#define A60810_RG_SSUSB_PLL_IR_PE1H (0xf<<4) //7:4
+#define A60810_RG_SSUSB_PLL_IR_U3 (0xf<<0) //3:0
+
+//U3D_reg7
+#define A60810_RG_SSUSB_PLL_BP_PE2D (0xf<<24) //27:24
+#define A60810_RG_SSUSB_PLL_BP_PE2H (0xf<<16) //19:16
+#define A60810_RG_SSUSB_PLL_BP_PE1D (0xf<<8) //11:8
+#define A60810_RG_SSUSB_PLL_BP_PE1H (0xf<<4) //7:4
+#define A60810_RG_SSUSB_PLL_BP_U3 (0xf<<0) //3:0
+
+//U3D_reg8
+#define A60810_RG_SSUSB_PLL_FBKSEL_PE2D (0x3<<24) //25:24
+#define A60810_RG_SSUSB_PLL_FBKSEL_PE2H (0x3<<16) //17:16
+#define A60810_RG_SSUSB_PLL_FBKSEL_PE1D (0x3<<8) //9:8
+#define A60810_RG_SSUSB_PLL_FBKSEL_PE1H (0x3<<2) //3:2
+#define A60810_RG_SSUSB_PLL_FBKSEL_U3 (0x3<<0) //1:0
+
+//U3D_reg9
+#define A60810_RG_SSUSB_PLL_FBKDIV_PE2H (0x7f<<24) //30:24
+#define A60810_RG_SSUSB_PLL_FBKDIV_PE1D (0x7f<<16) //22:16
+#define A60810_RG_SSUSB_PLL_FBKDIV_PE1H (0x7f<<8) //14:8
+#define A60810_RG_SSUSB_PLL_FBKDIV_U3 (0x7f<<0) //6:0
+
+//U3D_reg10
+#define A60810_RG_SSUSB_PLL_PREDIV_PE2D (0x3<<26) //27:26
+#define A60810_RG_SSUSB_PLL_PREDIV_PE2H (0x3<<24) //25:24
+#define A60810_RG_SSUSB_PLL_PREDIV_PE1D (0x3<<18) //19:18
+#define A60810_RG_SSUSB_PLL_PREDIV_PE1H (0x3<<16) //17:16
+#define A60810_RG_SSUSB_PLL_PREDIV_U3 (0x3<<8) //9:8
+#define A60810_RG_SSUSB_PLL_FBKDIV_PE2D (0x7f<<0) //6:0
+
+//U3D_reg12
+#define A60810_RG_SSUSB_PLL_PCW_NCPO_U3 (0x7fffffff<<0) //30:0
+
+//U3D_reg13
+#define A60810_RG_SSUSB_PLL_PCW_NCPO_PE1H (0x7fffffff<<0) //30:0
+
+//U3D_reg14
+#define A60810_RG_SSUSB_PLL_PCW_NCPO_PE1D (0x7fffffff<<0) //30:0
+
+//U3D_reg15
+#define A60810_RG_SSUSB_PLL_PCW_NCPO_PE2H (0x7fffffff<<0) //30:0
+
+//U3D_reg16
+#define A60810_RG_SSUSB_PLL_PCW_NCPO_PE2D (0x7fffffff<<0) //30:0
+
+//U3D_reg19
+#define A60810_RG_SSUSB_PLL_SSC_DELTA1_PE1H (0xffff<<16) //31:16
+#define A60810_RG_SSUSB_PLL_SSC_DELTA1_U3 (0xffff<<0) //15:0
+
+//U3D_reg20
+#define A60810_RG_SSUSB_PLL_SSC_DELTA1_PE2H (0xffff<<16) //31:16
+#define A60810_RG_SSUSB_PLL_SSC_DELTA1_PE1D (0xffff<<0) //15:0
+
+//U3D_reg21
+#define A60810_RG_SSUSB_PLL_SSC_DELTA_U3 (0xffff<<16) //31:16
+#define A60810_RG_SSUSB_PLL_SSC_DELTA1_PE2D (0xffff<<0) //15:0
+
+//U3D_reg23
+#define A60810_RG_SSUSB_PLL_SSC_DELTA_PE1D (0xffff<<16) //31:16
+#define A60810_RG_SSUSB_PLL_SSC_DELTA_PE1H (0xffff<<0) //15:0
+
+//U3D_reg25
+#define A60810_RG_SSUSB_PLL_SSC_DELTA_PE2D (0xffff<<16) //31:16
+#define A60810_RG_SSUSB_PLL_SSC_DELTA_PE2H (0xffff<<0) //15:0
+
+//U3D_reg26
+#define A60810_RG_SSUSB_PLL_REFCKDIV_PE2D (0x1<<25) //25:25
+#define A60810_RG_SSUSB_PLL_REFCKDIV_PE2H (0x1<<24) //24:24
+#define A60810_RG_SSUSB_PLL_REFCKDIV_PE1D (0x1<<16) //16:16
+#define A60810_RG_SSUSB_PLL_REFCKDIV_PE1H (0x1<<8) //8:8
+#define A60810_RG_SSUSB_PLL_REFCKDIV_U3 (0x1<<0) //0:0
+
+//U3D_reg28
+#define A60810_RG_SSUSB_CDR_BPA_PE2D (0x3<<24) //25:24
+#define A60810_RG_SSUSB_CDR_BPA_PE2H (0x3<<16) //17:16
+#define A60810_RG_SSUSB_CDR_BPA_PE1D (0x3<<10) //11:10
+#define A60810_RG_SSUSB_CDR_BPA_PE1H (0x3<<8) //9:8
+#define A60810_RG_SSUSB_CDR_BPA_U3 (0x3<<0) //1:0
+
+//U3D_reg29
+#define A60810_RG_SSUSB_CDR_BPB_PE2D (0x7<<24) //26:24
+#define A60810_RG_SSUSB_CDR_BPB_PE2H (0x7<<16) //18:16
+#define A60810_RG_SSUSB_CDR_BPB_PE1D (0x7<<6) //8:6
+#define A60810_RG_SSUSB_CDR_BPB_PE1H (0x7<<3) //5:3
+#define A60810_RG_SSUSB_CDR_BPB_U3 (0x7<<0) //2:0
+
+//U3D_reg30
+#define A60810_RG_SSUSB_CDR_BR_PE2D (0x7<<24) //26:24
+#define A60810_RG_SSUSB_CDR_BR_PE2H (0x7<<16) //18:16
+#define A60810_RG_SSUSB_CDR_BR_PE1D (0x7<<6) //8:6
+#define A60810_RG_SSUSB_CDR_BR_PE1H (0x7<<3) //5:3
+#define A60810_RG_SSUSB_CDR_BR_U3 (0x7<<0) //2:0
+
+//U3D_reg31
+#define A60810_RG_SSUSB_CDR_FBDIV_PE2H (0x7f<<24) //30:24
+#define A60810_RG_SSUSB_CDR_FBDIV_PE1D (0x7f<<16) //22:16
+#define A60810_RG_SSUSB_CDR_FBDIV_PE1H (0x7f<<8) //14:8
+#define A60810_RG_SSUSB_CDR_FBDIV_U3 (0x7f<<0) //6:0
+
+//U3D_reg32
+#define A60810_RG_SSUSB_EQ_RSTEP1_PE2D (0x3<<30) //31:30
+#define A60810_RG_SSUSB_EQ_RSTEP1_PE2H (0x3<<28) //29:28
+#define A60810_RG_SSUSB_EQ_RSTEP1_PE1D (0x3<<26) //27:26
+#define A60810_RG_SSUSB_EQ_RSTEP1_PE1H (0x3<<24) //25:24
+#define A60810_RG_SSUSB_EQ_RSTEP1_U3 (0x3<<22) //23:22
+#define A60810_RG_SSUSB_LFPS_DEGLITCH_PE2D (0x3<<20) //21:20
+#define A60810_RG_SSUSB_LFPS_DEGLITCH_PE2H (0x3<<18) //19:18
+#define A60810_RG_SSUSB_LFPS_DEGLITCH_PE1D (0x3<<16) //17:16
+#define A60810_RG_SSUSB_LFPS_DEGLITCH_PE1H (0x3<<14) //15:14
+#define A60810_RG_SSUSB_LFPS_DEGLITCH_U3 (0x3<<12) //13:12
+#define A60810_RG_SSUSB_CDR_KVSEL_PE2D (0x1<<11) //11:11
+#define A60810_RG_SSUSB_CDR_KVSEL_PE2H (0x1<<10) //10:10
+#define A60810_RG_SSUSB_CDR_KVSEL_PE1D (0x1<<9) //9:9
+#define A60810_RG_SSUSB_CDR_KVSEL_PE1H (0x1<<8) //8:8
+#define A60810_RG_SSUSB_CDR_KVSEL_U3 (0x1<<7) //7:7
+#define A60810_RG_SSUSB_CDR_FBDIV_PE2D (0x7f<<0) //6:0
+
+//U3D_reg33
+#define A60810_RG_SSUSB_RX_CMPWD_PE2D (0x1<<26) //26:26
+#define A60810_RG_SSUSB_RX_CMPWD_PE2H (0x1<<25) //25:25
+#define A60810_RG_SSUSB_RX_CMPWD_PE1D (0x1<<24) //24:24
+#define A60810_RG_SSUSB_RX_CMPWD_PE1H (0x1<<23) //23:23
+#define A60810_RG_SSUSB_RX_CMPWD_U3 (0x1<<16) //16:16
+#define A60810_RG_SSUSB_EQ_RSTEP2_PE2D (0x3<<8) //9:8
+#define A60810_RG_SSUSB_EQ_RSTEP2_PE2H (0x3<<6) //7:6
+#define A60810_RG_SSUSB_EQ_RSTEP2_PE1D (0x3<<4) //5:4
+#define A60810_RG_SSUSB_EQ_RSTEP2_PE1H (0x3<<2) //3:2
+#define A60810_RG_SSUSB_EQ_RSTEP2_U3 (0x3<<0) //1:0
+
+/* OFFSET DEFINITION */
+
+//U3D_reg0
+#define A60810_RG_PCIE_SPEED_PE2D_OFST (24)
+#define A60810_RG_PCIE_SPEED_PE2H_OFST (23)
+#define A60810_RG_PCIE_SPEED_PE1D_OFST (22)
+#define A60810_RG_PCIE_SPEED_PE1H_OFST (21)
+#define A60810_RG_PCIE_SPEED_U3_OFST (20)
+#define A60810_RG_SSUSB_XTAL_EXT_EN_PE2D_OFST (18)
+#define A60810_RG_SSUSB_XTAL_EXT_EN_PE2H_OFST (16)
+#define A60810_RG_SSUSB_XTAL_EXT_EN_PE1D_OFST (14)
+#define A60810_RG_SSUSB_XTAL_EXT_EN_PE1H_OFST (12)
+#define A60810_RG_SSUSB_XTAL_EXT_EN_U3_OFST (10)
+#define A60810_RG_SSUSB_CDR_REFCK_SEL_PE2D_OFST (8)
+#define A60810_RG_SSUSB_CDR_REFCK_SEL_PE2H_OFST (6)
+#define A60810_RG_SSUSB_CDR_REFCK_SEL_PE1D_OFST (4)
+#define A60810_RG_SSUSB_CDR_REFCK_SEL_PE1H_OFST (2)
+#define A60810_RG_SSUSB_CDR_REFCK_SEL_U3_OFST (0)
+
+//U3D_reg1
+#define A60810_RG_USB20_REFCK_SEL_PE2D_OFST (30)
+#define A60810_RG_USB20_REFCK_SEL_PE2H_OFST (29)
+#define A60810_RG_USB20_REFCK_SEL_PE1D_OFST (28)
+#define A60810_RG_USB20_REFCK_SEL_PE1H_OFST (27)
+#define A60810_RG_USB20_REFCK_SEL_U3_OFST (26)
+#define A60810_RG_PCIE_REFCK_DIV4_PE2D_OFST (25)
+#define A60810_RG_PCIE_REFCK_DIV4_PE2H_OFST (24)
+#define A60810_RG_PCIE_REFCK_DIV4_PE1D_OFST (18)
+#define A60810_RG_PCIE_REFCK_DIV4_PE1H_OFST (17)
+#define A60810_RG_PCIE_REFCK_DIV4_U3_OFST (16)
+#define A60810_RG_PCIE_MODE_PE2D_OFST (8)
+#define A60810_RG_PCIE_MODE_PE2H_OFST (3)
+#define A60810_RG_PCIE_MODE_PE1D_OFST (2)
+#define A60810_RG_PCIE_MODE_PE1H_OFST (1)
+#define A60810_RG_PCIE_MODE_U3_OFST (0)
+
+//U3D_reg4
+#define A60810_RG_SSUSB_PLL_DIVEN_PE2D_OFST (22)
+#define A60810_RG_SSUSB_PLL_DIVEN_PE2H_OFST (19)
+#define A60810_RG_SSUSB_PLL_DIVEN_PE1D_OFST (16)
+#define A60810_RG_SSUSB_PLL_DIVEN_PE1H_OFST (13)
+#define A60810_RG_SSUSB_PLL_DIVEN_U3_OFST (10)
+#define A60810_RG_SSUSB_PLL_BC_PE2D_OFST (8)
+#define A60810_RG_SSUSB_PLL_BC_PE2H_OFST (6)
+#define A60810_RG_SSUSB_PLL_BC_PE1D_OFST (4)
+#define A60810_RG_SSUSB_PLL_BC_PE1H_OFST (2)
+#define A60810_RG_SSUSB_PLL_BC_U3_OFST (0)
+
+//U3D_reg5
+#define A60810_RG_SSUSB_PLL_BR_PE2D_OFST (30)
+#define A60810_RG_SSUSB_PLL_BR_PE2H_OFST (28)
+#define A60810_RG_SSUSB_PLL_BR_PE1D_OFST (26)
+#define A60810_RG_SSUSB_PLL_BR_PE1H_OFST (24)
+#define A60810_RG_SSUSB_PLL_BR_U3_OFST (22)
+#define A60810_RG_SSUSB_PLL_IC_PE2D_OFST (16)
+#define A60810_RG_SSUSB_PLL_IC_PE2H_OFST (12)
+#define A60810_RG_SSUSB_PLL_IC_PE1D_OFST (8)
+#define A60810_RG_SSUSB_PLL_IC_PE1H_OFST (4)
+#define A60810_RG_SSUSB_PLL_IC_U3_OFST (0)
+
+//U3D_reg6
+#define A60810_RG_SSUSB_PLL_IR_PE2D_OFST (24)
+#define A60810_RG_SSUSB_PLL_IR_PE2H_OFST (16)
+#define A60810_RG_SSUSB_PLL_IR_PE1D_OFST (8)
+#define A60810_RG_SSUSB_PLL_IR_PE1H_OFST (4)
+#define A60810_RG_SSUSB_PLL_IR_U3_OFST (0)
+
+//U3D_reg7
+#define A60810_RG_SSUSB_PLL_BP_PE2D_OFST (24)
+#define A60810_RG_SSUSB_PLL_BP_PE2H_OFST (16)
+#define A60810_RG_SSUSB_PLL_BP_PE1D_OFST (8)
+#define A60810_RG_SSUSB_PLL_BP_PE1H_OFST (4)
+#define A60810_RG_SSUSB_PLL_BP_U3_OFST (0)
+
+//U3D_reg8
+#define A60810_RG_SSUSB_PLL_FBKSEL_PE2D_OFST (24)
+#define A60810_RG_SSUSB_PLL_FBKSEL_PE2H_OFST (16)
+#define A60810_RG_SSUSB_PLL_FBKSEL_PE1D_OFST (8)
+#define A60810_RG_SSUSB_PLL_FBKSEL_PE1H_OFST (2)
+#define A60810_RG_SSUSB_PLL_FBKSEL_U3_OFST (0)
+
+//U3D_reg9
+#define A60810_RG_SSUSB_PLL_FBKDIV_PE2H_OFST (24)
+#define A60810_RG_SSUSB_PLL_FBKDIV_PE1D_OFST (16)
+#define A60810_RG_SSUSB_PLL_FBKDIV_PE1H_OFST (8)
+#define A60810_RG_SSUSB_PLL_FBKDIV_U3_OFST (0)
+
+//U3D_reg10
+#define A60810_RG_SSUSB_PLL_PREDIV_PE2D_OFST (26)
+#define A60810_RG_SSUSB_PLL_PREDIV_PE2H_OFST (24)
+#define A60810_RG_SSUSB_PLL_PREDIV_PE1D_OFST (18)
+#define A60810_RG_SSUSB_PLL_PREDIV_PE1H_OFST (16)
+#define A60810_RG_SSUSB_PLL_PREDIV_U3_OFST (8)
+#define A60810_RG_SSUSB_PLL_FBKDIV_PE2D_OFST (0)
+
+//U3D_reg12
+#define A60810_RG_SSUSB_PLL_PCW_NCPO_U3_OFST (0)
+
+//U3D_reg13
+#define A60810_RG_SSUSB_PLL_PCW_NCPO_PE1H_OFST (0)
+
+//U3D_reg14
+#define A60810_RG_SSUSB_PLL_PCW_NCPO_PE1D_OFST (0)
+
+//U3D_reg15
+#define A60810_RG_SSUSB_PLL_PCW_NCPO_PE2H_OFST (0)
+
+//U3D_reg16
+#define A60810_RG_SSUSB_PLL_PCW_NCPO_PE2D_OFST (0)
+
+//U3D_reg19
+#define A60810_RG_SSUSB_PLL_SSC_DELTA1_PE1H_OFST (16)
+#define A60810_RG_SSUSB_PLL_SSC_DELTA1_U3_OFST (0)
+
+//U3D_reg20
+#define A60810_RG_SSUSB_PLL_SSC_DELTA1_PE2H_OFST (16)
+#define A60810_RG_SSUSB_PLL_SSC_DELTA1_PE1D_OFST (0)
+
+//U3D_reg21
+#define A60810_RG_SSUSB_PLL_SSC_DELTA_U3_OFST (16)
+#define A60810_RG_SSUSB_PLL_SSC_DELTA1_PE2D_OFST (0)
+
+//U3D_reg23
+#define A60810_RG_SSUSB_PLL_SSC_DELTA_PE1D_OFST (16)
+#define A60810_RG_SSUSB_PLL_SSC_DELTA_PE1H_OFST (0)
+
+//U3D_reg25
+#define A60810_RG_SSUSB_PLL_SSC_DELTA_PE2D_OFST (16)
+#define A60810_RG_SSUSB_PLL_SSC_DELTA_PE2H_OFST (0)
+
+//U3D_reg26
+#define A60810_RG_SSUSB_PLL_REFCKDIV_PE2D_OFST (25)
+#define A60810_RG_SSUSB_PLL_REFCKDIV_PE2H_OFST (24)
+#define A60810_RG_SSUSB_PLL_REFCKDIV_PE1D_OFST (16)
+#define A60810_RG_SSUSB_PLL_REFCKDIV_PE1H_OFST (8)
+#define A60810_RG_SSUSB_PLL_REFCKDIV_U3_OFST (0)
+
+//U3D_reg28
+#define A60810_RG_SSUSB_CDR_BPA_PE2D_OFST (24)
+#define A60810_RG_SSUSB_CDR_BPA_PE2H_OFST (16)
+#define A60810_RG_SSUSB_CDR_BPA_PE1D_OFST (10)
+#define A60810_RG_SSUSB_CDR_BPA_PE1H_OFST (8)
+#define A60810_RG_SSUSB_CDR_BPA_U3_OFST (0)
+
+//U3D_reg29
+#define A60810_RG_SSUSB_CDR_BPB_PE2D_OFST (24)
+#define A60810_RG_SSUSB_CDR_BPB_PE2H_OFST (16)
+#define A60810_RG_SSUSB_CDR_BPB_PE1D_OFST (6)
+#define A60810_RG_SSUSB_CDR_BPB_PE1H_OFST (3)
+#define A60810_RG_SSUSB_CDR_BPB_U3_OFST (0)
+
+//U3D_reg30
+#define A60810_RG_SSUSB_CDR_BR_PE2D_OFST (24)
+#define A60810_RG_SSUSB_CDR_BR_PE2H_OFST (16)
+#define A60810_RG_SSUSB_CDR_BR_PE1D_OFST (6)
+#define A60810_RG_SSUSB_CDR_BR_PE1H_OFST (3)
+#define A60810_RG_SSUSB_CDR_BR_U3_OFST (0)
+
+//U3D_reg31
+#define A60810_RG_SSUSB_CDR_FBDIV_PE2H_OFST (24)
+#define A60810_RG_SSUSB_CDR_FBDIV_PE1D_OFST (16)
+#define A60810_RG_SSUSB_CDR_FBDIV_PE1H_OFST (8)
+#define A60810_RG_SSUSB_CDR_FBDIV_U3_OFST (0)
+
+//U3D_reg32
+#define A60810_RG_SSUSB_EQ_RSTEP1_PE2D_OFST (30)
+#define A60810_RG_SSUSB_EQ_RSTEP1_PE2H_OFST (28)
+#define A60810_RG_SSUSB_EQ_RSTEP1_PE1D_OFST (26)
+#define A60810_RG_SSUSB_EQ_RSTEP1_PE1H_OFST (24)
+#define A60810_RG_SSUSB_EQ_RSTEP1_U3_OFST (22)
+#define A60810_RG_SSUSB_LFPS_DEGLITCH_PE2D_OFST (20)
+#define A60810_RG_SSUSB_LFPS_DEGLITCH_PE2H_OFST (18)
+#define A60810_RG_SSUSB_LFPS_DEGLITCH_PE1D_OFST (16)
+#define A60810_RG_SSUSB_LFPS_DEGLITCH_PE1H_OFST (14)
+#define A60810_RG_SSUSB_LFPS_DEGLITCH_U3_OFST (12)
+#define A60810_RG_SSUSB_CDR_KVSEL_PE2D_OFST (11)
+#define A60810_RG_SSUSB_CDR_KVSEL_PE2H_OFST (10)
+#define A60810_RG_SSUSB_CDR_KVSEL_PE1D_OFST (9)
+#define A60810_RG_SSUSB_CDR_KVSEL_PE1H_OFST (8)
+#define A60810_RG_SSUSB_CDR_KVSEL_U3_OFST (7)
+#define A60810_RG_SSUSB_CDR_FBDIV_PE2D_OFST (0)
+
+//U3D_reg33
+#define A60810_RG_SSUSB_RX_CMPWD_PE2D_OFST (26)
+#define A60810_RG_SSUSB_RX_CMPWD_PE2H_OFST (25)
+#define A60810_RG_SSUSB_RX_CMPWD_PE1D_OFST (24)
+#define A60810_RG_SSUSB_RX_CMPWD_PE1H_OFST (23)
+#define A60810_RG_SSUSB_RX_CMPWD_U3_OFST (16)
+#define A60810_RG_SSUSB_EQ_RSTEP2_PE2D_OFST (8)
+#define A60810_RG_SSUSB_EQ_RSTEP2_PE2H_OFST (6)
+#define A60810_RG_SSUSB_EQ_RSTEP2_PE1D_OFST (4)
+#define A60810_RG_SSUSB_EQ_RSTEP2_PE1H_OFST (2)
+#define A60810_RG_SSUSB_EQ_RSTEP2_U3_OFST (0)
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct u3phyd_reg_a60810 {
+ //0x0
+ PHY_LE32 phyd_mix0;
+ PHY_LE32 phyd_mix1;
+ PHY_LE32 phyd_lfps0;
+ PHY_LE32 phyd_lfps1;
+ //0x10
+ PHY_LE32 phyd_impcal0;
+ PHY_LE32 phyd_impcal1;
+ PHY_LE32 phyd_txpll0;
+ PHY_LE32 phyd_txpll1;
+ //0x20
+ PHY_LE32 phyd_txpll2;
+ PHY_LE32 phyd_fl0;
+ PHY_LE32 phyd_mix2;
+ PHY_LE32 phyd_rx0;
+ //0x30
+ PHY_LE32 phyd_t2rlb;
+ PHY_LE32 phyd_cppat;
+ PHY_LE32 phyd_mix3;
+ PHY_LE32 phyd_ebufctl;
+ //0x40
+ PHY_LE32 phyd_pipe0;
+ PHY_LE32 phyd_pipe1;
+ PHY_LE32 phyd_mix4;
+ PHY_LE32 phyd_ckgen0;
+ //0x50
+ PHY_LE32 phyd_mix5;
+ PHY_LE32 phyd_reserved;
+ PHY_LE32 phyd_cdr0;
+ PHY_LE32 phyd_cdr1;
+ //0x60
+ PHY_LE32 phyd_pll_0;
+ PHY_LE32 phyd_pll_1;
+ PHY_LE32 phyd_bcn_det_1;
+ PHY_LE32 phyd_bcn_det_2;
+ //0x70
+ PHY_LE32 eq0;
+ PHY_LE32 eq1;
+ PHY_LE32 eq2;
+ PHY_LE32 eq3;
+ //0x80
+ PHY_LE32 eq_eye0;
+ PHY_LE32 eq_eye1;
+ PHY_LE32 eq_eye2;
+ PHY_LE32 eq_dfe0;
+ //0x90
+ PHY_LE32 eq_dfe1;
+ PHY_LE32 eq_dfe2;
+ PHY_LE32 eq_dfe3;
+ PHY_LE32 reserve0;
+ //0xa0
+ PHY_LE32 phyd_mon0;
+ PHY_LE32 phyd_mon1;
+ PHY_LE32 phyd_mon2;
+ PHY_LE32 phyd_mon3;
+ //0xb0
+ PHY_LE32 phyd_mon4;
+ PHY_LE32 phyd_mon5;
+ PHY_LE32 phyd_mon6;
+ PHY_LE32 phyd_mon7;
+ //0xc0
+ PHY_LE32 phya_rx_mon0;
+ PHY_LE32 phya_rx_mon1;
+ PHY_LE32 phya_rx_mon2;
+ PHY_LE32 phya_rx_mon3;
+ //0xd0
+ PHY_LE32 phya_rx_mon4;
+ PHY_LE32 phya_rx_mon5;
+ PHY_LE32 phyd_cppat2;
+ PHY_LE32 eq_eye3;
+ //0xe0
+ PHY_LE32 kband_out;
+ PHY_LE32 kband_out1;
+};
+
+//U3D_PHYD_MIX0
+#define A60810_RG_SSUSB_P_P3_TX_NG (0x1<<31) //31:31
+#define A60810_RG_SSUSB_TSEQ_EN (0x1<<30) //30:30
+#define A60810_RG_SSUSB_TSEQ_POLEN (0x1<<29) //29:29
+#define A60810_RG_SSUSB_TSEQ_POL (0x1<<28) //28:28
+#define A60810_RG_SSUSB_P_P3_PCLK_NG (0x1<<27) //27:27
+#define A60810_RG_SSUSB_TSEQ_TH (0x7<<24) //26:24
+#define A60810_RG_SSUSB_PRBS_BERTH (0xff<<16) //23:16
+#define A60810_RG_SSUSB_DISABLE_PHY_U2_ON (0x1<<15) //15:15
+#define A60810_RG_SSUSB_DISABLE_PHY_U2_OFF (0x1<<14) //14:14
+#define A60810_RG_SSUSB_PRBS_EN (0x1<<13) //13:13
+#define A60810_RG_SSUSB_BPSLOCK (0x1<<12) //12:12
+#define A60810_RG_SSUSB_RTCOMCNT (0xf<<8) //11:8
+#define A60810_RG_SSUSB_COMCNT (0xf<<4) //7:4
+#define A60810_RG_SSUSB_PRBSEL_CALIB (0xf<<0) //3:0
+
+//U3D_PHYD_MIX1
+#define A60810_RG_SSUSB_SLEEP_EN (0x1<<31) //31:31
+#define A60810_RG_SSUSB_PRBSEL_PCS (0x7<<28) //30:28
+#define A60810_RG_SSUSB_TXLFPS_PRD (0xf<<24) //27:24
+#define A60810_RG_SSUSB_P_RX_P0S_CK (0x1<<23) //23:23
+#define A60810_RG_SSUSB_P_TX_P0S_CK (0x1<<22) //22:22
+#define A60810_RG_SSUSB_PDNCTL (0x3f<<16) //21:16
+#define A60810_RG_SSUSB_TX_DRV_EN (0x1<<15) //15:15
+#define A60810_RG_SSUSB_TX_DRV_SEL (0x1<<14) //14:14
+#define A60810_RG_SSUSB_TX_DRV_DLY (0x3f<<8) //13:8
+#define A60810_RG_SSUSB_BERT_EN (0x1<<7) //7:7
+#define A60810_RG_SSUSB_SCP_TH (0x7<<4) //6:4
+#define A60810_RG_SSUSB_SCP_EN (0x1<<3) //3:3
+#define A60810_RG_SSUSB_RXANSIDEC_TEST (0x7<<0) //2:0
+
+//U3D_PHYD_LFPS0
+#define A60810_RG_SSUSB_LFPS_PWD (0x1<<30) //30:30
+#define A60810_RG_SSUSB_FORCE_LFPS_PWD (0x1<<29) //29:29
+#define A60810_RG_SSUSB_RXLFPS_OVF (0x1f<<24) //28:24
+#define A60810_RG_SSUSB_P3_ENTRY_SEL (0x1<<23) //23:23
+#define A60810_RG_SSUSB_P3_ENTRY (0x1<<22) //22:22
+#define A60810_RG_SSUSB_RXLFPS_CDRSEL (0x3<<20) //21:20
+#define A60810_RG_SSUSB_RXLFPS_CDRTH (0xf<<16) //19:16
+#define A60810_RG_SSUSB_LOCK5G_BLOCK (0x1<<15) //15:15
+#define A60810_RG_SSUSB_TFIFO_EXT_D_SEL (0x1<<14) //14:14
+#define A60810_RG_SSUSB_TFIFO_NO_EXTEND (0x1<<13) //13:13
+#define A60810_RG_SSUSB_RXLFPS_LOB (0x1f<<8) //12:8
+#define A60810_RG_SSUSB_TXLFPS_EN (0x1<<7) //7:7
+#define A60810_RG_SSUSB_TXLFPS_SEL (0x1<<6) //6:6
+#define A60810_RG_SSUSB_RXLFPS_CDRLOCK (0x1<<5) //5:5
+#define A60810_RG_SSUSB_RXLFPS_UPB (0x1f<<0) //4:0
+
+//U3D_PHYD_LFPS1
+#define A60810_RG_SSUSB_RX_IMP_BIAS (0xf<<28) //31:28
+#define A60810_RG_SSUSB_TX_IMP_BIAS (0xf<<24) //27:24
+#define A60810_RG_SSUSB_FWAKE_TH (0x3f<<16) //21:16
+#define A60810_RG_SSUSB_P1_ENTRY_SEL (0x1<<14) //14:14
+#define A60810_RG_SSUSB_P1_ENTRY (0x1<<13) //13:13
+#define A60810_RG_SSUSB_RXLFPS_UDF (0x1f<<8) //12:8
+#define A60810_RG_SSUSB_RXLFPS_P0IDLETH (0xff<<0) //7:0
+
+//U3D_PHYD_IMPCAL0
+#define A60810_RG_SSUSB_FORCE_TX_IMPSEL (0x1<<31) //31:31
+#define A60810_RG_SSUSB_TX_IMPCAL_EN (0x1<<30) //30:30
+#define A60810_RG_SSUSB_FORCE_TX_IMPCAL_EN (0x1<<29) //29:29
+#define A60810_RG_SSUSB_TX_IMPSEL (0x1f<<24) //28:24
+#define A60810_RG_SSUSB_TX_IMPCAL_CALCYC (0x3f<<16) //21:16
+#define A60810_RG_SSUSB_TX_IMPCAL_STBCYC (0x1f<<10) //14:10
+#define A60810_RG_SSUSB_TX_IMPCAL_CYCCNT (0x3ff<<0) //9:0
+
+//U3D_PHYD_IMPCAL1
+#define A60810_RG_SSUSB_FORCE_RX_IMPSEL (0x1<<31) //31:31
+#define A60810_RG_SSUSB_RX_IMPCAL_EN (0x1<<30) //30:30
+#define A60810_RG_SSUSB_FORCE_RX_IMPCAL_EN (0x1<<29) //29:29
+#define A60810_RG_SSUSB_RX_IMPSEL (0x1f<<24) //28:24
+#define A60810_RG_SSUSB_RX_IMPCAL_CALCYC (0x3f<<16) //21:16
+#define A60810_RG_SSUSB_RX_IMPCAL_STBCYC (0x1f<<10) //14:10
+#define A60810_RG_SSUSB_RX_IMPCAL_CYCCNT (0x3ff<<0) //9:0
+
+//U3D_PHYD_TXPLL0
+#define A60810_RG_SSUSB_TXPLL_DDSEN_CYC (0x1f<<27) //31:27
+#define A60810_RG_SSUSB_TXPLL_ON (0x1<<26) //26:26
+#define A60810_RG_SSUSB_FORCE_TXPLLON (0x1<<25) //25:25
+#define A60810_RG_SSUSB_TXPLL_STBCYC (0x1ff<<16) //24:16
+#define A60810_RG_SSUSB_TXPLL_NCPOCHG_CYC (0xf<<12) //15:12
+#define A60810_RG_SSUSB_TXPLL_NCPOEN_CYC (0x3<<10) //11:10
+#define A60810_RG_SSUSB_TXPLL_DDSRSTB_CYC (0x7<<0) //2:0
+
+//U3D_PHYD_TXPLL1
+#define A60810_RG_SSUSB_PLL_NCPO_EN (0x1<<31) //31:31
+#define A60810_RG_SSUSB_PLL_FIFO_START_MAN (0x1<<30) //30:30
+#define A60810_RG_SSUSB_PLL_NCPO_CHG (0x1<<28) //28:28
+#define A60810_RG_SSUSB_PLL_DDS_RSTB (0x1<<27) //27:27
+#define A60810_RG_SSUSB_PLL_DDS_PWDB (0x1<<26) //26:26
+#define A60810_RG_SSUSB_PLL_DDSEN (0x1<<25) //25:25
+#define A60810_RG_SSUSB_PLL_AUTOK_VCO (0x1<<24) //24:24
+#define A60810_RG_SSUSB_PLL_PWD (0x1<<23) //23:23
+#define A60810_RG_SSUSB_RX_AFE_PWD (0x1<<22) //22:22
+#define A60810_RG_SSUSB_PLL_TCADJ (0x3f<<16) //21:16
+#define A60810_RG_SSUSB_FORCE_CDR_TCADJ (0x1<<15) //15:15
+#define A60810_RG_SSUSB_FORCE_CDR_AUTOK_VCO (0x1<<14) //14:14
+#define A60810_RG_SSUSB_FORCE_CDR_PWD (0x1<<13) //13:13
+#define A60810_RG_SSUSB_FORCE_PLL_NCPO_EN (0x1<<12) //12:12
+#define A60810_RG_SSUSB_FORCE_PLL_FIFO_START_MAN (0x1<<11) //11:11
+#define A60810_RG_SSUSB_FORCE_PLL_NCPO_CHG (0x1<<9) //9:9
+#define A60810_RG_SSUSB_FORCE_PLL_DDS_RSTB (0x1<<8) //8:8
+#define A60810_RG_SSUSB_FORCE_PLL_DDS_PWDB (0x1<<7) //7:7
+#define A60810_RG_SSUSB_FORCE_PLL_DDSEN (0x1<<6) //6:6
+#define A60810_RG_SSUSB_FORCE_PLL_TCADJ (0x1<<5) //5:5
+#define A60810_RG_SSUSB_FORCE_PLL_AUTOK_VCO (0x1<<4) //4:4
+#define A60810_RG_SSUSB_FORCE_PLL_PWD (0x1<<3) //3:3
+#define A60810_RG_SSUSB_FLT_1_DISPERR_B (0x1<<2) //2:2
+
+//U3D_PHYD_TXPLL2
+#define A60810_RG_SSUSB_TX_LFPS_EN (0x1<<31) //31:31
+#define A60810_RG_SSUSB_FORCE_TX_LFPS_EN (0x1<<30) //30:30
+#define A60810_RG_SSUSB_TX_LFPS (0x1<<29) //29:29
+#define A60810_RG_SSUSB_FORCE_TX_LFPS (0x1<<28) //28:28
+#define A60810_RG_SSUSB_RXPLL_STB (0x1<<27) //27:27
+#define A60810_RG_SSUSB_TXPLL_STB (0x1<<26) //26:26
+#define A60810_RG_SSUSB_FORCE_RXPLL_STB (0x1<<25) //25:25
+#define A60810_RG_SSUSB_FORCE_TXPLL_STB (0x1<<24) //24:24
+#define A60810_RG_SSUSB_RXPLL_REFCKSEL (0x1<<16) //16:16
+#define A60810_RG_SSUSB_RXPLL_STBMODE (0x1<<11) //11:11
+#define A60810_RG_SSUSB_RXPLL_ON (0x1<<10) //10:10
+#define A60810_RG_SSUSB_FORCE_RXPLLON (0x1<<9) //9:9
+#define A60810_RG_SSUSB_FORCE_RX_AFE_PWD (0x1<<8) //8:8
+#define A60810_RG_SSUSB_CDR_AUTOK_VCO (0x1<<7) //7:7
+#define A60810_RG_SSUSB_CDR_PWD (0x1<<6) //6:6
+#define A60810_RG_SSUSB_CDR_TCADJ (0x3f<<0) //5:0
+
+//U3D_PHYD_FL0
+#define A60810_RG_SSUSB_RX_FL_TARGET (0xffff<<16) //31:16
+#define A60810_RG_SSUSB_RX_FL_CYCLECNT (0xffff<<0) //15:0
+
+//U3D_PHYD_MIX2
+#define A60810_RG_SSUSB_RX_EQ_RST (0x1<<31) //31:31
+#define A60810_RG_SSUSB_RX_EQ_RST_SEL (0x1<<30) //30:30
+#define A60810_RG_SSUSB_RXVAL_RST (0x1<<29) //29:29
+#define A60810_RG_SSUSB_RXVAL_CNT (0x1f<<24) //28:24
+#define A60810_RG_SSUSB_CDROS_EN (0x1<<18) //18:18
+#define A60810_RG_SSUSB_CDR_LCKOP (0x3<<16) //17:16
+#define A60810_RG_SSUSB_RX_FL_LOCKTH (0xf<<8) //11:8
+#define A60810_RG_SSUSB_RX_FL_OFFSET (0xff<<0) //7:0
+
+//U3D_PHYD_RX0
+#define A60810_RG_SSUSB_T2RLB_BERTH (0xff<<24) //31:24
+#define A60810_RG_SSUSB_T2RLB_PAT (0xff<<16) //23:16
+#define A60810_RG_SSUSB_T2RLB_EN (0x1<<15) //15:15
+#define A60810_RG_SSUSB_T2RLB_BPSCRAMB (0x1<<14) //14:14
+#define A60810_RG_SSUSB_T2RLB_SERIAL (0x1<<13) //13:13
+#define A60810_RG_SSUSB_T2RLB_MODE (0x3<<11) //12:11
+#define A60810_RG_SSUSB_RX_SAOSC_EN (0x1<<10) //10:10
+#define A60810_RG_SSUSB_RX_SAOSC_EN_SEL (0x1<<9) //9:9
+#define A60810_RG_SSUSB_RX_DFE_OPTION (0x1<<8) //8:8
+#define A60810_RG_SSUSB_RX_DFE_EN (0x1<<7) //7:7
+#define A60810_RG_SSUSB_RX_DFE_EN_SEL (0x1<<6) //6:6
+#define A60810_RG_SSUSB_RX_EQ_EN (0x1<<5) //5:5
+#define A60810_RG_SSUSB_RX_EQ_EN_SEL (0x1<<4) //4:4
+#define A60810_RG_SSUSB_RX_SAOSC_RST (0x1<<3) //3:3
+#define A60810_RG_SSUSB_RX_SAOSC_RST_SEL (0x1<<2) //2:2
+#define A60810_RG_SSUSB_RX_DFE_RST (0x1<<1) //1:1
+#define A60810_RG_SSUSB_RX_DFE_RST_SEL (0x1<<0) //0:0
+
+//U3D_PHYD_T2RLB
+#define A60810_RG_SSUSB_EQTRAIN_CH_MODE (0x1<<28) //28:28
+#define A60810_RG_SSUSB_PRB_OUT_CPPAT (0x1<<27) //27:27
+#define A60810_RG_SSUSB_BPANSIENC (0x1<<26) //26:26
+#define A60810_RG_SSUSB_VALID_EN (0x1<<25) //25:25
+#define A60810_RG_SSUSB_EBUF_SRST (0x1<<24) //24:24
+#define A60810_RG_SSUSB_K_EMP (0xf<<20) //23:20
+#define A60810_RG_SSUSB_K_FUL (0xf<<16) //19:16
+#define A60810_RG_SSUSB_T2RLB_BDATRST (0xf<<12) //15:12
+#define A60810_RG_SSUSB_P_T2RLB_SKP_EN (0x1<<10) //10:10
+#define A60810_RG_SSUSB_T2RLB_PATMODE (0x3<<8) //9:8
+#define A60810_RG_SSUSB_T2RLB_TSEQCNT (0xff<<0) //7:0
+
+//U3D_PHYD_CPPAT
+#define A60810_RG_SSUSB_CPPAT_PROGRAM_EN (0x1<<24) //24:24
+#define A60810_RG_SSUSB_CPPAT_TOZ (0x3<<21) //22:21
+#define A60810_RG_SSUSB_CPPAT_PRBS_EN (0x1<<20) //20:20
+#define A60810_RG_SSUSB_CPPAT_OUT_TMP2 (0xf<<16) //19:16
+#define A60810_RG_SSUSB_CPPAT_OUT_TMP1 (0xff<<8) //15:8
+#define A60810_RG_SSUSB_CPPAT_OUT_TMP0 (0xff<<0) //7:0
+
+//U3D_PHYD_MIX3
+#define A60810_RG_SSUSB_CDR_TCADJ_MINUS (0x1<<31) //31:31
+#define A60810_RG_SSUSB_P_CDROS_EN (0x1<<30) //30:30
+#define A60810_RG_SSUSB_P_P2_TX_DRV_DIS (0x1<<28) //28:28
+#define A60810_RG_SSUSB_CDR_TCADJ_OFFSET (0x7<<24) //26:24
+#define A60810_RG_SSUSB_PLL_TCADJ_MINUS (0x1<<23) //23:23
+#define A60810_RG_SSUSB_FORCE_PLL_BIAS_LPF_EN (0x1<<20) //20:20
+#define A60810_RG_SSUSB_PLL_BIAS_LPF_EN (0x1<<19) //19:19
+#define A60810_RG_SSUSB_PLL_TCADJ_OFFSET (0x7<<16) //18:16
+#define A60810_RG_SSUSB_FORCE_PLL_SSCEN (0x1<<15) //15:15
+#define A60810_RG_SSUSB_PLL_SSCEN (0x1<<14) //14:14
+#define A60810_RG_SSUSB_FORCE_CDR_PI_PWD (0x1<<13) //13:13
+#define A60810_RG_SSUSB_CDR_PI_PWD (0x1<<12) //12:12
+#define A60810_RG_SSUSB_CDR_PI_MODE (0x1<<11) //11:11
+#define A60810_RG_SSUSB_TXPLL_SSCEN_CYC (0x3ff<<0) //9:0
+
+//U3D_PHYD_EBUFCTL
+#define A60810_RG_SSUSB_EBUFCTL (0xffffffff<<0) //31:0
+
+//U3D_PHYD_PIPE0
+#define A60810_RG_SSUSB_RXTERMINATION (0x1<<30) //30:30
+#define A60810_RG_SSUSB_RXEQTRAINING (0x1<<29) //29:29
+#define A60810_RG_SSUSB_RXPOLARITY (0x1<<28) //28:28
+#define A60810_RG_SSUSB_TXDEEMPH (0x3<<26) //27:26
+#define A60810_RG_SSUSB_POWERDOWN (0x3<<24) //25:24
+#define A60810_RG_SSUSB_TXONESZEROS (0x1<<23) //23:23
+#define A60810_RG_SSUSB_TXELECIDLE (0x1<<22) //22:22
+#define A60810_RG_SSUSB_TXDETECTRX (0x1<<21) //21:21
+#define A60810_RG_SSUSB_PIPE_SEL (0x1<<20) //20:20
+#define A60810_RG_SSUSB_TXDATAK (0xf<<16) //19:16
+#define A60810_RG_SSUSB_CDR_STABLE_SEL (0x1<<15) //15:15
+#define A60810_RG_SSUSB_CDR_STABLE (0x1<<14) //14:14
+#define A60810_RG_SSUSB_CDR_RSTB_SEL (0x1<<13) //13:13
+#define A60810_RG_SSUSB_CDR_RSTB (0x1<<12) //12:12
+#define A60810_RG_SSUSB_FRC_PIPE_POWERDOWN (0x1<<11) //11:11
+#define A60810_RG_SSUSB_P_TXBCN_DIS (0x1<<6) //6:6
+#define A60810_RG_SSUSB_P_ERROR_SEL (0x3<<4) //5:4
+#define A60810_RG_SSUSB_TXMARGIN (0x7<<1) //3:1
+#define A60810_RG_SSUSB_TXCOMPLIANCE (0x1<<0) //0:0
+
+//U3D_PHYD_PIPE1
+#define A60810_RG_SSUSB_TXDATA (0xffffffff<<0) //31:0
+
+//U3D_PHYD_MIX4
+#define A60810_RG_SSUSB_CDROS_CNT (0x3f<<24) //29:24
+#define A60810_RG_SSUSB_T2RLB_BER_EN (0x1<<16) //16:16
+#define A60810_RG_SSUSB_T2RLB_BER_RATE (0xffff<<0) //15:0
+
+//U3D_PHYD_CKGEN0
+#define A60810_RG_SSUSB_RFIFO_IMPLAT (0x1<<27) //27:27
+#define A60810_RG_SSUSB_TFIFO_PSEL (0x7<<24) //26:24
+#define A60810_RG_SSUSB_CKGEN_PSEL (0x3<<8) //9:8
+#define A60810_RG_SSUSB_RXCK_INV (0x1<<0) //0:0
+
+//U3D_PHYD_MIX5
+#define A60810_RG_SSUSB_PRB_SEL (0xffff<<16) //31:16
+#define A60810_RG_SSUSB_RXPLL_STBCYC (0x7ff<<0) //10:0
+
+//U3D_PHYD_RESERVED
+#define A60810_RG_SSUSB_PHYD_RESERVE (0xffffffff<<0) //31:0
+
+//U3D_PHYD_CDR0
+#define A60810_RG_SSUSB_CDR_BIC_LTR (0xf<<28) //31:28
+#define A60810_RG_SSUSB_CDR_BIC_LTD0 (0xf<<24) //27:24
+#define A60810_RG_SSUSB_CDR_BC_LTD1 (0x1f<<16) //20:16
+#define A60810_RG_SSUSB_CDR_BC_LTR (0x1f<<8) //12:8
+#define A60810_RG_SSUSB_CDR_BC_LTD0 (0x1f<<0) //4:0
+
+//U3D_PHYD_CDR1
+#define A60810_RG_SSUSB_CDR_BIR_LTD1 (0x1f<<24) //28:24
+#define A60810_RG_SSUSB_CDR_BIR_LTR (0x1f<<16) //20:16
+#define A60810_RG_SSUSB_CDR_BIR_LTD0 (0x1f<<8) //12:8
+#define A60810_RG_SSUSB_CDR_BW_SEL (0x3<<6) //7:6
+#define A60810_RG_SSUSB_CDR_BIC_LTD1 (0xf<<0) //3:0
+
+//U3D_PHYD_PLL_0
+#define A60810_RG_SSUSB_FORCE_CDR_BAND_5G (0x1<<28) //28:28
+#define A60810_RG_SSUSB_FORCE_CDR_BAND_2P5G (0x1<<27) //27:27
+#define A60810_RG_SSUSB_FORCE_PLL_BAND_5G (0x1<<26) //26:26
+#define A60810_RG_SSUSB_FORCE_PLL_BAND_2P5G (0x1<<25) //25:25
+#define A60810_RG_SSUSB_P_EQ_T_SEL (0x3ff<<15) //24:15
+#define A60810_RG_SSUSB_PLL_ISO_EN_CYC (0x3ff<<5) //14:5
+#define A60810_RG_SSUSB_PLLBAND_RECAL (0x1<<4) //4:4
+#define A60810_RG_SSUSB_PLL_DDS_ISO_EN (0x1<<3) //3:3
+#define A60810_RG_SSUSB_FORCE_PLL_DDS_ISO_EN (0x1<<2) //2:2
+#define A60810_RG_SSUSB_PLL_DDS_PWR_ON (0x1<<1) //1:1
+#define A60810_RG_SSUSB_FORCE_PLL_DDS_PWR_ON (0x1<<0) //0:0
+
+//U3D_PHYD_PLL_1
+#define A60810_RG_SSUSB_CDR_BAND_5G (0xff<<24) //31:24
+#define A60810_RG_SSUSB_CDR_BAND_2P5G (0xff<<16) //23:16
+#define A60810_RG_SSUSB_PLL_BAND_5G (0xff<<8) //15:8
+#define A60810_RG_SSUSB_PLL_BAND_2P5G (0xff<<0) //7:0
+
+//U3D_PHYD_BCN_DET_1
+#define A60810_RG_SSUSB_P_BCN_OBS_PRD (0xffff<<16) //31:16
+#define A60810_RG_SSUSB_U_BCN_OBS_PRD (0xffff<<0) //15:0
+
+//U3D_PHYD_BCN_DET_2
+#define A60810_RG_SSUSB_P_BCN_OBS_SEL (0xfff<<16) //27:16
+#define A60810_RG_SSUSB_BCN_DET_DIS (0x1<<12) //12:12
+#define A60810_RG_SSUSB_U_BCN_OBS_SEL (0xfff<<0) //11:0
+
+//U3D_EQ0
+#define A60810_RG_SSUSB_EQ_DLHL_LFI (0x7f<<24) //30:24
+#define A60810_RG_SSUSB_EQ_DHHL_LFI (0x7f<<16) //22:16
+#define A60810_RG_SSUSB_EQ_DD0HOS_LFI (0x7f<<8) //14:8
+#define A60810_RG_SSUSB_EQ_DD0LOS_LFI (0x7f<<0) //6:0
+
+//U3D_EQ1
+#define A60810_RG_SSUSB_EQ_DD1HOS_LFI (0x7f<<24) //30:24
+#define A60810_RG_SSUSB_EQ_DD1LOS_LFI (0x7f<<16) //22:16
+#define A60810_RG_SSUSB_EQ_DE0OS_LFI (0x7f<<8) //14:8
+#define A60810_RG_SSUSB_EQ_DE1OS_LFI (0x7f<<0) //6:0
+
+//U3D_EQ2
+#define A60810_RG_SSUSB_EQ_DLHLOS_LFI (0x7f<<24) //30:24
+#define A60810_RG_SSUSB_EQ_DHHLOS_LFI (0x7f<<16) //22:16
+#define A60810_RG_SSUSB_EQ_STOPTIME (0x1<<14) //14:14
+#define A60810_RG_SSUSB_EQ_DHHL_LF_SEL (0x7<<11) //13:11
+#define A60810_RG_SSUSB_EQ_DSAOS_LF_SEL (0x7<<8) //10:8
+#define A60810_RG_SSUSB_EQ_STARTTIME (0x3<<6) //7:6
+#define A60810_RG_SSUSB_EQ_DLEQ_LF_SEL (0x7<<3) //5:3
+#define A60810_RG_SSUSB_EQ_DLHL_LF_SEL (0x7<<0) //2:0
+
+//U3D_EQ3
+#define A60810_RG_SSUSB_EQ_DLEQ_LFI_GEN2 (0xf<<28) //31:28
+#define A60810_RG_SSUSB_EQ_DLEQ_LFI_GEN1 (0xf<<24) //27:24
+#define A60810_RG_SSUSB_EQ_DEYE0OS_LFI (0x7f<<16) //22:16
+#define A60810_RG_SSUSB_EQ_DEYE1OS_LFI (0x7f<<8) //14:8
+#define A60810_RG_SSUSB_EQ_TRI_DET_EN (0x1<<7) //7:7
+#define A60810_RG_SSUSB_EQ_TRI_DET_TH (0x7f<<0) //6:0
+
+//U3D_EQ_EYE0
+#define A60810_RG_SSUSB_EQ_EYE_XOFFSET (0x7f<<25) //31:25
+#define A60810_RG_SSUSB_EQ_EYE_MON_EN (0x1<<24) //24:24
+#define A60810_RG_SSUSB_EQ_EYE0_Y (0x7f<<16) //22:16
+#define A60810_RG_SSUSB_EQ_EYE1_Y (0x7f<<8) //14:8
+#define A60810_RG_SSUSB_EQ_PILPO_ROUT (0x1<<7) //7:7
+#define A60810_RG_SSUSB_EQ_PI_KPGAIN (0x7<<4) //6:4
+#define A60810_RG_SSUSB_EQ_EYE_CNT_EN (0x1<<3) //3:3
+
+//U3D_EQ_EYE1
+#define A60810_RG_SSUSB_EQ_SIGDET (0x7f<<24) //30:24
+#define A60810_RG_SSUSB_EQ_EYE_MASK (0x3ff<<7) //16:7
+
+//U3D_EQ_EYE2
+#define A60810_RG_SSUSB_EQ_RX500M_CK_SEL (0x1<<31) //31:31
+#define A60810_RG_SSUSB_EQ_SD_CNT1 (0x3f<<24) //29:24
+#define A60810_RG_SSUSB_EQ_ISIFLAG_SEL (0x3<<22) //23:22
+#define A60810_RG_SSUSB_EQ_SD_CNT0 (0x3f<<16) //21:16
+
+//U3D_EQ_DFE0
+#define A60810_RG_SSUSB_EQ_LEQMAX (0xf<<28) //31:28
+#define A60810_RG_SSUSB_EQ_DFEX_EN (0x1<<27) //27:27
+#define A60810_RG_SSUSB_EQ_DFEX_LF_SEL (0x7<<24) //26:24
+#define A60810_RG_SSUSB_EQ_CHK_EYE_H (0x1<<23) //23:23
+#define A60810_RG_SSUSB_EQ_PIEYE_INI (0x7f<<16) //22:16
+#define A60810_RG_SSUSB_EQ_PI90_INI (0x7f<<8) //14:8
+#define A60810_RG_SSUSB_EQ_PI0_INI (0x7f<<0) //6:0
+
+//U3D_EQ_DFE1
+#define A60810_RG_SSUSB_EQ_REV (0xffff<<16) //31:16
+#define A60810_RG_SSUSB_EQ_DFEYEN_DUR (0x7<<12) //14:12
+#define A60810_RG_SSUSB_EQ_DFEXEN_DUR (0x7<<8) //10:8
+#define A60810_RG_SSUSB_EQ_DFEX_RST (0x1<<7) //7:7
+#define A60810_RG_SSUSB_EQ_GATED_RXD_B (0x1<<6) //6:6
+#define A60810_RG_SSUSB_EQ_PI90CK_SEL (0x3<<4) //5:4
+#define A60810_RG_SSUSB_EQ_DFEX_DIS (0x1<<2) //2:2
+#define A60810_RG_SSUSB_EQ_DFEYEN_STOP_DIS (0x1<<1) //1:1
+#define A60810_RG_SSUSB_EQ_DFEXEN_SEL (0x1<<0) //0:0
+
+//U3D_EQ_DFE2
+#define A60810_RG_SSUSB_EQ_MON_SEL (0x1f<<24) //28:24
+#define A60810_RG_SSUSB_EQ_LEQOSC_DLYCNT (0x7<<16) //18:16
+#define A60810_RG_SSUSB_EQ_DLEQOS_LFI (0x1f<<8) //12:8
+#define A60810_RG_SSUSB_EQ_DFE_TOG (0x1<<2) //2:2
+#define A60810_RG_SSUSB_EQ_LEQ_STOP_TO (0x3<<0) //1:0
+
+//U3D_EQ_DFE3
+#define A60810_RG_SSUSB_EQ_RESERVED (0xffffffff<<0) //31:0
+
+//U3D_PHYD_MON0
+#define A60810_RGS_SSUSB_BERT_BERC (0xffff<<16) //31:16
+#define A60810_RGS_SSUSB_LFPS (0xf<<12) //15:12
+#define A60810_RGS_SSUSB_TRAINDEC (0x7<<8) //10:8
+#define A60810_RGS_SSUSB_SCP_PAT (0xff<<0) //7:0
+
+//U3D_PHYD_MON1
+#define A60810_RGS_SSUSB_RX_FL_OUT (0xffff<<0) //15:0
+
+//U3D_PHYD_MON2
+#define A60810_RGS_SSUSB_T2RLB_ERRCNT (0xffff<<16) //31:16
+#define A60810_RGS_SSUSB_RETRACK (0xf<<12) //15:12
+#define A60810_RGS_SSUSB_RXPLL_LOCK (0x1<<10) //10:10
+#define A60810_RGS_SSUSB_CDR_VCOCAL_CPLT_D (0x1<<9) //9:9
+#define A60810_RGS_SSUSB_PLL_VCOCAL_CPLT_D (0x1<<8) //8:8
+#define A60810_RGS_SSUSB_PDNCTL (0xff<<0) //7:0
+
+//U3D_PHYD_MON3
+#define A60810_RGS_SSUSB_TSEQ_ERRCNT (0xffff<<16) //31:16
+#define A60810_RGS_SSUSB_PRBS_ERRCNT (0xffff<<0) //15:0
+
+//U3D_PHYD_MON4
+#define A60810_RGS_SSUSB_RX_LSLOCK_CNT (0xf<<24) //27:24
+#define A60810_RGS_SSUSB_SCP_DETCNT (0xff<<16) //23:16
+#define A60810_RGS_SSUSB_TSEQ_DETCNT (0xffff<<0) //15:0
+
+//U3D_PHYD_MON5
+#define A60810_RGS_SSUSB_EBUFMSG (0xffff<<16) //31:16
+#define A60810_RGS_SSUSB_BERT_LOCK (0x1<<15) //15:15
+#define A60810_RGS_SSUSB_SCP_DET (0x1<<14) //14:14
+#define A60810_RGS_SSUSB_TSEQ_DET (0x1<<13) //13:13
+#define A60810_RGS_SSUSB_EBUF_UDF (0x1<<12) //12:12
+#define A60810_RGS_SSUSB_EBUF_OVF (0x1<<11) //11:11
+#define A60810_RGS_SSUSB_PRBS_PASSTH (0x1<<10) //10:10
+#define A60810_RGS_SSUSB_PRBS_PASS (0x1<<9) //9:9
+#define A60810_RGS_SSUSB_PRBS_LOCK (0x1<<8) //8:8
+#define A60810_RGS_SSUSB_T2RLB_ERR (0x1<<6) //6:6
+#define A60810_RGS_SSUSB_T2RLB_PASSTH (0x1<<5) //5:5
+#define A60810_RGS_SSUSB_T2RLB_PASS (0x1<<4) //4:4
+#define A60810_RGS_SSUSB_T2RLB_LOCK (0x1<<3) //3:3
+#define A60810_RGS_SSUSB_RX_IMPCAL_DONE (0x1<<2) //2:2
+#define A60810_RGS_SSUSB_TX_IMPCAL_DONE (0x1<<1) //1:1
+#define A60810_RGS_SSUSB_RXDETECTED (0x1<<0) //0:0
+
+//U3D_PHYD_MON6
+#define A60810_RGS_SSUSB_SIGCAL_DONE (0x1<<30) //30:30
+#define A60810_RGS_SSUSB_SIGCAL_CAL_OUT (0x1<<29) //29:29
+#define A60810_RGS_SSUSB_SIGCAL_OFFSET (0x1f<<24) //28:24
+#define A60810_RGS_SSUSB_RX_IMP_SEL (0x1f<<16) //20:16
+#define A60810_RGS_SSUSB_TX_IMP_SEL (0x1f<<8) //12:8
+#define A60810_RGS_SSUSB_TFIFO_MSG (0xf<<4) //7:4
+#define A60810_RGS_SSUSB_RFIFO_MSG (0xf<<0) //3:0
+
+//U3D_PHYD_MON7
+#define A60810_RGS_SSUSB_FT_OUT (0xff<<8) //15:8
+#define A60810_RGS_SSUSB_PRB_OUT (0xff<<0) //7:0
+
+//U3D_PHYA_RX_MON0
+#define A60810_RGS_SSUSB_EQ_DCLEQ (0xf<<24) //27:24
+#define A60810_RGS_SSUSB_EQ_DCD0H (0x7f<<16) //22:16
+#define A60810_RGS_SSUSB_EQ_DCD0L (0x7f<<8) //14:8
+#define A60810_RGS_SSUSB_EQ_DCD1H (0x7f<<0) //6:0
+
+//U3D_PHYA_RX_MON1
+#define A60810_RGS_SSUSB_EQ_DCD1L (0x7f<<24) //30:24
+#define A60810_RGS_SSUSB_EQ_DCE0 (0x7f<<16) //22:16
+#define A60810_RGS_SSUSB_EQ_DCE1 (0x7f<<8) //14:8
+#define A60810_RGS_SSUSB_EQ_DCHHL (0x7f<<0) //6:0
+
+//U3D_PHYA_RX_MON2
+#define A60810_RGS_SSUSB_EQ_LEQ_STOP (0x1<<31) //31:31
+#define A60810_RGS_SSUSB_EQ_DCLHL (0x7f<<24) //30:24
+#define A60810_RGS_SSUSB_EQ_STATUS (0xff<<16) //23:16
+#define A60810_RGS_SSUSB_EQ_DCEYE0 (0x7f<<8) //14:8
+#define A60810_RGS_SSUSB_EQ_DCEYE1 (0x7f<<0) //6:0
+
+//U3D_PHYA_RX_MON3
+#define A60810_RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_0 (0xfffff<<0) //19:0
+
+//U3D_PHYA_RX_MON4
+#define A60810_RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_1 (0xfffff<<0) //19:0
+
+//U3D_PHYA_RX_MON5
+#define A60810_RGS_SSUSB_EQ_DCLEQOS (0x1f<<8) //12:8
+#define A60810_RGS_SSUSB_EQ_EYE_CNT_RDY (0x1<<7) //7:7
+#define A60810_RGS_SSUSB_EQ_PILPO (0x7f<<0) //6:0
+
+//U3D_PHYD_CPPAT2
+#define A60810_RG_SSUSB_CPPAT_OUT_H_TMP2 (0xf<<16) //19:16
+#define A60810_RG_SSUSB_CPPAT_OUT_H_TMP1 (0xff<<8) //15:8
+#define A60810_RG_SSUSB_CPPAT_OUT_H_TMP0 (0xff<<0) //7:0
+
+//U3D_EQ_EYE3
+#define A60810_RG_SSUSB_EQ_LEQ_SHIFT (0x7<<24) //26:24
+#define A60810_RG_SSUSB_EQ_EYE_CNT (0xfffff<<0) //19:0
+
+//U3D_KBAND_OUT
+#define A60810_RGS_SSUSB_CDR_BAND_5G (0xff<<24) //31:24
+#define A60810_RGS_SSUSB_CDR_BAND_2P5G (0xff<<16) //23:16
+#define A60810_RGS_SSUSB_PLL_BAND_5G (0xff<<8) //15:8
+#define A60810_RGS_SSUSB_PLL_BAND_2P5G (0xff<<0) //7:0
+
+//U3D_KBAND_OUT1
+#define A60810_RGS_SSUSB_CDR_VCOCAL_FAIL (0x1<<24) //24:24
+#define A60810_RGS_SSUSB_CDR_VCOCAL_STATE (0xff<<16) //23:16
+#define A60810_RGS_SSUSB_PLL_VCOCAL_FAIL (0x1<<8) //8:8
+#define A60810_RGS_SSUSB_PLL_VCOCAL_STATE (0xff<<0) //7:0
+
+/* OFFSET */
+
+//U3D_PHYD_MIX0
+#define A60810_RG_SSUSB_P_P3_TX_NG_OFST (31)
+#define A60810_RG_SSUSB_TSEQ_EN_OFST (30)
+#define A60810_RG_SSUSB_TSEQ_POLEN_OFST (29)
+#define A60810_RG_SSUSB_TSEQ_POL_OFST (28)
+#define A60810_RG_SSUSB_P_P3_PCLK_NG_OFST (27)
+#define A60810_RG_SSUSB_TSEQ_TH_OFST (24)
+#define A60810_RG_SSUSB_PRBS_BERTH_OFST (16)
+#define A60810_RG_SSUSB_DISABLE_PHY_U2_ON_OFST (15)
+#define A60810_RG_SSUSB_DISABLE_PHY_U2_OFF_OFST (14)
+#define A60810_RG_SSUSB_PRBS_EN_OFST (13)
+#define A60810_RG_SSUSB_BPSLOCK_OFST (12)
+#define A60810_RG_SSUSB_RTCOMCNT_OFST (8)
+#define A60810_RG_SSUSB_COMCNT_OFST (4)
+#define A60810_RG_SSUSB_PRBSEL_CALIB_OFST (0)
+
+//U3D_PHYD_MIX1
+#define A60810_RG_SSUSB_SLEEP_EN_OFST (31)
+#define A60810_RG_SSUSB_PRBSEL_PCS_OFST (28)
+#define A60810_RG_SSUSB_TXLFPS_PRD_OFST (24)
+#define A60810_RG_SSUSB_P_RX_P0S_CK_OFST (23)
+#define A60810_RG_SSUSB_P_TX_P0S_CK_OFST (22)
+#define A60810_RG_SSUSB_PDNCTL_OFST (16)
+#define A60810_RG_SSUSB_TX_DRV_EN_OFST (15)
+#define A60810_RG_SSUSB_TX_DRV_SEL_OFST (14)
+#define A60810_RG_SSUSB_TX_DRV_DLY_OFST (8)
+#define A60810_RG_SSUSB_BERT_EN_OFST (7)
+#define A60810_RG_SSUSB_SCP_TH_OFST (4)
+#define A60810_RG_SSUSB_SCP_EN_OFST (3)
+#define A60810_RG_SSUSB_RXANSIDEC_TEST_OFST (0)
+
+//U3D_PHYD_LFPS0
+#define A60810_RG_SSUSB_LFPS_PWD_OFST (30)
+#define A60810_RG_SSUSB_FORCE_LFPS_PWD_OFST (29)
+#define A60810_RG_SSUSB_RXLFPS_OVF_OFST (24)
+#define A60810_RG_SSUSB_P3_ENTRY_SEL_OFST (23)
+#define A60810_RG_SSUSB_P3_ENTRY_OFST (22)
+#define A60810_RG_SSUSB_RXLFPS_CDRSEL_OFST (20)
+#define A60810_RG_SSUSB_RXLFPS_CDRTH_OFST (16)
+#define A60810_RG_SSUSB_LOCK5G_BLOCK_OFST (15)
+#define A60810_RG_SSUSB_TFIFO_EXT_D_SEL_OFST (14)
+#define A60810_RG_SSUSB_TFIFO_NO_EXTEND_OFST (13)
+#define A60810_RG_SSUSB_RXLFPS_LOB_OFST (8)
+#define A60810_RG_SSUSB_TXLFPS_EN_OFST (7)
+#define A60810_RG_SSUSB_TXLFPS_SEL_OFST (6)
+#define A60810_RG_SSUSB_RXLFPS_CDRLOCK_OFST (5)
+#define A60810_RG_SSUSB_RXLFPS_UPB_OFST (0)
+
+//U3D_PHYD_LFPS1
+#define A60810_RG_SSUSB_RX_IMP_BIAS_OFST (28)
+#define A60810_RG_SSUSB_TX_IMP_BIAS_OFST (24)
+#define A60810_RG_SSUSB_FWAKE_TH_OFST (16)
+#define A60810_RG_SSUSB_P1_ENTRY_SEL_OFST (14)
+#define A60810_RG_SSUSB_P1_ENTRY_OFST (13)
+#define A60810_RG_SSUSB_RXLFPS_UDF_OFST (8)
+#define A60810_RG_SSUSB_RXLFPS_P0IDLETH_OFST (0)
+
+//U3D_PHYD_IMPCAL0
+#define A60810_RG_SSUSB_FORCE_TX_IMPSEL_OFST (31)
+#define A60810_RG_SSUSB_TX_IMPCAL_EN_OFST (30)
+#define A60810_RG_SSUSB_FORCE_TX_IMPCAL_EN_OFST (29)
+#define A60810_RG_SSUSB_TX_IMPSEL_OFST (24)
+#define A60810_RG_SSUSB_TX_IMPCAL_CALCYC_OFST (16)
+#define A60810_RG_SSUSB_TX_IMPCAL_STBCYC_OFST (10)
+#define A60810_RG_SSUSB_TX_IMPCAL_CYCCNT_OFST (0)
+
+//U3D_PHYD_IMPCAL1
+#define A60810_RG_SSUSB_FORCE_RX_IMPSEL_OFST (31)
+#define A60810_RG_SSUSB_RX_IMPCAL_EN_OFST (30)
+#define A60810_RG_SSUSB_FORCE_RX_IMPCAL_EN_OFST (29)
+#define A60810_RG_SSUSB_RX_IMPSEL_OFST (24)
+#define A60810_RG_SSUSB_RX_IMPCAL_CALCYC_OFST (16)
+#define A60810_RG_SSUSB_RX_IMPCAL_STBCYC_OFST (10)
+#define A60810_RG_SSUSB_RX_IMPCAL_CYCCNT_OFST (0)
+
+//U3D_PHYD_TXPLL0
+#define A60810_RG_SSUSB_TXPLL_DDSEN_CYC_OFST (27)
+#define A60810_RG_SSUSB_TXPLL_ON_OFST (26)
+#define A60810_RG_SSUSB_FORCE_TXPLLON_OFST (25)
+#define A60810_RG_SSUSB_TXPLL_STBCYC_OFST (16)
+#define A60810_RG_SSUSB_TXPLL_NCPOCHG_CYC_OFST (12)
+#define A60810_RG_SSUSB_TXPLL_NCPOEN_CYC_OFST (10)
+#define A60810_RG_SSUSB_TXPLL_DDSRSTB_CYC_OFST (0)
+
+//U3D_PHYD_TXPLL1
+#define A60810_RG_SSUSB_PLL_NCPO_EN_OFST (31)
+#define A60810_RG_SSUSB_PLL_FIFO_START_MAN_OFST (30)
+#define A60810_RG_SSUSB_PLL_NCPO_CHG_OFST (28)
+#define A60810_RG_SSUSB_PLL_DDS_RSTB_OFST (27)
+#define A60810_RG_SSUSB_PLL_DDS_PWDB_OFST (26)
+#define A60810_RG_SSUSB_PLL_DDSEN_OFST (25)
+#define A60810_RG_SSUSB_PLL_AUTOK_VCO_OFST (24)
+#define A60810_RG_SSUSB_PLL_PWD_OFST (23)
+#define A60810_RG_SSUSB_RX_AFE_PWD_OFST (22)
+#define A60810_RG_SSUSB_PLL_TCADJ_OFST (16)
+#define A60810_RG_SSUSB_FORCE_CDR_TCADJ_OFST (15)
+#define A60810_RG_SSUSB_FORCE_CDR_AUTOK_VCO_OFST (14)
+#define A60810_RG_SSUSB_FORCE_CDR_PWD_OFST (13)
+#define A60810_RG_SSUSB_FORCE_PLL_NCPO_EN_OFST (12)
+#define A60810_RG_SSUSB_FORCE_PLL_FIFO_START_MAN_OFST (11)
+#define A60810_RG_SSUSB_FORCE_PLL_NCPO_CHG_OFST (9)
+#define A60810_RG_SSUSB_FORCE_PLL_DDS_RSTB_OFST (8)
+#define A60810_RG_SSUSB_FORCE_PLL_DDS_PWDB_OFST (7)
+#define A60810_RG_SSUSB_FORCE_PLL_DDSEN_OFST (6)
+#define A60810_RG_SSUSB_FORCE_PLL_TCADJ_OFST (5)
+#define A60810_RG_SSUSB_FORCE_PLL_AUTOK_VCO_OFST (4)
+#define A60810_RG_SSUSB_FORCE_PLL_PWD_OFST (3)
+#define A60810_RG_SSUSB_FLT_1_DISPERR_B_OFST (2)
+
+//U3D_PHYD_TXPLL2
+#define A60810_RG_SSUSB_TX_LFPS_EN_OFST (31)
+#define A60810_RG_SSUSB_FORCE_TX_LFPS_EN_OFST (30)
+#define A60810_RG_SSUSB_TX_LFPS_OFST (29)
+#define A60810_RG_SSUSB_FORCE_TX_LFPS_OFST (28)
+#define A60810_RG_SSUSB_RXPLL_STB_OFST (27)
+#define A60810_RG_SSUSB_TXPLL_STB_OFST (26)
+#define A60810_RG_SSUSB_FORCE_RXPLL_STB_OFST (25)
+#define A60810_RG_SSUSB_FORCE_TXPLL_STB_OFST (24)
+#define A60810_RG_SSUSB_RXPLL_REFCKSEL_OFST (16)
+#define A60810_RG_SSUSB_RXPLL_STBMODE_OFST (11)
+#define A60810_RG_SSUSB_RXPLL_ON_OFST (10)
+#define A60810_RG_SSUSB_FORCE_RXPLLON_OFST (9)
+#define A60810_RG_SSUSB_FORCE_RX_AFE_PWD_OFST (8)
+#define A60810_RG_SSUSB_CDR_AUTOK_VCO_OFST (7)
+#define A60810_RG_SSUSB_CDR_PWD_OFST (6)
+#define A60810_RG_SSUSB_CDR_TCADJ_OFST (0)
+
+//U3D_PHYD_FL0
+#define A60810_RG_SSUSB_RX_FL_TARGET_OFST (16)
+#define A60810_RG_SSUSB_RX_FL_CYCLECNT_OFST (0)
+
+//U3D_PHYD_MIX2
+#define A60810_RG_SSUSB_RX_EQ_RST_OFST (31)
+#define A60810_RG_SSUSB_RX_EQ_RST_SEL_OFST (30)
+#define A60810_RG_SSUSB_RXVAL_RST_OFST (29)
+#define A60810_RG_SSUSB_RXVAL_CNT_OFST (24)
+#define A60810_RG_SSUSB_CDROS_EN_OFST (18)
+#define A60810_RG_SSUSB_CDR_LCKOP_OFST (16)
+#define A60810_RG_SSUSB_RX_FL_LOCKTH_OFST (8)
+#define A60810_RG_SSUSB_RX_FL_OFFSET_OFST (0)
+
+//U3D_PHYD_RX0
+#define A60810_RG_SSUSB_T2RLB_BERTH_OFST (24)
+#define A60810_RG_SSUSB_T2RLB_PAT_OFST (16)
+#define A60810_RG_SSUSB_T2RLB_EN_OFST (15)
+#define A60810_RG_SSUSB_T2RLB_BPSCRAMB_OFST (14)
+#define A60810_RG_SSUSB_T2RLB_SERIAL_OFST (13)
+#define A60810_RG_SSUSB_T2RLB_MODE_OFST (11)
+#define A60810_RG_SSUSB_RX_SAOSC_EN_OFST (10)
+#define A60810_RG_SSUSB_RX_SAOSC_EN_SEL_OFST (9)
+#define A60810_RG_SSUSB_RX_DFE_OPTION_OFST (8)
+#define A60810_RG_SSUSB_RX_DFE_EN_OFST (7)
+#define A60810_RG_SSUSB_RX_DFE_EN_SEL_OFST (6)
+#define A60810_RG_SSUSB_RX_EQ_EN_OFST (5)
+#define A60810_RG_SSUSB_RX_EQ_EN_SEL_OFST (4)
+#define A60810_RG_SSUSB_RX_SAOSC_RST_OFST (3)
+#define A60810_RG_SSUSB_RX_SAOSC_RST_SEL_OFST (2)
+#define A60810_RG_SSUSB_RX_DFE_RST_OFST (1)
+#define A60810_RG_SSUSB_RX_DFE_RST_SEL_OFST (0)
+
+//U3D_PHYD_T2RLB
+#define A60810_RG_SSUSB_EQTRAIN_CH_MODE_OFST (28)
+#define A60810_RG_SSUSB_PRB_OUT_CPPAT_OFST (27)
+#define A60810_RG_SSUSB_BPANSIENC_OFST (26)
+#define A60810_RG_SSUSB_VALID_EN_OFST (25)
+#define A60810_RG_SSUSB_EBUF_SRST_OFST (24)
+#define A60810_RG_SSUSB_K_EMP_OFST (20)
+#define A60810_RG_SSUSB_K_FUL_OFST (16)
+#define A60810_RG_SSUSB_T2RLB_BDATRST_OFST (12)
+#define A60810_RG_SSUSB_P_T2RLB_SKP_EN_OFST (10)
+#define A60810_RG_SSUSB_T2RLB_PATMODE_OFST (8)
+#define A60810_RG_SSUSB_T2RLB_TSEQCNT_OFST (0)
+
+//U3D_PHYD_CPPAT
+#define A60810_RG_SSUSB_CPPAT_PROGRAM_EN_OFST (24)
+#define A60810_RG_SSUSB_CPPAT_TOZ_OFST (21)
+#define A60810_RG_SSUSB_CPPAT_PRBS_EN_OFST (20)
+#define A60810_RG_SSUSB_CPPAT_OUT_TMP2_OFST (16)
+#define A60810_RG_SSUSB_CPPAT_OUT_TMP1_OFST (8)
+#define A60810_RG_SSUSB_CPPAT_OUT_TMP0_OFST (0)
+
+//U3D_PHYD_MIX3
+#define A60810_RG_SSUSB_CDR_TCADJ_MINUS_OFST (31)
+#define A60810_RG_SSUSB_P_CDROS_EN_OFST (30)
+#define A60810_RG_SSUSB_P_P2_TX_DRV_DIS_OFST (28)
+#define A60810_RG_SSUSB_CDR_TCADJ_OFFSET_OFST (24)
+#define A60810_RG_SSUSB_PLL_TCADJ_MINUS_OFST (23)
+#define A60810_RG_SSUSB_FORCE_PLL_BIAS_LPF_EN_OFST (20)
+#define A60810_RG_SSUSB_PLL_BIAS_LPF_EN_OFST (19)
+#define A60810_RG_SSUSB_PLL_TCADJ_OFFSET_OFST (16)
+#define A60810_RG_SSUSB_FORCE_PLL_SSCEN_OFST (15)
+#define A60810_RG_SSUSB_PLL_SSCEN_OFST (14)
+#define A60810_RG_SSUSB_FORCE_CDR_PI_PWD_OFST (13)
+#define A60810_RG_SSUSB_CDR_PI_PWD_OFST (12)
+#define A60810_RG_SSUSB_CDR_PI_MODE_OFST (11)
+#define A60810_RG_SSUSB_TXPLL_SSCEN_CYC_OFST (0)
+
+//U3D_PHYD_EBUFCTL
+#define A60810_RG_SSUSB_EBUFCTL_OFST (0)
+
+//U3D_PHYD_PIPE0
+#define A60810_RG_SSUSB_RXTERMINATION_OFST (30)
+#define A60810_RG_SSUSB_RXEQTRAINING_OFST (29)
+#define A60810_RG_SSUSB_RXPOLARITY_OFST (28)
+#define A60810_RG_SSUSB_TXDEEMPH_OFST (26)
+#define A60810_RG_SSUSB_POWERDOWN_OFST (24)
+#define A60810_RG_SSUSB_TXONESZEROS_OFST (23)
+#define A60810_RG_SSUSB_TXELECIDLE_OFST (22)
+#define A60810_RG_SSUSB_TXDETECTRX_OFST (21)
+#define A60810_RG_SSUSB_PIPE_SEL_OFST (20)
+#define A60810_RG_SSUSB_TXDATAK_OFST (16)
+#define A60810_RG_SSUSB_CDR_STABLE_SEL_OFST (15)
+#define A60810_RG_SSUSB_CDR_STABLE_OFST (14)
+#define A60810_RG_SSUSB_CDR_RSTB_SEL_OFST (13)
+#define A60810_RG_SSUSB_CDR_RSTB_OFST (12)
+#define A60810_RG_SSUSB_FRC_PIPE_POWERDOWN_OFST (11)
+#define A60810_RG_SSUSB_P_TXBCN_DIS_OFST (6)
+#define A60810_RG_SSUSB_P_ERROR_SEL_OFST (4)
+#define A60810_RG_SSUSB_TXMARGIN_OFST (1)
+#define A60810_RG_SSUSB_TXCOMPLIANCE_OFST (0)
+
+//U3D_PHYD_PIPE1
+#define A60810_RG_SSUSB_TXDATA_OFST (0)
+
+//U3D_PHYD_MIX4
+#define A60810_RG_SSUSB_CDROS_CNT_OFST (24)
+#define A60810_RG_SSUSB_T2RLB_BER_EN_OFST (16)
+#define A60810_RG_SSUSB_T2RLB_BER_RATE_OFST (0)
+
+//U3D_PHYD_CKGEN0
+#define A60810_RG_SSUSB_RFIFO_IMPLAT_OFST (27)
+#define A60810_RG_SSUSB_TFIFO_PSEL_OFST (24)
+#define A60810_RG_SSUSB_CKGEN_PSEL_OFST (8)
+#define A60810_RG_SSUSB_RXCK_INV_OFST (0)
+
+//U3D_PHYD_MIX5
+#define A60810_RG_SSUSB_PRB_SEL_OFST (16)
+#define A60810_RG_SSUSB_RXPLL_STBCYC_OFST (0)
+
+//U3D_PHYD_RESERVED
+#define A60810_RG_SSUSB_PHYD_RESERVE_OFST (0)
+
+//U3D_PHYD_CDR0
+#define A60810_RG_SSUSB_CDR_BIC_LTR_OFST (28)
+#define A60810_RG_SSUSB_CDR_BIC_LTD0_OFST (24)
+#define A60810_RG_SSUSB_CDR_BC_LTD1_OFST (16)
+#define A60810_RG_SSUSB_CDR_BC_LTR_OFST (8)
+#define A60810_RG_SSUSB_CDR_BC_LTD0_OFST (0)
+
+//U3D_PHYD_CDR1
+#define A60810_RG_SSUSB_CDR_BIR_LTD1_OFST (24)
+#define A60810_RG_SSUSB_CDR_BIR_LTR_OFST (16)
+#define A60810_RG_SSUSB_CDR_BIR_LTD0_OFST (8)
+#define A60810_RG_SSUSB_CDR_BW_SEL_OFST (6)
+#define A60810_RG_SSUSB_CDR_BIC_LTD1_OFST (0)
+
+//U3D_PHYD_PLL_0
+#define A60810_RG_SSUSB_FORCE_CDR_BAND_5G_OFST (28)
+#define A60810_RG_SSUSB_FORCE_CDR_BAND_2P5G_OFST (27)
+#define A60810_RG_SSUSB_FORCE_PLL_BAND_5G_OFST (26)
+#define A60810_RG_SSUSB_FORCE_PLL_BAND_2P5G_OFST (25)
+#define A60810_RG_SSUSB_P_EQ_T_SEL_OFST (15)
+#define A60810_RG_SSUSB_PLL_ISO_EN_CYC_OFST (5)
+#define A60810_RG_SSUSB_PLLBAND_RECAL_OFST (4)
+#define A60810_RG_SSUSB_PLL_DDS_ISO_EN_OFST (3)
+#define A60810_RG_SSUSB_FORCE_PLL_DDS_ISO_EN_OFST (2)
+#define A60810_RG_SSUSB_PLL_DDS_PWR_ON_OFST (1)
+#define A60810_RG_SSUSB_FORCE_PLL_DDS_PWR_ON_OFST (0)
+
+//U3D_PHYD_PLL_1
+#define A60810_RG_SSUSB_CDR_BAND_5G_OFST (24)
+#define A60810_RG_SSUSB_CDR_BAND_2P5G_OFST (16)
+#define A60810_RG_SSUSB_PLL_BAND_5G_OFST (8)
+#define A60810_RG_SSUSB_PLL_BAND_2P5G_OFST (0)
+
+//U3D_PHYD_BCN_DET_1
+#define A60810_RG_SSUSB_P_BCN_OBS_PRD_OFST (16)
+#define A60810_RG_SSUSB_U_BCN_OBS_PRD_OFST (0)
+
+//U3D_PHYD_BCN_DET_2
+#define A60810_RG_SSUSB_P_BCN_OBS_SEL_OFST (16)
+#define A60810_RG_SSUSB_BCN_DET_DIS_OFST (12)
+#define A60810_RG_SSUSB_U_BCN_OBS_SEL_OFST (0)
+
+//U3D_EQ0
+#define A60810_RG_SSUSB_EQ_DLHL_LFI_OFST (24)
+#define A60810_RG_SSUSB_EQ_DHHL_LFI_OFST (16)
+#define A60810_RG_SSUSB_EQ_DD0HOS_LFI_OFST (8)
+#define A60810_RG_SSUSB_EQ_DD0LOS_LFI_OFST (0)
+
+//U3D_EQ1
+#define A60810_RG_SSUSB_EQ_DD1HOS_LFI_OFST (24)
+#define A60810_RG_SSUSB_EQ_DD1LOS_LFI_OFST (16)
+#define A60810_RG_SSUSB_EQ_DE0OS_LFI_OFST (8)
+#define A60810_RG_SSUSB_EQ_DE1OS_LFI_OFST (0)
+
+//U3D_EQ2
+#define A60810_RG_SSUSB_EQ_DLHLOS_LFI_OFST (24)
+#define A60810_RG_SSUSB_EQ_DHHLOS_LFI_OFST (16)
+#define A60810_RG_SSUSB_EQ_STOPTIME_OFST (14)
+#define A60810_RG_SSUSB_EQ_DHHL_LF_SEL_OFST (11)
+#define A60810_RG_SSUSB_EQ_DSAOS_LF_SEL_OFST (8)
+#define A60810_RG_SSUSB_EQ_STARTTIME_OFST (6)
+#define A60810_RG_SSUSB_EQ_DLEQ_LF_SEL_OFST (3)
+#define A60810_RG_SSUSB_EQ_DLHL_LF_SEL_OFST (0)
+
+//U3D_EQ3
+#define A60810_RG_SSUSB_EQ_DLEQ_LFI_GEN2_OFST (28)
+#define A60810_RG_SSUSB_EQ_DLEQ_LFI_GEN1_OFST (24)
+#define A60810_RG_SSUSB_EQ_DEYE0OS_LFI_OFST (16)
+#define A60810_RG_SSUSB_EQ_DEYE1OS_LFI_OFST (8)
+#define A60810_RG_SSUSB_EQ_TRI_DET_EN_OFST (7)
+#define A60810_RG_SSUSB_EQ_TRI_DET_TH_OFST (0)
+
+//U3D_EQ_EYE0
+#define A60810_RG_SSUSB_EQ_EYE_XOFFSET_OFST (25)
+#define A60810_RG_SSUSB_EQ_EYE_MON_EN_OFST (24)
+#define A60810_RG_SSUSB_EQ_EYE0_Y_OFST (16)
+#define A60810_RG_SSUSB_EQ_EYE1_Y_OFST (8)
+#define A60810_RG_SSUSB_EQ_PILPO_ROUT_OFST (7)
+#define A60810_RG_SSUSB_EQ_PI_KPGAIN_OFST (4)
+#define A60810_RG_SSUSB_EQ_EYE_CNT_EN_OFST (3)
+
+//U3D_EQ_EYE1
+#define A60810_RG_SSUSB_EQ_SIGDET_OFST (24)
+#define A60810_RG_SSUSB_EQ_EYE_MASK_OFST (7)
+
+//U3D_EQ_EYE2
+#define A60810_RG_SSUSB_EQ_RX500M_CK_SEL_OFST (31)
+#define A60810_RG_SSUSB_EQ_SD_CNT1_OFST (24)
+#define A60810_RG_SSUSB_EQ_ISIFLAG_SEL_OFST (22)
+#define A60810_RG_SSUSB_EQ_SD_CNT0_OFST (16)
+
+//U3D_EQ_DFE0
+#define A60810_RG_SSUSB_EQ_LEQMAX_OFST (28)
+#define A60810_RG_SSUSB_EQ_DFEX_EN_OFST (27)
+#define A60810_RG_SSUSB_EQ_DFEX_LF_SEL_OFST (24)
+#define A60810_RG_SSUSB_EQ_CHK_EYE_H_OFST (23)
+#define A60810_RG_SSUSB_EQ_PIEYE_INI_OFST (16)
+#define A60810_RG_SSUSB_EQ_PI90_INI_OFST (8)
+#define A60810_RG_SSUSB_EQ_PI0_INI_OFST (0)
+
+//U3D_EQ_DFE1
+#define A60810_RG_SSUSB_EQ_REV_OFST (16)
+#define A60810_RG_SSUSB_EQ_DFEYEN_DUR_OFST (12)
+#define A60810_RG_SSUSB_EQ_DFEXEN_DUR_OFST (8)
+#define A60810_RG_SSUSB_EQ_DFEX_RST_OFST (7)
+#define A60810_RG_SSUSB_EQ_GATED_RXD_B_OFST (6)
+#define A60810_RG_SSUSB_EQ_PI90CK_SEL_OFST (4)
+#define A60810_RG_SSUSB_EQ_DFEX_DIS_OFST (2)
+#define A60810_RG_SSUSB_EQ_DFEYEN_STOP_DIS_OFST (1)
+#define A60810_RG_SSUSB_EQ_DFEXEN_SEL_OFST (0)
+
+//U3D_EQ_DFE2
+#define A60810_RG_SSUSB_EQ_MON_SEL_OFST (24)
+#define A60810_RG_SSUSB_EQ_LEQOSC_DLYCNT_OFST (16)
+#define A60810_RG_SSUSB_EQ_DLEQOS_LFI_OFST (8)
+#define A60810_RG_SSUSB_EQ_DFE_TOG_OFST (2)
+#define A60810_RG_SSUSB_EQ_LEQ_STOP_TO_OFST (0)
+
+//U3D_EQ_DFE3
+#define A60810_RG_SSUSB_EQ_RESERVED_OFST (0)
+
+//U3D_PHYD_MON0
+#define A60810_RGS_SSUSB_BERT_BERC_OFST (16)
+#define A60810_RGS_SSUSB_LFPS_OFST (12)
+#define A60810_RGS_SSUSB_TRAINDEC_OFST (8)
+#define A60810_RGS_SSUSB_SCP_PAT_OFST (0)
+
+//U3D_PHYD_MON1
+#define A60810_RGS_SSUSB_RX_FL_OUT_OFST (0)
+
+//U3D_PHYD_MON2
+#define A60810_RGS_SSUSB_T2RLB_ERRCNT_OFST (16)
+#define A60810_RGS_SSUSB_RETRACK_OFST (12)
+#define A60810_RGS_SSUSB_RXPLL_LOCK_OFST (10)
+#define A60810_RGS_SSUSB_CDR_VCOCAL_CPLT_D_OFST (9)
+#define A60810_RGS_SSUSB_PLL_VCOCAL_CPLT_D_OFST (8)
+#define A60810_RGS_SSUSB_PDNCTL_OFST (0)
+
+//U3D_PHYD_MON3
+#define A60810_RGS_SSUSB_TSEQ_ERRCNT_OFST (16)
+#define A60810_RGS_SSUSB_PRBS_ERRCNT_OFST (0)
+
+//U3D_PHYD_MON4
+#define A60810_RGS_SSUSB_RX_LSLOCK_CNT_OFST (24)
+#define A60810_RGS_SSUSB_SCP_DETCNT_OFST (16)
+#define A60810_RGS_SSUSB_TSEQ_DETCNT_OFST (0)
+
+//U3D_PHYD_MON5
+#define A60810_RGS_SSUSB_EBUFMSG_OFST (16)
+#define A60810_RGS_SSUSB_BERT_LOCK_OFST (15)
+#define A60810_RGS_SSUSB_SCP_DET_OFST (14)
+#define A60810_RGS_SSUSB_TSEQ_DET_OFST (13)
+#define A60810_RGS_SSUSB_EBUF_UDF_OFST (12)
+#define A60810_RGS_SSUSB_EBUF_OVF_OFST (11)
+#define A60810_RGS_SSUSB_PRBS_PASSTH_OFST (10)
+#define A60810_RGS_SSUSB_PRBS_PASS_OFST (9)
+#define A60810_RGS_SSUSB_PRBS_LOCK_OFST (8)
+#define A60810_RGS_SSUSB_T2RLB_ERR_OFST (6)
+#define A60810_RGS_SSUSB_T2RLB_PASSTH_OFST (5)
+#define A60810_RGS_SSUSB_T2RLB_PASS_OFST (4)
+#define A60810_RGS_SSUSB_T2RLB_LOCK_OFST (3)
+#define A60810_RGS_SSUSB_RX_IMPCAL_DONE_OFST (2)
+#define A60810_RGS_SSUSB_TX_IMPCAL_DONE_OFST (1)
+#define A60810_RGS_SSUSB_RXDETECTED_OFST (0)
+
+//U3D_PHYD_MON6
+#define A60810_RGS_SSUSB_SIGCAL_DONE_OFST (30)
+#define A60810_RGS_SSUSB_SIGCAL_CAL_OUT_OFST (29)
+#define A60810_RGS_SSUSB_SIGCAL_OFFSET_OFST (24)
+#define A60810_RGS_SSUSB_RX_IMP_SEL_OFST (16)
+#define A60810_RGS_SSUSB_TX_IMP_SEL_OFST (8)
+#define A60810_RGS_SSUSB_TFIFO_MSG_OFST (4)
+#define A60810_RGS_SSUSB_RFIFO_MSG_OFST (0)
+
+//U3D_PHYD_MON7
+#define A60810_RGS_SSUSB_FT_OUT_OFST (8)
+#define A60810_RGS_SSUSB_PRB_OUT_OFST (0)
+
+//U3D_PHYA_RX_MON0
+#define A60810_RGS_SSUSB_EQ_DCLEQ_OFST (24)
+#define A60810_RGS_SSUSB_EQ_DCD0H_OFST (16)
+#define A60810_RGS_SSUSB_EQ_DCD0L_OFST (8)
+#define A60810_RGS_SSUSB_EQ_DCD1H_OFST (0)
+
+//U3D_PHYA_RX_MON1
+#define A60810_RGS_SSUSB_EQ_DCD1L_OFST (24)
+#define A60810_RGS_SSUSB_EQ_DCE0_OFST (16)
+#define A60810_RGS_SSUSB_EQ_DCE1_OFST (8)
+#define A60810_RGS_SSUSB_EQ_DCHHL_OFST (0)
+
+//U3D_PHYA_RX_MON2
+#define A60810_RGS_SSUSB_EQ_LEQ_STOP_OFST (31)
+#define A60810_RGS_SSUSB_EQ_DCLHL_OFST (24)
+#define A60810_RGS_SSUSB_EQ_STATUS_OFST (16)
+#define A60810_RGS_SSUSB_EQ_DCEYE0_OFST (8)
+#define A60810_RGS_SSUSB_EQ_DCEYE1_OFST (0)
+
+//U3D_PHYA_RX_MON3
+#define A60810_RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_0_OFST (0)
+
+//U3D_PHYA_RX_MON4
+#define A60810_RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_1_OFST (0)
+
+//U3D_PHYA_RX_MON5
+#define A60810_RGS_SSUSB_EQ_DCLEQOS_OFST (8)
+#define A60810_RGS_SSUSB_EQ_EYE_CNT_RDY_OFST (7)
+#define A60810_RGS_SSUSB_EQ_PILPO_OFST (0)
+
+//U3D_PHYD_CPPAT2
+#define A60810_RG_SSUSB_CPPAT_OUT_H_TMP2_OFST (16)
+#define A60810_RG_SSUSB_CPPAT_OUT_H_TMP1_OFST (8)
+#define A60810_RG_SSUSB_CPPAT_OUT_H_TMP0_OFST (0)
+
+//U3D_EQ_EYE3
+#define A60810_RG_SSUSB_EQ_LEQ_SHIFT_OFST (24)
+#define A60810_RG_SSUSB_EQ_EYE_CNT_OFST (0)
+
+//U3D_KBAND_OUT
+#define A60810_RGS_SSUSB_CDR_BAND_5G_OFST (24)
+#define A60810_RGS_SSUSB_CDR_BAND_2P5G_OFST (16)
+#define A60810_RGS_SSUSB_PLL_BAND_5G_OFST (8)
+#define A60810_RGS_SSUSB_PLL_BAND_2P5G_OFST (0)
+
+//U3D_KBAND_OUT1
+#define A60810_RGS_SSUSB_CDR_VCOCAL_FAIL_OFST (24)
+#define A60810_RGS_SSUSB_CDR_VCOCAL_STATE_OFST (16)
+#define A60810_RGS_SSUSB_PLL_VCOCAL_FAIL_OFST (8)
+#define A60810_RGS_SSUSB_PLL_VCOCAL_STATE_OFST (0)
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct u3phyd_bank2_reg_a60810 {
+ //0x0
+ PHY_LE32 b2_phyd_top1;
+ PHY_LE32 b2_phyd_top2;
+ PHY_LE32 b2_phyd_top3;
+ PHY_LE32 b2_phyd_top4;
+ //0x10
+ PHY_LE32 b2_phyd_top5;
+ PHY_LE32 b2_phyd_top6;
+ PHY_LE32 b2_phyd_top7;
+ PHY_LE32 b2_phyd_p_sigdet1;
+ //0x20
+ PHY_LE32 b2_phyd_p_sigdet2;
+ PHY_LE32 b2_phyd_p_sigdet_cal1;
+ PHY_LE32 b2_phyd_rxdet1;
+ PHY_LE32 b2_phyd_rxdet2;
+ //0x30
+ PHY_LE32 b2_phyd_misc0;
+ PHY_LE32 b2_phyd_misc2;
+ PHY_LE32 b2_phyd_misc3;
+ PHY_LE32 b2_phyd_l1ss;
+ //0x40
+ PHY_LE32 b2_rosc_0;
+ PHY_LE32 b2_rosc_1;
+ PHY_LE32 b2_rosc_2;
+ PHY_LE32 b2_rosc_3;
+ //0x50
+ PHY_LE32 b2_rosc_4;
+ PHY_LE32 b2_rosc_5;
+ PHY_LE32 b2_rosc_6;
+ PHY_LE32 b2_rosc_7;
+ //0x60
+ PHY_LE32 b2_rosc_8;
+ PHY_LE32 b2_rosc_9;
+ PHY_LE32 b2_rosc_a;
+ PHY_LE32 reserve1;
+ //0x70~0xd0
+ PHY_LE32 reserve2[28];
+ //0xe0
+ PHY_LE32 phyd_version;
+ PHY_LE32 phyd_model;
+};
+
+//U3D_B2_PHYD_TOP1
+#define A60810_RG_SSUSB_PCIE2_K_EMP (0xf<<28) //31:28
+#define A60810_RG_SSUSB_PCIE2_K_FUL (0xf<<24) //27:24
+#define A60810_RG_SSUSB_TX_EIDLE_LP_EN (0x1<<17) //17:17
+#define A60810_RG_SSUSB_FORCE_TX_EIDLE_LP_EN (0x1<<16) //16:16
+#define A60810_RG_SSUSB_SIGDET_EN (0x1<<15) //15:15
+#define A60810_RG_SSUSB_FORCE_SIGDET_EN (0x1<<14) //14:14
+#define A60810_RG_SSUSB_CLKRX_EN (0x1<<13) //13:13
+#define A60810_RG_SSUSB_FORCE_CLKRX_EN (0x1<<12) //12:12
+#define A60810_RG_SSUSB_CLKTX_EN (0x1<<11) //11:11
+#define A60810_RG_SSUSB_FORCE_CLKTX_EN (0x1<<10) //10:10
+#define A60810_RG_SSUSB_CLK_REQ_N_I (0x1<<9) //9:9
+#define A60810_RG_SSUSB_FORCE_CLK_REQ_N_I (0x1<<8) //8:8
+#define A60810_RG_SSUSB_RATE (0x1<<6) //6:6
+#define A60810_RG_SSUSB_FORCE_RATE (0x1<<5) //5:5
+#define A60810_RG_SSUSB_PCIE_MODE_SEL (0x1<<4) //4:4
+#define A60810_RG_SSUSB_FORCE_PCIE_MODE_SEL (0x1<<3) //3:3
+#define A60810_RG_SSUSB_PHY_MODE (0x3<<1) //2:1
+#define A60810_RG_SSUSB_FORCE_PHY_MODE (0x1<<0) //0:0
+
+//U3D_B2_PHYD_TOP2
+#define A60810_RG_SSUSB_FORCE_IDRV_6DB (0x1<<30) //30:30
+#define A60810_RG_SSUSB_IDRV_6DB (0x3f<<24) //29:24
+#define A60810_RG_SSUSB_FORCE_IDEM_3P5DB (0x1<<22) //22:22
+#define A60810_RG_SSUSB_IDEM_3P5DB (0x3f<<16) //21:16
+#define A60810_RG_SSUSB_FORCE_IDRV_3P5DB (0x1<<14) //14:14
+#define A60810_RG_SSUSB_IDRV_3P5DB (0x3f<<8) //13:8
+#define A60810_RG_SSUSB_FORCE_IDRV_0DB (0x1<<6) //6:6
+#define A60810_RG_SSUSB_IDRV_0DB (0x3f<<0) //5:0
+
+//U3D_B2_PHYD_TOP3
+#define A60810_RG_SSUSB_TX_BIASI (0x7<<25) //27:25
+#define A60810_RG_SSUSB_FORCE_TX_BIASI_EN (0x1<<24) //24:24
+#define A60810_RG_SSUSB_TX_BIASI_EN (0x1<<16) //16:16
+#define A60810_RG_SSUSB_FORCE_TX_BIASI (0x1<<13) //13:13
+#define A60810_RG_SSUSB_FORCE_IDEM_6DB (0x1<<8) //8:8
+#define A60810_RG_SSUSB_IDEM_6DB (0x3f<<0) //5:0
+
+//U3D_B2_PHYD_TOP4
+#define A60810_RG_SSUSB_G1_CDR_BIC_LTR (0xf<<28) //31:28
+#define A60810_RG_SSUSB_G1_CDR_BIC_LTD0 (0xf<<24) //27:24
+#define A60810_RG_SSUSB_G1_CDR_BC_LTD1 (0x1f<<16) //20:16
+#define A60810_RG_SSUSB_G1_L1SS_CDR_BW_SEL (0x3<<13) //14:13
+#define A60810_RG_SSUSB_G1_CDR_BC_LTR (0x1f<<8) //12:8
+#define A60810_RG_SSUSB_G1_CDR_BW_SEL (0x3<<5) //6:5
+#define A60810_RG_SSUSB_G1_CDR_BC_LTD0 (0x1f<<0) //4:0
+
+//U3D_B2_PHYD_TOP5
+#define A60810_RG_SSUSB_G1_CDR_BIR_LTD1 (0x1f<<24) //28:24
+#define A60810_RG_SSUSB_G1_CDR_BIR_LTR (0x1f<<16) //20:16
+#define A60810_RG_SSUSB_G1_CDR_BIR_LTD0 (0x1f<<8) //12:8
+#define A60810_RG_SSUSB_G1_CDR_BIC_LTD1 (0xf<<0) //3:0
+
+//U3D_B2_PHYD_TOP6
+#define A60810_RG_SSUSB_G2_CDR_BIC_LTR (0xf<<28) //31:28
+#define A60810_RG_SSUSB_G2_CDR_BIC_LTD0 (0xf<<24) //27:24
+#define A60810_RG_SSUSB_G2_CDR_BC_LTD1 (0x1f<<16) //20:16
+#define A60810_RG_SSUSB_G2_L1SS_CDR_BW_SEL (0x3<<13) //14:13
+#define A60810_RG_SSUSB_G2_CDR_BC_LTR (0x1f<<8) //12:8
+#define A60810_RG_SSUSB_G2_CDR_BW_SEL (0x3<<5) //6:5
+#define A60810_RG_SSUSB_G2_CDR_BC_LTD0 (0x1f<<0) //4:0
+
+//U3D_B2_PHYD_TOP7
+#define A60810_RG_SSUSB_G2_CDR_BIR_LTD1 (0x1f<<24) //28:24
+#define A60810_RG_SSUSB_G2_CDR_BIR_LTR (0x1f<<16) //20:16
+#define A60810_RG_SSUSB_G2_CDR_BIR_LTD0 (0x1f<<8) //12:8
+#define A60810_RG_SSUSB_G2_CDR_BIC_LTD1 (0xf<<0) //3:0
+
+//U3D_B2_PHYD_P_SIGDET1
+#define A60810_RG_SSUSB_P_SIGDET_FLT_DIS (0x1<<31) //31:31
+#define A60810_RG_SSUSB_P_SIGDET_FLT_G2_DEAST_SEL (0x7f<<24) //30:24
+#define A60810_RG_SSUSB_P_SIGDET_FLT_G1_DEAST_SEL (0x7f<<16) //22:16
+#define A60810_RG_SSUSB_P_SIGDET_FLT_P2_AST_SEL (0x7f<<8) //14:8
+#define A60810_RG_SSUSB_P_SIGDET_FLT_PX_AST_SEL (0x7f<<0) //6:0
+
+//U3D_B2_PHYD_P_SIGDET2
+#define A60810_RG_SSUSB_P_SIGDET_RX_VAL_S (0x1<<29) //29:29
+#define A60810_RG_SSUSB_P_SIGDET_L0S_DEAS_SEL (0x1<<28) //28:28
+#define A60810_RG_SSUSB_P_SIGDET_L0_EXIT_S (0x1<<27) //27:27
+#define A60810_RG_SSUSB_P_SIGDET_L0S_EXIT_T_S (0x3<<25) //26:25
+#define A60810_RG_SSUSB_P_SIGDET_L0S_EXIT_S (0x1<<24) //24:24
+#define A60810_RG_SSUSB_P_SIGDET_L0S_ENTRY_S (0x1<<16) //16:16
+#define A60810_RG_SSUSB_P_SIGDET_PRB_SEL (0x1<<10) //10:10
+#define A60810_RG_SSUSB_P_SIGDET_BK_SIG_T (0x3<<8) //9:8
+#define A60810_RG_SSUSB_P_SIGDET_P2_RXLFPS (0x1<<6) //6:6
+#define A60810_RG_SSUSB_P_SIGDET_NON_BK_AD (0x1<<5) //5:5
+#define A60810_RG_SSUSB_P_SIGDET_BK_B_RXEQ (0x1<<4) //4:4
+#define A60810_RG_SSUSB_P_SIGDET_G2_KO_SEL (0x3<<2) //3:2
+#define A60810_RG_SSUSB_P_SIGDET_G1_KO_SEL (0x3<<0) //1:0
+
+//U3D_B2_PHYD_P_SIGDET_CAL1
+#define A60810_RG_SSUSB_G2_2EIOS_DET_EN (0x1<<29) //29:29
+#define A60810_RG_SSUSB_P_SIGDET_CAL_OFFSET (0x1f<<24) //28:24
+#define A60810_RG_SSUSB_P_FORCE_SIGDET_CAL_OFFSET (0x1<<16) //16:16
+#define A60810_RG_SSUSB_P_SIGDET_CAL_EN (0x1<<8) //8:8
+#define A60810_RG_SSUSB_P_FORCE_SIGDET_CAL_EN (0x1<<3) //3:3
+#define A60810_RG_SSUSB_P_SIGDET_FLT_EN (0x1<<2) //2:2
+#define A60810_RG_SSUSB_P_SIGDET_SAMPLE_PRD (0x1<<1) //1:1
+#define A60810_RG_SSUSB_P_SIGDET_REK (0x1<<0) //0:0
+
+//U3D_B2_PHYD_RXDET1
+#define A60810_RG_SSUSB_RXDET_PRB_SEL (0x1<<31) //31:31
+#define A60810_RG_SSUSB_FORCE_CMDET (0x1<<30) //30:30
+#define A60810_RG_SSUSB_RXDET_EN (0x1<<29) //29:29
+#define A60810_RG_SSUSB_FORCE_RXDET_EN (0x1<<28) //28:28
+#define A60810_RG_SSUSB_RXDET_K_TWICE (0x1<<27) //27:27
+#define A60810_RG_SSUSB_RXDET_STB3_SET (0x1ff<<18) //26:18
+#define A60810_RG_SSUSB_RXDET_STB2_SET (0x1ff<<9) //17:9
+#define A60810_RG_SSUSB_RXDET_STB1_SET (0x1ff<<0) //8:0
+
+//U3D_B2_PHYD_RXDET2
+#define A60810_RG_SSUSB_PHYD_TRAINDEC_FORCE_CGEN (0x1<<31) //31:31
+#define A60810_RG_SSUSB_PHYD_BERTLB_FORCE_CGEN (0x1<<30) //30:30
+#define A60810_RG_SSUSB_PHYD_T2RLB_FORCE_CGEN (0x1<<29) //29:29
+#define A60810_RG_SSUSB_LCK2REF_EXT_EN (0x1<<28) //28:28
+#define A60810_RG_SSUSB_G2_LCK2REF_EXT_SEL (0xf<<24) //27:24
+#define A60810_RG_SSUSB_LCK2REF_EXT_SEL (0xf<<20) //23:20
+#define A60810_RG_SSUSB_PDN_T_SEL (0x3<<18) //19:18
+#define A60810_RG_SSUSB_RXDET_STB3_SET_P3 (0x1ff<<9) //17:9
+#define A60810_RG_SSUSB_RXDET_STB2_SET_P3 (0x1ff<<0) //8:0
+
+//U3D_B2_PHYD_MISC0
+#define A60810_RG_SSUSB_TX_EIDLE_LP_P0DLYCYC (0x3f<<26) //31:26
+#define A60810_RG_SSUSB_TX_SER_EN (0x1<<25) //25:25
+#define A60810_RG_SSUSB_FORCE_TX_SER_EN (0x1<<24) //24:24
+#define A60810_RG_SSUSB_TXPLL_REFCKSEL (0x1<<23) //23:23
+#define A60810_RG_SSUSB_FORCE_PLL_DDS_HF_EN (0x1<<22) //22:22
+#define A60810_RG_SSUSB_PLL_DDS_HF_EN_MAN (0x1<<21) //21:21
+#define A60810_RG_SSUSB_RXLFPS_ENTXDRV (0x1<<20) //20:20
+#define A60810_RG_SSUSB_RX_FL_UNLOCKTH (0xf<<16) //19:16
+#define A60810_RG_SSUSB_LFPS_PSEL (0x1<<15) //15:15
+#define A60810_RG_SSUSB_RX_SIGDET_EN (0x1<<14) //14:14
+#define A60810_RG_SSUSB_RX_SIGDET_EN_SEL (0x1<<13) //13:13
+#define A60810_RG_SSUSB_RX_PI_CAL_EN (0x1<<12) //12:12
+#define A60810_RG_SSUSB_RX_PI_CAL_EN_SEL (0x1<<11) //11:11
+#define A60810_RG_SSUSB_P3_CLS_CK_SEL (0x1<<10) //10:10
+#define A60810_RG_SSUSB_T2RLB_PSEL (0x3<<8) //9:8
+#define A60810_RG_SSUSB_PPCTL_PSEL (0x7<<5) //7:5
+#define A60810_RG_SSUSB_PHYD_TX_DATA_INV (0x1<<4) //4:4
+#define A60810_RG_SSUSB_BERTLB_PSEL (0x3<<2) //3:2
+#define A60810_RG_SSUSB_RETRACK_DIS (0x1<<1) //1:1
+#define A60810_RG_SSUSB_PPERRCNT_CLR (0x1<<0) //0:0
+
+//U3D_B2_PHYD_MISC2
+#define A60810_RG_SSUSB_FRC_PLL_DDS_PREDIV2 (0x1<<31) //31:31
+#define A60810_RG_SSUSB_FRC_PLL_DDS_IADJ (0xf<<27) //30:27
+#define A60810_RG_SSUSB_P_SIGDET_125FILTER (0x1<<26) //26:26
+#define A60810_RG_SSUSB_P_SIGDET_RST_FILTER (0x1<<25) //25:25
+#define A60810_RG_SSUSB_P_SIGDET_EID_USE_RAW (0x1<<24) //24:24
+#define A60810_RG_SSUSB_P_SIGDET_LTD_USE_RAW (0x1<<23) //23:23
+#define A60810_RG_SSUSB_EIDLE_BF_RXDET (0x1<<22) //22:22
+#define A60810_RG_SSUSB_EIDLE_LP_STBCYC (0x1ff<<13) //21:13
+#define A60810_RG_SSUSB_TX_EIDLE_LP_POSTDLY (0x3f<<7) //12:7
+#define A60810_RG_SSUSB_TX_EIDLE_LP_PREDLY (0x3f<<1) //6:1
+#define A60810_RG_SSUSB_TX_EIDLE_LP_EN_ADV (0x1<<0) //0:0
+
+//U3D_B2_PHYD_MISC3
+#define A60810_RGS_SSUSB_DDS_CALIB_C_STATE (0x7<<16) //18:16
+#define A60810_RGS_SSUSB_PPERRCNT (0xffff<<0) //15:0
+
+//U3D_B2_PHYD_L1SS
+#define A60810_RG_SSUSB_L1SS_REV1 (0xff<<24) //31:24
+#define A60810_RG_SSUSB_L1SS_REV0 (0xff<<16) //23:16
+#define A60810_RG_SSUSB_P_LTD1_SLOCK_DIS (0x1<<11) //11:11
+#define A60810_RG_SSUSB_PLL_CNT_CLEAN_DIS (0x1<<10) //10:10
+#define A60810_RG_SSUSB_P_PLL_REK_SEL (0x1<<9) //9:9
+#define A60810_RG_SSUSB_TXDRV_MASKDLY (0x1<<8) //8:8
+#define A60810_RG_SSUSB_RXSTS_VAL (0x1<<7) //7:7
+#define A60810_RG_PCIE_PHY_CLKREQ_N_EN (0x1<<6) //6:6
+#define A60810_RG_PCIE_FORCE_PHY_CLKREQ_N_EN (0x1<<5) //5:5
+#define A60810_RG_PCIE_PHY_CLKREQ_N_OUT (0x1<<4) //4:4
+#define A60810_RG_PCIE_FORCE_PHY_CLKREQ_N_OUT (0x1<<3) //3:3
+#define A60810_RG_SSUSB_RXPLL_STB_PX0 (0x1<<2) //2:2
+#define A60810_RG_PCIE_L1SS_EN (0x1<<1) //1:1
+#define A60810_RG_PCIE_FORCE_L1SS_EN (0x1<<0) //0:0
+
+//U3D_B2_ROSC_0
+#define A60810_RG_SSUSB_RING_OSC_CNTEND (0x1ff<<23) //31:23
+#define A60810_RG_SSUSB_XTAL_OSC_CNTEND (0x7f<<16) //22:16
+#define A60810_RG_SSUSB_RING_OSC_EN (0x1<<3) //3:3
+#define A60810_RG_SSUSB_RING_OSC_FORCE_EN (0x1<<2) //2:2
+#define A60810_RG_SSUSB_FRC_RING_BYPASS_DET (0x1<<1) //1:1
+#define A60810_RG_SSUSB_RING_BYPASS_DET (0x1<<0) //0:0
+
+//U3D_B2_ROSC_1
+#define A60810_RG_SSUSB_RING_OSC_FRC_P3 (0x1<<20) //20:20
+#define A60810_RG_SSUSB_RING_OSC_P3 (0x1<<19) //19:19
+#define A60810_RG_SSUSB_RING_OSC_FRC_RECAL (0x3<<17) //18:17
+#define A60810_RG_SSUSB_RING_OSC_RECAL (0x1<<16) //16:16
+#define A60810_RG_SSUSB_RING_OSC_SEL (0xff<<8) //15:8
+#define A60810_RG_SSUSB_RING_OSC_FRC_SEL (0x1<<0) //0:0
+
+//U3D_B2_ROSC_2
+#define A60810_RG_SSUSB_RING_DET_STRCYC2 (0xffff<<16) //31:16
+#define A60810_RG_SSUSB_RING_DET_STRCYC1 (0xffff<<0) //15:0
+
+//U3D_B2_ROSC_3
+#define A60810_RG_SSUSB_RING_DET_DETWIN1 (0xffff<<16) //31:16
+#define A60810_RG_SSUSB_RING_DET_STRCYC3 (0xffff<<0) //15:0
+
+//U3D_B2_ROSC_4
+#define A60810_RG_SSUSB_RING_DET_DETWIN3 (0xffff<<16) //31:16
+#define A60810_RG_SSUSB_RING_DET_DETWIN2 (0xffff<<0) //15:0
+
+//U3D_B2_ROSC_5
+#define A60810_RG_SSUSB_RING_DET_LBOND1 (0xffff<<16) //31:16
+#define A60810_RG_SSUSB_RING_DET_UBOND1 (0xffff<<0) //15:0
+
+//U3D_B2_ROSC_6
+#define A60810_RG_SSUSB_RING_DET_LBOND2 (0xffff<<16) //31:16
+#define A60810_RG_SSUSB_RING_DET_UBOND2 (0xffff<<0) //15:0
+
+//U3D_B2_ROSC_7
+#define A60810_RG_SSUSB_RING_DET_LBOND3 (0xffff<<16) //31:16
+#define A60810_RG_SSUSB_RING_DET_UBOND3 (0xffff<<0) //15:0
+
+//U3D_B2_ROSC_8
+#define A60810_RG_SSUSB_RING_RESERVE (0xffff<<16) //31:16
+#define A60810_RG_SSUSB_ROSC_PROB_SEL (0xf<<2) //5:2
+#define A60810_RG_SSUSB_RING_FREQMETER_EN (0x1<<1) //1:1
+#define A60810_RG_SSUSB_RING_DET_BPS_UBOND (0x1<<0) //0:0
+
+//U3D_B2_ROSC_9
+#define A60810_RGS_FM_RING_CNT (0xffff<<16) //31:16
+#define A60810_RGS_SSUSB_RING_OSC_STATE (0x3<<10) //11:10
+#define A60810_RGS_SSUSB_RING_OSC_STABLE (0x1<<9) //9:9
+#define A60810_RGS_SSUSB_RING_OSC_CAL_FAIL (0x1<<8) //8:8
+#define A60810_RGS_SSUSB_RING_OSC_CAL (0xff<<0) //7:0
+
+//U3D_B2_ROSC_A
+#define A60810_RGS_SSUSB_ROSC_PROB_OUT (0xff<<0) //7:0
+
+//U3D_PHYD_VERSION
+#define A60810_RGS_SSUSB_PHYD_VERSION (0xffffffff<<0) //31:0
+
+//U3D_PHYD_MODEL
+#define A60810_RGS_SSUSB_PHYD_MODEL (0xffffffff<<0) //31:0
+
+/* OFFSET */
+
+//U3D_B2_PHYD_TOP1
+#define A60810_RG_SSUSB_PCIE2_K_EMP_OFST (28)
+#define A60810_RG_SSUSB_PCIE2_K_FUL_OFST (24)
+#define A60810_RG_SSUSB_TX_EIDLE_LP_EN_OFST (17)
+#define A60810_RG_SSUSB_FORCE_TX_EIDLE_LP_EN_OFST (16)
+#define A60810_RG_SSUSB_SIGDET_EN_OFST (15)
+#define A60810_RG_SSUSB_FORCE_SIGDET_EN_OFST (14)
+#define A60810_RG_SSUSB_CLKRX_EN_OFST (13)
+#define A60810_RG_SSUSB_FORCE_CLKRX_EN_OFST (12)
+#define A60810_RG_SSUSB_CLKTX_EN_OFST (11)
+#define A60810_RG_SSUSB_FORCE_CLKTX_EN_OFST (10)
+#define A60810_RG_SSUSB_CLK_REQ_N_I_OFST (9)
+#define A60810_RG_SSUSB_FORCE_CLK_REQ_N_I_OFST (8)
+#define A60810_RG_SSUSB_RATE_OFST (6)
+#define A60810_RG_SSUSB_FORCE_RATE_OFST (5)
+#define A60810_RG_SSUSB_PCIE_MODE_SEL_OFST (4)
+#define A60810_RG_SSUSB_FORCE_PCIE_MODE_SEL_OFST (3)
+#define A60810_RG_SSUSB_PHY_MODE_OFST (1)
+#define A60810_RG_SSUSB_FORCE_PHY_MODE_OFST (0)
+
+//U3D_B2_PHYD_TOP2
+#define A60810_RG_SSUSB_FORCE_IDRV_6DB_OFST (30)
+#define A60810_RG_SSUSB_IDRV_6DB_OFST (24)
+#define A60810_RG_SSUSB_FORCE_IDEM_3P5DB_OFST (22)
+#define A60810_RG_SSUSB_IDEM_3P5DB_OFST (16)
+#define A60810_RG_SSUSB_FORCE_IDRV_3P5DB_OFST (14)
+#define A60810_RG_SSUSB_IDRV_3P5DB_OFST (8)
+#define A60810_RG_SSUSB_FORCE_IDRV_0DB_OFST (6)
+#define A60810_RG_SSUSB_IDRV_0DB_OFST (0)
+
+//U3D_B2_PHYD_TOP3
+#define A60810_RG_SSUSB_TX_BIASI_OFST (25)
+#define A60810_RG_SSUSB_FORCE_TX_BIASI_EN_OFST (24)
+#define A60810_RG_SSUSB_TX_BIASI_EN_OFST (16)
+#define A60810_RG_SSUSB_FORCE_TX_BIASI_OFST (13)
+#define A60810_RG_SSUSB_FORCE_IDEM_6DB_OFST (8)
+#define A60810_RG_SSUSB_IDEM_6DB_OFST (0)
+
+//U3D_B2_PHYD_TOP4
+#define A60810_RG_SSUSB_G1_CDR_BIC_LTR_OFST (28)
+#define A60810_RG_SSUSB_G1_CDR_BIC_LTD0_OFST (24)
+#define A60810_RG_SSUSB_G1_CDR_BC_LTD1_OFST (16)
+#define A60810_RG_SSUSB_G1_L1SS_CDR_BW_SEL_OFST (13)
+#define A60810_RG_SSUSB_G1_CDR_BC_LTR_OFST (8)
+#define A60810_RG_SSUSB_G1_CDR_BW_SEL_OFST (5)
+#define A60810_RG_SSUSB_G1_CDR_BC_LTD0_OFST (0)
+
+//U3D_B2_PHYD_TOP5
+#define A60810_RG_SSUSB_G1_CDR_BIR_LTD1_OFST (24)
+#define A60810_RG_SSUSB_G1_CDR_BIR_LTR_OFST (16)
+#define A60810_RG_SSUSB_G1_CDR_BIR_LTD0_OFST (8)
+#define A60810_RG_SSUSB_G1_CDR_BIC_LTD1_OFST (0)
+
+//U3D_B2_PHYD_TOP6
+#define A60810_RG_SSUSB_G2_CDR_BIC_LTR_OFST (28)
+#define A60810_RG_SSUSB_G2_CDR_BIC_LTD0_OFST (24)
+#define A60810_RG_SSUSB_G2_CDR_BC_LTD1_OFST (16)
+#define A60810_RG_SSUSB_G2_L1SS_CDR_BW_SEL_OFST (13)
+#define A60810_RG_SSUSB_G2_CDR_BC_LTR_OFST (8)
+#define A60810_RG_SSUSB_G2_CDR_BW_SEL_OFST (5)
+#define A60810_RG_SSUSB_G2_CDR_BC_LTD0_OFST (0)
+
+//U3D_B2_PHYD_TOP7
+#define A60810_RG_SSUSB_G2_CDR_BIR_LTD1_OFST (24)
+#define A60810_RG_SSUSB_G2_CDR_BIR_LTR_OFST (16)
+#define A60810_RG_SSUSB_G2_CDR_BIR_LTD0_OFST (8)
+#define A60810_RG_SSUSB_G2_CDR_BIC_LTD1_OFST (0)
+
+//U3D_B2_PHYD_P_SIGDET1
+#define A60810_RG_SSUSB_P_SIGDET_FLT_DIS_OFST (31)
+#define A60810_RG_SSUSB_P_SIGDET_FLT_G2_DEAST_SEL_OFST (24)
+#define A60810_RG_SSUSB_P_SIGDET_FLT_G1_DEAST_SEL_OFST (16)
+#define A60810_RG_SSUSB_P_SIGDET_FLT_P2_AST_SEL_OFST (8)
+#define A60810_RG_SSUSB_P_SIGDET_FLT_PX_AST_SEL_OFST (0)
+
+//U3D_B2_PHYD_P_SIGDET2
+#define A60810_RG_SSUSB_P_SIGDET_RX_VAL_S_OFST (29)
+#define A60810_RG_SSUSB_P_SIGDET_L0S_DEAS_SEL_OFST (28)
+#define A60810_RG_SSUSB_P_SIGDET_L0_EXIT_S_OFST (27)
+#define A60810_RG_SSUSB_P_SIGDET_L0S_EXIT_T_S_OFST (25)
+#define A60810_RG_SSUSB_P_SIGDET_L0S_EXIT_S_OFST (24)
+#define A60810_RG_SSUSB_P_SIGDET_L0S_ENTRY_S_OFST (16)
+#define A60810_RG_SSUSB_P_SIGDET_PRB_SEL_OFST (10)
+#define A60810_RG_SSUSB_P_SIGDET_BK_SIG_T_OFST (8)
+#define A60810_RG_SSUSB_P_SIGDET_P2_RXLFPS_OFST (6)
+#define A60810_RG_SSUSB_P_SIGDET_NON_BK_AD_OFST (5)
+#define A60810_RG_SSUSB_P_SIGDET_BK_B_RXEQ_OFST (4)
+#define A60810_RG_SSUSB_P_SIGDET_G2_KO_SEL_OFST (2)
+#define A60810_RG_SSUSB_P_SIGDET_G1_KO_SEL_OFST (0)
+
+//U3D_B2_PHYD_P_SIGDET_CAL1
+#define A60810_RG_SSUSB_G2_2EIOS_DET_EN_OFST (29)
+#define A60810_RG_SSUSB_P_SIGDET_CAL_OFFSET_OFST (24)
+#define A60810_RG_SSUSB_P_FORCE_SIGDET_CAL_OFFSET_OFST (16)
+#define A60810_RG_SSUSB_P_SIGDET_CAL_EN_OFST (8)
+#define A60810_RG_SSUSB_P_FORCE_SIGDET_CAL_EN_OFST (3)
+#define A60810_RG_SSUSB_P_SIGDET_FLT_EN_OFST (2)
+#define A60810_RG_SSUSB_P_SIGDET_SAMPLE_PRD_OFST (1)
+#define A60810_RG_SSUSB_P_SIGDET_REK_OFST (0)
+
+//U3D_B2_PHYD_RXDET1
+#define A60810_RG_SSUSB_RXDET_PRB_SEL_OFST (31)
+#define A60810_RG_SSUSB_FORCE_CMDET_OFST (30)
+#define A60810_RG_SSUSB_RXDET_EN_OFST (29)
+#define A60810_RG_SSUSB_FORCE_RXDET_EN_OFST (28)
+#define A60810_RG_SSUSB_RXDET_K_TWICE_OFST (27)
+#define A60810_RG_SSUSB_RXDET_STB3_SET_OFST (18)
+#define A60810_RG_SSUSB_RXDET_STB2_SET_OFST (9)
+#define A60810_RG_SSUSB_RXDET_STB1_SET_OFST (0)
+
+//U3D_B2_PHYD_RXDET2
+#define A60810_RG_SSUSB_PHYD_TRAINDEC_FORCE_CGEN_OFST (31)
+#define A60810_RG_SSUSB_PHYD_BERTLB_FORCE_CGEN_OFST (30)
+#define A60810_RG_SSUSB_PHYD_T2RLB_FORCE_CGEN_OFST (29)
+#define A60810_RG_SSUSB_LCK2REF_EXT_EN_OFST (28)
+#define A60810_RG_SSUSB_G2_LCK2REF_EXT_SEL_OFST (24)
+#define A60810_RG_SSUSB_LCK2REF_EXT_SEL_OFST (20)
+#define A60810_RG_SSUSB_PDN_T_SEL_OFST (18)
+#define A60810_RG_SSUSB_RXDET_STB3_SET_P3_OFST (9)
+#define A60810_RG_SSUSB_RXDET_STB2_SET_P3_OFST (0)
+
+//U3D_B2_PHYD_MISC0
+#define A60810_RG_SSUSB_TX_EIDLE_LP_P0DLYCYC_OFST (26)
+#define A60810_RG_SSUSB_TX_SER_EN_OFST (25)
+#define A60810_RG_SSUSB_FORCE_TX_SER_EN_OFST (24)
+#define A60810_RG_SSUSB_TXPLL_REFCKSEL_OFST (23)
+#define A60810_RG_SSUSB_FORCE_PLL_DDS_HF_EN_OFST (22)
+#define A60810_RG_SSUSB_PLL_DDS_HF_EN_MAN_OFST (21)
+#define A60810_RG_SSUSB_RXLFPS_ENTXDRV_OFST (20)
+#define A60810_RG_SSUSB_RX_FL_UNLOCKTH_OFST (16)
+#define A60810_RG_SSUSB_LFPS_PSEL_OFST (15)
+#define A60810_RG_SSUSB_RX_SIGDET_EN_OFST (14)
+#define A60810_RG_SSUSB_RX_SIGDET_EN_SEL_OFST (13)
+#define A60810_RG_SSUSB_RX_PI_CAL_EN_OFST (12)
+#define A60810_RG_SSUSB_RX_PI_CAL_EN_SEL_OFST (11)
+#define A60810_RG_SSUSB_P3_CLS_CK_SEL_OFST (10)
+#define A60810_RG_SSUSB_T2RLB_PSEL_OFST (8)
+#define A60810_RG_SSUSB_PPCTL_PSEL_OFST (5)
+#define A60810_RG_SSUSB_PHYD_TX_DATA_INV_OFST (4)
+#define A60810_RG_SSUSB_BERTLB_PSEL_OFST (2)
+#define A60810_RG_SSUSB_RETRACK_DIS_OFST (1)
+#define A60810_RG_SSUSB_PPERRCNT_CLR_OFST (0)
+
+//U3D_B2_PHYD_MISC2
+#define A60810_RG_SSUSB_FRC_PLL_DDS_PREDIV2_OFST (31)
+#define A60810_RG_SSUSB_FRC_PLL_DDS_IADJ_OFST (27)
+#define A60810_RG_SSUSB_P_SIGDET_125FILTER_OFST (26)
+#define A60810_RG_SSUSB_P_SIGDET_RST_FILTER_OFST (25)
+#define A60810_RG_SSUSB_P_SIGDET_EID_USE_RAW_OFST (24)
+#define A60810_RG_SSUSB_P_SIGDET_LTD_USE_RAW_OFST (23)
+#define A60810_RG_SSUSB_EIDLE_BF_RXDET_OFST (22)
+#define A60810_RG_SSUSB_EIDLE_LP_STBCYC_OFST (13)
+#define A60810_RG_SSUSB_TX_EIDLE_LP_POSTDLY_OFST (7)
+#define A60810_RG_SSUSB_TX_EIDLE_LP_PREDLY_OFST (1)
+#define A60810_RG_SSUSB_TX_EIDLE_LP_EN_ADV_OFST (0)
+
+//U3D_B2_PHYD_MISC3
+#define A60810_RGS_SSUSB_DDS_CALIB_C_STATE_OFST (16)
+#define A60810_RGS_SSUSB_PPERRCNT_OFST (0)
+
+//U3D_B2_PHYD_L1SS
+#define A60810_RG_SSUSB_L1SS_REV1_OFST (24)
+#define A60810_RG_SSUSB_L1SS_REV0_OFST (16)
+#define A60810_RG_SSUSB_P_LTD1_SLOCK_DIS_OFST (11)
+#define A60810_RG_SSUSB_PLL_CNT_CLEAN_DIS_OFST (10)
+#define A60810_RG_SSUSB_P_PLL_REK_SEL_OFST (9)
+#define A60810_RG_SSUSB_TXDRV_MASKDLY_OFST (8)
+#define A60810_RG_SSUSB_RXSTS_VAL_OFST (7)
+#define A60810_RG_PCIE_PHY_CLKREQ_N_EN_OFST (6)
+#define A60810_RG_PCIE_FORCE_PHY_CLKREQ_N_EN_OFST (5)
+#define A60810_RG_PCIE_PHY_CLKREQ_N_OUT_OFST (4)
+#define A60810_RG_PCIE_FORCE_PHY_CLKREQ_N_OUT_OFST (3)
+#define A60810_RG_SSUSB_RXPLL_STB_PX0_OFST (2)
+#define A60810_RG_PCIE_L1SS_EN_OFST (1)
+#define A60810_RG_PCIE_FORCE_L1SS_EN_OFST (0)
+
+//U3D_B2_ROSC_0
+#define A60810_RG_SSUSB_RING_OSC_CNTEND_OFST (23)
+#define A60810_RG_SSUSB_XTAL_OSC_CNTEND_OFST (16)
+#define A60810_RG_SSUSB_RING_OSC_EN_OFST (3)
+#define A60810_RG_SSUSB_RING_OSC_FORCE_EN_OFST (2)
+#define A60810_RG_SSUSB_FRC_RING_BYPASS_DET_OFST (1)
+#define A60810_RG_SSUSB_RING_BYPASS_DET_OFST (0)
+
+//U3D_B2_ROSC_1
+#define A60810_RG_SSUSB_RING_OSC_FRC_P3_OFST (20)
+#define A60810_RG_SSUSB_RING_OSC_P3_OFST (19)
+#define A60810_RG_SSUSB_RING_OSC_FRC_RECAL_OFST (17)
+#define A60810_RG_SSUSB_RING_OSC_RECAL_OFST (16)
+#define A60810_RG_SSUSB_RING_OSC_SEL_OFST (8)
+#define A60810_RG_SSUSB_RING_OSC_FRC_SEL_OFST (0)
+
+//U3D_B2_ROSC_2
+#define A60810_RG_SSUSB_RING_DET_STRCYC2_OFST (16)
+#define A60810_RG_SSUSB_RING_DET_STRCYC1_OFST (0)
+
+//U3D_B2_ROSC_3
+#define A60810_RG_SSUSB_RING_DET_DETWIN1_OFST (16)
+#define A60810_RG_SSUSB_RING_DET_STRCYC3_OFST (0)
+
+//U3D_B2_ROSC_4
+#define A60810_RG_SSUSB_RING_DET_DETWIN3_OFST (16)
+#define A60810_RG_SSUSB_RING_DET_DETWIN2_OFST (0)
+
+//U3D_B2_ROSC_5
+#define A60810_RG_SSUSB_RING_DET_LBOND1_OFST (16)
+#define A60810_RG_SSUSB_RING_DET_UBOND1_OFST (0)
+
+//U3D_B2_ROSC_6
+#define A60810_RG_SSUSB_RING_DET_LBOND2_OFST (16)
+#define A60810_RG_SSUSB_RING_DET_UBOND2_OFST (0)
+
+//U3D_B2_ROSC_7
+#define A60810_RG_SSUSB_RING_DET_LBOND3_OFST (16)
+#define A60810_RG_SSUSB_RING_DET_UBOND3_OFST (0)
+
+//U3D_B2_ROSC_8
+#define A60810_RG_SSUSB_RING_RESERVE_OFST (16)
+#define A60810_RG_SSUSB_ROSC_PROB_SEL_OFST (2)
+#define A60810_RG_SSUSB_RING_FREQMETER_EN_OFST (1)
+#define A60810_RG_SSUSB_RING_DET_BPS_UBOND_OFST (0)
+
+//U3D_B2_ROSC_9
+#define A60810_RGS_FM_RING_CNT_OFST (16)
+#define A60810_RGS_SSUSB_RING_OSC_STATE_OFST (10)
+#define A60810_RGS_SSUSB_RING_OSC_STABLE_OFST (9)
+#define A60810_RGS_SSUSB_RING_OSC_CAL_FAIL_OFST (8)
+#define A60810_RGS_SSUSB_RING_OSC_CAL_OFST (0)
+
+//U3D_B2_ROSC_A
+#define A60810_RGS_SSUSB_ROSC_PROB_OUT_OFST (0)
+
+//U3D_PHYD_VERSION
+#define A60810_RGS_SSUSB_PHYD_VERSION_OFST (0)
+
+//U3D_PHYD_MODEL
+#define A60810_RGS_SSUSB_PHYD_MODEL_OFST (0)
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct sifslv_chip_reg_a60810 {
+ //0x0
+ PHY_LE32 gpio_ctla;
+ PHY_LE32 gpio_ctlb;
+ PHY_LE32 gpio_ctlc;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct sifslv_fm_feg_a60810 {
+ //0x0
+ PHY_LE32 fmcr0;
+ PHY_LE32 fmcr1;
+ PHY_LE32 fmcr2;
+ PHY_LE32 fmmonr0;
+ //0X10
+ PHY_LE32 fmmonr1;
+};
+
+//U3D_FMCR0
+#define A60810_RG_LOCKTH (0xf<<28) //31:28
+#define A60810_RG_MONCLK_SEL (0x3<<26) //27:26
+#define A60810_RG_FM_MODE (0x1<<25) //25:25
+#define A60810_RG_FREQDET_EN (0x1<<24) //24:24
+#define A60810_RG_CYCLECNT (0xffffff<<0) //23:0
+
+//U3D_FMCR1
+#define A60810_RG_TARGET (0xffffffff<<0) //31:0
+
+//U3D_FMCR2
+#define A60810_RG_OFFSET (0xffffffff<<0) //31:0
+
+//U3D_FMMONR0
+#define A60810_USB_FM_OUT (0xffffffff<<0) //31:0
+
+//U3D_FMMONR1
+#define A60810_RG_MONCLK_SEL_2 (0x1<<9) //9:9
+#define A60810_RG_FRCK_EN (0x1<<8) //8:8
+#define A60810_USBPLL_LOCK (0x1<<1) //1:1
+#define A60810_USB_FM_VLD (0x1<<0) //0:0
+
+/* OFFSET */
+
+//U3D_FMCR0
+#define A60810_RG_LOCKTH_OFST (28)
+#define A60810_RG_MONCLK_SEL_OFST (26)
+#define A60810_RG_FM_MODE_OFST (25)
+#define A60810_RG_FREQDET_EN_OFST (24)
+#define A60810_RG_CYCLECNT_OFST (0)
+
+//U3D_FMCR1
+#define A60810_RG_TARGET_OFST (0)
+
+//U3D_FMCR2
+#define A60810_RG_OFFSET_OFST (0)
+
+//U3D_FMMONR0
+#define A60810_USB_FM_OUT_OFST (0)
+
+//U3D_FMMONR1
+#define A60810_RG_MONCLK_SEL_2_OFST (9)
+#define A60810_RG_FRCK_EN_OFST (8)
+#define A60810_USBPLL_LOCK_OFST (1)
+#define A60810_USB_FM_VLD_OFST (0)
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct spllc_reg_a60810 {
+ //0x0
+ PHY_LE32 u3d_syspll_0;
+ PHY_LE32 u3d_syspll_1;
+ PHY_LE32 u3d_syspll_2;
+ PHY_LE32 u3d_syspll_sdm;
+ //0x10
+ PHY_LE32 u3d_xtalctl_1;
+ PHY_LE32 u3d_xtalctl_2;
+ PHY_LE32 u3d_xtalctl3;
+};
+
+//U3D_SYSPLL_0
+#define A60810_RG_SSUSB_SPLL_DDSEN_CYC (0x1f<<27) //31:27
+#define A60810_RG_SSUSB_SPLL_NCPOEN_CYC (0x3<<25) //26:25
+#define A60810_RG_SSUSB_SPLL_STBCYC (0x1ff<<16) //24:16
+#define A60810_RG_SSUSB_SPLL_NCPOCHG_CYC (0xf<<12) //15:12
+#define A60810_RG_SSUSB_SYSPLL_ON (0x1<<11) //11:11
+#define A60810_RG_SSUSB_FORCE_SYSPLLON (0x1<<10) //10:10
+#define A60810_RG_SSUSB_SPLL_DDSRSTB_CYC (0x7<<0) //2:0
+
+//U3D_SYSPLL_1
+#define A60810_RG_SSUSB_PLL_BIAS_CYC (0xff<<24) //31:24
+#define A60810_RG_SSUSB_SYSPLL_STB (0x1<<23) //23:23
+#define A60810_RG_SSUSB_FORCE_SYSPLL_STB (0x1<<22) //22:22
+#define A60810_RG_SSUSB_SPLL_DDS_ISO_EN (0x1<<21) //21:21
+#define A60810_RG_SSUSB_FORCE_SPLL_DDS_ISO_EN (0x1<<20) //20:20
+#define A60810_RG_SSUSB_SPLL_DDS_PWR_ON (0x1<<19) //19:19
+#define A60810_RG_SSUSB_FORCE_SPLL_DDS_PWR_ON (0x1<<18) //18:18
+#define A60810_RG_SSUSB_PLL_BIAS_PWD (0x1<<17) //17:17
+#define A60810_RG_SSUSB_FORCE_PLL_BIAS_PWD (0x1<<16) //16:16
+#define A60810_RG_SSUSB_FORCE_SPLL_NCPO_EN (0x1<<15) //15:15
+#define A60810_RG_SSUSB_FORCE_SPLL_FIFO_START_MAN (0x1<<14) //14:14
+#define A60810_RG_SSUSB_FORCE_SPLL_NCPO_CHG (0x1<<12) //12:12
+#define A60810_RG_SSUSB_FORCE_SPLL_DDS_RSTB (0x1<<11) //11:11
+#define A60810_RG_SSUSB_FORCE_SPLL_DDS_PWDB (0x1<<10) //10:10
+#define A60810_RG_SSUSB_FORCE_SPLL_DDSEN (0x1<<9) //9:9
+#define A60810_RG_SSUSB_FORCE_SPLL_PWD (0x1<<8) //8:8
+#define A60810_RG_SSUSB_SPLL_NCPO_EN (0x1<<7) //7:7
+#define A60810_RG_SSUSB_SPLL_FIFO_START_MAN (0x1<<6) //6:6
+#define A60810_RG_SSUSB_SPLL_NCPO_CHG (0x1<<4) //4:4
+#define A60810_RG_SSUSB_SPLL_DDS_RSTB (0x1<<3) //3:3
+#define A60810_RG_SSUSB_SPLL_DDS_PWDB (0x1<<2) //2:2
+#define A60810_RG_SSUSB_SPLL_DDSEN (0x1<<1) //1:1
+#define A60810_RG_SSUSB_SPLL_PWD (0x1<<0) //0:0
+
+//U3D_SYSPLL_2
+#define A60810_RG_SSUSB_SPLL_P_ON_SEL (0x1<<11) //11:11
+#define A60810_RG_SSUSB_SPLL_FBDIV_CHG (0x1<<10) //10:10
+#define A60810_RG_SSUSB_SPLL_DDS_ISOEN_CYC (0x3ff<<0) //9:0
+
+//U3D_SYSPLL_SDM
+#define A60810_RG_SSUSB_SPLL_SDM_ISO_EN_CYC (0x3ff<<14) //23:14
+#define A60810_RG_SSUSB_SPLL_FORCE_SDM_ISO_EN (0x1<<13) //13:13
+#define A60810_RG_SSUSB_SPLL_SDM_ISO_EN (0x1<<12) //12:12
+#define A60810_RG_SSUSB_SPLL_SDM_PWR_ON_CYC (0x3ff<<2) //11:2
+#define A60810_RG_SSUSB_SPLL_FORCE_SDM_PWR_ON (0x1<<1) //1:1
+#define A60810_RG_SSUSB_SPLL_SDM_PWR_ON (0x1<<0) //0:0
+
+//U3D_XTALCTL_1
+#define A60810_RG_SSUSB_BIAS_STBCYC (0x3fff<<17) //30:17
+#define A60810_RG_SSUSB_XTAL_CLK_REQ_N (0x1<<16) //16:16
+#define A60810_RG_SSUSB_XTAL_FORCE_CLK_REQ_N (0x1<<15) //15:15
+#define A60810_RG_SSUSB_XTAL_STBCYC (0x7fff<<0) //14:0
+
+//U3D_XTALCTL_2
+#define A60810_RG_SSUSB_INT_XTAL_SEL (0x1<<29) //29:29
+#define A60810_RG_SSUSB_BG_LPF_DLY (0x3<<27) //28:27
+#define A60810_RG_SSUSB_BG_LPF_EN (0x1<<26) //26:26
+#define A60810_RG_SSUSB_FORCE_BG_LPF_EN (0x1<<25) //25:25
+#define A60810_RG_SSUSB_P3_BIAS_PWD (0x1<<24) //24:24
+#define A60810_RG_SSUSB_PCIE_CLKDET_HIT (0x1<<20) //20:20
+#define A60810_RG_SSUSB_PCIE_CLKDET_EN (0x1<<19) //19:19
+#define A60810_RG_SSUSB_FRC_PCIE_CLKDET_EN (0x1<<18) //18:18
+#define A60810_RG_SSUSB_USB20_BIAS_EN (0x1<<17) //17:17
+#define A60810_RG_SSUSB_USB20_SLEEP (0x1<<16) //16:16
+#define A60810_RG_SSUSB_OSC_ONLY (0x1<<9) //9:9
+#define A60810_RG_SSUSB_OSC_EN (0x1<<8) //8:8
+#define A60810_RG_SSUSB_XTALBIAS_STB (0x1<<5) //5:5
+#define A60810_RG_SSUSB_FORCE_XTALBIAS_STB (0x1<<4) //4:4
+#define A60810_RG_SSUSB_BIAS_PWD (0x1<<3) //3:3
+#define A60810_RG_SSUSB_XTAL_PWD (0x1<<2) //2:2
+#define A60810_RG_SSUSB_FORCE_BIAS_PWD (0x1<<1) //1:1
+#define A60810_RG_SSUSB_FORCE_XTAL_PWD (0x1<<0) //0:0
+
+//U3D_XTALCTL3
+#define A60810_RG_SSUSB_XTALCTL_REV (0xf<<12) //15:12
+#define A60810_RG_SSUSB_BIASIMR_EN (0x1<<11) //11:11
+#define A60810_RG_SSUSB_FORCE_BIASIMR_EN (0x1<<10) //10:10
+#define A60810_RG_SSUSB_XTAL_RX_PWD (0x1<<9) //9:9
+#define A60810_RG_SSUSB_FRC_XTAL_RX_PWD (0x1<<8) //8:8
+#define A60810_RG_SSUSB_CKBG_PROB_SEL (0x3<<6) //7:6
+#define A60810_RG_SSUSB_XTAL_PROB_SEL (0x3<<4) //5:4
+#define A60810_RG_SSUSB_XTAL_VREGBIAS_LPF_ENB (0x1<<3) //3:3
+#define A60810_RG_SSUSB_XTAL_FRC_VREGBIAS_LPF_ENB (0x1<<2) //2:2
+#define A60810_RG_SSUSB_XTAL_VREGBIAS_PWD (0x1<<1) //1:1
+#define A60810_RG_SSUSB_XTAL_FRC_VREGBIAS_PWD (0x1<<0) //0:0
+
+
+/* SSUSB_SIFSLV_SPLLC FIELD OFFSET DEFINITION */
+
+//U3D_SYSPLL_0
+#define A60810_RG_SSUSB_SPLL_DDSEN_CYC_OFST (27)
+#define A60810_RG_SSUSB_SPLL_NCPOEN_CYC_OFST (25)
+#define A60810_RG_SSUSB_SPLL_STBCYC_OFST (16)
+#define A60810_RG_SSUSB_SPLL_NCPOCHG_CYC_OFST (12)
+#define A60810_RG_SSUSB_SYSPLL_ON_OFST (11)
+#define A60810_RG_SSUSB_FORCE_SYSPLLON_OFST (10)
+#define A60810_RG_SSUSB_SPLL_DDSRSTB_CYC_OFST (0)
+
+//U3D_SYSPLL_1
+#define A60810_RG_SSUSB_PLL_BIAS_CYC_OFST (24)
+#define A60810_RG_SSUSB_SYSPLL_STB_OFST (23)
+#define A60810_RG_SSUSB_FORCE_SYSPLL_STB_OFST (22)
+#define A60810_RG_SSUSB_SPLL_DDS_ISO_EN_OFST (21)
+#define A60810_RG_SSUSB_FORCE_SPLL_DDS_ISO_EN_OFST (20)
+#define A60810_RG_SSUSB_SPLL_DDS_PWR_ON_OFST (19)
+#define A60810_RG_SSUSB_FORCE_SPLL_DDS_PWR_ON_OFST (18)
+#define A60810_RG_SSUSB_PLL_BIAS_PWD_OFST (17)
+#define A60810_RG_SSUSB_FORCE_PLL_BIAS_PWD_OFST (16)
+#define A60810_RG_SSUSB_FORCE_SPLL_NCPO_EN_OFST (15)
+#define A60810_RG_SSUSB_FORCE_SPLL_FIFO_START_MAN_OFST (14)
+#define A60810_RG_SSUSB_FORCE_SPLL_NCPO_CHG_OFST (12)
+#define A60810_RG_SSUSB_FORCE_SPLL_DDS_RSTB_OFST (11)
+#define A60810_RG_SSUSB_FORCE_SPLL_DDS_PWDB_OFST (10)
+#define A60810_RG_SSUSB_FORCE_SPLL_DDSEN_OFST (9)
+#define A60810_RG_SSUSB_FORCE_SPLL_PWD_OFST (8)
+#define A60810_RG_SSUSB_SPLL_NCPO_EN_OFST (7)
+#define A60810_RG_SSUSB_SPLL_FIFO_START_MAN_OFST (6)
+#define A60810_RG_SSUSB_SPLL_NCPO_CHG_OFST (4)
+#define A60810_RG_SSUSB_SPLL_DDS_RSTB_OFST (3)
+#define A60810_RG_SSUSB_SPLL_DDS_PWDB_OFST (2)
+#define A60810_RG_SSUSB_SPLL_DDSEN_OFST (1)
+#define A60810_RG_SSUSB_SPLL_PWD_OFST (0)
+
+//U3D_SYSPLL_2
+#define A60810_RG_SSUSB_SPLL_P_ON_SEL_OFST (11)
+#define A60810_RG_SSUSB_SPLL_FBDIV_CHG_OFST (10)
+#define A60810_RG_SSUSB_SPLL_DDS_ISOEN_CYC_OFST (0)
+
+//U3D_SYSPLL_SDM
+#define A60810_RG_SSUSB_SPLL_SDM_ISO_EN_CYC_OFST (14)
+#define A60810_RG_SSUSB_SPLL_FORCE_SDM_ISO_EN_OFST (13)
+#define A60810_RG_SSUSB_SPLL_SDM_ISO_EN_OFST (12)
+#define A60810_RG_SSUSB_SPLL_SDM_PWR_ON_CYC_OFST (2)
+#define A60810_RG_SSUSB_SPLL_FORCE_SDM_PWR_ON_OFST (1)
+#define A60810_RG_SSUSB_SPLL_SDM_PWR_ON_OFST (0)
+
+//U3D_XTALCTL_1
+#define A60810_RG_SSUSB_BIAS_STBCYC_OFST (17)
+#define A60810_RG_SSUSB_XTAL_CLK_REQ_N_OFST (16)
+#define A60810_RG_SSUSB_XTAL_FORCE_CLK_REQ_N_OFST (15)
+#define A60810_RG_SSUSB_XTAL_STBCYC_OFST (0)
+
+//U3D_XTALCTL_2
+#define A60810_RG_SSUSB_INT_XTAL_SEL_OFST (29)
+#define A60810_RG_SSUSB_BG_LPF_DLY_OFST (27)
+#define A60810_RG_SSUSB_BG_LPF_EN_OFST (26)
+#define A60810_RG_SSUSB_FORCE_BG_LPF_EN_OFST (25)
+#define A60810_RG_SSUSB_P3_BIAS_PWD_OFST (24)
+#define A60810_RG_SSUSB_PCIE_CLKDET_HIT_OFST (20)
+#define A60810_RG_SSUSB_PCIE_CLKDET_EN_OFST (19)
+#define A60810_RG_SSUSB_FRC_PCIE_CLKDET_EN_OFST (18)
+#define A60810_RG_SSUSB_USB20_BIAS_EN_OFST (17)
+#define A60810_RG_SSUSB_USB20_SLEEP_OFST (16)
+#define A60810_RG_SSUSB_OSC_ONLY_OFST (9)
+#define A60810_RG_SSUSB_OSC_EN_OFST (8)
+#define A60810_RG_SSUSB_XTALBIAS_STB_OFST (5)
+#define A60810_RG_SSUSB_FORCE_XTALBIAS_STB_OFST (4)
+#define A60810_RG_SSUSB_BIAS_PWD_OFST (3)
+#define A60810_RG_SSUSB_XTAL_PWD_OFST (2)
+#define A60810_RG_SSUSB_FORCE_BIAS_PWD_OFST (1)
+#define A60810_RG_SSUSB_FORCE_XTAL_PWD_OFST (0)
+
+//U3D_XTALCTL3
+#define A60810_RG_SSUSB_XTALCTL_REV_OFST (12)
+#define A60810_RG_SSUSB_BIASIMR_EN_OFST (11)
+#define A60810_RG_SSUSB_FORCE_BIASIMR_EN_OFST (10)
+#define A60810_RG_SSUSB_XTAL_RX_PWD_OFST (9)
+#define A60810_RG_SSUSB_FRC_XTAL_RX_PWD_OFST (8)
+#define A60810_RG_SSUSB_CKBG_PROB_SEL_OFST (6)
+#define A60810_RG_SSUSB_XTAL_PROB_SEL_OFST (4)
+#define A60810_RG_SSUSB_XTAL_VREGBIAS_LPF_ENB_OFST (3)
+#define A60810_RG_SSUSB_XTAL_FRC_VREGBIAS_LPF_ENB_OFST (2)
+#define A60810_RG_SSUSB_XTAL_VREGBIAS_PWD_OFST (1)
+#define A60810_RG_SSUSB_XTAL_FRC_VREGBIAS_PWD_OFST (0)
+
+///////////////////////////////////////////////////////////////////////////////
+PHY_INT32 phy_init_a60810(struct u3phy_info *info);
+PHY_INT32 phy_change_pipe_phase_a60810(struct u3phy_info *info, PHY_INT32 phy_drv, PHY_INT32 pipe_phase);
+PHY_INT32 eyescan_init_a60810 (struct u3phy_info *info);
+PHY_INT32 phy_eyescan_a60810(struct u3phy_info *info, PHY_INT32 x_t1, PHY_INT32 y_t1, PHY_INT32 x_br, PHY_INT32 y_br, PHY_INT32 delta_x, PHY_INT32 delta_y
+ , PHY_INT32 eye_cnt, PHY_INT32 num_cnt, PHY_INT32 PI_cal_en, PHY_INT32 num_ignore_cnt);
+PHY_INT32 u2_connect_a60810(struct u3phy_info *info);
+PHY_INT32 u2_disconnect_a60810(struct u3phy_info *info);
+PHY_INT32 u2_save_cur_en_a60810(struct u3phy_info *info);
+PHY_INT32 u2_save_cur_re_a60810(struct u3phy_info *info);
+PHY_INT32 u2_slew_rate_calibration_a60810(struct u3phy_info *info);
+
+#endif
+#endif
Index: linux-3.18.21/drivers/usb/host/mtk_test/mtk-phy-ahb.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/mtk-phy-ahb.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,58 @@
+#include "mtk-phy.h"
+#ifdef CONFIG_U3D_HAL_SUPPORT
+#include "mu3d_hal_osal.h"
+#endif
+
+#ifdef CONFIG_U3_PHY_AHB_SUPPORT
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#ifndef CONFIG_U3D_HAL_SUPPORT
+#define os_writel(addr,data) {\
+ (*((volatile PHY_UINT32*)(addr)) = data);\
+ }
+#define os_readl(addr) *((volatile PHY_UINT32*)(addr))
+#define os_writelmsk(addr, data, msk) \
+ { os_writel(addr, ((os_readl(addr) & ~(msk)) | ((data) & (msk)))); \
+ }
+#define os_setmsk(addr, msk) \
+ { os_writel(addr, os_readl(addr) | msk); \
+ }
+#define os_clrmsk(addr, msk) \
+ { os_writel(addr, os_readl(addr) &~ msk); \
+ }
+/*msk the data first, then umsk with the umsk.*/
+#define os_writelmskumsk(addr, data, msk, umsk) \
+{\
+ os_writel(addr, ((os_readl(addr) & ~(msk)) | ((data) & (msk))) & (umsk));\
+}
+
+#endif
+
+PHY_INT32 U3PhyWriteReg32(PHY_UINT32 addr, PHY_UINT32 data)
+{
+ os_writel(addr, data);
+
+ return 0;
+}
+
+PHY_INT32 U3PhyReadReg32(PHY_UINT32 addr)
+{
+ return os_readl(addr);
+}
+
+PHY_INT32 U3PhyWriteReg8(PHY_UINT32 addr, PHY_UINT8 data)
+{
+ os_writelmsk(addr&0xfffffffc, data<<((addr%4)*8), 0xff<<((addr%4)*8));
+
+ return 0;
+}
+
+PHY_INT8 U3PhyReadReg8(PHY_UINT32 addr)
+{
+ return ((os_readl(addr)>>((addr%4)*8))&0xff);
+}
+
+#endif
+
Index: linux-3.18.21/drivers/usb/host/mtk_test/mtk-phy-gpio.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/mtk-phy-gpio.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,404 @@
+#define CONFIG_U3_PHY_GPIO_SUPPORT
+#ifdef CONFIG_U3_PHY_GPIO_SUPPORT
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include "mtk-phy.h"
+
+/* TEST CHIP PHY define, edit this in different platform */
+#define U3_PHY_I2C_DEV 0x60
+#define U3_PHY_PAGE 0xff
+#define GPIO_BASE 0xBFA80700
+#define SSUSB_I2C_OUT GPIO_BASE+0xd0
+#define SSUSB_I2C_IN GPIO_BASE+0xd4
+
+extern int (*I2CWriterPtr)(u8 DevAddr, u8 WordAddr, u8* data_value, u8 data_len);
+extern int (*I2CReaderPtr)(u8 DevAddr, u8 WordAddr, u8* data_value, u8 data_len);
+
+/****************************************************************/
+
+#define OUTPUT 1
+#define INPUT 0
+
+#define SDA 0 /* GPIO #0: I2C data pin */
+#define SCL 1 /* GPIO #1: I2C clock pin */
+
+/****************************************************************/
+
+#define SDA_OUT (1<<0)
+#define SDA_OEN (1<<1)
+#define SCL_OUT (1<<2)
+#define SCL_OEN (1<<3)
+
+#define SDA_IN_OFFSET 0
+#define SCL_IN_OFFSET 1
+
+#define os_writel(addr,data) {\
+ (*((volatile PHY_UINT32*)(addr)) = data);\
+ }
+#define os_readl(addr) *((volatile PHY_UINT32*)(addr))
+#define os_writelmsk(addr, data, msk) \
+ { os_writel(addr, ((os_readl(addr) & ~(msk)) | ((data) & (msk)))); \
+ }
+#define os_setmsk(addr, msk) \
+ { os_writel(addr, os_readl(addr) | msk); \
+ }
+#define os_clrmsk(addr, msk) \
+ { os_writel(addr, os_readl(addr) &~ msk); \
+ }
+/*msk the data first, then umsk with the umsk.*/
+#define os_writelmskumsk(addr, data, msk, umsk) \
+{\
+ os_writel(addr, ((os_readl(addr) & ~(msk)) | ((data) & (msk))) & (umsk));\
+}
+
+PHY_INT32 U3HWriteReg32(PHY_UINT32 addr, PHY_UINT32 data)
+{
+	os_writel(addr, data);	/* direct 32-bit MMIO write */
+
+	return 0;
+}
+
+PHY_INT32 U3HReadReg32(PHY_UINT32 addr)
+{
+	return os_readl(addr);	/* direct 32-bit MMIO read */
+}
+
+/* 8-bit access = read-modify-write of the aligned 32-bit word, byte lane addr%4 */
+PHY_INT32 U3HWriteReg8(PHY_UINT32 addr, PHY_UINT8 data)
+{
+	os_writelmsk(addr&0xfffffffc, data<<((addr%4)*8), 0xff<<((addr%4)*8));
+
+	return 0;
+}
+
+PHY_INT8 U3HReadReg8(PHY_UINT32 addr)
+{
+	return ((os_readl(addr)>>((addr%4)*8))&0xff);	/* extract byte lane addr%4 */
+}
+
+PHY_INT32 _U3Write_Reg(PHY_INT32 address, PHY_INT32 value);
+PHY_INT32 _U3Read_Reg(PHY_INT32 address);
+
+void gpio_dir_set(PHY_INT32 pin){	/* make SDA/SCL an output (set its OEN bit) */
+	PHY_INT32 addr, temp;
+	addr = SSUSB_I2C_OUT;
+	temp = DRV_Reg32(addr);
+	if(pin == SDA){
+		temp |= SDA_OEN;
+		DRV_WriteReg32(addr,temp);
+	}
+	else{
+		temp |= SCL_OEN;
+		DRV_WriteReg32(addr,temp);
+	}
+}
+
+void gpio_dir_clr(PHY_INT32 pin){	/* make SDA/SCL an input (clear its OEN bit) */
+	PHY_INT32 addr, temp;
+	addr = SSUSB_I2C_OUT;
+	temp = DRV_Reg32(addr);
+	if(pin == SDA){
+		temp &= ~SDA_OEN;
+		DRV_WriteReg32(addr,temp);
+	}
+	else{
+		temp &= ~SCL_OEN;
+		DRV_WriteReg32(addr,temp);
+	}
+}
+
+void gpio_dout_set(PHY_INT32 pin){	/* drive the pin's output latch high */
+	PHY_INT32 addr, temp;
+	addr = SSUSB_I2C_OUT;
+	temp = DRV_Reg32(addr);
+	if(pin == SDA){
+		temp |= SDA_OUT;
+		DRV_WriteReg32(addr,temp);
+	}
+	else{
+		temp |= SCL_OUT;
+		DRV_WriteReg32(addr,temp);
+	}
+}
+
+void gpio_dout_clr(PHY_INT32 pin){	/* drive the pin's output latch low */
+	PHY_INT32 addr, temp;
+	addr = SSUSB_I2C_OUT;
+	temp = DRV_Reg32(addr);
+	if(pin == SDA){
+		temp &= ~SDA_OUT;
+		DRV_WriteReg32(addr,temp);
+	}
+	else{
+		temp &= ~SCL_OUT;
+		DRV_WriteReg32(addr,temp);
+	}
+}
+
+PHY_INT32 gpio_din(PHY_INT32 pin){	/* sample the pin's input level; returns 0 or 1 */
+	PHY_INT32 addr, temp;
+	addr = SSUSB_I2C_IN;
+	temp = DRV_Reg32(addr);
+	if(pin == SDA){
+		temp = (temp >> SDA_IN_OFFSET) & 1;
+	}
+	else{
+		temp = (temp >> SCL_IN_OFFSET) & 1;
+	}
+	return temp;
+}
+
+#define GPIO_DIR_SET(pin) gpio_dir_set(pin)
+#define GPIO_DOUT_SET(pin) gpio_dout_set(pin)	/* fix: drop stray ';' so the macro expands like a plain call (safe in if/else) */
+#define GPIO_DIR_CLR(pin) gpio_dir_clr(pin)
+#define GPIO_DOUT_CLR(pin) gpio_dout_clr(pin)
+#define GPIO_DIN(pin) gpio_din(pin)
+
+/* busy-wait counter used by I2C_DUMMY_DELAY (uncalibrated bit-bang timing) */
+PHY_UINT32 i2c_dummy_cnt;
+
+#define I2C_DELAY 10
+#define I2C_DUMMY_DELAY(_delay) for (i2c_dummy_cnt = ((_delay)) ; i2c_dummy_cnt!=0; i2c_dummy_cnt--)
+
+void GPIO_InitIO(PHY_UINT32 dir, PHY_UINT32 pin)	/* configure pin direction, then settle delay */
+{
+	if (dir == OUTPUT)
+	{
+		GPIO_DIR_SET(pin);
+	}
+	else
+	{
+		GPIO_DIR_CLR(pin);
+	}
+	I2C_DUMMY_DELAY(100);
+}
+
+void GPIO_WriteIO(PHY_UINT32 data, PHY_UINT32 pin)	/* drive pin high (data==1) or low */
+{
+	if (data == 1){
+		GPIO_DOUT_SET(pin);
+	}
+	else{
+		GPIO_DOUT_CLR(pin);
+	}
+}
+
+PHY_UINT32 GPIO_ReadIO( PHY_UINT32 pin)	/* sample pin level; returns 0 or 1 */
+{
+	PHY_UINT16 data;
+	data=GPIO_DIN(pin);
+	return (PHY_UINT32)data;
+}
+
+
+void SerialCommStop(void)	/* I2C STOP: SDA rises while SCL is high, then release both lines */
+{
+	GPIO_InitIO(OUTPUT,SDA);
+	GPIO_WriteIO(0,SCL);
+	I2C_DUMMY_DELAY(I2C_DELAY);
+	GPIO_WriteIO(0,SDA);
+	I2C_DUMMY_DELAY(I2C_DELAY);
+	GPIO_WriteIO(1,SCL);
+	I2C_DUMMY_DELAY(I2C_DELAY);
+	GPIO_WriteIO(1,SDA);
+	I2C_DUMMY_DELAY(I2C_DELAY);
+	GPIO_InitIO(INPUT,SCL);
+	GPIO_InitIO(INPUT,SDA);
+}
+
+void SerialCommStart(void) /* I2C START: SDA falls while SCL is high; leaves SCL low */
+{
+	GPIO_InitIO(OUTPUT,SCL);
+	GPIO_InitIO(OUTPUT,SDA);
+	GPIO_WriteIO(1,SDA);
+	I2C_DUMMY_DELAY(I2C_DELAY);
+	GPIO_WriteIO(1,SCL);
+	I2C_DUMMY_DELAY(I2C_DELAY);
+	GPIO_WriteIO(0,SDA);
+	I2C_DUMMY_DELAY(I2C_DELAY);
+	GPIO_WriteIO(0,SCL);
+	I2C_DUMMY_DELAY(I2C_DELAY);
+}
+
+PHY_UINT32 SerialCommTxByte(PHY_UINT8 data) /* shift one byte out MSB-first; returns PHY_TRUE on ACK, PHY_FALSE on NACK */
+{
+	PHY_INT32 i, ack;
+
+	GPIO_InitIO(OUTPUT,SDA);
+
+	for(i=8; --i>0;){	/* bits 7..1 */
+		GPIO_WriteIO((data>>i)&0x01, SDA);
+		I2C_DUMMY_DELAY(I2C_DELAY);
+		GPIO_WriteIO( 1, SCL); /* high */
+		I2C_DUMMY_DELAY(I2C_DELAY);
+		GPIO_WriteIO( 0, SCL); /* low */
+		I2C_DUMMY_DELAY(I2C_DELAY);
+	}
+	GPIO_WriteIO((data>>i)&0x01, SDA);	/* bit 0 (i==0 after the loop) */
+	I2C_DUMMY_DELAY(I2C_DELAY);
+	GPIO_WriteIO( 1, SCL); /* high */
+	I2C_DUMMY_DELAY(I2C_DELAY);
+	GPIO_WriteIO( 0, SCL); /* low */
+	I2C_DUMMY_DELAY(I2C_DELAY);
+
+	GPIO_WriteIO(0, SDA);	/* release SDA and clock in the slave's ACK bit */
+	I2C_DUMMY_DELAY(I2C_DELAY);
+	GPIO_InitIO(INPUT,SDA);
+	I2C_DUMMY_DELAY(I2C_DELAY);
+	GPIO_WriteIO(1, SCL);
+	I2C_DUMMY_DELAY(I2C_DELAY);
+	ack = GPIO_ReadIO(SDA); /* ack 1: error , 0:ok */
+	GPIO_WriteIO(0, SCL);
+	I2C_DUMMY_DELAY(I2C_DELAY);
+
+	if(ack==1)
+		return PHY_FALSE;
+	else
+		return PHY_TRUE;
+}
+
+void SerialCommRxByte(PHY_UINT8 *data, PHY_UINT8 ack)	/* clock one byte in MSB-first, then drive the ack bit (0 = ACK) */
+{
+	PHY_INT32 i;
+	PHY_UINT32 dataCache;
+
+	dataCache = 0;
+	GPIO_InitIO(INPUT,SDA);
+	for(i=8; --i>=0;){
+		dataCache <<= 1;
+		I2C_DUMMY_DELAY(I2C_DELAY);
+		GPIO_WriteIO(1, SCL);
+		I2C_DUMMY_DELAY(I2C_DELAY);
+		dataCache |= GPIO_ReadIO(SDA);	/* sample while SCL is high */
+		GPIO_WriteIO(0, SCL);
+		I2C_DUMMY_DELAY(I2C_DELAY);
+	}
+	GPIO_InitIO(OUTPUT,SDA);
+	GPIO_WriteIO(ack, SDA);
+	I2C_DUMMY_DELAY(I2C_DELAY);
+	GPIO_WriteIO(1, SCL);
+	I2C_DUMMY_DELAY(I2C_DELAY);
+	GPIO_WriteIO(0, SCL);
+	I2C_DUMMY_DELAY(I2C_DELAY);
+	*data = (unsigned char)dataCache;
+}
+
+
+PHY_INT32 I2cWriteReg(PHY_UINT8 dev_id, PHY_UINT8 Addr, PHY_UINT8 Data)	/* returns PHY_TRUE on success, PHY_FALSE on failure */
+{
+	PHY_INT8 acknowledge=0;
+	acknowledge = I2CWriterPtr(dev_id, Addr, &Data, 1);	/* nonzero status from the platform writer means failure */
+	if(acknowledge)
+	{
+		return PHY_FALSE;
+	}
+	else
+	{
+		return PHY_TRUE;
+	}
+
+}
+
+PHY_INT32 I2cReadReg(PHY_UINT8 dev_id, PHY_UINT8 Addr, PHY_UINT8 *Data)	/* returns the platform reader's status code unmodified */
+{
+	PHY_INT32 acknowledge = 0;
+	acknowledge = I2CReaderPtr(dev_id, Addr, Data, 1);
+	return acknowledge;
+}
+
+
+void _U3_Write_Bank(PHY_INT32 bankValue){	/* select PHY register page (address bits [23:16]) */
+	I2cWriteReg(U3_PHY_I2C_DEV, U3_PHY_PAGE, bankValue);
+}
+
+PHY_INT32 _U3Write_Reg(PHY_INT32 address, PHY_INT32 value){	/* write one byte into the currently selected page */
+	return I2cWriteReg(U3_PHY_I2C_DEV, address, value);	/* fix: propagate the ack status (function had no return) */
+}
+
+/* Read one byte from the currently selected page; returns the byte value,
+ * or PHY_FALSE on allocation/I2C failure. */
+PHY_INT32 _U3Read_Reg(PHY_INT32 address){
+	PHY_INT8 *pu1Buf;
+	PHY_INT32 ret;
+
+	pu1Buf = (char *)kmalloc(1, GFP_NOIO);
+	if(!pu1Buf) return PHY_FALSE;	/* fix: kmalloc result was not checked */
+	ret = I2cReadReg(U3_PHY_I2C_DEV, address, pu1Buf);
+	if(ret == PHY_FALSE){
+		printk(KERN_ERR "Read failed\n");
+		kfree(pu1Buf);	/* fix: buffer leaked on the error path */
+		return PHY_FALSE;
+	}
+	ret = (unsigned char)pu1Buf[0];
+	kfree(pu1Buf);
+	return ret;
+}
+
+PHY_INT32 U3PhyWriteReg32(PHY_UINT32 addr, PHY_UINT32 data){	/* banked I2C write: addr[23:16] = page, addr[7:0] = offset */
+	PHY_INT32 bank;
+	PHY_INT32 addr8;
+	PHY_INT8 data_0, data_1, data_2, data_3;
+
+	bank = (addr >> 16) & 0xff;
+	addr8 = addr & 0xff;
+	data_0 = data & 0xff;	/* little-endian byte split */
+	data_1 = (data>>8) & 0xff;
+	data_2 = (data>>16) & 0xff;
+	data_3 = (data>>24) & 0xff;
+
+#if 0
+	HW_I2C_writer(U3_PHY_I2C_DEV, U3_PHY_PAGE, bank, 1);
+	HW_I2C_writer(U3_PHY_I2C_DEV, addr8, data_0, 1);
+	HW_I2C_writer(U3_PHY_I2C_DEV, addr8+1, data_1, 1);
+	HW_I2C_writer(U3_PHY_I2C_DEV, addr8+2, data_2, 1);
+	HW_I2C_writer(U3_PHY_I2C_DEV, addr8+3, data_3, 1);
+#else
+	_U3_Write_Bank(bank);	/* select page, then write the four byte lanes */
+	_U3Write_Reg(addr8, data_0);
+	_U3Write_Reg(addr8+1, data_1);
+	_U3Write_Reg(addr8+2, data_2);
+	_U3Write_Reg(addr8+3, data_3);
+#endif
+
+	return 0;
+}
+
+PHY_INT32 U3PhyReadReg32(PHY_UINT32 addr){	/* banked I2C read, little-endian byte assembly */
+	PHY_INT32 bank;
+	PHY_INT32 addr8;
+	PHY_INT32 data;
+
+	bank = (addr >> 16) & 0xff;
+	addr8 = addr & 0xff;
+
+	_U3_Write_Bank(bank);
+	data = _U3Read_Reg(addr8);
+	data |= (_U3Read_Reg(addr8+1) << 8);
+	data |= (_U3Read_Reg(addr8+2) << 16);
+	data |= (_U3Read_Reg(addr8+3) << 24);
+	return data;
+}
+
+PHY_INT32 U3PhyWriteReg8(PHY_UINT32 addr, PHY_UINT8 data)	/* single-byte banked write */
+{
+	PHY_INT32 bank;
+	PHY_INT32 addr8;
+
+	bank = (addr >> 16) & 0xff;
+	addr8 = addr & 0xff;
+	_U3_Write_Bank(bank);
+	_U3Write_Reg(addr8, data);
+
+	return PHY_TRUE;
+}
+
+PHY_INT8 U3PhyReadReg8(PHY_UINT32 addr){	/* single-byte banked read */
+	PHY_INT32 bank;
+	PHY_INT32 addr8;
+	PHY_INT32 data;
+
+	bank = (addr >> 16) & 0xff;
+	addr8 = addr & 0xff;
+	_U3_Write_Bank(bank);
+	data = _U3Read_Reg(addr8);
+	return data;
+}
+#endif
Index: linux-3.18.21/drivers/usb/host/mtk_test/mtk-phy.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/mtk-phy.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,343 @@
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <asm/tc3162/tc3162.h>
+#define U3_PHY_LIB
+#include "mtk-phy.h"
+
+#ifdef CONFIG_A60810_SUPPORT
+#include "mtk-phy-a60810.h"
+#endif
+PHY_INT32 u3phy_config();
+
+#ifdef CONFIG_A60810_SUPPORT
+static const struct u3phy_operator a60810_operators = {	/* ops table for the A60810 (T20) test-chip PHY */
+	.init = phy_init_a60810,
+	.change_pipe_phase = phy_change_pipe_phase_a60810,
+	.eyescan_init = eyescan_init_a60810,
+	.eyescan = phy_eyescan_a60810,
+	.u2_connect = u2_connect_a60810,
+	.u2_disconnect = u2_disconnect_a60810,
+	.u2_slew_rate_calibration = u2_slew_rate_calibration_a60810,
+};
+#endif
+
+#ifdef CONFIG_PROJECT_PHY
+static struct u3phy_operator project_operators = {	/* ops table for the integrated (non-test-chip) PHY */
+	.init = phy_init,
+	.change_pipe_phase = phy_change_pipe_phase,
+	.eyescan_init = eyescan_init,
+	.eyescan = phy_eyescan,
+	.u2_connect = u2_connect,
+	.u2_disconnect = u2_disconnect,
+	.u2_slew_rate_calibration = u2_slew_rate_calibration,
+};
+#endif
+static void setup_25M_PLL(void)	/* fix: conventional "static void" specifier order (was "void static") */
+{
+	/* Program the USB PHY analog/PLL registers for a 25 MHz reference clock. */
+	U3HWriteReg8(0xbfa80c1c, 0x18);
+	U3HWriteReg8(0xbfa80c1d, 0x18);
+	U3HWriteReg8(0xbfa80c1f, 0x18);
+	U3HWriteReg32(0xbfa80c24, 0x18000000);
+	U3HWriteReg32(0xbfa80c28, 0x18000000);
+	U3HWriteReg32(0xbfa80c30, 0x18000000);
+	U3HWriteReg32(0xbfa80c38, 0x004a004a);
+	U3HWriteReg8(0xbfa80c3e, 0x4a);
+	U3HWriteReg8(0xbfa80c3f, 0x0);
+	U3HWriteReg8(0xbfa80c42, 0x48);
+	U3HWriteReg8(0xbfa80c43, 0x0);
+	U3HWriteReg8(0xbfa80c44, 0x48);
+	U3HWriteReg8(0xbfa80c45, 0x0);
+	U3HWriteReg8(0xbfa80c48, 0x48);
+	U3HWriteReg8(0xbfa80c49, 0x0);
+
+	U3HWriteReg8(0xbfa80b24, 0x90);
+	U3HWriteReg8(0xbfa80b25, 0x1);
+	U3HWriteReg32(0xbfa80b10, 0x1c000000);
+	U3HWriteReg8(0xbfa80b0b, 0xe);
+	return;
+
+}
+PHY_INT32 u3phy_config_751627(void)	/* EN751627: set up 25 MHz PLL if needed, then enable both ports */
+{
+	if( (readl(0xbfb0008c)&0x40000) == 0)
+	{
+		setup_25M_PLL();
+		printk(KERN_ERR "USB PLL 25MHz setting\n");
+	}
+
+	writel(0xC0240008, 0xBFA8081C); /* enable port0 */
+	writel(0xC0240000, 0xBFA8101C); /* enable port1 */
+	printk(KERN_ERR "7516 USB PHY config, enable port0 port1\n");
+	return 0;	/* fix: declared PHY_INT32 but had no return value */
+}
+
+PHY_INT32 clear_reset(void)
+{
+	/* Pulse bit22 of 0xbfb00834, then program the GPIO/IPPC registers to
+	 * fixed post-reset values.  The readl() before each write appears to
+	 * act as a posted-write flush -- NOTE(review): confirm intent. */
+	PHY_INT32 value;
+
+	value = readl(0xbfb00834);
+	value |= (0x1<<22);
+	writel(value, 0xbfb00834);
+
+	value = readl(0xbfb00834);
+	value = 0;
+	writel(value, 0xbfb00834);
+
+	value = readl(0xbfa80700);
+	value = 0x10c00;
+	writel(value, 0xbfa80700);
+
+	value = readl(0xbfa80704);
+	value = 0;
+	writel(value, 0xbfa80704);
+
+	value = readl(0xbfa80730);
+	value = 0xc;
+	writel(value, 0xbfa80730);
+	return 0;	/* fix: declared PHY_INT32 but had no return value */
+}
+
+extern int (*I2CWriterPtr)(u8 DevAddr, u8 WordAddr, u8* data_value, u8 data_len);
+extern int (*I2CReaderPtr)(u8 DevAddr, u8 WordAddr, u8* data_value, u8 data_len);
+#define USB_PHY_DEV_ADDR 0x60
+
+PHY_INT32 u3phy_config_FPGA(){	/* program the FPGA test-chip PHY over I2C; returns 0 on success, PHY_FALSE if no I2C backend */
+	u8 u1Value[4] = {0, 0, 0,0};
+
+	if(!I2CWriterPtr || !I2CReaderPtr)
+		return PHY_FALSE;	/* fix: bare "return;" is invalid in a PHY_INT32 function */
+	u1Value[0] = 0x0;	/* register 0xff selects the PHY page for the following writes */
+	I2CWriterPtr(USB_PHY_DEV_ADDR, 0xff, u1Value, 1);
+
+	u1Value[0] = 0x55;
+	I2CWriterPtr(USB_PHY_DEV_ADDR, 0x05, u1Value, 1);
+
+	u1Value[0] = 0x84;
+	I2CWriterPtr(USB_PHY_DEV_ADDR, 0x18, u1Value, 1);
+
+	u1Value[0] = 0x10;
+	I2CWriterPtr(USB_PHY_DEV_ADDR, 0xff, u1Value, 1);
+
+	u1Value[0] = 0x84;
+	I2CWriterPtr(USB_PHY_DEV_ADDR, 0x0a, u1Value, 1);
+
+	u1Value[0] = 0x40;
+	I2CWriterPtr(USB_PHY_DEV_ADDR, 0xff, u1Value, 1);
+/***********************************************************/
+	u1Value[0] = 0x46;
+	u1Value[1] = 0x0;
+	I2CWriterPtr(USB_PHY_DEV_ADDR, 0x38, u1Value, 2);
+
+	u1Value[0] = 0x40;
+	u1Value[1] = 0x0;
+	I2CWriterPtr(USB_PHY_DEV_ADDR, 0x42, u1Value, 2);
+
+	u1Value[0] = 0xab;
+	u1Value[1] = 0x0c;
+	I2CWriterPtr(USB_PHY_DEV_ADDR, 0x08, u1Value, 2);
+/***********************************************************/
+	u1Value[0] = 0x71;
+	u1Value[1] = 0xe7;
+	u1Value[2] = 0x4f;
+	I2CWriterPtr(USB_PHY_DEV_ADDR, 0x0c, u1Value, 3);
+/***********************************************************/
+	u1Value[0] = 0xe1;
+	I2CWriterPtr(USB_PHY_DEV_ADDR, 0x10, u1Value, 1);
+
+	u1Value[0] = 0x5f;
+	I2CWriterPtr(USB_PHY_DEV_ADDR, 0x14, u1Value, 1);
+
+	u1Value[0] = 0x60;
+	I2CWriterPtr(USB_PHY_DEV_ADDR, 0xff, u1Value, 1);
+
+	u1Value[0] = 0x03;
+	I2CWriterPtr(USB_PHY_DEV_ADDR, 0x14, u1Value, 1);
+
+	u1Value[0] = 0x0;
+	I2CWriterPtr(USB_PHY_DEV_ADDR, 0xff, u1Value, 1);
+
+	u1Value[0] = 0x40;
+	I2CWriterPtr(USB_PHY_DEV_ADDR, 0x15, u1Value, 1);
+/***********************************************************/
+	u1Value[0] = 0x50;
+	I2CWriterPtr(USB_PHY_DEV_ADDR, 0xff, u1Value, 1);
+
+	u1Value[0] = 0x10;
+	u1Value[1] = 0x54;
+	I2CWriterPtr(USB_PHY_DEV_ADDR, 0x02, u1Value, 2);
+/***********************************************************/
+	u1Value[0] = 0x0;
+	I2CWriterPtr(USB_PHY_DEV_ADDR, 0xff, u1Value, 1);
+
+	u1Value[0] = 0x08;
+	I2CWriterPtr(USB_PHY_DEV_ADDR, 0x68, u1Value, 1);
+
+	u1Value[0] = 0x04;
+	u1Value[1] = 0x0;
+	I2CWriterPtr(USB_PHY_DEV_ADDR, 0x6a, u1Value, 2);
+
+	u1Value[0] = 0x10;
+	I2CWriterPtr(USB_PHY_DEV_ADDR, 0xff, u1Value, 1);
+
+	u1Value[0] = 0x10;
+	u1Value[1] = 0x44;	/* fix: second byte of the 2-byte write; [0] was assigned twice -- NOTE(review): verify intended value */
+	I2CWriterPtr(USB_PHY_DEV_ADDR, 0x42, u1Value, 2);
+	return 0;	/* fix: declared PHY_INT32 but had no return value */
+}
+
+#define BGA_TYPE 1	/* assumed: selects BGA-package register values -- TODO confirm */
+PHY_INT32 u3phy_config(){	/* top-level PHY config dispatch: EN751627 path vs. legacy path */
+	if(isEN751627){
+		u3phy_config_751627();
+	}else{
+#if BGA_TYPE
+		writel(0xC0240008, 0xBFA8081C);/* port0 */
+		writel(0xC0240000, 0xBFA8101C);/* port1 */
+		if(readl(0xbfb0008c)&0x01){
+			setup_25M_PLL();
+		}
+#else
+		writel(0xC0241580, 0xBFA8081C);
+		writel(0xC0240000, 0xBFA8101C);
+#endif
+	}
+	return 0;	/* fix: declared PHY_INT32 but had no return value */
+}
+
+PHY_INT32 u3phy_init_FPGA(){	/* FPGA bring-up: enable clock bit, clear GPIO overrides */
+	PHY_INT32 value;
+	value = readl(0xbfb40004);
+
+	value |= (0x1<<18);
+	writel(value, 0xbfb40004);
+
+	value = readl(0xbfa80700);
+
+	value &= ~(0x1<<0);
+	writel(value, 0xbfa80700);
+
+	value = readl(0xbfa80704);
+	value &= ~(0x1<<0);
+	writel(value, 0xbfa80704);
+
+	value = readl(0xbfa80750);
+	value &= ~(0x3<<0);
+	writel(value, 0xbfa80750);
+
+	value = readl(0xbfb90430);
+	value |= (0x1<<9);
+	writel(value, 0xbfb90430);
+	return 0;	/* fix: declared PHY_INT32 but had no return value */
+}
+
+
+PHY_INT32 u3phy_init(){	/* allocate + populate both port descriptors; PHY_TRUE on success */
+#ifndef CONFIG_PROJECT_PHY
+	PHY_INT32 u3phy_version;
+#endif
+
+	if(u3phy != NULL){	/* already initialised: idempotent */
+		return PHY_TRUE;
+	}
+
+	u3phy = kmalloc(sizeof(struct u3phy_info), GFP_NOIO);	/* NOTE(review): allocation result unchecked */
+	u3phy_p1 = kmalloc(sizeof(struct u3phy_info), GFP_NOIO);	/* NOTE(review): allocation result unchecked */
+#ifdef CONFIG_U3_PHY_GPIO_SUPPORT
+	u3phy->phyd_version_addr = 0x2000e4;	/* banked I2C address: page 0x20, offset 0xe4 */
+	u3phy_p1->phyd_version_addr = 0x2000e4;
+#else
+	u3phy->phyd_version_addr = U3_PHYD_B2_BASE + 0xe4;
+	u3phy_p1->phyd_version_addr = U3_PHYD_B2_BASE_P1 + 0xe4;
+#endif
+
+#ifdef CONFIG_PROJECT_PHY
+	u3phy->u2phy_regs = (struct u2phy_reg *)U2_PHY_BASE;
+	u3phy->u3phyd_regs = (struct u3phyd_reg *)U3_PHYD_BASE;
+	u3phy->u3phyd_bank2_regs = (struct u3phyd_bank2_reg *)U3_PHYD_B2_BASE;
+	u3phy->u3phya_regs = (struct u3phya_reg *)U3_PHYA_BASE;
+	u3phy->u3phya_da_regs = (struct u3phya_da_reg *)U3_PHYA_DA_BASE;
+	u3phy->sifslv_chip_regs = (struct sifslv_chip_reg *)SIFSLV_CHIP_BASE;
+	u3phy->sifslv_fm_regs = (struct sifslv_fm_feg *)SIFSLV_FM_FEG_BASE;
+	u3phy_ops = &project_operators;
+
+	u3phy_p1->u2phy_regs = (struct u2phy_reg *)U2_PHY_BASE_P1;
+	u3phy_p1->u3phyd_regs = (struct u3phyd_reg *)U3_PHYD_BASE_P1;
+	u3phy_p1->u3phyd_bank2_regs = (struct u3phyd_bank2_reg *)U3_PHYD_B2_BASE_P1;
+	u3phy_p1->u3phya_regs = (struct u3phya_reg *)U3_PHYA_BASE_P1;
+	u3phy_p1->u3phya_da_regs = (struct u3phya_da_reg *)U3_PHYA_DA_BASE_P1;
+	u3phy_p1->sifslv_chip_regs = (struct sifslv_chip_reg *)SIFSLV_CHIP_BASE;
+	u3phy_p1->sifslv_fm_regs = (struct sifslv_fm_feg *)SIFSLV_FM_FEG_BASE;
+
+#else
+	/* parse phy version */
+	u3phy_version = U3PhyReadReg32(u3phy->phyd_version_addr);
+	printk(KERN_ERR "phy version: %x\n", u3phy_version);
+	u3phy->phy_version = u3phy_version;
+
+	if(u3phy_version == 0xa60810a){	/* A60810 (T20) test chip */
+#ifdef CONFIG_A60810_SUPPORT
+#ifdef CONFIG_U3_PHY_GPIO_SUPPORT
+	u3phy->u2phy_regs_a60810 = (struct u2phy_reg_a60810 *)0x0;	/* page offsets, not CPU addresses */
+	u3phy->u3phyd_regs_a60810 = (struct u3phyd_reg_a60810 *)0x100000;
+	u3phy->u3phyd_bank2_regs_a60810 = (struct u3phyd_bank2_reg_a60810 *)0x200000;
+	u3phy->u3phya_regs_a60810 = (struct u3phya_reg_a60810 *)0x300000;
+	u3phy->u3phya_da_regs_a60810 = (struct u3phya_da_reg_a60810 *)0x400000;
+	u3phy->sifslv_chip_regs_a60810 = (struct sifslv_chip_reg_a60810 *)0x500000;
+	u3phy->spllc_regs_a60810 = (struct spllc_reg_a60810 *)0x600000;
+	u3phy->sifslv_fm_regs_a60810 = (struct sifslv_fm_feg_a60810 *)0xf00000;
+#else
+	u3phy->u2phy_regs_a60810 = (struct u2phy_reg_a60810 *)U2_PHY_BASE;
+	u3phy->u3phyd_regs_a60810 = (struct u3phyd_reg_a60810 *)U3_PHYD_BASE;
+	u3phy->u3phyd_bank2_regs_a60810 = (struct u3phyd_bank2_reg_a60810 *)U3_PHYD_B2_BASE;
+	u3phy->u3phya_regs_a60810 = (struct u3phya_reg_a60810 *)U3_PHYA_BASE;
+	u3phy->u3phya_da_regs_a60810 = (struct u3phya_da_reg_a60810 *)U3_PHYA_DA_BASE;
+	u3phy->sifslv_chip_regs_a60810 = (struct sifslv_chip_reg_a60810 *)SIFSLV_CHIP_BASE;
+	u3phy->sifslv_fm_regs_a60810 = (struct sifslv_fm_feg_a60810 *)SIFSLV_FM_FEG_BASE;
+#endif
+	u3phy_ops = (struct u3phy_operator *)&a60810_operators;
+#endif
+	}
+	else{
+		printk(KERN_ERR "No match phy version\n");
+		return PHY_FALSE;
+	}
+#endif
+
+	return PHY_TRUE;
+}
+
+PHY_INT32 U3PhyWriteField8(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask, PHY_INT32 value){	/* RMW: replace the bits under mask with value<<offset */
+	PHY_INT8 cur_value;
+	PHY_INT8 new_value;
+
+	cur_value = U3PhyReadReg8(addr);
+	new_value = (cur_value & (~mask)) | ((value << offset) & mask);	/* fix: clamp shifted value to mask, matching U3PhyWriteField32 */
+	U3PhyWriteReg8(addr, new_value);
+	return PHY_TRUE;
+}
+
+PHY_INT32 U3PhyWriteField32(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask, PHY_INT32 value){	/* RMW: replace the bits under mask with value<<offset */
+	PHY_INT32 cur_value;
+	PHY_INT32 new_value;
+
+	cur_value = U3PhyReadReg32(addr);
+	new_value = (cur_value & (~mask)) | ((value << offset) & mask);
+	U3PhyWriteReg32(addr, new_value);
+
+	return PHY_TRUE;
+}
+
+PHY_INT32 U3PhyReadField8(PHY_INT32 addr,PHY_INT32 offset,PHY_INT32 mask){	/* extract (reg & mask) >> offset from an 8-bit register */
+
+	return ((U3PhyReadReg8(addr) & mask) >> offset);
+}
+
+PHY_INT32 U3PhyReadField32(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask){	/* extract (reg & mask) >> offset from a 32-bit register */
+
+	return ((U3PhyReadReg32(addr) & mask) >> offset);
+}
+
Index: linux-3.18.21/drivers/usb/host/mtk_test/mtk-phy.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/mtk-phy.h 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,242 @@
+#ifndef __MTK_PHY_NEW_H
+#define __MTK_PHY_NEW_H
+
+//#define CONFIG_U3D_HAL_SUPPORT
+#ifdef CONFIG_U3D_HAL_SUPPORT
+#include "mu3d_hal_hw.h"
+#endif
+#ifdef CONFIG_U3D_HAL_SUPPORT
+#define REF_CK U3D_PHY_REF_CK
+#else
+#define REF_CK 25
+#endif
+
+/* include system library */
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+/* Choose PHY R/W implementation */
+#define CONFIG_U3_PHY_GPIO_SUPPORT //SW I2C implemented by GPIO
+//#define CONFIG_U3_PHY_AHB_SUPPORT //AHB, only on SoC
+
+/* Choose PHY version */
+//Select your project by defining one of the followings
+//#define CONFIG_PROJECT_7662 //7662, 7603
+//#define CONFIG_PROJECT_5399 //5399
+//#define CONFIG_PROJECT_7621 //7621
+
+#if defined CONFIG_PROJECT_7662 || defined CONFIG_PROJECT_5399 || defined CONFIG_PROJECT_7621
+//#define CONFIG_PROJECT_PHY
+#endif
+
+#ifndef CONFIG_PROJECT_PHY
+//These are for FPGA. All test chip PHY codes can be compiled at the same time
+//#define CONFIG_C60802_SUPPORT //T40 test chip
+//#define CONFIG_D60802_SUPPORT //T28 test chip
+//#define CONFIG_E60802_SUPPORT //T28HPM test chip
+#define CONFIG_A60810_SUPPORT //T20 test chip
+#endif
+
+/* BASE ADDRESS DEFINE, should define this on ASIC */
+#define PHY_BASE 0xBFA80000
+#define SIFSLV_FM_FEG_BASE (PHY_BASE+0x100)
+#define SIFSLV_CHIP_BASE (PHY_BASE+0x700)
+#define U2_PHY_BASE (PHY_BASE+0x800)
+#define U3_PHYD_BASE (PHY_BASE+0x900)
+#define U3_PHYD_B2_BASE (PHY_BASE+0xa00)
+#define U3_PHYA_BASE (PHY_BASE+0xb00)
+#define U3_PHYA_DA_BASE (PHY_BASE+0xc00)
+
+#define SIFSLV_FM_FEG_BASE_P1 (PHY_BASE+0x100)
+#define SIFSLV_CHIP_BASE_P1 (PHY_BASE+0x700)
+#define U2_PHY_BASE_P1 (PHY_BASE+0x1000)
+#define U3_PHYD_BASE_P1 (PHY_BASE+0x1100)
+#define U3_PHYD_B2_BASE_P1 (PHY_BASE+0x1200)
+#define U3_PHYA_BASE_P1 (PHY_BASE+0x1300)
+#define U3_PHYA_DA_BASE_P1 (PHY_BASE+0x1400)
+/*
+
+0x00000100 MODULE ssusb_sifslv_fmreg ssusb_sifslv_fmreg
+0x00000700 MODULE ssusb_sifslv_ippc ssusb_sifslv_ippc
+0x00000800 MODULE ssusb_sifslv_u2phy_com ssusb_sifslv_u2_phy_com_T28
+0x00000900 MODULE ssusb_sifslv_u3phyd ssusb_sifslv_u3phyd_T28
+0x00000a00 MODULE ssusb_sifslv_u3phyd_bank2 ssusb_sifslv_u3phyd_bank2_T28
+0x00000b00 MODULE ssusb_sifslv_u3phya ssusb_sifslv_u3phya_T28
+0x00000c00 MODULE ssusb_sifslv_u3phya_da ssusb_sifslv_u3phya_da_T28
+*/
+
+
+/* TYPE DEFINE */
+typedef unsigned int PHY_UINT32;
+typedef int PHY_INT32;
+typedef unsigned short PHY_UINT16;
+typedef short PHY_INT16;
+typedef unsigned char PHY_UINT8;
+typedef char PHY_INT8;
+
+typedef PHY_UINT32 __bitwise PHY_LE32;
+
+/* CONSTANT DEFINE */
+#define PHY_FALSE 0
+#define PHY_TRUE 1
+
+/* MACRO DEFINE */
+#define DRV_WriteReg32(addr,data) ((*(volatile PHY_UINT32 *)(addr)) = (unsigned long)(data))
+#define DRV_Reg32(addr) (*(volatile PHY_UINT32 *)(addr))
+
+#define DRV_MDELAY mdelay
+#define DRV_MSLEEP msleep
+#define DRV_UDELAY udelay
+#define DRV_USLEEP usleep
+
+/* PHY FUNCTION DEFINE, implemented in platform files, ex. ahb, gpio */
+PHY_INT32 U3PhyWriteReg32(PHY_UINT32 addr, PHY_UINT32 data);
+PHY_INT32 U3PhyReadReg32(PHY_UINT32 addr);
+PHY_INT32 U3PhyWriteReg8(PHY_UINT32 addr, PHY_UINT8 data);
+PHY_INT8 U3PhyReadReg8(PHY_UINT32 addr);
+
+/* PHY GENERAL USAGE FUNC, implemented in mtk-phy.c */
+PHY_INT32 U3PhyWriteField8(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask, PHY_INT32 value);
+PHY_INT32 U3PhyWriteField32(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask, PHY_INT32 value);
+PHY_INT32 U3PhyReadField8(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask);
+PHY_INT32 U3PhyReadField32(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask);
+
+struct u3phy_info {
+ PHY_INT32 phy_version;
+ PHY_INT32 phyd_version_addr;
+
+ #ifdef CONFIG_PROJECT_PHY
+
+ struct u2phy_reg *u2phy_regs;
+ struct u3phya_reg *u3phya_regs;
+ struct u3phya_da_reg *u3phya_da_regs;
+ struct u3phyd_reg *u3phyd_regs;
+ struct u3phyd_bank2_reg *u3phyd_bank2_regs;
+ struct sifslv_chip_reg *sifslv_chip_regs;
+ struct sifslv_fm_feg *sifslv_fm_regs;
+
+ #else
+
+#ifdef CONFIG_C60802_SUPPORT
+ //c60802 regs reference
+ struct u2phy_reg_c *u2phy_regs_c;
+ struct u3phya_reg_c *u3phya_regs_c;
+ struct u3phya_da_reg_c *u3phya_da_regs_c;
+ struct u3phyd_reg_c *u3phyd_regs_c;
+ struct u3phyd_bank2_reg_c *u3phyd_bank2_regs_c;
+ struct sifslv_chip_reg_c *sifslv_chip_regs_c;
+ struct sifslv_fm_feg_c *sifslv_fm_regs_c;
+#endif
+#ifdef CONFIG_D60802_SUPPORT
+ //d60802 regs reference
+ struct u2phy_reg_d *u2phy_regs_d;
+ struct u3phya_reg_d *u3phya_regs_d;
+ struct u3phya_da_reg_d *u3phya_da_regs_d;
+ struct u3phyd_reg_d *u3phyd_regs_d;
+ struct u3phyd_bank2_reg_d *u3phyd_bank2_regs_d;
+ struct sifslv_chip_reg_d *sifslv_chip_regs_d;
+ struct sifslv_fm_feg_d *sifslv_fm_regs_d;
+#endif
+#ifdef CONFIG_E60802_SUPPORT
+ //e60802 regs reference
+ struct u2phy_reg_e *u2phy_regs_e;
+ struct u3phya_reg_e *u3phya_regs_e;
+ struct u3phya_da_reg_e *u3phya_da_regs_e;
+ struct u3phyd_reg_e *u3phyd_regs_e;
+ struct u3phyd_bank2_reg_e *u3phyd_bank2_regs_e;
+ struct sifslv_chip_reg_e *sifslv_chip_regs_e;
+ struct sifslv_fm_feg_e *sifslv_fm_regs_e;
+#endif
+#ifdef CONFIG_A60810_SUPPORT
+ //a60810 regs reference
+ struct u2phy_reg_a60810 *u2phy_regs_a60810;
+ struct u3phya_reg_a60810 *u3phya_regs_a60810;
+ struct u3phya_da_reg_a60810 *u3phya_da_regs_a60810;
+ struct u3phyd_reg_a60810 *u3phyd_regs_a60810;
+ struct u3phyd_bank2_reg_a60810 *u3phyd_bank2_regs_a60810;
+ struct sifslv_chip_reg_a60810 *sifslv_chip_regs_a60810;
+ struct spllc_reg_a60810 *spllc_regs_a60810;
+ struct sifslv_fm_feg_a60810 *sifslv_fm_regs_a60810;
+#endif
+
+ #endif
+};
+
+struct u3phy_operator {	/* per-PHY-version dispatch table; instances: a60810_operators, project_operators */
+	PHY_INT32 (*init) (struct u3phy_info *info);
+	PHY_INT32 (*change_pipe_phase) (struct u3phy_info *info, PHY_INT32 phy_drv, PHY_INT32 pipe_phase);
+	PHY_INT32 (*eyescan_init) (struct u3phy_info *info);
+	PHY_INT32 (*eyescan) (struct u3phy_info *info, PHY_INT32 x_t1, PHY_INT32 y_t1, PHY_INT32 x_br, PHY_INT32 y_br, PHY_INT32 delta_x, PHY_INT32 delta_y, PHY_INT32 eye_cnt, PHY_INT32 num_cnt, PHY_INT32 PI_cal_en, PHY_INT32 num_ignore_cnt);
+	PHY_INT32 (*u2_connect) (struct u3phy_info *info);
+	PHY_INT32 (*u2_disconnect) (struct u3phy_info *info);
+	PHY_INT32 (*u2_save_current_entry) (struct u3phy_info *info);	/* may be left NULL by a table (not set by the visible instances) */
+	PHY_INT32 (*u2_save_current_recovery) (struct u3phy_info *info);
+	PHY_INT32 (*u2_slew_rate_calibration) (struct u3phy_info *info);
+};
+
+#ifdef U3_PHY_LIB
+#define AUTOEXT
+#else
+#define AUTOEXT extern
+#endif
+
+AUTOEXT struct u3phy_info *u3phy;
+AUTOEXT struct u3phy_info *u3phy_p1;
+AUTOEXT struct u3phy_operator *u3phy_ops;
+
+/*********eye scan required*********/
+
+#define LO_BYTE(x) ((PHY_UINT8)((x) & 0xFF))
+#define HI_BYTE(x) ((PHY_UINT8)(((x) & 0xFF00) >> 8))
+
+typedef enum
+{
+ SCAN_UP,
+ SCAN_DN
+} enumScanDir;
+
+struct strucScanRegion
+{
+ PHY_INT8 bX_tl;
+ PHY_INT8 bY_tl;
+ PHY_INT8 bX_br;
+ PHY_INT8 bY_br;
+ PHY_INT8 bDeltaX;
+ PHY_INT8 bDeltaY;
+};
+
+struct strucTestCycle
+{
+ PHY_UINT16 wEyeCnt;
+ PHY_INT8 bNumOfEyeCnt;
+ PHY_INT8 bPICalEn;
+ PHY_INT8 bNumOfIgnoreCnt;
+};
+
+#define ERRCNT_MAX 128
+#define CYCLE_COUNT_MAX 15
+
+/// the map resolution is 128 x 128 pts
+#define MAX_X 127
+#define MAX_Y 127
+#define MIN_X 0
+#define MIN_Y 0
+
+PHY_INT32 u3phy_init(void);
+
+AUTOEXT struct strucScanRegion _rEye1;
+AUTOEXT struct strucScanRegion _rEye2;
+AUTOEXT struct strucTestCycle _rTestCycle;
+AUTOEXT PHY_UINT8 _bXcurr;
+AUTOEXT PHY_UINT8 _bYcurr;
+AUTOEXT enumScanDir _eScanDir;
+AUTOEXT PHY_INT8 _fgXChged;
+AUTOEXT PHY_INT8 _bPIResult;
+AUTOEXT PHY_UINT32 pwErrCnt0[CYCLE_COUNT_MAX][ERRCNT_MAX][ERRCNT_MAX];
+AUTOEXT PHY_UINT32 pwErrCnt1[CYCLE_COUNT_MAX][ERRCNT_MAX][ERRCNT_MAX];
+
+/***********************************/
+#endif
+
Index: linux-3.18.21/drivers/usb/host/mtk_test/mtk-protocol.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/mtk-protocol.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,812 @@
+#include "mtk-protocol.h"
+#include "mtk-test-lib.h"
+#include <linux/random.h>
+
+int dev_query_result()
+{
+ int ret;
+ struct usb_ctrlrequest *dr;
+ struct usb_device *udev, *rhdev;
+ struct urb *urb;
+ struct protocol_query *query;
+
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ query= kmalloc(AT_CMD_ACK_DATA_LENGTH, GFP_NOIO);
+
+ memset(query, 0, AT_CMD_ACK_DATA_LENGTH);
+
+	dr->bRequestType = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ dr->bRequest = AT_CMD_ACK;
+ dr->wValue = cpu_to_le16(0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(AT_CMD_ACK_DATA_LENGTH);
+
+ urb = alloc_ctrl_urb(dr, query, udev);
+ ret = f_ctrlrequest(urb, udev);
+
+ memcpy(query, urb->transfer_buffer, AT_CMD_ACK_DATA_LENGTH);
+ if(ret != RET_SUCCESS){
+ printk(KERN_ERR "[DEV]query status ctrl request failed!!\n");
+ ret = STATUS_FAIL;
+ }
+ else{
+ ret = query->result;
+ }
+ kfree(dr);
+ kfree(query);
+ usb_free_urb(urb);
+ return ret;
+}
+
+int dev_polling_result(){
+ int i, value;
+ int count = POLLING_COUNT;
+ int delay_msecs = POLLING_STOP_DELAY_MSECS;
+
+ for(i=0; i<count; i++){
+ value=dev_query_result();
+ printk(KERN_INFO "polling device status: %d !!\n", value);
+ msleep(delay_msecs);
+ if(value != STATUS_BUSY)
+ break;
+ }
+ return value;
+}
+
+
+int dev_query_status(struct usb_device *dev)
+{
+ int ret;
+ struct usb_ctrlrequest *dr;
+ struct usb_device *udev, *rhdev;
+ struct urb *urb;
+ struct protocol_query *query;
+
+ if(dev){
+ udev = dev;
+ }
+ else{
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+ }
+
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ query= kmalloc(AT_CMD_ACK_DATA_LENGTH, GFP_NOIO);
+
+ memset(query, 0, AT_CMD_ACK_DATA_LENGTH);
+
+	dr->bRequestType = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ dr->bRequest = AT_CMD_ACK;
+ dr->wValue = cpu_to_le16(0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(AT_CMD_ACK_DATA_LENGTH);
+
+ urb = alloc_ctrl_urb(dr, query, udev);
+ ret = f_ctrlrequest(urb, udev);
+
+ memcpy(query, urb->transfer_buffer, AT_CMD_ACK_DATA_LENGTH);
+ if(ret != RET_SUCCESS){
+ printk(KERN_ERR "[DEV]query status ctrl request failed!!\n");
+ ret = STATUS_FAIL;
+ }
+ else{
+ ret = le16_to_cpu(query->status);
+ }
+ kfree(dr);
+ kfree(query);
+ usb_free_urb(urb);
+ return ret;
+}
+
+int dev_polling_status(struct usb_device *dev){
+ int i, value;
+ int count = POLLING_COUNT;
+ int delay_msecs = POLLING_DELAY_MSECS;
+
+ for(i=0; i<count; i++){
+ value=dev_query_status(dev);
+ msleep(delay_msecs);
+ if(value != STATUS_BUSY)
+ break;
+ }
+ return value;
+}
+
+int dev_reset(USB_DEV_SPEED speed, struct usb_device *dev){
+ struct usb_device *udev, *rhdev;
+ struct usb_ctrlrequest *dr;
+ struct urb *urb;
+ int ret;
+ char *ptr;
+
+ if(dev){
+ udev = dev;
+ }
+ else{
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+ }
+
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ ptr= kmalloc(2048, GFP_NOIO);
+
+ memset(ptr, 0, RESET_STATE_DATA_LENGTH);
+ *ptr=0x55;
+ *(ptr+1)=0xAA;
+ *(ptr+2)=RESET_STATE_DATA_LENGTH&0xFF;
+ *(ptr+3)=RESET_STATE_DATA_LENGTH>>8;
+ *(ptr+4)=RESET_STATE&0xFF;
+ *(ptr+5)=RESET_STATE>>8;
+ *(ptr+6)=speed;
+
+	dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ dr->bRequest = AT_CMD_SET;
+ dr->wValue = cpu_to_le16(0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(RESET_STATE_DATA_LENGTH);
+
+ urb = alloc_ctrl_urb(dr, ptr, udev);
+ ret = f_ctrlrequest(urb, udev);
+
+ kfree(dr);
+ kfree(ptr);
+ usb_free_urb(urb);
+
+ if(ret)
+ {
+ printk(KERN_ERR "[DEV]reset device ctrl request failed!!\n");
+ return RET_FAIL;
+ }
+
+ return RET_SUCCESS;
+}
+
+int dev_config_ep0(short int maxp, struct usb_device *usbdev){
+ struct usb_device *udev, *rhdev;
+ struct urb *urb;
+ struct usb_ctrlrequest *dr;
+ char *ptr;
+ int ret;
+
+ if(!usbdev){
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+ }
+ else{
+ udev = usbdev;
+ }
+
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ ptr= kmalloc(2048, GFP_NOIO);
+
+ memset(ptr, 0, CONFIGEP_STATE_DATA_LENGTH);
+
+ *ptr=0x55;
+ *(ptr+1)=0xAA;
+ *(ptr+2)=CONFIGEP_STATE_DATA_LENGTH&0xFF;
+ *(ptr+3)=CONFIGEP_STATE_DATA_LENGTH>>8;
+ *(ptr+4)=CONFIGEP_STATE&0xFF;
+ *(ptr+5)=CONFIGEP_STATE>>8;
+ *(ptr+6)=0; /* ep_num */
+ *(ptr+7)=0; /* dir */
+ *(ptr+8)=EPATT_CTRL; /* transfer_type */
+ *(ptr+9)=0; /* interval */
+ *(ptr+10)=maxp&0xFF; /* maxp */
+ *(ptr+11)=(maxp>>8); /* maxp */
+ *(ptr+12)=0; /* mult */
+
+	dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ dr->bRequest = AT_CMD_SET;
+ dr->wValue = cpu_to_le16(0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(CONFIGEP_STATE_DATA_LENGTH);
+
+ urb = alloc_ctrl_urb(dr, ptr, udev);
+ ret = f_ctrlrequest(urb, udev);
+
+ kfree(dr);
+ kfree(ptr);
+ usb_free_urb(urb);
+
+ if(ret)
+ {
+ printk(KERN_ERR "[DEV]config ep ctrl request failed!!\n");
+ return RET_FAIL;
+ }
+
+ ret=dev_polling_status(udev);
+
+ if(ret)
+ {
+ printk(KERN_ERR "query request fail!!\n");
+ return RET_FAIL;
+ }
+
+ return RET_SUCCESS;
+}
+int dev_config_ep(char ep_num,char dir,char type,short int maxp,char bInterval
+,char slot, char burst, char mult, struct usb_device *dev){
+ struct usb_device *udev, *rhdev;
+ struct urb *urb;
+ struct usb_ctrlrequest *dr;
+ char *ptr;
+ int ret;
+
+ rhdev = my_hcd->self.root_hub;
+ if(dev){
+ udev = dev;
+ }
+ else{
+ udev = rhdev->children[g_port_id-1];
+ }
+
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ ptr= kmalloc(2048, GFP_NOIO);
+
+ memset(ptr, 0, CONFIGEP_STATE_DATA_LENGTH);
+ *ptr=0x55;
+ *(ptr+1)=0xAA;
+ *(ptr+2)=CONFIGEP_STATE_DATA_LENGTH&0xFF;
+ *(ptr+3)=CONFIGEP_STATE_DATA_LENGTH>>8;
+ *(ptr+4)=CONFIGEP_STATE&0xFF;
+ *(ptr+5)=CONFIGEP_STATE>>8;
+ *(ptr+6)=ep_num;
+ *(ptr+7)=dir;
+ *(ptr+8)=type;
+ *(ptr+9)=bInterval;
+ *(ptr+10)=maxp&0xFF;
+ *(ptr+11)=(maxp>>8);
+ *(ptr+12)=slot;
+ *(ptr+13)=burst;
+ *(ptr+14)=mult;
+
+	dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ dr->bRequest = AT_CMD_SET;
+ dr->wValue = cpu_to_le16(0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(CONFIGEP_STATE_DATA_LENGTH);
+
+ urb = alloc_ctrl_urb(dr, ptr, udev);
+ ret = f_ctrlrequest(urb, udev);
+
+ kfree(dr);
+ kfree(ptr);
+ usb_free_urb(urb);
+
+ if(ret)
+ {
+ printk(KERN_ERR "[DEV]config ep ctrl request failed!!\n");
+ return RET_FAIL;
+ }
+
+ ret=dev_polling_status(udev);
+
+ if(ret)
+ {
+ printk(KERN_ERR "query request fail!!\n");
+ return RET_FAIL;
+ }
+
+ return RET_SUCCESS;
+}
+
+int dev_ctrl_transfer(char dir,int length,char *buffer, struct usb_device *dev){
+ struct usb_device *udev, *rhdev;
+ struct urb *urb;
+ struct usb_ctrlrequest *dr;
+ int ret;
+
+ rhdev = my_hcd->self.root_hub;
+ if(dev){
+ udev = dev;
+ }
+ else{
+ udev = rhdev->children[g_port_id-1];
+ }
+
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+	dr->bRequestType = dir | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ dr->bRequest = AT_CTRL_TEST;
+ dr->wValue = cpu_to_le16(0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(length);
+
+ urb = alloc_ctrl_urb(dr, buffer, udev);
+
+ ret = f_ctrlrequest(urb, udev);
+
+ kfree(dr);
+ usb_free_urb(urb);
+
+ if(ret)
+ {
+ printk(KERN_ERR "[DEV]config ep ctrl request failed!!\n");
+ return RET_FAIL;
+ }
+/*
+ ret=dev_polling_status(udev);
+
+ if(ret)
+ {
+ printk(KERN_ERR "query request fail!!\n");
+ return RET_FAIL;
+ }
+*/
+ return RET_SUCCESS;
+}
+
+
+int dev_ctrl_loopback(int length, struct usb_device *dev){
+ char *ptr1,*ptr2;
+ int ret,i;
+
+ ptr1= kmalloc(length, GFP_NOIO);
+ get_random_bytes(ptr1, length);
+ ret=dev_ctrl_transfer(USB_DIR_OUT,length,ptr1,dev);
+
+ if(ret)
+ {
+ printk(KERN_ERR "ctrl loopback fail!!\n");
+ return RET_FAIL;
+ }
+ ptr2= kmalloc(length, GFP_NOIO);
+ memset(ptr2, 0, length);
+
+ ret=dev_ctrl_transfer(USB_DIR_IN,length,ptr2,dev);
+
+ if(ret)
+ {
+ printk(KERN_ERR "ctrl loopback fail!!\n");
+ return RET_FAIL;
+ }
+
+ for(i=0; i<length; i++){
+ if((*(ptr1+i)) != (*(ptr2+i))){
+ printk(KERN_ERR "[ERROR] buffer %d not match, tx 0x%x, rx 0x%x\n", i, *(ptr1+i), *(ptr2+i));
+ break;
+ }
+ }
+
+ kfree(ptr1);
+ kfree(ptr2);
+
+ return RET_SUCCESS;
+}
+
+int dev_loopback(char bdp,int length,int gpd_buf_size,int bd_buf_size, char dram_offset, char extension, struct usb_device *dev){
+ struct usb_device *udev, *rhdev;
+ struct usb_ctrlrequest *dr;
+ char *ptr;
+ int ret;
+ struct urb *urb;
+
+ int dma_burst = 3;
+ int dma_limiter = 3;
+
+ rhdev = my_hcd->self.root_hub;
+ if(dev){
+ udev = dev;
+ }
+ else{
+ udev = rhdev->children[g_port_id-1];
+ }
+
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ ptr= kmalloc(2048, GFP_NOIO);
+
+ memset(ptr, 0, LOOPBACK_STATE_DATA_LENGTH);
+ *ptr=0x55;
+ *(ptr+1)=0xAA;
+ *(ptr+2)=LOOPBACK_STATE_DATA_LENGTH&0xFF;
+ *(ptr+3)=LOOPBACK_STATE_DATA_LENGTH>>8;
+ *(ptr+4)=LOOPBACK_STATE&0xFF;
+ *(ptr+5)=LOOPBACK_STATE>>8;
+ *(ptr+6)=length&0xFF;
+ *(ptr+7)=(length>>8)&0xFF;
+ *(ptr+8)=(length>>16)&0xFF;
+ *(ptr+9)=(length>>24)&0xFF;
+ *(ptr+10)=gpd_buf_size&0xFF;
+ *(ptr+11)=(gpd_buf_size>>8)&0xFF;
+ *(ptr+12)=(gpd_buf_size>>16)&0xFF;
+ *(ptr+13)=(gpd_buf_size>>24)&0xFF;
+ *(ptr+14)=bd_buf_size&0xFF;
+ *(ptr+15)=(bd_buf_size>>8)&0xFF;
+ *(ptr+16)=bdp;
+ *(ptr+17)=dram_offset;
+ *(ptr+18)=extension;
+ *(ptr+19)=dma_burst;
+ *(ptr+20)=dma_limiter;
+
+
+	dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ dr->bRequest = AT_CMD_SET;
+ dr->wValue = cpu_to_le16(0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(LOOPBACK_STATE_DATA_LENGTH);
+
+ urb = alloc_ctrl_urb(dr, ptr, udev);
+ ret = f_ctrlrequest(urb, udev);
+
+ kfree(dr);
+ kfree(ptr);
+ usb_free_urb(urb);
+
+ if(ret)
+ {
+ printk(KERN_ERR "ctrl request fail!!\n");
+ return RET_FAIL;
+ }
+
+ return RET_SUCCESS;
+}
+int dev_remotewakeup(int delay, struct usb_device *dev){
+ struct usb_device *udev, *rhdev;
+ struct usb_ctrlrequest *dr;
+ char *ptr;
+ int ret;
+ struct urb *urb;
+ if(!dev){
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+ }
+ else{
+ udev = dev;
+ }
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ ptr= kmalloc(2048, GFP_NOIO);
+
+ memset(ptr, 0, REMOTE_WAKEUP_DATA_LENGTH);
+
+ *ptr=0x55;
+ *(ptr+1)=0xAA;
+ *(ptr+2)=REMOTE_WAKEUP_DATA_LENGTH&0xFF;
+ *(ptr+3)=REMOTE_WAKEUP_DATA_LENGTH>>8;
+ *(ptr+4)=REMOTE_WAKEUP&0xFF;
+ *(ptr+5)=REMOTE_WAKEUP>>8;
+ *(ptr+6)=delay&0xFF;
+ *(ptr+7)=(delay>>8)&0xFF;
+
+	dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ dr->bRequest = AT_CMD_SET;
+ dr->wValue = cpu_to_le16(0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(REMOTE_WAKEUP_DATA_LENGTH);
+
+ urb = alloc_ctrl_urb(dr, ptr, udev);
+ ret = f_ctrlrequest(urb, udev);
+
+ kfree(dr);
+ kfree(ptr);
+ usb_free_urb(urb);
+
+ if(ret)
+ {
+ printk(KERN_ERR "ctrl request fail!!\n");
+ return RET_FAIL;
+ }
+
+ return RET_SUCCESS;
+}
+
+int dev_stress(char bdp,int length,int gpd_buf_size,int bd_buf_size,char num, struct usb_device *usbdev){
+ struct usb_device *udev, *rhdev;
+ struct usb_ctrlrequest *dr;
+ char *ptr;
+ int ret;
+ struct urb *urb;
+
+ if(!usbdev){
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+ }
+ else{
+ udev = usbdev;
+ }
+
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ ptr= kmalloc(2048, GFP_NOIO);
+ memset(ptr, 0, STRESS_DATA_LENGTH);
+ *ptr=0x55;
+ *(ptr+1)=0xAA;
+ *(ptr+2)=STRESS_DATA_LENGTH&0xFF;
+ *(ptr+3)=STRESS_DATA_LENGTH>>8;
+ *(ptr+4)=STRESS&0xFF;
+ *(ptr+5)=STRESS>>8;
+ *(ptr+6)=length&0xFF;
+ *(ptr+7)=(length>>8)&0xFF;
+ *(ptr+8)=(length>>16)&0xFF;
+ *(ptr+9)=(length>>24)&0xFF;
+ *(ptr+10)=gpd_buf_size&0xFF;
+ *(ptr+11)=(gpd_buf_size>>8)&0xFF;
+ *(ptr+12)=(gpd_buf_size>>16)&0xFF;
+ *(ptr+13)=(gpd_buf_size>>24)&0xFF;
+ *(ptr+14)=bd_buf_size&0xFF;
+ *(ptr+15)=(bd_buf_size>>8)&0xFF;
+ *(ptr+16)=bdp;
+ *(ptr+17)=num;
+
+	dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ dr->bRequest = AT_CMD_SET;
+ dr->wValue = cpu_to_le16(0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(STRESS_DATA_LENGTH);
+
+ urb = alloc_ctrl_urb(dr, ptr, udev);
+ ret = f_ctrlrequest(urb, udev);
+
+ kfree(dr);
+ kfree(ptr);
+ usb_free_urb(urb);
+
+ if(ret)
+ {
+ printk(KERN_ERR "ctrl request fail!!\n");
+ return RET_FAIL;
+ }
+
+ return RET_SUCCESS;
+}
+
+
+int dev_random_stop(int length,int gpd_buf_size,int bd_buf_size,char dev_dir_1,char dev_dir_2,int stop_count_1,int stop_count_2){
+ struct usb_device *udev, *rhdev;
+ struct usb_ctrlrequest *dr;
+ char *ptr;
+ int ret;
+ struct urb *urb;
+
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ ptr= kmalloc(2048, GFP_NOIO);
+ memset(ptr, 0, RANDOM_STOP_STATE_DATA_LENGTH);
+ *ptr=0x55;
+ *(ptr+1)=0xAA;
+ *(ptr+2)=RANDOM_STOP_STATE_DATA_LENGTH&0xFF;
+ *(ptr+3)=RANDOM_STOP_STATE_DATA_LENGTH>>8;
+ *(ptr+4)=RANDOM_STOP_STATE&0xFF;
+ *(ptr+5)=RANDOM_STOP_STATE>>8;
+ *(ptr+6)=length&0xFF;
+ *(ptr+7)=(length>>8)&0xFF;
+ *(ptr+8)=(length>>16)&0xFF;
+ *(ptr+9)=(length>>24)&0xFF;
+ *(ptr+10)=gpd_buf_size&0xFF;
+ *(ptr+11)=(gpd_buf_size>>8)&0xFF;
+ *(ptr+12)=(gpd_buf_size>>16)&0xFF;
+ *(ptr+13)=(gpd_buf_size>>24)&0xFF;
+ *(ptr+14)=bd_buf_size&0xFF;
+ *(ptr+15)=(bd_buf_size>>8)&0xFF;
+ *(ptr+16)=dev_dir_1;
+ *(ptr+17)=dev_dir_2;
+ *(ptr+18)=stop_count_1&0xFF;
+ *(ptr+19)=(stop_count_1>>8)&0xFF;
+ *(ptr+20)=(stop_count_1>>16)&0xFF;
+ *(ptr+21)=(stop_count_1>>24)&0xFF;
+ *(ptr+22)=stop_count_2&0xFF;
+ *(ptr+23)=(stop_count_2>>8)&0xFF;
+ *(ptr+24)=(stop_count_2>>16)&0xFF;
+ *(ptr+25)=(stop_count_2>>24)&0xFF;
+
+	dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ dr->bRequest = AT_CMD_SET;
+ dr->wValue = cpu_to_le16(0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(RANDOM_STOP_STATE_DATA_LENGTH);
+
+ urb = alloc_ctrl_urb(dr, ptr, udev);
+ ret = f_ctrlrequest(urb, udev);
+
+ kfree(dr);
+ kfree(ptr);
+ usb_free_urb(urb);
+
+ if(ret)
+ {
+ printk(KERN_ERR "ctrl request fail!!\n");
+ return RET_FAIL;
+ }
+
+ return RET_SUCCESS;
+}
+
+int dev_notifiaction(int type,int valuel,int valueh){
+ struct usb_device *udev, *rhdev;
+ struct usb_ctrlrequest *dr;
+ char *ptr;
+ int ret;
+ struct urb *urb;
+
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ ptr= kmalloc(2048, GFP_NOIO);
+ memset(ptr, 0, DEV_NOTIFICATION_DATA_LENGTH);
+ *ptr=0x55;
+ *(ptr+1)=0xAA;
+ *(ptr+2)=DEV_NOTIFICATION_DATA_LENGTH&0xFF;
+ *(ptr+3)=DEV_NOTIFICATION_DATA_LENGTH>>8;
+ *(ptr+4)=DEV_NOTIFICATION_STATE&0xFF;
+ *(ptr+5)=DEV_NOTIFICATION_STATE>>8;
+ *(ptr+6)=valuel&0xFF;
+ *(ptr+7)=(valuel>>8)&0xFF;
+ *(ptr+8)=(valuel>>16)&0xFF;
+ *(ptr+9)=(valuel>>24)&0xFF;
+ *(ptr+10)=valueh&0xFF;
+ *(ptr+11)=(valueh>>8)&0xFF;
+ *(ptr+12)=(valueh>>16)&0xFF;
+ *(ptr+13)=(valueh>>24)&0xFF;
+ *(ptr+14)=type&0xFF;
+
+
+	dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ dr->bRequest = AT_CMD_SET;
+ dr->wValue = cpu_to_le16(0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(DEV_NOTIFICATION_DATA_LENGTH);
+
+ urb = alloc_ctrl_urb(dr, ptr, udev);
+ ret = f_ctrlrequest(urb, udev);
+
+ kfree(dr);
+ kfree(ptr);
+ usb_free_urb(urb);
+
+ if(ret)
+ {
+ printk(KERN_ERR "ctrl request fail!!\n");
+ return RET_FAIL;
+ }
+
+ return RET_SUCCESS;
+}
+
+
+int dev_power(int test_mode, char u1_value, char u2_value,char en_u1, char en_u2, struct usb_device *usbdev){
+ struct usb_device *udev, *rhdev;
+ struct usb_ctrlrequest *dr;
+ char *ptr;
+ int ret;
+ int mode;
+ struct urb *urb;
+
+ if(usbdev == NULL){
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+ }
+ else{
+ udev = usbdev;
+ }
+ if(test_mode != 3){
+ if((u1_value == 0) && (u2_value != 0)){
+ mode = 2;
+ }
+ else if((u1_value != 0) && (u2_value == 0)){
+ mode = 1;
+ }
+ else{
+ mode = 0;
+ }
+ }
+ else{
+ mode = 3;
+ }
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ ptr= kmalloc(2048, GFP_NOIO);
+ memset(ptr, 0, POWER_STATE_DATA_LENGTH);
+ *ptr=0x55;
+ *(ptr+1)=0xAA;
+ *(ptr+2)=POWER_STATE_DATA_LENGTH&0xFF;
+ *(ptr+3)=POWER_STATE_DATA_LENGTH>>8;
+ *(ptr+4)=POWER_STATE&0xFF;
+ *(ptr+5)=POWER_STATE>>8;
+ *(ptr+6)=mode&0xFF;
+ *(ptr+7)=(mode>>8)&0xFF;
+ *(ptr+8)=(mode>>16)&0xFF;
+ *(ptr+9)=(mode>>24)&0xFF;
+ *(ptr+10)= u1_value&0xFF;
+ *(ptr+11)= u2_value&0xFF;
+ *(ptr+12)= en_u1&0xFF;
+ *(ptr+13)= en_u2&0xFF;
+
+
+	dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ dr->bRequest = AT_CMD_SET;
+ dr->wValue = cpu_to_le16(0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(POWER_STATE_DATA_LENGTH);
+
+ urb = alloc_ctrl_urb(dr, ptr, udev);
+ ret = f_ctrlrequest(urb, udev);
+
+ kfree(dr);
+ kfree(ptr);
+ usb_free_urb(urb);
+
+ if(ret)
+ {
+ printk(KERN_ERR "ctrl request fail!!\n");
+ return RET_FAIL;
+ }
+
+ return RET_SUCCESS;
+}
+
+int dev_lpm_config(char lpm_mode, char wakeup,
+ char beslck, char beslck_u3, char beslckd, char cond, char cond_en){
+ struct usb_device *udev, *rhdev;
+ struct usb_ctrlrequest *dr;
+ char *ptr;
+ int ret;
+ struct urb *urb;
+
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ ptr= kmalloc(2048, GFP_NOIO);
+ memset(ptr, 0, LPM_STATE_DATA_LENGTH);
+ *ptr=0x55;
+ *(ptr+1)=0xAA;
+ *(ptr+2)=LPM_STATE_DATA_LENGTH&0xFF;
+ *(ptr+3)=LPM_STATE_DATA_LENGTH>>8;
+ *(ptr+4)=LPM_STATE&0xFF;
+ *(ptr+5)=LPM_STATE>>8;
+
+ *(ptr+6)=lpm_mode&0xFF;
+ *(ptr+7)=wakeup&0xFF;
+ *(ptr+8)=beslck&0xFF;
+ *(ptr+9)=beslck_u3&0xFF;
+ *(ptr+10)=beslckd&0xFF;
+ *(ptr+11)=cond&0xFF;
+ *(ptr+12)=cond_en&0xFF;
+
+	dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ dr->bRequest = AT_CMD_SET;
+ dr->wValue = cpu_to_le16(0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(LPM_STATE_DATA_LENGTH);
+
+ urb = alloc_ctrl_urb(dr, ptr, udev);
+ ret = f_ctrlrequest(urb, udev);
+
+ kfree(dr);
+ kfree(ptr);
+ usb_free_urb(urb);
+
+ if(ret)
+ {
+ printk(KERN_ERR "ctrl request fail!!\n");
+ return RET_FAIL;
+ }
+
+ ret = dev_polling_status(udev);
+ if(ret)
+ {
+ printk(KERN_ERR "device is still busy!!!\n");
+ return RET_FAIL;
+ }
+
+ return RET_SUCCESS;
+}
+
+
+int dev_polling_stop_status(struct usb_device *dev){
+ int i, value;
+ int count = POLLING_COUNT;
+ int delay_msecs = POLLING_STOP_DELAY_MSECS;
+
+ for(i=0; i<count; i++){
+ value=dev_query_result();
+ msleep(delay_msecs);
+ if(value != STATUS_BUSY)
+ break;
+ }
+ return value;
+}
+
+
Index: linux-3.18.21/drivers/usb/host/mtk_test/mtk-protocol.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/mtk-protocol.h 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,122 @@
+#include "mtk-usb-hcd.h"
+
+#define POLLING_COUNT		100
+#define POLLING_DELAY_MSECS	20
+#define POLLING_STOP_DELAY_MSECS	200
+
+#define RESET_STATE_DATA_LENGTH 7
+//#define DATAIN_STATE_DATA_LENGTH 15
+//#define DATAOUT_STATE_DATA_LENGTH 15
+#define CONFIGEP_STATE_DATA_LENGTH 15
+#define LOOPBACK_STATE_DATA_LENGTH 21
+#define LOOPBACK_EXT_STATE_DATA_LENGTH 19
+//#define UNITGPD_STATE_DATA_LENGTH 15
+//#define LOOPBACK_2T2R_STATE_DATA_LENGTH 15
+#define Loopback_STRESS_DATA_LENGTH 3
+#define STOP_QMU_DATA_LENGTH 8
+#define STRESS_DATA_LENGTH 18
+#define STALL_DATA_LENGTH 17
+#define WARMRESET_DATA_LENGTH 17
+#define EP_RESET_DATA_LENGTH 6
+#define RANDOM_STOP_STATE_DATA_LENGTH 26
+#define RX_ZLP_STATE_DATA_LENGTH 18
+#define DEV_NOTIFICATION_DATA_LENGTH 15
+#define STOP_QMU_STATE_DATA_LENGTH 17
+#define SINGLE_DATA_LENGTH 18
+#define POWER_STATE_DATA_LENGTH 14
+#define U1U2_STATE_DATA_LENGTH 12
+#define LPM_STATE_DATA_LENGTH 13
+#define AT_CMD_ACK_DATA_LENGTH 8
+#define REMOTE_WAKEUP_DATA_LENGTH 8
+
+#define DUMMY_DATA_LENGTH 256
+
+#define STALL_COUNT 3
+#define GET_STATUS 0x00
+#define CLEAR_FEATURE 0x01
+#define SET_FEATURE 0x03
+#define ENDPOINT_HALT 0x00
+#define EP0_IN_STALL 0xFD
+#define EP0_OUT_STALL 0xFE
+#define EP0_STALL 0xFF
+
+#if 0
+#define MULTIGPD_STATE_DATA_LENGTH 15
+#define MULTIGPD_IN_STATE_DATA_LENGTH 15
+#define MULTIGPD_OUT_STATE_DATA_LENGTH 15
+#define MULTIGPD_IN_BPS_STATE_DATA_LENGTH 15
+#define MULTIGPD_OUT_BPS_STATE_DATA_LENGTH 15
+#define DUMMY_DATA_LENGTH 256
+#endif
+
+typedef enum {
+ DEV_SPEED_INACTIVE = 0,
+ DEV_SPEED_FULL = 1,
+ DEV_SPEED_HIGH = 3,
+ DEV_SPEED_SUPER = 4,
+} USB_DEV_SPEED;
+
+typedef enum
+{
+ RESET_STATE,
+ CONFIGEP_STATE,
+ LOOPBACK_STATE,
+ LOOPBACK_EXT_STATE,
+ REMOTE_WAKEUP,
+ STRESS,
+ EP_RESET,
+ WARM_RESET,
+ STALL,
+ RANDOM_STOP_STATE,
+ RX_ZLP_STATE,
+ DEV_NOTIFICATION_STATE,
+ STOP_QMU_STATE,
+ SINGLE,
+ POWER_STATE,
+ U1U2_STATE,
+ LPM_STATE
+} USB_U3_TEST_CASE;
+
+typedef enum
+{
+ AT_CMD_SET,
+ AT_CMD_ACK,
+ AT_CTRL_TEST
+} USB_AT_CMD;
+
+
+
+typedef enum
+{
+ STATUS_READY=0,
+ STATUS_BUSY,
+ STATUS_FAIL
+} USB_U3_QUERY_STATUS;
+
+struct protocol_query {
+ __u16 header;
+ __u16 length;
+ __u16 status;
+ __u16 result;
+} __attribute__ ((packed));
+
+
+
+int dev_reset(USB_DEV_SPEED speed, struct usb_device *dev);
+int dev_config_ep(char ep_num,char dir,char type,short int maxp,char bInterval,char slot, char burst, char mult, struct usb_device *dev);
+
+int dev_config_ep0(short int maxp, struct usb_device *dev);
+int dev_query_status(struct usb_device *dev);
+int dev_polling_status(struct usb_device *dev);
+int dev_polling_stop_status(struct usb_device *dev);
+int dev_loopback(char bdp,int length,int gpd_buf_size,int bd_buf_size, char dram_offset, char extension, struct usb_device *dev);
+int dev_ctrl_loopback(int length, struct usb_device *dev);
+//int dev_remotewakeup(char bdp,int length,int gpd_buf_size,int bd_buf_size, struct usb_device *dev);
+//int dev_remotewakeup(int delay);
+int dev_remotewakeup(int delay, struct usb_device *dev);
+int dev_stress(char bdp,int length,int gpd_buf_size,int bd_buf_size,char num, struct usb_device *usbdev);
+int dev_random_stop(int length,int gpd_buf_size,int bd_buf_size,char dev_dir_1,char dev_dir_2,int stop_count_1,int stop_count_2);
+int dev_power(int test_mode, char u1_value, char u2_value,char en_u1, char en_u2, struct usb_device *usbdev);
+int dev_notifiaction(int type,int valuel,int valueh);
+int dev_lpm_config(char lpm_mode, char wakeup, char beslck, char beslck_u3, char beslckd, char cond, char cond_en);
+
Index: linux-3.18.21/drivers/usb/host/mtk_test/mtk-test-lib.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/mtk-test-lib.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,5782 @@
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+
+#include <linux/kernel.h> /* printk() */
+#include <linux/fs.h> /* everything... */
+#include <linux/errno.h> /* error codes */
+#include <linux/types.h> /* size_t */
+#include <linux/proc_fs.h>
+#include <linux/fcntl.h> /* O_ACCMODE */
+#include <linux/seq_file.h>
+#include <linux/cdev.h>
+
+#include <linux/string.h>
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <asm/unaligned.h>
+#include <linux/usb/ch9.h>
+#include <linux/wait.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/kthread.h>
+
+#define MTK_TEST_LIB
+#include "mtk-test-lib.h"
+#undef MTK_TEST_LIB
+#include "mtk-test.h"
+#include "mtk-usb-hcd.h"
+#include "mtk-protocol.h"
+#include "xhci.h"
+#include "xhci-mtk-power.h"
+
+extern u32 xhci_port_state_to_neutral(u32 state);
+
+void print_speed(int speed){
+ if(speed == USB_SPEED_SUPER){
+ printk(KERN_DEBUG "SUPER_SPEED device\n");
+ }
+ if(speed == USB_SPEED_HIGH){
+ printk(KERN_DEBUG "HIGH_SPEED device\n");
+ }
+ if(speed == USB_SPEED_FULL){
+ printk(KERN_DEBUG "FULL_SPEED device\n");
+ }
+ if(speed == USB_SPEED_LOW){
+ printk(KERN_DEBUG "LOW_SPEED device\n");
+ }
+}
+
+int mtk_xhci_handshake(struct xhci_hcd *xhci, void __iomem *ptr,
+ u32 mask, u32 done, int msec)
+{
+ u32 result;
+
+ do {
+ result = xhci_readl(xhci, ptr);
+ if (result == ~(u32)0) /* card removed */
+ return RET_FAIL;
+ result &= mask;
+ if (result == done)
+ return RET_SUCCESS;
+ mdelay(1);
+ msec--;
+ } while (msec > 0);
+ return RET_FAIL;
+}
+
+int get_port_id(int slot_id){
+ struct xhci_hcd *xhci;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_slot_ctx *out_ctx;
+
+ xhci = hcd_to_xhci(my_hcd);
+ virt_dev = xhci->devs[slot_id];
+ out_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+ return (((cpu_to_le32(out_ctx->dev_info2)) >> 16) & 0xff);
+}
+
+int get_port_index(int port_id){
+ int i;
+ struct xhci_port *port;
+
+ for(i=0; i<RH_PORT_NUM; i++){
+ port = rh_port[i];
+ if(port->port_id == port_id){
+ return i;
+ }
+ }
+ for(i=0; i<RH_PORT_NUM; i++){
+ port = rh_port[i];
+ if(port->port_id == 0){
+ return i;
+ }
+ }
+ for(i=0; i<RH_PORT_NUM; i++){
+ port = rh_port[i];
+ if(port->port_status == DISCONNECTED){
+ port->port_id = port_id;
+ return i;
+ }
+ }
+ return RH_PORT_NUM;
+}
+
+int wait_not_event_on_timeout(int *ptr, int value, int msecs){
+ int i;
+ for(i= msecs; i>0; i--){
+ if(*ptr != value){
+ return RET_SUCCESS;
+ }
+ msleep(1);
+ }
+ if(*ptr != value)
+ return RET_SUCCESS;
+ else
+ return RET_FAIL;
+}
+
+int wait_event_on_timeout(volatile int *ptr, int done, int msecs){
+ int i;
+
+ for(i = (msecs * 100); i > 0; i--){
+ if(*ptr == done){
+ return RET_SUCCESS;
+ }
+ udelay(10);
+ }
+
+ if(*ptr == done)
+ return RET_SUCCESS;
+ else
+ return RET_FAIL;
+}
+
+int poll_event_on_timeout(volatile int *ptr, int done, int msecs){
+ int i;
+ for(i= msecs; i>0; i--){
+ if(le32_to_cpu(*ptr) == done){
+ return RET_SUCCESS;
+ }
+ msleep(1);
+ }
+ if(le32_to_cpu(*ptr) == done)
+ return RET_SUCCESS;
+ else
+ return RET_FAIL;
+}
+
+int f_test_lib_init(){
+ int ret;
+ int i;
+ struct xhci_port *port;
+
+ my_hcd = NULL;
+ g_port_connect = false;
+ g_port_reset = false;
+ g_port_id = 0;
+ g_slot_id = 0;
+ g_speed = 0; /* UNKNOWN_SPEED */
+ g_cmd_status = CMD_DONE;
+ g_event_full = false;
+ g_got_event_full = false;
+ g_intr_handled = -1;
+ g_is_bei = false;
+ g_td_to_noop = false;
+ g_iso_frame = false;
+ g_test_random_stop_ep = false;
+ g_stopping_ep = false;
+ g_cmd_ring_pointer1 = 0;
+ g_cmd_ring_pointer2 = 0;
+ g_idt_transfer = false;
+ g_hs_block_reset = false;
+ g_concurrent_resume = false;
+
+ for(i=0; i<DEV_NUM; i++){
+ dev_list[i] = NULL;
+ }
+ for(i=0; i<HUB_DEV_NUM; i++){
+ hdev_list[i] = NULL;
+ }
+ for(i=0; i<RH_PORT_NUM; i++){
+ port = NULL;
+ port = kmalloc(sizeof(struct xhci_port), GFP_NOIO);
+ port->port_id = 0;
+ port->port_speed = 0;
+ port->port_status = 0;
+ rh_port[i] = port;
+ }
+ port = NULL;
+ for(i=0; i<RH_PORT_NUM; i++){
+ port = rh_port[i];
+ printk("port[%d]: 0x%x\n", i, port);
+ printk(" port_id=%d\n", port->port_id);
+ printk(" port_speed=%d\n", port->port_speed);
+ printk(" port_status=%d\n", port->port_status);
+ }
+
+ ret = mtk_xhci_hcd_init();
+ if(ret){
+ printk(KERN_DEBUG "hcd init fail!!\n");
+ return RET_FAIL;
+ }
+ return RET_SUCCESS;
+}
+
+int f_test_lib_cleanup(){
+ int ret;
+ int i;
+ struct xhci_port *port;
+
+ for(i=0; i<RH_PORT_NUM; i++){
+ port = rh_port[i];
+ kfree(port);
+ rh_port[i] = NULL;
+ }
+ if(my_hcd == NULL){
+ printk(KERN_ERR "driver already cleared\n");
+ return RET_SUCCESS;
+ }
+ mtk_xhci_hcd_cleanup();
+ my_hcd = NULL;
+
+ return RET_SUCCESS;
+}
+
+int f_port_set_pls(int port_id, int pls){
+ struct xhci_hcd *xhci;
+ u32 __iomem *addr;
+ int temp;
+
+ xhci = hcd_to_xhci(my_hcd);
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ temp = xhci_port_state_to_neutral(temp);
+ temp = temp & ~(PORT_PLS_MASK);
+ temp = temp | (pls << 5) | PORT_LINK_STROBE;
+ xhci_writel(xhci, temp, addr);
+ mtk_xhci_handshake(xhci, addr, (15<<5), (pls<<5), 3000);
+ temp = xhci_readl(xhci, addr);
+ if(PORT_PLS_VALUE(temp) != pls){
+ return RET_FAIL;
+ }
+ return RET_SUCCESS;
+}
+
+struct random_regs_data{
+ struct xhci_hcd *xhci;
+ int port_id;
+ int speed;
+ int power_required;
+};
+
+static int access_regs_thread(void *data){
+ struct random_regs_data *rreg_data = data;
+ struct xhci_hcd *xhci;
+ int port_id;
+ int speed;
+ unsigned int randomSleep;
+ u32 __iomem *addr;
+ int num_u3_port;
+ int port_index;
+ struct xhci_port *port;
+ int temp;
+ unsigned int randomIndex;
+ unsigned int randomOffset;
+
+ g_power_down_allowed = 1;
+ xhci = rreg_data->xhci;
+ port_id = rreg_data->port_id;
+ speed = rreg_data->speed;
+ num_u3_port = SSUSB_U3_PORT_NUM(readl(SSUSB_IP_CAP));
+ port_index = get_port_index(port_id);
+ port = rh_port[port_index];
+
+ xhci_err(xhci, "random access regs thread initial\n");
+ do {
+ xhci_err(xhci, "round \n");
+ /* randomly sleep a while */
+ randomSleep = get_random_int();
+ randomSleep = randomSleep%100;
+ xhci_err(xhci, "sleep %d msecs\n", randomSleep);
+ msleep(randomSleep);
+ /* enable port clock/power if needed */
+ if(rreg_data->power_required){
+ g_power_down_allowed = 0;
+ enablePortClockPower(port_id,rreg_data->speed);
+ }
+ /* random access(read) MAC3/MAC2 regs */
+ randomIndex = get_random_int()%3;
+ if(randomIndex == 0){
+ addr = SSUSB_U3_MAC_BASE;
+ }
+ else if(randomIndex == 1){
+ addr = SSUSB_U3_SYS_BASE;
+ }
+ else if(randomIndex == 2){
+ addr = SSUSB_U2_SYS_BASE;
+ }
+ xhci_err(xhci, "randomIndex %d\n", randomIndex);
+ randomOffset = get_random_int()%0x80; //4 bytes align addr
+ xhci_err(xhci, "randomOffset 0x%x\n", randomOffset);
+ addr = addr + randomOffset;
+ xhci_err(xhci, "read 0x%x\n", addr);
+ temp = xhci_readl(xhci, addr);
+ /* access port_status regs */
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((g_port_id-1) & 0xff);
+ xhci_err(xhci, "read 0x%x\n", addr);
+ temp = xhci_readl(xhci, addr);
+ } while(1);
+ xhci_err(xhci, "random access regs stopped\n");
+ return RET_SUCCESS;
+}
+
+int f_add_random_access_reg_thread(struct xhci_hcd *xhci, int port_id, int port_rev
+ , int power_required){
+
+ struct random_regs_data *rreg_data;
+ xhci_err(xhci, "start random access regs thread, port_id: %d, port_rev %d, power required %d\n"
+ , port_id, port_rev, power_required);
+ rreg_data = kzalloc(sizeof(struct random_regs_data), GFP_KERNEL);
+ rreg_data->xhci = xhci;
+ rreg_data->port_id = port_id;
+ rreg_data->speed = port_rev;
+ rreg_data->power_required = power_required;
+ kthread_run(access_regs_thread, rreg_data, "rrgt");
+}
+
+struct random_doorbell_data {
+ struct xhci_hcd *xhci;
+ int slot_id;
+ int ep_index;
+};
+
+static int ring_doorbell_thread(void *data)
+{
+ struct random_doorbell_data *rrdb_data = data;
+ struct xhci_hcd *xhci;
+ int slot_id;
+ int ep_index;
+ struct xhci_virt_device *vdev;
+ struct xhci_virt_ep *ep;
+ unsigned int randomSleep = 1000;
+ u32 field;
+
+ xhci = rrdb_data->xhci;
+ slot_id = rrdb_data->slot_id;
+ ep_index = rrdb_data->ep_index;
+ __u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
+ xhci_err(xhci, "random ring doorbell thread is running\n");
+ vdev = xhci->devs[slot_id];
+ ep = &(vdev->eps[ep_index]);
+ xhci_err(xhci, "ep_state: 0x%x\n", ep->ep_state);
+ do {
+ randomSleep = get_random_int();
+ randomSleep = randomSleep%100;
+ xhci_err(xhci, "sleep: %d\n", randomSleep);
+ msleep(randomSleep);
+ xhci_err(xhci, "ring ep doorbell, slot id: %d, ep index: %d\n"
+ , slot_id, ep_index);
+ if (!(ep->ep_state & EP_HALT_PENDING) && !(ep->ep_state & SET_DEQ_PENDING)
+ && !(ep->ep_state & EP_HALTED)) {
+ field = xhci_readl(xhci, db_addr) & DB_MASK;
+ field |= EPI_TO_DB(ep_index);
+ writel(field, db_addr);
+ xhci_writel(xhci, field, db_addr);
+ }
+ vdev = xhci->devs[slot_id];
+ xhci_err(xhci, "vdev: 0x%x\n", vdev);
+ if(vdev)
+ ep = &(vdev->eps[ep_index]);
+ xhci_err(xhci, "ep: 0x%x\n", ep);
+ } while (vdev && ep && (!(ep->ep_state & EP_HALT_PENDING) && !(ep->ep_state & SET_DEQ_PENDING)
+ && !(ep->ep_state & EP_HALTED)));
+ xhci_err(xhci, "ring_doorbell thread stopped, slot id: %d, ep index: %d, state: 0x%x\n"
+ , slot_id, ep_index, ep->ep_state);
+ return 0;
+}
+
+int f_add_random_ring_doorbell_thread(struct xhci_hcd *xhci, int slot_id, int ep_index){
+
+ struct random_doorbell_data *rrdb_data;
+ xhci_err(xhci, "start random ring doorbell thread, eps: %d\n", ep_index);
+ rrdb_data = kzalloc(sizeof(struct random_doorbell_data), GFP_KERNEL);
+ rrdb_data->xhci = xhci;
+ rrdb_data->slot_id = slot_id;
+ rrdb_data->ep_index = ep_index;
+ kthread_run(ring_doorbell_thread, rrdb_data, "rdbt");
+}
+
+struct stop_endpoint_data {
+ struct xhci_hcd *xhci;
+ int slot_id;
+ int ep_index;
+};
+
+static int stop_endpoint_thread(void *data){
+ struct stop_endpoint_data *stpep_data = data;
+ struct xhci_hcd *xhci;
+ int slot_id;
+ int ep_index;
+ struct xhci_virt_device *vdev;
+ struct xhci_virt_ep *ep;
+ unsigned int randomSleep = 1000;
+ u32 field;
+
+ xhci = stpep_data->xhci;
+ slot_id = stpep_data->slot_id;
+ ep_index = stpep_data->ep_index;
+ __u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
+ vdev = xhci->devs[slot_id];
+ ep = &(vdev->eps[ep_index]);
+ do {
+ randomSleep = get_random_int();
+
+ randomSleep = randomSleep%100;
+ msleep(randomSleep);
+ if (!(ep->ep_state & EP_HALT_PENDING) && !(ep->ep_state & SET_DEQ_PENDING)
+ && !(ep->ep_state & EP_HALTED)) {
+ while(g_stopping_ep){msleep(1);}
+ g_stopping_ep = true;
+ xhci_queue_stop_endpoint(xhci, slot_id, ep_index);
+ xhci_ring_cmd_db(xhci);
+ g_stopping_ep = false;
+ msleep(100);
+ field = xhci_readl(xhci, db_addr) & DB_MASK;
+ field |= EPI_TO_DB(ep_index);
+ writel(field, db_addr);
+ }
+ }while (vdev && ep && (!(ep->ep_state & EP_HALT_PENDING) && !(ep->ep_state & SET_DEQ_PENDING)
+ && !(ep->ep_state & EP_HALTED)));
+ xhci_err(xhci, "stop endpoint thread stopped, slot id: %d, ep index: %d, state: 0x%x\n"
+ , slot_id, ep_index, ep->ep_state);
+ return 0;
+}
+
+int f_add_random_stop_ep_thread(struct xhci_hcd *xhci, int slot_id, int ep_index){
+ struct stop_endpoint_data *stpep_data;
+ xhci_err(xhci, "start random stop ep thread, eps: %d\n", ep_index);
+ stpep_data = kzalloc(sizeof(struct stop_endpoint_data), GFP_KERNEL);
+ stpep_data->xhci = xhci;
+ stpep_data->slot_id = slot_id;
+ stpep_data->ep_index = ep_index;
+ kthread_run(stop_endpoint_thread, stpep_data, "rstpep");
+}
+
+
+int f_enable_port(int index){
+ int timeout = ATTACH_TIMEOUT;
+ struct xhci_port *port = rh_port[index];
+
+ /* waiting for device to connect */
+ xhci_err(xhci, "Waiting for device[%d] to attach\n", index);
+ while(port->port_status != ENABLED && timeout > 0){
+ msleep(1);
+ timeout--;
+ }
+ if(port->port_status != ENABLED){
+ xhci_err(xhci, "[ERROR] port[%d] enabled timeout\n", index);
+ return RET_FAIL;
+ }
+ g_port_id = port->port_id;
+ xhci_dbg(xhci, "port[%d], id=%d enabled done\n", index, port->port_id);
+ xhci_dbg(xhci, " port_reenabled: %d, port_speed: %d, port_status: %d\n"
+ , port->port_reenabled, port->port_speed, port->port_status);
+ return RET_SUCCESS;
+}
+
+int f_disconnect_port(int index){
+ struct xhci_port *port;
+ int timeout = ATTACH_TIMEOUT;
+
+ port = rh_port[index];
+ /* waiting for device to disconnect */
+ xhci_dbg(xhci, "Waiting for device[%d] to disconnect\n", index);
+ while(port->port_status != DISCONNECTED && timeout > 0){
+ ssleep(1);
+ timeout--;
+ }
+ if(port->port_status != DISCONNECTED){
+ xhci_err(xhci, "[ERROR] Device disconnect timeout\n");
+ return RET_FAIL;
+ }
+ g_port_id = port->port_id;
+ xhci_dbg(xhci, "port [%d] disconnect done\n", index);
+ return RET_SUCCESS;
+}
+
+void start_port_reenabled(int index, int speed){
+ struct xhci_port *port;
+ port = rh_port[index];
+ g_port_id = port->port_id;
+ int next_port_index;
+ int cur_speed;
+ if(index == 0){
+ next_port_index = 1;
+ }
+ else{
+ next_port_index = 0;
+ }
+ cur_speed = DEV_SPEED_SUPER;
+ if(port->port_speed == USB_SPEED_SUPER){
+ cur_speed = DEV_SPEED_SUPER;
+ }
+ else if(port->port_speed == USB_SPEED_HIGH){
+ cur_speed = DEV_SPEED_HIGH;
+ }
+ else if(port->port_speed == USB_SPEED_FULL){
+ cur_speed = DEV_SPEED_FULL;
+ }
+ if((cur_speed != speed) && ((speed == DEV_SPEED_SUPER) || (cur_speed == DEV_SPEED_SUPER))){
+ rh_port[next_port_index]->port_id = port->port_id;
+ rh_port[next_port_index]->port_speed = port->port_speed;
+ rh_port[next_port_index]->port_reenabled = port->port_reenabled;
+ rh_port[next_port_index]->port_status = port->port_status;
+ port->port_id = 0;
+ port->port_speed = 0;
+ port->port_reenabled = 1;
+ port->port_status = DISCONNECTED;
+ }
+ else{
+ port->port_reenabled = 0;
+ }
+
+ return;
+}
+
+int f_reenable_port(int index){
+ struct xhci_port *port;
+ int timeout = ATTACH_TIMEOUT;
+
+ port = rh_port[index];
+ return wait_event_on_timeout(&(port->port_reenabled), 2, ATTACH_TIMEOUT);
+}
+
+int f_enable_dev_note(){
+ struct xhci_hcd *xhci;
+ u32 __iomem *addr;
+ u32 temp;
+
+ xhci = hcd_to_xhci(my_hcd);
+ addr = &xhci->op_regs->dev_notification;
+ temp = 0xffff;
+ xhci_writel(xhci, temp, addr);
+ return RET_SUCCESS;
+}
+
+int f_enable_slot(struct usb_device *dev){
+ struct xhci_hcd *xhci;
+ struct usb_device *udev, *rhdev;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_port *port;
+ int port_index;
+ int ret;
+
+ xhci = hcd_to_xhci(my_hcd);
+ if(dev == NULL){
+ if(g_port_id == 0){
+ xhci_err(xhci, "[ERROR] g_port_id is 0!\n");
+ return RET_FAIL;
+ }
+ port_index = get_port_index(g_port_id);
+ port = rh_port[port_index];
+
+ /* new usb device */
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+ udev = mtk_usb_alloc_dev(rhdev, rhdev->bus, g_port_id);
+ udev->speed = port->port_speed;
+ udev->level = rhdev->level + 1;
+ rhdev->children[g_port_id-1] = udev;
+ }
+ else{
+ udev = dev;
+ }
+ /* enable slot */
+ g_cmd_status = CMD_RUNNING;
+ ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
+ if (ret) {
+ xhci_err(xhci, "[ERROR]FIXME: allocate a command ring segment\n");
+ return RET_FAIL;
+ }
+ xhci_dbg(xhci, "enable slot command\n");
+ xhci_ring_cmd_db(xhci);
+ wait_event_on_timeout(&g_cmd_status, CMD_DONE, CMD_TIMEOUT);
+ if(g_cmd_status == CMD_RUNNING){
+ xhci_err(xhci, "[ERROR]command timeout\n");
+ return RET_FAIL;
+ }
+ if(g_cmd_status == CMD_FAIL){
+ xhci_err(xhci, "[ERROR]command failed\n");
+ return RET_FAIL;
+ }
+ xhci_dbg(xhci, "enable slot done\n");
+
+ /* alloc xhci_virt_device */
+ xhci_dbg(xhci, "g_slot_id %d\n", g_slot_id);
+ udev->slot_id = g_slot_id;
+ if (!xhci_alloc_virt_device(xhci, g_slot_id, udev, GFP_KERNEL)) {
+ /* Disable slot, if we can do it without mem alloc */
+ xhci_warn(xhci, "[WARN]Could not allocate xHCI USB device data structures\n");
+ if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, g_slot_id))
+ xhci_ring_cmd_db(xhci);
+ return RET_FAIL;
+ }
+ xhci_dbg(xhci, "alloc xhci_virt_device done\n");
+ return RET_SUCCESS;
+}
+
+int f_address_slot(char isBSR, struct usb_device *dev){
+ struct xhci_hcd *xhci;
+ struct usb_device *udev, *rhdev;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_slot_ctx *slot_ctx;
+ unsigned long flags;
+ int ret;
+
+ xhci = hcd_to_xhci(my_hcd);
+ if(dev == NULL){
+ if(g_slot_id == 0){
+ xhci_err(xhci, "[ERROR] global slot ID not valid\n");
+ return RET_FAIL;
+ }
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+ }
+ else{
+ udev = dev;
+ }
+ /* address device */
+ g_cmd_status = CMD_RUNNING;
+ virt_dev = xhci->devs[udev->slot_id];
+ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
+ /*
+ * If this is the first Set Address since device plug-in or
+ * virt_device realloaction after a resume with an xHCI power loss,
+ * then set up the slot context.
+ */
+ if (!slot_ctx->dev_info)
+ xhci_setup_addressable_virt_dev(xhci, udev);
+ /* Otherwise, update the control endpoint ring enqueue pointer. */
+ else{
+ xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
+ }
+ spin_lock_irqsave(&xhci->lock, flags);
+ ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, udev->slot_id, isBSR);
+ if (ret) {
+ xhci_err(xhci, "[ERROR]FIXME: allocate a command ring segment\n");
+ return RET_FAIL;
+ }
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_ring_cmd_db(xhci);
+ xhci_dbg(xhci, "address device\n");
+ wait_event_on_timeout(&g_cmd_status, CMD_DONE, CMD_TIMEOUT);
+ if(g_cmd_status == CMD_RUNNING){
+ xhci_err(xhci, "[ERROR]command timeout\n");
+ return RET_FAIL;
+ }
+ if(g_cmd_status == CMD_FAIL){
+ xhci_err(xhci, "[ERROR]command failed\n");
+ return RET_FAIL;
+ }
+ xhci_dbg(xhci, "address device done\n");
+
+ /* wait device to finish set_address request, 3351 capability */
+ msleep(100);
+
+ return RET_SUCCESS;
+}
+
+int f_disable_slot(){
+ struct xhci_hcd *xhci;
+ xhci = hcd_to_xhci(my_hcd);
+ /* disable slot */
+ g_cmd_status = CMD_RUNNING;
+ xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, g_slot_id);
+ xhci_ring_cmd_db(xhci);
+ wait_event_on_timeout(&g_cmd_status, CMD_DONE, CMD_TIMEOUT);
+ if(g_cmd_status == CMD_RUNNING){
+ xhci_err(xhci, "[ERROR] disable slot timeout\n");
+ return RET_FAIL;
+ }
+ if(g_cmd_status == CMD_DONE){
+ xhci_dbg(xhci, "disable slot done\n");
+ }
+ else{
+ xhci_err(xhci, "[ERROR]disable slot fail\n");
+ return RET_FAIL;
+ }
+ xhci_free_virt_device(xhci, g_slot_id);
+ g_slot_id = 0;
+ return RET_SUCCESS;
+}
+
+int f_evaluate_context(int max_exit_latency, int maxp0, int preping_mode, int preping, int besl, int besld){
+ struct xhci_hcd *xhci;
+ struct usb_device *udev, *rhdev;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_slot_ctx *out_ctx, *in_ctx;
+ struct xhci_container_ctx *in_container_ctx;
+ struct xhci_ep_ctx *ep0_in_ctx, *ep0_out_ctx;
+ struct xhci_port *port;
+ int port_id, port_index;
+ struct xhci_input_control_ctx *ctrl_ctx;
+ int orig_maxp, new_maxp, orig_max_exit_latency, new_max_exit_latency, orig_preping_mode
+ , new_preping_mode, orig_preping, new_preping;
+ int orig_besl, new_besl, orig_besld, new_besld;
+ int ret;
+
+ ret = 0;
+ xhci = hcd_to_xhci(my_hcd);
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+ port_id = get_port_id(g_slot_id);
+ port_index = get_port_index(port_id);
+ port = rh_port[port_index];
+ virt_dev = xhci->devs[g_slot_id];
+ out_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+ in_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
+ in_container_ctx = virt_dev->in_ctx;
+ ep0_out_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, 0);
+ ep0_in_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
+
+ /* if default state, ep0 may be used before */
+ if(GET_SLOT_STATE(le32_to_cpu(out_ctx->dev_state)) == SLOT_STATE_DEFAULT ||
+ GET_SLOT_STATE(le32_to_cpu(out_ctx->dev_state)) == SLOT_STATE_ADDRESSED ||
+ GET_SLOT_STATE(le32_to_cpu(out_ctx->dev_state)) == SLOT_STATE_CONFIGURED){
+ xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
+ }
+ orig_maxp = (le32_to_cpu(ep0_out_ctx->ep_info2) >> 16);
+ orig_max_exit_latency = (le32_to_cpu(out_ctx->dev_info2) & 0xffff);
+ ep0_in_ctx->ep_info2 &= cpu_to_le32(~(0xffff << 16));
+ ep0_in_ctx->ep_info2 |= cpu_to_le32(maxp0 << 16);
+ in_ctx->dev_info2 &= cpu_to_le32(~(0xffff));
+ in_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
+ in_ctx->reserved[0] &= cpu_to_le32(~(0x7fff));
+ in_ctx->reserved[0] |= cpu_to_le32(preping);
+ in_ctx->reserved[0] &= cpu_to_le32(~(1 << 16));
+ in_ctx->reserved[0] |= cpu_to_le32((preping_mode << 16));
+ in_ctx->reserved[1] &= cpu_to_le32(~(0xffff));
+ in_ctx->reserved[1] |=cpu_to_le32( ((besld << 8) | besl));
+
+ xhci_dbg_ctx(xhci, in_container_ctx, 0);
+
+ /* Set up the input context flags for the command */
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, in_container_ctx);
+ ctrl_ctx->add_flags |= cpu_to_le32(EP0_FLAG);
+ ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
+ ctrl_ctx->drop_flags = 0;
+
+ g_cmd_status = CMD_RUNNING;
+ xhci_queue_evaluate_context(xhci, in_container_ctx->dma, g_slot_id);
+ xhci_ring_cmd_db(xhci);
+ wait_event_on_timeout(&g_cmd_status, CMD_DONE, CMD_TIMEOUT);
+ if(g_cmd_status == CMD_RUNNING){
+ xhci_err(xhci, "[ERROR] evaluate context timeout\n");
+ return RET_FAIL;
+ }
+ if(g_cmd_status == CMD_DONE){
+ xhci_dbg(xhci, "evaluate context done\n");
+ }
+ else{
+ xhci_err(xhci, "[ERROR]evaluate context fail\n");
+ return RET_FAIL;
+ }
+ xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0);
+ new_maxp = (le32_to_cpu(ep0_out_ctx->ep_info2) >> 16);
+ new_max_exit_latency = (le32_to_cpu(out_ctx->dev_info2) & 0xffff);
+ new_preping_mode = ((le32_to_cpu(out_ctx->reserved[0]) >> 16) & 0x1);
+ new_preping = (le32_to_cpu(out_ctx->reserved[0]) & 0x7fff);
+ new_besld = (((le32_to_cpu(out_ctx->reserved[1])) >> 8) & 0xff);
+ new_besl = (le32_to_cpu(out_ctx->reserved[1]) & 0xff);
+ if(new_maxp != maxp0){
+ xhci_err(xhci, "[ERROR] maxp doesn't match input[%d], new[%d]\n", maxp0, new_maxp);
+ ret = RET_FAIL;
+ }
+ if(new_max_exit_latency != max_exit_latency){
+ xhci_err(xhci, "[ERROR] max_exit_latency doesn't match input[%d], new[%d]\n"
+ , max_exit_latency, new_max_exit_latency);
+ ret = RET_FAIL;
+ }
+ if(new_preping_mode != preping_mode){
+ xhci_err(xhci, "[ERROR] preping_mode doesn't match input[%d], new[%d]\n"
+ , preping_mode, new_preping_mode);
+ ret = RET_FAIL;
+ }
+ if(new_preping != preping){
+ xhci_err(xhci, "[ERROR] preping doesn't match input[%d], new[%d]\n"
+ , preping, new_preping);
+ ret = RET_FAIL;
+ }
+ if(new_besld != besld){
+ xhci_err(xhci, "[ERROR] besld doesn't match input[%d], new[%d]\n"
+ , besld, new_besld);
+ ret = RET_FAIL;
+ }
+ if(new_besl != besl){
+ xhci_err(xhci, "[ERROR] besl doesn't match input[%d], new[%d]\n"
+ , besl, new_besl);
+ ret = RET_FAIL;
+ }
+ return ret;
+
+}
+
+struct urb *alloc_ctrl_urb(struct usb_ctrlrequest *dr, char *buffer, struct usb_device *udev){
+ struct urb *urb;
+ dma_addr_t mapping;
+ struct device *dev;
+ struct xhci_hcd *xhci;
+
+ xhci = hcd_to_xhci(my_hcd);
+ dev = xhci_to_hcd(xhci)->self.controller;
+ urb = usb_alloc_urb(0, GFP_NOIO);
+ if(dr->bRequestType & USB_DIR_IN){
+ usb_fill_control_urb(urb, udev, usb_rcvctrlpipe(udev, 0), (unsigned char *)dr, buffer,
+ le16_to_cpu(dr->wLength), NULL, NULL);
+ }
+ else{
+ usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0), (unsigned char *)dr, buffer,
+ le16_to_cpu(dr->wLength), NULL, NULL);
+ }
+ urb->status = -EINPROGRESS;
+ urb->actual_length = 0;
+ urb->transfer_flags |= (dr->bRequestType & USB_DIR_MASK);
+ urb->ep = &udev->ep0;
+ if(buffer){
+ if (le16_to_cpu(dr->wLength) != 0)
+ {
+ mapping = dma_map_single(dev, buffer, le16_to_cpu(dr->wLength), DMA_BIDIRECTIONAL);
+ urb->transfer_dma = mapping;
+ dma_sync_single_for_device(dev, mapping, urb->transfer_buffer_length, DMA_BIDIRECTIONAL);
+ }
+ }
+
+ return urb;
+}
+
+int f_ctrlrequest_nowait(struct urb *urb, struct usb_device *udev){
+ int ret;
+ int i;
+ char *tmp;
+ struct device *dev;
+ struct xhci_hcd *xhci;
+ xhci = hcd_to_xhci(my_hcd);
+ struct urb_priv *urb_priv;
+ unsigned long flags;
+ int size;
+
+ dev = xhci_to_hcd(xhci)->self.controller;
+ size = 1;
+ urb_priv = kmalloc(sizeof(struct urb_priv) + size * sizeof(struct xhci_td *), GFP_KERNEL);
+
+ if (!urb_priv){
+ xhci_err(xhci, "[ERROR] allocate urb_priv failed\n");
+ return RET_FAIL;
+ }
+
+ for (i = 0; i < size; i++) {
+ urb_priv->td[i] = kmalloc(sizeof(struct xhci_td), GFP_KERNEL);
+ if (!urb_priv->td[i]) {
+ urb_priv->length = i;
+ xhci_urb_free_priv(xhci, urb_priv);
+ return RET_FAIL;
+ }
+ }
+
+ urb_priv->length = size;
+ urb_priv->td_cnt = 0;
+ urb->hcpriv = urb_priv;
+ urb->status = -EINPROGRESS;
+ xhci_dbg(xhci, "ctrl request\n");
+ xhci_dbg(xhci, "setup packet: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n"
+ , *(urb->setup_packet), *(urb->setup_packet+1), *(urb->setup_packet+2), *(urb->setup_packet+3)
+ , *(urb->setup_packet+4), *(urb->setup_packet+5), *(urb->setup_packet+6), *(urb->setup_packet+7));
+ spin_lock_irqsave(&xhci->lock, flags);
+ ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, udev->slot_id, 0);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ return RET_SUCCESS;
+}
+
+int f_ctrlrequest(struct urb *urb, struct usb_device *udev){
+ int ret;
+ int i;
+ char *tmp;
+ struct device *dev;
+ struct xhci_hcd *xhci;
+ xhci = hcd_to_xhci(my_hcd);
+ struct urb_priv *urb_priv;
+ unsigned long flags;
+ int size;
+
+ dev = xhci_to_hcd(xhci)->self.controller;
+ size = 1;
+ urb_priv = kmalloc(sizeof(struct urb_priv) + size * sizeof(struct xhci_td *), GFP_KERNEL);
+
+ if (!urb_priv){
+ xhci_err(xhci, "[ERROR] allocate urb_priv failed\n");
+ return RET_FAIL;
+ }
+
+ for (i = 0; i < size; i++) {
+ urb_priv->td[i] = kmalloc(sizeof(struct xhci_td), GFP_KERNEL);
+ if (!urb_priv->td[i]) {
+ urb_priv->length = i;
+ xhci_urb_free_priv(xhci, urb_priv);
+ return RET_FAIL;
+ }
+ }
+ urb_priv->length = size;
+ urb_priv->td_cnt = 0;
+ urb->hcpriv = urb_priv;
+ urb->status = -EINPROGRESS;
+ xhci_dbg(xhci, "ctrl request\n");
+ xhci_dbg(xhci, "setup packet: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n"
+ , *(urb->setup_packet), *(urb->setup_packet+1), *(urb->setup_packet+2), *(urb->setup_packet+3)
+ , *(urb->setup_packet+4), *(urb->setup_packet+5), *(urb->setup_packet+6), *(urb->setup_packet+7));
+ spin_lock_irqsave(&xhci->lock, flags);
+ ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, udev->slot_id, 0);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ wait_not_event_on_timeout(&(urb->status), -EINPROGRESS, TRANS_TIMEOUT);
+ if(urb->status == 0){
+ if(urb->transfer_buffer_length > 0){
+ dma_sync_single_for_cpu(dev,urb->transfer_dma,urb->transfer_buffer_length,DMA_BIDIRECTIONAL);
+ }
+ xhci_urb_free_priv(xhci, urb->hcpriv);
+ return RET_SUCCESS;
+ }
+ else{
+ xhci_err(xhci, "[ERROR] control request failed\n");
+ ret = urb->status;
+ xhci_urb_free_priv(xhci, urb_priv);
+ return RET_FAIL;
+ }
+}
+
+int f_update_hub_device(struct usb_device *udev, int num_ports){
+ int ret;
+ struct xhci_hcd *xhci;
+ struct xhci_virt_device *vdev;
+ struct xhci_slot_ctx *in_ctx;
+
+ ret = 0;
+ xhci = hcd_to_xhci(my_hcd);
+ vdev = xhci->devs[udev->slot_id];
+ in_ctx = xhci_get_slot_ctx(xhci, vdev->in_ctx);
+ in_ctx->dev_info |= DEV_HUB;
+ in_ctx->dev_info2 |= XHCI_MAX_PORTS(num_ports);
+ in_ctx->tt_info |= TT_THINK_TIME(0);
+
+ return ret;
+}
+
+int f_udev_add_ep(struct usb_host_endpoint *ep, struct usb_device *udev){
+ int ret;
+ struct xhci_hcd *xhci;
+ struct xhci_virt_device *virt_dev;
+ int epnum, is_out;
+ int ep_index;
+
+ epnum = usb_endpoint_num(&ep->desc);
+ is_out = usb_endpoint_dir_out(&ep->desc);
+ xhci = hcd_to_xhci(my_hcd);
+ virt_dev = xhci->devs[udev->slot_id];
+
+ if(is_out){
+ udev->ep_out[epnum] = ep;
+ }
+ else{
+ udev->ep_in[epnum] = ep;
+ }
+ ret = xhci_mtk_add_endpoint(my_hcd, udev, ep);
+ if(ret){
+ xhci_err(xhci, "[ERROR] add endpoint failed\n");
+ return RET_FAIL;
+ }
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ virt_dev->eps[ep_index].ring = virt_dev->eps[ep_index].new_ring;
+
+ return RET_SUCCESS;
+}
+
+int f_udev_drop_ep(int is_out, int epnum, struct usb_device *udev){
+ int ret;
+ struct xhci_hcd *xhci;
+ struct xhci_virt_device *virt_dev;
+ int ep_index;
+ struct usb_host_endpoint *ep;
+
+ if(is_out){
+ ep = udev->ep_out[epnum];
+ }
+ else{
+ ep = udev->ep_in[epnum];
+ }
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ ret = xhci_mtk_drop_endpoint(my_hcd, udev, ep);
+ if(ret){
+ xhci_err(xhci, "[ERROR] drop endpoint failed\n");
+ return RET_FAIL;
+ }
+ kfree(ep);
+ if(is_out){
+ udev->ep_out[epnum] = NULL;
+ }
+ else{
+ udev->ep_in[epnum] = NULL;
+ }
+
+ return RET_SUCCESS;
+}
+
+int f_xhci_config_ep(struct usb_device *udev){
+ int ret;
+ struct xhci_hcd *xhci;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_input_control_ctx *ctrl_ctx;
+ int i;
+
+ xhci = hcd_to_xhci(my_hcd);
+ virt_dev = xhci->devs[udev->slot_id];
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+ ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
+ ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
+ ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
+
+ g_cmd_status = CMD_RUNNING;
+ ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma,
+ udev->slot_id, false);
+ xhci_ring_cmd_db(xhci);
+ wait_event_on_timeout(&g_cmd_status, CMD_DONE, CMD_TIMEOUT);
+ if(g_cmd_status == CMD_RUNNING){
+ xhci_err(xhci, "[ERROR] config endpoint timeout\n");
+ return RET_FAIL;
+ }
+ if(g_cmd_status == CMD_FAIL){
+ xhci_err(xhci, "[ERROR] config endpoint failed\n");
+ return RET_FAIL;
+ }
+ for (i = 1; i < 31; ++i) {
+ if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
+ !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
+ xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
+ }
+ xhci_zero_in_ctx(xhci, virt_dev);
+ return RET_SUCCESS;
+}
+
+int f_xhci_deconfig_ep(struct usb_device *udev){
+ int ret;
+ struct xhci_hcd *xhci;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_input_control_ctx *ctrl_ctx;
+ int i;
+
+ xhci = hcd_to_xhci(my_hcd);
+ virt_dev = xhci->devs[udev->slot_id];
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+ ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
+ ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
+ ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
+
+ g_cmd_status = CMD_RUNNING;
+ ret = xhci_queue_deconfigure_endpoint(xhci, virt_dev->in_ctx->dma,
+ udev->slot_id, false);
+ xhci_ring_cmd_db(xhci);
+ wait_event_on_timeout(&g_cmd_status, CMD_DONE, CMD_TIMEOUT);
+ if(g_cmd_status == CMD_RUNNING){
+ xhci_err(xhci, "[ERROR] config endpoint timeout\n");
+ return RET_FAIL;
+ }
+ if(g_cmd_status == CMD_FAIL){
+ xhci_err(xhci, "[ERROR] config endpoint failed\n");
+ return RET_FAIL;
+ }
+ for (i = 1; i < 31; ++i) {
+ if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
+ !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
+ xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
+ }
+ xhci_zero_in_ctx(xhci, virt_dev);
+ return RET_SUCCESS;
+}
+
+int f_config_ep(char ep_num,int ep_dir,int transfer_type, int maxp,int bInterval, int burst, int mult, struct usb_device *udev,int config_xhci){
+ int ret;
+ struct xhci_hcd *xhci;
+ struct usb_host_endpoint *ep_tx, *ep_rx;
+ struct usb_device *dev, *rhdev;
+
+ xhci = hcd_to_xhci(my_hcd);
+ if(udev == NULL){
+ rhdev = my_hcd->self.root_hub;
+ dev = rhdev->children[g_port_id-1];
+ }
+ else{
+ dev = udev;
+ }
+
+ if(maxp>1024){
+ maxp = 1024 |(((maxp/1024)-1) << 11);
+
+ }
+ ep_tx = kmalloc(sizeof(struct usb_host_endpoint), GFP_NOIO);
+ ep_tx->desc.bDescriptorType = USB_DT_ENDPOINT;
+ ep_tx->desc.bEndpointAddress = EPADD_NUM(ep_num) | ep_dir;
+ ep_tx->desc.bmAttributes = transfer_type;
+ ep_tx->desc.wMaxPacketSize = maxp;
+ if(dev->speed == USB_SPEED_HIGH){
+ ep_tx->desc.wMaxPacketSize |= (mult << 11);
+ }
+ ep_tx->desc.bInterval = bInterval;
+ ep_tx->ss_ep_comp.bMaxBurst = burst;
+ ep_tx->ss_ep_comp.bmAttributes = mult;
+ if(usb_endpoint_xfer_isoc(&ep_tx->desc)){
+ ep_tx->ss_ep_comp.wBytesPerInterval = ((burst+1) * (mult+1) * maxp);
+ }
+ else if(usb_endpoint_xfer_int(&ep_tx->desc)){
+ ep_tx->ss_ep_comp.wBytesPerInterval = ((burst+1) * maxp);
+ }
+ else{
+ ep_tx->ss_ep_comp.wBytesPerInterval = 0;
+ }
+
+
+ ret = f_udev_add_ep(ep_tx, dev);
+ if(ret != RET_SUCCESS){
+ return RET_FAIL;
+ }
+
+ if(config_xhci){
+ ret = f_xhci_config_ep(dev);
+ if(ret != RET_SUCCESS){
+ return RET_FAIL;
+ }
+ }
+ return RET_SUCCESS;
+}
+
+int f_deconfig_ep(char is_all, char ep_num,int ep_dir,struct usb_device *usbdev,int config_xhci){
+ int i;
+ int ret;
+ int is_out;
+ struct usb_device *udev, *rhdev;
+ struct usb_host_endpoint *ep;
+
+ if(usbdev == NULL){
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+ }
+ else{
+ udev = usbdev;
+ }
+
+ if(is_all){
+ for(i=1; i<=15; i++){
+ ep = udev->ep_out[i];
+ if(ep){
+ kfree(ep);
+ udev->ep_out[i] = NULL;
+ }
+ ep = udev->ep_in[i];
+ if(ep){
+ kfree(ep);
+ udev->ep_out[i] = NULL;
+ }
+ }
+ return f_xhci_deconfig_ep(udev);
+ }
+ else{
+ if(ep_dir == EPADD_OUT){
+ is_out = 1;
+ }
+ else{
+ is_out = 0;
+ }
+ ret = f_udev_drop_ep(is_out, ep_num, udev);
+ if(config_xhci){
+ ret = f_xhci_config_ep(udev);
+ if(ret != RET_SUCCESS){
+ return RET_FAIL;
+ }
+ }
+ return RET_SUCCESS;
+ }
+}
+
+
+int f_loopback_config_ep(char ep_out,char ep_in,int transfer_type, int maxp,int bInterval, struct usb_device *udev){
+ int ret;
+ struct xhci_hcd *xhci;
+ struct usb_host_endpoint *ep_tx, *ep_rx;
+ struct usb_device *dev, *rhdev;
+
+ xhci = hcd_to_xhci(my_hcd);
+ if(udev == NULL){
+ rhdev = my_hcd->self.root_hub;
+ dev = rhdev->children[g_port_id-1];
+ }
+ else{
+ dev = udev;
+ }
+
+ ep_tx = kmalloc(sizeof(struct usb_host_endpoint), GFP_NOIO);
+ ep_tx->desc.bDescriptorType = USB_DT_ENDPOINT;
+ ep_tx->desc.bEndpointAddress = EPADD_NUM(ep_out) | EPADD_OUT;
+ ep_tx->desc.bmAttributes = transfer_type;
+ ep_tx->desc.wMaxPacketSize = maxp;
+ ep_tx->desc.bInterval = bInterval;
+
+ ret = f_udev_add_ep(ep_tx, dev);
+ if(ret != RET_SUCCESS){
+ return RET_FAIL;
+ }
+
+ ep_rx = kmalloc(sizeof(struct usb_host_endpoint), GFP_NOIO);
+ ep_rx->desc.bDescriptorType = USB_DT_ENDPOINT;
+ ep_rx->desc.bEndpointAddress = EPADD_NUM(ep_in) | EPADD_IN;
+ ep_rx->desc.bmAttributes = transfer_type;
+ ep_rx->desc.wMaxPacketSize = maxp;
+ ep_rx->desc.bInterval = bInterval;
+
+ ret = f_udev_add_ep(ep_rx, dev);
+ if(ret != RET_SUCCESS){
+ return RET_FAIL;
+ }
+
+ ret = f_xhci_config_ep(dev);
+ if(ret != RET_SUCCESS){
+ return RET_FAIL;
+ }
+ return RET_SUCCESS;
+}
+
+int f_reset_ep(int slot_id, int ep_index){
+ struct xhci_hcd *xhci;
+ struct xhci_ring *ep_ring;
+ struct xhci_virt_ep *ep;
+ struct xhci_td *cur_td = NULL;
+ struct xhci_dequeue_state deq_state;
+ struct xhci_virt_device *dev;
+
+ xhci = hcd_to_xhci(my_hcd);
+ g_cmd_status = CMD_RUNNING;
+ xhci_queue_reset_ep(xhci, slot_id, ep_index);
+ xhci_ring_cmd_db(xhci);
+ wait_event_on_timeout(&g_cmd_status, CMD_DONE, CMD_TIMEOUT);
+ if(g_cmd_status == CMD_RUNNING){
+ xhci_err(xhci, "[ERROR] reset endpoint timout\n");
+ return RET_FAIL;
+ }
+ if(g_cmd_status == CMD_FAIL){
+ xhci_err(xhci, "[ERROR] reset endpiont failed\n");
+ return RET_FAIL;
+ }
+ return RET_SUCCESS;
+}
+
+
+int f_queue_urb(struct urb *urb,int wait, struct usb_device *dev){
+
+ struct usb_device *udev, *rhdev;
+ struct xhci_hcd *xhci;
+ struct usb_host_endpoint *ep;
+ struct urb_priv *urb_priv;
+ int ep_index;
+ int ret;
+ int size, i;
+ unsigned long flags;
+
+ xhci = hcd_to_xhci(my_hcd);
+ rhdev = my_hcd->self.root_hub;
+ if(dev){
+ udev = dev;
+ }
+ else{
+ udev = rhdev->children[g_port_id-1];
+ }
+ ep = urb->ep;
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+
+ size = 1;
+ if(!urb->hcpriv){
+ if (usb_endpoint_xfer_isoc(&ep->desc))
+ size = urb->number_of_packets;
+ else
+ size = 1;
+ urb_priv = kmalloc(sizeof(struct urb_priv) + size * sizeof(struct xhci_td *), GFP_KERNEL);
+ if (!urb_priv){
+ xhci_err(xhci, "[ERROR] allocate urb_priv failed\n");
+ return RET_FAIL;
+ }
+
+ for (i = 0; i < size; i++) {
+ urb_priv->td[i] = kmalloc(sizeof(struct xhci_td), GFP_KERNEL);
+ if (!urb_priv->td[i]) {
+ urb_priv->length = i;
+ xhci_urb_free_priv(xhci, urb_priv);
+ return RET_FAIL;
+ }
+ }
+
+ urb_priv->length = size;
+ urb_priv->td_cnt = 0;
+ urb->hcpriv = urb_priv;
+ }
+ else{
+ urb_priv = urb->hcpriv;
+ size = urb_priv->length;
+ urb_priv->td_cnt = 0;
+ }
+ if(usb_endpoint_xfer_bulk(&ep->desc)){
+ spin_lock_irqsave(&xhci->lock, flags);
+ ret = xhci_queue_bulk_tx(xhci, GFP_KERNEL, urb, udev->slot_id, ep_index);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ }
+ else if(usb_endpoint_xfer_int(&ep->desc)){
+ spin_lock_irqsave(&xhci->lock, flags);
+ ret = xhci_queue_intr_tx(xhci, GFP_KERNEL, urb, udev->slot_id, ep_index);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ }
+ else if(usb_endpoint_xfer_isoc(&ep->desc)){
+ spin_lock_irqsave(&xhci->lock, flags);
+ ret = xhci_queue_isoc_tx_prepare(xhci, GFP_KERNEL, urb, udev->slot_id, ep_index);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ }
+ if(ret){
+ xhci_err(xhci, "[ERROR] queue tx error %d\n", ret);
+ }
+
+ if(wait){
+ if(usb_endpoint_xfer_isoc(&ep->desc)){
+ wait_not_event_on_timeout(&(urb->status), -EINPROGRESS, 500000);
+ }
+ else{
+ wait_not_event_on_timeout(&(urb->status), -EINPROGRESS, TRANS_TIMEOUT);
+ }
+ if(urb->status != 0){
+ xhci_err(xhci, "[ERROR] Tx transfer error, status=%d\n", urb->status);
+ if(urb->status == -EINPROGRESS){
+ xhci_err(xhci, "[ERROR] Timeout, stop endpoint and set tr dequeue pointer\n");
+ f_ring_stop_ep(g_slot_id, ep_index);
+ f_ring_set_tr_dequeue_pointer(g_slot_id, ep_index, urb);
+ f_reset_ep(g_slot_id,ep_index);
+ xhci_urb_free_priv(xhci, urb_priv);
+ urb->hcpriv = NULL;
+ return RET_FAIL;
+ }
+ else if(urb->status == -EPIPE){
+ f_reset_ep(g_slot_id,ep_index);
+ f_ring_set_tr_dequeue_pointer(g_slot_id, ep_index, urb);
+ xhci_urb_free_priv(xhci, urb_priv);
+ urb->hcpriv = NULL;
+ return RET_FAIL;
+ }
+ }
+ xhci_urb_free_priv(xhci, urb->hcpriv);
+ urb->hcpriv = NULL;
+ xhci_dbg(xhci, "Tx done, status=%d\n", urb->status);
+ }
+
+ ret = RET_SUCCESS;
+
+ return ret;
+}
+
+
+void f_free_urb(struct urb *urb,int data_length, int start_add){
+ struct device *dev;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_hcd *xhci;
+ /* struct urb *urb; */
+ struct usb_host_endpoint *ep;
+ int ep_index;
+ void *buffer;
+ dma_addr_t mapping;
+
+ xhci = hcd_to_xhci(my_hcd);
+ dev = xhci_to_hcd(xhci)->self.controller;
+
+ urb->transfer_buffer -= start_add;
+ urb->transfer_dma -= start_add;
+ urb->transfer_buffer_length = data_length+start_add;
+
+ xhci_dbg(xhci, "free transfer buffer address 0x%x\n", urb->transfer_buffer);
+ xhci_dbg(xhci, "free transfer dma address 0x%x\n", urb->transfer_dma);
+
+ dma_unmap_single(dev, urb->transfer_dma, urb->transfer_buffer_length,DMA_BIDIRECTIONAL);
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+}
+int f_fill_urb_with_buffer(struct urb *urb,int ep_num,int data_length,void *buffer,int start_add,int dir, int iso_num_packets, int psize
+, dma_addr_t dma_mapping, struct usb_device *usbdev){
+
+ struct usb_device *udev, *rhdev;
+ struct device *dev;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_hcd *xhci;
+ /* struct urb *urb; */
+ struct usb_host_endpoint *ep;
+ int ep_index;
+ u8 *tmp1, *tmp2;
+ /* int data_length; */
+ int num_sgs;
+ dma_addr_t mapping;/* dma stream buffer */
+ int ret;
+ int i, j;
+
+ ret = 0;
+ num_sgs = 0;
+ xhci = hcd_to_xhci(my_hcd);
+ dev = xhci_to_hcd(xhci)->self.controller;/* dma stream buffer */
+ if(!usbdev){
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+ }
+ else{
+ udev = usbdev;
+ }
+ if(dir==URB_DIR_OUT){
+ ep = udev->ep_out[ep_num];
+ }else{
+ ep = udev->ep_in[ep_num];
+ }
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ /* Tx */
+ if(!dma_mapping){
+ mapping = dma_map_single(dev, buffer,data_length, DMA_BIDIRECTIONAL);
+ }
+ else{
+ mapping = dma_mapping;
+ }
+ xhci_dbg(xhci, "[Debug] psize %d\n", psize);
+ xhci_dbg(xhci, "dma buffer address 0x%x\n", buffer);
+ xhci_dbg(xhci, "mapping buffer address 0x%x\n", mapping);
+ if(usb_endpoint_xfer_bulk(&ep->desc) && usb_endpoint_dir_out(&ep->desc)){
+ usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, usb_endpoint_num(&ep->desc)), buffer, data_length, NULL, NULL);
+ }
+ else if(usb_endpoint_xfer_bulk(&ep->desc) && usb_endpoint_dir_in(&ep->desc)){
+ usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, usb_endpoint_num(&ep->desc)), buffer, data_length, NULL, NULL);
+ }
+ else if(usb_endpoint_xfer_int(&ep->desc) && usb_endpoint_dir_out(&ep->desc)){
+ usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, usb_endpoint_num(&ep->desc)), buffer, data_length, NULL, NULL, ep->desc.bInterval);
+ }
+ else if(usb_endpoint_xfer_int(&ep->desc) && usb_endpoint_dir_in(&ep->desc)){
+ usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, usb_endpoint_num(&ep->desc)), buffer, data_length, NULL, NULL, ep->desc.bInterval);
+ }
+ else if(usb_endpoint_xfer_isoc(&ep->desc) && usb_endpoint_dir_out(&ep->desc)){
+ urb->dev = udev;
+ urb->pipe = usb_sndisocpipe(udev,usb_endpoint_num(&ep->desc));
+ urb->transfer_flags = URB_ISO_ASAP;
+ urb->transfer_buffer = buffer;
+ urb->number_of_packets = iso_num_packets;
+ urb->transfer_buffer_length = data_length;
+ for(j=0; j<iso_num_packets; j++){
+ urb->iso_frame_desc[j].offset = j * psize;
+ xhci_dbg(xhci, "[Debug] iso frame offset %d\n", urb->iso_frame_desc[j].offset);
+ if(j == iso_num_packets-1){
+ urb->iso_frame_desc[j].length = (data_length-(j*psize));
+ }
+ else{
+ urb->iso_frame_desc[j].length = psize;
+ }
+ xhci_dbg(xhci, "[Debug] iso frame length %d\n", urb->iso_frame_desc[j].length);
+ }
+ }
+ else if(usb_endpoint_xfer_isoc(&ep->desc) && usb_endpoint_dir_in(&ep->desc)){
+ urb->dev = udev;
+ urb->pipe = usb_sndisocpipe(udev,usb_endpoint_num(&ep->desc));
+ urb->transfer_flags = URB_ISO_ASAP;
+ urb->transfer_buffer = buffer;
+ urb->number_of_packets = iso_num_packets;
+ urb->transfer_buffer_length = data_length;
+ for(j=0; j<iso_num_packets; j++){
+ urb->iso_frame_desc[j].offset = j * psize;
+ xhci_dbg(xhci, "[Debug] iso frame offset %d\n", urb->iso_frame_desc[j].offset);
+ if(j == iso_num_packets-1){
+ urb->iso_frame_desc[j].length = (data_length-(j*psize));
+ }
+ else{
+ urb->iso_frame_desc[j].length = psize;
+ }
+ xhci_dbg(xhci, "[Debug] iso frame length %d\n", urb->iso_frame_desc[j].length);
+ }
+ }
+ urb->transfer_dma = mapping;
+ urb->status = -EINPROGRESS;
+ urb->actual_length = 0;
+ urb->transfer_flags |= dir | URB_ZERO_PACKET;
+ urb->ep = ep;
+ urb->num_sgs = 0;
+
+ urb->transfer_buffer += start_add;
+ urb->transfer_dma += start_add;
+ urb->transfer_buffer_length = data_length;
+
+ if(dir==URB_DIR_OUT){
+ get_random_bytes(urb->transfer_buffer, data_length);
+ }
+
+ dma_sync_single_for_device(dev, urb->transfer_dma, data_length, DMA_BIDIRECTIONAL);
+
+ return ret;
+}
+
+int f_fill_urb(struct urb *urb,int ep_num,int data_length, int start_add,int dir, int iso_num_packets, int psize, struct usb_device *usbdev){
+ struct usb_device *udev, *rhdev;
+ struct device *dev;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_hcd *xhci;
+ /* struct urb *urb; */
+ struct usb_host_endpoint *ep;
+ int ep_index;
+ void *buffer;
+ u8 *tmp1, *tmp2;
+ /* int data_length; */
+ dma_addr_t mapping;/* dma stream buffer */
+ struct scatterlist *sg;
+ void *tmp_sg;
+ int cur_sg_len;
+ int ret;
+ int i, j;
+
+ ret = 0;
+ xhci = hcd_to_xhci(my_hcd);
+ dev = xhci_to_hcd(xhci)->self.controller;/* dma stream buffer */
+ rhdev = my_hcd->self.root_hub;
+ if(usbdev){
+ udev = usbdev;
+ }
+ else{
+ udev = rhdev->children[g_port_id-1];
+ }
+ if(dir==URB_DIR_OUT){
+ ep = udev->ep_out[ep_num];
+ }else{
+ ep = udev->ep_in[ep_num];
+ }
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ /* Tx */
+ buffer = kmalloc(data_length+start_add, GFP_KERNEL);
+ mapping = dma_map_single(dev, buffer,data_length+start_add, DMA_BIDIRECTIONAL);
+ xhci_dbg(xhci, "[Debug] psize %d\n", psize);
+ xhci_dbg(xhci, "dma buffer address 0x%x\n", buffer);
+ xhci_dbg(xhci, "mapping buffer address 0x%x\n", mapping);
+ if(usb_endpoint_xfer_bulk(&ep->desc) && usb_endpoint_dir_out(&ep->desc)){
+ usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, usb_endpoint_num(&ep->desc)), buffer, data_length+start_add, NULL, NULL);
+ }
+ else if(usb_endpoint_xfer_bulk(&ep->desc) && usb_endpoint_dir_in(&ep->desc)){
+ usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, usb_endpoint_num(&ep->desc)), buffer, data_length+start_add, NULL, NULL);
+ }
+ else if(usb_endpoint_xfer_int(&ep->desc) && usb_endpoint_dir_out(&ep->desc)){
+ usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, usb_endpoint_num(&ep->desc)), buffer, data_length+start_add, NULL, NULL, ep->desc.bInterval);
+ }
+ else if(usb_endpoint_xfer_int(&ep->desc) && usb_endpoint_dir_in(&ep->desc)){
+ usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, usb_endpoint_num(&ep->desc)), buffer, data_length+start_add, NULL, NULL, ep->desc.bInterval);
+ }
+ else if(usb_endpoint_xfer_isoc(&ep->desc) && usb_endpoint_dir_out(&ep->desc)){
+ urb->dev = udev;
+ urb->pipe = usb_sndisocpipe(udev,usb_endpoint_num(&ep->desc));
+ urb->transfer_flags = URB_ISO_ASAP;
+ urb->transfer_buffer = buffer;
+ urb->number_of_packets = iso_num_packets;
+ urb->transfer_buffer_length = data_length;
+ for(j=0; j<iso_num_packets; j++){
+ urb->iso_frame_desc[j].offset = j * psize;
+ xhci_dbg(xhci, "[Debug] iso frame offset %d\n", urb->iso_frame_desc[j].offset);
+ if(j == iso_num_packets-1){
+ urb->iso_frame_desc[j].length = (data_length-(j*psize));
+ }
+ else{
+ urb->iso_frame_desc[j].length = psize;
+ }
+ xhci_dbg(xhci, "[Debug] iso frame length %d\n", urb->iso_frame_desc[j].length);
+ }
+ }
+ else if(usb_endpoint_xfer_isoc(&ep->desc) && usb_endpoint_dir_in(&ep->desc)){
+ urb->dev = udev;
+ urb->pipe = usb_sndisocpipe(udev,usb_endpoint_num(&ep->desc));
+ urb->transfer_flags = URB_ISO_ASAP;
+ urb->transfer_buffer = buffer;
+ urb->number_of_packets = iso_num_packets;
+ urb->transfer_buffer_length = data_length;
+ for(j=0; j<iso_num_packets; j++){
+ urb->iso_frame_desc[j].offset = j * psize;
+ xhci_dbg(xhci, "[Debug] iso frame offset %d\n", urb->iso_frame_desc[j].offset);
+ if(j == iso_num_packets-1){
+ urb->iso_frame_desc[j].length = (data_length-(j*psize));
+ }
+ else{
+ urb->iso_frame_desc[j].length = psize;
+ }
+ xhci_dbg(xhci, "[Debug] iso frame length %d\n", urb->iso_frame_desc[j].length);
+ }
+ }
+ urb->transfer_dma = mapping;
+ urb->status = -EINPROGRESS;
+ urb->actual_length = 0;
+ urb->transfer_flags |= dir | URB_ZERO_PACKET;
+ urb->ep = ep;
+ urb->num_sgs = 0;
+
+ urb->transfer_buffer += start_add;
+ urb->transfer_dma += start_add;
+ urb->transfer_buffer_length = data_length;
+ get_random_bytes(urb->transfer_buffer, data_length);
+
+ if(dir==URB_DIR_OUT){
+ for(i=0; i<data_length; i++){
+ tmp1 = urb->transfer_buffer+i;
+ if(*tmp1 == 0xff){
+ *tmp1 = 0xfe;
+ }
+ }
+ }
+
+ dma_sync_single_for_device(dev, mapping, data_length+start_add, DMA_BIDIRECTIONAL);
+ urb->hcpriv = NULL;
+ return ret;
+}
+
+struct mtk_sg {
+ int sg_len;
+ struct list_head list;
+};
+
+#define MIN_SG_LENGTH 512
+#define MAX_SG_LENGTH 4096
+int f_fill_urb_sg(struct urb *urb, int dir,int ep_num,int data_length, int sg_len, struct usb_device *usbdev){
+ struct usb_device *udev, *rhdev;
+ struct device *dev;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_hcd *xhci;
+ int ep_index;
+ struct usb_host_endpoint *ep;
+ struct scatterlist *sg;
+ void *tmp_sg;
+ int cur_sg_len, num_sgs, data_length_left;
+ unsigned int tmp_sg_len;
+ int ret;
+ int i, j;
+ struct list_head sg_head, *next;
+ struct mtk_sg *tmp_mtk_sg;
+ dma_addr_t cur_transfer_dma;
+
+ ret = 0;
+ xhci = hcd_to_xhci(my_hcd);
+ dev = xhci_to_hcd(xhci)->self.controller;/* dma stream buffer */
+ rhdev = my_hcd->self.root_hub;
+ if(usbdev){
+ udev = usbdev;
+ }
+ else{
+ udev = rhdev->children[g_port_id-1];
+ }
+ if(dir==URB_DIR_OUT){
+ ep = udev->ep_out[ep_num];
+ }else{
+ ep = udev->ep_in[ep_num];
+ }
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ num_sgs = 0;
+ if(sg_len != 0){
+ num_sgs = data_length/sg_len;
+ if(data_length % sg_len != 0){
+ num_sgs++;
+ }
+ tmp_sg = kmalloc((sizeof(struct scatterlist)*num_sgs), GFP_KERNEL);
+ sg = tmp_sg;
+ for(i=0; i<num_sgs; i++){
+ xhci_dbg(xhci, "sg address 0x%x\n", sg);
+ if(i == (num_sgs-1)){
+ sg->page_link = 0x02;
+ }
+ else{
+ sg->page_link = 0x0;
+ }
+ if((data_length - (i*sg_len)) > sg_len){
+ cur_sg_len = sg_len;
+ }
+ else{
+ cur_sg_len = (data_length - (i*sg_len));
+ }
+ sg->length = cur_sg_len;
+ sg->dma_address = urb->transfer_dma + (i*sg_len);
+
+ sg++;
+
+ }
+ }
+ else{
+ data_length_left = data_length;
+ /* random sg_len from 512 ~ 4096 */
+ INIT_LIST_HEAD(&sg_head);
+ while(data_length_left > 0){
+ if(data_length_left < 512){
+ cur_sg_len = data_length_left;
+ }
+ else{
+ cur_sg_len = (get_random_int() + MIN_SG_LENGTH)%MAX_SG_LENGTH;
+ }
+ num_sgs++;
+ tmp_mtk_sg = kmalloc(sizeof(struct mtk_sg), GFP_KERNEL);
+ tmp_mtk_sg->sg_len = cur_sg_len;
+ list_add_tail(&tmp_mtk_sg->list, &sg_head);
+ }
+ tmp_sg = kmalloc((sizeof(struct scatterlist)*num_sgs), GFP_KERNEL);
+ cur_transfer_dma = urb->transfer_dma;
+ sg = tmp_sg;
+ for(i=0; i<num_sgs; i++){
+ tmp_mtk_sg = (struct mtk_sg *)list_entry(sg_head.next, struct mtk_sg, list);
+ sg->length = tmp_mtk_sg->sg_len;
+ sg->dma_address = cur_transfer_dma;
+ cur_transfer_dma = cur_transfer_dma + tmp_mtk_sg->sg_len;
+ sg++;
+ list_del(sg_head.next);
+ kfree(tmp_mtk_sg);
+ }
+ }
+ urb->sg = tmp_sg;
+ urb->num_sgs = num_sgs;
+ return ret;
+}
+
+/* Return the maximum endpoint service interval time (ESIT) payload.
+ * Basically, this is the maxpacket size, multiplied by the burst size
+ * and mult size.
+ */
+static inline u32 mtk_xhci_get_max_esit_payload(struct xhci_hcd *xhci,
+ struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ int max_burst;
+ int max_packet;
+
+ /* Only applies for interrupt or isochronous endpoints */
+ if (usb_endpoint_xfer_control(&ep->desc) ||
+ usb_endpoint_xfer_bulk(&ep->desc))
+ return 0;
+
+ if (udev->speed == USB_SPEED_SUPER)
+ return ep->ss_ep_comp.wBytesPerInterval;
+
+ max_packet = (ep->desc.wMaxPacketSize) & 0x7ff;
+ max_burst = ((ep->desc.wMaxPacketSize) & 0x1800) >> 11;
+ /* A 0 in max burst means 1 transfer per ESIT */
+ return max_packet * (max_burst + 1);
+}
+
+int f_loopback_loop(int ep_out, int ep_in, int data_length, int start_add, struct usb_device *usbdev){
+ struct device *dev;
+ struct usb_device *udev, *rhdev;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_hcd *xhci;
+ struct urb *urb_tx, *urb_rx;
+ struct usb_host_endpoint *ep_tx, *ep_rx;
+ int ep_index_tx, ep_index_rx;
+ void *buffer_tx, *buffer_rx;
+ u8 *tmp1, *tmp2;
+ /* int data_length; */
+ int num_sgs;
+ int ret;
+ int i;
+ int max_esit_payload;
+ int iso_num_packets;
+
+ ret = 0;
+ xhci = hcd_to_xhci(my_hcd);
+ dev = xhci_to_hcd(xhci)->self.controller;/* dma stream buffer */
+ rhdev = my_hcd->self.root_hub;
+ if(usbdev){
+ udev = usbdev;
+ }
+ else{
+ udev = rhdev->children[g_port_id-1];
+ }
+ ep_tx = udev->ep_out[ep_out];
+ ep_rx = udev->ep_in[ep_in];
+ ep_index_tx = xhci_get_endpoint_index(&ep_tx->desc);
+ ep_index_rx = xhci_get_endpoint_index(&ep_rx->desc);
+ if(usb_endpoint_xfer_isoc(&ep_tx->desc)){
+ /* if superspeed, need to add ss_ep_comp.wBytesPerInterval in ep structure */
+ max_esit_payload = mtk_xhci_get_max_esit_payload(xhci, udev, ep_tx);
+
+ if(data_length%max_esit_payload == 0){
+ iso_num_packets = data_length/max_esit_payload;
+ }
+ else{
+ iso_num_packets = data_length/max_esit_payload + 1;
+ }
+ }
+ else{
+ iso_num_packets = 0;
+ }
+ if(usb_endpoint_xfer_isoc(&ep_tx->desc)){
+ msleep(1000);
+ }
+ /* Tx */
+ urb_tx = usb_alloc_urb(iso_num_packets, GFP_KERNEL);
+ ret = f_fill_urb(urb_tx,ep_out,data_length,start_add,URB_DIR_OUT, iso_num_packets, max_esit_payload, udev);
+ if(ret){
+ xhci_err(xhci, "[ERROR]fill tx urb Error!!\n");
+ return RET_FAIL;
+ }
+ ret = f_queue_urb(urb_tx,1, udev);
+ if(ret){
+ xhci_err(xhci, "[ERROR]tx urb transfer failed!!\n");
+ return RET_FAIL;
+ }
+ if(usb_endpoint_xfer_isoc(&ep_tx->desc)){
+ msleep(1000);
+ }
+ /* Rx */
+ urb_rx = usb_alloc_urb(iso_num_packets, GFP_NOIO);
+ ret = f_fill_urb(urb_rx,ep_in,data_length,start_add,URB_DIR_IN, iso_num_packets, max_esit_payload, udev);
+ if(ret){
+ xhci_err(xhci, "[ERROR]fill rx urb Error!!\n");
+ return RET_FAIL;
+ }
+ ret = f_queue_urb(urb_rx,1,udev);
+ if(ret){
+ xhci_err(xhci, "[ERROR]rx urb transfer failed!!\n");
+ return RET_FAIL;
+ }
+ dma_sync_single_for_cpu(dev,urb_tx->transfer_dma-start_add,data_length+start_add,DMA_BIDIRECTIONAL);
+ dma_sync_single_for_cpu(dev,urb_rx->transfer_dma-start_add,data_length+start_add,DMA_BIDIRECTIONAL);
+ /* Compare */
+ for(i=0; i<urb_tx->transfer_buffer_length; i++){
+ tmp1 = urb_tx->transfer_buffer+i;
+ tmp2 = urb_rx->transfer_buffer+i;
+ if((*tmp1) != (*tmp2)){
+ xhci_err(xhci, "[ERROR] buffer %d not match, tx 0x%x, rx 0x%x\n", i, *tmp1, *tmp2);
+ ret = RET_FAIL;
+ break;
+ }
+ }
+ xhci_dbg(xhci, "Buffer compared done\n");
+ f_free_urb(urb_tx,data_length,start_add);
+ f_free_urb(urb_rx,data_length,start_add);
+ return ret;
+}
+
+int f_loopback_sg_loop(int ep_out, int ep_in, int data_length, int start_add, int sg_len, struct usb_device *usbdev){
+ /* if sg_len = 0, means random sg_length
+ * can not test isoc transfer type
+ */
+ struct device *dev;
+ struct usb_device *udev, *rhdev;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_hcd *xhci;
+ struct urb *urb_tx, *urb_rx;
+ struct usb_host_endpoint *ep_tx, *ep_rx;
+ int ep_index_tx, ep_index_rx;
+ int sg_length;
+ void *buffer_tx, *buffer_rx;
+ u8 *tmp1, *tmp2;
+ /* int data_length; */
+ int num_sgs;
+ int ret;
+ int i;
+ int max_esit_payload;
+
+ ret = 0;
+ xhci = hcd_to_xhci(my_hcd);
+ dev = xhci_to_hcd(xhci)->self.controller;/* dma stream buffer */
+ rhdev = my_hcd->self.root_hub;
+ if(usbdev){
+ udev = usbdev;
+ }
+ else{
+ udev = rhdev->children[g_port_id-1];
+ }
+ ep_tx = udev->ep_out[ep_out];
+ ep_rx = udev->ep_in[ep_in];
+ ep_index_tx = xhci_get_endpoint_index(&ep_tx->desc);
+ ep_index_rx = xhci_get_endpoint_index(&ep_rx->desc);
+
+ urb_tx = usb_alloc_urb(0, GFP_KERNEL);
+ ret = f_fill_urb(urb_tx,ep_out,data_length,start_add,URB_DIR_OUT
+ , 0, max_esit_payload, udev);
+ if(ret){
+ xhci_err(xhci, "[ERROR]fill tx urb Error!!\n");
+ return RET_FAIL;
+ }
+ ret = f_fill_urb_sg(urb_tx, URB_DIR_OUT, ep_out, data_length, sg_len, udev);
+ if(ret){
+ xhci_err(xhci, "[ERROR]fill tx urb sg Error!!\n");
+ return RET_FAIL;
+ }
+ ret = f_queue_urb(urb_tx,1,udev);
+ if(ret){
+ xhci_err(xhci, "[ERROR]tx urb transfer failed!!\n");
+ return RET_FAIL;
+ }
+ /* Rx */
+ urb_rx = usb_alloc_urb(0, GFP_NOIO);
+ ret = f_fill_urb(urb_rx,ep_in,data_length,start_add,URB_DIR_IN, 0, max_esit_payload, udev);
+ if(ret){
+ xhci_err(xhci, "[ERROR]fill rx urb Error!!\n");
+ return RET_FAIL;
+ }
+ ret = f_fill_urb_sg(urb_rx, URB_DIR_IN, ep_in, data_length, sg_len, udev);
+ if(ret){
+ xhci_err(xhci, "[ERROR]fill tx urb sg Error!!\n");
+ return RET_FAIL;
+ }
+ ret = f_queue_urb(urb_rx,1,udev);
+ if(ret){
+ xhci_err(xhci, "[ERROR]rx urb transfer failed!!\n");
+ return RET_FAIL;
+ }
+ dma_sync_single_for_cpu(dev,urb_tx->transfer_dma-start_add,data_length+start_add,DMA_BIDIRECTIONAL);
+ dma_sync_single_for_cpu(dev,urb_rx->transfer_dma-start_add,data_length+start_add,DMA_BIDIRECTIONAL);
+ /* Compare */
+ for(i=0; i<urb_tx->transfer_buffer_length; i++){
+ tmp1 = urb_tx->transfer_buffer+i;
+ tmp2 = urb_rx->transfer_buffer+i;
+ if((*tmp1) != (*tmp2)){
+ xhci_err(xhci, "[ERROR] buffer %d not match, tx 0x%x, rx 0x%x\n", i, *tmp1, *tmp2);
+ ret = RET_FAIL;
+ break;
+ }
+ }
+ xhci_dbg(xhci, "Buffer compared done\n");
+ kfree(urb_tx->sg);
+ kfree(urb_rx->sg);
+ f_free_urb(urb_tx,data_length,start_add);
+ f_free_urb(urb_rx,data_length,start_add);
+ return ret;
+}
+
+int f_loopback_loop_gpd(int ep_out, int ep_in, int data_length, int start_add, int gpd_length, struct usb_device *usbdev){
+ struct xhci_hcd *xhci;
+ struct device *dev;
+ struct usb_device *udev, *rhdev;
+ int ret, i;
+ u8 *tmp1, *tmp2;
+ struct urb *urb_tx, *urb_rx;
+ int iso_num_packets;
+ struct usb_host_endpoint *ep_tx, *ep_rx;
+ int max_esit_payload;
+ int num_urbs;
+ void *buffer_tx, *buffer_rx;
+ dma_addr_t mapping_tx, mapping_rx;
+ int tmp_urb_len, total_running;
+
+ ret = 0;
+ iso_num_packets = 0;
+ xhci = hcd_to_xhci(my_hcd);
+ dev = xhci_to_hcd(xhci)->self.controller;/* dma stream buffer */
+ if(!usbdev){
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+ }
+ else{
+ udev = usbdev;
+ }
+ ep_tx = udev->ep_out[ep_out];
+ ep_rx = udev->ep_in[ep_in];
+
+ ret = 0;
+ buffer_tx = kmalloc(data_length+start_add, GFP_KERNEL);
+ mapping_tx = dma_map_single(dev, buffer_tx,data_length+start_add, DMA_BIDIRECTIONAL);
+ xhci_dbg(xhci, "buffer_tx 0x%x dma 0x%x\n", buffer_tx, mapping_tx);
+ num_urbs = data_length/gpd_length;
+ if(data_length % gpd_length != 0){
+ num_urbs++;
+ }
+ xhci_dbg(xhci, "[LOOPBACK]Num urbs: %d\n", num_urbs);
+ total_running = 0;
+ xhci_dbg(xhci, "[LOOPBACK]Start to do Tx, buffer 0x%x, mapping 0x%x\n", buffer_tx, mapping_tx);
+
+ for(i=0; i<num_urbs; i++){
+ if(usb_endpoint_xfer_isoc(&ep_tx->desc)){
+ mdelay(1000);
+ }
+ tmp_urb_len = min_t(int, gpd_length, (data_length-total_running));
+ if(usb_endpoint_xfer_isoc(&ep_tx->desc)){
+ /* if superspeed, need to add ss_ep_comp.wBytesPerInterval in ep structure */
+ max_esit_payload = mtk_xhci_get_max_esit_payload(xhci, udev, ep_tx);
+ xhci_dbg(xhci, "[Debug] max_esit_payload: %d\n", max_esit_payload);
+ if(tmp_urb_len%max_esit_payload == 0){
+ iso_num_packets = tmp_urb_len/max_esit_payload + 1;
+ }
+ else{
+ iso_num_packets = tmp_urb_len/max_esit_payload + 1;
+ }
+ }
+ xhci_dbg(xhci, "[LOOPBACK]Tx round %d, urb_length %d, buffer 0x%x, mapping 0x%x\n"
+ , i, tmp_urb_len, buffer_tx+total_running, mapping_tx+total_running);
+ urb_tx = usb_alloc_urb(iso_num_packets, GFP_KERNEL);
+ f_fill_urb_with_buffer(urb_tx, ep_out, tmp_urb_len, buffer_tx+total_running
+ , start_add, URB_DIR_OUT, iso_num_packets, max_esit_payload, mapping_tx+total_running, udev);
+ if(ret){
+ xhci_err(xhci, "[ERROR]fill tx urb Error!!\n");
+ return RET_FAIL;
+ }
+
+ ret = f_queue_urb(urb_tx,1,NULL);
+ if(ret){
+ xhci_err(xhci, "[ERROR]queue tx urb Error!!\n");
+ return RET_FAIL;
+ }
+ urb_tx->transfer_buffer = NULL;
+ urb_tx->transfer_dma = NULL;
+ usb_free_urb(urb_tx);
+ total_running += tmp_urb_len;
+ }
+
+/* RX */
+ buffer_rx = kmalloc(data_length+start_add, GFP_KERNEL);
+ memset(buffer_rx, 0, data_length+start_add);
+ mapping_rx = dma_map_single(dev, buffer_rx,data_length+start_add, DMA_BIDIRECTIONAL);
+ xhci_dbg(xhci, "buffer_rx 0x%x dma 0x%x\n", buffer_rx, mapping_rx);
+ total_running = 0;
+ xhci_dbg(xhci, "[LOOPBACK]Start to do Rx, buffer 0x%x, mapping 0x%x\n", buffer_rx, mapping_rx);
+ for(i=0; i<num_urbs; i++){
+ if(usb_endpoint_xfer_isoc(&ep_tx->desc)){
+ mdelay(1000);
+ }
+ tmp_urb_len = min_t(int, gpd_length, (data_length-total_running));
+ if(usb_endpoint_xfer_isoc(&ep_tx->desc)){
+ /* if superspeed, need to add ss_ep_comp.wBytesPerInterval in ep structure */
+ max_esit_payload = mtk_xhci_get_max_esit_payload(xhci, udev, ep_tx);
+
+ if(tmp_urb_len%max_esit_payload == 0){
+ iso_num_packets = tmp_urb_len/max_esit_payload + 1;
+ }
+ else{
+ iso_num_packets = tmp_urb_len/max_esit_payload + 1;
+ }
+ }
+ urb_rx = usb_alloc_urb(iso_num_packets, GFP_KERNEL);
+ xhci_dbg(xhci, "[LOOPBACK]Rx round %d, urb_length %d, buffer 0x%x, mapping 0x%x\n"
+ , i, tmp_urb_len, buffer_rx+total_running, mapping_rx+total_running);
+ f_fill_urb_with_buffer(urb_rx, ep_in, tmp_urb_len, buffer_rx+total_running
+ , start_add, URB_DIR_IN, iso_num_packets, max_esit_payload, mapping_rx+total_running,udev);
+ xhci_dbg(xhci, "urb_rx buffer 0x%x transfer_dma 0x%x\n", urb_rx->transfer_buffer, urb_rx->transfer_dma);
+ if(ret){
+ xhci_err(xhci, "[ERROR]fill tx urb Error!!\n");
+ return RET_FAIL;
+ }
+ ret = f_queue_urb(urb_rx,1,NULL);
+ if(ret){
+ xhci_err(xhci, "[ERROR]queue rx urb Error!!\n");
+ return RET_FAIL;
+ }
+ urb_rx->transfer_buffer = NULL;
+ urb_rx->transfer_dma = NULL;
+ usb_free_urb(urb_rx);
+ total_running += tmp_urb_len;
+ }
+ dma_sync_single_for_cpu(dev,mapping_tx,data_length+start_add,DMA_BIDIRECTIONAL);
+ dma_sync_single_for_cpu(dev,mapping_rx,data_length+start_add,DMA_BIDIRECTIONAL);
+ for(i=0; i<data_length; i++){
+ tmp1 = buffer_tx+i+start_add;
+ tmp2 = buffer_rx+i+start_add;
+ if((*tmp1) != (*tmp2)){
+ xhci_err(xhci, "[ERROR] buffer %d not match, tx buf 0x%x, rx buf 0x%x, tx 0x%x, rx 0x%x\n"
+ , i, tmp1, tmp2, *tmp1, *tmp2);
+ ret = RET_FAIL;
+ break;
+ }
+ }
+ kfree(buffer_tx);
+ kfree(buffer_rx);
+ return ret;
+}
+
+int f_loopback_sg_loop_gpd(int ep_out, int ep_in, int data_length, int start_add, int sg_len, int gpd_length, struct usb_device *usbdev){
+ struct xhci_hcd *xhci;
+ struct device *dev;
+ struct usb_device *udev, *rhdev;
+ int ret, i;;
+ u8 *tmp1, *tmp2;
+ struct urb *urb_tx, *urb_rx;
+ int iso_num_packets;
+ struct usb_host_endpoint *ep_tx, *ep_rx;
+ int max_esit_payload;
+ int num_urbs;
+ void *buffer_tx, *buffer_rx;
+ dma_addr_t mapping_tx, mapping_rx;
+ int tmp_urb_len, total_running;
+
+ ret = 0;
+ iso_num_packets = 0;
+ xhci = hcd_to_xhci(my_hcd);
+ dev = xhci_to_hcd(xhci)->self.controller;/* dma stream buffer */
+ if(!usbdev){
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+ }
+ else{
+ udev = usbdev;
+ }
+ ep_tx = udev->ep_out[ep_out];
+ ep_rx = udev->ep_in[ep_in];
+
+ ret = 0;
+ buffer_tx = kmalloc(data_length+start_add, GFP_KERNEL);
+ mapping_tx = dma_map_single(dev, buffer_tx,data_length+start_add, DMA_BIDIRECTIONAL);
+ num_urbs = data_length/gpd_length;
+ if(data_length % gpd_length != 0){
+ num_urbs++;
+ }
+ xhci_dbg(xhci, "[LOOPBACK]Num urbs: %d\n", num_urbs);
+ total_running = 0;
+ xhci_dbg(xhci, "[LOOPBACK]Start to do Tx, buffer 0x%x, mapping 0x%x\n", buffer_tx, mapping_tx);
+ for(i=0; i<num_urbs; i++){
+
+ tmp_urb_len = min_t(int, gpd_length, (data_length-total_running));
+ if(usb_endpoint_xfer_isoc(&ep_tx->desc)){
+ /* if superspeed, need to add ss_ep_comp.wBytesPerInterval in ep structure */
+ max_esit_payload = mtk_xhci_get_max_esit_payload(xhci, udev, ep_tx);
+/* printk(KERN_ERR "[Debug] max_esit_payload: %d\n", max_esit_payload); */
+ if(tmp_urb_len%max_esit_payload == 0){
+ iso_num_packets = tmp_urb_len/max_esit_payload;
+ }
+ else{
+ iso_num_packets = tmp_urb_len/max_esit_payload + 1;
+ }
+ }
+ xhci_dbg(xhci, "[LOOPBACK]Tx round %d, urb_length %d, buffer 0x%x, mapping 0x%x\n"
+ , i, tmp_urb_len, buffer_tx+total_running, mapping_tx+total_running);
+ urb_tx = usb_alloc_urb(iso_num_packets, GFP_KERNEL);
+ ret = f_fill_urb_with_buffer(urb_tx, ep_out, tmp_urb_len, buffer_tx+total_running
+ , start_add, URB_DIR_OUT, iso_num_packets, max_esit_payload, mapping_tx+total_running, udev);
+ if(ret){
+ xhci_err(xhci, "[ERROR]fill tx urb Error!!\n");
+ return RET_FAIL;
+ }
+ ret = f_fill_urb_sg(
+ urb_tx,URB_DIR_OUT,ep_out,tmp_urb_len,sg_len,udev);
+ if(ret){
+ xhci_err(xhci, "[ERROR]fill tx urb sg Error!!\n");
+ return RET_FAIL;
+ }
+ ret = f_queue_urb(urb_tx,1,NULL);
+ if(ret){
+ xhci_err(xhci, "[ERROR]queue tx urb Error!!\n");
+ return RET_FAIL;
+ }
+ urb_tx->transfer_buffer = NULL;
+ urb_tx->transfer_dma = NULL;
+ usb_free_urb(urb_tx);
+ total_running += tmp_urb_len;
+ }
+ if(usb_endpoint_xfer_isoc(&ep_tx->desc)){
+ mdelay(500);
+ }
+
+ buffer_rx = kmalloc(data_length+start_add, GFP_KERNEL);
+ memset(buffer_rx, 0, data_length+start_add);
+ mapping_rx = dma_map_single(dev, buffer_rx,data_length+start_add, DMA_BIDIRECTIONAL);
+
+ total_running = 0;
+ xhci_dbg(xhci, "[LOOPBACK]Start to do Rx, buffer 0x%x, mapping 0x%x\n", buffer_rx, mapping_rx);
+ for(i=0; i<num_urbs; i++){
+ tmp_urb_len = min_t(int, gpd_length, (data_length-total_running));
+ if(usb_endpoint_xfer_isoc(&ep_tx->desc)){
+ /* if superspeed, need to add ss_ep_comp.wBytesPerInterval in ep structure */
+ max_esit_payload = mtk_xhci_get_max_esit_payload(xhci, udev, ep_tx);
+/* printk(KERN_ERR "[Debug] max_esit_payload: %d\n", max_esit_payload); */
+ if(tmp_urb_len%max_esit_payload == 0){
+ iso_num_packets = tmp_urb_len/max_esit_payload;
+ }
+ else{
+ iso_num_packets = tmp_urb_len/max_esit_payload + 1;
+ }
+ }
+ urb_rx = usb_alloc_urb(iso_num_packets, GFP_KERNEL);
+ xhci_dbg(xhci, "[LOOPBACK]Rx round %d, urb_length %d, buffer 0x%x, mapping 0x%x\n"
+ , i, tmp_urb_len, buffer_rx+total_running, mapping_rx+total_running);
+ ret = f_fill_urb_with_buffer(urb_rx, ep_in, tmp_urb_len, buffer_rx+total_running
+ , start_add, URB_DIR_IN, iso_num_packets, max_esit_payload, mapping_rx+total_running,udev);
+ if(ret){
+ xhci_err(xhci, "[ERROR]fill tx urb Error!!\n");
+ return RET_FAIL;
+ }
+ ret = f_fill_urb_sg(
+ urb_rx,URB_DIR_IN,ep_in,tmp_urb_len,sg_len,udev);
+ if(ret){
+ xhci_err(xhci, "[ERROR]fill rx urb sg Error!!\n");
+ return RET_FAIL;
+ }
+ ret = f_queue_urb(urb_rx,1,NULL);
+ if(ret){
+ xhci_err(xhci, "[ERROR]queue rx urb Error!!\n");
+ return RET_FAIL;
+ }
+ urb_rx->transfer_buffer = NULL;
+ urb_rx->transfer_dma = NULL;
+ usb_free_urb(urb_rx);
+ total_running += tmp_urb_len;
+ }
+ dma_sync_single_for_cpu(dev,mapping_tx,data_length+start_add,DMA_BIDIRECTIONAL);
+ dma_sync_single_for_cpu(dev,mapping_rx,data_length+start_add,DMA_BIDIRECTIONAL);
+ for(i=0; i<data_length; i++){
+ tmp1 = buffer_tx+i+start_add;
+ tmp2 = buffer_rx+i+start_add;
+ if((*tmp1) != (*tmp2)){
+ xhci_err(xhci, "[ERROR] buffer %d not match, tx 0x%x, rx 0x%x\n", i, *tmp1, *tmp2);
+ ret = RET_FAIL;
+ break;
+ }
+ }
+ kfree(buffer_tx);
+ kfree(buffer_rx);
+ return ret;
+}
+
+
+struct loopback_data {
+ int dev_num;
+ struct xhci_hcd *xhci;
+ struct usb_device *udev;
+ int out_ep_num;
+ int in_ep_num;
+ struct mutex *lock;
+ char is_ctrl;
+};
+
+static int loopback_thread(void *data){
+ struct loopback_data *lb_data = data;
+ char bdp;
+ short gpd_buf_size,bd_buf_size;
+ struct device *dev;
+ struct usb_device *udev, *rhdev;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_hcd *xhci;
+ struct urb *urb_tx, *urb_rx;
+ struct usb_host_endpoint *ep_tx, *ep_rx;
+ int ep_index_tx, ep_index_rx;
+ int sg_length, length, start_add;
+ void *buffer_tx, *buffer_rx;
+ u8 *tmp1, *tmp2;
+ int ret;
+ int i;
+ int max_esit_payload;
+
+ ret = 0;
+
+ xhci = hcd_to_xhci(my_hcd);
+ dev = xhci_to_hcd(xhci)->self.controller;
+ xhci = lb_data->xhci;
+ udev = lb_data->udev;
+
+ if(lb_data->is_ctrl){
+ do{
+ length = ((get_random_int() % 128) + 1) * 4;
+ xhci_err(xhci, "[CTRL LOOPBACK] dev %d, length %d\n"
+ , lb_data->dev_num, length);
+ mutex_lock(lb_data->lock);
+ ret = dev_ctrl_loopback(length,udev);
+ if(ret){
+ xhci_err(xhci, "control loopback fail!!\n");
+ g_stress_status = RET_FAIL;
+ break;
+ }
+ mutex_unlock(lb_data->lock);
+ }while(g_stress_status != RET_FAIL);
+ }
+ else{
+ ep_tx = udev->ep_out[lb_data->out_ep_num];
+ ep_rx = udev->ep_in[lb_data->in_ep_num];
+ ep_index_tx = xhci_get_endpoint_index(&ep_tx->desc);
+ ep_index_rx = xhci_get_endpoint_index(&ep_rx->desc);
+ do{
+ length = (get_random_int() % 65023) + 1;
+ start_add = get_random_int() % 64;
+ sg_length = (get_random_int() % 4) * 1024;
+
+ bdp=1;
+ gpd_buf_size=length;
+ bd_buf_size=8192;
+
+ xhci_err(xhci, "[LOOPBACK] dev %d, ep_out %d, ep_in %d, length %d, start_add %d, sg_length %d\n"
+ , lb_data->dev_num, lb_data->out_ep_num, lb_data->in_ep_num, length, start_add, sg_length);
+ mutex_lock(lb_data->lock);
+ ret=dev_loopback(bdp,length,gpd_buf_size,bd_buf_size, 0, 0,udev);
+ if(ret)
+ {
+ xhci_err(xhci, "loopback request fail!!\n");
+ g_stress_status = RET_FAIL;
+ break;
+ }
+ mutex_unlock(lb_data->lock);
+ if((length > sg_length) && sg_length != 0 && (length/sg_length < 60)){
+ ret = f_loopback_sg_loop(
+ lb_data->out_ep_num, lb_data->in_ep_num, length, start_add, sg_length, udev);
+ }
+ else{
+ ret = f_loopback_loop(lb_data->out_ep_num, lb_data->in_ep_num, length, start_add, udev);
+ }
+ if(ret)
+ {
+ xhci_err(xhci, "loopback fail!!\n");
+ g_stress_status = RET_FAIL;
+ break;
+ }
+ }while(g_stress_status != RET_FAIL);
+ }
+}
+
+int f_add_loopback_thread(struct xhci_hcd *xhci, int dev_num
+ , struct usb_device *udev, int out_ep_num, int in_ep_num, struct mutex *lock, char is_ctrl){
+
+ struct loopback_data *lb_data;
+ xhci_err(xhci, "[LOOPBACK]Start loopback thread, devnum %d, ep_out %d, ep_in %d, isctrl %d\n"
+ , dev_num, out_ep_num, in_ep_num, is_ctrl);
+ lb_data = kzalloc(sizeof(struct loopback_data), GFP_KERNEL);
+ lb_data->xhci = xhci;
+ lb_data->dev_num = dev_num;
+ lb_data->udev = udev;
+ lb_data->out_ep_num = out_ep_num;
+ lb_data->in_ep_num = in_ep_num;
+ lb_data->lock = lock;
+ lb_data->is_ctrl = is_ctrl;
+ kthread_run(loopback_thread, lb_data, "lbt");
+}
+
+int f_slot_reset_device(int slot_id, char isWarmReset){
+ int ret, slot_state, i;
+ struct xhci_hcd *xhci;
+ struct xhci_virt_device *virt_dev;
+/* struct xhci_container_ctx *out_ctx; */
+ struct xhci_slot_ctx *out_ctx;
+ struct xhci_ep_ctx *ep_ctx;
+ struct xhci_port *port;
+ int last_freed_endpoint;
+ u32 __iomem *addr;
+ int temp;
+ int c_slot_id, port_id, port_index;
+
+ ret = 0;
+
+/* isWarmReset = false; */
+ c_slot_id = slot_id;
+ if(c_slot_id == 0)
+ c_slot_id = g_slot_id;
+ if(c_slot_id == 0){
+ printk(KERN_ERR "[ERROR] slot ID not valid\n");
+ }
+ xhci = hcd_to_xhci(my_hcd);
+ port_id = get_port_id(c_slot_id);
+ port_index = get_port_index(port_id);
+ port = rh_port[port_index];
+ port->port_status = RESET;
+
+/* g_port_reset = false; */
+ /* enablePortClockPower(port_index,0x3); */
+ if(isWarmReset && port->port_speed == USB_SPEED_SUPER){
+ enablePortClockPower(port_index,0x3); /* add by mtk40184 */
+ /* do warm reset */
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "before reset port %d = 0x%x\n", port_id-1, temp);
+ temp = xhci_port_state_to_neutral(temp);
+ temp = (temp | PORT_WR);
+ xhci_writel(xhci, temp, addr);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "after reset port %d = 0x%x\n", port_id-1, temp);
+
+ }
+ else{
+ enablePortClockPower(port_index,0x2); /* add by mtk40184 */
+ /* hot reset port */
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "before reset port %d = 0x%x\n", port_id-1, temp);
+ temp = xhci_port_state_to_neutral(temp);
+ temp = (temp | PORT_RESET);
+ xhci_writel(xhci, temp, addr);
+ }
+ wait_event_on_timeout((volatile int *)&(port->port_status), ENABLED, ATTACH_TIMEOUT);
+ if(port->port_status != ENABLED){
+ xhci_err(xhci, "[ERROR]Device reset timeout\n");
+ return RET_FAIL;
+ }
+ xhci_dbg(xhci, "port reset done\n");
+
+ virt_dev = xhci->devs[g_slot_id];
+ g_cmd_status = CMD_RUNNING;
+ ret = xhci_queue_reset_device(xhci, c_slot_id);
+ if (ret){
+ xhci_err(xhci, "[ERROR]FIXME: allocate a command ring segment\n");
+ return ret;
+ }
+ xhci_dbg(xhci, "reset dev command\n");
+ xhci_ring_cmd_db(xhci);
+ wait_event_on_timeout(&g_cmd_status, CMD_DONE, CMD_TIMEOUT);
+ if(g_cmd_status == CMD_RUNNING){
+ xhci_err(xhci, "[ERROR]reset device command timeout\n");
+ return RET_FAIL;
+ }
+ if(g_cmd_status == CMD_FAIL){
+ xhci_err(xhci, "[ERROR]reset device command failed\n");
+ return RET_FAIL;
+ }
+ xhci_dbg(xhci, "reset device success\n");
+#if 0
+ slot_state = xhci_get_slot_state(xhci, virt_dev->out_ctx);
+ print_slot_state(slot_state);
+#endif
+ xhci_dbg(xhci, "Output Control Context, after:\n");
+ xhci_dbg_slot_ctx(xhci, virt_dev->out_ctx);
+ /* check parameters */
+ out_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+ if(LAST_CTX_TO_EP_NUM(le32_to_cpu(out_ctx->dev_info)) != 0){
+ xhci_err(xhci, "[FAIL] slot context entries is not 1 after reset device command\n");
+ ret = RET_FAIL;
+ }
+ if(GET_SLOT_STATE(le32_to_cpu(out_ctx->dev_state)) != SLOT_STATE_DEFAULT){
+ xhci_err(xhci, "[FAIL] slot state is not default after reset device command\n");
+ ret = RET_FAIL;
+ }
+ /* Everything but endpoint 0 is disabled, so free or cache the rings. */
+ last_freed_endpoint = 1;
+ for (i = 1; i < 31; ++i) {
+ if (!virt_dev->eps[i].ring)
+ continue;
+ xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
+ last_freed_endpoint = i;
+ }
+ xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
+ xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
+ /* check ep_ctx */
+ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, 0);
+ if( (ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) != cpu_to_le32(EP_STATE_STOPPED)){
+ xhci_err(xhci, "[FAIL] EP0 state is not in stoped after reset device command\n");
+ ret = RET_FAIL;
+ }
+ for (i = 1; i < 31; ++i) {
+ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, i);
+ if((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) != cpu_to_le32(EP_STATE_DISABLED)){
+ xhci_err(xhci, "[FAIL] EP%d state is not in disabled after reset device command\n", i);
+ ret = RET_FAIL;
+ }
+ }
+ xhci_slot_copy(xhci, virt_dev->in_ctx, virt_dev->out_ctx);
+ return ret;
+}
+
+/*
+ * f_power_suspend() - force the port under test (g_port_id) into the U3
+ * (suspend) link state; unless a concurrent-resume test is running, gate
+ * the port clock/power around the transition and verify PLS reads back 3.
+ * Uses globals: my_hcd, g_port_id, g_concurrent_resume, rh_port[].
+ * Returns RET_SUCCESS, or RET_FAIL if the link does not reach U3.
+ */
+int f_power_suspend(){
+	int ret, i;
+	struct xhci_hcd *xhci;
+	struct xhci_virt_device *virt_dev;
+	struct xhci_port *port;
+	u32 __iomem *addr;
+	int temp;
+	int port_index;
+	int num_u3_port;
+
+	ret = RET_SUCCESS;
+#if 0
+	if(g_slot_id == 0){
+		printk(KERN_ERR "[ERROR] slot ID not valid\n");
+		return RET_FAIL;
+	}
+#endif
+	xhci = hcd_to_xhci(my_hcd);
+	num_u3_port = SSUSB_U3_PORT_NUM(readl(SSUSB_IP_CAP));
+	port_index = get_port_index(g_port_id);
+	port = rh_port[port_index];
+	/* set power/clock gating */
+	if(!g_concurrent_resume){
+		if(port->port_speed == USB_SPEED_SUPER){
+			disablePortClockPower((g_port_id-1), 0x3);
+		}
+		else{
+			/* U2 port registers follow the U3 ports, hence the offset */
+			disablePortClockPower((g_port_id-1-num_u3_port), 0x2);
+		}
+	}
+	/* set PLS = 3 */
+	addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((g_port_id-1) & 0xff);
+	temp = xhci_readl(xhci, addr);
+	xhci_dbg(xhci, "before reset port %d = 0x%x\n", g_port_id-1, temp);
+	temp = xhci_port_state_to_neutral(temp);
+	/* clear the PLS field (mask 0xf << 5), then request U3 with the
+	 * link-write-strobe bit set */
+	temp = (temp & ~(0xf << 5));
+	temp = (temp | (3 << 5) | PORT_LINK_STROBE);
+	xhci_writel(xhci, temp, addr);
+	if(!g_concurrent_resume){
+		msleep(100);
+		/* clocks must be running again before PORTSC can be read back */
+		if(port->port_speed == USB_SPEED_SUPER){
+			enablePortClockPower((g_port_id-1), 0x3);
+		}
+		else{
+			enablePortClockPower((g_port_id-1-num_u3_port), 0x2);
+		}
+		mtk_xhci_handshake(xhci, addr, (15<<5), (3<<5), 3*1000);
+		temp = xhci_readl(xhci, addr);
+		if(PORT_PLS_VALUE(temp) != 3){
+			xhci_err(xhci, "port not enter U3 state\n");
+			ret = RET_FAIL;
+		}
+		else{
+			/* success: re-gate the clocks while suspended */
+			if(port->port_speed == USB_SPEED_SUPER){
+				disablePortClockPower((g_port_id-1), 0x3);
+			}
+			else{
+				disablePortClockPower((g_port_id-1-num_u3_port), 0x2);
+			}
+			/* disablePortClockPower(); */
+			ret = RET_SUCCESS;
+		}
+	}
+	return ret;
+}
+
+/*
+ * f_power_resume() - bring the port under test back from U3 to U0.
+ * SuperSpeed links are written straight to U0; HS/FS ports are driven to
+ * Resume (PLS=15) for 20 ms first, then to U0.  Polls up to ~200 ms for
+ * PLS to read back 0.
+ * NOTE(review): virt_dev is unused, and udev/rhdev are assigned but not
+ * used afterwards.
+ * Returns RET_SUCCESS once the link is in U0, RET_FAIL otherwise.
+ */
+int f_power_resume(){
+	struct xhci_hcd *xhci;
+	struct xhci_virt_device *virt_dev;
+	struct usb_device *udev, *rhdev;
+	struct xhci_port *port;
+	u32 __iomem *addr;
+	int temp;
+	int i;
+	int port_index;
+	int num_u3_port;
+#if 0
+	if(g_slot_id == 0){
+		printk(KERN_ERR "[ERROR] slot ID not valid\n");
+		return RET_FAIL;
+	}
+#endif
+	xhci = hcd_to_xhci(my_hcd);
+	num_u3_port = SSUSB_U3_PORT_NUM(readl(SSUSB_IP_CAP));
+	port_index = get_port_index(g_port_id);
+	port = rh_port[port_index];
+	/* ungate the port clock/power before touching the link state */
+	if(!g_concurrent_resume){
+		if(port->port_speed == USB_SPEED_SUPER){
+			enablePortClockPower((g_port_id-1), 0x3);
+		}
+		else{
+			enablePortClockPower((g_port_id-1-num_u3_port), 0x2);
+		}
+	}
+	addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((g_port_id-1) & 0xff);
+	temp = xhci_readl(xhci, addr);
+	if(!g_concurrent_resume){
+		xhci_dbg(xhci, "before resume port %d = 0x%x\n", g_port_id-1, temp);
+		/* refuse to resume a port that is not actually suspended */
+		if(PORT_PLS(temp) != (3 << 5)){
+			xhci_err(xhci, "port not in U3 state\n");
+			return RET_FAIL;
+		}
+	}
+	temp = xhci_port_state_to_neutral(temp);
+	temp = (temp & ~(0xf << 5));
+	rhdev = my_hcd->self.root_hub;
+	udev = rhdev->children[g_port_id-1];
+	if(DEV_SUPERSPEED(temp)){
+		/* superspeed direct set U0 */
+		temp = (temp | PORT_LINK_STROBE);
+		xhci_writel(xhci, temp, addr);
+	}
+	else{
+		/* HS/FS, set resume for 20ms, then set U0 */
+		temp = (temp | (15 << 5) | PORT_LINK_STROBE);
+		xhci_writel(xhci, temp, addr);
+		mdelay(20);
+		temp = xhci_readl(xhci, addr);
+		temp = xhci_port_state_to_neutral(temp);
+		temp = (temp & ~(0xf << 5));
+		temp = (temp | PORT_LINK_STROBE);
+		xhci_writel(xhci, temp, addr);
+	}
+	/* poll for U0 (PLS == 0), up to 200 x 1 ms */
+	for(i=0; i<200; i++){
+		temp = xhci_readl(xhci, addr);
+		if(PORT_PLS_VALUE(temp) == 0){
+			break;
+		}
+		msleep(1);
+
+	}
+	if(PORT_PLS_VALUE(temp) != 0){
+		xhci_err(xhci, "port not return U0 state\n");
+		return RET_FAIL;
+	}
+	else{
+	}
+	return RET_SUCCESS;
+}
+
+/*
+ * f_power_remotewakeup() - suspend the port under test, then wait up to
+ * 5 s for the device to signal remote wakeup (g_port_resume is set
+ * elsewhere, presumably by the port-status event handler - confirm) and
+ * drive the link back to U0.
+ * NOTE(review): virt_dev, udev, rhdev, ret and i are unused.
+ * Returns RET_SUCCESS when the link returns to U0, RET_FAIL otherwise.
+ */
+int f_power_remotewakeup(){
+	struct xhci_hcd *xhci;
+	struct xhci_virt_device *virt_dev;
+	struct usb_device *udev, *rhdev;
+	u32 __iomem *addr;
+	struct xhci_port *port;
+	int temp, ret,i;
+	int num_u3_port;
+	int port_index;
+
+	if(g_slot_id == 0){
+		printk(KERN_ERR "[ERROR] slot ID not valid\n");
+		return RET_FAIL;
+	}
+	xhci = hcd_to_xhci(my_hcd);
+	num_u3_port = SSUSB_U3_PORT_NUM(readl(SSUSB_IP_CAP));
+	port_index = get_port_index(g_port_id);
+	port = rh_port[port_index];
+	/* suspend first */
+	addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((g_port_id-1) & 0xff);
+	temp = xhci_readl(xhci, addr);
+	xhci_dbg(xhci, "before suspend port %d = 0x%x\n", g_port_id-1, temp);
+	temp = xhci_port_state_to_neutral(temp);
+	temp = (temp & ~(0xf << 5));
+	/* request U3 (PLS=3) with the link-write-strobe bit */
+	temp = (temp | (3 << 5) | PORT_LINK_STROBE);
+	xhci_writel(xhci, temp, addr);
+	mtk_xhci_handshake(xhci, addr, (15<<5), (3<<5), 30*1000);
+	temp = xhci_readl(xhci, addr);
+	/* set power/clock gating */
+	if(PORT_PLS_VALUE(temp) != 3){
+		xhci_err(xhci, "port not enter U3 state\n");
+	}
+	else{
+		xhci_err(xhci, "port enter U3 OK\n");
+		if(port->port_speed == USB_SPEED_SUPER){
+			disablePortClockPower((g_port_id-1), 0x3);
+		}
+		else{
+			disablePortClockPower((g_port_id-1-num_u3_port), 0x2);
+		}
+	}
+	g_port_resume = 0;
+
+	/* wait_event_on_timeout((volatile int *)&g_port_resume, 1, 5000); */
+	poll_event_on_timeout((volatile int *)&g_port_resume, 1, 5000);
+	if(g_port_resume == 0){
+		xhci_err(xhci, "port not in Resume state after timeout\n");
+		return RET_FAIL;
+	}
+	else{
+	}
+
+	/* HS/FS: let resume signalling run for 20 ms before ending it */
+	if(port->port_speed != USB_SPEED_SUPER){
+		msleep(20);
+	}
+
+	port_index = get_port_index(g_port_id);
+	port = rh_port[port_index];
+
+	if(port->port_speed == USB_SPEED_SUPER){
+		enablePortClockPower((g_port_id-1), 0x3);
+	}
+	else{
+		enablePortClockPower((g_port_id-1-num_u3_port), 0x2);
+	}
+
+	/* write PLS = 0 (U0) to complete the wakeup */
+	temp = xhci_port_state_to_neutral(temp);
+	temp = (temp & ~(0xf << 5));
+	temp = (temp | PORT_LINK_STROBE);
+
+
+	xhci_writel(xhci, temp, addr);
+	msleep(20);
+	mtk_xhci_handshake(xhci, addr, (15<<5), 0, 10*1000);
+	temp = xhci_readl(xhci, addr);
+
+	if(PORT_PLS_VALUE(temp) != 0){/* (temp & PORT_PLS(15)) != PORT_PLS(0)){ */
+		xhci_err(xhci, "port not return to U0\n");
+		return RET_FAIL;
+	}
+	else{
+		xhci_warn(xhci, "port return to U0\n");
+	}
+
+	return RET_SUCCESS;
+}
+
+
+/*
+ * f_power_set_u1u2() - program the U1/U2 inactivity timeout fields of the
+ * PORTPMSC register for the port under test.
+ * @u_num: 1 = set U1 timeout (bits 7:0) from @value1,
+ *         2 = set U2 timeout (bits 15:8) from @value1,
+ *         3 = set both (U1 from @value1, U2 from @value2).
+ * A non-zero U2 timeout also gates the port clock/power; zero ungates it.
+ * Returns RET_SUCCESS, or RET_FAIL when no device slot is enabled.
+ * (Cleanup: dropped the unused locals 'ret' and 'i'.)
+ */
+int f_power_set_u1u2(int u_num, int value1, int value2){
+	struct xhci_hcd *xhci;
+	u32 __iomem *addr;
+	int temp;
+
+	if(g_slot_id == 0){
+		printk(KERN_ERR "[ERROR] slot ID not valid\n");
+		return RET_FAIL;
+	}
+
+	xhci = hcd_to_xhci(my_hcd);
+	/* PORTPMSC register of the port under test */
+	addr = &xhci->op_regs->port_power_base + NUM_PORT_REGS*((g_port_id-1) & 0xff);
+	temp = xhci_readl(xhci, addr);
+	if(u_num == 1){
+		temp = temp & (~(0x000000ff));
+		temp = temp | value1;
+	}
+	else if(u_num == 2){
+		temp = temp & (~(0x0000ff00));
+		temp = temp | (value1 << 8);
+	}
+	else if(u_num == 3){
+		temp = temp & (~(0x0000ffff));
+		temp = temp | (value1) | (value2 << 8);
+	}
+	if(u_num >= 2 && value2 > 0){
+		disablePortClockPower(g_port_id-1, 0x3);
+	}
+	else if(u_num >=2 && value2 == 0){
+		enablePortClockPower(g_port_id-1, 0x3);
+	}
+	xhci_writel(xhci, temp, addr);
+	return RET_SUCCESS;
+}
+
+#define USB3_U1_STATE_INFO (SSUSB_U3_MAC_BASE + 0x50) /* 0xf0042450 */
+#define USB3_U2_STATE_INFO (SSUSB_U3_MAC_BASE + 0x54) /* 0xf0042454 */
+
+/*
+ * f_power_reset_u1u2_counter() - clear the U1 or U2 entry counter by
+ * writing the reset bit (bit 16) of the matching state-info register.
+ * @u_num: 1 selects USB3_U1_STATE_INFO, 2 selects USB3_U2_STATE_INFO;
+ * any other value is a no-op.  Always returns RET_SUCCESS.
+ */
+int f_power_reset_u1u2_counter(int u_num){
+	const int reset_bit = (1<<16);
+
+	switch(u_num){
+	case 1:
+		writel(reset_bit, USB3_U1_STATE_INFO);
+		break;
+	case 2:
+		writel(reset_bit, USB3_U2_STATE_INFO);
+		break;
+	}
+	return RET_SUCCESS;
+}
+
+/*
+ * f_power_get_u1u2_counter() - read the U1 or U2 entry counter.
+ * @u_num: 1 for the U1 counter, 2 for the U2 counter.
+ * Returns the low 8 bits of the matching state-info register, or 0 for
+ * any other @u_num.
+ * Bugfix: the original fell off the end of this non-void function for
+ * u_num not in {1,2}, which is undefined behaviour.
+ */
+int f_power_get_u1u2_counter(int u_num){
+	if(u_num == 1){
+		return (readl(USB3_U1_STATE_INFO) & 0xff);
+	}
+	else if(u_num == 2){
+		return (readl(USB3_U2_STATE_INFO) & 0xff);
+	}
+	return 0;
+}
+
+/*
+ * f_power_send_fla() - set or clear bit 16 of PORTPMSC (FLA, per the
+ * function name) on the port under test.
+ * @value: only bit 0 is used (0 clears, 1 sets).
+ * Returns RET_SUCCESS, or RET_FAIL when no device slot is enabled.
+ * (Cleanup: dropped the unused local 'ret'.)
+ */
+int f_power_send_fla(int value){
+	struct xhci_hcd *xhci;
+	u32 __iomem *addr;
+	int temp;
+
+	if(g_slot_id == 0){
+		printk(KERN_ERR "[ERROR] slot ID not valid\n");
+		return RET_FAIL;
+	}
+	xhci = hcd_to_xhci(my_hcd);
+	addr = &xhci->op_regs->port_power_base + NUM_PORT_REGS*((g_port_id-1) & 0xff);
+	temp = xhci_readl(xhci, addr);
+	temp &= ~(1<<16);
+	temp |= ((value & 0x1)<<16);
+	xhci_writel(xhci, temp, addr);
+	return RET_SUCCESS;
+}
+
+#define USB20_LPM_ENTRY_COUNT (SSUSB_U3_XHCI_BASE + 0x3448)
+
+/*
+ * f_power_reset_L1_counter() - reset the USB2 LPM L1 entry or exit
+ * counter by writing the corresponding reset bit.
+ * @direct: 1 resets the entry counter (bit 8), 2 the exit counter (bit 9).
+ * Returns RET_SUCCESS.
+ * Bugfixes: the original (a) never returned a value from this non-void
+ * function and (b) wrote an uninitialized 'temp' to the register when
+ * @direct was neither 1 nor 2; unknown directions are now a no-op.
+ */
+int f_power_reset_L1_counter(int direct){
+	/* direct: 1:ENTRY, 2:EXIT */
+	int temp;
+
+	if(direct == 1){
+		temp = (1<<8);
+	}
+	else if (direct == 2){
+		temp = (1<<9);
+	}
+	else{
+		return RET_SUCCESS;	/* unknown direction: nothing to reset */
+	}
+	writel(temp, USB20_LPM_ENTRY_COUNT);
+	return RET_SUCCESS;
+}
+
+/*
+ * f_power_get_L1_counter() - read the USB2 LPM L1 entry/exit counter.
+ * @direct: 1 = entry counter (bits 7:0), 2 = exit counter (bits 23:16).
+ * Returns the 8-bit counter value, or 0 for an unknown @direct.
+ * Bugfix: the original used the logical operator '&&' where the bitwise
+ * mask '&' was intended, so it returned 0/1 for the entry counter and
+ * always 0 for the exit counter ((x && 0xff0000) >> 16 == 0).
+ */
+int f_power_get_L1_counter(int direct){
+	/* direct: 1:ENTRY, 2:EXIT */
+	if(direct == 1){
+		return (readl(USB20_LPM_ENTRY_COUNT) & 0xff);
+	}
+	else if (direct == 2){
+		return ((readl(USB20_LPM_ENTRY_COUNT) & 0xff0000) >> 16);
+	}
+	return 0;
+}
+
+/*
+ * f_power_get_l1s() - return the L1S field of the PORTPMSC register of
+ * the port under test, masked with MSK_L1S (value is not shifted down).
+ * (Cleanup: dropped the unused local 'port'.)
+ */
+int f_power_get_l1s(void)
+{
+	struct xhci_hcd *xhci;
+	u32 __iomem *addr;
+	int temp;
+
+	xhci = hcd_to_xhci(my_hcd);
+	/* get L1S */
+	addr = &xhci->op_regs->port_power_base + NUM_PORT_REGS*((g_port_id-1) & 0xff);
+	temp = xhci_readl(xhci, addr) & MSK_L1S;
+
+	return temp;
+}
+
+/*
+ * f_power_config_lpm() - configure USB2 LPM (L1) for the port under
+ * test: NYET/NAK active masks, PORTPMSC (RWE/BESL/device slot/HLE) and
+ * PORTHLPMC (HIRDM/L1 timeout/BESLD), an L1 exit timer, and finally gate
+ * or ungate the U2 port clock depending on whether HW LPM (hle) is on.
+ * Returns RET_SUCCESS.
+ * Bugfix: 'temp' is an int, so it is now logged with 0x%x instead of %p
+ * (which expects a pointer).  Also dropped the unused local 'ret'.
+ */
+int f_power_config_lpm(u32 slot_id, u32 hirdm, u32 L1_timeout, u32 rwe, u32 besl, u32 besld, u32 hle
+	, u32 int_nak_active, u32 bulk_nyet_active){
+	struct xhci_hcd *xhci;
+	struct xhci_port *port;
+	int port_index;
+	u32 __iomem *addr;
+	int temp;
+	int num_u3_port;
+
+	xhci = hcd_to_xhci(my_hcd);
+	/* set bulk_nyet_active_mask & int_nak_active_mask */
+	addr = (SSUSB_U3_XHCI_BASE + 0x97c);
+	temp = xhci_readl(xhci, addr);
+
+	/* directly fill port 0 reg, which U2 port should be considered in driver */
+	temp &= ~(0x1f1f);
+	temp = temp | (int_nak_active << 8) | bulk_nyet_active;
+	xhci_writel(xhci, temp, addr);
+	/* set PORTPMSC */
+	addr = &xhci->op_regs->port_power_base + NUM_PORT_REGS*((g_port_id-1) & 0xff);
+	temp = xhci_readl(xhci, addr);
+	temp = temp & ~(MSK_RWE|MSK_BESL|MSK_L1_DEV_SLOT|MSK_HLE);
+	temp |= (rwe<<3) & MSK_RWE;
+	temp |= (besl<<4) & MSK_BESL;
+	temp |= (slot_id<<8) & MSK_L1_DEV_SLOT;
+	temp |= (hle<<16) & MSK_HLE;
+	xhci_writel(xhci, temp, addr);
+	xhci_dbg(xhci, "addr: %p\n", addr);
+	xhci_dbg(xhci, "data: 0x%x\n", temp);
+
+	/* set PORTHLPMC */
+	addr = &xhci->op_regs->port_lpm_ctrl_base + NUM_PORT_REGS*((g_port_id-1) & 0xff);
+	temp = xhci_readl(xhci, addr);
+	/* HIRDM = 1, L1 TIMEOUT = 0 (128us), BESLD = 0x7; */
+	temp = hirdm | (L1_timeout << 2) | (besld << 10);
+	xhci_writel(xhci, temp, addr);
+	xhci_dbg(xhci, "addr: %p\n", addr);
+	xhci_dbg(xhci, "data: 0x%x\n", temp);
+
+	/* if hle and rwe, we want device to remote wakeup */
+	/* write LPM_L1_EXIT_TIMER to max value */
+	port_index = get_port_index(g_port_id);
+	port = rh_port[port_index];
+	addr = (SSUSB_U3_XHCI_BASE + 0x978);
+	if(hle == 1 && rwe == 1){
+		temp = 0xff;
+		xhci_writel(xhci, temp, addr);
+	}
+	else{
+		/* NOTE(review): for speeds other than HIGH/FULL this rewrites
+		 * the PORTHLPMC value computed above - confirm intended */
+		if(port->port_speed == USB_SPEED_HIGH)
+			temp = 0x4;
+		else if(port->port_speed == USB_SPEED_FULL)
+			temp = 0x8;
+		xhci_writel(xhci, temp, addr);
+	}
+	num_u3_port = SSUSB_U3_PORT_NUM(readl(SSUSB_IP_CAP));
+	if(hle == 1){
+		disablePortClockPower(g_port_id-1-num_u3_port, 0x02);
+	}
+	else{
+		enablePortClockPower(g_port_id-1-num_u3_port, 0x02);
+	}
+
+	return RET_SUCCESS;
+}
+
+
+/*
+ * f_ring_enlarge() - grow an endpoint's transfer ring by linking one new
+ * segment in right after the current enqueue segment.
+ * @ep_dir: EPADD_OUT or EPADD_IN.
+ * @ep_num: endpoint number.
+ * @dev_num: 1-based index into dev_list[], or -1 for the root-hub child
+ *           on the port under test (g_port_id).
+ * Returns 0 on success, RET_FAIL if no segment could be allocated.
+ * Bugfixes: (a) the original's "val &= ~cycle_bit" executed with
+ * cycle_bit == 0, a no-op, so the cycle bit of the new TRBs was never
+ * explicitly cleared - bit 0 is now cleared directly; (b) the result of
+ * xhci_segment_alloc() is now NULL-checked.
+ */
+int f_ring_enlarge(int ep_dir, int ep_num, int dev_num){
+	struct xhci_hcd *xhci;
+	struct usb_host_endpoint *ep;
+	struct xhci_virt_device *virt_dev;
+	struct usb_device *udev, *rhdev;
+	struct xhci_ring *ep_ring;
+	struct xhci_segment *next, *prev;
+	u32 val, cycle_bit;
+	int i, ret;
+	int slot_id, ep_index;
+
+	ret = 0;
+
+	xhci = hcd_to_xhci(my_hcd);
+	if(dev_num == -1){
+		rhdev = my_hcd->self.root_hub;
+		udev = rhdev->children[g_port_id-1];
+		slot_id = udev->slot_id;
+	}
+	else{
+		udev = dev_list[dev_num-1];
+		slot_id = udev->slot_id;
+	}
+	virt_dev = xhci->devs[udev->slot_id];
+	if(ep_dir == EPADD_OUT){
+		ep = udev->ep_out[ep_num];
+	}
+	else{
+		ep = udev->ep_in[ep_num];
+	}
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+	ep_ring = (&(virt_dev->eps[ep_index]))->ring;
+
+	prev = ep_ring->enq_seg;
+	next = xhci_segment_alloc(xhci, GFP_NOIO);
+	if(!next){
+		/* allocation failed: the ring is left untouched */
+		xhci_err(xhci, "[ERROR] ring segment allocation failed\n");
+		return RET_FAIL;
+	}
+	/* splice the new segment in after prev, keeping prev's old target */
+	next->next = prev->next;
+	next->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = cpu_to_le64(prev->next->dma);
+	val = le32_to_cpu(next->trbs[TRBS_PER_SEGMENT-1].link.control);
+	val &= ~TRB_TYPE_BITMASK;
+	val |= TRB_TYPE(TRB_LINK);
+	val |= TRB_CHAIN;
+	next->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
+	xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
+			(unsigned long long)prev->dma,
+			(unsigned long long)next->dma);
+	/* the new TRBs' cycle bit must be the inverse of the producer's
+	 * cycle state so the HW does not consider them valid yet */
+	if(ep_ring->cycle_state == 1){
+		cycle_bit = 0;
+	}
+	else{
+		cycle_bit = 1;
+	}
+	for(i=0; i<TRBS_PER_SEGMENT; i++){
+		val = le32_to_cpu(next->trbs[i].generic.field[3]);
+		if(cycle_bit == 1){
+			val |= 0x1;
+		}
+		else{
+			val &= ~0x1;	/* clear bit 0 (was a no-op before) */
+		}
+		next->trbs[i].generic.field[3] = cpu_to_le32(val);
+		xhci_dbg(xhci, "Set new segment trb %d cycle bit 0x%x\n", i, val);
+	}
+	xhci_link_segments(xhci, prev, next, true);
+	/* if prev carried the Link-Toggle-Cycle bit, move it to the new tail */
+	if(le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control) & LINK_TOGGLE){
+		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
+		val &= ~LINK_TOGGLE;
+		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
+		val = le32_to_cpu(next->trbs[TRBS_PER_SEGMENT-1].link.control);
+		val |= LINK_TOGGLE;
+		next->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
+	}
+	return ret;
+}
+
+/*
+ * f_ring_stop_ep() - issue a Stop Endpoint command for @slot_id /
+ * @ep_index and wait for its completion event (g_cmd_status is advanced
+ * by the command-completion handler).
+ * Returns RET_SUCCESS, or RET_FAIL on timeout or command failure.
+ */
+int f_ring_stop_ep(int slot_id, int ep_index){
+	struct xhci_hcd *hcd_ctrl = hcd_to_xhci(my_hcd);
+
+	g_cmd_status = CMD_RUNNING;
+	xhci_queue_stop_endpoint(hcd_ctrl, slot_id, ep_index);
+	xhci_ring_cmd_db(hcd_ctrl);
+	wait_event_on_timeout(&g_cmd_status, CMD_DONE, CMD_TIMEOUT);
+
+	switch(g_cmd_status){
+	case CMD_RUNNING:
+		/* completion event never arrived */
+		xhci_err(hcd_ctrl, "[ERROR] stop ep ring timout\n");
+		return RET_FAIL;
+	case CMD_FAIL:
+		xhci_err(hcd_ctrl, "[ERROR] stop ep ring failed\n");
+		return RET_FAIL;
+	default:
+		return RET_SUCCESS;
+	}
+}
+
+/*
+ * f_ring_set_tr_dequeue_pointer() - issue a Set TR Dequeue Pointer
+ * command to move the endpoint's dequeue pointer past a stopped TD.
+ * If no stopped TD was recorded on the endpoint, the first TD on the
+ * ring is used instead.  Waits for completion via g_cmd_status.
+ * Returns RET_SUCCESS, or RET_FAIL on timeout/command failure.
+ */
+int f_ring_set_tr_dequeue_pointer(int slot_id, int ep_index, struct urb *urb){
+	struct xhci_hcd *xhci;
+	struct xhci_ring *ep_ring;
+	struct xhci_virt_ep *ep;
+	struct xhci_td *cur_td = NULL;
+	struct xhci_dequeue_state deq_state;
+	struct xhci_virt_device *dev;
+
+	xhci = hcd_to_xhci(my_hcd);
+	dev = xhci->devs[slot_id];
+	memset(&deq_state, 0, sizeof(deq_state));
+	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+	ep = &xhci->devs[slot_id]->eps[ep_index];
+	cur_td = ep->stopped_td;
+	if(!cur_td){
+		/* no stopped TD recorded: fall back to the ring's first TD */
+		cur_td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+		dev->eps[ep_index].stopped_trb = cur_td->first_trb;
+
+	}
+	xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
+			0,
+			cur_td, &deq_state);
+	xhci_queue_new_dequeue_state(xhci,
+			slot_id, ep_index,
+			0,
+			&deq_state);
+	g_cmd_status = CMD_RUNNING;
+	xhci_ring_cmd_db(xhci);
+	wait_event_on_timeout(&g_cmd_status, CMD_DONE, CMD_TIMEOUT);
+	if(g_cmd_status == CMD_RUNNING){
+		xhci_err(xhci, "[ERROR] set tr dequeue pointer timout\n");
+		return RET_FAIL;
+	}
+	if(g_cmd_status == CMD_FAIL){
+		xhci_err(xhci, "[ERROR] set tr dequeue pointer failed\n");
+		return RET_FAIL;
+	}
+	return RET_SUCCESS;
+}
+
+/*
+ * f_ring_stop_cmd() - pause the command ring by setting CMD_RING_PAUSE
+ * in CRCR, wait for the corresponding event, then verify that the
+ * CMD_RING_RUNNING (CRR) bit has deasserted.
+ * NOTE(review): locals addr/temp/tmp_add/i are declared but never used.
+ * Returns RET_SUCCESS, or RET_FAIL on timeout, command failure, or if
+ * the ring is still running afterwards.
+ */
+int f_ring_stop_cmd(){
+	struct xhci_hcd *xhci;
+	u32 __iomem *addr;
+	u64 val_64;
+	int temp, tmp_add, i;
+
+	if(my_hcd == NULL){
+		printk(KERN_ERR "[ERROR]host controller driver not initiated\n");
+		return RET_FAIL;
+	}
+	xhci = hcd_to_xhci(my_hcd);
+
+	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+	val_64 = val_64 | CMD_RING_PAUSE;
+	xhci_dbg(xhci, "/* Setting command ring register to 0x%x\n", (unsigned int)val_64);
+	g_cmd_status = CMD_RUNNING;
+	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
+	wait_event_on_timeout(&g_cmd_status, CMD_DONE, CMD_TIMEOUT);
+	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+	if(g_cmd_status == CMD_RUNNING){
+		xhci_err(xhci, "[ERROR] stop command ring timout\n");
+		return RET_FAIL;
+	}
+	if(g_cmd_status == CMD_FAIL){
+		xhci_err(xhci, "[ERROR] stop command ring failed\n");
+		return RET_FAIL;
+	}
+	xhci_dbg(xhci, "command ring register 0x%x\n", (unsigned)val_64);
+	/* the ring must report not-running once the pause takes effect */
+	if((val_64 & CMD_RING_RUNNING) != 0){
+		xhci_err(xhci, "[ERROR] command ring doesn't stop\n");
+		return RET_FAIL;
+	}
+	return RET_SUCCESS;
+}
+
+/*
+ * f_ring_abort_cmd() - abort the currently executing command: ring the
+ * host-controller doorbell (target 0), then set CMD_RING_ABORT in CRCR
+ * and wait for the completion handler to flip g_cmd_status.
+ * NOTE(review): virt_dev is declared but never used.
+ * Returns RET_SUCCESS, or RET_FAIL on timeout.
+ */
+int f_ring_abort_cmd(){
+	struct xhci_virt_device *virt_dev;
+	struct xhci_hcd *xhci;
+	u64 val_64;
+	int temp;
+
+	/* get xhci struct */
+	xhci = hcd_to_xhci(my_hcd);
+	/* initial wait queue */
+	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+	val_64 = val_64 | CMD_RING_ABORT;
+	g_cmd_status = CMD_RUNNING;
+	/* xhci_ring_cmd_db(xhci); */
+	temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
+	xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
+	udelay(5);
+	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
+	wait_event_on_timeout(&g_cmd_status, CMD_DONE, CMD_TIMEOUT);
+	if(g_cmd_status == CMD_RUNNING){
+		xhci_err(xhci, "[ERROR] abort command timeout\n");
+		return RET_FAIL;
+	}
+	return RET_SUCCESS;
+}
+
+/*
+ * f_hub_getPortStatus() - issue a GetPortStatus class request to port
+ * @port_num of the hub at hdev_list[hdev_num-1] and assemble the reply
+ * bytes, LSB first, into *@status.
+ * NOTE(review): the reply is OR-ed into *@status byte by byte - the
+ * caller must zero *@status beforehand or stale bits survive.
+ * Returns RET_SUCCESS, or RET_FAIL if the control transfer failed.
+ */
+int f_hub_getPortStatus(int hdev_num, int port_num, u32 *status){
+	int ret;
+	struct usb_device *udev, *rhdev;
+	struct xhci_virt_device *virt_dev;
+	struct xhci_hcd *xhci;
+	struct urb *urb;
+	struct usb_ctrlrequest *dr;
+	struct usb_config_descriptor *desc;
+	int i;
+	char *tmp;
+	u32 tmp_status;
+
+	ret = RET_SUCCESS;
+
+	/* get xhci struct */
+	xhci = hcd_to_xhci(my_hcd);
+	/* initial wait queue */
+
+	rhdev = my_hcd->self.root_hub;
+	udev = hdev_list[hdev_num-1];
+	virt_dev = xhci->devs[udev->slot_id];
+	/* get status */
+
+/* urb = usb_alloc_urb(0, GFP_NOIO); */
+	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+	dr->bRequestType = USB_DIR_IN | USB_RT_PORT;
+	dr->bRequest = USB_REQ_GET_STATUS;
+	dr->wValue = cpu_to_le16(0);
+	dr->wIndex = cpu_to_le16(port_num);
+	dr->wLength = cpu_to_le16(USB_HUB_PORT_STATUS_SIZE);
+	desc = kmalloc(USB_HUB_PORT_STATUS_SIZE, GFP_KERNEL);
+	memset(desc, 0, USB_HUB_PORT_STATUS_SIZE);
+	urb = alloc_ctrl_urb(dr, (char *)desc, udev);
+	ret = f_ctrlrequest(urb, udev);
+
+	if(urb->status == 0){
+		xhci_dbg(xhci, "get status success\n buffer:\n");
+		/* accumulate reply bytes, least-significant byte first */
+		for(i=0; i<urb->transfer_buffer_length; i++){
+			tmp = urb->transfer_buffer+i;
+			xhci_dbg(xhci, "0x%x ", *tmp);
+			tmp_status = (u32)*tmp;
+			*status |= (tmp_status << (i*8));
+		}
+
+	}
+	else{
+		xhci_err(xhci, "[ERROR] get status failed\n");
+		ret = RET_FAIL;
+	}
+	kfree(dr);
+	kfree(desc);
+	usb_free_urb(urb);
+	return ret;
+}
+
+/*
+ * f_hub_sethubfeature() - issue a SET_FEATURE class request (recipient =
+ * hub, no data stage) to the hub at hdev_list[hdev_num-1].
+ * @wValue: hub feature selector.
+ * Returns RET_SUCCESS on success, RET_FAIL when the transfer fails.
+ * (Cleanup: unified the duplicated kfree/usb_free_urb paths and dropped
+ * the unused locals desc/i/tmp.)
+ */
+int f_hub_sethubfeature(int hdev_num, int wValue){
+	int status;
+	struct usb_device *udev, *rhdev;
+	struct xhci_virt_device *virt_dev;
+	struct xhci_hcd *xhci;
+	struct urb *urb;
+	struct usb_ctrlrequest *dr;
+
+	/* get xhci struct */
+	xhci = hcd_to_xhci(my_hcd);
+
+	rhdev = my_hcd->self.root_hub;
+	udev = hdev_list[hdev_num-1];
+	virt_dev = xhci->devs[udev->slot_id];
+	/* build the class request */
+	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+	dr->bRequestType = USB_RT_HUB;
+	dr->bRequest = USB_REQ_SET_FEATURE;
+	dr->wValue = cpu_to_le16(wValue);
+	dr->wIndex = cpu_to_le16(0);
+	dr->wLength = cpu_to_le16(0);
+	urb = alloc_ctrl_urb(dr, NULL, udev);
+	f_ctrlrequest(urb, udev);
+
+	status = urb->status;
+	if(status == 0){
+		xhci_dbg(xhci, "set feature success\n");
+	}
+	else{
+		xhci_err(xhci, "[ERROR] set feature failed\n");
+	}
+	kfree(dr);
+	usb_free_urb(urb);
+	return (status == 0) ? RET_SUCCESS : RET_FAIL;
+}
+
+/*
+ * f_dev_setportfeature() - issue a standard SET_FEATURE request
+ * (bRequestType 0, i.e. device recipient) to hdev_list[hdev_num-1].
+ * NOTE(review): the @wIndex parameter is never used - the request is
+ * always sent with wIndex = 0.  Confirm whether callers expect @wIndex
+ * (e.g. a test-mode selector) to be forwarded.
+ * Returns RET_SUCCESS, or RET_FAIL when the control transfer fails.
+ */
+int f_dev_setportfeature(int hdev_num, int wValue, int wIndex){
+	int ret;
+	struct usb_device *udev, *rhdev;
+	struct xhci_virt_device *virt_dev;
+	struct xhci_hcd *xhci;
+	struct urb *urb;
+	struct usb_ctrlrequest *dr;
+	struct usb_config_descriptor *desc;
+	int i;
+	char *tmp;
+	ret = RET_SUCCESS;
+
+	/* get xhci struct */
+	xhci = hcd_to_xhci(my_hcd);
+	/* initial wait queue */
+
+	rhdev = my_hcd->self.root_hub;
+	udev = hdev_list[hdev_num-1];
+	virt_dev = xhci->devs[udev->slot_id];
+	/* set configuration */
+/* urb = usb_alloc_urb(0, GFP_NOIO); */
+	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+	dr->bRequestType = 0;
+	dr->bRequest = USB_REQ_SET_FEATURE;
+	dr->wValue = cpu_to_le16(wValue);
+	dr->wIndex = cpu_to_le16(0);
+	dr->wLength = cpu_to_le16(0);
+	urb = alloc_ctrl_urb(dr, NULL, udev);
+	f_ctrlrequest(urb, udev);
+
+	if(urb->status == 0){
+		xhci_dbg(xhci, "set feature success\n");
+		kfree(dr);
+		usb_free_urb(urb);
+	}
+	else{
+		xhci_err(xhci, "[ERROR] set feature failed %d\n", urb->status);
+		ret = urb->status;
+		kfree(dr);
+		usb_free_urb(urb);
+		return RET_FAIL;
+	}
+
+	return ret;
+}
+
+/*
+ * f_hub_setportfeature() - issue a SetPortFeature class request
+ * (USB_RT_PORT / USB_REQ_SET_FEATURE) to port @wIndex of the hub at
+ * hdev_list[hdev_num-1].
+ * @wValue: port feature selector.
+ * Returns RET_SUCCESS on success, RET_FAIL when the transfer fails.
+ * (Cleanup: unified the duplicated kfree/usb_free_urb paths and dropped
+ * the unused locals desc/i/tmp.)
+ */
+int f_hub_setportfeature(int hdev_num, int wValue, int wIndex){
+	int status;
+	struct usb_device *udev, *rhdev;
+	struct xhci_virt_device *virt_dev;
+	struct xhci_hcd *xhci;
+	struct urb *urb;
+	struct usb_ctrlrequest *dr;
+
+	/* get xhci struct */
+	xhci = hcd_to_xhci(my_hcd);
+
+	rhdev = my_hcd->self.root_hub;
+	udev = hdev_list[hdev_num-1];
+	virt_dev = xhci->devs[udev->slot_id];
+	/* build the class request */
+	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+	dr->bRequestType = USB_RT_PORT;
+	dr->bRequest = USB_REQ_SET_FEATURE;
+	dr->wValue = cpu_to_le16(wValue);
+	dr->wIndex = cpu_to_le16(wIndex);
+	dr->wLength = cpu_to_le16(0);
+	urb = alloc_ctrl_urb(dr, NULL, udev);
+	f_ctrlrequest(urb, udev);
+
+	status = urb->status;
+	if(status == 0){
+		xhci_dbg(xhci, "set feature success\n");
+	}
+	else{
+		xhci_err(xhci, "[ERROR] set feature failed\n");
+	}
+	kfree(dr);
+	usb_free_urb(urb);
+	return (status == 0) ? RET_SUCCESS : RET_FAIL;
+}
+
+/*
+ * f_hub_clearportfeature() - issue a ClearPortFeature class request
+ * (USB_RT_PORT / USB_REQ_CLEAR_FEATURE) to port @wIndex of the hub at
+ * hdev_list[hdev_num-1].
+ * @wValue: port feature selector to clear.
+ * Returns RET_SUCCESS, or RET_FAIL when the control transfer fails.
+ */
+int f_hub_clearportfeature(int hdev_num, int wValue, int wIndex){
+	int ret;
+	struct usb_device *udev, *rhdev;
+	struct xhci_hcd *xhci;
+	struct urb *urb;
+	struct usb_ctrlrequest *dr;
+	struct usb_config_descriptor *desc;
+	int i;
+	char *tmp;
+	ret = RET_SUCCESS;
+
+	/* get xhci struct */
+	xhci = hcd_to_xhci(my_hcd);
+	/* initial wait queue */
+
+	rhdev = my_hcd->self.root_hub;
+	udev = hdev_list[hdev_num-1];
+	/* set */
+/* urb = usb_alloc_urb(0, GFP_NOIO); */
+	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+	dr->bRequestType = USB_RT_PORT;
+	dr->bRequest = USB_REQ_CLEAR_FEATURE;
+	dr->wValue = cpu_to_le16(wValue);
+	dr->wIndex = cpu_to_le16(wIndex);
+	dr->wLength = cpu_to_le16(0);
+
+	urb = alloc_ctrl_urb(dr, NULL, udev);
+
+	f_ctrlrequest(urb, udev);
+	if(urb->status == 0){
+		xhci_dbg(xhci, "clear feature success\n");
+
+	}
+	else{
+		xhci_err(xhci, "[ERROR] clear feature failed\n");
+		ret = RET_FAIL;
+	}
+	kfree(dr);
+	usb_free_urb(urb);
+	return ret;
+}
+
+/*
+ * f_hub_configep() - bring a freshly addressed hub (hdev_list[hdev_num-1])
+ * to the configured state: read the device and config descriptors, issue
+ * SET_CONFIGURATION(1), and configure the hub's interrupt-IN status
+ * change endpoint in the xHC contexts.  SuperSpeed hubs additionally get
+ * a Set Hub Depth request (bRequest 0x0c) and SS endpoint-companion
+ * values; HS/FS hubs get a plain interrupt endpoint.
+ * @rh_port_index: index into rh_port[], used only to look up port speed.
+ * NOTE(review): config_cmd, ctrl_ctx, slot_ctx, in_ctx, out_ctx and
+ * slot_state are declared but never used; the usb_host_endpoint
+ * allocated here is not freed - presumably owned by udev after
+ * f_udev_add_ep(), confirm.
+ * Returns RET_SUCCESS or RET_FAIL.
+ */
+int f_hub_configep(int hdev_num, int rh_port_index){
+	struct xhci_hcd *xhci;
+	struct usb_device *udev, *rhdev;
+	struct xhci_virt_device *virt_dev;
+	struct xhci_command *config_cmd;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	struct xhci_slot_ctx *slot_ctx;
+	struct xhci_container_ctx *in_ctx, *out_ctx;
+	struct urb *urb;
+	struct usb_ctrlrequest *dr;
+	struct usb_host_endpoint *ep;
+	void *buffer;
+	int slot_state;
+	int ret, i;
+	char *tmp;
+	struct xhci_port *port;
+	xhci = hcd_to_xhci(my_hcd);
+	udev = hdev_list[hdev_num-1];
+	virt_dev = xhci->devs[udev->slot_id];
+	port = rh_port[rh_port_index];
+
+	/* get device descriptor */
+/* urb = usb_alloc_urb(0, GFP_NOIO); */
+	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+	dr->bRequestType = USB_DIR_IN;
+	dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+	dr->wValue = cpu_to_le16((USB_DT_DEVICE << 8) + 0);
+	dr->wIndex = cpu_to_le16(0);
+	dr->wLength = cpu_to_le16(USB_DT_DEVICE_SIZE);
+	buffer = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
+	memset(buffer, 0, USB_DT_DEVICE_SIZE);
+	urb = alloc_ctrl_urb(dr, buffer, udev);
+	f_ctrlrequest(urb, udev);
+
+	if(urb->status == 0){
+		xhci_dbg(xhci, "get descriptor success\n buffer:\n");
+		for(i=0; i<urb->transfer_buffer_length; i++){
+			tmp = urb->transfer_buffer+i;
+			xhci_dbg(xhci, "0x%x ", *tmp);
+		}
+		kfree(dr);
+		kfree(buffer);
+		usb_free_urb(urb);
+	}
+	else{
+		xhci_err(xhci, "[ERROR] get descriptor failed\n");
+		ret = urb->status;
+		kfree(dr);
+		kfree(buffer);
+		usb_free_urb(urb);
+		return RET_FAIL;
+	}
+	/* get config descriptor 255 bytes */
+/* urb = usb_alloc_urb(0, GFP_NOIO); */
+	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+	dr->bRequestType = USB_DIR_IN;
+	dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+	dr->wValue = cpu_to_le16((USB_DT_CONFIG << 8) + 0);
+	dr->wIndex = cpu_to_le16(0);
+	dr->wLength = cpu_to_le16(255);
+	buffer = kmalloc(255, GFP_KERNEL);
+	memset(buffer, 0, 255);
+
+	urb = alloc_ctrl_urb(dr, buffer, udev);
+	f_ctrlrequest(urb, udev);
+	if(urb->status == 0){
+		xhci_dbg(xhci, "get config descriptor success\n buffer:\n");
+		for(i=0; i<urb->transfer_buffer_length; i++){
+			tmp = urb->transfer_buffer+i;
+			xhci_dbg(xhci, "0x%x ", *tmp);
+		}
+		kfree(dr);
+		kfree(buffer);
+		usb_free_urb(urb);
+	}
+	else{
+		xhci_err(xhci, "[ERROR] get config descriptor failed\n");
+		ret = urb->status;
+		kfree(dr);
+		kfree(buffer);
+		usb_free_urb(urb);
+		return RET_FAIL;
+	}
+#if 0
+	/* get hub descriptor */
+/* urb = usb_alloc_urb(0, GFP_NOIO); */
+	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+	dr->bRequestType = USB_DIR_IN | USB_RT_HUB;
+	dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+	dr->wValue = cpu_to_le16(0);
+	dr->wIndex = cpu_to_le16(0);
+	dr->wLength = cpu_to_le16(71);
+	buffer = kmalloc(71, GFP_KERNEL);
+	memset(buffer, 0, 71);
+	urb = alloc_ctrl_urb(dr, buffer, udev);
+	f_ctrlrequest(urb, udev);
+
+	if(urb->status == 0){
+		xhci_dbg(xhci, "get hub descriptor success\n buffer:\n");
+		for(i=0; i<urb->transfer_buffer_length; i++){
+			tmp = urb->transfer_buffer+i;
+			xhci_dbg(xhci, "0x%x ", *tmp);
+		}
+		kfree(dr);
+		kfree(buffer);
+		usb_free_urb(urb);
+	}
+	else{
+		xhci_err(xhci, "[ERROR] get hub descriptor failed\n");
+		ret = urb->status;
+		kfree(dr);
+		kfree(buffer);
+		usb_free_urb(urb);
+		return RET_FAIL;
+	}
+#endif
+	/* set configuration */
+/* urb = usb_alloc_urb(0, GFP_NOIO); */
+	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+	dr->bRequestType = 0;
+	dr->bRequest = USB_REQ_SET_CONFIGURATION;
+	dr->wValue = cpu_to_le16(1);
+	dr->wIndex = cpu_to_le16(0);
+	dr->wLength = cpu_to_le16(0);
+	buffer = NULL;
+
+	urb = alloc_ctrl_urb(dr, buffer, udev);
+	f_ctrlrequest(urb, udev);
+	if(urb->status == 0){
+		xhci_dbg(xhci, "set configuration success\n");
+		kfree(dr);
+		usb_free_urb(urb);
+	}
+	else{
+		xhci_err(xhci, "[ERROR] set configuration failed\n");
+		ret = urb->status;
+		kfree(dr);
+		usb_free_urb(urb);
+		return RET_FAIL;
+	}
+	/* superspeed set hub depth */
+	/* config ep */
+	if(port->port_speed == USB_SPEED_SUPER){
+		/* Set Hub Depth (vendor-visible class request 0x0c, type 0x20) */
+		dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+		dr->bRequestType = 0x20;
+		dr->bRequest = 0x0c;
+		dr->wValue = cpu_to_le16(0);
+		dr->wIndex = cpu_to_le16(0);
+		dr->wLength = cpu_to_le16(0);
+
+		urb = alloc_ctrl_urb(dr, buffer, udev);
+		f_ctrlrequest(urb, udev);
+		if(urb->status == 0){
+			xhci_dbg(xhci, "set hub depth success\n");
+			kfree(dr);
+			usb_free_urb(urb);
+		}
+		else{
+			xhci_err(xhci, "[ERROR] set hub depth failed\n");
+			ret = urb->status;
+			kfree(dr);
+			usb_free_urb(urb);
+			return RET_FAIL;
+		}
+		/* config endpoint */
+		/* prepare ep description */
+		ep = kmalloc(sizeof(struct usb_host_endpoint), GFP_NOIO);
+		ep->desc.bDescriptorType = USB_DT_ENDPOINT;
+		ep->desc.bEndpointAddress = EPADD_NUM(1) | EPADD_IN;
+		ep->desc.bmAttributes = EPATT_INT;
+		ep->desc.wMaxPacketSize = 2;
+		ep->desc.bInterval = 16;
+		ep->ss_ep_comp.bMaxBurst = 0;
+		ep->ss_ep_comp.bmAttributes = 0;
+		ep->ss_ep_comp.wBytesPerInterval = 2;
+		/* SW add endpoint in context */
+		ret = f_udev_add_ep(ep, udev);
+		/* ret = xhci_mtk_add_endpoint(my_hcd, udev, ep); */
+		if(ret){
+			xhci_err(xhci, "[ERROR] add endpoint failed\n");
+			return RET_FAIL;
+		}
+		ret = f_xhci_config_ep(udev);
+		if(ret){
+			xhci_err(xhci, "[ERROR] config endpoint failed\n");
+			return RET_FAIL;
+		}
+
+	}
+	else{
+		/* config endpoint */
+		/* prepare ep description */
+		ep = kmalloc(sizeof(struct usb_host_endpoint), GFP_NOIO);
+		ep->desc.bDescriptorType = USB_DT_ENDPOINT;
+		ep->desc.bEndpointAddress = EPADD_NUM(1) | EPADD_IN;
+		ep->desc.bmAttributes = EPATT_INT;
+		ep->desc.wMaxPacketSize = 1;
+		ep->desc.bInterval = 0xFE;/* 2046; */
+		/* SW add endpoint in context */
+		ret = f_udev_add_ep(ep, udev);
+		/* ret = xhci_mtk_add_endpoint(my_hcd, udev, ep); */
+		if(ret){
+			xhci_err(xhci, "[ERROR] add endpoint failed\n");
+			return RET_FAIL;
+		}
+		ret = f_xhci_config_ep(udev);
+		if(ret){
+			xhci_err(xhci, "[ERROR] config endpoint failed\n");
+			return RET_FAIL;
+		}
+	}
+	return RET_SUCCESS;
+
+}
+
+/*
+ * f_hub_config_subhub() - enumerate and configure a hub attached to a
+ * port of another (parent) hub: wait for connect, reset the port, derive
+ * the downstream speed from wPortStatus, allocate/address a usb_device,
+ * configure it as a hub, power its ports and clear stale connect-change
+ * bits.
+ * @parent_hub_num/@hub_num: 1-based indices into hdev_list[].
+ * @port_num: 1-based port on the parent hub.
+ * Returns RET_SUCCESS or RET_FAIL.
+ * Bugfix: 'status' is now initialized to 0 - the first while condition
+ * read it before f_hub_getPortStatus() ever wrote it, and that helper
+ * only ORs bits in.  Also dropped the unused virt_dev/ctrl_ctx locals
+ * and a dead store of hdev->children[] into udev.
+ */
+int f_hub_config_subhub(int parent_hub_num, int hub_num, int port_num){
+	u32 status = 0;
+	struct usb_device *udev, *hdev;
+	struct xhci_hcd *xhci;
+	int ret, i, speed;
+	int count = 0;
+	int count_down = 1000;
+
+	xhci = hcd_to_xhci(my_hcd);
+
+	hdev = hdev_list[parent_hub_num-1];
+
+	count = count_down;
+	while(!(status & USB_PORT_STAT_CONNECTION) && count > 0){
+		ret = f_hub_getPortStatus(parent_hub_num, port_num, &status);
+		if(ret != RET_SUCCESS){
+			xhci_err(xhci, "[ERROR] Get status failed\n");
+			return RET_FAIL;
+		}
+		count--;
+	}
+	if(count == 0){
+		xhci_err(xhci, "[ERRROR] Wait port connection status timeout\n");
+		return RET_FAIL;
+	}
+	xhci_dbg(xhci, "Got port connection status\n");
+
+	if(f_hub_setportfeature(parent_hub_num, HUB_FEATURE_PORT_RESET, port_num) != RET_SUCCESS){
+		xhci_err(xhci, "[ERROR] Set port reset failed\n");
+		return RET_FAIL;
+	}
+	status = 0;
+	count = count_down;
+	/* wPortChange lives in the upper 16 bits of the assembled status */
+	while(!((status>>16) & USB_PORT_STAT_C_RESET) && count > 0){
+		ret = f_hub_getPortStatus(parent_hub_num, port_num, &status);
+		if(ret != RET_SUCCESS){
+			xhci_err(xhci, "[ERROR] Get status failed\n");
+			return RET_FAIL;
+		}
+		count--;
+	}
+	if(count == 0){
+		xhci_err(xhci, "[ERROR] Wait port reset change status timeout\n");
+		return RET_FAIL;
+	}
+	xhci_dbg(xhci, "Got port reset change status\n");
+
+	/* FIXME: check superspeed */
+	if(status & USB_PORT_STAT_HIGH_SPEED){
+		speed = USB_SPEED_HIGH;
+	}
+	else if(status & USB_PORT_STAT_LOW_SPEED){
+		speed = USB_SPEED_LOW;
+	}
+	else{
+		speed = USB_SPEED_FULL;
+	}
+
+	if(f_hub_clearportfeature(parent_hub_num, HUB_FEATURE_C_PORT_RESET, port_num) != RET_SUCCESS){
+		xhci_err(xhci, "[ERROR] Set port reset failed\n");
+		return RET_FAIL;
+	}
+	/* new usb device */
+	udev = mtk_usb_alloc_dev(hdev, hdev->bus, port_num);
+	udev->level = hdev->level + 1;
+	udev->speed = speed;
+	hdev->children[port_num-1] = udev;
+	hdev_list[hub_num-1] = udev;
+	print_speed(udev->speed);
+	/* inherit or create the transaction translator for LS/FS behind HS */
+	if (hdev->tt) {
+		udev->tt = hdev->tt;
+		udev->ttport = hdev->ttport;
+	}
+	else if(udev->speed != USB_SPEED_HIGH
+			&& hdev->speed == USB_SPEED_HIGH) {
+		udev->tt = kzalloc(sizeof(struct usb_tt), GFP_KERNEL);
+		udev->tt->hub = hdev;
+		udev->tt->multi = false;
+		udev->tt->think_time = 0;
+		udev->ttport = port_num;
+	}
+	/* enable slot */
+	ret = f_enable_slot(udev);
+
+	if(ret != RET_SUCCESS){
+		return RET_FAIL;
+	}
+	/* address device */
+	ret = f_address_slot(false, NULL);
+	if(ret != RET_SUCCESS){
+		return RET_FAIL;
+	}
+	if(f_hub_configep(hub_num, 0) != RET_SUCCESS){
+		xhci_err(xhci, "config hub endpoint failed\n");
+		return RET_FAIL;
+	}
+	/* set port_power */
+	for(i=1; i<=4; i++){
+		if(f_hub_setportfeature(hub_num, HUB_FEATURE_PORT_POWER, i) != RET_SUCCESS){
+			xhci_err(xhci, "[ERROR] set port_power 1 failed\n");
+			return RET_FAIL;
+		}
+	}
+	/* clear C_PORT_CONNECTION */
+	for(i=1; i<=4; i++){
+		if(f_hub_clearportfeature(hub_num, HUB_FEATURE_C_PORT_CONNECTION, i) != RET_SUCCESS){
+			xhci_err(xhci, "[ERROR] clear c_port_connection failed\n");
+		}
+	}
+
+	return RET_SUCCESS;
+}
+
+/*
+ * f_hub_init_device() - enumerate a device on @port_num of the hub at
+ * hdev_list[hub_num-1]: wait for connect, reset the port, derive the
+ * speed from wPortStatus, allocate the usb_device, then enable and
+ * address its xHC slot.  The new device is stored in dev_list[dev_num-1].
+ * NOTE(review): 'udev = hdev->children[port_num-1]' below is a dead
+ * store (immediately overwritten); virt_dev/ctrl_ctx/slot_state unused.
+ * Returns RET_SUCCESS or RET_FAIL.
+ */
+int f_hub_init_device(int hub_num, int port_num, int dev_num){
+	u32 status;
+	int ret;
+	struct usb_device *udev, *hdev;
+	struct xhci_virt_device *virt_dev;
+	struct xhci_hcd *xhci;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	int slot_state;
+	int count = 0;
+	int count_down = 1000;
+	int speed;
+
+	xhci = hcd_to_xhci(my_hcd);
+	hdev = hdev_list[hub_num-1];
+	status = 0;
+	count = count_down;
+	/* poll until the port reports a connection (status bits are OR-ed
+	 * in by f_hub_getPortStatus) */
+	while(!(status & USB_PORT_STAT_CONNECTION) && count > 0){
+		ret = f_hub_getPortStatus(hub_num, port_num, &status);
+		if(ret != RET_SUCCESS){
+			xhci_err(xhci, "[ERROR] Get status failed\n");
+			return RET_FAIL;
+		}
+		count--;
+	}
+	if(count == 0){
+		xhci_err(xhci, "[ERRROR] Wait port connection status timeout\n");
+		return RET_FAIL;
+	}
+	xhci_dbg(xhci, "Got port connection status\n");
+	if(f_hub_setportfeature(hub_num, HUB_FEATURE_PORT_RESET, port_num) != RET_SUCCESS){
+		xhci_err(xhci, "[ERROR] Set port reset failed\n");
+		return RET_FAIL;
+	}
+
+	status = 0;
+	count = count_down;
+	/* wPortChange lives in the upper 16 bits of the assembled status */
+	while(!((status>>16) & USB_PORT_STAT_C_RESET) && count > 0){
+		ret = f_hub_getPortStatus(hub_num, port_num, &status);
+		if(ret != RET_SUCCESS){
+			xhci_err(xhci, "[ERROR] Get status failed\n");
+			return RET_FAIL;
+		}
+		count--;
+	}
+	if(count == 0){
+		xhci_err(xhci, "[ERROR] Wait port reset change status timeout\n");
+		return RET_FAIL;
+	}
+	xhci_dbg(xhci, "Port reset done\n");
+
+	/* FIXME: check superspeed */
+	if(status & USB_PORT_STAT_HIGH_SPEED){
+		speed = USB_SPEED_HIGH;
+	}
+	else if(status & USB_PORT_STAT_LOW_SPEED){
+		speed = USB_SPEED_LOW;
+	}
+	else{
+		speed = USB_SPEED_FULL;
+	}
+
+	if(f_hub_clearportfeature(hub_num, HUB_FEATURE_C_PORT_RESET, port_num) != RET_SUCCESS){
+		xhci_err(xhci, "[ERROR] Set port reset failed\n");
+		return RET_FAIL;
+	}
+	/* new usb device */
+	udev = hdev->children[port_num-1];
+	udev = mtk_usb_alloc_dev(hdev, hdev->bus, port_num);
+	udev->level = hdev->level + 1;
+	udev->speed = speed;
+	hdev->children[port_num-1] = udev;
+	/* need to add tt handler */
+	if (hdev->tt) {
+		udev->tt = hdev->tt;
+		udev->ttport = hdev->ttport;
+	}
+	else if(udev->speed != USB_SPEED_HIGH
+			&& hdev->speed == USB_SPEED_HIGH) {
+		udev->tt = kzalloc(sizeof(struct usb_tt), GFP_KERNEL);
+		udev->tt->hub = hdev;
+		udev->tt->multi = false;
+		udev->tt->think_time = 0;
+		udev->ttport = port_num;
+
+	}
+	dev_list[dev_num-1] = udev;
+	print_speed(udev->speed);
+	/* enable slot */
+	ret = f_enable_slot(udev);
+	if(ret != RET_SUCCESS){
+		return RET_FAIL;
+	}
+
+	/* address device */
+	ret = f_address_slot(false, udev);
+	if(ret != RET_SUCCESS){
+		return RET_FAIL;
+	}
+
+	return RET_SUCCESS;
+}
+
+int f_hub_reset_dev(struct usb_device *udev,int dev_num, int port_num, int speed){ /* device reset + power-cycle of hub-1's port, then full re-enumeration */
+	struct xhci_hcd *xhci;
+	int ret;
+	int count; /* NOTE(review): count / count_down are never used here */
+	int count_down = 1000;
+
+	xhci = hcd_to_xhci(my_hcd);
+	ret = RET_SUCCESS;
+	ret = dev_reset(speed,udev); /* dev_reset's result is what this function ultimately returns */
+	if(f_hub_clearportfeature(1, HUB_FEATURE_PORT_POWER, port_num) != RET_SUCCESS){ /* NOTE(review): hub index hard-coded to 1 throughout */
+		xhci_err(xhci, "[ERROR] clear port_power %d failed\n", port_num);
+		return RET_FAIL;
+	}
+	mdelay(500); /* let the port power actually drop before re-powering */
+	if(f_hub_setportfeature(1, HUB_FEATURE_PORT_POWER, port_num) != RET_SUCCESS){
+		xhci_err(xhci, "[ERROR] set port_power %d failed\n", port_num);
+		return RET_FAIL;
+	}
+	if(f_hub_clearportfeature(1, HUB_FEATURE_C_PORT_CONNECTION, port_num) != RET_SUCCESS){
+		xhci_err(xhci, "[ERROR] clear c_port_connection failed\n"); /* best-effort: failure logged but not fatal */
+	}
+	g_slot_id = udev->slot_id;
+	f_disable_slot(); /* operates on g_slot_id set just above */
+	kfree(udev); /* NOTE(review): assumes udev was kmalloc'd and has no remaining references — confirm against mtk_usb_alloc_dev */
+	dev_list[dev_num-1] = NULL;
+	f_hub_init_device(1, port_num, dev_num); /* re-enumerate; NOTE(review): return status ignored */
+	return ret;
+}
+
+
+#define DATA_LENGTH 2000
+
+#define TOTAL_RX_URB 30
+#define TOTAL_TX_URB 30
+#define URB_STATUS_IDLE 150 /* private sentinel: urb is free to be re-armed (not a kernel urb status) */
+#define URB_STATUS_TX 151 /* private sentinel: urb's buffer is being echoed on the TX side */
+
+struct ixia_data { /* shared context handed to the three ixia loopback threads */
+	int dev_num;
+	struct xhci_hcd *xhci;
+	struct usb_device *udev;
+	int ep_out;
+	int ep_in;
+	struct urb *urb_rx_list[TOTAL_RX_URB];
+	struct urb *urb_tx_list[TOTAL_TX_URB]; /* was sized with TOTAL_RX_URB; same value, but use the matching macro */
+};
+
+static int ixia_rx_thread(void *data){ /* re-arms RX urbs in round-robin order once the TX side marks them URB_STATUS_IDLE */
+	int ret;
+	struct urb *urb_rx;
+	struct ixia_data *ix_data = data;
+	struct ixia_dev *ix_dev = ix_dev_list[ix_data->dev_num - 1]; /* NOTE(review): unused */
+	struct usb_device *udev;
+	struct xhci_virt_device *virt_dev; /* NOTE(review): unused */
+	struct xhci_hcd *xhci;
+	struct usb_host_endpoint *ep_tx, *ep_rx;
+	int ep_index_tx, ep_index_rx;
+	int max_esit_payload;
+	int rx_index;
+	char is_running;
+	int rx_status;
+
+	is_running = true;
+	ret = 0;
+	xhci = ix_data->xhci;
+	udev = ix_data->udev;
+	ep_tx = udev->ep_out[ix_data->ep_out];
+	ep_rx = udev->ep_in[ix_data->ep_in];
+	ep_index_tx = xhci_get_endpoint_index(&ep_tx->desc);
+	ep_index_rx = xhci_get_endpoint_index(&ep_rx->desc);
+	max_esit_payload = mtk_xhci_get_max_esit_payload(xhci, udev, ep_tx);
+	rx_index = 0;
+	xhci_err(xhci, "[IXIA] start rx threading\n");
+	do{
+		urb_rx = ix_data->urb_rx_list[rx_index];
+		rx_status = urb_rx->status;
+		while(rx_status != URB_STATUS_IDLE){ /* busy-wait (1ms steps) until this urb is recycled by the TX-done thread */
+			if(rx_status != 0 && rx_status != -EINPROGRESS
+				&& rx_status != URB_STATUS_IDLE && rx_status != URB_STATUS_TX){
+				xhci_err(xhci, "[ERROR] urb_rx %d not in valid status - %d\n", rx_index, urb_rx->status); /* was uninitialized 'i' */
+				is_running = false;
+				break;
+			}
+			msleep(1);
+			rx_status = urb_rx->status;
+		}
+		xhci_dbg(xhci, "[IXIA] queue rx urb %d\n", rx_index);
+		/* queue free rx urb */
+		/* memset(urb_rx->transfer_buffer, 0, 1514); */
+		urb_rx->actual_length = 0;
+		urb_rx->status = -EINPROGRESS;
+		f_queue_urb(urb_rx,0,udev);
+		/* ***************** */
+		rx_index++;
+		if(rx_index == TOTAL_RX_URB){
+			rx_index = 0;
+		}
+	}while(is_running);
+	xhci_err(xhci, "[ERROR] exit rx urb handler thread\n");
+	return 0;
+}
+
+static int ixia_rx_done_thread(void *data){ /* waits for each RX urb to complete, then echoes its buffer out on the matching TX urb */
+	int ret;
+	struct urb *urb_tx, *urb_rx;
+	struct ixia_data *ix_data = data;
+	struct ixia_dev *ix_dev = ix_dev_list[ix_data->dev_num - 1]; /* NOTE(review): unused */
+	struct usb_device *udev;
+	struct xhci_virt_device *virt_dev; /* NOTE(review): unused */
+	struct xhci_hcd *xhci;
+	struct usb_host_endpoint *ep_tx, *ep_rx;
+	int ep_index_tx, ep_index_rx;
+	int max_esit_payload;
+	int rx_index;
+	char is_running;
+	int rx_status;
+
+	is_running = true;
+	ret = 0;
+	xhci = ix_data->xhci;
+	udev = ix_data->udev;
+	ep_tx = udev->ep_out[ix_data->ep_out];
+	ep_rx = udev->ep_in[ix_data->ep_in];
+	ep_index_tx = xhci_get_endpoint_index(&ep_tx->desc);
+	ep_index_rx = xhci_get_endpoint_index(&ep_rx->desc);
+	max_esit_payload = mtk_xhci_get_max_esit_payload(xhci, udev, ep_tx);
+	rx_index = 0;
+	xhci_err(xhci, "[IXIA] start rx done threading\n");
+	do{
+		urb_rx = ix_data->urb_rx_list[rx_index];
+		rx_status = urb_rx->status;
+		while(rx_status != 0){ /* 0 == urb completed successfully */
+			if(rx_status != 0 && rx_status != -EINPROGRESS
+				&& rx_status != URB_STATUS_IDLE && rx_status != URB_STATUS_TX){
+				xhci_err(xhci, "[ERROR] urb_rx %d not in valid status - %d\n", rx_index, urb_rx->status); /* was uninitialized 'i' */
+				is_running = false;
+				break;
+			}
+			msleep(1);
+			rx_status = urb_rx->status;
+		}
+		xhci_dbg(xhci, "[IXIA] rx urb %d success, queue tx urb\n", rx_index);
+		urb_rx->status = URB_STATUS_TX; /* hand this buffer to the TX side */
+		/* queue tx urb */
+		urb_tx = ix_data->urb_tx_list[rx_index];
+		urb_tx->status = -EINPROGRESS;
+		urb_tx->transfer_buffer = urb_rx->transfer_buffer; /* TX urb borrows the RX urb's buffer and DMA mapping */
+		urb_tx->transfer_buffer_length = urb_rx->actual_length;
+		urb_tx->transfer_dma = urb_rx->transfer_dma;
+		xhci_dbg(xhci, "[IXIA] tx urb 0x%x\n", urb_tx);
+		f_queue_urb(urb_tx,0,udev);
+		/* ***************** */
+		rx_index++;
+		if(rx_index == TOTAL_RX_URB){
+			rx_index = 0;
+		}
+	}while(is_running);
+	xhci_err(xhci, "[ERROR] exit rx done urb handler thread\n");
+	return 0;
+}
+
+static int ixia_tx_done_thread(void *data){ /* waits for each TX urb to complete and releases the paired RX urb back to IDLE */
+	int ret;
+	struct urb *urb_tx, *urb_rx;
+	struct ixia_data *ix_data = data;
+	struct ixia_dev *ix_dev = ix_dev_list[ix_data->dev_num - 1]; /* NOTE(review): unused */
+	struct usb_device *udev;
+	struct xhci_virt_device *virt_dev; /* NOTE(review): unused */
+	struct xhci_hcd *xhci;
+	struct usb_host_endpoint *ep_tx, *ep_rx;
+	int ep_index_tx, ep_index_rx;
+	int max_esit_payload;
+	int rx_index;
+	char is_running;
+	int tx_status;
+
+	is_running = true;
+	ret = 0;
+	xhci = ix_data->xhci;
+	udev = ix_data->udev;
+	ep_tx = udev->ep_out[ix_data->ep_out];
+	ep_rx = udev->ep_in[ix_data->ep_in];
+	ep_index_tx = xhci_get_endpoint_index(&ep_tx->desc);
+	ep_index_rx = xhci_get_endpoint_index(&ep_rx->desc);
+	max_esit_payload = mtk_xhci_get_max_esit_payload(xhci, udev, ep_tx);
+	rx_index = 0;
+	xhci_err(xhci, "[IXIA] start tx done threading\n");
+	do{
+
+		urb_tx = ix_data->urb_tx_list[rx_index];
+		tx_status = urb_tx->status;
+		xhci_dbg(xhci, "[IXIA] check tx urb %d 0x%x\n", rx_index, (unsigned int)urb_tx);
+		while(tx_status != 0){ /* 0 == urb completed successfully */
+			/* xhci_dbg(xhci, "[IXIA] tx_status %d\n", urb_tx->status); */
+			if(tx_status != 0 && tx_status != -EINPROGRESS){
+				xhci_err(xhci, "[ERROR] urb_tx %d not in valid status - %d\n", rx_index, urb_tx->status); /* was uninitialized 'i' */
+				is_running = false;
+				break;
+			}
+			msleep(1);
+			tx_status = urb_tx->status;
+		}
+		xhci_dbg(xhci, "[IXIA] tx urb %d done\n", rx_index);
+		/* change rx status */
+		urb_rx = ix_data->urb_rx_list[rx_index];
+		urb_rx->status = URB_STATUS_IDLE; /* RX urb may be re-armed by ixia_rx_thread now */
+		urb_tx->status = -EINPROGRESS;
+		/* ***************** */
+		rx_index++;
+		if(rx_index == TOTAL_RX_URB){
+			rx_index = 0;
+		}
+	}while(is_running);
+	xhci_err(xhci, "[ERROR] exit tx done urb handler thread\n");
+	return 0;
+}
+
+int f_add_ixia_thread(struct xhci_hcd *xhci, int dev_num, struct ixia_dev *ix_dev){ /* allocate the RX/TX urb pools and spawn the three ixia loopback threads */
+	int i, ret;
+	struct usb_device *udev;
+	struct ixia_data *ix_data;
+	struct urb *urb_tx, *urb_rx;
+	struct usb_host_endpoint *ep_tx, *ep_rx; /* NOTE(review): ep_rx unused */
+	int max_esit_payload;
+
+	ret =0;
+	xhci_err(xhci, "[IXIA]Start process, devnum %d\n", dev_num);
+	udev = dev_list[dev_num-1];
+	ep_tx = udev->ep_out[ix_dev->ep_out];
+	max_esit_payload = mtk_xhci_get_max_esit_payload(xhci, udev, ep_tx);
+
+	ix_data = kzalloc(sizeof(struct ixia_data), GFP_KERNEL); /* NOTE(review): allocation not checked for NULL; never freed in this view */
+	ix_data->xhci = xhci;
+	ix_data->dev_num = dev_num;
+	ix_data->udev = udev;
+	ix_data->ep_in = ix_dev->ep_in;
+	ix_data->ep_out = ix_dev->ep_out;
+
+	for(i=0; i<TOTAL_RX_URB; i++){ /* RX pool: 1514-byte (ethernet frame) IN urbs, parked as IDLE */
+		urb_rx = usb_alloc_urb(0, GFP_NOIO);
+		ret = f_fill_urb(urb_rx,ix_data->ep_in,1514,0,URB_DIR_IN, 0, max_esit_payload, udev);
+		if(ret){
+			xhci_err(xhci, "[ERROR]fill rx urb Error!!\n");
+			return RET_FAIL;
+		}
+		urb_rx->status = URB_STATUS_IDLE;
+		ix_data->urb_rx_list[i] = urb_rx;
+		xhci_err(xhci, "[IXIA] URB_RX %d -- 0x%x\n", i, (unsigned int)urb_rx);
+		urb_rx = NULL;
+	}
+	for(i=0; i<TOTAL_RX_URB; i++){ /* TX pool; NOTE(review): iterates TOTAL_RX_URB, relying on the two pool sizes being equal */
+		urb_tx = usb_alloc_urb(0, GFP_NOIO);
+		ret = f_fill_urb(urb_tx,ix_data->ep_out,1514,0,URB_DIR_OUT, 0, max_esit_payload, udev);
+		if(ret){
+			xhci_err(xhci, "[ERROR]fill tx urb Error!!\n");
+			return RET_FAIL;
+		}
+		kfree(urb_tx->transfer_buffer); /* TX urbs borrow the paired RX urb's buffer at queue time (ixia_rx_done_thread), so this one is dropped; pointer left dangling until then */
+		ix_data->urb_tx_list[i] = urb_tx;
+		xhci_err(xhci, "[IXIA] URB_TX %d -- 0x%x\n", i, (unsigned int)urb_tx);
+		urb_tx = NULL;
+	}
+	kthread_run(ixia_rx_thread, ix_data, "ixiarxt");
+	kthread_run(ixia_rx_done_thread, ix_data, "ixiarxdt");
+	kthread_run(ixia_tx_done_thread, ix_data, "ixiatxdt");
+	return 0;
+}
+
+void SetETHEPConfig(int dev_num, char *buf, struct usb_device *udev){ /* parse the config descriptor in buf, record the IN/OUT data endpoints in ix_dev_list, then configure both */
+	unsigned char bEndCount; /* NOTE(review): written but never read */
+	int nTxEP = 0;
+	int nRxEP = 0;
+	struct MUSB_ConfigurationDescriptor *pConfDes;
+	struct MUSB_InterfaceDescriptor *pInterDes;
+	struct MUSB_EndpointDescriptor *pEndDes;
+	struct MUSB_DeviceDescriptor *pDevDes; /* NOTE(review): unused */
+	char *pBuf;
+	int i, j, ret;
+	int transfer_type = 0, maxp_tx = 0, maxp_rx = 0;
+	struct ixia_dev *ix_dev;
+
+	pBuf = pConfDes = (struct MUSB_ConfigurationDescriptor *) buf; /* NOTE(review): chained assignment mixes struct* into char* — relies on implicit pointer conversion */
+	printk(KERN_ERR "pBuf 0x%x\n", (unsigned int)pBuf);
+	pBuf += 9;/* sizeof(struct MUSB_ConfigurationDescriptor); */ /* hard-coded wire size instead of sizeof (struct may be padded) */
+	printk(KERN_ERR "pBuf 0x%x\n", (unsigned int)pBuf);
+	printk(KERN_ERR "bNumInterfaces %d\n", pConfDes->bNumInterfaces);
+	for (i=0; i<pConfDes->bNumInterfaces; i++) {
+		pInterDes = (struct MUSB_InterfaceDescriptor *) pBuf;
+		pBuf += sizeof(struct MUSB_InterfaceDescriptor);
+		printk(KERN_ERR "pBuf 0x%x\n", (unsigned int)pBuf);
+		bEndCount = pInterDes->bNumEndpoints;
+		printk(KERN_ERR "bNumEndpoints %d\n", pInterDes->bNumEndpoints);
+		for (j=0; j<pInterDes->bNumEndpoints; j++) {
+			pEndDes = (struct MUSB_EndpointDescriptor *) pBuf;
+			unsigned int ep_num = pEndDes->bEndpointAddress & 0xf; /* NOTE(review): declaration after statements — C89 violation tolerated by gcc */
+			USB_DIR dir = (MUSB_DIR_OUT == (pEndDes->bEndpointAddress & 0x80)) ? USB_TX : USB_RX;
+			if(dir == USB_TX){
+				nTxEP = ep_num; /* last endpoint of each direction wins */
+				maxp_tx = pEndDes->wMaxPacketSize;
+				printk(KERN_ERR "nTxEP %d\n", nTxEP);
+				printk(KERN_ERR "maxp_tx %d\n", maxp_tx);
+			}
+			else if(dir == USB_RX){
+				nRxEP = ep_num;
+				maxp_rx = pEndDes->wMaxPacketSize;
+				printk(KERN_ERR "nRxEP %d\n", nRxEP);
+				printk(KERN_ERR "maxp_rx %d\n", maxp_rx);
+			}
+			transfer_type = pEndDes->bmAttributes & 0x3;
+			printk(KERN_ERR "transfer_type %d\n", transfer_type);
+			pBuf += 7;/* sizeof(struct MUSB_EndpointDescriptor); */ /* wire size of an endpoint descriptor */
+			printk(KERN_ERR "pBuf 0x%x\n", pBuf);
+		}
+	}
+	ix_dev = kmalloc(sizeof(struct ixia_dev), GFP_NOIO); /* NOTE(review): allocation not checked for NULL */
+	ix_dev->udev = udev;
+	ix_dev->ep_out = nTxEP;
+	ix_dev->ep_in = nRxEP;
+	ix_dev_list[dev_num-1] = ix_dev;
+	ret = f_config_ep(nTxEP,EPADD_OUT,transfer_type,maxp_tx,0,0,0,udev ,0);
+	if(ret){
+		xhci_err(xhci, "[FAIL] config out endpoint failed\n"); /* NOTE(review): no local 'xhci' — presumably a file-scope global; confirm */
+	}
+	ret = f_config_ep(nRxEP,EPADD_IN,transfer_type,maxp_rx,0,0,0,udev ,1);
+	if(ret){
+		xhci_err(xhci, "[FAIL] config in endpoint failed\n");
+	}
+}
+
+int f_hub_configure_eth_device(int hub_num, int port_num, int dev_num){ /* drive the ASIX-style vendor enumeration script against an attached USB-ethernet device */
+	int nEnumStep, count; /* NOTE(review): count unused */
+	struct usb_device *udev, *hdev; /* NOTE(review): hdev assigned but unused */
+	struct xhci_hcd *xhci;
+	struct urb *urb;
+	struct usb_ctrlrequest *dr;
+	char *ptr;
+	unsigned char eth_enum_write_index;
+	int ret;
+
+	struct ethenumeration_t ethenumeration_step[] = { /* scripted vendor control requests; list is terminated by the NULL-named entry */
+	{"MUSB_REQ_VENDER_1F_1", /* nodata */
+		{(MUSB_DIR_OUT|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_WRITE_GPIOS),
+		(0x13),
+		(0),
+		(0)
+		}
+	},
+
+	{"MUSB_REQ_VENDER_1F_2", /* nodata */
+		{(MUSB_DIR_OUT|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_WRITE_GPIOS),
+		(0x01),
+		(0),
+		(0)
+		}
+	},
+
+	{"MUSB_REQ_VENDER_1F_3", /* nodata */
+		{(MUSB_DIR_OUT|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_WRITE_GPIOS),
+		(0x03),
+		(0),
+		(0)
+		}
+	},
+
+	{"MUSB_REQ_VENDER_10_4", /* nodata */
+		{(MUSB_DIR_OUT|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_WRITE_RX_CTL),
+		(0x80),
+		(0),
+		(0)
+		}
+	},
+
+	{"MUSB_REQ_VENDER_17_5", /* data in */
+		{(MUSB_DIR_IN|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_READ_NODE_ID),
+		(0),
+		(0),
+		(0x06)
+		}
+	},
+
+	{"MUSB_REQ_VENDER_19_6", /* data in */
+		{(MUSB_DIR_IN|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_READ_PHY_ID),
+		(0),
+		(0),
+		(0x02)
+		}
+	},
+
+	{"MUSB_REQ_VENDER_06_7", /* no data */
+		{(MUSB_DIR_OUT|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_SET_SW_MII),
+		(0),
+		(0),
+		(0)
+		}
+	},
+#if 1
+	{"MUSB_REQ_VENDER_08_8", /* data out */
+		{(MUSB_DIR_OUT|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_WRITE_MII_REG),
+		(0x03),
+		(0),
+		(0x02)
+		}
+	},
+#endif
+	{"MUSB_REQ_VENDER_0A_9",
+		{(MUSB_DIR_OUT|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_SET_HW_MII),
+		(0),
+		(0),
+		(0)
+		}
+	},
+
+	{"MUSB_REQ_VENDER_06_10",
+		{(MUSB_DIR_OUT|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_SET_SW_MII),
+		(0),
+		(0),
+		(0)
+		}
+	},
+#if 1
+	{"MUSB_REQ_VENDER_08_11",
+		{(MUSB_DIR_OUT|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_WRITE_MII_REG),
+		(0x03),
+		(0x04),
+		(0x02)
+		}
+	},
+#endif
+	{"MUSB_REQ_VENDER_0A_12",
+		{(MUSB_DIR_OUT|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_SET_HW_MII),
+		(0),
+		(0),
+		(0)
+		}
+	},
+
+	{"MUSB_REQ_VENDER_06_13",
+		{(MUSB_DIR_OUT|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_SET_SW_MII),
+		(0),
+		(0),
+		(0)
+		}
+	},
+
+	{"MUSB_REQ_VENDER_07_14",
+		{(MUSB_DIR_IN|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_READ_MII_REG),
+		(0x03),
+		(0),
+		(0x02)
+		}
+	},
+
+	{"MUSB_REQ_VENDER_0A_15",
+		{(MUSB_DIR_OUT|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_SET_HW_MII),
+		(0),
+		(0),
+		(0)
+		}
+	},
+
+	{"MUSB_REQ_VENDER_06_16",
+		{(MUSB_DIR_OUT|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_SET_SW_MII),
+		(0),
+		(0),
+		(0)
+		}
+	},
+#if 1
+	{"MUSB_REQ_VENDER_08_17",
+		{(MUSB_DIR_OUT|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_WRITE_MII_REG),
+		(0x03),
+		(0),
+		(0x02)
+		}
+	},
+#endif
+	{"MUSB_REQ_VENDER_0A_18",
+		{(MUSB_DIR_OUT|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_SET_HW_MII),
+		(0),
+		(0),
+		(0)
+		}
+	},
+
+/* ifconfig */
+	{"MUSB_REQ_VENDER_10_19",
+		{(MUSB_DIR_OUT|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_WRITE_RX_CTL),
+		(0x8c),
+		(0),
+		(0)
+		}
+	},
+#if 1
+	{"MUSB_REQ_VENDER_16_20",
+		{(MUSB_DIR_OUT|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_WRITE_MULTI_FILTER),
+		(0),
+		(0),
+		(0x08)
+		}
+	},
+#endif
+	{"MUSB_REQ_VENDER_10_21",
+		{(MUSB_DIR_OUT|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_WRITE_RX_CTL),
+		(0x9c),
+		(0),
+		(0)
+		}
+	},
+#if 1
+	{"MUSB_REQ_VENDER_16_22",
+		{(MUSB_DIR_OUT|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_WRITE_MULTI_FILTER),
+		(0),
+		(0),
+		(0x08)
+		}
+	},
+#endif
+	{"MUSB_REQ_VENDER_10_23",
+		{(MUSB_DIR_OUT|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_WRITE_RX_CTL),
+		(0x9c),
+		(0),
+		(0)
+		}
+	},
+
+/* Write Medium Status */
+	{"MUSB_REQ_VENDOR_1A_24",
+		{(MUSB_DIR_OUT|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_WRITE_MEDIUM_MODE),
+		(0x16),
+		(0),
+		(0)
+		}
+	},
+#if 1
+/* Read Medium Status */
+	{"MUSB_REQ_VENDOR_1A_25",
+		{(MUSB_DIR_IN|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_READ_MEDIUM_MODE),
+		(0x00),
+		(0),
+		(1000)
+		}
+	},
+#endif
+#if 0
+
+/* Read Operation Mode */
+	{"MUSB_REQ_VENDOR_09_26",
+		{(MUSB_DIR_IN|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_READ_MII_OPERATION_MODE),
+		(0x00),
+		(0),
+		(0x01)
+		}
+	},
+
+/* Read Rx Control Register */
+	{"MUSB_REQ_VENDOR_0F_27",
+		{(MUSB_DIR_IN|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_READ_RX_CONTROL_REG),
+		(0x00),
+		(0),
+		(0x02)
+		}
+	},
+
+/* Read IPG/IPG1/IPG2 Register */
+	{"MUSB_REQ_VENDOR_11_28",
+		{(MUSB_DIR_IN|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_READ_IPG012),
+		(0x00),
+		(0),
+		(0x03)
+		}
+	},
+
+/* Read Multi-Filter Array */
+	{"MUSB_REQ_VENDOR_15_29",
+		{(MUSB_DIR_IN|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_READ_MULTIFILTER_ARRAY),
+		(0x00),
+		(0),
+		(0x08)
+		}
+	},
+
+/* Read Monitor Mode Status */
+	{"MUSB_REQ_VENDOR_1c_30",
+		{(MUSB_DIR_IN|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_READ_MONITOR_MODE),
+		(0x00),
+		(0),
+		(0x01)
+		}
+	},
+
+/* Read GPIOs */
+	{"MUSB_REQ_VENDOR_1e_31",
+		{(MUSB_DIR_IN|MUSB_TYPE_VENDOR|MUSB_RECIP_DEVICE),
+		(AX_CMD_READ_GPIOS),
+		(0x00),
+		(0),
+		(0x01)
+		}
+	},
+#endif
+
+	{NULL, /* terminator entry — loop below stops here */
+		{0,
+		(0),
+		(0),
+		(0),
+		(0)
+		}
+	},
+
+};
+	unsigned char eth_enum_write_value[5][8] = { {0, 0x80}, {0xe1, 0x05}, {0, 0x32}, /* 0x08 */ /* payloads consumed in order by the WRITE_MII/WRITE_MULTI_FILTER steps */
+		{0, 0, 0, 0x80, 0, 0, 0, 0}, {0, 0, 0, 0x80, 0, 0, 0, 0} };
+
+	xhci = hcd_to_xhci(my_hcd);
+	hdev = hdev_list[hub_num-1];
+	udev = dev_list[dev_num-1];
+
+	ret = 0;
+
+	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO); /* NOTE(review): kmalloc results unchecked throughout */
+	ptr = kmalloc(2048, GFP_NOIO);
+	memset(ptr, 0, 2048);
+	dr->bRequestType = MUSB_DIR_IN|MUSB_TYPE_STANDARD|MUSB_RECIP_DEVICE;
+	dr->bRequest = MUSB_REQ_GET_DESCRIPTOR;
+	dr->wValue = MUSB_DT_CONFIG << 8;
+	dr->wIndex = 0;
+	dr->wLength = 0x39; /* config + interface + endpoint descriptors for this device */
+	urb = alloc_ctrl_urb(dr, ptr, udev);
+	ret = f_ctrlrequest(urb, udev);
+	/* parse tx ep, rx ep */
+
+	if(ret){
+		printk(KERN_ERR "[DEV]Get Device descriptor ctrl request failed!!\n");
+		kfree(dr);
+		kfree(ptr);
+		usb_free_urb(urb);
+		return RET_FAIL;
+	}
+	SetETHEPConfig(dev_num, ptr,udev); /* records + configures the data endpoints found in the descriptor */
+
+	kfree(dr);
+	kfree(ptr); /* NOTE(review): ptr is freed here but passed to alloc_ctrl_urb again below — use-after-free (harmless only if the 0-length transfer never touches it); needs fixing */
+	usb_free_urb(urb);
+	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+	dr->bRequestType = 0;
+	dr->bRequest = USB_REQ_SET_CONFIGURATION;
+	dr->wValue = cpu_to_le16(1);
+	dr->wIndex = cpu_to_le16(0);
+	dr->wLength = cpu_to_le16(0);
+	urb = alloc_ctrl_urb(dr, ptr, udev); /* NOTE(review): ptr already freed above */
+	f_ctrlrequest(urb, udev);
+	if(urb->status == 0){
+		xhci_dbg(xhci, "set configuration success\n");
+		kfree(dr);
+		usb_free_urb(urb);
+	}
+	else{
+		xhci_err(xhci, "[ERROR] set configuration failed\n");
+		ret = urb->status; /* NOTE(review): ret overwritten — function still returns RET_FAIL below */
+		kfree(dr);
+		usb_free_urb(urb);
+		return RET_FAIL;
+	}
+
+	nEnumStep = 0;
+	eth_enum_write_index = 0;
+	while(ethenumeration_step[nEnumStep].pDesciptor != NULL){ /* replay the scripted vendor requests in order */
+		struct MUSB_DeviceRequest *pDevReq = &(ethenumeration_step[nEnumStep].sDevReq);
+
+		dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+		ptr= kmalloc(2048, GFP_NOIO);
+		memset(ptr, 0, 2048);
+
+		switch(pDevReq->bRequest){ /* OUT requests with payload: load the next canned data block */
+		case AX_CMD_WRITE_MII_REG: /* 0x08 */
+			{
+				*ptr = eth_enum_write_value[eth_enum_write_index][0];
+				*(ptr+1) = eth_enum_write_value[eth_enum_write_index++][1];
+				break;
+			}
+		case AX_CMD_WRITE_MULTI_FILTER: /* 0x16 */
+			{
+				*ptr = eth_enum_write_value[eth_enum_write_index][0];
+				*(ptr+1) = eth_enum_write_value[eth_enum_write_index][1];
+				*(ptr+2) = eth_enum_write_value[eth_enum_write_index][2];
+				*(ptr+3) = eth_enum_write_value[eth_enum_write_index][3];
+				*(ptr+4) = eth_enum_write_value[eth_enum_write_index][4];
+				*(ptr+5) = eth_enum_write_value[eth_enum_write_index][5];
+				*(ptr+6) = eth_enum_write_value[eth_enum_write_index][6];
+				*(ptr+7) = eth_enum_write_value[eth_enum_write_index++][7];
+				break;
+			}
+		default:
+			break;
+		}
+		dr->bRequestType = pDevReq->bmRequestType;
+		dr->bRequest = pDevReq->bRequest;
+		dr->wValue = pDevReq->wValue;
+		dr->wIndex = pDevReq->wIndex;
+		dr->wLength = pDevReq->wLength;
+		urb = alloc_ctrl_urb(dr, ptr, udev);
+		ret = f_ctrlrequest(urb, udev);
+		kfree(dr);
+		kfree(ptr);
+		usb_free_urb(urb);
+		if(ret)
+		{
+			printk(KERN_ERR "[DEV]config ep ctrl request failed!!\n");
+			return RET_FAIL;
+		}
+		nEnumStep++;
+	}
+	return ret;
+}
+
+
+
+int f_hub_configuredevice(int hub_num, int port_num, int dev_num
+	, int transfer_type, int maxp, int bInterval, char is_config_ep, char is_stress, int stress_config){ /* enumerate + optionally configure test endpoints (single-EP mode or canned stress configs) */
+	u32 status;
+	int ret;
+	struct usb_device *udev, *hdev;
+	struct xhci_virt_device *virt_dev; /* NOTE(review): unused */
+	struct xhci_hcd *xhci;
+	struct xhci_input_control_ctx *ctrl_ctx; /* NOTE(review): unused */
+	struct usb_host_endpoint *ep_tx, *ep_rx; /* NOTE(review): unused */
+	int ep_index_tx, ep_index_rx; /* NOTE(review): unused */
+	int slot_state; /* NOTE(review): unused */
+	int count = 0;
+	int count_down = 1000; /* polling budget for port-status waits */
+	int speed;
+	int mult,burst;
+	int dev_slot;
+
+	mult = 0;
+	burst = 8; /* defaults; overridden for INT/ISO below */
+	dev_slot = 1;
+	xhci = hcd_to_xhci(my_hcd);
+	hdev = hdev_list[hub_num-1];
+	status = 0;
+	count = count_down;
+	speed = USB_SPEED_HIGH; /* provisional; re-derived from port status after reset unless hub is SuperSpeed */
+	if(hdev->speed == USB_SPEED_SUPER){
+		speed = USB_SPEED_SUPER;
+	}
+	if(transfer_type == EPATT_INT){
+		mult = 0;
+		burst = 0;
+	}
+	if(transfer_type == EPATT_ISO){
+		mult = 0;
+		burst = 0;
+		dev_slot = 3;
+	}
+	while(!(status & USB_PORT_STAT_CONNECTION) && count > 0){ /* poll until a device is present */
+		ret = f_hub_getPortStatus(hub_num, port_num, &status);
+		if(ret != RET_SUCCESS){
+			xhci_err(xhci, "[ERROR] Get status failed\n");
+			return RET_FAIL;
+		}
+		count--;
+	}
+	if(count == 0){
+		xhci_err(xhci, "[ERRROR] Wait port connection status timeout\n");
+		return RET_FAIL;
+	}
+	xhci_dbg(xhci, "Got port connection status\n");
+#if 0
+	if(speed == USB_SPEED_SUPER){
+		if(f_hub_clearportfeature(hub_num, HUB_FEATURE_C_PORT_CONNECTION, port_num) != RET_SUCCESS){
+			xhci_err(xhci, "[ERROR] Clear port reset change failed\n");
+			return RET_FAIL;
+		}
+	}
+#endif
+	if(f_hub_setportfeature(hub_num, HUB_FEATURE_PORT_RESET, port_num) != RET_SUCCESS){
+		xhci_err(xhci, "[ERROR] Set port reset failed\n");
+		return RET_FAIL;
+	}
+	/*
+	if(f_dev_setportfeature(hub_num, 1, 0) != RET_SUCCESS){
+		xhci_err(xhci, "[ERROR] Set dev port reset failed\n");
+		return RET_FAIL;
+	}
+	*/
+	status = 0;
+	count = count_down;
+	while(!((status>>16) & USB_PORT_STAT_C_RESET) && count > 0){ /* change bits are in the upper half-word */
+		ret = f_hub_getPortStatus(hub_num, port_num, &status);
+		if(ret != RET_SUCCESS){
+			xhci_err(xhci, "[ERROR] Get status failed\n");
+			return RET_FAIL;
+		}
+		count--;
+	}
+	if(count == 0){
+		xhci_err(xhci, "[ERROR] Wait port reset change status timeout\n");
+		return RET_FAIL;
+	}
+	xhci_dbg(xhci, "Port reset done\n");
+
+	/* FIXME: check superspeed */
+
+	if(speed != USB_SPEED_SUPER){ /* derive actual link speed from the post-reset port status */
+		if(status & USB_PORT_STAT_HIGH_SPEED){
+			speed = USB_SPEED_HIGH;
+		}
+		else if(status & USB_PORT_STAT_LOW_SPEED){
+			speed = USB_SPEED_LOW;
+		}
+		else{
+			speed = USB_SPEED_FULL;
+		}
+	}
+
+	if(f_hub_clearportfeature(hub_num, HUB_FEATURE_C_PORT_RESET, port_num) != RET_SUCCESS){
+		xhci_err(xhci, "[ERROR] clear port reset change failed\n");
+		return RET_FAIL;
+	}
+	/* new usb device */
+	udev = hdev->children[port_num-1]; /* NOTE(review): overwritten immediately — previous child (if any) leaks */
+	udev = mtk_usb_alloc_dev(hdev, hdev->bus, port_num);
+	udev->level = hdev->level + 1;
+	udev->speed = speed;
+	hdev->children[port_num-1] = udev;
+	/* need to add tt handler */
+	if (hdev->tt) {
+		udev->tt = hdev->tt;
+		udev->ttport = hdev->ttport;
+	}
+	else if(udev->speed != USB_SPEED_HIGH
+		&& hdev->speed == USB_SPEED_HIGH) {
+		udev->tt = kzalloc(sizeof(struct usb_tt), GFP_KERNEL); /* NOTE(review): allocation unchecked */
+		udev->tt->hub = hdev;
+		udev->tt->multi = false;
+		udev->tt->think_time = 0;
+		udev->ttport = port_num;
+
+	}
+	dev_list[dev_num-1] = udev;
+	print_speed(udev->speed);
+	/* enable slot */
+	ret = f_enable_slot(udev);
+	if(ret != RET_SUCCESS){
+		return RET_FAIL;
+	}
+
+	/* address device */
+	ret = f_address_slot(false, udev);
+	if(ret != RET_SUCCESS){
+		return RET_FAIL;
+	}
+	if(is_config_ep){ /* single bidirectional test endpoint (EP1) with caller-supplied attributes */
+		/* return f_loopback_config_ep(1,2,transfer_type, maxp, bInterval, udev); */
+		ret = dev_config_ep(1, USB_RX, transfer_type, maxp, bInterval,dev_slot,burst,mult, udev);
+		if(ret != RET_SUCCESS){
+			return RET_FAIL;
+		}
+		ret = dev_config_ep(1, USB_TX, transfer_type, maxp, bInterval,dev_slot,burst,mult, udev);
+		if(ret != RET_SUCCESS){
+			return RET_FAIL;
+		}
+
+		ret = f_config_ep(1, EPADD_OUT, transfer_type, maxp, bInterval,burst,mult, udev, 0);
+		if(ret != RET_SUCCESS){
+			return RET_FAIL;
+		}
+		ret = f_config_ep(1, EPADD_IN, transfer_type, maxp, bInterval,burst,mult, udev, 1); /* NOTE(review): final arg 1 only on the last EP — apparently commits the config; confirm f_config_ep semantics */
+		if(ret != RET_SUCCESS){
+			return RET_FAIL;
+		}
+	}
+	else if(is_stress){ /* canned two-endpoint stress configurations, per speed */
+		if(speed == USB_SPEED_SUPER){
+			if(stress_config == 1){/* BULK+INT */
+				ret = dev_config_ep(1, USB_RX, EPATT_BULK, 1024, 1,1,8,0, udev);
+				if(ret != RET_SUCCESS){
+					return RET_FAIL;
+				}
+				ret = dev_config_ep(1, USB_TX, EPATT_BULK, 1024, 1, 1, 8,0, udev);
+				if(ret != RET_SUCCESS){
+					return RET_FAIL;
+				}
+				ret = f_config_ep(1, EPADD_OUT, EPATT_BULK, 1024, 0,8,0, udev, 0);
+				if(ret != RET_SUCCESS){
+					return RET_FAIL;
+				}
+				ret = f_config_ep(1, EPADD_IN, EPATT_BULK, 1024, 0,8,0, udev, 0);
+				if(ret != RET_SUCCESS){
+					return RET_FAIL;
+				}
+				ret = dev_config_ep(2, USB_RX, EPATT_INT, 1024, 1,1,0,0, udev);
+				if(ret != RET_SUCCESS){
+					return RET_FAIL;
+				}
+				ret = dev_config_ep(2, USB_TX, EPATT_INT, 1024, 1, 1, 0,0, udev);
+				if(ret != RET_SUCCESS){
+					return RET_FAIL;
+				}
+				ret = f_config_ep(2, EPADD_OUT, EPATT_INT, 1024, 1,0,0, udev, 0);
+				if(ret != RET_SUCCESS){
+					return RET_FAIL;
+				}
+				ret = f_config_ep(2, EPADD_IN, EPATT_INT, 1024, 1,0,0, udev, 1);
+				if(ret != RET_SUCCESS){
+					return RET_FAIL;
+				}
+			}
+			else if(stress_config == 2){/* BULK+ISO */
+				ret = dev_config_ep(1, USB_RX, EPATT_BULK, 1024, 0,1,8,0, udev);
+				if(ret != RET_SUCCESS){
+					return RET_FAIL;
+				}
+				ret = dev_config_ep(1, USB_TX, EPATT_BULK, 1024, 0, 1, 8,0, udev);
+				if(ret != RET_SUCCESS){
+					return RET_FAIL;
+				}
+				ret = f_config_ep(1, EPADD_OUT, EPATT_BULK, 1024, 0,8,0, udev, 0);
+				if(ret != RET_SUCCESS){
+					return RET_FAIL;
+				}
+				ret = f_config_ep(1, EPADD_IN, EPATT_BULK, 1024, 0,8,0, udev, 0);
+				if(ret != RET_SUCCESS){
+					return RET_FAIL;
+				}
+				ret = dev_config_ep(2, USB_RX, EPATT_ISO, 1024, 4,3,0,0, udev);
+				if(ret != RET_SUCCESS){
+					return RET_FAIL;
+				}
+				ret = dev_config_ep(2, USB_TX, EPATT_ISO, 1024, 4, 3, 0,0, udev);
+				if(ret != RET_SUCCESS){
+					return RET_FAIL;
+				}
+				ret = f_config_ep(2, EPADD_OUT, EPATT_ISO, 1024, 4,0,0, udev, 0);
+				if(ret != RET_SUCCESS){
+					return RET_FAIL;
+				}
+				ret = f_config_ep(2, EPADD_IN, EPATT_ISO, 1024, 4,0,0, udev, 1);
+				if(ret != RET_SUCCESS){
+					return RET_FAIL;
+				}
+				f_ring_enlarge(EPADD_OUT, 2, dev_num); /* grow ISO rings twice per direction; return values ignored */
+				f_ring_enlarge(EPADD_OUT, 2, dev_num);
+				f_ring_enlarge(EPADD_IN, 2, dev_num);
+				f_ring_enlarge(EPADD_IN, 2, dev_num);
+			}
+		}
+		else if(speed == USB_SPEED_HIGH){ /* BULK(512) on EP1 + INT(1024) on EP2 */
+			/* return f_loopback_config_ep(1,2,transfer_type, maxp, bInterval, udev); */
+			ret = dev_config_ep(1, USB_RX, EPATT_BULK, 512, 0,dev_slot,burst,mult, udev);
+			if(ret != RET_SUCCESS){
+				return RET_FAIL;
+			}
+			ret = dev_config_ep(1, USB_TX, EPATT_BULK, 512, 0, dev_slot, burst,mult, udev);
+			if(ret != RET_SUCCESS){
+				return RET_FAIL;
+			}
+#if 1
+			ret = dev_config_ep(2, USB_RX, EPATT_INT, 1024, 1, dev_slot,burst,mult, udev);
+			if(ret != RET_SUCCESS){
+				return RET_FAIL;
+			}
+			ret = dev_config_ep(2, USB_TX, EPATT_INT, 1024, 1, dev_slot,burst,mult, udev);
+			if(ret != RET_SUCCESS){
+				return RET_FAIL;
+			}
+#endif
+			ret = f_config_ep(1, EPADD_OUT, EPATT_BULK, 512, 0,0,0, udev, 0);
+			if(ret != RET_SUCCESS){
+				return RET_FAIL;
+			}
+			ret = f_config_ep(1, EPADD_IN, EPATT_BULK, 512, 0,0,0, udev, 0);
+			if(ret != RET_SUCCESS){
+				return RET_FAIL;
+			}
+#if 1
+			ret = f_config_ep(2, EPADD_OUT, EPATT_INT, 1024, 1,0,0, udev, 0);
+			if(ret != RET_SUCCESS){
+				return RET_FAIL;
+			}
+			ret = f_config_ep(2, EPADD_IN, EPATT_INT, 1024, 1,0,0, udev, 1);
+			if(ret != RET_SUCCESS){
+				return RET_FAIL;
+			}
+#endif
+		}
+		else if(speed == USB_SPEED_FULL){ /* same layout with full-speed 64-byte maxp */
+			ret = dev_config_ep(1, USB_RX, EPATT_BULK, 64, 0,dev_slot,burst,mult, udev);
+			if(ret != RET_SUCCESS){
+				return RET_FAIL;
+			}
+			ret = dev_config_ep(1, USB_TX, EPATT_BULK, 64, 0, dev_slot, burst,mult, udev);
+			if(ret != RET_SUCCESS){
+				return RET_FAIL;
+			}
+#if 1
+			ret = dev_config_ep(2, USB_RX, EPATT_INT, 64, 1, dev_slot,burst,mult, udev);
+			if(ret != RET_SUCCESS){
+				return RET_FAIL;
+			}
+			ret = dev_config_ep(2, USB_TX, EPATT_INT, 64, 1, dev_slot,burst,mult, udev);
+			if(ret != RET_SUCCESS){
+				return RET_FAIL;
+			}
+#endif
+			ret = f_config_ep(1, EPADD_OUT, EPATT_BULK, 64, 0,0,0, udev, 0);
+			if(ret != RET_SUCCESS){
+				return RET_FAIL;
+			}
+			ret = f_config_ep(1, EPADD_IN, EPATT_BULK, 64, 0,0,0, udev, 0);
+			if(ret != RET_SUCCESS){
+				return RET_FAIL;
+			}
+#if 1
+			ret = f_config_ep(2, EPADD_OUT, EPATT_INT, 64, 1,0,0, udev, 0);
+			if(ret != RET_SUCCESS){
+				return RET_FAIL;
+			}
+			ret = f_config_ep(2, EPADD_IN, EPATT_INT, 64, 1,0,0, udev, 1);
+			if(ret != RET_SUCCESS){
+				return RET_FAIL;
+			}
+#endif
+		}
+
+	}
+	return RET_SUCCESS;
+
+}
+
+void f_hub_alloc_urb(struct urb *urb, void *buffer, int transfer_type, int data_length, int ep_num
+	, int dir, int interval, struct usb_host_endpoint *ep, int dev_index){ /* fill + DMA-map a bulk/int urb for dev_list[dev_index] */
+	struct usb_device *udev, *rhdev; /* NOTE(review): rhdev unused */
+	struct xhci_hcd *xhci;
+	int ret; /* NOTE(review): mapping result captured but never checked */
+	int max_data_length = MAX_DATA_LENGTH;
+
+	xhci = hcd_to_xhci(my_hcd);
+	udev = dev_list[dev_index];
+
+	if(transfer_type == EPATT_BULK && dir == URB_DIR_OUT){ /* pick the fill helper matching type+direction; other combos leave the urb unfilled */
+		usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, ep_num), buffer, max_data_length, NULL, NULL);
+	}
+	else if(transfer_type == EPATT_INT && dir == URB_DIR_OUT ){
+		usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, ep_num), buffer, max_data_length, NULL, NULL, interval);
+	}
+	else if(transfer_type == EPATT_BULK && dir == URB_DIR_IN){
+		usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, ep_num), buffer, max_data_length, NULL, NULL);
+	}
+	else if(transfer_type == EPATT_INT && dir == URB_DIR_IN){
+		usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, ep_num), buffer, max_data_length, NULL, NULL, interval);
+	}
+	urb->status = -EINPROGRESS;
+	urb->actual_length = 0;
+	urb->transfer_flags |= dir | URB_ZERO_PACKET;
+	urb->ep = ep;
+	urb->num_sgs = 0;
+
+	ret = mtk_map_urb_for_dma(my_hcd, urb, GFP_KERNEL); /* mapping is done at MAX_DATA_LENGTH... */
+	urb->transfer_buffer_length = data_length; /* ...then the transfer is shrunk to the caller's length */
+	xhci_dbg(xhci, "urb 0x%x\n", (unsigned int)urb);
+	xhci_dbg(xhci, "urb transfer buffer 0x%x\n", (unsigned int)urb->transfer_buffer);
+}
+
+#define TRANSFER_MAX_LENGTH (16*1024-1) /* parenthesized so the expression macro is safe in any context */
+#define MORE_TRANSFER_TIMES 100 /* extra successful transfers required after the last induced stop */
+
+struct transfer_data { /* per-thread parameters/results for the random-stop transfer test */
+	struct xhci_hcd *xhci;
+	struct usb_device *udev;
+	int ep_num;
+	int dir;
+	int stop_count;
+	int cur_stop_count;
+	int transfer_length;
+	volatile char is_running; /* polled by the spawning code across threads */
+	volatile char is_correct;
+};
+
+static int transfer_thread(void *data){ /* repeatedly queue one urb; tolerate exactly stop_count induced failures, then demand MORE_TRANSFER_TIMES clean transfers */
+	int ret;
+	struct transfer_data *t_data = data;
+	struct device *dev;
+	struct usb_device *udev;
+	struct xhci_hcd *xhci;
+	struct urb *urb;
+	struct usb_host_endpoint *ep;
+	int max_esit_payload;
+	char is_going;
+	u32 length;
+	int transfer_times = 0;
+	int more_transfer_times = MORE_TRANSFER_TIMES;
+
+	ret =0;
+	xhci = t_data->xhci;
+	dev = xhci_to_hcd(xhci)->self.controller;
+	udev = t_data->udev;
+	if(t_data->dir == URB_DIR_OUT){
+		ep = udev->ep_out[t_data->ep_num];
+	}
+	else{
+		ep = udev->ep_in[t_data->ep_num];
+	}
+	max_esit_payload = mtk_xhci_get_max_esit_payload(xhci, udev, ep);
+	length = t_data->transfer_length;
+	urb = usb_alloc_urb(0, GFP_KERNEL); /* NOTE(review): allocation/fill results unchecked */
+	ret = f_fill_urb(urb,t_data->ep_num,length,0,t_data->dir, 0, max_esit_payload, udev);
+	xhci_err(xhci, "Start random stop transfer thread, cur_stop_count %d, stop_count %d\n"
+		, t_data->cur_stop_count, t_data->stop_count);
+	is_going = true;
+	do{
+		/* queue urb */
+		urb->status = -EINPROGRESS;
+		urb->actual_length = 0;
+		urb->transfer_buffer_length = length;
+		ret = f_queue_urb(urb,1, udev);
+		if(ret){ /* a stop/failure was induced on this endpoint */
+			t_data->cur_stop_count++;
+
+
+			xhci_err(xhci, "Transfer of ep %d, dir %x t_data->cur_stop_count :%d stop_count :%d \n"
+				, t_data->ep_num, t_data->dir,t_data->cur_stop_count ,t_data->stop_count);
+
+
+			if(t_data->cur_stop_count > t_data->stop_count){ /* more failures than the test allows */
+				/* error */
+				xhci_err(xhci, "Transfer of ep %d, dir %d fail occurred more than set %d times\n"
+					, t_data->ep_num, t_data->dir, t_data->stop_count);
+				t_data->is_correct = false;
+				is_going = false;
+				xhci_err(xhci, "[FAIL]\n");
+			}
+			/* must delay some time */
+			dev_polling_stop_status(udev);
+		}
+		else{
+			if(t_data->cur_stop_count == t_data->stop_count){ /* all expected stops seen: count clean transfers toward the pass criterion */
+				transfer_times++;
+				xhci_err(xhci, "transfer_times++ %d\n", transfer_times);
+				if(transfer_times >= more_transfer_times){
+					is_going = false;
+					t_data->is_correct = true;
+					xhci_err(xhci, "[PASS]\n");
+				}
+			}
+		}
+	}while(is_going && !g_stopped); /* g_stopped: global kill switch for all test threads */
+	length = TRANSFER_MAX_LENGTH;
+	f_free_urb(urb,length,0); /* NOTE(review): frees with TRANSFER_MAX_LENGTH regardless of the urb's actual fill length — confirm f_free_urb's length use */
+	xhci_err(xhci, "[INFO]Exit transfer thread, ep_num %d, dir %d\n", t_data->ep_num, t_data->dir);
+	t_data->is_running = false;
+	return ret;
+}
+
+int f_random_stop(int ep_1_num, int ep_2_num, int stop_count_1, int stop_count_2, int urb_dir_1, int urb_dir_2, int length){ /* launches two transfer_thread kthreads, one per endpoint, for the random-stop test */
+	struct transfer_data *t_data_1, *t_data_2;
+	struct xhci_hcd *xhci;
+	struct device *dev;
+	struct usb_device *udev, *rhdev;
+	int ret;
+	ret = 0;
+
+	xhci = hcd_to_xhci(my_hcd);
+	dev = xhci_to_hcd(xhci)->self.controller;/* dma stream buffer */
+	rhdev = my_hcd->self.root_hub;
+	udev = rhdev->children[g_port_id-1]; /* device attached to the globally selected port */
+	t_data_1 = kzalloc(sizeof(struct transfer_data), GFP_KERNEL); /* NOTE(review): kzalloc return not checked; freed by the thread? verify ownership */
+	t_data_1->xhci = xhci;
+	t_data_1->udev = udev;
+	t_data_1->ep_num = ep_1_num;
+	t_data_1->dir = urb_dir_1;
+	t_data_1->cur_stop_count = 0;
+	t_data_1->stop_count = stop_count_1;
+	t_data_1->transfer_length = length;
+	t_data_1->is_running = true;
+	t_data_1->is_correct = true;
+
+	t_data_2 = kzalloc(sizeof(struct transfer_data), GFP_KERNEL); /* NOTE(review): kzalloc return not checked */
+	t_data_2->xhci = xhci;
+	t_data_2->udev = udev;
+	t_data_2->ep_num = ep_2_num;
+	t_data_2->dir = urb_dir_2;
+	t_data_2->cur_stop_count = 0;
+	t_data_2->stop_count = stop_count_2;
+	t_data_2->transfer_length = length;
+	t_data_2->is_running = true;
+	t_data_2->is_correct = true;
+	g_stopped = false; /* clear the global abort flag before starting the workers */
+	kthread_run(transfer_thread, t_data_1, "transfer_1_t");
+	kthread_run(transfer_thread, t_data_2, "transfer_2_t");
+#if 0
+	while(t_data_1->is_running || t_data_2->is_running){
+		msleep(10);
+	}
+	if(!t_data_1->is_correct || !t_data_2->is_correct){
+		return RET_FAIL;
+	}
+#endif
+	return RET_SUCCESS; /* fire-and-forget: the synchronous wait above is compiled out */
+}
+
+struct stress_data{ /* shared context for the stress tx / tx-done / rx-done kthreads */
+	int dev_num; /* device index; used only in log messages */
+	int ep_num; /* endpoint number under test */
+	struct xhci_hcd *xhci; /* host controller under test */
+	struct usb_device *udev; /* device under test */
+	struct urb *urb_rx_list[TOTAL_URB]; /* pre-filled IN urbs, one per pipeline slot */
+	struct urb *urb_tx_list[TOTAL_URB]; /* pre-filled OUT urbs sharing the rx transfer buffers */
+	int loop_count[TOTAL_URB]; /* completions per slot since the last data compare */
+	char *buffer[TOTAL_URB]; /* reference copy of each slot's tx payload */
+	char isCompare; /* nonzero: periodically verify rx data against buffer[] */
+	int max_esit_payload; /* max ESIT payload, used to size isoc packets */
+	int max_buffer_len[TOTAL_URB]; /* largest random length queued per slot (random-length tests only) */
+};
+
+static int stress_ep0_thread(void *data){ /* EP0 stress kthread: active part only polls device status every 10ms; the control-loopback traffic is compiled out */
+	int ret;
+	struct stress_data *str_data = data;
+	struct xhci_hcd *xhci;
+	struct usb_device *udev;
+	struct device *dev; /* NOTE(review): dev/length/ptr/dr/urb/mapping/i are only used by the #if 0 code below */
+	char is_running;
+	int length;
+	char *ptr1,*ptr2;
+	struct usb_ctrlrequest *dr1, *dr2;
+	struct urb *urb1, *urb2;
+	int count, count_boundary;
+	dma_addr_t mapping;
+	int i;
+
+	xhci = str_data->xhci;
+	udev = str_data->udev;
+	is_running = true;
+#if 0
+	xhci = hcd_to_xhci(my_hcd);
+	dev = xhci_to_hcd(xhci)->self.controller;
+	is_running = true;
+	length = 100;
+
+	ptr1= kmalloc(2048, GFP_NOIO);
+	get_random_bytes(ptr1, 2048);
+
+	ptr2= kzalloc(2048, GFP_NOIO);
+	memcpy(ptr2, ptr1, 2048);
+
+	dr1 = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+	dr1->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;; /* NOTE(review): stray ';;' in compiled-out code */
+	dr1->bRequest = AT_CTRL_TEST;
+	dr1->wValue = cpu_to_le16(0);
+	dr1->wIndex = cpu_to_le16(0);
+	dr1->wLength = cpu_to_le16(2048);
+
+	dr2 = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+	dr2->bRequestType = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE;; /* NOTE(review): stray ';;' in compiled-out code */
+	dr2->bRequest = AT_CTRL_TEST;
+	dr2->wValue = cpu_to_le16(0);
+	dr2->wIndex = cpu_to_le16(0);
+	dr2->wLength = cpu_to_le16(2048);
+
+	urb1 = usb_alloc_urb(0, GFP_NOIO);
+	usb_fill_control_urb(urb1, udev, usb_sndctrlpipe(udev, 0), dr1, ptr1,
+		dr1->wLength, NULL, NULL);
+	urb1->ep = &udev->ep0;
+	mapping = dma_map_single(dev, ptr1, 2048, DMA_BIDIRECTIONAL);
+	urb1->transfer_dma = mapping;
+	dma_sync_single_for_device(dev, mapping, 2048, DMA_BIDIRECTIONAL);
+
+	urb2 = usb_alloc_urb(0, GFP_NOIO);
+	usb_fill_control_urb(urb2, udev, usb_rcvctrlpipe(udev, 0), dr2, ptr2,
+		dr2->wLength, NULL, NULL);
+	urb2->ep = &udev->ep0;
+	mapping = dma_map_single(dev, ptr2, 2048, DMA_BIDIRECTIONAL);
+	urb2->transfer_dma = mapping;
+	dma_sync_single_for_device(dev, mapping, 2048, DMA_BIDIRECTIONAL);
+#endif
+	count = 0;
+	count_boundary = 100;
+	do{
+		msleep(10);
+		dev_polling_status(udev); /* issue a status poll on EP0 each iteration */
+#if 0
+		length = ((get_random_int()%2048) + 1);
+		/* ret=dev_ctrl_loopback(length,udev); */
+		urb1->status = -EINPROGRESS;
+		urb1->actual_length = 0;
+		urb1->transfer_buffer_length = length;
+		ret = f_ctrlrequest(urb1, udev);
+		if(ret != RET_SUCCESS){
+			g_correct = false;
+			is_running = false;
+		}
+
+		urb2->status = -EINPROGRESS;
+		urb2->actual_length = 0;
+		urb2->transfer_buffer_length = length;
+		ret = f_ctrlrequest(urb2, udev);
+		if(ret != RET_SUCCESS){
+			g_correct = false;
+			is_running = false;
+		}
+		count++;
+		if(count >= count_boundary){
+			dma_sync_single_for_device(dev, urb2->transfer_dma, 2048, DMA_BIDIRECTIONAL);
+			for(i=0; i<2048; i++){
+				if((*(ptr1+i)) != (*(ptr2+i))){
+					xhci_err(xhci, "[ERROR] buffer %d not match, tx 0x%x, rx 0x%x\n", i, *(ptr1+i), *(ptr2+i));
+					return RET_FAIL;
+				}
+			}
+			count = 0;
+		}
+#endif
+
+	}while(is_running && g_correct); /* runs until the global g_correct flag is cleared elsewhere */
+	xhci_err(xhci, "[ERROR] exit ep0 stress thread, dev_num %d\n"
+		, str_data->dev_num);
+	return 0;
+}
+
+static int stress_tx_thread(void *data){ /* stress kthread: cycles through urb_tx_list, submitting each OUT urb once its slot returns to URB_STATUS_IDLE */
+	int i, ret;
+	struct urb *urb_tx;
+	struct stress_data *str_data = data;
+	struct usb_device *udev;
+	struct xhci_virt_device *virt_dev; /* NOTE(review): unused locals kept from the original */
+	struct xhci_hcd *xhci;
+	struct usb_host_endpoint *ep_tx, *ep_rx;
+	int ep_index_tx, ep_index_rx;
+	int max_esit_payload;
+	int tx_index;
+	int tx_status;
+	char is_running;
+
+	is_running = true;
+	xhci = str_data->xhci;
+	udev = str_data->udev;
+
+	tx_index = 0;
+	xhci_err(xhci, "[STRESS] start tx threading, dev_num %d, ep_num %d\n", str_data->dev_num, str_data->ep_num);
+	do{
+		urb_tx = str_data->urb_tx_list[tx_index];
+		tx_status = urb_tx->status;
+		while(tx_status != URB_STATUS_IDLE){ /* busy-wait (1ms steps) until the rx-done thread marks this slot idle */
+			if(tx_status != 0 && tx_status != -EINPROGRESS
+			&& tx_status != URB_STATUS_IDLE && tx_status != URB_STATUS_RX){ /* any other status is an error state */
+				xhci_err(xhci, "[STRESS][ERROR] dev %d, ep %d, urb_tx %d not in valid status - %d\n"
+				, str_data->dev_num, str_data->ep_num, tx_index, urb_tx->status);
+				is_running = false;
+				g_correct = false;
+				break;
+			}
+			msleep(1);
+			tx_status = urb_tx->status;
+		}
+		xhci_dbg(xhci, "[STRESS] queue tx urb %d, dev %d, ep %d\n"
+			, tx_index, str_data->dev_num, str_data->ep_num);
+		urb_tx->actual_length = 0;
+		urb_tx->status = -EINPROGRESS;
+		if(urb_tx->number_of_packets > 0){ /* isoc urb: clear per-packet actual lengths */
+			for(i=0; i<urb_tx->number_of_packets; i++){
+				urb_tx->iso_frame_desc[i].actual_length = 0;
+			}
+		}
+		while(g_stopping_ep){} /* spin while an endpoint stop is in progress elsewhere */
+		f_queue_urb(urb_tx, 0, udev); /* non-blocking submit; completion observed via urb status */
+
+		tx_index++;
+		if(tx_index == TOTAL_URB){ /* wrap the ring */
+			tx_index = 0;
+		}
+	}while(is_running && g_correct);
+	xhci_err(xhci, "[ERROR] exit tx urb handler thread, dev_num %d, ep_num %d\n"
+		, str_data->dev_num, str_data->ep_num);
+	kfree(str_data); /* NOTE(review): str_data is shared with the sibling threads, which also kfree it on exit - potential double free */
+	return 0;
+}
+
+static int stress_tx_done_thread(void *data){ /* stress kthread: waits for each OUT urb to complete (status 0), then submits the paired IN urb of the same slot */
+	int i, ret;
+	struct urb *urb_tx, *urb_rx;
+	struct stress_data *str_data = data;
+	struct usb_device *udev;
+	struct xhci_virt_device *virt_dev; /* NOTE(review): unused locals kept from the original */
+	struct xhci_hcd *xhci;
+	struct usb_host_endpoint *ep_tx, *ep_rx;
+	int ep_index_tx, ep_index_rx;
+	int max_esit_payload;
+	int tx_index;
+	int tx_status;
+	char is_running;
+	is_running = true;
+
+	xhci = str_data->xhci;
+	udev = str_data->udev;
+	tx_index = 0;
+	xhci_err(xhci, "[STRESS] start tx done threading, dev_num %d, ep_num %d\n"
+		, str_data->dev_num, str_data->ep_num);
+	do{
+		urb_tx = str_data->urb_tx_list[tx_index];
+		tx_status = urb_tx->status;
+		while(tx_status != 0){ /* busy-wait (1ms steps) for tx completion */
+			if(tx_status != 0 && tx_status != -EINPROGRESS
+			&& tx_status != URB_STATUS_IDLE && tx_status != URB_STATUS_RX){ /* any other status is an error state */
+				xhci_err(xhci, "[STRESS][ERROR] dev %d, ep %d, urb_tx %d not in valid status - %d\n"
+				, str_data->dev_num, str_data->ep_num, tx_index, urb_tx->status);
+				is_running = false;
+				g_correct = false;
+				break;
+			}
+			msleep(1);
+			tx_status = urb_tx->status;
+		}
+		ep_rx = urb_tx->ep;
+		if(usb_endpoint_xfer_isoc(&ep_rx->desc)){ /* extra settle time for isochronous endpoints */
+			msleep(1500);
+		}
+		/* queue rx */
+		xhci_dbg(xhci, "[STRESS] tx urb %d success, queue rx urb, dev %d, ep %d\n"
+			, tx_index, str_data->dev_num, str_data->ep_num);
+		urb_tx->status = URB_STATUS_RX; /* mark slot as waiting for the rx leg */
+		urb_rx = str_data->urb_rx_list[tx_index];
+		urb_rx->status = -EINPROGRESS;
+		urb_rx->transfer_buffer_length = urb_tx->actual_length; /* read back exactly what was written */
+		urb_rx->actual_length = 0;
+		if(urb_rx->number_of_packets > 0){ /* isoc urb: clear per-packet actual lengths */
+			for(i=0; i<urb_rx->number_of_packets; i++){
+				urb_rx->iso_frame_desc[i].actual_length = 0;
+			}
+		}
+		while(g_stopping_ep){} /* spin while an endpoint stop is in progress elsewhere */
+		f_queue_urb(urb_rx, 0, udev);
+		tx_index++;
+		if(tx_index == TOTAL_URB){ /* wrap the ring */
+			tx_index = 0;
+		}
+	} while(is_running && g_correct);
+	xhci_err(xhci, "[STRESS][ERROR] exit tx urb done thread, dev_num %d, ep_num %d\n"
+		, str_data->dev_num, str_data->ep_num);
+	kfree(str_data); /* NOTE(review): str_data is shared with the sibling threads, which also kfree it on exit - potential double free */
+	return 0;
+}
+
+static int stress_rx_done_thread(void *data){ /* fix: explicit 'int' return type (implicit int is invalid in C99+ and must match kthread_run()'s threadfn signature) */
+	int i, ret;
+	struct urb *urb_tx, *urb_rx;
+	struct stress_data *str_data = data;
+	struct usb_device *udev;
+	struct xhci_virt_device *virt_dev; /* NOTE(review): unused locals kept from the original */
+	struct xhci_hcd *xhci;
+	struct usb_host_endpoint *ep_tx, *ep_rx;
+	int ep_index_tx, ep_index_rx;
+	int max_esit_payload;
+	int tx_index;
+	int rx_status;
+	char is_running;
+	int data_len;
+	int count;
+#if 0
+	u32 __iomem *addr;
+	int temp;
+#endif
+	char *tmp1, *tmp2; /* fix: declaration moved above the first statement (gnu89 kernel C forbids mixed declarations and code) */
+	is_running = true;
+	xhci = str_data->xhci;
+	udev = str_data->udev;
+	tx_index = 0;
+	count = 0;
+	xhci_err(xhci, "[STRESS] start rx done threading, dev_num %d, ep_num %d\n"
+		, str_data->dev_num, str_data->ep_num);
+	do{
+		urb_rx = str_data->urb_rx_list[tx_index];
+		rx_status = urb_rx->status;
+		xhci_dbg(xhci, "[STRESS] check rx urb %d 0x%x, dev %d, ep %d\n"
+			, tx_index, (unsigned int)urb_rx, str_data->dev_num, str_data->ep_num);
+		while(rx_status != 0){ /* busy-wait (1ms steps) for rx completion */
+			if(rx_status != 0 && rx_status != -EINPROGRESS){ /* any status other than done/in-progress is an error */
+				xhci_err(xhci, "[STRESS][ERROR] dev %d, ep %d, urb_rx %d not in valid status - %d\n"
+				, str_data->dev_num, str_data->ep_num, tx_index, urb_rx->status);
+				is_running = false;
+				g_correct = false;
+				break;
+			}
+			msleep(1);
+			rx_status = urb_rx->status;
+		}
+		/* update urb_rx status to IDLE */
+		/* update urb_rx status to INPROGRESS */
+		xhci_dbg(xhci, "[STRESS] rx urb %d done, dev %d, ep %d\n"
+			, tx_index, str_data->dev_num, str_data->ep_num);
+
+		if(str_data->isCompare){ /* every 300th completion of eps 1/2, compare rx data with the reference copy */
+			str_data->loop_count[tx_index]++;
+			if(((str_data->ep_num==1) && (str_data->loop_count[tx_index] == 300))
+			|| ((str_data->ep_num==2) && (str_data->loop_count[tx_index] == 300))){
+				data_len = GPD_LENGTH;
+				/* xhci_err(xhci, "[STRESS] comparing dev %d ep %d buffer %d\n", str_data->dev_num, str_data->ep_num, tx_index); */
+				/* compare buffer data */
+				for(i=0; i<data_len; i++){
+					tmp1 = urb_rx->transfer_buffer+i;
+					tmp2 = str_data->buffer[tx_index]+i;
+					if((*tmp1) != (*tmp2)){ /* mismatch => data corruption, abort the stress run */
+#if 0
+						/* generate LGO_U1 */
+						addr = &xhci->op_regs->port_power_base + NUM_PORT_REGS*(0 & 0xff);
+						temp = xhci_readl(xhci, addr);
+						temp = temp & (~(0x000000ff));
+						temp = temp | 1;
+						xhci_writel(xhci, temp, addr);
+						msleep(5);
+						temp = 0;
+						xhci_writel(xhci, temp, addr);
+#endif
+						xhci_err(xhci, "[STRESS][ERROR] buffer %d not match, rx 0x%x, buffer 0x%x, dev %d, ep %d\n"
+							, i, *tmp1, *tmp2, str_data->dev_num, str_data->ep_num);
+
+						is_running = false;
+/* while(1); */
+						g_correct = false;
+						break;
+
+
+					}
+				}
+				/* xhci_err(xhci, "[STRESS] comparing buffer dev %d ep %d %d done\n", str_data->dev_num, str_data->ep_num, tx_index); */
+				/* reset loop_count */
+				str_data->loop_count[tx_index]=0;
+			}
+		}
+
+		urb_tx = str_data->urb_tx_list[tx_index]; /* recycle the slot: tx idle, rx armed */
+		urb_tx->status = URB_STATUS_IDLE;
+		urb_rx->status = -EINPROGRESS;
+
+		tx_index++;
+		if(tx_index == TOTAL_URB){ /* wrap the ring; log liveness every 100 laps */
+			count++;
+			if(count == 100){
+				xhci_err(xhci, "[STRESS] stress is running, dev %d ep %d\n",str_data->dev_num, str_data->ep_num);
+				count = 0;
+			}
+			tx_index = 0;
+		}
+	}while(is_running && g_correct);
+	xhci_err(xhci, "[STRESS][ERROR] exit rx urb done thread, dev_num %d, ep_num %d\n"
+		, str_data->dev_num, str_data->ep_num);
+	kfree(str_data); /* NOTE(review): str_data is shared with the sibling threads, which also kfree it on exit - potential double free */
+	return 0;
+}
+
+static int stress_rdn_len_tx_thread(void *data){ /* random-length variant of stress_tx_thread: each submit uses a fresh random length < GPD_LENGTH_RDN-512 and rebuilds the isoc frame table */
+	int i, ret;
+	struct urb *urb_tx;
+	struct stress_data *str_data = data;
+	struct usb_device *udev;
+	struct xhci_virt_device *virt_dev; /* NOTE(review): unused locals kept from the original */
+	struct xhci_hcd *xhci;
+	struct usb_host_endpoint *ep_tx, *ep_rx;
+	struct urb_priv *urb_priv;
+	int ep_index_tx, ep_index_rx;
+	int max_esit_payload;
+	int tx_index;
+	int tx_status;
+	char is_running;
+	int this_len;
+	int size;
+
+	is_running = true;
+	xhci = str_data->xhci;
+	udev = str_data->udev;
+
+	tx_index = 0;
+	xhci_err(xhci, "[STRESS] start tx threading, dev_num %d, ep_num %d\n", str_data->dev_num, str_data->ep_num);
+	do{
+		urb_tx = str_data->urb_tx_list[tx_index];
+		tx_status = urb_tx->status;
+		while(tx_status != URB_STATUS_IDLE){ /* busy-wait (1ms steps) until the rx-done thread marks this slot idle */
+			if(tx_status != 0 && tx_status != -EINPROGRESS
+			&& tx_status != URB_STATUS_IDLE && tx_status != URB_STATUS_RX){ /* any other status is an error state */
+				xhci_err(xhci, "[STRESS][ERROR] dev %d, ep %d, urb_tx %d not in valid status - %d\n"
+				, str_data->dev_num, str_data->ep_num, tx_index, urb_tx->status);
+				is_running = false;
+				g_correct = false;
+				break;
+			}
+			msleep(1);
+			tx_status = urb_tx->status;
+		}
+		xhci_dbg(xhci, "[STRESS] queue tx urb %d, dev %d, ep %d\n"
+			, tx_index, str_data->dev_num, str_data->ep_num);
+		urb_tx->actual_length = 0;
+		urb_tx->status = -EINPROGRESS;
+
+		this_len = (get_random_int()%(GPD_LENGTH_RDN-512)) + 1; /* random 1..GPD_LENGTH_RDN-512 bytes */
+		if((this_len % str_data->max_esit_payload) == 0){ /* avoid exact multiples of max ESIT payload */
+			this_len++;
+		}
+		if(urb_tx->number_of_packets > 0){ /* isoc urb: recompute the frame descriptor table for the new length */
+			max_esit_payload = str_data->max_esit_payload;
+			urb_tx->number_of_packets = ((this_len+max_esit_payload)/max_esit_payload);
+			for(i=0; i<urb_tx->number_of_packets; i++){
+				urb_tx->iso_frame_desc[i].actual_length = 0;
+				if(i == urb_tx->number_of_packets-1){ /* last packet carries the remainder */
+					urb_tx->iso_frame_desc[i].length = (this_len-(i*max_esit_payload));
+				}
+				else{
+					urb_tx->iso_frame_desc[i].length = max_esit_payload;
+				}
+			}
+			if(urb_tx->hcpriv){ /* replace the per-urb private TD array to match the new packet count */
+				xhci_urb_free_priv(xhci, urb_tx->hcpriv);
+			}
+			size = urb_tx->number_of_packets;
+			urb_priv = kmalloc(sizeof(struct urb_priv) + size * sizeof(struct xhci_td *), GFP_KERNEL);
+			if (!urb_priv){
+				xhci_err(xhci, "[ERROR] allocate urb_priv failed\n");
+				return RET_FAIL;
+			}
+			for (i = 0; i < size; i++) {
+				urb_priv->td[i] = kmalloc(sizeof(struct xhci_td), GFP_KERNEL);
+				if (!urb_priv->td[i]) {
+					urb_priv->length = i;
+					xhci_urb_free_priv(xhci, urb_priv);
+					return RET_FAIL;
+				}
+			}
+			urb_priv->length = size;
+			urb_priv->td_cnt = 0;
+			urb_tx->hcpriv = urb_priv;
+		}
+
+		while(g_stopping_ep){} /* spin while an endpoint stop is in progress elsewhere */
+		urb_tx->transfer_buffer_length = this_len;
+		if(this_len > str_data->max_buffer_len[tx_index]){ /* track the largest length so the compare covers all touched bytes */
+			str_data->max_buffer_len[tx_index] = this_len;
+		}
+		f_queue_urb(urb_tx, 0, udev); /* non-blocking submit */
+
+		tx_index++;
+		if(tx_index == TOTAL_URB){ /* wrap the ring */
+			tx_index = 0;
+		}
+	}while(is_running && g_correct);
+	xhci_err(xhci, "[ERROR] exit tx urb handler thread, dev_num %d, ep_num %d\n"
+		, str_data->dev_num, str_data->ep_num);
+	kfree(str_data); /* NOTE(review): str_data is shared with the sibling threads, which also kfree it on exit - potential double free */
+	return 0;
+}
+
+static int stress_rdn_len_tx_done_thread(void *data){ /* random-length variant of stress_tx_done_thread: waits for tx completion, rebuilds the rx isoc tables for the actual length, then queues the paired IN urb */
+	int i, ret;
+	struct urb *urb_tx, *urb_rx;
+	struct stress_data *str_data = data;
+	struct usb_device *udev;
+	struct xhci_virt_device *virt_dev; /* NOTE(review): unused locals kept from the original */
+	struct xhci_hcd *xhci;
+	struct usb_host_endpoint *ep_tx, *ep_rx;
+	struct urb_priv *urb_priv;
+	int ep_index_tx, ep_index_rx;
+	int max_esit_payload;
+	int tx_index;
+	int tx_status;
+	char is_running;
+	int size; /* fix: declaration moved above the first statement (gnu89 kernel C forbids mixed declarations and code) */
+	is_running = true;
+
+	xhci = str_data->xhci;
+	udev = str_data->udev;
+	tx_index = 0;
+
+	xhci_err(xhci, "[STRESS] start tx done threading, dev_num %d, ep_num %d\n"
+		, str_data->dev_num, str_data->ep_num);
+	do{
+		urb_tx = str_data->urb_tx_list[tx_index];
+		tx_status = urb_tx->status;
+		while(tx_status != 0 && g_correct){ /* busy-wait (1ms steps) for tx completion; bail out on global error */
+			if(tx_status != 0 && tx_status != -EINPROGRESS
+			&& tx_status != URB_STATUS_IDLE && tx_status != URB_STATUS_RX){ /* any other status is an error state */
+				xhci_err(xhci, "[STRESS][ERROR] dev %d, ep %d, urb_tx %d not in valid status - %d\n"
+				, str_data->dev_num, str_data->ep_num, tx_index, urb_tx->status);
+				is_running = false;
+				g_correct = false;
+				break;
+			}
+			msleep(1);
+			tx_status = urb_tx->status;
+		}
+		ep_rx = urb_tx->ep;
+		if(usb_endpoint_xfer_isoc(&ep_rx->desc)){ /* extra settle time for isochronous endpoints */
+			msleep(1500);
+		}
+		/* queue rx */
+		xhci_dbg(xhci, "[STRESS] tx urb %d success, queue rx urb, dev %d, ep %d\n"
+			, tx_index, str_data->dev_num, str_data->ep_num);
+		urb_tx->status = URB_STATUS_RX; /* mark slot as waiting for the rx leg */
+		urb_rx = str_data->urb_rx_list[tx_index];
+		urb_rx->status = -EINPROGRESS;
+		urb_rx->transfer_buffer_length = urb_tx->actual_length; /* read back exactly what was written */
+		urb_rx->actual_length = 0;
+		if(urb_rx->number_of_packets > 0){ /* isoc urb: rebuild packet count and private TD array for the actual length */
+			max_esit_payload = str_data->max_esit_payload;
+			urb_rx->number_of_packets = ((urb_tx->actual_length+max_esit_payload)/max_esit_payload);
+			for(i=0; i<urb_rx->number_of_packets; i++){
+				urb_rx->iso_frame_desc[i].actual_length = 0;
+			}
+			if(urb_rx->hcpriv){
+				xhci_urb_free_priv(xhci, urb_rx->hcpriv);
+			}
+			size = urb_rx->number_of_packets;
+			urb_priv = kmalloc(sizeof(struct urb_priv) + size * sizeof(struct xhci_td *), GFP_KERNEL);
+			if (!urb_priv){
+				xhci_err(xhci, "[ERROR] allocate urb_priv failed\n");
+				return RET_FAIL;
+			}
+			for (i = 0; i < size; i++) {
+				urb_priv->td[i] = kmalloc(sizeof(struct xhci_td), GFP_KERNEL);
+				if (!urb_priv->td[i]) {
+					urb_priv->length = i;
+					xhci_urb_free_priv(xhci, urb_priv);
+					return RET_FAIL;
+				}
+			}
+			urb_priv->length = size;
+			urb_priv->td_cnt = 0;
+			urb_rx->hcpriv = urb_priv;
+		}
+		while(g_stopping_ep){} /* spin while an endpoint stop is in progress elsewhere */
+		f_queue_urb(urb_rx, 0, udev);
+		tx_index++;
+		if(tx_index == TOTAL_URB){ /* wrap the ring */
+			tx_index = 0;
+		}
+	} while(is_running && g_correct);
+	xhci_err(xhci, "[STRESS][ERROR] exit tx urb done thread, dev_num %d, ep_num %d\n"
+		, str_data->dev_num, str_data->ep_num);
+	kfree(str_data); /* NOTE(review): str_data is shared with the sibling threads, which also kfree it on exit - potential double free */
+	return 0;
+}
+
+static int stress_rdn_len_rx_done_thread(void *data){ /* fix: explicit 'int' return type (implicit int is invalid in C99+ and must match kthread_run()'s threadfn signature) */
+	int i, ret;
+	struct urb *urb_tx, *urb_rx;
+	struct stress_data *str_data = data;
+	struct usb_device *udev;
+	struct xhci_virt_device *virt_dev; /* NOTE(review): unused locals kept from the original */
+	struct xhci_hcd *xhci;
+	struct usb_host_endpoint *ep_tx, *ep_rx;
+	int ep_index_tx, ep_index_rx;
+	int max_esit_payload;
+	int tx_index;
+	int rx_status;
+	char is_running;
+	int data_len;
+	int count;
+
+
+	char *tmp1, *tmp2; /* fix: declaration moved above the first statement (gnu89 kernel C forbids mixed declarations and code) */
+	is_running = true;
+	xhci = str_data->xhci;
+	udev = str_data->udev;
+	tx_index = 0;
+	count = 0;
+	xhci_err(xhci, "[STRESS] start rx done threading, dev_num %d, ep_num %d\n"
+		, str_data->dev_num, str_data->ep_num);
+	do{
+		urb_rx = str_data->urb_rx_list[tx_index];
+		rx_status = urb_rx->status;
+		xhci_dbg(xhci, "[STRESS] check rx urb %d 0x%x, dev %d, ep %d\n"
+			, tx_index, (unsigned int)urb_rx, str_data->dev_num, str_data->ep_num);
+		while(rx_status != 0 && g_correct){ /* busy-wait (1ms steps) for rx completion; bail out on global error */
+			if(rx_status != 0 && rx_status != -EINPROGRESS){ /* any status other than done/in-progress is an error */
+				xhci_err(xhci, "[STRESS][ERROR] dev %d, ep %d, urb_rx %d not in valid status - %d\n"
+				, str_data->dev_num, str_data->ep_num, tx_index, urb_rx->status);
+				is_running = false;
+				g_correct = false;
+				break;
+			}
+			msleep(1);
+			rx_status = urb_rx->status;
+		}
+		/* update urb_rx status to IDLE */
+		/* update urb_rx status to INPROGRESS */
+		xhci_dbg(xhci, "[STRESS] rx urb %d done, dev %d, ep %d\n"
+			, tx_index, str_data->dev_num, str_data->ep_num);
+
+		if(str_data->isCompare){ /* every 300th completion of eps 1/2, compare rx data up to the largest length queued so far */
+			str_data->loop_count[tx_index]++;
+			if(((str_data->ep_num==1) && (str_data->loop_count[tx_index] == 300))
+			|| ((str_data->ep_num==2) && (str_data->loop_count[tx_index] == 300))){
+				data_len = str_data->max_buffer_len[tx_index];
+				/* xhci_err(xhci, "[STRESS] comparing dev %d ep %d buffer %d\n", str_data->dev_num, str_data->ep_num, tx_index); */
+				/* compare buffer data */
+				for(i=0; i<data_len; i++){
+					tmp1 = urb_rx->transfer_buffer+i;
+					tmp2 = str_data->buffer[tx_index]+i;
+					if((*tmp1) != (*tmp2)){ /* mismatch => data corruption, abort the stress run */
+						xhci_err(xhci, "[STRESS][ERROR] buffer %d not match, rx 0x%x, buffer 0x%x, dev %d, ep %d\n"
+							, i, *tmp1, *tmp2, str_data->dev_num, str_data->ep_num);
+						is_running = false;
+
+						g_correct = false;
+						break;
+					}
+				}
+				/* xhci_err(xhci, "[STRESS] comparing buffer dev %d ep %d %d done\n", str_data->dev_num, str_data->ep_num, tx_index); */
+				/* reset loop_count */
+				str_data->loop_count[tx_index]=0;
+			}
+		}
+
+		urb_tx = str_data->urb_tx_list[tx_index]; /* recycle the slot: tx idle, rx armed */
+		urb_tx->status = URB_STATUS_IDLE;
+		urb_rx->status = -EINPROGRESS;
+
+		tx_index++;
+		if(tx_index == TOTAL_URB){ /* wrap the ring; log liveness every 100 laps */
+			count++;
+			if(count == 100){
+				xhci_err(xhci, "[STRESS] stress is running, dev %d ep %d\n",str_data->dev_num, str_data->ep_num);
+				count = 0;
+			}
+			tx_index = 0;
+		}
+	}while(is_running && g_correct);
+	xhci_err(xhci, "[STRESS][ERROR] exit rx urb done thread, dev_num %d, ep_num %d\n"
+		, str_data->dev_num, str_data->ep_num);
+	kfree(str_data); /* NOTE(review): str_data is shared with the sibling threads, which also kfree it on exit - potential double free */
+	return 0;
+}
+
+
+int f_add_rdn_len_str_threads(int dev_num, int ep_num, int maxp, char isCompare, struct usb_device *usbdev, char isEP0){ /* set up TOTAL_URB random-payload tx/rx urb pairs and launch the random-length stress kthreads; usbdev NULL selects the root-port device */
+	int ret,i,j;
+	struct device *dev;
+	struct xhci_hcd *xhci;
+	struct usb_device *udev, *rhdev;
+	struct stress_data *str_data;
+	int data_len;
+	dma_addr_t mapping; /* NOTE(review): mapping/j/tmp only used by the #if 0 pattern-fill below */
+	int iso_packet_num;
+	struct usb_host_endpoint *ep;
+	u8 *tmp;
+
+	xhci = hcd_to_xhci(my_hcd);
+	dev = xhci_to_hcd(xhci)->self.controller;
+	if(!usbdev){ /* default to the device on the globally selected root-hub port */
+		rhdev = my_hcd->self.root_hub;
+		udev = rhdev->children[g_port_id-1];
+	}
+	else{
+		udev = usbdev;
+	}
+
+	ep = udev->ep_out[ep_num];
+
+	if(usb_endpoint_xfer_isoc(&ep->desc)){ /* isoc: enough packets to cover GPD_LENGTH_RDN bytes at maxp each */
+		iso_packet_num = ((GPD_LENGTH_RDN+maxp)/maxp);
+	}
+	else{
+		iso_packet_num = 0;
+	}
+
+/* xhci_dbg(xhci, "[STRESS DBG]dev slot id %d\n", udev->slot_id); */
+	xhci_err(xhci, "[STRESS]Start stress process, dev_num %d, ep_num %d\n", dev_num, ep_num);
+	str_data = kzalloc(sizeof(struct stress_data), GFP_KERNEL); /* NOTE(review): kzalloc return not checked */
+	str_data->xhci = xhci;
+	str_data->udev = udev;
+	str_data->dev_num = dev_num;
+	str_data->ep_num = ep_num;
+	str_data->isCompare = isCompare;
+	str_data->max_esit_payload = maxp;
+	xhci_err(xhci, "[STRESS] str_data address 0x%x\n", (unsigned int)str_data);
+
+	for(i=0; i<TOTAL_URB; i++){
+		str_data->loop_count[i] = 0;
+		str_data->max_buffer_len[i] = 0;
+	}
+
+	data_len = GPD_LENGTH_RDN;
+	for(i=0; i<TOTAL_URB; i++){ /* reference payloads: random bytes per slot */
+		str_data->buffer[i] = kmalloc(data_len, GFP_KERNEL);
+		get_random_bytes(str_data->buffer[i], data_len);
+#if 0
+		for(j=0; j<data_len; j++){
+			tmp = str_data->buffer[i]+j;
+			if((j%1024)==0){
+				*tmp = (u8)i&0xff;
+			}
+			else if(j%1024==1){
+				*tmp = (u8)(j/1024)&0xff;
+			}
+			else{
+				*tmp = (u8)((j+i)&0xff);
+			}
+		}
+#endif
+	}
+/* xhci_err(xhci, "iso_packet_num %d\n", iso_packet_num); */
+	for(i=0; i<TOTAL_URB; i++){ /* build OUT urbs, copy the reference payload in, sync for DMA */
+		str_data->urb_tx_list[i] = usb_alloc_urb(iso_packet_num, GFP_NOIO);
+		ret = f_fill_urb(str_data->urb_tx_list[i], ep_num
+			, data_len, 0, EPADD_OUT, iso_packet_num, maxp, udev);
+		if(ret){
+			xhci_err(xhci, "[ERROR]fill tx urb Error!!\n");
+			return RET_FAIL;
+		}
+		memcpy(str_data->urb_tx_list[i]->transfer_buffer, str_data->buffer[i], data_len);
+		dma_sync_single_for_device(dev, str_data->urb_tx_list[i]->transfer_dma
+			, data_len, DMA_BIDIRECTIONAL);
+		str_data->urb_tx_list[i]->status = URB_STATUS_IDLE;
+/* xhci_err(xhci, "[STRESS] URB_TX %d -- 0x%x\n", i, str_data->urb_tx_list[i]); */
+	}
+
+	for(i=0; i<TOTAL_URB; i++){ /* build IN urbs sharing each tx urb's buffer and DMA mapping */
+		str_data->urb_rx_list[i] = usb_alloc_urb(iso_packet_num, GFP_NOIO);
+		ret = f_fill_urb_with_buffer(str_data->urb_rx_list[i]
+			,ep_num, data_len, str_data->urb_tx_list[i]->transfer_buffer
+			, 0, EPADD_IN, iso_packet_num, maxp, str_data->urb_tx_list[i]->transfer_dma, udev);
+		str_data->urb_rx_list[i]->status = -EINPROGRESS;
+/* xhci_err(xhci, "[STRESS] URB_RX %d -- 0x%x\n", i, str_data->urb_rx_list[i]); */
+	}
+	kthread_run(stress_rdn_len_tx_thread, str_data, "stresstxt"); /* three cooperating workers share str_data */
+	kthread_run(stress_rdn_len_tx_done_thread, str_data, "stresstxdt");
+	kthread_run(stress_rdn_len_rx_done_thread, str_data, "stress_rxdt");
+	if(isEP0){ /* optionally stress EP0 in parallel */
+		kthread_run(stress_ep0_thread, str_data, "stress_ep0");
+	}
+	return 0;
+}
+
+int f_add_str_threads(int dev_num, int ep_num, int maxp, char isCompare, struct usb_device *usbdev, char isEP0){ /* set up TOTAL_URB fixed-length (GPD_LENGTH) tx/rx urb pairs and launch the stress kthreads; usbdev NULL selects the root-port device */
+	int ret,i,j;
+	struct device *dev;
+	struct xhci_hcd *xhci;
+	struct usb_device *udev, *rhdev;
+	struct stress_data *str_data;
+	int data_len;
+	dma_addr_t mapping; /* NOTE(review): mapping is unused here */
+	int iso_packet_num;
+	struct usb_host_endpoint *ep;
+	u8 *tmp;
+
+	xhci = hcd_to_xhci(my_hcd);
+	dev = xhci_to_hcd(xhci)->self.controller;
+	if(!usbdev){ /* default to the device on the globally selected root-hub port */
+		rhdev = my_hcd->self.root_hub;
+		udev = rhdev->children[g_port_id-1];
+	}
+	else{
+		udev = usbdev;
+	}
+
+	ep = udev->ep_out[ep_num];
+
+	if(usb_endpoint_xfer_isoc(&ep->desc)){ /* isoc: enough packets to cover GPD_LENGTH bytes at maxp each */
+		iso_packet_num = ((GPD_LENGTH+maxp)/maxp);
+	}
+	else{
+		iso_packet_num = 0;
+	}
+	xhci_dbg(xhci, "[STRESS DBG]dev slot id %d\n", udev->slot_id);
+	xhci_err(xhci, "[STRESS]Start stress process, dev_num %d, ep_num %d\n", dev_num, ep_num);
+	str_data = kzalloc(sizeof(struct stress_data), GFP_KERNEL); /* NOTE(review): kzalloc return not checked */
+	str_data->xhci = xhci;
+	str_data->udev = udev;
+	str_data->dev_num = dev_num;
+	str_data->ep_num = ep_num;
+	xhci_err(xhci, "[STRESS] str_data address 0x%x\n", (unsigned int)str_data);
+	str_data->isCompare = isCompare;
+
+	for(i=0; i<TOTAL_URB; i++){
+		str_data->loop_count[i] = 0;
+	}
+
+	data_len = GPD_LENGTH;
+	for(i=0; i<TOTAL_URB; i++){ /* reference payloads: deterministic pattern encoding slot and 1KB-chunk indices */
+		str_data->buffer[i] = kmalloc(data_len, GFP_KERNEL);
+		/* get_random_bytes(str_data->buffer[i], data_len); */
+		for(j=0; j<data_len; j++){
+			tmp = str_data->buffer[i]+j;
+			if((j%1024)==0){ /* first byte of each 1KB chunk: slot index */
+				*tmp = (u8)i&0xff;
+			}
+			else if(j%1024==1){ /* second byte: chunk index */
+				*tmp = (u8)(j/1024)&0xff;
+			}
+			else{
+				*tmp = (u8)((j+i)&0xff);
+			}
+		}
+	}
+/* xhci_err(xhci, "iso_packet_num %d\n", iso_packet_num); */
+	for(i=0; i<TOTAL_URB; i++){ /* build OUT urbs, copy the reference payload in, sync for DMA */
+		str_data->urb_tx_list[i] = usb_alloc_urb(iso_packet_num, GFP_NOIO);
+		ret = f_fill_urb(str_data->urb_tx_list[i], ep_num
+			, data_len, 0, EPADD_OUT, iso_packet_num, maxp, udev);
+		if(ret){
+			xhci_err(xhci, "[ERROR]fill tx urb Error!!\n");
+			return RET_FAIL;
+		}
+		memcpy(str_data->urb_tx_list[i]->transfer_buffer, str_data->buffer[i], data_len);
+		dma_sync_single_for_device(dev, str_data->urb_tx_list[i]->transfer_dma
+			, data_len, DMA_BIDIRECTIONAL);
+		str_data->urb_tx_list[i]->status = URB_STATUS_IDLE;
+/* xhci_err(xhci, "[STRESS] URB_TX %d -- 0x%x\n", i, str_data->urb_tx_list[i]); */
+	}
+
+	for(i=0; i<TOTAL_URB; i++){ /* build IN urbs sharing each tx urb's buffer and DMA mapping */
+		str_data->urb_rx_list[i] = usb_alloc_urb(iso_packet_num, GFP_NOIO);
+		ret = f_fill_urb_with_buffer(str_data->urb_rx_list[i]
+			,ep_num, data_len, str_data->urb_tx_list[i]->transfer_buffer
+			, 0, EPADD_IN, iso_packet_num, maxp, str_data->urb_tx_list[i]->transfer_dma, udev);
+		str_data->urb_rx_list[i]->status = -EINPROGRESS;
+/* xhci_err(xhci, "[STRESS] URB_RX %d -- 0x%x\n", i, str_data->urb_rx_list[i]); */
+	}
+	kthread_run(stress_tx_thread, str_data, "stresstxt"); /* three cooperating workers share str_data */
+	kthread_run(stress_tx_done_thread, str_data, "stresstxdt");
+	kthread_run(stress_rx_done_thread, str_data, "stress_rxdt");
+	if(isEP0){ /* optionally stress EP0 in parallel */
+		kthread_run(stress_ep0_thread, str_data, "stress_ep0");
+	}
+	return 0;
+}
+
+
Index: linux-3.18.21/drivers/usb/host/mtk_test/mtk-test-lib.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/mtk-test-lib.h 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,461 @@
+#ifndef __MTK_TEST_LIB_H
+#define __MTK_TEST_LIB_H
+#include "mtk-usb-hcd.h"
+#include "mtk-phy.h"
+
+#define KERNEL_30 0 //if port to Linux 3.0, enable this
+
+#define SSUSB_U3_XHCI_BASE 0xBFB90000
+#define SSUSB_U3_MAC_BASE 0xBFB92400
+#define SSUSB_U3_SYS_BASE 0xBFB92600
+#define SSUSB_U2_SYS_BASE 0xBFB93400
+#define SSUB_SIF_SLV_TOP 0xBFA80000
+#define SIFSLV_IPPC (SSUB_SIF_SLV_TOP + 0x700)
+
+#define IPRESET_ADDR 0xBFB00834
+#define IPRESET_BIT1 22
+#define IPRESET_BIT2 25
+
+#define U3_PIPE_LATCH_SEL_ADD SSUSB_U3_MAC_BASE + 0x130
+#define U3_PIPE_LATCH_TX 0
+#define U3_PIPE_LATCH_RX 0
+
+#define U3_UX_EXIT_LFPS_TIMING_PAR 0xa0
+#define U3_REF_CK_PAR 0xb0
+#define U3_RX_UX_EXIT_LFPS_REF_OFFSET 8
+
+#define U3_RX_UX_EXIT_LFPS_REF 6 // 10MHz:3, 20MHz: 6, 25MHz: 8, 27MHz: 9, 50MHz: 15, 100MHz: 30
+#define U3_REF_CK_VAL 20 //MHz = value
+
+#define U3_TIMING_PULSE_CTRL 0xb4
+#define CNT_1US_VALUE 100 //62.5MHz:63, 70MHz:70, 80MHz:80, 100MHz:100, 125MHz:125
+
+#define USB20_TIMING_PARAMETER 0x40
+#define TIME_VALUE_1US 100 //62.5MHz:63, 80MHz:80, 100MHz:100, 125MHz:125
+
+#define LINK_PM_TIMER 0x8
+#define PM_LC_TIMEOUT_VALUE 3
+
+#define XHCI_IMOD 0x624
+#define XHCI_IMOD_MT7621_VALUE 0x10
+
+#define SSUSB_HDMA_CFG 0x950
+#define SSUSB_HDMA_CFG_MT7621_VALUE 0x10E0E0C
+
+#define U3_LTSSM_TIMING_PARAMETER3 0x2514
+#define U3_LTSSM_TIMING_PARAMETER3_VALUE 0x3E8012C
+
+#define U2_PHYD_CR1 0x64
+
+#define SSUSB_IP_SPAR0 0xC8
+
+#define SYNC_HS_EOF 0x938
+#define SYNC_HS_EOF_VALUE 0x201F3
+
+#define HSCH_CFG1 0x960
+#define SCH2_FIFO_DEPTH_OFFSET 16
+
+
+/******** Defined for PHY calibration ************/
+
+#define U3_CNR (1<<11)
+#define U3_PLS_OFFSET 5
+#define U3_P1_SC (SSUSB_U3_XHCI_BASE+0x420) //0xf0040420
+#define U3_P1_LTSSM (SSUSB_U3_MAC_BASE+0x134) //0xBFB92534
+#define U3_LINK_ERR_COUNT (SSUSB_U3_SYS_BASE+0x14) //0xBFB92614
+#define U3_RECOVERY_COUNT (SSUSB_U3_SYS_BASE+0xd8) //0xBFB926d8
+#define U3_XHCI_CMD_ADDR (SSUSB_U3_XHCI_BASE+0x20)
+#define U3_XHCI_STS_ADDR (SSUSB_U3_XHCI_BASE+0x24)
+
+#define SSUSB_IP_PW_CTRL (SIFSLV_IPPC+0x0)
+#define SSUSB_IP_SW_RST (1<<0)
+#define SSUSB_IP_PW_CTRL_1 (SIFSLV_IPPC+0x4)
+#define SSUSB_IP_PDN (1<<0)
+#define SSUSB_U3_CTRL(p) (SIFSLV_IPPC+0x30+(p*0x08))
+#define SSUSB_U3_PORT_DIS (1<<0)
+#define SSUSB_U3_PORT_PDN (1<<1)
+#define SSUSB_U3_PORT_HOST_SEL (1<<2)
+#define SSUSB_U3_PORT_CKBG_EN (1<<3)
+#define SSUSB_U3_PORT_MAC_RST (1<<4)
+#define SSUSB_U3_PORT_PHYD_RST (1<<5)
+#define SSUSB_U2_CTRL(p) (SIFSLV_IPPC+(0x50)+(p*0x08))
+#define SSUSB_U2_PORT_DIS (1<<0)
+#define SSUSB_U2_PORT_PDN (1<<1)
+#define SSUSB_U2_PORT_HOST_SEL (1<<2)
+#define SSUSB_U2_PORT_CKBG_EN (1<<3)
+#define SSUSB_U2_PORT_MAC_RST (1<<4)
+#define SSUSB_U2_PORT_PHYD_RST (1<<5)
+#define SSUSB_IP_CAP (SIFSLV_IPPC+0x024)
+
+#define SSUSB_U3_PORT_NUM(p) (p & 0xff)
+#define SSUSB_U2_PORT_NUM(p) ((p>>8) & 0xff)
+/***************************************************/
+
+
+
+struct ixia_dev
+{
+ struct usb_device *udev;
+ int ep_out;
+ int ep_in;
+};
+
+int f_enable_port(int index);
+int f_disconnect_port(int index);
+int f_enable_slot(struct usb_device *dev);
+int f_disable_slot();
+int f_address_slot(char isBSR, struct usb_device *dev);
+int f_slot_reset_device(int slot_id, char isWarmReset);
+int f_udev_add_ep(struct usb_host_endpoint *ep, struct usb_device *udev);
+int f_xhci_config_ep(struct usb_device *udev);
+int f_evaluate_context(int max_exit_latency, int maxp0, int preping_mode, int preping, int besl, int besld);
+
+int f_power_suspend();
+int f_power_resume();
+int f_power_remotewakeup();
+int f_power_set_u1u2(int u_num, int value1, int value2);
+int f_power_send_fla(int value);
+
+int f_ring_stop_cmd();
+int f_ring_abort_cmd();
+int f_ring_enlarge(int ep_dir, int ep_num, int dev_num);
+int f_ring_stop_ep(int slot_id, int ep_index);
+int f_ring_set_tr_dequeue_pointer(int slot_id, int ep_index, struct urb *urb);
+
+int f_hub_setportfeature(int hdev_num, int wValue, int wIndex);
+int f_hub_clearportfeature(int hdev_num, int wValue, int wIndex);
+int f_hub_sethubfeature(int hdev_num, int wValue);
+int f_hub_config_subhub(int parent_hub_num, int hub_num, int port_num);
+int f_hub_configep(int hdev_num, int rh_port_index);
+int f_hub_configuredevice(int hub_num, int port_num, int dev_num
+ , int transfer_type, int maxp, int bInterval, char is_config_ep, char is_stress, int stress_config);
+int f_hub_reset_dev(struct usb_device *udev,int dev_num, int port_num, int speed);
+int f_hub_configure_eth_device(int hub_num, int port_num, int dev_num);
+
+int f_random_stop(int ep_1_num, int ep_2_num, int stop_count_1, int stop_count_2, int urb_dir_1, int urb_dir_2, int length);
+int f_add_random_stop_ep_thread(struct xhci_hcd *xhci, int slot_id, int ep_index);
+int f_add_random_ring_doorbell_thread(struct xhci_hcd *xhci, int slot_id, int ep_index);
+int f_config_ep(char ep_num,int ep_dir,int transfer_type, int maxp,int bInterval, int burst, int mult, struct usb_device *udev,int config_xhci);
+int f_deconfig_ep(char is_all, char ep_num,int ep_dir,struct usb_device *usbdev,int config_xhci);
+int f_add_str_threads(int dev_num, int ep_num, int maxp, char isCompare, struct usb_device *usbdev, char isEP0);
+int f_add_ixia_thread(struct xhci_hcd *xhci, int dev_num, struct ixia_dev *ix_dev);
+int f_fill_urb(struct urb *urb,int ep_num,int data_length, int start_add,int dir, int iso_num_packets, int psize, struct usb_device *usbdev);
+int f_fill_urb_with_buffer(struct urb *urb,int ep_num,int data_length,void *buffer,int start_add,int dir, int iso_num_packets, int psize
+ , dma_addr_t dma_mapping, struct usb_device *usbdev);
+int f_queue_urb(struct urb *urb,int wait, struct usb_device *dev);
+int f_update_hub_device(struct usb_device *udev, int num_ports);
+
+int f_loopback_loop(int ep_out, int ep_in, int data_length, int start_add, struct usb_device *usbdev);
+int f_loopback_sg_loop(int ep_out, int ep_in, int data_length, int start_add, int sg_len, struct usb_device *usbdev);
+int f_loopback_loop_gpd(int ep_out, int ep_in, int data_length, int start_add, int gpd_length, struct usb_device *usbdev);
+int f_loopback_sg_loop_gpd(int ep_out, int ep_in, int data_length, int start_add, int sg_len, int gpd_length, struct usb_device *usbdev);
+
+int mtk_xhci_handshake(struct xhci_hcd *xhci, void __iomem *ptr,
+ u32 mask, u32 done, int msec);
+
+int f_test_lib_init();
+int f_test_lib_cleanup();
+
+struct urb *alloc_ctrl_urb(struct usb_ctrlrequest *dr, char *buffer, struct usb_device *udev);
+int f_ctrlrequest(struct urb *urb, struct usb_device *udev);
+
+int get_port_index(int port_id);
+void print_speed(int speed);
+int wait_event_on_timeout(volatile int *ptr, int done, int msecs);
+//int poll_event_on_timeout(int *ptr, int done, int msecs);
+int poll_event_on_timeout(volatile int *ptr, int done, int msecs);
+
+void start_port_reenabled(int index, int speed);
+int f_reenable_port(int index);
+
+int f_enable_dev_note();
+
+/* IP configuration */
+#define RH_PORT_NUM 2
+#define DEV_NUM 4
+#define HUB_DEV_NUM 4
+
+/* constant define */
+#define MAX_DATA_LENGTH 65535
+
+
+/* timeout */
+#define ATTACH_TIMEOUT 2000
+#define CMD_TIMEOUT 1000
+#define TRANS_TIMEOUT 30000
+#define SYNC_DELAY 100
+
+/* return code */
+#define RET_SUCCESS 0
+#define RET_FAIL 1
+
+/* command stage */
+#define CMD_RUNNING 0
+#define CMD_DONE 1
+#define CMD_FAIL 2
+
+/* transfer stage */
+#define TRANS_INPROGRESS 0
+#define TRANS_DONE 1
+
+/* loopback stage */
+#define LOOPBACK_OUT 0
+#define LOOPBACK_IN 1
+
+/* USB spec constant */
+/* EP descriptor */
+#define EPADD_NUM(n) ((n)<<0)
+#define EPADD_OUT 0
+#define EPADD_IN (1<<7)
+
+#define EPATT_CTRL 0
+#define EPATT_ISO 1
+#define EPATT_BULK 2
+#define EPATT_INT 3
+
+#define MAX_DATA_LENGTH 65535
+
+/* control request code */
+#define REQ_GET_STATUS 0
+#define REQ_SET_FEATURE 3
+#define REQ_CLEAR_FEATURE 1
+
+/* hub request code */
+#define HUB_FEATURE_PORT_POWER 8
+#define HUB_FEATURE_PORT_RESET 4
+#define HUB_FEATURE_PORT_SUSPEND 2
+#define HUB_FEATURE_C_PORT_CONNECTION 16
+#define HUB_FEATURE_C_PORT_RESET 20
+
+#define HUB_FEATURE_PORT_LINK_STATE 5
+
+/* global structure */
+typedef enum
+{
+ DISCONNECTED = 0,
+ CONNECTED,
+ RESET,
+ ENABLED
+}XHCI_PORT_STATUS;
+
+struct xhci_port
+{
+ int port_id;
+ XHCI_PORT_STATUS port_status;
+ int port_speed;
+ int port_reenabled;
+};
+
+/* for stress test */
+#define TOTAL_URB 30
+#define URB_STATUS_IDLE 150
+#define URB_STATUS_RX 151
+#define GPD_LENGTH (16*1024)
+#define GPD_LENGTH_RDN (10*1024)
+
+/* global parameters */
+#ifdef MTK_TEST_LIB
+#define AUTOEXT
+#else
+#define AUTOEXT extern
+#endif
+AUTOEXT volatile char g_port_connect;
+AUTOEXT volatile char g_port_reset;
+AUTOEXT volatile int g_port_id;
+AUTOEXT volatile int g_slot_id;
+AUTOEXT volatile int g_speed;
+AUTOEXT volatile int g_cmd_status;
+AUTOEXT volatile char g_event_full;
+AUTOEXT volatile char g_got_event_full;
+AUTOEXT volatile int g_device_reconnect;
+AUTOEXT struct usb_device *dev_list[DEV_NUM];
+AUTOEXT struct usb_device *hdev_list[HUB_DEV_NUM];
+AUTOEXT struct usb_hcd *my_hcd;
+AUTOEXT struct xhci_port *rh_port[RH_PORT_NUM];
+AUTOEXT volatile int g_stress_status;
+AUTOEXT struct ixia_dev *ix_dev_list[4];
+AUTOEXT volatile char g_stopped;
+AUTOEXT volatile char g_correct;
+AUTOEXT volatile int g_dev_notification;
+AUTOEXT volatile long g_dev_not_value;
+AUTOEXT volatile long g_intr_handled;
+AUTOEXT volatile int g_mfindex_event;
+AUTOEXT volatile char g_port_occ;
+AUTOEXT volatile char g_is_bei;
+AUTOEXT volatile char g_td_to_noop;
+AUTOEXT volatile char g_iso_frame;
+AUTOEXT volatile char g_test_random_stop_ep;
+AUTOEXT volatile char g_stopping_ep;
+AUTOEXT volatile char g_port_resume;
+AUTOEXT volatile int g_cmd_ring_pointer1;
+AUTOEXT volatile int g_cmd_ring_pointer2;
+AUTOEXT volatile char g_idt_transfer;
+AUTOEXT volatile char g_port_plc;
+AUTOEXT volatile char g_power_down_allowed;
+AUTOEXT volatile char g_hs_block_reset;
+AUTOEXT volatile char g_concurrent_resume;
+
+// Billionton Definition
+#define AX_CMD_SET_SW_MII 0x06
+#define AX_CMD_READ_MII_REG 0x07
+#define AX_CMD_WRITE_MII_REG 0x08
+#define AX_CMD_READ_MII_OPERATION_MODE 0x09
+#define AX_CMD_SET_HW_MII 0x0a
+#define AX_CMD_READ_EEPROM 0x0b
+#define AX_CMD_WRITE_EEPROM 0x0c
+#define AX_CMD_READ_RX_CONTROL_REG 0x0f
+#define AX_CMD_WRITE_RX_CTL 0x10
+#define AX_CMD_READ_IPG012 0x11
+#define AX_CMD_WRITE_IPG0 0x12
+#define AX_CMD_WRITE_IPG1 0x13
+#define AX_CMD_WRITE_IPG2 0x14
+#define AX_CMD_READ_MULTIFILTER_ARRAY 0x15
+#define AX_CMD_WRITE_MULTI_FILTER 0x16
+#define AX_CMD_READ_NODE_ID 0x17
+#define AX_CMD_READ_PHY_ID 0x19
+#define AX_CMD_READ_MEDIUM_MODE 0x1a
+#define AX_CMD_WRITE_MEDIUM_MODE 0x1b
+#define AX_CMD_READ_MONITOR_MODE 0x1c
+#define AX_CMD_WRITE_MONITOR_MODE 0x1d
+#define AX_CMD_READ_GPIOS 0x1e
+#define AX_CMD_WRITE_GPIOS 0x1f
+
+#define AX_CMD_GUSBKR3_BREQ 0x05
+#define AX_CMD_GUSBKR3_12e 0x12e
+#define AX_CMD_GUSBKR3_120 0x120
+#define AX_CMD_GUSBKR3_126 0x126
+#define AX_CMD_GUSBKR3_134 0x134
+#define AX_CMD_GUSBKR3_12f 0x12f
+#define AX_CMD_GUSBKR3_130 0x130
+#define AX_CMD_GUSBKR3_137 0x137
+#define AX_CMD_GUSBKR3_02 0x02
+#define AX_CMD_GUSBKR3_13e 0x13e
+
+/*
+* USB directions
+*/
+#define MUSB_DIR_OUT 0
+#define MUSB_DIR_IN 0x80
+
+/*
+* USB request types
+*/
+#define MUSB_TYPE_MASK (0x03 << 5)
+#define MUSB_TYPE_STANDARD (0x00 << 5)
+#define MUSB_TYPE_CLASS (0x01 << 5)
+#define MUSB_TYPE_VENDOR (0x02 << 5)
+#define MUSB_TYPE_RESERVED (0x03 << 5)
+
+/*
+* USB recipients
+*/
+#define MUSB_RECIP_MASK 0x1f
+#define MUSB_RECIP_DEVICE 0x00
+#define MUSB_RECIP_INTERFACE 0x01
+#define MUSB_RECIP_ENDPOINT 0x02
+#define MUSB_RECIP_OTHER 0x03
+
+/*
+* Standard requests
+*/
+#define MUSB_REQ_GET_STATUS 0x00
+#define MUSB_REQ_CLEAR_FEATURE 0x01
+#define MUSB_REQ_SET_FEATURE 0x03
+#define MUSB_REQ_SET_ADDRESS 0x05
+#define MUSB_REQ_GET_DESCRIPTOR 0x06
+#define MUSB_REQ_SET_DESCRIPTOR 0x07
+#define MUSB_REQ_GET_CONFIGURATION 0x08
+#define MUSB_REQ_SET_CONFIGURATION 0x09
+#define MUSB_REQ_GET_INTERFACE 0x0A
+#define MUSB_REQ_SET_INTERFACE 0x0B
+#define MUSB_REQ_SYNCH_FRAME 0x0C
+#define VENDOR_CONTROL_NAKTIMEOUT_TX 0x20
+#define VENDOR_CONTROL_NAKTIMEOUT_RX 0x21
+#define VENDOR_CONTROL_DISPING 0x22
+#define VENDOR_CONTROL_ERROR 0x23
+#define VENDOR_CONTROL_RXSTALL 0x24
+
+/*
+* Descriptor types
+*/
+#define MUSB_DT_DEVICE 0x01
+#define MUSB_DT_CONFIG 0x02
+#define MUSB_DT_STRING 0x03
+#define MUSB_DT_INTERFACE 0x04
+#define MUSB_DT_ENDPOINT 0x05
+#define MUSB_DT_DEVICE_QUALIFIER 0x06
+#define MUSB_DT_OTHER_SPEED 0X07
+#define MUSB_DT_INTERFACE_POWER 0x08
+#define MUSB_DT_OTG 0x09
+
+struct MUSB_DeviceRequest
+{
+ unsigned char bmRequestType;
+ unsigned char bRequest;
+ unsigned short wValue;
+ unsigned short wIndex;
+ unsigned short wLength;
+};
+
+
+struct ethenumeration_t
+{
+ unsigned char* pDesciptor;
+ struct MUSB_DeviceRequest sDevReq;
+};
+
+struct MUSB_ConfigurationDescriptor
+{
+ unsigned char bLength;
+ unsigned char bDescriptorType;
+ unsigned short wTotalLength;
+ unsigned char bNumInterfaces;
+ unsigned char bConfigurationValue;
+ unsigned char iConfiguration;
+ unsigned char bmAttributes;
+ unsigned char bMaxPower;
+};
+
+struct MUSB_InterfaceDescriptor
+{
+ unsigned char bLength;
+ unsigned char bDescriptorType;
+ unsigned char bInterfaceNumber;
+ unsigned char bAlternateSetting;
+ unsigned char bNumEndpoints;
+ unsigned char bInterfaceClass;
+ unsigned char bInterfaceSubClass;
+ unsigned char bInterfaceProtocol;
+ unsigned char iInterface;
+};
+
+struct MUSB_EndpointDescriptor
+{
+ unsigned char bLength;
+ unsigned char bDescriptorType;
+ unsigned char bEndpointAddress;
+ unsigned char bmAttributes;
+ unsigned short wMaxPacketSize;
+ unsigned char bInterval;
+};
+
+struct MUSB_DeviceDescriptor
+{
+ unsigned char bLength;
+ unsigned char bDescriptorType;
+ unsigned short bcdUSB;
+ unsigned char bDeviceClass;
+ unsigned char bDeviceSubClass;
+ unsigned char bDeviceProtocol;
+ unsigned char bMaxPacketSize0;
+ unsigned short idVendor;
+ unsigned short idProduct;
+ unsigned short bcdDevice;
+ unsigned char iManufacturer;
+ unsigned char iProduct;
+ unsigned char iSerialNumber;
+ unsigned char bNumConfigurations;
+};
+
+#endif
+
Index: linux-3.18.21/drivers/usb/host/mtk_test/mtk-test.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/mtk-test.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,9437 @@
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+
+#include <linux/kernel.h> /* printk() */
+#include <linux/fs.h> /* everything... */
+#include <linux/errno.h> /* error codes */
+#include <linux/types.h> /* size_t */
+#include <linux/proc_fs.h>
+#include <linux/fcntl.h> /* O_ACCMODE */
+#include <linux/seq_file.h>
+#include <linux/cdev.h>
+#include <linux/string.h>
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <asm/unaligned.h>
+#include <linux/usb/ch9.h>
+#include <asm/uaccess.h>
+#include "mtk-test.h"
+#include "xhci.h"
+#include "mtk-usb-hcd.h"
+#include <linux/usb/hcd.h>
+#include "mtk-test-lib.h"
+#include "mtk-protocol.h"
+#include <linux/dma-mapping.h>
+#include "xhci-mtk-scheduler.h"
+#include "mtk-phy.h"
+#include <asm/tc3162/tc3162.h>
+
+#include "xhci-mtk-power.h"
+#include <linux/dmapool.h>
+
+int f_power_reset_u1u2_counter(int u_num);
+int f_power_get_u1u2_counter(int u_num);
+int f_power_reset_L1_counter(int direct);
+int f_power_get_L1_counter(int direct);
+int f_power_get_l1s(void);
+int f_power_send_fla(int value);
+char fgEyeScanHelper_CheckPtInRegion(struct strucScanRegion * prEye, char bX, char bY);
+int UPhyWriteField(int addr, int offset, int len, int value);
+char fgEyeScanHelper_CalNextPoint(void);
+void EyeScanHelper_RunTest(void);
+void _U3_Write_Bank(int bankValue);
+PHY_INT32 _U3Write_Reg(PHY_INT32 address, PHY_INT32 value);
+PHY_INT32 _U3Read_Reg(PHY_INT32 address);
+int UPhyWriteReg(int addr, int offset, int len, int value, int value1);
+int bUPhyReadReg(int addr, int offset, int len, int value);
+int fUPhyReadField(int addr, int offset, int len, int value);
+int UPhyWriteField(int addr, int offset, int len, int value);
+
+int f_add_rdn_len_str_threads(int dev_num, int ep_num, int maxp, char isCompare, struct usb_device *usbdev, char isEP0);
+int f_add_str_threads(int dev_num, int ep_num, int maxp, char isCompare, struct usb_device *usbdev, char isEP0);
+int f_power_config_lpm(u32 slot_id, u32 hirdm, u32 L1_timeout, u32 rwe, u32 besl, u32 besld, u32 hle
+ , u32 int_nak_active, u32 bulk_nyet_active);
+int f_ring_enlarge(int ep_dir, int ep_num, int dev_num);
+int f_ring_stop_ep(int slot_id, int ep_index);
+int f_ring_set_tr_dequeue_pointer(int slot_id, int ep_index, struct urb *urb);
+int f_ring_stop_cmd();
+int f_ring_abort_cmd();
+int f_hub_getPortStatus(int hdev_num, int port_num, u32 *status);
+int f_hub_sethubfeature(int hdev_num, int wValue);
+int f_hub_setportfeature(int hdev_num, int wValue, int wIndex);
+int f_hub_clearportfeature(int hdev_num, int wValue, int wIndex);
+int f_hub_configep(int hdev_num, int rh_port_index);
+int f_hub_config_subhub(int parent_hub_num, int hub_num, int port_num);
+int f_hub_init_device(int hub_num, int port_num, int dev_num);
+int f_hub_reset_dev(struct usb_device *udev,int dev_num, int port_num, int speed);
+int f_add_random_access_reg_thread(struct xhci_hcd *xhci, int port_id, int port_rev, int power_required);
+int f_port_set_pls(int port_id, int pls);
+int f_ctrlrequest_nowait(struct urb *urb, struct usb_device *udev);
+int wait_not_event_on_timeout(int *ptr, int value, int msecs);
+
+extern u32 xhci_port_state_to_neutral(u32 state);
+
+struct file_operations xhci_mtk_test_fops;
+/********************************************************
+ * slt.disable
+ * Send disable slot command, release current slot id
+ * args:
+*********************************************************/
+static int t_slot_disable_slot(int argc, char** argv);
+/********************************************************
+ * slt.configep
+ * Config an bulk EP to let slot go to configured state
+ * args:
+*********************************************************/
+static int t_slot_reset_device(int argc, char** argv);
+/********************************************************
+ * slt.reconfig
+ * After reset device, set address command and getConfigure, setConfigure
+ * args:
+*********************************************************/
+/********************************************************
+ * slt.enable
+ * Attach device, send enable slot command
+ * args:
+*********************************************************/
+static int t_slot_enable_slot(int argc, char** argv);
+/********************************************************
+ * slt.addslt
+ * Send address command to current slot
+ * args:
+*********************************************************/
+static int t_slot_address(int argc, char** argv);
+/********************************************************
+ * slt.getdesc
+ * Do GetDescriptor control transfer
+ * args:
+*********************************************************/
+static int t_slot_getdescriptor(int argc, char** argv);
+static int t_slot_forcegetdescriptor(int argc, char** argv);
+static int t_slot_getbos(int argc, char** argv);
+static int t_slot_setconf(int argc, char** argv);
+static int t_slot_setu1u2(int argc, char** argv);
+static int t_slot_getdevstatus(int argc, char** argv);
+static int t_slt_resetport(int argc, char** argv);
+/********************************************************
+ * slt.getdesc
+ * Wait device to desicon, disable slot
+ * args:
+*********************************************************/
+static int t_slot_discon(int argc, char** argv);
+static int t_slot_evaluate_context(int argc, char** argv);
+static int t_slt_ped(int argc, char** argv);
+/* loopback */
+static int t_loopback_loop(int argc, char** argv);
+static int t_loopback_configep(int argc, char** argv);
+static int t_loopback_deconfigep(int argc, char** argv);
+
+/********************************************************
+ * pw.suspend
+ * suspend U2 device
+ * args:
+*********************************************************/
+static int t_power_suspend(int argc, char** argv);
+/********************************************************
+ * pw.resume
+ * resume U2 device
+ * args:
+*********************************************************/
+static int t_power_resume(int argc, char** argv);
+/********************************************************
+ * pw.wakeup
+ * When in suspend(U3) state, wait for device to wakeup
+ * After got remote wakeup signal, back to U0 state
+ * args:
+*********************************************************/
+static int t_power_remotewakeup(int argc, char** argv);
+/********************************************************
+ * pw.u1u2
+ * Adjust U1/U2 timeout regs value
+ * args:
+ * 1. u?(1/2): 1=u1, 2=u2
+ * 2. value: u1(0~127, 255), u2(0~254, 255)
+*********************************************************/
+static int t_power_u1u2(int argc, char** argv);
+static int t_power_suspendport(int argc, char** argv);
+static int t_power_resumeport(int argc, char** argv);
+static int t_power_fla(int argc, char** argv);
+static int t_power_occ(int argc, char** argv);
+static int t_power_u2_lpm(int argc, char** argv);
+static int t_power_u2_swlpm(int argc, char** argv);
+static int t_power_random_access_regs(int argc, char** argv);
+/********************************************************
+ * ring.erfull
+ * Test event ring full error event
+ * args:
+*********************************************************/
+static int t_ring_er_full(int argc, char** argv);
+/********************************************************
+ * ring.stopcmd
+ * Stop command ring
+ * args:
+*********************************************************/
+static int t_ring_stop_cmd(int argc, char** argv);
+/********************************************************
+ * ring.abortcmd
+ * Abort command when executing address device command
+ * args:
+*********************************************************/
+static int t_ring_abort_cmd_add(int argc, char** argv);
+static int t_ring_stop_ep(int argc, char** argv);
+static int t_ring_random_ring_doorbell(int argc, char** argv);
+static int t_ring_random_stop_ep(int argc, char** argv);
+static int t_ring_enlarge(int argc, char** argv);
+static int t_ring_shrink(int argc, char**argv);
+static int t_ring_intr_moderation(int argc, char** argv);
+static int t_ring_bei(int argc, char** argv);
+static int t_ring_idt(int argc, char** argv);
+static int t_ring_noop_transfer(int argc, char** argv);
+static int t_u3auto_hw_lpm(int argc, char** argv);
+
+static int t_u3auto_ctrl_loopback(int argc, char** argv);
+static int t_u3auto_loopback(int argc, char** argv);
+static int t_u3auto_loopback_scan(int argc, char** argv);
+static int t_u3auto_loopback_sg(int argc, char** argv);
+static int t_u3auto_loopback_scan_sg(int argc, char** argv);
+static int t_u3auto_random_suspend(int argc, char** argv);
+static int t_u3auto_random_wakeup(int argc, char**argv);
+static int t_u3auto_randomstop_dev(int argc, char** argv);
+static int t_u3auto_stress(int argc, char** argv);
+static int t_u3auto_isoc_frame_id(int argc, char** argv);
+static int t_u3auto_concurrent_remotewakeup(int argc, char** argv);
+static int t_u3auto_concurrent_u1u2_enter(int argc, char** argv);
+static int t_u3auto_concurrent_u1u2_exit(int argc, char** argv);
+
+static int t_hub_configurehub(int argc, char** argv);
+static int t_hub_configuresubhub(int argc, char** argv);
+static int t_hub_configuredevice(int argc, char** argv);
+static int t_hub_ixia_stress(int argc, char** argv);
+static int t_hub_loop_stress(int argc, char** argv);
+static int t_hub_loop(int argc, char** argv);
+static int t_hub_reset_dev(int argc, char** argv);
+static int t_hub_remotewakeup_dev(int argc, char** argv);
+static int t_hub_selsuspend(int argc, char** argv);
+static int t_hub_selresume(int argc, char** argv);
+static int t_hub_configure_eth_device(int argc, char** argv);
+static int t_hub_queue_intr(int argc, char** argv);
+static int t_hub_set_u1u2(int argc, char** argv);
+static int t_hub_force_pm(int argc, char** argv);
+
+static int t_hcd_init(int argc, char** argv);
+static int t_hcd_cleanup(int argc, char** argv);
+/********************************************************
+ * dbg.portstatus
+ * Print current port status register value
+ * args:
+*********************************************************/
+static int dbg_printportstatus(int argc, char** argv);
+/********************************************************
+ * dbg.dbgslt
+ * Print current slot context content
+ * args:
+*********************************************************/
+static int dbg_printslotcontext(int argc, char** argv);
+/********************************************************
+ * dbg.hccparams
+ * Print HCCPARAM values
+ * args:
+*********************************************************/
+static int dbg_printhccparams(int argc, char** argv);
+/********************************************************
+ * dbg.r
+ * Print xhci related register value
+ * args:
+ * 1. address: offset
+ * 2. length
+*********************************************************/
+static int dbg_read_xhci(int argc, char** argv);
+/********************************************************
+ * dbg.dr
+ * print all xhci registers
+ * args:
+*********************************************************/
+static int dbg_dump_regs(int argc, char** argv);
+
+static int dbg_port_set_pls(int argc, char** argv);
+static int dbg_port_set_ped(int argc, char** argv);
+static int dbg_port_reset(int argc, char** argv);
+
+static int dbg_delayms(int argc, char** argv);
+static int dbg_u3w(int argc, char**argv);
+static int dbg_u3r(int argc, char**argv);
+static int dbg_u3PHY_init(int argc, char**argv);
+static int dbg_u3_calibration(int argc, char** argv);
+static int dbg_phy_eyescan(int argc, char** argv);
+static int dbg_u2_testmode(int argc, char** argv);
+
+static int dbg_u3ct_lecroy(int argc, char** argv);
+static int dbg_u3ct_ellisys(int argc, char** argv);
+static int dbg_u2ct(int argc, char** argv);
+
+static int dbg_memorywrite(int argc, char** argv);
+static int dbg_memoryread(int argc, char** argv);
+static int dbg_sch_algorithm(int argc, char** argv);
+static int dbg_reg_ewe(int argc, char** argv);
+
+static int t_class_keyboard(int argc, char** argv);
+static int t_ellysis_TD7_36(int argc, char** argv);
+
+static int t_dev_config_ep(int argc,char * * argv);
+static int t_dev_polling_status(int argc,char * * argv);
+static int t_dev_query_status(int argc,char * * argv);
+static int t_dev_remotewakeup(int argc,char * * argv);
+static int t_dev_reset(int argc,char * * argv);
+static int t_dev_init(int argc, char** argv);
+static int t_dev_notification(int argc, char** argv);
+static int t_dev_u1u2(int argc, char** argv);
+static int t_dev_lpm(int argc, char** argv);
+
+
+/*******************************************************/
+
+#define CLI_MAGIC 'CLI'
+#define IOCTL_READ _IOR(CLI_MAGIC, 0, int)
+#define IOCTL_WRITE _IOW(CLI_MAGIC, 1, int)
+
+#define BUF_SIZE 200
+#define MAX_ARG_SIZE 20
+
+/*******************************************************/
+
+int u2_initialize(int argc, char** argv);
+
+/*******************************************************/
+
+typedef struct
+{
+ char name[256];
+ int (*cb_func)(int argc, char** argv);
+} CMD_TBL_T;
+
+CMD_TBL_T _arPCmdTbl[] =
+{
+ {"hcd.init", &t_hcd_init},
+ {"hcd.cleanup", &t_hcd_cleanup},
+ {"slt.discon", &t_slot_discon},
+ {"slt.disable", &t_slot_disable_slot},
+ {"slt.reset", &t_slot_reset_device},
+ {"slt.resetp", &t_slt_resetport},
+ {"slt.enable", &t_slot_enable_slot},
+ {"slt.addslt", &t_slot_address},
+ {"slt.getdesc", &t_slot_forcegetdescriptor},
+ {"slt.getbos", &t_slot_getbos},
+ {"slt.setconf", &t_slot_setconf},
+ {"slt.setu1u2", &t_slot_setu1u2},
+ {"slt.devstat", &t_slot_getdevstatus},
+ {"slt.evalctx", &t_slot_evaluate_context},
+ {"slt.ped", &t_slt_ped},
+ {"lb.config", &t_loopback_configep},
+ {"lb.deconfig", &t_loopback_deconfigep},
+ {"lb.loop", &t_loopback_loop},
+ {"pw.suspend", &t_power_suspend},
+ {"pw.suspendp", &t_power_suspendport},
+ {"pw.resume", &t_power_resume},
+ {"pw.resumep", &t_power_resumeport},
+ {"pw.wakeup", &t_power_remotewakeup},
+ {"pw.u1u2", &t_power_u1u2},
+ {"pw.fla", &t_power_fla},
+ {"pw.occ", &t_power_occ},
+ {"pw.lpm", &t_power_u2_lpm},
+ {"pw.swlpm", &t_power_u2_swlpm},
+ {"pw.rdnreg", &t_power_random_access_regs},
+ {"ring.erfull", &t_ring_er_full},
+ {"ring.stopcmd", &t_ring_stop_cmd},
+ {"ring.abortcmd", &t_ring_abort_cmd_add},
+ {"ring.stopep", &t_ring_stop_ep},
+ {"ring.rrd", &t_ring_random_ring_doorbell},
+ {"ring.rstp", &t_ring_random_stop_ep},
+ {"ring.enlarge", &t_ring_enlarge},
+ {"ring.shrink", &t_ring_shrink},
+ {"ring.intrmod", &t_ring_intr_moderation},
+ {"ring.bei", &t_ring_bei},
+ {"ring.idt", &t_ring_idt},
+ {"ring.noop", &t_ring_noop_transfer},
+ {"auto.lbctrl", &t_u3auto_ctrl_loopback},
+ {"auto.lb", &t_u3auto_loopback},
+ {"auto.lbscan", &t_u3auto_loopback_scan},
+ {"auto.lbsg", &t_u3auto_loopback_sg},
+ {"auto.hwlpm", &t_u3auto_hw_lpm},
+ {"auto.lbsgscan", &t_u3auto_loopback_scan_sg},
+ {"auto.randomsuspend", &t_u3auto_random_suspend},
+ {"auto.randomwakeup", &t_u3auto_random_wakeup},
+ {"auto.devrandomstop", &t_u3auto_randomstop_dev},
+ {"auto.stress", &t_u3auto_stress},
+ {"auto.isofrm", &t_u3auto_isoc_frame_id},
+ {"auto.conresume", &t_u3auto_concurrent_remotewakeup},
+ {"auto.conu1u2", &t_u3auto_concurrent_u1u2_enter},
+ {"auto.conu1u2exit", &t_u3auto_concurrent_u1u2_exit},
+ {"hub.config", &t_hub_configurehub},
+ {"hub.subhub", &t_hub_configuresubhub},
+ {"hub.dev", &t_hub_configuredevice},
+ {"hub.ixia", &t_hub_ixia_stress},
+ {"hub.lbstress", &t_hub_loop_stress},
+ {"hub.loop", &t_hub_loop},
+ {"hub.reset", &t_hub_reset_dev},
+ {"hub.wakeup", &t_hub_remotewakeup_dev},
+ {"hub.selsuspend", &t_hub_selsuspend},
+ {"hub.selresume", &t_hub_selresume},
+ {"hub.deveth", &t_hub_configure_eth_device},
+ {"hub.intr", &t_hub_queue_intr},
+ {"hub.u1u2", &t_hub_set_u1u2},
+ {"hub.forcepm", &t_hub_force_pm},
+ {"dbg.portstatus", &dbg_printportstatus},
+ {"dbg.dbgslt", &dbg_printslotcontext},
+ {"dbg.hccparams", &dbg_printhccparams},
+ {"dbg.r", &dbg_read_xhci},
+ {"dbg.dr", &dbg_dump_regs},
+ {"dbg.setpls", &dbg_port_set_pls},
+ {"dbg.setped", &dbg_port_set_ped},
+ {"dbg.portreset", &dbg_port_reset},
+ {"dbg.mdelay", &dbg_delayms},
+ {"dbg.u3w", &dbg_u3w},
+ {"dbg.u3r", &dbg_u3r},
+ {"dbg.u3i", &dbg_u3PHY_init},
+ {"dbg.u3c", &dbg_u3_calibration},
+ {"dbg.u3eyescan", &dbg_phy_eyescan},
+ {"dbg.mw", &dbg_memorywrite},
+ {"dbg.mr", &dbg_memoryread},
+ {"dbg.kb", &t_ellysis_TD7_36},
+ {"dbg.sch", &dbg_sch_algorithm},
+ {"dbg.ewe", &dbg_reg_ewe},
+ {"dbg.u2t", &dbg_u2_testmode},
+ {"dbg.u3lect", &dbg_u3ct_lecroy},
+ {"dbg.u3elct", &dbg_u3ct_ellisys},
+ {"dbg.u2ct", &dbg_u2ct},
+ {"dev.reset", &t_dev_reset},
+ {"dev.pollstatus", &t_dev_polling_status},
+ {"dev.qrystatus", &t_dev_query_status},
+ {"dev.configep", &t_dev_config_ep},
+ {"dev.wakeup", &t_dev_remotewakeup},
+ {"dev.note", &t_dev_notification},
+ {"dev.u1u2", &t_dev_u1u2},
+ {"dev.lpm", &t_dev_lpm},
+ {"dev.init", &t_dev_init},
+ {NULL, NULL}
+};
+
+/*******************************************************/
+
+char w_buf[BUF_SIZE];
+char r_buf[BUF_SIZE] = "this is a test";
+
+/*******************************************************/
+
+int call_function(char *buf)
+{
+	int i;
+	int argc;
+	char *argv[MAX_ARG_SIZE];
+
+	argc = 0;
+	do
+	{
+		argv[argc] = strsep(&buf, " ");
+		printk(KERN_DEBUG "[%d] %s\r\n", argc, argv[argc]);
+		argc++;
+	} while (buf && argc < MAX_ARG_SIZE);	/* bound argc: an over-long command previously overflowed argv[] */
+
+	for (i = 0; i < sizeof(_arPCmdTbl)/sizeof(CMD_TBL_T); i++)
+	{
+		if ((!strcmp(_arPCmdTbl[i].name, argv[0])) && (_arPCmdTbl[i].cb_func != NULL))
+			return _arPCmdTbl[i].cb_func(argc, argv);
+	}
+
+	return -1;
+}
+
+struct numsection
+{
+ int min;
+ int max;
+ int current_value;
+ struct numsection *next;
+};
+
+struct numsection *init_num_sec(int min, int max){
+	struct numsection *tmp;
+	tmp = kmalloc(sizeof(struct numsection), GFP_NOIO); if (tmp == NULL) return NULL;	/* kmalloc may fail; caller must check */
+	tmp->min = min;
+	tmp->max = max;
+	tmp->current_value = min;
+	tmp->next = NULL;
+	return tmp;
+}
+
+void clear_num_secs(struct numsection *num_sec){	/* free the whole singly-linked section list starting at num_sec (NULL-safe) */
+	struct numsection *next;
+	struct numsection *cur;
+	cur = num_sec;
+	while(cur != NULL){
+		next = cur->next;	/* save successor before the node is freed */
+		kfree(cur);
+		cur = next;
+	}
+}
+
+void add_num_sec(int min, int max, struct numsection *sec){
+	struct numsection *tmp, *cur;
+	cur = sec;
+	while(cur->next != NULL){
+		cur = cur->next;
+	}
+	tmp = kmalloc(sizeof(struct numsection), GFP_NOIO); if (tmp == NULL) return;	/* OOM: leave list unchanged instead of crashing */
+	tmp->min = min;
+	tmp->max = max;
+	tmp->current_value = min;
+	tmp->next = NULL;
+	cur->next = tmp;
+}
+
+struct numsection *find_next_num(struct numsection *sec){	/* odometer-style advance: bump current_value within [min,max] */
+	struct numsection *cur;
+	cur = sec;
+	cur->current_value++;
+	if(cur->current_value > cur->max){	/* wrapped past max: reset this section, move caller to next (may be NULL at end) */
+		cur->current_value = cur->min;
+		cur = cur->next;
+	}
+	return cur;
+}
+
+static int xhci_mtk_test_open(struct inode *inode, struct file *file)
+{
+
+ printk(KERN_DEBUG "xhci_mtk_test open: successful\n");
+ return 0;
+}
+
+static int xhci_mtk_test_release(struct inode *inode, struct file *file)
+{
+
+ printk(KERN_DEBUG "xhci_mtk_test release: successful\n");
+ return 0;
+}
+
+static ssize_t xhci_mtk_test_read(struct file *file, char *buf, size_t count, loff_t *ptr)
+{
+
+ printk(KERN_DEBUG "xhci_mtk_test read: returning zero bytes\n");
+ return 0;
+}
+
+static ssize_t xhci_mtk_test_write(struct file *file, const char *buf, size_t count, loff_t * ppos)
+{
+
+ printk(KERN_DEBUG "xhci_mtk_test write: accepting zero bytes\n");
+ return 0;
+}
+
+static long xhci_mtk_test_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+
+	int len = BUF_SIZE;
+
+	switch (cmd) {
+	case IOCTL_READ:
+		if (copy_to_user((char *) arg, r_buf, len)) return -EFAULT;	/* nonzero = bytes not copied; must not ignore */
+		printk(KERN_DEBUG "IOCTL_READ: %s\r\n", r_buf);
+		break;
+	case IOCTL_WRITE:
+		if (copy_from_user(w_buf, (char *) arg, len)) return -EFAULT;	/* reject faulting user pointer instead of parsing garbage */
+		printk(KERN_DEBUG "IOCTL_WRITE: %s\r\n", w_buf);
+
+		/* invoke function */
+		return call_function(w_buf);
+		break;
+	default:
+		return -ENOTTY;
+	}
+
+	return len;
+}
+
+static int xhci_mtk_test_unlock_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+
+	int len = BUF_SIZE;
+
+	switch (cmd) {
+	case IOCTL_READ:
+		if (copy_to_user((char *) arg, r_buf, len)) return -EFAULT;	/* nonzero = bytes not copied; must not ignore */
+		printk(KERN_DEBUG "IOCTL_READ: %s\r\n", r_buf);
+		break;
+	case IOCTL_WRITE:
+		if (copy_from_user(w_buf, (char *) arg, len)) return -EFAULT;	/* reject faulting user pointer instead of parsing garbage */
+		printk(KERN_DEBUG "IOCTL_WRITE: %s\r\n", w_buf);
+
+		/* invoke function */
+		return call_function(w_buf);
+		break;
+	default:
+		return -ENOTTY;
+	}
+
+	return len;
+}
+
+
+static void print_slot_state(int slot_state){
+ switch (slot_state) {
+ case 0:
+ printk(KERN_DEBUG "slot state: enabled/disabled\n");
+ break;
+ case 1:
+ printk(KERN_DEBUG "slot state: default\n");
+ break;
+ case 2:
+ printk(KERN_DEBUG "slot state: addressed\n");
+ break;
+ case 3:
+ printk(KERN_DEBUG "slot state: configured\n");
+ break;
+ default:
+ printk(KERN_DEBUG "slot state: reserved\n");
+ break;
+ }
+}
+
+static int t_hcd_init(int argc, char** argv){
+ return f_test_lib_init();
+}
+
+static int t_hcd_cleanup(int argc, char** argv){
+ return f_test_lib_cleanup();
+}
+
+int u3auto_hcd_reset(void)
+{
+	if(f_test_lib_init() != RET_SUCCESS)	/* bring up the host-controller test library first */
+		return RET_FAIL;
+	if(f_enable_port(0) != RET_SUCCESS)	/* enable root-hub port 0 */
+		return RET_FAIL;
+	if(f_enable_slot(NULL) != RET_SUCCESS)	/* allocate a device slot */
+		return RET_FAIL;
+	if(f_address_slot(false, NULL) != RET_SUCCESS)	/* address the device (BSR=false) */
+		return RET_FAIL;
+
+	return RET_SUCCESS;
+
+}
+
+static int t_slot_discon(int argc, char** argv){
+ int ret;
+
+ ret = f_disconnect_port(0);
+ if(ret != RET_SUCCESS){
+ return RET_FAIL;
+ }
+ ret = f_disable_slot();
+ if(ret != RET_SUCCESS){
+ return RET_FAIL;
+ }
+
+ return RET_SUCCESS;
+}
+
+static int t_slot_disable_slot(int argc, char** argv){
+ int ret;
+ struct xhci_hcd *xhci;
+ ret = 0;
+ if(my_hcd == NULL){
+ printk(KERN_ERR "[ERROR]host controller driver not initiated\n");
+ return RET_FAIL;
+ }
+ xhci = hcd_to_xhci(my_hcd);
+
+ ret = f_disable_slot();
+
+ if(ret != RET_SUCCESS){
+ return RET_FAIL;
+ }
+ return RET_SUCCESS;
+}
+
+static int t_slot_evaluate_context(int argc, char** argv){
+	int ret;
+	int max_exit_latency, maxp0, preping_mode, preping;
+	int besld = 0, besl = 0;	/* default when absent from cmdline; previously passed uninitialized to f_evaluate_context when argc <= 5/6 */
+
+	ret = 0;
+	max_exit_latency = 0;
+	maxp0 = 64;
+	preping_mode = 0;
+	preping = 0;
+
+	if(my_hcd == NULL){
+		printk(KERN_ERR "[ERROR]host controller driver not initiated\n");
+		return RET_FAIL;
+	}
+	if(argc > 1){
+		max_exit_latency = (int)simple_strtol(argv[1], &argv[1], 10);
+	}
+	if(argc > 2){
+		maxp0 = (int)simple_strtol(argv[2], &argv[2], 10);
+	}
+	if(argc > 3){
+		preping_mode = (int)simple_strtol(argv[3], &argv[3], 10);
+	}
+	if(argc > 4){
+		preping = (int)simple_strtol(argv[4], &argv[4], 10);
+	}
+	if(argc > 5){
+		besl = (int)simple_strtol(argv[5], &argv[5], 10);
+	}
+	if(argc > 6){
+		besld = (int)simple_strtol(argv[6], &argv[6], 10);
+	}
+	ret = f_evaluate_context(max_exit_latency, maxp0, preping_mode, preping, besl, besld);
+
+	if(ret != RET_SUCCESS){
+		return RET_FAIL;
+	}
+	return RET_SUCCESS;
+}
+
+static int t_slot_getbos(int argc, char** argv){
+ int ret;
+ struct usb_device *udev, *rhdev;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_hcd *xhci;
+ struct urb *urb;
+ struct usb_ctrlrequest *dr;
+ void *buf;
+ short *tmp_length, total_length;
+ ret = 0;
+
+ if(g_slot_id == 0){
+ printk(KERN_ERR "[ERROR] slot ID not valid\n");
+ return RET_FAIL;
+ }
+
+ xhci = hcd_to_xhci(my_hcd);
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+
+ /* get total length in bos */
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ dr->bRequestType = USB_DIR_IN;
+ dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+ dr->wValue = cpu_to_le16((USB_DT_BOS << 8) + 0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(USB_DT_BOS_SIZE);
+ buf = kmalloc(USB_DT_BOS_SIZE, GFP_KERNEL);
+ memset(buf, 0, USB_DT_BOS_SIZE);
+
+ urb = alloc_ctrl_urb(dr, buf, udev);
+ ret = f_ctrlrequest(urb,udev);
+
+ tmp_length = urb->transfer_buffer+2;
+ total_length = *tmp_length;
+ xhci_dbg(xhci, "total_length: %d\n", total_length);
+
+ kfree(dr);
+ kfree(buf);
+ usb_free_urb(urb);
+
+ /* get bos */
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ dr->bRequestType = USB_DIR_IN;
+ dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+ dr->wValue = cpu_to_le16((USB_DT_BOS << 8) + 0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(total_length);
+ buf = kmalloc(total_length, GFP_KERNEL);
+ memset(buf, 0 , total_length);
+ urb = alloc_ctrl_urb(dr, buf, udev);
+ ret = f_ctrlrequest(urb,udev);
+ kfree(dr);
+ kfree(buf);
+ usb_free_urb(urb);
+
+ return ret;
+}
+
+static int t_slot_forcegetdescriptor(int argc, char** argv){
+ int ret;
+ struct usb_device *udev, *rhdev;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_hcd *xhci;
+ struct urb *urb;
+ struct usb_ctrlrequest *dr;
+ struct usb_config_descriptor *desc;
+ int i;
+ char *tmp;
+ ret = 0;
+
+ if(g_slot_id == 0){
+ printk(KERN_ERR "[ERROR] slot ID not valid\n");
+ return RET_FAIL;
+ }
+
+ xhci = hcd_to_xhci(my_hcd);
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ dr->bRequestType = USB_DIR_IN;
+ dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+ dr->wValue = cpu_to_le16((USB_DT_DEVICE << 8) + 0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(USB_DT_DEVICE_SIZE);
+ desc = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
+ memset(desc, 0, USB_DT_DEVICE_SIZE);
+
+ urb = alloc_ctrl_urb(dr, desc, udev);
+ ret = f_ctrlrequest(urb, udev);
+ if(urb->status == -EINPROGRESS){
+ /* timeout, stop endpoint, set TR dequeue pointer */
+ f_ring_stop_ep(g_slot_id, 0);
+ f_ring_set_tr_dequeue_pointer(g_slot_id, 0, urb);
+ }
+ kfree(dr);
+ kfree(desc);
+ usb_free_urb(urb);
+
+ return ret;
+}
+
+static int t_slot_getdescriptor(int argc, char** argv){
+ int ret;
+ struct usb_device *udev, *rhdev;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_hcd *xhci;
+ struct urb *urb;
+ struct usb_ctrlrequest *dr;
+ struct usb_config_descriptor *desc;
+ int i;
+ char *tmp;
+ ret = 0;
+
+ if(g_slot_id == 0){
+ printk(KERN_ERR "[ERROR] slot ID not valid\n");
+ return RET_FAIL;
+ }
+
+ xhci = hcd_to_xhci(my_hcd);
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ dr->bRequestType = USB_DIR_IN;
+ dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+ dr->wValue = cpu_to_le16((USB_DT_DEVICE << 8) + 0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(USB_DT_DEVICE_SIZE);
+ desc = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
+ memset(desc, 0, USB_DT_DEVICE_SIZE);
+
+ urb = alloc_ctrl_urb(dr, desc, udev);
+ ret = f_ctrlrequest(urb, udev);
+
+ kfree(dr);
+ kfree(desc);
+ usb_free_urb(urb);
+
+ return ret;
+}
+
+static int t_slot_setconf(int argc, char** argv){
+ int ret;
+ struct usb_device *udev, *rhdev;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_hcd *xhci;
+ struct urb *urb;
+ struct usb_ctrlrequest *dr;
+ int i;
+ char *tmp;
+ char isConfigMouse;
+ ret = 0;
+
+ isConfigMouse = false;
+
+ if(g_slot_id == 0){
+ printk(KERN_ERR "[ERROR] slot ID not valid\n");
+ return RET_FAIL;
+ }
+
+ if(argc > 1){
+ if(!strcmp(argv[1], "mouse")){
+ isConfigMouse = true;
+ }
+ }
+
+ xhci = hcd_to_xhci(my_hcd);
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+
+ if(isConfigMouse){
+ /* config mouse for Ellysis T7.36 test case */
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ dr->bRequestType = 0x21;
+ dr->bRequest = 0x0a;
+ dr->wValue = cpu_to_le16(0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(0);
+ urb = alloc_ctrl_urb(dr, NULL, udev);
+ ret = f_ctrlrequest(urb,udev);
+ kfree(dr);
+ usb_free_urb(urb);
+ }
+ else{
+ /* set configuration */
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ dr->bRequestType = USB_DIR_OUT;
+ dr->bRequest = USB_REQ_SET_CONFIGURATION;
+ dr->wValue = cpu_to_le16(1);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(0);
+ urb = alloc_ctrl_urb(dr, NULL, udev);
+ ret = f_ctrlrequest(urb,udev);
+ kfree(dr);
+ usb_free_urb(urb);
+ }
+ return ret;
+}
+
+static int t_slot_setu1u2(int argc, char** argv){
+ int ret;
+ struct usb_device *udev, *rhdev;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_hcd *xhci;
+ struct urb *urb;
+ struct usb_ctrlrequest *dr;
+ struct usb_config_descriptor *desc;
+ int i;
+ char *tmp;
+ ret = 0;
+
+ if(g_slot_id == 0){
+ printk(KERN_ERR "[ERROR] slot ID not valid\n");
+ return RET_FAIL;
+ }
+
+ xhci = hcd_to_xhci(my_hcd);
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+
+ /* set feature - u1 enable */
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ dr->bRequestType = USB_DIR_OUT;
+ dr->bRequest = USB_REQ_SET_FEATURE;
+ dr->wValue = cpu_to_le16(USB_U1_ENABLE);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(0);
+ urb = alloc_ctrl_urb(dr, NULL, udev);
+ ret = f_ctrlrequest(urb,udev);
+
+ usb_free_urb(urb);
+ kfree(dr);
+
+ /* set feature - u2 enable */
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ dr->bRequestType = USB_DIR_OUT;
+ dr->bRequest = USB_REQ_SET_FEATURE;
+ dr->wValue = cpu_to_le16(USB_U2_ENABLE);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(0);
+ urb = alloc_ctrl_urb(dr, NULL, udev);
+ ret = f_ctrlrequest(urb,udev);
+
+ kfree(dr);
+ usb_free_urb(urb);
+
+ return ret;
+}
+
+static int t_slot_getdevstatus(int argc, char** argv){
+ int ret;
+ struct usb_device *udev, *rhdev;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_hcd *xhci;
+ struct urb *urb;
+ struct usb_ctrlrequest *dr;
+ struct usb_config_descriptor *desc;
+ int i;
+ char *tmp;
+ ret = 0;
+
+ if(g_slot_id == 0){
+ printk(KERN_ERR "[ERROR] slot ID not valid\n");
+ return RET_FAIL;
+ }
+
+ xhci = hcd_to_xhci(my_hcd);
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+
+ /* get status */
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ dr->bRequestType = USB_DIR_IN;
+ dr->bRequest = USB_REQ_GET_STATUS;
+ dr->wValue = cpu_to_le16(0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(USB_DT_STATUS_SIZE);
+ desc = kmalloc(USB_DT_STATUS_SIZE, GFP_KERNEL);
+ urb = alloc_ctrl_urb(dr, desc, udev);
+ ret = f_ctrlrequest(urb,udev);
+ kfree(dr);
+ kfree(desc);
+ usb_free_urb(urb);
+ return ret;
+}
+
+static int t_slt_resetport(int argc, char** argv){
+ u32 __iomem *addr;
+ int temp;
+ int port_id;
+ char isWarmReset;
+ struct xhci_hcd *xhci;
+
+ port_id = g_port_id;
+ isWarmReset = false;
+
+ if(argc > 1){
+ if(!strcmp(argv[1], "true")){
+ printk(KERN_DEBUG "Do Warm Reset\n");
+ isWarmReset = true;
+ }
+ }
+ if(argc > 2){
+ port_id = (int)simple_strtol(argv[2], &argv[2], 10);
+ }
+
+ xhci = hcd_to_xhci(my_hcd);
+
+ if(isWarmReset){
+ /* do warm reset */
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "before reset port %d = 0x%x\n", port_id-1, temp);
+ temp = xhci_port_state_to_neutral(temp);
+ temp = (temp | PORT_WR);
+ xhci_writel(xhci, temp, addr);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "after reset port %d = 0x%x\n", port_id-1, temp);
+
+ }
+ else{
+ /* hot reset port */
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "before reset port %d = 0x%x\n", port_id-1, temp);
+ temp = xhci_port_state_to_neutral(temp);
+ temp = (temp | PORT_RESET);
+ xhci_writel(xhci, temp, addr);
+ }
+
+ return RET_SUCCESS;
+}
+
+static int t_slt_ped(int argc, char** argv){
+ struct xhci_hcd *xhci;
+ u32 __iomem *addr;
+ int temp;
+ int ret;
+
+ xhci = hcd_to_xhci(my_hcd);
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((g_port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ temp |= PORT_PE;
+ xhci_writel(xhci, temp, addr);
+ ret = f_disconnect_port(0);
+ if(ret != RET_SUCCESS){
+ return RET_FAIL;
+ }
+ ret = f_disable_slot();
+ if(ret != RET_SUCCESS){
+ return RET_FAIL;
+ }
+ return ret;
+}
+
+/* simply address a slot */
+static int t_slot_address(int argc, char** argv){
+ int ret;
+ char isBSR;
+
+ isBSR = false;
+ if(argc > 1){
+ if(!strcmp(argv[1], "true")){
+ printk(KERN_DEBUG "test BSR=true\n");
+ isBSR = true;
+ }
+ }
+
+ return f_address_slot(isBSR, NULL);
+}
+
+static int t_slot_enable_slot(int argc, char** argv){
+ char isEnablePort;
+ isEnablePort = true;
+ if(argc > 1){
+ if(!strcmp(argv[1], "false")){
+ printk(KERN_DEBUG "test BSR=true\n");
+ isEnablePort = false;
+ }
+ }
+ if(isEnablePort){
+ if(f_enable_port(0) != RET_SUCCESS){
+ return RET_FAIL;
+ }
+ }
+ return f_enable_slot(NULL);
+}
+
+static int t_slot_reset_device(int argc, char** argv){
+ char isWarmReset;
+ isWarmReset = false;
+
+ if(argc > 1){
+ if(!strcmp(argv[1], "true")){
+ printk(KERN_DEBUG "test WarmReset=true\n");
+ isWarmReset = true;
+ }
+ }
+ return f_slot_reset_device(0, isWarmReset);
+
+}
+
+static int t_u3auto_ctrl_loopback(int argc, char** argv){
+ int ret,loop,length,num=0;
+ char isFullSpeed, isReset;
+
+ isFullSpeed = false;
+ ret = 0;
+ num = 1;
+ length = 8;
+ isReset = false;
+ if(argc > 1){
+ num = (int)simple_strtol(argv[1], &argv[1], 10);
+ }
+ if(argc > 2){
+ length = (int)simple_strtol(argv[2], &argv[2], 10);
+ }
+ if(argc > 3){
+ if(!strcmp(argv[3], "ss")){
+ printk(KERN_ERR "test super speed\n");
+ isFullSpeed = false;
+ }
+ if(!strcmp(argv[3], "hs")){
+ printk(KERN_ERR "test high speed\n");
+ isFullSpeed = false;
+ }
+ if(!strcmp(argv[3], "fs")){
+ printk(KERN_ERR "Test full speed\n");
+ isFullSpeed = true;
+ }
+ }
+ if(argc > 4){
+ if(!strcmp(argv[4], "true")){
+ printk(KERN_ERR "Reset device\n");
+ isReset = true;
+ }
+ }
+#if 0
+ if(u3auto_hcd_reset() != RET_SUCCESS)
+ return RET_FAIL;
+#endif
+ if(isFullSpeed && isReset){
+ start_port_reenabled(0, DEV_SPEED_FULL);
+
+ ret=dev_reset(DEV_SPEED_FULL,NULL);
+ if(ret){
+ printk(KERN_ERR "Set device to full speed failed!!\n");
+ return RET_FAIL;
+ }
+
+ ret = f_disable_slot();
+ if(ret){
+ printk(KERN_ERR "disable slot failed!!\n");
+ return RET_FAIL;
+ }
+
+ ret = f_reenable_port(0);
+ if(ret){
+ printk(KERN_ERR "reenable port failed!!\n");
+ return RET_FAIL;
+ }
+
+ ret = f_enable_slot(NULL);
+ if(ret){
+ printk(KERN_ERR "enable slot failed!!\n");
+ return RET_FAIL;
+ }
+
+ ret=f_address_slot(false, NULL);
+ if(ret){
+ printk(KERN_ERR "address device failed!!\n");
+ return RET_FAIL;
+ }
+ }
+ /*
+ * num = 0, loop forever
+ * num = 1, follow the length that user input
+ * num != 1, loopback incremental length data
+ */
+ int max_length = 2048;
+ if(num == 0){
+ for(length=1; length <= max_length;){
+ printk(KERN_ERR "Do CTRL loopback, length %d\n", length);
+ ret=dev_ctrl_loopback(length,NULL);
+
+
+ if(ret)
+ {
+ printk(KERN_ERR "ctrl loop fail!!\n");
+ printk(KERN_ERR "length : %d\n",length);
+ break;
+ }
+ length = length + 1;
+ }
+ }
+ else{
+ for(loop=0;loop<num;loop++){
+ if(num != 1){
+ length=((((loop+1)*102400)+loop*40)%2048);
+ }
+
+ if(!length){
+ length=2048;
+ }
+ printk(KERN_ERR "Do CTRL loopback, length %d\n", length);
+ ret=dev_ctrl_loopback(length,NULL);
+
+ if(ret)
+ {
+ printk(KERN_ERR "ctrl loop fail!!\n");
+ printk(KERN_ERR "length : %d\n",length);
+ break;
+ }
+
+ }
+ }
+ if(ret){
+ printk(KERN_ERR "[FAIL] Ctrl loop back test failed\n");
+ }
+ else{
+ printk(KERN_ERR "[PASS] ctrl loop back [%d] round\n",num);
+ }
+ return ret;
+}
+
+#define SG_SS_BULK_INTERVAL_SIZE 1
+#define SG_SS_BULK_MAXP_SIZE 1
+#define SG_SS_BULK_OUT_EP_SIZE 1
+#define SG_SS_BULK_IN_EP_SIZE 1
+#define SG_SS_BULK_SG_LEN_SIZE 4
+#define SG_SS_BULK_BURST_SIZE 1
+#define SG_SS_BULK_MULT_SIZE 1
+
+#define SG_SS_INTR_INTERVAL_SIZE 1
+#define SG_SS_INTR_MAXP_SIZE 1
+#define SG_SS_INTR_OUT_EP_SIZE 1
+#define SG_SS_INTR_IN_EP_SIZE 1
+#define SG_SS_INTR_SG_LEN_SIZE 3
+#define SG_SS_INTR_BURST_SIZE 1
+#define SG_SS_INTR_MULT_SIZE 1
+
+#define SG_HS_BULK_INTERVAL_SIZE 1
+#define SG_HS_BULK_MAXP_SIZE 1
+#define SG_HS_BULK_OUT_EP_SIZE 1
+#define SG_HS_BULK_IN_EP_SIZE 1
+#define SG_HS_BULK_SG_LEN_SIZE 4
+
+#define SG_HS_INTR_INTERVAL_SIZE 1
+#define SG_HS_INTR_MAXP_SIZE 1
+#define SG_HS_INTR_OUT_EP_SIZE 1
+#define SG_HS_INTR_IN_EP_SIZE 1
+#define SG_HS_INTR_SG_LEN_SIZE 3
+
+#define SG_FS_BULK_INTERVAL_SIZE 1
+#define SG_FS_BULK_MAXP_SIZE 1
+#define SG_FS_BULK_OUT_EP_SIZE 1
+#define SG_FS_BULK_IN_EP_SIZE 1
+#define SG_FS_BULK_SG_LEN_SIZE 4
+
+#define SG_FS_INTR_INTERVAL_SIZE 1
+#define SG_FS_INTR_MAXP_SIZE 1
+#define SG_FS_INTR_OUT_EP_SIZE 1
+#define SG_FS_INTR_IN_EP_SIZE 1
+#define SG_FS_INTR_SG_LEN_SIZE 3
+
+#define DF_BURST_SIZE 1 /* default for hs, fs */
+#define DF_MULT_SIZE 1 /* default for hs, fs */
+
+static int t_u3auto_loopback_scan_sg(int argc, char** argv){
+ int ret,length,start_add;
+ char bdp;
+ int gpd_buf_size,bd_buf_size;
+ int transfer_type;
+ int maxp;
+ int bInterval;
+ int sg_len;
+ int ep_out_num, ep_in_num;
+ int speed;
+ int start_add_index, ep_out_index, ep_in_index, maxp_index, interval_index, sg_len_index, mult_index, burst_index;
+ int interval_arr_size, maxp_arr_size, out_ep_arr_size, in_ep_arr_size, sg_len_arr_size, mult_arr_size, burst_arr_size;
+ int min_length, max_length, min_start_add, max_start_add;
+ int *arr_interval, *arr_maxp, *arr_ep_out, *arr_ep_in, *arr_sg_len, *arr_mult, *arr_burst;
+ int mult_dev, burst, mult;
+ int dram_offset, extension;
+ struct numsection *sec, *cur_sec;
+
+ int sg_ss_bulk_interval[SG_SS_BULK_INTERVAL_SIZE] = {0};
+ int sg_ss_bulk_maxp[SG_SS_BULK_MAXP_SIZE] = {1024};
+ int sg_ss_bulk_out_ep[SG_SS_BULK_OUT_EP_SIZE] = {1};
+ int sg_ss_bulk_in_ep[SG_SS_BULK_IN_EP_SIZE] = {1};
+ int sg_ss_bulk_sg_len[SG_SS_BULK_SG_LEN_SIZE] = {512, 1024, 2048, 4096};
+ int sg_ss_bulk_burst[SG_SS_BULK_BURST_SIZE] = {15};
+ int sg_ss_bulk_mult[SG_SS_BULK_MULT_SIZE] = {0};
+
+ int sg_ss_intr_interval[SG_SS_INTR_INTERVAL_SIZE] = {1};
+ int sg_ss_intr_maxp[SG_SS_INTR_MAXP_SIZE] = {1024};
+ int sg_ss_intr_out_ep[SG_SS_INTR_OUT_EP_SIZE] = {1};
+ int sg_ss_intr_in_ep[SG_SS_INTR_IN_EP_SIZE] = {1};
+ int sg_ss_intr_sg_len[SG_SS_INTR_SG_LEN_SIZE] = {64,512,1024};
+ int sg_ss_intr_burst[SG_SS_INTR_BURST_SIZE] = {0/*,1,2*/};
+ int sg_ss_intr_mult[SG_SS_INTR_MULT_SIZE] = {0};
+
+ int sg_hs_bulk_interval[SG_HS_BULK_INTERVAL_SIZE] = {0};
+ int sg_hs_bulk_maxp[SG_HS_BULK_MAXP_SIZE] = {512};
+ int sg_hs_bulk_out_ep[SG_HS_BULK_OUT_EP_SIZE] = {1};
+ int sg_hs_bulk_in_ep[SG_HS_BULK_IN_EP_SIZE] = {1};
+ int sg_hs_bulk_sg_len[SG_HS_BULK_SG_LEN_SIZE] = {512, 1024, 2048, 4096};
+
+ int sg_hs_intr_interval[SG_HS_INTR_INTERVAL_SIZE] = {1};
+ int sg_hs_intr_maxp[SG_HS_INTR_MAXP_SIZE] = {1024}; /* 32, 512, 1024 */
+ int sg_hs_intr_out_ep[SG_HS_INTR_OUT_EP_SIZE] = {2};
+ int sg_hs_intr_in_ep[SG_HS_INTR_IN_EP_SIZE] = {2};
+ int sg_hs_intr_sg_len[SG_HS_INTR_SG_LEN_SIZE] = {64, 512, 1024};
+
+ int sg_fs_bulk_interval[SG_FS_BULK_INTERVAL_SIZE] = {0};
+ int sg_fs_bulk_maxp[SG_FS_BULK_MAXP_SIZE] = {64};
+ int sg_fs_bulk_out_ep[SG_FS_BULK_OUT_EP_SIZE] = {1};
+ int sg_fs_bulk_in_ep[SG_FS_BULK_IN_EP_SIZE] = {1};
+ int sg_fs_bulk_sg_len[SG_FS_BULK_SG_LEN_SIZE] = {64, 512, 1024, 2048};
+
+ int sg_fs_intr_interval[SG_FS_INTR_INTERVAL_SIZE] = {1};/* 1, 127,255 */
+ int sg_fs_intr_maxp[SG_FS_INTR_MAXP_SIZE] = {32}; /* 8, 32, 64 */
+ int sg_fs_intr_out_ep[SG_FS_INTR_OUT_EP_SIZE] = {2};
+ int sg_fs_intr_in_ep[SG_FS_INTR_IN_EP_SIZE] = {2};
+ int sg_fs_intr_sg_len[SG_FS_INTR_SG_LEN_SIZE] = {64,512,1024};
+
+ int df_burst[DF_BURST_SIZE] = {0};
+ int df_mult[DF_MULT_SIZE] = {0};
+
+ ret = 0;
+ speed = DEV_SPEED_HIGH;;
+ transfer_type = EPATT_BULK;
+ maxp = 512;
+ bInterval = 0;
+ ep_out_num = 1;
+ ep_in_num = 1;
+ length = 65535;
+ mult_dev = 3;
+ mult = 0;
+ burst = 8;
+ dram_offset = 0;
+ extension = 0;
+
+ if(argc > 1){
+ if(!strcmp(argv[1], "ss")){
+ printk(KERN_ERR "Test super speed\n");
+ speed = DEV_SPEED_SUPER; /* TODO: superspeed */
+ }
+ else if(!strcmp(argv[1], "hs")){
+ printk(KERN_ERR "Test high speed\n");
+ speed = DEV_SPEED_HIGH;
+ }
+ else if(!strcmp(argv[1], "fs")){
+ printk(KERN_ERR "Test full speed\n");
+ speed = DEV_SPEED_FULL;
+ }
+ }
+ if(argc > 2){
+ if(!strcmp(argv[2], "bulk")){
+ printk(KERN_ERR "Test bulk transfer\n");
+ transfer_type = EPATT_BULK;
+ }
+ else if(!strcmp(argv[2], "intr")){
+ printk(KERN_ERR "Test intr transfer\n");
+ transfer_type = EPATT_INT;
+ }
+ else if(!strcmp(argv[2], "isoc")){
+ printk(KERN_ERR "Test isoc transfer\n");
+ transfer_type = EPATT_ISO;
+ }
+ }
+
+ if(speed == DEV_SPEED_SUPER && transfer_type == EPATT_BULK){
+ arr_maxp = sg_ss_bulk_maxp;
+ maxp_arr_size = SG_SS_BULK_MAXP_SIZE;
+ arr_interval = sg_ss_bulk_interval;
+ interval_arr_size = SG_SS_BULK_INTERVAL_SIZE;
+ /* length 1~65535 */
+ sec = init_num_sec(513, 1025);
+ add_num_sec(10240-10, 10240+10,sec);
+ add_num_sec(65535-10,65535,sec);
+ /* start_add 0~63 */
+ min_start_add = 0;
+ max_start_add = 0;
+ arr_ep_out = sg_ss_bulk_out_ep;
+ out_ep_arr_size = SG_SS_BULK_OUT_EP_SIZE;
+ arr_ep_in = sg_ss_bulk_in_ep;
+ in_ep_arr_size = SG_SS_BULK_IN_EP_SIZE;
+
+ /* sg_len */
+ arr_sg_len = sg_ss_bulk_sg_len;
+ sg_len_arr_size = SG_SS_BULK_SG_LEN_SIZE;
+
+ arr_burst = sg_ss_bulk_burst;
+ burst_arr_size = SG_SS_BULK_BURST_SIZE;
+ arr_mult = sg_ss_bulk_mult;
+ mult_arr_size = SG_SS_BULK_MULT_SIZE;
+ }
+
+ if(speed == DEV_SPEED_SUPER && transfer_type == EPATT_INT){
+ arr_maxp = sg_ss_intr_maxp;
+ maxp_arr_size = SG_SS_INTR_MAXP_SIZE;
+ arr_interval = sg_ss_intr_interval;
+ interval_arr_size = SG_SS_INTR_INTERVAL_SIZE;
+ /* length 1~65535 */
+ sec = init_num_sec(513, 1025);
+ add_num_sec(10240-10, 10240+10,sec);
+ add_num_sec(65535-10,65535,sec);
+ /* start_add 0 */
+ min_start_add = 0;
+ max_start_add = 0;
+ arr_ep_out = sg_ss_intr_out_ep;
+ out_ep_arr_size = SG_SS_INTR_OUT_EP_SIZE;
+ arr_ep_in = sg_ss_intr_in_ep;
+ in_ep_arr_size = SG_SS_INTR_IN_EP_SIZE;
+
+ /* sg_len */
+ arr_sg_len = sg_ss_intr_sg_len;
+ sg_len_arr_size = SG_SS_INTR_SG_LEN_SIZE;
+
+ arr_burst = sg_ss_intr_burst;
+ burst_arr_size = SG_SS_INTR_BURST_SIZE;
+ arr_mult = sg_ss_intr_mult;
+ mult_arr_size = SG_SS_INTR_MULT_SIZE;
+ }
+
+ if(speed == DEV_SPEED_HIGH && transfer_type == EPATT_BULK){
+ arr_maxp = sg_hs_bulk_maxp;
+ maxp_arr_size = SG_HS_BULK_MAXP_SIZE;
+ arr_interval = sg_hs_bulk_interval;
+ interval_arr_size = SG_HS_BULK_INTERVAL_SIZE;
+ /* length 4096~65535 */
+ sec = init_num_sec(1,513);
+ add_num_sec(10240-10,10240+10,sec);
+ add_num_sec(65535-10,65535,sec);
+ /* start_add 0~63 */
+ min_start_add = 0;
+ max_start_add = 0;
+ /* ep_out_num 1~15 */
+ arr_ep_out = sg_hs_bulk_out_ep;
+ out_ep_arr_size = SG_HS_BULK_OUT_EP_SIZE;
+ /* ep_in_num 1~15 */
+ arr_ep_in = sg_hs_bulk_in_ep;
+ in_ep_arr_size = SG_HS_BULK_IN_EP_SIZE;
+ /* sg_len */
+ arr_sg_len = sg_hs_bulk_sg_len;
+ sg_len_arr_size = SG_HS_BULK_SG_LEN_SIZE;
+
+ /* burst & mult all default value */
+ arr_burst = df_burst;
+ burst_arr_size = DF_BURST_SIZE;
+ arr_mult = df_mult;
+ mult_arr_size = DF_MULT_SIZE;
+ }
+ if(speed == DEV_SPEED_HIGH && transfer_type == EPATT_INT){
+ arr_maxp = sg_hs_intr_maxp;
+ maxp_arr_size = SG_HS_INTR_MAXP_SIZE;
+ arr_interval = sg_hs_intr_interval;
+ interval_arr_size = SG_HS_INTR_INTERVAL_SIZE;
+ /* length 1~2048 */
+ sec = init_num_sec(1, 1025);
+ add_num_sec(10240-10, 10240+10,sec);
+ /* start_add 0~63 */
+ min_start_add = 0;
+ max_start_add = 0;
+ /* ep_out_num 1,8,15 */
+ arr_ep_out = sg_hs_intr_out_ep;
+ out_ep_arr_size = SG_HS_INTR_OUT_EP_SIZE;
+ /* ep_in_num 1,8,15 */
+ arr_ep_in = sg_hs_intr_in_ep;
+ in_ep_arr_size = SG_HS_INTR_IN_EP_SIZE;
+ /* sg_len */
+ arr_sg_len = sg_hs_intr_sg_len;
+ sg_len_arr_size = SG_HS_INTR_SG_LEN_SIZE;
+
+ /* burst & mult all default value */
+ arr_burst = df_burst;
+ burst_arr_size = DF_BURST_SIZE;
+ arr_mult = df_mult;
+ mult_arr_size = DF_MULT_SIZE;
+ }
+ if(speed == DEV_SPEED_FULL && transfer_type == EPATT_BULK){
+ arr_maxp = sg_fs_bulk_maxp;
+ maxp_arr_size = SG_FS_BULK_MAXP_SIZE;
+ arr_interval = sg_fs_bulk_interval;
+ interval_arr_size = SG_FS_BULK_INTERVAL_SIZE;
+ /* length 4096~10241 */
+ sec = init_num_sec(513, 1025);
+ add_num_sec(10240-10, 10240+10,sec);
+ add_num_sec(65535-10,65535,sec);
+ /* start_add 0~63 */
+ min_start_add = 0;
+ max_start_add = 0;
+ /* ep_out_num 1~15 */
+ arr_ep_out = sg_fs_bulk_out_ep;
+ out_ep_arr_size = SG_FS_BULK_OUT_EP_SIZE;
+ /* ep_in_num 1~15 */
+ arr_ep_in = sg_fs_bulk_in_ep;
+ in_ep_arr_size = SG_FS_BULK_IN_EP_SIZE;
+ /* sg_len */
+ arr_sg_len = sg_fs_bulk_sg_len;
+ sg_len_arr_size = SG_FS_BULK_SG_LEN_SIZE;
+
+ /* burst & mult all default value */
+ arr_burst = df_burst;
+ burst_arr_size = DF_BURST_SIZE;
+ arr_mult = df_mult;
+ mult_arr_size = DF_MULT_SIZE;
+
+ }
+ if(speed == DEV_SPEED_FULL && transfer_type == EPATT_INT){
+ arr_maxp = sg_fs_intr_maxp;
+ maxp_arr_size = SG_FS_INTR_MAXP_SIZE;
+ arr_interval = sg_fs_intr_interval;
+ interval_arr_size = SG_FS_INTR_INTERVAL_SIZE;
+ /* length 1024~4097 */
+ sec = init_num_sec(1, 1025);
+ add_num_sec(2048-10, 2048+10,sec);
+ add_num_sec(3072-10, 3072+10,sec);
+ /* start_add 0~63 */
+ min_start_add = 0;
+ max_start_add = 0;
+ /* ep_out_num 1,8,15 */
+ arr_ep_out = sg_fs_intr_out_ep;
+ out_ep_arr_size = SG_FS_INTR_OUT_EP_SIZE;
+ /* ep_in_num 1,8,15 */
+ arr_ep_in = sg_fs_intr_in_ep;
+ in_ep_arr_size = SG_FS_INTR_IN_EP_SIZE;
+ /* sg_len */
+ arr_sg_len = sg_fs_intr_sg_len;
+ sg_len_arr_size = SG_FS_INTR_SG_LEN_SIZE;
+
+ /* burst & mult all default value */
+ arr_burst = df_burst;
+ burst_arr_size = DF_BURST_SIZE;
+ arr_mult = df_mult;
+ mult_arr_size = DF_MULT_SIZE;
+ }
+#if 0
+ if(u3auto_hcd_reset() != RET_SUCCESS)
+ return RET_FAIL;
+#endif
+ for(ep_in_index = 0; ep_in_index < in_ep_arr_size; ep_in_index++){
+ ep_in_num = *(arr_ep_in + ep_in_index);
+ for(ep_out_index = 0; ep_out_index < out_ep_arr_size; ep_out_index++){
+ ep_out_num = *(arr_ep_out + ep_out_index);
+ for(interval_index = 0; interval_index < interval_arr_size; interval_index++){
+ bInterval = *(arr_interval + interval_index);
+ for(maxp_index = 0; maxp_index < maxp_arr_size; maxp_index++){
+ maxp = *(arr_maxp + maxp_index);
+ for(mult_index = 0; mult_index < mult_arr_size; mult_index++){
+ mult = *(arr_mult+mult_index);
+ for(burst_index = 0; burst_index < burst_arr_size; burst_index++){
+ burst = *(arr_burst+burst_index);
+ printk(KERN_ERR "[TEST]*************************************\n");
+ printk(KERN_ERR " OUT_EP: %d***************************\n", ep_out_num);
+ printk(KERN_ERR " IN_EP: %d ***************************\n", ep_in_num);
+ printk(KERN_ERR " MAXP: %d ***************************\n", maxp);
+ printk(KERN_ERR " INTERVAL: %d ***************************\n", bInterval);
+ printk(KERN_ERR " MULT: %d **************************\n", mult);
+ printk(KERN_ERR " BURST: %d *************************\n", burst);
+ /* ==phase 0 : device reset==*/
+ start_port_reenabled(0, speed);
+ ret=dev_reset(speed,NULL);
+ if(ret){
+ printk(KERN_ERR "device reset failed!!!!!!!!!!\n");
+ return ret;
+ }
+
+ ret = f_disable_slot();
+ if(ret){
+ printk(KERN_ERR "disable slot failed!!!!!!!!!!\n");
+ return ret;
+ }
+
+ ret = f_reenable_port(0);
+ if(ret != RET_SUCCESS){
+ printk(KERN_ERR "device reenable failed!!!!!!!!!!\n");
+ return ret;
+ }
+
+ ret = f_enable_slot(NULL);
+ if(ret){
+ printk(KERN_ERR "enable slot failed!!!!!!!!!!\n");
+ return ret;
+ }
+
+ ret=f_address_slot(false, NULL);
+ if(ret){
+ printk(KERN_ERR "address slot failed!!!!!!!!!!\n");
+ return ret;
+ }
+ mtk_xhci_scheduler_init();
+ /* ==phase 1 : config EP==*/
+ ret=dev_config_ep(ep_out_num,USB_RX,transfer_type,maxp,bInterval,mult_dev,burst,mult,NULL);
+ if(ret)
+ {
+ printk(KERN_ERR "config dev EP fail!!\n");
+ return ret;
+ }
+
+ ret=dev_config_ep(ep_in_num,USB_TX,transfer_type,maxp,bInterval,mult_dev,burst,mult,NULL);
+ if(ret)
+ {
+ printk(KERN_ERR "config dev EP fail!!\n");
+ return ret;
+ }
+
+ ret = f_config_ep(ep_out_num,EPADD_OUT,transfer_type,maxp,bInterval,burst,mult, NULL,0);
+ if(ret)
+ {
+ printk(KERN_ERR "config EP fail!!\n");
+ return ret;
+ }
+
+ ret = f_config_ep(ep_in_num,EPADD_IN,transfer_type,maxp,bInterval,burst,mult, NULL,1);
+ if(ret)
+ {
+ printk(KERN_ERR "config EP fail!!\n");
+ return ret;
+ }
+
+ for(start_add = min_start_add; start_add <= max_start_add; start_add++){
+ cur_sec = sec;
+ while(cur_sec != NULL){
+ length = cur_sec->current_value;
+ for(sg_len_index = 0; sg_len_index < sg_len_arr_size; sg_len_index++){
+ sg_len = *(arr_sg_len + sg_len_index);
+ if((length/sg_len) > 61)
+ continue;
+ printk(KERN_ERR " LENGTH: %d *************\n", length);
+ printk(KERN_ERR " SG_LEN: %d *************\n", sg_len);
+ /* ==phase 2 : loopback==*/
+ bdp=1;
+ gpd_buf_size=length;
+ bd_buf_size=4096;
+
+ ret=dev_loopback(bdp,length,gpd_buf_size,bd_buf_size, dram_offset, extension,NULL);
+ if(ret)
+ {
+ printk(KERN_ERR "loopback request fail!!\n");
+ f_power_suspend();
+ return ret;
+ }
+
+ ret = f_loopback_sg_loop_gpd(
+ ep_out_num,ep_in_num,length,start_add,sg_len, gpd_buf_size,NULL);
+ if(ret)
+ {
+ printk(KERN_ERR "loopback fail!!\n");
+ printk(KERN_ERR "length : %d\n",length);
+ return ret;
+ }
+
+ /* ==phase 3: get device status==*/
+ ret=dev_polling_status(NULL);
+ if(ret)
+ {
+ f_power_suspend();
+ printk(KERN_ERR "query request fail!!\n");
+ return ret;
+ }
+ } /* sg_num */
+ cur_sec = find_next_num(cur_sec);
+ } /* length */
+ } /* start_add */
+ } /* burst */
+ } /* mult */
+ } /* maxp */
+ } /* interval */
+ } /* ep_out */
+ } /* ep_in */
+ return ret;
+}
+
+#define SS_BULK_INTERVAL_SIZE 1	/* element counts for the per-speed/per-type parameter tables in t_u3auto_loopback_scan() */
+#define SS_BULK_MAXP_SIZE 1
+#define SS_BULK_OUT_EP_SIZE 1
+#define SS_BULK_IN_EP_SIZE 1
+#define SS_BULK_BURST_SIZE 2
+#define SS_BULK_MULT_SIZE 1
+
+#define SS_INTR_INTERVAL_SIZE 2
+#define SS_INTR_MAXP_SIZE 2
+#define SS_INTR_OUT_EP_SIZE 1
+#define SS_INTR_IN_EP_SIZE 1
+#define SS_INTR_BURST_SIZE 1
+#define SS_INTR_MULT_SIZE 1
+
+#define SS_HB_INTR_INTERVAL_SIZE 1	/* HB = high-bandwidth variants (argv[3] == "true") */
+#define SS_HB_INTR_MAXP_SIZE 1
+#define SS_HB_INTR_OUT_EP_SIZE 1
+#define SS_HB_INTR_IN_EP_SIZE 1
+#define SS_HB_INTR_BURST_SIZE 2
+#define SS_HB_INTR_MULT_SIZE 1
+
+#define SS_ISOC_INTERVAL_SIZE 2
+#define SS_ISOC_MAXP_SIZE 2
+#define SS_ISOC_OUT_EP_SIZE 1
+#define SS_ISOC_IN_EP_SIZE 1
+#define SS_ISOC_BURST_SIZE 1
+#define SS_ISOC_MULT_SIZE 1
+
+#define SS_HB_ISOC_INTERVAL_SIZE 2
+#define SS_HB_ISOC_MAXP_SIZE 1
+#define SS_HB_ISOC_OUT_EP_SIZE 1
+#define SS_HB_ISOC_IN_EP_SIZE 1
+#define SS_HB_ISOC_BURST_SIZE 2
+#define SS_HB_ISOC_MULT_SIZE 2
+
+#define HS_BULK_INTERVAL_SIZE 1	/* high-speed tables have no burst/mult of their own; DF_* defaults are used */
+#define HS_BULK_MAXP_SIZE 1
+#define HS_BULK_OUT_EP_SIZE 1
+#define HS_BULK_IN_EP_SIZE 1
+
+#define HS_INTR_INTERVAL_SIZE 2
+#define HS_INTR_MAXP_SIZE 2
+#define HS_INTR_OUT_EP_SIZE 1
+#define HS_INTR_IN_EP_SIZE 1
+
+#define HS_HB_INTR_INTERVAL_SIZE 2
+#define HS_HB_INTR_MAXP_SIZE 1
+#define HS_HB_INTR_OUT_EP_SIZE 1
+#define HS_HB_INTR_IN_EP_SIZE 1
+#define HS_HB_INTR_MULT_SIZE 2
+
+#define HS_ISOC_INTERVAL_SIZE 2
+#define HS_ISOC_MAXP_SIZE 2
+#define HS_ISOC_OUT_EP_SIZE 1
+#define HS_ISOC_IN_EP_SIZE 1
+
+#define HS_HB_ISOC_INTERVAL_SIZE 2
+#define HS_HB_ISOC_MAXP_SIZE 1
+#define HS_HB_ISOC_OUT_EP_SIZE 1
+#define HS_HB_ISOC_IN_EP_SIZE 1
+#define HS_HB_ISOC_MULT_SIZE 2
+
+#define FS_BULK_INTERVAL_SIZE 1
+#define FS_BULK_MAXP_SIZE 3
+#define FS_BULK_OUT_EP_SIZE 1
+#define FS_BULK_IN_EP_SIZE 1
+
+#define FS_INTR_INTERVAL_SIZE 2
+#define FS_INTR_MAXP_SIZE 2
+#define FS_INTR_OUT_EP_SIZE 1
+#define FS_INTR_IN_EP_SIZE 1
+
+#define FS_ISOC_INTERVAL_SIZE 2
+#define FS_ISOC_MAXP_SIZE 2
+#define FS_ISOC_OUT_EP_SIZE 1
+#define FS_ISOC_IN_EP_SIZE 1
+
+#define DF_BURST_SIZE 1 /* default for hs, fs */
+#define DF_MULT_SIZE 1 /* default for hs, fs */
+
+
+
+static int t_u3auto_loopback_scan(int argc, char** argv){	/* exhaustive loopback sweep: argv[1]=speed (ss|hs|fs), argv[2]=type (bulk|intr|isoc), argv[3]=="true" enables high-bandwidth tables */
+	int ret,length,start_add;
+	char bdp;
+	int gpd_buf_size,bd_buf_size;
+	int transfer_type;
+	int maxp;
+	int bInterval;
+	int ep_out_num, ep_in_num;
+	int speed;
+	int start_add_index, ep_out_index, ep_in_index, maxp_index, interval_index, burst_index, mult_index;	/* NOTE(review): start_add_index is unused */
+	int interval_arr_size, maxp_arr_size, out_ep_arr_size, in_ep_arr_size, burst_arr_size, mult_arr_size;
+	int min_length, max_length, min_start_add, max_start_add;	/* NOTE(review): min_length/max_length are unused; lengths come from the numsection list */
+	int *arr_interval, *arr_maxp, *arr_ep_out, *arr_ep_in, *arr_burst, *arr_mult;
+	int mult_dev, mult, burst;
+	int dram_offset, extension;
+	struct numsection *sec, *cur_sec;
+	char isHB;	/* char used as bool flag */
+
+	int ss_bulk_interval[SS_BULK_INTERVAL_SIZE] = {0};
+	int ss_bulk_maxp[SS_BULK_MAXP_SIZE] = {1024};
+	int ss_bulk_out_ep[SS_BULK_OUT_EP_SIZE] = {1/*,2*/};
+	int ss_bulk_in_ep[SS_BULK_IN_EP_SIZE] = {1/*,2*/};
+	int ss_bulk_burst[SS_BULK_BURST_SIZE] = {0,15,/*0*/};
+	int ss_bulk_mult[SS_BULK_MULT_SIZE] = {0};
+
+	int ss_intr_interval[SS_INTR_INTERVAL_SIZE] = {1, 2/*, 16*/};
+	int ss_intr_maxp[SS_INTR_MAXP_SIZE] = {8, /*32, 128, 512, */1024};
+	int ss_intr_out_ep[SS_INTR_OUT_EP_SIZE] = {1};
+	int ss_intr_in_ep[SS_INTR_IN_EP_SIZE] = {1};
+	int ss_intr_burst[SS_INTR_BURST_SIZE] = {0};
+	int ss_intr_mult[SS_INTR_MULT_SIZE] = {0};
+
+	int ss_hb_intr_interval[SS_HB_INTR_INTERVAL_SIZE] = {1};
+	int ss_hb_intr_maxp[SS_HB_INTR_MAXP_SIZE] = {1024};
+	int ss_hb_intr_out_ep[SS_HB_INTR_OUT_EP_SIZE] = {1};
+	int ss_hb_intr_in_ep[SS_HB_INTR_IN_EP_SIZE] = {1};
+	int ss_hb_intr_burst[SS_HB_INTR_BURST_SIZE] = {1,2};
+	int ss_hb_intr_mult[SS_HB_INTR_MULT_SIZE] = {0};
+
+	int ss_isoc_interval[SS_ISOC_INTERVAL_SIZE] = {1, 2/*, 16*/};
+	int ss_isoc_maxp[SS_ISOC_MAXP_SIZE] = {/*64, 512, */64,1024};
+	int ss_isoc_out_ep[SS_ISOC_OUT_EP_SIZE] = {1};
+	int ss_isoc_in_ep[SS_ISOC_IN_EP_SIZE] = {1};
+	int ss_isoc_burst[SS_ISOC_BURST_SIZE] = {0};
+	int ss_isoc_mult[SS_ISOC_MULT_SIZE] = {0};
+
+	int ss_hb_isoc_interval[SS_HB_ISOC_INTERVAL_SIZE] = {1, 2};
+	int ss_hb_isoc_maxp[SS_HB_ISOC_MAXP_SIZE] = {1024};
+	int ss_hb_isoc_out_ep[SS_HB_ISOC_OUT_EP_SIZE] = {1};
+	int ss_hb_isoc_in_ep[SS_HB_ISOC_IN_EP_SIZE] = {1};
+	int ss_hb_isoc_burst[SS_HB_ISOC_BURST_SIZE] = {/*0,*/1,2};
+	int ss_hb_isoc_mult[SS_HB_ISOC_MULT_SIZE] = {0,1/*,2*/};
+
+	int hs_bulk_interval[HS_BULK_INTERVAL_SIZE] = {0};
+	int hs_bulk_maxp[HS_BULK_MAXP_SIZE] = {512};
+	int hs_bulk_out_ep[HS_BULK_OUT_EP_SIZE] = {1};
+	int hs_bulk_in_ep[HS_BULK_IN_EP_SIZE] = {1};
+
+	int hs_intr_interval[HS_INTR_INTERVAL_SIZE] = {1/*,7*/,2}; /* 0,8,15 */
+	int hs_intr_maxp[HS_INTR_MAXP_SIZE] = {8/*,32,128,512*/,1024}; /* 8,16,128,512, 1024, 2048, 3072 */
+	int hs_intr_out_ep[HS_INTR_OUT_EP_SIZE] = {1};
+	int hs_intr_in_ep[HS_INTR_IN_EP_SIZE] = {1};
+
+	int hs_hb_intr_interval[HS_HB_INTR_INTERVAL_SIZE] = {1/*,7*/,2}; /* 0,8,15 */
+	int hs_hb_intr_maxp[HS_HB_INTR_MAXP_SIZE] = {1024}; /* 8,16,128,512, 1024, 2048, 3072 */
+	int hs_hb_intr_out_ep[HS_HB_INTR_OUT_EP_SIZE] = {1};
+	int hs_hb_intr_in_ep[HS_HB_INTR_IN_EP_SIZE] = {1};
+	int hs_hb_intr_mult[HS_HB_INTR_MULT_SIZE] = {1,2};
+
+	int hs_isoc_interval[HS_ISOC_INTERVAL_SIZE] = {1,2};
+	int hs_isoc_maxp[HS_ISOC_MAXP_SIZE] = {/*8,32,*/128,/*512,*/1024};
+	int hs_isoc_out_ep[HS_ISOC_OUT_EP_SIZE] = {1};
+	int hs_isoc_in_ep[HS_ISOC_IN_EP_SIZE] = {1};
+
+	int hs_hb_isoc_interval[HS_HB_ISOC_INTERVAL_SIZE] = {1,2};
+	int hs_hb_isoc_maxp[HS_HB_ISOC_MAXP_SIZE] = {1024};
+	int hs_hb_isoc_out_ep[HS_HB_ISOC_OUT_EP_SIZE] = {1};
+	int hs_hb_isoc_in_ep[HS_HB_ISOC_IN_EP_SIZE] = {1};
+	int hs_hb_isoc_mult[HS_HB_ISOC_MULT_SIZE] = {1,2};
+
+	int fs_bulk_interval[FS_BULK_INTERVAL_SIZE] = {0};
+	int fs_bulk_maxp[FS_BULK_MAXP_SIZE] = {8/*,16*/,32,64}; /* 8,16,32,64 */
+	int fs_bulk_out_ep[FS_BULK_OUT_EP_SIZE] = {1};
+	int fs_bulk_in_ep[FS_BULK_IN_EP_SIZE] = {1};
+
+	int fs_intr_interval[FS_INTR_INTERVAL_SIZE] = {1, 2}; /* 1, 127 ,255 */
+	int fs_intr_maxp[FS_INTR_MAXP_SIZE] = {8,/*32,*/64}; /* 8,32,64 */
+	int fs_intr_out_ep[FS_INTR_OUT_EP_SIZE] = {1};
+	int fs_intr_in_ep[FS_INTR_IN_EP_SIZE] = {1};
+
+	int fs_isoc_interval[FS_ISOC_INTERVAL_SIZE] = {1, 2};
+	int fs_isoc_maxp[FS_ISOC_MAXP_SIZE] = {/*8,32,*/128,1023};
+	int fs_isoc_out_ep[FS_ISOC_OUT_EP_SIZE] = {1};
+	int fs_isoc_in_ep[FS_ISOC_IN_EP_SIZE] = {1};
+
+	int df_burst[DF_BURST_SIZE] = {0};
+	int df_mult[DF_MULT_SIZE] = {0};
+
+	ret = 0;
+	speed = DEV_SPEED_HIGH;; /* NOTE(review): stray ';;' — harmless empty statement */
+	transfer_type = EPATT_BULK;
+	maxp = 512;
+	bInterval = 0;
+	ep_out_num = 1;
+	ep_in_num = 1;
+	isHB = false;
+	length = 65535;
+	mult_dev = 3;	/* device-side mult parameter for dev_config_ep() */
+	mult = 0;
+	burst = 8;
+	dram_offset = 0;
+	extension = 0;
+
+	if(argc > 1){
+		if(!strcmp(argv[1], "ss")){
+			printk(KERN_ERR "Test super speed\n");
+			speed = DEV_SPEED_SUPER;
+		}
+		else if(!strcmp(argv[1], "hs")){
+			printk(KERN_ERR "Test high speed\n");
+			speed = DEV_SPEED_HIGH;
+		}
+		else if(!strcmp(argv[1], "fs")){
+			printk(KERN_ERR "Test full speed\n");
+			speed = DEV_SPEED_FULL;
+		}
+	}
+	if(argc > 2){
+		if(!strcmp(argv[2], "bulk")){
+			printk(KERN_ERR "Test bulk transfer\n");
+			transfer_type = EPATT_BULK;
+		}
+		else if(!strcmp(argv[2], "intr")){
+			printk(KERN_ERR "Test intr transfer\n");
+			transfer_type = EPATT_INT;
+		}
+		else if(!strcmp(argv[2], "isoc")){
+			printk(KERN_ERR "Test isoc transfer\n");
+			transfer_type = EPATT_ISO;
+		}
+	}
+	if(argc > 3){
+		if(!strcmp(argv[3], "true")){
+			printk(KERN_ERR "Test high bandwidth\n");
+			isHB = true;
+		}
+		else{
+			printk(KERN_ERR "Doesn't test high bandwidth\n");
+			isHB = false;
+		}
+	}
+	if(speed == DEV_SPEED_SUPER && transfer_type == EPATT_BULK){	/* each branch below selects the parameter tables and the length-section list for one speed/type combo */
+		arr_maxp = ss_bulk_maxp;
+		maxp_arr_size = SS_BULK_MAXP_SIZE;
+		arr_interval = ss_bulk_interval;
+		interval_arr_size = SS_BULK_INTERVAL_SIZE;
+		/* length sections: 1~1025, 10240±10, 65535-10~65535 */
+		sec = init_num_sec(1,1025);
+		add_num_sec(10240-10,10240+10,sec);
+		add_num_sec(65535-10,65535,sec);
+
+		/* start_add 0~63 */
+		min_start_add = 0;
+		max_start_add = 0;
+		arr_ep_out = ss_bulk_out_ep;
+		out_ep_arr_size = SS_BULK_OUT_EP_SIZE;
+		arr_ep_in = ss_bulk_in_ep;
+		in_ep_arr_size = SS_BULK_IN_EP_SIZE;
+		arr_burst = ss_bulk_burst;
+		burst_arr_size = SS_BULK_BURST_SIZE;
+		arr_mult = ss_bulk_mult;
+		mult_arr_size = SS_BULK_MULT_SIZE;
+	}
+	if(speed == DEV_SPEED_SUPER && transfer_type == EPATT_INT && (!isHB)){
+		arr_maxp = ss_intr_maxp;
+		maxp_arr_size = SS_INTR_MAXP_SIZE;
+		arr_interval = ss_intr_interval;
+		interval_arr_size = SS_INTR_INTERVAL_SIZE;
+		/* length sections: 1~1025, 2048±10, 3072±10, 10240±10 */
+		sec = init_num_sec(1,1025);
+		add_num_sec(2048-10,2048+10, sec);
+		add_num_sec(3072-10,3072+10, sec);
+		add_num_sec(10240-10,10240+10,sec);
+
+		/* start_add 0 */
+		min_start_add = 0;
+		max_start_add = 0;
+		arr_ep_out = ss_intr_out_ep;
+		out_ep_arr_size = SS_INTR_OUT_EP_SIZE;
+		arr_ep_in = ss_intr_in_ep;
+		in_ep_arr_size = SS_INTR_IN_EP_SIZE;
+		arr_burst = ss_intr_burst;
+		burst_arr_size = SS_INTR_BURST_SIZE;
+		arr_mult = ss_intr_mult;
+		mult_arr_size = SS_INTR_MULT_SIZE;
+	}
+	if(speed == DEV_SPEED_SUPER && transfer_type == EPATT_INT && isHB){
+		arr_maxp = ss_hb_intr_maxp;
+		maxp_arr_size = SS_HB_INTR_MAXP_SIZE;
+		arr_interval = ss_hb_intr_interval;
+		interval_arr_size = SS_HB_INTR_INTERVAL_SIZE;
+		/* length sections: 1~1025, 2048±10, 3072±10, 10241-3~10241 */
+		sec = init_num_sec(1,1025);
+		add_num_sec(2048-10,2048+10, sec);
+		add_num_sec(3072-10,3072+10, sec);
+		add_num_sec(10241-3,10241,sec);
+
+		/* start_add 0 */
+		min_start_add = 0;
+		max_start_add = 0;
+		arr_ep_out = ss_hb_intr_out_ep;
+		out_ep_arr_size = SS_HB_INTR_OUT_EP_SIZE;
+		arr_ep_in = ss_hb_intr_in_ep;
+		in_ep_arr_size = SS_HB_INTR_IN_EP_SIZE;
+		arr_burst = ss_hb_intr_burst;
+		burst_arr_size = SS_HB_INTR_BURST_SIZE;
+		arr_mult = ss_hb_intr_mult;
+		mult_arr_size = SS_HB_INTR_MULT_SIZE;
+	}
+	if(speed == DEV_SPEED_SUPER && transfer_type == EPATT_ISO && (!isHB)){
+		arr_maxp = ss_isoc_maxp;
+		maxp_arr_size = SS_ISOC_MAXP_SIZE;
+		arr_interval = ss_isoc_interval;
+		interval_arr_size = SS_ISOC_INTERVAL_SIZE;
+
+		/* length sections: 1~3, 1024±3, 10241-3~10241 */
+		sec = init_num_sec(1,3);
+		add_num_sec(1024-3,1024+3,sec);
+		add_num_sec(10241-3,10241,sec);
+
+		/* start_add 0 */
+		min_start_add = 0;
+		max_start_add = 0;
+		arr_ep_out = ss_isoc_out_ep;
+		out_ep_arr_size = SS_ISOC_OUT_EP_SIZE;
+		arr_ep_in = ss_isoc_in_ep;
+		in_ep_arr_size = SS_ISOC_IN_EP_SIZE;
+		arr_burst = ss_isoc_burst;
+		burst_arr_size = SS_ISOC_BURST_SIZE;
+		arr_mult = ss_isoc_mult;
+		mult_arr_size = SS_ISOC_MULT_SIZE;
+	}
+	if(speed == DEV_SPEED_SUPER && transfer_type == EPATT_ISO && isHB){
+		arr_maxp = ss_hb_isoc_maxp;
+		maxp_arr_size = SS_HB_ISOC_MAXP_SIZE;
+		arr_interval = ss_hb_isoc_interval;
+		interval_arr_size = SS_HB_ISOC_INTERVAL_SIZE;
+		/* length sections: 1~3, 1024±3, 10241-3~10240+3, 65535-3~65535 */
+		sec = init_num_sec(1,3);
+		add_num_sec(1024-3,1024+3,sec);
+		add_num_sec(10241-3,10240+3,sec);
+		add_num_sec(65535-3,65535,sec);
+
+		/* start_add 0 */
+		min_start_add = 0;
+		max_start_add = 0;
+		arr_ep_out = ss_hb_isoc_out_ep;
+		out_ep_arr_size = SS_HB_ISOC_OUT_EP_SIZE;
+		arr_ep_in = ss_hb_isoc_in_ep;
+		in_ep_arr_size = SS_HB_ISOC_IN_EP_SIZE;
+		arr_burst = ss_hb_isoc_burst;
+		burst_arr_size = SS_HB_ISOC_BURST_SIZE;
+		arr_mult = ss_hb_isoc_mult;
+		mult_arr_size = SS_HB_ISOC_MULT_SIZE;
+	}
+	if(speed == DEV_SPEED_HIGH && transfer_type == EPATT_BULK){
+		arr_maxp = hs_bulk_maxp;
+		maxp_arr_size = HS_BULK_MAXP_SIZE;
+		arr_interval = hs_bulk_interval;
+		interval_arr_size = HS_BULK_INTERVAL_SIZE;
+		/* length sections: 1~513, 10240±10, 65535-10~65535 */
+		sec = init_num_sec(1,513);
+		add_num_sec(10240-10, 10240+10,sec);
+		add_num_sec(65535-10, 65535,sec);
+		/* start_add 0~63 */
+		min_start_add = 0;
+		max_start_add = 0;
+		/* ep_out_num 1~15 */
+		arr_ep_out = hs_bulk_out_ep;
+		out_ep_arr_size = HS_BULK_OUT_EP_SIZE;
+		/* ep_in_num 1~15 */
+		arr_ep_in = hs_bulk_in_ep;
+		in_ep_arr_size = HS_BULK_IN_EP_SIZE;
+		/* burst & mult all default value */
+		arr_burst = df_burst;
+		burst_arr_size = DF_BURST_SIZE;
+		arr_mult = df_mult;
+		mult_arr_size = DF_MULT_SIZE;
+	}
+	if(speed == DEV_SPEED_HIGH && transfer_type == EPATT_INT && (!isHB)){
+		arr_maxp = hs_intr_maxp;
+		maxp_arr_size = HS_INTR_MAXP_SIZE;
+		arr_interval = hs_intr_interval;
+		interval_arr_size = HS_INTR_INTERVAL_SIZE;
+		/* length sections: 1~1025, 2048±3, 3072±3, 10240±3 */
+		sec = init_num_sec(1,1025);
+		add_num_sec(2048-3,2048+3,sec);
+		add_num_sec(3072-3,3072+3,sec);
+		add_num_sec(10240-3,10240+3,sec);
+
+		/* start_add 0~63 */
+		min_start_add = 0;
+		max_start_add = 0;
+		/* ep_out_num 1,8,15 */
+		arr_ep_out = hs_intr_out_ep;
+		out_ep_arr_size = HS_INTR_OUT_EP_SIZE;
+		/* ep_in_num 1,8,15 */
+		arr_ep_in = hs_intr_in_ep;
+		in_ep_arr_size = HS_INTR_IN_EP_SIZE;
+		/* burst & mult all default value */
+		arr_burst = df_burst;
+		burst_arr_size = DF_BURST_SIZE;
+		arr_mult = df_mult;
+		mult_arr_size = DF_MULT_SIZE;
+	}
+	if(speed == DEV_SPEED_HIGH && transfer_type == EPATT_INT && (isHB)){
+		arr_maxp = hs_hb_intr_maxp;
+		maxp_arr_size = HS_HB_INTR_MAXP_SIZE;
+		arr_interval = hs_hb_intr_interval;
+		interval_arr_size = HS_HB_INTR_INTERVAL_SIZE;
+		/* length sections: 1~1025, 2048±3, 3072±3, 10240±3 */
+		sec = init_num_sec(1,1025);
+		add_num_sec(2048-3,2048+3,sec);
+		add_num_sec(3072-3,3072+3,sec);
+		add_num_sec(10240-3,10240+3,sec);
+
+		/* start_add 0~63 */
+		min_start_add = 0;
+		max_start_add = 0;
+		/* ep_out_num 1,8,15 */
+		arr_ep_out = hs_hb_intr_out_ep;
+		out_ep_arr_size = HS_HB_INTR_OUT_EP_SIZE;
+		/* ep_in_num 1,8,15 */
+		arr_ep_in = hs_hb_intr_in_ep;
+		in_ep_arr_size = HS_HB_INTR_IN_EP_SIZE;
+		/* burst default; mult from HB table */
+		arr_burst = df_burst;
+		burst_arr_size = DF_BURST_SIZE;
+		arr_mult = hs_hb_intr_mult;
+		mult_arr_size = HS_HB_INTR_MULT_SIZE;
+	}
+	if(speed == DEV_SPEED_HIGH && transfer_type == EPATT_ISO && (!isHB)){
+		arr_maxp = hs_isoc_maxp;
+		maxp_arr_size = HS_ISOC_MAXP_SIZE;
+		arr_interval = hs_isoc_interval;
+		interval_arr_size = HS_ISOC_INTERVAL_SIZE;
+		/* length sections: 1~3, 1024±3, 2048±3, 3072±3, 10240±3 */
+		sec = init_num_sec(1,3);
+		add_num_sec(1024-3,1024+3,sec);
+		add_num_sec(2048-3,2048+3,sec);
+		add_num_sec(3072-3,3072+3,sec);
+		add_num_sec(10240-3,10240+3,sec);
+		/* start_add 0 */
+		min_start_add = 0;
+		max_start_add = 0;
+
+		arr_ep_out = hs_isoc_out_ep;
+		out_ep_arr_size = HS_ISOC_OUT_EP_SIZE;
+		arr_ep_in = hs_isoc_in_ep;
+		in_ep_arr_size = HS_ISOC_IN_EP_SIZE;
+		/* burst & mult all default value */
+		arr_burst = df_burst;
+		burst_arr_size = DF_BURST_SIZE;
+		arr_mult = df_mult;
+		mult_arr_size = DF_MULT_SIZE;
+	}
+	if(speed == DEV_SPEED_HIGH && transfer_type == EPATT_ISO && (isHB)){
+		arr_maxp = hs_hb_isoc_maxp;
+		maxp_arr_size = HS_HB_ISOC_MAXP_SIZE;
+		arr_interval = hs_hb_isoc_interval;
+		interval_arr_size = HS_HB_ISOC_INTERVAL_SIZE;
+		/* length sections: 1~3, 1024±3, 2048±3, 3072±3, 10240±3 */
+		sec = init_num_sec(1,3);
+		add_num_sec(1024-3,1024+3,sec);
+		add_num_sec(2048-3,2048+3,sec);
+		add_num_sec(3072-3,3072+3,sec);
+		add_num_sec(10240-3,10240+3,sec);
+		/* start_add 0 */
+		min_start_add = 0;
+		max_start_add = 0;
+
+		arr_ep_out = hs_hb_isoc_out_ep;
+		out_ep_arr_size = HS_HB_ISOC_OUT_EP_SIZE;
+		arr_ep_in = hs_hb_isoc_in_ep;
+		in_ep_arr_size = HS_HB_ISOC_IN_EP_SIZE;
+		/* burst default; mult from HB table */
+		arr_burst = df_burst;
+		burst_arr_size = DF_BURST_SIZE;
+		arr_mult = hs_hb_isoc_mult;
+		mult_arr_size = HS_HB_ISOC_MULT_SIZE;
+	}
+	if(speed == DEV_SPEED_FULL && transfer_type == EPATT_BULK){
+		arr_maxp = fs_bulk_maxp;
+		maxp_arr_size = FS_BULK_MAXP_SIZE;
+		arr_interval = hs_bulk_interval;	/* NOTE(review): reuses the HS interval table (both hold {0}) — presumably intentional; confirm */
+		interval_arr_size = FS_BULK_INTERVAL_SIZE;
+		/* length sections: 1~65, 10240±10, 65535-10~65535 */
+		sec = init_num_sec(1,65);
+		add_num_sec(10240-10,10240+10,sec);
+		add_num_sec(65535-10,65535,sec);
+
+		min_start_add = 0;
+		max_start_add = 0;
+
+		arr_ep_out = fs_bulk_out_ep;
+		out_ep_arr_size = FS_BULK_OUT_EP_SIZE;
+		arr_ep_in = fs_bulk_in_ep;
+		in_ep_arr_size = FS_BULK_IN_EP_SIZE;
+		/* burst & mult all default value */
+		arr_burst = df_burst;
+		burst_arr_size = DF_BURST_SIZE;
+		arr_mult = df_mult;
+		mult_arr_size = DF_MULT_SIZE;
+	}
+	if(speed == DEV_SPEED_FULL && transfer_type == EPATT_INT){
+		arr_maxp = fs_intr_maxp;
+		maxp_arr_size = FS_INTR_MAXP_SIZE;
+		arr_interval = fs_intr_interval;
+		interval_arr_size = FS_INTR_INTERVAL_SIZE;
+		/* length sections: 1~65, 3072±3 */
+		sec = init_num_sec(1, 65);
+		add_num_sec(3072-3,3072+3,sec);
+		min_start_add = 0;
+		max_start_add = 0;
+
+		arr_ep_out = fs_intr_out_ep;
+		out_ep_arr_size = FS_INTR_OUT_EP_SIZE;
+		arr_ep_in = fs_intr_in_ep;
+		in_ep_arr_size = FS_INTR_IN_EP_SIZE;
+		/* burst & mult all default value */
+		arr_burst = df_burst;
+		burst_arr_size = DF_BURST_SIZE;
+		arr_mult = df_mult;
+		mult_arr_size = DF_MULT_SIZE;
+	}
+	if(speed == DEV_SPEED_FULL && transfer_type == EPATT_ISO){
+		arr_maxp = fs_isoc_maxp;
+		maxp_arr_size = FS_ISOC_MAXP_SIZE;
+		arr_interval = fs_isoc_interval;
+		interval_arr_size = FS_ISOC_INTERVAL_SIZE;
+		sec = init_num_sec(1,3);
+		add_num_sec(1023-3,1023+3,sec);
+		add_num_sec(3072-3,3072+3,sec);
+		min_start_add = 0;
+		max_start_add = 0;
+
+		arr_ep_out = fs_isoc_out_ep;
+		out_ep_arr_size = FS_ISOC_OUT_EP_SIZE;
+		arr_ep_in = fs_isoc_in_ep;
+		in_ep_arr_size = FS_ISOC_IN_EP_SIZE;
+		/* burst & mult all default value */
+		arr_burst = df_burst;
+		burst_arr_size = DF_BURST_SIZE;
+		arr_mult = df_mult;
+		mult_arr_size = DF_MULT_SIZE;
+	}
+
+	for(ep_in_index = 0; ep_in_index < in_ep_arr_size; ep_in_index++){	/* 6-level sweep: ep_in > ep_out > maxp > interval > mult > burst */
+		ep_in_num = *(arr_ep_in + ep_in_index);
+		for(ep_out_index = 0; ep_out_index < out_ep_arr_size; ep_out_index++){
+			ep_out_num = *(arr_ep_out + ep_out_index);
+			for(maxp_index = 0; maxp_index < maxp_arr_size; maxp_index++){
+				maxp = *(arr_maxp + maxp_index);
+				for(interval_index = 0; interval_index < interval_arr_size; interval_index++){
+					bInterval = *(arr_interval + interval_index);
+					for(mult_index = 0; mult_index < mult_arr_size; mult_index++){
+						mult = *(arr_mult+mult_index);
+						for(burst_index = 0; burst_index < burst_arr_size; burst_index++){
+							burst = *(arr_burst+burst_index);
+							printk(KERN_ERR "[TEST]*************************************\n");
+							printk(KERN_ERR "    OUT_EP: %d***************************\n", ep_out_num);
+							printk(KERN_ERR "    IN_EP: %d ***************************\n", ep_in_num);
+							printk(KERN_ERR "    MAXP: %d ***************************\n", maxp);
+							printk(KERN_ERR "    INTERVAL: %d ***************************\n", bInterval);
+							printk(KERN_ERR "     MULT: %d **************************\n", mult);
+							printk(KERN_ERR "      BURST: %d *************************\n", burst);
+							/* ==phase 0 : device reset==*/
+
+							if(speed == DEV_SPEED_SUPER && transfer_type == EPATT_ISO && mult > 0 && burst > 1){	/* SS isoc with mult>0 and burst>1 is not exercised */
+								printk(KERN_ERR "      SKIP!!\n");
+								continue;
+							}
+							start_port_reenabled(0, speed);
+							ret=dev_reset(speed, NULL);
+							if(ret){
+								printk(KERN_ERR "device reset failed!!!!!!!!!!\n");
+								return ret;	/* NOTE(review): early returns below this point leak the 'sec' list (freed only at the end) */
+							}
+
+							ret = f_disable_slot();
+							if(ret){
+								printk(KERN_ERR "disable slot failed!!!!!!!!!!\n");
+								return ret;
+							}
+
+							ret = f_reenable_port(0);
+							if(ret != RET_SUCCESS){
+								printk(KERN_ERR "device reenable failed!!!!!!!!!!\n");
+								return ret;
+							}
+
+							ret = f_enable_slot(NULL);
+							if(ret){
+								printk(KERN_ERR "enable slot failed!!!!!!!!!!\n");
+								return ret;
+							}
+
+							ret=f_address_slot(false, NULL);
+							if(ret){
+								printk(KERN_ERR "address slot failed!!!!!!!!!!\n");
+								return ret;
+							}
+
+							/* reset SW scheduler algorithm */
+							mtk_xhci_scheduler_init();
+							/* ==phase 1 : config EP==*/
+							ret=dev_config_ep(ep_out_num,USB_RX,transfer_type,maxp,bInterval,mult_dev,burst,mult,NULL);	/* device RX = host OUT direction */
+							if(ret)
+							{
+								printk(KERN_ERR "config dev EP fail!!\n");
+								return ret;
+							}
+							ret=dev_config_ep(ep_in_num,USB_TX,transfer_type,maxp,bInterval,mult_dev,burst,mult,NULL);	/* device TX = host IN direction */
+							if(ret)
+							{
+								printk(KERN_ERR "config dev EP fail!!\n");
+								return ret;
+							}
+							ret = f_config_ep(ep_out_num,EPADD_OUT,transfer_type,maxp,bInterval,burst,mult, NULL,0);
+							if(ret)
+							{
+								printk(KERN_ERR "config EP fail!!\n");
+								return ret;
+							}
+
+							ret = f_config_ep(ep_in_num,EPADD_IN,transfer_type,maxp,bInterval,burst,mult, NULL,1);
+							if(ret)
+							{
+								printk(KERN_ERR "config EP fail!!\n");
+								return ret;
+							}
+
+							for(start_add = min_start_add; start_add <= max_start_add; start_add++){
+								cur_sec = sec;
+								while(cur_sec != NULL){	/* walk every transfer length in the numsection list */
+									length = cur_sec->current_value;
+									printk(KERN_ERR "      LENGTH: %d *************\n", length);
+									if(speed == DEV_SPEED_HIGH && transfer_type == EPATT_ISO
+										&& (maxp == 2048 || maxp == 3072) && (length%maxp != 0)
+										&& (length % 1024 == 0)){
+										printk(KERN_ERR "      SKIP!!\n");
+										cur_sec = find_next_num(cur_sec);
+										continue;
+									}
+
+									/* ==phase 2 : loopback==*/
+									if(speed == DEV_SPEED_SUPER && transfer_type == EPATT_ISO){
+										if(length % maxp == 0){
+											cur_sec = find_next_num(cur_sec);
+											printk(KERN_ERR "      SKIP!!\n");
+											continue;
+										}
+										else if((burst==0) && (length/maxp>250)){
+											cur_sec = find_next_num(cur_sec);
+											printk(KERN_ERR "      SKIP!!\n");
+											continue;
+										}
+										else{
+											bdp=1;
+											gpd_buf_size = length;
+											bd_buf_size = 4096;
+										}
+									}
+									else if(speed == DEV_SPEED_HIGH && transfer_type == EPATT_ISO){
+										if(length % maxp == 0){
+											cur_sec = find_next_num(cur_sec);
+											printk(KERN_ERR "      SKIP!!\n");
+											continue;
+										}
+										else if((mult == 0) && (length/maxp>250)){
+											cur_sec = find_next_num(cur_sec);
+											printk(KERN_ERR "      SKIP!!\n");
+											continue;
+										}
+										else{
+											bdp=1;
+											gpd_buf_size = length;
+											bd_buf_size = 4096;
+										}
+									}
+									else if(transfer_type == EPATT_ISO){
+										if(length/maxp > 250){
+											cur_sec = find_next_num(cur_sec);
+											printk(KERN_ERR "      SKIP!!\n");
+											continue;
+										}
+										else{
+											bdp=1;
+											gpd_buf_size = length;
+											bd_buf_size = 4096;
+										}
+									}
+									else{
+										bdp=1;
+										gpd_buf_size = length;
+										bd_buf_size = 4096;
+									}
+
+									ret=dev_loopback(bdp,length,gpd_buf_size,bd_buf_size,dram_offset,extension,NULL);
+									if(ret)
+									{
+										printk(KERN_ERR "loopback request fail!!\n");
+										f_power_suspend();
+										return ret;
+									}
+
+									ret = f_loopback_loop_gpd(
+										ep_out_num, ep_in_num, length, start_add, gpd_buf_size, NULL);
+									if(ret)
+									{
+										printk(KERN_ERR "loopback fail!!\n");
+										printk(KERN_ERR "length : %d\n",length);
+										f_power_suspend();
+										return ret;
+									}
+
+									/* ==phase 3: get device status==*/
+									ret=dev_polling_status(NULL);
+									if(ret)
+									{
+										f_power_suspend();
+										printk(KERN_ERR "query request fail!!\n");
+										return ret;
+									}
+									cur_sec = find_next_num(cur_sec);
+								}
+							}
+						} /* burst */
+					} /* mult */
+				} /* interval */
+			} /* maxp */
+		} /* ep_out */
+	} /* ep_in */
+	clear_num_secs(sec);	/* free the length-section list (only reached on full success) */
+	return ret;
+}
+
+static int t_u3auto_loopback(int argc, char** argv){	/* single-shot loopback test; argv: [1]=speed ss|hs|fs, [2]=type bulk|intr|isoc, [3]=maxp, [4]=interval, [5]=length, [6]=start_add, [7]=ep_out, [8]=ep_in, [9]=burst, [10]=mult */
+	int ret,length,start_add;
+	char bdp;
+	int gpd_buf_size,bd_buf_size;
+	int transfer_type;
+	int maxp;
+	int bInterval;
+	int ep_out_num, ep_in_num;
+	int speed;
+	int mult, burst, dev_mult;
+	int dram_offset, extension;
+
+	ret = 0;
+	speed = DEV_SPEED_HIGH;	/* BUGFIX: dropped stray ';;' */
+	transfer_type = EPATT_BULK;
+	maxp = 512;
+	bInterval = 0;
+	ep_out_num = 1;
+	ep_in_num = 1;
+	length = 65535;
+	dev_mult = 1;
+	burst = 8;
+	mult = 0;
+	start_add = 0; dram_offset = 0; extension = 0;	/* BUGFIX: these were read uninitialized (dev_loopback/f_loopback_loop_gpd below) when not supplied via argv; default to 0 as in t_u3auto_loopback_scan() */
+	if(argc > 1){
+		if(!strcmp(argv[1], "ss")){
+			printk(KERN_ERR "Test super speed\n");
+			speed = DEV_SPEED_SUPER; /* TODO: superspeed */
+		}
+		else if(!strcmp(argv[1], "hs")){
+			printk(KERN_ERR "Test high speed\n");
+			speed = DEV_SPEED_HIGH;
+		}
+		else if(!strcmp(argv[1], "fs")){
+			printk(KERN_ERR "Test full speed\n");
+			speed = DEV_SPEED_FULL;
+		}
+	}
+	if(argc > 2){
+		if(!strcmp(argv[2], "bulk")){
+			printk(KERN_ERR "Test bulk transfer\n");
+			transfer_type = EPATT_BULK;
+		}
+		else if(!strcmp(argv[2], "intr")){
+			printk(KERN_ERR "Test intr transfer\n");
+			transfer_type = EPATT_INT;
+		}
+		else if(!strcmp(argv[2], "isoc")){
+			printk(KERN_ERR "Test isoc transfer\n");
+			transfer_type = EPATT_ISO;
+		}
+	}
+	if(argc > 3){
+		maxp = (int)simple_strtol(argv[3], &argv[3], 10);
+		printk(KERN_ERR "maxp set to %d\n", maxp);
+	}
+	if(argc > 4){
+		bInterval = (int)simple_strtol(argv[4], &argv[4], 10);
+		printk(KERN_ERR "interval set to %d\n", bInterval);
+	}
+	if(argc > 5){
+		length = (int)simple_strtol(argv[5], &argv[5], 10);
+		printk(KERN_ERR "length set to %d\n", length);
+	}
+	if(argc > 6){
+		start_add = (int)simple_strtol(argv[6], &argv[6], 10);
+		printk(KERN_ERR "start add offset set to %d\n", start_add);
+	}
+	if(argc > 7){
+		ep_out_num = (int)simple_strtol(argv[7], &argv[7], 10);
+		printk(KERN_ERR "ep out num set to %d\n", ep_out_num);
+	}
+	if(argc > 8){
+		ep_in_num = (int)simple_strtol(argv[8], &argv[8], 10);
+		printk(KERN_ERR "ep in num set to %d\n", ep_in_num);
+	}
+	if(argc > 9){
+		burst = (int)simple_strtol(argv[9], &argv[9], 10);
+		printk(KERN_ERR "burst set to %d\n", burst);
+	}
+	if(argc > 10){
+		mult = (int)simple_strtol(argv[10], &argv[10], 10);
+		printk(KERN_ERR "mult set to %d\n", mult);
+	}
+	printk(KERN_ERR "/*=========loopback===========*/\n");
+
+	/* ==phase 0 : device reset==*/
+	start_port_reenabled(0, speed);
+	ret=dev_reset(speed,NULL);
+	if(ret){
+		printk(KERN_ERR "device reset failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	ret = f_disable_slot();
+	if(ret){
+		printk(KERN_ERR "disable slot failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	ret = f_reenable_port(0);
+	if(ret != RET_SUCCESS){
+		printk(KERN_ERR "device reenable failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	ret = f_enable_slot(NULL);
+	if(ret){
+		printk(KERN_ERR "enable slot failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	ret=f_address_slot(false, NULL);
+	if(ret){
+		printk(KERN_ERR "address slot failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	/* ==phase 1 : config EP==*/
+	ret=dev_config_ep(ep_out_num,USB_RX,transfer_type,maxp,bInterval,dev_mult,burst,mult,NULL);	/* device RX = host OUT direction */
+	if(ret)
+	{
+		printk(KERN_ERR "config dev EP fail!!\n");
+		return ret;
+	}
+
+	ret=dev_config_ep(ep_in_num,USB_TX,transfer_type,maxp,bInterval,dev_mult,burst,mult,NULL);	/* device TX = host IN direction */
+	if(ret)
+	{
+		printk(KERN_ERR "config dev EP fail!!\n");
+		return ret;
+	}
+
+	ret = f_config_ep(ep_out_num,EPADD_OUT,transfer_type,maxp,bInterval,burst,mult, NULL,0);
+	if(ret)
+	{
+		printk(KERN_ERR "config EP fail!!\n");
+		return ret;
+	}
+
+	ret = f_config_ep(ep_in_num,EPADD_IN,transfer_type,maxp,bInterval,burst,mult, NULL,1);
+	if(ret)
+	{
+		printk(KERN_ERR "config EP fail!!\n");
+		return ret;
+	}
+
+	/* ==phase 2 : loopback==*/
+	bdp=1;
+	gpd_buf_size=length;
+	bd_buf_size=8192;
+	/* TODO: device should turn off extension length feature */
+	ret=dev_loopback(bdp,length,gpd_buf_size,bd_buf_size,dram_offset,extension,NULL);
+	if(ret)
+	{
+		printk(KERN_ERR "loopback request fail!!\n");
+		return ret;
+	}
+
+	ret = f_loopback_loop_gpd(ep_out_num, ep_in_num, length, start_add, gpd_buf_size, NULL);
+	if(ret)
+	{
+		printk(KERN_ERR "loopback fail!!\n");
+		printk(KERN_ERR "length : %d\n",length);
+		return ret;
+	}
+
+	/* ==phase 3: get device status==*/
+	ret=dev_polling_status(NULL);
+	if(ret)
+	{
+		printk(KERN_ERR "query request fail!!\n");
+		return ret;
+	}
+	return ret;
+}
+
+static int t_u3auto_loopback_sg(int argc, char** argv){	/* scatter-gather loopback test; argv: [1]=speed, [2]=type, [3]=maxp, [4]=interval, [5]=length, [6]=start_add, [7]=sg_len (0 = random), [8]=ep_out, [9]=ep_in, [10]=burst, [11]=mult */
+	int ret,length,start_add;
+	char bdp;
+	int gpd_buf_size,bd_buf_size;
+	int transfer_type;
+	int maxp;
+	int bInterval;
+	int ep_out_num, ep_in_num;
+	int speed;
+	int sg_len;
+	int mult,burst, dev_mult;
+	int dram_offset, extension;
+
+	ret = 0;
+	speed = DEV_SPEED_HIGH;
+	transfer_type = EPATT_BULK;
+	maxp = 512;
+	ep_out_num = 1;
+	ep_in_num = 1;
+	bInterval = 0;
+	length = 65535;
+	mult=0;
+	burst=8;
+	dev_mult=1;
+	start_add = 0; sg_len = 0; dram_offset = 0; extension = 0;	/* BUGFIX: these were read uninitialized below when not supplied via argv; sg_len==0 means "random sg length" */
+	if(argc > 1){
+		if(!strcmp(argv[1], "ss")){
+			printk(KERN_ERR "Test super speed\n");
+			speed = DEV_SPEED_SUPER;
+		}
+		else if(!strcmp(argv[1], "hs")){
+			printk(KERN_ERR "Test high speed\n");
+			speed = DEV_SPEED_HIGH;
+		}
+		else if(!strcmp(argv[1], "fs")){
+			printk(KERN_ERR "Test full speed\n");
+			speed = DEV_SPEED_FULL;
+		}
+	}
+	if(argc > 2){
+		if(!strcmp(argv[2], "bulk")){
+			printk(KERN_ERR "Test bulk transfer\n");
+			transfer_type = EPATT_BULK;
+		}
+		else if(!strcmp(argv[2], "intr")){
+			printk(KERN_ERR "Test intr transfer\n");
+			transfer_type = EPATT_INT;
+		}
+		else if(!strcmp(argv[2], "isoc")){
+			printk(KERN_ERR "Test isoc transfer\n");
+			transfer_type = EPATT_ISO;
+		}
+	}
+	if(argc > 3){
+		maxp = (int)simple_strtol(argv[3], &argv[3], 10);
+		printk(KERN_ERR "maxp set to %d\n", maxp);
+	}
+	if(argc > 4){
+		bInterval = (int)simple_strtol(argv[4], &argv[4], 10);
+		printk(KERN_ERR "interval set to %d\n", bInterval);
+	}
+	if(argc > 5){
+		length = (int)simple_strtol(argv[5], &argv[5], 10);
+		printk(KERN_ERR "length set to %d\n", length);
+	}
+	if(argc > 6){
+		start_add = (int)simple_strtol(argv[6], &argv[6], 10);
+		printk(KERN_ERR "start add offset set to %d\n", start_add);
+	}
+	if(argc > 7){
+		sg_len = (int)simple_strtol(argv[7], &argv[7], 10);
+		printk(KERN_ERR "sg length set to %d\n", sg_len);
+		if(sg_len == 0){
+			printk(KERN_ERR "random sg length\n");
+		}
+	}
+	if(argc > 8){
+		ep_out_num = (int)simple_strtol(argv[8], &argv[8], 10);
+		printk(KERN_ERR "ep out num set to %d\n", ep_out_num);
+	}
+	if(argc > 9){
+		ep_in_num = (int)simple_strtol(argv[9], &argv[9], 10);
+		printk(KERN_ERR "ep in num set to %d\n", ep_in_num);
+	}
+	if(argc > 10){
+		burst = (int)simple_strtol(argv[10], &argv[10], 10);
+		printk(KERN_ERR "burst set to %d\n", burst);
+	}
+	if(argc > 11){
+		mult = (int)simple_strtol(argv[11], &argv[11], 10);
+		printk(KERN_ERR "mult set to %d\n", mult);
+	}
+	printk(KERN_ERR "/*=========loopback===========*/\n");
+
+	/* ==phase 0 : device reset==*/
+	start_port_reenabled(0, speed);
+	ret=dev_reset(speed,NULL);
+	if(ret){
+		printk(KERN_ERR "device reset failed!!!!!!!!!!\n");
+		return ret;
+	}
+	ret = f_disable_slot();
+	if(ret){
+		printk(KERN_ERR "disable slot failed!!!!!!!!!!\n");
+		return ret;
+	}
+	ret = f_reenable_port(0);
+	if(ret != RET_SUCCESS){
+		printk(KERN_ERR "device reenable failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	ret = f_enable_slot(NULL);
+	if(ret){
+		printk(KERN_ERR "enable slot failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	ret=f_address_slot(false, NULL);
+	if(ret){
+		printk(KERN_ERR "address slot failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	/* ==phase 1 : config EP==*/
+	ret=dev_config_ep(ep_out_num,USB_RX,transfer_type,maxp,bInterval,dev_mult,burst,mult,NULL);	/* device RX = host OUT direction */
+	if(ret)
+	{
+		printk(KERN_ERR "config dev EP fail!!\n");
+		return ret;
+	}
+
+	ret=dev_config_ep(ep_in_num,USB_TX,transfer_type,maxp,bInterval,dev_mult,burst,mult,NULL);	/* device TX = host IN direction */
+	if(ret)
+	{
+		printk(KERN_ERR "config dev EP fail!!\n");
+		return ret;
+	}
+
+	ret = f_config_ep(ep_out_num,EPADD_OUT,transfer_type,maxp,bInterval,burst,mult,NULL,0);
+	if(ret)
+	{
+		printk(KERN_ERR "config EP fail!!\n");
+		return ret;
+	}
+
+	ret = f_config_ep(ep_in_num,EPADD_IN,transfer_type,maxp,bInterval,burst,mult, NULL,1);
+	if(ret)
+	{
+		printk(KERN_ERR "config EP fail!!\n");
+		return ret;
+	}
+
+	/* ==phase 2 : loopback==*/
+	bdp=1;
+	gpd_buf_size=length;
+	bd_buf_size=4096;
+	/* TODO: device should turn off extension length feature */
+	ret=dev_loopback(bdp,length,gpd_buf_size,bd_buf_size,dram_offset,extension,NULL);
+	if(ret)
+	{
+		printk(KERN_ERR "loopback request fail!!\n");
+		return ret;
+	}
+
+	ret = f_loopback_sg_loop(ep_out_num, ep_in_num, length, start_add, sg_len, NULL);
+	if(ret)
+	{
+		printk(KERN_ERR "loopback fail!!\n");
+		printk(KERN_ERR "length : %d\n",length);
+		return ret;
+	}
+
+	/* ==phase 3: get device status==*/
+	ret=dev_polling_status(NULL);
+	if(ret)
+	{
+		printk(KERN_ERR "query request fail!!\n");
+		return ret;
+	}
+	return ret;
+}
+
+/*BESL TO HIRD Encoding array for USB2 LPM*/
+static int xhci_besl_encoding[16] = {125,150,200,300,400,500,1000,2000,	/* indexed by 4-bit BESL value; values presumably latency in microseconds — confirm against USB 2.0 LPM ECN / xHCI spec */
+	3000,4000,5000,6000,7000,8000,9000,10000};
+
+/*
+ * t_u3auto_hw_lpm() - sweep USB2 hardware-LPM parameters and run a device
+ * loopback for every combination.
+ *
+ * argv[1] selects device speed ("ss"/"hs"/"fs", default high speed);
+ * argv[2] selects the transfer type ("bulk"/"intr"/"isoc"/"ctrl", default
+ * bulk). For each (hirdm, rwe, besl, besld, bInterval, l1_timeout) tuple
+ * the test: resets and re-enumerates the device, evaluates the slot
+ * context with BESL pre-ping values, configures one OUT and one IN
+ * endpoint (unless ctrl), programs LPM on both device and host, runs a
+ * loopback, polls device status, then disables hardware LPM again.
+ * Returns 0 on success or the first non-zero error code.
+ */
+static int t_u3auto_hw_lpm(int argc, char** argv){
+	int ret,length,start_add;
+	char bdp;
+	int gpd_buf_size,bd_buf_size;
+	int transfer_type;
+	int maxp;
+	int bInterval;
+	int ep_out_num, ep_in_num;
+	int speed;
+	int mult, burst, dev_mult;
+	int dram_offset, extension;
+	int hle, hirdm, rwe, l1_timeout, besl, besld;
+	int lpm_mode, wakeup, beslck, beslck_u3, beslckd, cond, cond_en;
+	int max_exit_latency,maxp0,preping_mode,preping,besl_preping,besld_preping;
+	int l1_timeout_min,l1_timeout_mid,l1_timeout_max;
+
+	ret = 0;
+	/* NOTE(review): stray double ';' below (harmless). */
+	speed = DEV_SPEED_HIGH;;
+	transfer_type = EPATT_BULK;
+	maxp = 512;
+	bInterval = 0;
+	ep_out_num = 1;
+	ep_in_num = 1;
+	length = 10241;
+	dev_mult = 1;
+	burst = 8;
+	mult = 0;
+	dram_offset = 0;
+	extension = 0;
+	start_add = 0;
+
+	hle = 0;
+	hirdm = 0;
+	rwe = 0;
+	l1_timeout = 0;
+	lpm_mode = 0;
+	wakeup = 0;
+	beslck = 0;
+	beslck_u3 = 0;
+	beslckd = 0;
+	cond = 0;
+	cond_en = 0;
+
+	max_exit_latency = 0;
+	maxp0 = 64;
+	preping_mode = 0;
+	preping = 0;
+	besl_preping = 0;
+	besld_preping = 0;
+
+	l1_timeout_min = 0;
+	l1_timeout_mid = 0;
+	l1_timeout_max = 0;
+
+
+	/* Parse optional speed argument. */
+	if(argc > 1){
+		if(!strcmp(argv[1], "ss")){
+			printk(KERN_ERR "Test super speed\n");
+			speed = DEV_SPEED_SUPER;
+		}
+		else if(!strcmp(argv[1], "hs")){
+			printk(KERN_ERR "Test high speed\n");
+			speed = DEV_SPEED_HIGH;
+		}
+		else if(!strcmp(argv[1], "fs")){
+			printk(KERN_ERR "Test full speed\n");
+			speed = DEV_SPEED_FULL;
+		}
+	}
+	/* Parse optional transfer-type argument. */
+	if(argc > 2){
+		if(!strcmp(argv[2], "bulk")){
+			printk(KERN_ERR "Test bulk transfer\n");
+			transfer_type = EPATT_BULK;
+		}
+		else if(!strcmp(argv[2], "intr")){
+			printk(KERN_ERR "Test intr transfer\n");
+			transfer_type = EPATT_INT;
+		}
+		else if(!strcmp(argv[2], "isoc")){
+			printk(KERN_ERR "Test isoc transfer\n");
+			transfer_type = EPATT_ISO;
+		}
+		else if(!strcmp(argv[2], "ctrl")){
+			printk(KERN_ERR "Test ctrl transfer\n");
+			transfer_type = EPATT_CTRL;
+		}
+
+	}
+
+	/* Pick max packet size from (transfer type, speed). */
+	if (transfer_type == EPATT_BULK){
+		if (speed == DEV_SPEED_FULL){
+			maxp = 64;
+		}
+		else if (speed == DEV_SPEED_HIGH){
+			maxp = 512;
+		}
+	}
+	else if (transfer_type == EPATT_INT){
+		if (speed == DEV_SPEED_FULL){
+			maxp = 64;
+		}
+		else if (speed == DEV_SPEED_HIGH){
+			maxp = 1024;
+		}
+	}
+	else if (transfer_type == EPATT_ISO){
+		if (speed == DEV_SPEED_FULL){
+			maxp = 1023;
+		}
+		else if (speed == DEV_SPEED_HIGH){
+			maxp = 1024;
+		}
+	}
+	printk(KERN_ERR " maxp = %d\n",maxp);
+
+	/*
+	 * NOTE(review): besld starts at besl+6 and the loop requires
+	 * besld < 10, so for besl >= 4 the besld loop body never runs and
+	 * those besl values are silently skipped -- confirm this pruning
+	 * is intentional.
+	 */
+	for(hirdm = 0; hirdm < 2; hirdm++){
+		for (rwe = 0; rwe < 2; rwe++){
+			for (besl = 0; besl < 8; besl++){
+				for (besld = besl + 6; besld < 10; besld++){
+					for (bInterval = 6; bInterval < 9; bInterval++){
+						/* Derive the l1_timeout search range from the service interval. */
+						if (speed == DEV_SPEED_HIGH){
+							l1_timeout_min = 0;
+							l1_timeout_mid = ((1<<(bInterval-1)) * 125) / (256*2) + 1;
+							l1_timeout_max = ((1<<(bInterval-1)) * 125) / 256;
+						}
+						else if(speed == DEV_SPEED_FULL){
+							l1_timeout_min = 0;
+							l1_timeout_mid = (bInterval*1000) / (256*2) + 1;
+							l1_timeout_max = (bInterval*1000) / 256;
+						}
+
+						for (l1_timeout = l1_timeout_min; l1_timeout <= l1_timeout_max; l1_timeout++){
+
+							/* Skip the interior of the range: only min, mid and max-ish values are tested. */
+							if (l1_timeout == l1_timeout_mid + 1){
+								l1_timeout = l1_timeout_max - 1;
+							}
+							else if ((l1_timeout > 1) && (l1_timeout < (l1_timeout_max - 1))){
+								l1_timeout = l1_timeout_mid;
+							}
+
+							if (rwe == 1 && l1_timeout == 0){
+								l1_timeout = l1_timeout_mid;
+							}
+
+							/* With remote wake enabled only the mid value is exercised. */
+							if ((l1_timeout != l1_timeout_mid) && (rwe == 1)){
+								break;
+							}
+
+							/* for bulk and control endpoint : set the bInterval to be 0 */
+							if ((transfer_type == EPATT_BULK) || (transfer_type == EPATT_CTRL)){
+								bInterval = 0;
+							}
+
+							/* ==phase 0 : device reset== */
+							start_port_reenabled(0, speed);
+							ret=dev_reset(speed,NULL);
+							if(ret){
+								printk(KERN_ERR "device reset failed!!!!!!!!!!\n");
+								return ret;
+							}
+
+							ret = f_disable_slot();
+							if(ret){
+								printk(KERN_ERR "disable slot failed!!!!!!!!!!\n");
+								return ret;
+							}
+
+							/* if XHCI_DEBUG = 1, the debug log is so much that it affects the timing :
+							 * when we read the port status, it is in connected already and the disconnected status is ignored
+							 * so, if XHCI_DEBUG = 1, we only wait the device connect to the host.
+							 */
+#if XHCI_DEBUG
+							ret = f_enable_port(0);
+							if(ret != RET_SUCCESS){
+								printk(KERN_ERR "device enable failed!!!!!!!!!!\n");
+								return ret;
+							}
+#else
+							ret = f_reenable_port(0);
+							if(ret != RET_SUCCESS){
+								printk(KERN_ERR "device reenable failed!!!!!!!!!!\n");
+								return ret;
+							}
+#endif
+
+							ret = f_enable_slot(NULL);
+							if(ret){
+								printk(KERN_ERR "enable slot failed!!!!!!!!!!\n");
+								return ret;
+							}
+
+							ret=f_address_slot(false, NULL);
+							if(ret){
+								printk(KERN_ERR "address slot failed!!!!!!!!!!\n");
+								return ret;
+							}
+
+							/* reset SW scheduler algorithm */
+							mtk_xhci_scheduler_init();
+
+							/* Convert BESL table values to pre-ping counts (125us units, FS rounded to 8). */
+							if (speed == DEV_SPEED_HIGH){
+								besl_preping = xhci_besl_encoding[besl] / 125 + 1;
+								besld_preping = xhci_besl_encoding[besld] / 125 + 1;
+							}
+							else if (speed == DEV_SPEED_FULL){
+								besl_preping = ((xhci_besl_encoding[besl] / 125 + 8)/8) * 8;
+								besld_preping = ((xhci_besl_encoding[besld] / 125 + 8)/8) * 8;
+							}
+
+							/* evaluate the slot context to update besl_preping/besld_preping */
+							ret = f_evaluate_context(max_exit_latency, maxp0, preping_mode, preping, besl_preping, besld_preping);
+
+							if(ret != RET_SUCCESS){
+								printk(KERN_ERR "evaluate slot context fail!!\n");
+								return RET_FAIL;
+							}
+
+							/* ==phase 1 : config EP== */
+							if (transfer_type != EPATT_CTRL){
+								ret=dev_config_ep(ep_out_num,USB_RX,transfer_type,maxp,bInterval,dev_mult,burst,mult,NULL);
+								if(ret)
+								{
+									printk(KERN_ERR "config dev EP fail!!\n");
+									return ret;
+								}
+
+								ret=dev_config_ep(ep_in_num,USB_TX,transfer_type,maxp,bInterval,dev_mult,burst,mult,NULL);
+								if(ret)
+								{
+									printk(KERN_ERR "config dev EP fail!!\n");
+									return ret;
+								}
+
+								ret = f_config_ep(ep_out_num,EPADD_OUT,transfer_type,maxp,bInterval,burst,mult, NULL,0);
+								if(ret)
+								{
+									printk(KERN_ERR "config EP fail!!\n");
+									return ret;
+								}
+
+								ret = f_config_ep(ep_in_num,EPADD_IN,transfer_type,maxp,bInterval,burst,mult, NULL,1);
+								if(ret)
+								{
+									printk(KERN_ERR "config EP fail!!\n");
+									return ret;
+								}
+							}
+
+							/* ==phase 2 : config lpm== */
+
+							lpm_mode = 0;
+
+							/* NOTE(review): both branches assign wakeup = 2; the rwe test is dead. */
+							if (rwe == 1){
+								wakeup = 2;
+							}
+							else{
+								wakeup = 2;
+							}
+
+							beslck = 0;
+							beslck_u3 = 0;
+							beslckd = 7;
+							cond = 0;
+							cond_en = 0;
+
+							printk(KERN_ERR "U3D LPM config:\n");
+							printk(KERN_ERR "  lpm_mode = %d\n",lpm_mode);
+							printk(KERN_ERR "  wakeup = %d\n",wakeup);
+							printk(KERN_ERR "  beslck = %d, beslck_u3 = %d, beslckd = %d\n",beslck,beslck_u3,beslckd);
+							printk(KERN_ERR "  cond = %d, cond_en = %d\n",cond,cond_en);
+
+							dev_lpm_config(lpm_mode, wakeup, beslck, beslck_u3, beslckd, cond, cond_en);
+#if 0
+							if ((transfer_type == EPATT_ISO) || (transfer_type == EPATT_INT && speed == DEV_SPEED_HIGH)){
+								l1_timeout = ((1<<(bInterval-1)) * 125) / (256*2) + 1;
+							}
+							else if(transfer_type == EPATT_INT && speed == DEV_SPEED_FULL){
+								l1_timeout = bInterval / (256*2) + 1;
+							}
+							else{/*bulk and control transfer*/
+								l1_timeout = 4;
+							}
+#endif
+							/* Enable hardware LPM on the host side for this round. */
+							hle = 1;
+
+							printk(KERN_ERR "U3H LPM config:\n");
+							printk(KERN_ERR "  hle = %d\n",hle);
+							printk(KERN_ERR "  rwe = %d\n",rwe);
+							printk(KERN_ERR "  hirdm = %d\n",hirdm);
+							printk(KERN_ERR "  besl = %d, besld = %d\n",besl,besld);
+							printk(KERN_ERR "  besl_preping = %d, besld_preping = %d\n",besl_preping,besld_preping);
+							printk(KERN_ERR "  bInterval = %d, l1_timeout = %d\n",bInterval,l1_timeout);
+
+							f_power_config_lpm(g_slot_id, hirdm, l1_timeout, rwe, besl, besld, hle, 0, 0);
+							/* ==phase 3 : loopback== */
+							if (transfer_type != EPATT_CTRL){
+								printk(KERN_ERR "Do loopback, length %d\n", length);
+
+								bdp=1;
+								gpd_buf_size=length;
+								bd_buf_size=8192;
+								/* TODO: device should turn off extension length feature */
+#if 0
+								if(((length-10)%(bd_buf_size+6))<7){
+									length+=12;
+								}
+#endif
+								ret=dev_loopback(bdp,length,gpd_buf_size,bd_buf_size,dram_offset,extension,NULL);
+								if(ret)
+								{
+									printk(KERN_ERR "loopback request fail!!\n");
+									return ret;
+								}
+
+								ret = f_loopback_loop_gpd(ep_out_num, ep_in_num, length, start_add, gpd_buf_size, NULL);
+								if(ret){
+									printk(KERN_ERR "loopback fail!!\n");
+									printk(KERN_ERR "length : %d\n",length);
+									return ret;
+								}
+							}
+							else{
+								printk(KERN_ERR "Do CTRL loopback, length %d\n", length);
+								ret=dev_ctrl_loopback(length,NULL);
+
+								if(ret)
+								{
+									printk(KERN_ERR "ctrl loop fail!!\n");
+									printk(KERN_ERR "length : %d\n",length);
+									/*
+									 * NOTE(review): unlike every other error path here,
+									 * this 'break' only exits the innermost l1_timeout
+									 * loop and the sweep continues with ret != 0 --
+									 * confirm a 'return ret' was not intended.
+									 */
+									break;
+								}
+							}
+
+							/* ==phase 4: get device status== */
+							ret=dev_polling_status(NULL);
+							if(ret)
+							{
+								printk(KERN_ERR "query request fail!!\n");
+								return ret;
+							}
+
+							/* ==phase 5: disable hwlpm == */
+							hle = 0;
+
+							f_power_config_lpm(g_slot_id, hirdm, l1_timeout, rwe, besl, besld, hle, 0, 0);
+						} /* end of l1_timeout */
+
+						/* for bulk and control endpoint :ignore the bInterval */
+						if ((transfer_type == EPATT_BULK) || (transfer_type == EPATT_CTRL)){
+							break;
+						}
+					} /* end of bInterval */
+				} /* end of besld */
+			} /* end of besl */
+		} /* end of rwe */
+	} /* end of hirdm */
+	return ret;
+}
+
+/* always use bulk transfer */
+/*
+ * t_u3auto_random_suspend() - bulk-loopback stress with randomly
+ * interleaved suspend/resume cycles.
+ *
+ * argv[1]: speed ("ss"/"hs"/"fs"); argv[2]: number of suspend/resume
+ * rounds to perform (default 10); argv[3]: override maxp. The loop
+ * randomly decides (50/50) whether to suspend for ~1s and resume before
+ * each random-length loopback, until 'suspend_boundry' suspends have
+ * been done. Returns 0 on success, first error code otherwise.
+ */
+static int t_u3auto_random_suspend(int argc, char** argv){
+	int ret,length,start_add;
+	char bdp;
+	int gpd_buf_size,bd_buf_size;
+	int transfer_type;
+	int maxp;
+	int bInterval;
+	int ep_out_num, ep_in_num;
+	int speed;
+	int mult, burst, dev_mult;
+	/*
+	 * NOTE(review): dram_offset and extension are never assigned in this
+	 * function but are passed to dev_loopback() below -- uninitialized
+	 * use (t_u3auto_hw_lpm zeroes them first; this function should too).
+	 */
+	int dram_offset, extension;
+	int suspend_count, suspend_boundry;
+	char isSuspend;
+
+	ret = 0;
+	speed = DEV_SPEED_HIGH;;
+	transfer_type = EPATT_BULK;
+	maxp = 512;
+	bInterval = 0;
+	ep_out_num = 1;
+	ep_in_num = 1;
+	length = 65535;
+	dev_mult = 1;
+	burst = 8;
+	mult = 0;
+	suspend_count = 0;
+	suspend_boundry = 10;
+
+	if(argc > 1){
+		if(!strcmp(argv[1], "ss")){
+			printk(KERN_ERR "Test super speed\n");
+			speed = DEV_SPEED_SUPER; /* TODO: superspeed */
+		}
+		else if(!strcmp(argv[1], "hs")){
+			printk(KERN_ERR "Test high speed\n");
+			speed = DEV_SPEED_HIGH;
+		}
+		else if(!strcmp(argv[1], "fs")){
+			printk(KERN_ERR "Test full speed\n");
+			speed = DEV_SPEED_FULL;
+		}
+	}
+	if(argc > 2){
+		suspend_boundry = (int)simple_strtol(argv[2], &argv[2], 10);
+		printk(KERN_ERR "suspend_boundry set to %d\n", suspend_boundry);
+	}
+	/* Default maxp per speed; argv[3] may override it below. */
+	if(speed == DEV_SPEED_SUPER){
+		maxp = 1024;
+	}
+	else if(speed == DEV_SPEED_HIGH){
+		maxp = 512;
+	}
+	else if(speed == DEV_SPEED_FULL){
+		maxp = 64;
+	}
+	if(argc > 3){
+		maxp = (int)simple_strtol(argv[3], &argv[3], 10);
+		printk(KERN_ERR "maxp set to %d\n", maxp);
+	}
+
+	bInterval = 0;
+	/* random length */
+	start_add = 0;
+	ep_out_num = 1;
+	ep_in_num = 1;
+	burst = 8;
+	mult = 0;
+
+	/* ==phase 0 : device reset==*/
+	start_port_reenabled(0, speed);
+	ret=dev_reset(speed,NULL);
+	if(ret){
+		printk(KERN_ERR "device reset failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	ret = f_disable_slot();
+	if(ret){
+		printk(KERN_ERR "disable slot failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	ret = f_reenable_port(0);
+	if(ret != RET_SUCCESS){
+		printk(KERN_ERR "device reenable failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	ret = f_enable_slot(NULL);
+	if(ret){
+		printk(KERN_ERR "enable slot failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	ret=f_address_slot(false, NULL);
+	if(ret){
+		printk(KERN_ERR "address slot failed!!!!!!!!!!\n");
+		return ret;
+	}
+	mtk_xhci_scheduler_init();
+	/* ==phase 1 : config EP==*/
+	ret=dev_config_ep(ep_out_num,USB_RX,transfer_type,maxp,bInterval,dev_mult,burst,mult,NULL);
+	if(ret)
+	{
+		printk(KERN_ERR "config dev EP RX fail!!\n");
+		return ret;
+	}
+
+	ret=dev_config_ep(ep_in_num,USB_TX,transfer_type,maxp,bInterval,dev_mult,burst,mult,NULL);
+	if(ret)
+	{
+		printk(KERN_ERR "config dev EP TX fail!!\n");
+		return ret;
+	}
+
+	ret = f_config_ep(ep_out_num,EPADD_OUT,transfer_type,maxp,bInterval,burst,mult, NULL,0);
+	if(ret)
+	{
+		printk(KERN_ERR "config EP OUT fail!!\n");
+		return ret;
+	}
+
+	ret = f_config_ep(ep_in_num,EPADD_IN,transfer_type,maxp,bInterval,burst,mult, NULL,1);
+	if(ret)
+	{
+		printk(KERN_ERR "config EP IN fail!!\n");
+		return ret;
+	}
+	suspend_count = 0;
+	while(suspend_count < suspend_boundry){
+		/* Coin flip: maybe do a suspend/resume cycle before this round. */
+		isSuspend = (char)(get_random_int() % 2);
+		if(isSuspend){
+			printk(KERN_ERR "Suspend/Resume\n");
+			ret = f_power_suspend();
+			if(ret){
+				return ret;
+			}
+			mdelay(1000);
+			ret = f_power_resume();
+			if(ret){
+				return ret;
+			}
+			suspend_count++;
+		}
+		/* Random payload length in [1, 65535]. */
+		length = (get_random_int() % 65535)+1;
+		printk(KERN_ERR "loopback %d length\n", length);
+		/* ==phase 2 : loopback==*/
+		bdp=1;
+		gpd_buf_size=length;
+		bd_buf_size=8192;
+		/* TODO: device should turn off extension length feature */
+		ret=dev_loopback(bdp,length,gpd_buf_size,bd_buf_size,dram_offset,extension,NULL);
+		if(ret)
+		{
+			printk(KERN_ERR "loopback request fail!!\n");
+			return ret;
+		}
+
+		ret = f_loopback_loop(ep_out_num, ep_in_num, length, start_add,NULL);
+		if(ret)
+		{
+			printk(KERN_ERR "loopback fail!!\n");
+			printk(KERN_ERR "length : %d\n",length);
+			return ret;
+		}
+
+		/* ==phase 3: get device status==*/
+		ret=dev_polling_status(NULL);
+		if(ret)
+		{
+			printk(KERN_ERR "query request fail!!\n");
+			return ret;
+		}
+
+	}
+	return ret;
+}
+
+/*
+ * t_u3auto_random_wakeup() - bulk-loopback stress with randomly
+ * interleaved device remote-wakeup cycles.
+ *
+ * argv[1]: speed ("ss"/"hs"/"fs"); argv[2]: number of wakeup rounds
+ * (default 10); argv[3]: override maxp. Each iteration randomly (50/50)
+ * arms the device for remote wakeup after a random delay and suspends
+ * the host, then runs a random-length loopback and polls device status.
+ * Returns 0 on success, first error code otherwise.
+ */
+static int t_u3auto_random_wakeup(int argc, char**argv){
+	int ret,length,start_add, random_delay;
+	char bdp;
+	int gpd_buf_size,bd_buf_size;
+	int transfer_type;
+	int maxp;
+	int bInterval;
+	int ep_out_num, ep_in_num;
+	int speed;
+	int mult, burst, dev_mult;
+	/*
+	 * NOTE(review): dram_offset and extension are never assigned before
+	 * being passed to dev_loopback() below -- uninitialized use, same as
+	 * in t_u3auto_random_suspend.
+	 */
+	int dram_offset, extension;
+	int suspend_count, suspend_boundry;
+	char isSuspend;
+
+	ret = 0;
+	speed = DEV_SPEED_HIGH;;
+	transfer_type = EPATT_BULK;
+	maxp = 512;
+	bInterval = 0;
+	ep_out_num = 1;
+	ep_in_num = 1;
+	length = 65535;
+	dev_mult = 1;
+	burst = 8;
+	mult = 0;
+	suspend_count = 0;
+	suspend_boundry = 10;
+
+	if(argc > 1){
+		if(!strcmp(argv[1], "ss")){
+			printk(KERN_ERR "Test super speed\n");
+			speed = DEV_SPEED_SUPER;
+		}
+		else if(!strcmp(argv[1], "hs")){
+			printk(KERN_ERR "Test high speed\n");
+			speed = DEV_SPEED_HIGH;
+		}
+		else if(!strcmp(argv[1], "fs")){
+			printk(KERN_ERR "Test full speed\n");
+			speed = DEV_SPEED_FULL;
+		}
+	}
+	if(argc > 2){
+		suspend_boundry = (int)simple_strtol(argv[2], &argv[2], 10);
+		printk(KERN_ERR "wakeup_boundary set to %d\n", suspend_boundry);
+	}
+	/* Default maxp per speed; argv[3] may override it below. */
+	if(speed == DEV_SPEED_SUPER){
+		maxp = 1024;
+	}
+	else if(speed == DEV_SPEED_HIGH){
+		maxp = 512;
+	}
+	else if(speed == DEV_SPEED_FULL){
+		maxp = 64;
+	}
+	if(argc > 3){
+		maxp = (int)simple_strtol(argv[3], &argv[3], 10);
+		printk(KERN_ERR "maxp set to %d\n", maxp);
+	}
+
+	bInterval = 0;
+	/* random length */
+	start_add = 0;
+	ep_out_num = 1;
+	ep_in_num = 1;
+	burst = 8;
+	mult = 0;
+
+	/* ==phase 0 : device reset==*/
+	start_port_reenabled(0, speed);
+	ret=dev_reset(speed,NULL);
+	if(ret){
+		printk(KERN_ERR "device reset failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	ret = f_disable_slot();
+	if(ret){
+		printk(KERN_ERR "disable slot failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	ret = f_reenable_port(0);
+	if(ret != RET_SUCCESS){
+		printk(KERN_ERR "device reenable failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	ret = f_enable_slot(NULL);
+	if(ret){
+		printk(KERN_ERR "enable slot failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	ret=f_address_slot(false, NULL);
+	if(ret){
+		printk(KERN_ERR "address slot failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	/* ==phase 1 : config EP==*/
+	ret=dev_config_ep(ep_out_num,USB_RX,transfer_type,maxp,bInterval,dev_mult,burst,mult,NULL);
+	if(ret)
+	{
+		printk(KERN_ERR "config dev EP fail!!\n");
+		return ret;
+	}
+
+	ret=dev_config_ep(ep_in_num,USB_TX,transfer_type,maxp,bInterval,dev_mult,burst,mult,NULL);
+	if(ret)
+	{
+		printk(KERN_ERR "config dev EP fail!!\n");
+		return ret;
+	}
+	mtk_xhci_scheduler_init();
+	ret = f_config_ep(ep_out_num,EPADD_OUT,transfer_type,maxp,bInterval,burst,mult, NULL,0);
+	if(ret)
+	{
+		printk(KERN_ERR "config EP fail!!\n");
+		return ret;
+	}
+
+	ret = f_config_ep(ep_in_num,EPADD_IN,transfer_type,maxp,bInterval,burst,mult, NULL,1);
+	if(ret)
+	{
+		printk(KERN_ERR "config EP fail!!\n");
+		return ret;
+	}
+	suspend_count = 0;
+	while(suspend_count < suspend_boundry){
+		/* Random payload length in [1, 65535] and random wakeup delay. */
+		length = (get_random_int() % 65535)+1;
+		printk(KERN_ERR "loopback %d length\n", length);
+		random_delay = (get_random_int() % 300000)+1;
+		bdp=1;
+		gpd_buf_size=length;
+		bd_buf_size=8192;
+		isSuspend = (char)(get_random_int() % 2);
+		if(isSuspend){
+			printk(KERN_ERR "Suspend/Resume\n");
+			/*
+			 * NOTE(review): the result of dev_remotewakeup() is
+			 * immediately overwritten by f_power_remotewakeup() --
+			 * its failure is never checked.
+			 */
+			ret = dev_remotewakeup(random_delay, NULL);
+			ret = f_power_remotewakeup();
+			if(ret){
+				return ret;
+			}
+			suspend_count++;
+		}
+
+		/* ==phase 2 : loopback==*/
+
+		/* TODO: device should turn off extension length feature */
+		ret=dev_loopback(bdp,length,gpd_buf_size,bd_buf_size,dram_offset,extension,NULL);
+		if(ret)
+		{
+			printk(KERN_ERR "loopback request fail!!\n");
+			return ret;
+		}
+
+		ret = f_loopback_loop(ep_out_num, ep_in_num, length, start_add,NULL);
+		if(ret)
+		{
+			printk(KERN_ERR "loopback fail!!\n");
+			printk(KERN_ERR "length : %d\n",length);
+			return ret;
+		}
+
+		/* ==phase 3: get device status==*/
+		ret=dev_polling_status(NULL);
+		if(ret)
+		{
+			printk(KERN_ERR "query request fail!!\n");
+			return ret;
+		}
+
+	}
+	return ret;
+}
+
+/*
+ * t_u3auto_reconfig() - repeatedly randomize an endpoint configuration
+ * and reconfigure it.
+ *
+ * argv[1]: speed ("ss"/"hs"/"fs") or "stop" (clears g_correct and
+ * returns); argv[2]: number of rounds (default 2); argv[3]: number of
+ * endpoints (default 1).
+ *
+ * NOTE(review): this function looks unfinished:
+ *  - dev_speed is read below (rnd bulk maxp selection) but is only
+ *    assigned when argv[1] matches a speed string -- possible
+ *    uninitialized use;
+ *  - rdn_ep_num / rnd_transfer_type / rdn_maxp / rdn_interval are
+ *    computed but never used (configure/loopback steps are placeholder
+ *    comments only);
+ *  - the function is declared int but falls off the end without a
+ *    return statement.
+ */
+static int t_u3auto_reconfig(int argc, char** argv){
+	int dev_speed;
+	unsigned int rnd_transfer_type, rdn_maxp, rdn_interval;
+	unsigned int length;
+	int rdn_ep_num;
+	int num_ep, round;
+	int i;
+
+	round = 2;
+	num_ep = 1;
+	rdn_ep_num = 1;
+
+	if(argc > 1){
+		if(!strcmp(argv[1], "ss")){
+			printk(KERN_ERR "Test super speed\n");
+			dev_speed = DEV_SPEED_SUPER;
+		}
+		else if(!strcmp(argv[1], "hs")){
+			printk(KERN_ERR "Test high speed\n");
+			dev_speed = DEV_SPEED_HIGH;
+		}
+		else if(!strcmp(argv[1], "fs")){
+			printk(KERN_ERR "Test full speed\n");
+			dev_speed = DEV_SPEED_FULL;
+		}
+		else if(!strcmp(argv[1], "stop")){
+			printk(KERN_ERR "STOP!!\n");
+			g_correct = false;
+			return RET_SUCCESS;
+		}
+	}
+	if(argc > 2){
+		round = (int)simple_strtol(argv[2], &argv[2], 10);
+	}
+	if(argc > 3){
+		num_ep = (int)simple_strtol(argv[3], &argv[3], 10);
+	}
+
+	for(i=0; i<round; i++){
+		/* random new endpoint */
+		rdn_ep_num = (get_random_int()%3)+1;
+		rnd_transfer_type = (get_random_int()%3)+1;
+		if(rnd_transfer_type == EPATT_ISO){
+			rdn_interval = get_random_int()%3+1;
+			rdn_maxp = (get_random_int()%8+1)*128;
+		}
+		else if(rnd_transfer_type == EPATT_BULK){
+			rdn_interval = 0;
+			if(dev_speed == DEV_SPEED_SUPER){
+				rdn_maxp = 1024;
+			}
+			else if(dev_speed == DEV_SPEED_HIGH){
+				rdn_maxp = 512;
+			}
+			else if(dev_speed == DEV_SPEED_FULL){
+				rdn_maxp = 64;
+			}
+		}
+		else if(rnd_transfer_type == EPATT_INT){
+			rdn_interval = get_random_int()%4+1;
+			rdn_maxp = (get_random_int()%8+1)*128;
+		}
+		/* de-configure device endpoint */
+
+		/* de-configure endpoint */
+		if(i>0){
+			f_deconfig_ep(1, 0, 0, NULL, 0);
+		}
+		/* configure endpoints */
+
+		/* do loopback for 100 rounds */
+
+	}
+}
+
+/*
+ * t_u3auto_stress() - multi-endpoint random-length stress test.
+ *
+ * argv[1]: speed ("ss"/"hs"/"fs") or "stop" (clears g_correct);
+ * argv[2]: device number (index into dev_list, 1-based; 0 = none);
+ * argv[3]: number of endpoint pairs; argv[4]: "false" disables data
+ * compare; argv[5]: "true" also exercises EP0; argv[6..9]: per-endpoint
+ * transfer type ("bulk"/"intr"/"intr_pm"/"intr_pm_u2"/"isoc"/"isoc_pm").
+ * Resets the device via its hub, configures the endpoints (enlarging
+ * ISO rings), then kicks dev_stress() and per-endpoint random-length
+ * stress threads.
+ *
+ * NOTE(review): several locals can be read uninitialized depending on
+ * argc -- 'speed' (argv[1] absent/unmatched), 'dev_num' (argc <= 2, but
+ * used for interval selection and dev_list indexing), and
+ * transfer_type[]/maxp[]/interval[]/burst[]/mult[] entries for endpoints
+ * not named on the command line. Also 'int dev_num;' etc. are declared
+ * after statements, which is invalid C90.
+ */
+static int t_u3auto_stress(int argc, char** argv){
+	int ret, num_ep, i, speed;
+	int transfer_type[5];
+	int maxp[5],interval[5], burst[5],mult[5];
+	char isCompare;
+	char isEP0;
+	int length;
+	isCompare = true;
+	isEP0 = false;
+	int dev_num;
+	int port_num;
+	int dev_slot;
+	struct usb_device *udev;
+	int cur_index;
+
+	num_ep = 2;
+	dev_slot = 3;
+	ret = 0;
+	if(argc > 1){
+		if(!strcmp(argv[1], "ss")){
+			printk(KERN_ERR "Test super speed\n");
+			speed = DEV_SPEED_SUPER;
+		}
+		else if(!strcmp(argv[1], "hs")){
+			printk(KERN_ERR "Test high speed\n");
+			speed = DEV_SPEED_HIGH;
+		}
+		else if(!strcmp(argv[1], "fs")){
+			printk(KERN_ERR "Test full speed\n");
+			speed = DEV_SPEED_FULL;
+		}
+		else if(!strcmp(argv[1], "stop")){
+			printk(KERN_ERR "STOP!!\n");
+			g_correct = false;
+			return RET_SUCCESS;
+		}
+	}
+	if(argc > 2){
+		dev_num = (int)simple_strtol(argv[2], &argv[2], 10);
+	}
+	if(argc > 3){
+		/* num of eps */
+		num_ep = (int)simple_strtol(argv[3], &argv[3], 10);
+	}
+	if(argc > 4){
+		if(!strcmp(argv[4], "false")){
+			isCompare = false;
+		}
+	}
+	if(argc > 5){
+		if(!strcmp(argv[5], "true")){
+			isEP0 = true;
+		}
+	}
+	/* arg 6~9 */
+	/* Each of argv[6..9] configures endpoint cur_index = i-5 (1..4). */
+	for(i=6; i<=9; i++){
+		if(argc > i){
+			cur_index = i-5;
+			if(!strcmp(argv[i], "bulk")){
+				printk(KERN_ERR "Test bulk transfer for ep %d\n", cur_index);
+				transfer_type[cur_index] = EPATT_BULK;
+				if(speed == DEV_SPEED_SUPER){
+					maxp[cur_index] = 1024;
+					burst[cur_index] = 8;
+					mult[cur_index] = 0;
+					interval[cur_index] = 1;
+
+				}
+				else if(speed == DEV_SPEED_HIGH){
+					maxp[cur_index] = 512;
+					burst[cur_index] = 0;
+					mult[cur_index] = 0;
+					interval[cur_index] = 1;
+
+				}
+				else if(speed == DEV_SPEED_FULL){
+					maxp[cur_index] = 64;
+					burst[cur_index] = 0;
+					mult[cur_index] = 0;
+					interval[cur_index] = 1;
+
+				}
+			}
+			else if(!strcmp(argv[i], "intr")){
+				printk(KERN_ERR "Test intr transfer for ep %d\n", (cur_index));
+				transfer_type[cur_index] = EPATT_INT;
+				if(speed == DEV_SPEED_SUPER){
+					maxp[cur_index] = 1024;
+					burst[cur_index] = 0;
+					mult[cur_index] = 0;
+					interval[cur_index] = 1;
+				}
+				else if(speed == DEV_SPEED_HIGH){
+					maxp[cur_index] = 1024;
+					burst[cur_index] = 0;
+					mult[cur_index] = 0;
+					if(dev_num >= 5){
+						/* for LPM test */
+						interval[cur_index] = 4;
+					}
+					else{
+						interval[cur_index] = 1;
+					}
+				}
+				else if(speed == DEV_SPEED_FULL){
+					maxp[cur_index] = 64;
+					burst[cur_index] = 0;
+					mult[cur_index] = 0;
+					if(dev_num >= 5){
+						/* for LPM test */
+						interval[cur_index] = 3;
+					}
+					else{
+						interval[cur_index] = 1;
+					}
+				}
+			}
+			else if(!strcmp(argv[i], "intr_pm")){
+				printk(KERN_ERR "Test intr transfer for ep %d\n", (cur_index));
+				transfer_type[cur_index] = EPATT_INT;
+				if(speed == DEV_SPEED_SUPER){
+					maxp[cur_index] = 1024;
+					burst[cur_index] = 0;
+					mult[cur_index] = 0;
+					interval[cur_index] = 4;
+				}
+			}
+			else if(!strcmp(argv[i], "intr_pm_u2")){
+				printk(KERN_ERR "Test intr transfer for ep %d\n", (cur_index));
+				transfer_type[cur_index] = EPATT_INT;
+				if(speed == DEV_SPEED_SUPER){
+					maxp[cur_index] = 1024;
+					burst[cur_index] = 0;
+					mult[cur_index] = 0;
+					interval[cur_index] = 5;
+				}
+			}
+			else if(!strcmp(argv[i], "isoc")){
+				printk(KERN_ERR "Test isoc transfer for ep %d\n", (cur_index));
+				transfer_type[cur_index] = EPATT_ISO;
+				if(speed == DEV_SPEED_SUPER){
+					maxp[cur_index] = 1024;
+					burst[cur_index] = 0;
+					mult[cur_index] = 0;
+					interval[cur_index] = 4;
+				}
+				else if(speed == DEV_SPEED_HIGH){
+					maxp[cur_index] = 1024;
+					burst[cur_index] = 0;
+					mult[cur_index] = 0;
+					if(dev_num >= 5){
+						/* for LPM test */
+						interval[cur_index] = 4;
+					}
+					else{
+						interval[cur_index] = 1;
+					}
+				}
+				else if(speed == DEV_SPEED_FULL){
+					maxp[cur_index] = 1023;
+					burst[cur_index] = 0;
+					mult[cur_index] = 0;
+					if(dev_num >= 5){
+						/* for LPM test */
+						interval[cur_index] = 3;
+					}
+					else{
+						interval[cur_index] = 1;
+					}
+
+				}
+			}
+			else if(!strcmp(argv[i], "isoc_pm")){
+				printk(KERN_ERR "Test isoc transfer for ep %d\n", (cur_index));
+				transfer_type[cur_index] = EPATT_ISO;
+				if(speed == DEV_SPEED_SUPER){
+					maxp[cur_index] = 1024;
+					burst[cur_index] = 0;
+					mult[cur_index] = 0;
+					interval[cur_index] = 5;
+				}
+			}
+		}
+	}
+	udev = NULL;
+	/* ==phase 0 : device reset==*/
+	if(dev_num == 0){
+	}
+	else if(dev_num < 5){
+		/* Reset the target device through its hub port, then re-fetch it. */
+		udev = dev_list[dev_num-1];
+		port_num = udev->portnum;
+		f_hub_reset_dev(udev, dev_num, port_num, speed);
+		udev = dev_list[dev_num-1];
+	}
+	/* ==phase 1 : config EP==*/
+	for(i=1; i<=num_ep; i++){
+		/* NOTE(review): both branches assign dev_slot = 1; the ISO test is dead. */
+		if(transfer_type[i] == EPATT_ISO){
+			dev_slot = 1;
+		}
+		else{
+			dev_slot = 1;
+		}
+		dev_config_ep(i,USB_RX, transfer_type[i], maxp[i], interval[i], dev_slot, burst[i],mult[i], udev);
+		dev_config_ep(i,USB_TX, transfer_type[i], maxp[i], interval[i], dev_slot, burst[i],mult[i], udev);
+
+		f_config_ep(i,EPADD_OUT,transfer_type[i],maxp[i],interval[i],burst[i],mult[i],udev,0);
+		/* Only the last endpoint triggers the final config-ep commit (flag=1). */
+		if(i == num_ep){
+			f_config_ep(i, EPADD_IN, transfer_type[i], maxp[i], interval[i],burst[i],mult[i],udev,1);
+		}
+		else{
+			f_config_ep(i, EPADD_IN, transfer_type[i], maxp[i], interval[i],burst[i],mult[i],udev,0);
+		}
+		if(transfer_type[i] == EPATT_ISO){
+			f_ring_enlarge(EPADD_OUT, i, -1);
+			f_ring_enlarge(EPADD_OUT, i, -1);
+			f_ring_enlarge(EPADD_IN, i, -1);
+			f_ring_enlarge(EPADD_IN, i, -1);
+		}
+	}
+
+
+	g_correct = true;
+	ret=dev_stress(0,GPD_LENGTH_RDN ,GPD_LENGTH_RDN,0,num_ep, udev);
+	msleep(2000);
+	for(i=1; i<=num_ep; i++){
+		f_add_rdn_len_str_threads(dev_num, i, maxp[i], isCompare, udev, isEP0);
+	}
+	if(ret){
+		printk(KERN_ERR "stress request failed!!!!!!!!!!\n");
+		return ret;
+	}
+	return ret;
+}
+
+/*
+ * t_u3auto_isoc_frame_id() - isochronous loopback with frame-ID checking
+ * enabled (g_iso_frame) for 10 rounds.
+ *
+ * argv[1]: speed ("ss"/"hs"/"fs", default high speed). Resets and
+ * re-enumerates the device, configures one ISO OUT and one ISO IN
+ * endpoint, sets g_iso_frame, then runs 10 GPD loopback rounds. On any
+ * failure the port is suspended (f_power_suspend) before returning the
+ * error; g_iso_frame is cleared only on the success path.
+ */
+static int t_u3auto_isoc_frame_id(int argc, char** argv){
+	int ret,length,start_add;
+	char bdp;
+	int gpd_buf_size,bd_buf_size;
+	int transfer_type;
+	int maxp;
+	int bInterval;
+	int ep_out_num, ep_in_num;
+	int speed;
+	int mult_dev, mult, burst;
+	int dram_offset, extension;
+	int i;
+
+	ret = 0;
+	speed = DEV_SPEED_HIGH;;
+	transfer_type = EPATT_ISO;
+	bInterval = 1;
+	ep_out_num = 1;
+	ep_in_num = 1;
+	length = 10241;
+	start_add = 0;
+	mult_dev = 3;
+	mult = 0;
+	burst = 0;
+	dram_offset = 0;
+	extension = 0;
+
+	if(argc > 1){
+		if(!strcmp(argv[1], "ss")){
+			printk(KERN_ERR "Test super speed\n");
+			speed = DEV_SPEED_SUPER;
+		}
+		else if(!strcmp(argv[1], "hs")){
+			printk(KERN_ERR "Test high speed\n");
+			speed = DEV_SPEED_HIGH;
+		}
+		else if(!strcmp(argv[1], "fs")){
+			printk(KERN_ERR "Test full speed\n");
+			speed = DEV_SPEED_FULL;
+		}
+	}
+	/* ISO max packet size per speed (1023 is the FS ISO limit). */
+	if(speed == DEV_SPEED_SUPER){
+		maxp = 1024;
+	}
+	else if(speed == DEV_SPEED_HIGH){
+		maxp = 1024;
+	}
+	else if(speed == DEV_SPEED_FULL){
+		maxp = 1023;
+	}
+
+
+	/* start test */
+	start_port_reenabled(0, speed);
+	ret=dev_reset(speed, NULL);
+	if(ret){
+		printk(KERN_ERR "device reset failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	ret = f_disable_slot();
+	if(ret){
+		printk(KERN_ERR "disable slot failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	ret = f_reenable_port(0);
+	if(ret != RET_SUCCESS){
+		printk(KERN_ERR "device reenable failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	ret = f_enable_slot(NULL);
+	if(ret){
+		printk(KERN_ERR "enable slot failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	ret=f_address_slot(false, NULL);
+	if(ret){
+		printk(KERN_ERR "address slot failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	ret=dev_config_ep(ep_out_num,USB_RX,transfer_type,maxp,bInterval,mult_dev,burst,mult,NULL);
+	if(ret)
+	{
+		printk(KERN_ERR "config dev EP fail!!\n");
+		return ret;
+	}
+	ret=dev_config_ep(ep_in_num,USB_TX,transfer_type,maxp,bInterval,mult_dev,burst,mult,NULL);
+	if(ret)
+	{
+		printk(KERN_ERR "config dev EP fail!!\n");
+		return ret;
+	}
+
+	ret = f_config_ep(ep_out_num,EPADD_OUT,transfer_type,maxp,bInterval,burst,mult, NULL,0);
+	if(ret)
+	{
+		printk(KERN_ERR "config EP fail!!\n");
+		return ret;
+	}
+
+	ret = f_config_ep(ep_in_num,EPADD_IN,transfer_type,maxp,bInterval,burst,mult, NULL,1);
+	if(ret)
+	{
+		printk(KERN_ERR "config EP fail!!\n");
+		return ret;
+	}
+
+	/* 10 round */
+	g_iso_frame = true;
+	bdp=1;
+	gpd_buf_size = length;
+	bd_buf_size = 4096;
+	for(i=0; i<10; i++){
+		ret=dev_loopback(bdp,length,gpd_buf_size,bd_buf_size,dram_offset,extension,NULL);
+		if(ret)
+		{
+			printk(KERN_ERR "loopback request fail!!\n");
+			f_power_suspend();
+			return ret;
+		}
+		ret = f_loopback_loop_gpd(
+			ep_out_num, ep_in_num, length, start_add, gpd_buf_size, NULL);
+		if(ret)
+		{
+			printk(KERN_ERR "loopback fail!!\n");
+			printk(KERN_ERR "length : %d\n",length);
+			f_power_suspend();
+			return ret;
+		}
+		ret=dev_polling_status(NULL);
+		if(ret)
+		{
+			f_power_suspend();
+			printk(KERN_ERR "query request fail!!\n");
+			return ret;
+		}
+	}
+
+	g_iso_frame = false;
+	return RET_SUCCESS;
+}
+
+/*
+ * t_ellysis_TD7_36() - Ellisys TD7.36 compliance scenario: enumerate a
+ * HID-like device with a control-request sequence, then poll its
+ * interrupt IN endpoint.
+ *
+ * Sequence: reset HCD; GET_DESCRIPTOR(device, 8) then full device
+ * descriptor; GET_DESCRIPTOR(config, 9) then 40 bytes; SET_CONFIGURATION(1);
+ * class SET_IDLE; configure EP1 IN as interrupt (maxp 4, interval 7);
+ * read 10 interrupt reports and dump them. Returns last request status.
+ *
+ * NOTE(review): the dump loop below indexes transfer_buffer with the
+ * outer counter 'i' instead of the byte counter 'j'
+ * (tmp = urb_rx->transfer_buffer+i) -- likely a copy/paste bug; compare
+ * t_class_keyboard which uses the byte index. Locals virt_dev,
+ * buffer_rx and buffer are unused.
+ */
+static int t_ellysis_TD7_36(int argc, char** argv){
+	int ret;
+	struct device *dev;
+	struct usb_device *udev, *rhdev;
+	struct xhci_virt_device *virt_dev;
+	struct xhci_hcd *xhci;
+	struct urb *urb;
+	struct usb_ctrlrequest *dr;
+	struct usb_config_descriptor *desc;
+	int i,j;
+	char *tmp;
+	struct urb *urb_rx;
+	struct usb_host_endpoint *ep_rx;
+	int ep_index_rx;
+	void *buffer_rx;
+	char *buffer;
+	ret = 0;
+
+	if(u3auto_hcd_reset() != RET_SUCCESS)
+		return RET_FAIL;
+	xhci = hcd_to_xhci(my_hcd);
+	rhdev = my_hcd->self.root_hub;
+	udev = rhdev->children[g_port_id-1];
+	dev = xhci_to_hcd(xhci)->self.controller;/* dma stream buffer */
+	xhci_dbg(xhci, "device speed %d\n", udev->speed);
+	/* get descriptor (device) */
+	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+	dr->bRequestType = USB_DIR_IN;
+	dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+	dr->wValue = cpu_to_le16((USB_DT_DEVICE << 8) + 0);
+	dr->wIndex = cpu_to_le16(0);
+	dr->wLength = cpu_to_le16(8);
+	desc = kmalloc(8, GFP_KERNEL);
+	memset(desc, 0, 8);
+	urb = alloc_ctrl_urb(dr, desc, udev);
+	ret = f_ctrlrequest(urb, udev);
+	kfree(dr);
+	kfree(desc);
+	usb_free_urb(urb);
+	/* get descriptor (device) */
+	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+	dr->bRequestType = USB_DIR_IN;
+	dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+	dr->wValue = cpu_to_le16((USB_DT_DEVICE << 8) + 0);
+	dr->wIndex = cpu_to_le16(0);
+	dr->wLength = cpu_to_le16(USB_DT_DEVICE_SIZE);
+	desc = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
+	memset(desc, 0, USB_DT_DEVICE_SIZE);
+	urb = alloc_ctrl_urb(dr, desc, udev);
+	ret = f_ctrlrequest(urb, udev);
+	kfree(dr);
+	kfree(desc);
+	usb_free_urb(urb);
+	/* get descriptor (configure) */
+	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+	dr->bRequestType = USB_DIR_IN;
+	dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+	dr->wValue = cpu_to_le16((USB_DT_CONFIG << 8) + 0);
+	dr->wIndex = cpu_to_le16(0);
+	dr->wLength = cpu_to_le16(USB_DT_CONFIG_SIZE);
+	desc = kmalloc(USB_DT_CONFIG_SIZE, GFP_KERNEL);
+	memset(desc, 0, USB_DT_CONFIG_SIZE);
+	urb = alloc_ctrl_urb(dr, desc, udev);
+	ret = f_ctrlrequest(urb, udev);
+	kfree(dr);
+	kfree(desc);
+	usb_free_urb(urb);
+	/* get descriptor (configure) */
+	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+	dr->bRequestType = USB_DIR_IN;
+	dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+	dr->wValue = cpu_to_le16((USB_DT_CONFIG << 8) + 0);
+	dr->wIndex = cpu_to_le16(0);
+	dr->wLength = cpu_to_le16(40);
+	desc = kmalloc(40, GFP_KERNEL);
+	memset(desc, 0, 40);
+	urb = alloc_ctrl_urb(dr, desc, udev);
+	ret = f_ctrlrequest(urb, udev);
+	kfree(dr);
+	kfree(desc);
+	usb_free_urb(urb);
+	/* set configuration */
+	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+	dr->bRequestType = USB_DIR_OUT;
+	dr->bRequest = USB_REQ_SET_CONFIGURATION;
+	dr->wValue = cpu_to_le16(1);
+	dr->wIndex = cpu_to_le16(0);
+	dr->wLength = cpu_to_le16(0);
+	urb = alloc_ctrl_urb(dr, NULL, udev);
+	ret = f_ctrlrequest(urb,udev);
+	kfree(dr);
+	usb_free_urb(urb);
+	/* set idle */
+	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+	dr->bRequestType = USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+	dr->bRequest = 0x0A;
+	dr->wValue = cpu_to_le16(0);
+	dr->wIndex = cpu_to_le16(0);
+	dr->wLength = cpu_to_le16(0);
+	urb = alloc_ctrl_urb(dr, NULL, udev);
+	ret = f_ctrlrequest(urb,udev);
+	kfree(dr);
+	usb_free_urb(urb);
+	/* get descriptor (HID report) */
+	ret = f_config_ep(1, EPADD_IN, EPATT_INT, 4, 7,0,0, udev, 1);
+	/* interrupt input */
+	ep_rx = udev->ep_in[1];
+	ep_index_rx = xhci_get_endpoint_index(&ep_rx->desc);
+	xhci_err(xhci, "[INPUT]\n");
+	for(i=0; i<10; i++){
+		urb_rx = usb_alloc_urb(0, GFP_KERNEL);
+		ret = f_fill_urb(urb_rx,1,4,0,EPADD_IN, 0, 4, udev);
+		if(ret){
+			xhci_err(xhci, "[ERROR]fill rx urb Error!!\n");
+			return RET_FAIL;
+		}
+		urb_rx->transfer_flags &= ~URB_ZERO_PACKET;
+		ret = f_queue_urb(urb_rx,1,udev);
+		if(ret){
+			xhci_err(xhci, "[ERROR]rx urb transfer failed!!\n");
+			return RET_FAIL;
+		}
+		dma_sync_single_for_cpu(dev,urb_rx->transfer_dma, 4,DMA_BIDIRECTIONAL);
+		/* NOTE(review): should this index with 'j' rather than 'i'? */
+		for(j=0; j<urb_rx->transfer_buffer_length; j++){
+			tmp = urb_rx->transfer_buffer+i;
+			xhci_err(xhci, "%x ", *tmp);
+		}
+		xhci_err(xhci, "\n");
+		usb_free_urb(urb_rx);
+	}
+
+	return ret;
+}
+
+/*
+ * t_class_keyboard() - drive a HID keyboard class device: set its
+ * configuration, idle rate and report (caps-lock LED byte 0x01), then
+ * poll its interrupt IN endpoint and dump the reports.
+ *
+ * NOTE(review): the dump loop reuses the outer counter 'i' as its index,
+ * so after the first report 'i' jumps to transfer_buffer_length and the
+ * intended 100 polling rounds are cut short -- compare
+ * t_ellysis_TD7_36 which uses a separate 'j'. Locals virt_dev and
+ * buffer_rx are unused.
+ */
+static int t_class_keyboard(int argc, char** argv){
+	int ret;
+	struct device *dev;
+	struct usb_device *udev, *rhdev;
+	struct xhci_virt_device *virt_dev;
+	struct xhci_hcd *xhci;
+	struct urb *urb;
+	struct usb_ctrlrequest *dr;
+	int i;
+	char *tmp;
+	struct urb *urb_rx;
+	struct usb_host_endpoint *ep_rx;
+	int ep_index_rx;
+	void *buffer_rx;
+	char *buffer;
+	ret = 0;
+
+	if(u3auto_hcd_reset() != RET_SUCCESS)
+		return RET_FAIL;
+
+	/* set configuration */
+	xhci = hcd_to_xhci(my_hcd);
+	rhdev = my_hcd->self.root_hub;
+	udev = rhdev->children[g_port_id-1];
+	dev = xhci_to_hcd(xhci)->self.controller;/* dma stream buffer */
+	xhci_dbg(xhci, "device speed %d\n", udev->speed);
+	/* set configuration */
+	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+	dr->bRequestType = USB_DIR_OUT;
+	dr->bRequest = USB_REQ_SET_CONFIGURATION;
+	dr->wValue = cpu_to_le16(1);
+	dr->wIndex = cpu_to_le16(0);
+	dr->wLength = cpu_to_le16(0);
+	urb = alloc_ctrl_urb(dr, NULL, udev);
+	ret = f_ctrlrequest(urb,udev);
+	kfree(dr);
+	usb_free_urb(urb);
+	/* set idle */
+	/* HID class request 0x0a = SET_IDLE, bmRequestType 0x21 = host-to-device|class|interface. */
+	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+	dr->bRequestType = 0x21;
+	dr->bRequest = 0x0a;
+	dr->wValue = cpu_to_le16(0);
+	dr->wIndex = cpu_to_le16(0);
+	dr->wLength = cpu_to_le16(0);
+	urb = alloc_ctrl_urb(dr, NULL, udev);
+	ret = f_ctrlrequest(urb,udev);
+	kfree(dr);
+	usb_free_urb(urb);
+	/* set report */
+	/* HID class request 0x09 = SET_REPORT, output report (0x200), 1 byte = 0x01. */
+	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+	dr->bRequestType = 0x21;
+	dr->bRequest = 0x09;
+	dr->wValue = cpu_to_le16(0x200);
+	dr->wIndex = cpu_to_le16(0);
+	dr->wLength = cpu_to_le16(1);
+	buffer = kmalloc(1, GFP_KERNEL);
+	*buffer = 0x01;
+	urb = alloc_ctrl_urb(dr, buffer, udev);
+	ret = f_ctrlrequest(urb,udev);
+	kfree(dr);
+	usb_free_urb(urb);
+	kfree(buffer);
+
+	/* config an interrupt IN endpoint, ep_num=1 */
+	ret = f_config_ep(1, EPADD_IN, EPATT_INT, 8, 10,0,0, udev, 1);
+
+	/* continuous queue interrupt transfer for 10 times */
+	ep_rx = udev->ep_in[1];
+	ep_index_rx = xhci_get_endpoint_index(&ep_rx->desc);
+	xhci_err(xhci, "[INPUT]\n");
+	for(i=0; i<100; i++){
+		urb_rx = usb_alloc_urb(0, GFP_KERNEL);
+		ret = f_fill_urb(urb_rx,1,8,0,EPADD_IN, 0, 8, udev);
+		if(ret){
+			xhci_err(xhci, "[ERROR]fill rx urb Error!!\n");
+			return RET_FAIL;
+		}
+		urb_rx->transfer_flags &= ~URB_ZERO_PACKET;
+		ret = f_queue_urb(urb_rx,1,udev);
+		if(ret){
+			xhci_err(xhci, "[ERROR]rx urb transfer failed!!\n");
+			return RET_FAIL;
+		}
+		dma_sync_single_for_cpu(dev,urb_rx->transfer_dma, 8,DMA_BIDIRECTIONAL);
+		/* NOTE(review): inner loop clobbers outer 'i' (see function header). */
+		for(i=0; i<urb_rx->transfer_buffer_length; i++){
+			tmp = urb_rx->transfer_buffer+i;
+			xhci_err(xhci, "%x ", *tmp);
+		}
+		xhci_err(xhci, "\n");
+		usb_free_urb(urb_rx);
+	}
+
+	return ret;
+}
+
+/* Random-stop stress test: configure one endpoint pair on both the device
+ * and the host side, then ask both sides to randomly stop and restart the
+ * transfers (dev_random_stop on the device, f_random_stop on the host).
+ * argv: [1]=speed ss/hs/fs, [2]/[3]=transfer type for EP1/EP2
+ * (bulk/intr/isoc), [4]/[5]=EP numbers, [6]/[7]=directions ("OUT" or IN),
+ * [8]/[9]=max packet sizes, [10]=gpd buffer size, [11]=bd buffer size,
+ * [12]/[13]=stop counts for each endpoint.  Defaults: HS, bulk/bulk,
+ * EP1 OUT / EP2 IN, maxp 512, 16KB transfer, 3 stops per endpoint.
+ * NOTE(review): 'bdp' is declared but never used; 'speed' is parsed but
+ * only affects the log message -- confirm whether it should be forwarded.
+ */
+static int t_u3auto_randomstop_dev(int argc, char** argv){
+	int ret;
+	int speed, transfer_type_1, transfer_type_2, maxp_1, maxp_2, gpd_buf_size, bdp
+		, bd_buf_size, ep_1_num, ep_2_num, dir_1, dir_2, dev_dir_1, dev_dir_2
+		, urb_dir_1, urb_dir_2, length;
+	int stop_count_1, stop_count_2;
+
+	/* static parameters */
+	int bInterval = 1;
+	int mult_dev = 1;
+	int burst = 0;
+
+	speed = DEV_SPEED_HIGH;
+	transfer_type_1 = transfer_type_2 = EPATT_BULK;
+	maxp_1 = maxp_2 = 512;
+	gpd_buf_size = 16*1024;
+	bd_buf_size = 0;
+	ep_1_num = 1;
+	dir_1 = EPADD_OUT;
+	dev_dir_1 = USB_RX;
+	urb_dir_1 = URB_DIR_OUT;
+
+	ep_2_num = 2;
+	dir_2 = EPADD_IN;
+	dev_dir_2 = USB_TX;
+	urb_dir_2 = URB_DIR_IN;
+
+	bInterval =0;
+	mult_dev =1;
+	burst =4;
+
+	stop_count_1 = stop_count_2 = 3;
+
+	/* command-line overrides for the defaults above */
+	if(argc > 1){
+		if(!strcmp(argv[1], "ss")){
+			printk(KERN_ERR "Test super speed\n");
+			speed = DEV_SPEED_SUPER; /* TODO: superspeed */
+		}
+		else if(!strcmp(argv[1], "hs")){
+			printk(KERN_ERR "Test high speed\n");
+			speed = DEV_SPEED_HIGH;
+		}
+		else if(!strcmp(argv[1], "fs")){
+			printk(KERN_ERR "Test full speed\n");
+			speed = DEV_SPEED_FULL;
+		}
+	}
+	if(argc > 2){
+		if(!strcmp(argv[2], "bulk")){
+			printk(KERN_ERR "Tx BULK transfer\n");
+			transfer_type_1 = EPATT_BULK;
+		}
+		else if(!strcmp(argv[2], "intr")){
+			printk(KERN_ERR "Tx INTERRUPT transfer\n");
+			transfer_type_1 = EPATT_INT;
+		}
+		else if(!strcmp(argv[2], "isoc")){
+			printk(KERN_ERR "Tx ISOC transfer\n");
+			transfer_type_1 = EPATT_ISO;
+		}
+	}
+	if(argc > 3){
+		if(!strcmp(argv[3], "bulk")){
+			printk(KERN_ERR "Rx BULK transfer\n");
+			transfer_type_2 = EPATT_BULK;
+		}
+		else if(!strcmp(argv[3], "intr")){
+			printk(KERN_ERR "Rx INTERRUPT transfer\n");
+			transfer_type_2 = EPATT_INT;
+		}
+		else if(!strcmp(argv[3], "isoc")){
+			printk(KERN_ERR "Rx ISOC transfer\n");
+			transfer_type_2 = EPATT_ISO;
+		}
+	}
+	if(argc > 4){
+		ep_1_num= (int)simple_strtol(argv[4], &argv[4], 10);
+	}
+	if(argc > 5){
+		ep_2_num= (int)simple_strtol(argv[5], &argv[5], 10);
+	}
+	if(argc > 6){
+		if(!strcmp(argv[6], "OUT")){
+			dir_1 = EPADD_OUT;
+			dev_dir_1 = USB_RX;
+			urb_dir_1 = URB_DIR_OUT;
+		}
+		else{
+			dir_1 = EPADD_IN;
+			dev_dir_1 = USB_TX;
+			urb_dir_1 = URB_DIR_IN;
+		}
+	}
+	if(argc > 7){
+		if(!strcmp(argv[7], "OUT")){
+			dir_2 = EPADD_OUT;
+			dev_dir_2 = USB_RX;
+			urb_dir_2 = URB_DIR_OUT;
+		}
+		else{
+			dir_2 = EPADD_IN;
+			dev_dir_2 = USB_TX;
+			urb_dir_2 = URB_DIR_IN;
+		}
+	}
+	if(argc > 8){
+		maxp_1 = (int)simple_strtol(argv[8], &argv[8], 10);
+	}
+	if(argc > 9){
+		maxp_2 = (int)simple_strtol(argv[9], &argv[9], 10);
+	}
+	if(argc > 10){
+		gpd_buf_size = (int)simple_strtol(argv[10], &argv[10], 10);
+	}
+	if(argc > 11){
+		bd_buf_size = (int)simple_strtol(argv[11], &argv[11], 10);
+	}
+	if(argc > 12){
+		stop_count_1 = (int)simple_strtol(argv[12], &argv[12], 10);
+	}
+	if(argc > 13){
+		stop_count_2 = (int)simple_strtol(argv[13], &argv[13], 10);
+	}
+	/* config EP: device side first, then the matching host-side contexts */
+	ret=dev_config_ep(ep_1_num,dev_dir_1,transfer_type_1,maxp_1, bInterval,mult_dev,burst,0, NULL);
+	if(ret)
+	{
+		printk(KERN_ERR "config dev EP fail!!\n");
+		return ret;
+	}
+
+	ret=dev_config_ep(ep_2_num,dev_dir_2,transfer_type_2,maxp_2, bInterval,mult_dev,burst,0, NULL);
+	if(ret)
+	{
+		printk(KERN_ERR "config dev EP fail!!\n");
+		return ret;
+	}
+
+	ret = f_config_ep(ep_1_num,dir_1,transfer_type_1,maxp_1,bInterval,8,0, NULL,0);
+	if(ret)
+	{
+		printk(KERN_ERR "config EP fail!!\n");
+		return ret;
+	}
+
+	ret = f_config_ep(ep_2_num,dir_2,transfer_type_2,maxp_2,bInterval,8,0, NULL,1);
+	if(ret)
+	{
+		printk(KERN_ERR "config EP fail!!\n");
+		return ret;
+	}
+
+
+	length = gpd_buf_size;
+
+	/* arm the device's random-stop behaviour, then drive it from the host */
+	ret=dev_random_stop(length, gpd_buf_size, bd_buf_size, dev_dir_1, dev_dir_2, stop_count_1, stop_count_2);
+	if(ret){
+		printk(KERN_ERR "random stop request failed!!!!!!!!!!\n");
+		return ret;
+	}
+	ret = f_random_stop(ep_1_num, ep_2_num, stop_count_1, stop_count_2, urb_dir_1, urb_dir_2, length);
+
+
+	return ret;
+}
+
+
+/* Spawn a test thread that rings the doorbell of one endpoint at random
+ * intervals.  argv: [1]="out"/"in" (default out), [2]=ep number (default 1).
+ * The device context index (DCI) is 2*ep-1 for OUT, 2*ep for IN, 0 for EP0.
+ */
+static int t_ring_random_ring_doorbell(int argc, char** argv){
+	struct xhci_hcd *xhci;
+	int ep_num = 1;
+	int ep_dir = EPADD_OUT;
+	int ep_index;
+
+	if(argc > 1){
+		if(strcmp(argv[1], "out") == 0){
+			printk(KERN_ERR "OUT EP\n");
+			ep_dir = EPADD_OUT;
+		}
+		else if(strcmp(argv[1], "in") == 0){
+			printk(KERN_ERR "IN EP\n");
+			ep_dir = EPADD_IN;
+		}
+	}
+	if(argc > 2){
+		ep_num = (int)simple_strtol(argv[2], &argv[2], 10);
+		printk(KERN_ERR "ep num set to %d\n", ep_num);
+	}
+	/* map (direction, number) to the device context index */
+	if(ep_num == 0)
+		ep_index = 0;
+	else
+		ep_index = (ep_dir == EPADD_OUT) ? (ep_num * 2 - 1) : (ep_num * 2);
+
+	xhci = hcd_to_xhci(my_hcd);
+	f_add_random_ring_doorbell_thread(xhci, g_slot_id, ep_index);
+	return RET_SUCCESS;
+}
+
+/* Spawn a test thread that randomly accesses port registers.
+ * argv: [1]=port id, [2]=port revision, [3]="true" if port power must be
+ * kept on during the test.  Returns RET_FAIL when the mandatory arguments
+ * are missing (the original printed usage but then still read argv[1..3]
+ * out of bounds), RET_SUCCESS otherwise.
+ */
+static int t_power_random_access_regs(int argc, char** argv){
+	int port_id, port_rev, power_required;
+	struct xhci_hcd *xhci;
+
+	if(argc < 4){
+		printk(KERN_ERR "arg: port_id port_rev is_power_required\n");
+		/* BUGFIX: bail out instead of dereferencing missing argv entries */
+		return RET_FAIL;
+	}
+	port_id = (int)simple_strtol(argv[1], &argv[1], 10);
+	port_rev = (int)simple_strtol(argv[2], &argv[2], 10);
+	if(!strcmp(argv[3], "true")){
+		power_required = 1;
+	}
+	else{
+		power_required = 0;
+	}
+	xhci = hcd_to_xhci(my_hcd);
+	f_add_random_access_reg_thread(xhci, port_id, port_rev, power_required);
+	return RET_SUCCESS;
+}
+
+/* Spawn a test thread that issues Stop Endpoint commands on one endpoint at
+ * random intervals, and raise the global g_test_random_stop_ep flag so the
+ * transfer paths tolerate the stops.  argv: [1]="out"/"in" (default out),
+ * [2]=ep number (default 1).  DCI is 2*ep-1 for OUT, 2*ep for IN, 0 for EP0.
+ */
+static int t_ring_random_stop_ep(int argc, char** argv){
+	struct xhci_hcd *xhci;
+	int ep_num = 1;
+	int ep_dir = EPADD_OUT;
+	int ep_index;
+
+	if(argc > 1){
+		if(strcmp(argv[1], "out") == 0){
+			printk(KERN_ERR "OUT EP\n");
+			ep_dir = EPADD_OUT;
+		}
+		else if(strcmp(argv[1], "in") == 0){
+			printk(KERN_ERR "IN EP\n");
+			ep_dir = EPADD_IN;
+		}
+	}
+	if(argc > 2){
+		ep_num = (int)simple_strtol(argv[2], &argv[2], 10);
+		printk(KERN_ERR "ep num set to %d\n", ep_num);
+	}
+	/* map (direction, number) to the device context index */
+	if(ep_num == 0)
+		ep_index = 0;
+	else
+		ep_index = (ep_dir == EPADD_OUT) ? (ep_num * 2 - 1) : (ep_num * 2);
+
+	xhci = hcd_to_xhci(my_hcd);
+	g_test_random_stop_ep = true;
+	f_add_random_stop_ep_thread(xhci, g_slot_id, ep_index);
+	return RET_SUCCESS;
+}
+
+/* Configure one host-side endpoint from command-line arguments.
+ * argv: [1]=transfer type bulk/intr/isoc (default bulk), [2]=direction
+ * out/in (default out), [3]=max packet size (default 512), [4]=bInterval
+ * (default 0), [5]=ep number (default 1), [6]=is_config flag, [7]=burst,
+ * [8]=mult.  Returns the status of f_config_ep().
+ */
+static int t_loopback_configep(int argc, char** argv){
+	int xfer_type = EPATT_BULK;
+	int ep_dir = EPADD_OUT;
+	int maxp = 512;
+	int ep_num = 1;
+	int interval = 0;
+	char is_config = 0;
+	int mult = 0;
+	int burst = 0;
+
+	if(argc > 1){
+		if(strcmp(argv[1], "bulk") == 0){
+			printk(KERN_ERR "Test bulk transfer\n");
+			xfer_type = EPATT_BULK;
+		}
+		else if(strcmp(argv[1], "intr") == 0){
+			printk(KERN_ERR "Test intr transfer\n");
+			xfer_type = EPATT_INT;
+		}
+		else if(strcmp(argv[1], "isoc") == 0){
+			printk(KERN_ERR "Test isoc transfer\n");
+			xfer_type = EPATT_ISO;
+		}
+	}
+	if(argc > 2){
+		if(strcmp(argv[2], "out") == 0){
+			printk(KERN_ERR "OUT EP\n");
+			ep_dir = EPADD_OUT;
+		}
+		else if(strcmp(argv[2], "in") == 0){
+			printk(KERN_ERR "IN EP\n");
+			ep_dir = EPADD_IN;
+		}
+	}
+	if(argc > 3){
+		maxp = (int)simple_strtol(argv[3], &argv[3], 10);
+		printk(KERN_ERR "maxp set to %d\n", maxp);
+	}
+	if(argc > 4){
+		interval = (int)simple_strtol(argv[4], &argv[4], 10);
+		printk(KERN_ERR "interval set to %d\n", interval);
+	}
+	if(argc > 5){
+		ep_num = (int)simple_strtol(argv[5], &argv[5], 10);
+		printk(KERN_ERR "ep num set to %d\n", ep_num);
+	}
+	if(argc > 6){
+		is_config = (int)simple_strtol(argv[6], &argv[6], 10);
+		printk(KERN_ERR "is_config set to %d\n", is_config);
+	}
+	if(argc > 7){
+		burst = (int)simple_strtol(argv[7], &argv[7], 10);
+		printk(KERN_ERR "burst set to %d\n", burst);
+	}
+	if(argc > 8){
+		mult = (int)simple_strtol(argv[8], &argv[8], 10);
+		printk(KERN_ERR "mult set to %d\n", mult);
+	}
+	return f_config_ep(ep_num, ep_dir, xfer_type, maxp, interval, burst, mult, NULL, is_config);
+}
+
+/* Tear down one endpoint (or all endpoints) on the host side.
+ * argv: [1]="out"/"in"/"all" (default out), [2]=ep number (default 1),
+ * [3]=is_config flag forwarded to f_deconfig_ep.
+ * Returns RET_SUCCESS (the original was declared int but had no return
+ * statement, so callers received an unspecified value).
+ */
+static int t_loopback_deconfigep(int argc, char** argv){
+	/* all or one-by-one */
+	int ep_num, ep_dir;
+	char is_config;
+	char is_all;
+
+
+	ep_dir = EPADD_OUT;
+	ep_num = 1;
+	is_all = false;
+	is_config=0;
+
+
+	if(argc > 1){
+		if(!strcmp(argv[1], "out")){
+			printk(KERN_ERR "OUT EP\n");
+			ep_dir = EPADD_OUT;
+		}
+		else if(!strcmp(argv[1], "in")){
+			printk(KERN_ERR "IN EP\n");
+			ep_dir = EPADD_IN;
+		}
+		else if(!strcmp(argv[1], "all")){
+			printk(KERN_ERR "all EP\n");
+			is_all = true;
+		}
+
+	}
+	if(argc > 2){
+		ep_num = (int)simple_strtol(argv[2], &argv[2], 10);
+		printk(KERN_ERR "ep num set to %d\n", ep_num);
+	}
+	if(argc > 3){
+		is_config = (int)simple_strtol(argv[3], &argv[3], 10);
+		printk(KERN_ERR "is_config set to %d\n", is_config);
+	}
+	f_deconfig_ep(is_all, ep_num, ep_dir, NULL, is_config);
+	/* BUGFIX: the function is declared int but fell off the end */
+	return RET_SUCCESS;
+}
+
+/* Run one or more host<->device loopback rounds.
+ * argv: [1]=length (default 65535), [2]=start address offset (default 0),
+ * [3]/[4]=OUT/IN ep numbers, [5]=round count (rounds > 1 randomize length
+ * and offset each iteration), [6]=scatter-gather entry count (0 = plain).
+ * Each round arms the device (dev_loopback), runs the host-side loop, then
+ * polls the device status.  Returns 0 on success, first error otherwise.
+ */
+static int t_loopback_loop(int argc, char** argv){
+	int ret,length,start_add;
+	char bdp;
+	int gpd_buf_size,bd_buf_size;
+	int ep_out_num, ep_in_num;
+	int sg_len;
+	int round, i;
+	int dram_offset, extension;
+
+	ret =0;
+	ep_out_num = 1;
+	ep_in_num = 1;
+	length = 65535;
+	start_add = 0;	/* BUGFIX: was left uninitialized when argc <= 2 */
+	round=1;
+	dram_offset = 0;
+	extension = 0;
+	sg_len = 0;
+
+	if(argc > 1){
+		length = (int)simple_strtol(argv[1], &argv[1], 10);
+		printk(KERN_ERR "length set to %d\n", length);
+	}
+	if(argc > 2){
+		start_add = (int)simple_strtol(argv[2], &argv[2], 10);
+		printk(KERN_ERR "start add offset set to %d\n", start_add);
+	}
+	if(argc > 3){
+		ep_out_num = (int)simple_strtol(argv[3], &argv[3], 10);
+		printk(KERN_ERR "ep out num set to %d\n", ep_out_num);
+	}
+	if(argc > 4){
+		ep_in_num = (int)simple_strtol(argv[4], &argv[4], 10);
+		printk(KERN_ERR "ep in num set to %d\n", ep_in_num);
+	}
+
+	if(argc > 5){
+		round = (int)simple_strtol(argv[5], &argv[5], 10);
+		printk(KERN_ERR "Execute %d round\n", round);
+	}
+
+	if(argc > 6){
+		sg_len = (int)simple_strtol(argv[6], &argv[6], 10);
+		printk(KERN_ERR "sg_len set to %d\n", sg_len);
+	}
+
+	bdp=1;
+	gpd_buf_size=length;
+	bd_buf_size=4096;
+	/* TODO: device should turn off extension length feature */
+#if 0
+	if(((length-10)%(bd_buf_size+6))<7){
+		length+=12;
+	}
+#endif
+	for(i=0; i<round; i++){
+		/* multi-round runs pick a fresh random length/offset per round */
+		if(round==1){}
+		else{
+			length = (get_random_int() % 65535) + 1;
+			start_add = get_random_int() % 64;
+			gpd_buf_size = length;
+			printk(KERN_ERR "ROUND[%d] length[%d] start_add[%d]\n", i, length, start_add);
+		}
+		ret=dev_loopback(bdp,length,gpd_buf_size,bd_buf_size,dram_offset,extension,NULL);
+		if(ret)
+		{
+			printk(KERN_ERR "loopback request fail!!\n");
+			return ret;
+		}
+		if(sg_len == 0){
+			ret = f_loopback_loop(ep_out_num, ep_in_num, length, start_add,NULL);
+		}
+		else{
+			ret = f_loopback_sg_loop(ep_out_num,ep_in_num,length,start_add,sg_len,NULL);
+		}
+		if(ret)
+		{
+			printk(KERN_ERR "loopback fail!!\n");
+			printk(KERN_ERR "length : %d\n",length);
+			return ret;
+		}
+
+		/* ==phase 3: get device status==*/
+		ret=dev_polling_status(NULL);
+		if(ret)
+		{
+			printk(KERN_ERR "query request fail!!\n");
+			return ret;
+		}
+	}
+	return ret;
+}
+
+/* Suspend the host controller; thin wrapper over f_power_suspend(). */
+static int t_power_suspend(int argc, char** argv){
+	return f_power_suspend();
+}
+
+/* Resume the host controller; thin wrapper over f_power_resume(). */
+static int t_power_resume(int argc, char** argv){
+	return f_power_resume();
+}
+
+/* Put one root-hub port into the U3 (suspended) link state by writing
+ * PLS=3 with the link-state strobe, then poll until PLS reads back 3.
+ * argv: [1]=port id (defaults to the global g_port_id).
+ * Returns RET_FAIL if the port does not reach U3 within 30ms.
+ */
+static int t_power_suspendport(int argc, char** argv){
+	u32 __iomem *addr;
+	int temp;
+	int port_id;
+	struct xhci_hcd *xhci;
+
+	port_id = g_port_id;
+
+	if(argc > 1){
+		port_id = (int)simple_strtol(argv[1], &argv[1], 10);
+	}
+
+	xhci = hcd_to_xhci(my_hcd);
+
+	/* PORTSC register of the selected port */
+	addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+	temp = xhci_readl(xhci, addr);
+	xhci_dbg(xhci, "before reset port %d = 0x%x\n", port_id-1, temp);
+	temp = xhci_port_state_to_neutral(temp);
+	/* clear PLS field (bits 8:5), then request U3 with the LWS strobe */
+	temp = (temp & ~(0xf << 5));
+	temp = (temp | (3 << 5) | PORT_LINK_STROBE);
+	xhci_writel(xhci, temp, addr);
+	mtk_xhci_handshake(xhci, addr, (15<<5), (3<<5), 30*1000);
+	temp = xhci_readl(xhci, addr);
+	if(PORT_PLS_VALUE(temp) != 3){
+		xhci_err(xhci, "port not enter U3 state\n");
+		return RET_FAIL;
+	}
+	return RET_SUCCESS;
+}
+
+/* Resume a root-hub port that is in U3 back to U0.
+ * SuperSpeed ports are written directly to U0; HS/FS ports are driven
+ * through the Resume state (PLS=15) for 20ms first, per the xHCI resume
+ * sequence.  argv: [1]=port id (defaults to g_port_id).
+ * Returns RET_FAIL if the port is not in U3 to begin with, or does not
+ * reach U0 within ~200ms.
+ */
+static int t_power_resumeport(int argc, char** argv){
+	u32 __iomem *addr;
+	int temp;
+	int port_id;
+	int i;
+	struct xhci_hcd *xhci;
+
+	port_id = g_port_id;
+
+	if(argc > 1){
+		port_id = (int)simple_strtol(argv[1], &argv[1], 10);
+	}
+
+	xhci = hcd_to_xhci(my_hcd);
+
+	/* PORTSC register of the selected port */
+	addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+	temp = xhci_readl(xhci, addr);
+	xhci_dbg(xhci, "before reset port %d = 0x%x\n", port_id-1, temp);
+	if(PORT_PLS(temp) != (3 << 5)){
+		xhci_err(xhci, "port not in U3 state\n");
+		return RET_FAIL;
+	}
+
+	temp = xhci_port_state_to_neutral(temp);
+	temp = (temp & ~(0xf << 5));
+
+	if(DEV_SUPERSPEED(temp)){
+		/* superspeed direct set U0 */
+		temp = (temp | PORT_LINK_STROBE);
+		xhci_writel(xhci, temp, addr);
+	}
+	else{
+		/* HS/FS, set resume for 20ms, then set U0 */
+		temp = (temp | (15 << 5) | PORT_LINK_STROBE);
+		xhci_writel(xhci, temp, addr);
+		mdelay(20);
+		temp = xhci_readl(xhci, addr);
+		temp = xhci_port_state_to_neutral(temp);
+		temp = (temp & ~(0xf << 5));
+		temp = (temp | PORT_LINK_STROBE);
+		xhci_writel(xhci, temp, addr);
+	}
+	/* poll up to ~200ms for PLS == U0 */
+	for(i=0; i<200; i++){
+		temp = xhci_readl(xhci, addr);
+		if(PORT_PLS_VALUE(temp) == 0){
+			break;
+		}
+		msleep(1);
+
+	}
+	if(PORT_PLS_VALUE(temp) != 0){
+		xhci_err(xhci, "port not return U0 state\n");
+		return RET_FAIL;
+	}
+	return RET_SUCCESS;
+}
+
+
+/* Trigger a remote-wakeup test; thin wrapper over f_power_remotewakeup(). */
+static int t_power_remotewakeup(int argc, char** argv){
+	return f_power_remotewakeup();
+}
+
+/* Configure or query U1/U2 link power management.
+ * argv[1] selects the action (default 1): 1-3 program the U1/U2 timeouts
+ * via f_power_set_u1u2(u_num, value1, value2) with value1/value2 from
+ * argv[2]/argv[3] (default 1); 4 resets both U1/U2 entry counters;
+ * 5 prints both counters.  Any other value now returns RET_FAIL (the
+ * original fell off the end of the function and returned garbage).
+ * Note: xhci_dbg() here uses the file-scope xhci handle.
+ */
+static int t_power_u1u2(int argc, char** argv){
+	int u_num;
+	int value1, value2;
+
+	u_num = 1;
+	value1 = 1;
+	value2 = 1;
+	if(argc > 1){
+		u_num = (int)simple_strtol(argv[1], &argv[1], 10);
+		xhci_dbg(xhci, "u_num set to %d\n", u_num);
+	}
+	if(argc > 2){
+		value1 = (int)simple_strtol(argv[2], &argv[2], 10);
+		xhci_dbg(xhci, "value1 set to %d\n", value1);
+	}
+	if(argc > 3){
+		value2 = (int)simple_strtol(argv[3], &argv[3], 10);
+		xhci_dbg(xhci, "value2 set to %d\n", value2);
+	}
+	if(u_num < 4){
+		return f_power_set_u1u2(u_num, value1, value2);
+	}
+	if(u_num == 4){
+		/* reset counter */
+		f_power_reset_u1u2_counter(1);
+		f_power_reset_u1u2_counter(2);
+		return RET_SUCCESS;
+	}
+	if(u_num == 5){
+		/* print counter */
+		printk(KERN_ERR "u1 counter = %d\n", f_power_get_u1u2_counter(1));
+		printk(KERN_ERR "u2 counter = %d\n", f_power_get_u1u2_counter(2));
+		return RET_SUCCESS;
+	}
+	/* BUGFIX: unknown action used to fall off the end without a return */
+	return RET_FAIL;
+}
+
+/* Program USB2 LPM (L1) parameters for the current slot.
+ * argv: [1]=hle [2]=rwe [3]=hirdm [4]=L1_timeout [5]=besl [6]=besld
+ *       [7]=pdn [8]=int_nak_active [9]=bulk_nyet_active
+ * BUGFIX: every knob now defaults to 0 -- the original passed
+ * uninitialized stack values to f_power_config_lpm for any argument
+ * that was omitted.
+ * NOTE(review): pdn is parsed but not forwarded to f_power_config_lpm --
+ * confirm whether that is intentional.
+ */
+static int t_power_u2_lpm(int argc, char** argv){
+	int hle = 0, rwe = 0, hirdm = 0, besl = 0, besld = 0, pdn = 0;
+	int int_nak_active = 0, bulk_nyet_active = 0;
+	int L1_timeout = 0;
+	if(argc > 1){
+		hle = (int)simple_strtol(argv[1], &argv[1], 10);
+	}
+	if(argc > 2){
+		rwe = (int)simple_strtol(argv[2], &argv[2], 10);
+	}
+	if(argc > 3){
+		hirdm = (int)simple_strtol(argv[3], &argv[3], 10);
+	}
+	if(argc > 4){
+		L1_timeout = (int)simple_strtol(argv[4], &argv[4], 10);
+	}
+	if(argc > 5){
+		besl = (int)simple_strtol(argv[5], &argv[5], 10);
+	}
+	if(argc > 6){
+		besld = (int)simple_strtol(argv[6], &argv[6], 10);
+	}
+	if(argc > 7){
+		pdn = (int)simple_strtol(argv[7], &argv[7], 10);
+	}
+	if(argc > 8){
+		int_nak_active = (int)simple_strtol(argv[8], &argv[8], 10);
+	}
+	if(argc > 9){
+		bulk_nyet_active = (int)simple_strtol(argv[9], &argv[9], 10);
+	}
+	/* program hle, rwe */
+	f_power_config_lpm(g_slot_id, hirdm, L1_timeout, rwe, besl, besld, hle, int_nak_active, bulk_nyet_active);
+
+	return RET_SUCCESS;
+}
+
+/* Software-initiated USB2 LPM (L1) test: drive the port into L1 and check
+ * that the L1S status field matches the expected result.
+ * argv[1]: 0=resume, 1=accept, 2=NYET, 3=STALL, 4=timeout, 5=remote wakeup
+ * (accept + wait for wakeup), 6=reset L1 counters, 7=print L1 counters.
+ * BUGFIX: expected_L1S now defaults to 0 -- the original read it
+ * uninitialized when no argument was supplied.
+ */
+static int t_power_u2_swlpm(int argc, char** argv){
+	int ret;
+	int expected_L1S;
+	struct xhci_hcd *xhci;
+	u32 __iomem *addr;
+	int temp;
+	char is_wakeup;
+	int num_u3_port;
+
+	ret = RET_SUCCESS;
+	is_wakeup = false;
+	expected_L1S = 0;
+	if(argc > 1){
+		/* 0:resume 1:accept, 2:NYET, 3:STALL, 4:timeout, 5:remote_wakeup, 6:reset counter, 7:print counter */
+		expected_L1S = (int)simple_strtol(argv[1], &argv[1], 10);
+	}
+	if(expected_L1S == 5){
+		/* remote wakeup is "accept then wait for the device to wake us" */
+		expected_L1S = 1;
+		is_wakeup = true;
+
+	}
+	if(expected_L1S == 6){
+		f_power_reset_L1_counter(1);
+		f_power_reset_L1_counter(2);
+		return RET_SUCCESS;
+	}
+	if(expected_L1S == 7){
+		/* print counter */
+		printk(KERN_ERR "L1 entr counter = %d\n", f_power_get_L1_counter(1));
+		printk(KERN_ERR "L1 exit counter = %d\n", f_power_get_L1_counter(2));
+		return RET_SUCCESS;
+	}
+	g_port_plc = 0;
+	xhci = hcd_to_xhci(my_hcd);
+
+	num_u3_port = SSUSB_U3_PORT_NUM(readl(SSUSB_IP_CAP));
+	enablePortClockPower((g_port_id-1-num_u3_port), 0x2);
+
+	addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((g_port_id-1) & 0xff);
+	temp = xhci_readl(xhci, addr);
+	temp = xhci_port_state_to_neutral(temp);
+	/* clear the PLS field before requesting a new link state */
+	temp = (temp & ~(0xf << 5));
+	if(expected_L1S == 0){
+		/* request U0 (resume) and wait for the link to report it */
+		temp = (temp | PORT_LINK_STROBE);
+		xhci_writel(xhci, temp, addr);
+		ret = mtk_xhci_handshake(xhci, addr, (0xf << 5), expected_L1S, ATTACH_TIMEOUT);
+		if(ret != 0){
+			xhci_err(xhci, "resume failed\n");
+			return RET_FAIL;
+		}
+		else{
+			return RET_SUCCESS;
+		}
+	}
+	else{
+		/* request L1 (PLS=2) */
+		temp = (temp | (2 << 5) | PORT_LINK_STROBE);
+	}
+	if(is_wakeup){
+		g_port_plc = 0;
+	}
+	xhci_writel(xhci, temp, addr);
+	/* L1S lives in the port power/PM register */
+	addr = &xhci->op_regs->port_power_base + NUM_PORT_REGS*((g_port_id-1) & 0xff);
+
+	ret = mtk_xhci_handshake(xhci, addr, 0x7, expected_L1S, ATTACH_TIMEOUT);
+	if(ret != 0){
+		xhci_err(xhci, "L1S doesn't as expected, expected[%d], actual[%d]\n"
+			, expected_L1S, xhci_readl(xhci, addr));
+		return RET_FAIL;
+	}
+	mdelay(50);
+	if(expected_L1S == 2 || expected_L1S == 3 || expected_L1S == 4){
+		/* rejected/timed-out L1 entry must raise a port link change */
+		if(g_port_plc == 0){
+			xhci_err(xhci, "Doesn't get port PLC event\n");
+			return RET_FAIL;
+		}
+	}
+	/* Mark for Unit Test
+	 * if(expected_L1S == 1){
+	 * 	disablePortClockPower((g_port_id-1-num_u3_port), 0x2);
+	 * }
+	 */
+
+	if(is_wakeup){
+		/* wait for the device-initiated wakeup, then check PLS back to U0 */
+		poll_event_on_timeout(&g_port_plc, 1, 5000);
+		if(g_port_plc == 0){
+			xhci_err(xhci, "No port state change event\n");
+			return RET_FAIL;
+		}
+		addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((g_port_id-1) & 0xff);
+		ret = mtk_xhci_handshake(xhci, addr, (0xf << 5), 0, ATTACH_TIMEOUT);
+		if(ret != 0){
+			xhci_err(xhci, "Remote wakeup failed\n");
+		}
+	}
+	return ret;
+}
+
+/* Send a Function Remote Wake (FLA) value to the controller.
+ * argv[1] = value to send.  BUGFIX: defaults to 0 -- the original passed
+ * an uninitialized stack value when no argument was given.
+ * Note: xhci_dbg() here uses the file-scope xhci handle.
+ */
+static int t_power_fla(int argc, char** argv){
+	int fla_value = 0;
+
+	if(argc > 1){
+		fla_value = (int)simple_strtol(argv[1], &argv[1], 10);
+		xhci_dbg(xhci, "fla_value set to %d\n", fla_value);
+	}
+	return f_power_send_fla(fla_value);
+}
+
+/* Over-current test: reset all U2 ports, raise the IP over-current reset
+ * bit, and verify that an over-current event (g_port_occ) arrives and that
+ * port power was dropped.  Afterwards it re-powers the port, waits for a
+ * reconnect, and re-enumerates the device (enable slot + address).
+ * argv: [1]="ss"/"hs" selects which port clock/power domain to re-enable.
+ */
+static int t_power_occ(int argc, char** argv){
+	int ret;
+	u32 __iomem *addr;
+	int temp;
+	struct xhci_hcd *xhci;
+	struct xhci_port *port;
+	int i, port_id;
+	USB_DEV_SPEED speed;
+
+	speed = DEV_SPEED_SUPER;
+	if(argc > 1){
+		if(!strcmp(argv[1], "ss")){
+			speed = DEV_SPEED_SUPER;
+		}
+		if(!strcmp(argv[1], "hs")){
+			speed = DEV_SPEED_HIGH;
+		}
+	}
+
+	xhci = hcd_to_xhci(my_hcd);
+	g_port_occ = false;
+	g_num_u3_port = SSUSB_U3_PORT_NUM(readl(SSUSB_IP_CAP));
+	g_num_u2_port = SSUSB_U2_PORT_NUM(readl(SSUSB_IP_CAP));
+	/* disable U2 port first */
+
+	for(i=1; i<=g_num_u2_port; i++){
+		port_id=i+g_num_u3_port;
+		addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+		temp = xhci_readl(xhci, addr);
+
+		temp = xhci_port_state_to_neutral(temp);
+		temp = (temp | PORT_RESET);
+
+		xhci_writel(xhci, temp, addr);
+	}
+	addr = IPRESET_ADDR;
+	temp = readl(addr);
+	temp |= (1<<IPRESET_BIT1);
+	/* NOTE(review): 'temp' is never written back to IPRESET_ADDR here --
+	 * a writel() appears to be missing; confirm against the original
+	 * driver source before relying on this test. */
+
+	msleep(13);
+	if(g_port_occ == true){
+		ret = RET_SUCCESS;
+	}
+	else{
+		printk(KERN_ERR "[ERROR] doesn't get over-current event\n");
+		ret = RET_FAIL;
+	}
+	/* check if PP=0
+	 * turn on PP, re-enable device
+	 * disable slot, port connection, enable...
+	 */
+	xhci = hcd_to_xhci(my_hcd);
+	addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((g_port_id-1) & 0xff);
+	temp = xhci_readl(xhci, addr);
+	if((temp & PORT_POWER)){
+		printk(KERN_ERR "[ERROR] port_power bit is still 1\n");
+	}
+	addr = IPRESET_ADDR;
+	temp = readl(addr);
+	temp &= ~(1<<IPRESET_BIT1);
+	/* NOTE(review): same here -- the cleared value is never written back. */
+
+	if (ret == RET_FAIL) {
+		return ret;
+	}
+
+	f_disable_slot();
+
+	printk(KERN_ERR "g_port_id = %d.\n", g_port_id);
+	/* re-enable clock/power for the matching port domain */
+	if (speed == DEV_SPEED_SUPER) {
+		enablePortClockPower((g_port_id-1), 0x3);
+	}else {
+		enablePortClockPower((g_port_id-1-g_num_u3_port), 0x2);
+	}
+
+	addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((g_port_id-1) & 0xff);
+	temp = xhci_readl(xhci, addr);
+	port = rh_port[0];
+	port->port_status = DISCONNECTED;
+
+	msleep(1000);
+
+	/* wait for reconnect, then re-enumerate the device */
+	if(f_enable_port(0) != RET_SUCCESS){
+		printk(KERN_ERR "[ERROR] port not reconnectted after set PP\n");
+	}
+	ret = f_enable_slot(NULL);
+	ret = f_address_slot(false, NULL);
+
+	return ret;
+}
+
+/* Grow an endpoint's transfer ring by one segment: allocate a new segment,
+ * link it in right after the current enqueue segment, give its TRBs the
+ * cycle state opposite to the ring's current one (so HW does not own them
+ * yet), and migrate the LINK_TOGGLE flag if the enqueue segment carried it.
+ * argv: [1]="out"/"in", [2]=ep number, [3]=device number (-1 = child of
+ * the root port).
+ */
+static int t_ring_enlarge(int argc, char** argv){
+	int ep_dir, ep_num, ep_index, dev_num, slot_id;
+	struct xhci_hcd *xhci;
+	struct usb_host_endpoint *ep;
+	struct xhci_virt_device *virt_dev;
+	struct usb_device *udev, *rhdev;
+	struct xhci_ring *ep_ring;
+	struct xhci_segment *next, *prev;
+	u32 val, cycle_bit;
+	int i, ret;
+
+	ep_dir = EPADD_OUT;
+	ep_num = 1;
+	dev_num = -1;
+	ret = 0;
+	/* BUGFIX: assign xhci before the xhci_dbg() calls below -- the
+	 * original used it uninitialized during argument parsing. */
+	xhci = hcd_to_xhci(my_hcd);
+
+	if(argc > 1){
+		if(!strcmp(argv[1], "out")){
+			printk(KERN_ERR "OUT EP\n");
+			ep_dir = EPADD_OUT;
+		}
+		else if(!strcmp(argv[1], "in")){
+			printk(KERN_ERR "IN EP\n");
+			ep_dir = EPADD_IN;
+		}
+	}
+	if(argc > 2){
+		ep_num = (int)simple_strtol(argv[2], &argv[2], 10);
+		xhci_dbg(xhci, "ep_num set to %d\n", ep_num);
+	}
+	if(argc > 3){
+		dev_num = (int)simple_strtol(argv[3], &argv[3], 10);
+		xhci_dbg(xhci, "dev_num set to %d\n", dev_num);
+	}
+
+	if(dev_num == -1){
+		rhdev = my_hcd->self.root_hub;
+		udev = rhdev->children[g_port_id-1];
+		slot_id = udev->slot_id;
+	}
+	else{
+		udev = dev_list[dev_num-1];
+		slot_id = udev->slot_id;
+	}
+	virt_dev = xhci->devs[udev->slot_id];
+	if(ep_dir == EPADD_OUT){
+		ep = udev->ep_out[ep_num];
+	}
+	else{
+		ep = udev->ep_in[ep_num];
+	}
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+	ep_ring = (&(virt_dev->eps[ep_index]))->ring;
+
+	/* build the new segment's trailing link TRB pointing where prev did */
+	prev = ep_ring->enq_seg;
+	next = xhci_segment_alloc(xhci, GFP_NOIO);
+	next->next = prev->next;
+	next->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = cpu_to_le64(prev->next->dma);
+	val = le32_to_cpu(next->trbs[TRBS_PER_SEGMENT-1].link.control);
+	val &= ~TRB_TYPE_BITMASK;
+	val |= TRB_TYPE(TRB_LINK);
+	val |= TRB_CHAIN;
+	next->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
+	xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
+			(unsigned long long)prev->dma,
+			(unsigned long long)next->dma);
+	/* new TRBs get the cycle state opposite to the ring's current one */
+	if(ep_ring->cycle_state == 1){
+		cycle_bit = 0;
+	}
+	else{
+		cycle_bit = 1;
+	}
+	for(i=0; i<TRBS_PER_SEGMENT; i++){
+		val = le32_to_cpu(next->trbs[i].generic.field[3]);
+		if(cycle_bit == 1){
+			val |= (u32)0x1;
+		}
+		else{
+			/* BUGFIX: the original did 'val &= ~cycle_bit' with
+			 * cycle_bit == 0, i.e. AND with all-ones -- a no-op.
+			 * Clear the actual cycle bit (bit 0) instead. */
+			val &= ~(u32)0x1;
+		}
+		next->trbs[i].generic.field[3] = cpu_to_le32(val);
+		xhci_dbg(xhci, "Set new segment trb %d cycle bit 0x%x\n", i, val);
+	}
+	xhci_link_segments(xhci, prev, next, true);
+	/* if prev held the toggle, hand it over to the new segment */
+	if(prev->trbs[TRBS_PER_SEGMENT-1].link.control & cpu_to_le32(LINK_TOGGLE)){
+		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
+		val &= ~LINK_TOGGLE;
+		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
+		val = le32_to_cpu(next->trbs[TRBS_PER_SEGMENT-1].link.control);
+		val |= LINK_TOGGLE;
+		next->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
+	}
+	return ret;
+}
+
+/* Remove the segment that follows the enqueue segment from an endpoint's
+ * transfer ring, fixing up LINK_TOGGLE / first_seg when either segment
+ * carried the toggle.  Fails if the ring has only one segment.
+ * argv: [1]="out"/"in", [2]=ep number, [3]=device number (-1 = child of
+ * the root port).
+ */
+static int t_ring_shrink(int argc, char**argv){
+	int ep_dir, ep_num, ep_index, dev_num, slot_id;
+	struct xhci_hcd *xhci;
+	struct usb_host_endpoint *ep;
+	struct xhci_virt_device *virt_dev;
+	struct usb_device *udev, *rhdev;
+	struct xhci_ring *ep_ring;
+	struct xhci_segment *next, *prev;
+	u32 val;
+
+	ep_dir = EPADD_OUT;
+	ep_num = 1;
+	dev_num = -1;
+	/* BUGFIX: assign xhci before the xhci_dbg() calls below -- the
+	 * original used it uninitialized during argument parsing. */
+	xhci = hcd_to_xhci(my_hcd);
+
+	if(argc > 1){
+		if(!strcmp(argv[1], "out")){
+			printk(KERN_ERR "OUT EP\n");
+			ep_dir = EPADD_OUT;
+		}
+		else if(!strcmp(argv[1], "in")){
+			printk(KERN_ERR "IN EP\n");
+			ep_dir = EPADD_IN;
+		}
+	}
+	if(argc > 2){
+		ep_num = (int)simple_strtol(argv[2], &argv[2], 10);
+		xhci_dbg(xhci, "ep_num set to %d\n", ep_num);
+	}
+	if(argc > 3){
+		dev_num = (int)simple_strtol(argv[3], &argv[3], 10);
+		xhci_dbg(xhci, "dev_num set to %d\n", dev_num);
+	}
+
+	if(dev_num == -1){
+		rhdev = my_hcd->self.root_hub;
+		udev = rhdev->children[g_port_id-1];
+		slot_id = udev->slot_id;
+	}
+	else{
+		udev = dev_list[dev_num-1];
+		slot_id = udev->slot_id;
+	}
+	virt_dev = xhci->devs[udev->slot_id];
+	if(ep_dir == EPADD_OUT){
+		ep = udev->ep_out[ep_num];
+	}
+	else{
+		ep = udev->ep_in[ep_num];
+	}
+
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+	ep_ring = (&(virt_dev->eps[ep_index]))->ring;
+
+	prev = ep_ring->enq_seg;
+	next = prev->next;
+	if(prev == next){
+		printk(KERN_ERR "This is the last segment, can not be remove\n");
+		return RET_FAIL;
+	}
+	/* toggle handling: if prev owned it, the ring start moves past the
+	 * removed segment; if the removed segment owned it, hand it to prev */
+	if(le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control) & LINK_TOGGLE){
+		ep_ring->first_seg = next->next;
+	}
+	else if(le32_to_cpu(next->trbs[TRBS_PER_SEGMENT-1].link.control) & LINK_TOGGLE){
+		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
+		val |= LINK_TOGGLE;
+		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
+	}
+	xhci_link_segments(xhci, prev, next->next, true);
+	xhci_segment_free(xhci, next);
+	return RET_SUCCESS;
+}
+
+/* Issue a Stop Endpoint command on one endpoint of one device.
+ * argv: [1]="out"/"in", [2]=ep number, [3]=device number (-1 = child of
+ * the root port).  Returns the status of f_ring_stop_ep().
+ */
+static int t_ring_stop_ep(int argc, char** argv){
+	int ep_dir, ep_num, ep_index, dev_num, slot_id;
+	struct xhci_hcd *xhci;
+	struct usb_host_endpoint *ep;
+	struct usb_device *udev, *rhdev;
+
+	ep_dir = EPADD_OUT;
+	ep_num = 1;
+	dev_num = -1;
+	/* BUGFIX: assign xhci before the xhci_dbg() calls below -- the
+	 * original used it uninitialized during argument parsing. */
+	xhci = hcd_to_xhci(my_hcd);
+
+	if(argc > 1){
+		if(!strcmp(argv[1], "out")){
+			printk(KERN_ERR "OUT EP\n");
+			ep_dir = EPADD_OUT;
+		}
+		else if(!strcmp(argv[1], "in")){
+			printk(KERN_ERR "IN EP\n");
+			ep_dir = EPADD_IN;
+		}
+	}
+	if(argc > 2){
+		ep_num = (int)simple_strtol(argv[2], &argv[2], 10);
+		xhci_dbg(xhci, "ep_num set to %d\n", ep_num);
+	}
+	if(argc > 3){
+		dev_num = (int)simple_strtol(argv[3], &argv[3], 10);
+		xhci_dbg(xhci, "dev_num set to %d\n", dev_num);
+	}
+
+	if(dev_num == -1){
+		rhdev = my_hcd->self.root_hub;
+		udev = rhdev->children[g_port_id-1];
+		slot_id = udev->slot_id;
+	}
+	else{
+		udev = dev_list[dev_num-1];
+		slot_id = udev->slot_id;
+	}
+	if(ep_dir == EPADD_OUT){
+		ep = udev->ep_out[ep_num];
+	}
+	else{
+		ep = udev->ep_in[ep_num];
+	}
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+	return f_ring_stop_ep(slot_id, ep_index);
+
+}
+
+/* Stop the command ring; thin wrapper over f_ring_stop_cmd(). */
+static int t_ring_stop_cmd(int argc, char** argv){
+	return f_ring_stop_cmd();
+}
+
+/* Abort-command test: enable the port and slot, queue an Address Device
+ * command WITHOUT waiting for its completion, then immediately abort the
+ * command ring so the abort races the in-flight command.
+ * Returns the status of f_ring_abort_cmd().
+ */
+static int t_ring_abort_cmd_add(int argc, char** argv){
+	int ret;
+	struct xhci_hcd *xhci;
+	struct usb_device *udev, *rhdev;
+	struct xhci_virt_device *virt_dev;
+
+	ret = f_enable_port(0);
+	if(ret != RET_SUCCESS){
+		return RET_FAIL;
+	}
+	ret = f_enable_slot(NULL);
+	if(ret != RET_SUCCESS){
+		return RET_FAIL;
+	}
+
+	/* queue address slot command but not waiting for cmd complete */
+	xhci = hcd_to_xhci(my_hcd);
+	rhdev = my_hcd->self.root_hub;
+	udev = rhdev->children[g_port_id-1];
+	xhci_setup_addressable_virt_dev(xhci, udev);
+	virt_dev = xhci->devs[udev->slot_id];
+	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, udev->slot_id, false);
+
+	/* abort command ring right now */
+	return f_ring_abort_cmd();
+}
+
+/* Interrupt-moderation test: program the IMOD interval (low 16 bits of
+ * IR_SET irq_control) with argv[1] (default 0), fire 1000 no-op commands,
+ * and report how many times the interrupt handler actually ran (tracked by
+ * the handler via g_intr_handled; -1 disables the tracking again).
+ */
+static int t_ring_intr_moderation(int argc, char** argv){
+	int i;
+	int intr_mod_value;
+	u32 temp;
+	struct xhci_hcd *xhci;
+
+	xhci = hcd_to_xhci(my_hcd);
+	intr_mod_value = 0;
+
+	if(argc > 1){
+		intr_mod_value = (int)simple_strtol(argv[1], &argv[1], 10);
+		xhci_dbg(xhci, "intr_mod_value set to %d\n", intr_mod_value);
+	}
+
+	/* replace the IMOD interval field, keep the counter bits */
+	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
+	temp &= ~0xFFFF;
+	temp |=intr_mod_value;
+	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);
+	g_intr_handled = 0;
+	for(i=0; i<1000; i++){
+		mtk_xhci_setup_one_noop(xhci);
+	}
+	msleep(10);
+	xhci_err(xhci, "interrupt handler executed %d times\n", g_intr_handled);
+	g_intr_handled = -1;
+	return RET_SUCCESS;
+}
+
+/* Force an Event Ring Full condition: disable host interrupts so events are
+ * never consumed by the handler, then keep queueing no-op commands until the
+ * controller posts a COMP_ER_FULL event (RET_SUCCESS) or 1000 iterations
+ * elapse (RET_FAIL).  Completed command events are drained manually so only
+ * the event ring (not the command ring) fills up.
+ */
+static int t_ring_er_full(int argc, char** argv){
+	struct xhci_hcd *xhci;
+	int i;
+	u32 temp;
+	union xhci_trb *event;
+	/* BUGFIX: declared at function scope -- the original declared this
+	 * after a statement inside the loop (declaration-after-statement). */
+	struct xhci_generic_trb *event_trb;
+
+	g_event_full = true;
+	if(my_hcd == NULL){
+		printk(KERN_ERR "[ERROR]host controller driver not initiated\n");
+		return RET_FAIL;
+	}
+	xhci = hcd_to_xhci(my_hcd);
+	i = 1000;
+	/* turn off interrupt first */
+	temp = xhci_readl(xhci, &xhci->op_regs->command);
+	temp &= (~CMD_EIE);
+	xhci_dbg(xhci, "/* Disable interrupts, cmd = 0x%x.\n", temp);
+	xhci_writel(xhci, temp, &xhci->op_regs->command);
+	/* continuous queue no-op command */
+	while(i>0){
+		event = xhci->event_ring->dequeue;
+		event_trb = &event->generic;
+		if((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) == xhci->event_ring->cycle_state){
+			xhci_dbg(xhci, "SW own current event\n");
+			if(GET_COMP_CODE(le32_to_cpu(event_trb->field[2])) == COMP_ER_FULL){
+				xhci_dbg(xhci, "Got event ring full\n");
+				return RET_SUCCESS;
+			}
+			else{
+				xhci_dbg(xhci, "Increase command ring dequeue pointer\n");
+				inc_deq(xhci, xhci->cmd_ring, false);
+			}
+			inc_deq(xhci, xhci->event_ring, true);
+		}
+		xhci_dbg(xhci, "Queue No-Op command\n");
+		mdelay(50);
+		mtk_xhci_setup_one_noop(xhci);
+		i--;
+	}
+	return RET_FAIL;
+}
+
+/* Immediate Data Transfer (IDT) loopback test: reset and re-enumerate the
+ * device at the requested speed, configure a bulk OUT/IN endpoint pair on
+ * both sides, set the global g_idt_transfer flag so the transfer path uses
+ * IDT TRBs, and run a short (7-byte) loopback through the GPD path.
+ * argv: [1]="ss"/"hs"/"fs" (default ss; also selects maxp/burst).
+ * NOTE(review): many locals here (xhci, dev, udev, urb_tx, ...) are
+ * declared but never used -- likely copied from a sibling test.
+ */
+static int t_ring_idt(int argc, char** argv){
+	/* do loopback set IDT */
+	int ret,length,start_add;
+	char bdp;
+	int gpd_buf_size,bd_buf_size;
+	int transfer_type;
+	int maxp;
+	int bInterval;
+	int ep_out_num, ep_in_num;
+	int speed;
+	int mult_dev, mult, burst;
+
+	struct xhci_hcd *xhci;
+	struct device *dev;
+	struct usb_device *udev, *rhdev;
+	u8 *tmp1, *tmp2;
+	struct urb *urb_tx, *urb_rx;
+	int iso_num_packets;
+	struct usb_host_endpoint *ep_tx, *ep_rx;
+	int max_esit_payload;
+	void *buffer_tx, *buffer_rx;
+	dma_addr_t mapping_tx, mapping_rx;
+
+	ret = 0;
+	speed = DEV_SPEED_SUPER;
+	transfer_type = EPATT_BULK;
+	maxp = 1024;
+	bInterval = 0;
+	ep_out_num = 1;
+	ep_in_num = 1;
+	length = 7;
+	mult_dev = 3;
+	mult = 0;
+	burst = 8;
+	start_add = 0;
+	bdp=1;
+	gpd_buf_size = 65535;
+	bd_buf_size = 4096;
+
+	/* speed selection also fixes maxp/burst to the spec values */
+	if(argc > 1){
+		if(!strcmp(argv[1], "ss")){
+			printk(KERN_ERR "[DEV]Reset device to super speed\n");
+			speed = DEV_SPEED_SUPER;
+			maxp = 1024;
+			burst = 8;
+		}
+		else if(!strcmp(argv[1], "hs")){
+			printk(KERN_ERR "[DEV]Reset device to high speed\n");
+			speed = DEV_SPEED_HIGH;
+			maxp = 512;
+			burst = 0;
+		}
+		else if(!strcmp(argv[1], "fs")){
+			printk(KERN_ERR "[DEV]Reset device to full speed\n");
+			speed = DEV_SPEED_FULL;
+			maxp = 64;
+			burst = 0;
+		}
+	}
+
+	/* full re-enumeration: reset device, drop slot, re-enable, address */
+	start_port_reenabled(0, speed);
+	ret=dev_reset(speed, NULL);
+	if(ret){
+		printk(KERN_ERR "device reset failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	ret = f_disable_slot();
+	if(ret){
+		printk(KERN_ERR "disable slot failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	ret = f_reenable_port(0);
+	if(ret != RET_SUCCESS){
+		printk(KERN_ERR "device reenable failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	ret = f_enable_slot(NULL);
+	if(ret){
+		printk(KERN_ERR "enable slot failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	ret=f_address_slot(false, NULL);
+	if(ret){
+		printk(KERN_ERR "address slot failed!!!!!!!!!!\n");
+		return ret;
+	}
+
+	/* ==phase 1 : config EP==*/
+	ret=dev_config_ep(ep_out_num,USB_RX,transfer_type,maxp,bInterval,mult_dev,burst,mult,NULL);
+	if(ret)
+	{
+		printk(KERN_ERR "config dev EP fail!!\n");
+		return ret;
+	}
+	ret=dev_config_ep(ep_in_num,USB_TX,transfer_type,maxp,bInterval,mult_dev,burst,mult,NULL);
+	if(ret)
+	{
+		printk(KERN_ERR "config dev EP fail!!\n");
+		return ret;
+	}
+
+	ret = f_config_ep(ep_out_num,EPADD_OUT,transfer_type,maxp,bInterval,burst,mult, NULL,0);
+	if(ret)
+	{
+		printk(KERN_ERR "config EP fail!!\n");
+		return ret;
+	}
+
+	ret = f_config_ep(ep_in_num,EPADD_IN,transfer_type,maxp,bInterval,burst,mult, NULL,1);
+	if(ret)
+	{
+		printk(KERN_ERR "config EP fail!!\n");
+		return ret;
+	}
+	/* transfers queued from here on use Immediate Data TRBs */
+	g_idt_transfer = true;
+
+	ret=dev_loopback(bdp,length,gpd_buf_size,bd_buf_size,0,0,NULL);
+	if(ret)
+	{
+		printk(KERN_ERR "loopback request fail!!\n");
+		return ret;
+	}
+
+	ret = f_loopback_loop_gpd(ep_out_num, ep_in_num, length, start_add, gpd_buf_size, NULL);
+	if(ret){
+		printk(KERN_ERR "loopback fail!!\n");
+		return ret;
+	}
+
+	g_idt_transfer = false;
+
+	return RET_SUCCESS;
+}
+
+static int t_ring_bei(int argc, char** argv){
+ /* do loopback set BEI
+ * after tx round should not get URB complete status
+ * queue no-op without set BEI
+ * get URB complete status
+ * do the same to Rx round
+ */
+ int ret,length,start_add;
+ char bdp;
+ int gpd_buf_size,bd_buf_size;
+ int transfer_type;
+ int maxp;
+ int bInterval;
+ int ep_out_num, ep_in_num;
+ int speed;
+ int mult_dev, mult, burst;
+
+ struct xhci_hcd *xhci;
+ struct device *dev;
+ struct usb_device *udev, *rhdev;
+ u8 *tmp1, *tmp2;
+ struct urb *urb_tx, *urb_rx;
+ int iso_num_packets;
+ struct usb_host_endpoint *ep_tx, *ep_rx;
+ int max_esit_payload;
+ void *buffer_tx, *buffer_rx;
+ dma_addr_t mapping_tx, mapping_rx;
+
+ ret = 0;
+ speed = DEV_SPEED_SUPER;
+ transfer_type = EPATT_BULK;
+ maxp = 1024;
+ bInterval = 0;
+ ep_out_num = 1;
+ ep_in_num = 1;
+ length = 65535;
+ mult_dev = 3;
+ mult = 0;
+ burst = 8;
+ start_add = 0;
+ bdp=1;
+ gpd_buf_size = 65535;
+ bd_buf_size = 4096;
+
+ if(argc > 1){
+ if(!strcmp(argv[1], "ss")){
+ printk(KERN_ERR "[DEV]Reset device to super speed\n");
+ speed = DEV_SPEED_SUPER;
+ maxp = 1024;
+ burst = 8;
+ }
+ else if(!strcmp(argv[1], "hs")){
+ printk(KERN_ERR "[DEV]Reset device to high speed\n");
+ speed = DEV_SPEED_HIGH;
+ maxp = 512;
+ burst = 0;
+ }
+ else if(!strcmp(argv[1], "fs")){
+ printk(KERN_ERR "[DEV]Reset device to full speed\n");
+ speed = DEV_SPEED_FULL;
+ maxp = 64;
+ burst = 0;
+ }
+ }
+ if(argc > 2){
+ maxp = (int)simple_strtol(argv[2], &argv[2], 10);
+ printk(KERN_ERR "maxp = %d\n", maxp);
+ }
+
+ start_port_reenabled(0, speed);
+ ret=dev_reset(speed, NULL);
+ if(ret){
+ printk(KERN_ERR "device reset failed!!!!!!!!!!\n");
+ return ret;
+ }
+
+ ret = f_disable_slot();
+ if(ret){
+ printk(KERN_ERR "disable slot failed!!!!!!!!!!\n");
+ return ret;
+ }
+
+ ret = f_reenable_port(0);
+ if(ret != RET_SUCCESS){
+ printk(KERN_ERR "device reenable failed!!!!!!!!!!\n");
+ return ret;
+ }
+
+ ret = f_enable_slot(NULL);
+ if(ret){
+ printk(KERN_ERR "enable slot failed!!!!!!!!!!\n");
+ return ret;
+ }
+
+ ret=f_address_slot(false, NULL);
+ if(ret){
+ printk(KERN_ERR "address slot failed!!!!!!!!!!\n");
+ return ret;
+ }
+
+ /* ==phase 1 : config EP==*/
+ ret=dev_config_ep(ep_out_num,USB_RX,transfer_type,maxp,bInterval,mult_dev,burst,mult,NULL);
+ if(ret)
+ {
+ printk(KERN_ERR "config dev EP fail!!\n");
+ return ret;
+ }
+ ret=dev_config_ep(ep_in_num,USB_TX,transfer_type,maxp,bInterval,mult_dev,burst,mult,NULL);
+ if(ret)
+ {
+ printk(KERN_ERR "config dev EP fail!!\n");
+ return ret;
+ }
+
+ ret = f_config_ep(ep_out_num,EPADD_OUT,transfer_type,maxp,bInterval,burst,mult, NULL,0);
+ if(ret)
+ {
+ printk(KERN_ERR "config EP fail!!\n");
+ return ret;
+ }
+
+ ret = f_config_ep(ep_in_num,EPADD_IN,transfer_type,maxp,bInterval,burst,mult, NULL,1);
+ if(ret)
+ {
+ printk(KERN_ERR "config EP fail!!\n");
+ return ret;
+ }
+ g_is_bei = true;
+
+ ret=dev_loopback(bdp,length,gpd_buf_size,bd_buf_size,0,0,NULL);
+ if(ret)
+ {
+ printk(KERN_ERR "loopback request fail!!\n");
+ f_power_suspend();
+ return ret;
+ }
+ /* do Tx with BEI */
+ iso_num_packets = 0;
+ xhci = hcd_to_xhci(my_hcd);
+ dev = xhci_to_hcd(xhci)->self.controller;/* dma stream buffer */
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+ ep_tx = udev->ep_out[ep_out_num];
+ ep_rx = udev->ep_in[ep_in_num];
+
+ ret = 0;
+ buffer_tx = kmalloc(length, GFP_KERNEL);
+ mapping_tx = dma_map_single(dev, buffer_tx,length, DMA_BIDIRECTIONAL);
+ urb_tx = usb_alloc_urb(iso_num_packets, GFP_KERNEL);
+ f_fill_urb_with_buffer(urb_tx, ep_out_num, length, buffer_tx
+ , start_add, URB_DIR_OUT, iso_num_packets, max_esit_payload, mapping_tx, udev);
+ if(ret){
+ xhci_err(xhci, "[ERROR]fill tx urb Error!!\n");
+ return RET_FAIL;
+ }
+
+ ret = f_queue_urb(urb_tx,0,NULL);
+ if(ret){
+ xhci_err(xhci, "[ERROR]queue tx urb Error!!\n");
+ return RET_FAIL;
+ }
+ msleep(100);
+ if(urb_tx->status == 0){
+ xhci_err(xhci, "URB_TX status become 0, BEI seems doesn't work\n");
+ return RET_FAIL;
+ }
+ /* queue a no-op */
+ mtk_xhci_setup_one_noop(xhci);
+ msleep(10);
+ if(urb_tx->status != 0){
+ xhci_err(xhci, "URB_TX status doesn't become 0 after interrupt\n");
+ return RET_FAIL;
+ }
+ usb_free_urb(urb_tx);
+
+ buffer_rx = kmalloc(length, GFP_KERNEL);
+ memset(buffer_rx, 0, length);
+ mapping_rx = dma_map_single(dev, buffer_rx,length, DMA_BIDIRECTIONAL);
+ urb_rx = usb_alloc_urb(iso_num_packets, GFP_KERNEL);
+ f_fill_urb_with_buffer(urb_rx, ep_in_num, length, buffer_rx
+ , start_add, URB_DIR_IN, iso_num_packets, max_esit_payload, mapping_rx,udev);
+ if(ret){
+ xhci_err(xhci, "[ERROR]fill tx urb Error!!\n");
+ return RET_FAIL;
+ }
+
+ ret = f_queue_urb(urb_rx,0,NULL);
+ if(ret){
+ xhci_err(xhci, "[ERROR]queue rx urb Error!!\n");
+ return RET_FAIL;
+ }
+ msleep(100);
+ if(urb_rx->status == 0){
+ xhci_err(xhci, "URB_RX status become 0, BEI seems doesn't work\n");
+ return RET_FAIL;
+ }
+ /* queue a no-op */
+ mtk_xhci_setup_one_noop(xhci);
+ msleep(10);
+ if(urb_tx->status != 0){
+ xhci_err(xhci, "URB_RX status doesn't become 0 after interrupt\n");
+ return RET_FAIL;
+ }
+ urb_rx->transfer_buffer = NULL;
+ urb_rx->transfer_dma = NULL;
+ usb_free_urb(urb_rx);
+ kfree(buffer_tx);
+ kfree(buffer_rx);
+
+ g_is_bei = false;
+
+ return RET_SUCCESS;
+}
+
+static int t_ring_noop_transfer(int argc, char** argv){
+ int ret,length,start_add;
+ char bdp;
+ int gpd_buf_size,bd_buf_size;
+ int transfer_type;
+ int maxp;
+ int bInterval;
+ int ep_out_num, ep_in_num;
+ int speed;
+ int mult_dev, mult, burst;
+
+ struct xhci_hcd *xhci;
+ struct device *dev;
+ struct usb_device *udev, *rhdev;
+ u8 *tmp1, *tmp2;
+ struct urb *urb_tx1, *urb_tx2, *urb_rx1, *urb_rx2;
+ int iso_num_packets;
+ struct usb_host_endpoint *ep_tx, *ep_rx;
+ int max_esit_payload;
+ void *buffer_tx1, *buffer_tx2, *buffer_rx1, *buffer_rx2;
+ dma_addr_t mapping_tx1, mapping_tx2, mapping_rx1, mapping_rx2;
+
+ ret = 0;
+ speed = DEV_SPEED_SUPER;
+ transfer_type = EPATT_BULK;
+ maxp = 1024;
+ bInterval = 0;
+ ep_out_num = 1;
+ ep_in_num = 1;
+ length = 65535;
+ mult_dev = 3;
+ mult = 0;
+ burst = 8;
+ start_add = 0;
+ bdp=1;
+ gpd_buf_size = 65535;
+ bd_buf_size = 4096;
+
+ if(argc > 1){
+ if(!strcmp(argv[1], "ss")){
+ printk(KERN_ERR "[DEV]Reset device to super speed\n");
+ speed = DEV_SPEED_SUPER;
+ maxp = 1024;
+ burst = 8;
+ }
+ else if(!strcmp(argv[1], "hs")){
+ printk(KERN_ERR "[DEV]Reset device to high speed\n");
+ speed = DEV_SPEED_HIGH;
+ maxp = 512;
+ burst = 0;
+ }
+ else if(!strcmp(argv[1], "fs")){
+ printk(KERN_ERR "[DEV]Reset device to full speed\n");
+ speed = DEV_SPEED_FULL;
+ maxp = 64;
+ burst = 0;
+ }
+ }
+
+ if(argc > 2){
+ maxp = (int)simple_strtol(argv[2], &argv[2], 10);
+ printk(KERN_ERR "maxp = %d\n", maxp);
+ }
+
+ start_port_reenabled(0, speed);
+ ret=dev_reset(speed, NULL);
+ if(ret){
+ printk(KERN_ERR "device reset failed!!!!!!!!!!\n");
+ return ret;
+ }
+
+ ret = f_disable_slot();
+ if(ret){
+ printk(KERN_ERR "disable slot failed!!!!!!!!!!\n");
+ return ret;
+ }
+
+ ret = f_reenable_port(0);
+ if(ret != RET_SUCCESS){
+ printk(KERN_ERR "device reenable failed!!!!!!!!!!\n");
+ return ret;
+ }
+
+ ret = f_enable_slot(NULL);
+ if(ret){
+ printk(KERN_ERR "enable slot failed!!!!!!!!!!\n");
+ return ret;
+ }
+
+ ret=f_address_slot(false, NULL);
+ if(ret){
+ printk(KERN_ERR "address slot failed!!!!!!!!!!\n");
+ return ret;
+ }
+
+ /* ==phase 1 : config EP==*/
+ ret=dev_config_ep(ep_out_num,USB_RX,transfer_type,maxp,bInterval,mult_dev,burst,mult,NULL);
+ if(ret)
+ {
+ printk(KERN_ERR "config dev EP fail!!\n");
+ return ret;
+ }
+ ret=dev_config_ep(ep_in_num,USB_TX,transfer_type,maxp,bInterval,mult_dev,burst,mult,NULL);
+ if(ret)
+ {
+ printk(KERN_ERR "config dev EP fail!!\n");
+ return ret;
+ }
+
+ ret = f_config_ep(ep_out_num,EPADD_OUT,transfer_type,maxp,bInterval,burst,mult, NULL,0);
+ if(ret)
+ {
+ printk(KERN_ERR "config EP fail!!\n");
+ return ret;
+ }
+
+ ret = f_config_ep(ep_in_num,EPADD_IN,transfer_type,maxp,bInterval,burst,mult, NULL,1);
+ if(ret)
+ {
+ printk(KERN_ERR "config EP fail!!\n");
+ return ret;
+ }
+
+ ret=dev_loopback(bdp,length,gpd_buf_size,bd_buf_size,0,0,NULL);
+ if(ret)
+ {
+ printk(KERN_ERR "loopback request fail!!\n");
+ f_power_suspend();
+ return ret;
+ }
+
+ /* queue 2 URB, the first is false, and doesn't ring doorbell */
+
+ iso_num_packets = 0;
+ xhci = hcd_to_xhci(my_hcd);
+ dev = xhci_to_hcd(xhci)->self.controller;/* dma stream buffer */
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+ ep_tx = udev->ep_out[ep_out_num];
+ ep_rx = udev->ep_in[ep_in_num];
+
+ ret = 0;
+ /* first OUT urb */
+ buffer_tx1 = kmalloc(1000, GFP_KERNEL);
+ mapping_tx1 = dma_map_single(dev, buffer_tx1,1000, DMA_BIDIRECTIONAL);
+ urb_tx1 = usb_alloc_urb(iso_num_packets, GFP_KERNEL);
+ f_fill_urb_with_buffer(urb_tx1, ep_out_num, 1000, buffer_tx1
+ , start_add, URB_DIR_OUT, iso_num_packets, max_esit_payload, mapping_tx1, udev);
+ if(ret){
+ xhci_err(xhci, "[ERROR]fill tx urb Error!!\n");
+ return RET_FAIL;
+ }
+
+ g_td_to_noop = true;
+ ret = f_queue_urb(urb_tx1,0,NULL);
+ if(ret){
+ xhci_err(xhci, "[ERROR]queue tx urb Error!!\n");
+ return RET_FAIL;
+ }
+ g_td_to_noop = false;
+
+ /* second OUT urb */
+ buffer_tx2 = kmalloc(length, GFP_KERNEL);
+ mapping_tx2 = dma_map_single(dev, buffer_tx2,length, DMA_BIDIRECTIONAL);
+ urb_tx2 = usb_alloc_urb(iso_num_packets, GFP_KERNEL);
+ f_fill_urb_with_buffer(urb_tx2, ep_out_num, length, buffer_tx2
+ , start_add, URB_DIR_OUT, iso_num_packets, max_esit_payload, mapping_tx2, udev);
+ if(ret){
+ xhci_err(xhci, "[ERROR]fill tx urb Error!!\n");
+ return RET_FAIL;
+ }
+ ret = f_queue_urb(urb_tx2,1,NULL);
+ if(ret){
+ xhci_err(xhci, "[ERROR]queue tx urb Error!!\n");
+ return RET_FAIL;
+ }
+ urb_tx1->transfer_buffer = NULL;
+ urb_tx2->transfer_buffer = NULL;
+ urb_tx1->transfer_dma = NULL;
+ urb_tx2->transfer_dma = NULL;
+ usb_free_urb(urb_tx1);
+ usb_free_urb(urb_tx2);
+
+ /* first IN urb */
+ buffer_rx1 = kmalloc(1000, GFP_KERNEL);
+ memset(buffer_rx1, 0, 1000);
+ mapping_rx1 = dma_map_single(dev, buffer_rx1,1000, DMA_BIDIRECTIONAL);
+ urb_rx1 = usb_alloc_urb(iso_num_packets, GFP_KERNEL);
+ f_fill_urb_with_buffer(urb_rx1, ep_in_num, 1000, buffer_rx1
+ , start_add, URB_DIR_IN, iso_num_packets, max_esit_payload, mapping_rx1,udev);
+ if(ret){
+ xhci_err(xhci, "[ERROR]fill tx urb Error!!\n");
+ return RET_FAIL;
+ }
+ g_td_to_noop = true;
+ ret = f_queue_urb(urb_rx1,0,NULL);
+ if(ret){
+ xhci_err(xhci, "[ERROR]queue rx urb Error!!\n");
+ return RET_FAIL;
+ }
+ g_td_to_noop = false;
+
+ /* second IN urb */
+ buffer_rx2 = kmalloc(length, GFP_KERNEL);
+ memset(buffer_rx2, 0, length);
+ mapping_rx2 = dma_map_single(dev, buffer_rx2,length, DMA_BIDIRECTIONAL);
+ urb_rx2 = usb_alloc_urb(iso_num_packets, GFP_KERNEL);
+ f_fill_urb_with_buffer(urb_rx2, ep_in_num, length, buffer_rx2
+ , start_add, URB_DIR_IN, iso_num_packets, max_esit_payload, mapping_rx2,udev);
+ if(ret){
+ xhci_err(xhci, "[ERROR]fill tx urb Error!!\n");
+ return RET_FAIL;
+ }
+
+ ret = f_queue_urb(urb_rx2,1,NULL);
+ if(ret){
+ xhci_err(xhci, "[ERROR]queue rx urb Error!!\n");
+ return RET_FAIL;
+ }
+
+ urb_rx1->transfer_buffer = NULL;
+ urb_rx1->transfer_dma = NULL;
+ urb_rx2->transfer_buffer = NULL;
+ urb_rx2->transfer_dma = NULL;
+ usb_free_urb(urb_rx1);
+ usb_free_urb(urb_rx2);
+ kfree(buffer_tx1);
+ kfree(buffer_tx2);
+ kfree(buffer_rx1);
+ kfree(buffer_rx2);
+
+ return RET_SUCCESS;
+
+}
+
+static int t_hub_selsuspendss(int argc, char** argv){
+ int hub_num, port_num;
+ int ret;
+
+ hub_num = 1;
+ port_num = 1;
+
+ if(argc > 1){ /* hub number */
+ hub_num = (int)simple_strtol(argv[1], &argv[1], 10);
+ xhci_dbg(xhci, "hub_num set to %d\n", hub_num);
+ }
+ if(argc > 2){ /* port number */
+ port_num = (int)simple_strtol(argv[2], &argv[2], 10);
+ xhci_dbg(xhci, "port_num set to %d\n", port_num);
+ }
+ return f_hub_setportfeature(hub_num, HUB_FEATURE_PORT_LINK_STATE, port_num | (3<<8));
+}
+
+static int t_hub_selsuspend(int argc, char** argv){
+ int hub_num, port_num;
+ int ret;
+ int speed;
+
+ hub_num = 1;
+ port_num = 1;
+ speed = USB_SPEED_HIGH;
+
+ if(argc > 1){ /* hub number */
+ hub_num = (int)simple_strtol(argv[1], &argv[1], 10);
+ xhci_dbg(xhci, "hub_num set to %d\n", hub_num);
+ }
+ if(argc > 2){ /* port number */
+ port_num = (int)simple_strtol(argv[2], &argv[2], 10);
+ xhci_dbg(xhci, "port_num set to %d\n", port_num);
+ }
+ if(argc > 3){
+ if(!strcmp(argv[3], "ss")){
+ printk(KERN_ERR "SUPERSPEED\n");
+ speed = USB_SPEED_SUPER;
+ }
+ }
+ if(speed == USB_SPEED_HIGH){
+ return f_hub_setportfeature(hub_num, HUB_FEATURE_PORT_SUSPEND, port_num);
+ }
+ else if(speed == USB_SPEED_SUPER){
+ return f_hub_setportfeature(hub_num, HUB_FEATURE_PORT_LINK_STATE, port_num | (3<<8));
+ }
+ else{
+ printk(KERN_ERR "Error speed value\n");
+ }
+}
+
+
+static int t_hub_selresume(int argc, char** argv){
+
+ int hub_num, port_num;
+ int ret;
+ int speed;
+
+ hub_num = 1;
+ port_num = 1;
+ speed = USB_SPEED_HIGH;
+
+ if(argc > 1){ /* hub number */
+ hub_num = (int)simple_strtol(argv[1], &argv[1], 10);
+ xhci_dbg(xhci, "hub_num set to %d\n", hub_num);
+ }
+ if(argc > 2){ /* port number */
+ port_num = (int)simple_strtol(argv[2], &argv[2], 10);
+ xhci_dbg(xhci, "port_num set to %d\n", port_num);
+ }
+ if(argc > 3){
+ if(!strcmp(argv[3], "ss")){
+ printk(KERN_ERR "SUPERSPEED\n");
+ speed = USB_SPEED_SUPER;
+ }
+ }
+ if(speed == USB_SPEED_HIGH){
+ return f_hub_clearportfeature(hub_num, HUB_FEATURE_PORT_SUSPEND, port_num);
+ }
+ else if(speed == USB_SPEED_SUPER){
+ return f_hub_setportfeature(hub_num, HUB_FEATURE_PORT_LINK_STATE, port_num);
+ }
+ else{
+ printk(KERN_ERR "Error speed value\n");
+ }
+}
+
+static int t_hub_configurehub(int argc, char** argv){
+ int ret;
+ struct xhci_hcd *xhci;
+ struct usb_device *udev, *rhdev;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_command *config_cmd;
+ struct xhci_input_control_ctx *ctrl_ctx;
+ struct xhci_slot_ctx *slot_ctx;
+ int i;
+ int port_index;
+ struct xhci_port *port;
+ port_index = 0;
+
+ if(my_hcd == NULL){
+ printk(KERN_ERR "my_hcd is NULL\n");
+ return RET_FAIL;
+ }
+ if(argc > 1){
+ port_index = (int)simple_strtol(argv[1], &argv[1], 10);
+ xhci_dbg(xhci, "port_index set to %d\n", port_index);
+ }
+ xhci = hcd_to_xhci(my_hcd);
+ ret = f_enable_port(port_index);
+ if(ret != RET_SUCCESS){
+ xhci_err(xhci, "[ERROR] enable port failed\n");
+ return RET_FAIL;
+ }
+ port = rh_port[port_index];
+ xhci_err(xhci, "Port[%d] speed: %d\n", port_index, port->port_speed);
+ ret = f_enable_slot(NULL);
+ if(ret != RET_SUCCESS){
+ xhci_err(xhci, "[ERROR] enable slot failed\n");
+ return RET_FAIL;
+ }
+ ret = f_address_slot(false, NULL);
+ if(ret != RET_SUCCESS){
+ xhci_err(xhci, "[ERROR] address device failed\n");
+ return RET_FAIL;
+ }
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[port->port_id-1];
+ virt_dev = xhci->devs[udev->slot_id];
+ g_slot_id = 0;
+ hdev_list[port_index] = udev;
+ f_update_hub_device(udev, 4);
+ if(f_hub_configep(port_index+1, port_index) != RET_SUCCESS){
+ xhci_err(xhci, "config hub endpoint failed\n");
+ return RET_FAIL;
+ }
+ xhci_dbg_slot_ctx(xhci, virt_dev->out_ctx);
+ /* set port_power */
+ for(i=1; i<=4; i++){
+ if(f_hub_setportfeature((port_index+1), HUB_FEATURE_PORT_POWER, i) != RET_SUCCESS){
+ xhci_err(xhci, "[ERROR] set port_power 1 failed\n");
+ return RET_FAIL;
+ }
+ }
+ /* clear C_PORT_CONNECTION */
+ for(i=1; i<=4; i++){
+ if(f_hub_clearportfeature((port_index+1), HUB_FEATURE_C_PORT_CONNECTION, i) != RET_SUCCESS){
+ xhci_err(xhci, "[ERROR] clear c_port_connection failed\n");
+ }
+ }
+
+ return RET_SUCCESS;
+}
+
+static int t_hub_ixia_stress(int argc, char** argv){
+ struct xhci_hcd *xhci;
+ struct mutex *lock;
+ int dev_count;
+ int i;
+
+ if(argc > 1){
+ dev_count = (int)simple_strtol(argv[1], &argv[1], 10);
+ xhci_dbg(xhci, "dev_count set to %d\n", dev_count);
+ }
+
+ xhci = hcd_to_xhci(my_hcd);
+ for(i=0; i<dev_count; i++){
+ f_add_ixia_thread(xhci,i+1, ix_dev_list[i]);
+ }
+ return RET_SUCCESS;
+}
+
+static int t_hub_loop_stress(int argc, char** argv){
+ struct xhci_hcd *xhci;
+ struct mutex *lock;
+ int dev_count, ep_count;
+ int i,j;
+ struct usb_device *udev;
+ struct usb_host_endpoint *ep;
+ char isEP0;
+ int maxp;
+ int ret;
+
+ isEP0 = false;
+
+ if(argc > 1){
+ dev_count = (int)simple_strtol(argv[1], &argv[1], 10);
+ xhci_dbg(xhci, "dev_count set to %d\n", dev_count);
+ }
+ if(argc > 2){
+ ep_count = (int)simple_strtol(argv[2], &argv[2], 10);
+ xhci_dbg(xhci, "ep_count set to %d\n", ep_count);
+ }
+ if(argc > 3){
+ if(!strcmp(argv[3], "true")){
+ isEP0 = true;
+ }
+ }
+ g_correct = true;
+ for(i=0; i<dev_count; i++){
+ udev = dev_list[i];
+ ret=dev_stress(0,GPD_LENGTH ,GPD_LENGTH,0,ep_count, udev);
+ if(ret){
+ printk(KERN_ERR "stress request failed!!!!!!!!!!\n");
+ return ret;
+ }
+ }
+ for(i=0; i<dev_count; i++){
+ udev = dev_list[i];
+ for(j=1; j<=ep_count; j++){
+ ep = udev->ep_out[j];
+ maxp = ep->desc.wMaxPacketSize & 0x7ff;
+ f_add_str_threads(i,j,maxp,true, udev, isEP0);
+ }
+ }
+ return RET_SUCCESS;
+}
+
+static int t_hub_loop(int argc, char** argv){
+ struct usb_device *udev;
+ int ret,length,start_add;
+ char bdp;
+ int gpd_buf_size,bd_buf_size;
+ int ep_out_num, ep_in_num;
+ int speed;
+ int sg_len;
+ int hub_num, port_num, dev_num, round, i;
+ int dram_offset, extension;
+
+ ret =0;
+ ep_out_num = 1;
+ ep_in_num = 1;
+ length = 65535;
+ round=1;
+ dram_offset = 0;
+ extension = 0;
+
+ if(argc > 1){ /* hub number */
+ hub_num = (int)simple_strtol(argv[1], &argv[1], 10);
+ xhci_dbg(xhci, "hub_num set to %d\n", hub_num);
+ }
+ if(argc > 2){ /* port number */
+ port_num = (int)simple_strtol(argv[2], &argv[2], 10);
+ xhci_dbg(xhci, "port_num set to %d\n", port_num);
+ }
+ if(argc > 3){ /* device number */
+ dev_num = (int)simple_strtol(argv[3], &argv[3], 10);
+ xhci_dbg(xhci, "dev_num set to %d\n", dev_num);
+ }
+ if(argc > 4){
+ length = (int)simple_strtol(argv[4], &argv[4], 10);
+ printk(KERN_ERR "length set to %d\n", length);
+ }
+ if(argc > 5){
+ ep_out_num = (int)simple_strtol(argv[5], &argv[5], 10);
+ printk(KERN_ERR "ep out num set to %d\n", ep_out_num);
+ }
+ if(argc > 6){
+ ep_in_num = (int)simple_strtol(argv[6], &argv[6], 10);
+ printk(KERN_ERR "ep in num set to %d\n", ep_in_num);
+ }
+ if(argc > 7){
+ round = (int)simple_strtol(argv[7], &argv[7], 10);
+ printk(KERN_ERR "Execute %d round\n", round);
+ }
+ if(argc > 8){
+ sg_len = (int)simple_strtol(argv[8], &argv[8], 10);
+ printk(KERN_ERR "sg_len %d\n", sg_len);
+ }
+
+ bdp=0;
+ gpd_buf_size=0xFC00;
+ bd_buf_size=0x1FFF;
+ udev = dev_list[dev_num-1];
+ /* TODO: device should turn off extension length feature */
+
+ if(ep_out_num == 0 || ep_in_num == 0){
+ if(round==1){
+ ret = dev_ctrl_loopback(length, udev);
+ if(ret){
+ printk(KERN_ERR "Control loopback failed!!\n");
+ return ret;
+ }
+ return RET_SUCCESS;
+ }
+ else{
+ for(i=0; i<round; i++){
+ length = get_random_int() % 2048;
+ length = length - (length %4);
+ if(length == 0)
+ length = 2048;
+ printk(KERN_ERR "Loopback control length[%d]\n", length);
+ ret = dev_ctrl_loopback(length, udev);
+ if(ret){
+ printk(KERN_ERR "Control loopback failed!!\n");
+ return ret;
+ }
+ }
+ return RET_SUCCESS;
+ }
+ }
+ if(round == 1){
+ ret=dev_loopback(bdp,length,gpd_buf_size,bd_buf_size,dram_offset,extension,udev);
+ if(ret)
+ {
+ printk(KERN_ERR "loopback request fail!!\n");
+ return ret;
+ }
+ if(sg_len == 0){
+ ret = f_loopback_loop(ep_out_num, ep_in_num, length, start_add,udev);
+ }
+ else{
+ ret = f_loopback_sg_loop(ep_out_num,ep_in_num, length, start_add
+ , sg_len, udev);
+ }
+ ret=dev_polling_status(udev);
+ if(ret)
+ {
+ printk(KERN_ERR "query request fail!!\n");
+ return ret;
+ }
+ return RET_SUCCESS;
+ }
+ for(i=0; i<round; i++){
+ length = (get_random_int() % 3072/*65535 for bulk, 3072 for intr*/) + 1;
+ start_add = (get_random_int() % 63) + 1;
+ sg_len = (get_random_int() % 9) * 8/*512 for bulk, 8 for intr*/;
+ printk(KERN_ERR "Loopback length[%d] start_add[%d] sg_len[%d]\n", length, start_add, sg_len);
+ if((sg_len != 0) && (length/sg_len) > 61){
+ i--;
+ printk(KERN_ERR "SKIP\n");
+ continue;
+ }
+ ret=dev_loopback(bdp,length,gpd_buf_size,bd_buf_size,dram_offset,extension,udev);
+ if(ret)
+ {
+ printk(KERN_ERR "loopback request fail!!\n");
+ return ret;
+ }
+ if(sg_len == 0){
+ ret = f_loopback_loop(ep_out_num, ep_in_num, length, start_add,udev);
+ }
+ else{
+ ret = f_loopback_sg_loop(ep_out_num,ep_in_num, length, start_add
+ , sg_len, udev);
+ }
+ if(ret)
+ {
+ printk(KERN_ERR "loopback fail!!\n");
+ printk(KERN_ERR "length : %d\n",length);
+ return ret;
+ }
+
+ /* ==phase 3: get device status==*/
+ ret=dev_polling_status(udev);
+ if(ret)
+ {
+ printk(KERN_ERR "query request fail!!\n");
+ return ret;
+ }
+ }
+ return RET_SUCCESS;
+}
+
+static int t_hub_configuresubhub(int argc, char** argv){
+ int parent_hub_num, hub_num, port_num;
+ struct xhci_hcd *xhci;
+ xhci = hcd_to_xhci(my_hcd);
+ parent_hub_num = 1;
+ hub_num = parent_hub_num+1;
+
+ if(argc > 1){
+ parent_hub_num = (int)simple_strtol(argv[1], &argv[1], 10);
+ xhci_dbg(xhci, "parent_hub_num set to %d\n", parent_hub_num);
+ }
+ if(argc > 2){
+ hub_num = (int)simple_strtol(argv[2], &argv[2], 10);
+ xhci_dbg(xhci, "hub_num set to %d\n", hub_num);
+ }
+ if(argc > 3){
+ port_num = (int)simple_strtol(argv[3], &argv[3], 10);
+ xhci_dbg(xhci, "port_num set to %d\n", port_num);
+ }
+ return f_hub_config_subhub(parent_hub_num, hub_num, port_num);
+}
+
+static int t_hub_configure_eth_device(int argc, char** argv){
+ int hub_num, port_num, dev_num;
+ int ret;
+
+ ret = 0;
+
+ if(argc > 1){ /* hub number */
+ hub_num = (int)simple_strtol(argv[1], &argv[1], 10);
+ xhci_dbg(xhci, "hub_num set to %d\n", hub_num);
+ }
+ if(argc > 2){ /* port number */
+ port_num = (int)simple_strtol(argv[2], &argv[2], 10);
+ xhci_dbg(xhci, "port_num set to %d\n", port_num);
+ }
+ if(argc > 3){ /* device number */
+ dev_num = (int)simple_strtol(argv[3], &argv[3], 10);
+ xhci_dbg(xhci, "dev_num set to %d\n", dev_num);
+ }
+
+ ret = f_hub_configuredevice(hub_num, port_num, dev_num
+ , 0, 0, 0, false, false, 0);
+ if(ret)
+ {
+ printk(KERN_ERR "Config device failed\n");
+ return ret;
+ }
+ ret = f_hub_configure_eth_device(hub_num, port_num, dev_num);
+ if(ret)
+ {
+ printk(KERN_ERR "Config eth device failed\n");
+ return ret;
+ }
+ return ret;
+}
+
+static int t_hub_configuredevice(int argc, char** argv){
+ int hub_num, port_num, dev_num;
+ int transfer_type, bInterval, maxp;
+ char is_config_ep, is_stress;
+ int stress_config;
+ int ret;
+
+ hub_num = 1;
+ port_num = 1;
+ dev_num = 1;
+ transfer_type = EPATT_BULK;
+ bInterval = 0;
+ maxp = 512;
+ is_config_ep = true;
+ is_stress = false;
+
+ if(argc > 1){ /* hub number */
+ hub_num = (int)simple_strtol(argv[1], &argv[1], 10);
+ xhci_dbg(xhci, "hub_num set to %d\n", hub_num);
+ }
+ if(argc > 2){ /* port number */
+ port_num = (int)simple_strtol(argv[2], &argv[2], 10);
+ xhci_dbg(xhci, "port_num set to %d\n", port_num);
+ }
+ if(argc > 3){ /* device number */
+ dev_num = (int)simple_strtol(argv[3], &argv[3], 10);
+ xhci_dbg(xhci, "dev_num set to %d\n", dev_num);
+ }
+ if(argc > 4){ /* transfer type */
+ if(!strcmp(argv[4], "bulk")){
+ transfer_type = EPATT_BULK;
+ xhci_dbg(xhci, "transfer type set to BULK\n");
+ }
+ else if(!strcmp(argv[4], "intr")){
+ xhci_dbg(xhci, "transfer type set to INTR\n");
+ transfer_type = EPATT_INT;
+ }
+ else if(!strcmp(argv[4], "iso")){
+ xhci_dbg(xhci, "transfer type set to ISO\n");
+ transfer_type = EPATT_ISO;
+ }
+ else if(!strcmp(argv[4], "stress1")){
+ xhci_dbg(xhci, "transfer type set to STRESS1 BULK+INT\n");
+ is_stress = true;
+ stress_config = 1;
+ is_config_ep = false;
+ }
+ else if(!strcmp(argv[4], "stress2")){
+ xhci_dbg(xhci, "transfer type set to STRESS2 BULK_ISO\n");
+ is_stress = true;
+ stress_config = 2;
+ is_config_ep = false;
+ }
+ }
+ else{
+ is_config_ep = false;
+ }
+ if(argc > 5){ /* maxp */
+ maxp = (int)simple_strtol(argv[5], &argv[5], 10);
+ xhci_dbg(xhci, "maxp set to %d\n", maxp);
+ }
+ if(argc > 6){ /* interval */
+ bInterval= (int)simple_strtol(argv[6], &argv[6], 10);
+ xhci_dbg(xhci, "bInterval set to %d\n", bInterval);
+ }
+
+ return f_hub_configuredevice(hub_num, port_num, dev_num
+ , transfer_type, maxp, bInterval, is_config_ep, is_stress, stress_config);
+}
+
+static int t_hub_reset_dev(int argc, char** argv){
+ struct xhci_hcd *xhci;
+ int hub_num, port_num, dev_num;
+ int transfer_type, bInterval, maxp;
+ struct usb_device *udev;
+ USB_DEV_SPEED speed;
+ int ret;
+
+ hub_num = 1;
+ port_num = 1;
+ dev_num = 1;
+ ret = 0;
+ xhci = hcd_to_xhci(my_hcd);
+ if(argc > 1){ /* hub number */
+ hub_num = (int)simple_strtol(argv[1], &argv[1], 10);
+ xhci_dbg(xhci, "hub_num set to %d\n", hub_num);
+ }
+ if(argc > 2){ /* port number */
+ port_num = (int)simple_strtol(argv[2], &argv[2], 10);
+ xhci_dbg(xhci, "port_num set to %d\n", port_num);
+ }
+ if(argc > 3){ /* device number */
+ dev_num = (int)simple_strtol(argv[3], &argv[3], 10);
+ xhci_dbg(xhci, "dev_num set to %d\n", dev_num);
+ }
+ if(argc > 4){
+ if(!strcmp(argv[4], "hs")){
+ printk(KERN_ERR "[DEV]Reset device to high speed\n");
+ speed = DEV_SPEED_HIGH;
+ }
+ else if(!strcmp(argv[4], "fs")){
+ printk(KERN_ERR "[DEV]Reset device to full speed\n");
+ speed = DEV_SPEED_FULL;
+ }
+ }
+
+ udev = dev_list[dev_num-1];
+ g_slot_id = udev->slot_id;
+ ret = dev_reset(speed,udev);
+ if(f_hub_clearportfeature(hub_num, HUB_FEATURE_PORT_POWER, dev_num) != RET_SUCCESS){
+ xhci_err(xhci, "[ERROR] clear port_power %d failed\n", dev_num);
+ return RET_FAIL;
+ }
+ mdelay(500);
+ if(f_hub_setportfeature(hub_num, HUB_FEATURE_PORT_POWER, dev_num) != RET_SUCCESS){
+ xhci_err(xhci, "[ERROR] set port_power %d failed\n", dev_num);
+ return RET_FAIL;
+ }
+ if(f_hub_clearportfeature(hub_num, HUB_FEATURE_C_PORT_CONNECTION, dev_num) != RET_SUCCESS){
+ xhci_err(xhci, "[ERROR] clear c_port_connection failed\n");
+ }
+ f_disable_slot();
+ kfree(udev);
+ dev_list[dev_num-1] = NULL;
+ return RET_SUCCESS;
+}
+
+static int t_hub_queue_intr(int argc, char** argv){
+ u32 status;
+ int ret;
+ struct usb_device *hdev;
+ struct xhci_hcd *xhci;
+ struct urb *urb;
+ struct usb_host_endpoint *ep;
+ int ep_index, data_length;
+
+ xhci = hcd_to_xhci(my_hcd);
+ hdev = hdev_list[0];
+ ep = hdev->ep_in[1];
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ data_length = 2;
+
+ ret = f_fill_urb(urb,1,data_length,0,URB_DIR_IN, 0, 2, hdev);
+ if(ret){
+ xhci_err(xhci, "[ERROR]fill urb Error!!\n");
+ return RET_FAIL;
+ }
+ ret = f_queue_urb(urb,0,hdev);
+ if(ret){
+ xhci_err(xhci, "[ERROR]urb transfer failed!!\n");
+ return RET_FAIL;
+ }
+ return RET_SUCCESS;
+}
+
+static int t_hub_remotewakeup_dev(int argc, char** argv){
+ int hub_num, port_num, dev_num;
+ int transfer_type, bInterval, maxp;
+ struct usb_device *udev;
+ USB_DEV_SPEED speed;
+ int ret;
+ int length;
+ int bdp, gpd_buf_size, bd_buf_size;
+
+ bdp=0;
+ gpd_buf_size=0xFC00;
+ bd_buf_size=0x1FFF;
+ length=513;
+ hub_num = 1;
+ port_num = 1;
+ dev_num = 1;
+ ret = 0;
+
+ if(argc > 1){ /* hub number */
+ hub_num = (int)simple_strtol(argv[1], &argv[1], 10);
+ xhci_dbg(xhci, "hub_num set to %d\n", hub_num);
+ }
+ if(argc > 2){ /* port number */
+ port_num = (int)simple_strtol(argv[2], &argv[2], 10);
+ xhci_dbg(xhci, "port_num set to %d\n", port_num);
+ }
+ if(argc > 3){ /* device number */
+ dev_num = (int)simple_strtol(argv[3], &argv[3], 10);
+ xhci_dbg(xhci, "dev_num set to %d\n", dev_num);
+ }
+ if(argc > 4){
+ length = (int)simple_strtol(argv[4], &argv[4], 10);
+ printk(KERN_ERR "length set to %d\n", length);
+ }
+
+ udev = dev_list[dev_num-1];
+ g_slot_id = udev->slot_id;
+ return dev_remotewakeup(length, udev);
+}
+
+#define HUB_FEATURE_PORT_U1_TIMEOUT 23
+#define HUB_FEATURE_PORT_U2_TIMEOUT 24
+
+static int t_hub_set_u1u2(int argc, char** argv){
+ int u_num, value1, value2, value;
+ int feature_selector;
+ int port_num;
+
+ u_num = 1;
+ value1 = 1;
+ value2 = 1;
+ value = 1;
+ port_num = 1;
+
+
+ if(argc > 1){
+ u_num = (int)simple_strtol(argv[1], &argv[1], 10);
+ xhci_dbg(xhci, "u_num set to %d\n", u_num);
+ }
+ if(argc > 2){
+ value1 = (int)simple_strtol(argv[2], &argv[2], 10);
+ xhci_dbg(xhci, "value1 set to %d\n", value1);
+ }
+ if(argc > 3){
+ value2 = (int)simple_strtol(argv[3], &argv[3], 10);
+ xhci_dbg(xhci, "value2 set to %d\n", value2);
+ }
+ if(argc > 4){
+ port_num = (int)simple_strtol(argv[4], &argv[4], 10);
+ xhci_dbg(xhci, "port_num set to %d\n", port_num);
+ }
+
+ if(u_num == 1){
+ feature_selector = HUB_FEATURE_PORT_U1_TIMEOUT;
+ value = value1;
+ }
+ else if(u_num == 2){
+ feature_selector = HUB_FEATURE_PORT_U2_TIMEOUT;
+ value = value2;
+ }
+
+ f_hub_setportfeature(1, feature_selector, (port_num | (value<<8)));
+ return RET_SUCCESS;
+}
+
+#define HUB_FEATURE_FORCE_LINKPM_ACCEPT 30
+static int t_hub_force_pm(int argc, char** argv){
+ f_hub_sethubfeature(1, HUB_FEATURE_FORCE_LINKPM_ACCEPT);
+ return RET_SUCCESS;
+}
+
+static int t_dev_notification(int argc, char** argv){
+ int ret;
+ int type;
+ int type_value;
+ int value;
+
+ ret = 0;
+ type = 1;
+ value = 1;
+
+ if(!g_port_reset){
+ printk(KERN_ERR "[ERROR] device not reset\n");
+ return RET_FAIL;
+ }
+ if(g_slot_id == 0){
+ printk(KERN_ERR "[ERROR] slot not enabled\n");
+ return RET_FAIL;
+ }
+ if(argc > 1){
+ /* [DEV_NOTIF_FUNC_WAKE: 1]; [DEV_NOTIF_LTM: 2]; */
+ /* [DEV_NOTIF_BUS_INT_ADJ: 3];[VENDOR_DEV_TEST: 4] */
+ type = (int)simple_strtol(argv[1], &argv[1], 10);
+ }
+ if(argc > 2){
+ value = (int)simple_strtol(argv[2], &argv[2], 10);
+ }
+ f_enable_dev_note();
+ g_dev_notification = 256;
+ g_dev_not_value = 0;
+
+ /* TODO: device notification interface has been modified */
+ ret = dev_notifiaction(type,value & 0xffffffff,0/*(value >> 32) & 0xffffffff*/);
+
+ if(ret)
+ {
+ printk(KERN_ERR "set device notification failed!!\n");
+ return ret;
+ }
+ ret=dev_polling_status(NULL);
+ if(ret)
+ {
+ printk(KERN_ERR "query request fail!!\n");
+ return ret;
+ }
+ ret = wait_event_on_timeout(&g_dev_notification, type,CMD_TIMEOUT);
+ if(ret){
+ printk(KERN_ERR "device notification event not received!!\n");
+ return ret;
+ }
+ if(g_dev_not_value != value){
+ printk(KERN_ERR "device notification value error, expected %d, recived %ld!!\n", value, g_dev_not_value);
+ return RET_FAIL;
+ }
+ return ret;
+}
+
+static int t_dev_init(int argc, char** argv){
+
+ if(u3auto_hcd_reset() != RET_SUCCESS)
+ return RET_FAIL;
+ else
+ return RET_SUCCESS;
+}
+
+static int t_dev_u1u2(int argc, char** argv){
+ int ret;
+ int value1, value2;
+ char en_u1, en_u2;
+ int dev_num;
+ int mode;
+ struct usb_device *udev;
+
+ value1 = 0;
+ value2 = 0;
+ en_u1 = 1;
+ en_u2 = 1;
+ udev = NULL;
+ mode = 0;
+
+ if(argc > 1){
+ value1 = (int)simple_strtol(argv[1], &argv[1], 10);
+ xhci_dbg(xhci, "u1 value set to %d\n", value1);
+ }
+ if(argc > 2){
+ value2 = (int)simple_strtol(argv[2], &argv[2], 10);
+ xhci_dbg(xhci, "u2 value set to %d\n", value2);
+ }
+ if(argc > 3){
+ en_u1 = (int)simple_strtol(argv[3], &argv[3], 10);
+ xhci_dbg(xhci, "en_u1 set to %d\n", en_u1);
+ }
+ if(argc > 4){
+ en_u2 = (int)simple_strtol(argv[4], &argv[4], 10);
+ xhci_dbg(xhci, "en_u2 set to %d\n", en_u2);
+ }
+ if(argc > 5){
+ dev_num = (int)simple_strtol(argv[5], &argv[5], 10);
+ udev = dev_list[dev_num-1];
+ }
+ if(argc > 6){
+ mode = (int)simple_strtol(argv[6], &argv[6], 10);
+ }
+ return dev_power(mode, value1, value2, en_u1, en_u2, udev);
+}
+
+static int t_dev_lpm(int argc, char** argv){
+ int ret;
+ int lpm_mode, wakeup, beslck, beslck_u3, beslckd, cond, cond_en;
+
+ lpm_mode = 0;
+ wakeup = 0;
+ beslck = 0;
+ beslck_u3 = 0;
+ beslckd = 0;
+ cond = 0;
+ cond_en = 0;
+
+ if(argc > 1){
+ lpm_mode = (int)simple_strtol(argv[1], &argv[1], 10);
+ }
+ if(argc > 2){
+ wakeup = (int)simple_strtol(argv[2], &argv[2], 10);
+ }
+ if(argc > 3){
+ beslck = (int)simple_strtol(argv[3], &argv[3], 10);
+ }
+ if(argc > 4){
+ beslck_u3 = (int)simple_strtol(argv[4], &argv[4], 10);
+ }
+ if(argc > 5){
+ beslckd = (int)simple_strtol(argv[5], &argv[5], 10);
+ }
+ if(argc > 6){
+ cond = (int)simple_strtol(argv[6], &argv[6], 10);
+ }
+ if(argc > 7){
+ cond_en = (int)simple_strtol(argv[7], &argv[7], 10);
+ }
+
+	dev_lpm_config(lpm_mode, wakeup, beslck, beslck_u3, beslckd, cond, cond_en); return 0;
+}
+
+static int t_dev_reset(int argc, char** argv){
+ USB_DEV_SPEED speed;
+ int ret;
+
+ ret = 0;
+ if(!g_port_reset){
+ printk(KERN_ERR "[ERROR] device not reset\n");
+ return RET_FAIL;
+ }
+ if(g_slot_id == 0){
+ printk(KERN_ERR "[ERROR] slot not enabled\n");
+ return RET_FAIL;
+ }
+ speed = DEV_SPEED_HIGH;
+ if(argc > 1){
+ if(!strcmp(argv[1], "ss")){
+ printk(KERN_ERR "[DEV]Reset device to super speed\n");
+ speed = DEV_SPEED_SUPER;
+ }
+		else if(!strcmp(argv[1], "hs")){
+ printk(KERN_ERR "[DEV]Reset device to high speed\n");
+ speed = DEV_SPEED_HIGH;
+ }
+ else if(!strcmp(argv[1], "fs")){
+ printk(KERN_ERR "[DEV]Reset device to full speed\n");
+ speed = DEV_SPEED_FULL;
+ }
+ }
+ start_port_reenabled(0, speed);
+ ret = dev_reset(speed,NULL);
+ if(ret){
+ printk(KERN_ERR "[ERROR] reset device failed\n");
+ return RET_FAIL;
+ }
+ ret = f_disable_slot();
+ if(ret){
+ printk(KERN_ERR "disable slot failed!!!!!!!!!!\n");
+ return ret;
+ }
+ ret = f_reenable_port(0);
+ if(ret){
+ printk(KERN_ERR "[ERROR] port reenable failed\n");
+ return RET_FAIL;
+ }
+ ret = f_enable_slot(NULL);
+ if(ret){
+ printk(KERN_ERR "enable slot failed!!!!!!!!!!\n");
+ return ret;
+ }
+
+ ret=f_address_slot(false, NULL);
+ if(ret){
+ printk(KERN_ERR "address slot failed!!!!!!!!!!\n");
+ return ret;
+ }
+ /* reset SW scheduler algorithm */
+ mtk_xhci_scheduler_init();
+ return ret;
+}
+
+static int t_dev_query_status(int argc, char** argv){
+ if(!g_port_reset){
+ printk(KERN_ERR "[ERROR] device not reset\n");
+ return RET_FAIL;
+ }
+ if(g_slot_id == 0){
+ printk(KERN_ERR "[ERROR] slot not enabled\n");
+ return RET_FAIL;
+ }
+ return dev_query_status(NULL);
+}
+
+static int t_dev_polling_status(int argc, char** argv){
+ if(!g_port_reset){
+ printk(KERN_ERR "[ERROR] device not reset\n");
+ return RET_FAIL;
+ }
+ if(g_slot_id == 0){
+ printk(KERN_ERR "[ERROR] slot not enabled\n");
+ return RET_FAIL;
+ }
+ return dev_polling_status(NULL);
+}
+
+static int t_dev_config_ep(int argc, char** argv){
+ char ep_num;
+ char dir;
+ char type;
+ short int maxp;
+ char bInterval;
+ int mult_dev, burst, mult;
+
+ if(!g_port_reset){
+ printk(KERN_ERR "[ERROR] device not reset\n");
+ return RET_FAIL;
+ }
+ if(g_slot_id == 0){
+ printk(KERN_ERR "[ERROR] slot not enabled\n");
+ return RET_FAIL;
+ }
+ /* ep_out_num,USB_RX,transfer_type,maxp,bInterval */
+ ep_num = 1;
+ dir= USB_TX;
+ type=EPATT_BULK;
+ maxp=512;
+ bInterval=0;
+ mult_dev = 1;
+ burst = 8;
+ mult = 0;
+
+ if(argc > 1){
+ ep_num = (int)simple_strtol(argv[1], &argv[1], 10);
+ printk(KERN_ERR "ep_num set to %d\n", ep_num);
+ }
+ if(argc > 2){
+ if(!strcmp(argv[2], "tx")){
+ printk(KERN_ERR "TX endpoint\n");
+ dir = USB_TX;
+ }
+ else if(!strcmp(argv[2], "rx")){
+ printk(KERN_ERR "RX endpoint\n");
+ dir = USB_RX;
+ }
+ }
+ if(argc > 3){
+ if(!strcmp(argv[3], "bulk")){
+ printk(KERN_ERR "Test bulk transfer\n");
+ type = EPATT_BULK;
+ }
+ else if(!strcmp(argv[3], "intr")){
+ printk(KERN_ERR "Test intr transfer\n");
+ type = EPATT_INT;
+ }
+ else if(!strcmp(argv[3], "isoc")){
+ printk(KERN_ERR "Test isoc transfer\n");
+ type = EPATT_ISO;
+ }
+ }
+ if(argc > 4){
+ maxp = (int)simple_strtol(argv[4], &argv[4], 10);
+ printk(KERN_ERR "maxp set to %d\n", maxp);
+ }
+ if(argc > 5){
+ bInterval = (int)simple_strtol(argv[5], &argv[5], 10);
+ printk(KERN_ERR "bInterval set to %d\n", bInterval);
+ }
+ if(argc > 6){
+ burst = (int)simple_strtol(argv[6], &argv[6], 10);
+ printk(KERN_ERR "burst set to %d\n", burst);
+ }
+ if(argc > 7){
+ mult = (int)simple_strtol(argv[7], &argv[7], 10);
+ printk(KERN_ERR "mult set to %d\n", mult);
+ }
+ return dev_config_ep(ep_num, dir, type, maxp, bInterval,mult_dev,burst,mult,NULL);
+}
+
+static int t_dev_remotewakeup(int argc, char** argv){
+	int delay_us = 0;
+ int bdp, gpd_buf_size, bd_buf_size;
+
+ bdp=0;
+ gpd_buf_size=0xFC00;
+ bd_buf_size=0x1FFF;
+
+ if(!g_port_reset){
+ printk(KERN_ERR "[ERROR] device not reset\n");
+ return RET_FAIL;
+ }
+ if(g_slot_id == 0){
+ printk(KERN_ERR "[ERROR] slot not enabled\n");
+ return RET_FAIL;
+ }
+ if(argc > 1){
+ delay_us = (int)simple_strtol(argv[1], &argv[1], 10);
+ printk(KERN_ERR "Device delay %d us\n", delay_us);
+ }
+ return dev_remotewakeup(delay_us, NULL);
+}
+
+/* Test concurrently resume case */
+static int t_u3auto_concurrent_remotewakeup(int argc, char** argv){
+ int ret,loop,length,j,i;
+ char bdp;
+ short gpd_buf_size,bd_buf_size;
+ int speed,host_speed;
+ int maxp, count,mult;
+ uint rand_num;
+	uint dev_delay_us, host_delay_ms = 0;
+
+ struct xhci_hcd *xhci;
+ u32 __iomem *addr;
+ int temp;
+ int delay_time;
+
+ xhci = hcd_to_xhci(my_hcd);
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((g_port_id-1) & 0xff);
+
+ ret = 0;
+
+ speed = DEV_SPEED_HIGH;
+ count = 100;
+ delay_time = 40;
+ if(argc > 1){
+ if(!strcmp(argv[1], "ss")){
+ printk(KERN_ERR "Test super speed\n");
+ speed = DEV_SPEED_SUPER;
+ }
+ else if(!strcmp(argv[1], "hs")){
+ printk(KERN_ERR "Test high speed\n");
+ speed = DEV_SPEED_HIGH;
+ }
+ else if(!strcmp(argv[1], "fs")){
+ printk(KERN_ERR "Test full speed\n");
+ speed = DEV_SPEED_FULL;
+ }
+ }
+
+ if(argc > 2){
+ count = (int)simple_strtol(argv[2], &argv[2], 10);
+ }
+ if(argc > 3){
+ delay_time = (int)simple_strtol(argv[3], &argv[3], 10);
+ }
+
+ maxp = 1024;
+ g_concurrent_resume = true;
+ /* ==phase 1 : config EP==*/
+
+ for(i=0;i<count;i++)
+ {
+ dev_delay_us = 0;
+
+ printk(KERN_ERR "count: %d, host_delay: %d, dev_delay: %d\n", i, host_delay_ms, dev_delay_us);
+
+ ret = dev_remotewakeup(dev_delay_us, NULL);
+ if(ret)
+ {
+ printk(KERN_ERR "remote wakeup request fail!!\n");
+ g_concurrent_resume = false;
+ return ret;
+ }
+
+ ret = f_power_suspend();
+ if(ret)
+ {
+ printk(KERN_ERR "suspend fail!!!\n");
+ }
+
+ /* enhanced remote wakeup test logic */
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((g_port_id-1) & 0xff);
+ mdelay(10);
+ udelay(delay_time);
+
+ ret=f_power_resume(0);
+ if(ret)
+ {
+ printk(KERN_ERR "wakeup fail!!\n");
+ g_concurrent_resume = false;
+ return ret;
+ }
+
+ mdelay(500);
+ }
+ g_concurrent_resume = false;
+ return ret;
+}
+
+static int t_u3auto_concurrent_u1u2_exit(int argc, char** argv){
+ int ret,loop,length,j,i;
+ char bdp;
+ short gpd_buf_size,bd_buf_size;
+ int speed,host_speed;
+ int maxp, count,mult;
+ uint rand_num;
+ struct xhci_hcd *xhci;
+ u32 __iomem *addr;
+ int temp;
+	int dev_delay_time = 0, host_delay_time = 0, do_ux = 0;
+ int port_id;
+
+ struct usb_ctrlrequest *dr;
+ struct usb_device *udev, *rhdev;
+ struct urb *urb;
+ struct protocol_query *query;
+
+ xhci = hcd_to_xhci(my_hcd);
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((g_port_id-1) & 0xff);
+
+ ret = 0;
+
+ speed = DEV_SPEED_SUPER;
+ count = 100;
+ port_id = 1;
+
+ if(argc > 1){
+ count = (int)simple_strtol(argv[1], &argv[1], 10);
+ }
+ if(argc > 2){
+ dev_delay_time = (int)simple_strtol(argv[2], &argv[2], 10);
+ }
+ if(argc > 3){
+ host_delay_time = (int)simple_strtol(argv[3], &argv[3], 10);
+ }
+ if(argc > 4){
+ do_ux = (int)simple_strtol(argv[4], &argv[4], 10);
+ }
+ if(argc > 5){
+ port_id = (int)simple_strtol(argv[5], &argv[5], 10);
+ }
+ for(i=0; i<count; i++){
+ ret = dev_remotewakeup(dev_delay_time, NULL);
+ if(ret)
+ {
+ printk(KERN_ERR "remote wakeup request fail!!\n");
+ return ret;
+ }
+ mdelay(100);
+ ret = f_port_set_pls(port_id, do_ux);
+ if(ret){
+ continue;
+ }
+ udelay(host_delay_time);
+ ret = f_port_set_pls(1, 0);
+ }
+
+ return RET_SUCCESS;
+}
+
+static int t_u3auto_concurrent_u1u2_enter(int argc, char** argv){
+ int ret,loop,length,j,i;
+ char bdp;
+ short gpd_buf_size,bd_buf_size;
+ int speed,host_speed;
+ int maxp, count,mult;
+ uint rand_num;
+ uint dev_delay_ms, host_delay_ms;
+	uint do_ux = 0;
+ struct xhci_hcd *xhci;
+ u32 __iomem *addr;
+ int temp;
+ int delay_time, dev_u1_delay, dev_u2_delay;
+
+ struct usb_ctrlrequest *dr;
+ struct usb_device *udev, *rhdev;
+ struct urb *urb;
+ struct protocol_query *query;
+
+ xhci = hcd_to_xhci(my_hcd);
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((g_port_id-1) & 0xff);
+
+ ret = 0;
+
+ speed = DEV_SPEED_SUPER;
+ count = 100;
+ delay_time = 40;
+
+ dev_u1_delay = 0;
+ dev_u2_delay = 0;
+
+ if(argc > 1){
+ count = (int)simple_strtol(argv[1], &argv[1], 10);
+ }
+ if(argc > 2){
+ delay_time = (int)simple_strtol(argv[2], &argv[2], 10);
+ }
+ if(argc > 3){
+ dev_u1_delay= (int)simple_strtol(argv[3], &argv[3], 10);
+ }
+ if(argc > 4){
+ dev_u2_delay= (int)simple_strtol(argv[4], &argv[4], 10);
+ }
+ if(argc > 5){
+ do_ux = (int)simple_strtol(argv[5], &argv[5], 10);
+ }
+
+ maxp = 1024;
+
+ /* set device u1u2 timeout */
+ if(dev_u1_delay > 0){
+ dev_power(1, dev_u1_delay, 0, 1, 1, NULL);
+ }
+ else if(dev_u2_delay > 0){
+ dev_power(2, 0, dev_u2_delay, 1, 1, NULL);
+ }
+ else{
+ printk(KERN_ERR "[ERROR] Doesn't set device u1 or u2 timeout value\n");
+ return RET_FAIL;
+ }
+ /* host side only set accept u1u2 */
+ f_power_set_u1u2(3, 255, 255);
+ msleep(5);
+
+ for(i=0; i<count; i++){
+ /* ==phase 1 : do some transfer to back to U0== */
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+
+
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ query= kmalloc(AT_CMD_ACK_DATA_LENGTH, GFP_NOIO);
+
+ memset(query, 0, AT_CMD_ACK_DATA_LENGTH);
+
+		dr->bRequestType = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ dr->bRequest = AT_CMD_ACK;
+ dr->wValue = cpu_to_le16(0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(AT_CMD_ACK_DATA_LENGTH);
+ urb = alloc_ctrl_urb(dr, query, udev);
+ ret = f_ctrlrequest_nowait(urb, udev);
+
+ /* **************************************************/
+ mdelay(3);
+ udelay(delay_time);
+ /* ==phase 2 : transfer U1 or U2 packet== */
+ f_port_set_pls(1, do_ux);
+ udelay(100);
+ if(do_ux == 3){
+ f_port_set_pls(1, 0);
+ }
+
+ wait_not_event_on_timeout(&(urb->status), -EINPROGRESS, TRANS_TIMEOUT);
+ xhci_urb_free_priv(xhci, urb->hcpriv);
+
+ /* * clear some trash left */
+ kfree(dr);
+ kfree(query);
+ usb_free_urb(urb);
+ /* ************************************************/
+ dev_query_status(NULL);
+ }
+ dev_power(0, 0, 0, 1, 1, NULL);
+
+ return RET_SUCCESS;
+}
+
+#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
+static struct xhci_container_ctx *mtk_xhci_alloc_container_ctx(struct xhci_hcd *xhci,
+ int type, gfp_t flags)
+{
+ struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
+ if (!ctx)
+ return NULL;
+
+ BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
+ ctx->type = type;
+ ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
+ if (type == XHCI_CTX_TYPE_INPUT)
+ ctx->size += CTX_SIZE(xhci->hcc_params);
+
+ ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
+	if (!ctx->bytes) { kfree(ctx); return NULL; } memset(ctx->bytes, 0, ctx->size);
+ return ctx;
+}
+
+static int dbg_reg_ewe(int argc, char** argv){
+ struct xhci_hcd *xhci;
+ u32 __iomem *addr;
+ int temp;
+ int ret;
+
+ xhci = hcd_to_xhci(my_hcd);
+ g_mfindex_event = 0;
+ temp = xhci_readl(xhci, &xhci->op_regs->command);
+ temp |= CMD_EWE;
+ xhci_writel(xhci, temp, &xhci->op_regs->command);
+ msleep(3000);
+ if(g_mfindex_event > 0){
+ ret = RET_SUCCESS;
+ }
+ else{
+ ret = RET_FAIL;
+ }
+ temp = xhci_readl(xhci, &xhci->op_regs->command);
+ temp &= ~CMD_EWE;
+ xhci_writel(xhci, temp, &xhci->op_regs->command);
+ return ret;
+}
+
+static int ct_check_hcd(){
+ if(!my_hcd){
+ if(f_test_lib_init()){
+ printk(KERN_ERR "init host controller FAILED\n");
+ return RET_FAIL;
+ }
+ }
+ return RET_SUCCESS;
+}
+
+static int u3ct_lecroy7061(){
+ if(ct_check_hcd()){
+ return RET_FAIL;
+ }
+ printk(KERN_ERR "STEP 1: enable port and slot, address device(BSR=1)...\n");
+ if(f_enable_port(0)){
+ printk(KERN_ERR "enable port FAILED!!\n");
+ return RET_FAIL;
+ }
+ if(f_enable_slot(NULL)){
+ printk(KERN_ERR "enable slot FAILED!!\n");
+ return RET_FAIL;
+ }
+ if(f_address_slot(true, NULL)){
+ printk(KERN_ERR "address device (BSR=1) FAILED!!\n");
+ return RET_FAIL;
+ }
+ return RET_SUCCESS;
+}
+
+static int u3ct_lecroy7062(){
+ int ret;
+ struct usb_device *udev, *rhdev;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_hcd *xhci;
+ struct urb *urb;
+ struct usb_ctrlrequest *dr;
+ struct usb_config_descriptor *desc;
+ int i;
+ char *tmp;
+
+ printk(KERN_ERR "STEP 2: get_descriptor, whether success or failed...\n");
+ ret = 0;
+ if(g_slot_id == 0){
+ printk(KERN_ERR "[ERROR] slot ID not valid\n");
+ return RET_FAIL;
+ }
+
+ xhci = hcd_to_xhci(my_hcd);
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ dr->bRequestType = USB_DIR_IN;
+ dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+ dr->wValue = cpu_to_le16((USB_DT_DEVICE << 8) + 0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(USB_DT_DEVICE_SIZE);
+ desc = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
+ memset(desc, 0, USB_DT_DEVICE_SIZE);
+
+ urb = alloc_ctrl_urb(dr, desc, udev);
+ ret = f_ctrlrequest(urb, udev);
+ if(urb->status == -EINPROGRESS){
+ /* timeout, stop endpoint, set TR dequeue pointer */
+ f_ring_stop_ep(g_slot_id, 0);
+ f_ring_set_tr_dequeue_pointer(g_slot_id, 0, urb);
+ }
+ kfree(dr);
+ kfree(desc);
+ usb_free_urb(urb);
+
+ return ret;
+}
+
+static int u3ct_lecroy718(){
+ printk(KERN_ERR "Reset host controller\n");
+ if(my_hcd){
+ if(f_test_lib_cleanup()){
+ printk(KERN_ERR "cleanup host controller FAILED\n");
+ return RET_FAIL;
+ }
+ }
+ if(f_test_lib_init()){
+ printk(KERN_ERR "init host controller FAILED\n");
+ return RET_FAIL;
+ }
+ printk(KERN_ERR "wait 1 sec\n");
+ msleep(1000);
+
+ printk(KERN_ERR "enable port, slot, address (BSR=1)\n");
+ if(f_enable_port(0)){
+ printk(KERN_ERR "enable port FAILED!!\n");
+ return RET_FAIL;
+ }
+ if(f_enable_slot(NULL)){
+ printk(KERN_ERR "enable slot FAILED!!\n");
+ return RET_FAIL;
+ }
+ if(f_address_slot(true, NULL)){
+ printk(KERN_ERR "address device (BSR=1) FAILED!!\n");
+ return RET_FAIL;
+ }
+
+ printk(KERN_ERR "set u1 timeout value=127\n");
+ if(f_power_set_u1u2(3, 127, 0)){
+ printk(KERN_ERR "set u1 timeout FAILED!!\n");
+ return RET_FAIL;
+ }
+
+ return RET_SUCCESS;
+}
+
+static int u3ct_lecroy719(){
+ if(ct_check_hcd()){
+ return RET_FAIL;
+ }
+ printk(KERN_ERR "direct set u1 timeout value=0, u2 timeout value=127\n");
+ if(f_power_set_u1u2(3, 0, 127)){
+ printk(KERN_ERR "set u1/u2 timeout FAILED!!\n");
+ return RET_FAIL;
+ }
+
+ return RET_SUCCESS;
+}
+
+static int u3ct_lecroy720(){
+ if(ct_check_hcd()){
+ return RET_FAIL;
+ }
+ printk(KERN_ERR "STEP 1: enable port and slot, address device(BSR=1)...\n");
+ if(f_enable_port(0)){
+ printk(KERN_ERR "enable port FAILED!!\n");
+ return RET_FAIL;
+ }
+ if(f_enable_slot(NULL)){
+ printk(KERN_ERR "enable slot FAILED!!\n");
+ return RET_FAIL;
+ }
+ if(f_address_slot(true, NULL)){
+ printk(KERN_ERR "address device (BSR=1) FAILED!!\n");
+ return RET_FAIL;
+ }
+ printk(KERN_ERR "set u1 timeout value=127\n");
+ if(f_power_set_u1u2(3, 127, 0)){
+ printk(KERN_ERR "set u1 timeout FAILED!!\n");
+ return RET_FAIL;
+ }
+
+ return RET_SUCCESS;
+}
+
+static int u3ct_lecroy721(){
+ if(ct_check_hcd()){
+ return RET_FAIL;
+ }
+ printk(KERN_ERR "STEP 1: enable port and slot, address device(BSR=1)...\n");
+ if(f_enable_port(0)){
+ printk(KERN_ERR "enable port FAILED!!\n");
+ return RET_FAIL;
+ }
+ if(f_enable_slot(NULL)){
+ printk(KERN_ERR "enable slot FAILED!!\n");
+ return RET_FAIL;
+ }
+ if(f_address_slot(true, NULL)){
+ printk(KERN_ERR "address device (BSR=1) FAILED!!\n");
+ return RET_FAIL;
+ }
+ printk(KERN_ERR "set u1 timeout value=127\n");
+ if(f_power_set_u1u2(3, 127, 0)){
+ printk(KERN_ERR "set u1 timeout FAILED!!\n");
+ return RET_FAIL;
+ }
+
+ return RET_SUCCESS;
+}
+
+static int u3ct_lecroy729(){
+ u32 __iomem *addr;
+ int temp;
+ int port_id;
+ char isWarmReset;
+ struct xhci_hcd *xhci;
+
+ printk(KERN_ERR "hot reset port...\n");
+ port_id = g_port_id;
+ xhci = hcd_to_xhci(my_hcd);
+
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "before reset port %d = 0x%x\n", port_id-1, temp);
+ temp = xhci_port_state_to_neutral(temp);
+ temp = (temp | PORT_RESET);
+ xhci_writel(xhci, temp, addr);
+
+ return RET_SUCCESS;
+}
+
+static int u3ct_lecroy731(){
+ u32 __iomem *addr;
+ int temp;
+ int port_id;
+ char isWarmReset;
+ struct xhci_hcd *xhci;
+
+ printk(KERN_ERR "hot reset port...\n");
+ port_id = g_port_id;
+ xhci = hcd_to_xhci(my_hcd);
+
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "before reset port %d = 0x%x\n", port_id-1, temp);
+ temp = xhci_port_state_to_neutral(temp);
+ temp = (temp | PORT_RESET);
+ xhci_writel(xhci, temp, addr);
+ return RET_SUCCESS;
+}
+
+static int u3ct_lecroy7341(){
+ printk(KERN_ERR "Reset host controller\n");
+ if(my_hcd){
+ if(f_test_lib_cleanup()){
+ printk(KERN_ERR "cleanup host controller FAILED\n");
+ return RET_FAIL;
+ }
+ }
+ if(f_test_lib_init()){
+ printk(KERN_ERR "init host controller FAILED\n");
+ return RET_FAIL;
+ }
+ return RET_SUCCESS;
+}
+
+static int u3ct_lecroy7342(){
+ u32 __iomem *addr;
+ int temp;
+ int port_id;
+ char isWarmReset;
+ struct xhci_hcd *xhci;
+
+ printk(KERN_ERR "warm reset port\n");
+ port_id = 1;
+ xhci = hcd_to_xhci(my_hcd);
+
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "before reset port %d = 0x%x\n", port_id-1, temp);
+ temp = xhci_port_state_to_neutral(temp);
+ temp = (temp | PORT_WR);
+ xhci_writel(xhci, temp, addr);
+ return RET_SUCCESS;
+}
+
+static int u3ct_lecroy735(){
+ u32 __iomem *addr;
+ int temp;
+ int port_id;
+ struct xhci_hcd *xhci;
+
+ port_id = g_port_id;
+ xhci = hcd_to_xhci(my_hcd);
+
+ printk(KERN_ERR "suspend port\n");
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "before reset port %d = 0x%x\n", port_id-1, temp);
+ temp = xhci_port_state_to_neutral(temp);
+ temp = (temp & ~(0xf << 5));
+ temp = (temp | (3 << 5) | PORT_LINK_STROBE);
+ xhci_writel(xhci, temp, addr);
+ mtk_xhci_handshake(xhci, addr, (15<<5), (3<<5), 30*1000);
+ temp = xhci_readl(xhci, addr);
+ if(PORT_PLS_VALUE(temp) != 3){
+ xhci_err(xhci, "port not enter U3 state\n");
+ return RET_FAIL;
+ }
+
+	printk(KERN_ERR "delay 1 ms\n");
+ udelay(1000);
+ printk(KERN_ERR "warm reset port\n");
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "before reset port %d = 0x%x\n", port_id-1, temp);
+ temp = xhci_port_state_to_neutral(temp);
+ temp = (temp | PORT_WR);
+ xhci_writel(xhci, temp, addr);
+
+ return RET_SUCCESS;
+}
+
+static int u3ct_lecroy7361(){
+ u32 __iomem *addr;
+ int temp;
+ int port_id;
+ struct xhci_hcd *xhci;
+
+ port_id = g_port_id;
+ xhci = hcd_to_xhci(my_hcd);
+
+ printk(KERN_ERR "suspend port\n");
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "before reset port %d = 0x%x\n", port_id-1, temp);
+ temp = xhci_port_state_to_neutral(temp);
+ temp = (temp & ~(0xf << 5));
+ temp = (temp | (3 << 5) | PORT_LINK_STROBE);
+ xhci_writel(xhci, temp, addr);
+ mtk_xhci_handshake(xhci, addr, (15<<5), (3<<5), 30*1000);
+ temp = xhci_readl(xhci, addr);
+ if(PORT_PLS_VALUE(temp) != 3){
+ xhci_err(xhci, "port not enter U3 state\n");
+ return RET_FAIL;
+ }
+ return RET_SUCCESS;
+}
+
+static int u3ct_lecroy7362(){
+ u32 __iomem *addr;
+ int temp;
+ int port_id;
+ struct xhci_hcd *xhci;
+ int i;
+
+ port_id = g_port_id;
+ xhci = hcd_to_xhci(my_hcd);
+
+ printk(KERN_ERR "resume port\n");
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ temp = xhci_port_state_to_neutral(temp);
+ temp = (temp & ~(0xf << 5));
+ temp = (temp | PORT_LINK_STROBE);
+ xhci_writel(xhci, temp, addr);
+ for(i=0; i<200; i++){
+ temp = xhci_readl(xhci, addr);
+ if(PORT_PLS_VALUE(temp) == 0){
+ break;
+ }
+ msleep(1);
+
+ }
+ if(PORT_PLS_VALUE(temp) != 0){
+ xhci_err(xhci, "port not return U0 state\n");
+ return RET_FAIL;
+ }
+ return RET_SUCCESS;
+}
+
+
+static int dbg_u3ct_lecroy(int argc, char** argv){
+ int td_no;
+
+ td_no = 0;
+ if(argc > 1){
+ td_no = (int)simple_strtol(argv[1], &argv[1], 10);
+ }
+ switch(td_no){
+ case 706:
+ printk(KERN_ERR "(7061) STEP 1, (7062) STEP 2\n");
+		return RET_SUCCESS;
+ case 7061:
+ return u3ct_lecroy7061();
+ break;
+ case 7062:
+ return u3ct_lecroy7062();
+ break;
+ case 718:
+ return u3ct_lecroy718();
+ break;
+ case 719:
+ return u3ct_lecroy719();
+ break;
+ case 720:
+ return u3ct_lecroy720();
+ break;
+ case 721:
+ return u3ct_lecroy721();
+ break;
+ case 729:
+ return u3ct_lecroy729();
+ break;
+ case 731:
+ return u3ct_lecroy731();
+ break;
+ case 734:
+ printk(KERN_ERR "(7341) STEP 1, (7342) STEP 2\n");
+		return RET_SUCCESS;
+ case 7341:
+ return u3ct_lecroy7341();
+ break;
+ case 7342:
+ return u3ct_lecroy7342();
+ break;
+ case 735:
+ return u3ct_lecroy735();
+ break;
+ case 736:
+ printk(KERN_ERR "(7361) STEP 1, (7362) STEP 2\n");
+		return RET_SUCCESS;
+ case 7361:
+ return u3ct_lecroy7361();
+ break;
+ case 7362:
+ return u3ct_lecroy7362();
+ break;
+ default:
+ printk(KERN_ERR "enter td number\n");
+		return RET_FAIL;
+ }
+}
+
+static int u3ct_ellisys7061(){
+ if(ct_check_hcd()){
+ return RET_FAIL;
+ }
+ printk(KERN_ERR "STEP 1: enable port and slot, address device(BSR=1)...\n");
+ if(f_enable_port(0)){
+ printk(KERN_ERR "enable port FAILED!!\n");
+ return RET_FAIL;
+ }
+ if(f_enable_slot(NULL)){
+ printk(KERN_ERR "enable slot FAILED!!\n");
+ return RET_FAIL;
+ }
+ if(f_address_slot(true, NULL)){
+ printk(KERN_ERR "address device (BSR=1) FAILED!!\n");
+ return RET_FAIL;
+ }
+ return RET_SUCCESS;
+}
+
+static int u3ct_ellisys7062(){
+ int ret;
+ struct usb_device *udev, *rhdev;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_hcd *xhci;
+ struct urb *urb;
+ struct usb_ctrlrequest *dr;
+ struct usb_config_descriptor *desc;
+ int i;
+ char *tmp;
+
+ printk(KERN_ERR "STEP 2: get_descriptor, whether success or failed...\n");
+ ret = 0;
+ if(g_slot_id == 0){
+ printk(KERN_ERR "[ERROR] slot ID not valid\n");
+ return RET_FAIL;
+ }
+
+ xhci = hcd_to_xhci(my_hcd);
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ dr->bRequestType = USB_DIR_IN;
+ dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+ dr->wValue = cpu_to_le16((USB_DT_DEVICE << 8) + 0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(USB_DT_DEVICE_SIZE);
+ desc = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
+ memset(desc, 0, USB_DT_DEVICE_SIZE);
+
+ urb = alloc_ctrl_urb(dr, desc, udev);
+ ret = f_ctrlrequest(urb, udev);
+ if(urb->status == -EINPROGRESS){
+ /* timeout, stop endpoint, set TR dequeue pointer */
+ f_ring_stop_ep(g_slot_id, 0);
+ f_ring_set_tr_dequeue_pointer(g_slot_id, 0, urb);
+ }
+ kfree(dr);
+ kfree(desc);
+ usb_free_urb(urb);
+
+ return ret;
+}
+
+static int u3ct_ellisys718(){
+ printk(KERN_ERR "Reset host controller\n");
+
+ if(my_hcd){
+ if(f_test_lib_cleanup()){
+ printk(KERN_ERR "cleanup host controller FAILED\n");
+ return RET_FAIL;
+ }
+ }
+
+ if(f_test_lib_init()){
+ printk(KERN_ERR "init host controller FAILED\n");
+ return RET_FAIL;
+ }
+ printk(KERN_ERR "wait 1 sec\n");
+ msleep(1000);
+
+ printk(KERN_ERR "enable port, slot, address (BSR=1)\n");
+ if(f_enable_port(0)){
+ printk(KERN_ERR "enable port FAILED!!\n");
+ return RET_FAIL;
+ }
+ if(f_enable_slot(NULL)){
+ printk(KERN_ERR "enable slot FAILED!!\n");
+ return RET_FAIL;
+ }
+ if(f_address_slot(true, NULL)){
+ printk(KERN_ERR "address device (BSR=1) FAILED!!\n");
+ return RET_FAIL;
+ }
+
+ printk(KERN_ERR "set u1 timeout value=127\n");
+ if(f_power_set_u1u2(3, 127, 0)){
+ printk(KERN_ERR "set u1 timeout FAILED!!\n");
+ return RET_FAIL;
+ }
+
+ return RET_SUCCESS;
+}
+
+static int u3ct_ellisys719(){
+ printk(KERN_ERR "direct set u1 timeout value=0, u2 timeout value=127\n");
+ if(f_power_set_u1u2(3, 0, 127)){
+ printk(KERN_ERR "set u1/u2 timeout FAILED!!\n");
+ return RET_FAIL;
+ }
+
+ return RET_SUCCESS;
+}
+
+static int u3ct_ellisys720(){
+ if(ct_check_hcd()){
+ return RET_FAIL;
+ }
+ printk(KERN_ERR "STEP 1: enable port and slot, address device(BSR=1)...\n");
+ if(f_enable_port(0)){
+ printk(KERN_ERR "enable port FAILED!!\n");
+ return RET_FAIL;
+ }
+ if(f_enable_slot(NULL)){
+ printk(KERN_ERR "enable slot FAILED!!\n");
+ return RET_FAIL;
+ }
+ if(f_address_slot(true, NULL)){
+ printk(KERN_ERR "address device (BSR=1) FAILED!!\n");
+ return RET_FAIL;
+ }
+ printk(KERN_ERR "set u1 timeout value=127\n");
+ if(f_power_set_u1u2(3, 127, 0)){
+ printk(KERN_ERR "set u1 timeout FAILED!!\n");
+ return RET_FAIL;
+ }
+
+ return RET_SUCCESS;
+}
+
+static int u3ct_ellisys721(){
+ if(ct_check_hcd()){
+ return RET_FAIL;
+ }
+ printk(KERN_ERR "STEP 1: enable port and slot, address device(BSR=1)...\n");
+ if(f_enable_port(0)){
+ printk(KERN_ERR "enable port FAILED!!\n");
+ return RET_FAIL;
+ }
+ if(f_enable_slot(NULL)){
+ printk(KERN_ERR "enable slot FAILED!!\n");
+ return RET_FAIL;
+ }
+ if(f_address_slot(true, NULL)){
+ printk(KERN_ERR "address device (BSR=1) FAILED!!\n");
+ return RET_FAIL;
+ }
+ printk(KERN_ERR "set u1 timeout value=127\n");
+ if(f_power_set_u1u2(3, 127, 0)){
+ printk(KERN_ERR "set u1 timeout FAILED!!\n");
+ return RET_FAIL;
+ }
+
+ return RET_SUCCESS;
+}
+
+static int u3ct_ellisys729(){
+ u32 __iomem *addr;
+ int temp;
+ int port_id;
+ char isWarmReset;
+ struct xhci_hcd *xhci;
+
+ printk(KERN_ERR "hot reset port...\n");
+ port_id = g_port_id;
+ xhci = hcd_to_xhci(my_hcd);
+
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "before reset port %d = 0x%x\n", port_id-1, temp);
+ temp = xhci_port_state_to_neutral(temp);
+ temp = (temp | PORT_RESET);
+ xhci_writel(xhci, temp, addr);
+ return RET_SUCCESS;
+}
+
+static int u3ct_ellisys731(){
+ u32 __iomem *addr;
+ int temp;
+ int port_id;
+ char isWarmReset;
+ struct xhci_hcd *xhci;
+
+ printk(KERN_ERR "hot reset port...\n");
+ port_id = g_port_id;
+ xhci = hcd_to_xhci(my_hcd);
+
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "before reset port %d = 0x%x\n", port_id-1, temp);
+ temp = xhci_port_state_to_neutral(temp);
+ temp = (temp | PORT_RESET);
+ xhci_writel(xhci, temp, addr);
+ return RET_SUCCESS;
+}
+
+static int u3ct_ellisys7341(){
+ printk(KERN_ERR "Reset host controller\n");
+ if(my_hcd){
+ if(f_test_lib_cleanup()){
+ printk(KERN_ERR "cleanup host controller FAILED\n");
+ return RET_FAIL;
+ }
+ }
+ if(f_test_lib_init()){
+ printk(KERN_ERR "init host controller FAILED\n");
+ return RET_FAIL;
+ }
+ return RET_SUCCESS;
+}
+
+static int u3ct_ellisys7342(){
+ u32 __iomem *addr;
+ int temp;
+ int port_id;
+ char isWarmReset;
+ struct xhci_hcd *xhci;
+
+ printk(KERN_ERR "warm reset port\n");
+ port_id = 1;
+ xhci = hcd_to_xhci(my_hcd);
+
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "before reset port %d = 0x%x\n", port_id-1, temp);
+ temp = xhci_port_state_to_neutral(temp);
+ temp = (temp | PORT_WR);
+ xhci_writel(xhci, temp, addr);
+ return RET_SUCCESS;
+}
+
+static int u3ct_ellisys735(){
+ u32 __iomem *addr;
+ int temp;
+ int port_id;
+ struct xhci_hcd *xhci;
+
+ port_id = g_port_id;
+ xhci = hcd_to_xhci(my_hcd);
+
+ printk(KERN_ERR "suspend port\n");
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "before reset port %d = 0x%x\n", port_id-1, temp);
+ temp = xhci_port_state_to_neutral(temp);
+ temp = (temp & ~(0xf << 5));
+ temp = (temp | (3 << 5) | PORT_LINK_STROBE);
+ xhci_writel(xhci, temp, addr);
+ mtk_xhci_handshake(xhci, addr, (15<<5), (3<<5), 30*1000);
+ temp = xhci_readl(xhci, addr);
+ if(PORT_PLS_VALUE(temp) != 3){
+ xhci_err(xhci, "port not enter U3 state\n");
+ return RET_FAIL;
+ }
+
+	printk(KERN_ERR "delay 1 ms\n");
+ udelay(1000);
+ printk(KERN_ERR "warm reset port\n");
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "before reset port %d = 0x%x\n", port_id-1, temp);
+ temp = xhci_port_state_to_neutral(temp);
+ temp = (temp | PORT_WR);
+ xhci_writel(xhci, temp, addr);
+ return RET_SUCCESS;
+}
+
+static int u3ct_ellisys7361(){
+ int ret;
+ struct device *dev;
+ struct usb_device *udev, *rhdev;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_hcd *xhci;
+ struct urb *urb;
+ struct usb_ctrlrequest *dr;
+ struct usb_config_descriptor *desc;
+ int i,j;
+ char *tmp;
+ struct urb *urb_rx;
+ struct usb_host_endpoint *ep_rx;
+ int ep_index_rx;
+ void *buffer_rx;
+ char *buffer;
+ ret = 0;
+
+ printk(KERN_ERR "Reset host controller\n");
+ if(my_hcd){
+ if(f_test_lib_cleanup()){
+ printk(KERN_ERR "cleanup host controller FAILED\n");
+ return RET_FAIL;
+ }
+ }
+ if(u3auto_hcd_reset() != RET_SUCCESS){
+ printk(KERN_ERR "init host controller, enable slot, address dev FAILED\n");
+ return RET_FAIL;
+ }
+ xhci = hcd_to_xhci(my_hcd);
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+ dev = xhci_to_hcd(xhci)->self.controller;/* dma stream buffer */
+ xhci_dbg(xhci, "device speed %d\n", udev->speed);
+ /* get descriptor (device) */
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ dr->bRequestType = USB_DIR_IN;
+ dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+ dr->wValue = cpu_to_le16((USB_DT_DEVICE << 8) + 0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(8);
+ desc = kmalloc(8, GFP_KERNEL);
+ memset(desc, 0, 8);
+ urb = alloc_ctrl_urb(dr, desc, udev);
+ ret = f_ctrlrequest(urb, udev);
+ kfree(dr);
+ kfree(desc);
+ usb_free_urb(urb);
+ /* get descriptor (device) */
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ dr->bRequestType = USB_DIR_IN;
+ dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+ dr->wValue = cpu_to_le16((USB_DT_DEVICE << 8) + 0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(USB_DT_DEVICE_SIZE);
+ desc = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
+ memset(desc, 0, USB_DT_DEVICE_SIZE);
+ urb = alloc_ctrl_urb(dr, desc, udev);
+ ret = f_ctrlrequest(urb, udev);
+ kfree(dr);
+ kfree(desc);
+ usb_free_urb(urb);
+ /* get descriptor (configure) */
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ dr->bRequestType = USB_DIR_IN;
+ dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+ dr->wValue = cpu_to_le16((USB_DT_CONFIG << 8) + 0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(USB_DT_CONFIG_SIZE);
+ desc = kmalloc(USB_DT_CONFIG_SIZE, GFP_KERNEL);
+ memset(desc, 0, USB_DT_CONFIG_SIZE);
+ urb = alloc_ctrl_urb(dr, desc, udev);
+ ret = f_ctrlrequest(urb, udev);
+ kfree(dr);
+ kfree(desc);
+ usb_free_urb(urb);
+ /* get descriptor (configure) */
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ dr->bRequestType = USB_DIR_IN;
+ dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+ dr->wValue = cpu_to_le16((USB_DT_CONFIG << 8) + 0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(40);
+ desc = kmalloc(40, GFP_KERNEL);
+ memset(desc, 0, 40);
+ urb = alloc_ctrl_urb(dr, desc, udev);
+ ret = f_ctrlrequest(urb, udev);
+ kfree(dr);
+ kfree(desc);
+ usb_free_urb(urb);
+ /* set configuration */
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ dr->bRequestType = USB_DIR_OUT;
+ dr->bRequest = USB_REQ_SET_CONFIGURATION;
+ dr->wValue = cpu_to_le16(1);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(0);
+ urb = alloc_ctrl_urb(dr, NULL, udev);
+ ret = f_ctrlrequest(urb,udev);
+ kfree(dr);
+ usb_free_urb(urb);
+ /* set idle */
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ dr->bRequestType = USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+ dr->bRequest = 0x0A;
+ dr->wValue = cpu_to_le16(0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(0);
+ urb = alloc_ctrl_urb(dr, NULL, udev);
+ ret = f_ctrlrequest(urb,udev);
+ kfree(dr);
+ usb_free_urb(urb);
+ /* get descriptor (HID report) */
+ ret = f_config_ep(1, EPADD_IN, EPATT_INT, 4, 7,0,0, udev, 1);
+ /* interrupt input */
+ ep_rx = udev->ep_in[1];
+ ep_index_rx = xhci_get_endpoint_index(&ep_rx->desc);
+ xhci_err(xhci, "[INPUT]\n");
+ for(i=0; i<10; i++){
+ urb_rx = usb_alloc_urb(0, GFP_KERNEL);
+ ret = f_fill_urb(urb_rx,1,4,0,EPADD_IN, 0, 4, udev);
+ if(ret){
+ xhci_err(xhci, "[ERROR]fill rx urb Error!!\n");
+ return RET_FAIL;
+ }
+ urb_rx->transfer_flags &= ~URB_ZERO_PACKET;
+ ret = f_queue_urb(urb_rx,1,udev);
+ if(ret){
+ xhci_err(xhci, "[ERROR]rx urb transfer failed!!\n");
+ return RET_FAIL;
+ }
+ dma_sync_single_for_cpu(dev,urb_rx->transfer_dma, 4,DMA_BIDIRECTIONAL);
+ for(j=0; j<urb_rx->transfer_buffer_length; j++){
+			tmp = urb_rx->transfer_buffer+j;
+ }
+ usb_free_urb(urb_rx);
+ }
+
+ return ret;
+}
+
+static int u3ct_ellisys7362(){
+ u32 __iomem *addr;
+ int temp;
+ int port_id;
+ struct xhci_hcd *xhci;
+
+ port_id = g_port_id;
+ xhci = hcd_to_xhci(my_hcd);
+
+ printk(KERN_ERR "suspend port\n");
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "before reset port %d = 0x%x\n", port_id-1, temp);
+ temp = xhci_port_state_to_neutral(temp);
+ temp = (temp & ~(0xf << 5));
+ temp = (temp | (3 << 5) | PORT_LINK_STROBE);
+ xhci_writel(xhci, temp, addr);
+ mtk_xhci_handshake(xhci, addr, (15<<5), (3<<5), 30*1000);
+ temp = xhci_readl(xhci, addr);
+ if(PORT_PLS_VALUE(temp) != 3){
+ xhci_err(xhci, "port not enter U3 state\n");
+ return RET_FAIL;
+ }
+ return RET_SUCCESS;
+}
+
+static int u3ct_ellisys7363(){
+ u32 __iomem *addr;
+ int temp;
+ int port_id;
+ struct xhci_hcd *xhci;
+ int i;
+
+ port_id = g_port_id;
+ xhci = hcd_to_xhci(my_hcd);
+
+ printk(KERN_ERR "resume port\n");
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ temp = xhci_port_state_to_neutral(temp);
+ temp = (temp & ~(0xf << 5));
+ temp = (temp | PORT_LINK_STROBE);
+ xhci_writel(xhci, temp, addr);
+ for(i=0; i<200; i++){
+ temp = xhci_readl(xhci, addr);
+ if(PORT_PLS_VALUE(temp) == 0){
+ break;
+ }
+ msleep(1);
+
+ }
+ if(PORT_PLS_VALUE(temp) != 0){
+ xhci_err(xhci, "port not return U0 state\n");
+ return RET_FAIL;
+ }
+ return RET_SUCCESS;
+}
+
+
+static int dbg_u3ct_ellisys(int argc, char** argv){
+ int td_no;
+
+ td_no = 0;
+ if(argc > 1){
+ td_no = (int)simple_strtol(argv[1], &argv[1], 10);
+ }
+ switch(td_no){
+ case 706:
+ printk(KERN_ERR "(7061) STEP 1, (7062) STEP 2\n");
+			return RET_SUCCESS;
+ case 7061:
+ return u3ct_ellisys7061();
+ break;
+ case 7062:
+ return u3ct_ellisys7062();
+ break;
+ case 718:
+ return u3ct_ellisys718();
+ break;
+ case 719:
+ return u3ct_ellisys719();
+ break;
+ case 720:
+ return u3ct_ellisys720();
+ break;
+ case 721:
+ return u3ct_ellisys721();
+ break;
+ case 729:
+ return u3ct_ellisys729();
+ break;
+ case 731:
+ return u3ct_ellisys731();
+ break;
+ case 734:
+ printk(KERN_ERR "(7341) STEP 1, (7342) STEP 2\n");
+			return RET_SUCCESS;
+ case 7341:
+ return u3ct_ellisys7341();
+ break;
+ case 7342:
+ return u3ct_ellisys7342();
+ break;
+ case 735:
+ return u3ct_ellisys735();
+ break;
+ case 736:
+ printk(KERN_ERR "(7361) STEP 1, (7362) STEP 2, (7363) STEP 3\n");
+			return RET_SUCCESS;
+ case 7361:
+ return u3ct_ellisys7361();
+ break;
+ case 7362:
+ return u3ct_ellisys7362();
+ break;
+ case 7363:
+ return u3ct_ellisys7363();
+ break;
+ default:
+ printk(KERN_ERR "enter td number\n");
+			return RET_FAIL;
+ }
+}
+
+static int u2ct_signal_quality(int port_no){
+ u32 __iomem *addr;
+ struct xhci_hcd *xhci;
+ u32 temp, port_id;
+ int test_value;
+
+
+ /* initial host controller */
+ if(ct_check_hcd()){
+ return RET_FAIL;
+ }
+ xhci = hcd_to_xhci(my_hcd);
+ /* turn off port power */
+ if (port_no == 1) {
+ g_num_u3_port = SSUSB_U3_PORT_NUM(readl(SSUSB_IP_CAP));
+ port_id = g_num_u3_port+1;
+ }
+ else {
+ g_num_u2_port = SSUSB_U2_PORT_NUM(readl(SSUSB_IP_CAP));
+ port_id = g_num_u2_port+1;
+ }
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ temp = xhci_port_state_to_neutral(temp);
+ temp &= ~PORT_POWER;
+ xhci_writel(xhci, temp, addr);
+
+ printk(KERN_ERR "issue test packet\n");
+ /* test mode - test packet */
+ test_value = 4;
+ addr = &xhci->op_regs->port_power_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ temp &= ~(0xf<<28);
+ temp |= (test_value<<28);
+ xhci_writel(xhci, temp, addr);
+
+ return RET_SUCCESS;
+}
+
+static int u2ct_packet_parameter_init(){
+
+ printk(KERN_ERR "Reset host controller\n");
+ if(my_hcd){
+ if(f_test_lib_cleanup()){
+ printk(KERN_ERR "cleanup host controller FAILED\n");
+ return RET_FAIL;
+ }
+ }
+ if(f_test_lib_init()){
+ printk(KERN_ERR "init host controller FAILED\n");
+ return RET_FAIL;
+ }
+ printk(KERN_ERR "STEP 1: enable port and slot, address device(BSR=1)...\n");
+ if(f_enable_port(0)){
+ printk(KERN_ERR "enable port FAILED!!\n");
+ return RET_FAIL;
+ }
+ if(f_enable_slot(NULL)){
+ printk(KERN_ERR "enable slot FAILED!!\n");
+ return RET_FAIL;
+ }
+	if(f_address_slot(false, NULL)){
+		printk(KERN_ERR "address device (BSR=1) FAILED!!\n");
+		return RET_FAIL; }
+	return RET_SUCCESS;
+}
+
+static int u2ct_packet_parameter_getdesc(){
+ int ret;
+ struct usb_device *udev, *rhdev;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_hcd *xhci;
+ struct urb *urb;
+ struct usb_ctrlrequest *dr;
+ struct usb_config_descriptor *desc;
+ int i;
+ char *tmp;
+ ret = 0;
+
+ if(g_slot_id == 0){
+ printk(KERN_ERR "[ERROR] slot ID not valid\n");
+ return RET_FAIL;
+ }
+
+ xhci = hcd_to_xhci(my_hcd);
+ rhdev = my_hcd->self.root_hub;
+ udev = rhdev->children[g_port_id-1];
+
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ dr->bRequestType = USB_DIR_IN;
+ dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+ dr->wValue = cpu_to_le16((USB_DT_DEVICE << 8) + 0);
+ dr->wIndex = cpu_to_le16(0);
+ dr->wLength = cpu_to_le16(USB_DT_DEVICE_SIZE);
+ desc = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
+ memset(desc, 0, USB_DT_DEVICE_SIZE);
+
+ urb = alloc_ctrl_urb(dr, desc, udev);
+ ret = f_ctrlrequest(urb, udev);
+ if(urb->status == -EINPROGRESS){
+ /* timeout, stop endpoint, set TR dequeue pointer */
+ f_ring_stop_ep(g_slot_id, 0);
+ f_ring_set_tr_dequeue_pointer(g_slot_id, 0, urb);
+ }
+ kfree(dr);
+ kfree(desc);
+ usb_free_urb(urb);
+
+ return ret;
+}
+
+static int u2ct_disconnect_detect(int port_no){
+ u32 __iomem *addr;
+ struct xhci_hcd *xhci;
+ u32 temp, port_id;
+ int test_value;
+
+ printk(KERN_ERR "Reset host controller\n");
+ if(my_hcd){
+ if(f_test_lib_cleanup()){
+ printk(KERN_ERR "cleanup host controller FAILED\n");
+ return RET_FAIL;
+ }
+ }
+ if(f_test_lib_init()){
+ printk(KERN_ERR "init host controller FAILED\n");
+ return RET_FAIL;
+ }
+ /* turn off power */
+ xhci = hcd_to_xhci(my_hcd);
+ /* turn off port power */
+ if (port_no == 1) {
+ g_num_u3_port = SSUSB_U3_PORT_NUM(readl(SSUSB_IP_CAP));
+ port_id = g_num_u3_port+1;
+ }
+ else {
+ g_num_u2_port = SSUSB_U2_PORT_NUM(readl(SSUSB_IP_CAP));
+ port_id = g_num_u2_port+1;
+ }
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ temp = xhci_port_state_to_neutral(temp);
+ temp &= ~PORT_POWER;
+ xhci_writel(xhci, temp, addr);
+ /* let SW doesn't do reset after get discon/conn events */
+ g_hs_block_reset = true;
+ g_port_connect = false;
+ /* test mode - force enable */
+ test_value = 5;
+ addr = &xhci->op_regs->port_power_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ temp &= ~(0xf<<28);
+ temp |= (test_value<<28);
+ xhci_writel(xhci, temp, addr);
+ printk(KERN_ERR "Please connect device in 30 secs\n");
+ /* waiting for conn event */
+ wait_event_on_timeout(&g_port_connect, true, TRANS_TIMEOUT);
+ if(!g_port_connect){
+ printk(KERN_ERR "Port not connected\n");
+ g_hs_block_reset = false;
+ return RET_FAIL;
+ }
+ printk(KERN_ERR "Device connected\n");
+ printk(KERN_ERR "Please disconnect device in 30 secs\n");
+ /* waiting for discon event */
+ wait_event_on_timeout(&g_port_connect, false, TRANS_TIMEOUT);
+ if(g_port_connect){
+ printk(KERN_ERR "Port not disconnected\n");
+ g_hs_block_reset = false;
+ return RET_FAIL;
+ }
+ printk(KERN_ERR "Device disconnected\n");
+ g_hs_block_reset = false;
+ return RET_SUCCESS;
+}
+
+static int u2ct_chirp_timing(){
+ printk(KERN_ERR "Reset host controller\n");
+ if(my_hcd){
+ if(f_test_lib_cleanup()){
+ printk(KERN_ERR "cleanup host controller FAILED\n");
+ return RET_FAIL;
+ }
+ }
+ if(f_test_lib_init()){
+ printk(KERN_ERR "init host controller FAILED\n");
+ return RET_FAIL;
+ }
+ return RET_SUCCESS;
+}
+
+static int u2ct_suspend_resume_timing_init(){
+ printk(KERN_ERR "Reset host controller\n");
+ if(my_hcd){
+ if(f_test_lib_cleanup()){
+ printk(KERN_ERR "cleanup host controller FAILED\n");
+ return RET_FAIL;
+ }
+ }
+ if(f_test_lib_init()){
+ printk(KERN_ERR "init host controller FAILED\n");
+ return RET_FAIL;
+ }
+ return RET_SUCCESS;
+}
+
+static int u2ct_suspend_resume_timing_suspend(){
+ return f_power_suspend();
+}
+
+static int u2ct_suspend_resume_timing_resume(){
+ return f_power_resume();
+}
+
+static int u2ct_j(int port_no){
+ u32 __iomem *addr;
+ struct xhci_hcd *xhci;
+ u32 temp, port_id;
+ int test_value;
+
+ if(ct_check_hcd()){
+ return RET_FAIL;
+ }
+ xhci = hcd_to_xhci(my_hcd);
+ /* turn off port power */
+ if (port_no == 1) {
+ g_num_u3_port = SSUSB_U3_PORT_NUM(readl(SSUSB_IP_CAP));
+ port_id = g_num_u3_port+1;
+ }
+ else {
+ g_num_u2_port = SSUSB_U2_PORT_NUM(readl(SSUSB_IP_CAP));
+ port_id = g_num_u2_port+1;
+ }
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ temp = xhci_port_state_to_neutral(temp);
+ temp &= ~PORT_POWER;
+ xhci_writel(xhci, temp, addr);
+ /* test mode - test j */
+ test_value = 1;
+ addr = &xhci->op_regs->port_power_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ temp &= ~(0xf<<28);
+ temp |= (test_value<<28);
+ xhci_writel(xhci, temp, addr);
+
+ return RET_SUCCESS;
+}
+
+static int u2ct_k(int port_no){
+ u32 __iomem *addr;
+ struct xhci_hcd *xhci;
+ u32 temp, port_id;
+ int test_value;
+
+ if(ct_check_hcd()){
+ return RET_FAIL;
+ }
+
+ xhci = hcd_to_xhci(my_hcd);
+ /* turn off port power */
+ if (port_no == 1) {
+ g_num_u3_port = SSUSB_U3_PORT_NUM(readl(SSUSB_IP_CAP));
+ port_id = g_num_u3_port+1;
+ }
+ else {
+ g_num_u2_port = SSUSB_U2_PORT_NUM(readl(SSUSB_IP_CAP));
+ port_id = g_num_u2_port+1;
+ }
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ temp = xhci_port_state_to_neutral(temp);
+ temp &= ~PORT_POWER;
+ xhci_writel(xhci, temp, addr);
+ /* test mode - test k */
+ test_value = 2;
+ addr = &xhci->op_regs->port_power_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ temp &= ~(0xf<<28);
+ temp |= (test_value<<28);
+ xhci_writel(xhci, temp, addr);
+
+ return RET_SUCCESS;
+}
+
+static int u2ct_se0_nak(int port_no){
+ u32 __iomem *addr;
+ struct xhci_hcd *xhci;
+ u32 temp, port_id;
+ int test_value;
+
+ if(ct_check_hcd()){
+ return RET_FAIL;
+ }
+
+ xhci = hcd_to_xhci(my_hcd);
+ /* turn off port power */
+ if (port_no == 1) {
+ g_num_u3_port = SSUSB_U3_PORT_NUM(readl(SSUSB_IP_CAP));
+ port_id = g_num_u3_port+1;
+ }
+ else {
+ g_num_u2_port = SSUSB_U2_PORT_NUM(readl(SSUSB_IP_CAP));
+ port_id = g_num_u2_port+1;
+ }
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ temp = xhci_port_state_to_neutral(temp);
+ temp &= ~PORT_POWER;
+ xhci_writel(xhci, temp, addr);
+ /* test mode - test k */
+ test_value = 3;
+ addr = &xhci->op_regs->port_power_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ temp &= ~(0xf<<28);
+ temp |= (test_value<<28);
+ xhci_writel(xhci, temp, addr);
+
+ return RET_SUCCESS;
+}
+
+static int dbg_u2ct(int argc, char** argv){
+ int td_no, port_no;
+
+ td_no = 0;
+ if(argc > 1){
+ td_no = (int)simple_strtol(argv[1], &argv[1], 10);
+ }
+ if(argc > 2){
+ port_no = (int)simple_strtol(argv[2], &argv[2], 10);
+ }
+ else
+ port_no = 1;
+
+
+ switch(td_no){
+ case 6: case 3: case 7:
+ return u2ct_signal_quality(port_no);
+ break;
+ case 21: case 25: case 23: case 22: case 55:
+ printk(KERN_ERR "(211, 251, 231, 221, 551) STEP 1, (212, 252, 232, 222, 552) STEP 2, (213, 253, 233, 223, 553) STEP 3\n");
+			return RET_SUCCESS;
+ case 211: case 251: case 231: case 221: case 551:
+ return u2ct_packet_parameter_init();
+ break;
+ case 212: case 213: case 252: case 253: case 232: case 233: case 222: case 223: case 552: case 553:
+ return u2ct_packet_parameter_getdesc();
+ break;
+ case 37: case 36:
+ return u2ct_disconnect_detect(port_no);
+ break;
+ case 33: case 34: case 35:
+ return u2ct_chirp_timing();
+ break;
+ case 39: case 41:
+ printk(KERN_ERR "(391,411) STEP 1, (392, 412) STEP 2, (393, 413) STEP 3\n");
+			return RET_SUCCESS;
+ case 391: case 411:
+ return u2ct_suspend_resume_timing_init();
+ break;
+ case 392: case 412:
+ return u2ct_suspend_resume_timing_suspend();
+ break;
+ case 393: case 413:
+ return u2ct_suspend_resume_timing_resume();
+ break;
+ case 8:
+ printk(KERN_ERR "(81) J test (82) K test\n");
+			return RET_SUCCESS;
+ case 81:
+ return u2ct_j(port_no);
+ break;
+ case 82:
+ return u2ct_k(port_no);
+ break;
+ case 9:
+ return u2ct_se0_nak(port_no);
+ break;
+ default:
+ printk(KERN_ERR "please enter EL number\n");
+			return RET_FAIL;
+ }
+}
+
+#define EP_SPEED_SS 1
+#define EP_SPEED_HS 2
+#define EP_SPEED_FS 3
+#define EP_SPEED_LS 4
+#define EP_SPEED_RANDOM 0
+#define EP_OUT 1
+#define EP_IN 0
+
+#define MAX_EPNUM 100
+
+static int dbg_sch_algorithm(int argc, char** argv){
+ struct xhci_hcd *xhci;
+ struct usb_device *udev_ss, *udev_hs, *udev_fs, *udev_ls, *rhdev, *udev;
+ int test_speed, cur_speed;
+ struct usb_host_endpoint *ep;
+ struct xhci_ep_ctx *ep_ctx;
+ char is_continue, ep_boundary, ep_num;
+ int ep_dir, transfer_type, maxp, bInterval, mult, burst;
+ int ret;
+ int ep_index, i;
+ struct usb_host_endpoint *ep_list[MAX_EPNUM];
+ struct xhci_virt_device *virt_dev;
+ struct xhci_slot_ctx *slot_ctx;
+ int interval;
+ int isTT;
+ struct sch_ep *sch_ep;
+ test_speed = EP_SPEED_HS;
+ ep_boundary = 30;
+
+ if(argc > 1){
+ /* 0: all(except for ss), 1: ss, 2: hs, 3: fs, 4: ls */
+ test_speed = (int)simple_strtol(argv[1], &argv[1], 10);
+ printk(KERN_ERR "Test speed %d\n", test_speed);
+ }
+ if(argc > 2){
+ ep_boundary = (int)simple_strtol(argv[2], &argv[2], 10);
+ printk(KERN_ERR "Try %d EPs\n", ep_boundary);
+ if(ep_boundary > MAX_EPNUM){
+ printk(KERN_ERR "EP num too much!!\n");
+ return RET_FAIL;
+ }
+ }
+
+ xhci = hcd_to_xhci(my_hcd);
+ /* create 4 device, ss, hs, fs(tt), ls(tt) */
+ rhdev = my_hcd->self.root_hub;
+ /* ss */
+ udev_ss = mtk_usb_alloc_dev(rhdev, rhdev->bus, 1);
+ udev_ss->speed = USB_SPEED_SUPER;
+ udev_ss->level = rhdev->level + 1;
+ udev_ss->slot_id = 1;
+ xhci_alloc_virt_device(xhci, 1, udev_ss, GFP_KERNEL);
+ /* hs */
+ udev_hs = mtk_usb_alloc_dev(rhdev, rhdev->bus, 2);
+ udev_hs->speed = USB_SPEED_HIGH;
+ udev_hs->level = rhdev->level + 1;
+ udev_hs->slot_id = 2;
+ xhci_alloc_virt_device(xhci, 2, udev_hs, GFP_KERNEL);
+ /* fs */
+ udev_fs = mtk_usb_alloc_dev(rhdev, udev_hs->bus, 1);
+ udev_fs->speed = USB_SPEED_FULL;
+ udev_fs->level = udev_hs->level + 1;
+ udev_fs->tt = kzalloc(sizeof(struct usb_tt), GFP_KERNEL);
+ udev_fs->tt->hub = udev_hs;
+ udev_fs->tt->multi = false;
+ udev_fs->tt->think_time = 0;
+ udev_fs->ttport = 1;
+ udev_fs->slot_id = 3;
+ xhci_alloc_virt_device(xhci, 3, udev_fs, GFP_KERNEL);
+ virt_dev = xhci->devs[3];
+ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+ slot_ctx->tt_info = 0x1;
+ /* ls */
+ udev_ls = mtk_usb_alloc_dev(rhdev, udev_hs->bus, 2);
+ udev_ls->speed = USB_SPEED_LOW;
+ udev_ls->level = udev_hs->level + 1;
+ udev_ls->tt = kzalloc(sizeof(struct usb_tt), GFP_KERNEL);
+ udev_ls->tt->hub = udev_hs;
+ udev_ls->tt->multi = false;
+ udev_ls->tt->think_time = 0;
+ udev_ls->ttport = 2;
+ udev_ls->slot_id = 4;
+ xhci_alloc_virt_device(xhci, 4, udev_ls, GFP_KERNEL);
+ virt_dev = xhci->devs[4];
+ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+ slot_ctx->tt_info = 0x1;
+ /*
+ * 1.random add ep (speed(fix or random), iso or intr, maxp, interrupt, mult, burst)
+ * 2.record this EP
+ * 3.directly call add/remove ep scheduler API
+ * 4.check if success, if failed over 10 times next, else goto 1
+ */
+ ep_num = 0;
+ ep_index = 0;
+ while(ep_num < ep_boundary){
+ ep_dir = get_random_int() % 2;
+ burst = 0;
+ mult = 0;
+ isTT=0;
+ if(test_speed == 0){
+ cur_speed = (get_random_int()%3)+2;
+ }
+ else{
+ cur_speed = test_speed;
+ }
+ switch(cur_speed){
+ case EP_SPEED_SS:
+ udev = udev_ss;
+ transfer_type = get_random_int() % 2;
+ if(transfer_type == 0){
+ transfer_type = EPATT_INT;
+ burst = get_random_int() % 3;
+ if(burst > 0){
+ maxp = 1024;
+ }
+ else{
+ maxp = ((get_random_int() % 16)+1)*64;
+ }
+ bInterval = (get_random_int()%8)+1;
+ }
+ else{
+ burst = get_random_int() % 16;
+ if(burst > 0){
+ maxp = 1024;
+ }
+ else{
+ maxp = ((get_random_int() % 4)+1)*256;
+ }
+ mult = get_random_int() % 3;
+ bInterval = (get_random_int()%8)+1;
+ }
+ break;
+ case EP_SPEED_HS:
+ udev = udev_hs;
+ transfer_type = get_random_int() % 2;
+ if(transfer_type == 0){
+ transfer_type = EPATT_INT;
+ maxp = ((get_random_int() % 16)+1)*64;
+ mult = get_random_int() % 3;
+ burst = 0;
+ bInterval = (get_random_int()%8)+1;
+ }
+ else{
+ maxp = ((get_random_int() % 16) + 1)*64;
+ mult = get_random_int() % 3;
+ burst = 0;
+ bInterval = (get_random_int()%8)+1;
+ }
+ break;
+ case EP_SPEED_FS:
+ udev = udev_fs;
+ transfer_type = get_random_int() % 2;
+ if(transfer_type == 0){
+ transfer_type = EPATT_INT;
+ maxp = ((get_random_int() % 8)+1)*8;
+ burst = 0;
+ mult = 0;
+ bInterval = (get_random_int()%256)+1;
+ isTT=1;
+ }
+ else{
+ maxp = ((get_random_int() % 16) + 1)*64;
+ if(maxp==1024){
+ maxp = 1023;
+ }
+ burst = 0;
+ mult = 0;
+ bInterval = (get_random_int()%6)+1;
+ isTT=1;
+ }
+ break;
+ case EP_SPEED_LS:
+ udev = udev_ls;
+ transfer_type = EPATT_INT;
+ maxp = 8;
+ bInterval = (get_random_int()%256)+1;
+ isTT=1;
+ break;
+ }
+ ep = kmalloc(sizeof(struct usb_host_endpoint), GFP_NOIO);
+ ep->desc.bDescriptorType = USB_DT_ENDPOINT;
+ if(ep_dir == EP_OUT){
+ ep->desc.bEndpointAddress = EPADD_NUM(1) | EPADD_OUT;
+ }
+ else{
+ ep->desc.bEndpointAddress = EPADD_NUM(1) | EPADD_IN;
+ }
+ ep->desc.bmAttributes = transfer_type;
+ if(cur_speed == EP_SPEED_SS){
+ ep->desc.wMaxPacketSize = maxp;
+ ep->ss_ep_comp.bmAttributes = mult;
+ ep->ss_ep_comp.bMaxBurst = burst;
+ }
+ else{
+ ep->desc.wMaxPacketSize = maxp | (mult << 11);
+ ep->ss_ep_comp.bmAttributes = 0;
+ ep->ss_ep_comp.bMaxBurst = 0;
+ }
+ ep->desc.bInterval = bInterval;
+ interval = 1<<(bInterval-1);
+ ep_ctx = mtk_xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, GFP_NOIO);
+ if(transfer_type == EPATT_INT && (cur_speed == EP_SPEED_FS || cur_speed == EP_SPEED_LS)){
+ /* rounding interval */
+ interval = fls(8 * ep->desc.bInterval) - 1;
+ interval = clamp_val(interval, 3, 10);
+ printk(KERN_ERR "ROUNDING interval to %d\n", interval);
+ ep_ctx->ep_info = EP_INTERVAL(interval);
+ }
+ printk(KERN_ERR "[EP]Speed[%d]dir[%d]transfer_type[%d]bInterval[%d]maxp[%d]burst[%d]mult[%d]"
+ , cur_speed, ep_dir, transfer_type, bInterval, maxp, burst, mult);
+#if MTK_SCH_NEW
+ sch_ep = kmalloc(sizeof(struct sch_ep), GFP_KERNEL);
+ ret = mtk_xhci_scheduler_add_ep(udev->speed, usb_endpoint_dir_in(&ep->desc), isTT
+ , transfer_type, maxp, interval, burst, mult, (u32 *)ep, (u32 *)ep_ctx, sch_ep);
+#else
+ ret = mtk_xhci_scheduler_add_ep(xhci, udev, ep, ep_ctx);
+#endif
+ if(ret == SCH_SUCCESS){
+ printk(KERN_ERR "......Success\n");
+ ep_list[ep_index] = ep;
+ ep = NULL;
+ ep_index++;
+ }
+ else{
+ printk(KERN_ERR "......Failed\n");
+ }
+ ep_num++;
+ }
+ return RET_SUCCESS;
+}
+
+static int dbg_u3_calibration(int argc, char** argv){
+ char u1TimeDelay;
+ char u1TxSel;
+ char u1RxSel;
+ char u1PhyDrv;
+ char u1PortPlsVal;
+ char u1TmpPortPlsVal;
+ char u1Macltssm;
+ char u1TmpMacltssm;
+ int u4ErrorCnt;
+ int u4RecoveryCnt;
+ int u4CorrectCnt;
+ char u1IsPass;
+ int i;
+ int retval;
+ int port_id;
+ u32 data;
+ int PASSCNT = 0;
+ int temp;
+ int num_u3_port, num_u2_port;
+
+ struct xhci_hcd *xhci;
+ PHY_INT32 value;
+ __u32 __iomem *addr;
+
+ struct device *dev;
+ struct mtk_u3h_hw *u3h_hw;
+
+ __u32 __iomem *ip_reset_addr;
+ __u32 __iomem *latch_sel_addr;
+ __u32 __iomem *u3_cmd_addr;
+ __u32 __iomem *u3_sts_addr;
+ __u32 __iomem *u3_p1_sc_addr;
+ __u32 __iomem *u3_p1_ltssm_addr;
+ __u32 __iomem *u3_link_err_count_addr;
+ __u32 __iomem *u3_recovery_count_addr;
+
+ num_u3_port = SSUSB_U3_PORT_NUM(readl(SSUSB_IP_CAP));
+ num_u2_port = SSUSB_U2_PORT_NUM(readl(SSUSB_IP_CAP));
+
+ latch_sel_addr = U3_PIPE_LATCH_SEL_ADD;
+ u3_cmd_addr = U3_XHCI_CMD_ADDR;
+ u3_sts_addr = U3_XHCI_STS_ADDR;
+ u3_p1_sc_addr = U3_P1_SC;
+ u3_p1_ltssm_addr = U3_P1_LTSSM;
+ u3_link_err_count_addr = U3_LINK_ERR_COUNT;
+ u3_recovery_count_addr = U3_RECOVERY_COUNT;
+
+
+ dev = my_hcd->self.controller;
+ u3h_hw = dev->platform_data;
+ xhci = hcd_to_xhci(my_hcd);
+
+ u4ErrorCnt = 0;
+ u4RecoveryCnt = 0;
+ port_id = 1;
+
+ printk(KERN_ERR "****** USB3 Pipe Calibration ******\n");
+
+ u1TxSel = 0;
+
+ for(u1RxSel = 0; u1RxSel<2; u1RxSel++){
+ u1PhyDrv = 2;
+ for(u1TimeDelay=0; u1TimeDelay<32; u1TimeDelay++)
+ {
+ printk(KERN_ERR "Test TxSel[0x%02x] RxSel[0x%02x] PhyDrv[0x%02x] TimingDelay[0x%02x]\n"
+ , u1TxSel, u1RxSel, u1PhyDrv, u1TimeDelay);
+
+ /* write phase, drv */
+ u3phy_ops->change_pipe_phase(u3phy, u1PhyDrv, u1TimeDelay);
+
+ /* reinitIP(); */
+ enableAllClockPower();
+
+ /* set pipe latch */
+ addr = U3_PIPE_LATCH_SEL_ADD;
+ data = (u1TxSel<<2) | u1RxSel;
+ writel(data, addr);
+
+ /* enable port power */
+ temp = readl(u3_p1_sc_addr);
+ temp = xhci_port_state_to_neutral(temp);
+ temp |= PORT_POWER;
+ writel(temp, u3_p1_sc_addr);
+ /* Wait 100 ms */
+ msleep(100);
+
+ /* polling PLS */
+ u4CorrectCnt=0;
+ u1IsPass=1;
+ u1TmpMacltssm = 0;
+ u1TmpPortPlsVal = 0;
+
+ /* polling 200 times
+ * if PLS in compliance, set warm reset
+ * if polling status in U0 more than 3 times, check recovery & link error count, report PASS
+ * print phase information
+ * else report FAIL
+ */
+ for(i=0; i<200; i++){
+ /* read port link status */
+ addr = U3_P1_SC;
+ data = readl(addr);
+ u1PortPlsVal = (data & PORT_PLS_MASK) >> 5;
+
+ /* read usb3 mac LTSSM */
+ addr = U3_P1_LTSSM;
+ data = readl(addr);
+ u1Macltssm = (data & 0x1f);
+ /* u1Macltssm & 0x1f */
+
+ if(u1TmpMacltssm != u1Macltssm || u1TmpPortPlsVal != u1PortPlsVal){
+ printk(KERN_ERR "Polling %d PLS=%d LTSSM=%d\n", i, u1PortPlsVal, u1Macltssm);
+ u1TmpMacltssm = u1Macltssm;
+ u1TmpPortPlsVal = u1PortPlsVal;
+ }
+
+ /* if PLS in compliance, set warm reset */
+ if(u1PortPlsVal == 10){
+ addr = U3_P1_SC;
+ data = readl(addr);
+ data |= PORT_WR;
+ writel(data, addr);
+ }
+ if(u1PortPlsVal == 0){
+ u4CorrectCnt++;
+ }
+
+ if(u4CorrectCnt==3){
+ /* clear recovery count */
+ addr = U3_RECOVERY_COUNT;
+ writel((1<<16), U3_RECOVERY_COUNT);
+ u1IsPass = 0;
+ /* read error count */
+ addr = U3_LINK_ERR_COUNT;
+ u4ErrorCnt = readl(addr);
+
+ msleep(500);
+
+ /* read error count and recovery count in these period */
+ addr = U3_LINK_ERR_COUNT;
+ u4ErrorCnt = readl(addr);
+
+ addr = U3_RECOVERY_COUNT;
+ u4RecoveryCnt = readl(addr);
+
+ u4CorrectCnt=4;
+ }
+ msleep(10);
+ }
+ if(u1IsPass == 0){
+ PASSCNT++;
+ printk(KERN_ERR "[PASS] PLS=0, Error=%d, Recovery=%d, TxSel[0x%02x] RxSel[0x%02x] PhyDrv[0x%02x] TimingDelay[0x%02x]\n\n"
+ , u4ErrorCnt, u4RecoveryCnt, u1TxSel, u1RxSel, u1PhyDrv, u1TimeDelay);
+ }
+ else{
+ printk(KERN_ERR "[FAIL] PLS=%d\n\n", u1PortPlsVal);
+ }
+ msleep(3);
+ }
+ }
+ if(PASSCNT){
+ printk(KERN_ERR "[PASS] PASSCNT=%d ^_^\n", PASSCNT);
+ return 0;
+ }
+ else{
+ printk(KERN_ERR "[FAIL] ALL FAIL ~_~\n");
+ return RET_FAIL;
+ }
+
+}
+
+static int dbg_printhccparams(int argc, char** argv){
+	struct xhci_hcd *xhci;
+	u32 __iomem *addr;
+	int temp, tmp_add, i;
+	xhci = hcd_to_xhci(my_hcd);
+
+ xhci_dbg(xhci, "hcs_params1 add: 0x%x\n", &xhci->cap_regs->hcs_params1);
+ xhci_dbg(xhci, "hcs_params2 add: 0x%x\n", &xhci->cap_regs->hcs_params2);
+ xhci_dbg(xhci, "hcs_params3 add: 0x%x\n", &xhci->cap_regs->hcs_params3);
+ xhci_dbg(xhci, "hc_capbase add: 0x%x\n", &xhci->cap_regs->hc_capbase);
+ xhci_dbg(xhci, "hcc_params add: 0x%x\n", &xhci->cap_regs->hcc_params);
+ temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
+ xhci_dbg(xhci, "hcc_params: 0x%x\n", temp);
+ tmp_add = temp >> 16;
+ addr = ((&xhci->cap_regs->hc_capbase) + (temp >>16));
+ i = 1;
+ while(tmp_add != 0){
+ xhci_dbg(xhci, "cap pointer %d[0x%x]: \n",i, addr);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "0x%x\n", temp);
+ tmp_add = (temp >> 8) & 0xff;
+ temp = xhci_readl(xhci, addr+1);
+ xhci_dbg(xhci, "0x%x\n", temp);
+ temp = xhci_readl(xhci, addr+2);
+ xhci_dbg(xhci, "0x%x\n", temp);
+ temp = xhci_readl(xhci, addr+3);
+ xhci_dbg(xhci, "0x%x\n", temp);
+ i++;
+ addr = addr + tmp_add;
+ }
+
+ return RET_SUCCESS;
+}
+
+static int dbg_port_set_pls(int argc, char** argv){
+	struct xhci_hcd *xhci;
+	u32 __iomem *addr;
+	int temp;
+	int pls, port_id;
+
+	xhci = hcd_to_xhci(my_hcd);
+	if(argc > 1){
+		port_id = (int)simple_strtol(argv[1], &argv[1], 10);
+		xhci_dbg(xhci, "port_id = %d\n", port_id);
+	}
+
+	if(argc > 2){
+		pls = (int)simple_strtol(argv[2], &argv[2], 10);
+		xhci_dbg(xhci, "pls set to %d\n", pls);
+	}
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ temp = xhci_port_state_to_neutral(temp);
+ temp = temp & ~(PORT_PLS_MASK);
+ temp = temp | (pls << 5) | PORT_LINK_STROBE;
+ xhci_writel(xhci, temp, addr);
+ return RET_SUCCESS;
+}
+
+static int dbg_port_set_ped(int argc, char** argv){
+ struct xhci_hcd *xhci;
+ u32 __iomem *addr;
+ int temp;
+ int port_id;
+
+	xhci = hcd_to_xhci(my_hcd);
+	if(argc > 1){
+		port_id = (int)simple_strtol(argv[1], &argv[1], 10);
+		xhci_dbg(xhci, "port_id = %d\n", port_id);
+	}
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ temp = xhci_port_state_to_neutral(temp);
+ temp = temp | PORT_PE;
+ xhci_writel(xhci, temp, addr);
+ return RET_SUCCESS;
+}
+
+static int dbg_port_reset(int argc, char** argv){
+ struct xhci_hcd *xhci;
+ u32 __iomem *addr;
+ int temp, port_id;
+ char isWarmReset;
+
+ port_id = g_port_id;
+ isWarmReset = true;
+
+	xhci = hcd_to_xhci(my_hcd);
+	if(argc > 1){
+		port_id = (int)simple_strtol(argv[1], &argv[1], 10);
+		xhci_dbg(xhci, "port_id = %d\n", port_id);
+	}
+
+	if(argc > 2){
+		if(!strcmp(argv[2], "true")){
+			printk(KERN_DEBUG "test WarmReset=true\n");
+			isWarmReset = true;
+		}
+	}
+ if(isWarmReset){
+ /* do warm reset */
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ temp = xhci_port_state_to_neutral(temp);
+ temp = (temp | PORT_WR);
+ xhci_writel(xhci, temp, addr);
+ }
+ else{
+ /* hot reset port */
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ temp = xhci_port_state_to_neutral(temp);
+ temp = (temp | PORT_RESET);
+ xhci_writel(xhci, temp, addr);
+ }
+ return RET_SUCCESS;
+}
+
+static int dbg_printportstatus(int argc, char** argv){
+ struct xhci_hcd *xhci;
+ u32 __iomem *addr;
+ int temp, port_id;
+ int pls = 0;
+
+ port_id = g_port_id;
+	xhci = hcd_to_xhci(my_hcd);
+	if(argc > 1){
+		port_id = (int)simple_strtol(argv[1], &argv[1], 10);
+		xhci_dbg(xhci, "port_id = %d\n", port_id);
+	}
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ pls = (temp >> 5) & 0xf;
+ xhci_err(xhci, "port %d, 0x%x = 0x%x ", port_id, addr, temp);
+ switch(pls){
+ case 0:
+ xhci_err(xhci, "@Link is in the U0 State");
+ break;
+ case 1:
+ xhci_err(xhci, "@Link is in the U1 State");
+ break;
+ case 2:
+ xhci_err(xhci, "@Link is in the U2 State");
+ break;
+ case 3:
+ xhci_err(xhci, "@Link is in the U3 State (Device Suspended)");
+ break;
+ case 4:
+ xhci_err(xhci, "@Link is in the Disabled State");
+ break;
+ case 5:
+ xhci_err(xhci, "@Link is in the RxDetect State");
+ break;
+ case 6:
+ xhci_err(xhci, "@LLink is in the Inactive State");
+ break;
+ case 7:
+ xhci_err(xhci, "@Link is in the Polling State");
+ break;
+ case 8:
+ xhci_err(xhci, "@Link is in the Recovery State");
+ break;
+ case 9:
+ xhci_err(xhci, "@Link is in the Hot Reset State");
+ break;
+ case 10:
+ xhci_err(xhci, "@Link is in the Compliance Mode State");
+ break;
+ case 11:
+ xhci_err(xhci, "@Link is in the Test Modef State");
+ break;
+ case 12:
+ xhci_err(xhci, "@Reserved");
+ break;
+ case 13:
+ xhci_err(xhci, "@Reserved");
+ break;
+ case 14:
+ xhci_err(xhci, "@Link is in the Reset state (MTK)");
+ break;
+ case 15:
+ xhci_err(xhci, "@Link is in the Resume State");
+ break;
+ }
+
+ return RET_SUCCESS;
+}
+
+static int dbg_delayms(int argc, char** argv){
+ int msecs;
+
+ msecs = 10;
+ if(argc > 1){
+ msecs = (int)simple_strtol(argv[1], &argv[1], 10);
+		printk(KERN_DEBUG "delay %d msecs\n", msecs);
+ }
+
+ msleep(msecs);
+ return RET_SUCCESS;
+}
+
+
+static int dbg_u3w(int argc, char**argv)
+{
+ int u4TimingValue;
+ char u1TimingValue;
+ int u4TimingAddress;
+
+ if (argc<3)
+ {
+ printk(KERN_ERR "Arg: address value\n");
+ return RET_FAIL;
+ }
+ u3phy_init();
+
+ u4TimingAddress = (int)simple_strtol(argv[1], &argv[1], 16);
+ u4TimingValue = (int)simple_strtol(argv[2], &argv[2], 16);
+ u1TimingValue = u4TimingValue & 0xff;
+ if(!isFPGA){
+ /* access MMIO directly */
+ writel(u1TimingValue, u4TimingAddress);
+ }else{
+ /* access through I2C or GPIO window */
+ _U3Write_Reg(u4TimingAddress, u1TimingValue);
+ }
+ printk(KERN_ERR "Write done\n");
+ return RET_SUCCESS;
+
+}
+
+static int dbg_u3r(int argc, char**argv)
+{
+ char u1ReadTimingValue;
+ int u4TimingAddress;
+ if (argc<2)
+ {
+ printk(KERN_ERR "Arg: address\n");
+ return 0;
+ }
+ u3phy_init();
+ mdelay(500);
+ u4TimingAddress = (int)simple_strtol(argv[1], &argv[1], 16);
+ if(!isFPGA){
+ /* access MMIO directly */
+ u1ReadTimingValue = readl(u4TimingAddress);
+ }else{
+ /* access through I2C or GPIO window */
+ u1ReadTimingValue = _U3Read_Reg(u4TimingAddress);
+ }
+ printk(KERN_ERR "Value = 0x%x\n", u1ReadTimingValue);
+ return 0;
+}
+
+static int dbg_u3PHY_init(int argc, char**argv)
+{
+ int PhyDrv;
+ int TimeDelay;
+ if(isFPGA){
+ PhyDrv = 2;
+ TimeDelay = 0x0c;
+ u3phy_init();
+ if (u3phy_ops->u2_slew_rate_calibration) {
+ u3phy_ops->u2_slew_rate_calibration(u3phy);
+ }
+ else{
+ printk(KERN_ERR "WARN: PHY doesn't implement u2 slew rate calibration function\n");
+ }
+ if(argc > 1){
+ TimeDelay = (int)simple_strtol(argv[1], &argv[1], 16);
+ }
+ u3phy_ops->init(u3phy);
+ if((u3phy_ops->change_pipe_phase(u3phy, PhyDrv, TimeDelay)) != PHY_TRUE){
+ printk(KERN_ERR "WARN: PHY change_pipe_phase FAIL\n");
+ return PHY_FALSE;
+ }
+ u3phy_init_FPGA();
+ clear_reset();
+ }else{
+ u3phy_config();
+ }
+ return RET_SUCCESS;
+}
+
+static int dbg_phy_eyescan(int argc, char** argv){
+ _rEye1.bX_tl = (int)simple_strtol(argv[1], &argv[1], 10);
+ _rEye1.bY_tl = (int)simple_strtol(argv[2], &argv[2], 10);
+ _rEye1.bX_br = (int)simple_strtol(argv[3], &argv[3], 10);
+ _rEye1.bY_br = (int)simple_strtol(argv[4], &argv[4], 10);
+ _rEye1.bDeltaX = (int)simple_strtol(argv[5], &argv[5], 10);
+ _rEye1.bDeltaY = (int)simple_strtol(argv[6], &argv[6], 10);
+
+ _rEye2.bX_tl = (int)simple_strtol(argv[1], &argv[1], 10);
+ _rEye2.bY_tl = (int)simple_strtol(argv[2], &argv[2], 10);
+ _rEye2.bX_br = (int)simple_strtol(argv[3], &argv[3], 10);
+ _rEye2.bY_br = (int)simple_strtol(argv[4], &argv[4], 10);
+ _rEye2.bDeltaX = (int)simple_strtol(argv[5], &argv[5], 10);
+ _rEye2.bDeltaY = (int)simple_strtol(argv[6], &argv[6], 10);
+
+ _rTestCycle.wEyeCnt = (int)simple_strtol(argv[7], &argv[7], 10);
+ _rTestCycle.bNumOfEyeCnt = (int)simple_strtol(argv[8], &argv[8], 10);
+ _rTestCycle.bNumOfIgnoreCnt = (int)simple_strtol(argv[9], &argv[9], 10);
+ _rTestCycle.bPICalEn = 1;
+
+ u3phy_init();
+ u3phy_ops->eyescan_init(u3phy);
+ if(u3auto_hcd_reset() != RET_SUCCESS){
+ printk(KERN_ERR "init host controller, enable slot, address dev FAILED\n");
+ return RET_FAIL;
+ }
+
+ u3phy_ops->eyescan(u3phy, _rEye1.bX_tl, _rEye1.bY_tl, _rEye1.bX_br, _rEye1.bY_br, _rEye1.bDeltaX, _rEye1.bDeltaY
+ , _rTestCycle.wEyeCnt, _rTestCycle.bNumOfEyeCnt, _rTestCycle.bPICalEn, _rTestCycle.bNumOfIgnoreCnt);
+ return RET_SUCCESS;
+}
+
+static int dbg_u2_testmode(int argc, char** argv){
+ struct xhci_hcd *xhci;
+ u32 __iomem *addr;
+ int temp, port_id;
+ int test_value;
+
+ if(argc<2){
+ printk(KERN_ERR "Args: test mode value\n");
+ printk(KERN_ERR " (0)not enabled (1)J_STATE (2)K_STATE (3)SE0_NAK (4)Packet (5)FORCE_ENABLE (15)Port test control error\n");
+ return RET_FAIL;
+ }
+ if(argc>2){
+ port_id = (int)simple_strtol(argv[2], &argv[2], 10);
+ }
+ else
+ port_id = 1;
+
+ test_value = (int)simple_strtol(argv[1], &argv[1], 10);
+ xhci_dbg(xhci, "test_value = %d\n", test_value);
+
+ xhci = hcd_to_xhci(my_hcd);
+ addr = &xhci->op_regs->port_power_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ temp &= ~(0xf<<28);
+ temp |= (test_value<<28);
+ xhci_writel(xhci, temp, addr);
+ return RET_SUCCESS;
+}
+
+static int dbg_memorywrite(int argc, char** argv){
+ unsigned int addr, value;
+ if (argc<3)
+ {
+ printk(KERN_ERR "Arg: address value\n");
+ return RET_FAIL;
+ }
+
+ addr = (int)simple_strtol(argv[1], &argv[1], 16);
+ value = (int)simple_strtol(argv[2], &argv[2], 16);
+
+ writel(value, addr);
+}
+
+static int dbg_memoryread(int argc, char** argv){
+ unsigned int addr, value;
+ if (argc<2)
+ {
+ printk(KERN_ERR "Arg: address\n");
+ return RET_FAIL;
+ }
+ addr = (int)simple_strtol(argv[1], &argv[1], 16);
+ value = readl(addr);
+ printk(KERN_ERR "Addr: 0x%x, Value: 0x%x\n", addr, value);
+}
+
+static int dbg_printslotcontext(int argc, char** argv){
+ struct xhci_hcd *xhci;
+ struct xhci_container_ctx *out_ctx, *in_ctx;
+ int slot_id;
+
+ slot_id = g_slot_id;
+
+ if(argc > 1){
+ slot_id = (int)simple_strtol(argv[1], &argv[1], 10);
+ xhci_dbg(xhci, "slot id set to %d\n", slot_id);
+ }
+
+ xhci = hcd_to_xhci(my_hcd);
+ in_ctx = xhci->devs[slot_id]->in_ctx;
+ xhci_dbg_ctx(xhci, in_ctx, 5);
+ out_ctx = xhci->devs[slot_id]->out_ctx;
+ mtk_xhci_dbg_ctx(xhci, out_ctx, 5);
+ return RET_SUCCESS;
+}
+
+static int dbg_read_xhci(int argc, char** argv)
+{
+ u32 u4SrcAddr;
+ u32 u4Addr;
+ u32 u4Len;
+ u32 u4Idx;
+ u32 temp1, temp2, temp3, temp4;
+
+ struct xhci_hcd *xhci = hcd_to_xhci(my_hcd);
+
+ /* 4-bytes alignment */
+ u4SrcAddr = simple_strtoul(argv[1], NULL, 16) & 0xfffffffc;
+ u4Len = simple_strtoul(argv[2], NULL, 16);
+
+ /* no operation is needed */
+ if (u4Len == 0)
+ {
+ return 0;
+ }
+
+ /* the maximum number of bytes */
+ if (u4Len > 0x1000)
+ {
+ u4Len = 0x1000;
+ }
+
+ /* sum together xhci register base address and offset */
+ u4SrcAddr += ((u32)xhci->cap_regs);
+
+ for (u4Idx = 0; u4Idx < u4Len; u4Idx += 16)
+ {
+ temp1 = xhci_readl(xhci, (u4SrcAddr + u4Idx + 0));
+ temp2 = xhci_readl(xhci, (u4SrcAddr + u4Idx + 4));
+ temp3 = xhci_readl(xhci, (u4SrcAddr + u4Idx + 8));
+ temp4 = xhci_readl(xhci, (u4SrcAddr + u4Idx + 12));
+
+ printk("0x%08x | %08x %08x %08x %08x\r\n", (u4SrcAddr + u4Idx), temp1, temp2, temp3, temp4);
+ }
+
+ return 0;
+}
+
+static int dbg_dump_regs(int argc, char** argv)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(my_hcd);
+
+ xhci_print_registers(xhci);
+
+ return 0;
+}
+
+struct file_operations xhci_mtk_test_fops = {
+ owner: THIS_MODULE,
+ read: xhci_mtk_test_read,
+ write: xhci_mtk_test_write,
+ unlocked_ioctl: xhci_mtk_test_unlock_ioctl,
+ open: xhci_mtk_test_open,
+ release: xhci_mtk_test_release,
+};
+
+
+
+static int __init mtk_test_init(void)
+{
+ int retval = 0;
+ printk(KERN_DEBUG "xchi_mtk_test Init\n");
+ retval = register_chrdev(XHCI_MTK_TEST_MAJOR, DEVICE_NAME, &xhci_mtk_test_fops);
+ if(retval < 0)
+ {
+ printk(KERN_DEBUG "xchi_mtk_test Init failed, %d\n", retval);
+ goto fail;
+ }
+ g_port_connect = false;
+ g_port_reset = false;
+ g_event_full = false;
+ g_port_id = 0;
+ g_slot_id = 0;
+ g_speed = 0; /* UNKNOWN_SPEED */
+ g_cmd_status = CMD_DONE;
+ return 0;
+ fail:
+ return retval;
+}
+module_init(mtk_test_init);
+
+static void __exit mtk_test_cleanup(void)
+{
+ printk(KERN_DEBUG "xchi_mtk_test End\n");
+ unregister_chrdev(XHCI_MTK_TEST_MAJOR, DEVICE_NAME);
+ if(my_hcd != NULL){
+ mtk_xhci_hcd_cleanup();
+ }
+}
+module_exit(mtk_test_cleanup);
+
+MODULE_LICENSE("GPL");
Index: linux-3.18.21/drivers/usb/host/mtk_test/mtk-test.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/mtk-test.h 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,68 @@
+
+#define XHCI_MTK_TEST_MAJOR 234
+#define DEVICE_NAME "xhci_mtk_test"
+
+/* for auto test struct defs */
+
+typedef enum
+{
+ USB_TX = 0,
+ USB_RX
+} USB_DIR;
+
+typedef enum
+{
+ Ctrol_Transfer = 0,
+ Bulk_Random,
+ Test_Loopback,
+ Test_End
+} USB_TEST_CASE;
+
+/* CTRL, BULK, INTR, ISO endpoint */
+typedef enum
+{
+ USB_CTRL = 0,
+ USB_BULK,
+ USB_INTR,
+ USB_ISO
+}USB_TRANSFER_TYPE;
+
+typedef enum
+{
+ SPEED_HIGH = 0,
+ SPEED_FULL
+}USB_SPEED;
+
+typedef enum
+{
+ BUSY = 0,
+ READY,
+ END
+}state;
+
+typedef enum
+{
+ TRANSFER_SUCCESS = 0,
+ TRANSFER_FAIL
+}status;
+
+typedef struct
+{
+ unsigned char type;
+ unsigned char speed;
+ unsigned int length;
+ unsigned short maxp;
+ unsigned char state;
+ unsigned char status;
+}USB_TRANSFER;
+
+
+typedef struct
+{
+ unsigned short header;
+ unsigned char testcase;
+ USB_TRANSFER transfer;
+ unsigned short end;
+}USB_MSG;
+
+
Index: linux-3.18.21/drivers/usb/host/mtk_test/mtk-usb-hcd.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/mtk-usb-hcd.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,812 @@
+//#include "mtk-usb-hcd.h"
+//#include <linux/usb/hcd.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <asm/unaligned.h>
+#include "xhci.h"
+#include "mtk-test.h"
+
+#if 0
+/* Device for a quirk */
+#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
+#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
+#endif
+
+/* FIXME tune these based on pool statistics ... */
+static const size_t pool_max [HCD_BUFFER_POOLS] = {
+ /* platforms without dma-friendly caches might need to
+ * prevent cacheline sharing...
+ */
+ 32,
+ 128,
+ 512,
+ PAGE_SIZE / 2
+ /* bigger --> allocate pages */
+};
+
+#if 0
+/**
+ * hcd_buffer_create - initialize buffer pools
+ * @hcd: the bus whose buffer pools are to be initialized
+ * Context: !in_interrupt()
+ *
+ * Call this as part of initializing a host controller that uses the dma
+ * memory allocators. It initializes some pools of dma-coherent memory that
+ * will be shared by all drivers using that controller, or returns a negative
+ * errno value on error.
+ *
+ * Call hcd_buffer_destroy() to clean up after using those pools.
+ */
+int hcd_buffer_create(struct usb_hcd *hcd)
+{
+ char name[16];
+ int i, size;
+
+ if (!hcd->self.controller->dma_mask &&
+ !(hcd->driver->flags & HCD_LOCAL_MEM))
+ return 0;
+
+ for (i = 0; i < HCD_BUFFER_POOLS; i++) {
+ size = pool_max[i];
+ if (!size)
+ continue;
+ snprintf(name, sizeof name, "buffer-%d", size);
+ hcd->pool[i] = dma_pool_create(name, hcd->self.controller,
+ size, size, 0);
+ if (!hcd->pool [i]) {
+ hcd_buffer_destroy(hcd);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+#endif
+#if 0
+/**
+ * hcd_buffer_destroy - deallocate buffer pools
+ * @hcd: the bus whose buffer pools are to be destroyed
+ * Context: !in_interrupt()
+ *
+ * This frees the buffer pools created by hcd_buffer_create().
+ */
+void hcd_buffer_destroy(struct usb_hcd *hcd)
+{
+ int i;
+
+ for (i = 0; i < HCD_BUFFER_POOLS; i++) {
+ struct dma_pool *pool = hcd->pool[i];
+ if (pool) {
+ dma_pool_destroy(pool);
+ hcd->pool[i] = NULL;
+ }
+ }
+}
+#endif
+#if 0
+/**
+ * usb_hcd_irq - hook IRQs to HCD framework (bus glue)
+ * @irq: the IRQ being raised
+ * @__hcd: pointer to the HCD whose IRQ is being signaled
+ *
+ * If the controller isn't HALTed, calls the driver's irq handler.
+ * Checks whether the controller is now dead.
+ */
+irqreturn_t usb_hcd_irq (int irq, void *__hcd)
+{
+ struct usb_hcd *hcd = __hcd;
+ unsigned long flags;
+ irqreturn_t rc;
+
+ /* IRQF_DISABLED doesn't work correctly with shared IRQs
+ * when the first handler doesn't use it. So let's just
+ * assume it's never used.
+ */
+ local_irq_save(flags);
+ if (unlikely(hcd->state == HC_STATE_HALT ||
+ !test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))) {
+ rc = IRQ_NONE;
+ } else if (hcd->driver->irq(hcd) == IRQ_NONE) {
+ rc = IRQ_NONE;
+ } else {
+ set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
+
+ if (unlikely(hcd->state == HC_STATE_HALT))
+ usb_hc_died(hcd);
+ rc = IRQ_HANDLED;
+ }
+
+ local_irq_restore(flags);
+ return rc;
+}
+#endif
+#if 0
+/* Returns 1 if @usb_bus is WUSB, 0 otherwise */
+static unsigned usb_bus_is_wusb(struct usb_bus *bus)
+{
+ struct usb_hcd *hcd = container_of(bus, struct usb_hcd, self);
+ return hcd->wireless;
+}
+#endif
+
+struct usb_device *mtk_usb_alloc_dev(struct usb_device *parent, struct usb_bus *bus, unsigned port1){
+ struct usb_device *dev;
+ struct usb_hcd *usb_hcd = container_of(bus, struct usb_hcd, self);
+ unsigned root_hub = 0;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ if (!usb_get_hcd(bus_to_hcd(bus))) {
+ kfree(dev);
+ return NULL;
+ }
+ device_initialize(&dev->dev);
+
+ /* Save readable and stable topology id, distinguishing devices
+ * by location for diagnostics, tools, driver model, etc. The
+ * string is a path along hub ports, from the root. Each device's
+ * dev->devpath will be stable until USB is re-cabled, and hubs
+ * are often labeled with these port numbers. The name isn't
+ * as stable: bus->busnum changes easily from modprobe order,
+ * cardbus or pci hotplugging, and so on.
+ */
+ if (unlikely(!parent)) {
+ dev->devpath[0] = '0';
+ dev->route = 0;
+
+ dev->dev.parent = bus->controller;
+ dev_set_name(&dev->dev, "usb%d", bus->busnum);
+ root_hub = 1;
+ } else {
+ /* match any labeling on the hubs; it's one-based */
+ if (parent->devpath[0] == '0') {
+ snprintf(dev->devpath, sizeof dev->devpath, "%d", port1);
+ /* Root ports are not counted in route string */
+ dev->route = 0;
+ printk(KERN_DEBUG "device attached on roothub\n");
+ } else {
+ snprintf(dev->devpath, sizeof dev->devpath, "%s.%d", parent->devpath, port1);
+ /* Route string assumes hubs have less than 16 ports */
+ if (port1 < 15)
+ dev->route = parent->route +
+ (port1 << ((parent->level - 1)*4));
+ else
+ dev->route = parent->route +
+ (15 << ((parent->level - 1)*4));
+ printk(KERN_DEBUG "device route string %d\n", dev->route);
+ printk(KERN_DEBUG "parent level %d\n", parent->level);
+ printk(KERN_DEBUG "parent route string %d\n", parent->route);
+ }
+
+ dev->dev.parent = &parent->dev;
+ dev_set_name(&dev->dev, "%d-%s", bus->busnum, dev->devpath);
+
+ /* hub driver sets up TT records */
+ }
+ dev->portnum = port1;
+ dev->bus = bus;
+ dev->parent = parent;
+ if (root_hub) /* Root hub always ok [and always wired] */
+ dev->authorized = 1;
+ else {
+ dev->authorized = usb_hcd->authorized_default;
+// dev->wusb = usb_bus_is_wusb(bus)? 1 : 0;
+ }
+
+ dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
+ dev->ep0.desc.bDescriptorType = USB_DT_ENDPOINT;
+ dev->ep0.enabled = 1;
+ dev->ep_in[0] = &dev->ep0;
+ dev->ep_out[0] = &dev->ep0;
+#if 0
+ /* ep0 maxpacket comes later, from device descriptor */
+ usb_enable_endpoint(dev, &dev->ep0, false);
+#endif
+ dev->can_submit = 1;
+
+ return dev;
+}
+#if 0
+void *hcd_buffer_alloc(
+ struct usb_bus *bus,
+ size_t size,
+ gfp_t mem_flags,
+ dma_addr_t *dma
+)
+{
+ struct usb_hcd *hcd = bus_to_hcd(bus);
+ int i;
+
+ /* some USB hosts just use PIO */
+ if (!bus->controller->dma_mask &&
+ !(hcd->driver->flags & HCD_LOCAL_MEM)) {
+ *dma = ~(dma_addr_t) 0;
+ return kmalloc(size, mem_flags);
+ }
+
+ for (i = 0; i < HCD_BUFFER_POOLS; i++) {
+ if (size <= pool_max [i])
+ return dma_pool_alloc(hcd->pool [i], mem_flags, dma);
+ }
+ return dma_alloc_coherent(hcd->self.controller, size, dma, mem_flags);
+}
+
+void hcd_buffer_free(
+ struct usb_bus *bus,
+ size_t size,
+ void *addr,
+ dma_addr_t dma
+)
+{
+ struct usb_hcd *hcd = bus_to_hcd(bus);
+ int i;
+
+ if (!addr)
+ return;
+
+ if (!bus->controller->dma_mask &&
+ !(hcd->driver->flags & HCD_LOCAL_MEM)) {
+ kfree(addr);
+ return;
+ }
+
+ for (i = 0; i < HCD_BUFFER_POOLS; i++) {
+ if (size <= pool_max [i]) {
+ dma_pool_free(hcd->pool [i], addr, dma);
+ return;
+ }
+ }
+ dma_free_coherent(hcd->self.controller, size, addr, dma);
+}
+#endif
+
+/*
+ * Some usb host controllers can only perform dma using a small SRAM area.
+ * The usb core itself is however optimized for host controllers that can dma
+ * using regular system memory - like pci devices doing bus mastering.
+ *
+ * To support host controllers with limited dma capabilites we provide dma
+ * bounce buffers. This feature can be enabled using the HCD_LOCAL_MEM flag.
+ * For this to work properly the host controller code must first use the
+ * function dma_declare_coherent_memory() to point out which memory area
+ * that should be used for dma allocations.
+ *
+ * The HCD_LOCAL_MEM flag then tells the usb code to allocate all data for
+ * dma using dma_alloc_coherent() which in turn allocates from the memory
+ * area pointed out with dma_declare_coherent_memory().
+ *
+ * So, to summarize...
+ *
+ * - We need "local" memory, canonical example being
+ * a small SRAM on a discrete controller being the
+ * only memory that the controller can read ...
+ * (a) "normal" kernel memory is no good, and
+ * (b) there's not enough to share
+ *
+ * - The only *portable* hook for such stuff in the
+ * DMA framework is dma_declare_coherent_memory()
+ *
+ * - So we use that, even though the primary requirement
+ * is that the memory be "local" (hence addressible
+ * by that device), not "coherent".
+ *
+ */
+
+static int hcd_alloc_coherent(struct usb_bus *bus,
+ gfp_t mem_flags, dma_addr_t *dma_handle,
+ void **vaddr_handle, size_t size,
+ enum dma_data_direction dir)
+{
+ unsigned char *vaddr;
+
+ vaddr = hcd_buffer_alloc(bus, size + sizeof(vaddr),
+ mem_flags, dma_handle);
+ if (!vaddr)
+ return -ENOMEM;
+
+ /*
+ * Store the virtual address of the buffer at the end
+ * of the allocated dma buffer. The size of the buffer
+ * may be uneven so use unaligned functions instead
+ * of just rounding up. It makes sense to optimize for
+ * memory footprint over access speed since the amount
+ * of memory available for dma may be limited.
+ */
+ put_unaligned((unsigned long)*vaddr_handle,
+ (unsigned long *)(vaddr + size));
+
+ if (dir == DMA_TO_DEVICE)
+ memcpy(vaddr, *vaddr_handle, size);
+
+ *vaddr_handle = vaddr;
+ return 0;
+}
+
+void hcd_free_coherent(struct usb_bus *bus, dma_addr_t *dma_handle,
+ void **vaddr_handle, size_t size,
+ enum dma_data_direction dir)
+{
+ unsigned char *vaddr = *vaddr_handle;
+
+ vaddr = (void *)get_unaligned((unsigned long *)(vaddr + size));
+
+ if (dir == DMA_FROM_DEVICE)
+ memcpy(vaddr, *vaddr_handle, size);
+
+ hcd_buffer_free(bus, size + sizeof(vaddr), *vaddr_handle, *dma_handle);
+
+ *vaddr_handle = vaddr;
+ *dma_handle = 0;
+}
+
+#if 0
+void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+ enum dma_data_direction dir;
+
+ dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ hcd_free_coherent(urb->dev->bus,
+ &urb->transfer_dma,
+ &urb->transfer_buffer,
+ urb->transfer_buffer_length,
+ dir);
+
+}
+
+
+int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ enum dma_data_direction dir;
+ int ret = 0;
+ dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ /* Map the URB's buffers for DMA access.
+ * Lower level HCD code should use *_dma exclusively,
+ * unless it uses pio or talks to another transport,
+ * or uses the provided scatter gather list for bulk.
+ */
+ ret = hcd_alloc_coherent(
+ urb->dev->bus, mem_flags,
+ &urb->transfer_dma,
+ &urb->transfer_buffer,
+ urb->transfer_buffer_length,
+ dir);
+
+ if (ret == 0)
+ urb->transfer_flags |= URB_MAP_LOCAL;
+ return ret;
+}
+
+void rh_port_clear_change(struct xhci_hcd *xhci, int port_id){
+ u32 temp,status;
+ u32 __iomem *addr;
+ port_id--;
+ status = 0;
+
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(port_id & 0xff);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "to clear port change, actual port %d status = 0x%x\n", port_id, temp);
+ temp = xhci_port_state_to_clear_change(temp);
+ xhci_writel(xhci, temp, addr);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "clear port change, actual port %d status = 0x%x\n", port_id, temp);
+}
+
+int rh_get_port_status(struct xhci_hcd *xhci, int port_id){
+ u32 temp,status;
+ u32 __iomem *addr;
+
+ port_id--;
+ status = 0;
+
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(port_id & 0xff);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "get port status, actual port %d status = 0x%x\n", port_id, temp);
+
+ /* wPortChange bits */
+ if (temp & PORT_CSC)
+ status |= USB_PORT_STAT_C_CONNECTION << 16;
+ if (temp & PORT_PEC)
+ status |= USB_PORT_STAT_C_ENABLE << 16;
+ if ((temp & PORT_OCC))
+ status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+ if ((temp & PORT_RC))
+ status |= USB_PORT_STAT_C_RESET << 16;
+ if ((temp & PORT_PLC))
+ status |= USB_PORT_STAT_C_SUSPEND << 16;
+ /*
+ * FIXME ignoring suspend, reset, and USB 2.1/3.0 specific
+ * changes
+ */
+ if (temp & PORT_CONNECT) {
+ status |= USB_PORT_STAT_CONNECTION;
+ status |= xhci_port_speed(temp);
+ }
+ if (temp & PORT_PE)
+ status |= USB_PORT_STAT_ENABLE;
+ if (temp & PORT_OC)
+ status |= USB_PORT_STAT_OVERCURRENT;
+ if (temp & PORT_RESET)
+ status |= USB_PORT_STAT_RESET;
+ if (temp & PORT_POWER)
+ status |= USB_PORT_STAT_POWER;
+ xhci_dbg(xhci, "Get port status returned 0x%x\n", status);
+ temp = xhci_port_state_to_neutral(temp);
+ xhci_writel(xhci, temp, addr);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "clear port change, actual port %d status = 0x%x\n", port_id, temp);
+#if 0
+ put_unaligned(cpu_to_le32(status), (__le32 *) buf);
+#endif
+ return status;
+}
+#endif
+
+int mtk_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags){
+ enum dma_data_direction dir;
+ int ret = 0;
+ dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ /* Map the URB's buffers for DMA access.
+ * Lower level HCD code should use *_dma exclusively,
+ * unless it uses pio or talks to another transport,
+ * or uses the provided scatter gather list for bulk.
+ */
+ ret = hcd_alloc_coherent(
+ urb->dev->bus, mem_flags,
+ &urb->transfer_dma,
+ &urb->transfer_buffer,
+ urb->transfer_buffer_length,
+ dir);
+
+ if (ret == 0)
+ urb->transfer_flags |= URB_MAP_LOCAL;
+ return ret;
+}
+void mtk_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb){
+ enum dma_data_direction dir;
+
+ dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ hcd_free_coherent(urb->dev->bus,
+ &urb->transfer_dma,
+ &urb->transfer_buffer,
+ urb->transfer_buffer_length,
+ dir);
+}
+
+/**
+ * usb_alloc_dev - usb device constructor (usbcore-internal)
+ * @parent: hub to which device is connected; null to allocate a root hub
+ * @bus: bus used to access the device
+ * @port1: one-based index of port; ignored for root hubs
+ * Context: !in_interrupt()
+ *
+ * Only hub drivers (including virtual root hub drivers for host
+ * controllers) should ever call this.
+ *
+ * This call may not be used in a non-sleeping context.
+ */
+struct usb_device *mtk_usb_alloc_rhdev(struct usb_device *parent,
+ struct usb_bus *bus, unsigned port1)
+{
+ struct usb_device *dev;
+ struct usb_hcd *usb_hcd = container_of(bus, struct usb_hcd, self);
+ unsigned root_hub = 0;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ if (!usb_get_hcd(bus_to_hcd(bus))) {
+ kfree(dev);
+ return NULL;
+ }
+ device_initialize(&dev->dev);
+ dev->dev.dma_mask = bus->controller->dma_mask;
+ atomic_set(&dev->urbnum, 0);
+ dev->can_submit = 1;
+
+ /* Save readable and stable topology id, distinguishing devices
+ * by location for diagnostics, tools, driver model, etc. The
+ * string is a path along hub ports, from the root. Each device's
+ * dev->devpath will be stable until USB is re-cabled, and hubs
+ * are often labeled with these port numbers. The name isn't
+ * as stable: bus->busnum changes easily from modprobe order,
+ * cardbus or pci hotplugging, and so on.
+ */
+ if (unlikely(!parent)) {
+ dev->devpath[0] = '0';
+ dev->route = 0;
+ dev->dev.parent = bus->controller;
+ dev_set_name(&dev->dev, "usb%d", bus->busnum);
+ root_hub = 1;
+ }
+
+ dev->portnum = port1;
+ dev->bus = bus;
+ dev->parent = parent;
+
+ dev->authorized = 1;
+ return dev;
+}
+
+
+/**
+ * usb_add_hcd - finish generic HCD structure initialization and register
+ * @hcd: the usb_hcd structure to initialize
+ * @irqnum: Interrupt line to allocate
+ * @irqflags: Interrupt type flags
+ *
+ * Finish the remaining parts of generic HCD initialization: allocate the
+ * buffers of consistent memory, register the bus, request the IRQ line,
+ * and call the driver's reset() and start() routines.
+ */
+int mtk_usb_add_hcd(struct usb_hcd *hcd,
+ unsigned int irqnum, unsigned long irqflags)
+{
+ int retval;
+ struct usb_device *rhdev;
+ dev_info(hcd->self.controller, "%s\n", hcd->product_desc);
+
+ hcd->authorized_default = hcd->wireless? 0 : 1;
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+ /* HC is in reset state, but accessible. Now do the one-time init,
+ * bottom up so that hcds can customize the root hubs before khubd
+ * starts talking to them. (Note, bus id is assigned early too.)
+ */
+ if ((retval = hcd_buffer_create(hcd)) != 0) {
+ dev_dbg(hcd->self.controller, "pool alloc failed\n");
+ return retval;
+ }
+
+ if ((rhdev = mtk_usb_alloc_rhdev(NULL, &hcd->self, 0)) == NULL) {
+ dev_err(hcd->self.controller, "unable to allocate root hub\n");
+ retval = -ENOMEM;
+ goto err_allocate_root_hub;
+ }
+ hcd->self.root_hub = rhdev;
+#if 0
+ if ((retval = usb_register_bus(&hcd->self)) < 0)
+ goto err_register_bus;
+
+ if ((rhdev = usb_alloc_dev(NULL, &hcd->self, 0)) == NULL) {
+ dev_err(hcd->self.controller, "unable to allocate root hub\n");
+ retval = -ENOMEM;
+ goto err_allocate_root_hub;
+ }
+
+ switch (hcd->driver->flags & HCD_MASK) {
+ case HCD_USB11:
+ rhdev->speed = USB_SPEED_FULL;
+ break;
+ case HCD_USB2:
+ rhdev->speed = USB_SPEED_HIGH;
+ break;
+ case HCD_USB3:
+ rhdev->speed = USB_SPEED_SUPER;
+ break;
+ default:
+ goto err_allocate_root_hub;
+ }
+ hcd->self.root_hub = rhdev;
+
+ /* wakeup flag init defaults to "everything works" for root hubs,
+ * but drivers can override it in reset() if needed, along with
+ * recording the overall controller's system wakeup capability.
+ */
+ device_init_wakeup(&rhdev->dev, 1);
+#endif
+
+ /* "reset" is misnamed; its role is now one-time init. the controller
+ * should already have been reset (and boot firmware kicked off etc).
+ */
+ printk(KERN_DEBUG "call xhci_mtk_setup\n");
+ if (hcd->driver->reset && (retval = hcd->driver->reset(hcd)) < 0) {
+ dev_err(hcd->self.controller, "can't setup\n");
+ goto err_hcd_driver_setup;
+ }
+#if 0
+ /* NOTE: root hub and controller capabilities may not be the same */
+ if (device_can_wakeup(hcd->self.controller)
+ && device_can_wakeup(&hcd->self.root_hub->dev))
+ dev_dbg(hcd->self.controller, "supports USB remote wakeup\n");
+#endif
+ /* enable irqs just before we start the controller */
+ if (hcd->driver->irq) {
+
+ /* IRQF_DISABLED doesn't work as advertised when used together
+ * with IRQF_SHARED. As usb_hcd_irq() will always disable
+ * interrupts we can remove it here.
+ */
+ if (irqflags & IRQF_SHARED)
+ irqflags &= ~IRQF_DISABLED;
+
+ snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
+ hcd->driver->description, hcd->self.busnum);
+ if ((retval = request_irq(irqnum, &usb_hcd_irq, irqflags,
+ hcd->irq_descr, hcd)) != 0) {
+ dev_err(hcd->self.controller,
+ "request interrupt %d failed\n", irqnum);
+ goto err_request_irq;
+ }
+ hcd->irq = irqnum;
+ dev_info(hcd->self.controller, "irq %d, %s 0x%08llx\n", irqnum,
+ (hcd->driver->flags & HCD_MEMORY) ?
+ "io mem" : "io base",
+ (unsigned long long)hcd->rsrc_start);
+ } else {
+ hcd->irq = -1;
+ if (hcd->rsrc_start)
+ dev_info(hcd->self.controller, "%s 0x%08llx\n",
+ (hcd->driver->flags & HCD_MEMORY) ?
+ "io mem" : "io base",
+ (unsigned long long)hcd->rsrc_start);
+ }
+
+ if ((retval = hcd->driver->start(hcd)) < 0) {
+ dev_err(hcd->self.controller, "startup error %d\n", retval);
+ goto err_hcd_driver_start;
+ }
+#if 0
+ /* starting here, usbcore will pay attention to this root hub */
+ rhdev->bus_mA = min(500u, hcd->power_budget);
+ if ((retval = register_root_hub(hcd)) != 0)
+ goto err_register_root_hub;
+
+ retval = sysfs_create_group(&rhdev->dev.kobj, &usb_bus_attr_group);
+ if (retval < 0) {
+ printk(KERN_ERR "Cannot register USB bus sysfs attributes: %d\n",
+ retval);
+ goto error_create_attr_group;
+ }
+ if (hcd->uses_new_polling && hcd->poll_rh)
+ usb_hcd_poll_rh_status(hcd);
+#endif
+ return retval;
+
+error_create_attr_group:
+ mutex_lock(&usb_bus_list_lock);
+ usb_disconnect(&hcd->self.root_hub);
+ mutex_unlock(&usb_bus_list_lock);
+err_register_root_hub:
+ hcd->driver->stop(hcd);
+err_hcd_driver_start:
+ if (hcd->irq >= 0)
+ free_irq(irqnum, hcd);
+err_request_irq:
+err_hcd_driver_setup:
+ hcd->self.root_hub = NULL;
+ usb_put_dev(rhdev);
+#if 1
+err_allocate_root_hub:
+ hcd->driver->stop(hcd);
+#endif
+err_register_bus:
+ hcd_buffer_destroy(hcd);
+ return retval;
+}
+
+
+/**
+ * usb_remove_hcd - shutdown processing for generic HCDs
+ * @hcd: the usb_hcd structure to remove
+ * Context: !in_interrupt()
+ *
+ * Disconnects the root hub, then reverses the effects of usb_add_hcd(),
+ * invoking the HCD's stop() method.
+ */
+void mtk_usb_remove_hcd(struct usb_hcd *hcd)
+{
+ dev_info(hcd->self.controller, "remove, state %x\n", hcd->state);
+
+ if (HC_IS_RUNNING (hcd->state))
+ hcd->state = HC_STATE_QUIESCING;
+
+ dev_dbg(hcd->self.controller, "roothub graceful disconnect\n");
+#if 0
+ spin_lock_irq (&hcd_root_hub_lock);
+ hcd->rh_registered = 0;
+ spin_unlock_irq (&hcd_root_hub_lock);
+#endif
+#if 0 //#ifdef CONFIG_USB_SUSPEND
+ cancel_work_sync(&hcd->wakeup_work);
+#endif
+#if 0
+ sysfs_remove_group(&hcd->self.root_hub->dev.kobj, &usb_bus_attr_group);
+ mutex_lock(&usb_bus_list_lock);
+ usb_disconnect(&hcd->self.root_hub);
+ mutex_unlock(&usb_bus_list_lock);
+#endif
+ hcd->driver->stop(hcd);
+ hcd->state = HC_STATE_HALT;
+#if 0
+ hcd->poll_rh = 0;
+ del_timer_sync(&hcd->rh_timer);
+#endif
+ if (hcd->irq >= 0)
+ free_irq(hcd->irq, hcd);
+#if 0
+ usb_deregister_bus(&hcd->self);
+#endif
+ hcd_buffer_destroy(hcd);
+}
+/**
+ * usb_bus_init - shared initialization code
+ * @bus: the bus structure being initialized
+ *
+ * This code is used to initialize a usb_bus structure, memory for which is
+ * separately managed.
+ */
+static void mtk_usb_bus_init (struct usb_bus *bus)
+{
+ memset (&bus->devmap, 0, sizeof(struct usb_devmap));
+
+ bus->devnum_next = 1;
+
+ bus->root_hub = NULL;
+ bus->busnum = -1;
+ bus->bandwidth_allocated = 0;
+ bus->bandwidth_int_reqs = 0;
+ bus->bandwidth_isoc_reqs = 0;
+
+ INIT_LIST_HEAD (&bus->bus_list);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/**
+ * usb_create_hcd - create and initialize an HCD structure
+ * @driver: HC driver that will use this hcd
+ * @dev: device for this HC, stored in hcd->self.controller
+ * @bus_name: value to store in hcd->self.bus_name
+ * Context: !in_interrupt()
+ *
+ * Allocate a struct usb_hcd, with extra space at the end for the
+ * HC driver's private data. Initialize the generic members of the
+ * hcd structure.
+ *
+ * If memory is unavailable, returns NULL.
+ */
+struct usb_hcd *mtk_usb_create_hcd (const struct hc_driver *driver,
+ struct device *dev, const char *bus_name)
+{
+ struct usb_hcd *hcd;
+
+ hcd = kzalloc(sizeof(*hcd) + driver->hcd_priv_size, GFP_KERNEL);
+ if (!hcd) {
+ dev_dbg (dev, "hcd alloc failed\n");
+ return NULL;
+ }
+ dev_set_drvdata(dev, hcd);
+ kref_init(&hcd->kref);
+
+ mtk_usb_bus_init(&hcd->self);
+ hcd->self.controller = dev;
+ hcd->self.bus_name = bus_name;
+ hcd->self.uses_dma = (dev->dma_mask != NULL);
+
+// init_timer(&hcd->rh_timer);
+// hcd->rh_timer.function = rh_timer_func;
+// hcd->rh_timer.data = (unsigned long) hcd;
+#if 0 //#ifdef CONFIG_USB_SUSPEND
+ INIT_WORK(&hcd->wakeup_work, hcd_resume_work);
+#endif
+ mutex_init(&hcd->bandwidth_mutex);
+
+ hcd->driver = driver;
+ hcd->product_desc = (driver->product_desc) ? driver->product_desc :
+ "USB Host Controller";
+ return hcd;
+}
+
+
+/* Find the flag for this endpoint (for use in the control context). Use the
+ * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
+ * bit 1, etc.
+ */
+unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
+{
+ return 1 << (xhci_get_endpoint_index(desc) + 1);
+}
+
Index: linux-3.18.21/drivers/usb/host/mtk_test/mtk-usb-hcd.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/mtk-usb-hcd.h 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,227 @@
+#ifndef __MTK_USB_HCD_H
+#define __MTK_USB_HCD_H
+
+#include <linux/types.h> /* size_t */
+#include <linux/kernel.h> /* printk() */
+#include <linux/fs.h> /* everything... */
+#include <linux/errno.h> /* error codes */
+#include <linux/types.h> /* size_t */
+#include <linux/proc_fs.h>
+#include <linux/fcntl.h> /* O_ACCMODE */
+#include <linux/seq_file.h>
+#include <linux/cdev.h>
+//#include <linux/pci.h>
+
+#include <asm/unaligned.h>
+
+#include <linux/usb.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+//#include <linux/usb/hcd.h>
+#include "xhci.h"
+
+/*
+ * Hub request types
+ */
+
+#define USB_RT_HUB (USB_TYPE_CLASS | USB_RECIP_DEVICE)
+#define USB_RT_PORT (USB_TYPE_CLASS | USB_RECIP_OTHER)
+
+
+/*
+ * Descriptor types ... USB 2.0 spec table 9.5
+ */
+#define USB_DT_DEVICE 0x01
+#define USB_DT_CONFIG 0x02
+#define USB_DT_STRING 0x03
+#define USB_DT_INTERFACE 0x04
+#define USB_DT_ENDPOINT 0x05
+#define USB_DT_DEVICE_QUALIFIER 0x06
+#define USB_DT_OTHER_SPEED_CONFIG 0x07
+#define USB_DT_INTERFACE_POWER 0x08
+/* these are from a minor usb 2.0 revision (ECN) */
+#define USB_DT_OTG 0x09
+#define USB_DT_DEBUG 0x0a
+#define USB_DT_INTERFACE_ASSOCIATION 0x0b
+/* these are from the Wireless USB spec */
+#define USB_DT_SECURITY 0x0c
+#define USB_DT_KEY 0x0d
+#define USB_DT_ENCRYPTION_TYPE 0x0e
+#define USB_DT_BOS 0x0f
+#define USB_DT_DEVICE_CAPABILITY 0x10
+#define USB_DT_WIRELESS_ENDPOINT_COMP 0x11
+#define USB_DT_WIRE_ADAPTER 0x21
+#define USB_DT_RPIPE 0x22
+#define USB_DT_CS_RADIO_CONTROL 0x23
+/* From the T10 UAS specification */
+#define USB_DT_PIPE_USAGE 0x24
+/* From the USB 3.0 spec */
+#define USB_DT_SS_ENDPOINT_COMP 0x30
+
+/* The following flags are used internally by usbcore and HCDs */
+#define URB_DIR_IN 0x0200 /* Transfer from device to host */
+#define URB_DIR_OUT 0
+#define URB_DIR_MASK URB_DIR_IN
+
+#define URB_DMA_MAP_SINGLE 0x00010000 /* Non-scatter-gather mapping */
+#define URB_DMA_MAP_PAGE 0x00020000 /* HCD-unsupported S-G */
+#define URB_DMA_MAP_SG 0x00040000 /* HCD-supported S-G */
+#define URB_MAP_LOCAL 0x00080000 /* HCD-local-memory mapping */
+#define URB_SETUP_MAP_SINGLE 0x00100000 /* Setup packet DMA mapped */
+#define URB_SETUP_MAP_LOCAL 0x00200000 /* HCD-local setup packet */
+#define URB_DMA_SG_COMBINED 0x00400000 /* S-G entries were combined */
+
+/*
+ * USB directions
+ *
+ * This bit flag is used in endpoint descriptors' bEndpointAddress field.
+ * It's also one of three fields in control requests bRequestType.
+ */
+#define USB_DIR_OUT 0 /* to device */
+#define USB_DIR_IN 0x80 /* to host */
+#define USB_DIR_MASK 0x80 /* to host */
+
+/*
+* USB request types
+*/
+#define USB_TYPE_STANDARD (0x00 << 5)
+#define USB_TYPE_CLASS (0x01 << 5)
+#define USB_TYPE_VENDOR (0x02 << 5)
+#define USB_TYPE_RESERVED (0x03 << 5)
+
+/*
+* USB recipients
+*/
+#define USB_RECIP_DEVICE 0x00
+#define USB_RECIP_INTERFACE 0x01
+#define USB_RECIP_ENDPOINT 0x02
+#define USB_RECIP_OTHER 0x03
+
+
+
+
+/*
+ * wPortStatus bit field
+ * See USB 2.0 spec Table 11-21
+ */
+#define USB_PORT_STAT_CONNECTION 0x0001
+#define USB_PORT_STAT_ENABLE 0x0002
+#define USB_PORT_STAT_SUSPEND 0x0004
+#define USB_PORT_STAT_OVERCURRENT 0x0008
+#define USB_PORT_STAT_RESET 0x0010
+#define USB_PORT_STAT_L1 0x0020
+/* bits 6 to 7 are reserved */
+#define USB_PORT_STAT_POWER 0x0100
+#define USB_PORT_STAT_LOW_SPEED 0x0200
+#define USB_PORT_STAT_HIGH_SPEED 0x0400
+#define USB_PORT_STAT_TEST 0x0800
+#define USB_PORT_STAT_INDICATOR 0x1000
+/* bits 13 to 15 are reserved */
+#define USB_PORT_STAT_SUPER_SPEED 0x8000 /* Linux-internal */
+
+/*
+ * wPortChange bit field
+ * See USB 2.0 spec Table 11-22
+ * Bits 0 to 4 shown, bits 5 to 15 are reserved
+ */
+#define USB_PORT_STAT_C_CONNECTION 0x0001
+#define USB_PORT_STAT_C_ENABLE 0x0002
+#define USB_PORT_STAT_C_SUSPEND 0x0004
+#define USB_PORT_STAT_C_OVERCURRENT 0x0008
+#define USB_PORT_STAT_C_RESET 0x0010
+#define USB_PORT_STAT_C_L1 0x0020
+
+/* Create various pipes... */
+#define usb_sndctrlpipe(dev, endpoint) \
+ ((PIPE_CONTROL << 30) | __create_pipe(dev, endpoint))
+#define usb_rcvctrlpipe(dev, endpoint) \
+ ((PIPE_CONTROL << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN)
+#define usb_sndisocpipe(dev, endpoint) \
+ ((PIPE_ISOCHRONOUS << 30) | __create_pipe(dev, endpoint))
+#define usb_rcvisocpipe(dev, endpoint) \
+ ((PIPE_ISOCHRONOUS << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN)
+#define usb_sndbulkpipe(dev, endpoint) \
+ ((PIPE_BULK << 30) | __create_pipe(dev, endpoint))
+#define usb_rcvbulkpipe(dev, endpoint) \
+ ((PIPE_BULK << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN)
+#define usb_sndintpipe(dev, endpoint) \
+ ((PIPE_INTERRUPT << 30) | __create_pipe(dev, endpoint))
+#define usb_rcvintpipe(dev, endpoint) \
+ ((PIPE_INTERRUPT << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN)
+
+#define USB_DT_DEVICE_SIZE 18
+#define USB_DT_CONFIG_SIZE 9
+#define USB_DT_STATUS_SIZE 2
+#define USB_DT_BOS_SIZE 5
+#define USB_HUB_PORT_STATUS_SIZE 4
+
+/*
+ * Standard requests, for the bRequest field of a SETUP packet.
+ *
+ * These are qualified by the bRequestType field, so that for example
+ * TYPE_CLASS or TYPE_VENDOR specific feature flags could be retrieved
+ * by a GET_STATUS request.
+ */
+#define USB_REQ_GET_STATUS 0x00
+#define USB_REQ_CLEAR_FEATURE 0x01
+#define USB_REQ_SET_FEATURE 0x03
+#define USB_REQ_SET_ADDRESS 0x05
+#define USB_REQ_GET_DESCRIPTOR 0x06
+#define USB_REQ_SET_DESCRIPTOR 0x07
+#define USB_REQ_GET_CONFIGURATION 0x08
+#define USB_REQ_SET_CONFIGURATION 0x09
+#define USB_REQ_GET_INTERFACE 0x0A
+#define USB_REQ_SET_INTERFACE 0x0B
+#define USB_REQ_SYNCH_FRAME 0x0C
+#define USB_REQ_SET_SEL 0x30
+
+#define USB_REQ_SET_ENCRYPTION 0x0D /* Wireless USB */
+#define USB_REQ_GET_ENCRYPTION 0x0E
+#define USB_REQ_RPIPE_ABORT 0x0E
+#define USB_REQ_SET_HANDSHAKE 0x0F
+#define USB_REQ_RPIPE_RESET 0x0F
+#define USB_REQ_GET_HANDSHAKE 0x10
+#define USB_REQ_SET_CONNECTION 0x11
+#define USB_REQ_SET_SECURITY_DATA 0x12
+#define USB_REQ_GET_SECURITY_DATA 0x13
+#define USB_REQ_SET_WUSB_DATA 0x14
+#define USB_REQ_LOOPBACK_DATA_WRITE 0x15
+#define USB_REQ_LOOPBACK_DATA_READ 0x16
+#define USB_REQ_SET_INTERFACE_DS 0x17
+
+/*
+ * USB feature flags are written using USB_REQ_{CLEAR,SET}_FEATURE, and
+ * are read as a bit array returned by USB_REQ_GET_STATUS. (So there
+ * are at most sixteen features of each type.) Hubs may also support a
+ * new USB_REQ_TEST_AND_SET_FEATURE to put ports into L1 suspend.
+ */
+#define USB_DEVICE_SELF_POWERED 0 /* (read only) */
+#define USB_DEVICE_REMOTE_WAKEUP 1 /* dev may initiate wakeup */
+#define USB_DEVICE_TEST_MODE 2 /* (wired high speed only) */
+#define USB_DEVICE_BATTERY 2 /* (wireless) */
+#define USB_DEVICE_B_HNP_ENABLE 3 /* (otg) dev may initiate HNP */
+#define USB_DEVICE_WUSB_DEVICE 3 /* (wireless)*/
+#define USB_DEVICE_A_HNP_SUPPORT 4 /* (otg) RH port supports HNP */
+#define USB_DEVICE_A_ALT_HNP_SUPPORT 5 /* (otg) other RH port does */
+#define USB_DEVICE_DEBUG_MODE 6 /* (special devices only) */
+
+#define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */
+#define USB_FUNCTION_SUSPEND 0
+#define USB_U1_ENABLE 48
+#define USB_U2_ENABLE 49
+#define USB_LTM_ENABLE 50
+
+
+int mtk_xhci_hcd_init(void);
+void mtk_xhci_hcd_cleanup(void);
+int mtk_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
+void mtk_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb);
+struct usb_device *mtk_usb_alloc_dev(struct usb_device *parent, struct usb_bus *bus, unsigned port1);
+unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc);
+struct usb_hcd *mtk_usb_create_hcd (const struct hc_driver *driver,
+ struct device *dev, const char *bus_name);
+int mtk_usb_add_hcd(struct usb_hcd *hcd,
+ unsigned int irqnum, unsigned long irqflags);
+void mtk_usb_remove_hcd(struct usb_hcd *hcd);
+
+#endif
Index: linux-3.18.21/drivers/usb/host/mtk_test/whci/Kbuild
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/whci/Kbuild 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,12 @@
+obj-$(CONFIG_USB_WHCI_HCD) += whci-hcd.o
+
+whci-hcd-y := \
+ asl.o \
+ debug.o \
+ hcd.o \
+ hw.o \
+ init.o \
+ int.o \
+ pzl.o \
+ qset.o \
+ wusb.o
Index: linux-3.18.21/drivers/usb/host/mtk_test/whci/asl.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/whci/asl.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,389 @@
+/*
+ * Wireless Host Controller (WHC) asynchronous schedule management.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/dma-mapping.h>
+#include <linux/uwb/umc.h>
+#include <linux/usb.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+static void qset_get_next_prev(struct whc *whc, struct whc_qset *qset,
+ struct whc_qset **next, struct whc_qset **prev)
+{
+ struct list_head *n, *p;
+
+ BUG_ON(list_empty(&whc->async_list));
+
+ n = qset->list_node.next;
+ if (n == &whc->async_list)
+ n = n->next;
+ p = qset->list_node.prev;
+ if (p == &whc->async_list)
+ p = p->prev;
+
+ *next = container_of(n, struct whc_qset, list_node);
+ *prev = container_of(p, struct whc_qset, list_node);
+
+}
+
+static void asl_qset_insert_begin(struct whc *whc, struct whc_qset *qset)
+{
+ list_move(&qset->list_node, &whc->async_list);
+ qset->in_sw_list = true;
+}
+
+static void asl_qset_insert(struct whc *whc, struct whc_qset *qset)
+{
+ struct whc_qset *next, *prev;
+
+ qset_clear(whc, qset);
+
+ /* Link into ASL. */
+ qset_get_next_prev(whc, qset, &next, &prev);
+ whc_qset_set_link_ptr(&qset->qh.link, next->qset_dma);
+ whc_qset_set_link_ptr(&prev->qh.link, qset->qset_dma);
+ qset->in_hw_list = true;
+}
+
+static void asl_qset_remove(struct whc *whc, struct whc_qset *qset)
+{
+ struct whc_qset *prev, *next;
+
+ qset_get_next_prev(whc, qset, &next, &prev);
+
+ list_move(&qset->list_node, &whc->async_removed_list);
+ qset->in_sw_list = false;
+
+ /*
+ * No more qsets in the ASL? The caller must stop the ASL as
+ * it's no longer valid.
+ */
+ if (list_empty(&whc->async_list))
+ return;
+
+ /* Remove from ASL. */
+ whc_qset_set_link_ptr(&prev->qh.link, next->qset_dma);
+ qset->in_hw_list = false;
+}
+
+/**
+ * process_qset - process any recently inactivated or halted qTDs in a
+ * qset.
+ *
+ * After inactive qTDs are removed, new qTDs can be added if the
+ * urb queue still contains URBs.
+ *
+ * Returns any additional WUSBCMD bits for the ASL sync command (i.e.,
+ * WUSBCMD_ASYNC_QSET_RM if a halted qset was removed).
+ */
+static uint32_t process_qset(struct whc *whc, struct whc_qset *qset)
+{
+ enum whc_update update = 0;
+ uint32_t status = 0;
+
+ while (qset->ntds) {
+ struct whc_qtd *td;
+ int t;
+
+ t = qset->td_start;
+ td = &qset->qtd[qset->td_start];
+ status = le32_to_cpu(td->status);
+
+ /*
+ * Nothing to do with a still active qTD.
+ */
+ if (status & QTD_STS_ACTIVE)
+ break;
+
+ if (status & QTD_STS_HALTED) {
+ /* Ug, an error. */
+ process_halted_qtd(whc, qset, td);
+ /* A halted qTD always triggers an update
+ because the qset was either removed or
+ reactivated. */
+ update |= WHC_UPDATE_UPDATED;
+ goto done;
+ }
+
+ /* Mmm, a completed qTD. */
+ process_inactive_qtd(whc, qset, td);
+ }
+
+ if (!qset->remove)
+ update |= qset_add_qtds(whc, qset);
+
+done:
+ /*
+ * Remove this qset from the ASL if requested, but only if has
+ * no qTDs.
+ */
+ if (qset->remove && qset->ntds == 0) {
+ asl_qset_remove(whc, qset);
+ update |= WHC_UPDATE_REMOVED;
+ }
+ return update;
+}
+
+void asl_start(struct whc *whc)
+{
+ struct whc_qset *qset;
+
+ qset = list_first_entry(&whc->async_list, struct whc_qset, list_node);
+
+ le_writeq(qset->qset_dma | QH_LINK_NTDS(8), whc->base + WUSBASYNCLISTADDR);
+
+ whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, WUSBCMD_ASYNC_EN);
+ whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
+ WUSBSTS_ASYNC_SCHED, WUSBSTS_ASYNC_SCHED,
+ 1000, "start ASL");
+}
+
+void asl_stop(struct whc *whc)
+{
+ whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, 0);
+ whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
+ WUSBSTS_ASYNC_SCHED, 0,
+ 1000, "stop ASL");
+}
+
+/**
+ * asl_update - request an ASL update and wait for the hardware to be synced
+ * @whc: the WHCI HC
+ * @wusbcmd: WUSBCMD value to start the update.
+ *
+ * If the WUSB HC is inactive (i.e., the ASL is stopped) then the
+ * update must be skipped as the hardware may not respond to update
+ * requests.
+ */
+void asl_update(struct whc *whc, uint32_t wusbcmd)
+{
+ struct wusbhc *wusbhc = &whc->wusbhc;
+ long t;
+
+ mutex_lock(&wusbhc->mutex);
+ if (wusbhc->active) {
+ whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
+ t = wait_event_timeout(
+ whc->async_list_wq,
+ (le_readl(whc->base + WUSBCMD) & WUSBCMD_ASYNC_UPDATED) == 0,
+ msecs_to_jiffies(1000));
+ if (t == 0)
+ whc_hw_error(whc, "ASL update timeout");
+ }
+ mutex_unlock(&wusbhc->mutex);
+}
+
+/**
+ * scan_async_work - scan the ASL for qsets to process.
+ *
+ * Process each qset in the ASL in turn and then signal the WHC that
+ * the ASL has been updated.
+ *
+ * Then start, stop or update the asynchronous schedule as required.
+ */
+void scan_async_work(struct work_struct *work)
+{
+ struct whc *whc = container_of(work, struct whc, async_work);
+ struct whc_qset *qset, *t;
+ enum whc_update update = 0;
+
+ spin_lock_irq(&whc->lock);
+
+ /*
+ * Transerve the software list backwards so new qsets can be
+ * safely inserted into the ASL without making it non-circular.
+ */
+ list_for_each_entry_safe_reverse(qset, t, &whc->async_list, list_node) {
+ if (!qset->in_hw_list) {
+ asl_qset_insert(whc, qset);
+ update |= WHC_UPDATE_ADDED;
+ }
+
+ update |= process_qset(whc, qset);
+ }
+
+ spin_unlock_irq(&whc->lock);
+
+ if (update) {
+ uint32_t wusbcmd = WUSBCMD_ASYNC_UPDATED | WUSBCMD_ASYNC_SYNCED_DB;
+ if (update & WHC_UPDATE_REMOVED)
+ wusbcmd |= WUSBCMD_ASYNC_QSET_RM;
+ asl_update(whc, wusbcmd);
+ }
+
+ /*
+ * Now that the ASL is updated, complete the removal of any
+ * removed qsets.
+ *
+ * If the qset was to be reset, do so and reinsert it into the
+ * ASL if it has pending transfers.
+ */
+ spin_lock_irq(&whc->lock);
+
+ list_for_each_entry_safe(qset, t, &whc->async_removed_list, list_node) {
+ qset_remove_complete(whc, qset);
+ if (qset->reset) {
+ qset_reset(whc, qset);
+ if (!list_empty(&qset->stds)) {
+ asl_qset_insert_begin(whc, qset);
+ queue_work(whc->workqueue, &whc->async_work);
+ }
+ }
+ }
+
+ spin_unlock_irq(&whc->lock);
+}
+
+/**
+ * asl_urb_enqueue - queue an URB onto the asynchronous list (ASL).
+ * @whc: the WHCI host controller
+ * @urb: the URB to enqueue
+ * @mem_flags: flags for any memory allocations
+ *
+ * The qset for the endpoint is obtained and the urb queued on to it.
+ *
+ * Work is scheduled to update the hardware's view of the ASL.
+ */
+int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
+{
+ struct whc_qset *qset;
+ int err;
+ unsigned long flags;
+
+ spin_lock_irqsave(&whc->lock, flags);
+
+ err = usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
+ if (err < 0) {
+ spin_unlock_irqrestore(&whc->lock, flags);
+ return err;
+ }
+
+ qset = get_qset(whc, urb, GFP_ATOMIC);
+ if (qset == NULL)
+ err = -ENOMEM;
+ else
+ err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
+ if (!err) {
+ if (!qset->in_sw_list && !qset->remove)
+ asl_qset_insert_begin(whc, qset);
+ } else
+ usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb);
+
+ spin_unlock_irqrestore(&whc->lock, flags);
+
+ if (!err)
+ queue_work(whc->workqueue, &whc->async_work);
+
+ return err;
+}
+
+/**
+ * asl_urb_dequeue - remove an URB (qset) from the async list.
+ * @whc: the WHCI host controller
+ * @urb: the URB to dequeue
+ * @status: the current status of the URB
+ *
+ * URBs that do yet have qTDs can simply be removed from the software
+ * queue, otherwise the qset must be removed from the ASL so the qTDs
+ * can be removed.
+ */
+int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
+{
+ struct whc_urb *wurb = urb->hcpriv;
+ struct whc_qset *qset = wurb->qset;
+ struct whc_std *std, *t;
+ bool has_qtd = false;
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&whc->lock, flags);
+
+ ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status);
+ if (ret < 0)
+ goto out;
+
+ list_for_each_entry_safe(std, t, &qset->stds, list_node) {
+ if (std->urb == urb) {
+ if (std->qtd)
+ has_qtd = true;
+ qset_free_std(whc, std);
+ } else
+ std->qtd = NULL; /* so this std is re-added when the qset is */
+ }
+
+ if (has_qtd) {
+ asl_qset_remove(whc, qset);
+ wurb->status = status;
+ wurb->is_async = true;
+ queue_work(whc->workqueue, &wurb->dequeue_work);
+ } else
+ qset_remove_urb(whc, qset, urb, status);
+out:
+ spin_unlock_irqrestore(&whc->lock, flags);
+
+ return ret;
+}
+
+/**
+ * asl_qset_delete - delete a qset from the ASL
+ */
+void asl_qset_delete(struct whc *whc, struct whc_qset *qset)
+{
+ qset->remove = 1;
+ queue_work(whc->workqueue, &whc->async_work);
+ qset_delete(whc, qset);
+}
+
+/**
+ * asl_init - initialize the asynchronous schedule list
+ *
+ * A dummy qset with no qTDs is added to the ASL to simplify removing
+ * qsets (no need to stop the ASL when the last qset is removed).
+ */
+int asl_init(struct whc *whc)
+{
+ struct whc_qset *qset;
+
+ qset = qset_alloc(whc, GFP_KERNEL);
+ if (qset == NULL)
+ return -ENOMEM;
+
+ asl_qset_insert_begin(whc, qset);
+ asl_qset_insert(whc, qset);
+
+ return 0;
+}
+
+/**
+ * asl_clean_up - free ASL resources
+ *
+ * The ASL is stopped and empty except for the dummy qset.
+ */
+void asl_clean_up(struct whc *whc)
+{
+ struct whc_qset *qset;
+
+ if (!list_empty(&whc->async_list)) {
+ qset = list_first_entry(&whc->async_list, struct whc_qset, list_node);
+ list_del(&qset->list_node);
+ qset_free(whc, qset);
+ }
+}
Index: linux-3.18.21/drivers/usb/host/mtk_test/whci/debug.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/whci/debug.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,202 @@
+/*
+ * Wireless Host Controller (WHC) debug.
+ *
+ * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+struct whc_dbg {
+ struct dentry *di_f;
+ struct dentry *asl_f;
+ struct dentry *pzl_f;
+};
+
+static void qset_print(struct seq_file *s, struct whc_qset *qset)
+{
+ static const char *qh_type[] = {
+ "ctrl", "isoc", "bulk", "intr", "rsvd", "rsvd", "rsvd", "lpintr", };
+ struct whc_std *std;
+ struct urb *urb = NULL;
+ int i;
+
+ seq_printf(s, "qset %08x", (u32)qset->qset_dma);
+ if (&qset->list_node == qset->whc->async_list.prev) {
+ seq_printf(s, " (dummy)\n");
+ } else {
+ seq_printf(s, " ep%d%s-%s maxpkt: %d\n",
+ qset->qh.info1 & 0x0f,
+ (qset->qh.info1 >> 4) & 0x1 ? "in" : "out",
+ qh_type[(qset->qh.info1 >> 5) & 0x7],
+ (qset->qh.info1 >> 16) & 0xffff);
+ }
+ seq_printf(s, " -> %08x\n", (u32)qset->qh.link);
+ seq_printf(s, " info: %08x %08x %08x\n",
+ qset->qh.info1, qset->qh.info2, qset->qh.info3);
+ seq_printf(s, " sts: %04x errs: %d curwin: %08x\n",
+ qset->qh.status, qset->qh.err_count, qset->qh.cur_window);
+ seq_printf(s, " TD: sts: %08x opts: %08x\n",
+ qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options);
+
+ for (i = 0; i < WHCI_QSET_TD_MAX; i++) {
+ seq_printf(s, " %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n",
+ i == qset->td_start ? 'S' : ' ',
+ i == qset->td_end ? 'E' : ' ',
+ i, qset->qtd[i].status, qset->qtd[i].options,
+ (u32)qset->qtd[i].page_list_ptr);
+ }
+ seq_printf(s, " ntds: %d\n", qset->ntds);
+ list_for_each_entry(std, &qset->stds, list_node) {
+ if (urb != std->urb) {
+ urb = std->urb;
+ seq_printf(s, " urb %p transferred: %d bytes\n", urb,
+ urb->actual_length);
+ }
+ if (std->qtd)
+ seq_printf(s, " sTD[%td]: %zu bytes @ %08x\n",
+ std->qtd - &qset->qtd[0],
+ std->len, std->num_pointers ?
+ (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
+ else
+ seq_printf(s, " sTD[-]: %zd bytes @ %08x\n",
+ std->len, std->num_pointers ?
+ (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
+ }
+}
+
+static int di_print(struct seq_file *s, void *p)
+{
+ struct whc *whc = s->private;
+ char buf[72];
+ int d;
+
+ for (d = 0; d < whc->n_devices; d++) {
+ struct di_buf_entry *di = &whc->di_buf[d];
+
+ bitmap_scnprintf(buf, sizeof(buf),
+ (unsigned long *)di->availability_info, UWB_NUM_MAS);
+
+ seq_printf(s, "DI[%d]\n", d);
+ seq_printf(s, " availability: %s\n", buf);
+ seq_printf(s, " %c%c key idx: %d dev addr: %d\n",
+ (di->addr_sec_info & WHC_DI_SECURE) ? 'S' : ' ',
+ (di->addr_sec_info & WHC_DI_DISABLE) ? 'D' : ' ',
+ (di->addr_sec_info & WHC_DI_KEY_IDX_MASK) >> 8,
+ (di->addr_sec_info & WHC_DI_DEV_ADDR_MASK));
+ }
+ return 0;
+}
+
+static int asl_print(struct seq_file *s, void *p)
+{
+ struct whc *whc = s->private;
+ struct whc_qset *qset;
+
+ list_for_each_entry(qset, &whc->async_list, list_node) {
+ qset_print(s, qset);
+ }
+
+ return 0;
+}
+
+static int pzl_print(struct seq_file *s, void *p)
+{
+ struct whc *whc = s->private;
+ struct whc_qset *qset;
+ int period;
+
+ for (period = 0; period < 5; period++) {
+ seq_printf(s, "Period %d\n", period);
+ list_for_each_entry(qset, &whc->periodic_list[period], list_node) {
+ qset_print(s, qset);
+ }
+ }
+ return 0;
+}
+
+static int di_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, di_print, inode->i_private);
+}
+
+static int asl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, asl_print, inode->i_private);
+}
+
+static int pzl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pzl_print, inode->i_private);
+}
+
+static const struct file_operations di_fops = {
+ .open = di_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations asl_fops = {
+ .open = asl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations pzl_fops = {
+ .open = pzl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+void whc_dbg_init(struct whc *whc)
+{
+ if (whc->wusbhc.pal.debugfs_dir == NULL)
+ return;
+
+ whc->dbg = kzalloc(sizeof(struct whc_dbg), GFP_KERNEL);
+ if (whc->dbg == NULL)
+ return;
+
+ whc->dbg->di_f = debugfs_create_file("di", 0444,
+ whc->wusbhc.pal.debugfs_dir, whc,
+ &di_fops);
+ whc->dbg->asl_f = debugfs_create_file("asl", 0444,
+ whc->wusbhc.pal.debugfs_dir, whc,
+ &asl_fops);
+ whc->dbg->pzl_f = debugfs_create_file("pzl", 0444,
+ whc->wusbhc.pal.debugfs_dir, whc,
+ &pzl_fops);
+}
+
+void whc_dbg_clean_up(struct whc *whc)
+{
+ if (whc->dbg) {
+ debugfs_remove(whc->dbg->pzl_f);
+ debugfs_remove(whc->dbg->asl_f);
+ debugfs_remove(whc->dbg->di_f);
+ kfree(whc->dbg);
+ }
+}
Index: linux-3.18.21/drivers/usb/host/mtk_test/whci/hcd.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/whci/hcd.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,367 @@
+/*
+ * Wireless Host Controller (WHC) driver.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/uwb/umc.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+/*
+ * One time initialization.
+ *
+ * Nothing to do here.
+ */
+static int whc_reset(struct usb_hcd *usb_hcd)
+{
+ return 0;
+}
+
+/*
+ * Start the wireless host controller.
+ *
+ * Start device notification.
+ *
+ * Put hc into run state, set DNTS parameters.
+ */
+static int whc_start(struct usb_hcd *usb_hcd)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ u8 bcid;
+ int ret;
+
+ mutex_lock(&wusbhc->mutex);
+
+ le_writel(WUSBINTR_GEN_CMD_DONE
+ | WUSBINTR_HOST_ERR
+ | WUSBINTR_ASYNC_SCHED_SYNCED
+ | WUSBINTR_DNTS_INT
+ | WUSBINTR_ERR_INT
+ | WUSBINTR_INT,
+ whc->base + WUSBINTR);
+
+ /* set cluster ID */
+ bcid = wusb_cluster_id_get();
+ ret = whc_set_cluster_id(whc, bcid);
+ if (ret < 0)
+ goto out;
+ wusbhc->cluster_id = bcid;
+
+ /* start HC */
+ whc_write_wusbcmd(whc, WUSBCMD_RUN, WUSBCMD_RUN);
+
+ usb_hcd->uses_new_polling = 1;
+ set_bit(HCD_FLAG_POLL_RH, &usb_hcd->flags);
+ usb_hcd->state = HC_STATE_RUNNING;
+
+out:
+ mutex_unlock(&wusbhc->mutex);
+ return ret;
+}
+
+
+/*
+ * Stop the wireless host controller.
+ *
+ * Stop device notification.
+ *
+ * Wait for pending transfer to stop? Put hc into stop state?
+ */
+static void whc_stop(struct usb_hcd *usb_hcd)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+
+ mutex_lock(&wusbhc->mutex);
+
+ /* stop HC */
+ le_writel(0, whc->base + WUSBINTR);
+ whc_write_wusbcmd(whc, WUSBCMD_RUN, 0);
+ whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
+ WUSBSTS_HCHALTED, WUSBSTS_HCHALTED,
+ 100, "HC to halt");
+
+ wusb_cluster_id_put(wusbhc->cluster_id);
+
+ mutex_unlock(&wusbhc->mutex);
+}
+
+static int whc_get_frame_number(struct usb_hcd *usb_hcd)
+{
+ /* Frame numbers are not applicable to WUSB. */
+ return -ENOSYS;
+}
+
+
+/*
+ * Queue an URB to the ASL or PZL
+ */
+static int whc_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ int ret;
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_INTERRUPT:
+ ret = pzl_urb_enqueue(whc, urb, mem_flags);
+ break;
+ case PIPE_ISOCHRONOUS:
+ dev_err(&whc->umc->dev, "isochronous transfers unsupported\n");
+ ret = -ENOTSUPP;
+ break;
+ case PIPE_CONTROL:
+ case PIPE_BULK:
+ default:
+ ret = asl_urb_enqueue(whc, urb, mem_flags);
+ break;
+ };
+
+ return ret;
+}
+
+/*
+ * Remove a queued URB from the ASL or PZL.
+ */
+static int whc_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ int ret;
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_INTERRUPT:
+ ret = pzl_urb_dequeue(whc, urb, status);
+ break;
+ case PIPE_ISOCHRONOUS:
+ ret = -ENOTSUPP;
+ break;
+ case PIPE_CONTROL:
+ case PIPE_BULK:
+ default:
+ ret = asl_urb_dequeue(whc, urb, status);
+ break;
+ };
+
+ return ret;
+}
+
+/*
+ * Wait for all URBs to the endpoint to be completed, then delete the
+ * qset.
+ */
+static void whc_endpoint_disable(struct usb_hcd *usb_hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ struct whc_qset *qset;
+
+ qset = ep->hcpriv;
+ if (qset) {
+ ep->hcpriv = NULL;
+ if (usb_endpoint_xfer_bulk(&ep->desc)
+ || usb_endpoint_xfer_control(&ep->desc))
+ asl_qset_delete(whc, qset);
+ else
+ pzl_qset_delete(whc, qset);
+ }
+}
+
+static void whc_endpoint_reset(struct usb_hcd *usb_hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ struct whc_qset *qset;
+ unsigned long flags;
+
+ spin_lock_irqsave(&whc->lock, flags);
+
+ qset = ep->hcpriv;
+ if (qset) {
+ qset->remove = 1;
+ qset->reset = 1;
+
+ if (usb_endpoint_xfer_bulk(&ep->desc)
+ || usb_endpoint_xfer_control(&ep->desc))
+ queue_work(whc->workqueue, &whc->async_work);
+ else
+ queue_work(whc->workqueue, &whc->periodic_work);
+ }
+
+ spin_unlock_irqrestore(&whc->lock, flags);
+}
+
+
+static struct hc_driver whc_hc_driver = {
+ .description = "whci-hcd",
+ .product_desc = "Wireless host controller",
+ .hcd_priv_size = sizeof(struct whc) - sizeof(struct usb_hcd),
+ .irq = whc_int_handler,
+ .flags = HCD_USB2,
+
+ .reset = whc_reset,
+ .start = whc_start,
+ .stop = whc_stop,
+ .get_frame_number = whc_get_frame_number,
+ .urb_enqueue = whc_urb_enqueue,
+ .urb_dequeue = whc_urb_dequeue,
+ .endpoint_disable = whc_endpoint_disable,
+ .endpoint_reset = whc_endpoint_reset,
+
+ .hub_status_data = wusbhc_rh_status_data,
+ .hub_control = wusbhc_rh_control,
+ .bus_suspend = wusbhc_rh_suspend,
+ .bus_resume = wusbhc_rh_resume,
+ .start_port_reset = wusbhc_rh_start_port_reset,
+};
+
+static int whc_probe(struct umc_dev *umc)
+{
+ int ret = -ENOMEM;
+ struct usb_hcd *usb_hcd;
+ struct wusbhc *wusbhc = NULL;
+ struct whc *whc = NULL;
+ struct device *dev = &umc->dev;
+
+ usb_hcd = usb_create_hcd(&whc_hc_driver, dev, "whci");
+ if (usb_hcd == NULL) {
+ dev_err(dev, "unable to create hcd\n");
+ goto error;
+ }
+
+ usb_hcd->wireless = 1;
+ usb_hcd->self.sg_tablesize = 2048; /* somewhat arbitrary */
+
+ wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ whc = wusbhc_to_whc(wusbhc);
+ whc->umc = umc;
+
+ ret = whc_init(whc);
+ if (ret)
+ goto error;
+
+ wusbhc->dev = dev;
+ wusbhc->uwb_rc = uwb_rc_get_by_grandpa(umc->dev.parent);
+ if (!wusbhc->uwb_rc) {
+ ret = -ENODEV;
+ dev_err(dev, "cannot get radio controller\n");
+ goto error;
+ }
+
+ if (whc->n_devices > USB_MAXCHILDREN) {
+ dev_warn(dev, "USB_MAXCHILDREN too low for WUSB adapter (%u ports)\n",
+ whc->n_devices);
+ wusbhc->ports_max = USB_MAXCHILDREN;
+ } else
+ wusbhc->ports_max = whc->n_devices;
+ wusbhc->mmcies_max = whc->n_mmc_ies;
+ wusbhc->start = whc_wusbhc_start;
+ wusbhc->stop = whc_wusbhc_stop;
+ wusbhc->mmcie_add = whc_mmcie_add;
+ wusbhc->mmcie_rm = whc_mmcie_rm;
+ wusbhc->dev_info_set = whc_dev_info_set;
+ wusbhc->bwa_set = whc_bwa_set;
+ wusbhc->set_num_dnts = whc_set_num_dnts;
+ wusbhc->set_ptk = whc_set_ptk;
+ wusbhc->set_gtk = whc_set_gtk;
+
+ ret = wusbhc_create(wusbhc);
+ if (ret)
+ goto error_wusbhc_create;
+
+ ret = usb_add_hcd(usb_hcd, whc->umc->irq, IRQF_SHARED);
+ if (ret) {
+ dev_err(dev, "cannot add HCD: %d\n", ret);
+ goto error_usb_add_hcd;
+ }
+
+ ret = wusbhc_b_create(wusbhc);
+ if (ret) {
+ dev_err(dev, "WUSBHC phase B setup failed: %d\n", ret);
+ goto error_wusbhc_b_create;
+ }
+
+ whc_dbg_init(whc);
+
+ return 0;
+
+error_wusbhc_b_create:
+ usb_remove_hcd(usb_hcd);
+error_usb_add_hcd:
+ wusbhc_destroy(wusbhc);
+error_wusbhc_create:
+ uwb_rc_put(wusbhc->uwb_rc);
+error:
+ whc_clean_up(whc);
+ if (usb_hcd)
+ usb_put_hcd(usb_hcd);
+ return ret;
+}
+
+
+static void whc_remove(struct umc_dev *umc)
+{
+ struct usb_hcd *usb_hcd = dev_get_drvdata(&umc->dev);
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+
+ if (usb_hcd) {
+ whc_dbg_clean_up(whc);
+ wusbhc_b_destroy(wusbhc);
+ usb_remove_hcd(usb_hcd);
+ wusbhc_destroy(wusbhc);
+ uwb_rc_put(wusbhc->uwb_rc);
+ whc_clean_up(whc);
+ usb_put_hcd(usb_hcd);
+ }
+}
+
+static struct umc_driver whci_hc_driver = {
+ .name = "whci-hcd",
+ .cap_id = UMC_CAP_ID_WHCI_WUSB_HC,
+ .probe = whc_probe,
+ .remove = whc_remove,
+};
+
+static int __init whci_hc_driver_init(void)
+{
+ return umc_driver_register(&whci_hc_driver);
+}
+module_init(whci_hc_driver_init);
+
+static void __exit whci_hc_driver_exit(void)
+{
+ umc_driver_unregister(&whci_hc_driver);
+}
+module_exit(whci_hc_driver_exit);
+
+/* PCI device ID's that we handle (so it gets loaded) */
+static struct pci_device_id whci_hcd_id_table[] = {
+ { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
+ { /* empty last entry */ }
+};
+MODULE_DEVICE_TABLE(pci, whci_hcd_id_table);
+
+MODULE_DESCRIPTION("WHCI Wireless USB host controller driver");
+MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
+MODULE_LICENSE("GPL");
Index: linux-3.18.21/drivers/usb/host/mtk_test/whci/hw.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/whci/hw.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,104 @@
+/*
+ * Wireless Host Controller (WHC) hardware access helpers.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/uwb/umc.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val)
+{
+ unsigned long flags;
+ u32 cmd;
+
+ spin_lock_irqsave(&whc->lock, flags);
+
+ cmd = le_readl(whc->base + WUSBCMD);
+ cmd = (cmd & ~mask) | val;
+ le_writel(cmd, whc->base + WUSBCMD);
+
+ spin_unlock_irqrestore(&whc->lock, flags);
+}
+
+/**
+ * whc_do_gencmd - start a generic command via the WUSBGENCMDSTS register
+ * @whc: the WHCI HC
+ * @cmd: command to start.
+ * @params: parameters for the command (the WUSBGENCMDPARAMS register value).
+ * @addr: pointer to any data for the command (may be NULL).
+ * @len: length of the data (if any).
+ */
+int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len)
+{
+ unsigned long flags;
+ dma_addr_t dma_addr;
+ int t;
+ int ret = 0;
+
+ mutex_lock(&whc->mutex);
+
+ /* Wait for previous command to complete. */
+ t = wait_event_timeout(whc->cmd_wq,
+ (le_readl(whc->base + WUSBGENCMDSTS) & WUSBGENCMDSTS_ACTIVE) == 0,
+ WHC_GENCMD_TIMEOUT_MS);
+ if (t == 0) {
+ dev_err(&whc->umc->dev, "generic command timeout (%04x/%04x)\n",
+ le_readl(whc->base + WUSBGENCMDSTS),
+ le_readl(whc->base + WUSBGENCMDPARAMS));
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (addr) {
+ memcpy(whc->gen_cmd_buf, addr, len);
+ dma_addr = whc->gen_cmd_buf_dma;
+ } else
+ dma_addr = 0;
+
+ /* Poke registers to start cmd. */
+ spin_lock_irqsave(&whc->lock, flags);
+
+ le_writel(params, whc->base + WUSBGENCMDPARAMS);
+ le_writeq(dma_addr, whc->base + WUSBGENADDR);
+
+ le_writel(WUSBGENCMDSTS_ACTIVE | WUSBGENCMDSTS_IOC | cmd,
+ whc->base + WUSBGENCMDSTS);
+
+ spin_unlock_irqrestore(&whc->lock, flags);
+out:
+ mutex_unlock(&whc->mutex);
+
+ return ret;
+}
+
+/**
+ * whc_hw_error - recover from a hardware error
+ * @whc: the WHCI HC that broke.
+ * @reason: a description of the failure.
+ *
+ * Recover from broken hardware with a full reset.
+ */
+void whc_hw_error(struct whc *whc, const char *reason)
+{
+ struct wusbhc *wusbhc = &whc->wusbhc;
+
+ dev_err(&whc->umc->dev, "hardware error: %s\n", reason);
+ wusbhc_reset_all(wusbhc);
+}
Index: linux-3.18.21/drivers/usb/host/mtk_test/whci/init.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/whci/init.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,189 @@
+/*
+ * Wireless Host Controller (WHC) initialization.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/dma-mapping.h>
+#include <linux/uwb/umc.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+/*
+ * Reset the host controller.
+ */
+static void whc_hw_reset(struct whc *whc)
+{
+ le_writel(WUSBCMD_WHCRESET, whc->base + WUSBCMD);
+ whci_wait_for(&whc->umc->dev, whc->base + WUSBCMD, WUSBCMD_WHCRESET, 0,
+ 100, "reset");
+}
+
+static void whc_hw_init_di_buf(struct whc *whc)
+{
+ int d;
+
+ /* Disable all entries in the Device Information buffer. */
+ for (d = 0; d < whc->n_devices; d++)
+ whc->di_buf[d].addr_sec_info = WHC_DI_DISABLE;
+
+ le_writeq(whc->di_buf_dma, whc->base + WUSBDEVICEINFOADDR);
+}
+
+static void whc_hw_init_dn_buf(struct whc *whc)
+{
+ /* Clear the Device Notification buffer to ensure the V (valid)
+ * bits are clear. */
+ memset(whc->dn_buf, 0, 4096);
+
+ le_writeq(whc->dn_buf_dma, whc->base + WUSBDNTSBUFADDR);
+}
+
+int whc_init(struct whc *whc)
+{
+ u32 whcsparams;
+ int ret, i;
+ resource_size_t start, len;
+
+ spin_lock_init(&whc->lock);
+ mutex_init(&whc->mutex);
+ init_waitqueue_head(&whc->cmd_wq);
+ init_waitqueue_head(&whc->async_list_wq);
+ init_waitqueue_head(&whc->periodic_list_wq);
+ whc->workqueue = create_singlethread_workqueue(dev_name(&whc->umc->dev));
+ if (whc->workqueue == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ INIT_WORK(&whc->dn_work, whc_dn_work);
+
+ INIT_WORK(&whc->async_work, scan_async_work);
+ INIT_LIST_HEAD(&whc->async_list);
+ INIT_LIST_HEAD(&whc->async_removed_list);
+
+ INIT_WORK(&whc->periodic_work, scan_periodic_work);
+ for (i = 0; i < 5; i++)
+ INIT_LIST_HEAD(&whc->periodic_list[i]);
+ INIT_LIST_HEAD(&whc->periodic_removed_list);
+
+ /* Map HC registers. */
+ start = whc->umc->resource.start;
+ len = whc->umc->resource.end - start + 1;
+ if (!request_mem_region(start, len, "whci-hc")) {
+ dev_err(&whc->umc->dev, "can't request HC region\n");
+ ret = -EBUSY;
+ goto error;
+ }
+ whc->base_phys = start;
+ whc->base = ioremap(start, len);
+ if (!whc->base) {
+ dev_err(&whc->umc->dev, "ioremap\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ whc_hw_reset(whc);
+
+ /* Read maximum number of devices, keys and MMC IEs. */
+ whcsparams = le_readl(whc->base + WHCSPARAMS);
+ whc->n_devices = WHCSPARAMS_TO_N_DEVICES(whcsparams);
+ whc->n_keys = WHCSPARAMS_TO_N_KEYS(whcsparams);
+ whc->n_mmc_ies = WHCSPARAMS_TO_N_MMC_IES(whcsparams);
+
+ dev_dbg(&whc->umc->dev, "N_DEVICES = %d, N_KEYS = %d, N_MMC_IES = %d\n",
+ whc->n_devices, whc->n_keys, whc->n_mmc_ies);
+
+ whc->qset_pool = dma_pool_create("qset", &whc->umc->dev,
+ sizeof(struct whc_qset), 64, 0);
+ if (whc->qset_pool == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = asl_init(whc);
+ if (ret < 0)
+ goto error;
+ ret = pzl_init(whc);
+ if (ret < 0)
+ goto error;
+
+ /* Allocate and initialize a buffer for generic commands, the
+ Device Information buffer, and the Device Notification
+ buffer. */
+
+ whc->gen_cmd_buf = dma_alloc_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN,
+ &whc->gen_cmd_buf_dma, GFP_KERNEL);
+ if (whc->gen_cmd_buf == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ whc->dn_buf = dma_alloc_coherent(&whc->umc->dev,
+ sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES,
+ &whc->dn_buf_dma, GFP_KERNEL);
+ if (!whc->dn_buf) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ whc_hw_init_dn_buf(whc);
+
+ whc->di_buf = dma_alloc_coherent(&whc->umc->dev,
+ sizeof(struct di_buf_entry) * whc->n_devices,
+ &whc->di_buf_dma, GFP_KERNEL);
+ if (!whc->di_buf) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ whc_hw_init_di_buf(whc);
+
+ return 0;
+
+error:
+ whc_clean_up(whc);
+ return ret;
+}
+
+void whc_clean_up(struct whc *whc)
+{
+ resource_size_t len;
+
+ if (whc->di_buf)
+ dma_free_coherent(&whc->umc->dev, sizeof(struct di_buf_entry) * whc->n_devices,
+ whc->di_buf, whc->di_buf_dma);
+ if (whc->dn_buf)
+ dma_free_coherent(&whc->umc->dev, sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES,
+ whc->dn_buf, whc->dn_buf_dma);
+ if (whc->gen_cmd_buf)
+ dma_free_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN,
+ whc->gen_cmd_buf, whc->gen_cmd_buf_dma);
+
+ pzl_clean_up(whc);
+ asl_clean_up(whc);
+
+ if (whc->qset_pool)
+ dma_pool_destroy(whc->qset_pool);
+
+ len = whc->umc->resource.end - whc->umc->resource.start + 1;
+ if (whc->base)
+ iounmap(whc->base);
+ if (whc->base_phys)
+ release_mem_region(whc->base_phys, len);
+
+ if (whc->workqueue)
+ destroy_workqueue(whc->workqueue);
+}
Index: linux-3.18.21/drivers/usb/host/mtk_test/whci/int.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/whci/int.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,94 @@
+/*
+ * Wireless Host Controller (WHC) interrupt handling.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/uwb/umc.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+static void transfer_done(struct whc *whc)
+{
+ queue_work(whc->workqueue, &whc->async_work);
+ queue_work(whc->workqueue, &whc->periodic_work);
+}
+
+irqreturn_t whc_int_handler(struct usb_hcd *hcd)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(hcd);
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ u32 sts;
+
+ sts = le_readl(whc->base + WUSBSTS);
+ if (!(sts & WUSBSTS_INT_MASK))
+ return IRQ_NONE;
+ le_writel(sts & WUSBSTS_INT_MASK, whc->base + WUSBSTS);
+
+ if (sts & WUSBSTS_GEN_CMD_DONE)
+ wake_up(&whc->cmd_wq);
+
+ if (sts & WUSBSTS_HOST_ERR)
+ dev_err(&whc->umc->dev, "FIXME: host system error\n");
+
+ if (sts & WUSBSTS_ASYNC_SCHED_SYNCED)
+ wake_up(&whc->async_list_wq);
+
+ if (sts & WUSBSTS_PERIODIC_SCHED_SYNCED)
+ wake_up(&whc->periodic_list_wq);
+
+ if (sts & WUSBSTS_DNTS_INT)
+ queue_work(whc->workqueue, &whc->dn_work);
+
+ /*
+ * A transfer completed (see [WHCI] section 4.7.1.2 for when
+ * this occurs).
+ */
+ if (sts & (WUSBSTS_INT | WUSBSTS_ERR_INT))
+ transfer_done(whc);
+
+ return IRQ_HANDLED;
+}
+
+static int process_dn_buf(struct whc *whc)
+{
+ struct wusbhc *wusbhc = &whc->wusbhc;
+ struct dn_buf_entry *dn;
+ int processed = 0;
+
+ for (dn = whc->dn_buf; dn < whc->dn_buf + WHC_N_DN_ENTRIES; dn++) {
+ if (dn->status & WHC_DN_STATUS_VALID) {
+ wusbhc_handle_dn(wusbhc, dn->src_addr,
+ (struct wusb_dn_hdr *)dn->dn_data,
+ dn->msg_size);
+ dn->status &= ~WHC_DN_STATUS_VALID;
+ processed++;
+ }
+ }
+ return processed;
+}
+
+void whc_dn_work(struct work_struct *work)
+{
+ struct whc *whc = container_of(work, struct whc, dn_work);
+ int processed;
+
+ do {
+ processed = process_dn_buf(whc);
+ } while (processed);
+}
Index: linux-3.18.21/drivers/usb/host/mtk_test/whci/pzl.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/whci/pzl.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,417 @@
+/*
+ * Wireless Host Controller (WHC) periodic schedule management.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/dma-mapping.h>
+#include <linux/uwb/umc.h>
+#include <linux/usb.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+static void update_pzl_pointers(struct whc *whc, int period, u64 addr)
+{
+ switch (period) {
+ case 0:
+ whc_qset_set_link_ptr(&whc->pz_list[0], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[2], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[4], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[6], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[8], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[10], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[12], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[14], addr);
+ break;
+ case 1:
+ whc_qset_set_link_ptr(&whc->pz_list[1], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[5], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[9], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[13], addr);
+ break;
+ case 2:
+ whc_qset_set_link_ptr(&whc->pz_list[3], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[11], addr);
+ break;
+ case 3:
+ whc_qset_set_link_ptr(&whc->pz_list[7], addr);
+ break;
+ case 4:
+ whc_qset_set_link_ptr(&whc->pz_list[15], addr);
+ break;
+ }
+}
+
+/*
+ * Return the 'period' to use for this qset. The minimum interval for
+ * the endpoint is used so whatever urbs are submitted the device is
+ * polled often enough.
+ */
+static int qset_get_period(struct whc *whc, struct whc_qset *qset)
+{
+ uint8_t bInterval = qset->ep->desc.bInterval;
+
+ if (bInterval < 6)
+ bInterval = 6;
+ if (bInterval > 10)
+ bInterval = 10;
+ return bInterval - 6;
+}
+
+static void qset_insert_in_sw_list(struct whc *whc, struct whc_qset *qset)
+{
+ int period;
+
+ period = qset_get_period(whc, qset);
+
+ qset_clear(whc, qset);
+ list_move(&qset->list_node, &whc->periodic_list[period]);
+ qset->in_sw_list = true;
+}
+
+static void pzl_qset_remove(struct whc *whc, struct whc_qset *qset)
+{
+ list_move(&qset->list_node, &whc->periodic_removed_list);
+ qset->in_hw_list = false;
+ qset->in_sw_list = false;
+}
+
+/**
+ * pzl_process_qset - process any recently inactivated or halted qTDs
+ * in a qset.
+ *
+ * After inactive qTDs are removed, new qTDs can be added if the
+ * urb queue still contains URBs.
+ *
+ * Returns the schedule updates required.
+ */
+static enum whc_update pzl_process_qset(struct whc *whc, struct whc_qset *qset)
+{
+ enum whc_update update = 0;
+ uint32_t status = 0;
+
+ while (qset->ntds) {
+ struct whc_qtd *td;
+ int t;
+
+ t = qset->td_start;
+ td = &qset->qtd[qset->td_start];
+ status = le32_to_cpu(td->status);
+
+ /*
+ * Nothing to do with a still active qTD.
+ */
+ if (status & QTD_STS_ACTIVE)
+ break;
+
+ if (status & QTD_STS_HALTED) {
+ /* Ug, an error. */
+ process_halted_qtd(whc, qset, td);
+ /* A halted qTD always triggers an update
+ because the qset was either removed or
+ reactivated. */
+ update |= WHC_UPDATE_UPDATED;
+ goto done;
+ }
+
+ /* Mmm, a completed qTD. */
+ process_inactive_qtd(whc, qset, td);
+ }
+
+ if (!qset->remove)
+ update |= qset_add_qtds(whc, qset);
+
+done:
+ /*
+ * If there are no qTDs in this qset, remove it from the PZL.
+ */
+ if (qset->remove && qset->ntds == 0) {
+ pzl_qset_remove(whc, qset);
+ update |= WHC_UPDATE_REMOVED;
+ }
+
+ return update;
+}
+
+/**
+ * pzl_start - start the periodic schedule
+ * @whc: the WHCI host controller
+ *
+ * The PZL must be valid (e.g., all entries in the list should have
+ * the T bit set).
+ */
+void pzl_start(struct whc *whc)
+{
+ le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE);
+
+ whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, WUSBCMD_PERIODIC_EN);
+ whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
+ WUSBSTS_PERIODIC_SCHED, WUSBSTS_PERIODIC_SCHED,
+ 1000, "start PZL");
+}
+
+/**
+ * pzl_stop - stop the periodic schedule
+ * @whc: the WHCI host controller
+ */
+void pzl_stop(struct whc *whc)
+{
+ whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, 0);
+ whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
+ WUSBSTS_PERIODIC_SCHED, 0,
+ 1000, "stop PZL");
+}
+
+/**
+ * pzl_update - request a PZL update and wait for the hardware to be synced
+ * @whc: the WHCI HC
+ * @wusbcmd: WUSBCMD value to start the update.
+ *
+ * If the WUSB HC is inactive (i.e., the PZL is stopped) then the
+ * update must be skipped as the hardware may not respond to update
+ * requests.
+ */
+void pzl_update(struct whc *whc, uint32_t wusbcmd)
+{
+ struct wusbhc *wusbhc = &whc->wusbhc;
+ long t;
+
+ mutex_lock(&wusbhc->mutex);
+ if (wusbhc->active) {
+ whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
+ t = wait_event_timeout(
+ whc->periodic_list_wq,
+ (le_readl(whc->base + WUSBCMD) & WUSBCMD_PERIODIC_UPDATED) == 0,
+ msecs_to_jiffies(1000));
+ if (t == 0)
+ whc_hw_error(whc, "PZL update timeout");
+ }
+ mutex_unlock(&wusbhc->mutex);
+}
+
+static void update_pzl_hw_view(struct whc *whc)
+{
+ struct whc_qset *qset, *t;
+ int period;
+ u64 tmp_qh = 0;
+
+ for (period = 0; period < 5; period++) {
+ list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) {
+ whc_qset_set_link_ptr(&qset->qh.link, tmp_qh);
+ tmp_qh = qset->qset_dma;
+ qset->in_hw_list = true;
+ }
+ update_pzl_pointers(whc, period, tmp_qh);
+ }
+}
+
+/**
+ * scan_periodic_work - scan the PZL for qsets to process.
+ *
+ * Process each qset in the PZL in turn and then signal the WHC that
+ * the PZL has been updated.
+ *
+ * Then start, stop or update the periodic schedule as required.
+ */
+void scan_periodic_work(struct work_struct *work)
+{
+ struct whc *whc = container_of(work, struct whc, periodic_work);
+ struct whc_qset *qset, *t;
+ enum whc_update update = 0;
+ int period;
+
+ spin_lock_irq(&whc->lock);
+
+ for (period = 4; period >= 0; period--) {
+ list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) {
+ if (!qset->in_hw_list)
+ update |= WHC_UPDATE_ADDED;
+ update |= pzl_process_qset(whc, qset);
+ }
+ }
+
+ if (update & (WHC_UPDATE_ADDED | WHC_UPDATE_REMOVED))
+ update_pzl_hw_view(whc);
+
+ spin_unlock_irq(&whc->lock);
+
+ if (update) {
+ uint32_t wusbcmd = WUSBCMD_PERIODIC_UPDATED | WUSBCMD_PERIODIC_SYNCED_DB;
+ if (update & WHC_UPDATE_REMOVED)
+ wusbcmd |= WUSBCMD_PERIODIC_QSET_RM;
+ pzl_update(whc, wusbcmd);
+ }
+
+ /*
+ * Now that the PZL is updated, complete the removal of any
+ * removed qsets.
+ *
+ * If the qset was to be reset, do so and reinsert it into the
+ * PZL if it has pending transfers.
+ */
+ spin_lock_irq(&whc->lock);
+
+ list_for_each_entry_safe(qset, t, &whc->periodic_removed_list, list_node) {
+ qset_remove_complete(whc, qset);
+ if (qset->reset) {
+ qset_reset(whc, qset);
+ if (!list_empty(&qset->stds)) {
+ qset_insert_in_sw_list(whc, qset);
+ queue_work(whc->workqueue, &whc->periodic_work);
+ }
+ }
+ }
+
+ spin_unlock_irq(&whc->lock);
+}
+
+/**
+ * pzl_urb_enqueue - queue an URB onto the periodic list (PZL)
+ * @whc: the WHCI host controller
+ * @urb: the URB to enqueue
+ * @mem_flags: flags for any memory allocations
+ *
+ * The qset for the endpoint is obtained and the urb queued on to it.
+ *
+ * Work is scheduled to update the hardware's view of the PZL.
+ */
+int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
+{
+ struct whc_qset *qset;
+ int err;
+ unsigned long flags;
+
+ spin_lock_irqsave(&whc->lock, flags);
+
+ err = usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
+ if (err < 0) {
+ spin_unlock_irqrestore(&whc->lock, flags);
+ return err;
+ }
+
+ qset = get_qset(whc, urb, GFP_ATOMIC);
+ if (qset == NULL)
+ err = -ENOMEM;
+ else
+ err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
+ if (!err) {
+ if (!qset->in_sw_list && !qset->remove)
+ qset_insert_in_sw_list(whc, qset);
+ } else
+ usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb);
+
+ spin_unlock_irqrestore(&whc->lock, flags);
+
+ if (!err)
+ queue_work(whc->workqueue, &whc->periodic_work);
+
+ return err;
+}
+
+/**
+ * pzl_urb_dequeue - remove an URB (qset) from the periodic list
+ * @whc: the WHCI host controller
+ * @urb: the URB to dequeue
+ * @status: the current status of the URB
+ *
+ * URBs that do not yet have qTDs can simply be removed from the software
+ * queue, otherwise the qset must be removed so the qTDs can be safely
+ * removed.
+ */
+int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
+{
+ struct whc_urb *wurb = urb->hcpriv;
+ struct whc_qset *qset = wurb->qset;
+ struct whc_std *std, *t;
+ bool has_qtd = false;
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&whc->lock, flags);
+
+ ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status);
+ if (ret < 0)
+ goto out;
+
+ list_for_each_entry_safe(std, t, &qset->stds, list_node) {
+ if (std->urb == urb) {
+ if (std->qtd)
+ has_qtd = true;
+ qset_free_std(whc, std);
+ } else
+ std->qtd = NULL; /* so this std is re-added when the qset is */
+ }
+
+ if (has_qtd) {
+ pzl_qset_remove(whc, qset);
+ update_pzl_hw_view(whc);
+ wurb->status = status;
+ wurb->is_async = false;
+ queue_work(whc->workqueue, &wurb->dequeue_work);
+ } else
+ qset_remove_urb(whc, qset, urb, status);
+out:
+ spin_unlock_irqrestore(&whc->lock, flags);
+
+ return ret;
+}
+
+/**
+ * pzl_qset_delete - delete a qset from the PZL
+ */
+void pzl_qset_delete(struct whc *whc, struct whc_qset *qset)
+{
+ qset->remove = 1;
+ queue_work(whc->workqueue, &whc->periodic_work);
+ qset_delete(whc, qset);
+}
+
+/**
+ * pzl_init - initialize the periodic zone list
+ * @whc: the WHCI host controller
+ */
+int pzl_init(struct whc *whc)
+{
+ int i;
+
+ whc->pz_list = dma_alloc_coherent(&whc->umc->dev, sizeof(u64) * 16,
+ &whc->pz_list_dma, GFP_KERNEL);
+ if (whc->pz_list == NULL)
+ return -ENOMEM;
+
+ /* Set T bit on all elements in PZL. */
+ for (i = 0; i < 16; i++)
+ whc->pz_list[i] = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
+
+ le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE);
+
+ return 0;
+}
+
+/**
+ * pzl_clean_up - free PZL resources
+ * @whc: the WHCI host controller
+ *
+ * The PZL is stopped and empty.
+ */
+void pzl_clean_up(struct whc *whc)
+{
+ if (whc->pz_list)
+ dma_free_coherent(&whc->umc->dev, sizeof(u64) * 16, whc->pz_list,
+ whc->pz_list_dma);
+}
Index: linux-3.18.21/drivers/usb/host/mtk_test/whci/qset.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/whci/qset.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,832 @@
+/*
+ * Wireless Host Controller (WHC) qset management.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/uwb/umc.h>
+#include <linux/usb.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
+{
+ struct whc_qset *qset;
+ dma_addr_t dma;
+
+ qset = dma_pool_alloc(whc->qset_pool, mem_flags, &dma);
+ if (qset == NULL)
+ return NULL;
+ memset(qset, 0, sizeof(struct whc_qset));
+
+ qset->qset_dma = dma;
+ qset->whc = whc;
+
+ INIT_LIST_HEAD(&qset->list_node);
+ INIT_LIST_HEAD(&qset->stds);
+
+ return qset;
+}
+
+/**
+ * qset_fill_qh - fill the static endpoint state in a qset's QHead
+ * @qset: the qset whose QH needs initializing with static endpoint
+ * state
+ * @urb: an urb for a transfer to this endpoint
+ */
+static void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb)
+{
+ struct usb_device *usb_dev = urb->dev;
+ struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
+ struct usb_wireless_ep_comp_descriptor *epcd;
+ bool is_out;
+ uint8_t phy_rate;
+
+ is_out = usb_pipeout(urb->pipe);
+
+ qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize);
+
+ epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
+ if (epcd) {
+ qset->max_seq = epcd->bMaxSequence;
+ qset->max_burst = epcd->bMaxBurst;
+ } else {
+ qset->max_seq = 2;
+ qset->max_burst = 1;
+ }
+
+ /*
+ * Initial PHY rate is 53.3 Mbit/s for control endpoints or
+ * the maximum supported by the device for other endpoints
+ * (unless limited by the user).
+ */
+ if (usb_pipecontrol(urb->pipe))
+ phy_rate = UWB_PHY_RATE_53;
+ else {
+ uint16_t phy_rates;
+
+ phy_rates = le16_to_cpu(wusb_dev->wusb_cap_descr->wPHYRates);
+ phy_rate = fls(phy_rates) - 1;
+ if (phy_rate > whc->wusbhc.phy_rate)
+ phy_rate = whc->wusbhc.phy_rate;
+ }
+
+ qset->qh.info1 = cpu_to_le32(
+ QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
+ | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
+ | usb_pipe_to_qh_type(urb->pipe)
+ | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
+ | QH_INFO1_MAX_PKT_LEN(qset->max_packet)
+ );
+ qset->qh.info2 = cpu_to_le32(
+ QH_INFO2_BURST(qset->max_burst)
+ | QH_INFO2_DBP(0)
+ | QH_INFO2_MAX_COUNT(3)
+ | QH_INFO2_MAX_RETRY(3)
+ | QH_INFO2_MAX_SEQ(qset->max_seq - 1)
+ );
+ /* FIXME: where can we obtain these Tx parameters from? Why
+ * doesn't the chip know what Tx power to use? It knows the Rx
+ * strength and can presumably guess the Tx power required
+ * from that? */
+ qset->qh.info3 = cpu_to_le32(
+ QH_INFO3_TX_RATE(phy_rate)
+ | QH_INFO3_TX_PWR(0) /* 0 == max power */
+ );
+
+ qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
+}
+
+/**
+ * qset_clear - clear fields in a qset so it may be reinserted into a
+ * schedule.
+ *
+ * The sequence number and current window are not cleared (see
+ * qset_reset()).
+ */
+void qset_clear(struct whc *whc, struct whc_qset *qset)
+{
+ qset->td_start = qset->td_end = qset->ntds = 0;
+
+ qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
+ qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
+ qset->qh.err_count = 0;
+ qset->qh.scratch[0] = 0;
+ qset->qh.scratch[1] = 0;
+ qset->qh.scratch[2] = 0;
+
+ memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));
+
+ init_completion(&qset->remove_complete);
+}
+
+/**
+ * qset_reset - reset endpoint state in a qset.
+ *
+ * Clears the sequence number and current window. This qset must not
+ * be in the ASL or PZL.
+ */
+void qset_reset(struct whc *whc, struct whc_qset *qset)
+{
+ qset->reset = 0;
+
+ qset->qh.status &= ~QH_STATUS_SEQ_MASK;
+ qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
+}
+
+/**
+ * get_qset - get the qset for an async endpoint
+ *
+ * A new qset is created if one does not already exist.
+ */
+struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct whc_qset *qset;
+
+ qset = urb->ep->hcpriv;
+ if (qset == NULL) {
+ qset = qset_alloc(whc, mem_flags);
+ if (qset == NULL)
+ return NULL;
+
+ qset->ep = urb->ep;
+ urb->ep->hcpriv = qset;
+ qset_fill_qh(whc, qset, urb);
+ }
+ return qset;
+}
+
+void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
+{
+ qset->remove = 0;
+ list_del_init(&qset->list_node);
+ complete(&qset->remove_complete);
+}
+
+/**
+ * qset_add_qtds - add qTDs for an URB to a qset
+ *
+ * Returns true if the list (ASL/PZL) must be updated because (for a
+ * WHCI 0.95 controller) an activated qTD was pointed to by iCur.
+ */
+enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
+{
+ struct whc_std *std;
+ enum whc_update update = 0;
+
+ list_for_each_entry(std, &qset->stds, list_node) {
+ struct whc_qtd *qtd;
+ uint32_t status;
+
+ if (qset->ntds >= WHCI_QSET_TD_MAX
+ || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
+ break;
+
+ if (std->qtd)
+ continue; /* already has a qTD */
+
+ qtd = std->qtd = &qset->qtd[qset->td_end];
+
+ /* Fill in setup bytes for control transfers. */
+ if (usb_pipecontrol(std->urb->pipe))
+ memcpy(qtd->setup, std->urb->setup_packet, 8);
+
+ status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);
+
+ if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
+ status |= QTD_STS_LAST_PKT;
+
+ /*
+ * For an IN transfer the iAlt field should be set so
+ * the h/w will automatically advance to the next
+ * transfer. However, if there are 8 or more TDs
+ * remaining in this transfer then iAlt cannot be set
+ * as it could point to somewhere in this transfer.
+ */
+ if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
+ int ialt;
+ ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
+ status |= QTD_STS_IALT(ialt);
+ } else if (usb_pipein(std->urb->pipe))
+ qset->pause_after_urb = std->urb;
+
+ if (std->num_pointers)
+ qtd->options = cpu_to_le32(QTD_OPT_IOC);
+ else
+ qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
+ qtd->page_list_ptr = cpu_to_le64(std->dma_addr);
+
+ qtd->status = cpu_to_le32(status);
+
+ if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
+ update = WHC_UPDATE_UPDATED;
+
+ if (++qset->td_end >= WHCI_QSET_TD_MAX)
+ qset->td_end = 0;
+ qset->ntds++;
+ }
+
+ return update;
+}
+
+/**
+ * qset_remove_qtd - remove the first qTD from a qset.
+ *
+ * The qTD might be still active (if it's part of an IN URB that
+ * resulted in a short read) so ensure it's deactivated.
+ */
+static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
+{
+ qset->qtd[qset->td_start].status = 0;
+
+ if (++qset->td_start >= WHCI_QSET_TD_MAX)
+ qset->td_start = 0;
+ qset->ntds--;
+}
+
+static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std)
+{
+ struct scatterlist *sg;
+ void *bounce;
+ size_t remaining, offset;
+
+ bounce = std->bounce_buf;
+ remaining = std->len;
+
+ sg = std->bounce_sg;
+ offset = std->bounce_offset;
+
+ while (remaining) {
+ size_t len;
+
+ len = min(sg->length - offset, remaining);
+ memcpy(sg_virt(sg) + offset, bounce, len);
+
+ bounce += len;
+ remaining -= len;
+
+ offset += len;
+ if (offset >= sg->length) {
+ sg = sg_next(sg);
+ offset = 0;
+ }
+ }
+
+}
+
+/**
+ * qset_free_std - remove an sTD and free it.
+ * @whc: the WHCI host controller
+ * @std: the sTD to remove and free.
+ */
+void qset_free_std(struct whc *whc, struct whc_std *std)
+{
+ list_del(&std->list_node);
+ if (std->bounce_buf) {
+ bool is_out = usb_pipeout(std->urb->pipe);
+ dma_addr_t dma_addr;
+
+ if (std->num_pointers)
+ dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr);
+ else
+ dma_addr = std->dma_addr;
+
+ dma_unmap_single(whc->wusbhc.dev, dma_addr,
+ std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (!is_out)
+ qset_copy_bounce_to_sg(whc, std);
+ kfree(std->bounce_buf);
+ }
+ if (std->pl_virt) {
+ if (std->dma_addr)
+ dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
+ std->num_pointers * sizeof(struct whc_page_list_entry),
+ DMA_TO_DEVICE);
+ kfree(std->pl_virt);
+ std->pl_virt = NULL;
+ }
+ kfree(std);
+}
+
+/**
+ * qset_remove_qtds - remove an URB's qTDs (and sTDs).
+ */
+static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
+ struct urb *urb)
+{
+ struct whc_std *std, *t;
+
+ list_for_each_entry_safe(std, t, &qset->stds, list_node) {
+ if (std->urb != urb)
+ break;
+ if (std->qtd != NULL)
+ qset_remove_qtd(whc, qset);
+ qset_free_std(whc, std);
+ }
+}
+
+/**
+ * qset_free_stds - free any remaining sTDs for an URB.
+ */
+static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
+{
+ struct whc_std *std, *t;
+
+ list_for_each_entry_safe(std, t, &qset->stds, list_node) {
+ if (std->urb == urb)
+ qset_free_std(qset->whc, std);
+ }
+}
+
+static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
+{
+ dma_addr_t dma_addr = std->dma_addr;
+ dma_addr_t sp, ep;
+ size_t pl_len;
+ int p;
+
+ /* Short buffers don't need a page list. */
+ if (std->len <= WHCI_PAGE_SIZE) {
+ std->num_pointers = 0;
+ return 0;
+ }
+
+ sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
+ ep = dma_addr + std->len;
+ std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
+
+ pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
+ std->pl_virt = kmalloc(pl_len, mem_flags);
+ if (std->pl_virt == NULL)
+ return -ENOMEM;
+ std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
+
+ for (p = 0; p < std->num_pointers; p++) {
+ std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
+ dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
+ }
+
+ return 0;
+}
+
+/**
+ * urb_dequeue_work - executes asl/pzl update and gives back the urb to the system.
+ */
+static void urb_dequeue_work(struct work_struct *work)
+{
+ struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
+ struct whc_qset *qset = wurb->qset;
+ struct whc *whc = qset->whc;
+ unsigned long flags;
+
+ if (wurb->is_async == true)
+ asl_update(whc, WUSBCMD_ASYNC_UPDATED
+ | WUSBCMD_ASYNC_SYNCED_DB
+ | WUSBCMD_ASYNC_QSET_RM);
+ else
+ pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
+ | WUSBCMD_PERIODIC_SYNCED_DB
+ | WUSBCMD_PERIODIC_QSET_RM);
+
+ spin_lock_irqsave(&whc->lock, flags);
+ qset_remove_urb(whc, qset, wurb->urb, wurb->status);
+ spin_unlock_irqrestore(&whc->lock, flags);
+}
+
+static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset,
+ struct urb *urb, gfp_t mem_flags)
+{
+ struct whc_std *std;
+
+ std = kzalloc(sizeof(struct whc_std), mem_flags);
+ if (std == NULL)
+ return NULL;
+
+ std->urb = urb;
+ std->qtd = NULL;
+
+ INIT_LIST_HEAD(&std->list_node);
+ list_add_tail(&std->list_node, &qset->stds);
+
+ return std;
+}
+
+static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb,
+ gfp_t mem_flags)
+{
+ size_t remaining;
+ struct scatterlist *sg;
+ int i;
+ int ntds = 0;
+ struct whc_std *std = NULL;
+ struct whc_page_list_entry *entry;
+ dma_addr_t prev_end = 0;
+ size_t pl_len;
+ int p = 0;
+
+ remaining = urb->transfer_buffer_length;
+
+ for_each_sg(urb->sg, sg, urb->num_sgs, i) {
+ dma_addr_t dma_addr;
+ size_t dma_remaining;
+ dma_addr_t sp, ep;
+ int num_pointers;
+
+ if (remaining == 0) {
+ break;
+ }
+
+ dma_addr = sg_dma_address(sg);
+ dma_remaining = min_t(size_t, sg_dma_len(sg), remaining);
+
+ while (dma_remaining) {
+ size_t dma_len;
+
+ /*
+ * We can use the previous std (if it exists) provided that:
+ * - the previous one ended on a page boundary.
+ * - the current one begins on a page boundary.
+ * - the previous one isn't full.
+ *
+ * If a new std is needed but the previous one
+ * was not a whole number of packets then this
+ * sg list cannot be mapped onto multiple
+ * qTDs. Return an error and let the caller
+ * sort it out.
+ */
+ if (!std
+ || (prev_end & (WHCI_PAGE_SIZE-1))
+ || (dma_addr & (WHCI_PAGE_SIZE-1))
+ || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) {
+ if (std && std->len % qset->max_packet != 0)
+ return -EINVAL;
+ std = qset_new_std(whc, qset, urb, mem_flags);
+ if (std == NULL) {
+ return -ENOMEM;
+ }
+ ntds++;
+ p = 0;
+ }
+
+ dma_len = dma_remaining;
+
+ /*
+ * If the remainder of this element doesn't
+ * fit in a single qTD, limit the qTD to a
+ * whole number of packets. This allows the
+ * remainder to go into the next qTD.
+ */
+ if (std->len + dma_len > QTD_MAX_XFER_SIZE) {
+ dma_len = (QTD_MAX_XFER_SIZE / qset->max_packet)
+ * qset->max_packet - std->len;
+ }
+
+ std->len += dma_len;
+ std->ntds_remaining = -1; /* filled in later */
+
+ sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
+ ep = dma_addr + dma_len;
+ num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
+ std->num_pointers += num_pointers;
+
+ pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
+
+ std->pl_virt = krealloc(std->pl_virt, pl_len, mem_flags);
+ if (std->pl_virt == NULL) {
+ return -ENOMEM;
+ }
+
+ for (;p < std->num_pointers; p++, entry++) {
+ std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
+ dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
+ }
+
+ prev_end = dma_addr = ep;
+ dma_remaining -= dma_len;
+ remaining -= dma_len;
+ }
+ }
+
+ /* Now the number of stds is know, go back and fill in
+ std->ntds_remaining. */
+ list_for_each_entry(std, &qset->stds, list_node) {
+ if (std->ntds_remaining == -1) {
+ pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
+ std->ntds_remaining = ntds--;
+ std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt,
+ pl_len, DMA_TO_DEVICE);
+ }
+ }
+ return 0;
+}
+
+/**
+ * qset_add_urb_sg_linearize - add an urb with sg list, copying the data
+ *
+ * If the URB contains an sg list whose elements cannot be directly
+ * mapped to qTDs then the data must be transferred via bounce
+ * buffers.
+ */
+static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
+ struct urb *urb, gfp_t mem_flags)
+{
+ bool is_out = usb_pipeout(urb->pipe);
+ size_t max_std_len;
+ size_t remaining;
+ int ntds = 0;
+ struct whc_std *std = NULL;
+ void *bounce = NULL;
+ struct scatterlist *sg;
+ int i;
+
+ /* limit maximum bounce buffer to 16 * 3.5 KiB ~= 28 k */
+ max_std_len = qset->max_burst * qset->max_packet;
+
+ remaining = urb->transfer_buffer_length;
+
+ for_each_sg(urb->sg, sg, urb->num_sgs, i) {
+ size_t len;
+ size_t sg_remaining;
+ void *orig;
+
+ if (remaining == 0) {
+ break;
+ }
+
+ sg_remaining = min_t(size_t, remaining, sg->length);
+ orig = sg_virt(sg);
+
+ while (sg_remaining) {
+ if (!std || std->len == max_std_len) {
+ std = qset_new_std(whc, qset, urb, mem_flags);
+ if (std == NULL)
+ return -ENOMEM;
+ std->bounce_buf = kmalloc(max_std_len, mem_flags);
+ if (std->bounce_buf == NULL)
+ return -ENOMEM;
+ std->bounce_sg = sg;
+ std->bounce_offset = orig - sg_virt(sg);
+ bounce = std->bounce_buf;
+ ntds++;
+ }
+
+ len = min(sg_remaining, max_std_len - std->len);
+
+ if (is_out)
+ memcpy(bounce, orig, len);
+
+ std->len += len;
+ std->ntds_remaining = -1; /* filled in later */
+
+ bounce += len;
+ orig += len;
+ sg_remaining -= len;
+ remaining -= len;
+ }
+ }
+
+ /*
+ * For each of the new sTDs, map the bounce buffers, create
+ * page lists (if necessary), and fill in std->ntds_remaining.
+ */
+ list_for_each_entry(std, &qset->stds, list_node) {
+ if (std->ntds_remaining != -1)
+ continue;
+
+ std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len,
+ is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ if (qset_fill_page_list(whc, std, mem_flags) < 0)
+ return -ENOMEM;
+
+ std->ntds_remaining = ntds--;
+ }
+
+ return 0;
+}
+
+/**
+ * qset_add_urb - add an urb to the qset's queue.
+ *
+ * The URB is chopped into sTDs, one for each qTD that will required.
+ * At least one qTD (and sTD) is required even if the transfer has no
+ * data (e.g., for some control transfers).
+ */
+int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct whc_urb *wurb;
+ int remaining = urb->transfer_buffer_length;
+ u64 transfer_dma = urb->transfer_dma;
+ int ntds_remaining;
+ int ret;
+
+ wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
+ if (wurb == NULL)
+ goto err_no_mem;
+ urb->hcpriv = wurb;
+ wurb->qset = qset;
+ wurb->urb = urb;
+ INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);
+
+ if (urb->num_sgs) {
+ ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
+ if (ret == -EINVAL) {
+ qset_free_stds(qset, urb);
+ ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags);
+ }
+ if (ret < 0)
+ goto err_no_mem;
+ return 0;
+ }
+
+ ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
+ if (ntds_remaining == 0)
+ ntds_remaining = 1;
+
+ while (ntds_remaining) {
+ struct whc_std *std;
+ size_t std_len;
+
+ std_len = remaining;
+ if (std_len > QTD_MAX_XFER_SIZE)
+ std_len = QTD_MAX_XFER_SIZE;
+
+ std = qset_new_std(whc, qset, urb, mem_flags);
+ if (std == NULL)
+ goto err_no_mem;
+
+ std->dma_addr = transfer_dma;
+ std->len = std_len;
+ std->ntds_remaining = ntds_remaining;
+
+ if (qset_fill_page_list(whc, std, mem_flags) < 0)
+ goto err_no_mem;
+
+ ntds_remaining--;
+ remaining -= std_len;
+ transfer_dma += std_len;
+ }
+
+ return 0;
+
+err_no_mem:
+ qset_free_stds(qset, urb);
+ return -ENOMEM;
+}
+
+/**
+ * qset_remove_urb - remove an URB from the urb queue.
+ *
+ * The URB is returned to the USB subsystem.
+ */
+void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
+ struct urb *urb, int status)
+{
+ struct wusbhc *wusbhc = &whc->wusbhc;
+ struct whc_urb *wurb = urb->hcpriv;
+
+ usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
+ /* Drop the lock as urb->complete() may enqueue another urb. */
+ spin_unlock(&whc->lock);
+ wusbhc_giveback_urb(wusbhc, urb, status);
+ spin_lock(&whc->lock);
+
+ kfree(wurb);
+}
+
+/**
+ * get_urb_status_from_qtd - get the completed urb status from qTD status
+ * @urb: completed urb
+ * @status: qTD status
+ */
+static int get_urb_status_from_qtd(struct urb *urb, u32 status)
+{
+ if (status & QTD_STS_HALTED) {
+ if (status & QTD_STS_DBE)
+ return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
+ else if (status & QTD_STS_BABBLE)
+ return -EOVERFLOW;
+ else if (status & QTD_STS_RCE)
+ return -ETIME;
+ return -EPIPE;
+ }
+ if (usb_pipein(urb->pipe)
+ && (urb->transfer_flags & URB_SHORT_NOT_OK)
+ && urb->actual_length < urb->transfer_buffer_length)
+ return -EREMOTEIO;
+ return 0;
+}
+
+/**
+ * process_inactive_qtd - process an inactive (but not halted) qTD.
+ *
+ * Update the urb with the transfer bytes from the qTD, if the urb is
+ * completely transfered or (in the case of an IN only) the LPF is
+ * set, then the transfer is complete and the urb should be returned
+ * to the system.
+ */
+void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
+ struct whc_qtd *qtd)
+{
+ struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
+ struct urb *urb = std->urb;
+ uint32_t status;
+ bool complete;
+
+ status = le32_to_cpu(qtd->status);
+
+ urb->actual_length += std->len - QTD_STS_TO_LEN(status);
+
+ if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
+ complete = true;
+ else
+ complete = whc_std_last(std);
+
+ qset_remove_qtd(whc, qset);
+ qset_free_std(whc, std);
+
+ /*
+ * Transfers for this URB are complete? Then return it to the
+ * USB subsystem.
+ */
+ if (complete) {
+ qset_remove_qtds(whc, qset, urb);
+ qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));
+
+ /*
+ * If iAlt isn't valid then the hardware didn't
+ * advance iCur. Adjust the start and end pointers to
+ * match iCur.
+ */
+ if (!(status & QTD_STS_IALT_VALID))
+ qset->td_start = qset->td_end
+ = QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
+ qset->pause_after_urb = NULL;
+ }
+}
+
+/**
+ * process_halted_qtd - process a qset with a halted qtd
+ *
+ * Remove all the qTDs for the failed URB and return the failed URB to
+ * the USB subsystem. Then remove all other qTDs so the qset can be
+ * removed.
+ *
+ * FIXME: this is the point where rate adaptation can be done. If a
+ * transfer failed because it exceeded the maximum number of retries
+ * then it could be reactivated with a slower rate without having to
+ * remove the qset.
+ */
+void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
+ struct whc_qtd *qtd)
+{
+ struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
+ struct urb *urb = std->urb;
+ int urb_status;
+
+ urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));
+
+ qset_remove_qtds(whc, qset, urb);
+ qset_remove_urb(whc, qset, urb, urb_status);
+
+ list_for_each_entry(std, &qset->stds, list_node) {
+ if (qset->ntds == 0)
+ break;
+ qset_remove_qtd(whc, qset);
+ std->qtd = NULL;
+ }
+
+ qset->remove = 1;
+}
+
+void qset_free(struct whc *whc, struct whc_qset *qset)
+{
+ dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
+}
+
+/**
+ * qset_delete - wait for a qset to be unused, then free it.
+ */
+void qset_delete(struct whc *whc, struct whc_qset *qset)
+{
+ wait_for_completion(&qset->remove_complete);
+ qset_free(whc, qset);
+}
Index: linux-3.18.21/drivers/usb/host/mtk_test/whci/whcd.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/whci/whcd.h 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,215 @@
+/*
+ * Wireless Host Controller (WHC) private header.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+#ifndef __WHCD_H
+#define __WHCD_H
+
+#include <linux/uwb/whci.h>
+#include <linux/uwb/umc.h>
+#include <linux/workqueue.h>
+
+#include "whci-hc.h"
+
+/* Generic command timeout. */
+#define WHC_GENCMD_TIMEOUT_MS 100
+
+struct whc_dbg;
+
+struct whc {
+ struct wusbhc wusbhc;
+ struct umc_dev *umc;
+
+ resource_size_t base_phys;
+ void __iomem *base;
+ int irq;
+
+ u8 n_devices;
+ u8 n_keys;
+ u8 n_mmc_ies;
+
+ u64 *pz_list;
+ struct dn_buf_entry *dn_buf;
+ struct di_buf_entry *di_buf;
+ dma_addr_t pz_list_dma;
+ dma_addr_t dn_buf_dma;
+ dma_addr_t di_buf_dma;
+
+ spinlock_t lock;
+ struct mutex mutex;
+
+ void * gen_cmd_buf;
+ dma_addr_t gen_cmd_buf_dma;
+ wait_queue_head_t cmd_wq;
+
+ struct workqueue_struct *workqueue;
+ struct work_struct dn_work;
+
+ struct dma_pool *qset_pool;
+
+ struct list_head async_list;
+ struct list_head async_removed_list;
+ wait_queue_head_t async_list_wq;
+ struct work_struct async_work;
+
+ struct list_head periodic_list[5];
+ struct list_head periodic_removed_list;
+ wait_queue_head_t periodic_list_wq;
+ struct work_struct periodic_work;
+
+ struct whc_dbg *dbg;
+};
+
+#define wusbhc_to_whc(w) (container_of((w), struct whc, wusbhc))
+
+/**
+ * struct whc_std - a software TD.
+ * @urb: the URB this sTD is for.
+ * @offset: start of the URB's data for this TD.
+ * @len: the length of data in the associated TD.
+ * @ntds_remaining: number of TDs (starting from this one) in this transfer.
+ *
+ * @bounce_buf: a bounce buffer if the std was from an urb with a sg
+ * list that could not be mapped to qTDs directly.
+ * @bounce_sg: the first scatterlist element bounce_buf is for.
+ * @bounce_offset: the offset into bounce_sg for the start of bounce_buf.
+ *
+ * Queued URBs may require more TDs than are available in a qset so we
+ * use a list of these "software TDs" (sTDs) to hold per-TD data.
+ */
+struct whc_std {
+ struct urb *urb;
+ size_t len;
+ int ntds_remaining;
+ struct whc_qtd *qtd;
+
+ struct list_head list_node;
+ int num_pointers;
+ dma_addr_t dma_addr;
+ struct whc_page_list_entry *pl_virt;
+
+ void *bounce_buf;
+ struct scatterlist *bounce_sg;
+ unsigned bounce_offset;
+};
+
+/**
+ * struct whc_urb - per URB host controller structure.
+ * @urb: the URB this struct is for.
+ * @qset: the qset associated to the URB.
+ * @dequeue_work: the work to remove the URB when dequeued.
+ * @is_async: the URB belongs to async sheduler or not.
+ * @status: the status to be returned when calling wusbhc_giveback_urb.
+ */
+struct whc_urb {
+ struct urb *urb;
+ struct whc_qset *qset;
+ struct work_struct dequeue_work;
+ bool is_async;
+ int status;
+};
+
+/**
+ * whc_std_last - is this sTD the URB's last?
+ * @std: the sTD to check.
+ */
+static inline bool whc_std_last(struct whc_std *std)
+{
+ return std->ntds_remaining <= 1;
+}
+
+enum whc_update {
+ WHC_UPDATE_ADDED = 0x01,
+ WHC_UPDATE_REMOVED = 0x02,
+ WHC_UPDATE_UPDATED = 0x04,
+};
+
+/* init.c */
+int whc_init(struct whc *whc);
+void whc_clean_up(struct whc *whc);
+
+/* hw.c */
+void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val);
+int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len);
+void whc_hw_error(struct whc *whc, const char *reason);
+
+/* wusb.c */
+int whc_wusbhc_start(struct wusbhc *wusbhc);
+void whc_wusbhc_stop(struct wusbhc *wusbhc, int delay);
+int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
+ u8 handle, struct wuie_hdr *wuie);
+int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle);
+int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm);
+int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev);
+int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots);
+int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
+ const void *ptk, size_t key_size);
+int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid,
+ const void *gtk, size_t key_size);
+int whc_set_cluster_id(struct whc *whc, u8 bcid);
+
+/* int.c */
+irqreturn_t whc_int_handler(struct usb_hcd *hcd);
+void whc_dn_work(struct work_struct *work);
+
+/* asl.c */
+void asl_start(struct whc *whc);
+void asl_stop(struct whc *whc);
+int asl_init(struct whc *whc);
+void asl_clean_up(struct whc *whc);
+int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags);
+int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status);
+void asl_qset_delete(struct whc *whc, struct whc_qset *qset);
+void scan_async_work(struct work_struct *work);
+
+/* pzl.c */
+int pzl_init(struct whc *whc);
+void pzl_clean_up(struct whc *whc);
+void pzl_start(struct whc *whc);
+void pzl_stop(struct whc *whc);
+int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags);
+int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status);
+void pzl_qset_delete(struct whc *whc, struct whc_qset *qset);
+void scan_periodic_work(struct work_struct *work);
+
+/* qset.c */
+struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags);
+void qset_free(struct whc *whc, struct whc_qset *qset);
+struct whc_qset *get_qset(struct whc *whc, struct urb *urb, gfp_t mem_flags);
+void qset_delete(struct whc *whc, struct whc_qset *qset);
+void qset_clear(struct whc *whc, struct whc_qset *qset);
+void qset_reset(struct whc *whc, struct whc_qset *qset);
+int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
+ gfp_t mem_flags);
+void qset_free_std(struct whc *whc, struct whc_std *std);
+void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
+ struct urb *urb, int status);
+void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
+ struct whc_qtd *qtd);
+void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
+ struct whc_qtd *qtd);
+enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset);
+void qset_remove_complete(struct whc *whc, struct whc_qset *qset);
+void pzl_update(struct whc *whc, uint32_t wusbcmd);
+void asl_update(struct whc *whc, uint32_t wusbcmd);
+
+/* debug.c */
+void whc_dbg_init(struct whc *whc);
+void whc_dbg_clean_up(struct whc *whc);
+
+#endif /* #ifndef __WHCD_H */
Index: linux-3.18.21/drivers/usb/host/mtk_test/whci/whci-hc.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/whci/whci-hc.h 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,414 @@
+/*
+ * Wireless Host Controller (WHC) data structures.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+#ifndef _WHCI_WHCI_HC_H
+#define _WHCI_WHCI_HC_H
+
+#include <linux/list.h>
+
+/**
+ * WHCI_PAGE_SIZE - page size use by WHCI
+ *
+ * WHCI assumes that host system uses pages of 4096 octets.
+ */
+#define WHCI_PAGE_SIZE 4096
+
+
+/**
+ * QTD_MAX_TXFER_SIZE - max number of bytes to transfer with a single
+ * qtd.
+ *
+ * This is 2^20 - 1.
+ */
+#define QTD_MAX_XFER_SIZE 1048575
+
+
+/**
+ * struct whc_qtd - Queue Element Transfer Descriptors (qTD)
+ *
+ * This describes the data for a bulk, control or interrupt transfer.
+ *
+ * [WHCI] section 3.2.4
+ */
+struct whc_qtd {
+ __le32 status; /*< remaining transfer len and transfer status */
+ __le32 options;
+ __le64 page_list_ptr; /*< physical pointer to data buffer page list*/
+ __u8 setup[8]; /*< setup data for control transfers */
+} __attribute__((packed));
+
+#define QTD_STS_ACTIVE (1 << 31) /* enable execution of transaction */
+#define QTD_STS_HALTED (1 << 30) /* transfer halted */
+#define QTD_STS_DBE (1 << 29) /* data buffer error */
+#define QTD_STS_BABBLE (1 << 28) /* babble detected */
+#define QTD_STS_RCE (1 << 27) /* retry count exceeded */
+#define QTD_STS_LAST_PKT (1 << 26) /* set Last Packet Flag in WUSB header */
+#define QTD_STS_INACTIVE (1 << 25) /* queue set is marked inactive */
+#define QTD_STS_IALT_VALID (1 << 23) /* iAlt field is valid */
+#define QTD_STS_IALT(i) (QTD_STS_IALT_VALID | ((i) << 20)) /* iAlt field */
+#define QTD_STS_LEN(l) ((l) << 0) /* transfer length */
+#define QTD_STS_TO_LEN(s) ((s) & 0x000fffff)
+
+#define QTD_OPT_IOC (1 << 1) /* page_list_ptr points to buffer directly */
+#define QTD_OPT_SMALL (1 << 0) /* interrupt on complete */
+
+/**
+ * struct whc_itd - Isochronous Queue Element Transfer Descriptors (iTD)
+ *
+ * This describes the data and other parameters for an isochronous
+ * transfer.
+ *
+ * [WHCI] section 3.2.5
+ */
+struct whc_itd {
+ __le16 presentation_time; /*< presentation time for OUT transfers */
+ __u8 num_segments; /*< number of data segments in segment list */
+ __u8 status; /*< command execution status */
+ __le32 options; /*< misc transfer options */
+ __le64 page_list_ptr; /*< physical pointer to data buffer page list */
+ __le64 seg_list_ptr; /*< physical pointer to segment list */
+} __attribute__((packed));
+
+#define ITD_STS_ACTIVE (1 << 7) /* enable execution of transaction */
+#define ITD_STS_DBE (1 << 5) /* data buffer error */
+#define ITD_STS_BABBLE (1 << 4) /* babble detected */
+#define ITD_STS_INACTIVE (1 << 1) /* queue set is marked inactive */
+
+#define ITD_OPT_IOC (1 << 1) /* interrupt on complete */
+#define ITD_OPT_SMALL (1 << 0) /* page_list_ptr points to buffer directly */
+
+/**
+ * Page list entry.
+ *
+ * A TD's page list must contain sufficient page list entries for the
+ * total data length in the TD.
+ *
+ * [WHCI] section 3.2.4.3
+ */
+struct whc_page_list_entry {
+ __le64 buf_ptr; /*< physical pointer to buffer */
+} __attribute__((packed));
+
+/**
+ * struct whc_seg_list_entry - Segment list entry.
+ *
+ * Describes a portion of the data buffer described in the containing
+ * qTD's page list.
+ *
+ * seg_ptr = qtd->page_list_ptr[qtd->seg_list_ptr[seg].idx].buf_ptr
+ * + qtd->seg_list_ptr[seg].offset;
+ *
+ * Segments can't cross page boundries.
+ *
+ * [WHCI] section 3.2.5.5
+ */
+struct whc_seg_list_entry {
+ __le16 len; /*< segment length */
+ __u8 idx; /*< index into page list */
+ __u8 status; /*< segment status */
+ __le16 offset; /*< 12 bit offset into page */
+} __attribute__((packed));
+
+/**
+ * struct whc_qhead - endpoint and status information for a qset.
+ *
+ * [WHCI] section 3.2.6
+ */
+struct whc_qhead {
+ __le64 link; /*< next qset in list */
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le16 status;
+ __le16 err_count; /*< transaction error count */
+ __le32 cur_window;
+ __le32 scratch[3]; /*< h/w scratch area */
+ union {
+ struct whc_qtd qtd;
+ struct whc_itd itd;
+ } overlay;
+} __attribute__((packed));
+
+#define QH_LINK_PTR_MASK (~0x03Full)
+#define QH_LINK_PTR(ptr) ((ptr) & QH_LINK_PTR_MASK)
+#define QH_LINK_IQS (1 << 4) /* isochronous queue set */
+#define QH_LINK_NTDS(n) (((n) - 1) << 1) /* number of TDs in queue set */
+#define QH_LINK_T (1 << 0) /* last queue set in periodic schedule list */
+
+#define QH_INFO1_EP(e) ((e) << 0) /* endpoint number */
+#define QH_INFO1_DIR_IN (1 << 4) /* IN transfer */
+#define QH_INFO1_DIR_OUT (0 << 4) /* OUT transfer */
+#define QH_INFO1_TR_TYPE_CTRL (0x0 << 5) /* control transfer */
+#define QH_INFO1_TR_TYPE_ISOC (0x1 << 5) /* isochronous transfer */
+#define QH_INFO1_TR_TYPE_BULK (0x2 << 5) /* bulk transfer */
+#define QH_INFO1_TR_TYPE_INT (0x3 << 5) /* interrupt */
+#define QH_INFO1_TR_TYPE_LP_INT (0x7 << 5) /* low power interrupt */
+#define QH_INFO1_DEV_INFO_IDX(i) ((i) << 8) /* index into device info buffer */
+#define QH_INFO1_SET_INACTIVE (1 << 15) /* set inactive after transfer */
+#define QH_INFO1_MAX_PKT_LEN(l) ((l) << 16) /* maximum packet length */
+
+#define QH_INFO2_BURST(b) ((b) << 0) /* maximum burst length */
+#define QH_INFO2_DBP(p) ((p) << 5) /* data burst policy (see [WUSB] table 5-7) */
+#define QH_INFO2_MAX_COUNT(c) ((c) << 8) /* max isoc/int pkts per zone */
+#define QH_INFO2_RQS (1 << 15) /* reactivate queue set */
+#define QH_INFO2_MAX_RETRY(r) ((r) << 16) /* maximum transaction retries */
+#define QH_INFO2_MAX_SEQ(s) ((s) << 20) /* maximum sequence number */
+#define QH_INFO3_MAX_DELAY(d) ((d) << 0) /* maximum stream delay in 125 us units (isoc only) */
+#define QH_INFO3_INTERVAL(i) ((i) << 16) /* segment interval in 125 us units (isoc only) */
+
+#define QH_INFO3_TX_RATE(r) ((r) << 24) /* PHY rate (see [ECMA-368] section 10.3.1.1) */
+#define QH_INFO3_TX_PWR(p) ((p) << 29) /* transmit power (see [WUSB] section 5.2.1.2) */
+
+#define QH_STATUS_FLOW_CTRL (1 << 15)
+#define QH_STATUS_ICUR(i) ((i) << 5)
+#define QH_STATUS_TO_ICUR(s) (((s) >> 5) & 0x7)
+#define QH_STATUS_SEQ_MASK 0x1f
+
+/**
+ * usb_pipe_to_qh_type - USB core pipe type to QH transfer type
+ *
+ * Returns the QH type field for a USB core pipe type.
+ */
+static inline unsigned usb_pipe_to_qh_type(unsigned pipe)
+{
+ static const unsigned type[] = {
+ [PIPE_ISOCHRONOUS] = QH_INFO1_TR_TYPE_ISOC,
+ [PIPE_INTERRUPT] = QH_INFO1_TR_TYPE_INT,
+ [PIPE_CONTROL] = QH_INFO1_TR_TYPE_CTRL,
+ [PIPE_BULK] = QH_INFO1_TR_TYPE_BULK,
+ };
+ return type[usb_pipetype(pipe)];
+}
+
+/**
+ * Maxiumum number of TDs in a qset.
+ */
+#define WHCI_QSET_TD_MAX 8
+
+/**
+ * struct whc_qset - WUSB data transfers to a specific endpoint
+ * @qh: the QHead of this qset
+ * @qtd: up to 8 qTDs (for qsets for control, bulk and interrupt
+ * transfers)
+ * @itd: up to 8 iTDs (for qsets for isochronous transfers)
+ * @qset_dma: DMA address for this qset
+ * @whc: WHCI HC this qset is for
+ * @ep: endpoint
+ * @stds: list of sTDs queued to this qset
+ * @ntds: number of qTDs queued (not necessarily the same as nTDs
+ * field in the QH)
+ * @td_start: index of the first qTD in the list
+ * @td_end: index of next free qTD in the list (provided
+ * ntds < WHCI_QSET_TD_MAX)
+ *
+ * Queue Sets (qsets) are added to the asynchronous schedule list
+ * (ASL) or the periodic zone list (PZL).
+ *
+ * qsets may contain up to 8 TDs (either qTDs or iTDs as appropriate).
+ * Each TD may refer to at most 1 MiB of data. If a single transfer
+ * has > 8MiB of data, TDs can be reused as they are completed since
+ * the TD list is used as a circular buffer. Similarly, several
+ * (smaller) transfers may be queued in a qset.
+ *
+ * WHCI controllers may cache portions of the qsets in the ASL and
+ * PZL, requiring the WHCD to inform the WHC that the lists have been
+ * updated (fields changed or qsets inserted or removed). For safe
+ * insertion and removal of qsets from the lists the schedule must be
+ * stopped to avoid races in updating the QH link pointers.
+ *
+ * Since the HC is free to execute qsets in any order, all transfers
+ * to an endpoint should use the same qset to ensure transfers are
+ * executed in the order they're submitted.
+ *
+ * [WHCI] section 3.2.3
+ */
+struct whc_qset {
+ struct whc_qhead qh;
+ union {
+ struct whc_qtd qtd[WHCI_QSET_TD_MAX];
+ struct whc_itd itd[WHCI_QSET_TD_MAX];
+ };
+
+ /* private data for WHCD */
+ dma_addr_t qset_dma;
+ struct whc *whc;
+ struct usb_host_endpoint *ep;
+ struct list_head stds;
+ int ntds;
+ int td_start;
+ int td_end;
+ struct list_head list_node;
+ unsigned in_sw_list:1;
+ unsigned in_hw_list:1;
+ unsigned remove:1;
+ unsigned reset:1;
+ struct urb *pause_after_urb;
+ struct completion remove_complete;
+ uint16_t max_packet;
+ uint8_t max_burst;
+ uint8_t max_seq;
+};
+
+static inline void whc_qset_set_link_ptr(u64 *ptr, u64 target)
+{
+ if (target)
+ *ptr = (*ptr & ~(QH_LINK_PTR_MASK | QH_LINK_T)) | QH_LINK_PTR(target);
+ else
+ *ptr = QH_LINK_T;
+}
+
+/**
+ * struct di_buf_entry - Device Information (DI) buffer entry.
+ *
+ * There's one of these per connected device.
+ */
+struct di_buf_entry {
+ __le32 availability_info[8]; /*< MAS availability information, one MAS per bit */
+ __le32 addr_sec_info; /*< addressing and security info */
+ __le32 reserved[7];
+} __attribute__((packed));
+
+#define WHC_DI_SECURE (1 << 31)
+#define WHC_DI_DISABLE (1 << 30)
+#define WHC_DI_KEY_IDX(k) ((k) << 8)
+#define WHC_DI_KEY_IDX_MASK 0x0000ff00
+#define WHC_DI_DEV_ADDR(a) ((a) << 0)
+#define WHC_DI_DEV_ADDR_MASK 0x000000ff
+
+/**
+ * struct dn_buf_entry - Device Notification (DN) buffer entry.
+ *
+ * [WHCI] section 3.2.8
+ */
+struct dn_buf_entry {
+ __u8 msg_size; /*< number of octets of valid DN data */
+ __u8 reserved1;
+ __u8 src_addr; /*< source address */
+ __u8 status; /*< buffer entry status */
+ __le32 tkid; /*< TKID for source device, valid if secure bit is set */
+ __u8 dn_data[56]; /*< up to 56 octets of DN data */
+} __attribute__((packed));
+
+#define WHC_DN_STATUS_VALID (1 << 7) /* buffer entry is valid */
+#define WHC_DN_STATUS_SECURE (1 << 6) /* notification received using secure frame */
+
+#define WHC_N_DN_ENTRIES (4096 / sizeof(struct dn_buf_entry))
+
+/* The Add MMC IE WUSB Generic Command may take up to 256 bytes of
+ data. [WHCI] section 2.4.7. */
+#define WHC_GEN_CMD_DATA_LEN 256
+
+/*
+ * HC registers.
+ *
+ * [WHCI] section 2.4
+ */
+
+#define WHCIVERSION 0x00
+
+#define WHCSPARAMS 0x04
+# define WHCSPARAMS_TO_N_MMC_IES(p) (((p) >> 16) & 0xff)
+# define WHCSPARAMS_TO_N_KEYS(p) (((p) >> 8) & 0xff)
+# define WHCSPARAMS_TO_N_DEVICES(p) (((p) >> 0) & 0x7f)
+
+#define WUSBCMD 0x08
+# define WUSBCMD_BCID(b) ((b) << 16)
+# define WUSBCMD_BCID_MASK (0xff << 16)
+# define WUSBCMD_ASYNC_QSET_RM (1 << 12)
+# define WUSBCMD_PERIODIC_QSET_RM (1 << 11)
+# define WUSBCMD_WUSBSI(s) ((s) << 8)
+# define WUSBCMD_WUSBSI_MASK (0x7 << 8)
+# define WUSBCMD_ASYNC_SYNCED_DB (1 << 7)
+# define WUSBCMD_PERIODIC_SYNCED_DB (1 << 6)
+# define WUSBCMD_ASYNC_UPDATED (1 << 5)
+# define WUSBCMD_PERIODIC_UPDATED (1 << 4)
+# define WUSBCMD_ASYNC_EN (1 << 3)
+# define WUSBCMD_PERIODIC_EN (1 << 2)
+# define WUSBCMD_WHCRESET (1 << 1)
+# define WUSBCMD_RUN (1 << 0)
+
+#define WUSBSTS 0x0c
+# define WUSBSTS_ASYNC_SCHED (1 << 15)
+# define WUSBSTS_PERIODIC_SCHED (1 << 14)
+# define WUSBSTS_DNTS_SCHED (1 << 13)
+# define WUSBSTS_HCHALTED (1 << 12)
+# define WUSBSTS_GEN_CMD_DONE (1 << 9)
+# define WUSBSTS_CHAN_TIME_ROLLOVER (1 << 8)
+# define WUSBSTS_DNTS_OVERFLOW (1 << 7)
+# define WUSBSTS_BPST_ADJUSTMENT_CHANGED (1 << 6)
+# define WUSBSTS_HOST_ERR (1 << 5)
+# define WUSBSTS_ASYNC_SCHED_SYNCED (1 << 4)
+# define WUSBSTS_PERIODIC_SCHED_SYNCED (1 << 3)
+# define WUSBSTS_DNTS_INT (1 << 2)
+# define WUSBSTS_ERR_INT (1 << 1)
+# define WUSBSTS_INT (1 << 0)
+# define WUSBSTS_INT_MASK 0x3ff
+
+#define WUSBINTR 0x10
+# define WUSBINTR_GEN_CMD_DONE (1 << 9)
+# define WUSBINTR_CHAN_TIME_ROLLOVER (1 << 8)
+# define WUSBINTR_DNTS_OVERFLOW (1 << 7)
+# define WUSBINTR_BPST_ADJUSTMENT_CHANGED (1 << 6)
+# define WUSBINTR_HOST_ERR (1 << 5)
+# define WUSBINTR_ASYNC_SCHED_SYNCED (1 << 4)
+# define WUSBINTR_PERIODIC_SCHED_SYNCED (1 << 3)
+# define WUSBINTR_DNTS_INT (1 << 2)
+# define WUSBINTR_ERR_INT (1 << 1)
+# define WUSBINTR_INT (1 << 0)
+# define WUSBINTR_ALL 0x3ff
+
+#define WUSBGENCMDSTS 0x14
+# define WUSBGENCMDSTS_ACTIVE (1 << 31)
+# define WUSBGENCMDSTS_ERROR (1 << 24)
+# define WUSBGENCMDSTS_IOC (1 << 23)
+# define WUSBGENCMDSTS_MMCIE_ADD 0x01
+# define WUSBGENCMDSTS_MMCIE_RM 0x02
+# define WUSBGENCMDSTS_SET_MAS 0x03
+# define WUSBGENCMDSTS_CHAN_STOP 0x04
+# define WUSBGENCMDSTS_RWP_EN 0x05
+
+#define WUSBGENCMDPARAMS 0x18
+#define WUSBGENADDR 0x20
+#define WUSBASYNCLISTADDR 0x28
+#define WUSBDNTSBUFADDR 0x30
+#define WUSBDEVICEINFOADDR 0x38
+
+#define WUSBSETSECKEYCMD 0x40
+# define WUSBSETSECKEYCMD_SET (1 << 31)
+# define WUSBSETSECKEYCMD_ERASE (1 << 30)
+# define WUSBSETSECKEYCMD_GTK (1 << 8)
+# define WUSBSETSECKEYCMD_IDX(i) ((i) << 0)
+
+#define WUSBTKID 0x44
+#define WUSBSECKEY 0x48
+#define WUSBPERIODICLISTBASE 0x58
+#define WUSBMASINDEX 0x60
+
+#define WUSBDNTSCTRL 0x64
+# define WUSBDNTSCTRL_ACTIVE (1 << 31)
+# define WUSBDNTSCTRL_INTERVAL(i) ((i) << 8)
+# define WUSBDNTSCTRL_SLOTS(s) ((s) << 0)
+
+#define WUSBTIME 0x68
+# define WUSBTIME_CHANNEL_TIME_MASK 0x00ffffff
+
+#define WUSBBPST 0x6c
+#define WUSBDIBUPDATED 0x70
+
+#endif /* #ifndef _WHCI_WHCI_HC_H */
Index: linux-3.18.21/drivers/usb/host/mtk_test/whci/wusb.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/whci/wusb.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,222 @@
+/*
+ * Wireless Host Controller (WHC) WUSB operations.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/uwb/umc.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+static int whc_update_di(struct whc *whc, int idx)
+{
+ int offset = idx / 32;
+ u32 bit = 1 << (idx % 32);
+
+ le_writel(bit, whc->base + WUSBDIBUPDATED + offset);
+
+ return whci_wait_for(&whc->umc->dev,
+ whc->base + WUSBDIBUPDATED + offset, bit, 0,
+ 100, "DI update");
+}
+
+/*
+ * WHCI starts MMCs based on there being a valid GTK so these need
+ * only start/stop the asynchronous and periodic schedules and send a
+ * channel stop command.
+ */
+
+int whc_wusbhc_start(struct wusbhc *wusbhc)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+
+ asl_start(whc);
+ pzl_start(whc);
+
+ return 0;
+}
+
+void whc_wusbhc_stop(struct wusbhc *wusbhc, int delay)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ u32 stop_time, now_time;
+ int ret;
+
+ pzl_stop(whc);
+ asl_stop(whc);
+
+ now_time = le_readl(whc->base + WUSBTIME) & WUSBTIME_CHANNEL_TIME_MASK;
+ stop_time = (now_time + ((delay * 8) << 7)) & 0x00ffffff;
+ ret = whc_do_gencmd(whc, WUSBGENCMDSTS_CHAN_STOP, stop_time, NULL, 0);
+ if (ret == 0)
+ msleep(delay);
+}
+
+int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
+ u8 handle, struct wuie_hdr *wuie)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ u32 params;
+
+ params = (interval << 24)
+ | (repeat_cnt << 16)
+ | (wuie->bLength << 8)
+ | handle;
+
+ return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_ADD, params, wuie, wuie->bLength);
+}
+
+int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ u32 params;
+
+ params = handle;
+
+ return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_RM, params, NULL, 0);
+}
+
+int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+
+ if (stream_index >= 0)
+ whc_write_wusbcmd(whc, WUSBCMD_WUSBSI_MASK, WUSBCMD_WUSBSI(stream_index));
+
+ return whc_do_gencmd(whc, WUSBGENCMDSTS_SET_MAS, 0, (void *)mas_bm, sizeof(*mas_bm));
+}
+
+int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ int idx = wusb_dev->port_idx;
+ struct di_buf_entry *di = &whc->di_buf[idx];
+ int ret;
+
+ mutex_lock(&whc->mutex);
+
+ uwb_mas_bm_copy_le(di->availability_info, &wusb_dev->availability);
+ di->addr_sec_info &= ~(WHC_DI_DISABLE | WHC_DI_DEV_ADDR_MASK);
+ di->addr_sec_info |= WHC_DI_DEV_ADDR(wusb_dev->addr);
+
+ ret = whc_update_di(whc, idx);
+
+ mutex_unlock(&whc->mutex);
+
+ return ret;
+}
+
+/*
+ * Set the number of Device Notification Time Slots (DNTS) and enable
+ * device notifications.
+ */
+int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ u32 dntsctrl;
+
+ dntsctrl = WUSBDNTSCTRL_ACTIVE
+ | WUSBDNTSCTRL_INTERVAL(interval)
+ | WUSBDNTSCTRL_SLOTS(slots);
+
+ le_writel(dntsctrl, whc->base + WUSBDNTSCTRL);
+
+ return 0;
+}
+
+static int whc_set_key(struct whc *whc, u8 key_index, uint32_t tkid,
+ const void *key, size_t key_size, bool is_gtk)
+{
+ uint32_t setkeycmd;
+ uint32_t seckey[4];
+ int i;
+ int ret;
+
+ memcpy(seckey, key, key_size);
+ setkeycmd = WUSBSETSECKEYCMD_SET | WUSBSETSECKEYCMD_IDX(key_index);
+ if (is_gtk)
+ setkeycmd |= WUSBSETSECKEYCMD_GTK;
+
+ le_writel(tkid, whc->base + WUSBTKID);
+ for (i = 0; i < 4; i++)
+ le_writel(seckey[i], whc->base + WUSBSECKEY + 4*i);
+ le_writel(setkeycmd, whc->base + WUSBSETSECKEYCMD);
+
+ ret = whci_wait_for(&whc->umc->dev, whc->base + WUSBSETSECKEYCMD,
+ WUSBSETSECKEYCMD_SET, 0, 100, "set key");
+
+ return ret;
+}
+
+/**
+ * whc_set_ptk - set the PTK to use for a device.
+ *
+ * The index into the key table for this PTK is the same as the
+ * device's port index.
+ */
+int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
+ const void *ptk, size_t key_size)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ struct di_buf_entry *di = &whc->di_buf[port_idx];
+ int ret;
+
+ mutex_lock(&whc->mutex);
+
+ if (ptk) {
+ ret = whc_set_key(whc, port_idx, tkid, ptk, key_size, false);
+ if (ret)
+ goto out;
+
+ di->addr_sec_info &= ~WHC_DI_KEY_IDX_MASK;
+ di->addr_sec_info |= WHC_DI_SECURE | WHC_DI_KEY_IDX(port_idx);
+ } else
+ di->addr_sec_info &= ~WHC_DI_SECURE;
+
+ ret = whc_update_di(whc, port_idx);
+out:
+ mutex_unlock(&whc->mutex);
+ return ret;
+}
+
+/**
+ * whc_set_gtk - set the GTK for subsequent broadcast packets
+ *
+ * The GTK is stored in the last entry in the key table (the previous
+ * N_DEVICES entries are for the per-device PTKs).
+ */
+int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid,
+ const void *gtk, size_t key_size)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ int ret;
+
+ mutex_lock(&whc->mutex);
+
+ ret = whc_set_key(whc, whc->n_devices, tkid, gtk, key_size, true);
+
+ mutex_unlock(&whc->mutex);
+
+ return ret;
+}
+
+int whc_set_cluster_id(struct whc *whc, u8 bcid)
+{
+ whc_write_wusbcmd(whc, WUSBCMD_BCID_MASK, WUSBCMD_BCID(bcid));
+ return 0;
+}
Index: linux-3.18.21/drivers/usb/host/mtk_test/xhci-dbg.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/xhci-dbg.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,770 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "xhci.h"
+
+#define XHCI_INIT_VALUE 0x0
+
+/* Add verbose debugging later, just print everything for now */
+
+void xhci_dbg_regs(struct xhci_hcd *xhci)
+{
+ u32 temp;
+
+ xhci_dbg(xhci, "// xHCI capability registers at %p:\n",
+ xhci->cap_regs);
+ temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
+ xhci_dbg(xhci, "// @%p = 0x%x (CAPLENGTH AND HCIVERSION)\n",
+ &xhci->cap_regs->hc_capbase, temp);
+ xhci_dbg(xhci, "// CAPLENGTH: 0x%x\n",
+ (unsigned int) HC_LENGTH(temp));
+#if 0
+ xhci_dbg(xhci, "// HCIVERSION: 0x%x\n",
+ (unsigned int) HC_VERSION(temp));
+#endif
+
+ xhci_dbg(xhci, "// xHCI operational registers at %p:\n", xhci->op_regs);
+
+ temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
+ xhci_dbg(xhci, "// @%p = 0x%x RTSOFF\n",
+ &xhci->cap_regs->run_regs_off,
+ (unsigned int) temp & RTSOFF_MASK);
+ xhci_dbg(xhci, "// xHCI runtime registers at %p:\n", xhci->run_regs);
+
+ temp = xhci_readl(xhci, &xhci->cap_regs->db_off);
+ xhci_dbg(xhci, "// @%p = 0x%x DBOFF\n", &xhci->cap_regs->db_off, temp);
+ xhci_dbg(xhci, "// Doorbell array at %p:\n", xhci->dba);
+}
+
+static void xhci_print_cap_regs(struct xhci_hcd *xhci)
+{
+ u32 temp;
+
+ xhci_dbg(xhci, "xHCI capability registers at %p:\n", xhci->cap_regs);
+
+ temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
+ xhci_dbg(xhci, "CAPLENGTH AND HCIVERSION 0x%x:\n",
+ (unsigned int) temp);
+ xhci_dbg(xhci, "CAPLENGTH: 0x%x\n",
+ (unsigned int) HC_LENGTH(temp));
+ xhci_dbg(xhci, "HCIVERSION: 0x%x\n",
+ (unsigned int) HC_VERSION(temp));
+
+ temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
+ xhci_dbg(xhci, "HCSPARAMS 1: 0x%x\n",
+ (unsigned int) temp);
+ xhci_dbg(xhci, " Max device slots: %u\n",
+ (unsigned int) HCS_MAX_SLOTS(temp));
+ xhci_dbg(xhci, " Max interrupters: %u\n",
+ (unsigned int) HCS_MAX_INTRS(temp));
+ xhci_dbg(xhci, " Max ports: %u\n",
+ (unsigned int) HCS_MAX_PORTS(temp));
+
+ temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
+ xhci_dbg(xhci, "HCSPARAMS 2: 0x%x\n",
+ (unsigned int) temp);
+ xhci_dbg(xhci, " Isoc scheduling threshold: %u\n",
+ (unsigned int) HCS_IST(temp));
+ xhci_dbg(xhci, " Maximum allowed segments in event ring: %u\n",
+ (unsigned int) HCS_ERST_MAX(temp));
+
+ temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
+ xhci_dbg(xhci, "HCSPARAMS 3 0x%x:\n",
+ (unsigned int) temp);
+ xhci_dbg(xhci, " Worst case U1 device exit latency: %u\n",
+ (unsigned int) HCS_U1_LATENCY(temp));
+ xhci_dbg(xhci, " Worst case U2 device exit latency: %u\n",
+ (unsigned int) HCS_U2_LATENCY(temp));
+
+ temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
+ xhci_dbg(xhci, "HCC PARAMS 0x%x:\n", (unsigned int) temp);
+ xhci_dbg(xhci, " HC generates %s bit addresses\n",
+ HCC_64BIT_ADDR(temp) ? "64" : "32");
+ /* FIXME */
+ xhci_dbg(xhci, " FIXME: more HCCPARAMS debugging\n");
+
+ temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
+ xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
+}
+
+static void xhci_print_command_reg(struct xhci_hcd *xhci)
+{
+ u32 temp;
+
+ temp = xhci_readl(xhci, &xhci->op_regs->command);
+ xhci_dbg(xhci, "USBCMD 0x%x:\n", temp);
+ xhci_dbg(xhci, " HC is %s\n",
+ (temp & CMD_RUN) ? "running" : "being stopped");
+ xhci_dbg(xhci, " HC has %sfinished hard reset\n",
+ (temp & CMD_RESET) ? "not " : "");
+ xhci_dbg(xhci, " Event Interrupts %s\n",
+ (temp & CMD_EIE) ? "enabled " : "disabled");
+ xhci_dbg(xhci, " Host System Error Interrupts %s\n",
+ (temp & CMD_EIE) ? "enabled " : "disabled");
+ xhci_dbg(xhci, " HC has %sfinished light reset\n",
+ (temp & CMD_LRESET) ? "not " : "");
+}
+
+static void xhci_print_status(struct xhci_hcd *xhci)
+{
+ u32 temp;
+
+ temp = xhci_readl(xhci, &xhci->op_regs->status);
+ xhci_dbg(xhci, "USBSTS 0x%x:\n", temp);
+ xhci_dbg(xhci, " Event ring is %sempty\n",
+ (temp & STS_EINT) ? "not " : "");
+ xhci_dbg(xhci, " %sHost System Error\n",
+ (temp & STS_FATAL) ? "WARNING: " : "No ");
+ xhci_dbg(xhci, " HC is %s\n",
+ (temp & STS_HALT) ? "halted" : "running");
+}
+
+static void xhci_print_op_regs(struct xhci_hcd *xhci)
+{
+ xhci_dbg(xhci, "xHCI operational registers at %p:\n", xhci->op_regs);
+ xhci_print_command_reg(xhci);
+ xhci_print_status(xhci);
+}
+
+static void xhci_print_ports(struct xhci_hcd *xhci)
+{
+ u32 __iomem *addr;
+ int i, j;
+ int ports;
+ char *names[NUM_PORT_REGS] = {
+ "status",
+ "power",
+ "link",
+ "reserved",
+ };
+
+ ports = HCS_MAX_PORTS(xhci->hcs_params1);
+ addr = &xhci->op_regs->port_status_base;
+ for (i = 0; i < ports; i++) {
+ for (j = 0; j < NUM_PORT_REGS; ++j) {
+ xhci_dbg(xhci, "%p port %s reg = 0x%x\n",
+ addr, names[j],
+ (unsigned int) xhci_readl(xhci, addr));
+ addr++;
+ }
+ }
+}
+
+void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num)
+{
+ void *addr;
+ u32 temp;
+ u64 temp_64;
+
+ addr = &ir_set->irq_pending;
+ temp = xhci_readl(xhci, addr);
+ if (temp == XHCI_INIT_VALUE)
+ return;
+
+ xhci_dbg(xhci, " %p: ir_set[%i]\n", ir_set, set_num);
+
+ xhci_dbg(xhci, " %p: ir_set.pending = 0x%x\n", addr,
+ (unsigned int)temp);
+
+ addr = &ir_set->irq_control;
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, " %p: ir_set.control = 0x%x\n", addr,
+ (unsigned int)temp);
+
+ addr = &ir_set->erst_size;
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, " %p: ir_set.erst_size = 0x%x\n", addr,
+ (unsigned int)temp);
+
+ addr = &ir_set->rsvd;
+ temp = xhci_readl(xhci, addr);
+ if (temp != XHCI_INIT_VALUE)
+ xhci_dbg(xhci, " WARN: %p: ir_set.rsvd = 0x%x\n",
+ addr, (unsigned int)temp);
+
+ addr = &ir_set->erst_base;
+ temp_64 = xhci_read_64(xhci, addr);
+ xhci_dbg(xhci, " %p: ir_set.erst_base = @%08llx\n",
+ addr, temp_64);
+
+ addr = &ir_set->erst_dequeue;
+ temp_64 = xhci_read_64(xhci, addr);
+ xhci_dbg(xhci, " %p: ir_set.erst_dequeue = @%08llx\n",
+ addr, temp_64);
+}
+
+void xhci_print_run_regs(struct xhci_hcd *xhci)
+{
+ u32 temp;
+ int i;
+
+ xhci_dbg(xhci, "xHCI runtime registers at %p:\n", xhci->run_regs);
+ temp = xhci_readl(xhci, &xhci->run_regs->microframe_index);
+ xhci_dbg(xhci, " %p: Microframe index = 0x%x\n",
+ &xhci->run_regs->microframe_index,
+ (unsigned int) temp);
+ for (i = 0; i < 7; ++i) {
+ temp = xhci_readl(xhci, &xhci->run_regs->rsvd[i]);
+ if (temp != XHCI_INIT_VALUE)
+ xhci_dbg(xhci, " WARN: %p: Rsvd[%i] = 0x%x\n",
+ &xhci->run_regs->rsvd[i],
+ i, (unsigned int) temp);
+ }
+}
+
+void xhci_print_registers(struct xhci_hcd *xhci)
+{
+ xhci_print_cap_regs(xhci);
+ xhci_print_op_regs(xhci);
+ xhci_print_ports(xhci);
+}
+
+void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb)
+{
+ int i;
+ for (i = 0; i < 4; ++i)
+ xhci_dbg(xhci, "Offset 0x%x = 0x%x\n",
+ i*4, trb->generic.field[i]);
+}
+
+/**
+ * Debug a transfer request block (TRB).
+ */
+void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
+{
+ u64 address;
+ u32 type = le32_to_cpu(xhci_readl(xhci, &trb->link.control)) & TRB_TYPE_BITMASK;
+
+ switch (type) {
+ case TRB_TYPE(TRB_LINK):
+ xhci_dbg(xhci, "Link TRB:\n");
+ xhci_print_trb_offsets(xhci, trb);
+
+ address = le64_to_cpu(trb->link.segment_ptr);
+ xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
+
+ xhci_dbg(xhci, "Interrupter target = 0x%x\n",
+ GET_INTR_TARGET(le32_to_cpu(trb->link.intr_target)));
+ xhci_dbg(xhci, "Cycle bit = %u\n",
+ (unsigned int) (le32_to_cpu(trb->link.control) & TRB_CYCLE));
+ xhci_dbg(xhci, "Toggle cycle bit = %u\n",
+ (unsigned int) (le32_to_cpu(trb->link.control) & LINK_TOGGLE));
+ xhci_dbg(xhci, "No Snoop bit = %u\n",
+ (unsigned int) (le32_to_cpu(trb->link.control) & TRB_NO_SNOOP));
+ break;
+ case TRB_TYPE(TRB_TRANSFER):
+ address = le64_to_cpu(trb->trans_event.buffer);
+ /*
+ * FIXME: look at flags to figure out if it's an address or if
+ * the data is directly in the buffer field.
+ */
+ xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
+ break;
+ case TRB_TYPE(TRB_COMPLETION):
+ address = le64_to_cpu(trb->event_cmd.cmd_trb);
+ xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
+ xhci_dbg(xhci, "Completion status = %u\n",
+ GET_COMP_CODE(le32_to_cpu(trb->event_cmd.status)));
+ xhci_dbg(xhci, "Flags = 0x%x\n", le32_to_cpu(trb->event_cmd.flags));
+ break;
+ default:
+ xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n",
+ (unsigned int) type>>10);
+ xhci_print_trb_offsets(xhci, trb);
+ break;
+ }
+}
+
+/**
+ * Debug a segment with an xHCI ring.
+ *
+ * @return The Link TRB of the segment, or NULL if there is no Link TRB
+ * (which is a bug, since all segments must have a Link TRB).
+ *
+ * Prints out all TRBs in the segment, even those after the Link TRB.
+ *
+ * XXX: should we print out TRBs that the HC owns? As long as we don't
+ * write, that should be fine... We shouldn't expect that the memory pointed to
+ * by the TRB is valid at all. Do we care about ones the HC owns? Probably,
+ * for HC debugging.
+ */
+void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
+{
+ int i;
+ u32 addr = (u32) seg->dma;
+ union xhci_trb *trb = seg->trbs;
+
+ for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
+ trb = &seg->trbs[i];
+ xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
+ lower_32_bits(le64_to_cpu(trb->link.segment_ptr)),
+ upper_32_bits(le64_to_cpu(trb->link.segment_ptr)),
+ (unsigned int) le32_to_cpu(trb->link.intr_target),
+ (unsigned int) le32_to_cpu(trb->link.control));
+ addr += sizeof(*trb);
+ }
+}
+
+void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+ xhci_dbg(xhci, "Ring deq = %p (virt), 0x%llx (dma)\n",
+ ring->dequeue,
+ (unsigned long long)xhci_trb_virt_to_dma(ring->deq_seg,
+ ring->dequeue));
+ xhci_dbg(xhci, "Ring deq updated %u times\n",
+ ring->deq_updates);
+ xhci_dbg(xhci, "Ring enq = %p (virt), 0x%llx (dma)\n",
+ ring->enqueue,
+ (unsigned long long)xhci_trb_virt_to_dma(ring->enq_seg,
+ ring->enqueue));
+ xhci_dbg(xhci, "Ring enq updated %u times\n",
+ ring->enq_updates);
+}
+
+/**
+ * Debugging for an xHCI ring, which is a queue broken into multiple segments.
+ *
+ * Print out each segment in the ring. Check that the DMA address in
+ * each link segment actually matches the segment's stored DMA address.
+ * Check that the link end bit is only set at the end of the ring.
+ * Check that the dequeue and enqueue pointers point to real data in this ring
+ * (not some other ring).
+ */
+void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+ /* FIXME: Throw an error if any segment doesn't have a Link TRB */
+ struct xhci_segment *seg;
+ struct xhci_segment *first_seg = ring->first_seg;
+ xhci_debug_segment(xhci, first_seg);
+
+ if (!ring->enq_updates && !ring->deq_updates) {
+ xhci_dbg(xhci, " Ring has not been updated\n");
+ return;
+ }
+ for (seg = first_seg->next; seg != first_seg; seg = seg->next)
+ xhci_debug_segment(xhci, seg);
+}
+
+void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ struct xhci_virt_ep *ep)
+{
+ int i;
+ struct xhci_ring *ring;
+
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ for (i = 1; i < ep->stream_info->num_streams; i++) {
+ ring = ep->stream_info->stream_rings[i];
+ xhci_dbg(xhci, "Dev %d endpoint %d stream ID %d:\n",
+ slot_id, ep_index, i);
+ xhci_debug_segment(xhci, ring->deq_seg);
+ }
+ } else {
+ ring = ep->ring;
+ if (!ring)
+ return;
+ xhci_dbg(xhci, "Dev %d endpoint ring %d:\n",
+ slot_id, ep_index);
+ xhci_debug_segment(xhci, ring->deq_seg);
+ }
+}
+
+void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
+{
+ u32 addr = (u32) erst->erst_dma_addr;
+ int i;
+ struct xhci_erst_entry *entry;
+
+ for (i = 0; i < erst->num_entries; ++i) {
+ entry = &erst->entries[i];
+ xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
+ (unsigned int) addr,
+ lower_32_bits(le64_to_cpu(entry->seg_addr)),
+ upper_32_bits(le64_to_cpu(entry->seg_addr)),
+ (unsigned int) le32_to_cpu(entry->seg_size),
+ (unsigned int) le32_to_cpu(entry->rsvd));
+ addr += sizeof(*entry);
+ }
+}
+
+void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
+{
+ u64 val;
+
+ val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+ xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n",
+ lower_32_bits(val));
+ xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n",
+ upper_32_bits(val));
+}
+
+/* Print the last 32 bytes for 64-byte contexts */
+static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
+{
+ int i;
+ for (i = 0; i < 4; ++i) {
+ xhci_dbg(xhci, "@%p (virt) @%08llx "
+ "(dma) %#08llx - rsvd64[%d]\n",
+ &ctx[4 + i], (unsigned long long)dma,
+ ctx[4 + i], i);
+ dma += 8;
+ }
+}
+
+char *xhci_get_slot_state(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *ctx)
+{
+ struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
+ return GET_SLOT_STATE(slot_ctx->dev_state);
+#if 0
+ switch (GET_SLOT_STATE(slot_ctx->dev_state)) {
+ case 0:
+ return "enabled/disabled";
+ case 1:
+ return "default";
+ case 2:
+ return "addressed";
+ case 3:
+ return "configured";
+ default:
+ return "reserved";
+ }
+#endif
+}
+
+void mtk_xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
+{
+ /* Fields are 32 bits wide, DMA addresses are in bytes */
+ int field_size = 32 / 8;
+ int i;
+
+ struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
+ dma_addr_t dma = ctx->dma +
+ ((unsigned long)slot_ctx - (unsigned long)ctx->bytes);
+ int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
+
+ xhci_err(xhci, "Slot Context:\n");
+ xhci_err(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
+ &slot_ctx->dev_info,
+ (unsigned long long)dma, le32_to_cpu(slot_ctx->dev_info));
+ dma += field_size;
+ xhci_err(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
+ &slot_ctx->dev_info2,
+ (unsigned long long)dma, le32_to_cpu(slot_ctx->dev_info2));
+ dma += field_size;
+ xhci_err(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
+ &slot_ctx->tt_info,
+ (unsigned long long)dma, le32_to_cpu(slot_ctx->tt_info));
+ dma += field_size;
+ xhci_err(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
+ &slot_ctx->dev_state,
+ (unsigned long long)dma, le32_to_cpu(slot_ctx->dev_state));
+ dma += field_size;
+ for (i = 0; i < 4; ++i) {
+ xhci_err(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
+ &slot_ctx->reserved[i], (unsigned long long)dma,
+ le32_to_cpu(slot_ctx->reserved[i]), i);
+ dma += field_size;
+ }
+ switch (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state))) {
+ case 0:
+ printk("@ enabled/disabled\r\n");
+ break;
+ case 1:
+ printk("@ default\r\n");
+ break;
+ case 2:
+ printk("@ addressed\r\n");
+ break;
+ case 3:
+ printk("@ configured\r\n");
+ break;
+ default:
+ printk("@ reserved\r\n");
+ break;
+ }
+
+ if (csz)
+ dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
+}
+
+void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
+{
+ /* Fields are 32 bits wide, DMA addresses are in bytes */
+ int field_size = 32 / 8;
+ int i;
+
+ struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
+ dma_addr_t dma = ctx->dma +
+ ((unsigned long)slot_ctx - (unsigned long)ctx->bytes);
+ int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
+
+ xhci_dbg(xhci, "Slot Context:\n");
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
+ &slot_ctx->dev_info,
+ (unsigned long long)dma, slot_ctx->dev_info);
+ dma += field_size;
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
+ &slot_ctx->dev_info2,
+ (unsigned long long)dma, slot_ctx->dev_info2);
+ dma += field_size;
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
+ &slot_ctx->tt_info,
+ (unsigned long long)dma, slot_ctx->tt_info);
+ dma += field_size;
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
+ &slot_ctx->dev_state,
+ (unsigned long long)dma, slot_ctx->dev_state);
+ dma += field_size;
+ for (i = 0; i < 4; ++i) {
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
+ &slot_ctx->reserved[i], (unsigned long long)dma,
+ slot_ctx->reserved[i], i);
+ dma += field_size;
+ }
+
+ if (csz)
+ dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
+}
+
+void mtk_xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *ctx,
+ unsigned int last_ep)
+{
+ int i, j;
+ int last_ep_ctx = 31;
+ /* Fields are 32 bits wide, DMA addresses are in bytes */
+ int field_size = 32 / 8;
+ int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
+
+ if (last_ep < 31)
+ last_ep_ctx = last_ep + 1;
+ for (i = 0; i < last_ep_ctx; ++i) {
+ struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i);
+ dma_addr_t dma = ctx->dma +
+ ((unsigned long)ep_ctx - (unsigned long)ctx->bytes);
+
+ xhci_err(xhci, "Endpoint %02d Context:\n", i);
+ xhci_err(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
+ &ep_ctx->ep_info,
+ (unsigned long long)dma, le32_to_cpu(ep_ctx->ep_info));
+ dma += field_size;
+ xhci_err(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
+ &ep_ctx->ep_info2,
+ (unsigned long long)dma, le32_to_cpu(ep_ctx->ep_info2));
+ dma += field_size;
+ xhci_err(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
+ &ep_ctx->deq,
+ (unsigned long long)dma, le64_to_cpu(ep_ctx->deq));
+ dma += 2*field_size;
+ xhci_err(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
+ &ep_ctx->tx_info,
+ (unsigned long long)dma, le32_to_cpu(ep_ctx->tx_info));
+ dma += field_size;
+ for (j = 0; j < 3; ++j) {
+ xhci_err(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
+ &ep_ctx->reserved[j],
+ (unsigned long long)dma,
+ le32_to_cpu(ep_ctx->reserved[j]), j);
+ dma += field_size;
+ }
+
+ if (csz)
+ dbg_rsvd64(xhci, (u64 *)ep_ctx, dma);
+ }
+}
+
+
+void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *ctx,
+ unsigned int last_ep)
+{
+ int i, j;
+ int last_ep_ctx = 31;
+ /* Fields are 32 bits wide, DMA addresses are in bytes */
+ int field_size = 32 / 8;
+ int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
+
+ if (last_ep < 31)
+ last_ep_ctx = last_ep + 1;
+ for (i = 0; i < last_ep_ctx; ++i) {
+ struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i);
+ dma_addr_t dma = ctx->dma +
+ ((unsigned long)ep_ctx - (unsigned long)ctx->bytes);
+
+ xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
+ &ep_ctx->ep_info,
+ (unsigned long long)dma, ep_ctx->ep_info);
+ dma += field_size;
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
+ &ep_ctx->ep_info2,
+ (unsigned long long)dma, ep_ctx->ep_info2);
+ dma += field_size;
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
+ &ep_ctx->deq,
+ (unsigned long long)dma, ep_ctx->deq);
+ dma += 2*field_size;
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
+ &ep_ctx->tx_info,
+ (unsigned long long)dma, ep_ctx->tx_info);
+ dma += field_size;
+ for (j = 0; j < 3; ++j) {
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
+ &ep_ctx->reserved[j],
+ (unsigned long long)dma,
+ ep_ctx->reserved[j], j);
+ dma += field_size;
+ }
+
+ if (csz)
+ dbg_rsvd64(xhci, (u64 *)ep_ctx, dma);
+ }
+}
+
+void mtk_xhci_dbg_ctx(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *ctx,
+ unsigned int last_ep)
+{
+ int i;
+ /* Fields are 32 bits wide, DMA addresses are in bytes */
+ int field_size = 32 / 8;
+ struct xhci_slot_ctx *slot_ctx;
+ dma_addr_t dma = ctx->dma;
+ int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
+
+ if (ctx->type == XHCI_CTX_TYPE_INPUT) {
+ struct xhci_input_control_ctx *ctrl_ctx =
+ xhci_get_input_control_ctx(xhci, ctx);
+ xhci_err(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
+ &ctrl_ctx->drop_flags, (unsigned long long)dma,
+ ctrl_ctx->drop_flags);
+ dma += field_size;
+ xhci_err(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
+ &ctrl_ctx->add_flags, (unsigned long long)dma,
+ ctrl_ctx->add_flags);
+ dma += field_size;
+ for (i = 0; i < 6; ++i) {
+ xhci_err(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
+ &ctrl_ctx->rsvd2[i], (unsigned long long)dma,
+ ctrl_ctx->rsvd2[i], i);
+ dma += field_size;
+ }
+
+ if (csz)
+ dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma);
+ }
+
+ slot_ctx = xhci_get_slot_ctx(xhci, ctx);
+ mtk_xhci_dbg_slot_ctx(xhci, ctx);
+ mtk_xhci_dbg_ep_ctx(xhci, ctx, last_ep);
+}
+
+#define SSUSB_U3_XHCI_BASE 0xBFB90000
+#define XHCI_HSRAM_DBGCTL (SSUSB_U3_XHCI_BASE + 0x900)
+#define XHCI_HSRAM_DBGMODE (SSUSB_U3_XHCI_BASE + 0x904)
+#define XHCI_HSRAM_DBGSEL (SSUSB_U3_XHCI_BASE + 0x908)
+#define XHCI_HSRAM_DBGADR (SSUSB_U3_XHCI_BASE + 0x90c)
+#define XHCI_HSRAM_DBGGDR (SSUSB_U3_XHCI_BASE + 0x910)
+
+void xhci_sram_dbg_init(struct xhci_hcd *xhci, u32 dbg_ctl, u32 dbg_mode, u32 dbg_sel)
+{
+ __u32 __iomem *ptr;
+
+ ptr = (__u32 __iomem *) XHCI_HSRAM_DBGMODE;
+ writel(dbg_mode, ptr);
+
+ ptr = (__u32 __iomem *) XHCI_HSRAM_DBGSEL;
+ writel(dbg_sel, ptr);
+
+ ptr = (__u32 __iomem *) XHCI_HSRAM_DBGCTL;
+ writel(dbg_ctl, ptr);
+}
+
+void xhci_sram_dbg_read(struct xhci_hcd *xhci, u32 dbg_adr)
+{
+ __u32 __iomem *ptr;
+ u32 debug_data[4];
+ u32 i, temp;
+
+ for(i=0; i<4; i++){
+ ptr = (__u32 __iomem *) XHCI_HSRAM_DBGADR;
+ temp = (dbg_adr << 3);
+ temp |= i;
+ writel(temp, ptr);
+
+ ptr = (__u32 __iomem *) XHCI_HSRAM_DBGGDR;
+ debug_data[i] = readl(ptr);
+ }
+ xhci_err(xhci, "0x%x: 0x%x 0x%x 0x%x 0x%x", dbg_adr, le32_to_cpu(debug_data[0]), le32_to_cpu(debug_data[1]), le32_to_cpu(debug_data[2]), le32_to_cpu(debug_data[3]));
+}
+
+void xhci_sram_dbg_write(struct xhci_hcd *xhci, u32 dbg_adr, u32 debug_data)
+{
+ __u32 __iomem *ptr;
+ u32 i, temp;
+
+ ptr = (__u32 __iomem *) XHCI_HSRAM_DBGADR;
+ temp = (dbg_adr << 3);
+ temp |= i;
+ writel(temp, ptr);
+
+ ptr = (__u32 __iomem *) XHCI_HSRAM_DBGGDR;
+ writel(debug_data, ptr);
+}
+void xhci_dbg_ctx(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *ctx,
+ unsigned int last_ep)
+{
+ int i;
+ /* Fields are 32 bits wide, DMA addresses are in bytes */
+ int field_size = 32 / 8;
+ struct xhci_slot_ctx *slot_ctx;
+ dma_addr_t dma = ctx->dma;
+ int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
+
+ if (ctx->type == XHCI_CTX_TYPE_INPUT) {
+ struct xhci_input_control_ctx *ctrl_ctx =
+ xhci_get_input_control_ctx(xhci, ctx);
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
+ &ctrl_ctx->drop_flags, (unsigned long long)dma,
+ ctrl_ctx->drop_flags);
+ dma += field_size;
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
+ &ctrl_ctx->add_flags, (unsigned long long)dma,
+ ctrl_ctx->add_flags);
+ dma += field_size;
+ for (i = 0; i < 6; ++i) {
+ xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
+ &ctrl_ctx->rsvd2[i], (unsigned long long)dma,
+ ctrl_ctx->rsvd2[i], i);
+ dma += field_size;
+ }
+
+ if (csz)
+ dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma);
+ }
+
+ slot_ctx = xhci_get_slot_ctx(xhci, ctx);
+ xhci_dbg_slot_ctx(xhci, ctx);
+ xhci_dbg_ep_ctx(xhci, ctx, last_ep);
+}
Index: linux-3.18.21/drivers/usb/host/mtk_test/xhci-ext-caps.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/xhci-ext-caps.h 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,148 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+/* Up to 16 microframes to halt an HC - one microframe is 125 microsectonds */
+#define XHCI_MAX_HALT_USEC (16*125)
+/* HC not running - set to 1 when run/stop bit is cleared. */
+#define XHCI_STS_HALT (1<<0)
+
+/* HCCPARAMS offset from PCI base address */
+#define XHCI_HCC_PARAMS_OFFSET 0x10
+/* HCCPARAMS contains the first extended capability pointer */
+#define XHCI_HCC_EXT_CAPS(p) (((p)>>16)&0xffff)
+
+/* Command and Status registers offset from the Operational Registers address */
+#define XHCI_CMD_OFFSET 0x00
+#define XHCI_STS_OFFSET 0x04
+
+#define XHCI_MAX_EXT_CAPS 50
+
+/* Capability Register */
+/* bits 7:0 - how long is the Capabilities register */
+#define XHCI_HC_LENGTH(p) (((p)>>00)&0x00ff)
+
+/* Extended capability register fields */
+#define XHCI_EXT_CAPS_ID(p) (((p)>>0)&0xff)
+#define XHCI_EXT_CAPS_NEXT(p) (((p)>>8)&0xff)
+#define XHCI_EXT_CAPS_VAL(p) ((p)>>16)
+/* Extended capability IDs - ID 0 reserved */
+#define XHCI_EXT_CAPS_LEGACY 1
+#define XHCI_EXT_CAPS_PROTOCOL 2
+#define XHCI_EXT_CAPS_PM 3
+#define XHCI_EXT_CAPS_VIRT 4
+#define XHCI_EXT_CAPS_ROUTE 5
+/* IDs 6-9 reserved */
+#define XHCI_EXT_CAPS_DEBUG 10
+/* USB Legacy Support Capability - section 7.1.1 */
+#define XHCI_HC_BIOS_OWNED (1 << 16)
+#define XHCI_HC_OS_OWNED (1 << 24)
+
+/* USB Legacy Support Capability - section 7.1.1 */
+/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
+#define XHCI_LEGACY_SUPPORT_OFFSET (0x00)
+
+/* USB Legacy Support Control and Status Register - section 7.1.2 */
+/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
+#define XHCI_LEGACY_CONTROL_OFFSET (0x04)
+/* bits 1:2, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */
+#define XHCI_LEGACY_DISABLE_SMI ((0x3 << 1) + (0xff << 5) + (0x7 << 17))
+
+/* command register values to disable interrupts and halt the HC */
+/* start/stop HC execution - do not write unless HC is halted*/
+#define XHCI_CMD_RUN (1 << 0)
+/* Event Interrupt Enable - get irq when EINT bit is set in USBSTS register */
+#define XHCI_CMD_EIE (1 << 2)
+/* Host System Error Interrupt Enable - get irq when HSEIE bit set in USBSTS */
+#define XHCI_CMD_HSEIE (1 << 3)
+/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
+#define XHCI_CMD_EWE (1 << 10)
+
+#define XHCI_IRQS (XHCI_CMD_EIE | XHCI_CMD_HSEIE | XHCI_CMD_EWE)
+
+/* true: Controller Not Ready to accept doorbell or op reg writes after reset */
+#define XHCI_STS_CNR (1 << 11)
+
+#include <linux/io.h>
+
+/**
+ * Return the next extended capability pointer register.
+ *
+ * @base PCI register base address.
+ *
+ * @ext_offset Offset of the 32-bit register that contains the extended
+ * capabilites pointer. If searching for the first extended capability, pass
+ * in XHCI_HCC_PARAMS_OFFSET. If searching for the next extended capability,
+ * pass in the offset of the current extended capability register.
+ *
+ * Returns 0 if there is no next extended capability register or returns the register offset
+ * from the PCI registers base address.
+ */
+static inline int xhci_find_next_cap_offset(void __iomem *base, int ext_offset)
+{
+	u32 next;
+
+	next = readl(base + ext_offset);
+
+	if (ext_offset == XHCI_HCC_PARAMS_OFFSET) {
+		/* Find the first extended capability */
+		next = XHCI_HCC_EXT_CAPS(next);
+		ext_offset = 0;	/* first pointer is relative to the register base, not HCCPARAMS */
+	} else {
+		/* Find the next extended capability */
+		next = XHCI_EXT_CAPS_NEXT(next);
+	}
+
+	if (!next)	/* a zero pointer terminates the capability list */
+		return 0;
+	/*
+	 * Address calculation from offset of extended capabilities
+	 * (or HCCPARAMS) register - see section 5.3.6 and section 7.
+	 */
+	return ext_offset + (next << 2);
+}
+
+/**
+ * Find the offset of the extended capabilities with capability ID id.
+ *
+ * @base PCI MMIO registers base address.
+ * @ext_offset Offset from base of the first extended capability to look at,
+ * or the address of HCCPARAMS.
+ * @id Extended capability ID to search for.
+ *
+ * This uses an arbitrary limit of XHCI_MAX_EXT_CAPS extended capabilities
+ * to make sure that the list doesn't contain a loop.
+ */
+static inline int xhci_find_ext_cap_by_id(void __iomem *base, int ext_offset, int id)
+{
+	u32 val;
+	int limit = XHCI_MAX_EXT_CAPS;	/* bounds the walk so a malformed looped list can't hang us */
+
+	while (ext_offset && limit > 0) {
+		val = readl(base + ext_offset);
+		if (XHCI_EXT_CAPS_ID(val) == id)
+			break;
+		ext_offset = xhci_find_next_cap_offset(base, ext_offset);
+		limit--;
+	}
+	if (limit > 0)
+		return ext_offset;	/* matching capability offset, or 0 if the list ended */
+	return 0;
+}
Index: linux-3.18.21/drivers/usb/host/mtk_test/xhci-hub.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/xhci-hub.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,342 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <asm/unaligned.h>
+
+#include "xhci.h"
+#include "mtk-usb-hcd.h"
+
+static void xhci_hub_descriptor(struct xhci_hcd *xhci,
+		struct usb_hub_descriptor *desc)
+{
+	int ports;
+	u16 temp;
+
+	ports = HCS_MAX_PORTS(xhci->hcs_params1);
+
+	/* USB 3.0 hubs have a different descriptor, but we fake this for now */
+	desc->bDescriptorType = 0x29;	/* standard USB 2.0 hub descriptor type */
+	desc->bPwrOn2PwrGood = 10;	/* xhci section 5.4.9 says 20ms max */
+	desc->bHubContrCurrent = 0;
+
+	desc->bNbrPorts = ports;
+	temp = 1 + (ports / 8);	/* bytes needed for the per-port DeviceRemovable bitmap */
+	desc->bDescLength = 7 + 2 * temp;
+
+	/* Why does core/hcd.h define bitmap? It's just confusing. */
+	//memset(&desc->DeviceRemovable[0], 0, temp);
+	//memset(&desc->DeviceRemovable[temp], 0xff, temp);
+
+	/* Ugh, these should be #defines, FIXME */
+	/* Using table 11-13 in USB 2.0 spec. */
+	temp = 0;
+	/* Bits 1:0 - support port power switching, or power always on */
+	if (HCC_PPC(xhci->hcc_params))
+		temp |= 0x0001;
+	else
+		temp |= 0x0002;
+	/* Bit 2 - root hubs are not part of a compound device */
+	/* Bits 4:3 - individual port over current protection */
+	temp |= 0x0008;
+	/* Bits 6:5 - no TTs in root ports */
+	/* Bit 7 - no port indicators */
+	desc->wHubCharacteristics = (__force __u16) cpu_to_le16(temp);
+}
+
+unsigned int xhci_port_speed(unsigned int port_status)
+{
+	if (DEV_LOWSPEED(port_status))
+		return USB_PORT_STAT_LOW_SPEED;
+	if (DEV_HIGHSPEED(port_status))
+		return USB_PORT_STAT_HIGH_SPEED;
+	if (DEV_SUPERSPEED(port_status))
+		return USB_PORT_STAT_SUPER_SPEED;
+	/*
+	 * FIXME: Yes, we should check for full speed, but the core uses that as
+	 * a default in portspeed() in usb/core/hub.c (which is the only place
+	 * USB_PORT_STAT_*_SPEED is used).
+	 */
+	return 0;	/* no speed bit set: core treats this as full speed */
+}
+
+/*
+ * These bits are Read Only (RO) and should be saved and written to the
+ * registers: 0, 3, 10:13, 30
+ * connect status, over-current status, port speed, and device removable.
+ * connect status and port speed are also sticky - meaning they're in
+ * the AUX well and they aren't changed by a hot, warm, or cold reset.
+ */
+#define XHCI_PORT_RO ((1<<0) | (1<<3) | (0xf<<10) | (1<<30))
+/*
+ * These bits are RW; writing a 0 clears the bit, writing a 1 sets the bit:
+ * bits 5:8, 9, 14:15, 25:27
+ * link state, port power, port indicator state, "wake on" enable state
+ */
+#define XHCI_PORT_RWS ((0xf<<5) | (1<<9) | (0x3<<14) | (0x7<<25))
+/*
+ * These bits are RW; writing a 1 sets the bit, writing a 0 has no effect:
+ * bit 4 (port reset)
+ */
+#define XHCI_PORT_RW1S ((1<<4))
+/*
+ * These bits are RW; writing a 1 clears the bit, writing a 0 has no effect:
+ * bits 1, 17, 18, 19, 20, 21, 22, 23
+ * port enable/disable, and
+ * change bits: connect, PED, warm port reset changed (reserved zero for USB 2.0 ports),
+ * over-current, reset, link state, and L1 change
+ */
+#define XHCI_PORT_RW1CS ((1<<1) | (0x7f<<17))
+
+#define XHCI_PORT_RW1C (0x7f<<17)
+/*
+ * Bit 16 is RW, and writing a '1' to it causes the link state control to be
+ * latched in
+ */
+#define XHCI_PORT_RW ((1<<16))
+/*
+ * These bits are Reserved Zero (RsvdZ) and zero should be written to them:
+ * bits 2, 24, 28:31
+ */
+#define XHCI_PORT_RZ ((1<<2) | (1<<24) | (0xf<<28))
+
+/*
+ * Given a port state, this function returns a value that would result in the
+ * port being in the same state, if the value was written to the port status
+ * control register.
+ * Save Read Only (RO) bits and save read/write bits where
+ * writing a 0 clears the bit and writing a 1 sets the bit (RWS).
+ * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect.
+ */
+u32 xhci_port_state_to_neutral(u32 state)
+{
+	/* Save read-only status and port state */
+	return (state & XHCI_PORT_RO) | (state & XHCI_PORT_RWS);	/* drops RW1C/RW1S bits so a write-back has no side effect */
+}
+
+
+u32 xhci_port_state_to_clear_change(u32 state){	/* like xhci_port_state_to_neutral(), but keeps the RW1C change bits so writing the value back clears them */
+	return (state & XHCI_PORT_RO) | (state & XHCI_PORT_RWS) | (state & XHCI_PORT_RW1C);
+}
+
+static void xhci_disable_port(struct xhci_hcd *xhci, u16 wIndex,
+		u32 __iomem *addr, u32 port_status)
+{
+	/* Write 1 to disable the port */
+	xhci_writel(xhci, port_status | PORT_PE, addr);	/* PED is RW1C: writing 1 disables the port */
+	port_status = xhci_readl(xhci, addr);
+	xhci_dbg(xhci, "disable port, actual port %d status  = 0x%x\n",
+			wIndex, port_status);
+}
+
+static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
+		u16 wIndex, u32 __iomem *addr, u32 port_status)
+{
+	char *port_change_bit;	/* name used only for the debug print below */
+	u32 status;
+
+	switch (wValue) {
+	case USB_PORT_FEAT_C_RESET:
+		status = PORT_RC;
+		port_change_bit = "reset";
+		break;
+	case USB_PORT_FEAT_C_CONNECTION:
+		status = PORT_CSC;
+		port_change_bit = "connect";
+		break;
+	case USB_PORT_FEAT_C_OVER_CURRENT:
+		status = PORT_OCC;
+		port_change_bit = "over-current";
+		break;
+	case USB_PORT_FEAT_C_ENABLE:
+		status = PORT_PEC;
+		port_change_bit = "enable/disable";
+		break;
+	default:
+		/* Should never happen */
+		return;
+	}
+	/* Change bits are all write 1 to clear */
+	xhci_writel(xhci, port_status | status, addr);
+	port_status = xhci_readl(xhci, addr);
+	xhci_dbg(xhci, "clear port %s change, actual port %d status  = 0x%x\n",
+			port_change_bit, wIndex, port_status);
+}
+
+int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+		u16 wIndex, char *buf, u16 wLength)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	int ports;
+	unsigned long flags;
+	u32 temp, status;
+	int retval = 0;
+	u32 __iomem *addr;
+
+	ports = HCS_MAX_PORTS(xhci->hcs_params1);
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	switch (typeReq) {
+	case GetHubStatus:
+		/* No power source, over-current reported per port */
+		memset(buf, 0, 4);
+		break;
+	case GetHubDescriptor:
+		xhci_hub_descriptor(xhci, (struct usb_hub_descriptor *) buf);
+		break;
+	case GetPortStatus:
+		if (!wIndex || wIndex > ports)	/* ports are 1-indexed on the wire */
+			goto error;
+		wIndex--;
+		status = 0;
+		addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(wIndex & 0xff);
+		temp = xhci_readl(xhci, addr);
+		xhci_dbg(xhci, "get port status, actual port %d status  = 0x%x\n", wIndex, temp);
+
+		/* wPortChange bits */
+		if (temp & PORT_CSC)
+			status |= USB_PORT_STAT_C_CONNECTION << 16;
+		if (temp & PORT_PEC)
+			status |= USB_PORT_STAT_C_ENABLE << 16;
+		if ((temp & PORT_OCC))
+			status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+		/*
+		 * FIXME ignoring suspend, reset, and USB 2.1/3.0 specific
+		 * changes
+		 */
+		if (temp & PORT_CONNECT) {
+			status |= USB_PORT_STAT_CONNECTION;
+			status |= xhci_port_speed(temp);	/* speed bits only valid while connected */
+		}
+		if (temp & PORT_PE)
+			status |= USB_PORT_STAT_ENABLE;
+		if (temp & PORT_OC)
+			status |= USB_PORT_STAT_OVERCURRENT;
+		if (temp & PORT_RESET)
+			status |= USB_PORT_STAT_RESET;
+		if (temp & PORT_POWER)
+			status |= USB_PORT_STAT_POWER;
+		xhci_dbg(xhci, "Get port status returned 0x%x\n", status);
+		put_unaligned(cpu_to_le32(status), (__le32 *) buf);
+		break;
+	case SetPortFeature:
+		wIndex &= 0xff;
+		if (!wIndex || wIndex > ports)
+			goto error;
+		wIndex--;
+		addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(wIndex & 0xff);
+		temp = xhci_readl(xhci, addr);
+		temp = xhci_port_state_to_neutral(temp);	/* mask RW1C bits so this write clears nothing */
+		switch (wValue) {
+		case USB_PORT_FEAT_POWER:
+			/*
+			 * Turn on ports, even if there isn't per-port switching.
+			 * HC will report connect events even before this is set.
+			 * However, khubd will ignore the roothub events until
+			 * the roothub is registered.
+			 */
+			xhci_writel(xhci, temp | PORT_POWER, addr);
+
+			temp = xhci_readl(xhci, addr);
+			xhci_dbg(xhci, "set port power, actual port %d status  = 0x%x\n", wIndex, temp);
+			break;
+		case USB_PORT_FEAT_RESET:
+			temp = (temp | PORT_RESET);
+			xhci_writel(xhci, temp, addr);
+
+			temp = xhci_readl(xhci, addr);
+			xhci_dbg(xhci, "set port reset, actual port %d status  = 0x%x\n", wIndex, temp);
+			break;
+		default:
+			goto error;
+		}
+		temp = xhci_readl(xhci, addr);	/* unblock any posted writes */
+		break;
+	case ClearPortFeature:
+		if (!wIndex || wIndex > ports)
+			goto error;
+		wIndex--;
+		addr = &xhci->op_regs->port_status_base +
+			NUM_PORT_REGS*(wIndex & 0xff);
+		temp = xhci_readl(xhci, addr);
+		temp = xhci_port_state_to_neutral(temp);
+		switch (wValue) {
+		case USB_PORT_FEAT_C_RESET:
+		case USB_PORT_FEAT_C_CONNECTION:
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+		case USB_PORT_FEAT_C_ENABLE:
+			xhci_clear_port_change_bit(xhci, wValue, wIndex,
+					addr, temp);
+			break;
+		case USB_PORT_FEAT_ENABLE:
+			xhci_disable_port(xhci, wIndex, addr, temp);
+			break;
+		default:
+			goto error;
+		}
+		break;
+	default:
+error:
+		/* "stall" on error */
+		retval = -EPIPE;
+	}
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return retval;
+}
+
+/*
+ * Returns 0 if the status hasn't changed, or the number of bytes in buf.
+ * Ports are 0-indexed from the HCD point of view,
+ * and 1-indexed from the USB core pointer of view.
+ *
+ * Note that the status change bits will be cleared as soon as a port status
+ * change event is generated, so we use the saved status from that event.
+ */
+int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	unsigned long flags;
+	u32 temp, status;
+	int i, retval;
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	int ports;
+	u32 __iomem *addr;
+
+	ports = HCS_MAX_PORTS(xhci->hcs_params1);
+
+	/* Initial status is no changes */
+	retval = (ports + 8) / 8;	/* bitmap size: bit 0 is the hub itself, then one bit per port */
+	memset(buf, 0, retval);
+	status = 0;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	/* For each port, did anything change?  If so, set that bit in buf. */
+	for (i = 0; i < ports; i++) {
+		addr = &xhci->op_regs->port_status_base +
+			NUM_PORT_REGS*i;
+		temp = xhci_readl(xhci, addr);
+		if (temp & (PORT_CSC | PORT_PEC | PORT_OCC)) {
+			buf[(i + 1) / 8] |= 1 << (i + 1) % 8;	/* port bits are 1-indexed in the hub bitmap */
+			status = 1;
+		}
+	}
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return status ? retval : 0;
+}
Index: linux-3.18.21/drivers/usb/host/mtk_test/xhci-mem.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/xhci-mem.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,2008 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/usb.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+
+#include "xhci.h"
+#include "xhci-mtk-scheduler.h"
+
+/*
+ * Allocates a generic ring segment from the ring pool, sets the dma address,
+ * initializes the segment to zero, and sets the private next pointer to NULL.
+ *
+ * Section 4.11.1.1:
+ * "All components of all Command and Transfer TRBs shall be initialized to '0'"
+ */
+struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
+{
+	struct xhci_segment *seg;
+	dma_addr_t dma;
+
+	seg = kzalloc(sizeof *seg, flags);
+	if (!seg)
+		return NULL;
+	xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);
+
+	seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
+	if (!seg->trbs) {
+		kfree(seg);	/* undo the private struct alloc on pool failure */
+		return NULL;
+	}
+	xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
+			seg->trbs, (unsigned long long)dma);
+
+	memset(seg->trbs, 0, SEGMENT_SIZE);	/* section 4.11.1.1: all TRBs must start zeroed */
+	seg->dma = dma;
+	seg->next = NULL;
+
+	return seg;
+}
+
+void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
+{
+	if (!seg)	/* NULL-safe, like kfree() */
+		return;
+	if (seg->trbs) {
+		xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
+				seg->trbs, (unsigned long long)seg->dma);
+		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
+		seg->trbs = NULL;
+	}
+	xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
+	kfree(seg);
+}
+
+/*
+ * Make the prev segment point to the next segment.
+ *
+ * Change the last TRB in the prev segment to be a Link TRB which points to the
+ * DMA address of the next segment. The caller needs to set any Link TRB
+ * related flags, such as End TRB, Toggle Cycle, and no snoop.
+ */
+void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
+		struct xhci_segment *next, bool link_trbs)
+{
+	u32 val;
+
+	if (!prev || !next)	/* nothing to link */
+		return;
+	prev->next = next;
+	if (link_trbs) {
+		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
+			cpu_to_le64(next->dma);
+
+		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
+		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
+		val &= ~TRB_TYPE_BITMASK;
+		val |= TRB_TYPE(TRB_LINK);
+		/* Always set the chain bit with 0.95 hardware */
+		if (xhci_link_trb_quirk(xhci))
+			val |= TRB_CHAIN;
+		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
+	}
+	xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
+			(unsigned long long)prev->dma,
+			(unsigned long long)next->dma);
+}
+
+/* XXX: Do we need the hcd structure in all these functions? */
+void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+	struct xhci_segment *seg;
+	struct xhci_segment *first_seg;
+
+	if (!ring || !ring->first_seg)	/* tolerate partially-constructed rings */
+		return;
+	first_seg = ring->first_seg;
+	seg = first_seg->next;
+	xhci_dbg(xhci, "Freeing ring at %p\n", ring);
+	while (seg != first_seg) {	/* walk the circular list back around to the first segment */
+		struct xhci_segment *next = seg->next;
+		xhci_segment_free(xhci, seg);
+		seg = next;
+	}
+	xhci_segment_free(xhci, first_seg);
+	ring->first_seg = NULL;
+	kfree(ring);
+}
+
+static void xhci_initialize_ring_info(struct xhci_ring *ring)
+{
+	/* The ring is empty, so the enqueue pointer == dequeue pointer */
+	ring->enqueue = ring->first_seg->trbs;	/* both start at the first TRB of the first segment */
+	ring->enq_seg = ring->first_seg;
+	ring->dequeue = ring->enqueue;
+	ring->deq_seg = ring->first_seg;
+	/* The ring is initialized to 0. The producer must write 1 to the cycle
+	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
+	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
+	 */
+	ring->cycle_state = 1;
+	/* Not necessary for new rings, but needed for re-initialized rings */
+	ring->enq_updates = 0;
+	ring->deq_updates = 0;
+}
+
+/**
+ * Create a new ring with zero or more segments.
+ *
+ * Link each segment together into a ring.
+ * Set the end flag and the cycle toggle bit on the last segment.
+ * See section 4.9.1 and figures 15 and 16.
+ */
+static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
+		unsigned int num_segs, bool link_trbs, gfp_t flags)
+{
+	struct xhci_ring	*ring;
+	struct xhci_segment	*prev;
+
+	ring = kzalloc(sizeof *(ring), flags);
+	xhci_dbg(xhci, "Allocating ring at %p\n", ring);
+	if (!ring)
+		return NULL;
+
+	INIT_LIST_HEAD(&ring->td_list);
+	if (num_segs == 0)	/* caller may want an empty shell ring with no segments */
+		return ring;
+
+	ring->first_seg = xhci_segment_alloc(xhci, flags);
+	if (!ring->first_seg)
+		goto fail;
+	num_segs--;
+
+	prev = ring->first_seg;
+	while (num_segs > 0) {
+		struct xhci_segment	*next;
+
+		next = xhci_segment_alloc(xhci, flags);
+		if (!next)
+			goto fail;
+		xhci_link_segments(xhci, prev, next, link_trbs);
+
+		prev = next;
+		num_segs--;
+	}
+	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);	/* close the circle */
+
+	if (link_trbs) {
+		/* See section 4.9.2.1 and 6.4.4.1 */
+		prev->trbs[TRBS_PER_SEGMENT-1].link.control |= cpu_to_le32(LINK_TOGGLE);
+		xhci_dbg(xhci, "Wrote link toggle flag to"
+				" segment %p (virtual), 0x%llx (DMA)\n",
+				prev, (unsigned long long)prev->dma);
+	}
+	xhci_initialize_ring_info(ring);
+//	spin_lock_init(&ring->lock);
+	return ring;
+
+fail:
+	xhci_ring_free(xhci, ring);	/* frees whatever segments were linked so far */
+	return NULL;
+}
+
+void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		unsigned int ep_index)
+{
+	int rings_cached;
+
+	rings_cached = virt_dev->num_rings_cached;
+	if (rings_cached < XHCI_MAX_RINGS_CACHED) {
+		virt_dev->ring_cache[rings_cached] =	/* index with OLD count: fixes unused slot 0 and OOB write at cache-full */
+			virt_dev->eps[ep_index].ring;
+		virt_dev->num_rings_cached++;
+		rings_cached = virt_dev->num_rings_cached;
+		xhci_dbg(xhci, "Cached old ring, "
+				"%d ring%s cached\n",
+				rings_cached,
+				(rings_cached > 1) ? "s" : "");
+	} else {
+		xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
+		xhci_dbg(xhci, "Ring cache full (%d rings), "
+				"freeing ring\n",
+				virt_dev->num_rings_cached);
+	}
+	virt_dev->eps[ep_index].ring = NULL;
+}
+
+/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
+ * pointers to the beginning of the ring.
+ */
+static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
+		struct xhci_ring *ring)
+{
+	struct xhci_segment	*seg = ring->first_seg;
+	do {
+		memset(seg->trbs, 0,
+				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
+		/* All endpoint rings have link TRBs */
+		xhci_link_segments(xhci, seg, seg->next, 1);	/* re-create the link TRB the memset just wiped */
+		seg = seg->next;
+	} while (seg != ring->first_seg);
+	xhci_initialize_ring_info(ring);
+	/* td list should be empty since all URBs have been cancelled,
+	 * but just in case...
+	 */
+	INIT_LIST_HEAD(&ring->td_list);
+}
+
+#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
+
+static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
+		int type, gfp_t flags)
+{
+	struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
+	if (!ctx)
+		return NULL;
+	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
+	ctx->type = type;
+	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
+	if (type == XHCI_CTX_TYPE_INPUT)
+		ctx->size += CTX_SIZE(xhci->hcc_params);
+	ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
+	if (!ctx->bytes) {	/* dma_pool_alloc can fail; don't memset NULL */
+		kfree(ctx); return NULL; }
+	memset(ctx->bytes, 0, ctx->size);
+	return ctx;
+}
+
+static void xhci_free_container_ctx(struct xhci_hcd *xhci,
+			     struct xhci_container_ctx *ctx)
+{
+	if (!ctx)	/* NULL-safe */
+		return;
+	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);	/* bytes came from device_pool in xhci_alloc_container_ctx() */
+	kfree(ctx);
+}
+
+struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
+					      struct xhci_container_ctx *ctx)
+{
+	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
+	return (struct xhci_input_control_ctx *)ctx->bytes;	/* control context is always first in an input context */
+}
+
+struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
+				struct xhci_container_ctx *ctx)
+{
+	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
+		return (struct xhci_slot_ctx *)ctx->bytes;
+
+	return (struct xhci_slot_ctx *)
+		(ctx->bytes + CTX_SIZE(xhci->hcc_params));	/* input contexts put the slot ctx after the control ctx */
+}
+
+struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
+				    struct xhci_container_ctx *ctx,
+				    unsigned int ep_index)
+{
+	/* increment ep index by offset of start of ep ctx array */
+	ep_index++;
+	if (ctx->type == XHCI_CTX_TYPE_INPUT)
+		ep_index++;	/* skip the input control context as well */
+
+	return (struct xhci_ep_ctx *)
+		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
+}
+
+
+/***************** Streams structures manipulation *************************/
+
+void xhci_free_stream_ctx(struct xhci_hcd *xhci,
+		unsigned int num_stream_ctxs,
+		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
+{
+	struct device *dev = xhci_to_hcd(xhci)->self.controller;
+#if 0
+	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+#endif
+	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
+#if 0
+		pci_free_consistent(pdev,
+				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
+				stream_ctx, dma);
+#endif
+		dma_free_coherent(dev, sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
+			stream_ctx, dma);	/* fix: was erst.entries/erst_dma_addr -- freed the ERST, leaked this array */
+	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
+		return dma_pool_free(xhci->small_streams_pool,
+				stream_ctx, dma);
+	else
+		return dma_pool_free(xhci->medium_streams_pool,
+				stream_ctx, dma);
+}
+
+/*
+ * The stream context array for each endpoint with bulk streams enabled can
+ * vary in size, based on:
+ * - how many streams the endpoint supports,
+ * - the maximum primary stream array size the host controller supports,
+ * - and how many streams the device driver asks for.
+ *
+ * The stream context array must be a power of 2, and can be as small as
+ * 64 bytes or as large as 1MB.
+ */
+struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
+		unsigned int num_stream_ctxs, dma_addr_t *dma,
+		gfp_t mem_flags)
+{
+	struct device *dev = xhci_to_hcd(xhci)->self.controller;
+#if 0
+	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+#endif
+	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
+#if 0
+		return pci_alloc_consistent(pdev,
+				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
+				dma);
+#endif
+		return dma_alloc_coherent(dev,
+			sizeof(struct xhci_stream_ctx)*num_stream_ctxs, dma, GFP_ATOMIC);	/* if-body: large arrays come straight from coherent DMA memory */
+	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
+		return dma_pool_alloc(xhci->small_streams_pool,
+				mem_flags, dma);
+	else
+		return dma_pool_alloc(xhci->medium_streams_pool,
+				mem_flags, dma);
+}
+
+struct xhci_ring *xhci_dma_to_transfer_ring(
+		struct xhci_virt_ep *ep,
+		u64 address)
+{
+	if (ep->ep_state & EP_HAS_STREAMS)
+		return radix_tree_lookup(&ep->stream_info->trb_address_map,
+				address >> SEGMENT_SHIFT);	/* keyed by segment-aligned upper address bits */
+	return ep->ring;
+}
+
+/* Only use this when you know stream_info is valid */
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+static struct xhci_ring *dma_to_stream_ring(
+		struct xhci_stream_info *stream_info,
+		u64 address)
+{
+	return radix_tree_lookup(&stream_info->trb_address_map,
+			address >> SEGMENT_SHIFT);	/* key is the segment-aligned upper bits of the TRB DMA address */
+}
+#endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */
+
+struct xhci_ring *xhci_stream_id_to_ring(
+		struct xhci_virt_device *dev,
+		unsigned int ep_index,
+		unsigned int stream_id)
+{
+	struct xhci_virt_ep *ep = &dev->eps[ep_index];
+
+	if (stream_id == 0)	/* stream 0 is the plain endpoint ring */
+		return ep->ring;
+	if (!ep->stream_info)
+		return NULL;
+
+	if (stream_id > ep->stream_info->num_streams)	/* out-of-range stream ID */
+		return NULL;
+	return ep->stream_info->stream_rings[stream_id];
+}
+
+struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
+		unsigned int slot_id, unsigned int ep_index,
+		unsigned int stream_id)
+{
+	struct xhci_virt_ep *ep;
+	xhci_dbg(xhci, "slot_id %d, ep_index %d\n", slot_id, ep_index);
+	ep = &xhci->devs[slot_id]->eps[ep_index];
+	xhci_dbg(xhci, "ep->ep_state 0x%x\n", ep->ep_state);
+	xhci_dbg(xhci, "ep->ring %p\n", ep->ring);	/* fix: was 0x%x with a pointer arg (truncates on 64-bit) */
+	/* Common case: no streams */
+	if (!(ep->ep_state & EP_HAS_STREAMS))
+		return ep->ring;
+
+	if (stream_id == 0) {	/* streams enabled but caller gave no stream ID */
+		xhci_warn(xhci,
+				"WARN: Slot ID %u, ep index %u has streams, "
+				"but URB has no stream ID.\n",
+				slot_id, ep_index);
+		return NULL;
+	}
+
+	if (stream_id < ep->stream_info->num_streams)
+		return ep->stream_info->stream_rings[stream_id];
+
+	xhci_warn(xhci,
+			"WARN: Slot ID %u, ep index %u has "
+			"stream IDs 1 to %u allocated, "
+			"but stream ID %u is requested.\n",
+			slot_id, ep_index,
+			ep->stream_info->num_streams - 1,
+			stream_id);
+	return NULL;
+}
+
+/* Get the right ring for the given URB.
+ * If the endpoint supports streams, boundary check the URB's stream ID.
+ * If the endpoint doesn't support streams, return the singular endpoint ring.
+ */
+struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
+		struct urb *urb)
+{
+	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
+		xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);	/* triad helper boundary-checks urb->stream_id */
+}
+
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+static int xhci_test_radix_tree(struct xhci_hcd *xhci,
+		unsigned int num_streams,
+		struct xhci_stream_info *stream_info)
+{
+	u32 cur_stream;
+	struct xhci_ring *cur_ring;
+	u64 addr;
+
+	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {	/* stream 0 has no ring in the tree */
+		struct xhci_ring *mapped_ring;
+		int trb_size = sizeof(union xhci_trb);
+
+		cur_ring = stream_info->stream_rings[cur_stream];
+		for (addr = cur_ring->first_seg->dma;
+				addr < cur_ring->first_seg->dma + SEGMENT_SIZE;
+				addr += trb_size) {
+			mapped_ring = dma_to_stream_ring(stream_info, addr);
+			if (cur_ring != mapped_ring) {
+				xhci_warn(xhci, "WARN: DMA address 0x%08llx "
+						"didn't map to stream ID %u; "
+						"mapped to ring %p\n",
+						(unsigned long long) addr,
+						cur_stream,
+						mapped_ring);
+				return -EINVAL;
+			}
+		}
+		/* One TRB after the end of the ring segment shouldn't return a
+		 * pointer to the current ring (although it may be a part of a
+		 * different ring).
+		 */
+		mapped_ring = dma_to_stream_ring(stream_info, addr);
+		if (mapped_ring != cur_ring) {
+			/* One TRB before should also fail */
+			addr = cur_ring->first_seg->dma - trb_size;
+			mapped_ring = dma_to_stream_ring(stream_info, addr);
+		}
+		if (mapped_ring == cur_ring) {	/* either probe hitting this ring means a stale/overbroad mapping */
+			xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx "
+					"mapped to valid stream ID %u; "
+					"mapped ring = %p\n",
+					(unsigned long long) addr,
+					cur_stream,
+					mapped_ring);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+#endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */
+
+/*
+ * Change an endpoint's internal structure so it supports stream IDs. The
+ * number of requested streams includes stream 0, which cannot be used by device
+ * drivers.
+ *
+ * The number of stream contexts in the stream context array may be bigger than
+ * the number of streams the driver wants to use. This is because the number of
+ * stream context array entries must be a power of two.
+ *
+ * We need a radix tree for mapping physical addresses of TRBs to which stream
+ * ID they belong to. We need to do this because the host controller won't tell
+ * us which stream ring the TRB came from. We could store the stream ID in an
+ * event data TRB, but that doesn't help us for the cancellation case, since the
+ * endpoint may stop before it reaches that event data TRB.
+ *
+ * The radix tree maps the upper portion of the TRB DMA address to a ring
+ * segment that has the same upper portion of DMA addresses. For example, say I
+ * have segments of size 1KB, that are always 64-byte aligned. A segment may
+ * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the
+ * key to the stream ID is 0x43244. I can use the DMA address of the TRB to
+ * pass the radix tree a key to get the right stream ID:
+ *
+ * 0x10c90fff >> 10 = 0x43243
+ * 0x10c912c0 >> 10 = 0x43244
+ * 0x10c91400 >> 10 = 0x43245
+ *
+ * Obviously, only those TRBs with DMA addresses that are within the segment
+ * will make the radix tree return the stream ID for that ring.
+ *
+ * Caveats for the radix tree:
+ *
+ * The radix tree uses an unsigned long as a key pair. On 32-bit systems, an
+ * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
+ * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
+ * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
+ * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
+ * extended systems (where the DMA address can be bigger than 32-bits),
+ * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
+ */
+struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
+ unsigned int num_stream_ctxs,
+ unsigned int num_streams, gfp_t mem_flags)
+{
+ struct xhci_stream_info *stream_info;
+ u32 cur_stream;
+ struct xhci_ring *cur_ring;
+ unsigned long key;
+ u64 addr;
+ int ret;
+
+ xhci_dbg(xhci, "Allocating %u streams and %u "
+ "stream context array entries.\n",
+ num_streams, num_stream_ctxs);
+ if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
+ xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
+ return NULL;
+ }
+ xhci->cmd_ring_reserved_trbs++;
+
+ stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
+ if (!stream_info)
+ goto cleanup_trbs;
+
+ stream_info->num_streams = num_streams;
+ stream_info->num_stream_ctxs = num_stream_ctxs;
+
+ /* Initialize the array of virtual pointers to stream rings. */
+ stream_info->stream_rings = kzalloc(
+ sizeof(struct xhci_ring *)*num_streams,
+ mem_flags);
+ if (!stream_info->stream_rings)
+ goto cleanup_info;
+
+ /* Initialize the array of DMA addresses for stream rings for the HW. */
+ stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
+ num_stream_ctxs, &stream_info->ctx_array_dma,
+ mem_flags);
+ if (!stream_info->stream_ctx_array)
+ goto cleanup_ctx;
+ memset(stream_info->stream_ctx_array, 0,
+ sizeof(struct xhci_stream_ctx)*num_stream_ctxs);
+
+ /* Allocate everything needed to free the stream rings later */
+ stream_info->free_streams_command =
+ xhci_alloc_command(xhci, true, true, mem_flags);
+ if (!stream_info->free_streams_command)
+ goto cleanup_ctx;
+
+ INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
+
+ /* Allocate rings for all the streams that the driver will use,
+ * and add their segment DMA addresses to the radix tree.
+ * Stream 0 is reserved.
+ */
+ for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
+ stream_info->stream_rings[cur_stream] =
+ xhci_ring_alloc(xhci, 1, true, mem_flags);
+ cur_ring = stream_info->stream_rings[cur_stream];
+ if (!cur_ring)
+ goto cleanup_rings;
+ cur_ring->stream_id = cur_stream;
+ /* Set deq ptr, cycle bit, and stream context type */
+ addr = cur_ring->first_seg->dma |
+ SCT_FOR_CTX(SCT_PRI_TR) |
+ cur_ring->cycle_state;
+ stream_info->stream_ctx_array[cur_stream].stream_ring =
+ cpu_to_le64(addr);
+ xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
+ cur_stream, (unsigned long long) addr);
+
+ key = (unsigned long)
+ (cur_ring->first_seg->dma >> SEGMENT_SHIFT);
+ ret = radix_tree_insert(&stream_info->trb_address_map,
+ key, cur_ring);
+ if (ret) {
+ xhci_ring_free(xhci, cur_ring);
+ stream_info->stream_rings[cur_stream] = NULL;
+ goto cleanup_rings;
+ }
+ }
+ /* Leave the other unused stream ring pointers in the stream context
+ * array initialized to zero. This will cause the xHC to give us an
+ * error if the device asks for a stream ID we don't have setup (if it
+ * was any other way, the host controller would assume the ring is
+ * "empty" and wait forever for data to be queued to that stream ID).
+ */
+#if 0 //XHCI_DEBUG
+ /* Do a little test on the radix tree to make sure it returns the
+ * correct values.
+ */
+ if (xhci_test_radix_tree(xhci, num_streams, stream_info))
+ goto cleanup_rings;
+#endif
+
+ return stream_info;
+
+cleanup_rings:
+ for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
+ cur_ring = stream_info->stream_rings[cur_stream];
+ if (cur_ring) {
+ addr = cur_ring->first_seg->dma;
+ radix_tree_delete(&stream_info->trb_address_map,
+ addr >> SEGMENT_SHIFT);
+ xhci_ring_free(xhci, cur_ring);
+ stream_info->stream_rings[cur_stream] = NULL;
+ }
+ }
+ xhci_free_command(xhci, stream_info->free_streams_command);
+cleanup_ctx:
+ kfree(stream_info->stream_rings);
+cleanup_info:
+ kfree(stream_info);
+cleanup_trbs:
+ xhci->cmd_ring_reserved_trbs--;
+ return NULL;
+}
+/*
+ * Sets the MaxPStreams field and the Linear Stream Array field.
+ * Sets the dequeue pointer to the stream context array.
+ */
+void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
+ struct xhci_ep_ctx *ep_ctx,
+ struct xhci_stream_info *stream_info)
+{
+ u32 max_primary_streams;
+ /* MaxPStreams is the number of stream context array entries, not the
+ * number we're actually using. Must be in 2^(MaxPstreams + 1) format.
+ * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc.
+ */
+ max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
+ xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n",
+ 1 << (max_primary_streams + 1));
+ ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
+ ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
+ | EP_HAS_LSA);
+ ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
+}
+
+/*
+ * Sets the MaxPStreams field and the Linear Stream Array field to 0.
+ * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
+ * not at the beginning of the ring).
+ */
+void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
+ struct xhci_ep_ctx *ep_ctx,
+ struct xhci_virt_ep *ep)
+{
+ dma_addr_t addr;
+ ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
+ addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
+ ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
+}
+
+/* Frees all stream contexts associated with the endpoint,
+ *
+ * Caller should fix the endpoint context streams fields.
+ */
+void xhci_free_stream_info(struct xhci_hcd *xhci,
+ struct xhci_stream_info *stream_info)
+{
+ int cur_stream;
+ struct xhci_ring *cur_ring;
+ dma_addr_t addr;
+
+ if (!stream_info)
+ return;
+
+ for (cur_stream = 1; cur_stream < stream_info->num_streams;
+ cur_stream++) {
+ cur_ring = stream_info->stream_rings[cur_stream];
+ if (cur_ring) {
+ addr = cur_ring->first_seg->dma;
+ radix_tree_delete(&stream_info->trb_address_map,
+ addr >> SEGMENT_SHIFT);
+ xhci_ring_free(xhci, cur_ring);
+ stream_info->stream_rings[cur_stream] = NULL;
+ }
+ }
+ xhci_free_command(xhci, stream_info->free_streams_command);
+ xhci->cmd_ring_reserved_trbs--;
+ if (stream_info->stream_ctx_array)
+ xhci_free_stream_ctx(xhci,
+ stream_info->num_stream_ctxs,
+ stream_info->stream_ctx_array,
+ stream_info->ctx_array_dma);
+
+ if (stream_info)
+ kfree(stream_info->stream_rings);
+ kfree(stream_info);
+}
+
+
+/***************** Device context manipulation *************************/
+
+static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
+ struct xhci_virt_ep *ep)
+{
+ init_timer(&ep->stop_cmd_timer);
+ ep->stop_cmd_timer.data = (unsigned long) ep;
+ ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
+ ep->xhci = xhci;
+}
+
+/* All the xhci_tds in the ring's TD list should be freed at this point */
+void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
+{
+ struct xhci_virt_device *dev;
+ int i;
+
+ /* Slot ID 0 is reserved */
+ if (slot_id == 0 || !xhci->devs[slot_id])
+ return;
+
+ dev = xhci->devs[slot_id];
+ xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
+ if (!dev)
+ return;
+
+ for (i = 0; i < 31; ++i) {
+ if (dev->eps[i].ring)
+ xhci_ring_free(xhci, dev->eps[i].ring);
+ if (dev->eps[i].stream_info)
+ xhci_free_stream_info(xhci,
+ dev->eps[i].stream_info);
+ }
+
+ if (dev->ring_cache) {
+ for (i = 0; i < dev->num_rings_cached; i++)
+ xhci_ring_free(xhci, dev->ring_cache[i]);
+ kfree(dev->ring_cache);
+ }
+
+ if (dev->in_ctx)
+ xhci_free_container_ctx(xhci, dev->in_ctx);
+ if (dev->out_ctx)
+ xhci_free_container_ctx(xhci, dev->out_ctx);
+
+ kfree(xhci->devs[slot_id]);
+ xhci->devs[slot_id] = NULL;
+}
+
+int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
+ struct usb_device *udev, gfp_t flags)
+{
+ struct xhci_virt_device *dev;
+ int i;
+
+ /* Slot ID 0 is reserved */
+ if (slot_id == 0 || xhci->devs[slot_id]) {
+ xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
+ return 0;
+ }
+
+ xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
+ if (!xhci->devs[slot_id])
+ return 0;
+ dev = xhci->devs[slot_id];
+
+ /* Allocate the (output) device context that will be used in the HC. */
+ dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
+ if (!dev->out_ctx)
+ goto fail;
+
+ xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
+ (unsigned long long)dev->out_ctx->dma);
+
+ /* Allocate the (input) device context for address device command */
+ dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
+ if (!dev->in_ctx)
+ goto fail;
+
+ xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
+ (unsigned long long)dev->in_ctx->dma);
+#if 0
+ /* Initialize the cancellation list and watchdog timers for each ep */
+ for (i = 0; i < 31; i++) {
+ xhci_init_endpoint_timer(xhci, &dev->eps[i]);
+ INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
+ }
+#endif
+ /* Allocate endpoint 0 ring */
+ dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
+ if (!dev->eps[0].ring)
+ goto fail;
+
+ /* Allocate pointers to the ring cache */
+ dev->ring_cache = kzalloc(
+ sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
+ flags);
+ if (!dev->ring_cache)
+ goto fail;
+ dev->num_rings_cached = 0;
+#if 0
+ init_completion(&dev->cmd_completion);
+ INIT_LIST_HEAD(&dev->cmd_list);
+#endif
+ /* Point to output device context in dcbaa. */
+ xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
+ xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
+ slot_id,
+ &xhci->dcbaa->dev_context_ptrs[slot_id],
+ le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
+
+ return 1;
+fail:
+ xhci_free_virt_device(xhci, slot_id);
+ return 0;
+}
+
+void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
+ struct usb_device *udev)
+{
+ struct xhci_virt_device *virt_dev;
+ struct xhci_ep_ctx *ep0_ctx;
+ struct xhci_ring *ep_ring;
+
+ virt_dev = xhci->devs[udev->slot_id];
+ ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
+ ep_ring = virt_dev->eps[0].ring;
+ /*
+ * FIXME we don't keep track of the dequeue pointer very well after a
+ * Set TR dequeue pointer, so we're setting the dequeue pointer of the
+ * host to our enqueue pointer. This should only be called after a
+ * configured device has reset, so all control transfers should have
+ * been completed or cancelled before the reset.
+ */
+ ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
+ ep_ring->enqueue)
+ | ep_ring->cycle_state);
+}
+
+/* Setup an xHCI virtual device for a Set Address command */
+int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
+{
+ struct xhci_virt_device *dev;
+ struct xhci_ep_ctx *ep0_ctx;
+ struct usb_device *top_dev;
+ struct xhci_slot_ctx *slot_ctx;
+ struct xhci_input_control_ctx *ctrl_ctx;
+
+ dev = xhci->devs[udev->slot_id];
+ /* Slot ID 0 is reserved */
+ if (udev->slot_id == 0 || !dev) {
+ xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
+ udev->slot_id);
+ return -EINVAL;
+ }
+ ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
+ slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
+
+ /* 2) New slot context and endpoint 0 context are valid*/
+ ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
+
+ /* 3) Only the control endpoint is valid - one endpoint context */
+ slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
+ xhci_dbg(xhci, "device route 0x%x\n", udev->route);
+ xhci_dbg(xhci, "dev_info 0x%x\n", le32_to_cpu(slot_ctx->dev_info));
+ switch (udev->speed) {
+ case USB_SPEED_SUPER:
+ slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
+ break;
+ case USB_SPEED_HIGH:
+ slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
+ break;
+ case USB_SPEED_FULL:
+ slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
+ break;
+ case USB_SPEED_LOW:
+ slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
+ break;
+ case USB_SPEED_WIRELESS:
+ xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
+ return -EINVAL;
+ break;
+ default:
+ /* Speed was set earlier, this shouldn't happen. */
+ xhci_dbg(xhci, "BUG\n");
+ BUG();
+ }
+ /* Find the root hub port this device is under */
+ for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
+ top_dev = top_dev->parent)
+ /* Found device below root hub */;
+ slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(top_dev->portnum));
+ xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);
+
+ /* Is this a LS/FS device under a HS hub? */
+ if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
+ udev->tt) {
+ slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
+ (udev->ttport << 8));
+ if (udev->tt->multi)
+ slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
+ }
+#if 0
+ xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
+ xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
+#endif
+ /* Step 4 - ring already allocated */
+ /* Step 5 */
+ ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
+ /*
+ * XXX: Not sure about wireless USB devices.
+ */
+ switch (udev->speed) {
+ case USB_SPEED_SUPER:
+ ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(512));
+ break;
+ case USB_SPEED_HIGH:
+ /* USB core guesses at a 64-byte max packet first for FS devices */
+ case USB_SPEED_FULL:
+ ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(64));
+ break;
+ case USB_SPEED_LOW:
+ ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(8));
+ break;
+ case USB_SPEED_WIRELESS:
+ xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
+ return -EINVAL;
+ break;
+ default:
+ /* New speed? */
+ xhci_dbg(xhci, "BUG\n");
+ BUG();
+ }
+ /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
+ ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3));
+
+ ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
+ dev->eps[0].ring->cycle_state);
+
+ /* Steps 7 and 8 were done in xhci_alloc_virt_device() */
+ xhci_dbg(xhci, "slot_ctx 0x%x 0x%x 0x%x 0x%x\n", slot_ctx->dev_info, slot_ctx->dev_info2, slot_ctx->tt_info, slot_ctx->dev_state);
+	xhci_dbg(xhci, "ep0_ctx 0x%x 0x%x 0x%llx 0x%x\n", ep0_ctx->ep_info, ep0_ctx->ep_info2, (unsigned long long) ep0_ctx->deq, ep0_ctx->tx_info);
+ xhci_dbg(xhci, "ctrl_ctx 0x%x 0x%x\n", ctrl_ctx->drop_flags, ctrl_ctx->add_flags);
+ return 0;
+}
+
+/* Return the polling or NAK interval.
+ *
+ * The polling interval is expressed in "microframes". If xHCI's Interval field
+ * is set to N, it will service the endpoint every 2^(Interval)*125us.
+ *
+ * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
+ * is set to 0.
+ */
+static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ unsigned int interval = 0;
+
+ switch (udev->speed) {
+ case USB_SPEED_HIGH:
+ /* Max NAK rate */
+ if (usb_endpoint_xfer_control(&ep->desc) ||
+ usb_endpoint_xfer_bulk(&ep->desc))
+ interval = ep->desc.bInterval;
+ /* Fall through - SS and HS isoc/int have same decoding */
+ case USB_SPEED_SUPER:
+ if (usb_endpoint_xfer_int(&ep->desc) ||
+ usb_endpoint_xfer_isoc(&ep->desc)) {
+ if (ep->desc.bInterval == 0)
+ interval = 0;
+ else
+ interval = ep->desc.bInterval - 1;
+ if (interval > 15)
+ interval = 15;
+			if (interval != ep->desc.bInterval - 1)
+ dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
+ ep->desc.bEndpointAddress, 1 << interval);
+ }
+ break;
+ /* Convert bInterval (in 1-255 frames) to microframes and round down to
+ * nearest power of 2.
+ */
+ case USB_SPEED_FULL:
+ case USB_SPEED_LOW:
+ if (usb_endpoint_xfer_int(&ep->desc) ||
+ usb_endpoint_xfer_isoc(&ep->desc)) {
+ interval = fls(8*ep->desc.bInterval) - 1;
+ if (interval > 10)
+ interval = 10;
+ if (interval < 3)
+ interval = 3;
+ if ((1 << interval) != 8*ep->desc.bInterval)
+ dev_warn(&udev->dev,
+ "ep %#x - rounding interval"
+ " to %d microframes, "
+ "ep desc says %d microframes\n",
+ ep->desc.bEndpointAddress,
+ 1 << interval,
+ 8*ep->desc.bInterval);
+ }
+ break;
+ default:
+ BUG();
+ }
+ return EP_INTERVAL(interval);
+}
+
+/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
+ * High speed endpoint descriptors can define "the number of additional
+ * transaction opportunities per microframe", but that goes in the Max Burst
+ * endpoint context field.
+ */
+static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ if (udev->speed != USB_SPEED_SUPER ||
+ !usb_endpoint_xfer_isoc(&ep->desc))
+ return 0;
+ return ep->ss_ep_comp.bmAttributes;
+}
+
+static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ int in;
+ u32 type;
+
+ in = usb_endpoint_dir_in(&ep->desc);
+ if (usb_endpoint_xfer_control(&ep->desc)) {
+ type = EP_TYPE(CTRL_EP);
+ } else if (usb_endpoint_xfer_bulk(&ep->desc)) {
+ if (in)
+ type = EP_TYPE(BULK_IN_EP);
+ else
+ type = EP_TYPE(BULK_OUT_EP);
+ } else if (usb_endpoint_xfer_isoc(&ep->desc)) {
+ if (in)
+ type = EP_TYPE(ISOC_IN_EP);
+ else
+ type = EP_TYPE(ISOC_OUT_EP);
+ } else if (usb_endpoint_xfer_int(&ep->desc)) {
+ if (in)
+ type = EP_TYPE(INT_IN_EP);
+ else
+ type = EP_TYPE(INT_OUT_EP);
+ } else {
+ BUG();
+ }
+ return type;
+}
+
+/* Return the maximum endpoint service interval time (ESIT) payload.
+ * Basically, this is the maxpacket size, multiplied by the burst size
+ * and mult size.
+ */
+u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
+ struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ int max_burst;
+ int max_packet;
+
+ /* Only applies for interrupt or isochronous endpoints */
+ if (usb_endpoint_xfer_control(&ep->desc) ||
+ usb_endpoint_xfer_bulk(&ep->desc))
+ return 0;
+
+ if (udev->speed == USB_SPEED_SUPER)
+ return (ep->ss_ep_comp.wBytesPerInterval);
+
+// max_packet = ep->desc.wMaxPacketSize & 0x3ff;
+ max_packet = ep->desc.wMaxPacketSize & 0x7ff;
+ max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
+ /* A 0 in max burst means 1 transfer per ESIT */
+ return max_packet * (max_burst + 1);
+}
+
+int mtk_xhci_endpoint_scheduler_init(struct xhci_hcd *xhci,
+ struct usb_device *udev,
+ struct usb_host_endpoint *ep,
+ struct xhci_ep_ctx *ep_ctx)
+{
+ int interval;
+ int bPkts = 0;
+ int bCsCount = 0;
+ int bBm = 0;
+ int bOffset = 0;
+ int bRepeat = 0;
+
+ xhci_dbg(xhci, "mtk_xhci_endpoint_scheduler_init is called\n");
+ switch (udev->speed) {
+ case USB_SPEED_SUPER:
+ if(usb_endpoint_xfer_int(&ep->desc)){
+ bPkts = ep->ss_ep_comp.bMaxBurst+1;
+ }
+ else if(usb_endpoint_xfer_isoc(&ep->desc)){
+ interval = xhci_get_endpoint_interval(udev, ep);
+ if(interval == 0){
+ bPkts = (ep->ss_ep_comp.bMaxBurst+1) * (ep->ss_ep_comp.bmAttributes+1);
+ bBm = 1;
+ }
+ else{
+ bPkts = (ep->ss_ep_comp.bMaxBurst+1);
+ bRepeat = 1;
+ }
+ }
+ else{
+ bPkts = 0;
+ }
+ break;
+ case USB_SPEED_HIGH:
+ if(usb_endpoint_xfer_int(&ep->desc)){
+ bPkts = ((ep->desc.wMaxPacketSize & 0x1800) >> 11) + 1;
+ }
+ if(usb_endpoint_xfer_isoc(&ep->desc)){
+ bPkts = ((ep->desc.wMaxPacketSize & 0x1800) >> 11) + 1;
+ }
+ break;
+ case USB_SPEED_FULL:
+ if(usb_endpoint_xfer_int(&ep->desc)){
+ bPkts = 1;
+ }
+ if(usb_endpoint_xfer_isoc(&ep->desc)){
+ bPkts = 1;
+ }
+ break;
+ case USB_SPEED_LOW:
+ if(usb_endpoint_xfer_int(&ep->desc)){
+ bPkts = 1;
+ }
+ break;
+ }
+ ep_ctx->reserved[0] |= (BPKTS(bPkts) | BCSCOUNT(bCsCount) | BBM(bBm));
+ ep_ctx->reserved[1] |= (BOFFSET(bOffset) | BREPEAT(bRepeat));
+ return 0;
+}
+
+
+/* Set up an endpoint with one ring segment. Do not allocate stream rings.
+ * Drivers will have to call usb_alloc_streams() to do that.
+ */
+int xhci_endpoint_init(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ struct usb_device *udev,
+ struct usb_host_endpoint *ep,
+ gfp_t mem_flags)
+{
+ unsigned int ep_index;
+ struct xhci_ep_ctx *ep_ctx;
+ struct xhci_ring *ep_ring;
+ unsigned int max_packet;
+ unsigned int max_burst;
+ u32 max_esit_payload;
+
+#if MTK_SCH_NEW
+ struct xhci_slot_ctx *slot_ctx;
+ struct sch_ep *sch_ep;
+ int isTT;
+ int ep_type;
+ int maxp;
+ int burst;
+ int mult;
+ int interval;
+#endif
+
+
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
+
+ /* Set up the endpoint ring */
+ virt_dev->eps[ep_index].new_ring =
+ xhci_ring_alloc(xhci, 1, true, mem_flags);
+ if (!virt_dev->eps[ep_index].new_ring) {
+ /* Attempt to use the ring cache */
+ if (virt_dev->num_rings_cached == 0)
+ return -ENOMEM;
+ virt_dev->eps[ep_index].new_ring =
+ virt_dev->ring_cache[virt_dev->num_rings_cached];
+ virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
+ virt_dev->num_rings_cached--;
+ xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
+ }
+ ep_ring = virt_dev->eps[ep_index].new_ring;
+ ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state);
+
+ ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep)
+ | EP_MULT(xhci_get_endpoint_mult(udev, ep)));
+
+ /* FIXME dig Mult and streams info out of ep companion desc */
+
+ /* Allow 3 retries for everything but isoc;
+ * error count = 0 means infinite retries.
+ */
+ if (!usb_endpoint_xfer_isoc(&ep->desc))
+ ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(3));
+ else
+ ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(0));
+
+ ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep));
+
+ /* Set the max packet size and max burst */
+ switch (udev->speed) {
+ case USB_SPEED_SUPER:
+ max_packet = ep->desc.wMaxPacketSize;
+ ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
+ /* dig out max burst from ep companion desc */
+ max_packet = ep->ss_ep_comp.bMaxBurst;
+ if (!max_packet)
+ xhci_warn(xhci, "WARN no SS endpoint bMaxBurst\n");
+ ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet));
+ break;
+ case USB_SPEED_HIGH:
+ /* bits 11:12 specify the number of additional transaction
+ * opportunities per microframe (USB 2.0, section 9.6.6)
+ */
+ if (usb_endpoint_xfer_isoc(&ep->desc) ||
+ usb_endpoint_xfer_int(&ep->desc)) {
+ max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
+ ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst));
+ }
+ case USB_SPEED_FULL:
+ case USB_SPEED_LOW:
+ /* high speed intr allow 1024 bytes, should not & 0x3ff */
+ max_packet = ep->desc.wMaxPacketSize & 0x7ff;
+ ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
+ break;
+ default:
+ BUG();
+ }
+ max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
+ ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));
+
+ /*
+ * XXX no idea how to calculate the average TRB buffer length for bulk
+ * endpoints, as the driver gives us no clue how big each scatter gather
+ * list entry (or buffer) is going to be.
+ *
+ * For isochronous and interrupt endpoints, we set it to the max
+ * available, until we have new API in the USB core to allow drivers to
+ * declare how much bandwidth they actually need.
+ *
+ * Normally, it would be calculated by taking the total of the buffer
+ * lengths in the TD and then dividing by the number of TRBs in a TD,
+ * including link TRBs, No-op TRBs, and Event data TRBs. Since we don't
+ * use Event Data TRBs, and we don't chain in a link TRB on short
+ * transfers, we're basically dividing by 1.
+ */
+ ep_ctx->tx_info |=
+ cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload));
+
+ /* FIXME Debug endpoint context */
+ /* MTK scheduler parameters */
+ //mtk_xhci_endpoint_scheduler_init(xhci, udev, ep, ep_ctx);
+#if MTK_SCH_NEW
+ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+ if((le32_to_cpu(slot_ctx->tt_info) & 0xff) > 0){
+ isTT = 1;
+ }
+ else{
+ isTT = 0;
+ }
+ if(usb_endpoint_xfer_int(&ep->desc)){
+ ep_type = USB_EP_INT;
+ }
+ else if(usb_endpoint_xfer_isoc(&ep->desc)){
+ ep_type = USB_EP_ISOC;
+ }
+ else if(usb_endpoint_xfer_bulk(&ep->desc)){
+ ep_type = USB_EP_BULK;
+ }
+ if(udev->speed == USB_SPEED_FULL || udev->speed == USB_SPEED_HIGH
+ || udev->speed == USB_SPEED_LOW){
+ maxp = ep->desc.wMaxPacketSize & 0x7FF;
+ burst = ep->desc.wMaxPacketSize >> 11;
+ mult = 0;
+ }
+ else if(udev->speed == USB_SPEED_SUPER){
+ maxp = ep->desc.wMaxPacketSize & 0x7FF;
+ burst = ep->ss_ep_comp.bMaxBurst;
+ mult = ep->ss_ep_comp.bmAttributes & 0x3;
+ }
+ interval = (1 << ((le32_to_cpu(ep_ctx->ep_info) >> 16) & 0xff));
+ sch_ep = kmalloc(sizeof(struct sch_ep), GFP_KERNEL);
+ if(mtk_xhci_scheduler_add_ep(udev->speed, usb_endpoint_dir_in(&ep->desc),
+ isTT, ep_type, maxp, interval, burst, mult, (mtk_u32 *)ep
+ , (mtk_u32 *)ep_ctx, sch_ep) != SCH_SUCCESS){
+ xhci_err(xhci, "[MTK] not enough bandwidth\n");
+ return -ENOSPC;
+ }
+#else
+ mtk_xhci_scheduler_add_ep(xhci, udev, ep, ep_ctx);
+ xhci_dbg(xhci, "Endpoint %02d Context: %#08x %#08x %#08llx %#08x %#08x %#08x %#08x\n"
+ , ep_index, ep_ctx->ep_info, ep_ctx->ep_info2, ep_ctx->deq, ep_ctx->tx_info
+ , ep_ctx->reserved[0], ep_ctx->reserved[1], ep_ctx->reserved[2]);
+#endif
+ return 0;
+}
+
+void xhci_endpoint_zero(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ struct usb_host_endpoint *ep)
+{
+ unsigned int ep_index;
+ struct xhci_ep_ctx *ep_ctx;
+
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
+
+ ep_ctx->ep_info = 0;
+ ep_ctx->ep_info2 = 0;
+ ep_ctx->deq = 0;
+ ep_ctx->tx_info = 0;
+ /* Don't free the endpoint ring until the set interface or configuration
+ * request succeeds.
+ */
+}
+
+/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
+ * Useful when you want to change one particular aspect of the endpoint and then
+ * issue a configure endpoint command.
+ */
+void xhci_endpoint_copy(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx,
+ struct xhci_container_ctx *out_ctx,
+ unsigned int ep_index)
+{
+ struct xhci_ep_ctx *out_ep_ctx;
+ struct xhci_ep_ctx *in_ep_ctx;
+
+ out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
+ in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
+
+ in_ep_ctx->ep_info = out_ep_ctx->ep_info;
+ in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
+ in_ep_ctx->deq = out_ep_ctx->deq;
+ in_ep_ctx->tx_info = out_ep_ctx->tx_info;
+}
+
+/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
+ * Useful when you want to change one particular aspect of the endpoint and then
+ * issue a configure endpoint command. Only the context entries field matters,
+ * but we'll copy the whole thing anyway.
+ */
+void xhci_slot_copy(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx,
+ struct xhci_container_ctx *out_ctx)
+{
+ struct xhci_slot_ctx *in_slot_ctx;
+ struct xhci_slot_ctx *out_slot_ctx;
+
+ in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
+ out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);
+
+ in_slot_ctx->dev_info = out_slot_ctx->dev_info;
+ in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
+ in_slot_ctx->tt_info = out_slot_ctx->tt_info;
+ in_slot_ctx->dev_state = out_slot_ctx->dev_state;
+}
+#if 0
+/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
+static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
+{
+ int i;
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
+
+ xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);
+
+ if (!num_sp)
+ return 0;
+
+ xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
+ if (!xhci->scratchpad)
+ goto fail_sp;
+
+ xhci->scratchpad->sp_array =
+ pci_alloc_consistent(to_pci_dev(dev),
+ num_sp * sizeof(u64),
+ &xhci->scratchpad->sp_dma);
+ if (!xhci->scratchpad->sp_array)
+ goto fail_sp2;
+
+ xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
+ if (!xhci->scratchpad->sp_buffers)
+ goto fail_sp3;
+
+ xhci->scratchpad->sp_dma_buffers =
+ kzalloc(sizeof(dma_addr_t) * num_sp, flags);
+
+ if (!xhci->scratchpad->sp_dma_buffers)
+ goto fail_sp4;
+
+ xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
+ for (i = 0; i < num_sp; i++) {
+ dma_addr_t dma;
+#if 0
+ void *buf = pci_alloc_consistent(to_pci_dev(dev),
+ xhci->page_size, &dma);
+#endif
+ void *buf = dma_alloc_coherent(dev,
+ xhci->page_size, &dma, GFP_ATOMIC);
+ if (!buf)
+ goto fail_sp5;
+
+ xhci->scratchpad->sp_array[i] = dma;
+ xhci->scratchpad->sp_buffers[i] = buf;
+ xhci->scratchpad->sp_dma_buffers[i] = dma;
+ }
+
+ return 0;
+
+ fail_sp5:
+ for (i = i - 1; i >= 0; i--) {
+#if 0
+ pci_free_consistent(to_pci_dev(dev), xhci->page_size,
+ xhci->scratchpad->sp_buffers[i],
+ xhci->scratchpad->sp_dma_buffers[i]);
+#endif
+ dma_free_coherent(dev, xhci->page_size,
+ xhci->scratchpad->sp_buffers[i],
+ xhci->scratchpad->sp_dma_buffers[i]);
+ }
+ kfree(xhci->scratchpad->sp_dma_buffers);
+
+ fail_sp4:
+ kfree(xhci->scratchpad->sp_buffers);
+
+ fail_sp3:
+ dma_free_coherent(dev, num_sp * sizeof(u64),
+ xhci->scratchpad->sp_array,
+ xhci->scratchpad->sp_dma);
+#if 0
+ pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64),
+ xhci->scratchpad->sp_array,
+ xhci->scratchpad->sp_dma);
+#endif
+
+ fail_sp2:
+ kfree(xhci->scratchpad);
+ xhci->scratchpad = NULL;
+
+ fail_sp:
+ return -ENOMEM;
+}
+#endif
+static void scratchpad_free(struct xhci_hcd *xhci)
+{
+ int num_sp;
+ int i;
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
+#if 0
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+#endif
+ if (!xhci->scratchpad)
+ return;
+
+ num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
+
+ for (i = 0; i < num_sp; i++) {
+#if 0
+ pci_free_consistent(pdev, xhci->page_size,
+ xhci->scratchpad->sp_buffers[i],
+ xhci->scratchpad->sp_dma_buffers[i]);
+#endif
+ dma_free_coherent(dev, xhci->page_size,
+ xhci->scratchpad->sp_buffers[i],
+ xhci->scratchpad->sp_dma_buffers[i]);
+ }
+ kfree(xhci->scratchpad->sp_dma_buffers);
+ kfree(xhci->scratchpad->sp_buffers);
+#if 0
+ pci_free_consistent(pdev, num_sp * sizeof(u64),
+ xhci->scratchpad->sp_array,
+ xhci->scratchpad->sp_dma);
+#endif
+ dma_free_coherent(dev, num_sp * sizeof(u64),
+ xhci->scratchpad->sp_array,
+ xhci->scratchpad->sp_dma);
+ kfree(xhci->scratchpad);
+ xhci->scratchpad = NULL;
+}
+
+struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
+ bool allocate_in_ctx, bool allocate_completion,
+ gfp_t mem_flags)
+{
+ struct xhci_command *command;
+
+ command = kzalloc(sizeof(*command), mem_flags);
+ if (!command)
+ return NULL;
+
+ if (allocate_in_ctx) {
+ command->in_ctx =
+ xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
+ mem_flags);
+ if (!command->in_ctx) {
+ kfree(command);
+ return NULL;
+ }
+ }
+
+ if (allocate_completion) {
+ command->completion =
+ kzalloc(sizeof(struct completion), mem_flags);
+ if (!command->completion) {
+ xhci_free_container_ctx(xhci, command->in_ctx);
+ kfree(command);
+ return NULL;
+ }
+ init_completion(command->completion);
+ }
+
+ command->status = 0;
+ INIT_LIST_HEAD(&command->cmd_list);
+ return command;
+}
+
+void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
+{
+ int last;
+
+ if (!urb_priv)
+ return;
+
+ last = urb_priv->length - 1;
+ if (last >= 0) {
+ int i;
+ for (i = 0; i <= last; i++)
+ kfree(urb_priv->td[i]);
+ }
+ kfree(urb_priv);
+}
+
+
+void xhci_free_command(struct xhci_hcd *xhci, /* Counterpart of xhci_alloc_command(): release ctx, completion, command. */
+		struct xhci_command *command)
+{
+	xhci_free_container_ctx(xhci,
+			command->in_ctx);
+	kfree(command->completion); /* kfree(NULL) is a no-op, so the optional member is safe */
+	kfree(command);
+}
+
+void xhci_mem_cleanup(struct xhci_hcd *xhci) /* Tear down everything xhci_mem_init() built; each step guards against partial init. */
+{
+	struct device *dev = xhci_to_hcd(xhci)->self.controller;
+#if 0
+	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); /* dead code: old PCI variant kept for reference */
+#endif
+	int size;
+	int i;
+
+	/* Free the Event Ring Segment Table and the actual Event Ring */
+	if (xhci->ir_set) {
+		xhci_writel(xhci, 0, &xhci->ir_set->erst_size); /* tell HW the ERST is gone before freeing it */
+		xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
+		xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
+	}
+	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
+	if (xhci->erst.entries)
+		dma_free_coherent(dev, size,
+				xhci->erst.entries, xhci->erst.erst_dma_addr);
+#if 0
+	pci_free_consistent(pdev, size, /* dead code: replaced by dma_free_coherent above */
+			xhci->erst.entries, xhci->erst.erst_dma_addr);
+#endif
+	xhci->erst.entries = NULL;
+	xhci_dbg(xhci, "Freed ERST\n");
+	if (xhci->event_ring)
+		xhci_ring_free(xhci, xhci->event_ring);
+	xhci->event_ring = NULL;
+	xhci_dbg(xhci, "Freed event ring\n");
+
+	xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring); /* clear CRCR before freeing the command ring */
+	if (xhci->cmd_ring)
+		xhci_ring_free(xhci, xhci->cmd_ring);
+	xhci->cmd_ring = NULL;
+	xhci_dbg(xhci, "Freed command ring\n");
+
+	for (i = 1; i < MAX_HC_SLOTS; ++i) /* slot 0 is reserved by the xHCI spec */
+		xhci_free_virt_device(xhci, i);
+
+	if (xhci->segment_pool)
+		dma_pool_destroy(xhci->segment_pool);
+	xhci->segment_pool = NULL;
+	xhci_dbg(xhci, "Freed segment pool\n");
+
+	if (xhci->device_pool)
+		dma_pool_destroy(xhci->device_pool);
+	xhci->device_pool = NULL;
+	xhci_dbg(xhci, "Freed device context pool\n");
+
+	if (xhci->small_streams_pool)
+		dma_pool_destroy(xhci->small_streams_pool);
+	xhci->small_streams_pool = NULL;
+	xhci_dbg(xhci, "Freed small stream array pool\n");
+
+	if (xhci->medium_streams_pool)
+		dma_pool_destroy(xhci->medium_streams_pool);
+	xhci->medium_streams_pool = NULL;
+	xhci_dbg(xhci, "Freed medium stream array pool\n");
+
+	xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
+	if (xhci->dcbaa)
+		dma_free_coherent(dev, sizeof(*xhci->dcbaa), /* dcbaa->dma is read before the buffer is released */
+				xhci->dcbaa, xhci->dcbaa->dma);
+#if 0
+	pci_free_consistent(pdev, sizeof(*xhci->dcbaa), /* dead code: PCI variant */
+			xhci->dcbaa, xhci->dcbaa->dma);
+#endif
+	xhci->dcbaa = NULL;
+
+	scratchpad_free(xhci); /* no-op when xhci->scratchpad is NULL */
+	xhci->page_size = 0;
+	xhci->page_shift = 0;
+}
+
+static int xhci_test_trb_in_td(struct xhci_hcd *xhci, /* Self-test helper: check trb_in_td() maps input_dma to result_seg; 0 on pass, -1 on fail. */
+		struct xhci_segment *input_seg,
+		union xhci_trb *start_trb,
+		union xhci_trb *end_trb,
+		dma_addr_t input_dma,
+		struct xhci_segment *result_seg,
+		char *test_name, int test_number)
+{
+	unsigned long long start_dma;
+	unsigned long long end_dma;
+	struct xhci_segment *seg;
+
+	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb); /* only used for the diagnostic print below */
+	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);
+
+	seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
+	if (seg != result_seg) {
+		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
+				test_name, test_number);
+		xhci_warn(xhci, "Tested TRB math w/ seg %p and "
+				"input DMA 0x%llx\n",
+				input_seg,
+				(unsigned long long) input_dma);
+		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
+				"ending TRB %p (0x%llx DMA)\n",
+				start_trb, start_dma,
+				end_trb, end_dma);
+		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
+				result_seg, seg);
+		return -1;
+	}
+	return 0;
+}
+
+/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
+static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags) /* Run the simple and complex TRB-math test vectors; 0 on success. */
+{
+	struct {
+		dma_addr_t input_dma;
+		struct xhci_segment *result_seg;
+	} simple_test_vector [] = { /* all use the event ring's first segment as the TD span */
+		/* A zeroed DMA field should fail */
+		{ 0, NULL },
+		/* One TRB before the ring start should fail */
+		{ xhci->event_ring->first_seg->dma - 16, NULL },
+		/* One byte before the ring start should fail */
+		{ xhci->event_ring->first_seg->dma - 1, NULL },
+		/* Starting TRB should succeed */
+		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
+		/* Ending TRB should succeed */
+		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
+			xhci->event_ring->first_seg },
+		/* One byte after the ring end should fail */
+		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
+		/* One TRB after the ring end should fail */
+		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
+		/* An address of all ones should fail */
+		{ (dma_addr_t) (~0), NULL },
+	};
+	struct {
+		struct xhci_segment *input_seg;
+		union xhci_trb *start_trb;
+		union xhci_trb *end_trb;
+		dma_addr_t input_dma;
+		struct xhci_segment *result_seg;
+	} complex_test_vector [] = { /* vectors mixing event-ring and cmd-ring segments; 16 == sizeof(union xhci_trb) */
+		/* Test feeding a valid DMA address from a different ring */
+		{ .input_seg = xhci->event_ring->first_seg,
+			.start_trb = xhci->event_ring->first_seg->trbs,
+			.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+			.input_dma = xhci->cmd_ring->first_seg->dma,
+			.result_seg = NULL,
+		},
+		/* Test feeding a valid end TRB from a different ring */
+		{ .input_seg = xhci->event_ring->first_seg,
+			.start_trb = xhci->event_ring->first_seg->trbs,
+			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+			.input_dma = xhci->cmd_ring->first_seg->dma,
+			.result_seg = NULL,
+		},
+		/* Test feeding a valid start and end TRB from a different ring */
+		{ .input_seg = xhci->event_ring->first_seg,
+			.start_trb = xhci->cmd_ring->first_seg->trbs,
+			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+			.input_dma = xhci->cmd_ring->first_seg->dma,
+			.result_seg = NULL,
+		},
+		/* TRB in this ring, but after this TD */
+		{ .input_seg = xhci->event_ring->first_seg,
+			.start_trb = &xhci->event_ring->first_seg->trbs[0],
+			.end_trb = &xhci->event_ring->first_seg->trbs[3],
+			.input_dma = xhci->event_ring->first_seg->dma + 4*16,
+			.result_seg = NULL,
+		},
+		/* TRB in this ring, but before this TD */
+		{ .input_seg = xhci->event_ring->first_seg,
+			.start_trb = &xhci->event_ring->first_seg->trbs[3],
+			.end_trb = &xhci->event_ring->first_seg->trbs[6],
+			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
+			.result_seg = NULL,
+		},
+		/* TRB in this ring, but after this wrapped TD */
+		{ .input_seg = xhci->event_ring->first_seg,
+			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
+			.end_trb = &xhci->event_ring->first_seg->trbs[1],
+			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
+			.result_seg = NULL,
+		},
+		/* TRB in this ring, but before this wrapped TD */
+		{ .input_seg = xhci->event_ring->first_seg,
+			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
+			.end_trb = &xhci->event_ring->first_seg->trbs[1],
+			.input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
+			.result_seg = NULL,
+		},
+		/* TRB not in this ring, and we have a wrapped TD */
+		{ .input_seg = xhci->event_ring->first_seg,
+			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
+			.end_trb = &xhci->event_ring->first_seg->trbs[1],
+			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
+			.result_seg = NULL,
+		},
+	};
+
+	unsigned int num_tests;
+	int i, ret;
+
+	num_tests = ARRAY_SIZE(simple_test_vector);
+	for (i = 0; i < num_tests; i++) {
+		ret = xhci_test_trb_in_td(xhci,
+				xhci->event_ring->first_seg,
+				xhci->event_ring->first_seg->trbs,
+				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+				simple_test_vector[i].input_dma,
+				simple_test_vector[i].result_seg,
+				"Simple", i);
+		if (ret < 0)
+			return ret; /* abort on first failing vector */
+	}
+
+	num_tests = ARRAY_SIZE(complex_test_vector);
+	for (i = 0; i < num_tests; i++) {
+		ret = xhci_test_trb_in_td(xhci,
+				complex_test_vector[i].input_seg,
+				complex_test_vector[i].start_trb,
+				complex_test_vector[i].end_trb,
+				complex_test_vector[i].input_dma,
+				complex_test_vector[i].result_seg,
+				"Complex", i);
+		if (ret < 0)
+			return ret;
+	}
+	xhci_dbg(xhci, "TRB math tests passed.\n");
+	return 0;
+}
+
+
+int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) /* Allocate all HC data structures (DCBAA, pools, rings, ERST); -ENOMEM on failure. */
+{
+	dma_addr_t dma;
+	struct device *dev = xhci_to_hcd(xhci)->self.controller;
+	unsigned int val, val2;
+	u64 val_64;
+	struct xhci_segment *seg;
+	u32 page_size;
+	int i;
+
+	page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
+	xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
+	for (i = 0; i < 16; i++) { /* find the lowest set bit = smallest supported page size */
+		if ((0x1 & page_size) != 0)
+			break;
+		page_size = page_size >> 1;
+	}
+	if (i < 16)
+		xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
+	else
+		xhci_warn(xhci, "WARN: no supported page size\n");
+	/* Use 4K pages, since that's common and the minimum the HC supports */
+	xhci->page_shift = 12;
+	xhci->page_size = 1 << xhci->page_shift;
+	xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);
+
+	/*
+	 * Program the Number of Device Slots Enabled field in the CONFIG
+	 * register with the max value of slots the HC can handle.
+	 */
+	val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
+	xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
+			(unsigned int) val);
+	val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
+	val |= (val2 & ~HCS_SLOTS_MASK); /* preserve the non-slot bits of CONFIG */
+	xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
+			(unsigned int) val);
+	xhci_writel(xhci, val, &xhci->op_regs->config_reg);
+
+	/*
+	 * Section 5.4.8 - doorbell array must be
+	 * "physically contiguous and 64-byte (cache line) aligned".
+	 */
+	xhci->dcbaa = dma_alloc_coherent(dev, /* NOTE(review): uses GFP_ATOMIC rather than the caller's 'flags' -- confirm intent */
+			sizeof(*xhci->dcbaa), &dma, GFP_ATOMIC);
+#if 0
+	xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev), /* dead code: PCI variant */
+			sizeof(*xhci->dcbaa), &dma);
+#endif
+	if (!xhci->dcbaa)
+		goto fail;
+	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
+	xhci->dcbaa->dma = dma;
+	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
+			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
+	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
+
+	/*
+	 * Initialize the ring segment pool. The ring must be a contiguous
+	 * structure comprised of TRBs. The TRBs must be 16 byte aligned,
+	 * however, the command ring segment needs 64-byte aligned segments,
+	 * so we pick the greater alignment need.
+	 */
+	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
+			SEGMENT_SIZE, 64, xhci->page_size);
+
+	/* See Table 46 and Note on Figure 55 */
+	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
+			2112, 64, xhci->page_size); /* 2112 = max context size (Table 46) */
+	if (!xhci->segment_pool || !xhci->device_pool)
+		goto fail;
+
+	/* Linear stream context arrays don't have any boundary restrictions,
+	 * and only need to be 16-byte aligned.
+	 */
+	xhci->small_streams_pool =
+		dma_pool_create("xHCI 256 byte stream ctx arrays",
+			dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
+	xhci->medium_streams_pool =
+		dma_pool_create("xHCI 1KB stream ctx arrays",
+			dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
+	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
+	 * will be allocated with pci_alloc_consistent()
+	 */
+
+	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
+		goto fail;
+
+	/* Set up the command ring to have one segments for now. */
+	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
+	if (!xhci->cmd_ring)
+		goto fail;
+	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
+	xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
+			(unsigned long long)xhci->cmd_ring->first_seg->dma);
+
+	/* Set the address in the Command Ring Control register */
+	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
+		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
+		xhci->cmd_ring->cycle_state;
+	xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val); /* NOTE(review): prints stale 'val', not val_64 -- debug output only */
+	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
+	xhci_dbg_cmd_ptrs(xhci);
+
+	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
+	val &= DBOFF_MASK;
+	xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
+			" from cap regs base addr\n", val);
+	xhci->dba = (void *) xhci->cap_regs + val;
+	xhci_dbg_regs(xhci);
+	xhci_print_run_regs(xhci);
+	/* Set ir_set to interrupt register set 0 */
+	xhci->ir_set = (void *) xhci->run_regs->ir_set;
+
+	/*
+	 * Event ring setup: Allocate a normal ring, but also setup
+	 * the event ring segment table (ERST). Section 4.9.3.
+	 */
+	xhci_dbg(xhci, "// Allocating event ring\n");
+	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
+	if (!xhci->event_ring)
+		goto fail;
+	if (xhci_check_trb_in_td_math(xhci, flags) < 0) /* sanity-check TRB address math before going live */
+		goto fail;
+#if 0
+	xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev), /* dead code: PCI variant */
+			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
+#endif
+	xhci->erst.entries = dma_alloc_coherent(dev,
+			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma, GFP_ATOMIC);
+	if (!xhci->erst.entries)
+		goto fail;
+	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
+			(unsigned long long)dma);
+
+	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
+	xhci->erst.num_entries = ERST_NUM_SEGS;
+	xhci->erst.erst_dma_addr = dma;
+	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
+			xhci->erst.num_entries,
+			xhci->erst.entries,
+			(unsigned long long)xhci->erst.erst_dma_addr);
+
+	/* set ring base address and size for each segment table entry */
+	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
+		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
+		entry->seg_addr = cpu_to_le64(seg->dma);
+		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
+		entry->rsvd = 0;
+		seg = seg->next; /* segments form a linked list in ring order */
+	}
+
+	/* set ERST count with the number of entries in the segment table */
+	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
+	val &= ERST_SIZE_MASK; /* preserve reserved bits */
+	val |= ERST_NUM_SEGS;
+	xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
+			val);
+	xhci_writel(xhci, val, &xhci->ir_set->erst_size);
+
+	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
+	/* set the segment table base address */
+	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
+			(unsigned long long)xhci->erst.erst_dma_addr);
+	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
+	val_64 &= ERST_PTR_MASK; /* keep the reserved low bits of the register */
+	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
+	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
+
+	/* Set the event ring dequeue address */
+	xhci_set_hc_event_deq(xhci);
+	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
+	xhci_print_ir_set(xhci, xhci->ir_set, 0);
+
+	/*
+	 * XXX: Might need to set the Interrupter Moderation Register to
+	 * something other than the default (~1ms minimum between interrupts).
+	 * See section 5.5.1.2.
+	 */
+	init_completion(&xhci->addr_dev);
+	for (i = 0; i < MAX_HC_SLOTS; ++i)
+		xhci->devs[i] = NULL;
+#if 0
+	if (scratchpad_alloc(xhci, flags)) /* scratchpad disabled in this port; scratchpad_free() in cleanup handles NULL */
+		goto fail;
+#endif
+	return 0;
+
+fail:
+	xhci_warn(xhci, "Couldn't initialize memory\n");
+	xhci_mem_cleanup(xhci); /* unwinds whatever was allocated above */
+	return -ENOMEM;
+}
Index: linux-3.18.21/drivers/usb/host/mtk_test/xhci-mtk-power.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/xhci-mtk-power.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,257 @@
+#include "xhci-mtk-power.h"
+#include "xhci.h"
+#include <linux/kernel.h> /* printk() */
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+#define CON_HOST_DEV 0
+
+#if CON_HOST_DEV
+//filter out those U3 ports assigned to the device controller
+int getU3PortNumber(){ // Return the count of U3 ports currently selected as host ports.
+	int port_num;
+	int real_port_num;
+	int i, temp;
+
+	//total U3 port count from the IP capability register
+	port_num = SSUSB_U3_PORT_NUM(readl(SSUSB_IP_CAP));
+	real_port_num = port_num;
+	for(i=0; i<port_num; i++){
+		temp = readl(SSUSB_U3_CTRL(i));
+		if(!(temp & SSUSB_U3_PORT_HOST_SEL)){
+			real_port_num--; //this port belongs to the device controller
+		}
+	}
+	return real_port_num;
+}
+
+//filter out those U2 ports assigned to the device controller
+int getU2PortNumber(){ // Return the count of U2 ports currently selected as host ports.
+	int port_num;
+	int real_port_num;
+	int i, temp;
+
+	//total U2 port count from the IP capability register
+	port_num = SSUSB_U2_PORT_NUM(readl(SSUSB_IP_CAP));
+	real_port_num = port_num;
+	for(i=0; i<port_num; i++){
+		temp = readl(SSUSB_U2_CTRL(i));
+		if(!(temp & SSUSB_U2_PORT_HOST_SEL)){
+			real_port_num--; //this port belongs to the device controller
+		}
+	}
+	return real_port_num;
+}
+
+//map a host-only port index back to the physical port index, skipping device-assigned ports
+int getRealPortIndex(int port_index, int port_rev){ // port_rev: 0x3 = SuperSpeed, else HS; falls back to port_index if not found.
+	int real_port_index, tmp_port_index;
+	int i, temp;
+	int portNum;
+
+	real_port_index = 0;
+	tmp_port_index = 0;
+	if(port_rev == 0x3){
+		//SS port
+		portNum = getU3PortNumber(); // NOTE(review): returns host-only count, yet the loop below walks physical indices -- confirm
+		for(i=0; i<portNum; i++){
+			temp = SSUSB_U3_CTRL(i); // NOTE(review): no readl() here (unlike getU3PortNumber); temp holds the register address, so the HOST_SEL test looks wrong -- confirm
+			tmp_port_index++;
+			if(temp & SSUSB_U3_PORT_HOST_SEL){
+				real_port_index++;
+				if(real_port_index == port_index){
+					return tmp_port_index;
+				}
+			}
+		}
+	}
+	else{
+		//HS port
+		portNum = getU2PortNumber();
+		for(i=0; i<portNum; i++){
+			temp = SSUSB_U2_CTRL(i); // NOTE(review): same missing readl() as above -- confirm
+			tmp_port_index++;
+			if(temp & SSUSB_U2_PORT_HOST_SEL){
+				real_port_index++;
+				if(real_port_index == port_index){
+					return tmp_port_index;
+				}
+			}
+		}
+	}
+	return port_index; //fallback: assume 1:1 mapping
+}
+
+#endif
+
+/* Set PORT_POWER in the PORTSC register of every root-hub port (U3 ports first, then U2). */
+void enableXhciAllPortPower(struct xhci_hcd *xhci){
+	int i;
+	u32 port_id, temp;
+	u32 __iomem *addr;
+
+#if CON_HOST_DEV
+	g_num_u3_port = getU3PortNumber(); //host-selected ports only
+	g_num_u2_port = getU2PortNumber();
+#else
+	g_num_u3_port = SSUSB_U3_PORT_NUM(readl(SSUSB_IP_CAP)); //all ports; also caches counts in the header-static globals
+	g_num_u2_port = SSUSB_U2_PORT_NUM(readl(SSUSB_IP_CAP));
+#endif
+	for(i=1; i<=g_num_u3_port; i++){
+		port_id=i; //xHCI port IDs are 1-based; U3 ports come first
+		addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(port_id-1 & 0xff); //(port_id-1) & 0xff: '-' binds tighter than '&'
+		temp = xhci_readl(xhci, addr);
+		temp = xhci_port_state_to_neutral(temp); //avoid clearing write-1-to-clear status bits
+		temp |= PORT_POWER;
+		xhci_writel(xhci, temp, addr);
+	}
+	for(i=1; i<=g_num_u2_port; i++){
+		port_id=i+g_num_u3_port; //U2 ports follow the U3 block
+		addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(port_id-1 & 0xff);
+		temp = xhci_readl(xhci, addr);
+		temp = xhci_port_state_to_neutral(temp);
+		temp |= PORT_POWER;
+		xhci_writel(xhci, temp, addr);
+	}
+}
+
+void enableAllClockPower(){ // Reset the SSUSB IP, power it up, then un-gate clock/power of every (host) port.
+
+	int i;
+	u32 temp;
+	int num_u3_port;
+	int num_u2_port;
+
+	num_u3_port = SSUSB_U3_PORT_NUM(readl(SSUSB_IP_CAP));
+	num_u2_port = SSUSB_U2_PORT_NUM(readl(SSUSB_IP_CAP));
+
+	//2. Enable xHC (IP software reset pulse, then clear power-down)
+	writel(readl(SSUSB_IP_PW_CTRL) | (SSUSB_IP_SW_RST), SSUSB_IP_PW_CTRL);
+	writel(readl(SSUSB_IP_PW_CTRL) & (~SSUSB_IP_SW_RST), SSUSB_IP_PW_CTRL);
+	writel(readl(SSUSB_IP_PW_CTRL_1) & (~SSUSB_IP_PDN), SSUSB_IP_PW_CTRL_1);
+
+	//1. Enable target ports (clear per-port power-down and disable bits)
+	for(i=0; i<num_u3_port; i++){
+		temp = readl(SSUSB_U3_CTRL(i));
+#if CON_HOST_DEV
+		if(temp & SSUSB_U3_PORT_HOST_SEL){ //only touch host-selected ports
+			temp = temp & (~SSUSB_U3_PORT_PDN) & (~SSUSB_U3_PORT_DIS);
+		}
+#else
+		temp = temp & (~SSUSB_U3_PORT_PDN) & (~SSUSB_U3_PORT_DIS);
+#endif
+
+		writel(temp, SSUSB_U3_CTRL(i));
+	}
+	for(i=0; i<num_u2_port; i++){
+		temp = readl(SSUSB_U2_CTRL(i));
+#if CON_HOST_DEV
+		if(temp & SSUSB_U2_PORT_HOST_SEL){
+			temp = temp & (~SSUSB_U2_PORT_PDN) & (~SSUSB_U2_PORT_DIS);
+		}
+#else
+		temp = temp & (~SSUSB_U2_PORT_PDN) & (~SSUSB_U2_PORT_DIS);
+#endif
+		writel(temp, SSUSB_U2_CTRL(i));
+
+	}
+	msleep(100); //allow port power/clock to stabilize
+}
+
+//called after the HC has been initialized
+void disableAllClockPower(){ // Gate clock/power of every (host) port by setting the per-port PDN bits.
+	int i;
+	u32 temp;
+	int num_u3_port;
+	int num_u2_port;
+
+	num_u3_port = SSUSB_U3_PORT_NUM(readl(SSUSB_IP_CAP));
+	num_u2_port = SSUSB_U2_PORT_NUM(readl(SSUSB_IP_CAP));
+
+	//disable target ports
+	for(i=0; i<num_u3_port; i++){
+		temp = readl(SSUSB_U3_CTRL(i));
+#if CON_HOST_DEV
+		if(temp & SSUSB_U3_PORT_HOST_SEL){ //only touch host-selected ports
+			temp = temp | SSUSB_U3_PORT_PDN;
+		}
+#else
+		temp = temp | SSUSB_U3_PORT_PDN;
+#endif
+		writel(temp, SSUSB_U3_CTRL(i));
+	}
+	for(i=0; i<num_u2_port; i++){
+		temp = readl(SSUSB_U2_CTRL(i));
+#if CON_HOST_DEV
+		if(temp & SSUSB_U2_PORT_HOST_SEL){
+			temp = temp | SSUSB_U2_PORT_PDN;
+		}
+#else
+		temp = temp | SSUSB_U2_PORT_PDN;
+#endif
+		writel(temp, SSUSB_U2_CTRL(i));
+	}
+	msleep(100); //allow gating to take effect
+}
+
+//NOTE: this does not directly gate a single port's clock/power;
+//it only marks to HW that the port (and, once all ports are marked,
+//the IP) may have its power/clock gated when suspended.
+//All ports must be marked first, or the IP clock/power will never be
+//gated (some U2 and U3 ports are bound to the same connector, so they
+//never enter suspend at the same time).
+//port_index: port number
+//port_rev: 0x2 - USB2.0, 0x3 - USB3.0 (SuperSpeed)
+void disablePortClockPower(int port_index, int port_rev){
+	int i; // NOTE(review): unused
+	u32 temp;
+	int real_index;
+
+#if CON_HOST_DEV
+	real_index = getRealPortIndex(port_index, port_rev); //skip device-assigned ports
+#else
+	real_index = port_index;
+#endif
+
+
+	if(port_rev == 0x3){
+		temp = readl(SSUSB_U3_CTRL(real_index));
+		temp = temp | (SSUSB_U3_PORT_PDN);
+		writel(temp, SSUSB_U3_CTRL(real_index));
+	}
+	else if(port_rev == 0x2){
+		temp = readl(SSUSB_U2_CTRL(real_index));
+		temp = temp | (SSUSB_U2_PORT_PDN);
+		writel(temp, SSUSB_U2_CTRL(real_index));
+	}
+	writel(readl(SSUSB_IP_PW_CTRL_1) | (SSUSB_IP_PDN), SSUSB_IP_PW_CTRL_1); //also mark the whole IP as power-down capable
+}
+
+//if the IP ctrl power is gated, un-gate it,
+//then enable clock/power of the given port
+//port_index: port number
+//port_rev: 0x2 - USB2.0, 0x3 - USB3.0 (SuperSpeed)
+void enablePortClockPower(int port_index, int port_rev){
+	int i; // NOTE(review): unused
+	u32 temp;
+	int real_index;
+
+#if CON_HOST_DEV
+	real_index = getRealPortIndex(port_index, port_rev); //skip device-assigned ports
+#else
+	real_index = port_index;
+#endif
+
+	writel(readl(SSUSB_IP_PW_CTRL_1) & (~SSUSB_IP_PDN), SSUSB_IP_PW_CTRL_1); //un-gate the IP first
+
+	if(port_rev == 0x3){
+		temp = readl(SSUSB_U3_CTRL(real_index));
+		temp = temp & (~SSUSB_U3_PORT_PDN);
+		writel(temp, SSUSB_U3_CTRL(real_index));
+	}
+	else if(port_rev == 0x2){
+		temp = readl(SSUSB_U2_CTRL(real_index));
+		temp = temp & (~SSUSB_U2_PORT_PDN);
+		writel(temp, SSUSB_U2_CTRL(real_index));
+	}
+}
Index: linux-3.18.21/drivers/usb/host/mtk_test/xhci-mtk-power.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/xhci-mtk-power.h 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,17 @@
+#ifndef _XHCI_MTK_POWER_H
+#define _XHCI_MTK_POWER_H
+
+#include <linux/usb.h>
+#include "xhci.h"
+#include "mtk-test-lib.h"
+
+static int g_num_u3_port; // NOTE(review): static data in a header gives each includer its own copy -- confirm only xhci-mtk-power.c relies on these
+static int g_num_u2_port; // cached port counts, filled in by enableXhciAllPortPower()
+
+void enableXhciAllPortPower(struct xhci_hcd *xhci); // power on every U2/U3 root-hub port
+void enableAllClockPower(); // NOTE(review): old-style () declaration; prefer (void)
+void disablePortClockPower(); // NOTE(review): definition takes (int port_index, int port_rev); this unprototyped declaration hides the mismatch
+void disableAllClockPower(); // gate clock/power of all ports
+void enablePortClockPower(int port_index, int port_rev);
+
+#endif
Index: linux-3.18.21/drivers/usb/host/mtk_test/xhci-mtk-scheduler.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/xhci-mtk-scheduler.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,601 @@
+#include "xhci-mtk-scheduler.h"
+#include <linux/kernel.h> /* printk() */
+
+int mtk_xhci_scheduler_init(void){ // Reset all four scheduler endpoint tables to empty; always returns 0.
+	int i;
+
+	for(i=0; i<MAX_EP_NUM; i++){
+		ss_out_eps[i] = NULL; //SuperSpeed OUT endpoints
+	}
+	for(i=0; i<MAX_EP_NUM; i++){
+		ss_in_eps[i] = NULL; //SuperSpeed IN endpoints
+	}
+	for(i=0; i<MAX_EP_NUM; i++){
+		hs_eps[i] = NULL; //high-speed (and TT isoc) endpoints
+	}
+	for(i=0; i<MAX_EP_NUM; i++){
+		tt_intr_eps[i] = NULL; //low/full-speed interrupt endpoints behind a TT
+	}
+	return 0;
+}
+
+int add_sch_ep(int dev_speed, int is_in, int isTT, int ep_type, int maxp, int interval, int burst /* Record a scheduled endpoint in the table matching its speed/type; SCH_FAIL when the table is full. */
+	, int mult, int offset, int repeat, int pkts, int cs_count, int burst_mode
+	, int bw_cost, mtk_u32 *ep, struct sch_ep *tmp_ep){
+
+	struct sch_ep **ep_array;
+	int i;
+
+	if(is_in && dev_speed == USB_SPEED_SUPER ){
+		ep_array = (struct sch_ep **)ss_in_eps;
+	}
+	else if(dev_speed == USB_SPEED_SUPER){
+		ep_array = (struct sch_ep **)ss_out_eps;
+	}
+	else if(dev_speed == USB_SPEED_HIGH || (isTT && ep_type == USB_EP_ISOC)){
+		ep_array = (struct sch_ep **)hs_eps; //TT isoc shares the HS table (its splits consume HS bus time)
+	}
+	else{
+		ep_array = (struct sch_ep **)tt_intr_eps;
+	}
+	for(i=0; i<MAX_EP_NUM; i++){
+		if(ep_array[i] == NULL){ //first free slot wins
+			tmp_ep->dev_speed = dev_speed;
+			tmp_ep->isTT = isTT;
+			tmp_ep->is_in = is_in;
+			tmp_ep->ep_type = ep_type;
+			tmp_ep->maxp = maxp;
+			tmp_ep->interval = interval;
+			tmp_ep->burst = burst;
+			tmp_ep->mult = mult;
+			tmp_ep->offset = offset;
+			tmp_ep->repeat = repeat;
+			tmp_ep->pkts = pkts;
+			tmp_ep->cs_count = cs_count;
+			tmp_ep->burst_mode = burst_mode;
+			tmp_ep->bw_cost = bw_cost;
+			tmp_ep->ep = ep;
+			ep_array[i] = tmp_ep;
+			return SCH_SUCCESS;
+		}
+	}
+	return SCH_FAIL; //table full
+}
+
+int count_ss_bw(int is_in, int ep_type, int maxp, int interval, int burst, int mult, int offset, int repeat /* Compute SuperSpeed BW needed at 'offset' incl. already-scheduled eps that collide with it. */
+	, int td_size){
+	int i, j, k;
+	int bw_required[3]; //per burst-slot BW when repeat != 0 (mult <= 2)
+	int final_bw_required;
+	int bw_required_per_repeat;
+	int tmp_bw_required;
+	struct sch_ep *cur_sch_ep;
+	struct sch_ep **ep_array;
+	int cur_offset;
+	int cur_ep_offset;
+	int tmp_offset;
+	int tmp_interval;
+	int ep_offset;
+	int ep_interval;
+	int ep_repeat;
+	int ep_mult;
+
+	if(is_in){
+		ep_array = (struct sch_ep **)ss_in_eps; //IN and OUT budgets are independent on SS
+	}
+	else{
+		ep_array = (struct sch_ep **)ss_out_eps;
+	}
+
+	bw_required[0] = 0;
+	bw_required[1] = 0;
+	bw_required[2] = 0;
+
+	if(repeat == 0){
+		final_bw_required = 0;
+		for(i=0; i<MAX_EP_NUM; i++){
+			cur_sch_ep = ep_array[i];
+			if(cur_sch_ep == NULL){
+				continue;
+			}
+			ep_interval = cur_sch_ep->interval;
+			ep_offset = cur_sch_ep->offset;
+			if(cur_sch_ep->repeat == 0){
+				if(ep_interval >= interval){ //normalize: phase-difference test against the smaller interval
+					tmp_offset = ep_offset + ep_interval - offset;
+					tmp_interval = interval;
+				}
+				else{
+					tmp_offset = offset + interval - ep_offset;
+					tmp_interval = ep_interval;
+				}
+				if(tmp_offset % tmp_interval == 0){ //service slots coincide -> add its cost
+					final_bw_required += cur_sch_ep->bw_cost;
+				}
+			}
+			else{
+				ep_repeat = cur_sch_ep->repeat;
+				ep_mult = cur_sch_ep->mult;
+				for(k=0; k<=ep_mult; k++){
+					cur_ep_offset = ep_offset+(k*ep_mult); // NOTE(review): stride is k*ep_mult here but k*ep_repeat in the matching loop below -- confirm which is intended
+					if(ep_interval >= interval){
+						tmp_offset = cur_ep_offset + ep_interval - offset;
+						tmp_interval = interval;
+					}
+					else{
+						tmp_offset = offset + interval - cur_ep_offset;
+						tmp_interval = ep_interval;
+					}
+					if(tmp_offset % tmp_interval == 0){
+						final_bw_required += cur_sch_ep->bw_cost;
+						break; //count each colliding ep once
+					}
+				}
+			}
+		}
+		final_bw_required += td_size;
+	}
+	else{
+		bw_required_per_repeat = maxp * (burst+1); //cost of one burst of the new ep
+		for(j=0; j<=mult; j++){
+			tmp_bw_required = 0;
+			cur_offset = offset+(j*repeat); //each burst slot is 'repeat' uframes apart
+			for(i=0; i<MAX_EP_NUM; i++){
+				cur_sch_ep = ep_array[i];
+				if(cur_sch_ep == NULL){
+					continue;
+				}
+				ep_interval = cur_sch_ep->interval;
+				ep_offset = cur_sch_ep->offset;
+				if(cur_sch_ep->repeat == 0){
+					if(ep_interval >= interval){
+						tmp_offset = ep_offset + ep_interval - cur_offset;
+						tmp_interval = interval;
+					}
+					else{
+						tmp_offset = cur_offset + interval - ep_offset;
+						tmp_interval = ep_interval;
+					}
+					if(tmp_offset % tmp_interval == 0){
+						tmp_bw_required += cur_sch_ep->bw_cost;
+					}
+				}
+				else{
+					ep_repeat = cur_sch_ep->repeat;
+					ep_mult = cur_sch_ep->mult;
+					for(k=0; k<=ep_mult; k++){
+						cur_ep_offset = ep_offset+(k*ep_repeat);
+						if(ep_interval >= interval){
+							tmp_offset = cur_ep_offset + ep_interval - cur_offset;
+							tmp_interval = interval;
+						}
+						else{
+							tmp_offset = cur_offset + interval - cur_ep_offset;
+							tmp_interval = ep_interval;
+						}
+						if(tmp_offset % tmp_interval == 0){
+							tmp_bw_required += cur_sch_ep->bw_cost;
+							break;
+						}
+					}
+				}
+			}
+			bw_required[j] = tmp_bw_required;
+		}
+		final_bw_required = SS_BW_BOUND;
+		for(j=0; j<=mult; j++){ //pick the least-loaded burst slot
+			if(bw_required[j] < final_bw_required){
+				final_bw_required = bw_required[j];
+			}
+		}
+		final_bw_required += bw_required_per_repeat;
+	}
+	return final_bw_required;
+}
+
+int count_hs_bw(int ep_type, int maxp, int interval, int offset, int td_size){ /* Compute HS bus BW needed at 'offset' (uframes), counting colliding scheduled eps. */
+	int i;
+	int bw_required;
+	struct sch_ep *cur_sch_ep;
+	int tmp_offset;
+	int tmp_interval;
+	int ep_offset;
+	int ep_interval;
+	int cur_tt_isoc_interval; //for isoc tt check
+
+	bw_required = 0;
+	for(i=0; i<MAX_EP_NUM; i++){
+
+		cur_sch_ep = (struct sch_ep *)hs_eps[i];
+		if(cur_sch_ep == NULL){
+			continue;
+		}
+		ep_offset = cur_sch_ep->offset;
+		ep_interval = cur_sch_ep->interval;
+
+		if(cur_sch_ep->isTT && cur_sch_ep->ep_type == USB_EP_ISOC){
+			cur_tt_isoc_interval = ep_interval<<3; //stored in frames; HS math is in uframes
+			if(ep_interval >= interval){
+				tmp_offset = ep_offset + cur_tt_isoc_interval - offset;
+				tmp_interval = interval;
+			}
+			else{
+				tmp_offset = offset + interval - ep_offset;
+				tmp_interval = cur_tt_isoc_interval;
+			}
+			if(cur_sch_ep->is_in){ //IN splits: complete-splits start 2 uframes after SS
+				if((tmp_offset%tmp_interval >=2) && (tmp_offset%tmp_interval <= cur_sch_ep->cs_count)){
+					bw_required += 188; //188 bytes = one isoc split chunk per uframe
+				}
+			}
+			else{
+				if(tmp_offset%tmp_interval <= cur_sch_ep->cs_count){
+					bw_required += 188;
+				}
+			}
+		}
+		else{
+			if(ep_interval >= interval){ //same phase-collision normalization as count_ss_bw
+				tmp_offset = ep_offset + ep_interval - offset;
+				tmp_interval = interval;
+			}
+			else{
+				tmp_offset = offset + interval - ep_offset;
+				tmp_interval = ep_interval;
+			}
+			if(tmp_offset%tmp_interval == 0){
+				bw_required += cur_sch_ep->bw_cost;
+			}
+		}
+	}
+	bw_required += td_size; //add the candidate ep's own payload
+	return bw_required;
+}
+
+int count_tt_isoc_bw(int is_in, int maxp, int interval, int offset, int td_size){ /* Worst-case HS BW over the split windows of a TT isoc ep at 'offset' (uframes); -1 if the CS window overruns the frame. */
+	char is_cs;
+	int mframe_idx, frame_idx, s_frame, s_mframe, cur_mframe; // NOTE(review): mframe_idx/frame_idx unused here
+	int bw_required, max_bw;
+	int ss_cs_count;
+	int cs_mframe;
+	int max_frame; // NOTE(review): unused
+	int i,j;
+	struct sch_ep *cur_sch_ep;
+	int ep_offset;
+	int ep_interval;
+	int ep_cs_count; // NOTE(review): unused
+	int tt_isoc_interval; //for isoc tt check
+	int cur_tt_isoc_interval; //for isoc tt check
+	int tmp_offset;
+	int tmp_interval;
+
+	is_cs = 0;
+
+	tt_isoc_interval = interval<<3; //frame to mframe
+	if(is_in){
+		is_cs = 1; //IN isoc uses complete-splits
+	}
+	s_frame = offset/8;
+	s_mframe = offset%8;
+	ss_cs_count = (maxp + (188 - 1))/188; //splits needed, 188 bytes each
+	if(is_cs){
+		cs_mframe = offset%8 + 2 + ss_cs_count; //CS phase starts 2 uframes after SS
+		if (cs_mframe <= 6)
+			ss_cs_count += 2; //room for two extra complete-splits
+		else if (cs_mframe == 7)
+			ss_cs_count++;
+		else if (cs_mframe > 8)
+			return -1; //window would cross the frame boundary
+	}
+	max_bw = 0;
+	if(is_in){
+		i=2;
+	}
+	for(cur_mframe = offset+i; i<ss_cs_count; cur_mframe++, i++){ // NOTE(review): 'i' is used uninitialized when is_in == 0 -- OUT path looks broken; confirm
+		bw_required = 0;
+		for(j=0; j<MAX_EP_NUM; j++){
+			cur_sch_ep = (struct sch_ep *)hs_eps[j];
+			if(cur_sch_ep == NULL){
+				continue;
+			}
+			ep_offset = cur_sch_ep->offset;
+			ep_interval = cur_sch_ep->interval;
+			if(cur_sch_ep->isTT && cur_sch_ep->ep_type == USB_EP_ISOC){
+				//isoc tt
+				//check if mframe offset overlap
+				//if overlap, add 188 to the bw
+				cur_tt_isoc_interval = ep_interval<<3;
+				if(cur_tt_isoc_interval >= tt_isoc_interval){
+					tmp_offset = (ep_offset+cur_tt_isoc_interval) - cur_mframe;
+					tmp_interval = tt_isoc_interval;
+				}
+				else{
+					tmp_offset = (cur_mframe+tt_isoc_interval) - ep_offset;
+					tmp_interval = cur_tt_isoc_interval;
+				}
+				if(cur_sch_ep->is_in){
+					if((tmp_offset%tmp_interval >=2) && (tmp_offset%tmp_interval <= cur_sch_ep->cs_count)){
+						bw_required += 188;
+					}
+				}
+				else{
+					if(tmp_offset%tmp_interval <= cur_sch_ep->cs_count){
+						bw_required += 188;
+					}
+				}
+
+			}
+			else if(cur_sch_ep->ep_type == USB_EP_INT || cur_sch_ep->ep_type == USB_EP_ISOC){
+				//check if mframe collides with this periodic ep
+				if(ep_interval >= tt_isoc_interval){
+					tmp_offset = (ep_offset+ep_interval) - cur_mframe;
+					tmp_interval = tt_isoc_interval;
+				}
+				else{
+					tmp_offset = (cur_mframe+tt_isoc_interval) - ep_offset;
+					tmp_interval = ep_interval;
+				}
+				if(tmp_offset%tmp_interval == 0){
+					bw_required += cur_sch_ep->bw_cost;
+				}
+			}
+		}
+		bw_required += 188; //the candidate ep's own split chunk in this uframe
+		if(bw_required > max_bw){
+			max_bw = bw_required; //track the busiest uframe of the window
+		}
+	}
+	return max_bw;
+}
+
+int count_tt_intr_bw(int interval, int frame_offset){ /* SCH_SUCCESS if no scheduled TT interrupt ep collides with (interval, frame_offset); else SCH_FAIL. */
+	//check all eps in tt_intr_eps
+	int ret;
+	int i,j; // NOTE(review): j and ret are unused
+	int ep_offset;
+	int ep_interval;
+	int tmp_offset;
+	int tmp_interval;
+	ret = SCH_SUCCESS;
+	struct sch_ep *cur_sch_ep; // NOTE(review): declaration after a statement (C89/gnu89 mixed-declaration warning)
+
+	for(i=0; i<MAX_EP_NUM; i++){
+		cur_sch_ep = (struct sch_ep *)tt_intr_eps[i];
+		if(cur_sch_ep == NULL){
+			continue;
+		}
+		ep_offset = cur_sch_ep->offset;
+		ep_interval = cur_sch_ep->interval;
+		if(ep_interval >= interval){ //same phase-collision normalization as the other counters
+			tmp_offset = ep_offset + ep_interval - frame_offset;
+			tmp_interval = interval;
+		}
+		else{
+			tmp_offset = frame_offset + interval - ep_offset;
+			tmp_interval = ep_interval;
+		}
+
+		if(tmp_offset%tmp_interval==0){ //any collision disqualifies this frame offset
+			return SCH_FAIL;
+		}
+	}
+	return SCH_SUCCESS;
+}
+
+struct sch_ep * mtk_xhci_scheduler_remove_ep(int dev_speed, int is_in, int isTT, int ep_type, mtk_u32 *ep){ /* Unlink the entry whose ->ep matches; return it (caller frees) or NULL. */
+	int i;
+	struct sch_ep **ep_array;
+	struct sch_ep *cur_ep;
+
+	if(is_in && dev_speed == USB_SPEED_SUPER){ //same table-selection logic as add_sch_ep
+		ep_array = (struct sch_ep **)ss_in_eps;
+	}
+	else if(dev_speed == USB_SPEED_SUPER){
+		ep_array = (struct sch_ep **)ss_out_eps;
+	}
+	else if(dev_speed == USB_SPEED_HIGH || (isTT && ep_type == USB_EP_ISOC)){
+		ep_array = (struct sch_ep **)hs_eps;
+	}
+	else{
+		ep_array = (struct sch_ep **)tt_intr_eps;
+	}
+	for(i=0; i<MAX_EP_NUM; i++){
+		cur_ep = (struct sch_ep *)ep_array[i];
+		if(cur_ep != NULL && cur_ep->ep == ep){ //match by xHC endpoint pointer identity
+			ep_array[i] = NULL;
+			return cur_ep;
+		}
+	}
+	return NULL; //not found
+}
+
+int mtk_xhci_scheduler_add_ep(int dev_speed, int is_in, int isTT, int ep_type, int maxp, int interval, int burst
+ , int mult, mtk_u32 *ep, mtk_u32 *ep_ctx, struct sch_ep *sch_ep){
+ mtk_u32 bPkts = 0;
+ mtk_u32 bCsCount = 0;
+ mtk_u32 bBm = 1;
+ mtk_u32 bOffset = 0;
+ mtk_u32 bRepeat = 0;
+ int ret;
+ struct mtk_xhci_ep_ctx *temp_ep_ctx;
+ int td_size;
+ int mframe_idx, frame_idx;
+ int bw_cost;
+ int cur_bw, best_bw, best_bw_idx,repeat, max_repeat, best_bw_repeat;
+ int cur_offset, cs_mframe;
+ int break_out;
+ int frame_interval;
+
+ printk(KERN_ERR "add_ep parameters, dev_speed %d, is_in %d, isTT %d, ep_type %d, maxp %d, interval %d, burst %d, mult %d, ep 0x%x, ep_ctx 0x%x, sch_ep 0x%x\n", dev_speed, is_in, isTT, ep_type, maxp
+ , interval, burst, mult, ep, ep_ctx, sch_ep);
+ if(isTT && ep_type == USB_EP_INT && ((dev_speed == USB_SPEED_LOW) || (dev_speed == USB_SPEED_FULL))){
+ frame_interval = interval >> 3;
+ for(frame_idx=0; frame_idx<frame_interval; frame_idx++){
+ printk(KERN_ERR "check tt_intr_bw interval %d, frame_idx %d\n", frame_interval, frame_idx);
+ if(count_tt_intr_bw(frame_interval, frame_idx) == SCH_SUCCESS){
+ printk(KERN_ERR "check OK............\n");
+ bOffset = frame_idx<<3;
+ bPkts = 1;
+ bCsCount = 3;
+ bw_cost = maxp;
+ bRepeat = 0;
+ if(add_sch_ep(dev_speed, is_in, isTT, ep_type, maxp, frame_interval, burst, mult
+ , bOffset, bRepeat, bPkts, bCsCount, bBm, maxp, ep, sch_ep) == SCH_FAIL){
+ return SCH_FAIL;
+ }
+ ret = SCH_SUCCESS;
+ break;
+ }
+ }
+ }
+ else if(isTT && ep_type == USB_EP_ISOC){
+ best_bw = HS_BW_BOUND;
+ best_bw_idx = -1;
+ cur_bw = 0;
+ td_size = maxp;
+ break_out = 0;
+ frame_interval = interval>>3;
+ for(frame_idx=0; frame_idx<frame_interval && !break_out; frame_idx++){
+ for(mframe_idx=0; mframe_idx<8; mframe_idx++){
+ cur_offset = (frame_idx*8) + mframe_idx;
+ cur_bw = count_tt_isoc_bw(is_in, maxp, frame_interval, cur_offset, td_size);
+ if(cur_bw >= 0 && cur_bw < best_bw){
+ best_bw_idx = cur_offset;
+ best_bw = cur_bw;
+ if(cur_bw == td_size || cur_bw < (HS_BW_BOUND>>1)){
+ break_out = 1;
+ break;
+ }
+ }
+ }
+ }
+ if(best_bw_idx == -1){
+ return SCH_FAIL;
+ }
+ else{
+ bOffset = best_bw_idx;
+ bPkts = 1;
+ bCsCount = (maxp + (188 - 1)) / 188;
+ if(is_in){
+ cs_mframe = bOffset%8 + 2 + bCsCount;
+ if (cs_mframe <= 6)
+ bCsCount += 2;
+ else if (cs_mframe == 7)
+ bCsCount++;
+ }
+ bw_cost = 188;
+ bRepeat = 0;
+ if(add_sch_ep( dev_speed, is_in, isTT, ep_type, maxp, interval, burst, mult
+ , bOffset, bRepeat, bPkts, bCsCount, bBm, bw_cost, ep, sch_ep) == SCH_FAIL){
+ return SCH_FAIL;
+ }
+ ret = SCH_SUCCESS;
+ }
+ }
+ else if((dev_speed == USB_SPEED_FULL || dev_speed == USB_SPEED_LOW) && ep_type == USB_EP_INT){
+ bPkts = 1;
+ ret = SCH_SUCCESS;
+ }
+ else if(dev_speed == USB_SPEED_FULL && ep_type == USB_EP_ISOC){
+ bPkts = 1;
+ ret = SCH_SUCCESS;
+ }
+ else if(dev_speed == USB_SPEED_HIGH && (ep_type == USB_EP_INT || ep_type == USB_EP_ISOC)){
+ best_bw = HS_BW_BOUND;
+ best_bw_idx = -1;
+ cur_bw = 0;
+ td_size = maxp*(burst+1);
+ for(cur_offset = 0; cur_offset<interval; cur_offset++){
+ cur_bw = count_hs_bw(ep_type, maxp, interval, cur_offset, td_size);
+ if(cur_bw >= 0 && cur_bw < best_bw){
+ best_bw_idx = cur_offset;
+ best_bw = cur_bw;
+ if(cur_bw == td_size || cur_bw < (HS_BW_BOUND>>1)){
+ break;
+ }
+ }
+ }
+ if(best_bw_idx == -1){
+ return SCH_FAIL;
+ }
+ else{
+ bOffset = best_bw_idx;
+ bPkts = burst + 1;
+ bCsCount = 0;
+ bw_cost = td_size;
+ bRepeat = 0;
+ if(add_sch_ep(dev_speed, is_in, isTT, ep_type, maxp, interval, burst, mult
+ , bOffset, bRepeat, bPkts, bCsCount, bBm, bw_cost, ep, sch_ep) == SCH_FAIL){
+ return SCH_FAIL;
+ }
+ ret = SCH_SUCCESS;
+ }
+ }
+ else if(dev_speed == USB_SPEED_SUPER && (ep_type == USB_EP_INT || ep_type == USB_EP_ISOC)){
+ best_bw = SS_BW_BOUND;
+ best_bw_idx = -1;
+ cur_bw = 0;
+ td_size = maxp * (mult+1) * (burst+1);
+ if(mult == 0){
+ max_repeat = 0;
+ }
+ else{
+ max_repeat = (interval-1)/(mult+1);
+ }
+ break_out = 0;
+ for(frame_idx = 0; (frame_idx < interval) && !break_out; frame_idx++){
+ for(repeat = max_repeat; repeat >= 0; repeat--){
+ cur_bw = count_ss_bw(is_in, ep_type, maxp, interval, burst, mult, frame_idx
+ , repeat, td_size);
+ printk(KERN_ERR "count_ss_bw, frame_idx %d, repeat %d, td_size %d, result bw %d\n"
+ , frame_idx, repeat, td_size, cur_bw);
+ if(cur_bw >= 0 && cur_bw < best_bw){
+ best_bw_idx = frame_idx;
+ best_bw_repeat = repeat;
+ best_bw = cur_bw;
+ if(cur_bw <= td_size || cur_bw < (HS_BW_BOUND>>1)){
+ break_out = 1;
+ break;
+ }
+ }
+ }
+ }
+ printk(KERN_ERR "final best idx %d, best repeat %d\n", best_bw_idx, best_bw_repeat);
+ if(best_bw_idx == -1){
+ return SCH_FAIL;
+ }
+ else{
+ bOffset = best_bw_idx;
+ bCsCount = 0;
+ bRepeat = best_bw_repeat;
+ if(bRepeat == 0){
+ bw_cost = (burst+1)*(mult+1)*maxp;
+ bPkts = (burst+1)*(mult+1);
+ }
+ else{
+ bw_cost = (burst+1)*maxp;
+ bPkts = (burst+1);
+ }
+ if(add_sch_ep(dev_speed, is_in, isTT, ep_type, maxp, interval, burst, mult
+ , bOffset, bRepeat, bPkts, bCsCount, bBm, bw_cost, ep, sch_ep) == SCH_FAIL){
+ return SCH_FAIL;
+ }
+ ret = SCH_SUCCESS;
+ }
+ }
+ else{
+ bPkts = 1;
+ ret = SCH_SUCCESS;
+ }
+ if(ret == SCH_SUCCESS){
+ temp_ep_ctx = (struct mtk_xhci_ep_ctx *)ep_ctx;
+ temp_ep_ctx->reserved[0] |= cpu_to_le32((BPKTS(bPkts) | BCSCOUNT(bCsCount) | BBM(bBm)));
+ temp_ep_ctx->reserved[1] |= cpu_to_le32((BOFFSET(bOffset) | BREPEAT(bRepeat)));
+ printk(KERN_ERR "[DBG] BPKTS: %x, BCSCOUNT: %x, BBM: %x\n", bPkts, bCsCount, bBm);
+ printk(KERN_ERR "[DBG] BOFFSET: %x, BREPEAT: %x\n", bOffset, bRepeat);
+ return SCH_SUCCESS;
+ }
+ else{
+ return SCH_FAIL;
+ }
+}
Index: linux-3.18.21/drivers/usb/host/mtk_test/xhci-mtk-scheduler.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/xhci-mtk-scheduler.h 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,82 @@
+#ifndef _XHCI_MTK_SCHEDULER_H
+#define _XHCI_MTK_SCHEDULER_H
+
+#define MTK_SCH_NEW 1
+
+#define SCH_SUCCESS 1
+#define SCH_FAIL 0
+
+#define MAX_EP_NUM 64
+#define SS_BW_BOUND 51000
+#define HS_BW_BOUND (6144 * 2)
+
+#define USB_EP_CONTROL 0
+#define USB_EP_ISOC 1
+#define USB_EP_BULK 2
+#define USB_EP_INT 3
+
+#define USB_SPEED_LOW 1
+#define USB_SPEED_FULL 2
+#define USB_SPEED_HIGH 3
+#define USB_SPEED_SUPER 5
+
+/* mtk scheduler bitmasks */
+#define BPKTS(p) ((p) & 0x3f)
+#define BCSCOUNT(p) (((p) & 0x7) << 8)
+#define BBM(p) ((p) << 11)
+#define BOFFSET(p) ((p) & 0x3fff)
+#define BREPEAT(p) (((p) & 0x7fff) << 16)
+
+
+#if 1
+typedef unsigned int mtk_u32;
+typedef unsigned long long mtk_u64;
+#endif
+
+#define NULL ((void *)0)
+
+struct mtk_xhci_ep_ctx {
+ mtk_u32 ep_info;
+ mtk_u32 ep_info2;
+ mtk_u64 deq;
+ mtk_u32 tx_info;
+ /* offset 0x14 - 0x1f reserved for HC internal use */
+ mtk_u32 reserved[3];
+};
+
+
+struct sch_ep
+{
+ //device info
+ int dev_speed;
+ int isTT;
+ //ep info
+ int is_in;
+ int ep_type;
+ int maxp;
+ int interval;
+ int burst;
+ int mult;
+ //scheduling info
+ int offset;
+ int repeat;
+ int pkts;
+ int cs_count;
+ int burst_mode;
+ //other
+ int bw_cost; //bandwidth cost in each repeat; including overhead
+ mtk_u32 *ep; //address of usb_endpoint pointer
+};
+
+static struct sch_ep **ss_out_eps[MAX_EP_NUM];
+static struct sch_ep **ss_in_eps[MAX_EP_NUM];
+static struct sch_ep **hs_eps[MAX_EP_NUM]; //including tt isoc
+static struct sch_ep **tt_intr_eps[MAX_EP_NUM];
+
+int mtk_xhci_scheduler_init(void);
+int mtk_xhci_scheduler_add_ep(int dev_speed, int is_in, int isTT, int ep_type, int maxp, int interval, int burst
+ , int mult, mtk_u32 *ep, mtk_u32 *ep_ctx, struct sch_ep *sch_ep);
+struct sch_ep * mtk_xhci_scheduler_remove_ep(int dev_speed, int is_in, int isTT, int ep_type, mtk_u32 *ep);
+
+
+#endif
Index: linux-3.18.21/drivers/usb/host/mtk_test/xhci-platform.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/xhci-platform.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,317 @@
+/*
+ * xHCI host controller driver PCI Bus Glue.
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+//add by lty
+//#include <linux/pci.h>
+
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+//#include <plat/usb.h>
+#include "xhci.h"
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include "mtk-test-lib.h"
+#include "mtk-usb-hcd.h"
+
+/* Device for a quirk */
+static const char hcd_name[] = "xhc_mtk";
+#ifdef TCSUPPORT_MIPS_1004K
+#include <asm/tc3162/tc3162.h>
+#define XHC_IRQ IRQ_RT3XXX_USB
+#else
+#define XHC_IRQ 18
+#endif
+#define XHC_IO_START 0x1FB90000
+#define XHC_IO_LENGTH 0x10000
+
+void setInitialReg();
+
+/*-------------------------------------------------------------------------*/
+
+/* called during probe() after chip reset completes */
+static int xhci_mtk_setup(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci;
+// struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+ int retval;
+ u32 temp;
+
+ hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2;
+ /* xHCI private pointer was set in xhci_pci_probe for the second
+ * registered roothub.
+ */
+ xhci = hcd_to_xhci(hcd);
+ xhci->cap_regs = hcd->regs;
+ xhci->op_regs = hcd->regs +
+ HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
+ xhci->run_regs = hcd->regs +
+ (xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
+ /* Cache read-only capability registers */
+ xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
+ xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
+ xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
+ xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
+ xhci->hci_version = HC_VERSION(xhci->hcc_params);
+ xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
+ xhci_print_registers(xhci);
+
+ /* Make sure the HC is halted. */
+ retval = xhci_halt(xhci);
+ if (retval)
+ goto error;
+
+ xhci_dbg(xhci, "Resetting HCD\n");
+ /* Reset the internal HC memory state and registers. */
+ retval = xhci_reset(xhci);
+ if (retval)
+ goto error;
+ xhci_dbg(xhci, "Reset complete\n");
+ xhci_dbg(xhci, "Calling HCD init\n");
+
+ setInitialReg();
+ /* Initialize HCD and host controller data structures. */
+ retval = xhci_init(hcd);
+ if (retval)
+ goto error;
+ xhci_dbg(xhci, "Called HCD init\n");
+ return retval;
+error:
+ kfree(xhci);
+ return retval;
+}
+
+
+static const struct hc_driver xhci_versatile_hc_driver;
+/* configure so an HC device and id are always provided */
+/* always called with process context; sleeping is OK */
+
+/**
+ * ehci_hcd_omap_probe - initialize TI-based HCDs
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ */
+static int usb_hcd_versatile_probe(struct platform_device *pdev)
+{
+ //struct ehci_hcd_omap_platform_data *pdata = pdev->dev.platform_data;
+ // struct xhci_hcd_versatile *vers;
+ // struct resource *res;
+ struct usb_hcd *hcd;
+ int i;
+ int irq;
+ char supply[7];
+ u32 value;
+ int ret = -ENODEV;
+ printk("hcd_versatile_probe is called\n");
+ /* Chiachun: don't use platform_device API first */
+
+ value = readl(0xbfb40004);
+
+ value |= (0x1<<18);
+ writel(value, 0xbfb40004);
+
+ irq = XHC_IRQ;
+ hcd = mtk_usb_create_hcd(&xhci_versatile_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
+ if (!hcd) {
+ dev_dbg(&pdev->dev, "failed to create hcd with err %d\n", ret);
+ ret = -ENOMEM;
+ goto err_create_hcd;
+ }
+ printk(KERN_ERR "Creat HCD success!\n");
+ hcd->rsrc_start = (uint32_t)XHC_IO_START;
+ hcd->rsrc_len = XHC_IO_LENGTH;
+
+ printk(KERN_ERR "Creat xHC Base address 0x%x!\n", hcd->rsrc_start);
+ hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+ if (!hcd->regs) {
+ dev_err(&pdev->dev, "XHCI ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2;
+ hcd->self.uses_dma = 1;
+
+ /* we know this is the memory we want, no need to ioremap again */
+
+// ret = mtk_usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_SAMPLE_RANDOM);
+ ret = mtk_usb_add_hcd(hcd, irq, IRQF_SHARED);
+ my_hcd = hcd;
+ if (ret) {
+ dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
+ goto err_add_hcd;
+ }
+ printk(KERN_INFO "usb_add_hcd success!\n");
+ return 0;
+ err_add_hcd:
+ err_start:
+ err_tll_ioremap:
+ err_uhh_ioremap:
+ iounmap(hcd->regs);
+ err_ioremap:
+ usb_put_hcd(hcd);
+ err_create_hcd:
+ err_disabled:
+ err_pdata:
+ return ret;
+
+}
+
+
+static int usb_hcd_versatile_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ printk("hcd_versatile_remove is called\n");
+ hcd = platform_get_drvdata (pdev);
+ iounmap(hcd->regs);
+ mtk_usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
+// kfree(vers);
+ printk("hcd_versatile_remove is completed\n");
+ return 0;
+}
+
+/* called during probe() after chip reset completes */
+static int xhci_setup(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci;
+// struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+ int retval;
+ u32 temp;
+
+ hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2;
+ /* xHCI private pointer was set in xhci_pci_probe for the second
+ * registered roothub.
+ */
+ xhci = hcd_to_xhci(hcd);
+ xhci->cap_regs = hcd->regs;
+ xhci->op_regs = hcd->regs +
+ HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
+ xhci->run_regs = hcd->regs +
+ (xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
+ /* Cache read-only capability registers */
+ xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
+ xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
+ xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
+ xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
+ xhci->hci_version = HC_VERSION(xhci->hcc_params);
+ xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
+ xhci_print_registers(xhci);
+ /* Make sure the HC is halted. */
+ retval = xhci_halt(xhci);
+ if (retval)
+ goto error;
+
+ xhci_dbg(xhci, "Resetting HCD\n");
+ /* Reset the internal HC memory state and registers. */
+ retval = xhci_reset(xhci);
+ if (retval)
+ goto error;
+ xhci_dbg(xhci, "Reset complete\n");
+ xhci_dbg(xhci, "Calling HCD init\n");
+ /* Initialize HCD and host controller data structures. */
+ retval = xhci_init(hcd);
+ if (retval)
+ goto error;
+ xhci_dbg(xhci, "Called HCD init\n");
+ return retval;
+error:
+ kfree(xhci);
+ return retval;
+}
+
+
+static void usb_hcd_versatile_shutdown(struct platform_device *pdev)
+{
+ printk("hcd_versatile_shutdown is called\n");
+ if (my_hcd->driver->shutdown)
+ my_hcd->driver->shutdown(my_hcd);
+ printk("hcd_versatile_shutdown is completed\n");
+}
+static const struct hc_driver xhci_versatile_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "xHCI MTK Test Host Controller",
+ .hcd_priv_size = sizeof(struct xhci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = xhci_mtk_irq,
+ .flags = HCD_MEMORY | HCD_USB3,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = xhci_mtk_setup,
+ .start = xhci_mtk_run,
+ /* suspend and resume implemented later */
+ .stop = xhci_mtk_stop,
+ .shutdown = xhci_mtk_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = xhci_mtk_urb_enqueue,
+ .urb_dequeue = xhci_mtk_urb_dequeue,
+ .alloc_dev = xhci_mtk_alloc_dev,
+ .free_dev = xhci_mtk_free_dev,
+ .alloc_streams = xhci_mtk_alloc_streams,
+ .free_streams = xhci_mtk_free_streams,
+ .add_endpoint = xhci_mtk_add_endpoint,
+ .drop_endpoint = xhci_mtk_drop_endpoint,
+ .endpoint_reset = xhci_mtk_endpoint_reset,
+ .check_bandwidth = xhci_mtk_check_bandwidth,
+ .reset_bandwidth = xhci_mtk_reset_bandwidth,
+ .address_device = xhci_mtk_address_device,
+ .update_hub_device = xhci_mtk_update_hub_device,
+ .reset_device = xhci_mtk_reset_device,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = xhci_mtk_get_frame,
+
+ /* Root hub support */
+ .hub_control = xhci_mtk_hub_control,
+ .hub_status_data = xhci_mtk_hub_status_data,
+};
+static void xhci_hcd_release (struct device *dev)
+{
+ printk("xhci_hcd_release \n", (uint32_t)dev);
+ dev->parent = NULL;
+// printk(KERN_INFO, "dev = 0x%08X.\n", (uint32_t)dev);
+
+}
+
+static struct platform_driver xhci_versatile_driver = {
+
+ .probe = usb_hcd_versatile_probe,
+ .remove = usb_hcd_versatile_remove,
+ .shutdown = usb_hcd_versatile_shutdown,
+
+ .driver = {
+ .name = (char *) hcd_name,
+ }
+};
+
Index: linux-3.18.21/drivers/usb/host/mtk_test/xhci-ring.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/xhci-ring.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,3591 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Ring initialization rules:
+ * 1. Each segment is initialized to zero, except for link TRBs.
+ * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
+ * Consumer Cycle State (CCS), depending on ring function.
+ * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
+ *
+ * Ring behavior rules:
+ * 1. A ring is empty if enqueue == dequeue. This means there will always be at
+ * least one free TRB in the ring. This is useful if you want to turn that
+ * into a link TRB and expand the ring.
+ * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
+ * link TRB, then load the pointer with the address in the link TRB. If the
+ * link TRB had its toggle bit set, you may need to update the ring cycle
+ * state (see cycle bit rules). You may have to do this multiple times
+ * until you reach a non-link TRB.
+ * 3. A ring is full if enqueue++ (for the definition of increment above)
+ * equals the dequeue pointer.
+ *
+ * Cycle bit rules:
+ * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
+ * in a link TRB, it must toggle the ring cycle state.
+ * 2. When a producer increments an enqueue pointer and encounters a toggle bit
+ * in a link TRB, it must toggle the ring cycle state.
+ *
+ * Producer rules:
+ * 1. Check if ring is full before you enqueue.
+ * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
+ * Update enqueue pointer between each write (which may update the ring
+ * cycle state).
+ * 3. Notify consumer. If SW is producer, it rings the doorbell for command
+ * and endpoint rings. If HC is the producer for the event ring,
+ * and it generates an interrupt according to interrupt modulation rules.
+ *
+ * Consumer rules:
+ * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
+ * the TRB is owned by the consumer.
+ * 2. Update dequeue pointer (which may update the ring cycle state) and
+ * continue processing TRBs until you reach a TRB which is not owned by you.
+ * 3. Notify the producer. SW is the consumer for the event ring, and it
+ * updates event ring dequeue pointer. HC is the consumer for the command and
+ * endpoint rings; it generates events on the event ring for these.
+ */
+
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include "xhci.h"
+#include "mtk-test-lib.h"
+
+
+/*
+ * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
+ * address of the TRB.
+ */
+dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
+ union xhci_trb *trb)
+{
+ unsigned long segment_offset;
+
+ if (!seg || !trb || trb < seg->trbs)
+ return 0;
+ /* offset in TRBs */
+ segment_offset = trb - seg->trbs;
+ if (segment_offset > TRBS_PER_SEGMENT)
+ return 0;
+ return seg->dma + (segment_offset * sizeof(*trb));
+}
+
+/* Does this link TRB point to the first segment in a ring,
+ * or was the previous TRB the last TRB on the last segment in the ERST?
+ */
+static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ struct xhci_segment *seg, union xhci_trb *trb)
+{
+ if (ring == xhci->event_ring)
+ return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
+ (seg->next == xhci->event_ring->first_seg);
+ else
+ return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
+}
+
+/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
+ * segment? I.e. would the updated event TRB pointer step off the end of the
+ * event seg?
+ */
+static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ struct xhci_segment *seg, union xhci_trb *trb)
+{
+ if (ring == xhci->event_ring)
+ return trb == &seg->trbs[TRBS_PER_SEGMENT];
+ else
+ return TRB_TYPE_LINK_LE32(trb->link.control);
+}
+
+static inline int enqueue_is_link_trb(struct xhci_ring *ring)
+{
+ struct xhci_link_trb *link = &ring->enqueue->link;
+ return TRB_TYPE_LINK_LE32(link->control);
+}
+
+/* Updates trb to point to the next TRB in the ring, and updates seg if the next
+ * TRB is in a new segment. This does not skip over link TRBs, and it does not
+ * effect the ring dequeue or enqueue pointers.
+ */
+static void next_trb(struct xhci_hcd *xhci,
+ struct xhci_ring *ring,
+ struct xhci_segment **seg,
+ union xhci_trb **trb)
+{
+ if (last_trb(xhci, ring, *seg, *trb)) {
+ *seg = (*seg)->next;
+ *trb = ((*seg)->trbs);
+ } else {
+ (*trb)++;
+ }
+}
+
+/*
+ * See Cycle bit rules. SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs. That would be dumb and this would loop.
+ */
+void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
+{
+ union xhci_trb *next = ++(ring->dequeue);
+ unsigned long long addr;
+
+ ring->deq_updates++;
+ /* Update the dequeue pointer further if that was a link TRB or we're at
+ * the end of an event ring segment (which doesn't have link TRBS)
+ */
+ while (last_trb(xhci, ring, ring->deq_seg, next)) {
+ if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
+ ring->cycle_state = (ring->cycle_state ? 0 : 1);
+ if (!in_interrupt())
+ xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
+ ring,
+ (unsigned int) ring->cycle_state);
+ }
+ ring->deq_seg = ring->deq_seg->next;
+ ring->dequeue = ring->deq_seg->trbs;
+ next = ring->dequeue;
+ }
+ addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
+ if (ring == xhci->event_ring)
+ xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr);
+ else if (ring == xhci->cmd_ring)
+ xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr);
+ else
+ xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr);
+}
+
+/*
+ * See Cycle bit rules. SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs. That would be dumb and this would loop.
+ *
+ * If we've just enqueued a TRB that is in the middle of a TD (meaning the
+ * chain bit is set), then set the chain bit in all the following link TRBs.
+ * If we've enqueued the last TRB in a TD, make sure the following link TRBs
+ * have their chain bit cleared (so that each Link TRB is a separate TD).
+ *
+ * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
+ * set, but other sections talk about dealing with the chain bit set. This was
+ * fixed in the 0.96 specification errata, but we have to assume that all 0.95
+ * xHCI hardware can't handle the chain bit being cleared on a link TRB.
+ *
+ * @more_trbs_coming: Will you enqueue more TRBs before calling
+ * prepare_transfer()?
+ */
+static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ bool consumer, bool more_trbs_coming)
+{
+ u32 chain;
+ union xhci_trb *next;
+ unsigned long long addr;
+
+ chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
+ next = ++(ring->enqueue);
+
+ ring->enq_updates++;
+ /* Update the dequeue pointer further if that was a link TRB or we're at
+ * the end of an event ring segment (which doesn't have link TRBS)
+ */
+ while (last_trb(xhci, ring, ring->enq_seg, next)) {
+ if (!consumer) {
+ if (ring != xhci->event_ring) {
+ /*
+ * If the caller doesn't plan on enqueueing more
+ * TDs before ringing the doorbell, then we
+ * don't want to give the link TRB to the
+ * hardware just yet. We'll give the link TRB
+ * back in prepare_ring() just before we enqueue
+ * the TD at the top of the ring.
+ */
+ if (!chain && !more_trbs_coming)
+ break;
+
+ /* If we're not dealing with 0.95 hardware,
+ * carry over the chain bit of the previous TRB
+ * (which may mean the chain bit is cleared).
+ */
+ if (!xhci_link_trb_quirk(xhci)) {
+ next->link.control &=
+ cpu_to_le32(~TRB_CHAIN);
+ next->link.control |=
+ cpu_to_le32(chain);
+ }
+ /* Give this link TRB to the hardware */
+ wmb();
+ next->link.control ^= cpu_to_le32(TRB_CYCLE);
+ }
+ /* Toggle the cycle bit after the last ring segment. */
+ if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
+ ring->cycle_state = (ring->cycle_state ? 0 : 1);
+ if (!in_interrupt())
+ xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
+ ring,
+ (unsigned int) ring->cycle_state);
+ }
+ }
+ ring->enq_seg = ring->enq_seg->next;
+ ring->enqueue = ring->enq_seg->trbs;
+ next = ring->enqueue;
+ }
+ addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
+ if (ring == xhci->event_ring)
+ xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr);
+ else if (ring == xhci->cmd_ring)
+ xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr);
+ else
+ xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr);
+}
+
+/*
+ * Check to see if there's room to enqueue num_trbs on the ring. See rules
+ * above.
+ * FIXME: this would be simpler and faster if we just kept track of the number
+ * of free TRBs in a ring.
+ */
+static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ unsigned int num_trbs)
+{
+ int i;
+ union xhci_trb *enq = ring->enqueue;
+ struct xhci_segment *enq_seg = ring->enq_seg;
+ struct xhci_segment *cur_seg;
+ unsigned int left_on_ring;
+
+ /* If we are currently pointing to a link TRB, advance the
+ * enqueue pointer before checking for space */
+ while (last_trb(xhci, ring, enq_seg, enq)) {
+ enq_seg = enq_seg->next;
+ enq = enq_seg->trbs;
+ }
+
+ /* Check if ring is empty */
+ if (enq == ring->dequeue) {
+ /* Can't use link trbs */
+ left_on_ring = TRBS_PER_SEGMENT - 1;
+ for (cur_seg = enq_seg->next; cur_seg != enq_seg;
+ cur_seg = cur_seg->next)
+ left_on_ring += TRBS_PER_SEGMENT - 1;
+
+ /* Always need one TRB free in the ring. */
+ left_on_ring -= 1;
+ if (num_trbs > left_on_ring) {
+ xhci_warn(xhci, "Not enough room on ring; "
+ "need %u TRBs, %u TRBs left\n",
+ num_trbs, left_on_ring);
+ return 0;
+ }
+ return 1;
+ }
+ /* Make sure there's an extra empty TRB available */
+ for (i = 0; i <= num_trbs; ++i) {
+ if (enq == ring->dequeue)
+ return 0;
+ enq++;
+ while (last_trb(xhci, ring, enq_seg, enq)) {
+ enq_seg = enq_seg->next;
+ enq = enq_seg->trbs;
+ }
+ }
+ return 1;
+}
+
+void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
+{
+ u64 temp;
+ dma_addr_t deq;
+
+ deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
+ xhci->event_ring->dequeue);
+ if (deq == 0 && !in_interrupt())
+ xhci_warn(xhci, "WARN something wrong with SW event ring "
+ "dequeue ptr.\n");
+ /* Update HC event ring dequeue pointer */
+ temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ temp &= ERST_PTR_MASK;
+ /* Don't clear the EHB bit (which is RW1C) because
+ * there might be more events to service.
+ */
+ temp &= ~ERST_EHB;
+ xhci_dbg(xhci, "// Write event ring dequeue pointer, preserving EHB bit\n");
+ xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
+ &xhci->ir_set->erst_dequeue);
+}
+
+/* Ring the host controller doorbell after placing a command on the ring */
+void xhci_ring_cmd_db(struct xhci_hcd *xhci)
+{
+ u32 temp;
+
+ xhci_dbg(xhci, "// Ding dong!\n");
+ temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
+ xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
+#if 0
+ /* Flush PCI posted writes */
+ xhci_readl(xhci, &xhci->dba->doorbell[0]);
+#endif
+}
+
+static void ring_ep_doorbell(struct xhci_hcd *xhci,
+ unsigned int slot_id,
+ unsigned int ep_index,
+ unsigned int stream_id)
+{
+ struct xhci_virt_ep *ep;
+ unsigned int ep_state;
+ u32 field;
+ __u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
+
+ ep = &xhci->devs[slot_id]->eps[ep_index];
+ ep_state = ep->ep_state;
+ /* Don't ring the doorbell for this endpoint if there are pending
+ * cancellations because the we don't want to interrupt processing.
+ * We don't want to restart any stream rings if there's a set dequeue
+ * pointer command pending because the device can choose to start any
+ * stream once the endpoint is on the HW schedule.
+ * FIXME - check all the stream rings for pending cancellations.
+ */
+ if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
+ && !(ep_state & EP_HALTED)) {
+ field = xhci_readl(xhci, db_addr) & DB_MASK;
+ field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id);
+ xhci_writel(xhci, field, db_addr);
+ }
+}
+
+/* Ring the doorbell for any rings with pending URBs */
+static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
+ unsigned int slot_id,
+ unsigned int ep_index)
+{
+ unsigned int stream_id;
+ struct xhci_virt_ep *ep;
+
+ ep = &xhci->devs[slot_id]->eps[ep_index];
+
+ /* A ring has pending URBs if its TD list is not empty */
+ if (!(ep->ep_state & EP_HAS_STREAMS)) {
+ if (!(list_empty(&ep->ring->td_list)))
+ ring_ep_doorbell(xhci, slot_id, ep_index, 0);
+ return;
+ }
+
+ for (stream_id = 1; stream_id < ep->stream_info->num_streams;
+ stream_id++) {
+ struct xhci_stream_info *stream_info = ep->stream_info;
+ if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
+ ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
+ }
+}
+
+/*
+ * Find the segment that trb is in. Start searching in start_seg.
+ * If we must move past a segment that has a link TRB with a toggle cycle state
+ * bit set, then we will toggle the value pointed at by cycle_state.
+ */
+static struct xhci_segment *find_trb_seg(
+ struct xhci_segment *start_seg,
+ union xhci_trb *trb, int *cycle_state)
+{
+ struct xhci_segment *cur_seg = start_seg;
+ struct xhci_generic_trb *generic_trb;
+
+ while (cur_seg->trbs > trb ||
+ &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
+ generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
+ if ((generic_trb->field[3] & cpu_to_le32(TRB_TYPE_BITMASK)) ==
+ cpu_to_le32(TRB_TYPE(TRB_LINK)) &&
+ (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
+ *cycle_state = ~(*cycle_state) & 0x1;
+ cur_seg = cur_seg->next;
+ if (cur_seg == start_seg)
+ /* Looped over the entire list. Oops! */
+ return NULL;
+ }
+ return cur_seg;
+}
+
+/*
+ * Move the xHC's endpoint ring dequeue pointer past cur_td.
+ * Record the new state of the xHC's endpoint ring dequeue segment,
+ * dequeue pointer, and new consumer cycle state in state.
+ * Update our internal representation of the ring's dequeue pointer.
+ *
+ * We do this in three jumps:
+ * - First we update our new ring state to be the same as when the xHC stopped.
+ * - Then we traverse the ring to find the segment that contains
+ * the last TRB in the TD. We toggle the xHC's new cycle state when we pass
+ * any link TRBs with the toggle cycle bit set.
+ * - Finally we move the dequeue state one TRB further, toggling the cycle bit
+ * if we've moved it past a link TRB with the toggle cycle bit set.
+ */
+void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ unsigned int stream_id, struct xhci_td *cur_td,
+ struct xhci_dequeue_state *state)
+{
+ struct xhci_virt_device *dev = xhci->devs[slot_id];
+ struct xhci_ring *ep_ring;
+ struct xhci_generic_trb *trb;
+ struct xhci_ep_ctx *ep_ctx;
+ dma_addr_t addr;
+
+ ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
+ ep_index, stream_id);
+ if (!ep_ring) {
+ xhci_warn(xhci, "WARN can't find new dequeue state "
+ "for invalid stream ID %u.\n",
+ stream_id);
+ return;
+ }
+ state->new_cycle_state = 0;
+ xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
+ state->new_deq_seg = find_trb_seg(cur_td->start_seg,
+ dev->eps[ep_index].stopped_trb,
+ &state->new_cycle_state);
+ if (!state->new_deq_seg)
+ BUG();
+ /* Dig out the cycle state saved by the xHC during the stop ep cmd */
+ xhci_dbg(xhci, "Finding endpoint context\n");
+ ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
+ state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
+
+ state->new_deq_ptr = cur_td->last_trb;
+ xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
+ state->new_deq_seg = find_trb_seg(state->new_deq_seg,
+ state->new_deq_ptr,
+ &state->new_cycle_state);
+ if (!state->new_deq_seg)
+ BUG();
+
+ trb = &state->new_deq_ptr->generic;
+ if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
+ (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
+ state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
+ next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
+
+ /* Don't update the ring cycle state for the producer (us). */
+ xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
+ state->new_deq_seg);
+ addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
+ xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
+ (unsigned long long) addr);
+ xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n");
+ ep_ring->dequeue = state->new_deq_ptr;
+ ep_ring->deq_seg = state->new_deq_seg;
+}
+
+static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
+ struct xhci_td *cur_td)
+{
+ struct xhci_segment *cur_seg;
+ union xhci_trb *cur_trb;
+
+ for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
+ true;
+ next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
+ if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
+ /* Unchain any chained Link TRBs, but
+ * leave the pointers intact.
+ */
+ cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
+ xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
+ xhci_dbg(xhci, "Address = %p (0x%llx dma); "
+ "in seg %p (0x%llx dma)\n",
+ cur_trb,
+ (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
+ cur_seg,
+ (unsigned long long)cur_seg->dma);
+ } else {
+ cur_trb->generic.field[0] = 0;
+ cur_trb->generic.field[1] = 0;
+ cur_trb->generic.field[2] = 0;
+ /* Preserve only the cycle bit of this TRB */
+ cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
+ cur_trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));
+ xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
+ "in seg %p (0x%llx dma)\n",
+ cur_trb,
+ (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
+ cur_seg,
+ (unsigned long long)cur_seg->dma);
+ }
+ if (cur_trb == cur_td->last_trb)
+ break;
+ }
+}
+
+static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
+ unsigned int ep_index, unsigned int stream_id,
+ struct xhci_segment *deq_seg,
+ union xhci_trb *deq_ptr, u32 cycle_state);
+
+void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ unsigned int stream_id,
+ struct xhci_dequeue_state *deq_state)
+{
+ struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+
+ xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
+ "new deq ptr = %p (0x%llx dma), new cycle = %u\n",
+ deq_state->new_deq_seg,
+ (unsigned long long)deq_state->new_deq_seg->dma,
+ deq_state->new_deq_ptr,
+ (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
+ deq_state->new_cycle_state);
+ queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
+ deq_state->new_deq_seg,
+ deq_state->new_deq_ptr,
+ (u32) deq_state->new_cycle_state);
+ /* Stop the TD queueing code from ringing the doorbell until
+ * this command completes. The HC won't set the dequeue pointer
+ * if the ring is running, and ringing the doorbell starts the
+ * ring running.
+ */
+ ep->ep_state |= SET_DEQ_PENDING;
+}
+
+static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
+ struct xhci_virt_ep *ep)
+{
+ ep->ep_state &= ~EP_HALT_PENDING;
+ /* Can't del_timer_sync in interrupt, so we attempt to cancel. If the
+ * timer is running on another CPU, we don't decrement stop_cmds_pending
+ * (since we didn't successfully stop the watchdog timer).
+ */
+ if (del_timer(&ep->stop_cmd_timer))
+ ep->stop_cmds_pending--;
+}
+
+/* Must be called with xhci->lock held in interrupt context */
+static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
+ struct xhci_td *cur_td, int status, char *adjective)
+{
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+
+ cur_td->urb->hcpriv = NULL;
+ usb_hcd_unlink_urb_from_ep(hcd, cur_td->urb);
+ xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, cur_td->urb);
+
+ spin_unlock(&xhci->lock);
+ usb_hcd_giveback_urb(hcd, cur_td->urb, status);
+ kfree(cur_td);
+ spin_lock(&xhci->lock);
+ xhci_dbg(xhci, "%s URB given back\n", adjective);
+}
+
+/*
+ * When we get a command completion for a Stop Endpoint Command, we need to
+ * unlink any cancelled TDs from the ring. There are two ways to do that:
+ *
+ * 1. If the HW was in the middle of processing the TD that needs to be
+ * cancelled, then we must move the ring's dequeue pointer past the last TRB
+ * in the TD with a Set Dequeue Pointer Command.
+ * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
+ * bit cleared) so that the HW will skip over them.
+ */
+static void handle_stopped_endpoint(struct xhci_hcd *xhci,
+ union xhci_trb *trb)
+{
+ unsigned int slot_id;
+ unsigned int ep_index;
+ struct xhci_ring *ep_ring;
+ struct xhci_virt_ep *ep;
+ struct list_head *entry;
+ struct xhci_td *cur_td = NULL;
+ struct xhci_td *last_unlinked_td;
+
+ struct xhci_dequeue_state deq_state;
+
+ memset(&deq_state, 0, sizeof(deq_state));
+ slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
+ ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+ ep = &xhci->devs[slot_id]->eps[ep_index];
+
+ if (list_empty(&ep->cancelled_td_list)) {
+ xhci_stop_watchdog_timer_in_irq(xhci, ep);
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+ return;
+ }
+
+ /* Fix up the ep ring first, so HW stops executing cancelled TDs.
+ * We have the xHCI lock, so nothing can modify this list until we drop
+ * it. We're also in the event handler, so we can't get re-interrupted
+ * if another Stop Endpoint command completes
+ */
+ list_for_each(entry, &ep->cancelled_td_list) {
+ cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
+ xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
+ cur_td->first_trb,
+ (unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
+ ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
+ if (!ep_ring) {
+ /* This shouldn't happen unless a driver is mucking
+ * with the stream ID after submission. This will
+ * leave the TD on the hardware ring, and the hardware
+ * will try to execute it, and may access a buffer
+ * that has already been freed. In the best case, the
+ * hardware will execute it, and the event handler will
+ * ignore the completion event for that TD, since it was
+ * removed from the td_list for that endpoint. In
+ * short, don't muck with the stream ID after
+ * submission.
+ */
+ xhci_warn(xhci, "WARN Cancelled URB %p "
+ "has invalid stream ID %u.\n",
+ cur_td->urb,
+ cur_td->urb->stream_id);
+ goto remove_finished_td;
+ }
+ /*
+ * If we stopped on the TD we need to cancel, then we have to
+ * move the xHC endpoint ring dequeue pointer past this TD.
+ */
+ if (cur_td == ep->stopped_td)
+ xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
+ cur_td->urb->stream_id,
+ cur_td, &deq_state);
+ else
+ td_to_noop(xhci, ep_ring, cur_td);
+remove_finished_td:
+ /*
+ * The event handler won't see a completion for this TD anymore,
+ * so remove it from the endpoint ring's TD list. Keep it in
+ * the cancelled TD list for URB completion later.
+ */
+ list_del(&cur_td->td_list);
+ }
+ last_unlinked_td = cur_td;
+ xhci_stop_watchdog_timer_in_irq(xhci, ep);
+
+ /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
+ if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
+ xhci_queue_new_dequeue_state(xhci,
+ slot_id, ep_index,
+ ep->stopped_td->urb->stream_id,
+ &deq_state);
+ xhci_ring_cmd_db(xhci);
+ } else {
+ /* Otherwise ring the doorbell(s) to restart queued transfers */
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+ }
+ ep->stopped_td = NULL;
+ ep->stopped_trb = NULL;
+
+ /*
+ * Drop the lock and complete the URBs in the cancelled TD list.
+ * New TDs to be cancelled might be added to the end of the list before
+ * we can complete all the URBs for the TDs we already unlinked.
+ * So stop when we've completed the URB for the last TD we unlinked.
+ */
+ do {
+ cur_td = list_entry(ep->cancelled_td_list.next,
+ struct xhci_td, cancelled_td_list);
+ list_del(&cur_td->cancelled_td_list);
+
+ /* Clean up the cancelled URB */
+ /* Doesn't matter what we pass for status, since the core will
+ * just overwrite it (because the URB has been unlinked).
+ */
+ xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");
+
+ /* Stop processing the cancelled list if the watchdog timer is
+ * running.
+ */
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ return;
+ } while (cur_td != last_unlinked_td);
+
+ /* Return to the event handler with xhci->lock re-acquired */
+}
+
+/* Watchdog timer function for when a stop endpoint command fails to complete.
+ * In this case, we assume the host controller is broken or dying or dead. The
+ * host may still be completing some other events, so we have to be careful to
+ * let the event ring handler and the URB dequeueing/enqueueing functions know
+ * through xhci->state.
+ *
+ * The timer may also fire if the host takes a very long time to respond to the
+ * command, and the stop endpoint command completion handler cannot delete the
+ * timer before the timer function is called. Another endpoint cancellation may
+ * sneak in before the timer function can grab the lock, and that may queue
+ * another stop endpoint command and add the timer back. So we cannot use a
+ * simple flag to say whether there is a pending stop endpoint command for a
+ * particular endpoint.
+ *
+ * Instead we use a combination of that flag and a counter for the number of
+ * pending stop endpoint commands. If the timer is the tail end of the last
+ * stop endpoint command, and the endpoint's command is still pending, we assume
+ * the host is dying.
+ */
+void xhci_stop_endpoint_command_watchdog(unsigned long arg)
+{
+ struct xhci_hcd *xhci;
+ struct xhci_virt_ep *ep;
+ struct xhci_virt_ep *temp_ep;
+ struct xhci_ring *ring;
+ struct xhci_td *cur_td;
+ int ret, i, j;
+
+ ep = (struct xhci_virt_ep *) arg;
+ xhci = ep->xhci;
+
+ spin_lock(&xhci->lock);
+
+ ep->stop_cmds_pending--;
+ if (xhci->xhc_state & XHCI_STATE_DYING) {
+ xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
+ "xHCI as DYING, exiting.\n");
+ spin_unlock(&xhci->lock);
+ return;
+ }
+ if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
+ xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
+ "exiting.\n");
+ spin_unlock(&xhci->lock);
+ return;
+ }
+
+ xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
+ xhci_warn(xhci, "Assuming host is dying, halting host.\n");
+ /* Oops, HC is dead or dying or at least not responding to the stop
+ * endpoint command.
+ */
+ xhci->xhc_state |= XHCI_STATE_DYING;
+ /* Disable interrupts from the host controller and start halting it */
+ xhci_quiesce(xhci);
+ spin_unlock(&xhci->lock);
+
+ ret = xhci_halt(xhci);
+
+ spin_lock(&xhci->lock);
+ if (ret < 0) {
+ /* This is bad; the host is not responding to commands and it's
+ * not allowing itself to be halted. At least interrupts are
+ * disabled, so we can set HC_STATE_HALT and notify the
+ * USB core. But if we call usb_hc_died(), it will attempt to
+ * disconnect all device drivers under this host. Those
+ * disconnect() methods will wait for all URBs to be unlinked,
+ * so we must complete them.
+ */
+ xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
+ xhci_warn(xhci, "Completing active URBs anyway.\n");
+ /* We could turn all TDs on the rings to no-ops. This won't
+ * help if the host has cached part of the ring, and is slow if
+ * we want to preserve the cycle bit. Skip it and hope the host
+ * doesn't touch the memory.
+ */
+ }
+ for (i = 0; i < MAX_HC_SLOTS; i++) {
+ if (!xhci->devs[i])
+ continue;
+ for (j = 0; j < 31; j++) {
+ temp_ep = &xhci->devs[i]->eps[j];
+ ring = temp_ep->ring;
+ if (!ring)
+ continue;
+ xhci_dbg(xhci, "Killing URBs for slot ID %u, "
+ "ep index %u\n", i, j);
+ while (!list_empty(&ring->td_list)) {
+ cur_td = list_first_entry(&ring->td_list,
+ struct xhci_td,
+ td_list);
+ list_del(&cur_td->td_list);
+ if (!list_empty(&cur_td->cancelled_td_list))
+ list_del(&cur_td->cancelled_td_list);
+ xhci_giveback_urb_in_irq(xhci, cur_td,
+ -ESHUTDOWN, "killed");
+ }
+ while (!list_empty(&temp_ep->cancelled_td_list)) {
+ cur_td = list_first_entry(
+ &temp_ep->cancelled_td_list,
+ struct xhci_td,
+ cancelled_td_list);
+ list_del(&cur_td->cancelled_td_list);
+ xhci_giveback_urb_in_irq(xhci, cur_td,
+ -ESHUTDOWN, "killed");
+ }
+ }
+ }
+ spin_unlock(&xhci->lock);
+ xhci_to_hcd(xhci)->state = HC_STATE_HALT;
+ xhci_dbg(xhci, "Calling usb_hc_died()\n");
+ usb_hc_died(xhci_to_hcd(xhci));
+ xhci_dbg(xhci, "xHCI host controller is dead.\n");
+}
+
+/*
+ * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
+ * we need to clear the set deq pending flag in the endpoint ring state, so that
+ * the TD queueing code can ring the doorbell again. We also need to ring the
+ * endpoint doorbell to restart the ring, but only if there aren't more
+ * cancellations pending.
+ */
+static void handle_set_deq_completion(struct xhci_hcd *xhci,
+ struct xhci_event_cmd *event,
+ union xhci_trb *trb)
+{
+ unsigned int slot_id;
+ unsigned int ep_index;
+ unsigned int stream_id;
+ struct xhci_ring *ep_ring;
+ struct xhci_virt_device *dev;
+ struct xhci_ep_ctx *ep_ctx;
+ struct xhci_slot_ctx *slot_ctx;
+
+ slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
+ ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+ stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
+ dev = xhci->devs[slot_id];
+
+ ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
+ if (!ep_ring) {
+ xhci_warn(xhci, "WARN Set TR deq ptr command for "
+ "freed stream ID %u\n",
+ stream_id);
+ /* XXX: Harmless??? */
+ dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
+ return;
+ }
+
+ ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
+ slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
+
+ if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) {
+ unsigned int ep_state;
+ unsigned int slot_state;
+
+ switch (GET_COMP_CODE(le32_to_cpu(event->status))) {
+ case COMP_TRB_ERR:
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
+ "of stream ID configuration\n");
+ break;
+ case COMP_CTX_STATE:
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
+ "to incorrect slot or ep state.\n");
+ ep_state = le32_to_cpu(ep_ctx->ep_info);
+ ep_state &= EP_STATE_MASK;
+ slot_state = le32_to_cpu(slot_ctx->dev_state);
+ slot_state = GET_SLOT_STATE(slot_state);
+ xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
+ slot_state, ep_state);
+ break;
+ case COMP_EBADSLT:
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
+ "slot %u was not enabled.\n", slot_id);
+ break;
+ default:
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
+ "completion code of %u.\n",
+ GET_COMP_CODE(le32_to_cpu(event->status)));
+ break;
+ }
+ /* OK what do we do now? The endpoint state is hosed, and we
+ * should never get to this point if the synchronization between
+ * queueing, and endpoint state are correct. This might happen
+ * if the device gets disconnected after we've finished
+ * cancelling URBs, which might not be an error...
+ */
+ } else {
+ xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
+ le64_to_cpu(ep_ctx->deq));
+ if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
+ dev->eps[ep_index].queued_deq_ptr) ==
+ (le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) {
+ /* Update the ring's dequeue segment and dequeue pointer
+ * to reflect the new position.
+ */
+ ep_ring->deq_seg = dev->eps[ep_index].queued_deq_seg;
+ ep_ring->dequeue = dev->eps[ep_index].queued_deq_ptr;
+ } else {
+ xhci_warn(xhci, "Mismatch between completed Set TR Deq "
+ "Ptr command & xHCI internal state.\n");
+ xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
+ dev->eps[ep_index].queued_deq_seg,
+ dev->eps[ep_index].queued_deq_ptr);
+ }
+ }
+
+ dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
+ dev->eps[ep_index].queued_deq_seg = NULL;
+ dev->eps[ep_index].queued_deq_ptr = NULL;
+ /* Restart any rings with pending URBs */
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+}
+
+static void handle_reset_ep_completion(struct xhci_hcd *xhci,
+ struct xhci_event_cmd *event,
+ union xhci_trb *trb)
+{
+ int slot_id;
+ unsigned int ep_index;
+
+ slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
+ ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+ /* This command will only fail if the endpoint wasn't halted,
+ * but we don't care.
+ */
+ xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
+ (unsigned int) GET_COMP_CODE(le32_to_cpu(event->status)));
+
+ /* HW with the reset endpoint quirk needs to have a configure endpoint
+ * command complete before the endpoint can be used. Queue that here
+ * because the HW can't handle two commands being queued in a row.
+ */
+ if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
+ xhci_dbg(xhci, "Queueing configure endpoint command\n");
+ xhci_queue_configure_endpoint(xhci,
+ xhci->devs[slot_id]->in_ctx->dma, slot_id,
+ false);
+ xhci_ring_cmd_db(xhci);
+ } else {
+ /* Clear our internal halted state and restart the ring(s) */
+ xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+ }
+}
+
+/* Check to see if a command in the device's command queue matches this one.
+ * Signal the completion or free the command, and return 1. Return 0 if the
+ * completed command isn't at the head of the command list.
+ */
+static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ struct xhci_event_cmd *event)
+{
+ struct xhci_command *command;
+
+ if (list_empty(&virt_dev->cmd_list))
+ return 0;
+
+ command = list_entry(virt_dev->cmd_list.next,
+ struct xhci_command, cmd_list);
+ if (xhci->cmd_ring->dequeue != command->command_trb)
+ return 0;
+
+ command->status =
+ GET_COMP_CODE(le32_to_cpu(event->status));
+ list_del(&command->cmd_list);
+ if (command->completion)
+ complete(command->completion);
+ else
+ xhci_free_command(xhci, command);
+ return 1;
+}
+
+/*
+ * This TD is defined by the TRBs starting at start_trb in start_seg and ending
+ * at end_trb, which may be in another segment. If the suspect DMA address is a
+ * TRB in this TD, this function returns that TRB's segment. Otherwise it
+ * returns 0.
+ */
+struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
+ union xhci_trb *start_trb,
+ union xhci_trb *end_trb,
+ dma_addr_t suspect_dma)
+{
+ dma_addr_t start_dma;
+ dma_addr_t end_seg_dma;
+ dma_addr_t end_trb_dma;
+ struct xhci_segment *cur_seg;
+
+ start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
+ cur_seg = start_seg;
+ printk(KERN_DEBUG "start_dma 0x%x\n", start_dma);
+ printk(KERN_DEBUG "start_seg 0x%x\n", start_seg);
+ printk(KERN_DEBUG "start_trb 0x%x\n", start_trb);
+ printk(KERN_DEBUG "end_trb 0x%x\n", end_trb);
+ printk(KERN_DEBUG "suspect_dma 0x%x\n", suspect_dma);
+
+ do {
+ if (start_dma == 0){
+ printk(KERN_DEBUG "return NULL 1\n");
+ return NULL;
+ }
+ /* We may get an event for a Link TRB in the middle of a TD */
+ end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
+ &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
+ /* If the end TRB isn't in this segment, this is set to 0 */
+ end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
+ printk(KERN_DEBUG "end_trb_dma 0x%x\n", end_trb_dma);
+ if (end_trb_dma > 0) {
+ /* The end TRB is in this segment, so suspect should be here */
+ if (start_dma <= end_trb_dma) {
+ if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
+ return cur_seg;
+ } else {
+ /* Case for one segment with
+ * a TD wrapped around to the top
+ */
+ if ((suspect_dma >= start_dma &&
+ suspect_dma <= end_seg_dma) ||
+ (suspect_dma >= cur_seg->dma &&
+ suspect_dma <= end_trb_dma))
+ return cur_seg;
+ }
+ printk(KERN_DEBUG "return NULL 2\n");
+ return NULL;
+ } else {
+ /* Might still be somewhere in this segment */
+ if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
+ return cur_seg;
+ }
+ cur_seg = cur_seg->next;
+ start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
+ } while (cur_seg != start_seg);
+ printk(KERN_DEBUG "return NULL 3\n");
+ return NULL;
+}
+
+static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ unsigned int stream_id,
+ struct xhci_td *td, union xhci_trb *event_trb)
+{
+ struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+ ep->ep_state |= EP_HALTED;
+ ep->stopped_td = td;
+ ep->stopped_trb = event_trb;
+ ep->stopped_stream = stream_id;
+
+ xhci_queue_reset_ep(xhci, slot_id, ep_index);
+ xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
+
+ ep->stopped_td = NULL;
+ ep->stopped_trb = NULL;
+ ep->stopped_stream = 0;
+
+ xhci_ring_cmd_db(xhci);
+}
+
+/* Check if an error has halted the endpoint ring. The class driver will
+ * cleanup the halt for a non-default control endpoint if we indicate a stall.
+ * However, a babble and other errors also halt the endpoint ring, and the class
+ * driver won't clear the halt in that case, so we need to issue a Set Transfer
+ * Ring Dequeue Pointer command manually.
+ */
+static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
+ struct xhci_ep_ctx *ep_ctx,
+ unsigned int trb_comp_code)
+{
+ xhci_dbg(xhci, "check required to cleanup halt ep\n");
+ xhci_dbg(xhci, "ep_info 0x%x\n", ep_ctx->ep_info);
+ /* TRB completion codes that may require a manual halt cleanup */
+ if (trb_comp_code == COMP_TX_ERR ||
+ trb_comp_code == COMP_BABBLE ||
+ trb_comp_code == COMP_SPLIT_ERR)
+ /* The 0.95 spec says a babbling control endpoint
+ * is not halted. The 0.96 spec says it is. Some HW
+ * claims to be 0.95 compliant, but it halts the control
+ * endpoint anyway. Check if a babble halted the
+ * endpoint.
+ */
+ if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) == cpu_to_le32(EP_STATE_HALTED))
+ return 1;
+
+ return 0;
+}
+
+int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
+{
+ if (trb_comp_code >= 224 && trb_comp_code <= 255) {
+ /* Vendor defined "informational" completion code,
+ * treat as not-an-error.
+ */
+ xhci_dbg(xhci, "Vendor defined info completion code %u\n",
+ trb_comp_code);
+ xhci_dbg(xhci, "Treating code as success.\n");
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Finish the td processing, remove the td from td list;
+ * Return 1 if the urb can be given back.
+ */
+static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ union xhci_trb *event_trb, struct xhci_transfer_event *event,
+ struct xhci_virt_ep *ep, int *status, bool skip)
+{
+ struct xhci_virt_device *xdev;
+ struct xhci_ring *ep_ring;
+ unsigned int slot_id;
+ int ep_index;
+ struct urb *urb = NULL;
+ struct xhci_ep_ctx *ep_ctx;
+ int ret = 0;
+ struct urb_priv *urb_priv;
+ u32 trb_comp_code;
+
+ slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
+ xdev = xhci->devs[slot_id];
+ ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
+ ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+ ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+ trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+
+ if (skip)
+ goto td_cleanup;
+
+ if (trb_comp_code == COMP_STOP_INVAL ||
+ trb_comp_code == COMP_STOP) {
+ /* The Endpoint Stop Command completion will take care of any
+ * stopped TDs. A stopped TD may be restarted, so don't update
+ * the ring dequeue pointer or take this TD off any lists yet.
+ */
+ ep->stopped_td = td;
+ ep->stopped_trb = event_trb;
+ return 0;
+ } else {
+ if (trb_comp_code == COMP_STALL) {
+ /* The transfer is completed from the driver's
+ * perspective, but we need to issue a set dequeue
+ * command for this stalled endpoint to move the dequeue
+ * pointer past the TD. We can't do that here because
+ * the halt condition must be cleared first. Let the
+ * USB class driver clear the stall later.
+ */
+ ep->stopped_td = td;
+ ep->stopped_trb = event_trb;
+ ep->stopped_stream = ep_ring->stream_id;
+ } else if (xhci_requires_manual_halt_cleanup(xhci,
+ ep_ctx, trb_comp_code)) {
+ /* Other types of errors halt the endpoint, but the
+ * class driver doesn't call usb_reset_endpoint() unless
+ * the error is -EPIPE. Clear the halted status in the
+ * xHCI hardware manually.
+ */
+ xhci_dbg(xhci, "Need to cleanup halt ep, do it\n");
+ xhci_cleanup_halted_endpoint(xhci,
+ slot_id, ep_index, ep_ring->stream_id,
+ td, event_trb);
+ } else {
+
+ /* Update ring dequeue pointer */
+ while (ep_ring->dequeue != td->last_trb)
+ inc_deq(xhci, ep_ring, false);
+ inc_deq(xhci, ep_ring, false);
+ }
+td_cleanup:
+ /* Clean up the endpoint's TD list */
+ urb = td->urb;
+ urb_priv = urb->hcpriv;
+
+ /* Do one last check of the actual transfer length.
+ * If the host controller said we transferred more data than
+ * the buffer length, urb->actual_length will be a very big
+ * number (since it's unsigned). Play it safe and say we didn't
+ * transfer anything.
+ */
+ if (urb->actual_length > urb->transfer_buffer_length) {
+ xhci_warn(xhci, "URB transfer length is wrong, "
+ "xHC issue? req. len = %u, "
+ "act. len = %u\n",
+ urb->transfer_buffer_length,
+ urb->actual_length);
+ urb->actual_length = 0;
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ *status = -EREMOTEIO;
+ else
+ *status = 0;
+ }
+#if 0
+ spin_lock(&ep_ring->lock);
+ list_del(&td->td_list);
+ spin_unlock(&ep_ring->lock);
+#endif
+ //list_del(&td->td_list);
+ list_del_init(&td->td_list);
+ /* Was this TD slated to be cancelled but completed anyway? */
+ if (!list_empty(&td->cancelled_td_list))
+ list_del_init(&td->cancelled_td_list);
+
+ urb_priv->td_cnt++;
+ /* Giveback the urb when all the tds are completed */
+ if (urb_priv->td_cnt == urb_priv->length)
+ ret = 1;
+ }
+ return ret;
+}
+
+
+/*
+ * Process control tds, update urb status and actual_length.
+ */
+static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ union xhci_trb *event_trb, struct xhci_transfer_event *event,
+ struct xhci_virt_ep *ep, int *status)
+{
+ struct xhci_virt_device *xdev;
+ struct xhci_ring *ep_ring;
+ unsigned int slot_id;
+ int ep_index;
+ struct xhci_ep_ctx *ep_ctx;
+ u32 trb_comp_code;
+
+ slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
+ xdev = xhci->devs[slot_id];
+ ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
+ ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+ ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+ trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+
+ xhci_debug_trb(xhci, xhci->event_ring->dequeue);
+ switch (trb_comp_code) {
+ case COMP_SUCCESS:
+ if (event_trb == ep_ring->dequeue) {
+ xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
+ "without IOC set??\n");
+ *status = -ESHUTDOWN;
+ } else if (event_trb != td->last_trb) {
+#if 1
+ xhci_warn(xhci, "WARN: Success on ctrl data TRB "
+ "without IOC set??\n");
+#endif
+ *status = -ESHUTDOWN;
+ } else {
+ xhci_dbg(xhci, "Successful control transfer!\n");
+ *status = 0;
+ }
+ break;
+ case COMP_SHORT_TX:
+ xhci_warn(xhci, "WARN: short transfer on control ep\n");
+ *status = 0;
+ break;
+ case COMP_STOP_INVAL:
+ case COMP_STOP:
+ return finish_td(xhci, td, event_trb, event, ep, status, false);
+ default:
+ xhci_dbg(xhci, "TRB error code %u, "
+ "halted endpoint index = %u\n",
+ trb_comp_code, ep_index);
+ if (!xhci_requires_manual_halt_cleanup(xhci,
+ ep_ctx, trb_comp_code))
+ break;
+ /* else fall through */
+ case COMP_STALL:
+ /* Did we transfer part of the data (middle) phase? */
+ if (event_trb != ep_ring->dequeue &&
+ event_trb != td->last_trb)
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length
+ - TRB_LEN(le32_to_cpu(event->transfer_len));
+ else
+ td->urb->actual_length = 0;
+
+ xhci_cleanup_halted_endpoint(xhci,
+ slot_id, ep_index, 0, td, event_trb);
+ return finish_td(xhci, td, event_trb, event, ep, status, true);
+ return 0;
+ }
+ /*
+ * Did we transfer any data, despite the errors that might have
+ * happened? I.e. did we get past the setup stage?
+ */
+ if (event_trb != ep_ring->dequeue) {
+ /* The event was for the status stage */
+ if (event_trb == td->last_trb) {
+ if (td->urb->actual_length != 0) {
+ /* Don't overwrite a previously set error code
+ */
+ if ((*status == -EINPROGRESS || *status == 0) &&
+ (td->urb->transfer_flags
+ & URB_SHORT_NOT_OK))
+ /* Did we already see a short data
+ * stage? */
+ *status = -EREMOTEIO;
+ } else {
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length;
+ }
+ } else {
+ /* Maybe the event was for the data stage? */
+ if (trb_comp_code != COMP_STOP_INVAL) {
+ /* We didn't stop on a link TRB in the middle */
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length -
+ TRB_LEN(le32_to_cpu(event->transfer_len));
+ xhci_dbg(xhci, "Waiting for status "
+ "stage event\n");
+ return 0;
+ }
+ }
+ }
+
+ return finish_td(xhci, td, event_trb, event, ep, status, false);
+}
+
+/*
+ * Process bulk and interrupt tds, update urb status and actual_length.
+ */
+static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ union xhci_trb *event_trb, struct xhci_transfer_event *event,
+ struct xhci_virt_ep *ep, int *status)
+{
+ struct xhci_ring *ep_ring;
+ union xhci_trb *cur_trb;
+ struct xhci_segment *cur_seg;
+ u32 trb_comp_code;
+
+ ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+ trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+
+ switch (trb_comp_code) {
+ case COMP_SUCCESS:
+ /* Double check that the HW transferred everything. */
+ if (event_trb != td->last_trb || TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
+ xhci_warn(xhci, "WARN Successful completion "
+ "on short TX\n");
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ *status = -EREMOTEIO;
+ else
+ *status = 0;
+ } else {
+ if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
+ xhci_dbg(xhci, "Successful bulk "
+ "transfer!\n");
+ else
+ xhci_dbg(xhci, "Successful interrupt "
+ "transfer!\n");
+ *status = 0;
+ }
+ break;
+ case COMP_SHORT_TX:
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ *status = -EREMOTEIO;
+ else
+ *status = 0;
+ break;
+ default:
+ /* Others already handled above */
+ break;
+ }
+ xhci_dbg(xhci,
+ "ep %#x - asked for %d bytes, "
+ "%d bytes untransferred\n",
+ td->urb->ep->desc.bEndpointAddress,
+ td->urb->transfer_buffer_length,
+ TRB_LEN(le32_to_cpu(event->transfer_len)));
+ /* Fast path - was this the last TRB in the TD for this URB? */
+ if (event_trb == td->last_trb) {
+ if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length -
+ TRB_LEN(le32_to_cpu(event->transfer_len));
+ if (td->urb->transfer_buffer_length <
+ td->urb->actual_length) {
+ xhci_warn(xhci, "HC gave bad length "
+ "of %d bytes left\n",
+ TRB_LEN(le32_to_cpu(event->transfer_len)));
+ td->urb->actual_length = 0;
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ *status = -EREMOTEIO;
+ else
+ *status = 0;
+ }
+ /* Don't overwrite a previously set error code */
+ if (*status == -EINPROGRESS) {
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ *status = -EREMOTEIO;
+ else
+ *status = 0;
+ }
+ } else {
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length;
+ /* Ignore a short packet completion if the
+ * untransferred length was zero.
+ */
+ if (*status == -EREMOTEIO)
+ *status = 0;
+ }
+ } else {
+ /* Slow path - walk the list, starting from the dequeue
+ * pointer, to get the actual length transferred.
+ */
+ td->urb->actual_length = 0;
+ for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
+ cur_trb != event_trb;
+ next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
+ if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
+ !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
+ td->urb->actual_length +=
+ TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
+ }
+ /* If the ring didn't stop on a Link or No-op TRB, add
+ * in the actual bytes transferred from the Normal TRB
+ */
+ if (trb_comp_code != COMP_STOP_INVAL)
+ td->urb->actual_length +=
+ TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
+ TRB_LEN(le32_to_cpu(event->transfer_len));
+ }
+ return finish_td(xhci, td, event_trb, event, ep, status, false);
+
+}
+
+
+/*
+ * Process isochronous tds, update urb packet status and actual_length.
+ */
+static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ union xhci_trb *event_trb, struct xhci_transfer_event *event,
+ struct xhci_virt_ep *ep, int *status)
+{
+ struct xhci_ring *ep_ring;
+ struct urb_priv *urb_priv;
+ int idx;
+ int len = 0;
+ int skip_td = 0;
+ union xhci_trb *cur_trb;
+ struct xhci_segment *cur_seg;
+ u32 trb_comp_code;
+
+ ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+ trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+ urb_priv = td->urb->hcpriv;
+ idx = urb_priv->td_cnt;
+
+ /* handle completion code */
+ switch (trb_comp_code) {
+ case COMP_SUCCESS:
+ td->urb->iso_frame_desc[idx].status = 0;
+ xhci_dbg(xhci, "Successful isoc transfer!\n");
+ break;
+ case COMP_SHORT_TX:
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ td->urb->iso_frame_desc[idx].status =
+ -EREMOTEIO;
+ else
+ td->urb->iso_frame_desc[idx].status = 0;
+ break;
+ case COMP_BW_OVER:
+ td->urb->iso_frame_desc[idx].status = -ECOMM;
+ skip_td = 1;
+ break;
+ case COMP_BUFF_OVER:
+ case COMP_BABBLE:
+ td->urb->iso_frame_desc[idx].status = -EOVERFLOW;
+ skip_td = 1;
+ break;
+ case COMP_STALL:
+ td->urb->iso_frame_desc[idx].status = -EPROTO;
+ skip_td = 1;
+ break;
+ case COMP_STOP:
+ case COMP_STOP_INVAL:
+ break;
+ default:
+ td->urb->iso_frame_desc[idx].status = -1;
+ break;
+ }
+
+ if (trb_comp_code == COMP_SUCCESS || skip_td == 1) {
+ td->urb->iso_frame_desc[idx].actual_length =
+ td->urb->iso_frame_desc[idx].length;
+ td->urb->actual_length +=
+ td->urb->iso_frame_desc[idx].length;
+ } else {
+ for (cur_trb = ep_ring->dequeue,
+ cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
+ next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
+ if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
+ !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
+ len +=
+ TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
+ }
+ len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
+ TRB_LEN(le32_to_cpu(event->transfer_len));
+
+ if (trb_comp_code != COMP_STOP_INVAL) {
+ td->urb->iso_frame_desc[idx].actual_length = len;
+ td->urb->actual_length += len;
+ }
+ }
+
+ if ((idx == urb_priv->length - 1) && *status == -EINPROGRESS){
+ *status = 0;
+ /* mark for IC Unit Test
+ * td->urb->status = 0;
+ */
+ }
+ return finish_td(xhci, td, event_trb, event, ep, status, false);
+
+}
+
+static void handle_cmd_completion(struct xhci_hcd *xhci,
+ struct xhci_event_cmd *event)
+{
+ int slot_id;
+ u64 cmd_dma;
+ dma_addr_t cmd_dequeue_dma;
+ union xhci_trb *trb;
+ struct xhci_input_control_ctx *ctrl_ctx;
+ struct xhci_virt_device *virt_dev;
+ unsigned int ep_index, ret;
+ struct xhci_ring *ep_ring;
+ unsigned int ep_state, port_id;
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ struct usb_device *udev, *rhdev;
+ struct xhci_slot_ctx *slot_ctx;
+ u64 temp_64;
+ struct urb *urb;
+ struct usb_ctrlrequest *dr;
+ struct usb_config_descriptor *desc;
+ int status;
+
+ slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
+ cmd_dma = le64_to_cpu(event->cmd_trb);
+ cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
+ xhci->cmd_ring->dequeue);
+ trb = xhci->cmd_ring->dequeue;
+ /* Is the command ring deq ptr out of sync with the deq seg ptr? */
+ if (cmd_dequeue_dma == 0) {
+ xhci->error_bitmask |= 1 << 4;
+ return;
+ }
+ /* Does the DMA address match our internal dequeue pointer address? */
+ if (cmd_dma != (u64) cmd_dequeue_dma) {
+ xhci->error_bitmask |= 1 << 5;
+ return;
+ }
+ xhci_dbg(xhci, "cmd : %d\n", ((le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]) & TRB_TYPE_BITMASK)>>10));
+ xhci_dbg(xhci, "comp code: %d\n", GET_COMP_CODE(le32_to_cpu(event->status)));
+ switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]) & TRB_TYPE_BITMASK) {
+ case TRB_TYPE(TRB_CMD_NOOP):
+ break;
+ case TRB_TYPE(TRB_ENABLE_SLOT):
+ if (GET_COMP_CODE(le32_to_cpu((event->status))) == COMP_SUCCESS){
+ xhci_dbg(xhci, "command enable slot success event\n");
+ g_slot_id = slot_id;
+ g_cmd_status = CMD_DONE;
+ }
+ else{
+ g_slot_id = 0;
+ g_cmd_status = CMD_FAIL;
+ }
+ break;
+ case TRB_TYPE(TRB_DISABLE_SLOT):
+ if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS){
+ xhci_dbg(xhci, "command disable slot success event, slot_id: %d\n", slot_id);
+ g_slot_id = slot_id;
+ g_cmd_status = CMD_DONE;
+ }
+ break;
+ case TRB_TYPE(TRB_ADDR_DEV):
+ xhci_dbg(xhci, "comp_code: %d\n", GET_COMP_CODE(le32_to_cpu(event->status)));
+ if(GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS){
+ xhci_dbg(xhci, "address device success\n");
+ g_cmd_status = CMD_DONE;
+ }
+ else if(GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_ABORT){
+ xhci_dbg(xhci, "address device command aborted\n");
+ g_cmd_status = CMD_DONE;
+ }
+ else{
+ g_cmd_status = CMD_FAIL;
+ }
+ break;
+ case TRB_TYPE(TRB_CONFIG_EP):
+ if(GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS){
+ xhci_dbg(xhci, "config endpoint success\n");
+ g_cmd_status = CMD_DONE;
+ }
+ else{
+ xhci_dbg(xhci, "config endpoint fail.....\n");
+ g_cmd_status = CMD_FAIL;
+ }
+ break;
+ case TRB_TYPE(TRB_RESET_DEV):
+ xhci_dbg(xhci, "TRB_RESET_DEV\n");
+ if(GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS){
+ xhci_dbg(xhci, "reset dev success\n");
+ g_cmd_status = CMD_DONE;
+ }
+ else{
+ xhci_dbg(xhci, "reset dev failed, code: %d\n", GET_COMP_CODE(le32_to_cpu(event->status)));
+ g_cmd_status = CMD_FAIL;
+ }
+ break;
+ case TRB_TYPE(TRB_STOP_RING):
+ xhci_dbg(xhci, "TRB_STOP_RING\n");
+ //xhci_err(xhci, "[DBG] stop ep event refer to 0x%x\n", event->cmd_trb);
+ if((((int)(le64_to_cpu(event->cmd_trb))) & 0xff0) != g_cmd_ring_pointer1 && (((int)(le64_to_cpu(event->cmd_trb))) & 0xff0) != g_cmd_ring_pointer2){
+ xhci_err(xhci, "[DBG] handle stop ep command pointer not equal to enqueued pointer, enqueue 0x%x , 0x%x, event refer 0x%x\n"
+ , g_cmd_ring_pointer1, g_cmd_ring_pointer2, (((int)(le64_to_cpu(event->cmd_trb))) & 0xff0));
+ while(1);
+ }
+ if(GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS){
+ xhci_dbg(xhci, "stop ring success\n");
+ g_cmd_status = CMD_DONE;
+ }
+ else{
+ xhci_dbg(xhci, "stop ring failed, code: %d\n", GET_COMP_CODE(le32_to_cpu(event->status)));
+ g_cmd_status = CMD_FAIL;
+ }
+
+ break;
+ case TRB_TYPE(TRB_SET_DEQ):
+ xhci_dbg(xhci, "TRB_SET_DEQ\n");
+ if(GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS){
+ ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+ virt_dev = xhci->devs[slot_id];
+ virt_dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
+ g_cmd_status = CMD_DONE;
+ }
+ else{
+ xhci_dbg(xhci, "stop ring failed, code: %d\n", GET_COMP_CODE(le32_to_cpu(event->status)));
+ g_cmd_status = CMD_FAIL;
+ }
+ break;
+ case TRB_TYPE(TRB_EVAL_CONTEXT):
+ xhci_dbg(xhci, "TRB_EVAL_CONTEXT\n");
+ if(GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS){
+ g_cmd_status = CMD_DONE;
+ }
+ else{
+ xhci_dbg(xhci, "eval context, code: %d\n", GET_COMP_CODE(le32_to_cpu(event->status)));
+ g_cmd_status = CMD_FAIL;
+ }
+ break;
+ case TRB_TYPE(TRB_RESET_EP):
+ xhci_dbg(xhci, "TRB_RESET_EP %d %d\n", slot_id, ep_index);
+ ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+ xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
+ g_cmd_status = CMD_DONE;
+ break;
+ default:
+ if(GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_STOP){
+ xhci_dbg(xhci, "command ring stopped\n");
+ g_cmd_status = CMD_DONE;
+ return;
+ }
+#if 0
+ case TRB_TYPE(TRB_COMPLETION):
+ if(GET_COMP_CODE(event->status) == COMP_CMD_STOP){
+ xhci_dbg(xhci, "stop command ring\n");
+ g_cmd_status = CMD_DONE;
+ }
+ else{
+ xhci_dbg(xhci, "stop command failed, code: %d\n", GET_COMP_CODE(event->status));
+ g_cmd_status = CMD_FAIL;
+ }
+ break;
+#endif
+ /* Skip over unknown commands on the event ring */
+ xhci->error_bitmask |= 1 << 6;
+ g_cmd_status = CMD_FAIL;
+ break;
+ }
+ inc_deq(xhci, xhci->cmd_ring, false);
+}
+
+void rh_port_clear_change(struct xhci_hcd *xhci, int port_id){
+ u32 temp;
+ u32 __iomem *addr;
+ port_id--;
+
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(port_id & 0xff);
+	temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "to clear port change, actual port %d status = 0x%x\n", port_id, temp);
+ temp = xhci_port_state_to_clear_change(temp);
+ xhci_writel(xhci, temp, addr);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "clear port change, actual port %d status = 0x%x\n", port_id, temp);
+}
+
+
+int rh_get_port_status(struct xhci_hcd *xhci, int port_id){
+ u32 temp,status;
+ u32 __iomem *addr;
+
+ port_id--;
+ status = 0;
+
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(port_id & 0xff);
+ temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "get port status, actual port %d status = 0x%x\n", port_id, temp);
+
+ /* wPortChange bits */
+ if (temp & PORT_CSC)
+ status |= USB_PORT_STAT_C_CONNECTION << 16;
+ if (temp & PORT_PEC)
+ status |= USB_PORT_STAT_C_ENABLE << 16;
+ if ((temp & PORT_OCC))
+ status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+ if ((temp & PORT_RC))
+ status |= USB_PORT_STAT_C_RESET << 16;
+ if ((temp & PORT_PLC))
+ status |= USB_PORT_STAT_C_SUSPEND << 16;
+ /*
+ * FIXME ignoring suspend, reset, and USB 2.1/3.0 specific
+ * changes
+ */
+ if (temp & PORT_CONNECT) {
+ status |= USB_PORT_STAT_CONNECTION;
+ status |= xhci_port_speed(temp);
+ }
+ if (temp & PORT_PE)
+ status |= USB_PORT_STAT_ENABLE;
+ if (temp & PORT_OC)
+ status |= USB_PORT_STAT_OVERCURRENT;
+ if (temp & PORT_RESET)
+ status |= USB_PORT_STAT_RESET;
+ if (temp & PORT_POWER)
+ status |= USB_PORT_STAT_POWER;
+ xhci_dbg(xhci, "Get port status returned 0x%x\n", status);
+ temp = xhci_port_state_to_neutral(temp);
+// xhci_writel(xhci, temp, addr);
+// temp = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "Actual port %d status = 0x%x\n", port_id, temp);
+#if 0
+ put_unaligned(cpu_to_le32(status), (__le32 *) buf);
+#endif
+ return status;
+}
+
+
+static void handle_port_status(struct xhci_hcd *xhci,
+ union xhci_trb *event)
+{
+ u32 port_id, temp;
+ int ret, port_status;
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ u32 __iomem *addr;
+ int port_index;
+ struct xhci_port *port;
+
+ /* Port status change events always have a successful completion code */
+ if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
+ xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
+ xhci->error_bitmask |= 1 << 8;
+ }
+ /* FIXME: core doesn't care about all port link state changes yet */
+ port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
+ port_index = get_port_index(port_id);
+ xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
+ xhci_dbg(xhci, "port_index: %d\n", port_index);
+ if(port_index >= RH_PORT_NUM){
+ xhci_err(xhci, "[ERROR] RH_PORT_NUM not enough\n");
+ return;
+ }
+ port = rh_port[port_index];
+ port->port_id = port_id;
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ port_status = rh_get_port_status(xhci, port_id);
+ xhci_dbg(xhci, "port_status %x\n", port_status);
+// rh_port_clear_change(xhci, port_id);
+
+ if(port_status & (USB_PORT_STAT_C_CONNECTION << 16)){
+ if(port_status & USB_PORT_STAT_CONNECTION){
+ xhci_dbg(xhci, "connect port status event, connected\n");
+ g_port_id = port_id;
+ g_port_connect = true;
+ port->port_status = CONNECTED;
+ if(!(port_status & USB_PORT_STAT_SUPER_SPEED)){
+ if(g_hs_block_reset){
+
+ }
+ else{
+ //reset status
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ temp = xhci_port_state_to_neutral(temp);
+ temp = (temp | PORT_RESET);
+ xhci_writel(xhci, temp, addr);
+ port->port_status = RESET;
+ }
+ }
+ else{
+ if(port->port_reenabled == 1){
+ port->port_reenabled = 2;
+ }
+ if(g_device_reconnect == 1)
+ g_device_reconnect = 2;
+ g_speed = USB_SPEED_SUPER;
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*((port_id-1) & 0xff);
+ temp = xhci_readl(xhci, addr);
+ if(((temp & PORT_RESET) == 0) && (temp & PORT_PE) && (PORT_PLS(temp) == 0)){
+ port->port_status = ENABLED;
+ port->port_speed = USB_SPEED_SUPER;
+ xhci_dbg(xhci, "port set: port_id %d, port_status %d, port_speed %d\n"
+ , port->port_id, port->port_status, port->port_speed);
+ g_port_reset = true;
+ }
+ else{
+ xhci_dbg(xhci, "Super speed port enabled failed!!\n");
+ xhci_dbg(xhci, "temp & PORT_RESET 0x%x\n", (temp & PORT_RESET));
+ xhci_dbg(xhci, "temp & PORT_PE 0x%x\n", (temp & PORT_PE));
+ xhci_dbg(xhci, "temp & PORT_PLS 0x%x\n", (PORT_PLS(temp)));
+ g_port_reset = false;
+ }
+ }
+ }
+ else{ //port disconnect
+ xhci_dbg(xhci, "connect port status event, disconnected\n");
+// port->port_id = 0;
+ port->port_speed = 0;
+ port->port_status = DISCONNECTED;
+ if(port->port_reenabled == 0){
+ port->port_reenabled = 1;
+ }
+ g_port_connect = false;
+ g_port_reset = false;
+ if(g_device_reconnect == 0)
+ g_device_reconnect = 1;
+ }
+ }
+ if((port_status & (USB_PORT_STAT_C_RESET << 16)) && (port_status & (USB_PORT_STAT_CONNECTION)) && !(port_status & USB_PORT_STAT_SUPER_SPEED)){
+ if(!(port_status & USB_PORT_STAT_RESET)){
+ if(port_status & USB_PORT_STAT_LOW_SPEED){
+ port->port_speed = USB_SPEED_LOW;
+ g_speed = USB_SPEED_LOW;
+ }
+ else if(port_status & USB_PORT_STAT_HIGH_SPEED){
+ port->port_speed = USB_SPEED_HIGH;
+ g_speed = USB_SPEED_HIGH;
+ }
+ else{
+ port->port_speed = USB_SPEED_FULL;
+ g_speed = USB_SPEED_FULL;
+ }
+ port->port_status = ENABLED;
+ if(port->port_reenabled == 1){
+ port->port_reenabled = 2;
+ }
+ if(g_device_reconnect == 1)
+ g_device_reconnect = 2;
+ g_port_reset = true;
+ }
+ else{
+ g_port_reset = false;
+ }
+ }
+ else if((port_status & (USB_PORT_STAT_C_RESET << 16)) && (port_status & (USB_PORT_STAT_CONNECTION))
+ && (port_status & USB_PORT_STAT_SUPER_SPEED)){
+ port->port_status = ENABLED;
+ }
+ if(port_status & (USB_PORT_STAT_C_SUSPEND << 16)){
+ xhci_dbg(xhci, "port link status changed, wake up \n");
+ //udelay(1000);
+ }
+ if(port_status & (USB_PORT_STAT_C_OVERCURRENT << 16)){
+ xhci_err(xhci, "port over current changed\n");
+ g_port_occ = true;
+ }
+ if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME){
+ g_port_resume = 1;
+ }
+ if ((temp & PORT_PLC)){
+ g_port_plc = 1;
+ }
+ rh_port_clear_change(xhci, port_id);
+ /* Update event ring dequeue pointer before dropping the lock */
+ inc_deq(xhci, xhci->event_ring, true);
+ xhci_set_hc_event_deq(xhci);
+}
+
+
+/*
+ * If this function returns an error condition, it means it got a Transfer
+ * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
+ * At this point, the host controller is probably hosed and should be reset.
+ */
+static int handle_tx_event(struct xhci_hcd *xhci,
+ struct xhci_transfer_event *event)
+{
+ struct xhci_virt_device *xdev;
+ struct xhci_virt_ep *ep;
+ struct xhci_ring *ep_ring;
+ unsigned int slot_id;
+ int ep_index;
+ struct xhci_td *td = NULL;
+ dma_addr_t event_dma;
+ struct xhci_segment *event_seg;
+ union xhci_trb *event_trb;
+ struct urb *urb = NULL;
+ int status = -EINPROGRESS;
+ struct xhci_ep_ctx *ep_ctx;
+ u32 trb_comp_code;
+ int i, ret;
+ char *tmp;
+ xhci_dbg(xhci, "Got tx complete event\n");
+ trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+ xhci_dbg(xhci, "trb_comp_code: %d *********************\n", trb_comp_code);
+#if 1
+ if(trb_comp_code == COMP_UNDERRUN || trb_comp_code == COMP_OVERRUN){
+ if(trb_comp_code == COMP_UNDERRUN){
+ //xhci_err(xhci, "underrun event on endpoint\n");
+ }
+ else if(trb_comp_code == COMP_OVERRUN){
+ //xhci_err(xhci, "overrun event on endpoint\n");
+ }
+ goto cleanup;
+ }
+#endif
+#if 0
+ if (trb_comp_code == COMP_STOP_INVAL ||
+ trb_comp_code == COMP_STOP) {
+ /* The Endpoint Stop Command completion will take care of any
+ * stopped TDs. A stopped TD may be restarted, so don't update
+ * the ring dequeue pointer or take this TD off any lists yet.
+ */
+ g_cmd_status = CMD_DONE;
+ goto cleanup;
+ }
+#endif
+#if 1
+ xhci_dbg(xhci, "In %s\n", __func__);
+ slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
+ xdev = xhci->devs[slot_id];
+ if (!xdev) {
+ xhci_err(xhci, "[ERROR] Transfer event pointed to bad slot\n");
+ return -ENODEV;
+ }
+
+ /* Endpoint ID is 1 based, our index is zero based */
+ ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
+ xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
+ ep = &xdev->eps[ep_index];
+ ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+ ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+ if (!ep_ring || (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == EP_STATE_DISABLED) {
+ xhci_err(xhci, "[ERROR] Transfer event for disabled endpoint "
+ "or incorrect stream ring\n");
+ return -ENODEV;
+ }
+
+ event_dma = le64_to_cpu(event->buffer);
+ /* This TRB should be in the TD at the head of this ring's TD list */
+ xhci_dbg(xhci, "%s - checking for list empty\n", __func__);
+ if (list_empty(&ep_ring->td_list)) {
+ if(!g_test_random_stop_ep){
+ xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
+ TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), ep_index);
+ xhci_warn(xhci, "Event TRB(0x%x): 0x%llx 0x%x 0x%x\n"
+ , event, le64_to_cpu(event->buffer), le32_to_cpu(event->transfer_len), le32_to_cpu(event->flags));
+ xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+ (unsigned int) (le32_to_cpu(event->flags) & TRB_TYPE_BITMASK)>>10);
+ xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
+ }
+ urb = NULL;
+ goto cleanup;
+ }
+ xhci_dbg(xhci, "%s - getting list entry\n", __func__);
+ td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+
+ /* Is this a TRB in the currently executing TD? */
+ xhci_dbg(xhci, "%s - looking for TD\n", __func__);
+ event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
+ td->last_trb, event_dma);
+ xhci_dbg(xhci, "%s - found event_seg = %p\n", __func__, event_seg);
+ if (trb_comp_code == COMP_STOP_INVAL || trb_comp_code == COMP_STOP) {
+ /* The Endpoint Stop Command completion will take care of any
+ * stopped TDs. A stopped TD may be restarted, so don't update
+ * the ring dequeue pointer or take this TD off any lists yet.
+ */
+ goto cleanup;
+ }
+ if (!event_seg) {
+ /* HC is busted, give up! */
+ xhci_err(xhci, "[ERROR] Transfer event TRB DMA ptr not part of current TD\n");
+ return -ESHUTDOWN;
+ }
+ event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
+ xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+ (unsigned int) (le32_to_cpu(event->flags) & TRB_TYPE_BITMASK)>>10);
+ xhci_dbg(xhci, "Offset 0x00 (buffer lo) = 0x%x\n",
+ lower_32_bits(le64_to_cpu(event->buffer)));
+ xhci_dbg(xhci, "Offset 0x04 (buffer hi) = 0x%x\n",
+ upper_32_bits(le64_to_cpu(event->buffer)));
+ xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
+ (unsigned int) le32_to_cpu(event->transfer_len));
+ xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
+ (unsigned int) le32_to_cpu(event->flags));
+
+ /* Look for common error cases */
+ trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+// xhci_dbg(xhci, "td->urb 0x%x\n", td->urb);
+ switch (trb_comp_code) {
+ /* Skip codes that require special handling depending on
+ * transfer type
+ */
+ case COMP_SUCCESS:
+#if 0
+ if(usb_endpoint_xfer_control(&td->urb->ep->desc) && g_con_is_enter){
+ udelay(g_con_delay_us);
+ f_port_set_pls(g_port_id, g_con_enter_ux);
+ udelay(100);
+ f_port_set_pls((int)g_port_id, 0);
+ }
+#endif
+ if(!usb_endpoint_xfer_isoc(&td->urb->ep->desc)){
+ td->urb->actual_length = td->urb->transfer_buffer_length - GET_TRANSFER_LENGTH(le32_to_cpu(event->transfer_len));
+ /* mark for IC Unit Test
+ *td->urb->status = 0;
+ */
+ xhci_dbg(xhci, "urb transfer buffer length: %d\n", td->urb->transfer_buffer_length);
+ xhci_dbg(xhci, "event trb transfer length: %d\n", GET_TRANSFER_LENGTH(le32_to_cpu(event->transfer_len)));
+ }
+ break;
+ case COMP_SHORT_TX:
+ if(!usb_endpoint_xfer_isoc(&td->urb->ep->desc)){
+ td->urb->actual_length = td->urb->transfer_buffer_length - GET_TRANSFER_LENGTH(le32_to_cpu(event->transfer_len));
+ td->urb->status = 0;
+ }
+ break;
+ case COMP_STOP:
+ xhci_dbg(xhci, "Stopped on Transfer TRB\n");
+ break;
+ case COMP_STOP_INVAL:
+ xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
+ break;
+ case COMP_STALL:
+ xhci_warn(xhci, "WARN: Stalled endpoint\n");
+ ep->ep_state |= EP_HALTED;
+ td->urb->status = -EPIPE;
+ break;
+ case COMP_TRB_ERR:
+ xhci_warn(xhci, "WARN: TRB error on endpoint\n");
+ td->urb->status = -EILSEQ;
+ break;
+ case COMP_SPLIT_ERR:
+ case COMP_TX_ERR:
+ xhci_warn(xhci, "WARN: transfer error on endpoint\n");
+ td->urb->status = -EPROTO;
+ break;
+ case COMP_BABBLE:
+ xhci_warn(xhci, "WARN: babble error on endpoint, ep_idx %d\n", ep_index);
+ td->urb->status = -EOVERFLOW;
+ break;
+ case COMP_DB_ERR:
+ xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
+ td->urb->status = -ENOSR;
+ break;
+ case COMP_BW_OVER:
+ xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
+ break;
+ case COMP_BUFF_OVER:
+ xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
+ break;
+ case COMP_UNDERRUN:
+ /*
+ * When the Isoch ring is empty, the xHC will generate
+ * a Ring Overrun Event for IN Isoch endpoint or Ring
+ * Underrun Event for OUT Isoch endpoint.
+ */
+ xhci_dbg(xhci, "underrun event on endpoint\n");
+ if (!list_empty(&ep_ring->td_list))
+ xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
+ "still with TDs queued?\n",
+ TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), ep_index);
+ break;
+ case COMP_OVERRUN:
+ xhci_dbg(xhci, "overrun event on endpoint\n");
+ if (!list_empty(&ep_ring->td_list))
+ xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
+ "still with TDs queued?\n",
+ TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), ep_index);
+ break;
+ case COMP_MISSED_INT:
+ /*
+ * When encounter missed service error, one or more isoc tds
+ * may be missed by xHC.
+ * Set skip flag of the ep_ring; Complete the missed tds as
+ * short transfer when process the ep_ring next time.
+ */
+ xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
+ break;
+ default:
+ if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
+ td->urb->status = 0;
+ break;
+ }
+ xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted, comp_code %d\n", trb_comp_code);
+ urb = NULL;
+ return -ENODEV;
+ }
+ /* Now update the urb's actual_length and give back to
+ * the core
+ */
+ status = td->urb->status;
+ if (usb_endpoint_xfer_control(&td->urb->ep->desc))
+ ret = process_ctrl_td(xhci, td, event_trb, event, ep,
+ &status);
+ else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
+ ret = process_isoc_td(xhci, td, event_trb, event, ep,
+ &status);
+ else
+ ret = process_bulk_intr_td(xhci, td, event_trb, event,
+ ep, &status);
+ td->urb->status = status;
+#if 0
+ /* Update ring dequeue pointer */
+ while (ep_ring->dequeue != td->last_trb)
+ inc_deq(xhci, ep_ring, false);
+ inc_deq(xhci, ep_ring, false);
+#endif
+
+#if 0
+ urb = td->urb;
+ list_del(&td->td_list);
+ if (usb_endpoint_xfer_control(&urb->ep->desc) ||
+ (trb_comp_code != COMP_STALL &&
+ trb_comp_code != COMP_BABBLE)) {
+ kfree(td);
+ }
+#endif
+ #if 0
+ //td cleanup
+ list_del(&td->td_list);
+ /* Was this TD slated to be cancelled but completed anyway? */
+ if (!list_empty(&td->cancelled_td_list))
+ list_del(&td->cancelled_td_list);
+ /* Leave the TD around for the reset endpoint function to use
+ * (but only if it's not a control endpoint, since we already
+ * queued the Set TR dequeue pointer command for stalled
+ * control endpoints).
+ */
+ if (usb_endpoint_xfer_control(&urb->ep->desc) ||
+ (trb_comp_code != COMP_STALL &&
+ trb_comp_code != COMP_BABBLE)) {
+ kfree(td);
+ }
+ #endif
+#if 0
+ if(dev_list[0] && dev_list[0]->slot_id==slot_id){
+ xhci_dbg(xhci, "dev 0 slot_id %d trans done\n", slot_id);
+ g_trans_status1 = TRANS_DONE;
+ }
+ else if(dev_list[1] && dev_list[1]->slot_id==slot_id){
+ xhci_dbg(xhci, "dev 1 slot_id %d trans done\n", slot_id);
+ g_trans_status2 = TRANS_DONE;
+ }
+ else if(dev_list[2] && dev_list[2]->slot_id==slot_id){
+ xhci_dbg(xhci, "dev 2 slot_id %d trans done\n", slot_id);
+ g_trans_status3 = TRANS_DONE;
+ }
+ else if(dev_list[3] && dev_list[3]->slot_id==slot_id){
+ xhci_dbg(xhci, "dev 3 slot_id %d trans done\n", slot_id);
+ g_trans_status4 = TRANS_DONE;
+ }
+#endif
+#if 0
+ if(ep_index == 1 || ep_index == 2 || (dev_list[0] && dev_list[0]->slot_id==slot_id)){
+ g_trans_status1 = TRANS_DONE;
+ }
+ else if(ep_index == 3 || ep_index == 4 || (dev_list[1] && dev_list[1]->slot_id==slot_id)){
+ g_trans_status2 = TRANS_DONE;
+ }
+ else if(ep_index == 5 || ep_index == 6 || (dev_list[2] && dev_list[2]->slot_id==slot_id)){
+ g_trans_status3 = TRANS_DONE;
+ }
+ else if(ep_index == 7 || ep_index == 8 || (dev_list[3] && dev_list[3]->slot_id==slot_id)){
+ g_trans_status4 = TRANS_DONE;
+ }
+#endif
+
+#endif
+cleanup:
+ inc_deq(xhci, xhci->event_ring, true);
+ xhci_set_hc_event_deq(xhci);
+
+ /* FIXME for multi-TD URBs (who have buffers bigger than 64MB) */
+ return 0;
+}
+
+void xhci_handle_event(struct xhci_hcd *xhci){
+ union xhci_trb *event;
+ struct xhci_generic_trb *generic_event;
+ int update_ptrs = 1;
+ int ret;
+
+ event = xhci->event_ring->dequeue;
+#if 1
+ if(g_event_full){
+ struct xhci_generic_trb *event_trb = &event->generic;
+ if(GET_COMP_CODE(le32_to_cpu(event_trb->field[2])) == COMP_ER_FULL){
+ xhci_dbg(xhci, "Got event ring full\n");
+ g_got_event_full = true;
+ }
+ else{
+ xhci_dbg(xhci, "increase SW dequeue pointer\n");
+ inc_deq(xhci, xhci->event_ring, true);
+ return;
+ }
+ }
+#endif
+ if((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_MFINDEX_WRAP)){
+ g_mfindex_event++;
+ }
+ /* Does the HC or OS own the TRB? */
+ if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
+ xhci->event_ring->cycle_state) {
+ xhci->error_bitmask |= 1 << 2;
+ return;
+ }
+ xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);
+
+/*
+ * Barrier between reading the TRB_CYCLE (valid) flag above and any
+ * speculative reads of the event's flags/data below.
+ */
+ rmb();
+
+ /* FIXME: Handle more event types. */
+ switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
+ case TRB_TYPE(TRB_COMPLETION):
+#if 1
+ xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
+ handle_cmd_completion(xhci, &event->event_cmd);
+ xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__);
+#endif
+ break;
+
+ case TRB_TYPE(TRB_PORT_STATUS):
+#if 1
+ xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__);
+ handle_port_status(xhci, event);
+ xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__);
+ update_ptrs = 0;
+#endif
+ break;
+ case TRB_TYPE(TRB_TRANSFER):
+#if 1
+ xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__);
+ ret = handle_tx_event(xhci, &event->trans_event);
+ xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__);
+ if (ret < 0)
+ xhci->error_bitmask |= 1 << 9;
+ else
+ update_ptrs = 0;
+#endif
+ break;
+ case TRB_TYPE(TRB_DEV_NOTE):
+ xhci_dbg(xhci, "Got device notification packet\n");
+ generic_event = &event->generic;
+ xhci_dbg(xhci, "fields 0x%x 0x%x 0x%x 0x%x\n"
+ , le32_to_cpu(generic_event->field[0]), le32_to_cpu(generic_event->field[1]), le32_to_cpu(generic_event->field[2])
+ , le32_to_cpu(generic_event->field[3]));
+ g_dev_notification = TRB_DEV_NOTE_TYEP(le32_to_cpu(generic_event->field[0]));
+ xhci_dbg(xhci, "notification type %d\n", g_dev_notification);
+ g_dev_not_value = TRB_DEV_NOTE_VALUE_LO(le32_to_cpu(generic_event->field[0]));
+ //| (le32_to_cpu(generic_event->field[1]) << 32);
+ xhci_dbg(xhci, "notification value %d\n", g_dev_not_value);
+ break;
+ default:
+ break;
+#if 0
+ if ((event->event_cmd.flags & TRB_TYPE_BITMASK) >= TRB_TYPE(48))
+ handle_vendor_event(xhci, event);
+ else
+ xhci->error_bitmask |= 1 << 3;
+#endif
+ }
+ /* Any of the above functions may drop and re-acquire the lock, so check
+ * to make sure a watchdog timer didn't mark the host as non-responsive.
+ */
+ if (update_ptrs) {
+ /* Update SW and HC event ring dequeue pointer */
+ inc_deq(xhci, xhci->event_ring, true);
+ xhci_set_hc_event_deq(xhci);
+ }
+ /* Are there more items on the event ring? */
+ xhci_handle_event(xhci);
+}
+
+
+/**** Endpoint Ring Operations ****/
+
+/*
+ * Generic function for queueing a TRB on a ring.
+ * The caller must have checked to make sure there's room on the ring.
+ *
+ * @more_trbs_coming: Will you enqueue more TRBs before calling
+ * prepare_transfer()?
+ */
+static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ bool consumer, bool more_trbs_coming,
+ u32 field1, u32 field2, u32 field3, u32 field4)
+{
+ struct xhci_generic_trb *trb;
+
+ trb = &ring->enqueue->generic;
+ trb->field[0] = cpu_to_le32(field1);
+ trb->field[1] = cpu_to_le32(field2);
+ trb->field[2] = cpu_to_le32(field3);
+ trb->field[3] = cpu_to_le32(field4);
+ xhci_dbg(xhci, "Dump TRB: 0x%x 0x%x 0x%x 0x%x\n", trb->field[0], trb->field[1], trb->field[2], trb->field[3]);
+ inc_enq(xhci, ring, consumer, more_trbs_coming);
+}
+
+/*
+ * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
+ * FIXME allocate segments if the ring is full.
+ */
+static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, /* validate ep state and reserve room for num_trbs; 0 or -errno */
+		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
+{
+	/* Make sure the endpoint has been added to xHC schedule */
+	xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
+	switch (ep_state) {
+	case EP_STATE_DISABLED:
+		/*
+		 * USB core changed config/interfaces without notifying us,
+		 * or hardware is reporting the wrong state.
+		 */
+		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
+		return -ENOENT;
+	case EP_STATE_ERROR:
+		xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
+		/* FIXME event handling code for error needs to clear it */
+		/* XXX not sure if this should be -ENOENT or not */
+		return -EINVAL;
+	case EP_STATE_HALTED:
+		xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n"); /* intentional fall-through: still queue */
+	case EP_STATE_STOPPED:
+	case EP_STATE_RUNNING:
+		break;
+	default:
+		xhci_err(xhci, "[ERROR] unknown endpoint state for ep\n");
+		/*
+		 * FIXME issue Configure Endpoint command to try to get the HC
+		 * back into a known state.
+		 */
+		return -EINVAL;
+	}
+	if (!room_on_ring(xhci, ep_ring, num_trbs)) {
+		/* FIXME allocate more room */
+		xhci_err(xhci, "[ERROR] no room on ep ring, num_trbs %d\n", num_trbs);
+		return -ENOMEM;
+	}
+
+	if (enqueue_is_link_trb(ep_ring)) {
+		struct xhci_ring *ring = ep_ring;
+		union xhci_trb *next;
+
+		xhci_dbg(xhci, "prepare_ring: pointing to link trb\n");
+		next = ring->enqueue;
+
+		while (last_trb(xhci, ring, ring->enq_seg, next)) { /* hand any link TRB(s) at enqueue over to the HC */
+
+			/* If we're not dealing with 0.95 hardware,
+			 * clear the chain bit.
+			 */
+			if (!xhci_link_trb_quirk(xhci))
+				next->link.control &= cpu_to_le32(~TRB_CHAIN);
+			else
+				next->link.control |= cpu_to_le32(TRB_CHAIN);
+
+			wmb(); /* control bits must be visible before the cycle flip gives the link TRB to HW */
+			next->link.control ^= cpu_to_le32(TRB_CYCLE);
+
+			/* Toggle the cycle bit after the last ring segment. */
+			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
+				ring->cycle_state = (ring->cycle_state ? 0 : 1);
+				if (!in_interrupt()) {
+					xhci_dbg(xhci, "queue_trb: Toggle cycle "
+							"state for ring %p = %i\n",
+							ring, (unsigned int)ring->cycle_state);
+				}
+			}
+			ring->enq_seg = ring->enq_seg->next; /* move enqueue to the start of the next segment */
+			ring->enqueue = ring->enq_seg->trbs;
+			next = ring->enqueue;
+		}
+	}
+
+	return 0;
+}
+
+static int prepare_transfer(struct xhci_hcd *xhci, /* per-TD setup: check ring space and link the TD onto the ep ring */
+		struct xhci_virt_device *xdev,
+		unsigned int ep_index,
+		unsigned int stream_id,
+		unsigned int num_trbs,
+		struct urb *urb,
+		unsigned int td_index,
+		gfp_t mem_flags)
+{
+	int ret;
+	struct urb_priv *urb_priv;
+	struct xhci_td *td;
+	struct xhci_ring *ep_ring;
+	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+
+	ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
+	if (!ep_ring) {
+		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
+				stream_id);
+		return -EINVAL;
+	}
+	xhci_dbg(xhci, "prepare transfer EP[%d]\n", ep_index);
+	ret = prepare_ring(xhci, ep_ring, /* validates ep state and reserves num_trbs slots */
+			le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
+			num_trbs, mem_flags);
+	if (ret)
+		return ret;
+
+	urb_priv = urb->hcpriv;
+	td = urb_priv->td[td_index]; /* TD storage was allocated by the caller in urb_priv */
+
+	INIT_LIST_HEAD(&td->td_list);
+	INIT_LIST_HEAD(&td->cancelled_td_list);
+
+	td->urb = urb;
+	list_add_tail(&td->td_list, &ep_ring->td_list);
+	td->start_seg = ep_ring->enq_seg; /* remember where this TD begins for completion handling */
+	td->first_trb = ep_ring->enqueue;
+
+	urb_priv->td[td_index] = td;
+
+	return 0;
+}
+
+static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb) /* TRBs needed to map the urb's sg list (64KB boundary rule) */
+{
+	int num_sgs, num_trbs, running_total, temp, i;
+	struct scatterlist *sg;
+
+	sg = NULL;
+	num_sgs = urb->num_sgs;
+	temp = urb->transfer_buffer_length; /* bytes of the urb still unaccounted for */
+
+	xhci_dbg(xhci, "count sg list trbs: \n");
+	num_trbs = 0;
+	for_each_sg(urb->sg, sg, num_sgs, i) {
+		unsigned int previous_total_trbs = num_trbs;
+		unsigned int len = sg_dma_len(sg);
+
+		/* Scatter gather list entries may cross 64KB boundaries */
+		running_total = TRB_MAX_BUFF_SIZE -
+			(sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+		if (running_total != 0)
+			num_trbs++; /* partial chunk before the first 64KB boundary */
+
+		/* How many more 64KB chunks to transfer, how many more TRBs? */
+		while (running_total < sg_dma_len(sg)) {
+			num_trbs++;
+			running_total += TRB_MAX_BUFF_SIZE;
+		}
+		xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
+				i, (unsigned long long)sg_dma_address(sg),
+				len, len, num_trbs - previous_total_trbs);
+
+		len = min_t(int, len, temp);
+		temp -= len;
+		if (temp == 0)
+			break; /* whole requested length covered; ignore trailing sg entries */
+	}
+	xhci_dbg(xhci, "\n");
+	if (!in_interrupt())
+		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n",
+				urb->ep->desc.bEndpointAddress,
+				urb->transfer_buffer_length,
+				num_trbs);
+	return num_trbs;
+}
+
+static void check_trb_math(struct urb *urb, int num_trbs, int running_total) /* debug sanity check: all TRBs consumed and all bytes queued */
+{
+	if (num_trbs != 0)
+		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+				"TRBs, %d left\n", __func__,
+				urb->ep->desc.bEndpointAddress, num_trbs);
+	if (running_total != urb->transfer_buffer_length)
+		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+				"queued %#x (%d), asked for %#x (%d)\n",
+				__func__,
+				urb->ep->desc.bEndpointAddress,
+				running_total, running_total,
+				urb->transfer_buffer_length,
+				urb->transfer_buffer_length);
+}
+
+static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, /* hand a fully built TD to HW and ring the doorbell */
+		unsigned int ep_index, unsigned int stream_id, int start_cycle,
+		struct xhci_generic_trb *start_trb, struct xhci_td *td)
+{
+	/*
+	 * Pass all the TRBs to the hardware at once and make sure this write
+	 * isn't reordered.
+	 */
+	wmb(); /* all TRB writes must land before the first TRB's cycle bit flips */
+	if (start_cycle)
+		start_trb->field[3] |= cpu_to_le32(start_cycle);
+	else
+		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE); /* saved cycle was 0: clear the bit set during queueing */
+	ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
+}
+
+/*
+ * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
+ * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
+ * (comprised of sg list entries) can take several service intervals to
+ * transmit.
+ */
+int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, /* reconcile urb->interval with the ep context, then queue as bulk */
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
+			xhci->devs[slot_id]->out_ctx, ep_index);
+	int xhci_interval;
+	int ep_interval;
+
+	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
+	ep_interval = urb->interval;
+	/* Convert to microframes */
+	if (urb->dev->speed == USB_SPEED_LOW ||
+			urb->dev->speed == USB_SPEED_FULL)
+		ep_interval *= 8; /* LS/FS urb intervals are in frames (8 uframes each) */
+	/* FIXME change this to a warning and a suggestion to use the new API
+	 * to set the polling interval (once the API is added).
+	 */
+	if (xhci_interval != ep_interval) {
+		if (printk_ratelimit()) /* BUGFIX: was "!printk_ratelimit()", which logged only while rate-limited; match xhci_queue_isoc_tx_prepare() */
+			dev_dbg(&urb->dev->dev, "Driver uses different interval"
+					" (%d microframe%s) than xHCI "
+					"(%d microframe%s)\n",
+					ep_interval,
+					ep_interval == 1 ? "" : "s",
+					xhci_interval,
+					xhci_interval == 1 ? "" : "s");
+		urb->interval = xhci_interval; /* the HC's programmed interval wins; adjust the urb */
+		/* Convert back to frames for LS/FS devices */
+		if (urb->dev->speed == USB_SPEED_LOW ||
+				urb->dev->speed == USB_SPEED_FULL)
+			urb->interval /= 8;
+	}
+	return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index); /* interrupt eps use Normal TRBs, same as bulk */
+}
+
+/*
+ * The TD size is the number of bytes remaining in the TD (including this TRB),
+ * right shifted by 10.
+ * It must fit in bits 21:17, so it can't be bigger than 31.
+ */
+static u32 xhci_td_remainder(unsigned int td_transfer_size, unsigned int td_running_total /* TD Size field: packets left in the TD, shifted to bits 21:17 */
+		, unsigned int maxp, unsigned trb_buffer_length)
+{
+	u32 max = 31; /* the TD Size field is 5 bits wide, so it saturates at 31 */
+	int remainder, td_packet_count, packet_transferred;
+
+	//0 for the last TRB
+	//FIXME: need to workaround if there is ZLP in this TD
+	if(td_running_total + trb_buffer_length == td_transfer_size)
+		return 0;
+
+	//FIXME: need to take care of high-bandwidth (MAX_ESIT)
+	packet_transferred = (td_running_total /*+ trb_buffer_length*/) / maxp;
+	td_packet_count = DIV_ROUND_UP(td_transfer_size, maxp);
+	remainder = td_packet_count - packet_transferred; /* packets still outstanding, including this TRB's */
+
+	if(remainder > max)
+		return max << 17;
+	else
+		return remainder << 17; /* shifted into the TD Size bit position */
+
+}
+
+static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, /* build one TD from the urb's scatter-gather list */
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_ring *ep_ring;
+	unsigned int num_trbs;
+	struct urb_priv *urb_priv;
+	struct xhci_td *td;
+	struct scatterlist *sg;
+	int num_sgs;
+	int trb_buff_len, this_sg_len, running_total;
+	bool first_trb;
+	u64 addr;
+	int max_packet; /* NOTE(review): only assigned for LS/FS/HS/SS in the switch below; used uninitialized for any other speed -- confirm */
+	bool more_trbs_coming;
+	bool zlp;
+//	int td_packet_count, trb_tx_len_sum, packet_transferred, trb_residue, td_size;
+
+	struct xhci_generic_trb *start_trb;
+	int start_cycle;
+
+	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+	if (!ep_ring)
+		return -EINVAL;
+
+	num_trbs = count_sg_trbs_needed(xhci, urb);
+	num_sgs = urb->num_sgs;
+
+	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
+			ep_index, urb->stream_id, num_trbs, urb, 0, mem_flags);
+	if (trb_buff_len < 0)
+		return trb_buff_len;
+
+	urb_priv = urb->hcpriv;
+	td = urb_priv->td[0];
+
+	zlp = false;
+	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
+	switch(urb->dev->speed){
+	case USB_SPEED_SUPER:
+		max_packet = urb->ep->desc.wMaxPacketSize; /* NOTE(review): raw __le16, no le16_to_cpu -- verify on big-endian */
+		break;
+	case USB_SPEED_HIGH:
+	case USB_SPEED_FULL:
+	case USB_SPEED_LOW:
+		max_packet = urb->ep->desc.wMaxPacketSize & 0x7ff; /* bits 10:0 carry the max packet size */
+		break;
+	}
+	if((urb->transfer_flags & URB_ZERO_PACKET)
+		&& ((urb->transfer_buffer_length % max_packet) == 0)){
+		zlp = true; /* exact multiple of maxp: a trailing zero-length TRB will be appended */
+	}
+#if 0
+	td_packet_count = urb->transfer_buffer_length/max_packet + (urb->transfer_buffer_length%max_packet > 0 ? 1 : 0);
+	trb_tx_len_sum = 0;
+	packet_transferred = 0;
+#endif
+	/*****************************************************************/
+
+	/*
+	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
+	 * until we've finished creating all the other TRBs. The ring's cycle
+	 * state may change as we enqueue the other TRBs, so save it too.
+	 */
+	start_trb = &ep_ring->enqueue->generic;
+	start_cycle = ep_ring->cycle_state;
+
+	running_total = 0;
+	/*
+	 * How much data is in the first TRB?
+	 *
+	 * There are three forces at work for TRB buffer pointers and lengths:
+	 * 1. We don't want to walk off the end of this sg-list entry buffer.
+	 * 2. The transfer length that the driver requested may be smaller than
+	 *    the amount of memory allocated for this scatter-gather list.
+	 * 3. TRBs buffers can't cross 64KB boundaries.
+	 */
+	sg = urb->sg;
+	addr = (u64) sg_dma_address(sg);
+	this_sg_len = sg_dma_len(sg);
+	trb_buff_len = TRB_MAX_BUFF_SIZE -
+		(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+	trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
+	if (trb_buff_len > urb->transfer_buffer_length)
+		trb_buff_len = urb->transfer_buffer_length;
+	xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
+			trb_buff_len);
+
+	first_trb = true;
+	/* Queue the first TRB, even if it's zero-length */
+	do {
+		u32 field = 0;
+		u32 length_field = 0;
+		u32 remainder = 0;
+
+		/* Don't change the cycle bit of the first TRB until later */
+		if (first_trb){
+			first_trb = false;
+			if (start_cycle == 0)
+				field |= 0x1; /* queued opposite to the final cycle; giveback_first_trb flips it */
+		}
+		else
+			field |= ep_ring->cycle_state;
+
+		/* Chain all the TRBs together; clear the chain bit in the last
+		 * TRB to indicate it's the last TRB in the chain.
+		 */
+		if (num_trbs > 1 || zlp) {
+			field |= TRB_CHAIN;
+		} else {
+			/* FIXME - add check for ZERO_PACKET flag before this */
+			td->last_trb = ep_ring->enqueue;
+			field |= TRB_IOC; /* interrupt on completion of the final TRB */
+		}
+		xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
+				"64KB boundary at %#x, end dma = %#x\n",
+				(unsigned int) addr, trb_buff_len, trb_buff_len,
+				(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
+				(unsigned int) addr + trb_buff_len);
+		if (TRB_MAX_BUFF_SIZE -
+				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
+			xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
+			xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
+					(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
+					(unsigned int) addr + trb_buff_len);
+		}
+		remainder = xhci_td_remainder(urb->transfer_buffer_length, running_total, max_packet, trb_buff_len);
+		length_field = TRB_LEN(trb_buff_len) |
+				remainder |
+				TRB_INTR_TARGET(0);
+		if (num_trbs > 1 || zlp)
+			more_trbs_coming = true;
+		else
+			more_trbs_coming = false;
+		xhci_dbg(xhci, "queue trb, len[%d], addr[0x%x]\n", trb_buff_len, addr);
+		queue_trb(xhci, ep_ring, false, more_trbs_coming,
+				lower_32_bits(addr),
+				upper_32_bits(addr),
+				length_field,
+				/* We always want to know if the TRB was short,
+				 * or we won't get an event when it completes.
+				 * (Unless we use event data TRBs, which are a
+				 * waste of space and HC resources.)
+				 */
+				field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
+		--num_trbs;
+		running_total += trb_buff_len;
+
+		/* Calculate length for next transfer --
+		 * Are we done queueing all the TRBs for this sg entry?
+		 */
+		this_sg_len -= trb_buff_len;
+		if (this_sg_len == 0) {
+			--num_sgs;
+			if (num_sgs == 0)
+				break;
+			sg = sg_next(sg); /* advance to the next sg entry */
+			addr = (u64) sg_dma_address(sg);
+			this_sg_len = sg_dma_len(sg);
+		} else {
+			addr += trb_buff_len; /* continue within the current sg entry */
+		}
+
+		trb_buff_len = TRB_MAX_BUFF_SIZE -
+			(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+		trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
+		if (running_total + trb_buff_len > urb->transfer_buffer_length)
+			trb_buff_len =
+				urb->transfer_buffer_length - running_total; /* never queue past the requested length */
+	} while (num_trbs > 0/*running_total < urb->transfer_buffer_length*/);
+	if(zlp){
+		u32 field = 0;
+		u32 length_field = 0;
+		length_field = TRB_LEN(0) | TRB_INTR_TARGET(0); /* trailing zero-length packet */
+		field |= ep_ring->cycle_state;
+		field |= TRB_IOC;
+		td->last_trb = ep_ring->enqueue;
+		xhci_dbg(xhci, "queue trb, len[0x%x], addr[0x%x]\n", length_field, addr);
+		queue_trb(xhci, ep_ring, false, false,
+				lower_32_bits(addr),
+				upper_32_bits(addr),
+				length_field,
+				/* We always want to know if the TRB was short,
+				 * or we won't get an event when it completes.
+				 * (Unless we use event data TRBs, which are a
+				 * waste of space and HC resources.)
+				 */
+				field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
+	}
+	check_trb_math(urb, num_trbs, running_total);
+	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+			start_cycle, start_trb, td);
+	return 0;
+}
+
+/* This is very similar to what ehci-q.c qtd_fill() does */
+int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, /* build one TD for a contiguous (non-sg) bulk urb */
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_ring *ep_ring;
+	struct urb_priv *urb_priv;
+	struct xhci_td *td;
+	int num_trbs;
+	struct xhci_generic_trb *start_trb;
+	bool first_trb;
+	bool more_trbs_coming;
+	int start_cycle;
+	u32 field, length_field;
+	int max_packet;
+	int running_total, trb_buff_len, ret;
+	u64 addr;
+
+	max_packet = 0;
+
+	if (urb->num_sgs)
+		return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index); /* scatter-gather urbs take the sg path */
+
+	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+	if (!ep_ring){
+		xhci_err(xhci, "xhci_queue_bulk_tx, Get transfer ring failed\n");
+		return -EINVAL;
+	}
+	num_trbs = 0;
+	/* How much data is (potentially) left before the 64KB boundary? */
+	running_total = TRB_MAX_BUFF_SIZE -
+		(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+
+	/* If there's some data on this 64KB chunk, or we have to send a
+	 * zero-length transfer, we need at least one TRB
+	 */
+	if (running_total != 0 || urb->transfer_buffer_length == 0)
+		num_trbs++;
+	/* How many more 64KB chunks to transfer, how many more TRBs? */
+	while (running_total < urb->transfer_buffer_length) {
+		num_trbs++;
+		running_total += TRB_MAX_BUFF_SIZE;
+	}
+#if 1
+	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
+	switch(urb->dev->speed){
+	case USB_SPEED_SUPER:
+		max_packet = urb->ep->desc.wMaxPacketSize; /* NOTE(review): raw __le16, no le16_to_cpu -- verify on big-endian */
+		break;
+	case USB_SPEED_HIGH:
+	case USB_SPEED_FULL:
+	case USB_SPEED_LOW:
+		max_packet = urb->ep->desc.wMaxPacketSize & 0x7ff; /* bits 10:0 carry the max packet size */
+		break;
+	default:
+		break;
+	}
+	if((urb->transfer_flags & URB_ZERO_PACKET)
+		&& ((urb->transfer_buffer_length % max_packet) == 0)){
+		num_trbs++; /* one extra TRB for the trailing zero-length packet */
+	}
+	/*****************************************************************/
+#endif
+
+
+	if (!in_interrupt())
+		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
+				urb->ep->desc.bEndpointAddress,
+				urb->transfer_buffer_length,
+				urb->transfer_buffer_length,
+				(unsigned long long)urb->transfer_dma,
+				num_trbs);
+
+	ret = prepare_transfer(xhci, xhci->devs[slot_id],
+			ep_index, urb->stream_id,
+			num_trbs, urb, 0, mem_flags);
+	if (ret < 0)
+		return ret;
+
+	urb_priv = urb->hcpriv;
+	td = urb_priv->td[0];
+
+	/*
+	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
+	 * until we've finished creating all the other TRBs. The ring's cycle
+	 * state may change as we enqueue the other TRBs, so save it too.
+	 */
+	start_trb = &ep_ring->enqueue->generic;
+	start_cycle = ep_ring->cycle_state;
+
+	running_total = 0;
+	/* How much data is in the first TRB? */
+	addr = (u64) urb->transfer_dma;
+	trb_buff_len = TRB_MAX_BUFF_SIZE -
+		(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+	if (urb->transfer_buffer_length < trb_buff_len)
+		trb_buff_len = urb->transfer_buffer_length;
+
+	first_trb = true;
+
+	/* Queue the first TRB, even if it's zero-length */
+	do {
+		u32 remainder = 0;
+		field = 0;
+
+		/* Don't change the cycle bit of the first TRB until later */
+		if (first_trb){
+			first_trb = false;
+			if (start_cycle == 0)
+				field |= 0x1; /* queued opposite to the final cycle; giveback_first_trb flips it */
+		}
+		else
+			field |= ep_ring->cycle_state;
+
+		/* Chain all the TRBs together; clear the chain bit in the last
+		 * TRB to indicate it's the last TRB in the chain.
+		 */
+		if (num_trbs > 1) {
+			field |= TRB_CHAIN;
+		} else {
+			/* FIXME - add check for ZERO_PACKET flag before this */
+			td->last_trb = ep_ring->enqueue;
+			field |= TRB_IOC;
+			if(g_is_bei){
+				field |= TRB_BEI; /* Block Event Interrupt (global debug knob) */
+			}
+		}
+		remainder = xhci_td_remainder(urb->transfer_buffer_length, running_total, max_packet, trb_buff_len);
+		length_field = TRB_LEN(trb_buff_len) |
+				remainder |
+				TRB_INTR_TARGET(0);
+		if (num_trbs > 1)
+			more_trbs_coming = true;
+		else
+			more_trbs_coming = false;
+//		xhci_dbg(xhci, "queue trb, len[%d], addr[0x%x]\n", trb_buff_len, addr);
+
+		if(g_idt_transfer && !usb_endpoint_dir_in(&urb->ep->desc)){ /* IDT debug path: first 8 buffer bytes ride inside the TRB */
+			struct xhci_generic_trb *trb;
+			u32 *idt_data;
+
+			idt_data = urb->transfer_buffer;
+			xhci_err(xhci, "idt_data: 0x%x\n", idt_data);
+			trb = &ep_ring->enqueue->generic;
+			trb->field[0] = (*idt_data); /* NOTE(review): stored without cpu_to_le32, unlike fields 2/3 -- confirm intent */
+			idt_data++;
+			trb->field[1] = (*idt_data);
+			trb->field[2] = cpu_to_le32(length_field);
+			trb->field[3] = cpu_to_le32(field | TRB_ISP | TRB_TYPE(TRB_NORMAL) | TRB_IDT);
+			xhci_dbg(xhci, "Dump TRB: 0x%x 0x%x 0x%x 0x%x\n", trb->field[0], trb->field[1], trb->field[2], trb->field[3]);
+			inc_enq(xhci, ep_ring, false, more_trbs_coming);
+		}
+		else{
+			queue_trb(xhci, ep_ring, false, more_trbs_coming,
+					lower_32_bits(addr),
+					upper_32_bits(addr),
+					length_field,
+					/* We always want to know if the TRB was short,
+					 * or we won't get an event when it completes.
+					 * (Unless we use event data TRBs, which are a
+					 * waste of space and HC resources.)
+					 */
+					field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
+		}
+		--num_trbs;
+		running_total += trb_buff_len;
+
+		/* Calculate length for next transfer */
+		addr += trb_buff_len;
+		trb_buff_len = urb->transfer_buffer_length - running_total; /* becomes 0 on the extra ZLP iteration */
+		if (trb_buff_len > TRB_MAX_BUFF_SIZE)
+			trb_buff_len = TRB_MAX_BUFF_SIZE;
+	} while (num_trbs > 0/*running_total < urb->transfer_buffer_length*/);
+
+	check_trb_math(urb, num_trbs, running_total);
+	if(g_td_to_noop){ /* debug path: convert the TD to no-ops and drop it without ringing the doorbell */
+		if (start_cycle)
+			start_trb->field[3] |= cpu_to_le32(start_cycle);
+		else
+			start_trb->field[3] &= cpu_to_le32(~0x1);
+		td_to_noop(xhci, ep_ring, td);
+		list_del(&td->td_list);
+		return 0;
+	}
+	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+			start_cycle, start_trb, td);
+	return 0;
+}
+
+static int count_isoc_trbs_needed(struct xhci_hcd *xhci, /* TRBs needed for isoc packet i (64KB boundary rule) */
+		struct urb *urb, int i)
+{
+	int num_trbs = 0;
+	u64 addr, td_len, running_total;
+
+	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
+	td_len = urb->iso_frame_desc[i].length;
+
+	running_total = TRB_MAX_BUFF_SIZE -
+		(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); /* bytes until the first 64KB boundary */
+	if (running_total != 0)
+		num_trbs++;
+
+	while (running_total < td_len) {
+		num_trbs++;
+		running_total += TRB_MAX_BUFF_SIZE;
+	}
+
+	return num_trbs;
+}
+
+
+/* This is for isoc transfer */
+static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, /* queue one TD per iso_frame_desc packet, then ring once */
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_ring *ep_ring;
+	struct urb_priv *urb_priv;
+	struct xhci_td *td;
+	int num_tds, trbs_per_td;
+	struct xhci_generic_trb *start_trb;
+	bool first_trb;
+	int start_cycle;
+	u32 field, length_field;
+	int running_total, trb_buff_len, td_len, td_remain_len, ret;
+	u64 start_addr, addr;
+	int i, j;
+	bool more_trbs_coming;
+	int max_packet; /* NOTE(review): only assigned for the speeds in the switch below -- confirm */
+	int max_esit_payload; /* NOTE(review): unused */
+	int frame_id;
+
+	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+
+	num_tds = urb->number_of_packets;
+	if (num_tds < 1) {
+		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
+		return -EINVAL;
+	}
+
+	if (!in_interrupt())
+		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d),"
+				" addr = %#llx, num_tds = %d\n",
+				urb->ep->desc.bEndpointAddress,
+				urb->transfer_buffer_length,
+				urb->transfer_buffer_length,
+				(unsigned long long)urb->transfer_dma,
+				num_tds);
+
+	start_addr = (u64) urb->transfer_dma;
+	start_trb = &ep_ring->enqueue->generic;
+	start_cycle = ep_ring->cycle_state;
+	switch(urb->dev->speed){
+	case USB_SPEED_SUPER:
+		max_packet = urb->ep->desc.wMaxPacketSize; /* NOTE(review): raw __le16, no le16_to_cpu -- verify on big-endian */
+		break;
+	case USB_SPEED_HIGH:
+	case USB_SPEED_FULL:
+	case USB_SPEED_LOW:
+		max_packet = urb->ep->desc.wMaxPacketSize & 0x7ff; /* bits 10:0 carry the max packet size */
+		break;
+	}
+
+	/* Queue the first TRB, even if it's zero-length */
+	for (i = 0; i < num_tds; i++) {
+		first_trb = true;
+
+		running_total = 0;
+		addr = start_addr + urb->iso_frame_desc[i].offset;
+		td_len = urb->iso_frame_desc[i].length;
+		td_remain_len = td_len;
+
+		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
+
+		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
+				urb->stream_id, trbs_per_td, urb, i, mem_flags);
+		if (ret < 0)
+			return ret;
+
+		urb_priv = urb->hcpriv;
+		td = urb_priv->td[i]; /* one TD per isoc packet */
+		for (j = 0; j < trbs_per_td; j++) {
+			u32 remainder = 0;
+			field = 0;
+
+			if (first_trb) {
+				/* Queue the isoc TRB */
+				field |= TRB_TYPE(TRB_ISOC);
+				/* Assume URB_ISO_ASAP is set */
+				if(g_iso_frame && i==0){ /* debug knob: target an explicit frame instead of SIA */
+					frame_id = xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3; /* current frame = uframe index / 8 */
+					frame_id &= 0x7ff;
+					frame_id--;
+					if(frame_id <0){
+						frame_id = 0x7ff; /* wrap below zero to the top of the 11-bit frame space */
+					}
+					field |= ((frame_id) << 20);
+					xhci_err(xhci, "[DBG]start frame id = %d\n", frame_id);
+				}
+				else{
+					field |= TRB_SIA; /* Start Isoch ASAP */
+				}
+				if (i == 0) {
+					if (start_cycle == 0)
+						field |= 0x1; /* queued opposite; flipped in giveback_first_trb */
+				} else
+					field |= ep_ring->cycle_state;
+				first_trb = false;
+			} else {
+				/* Queue other normal TRBs */
+				field |= TRB_TYPE(TRB_NORMAL);
+				field |= ep_ring->cycle_state;
+			}
+
+			/* Chain all the TRBs together; clear the chain bit in
+			 * the last TRB to indicate it's the last TRB in the
+			 * chain.
+			 */
+			if (j < trbs_per_td - 1) {
+				field |= TRB_CHAIN;
+				more_trbs_coming = true;
+			} else {
+				td->last_trb = ep_ring->enqueue;
+				field |= TRB_IOC;
+				more_trbs_coming = false;
+			}
+
+			/* Calculate TRB length */
+			trb_buff_len = TRB_MAX_BUFF_SIZE -
+				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+			if (trb_buff_len > td_remain_len)
+				trb_buff_len = td_remain_len;
+
+//			remainder = xhci_td_remainder(td_len - running_total);
+			remainder = xhci_td_remainder(td_len, running_total, max_packet, trb_buff_len);
+			length_field = TRB_LEN(trb_buff_len) |
+					remainder |
+					TRB_INTR_TARGET(0);
+			queue_trb(xhci, ep_ring, false, more_trbs_coming,
+					lower_32_bits(addr),
+					upper_32_bits(addr),
+					length_field,
+					/* We always want to know if the TRB was short,
+					 * or we won't get an event when it completes.
+					 * (Unless we use event data TRBs, which are a
+					 * waste of space and HC resources.)
+					 */
+					field | TRB_ISP);
+			running_total += trb_buff_len;
+
+			addr += trb_buff_len;
+			td_remain_len -= trb_buff_len;
+		}
+
+		/* Check TD length */
+		if (running_total != td_len) {
+			xhci_err(xhci, "ISOC TD length unmatch\n");
+			return -EINVAL;
+		}
+	}
+
+	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+			start_cycle, start_trb, td);
+	return 0;
+}
+
+/*
+ * Check transfer ring to guarantee there is enough room for the urb.
+ * Update ISO URB start_frame and interval.
+ * Update interval as xhci_queue_intr_tx does. Just use xhci frame_index to
+ * update the urb->start_frame by now.
+ * Always assume URB_ISO_ASAP set, and NEVER use urb->start_frame as input.
+ */
+int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, /* pre-check ring space for the whole urb, fix start_frame/interval */
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_virt_device *xdev;
+	struct xhci_ring *ep_ring;
+	struct xhci_ep_ctx *ep_ctx;
+	int start_frame;
+	int xhci_interval;
+	int ep_interval;
+	int num_tds, num_trbs, i;
+	int ret;
+
+	xdev = xhci->devs[slot_id];
+	ep_ring = xdev->eps[ep_index].ring;
+	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+
+	num_trbs = 0;
+	num_tds = urb->number_of_packets;
+	for (i = 0; i < num_tds; i++)
+		num_trbs += count_isoc_trbs_needed(xhci, urb, i); /* total TRBs across every packet of the urb */
+
+	/* Check the ring to guarantee there is enough room for the whole urb.
+	 * Do not insert any td of the urb to the ring if the check failed.
+	 */
+	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
+			num_trbs, mem_flags);
+	if (ret)
+		return ret;
+
+	start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
+	start_frame &= 0x3fff; /* MFINDEX is a 14-bit counter */
+
+	urb->start_frame = start_frame;
+	if (urb->dev->speed == USB_SPEED_LOW ||
+			urb->dev->speed == USB_SPEED_FULL)
+		urb->start_frame >>= 3; /* microframes -> frames for LS/FS */
+
+	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
+	ep_interval = urb->interval;
+	/* Convert to microframes */
+	if (urb->dev->speed == USB_SPEED_LOW ||
+			urb->dev->speed == USB_SPEED_FULL)
+		ep_interval *= 8;
+	/* FIXME change this to a warning and a suggestion to use the new API
+	 * to set the polling interval (once the API is added).
+	 */
+	if (xhci_interval != ep_interval) {
+		if (printk_ratelimit())
+			dev_dbg(&urb->dev->dev, "Driver uses different interval"
+					" (%d microframe%s) than xHCI "
+					"(%d microframe%s)\n",
+					ep_interval,
+					ep_interval == 1 ? "" : "s",
+					xhci_interval,
+					xhci_interval == 1 ? "" : "s");
+		urb->interval = xhci_interval; /* the HC's programmed interval wins; adjust the urb */
+		/* Convert back to frames for LS/FS devices */
+		if (urb->dev->speed == USB_SPEED_LOW ||
+				urb->dev->speed == USB_SPEED_FULL)
+			urb->interval /= 8;
+	}
+	return xhci_queue_isoc_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
+}
+
+/* Caller must have locked xhci->lock */
+int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, /* queue setup / [data] / status TRBs for a control transfer */
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_ring *ep_ring;
+	int num_trbs;
+	int ret;
+	struct usb_ctrlrequest *setup;
+	struct xhci_generic_trb *start_trb;
+	int start_cycle;
+	u32 field, length_field;
+	struct urb_priv *urb_priv;
+	struct xhci_td *td;
+	int max_packet;
+	int remainder; /* NOTE(review): unused -- the remainder computation below is commented out */
+	u32 trt;
+#if 0
+	xhci_dbg(xhci, "urb->ep->desc->bLength 0x%x\n", urb->ep->desc.bLength);
+	xhci_dbg(xhci, "urb->ep->desc->bDescriptorType 0x%x\n", urb->ep->desc.bDescriptorType);
+	xhci_dbg(xhci, "urb->ep->desc->bEndpointAddress 0x%x\n", urb->ep->desc.bEndpointAddress);
+	xhci_dbg(xhci, "urb->ep->desc->bmAttributes 0x%x\n", urb->ep->desc.bmAttributes);
+	xhci_dbg(xhci, "urb->ep->desc->wMaxPacketSize 0x%x\n", urb->ep->desc.wMaxPacketSize);
+	xhci_dbg(xhci, "urb->ep->desc->bInterval 0x%x\n", urb->ep->desc.bInterval);
+	xhci_dbg(xhci, "urb->ep->desc->bRefresh 0x%x\n", urb->ep->desc.bRefresh);
+	xhci_dbg(xhci, "urb->ep->desc->bSynchAddress 0x%x\n", urb->ep->desc.bSynchAddress);
+	xhci_dbg(xhci, "urb->setup_packet 0x%x\n", urb->setup_packet);
+#endif
+	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+	if (!ep_ring)
+		return -EINVAL;
+
+	/*
+	 * Need to copy setup packet into setup TRB, so we can't use the setup
+	 * DMA address.
+	 */
+	if (!urb->setup_packet)
+		return -EINVAL;
+
+	if (!in_interrupt())
+		xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
+				slot_id, ep_index);
+	/* 1 TRB for setup, 1 for status */
+	num_trbs = 2;
+	/*
+	 * Don't need to check if we need additional event data and normal TRBs,
+	 * since data in control transfers will never get bigger than 16MB
+	 * XXX: can we get a buffer that crosses 64KB boundaries?
+	 */
+	if (urb->transfer_buffer_length > 0)
+		num_trbs++; /* NOTE(review): the optional ZLP TRB queued below is NOT counted here -- confirm ring reservation */
+	ret = prepare_transfer(xhci, xhci->devs[slot_id],
+			ep_index, urb->stream_id,
+			num_trbs, urb, 0, mem_flags);
+	if (ret < 0)
+		return ret;
+
+	urb_priv = urb->hcpriv;
+	td = urb_priv->td[0];
+
+	/*
+	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
+	 * until we've finished creating all the other TRBs. The ring's cycle
+	 * state may change as we enqueue the other TRBs, so save it too.
+	 */
+	start_trb = &ep_ring->enqueue->generic;
+	start_cycle = ep_ring->cycle_state;
+#if 0
+	xhci_dbg(xhci, "start_trb 0x%x\n", &ep_ring->enqueue->generic);
+	xhci_dbg(xhci, "start_cycle 0x%x\n", ep_ring->cycle_state);
+#endif
+	/* Queue setup TRB - see section 6.4.1.2.1 */
+	/* FIXME better way to translate setup_packet into two u32 fields? */
+
+	setup = (struct usb_ctrlrequest *) urb->setup_packet;
+#if 0
+	xhci_dbg(xhci, "setup->bRequestType 0x%x\n", setup->bRequestType);
+	xhci_dbg(xhci, "setup->bRequest 0x%x\n", setup->bRequest);
+	xhci_dbg(xhci, "setup->wValue 0x%x\n", le16_to_cpu(setup->wValue));
+	xhci_dbg(xhci, "setup->wIndex 0x%x\n", le16_to_cpu(setup->wIndex));
+	xhci_dbg(xhci, "setup->wLength 0x%x\n", le16_to_cpu(setup->wLength));
+#endif
+	if(num_trbs ==2){
+		trt = TRB_TRT(TRT_NO_DATA); /* setup + status only: no data stage */
+	}else if(setup->bRequestType & USB_DIR_IN){
+		trt = TRB_TRT(TRT_IN_DATA);
+	}else{
+		trt = TRB_TRT(TRT_OUT_DATA);
+	}
+	field = 0;
+	field |= TRB_IDT | TRB_TYPE(TRB_SETUP) | trt; /* the 8 setup bytes ride inside the TRB (immediate data) */
+	if (start_cycle == 0)
+		field |= 0x1;
+	queue_trb(xhci, ep_ring, false, true,
+			/* FIXME endianness is probably going to bite my ass here. */
+			setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
+			le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
+			TRB_LEN(8) | TRB_INTR_TARGET(0),
+			/* Immediate data in pointer */
+			field);
+	/* If there's data, queue data TRBs */
+	field = 0;
+//	remainder = xhci_td_remainder(urb->transfer_buffer_length, 0, max_packet, urb->transfer_buffer_length);
+	length_field = TRB_LEN(urb->transfer_buffer_length) |
+//			remainder |
+			TRB_INTR_TARGET(0);
+	if (urb->transfer_buffer_length > 0) {
+		if (setup->bRequestType & USB_DIR_IN)
+			field |= TRB_DIR_IN;
+		queue_trb(xhci, ep_ring, false, true,
+				lower_32_bits(urb->transfer_dma),
+				upper_32_bits(urb->transfer_dma),
+				length_field,
+				/* Event on short tx */
+				field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
+	}
+#if 1
+	max_packet = urb->ep->desc.wMaxPacketSize; /* NOTE(review): raw __le16, no le16_to_cpu and no 0x7ff mask here */
+	if((urb->transfer_flags & URB_ZERO_PACKET)
+		&& ((urb->transfer_buffer_length % max_packet) == 0)){
+		if (setup->bRequestType & USB_DIR_IN)
+			field |= TRB_DIR_IN;
+		queue_trb(xhci, ep_ring, false, true,
+				lower_32_bits(urb->transfer_dma),
+				upper_32_bits(urb->transfer_dma),
+				0,
+				/* Event on short tx */
+				field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
+	}
+#endif
+	/* Save the DMA address of the last TRB in the TD */
+	td->last_trb = ep_ring->enqueue;
+
+	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
+	/* If the device sent data, the status stage is an OUT transfer */
+	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
+		field = 0;
+	else
+		field = TRB_DIR_IN; /* status stage runs opposite to the data direction */
+	queue_trb(xhci, ep_ring, false, false,
+			0,
+			0,
+			TRB_INTR_TARGET(0),
+			/* Event on completion */
+			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
+#if 1
+	giveback_first_trb(xhci, slot_id, ep_index, 0,
+			start_cycle, start_trb, td);
+#endif
+	return 0;
+}
+
+/**** Command Ring Operations ****/
+
+/* Generic function for queueing a command TRB on the command ring.
+ * Check to make sure there's room on the command ring for one command TRB.
+ * Also check that there's room reserved for commands that must not fail.
+ * If this is a command that must not fail, meaning command_must_succeed = TRUE,
+ * then only check for the number of reserved spots.
+ * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
+ * because the command event handler may want to resubmit a failed command.
+ */
+static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, /* queue one command TRB, honouring reserved slots */
+		u32 field3, u32 field4, bool command_must_succeed)
+{
+	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
+	int ret;
+
+	if (!command_must_succeed)
+		reserved_trbs++; /* ordinary commands must leave the reserved slots free */
+
+	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
+			reserved_trbs, GFP_ATOMIC);
+	if (ret < 0) {
+		xhci_err(xhci, "[ERROR] No room for command on command ring\n");
+		if (command_must_succeed)
+			xhci_err(xhci, "[ERROR] Reserved TRB counting for "
+					"unfailable commands failed.\n");
+		return ret;
+	}
+	queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3,
+			field4 | xhci->cmd_ring->cycle_state);
+	return 0;
+}
+
+/* Queue a no-op command on the command ring */
+static int queue_cmd_noop(struct xhci_hcd *xhci)
+{
+ return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP), false);
+}
+
+/*
+ * Place a no-op command on the command ring to test the command and
+ * event ring.
+ */
+void *xhci_setup_one_noop(struct xhci_hcd *xhci)
+{
+ if (queue_cmd_noop(xhci) < 0)
+ return NULL;
+ xhci->noops_submitted++;
+ return xhci_ring_cmd_db;
+}
+
+/*
+ * Place a no-op command on the command ring to test the command and
+ * event ring.
+ */
+void *mtk_xhci_setup_one_noop(struct xhci_hcd *xhci)
+{
+ if (queue_cmd_noop(xhci) < 0)
+ return NULL;
+ xhci_ring_cmd_db(xhci);
+}
+
+
+/* Queue a slot enable or disable request on the command ring */
+int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
+{
+ return queue_command(xhci, 0, 0, 0,
+ TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
+}
+
+/* Queue an address device command TRB */
+int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+ u32 slot_id, char isBSR)
+{
+
+ if(isBSR){
+ return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+ upper_32_bits(in_ctx_ptr), 0,
+ TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id) | ADDRESS_TRB_BSR,
+ false);
+ }
+ else{
+ return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+ upper_32_bits(in_ctx_ptr), 0,
+ TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
+ false);
+ }
+}
+
+int xhci_queue_vendor_command(struct xhci_hcd *xhci,
+ u32 field1, u32 field2, u32 field3, u32 field4)
+{
+ return queue_command(xhci, field1, field2, field3, field4, false);
+}
+
+/* Queue a reset device command TRB */
+int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
+{
+ return queue_command(xhci, 0, 0, 0,
+ TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
+ false);
+}
+
+/* Queue a configure endpoint command TRB */
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+ u32 slot_id, bool command_must_succeed)
+{
+ return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+ upper_32_bits(in_ctx_ptr), 0,
+ TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
+ command_must_succeed);
+}
+
+int xhci_queue_deconfigure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+ u32 slot_id, bool command_must_succeed){
+ return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+ upper_32_bits(in_ctx_ptr), 0,
+ TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id) | CONFIG_EP_TRB_DC,
+ command_must_succeed);
+}
+
+/* Queue an evaluate context command TRB */
+int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+ u32 slot_id)
+{
+ return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+ upper_32_bits(in_ctx_ptr), 0,
+ TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
+ false);
+}
+
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
+ unsigned int ep_index)
+{
+ u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
+ u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+ u32 type = TRB_TYPE(TRB_STOP_RING);
+ //xhci_err(xhci, "[DBG] queue stop ep command, address 0x%x\n", xhci->cmd_ring->enqueue);
+ if(ep_index == 1){
+ if(TRB_FIELD_TO_TYPE(le32_to_cpu(xhci->cmd_ring->enqueue->generic.field[3])) == TRB_LINK){
+ g_cmd_ring_pointer1 = ((le64_to_cpu(xhci->cmd_ring->enqueue->link.segment_ptr)) & 0xff0);
+ }
+ else{
+ g_cmd_ring_pointer1 = ((((int)xhci->cmd_ring->enqueue) & 0xff0));
+ }
+ }
+ else if(ep_index == 2){
+ if(TRB_FIELD_TO_TYPE(le32_to_cpu(xhci->cmd_ring->enqueue->generic.field[3])) == TRB_LINK){
+ g_cmd_ring_pointer2 = ((le64_to_cpu(xhci->cmd_ring->enqueue->link.segment_ptr)) & 0xff0);
+ }
+ else{
+ g_cmd_ring_pointer2 = ((((int)xhci->cmd_ring->enqueue) & 0xff0));
+ }
+ }
+ return queue_command(xhci, 0, 0, 0,
+ trb_slot_id | trb_ep_index | type, false);
+}
+
+/* Set Transfer Ring Dequeue Pointer command.
+ * This should not be used for endpoints that have streams enabled.
+ */
+static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
+ unsigned int ep_index, unsigned int stream_id,
+ struct xhci_segment *deq_seg,
+ union xhci_trb *deq_ptr, u32 cycle_state)
+{
+ dma_addr_t addr;
+ u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
+ u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+ u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
+ u32 type = TRB_TYPE(TRB_SET_DEQ);
+ struct xhci_virt_ep *ep;
+
+ addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
+ if (addr == 0) {
+ xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
+ xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
+ deq_seg, deq_ptr);
+ return 0;
+ }
+ ep = &xhci->devs[slot_id]->eps[ep_index];
+ if ((ep->ep_state & SET_DEQ_PENDING)) {
+ xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
+ xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
+ return 0;
+ }
+ ep->queued_deq_seg = deq_seg;
+ ep->queued_deq_ptr = deq_ptr;
+ return queue_command(xhci, lower_32_bits(addr) | cycle_state,
+ upper_32_bits(addr), trb_stream_id,
+ trb_slot_id | trb_ep_index | type, false);
+}
+
+int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
+ unsigned int ep_index)
+{
+ u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
+ u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+ u32 type = TRB_TYPE(TRB_RESET_EP);
+
+ return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
+ false);
+}
Index: linux-3.18.21/drivers/usb/host/mtk_test/xhci.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/xhci.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,1048 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/irq.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+
+#include <linux/kernel.h> /* printk() */
+#include <linux/fs.h> /* everything... */
+#include <linux/errno.h> /* error codes */
+#include <linux/types.h> /* size_t */
+#include <linux/proc_fs.h>
+#include <linux/fcntl.h> /* O_ACCMODE */
+#include <linux/seq_file.h>
+#include <linux/cdev.h>
+//#include <linux/pci.h>
+#include <asm/unaligned.h>
+//#include <linux/usb/hcd.h>
+#include "xhci.h"
+#include "mtk-test.h"
+#include "mtk-test-lib.h"
+#include "xhci-platform.c"
+#include "mtk-usb-hcd.h"
+#include "xhci-mtk-power.h"
+#include "xhci-mtk-scheduler.h"
+#include <asm/tc3162/tc3162.h>
+
+/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
+static int link_quirk;
+
+static void xhci_work(struct xhci_hcd *xhci){
+ u32 temp;
+ u64 temp_64;
+ /*
+ * Clear the op reg interrupt status first,
+ * so we can receive interrupts from other MSI-X interrupters.
+ * Write 1 to clear the interrupt status.
+ */
+ temp = xhci_readl(xhci, &xhci->op_regs->status);
+ temp |= STS_EINT;
+ xhci_writel(xhci, temp, &xhci->op_regs->status);
+
+ /* Acknowledge the interrupt */
+ temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+ temp |= 0x3;
+ xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);
+
+ /* Flush posted writes */
+ xhci_readl(xhci, &xhci->ir_set->irq_pending);
+
+ xhci_handle_event(xhci);
+
+ /* Clear the event handler busy flag (RW1C); the event ring should be empty. */
+ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
+ /* Flush posted writes -- FIXME is this necessary? */
+ xhci_readl(xhci, &xhci->ir_set->irq_pending);
+}
+
+irqreturn_t xhci_mtk_irq(struct usb_hcd *hcd){
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ u32 temp, temp2;
+ union xhci_trb *trb;
+ spin_lock(&xhci->lock);
+ trb = xhci->event_ring->dequeue;
+
+ temp = xhci_readl(xhci, &xhci->op_regs->status);
+ temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+ if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
+ spin_unlock(&xhci->lock);
+ return IRQ_NONE;
+ }
+ xhci_warn(xhci, "Got interrupt\n");
+ xhci_dbg(xhci, "Got interrupt\n");
+ xhci_dbg(xhci, "op reg status = %08x\n", temp);
+ xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2);
+ xhci_dbg(xhci, "Event ring dequeue ptr:\n");
+ xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
+ (unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
+ lower_32_bits(trb->link.segment_ptr),
+ upper_32_bits(trb->link.segment_ptr),
+ (unsigned int) trb->link.intr_target,
+ (unsigned int) trb->link.control);
+ if(g_intr_handled != -1){
+ g_intr_handled++;
+ }
+ xhci_work(xhci);
+ spin_unlock(&xhci->lock);
+ return IRQ_HANDLED;
+}
+
+// xhci original functions
+
+/* TODO: copied from ehci-hcd.c - can this be refactored? */
+/*
+ * handshake - spin reading hc until handshake completes or fails
+ * @ptr: address of hc register to be read
+ * @mask: bits to look at in result of read
+ * @done: value of those bits when handshake succeeds
+ * @usec: timeout in microseconds
+ *
+ * Returns negative errno, or zero on success
+ *
+ * Success happens when the "mask" bits have the specified value (hardware
+ * handshake done). There are two failure modes: "usec" have passed (major
+ * hardware flakeout), or the register reads as all-ones (hardware removed).
+ */
+static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
+ u32 mask, u32 done, int usec)
+{
+ u32 result;
+
+ do {
+ result = xhci_readl(xhci, ptr);
+ if (result == ~(u32)0) /* card removed */
+ return -ENODEV;
+ result &= mask;
+ if (result == done)
+ return 0;
+ udelay(1);
+ usec--;
+ } while (usec > 0);
+ return -ETIMEDOUT;
+}
+
+/*
+ * Disable interrupts and begin the xHCI halting process.
+ */
+void xhci_quiesce(struct xhci_hcd *xhci)
+{
+ u32 halted;
+ u32 cmd;
+ u32 mask;
+
+ mask = ~(XHCI_IRQS);
+ halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
+ if (!halted)
+ mask &= ~CMD_RUN;
+
+ cmd = xhci_readl(xhci, &xhci->op_regs->command);
+ cmd &= mask;
+ xhci_writel(xhci, cmd, &xhci->op_regs->command);
+}
+
+/*
+ * Force HC into halt state.
+ *
+ * Disable any IRQs and clear the run/stop bit.
+ * HC will complete any current and actively pipelined transactions, and
+ * should halt within 16 microframes of the run/stop bit being cleared.
+ * Read HC Halted bit in the status register to see when the HC is finished.
+ * XXX: shouldn't we set HC_STATE_HALT here somewhere?
+ */
+int xhci_halt(struct xhci_hcd *xhci)
+{
+ xhci_dbg(xhci, "// Halt the HC\n");
+ xhci_quiesce(xhci);
+
+ return handshake(xhci, &xhci->op_regs->status,
+ STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
+}
+/*
+ * Set the run bit and wait for the host to be running.
+ */
+int xhci_start(struct xhci_hcd *xhci)
+{
+ u32 temp;
+ int ret;
+
+ temp = xhci_readl(xhci, &xhci->op_regs->command);
+ temp |= (CMD_RUN);
+ xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
+ temp);
+ xhci_writel(xhci, temp, &xhci->op_regs->command);
+
+ /*
+ * Wait for the HCHalted Status bit to be 0 to indicate the host is
+ * running.
+ */
+ ret = handshake(xhci, &xhci->op_regs->status,
+ STS_HALT, 0, XHCI_MAX_HALT_USEC);
+ if (ret == -ETIMEDOUT)
+ xhci_err(xhci, "[ERROR]Host took too long to start, "
+ "waited %u microseconds.\n",
+ XHCI_MAX_HALT_USEC);
+ return ret;
+}
+
+/*
+ * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
+ *
+ * This resets pipelines, timers, counters, state machines, etc.
+ * Transactions will be terminated immediately, and operational registers
+ * will be set to their defaults.
+ */
+int xhci_reset(struct xhci_hcd *xhci)
+{
+ u32 command;
+ u32 state;
+ int ret;
+
+ state = xhci_readl(xhci, &xhci->op_regs->status);
+ if ((state & STS_HALT) == 0) {
+ xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
+ return 0;
+ }
+
+ xhci_dbg(xhci, "// Reset the HC\n");
+ command = xhci_readl(xhci, &xhci->op_regs->command);
+ command |= CMD_RESET;
+ xhci_writel(xhci, command, &xhci->op_regs->command);
+ /* XXX: Why does EHCI set this here? Shouldn't other code do this? */
+ xhci_to_hcd(xhci)->state = HC_STATE_HALT;
+
+ ret = handshake(xhci, &xhci->op_regs->command,
+ CMD_RESET, 0, 250 * 1000);
+ if (ret)
+ return ret;
+
+ xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
+ /*
+ * xHCI cannot write to any doorbells or operational registers other
+ * than status until the "Controller Not Ready" flag is cleared.
+ */
+ return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
+}
+
+/*
+ * Initialize memory for HCD and xHC (one-time init).
+ *
+ * Program the PAGESIZE register, initialize the device context array, create
+ * device contexts (?), set up a command ring segment (or two?), create event
+ * ring (one for now).
+ */
+int xhci_init(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ int retval = 0;
+
+ xhci_dbg(xhci, "xhci_init\n");
+ spin_lock_init(&xhci->lock);
+ if (link_quirk) {
+ xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
+ xhci->quirks |= XHCI_LINK_TRB_QUIRK;
+ } else {
+ xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
+ }
+ retval = xhci_mem_init(xhci, GFP_KERNEL);
+ xhci_dbg(xhci, "Finished xhci_init\n");
+
+ return retval;
+}
+
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+static void xhci_event_ring_work(unsigned long arg)
+{
+ unsigned long flags;
+ int temp;
+ u64 temp_64;
+ struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
+ int i, j;
+
+ xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ temp = xhci_readl(xhci, &xhci->op_regs->status);
+ xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
+ if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
+ (xhci->xhc_state & XHCI_STATE_HALTED)) {
+ xhci_dbg(xhci, "HW died, polling stopped.\n");
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return;
+ }
+
+ temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+ xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
+ xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
+ xhci->error_bitmask = 0;
+ xhci_dbg(xhci, "Event ring:\n");
+ xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
+ xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
+ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ temp_64 &= ~ERST_PTR_MASK;
+ xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
+ xhci_dbg(xhci, "Command ring:\n");
+ xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
+ xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
+ xhci_dbg_cmd_ptrs(xhci);
+ for (i = 0; i < MAX_HC_SLOTS; ++i) {
+ if (!xhci->devs[i])
+ continue;
+ for (j = 0; j < 31; ++j) {
+ xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
+ }
+ }
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ if (!xhci->zombie)
+ mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
+ else
+ xhci_dbg(xhci, "Quit polling the event ring.\n");
+}
+#endif
+
+
+/*-------------------------------------------------------------------------*/
+
+/**
+ * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
+ * HCDs. Find the index for an endpoint given its descriptor. Use the return
+ * value to right shift 1 for the bitmask.
+ *
+ * Index = (epnum * 2) + direction - 1,
+ * where direction = 0 for OUT, 1 for IN.
+ * For control endpoints, the IN index is used (OUT index is unused), so
+ * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
+ */
+unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
+{
+ unsigned int index;
+ if (usb_endpoint_xfer_control(desc))
+ index = (unsigned int) (usb_endpoint_num(desc)*2);
+ else
+ index = (unsigned int) (usb_endpoint_num(desc)*2) +
+ (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
+ return index;
+}
+
+/* Find the flag for this endpoint (for use in the control context). Use the
+ * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
+ * bit 1, etc.
+ */
+unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
+{
+ return 1 << (ep_index + 1);
+}
+
+
+/* Compute the last valid endpoint context index. Basically, this is the
+ * endpoint index plus one. For slot contexts with more than valid endpoint,
+ * we find the most significant bit set in the added contexts flags.
+ * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
+ * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
+ */
+unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
+{
+ return fls(added_ctxs) - 1;
+}
+
+static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx,
+ struct xhci_container_ctx *out_ctx,
+ u32 add_flags, u32 drop_flags)
+{
+ struct xhci_input_control_ctx *ctrl_ctx;
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+ ctrl_ctx->add_flags = add_flags;
+ ctrl_ctx->drop_flags = drop_flags;
+ xhci_slot_copy(xhci, in_ctx, out_ctx);
+ ctrl_ctx->add_flags |= SLOT_FLAG;
+
+ xhci_dbg(xhci, "Input Context:\n");
+ xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
+}
+
+void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ struct xhci_dequeue_state *deq_state)
+{
+ struct xhci_container_ctx *in_ctx;
+ struct xhci_ep_ctx *ep_ctx;
+ u32 added_ctxs;
+ dma_addr_t addr;
+
+ xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
+ xhci->devs[slot_id]->out_ctx, ep_index);
+ in_ctx = xhci->devs[slot_id]->in_ctx;
+ ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
+ addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
+ deq_state->new_deq_ptr);
+ if (addr == 0) {
+ xhci_warn(xhci, "WARN Cannot submit config ep after "
+ "reset ep command\n");
+ xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
+ deq_state->new_deq_seg,
+ deq_state->new_deq_ptr);
+ return;
+ }
+ ep_ctx->deq = addr | deq_state->new_cycle_state;
+
+ added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
+ xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
+ xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
+}
+
+/* hc interface non-used functions */
+int xhci_mtk_run(struct usb_hcd *hcd){
+ printk("xhci_mtk_run is called\n");
+ u32 temp;
+ u64 temp_64;
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ void (*doorbell)(struct xhci_hcd *) = NULL;
+
+ hcd->uses_new_polling = 1;
+// hcd->poll_rh = 0;
+
+ xhci_dbg(xhci, "xhci_run\n");
+#if 0 /* FIXME: MSI not setup yet */
+ /* Do this at the very last minute */
+ ret = xhci_setup_msix(xhci);
+ if (!ret)
+ return ret;
+
+ return -ENOSYS;
+#endif
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+ init_timer(&xhci->event_ring_timer);
+ xhci->event_ring_timer.data = (unsigned long) xhci;
+ xhci->event_ring_timer.function = xhci_event_ring_work;
+ /* Poll the event ring */
+ xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
+ xhci->zombie = 0;
+ xhci_dbg(xhci, "Setting event ring polling timer\n");
+ add_timer(&xhci->event_ring_timer);
+#endif
+
+ xhci_dbg(xhci, "Command ring memory map follows:\n");
+ xhci_debug_ring(xhci, xhci->cmd_ring);
+ xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
+ xhci_dbg_cmd_ptrs(xhci);
+
+ xhci_dbg(xhci, "ERST memory map follows:\n");
+ xhci_dbg_erst(xhci, &xhci->erst);
+ xhci_dbg(xhci, "Event ring:\n");
+ xhci_debug_ring(xhci, xhci->event_ring);
+ xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
+ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ temp_64 &= ~ERST_PTR_MASK;
+ xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
+
+ xhci_dbg(xhci, "// Set the interrupt modulation register\n");
+ temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
+ temp &= ~ER_IRQ_INTERVAL_MASK;
+ temp |= (u32) 0x10;
+ xhci_writel(xhci, temp, &xhci->ir_set->irq_control);
+
+ /* Set the HCD state before we enable the irqs */
+ hcd->state = HC_STATE_RUNNING;
+ temp = xhci_readl(xhci, &xhci->op_regs->command);
+ temp |= (CMD_EIE);
+ xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
+ temp);
+ xhci_writel(xhci, temp, &xhci->op_regs->command);
+
+ temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+ xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
+ xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
+ xhci_writel(xhci, ER_IRQ_ENABLE(temp),
+ &xhci->ir_set->irq_pending);
+ xhci_print_ir_set(xhci, xhci->ir_set, 0);
+
+ if (NUM_TEST_NOOPS > 0)
+ doorbell = xhci_setup_one_noop(xhci);
+#if 0
+ if (xhci->quirks & XHCI_NEC_HOST)
+ xhci_queue_vendor_command(xhci, 0, 0, 0,
+ TRB_TYPE(TRB_NEC_GET_FW));
+#endif
+ if (xhci_start(xhci)) {
+ xhci_halt(xhci);
+ return -ENODEV;
+ }
+
+ xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
+ if (doorbell)
+ (*doorbell)(xhci);
+#if 0
+ if (xhci->quirks & XHCI_NEC_HOST)
+ xhci_ring_cmd_db(xhci);
+#endif
+ enableXhciAllPortPower(xhci);
+ msleep(50);
+ //disableAllClockPower();
+ xhci_dbg(xhci, "Finished xhci_run\n");
+ return 0;
+}
+
+void xhci_mtk_stop(struct usb_hcd *hcd){
+ printk("xhci_mtk_stop is called\n");
+ u32 temp;
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ spin_lock_irq(&xhci->lock);
+ xhci_halt(xhci);
+ xhci_reset(xhci);
+ spin_unlock_irq(&xhci->lock);
+
+#if 0 /* No MSI yet */
+ xhci_cleanup_msix(xhci);
+#endif
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+ /* Tell the event ring poll function not to reschedule */
+ xhci->zombie = 1;
+ del_timer_sync(&xhci->event_ring_timer);
+#endif
+
+ xhci_dbg(xhci, "// Disabling event ring interrupts\n");
+ temp = xhci_readl(xhci, &xhci->op_regs->status);
+ xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
+ temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+ xhci_writel(xhci, ER_IRQ_DISABLE(temp),
+ &xhci->ir_set->irq_pending);
+ xhci_print_ir_set(xhci, xhci->ir_set, 0);
+
+ xhci_dbg(xhci, "cleaning up memory\n");
+ xhci_mem_cleanup(xhci);
+ xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
+ xhci_readl(xhci, &xhci->op_regs->status));
+}
+
+void xhci_mtk_shutdown(struct usb_hcd *hcd){
+ printk("xhci_mtk_shutdown is called\n");
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ spin_lock_irq(&xhci->lock);
+ xhci_halt(xhci);
+ spin_unlock_irq(&xhci->lock);
+
+#if 0
+ xhci_cleanup_msix(xhci);
+#endif
+
+ xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
+ xhci_readl(xhci, &xhci->op_regs->status));
+}
+
+int xhci_mtk_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags){
+ printk("xhci_mtk_urb_enqueue is called\n");
+}
+
+int xhci_mtk_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status){
+ printk("xhci_mtk_urb_dequeue is called\n");
+}
+
+int xhci_mtk_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev){
+ printk("xhci_mtk_alloc_dev is called\n");
+}
+
+void xhci_mtk_free_dev(struct usb_hcd *hcd, struct usb_device *udev){
+ printk("xhci_mtk_free_dev is called\n");
+}
+
+int xhci_mtk_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev
+ , struct usb_host_endpoint **eps, unsigned int num_eps,
+ unsigned int num_streams, gfp_t mem_flags){
+ printk("xhci_mtk_alloc_streams is called\n");
+}
+
+int xhci_mtk_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ gfp_t mem_flags){
+ printk("xhci_mtk_free_streams is called\n");
+}
+
+int xhci_mtk_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep){
+ struct xhci_hcd *xhci;
+ struct xhci_container_ctx *in_ctx, *out_ctx;
+ unsigned int ep_index;
+ struct xhci_ep_ctx *ep_ctx;
+ struct xhci_slot_ctx *slot_ctx;
+ struct xhci_input_control_ctx *ctrl_ctx;
+ u32 added_ctxs;
+ unsigned int last_ctx;
+ u32 new_add_flags, new_drop_flags, new_slot_info;
+ int ret = 0;
+#if 0
+ ret = xhci_check_args(hcd, udev, ep, 1, __func__);
+ if (ret <= 0) {
+ /* So we won't queue a reset ep command for a root hub */
+ ep->hcpriv = NULL;
+ return ret;
+ }
+#endif
+ xhci = hcd_to_xhci(hcd);
+
+ added_ctxs = xhci_get_endpoint_flag(&ep->desc);
+ last_ctx = xhci_last_valid_endpoint(added_ctxs);
+ if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
+ /* FIXME when we have to issue an evaluate endpoint command to
+ * deal with ep0 max packet size changing once we get the
+ * descriptors
+ */
+ xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
+ __func__, added_ctxs);
+ return 0;
+ }
+
+ if (!xhci->devs || !xhci->devs[udev->slot_id]) {
+ xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+ out_ctx = xhci->devs[udev->slot_id]->out_ctx;
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
+ /* If the HCD has already noted the endpoint is enabled,
+ * ignore this request.
+ */
+ if (le32_to_cpu(ctrl_ctx->add_flags) & xhci_get_endpoint_flag(&ep->desc)) {
+ xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
+ __func__, ep);
+ return 0;
+ }
+
+ /*
+ * Configuration and alternate setting changes must be done in
+ * process context, not interrupt context (or so documenation
+ * for usb_set_interface() and usb_set_configuration() claim).
+ */
+ if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
+ udev, ep, GFP_NOIO) < 0) {
+ dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
+ __func__, ep->desc.bEndpointAddress);
+ return -ENOMEM;
+ }
+
+ ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
+ new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
+
+ /* If xhci_endpoint_disable() was called for this endpoint, but the
+ * xHC hasn't been notified yet through the check_bandwidth() call,
+ * this re-adds a new state for the endpoint from the new endpoint
+ * descriptors. We must drop and re-add this endpoint, so we leave the
+ * drop flags alone.
+ */
+ new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
+
+ slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
+ /* Update the last valid endpoint context, if we just added one past */
+ if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
+ slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
+ slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
+ }
+ new_slot_info = le32_to_cpu(slot_ctx->dev_info);
+
+ /* Store the usb_device pointer for later use */
+ ep->hcpriv = udev;
+
+ xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
+ (unsigned int) ep->desc.bEndpointAddress,
+ udev->slot_id,
+ (unsigned int) new_drop_flags,
+ (unsigned int) new_add_flags);
+ xhci_dbg(xhci, "new slot context 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n"
+ , le32_to_cpu(slot_ctx->dev_info), le32_to_cpu(slot_ctx->dev_info2), le32_to_cpu(slot_ctx->tt_info), le32_to_cpu(slot_ctx->dev_state)
+ , le32_to_cpu(slot_ctx->reserved[0]), le32_to_cpu(slot_ctx->reserved[1]), le32_to_cpu(slot_ctx->reserved[2]), le32_to_cpu(slot_ctx->reserved[3]));
+ return 0;
+}
+
+int xhci_mtk_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev
+ , struct usb_host_endpoint *ep){
+ struct xhci_hcd *xhci;
+ struct xhci_container_ctx *in_ctx, *out_ctx;
+ struct xhci_input_control_ctx *ctrl_ctx;
+ struct xhci_slot_ctx *slot_ctx;
+ unsigned int last_ctx;
+ unsigned int ep_index;
+ struct xhci_ep_ctx *ep_ctx;
+ u32 drop_flag;
+ u32 new_add_flags, new_drop_flags, new_slot_info;
+ int ret;
+
+ xhci = hcd_to_xhci(hcd);
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ return -ENODEV;
+ xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+ drop_flag = xhci_get_endpoint_flag(&ep->desc);
+ if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
+ xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
+ __func__, drop_flag);
+ return 0;
+ }
+
+ in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+ out_ctx = xhci->devs[udev->slot_id]->out_ctx;
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
+
+ /* If the HC already knows the endpoint is disabled,
+ * or the HCD has noted it is disabled, ignore this request
+ */
+ if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
+ EP_STATE_DISABLED ||
+ le32_to_cpu(ctrl_ctx->drop_flags) &
+ xhci_get_endpoint_flag(&ep->desc)) {
+ xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
+ __func__, ep);
+ return 0;
+ }
+
+ ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
+ new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
+
+ ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
+ new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
+
+ last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
+ slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
+ /* Update the last valid endpoint context, if we deleted the last one */
+ if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
+ LAST_CTX(last_ctx)) {
+ slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
+ slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
+ }
+ new_slot_info = le32_to_cpu(slot_ctx->dev_info);
+
+ xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
+
+ xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
+ (unsigned int) ep->desc.bEndpointAddress,
+ udev->slot_id,
+ (unsigned int) new_drop_flags,
+ (unsigned int) new_add_flags,
+ (unsigned int) new_slot_info);
+ return 0;
+}
+void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
+ struct usb_device *udev, unsigned int ep_index)
+{
+ struct xhci_dequeue_state deq_state;
+ struct xhci_virt_ep *ep;
+
+ xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
+ ep = &xhci->devs[udev->slot_id]->eps[ep_index];
+ /* We need to move the HW's dequeue pointer past this TD,
+ * or it will attempt to resend it on the next doorbell ring.
+ */
+ xhci_find_new_dequeue_state(xhci, udev->slot_id,
+ ep_index, ep->stopped_stream, ep->stopped_td,
+ &deq_state);
+
+ /* HW with the reset endpoint quirk will use the saved dequeue state to
+ * issue a configure endpoint command later.
+ */
+ if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
+ xhci_dbg(xhci, "Queueing new dequeue state\n");
+ xhci_queue_new_dequeue_state(xhci, udev->slot_id,
+ ep_index, ep->stopped_stream, &deq_state);
+ } else {
+ /* Better hope no one uses the input context between now and the
+ * reset endpoint completion!
+ * XXX: No idea how this hardware will react when stream rings
+ * are enabled.
+ */
+ xhci_dbg(xhci, "Setting up input context for "
+ "configure endpoint command\n");
+ xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
+ ep_index, &deq_state);
+ }
+}
+
+void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
+{
+ struct xhci_input_control_ctx *ctrl_ctx;
+ struct xhci_ep_ctx *ep_ctx;
+ struct xhci_slot_ctx *slot_ctx;
+ int i;
+
+ /* When a device's add flag and drop flag are zero, any subsequent
+ * configure endpoint command will leave that endpoint's state
+ * untouched. Make sure we don't leave any old state in the input
+ * endpoint contexts.
+ */
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+ ctrl_ctx->drop_flags = 0;
+ ctrl_ctx->add_flags = 0;
+ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
+ slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
+ /* Endpoint 0 is always valid */
+ slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
+ for (i = 1; i < 31; ++i) {
+ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
+ ep_ctx->ep_info = 0;
+ ep_ctx->ep_info2 = 0;
+ ep_ctx->deq = 0;
+ ep_ctx->tx_info = 0;
+ }
+}
+
+
+void xhci_mtk_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep){
+ printk("xhci_mtk_endpoint_reset is called\n");
+}
+
+int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev){
+	printk("xhci_mtk_check_bandwidth is called\n"); return 0; /* stub: report success; control must not fall off a non-void function */
+}
+
+void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev){
+ printk("xhci_mtk_reset_bandwidth is called\n");
+}
+
+int xhci_mtk_address_device(struct usb_hcd *hcd, struct usb_device *udev){
+	printk("xhci_mtk_address_device is called\n"); return 0; /* stub: success, matches the other stubs; was missing a return (UB) */
+}
+
+int xhci_mtk_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+		struct usb_tt *tt, gfp_t mem_flags){
+	printk("xhci_mtk_update_hub_device is called\n"); return 0; /* stub: was missing a return value (UB for int function) */
+}
+
+int xhci_mtk_reset_device(struct usb_hcd *hcd, struct usb_device *udev){
+	printk("xhci_mtk_reset_device is called\n"); return 0; /* stub: was missing a return value (UB for int function) */
+}
+
+int xhci_mtk_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength){
+ printk("xhci_mtk_hub_control is called\n");
+ return 0;
+}
+
+int xhci_mtk_hub_status_data(struct usb_hcd *hcd, char *buf){
+ printk("xhci_mtk_hub_status_data is called\n");
+ return 0;
+}
+
+int xhci_mtk_get_frame(struct usb_hcd *hcd){
+	printk("xhci_mtk_get_frame is called\n"); return 0; /* stub: was missing a return value (UB for int function) */
+}
+
+static u64 dummy_mask = DMA_BIT_MASK(32);
+static struct platform_device xhci_platform_dev = {
+ .name = hcd_name,
+ .id = -1,
+ .dev = {
+// .dma_mask = &dummy_mask,
+ .coherent_dma_mask = 0xffffffff,
+ .release = xhci_hcd_release,
+ },
+};
+
+#if 0
+#define U3_MAC_TX_FIFO_WAIT_EMPTY_ADDR 0xf0041144
+
+void setMacFIFOWaitEmptyValue(){
+ __u32 __iomem *mac_tx_fifo_wait_empty_addr;
+ u32 mac_tx_fifo_wait_empty_value;
+ mac_tx_fifo_wait_empty_addr = U3_MAC_TX_FIFO_WAIT_EMPTY_ADDR;
+ mac_tx_fifo_wait_empty_value = 0x5;
+ writel(mac_tx_fifo_wait_empty_value, mac_tx_fifo_wait_empty_addr);
+}
+#endif
+
+
+//initial MAC3 register, should be called after HC reset and before set PP=1 of each port
+void setInitialReg(){
+ __u32 __iomem *addr;
+ u32 temp;
+
+ if(isFPGA){
+ /* set MAC reference clock speed */
+ addr = SSUSB_U3_MAC_BASE+U3_UX_EXIT_LFPS_TIMING_PAR;
+ temp = readl(addr);
+ temp &= ~(0xff << U3_RX_UX_EXIT_LFPS_REF_OFFSET);
+ temp |= (U3_RX_UX_EXIT_LFPS_REF << U3_RX_UX_EXIT_LFPS_REF_OFFSET);
+ writel(temp, addr);
+ addr = SSUSB_U3_MAC_BASE+U3_REF_CK_PAR;
+ temp = readl(addr);
+ temp &= ~(0xff);
+ temp |= U3_REF_CK_VAL;
+ writel(temp, addr);
+
+ /* set SYS_CK */
+ addr = SSUSB_U3_SYS_BASE+U3_TIMING_PULSE_CTRL;
+ temp = readl(addr);
+ temp &= ~(0xff);
+ temp |= CNT_1US_VALUE;
+ writel(temp, addr);
+		addr = SSUSB_U2_SYS_BASE+USB20_TIMING_PARAMETER;
+		temp = readl(addr) & ~(0xff); /* BUGFIX: register was never read back before masking (stale value from U3_TIMING_PULSE_CTRL was used) */
+		temp |= TIME_VALUE_1US;
+		writel(temp, addr);
+
+ /* set LINK_PM_TIMER=3 */
+ addr = SSUSB_U3_SYS_BASE+LINK_PM_TIMER;
+ temp = readl(addr);
+ temp &= ~(0xf);
+ temp |= PM_LC_TIMEOUT_VALUE;
+ writel(temp, addr);
+ }else{
+ /* set SSUSB DMA burst size to 128B */
+ addr = SSUSB_U3_XHCI_BASE + SSUSB_HDMA_CFG;
+ temp = SSUSB_HDMA_CFG_MT7621_VALUE;
+ writel(temp, addr);
+ mdelay(100);
+ /* extend U3 LTSSM Polling.LFPS timeout value */
+ addr = SSUSB_U3_XHCI_BASE + U3_LTSSM_TIMING_PARAMETER3;
+ temp = U3_LTSSM_TIMING_PARAMETER3_VALUE;
+ writel(temp, addr);
+ mdelay(100);
+ /* EOF */
+ addr = SSUSB_U3_XHCI_BASE + SYNC_HS_EOF;
+ temp = SYNC_HS_EOF_VALUE;
+ writel(temp, addr);
+ mdelay(100);
+ /* HSCH_CFG1: SCH2_FIFO_DEPTH */
+ addr = SSUSB_U3_XHCI_BASE + HSCH_CFG1;
+ temp = readl(addr);
+ temp &= ~(0x3 << SCH2_FIFO_DEPTH_OFFSET);
+ writel(temp, addr);
+
+ /* Doorbell handling */
+ addr = SIFSLV_IPPC + SSUSB_IP_SPAR0;
+ temp = 0x1;
+ writel(temp, addr);
+
+ /* Set SW PLL Stable mode to 1 for U2 LPM device remote wakeup */
+ /* Port 0 */
+ addr = U2_PHY_BASE + U2_PHYD_CR1;
+ temp = readl(addr);
+ temp &= ~(0x3 << 18);
+ temp |= (1 << 18);
+ writel(temp, addr);
+
+ /* Port 1 */
+ addr = U2_PHY_BASE_P1 + U2_PHYD_CR1;
+ temp = readl(addr);
+ temp &= ~(0x3 << 18);
+ temp |= (1 << 18);
+ writel(temp, addr);
+ }
+}
+
+void setLatchSel(){
+ __u32 __iomem *latch_sel_addr;
+ u32 latch_sel_value;
+ latch_sel_addr = U3_PIPE_LATCH_SEL_ADD;
+ latch_sel_value = ((U3_PIPE_LATCH_TX)<<2) | (U3_PIPE_LATCH_RX);
+ writel(latch_sel_value, latch_sel_addr);
+}
+
+void reinitIP(){
+ __u32 __iomem *ip_reset_addr;
+ u32 ip_reset_value;
+
+ //enable clock/gating, include re-init IP in IPPC
+ enableAllClockPower();
+ //set MAC3 PIPE latch
+ setLatchSel();
+ mtk_xhci_scheduler_init();
+
+}
+
+
+int mtk_xhci_hcd_init(void)
+{
+ int retval = 0;
+ __u32 __iomem *ip_reset_addr;
+ u32 ip_reset_value;
+ struct platform_device *pPlatformDev;
+
+ printk(KERN_ERR "Module Init start!\n");
+
+ if(!isFPGA){
+ printk("ASIC init phy\n");
+ u3phy_config();
+ }
+
+ //resetIP
+ reinitIP();
+
+ retval = platform_driver_register(&xhci_versatile_driver);
+ if (retval < 0)
+ {
+ printk(KERN_ERR "Problem registering platform driver.");
+ return retval;
+ }
+
+ pPlatformDev = &xhci_platform_dev;
+ memset(pPlatformDev, 0, sizeof(struct platform_device));
+ pPlatformDev->name = hcd_name;
+ pPlatformDev->id = -1;
+ pPlatformDev->dev.coherent_dma_mask = 0xffffffff;
+ pPlatformDev->dev.release = xhci_hcd_release;
+ retval = platform_device_register(&xhci_platform_dev);
+ if (retval < 0)
+ {
+ platform_driver_unregister (&xhci_versatile_driver);
+ }
+ printk(KERN_ERR "Module Init success!\n");
+ //setInitialReg();
+ /*
+ * Check the compiler generated sizes of structures that must be laid
+ * out in specific ways for hardware access.
+ */
+ BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
+ /* xhci_device_control has eight fields, and also
+ * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
+ */
+ BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
+ BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
+ /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
+ BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
+ return 0;
+}
+
+void mtk_xhci_hcd_cleanup(void)
+{
+ //xhci_unregister_pci();
+ uint32_t nCount;
+ uint32_t i;
+ struct platform_device *pPlatformDev;
+
+ platform_device_unregister(&xhci_platform_dev);
+ platform_driver_unregister(&xhci_versatile_driver);
+}
+
Index: linux-3.18.21/drivers/usb/host/mtk_test/xhci.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/mtk_test/xhci.h 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,1605 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __LINUX_XHCI_HCD_H
+#define __LINUX_XHCI_HCD_H
+
+#include <linux/usb.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/usb/hcd.h>
+
+/* Code sharing between pci-quirks and xhci hcd */
+#include "xhci-ext-caps.h"
+
+/* xHCI PCI Configuration Registers */
+#define XHCI_SBRN_OFFSET (0x60)
+
+/* Max number of USB devices for any host controller - limit in section 6.1 */
+#define MAX_HC_SLOTS 256
+/* Section 5.3.3 - MaxPorts */
+#define MAX_HC_PORTS 127
+
+/*
+ * xHCI register interface.
+ * This corresponds to the eXtensible Host Controller Interface (xHCI)
+ * Revision 0.95 specification
+ */
+
+/**
+ * struct xhci_cap_regs - xHCI Host Controller Capability Registers.
+ * @hc_capbase: length of the capabilities register and HC version number
+ * @hcs_params1: HCSPARAMS1 - Structural Parameters 1
+ * @hcs_params2: HCSPARAMS2 - Structural Parameters 2
+ * @hcs_params3: HCSPARAMS3 - Structural Parameters 3
+ * @hcc_params: HCCPARAMS - Capability Parameters
+ * @db_off: DBOFF - Doorbell array offset
+ * @run_regs_off: RTSOFF - Runtime register space offset
+ */
+struct xhci_cap_regs {
+ __le32 hc_capbase;
+ __le32 hcs_params1;
+ __le32 hcs_params2;
+ __le32 hcs_params3;
+ __le32 hcc_params;
+ __le32 db_off;
+ __le32 run_regs_off;
+ /* Reserved up to (CAPLENGTH - 0x1C) */
+};
+
+enum xhci_ring_type {
+ TYPE_CTRL = 0,
+ TYPE_ISOC,
+ TYPE_BULK,
+ TYPE_INTR,
+ TYPE_STREAM,
+ TYPE_COMMAND,
+ TYPE_EVENT,
+};
+
+/* hc_capbase bitmasks */
+/* bits 7:0 - how long is the Capabilities register */
+#define HC_LENGTH(p) XHCI_HC_LENGTH(p)
+/* bits 31:16 */
+#define HC_VERSION(p) (((p) >> 16) & 0xffff)
+
+/* HCSPARAMS1 - hcs_params1 - bitmasks */
+/* bits 0:7, Max Device Slots */
+#define HCS_MAX_SLOTS(p) (((p) >> 0) & 0xff)
+#define HCS_SLOTS_MASK 0xff
+/* bits 8:18, Max Interrupters */
+#define HCS_MAX_INTRS(p) (((p) >> 8) & 0x7ff)
+/* bits 24:31, Max Ports - max value is 0x7F = 127 ports */
+#define HCS_MAX_PORTS(p) (((p) >> 24) & 0x7f)
+
+/* HCSPARAMS2 - hcs_params2 - bitmasks */
+/* bits 0:3, frames or uframes that SW needs to queue transactions
+ * ahead of the HW to meet periodic deadlines */
+#define HCS_IST(p) (((p) >> 0) & 0xf)
+/* bits 4:7, max number of Event Ring segments */
+#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf)
+/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
+/* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */
+#define HCS_MAX_SCRATCHPAD(p) (((p) >> 27) & 0x1f)
+
+/* HCSPARAMS3 - hcs_params3 - bitmasks */
+/* bits 0:7, Max U1 to U0 latency for the roothub ports */
+#define HCS_U1_LATENCY(p) (((p) >> 0) & 0xff)
+/* bits 16:31, Max U2 to U0 latency for the roothub ports */
+#define HCS_U2_LATENCY(p) (((p) >> 16) & 0xffff)
+
+/* HCCPARAMS - hcc_params - bitmasks */
+/* true: HC can use 64-bit address pointers */
+#define HCC_64BIT_ADDR(p) ((p) & (1 << 0))
+/* true: HC can do bandwidth negotiation */
+#define HCC_BANDWIDTH_NEG(p) ((p) & (1 << 1))
+/* true: HC uses 64-byte Device Context structures
+ * FIXME 64-byte context structures aren't supported yet.
+ */
+#define HCC_64BYTE_CONTEXT(p) ((p) & (1 << 2))
+/* true: HC has port power switches */
+#define HCC_PPC(p) ((p) & (1 << 3))
+/* true: HC has port indicators */
+#define HCS_INDICATOR(p) ((p) & (1 << 4))
+/* true: HC has Light HC Reset Capability */
+#define HCC_LIGHT_RESET(p) ((p) & (1 << 5))
+/* true: HC supports latency tolerance messaging */
+#define HCC_LTC(p) ((p) & (1 << 6))
+/* true: no secondary Stream ID Support */
+#define HCC_NSS(p) ((p) & (1 << 7))
+/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
+#define HCC_MAX_PSA(p) (1 << ((((p) >> 12) & 0xf) + 1))
+/* Extended Capabilities pointer from PCI base - section 5.3.6 */
+#define HCC_EXT_CAPS(p) XHCI_HCC_EXT_CAPS(p)
+
+/* db_off bitmask - bits 0:1 reserved */
+#define DBOFF_MASK (~0x3)
+
+/* run_regs_off bitmask - bits 0:4 reserved */
+#define RTSOFF_MASK (~0x1f)
+
+
+/* Number of registers per port */
+#define NUM_PORT_REGS 4
+
+/**
+ * struct xhci_op_regs - xHCI Host Controller Operational Registers.
+ * @command: USBCMD - xHC command register
+ * @status: USBSTS - xHC status register
+ * @page_size: This indicates the page size that the host controller
+ * supports. If bit n is set, the HC supports a page size
+ * of 2^(n+12), up to a 128MB page size.
+ * 4K is the minimum page size.
+ * @cmd_ring: CRP - 64-bit Command Ring Pointer
+ * @dcbaa_ptr: DCBAAP - 64-bit Device Context Base Address Array Pointer
+ * @config_reg: CONFIG - Configure Register
+ * @port_status_base: PORTSCn - base address for Port Status and Control
+ * Each port has a Port Status and Control register,
+ * followed by a Port Power Management Status and Control
+ * register, a Port Link Info register, and a reserved
+ * register.
+ * @port_power_base: PORTPMSCn - base address for
+ * Port Power Management Status and Control
+ * @port_link_base: PORTLIn - base address for Port Link Info (current
+ * Link PM state and control) for USB 2.1 and USB 3.0
+ * devices.
+ */
+struct xhci_op_regs {
+ __le32 command;
+ __le32 status;
+ __le32 page_size;
+ __le32 reserved1;
+ __le32 reserved2;
+ __le32 dev_notification;
+ __le64 cmd_ring;
+ /* rsvd: offset 0x20-2F */
+ __le32 reserved3[4];
+ __le64 dcbaa_ptr;
+ __le32 config_reg;
+ /* rsvd: offset 0x3C-3FF */
+ __le32 reserved4[241];
+ /* port 1 registers, which serve as a base address for other ports */
+ __le32 port_status_base;
+ __le32 port_power_base;
+ __le32 port_link_base;
+ __le32 port_lpm_ctrl_base;
+ __le32 reserved5;
+ /* registers for ports 2-255 */
+ __le32 reserved6[NUM_PORT_REGS*254];
+};
+
+/* USBCMD - USB command - command bitmasks */
+/* start/stop HC execution - do not write unless HC is halted*/
+#define CMD_RUN XHCI_CMD_RUN
+/* Reset HC - resets internal HC state machine and all registers (except
+ * PCI config regs). HC does NOT drive a USB reset on the downstream ports.
+ * The xHCI driver must reinitialize the xHC after setting this bit.
+ */
+#define CMD_RESET (1 << 1)
+/* Event Interrupt Enable - a '1' allows interrupts from the host controller */
+#define CMD_EIE XHCI_CMD_EIE
+/* Host System Error Interrupt Enable - get out-of-band signal for HC errors */
+#define CMD_HSEIE XHCI_CMD_HSEIE
+/* bits 4:6 are reserved (and should be preserved on writes). */
+/* light reset (port status stays unchanged) - reset completed when this is 0 */
+#define CMD_LRESET (1 << 7)
+/* FIXME: ignoring host controller save/restore state for now. */
+#define CMD_CSS (1 << 8)
+#define CMD_CRS (1 << 9)
+/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
+#define CMD_EWE XHCI_CMD_EWE
+/* MFINDEX power management - '1' means xHC can stop MFINDEX counter if all root
+ * hubs are in U3 (selective suspend), disconnect, disabled, or powered-off.
+ * '0' means the xHC can power it off if all ports are in the disconnect,
+ * disabled, or powered-off state.
+ */
+#define CMD_PM_INDEX (1 << 11)
+/* bits 12:31 are reserved (and should be preserved on writes). */
+
+/* USBSTS - USB status - status bitmasks */
+/* HC not running - set to 1 when run/stop bit is cleared. */
+#define STS_HALT XHCI_STS_HALT
+/* serious error, e.g. PCI parity error. The HC will clear the run/stop bit. */
+#define STS_FATAL (1 << 2)
+/* event interrupt - clear this prior to clearing any IP flags in IR set*/
+#define STS_EINT (1 << 3)
+/* port change detect */
+#define STS_PORT (1 << 4)
+/* bits 5:7 reserved and zeroed */
+/* save state status - '1' means xHC is saving state */
+#define STS_SAVE (1 << 8)
+/* restore state status - '1' means xHC is restoring state */
+#define STS_RESTORE (1 << 9)
+/* true: save or restore error */
+#define STS_SRE (1 << 10)
+/* true: Controller Not Ready to accept doorbell or op reg writes after reset */
+#define STS_CNR XHCI_STS_CNR
+/* true: internal Host Controller Error - SW needs to reset and reinitialize */
+#define STS_HCE (1 << 12)
+/* bits 13:31 reserved and should be preserved */
+
+/*
+ * DNCTRL - Device Notification Control Register - dev_notification bitmasks
+ * Generate a device notification event when the HC sees a transaction with a
+ * notification type that matches a bit set in this bit field.
+ */
+#define DEV_NOTE_MASK (0xffff)
+#define ENABLE_DEV_NOTE(x) (1 << (x))
+/* Most of the device notification types should only be used for debug.
+ * SW does need to pay attention to function wake notifications.
+ */
+#define DEV_NOTE_FWAKE ENABLE_DEV_NOTE(1)
+
+/* CRCR - Command Ring Control Register - cmd_ring bitmasks */
+/* bit 0 is the command ring cycle state */
+/* stop ring operation after completion of the currently executing command */
+#define CMD_RING_PAUSE (1 << 1)
+/* stop ring immediately - abort the currently executing command */
+#define CMD_RING_ABORT (1 << 2)
+/* true: command ring is running */
+#define CMD_RING_RUNNING (1 << 3)
+/* bits 4:5 reserved and should be preserved */
+/* Command Ring pointer - bit mask for the lower 32 bits. */
+#define CMD_RING_RSVD_BITS (0x3f)
+
+/* CONFIG - Configure Register - config_reg bitmasks */
+/* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */
+#define MAX_DEVS(p) ((p) & 0xff)
+/* bits 8:31 - reserved and should be preserved */
+
+/* PORTSC - Port Status and Control Register - port_status_base bitmasks */
+/* true: device connected */
+#define PORT_CONNECT (1 << 0)
+/* true: port enabled */
+#define PORT_PE (1 << 1)
+/* bit 2 reserved and zeroed */
+/* true: port has an over-current condition */
+#define PORT_OC (1 << 3)
+/* true: port reset signaling asserted */
+#define PORT_RESET (1 << 4)
+#define PORT_PLS_VALUE(p) ((p>>5) & 0xf)
+/* Port Link State - bits 5:8
+ * A read gives the current link PM state of the port,
+ * a write with Link State Write Strobe set sets the link state.
+ */
+#define PORT_PLS(p) (p & (0xf << 5))
+#define PORT_PLS_MASK (0xf << 5)
+#define XDEV_U0 (0x0 << 5)
+#define XDEV_U3 (0x3 << 5)
+#define XDEV_RESUME (0xf << 5)
+#define XDEV_INACTIVE (0x6 << 5)
+
+/* true: port has power (see HCC_PPC) */
+#define PORT_POWER (1 << 9)
+/* bits 10:13 indicate device speed:
+ * 0 - undefined speed - port hasn't been initialized by a reset yet
+ * 1 - full speed
+ * 2 - low speed
+ * 3 - high speed
+ * 4 - super speed
+ * 5-15 reserved
+ */
+#define DEV_SPEED_MASK (0xf << 10)
+#define XDEV_FS (0x1 << 10)
+#define XDEV_LS (0x2 << 10)
+#define XDEV_HS (0x3 << 10)
+#define XDEV_SS (0x4 << 10)
+#define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0<<10))
+#define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS)
+#define DEV_LOWSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_LS)
+#define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS)
+#define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS)
+/* Bits 20:23 in the Slot Context are the speed for the device */
+#define SLOT_SPEED_FS (XDEV_FS << 10)
+#define SLOT_SPEED_LS (XDEV_LS << 10)
+#define SLOT_SPEED_HS (XDEV_HS << 10)
+#define SLOT_SPEED_SS (XDEV_SS << 10)
+/* Port Indicator Control */
+#define PORT_LED_OFF (0 << 14)
+#define PORT_LED_AMBER (1 << 14)
+#define PORT_LED_GREEN (2 << 14)
+#define PORT_LED_MASK (3 << 14)
+/* Port Link State Write Strobe - set this when changing link state */
+#define PORT_LINK_STROBE (1 << 16)
+/* true: connect status change */
+#define PORT_CSC (1 << 17)
+/* true: port enable change */
+#define PORT_PEC (1 << 18)
+/* true: warm reset for a USB 3.0 device is done. A "hot" reset puts the port
+ * into an enabled state, and the device into the default state. A "warm" reset
+ * also resets the link, forcing the device through the link training sequence.
+ * SW can also look at the Port Reset register to see when warm reset is done.
+ */
+#define PORT_WRC (1 << 19)
+/* true: over-current change */
+#define PORT_OCC (1 << 20)
+/* true: reset change - 1 to 0 transition of PORT_RESET */
+#define PORT_RC (1 << 21)
+/* port link status change - set on some port link state transitions:
+ * Transition Reason
+ * ------------------------------------------------------------------------------
+ * - U3 to Resume Wakeup signaling from a device
+ * - Resume to Recovery to U0 USB 3.0 device resume
+ * - Resume to U0 USB 2.0 device resume
+ * - U3 to Recovery to U0 Software resume of USB 3.0 device complete
+ * - U3 to U0 Software resume of USB 2.0 device complete
+ * - U2 to U0 L1 resume of USB 2.1 device complete
+ * - U0 to U0 (???) L1 entry rejection by USB 2.1 device
+ * - U0 to disabled L1 entry error with USB 2.1 device
+ * - Any state to inactive Error on USB 3.0 port
+ */
+#define PORT_PLC (1 << 22)
+/* port configure error change - port failed to configure its link partner */
+#define PORT_CEC (1 << 23)
+/* bit 24 reserved */
+/* wake on connect (enable) */
+#define PORT_WKCONN_E (1 << 25)
+/* wake on disconnect (enable) */
+#define PORT_WKDISC_E (1 << 26)
+/* wake on over-current (enable) */
+#define PORT_WKOC_E (1 << 27)
+/* bits 28:29 reserved */
+/* true: device is removable - for USB 3.0 roothub emulation */
+#define PORT_DEV_REMOVE (1 << 30)
+/* Initiate a warm port reset - complete when PORT_WRC is '1' */
+#define PORT_WR (1 << 31)
+
+/* Port Power Management Status and Control - port_power_base bitmasks */
+/* Inactivity timer value for transitions into U1, in microseconds.
+ * Timeout can be up to 127us. 0xFF means an infinite timeout.
+ */
+#define PORT_U1_TIMEOUT(p) ((p) & 0xff)
+/* Inactivity timer value for transitions into U2 */
+#define PORT_U2_TIMEOUT(p) (((p) & 0xff) << 8)
+/* Bits 24:31 for port testing */
+
+/* U2 port power */
+/* L1 status */
+#define L1S_INVALID 0
+#define L1S_SUCCESS 1
+#define L1S_NYET 2
+#define L1S_UNSUPPORTED 3
+#define L1S_ERROR_TIMEOUT 4
+#define L1S_UNDEFINED 5
+
+/* PORTSC */
+#define MSK_PLS (0xf<<5)
+#define PLS_VALUE_L0 0x0
+#define PLS_VALUE_L1 0x2
+#define PLS_VALUE_RESUME 0xf
+
+/* PORTPMSC */
+#define MSK_L1S (0x7<<0)
+#define MSK_RWE (0x1<<3)
+#define MSK_BESL (0xf<<4)
+#define MSK_L1_DEV_SLOT (0xff<<8)
+#define MSK_HLE (0x1<<16)
+
+
+/*add by TH.fei for setup trb TRT Field*/
+#define TRB_TRT(p) ((p)<<16)
+#define TRT_NO_DATA 0
+#define TRT_RERV 1
+#define TRT_OUT_DATA 2
+#define TRT_IN_DATA 3
+
+
+/**
+ * struct xhci_intr_reg - Interrupt Register Set
+ * @irq_pending: IMAN - Interrupt Management Register. Used to enable
+ * interrupts and check for pending interrupts.
+ * @irq_control: IMOD - Interrupt Moderation Register.
+ * Used to throttle interrupts.
+ * @erst_size: Number of segments in the Event Ring Segment Table (ERST).
+ * @erst_base: ERST base address.
+ * @erst_dequeue: Event ring dequeue pointer.
+ *
+ * Each interrupter (defined by a MSI-X vector) has an event ring and an Event
+ * Ring Segment Table (ERST) associated with it. The event ring is comprised of
+ * multiple segments of the same size. The HC places events on the ring and
+ * "updates the Cycle bit in the TRBs to indicate to software the current
+ * position of the Enqueue Pointer." The HCD (Linux) processes those events and
+ * updates the dequeue pointer.
+ */
+struct xhci_intr_reg {
+ __le32 irq_pending;
+ __le32 irq_control;
+ __le32 erst_size;
+ __le32 rsvd;
+ __le64 erst_base;
+ __le64 erst_dequeue;
+};
+
+/* irq_pending bitmasks */
+#define ER_IRQ_PENDING(p) ((p) & 0x1)
+/* bits 2:31 need to be preserved */
+/* THIS IS BUGGY - FIXME - IP IS WRITE 1 TO CLEAR */
+#define ER_IRQ_CLEAR(p) ((p) & 0xfffffffe)
+#define ER_IRQ_ENABLE(p) ((ER_IRQ_CLEAR(p)) | 0x2)
+#define ER_IRQ_DISABLE(p) ((ER_IRQ_CLEAR(p)) & ~(0x2))
+
+/* irq_control bitmasks */
+/* Minimum interval between interrupts (in 250ns intervals). The interval
+ * between interrupts will be longer if there are no events on the event ring.
+ * Default is 4000 (1 ms).
+ */
+#define ER_IRQ_INTERVAL_MASK (0xffff)
+/* Counter used to count down the time to the next interrupt - HW use only */
+#define ER_IRQ_COUNTER_MASK (0xffff << 16)
+
+/* erst_size bitmasks */
+/* Preserve bits 16:31 of erst_size */
+#define ERST_SIZE_MASK (0xffff << 16)
+
+/* erst_dequeue bitmasks */
+/* Dequeue ERST Segment Index (DESI) - Segment number (or alias)
+ * where the current dequeue pointer lies. This is an optional HW hint.
+ */
+#define ERST_DESI_MASK (0x7)
+/* Event Handler Busy (EHB) - is the event ring scheduled to be serviced by
+ * a work queue (or delayed service routine)?
+ */
+#define ERST_EHB (1 << 3)
+#define ERST_PTR_MASK (0xf)
+
+/**
+ * struct xhci_run_regs
+ * @microframe_index:
+ * MFINDEX - current microframe number
+ *
+ * Section 5.5 Host Controller Runtime Registers:
+ * "Software should read and write these registers using only Dword (32 bit)
+ * or larger accesses"
+ */
+struct xhci_run_regs {
+ __le32 microframe_index;
+ __le32 rsvd[7];
+ struct xhci_intr_reg ir_set[128];
+};
+
+/**
+ * struct doorbell_array
+ *
+ * Section 5.6
+ */
+struct xhci_doorbell_array {
+ __le32 doorbell[256];
+};
+
+#define DB_TARGET_MASK 0xFFFFFF00
+#define DB_STREAM_ID_MASK 0x0000FFFF
+#define DB_TARGET_HOST 0x0
+#define DB_VALUE_HOST 0x00000000
+#define DB_STREAM_ID_HOST 0x0
+#define DB_MASK (0xff << 8)
+
+/* Endpoint Target - bits 0:7 */
+#define EPI_TO_DB(p) (((p) + 1) & 0xff)
+#define STREAM_ID_TO_DB(p) (((p) & 0xffff) << 16)
+
+
+/**
+ * struct xhci_container_ctx
+ * @type: Type of context. Used to calculated offsets to contained contexts.
+ * @size: Size of the context data
+ * @bytes: The raw context data given to HW
+ * @dma: dma address of the bytes
+ *
+ * Represents either a Device or Input context. Holds a pointer to the raw
+ * memory used for the context (bytes) and dma address of it (dma).
+ */
+struct xhci_container_ctx {
+ unsigned type;
+#define XHCI_CTX_TYPE_DEVICE 0x1
+#define XHCI_CTX_TYPE_INPUT 0x2
+
+ int size;
+
+ u8 *bytes;
+ dma_addr_t dma;
+};
+
+/**
+ * struct xhci_slot_ctx
+ * @dev_info: Route string, device speed, hub info, and last valid endpoint
+ * @dev_info2: Max exit latency for device number, root hub port number
+ * @tt_info: tt_info is used to construct split transaction tokens
+ * @dev_state: slot state and device address
+ *
+ * Slot Context - section 6.2.1.1. This assumes the HC uses 32-byte context
+ * structures. If the HC uses 64-byte contexts, there is an additional 32 bytes
+ * reserved at the end of the slot context for HC internal use.
+ */
+struct xhci_slot_ctx {
+ __le32 dev_info;
+ __le32 dev_info2;
+ __le32 tt_info;
+ __le32 dev_state;
+ /* offset 0x10 to 0x1f reserved for HC internal use */
+ __le32 reserved[4];
+};
+
+/* dev_info bitmasks */
+/* Route String - 0:19 */
+#define ROUTE_STRING_MASK (0xfffff)
+/* Device speed - values defined by PORTSC Device Speed field - 20:23 */
+#define DEV_SPEED (0xf << 20)
+/* bit 24 reserved */
+/* Is this LS/FS device connected through a HS hub? - bit 25 */
+#define DEV_MTT (0x1 << 25)
+/* Set if the device is a hub - bit 26 */
+#define DEV_HUB (0x1 << 26)
+/* Index of the last valid endpoint context in this device context - 27:31 */
+#define LAST_CTX_MASK (0x1f << 27)
+#define LAST_CTX(p) ((p) << 27)
+#define LAST_CTX_TO_EP_NUM(p) (((p) >> 27) - 1)
+#define SLOT_FLAG (1 << 0)
+#define EP0_FLAG (1 << 1)
+
+/* dev_info2 bitmasks */
+/* Max Exit Latency (ms) - worst case time to wake up all links in dev path */
+#define MAX_EXIT (0xffff)
+/* Root hub port number that is needed to access the USB device */
+#define ROOT_HUB_PORT(p) (((p) & 0xff) << 16)
+/* Maximum number of ports under a hub device */
+#define XHCI_MAX_PORTS(p) (((p) & 0xff) << 24)
+
+/* tt_info bitmasks */
+/*
+ * TT Hub Slot ID - for low or full speed devices attached to a high-speed hub
+ * The Slot ID of the hub that isolates the high speed signaling from
+ * this low or full-speed device. '0' if attached to root hub port.
+ */
+#define TT_SLOT (0xff)
+/*
+ * The number of the downstream facing port of the high-speed hub
+ * '0' if the device is not low or full speed.
+ */
+#define TT_PORT (0xff << 8)
+#define TT_THINK_TIME(p) (((p) & 0x3) << 16)
+
+/* dev_state bitmasks */
+/* USB device address - assigned by the HC */
+#define DEV_ADDR_MASK (0xff)
+/* bits 8:26 reserved */
+/* Slot state */
+#define SLOT_STATE (0x1f << 27)
+#define GET_SLOT_STATE(p) (((p) & (0x1f << 27)) >> 27)
+/* slot state value */
+#define SLOT_STATE_ENABLE_DISABLE 0
+#define SLOT_STATE_DEFAULT 1
+#define SLOT_STATE_ADDRESSED 2
+#define SLOT_STATE_CONFIGURED 3
+
+/**
+ * struct xhci_ep_ctx
+ * @ep_info: endpoint state, streams, mult, and interval information.
+ * @ep_info2: information on endpoint type, max packet size, max burst size,
+ * error count, and whether the HC will force an event for all
+ * transactions.
+ * @deq: 64-bit ring dequeue pointer address. If the endpoint only
+ * defines one stream, this points to the endpoint transfer ring.
+ * Otherwise, it points to a stream context array, which has a
+ * ring pointer for each flow.
+ * @tx_info:
+ * Average TRB lengths for the endpoint ring and
+ * max payload within an Endpoint Service Interval Time (ESIT).
+ *
+ * Endpoint Context - section 6.2.1.2. This assumes the HC uses 32-byte context
+ * structures. If the HC uses 64-byte contexts, there is an additional 32 bytes
+ * reserved at the end of the endpoint context for HC internal use.
+ */
+struct xhci_ep_ctx {
+ __le32 ep_info;
+ __le32 ep_info2;
+ __le64 deq;
+ __le32 tx_info;
+ /* offset 0x14 - 0x1f reserved for HC internal use */
+ __le32 reserved[3];
+};
+
+/* ep_info bitmasks */
+/*
+ * Endpoint State - bits 0:2
+ * 0 - disabled
+ * 1 - running
+ * 2 - halted due to halt condition - ok to manipulate endpoint ring
+ * 3 - stopped
+ * 4 - TRB error
+ * 5-7 - reserved
+ */
+#define EP_STATE_MASK (0xf)
+#define EP_STATE_DISABLED 0
+#define EP_STATE_RUNNING 1
+#define EP_STATE_HALTED 2
+#define EP_STATE_STOPPED 3
+#define EP_STATE_ERROR 4
+/* Mult - Max number of bursts within an interval, in EP companion desc. */
+#define EP_MULT(p) ((p & 0x3) << 8)
+/* bits 10:14 are Max Primary Streams */
+/* bit 15 is Linear Stream Array */
+/* Interval - period between requests to an endpoint - 125u increments. */
+#define EP_INTERVAL(p) ((p & 0xff) << 16)
+#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff))
+#define EP_MAXPSTREAMS_MASK (0x1f << 10)
+#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK)
+/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
+#define EP_HAS_LSA (1 << 15)
+
+/* ep_info2 bitmasks */
+/*
+ * Force Event - generate transfer events for all TRBs for this endpoint
+ * This will tell the HC to ignore the IOC and ISP flags (for debugging only).
+ */
+#define FORCE_EVENT (0x1)
+#define ERROR_COUNT(p) (((p) & 0x3) << 1)
+#define CTX_TO_EP_TYPE(p) (((p) >> 3) & 0x7)
+#define EP_TYPE(p) ((p) << 3)
+#define ISOC_OUT_EP 1
+#define BULK_OUT_EP 2
+#define INT_OUT_EP 3
+#define CTRL_EP 4
+#define ISOC_IN_EP 5
+#define BULK_IN_EP 6
+#define INT_IN_EP 7
+/* bit 6 reserved */
+/* bit 7 is Host Initiate Disable - for disabling stream selection */
+#define MAX_BURST(p) (((p)&0xff) << 8)
+#define MAX_PACKET(p) (((p)&0xffff) << 16)
+#define MAX_PACKET_MASK (0xffff << 16)
+#define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff)
+
+/* tx_info bitmasks */
+#define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff)
+#define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16)
+
+/* deq bitmasks */
+#define EP_CTX_CYCLE_MASK (1 << 0)
+
+
+/**
+ * struct xhci_input_control_context
+ * Input control context; see section 6.2.5.
+ *
+ * @drop_context: set the bit of the endpoint context you want to disable
+ * @add_context: set the bit of the endpoint context you want to enable
+ */
+struct xhci_input_control_ctx {
+ __le32 drop_flags;
+ __le32 add_flags;
+ __le32 rsvd2[6];
+};
+
+/* Represents everything that is needed to issue a command on the command ring.
+ * It's useful to pre-allocate these for commands that cannot fail due to
+ * out-of-memory errors, like freeing streams.
+ */
+struct xhci_command {
+ /* Input context for changing device state */
+ struct xhci_container_ctx *in_ctx;
+ u32 status;
+ /* If completion is null, no one is waiting on this command
+ * and the structure can be freed after the command completes.
+ */
+ struct completion *completion;
+ union xhci_trb *command_trb;
+ struct list_head cmd_list;
+};
+
+/* drop context bitmasks */
+#define DROP_EP(x) (0x1 << x)
+/* add context bitmasks */
+#define ADD_EP(x) (0x1 << x)
+
+struct xhci_stream_ctx {
+ /* 64-bit stream ring address, cycle state, and stream type */
+ __le64 stream_ring;
+ /* offset 0x14 - 0x1f reserved for HC internal use */
+ __le32 reserved[2];
+};
+
+/* Stream Context Types (section 6.4.1) - bits 3:1 of stream ctx deq ptr */
+#define SCT_FOR_CTX(p) (((p) << 1) & 0x7)
+/* Secondary stream array type, dequeue pointer is to a transfer ring */
+#define SCT_SEC_TR 0
+/* Primary stream array type, dequeue pointer is to a transfer ring */
+#define SCT_PRI_TR 1
+/* Dequeue pointer is for a secondary stream array (SSA) with 8 entries */
+#define SCT_SSA_8 2
+#define SCT_SSA_16 3
+#define SCT_SSA_32 4
+#define SCT_SSA_64 5
+#define SCT_SSA_128 6
+#define SCT_SSA_256 7
+
+/* Assume no secondary streams for now */
+struct xhci_stream_info {
+ struct xhci_ring **stream_rings;
+ /* Number of streams, including stream 0 (which drivers can't use) */
+ unsigned int num_streams;
+ /* The stream context array may be bigger than
+ * the number of streams the driver asked for
+ */
+ struct xhci_stream_ctx *stream_ctx_array;
+ unsigned int num_stream_ctxs;
+ dma_addr_t ctx_array_dma;
+ /* For mapping physical TRB addresses to segments in stream rings */
+ struct radix_tree_root trb_address_map;
+ struct xhci_command *free_streams_command;
+};
+
+#define SMALL_STREAM_ARRAY_SIZE 256
+#define MEDIUM_STREAM_ARRAY_SIZE 1024
+
+#define SET_DEQ_PENDING (1 << 0)
+#define EP_HALTED (1 << 1) /* For stall handling */
+#define EP_HALT_PENDING (1 << 2) /* For URB cancellation */
+/* Transitioning the endpoint to using streams, don't enqueue URBs */
+#define EP_GETTING_STREAMS (1 << 3)
+#define EP_HAS_STREAMS (1 << 4)
+/* Transitioning the endpoint to not using streams, don't enqueue URBs */
+#define EP_GETTING_NO_STREAMS (1 << 5)
+
+struct xhci_virt_ep {
+ struct xhci_ring *ring;
+ /* Related to endpoints that are configured to use stream IDs only */
+ struct xhci_stream_info *stream_info;
+ /* Temporary storage in case the configure endpoint command fails and we
+ * have to restore the device state to the previous state
+ */
+ struct xhci_ring *new_ring;
+ unsigned int ep_state;
+ /* ---- Related to URB cancellation ---- */
+ struct list_head cancelled_td_list;
+ /* The TRB that was last reported in a stopped endpoint ring */
+ union xhci_trb *stopped_trb;
+ struct xhci_td *stopped_td;
+ unsigned int stopped_stream;
+ /* Watchdog timer for stop endpoint command to cancel URBs */
+ struct timer_list stop_cmd_timer;
+ int stop_cmds_pending;
+ struct xhci_hcd *xhci;
+ /* Dequeue pointer and dequeue segment for a submitted Set TR Dequeue
+ * command. We'll need to update the ring's dequeue segment and dequeue
+ * pointer after the command completes.
+ */
+ struct xhci_segment *queued_deq_seg;
+ union xhci_trb *queued_deq_ptr;
+ /*
+ * Sometimes the xHC can not process isochronous endpoint ring quickly
+ * enough, and it will miss some isoc tds on the ring and generate
+ * a Missed Service Error Event.
+ * Set skip flag when receive a Missed Service Error Event and
+ * process the missed tds on the endpoint ring.
+ */
+ bool skip;
+};
+
+struct xhci_virt_device {
+ /*
+ * Commands to the hardware are passed an "input context" that
+ * tells the hardware what to change in its data structures.
+ * The hardware will return changes in an "output context" that
+ * software must allocate for the hardware. We need to keep
+ * track of input and output contexts separately because
+ * these commands might fail and we don't trust the hardware.
+ */
+ struct xhci_container_ctx *out_ctx;
+ /* Used for addressing devices and configuration changes */
+ struct xhci_container_ctx *in_ctx;
+ /* Rings saved to ensure old alt settings can be re-instated */
+ struct xhci_ring **ring_cache;
+ int num_rings_cached;
+#define XHCI_MAX_RINGS_CACHED 31
+ struct xhci_virt_ep eps[31];
+ struct completion cmd_completion;
+ /* Status of the last command issued for this device */
+ u32 cmd_status;
+ struct list_head cmd_list;
+};
+
+
+/**
+ * struct xhci_device_context_array
+ * @dev_context_ptrs: array of 64-bit DMA addresses for device contexts
+ */
+struct xhci_device_context_array {
+ /* 64-bit device addresses; we only write 32-bit addresses */
+ __le64 dev_context_ptrs[MAX_HC_SLOTS];
+ /* private xHCD pointers */
+ dma_addr_t dma;
+};
+/* TODO: write function to set the 64-bit device DMA address */
+/*
+ * TODO: change this to be dynamically sized at HC mem init time since the HC
+ * might not be able to handle the maximum number of devices possible.
+ */
+
+
+struct xhci_transfer_event {
+ /* 64-bit buffer address, or immediate data */
+ __le64 buffer;
+ __le32 transfer_len;
+ /* This field is interpreted differently based on the type of TRB */
+ __le32 flags;
+};
+
+struct urb_priv {
+ int length;
+ int td_cnt;
+ struct xhci_td *td[0];
+};
+
+/** Transfer Event bit fields **/
+#define TRB_TO_EP_ID(p) (((p) >> 16) & 0x1f)
+#define GET_TRANSFER_LENGTH(p) ((p) & 0xffffff)
+/* Completion Code - only applicable for some types of TRBs */
+#define COMP_CODE_MASK (0xff << 24)
+#define GET_COMP_CODE(p) (((p) & COMP_CODE_MASK) >> 24)
+#define COMP_SUCCESS 1
+/* Data Buffer Error */
+#define COMP_DB_ERR 2
+/* Babble Detected Error */
+#define COMP_BABBLE 3
+/* USB Transaction Error */
+#define COMP_TX_ERR 4
+/* TRB Error - some TRB field is invalid */
+#define COMP_TRB_ERR 5
+/* Stall Error - USB device is stalled */
+#define COMP_STALL 6
+/* Resource Error - HC doesn't have memory for that device configuration */
+#define COMP_ENOMEM 7
+/* Bandwidth Error - not enough room in schedule for this dev config */
+#define COMP_BW_ERR 8
+/* No Slots Available Error - HC ran out of device slots */
+#define COMP_ENOSLOTS 9
+/* Invalid Stream Type Error */
+#define COMP_STREAM_ERR 10
+/* Slot Not Enabled Error - doorbell rung for disabled device slot */
+#define COMP_EBADSLT 11
+/* Endpoint Not Enabled Error */
+#define COMP_EBADEP 12
+/* Short Packet */
+#define COMP_SHORT_TX 13
+/* Ring Underrun - doorbell rung for an empty isoc OUT ep ring */
+#define COMP_UNDERRUN 14
+/* Ring Overrun - isoc IN ep ring is empty when ep is scheduled to RX */
+#define COMP_OVERRUN 15
+/* Virtual Function Event Ring Full Error */
+#define COMP_VF_FULL 16
+/* Parameter Error - Context parameter is invalid */
+#define COMP_EINVAL 17
+/* Bandwidth Overrun Error - isoc ep exceeded its allocated bandwidth */
+#define COMP_BW_OVER 18
+/* Context State Error - illegal context state transition requested */
+#define COMP_CTX_STATE 19
+/* No Ping Response Error - HC didn't get PING_RESPONSE in time to TX */
+#define COMP_PING_ERR 20
+/* Event Ring is full */
+#define COMP_ER_FULL 21
+/* Missed Service Error - HC couldn't service an isoc ep within interval */
+#define COMP_MISSED_INT 23
+/* Successfully stopped command ring */
+#define COMP_CMD_STOP 24
+/* Successfully aborted current command and stopped command ring */
+#define COMP_CMD_ABORT 25
+/* Stopped - transfer was terminated by a stop endpoint command */
+#define COMP_STOP 26
+/* Same as COMP_STOP, but the transferred length in the event is invalid */
+#define COMP_STOP_INVAL 27
+/* Control Abort Error - Debug Capability - control pipe aborted */
+#define COMP_DBG_ABORT 28
+/* TRB type 29 and 30 reserved */
+/* Isoc Buffer Overrun - an isoc IN ep sent more data than could fit in TD */
+#define COMP_BUFF_OVER 31
+/* Event Lost Error - xHC has an "internal event overrun condition" */
+#define COMP_ISSUES 32
+/* Undefined Error - reported when other error codes don't apply */
+#define COMP_UNKNOWN 33
+/* Invalid Stream ID Error */
+#define COMP_STRID_ERR 34
+/* Secondary Bandwidth Error - may be returned by a Configure Endpoint cmd */
+/* FIXME - check for this */
+#define COMP_2ND_BW_ERR 35
+/* Split Transaction Error */
+#define COMP_SPLIT_ERR 36
+
+struct xhci_link_trb {
+ /* 64-bit segment pointer*/
+ __le64 segment_ptr;
+ __le32 intr_target;
+ __le32 control;
+};
+
+/* control bitfields */
+#define LINK_TOGGLE (0x1<<1)
+
+/* Command completion event TRB */
+struct xhci_event_cmd {
+ /* Pointer to command TRB, or the value passed by the event data trb */
+ __le64 cmd_trb;
+ __le32 status;
+ __le32 flags;
+};
+
+/* flags bitmasks */
+/* bits 16:23 are the virtual function ID */
+/* bits 24:31 are the slot ID */
+#define TRB_TO_SLOT_ID(p) (((p) & (0xff<<24)) >> 24)
+#define SLOT_ID_FOR_TRB(p) (((p) & 0xff) << 24)
+
+/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB */
+#define TRB_TO_EP_INDEX(p) ((((p) & (0x1f << 16)) >> 16) - 1)
+#define EP_ID_FOR_TRB(p) ((((p) + 1) & 0x1f) << 16)
+
+/* Set TR Dequeue Pointer command TRB fields */
+#define TRB_TO_STREAM_ID(p) ((((p) & (0xffff << 16)) >> 16))
+#define STREAM_ID_FOR_TRB(p) ((((p)) & 0xffff) << 16)
+
+
+/* Port Status Change Event TRB fields */
+/* Port ID - bits 31:24 */
+#define GET_PORT_ID(p) (((p) & (0xff << 24)) >> 24)
+
+/* Normal TRB fields */
+/* transfer_len bitmasks - bits 0:16 */
+#define TRB_LEN(p) ((p) & 0x1ffff)
+/* Interrupter Target - which MSI-X vector to target the completion event at */
+#define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22)
+#define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff)
+
+/* Cycle bit - indicates TRB ownership by HC or HCD */
+#define TRB_CYCLE (1<<0)
+/*
+ * Force next event data TRB to be evaluated before task switch.
+ * Used to pass OS data back after a TD completes.
+ */
+#define TRB_ENT (1<<1)
+/* Interrupt on short packet */
+#define TRB_ISP (1<<2)
+/* Set PCIe no snoop attribute */
+#define TRB_NO_SNOOP (1<<3)
+/* Chain multiple TRBs into a TD */
+#define TRB_CHAIN (1<<4)
+/* Interrupt on completion */
+#define TRB_IOC (1<<5)
+/* The buffer pointer contains immediate data */
+#define TRB_IDT (1<<6)
+
+#define TRB_BEI (1<<9)
+
+/* Control transfer TRB specific fields */
+#define TRB_DIR_IN (1<<16)
+
+/* Isochronous TRB specific fields */
+#define TRB_SIA (1<<31)
+
+struct xhci_generic_trb {
+ __le32 field[4];
+};
+
+union xhci_trb {
+ struct xhci_link_trb link;
+ struct xhci_transfer_event trans_event;
+ struct xhci_event_cmd event_cmd;
+ struct xhci_generic_trb generic;
+};
+
+/* TRB bit mask */
+#define TRB_TYPE_BITMASK (0xfc00)
+#define TRB_TYPE(p) ((p) << 10)
+#define TRB_FIELD_TO_TYPE(p) (((p) & TRB_TYPE_BITMASK) >> 10)
+/* TRB type IDs */
+/* bulk, interrupt, isoc scatter/gather, and control data stage */
+#define TRB_NORMAL 1
+/* setup stage for control transfers */
+#define TRB_SETUP 2
+/* data stage for control transfers */
+#define TRB_DATA 3
+/* status stage for control transfers */
+#define TRB_STATUS 4
+/* isoc transfers */
+#define TRB_ISOC 5
+/* TRB for linking ring segments */
+#define TRB_LINK 6
+#define TRB_EVENT_DATA 7
+/* Transfer Ring No-op (not for the command ring) */
+#define TRB_TR_NOOP 8
+/* Command TRBs */
+/* Enable Slot Command */
+#define TRB_ENABLE_SLOT 9
+/* Disable Slot Command */
+#define TRB_DISABLE_SLOT 10
+/* Address Device Command */
+#define TRB_ADDR_DEV 11
+/* Configure Endpoint Command */
+#define TRB_CONFIG_EP 12
+/* Evaluate Context Command */
+#define TRB_EVAL_CONTEXT 13
+/* Reset Endpoint Command */
+#define TRB_RESET_EP 14
+/* Stop Transfer Ring Command */
+#define TRB_STOP_RING 15
+/* Set Transfer Ring Dequeue Pointer Command */
+#define TRB_SET_DEQ 16
+/* Reset Device Command */
+#define TRB_RESET_DEV 17
+/* Force Event Command (opt) */
+#define TRB_FORCE_EVENT 18
+/* Negotiate Bandwidth Command (opt) */
+#define TRB_NEG_BANDWIDTH 19
+/* Set Latency Tolerance Value Command (opt) */
+#define TRB_SET_LT 20
+/* Get port bandwidth Command */
+#define TRB_GET_BW 21
+/* Force Header Command - generate a transaction or link management packet */
+#define TRB_FORCE_HEADER 22
+/* No-op Command - not for transfer rings */
+#define TRB_CMD_NOOP 23
+/* TRB IDs 24-31 reserved */
+/* Event TRBS */
+/* Transfer Event */
+#define TRB_TRANSFER 32
+/* Command Completion Event */
+#define TRB_COMPLETION 33
+/* Port Status Change Event */
+#define TRB_PORT_STATUS 34
+/* Bandwidth Request Event (opt) */
+#define TRB_BANDWIDTH_EVENT 35
+/* Doorbell Event (opt) */
+#define TRB_DOORBELL 36
+/* Host Controller Event */
+#define TRB_HC_EVENT 37
+/* Device Notification Event - device sent function wake notification */
+#define TRB_DEV_NOTE 38
+/* MFINDEX Wrap Event - microframe counter wrapped */
+#define TRB_MFINDEX_WRAP 39
+/* TRB IDs 40-47 reserved, 48-63 is vendor-defined */
+
+/* Nec vendor-specific command completion event. */
+#define TRB_NEC_CMD_COMP 48
+/* Get NEC firmware revision. */
+#define TRB_NEC_GET_FW 49
+
+#define TRB_TYPE_LINK(x) (((x) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
+/* Above, but for __le32 types -- can avoid work by swapping constants: */
+#define TRB_TYPE_LINK_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
+ cpu_to_le32(TRB_TYPE(TRB_LINK)))
+#define TRB_TYPE_NOOP_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
+ cpu_to_le32(TRB_TYPE(TRB_TR_NOOP)))
+
+
+#define NEC_FW_MINOR(p) (((p) >> 0) & 0xff)
+#define NEC_FW_MAJOR(p) (((p) >> 8) & 0xff)
+
+#define TRB_DEV_NOTE_TYEP(p) (((p)>>4) & 0xf)
+#define TRB_DEV_NOTE_VALUE_LO(p) ((p)>>8)
+
+/*
+ * TRBS_PER_SEGMENT must be a multiple of 4,
+ * since the command ring is 64-byte aligned.
+ * It must also be greater than 16.
+ */
+#define TRBS_PER_SEGMENT 254
+/* Allow two commands + a link TRB, along with any reserved command TRBs */
+#define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3)
+#define SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
+/* SEGMENT_SHIFT should be log2(SEGMENT_SIZE).
+ * Change this if you change TRBS_PER_SEGMENT!
+ */
+#define SEGMENT_SHIFT 10
+/* TRB buffer pointers can't cross 64KB boundaries */
+#define TRB_MAX_BUFF_SHIFT 16
+//#define TRB_MAX_BUFF_SHIFT 8
+#define TRB_MAX_BUFF_SIZE (1 << TRB_MAX_BUFF_SHIFT)
+
+/* mtk scheduler bitmasks */
+#define BPKTS(p) ((p) & 0x3f)
+#define BCSCOUNT(p) (((p) & 0x7) << 8)
+#define BBM(p) ((p) << 11)
+#define BOFFSET(p) ((p) & 0x3fff)
+#define BREPEAT(p) (((p) & 0x7fff) << 16)
+
+
+struct xhci_segment {
+ union xhci_trb *trbs;
+ /* private to HCD */
+ struct xhci_segment *next;
+ dma_addr_t dma;
+};
+
+struct xhci_td {
+ struct list_head td_list;
+ struct list_head cancelled_td_list;
+ struct urb *urb;
+ struct xhci_segment *start_seg;
+ union xhci_trb *first_trb;
+ union xhci_trb *last_trb;
+};
+
+struct xhci_dequeue_state {
+ struct xhci_segment *new_deq_seg;
+ union xhci_trb *new_deq_ptr;
+ int new_cycle_state;
+};
+
+struct xhci_ring {
+ struct xhci_segment *first_seg;
+ union xhci_trb *enqueue;
+ struct xhci_segment *enq_seg;
+ unsigned int enq_updates;
+ union xhci_trb *dequeue;
+ struct xhci_segment *deq_seg;
+ unsigned int deq_updates;
+ struct list_head td_list;
+ /*
+ * Write the cycle state into the TRB cycle field to give ownership of
+ * the TRB to the host controller (if we are the producer), or to check
+ * if we own the TRB (if we are the consumer). See section 4.9.1.
+ */
+ u32 cycle_state;
+ unsigned int stream_id;
+// spinlock_t lock;
+};
+
+struct xhci_erst_entry {
+ /* 64-bit event ring segment address */
+ __le64 seg_addr;
+ __le32 seg_size;
+ /* Set to zero */
+ __le32 rsvd;
+};
+
+struct xhci_erst {
+ struct xhci_erst_entry *entries;
+ unsigned int num_entries;
+ /* xhci->event_ring keeps track of segment dma addresses */
+ dma_addr_t erst_dma_addr;
+ /* Num entries the ERST can contain */
+ unsigned int erst_size;
+};
+
+struct xhci_scratchpad {
+ u64 *sp_array;
+ dma_addr_t sp_dma;
+ void **sp_buffers;
+ dma_addr_t *sp_dma_buffers;
+};
+
+/*
+ * Each segment table entry is 4*32bits long. 1K seems like an ok size:
+ * (1K bytes * 8 bits/byte) / (4*32 bits) = 64 segment entries in the table,
+ * meaning 64 ring segments.
+ * Initial number of event ring segments allocated */
+#define ERST_NUM_SEGS 1
+/* Initial allocated size of the ERST, in number of entries */
+#define ERST_SIZE 64
+/* Initial number of event segment rings allocated */
+#define ERST_ENTRIES 1
+/* Poll every 60 seconds */
+#define POLL_TIMEOUT 60
+/* Stop endpoint command timeout (secs) for URB cancellation watchdog timer */
+#define XHCI_STOP_EP_CMD_TIMEOUT 5
+/* XXX: Make these module parameters */
+
+
+/* There is one xhci_hcd structure per controller */
+struct xhci_hcd {
+ /* glue to PCI and HCD framework */
+ struct xhci_cap_regs __iomem *cap_regs;
+ struct xhci_op_regs __iomem *op_regs;
+ struct xhci_run_regs __iomem *run_regs;
+ struct xhci_doorbell_array __iomem *dba;
+ /* Our HCD's current interrupter register set */
+ struct xhci_intr_reg __iomem *ir_set;
+
+ /* Cached register copies of read-only HC data */
+ __u32 hcs_params1;
+ __u32 hcs_params2;
+ __u32 hcs_params3;
+ __u32 hcc_params;
+
+ spinlock_t lock;
+
+ /* packed release number */
+ u8 sbrn;
+ u16 hci_version;
+ u8 max_slots;
+ u8 max_interrupters;
+ u8 max_ports;
+ u8 isoc_threshold;
+ int event_ring_max;
+ int addr_64;
+ /* 4KB min, 128MB max */
+ int page_size;
+ /* Valid values are 12 to 20, inclusive */
+ int page_shift;
+ /* only one MSI vector for now, but might need more later */
+ int msix_count;
+ struct msix_entry *msix_entries;
+ /* data structures */
+ struct xhci_device_context_array *dcbaa;
+ struct xhci_ring *cmd_ring;
+ unsigned int cmd_ring_reserved_trbs;
+ struct xhci_ring *event_ring;
+ struct xhci_erst erst;
+ /* Scratchpad */
+ struct xhci_scratchpad *scratchpad;
+
+ /* slot enabling and address device helpers */
+ struct completion addr_dev;
+ int slot_id;
+ /* Internal mirror of the HW's dcbaa */
+ struct xhci_virt_device *devs[MAX_HC_SLOTS];
+
+ /* DMA pools */
+ struct dma_pool *device_pool;
+ struct dma_pool *segment_pool;
+ struct dma_pool *small_streams_pool;
+ struct dma_pool *medium_streams_pool;
+
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+ /* Poll the rings - for debugging */
+ struct timer_list event_ring_timer;
+ int zombie;
+#endif
+ /* Host controller watchdog timer structures */
+ unsigned int xhc_state;
+/* Host controller is dying - not responding to commands. "I'm not dead yet!"
+ *
+ * xHC interrupts have been disabled and a watchdog timer will (or has already)
+ * halt the xHCI host, and complete all URBs with an -ESHUTDOWN code. Any code
+ * that sees this status (other than the timer that set it) should stop touching
+ * hardware immediately. Interrupt handlers should return immediately when
+ * they see this status (any time they drop and re-acquire xhci->lock).
+ * xhci_urb_dequeue() should call usb_hcd_check_unlink_urb() and return without
+ * putting the TD on the canceled list, etc.
+ *
+ * There are no reports of xHCI host controllers that display this issue.
+ */
+#define XHCI_STATE_DYING (1 << 0)
+#define XHCI_STATE_HALTED (1 << 1)
+ /* Statistics */
+ int noops_submitted;
+ int noops_handled;
+ int error_bitmask;
+ unsigned int quirks;
+#define XHCI_LINK_TRB_QUIRK (1 << 0)
+#define XHCI_RESET_EP_QUIRK (1 << 1)
+#define XHCI_NEC_HOST (1 << 2)
+};
+
+/* For testing purposes */
+#define NUM_TEST_NOOPS 0
+
+/* convert between an HCD pointer and the corresponding xhci_hcd */
+static inline struct xhci_hcd *hcd_to_xhci(struct usb_hcd *hcd)
+{
+ return (struct xhci_hcd *) (hcd->hcd_priv);
+}
+
+static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
+{
+ return container_of((void *) xhci, struct usb_hcd, hcd_priv);
+}
+
+#define XHCI_DEBUG 0
+
+#define xhci_dbg(xhci, fmt, args...) \
+ do { if (XHCI_DEBUG) printk( KERN_ERR fmt, ##args); } while (0)
+#define xhci_info(xhci, fmt, args...) \
+ do { if (XHCI_DEBUG) printk( KERN_ERR fmt, ##args); } while (0)
+#define xhci_err(xhci, fmt, args...) \
+ do { printk( KERN_ERR fmt, ##args); } while (0)
+#define xhci_warn(xhci, fmt, args...) \
+ do { printk( KERN_ERR fmt, ##args); } while (0)
+
+
+#if 0
+#define xhci_dbg(xhci, fmt, args...) \
+ do { if (XHCI_DEBUG) dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0)
+#define xhci_info(xhci, fmt, args...) \
+ do { if (XHCI_DEBUG) dev_info(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0)
+#define xhci_err(xhci, fmt, args...) \
+ dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+#define xhci_warn(xhci, fmt, args...) \
+ dev_warn(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+#endif
+/* TODO: copied from ehci.h - can be refactored? */
+/* xHCI spec says all registers are little endian */
+static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
+ __u32 __iomem *regs)
+{
+ xhci_dbg(xhci,
+ "`MEM_READ_DWORD(0x%p = 0x%x);",
+ regs, readl(regs));
+ return readl(regs);
+}
+static inline void xhci_writel(struct xhci_hcd *xhci,
+ const unsigned int val, __u32 __iomem *regs)
+{
+ xhci_dbg(xhci,
+ "`MEM_WRITE_DWORD(0x%p = 0x%x);",
+ regs, readl(regs));
+ writel(val, regs);
+}
+
+/*
+ * Registers should always be accessed with double word or quad word accesses.
+ *
+ * Some xHCI implementations may support 64-bit address pointers. Registers
+ * with 64-bit address pointers should be written to with dword accesses by
+ * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
+ * xHCI implementations that do not support 64-bit address pointers will ignore
+ * the high dword, and write order is irrelevant.
+ */
+static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
+ __le64 __iomem *regs)
+{
+ __u32 __iomem *ptr = (__u32 __iomem *) regs;
+ u64 val_lo = readl(ptr);
+ u64 val_hi = readl(ptr + 1);
+ xhci_dbg(xhci,
+ "`MEM_READ_DWORD(0x%p, 0x%0lx);",
+ regs, (long unsigned int) (val_lo + (val_hi << 32)));
+ return val_lo + (val_hi << 32);
+}
+static inline void xhci_write_64(struct xhci_hcd *xhci,
+ const u64 val, __le64 __iomem *regs)
+{
+ __u32 __iomem *ptr = (__u32 __iomem *) regs;
+ u32 val_lo = lower_32_bits(val);
+ u32 val_hi = upper_32_bits(val);
+
+ xhci_dbg(xhci,
+ "`MEM_WRITE_DWORD(0x%p, 0x%0lx);",
+ regs, (long unsigned int) val);
+ writel(val_lo, ptr);
+ writel(val_hi, ptr + 1);
+}
+
+static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
+{
+ u32 temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
+ return ((HC_VERSION(temp) == 0x95) &&
+ (xhci->quirks & XHCI_LINK_TRB_QUIRK));
+}
+
+/* xHCI debugging */
+void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
+void xhci_print_registers(struct xhci_hcd *xhci);
+void xhci_dbg_regs(struct xhci_hcd *xhci);
+void xhci_print_run_regs(struct xhci_hcd *xhci);
+void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb);
+void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb);
+void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg);
+void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring);
+void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
+void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
+void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
+void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep);
+void mtk_xhci_dbg_ctx(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *ctx,
+ unsigned int last_ep);
+char *xhci_get_slot_state(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *ctx);
+void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ struct xhci_virt_ep *ep);
+
+/* xHCI memory management */
+void xhci_mem_cleanup(struct xhci_hcd *xhci);
+int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags);
+void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id);
+int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags);
+int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev);
+void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
+ struct usb_device *udev);
+unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc);
+unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc);
+unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index);
+unsigned int xhci_last_valid_endpoint(u32 added_ctxs);
+void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep);
+void xhci_endpoint_copy(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx,
+ struct xhci_container_ctx *out_ctx,
+ unsigned int ep_index);
+void xhci_slot_copy(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx,
+ struct xhci_container_ctx *out_ctx);
+int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
+ struct usb_device *udev, struct usb_host_endpoint *ep,
+ gfp_t mem_flags);
+void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
+void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ unsigned int ep_index);
+struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
+ unsigned int num_stream_ctxs,
+ unsigned int num_streams, gfp_t flags);
+void xhci_free_stream_info(struct xhci_hcd *xhci,
+ struct xhci_stream_info *stream_info);
+void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
+ struct xhci_ep_ctx *ep_ctx,
+ struct xhci_stream_info *stream_info);
+void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
+ struct xhci_ep_ctx *ep_ctx,
+ struct xhci_virt_ep *ep);
+struct xhci_ring *xhci_dma_to_transfer_ring(
+ struct xhci_virt_ep *ep,
+ u64 address);
+struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
+ struct urb *urb);
+struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ unsigned int stream_id);
+struct xhci_ring *xhci_stream_id_to_ring(
+ struct xhci_virt_device *dev,
+ unsigned int ep_index,
+ unsigned int stream_id);
+struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
+ bool allocate_in_ctx, bool allocate_completion,
+ gfp_t mem_flags);
+void xhci_free_command(struct xhci_hcd *xhci,
+ struct xhci_command *command);
+void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv);
+
+irqreturn_t xhci_mtk_irq(struct usb_hcd *hcd);
+//int xhci_mtk_pci_setup(struct usb_hcd *hcd);
+int xhci_mtk_run(struct usb_hcd *hcd);
+void xhci_mtk_stop(struct usb_hcd *hcd);
+void xhci_mtk_shutdown(struct usb_hcd *hcd);
+int xhci_mtk_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
+int xhci_mtk_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
+int xhci_mtk_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_mtk_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_mtk_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev
+ , struct usb_host_endpoint **eps, unsigned int num_eps,
+ unsigned int num_streams, gfp_t mem_flags);
+int xhci_mtk_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ gfp_t mem_flags);
+int xhci_mtk_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
+int xhci_mtk_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
+void xhci_mtk_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep);
+int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_mtk_address_device(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_mtk_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+ struct usb_tt *tt, gfp_t mem_flags);
+int xhci_mtk_reset_device(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_mtk_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength);
+int xhci_mtk_hub_status_data(struct usb_hcd *hcd, char *buf);
+int xhci_mtk_get_frame(struct usb_hcd *hcd);
+
+void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev);
+//int xhci_register_pci(void);
+//void xhci_unregister_pci(void);
+
+
+/* xHCI host controller glue */
+void xhci_quiesce(struct xhci_hcd *xhci);
+int xhci_halt(struct xhci_hcd *xhci);
+int xhci_reset(struct xhci_hcd *xhci);
+int xhci_init(struct usb_hcd *hcd);
+int xhci_run(struct usb_hcd *hcd);
+void xhci_stop(struct usb_hcd *hcd);
+void xhci_shutdown(struct usb_hcd *hcd);
+int xhci_get_frame(struct usb_hcd *hcd);
+irqreturn_t xhci_irq(struct usb_hcd *hcd);
+int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ unsigned int num_streams, gfp_t mem_flags);
+int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ gfp_t mem_flags);
+int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+ struct usb_tt *tt, gfp_t mem_flags);
+int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
+int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
+int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
+int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
+void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep);
+int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+
+/* xHCI ring, segment, TRB, and TD functions */
+dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
+struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
+ union xhci_trb *start_trb, union xhci_trb *end_trb,
+ dma_addr_t suspect_dma);
+int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code);
+void xhci_ring_cmd_db(struct xhci_hcd *xhci);
+void *xhci_setup_one_noop(struct xhci_hcd *xhci);
+void *mtk_xhci_setup_one_noop(struct xhci_hcd *xhci);
+void xhci_handle_event(struct xhci_hcd *xhci);
+void xhci_set_hc_event_deq(struct xhci_hcd *xhci);
+int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
+int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+ u32 slot_id, char isBSR);
+int xhci_queue_vendor_command(struct xhci_hcd *xhci,
+ u32 field1, u32 field2, u32 field3, u32 field4);
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
+ unsigned int ep_index);
+int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
+ int slot_id, unsigned int ep_index);
+int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
+ int slot_id, unsigned int ep_index);
+int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
+ int slot_id, unsigned int ep_index);
+int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
+ struct urb *urb, int slot_id, unsigned int ep_index);
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+ u32 slot_id, bool command_must_succeed);
+int xhci_queue_deconfigure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+ u32 slot_id, bool command_must_succeed);
+int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+ u32 slot_id);
+int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
+ unsigned int ep_index);
+int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id);
+void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ unsigned int stream_id, struct xhci_td *cur_td,
+ struct xhci_dequeue_state *state);
+void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ unsigned int stream_id,
+ struct xhci_dequeue_state *deq_state);
+void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
+ struct usb_device *udev, unsigned int ep_index);
+void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ struct xhci_dequeue_state *deq_state);
+void xhci_stop_endpoint_command_watchdog(unsigned long arg);
+/* xHCI roothub code */
+u32 xhci_port_state_to_clear_change(u32 state);
+int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
+ char *buf, u16 wLength);
+//int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
+/* xHCI contexts */
+struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
+struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
+struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index);
+
+void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer);
+
+void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
+void xhci_dbg_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep);
+struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags);
+void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
+ struct xhci_segment *next, bool link_trbs);
+void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg);
+unsigned int xhci_port_speed(unsigned int port_status);
+u32 xhci_port_state_to_neutral(u32 state);
+
+
+/* TRB MISC fields */
+#define ADDRESS_TRB_BSR (1<<9)
+#define CONFIG_EP_TRB_DC (1<<9)
+
+#endif /* __LINUX_XHCI_HCD_H */
Index: linux-3.18.21/drivers/usb/host/ohci-hcd.c
===================================================================
--- linux-3.18.21.orig/drivers/usb/host/ohci-hcd.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/usb/host/ohci-hcd.c 2018-02-05 13:21:06.000000000 +0800
@@ -1219,6 +1219,11 @@
#define SA1111_DRIVER ohci_hcd_sa1111_driver
#endif
+#if defined (CONFIG_MIPS_RT63365)
+#include "ohci-rt3xxx.c"
+#define PLATFORM_DRIVER ohci_hcd_rt3xxx_driver
+#endif
+
#ifdef CONFIG_USB_OHCI_HCD_DAVINCI
#include "ohci-da8xx.c"
#define DAVINCI_PLATFORM_DRIVER ohci_hcd_da8xx_driver
Index: linux-3.18.21/drivers/usb/host/ohci-rt3xxx.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/ohci-rt3xxx.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,228 @@
+/*
+ * RT3883 OHCI HCD (Host Controller Driver) for USB.
+ *
+ * (C) Copyright 2009 Ralink Tech Company
+ *
+ * Bus Glue for Ralink OHCI controller.
+ *
+ * Written by YYHuang <yy_huang@ralinktech.com.tw>
+ * Based on fragments of previous driver by Russell King et al.
+ *
+ * This file is licenced under the GPL.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/signal.h>
+#include <linux/platform_device.h>
+#if 0
+static void try_wake_up(void)
+{
+ u32 val;
+
+ val = le32_to_cpu(*(volatile u_long *)(0xB0000030));
+ //if(val & 0x00040000)
+ // return; // Someone(OHCI?) has waked it up, then just return.
+ val = val | 0x00140000;
+ *(volatile u_long *)(0xB0000030) = cpu_to_le32(val);
+ udelay(10000); // enable port0 & port1 Phy clock
+
+ val = le32_to_cpu(*(volatile u_long *)(0xB0000034));
+ val = val & 0xFDBFFFFF;
+ *(volatile u_long *)(0xB0000034) = cpu_to_le32(val);
+ udelay(10000); // toggle reset bit 25 & 22 to 0
+}
+
+static void try_sleep(void)
+{
+ u32 val;
+
+ val = le32_to_cpu(*(volatile u_long *)(0xB0000030));
+ val = val & 0xFFEBFFFF;
+ *(volatile u_long *)(0xB0000030) = cpu_to_le32(val);
+ udelay(10000); // disable port0 & port1 Phy clock
+
+ val = le32_to_cpu(*(volatile u_long *)(0xB0000034));
+ val = val | 0x02400000;
+ *(volatile u_long *)(0xB0000034) = cpu_to_le32(val);
+ udelay(10000); // toggle reset bit 25 & 22 to 1
+}
+#endif
+static int usb_hcd_rt3xxx_probe(const struct hc_driver *driver, struct platform_device *pdev)
+{
+ int retval;
+ struct usb_hcd *hcd;
+
+ if (pdev->resource[1].flags != IORESOURCE_IRQ) {
+ pr_debug("resource[1] is not IORESOURCE_IRQ");
+ return -ENOMEM;
+ }
+
+ hcd = usb_create_hcd(driver, &pdev->dev, "rt3xxx-ohci");
+ if (hcd == NULL)
+ return -ENOMEM;
+
+ hcd->rsrc_start = pdev->resource[0].start;
+ hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1;
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
+ usb_put_hcd(hcd);
+ retval = -EBUSY;
+ goto err1;
+ }
+
+ hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+ if (hcd->regs == NULL) {
+ pr_debug("ioremap failed");
+ retval = -ENOMEM;
+ goto err2;
+ }
+
+// usb_host_clock = clk_get(&pdev->dev, "usb_host");
+// ep93xx_start_hc(&pdev->dev);
+
+ //try_wake_up();
+
+ ohci_hcd_init(hcd_to_ohci(hcd));
+
+ retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_DISABLED | IRQF_SHARED);
+ if (retval == 0)
+ return retval;
+
+ iounmap(hcd->regs);
+err2:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err1:
+ usb_put_hcd(hcd);
+
+ return retval;
+}
+
+static void usb_hcd_rt3xxx_remove(struct usb_hcd *hcd, struct platform_device *pdev)
+{
+ usb_remove_hcd(hcd);
+// ep93xx_stop_hc(&pdev->dev);
+// clk_put(usb_host_clock);
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+}
+
+static int ohci_rt3xxx_start(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int ret;
+
+ if ((ret = ohci_init(ohci)) < 0)
+ return ret;
+
+ if ((ret = ohci_run(ohci)) < 0) {
+ err("can't start %s", hcd->self.bus_name);
+ ohci_stop(hcd);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct hc_driver ohci_rt3xxx_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "RT3xxx OHCI Controller",
+ .hcd_priv_size = sizeof(struct ohci_hcd),
+ .irq = ohci_irq,
+ .flags = HCD_USB11 | HCD_MEMORY,
+ .start = ohci_rt3xxx_start,
+ .stop = ohci_stop,
+ .shutdown = ohci_shutdown,
+ .urb_enqueue = ohci_urb_enqueue,
+ .urb_dequeue = ohci_urb_dequeue,
+ .endpoint_disable = ohci_endpoint_disable,
+ .get_frame_number = ohci_get_frame,
+ .hub_status_data = ohci_hub_status_data,
+ .hub_control = ohci_hub_control,
+// .hub_irq_enable = ohci_rhsc_enable,
+#ifdef CONFIG_PM
+ .bus_suspend = ohci_bus_suspend,
+ .bus_resume = ohci_bus_resume,
+#endif
+ .start_port_reset = ohci_start_port_reset,
+};
+
+extern int usb_disabled(void);
+
+
+static int ohci_hcd_rt3xxx_drv_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = -ENODEV;
+ if (!usb_disabled()){
+ ret = usb_hcd_rt3xxx_probe(&ohci_rt3xxx_hc_driver, pdev);
+ }
+
+ return ret;
+}
+
+static int ohci_hcd_rt3xxx_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ usb_hcd_rt3xxx_remove(hcd, pdev);
+
+ //if(!usb_find_device(0x0, 0x0)) // No any other USB host controller.
+ //try_sleep();
+
+ return 0;
+}
+
+/*
+#ifdef CONFIG_PM
+static int ohci_hcd_ep93xx_drv_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+
+ if (time_before(jiffies, ohci->next_statechange))
+ msleep(5);
+ ohci->next_statechange = jiffies;
+
+ ep93xx_stop_hc(&pdev->dev);
+ hcd->state = HC_STATE_SUSPENDED;
+ pdev->dev.power.power_state = PMSG_SUSPEND;
+
+ return 0;
+}
+
+static int ohci_hcd_ep93xx_drv_resume(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int status;
+
+ if (time_before(jiffies, ohci->next_statechange))
+ msleep(5);
+ ohci->next_statechange = jiffies;
+
+ ep93xx_start_hc(&pdev->dev);
+ pdev->dev.power.power_state = PMSG_ON;
+ usb_hcd_resume_root_hub(hcd);
+
+ return 0;
+}
+#endif
+*/
+
+static struct platform_driver ohci_hcd_rt3xxx_driver = {
+ .probe = ohci_hcd_rt3xxx_drv_probe,
+ .remove = ohci_hcd_rt3xxx_drv_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+/*
+#ifdef CONFIG_PM
+ .suspend = ohci_hcd_rt3xxx_drv_suspend,
+ .resume = ohci_hcd_rt3xxx_drv_resume,
+#endif
+*/
+ .driver = {
+ .name = "rt3xxx-ohci",
+ },
+};
+
Index: linux-3.18.21/drivers/usb/host/xhci-mem.c
===================================================================
--- linux-3.18.21.orig/drivers/usb/host/xhci-mem.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/usb/host/xhci-mem.c 2018-02-05 13:21:06.000000000 +0800
@@ -67,6 +67,8 @@
static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
+ if (!seg)
+ return;
if (seg->trbs) {
dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
seg->trbs = NULL;
@@ -1475,9 +1477,19 @@
max_burst = (usb_endpoint_maxp(&ep->desc)
& 0x1800) >> 11;
}
+ if(xhci_mtk_host(xhci)){
+ if ((max_packet % 4 == 2) && (max_packet % 16 != 14)
+ && (max_burst == 0) && usb_endpoint_dir_in(&ep->desc))
+ max_packet += 2;
+ }
break;
case USB_SPEED_FULL:
case USB_SPEED_LOW:
+ if(xhci_mtk_host(xhci)){
+ if ((max_packet % 4 == 2) && (max_packet % 16 != 14)
+ && (max_burst == 0) && usb_endpoint_dir_in(&ep->desc))
+ max_packet += 2;
+ }
break;
default:
BUG();
@@ -2096,7 +2108,7 @@
addr, port_offset, port_count, major_revision);
/* Port count includes the current port offset */
if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
- /* WTF? "Valid values are 1 to MaxPorts" */
+		/* WTF? "Valid values are 1 to MaxPorts" */
return;
/* cache usb2 port capabilities */
Index: linux-3.18.21/drivers/usb/host/xhci-mtk-power.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/xhci-mtk-power.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,115 @@
+#include "xhci-mtk.h"
+#include "xhci-mtk-power.h"
+#include "xhci.h"
+#include <linux/kernel.h> /* printk() */
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+static int g_num_u3_port;
+static int g_num_u2_port;
+
+
+void enableXhciAllPortPower(struct xhci_hcd *xhci){
+ int i;
+ u32 port_id, temp;
+ u32 __iomem *addr;
+
+ g_num_u3_port = SSUSB_U3_PORT_NUM(readl(SSUSB_IP_CAP));
+ g_num_u2_port = SSUSB_U2_PORT_NUM(readl(SSUSB_IP_CAP));
+
+ for(i=1; i<=g_num_u3_port; i++){
+ port_id=i;
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(port_id-1 & 0xff);
+ temp = readl(addr);
+ temp = xhci_port_state_to_neutral(temp);
+ temp |= PORT_POWER;
+ writel(temp, addr);
+ }
+ for(i=1; i<=g_num_u2_port; i++){
+ port_id=i+g_num_u3_port;
+ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(port_id-1 & 0xff);
+ temp = readl(addr);
+ temp = xhci_port_state_to_neutral(temp);
+ temp |= PORT_POWER;
+ writel(temp, addr);
+ }
+}
+
+void enableAllClockPower(void){
+
+ int i;
+ u32 temp;
+
+ g_num_u3_port = SSUSB_U3_PORT_NUM(readl(SSUSB_IP_CAP));
+ g_num_u2_port = SSUSB_U2_PORT_NUM(readl(SSUSB_IP_CAP));
+
+ //2. Enable xHC
+ writel(readl(SSUSB_IP_PW_CTRL) | (SSUSB_IP_SW_RST), SSUSB_IP_PW_CTRL);
+ writel(readl(SSUSB_IP_PW_CTRL) & (~SSUSB_IP_SW_RST), SSUSB_IP_PW_CTRL);
+ writel(readl(SSUSB_IP_PW_CTRL_1) & (~SSUSB_IP_PDN), SSUSB_IP_PW_CTRL_1);
+
+ //1. Enable target ports
+ for(i=0; i<g_num_u3_port; i++){
+ temp = readl(SSUSB_U3_CTRL(i));
+ temp = temp & (~SSUSB_U3_PORT_PDN) & (~SSUSB_U3_PORT_DIS);
+ writel(temp, SSUSB_U3_CTRL(i));
+ }
+ for(i=0; i<g_num_u2_port; i++){
+ temp = readl(SSUSB_U2_CTRL(i));
+ temp = temp & (~SSUSB_U2_PORT_PDN) & (~SSUSB_U2_PORT_DIS);
+ writel(temp, SSUSB_U2_CTRL(i));
+ }
+ msleep(100);
+}
+
+
+//(X)disable clock/power of a port
+//(X)if all ports are disabled, disable IP ctrl power
+//disable all ports and IP clock/power, this is just mention HW that the power/clock of port
+//and IP could be disable if suspended.
+//If doesn't not disable all ports at first, the IP clock/power will never be disabled
+//(some U2 and U3 ports are binded to the same connection, that is, they will never enter suspend at the same time
+//port_index: port number
+//port_rev: 0x2 - USB2.0, 0x3 - USB3.0 (SuperSpeed)
+void disablePortClockPower(void){
+ int i;
+ u32 temp;
+
+ g_num_u3_port = SSUSB_U3_PORT_NUM(readl(SSUSB_IP_CAP));
+ g_num_u2_port = SSUSB_U2_PORT_NUM(readl(SSUSB_IP_CAP));
+
+ for(i=0; i<g_num_u3_port; i++){
+ temp = readl(SSUSB_U3_CTRL(i));
+ temp = temp | (SSUSB_U3_PORT_PDN);
+ writel(temp, SSUSB_U3_CTRL(i));
+ }
+ for(i=0; i<g_num_u2_port; i++){
+ temp = readl(SSUSB_U2_CTRL(i));
+ temp = temp | (SSUSB_U2_PORT_PDN);
+ writel(temp, SSUSB_U2_CTRL(i));
+ }
+ writel(readl(SSUSB_IP_PW_CTRL_1) | (SSUSB_IP_PDN), SSUSB_IP_PW_CTRL_1);
+}
+
+//if IP ctrl power is disabled, enable it
+//enable clock/power of a port
+//port_index: port number
+//port_rev: 0x2 - USB2.0, 0x3 - USB3.0 (SuperSpeed)
+void enablePortClockPower(int port_index, int port_rev){
+ int i;
+ u32 temp;
+
+ writel(readl(SSUSB_IP_PW_CTRL_1) & (~SSUSB_IP_PDN), SSUSB_IP_PW_CTRL_1);
+
+ if(port_rev == 0x3){
+ temp = readl(SSUSB_U3_CTRL(port_index));
+ temp = temp & (~SSUSB_U3_PORT_PDN);
+ writel(temp, SSUSB_U3_CTRL(port_index));
+ }
+ else if(port_rev == 0x2){
+ temp = readl(SSUSB_U2_CTRL(port_index));
+ temp = temp & (~SSUSB_U2_PORT_PDN);
+ writel(temp, SSUSB_U2_CTRL(port_index));
+ }
+}
+
Index: linux-3.18.21/drivers/usb/host/xhci-mtk-power.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/xhci-mtk-power.h 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,13 @@
+#ifndef _XHCI_MTK_POWER_H
+#define _XHCI_MTK_POWER_H
+
+#include <linux/usb.h>
+#include "xhci.h"
+#include "xhci-mtk.h"
+
+void enableXhciAllPortPower(struct xhci_hcd *xhci);
+void enableAllClockPower(void);
+void disablePortClockPower(void);
+void enablePortClockPower(int port_index, int port_rev);
+
+#endif
Index: linux-3.18.21/drivers/usb/host/xhci-mtk-scheduler.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/xhci-mtk-scheduler.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,608 @@
+#include "xhci-mtk-scheduler.h"
+#include <linux/kernel.h> /* printk() */
+
+static struct sch_ep **ss_out_eps[MAX_EP_NUM];
+static struct sch_ep **ss_in_eps[MAX_EP_NUM];
+static struct sch_ep **hs_eps[MAX_EP_NUM]; //including tt isoc
+static struct sch_ep **tt_intr_eps[MAX_EP_NUM];
+
+
+int mtk_xhci_scheduler_init(void){
+ int i;
+
+ for(i=0; i<MAX_EP_NUM; i++){
+ ss_out_eps[i] = NULL;
+ }
+ for(i=0; i<MAX_EP_NUM; i++){
+ ss_in_eps[i] = NULL;
+ }
+ for(i=0; i<MAX_EP_NUM; i++){
+ hs_eps[i] = NULL;
+ }
+ for(i=0; i<MAX_EP_NUM; i++){
+ tt_intr_eps[i] = NULL;
+ }
+ return 0;
+}
+
+int add_sch_ep(int dev_speed, int is_in, int isTT, int ep_type, int maxp, int interval, int burst
+ , int mult, int offset, int repeat, int pkts, int cs_count, int burst_mode
+ , int bw_cost, mtk_u32 *ep, struct sch_ep *tmp_ep){
+
+ struct sch_ep **ep_array;
+ int i;
+
+ if(is_in && dev_speed == USB_SPEED_SUPER ){
+ ep_array = (struct sch_ep **)ss_in_eps;
+ }
+ else if(dev_speed == USB_SPEED_SUPER){
+ ep_array = (struct sch_ep **)ss_out_eps;
+ }
+ else if(dev_speed == USB_SPEED_HIGH || (isTT && ep_type == USB_EP_ISOC)){
+ ep_array = (struct sch_ep **)hs_eps;
+ }
+ else{
+ ep_array = (struct sch_ep **)tt_intr_eps;
+ }
+ for(i=0; i<MAX_EP_NUM; i++){
+ if(ep_array[i] == NULL){
+ tmp_ep->dev_speed = dev_speed;
+ tmp_ep->isTT = isTT;
+ tmp_ep->is_in = is_in;
+ tmp_ep->ep_type = ep_type;
+ tmp_ep->maxp = maxp;
+ tmp_ep->interval = interval;
+ tmp_ep->burst = burst;
+ tmp_ep->mult = mult;
+ tmp_ep->offset = offset;
+ tmp_ep->repeat = repeat;
+ tmp_ep->pkts = pkts;
+ tmp_ep->cs_count = cs_count;
+ tmp_ep->burst_mode = burst_mode;
+ tmp_ep->bw_cost = bw_cost;
+ tmp_ep->ep = ep;
+ ep_array[i] = tmp_ep;
+ return SCH_SUCCESS;
+ }
+ }
+ return SCH_FAIL;
+}
+
+int count_ss_bw(int is_in, int ep_type, int maxp, int interval, int burst, int mult, int offset, int repeat
+ , int td_size){
+ int i, j, k;
+ int bw_required[3];
+ int final_bw_required;
+ int bw_required_per_repeat;
+ int tmp_bw_required;
+ struct sch_ep *cur_sch_ep;
+ struct sch_ep **ep_array;
+ int cur_offset;
+ int cur_ep_offset;
+ int tmp_offset;
+ int tmp_interval;
+ int ep_offset;
+ int ep_interval;
+ int ep_repeat;
+ int ep_mult;
+
+ if(is_in){
+ ep_array = (struct sch_ep **)ss_in_eps;
+ }
+ else{
+ ep_array = (struct sch_ep **)ss_out_eps;
+ }
+
+ bw_required[0] = 0;
+ bw_required[1] = 0;
+ bw_required[2] = 0;
+
+ if(repeat == 0){
+ final_bw_required = 0;
+ for(i=0; i<MAX_EP_NUM; i++){
+ cur_sch_ep = ep_array[i];
+ if(cur_sch_ep == NULL){
+ continue;
+ }
+ ep_interval = cur_sch_ep->interval;
+ ep_offset = cur_sch_ep->offset;
+ if(cur_sch_ep->repeat == 0){
+ if(ep_interval >= interval){
+ tmp_offset = ep_offset + ep_interval - offset;
+ tmp_interval = interval;
+ }
+ else{
+ tmp_offset = offset + interval - ep_offset;
+ tmp_interval = ep_interval;
+ }
+ if(tmp_offset % tmp_interval == 0){
+ final_bw_required += cur_sch_ep->bw_cost;
+ }
+ }
+ else{
+ ep_repeat = cur_sch_ep->repeat;
+ ep_mult = cur_sch_ep->mult;
+ for(k=0; k<=ep_mult; k++){
+ cur_ep_offset = ep_offset+(k*ep_mult);
+ if(ep_interval >= interval){
+ tmp_offset = cur_ep_offset + ep_interval - offset;
+ tmp_interval = interval;
+ }
+ else{
+ tmp_offset = offset + interval - cur_ep_offset;
+ tmp_interval = ep_interval;
+ }
+ if(tmp_offset % tmp_interval == 0){
+ final_bw_required += cur_sch_ep->bw_cost;
+ break;
+ }
+ }
+ }
+ }
+ final_bw_required += td_size;
+ }
+ else{
+ bw_required_per_repeat = maxp * (burst+1);
+ for(j=0; j<=mult; j++){
+ tmp_bw_required = 0;
+ cur_offset = offset+(j*repeat);
+ for(i=0; i<MAX_EP_NUM; i++){
+ cur_sch_ep = ep_array[i];
+ if(cur_sch_ep == NULL){
+ continue;
+ }
+ ep_interval = cur_sch_ep->interval;
+ ep_offset = cur_sch_ep->offset;
+ if(cur_sch_ep->repeat == 0){
+ if(ep_interval >= interval){
+ tmp_offset = ep_offset + ep_interval - cur_offset;
+ tmp_interval = interval;
+ }
+ else{
+ tmp_offset = cur_offset + interval - ep_offset;
+ tmp_interval = ep_interval;
+ }
+ if(tmp_offset % tmp_interval == 0){
+ tmp_bw_required += cur_sch_ep->bw_cost;
+ }
+ }
+ else{
+ ep_repeat = cur_sch_ep->repeat;
+ ep_mult = cur_sch_ep->mult;
+ for(k=0; k<=ep_mult; k++){
+ cur_ep_offset = ep_offset+(k*ep_repeat);
+ if(ep_interval >= interval){
+ tmp_offset = cur_ep_offset + ep_interval - cur_offset;
+ tmp_interval = interval;
+ }
+ else{
+ tmp_offset = cur_offset + interval - cur_ep_offset;
+ tmp_interval = ep_interval;
+ }
+ if(tmp_offset % tmp_interval == 0){
+ tmp_bw_required += cur_sch_ep->bw_cost;
+ break;
+ }
+ }
+ }
+ }
+ bw_required[j] = tmp_bw_required;
+ }
+ final_bw_required = SS_BW_BOUND;
+ for(j=0; j<=mult; j++){
+ if(bw_required[j] < final_bw_required){
+ final_bw_required = bw_required[j];
+ }
+ }
+ final_bw_required += bw_required_per_repeat;
+ }
+ return final_bw_required;
+}
+
+int count_hs_bw(int ep_type, int maxp, int interval, int offset, int td_size){
+ int i;
+ int bw_required;
+ struct sch_ep *cur_sch_ep;
+ int tmp_offset;
+ int tmp_interval;
+ int ep_offset;
+ int ep_interval;
+ int cur_tt_isoc_interval; //for isoc tt check
+
+ bw_required = 0;
+ for(i=0; i<MAX_EP_NUM; i++){
+
+ cur_sch_ep = (struct sch_ep *)hs_eps[i];
+ if(cur_sch_ep == NULL){
+ continue;
+ }
+ ep_offset = cur_sch_ep->offset;
+ ep_interval = cur_sch_ep->interval;
+
+ if(cur_sch_ep->isTT && cur_sch_ep->ep_type == USB_EP_ISOC){
+ cur_tt_isoc_interval = ep_interval<<3;
+ if(ep_interval >= interval){
+ tmp_offset = ep_offset + cur_tt_isoc_interval - offset;
+ tmp_interval = interval;
+ }
+ else{
+ tmp_offset = offset + interval - ep_offset;
+ tmp_interval = cur_tt_isoc_interval;
+ }
+ if(cur_sch_ep->is_in){
+ if((tmp_offset%tmp_interval >=2) && (tmp_offset%tmp_interval <= cur_sch_ep->cs_count)){
+ bw_required += 188;
+ }
+ }
+ else{
+ if(tmp_offset%tmp_interval <= cur_sch_ep->cs_count){
+ bw_required += 188;
+ }
+ }
+ }
+ else{
+ if(ep_interval >= interval){
+ tmp_offset = ep_offset + ep_interval - offset;
+ tmp_interval = interval;
+ }
+ else{
+ tmp_offset = offset + interval - ep_offset;
+ tmp_interval = ep_interval;
+ }
+ if(tmp_offset%tmp_interval == 0){
+ bw_required += cur_sch_ep->bw_cost;
+ }
+ }
+ }
+ bw_required += td_size;
+ return bw_required;
+}
+
+int count_tt_isoc_bw(int is_in, int maxp, int interval, int offset, int td_size){
+ char is_cs;
+ int mframe_idx, frame_idx, s_frame, s_mframe, cur_mframe;
+ int bw_required, max_bw;
+ int ss_cs_count;
+ int cs_mframe;
+ int max_frame;
+ int i,j;
+ struct sch_ep *cur_sch_ep;
+ int ep_offset;
+ int ep_interval;
+ int ep_cs_count;
+ int tt_isoc_interval; //for isoc tt check
+ int cur_tt_isoc_interval; //for isoc tt check
+ int tmp_offset;
+ int tmp_interval;
+
+ is_cs = 0;
+
+ tt_isoc_interval = interval<<3; //frame to mframe
+ if(is_in){
+ is_cs = 1;
+ }
+ s_frame = offset/8;
+ s_mframe = offset%8;
+ ss_cs_count = (maxp + (188 - 1))/188;
+ if(is_cs){
+ cs_mframe = offset%8 + 2 + ss_cs_count;
+ if (cs_mframe <= 6)
+ ss_cs_count += 2;
+ else if (cs_mframe == 7)
+ ss_cs_count++;
+ else if (cs_mframe > 8)
+ return -1;
+ }
+ max_bw = 0;
+ if(is_in){
+ i=2;
+	} else { i = 0; } /* OUT direction: start-split begins at the frame offset itself */
+ for(cur_mframe = offset+i; i<ss_cs_count; cur_mframe++, i++){
+ bw_required = 0;
+ for(j=0; j<MAX_EP_NUM; j++){
+ cur_sch_ep = (struct sch_ep *)hs_eps[j];
+ if(cur_sch_ep == NULL){
+ continue;
+ }
+ ep_offset = cur_sch_ep->offset;
+ ep_interval = cur_sch_ep->interval;
+ if(cur_sch_ep->isTT && cur_sch_ep->ep_type == USB_EP_ISOC){
+ //isoc tt
+ //check if mframe offset overlap
+ //if overlap, add 188 to the bw
+ cur_tt_isoc_interval = ep_interval<<3;
+ if(cur_tt_isoc_interval >= tt_isoc_interval){
+ tmp_offset = (ep_offset+cur_tt_isoc_interval) - cur_mframe;
+ tmp_interval = tt_isoc_interval;
+ }
+ else{
+ tmp_offset = (cur_mframe+tt_isoc_interval) - ep_offset;
+ tmp_interval = cur_tt_isoc_interval;
+ }
+ if(cur_sch_ep->is_in){
+ if((tmp_offset%tmp_interval >=2) && (tmp_offset%tmp_interval <= cur_sch_ep->cs_count)){
+ bw_required += 188;
+ }
+ }
+ else{
+ if(tmp_offset%tmp_interval <= cur_sch_ep->cs_count){
+ bw_required += 188;
+ }
+ }
+
+ }
+ else if(cur_sch_ep->ep_type == USB_EP_INT || cur_sch_ep->ep_type == USB_EP_ISOC){
+ //check if mframe
+ if(ep_interval >= tt_isoc_interval){
+ tmp_offset = (ep_offset+ep_interval) - cur_mframe;
+ tmp_interval = tt_isoc_interval;
+ }
+ else{
+ tmp_offset = (cur_mframe+tt_isoc_interval) - ep_offset;
+ tmp_interval = ep_interval;
+ }
+ if(tmp_offset%tmp_interval == 0){
+ bw_required += cur_sch_ep->bw_cost;
+ }
+ }
+ }
+ bw_required += 188;
+ if(bw_required > max_bw){
+ max_bw = bw_required;
+ }
+ }
+ return max_bw;
+}
+
+int count_tt_intr_bw(int interval, int frame_offset){
+ //check all eps in tt_intr_eps
+ int ret;
+ int i,j;
+ int ep_offset;
+ int ep_interval;
+ int tmp_offset;
+ int tmp_interval;
+ ret = SCH_SUCCESS;
+ struct sch_ep *cur_sch_ep;
+
+ for(i=0; i<MAX_EP_NUM; i++){
+ cur_sch_ep = (struct sch_ep *)tt_intr_eps[i];
+ if(cur_sch_ep == NULL){
+ continue;
+ }
+ ep_offset = cur_sch_ep->offset;
+ ep_interval = cur_sch_ep->interval;
+ if(ep_interval >= interval){
+ tmp_offset = ep_offset + ep_interval - frame_offset;
+ tmp_interval = interval;
+ }
+ else{
+ tmp_offset = frame_offset + interval - ep_offset;
+ tmp_interval = ep_interval;
+ }
+
+ if(tmp_offset%tmp_interval==0){
+ return SCH_FAIL;
+ }
+ }
+ return SCH_SUCCESS;
+}
+
+struct sch_ep * mtk_xhci_scheduler_remove_ep(int dev_speed, int is_in, int isTT, int ep_type, mtk_u32 *ep){
+ int i;
+ struct sch_ep **ep_array;
+ struct sch_ep *cur_ep;
+
+ if (is_in && dev_speed == USB_SPEED_SUPER) {
+ ep_array = (struct sch_ep **)ss_in_eps;
+ }
+ else if (dev_speed == USB_SPEED_SUPER) {
+ ep_array = (struct sch_ep **)ss_out_eps;
+ }
+ else if (dev_speed == USB_SPEED_HIGH || (isTT && ep_type == USB_EP_ISOC)) {
+ ep_array = (struct sch_ep **)hs_eps;
+ }
+ else {
+ ep_array = (struct sch_ep **)tt_intr_eps;
+ }
+ for (i = 0; i < MAX_EP_NUM; i++) {
+ cur_ep = (struct sch_ep *)ep_array[i];
+ if(cur_ep != NULL && cur_ep->ep == ep){
+ ep_array[i] = NULL;
+ return cur_ep;
+ }
+ }
+ return NULL;
+}
+
+int mtk_xhci_scheduler_add_ep(int dev_speed, int is_in, int isTT, int ep_type, int maxp, int interval, int burst
+ , int mult, mtk_u32 *ep, mtk_u32 *ep_ctx, struct sch_ep *sch_ep){
+ mtk_u32 bPkts = 0;
+ mtk_u32 bCsCount = 0;
+ mtk_u32 bBm = 1;
+ mtk_u32 bOffset = 0;
+ mtk_u32 bRepeat = 0;
+ int ret;
+ struct mtk_xhci_ep_ctx *temp_ep_ctx;
+ int td_size;
+ int mframe_idx, frame_idx;
+ int bw_cost;
+ int cur_bw, best_bw, best_bw_idx,repeat, max_repeat, best_bw_repeat;
+ int cur_offset, cs_mframe;
+ int break_out;
+ int frame_interval;
+
+ printk(KERN_DEBUG "add_ep parameters, dev_speed %d, is_in %d, isTT %d, ep_type %d, maxp %d, interval %d, burst %d, mult %d, ep 0x%x, ep_ctx 0x%x, sch_ep 0x%x\n", dev_speed, is_in, isTT, ep_type, maxp
+ , interval, burst, mult, ep, ep_ctx, sch_ep);
+ if(isTT && ep_type == USB_EP_INT && ((dev_speed == USB_SPEED_LOW) || (dev_speed == USB_SPEED_FULL))){
+ frame_interval = interval >> 3;
+ for(frame_idx=0; frame_idx<frame_interval; frame_idx++){
+ printk(KERN_ERR "check tt_intr_bw interval %d, frame_idx %d\n", frame_interval, frame_idx);
+ if(count_tt_intr_bw(frame_interval, frame_idx) == SCH_SUCCESS){
+ printk(KERN_ERR "check OK............\n");
+ bOffset = frame_idx<<3;
+ bPkts = 1;
+ bCsCount = 3;
+ bw_cost = maxp;
+ bRepeat = 0;
+ if(add_sch_ep(dev_speed, is_in, isTT, ep_type, maxp, frame_interval, burst, mult
+ , bOffset, bRepeat, bPkts, bCsCount, bBm, maxp, ep, sch_ep) == SCH_FAIL){
+ return SCH_FAIL;
+ }
+ ret = SCH_SUCCESS;
+ break;
+ }
+ }
+ }
+ else if(isTT && ep_type == USB_EP_ISOC){
+ best_bw = HS_BW_BOUND;
+ best_bw_idx = -1;
+ cur_bw = 0;
+ td_size = maxp;
+ break_out = 0;
+ frame_interval = interval>>3;
+ for(frame_idx=0; frame_idx<frame_interval && !break_out; frame_idx++){
+ for(mframe_idx=0; mframe_idx<8; mframe_idx++){
+ cur_offset = (frame_idx*8) + mframe_idx;
+ cur_bw = count_tt_isoc_bw(is_in, maxp, frame_interval, cur_offset, td_size);
+ if(cur_bw >= 0 && cur_bw < best_bw){
+ best_bw_idx = cur_offset;
+ best_bw = cur_bw;
+ if(cur_bw == td_size || cur_bw < (HS_BW_BOUND>>1)){
+ break_out = 1;
+ break;
+ }
+ }
+ }
+ }
+ if(best_bw_idx == -1){
+ return SCH_FAIL;
+ }
+ else{
+ bOffset = best_bw_idx;
+ bPkts = 1;
+ bCsCount = (maxp + (188 - 1)) / 188;
+ if(is_in){
+ cs_mframe = bOffset%8 + 2 + bCsCount;
+ if (cs_mframe <= 6)
+ bCsCount += 2;
+ else if (cs_mframe == 7)
+ bCsCount++;
+ }
+ bw_cost = 188;
+ bRepeat = 0;
+ if(add_sch_ep( dev_speed, is_in, isTT, ep_type, maxp, interval, burst, mult
+ , bOffset, bRepeat, bPkts, bCsCount, bBm, bw_cost, ep, sch_ep) == SCH_FAIL){
+ return SCH_FAIL;
+ }
+ ret = SCH_SUCCESS;
+ }
+ }
+ else if((dev_speed == USB_SPEED_FULL || dev_speed == USB_SPEED_LOW) && ep_type == USB_EP_INT){
+ bPkts = 1;
+ ret = SCH_SUCCESS;
+ }
+ else if(dev_speed == USB_SPEED_FULL && ep_type == USB_EP_ISOC){
+ bPkts = 1;
+ ret = SCH_SUCCESS;
+ }
+ else if(dev_speed == USB_SPEED_HIGH && (ep_type == USB_EP_INT || ep_type == USB_EP_ISOC)){
+ best_bw = HS_BW_BOUND;
+ best_bw_idx = -1;
+ cur_bw = 0;
+ td_size = maxp*(burst+1);
+ for(cur_offset = 0; cur_offset<interval; cur_offset++){
+ cur_bw = count_hs_bw(ep_type, maxp, interval, cur_offset, td_size);
+ if(cur_bw >= 0 && cur_bw < best_bw){
+ best_bw_idx = cur_offset;
+ best_bw = cur_bw;
+ if(cur_bw == td_size || cur_bw < (HS_BW_BOUND>>1)){
+ break;
+ }
+ }
+ }
+ if(best_bw_idx == -1){
+ return SCH_FAIL;
+ }
+ else{
+ bOffset = best_bw_idx;
+ bPkts = burst + 1;
+ bCsCount = 0;
+ bw_cost = td_size;
+ bRepeat = 0;
+ if(add_sch_ep(dev_speed, is_in, isTT, ep_type, maxp, interval, burst, mult
+ , bOffset, bRepeat, bPkts, bCsCount, bBm, bw_cost, ep, sch_ep) == SCH_FAIL){
+ return SCH_FAIL;
+ }
+ ret = SCH_SUCCESS;
+ }
+ }
+ else if(dev_speed == USB_SPEED_SUPER && (ep_type == USB_EP_INT || ep_type == USB_EP_ISOC)){
+ best_bw = SS_BW_BOUND;
+ best_bw_idx = -1;
+ cur_bw = 0;
+ td_size = maxp * (mult+1) * (burst+1);
+ if(mult == 0){
+ max_repeat = 0;
+ }
+ else{
+ max_repeat = (interval-1)/(mult+1);
+ }
+ break_out = 0;
+ for(frame_idx = 0; (frame_idx < interval) && !break_out; frame_idx++){
+ for(repeat = max_repeat; repeat >= 0; repeat--){
+ cur_bw = count_ss_bw(is_in, ep_type, maxp, interval, burst, mult, frame_idx
+ , repeat, td_size);
+ printk(KERN_DEBUG "count_ss_bw, frame_idx %d, repeat %d, td_size %d, result bw %d\n"
+ , frame_idx, repeat, td_size, cur_bw);
+ if(cur_bw >= 0 && cur_bw < best_bw){
+ best_bw_idx = frame_idx;
+ best_bw_repeat = repeat;
+ best_bw = cur_bw;
+ if(cur_bw <= td_size || cur_bw < (HS_BW_BOUND>>1)){
+ break_out = 1;
+ break;
+ }
+ }
+ }
+ }
+ printk(KERN_DEBUG "final best idx %d, best repeat %d\n", best_bw_idx, best_bw_repeat);
+ if(best_bw_idx == -1){
+ return SCH_FAIL;
+ }
+ else{
+ bOffset = best_bw_idx;
+ bCsCount = 0;
+ bRepeat = best_bw_repeat;
+ if(bRepeat == 0){
+ bw_cost = (burst+1)*(mult+1)*maxp;
+ bPkts = (burst+1)*(mult+1);
+ }
+ else{
+ bw_cost = (burst+1)*maxp;
+ bPkts = (burst+1);
+ }
+ if(add_sch_ep(dev_speed, is_in, isTT, ep_type, maxp, interval, burst, mult
+ , bOffset, bRepeat, bPkts, bCsCount, bBm, bw_cost, ep, sch_ep) == SCH_FAIL){
+ return SCH_FAIL;
+ }
+ ret = SCH_SUCCESS;
+ }
+ }
+ else{
+ bPkts = 1;
+ ret = SCH_SUCCESS;
+ }
+ if(ret == SCH_SUCCESS){
+ temp_ep_ctx = (struct mtk_xhci_ep_ctx *)ep_ctx;
+ temp_ep_ctx->reserved[0] |= cpu_to_le32((BPKTS(bPkts) | BCSCOUNT(bCsCount) | BBM(bBm)));
+ temp_ep_ctx->reserved[1] |= cpu_to_le32((BOFFSET(bOffset) | BREPEAT(bRepeat)));
+
+ printk(KERN_DEBUG "[DBG] BPKTS: %x, BCSCOUNT: %x, BBM: %x\n", bPkts, bCsCount, bBm);
+ printk(KERN_DEBUG "[DBG] BOFFSET: %x, BREPEAT: %x\n", bOffset, bRepeat);
+ return SCH_SUCCESS;
+ }
+ else{
+ return SCH_FAIL;
+ }
+}
Index: linux-3.18.21/drivers/usb/host/xhci-mtk-scheduler.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/xhci-mtk-scheduler.h 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,77 @@
+#ifndef _XHCI_MTK_SCHEDULER_H
+#define _XHCI_MTK_SCHEDULER_H
+
+#define MTK_SCH_NEW 1
+
+#define SCH_SUCCESS 1
+#define SCH_FAIL 0
+
+#define MAX_EP_NUM 64
+#define SS_BW_BOUND 51000
+#define HS_BW_BOUND 6144
+
+#define USB_EP_CONTROL 0
+#define USB_EP_ISOC 1
+#define USB_EP_BULK 2
+#define USB_EP_INT 3
+
+#define USB_SPEED_LOW 1
+#define USB_SPEED_FULL 2
+#define USB_SPEED_HIGH 3
+#define USB_SPEED_SUPER 5
+
+/* mtk scheduler bitmasks */
+#define BPKTS(p) ((p) & 0x3f)
+#define BCSCOUNT(p) (((p) & 0x7) << 8)
+#define BBM(p) ((p) << 11)
+#define BOFFSET(p) ((p) & 0x3fff)
+#define BREPEAT(p) (((p) & 0x7fff) << 16)
+
+
+#if 1
+typedef unsigned int mtk_u32;
+typedef unsigned long long mtk_u64;
+#endif
+
+#define NULL ((void *)0)
+
+struct mtk_xhci_ep_ctx {
+ mtk_u32 ep_info;
+ mtk_u32 ep_info2;
+ mtk_u64 deq;
+ mtk_u32 tx_info;
+ /* offset 0x14 - 0x1f reserved for HC internal use */
+ mtk_u32 reserved[3];
+};
+
+
+struct sch_ep
+{
+ //device info
+ int dev_speed;
+ int isTT;
+ //ep info
+ int is_in;
+ int ep_type;
+ int maxp;
+ int interval;
+ int burst;
+ int mult;
+ //scheduling info
+ int offset;
+ int repeat;
+ int pkts;
+ int cs_count;
+ int burst_mode;
+ //other
+ int bw_cost; //bandwidth cost in each repeat; including overhead
+ mtk_u32 *ep; //address of usb_endpoint pointer
+};
+
+int mtk_xhci_scheduler_init(void);
+int mtk_xhci_scheduler_add_ep(int dev_speed, int is_in, int isTT, int ep_type, int maxp, int interval, int burst
+ , int mult, mtk_u32 *ep, mtk_u32 *ep_ctx, struct sch_ep *sch_ep);
+struct sch_ep * mtk_xhci_scheduler_remove_ep(int dev_speed, int is_in, int isTT, int ep_type, mtk_u32 *ep);
+
+
+#endif
Index: linux-3.18.21/drivers/usb/host/xhci-mtk.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/xhci-mtk.c 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,354 @@
+#include "xhci-mtk.h"
+#include "xhci-mtk-power.h"
+#include "xhci.h"
+#include "mtk-phy.h"
+#ifdef CONFIG_C60802_SUPPORT
+#include "mtk-phy-c60802.h"
+#endif
+#include "xhci-mtk-scheduler.h"
+#include <linux/kernel.h> /* printk() */
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <asm/uaccess.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/usb/ch9.h>
+
+#define CONFIG_EN7512_ASIC 1
+//#define CONFIG_EN7512_FPGA 1
+
+void en7512_en7516_full_speed_process(struct usb_hcd *hcd, struct urb *urb){
+	struct usb_device *udev;
+	char __iomem *ippc_base = (char __iomem *)(SIFSLV_IPPC);
+	char __iomem *usb2_csr_base = (char __iomem *)(SSUSB_U2_SYS_BASE);
+	char __iomem *usb_perf_reg;
+	u32 temp, probe_value;
+
+	udev = urb->dev;
+	if(udev->state != USB_STATE_NOTATTACHED)
+		return;
+	if((udev->speed != USB_SPEED_FULL) && (udev->speed != USB_SPEED_LOW))
+		return;
+	/*Setup Probe Point*/
+	writel(0x3, ippc_base + U3H_SSUSB_PRB_CTRL0);
+	writel(0x0, ippc_base + U3H_SSUSB_PRB_CTRL1);
+	writel(0x7571, ippc_base + U3H_SSUSB_PRB_CTRL3);
+
+	probe_value = readl(ippc_base + U3H_SSUSB_PRB_CTRL5);
+	usb_perf_reg = usb2_csr_base + (udev->portnum - 1)*0x200 + 0x20;
+	/*printk("Stats check %x\n", probe_value);*/
+	if(((probe_value & 0xFF) == 0xEE) || ((probe_value & 0xFF00) == 0xEE00)){
+		/*printk("issue hit port%d\n", (udev->portnum - 1));*/
+		temp = readl(usb_perf_reg);
+		temp |= SWRST;
+		writel(temp, usb_perf_reg);
+	}
+}
+
+
+void setInitialReg(void){
+	__u32 __iomem *addr;
+	u32 temp;
+
+#if defined (CONFIG_EN7512_FPGA)
+	//set MAC reference clock speed
+	addr = SSUSB_U3_MAC_BASE+U3_UX_EXIT_LFPS_TIMING_PAR;
+	temp = readl(addr);
+	temp &= ~(0xff << U3_RX_UX_EXIT_LFPS_REF_OFFSET);
+	temp |= (U3_RX_UX_EXIT_LFPS_REF << U3_RX_UX_EXIT_LFPS_REF_OFFSET);
+	writel(temp, addr);
+	addr = SSUSB_U3_MAC_BASE+U3_REF_CK_PAR;
+	temp = readl(addr);
+	temp &= ~(0xff);
+	temp |= U3_REF_CK_VAL;
+	writel(temp, addr);
+
+	//set SYS_CK
+	addr = SSUSB_U3_SYS_BASE+U3_TIMING_PULSE_CTRL;
+	temp = readl(addr);
+	temp &= ~(0xff);
+	temp |= CNT_1US_VALUE;
+	writel(temp, addr);
+	addr = SSUSB_U2_SYS_BASE+USB20_TIMING_PARAMETER;
+	temp = readl(addr) & ~(0xff);
+	temp |= TIME_VALUE_1US;
+	writel(temp, addr);
+
+	//set LINK_PM_TIMER=3
+	addr = SSUSB_U3_SYS_BASE+LINK_PM_TIMER;
+	temp = readl(addr);
+	temp &= ~(0xf);
+	temp |= PM_LC_TIMEOUT_VALUE;
+	writel(temp, addr);
+#elif defined (CONFIG_EN7512_ASIC)
+	/* set SSUSB DMA burst size to 128B */
+	addr = (__u32 __iomem *)(SSUSB_U3_XHCI_BASE + SSUSB_HDMA_CFG);
+	temp = SSUSB_HDMA_CFG_EN7512_VALUE;
+	writel(temp, addr);
+	mdelay(100);
+	/* extend U3 LTSSM Polling.LFPS timeout value */
+	addr = (__u32 __iomem *)(SSUSB_U3_XHCI_BASE + U3_LTSSM_TIMING_PARAMETER3);
+	temp = U3_LTSSM_TIMING_PARAMETER3_VALUE;
+	writel(temp, addr);
+	mdelay(100);
+	/* EOF */
+	addr = (__u32 __iomem *)(SSUSB_U3_XHCI_BASE + SYNC_HS_EOF);
+	temp = SYNC_HS_EOF_VALUE;
+	writel(temp, addr);
+	mdelay(100);
+#if defined (CONFIG_PERIODIC_ENP)
+	/* HSCH_CFG1: SCH2_FIFO_DEPTH */
+	addr = (__u32 __iomem *)(SSUSB_U3_XHCI_BASE + HSCH_CFG1);
+	temp = readl(addr);
+	temp &= ~(0x3 << SCH2_FIFO_DEPTH_OFFSET);
+	writel(temp, addr);
+#endif
+
+	/* Doorbell handling */
+	addr = (__u32 __iomem *)(SIFSLV_IPPC + SSUSB_IP_SPAR0);
+	temp = 0x1;
+	writel(temp, addr);
+
+	/* Set SW PLL Stable mode to 1 for U2 LPM device remote wakeup */
+	/* Port 0 */
+	addr = (__u32 __iomem *)(U2_PHY_BASE + U2_PHYD_CR1);
+	temp = readl(addr);
+	temp &= ~(0x3 << 18);
+	temp |= (1 << 18);
+	writel(temp, addr);
+
+	/* Port 1 */
+	addr = (__u32 __iomem *)(U2_PHY_BASE_P1 + U2_PHYD_CR1);
+	temp = readl(addr);
+	temp &= ~(0x3 << 18);
+	temp |= (1 << 18);
+	writel(temp, addr);
+
+#endif
+}
+
+
+void setLatchSel(void){
+ __u32 __iomem *latch_sel_addr;
+ u32 latch_sel_value;
+ latch_sel_addr = U3_PIPE_LATCH_SEL_ADD;
+ latch_sel_value = ((U3_PIPE_LATCH_TX)<<2) | (U3_PIPE_LATCH_RX);
+ writel(latch_sel_value, latch_sel_addr);
+}
+
+void reinitIP(void){
+ __u32 __iomem *ip_reset_addr;
+ u32 ip_reset_value;
+
+ enableAllClockPower();
+#ifdef CONFIG_EN7512_FPGA
+ setLatchSel();
+#endif
+ mtk_xhci_scheduler_init();
+}
+
+void dbg_prb_out(void){
+ mtk_probe_init(0x0f0f0f0f);
+ mtk_probe_out(0xffffffff);
+ mtk_probe_out(0x01010101);
+ mtk_probe_out(0x02020202);
+ mtk_probe_out(0x04040404);
+ mtk_probe_out(0x08080808);
+ mtk_probe_out(0x10101010);
+ mtk_probe_out(0x20202020);
+ mtk_probe_out(0x40404040);
+ mtk_probe_out(0x80808080);
+ mtk_probe_out(0x55555555);
+ mtk_probe_out(0xaaaaaaaa);
+}
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define RET_SUCCESS 0
+#define RET_FAIL 1
+
+static int dbg_u3w(int argc, char**argv)
+{
+	int u4TimingValue;
+	char u1TimingValue;
+	int u4TimingAddress;
+
+	if (argc<3)
+	{
+		printk(KERN_ERR "Arg: address value\n");
+		return RET_FAIL;
+	}
+	u3phy_init();
+
+	u4TimingAddress = (int)simple_strtol(argv[1], &argv[1], 16);
+	u4TimingValue = (int)simple_strtol(argv[2], &argv[2], 16);
+	u1TimingValue = u4TimingValue & 0xff;
+	/* access MMIO directly */
+	writel(u1TimingValue, (void __iomem *)u4TimingAddress);
+	printk(KERN_ERR "Write done\n");
+	return RET_SUCCESS;
+
+}
+
+static int dbg_u3r(int argc, char**argv)
+{
+	char u1ReadTimingValue;
+	int u4TimingAddress;
+	if (argc<2)
+	{
+		printk(KERN_ERR "Arg: address\n");
+		return 0;
+	}
+	u3phy_init();
+	mdelay(500);
+	u4TimingAddress = (int)simple_strtol(argv[1], &argv[1], 16);
+	/* access MMIO directly */
+	u1ReadTimingValue = readl((void __iomem *)u4TimingAddress);
+	printk(KERN_ERR "Value = 0x%x\n", u1ReadTimingValue);
+	return 0;
+}
+
+static int dbg_u3init(int argc, char**argv)
+{
+ int ret;
+ ret = u3phy_init();
+ printk(KERN_ERR "phy registers and operations initial done\n");
+ if(u3phy_ops->u2_slew_rate_calibration){
+ u3phy_ops->u2_slew_rate_calibration(u3phy);
+ }
+ else{
+ printk(KERN_ERR "WARN: PHY doesn't implement u2 slew rate calibration function\n");
+ }
+ if(u3phy_ops->init(u3phy) == PHY_TRUE)
+ return RET_SUCCESS;
+ return RET_FAIL;
+}
+
+void dbg_setU1U2(int argc, char**argv){
+	struct xhci_hcd *xhci;
+	int u1_value;
+	int u2_value;
+	u32 port_id, temp;
+	u32 __iomem *addr;
+
+	if (argc<3)
+	{
+		printk(KERN_ERR "Arg: u1value u2value\n");
+		return;
+	}
+
+	u1_value = (int)simple_strtol(argv[1], &argv[1], 10);
+	u2_value = (int)simple_strtol(argv[2], &argv[2], 10);
+	addr = (u32 __iomem *)(SSUSB_U3_XHCI_BASE + 0x424);
+	temp = readl(addr);
+	temp = temp & (~(0x0000ffff));
+	temp = temp | u1_value | (u2_value<<8);
+	writel(temp, addr);
+}
+///////////////////////////////////////////////////////////////////////////////
+
+int call_function(char *buf)
+{
+	int i;
+	int argc;
+	char *argv[80];
+
+	argc = 0;
+	do
+	{
+		argv[argc] = strsep(&buf, " ");
+		printk(KERN_DEBUG "[%d] %s\r\n", argc, argv[argc]);
+		argc++;
+	} while (buf && argc < 80);
+	if (!strcmp("dbg.r", argv[0]))
+		dbg_prb_out();
+	else if (!strcmp("dbg.u3w", argv[0]))
+		dbg_u3w(argc, argv);
+	else if (!strcmp("dbg.u3r", argv[0]))
+		dbg_u3r(argc, argv);
+	else if (!strcmp("dbg.u3i", argv[0]))
+		dbg_u3init(argc, argv);
+	else if (!strcmp("pw.u1u2", argv[0]))
+		dbg_setU1U2(argc, argv);
+	return 0;
+}
+
+long xhci_mtk_test_unlock_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	char w_buf[200];
+	char r_buf[200] = "this is a test";
+	int len = 200;
+
+	switch (cmd) {
+	case IOCTL_READ:
+		if (copy_to_user((char *) arg, r_buf, len)) return -EFAULT;
+		printk(KERN_DEBUG "IOCTL_READ: %s\r\n", r_buf);
+		break;
+	case IOCTL_WRITE:
+		if (copy_from_user(w_buf, (char *) arg, len)) return -EFAULT;
+		w_buf[len - 1] = '\0'; printk(KERN_DEBUG "IOCTL_WRITE: %s\r\n", w_buf);
+
+		//invoke function
+		return call_function(w_buf);
+		break;
+	default:
+		return -ENOTTY;
+	}
+
+	return len;
+}
+
+int xhci_mtk_test_open(struct inode *inode, struct file *file)
+{
+
+ printk(KERN_DEBUG "xhci_mtk_test open: successful\n");
+ return 0;
+}
+
+int xhci_mtk_test_release(struct inode *inode, struct file *file)
+{
+
+ printk(KERN_DEBUG "xhci_mtk_test release: successful\n");
+ return 0;
+}
+
+ssize_t xhci_mtk_test_read(struct file *file, char *buf, size_t count, loff_t *ptr)
+{
+
+ printk(KERN_DEBUG "xhci_mtk_test read: returning zero bytes\n");
+ return 0;
+}
+
+ssize_t xhci_mtk_test_write(struct file *file, const char *buf, size_t count, loff_t * ppos)
+{
+
+ printk(KERN_DEBUG "xhci_mtk_test write: accepting zero bytes\n");
+ return 0;
+}
+
+u32 mtk_xhci_td_remainder(unsigned int td_transfer_size, unsigned int td_running_total, unsigned int maxp, unsigned trb_buffer_length)
+{
+ u32 max = 31;
+ int remainder, td_packet_count, packet_transferred;
+
+ /*
+ * 0 for the last TRB
+ * FIXME: need to workaround if there is ZLP in this TD
+ */
+ if (td_running_total + trb_buffer_length == td_transfer_size)
+ return 0;
+
+ /* FIXME: need to take care of high-bandwidth (MAX_ESIT) */
+ packet_transferred = (td_running_total /*+ trb_buffer_length*/) / maxp;
+ td_packet_count = DIV_ROUND_UP(td_transfer_size, maxp);
+ remainder = td_packet_count - packet_transferred;
+
+ if (remainder > max)
+ return max << 17;
+ else
+ return remainder << 17;
+}
+
+
Index: linux-3.18.21/drivers/usb/host/xhci-mtk.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/drivers/usb/host/xhci-mtk.h 2018-02-05 13:21:06.000000000 +0800
@@ -0,0 +1,126 @@
+#ifndef _XHCI_MTK_H
+#define _XHCI_MTK_H
+
+#include <linux/usb.h>
+#include "xhci.h"
+
+#define SSUSB_U3_XHCI_BASE 0xBFB90000
+#define SSUSB_U3_MAC_BASE 0xBFB92400
+#define SSUSB_U3_SYS_BASE 0xBFB92600
+#define SSUSB_U2_SYS_BASE 0xBFB93400
+#define SSUB_SIF_SLV_TOP 0xBFA80000
+#define SIFSLV_IPPC (SSUB_SIF_SLV_TOP + 0x700)
+
+#define U3_PIPE_LATCH_SEL_ADD SSUSB_U3_MAC_BASE + 0x130
+#define U3_PIPE_LATCH_TX 0
+#define U3_PIPE_LATCH_RX 0
+
+#define U3_UX_EXIT_LFPS_TIMING_PAR 0xa0
+#define U3_REF_CK_PAR 0xb0
+#define U3_RX_UX_EXIT_LFPS_REF_OFFSET 8
+#define U3_RX_UX_EXIT_LFPS_REF 3
+#define U3_REF_CK_VAL 10
+
+#define U3_TIMING_PULSE_CTRL 0xb4
+#define CNT_1US_VALUE 63 //62.5MHz:63, 70MHz:70, 80MHz:80, 100MHz:100, 125MHz:125
+
+#define USB20_TIMING_PARAMETER 0x40
+#define TIME_VALUE_1US 63 //62.5MHz:63, 80MHz:80, 100MHz:100, 125MHz:125
+
+#define LINK_PM_TIMER 0x8
+#define PM_LC_TIMEOUT_VALUE 3
+
+#define XHCI_IMOD 0x624
+#define XHCI_IMOD_EN7512_VALUE 0x10
+
+#define SSUSB_HDMA_CFG 0x950
+#define SSUSB_HDMA_CFG_EN7512_VALUE 0x10E0E0C
+
+#define U3_LTSSM_TIMING_PARAMETER3 0x2514
+#define U3_LTSSM_TIMING_PARAMETER3_VALUE 0x3E8012C
+
+#define U2_PHYD_CR1 0x64
+
+#define SSUSB_IP_SPAR0 0xC8
+
+#define SYNC_HS_EOF 0x938
+#define SYNC_HS_EOF_VALUE 0x201F3
+
+#define HSCH_CFG1 0x960
+#define SCH2_FIFO_DEPTH_OFFSET 16
+
+#define U3H_SSUSB_PRB_CTRL0 0xb0
+#define U3H_SSUSB_PRB_CTRL1 0xb4
+#define U3H_SSUSB_PRB_CTRL2 0xb8
+#define U3H_SSUSB_PRB_CTRL3 0xbc
+#define U3H_SSUSB_PRB_CTRL5 0xc4
+#define SWRST (1<<17)
+
+#define SSUSB_IP_PW_CTRL (SIFSLV_IPPC+0x0)
+#define SSUSB_IP_SW_RST (1<<0)
+#define SSUSB_IP_PW_CTRL_1 (SIFSLV_IPPC+0x4)
+#define SSUSB_IP_PDN (1<<0)
+#define SSUSB_U3_CTRL(p) (SIFSLV_IPPC+0x30+(p*0x08))
+#define SSUSB_U3_PORT_DIS (1<<0)
+#define SSUSB_U3_PORT_PDN (1<<1)
+#define SSUSB_U3_PORT_HOST_SEL (1<<2)
+#define SSUSB_U3_PORT_CKBG_EN (1<<3)
+#define SSUSB_U3_PORT_MAC_RST (1<<4)
+#define SSUSB_U3_PORT_PHYD_RST (1<<5)
+#define SSUSB_U2_CTRL(p) (SIFSLV_IPPC+(0x50)+(p*0x08))
+#define SSUSB_U2_PORT_DIS (1<<0)
+#define SSUSB_U2_PORT_PDN (1<<1)
+#define SSUSB_U2_PORT_HOST_SEL (1<<2)
+#define SSUSB_U2_PORT_CKBG_EN (1<<3)
+#define SSUSB_U2_PORT_MAC_RST (1<<4)
+#define SSUSB_U2_PORT_PHYD_RST (1<<5)
+#define SSUSB_IP_CAP (SIFSLV_IPPC+0x024)
+
+#define SSUSB_U3_PORT_NUM(p) (p & 0xff)
+#define SSUSB_U2_PORT_NUM(p) ((p>>8) & 0xff)
+
+
+#define XHCI_MTK_TEST_MAJOR 234
+#define DEVICE_NAME "xhci_mtk_test"
+
+#define CLI_MAGIC 'CLI'
+#define IOCTL_READ _IOR(CLI_MAGIC, 0, int)
+#define IOCTL_WRITE _IOW(CLI_MAGIC, 1, int)
+
+void reinitIP(void);
+void setInitialReg(void);
+void dbg_prb_out(void);
+int call_function(char *buf);
+
+long xhci_mtk_test_unlock_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+int xhci_mtk_test_open(struct inode *inode, struct file *file);
+int xhci_mtk_test_release(struct inode *inode, struct file *file);
+ssize_t xhci_mtk_test_read(struct file *file, char *buf, size_t count, loff_t *ptr);
+ssize_t xhci_mtk_test_write(struct file *file, const char *buf, size_t count, loff_t * ppos);
+u32 mtk_xhci_td_remainder(unsigned int td_transfer_size, unsigned int td_running_total, unsigned int maxp, unsigned trb_buffer_length);
+/*
+ mediatek probe out
+*/
+/************************************************************************************/
+
+#define SW_PRB_OUT_ADDR (SIFSLV_IPPC+0xc0)
+#define PRB_MODULE_SEL_ADDR (SIFSLV_IPPC+0xbc)
+
+static inline void mtk_probe_init(const u32 byte){
+ __u32 __iomem *ptr = (__u32 __iomem *) PRB_MODULE_SEL_ADDR;
+ writel(byte, ptr);
+}
+
+static inline void mtk_probe_out(const u32 value){
+ __u32 __iomem *ptr = (__u32 __iomem *) SW_PRB_OUT_ADDR;
+ writel(value, ptr);
+}
+
+static inline u32 mtk_probe_value(void){
+ __u32 __iomem *ptr = (__u32 __iomem *) SW_PRB_OUT_ADDR;
+
+ return readl(ptr);
+}
+
+
+#endif
Index: linux-3.18.21/drivers/usb/host/xhci-plat.c
===================================================================
--- linux-3.18.21.orig/drivers/usb/host/xhci-plat.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/usb/host/xhci-plat.c 2018-02-05 13:21:06.000000000 +0800
@@ -33,6 +33,14 @@
* dev struct in order to setup MSI
*/
xhci->quirks |= XHCI_PLAT;
+#if defined (CONFIG_USB_EN7512_XHCI_HCD)
+ /* MTK host controller gives a spurious successful event after a
+ * short transfer. Ignore it.
+ */
+ xhci->quirks |= XHCI_MTK_HOST;
+ xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+ xhci->quirks |= XHCI_LPM_SUPPORT;
+#endif
}
/* called during probe() after chip reset completes */
@@ -79,13 +87,19 @@
driver = &xhci_plat_hc_driver;
+#if defined (CONFIG_USB_EN7512_XHCI_HCD)
+ irq = XHC_IRQ;
+#else
irq = platform_get_irq(pdev, 0);
+#endif
if (irq < 0)
return -ENODEV;
+#if !defined (CONFIG_USB_EN7512_XHCI_HCD)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
+#endif
/* Initialize dma_mask and coherent_dma_mask to 32-bits */
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
@@ -100,6 +114,18 @@
if (!hcd)
return -ENOMEM;
+#if defined (CONFIG_USB_EN7512_XHCI_HCD)
+ hcd->rsrc_start = (uint32_t)XHC_IO_START;
+ hcd->rsrc_len = XHC_IO_LENGTH;
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
+ driver->description)) {
+ dev_dbg(&pdev->dev, "controller already in use\n");
+ ret = -EBUSY;
+ goto put_hcd;
+ }
+
+ hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+#else
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
@@ -108,6 +134,7 @@
ret = PTR_ERR(hcd->regs);
goto put_hcd;
}
+#endif
/*
* Not all platforms have a clk so it is not an error if the
@@ -186,6 +213,9 @@
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct clk *clk = xhci->clk;
+ if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->quirks & XHCI_COMP_MODE_QUIRK))
+ del_timer_sync(&xhci->comp_mode_recovery_timer);
+
usb_remove_hcd(xhci->shared_hcd);
usb_put_hcd(xhci->shared_hcd);
Index: linux-3.18.21/drivers/usb/host/xhci-ring.c
===================================================================
--- linux-3.18.21.orig/drivers/usb/host/xhci-ring.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/usb/host/xhci-ring.c 2018-02-05 13:21:06.000000000 +0800
@@ -68,6 +68,7 @@
#include <linux/slab.h>
#include "xhci.h"
#include "xhci-trace.h"
+#include "xhci-mtk.h"
/*
* Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
@@ -259,10 +260,12 @@
if (ring->num_trbs_free < num_trbs)
return 0;
- if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
- num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
- if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
- return 0;
+ if(!(xhci_mtk_host(xhci))){
+ if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
+ num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
+ if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
+ return 0;
+ }
}
return 1;
@@ -1301,8 +1304,23 @@
}
/* Does the DMA address match our internal dequeue pointer address? */
if (cmd_dma != (u64) cmd_dequeue_dma) {
+ #ifdef TCSUPPORT_USBHOST
+ /* For MTK HC which will use link TRB in event ring completion.
+ * When COMP_CMD_STOP event occured, MTK HC will not skip link TRB.
+ * */
+ if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_STOP) {
+ xhci_dbg(xhci,
+ "Deq pointer mismatch on COMP_CMD_STOP: (0x%llx dma)\n",
+ cmd_dma);
+ }
+ else {
+ xhci->error_bitmask |= 1 << 5;
+ return;
+ }
+ #else
xhci->error_bitmask |= 1 << 5;
return;
+ #endif
}
cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
@@ -2810,16 +2828,19 @@
next = ring->enqueue;
while (last_trb(xhci, ring, ring->enq_seg, next)) {
- /* If we're not dealing with 0.95 hardware or isoc rings
- * on AMD 0.96 host, clear the chain bit.
- */
- if (!xhci_link_trb_quirk(xhci) &&
- !(ring->type == TYPE_ISOC &&
- (xhci->quirks & XHCI_AMD_0x96_HOST)))
+ if(xhci_mtk_host(xhci))
next->link.control &= cpu_to_le32(~TRB_CHAIN);
- else
- next->link.control |= cpu_to_le32(TRB_CHAIN);
-
+ else{
+ /* If we're not dealing with 0.95 hardware or isoc rings
+ * on AMD 0.96 host, clear the chain bit.
+ */
+ if (!xhci_link_trb_quirk(xhci) &&
+ !(ring->type == TYPE_ISOC &&
+ (xhci->quirks & XHCI_AMD_0x96_HOST)))
+ next->link.control &= cpu_to_le32(~TRB_CHAIN);
+ else
+ next->link.control |= cpu_to_le32(TRB_CHAIN);
+ }
wmb();
next->link.control ^= cpu_to_le32(TRB_CYCLE);
@@ -2949,6 +2970,8 @@
start_trb->field[3] |= cpu_to_le32(start_cycle);
else
start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
+ if(xhci_mtk_host(xhci))
+ wmb();
xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}
@@ -3141,14 +3164,20 @@
}
/* Set the TRB length, TD size, and interrupter fields. */
- if (xhci->hci_version < 0x100) {
- remainder = xhci_td_remainder(
- urb->transfer_buffer_length -
- running_total);
- } else {
- remainder = xhci_v1_0_td_remainder(running_total,
- trb_buff_len, total_packet_count, urb,
- num_trbs - 1);
+ if(xhci_mtk_host(xhci)){
+ if (num_trbs > 1)
+ remainder = mtk_xhci_td_remainder(urb->transfer_buffer_length,
+ running_total, usb_endpoint_maxp(&urb->ep->desc), trb_buff_len);
+ }else{
+ if (xhci->hci_version < 0x100) {
+ remainder = xhci_td_remainder(
+ urb->transfer_buffer_length -
+ running_total);
+ } else {
+ remainder = xhci_v1_0_td_remainder(running_total,
+ trb_buff_len, total_packet_count, urb,
+ num_trbs - 1);
+ }
}
length_field = TRB_LEN(trb_buff_len) |
remainder |
@@ -3212,6 +3241,7 @@
int running_total, trb_buff_len, ret;
unsigned int total_packet_count;
u64 addr;
+ int max_packet;
if (urb->num_sgs)
return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
@@ -3237,6 +3267,25 @@
running_total += TRB_MAX_BUFF_SIZE;
}
/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
+ if(xhci_mtk_host(xhci)){
+ switch (urb->dev->speed) {
+ case USB_SPEED_SUPER:
+ max_packet = usb_endpoint_maxp(&urb->ep->desc);
+ break;
+ case USB_SPEED_HIGH:
+ case USB_SPEED_FULL:
+ case USB_SPEED_LOW:
+ case USB_SPEED_WIRELESS:
+ case USB_SPEED_UNKNOWN:
+ default:
+ max_packet = usb_endpoint_maxp(&urb->ep->desc) & 0x7ff;
+ break;
+ }
+ if ((urb->transfer_flags & URB_ZERO_PACKET)
+ && ((urb->transfer_buffer_length % max_packet) == 0)) {
+ num_trbs++;
+ }
+ }
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
@@ -3296,14 +3345,18 @@
field |= TRB_ISP;
/* Set the TRB length, TD size, and interrupter fields. */
- if (xhci->hci_version < 0x100) {
- remainder = xhci_td_remainder(
- urb->transfer_buffer_length -
- running_total);
- } else {
- remainder = xhci_v1_0_td_remainder(running_total,
- trb_buff_len, total_packet_count, urb,
- num_trbs - 1);
+ if(xhci_mtk_host(xhci))
+ remainder = mtk_xhci_td_remainder(urb->transfer_buffer_length, running_total, max_packet, trb_buff_len);
+ else{
+ if (xhci->hci_version < 0x100) {
+ remainder = xhci_td_remainder(
+ urb->transfer_buffer_length -
+ running_total);
+ } else {
+ remainder = xhci_v1_0_td_remainder(running_total,
+ trb_buff_len, total_packet_count, urb,
+ num_trbs - 1);
+ }
}
length_field = TRB_LEN(trb_buff_len) |
remainder |
@@ -3394,7 +3447,7 @@
field |= 0x1;
/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
- if (xhci->hci_version == 0x100) {
+ if (xhci->hci_version == 0x100 || xhci_mtk_host(xhci)) {
if (urb->transfer_buffer_length > 0) {
if (setup->bRequestType & USB_DIR_IN)
field |= TRB_TX_TYPE(TRB_DATA_IN);
@@ -3416,10 +3469,14 @@
field = TRB_ISP | TRB_TYPE(TRB_DATA);
else
field = TRB_TYPE(TRB_DATA);
+ if(xhci_mtk_host(xhci))
+ /* CC: MTK style, no scatter-gather for control transfer */
+ length_field = 0;
+ else
+ length_field = xhci_td_remainder(urb->transfer_buffer_length);
+
+ length_field |= TRB_LEN(urb->transfer_buffer_length) | TRB_INTR_TARGET(0);
- length_field = TRB_LEN(urb->transfer_buffer_length) |
- xhci_td_remainder(urb->transfer_buffer_length) |
- TRB_INTR_TARGET(0);
if (urb->transfer_buffer_length > 0) {
if (setup->bRequestType & USB_DIR_IN)
field |= TRB_DIR_IN;
@@ -3541,6 +3598,7 @@
u64 start_addr, addr;
int i, j;
bool more_trbs_coming;
+ int max_packet;
ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
@@ -3553,6 +3611,20 @@
start_addr = (u64) urb->transfer_dma;
start_trb = &ep_ring->enqueue->generic;
start_cycle = ep_ring->cycle_state;
+ if(xhci_mtk_host(xhci)){
+ switch (urb->dev->speed) {
+ case USB_SPEED_SUPER:
+ max_packet = usb_endpoint_maxp(&urb->ep->desc);
+ break;
+ case USB_SPEED_HIGH:
+ case USB_SPEED_FULL:
+ case USB_SPEED_LOW:
+ case USB_SPEED_WIRELESS:
+ case USB_SPEED_UNKNOWN:
+ max_packet = usb_endpoint_maxp(&urb->ep->desc) & 0x7ff;
+ break;
+ }
+ }
urb_priv = urb->hcpriv;
/* Queue the first TRB, even if it's zero-length */
@@ -3625,9 +3697,8 @@
} else {
td->last_trb = ep_ring->enqueue;
field |= TRB_IOC;
- if (xhci->hci_version == 0x100 &&
- !(xhci->quirks &
- XHCI_AVOID_BEI)) {
+ if((xhci_mtk_host(xhci)||xhci->hci_version == 0x100)&&
+ !(xhci->quirks & XHCI_AVOID_BEI)){
/* Set BEI bit except for the last td */
if (i < num_tds - 1)
field |= TRB_BEI;
@@ -3642,14 +3713,18 @@
trb_buff_len = td_remain_len;
/* Set the TRB length, TD size, & interrupter fields. */
- if (xhci->hci_version < 0x100) {
- remainder = xhci_td_remainder(
- td_len - running_total);
- } else {
- remainder = xhci_v1_0_td_remainder(
- running_total, trb_buff_len,
- total_packet_count, urb,
- (trbs_per_td - j - 1));
+ if(xhci_mtk_host(xhci))
+ remainder = mtk_xhci_td_remainder(urb->transfer_buffer_length, running_total, max_packet, trb_buff_len);
+ else{
+ if (xhci->hci_version < 0x100) {
+ remainder = xhci_td_remainder(
+ td_len - running_total);
+ } else {
+ remainder = xhci_v1_0_td_remainder(
+ running_total, trb_buff_len,
+ total_packet_count, urb,
+ (trbs_per_td - j - 1));
+ }
}
length_field = TRB_LEN(trb_buff_len) |
remainder |
Index: linux-3.18.21/drivers/usb/host/xhci.c
===================================================================
--- linux-3.18.21.orig/drivers/usb/host/xhci.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/usb/host/xhci.c 2018-02-05 13:21:06.000000000 +0800
@@ -31,6 +31,14 @@
#include "xhci.h"
#include "xhci-trace.h"
+#if defined (CONFIG_USB_EN7512_XHCI_HCD)
+#include <asm/uaccess.h>
+#include <linux/platform_device.h>
+#include "mtk-phy.h"
+#include "xhci-mtk-scheduler.h"
+#include "xhci-mtk-power.h"
+#include "xhci-mtk.h"
+#endif
#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
@@ -198,7 +206,7 @@
return ret;
}
-#ifdef CONFIG_PCI
+#if defined (CONFIG_PCI) && !defined (CONFIG_USB_EN7512_XHCI_HCD)
static int xhci_free_msi(struct xhci_hcd *xhci)
{
int i;
@@ -447,6 +455,10 @@
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Attempting compliance mode recovery");
hcd = xhci->shared_hcd;
+ if(xhci_mtk_host(xhci)){
+ temp |= (1 << 31);
+ writel(temp, xhci->usb3_ports[i]);
+ }
if (hcd->state == HC_STATE_SUSPENDED)
usb_hcd_resume_root_hub(hcd);
@@ -542,11 +554,13 @@
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"xHCI doesn't need link TRB QUIRK");
}
+ if(xhci_mtk_host(xhci))
+ mtk_xhci_scheduler_init();
retval = xhci_mem_init(xhci, GFP_KERNEL);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
/* Initializing Compliance Mode Recovery Data If Needed */
- if (xhci_compliance_mode_recovery_timer_quirk_check()) {
+ if ((xhci_mtk_host(xhci)) || xhci_compliance_mode_recovery_timer_quirk_check()) {
xhci->quirks |= XHCI_COMP_MODE_QUIRK;
compliance_mode_recovery_timer_init(xhci);
}
@@ -626,7 +640,11 @@
"// Set the interrupt modulation register");
temp = readl(&xhci->ir_set->irq_control);
temp &= ~ER_IRQ_INTERVAL_MASK;
- temp |= (u32) 160;
+ /*
+ * the increment interval is 10 times as much as that defained
+ * in xHCI spec on MTK's controller
+ */
+ temp |= (u32) (xhci_mtk_host(xhci) ? 16 : 160);
writel(temp, &xhci->ir_set->irq_control);
/* Set the HCD state before we enable the irqs */
@@ -651,6 +669,8 @@
xhci_queue_vendor_command(xhci, command, 0, 0, 0,
TRB_TYPE(TRB_NEC_GET_FW));
}
+ if(xhci_mtk_host(xhci))
+ enableXhciAllPortPower(xhci);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Finished xhci_run for USB2 roothub");
return 0;
@@ -1533,6 +1553,9 @@
xhci = hcd_to_xhci(hcd);
spin_lock_irqsave(&xhci->lock, flags);
+#if defined (CONFIG_USB_EN7512_XHCI_HCD)
+ en7512_en7516_full_speed_process(hcd, urb);
+#endif
/* Make sure the URB hasn't completed or been unlinked already */
ret = usb_hcd_check_unlink_urb(hcd, urb, status);
if (ret || !urb->hcpriv)
@@ -1618,6 +1641,116 @@
return ret;
}
+void xhci_drop_endpoint_scheduler(struct xhci_hcd *xhci,
+		struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+#if MTK_SCH_NEW
+	struct xhci_slot_ctx *slot_ctx;
+	struct sch_ep *sch_ep = NULL;
+	int isTT;
+	int ep_type;
+
+	slot_ctx = xhci_get_slot_ctx(xhci, xhci->devs[udev->slot_id]->out_ctx);
+	if ((slot_ctx->tt_info & 0xff) > 0) {
+		isTT = 1;
+	}
+	else {
+		isTT = 0;
+	}
+	if (usb_endpoint_xfer_int(&ep->desc)) {
+		ep_type = USB_EP_INT;
+	}
+	else if (usb_endpoint_xfer_isoc(&ep->desc)) {
+		ep_type = USB_EP_ISOC;
+	}
+	else if (usb_endpoint_xfer_bulk(&ep->desc)) {
+		ep_type = USB_EP_BULK;
+	}
+	else
+		ep_type = USB_EP_CONTROL;
+
+	sch_ep = mtk_xhci_scheduler_remove_ep(udev->speed, usb_endpoint_dir_in(&ep->desc)
+		, isTT, ep_type, (mtk_u32 *)ep);
+	if (sch_ep != NULL) {
+		kfree(sch_ep);
+	}
+	else {
+		xhci_dbg(xhci, "[MTK]Doesn't find ep_sch instance when removing endpoint\n");
+	}
+#else
+	mtk_xhci_scheduler_remove_ep(xhci, udev, ep);
+#endif
+}
+
+void xhci_add_endpoint_scheduler(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		struct usb_device *udev,
+		struct usb_host_endpoint *ep,
+		struct xhci_container_ctx *in_ctx,
+		unsigned int ep_index)
+{
+	struct xhci_ep_ctx *in_ep_ctx;
+#if MTK_SCH_NEW
+	struct xhci_slot_ctx *slot_ctx;
+	struct sch_ep *sch_ep;
+	int isTT;
+	int ep_type;
+	int maxp = 0;
+	int burst = 0;
+	int mult = 0;
+	int interval;
+#endif
+
+	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
+#if MTK_SCH_NEW
+	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+	if ((slot_ctx->tt_info & 0xff) > 0) {
+		isTT = 1;
+	}
+	else {
+		isTT = 0;
+	}
+	if (usb_endpoint_xfer_int(&ep->desc)) {
+		ep_type = USB_EP_INT;
+	}
+	else if (usb_endpoint_xfer_isoc(&ep->desc)) {
+		ep_type = USB_EP_ISOC;
+	}
+	else if (usb_endpoint_xfer_bulk(&ep->desc)) {
+		ep_type = USB_EP_BULK;
+	}
+	else
+		ep_type = USB_EP_CONTROL;
+
+	if (udev->speed == USB_SPEED_FULL || udev->speed == USB_SPEED_HIGH
+		|| udev->speed == USB_SPEED_LOW) {
+		maxp = usb_endpoint_maxp(&ep->desc) & 0x7FF;
+		burst = usb_endpoint_maxp(&ep->desc) >> 11;
+		mult = 0;
+	}
+	else if (udev->speed == USB_SPEED_SUPER) {
+		maxp = usb_endpoint_maxp(&ep->desc) & 0x7FF;
+		burst = ep->ss_ep_comp.bMaxBurst;
+		mult = ep->ss_ep_comp.bmAttributes & 0x3;
+	}
+	interval = (1 << ((in_ep_ctx->ep_info >> 16) & 0xff));
+	if (!(sch_ep = kmalloc(sizeof(struct sch_ep), GFP_KERNEL))) return;
+	if (mtk_xhci_scheduler_add_ep(udev->speed, usb_endpoint_dir_in(&ep->desc),
+		isTT, ep_type, maxp, interval, burst, mult, (mtk_u32 *)ep
+		, (mtk_u32 *)in_ep_ctx, sch_ep) != SCH_SUCCESS) {
+		xhci_err(xhci, "[MTK] not enough bandwidth\n");
+		kfree(sch_ep);
+		return;
+	}
+#else
+	if (mtk_xhci_scheduler_add_ep(xhci, udev, ep, in_ep_ctx) != SCH_SUCCESS) {
+		xhci_err(xhci, "[MTK] not enough bandwidth\n");
+		return;
+	}
+#endif
+}
+
/* Drop an endpoint from a new bandwidth configuration for this device.
* Only one call to this function is allowed per endpoint before
* check_bandwidth() or reset_bandwidth() must be called.
@@ -1688,7 +1821,8 @@
new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
-
+ if(xhci_mtk_host(xhci))
+ xhci_drop_endpoint_scheduler(xhci, udev, ep);
xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
(unsigned int) ep->desc.bEndpointAddress,
udev->slot_id,
@@ -1786,7 +1920,8 @@
__func__, ep->desc.bEndpointAddress);
return -ENOMEM;
}
-
+ if(xhci_mtk_host(xhci))
+ xhci_add_endpoint_scheduler(xhci, virt_dev, udev, ep, in_ctx, ep_index);
ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
@@ -4903,6 +5038,10 @@
goto error;
xhci_dbg(xhci, "Reset complete\n");
+ if(xhci_mtk_host(xhci)){
+ hcd->self.no_stop_on_short = 0;
+ setInitialReg();
+ }
/* Set dma_mask and coherent_dma_mask to 64-bits,
* if xHC supports 64-bit addressing */
if (HCC_64BIT_ADDR(xhci->hcc_params) &&
@@ -4997,8 +5136,43 @@
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
+#if defined (CONFIG_USB_EN7512_XHCI_HCD)
+static struct platform_device *xhci_platform_dev;
+static inline int xhci_mtk_hcd_init(void)
+{
+	int retval;
+	struct platform_device *pPlatformDev;
+
+	/*
+	 * Config PHY
+	 * ecnt_u3h_phy_init();
+	 */
+	if(!ecnt_u3h_phy_init())
+		return -ENODEV;
+
+	reinitIP();
+
+	pPlatformDev = xhci_platform_dev = platform_device_alloc("xhci-hcd", -1);
+	if (!pPlatformDev) return -ENOMEM;
+	pPlatformDev->dev.coherent_dma_mask = 0xffffffff;
+	pPlatformDev->dev.dma_mask = &pPlatformDev->dev.coherent_dma_mask;
+
+	retval = platform_device_add(pPlatformDev);
+	if (retval < 0) { platform_device_put(pPlatformDev); xhci_platform_dev = NULL; }
+	return retval;
+}
+static void __exit xhci_hcd_cleanup(void)
+{
+	if (xhci_platform_dev) platform_device_unregister(xhci_platform_dev);
+}
+module_exit(xhci_hcd_cleanup);
+#endif
+
static int __init xhci_hcd_init(void)
{
+#if defined (CONFIG_USB_EN7512_XHCI_HCD)
+ xhci_mtk_hcd_init();
+#endif
/*
* Check the compiler generated sizes of structures that must be laid
* out in specific ways for hardware access.
Index: linux-3.18.21/drivers/usb/host/xhci.h
===================================================================
--- linux-3.18.21.orig/drivers/usb/host/xhci.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/drivers/usb/host/xhci.h 2018-02-05 13:21:06.000000000 +0800
@@ -33,6 +33,23 @@
#include "xhci-ext-caps.h"
#include "pci-quirks.h"
+#if defined (CONFIG_USB_EN7512_XHCI_HCD)
+#ifdef TCSUPPORT_MIPS_1004K
+#include <asm/tc3162/tc3162.h>
+#define XHC_IRQ IRQ_RT3XXX_USB
+#else
+#define XHC_IRQ 18
+#endif
+#define XHC_IO_START 0x1FB90000
+#define XHC_IO_LENGTH 0x10000
+/* MTK scheduler bitmasks */
+#define BPKTS(p) ((p) & 0x3f)
+#define BCSCOUNT(p) (((p) & 0x7) << 8)
+#define BBM(p) ((p) << 11)
+#define BOFFSET(p) ((p) & 0x3fff)
+#define BREPEAT(p) (((p) & 0x7fff) << 16)
+#endif
+
/* xHCI PCI Configuration Registers */
#define XHCI_SBRN_OFFSET (0x60)
@@ -1565,6 +1582,7 @@
/* For controllers with a broken beyond repair streams implementation */
#define XHCI_BROKEN_STREAMS (1 << 19)
#define XHCI_PME_STUCK_QUIRK (1 << 20)
+#define XHCI_MTK_HOST (1 << 21)
unsigned int num_active_eps;
unsigned int limit_active_eps;
/* There are two roothubs to keep track of bus suspend info for */
@@ -1587,8 +1605,12 @@
/* Compliance Mode Recovery Data */
struct timer_list comp_mode_recovery_timer;
u32 port_status_u0;
+#ifdef CONFIG_USB_EN7512_XHCI_HCD
+#define COMP_MODE_RCVRY_MSECS 5000
+#else
/* Compliance Mode Timer Triggered every 2 seconds */
#define COMP_MODE_RCVRY_MSECS 2000
+#endif
};
/* convert between an HCD pointer and the corresponding EHCI_HCD */
@@ -1644,6 +1666,11 @@
return xhci->quirks & XHCI_LINK_TRB_QUIRK;
}
+static inline int xhci_mtk_host(struct xhci_hcd *xhci)
+{
+ return xhci->quirks & XHCI_MTK_HOST;
+}
+
/* xHCI debugging */
void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num);
void xhci_print_registers(struct xhci_hcd *xhci);
Index: linux-3.18.21/fs/Kconfig
===================================================================
--- linux-3.18.21.orig/fs/Kconfig 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/fs/Kconfig 2018-02-05 13:21:11.000000000 +0800
@@ -191,6 +191,7 @@
source "fs/befs/Kconfig"
source "fs/bfs/Kconfig"
source "fs/efs/Kconfig"
+source "fs/yaffs2/Kconfig"
source "fs/jffs2/Kconfig"
# UBIFS File system configuration
source "fs/ubifs/Kconfig"
Index: linux-3.18.21/fs/Makefile
===================================================================
--- linux-3.18.21.orig/fs/Makefile 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/fs/Makefile 2018-02-05 13:21:11.000000000 +0800
@@ -126,3 +126,4 @@
obj-$(CONFIG_CEPH_FS) += ceph/
obj-$(CONFIG_PSTORE) += pstore/
obj-$(CONFIG_EFIVAR_FS) += efivarfs/
+obj-$(CONFIG_YAFFS_FS) += yaffs2/
Index: linux-3.18.21/fs/fcntl.c
===================================================================
--- linux-3.18.21.orig/fs/fcntl.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/fs/fcntl.c 2018-02-05 13:21:13.000000000 +0800
@@ -375,6 +375,7 @@
out:
return err;
}
+EXPORT_SYMBOL(sys_fcntl);/*export for MTK RTP support*/
#if BITS_PER_LONG == 32
SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
Index: linux-3.18.21/fs/jffs2/os-linux.h
===================================================================
--- linux-3.18.21.orig/fs/jffs2/os-linux.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/fs/jffs2/os-linux.h 2018-02-05 13:21:13.000000000 +0800
@@ -107,7 +107,11 @@
#define jffs2_can_mark_obsolete(c) (c->mtd->flags & (MTD_BIT_WRITEABLE))
#endif
+#if 1/* default disbale cleanmarker_oob */
+#define jffs2_cleanmarker_oob(c) 0
+#else
#define jffs2_cleanmarker_oob(c) (c->mtd->type == MTD_NANDFLASH)
+#endif
#define jffs2_wbuf_dirty(c) (!!(c)->wbuf_len)
@@ -133,7 +137,11 @@
int jffs2_ubivol_setup(struct jffs2_sb_info *c);
void jffs2_ubivol_cleanup(struct jffs2_sb_info *c);
+#if 1/* default enable nor wbuf flash */
+#define jffs2_nor_wbuf_flash(c) 1
+#else
#define jffs2_nor_wbuf_flash(c) (c->mtd->type == MTD_NORFLASH && ! (c->mtd->flags & MTD_BIT_WRITEABLE))
+#endif
int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c);
void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c);
void jffs2_dirty_trigger(struct jffs2_sb_info *c);
Index: linux-3.18.21/fs/proc/generic.c
===================================================================
--- linux-3.18.21.orig/fs/proc/generic.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/fs/proc/generic.c 2018-02-05 13:21:15.000000000 +0800
@@ -272,14 +272,231 @@
.setattr = proc_notify_change,
};
+#define PROC_BLOCK_SIZE (PAGE_SIZE - 1024)
+
+static ssize_t
+__proc_file_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct inode * inode = file->f_path.dentry->d_inode;
+ char *page;
+ ssize_t retval=0;
+ int eof=0;
+ ssize_t n, count;
+ char *start;
+ struct proc_dir_entry * dp;
+ unsigned long long pos;
+
+ /*
+ * Gaah, please just use "seq_file" instead. The legacy /proc
+ * interfaces cut loff_t down to off_t for reads, and ignore
+ * the offset entirely for writes..
+ */
+ pos = *ppos;
+ if (pos > MAX_NON_LFS)
+ return 0;
+ if (nbytes > MAX_NON_LFS - pos)
+ nbytes = MAX_NON_LFS - pos;
+
+ dp = PDE(inode);
+ if (!(page = (char*) __get_free_page(GFP_TEMPORARY)))
+ return -ENOMEM;
+
+ while ((nbytes > 0) && !eof) {
+ count = min_t(size_t, PROC_BLOCK_SIZE, nbytes);
+
+ start = NULL;
+ if (dp->read_proc) {
+ /*
+ * How to be a proc read function
+ * ------------------------------
+ * Prototype:
+ * int f(char *buffer, char **start, off_t offset,
+ * int count, int *peof, void *dat)
+ *
+ * Assume that the buffer is "count" bytes in size.
+ *
+ * If you know you have supplied all the data you
+ * have, set *peof.
+ *
+ * You have three ways to return data:
+ * 0) Leave *start = NULL. (This is the default.)
+ * Put the data of the requested offset at that
+ * offset within the buffer. Return the number (n)
+ * of bytes there are from the beginning of the
+ * buffer up to the last byte of data. If the
+ * number of supplied bytes (= n - offset) is
+ * greater than zero and you didn't signal eof
+ * and the reader is prepared to take more data
+ * you will be called again with the requested
+ * offset advanced by the number of bytes
+ * absorbed. This interface is useful for files
+ * no larger than the buffer.
+ * 1) Set *start = an unsigned long value less than
+ * the buffer address but greater than zero.
+ * Put the data of the requested offset at the
+ * beginning of the buffer. Return the number of
+ * bytes of data placed there. If this number is
+ * greater than zero and you didn't signal eof
+ * and the reader is prepared to take more data
+ * you will be called again with the requested
+ * offset advanced by *start. This interface is
+ * useful when you have a large file consisting
+ * of a series of blocks which you want to count
+ * and return as wholes.
+ * (Hack by Paul.Russell@rustcorp.com.au)
+ * 2) Set *start = an address within the buffer.
+ * Put the data of the requested offset at *start.
+ * Return the number of bytes of data placed there.
+ * If this number is greater than zero and you
+ * didn't signal eof and the reader is prepared to
+ * take more data you will be called again with the
+ * requested offset advanced by the number of bytes
+ * absorbed.
+ */
+ n = dp->read_proc(page, &start, *ppos,
+ count, &eof, dp->data);
+ } else
+ break;
+
+ if (n == 0) /* end of file */
+ break;
+ if (n < 0) { /* error */
+ if (retval == 0)
+ retval = n;
+ break;
+ }
+
+ if (start == NULL) {
+ if (n > PAGE_SIZE) {
+ printk(KERN_ERR
+ "proc_file_read: Apparent buffer overflow!\n");
+ n = PAGE_SIZE;
+ }
+ n -= *ppos;
+ if (n <= 0)
+ break;
+ if (n > count)
+ n = count;
+ start = page + *ppos;
+ } else if (start < page) {
+ if (n > PAGE_SIZE) {
+ printk(KERN_ERR
+ "proc_file_read: Apparent buffer overflow!\n");
+ n = PAGE_SIZE;
+ }
+ if (n > count) {
+ /*
+ * Don't reduce n because doing so might
+ * cut off part of a data block.
+ */
+ printk(KERN_WARNING
+ "proc_file_read: Read count exceeded\n");
+ }
+ } else /* start >= page */ {
+ unsigned long startoff = (unsigned long)(start - page);
+ if (n > (PAGE_SIZE - startoff)) {
+ printk(KERN_ERR
+ "proc_file_read: Apparent buffer overflow!\n");
+ n = PAGE_SIZE - startoff;
+ }
+ if (n > count)
+ n = count;
+ }
+
+ n -= copy_to_user(buf, start < page ? page : start, n);
+ if (n == 0) {
+ if (retval == 0)
+ retval = -EFAULT;
+ break;
+ }
+
+ *ppos += start < page ? (unsigned long)start : n;
+ nbytes -= n;
+ buf += n;
+ retval += n;
+ }
+ free_page((unsigned long) page);
+ return retval;
+}
+
+static ssize_t
+proc_file_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+ ssize_t rv = -EIO;
+
+ spin_lock(&pde->pde_unload_lock);
+ if (!pde->proc_fops) {
+ spin_unlock(&pde->pde_unload_lock);
+ return rv;
+ }
+ pde->pde_users++;
+ spin_unlock(&pde->pde_unload_lock);
+
+ rv = __proc_file_read(file, buf, nbytes, ppos);
+
+ pde_users_dec(pde);
+ return rv;
+}
+
+static ssize_t
+proc_file_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+ ssize_t rv = -EIO;
+
+ if (pde->write_proc) {
+ spin_lock(&pde->pde_unload_lock);
+ if (!pde->proc_fops) {
+ spin_unlock(&pde->pde_unload_lock);
+ return rv;
+ }
+ pde->pde_users++;
+ spin_unlock(&pde->pde_unload_lock);
+
+ /* FIXME: does this routine need ppos? probably... */
+ rv = pde->write_proc(file, buffer, count, pde->data);
+ pde_users_dec(pde);
+ }
+ return rv;
+}
+
+
+static loff_t
+proc_file_lseek(struct file *file, loff_t offset, int orig)
+{
+ loff_t retval = -EINVAL;
+ switch (orig) {
+ case 1:
+ offset += file->f_pos;
+ /* fallthrough */
+ case 0:
+ if (offset < 0 || offset > MAX_NON_LFS)
+ break;
+ file->f_pos = retval = offset;
+ }
+ return retval;
+}
+
+static const struct file_operations proc_file_operations = {
+ .llseek = proc_file_lseek,
+ .read = proc_file_read,
+ .write = proc_file_write,
+};
+
static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
{
struct proc_dir_entry *tmp;
int ret;
ret = proc_alloc_inum(&dp->low_ino);
- if (ret)
+ if (ret){
+ printk("\nproc_regiser ret = %d", ret);
return ret;
+ }
if (S_ISDIR(dp->mode)) {
dp->proc_fops = &proc_dir_operations;
@@ -349,6 +566,39 @@
return ent;
}
+struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
+ struct proc_dir_entry *parent)
+{
+ struct proc_dir_entry *ent;
+
+ if ((mode & S_IFMT) == 0)
+ mode |= S_IFREG;
+
+ if (!S_ISREG(mode)) {
+ WARN_ON(1); /* use proc_mkdir() */
+ return NULL;
+ }
+
+ if ((mode & S_IALLUGO) == 0)
+ mode |= S_IRUGO;
+
+ ent = __proc_create(&parent, name, mode, 1);
+
+ if (ent) {
+ if (ent->proc_fops == NULL)
+ ent->proc_fops = &proc_file_operations;
+ if (ent->proc_iops == NULL)
+ ent->proc_iops = &proc_file_inode_operations;
+ if (proc_register(parent, ent) < 0) {
+ printk("\n===>proc_register < 0");
+ kfree(ent);
+ ent = NULL;
+ }
+ }
+ return ent;
+}
+EXPORT_SYMBOL(create_proc_entry);
+
struct proc_dir_entry *proc_symlink(const char *name,
struct proc_dir_entry *parent, const char *dest)
{
Index: linux-3.18.21/fs/proc/inode.c
===================================================================
--- linux-3.18.21.orig/fs/proc/inode.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/fs/proc/inode.c 2018-02-05 13:21:15.000000000 +0800
@@ -29,6 +29,20 @@
#include "internal.h"
+static void __pde_users_dec(struct proc_dir_entry *pde)
+{
+ pde->pde_users--;
+ if (pde->pde_unload_completion && pde->pde_users == 0)
+ complete(pde->pde_unload_completion);
+}
+
+void pde_users_dec(struct proc_dir_entry *pde)
+{
+ spin_lock(&pde->pde_unload_lock);
+ __pde_users_dec(pde);
+ spin_unlock(&pde->pde_unload_lock);
+}
+
static void proc_evict_inode(struct inode *inode)
{
struct proc_dir_entry *de;
Index: linux-3.18.21/fs/proc/internal.h
===================================================================
--- linux-3.18.21.orig/fs/proc/internal.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/fs/proc/internal.h 2018-02-05 13:21:15.000000000 +0800
@@ -18,6 +18,7 @@
struct ctl_table_header;
struct mempolicy;
+#if 0
/*
* This is not completely implemented yet. The idea is to
* create an in-memory tree (like the actual /proc filesystem
@@ -67,6 +68,7 @@
struct proc_ns ns;
struct inode vfs_inode;
};
+#endif
/*
* General functions
Index: linux-3.18.21/fs/proc/meminfo.c
===================================================================
--- linux-3.18.21.orig/fs/proc/meminfo.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/fs/proc/meminfo.c 2018-02-05 13:21:15.000000000 +0800
@@ -32,6 +32,9 @@
unsigned long pages[NR_LRU_LISTS];
struct zone *zone;
int lru;
+#ifdef CONFIG_TC3162_IMEM
+ extern unsigned int imem_remains;
+#endif
/*
* display in kilobytes.
@@ -138,6 +141,9 @@
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
"AnonHugePages: %8lu kB\n"
#endif
+#ifdef CONFIG_TC3162_IMEM
+ "IMEM Remains: %8lu Bytes\n"
+#endif
,
K(i.totalram),
K(i.freeram),
@@ -193,6 +199,9 @@
,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
HPAGE_PMD_NR)
#endif
+#ifdef CONFIG_TC3162_IMEM
+ ,imem_remains
+#endif
);
hugetlb_report_meminfo(m);
Index: linux-3.18.21/fs/proc/root.c
===================================================================
--- linux-3.18.21.orig/fs/proc/root.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/fs/proc/root.c 2018-02-05 13:21:15.000000000 +0800
@@ -186,6 +186,9 @@
#endif
proc_tty_init();
proc_mkdir("bus", NULL);
+#ifdef CONFIG_MIPS_TC3262
+ proc_mkdir("tc3162", NULL);
+#endif
proc_sys_init();
}
Index: linux-3.18.21/fs/squashfs/Kconfig
===================================================================
--- linux-3.18.21.orig/fs/squashfs/Kconfig 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/fs/squashfs/Kconfig 2018-02-05 13:21:15.000000000 +0800
@@ -120,6 +120,12 @@
If unsure, say Y.
+config SQUASHFS_LZMA
+ bool "Include support for LZMA compressed file systems"
+ depends on SQUASHFS
+ select DECOMPRESS_LZMA
+ select DECOMPRESS_LZMA_NEEDED
+
config SQUASHFS_LZO
bool "Include support for LZO compressed file systems"
depends on SQUASHFS
Index: linux-3.18.21/fs/squashfs/Makefile
===================================================================
--- linux-3.18.21.orig/fs/squashfs/Makefile 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/fs/squashfs/Makefile 2018-02-05 13:21:15.000000000 +0800
@@ -14,3 +14,4 @@
squashfs-$(CONFIG_SQUASHFS_LZO) += lzo_wrapper.o
squashfs-$(CONFIG_SQUASHFS_XZ) += xz_wrapper.o
squashfs-$(CONFIG_SQUASHFS_ZLIB) += zlib_wrapper.o
+squashfs-$(CONFIG_SQUASHFS_LZMA) += lzma_wrapper.o
Index: linux-3.18.21/fs/squashfs/decompressor.c
===================================================================
--- linux-3.18.21.orig/fs/squashfs/decompressor.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/fs/squashfs/decompressor.c 2018-02-05 13:21:15.000000000 +0800
@@ -67,7 +67,11 @@
&squashfs_zlib_comp_ops,
&squashfs_lzo_comp_ops,
&squashfs_xz_comp_ops,
+#ifdef CONFIG_SQUASHFS_LZMA
+ &squashfs_lzma_comp_ops,
+#else
&squashfs_lzma_unsupported_comp_ops,
+#endif
&squashfs_unknown_comp_ops
};
Index: linux-3.18.21/fs/squashfs/lzma_wrapper.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/squashfs/lzma_wrapper.c 2018-02-05 13:21:15.000000000 +0800
@@ -0,0 +1,211 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * lzma_wrapper.c
+ */
+
+#include <asm/unaligned.h>
+#include <linux/buffer_head.h>
+#include <linux/mutex.h>
+#include <linux/vmalloc.h>
+#include <linux/decompress/unlzma.h>
+#include <linux/slab.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+#include "decompressor.h"
+#include "page_actor.h"
+
+
+struct squashfs_lzma {
+ void *input;
+ void *output;
+};
+
+/* decompress_unlzma.c is currently non re-entrant... */
+//DEFINE_MUTEX(lzma_mutex);
+
+/* decompress_unlzma.c doesn't provide any context in its callbacks... */
+static int lzma_error;
+
+static void error(char *m)
+{
+ printk("unlzma error: %s\n", m);
+ lzma_error = 1;
+}
+
+
+static void *lzma_init(struct squashfs_sb_info *msblk, void *buff, int len)
+{
+ int block_size = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE);
+ struct squashfs_lzma *stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (stream == NULL)
+ goto failed;
+ stream->input = vmalloc(block_size);
+ if (stream->input == NULL)
+ goto failed;
+ stream->output = vmalloc(block_size);
+ if (stream->output == NULL)
+ goto failed2;
+
+ return stream;
+
+failed2:
+ vfree(stream->input);
+failed:
+ ERROR("failed to allocate lzma workspace\n");
+ kfree(stream);
+ return ERR_PTR(-ENOMEM);
+}
+
+
+static void lzma_free(void *strm)
+{
+ struct squashfs_lzma *stream = strm;
+
+ if (stream) {
+ vfree(stream->input);
+ vfree(stream->output);
+ }
+ kfree(stream);
+}
+
+static int lzma_uncompress(struct squashfs_sb_info *msblk, void *strm,
+ struct buffer_head **bh, int b, int offset, int length,
+ struct squashfs_page_actor *output)
+{
+ struct squashfs_lzma *stream = strm;
+ void *buff = stream->input, *data;
+ int avail, i, bytes = length, res;
+ size_t out_len = output->length;
+
+ for (i = 0; i < b; i++) {
+ avail = min(bytes, msblk->devblksize - offset);
+ memcpy(buff, bh[i]->b_data + offset, avail);
+ buff += avail;
+ bytes -= avail;
+ offset = 0;
+ put_bh(bh[i]);
+ }
+
+ lzma_error = 0;
+
+ res = unlzma(stream->input, length, NULL, NULL, stream->output, NULL,
+ error);
+ //printk("lzma_uncompress: 3333333333\r\n");
+ if (res || lzma_error)
+ goto failed;
+
+ /* uncompressed size is stored in the LZMA header (5 byte offset) */
+ res = bytes = get_unaligned_le32(stream->input + 5);
+
+ data = squashfs_first_page(output);
+ buff = stream->output;
+ while (data) {
+ if (bytes <= PAGE_CACHE_SIZE) {
+ memcpy(data, buff, bytes);
+ break;
+ } else {
+ memcpy(data, buff, PAGE_CACHE_SIZE);
+ buff += PAGE_CACHE_SIZE;
+ bytes -= PAGE_CACHE_SIZE;
+ data = squashfs_next_page(output);
+ }
+ }
+ squashfs_finish_page(output);
+
+ return res;
+
+failed:
+ return -EIO;
+}
+#if 0
+static int lzma_uncompress(struct squashfs_sb_info *msblk, void **buffer,
+ struct buffer_head **bh, int b, int offset, int length, int srclength,
+ int pages)
+{
+ struct squashfs_lzma *stream = msblk->stream;
+ void *buff = stream->input;
+ int avail, i, bytes = length, res;
+ size_t out_len = srclength;
+
+ //mutex_lock(&lzma_mutex);
+ //mutex_lock(&msblk->read_data_mutex);
+ printk("lzma_uncompress: 1111111 length is [%d] srclength is [%d] pages is [%d] b is [%d]\r\n", length, srclength, pages, b);
+
+ for (i = 0; i < b; i++) {
+ wait_on_buffer(bh[i]);
+ if (!buffer_uptodate(bh[i]))
+ goto block_release;
+
+ avail = min(bytes, msblk->devblksize - offset);
+ memcpy(buff, bh[i]->b_data + offset, avail);
+ buff += avail;
+ bytes -= avail;
+ offset = 0;
+ put_bh(bh[i]);
+ }
+ printk("lzma_uncompress: 2222222222, unlzma ptr [%p]\r\n", unlzma);
+
+ lzma_error = 0;
+ res = unlzma(stream->input, length, NULL, NULL, stream->output, NULL,
+ error);
+ printk("lzma_uncompress: 3333333333\r\n");
+ if (res || lzma_error)
+ goto failed;
+
+ /* uncompressed size is stored in the LZMA header (5 byte offset) */
+ res = bytes = get_unaligned_le32(stream->input + 5);
+ for (i = 0, buff = stream->output; bytes && i < pages; i++) {
+ avail = min_t(int, bytes, PAGE_CACHE_SIZE);
+ memcpy(buffer[i], buff, avail);
+ buff += avail;
+ bytes -= avail;
+ }
+ printk("lzma_uncompress: 4444444444444\r\n");
+ if (bytes)
+ goto failed;
+
+ //mutex_unlock(&lzma_mutex);
+ //mutex_unlock(&msblk->read_data_mutex);
+ return res;
+
+block_release:
+ for (; i < b; i++)
+ put_bh(bh[i]);
+
+failed:
+ //mutex_unlock(&lzma_mutex);
+ //mutex_unlock(&msblk->read_data_mutex);
+ ERROR("lzma decompression failed, data probably corrupt\n");
+ return -EIO;
+}
+#endif
+const struct squashfs_decompressor squashfs_lzma_comp_ops = {
+ .init = lzma_init,
+ .free = lzma_free,
+ .decompress = lzma_uncompress,
+ .id = LZMA_COMPRESSION,
+ .name = "lzma",
+ .supported = 1
+};
+
Index: linux-3.18.21/fs/squashfs/squashfs.h
===================================================================
--- linux-3.18.21.orig/fs/squashfs/squashfs.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/fs/squashfs/squashfs.h 2018-02-05 13:21:15.000000000 +0800
@@ -111,3 +111,5 @@
/* xattr.c */
extern const struct xattr_handler *squashfs_xattr_handlers[];
+
+extern const struct squashfs_decompressor squashfs_lzma_comp_ops;
\ No newline at end of file
Index: linux-3.18.21/fs/yaffs2/Kconfig
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/Kconfig 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,171 @@
+#
+# yaffs file system configurations
+#
+
+config YAFFS_FS
+ tristate "yaffs2 file system support"
+ default n
+ depends on MTD_BLOCK
+ select YAFFS_YAFFS1
+ select YAFFS_YAFFS2
+ help
+ yaffs2, or Yet Another Flash File System, is a file system
+ optimised for NAND Flash chips.
+
+ To compile the yaffs2 file system support as a module, choose M
+ here: the module will be called yaffs2.
+
+ If unsure, say N.
+
+ Further information on yaffs2 is available at
+ <http://www.aleph1.co.uk/yaffs/>.
+
+config YAFFS_YAFFS1
+ bool "512 byte / page devices"
+ depends on YAFFS_FS
+ default y
+ help
+ Enable yaffs1 support -- yaffs for 512 byte / page devices
+
+ Not needed for 2K-page devices.
+
+ If unsure, say Y.
+
+config YAFFS_9BYTE_TAGS
+ bool "Use older-style on-NAND data format with pageStatus byte"
+ depends on YAFFS_YAFFS1
+ default n
+ help
+
+ Older-style on-NAND data format has a "pageStatus" byte to record
+ chunk/page state. This byte is zero when the page is discarded.
+ Choose this option if you have existing on-NAND data using this
+ format that you need to continue to support. New data written
+ also uses the older-style format. Note: Use of this option
+ generally requires that MTD's oob layout be adjusted to use the
+ older-style format. See notes on tags formats and MTD versions
+ in yaffs_mtdif1.c.
+
+ If unsure, say N.
+
+config YAFFS_DOES_ECC
+ bool "Lets yaffs do its own ECC"
+ depends on YAFFS_FS && YAFFS_YAFFS1 && !YAFFS_9BYTE_TAGS
+ default n
+ help
+ This enables yaffs to use its own ECC functions instead of using
+ the ones from the generic MTD-NAND driver.
+
+ If unsure, say N.
+
+config YAFFS_ECC_WRONG_ORDER
+ bool "Use the same ecc byte order as Steven Hill's nand_ecc.c"
+ depends on YAFFS_FS && YAFFS_DOES_ECC && !YAFFS_9BYTE_TAGS
+ default n
+ help
+ This makes yaffs_ecc.c use the same ecc byte order as Steven
+ Hill's nand_ecc.c. If not set, then you get the same ecc byte
+ order as SmartMedia.
+
+ If unsure, say N.
+
+config YAFFS_YAFFS2
+ bool "2048 byte (or larger) / page devices"
+ depends on YAFFS_FS
+ default y
+ help
+ Enable yaffs2 support -- yaffs for >= 2K bytes per page devices
+
+ If unsure, say Y.
+
+config YAFFS_AUTO_YAFFS2
+ bool "Autoselect yaffs2 format"
+ depends on YAFFS_YAFFS2
+ default y
+ help
+ Without this, you need to explicitely use yaffs2 as the file
+ system type. With this, you can say "yaffs" and yaffs or yaffs2
+ will be used depending on the device page size (yaffs on
+ 512-byte page devices, yaffs2 on 2K page devices).
+
+ If unsure, say Y.
+
+config YAFFS_DISABLE_TAGS_ECC
+ bool "Disable yaffs from doing ECC on tags by default"
+ depends on YAFFS_FS && YAFFS_YAFFS2
+ default n
+ help
+ This defaults yaffs to using its own ECC calculations on tags instead of
+ just relying on the MTD.
+ This behavior can also be overridden with tags_ecc_on and
+ tags_ecc_off mount options.
+
+ If unsure, say N.
+
+config YAFFS_ALWAYS_CHECK_CHUNK_ERASED
+ bool "Force chunk erase check"
+ depends on YAFFS_FS
+ default n
+ help
+ Normally yaffs only checks chunks before writing until an erased
+ chunk is found. This helps to detect any partially written
+ chunks that might have happened due to power loss.
+
+ Enabling this forces on the test that chunks are erased in flash
+ before writing to them. This takes more time but is potentially
+ a bit more secure.
+
+ Suggest setting Y during development and ironing out driver
+ issues etc. Suggest setting to N if you want faster writing.
+
+ If unsure, say Y.
+
+config YAFFS_EMPTY_LOST_AND_FOUND
+ bool "Empty lost and found on boot"
+ depends on YAFFS_FS
+ default n
+ help
+ If this is enabled then the contents of lost and found is
+ automatically dumped at mount.
+
+ If unsure, say N.
+
+config YAFFS_DISABLE_BLOCK_REFRESHING
+ bool "Disable yaffs2 block refreshing"
+ depends on YAFFS_FS
+ default n
+ help
+ If this is set, then block refreshing is disabled.
+ Block refreshing infrequently refreshes the oldest block in
+ a yaffs2 file system. This mechanism helps to refresh flash to
+ mitigate against data loss. This is particularly useful for MLC.
+
+ If unsure, say N.
+
+config YAFFS_DISABLE_BACKGROUND
+ bool "Disable yaffs2 background processing"
+ depends on YAFFS_FS
+ default n
+ help
+ If this is set, then background processing is disabled.
+ Background processing makes many foreground activities faster.
+
+ If unsure, say N.
+
+config YAFFS_DISABLE_BAD_BLOCK_MARKING
+ bool "Disable yaffs2 bad block marking"
+ depends on YAFFS_FS
+ default n
+ help
+ Useful during early flash bring up to prevent problems causing
+ lots of bad block marking.
+
+ If unsure, say N.
+
+config YAFFS_XATTR
+ bool "Enable yaffs2 xattr support"
+ depends on YAFFS_FS
+ default y
+ help
+ If this is set then yaffs2 will provide xattr support.
+ If unsure, say Y.
Index: linux-3.18.21/fs/yaffs2/Makefile
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/Makefile 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,18 @@
+#
+# Makefile for the linux YAFFS filesystem routines.
+#
+
+obj-$(CONFIG_YAFFS_FS) += yaffs.o
+
+yaffs-y := yaffs_ecc.o yaffs_vfs.o yaffs_guts.o yaffs_checkptrw.o
+yaffs-y += yaffs_packedtags1.o yaffs_packedtags2.o yaffs_nand.o
+yaffs-y += yaffs_tagscompat.o yaffs_tagsmarshall.o
+yaffs-y += yaffs_mtdif.o
+yaffs-y += yaffs_nameval.o yaffs_attribs.o
+yaffs-y += yaffs_allocator.o
+yaffs-y += yaffs_yaffs1.o
+yaffs-y += yaffs_yaffs2.o
+yaffs-y += yaffs_bitmap.o
+yaffs-y += yaffs_summary.o
+yaffs-y += yaffs_verify.o
+
Index: linux-3.18.21/fs/yaffs2/yaffs_allocator.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_allocator.c 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,357 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_allocator.h"
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+#include "yportenv.h"
+
+/*
+ * Each entry in yaffs_tnode_list and yaffs_obj_list hold blocks
+ * of approx 100 objects that are themn allocated singly.
+ * This is basically a simplified slab allocator.
+ *
+ * We don't use the Linux slab allocator because slab does not allow
+ * us to dump all the objects in one hit when we do a umount and tear
+ * down all the tnodes and objects. slab requires that we first free
+ * the individual objects.
+ *
+ * Once yaffs has been mainlined I shall try to motivate for a change
+ * to slab to provide the extra features we need here.
+ */
+
+struct yaffs_tnode_list {
+ struct yaffs_tnode_list *next;
+ struct yaffs_tnode *tnodes;
+};
+
+struct yaffs_obj_list {
+ struct yaffs_obj_list *next;
+ struct yaffs_obj *objects;
+};
+
+struct yaffs_allocator {
+ int n_tnodes_created;
+ struct yaffs_tnode *free_tnodes;
+ int n_free_tnodes;
+ struct yaffs_tnode_list *alloc_tnode_list;
+
+ int n_obj_created;
+ struct list_head free_objs;
+ int n_free_objects;
+
+ struct yaffs_obj_list *allocated_obj_list;
+};
+
+static void yaffs_deinit_raw_tnodes(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator =
+ (struct yaffs_allocator *)dev->allocator;
+ struct yaffs_tnode_list *tmp;
+
+ if (!allocator) {
+ BUG();
+ return;
+ }
+
+ while (allocator->alloc_tnode_list) {
+ tmp = allocator->alloc_tnode_list->next;
+
+ kfree(allocator->alloc_tnode_list->tnodes);
+ kfree(allocator->alloc_tnode_list);
+ allocator->alloc_tnode_list = tmp;
+ }
+
+ allocator->free_tnodes = NULL;
+ allocator->n_free_tnodes = 0;
+ allocator->n_tnodes_created = 0;
+}
+
+static void yaffs_init_raw_tnodes(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ BUG();
+ return;
+ }
+
+ allocator->alloc_tnode_list = NULL;
+ allocator->free_tnodes = NULL;
+ allocator->n_free_tnodes = 0;
+ allocator->n_tnodes_created = 0;
+}
+
+static int yaffs_create_tnodes(struct yaffs_dev *dev, int n_tnodes)
+{
+ struct yaffs_allocator *allocator =
+ (struct yaffs_allocator *)dev->allocator;
+ int i;
+ struct yaffs_tnode *new_tnodes;
+ u8 *mem;
+ struct yaffs_tnode *curr;
+ struct yaffs_tnode *next;
+ struct yaffs_tnode_list *tnl;
+
+ if (!allocator) {
+ BUG();
+ return YAFFS_FAIL;
+ }
+
+ if (n_tnodes < 1)
+ return YAFFS_OK;
+
+ /* make these things */
+ new_tnodes = kmalloc(n_tnodes * dev->tnode_size, GFP_NOFS);
+ mem = (u8 *) new_tnodes;
+
+ if (!new_tnodes) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs: Could not allocate Tnodes");
+ return YAFFS_FAIL;
+ }
+
+ /* New hookup for wide tnodes */
+ for (i = 0; i < n_tnodes - 1; i++) {
+ curr = (struct yaffs_tnode *)&mem[i * dev->tnode_size];
+ next = (struct yaffs_tnode *)&mem[(i + 1) * dev->tnode_size];
+ curr->internal[0] = next;
+ }
+
+ curr = (struct yaffs_tnode *)&mem[(n_tnodes - 1) * dev->tnode_size];
+ curr->internal[0] = allocator->free_tnodes;
+ allocator->free_tnodes = (struct yaffs_tnode *)mem;
+
+ allocator->n_free_tnodes += n_tnodes;
+ allocator->n_tnodes_created += n_tnodes;
+
+ /* Now add this bunch of tnodes to a list for freeing up.
+ * NB If we can't add this to the management list it isn't fatal
+ * but it just means we can't free this bunch of tnodes later.
+ */
+ tnl = kmalloc(sizeof(struct yaffs_tnode_list), GFP_NOFS);
+ if (!tnl) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Could not add tnodes to management list");
+ return YAFFS_FAIL;
+ } else {
+ tnl->tnodes = new_tnodes;
+ tnl->next = allocator->alloc_tnode_list;
+ allocator->alloc_tnode_list = tnl;
+ }
+
+ yaffs_trace(YAFFS_TRACE_ALLOCATE, "Tnodes added");
+
+ return YAFFS_OK;
+}
+
+struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator =
+ (struct yaffs_allocator *)dev->allocator;
+ struct yaffs_tnode *tn = NULL;
+
+ if (!allocator) {
+ BUG();
+ return NULL;
+ }
+
+ /* If there are none left make more */
+ if (!allocator->free_tnodes)
+ yaffs_create_tnodes(dev, YAFFS_ALLOCATION_NTNODES);
+
+ if (allocator->free_tnodes) {
+ tn = allocator->free_tnodes;
+ allocator->free_tnodes = allocator->free_tnodes->internal[0];
+ allocator->n_free_tnodes--;
+ }
+
+ return tn;
+}
+
+/* FreeTnode frees up a tnode and puts it back on the free list */
+void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ BUG();
+ return;
+ }
+
+ if (tn) {
+ tn->internal[0] = allocator->free_tnodes;
+ allocator->free_tnodes = tn;
+ allocator->n_free_tnodes++;
+ }
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+}
+
+/*--------------- yaffs_obj alloaction ------------------------
+ *
+ * Free yaffs_objs are stored in a list using obj->siblings.
+ * The blocks of allocated objects are stored in a linked list.
+ */
+
+static void yaffs_init_raw_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ BUG();
+ return;
+ }
+
+ allocator->allocated_obj_list = NULL;
+ INIT_LIST_HEAD(&allocator->free_objs);
+ allocator->n_free_objects = 0;
+}
+
+static void yaffs_deinit_raw_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+ struct yaffs_obj_list *tmp;
+
+ if (!allocator) {
+ BUG();
+ return;
+ }
+
+ while (allocator->allocated_obj_list) {
+ tmp = allocator->allocated_obj_list->next;
+ kfree(allocator->allocated_obj_list->objects);
+ kfree(allocator->allocated_obj_list);
+ allocator->allocated_obj_list = tmp;
+ }
+
+ INIT_LIST_HEAD(&allocator->free_objs);
+ allocator->n_free_objects = 0;
+ allocator->n_obj_created = 0;
+}
+
+static int yaffs_create_free_objs(struct yaffs_dev *dev, int n_obj)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+ int i;
+ struct yaffs_obj *new_objs;
+ struct yaffs_obj_list *list;
+
+ if (!allocator) {
+ BUG();
+ return YAFFS_FAIL;
+ }
+
+ if (n_obj < 1)
+ return YAFFS_OK;
+
+ /* make these things */
+ new_objs = kmalloc(n_obj * sizeof(struct yaffs_obj), GFP_NOFS);
+ list = kmalloc(sizeof(struct yaffs_obj_list), GFP_NOFS);
+
+ if (!new_objs || !list) {
+ kfree(new_objs);
+ new_objs = NULL;
+ kfree(list);
+ list = NULL;
+ yaffs_trace(YAFFS_TRACE_ALLOCATE,
+ "Could not allocate more objects");
+ return YAFFS_FAIL;
+ }
+
+ /* Hook them into the free list */
+ for (i = 0; i < n_obj; i++)
+ list_add(&new_objs[i].siblings, &allocator->free_objs);
+
+ allocator->n_free_objects += n_obj;
+ allocator->n_obj_created += n_obj;
+
+ /* Now add this bunch of Objects to a list for freeing up. */
+
+ list->objects = new_objs;
+ list->next = allocator->allocated_obj_list;
+ allocator->allocated_obj_list = list;
+
+ return YAFFS_OK;
+}
+
+struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj = NULL;
+ struct list_head *lh;
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ BUG();
+ return obj;
+ }
+
+ /* If there are none left make more */
+ if (list_empty(&allocator->free_objs))
+ yaffs_create_free_objs(dev, YAFFS_ALLOCATION_NOBJECTS);
+
+ if (!list_empty(&allocator->free_objs)) {
+ lh = allocator->free_objs.next;
+ obj = list_entry(lh, struct yaffs_obj, siblings);
+ list_del_init(lh);
+ allocator->n_free_objects--;
+ }
+
+ return obj;
+}
+
+void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj)
+{
+
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ BUG();
+ return;
+ }
+
+ /* Link into the free list. */
+ list_add(&obj->siblings, &allocator->free_objs);
+ allocator->n_free_objects++;
+}
+
+void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
+
+ if (!dev->allocator) {
+ BUG();
+ return;
+ }
+
+ yaffs_deinit_raw_tnodes(dev);
+ yaffs_deinit_raw_objs(dev);
+ kfree(dev->allocator);
+ dev->allocator = NULL;
+}
+
+void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator;
+
+ if (dev->allocator) {
+ BUG();
+ return;
+ }
+
+ allocator = kmalloc(sizeof(struct yaffs_allocator), GFP_NOFS);
+ if (allocator) {
+ dev->allocator = allocator;
+ yaffs_init_raw_tnodes(dev);
+ yaffs_init_raw_objs(dev);
+ }
+}
+
Index: linux-3.18.21/fs/yaffs2/yaffs_allocator.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_allocator.h 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,30 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_ALLOCATOR_H__
+#define __YAFFS_ALLOCATOR_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev);
+void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev);
+
+struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev);
+void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn);
+
+struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev);
+void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj);
+
+#endif
Index: linux-3.18.21/fs/yaffs2/yaffs_attribs.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_attribs.c 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,132 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_attribs.h"
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+#define IATTR_UID ia_uid
+#define IATTR_GID ia_gid
+#else
+#define IATTR_UID ia_uid.val
+#define IATTR_GID ia_gid.val
+#endif
+
+void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh)
+{
+ obj->yst_uid = oh->yst_uid;
+ obj->yst_gid = oh->yst_gid;
+ obj->yst_atime = oh->yst_atime;
+ obj->yst_mtime = oh->yst_mtime;
+ obj->yst_ctime = oh->yst_ctime;
+ obj->yst_rdev = oh->yst_rdev;
+}
+
+void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj)
+{
+ oh->yst_uid = obj->yst_uid;
+ oh->yst_gid = obj->yst_gid;
+ oh->yst_atime = obj->yst_atime;
+ oh->yst_mtime = obj->yst_mtime;
+ oh->yst_ctime = obj->yst_ctime;
+ oh->yst_rdev = obj->yst_rdev;
+
+}
+
+void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c)
+{
+ obj->yst_mtime = Y_CURRENT_TIME;
+ if (do_a)
+ obj->yst_atime = obj->yst_mtime;
+ if (do_c)
+ obj->yst_ctime = obj->yst_mtime;
+}
+
+void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev)
+{
+ yaffs_load_current_time(obj, 1, 1);
+ obj->yst_rdev = rdev;
+ obj->yst_uid = uid;
+ obj->yst_gid = gid;
+}
+
+static loff_t yaffs_get_file_size(struct yaffs_obj *obj)
+{
+ YCHAR *alias = NULL;
+ obj = yaffs_get_equivalent_obj(obj);
+
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ return obj->variant.file_variant.file_size;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ alias = obj->variant.symlink_variant.alias;
+ if (!alias)
+ return 0;
+ return strnlen(alias, YAFFS_MAX_ALIAS_LENGTH);
+ default:
+ return 0;
+ }
+}
+
+int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr)
+{
+ unsigned int valid = attr->ia_valid;
+
+ if (valid & ATTR_MODE)
+ obj->yst_mode = attr->ia_mode;
+ if (valid & ATTR_UID)
+ obj->yst_uid = attr->IATTR_UID;
+ if (valid & ATTR_GID)
+ obj->yst_gid = attr->IATTR_GID;
+
+ if (valid & ATTR_ATIME)
+ obj->yst_atime = Y_TIME_CONVERT(attr->ia_atime);
+ if (valid & ATTR_CTIME)
+ obj->yst_ctime = Y_TIME_CONVERT(attr->ia_ctime);
+ if (valid & ATTR_MTIME)
+ obj->yst_mtime = Y_TIME_CONVERT(attr->ia_mtime);
+
+ if (valid & ATTR_SIZE)
+ yaffs_resize_file(obj, attr->ia_size);
+
+ yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
+
+ return YAFFS_OK;
+
+}
+
+int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr)
+{
+ unsigned int valid = 0;
+
+ attr->ia_mode = obj->yst_mode;
+ valid |= ATTR_MODE;
+ attr->IATTR_UID = obj->yst_uid;
+ valid |= ATTR_UID;
+ attr->IATTR_GID = obj->yst_gid;
+ valid |= ATTR_GID;
+
+ Y_TIME_CONVERT(attr->ia_atime) = obj->yst_atime;
+ valid |= ATTR_ATIME;
+ Y_TIME_CONVERT(attr->ia_ctime) = obj->yst_ctime;
+ valid |= ATTR_CTIME;
+ Y_TIME_CONVERT(attr->ia_mtime) = obj->yst_mtime;
+ valid |= ATTR_MTIME;
+
+ attr->ia_size = yaffs_get_file_size(obj);
+ valid |= ATTR_SIZE;
+
+ attr->ia_valid = valid;
+
+ return YAFFS_OK;
+}
Index: linux-3.18.21/fs/yaffs2/yaffs_attribs.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_attribs.h 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,28 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_ATTRIBS_H__
+#define __YAFFS_ATTRIBS_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh);
+void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj);
+void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev);
+void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c);
+int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr);
+int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr);
+
+#endif
Index: linux-3.18.21/fs/yaffs2/yaffs_bitmap.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_bitmap.c 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,97 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_bitmap.h"
+#include "yaffs_trace.h"
+/*
+ * Chunk bitmap manipulations
+ */
+
+static inline u8 *yaffs_block_bits(struct yaffs_dev *dev, int blk)
+{
+ if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "BlockBits block %d is not valid",
+ blk);
+ BUG();
+ }
+ return dev->chunk_bits +
+ (dev->chunk_bit_stride * (blk - dev->internal_start_block));
+}
+
+void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk)
+{
+ if (blk < dev->internal_start_block || blk > dev->internal_end_block ||
+ chunk < 0 || chunk >= dev->param.chunks_per_block) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Chunk Id (%d:%d) invalid",
+ blk, chunk);
+ BUG();
+ }
+}
+
+void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+ memset(blk_bits, 0, dev->chunk_bit_stride);
+}
+
+void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+ yaffs_verify_chunk_bit_id(dev, blk, chunk);
+ blk_bits[chunk / 8] &= ~(1 << (chunk & 7));
+}
+
+void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+ yaffs_verify_chunk_bit_id(dev, blk, chunk);
+ blk_bits[chunk / 8] |= (1 << (chunk & 7));
+}
+
+int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+ yaffs_verify_chunk_bit_id(dev, blk, chunk);
+ return (blk_bits[chunk / 8] & (1 << (chunk & 7))) ? 1 : 0;
+}
+
+int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+ int i;
+
+ for (i = 0; i < dev->chunk_bit_stride; i++) {
+ if (*blk_bits)
+ return 1;
+ blk_bits++;
+ }
+ return 0;
+}
+
+int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+ int i;
+ int n = 0;
+
+ for (i = 0; i < dev->chunk_bit_stride; i++, blk_bits++)
+ n += hweight8(*blk_bits);
+
+ return n;
+}
Index: linux-3.18.21/fs/yaffs2/yaffs_bitmap.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_bitmap.h 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,33 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/*
+ * Chunk bitmap manipulations
+ */
+
+#ifndef __YAFFS_BITMAP_H__
+#define __YAFFS_BITMAP_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk);
+void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk);
+void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk);
+int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk);
+
+#endif
Index: linux-3.18.21/fs/yaffs2/yaffs_checkptrw.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_checkptrw.c 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,474 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_checkptrw.h"
+#include "yaffs_getblockinfo.h"
+
+struct yaffs_checkpt_chunk_hdr {
+ int version;
+ int seq;
+ u32 sum;
+ u32 xor;
+} ;
+
+
+static int apply_chunk_offset(struct yaffs_dev *dev, int chunk)
+{
+ return chunk - dev->chunk_offset;
+}
+
+static int apply_block_offset(struct yaffs_dev *dev, int block)
+{
+ return block - dev->block_offset;
+}
+
+static void yaffs2_checkpt_init_chunk_hdr(struct yaffs_dev *dev)
+{
+ struct yaffs_checkpt_chunk_hdr hdr;
+
+ hdr.version = YAFFS_CHECKPOINT_VERSION;
+ hdr.seq = dev->checkpt_page_seq;
+ hdr.sum = dev->checkpt_sum;
+ hdr.xor = dev->checkpt_xor;
+
+ dev->checkpt_byte_offs = sizeof(hdr);
+
+ memcpy(dev->checkpt_buffer, &hdr, sizeof(hdr));
+}
+
+static int yaffs2_checkpt_check_chunk_hdr(struct yaffs_dev *dev)
+{
+ struct yaffs_checkpt_chunk_hdr hdr;
+
+ memcpy(&hdr, dev->checkpt_buffer, sizeof(hdr));
+
+ dev->checkpt_byte_offs = sizeof(hdr);
+
+ return hdr.version == YAFFS_CHECKPOINT_VERSION &&
+ hdr.seq == dev->checkpt_page_seq &&
+ hdr.sum == dev->checkpt_sum &&
+ hdr.xor == dev->checkpt_xor;
+}
+
+static int yaffs2_checkpt_space_ok(struct yaffs_dev *dev)
+{
+ int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "checkpt blocks_avail = %d", blocks_avail);
+
+ return (blocks_avail <= 0) ? 0 : 1;
+}
+
+static int yaffs_checkpt_erase(struct yaffs_dev *dev)
+{
+ int i;
+
+ if (!dev->drv.drv_erase_fn)
+ return 0;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "checking blocks %d to %d",
+ dev->internal_start_block, dev->internal_end_block);
+
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, i);
+ int offset_i = apply_block_offset(dev, i);
+ int result;
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "erasing checkpt block %d", i);
+
+ dev->n_erasures++;
+
+ result = dev->drv.drv_erase_fn(dev, offset_i);
+ if(result) {
+ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
+ dev->n_erased_blocks++;
+ dev->n_free_chunks +=
+ dev->param.chunks_per_block;
+ } else {
+ dev->drv.drv_mark_bad_fn(dev, offset_i);
+ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
+ }
+ }
+ }
+
+ dev->blocks_in_checkpt = 0;
+
+ return 1;
+}
+
+static void yaffs2_checkpt_find_erased_block(struct yaffs_dev *dev)
+{
+ int i;
+ int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "allocating checkpt block: erased %d reserved %d avail %d next %d ",
+ dev->n_erased_blocks, dev->param.n_reserved_blocks,
+ blocks_avail, dev->checkpt_next_block);
+
+ if (dev->checkpt_next_block >= 0 &&
+ dev->checkpt_next_block <= dev->internal_end_block &&
+ blocks_avail > 0) {
+
+ for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
+ i++) {
+ struct yaffs_block_info *bi;
+
+ bi = yaffs_get_block_info(dev, i);
+ if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
+ dev->checkpt_next_block = i + 1;
+ dev->checkpt_cur_block = i;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "allocating checkpt block %d", i);
+ return;
+ }
+ }
+ }
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT, "out of checkpt blocks");
+
+ dev->checkpt_next_block = -1;
+ dev->checkpt_cur_block = -1;
+}
+
+static void yaffs2_checkpt_find_block(struct yaffs_dev *dev)
+{
+ int i;
+ struct yaffs_ext_tags tags;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "find next checkpt block: start: blocks %d next %d",
+ dev->blocks_in_checkpt, dev->checkpt_next_block);
+
+ if (dev->blocks_in_checkpt < dev->checkpt_max_blocks)
+ for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
+ i++) {
+ int chunk = i * dev->param.chunks_per_block;
+ enum yaffs_block_state state;
+ u32 seq;
+
+ dev->tagger.read_chunk_tags_fn(dev,
+ apply_chunk_offset(dev, chunk),
+ NULL, &tags);
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "find next checkpt block: search: block %d state %d oid %d seq %d eccr %d",
+ i, (int) state,
+ tags.obj_id, tags.seq_number,
+ tags.ecc_result);
+
+ if (tags.seq_number != YAFFS_SEQUENCE_CHECKPOINT_DATA)
+ continue;
+
+ dev->tagger.query_block_fn(dev,
+ apply_block_offset(dev, i),
+ &state, &seq);
+ if (state == YAFFS_BLOCK_STATE_DEAD)
+ continue;
+
+ /* Right kind of block */
+ dev->checkpt_next_block = tags.obj_id;
+ dev->checkpt_cur_block = i;
+ dev->checkpt_block_list[dev->blocks_in_checkpt] = i;
+ dev->blocks_in_checkpt++;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "found checkpt block %d", i);
+ return;
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT, "found no more checkpt blocks");
+
+ dev->checkpt_next_block = -1;
+ dev->checkpt_cur_block = -1;
+}
+
+int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing)
+{
+ int i;
+
+ dev->checkpt_open_write = writing;
+
+ /* Got the functions we need? */
+ if (!dev->tagger.write_chunk_tags_fn ||
+ !dev->tagger.read_chunk_tags_fn ||
+ !dev->drv.drv_erase_fn ||
+ !dev->drv.drv_mark_bad_fn)
+ return 0;
+
+ if (writing && !yaffs2_checkpt_space_ok(dev))
+ return 0;
+
+ if (!dev->checkpt_buffer)
+ dev->checkpt_buffer =
+ kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+ if (!dev->checkpt_buffer)
+ return 0;
+
+ dev->checkpt_page_seq = 0;
+ dev->checkpt_byte_count = 0;
+ dev->checkpt_sum = 0;
+ dev->checkpt_xor = 0;
+ dev->checkpt_cur_block = -1;
+ dev->checkpt_cur_chunk = -1;
+ dev->checkpt_next_block = dev->internal_start_block;
+
+ if (writing) {
+ memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
+ yaffs2_checkpt_init_chunk_hdr(dev);
+ return yaffs_checkpt_erase(dev);
+ }
+
+ /* Opening for a read */
+ /* Set to a value that will kick off a read */
+ dev->checkpt_byte_offs = dev->data_bytes_per_chunk;
+ /* A checkpoint block list of 1 checkpoint block per 16 block is
+ * (hopefully) going to be way more than we need */
+ dev->blocks_in_checkpt = 0;
+ dev->checkpt_max_blocks =
+ (dev->internal_end_block - dev->internal_start_block) / 16 + 2;
+ dev->checkpt_block_list =
+ kmalloc(sizeof(int) * dev->checkpt_max_blocks, GFP_NOFS);
+
+ if (!dev->checkpt_block_list)
+ return 0;
+
+ for (i = 0; i < dev->checkpt_max_blocks; i++)
+ dev->checkpt_block_list[i] = -1;
+
+ return 1;
+}
+
+int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 * sum)
+{
+ u32 composite_sum;
+
+ composite_sum = (dev->checkpt_sum << 8) | (dev->checkpt_xor & 0xff);
+ *sum = composite_sum;
+ return 1;
+}
+
+static int yaffs2_checkpt_flush_buffer(struct yaffs_dev *dev)
+{
+ int chunk;
+ int offset_chunk;
+ struct yaffs_ext_tags tags;
+
+ if (dev->checkpt_cur_block < 0) {
+ yaffs2_checkpt_find_erased_block(dev);
+ dev->checkpt_cur_chunk = 0;
+ }
+
+ if (dev->checkpt_cur_block < 0)
+ return 0;
+
+ tags.is_deleted = 0;
+ tags.obj_id = dev->checkpt_next_block; /* Hint to next place to look */
+ tags.chunk_id = dev->checkpt_page_seq + 1;
+ tags.seq_number = YAFFS_SEQUENCE_CHECKPOINT_DATA;
+ tags.n_bytes = dev->data_bytes_per_chunk;
+ if (dev->checkpt_cur_chunk == 0) {
+ /* First chunk we write for the block? Set block state to
+ checkpoint */
+ struct yaffs_block_info *bi =
+ yaffs_get_block_info(dev, dev->checkpt_cur_block);
+ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
+ dev->blocks_in_checkpt++;
+ }
+
+ chunk =
+ dev->checkpt_cur_block * dev->param.chunks_per_block +
+ dev->checkpt_cur_chunk;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "checkpoint wite buffer nand %d(%d:%d) objid %d chId %d",
+ chunk, dev->checkpt_cur_block, dev->checkpt_cur_chunk,
+ tags.obj_id, tags.chunk_id);
+
+ offset_chunk = apply_chunk_offset(dev, chunk);
+
+ dev->n_page_writes++;
+
+ dev->tagger.write_chunk_tags_fn(dev, offset_chunk,
+ dev->checkpt_buffer, &tags);
+ dev->checkpt_page_seq++;
+ dev->checkpt_cur_chunk++;
+ if (dev->checkpt_cur_chunk >= dev->param.chunks_per_block) {
+ dev->checkpt_cur_chunk = 0;
+ dev->checkpt_cur_block = -1;
+ }
+ memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
+
+ yaffs2_checkpt_init_chunk_hdr(dev);
+
+
+ return 1;
+}
+
+int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes)
+{
+ int i = 0;
+ int ok = 1;
+ u8 *data_bytes = (u8 *) data;
+
+ if (!dev->checkpt_buffer)
+ return 0;
+
+ if (!dev->checkpt_open_write)
+ return -1;
+
+ while (i < n_bytes && ok) {
+ dev->checkpt_buffer[dev->checkpt_byte_offs] = *data_bytes;
+ dev->checkpt_sum += *data_bytes;
+ dev->checkpt_xor ^= *data_bytes;
+
+ dev->checkpt_byte_offs++;
+ i++;
+ data_bytes++;
+ dev->checkpt_byte_count++;
+
+ if (dev->checkpt_byte_offs < 0 ||
+ dev->checkpt_byte_offs >= dev->data_bytes_per_chunk)
+ ok = yaffs2_checkpt_flush_buffer(dev);
+ }
+
+ return i;
+}
+
+int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes)
+{
+ int i = 0;
+ int ok = 1;
+ struct yaffs_ext_tags tags;
+ int chunk;
+ int offset_chunk;
+ u8 *data_bytes = (u8 *) data;
+
+ if (!dev->checkpt_buffer)
+ return 0;
+
+ if (dev->checkpt_open_write)
+ return -1;
+
+ while (i < n_bytes && ok) {
+
+ if (dev->checkpt_byte_offs < 0 ||
+ dev->checkpt_byte_offs >= dev->data_bytes_per_chunk) {
+
+ if (dev->checkpt_cur_block < 0) {
+ yaffs2_checkpt_find_block(dev);
+ dev->checkpt_cur_chunk = 0;
+ }
+
+ if (dev->checkpt_cur_block < 0) {
+ ok = 0;
+ break;
+ }
+
+ chunk = dev->checkpt_cur_block *
+ dev->param.chunks_per_block +
+ dev->checkpt_cur_chunk;
+
+ offset_chunk = apply_chunk_offset(dev, chunk);
+ dev->n_page_reads++;
+
+ /* read in the next chunk */
+ dev->tagger.read_chunk_tags_fn(dev,
+ offset_chunk,
+ dev->checkpt_buffer,
+ &tags);
+
+ if (tags.chunk_id != (dev->checkpt_page_seq + 1) ||
+ tags.ecc_result > YAFFS_ECC_RESULT_FIXED ||
+ tags.seq_number != YAFFS_SEQUENCE_CHECKPOINT_DATA) {
+ ok = 0;
+ break;
+ }
+ if(!yaffs2_checkpt_check_chunk_hdr(dev)) {
+ ok = 0;
+ break;
+ }
+
+ dev->checkpt_page_seq++;
+ dev->checkpt_cur_chunk++;
+
+ if (dev->checkpt_cur_chunk >=
+ dev->param.chunks_per_block)
+ dev->checkpt_cur_block = -1;
+
+ }
+
+ *data_bytes = dev->checkpt_buffer[dev->checkpt_byte_offs];
+ dev->checkpt_sum += *data_bytes;
+ dev->checkpt_xor ^= *data_bytes;
+ dev->checkpt_byte_offs++;
+ i++;
+ data_bytes++;
+ dev->checkpt_byte_count++;
+ }
+
+ return i;
+}
+
+int yaffs_checkpt_close(struct yaffs_dev *dev)
+{
+ int i;
+
+ if (dev->checkpt_open_write) {
+ if (dev->checkpt_byte_offs !=
+ sizeof(sizeof(struct yaffs_checkpt_chunk_hdr)))
+ yaffs2_checkpt_flush_buffer(dev);
+ } else if (dev->checkpt_block_list) {
+ for (i = 0;
+ i < dev->blocks_in_checkpt &&
+ dev->checkpt_block_list[i] >= 0; i++) {
+ int blk = dev->checkpt_block_list[i];
+ struct yaffs_block_info *bi = NULL;
+
+ if (dev->internal_start_block <= blk &&
+ blk <= dev->internal_end_block)
+ bi = yaffs_get_block_info(dev, blk);
+ if (bi && bi->block_state == YAFFS_BLOCK_STATE_EMPTY)
+ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
+ }
+ kfree(dev->checkpt_block_list);
+ dev->checkpt_block_list = NULL;
+ }
+
+ dev->n_free_chunks -=
+ dev->blocks_in_checkpt * dev->param.chunks_per_block;
+ dev->n_erased_blocks -= dev->blocks_in_checkpt;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT, "checkpoint byte count %d",
+ dev->checkpt_byte_count);
+
+ if (dev->checkpt_buffer) {
+ /* free the buffer */
+ kfree(dev->checkpt_buffer);
+ dev->checkpt_buffer = NULL;
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev)
+{
+ /* Erase the checkpoint data */
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "checkpoint invalidate of %d blocks",
+ dev->blocks_in_checkpt);
+
+ return yaffs_checkpt_erase(dev);
+}
Index: linux-3.18.21/fs/yaffs2/yaffs_checkptrw.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_checkptrw.h 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,33 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_CHECKPTRW_H__
+#define __YAFFS_CHECKPTRW_H__
+
+#include "yaffs_guts.h"
+
+int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing);
+
+int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes);
+
+int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes);
+
+int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 * sum);
+
+int yaffs_checkpt_close(struct yaffs_dev *dev);
+
+int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev);
+
+#endif
Index: linux-3.18.21/fs/yaffs2/yaffs_ecc.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_ecc.c 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,281 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This code implements the ECC algorithm used in SmartMedia.
+ *
+ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
+ * The two unused bit are set to 1.
+ * The ECC can correct single bit errors in a 256-byte page of data. Thus, two
+ * such ECC blocks are used on a 512-byte NAND page.
+ *
+ */
+
+#include "yportenv.h"
+
+#include "yaffs_ecc.h"
+
+/* Table generated by gen-ecc.c
+ * Using a table means we do not have to calculate p1..p4 and p1'..p4'
+ * for each byte of data. These are instead provided in a table in bits7..2.
+ * Bit 0 of each entry indicates whether the entry has an odd or even parity,
+ * and therefore this bytes influence on the line parity.
+ */
+
+static const unsigned char column_parity_table[] = {
+ 0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
+ 0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
+ 0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
+ 0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
+ 0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
+ 0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
+ 0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
+ 0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
+ 0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
+ 0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
+ 0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
+ 0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
+ 0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
+ 0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
+ 0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
+ 0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
+ 0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
+ 0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
+ 0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
+ 0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
+ 0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
+ 0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
+ 0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
+ 0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
+ 0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
+ 0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
+ 0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
+ 0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
+ 0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
+ 0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
+ 0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
+ 0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
+};
+
+
+/* Calculate the ECC for a 256-byte block of data */
+void yaffs_ecc_calc(const unsigned char *data, unsigned char *ecc)
+{
+ unsigned int i;
+ unsigned char col_parity = 0;
+ unsigned char line_parity = 0;
+ unsigned char line_parity_prime = 0;
+ unsigned char t;
+ unsigned char b;
+
+ for (i = 0; i < 256; i++) {
+ b = column_parity_table[*data++];
+ col_parity ^= b;
+
+ if (b & 0x01) { /* odd number of bits in the byte */
+ line_parity ^= i;
+ line_parity_prime ^= ~i;
+ }
+ }
+
+ ecc[2] = (~col_parity) | 0x03;
+
+ t = 0;
+ if (line_parity & 0x80)
+ t |= 0x80;
+ if (line_parity_prime & 0x80)
+ t |= 0x40;
+ if (line_parity & 0x40)
+ t |= 0x20;
+ if (line_parity_prime & 0x40)
+ t |= 0x10;
+ if (line_parity & 0x20)
+ t |= 0x08;
+ if (line_parity_prime & 0x20)
+ t |= 0x04;
+ if (line_parity & 0x10)
+ t |= 0x02;
+ if (line_parity_prime & 0x10)
+ t |= 0x01;
+ ecc[1] = ~t;
+
+ t = 0;
+ if (line_parity & 0x08)
+ t |= 0x80;
+ if (line_parity_prime & 0x08)
+ t |= 0x40;
+ if (line_parity & 0x04)
+ t |= 0x20;
+ if (line_parity_prime & 0x04)
+ t |= 0x10;
+ if (line_parity & 0x02)
+ t |= 0x08;
+ if (line_parity_prime & 0x02)
+ t |= 0x04;
+ if (line_parity & 0x01)
+ t |= 0x02;
+ if (line_parity_prime & 0x01)
+ t |= 0x01;
+ ecc[0] = ~t;
+
+}
+
+/* Correct the ECC on a 256 byte block of data */
+
+int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
+ const unsigned char *test_ecc)
+{
+ unsigned char d0, d1, d2; /* deltas */
+
+ d0 = read_ecc[0] ^ test_ecc[0];
+ d1 = read_ecc[1] ^ test_ecc[1];
+ d2 = read_ecc[2] ^ test_ecc[2];
+
+ if ((d0 | d1 | d2) == 0)
+ return 0; /* no error */
+
+ if (((d0 ^ (d0 >> 1)) & 0x55) == 0x55 &&
+ ((d1 ^ (d1 >> 1)) & 0x55) == 0x55 &&
+ ((d2 ^ (d2 >> 1)) & 0x54) == 0x54) {
+ /* Single bit (recoverable) error in data */
+
+ unsigned byte;
+ unsigned bit;
+
+ bit = byte = 0;
+
+ if (d1 & 0x80)
+ byte |= 0x80;
+ if (d1 & 0x20)
+ byte |= 0x40;
+ if (d1 & 0x08)
+ byte |= 0x20;
+ if (d1 & 0x02)
+ byte |= 0x10;
+ if (d0 & 0x80)
+ byte |= 0x08;
+ if (d0 & 0x20)
+ byte |= 0x04;
+ if (d0 & 0x08)
+ byte |= 0x02;
+ if (d0 & 0x02)
+ byte |= 0x01;
+
+ if (d2 & 0x80)
+ bit |= 0x04;
+ if (d2 & 0x20)
+ bit |= 0x02;
+ if (d2 & 0x08)
+ bit |= 0x01;
+
+ data[byte] ^= (1 << bit);
+
+ return 1; /* Corrected the error */
+ }
+
+ if ((hweight8(d0) + hweight8(d1) + hweight8(d2)) == 1) {
+ /* Reccoverable error in ecc */
+
+ read_ecc[0] = test_ecc[0];
+ read_ecc[1] = test_ecc[1];
+ read_ecc[2] = test_ecc[2];
+
+ return 1; /* Corrected the error */
+ }
+
+ /* Unrecoverable error */
+
+ return -1;
+
+}
+
+/*
+ * ECCxxxOther does ECC calcs on arbitrary n bytes of data
+ */
+void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *ecc_other)
+{
+ unsigned int i;
+ unsigned char col_parity = 0;
+ unsigned line_parity = 0;
+ unsigned line_parity_prime = 0;
+ unsigned char b;
+
+ for (i = 0; i < n_bytes; i++) {
+ b = column_parity_table[*data++];
+ col_parity ^= b;
+
+ if (b & 0x01) {
+ /* odd number of bits in the byte */
+ line_parity ^= i;
+ line_parity_prime ^= ~i;
+ }
+
+ }
+
+ ecc_other->col_parity = (col_parity >> 2) & 0x3f;
+ ecc_other->line_parity = line_parity;
+ ecc_other->line_parity_prime = line_parity_prime;
+}
+
+int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *read_ecc,
+ const struct yaffs_ecc_other *test_ecc)
+{
+ unsigned char delta_col; /* column parity delta */
+ unsigned delta_line; /* line parity delta */
+ unsigned delta_line_prime; /* line parity delta */
+ unsigned bit;
+
+ delta_col = read_ecc->col_parity ^ test_ecc->col_parity;
+ delta_line = read_ecc->line_parity ^ test_ecc->line_parity;
+ delta_line_prime =
+ read_ecc->line_parity_prime ^ test_ecc->line_parity_prime;
+
+ if ((delta_col | delta_line | delta_line_prime) == 0)
+ return 0; /* no error */
+
+ if (delta_line == ~delta_line_prime &&
+ (((delta_col ^ (delta_col >> 1)) & 0x15) == 0x15)) {
+ /* Single bit (recoverable) error in data */
+
+ bit = 0;
+
+ if (delta_col & 0x20)
+ bit |= 0x04;
+ if (delta_col & 0x08)
+ bit |= 0x02;
+ if (delta_col & 0x02)
+ bit |= 0x01;
+
+ if (delta_line >= n_bytes)
+ return -1;
+
+ data[delta_line] ^= (1 << bit);
+
+ return 1; /* corrected */
+ }
+
+ if ((hweight32(delta_line) +
+ hweight32(delta_line_prime) +
+ hweight8(delta_col)) == 1) {
+ /* Reccoverable error in ecc */
+
+ *read_ecc = *test_ecc;
+ return 1; /* corrected */
+ }
+
+ /* Unrecoverable error */
+
+ return -1;
+}
Index: linux-3.18.21/fs/yaffs2/yaffs_ecc.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_ecc.h 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,44 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/*
+ * This code implements the ECC algorithm used in SmartMedia.
+ *
+ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
+ * The two unused bits are set to 1.
+ * The ECC can correct single bit errors in a 256-byte page of data.
+ * Thus, two such ECC blocks are used on a 512-byte NAND page.
+ *
+ */
+
+#ifndef __YAFFS_ECC_H__
+#define __YAFFS_ECC_H__
+
+struct yaffs_ecc_other {
+ unsigned char col_parity;
+ unsigned line_parity;
+ unsigned line_parity_prime;
+};
+
+void yaffs_ecc_calc(const unsigned char *data, unsigned char *ecc);
+int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
+ const unsigned char *test_ecc);
+
+void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *ecc);
+int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *read_ecc,
+ const struct yaffs_ecc_other *test_ecc);
+#endif
Index: linux-3.18.21/fs/yaffs2/yaffs_getblockinfo.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_getblockinfo.h 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,35 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_GETBLOCKINFO_H__
+#define __YAFFS_GETBLOCKINFO_H__
+
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+
+/* Function to manipulate block info */
+static inline struct yaffs_block_info *yaffs_get_block_info(struct yaffs_dev
+ *dev, int blk)
+{
+ if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>> yaffs: get_block_info block %d is not valid",
+ blk);
+ BUG();
+ }
+ return &dev->block_info[blk - dev->internal_start_block];
+}
+
+#endif
Index: linux-3.18.21/fs/yaffs2/yaffs_guts.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_guts.c 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,5136 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+
+#include "yaffs_guts.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_tagscompat.h"
+#include "yaffs_tagsmarshall.h"
+#include "yaffs_nand.h"
+#include "yaffs_yaffs1.h"
+#include "yaffs_yaffs2.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_verify.h"
+#include "yaffs_nand.h"
+#include "yaffs_packedtags2.h"
+#include "yaffs_nameval.h"
+#include "yaffs_allocator.h"
+#include "yaffs_attribs.h"
+#include "yaffs_summary.h"
+
+/* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
+#define YAFFS_GC_GOOD_ENOUGH 2
+#define YAFFS_GC_PASSIVE_THRESHOLD 4
+
+#include "yaffs_ecc.h"
+
+/* Forward declarations */
+
+static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
+ const u8 *buffer, int n_bytes, int use_reserve);
+
+static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name,
+ int buffer_size);
+
+/* Function to calculate chunk and offset */
+
+void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
+ int *chunk_out, u32 *offset_out)
+{
+ int chunk;
+ u32 offset;
+
+ chunk = (u32) (addr >> dev->chunk_shift);
+
+ if (dev->chunk_div == 1) {
+ /* easy power of 2 case */
+ offset = (u32) (addr & dev->chunk_mask);
+ } else {
+ /* Non power-of-2 case */
+
+ loff_t chunk_base;
+
+ chunk /= dev->chunk_div;
+
+ chunk_base = ((loff_t) chunk) * dev->data_bytes_per_chunk;
+ offset = (u32) (addr - chunk_base);
+ }
+
+ *chunk_out = chunk;
+ *offset_out = offset;
+}
+
+/* Function to return the number of shifts for a power of 2 greater than or
+ * equal to the given number
+ * Note we don't try to cater for all possible numbers and this does not have to
+ * be hellishly efficient.
+ */
+
+static inline u32 calc_shifts_ceiling(u32 x)
+{
+ int extra_bits;
+ int shifts;
+
+ shifts = extra_bits = 0;
+
+ while (x > 1) {
+ if (x & 1)
+ extra_bits++;
+ x >>= 1;
+ shifts++;
+ }
+
+ if (extra_bits)
+ shifts++;
+
+ return shifts;
+}
+
+/* Function to return the number of shifts to get a 1 in bit 0
+ */
+
+static inline u32 calc_shifts(u32 x)
+{
+ u32 shifts;
+
+ shifts = 0;
+
+ if (!x)
+ return 0;
+
+ while (!(x & 1)) {
+ x >>= 1;
+ shifts++;
+ }
+
+ return shifts;
+}
+
+/*
+ * Temporary buffer manipulations.
+ */
+
+static int yaffs_init_tmp_buffers(struct yaffs_dev *dev)
+{
+ int i;
+ u8 *buf = (u8 *) 1;
+
+ memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer));
+
+ for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
+ dev->temp_buffer[i].in_use = 0;
+ buf = kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+ dev->temp_buffer[i].buffer = buf;
+ }
+
+ return buf ? YAFFS_OK : YAFFS_FAIL;
+}
+
+u8 *yaffs_get_temp_buffer(struct yaffs_dev * dev)
+{
+ int i;
+
+ dev->temp_in_use++;
+ if (dev->temp_in_use > dev->max_temp)
+ dev->max_temp = dev->temp_in_use;
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->temp_buffer[i].in_use == 0) {
+ dev->temp_buffer[i].in_use = 1;
+ return dev->temp_buffer[i].buffer;
+ }
+ }
+
+ yaffs_trace(YAFFS_TRACE_BUFFERS, "Out of temp buffers");
+ /*
+ * If we got here then we have to allocate an unmanaged one
+ * This is not good.
+ */
+
+ dev->unmanaged_buffer_allocs++;
+ return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS);
+
+}
+
+void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer)
+{
+ int i;
+
+ dev->temp_in_use--;
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->temp_buffer[i].buffer == buffer) {
+ dev->temp_buffer[i].in_use = 0;
+ return;
+ }
+ }
+
+ if (buffer) {
+ /* assume it is an unmanaged one. */
+ yaffs_trace(YAFFS_TRACE_BUFFERS,
+ "Releasing unmanaged temp buffer");
+ kfree(buffer);
+ dev->unmanaged_buffer_deallocs++;
+ }
+
+}
+
+/*
+ * Functions for robustisizing TODO
+ *
+ */
+
+static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk,
+ const u8 *data,
+ const struct yaffs_ext_tags *tags)
+{
+ (void) dev;
+ (void) nand_chunk;
+ (void) data;
+ (void) tags;
+}
+
+static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk,
+ const struct yaffs_ext_tags *tags)
+{
+ (void) dev;
+ (void) nand_chunk;
+ (void) tags;
+}
+
+void yaffs_handle_chunk_error(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi)
+{
+ if (!bi->gc_prioritise) {
+ bi->gc_prioritise = 1;
+ dev->has_pending_prioritised_gc = 1;
+ bi->chunk_error_strikes++;
+
+ if (bi->chunk_error_strikes > 3) {
+			bi->needs_retiring = 1;	/* Too many strikes, so retire */
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Block struck out");
+
+ }
+ }
+}
+
+static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk,
+ int erased_ok)
+{
+ int flash_block = nand_chunk / dev->param.chunks_per_block;
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
+
+ yaffs_handle_chunk_error(dev, bi);
+
+ if (erased_ok) {
+ /* Was an actual write failure,
+ * so mark the block for retirement.*/
+ bi->needs_retiring = 1;
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>> Block %d needs retiring", flash_block);
+ }
+
+ /* Delete the chunk */
+ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+ yaffs_skip_rest_of_block(dev);
+}
+
+/*
+ * Verification code
+ */
+
+/*
+ * Simple hash function. Needs to have a reasonable spread
+ */
+
+static inline int yaffs_hash_fn(int n)
+{
+ if (n < 0)
+ n = -n;
+ return n % YAFFS_NOBJECT_BUCKETS;
+}
+
+/*
+ * Access functions to useful fake objects.
+ * Note that root might have a presence in NAND if permissions are set.
+ */
+
+struct yaffs_obj *yaffs_root(struct yaffs_dev *dev)
+{
+ return dev->root_dir;
+}
+
+struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev)
+{
+ return dev->lost_n_found;
+}
+
+/*
+ * Erased NAND checking functions
+ */
+
+int yaffs_check_ff(u8 *buffer, int n_bytes)
+{
+ /* Horrible, slow implementation */
+ while (n_bytes--) {
+ if (*buffer != 0xff)
+ return 0;
+ buffer++;
+ }
+ return 1;
+}
+
+static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk)
+{
+ int retval = YAFFS_OK;
+ u8 *data = yaffs_get_temp_buffer(dev);
+ struct yaffs_ext_tags tags;
+ int result;
+
+ result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);
+
+ if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
+ retval = YAFFS_FAIL;
+
+ if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) ||
+ tags.chunk_used) {
+ yaffs_trace(YAFFS_TRACE_NANDACCESS,
+ "Chunk %d not erased", nand_chunk);
+ retval = YAFFS_FAIL;
+ }
+
+ yaffs_release_temp_buffer(dev, data);
+
+ return retval;
+
+}
+
+static int yaffs_verify_chunk_written(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 *data,
+ struct yaffs_ext_tags *tags)
+{
+ int retval = YAFFS_OK;
+ struct yaffs_ext_tags temp_tags;
+ u8 *buffer = yaffs_get_temp_buffer(dev);
+ int result;
+
+ result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags);
+ if (memcmp(buffer, data, dev->data_bytes_per_chunk) ||
+ temp_tags.obj_id != tags->obj_id ||
+ temp_tags.chunk_id != tags->chunk_id ||
+ temp_tags.n_bytes != tags->n_bytes)
+ retval = YAFFS_FAIL;
+
+ yaffs_release_temp_buffer(dev, buffer);
+
+ return retval;
+}
+
+
+int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks)
+{
+ int reserved_chunks;
+ int reserved_blocks = dev->param.n_reserved_blocks;
+ int checkpt_blocks;
+
+ checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev);
+
+ reserved_chunks =
+ (reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block;
+
+ return (dev->n_free_chunks > (reserved_chunks + n_chunks));
+}
+
+static int yaffs_find_alloc_block(struct yaffs_dev *dev)
+{
+ int i;
+ struct yaffs_block_info *bi;
+
+ if (dev->n_erased_blocks < 1) {
+ /* Hoosterman we've got a problem.
+ * Can't get space to gc
+ */
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: no more erased blocks");
+
+ return -1;
+ }
+
+ /* Find an empty block. */
+
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ dev->alloc_block_finder++;
+ if (dev->alloc_block_finder < dev->internal_start_block
+ || dev->alloc_block_finder > dev->internal_end_block) {
+ dev->alloc_block_finder = dev->internal_start_block;
+ }
+
+ bi = yaffs_get_block_info(dev, dev->alloc_block_finder);
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
+ bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING;
+ dev->seq_number++;
+ bi->seq_number = dev->seq_number;
+ dev->n_erased_blocks--;
+ yaffs_trace(YAFFS_TRACE_ALLOCATE,
+ "Allocated block %d, seq %d, %d left" ,
+ dev->alloc_block_finder, dev->seq_number,
+ dev->n_erased_blocks);
+ return dev->alloc_block_finder;
+ }
+ }
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs tragedy: no more erased blocks, but there should have been %d",
+ dev->n_erased_blocks);
+
+ return -1;
+}
+
+static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver,
+ struct yaffs_block_info **block_ptr)
+{
+ int ret_val;
+ struct yaffs_block_info *bi;
+
+ if (dev->alloc_block < 0) {
+ /* Get next block to allocate off */
+ dev->alloc_block = yaffs_find_alloc_block(dev);
+ dev->alloc_page = 0;
+ }
+
+ if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) {
+ /* No space unless we're allowed to use the reserve. */
+ return -1;
+ }
+
+ if (dev->n_erased_blocks < dev->param.n_reserved_blocks
+ && dev->alloc_page == 0)
+ yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve");
+
+ /* Next page please.... */
+ if (dev->alloc_block >= 0) {
+ bi = yaffs_get_block_info(dev, dev->alloc_block);
+
+ ret_val = (dev->alloc_block * dev->param.chunks_per_block) +
+ dev->alloc_page;
+ bi->pages_in_use++;
+ yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page);
+
+ dev->alloc_page++;
+
+ dev->n_free_chunks--;
+
+ /* If the block is full set the state to full */
+ if (dev->alloc_page >= dev->param.chunks_per_block) {
+ bi->block_state = YAFFS_BLOCK_STATE_FULL;
+ dev->alloc_block = -1;
+ }
+
+ if (block_ptr)
+ *block_ptr = bi;
+
+ return ret_val;
+ }
+
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!");
+
+ return -1;
+}
+
+static int yaffs_get_erased_chunks(struct yaffs_dev *dev)
+{
+ int n;
+
+ n = dev->n_erased_blocks * dev->param.chunks_per_block;
+
+ if (dev->alloc_block > 0)
+ n += (dev->param.chunks_per_block - dev->alloc_page);
+
+ return n;
+
+}
+
+/*
+ * yaffs_skip_rest_of_block() skips over the rest of the allocation block
+ * if we don't want to write to it.
+ */
+void yaffs_skip_rest_of_block(struct yaffs_dev *dev)
+{
+ struct yaffs_block_info *bi;
+
+ if (dev->alloc_block > 0) {
+ bi = yaffs_get_block_info(dev, dev->alloc_block);
+ if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
+ bi->block_state = YAFFS_BLOCK_STATE_FULL;
+ dev->alloc_block = -1;
+ }
+ }
+}
+
+static int yaffs_write_new_chunk(struct yaffs_dev *dev,
+ const u8 *data,
+ struct yaffs_ext_tags *tags, int use_reserver)
+{
+ int attempts = 0;
+ int write_ok = 0;
+ int chunk;
+
+ yaffs2_checkpt_invalidate(dev);
+
+ do {
+ struct yaffs_block_info *bi = 0;
+ int erased_ok = 0;
+
+ chunk = yaffs_alloc_chunk(dev, use_reserver, &bi);
+ if (chunk < 0) {
+ /* no space */
+ break;
+ }
+
+ /* First check this chunk is erased, if it needs
+ * checking. The checking policy (unless forced
+ * always on) is as follows:
+ *
+ * Check the first page we try to write in a block.
+ * If the check passes then we don't need to check any
+ * more. If the check fails, we check again...
+ * If the block has been erased, we don't need to check.
+ *
+ * However, if the block has been prioritised for gc,
+ * then we think there might be something odd about
+ * this block and stop using it.
+ *
+ * Rationale: We should only ever see chunks that have
+ * not been erased if there was a partially written
+ * chunk due to power loss. This checking policy should
+ * catch that case with very few checks and thus save a
+ * lot of checks that are most likely not needed.
+ *
+ * Mods to the above
+ * If an erase check fails or the write fails we skip the
+ * rest of the block.
+ */
+
+ /* let's give it a try */
+ attempts++;
+
+ if (dev->param.always_check_erased)
+ bi->skip_erased_check = 0;
+
+ if (!bi->skip_erased_check) {
+ erased_ok = yaffs_check_chunk_erased(dev, chunk);
+ if (erased_ok != YAFFS_OK) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>> yaffs chunk %d was not erased",
+ chunk);
+
+ /* If not erased, delete this one,
+ * skip rest of block and
+ * try another chunk */
+ yaffs_chunk_del(dev, chunk, 1, __LINE__);
+ yaffs_skip_rest_of_block(dev);
+ continue;
+ }
+ }
+
+ write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags);
+
+ if (!bi->skip_erased_check)
+ write_ok =
+ yaffs_verify_chunk_written(dev, chunk, data, tags);
+
+ if (write_ok != YAFFS_OK) {
+ /* Clean up aborted write, skip to next block and
+ * try another chunk */
+ yaffs_handle_chunk_wr_error(dev, chunk, erased_ok);
+ continue;
+ }
+
+ bi->skip_erased_check = 1;
+
+ /* Copy the data into the robustification buffer */
+ yaffs_handle_chunk_wr_ok(dev, chunk, data, tags);
+
+ } while (write_ok != YAFFS_OK &&
+ (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));
+
+ if (!write_ok)
+ chunk = -1;
+
+ if (attempts > 1) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>> yaffs write required %d attempts",
+ attempts);
+ dev->n_retried_writes += (attempts - 1);
+ }
+
+ return chunk;
+}
+
+/*
+ * Block retiring for handling a broken block.
+ */
+
+static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block)
+{
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
+
+ yaffs2_checkpt_invalidate(dev);
+
+ yaffs2_clear_oldest_dirty_seq(dev, bi);
+
+ if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
+ if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Failed to mark bad and erase block %d",
+ flash_block);
+ } else {
+ struct yaffs_ext_tags tags;
+ int chunk_id =
+ flash_block * dev->param.chunks_per_block;
+
+ u8 *buffer = yaffs_get_temp_buffer(dev);
+
+ memset(buffer, 0xff, dev->data_bytes_per_chunk);
+ memset(&tags, 0, sizeof(tags));
+ tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK;
+ if (dev->tagger.write_chunk_tags_fn(dev, chunk_id -
+ dev->chunk_offset,
+ buffer,
+ &tags) != YAFFS_OK)
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Failed to write bad block marker to block %d",
+ flash_block);
+
+ yaffs_release_temp_buffer(dev, buffer);
+ }
+ }
+
+ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
+ bi->gc_prioritise = 0;
+ bi->needs_retiring = 0;
+
+ dev->n_retired_blocks++;
+}
+
+/*---------------- Name handling functions ------------*/
+
+static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR *name,
+ const YCHAR *oh_name, int buff_size)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+ if (dev->param.auto_unicode) {
+ if (*oh_name) {
+ /* It is an ASCII name, do an ASCII to
+ * unicode conversion */
+ const char *ascii_oh_name = (const char *)oh_name;
+ int n = buff_size - 1;
+ while (n > 0 && *ascii_oh_name) {
+ *name = *ascii_oh_name;
+ name++;
+ ascii_oh_name++;
+ n--;
+ }
+ } else {
+ strncpy(name, oh_name + 1, buff_size - 1);
+ }
+ } else {
+#else
+ (void) dev;
+ {
+#endif
+ strncpy(name, oh_name, buff_size - 1);
+ }
+}
+
+static void yaffs_load_oh_from_name(struct yaffs_dev *dev, YCHAR *oh_name,
+ const YCHAR *name)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+
+ int is_ascii;
+ const YCHAR *w;
+
+ if (dev->param.auto_unicode) {
+
+ is_ascii = 1;
+ w = name;
+
+ /* Figure out if the name will fit in ascii character set */
+ while (is_ascii && *w) {
+ if ((*w) & 0xff00)
+ is_ascii = 0;
+ w++;
+ }
+
+ if (is_ascii) {
+ /* It is an ASCII name, so convert unicode to ascii */
+ char *ascii_oh_name = (char *)oh_name;
+ int n = YAFFS_MAX_NAME_LENGTH - 1;
+ while (n > 0 && *name) {
+ *ascii_oh_name = *name;
+ name++;
+ ascii_oh_name++;
+ n--;
+ }
+ } else {
+ /* Unicode name, so save starting at the second YCHAR */
+ *oh_name = 0;
+ strncpy(oh_name + 1, name, YAFFS_MAX_NAME_LENGTH - 2);
+ }
+ } else {
+#else
+ dev = dev;
+ {
+#endif
+ strncpy(oh_name, name, YAFFS_MAX_NAME_LENGTH - 1);
+ }
+}
+
+static u16 yaffs_calc_name_sum(const YCHAR *name)
+{
+ u16 sum = 0;
+ u16 i = 1;
+
+ if (!name)
+ return 0;
+
+ while ((*name) && i < (YAFFS_MAX_NAME_LENGTH / 2)) {
+
+ /* 0x1f mask is case insensitive */
+ sum += ((*name) & 0x1f) * i;
+ i++;
+ name++;
+ }
+ return sum;
+}
+
+
+void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name)
+{
+ memset(obj->short_name, 0, sizeof(obj->short_name));
+
+ if (name && !name[0]) {
+ yaffs_fix_null_name(obj, obj->short_name,
+ YAFFS_SHORT_NAME_LENGTH);
+ name = obj->short_name;
+ } else if (name &&
+ strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <=
+ YAFFS_SHORT_NAME_LENGTH) {
+ strcpy(obj->short_name, name);
+ }
+
+ obj->sum = yaffs_calc_name_sum(name);
+}
+
+void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
+ const struct yaffs_obj_hdr *oh)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+ YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1];
+ memset(tmp_name, 0, sizeof(tmp_name));
+ yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name,
+ YAFFS_MAX_NAME_LENGTH + 1);
+ yaffs_set_obj_name(obj, tmp_name);
+#else
+ yaffs_set_obj_name(obj, oh->name);
+#endif
+}
+
+loff_t yaffs_max_file_size(struct yaffs_dev *dev)
+{
+ if(sizeof(loff_t) < 8)
+ return YAFFS_MAX_FILE_SIZE_32;
+ else
+ return ((loff_t) YAFFS_MAX_CHUNK_ID) * dev->data_bytes_per_chunk;
+}
+
+/*-------------------- TNODES -------------------
+
+ * List of spare tnodes
+ * The list is hooked together using the first pointer
+ * in the tnode.
+ */
+
+struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev)
+{
+ struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev);
+
+ if (tn) {
+ memset(tn, 0, dev->tnode_size);
+ dev->n_tnodes++;
+ }
+
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+
+ return tn;
+}
+
+/* FreeTnode frees up a tnode and puts it back on the free list */
+static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
+{
+ yaffs_free_raw_tnode(dev, tn);
+ dev->n_tnodes--;
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+}
+
+static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ yaffs_deinit_raw_tnodes_and_objs(dev);
+ dev->n_obj = 0;
+ dev->n_tnodes = 0;
+}
+
+static void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+ unsigned pos, unsigned val)
+{
+ u32 *map = (u32 *) tn;
+ u32 bit_in_map;
+ u32 bit_in_word;
+ u32 word_in_map;
+ u32 mask;
+
+ pos &= YAFFS_TNODES_LEVEL0_MASK;
+ val >>= dev->chunk_grp_bits;
+
+ bit_in_map = pos * dev->tnode_width;
+ word_in_map = bit_in_map / 32;
+ bit_in_word = bit_in_map & (32 - 1);
+
+ mask = dev->tnode_mask << bit_in_word;
+
+ map[word_in_map] &= ~mask;
+ map[word_in_map] |= (mask & (val << bit_in_word));
+
+ if (dev->tnode_width > (32 - bit_in_word)) {
+ bit_in_word = (32 - bit_in_word);
+ word_in_map++;
+ mask =
+ dev->tnode_mask >> bit_in_word;
+ map[word_in_map] &= ~mask;
+ map[word_in_map] |= (mask & (val >> bit_in_word));
+ }
+}
+
+u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+ unsigned pos)
+{
+ u32 *map = (u32 *) tn;
+ u32 bit_in_map;
+ u32 bit_in_word;
+ u32 word_in_map;
+ u32 val;
+
+ pos &= YAFFS_TNODES_LEVEL0_MASK;
+
+ bit_in_map = pos * dev->tnode_width;
+ word_in_map = bit_in_map / 32;
+ bit_in_word = bit_in_map & (32 - 1);
+
+ val = map[word_in_map] >> bit_in_word;
+
+ if (dev->tnode_width > (32 - bit_in_word)) {
+ bit_in_word = (32 - bit_in_word);
+ word_in_map++;
+ val |= (map[word_in_map] << bit_in_word);
+ }
+
+ val &= dev->tnode_mask;
+ val <<= dev->chunk_grp_bits;
+
+ return val;
+}
+
+/* ------------------- End of individual tnode manipulation -----------------*/
+
+/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
+ * The look up tree is represented by the top tnode and the number of top_level
+ * in the tree. 0 means only the level 0 tnode is in the tree.
+ */
+
+/* FindLevel0Tnode finds the level 0 tnode, if one exists. */
+struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id)
+{
+ struct yaffs_tnode *tn = file_struct->top;
+ u32 i;
+ int required_depth;
+ int level = file_struct->top_level;
+
+ (void) dev;
+
+ /* Check sane level and chunk Id */
+ if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
+ return NULL;
+
+ if (chunk_id > YAFFS_MAX_CHUNK_ID)
+ return NULL;
+
+ /* First check we're tall enough (ie enough top_level) */
+
+ i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
+ required_depth = 0;
+ while (i) {
+ i >>= YAFFS_TNODES_INTERNAL_BITS;
+ required_depth++;
+ }
+
+ if (required_depth > file_struct->top_level)
+ return NULL; /* Not tall enough, so we can't find it */
+
+ /* Traverse down to level 0 */
+ while (level > 0 && tn) {
+ tn = tn->internal[(chunk_id >>
+ (YAFFS_TNODES_LEVEL0_BITS +
+ (level - 1) *
+ YAFFS_TNODES_INTERNAL_BITS)) &
+ YAFFS_TNODES_INTERNAL_MASK];
+ level--;
+ }
+
+ return tn;
+}
+
+/* add_find_tnode_0 finds the level 0 tnode if it exists,
+ * otherwise first expands the tree.
+ * This happens in two steps:
+ * 1. If the tree isn't tall enough, then make it taller.
+ * 2. Scan down the tree towards the level 0 tnode adding tnodes if required.
+ *
+ * Used when modifying the tree.
+ *
+ * If the tn argument is NULL, then a fresh tnode will be added otherwise the
+ * specified tn will be plugged into the ttree.
+ */
+
+struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id,
+ struct yaffs_tnode *passed_tn)
+{
+ int required_depth;
+ int i;
+ int l;
+ struct yaffs_tnode *tn;
+ u32 x;
+
+ /* Check sane level and page Id */
+ if (file_struct->top_level < 0 ||
+ file_struct->top_level > YAFFS_TNODES_MAX_LEVEL)
+ return NULL;
+
+ if (chunk_id > YAFFS_MAX_CHUNK_ID)
+ return NULL;
+
+ /* First check we're tall enough (ie enough top_level) */
+
+ x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
+ required_depth = 0;
+ while (x) {
+ x >>= YAFFS_TNODES_INTERNAL_BITS;
+ required_depth++;
+ }
+
+ if (required_depth > file_struct->top_level) {
+ /* Not tall enough, gotta make the tree taller */
+ for (i = file_struct->top_level; i < required_depth; i++) {
+
+ tn = yaffs_get_tnode(dev);
+
+ if (tn) {
+ tn->internal[0] = file_struct->top;
+ file_struct->top = tn;
+ file_struct->top_level++;
+ } else {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs: no more tnodes");
+ return NULL;
+ }
+ }
+ }
+
+ /* Traverse down to level 0, adding anything we need */
+
+ l = file_struct->top_level;
+ tn = file_struct->top;
+
+ if (l > 0) {
+ while (l > 0 && tn) {
+ x = (chunk_id >>
+ (YAFFS_TNODES_LEVEL0_BITS +
+ (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
+ YAFFS_TNODES_INTERNAL_MASK;
+
+ if ((l > 1) && !tn->internal[x]) {
+ /* Add missing non-level-zero tnode */
+ tn->internal[x] = yaffs_get_tnode(dev);
+ if (!tn->internal[x])
+ return NULL;
+ } else if (l == 1) {
+ /* Looking from level 1 at level 0 */
+ if (passed_tn) {
+ /* If we already have one, release it */
+ if (tn->internal[x])
+ yaffs_free_tnode(dev,
+ tn->internal[x]);
+ tn->internal[x] = passed_tn;
+
+ } else if (!tn->internal[x]) {
+ /* Don't have one, none passed in */
+ tn->internal[x] = yaffs_get_tnode(dev);
+ if (!tn->internal[x])
+ return NULL;
+ }
+ }
+
+ tn = tn->internal[x];
+ l--;
+ }
+ } else {
+ /* top is level 0 */
+ if (passed_tn) {
+ memcpy(tn, passed_tn,
+ (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8);
+ yaffs_free_tnode(dev, passed_tn);
+ }
+ }
+
+ return tn;
+}
+
+static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id,
+ int chunk_obj)
+{
+ return (tags->chunk_id == chunk_obj &&
+ tags->obj_id == obj_id &&
+ !tags->is_deleted) ? 1 : 0;
+
+}
+
+static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk,
+ struct yaffs_ext_tags *tags, int obj_id,
+ int inode_chunk)
+{
+ int j;
+
+ for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) {
+ if (yaffs_check_chunk_bit
+ (dev, the_chunk / dev->param.chunks_per_block,
+ the_chunk % dev->param.chunks_per_block)) {
+
+ if (dev->chunk_grp_size == 1)
+ return the_chunk;
+ else {
+ yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
+ tags);
+ if (yaffs_tags_match(tags,
+ obj_id, inode_chunk)) {
+ /* found it; */
+ return the_chunk;
+ }
+ }
+ }
+ the_chunk++;
+ }
+ return -1;
+}
+
+int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+ struct yaffs_ext_tags *tags)
+{
+	/* Get the Tnode, then get the level 0 chunk offset */
+ struct yaffs_tnode *tn;
+ int the_chunk = -1;
+ struct yaffs_ext_tags local_tags;
+ int ret_val = -1;
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (!tags) {
+ /* Passed a NULL, so use our own tags space */
+ tags = &local_tags;
+ }
+
+ tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
+
+ if (!tn)
+ return ret_val;
+
+ the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+ ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
+ inode_chunk);
+ return ret_val;
+}
+
+static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk,
+ struct yaffs_ext_tags *tags)
+{
+	/* Get the Tnode, then get the level 0 chunk offset */
+ struct yaffs_tnode *tn;
+ int the_chunk = -1;
+ struct yaffs_ext_tags local_tags;
+ struct yaffs_dev *dev = in->my_dev;
+ int ret_val = -1;
+
+ if (!tags) {
+ /* Passed a NULL, so use our own tags space */
+ tags = &local_tags;
+ }
+
+ tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
+
+ if (!tn)
+ return ret_val;
+
+ the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+ ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
+ inode_chunk);
+
+ /* Delete the entry in the filestructure (if found) */
+ if (ret_val != -1)
+ yaffs_load_tnode_0(dev, tn, inode_chunk, 0);
+
+ return ret_val;
+}
+
+int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+ int nand_chunk, int in_scan)
+{
+ /* NB in_scan is zero unless scanning.
+ * For forward scanning, in_scan is > 0;
+ * for backward scanning in_scan is < 0
+ *
+ * nand_chunk = 0 is a dummy insert to make sure the tnodes are there.
+ */
+
+ struct yaffs_tnode *tn;
+ struct yaffs_dev *dev = in->my_dev;
+ int existing_cunk;
+ struct yaffs_ext_tags existing_tags;
+ struct yaffs_ext_tags new_tags;
+ unsigned existing_serial, new_serial;
+
+ if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) {
+ /* Just ignore an attempt at putting a chunk into a non-file
+ * during scanning.
+ * If it is not during Scanning then something went wrong!
+ */
+ if (!in_scan) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy:attempt to put data chunk into a non-file"
+ );
+ BUG();
+ }
+
+ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+ return YAFFS_OK;
+ }
+
+ tn = yaffs_add_find_tnode_0(dev,
+ &in->variant.file_variant,
+ inode_chunk, NULL);
+ if (!tn)
+ return YAFFS_FAIL;
+
+ if (!nand_chunk)
+ /* Dummy insert, bail now */
+ return YAFFS_OK;
+
+ existing_cunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+ if (in_scan != 0) {
+ /* If we're scanning then we need to test for duplicates
+ * NB This does not need to be efficient since it should only
+ * happen when the power fails during a write, then only one
+ * chunk should ever be affected.
+ *
+ * Correction for YAFFS2: This could happen quite a lot and we
+ * need to think about efficiency! TODO
+ * Update: For backward scanning we don't need to re-read tags
+ * so this is quite cheap.
+ */
+
+ if (existing_cunk > 0) {
+ /* NB Right now existing chunk will not be real
+ * chunk_id if the chunk group size > 1
+ * thus we have to do a FindChunkInFile to get the
+ * real chunk id.
+ *
+ * We have a duplicate now we need to decide which
+ * one to use:
+ *
+ * Backwards scanning YAFFS2: The old one is what
+ * we use, dump the new one.
+ * YAFFS1: Get both sets of tags and compare serial
+ * numbers.
+ */
+
+ if (in_scan > 0) {
+ /* Only do this for forward scanning */
+ yaffs_rd_chunk_tags_nand(dev,
+ nand_chunk,
+ NULL, &new_tags);
+
+ /* Do a proper find */
+ existing_cunk =
+ yaffs_find_chunk_in_file(in, inode_chunk,
+ &existing_tags);
+ }
+
+ if (existing_cunk <= 0) {
+ /*Hoosterman - how did this happen? */
+
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: existing chunk < 0 in scan"
+ );
+
+ }
+
+ /* NB The deleted flags should be false, otherwise
+ * the chunks will not be loaded during a scan
+ */
+
+ if (in_scan > 0) {
+ new_serial = new_tags.serial_number;
+ existing_serial = existing_tags.serial_number;
+ }
+
+ if ((in_scan > 0) &&
+ (existing_cunk <= 0 ||
+ ((existing_serial + 1) & 3) == new_serial)) {
+ /* Forward scanning.
+ * Use new
+ * Delete the old one and drop through to
+ * update the tnode
+ */
+ yaffs_chunk_del(dev, existing_cunk, 1,
+ __LINE__);
+ } else {
+ /* Backward scanning or we want to use the
+ * existing one
+ * Delete the new one and return early so that
+ * the tnode isn't changed
+ */
+ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+ return YAFFS_OK;
+ }
+ }
+
+ }
+
+ if (existing_cunk == 0)
+ in->n_data_chunks++;
+
+ yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk);
+
+ return YAFFS_OK;
+}
+
+static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk)
+{
+ struct yaffs_block_info *the_block;
+ unsigned block_no;
+
+ yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk);
+
+ block_no = chunk / dev->param.chunks_per_block;
+ the_block = yaffs_get_block_info(dev, block_no);
+ if (the_block) {
+ the_block->soft_del_pages++;
+ dev->n_free_chunks++;
+ yaffs2_update_oldest_dirty_seq(dev, block_no, the_block);
+ }
+}
+
+/* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all
+ * the chunks in the file.
+ * All soft deleting does is increment the block's softdelete count and pulls
+ * the chunk out of the tnode.
+ * Thus, essentially this is the same as DeleteWorker except that the chunks
+ * are soft deleted.
+ */
+
+static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn,
+ u32 level, int chunk_offset)
+{
+ int i;
+ int the_chunk;
+ int all_done = 1;
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (!tn)
+ return 1;
+
+ if (level > 0) {
+ for (i = YAFFS_NTNODES_INTERNAL - 1;
+ all_done && i >= 0;
+ i--) {
+ if (tn->internal[i]) {
+ all_done =
+ yaffs_soft_del_worker(in,
+ tn->internal[i],
+ level - 1,
+ (chunk_offset <<
+ YAFFS_TNODES_INTERNAL_BITS)
+ + i);
+ if (all_done) {
+ yaffs_free_tnode(dev,
+ tn->internal[i]);
+ tn->internal[i] = NULL;
+ } else {
+ /* Can this happen? */
+ }
+ }
+ }
+ return (all_done) ? 1 : 0;
+ }
+
+ /* level 0 */
+ for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
+ the_chunk = yaffs_get_group_base(dev, tn, i);
+ if (the_chunk) {
+ yaffs_soft_del_chunk(dev, the_chunk);
+ yaffs_load_tnode_0(dev, tn, i, 0);
+ }
+ }
+ return 1;
+}
+
+static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev = obj->my_dev;
+ struct yaffs_obj *parent;
+
+ yaffs_verify_obj_in_dir(obj);
+ parent = obj->parent;
+
+ yaffs_verify_dir(parent);
+
+ if (dev && dev->param.remove_obj_fn)
+ dev->param.remove_obj_fn(obj);
+
+ list_del_init(&obj->siblings);
+ obj->parent = NULL;
+
+ yaffs_verify_dir(parent);
+}
+
+void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj)
+{
+ if (!directory) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: Trying to add an object to a null pointer directory"
+ );
+ BUG();
+ return;
+ }
+ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: Trying to add an object to a non-directory"
+ );
+ BUG();
+ }
+
+ if (obj->siblings.prev == NULL) {
+ /* Not initialised */
+ BUG();
+ }
+
+ yaffs_verify_dir(directory);
+
+ yaffs_remove_obj_from_dir(obj);
+
+ /* Now add it */
+ list_add(&obj->siblings, &directory->variant.dir_variant.children);
+ obj->parent = directory;
+
+ if (directory == obj->my_dev->unlinked_dir
+ || directory == obj->my_dev->del_dir) {
+ obj->unlinked = 1;
+ obj->my_dev->n_unlinked_files++;
+ obj->rename_allowed = 0;
+ }
+
+ yaffs_verify_dir(directory);
+ yaffs_verify_obj_in_dir(obj);
+}
+
+static int yaffs_change_obj_name(struct yaffs_obj *obj,
+ struct yaffs_obj *new_dir,
+ const YCHAR *new_name, int force, int shadows)
+{
+ int unlink_op;
+ int del_op;
+ struct yaffs_obj *existing_target;
+
+ if (new_dir == NULL)
+ new_dir = obj->parent; /* use the old directory */
+
+ if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: yaffs_change_obj_name: new_dir is not a directory"
+ );
+ BUG();
+ }
+
+ unlink_op = (new_dir == obj->my_dev->unlinked_dir);
+ del_op = (new_dir == obj->my_dev->del_dir);
+
+ existing_target = yaffs_find_by_name(new_dir, new_name);
+
+ /* If the object is a file going into the unlinked directory,
+ * then it is OK to just stuff it in since duplicate names are OK.
+ * else only proceed if the new name does not exist and we're putting
+ * it into a directory.
+ */
+ if (!(unlink_op || del_op || force ||
+ shadows > 0 || !existing_target) ||
+ new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+ return YAFFS_FAIL;
+
+ yaffs_set_obj_name(obj, new_name);
+ obj->dirty = 1;
+ yaffs_add_obj_to_dir(new_dir, obj);
+
+ if (unlink_op)
+ obj->unlinked = 1;
+
+ /* If it is a deletion then we mark it as a shrink for gc */
+ if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >= 0)
+ return YAFFS_OK;
+
+ return YAFFS_FAIL;
+}
+
+/*------------------------ Short Operations Cache ------------------------------
+ * In many situations where there is no high level buffering a lot of
+ * reads might be short sequential reads, and a lot of writes may be short
+ * sequential writes. eg. scanning/writing a jpeg file.
+ * In these cases, a short read/write cache can provide a huge performance
+ * benefit with dumb-as-a-rock code.
+ * In Linux, the page cache provides read buffering and the short op cache
+ * provides write buffering.
+ *
+ * There are a small number (~10) of cache chunks per device so that we don't
+ * need a very intelligent search.
+ */
+
+static int yaffs_obj_cache_dirty(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev = obj->my_dev;
+ int i;
+ struct yaffs_cache *cache;
+ int n_caches = obj->my_dev->param.n_caches;
+
+ for (i = 0; i < n_caches; i++) {
+ cache = &dev->cache[i];
+ if (cache->object == obj && cache->dirty)
+ return 1;
+ }
+
+ return 0;
+}
+
+static void yaffs_flush_single_cache(struct yaffs_cache *cache, int discard)
+{
+
+ if (!cache || cache->locked)
+ return;
+
+ /* Write it out and free it up if need be.*/
+ if (cache->dirty) {
+ yaffs_wr_data_obj(cache->object,
+ cache->chunk_id,
+ cache->data,
+ cache->n_bytes,
+ 1);
+
+ cache->dirty = 0;
+ }
+
+ if (discard)
+ cache->object = NULL;
+}
+
+static void yaffs_flush_file_cache(struct yaffs_obj *obj, int discard)
+{
+ struct yaffs_dev *dev = obj->my_dev;
+ int i;
+ struct yaffs_cache *cache;
+ int n_caches = obj->my_dev->param.n_caches;
+
+ if (n_caches < 1)
+ return;
+
+
+ /* Find the chunks for this object and flush them. */
+ for (i = 0; i < n_caches; i++) {
+ cache = &dev->cache[i];
+ if (cache->object == obj)
+ yaffs_flush_single_cache(cache, discard);
+ }
+
+}
+
+
+void yaffs_flush_whole_cache(struct yaffs_dev *dev, int discard)
+{
+ struct yaffs_obj *obj;
+ int n_caches = dev->param.n_caches;
+ int i;
+
+ /* Find a dirty object in the cache and flush it...
+ * until there are no further dirty objects.
+ */
+ do {
+ obj = NULL;
+ for (i = 0; i < n_caches && !obj; i++) {
+ if (dev->cache[i].object && dev->cache[i].dirty)
+ obj = dev->cache[i].object;
+ }
+ if (obj)
+ yaffs_flush_file_cache(obj, discard);
+ } while (obj);
+
+}
+
+/* Grab us an unused cache chunk for use.
+ * First look for an empty one.
+ * Then look for the least recently used non-dirty one.
+ * Then look for the least recently used dirty one...., flush and look again.
+ */
+static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev)
+{
+ int i;
+
+ if (dev->param.n_caches > 0) {
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (!dev->cache[i].object)
+ return &dev->cache[i];
+ }
+ }
+
+ return NULL;
+}
+
+static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev)
+{
+ struct yaffs_cache *cache;
+ int usage;
+ int i;
+
+ if (dev->param.n_caches < 1)
+ return NULL;
+
+ /* First look for an unused cache */
+
+ cache = yaffs_grab_chunk_worker(dev);
+
+ if (cache)
+ return cache;
+
+ /*
+	 * They were all in use.
+ * Find the LRU cache and flush it if it is dirty.
+ */
+
+ usage = -1;
+ cache = NULL;
+
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].object &&
+ !dev->cache[i].locked &&
+ (dev->cache[i].last_use < usage || !cache)) {
+ usage = dev->cache[i].last_use;
+ cache = &dev->cache[i];
+ }
+ }
+
+#if 1
+ yaffs_flush_single_cache(cache, 1);
+#else
+ yaffs_flush_file_cache(cache->object, 1);
+ cache = yaffs_grab_chunk_worker(dev);
+#endif
+
+ return cache;
+}
+
+/* Find a cached chunk */
+static struct yaffs_cache *yaffs_find_chunk_cache(const struct yaffs_obj *obj,
+ int chunk_id)
+{
+ struct yaffs_dev *dev = obj->my_dev;
+ int i;
+
+ if (dev->param.n_caches < 1)
+ return NULL;
+
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].object == obj &&
+ dev->cache[i].chunk_id == chunk_id) {
+ dev->cache_hits++;
+
+ return &dev->cache[i];
+ }
+ }
+ return NULL;
+}
+
+/* Mark the chunk for the least recently used algorithm */
+static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache,
+ int is_write)
+{
+ int i;
+
+ if (dev->param.n_caches < 1)
+ return;
+
+ if (dev->cache_last_use < 0 ||
+ dev->cache_last_use > 100000000) {
+ /* Reset the cache usages */
+ for (i = 1; i < dev->param.n_caches; i++)
+ dev->cache[i].last_use = 0;
+
+ dev->cache_last_use = 0;
+ }
+ dev->cache_last_use++;
+ cache->last_use = dev->cache_last_use;
+
+ if (is_write)
+ cache->dirty = 1;
+}
+
+/* Invalidate a single cache page.
+ * Do this when a whole page gets written,
+ * ie the short cache for this page is no longer valid.
+ */
+static void yaffs_invalidate_chunk_cache(struct yaffs_obj *object, int chunk_id)
+{
+ struct yaffs_cache *cache;
+
+ if (object->my_dev->param.n_caches > 0) {
+ cache = yaffs_find_chunk_cache(object, chunk_id);
+
+ if (cache)
+ cache->object = NULL;
+ }
+}
+
+/* Invalidate all the cache pages associated with this object
+ * Do this whenever the file is deleted or resized.
+ */
+static void yaffs_invalidate_whole_cache(struct yaffs_obj *in)
+{
+ int i;
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (dev->param.n_caches > 0) {
+ /* Invalidate it. */
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].object == in)
+ dev->cache[i].object = NULL;
+ }
+ }
+}
+
+static void yaffs_unhash_obj(struct yaffs_obj *obj)
+{
+ int bucket;
+ struct yaffs_dev *dev = obj->my_dev;
+
+ /* If it is still linked into the bucket list, free from the list */
+ if (!list_empty(&obj->hash_link)) {
+ list_del_init(&obj->hash_link);
+ bucket = yaffs_hash_fn(obj->obj_id);
+ dev->obj_bucket[bucket].count--;
+ }
+}
+
+/* FreeObject frees up a Object and puts it back on the free list */
+static void yaffs_free_obj(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev;
+
+ if (!obj) {
+ BUG();
+ return;
+ }
+ dev = obj->my_dev;
+ yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p",
+ obj, obj->my_inode);
+ if (obj->parent)
+ BUG();
+ if (!list_empty(&obj->siblings))
+ BUG();
+
+ if (obj->my_inode) {
+ /* We're still hooked up to a cached inode.
+ * Don't delete now, but mark for later deletion
+ */
+ obj->defered_free = 1;
+ return;
+ }
+
+ yaffs_unhash_obj(obj);
+
+ yaffs_free_raw_obj(dev, obj);
+ dev->n_obj--;
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+}
+
+void yaffs_handle_defered_free(struct yaffs_obj *obj)
+{
+ if (obj->defered_free)
+ yaffs_free_obj(obj);
+}
+
+static int yaffs_generic_obj_del(struct yaffs_obj *in)
+{
+	/* Invalidate the file's data in the cache, without flushing. */
+ yaffs_invalidate_whole_cache(in);
+
+ if (in->my_dev->param.is_yaffs2 && in->parent != in->my_dev->del_dir) {
+ /* Move to unlinked directory so we have a deletion record */
+ yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0,
+ 0);
+ }
+
+ yaffs_remove_obj_from_dir(in);
+ yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
+ in->hdr_chunk = 0;
+
+ yaffs_free_obj(in);
+ return YAFFS_OK;
+
+}
+
+static void yaffs_soft_del_file(struct yaffs_obj *obj)
+{
+ if (!obj->deleted ||
+ obj->variant_type != YAFFS_OBJECT_TYPE_FILE ||
+ obj->soft_del)
+ return;
+
+ if (obj->n_data_chunks <= 0) {
+ /* Empty file with no duplicate object headers,
+ * just delete it immediately */
+ yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top);
+ obj->variant.file_variant.top = NULL;
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "yaffs: Deleting empty file %d",
+ obj->obj_id);
+ yaffs_generic_obj_del(obj);
+ } else {
+ yaffs_soft_del_worker(obj,
+ obj->variant.file_variant.top,
+ obj->variant.
+ file_variant.top_level, 0);
+ obj->soft_del = 1;
+ }
+}
+
+/* Pruning removes any part of the file structure tree that is beyond the
+ * bounds of the file (ie that does not point to chunks).
+ *
+ * A file should only get pruned when its size is reduced.
+ *
+ * Before pruning, the chunks must be pulled from the tree and the
+ * level 0 tnode entries must be zeroed out.
+ * Could also use this for file deletion, but that's probably better handled
+ * by a special case.
+ *
+ * This function is recursive. For levels > 0 the function is called again on
+ * any sub-tree. For level == 0 we just check if the sub-tree has data.
+ * If there is no data in a subtree then it is pruned.
+ */
+
+static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev,
+ struct yaffs_tnode *tn, u32 level,
+ int del0)
+{
+ int i;
+ int has_data;
+
+ if (!tn)
+ return tn;
+
+ has_data = 0;
+
+ if (level > 0) {
+ for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
+ if (tn->internal[i]) {
+ tn->internal[i] =
+ yaffs_prune_worker(dev,
+ tn->internal[i],
+ level - 1,
+ (i == 0) ? del0 : 1);
+ }
+
+ if (tn->internal[i])
+ has_data++;
+ }
+ } else {
+ int tnode_size_u32 = dev->tnode_size / sizeof(u32);
+ u32 *map = (u32 *) tn;
+
+ for (i = 0; !has_data && i < tnode_size_u32; i++) {
+ if (map[i])
+ has_data++;
+ }
+ }
+
+ if (has_data == 0 && del0) {
+ /* Free and return NULL */
+ yaffs_free_tnode(dev, tn);
+ tn = NULL;
+ }
+ return tn;
+}
+
+static int yaffs_prune_tree(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct)
+{
+ int i;
+ int has_data;
+ int done = 0;
+ struct yaffs_tnode *tn;
+
+ if (file_struct->top_level < 1)
+ return YAFFS_OK;
+
+ file_struct->top =
+ yaffs_prune_worker(dev, file_struct->top, file_struct->top_level, 0);
+
+ /* Now we have a tree with all the non-zero branches NULL but
+ * the height is the same as it was.
+ * Let's see if we can trim internal tnodes to shorten the tree.
+ * We can do this if only the 0th element in the tnode is in use
+ * (ie all the non-zero are NULL)
+ */
+
+ while (file_struct->top_level && !done) {
+ tn = file_struct->top;
+
+ has_data = 0;
+ for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
+ if (tn->internal[i])
+ has_data++;
+ }
+
+ if (!has_data) {
+ file_struct->top = tn->internal[0];
+ file_struct->top_level--;
+ yaffs_free_tnode(dev, tn);
+ } else {
+ done = 1;
+ }
+ }
+
+ return YAFFS_OK;
+}
+
+/*-------------------- End of File Structure functions.-------------------*/
+
+/* alloc_empty_obj gets us a clean Object.*/
+static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev);
+
+ if (!obj)
+ return obj;
+
+ dev->n_obj++;
+
+ /* Now sweeten it up... */
+
+ memset(obj, 0, sizeof(struct yaffs_obj));
+ obj->being_created = 1;
+
+ obj->my_dev = dev;
+ obj->hdr_chunk = 0;
+ obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN;
+ INIT_LIST_HEAD(&(obj->hard_links));
+ INIT_LIST_HEAD(&(obj->hash_link));
+ INIT_LIST_HEAD(&obj->siblings);
+
+ /* Now make the directory sane */
+ if (dev->root_dir) {
+ obj->parent = dev->root_dir;
+ list_add(&(obj->siblings),
+ &dev->root_dir->variant.dir_variant.children);
+ }
+
+ /* Add it to the lost and found directory.
+ * NB Can't put root or lost-n-found in lost-n-found so
+ * check if lost-n-found exists first
+ */
+ if (dev->lost_n_found)
+ yaffs_add_obj_to_dir(dev->lost_n_found, obj);
+
+ obj->being_created = 0;
+
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+
+ return obj;
+}
+
+static int yaffs_find_nice_bucket(struct yaffs_dev *dev)
+{
+ int i;
+ int l = 999;
+ int lowest = 999999;
+
+ /* Search for the shortest list or one that
+ * isn't too long.
+ */
+
+ for (i = 0; i < 10 && lowest > 4; i++) {
+ dev->bucket_finder++;
+ dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS;
+ if (dev->obj_bucket[dev->bucket_finder].count < lowest) {
+ lowest = dev->obj_bucket[dev->bucket_finder].count;
+ l = dev->bucket_finder;
+ }
+ }
+
+ return l;
+}
+
+static int yaffs_new_obj_id(struct yaffs_dev *dev)
+{
+ int bucket = yaffs_find_nice_bucket(dev);
+ int found = 0;
+ struct list_head *i;
+ u32 n = (u32) bucket;
+
+ /* Now find an object value that has not already been taken
+ * by scanning the list.
+ */
+
+ while (!found) {
+ found = 1;
+ n += YAFFS_NOBJECT_BUCKETS;
+ if (1 || dev->obj_bucket[bucket].count > 0) {
+ list_for_each(i, &dev->obj_bucket[bucket].list) {
+ /* If there is already one in the list */
+ if (i && list_entry(i, struct yaffs_obj,
+ hash_link)->obj_id == n) {
+ found = 0;
+ }
+ }
+ }
+ }
+ return n;
+}
+
+static void yaffs_hash_obj(struct yaffs_obj *in)
+{
+ int bucket = yaffs_hash_fn(in->obj_id);
+ struct yaffs_dev *dev = in->my_dev;
+
+ list_add(&in->hash_link, &dev->obj_bucket[bucket].list);
+ dev->obj_bucket[bucket].count++;
+}
+
+struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number)
+{
+ int bucket = yaffs_hash_fn(number);
+ struct list_head *i;
+ struct yaffs_obj *in;
+
+ list_for_each(i, &dev->obj_bucket[bucket].list) {
+ /* Look if it is in the list */
+ in = list_entry(i, struct yaffs_obj, hash_link);
+ if (in->obj_id == number) {
+ /* Don't show if it is defered free */
+ if (in->defered_free)
+ return NULL;
+ return in;
+ }
+ }
+
+ return NULL;
+}
+
+static struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number,
+ enum yaffs_obj_type type)
+{
+ struct yaffs_obj *the_obj = NULL;
+ struct yaffs_tnode *tn = NULL;
+
+ if (number < 0)
+ number = yaffs_new_obj_id(dev);
+
+ if (type == YAFFS_OBJECT_TYPE_FILE) {
+ tn = yaffs_get_tnode(dev);
+ if (!tn)
+ return NULL;
+ }
+
+ the_obj = yaffs_alloc_empty_obj(dev);
+ if (!the_obj) {
+ if (tn)
+ yaffs_free_tnode(dev, tn);
+ return NULL;
+ }
+
+ the_obj->fake = 0;
+ the_obj->rename_allowed = 1;
+ the_obj->unlink_allowed = 1;
+ the_obj->obj_id = number;
+ yaffs_hash_obj(the_obj);
+ the_obj->variant_type = type;
+ yaffs_load_current_time(the_obj, 1, 1);
+
+ switch (type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ the_obj->variant.file_variant.file_size = 0;
+ the_obj->variant.file_variant.scanned_size = 0;
+ the_obj->variant.file_variant.shrink_size =
+ yaffs_max_file_size(dev);
+ the_obj->variant.file_variant.top_level = 0;
+ the_obj->variant.file_variant.top = tn;
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ INIT_LIST_HEAD(&the_obj->variant.dir_variant.children);
+ INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* No action required */
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* todo this should not happen */
+ break;
+ }
+ return the_obj;
+}
+
+static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev,
+ int number, u32 mode)
+{
+
+ struct yaffs_obj *obj =
+ yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
+
+ if (!obj)
+ return NULL;
+
+ obj->fake = 1; /* it is fake so it might not use NAND */
+ obj->rename_allowed = 0;
+ obj->unlink_allowed = 0;
+ obj->deleted = 0;
+ obj->unlinked = 0;
+ obj->yst_mode = mode;
+ obj->my_dev = dev;
+ obj->hdr_chunk = 0; /* Not a valid chunk. */
+ return obj;
+
+}
+
+
+static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ int i;
+
+ dev->n_obj = 0;
+ dev->n_tnodes = 0;
+ yaffs_init_raw_tnodes_and_objs(dev);
+
+ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+ INIT_LIST_HEAD(&dev->obj_bucket[i].list);
+ dev->obj_bucket[i].count = 0;
+ }
+}
+
+struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
+ int number,
+ enum yaffs_obj_type type)
+{
+ struct yaffs_obj *the_obj = NULL;
+
+ if (number > 0)
+ the_obj = yaffs_find_by_number(dev, number);
+
+ if (!the_obj)
+ the_obj = yaffs_new_obj(dev, number, type);
+
+ return the_obj;
+
+}
+
+YCHAR *yaffs_clone_str(const YCHAR *str)
+{
+ YCHAR *new_str = NULL;
+ int len;
+
+ if (!str)
+ str = _Y("");
+
+ len = strnlen(str, YAFFS_MAX_ALIAS_LENGTH);
+ new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS);
+ if (new_str) {
+ strncpy(new_str, str, len);
+ new_str[len] = 0;
+ }
+ return new_str;
+
+}
+/*
+ *yaffs_update_parent() handles fixing a directory's mtime and ctime when a new
+ * link (ie. name) is created or deleted in the directory.
+ *
+ * ie.
+ * create dir/a : update dir's mtime/ctime
+ * rm dir/a: update dir's mtime/ctime
+ * modify dir/a: don't update dir's mtime/ctime
+ *
+ * This can be handled immediately or deferred. Deferring helps reduce the number
+ * of updates when many files in a directory are changed within a brief period.
+ *
+ * If the directory updating is defered then yaffs_update_dirty_dirs must be
+ * called periodically.
+ */
+
+static void yaffs_update_parent(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev;
+
+ if (!obj)
+ return;
+ dev = obj->my_dev;
+ obj->dirty = 1;
+ yaffs_load_current_time(obj, 0, 1);
+ if (dev->param.defered_dir_update) {
+ struct list_head *link = &obj->variant.dir_variant.dirty;
+
+ if (list_empty(link)) {
+ list_add(link, &dev->dirty_dirs);
+ yaffs_trace(YAFFS_TRACE_BACKGROUND,
+ "Added object %d to dirty directories",
+ obj->obj_id);
+ }
+
+ } else {
+ yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
+ }
+}
+
+void yaffs_update_dirty_dirs(struct yaffs_dev *dev)
+{
+ struct list_head *link;
+ struct yaffs_obj *obj;
+ struct yaffs_dir_var *d_s;
+ union yaffs_obj_var *o_v;
+
+ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories");
+
+ while (!list_empty(&dev->dirty_dirs)) {
+ link = dev->dirty_dirs.next;
+ list_del_init(link);
+
+ d_s = list_entry(link, struct yaffs_dir_var, dirty);
+ o_v = list_entry(d_s, union yaffs_obj_var, dir_variant);
+ obj = list_entry(o_v, struct yaffs_obj, variant);
+
+ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d",
+ obj->obj_id);
+
+ if (obj->dirty)
+ yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
+ }
+}
+
+/*
+ * Mknod (create) a new object.
+ * equiv_obj only has meaning for a hard link;
+ * alias_str only has meaning for a symlink.
+ * rdev only has meaning for devices (a subset of special objects)
+ */
+
+static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type,
+ struct yaffs_obj *parent,
+ const YCHAR *name,
+ u32 mode,
+ u32 uid,
+ u32 gid,
+ struct yaffs_obj *equiv_obj,
+ const YCHAR *alias_str, u32 rdev)
+{
+ struct yaffs_obj *in;
+ YCHAR *str = NULL;
+ struct yaffs_dev *dev = parent->my_dev;
+
+ /* Check if the entry exists.
+ * If it does then fail the call since we don't want a dup. */
+ if (yaffs_find_by_name(parent, name))
+ return NULL;
+
+ if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
+ str = yaffs_clone_str(alias_str);
+ if (!str)
+ return NULL;
+ }
+
+ in = yaffs_new_obj(dev, -1, type);
+
+ if (!in) {
+ kfree(str);
+ return NULL;
+ }
+
+ in->hdr_chunk = 0;
+ in->valid = 1;
+ in->variant_type = type;
+
+ in->yst_mode = mode;
+
+ yaffs_attribs_init(in, gid, uid, rdev);
+
+ in->n_data_chunks = 0;
+
+ yaffs_set_obj_name(in, name);
+ in->dirty = 1;
+
+ yaffs_add_obj_to_dir(parent, in);
+
+ in->my_dev = parent->my_dev;
+
+ switch (type) {
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ in->variant.symlink_variant.alias = str;
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ in->variant.hardlink_variant.equiv_obj = equiv_obj;
+ in->variant.hardlink_variant.equiv_id = equiv_obj->obj_id;
+ list_add(&in->hard_links, &equiv_obj->hard_links);
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* do nothing */
+ break;
+ }
+
+ if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) {
+ /* Could not create the object header, fail */
+ yaffs_del_obj(in);
+ in = NULL;
+ }
+
+ if (in)
+ yaffs_update_parent(parent);
+
+ return in;
+}
+
+struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
+ const YCHAR *name, u32 mode, u32 uid,
+ u32 gid)
+{
+ return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
+ uid, gid, NULL, NULL, 0);
+}
+
+struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
+ u32 mode, u32 uid, u32 gid)
+{
+ return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
+ mode, uid, gid, NULL, NULL, 0);
+}
+
+struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
+ const YCHAR *name, u32 mode, u32 uid,
+ u32 gid, u32 rdev)
+{
+ return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
+ uid, gid, NULL, NULL, rdev);
+}
+
+struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
+ const YCHAR *name, u32 mode, u32 uid,
+ u32 gid, const YCHAR *alias)
+{
+ return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
+ uid, gid, NULL, alias, 0);
+}
+
+/* yaffs_link_obj returns the object id of the equivalent object.*/
+struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
+ struct yaffs_obj *equiv_obj)
+{
+ /* Get the real object in case we were fed a hard link obj */
+ equiv_obj = yaffs_get_equivalent_obj(equiv_obj);
+
+ if (yaffs_create_obj(YAFFS_OBJECT_TYPE_HARDLINK,
+ parent, name, 0, 0, 0,
+ equiv_obj, NULL, 0))
+ return equiv_obj;
+
+ return NULL;
+
+}
+
+
+
+/*---------------------- Block Management and Page Allocation -------------*/
+
+static void yaffs_deinit_blocks(struct yaffs_dev *dev)
+{
+ if (dev->block_info_alt && dev->block_info)
+ vfree(dev->block_info);
+ else
+ kfree(dev->block_info);
+
+ dev->block_info_alt = 0;
+
+ dev->block_info = NULL;
+
+ if (dev->chunk_bits_alt && dev->chunk_bits)
+ vfree(dev->chunk_bits);
+ else
+ kfree(dev->chunk_bits);
+ dev->chunk_bits_alt = 0;
+ dev->chunk_bits = NULL;
+}
+
+static int yaffs_init_blocks(struct yaffs_dev *dev)
+{
+ int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
+
+ dev->block_info = NULL;
+ dev->chunk_bits = NULL;
+ dev->alloc_block = -1; /* force it to get a new one */
+
+	/* If the first allocation strategy fails, try the alternate one */
+ dev->block_info =
+ kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS);
+ if (!dev->block_info) {
+ dev->block_info =
+ vmalloc(n_blocks * sizeof(struct yaffs_block_info));
+ dev->block_info_alt = 1;
+ } else {
+ dev->block_info_alt = 0;
+ }
+
+ if (!dev->block_info)
+ goto alloc_error;
+
+ /* Set up dynamic blockinfo stuff. Round up bytes. */
+ dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8;
+ dev->chunk_bits =
+ kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS);
+ if (!dev->chunk_bits) {
+ dev->chunk_bits =
+ vmalloc(dev->chunk_bit_stride * n_blocks);
+ dev->chunk_bits_alt = 1;
+ } else {
+ dev->chunk_bits_alt = 0;
+ }
+ if (!dev->chunk_bits)
+ goto alloc_error;
+
+
+ memset(dev->block_info, 0, n_blocks * sizeof(struct yaffs_block_info));
+ memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks);
+ return YAFFS_OK;
+
+alloc_error:
+ yaffs_deinit_blocks(dev);
+ return YAFFS_FAIL;
+}
+
+
+void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no)
+{
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no);
+ int erased_ok = 0;
+ int i;
+
+ /* If the block is still healthy erase it and mark as clean.
+ * If the block has had a data failure, then retire it.
+ */
+
+ yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
+ "yaffs_block_became_dirty block %d state %d %s",
+ block_no, bi->block_state,
+ (bi->needs_retiring) ? "needs retiring" : "");
+
+ yaffs2_clear_oldest_dirty_seq(dev, bi);
+
+ bi->block_state = YAFFS_BLOCK_STATE_DIRTY;
+
+ /* If this is the block being garbage collected then stop gc'ing */
+ if (block_no == dev->gc_block)
+ dev->gc_block = 0;
+
+ /* If this block is currently the best candidate for gc
+ * then drop as a candidate */
+ if (block_no == dev->gc_dirtiest) {
+ dev->gc_dirtiest = 0;
+ dev->gc_pages_in_use = 0;
+ }
+
+ if (!bi->needs_retiring) {
+ yaffs2_checkpt_invalidate(dev);
+ erased_ok = yaffs_erase_block(dev, block_no);
+ if (!erased_ok) {
+ dev->n_erase_failures++;
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>> Erasure failed %d", block_no);
+ }
+ }
+
+ /* Verify erasure if needed */
+ if (erased_ok &&
+ ((yaffs_trace_mask & YAFFS_TRACE_ERASE) ||
+ !yaffs_skip_verification(dev))) {
+ for (i = 0; i < dev->param.chunks_per_block; i++) {
+ if (!yaffs_check_chunk_erased(dev,
+ block_no * dev->param.chunks_per_block + i)) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ ">>Block %d erasure supposedly OK, but chunk %d not erased",
+ block_no, i);
+ }
+ }
+ }
+
+ if (!erased_ok) {
+ /* We lost a block of free space */
+ dev->n_free_chunks -= dev->param.chunks_per_block;
+ yaffs_retire_block(dev, block_no);
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>> Block %d retired", block_no);
+ return;
+ }
+
+ /* Clean it up... */
+ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
+ bi->seq_number = 0;
+ dev->n_erased_blocks++;
+ bi->pages_in_use = 0;
+ bi->soft_del_pages = 0;
+ bi->has_shrink_hdr = 0;
+ bi->skip_erased_check = 1; /* Clean, so no need to check */
+ bi->gc_prioritise = 0;
+ bi->has_summary = 0;
+
+ yaffs_clear_chunk_bits(dev, block_no);
+
+ yaffs_trace(YAFFS_TRACE_ERASE, "Erased block %d", block_no);
+}
+
+static inline int yaffs_gc_process_chunk(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi,
+ int old_chunk, u8 *buffer)
+{
+ int new_chunk;
+ int mark_flash = 1;
+ struct yaffs_ext_tags tags;
+ struct yaffs_obj *object;
+ int matching_chunk;
+ int ret_val = YAFFS_OK;
+
+ memset(&tags, 0, sizeof(tags));
+ yaffs_rd_chunk_tags_nand(dev, old_chunk,
+ buffer, &tags);
+ object = yaffs_find_by_number(dev, tags.obj_id);
+
+ yaffs_trace(YAFFS_TRACE_GC_DETAIL,
+ "Collecting chunk in block %d, %d %d %d ",
+ dev->gc_chunk, tags.obj_id,
+ tags.chunk_id, tags.n_bytes);
+
+ if (object && !yaffs_skip_verification(dev)) {
+ if (tags.chunk_id == 0)
+ matching_chunk =
+ object->hdr_chunk;
+ else if (object->soft_del)
+ /* Defeat the test */
+ matching_chunk = old_chunk;
+ else
+ matching_chunk =
+ yaffs_find_chunk_in_file
+ (object, tags.chunk_id,
+ NULL);
+
+ if (old_chunk != matching_chunk)
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "gc: page in gc mismatch: %d %d %d %d",
+ old_chunk,
+ matching_chunk,
+ tags.obj_id,
+ tags.chunk_id);
+ }
+
+ if (!object) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "page %d in gc has no object: %d %d %d ",
+ old_chunk,
+ tags.obj_id, tags.chunk_id,
+ tags.n_bytes);
+ }
+
+ if (object &&
+ object->deleted &&
+ object->soft_del && tags.chunk_id != 0) {
+ /* Data chunk in a soft deleted file,
+ * throw it away.
+ * It's a soft deleted data chunk,
+ * No need to copy this, just forget
+ * about it and fix up the object.
+ */
+
+ /* Free chunks already includes
+ * softdeleted chunks, how ever this
+ * chunk is going to soon be really
+ * deleted which will increment free
+ * chunks. We have to decrement free
+ * chunks so this works out properly.
+ */
+ dev->n_free_chunks--;
+ bi->soft_del_pages--;
+
+ object->n_data_chunks--;
+ if (object->n_data_chunks <= 0) {
+			/* remember to clean up obj */
+ dev->gc_cleanup_list[dev->n_clean_ups] = tags.obj_id;
+ dev->n_clean_ups++;
+ }
+ mark_flash = 0;
+ } else if (object) {
+ /* It's either a data chunk in a live
+ * file or an ObjectHeader, so we're
+ * interested in it.
+ * NB Need to keep the ObjectHeaders of
+ * deleted files until the whole file
+ * has been deleted off
+ */
+ tags.serial_number++;
+ dev->n_gc_copies++;
+
+ if (tags.chunk_id == 0) {
+ /* It is an object Id,
+ * We need to nuke the
+ * shrinkheader flags since its
+ * work is done.
+ * Also need to clean up
+ * shadowing.
+ */
+ struct yaffs_obj_hdr *oh;
+ oh = (struct yaffs_obj_hdr *) buffer;
+
+ oh->is_shrink = 0;
+ tags.extra_is_shrink = 0;
+ oh->shadows_obj = 0;
+ oh->inband_shadowed_obj_id = 0;
+ tags.extra_shadows = 0;
+
+ /* Update file size */
+ if (object->variant_type == YAFFS_OBJECT_TYPE_FILE) {
+ yaffs_oh_size_load(oh,
+ object->variant.file_variant.file_size);
+ tags.extra_file_size =
+ object->variant.file_variant.file_size;
+ }
+
+ yaffs_verify_oh(object, oh, &tags, 1);
+ new_chunk =
+ yaffs_write_new_chunk(dev, (u8 *) oh, &tags, 1);
+ } else {
+ new_chunk =
+ yaffs_write_new_chunk(dev, buffer, &tags, 1);
+ }
+
+ if (new_chunk < 0) {
+ ret_val = YAFFS_FAIL;
+ } else {
+
+ /* Now fix up the Tnodes etc. */
+
+ if (tags.chunk_id == 0) {
+ /* It's a header */
+ object->hdr_chunk = new_chunk;
+ object->serial = tags.serial_number;
+ } else {
+ /* It's a data chunk */
+ yaffs_put_chunk_in_file(object, tags.chunk_id,
+ new_chunk, 0);
+ }
+ }
+ }
+ if (ret_val == YAFFS_OK)
+ yaffs_chunk_del(dev, old_chunk, mark_flash, __LINE__);
+ return ret_val;
+}
+
+static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block) /* GC one block; whole_block ? full sweep : at most 5 chunk copies */
+{
+	int old_chunk;
+	int ret_val = YAFFS_OK;
+	int i;
+	int is_checkpt_block;
+	int max_copies;
+	int chunks_before = yaffs_get_erased_chunks(dev);
+	int chunks_after;
+	struct yaffs_block_info *bi = yaffs_get_block_info(dev, block);
+
+	is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT);
+
+	yaffs_trace(YAFFS_TRACE_TRACING,
+		"Collecting block %d, in use %d, shrink %d, whole_block %d",
+		block, bi->pages_in_use, bi->has_shrink_hdr,
+		whole_block);
+
+	/*yaffs_verify_free_chunks(dev); */
+
+	if (bi->block_state == YAFFS_BLOCK_STATE_FULL)
+		bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;
+
+	bi->has_shrink_hdr = 0;	/* clear the flag so that the block can erase */
+
+	dev->gc_disable = 1; /* avoid recursive gc triggered by the writes below */
+
+	yaffs_summary_gc(dev, block);
+
+	if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) {
+		yaffs_trace(YAFFS_TRACE_TRACING,
+			"Collecting block %d that has no chunks in use",
+			block);
+		yaffs_block_became_dirty(dev, block);
+	} else {
+
+		u8 *buffer = yaffs_get_temp_buffer(dev);
+
+		yaffs_verify_blk(dev, bi, block);
+
+		max_copies = (whole_block) ? dev->param.chunks_per_block : 5;
+		old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk; /* resume from saved gc_chunk: a gc may span several calls */
+
+		for (/* init already done */ ;
+		     ret_val == YAFFS_OK &&
+		     dev->gc_chunk < dev->param.chunks_per_block &&
+		     (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) &&
+		     max_copies > 0;
+		     dev->gc_chunk++, old_chunk++) {
+			if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) {
+				/* Page is in use and might need to be copied */
+				max_copies--;
+				ret_val = yaffs_gc_process_chunk(dev, bi,
+							old_chunk, buffer);
+			}
+		}
+		yaffs_release_temp_buffer(dev, buffer);
+	}
+
+	yaffs_verify_collected_blk(dev, bi, block);
+
+	if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
+		/*
+		 * The gc did not complete. Set block state back to FULL
+		 * because checkpointing does not restore gc.
+		 */
+		bi->block_state = YAFFS_BLOCK_STATE_FULL;
+	} else {
+		/* The gc completed. */
+		/* Do any required cleanups */
+		for (i = 0; i < dev->n_clean_ups; i++) {
+			/* Time to delete the file too */
+			struct yaffs_obj *object =
+			    yaffs_find_by_number(dev, dev->gc_cleanup_list[i]);
+			if (object) {
+				yaffs_free_tnode(dev,
+					  object->variant.file_variant.top);
+				object->variant.file_variant.top = NULL;
+				yaffs_trace(YAFFS_TRACE_GC,
+					"yaffs: About to finally delete object %d",
+					object->obj_id);
+				yaffs_generic_obj_del(object);
+				object->my_dev->n_deleted_files--;
+			}
+
+		}
+		chunks_after = yaffs_get_erased_chunks(dev);
+		if (chunks_before >= chunks_after)
+			yaffs_trace(YAFFS_TRACE_GC,
+				"gc did not increase free chunks before %d after %d",
+				chunks_before, chunks_after);
+		dev->gc_block = 0; /* gc of this block finished: reset gc state */
+		dev->gc_chunk = 0;
+		dev->n_clean_ups = 0;
+	}
+
+	dev->gc_disable = 0;
+
+	return ret_val;
+}
+
+/*
+ * find_gc_block() selects the dirtiest block (or close enough)
+ * for garbage collection.
+ */
+
+static unsigned yaffs_find_gc_block(struct yaffs_dev *dev,
+				    int aggressive, int background) /* Choose the next block to gc; returns 0 if none suitable */
+{
+	int i;
+	int iterations;
+	unsigned selected = 0;
+	int prioritised = 0;
+	int prioritised_exist = 0;
+	struct yaffs_block_info *bi;
+	int threshold;
+
+	/* First let's see if we need to grab a prioritised block */
+	if (dev->has_pending_prioritised_gc && !aggressive) {
+		dev->gc_dirtiest = 0;
+		bi = dev->block_info;
+		for (i = dev->internal_start_block;
+		     i <= dev->internal_end_block && !selected; i++) {
+
+			if (bi->gc_prioritise) {
+				prioritised_exist = 1;
+				if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
+				    yaffs_block_ok_for_gc(dev, bi)) {
+					selected = i;
+					prioritised = 1;
+				}
+			}
+			bi++;
+		}
+
+		/*
+		 * If there is a prioritised block and none was selected then
+		 * this happened because there is at least one old dirty block
+		 * gumming up the works. Let's gc the oldest dirty block.
+		 */
+
+		if (prioritised_exist &&
+		    !selected && dev->oldest_dirty_block > 0)
+			selected = dev->oldest_dirty_block;
+
+		if (!prioritised_exist)	/* None found, so we can clear this */
+			dev->has_pending_prioritised_gc = 0;
+	}
+
+	/* If we're doing aggressive GC then we are happy to take a less-dirty
+	 * block, and search harder.
+	 * else (leisurely gc), then we only bother to do this if the
+	 * block has only a few pages in use.
+	 */
+
+	if (!selected) {
+		int pages_used;
+		int n_blocks =
+		    dev->internal_end_block - dev->internal_start_block + 1;
+		if (aggressive) {
+			threshold = dev->param.chunks_per_block;
+			iterations = n_blocks;
+		} else {
+			int max_threshold;
+
+			if (background)
+				max_threshold = dev->param.chunks_per_block / 2;
+			else
+				max_threshold = dev->param.chunks_per_block / 8;
+
+			if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD)
+				max_threshold = YAFFS_GC_PASSIVE_THRESHOLD;
+
+			threshold = background ? (dev->gc_not_done + 2) * 2 : 0; /* tolerate more in-use pages the longer gc has been starved */
+			if (threshold < YAFFS_GC_PASSIVE_THRESHOLD)
+				threshold = YAFFS_GC_PASSIVE_THRESHOLD;
+			if (threshold > max_threshold)
+				threshold = max_threshold;
+
+			iterations = n_blocks / 16 + 1;
+			if (iterations > 100)
+				iterations = 100;
+		}
+
+		for (i = 0;
+		     i < iterations &&
+		     (dev->gc_dirtiest < 1 ||
+		      dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH);
+		     i++) {
+			dev->gc_block_finder++;
+			if (dev->gc_block_finder < dev->internal_start_block ||
+			    dev->gc_block_finder > dev->internal_end_block)
+				dev->gc_block_finder =
+				    dev->internal_start_block;
+
+			bi = yaffs_get_block_info(dev, dev->gc_block_finder);
+
+			pages_used = bi->pages_in_use - bi->soft_del_pages;
+
+			if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
+			    pages_used < dev->param.chunks_per_block &&
+			    (dev->gc_dirtiest < 1 ||
+			     pages_used < dev->gc_pages_in_use) &&
+			    yaffs_block_ok_for_gc(dev, bi)) {
+				dev->gc_dirtiest = dev->gc_block_finder;
+				dev->gc_pages_in_use = pages_used;
+			}
+		}
+
+		if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold)
+			selected = dev->gc_dirtiest;
+	}
+
+	/*
+	 * If nothing has been selected for a while, try the oldest dirty
+	 * because that's gumming up the works.
+	 */
+
+	if (!selected && dev->param.is_yaffs2 &&
+	    dev->gc_not_done >= (background ? 10 : 20)) {
+		yaffs2_find_oldest_dirty_seq(dev);
+		if (dev->oldest_dirty_block > 0) {
+			selected = dev->oldest_dirty_block;
+			dev->gc_dirtiest = selected;
+			dev->oldest_dirty_gc_count++;
+			bi = yaffs_get_block_info(dev, selected);
+			dev->gc_pages_in_use =
+			    bi->pages_in_use - bi->soft_del_pages;
+		} else {
+			dev->gc_not_done = 0;
+		}
+	}
+
+	if (selected) {
+		yaffs_trace(YAFFS_TRACE_GC,
+			"GC Selected block %d with %d free, prioritised:%d",
+			selected,
+			dev->param.chunks_per_block - dev->gc_pages_in_use,
+			prioritised);
+
+		dev->n_gc_blocks++;
+		if (background)
+			dev->bg_gcs++;
+
+		dev->gc_dirtiest = 0;
+		dev->gc_pages_in_use = 0;
+		dev->gc_not_done = 0;
+		if (dev->refresh_skip > 0)
+			dev->refresh_skip--;
+	} else {
+		dev->gc_not_done++;
+		yaffs_trace(YAFFS_TRACE_GC,
+			"GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s",
+			dev->gc_block_finder, dev->gc_not_done, threshold,
+			dev->gc_dirtiest, dev->gc_pages_in_use,
+			dev->oldest_dirty_block, background ? " bg" : "");
+	}
+
+	return selected;
+}
+
+/* New garbage collector
+ * If we're very low on erased blocks then we do aggressive garbage collection
+ * otherwise we do "leisurely" garbage collection.
+ * Aggressive gc looks further (whole array) and will accept less dirty blocks.
+ * Passive gc only inspects smaller areas and only accepts more dirty blocks.
+ *
+ * The idea is to help clear out space in a more spread-out manner.
+ * Dunno if it really does anything useful.
+ */
+static int yaffs_check_gc(struct yaffs_dev *dev, int background) /* Run gc if space is low; aggressive when erased blocks fall below the reserve */
+{
+	int aggressive = 0;
+	int gc_ok = YAFFS_OK;
+	int max_tries = 0;
+	int min_erased;
+	int erased_chunks;
+	int checkpt_block_adjust;
+
+	if (dev->param.gc_control_fn &&
+		(dev->param.gc_control_fn(dev) & 1) == 0) /* user hook can veto gc */
+		return YAFFS_OK;
+
+	if (dev->gc_disable)
+		/* Bail out so we don't get recursive gc */
+		return YAFFS_OK;
+
+	/* This loop should pass the first time.
+	 * Only loops here if the collection does not increase space.
+	 */
+
+	do {
+		max_tries++;
+
+		checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev);
+
+		min_erased =
+		    dev->param.n_reserved_blocks + checkpt_block_adjust + 1;
+		erased_chunks =
+		    dev->n_erased_blocks * dev->param.chunks_per_block;
+
+		/* If we need a block soon then do aggressive gc. */
+		if (dev->n_erased_blocks < min_erased)
+			aggressive = 1;
+		else {
+			if (!background
+			    && erased_chunks > (dev->n_free_chunks / 4))
+				break;
+
+			if (dev->gc_skip > 20)
+				dev->gc_skip = 20;
+			if (erased_chunks < dev->n_free_chunks / 2 ||
+			    dev->gc_skip < 1 || background)
+				aggressive = 0;
+			else {
+				dev->gc_skip--; /* plenty of space: skip this round */
+				break;
+			}
+		}
+
+		dev->gc_skip = 5;
+
+		/* If we don't already have a block being gc'd then see if we
+		 * should start another */
+
+		if (dev->gc_block < 1 && !aggressive) {
+			dev->gc_block = yaffs2_find_refresh_block(dev);
+			dev->gc_chunk = 0;
+			dev->n_clean_ups = 0;
+		}
+		if (dev->gc_block < 1) {
+			dev->gc_block =
+			    yaffs_find_gc_block(dev, aggressive, background);
+			dev->gc_chunk = 0;
+			dev->n_clean_ups = 0;
+		}
+
+		if (dev->gc_block > 0) {
+			dev->all_gcs++;
+			if (!aggressive)
+				dev->passive_gc_count++;
+
+			yaffs_trace(YAFFS_TRACE_GC,
+				"yaffs: GC n_erased_blocks %d aggressive %d",
+				dev->n_erased_blocks, aggressive);
+
+			gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive);
+		}
+
+		if (dev->n_erased_blocks < (dev->param.n_reserved_blocks) &&
+		    dev->gc_block > 0) {
+			yaffs_trace(YAFFS_TRACE_GC,
+				"yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d",
+				dev->n_erased_blocks, max_tries,
+				dev->gc_block);
+		}
+	} while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) &&
+		 (dev->gc_block > 0) && (max_tries < 2));
+
+	return aggressive ? gc_ok : YAFFS_OK; /* passive gc failures are not reported */
+}
+
+/*
+ * yaffs_bg_gc()
+ * Garbage collects. Intended to be called from a background thread.
+ * Returns non-zero if at least half the free chunks are erased.
+ */
+int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency) /* Background-thread gc entry point; urgency is only traced here */
+{
+	int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block;
+
+	yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency);
+
+	yaffs_check_gc(dev, 1);
+	return erased_chunks > dev->n_free_chunks / 2; /* non-zero: at least half the free chunks were already erased */
+}
+
+/*-------------------- Data file manipulation -----------------*/
+
+static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 * buffer) /* Read one file data chunk; zero-fill the buffer if the chunk is a hole */
+{
+	int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL);
+
+	if (nand_chunk >= 0)
+		return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk,
+						buffer, NULL);
+	else {
+		yaffs_trace(YAFFS_TRACE_NANDACCESS,
+			"Chunk %d not found zero instead",
+			nand_chunk);
+		/* get sane (zero) data if you read a hole */
+		memset(buffer, 0, in->my_dev->data_bytes_per_chunk);
+		return 0;
+	}
+
+}
+
+void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
+		     int lyn) /* Delete one NAND chunk; lyn is the caller's __LINE__, used for trace */
+{
+	int block;
+	int page;
+	struct yaffs_ext_tags tags;
+	struct yaffs_block_info *bi;
+
+	if (chunk_id <= 0)
+		return;
+
+	dev->n_deletions++;
+	block = chunk_id / dev->param.chunks_per_block;
+	page = chunk_id % dev->param.chunks_per_block;
+
+	if (!yaffs_check_chunk_bit(dev, block, page))
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Deleting invalid chunk %d", chunk_id);
+
+	bi = yaffs_get_block_info(dev, block);
+
+	yaffs2_update_oldest_dirty_seq(dev, block, bi);
+
+	yaffs_trace(YAFFS_TRACE_DELETION,
+		"line %d delete of chunk %d",
+		lyn, chunk_id);
+
+	if (!dev->param.is_yaffs2 && mark_flash &&
+	    bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) {
+
+		memset(&tags, 0, sizeof(tags)); /* yaffs1 only: write an explicit deletion marker */
+		tags.is_deleted = 1;
+		yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags);
+		yaffs_handle_chunk_update(dev, chunk_id, &tags);
+	} else {
+		dev->n_unmarked_deletions++;
+	}
+
+	/* Pull out of the management area.
+	 * If the whole block became dirty, this will kick off an erasure.
+	 */
+	if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING ||
+	    bi->block_state == YAFFS_BLOCK_STATE_FULL ||
+	    bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
+	    bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
+		dev->n_free_chunks++;
+		yaffs_clear_chunk_bit(dev, block, page);
+		bi->pages_in_use--;
+
+		if (bi->pages_in_use == 0 &&
+		    !bi->has_shrink_hdr &&
+		    bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING &&
+		    bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCAN) {
+			yaffs_block_became_dirty(dev, block);
+		}
+	}
+}
+
+static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
+			     const u8 *buffer, int n_bytes, int use_reserve) /* Write one data chunk, patch it into the tnode tree, invalidate the old copy */
+{
+	/* Find old chunk Need to do this to get serial number
+	 * Write new one and patch into tree.
+	 * Invalidate old tags.
+	 */
+
+	int prev_chunk_id;
+	struct yaffs_ext_tags prev_tags;
+	int new_chunk_id;
+	struct yaffs_ext_tags new_tags;
+	struct yaffs_dev *dev = in->my_dev;
+
+	yaffs_check_gc(dev, 0); /* reclaim space before consuming a chunk */
+
+	/* Get the previous chunk at this location in the file if it exists.
+	 * If it does not exist then put a zero into the tree. This creates
+	 * the tnode now, rather than later when it is harder to clean up.
+	 */
+	prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags);
+	if (prev_chunk_id < 1 &&
+	    !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0))
+		return 0;
+
+	/* Set up new tags */
+	memset(&new_tags, 0, sizeof(new_tags));
+
+	new_tags.chunk_id = inode_chunk;
+	new_tags.obj_id = in->obj_id;
+	new_tags.serial_number =
+	    (prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1;
+	new_tags.n_bytes = n_bytes;
+
+	if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+		  "Writing %d bytes to chunk!!!!!!!!!",
+		   n_bytes);
+		BUG();
+	}
+
+	new_chunk_id =
+	    yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve);
+
+	if (new_chunk_id > 0) {
+		yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0);
+
+		if (prev_chunk_id > 0)
+			yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
+
+		yaffs_verify_file_sane(in);
+	}
+	return new_chunk_id;
+
+}
+
+
+
+static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set,
+				const YCHAR *name, const void *value, int size,
+				int flags) /* Set or delete an xattr by rewriting the object header */
+{
+	struct yaffs_xattr_mod xmod;
+	int result;
+
+	xmod.set = set; /* 1 = set value, 0 = delete (consumed by yaffs_apply_xattrib_mod) */
+	xmod.name = name;
+	xmod.data = value;
+	xmod.size = size;
+	xmod.flags = flags;
+	xmod.result = -ENOSPC;
+
+	result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod);
+
+	if (result > 0)
+		return xmod.result;
+	else
+		return -ENOSPC;
+}
+
+static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer,
+				   struct yaffs_xattr_mod *xmod) /* Apply a pending xattr set/delete to the header chunk buffer */
+{
+	int retval = 0;
+	int x_offs = sizeof(struct yaffs_obj_hdr); /* xattr data sits just after the object header within the chunk */
+	struct yaffs_dev *dev = obj->my_dev;
+	int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
+	char *x_buffer = buffer + x_offs;
+
+	if (xmod->set)
+		retval =
+		    nval_set(x_buffer, x_size, xmod->name, xmod->data,
+			     xmod->size, xmod->flags);
+	else
+		retval = nval_del(x_buffer, x_size, xmod->name);
+
+	obj->has_xattr = nval_hasvalues(x_buffer, x_size);
+	obj->xattr_known = 1;
+	xmod->result = retval;
+
+	return retval;
+}
+
+static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR *name,
+				  void *value, int size) /* Fetch one xattr (name != NULL) or list all names (name == NULL) */
+{
+	char *buffer = NULL;
+	int result;
+	struct yaffs_ext_tags tags;
+	struct yaffs_dev *dev = obj->my_dev;
+	int x_offs = sizeof(struct yaffs_obj_hdr);
+	int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
+	char *x_buffer;
+	int retval = 0;
+
+	if (obj->hdr_chunk < 1) /* no header chunk on NAND: nothing stored yet */
+		return -ENODATA;
+
+	/* If we know that the object has no xattribs then don't do all the
+	 * reading and parsing.
+	 */
+	if (obj->xattr_known && !obj->has_xattr) {
+		if (name)
+			return -ENODATA;
+		else
+			return 0;
+	}
+
+	buffer = (char *)yaffs_get_temp_buffer(dev);
+	if (!buffer)
+		return -ENOMEM;
+
+	result =
+	    yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags);
+
+	if (result != YAFFS_OK)
+		retval = -ENOENT;
+	else {
+		x_buffer = buffer + x_offs;
+
+		if (!obj->xattr_known) {
+			obj->has_xattr = nval_hasvalues(x_buffer, x_size);
+			obj->xattr_known = 1;
+		}
+
+		if (name)
+			retval = nval_get(x_buffer, x_size, name, value, size);
+		else
+			retval = nval_list(x_buffer, x_size, value, size);
+	}
+	yaffs_release_temp_buffer(dev, (u8 *) buffer);
+	return retval;
+}
+
+int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name,
+		      const void *value, int size, int flags)
+{
+	return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags); /* 1 = set */
+}
+
+int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name)
+{
+	return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0); /* 0 = delete */
+}
+
+int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value,
+		      int size)
+{
+	return yaffs_do_xattrib_fetch(obj, name, value, size); /* fetch a single named xattr */
+}
+
+int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size)
+{
+	return yaffs_do_xattrib_fetch(obj, NULL, buffer, size); /* name == NULL selects the list operation */
+}
+
+static void yaffs_check_obj_details_loaded(struct yaffs_obj *in) /* Populate name/attribs of a lazy-loaded object from its header chunk */
+{
+	u8 *buf;
+	struct yaffs_obj_hdr *oh;
+	struct yaffs_dev *dev;
+	struct yaffs_ext_tags tags;
+	int result;
+	int alloc_failed = 0;
+
+	if (!in || !in->lazy_loaded || in->hdr_chunk < 1) /* nothing to do unless lazy-loaded with a header on NAND */
+		return;
+
+	dev = in->my_dev;
+	in->lazy_loaded = 0;
+	buf = yaffs_get_temp_buffer(dev);
+
+	result = yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, buf, &tags);
+	oh = (struct yaffs_obj_hdr *)buf;
+
+	in->yst_mode = oh->yst_mode;
+	yaffs_load_attribs(in, oh);
+	yaffs_set_obj_name_from_oh(in, oh);
+
+	if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
+		in->variant.symlink_variant.alias =
+		    yaffs_clone_str(oh->alias);
+		if (!in->variant.symlink_variant.alias)
+			alloc_failed = 1;	/* Not returned */
+	}
+	yaffs_release_temp_buffer(dev, buf);
+}
+
+/* UpdateObjectHeader updates the header on NAND for an object.
+ * If name is not NULL, then that new name is used.
+ */
+int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name, int force,
+		    int is_shrink, int shadows, struct yaffs_xattr_mod *xmod) /* returns new header chunk id, or <0 on failure */
+{
+
+	struct yaffs_block_info *bi;
+	struct yaffs_dev *dev = in->my_dev;
+	int prev_chunk_id;
+	int ret_val = 0;
+	int result = 0;
+	int new_chunk_id;
+	struct yaffs_ext_tags new_tags;
+	struct yaffs_ext_tags old_tags;
+	const YCHAR *alias = NULL;
+	u8 *buffer = NULL;
+	YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1];
+	struct yaffs_obj_hdr *oh = NULL;
+	loff_t file_size = 0;
+
+	strcpy(old_name, _Y("silly old name")); /* placeholder; overwritten from the old header below if one exists */
+
+	if (in->fake && in != dev->root_dir && !force && !xmod)
+		return ret_val;
+
+	yaffs_check_gc(dev, 0);
+	yaffs_check_obj_details_loaded(in);
+
+	buffer = yaffs_get_temp_buffer(in->my_dev);
+	oh = (struct yaffs_obj_hdr *)buffer;
+
+	prev_chunk_id = in->hdr_chunk;
+
+	if (prev_chunk_id > 0) {
+		result = yaffs_rd_chunk_tags_nand(dev, prev_chunk_id,
+						  buffer, &old_tags);
+
+		yaffs_verify_oh(in, oh, &old_tags, 0);
+		memcpy(old_name, oh->name, sizeof(oh->name));
+		memset(buffer, 0xff, sizeof(struct yaffs_obj_hdr)); /* wipe header area only; keep xattr area that follows it */
+	} else {
+		memset(buffer, 0xff, dev->data_bytes_per_chunk);
+	}
+
+	oh->type = in->variant_type;
+	oh->yst_mode = in->yst_mode;
+	oh->shadows_obj = oh->inband_shadowed_obj_id = shadows;
+
+	yaffs_load_attribs_oh(oh, in);
+
+	if (in->parent)
+		oh->parent_obj_id = in->parent->obj_id;
+	else
+		oh->parent_obj_id = 0;
+
+	if (name && *name) {
+		memset(oh->name, 0, sizeof(oh->name));
+		yaffs_load_oh_from_name(dev, oh->name, name);
+	} else if (prev_chunk_id > 0) {
+		memcpy(oh->name, old_name, sizeof(oh->name));
+	} else {
+		memset(oh->name, 0, sizeof(oh->name));
+	}
+
+	oh->is_shrink = is_shrink;
+
+	switch (in->variant_type) {
+	case YAFFS_OBJECT_TYPE_UNKNOWN:
+		/* Should not happen */
+		break;
+	case YAFFS_OBJECT_TYPE_FILE:
+		if (oh->parent_obj_id != YAFFS_OBJECTID_DELETED &&
+		    oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED)
+			file_size = in->variant.file_variant.file_size;
+		yaffs_oh_size_load(oh, file_size);
+		break;
+	case YAFFS_OBJECT_TYPE_HARDLINK:
+		oh->equiv_id = in->variant.hardlink_variant.equiv_id;
+		break;
+	case YAFFS_OBJECT_TYPE_SPECIAL:
+		/* Do nothing */
+		break;
+	case YAFFS_OBJECT_TYPE_DIRECTORY:
+		/* Do nothing */
+		break;
+	case YAFFS_OBJECT_TYPE_SYMLINK:
+		alias = in->variant.symlink_variant.alias;
+		if (!alias)
+			alias = _Y("no alias");
+		strncpy(oh->alias, alias, YAFFS_MAX_ALIAS_LENGTH);
+		oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
+		break;
+	}
+
+	/* process any xattrib modifications */
+	if (xmod)
+		yaffs_apply_xattrib_mod(in, (char *)buffer, xmod);
+
+	/* Tags */
+	memset(&new_tags, 0, sizeof(new_tags));
+	in->serial++;
+	new_tags.chunk_id = 0; /* chunk_id 0 marks an object header chunk */
+	new_tags.obj_id = in->obj_id;
+	new_tags.serial_number = in->serial;
+
+	/* Add extra info for file header */
+	new_tags.extra_available = 1;
+	new_tags.extra_parent_id = oh->parent_obj_id;
+	new_tags.extra_file_size = file_size;
+	new_tags.extra_is_shrink = oh->is_shrink;
+	new_tags.extra_equiv_id = oh->equiv_id;
+	new_tags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0;
+	new_tags.extra_obj_type = in->variant_type;
+	yaffs_verify_oh(in, oh, &new_tags, 1);
+
+	/* Create new chunk in NAND */
+	new_chunk_id =
+	    yaffs_write_new_chunk(dev, buffer, &new_tags,
+				  (prev_chunk_id > 0) ? 1 : 0);
+
+	if (buffer)
+		yaffs_release_temp_buffer(dev, buffer);
+
+	if (new_chunk_id < 0)
+		return new_chunk_id;
+
+	in->hdr_chunk = new_chunk_id;
+
+	if (prev_chunk_id > 0)
+		yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
+
+	if (!yaffs_obj_cache_dirty(in))
+		in->dirty = 0;
+
+	/* If this was a shrink, then mark the block
+	 * that the chunk lives on */
+	if (is_shrink) {
+		bi = yaffs_get_block_info(in->my_dev,
+					  new_chunk_id /
+					  in->my_dev->param.chunks_per_block);
+		bi->has_shrink_hdr = 1;
+	}
+
+
+	return new_chunk_id;
+}
+
+/*--------------------- File read/write ------------------------
+ * Read and write have very similar structures.
+ * In general the read/write has three parts to it
+ * An incomplete chunk to start with (if the read/write is not chunk-aligned)
+ * Some complete chunks
+ * An incomplete chunk to end off with
+ *
+ * Curve-balls: the first chunk might also be the last chunk.
+ */
+
+int yaffs_file_rd(struct yaffs_obj *in, u8 * buffer, loff_t offset, int n_bytes) /* Read n_bytes at offset; returns number of bytes read */
+{
+	int chunk;
+	u32 start;
+	int n_copy;
+	int n = n_bytes;
+	int n_done = 0;
+	struct yaffs_cache *cache;
+	struct yaffs_dev *dev;
+
+	dev = in->my_dev;
+
+	while (n > 0) {
+		yaffs_addr_to_chunk(dev, offset, &chunk, &start);
+		chunk++; /* file chunk ids are 1-based */
+
+		/* OK now check for the curveball where the start and end are in
+		 * the same chunk.
+		 */
+		if ((start + n) < dev->data_bytes_per_chunk)
+			n_copy = n;
+		else
+			n_copy = dev->data_bytes_per_chunk - start;
+
+		cache = yaffs_find_chunk_cache(in, chunk);
+
+		/* If the chunk is already in the cache or it is less than
+		 * a whole chunk or we're using inband tags then use the cache
+		 * (if there is caching) else bypass the cache.
+		 */
+		if (cache || n_copy != dev->data_bytes_per_chunk ||
+		    dev->param.inband_tags) {
+			if (dev->param.n_caches > 0) {
+
+				/* If we can't find the data in the cache,
+				 * then load it up. */
+
+				if (!cache) {
+					cache =
+					    yaffs_grab_chunk_cache(in->my_dev);
+					cache->object = in;
+					cache->chunk_id = chunk;
+					cache->dirty = 0;
+					cache->locked = 0;
+					yaffs_rd_data_obj(in, chunk,
+							  cache->data);
+					cache->n_bytes = 0;
+				}
+
+				yaffs_use_cache(dev, cache, 0);
+
+				cache->locked = 1;
+
+				memcpy(buffer, &cache->data[start], n_copy);
+
+				cache->locked = 0;
+			} else {
+				/* Read into the local buffer then copy.. */
+
+				u8 *local_buffer =
+				    yaffs_get_temp_buffer(dev);
+				yaffs_rd_data_obj(in, chunk, local_buffer);
+
+				memcpy(buffer, &local_buffer[start], n_copy);
+
+				yaffs_release_temp_buffer(dev, local_buffer);
+			}
+		} else {
+			/* A full chunk. Read directly into the buffer. */
+			yaffs_rd_data_obj(in, chunk, buffer);
+		}
+		n -= n_copy;
+		offset += n_copy;
+		buffer += n_copy;
+		n_done += n_copy;
+	}
+	return n_done;
+}
+
+int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
+		     int n_bytes, int write_through)
+{ /* Core file write; returns bytes written. Hole handling is done by yaffs_wr_file. */
+
+	int chunk;
+	u32 start;
+	int n_copy;
+	int n = n_bytes;
+	int n_done = 0;
+	int n_writeback;
+	loff_t start_write = offset;
+	int chunk_written = 0;
+	u32 n_bytes_read;
+	loff_t chunk_start;
+	struct yaffs_dev *dev;
+
+	dev = in->my_dev;
+
+	while (n > 0 && chunk_written >= 0) {
+		yaffs_addr_to_chunk(dev, offset, &chunk, &start);
+
+		if (((loff_t)chunk) *
+		    dev->data_bytes_per_chunk + start != offset ||
+		    start >= dev->data_bytes_per_chunk) {
+			yaffs_trace(YAFFS_TRACE_ERROR,
+				"AddrToChunk of offset %lld gives chunk %d start %d",
+				offset, chunk, start);
+		}
+		chunk++;	/* File pos to chunk in file offset */
+
+		/* OK now check for the curveball where the start and end are in
+		 * the same chunk.
+		 */
+
+		if ((start + n) < dev->data_bytes_per_chunk) {
+			n_copy = n;
+
+			/* Now calculate how many bytes to write back....
+			 * If we're overwriting and not writing to the end of
+			 * file then we need to write back as much as was there
+			 * before.
+			 */
+
+			chunk_start = (((loff_t)(chunk - 1)) *
+				      dev->data_bytes_per_chunk);
+
+			if (chunk_start > in->variant.file_variant.file_size)
+				n_bytes_read = 0;	/* Past end of file */
+			else
+				n_bytes_read =
+				    in->variant.file_variant.file_size -
+				    chunk_start;
+
+			if (n_bytes_read > dev->data_bytes_per_chunk)
+				n_bytes_read = dev->data_bytes_per_chunk;
+
+			n_writeback =
+			    (n_bytes_read >
+			     (start + n)) ? n_bytes_read : (start + n);
+
+			if (n_writeback < 0 ||
+			    n_writeback > dev->data_bytes_per_chunk)
+				BUG();
+
+		} else {
+			n_copy = dev->data_bytes_per_chunk - start;
+			n_writeback = dev->data_bytes_per_chunk;
+		}
+
+		if (n_copy != dev->data_bytes_per_chunk ||
+		    !dev->param.cache_bypass_aligned ||
+		    dev->param.inband_tags) {
+			/* An incomplete start or end chunk (or maybe both
+			 * start and end chunk), or we're using inband tags,
+			 * or we're forcing writes through the cache,
+			 * so we want to use the cache buffers.
+			 */
+			if (dev->param.n_caches > 0) {
+				struct yaffs_cache *cache;
+
+				/* If we can't find the data in the cache, then
+				 * load the cache */
+				cache = yaffs_find_chunk_cache(in, chunk);
+
+				if (!cache &&
+				    yaffs_check_alloc_available(dev, 1)) {
+					cache = yaffs_grab_chunk_cache(dev);
+					cache->object = in;
+					cache->chunk_id = chunk;
+					cache->dirty = 0;
+					cache->locked = 0;
+					yaffs_rd_data_obj(in, chunk,
+							  cache->data);
+				} else if (cache &&
+					   !cache->dirty &&
+					   !yaffs_check_alloc_available(dev,
+									1)) {
+					/* Drop the cache if it was a read cache
+					 * item and no space check has been made
+					 * for it.
+					 */
+					cache = NULL;
+				}
+
+				if (cache) {
+					yaffs_use_cache(dev, cache, 1);
+					cache->locked = 1;
+
+					memcpy(&cache->data[start], buffer,
+					       n_copy);
+
+					cache->locked = 0;
+					cache->n_bytes = n_writeback;
+
+					if (write_through) {
+						chunk_written =
+						    yaffs_wr_data_obj
+						    (cache->object,
+						     cache->chunk_id,
+						     cache->data,
+						     cache->n_bytes, 1);
+						cache->dirty = 0;
+					}
+				} else {
+					chunk_written = -1;	/* fail write */
+				}
+			} else {
+				/* An incomplete start or end chunk (or maybe
+				 * both start and end chunk). Read into the
+				 * local buffer then copy over and write back.
+				 */
+
+				u8 *local_buffer = yaffs_get_temp_buffer(dev);
+
+				yaffs_rd_data_obj(in, chunk, local_buffer);
+				memcpy(&local_buffer[start], buffer, n_copy);
+
+				chunk_written =
+				    yaffs_wr_data_obj(in, chunk,
+						      local_buffer,
+						      n_writeback, 0);
+
+				yaffs_release_temp_buffer(dev, local_buffer);
+			}
+		} else {
+			/* A full chunk. Write directly from the buffer. */
+
+			chunk_written =
+			    yaffs_wr_data_obj(in, chunk, buffer,
+					      dev->data_bytes_per_chunk, 0);
+
+			/* Since we've overwritten the cached data,
+			 * we better invalidate it. */
+			yaffs_invalidate_chunk_cache(in, chunk);
+		}
+
+		if (chunk_written >= 0) {
+			n -= n_copy;
+			offset += n_copy;
+			buffer += n_copy;
+			n_done += n_copy;
+		}
+	}
+
+	/* Update file object */
+
+	if ((start_write + n_done) > in->variant.file_variant.file_size)
+		in->variant.file_variant.file_size = (start_write + n_done);
+
+	in->dirty = 1;
+	return n_done;
+}
+
+int yaffs_wr_file(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
+		  int n_bytes, int write_through) /* Public write entry point */
+{
+	yaffs2_handle_hole(in, offset); /* NOTE(review): presumably deals with a write past EOF before the data write — confirm in yaffs_yaffs2.c */
+	return yaffs_do_file_wr(in, buffer, offset, n_bytes, write_through);
+}
+
+/* ---------------------- File resizing stuff ------------------ */
+
+static void yaffs_prune_chunks(struct yaffs_obj *in, loff_t new_size) /* Delete data chunks beyond new_size, working backwards */
+{
+
+	struct yaffs_dev *dev = in->my_dev;
+	loff_t old_size = in->variant.file_variant.file_size;
+	int i;
+	int chunk_id;
+	u32 dummy;
+	int last_del;
+	int start_del;
+
+	if (old_size > 0)
+		yaffs_addr_to_chunk(dev, old_size - 1, &last_del, &dummy);
+	else
+		last_del = 0;
+
+	yaffs_addr_to_chunk(dev, new_size + dev->data_bytes_per_chunk - 1,
+			    &start_del, &dummy);
+	last_del++; /* convert to 1-based file chunk ids */
+	start_del++;
+
+	/* Delete backwards so that we don't end up with holes if
+	 * power is lost part-way through the operation.
+	 */
+	for (i = last_del; i >= start_del; i--) {
+		/* NB this could be optimised somewhat,
+		 * eg. could retrieve the tags and write them without
+		 * using yaffs_chunk_del
+		 */
+
+		chunk_id = yaffs_find_del_file_chunk(in, i, NULL);
+
+		if (chunk_id < 1)
+			continue;
+
+		if (chunk_id <
+		    (dev->internal_start_block * dev->param.chunks_per_block) ||
+		    chunk_id >=
+		    ((dev->internal_end_block + 1) *
+		      dev->param.chunks_per_block)) {
+			yaffs_trace(YAFFS_TRACE_ALWAYS,
+				"Found daft chunk_id %d for %d",
+				chunk_id, i);
+		} else {
+			in->n_data_chunks--;
+			yaffs_chunk_del(dev, chunk_id, 1, __LINE__);
+		}
+	}
+}
+
+void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size) /* Shrink file: prune chunks, zero-pad the final partial chunk, prune tnode tree */
+{
+	int new_full;
+	u32 new_partial;
+	struct yaffs_dev *dev = obj->my_dev;
+
+	yaffs_addr_to_chunk(dev, new_size, &new_full, &new_partial);
+
+	yaffs_prune_chunks(obj, new_size);
+
+	if (new_partial != 0) {
+		int last_chunk = 1 + new_full;
+		u8 *local_buffer = yaffs_get_temp_buffer(dev);
+
+		/* Rewrite the last chunk with its new size and zero pad */
+		yaffs_rd_data_obj(obj, last_chunk, local_buffer);
+		memset(local_buffer + new_partial, 0,
+		       dev->data_bytes_per_chunk - new_partial);
+
+		yaffs_wr_data_obj(obj, last_chunk, local_buffer,
+				  new_partial, 1);
+
+		yaffs_release_temp_buffer(dev, local_buffer);
+	}
+
+	obj->variant.file_variant.file_size = new_size;
+
+	yaffs_prune_tree(dev, &obj->variant.file_variant);
+}
+
+int yaffs_resize_file(struct yaffs_obj *in, loff_t new_size) /* Resize a file up (handle hole) or down (truncate); rewrites header */
+{
+	struct yaffs_dev *dev = in->my_dev;
+	loff_t old_size = in->variant.file_variant.file_size;
+
+	yaffs_flush_file_cache(in, 1);
+	yaffs_invalidate_whole_cache(in);
+
+	yaffs_check_gc(dev, 0);
+
+	if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) /* only plain files can be resized */
+		return YAFFS_FAIL;
+
+	if (new_size == old_size)
+		return YAFFS_OK;
+
+	if (new_size > old_size) {
+		yaffs2_handle_hole(in, new_size);
+		in->variant.file_variant.file_size = new_size;
+	} else {
+		/* new_size < old_size */
+		yaffs_resize_file_down(in, new_size);
+	}
+
+	/* Write a new object header to reflect the resize.
+	 * show we've shrunk the file, if need be
+	 * Do this only if the file is not in the deleted directories
+	 * and is not shadowed.
+	 */
+	if (in->parent &&
+	    !in->is_shadowed &&
+	    in->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
+	    in->parent->obj_id != YAFFS_OBJECTID_DELETED)
+		yaffs_update_oh(in, NULL, 0, 0, 0, NULL);
+
+	return YAFFS_OK;
+}
+
+int yaffs_flush_file(struct yaffs_obj *in,
+		     int update_time,
+		     int data_sync,
+		     int discard_cache) /* Flush cached data; header update skipped when data_sync is set */
+{
+	if (!in->dirty) /* nothing changed: nothing to write back */
+		return YAFFS_OK;
+
+	yaffs_flush_file_cache(in, discard_cache);
+
+	if (data_sync) /* data only: do not rewrite the object header */
+		return YAFFS_OK;
+
+	if (update_time)
+		yaffs_load_current_time(in, 0, 0);
+
+	return (yaffs_update_oh(in, NULL, 0, 0, 0, NULL) >= 0) ?
+				YAFFS_OK : YAFFS_FAIL;
+}
+
+
+/* yaffs_del_file deletes the whole file data
+ * and the inode associated with the file.
+ * It does not delete the links associated with the file.
+ */
+static int yaffs_unlink_file_if_needed(struct yaffs_obj *in) /* Move to "deleted" dir (no live inode) or "unlinked" dir otherwise */
+{
+	int ret_val;
+	int del_now = 0;
+	struct yaffs_dev *dev = in->my_dev;
+
+	if (!in->my_inode) /* no VFS inode holding it open: delete immediately */
+		del_now = 1;
+
+	if (del_now) {
+		ret_val =
+		    yaffs_change_obj_name(in, in->my_dev->del_dir,
+					  _Y("deleted"), 0, 0);
+		yaffs_trace(YAFFS_TRACE_TRACING,
+			"yaffs: immediate deletion of file %d",
+			in->obj_id);
+		in->deleted = 1;
+		in->my_dev->n_deleted_files++;
+		if (dev->param.disable_soft_del || dev->param.is_yaffs2)
+			yaffs_resize_file(in, 0);
+		yaffs_soft_del_file(in);
+	} else {
+		ret_val =
+		    yaffs_change_obj_name(in, in->my_dev->unlinked_dir,
+					  _Y("unlinked"), 0, 0);
+	}
+	return ret_val;
+}
+
+static int yaffs_del_file(struct yaffs_obj *in)
+{
+ int ret_val = YAFFS_OK;
+ int deleted; /* Need to cache value on stack if in is freed */
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (dev->param.disable_soft_del || dev->param.is_yaffs2)
+ yaffs_resize_file(in, 0);
+
+ if (in->n_data_chunks > 0) {
+ /* Use soft deletion if there is data in the file.
+ * That won't be the case if it has been resized to zero.
+ */
+ if (!in->unlinked)
+ ret_val = yaffs_unlink_file_if_needed(in);
+
+ deleted = in->deleted;
+
+ if (ret_val == YAFFS_OK && in->unlinked && !in->deleted) {
+ in->deleted = 1;
+ deleted = 1;
+ in->my_dev->n_deleted_files++;
+ yaffs_soft_del_file(in);
+ }
+ return deleted ? YAFFS_OK : YAFFS_FAIL;
+ } else {
+ /* The file has no data chunks so we toss it immediately */
+ yaffs_free_tnode(in->my_dev, in->variant.file_variant.top);
+ in->variant.file_variant.top = NULL;
+ yaffs_generic_obj_del(in);
+
+ return YAFFS_OK;
+ }
+}
+
+int yaffs_is_non_empty_dir(struct yaffs_obj *obj)
+{
+ return (obj &&
+ obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) &&
+ !(list_empty(&obj->variant.dir_variant.children));
+}
+
+static int yaffs_del_dir(struct yaffs_obj *obj)
+{
+ /* First check that the directory is empty. */
+ if (yaffs_is_non_empty_dir(obj))
+ return YAFFS_FAIL;
+
+ return yaffs_generic_obj_del(obj);
+}
+
+static int yaffs_del_symlink(struct yaffs_obj *in)
+{
+ kfree(in->variant.symlink_variant.alias);
+ in->variant.symlink_variant.alias = NULL;
+
+ return yaffs_generic_obj_del(in);
+}
+
+static int yaffs_del_link(struct yaffs_obj *in)
+{
+ /* remove this hardlink from the list associated with the equivalent
+ * object
+ */
+ list_del_init(&in->hard_links);
+ return yaffs_generic_obj_del(in);
+}
+
+int yaffs_del_obj(struct yaffs_obj *obj)
+{
+ int ret_val = -1;
+
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ ret_val = yaffs_del_file(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ if (!list_empty(&obj->variant.dir_variant.dirty)) {
+ yaffs_trace(YAFFS_TRACE_BACKGROUND,
+ "Remove object %d from dirty directories",
+ obj->obj_id);
+ list_del_init(&obj->variant.dir_variant.dirty);
+ }
+ return yaffs_del_dir(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ ret_val = yaffs_del_symlink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ ret_val = yaffs_del_link(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ ret_val = yaffs_generic_obj_del(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ ret_val = 0;
+ break; /* should not happen. */
+ }
+ return ret_val;
+}
+
+
+static void yaffs_empty_dir_to_dir(struct yaffs_obj *from_dir,
+ struct yaffs_obj *to_dir)
+{
+ struct yaffs_obj *obj;
+ struct list_head *lh;
+ struct list_head *n;
+
+ list_for_each_safe(lh, n, &from_dir->variant.dir_variant.children) {
+ obj = list_entry(lh, struct yaffs_obj, siblings);
+ yaffs_add_obj_to_dir(to_dir, obj);
+ }
+}
+
+struct yaffs_obj *yaffs_retype_obj(struct yaffs_obj *obj,
+ enum yaffs_obj_type type)
+{
+ /* Tear down the old variant */
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ /* Nuke file data */
+ yaffs_resize_file(obj, 0);
+ yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top);
+ obj->variant.file_variant.top = NULL;
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ /* Put the children in lost and found. */
+ yaffs_empty_dir_to_dir(obj, obj->my_dev->lost_n_found);
+ if (!list_empty(&obj->variant.dir_variant.dirty))
+ list_del_init(&obj->variant.dir_variant.dirty);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+		/* Nuke symlink data */
+ kfree(obj->variant.symlink_variant.alias);
+ obj->variant.symlink_variant.alias = NULL;
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ list_del_init(&obj->hard_links);
+ break;
+ default:
+ break;
+ }
+
+ memset(&obj->variant, 0, sizeof(obj->variant));
+
+	/* Set up new variant if the memset is not enough. */
+ switch (type) {
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ INIT_LIST_HEAD(&obj->variant.dir_variant.children);
+ INIT_LIST_HEAD(&obj->variant.dir_variant.dirty);
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ default:
+ break;
+ }
+
+ obj->variant_type = type;
+
+ return obj;
+
+}
+
+static int yaffs_unlink_worker(struct yaffs_obj *obj)
+{
+ int del_now = 0;
+
+ if (!obj)
+ return YAFFS_FAIL;
+
+ if (!obj->my_inode)
+ del_now = 1;
+
+ yaffs_update_parent(obj->parent);
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
+ return yaffs_del_link(obj);
+ } else if (!list_empty(&obj->hard_links)) {
+ /* Curve ball: We're unlinking an object that has a hardlink.
+ *
+ * This problem arises because we are not strictly following
+ * The Linux link/inode model.
+ *
+ * We can't really delete the object.
+ * Instead, we do the following:
+ * - Select a hardlink.
+ * - Unhook it from the hard links
+ * - Move it from its parent directory so that the rename works.
+ * - Rename the object to the hardlink's name.
+ * - Delete the hardlink
+ */
+
+ struct yaffs_obj *hl;
+ struct yaffs_obj *parent;
+ int ret_val;
+ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ hl = list_entry(obj->hard_links.next, struct yaffs_obj,
+ hard_links);
+
+ yaffs_get_obj_name(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
+ parent = hl->parent;
+
+ list_del_init(&hl->hard_links);
+
+ yaffs_add_obj_to_dir(obj->my_dev->unlinked_dir, hl);
+
+ ret_val = yaffs_change_obj_name(obj, parent, name, 0, 0);
+
+ if (ret_val == YAFFS_OK)
+ ret_val = yaffs_generic_obj_del(hl);
+
+ return ret_val;
+
+ } else if (del_now) {
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ return yaffs_del_file(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ list_del_init(&obj->variant.dir_variant.dirty);
+ return yaffs_del_dir(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ return yaffs_del_symlink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ return yaffs_generic_obj_del(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ default:
+ return YAFFS_FAIL;
+ }
+ } else if (yaffs_is_non_empty_dir(obj)) {
+ return YAFFS_FAIL;
+ } else {
+ return yaffs_change_obj_name(obj, obj->my_dev->unlinked_dir,
+ _Y("unlinked"), 0, 0);
+ }
+}
+
+static int yaffs_unlink_obj(struct yaffs_obj *obj)
+{
+ if (obj && obj->unlink_allowed)
+ return yaffs_unlink_worker(obj);
+
+ return YAFFS_FAIL;
+}
+
+int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR *name)
+{
+ struct yaffs_obj *obj;
+
+ obj = yaffs_find_by_name(dir, name);
+ return yaffs_unlink_obj(obj);
+}
+
+/* Note:
+ * If old_name is NULL then we take old_dir as the object to be renamed.
+ */
+int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR *old_name,
+ struct yaffs_obj *new_dir, const YCHAR *new_name)
+{
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_obj *existing_target = NULL;
+ int force = 0;
+ int result;
+ struct yaffs_dev *dev;
+
+ if (!old_dir || old_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ BUG();
+ return YAFFS_FAIL;
+ }
+ if (!new_dir || new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ BUG();
+ return YAFFS_FAIL;
+ }
+
+ dev = old_dir->my_dev;
+
+#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
+	/* Special case for case insensitive systems.
+ * While look-up is case insensitive, the name isn't.
+ * Therefore we might want to change x.txt to X.txt
+ */
+ if (old_dir == new_dir &&
+ old_name && new_name &&
+ strcmp(old_name, new_name) == 0)
+ force = 1;
+#endif
+
+ if (strnlen(new_name, YAFFS_MAX_NAME_LENGTH + 1) >
+ YAFFS_MAX_NAME_LENGTH)
+ /* ENAMETOOLONG */
+ return YAFFS_FAIL;
+
+ if (old_name)
+ obj = yaffs_find_by_name(old_dir, old_name);
+ else{
+ obj = old_dir;
+ old_dir = obj->parent;
+ }
+
+ if (obj && obj->rename_allowed) {
+ /* Now handle an existing target, if there is one */
+ existing_target = yaffs_find_by_name(new_dir, new_name);
+ if (yaffs_is_non_empty_dir(existing_target)) {
+ return YAFFS_FAIL; /* ENOTEMPTY */
+ } else if (existing_target && existing_target != obj) {
+ /* Nuke the target first, using shadowing,
+ * but only if it isn't the same object.
+ *
+ * Note we must disable gc here otherwise it can mess
+ * up the shadowing.
+ *
+ */
+ dev->gc_disable = 1;
+ yaffs_change_obj_name(obj, new_dir, new_name, force,
+ existing_target->obj_id);
+ existing_target->is_shadowed = 1;
+ yaffs_unlink_obj(existing_target);
+ dev->gc_disable = 0;
+ }
+
+ result = yaffs_change_obj_name(obj, new_dir, new_name, 1, 0);
+
+ yaffs_update_parent(old_dir);
+ if (new_dir != old_dir)
+ yaffs_update_parent(new_dir);
+
+ return result;
+ }
+ return YAFFS_FAIL;
+}
+
+/*----------------------- Initialisation Scanning ---------------------- */
+
+void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
+ int backward_scanning)
+{
+ struct yaffs_obj *obj;
+
+ if (backward_scanning) {
+ /* Handle YAFFS2 case (backward scanning)
+ * If the shadowed object exists then ignore.
+ */
+ obj = yaffs_find_by_number(dev, obj_id);
+ if (obj)
+ return;
+ }
+
+ /* Let's create it (if it does not exist) assuming it is a file so that
+ * it can do shrinking etc.
+ * We put it in unlinked dir to be cleaned up after the scanning
+ */
+ obj =
+ yaffs_find_or_create_by_number(dev, obj_id, YAFFS_OBJECT_TYPE_FILE);
+ if (!obj)
+ return;
+ obj->is_shadowed = 1;
+ yaffs_add_obj_to_dir(dev->unlinked_dir, obj);
+ obj->variant.file_variant.shrink_size = 0;
+ obj->valid = 1; /* So that we don't read any other info. */
+}
+
+void yaffs_link_fixup(struct yaffs_dev *dev, struct list_head *hard_list)
+{
+ struct list_head *lh;
+ struct list_head *save;
+ struct yaffs_obj *hl;
+ struct yaffs_obj *in;
+
+ list_for_each_safe(lh, save, hard_list) {
+ hl = list_entry(lh, struct yaffs_obj, hard_links);
+ in = yaffs_find_by_number(dev,
+ hl->variant.hardlink_variant.equiv_id);
+
+ if (in) {
+ /* Add the hardlink pointers */
+ hl->variant.hardlink_variant.equiv_obj = in;
+ list_add(&hl->hard_links, &in->hard_links);
+ } else {
+ /* Todo Need to report/handle this better.
+			 * Got a problem... hardlink to a non-existent object
+ */
+ hl->variant.hardlink_variant.equiv_obj = NULL;
+ INIT_LIST_HEAD(&hl->hard_links);
+ }
+ }
+}
+
+static void yaffs_strip_deleted_objs(struct yaffs_dev *dev)
+{
+ /*
+ * Sort out state of unlinked and deleted objects after scanning.
+ */
+ struct list_head *i;
+ struct list_head *n;
+ struct yaffs_obj *l;
+
+ if (dev->read_only)
+ return;
+
+ /* Soft delete all the unlinked files */
+ list_for_each_safe(i, n,
+ &dev->unlinked_dir->variant.dir_variant.children) {
+ l = list_entry(i, struct yaffs_obj, siblings);
+ yaffs_del_obj(l);
+ }
+
+ list_for_each_safe(i, n, &dev->del_dir->variant.dir_variant.children) {
+ l = list_entry(i, struct yaffs_obj, siblings);
+ yaffs_del_obj(l);
+ }
+}
+
+/*
+ * This code iterates through all the objects making sure that they are rooted.
+ * Any unrooted objects are re-rooted in lost+found.
+ * An object needs to be in one of:
+ * - Directly under deleted, unlinked
+ * - Directly or indirectly under root.
+ *
+ * Note:
+ * This code assumes that we don't ever change the current relationships
+ * between directories:
+ * root_dir->parent == unlinked_dir->parent == del_dir->parent == NULL
+ * lost-n-found->parent == root_dir
+ *
+ * This fixes the problem where directories might have inadvertently been
+ * deleted leaving the object "hanging" without being rooted in the
+ * directory tree.
+ */
+
+static int yaffs_has_null_parent(struct yaffs_dev *dev, struct yaffs_obj *obj)
+{
+ return (obj == dev->del_dir ||
+ obj == dev->unlinked_dir || obj == dev->root_dir);
+}
+
+static void yaffs_fix_hanging_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_obj *parent;
+ int i;
+ struct list_head *lh;
+ struct list_head *n;
+ int depth_limit;
+ int hanging;
+
+ if (dev->read_only)
+ return;
+
+ /* Iterate through the objects in each hash entry,
+ * looking at each object.
+ * Make sure it is rooted.
+ */
+
+ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+ list_for_each_safe(lh, n, &dev->obj_bucket[i].list) {
+ obj = list_entry(lh, struct yaffs_obj, hash_link);
+ parent = obj->parent;
+
+ if (yaffs_has_null_parent(dev, obj)) {
+ /* These directories are not hanging */
+ hanging = 0;
+ } else if (!parent ||
+ parent->variant_type !=
+ YAFFS_OBJECT_TYPE_DIRECTORY) {
+ hanging = 1;
+ } else if (yaffs_has_null_parent(dev, parent)) {
+ hanging = 0;
+ } else {
+ /*
+ * Need to follow the parent chain to
+ * see if it is hanging.
+ */
+ hanging = 0;
+ depth_limit = 100;
+
+ while (parent != dev->root_dir &&
+ parent->parent &&
+ parent->parent->variant_type ==
+ YAFFS_OBJECT_TYPE_DIRECTORY &&
+ depth_limit > 0) {
+ parent = parent->parent;
+ depth_limit--;
+ }
+ if (parent != dev->root_dir)
+ hanging = 1;
+ }
+ if (hanging) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Hanging object %d moved to lost and found",
+ obj->obj_id);
+ yaffs_add_obj_to_dir(dev->lost_n_found, obj);
+ }
+ }
+ }
+}
+
+/*
+ * Delete directory contents for cleaning up lost and found.
+ */
+static void yaffs_del_dir_contents(struct yaffs_obj *dir)
+{
+ struct yaffs_obj *obj;
+ struct list_head *lh;
+ struct list_head *n;
+
+ if (dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+ BUG();
+
+ list_for_each_safe(lh, n, &dir->variant.dir_variant.children) {
+ obj = list_entry(lh, struct yaffs_obj, siblings);
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY)
+ yaffs_del_dir_contents(obj);
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Deleting lost_found object %d",
+ obj->obj_id);
+ yaffs_unlink_obj(obj);
+ }
+}
+
+static void yaffs_empty_l_n_f(struct yaffs_dev *dev)
+{
+ yaffs_del_dir_contents(dev->lost_n_found);
+}
+
+
+struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *directory,
+ const YCHAR *name)
+{
+ int sum;
+ struct list_head *i;
+ YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1];
+ struct yaffs_obj *l;
+
+ if (!name)
+ return NULL;
+
+ if (!directory) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: yaffs_find_by_name: null pointer directory"
+ );
+ BUG();
+ return NULL;
+ }
+ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: yaffs_find_by_name: non-directory"
+ );
+ BUG();
+ }
+
+ sum = yaffs_calc_name_sum(name);
+
+ list_for_each(i, &directory->variant.dir_variant.children) {
+ l = list_entry(i, struct yaffs_obj, siblings);
+
+ if (l->parent != directory)
+ BUG();
+
+ yaffs_check_obj_details_loaded(l);
+
+ /* Special case for lost-n-found */
+ if (l->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
+ if (!strcmp(name, YAFFS_LOSTNFOUND_NAME))
+ return l;
+ } else if (l->sum == sum || l->hdr_chunk <= 0) {
+ /* LostnFound chunk called Objxxx
+ * Do a real check
+ */
+ yaffs_get_obj_name(l, buffer,
+ YAFFS_MAX_NAME_LENGTH + 1);
+ if (!strncmp(name, buffer, YAFFS_MAX_NAME_LENGTH))
+ return l;
+ }
+ }
+ return NULL;
+}
+
+/* GetEquivalentObject dereferences any hard links to get to the
+ * actual object.
+ */
+
+struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj)
+{
+ if (obj && obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
+ obj = obj->variant.hardlink_variant.equiv_obj;
+ yaffs_check_obj_details_loaded(obj);
+ }
+ return obj;
+}
+
+/*
+ * A note or two on object names.
+ * * If the object name is missing, we then make one up in the form objnnn
+ *
+ * * ASCII names are stored in the object header's name field from byte zero
+ * * Unicode names are historically stored starting from byte zero.
+ *
+ * Then there are automatic Unicode names...
+ * The purpose of these is to save names in a way that can be read as
+ * ASCII or Unicode names as appropriate, thus allowing a Unicode and ASCII
+ * system to share files.
+ *
+ * These automatic unicode are stored slightly differently...
+ * - If the name can fit in the ASCII character space then they are saved as
+ * ascii names as per above.
+ * - If the name needs Unicode then the name is saved in Unicode
+ * starting at oh->name[1].
+
+ */
+static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name,
+ int buffer_size)
+{
+ /* Create an object name if we could not find one. */
+ if (strnlen(name, YAFFS_MAX_NAME_LENGTH) == 0) {
+ YCHAR local_name[20];
+ YCHAR num_string[20];
+ YCHAR *x = &num_string[19];
+ unsigned v = obj->obj_id;
+ num_string[19] = 0;
+ while (v > 0) {
+ x--;
+ *x = '0' + (v % 10);
+ v /= 10;
+ }
+ /* make up a name */
+ strcpy(local_name, YAFFS_LOSTNFOUND_PREFIX);
+ strcat(local_name, x);
+ strncpy(name, local_name, buffer_size - 1);
+ }
+}
+
+int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR *name, int buffer_size)
+{
+ memset(name, 0, buffer_size * sizeof(YCHAR));
+ yaffs_check_obj_details_loaded(obj);
+ if (obj->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
+ strncpy(name, YAFFS_LOSTNFOUND_NAME, buffer_size - 1);
+ } else if (obj->short_name[0]) {
+ strcpy(name, obj->short_name);
+ } else if (obj->hdr_chunk > 0) {
+ int result;
+ u8 *buffer = yaffs_get_temp_buffer(obj->my_dev);
+
+ struct yaffs_obj_hdr *oh = (struct yaffs_obj_hdr *)buffer;
+
+ memset(buffer, 0, obj->my_dev->data_bytes_per_chunk);
+
+ if (obj->hdr_chunk > 0) {
+ result = yaffs_rd_chunk_tags_nand(obj->my_dev,
+ obj->hdr_chunk,
+ buffer, NULL);
+ }
+ yaffs_load_name_from_oh(obj->my_dev, name, oh->name,
+ buffer_size);
+
+ yaffs_release_temp_buffer(obj->my_dev, buffer);
+ }
+
+ yaffs_fix_null_name(obj, name, buffer_size);
+
+ return strnlen(name, YAFFS_MAX_NAME_LENGTH);
+}
+
+loff_t yaffs_get_obj_length(struct yaffs_obj *obj)
+{
+ /* Dereference any hard linking */
+ obj = yaffs_get_equivalent_obj(obj);
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+ return obj->variant.file_variant.file_size;
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
+ if (!obj->variant.symlink_variant.alias)
+ return 0;
+ return strnlen(obj->variant.symlink_variant.alias,
+ YAFFS_MAX_ALIAS_LENGTH);
+ } else {
+ /* Only a directory should drop through to here */
+ return obj->my_dev->data_bytes_per_chunk;
+ }
+}
+
+int yaffs_get_obj_link_count(struct yaffs_obj *obj)
+{
+ int count = 0;
+ struct list_head *i;
+
+ if (!obj->unlinked)
+ count++; /* the object itself */
+
+ list_for_each(i, &obj->hard_links)
+ count++; /* add the hard links; */
+
+ return count;
+}
+
+int yaffs_get_obj_inode(struct yaffs_obj *obj)
+{
+ obj = yaffs_get_equivalent_obj(obj);
+
+ return obj->obj_id;
+}
+
+unsigned yaffs_get_obj_type(struct yaffs_obj *obj)
+{
+ obj = yaffs_get_equivalent_obj(obj);
+
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ return DT_REG;
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ return DT_DIR;
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ return DT_LNK;
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ return DT_REG;
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ if (S_ISFIFO(obj->yst_mode))
+ return DT_FIFO;
+ if (S_ISCHR(obj->yst_mode))
+ return DT_CHR;
+ if (S_ISBLK(obj->yst_mode))
+ return DT_BLK;
+ if (S_ISSOCK(obj->yst_mode))
+ return DT_SOCK;
+ return DT_REG;
+ break;
+ default:
+ return DT_REG;
+ break;
+ }
+}
+
+YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj)
+{
+ obj = yaffs_get_equivalent_obj(obj);
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK)
+ return yaffs_clone_str(obj->variant.symlink_variant.alias);
+ else
+ return yaffs_clone_str(_Y(""));
+}
+
+/*--------------------------- Initialisation code -------------------------- */
+
+static int yaffs_check_dev_fns(struct yaffs_dev *dev)
+{
+ struct yaffs_driver *drv = &dev->drv;
+ struct yaffs_tags_handler *tagger = &dev->tagger;
+
+ /* Common functions, gotta have */
+ if (!drv->drv_read_chunk_fn ||
+ !drv->drv_write_chunk_fn ||
+ !drv->drv_erase_fn)
+ return 0;
+
+ if (dev->param.is_yaffs2 &&
+ (!drv->drv_mark_bad_fn || !drv->drv_check_bad_fn))
+ return 0;
+
+ /* Install the default tags marshalling functions if needed. */
+ yaffs_tags_compat_install(dev);
+ yaffs_tags_marshall_install(dev);
+
+ /* Check we now have the marshalling functions required. */
+ if (!tagger->write_chunk_tags_fn ||
+ !tagger->read_chunk_tags_fn ||
+ !tagger->query_block_fn ||
+ !tagger->mark_bad_fn)
+ return 0;
+
+ return 1;
+}
+
+static int yaffs_create_initial_dir(struct yaffs_dev *dev)
+{
+ /* Initialise the unlinked, deleted, root and lost+found directories */
+ dev->lost_n_found = dev->root_dir = NULL;
+ dev->unlinked_dir = dev->del_dir = NULL;
+ dev->unlinked_dir =
+ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR);
+ dev->del_dir =
+ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_DELETED, S_IFDIR);
+ dev->root_dir =
+ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_ROOT,
+ YAFFS_ROOT_MODE | S_IFDIR);
+ dev->lost_n_found =
+ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_LOSTNFOUND,
+ YAFFS_LOSTNFOUND_MODE | S_IFDIR);
+
+ if (dev->lost_n_found && dev->root_dir && dev->unlinked_dir
+ && dev->del_dir) {
+ yaffs_add_obj_to_dir(dev->root_dir, dev->lost_n_found);
+ return YAFFS_OK;
+ }
+ return YAFFS_FAIL;
+}
+
+/* Low level init.
+ * Typically only used by yaffs_guts_initialise, but also used by the
+ * Low level yaffs driver tests.
+ */
+
+int yaffs_guts_ll_init(struct yaffs_dev *dev)
+{
+
+
+ yaffs_trace(YAFFS_TRACE_TRACING, "yaffs: yaffs_ll_init()");
+
+ if (!dev) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Need a device"
+ );
+ return YAFFS_FAIL;
+ }
+
+ if (dev->ll_init)
+ return YAFFS_OK;
+
+ dev->internal_start_block = dev->param.start_block;
+ dev->internal_end_block = dev->param.end_block;
+ dev->block_offset = 0;
+ dev->chunk_offset = 0;
+ dev->n_free_chunks = 0;
+
+ dev->gc_block = 0;
+
+ if (dev->param.start_block == 0) {
+ dev->internal_start_block = dev->param.start_block + 1;
+ dev->internal_end_block = dev->param.end_block + 1;
+ dev->block_offset = 1;
+ dev->chunk_offset = dev->param.chunks_per_block;
+ }
+
+ /* Check geometry parameters. */
+
+ if ((!dev->param.inband_tags && dev->param.is_yaffs2 &&
+ dev->param.total_bytes_per_chunk < 1024) ||
+ (!dev->param.is_yaffs2 &&
+ dev->param.total_bytes_per_chunk < 512) ||
+ (dev->param.inband_tags && !dev->param.is_yaffs2) ||
+ dev->param.chunks_per_block < 2 ||
+ dev->param.n_reserved_blocks < 2 ||
+ dev->internal_start_block <= 0 ||
+ dev->internal_end_block <= 0 ||
+ dev->internal_end_block <=
+ (dev->internal_start_block + dev->param.n_reserved_blocks + 2)
+ ) {
+ /* otherwise it is too small */
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "NAND geometry problems: chunk size %d, type is yaffs%s, inband_tags %d ",
+ dev->param.total_bytes_per_chunk,
+ dev->param.is_yaffs2 ? "2" : "",
+ dev->param.inband_tags);
+ return YAFFS_FAIL;
+ }
+
+ /* Sort out space for inband tags, if required */
+ if (dev->param.inband_tags)
+ dev->data_bytes_per_chunk =
+ dev->param.total_bytes_per_chunk -
+ sizeof(struct yaffs_packed_tags2_tags_only);
+ else
+ dev->data_bytes_per_chunk = dev->param.total_bytes_per_chunk;
+
+ /* Got the right mix of functions? */
+ if (!yaffs_check_dev_fns(dev)) {
+ /* Function missing */
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "device function(s) missing or wrong");
+
+ return YAFFS_FAIL;
+ }
+
+ if (yaffs_init_nand(dev) != YAFFS_OK) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "InitialiseNAND failed");
+ return YAFFS_FAIL;
+ }
+
+ return YAFFS_OK;
+}
+
+
+int yaffs_guts_format_dev(struct yaffs_dev *dev)
+{
+ int i;
+ enum yaffs_block_state state;
+ u32 dummy;
+
+ if(yaffs_guts_ll_init(dev) != YAFFS_OK)
+ return YAFFS_FAIL;
+
+ if(dev->is_mounted)
+ return YAFFS_FAIL;
+
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ yaffs_query_init_block_state(dev, i, &state, &dummy);
+ if (state != YAFFS_BLOCK_STATE_DEAD)
+ yaffs_erase_block(dev, i);
+ }
+
+ return YAFFS_OK;
+}
+
+
+int yaffs_guts_initialise(struct yaffs_dev *dev)
+{
+ int init_failed = 0;
+ unsigned x;
+ int bits;
+
+ if(yaffs_guts_ll_init(dev) != YAFFS_OK)
+ return YAFFS_FAIL;
+
+ if (dev->is_mounted) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "device already mounted");
+ return YAFFS_FAIL;
+ }
+
+ dev->is_mounted = 1;
+
+ /* OK now calculate a few things for the device */
+
+ /*
+ * Calculate all the chunk size manipulation numbers:
+ */
+ x = dev->data_bytes_per_chunk;
+ /* We always use dev->chunk_shift and dev->chunk_div */
+ dev->chunk_shift = calc_shifts(x);
+ x >>= dev->chunk_shift;
+ dev->chunk_div = x;
+ /* We only use chunk mask if chunk_div is 1 */
+ dev->chunk_mask = (1 << dev->chunk_shift) - 1;
+
+ /*
+ * Calculate chunk_grp_bits.
+ * We need to find the next power of 2 > than internal_end_block
+ */
+
+ x = dev->param.chunks_per_block * (dev->internal_end_block + 1);
+
+ bits = calc_shifts_ceiling(x);
+
+ /* Set up tnode width if wide tnodes are enabled. */
+ if (!dev->param.wide_tnodes_disabled) {
+ /* bits must be even so that we end up with 32-bit words */
+ if (bits & 1)
+ bits++;
+ if (bits < 16)
+ dev->tnode_width = 16;
+ else
+ dev->tnode_width = bits;
+ } else {
+ dev->tnode_width = 16;
+ }
+
+ dev->tnode_mask = (1 << dev->tnode_width) - 1;
+
+ /* Level0 Tnodes are 16 bits or wider (if wide tnodes are enabled),
+ * so if the bitwidth of the
+ * chunk range we're using is greater than 16 we need
+ * to figure out chunk shift and chunk_grp_size
+ */
+
+ if (bits <= dev->tnode_width)
+ dev->chunk_grp_bits = 0;
+ else
+ dev->chunk_grp_bits = bits - dev->tnode_width;
+
+ dev->tnode_size = (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8;
+ if (dev->tnode_size < sizeof(struct yaffs_tnode))
+ dev->tnode_size = sizeof(struct yaffs_tnode);
+
+ dev->chunk_grp_size = 1 << dev->chunk_grp_bits;
+
+ if (dev->param.chunks_per_block < dev->chunk_grp_size) {
+ /* We have a problem because the soft delete won't work if
+ * the chunk group size > chunks per block.
+ * This can be remedied by using larger "virtual blocks".
+ */
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "chunk group too large");
+
+ return YAFFS_FAIL;
+ }
+
+ /* Finished verifying the device, continue with initialisation */
+
+ /* More device initialisation */
+ dev->all_gcs = 0;
+ dev->passive_gc_count = 0;
+ dev->oldest_dirty_gc_count = 0;
+ dev->bg_gcs = 0;
+ dev->gc_block_finder = 0;
+ dev->buffered_block = -1;
+ dev->doing_buffered_block_rewrite = 0;
+ dev->n_deleted_files = 0;
+ dev->n_bg_deletions = 0;
+ dev->n_unlinked_files = 0;
+ dev->n_ecc_fixed = 0;
+ dev->n_ecc_unfixed = 0;
+ dev->n_tags_ecc_fixed = 0;
+ dev->n_tags_ecc_unfixed = 0;
+ dev->n_erase_failures = 0;
+ dev->n_erased_blocks = 0;
+ dev->gc_disable = 0;
+ dev->has_pending_prioritised_gc = 1;
+ /* Assume the worst for now, will get fixed on first GC */
+ INIT_LIST_HEAD(&dev->dirty_dirs);
+ dev->oldest_dirty_seq = 0;
+ dev->oldest_dirty_block = 0;
+
+ /* Initialise temporary buffers and caches. */
+ if (!yaffs_init_tmp_buffers(dev))
+ init_failed = 1;
+
+ dev->cache = NULL;
+ dev->gc_cleanup_list = NULL;
+
+ if (!init_failed && dev->param.n_caches > 0) {
+ int i;
+ void *buf;
+ int cache_bytes =
+ dev->param.n_caches * sizeof(struct yaffs_cache);
+
+ if (dev->param.n_caches > YAFFS_MAX_SHORT_OP_CACHES)
+ dev->param.n_caches = YAFFS_MAX_SHORT_OP_CACHES;
+
+ dev->cache = kmalloc(cache_bytes, GFP_NOFS);
+
+ buf = (u8 *) dev->cache;
+
+ if (dev->cache)
+ memset(dev->cache, 0, cache_bytes);
+
+ for (i = 0; i < dev->param.n_caches && buf; i++) {
+ dev->cache[i].object = NULL;
+ dev->cache[i].last_use = 0;
+ dev->cache[i].dirty = 0;
+ dev->cache[i].data = buf =
+ kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+ }
+ if (!buf)
+ init_failed = 1;
+
+ dev->cache_last_use = 0;
+ }
+
+ dev->cache_hits = 0;
+
+ if (!init_failed) {
+ dev->gc_cleanup_list =
+ kmalloc(dev->param.chunks_per_block * sizeof(u32),
+ GFP_NOFS);
+ if (!dev->gc_cleanup_list)
+ init_failed = 1;
+ }
+
+ if (dev->param.is_yaffs2)
+ dev->param.use_header_file_size = 1;
+
+ if (!init_failed && !yaffs_init_blocks(dev))
+ init_failed = 1;
+
+ yaffs_init_tnodes_and_objs(dev);
+
+ if (!init_failed && !yaffs_create_initial_dir(dev))
+ init_failed = 1;
+
+ if (!init_failed && dev->param.is_yaffs2 &&
+ !dev->param.disable_summary &&
+ !yaffs_summary_init(dev))
+ init_failed = 1;
+
+ if (!init_failed) {
+ /* Now scan the flash. */
+ if (dev->param.is_yaffs2) {
+ if (yaffs2_checkpt_restore(dev)) {
+ yaffs_check_obj_details_loaded(dev->root_dir);
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT |
+ YAFFS_TRACE_MOUNT,
+ "yaffs: restored from checkpoint"
+ );
+ } else {
+
+ /* Clean up the mess caused by an aborted
+ * checkpoint load then scan backwards.
+ */
+ yaffs_deinit_blocks(dev);
+
+ yaffs_deinit_tnodes_and_objs(dev);
+
+ dev->n_erased_blocks = 0;
+ dev->n_free_chunks = 0;
+ dev->alloc_block = -1;
+ dev->alloc_page = -1;
+ dev->n_deleted_files = 0;
+ dev->n_unlinked_files = 0;
+ dev->n_bg_deletions = 0;
+
+ if (!init_failed && !yaffs_init_blocks(dev))
+ init_failed = 1;
+
+ yaffs_init_tnodes_and_objs(dev);
+
+ if (!init_failed
+ && !yaffs_create_initial_dir(dev))
+ init_failed = 1;
+
+ if (!init_failed && !yaffs2_scan_backwards(dev))
+ init_failed = 1;
+ }
+ } else if (!yaffs1_scan(dev)) {
+ init_failed = 1;
+ }
+
+ yaffs_strip_deleted_objs(dev);
+ yaffs_fix_hanging_objs(dev);
+ if (dev->param.empty_lost_n_found)
+ yaffs_empty_l_n_f(dev);
+ }
+
+ if (init_failed) {
+ /* Clean up the mess */
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "yaffs: yaffs_guts_initialise() aborted.");
+
+ yaffs_deinitialise(dev);
+ return YAFFS_FAIL;
+ }
+
+ /* Zero out stats */
+ dev->n_page_reads = 0;
+ dev->n_page_writes = 0;
+ dev->n_erasures = 0;
+ dev->n_gc_copies = 0;
+ dev->n_retried_writes = 0;
+
+ dev->n_retired_blocks = 0;
+
+ yaffs_verify_free_chunks(dev);
+ yaffs_verify_blocks(dev);
+
+ /* Clean up any aborted checkpoint data */
+ if (!dev->is_checkpointed && dev->blocks_in_checkpt > 0)
+ yaffs2_checkpt_invalidate(dev);
+
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "yaffs: yaffs_guts_initialise() done.");
+ return YAFFS_OK;
+}
+
+void yaffs_deinitialise(struct yaffs_dev *dev)
+{
+ if (dev->is_mounted) {
+ int i;
+
+ yaffs_deinit_blocks(dev);
+ yaffs_deinit_tnodes_and_objs(dev);
+ yaffs_summary_deinit(dev);
+
+ if (dev->param.n_caches > 0 && dev->cache) {
+
+ for (i = 0; i < dev->param.n_caches; i++) {
+ kfree(dev->cache[i].data);
+ dev->cache[i].data = NULL;
+ }
+
+ kfree(dev->cache);
+ dev->cache = NULL;
+ }
+
+ kfree(dev->gc_cleanup_list);
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
+ kfree(dev->temp_buffer[i].buffer);
+
+ dev->is_mounted = 0;
+
+ yaffs_deinit_nand(dev);
+ }
+}
+
+int yaffs_count_free_chunks(struct yaffs_dev *dev)
+{
+ int n_free = 0;
+ int b;
+ struct yaffs_block_info *blk;
+
+ blk = dev->block_info;
+ for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
+ switch (blk->block_state) {
+ case YAFFS_BLOCK_STATE_EMPTY:
+ case YAFFS_BLOCK_STATE_ALLOCATING:
+ case YAFFS_BLOCK_STATE_COLLECTING:
+ case YAFFS_BLOCK_STATE_FULL:
+ n_free +=
+ (dev->param.chunks_per_block - blk->pages_in_use +
+ blk->soft_del_pages);
+ break;
+ default:
+ break;
+ }
+ blk++;
+ }
+ return n_free;
+}
+
+int yaffs_get_n_free_chunks(struct yaffs_dev *dev)
+{
+ /* This is what we report to the outside world */
+ int n_free;
+ int n_dirty_caches;
+ int blocks_for_checkpt;
+ int i;
+
+ n_free = dev->n_free_chunks;
+ n_free += dev->n_deleted_files;
+
+ /* Now count and subtract the number of dirty chunks in the cache. */
+
+ for (n_dirty_caches = 0, i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].dirty)
+ n_dirty_caches++;
+ }
+
+ n_free -= n_dirty_caches;
+
+ n_free -=
+ ((dev->param.n_reserved_blocks + 1) * dev->param.chunks_per_block);
+
+ /* Now figure checkpoint space and report that... */
+ blocks_for_checkpt = yaffs_calc_checkpt_blocks_required(dev);
+
+ n_free -= (blocks_for_checkpt * dev->param.chunks_per_block);
+
+ if (n_free < 0)
+ n_free = 0;
+
+ return n_free;
+}
+
+
+
+/*
+ * Marshalling functions to get loff_t file sizes into and out of
+ * object headers.
+ */
+void yaffs_oh_size_load(struct yaffs_obj_hdr *oh, loff_t fsize)
+{
+ oh->file_size_low = (fsize & 0xFFFFFFFF);
+ oh->file_size_high = ((fsize >> 32) & 0xFFFFFFFF);
+}
+
+loff_t yaffs_oh_to_size(struct yaffs_obj_hdr *oh)
+{
+ loff_t retval;
+
+ if (sizeof(loff_t) >= 8 && ~(oh->file_size_high))
+ retval = (((loff_t) oh->file_size_high) << 32) |
+ (((loff_t) oh->file_size_low) & 0xFFFFFFFF);
+ else
+ retval = (loff_t) oh->file_size_low;
+
+ return retval;
+}
+
+
+void yaffs_count_blocks_by_state(struct yaffs_dev *dev, int bs[10])
+{
+ int i;
+ struct yaffs_block_info *bi;
+ int s;
+
+ for(i = 0; i < 10; i++)
+ bs[i] = 0;
+
+ for(i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ bi = yaffs_get_block_info(dev, i);
+ s = bi->block_state;
+ if(s > YAFFS_BLOCK_STATE_DEAD || s < YAFFS_BLOCK_STATE_UNKNOWN)
+ bs[0]++;
+ else
+ bs[s]++;
+ }
+}
Index: linux-3.18.21/fs/yaffs2/yaffs_guts.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_guts.h 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,1010 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_GUTS_H__
+#define __YAFFS_GUTS_H__
+
+#include "yportenv.h"
+
+#define YAFFS_OK 1
+#define YAFFS_FAIL 0
+
+/* Give us a Y=0x59,
+ * Give us an A=0x41,
+ * Give us an FF=0xff
+ * Give us an S=0x53
+ * And what have we got...
+ */
+#define YAFFS_MAGIC 0x5941ff53
+
+/*
+ * Tnodes form a tree with the tnodes in "levels"
+ * Levels greater than 0 hold 8 slots which point to other tnodes.
+ * Those at level 0 hold 16 slots which point to chunks in NAND.
+ *
+ * A maximum level of 8 thus supports files of size up to:
+ *
+ * 2^(3*MAX_LEVEL+4)
+ *
+ * Thus a max level of 8 supports files with up to 2^^28 chunks which gives
+ * a maximum file size of around 512 Gbytes with 2k chunks.
+ */
+#define YAFFS_NTNODES_LEVEL0 16
+#define YAFFS_TNODES_LEVEL0_BITS 4
+#define YAFFS_TNODES_LEVEL0_MASK 0xf
+
+#define YAFFS_NTNODES_INTERNAL (YAFFS_NTNODES_LEVEL0 / 2)
+#define YAFFS_TNODES_INTERNAL_BITS (YAFFS_TNODES_LEVEL0_BITS - 1)
+#define YAFFS_TNODES_INTERNAL_MASK 0x7
+#define YAFFS_TNODES_MAX_LEVEL 8
+#define YAFFS_TNODES_MAX_BITS (YAFFS_TNODES_LEVEL0_BITS + \
+ YAFFS_TNODES_INTERNAL_BITS * \
+ YAFFS_TNODES_MAX_LEVEL)
+#define YAFFS_MAX_CHUNK_ID ((1 << YAFFS_TNODES_MAX_BITS) - 1)
+
+#define YAFFS_MAX_FILE_SIZE_32 0x7fffffff
+
+/* Constants for YAFFS1 mode */
+#define YAFFS_BYTES_PER_SPARE 16
+#define YAFFS_BYTES_PER_CHUNK 512
+#define YAFFS_CHUNK_SIZE_SHIFT 9
+#define YAFFS_CHUNKS_PER_BLOCK 32
+#define YAFFS_BYTES_PER_BLOCK (YAFFS_CHUNKS_PER_BLOCK*YAFFS_BYTES_PER_CHUNK)
+
+#define YAFFS_MIN_YAFFS2_CHUNK_SIZE 1024
+#define YAFFS_MIN_YAFFS2_SPARE_SIZE 32
+
+
+
+#define YAFFS_ALLOCATION_NOBJECTS 100
+#define YAFFS_ALLOCATION_NTNODES 100
+#define YAFFS_ALLOCATION_NLINKS 100
+
+#define YAFFS_NOBJECT_BUCKETS 256
+
+#define YAFFS_OBJECT_SPACE 0x40000
+#define YAFFS_MAX_OBJECT_ID (YAFFS_OBJECT_SPACE - 1)
+
+/* Binary data version stamps */
+#define YAFFS_SUMMARY_VERSION 1
+#define YAFFS_CHECKPOINT_VERSION 7
+
+#ifdef CONFIG_YAFFS_UNICODE
+#define YAFFS_MAX_NAME_LENGTH 127
+#define YAFFS_MAX_ALIAS_LENGTH 79
+#else
+#define YAFFS_MAX_NAME_LENGTH 255
+#define YAFFS_MAX_ALIAS_LENGTH 159
+#endif
+
+#define YAFFS_SHORT_NAME_LENGTH 15
+
+/* Some special object ids for pseudo objects */
+#define YAFFS_OBJECTID_ROOT 1
+#define YAFFS_OBJECTID_LOSTNFOUND 2
+#define YAFFS_OBJECTID_UNLINKED 3
+#define YAFFS_OBJECTID_DELETED 4
+
+/* Fake object Id for summary data */
+#define YAFFS_OBJECTID_SUMMARY 0x10
+
+/* Pseudo object ids for checkpointing */
+#define YAFFS_OBJECTID_CHECKPOINT_DATA 0x20
+#define YAFFS_SEQUENCE_CHECKPOINT_DATA 0x21
+
+#define YAFFS_MAX_SHORT_OP_CACHES 20
+
+#define YAFFS_N_TEMP_BUFFERS 6
+
+/* We limit the number of attempts at successfully saving a chunk of data.
+ * Small-page devices have 32 pages per block; large-page devices have 64.
+ * Default to something in the order of 5 to 10 blocks worth of chunks.
+ */
+#define YAFFS_WR_ATTEMPTS (5*64)
+
+/* Sequence numbers are used in YAFFS2 to determine block allocation order.
+ * The range is limited slightly to help distinguish bad numbers from good.
+ * This also allows us to perhaps in the future use special numbers for
+ * special purposes.
+ * EFFFFF00 allows the allocation of 8 blocks/second (~1Mbytes) for 15 years,
+ * and is a larger number than the lifetime of a 2GB device.
+ */
+#define YAFFS_LOWEST_SEQUENCE_NUMBER 0x00001000
+#define YAFFS_HIGHEST_SEQUENCE_NUMBER 0xefffff00
+
+/* Special sequence number for bad block that failed to be marked bad */
+#define YAFFS_SEQUENCE_BAD_BLOCK 0xffff0000
+
+/* ChunkCache is used for short read/write operations.*/
+struct yaffs_cache {
+ struct yaffs_obj *object;
+ int chunk_id;
+ int last_use;
+ int dirty;
+ int n_bytes; /* Only valid if the cache is dirty */
+ int locked; /* Can't push out or flush while locked. */
+ u8 *data;
+};
+
+/* yaffs1 tags structures in RAM
+ * NB This uses bitfield. Bitfields should not straddle a u32 boundary
+ * otherwise the structure size will get blown out.
+ */
+
+struct yaffs_tags {
+ u32 chunk_id:20;
+ u32 serial_number:2;
+ u32 n_bytes_lsb:10;
+ u32 obj_id:18;
+ u32 ecc:12;
+ u32 n_bytes_msb:2;
+};
+
+union yaffs_tags_union {
+ struct yaffs_tags as_tags;
+ u8 as_bytes[8];
+};
+
+
+/* Stuff used for extended tags in YAFFS2 */
+
+enum yaffs_ecc_result {
+ YAFFS_ECC_RESULT_UNKNOWN,
+ YAFFS_ECC_RESULT_NO_ERROR,
+ YAFFS_ECC_RESULT_FIXED,
+ YAFFS_ECC_RESULT_UNFIXED
+};
+
+enum yaffs_obj_type {
+ YAFFS_OBJECT_TYPE_UNKNOWN,
+ YAFFS_OBJECT_TYPE_FILE,
+ YAFFS_OBJECT_TYPE_SYMLINK,
+ YAFFS_OBJECT_TYPE_DIRECTORY,
+ YAFFS_OBJECT_TYPE_HARDLINK,
+ YAFFS_OBJECT_TYPE_SPECIAL
+};
+
+#define YAFFS_OBJECT_TYPE_MAX YAFFS_OBJECT_TYPE_SPECIAL
+
+struct yaffs_ext_tags {
+ unsigned chunk_used; /* Status of the chunk: used or unused */
+ unsigned obj_id; /* If 0 this is not used */
+ unsigned chunk_id; /* If 0 this is a header, else a data chunk */
+ unsigned n_bytes; /* Only valid for data chunks */
+
+ /* The following stuff only has meaning when we read */
+ enum yaffs_ecc_result ecc_result;
+ unsigned block_bad;
+
+ /* YAFFS 1 stuff */
+ unsigned is_deleted; /* The chunk is marked deleted */
+ unsigned serial_number; /* Yaffs1 2-bit serial number */
+
+ /* YAFFS2 stuff */
+ unsigned seq_number; /* The sequence number of this block */
+
+ /* Extra info if this is an object header (YAFFS2 only) */
+
+ unsigned extra_available; /* Extra info available if not zero */
+ unsigned extra_parent_id; /* The parent object */
+ unsigned extra_is_shrink; /* Is it a shrink header? */
+ unsigned extra_shadows; /* Does this shadow another object? */
+
+ enum yaffs_obj_type extra_obj_type; /* What object type? */
+
+ loff_t extra_file_size; /* Length if it is a file */
+ unsigned extra_equiv_id; /* Equivalent object for a hard link */
+};
+
+/* Spare structure for YAFFS1 */
+struct yaffs_spare {
+ u8 tb0;
+ u8 tb1;
+ u8 tb2;
+ u8 tb3;
+ u8 page_status; /* set to 0 to delete the chunk */
+ u8 block_status;
+ u8 tb4;
+ u8 tb5;
+ u8 ecc1[3];
+ u8 tb6;
+ u8 tb7;
+ u8 ecc2[3];
+};
+
+/*Special structure for passing through to mtd */
+struct yaffs_nand_spare {
+ struct yaffs_spare spare;
+ int eccres1;
+ int eccres2;
+};
+
+/* Block data in RAM */
+
+enum yaffs_block_state {
+ YAFFS_BLOCK_STATE_UNKNOWN = 0,
+
+ YAFFS_BLOCK_STATE_SCANNING,
+ /* Being scanned */
+
+ YAFFS_BLOCK_STATE_NEEDS_SCAN,
+ /* The block might have something on it (ie it is allocating or full,
+ * perhaps empty) but it needs to be scanned to determine its true
+ * state.
+ * This state is only valid during scanning.
+ * NB We tolerate empty because the pre-scanner might be incapable of
+ * deciding
+ * However, if this state is returned on a YAFFS2 device,
+ * then we expect a sequence number
+ */
+
+ YAFFS_BLOCK_STATE_EMPTY,
+ /* This block is empty */
+
+ YAFFS_BLOCK_STATE_ALLOCATING,
+ /* This block is partially allocated.
+ * At least one page holds valid data.
+ * This is the one currently being used for page
+ * allocation. Should never be more than one of these.
+ * If a block is only partially allocated at mount it is treated as
+ * full.
+ */
+
+ YAFFS_BLOCK_STATE_FULL,
+ /* All the pages in this block have been allocated.
+ * If a block was only partially allocated when mounted we treat
+ * it as fully allocated.
+ */
+
+ YAFFS_BLOCK_STATE_DIRTY,
+ /* The block was full and now all chunks have been deleted.
+ * Erase me, reuse me.
+ */
+
+ YAFFS_BLOCK_STATE_CHECKPOINT,
+ /* This block is assigned to holding checkpoint data. */
+
+ YAFFS_BLOCK_STATE_COLLECTING,
+ /* This block is being garbage collected */
+
+ YAFFS_BLOCK_STATE_DEAD
+ /* This block has failed and is not in use */
+};
+
+#define YAFFS_NUMBER_OF_BLOCK_STATES (YAFFS_BLOCK_STATE_DEAD + 1)
+
+struct yaffs_block_info {
+
+ s32 soft_del_pages:10; /* number of soft deleted pages */
+ s32 pages_in_use:10; /* number of pages in use */
+ u32 block_state:4; /* One of the above block states. */
+ /* NB use unsigned because enum is sometimes
+ * an int */
+ u32 needs_retiring:1; /* Data has failed on this block, */
+ /*need to get valid data off and retire*/
+ u32 skip_erased_check:1;/* Skip the erased check on this block */
+ u32 gc_prioritise:1; /* An ECC check or blank check has failed.
+ Block should be prioritised for GC */
+ u32 chunk_error_strikes:3; /* How many times we've had ecc etc
+ failures on this block and tried to reuse it */
+ u32 has_summary:1; /* The block has a summary */
+
+ u32 has_shrink_hdr:1; /* This block has at least one shrink header */
+ u32 seq_number; /* block sequence number for yaffs2 */
+
+};
+
+/* -------------------------- Object structure -------------------------------*/
+/* This is the object structure as stored on NAND */
+
+struct yaffs_obj_hdr {
+ enum yaffs_obj_type type;
+
+ /* Apply to everything */
+ int parent_obj_id;
+ u16 sum_no_longer_used; /* checksum of name. No longer used */
+ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ /* The following apply to all object types except for hard links */
+ u32 yst_mode; /* protection */
+
+ u32 yst_uid;
+ u32 yst_gid;
+ u32 yst_atime;
+ u32 yst_mtime;
+ u32 yst_ctime;
+
+ /* File size applies to files only */
+ u32 file_size_low;
+
+ /* Equivalent object id applies to hard links only. */
+ int equiv_id;
+
+ /* Alias is for symlinks only. */
+ YCHAR alias[YAFFS_MAX_ALIAS_LENGTH + 1];
+
+ u32 yst_rdev; /* stuff for block and char devices (major/min) */
+
+ u32 win_ctime[2];
+ u32 win_atime[2];
+ u32 win_mtime[2];
+
+ u32 inband_shadowed_obj_id;
+ u32 inband_is_shrink;
+
+ u32 file_size_high;
+ u32 reserved[1];
+ int shadows_obj; /* This object header shadows the
+ specified object if > 0 */
+
+ /* is_shrink applies to object headers written when we make a hole. */
+ u32 is_shrink;
+
+};
+
+/*--------------------------- Tnode -------------------------- */
+
+struct yaffs_tnode {
+ struct yaffs_tnode *internal[YAFFS_NTNODES_INTERNAL];
+};
+
+/*------------------------ Object -----------------------------*/
+/* An object can be one of:
+ * - a directory (no data, has children links)
+ * - a regular file (data.... not prunes :->).
+ * - a symlink [symbolic link] (the alias).
+ * - a hard link
+ */
+
+struct yaffs_file_var {
+ loff_t file_size;
+ loff_t scanned_size;
+ loff_t shrink_size;
+ int top_level;
+ struct yaffs_tnode *top;
+};
+
+struct yaffs_dir_var {
+ struct list_head children; /* list of child links */
+ struct list_head dirty; /* Entry for list of dirty directories */
+};
+
+struct yaffs_symlink_var {
+ YCHAR *alias;
+};
+
+struct yaffs_hardlink_var {
+ struct yaffs_obj *equiv_obj;
+ u32 equiv_id;
+};
+
+union yaffs_obj_var {
+ struct yaffs_file_var file_variant;
+ struct yaffs_dir_var dir_variant;
+ struct yaffs_symlink_var symlink_variant;
+ struct yaffs_hardlink_var hardlink_variant;
+};
+
+struct yaffs_obj {
+ u8 deleted:1; /* This should only apply to unlinked files. */
+ u8 soft_del:1; /* it has also been soft deleted */
+ u8 unlinked:1; /* An unlinked file.*/
+ u8 fake:1; /* A fake object has no presence on NAND. */
+ u8 rename_allowed:1; /* Some objects cannot be renamed. */
+ u8 unlink_allowed:1;
+ u8 dirty:1; /* the object needs to be written to flash */
+ u8 valid:1; /* When the file system is being loaded up, this
+ * object might be created before the data
+ * is available
+ * ie. file data chunks encountered before
+ * the header.
+ */
+ u8 lazy_loaded:1; /* This object has been lazy loaded and
+ * is missing some detail */
+
+ u8 defered_free:1; /* Object is removed from NAND, but is
+ * still in the inode cache.
+ * Free of object is defered.
+ * until the inode is released.
+ */
+ u8 being_created:1; /* This object is still being created
+ * so skip some verification checks. */
+ u8 is_shadowed:1; /* This object is shadowed on the way
+ * to being renamed. */
+
+ u8 xattr_known:1; /* We know if this has object has xattribs
+ * or not. */
+ u8 has_xattr:1; /* This object has xattribs.
+ * Only valid if xattr_known. */
+
+ u8 serial; /* serial number of chunk in NAND.*/
+ u16 sum; /* sum of the name to speed searching */
+
+ struct yaffs_dev *my_dev; /* The device I'm on */
+
+ struct list_head hash_link; /* list of objects in hash bucket */
+
+ struct list_head hard_links; /* hard linked object chain*/
+
+ /* directory structure stuff */
+ /* also used for linking up the free list */
+ struct yaffs_obj *parent;
+ struct list_head siblings;
+
+ /* Where's my object header in NAND? */
+ int hdr_chunk;
+
+ int n_data_chunks; /* Number of data chunks for this file. */
+
+ u32 obj_id; /* the object id value */
+
+ u32 yst_mode;
+
+ YCHAR short_name[YAFFS_SHORT_NAME_LENGTH + 1];
+
+#ifdef CONFIG_YAFFS_WINCE
+ u32 win_ctime[2];
+ u32 win_mtime[2];
+ u32 win_atime[2];
+#else
+ u32 yst_uid;
+ u32 yst_gid;
+ u32 yst_atime;
+ u32 yst_mtime;
+ u32 yst_ctime;
+#endif
+
+ u32 yst_rdev;
+
+ void *my_inode;
+
+ enum yaffs_obj_type variant_type;
+
+ union yaffs_obj_var variant;
+
+};
+
+struct yaffs_obj_bucket {
+ struct list_head list;
+ int count;
+};
+
+/* yaffs_checkpt_obj holds the definition of an object as dumped
+ * by checkpointing.
+ */
+
+struct yaffs_checkpt_obj {
+ int struct_type;
+ u32 obj_id;
+ u32 parent_id;
+ int hdr_chunk;
+ enum yaffs_obj_type variant_type:3;
+ u8 deleted:1;
+ u8 soft_del:1;
+ u8 unlinked:1;
+ u8 fake:1;
+ u8 rename_allowed:1;
+ u8 unlink_allowed:1;
+ u8 serial;
+ int n_data_chunks;
+ loff_t size_or_equiv_obj;
+};
+
+/*--------------------- Temporary buffers ----------------
+ *
+ * These are chunk-sized working buffers. Each device has a few.
+ */
+
+struct yaffs_buffer {
+ u8 *buffer;
+ int in_use;
+};
+
+/*----------------- Device ---------------------------------*/
+
+struct yaffs_param {
+ const YCHAR *name;
+
+ /*
+ * Entry parameters set up way early. Yaffs sets up the rest.
+ * The structure should be zeroed out before use so that unused
+ * and default values are zero.
+ */
+
+ int inband_tags; /* Use inband tags */
+ u32 total_bytes_per_chunk; /* Should be >= 512, does not need to
+ be a power of 2 */
+ int chunks_per_block; /* does not need to be a power of 2 */
+ int spare_bytes_per_chunk; /* spare area size */
+ int start_block; /* Start block we're allowed to use */
+ int end_block; /* End block we're allowed to use */
+ int n_reserved_blocks; /* Tuneable so that we can reduce
+ * reserved blocks on NOR and RAM. */
+
+ int n_caches; /* If <= 0, then short op caching is disabled,
+ * else the number of short op caches.
+ */
+ int cache_bypass_aligned; /* If non-zero then bypass the cache for
+ * aligned writes.
+ */
+
+ int use_nand_ecc; /* Flag to decide whether or not to use
+ * NAND driver ECC on data (yaffs1) */
+ int tags_9bytes; /* Use 9 byte tags */
+ int no_tags_ecc; /* Flag to decide whether or not to do ECC
+ * on packed tags (yaffs2) */
+
+ int is_yaffs2; /* Use yaffs2 mode on this device */
+
+ int empty_lost_n_found; /* Auto-empty lost+found directory on mount */
+
+ int refresh_period; /* How often to check for a block refresh */
+
+ /* Checkpoint control. Can be set before or after initialisation */
+ u8 skip_checkpt_rd;
+ u8 skip_checkpt_wr;
+
+ int enable_xattr; /* Enable xattribs */
+
+ int max_objects; /*
+ * Set to limit the number of objects created.
+ * 0 = no limit.
+ */
+
+ /* The remove_obj_fn function must be supplied by OS flavours that
+ * need it.
+ * yaffs direct uses it to implement the faster readdir.
+ * Linux uses it to protect the directory during unlocking.
+ */
+ void (*remove_obj_fn) (struct yaffs_obj *obj);
+
+ /* Callback to mark the superblock dirty */
+ void (*sb_dirty_fn) (struct yaffs_dev *dev);
+
+ /* Callback to control garbage collection. */
+ unsigned (*gc_control_fn) (struct yaffs_dev *dev);
+
+ /* Debug control flags. Don't use unless you know what you're doing */
+ int use_header_file_size; /* Flag to determine if we should use
+ * file sizes from the header */
+ int disable_lazy_load; /* Disable lazy loading on this device */
+ int wide_tnodes_disabled; /* Set to disable wide tnodes */
+ int disable_soft_del; /* yaffs 1 only: Set to disable the use of
+ * softdeletion. */
+
+ int defered_dir_update; /* Set to defer directory updates */
+
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+ int auto_unicode;
+#endif
+ int always_check_erased; /* Force chunk erased check always on */
+
+ int disable_summary;
+ int disable_bad_block_marking;
+
+};
+
+struct yaffs_driver {
+ int (*drv_write_chunk_fn) (struct yaffs_dev *dev, int nand_chunk,
+ const u8 *data, int data_len,
+ const u8 *oob, int oob_len);
+ int (*drv_read_chunk_fn) (struct yaffs_dev *dev, int nand_chunk,
+ u8 *data, int data_len,
+ u8 *oob, int oob_len,
+ enum yaffs_ecc_result *ecc_result);
+ int (*drv_erase_fn) (struct yaffs_dev *dev, int block_no);
+ int (*drv_mark_bad_fn) (struct yaffs_dev *dev, int block_no);
+ int (*drv_check_bad_fn) (struct yaffs_dev *dev, int block_no);
+ int (*drv_initialise_fn) (struct yaffs_dev *dev);
+ int (*drv_deinitialise_fn) (struct yaffs_dev *dev);
+};
+
+struct yaffs_tags_handler {
+ int (*write_chunk_tags_fn) (struct yaffs_dev *dev,
+ int nand_chunk, const u8 *data,
+ const struct yaffs_ext_tags *tags);
+ int (*read_chunk_tags_fn) (struct yaffs_dev *dev,
+ int nand_chunk, u8 *data,
+ struct yaffs_ext_tags *tags);
+
+ int (*query_block_fn) (struct yaffs_dev *dev, int block_no,
+ enum yaffs_block_state *state,
+ u32 *seq_number);
+ int (*mark_bad_fn) (struct yaffs_dev *dev, int block_no);
+};
+
+struct yaffs_dev {
+ struct yaffs_param param;
+ struct yaffs_driver drv;
+ struct yaffs_tags_handler tagger;
+
+ /* Context storage. Holds extra OS specific data for this device */
+
+ void *os_context;
+ void *driver_context;
+
+ struct list_head dev_list;
+
+ int ll_init;
+ /* Runtime parameters. Set up by YAFFS. */
+ int data_bytes_per_chunk;
+
+ /* Non-wide tnode stuff */
+ u16 chunk_grp_bits; /* Number of bits that need to be resolved if
+ * the tnodes are not wide enough.
+ */
+ u16 chunk_grp_size; /* == 2^^chunk_grp_bits */
+
+ /* Stuff to support wide tnodes */
+ u32 tnode_width;
+ u32 tnode_mask;
+ u32 tnode_size;
+
+ /* Stuff for figuring out file offset to chunk conversions */
+ u32 chunk_shift; /* Shift value */
+ u32 chunk_div; /* Divisor after shifting: 1 for 2^n sizes */
+ u32 chunk_mask; /* Mask to use for power-of-2 case */
+
+ int is_mounted;
+ int read_only;
+ int is_checkpointed;
+
+ /* Stuff to support block offsetting to support start block zero */
+ int internal_start_block;
+ int internal_end_block;
+ int block_offset;
+ int chunk_offset;
+
+ /* Runtime checkpointing stuff */
+ int checkpt_page_seq; /* running sequence number of checkpt pages */
+ int checkpt_byte_count;
+ int checkpt_byte_offs;
+ u8 *checkpt_buffer;
+ int checkpt_open_write;
+ int blocks_in_checkpt;
+ int checkpt_cur_chunk;
+ int checkpt_cur_block;
+ int checkpt_next_block;
+ int *checkpt_block_list;
+ int checkpt_max_blocks;
+ u32 checkpt_sum;
+ u32 checkpt_xor;
+
+ int checkpoint_blocks_required; /* Number of blocks needed to store
+ * current checkpoint set */
+
+ /* Block Info */
+ struct yaffs_block_info *block_info;
+ u8 *chunk_bits; /* bitmap of chunks in use */
+ u8 block_info_alt:1; /* allocated using alternative alloc */
+ u8 chunk_bits_alt:1; /* allocated using alternative alloc */
+ int chunk_bit_stride; /* Number of bytes of chunk_bits per block.
+ * Must be consistent with chunks_per_block.
+ */
+
+ int n_erased_blocks;
+ int alloc_block; /* Current block being allocated off */
+ u32 alloc_page;
+ int alloc_block_finder; /* Used to search for next allocation block */
+
+ /* Object and Tnode memory management */
+ void *allocator;
+ int n_obj;
+ int n_tnodes;
+
+ int n_hardlinks;
+
+ struct yaffs_obj_bucket obj_bucket[YAFFS_NOBJECT_BUCKETS];
+ u32 bucket_finder;
+
+ int n_free_chunks;
+
+ /* Garbage collection control */
+ u32 *gc_cleanup_list; /* objects to delete at the end of a GC. */
+ u32 n_clean_ups;
+
+ unsigned has_pending_prioritised_gc; /* We think this device might
+ have pending prioritised gcs */
+ unsigned gc_disable;
+ unsigned gc_block_finder;
+ unsigned gc_dirtiest;
+ unsigned gc_pages_in_use;
+ unsigned gc_not_done;
+ unsigned gc_block;
+ unsigned gc_chunk;
+ unsigned gc_skip;
+ struct yaffs_summary_tags *gc_sum_tags;
+
+ /* Special directories */
+ struct yaffs_obj *root_dir;
+ struct yaffs_obj *lost_n_found;
+
+ int buffered_block; /* Which block is buffered here? */
+ int doing_buffered_block_rewrite;
+
+ struct yaffs_cache *cache;
+ int cache_last_use;
+
+ /* Stuff for background deletion and unlinked files. */
+ struct yaffs_obj *unlinked_dir; /* Directory where unlinked and deleted
+ files live. */
+ struct yaffs_obj *del_dir; /* Directory where deleted objects are
+ sent to disappear. */
+ struct yaffs_obj *unlinked_deletion; /* Current file being
+ background deleted. */
+ int n_deleted_files; /* Count of files awaiting deletion; */
+ int n_unlinked_files; /* Count of unlinked files. */
+ int n_bg_deletions; /* Count of background deletions. */
+
+ /* Temporary buffer management */
+ struct yaffs_buffer temp_buffer[YAFFS_N_TEMP_BUFFERS];
+ int max_temp;
+ int temp_in_use;
+ int unmanaged_buffer_allocs;
+ int unmanaged_buffer_deallocs;
+
+ /* yaffs2 runtime stuff */
+ unsigned seq_number; /* Sequence number of currently
+ allocating block */
+ unsigned oldest_dirty_seq;
+ unsigned oldest_dirty_block;
+
+ /* Block refreshing */
+ int refresh_skip; /* A skip down counter.
+ * Refresh happens when this gets to zero. */
+
+ /* Dirty directory handling */
+ struct list_head dirty_dirs; /* List of dirty directories */
+
+ /* Summary */
+ int chunks_per_summary;
+ struct yaffs_summary_tags *sum_tags;
+
+ /* Statistics */
+ u32 n_page_writes;
+ u32 n_page_reads;
+ u32 n_erasures;
+ u32 n_bad_queries;
+ u32 n_bad_markings;
+ u32 n_erase_failures;
+ u32 n_gc_copies;
+ u32 all_gcs;
+ u32 passive_gc_count;
+ u32 oldest_dirty_gc_count;
+ u32 n_gc_blocks;
+ u32 bg_gcs;
+ u32 n_retried_writes;
+ u32 n_retired_blocks;
+ u32 n_ecc_fixed;
+ u32 n_ecc_unfixed;
+ u32 n_tags_ecc_fixed;
+ u32 n_tags_ecc_unfixed;
+ u32 n_deletions;
+ u32 n_unmarked_deletions;
+ u32 refresh_count;
+ u32 cache_hits;
+ u32 tags_used;
+ u32 summary_used;
+
+};
+
+/* The CheckpointDevice structure holds the device information that changes
+ *at runtime and must be preserved over unmount/mount cycles.
+ */
+struct yaffs_checkpt_dev {
+ int struct_type;
+ int n_erased_blocks;
+ int alloc_block; /* Current block being allocated off */
+ u32 alloc_page;
+ int n_free_chunks;
+
+ int n_deleted_files; /* Count of files awaiting deletion; */
+ int n_unlinked_files; /* Count of unlinked files. */
+ int n_bg_deletions; /* Count of background deletions. */
+
+ /* yaffs2 runtime stuff */
+ unsigned seq_number; /* Sequence number of currently
+ * allocating block */
+
+};
+
+struct yaffs_checkpt_validity {
+ int struct_type;
+ u32 magic;
+ u32 version;
+ u32 head;
+};
+
+struct yaffs_shadow_fixer {
+ int obj_id;
+ int shadowed_id;
+ struct yaffs_shadow_fixer *next;
+};
+
+/* Structure for doing xattr modifications */
+struct yaffs_xattr_mod {
+ int set; /* If 0 then this is a deletion */
+ const YCHAR *name;
+ const void *data;
+ int size;
+ int flags;
+ int result;
+};
+
+/*----------------------- YAFFS Functions -----------------------*/
+
+int yaffs_guts_initialise(struct yaffs_dev *dev);
+void yaffs_deinitialise(struct yaffs_dev *dev);
+
+int yaffs_get_n_free_chunks(struct yaffs_dev *dev);
+
+int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR * old_name,
+ struct yaffs_obj *new_dir, const YCHAR * new_name);
+
+int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR * name);
+int yaffs_del_obj(struct yaffs_obj *obj);
+struct yaffs_obj *yaffs_retype_obj(struct yaffs_obj *obj,
+ enum yaffs_obj_type type);
+
+
+int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR * name, int buffer_size);
+loff_t yaffs_get_obj_length(struct yaffs_obj *obj);
+int yaffs_get_obj_inode(struct yaffs_obj *obj);
+unsigned yaffs_get_obj_type(struct yaffs_obj *obj);
+int yaffs_get_obj_link_count(struct yaffs_obj *obj);
+
+/* File operations */
+int yaffs_file_rd(struct yaffs_obj *obj, u8 * buffer, loff_t offset,
+ int n_bytes);
+int yaffs_wr_file(struct yaffs_obj *obj, const u8 * buffer, loff_t offset,
+ int n_bytes, int write_trhrough);
+int yaffs_resize_file(struct yaffs_obj *obj, loff_t new_size);
+
+struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
+ const YCHAR *name, u32 mode, u32 uid,
+ u32 gid);
+
+int yaffs_flush_file(struct yaffs_obj *in,
+ int update_time,
+ int data_sync,
+ int discard_cache);
+
+/* Flushing and checkpointing */
+void yaffs_flush_whole_cache(struct yaffs_dev *dev, int discard);
+
+int yaffs_checkpoint_save(struct yaffs_dev *dev);
+int yaffs_checkpoint_restore(struct yaffs_dev *dev);
+
+/* Directory operations */
+struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
+ u32 mode, u32 uid, u32 gid);
+struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *the_dir,
+ const YCHAR *name);
+struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number);
+
+/* Link operations */
+struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR *name,
+ struct yaffs_obj *equiv_obj);
+
+struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj);
+
+/* Symlink operations */
+struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
+ const YCHAR *name, u32 mode, u32 uid,
+ u32 gid, const YCHAR *alias);
+YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj);
+
+/* Special inodes (fifos, sockets and devices) */
+struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
+ const YCHAR *name, u32 mode, u32 uid,
+ u32 gid, u32 rdev);
+
+int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR *name,
+ const void *value, int size, int flags);
+int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR *name, void *value,
+ int size);
+int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size);
+int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR *name);
+
+/* Special directories */
+struct yaffs_obj *yaffs_root(struct yaffs_dev *dev);
+struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev);
+
+void yaffs_handle_defered_free(struct yaffs_obj *obj);
+
+void yaffs_update_dirty_dirs(struct yaffs_dev *dev);
+
+int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency);
+
+/* Debug dump */
+int yaffs_dump_obj(struct yaffs_obj *obj);
+
+void yaffs_guts_test(struct yaffs_dev *dev);
+int yaffs_guts_ll_init(struct yaffs_dev *dev);
+
+
+/* A few useful functions to be used within the core files*/
+void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
+ int lyn);
+int yaffs_check_ff(u8 *buffer, int n_bytes);
+void yaffs_handle_chunk_error(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi);
+
+u8 *yaffs_get_temp_buffer(struct yaffs_dev *dev);
+void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer);
+
+struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
+ int number,
+ enum yaffs_obj_type type);
+int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+ int nand_chunk, int in_scan);
+void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR *name);
+void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
+ const struct yaffs_obj_hdr *oh);
+void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj);
+YCHAR *yaffs_clone_str(const YCHAR *str);
+void yaffs_link_fixup(struct yaffs_dev *dev, struct list_head *hard_list);
+void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no);
+int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name,
+ int force, int is_shrink, int shadows,
+ struct yaffs_xattr_mod *xop);
+void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
+ int backward_scanning);
+int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks);
+struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev);
+struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id,
+ struct yaffs_tnode *passed_tn);
+
+int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
+ int n_bytes, int write_trhrough);
+void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size);
+void yaffs_skip_rest_of_block(struct yaffs_dev *dev);
+
+int yaffs_count_free_chunks(struct yaffs_dev *dev);
+
+struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id);
+
+u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+ unsigned pos);
+
+int yaffs_is_non_empty_dir(struct yaffs_obj *obj);
+
+int yaffs_guts_format_dev(struct yaffs_dev *dev);
+
+void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
+ int *chunk_out, u32 *offset_out);
+/*
+ * Marshalling functions to get loff_t file sizes into aand out of
+ * object headers.
+ */
+void yaffs_oh_size_load(struct yaffs_obj_hdr *oh, loff_t fsize);
+loff_t yaffs_oh_to_size(struct yaffs_obj_hdr *oh);
+loff_t yaffs_max_file_size(struct yaffs_dev *dev);
+
+/*
+ * Debug function to count number of blocks in each state
+ * NB Needs to be called with correct number of integers
+ */
+
+void yaffs_count_blocks_by_state(struct yaffs_dev *dev, int bs[10]);
+
+int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+ struct yaffs_ext_tags *tags);
+
+#endif
Index: linux-3.18.21/fs/yaffs2/yaffs_linux.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_linux.h 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,48 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_LINUX_H__
+#define __YAFFS_LINUX_H__
+
+#include "yportenv.h"
+
+struct yaffs_linux_context {
+ struct list_head context_list; /* List of these we have mounted */
+ struct yaffs_dev *dev;
+ struct super_block *super;
+ struct task_struct *bg_thread; /* Background thread for this device */
+ int bg_running;
+ struct mutex gross_lock; /* Gross locking mutex*/
+ u8 *spare_buffer; /* For mtdif2 use. Don't know the buffer size
+ * at compile time so we have to allocate it.
+ */
+ struct list_head search_contexts;
+ struct task_struct *readdir_process;
+ unsigned mount_id;
+ int dirty;
+};
+
+#define yaffs_dev_to_lc(dev) ((struct yaffs_linux_context *)((dev)->os_context))
+#define yaffs_dev_to_mtd(dev) ((struct mtd_info *)((dev)->driver_context))
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+#define WRITE_SIZE_STR "writesize"
+#define WRITE_SIZE(mtd) ((mtd)->writesize)
+#else
+#define WRITE_SIZE_STR "oobblock"
+#define WRITE_SIZE(mtd) ((mtd)->oobblock)
+#endif
+
+#endif
Index: linux-3.18.21/fs/yaffs2/yaffs_mtdif.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_mtdif.c 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,317 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yportenv.h"
+
+#include "yaffs_mtdif.h"
+
+#include "linux/mtd/mtd.h"
+#include "linux/types.h"
+#include "linux/time.h"
+#include "linux/mtd/nand.h"
+#include "linux/kernel.h"
+#include "linux/version.h"
+#include "linux/types.h"
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+#include "uapi/linux/major.h"
+#endif
+
+#include "yaffs_trace.h"
+#include "yaffs_guts.h"
+#include "yaffs_linux.h"
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
+#define MTD_OPS_AUTO_OOB MTD_OOB_AUTO
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
+#define mtd_erase(m, ei) (m)->erase(m, ei)
+#define mtd_write_oob(m, addr, pops) (m)->write_oob(m, addr, pops)
+#define mtd_read_oob(m, addr, pops) (m)->read_oob(m, addr, pops)
+#define mtd_block_isbad(m, offs) (m)->block_isbad(m, offs)
+#define mtd_block_markbad(m, offs) (m)->block_markbad(m, offs)
+#endif
+
+
+
+int nandmtd_erase_block(struct yaffs_dev *dev, int block_no)
+{
+	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+	loff_t addr =	/* was u32: truncated erase address on MTDs >= 4GiB */
+	    ((loff_t) block_no) * dev->param.total_bytes_per_chunk *
+	    dev->param.chunks_per_block;
+	struct erase_info ei;
+	int retval = 0;
+
+	ei.mtd = mtd;
+	ei.addr = addr;
+	ei.len = dev->param.total_bytes_per_chunk * dev->param.chunks_per_block;
+	ei.time = 1000;
+	ei.retries = 2;
+	ei.callback = NULL;
+	ei.priv = (u_long) dev;
+
+	retval = mtd_erase(mtd, &ei);
+
+	if (retval == 0)
+		return YAFFS_OK;
+
+	return YAFFS_FAIL;
+}
+
+
+static int yaffs_mtd_write(struct yaffs_dev *dev, int nand_chunk,
+ const u8 *data, int data_len,
+ const u8 *oob, int oob_len)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ loff_t addr;
+ struct mtd_oob_ops ops;
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "yaffs_mtd_write(%p, %d, %p, %d, %p, %d)\n",
+ dev, nand_chunk, data, data_len, oob, oob_len);
+
+ if (!data || !data_len) {
+ data = NULL;
+ data_len = 0;
+ }
+
+ if (!oob || !oob_len) {
+ oob = NULL;
+ oob_len = 0;
+ }
+
+ addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
+ memset(&ops, 0, sizeof(ops));
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = (data) ? data_len : 0;
+ ops.ooblen = oob_len;
+ ops.datbuf = (u8 *)data;
+ ops.oobbuf = (u8 *)oob;
+
+ retval = mtd_write_oob(mtd, addr, &ops);
+ if (retval) {
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "write_oob failed, chunk %d, mtd error %d",
+ nand_chunk, retval);
+ }
+ return retval ? YAFFS_FAIL : YAFFS_OK;
+}
+
+static int yaffs_mtd_read(struct yaffs_dev *dev, int nand_chunk,
+ u8 *data, int data_len,
+ u8 *oob, int oob_len,
+ enum yaffs_ecc_result *ecc_result)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ loff_t addr;
+ struct mtd_oob_ops ops;
+ int retval;
+
+ addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
+ memset(&ops, 0, sizeof(ops));
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.len = (data) ? data_len : 0;
+ ops.ooblen = oob_len;
+ ops.datbuf = data;
+ ops.oobbuf = oob;
+
+#if (MTD_VERSION_CODE < MTD_VERSION(2, 6, 20))
+ /* In MTD 2.6.18 to 2.6.19 nand_base.c:nand_do_read_oob() has a bug;
+ * help it out with ops.len = ops.ooblen when ops.datbuf == NULL.
+ */
+ ops.len = (ops.datbuf) ? ops.len : ops.ooblen;
+#endif
+ /* Read page and oob using MTD.
+ * Check status and determine ECC result.
+ */
+ retval = mtd_read_oob(mtd, addr, &ops);
+ if (retval)
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "read_oob failed, chunk %d, mtd error %d",
+ nand_chunk, retval);
+
+ switch (retval) {
+ case 0:
+ /* no error */
+ if(ecc_result)
+ *ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+ break;
+
+ case -EUCLEAN:
+ /* MTD's ECC fixed the data */
+ if(ecc_result)
+ *ecc_result = YAFFS_ECC_RESULT_FIXED;
+ dev->n_ecc_fixed++;
+ break;
+
+ case -EBADMSG:
+ default:
+ /* MTD's ECC could not fix the data */
+ dev->n_ecc_unfixed++;
+ if(ecc_result)
+ *ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ return YAFFS_FAIL;
+ }
+
+ return YAFFS_OK;
+}
+
+static int yaffs_mtd_erase(struct yaffs_dev *dev, int block_no)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+
+ loff_t addr;
+ struct erase_info ei;
+ int retval = 0;
+ u32 block_size;
+
+ block_size = dev->param.total_bytes_per_chunk *
+ dev->param.chunks_per_block;
+ addr = ((loff_t) block_no) * block_size;
+
+ ei.mtd = mtd;
+ ei.addr = addr;
+ ei.len = block_size;
+ ei.time = 1000;
+ ei.retries = 2;
+ ei.callback = NULL;
+ ei.priv = (u_long) dev;
+
+ retval = mtd_erase(mtd, &ei);
+
+ if (retval == 0)
+ return YAFFS_OK;
+
+ return YAFFS_FAIL;
+}
+
+static int yaffs_mtd_mark_bad(struct yaffs_dev *dev, int block_no)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int blocksize = dev->param.chunks_per_block * dev->param.total_bytes_per_chunk;
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "marking block %d bad", block_no);
+
+ retval = mtd_block_markbad(mtd, (loff_t) blocksize * block_no);
+ return (retval) ? YAFFS_FAIL : YAFFS_OK;
+}
+
+static int yaffs_mtd_check_bad(struct yaffs_dev *dev, int block_no)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int blocksize = dev->param.chunks_per_block * dev->param.total_bytes_per_chunk;
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_MTD, "checking block %d bad", block_no);
+
+ retval = mtd_block_isbad(mtd, (loff_t) blocksize * block_no);
+ return (retval) ? YAFFS_FAIL : YAFFS_OK;
+}
+
+static int yaffs_mtd_initialise(struct yaffs_dev *dev)
+{
+ return YAFFS_OK;
+}
+
+static int yaffs_mtd_deinitialise(struct yaffs_dev *dev)
+{
+ return YAFFS_OK;
+}
+
+
+void yaffs_mtd_drv_install(struct yaffs_dev *dev)
+{
+ struct yaffs_driver *drv = &dev->drv;
+
+ drv->drv_write_chunk_fn = yaffs_mtd_write;
+ drv->drv_read_chunk_fn = yaffs_mtd_read;
+ drv->drv_erase_fn = yaffs_mtd_erase;
+ drv->drv_mark_bad_fn = yaffs_mtd_mark_bad;
+ drv->drv_check_bad_fn = yaffs_mtd_check_bad;
+ drv->drv_initialise_fn = yaffs_mtd_initialise;
+ drv->drv_deinitialise_fn = yaffs_mtd_deinitialise;
+}
+
+
+struct mtd_info * yaffs_get_mtd_device(dev_t sdev)
+{
+	struct mtd_info *mtd;
+
+	/* Check it's an mtd device..... */
+	if (MAJOR(sdev) != MTD_BLOCK_MAJOR)
+		return NULL;	/* This isn't an mtd device */
+
+	mtd = get_mtd_device(NULL, MINOR(sdev));	/* was a recursive self-call */
+	if (IS_ERR_OR_NULL(mtd))
+		return NULL;
+	/* Check it's NAND */
+	if (mtd->type != MTD_NANDFLASH) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"yaffs: MTD device is not NAND it's type %d", mtd->type);
+		put_mtd_device(mtd);	/* drop the reference taken above */
+		return NULL;
+	}
+	yaffs_trace(YAFFS_TRACE_OS, " %s %d", WRITE_SIZE_STR, WRITE_SIZE(mtd));
+	yaffs_trace(YAFFS_TRACE_OS, " oobsize %d", mtd->oobsize);
+	yaffs_trace(YAFFS_TRACE_OS, " erasesize %d", mtd->erasesize);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
+	yaffs_trace(YAFFS_TRACE_OS, " size %u", mtd->size);
+#else
+	yaffs_trace(YAFFS_TRACE_OS, " size %lld", mtd->size);
+#endif
+
+	return mtd;
+}
+
+int yaffs_verify_mtd(struct mtd_info *mtd, int yaffs_version, int inband_tags,
+		     int tags_9bytes)
+{
+	if (yaffs_version == 2) {
+		if ((WRITE_SIZE(mtd) < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
+		     mtd->oobsize < YAFFS_MIN_YAFFS2_SPARE_SIZE) &&
+		    !inband_tags) {
+			yaffs_trace(YAFFS_TRACE_ALWAYS,
+				"MTD device does not have the right page sizes"
+			);
+			return -1;
+		}
+	} else {
+		if (WRITE_SIZE(mtd) < YAFFS_BYTES_PER_CHUNK ||
+		    mtd->oobsize != YAFFS_BYTES_PER_SPARE) {
+			yaffs_trace(YAFFS_TRACE_ALWAYS,
+				"MTD device does not have the right page sizes"
+			);
+			return -1;
+		}
+
+		if (tags_9bytes && mtd->oobavail < 9) {
+			yaffs_trace(YAFFS_TRACE_ALWAYS,
+				"MTD device does not support 9-byte tags");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+
+void yaffs_put_mtd_device(struct mtd_info *mtd)
+{
+ if(mtd)
+ put_mtd_device(mtd);
+}
Index: linux-3.18.21/fs/yaffs2/yaffs_mtdif.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_mtdif.h 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,26 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_MTDIF_H__
+#define __YAFFS_MTDIF_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_mtd_drv_install(struct yaffs_dev *dev);
+struct mtd_info * yaffs_get_mtd_device(dev_t sdev);
+void yaffs_put_mtd_device(struct mtd_info *mtd);
+int yaffs_verify_mtd(struct mtd_info *mtd, int yaffs_version, int inband_tags,
+ int tags_9bytes);
+#endif
Index: linux-3.18.21/fs/yaffs2/yaffs_nameval.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_nameval.c 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,208 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This simple implementation of a name-value store assumes a small number of
+ * values and fits into a small finite buffer.
+ *
+ * Each attribute is stored as a record:
+ * sizeof(int) bytes record size.
+ * strnlen+1 bytes name null terminated.
+ * nbytes value.
+ * ----------
+ * total size stored in record size
+ *
+ * This code has not been tested with unicode yet.
+ */
+
+#include "yaffs_nameval.h"
+
+#include "yportenv.h"
+
+static int nval_find(const char *xb, int xb_size, const YCHAR *name,
+ int *exist_size)
+{
+ int pos = 0;
+ int size;
+
+ memcpy(&size, xb, sizeof(int));
+ while (size > 0 && (size < xb_size) && (pos + size < xb_size)) {
+ if (!strncmp((YCHAR *) (xb + pos + sizeof(int)),
+ name, size)) {
+ if (exist_size)
+ *exist_size = size;
+ return pos;
+ }
+ pos += size;
+ if (pos < xb_size - sizeof(int))
+ memcpy(&size, xb + pos, sizeof(int));
+ else
+ size = 0;
+ }
+ if (exist_size)
+ *exist_size = 0;
+ return -ENODATA;
+}
+
+static int nval_used(const char *xb, int xb_size)
+{
+ int pos = 0;
+ int size;
+
+ memcpy(&size, xb + pos, sizeof(int));
+ while (size > 0 && (size < xb_size) && (pos + size < xb_size)) {
+ pos += size;
+ if (pos < xb_size - sizeof(int))
+ memcpy(&size, xb + pos, sizeof(int));
+ else
+ size = 0;
+ }
+ return pos;
+}
+
+int nval_del(char *xb, int xb_size, const YCHAR *name)
+{
+	int pos = nval_find(xb, xb_size, name, NULL);
+	int size;
+
+	if (pos < 0 || pos >= xb_size)
+		return -ENODATA;
+
+	/* Find size, shift rest over this record (source and destination
+	 * overlap, so memmove), then zero out the tail of the buffer */
+	memcpy(&size, xb + pos, sizeof(int));
+	memmove(xb + pos, xb + pos + size, xb_size - (pos + size));
+	memset(xb + (xb_size - size), 0, size);
+	return 0;
+}
+
+int nval_set(char *xb, int xb_size, const YCHAR *name, const char *buf,
+	     int bsize, int flags)
+{
+	int pos;
+	int namelen = strnlen(name, xb_size);
+	int reclen;
+	int size_exist = 0;
+	int space;
+	int start;
+
+	pos = nval_find(xb, xb_size, name, &size_exist);
+
+	if (flags & XATTR_CREATE && pos >= 0)
+		return -EEXIST;
+	if (flags & XATTR_REPLACE && pos < 0)
+		return -ENODATA;
+
+	start = nval_used(xb, xb_size);
+	space = xb_size - start + size_exist;
+
+	reclen = (sizeof(int) + namelen + 1 + bsize);
+
+	if (reclen > space)
+		return -ENOSPC;
+
+	if (pos >= 0) {
+		nval_del(xb, xb_size, name);
+		start = nval_used(xb, xb_size);
+	}
+
+	pos = start;
+
+	memcpy(xb + pos, &reclen, sizeof(int));
+	pos += sizeof(int);
+	strncpy((YCHAR *) (xb + pos), name, namelen + 1);	/* bound was reclen: zero-padded past record end */
+	pos += (namelen + 1);
+	memcpy(xb + pos, buf, bsize);
+	return 0;
+}
+
+int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf,
+ int bsize)
+{
+ int pos = nval_find(xb, xb_size, name, NULL);
+ int size;
+
+ if (pos >= 0 && pos < xb_size) {
+
+ memcpy(&size, xb + pos, sizeof(int));
+ pos += sizeof(int); /* advance past record length */
+ size -= sizeof(int);
+
+ /* Advance over name string */
+ while (xb[pos] && size > 0 && pos < xb_size) {
+ pos++;
+ size--;
+ }
+ /*Advance over NUL */
+ pos++;
+ size--;
+
+ /* If bsize is zero then this is a size query.
+ * Return the size, but don't copy.
+ */
+ if (!bsize)
+ return size;
+
+ if (size <= bsize) {
+ memcpy(buf, xb + pos, size);
+ return size;
+ }
+ }
+ if (pos >= 0)
+ return -ERANGE;
+
+ return -ENODATA;
+}
+
+int nval_list(const char *xb, int xb_size, char *buf, int bsize)
+{
+ int pos = 0;
+ int size;
+ int name_len;
+ int ncopied = 0;
+ int filled = 0;
+
+ memcpy(&size, xb + pos, sizeof(int));
+ while (size > sizeof(int) &&
+ size <= xb_size &&
+ (pos + size) < xb_size &&
+ !filled) {
+ pos += sizeof(int);
+ size -= sizeof(int);
+ name_len = strnlen((YCHAR *) (xb + pos), size);
+ if (ncopied + name_len + 1 < bsize) {
+ memcpy(buf, xb + pos, name_len * sizeof(YCHAR));
+ buf += name_len;
+ *buf = '\0';
+ buf++;
+ if (sizeof(YCHAR) > 1) {
+ *buf = '\0';
+ buf++;
+ }
+ ncopied += (name_len + 1);
+ } else {
+ filled = 1;
+ }
+ pos += size;
+ if (pos < xb_size - sizeof(int))
+ memcpy(&size, xb + pos, sizeof(int));
+ else
+ size = 0;
+ }
+ return ncopied;
+}
+
+int nval_hasvalues(const char *xb, int xb_size)
+{
+ return nval_used(xb, xb_size) > 0;
+}
Index: linux-3.18.21/fs/yaffs2/yaffs_nameval.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_nameval.h 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,28 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __NAMEVAL_H__
+#define __NAMEVAL_H__
+
+#include "yportenv.h"
+
+int nval_del(char *xb, int xb_size, const YCHAR * name);
+int nval_set(char *xb, int xb_size, const YCHAR * name, const char *buf,
+ int bsize, int flags);
+int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf,
+ int bsize);
+int nval_list(const char *xb, int xb_size, char *buf, int bsize);
+int nval_hasvalues(const char *xb, int xb_size);
+#endif
Index: linux-3.18.21/fs/yaffs2/yaffs_nand.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_nand.c 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,122 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_nand.h"
+#include "yaffs_tagscompat.h"
+
+#include "yaffs_getblockinfo.h"
+#include "yaffs_summary.h"
+
+static int apply_chunk_offset(struct yaffs_dev *dev, int chunk)
+{
+ return chunk - dev->chunk_offset;
+}
+
+int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk,
+ u8 *buffer, struct yaffs_ext_tags *tags)
+{
+ int result;
+ struct yaffs_ext_tags local_tags;
+ int flash_chunk = apply_chunk_offset(dev, nand_chunk);
+
+ dev->n_page_reads++;
+
+ /* If there are no tags provided use local tags. */
+ if (!tags)
+ tags = &local_tags;
+
+ result = dev->tagger.read_chunk_tags_fn(dev, flash_chunk, buffer, tags);
+ if (tags && tags->ecc_result > YAFFS_ECC_RESULT_NO_ERROR) {
+
+ struct yaffs_block_info *bi;
+ bi = yaffs_get_block_info(dev,
+ nand_chunk /
+ dev->param.chunks_per_block);
+ yaffs_handle_chunk_error(dev, bi);
+ }
+ return result;
+}
+
+int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 *buffer, struct yaffs_ext_tags *tags)
+{
+ int result;
+ int flash_chunk = apply_chunk_offset(dev, nand_chunk);
+
+ dev->n_page_writes++;
+
+ if (!tags) {
+ yaffs_trace(YAFFS_TRACE_ERROR, "Writing with no tags");
+ BUG();
+ return YAFFS_FAIL;
+ }
+
+ tags->seq_number = dev->seq_number;
+ tags->chunk_used = 1;
+ yaffs_trace(YAFFS_TRACE_WRITE,
+ "Writing chunk %d tags %d %d",
+ nand_chunk, tags->obj_id, tags->chunk_id);
+
+ result = dev->tagger.write_chunk_tags_fn(dev, flash_chunk,
+ buffer, tags);
+
+ yaffs_summary_add(dev, tags, nand_chunk);
+
+ return result;
+}
+
+int yaffs_mark_bad(struct yaffs_dev *dev, int block_no)
+{
+ block_no -= dev->block_offset;
+ dev->n_bad_markings++;
+
+ if (dev->param.disable_bad_block_marking)
+ return YAFFS_OK;
+
+ return dev->tagger.mark_bad_fn(dev, block_no);
+}
+
+
+int yaffs_query_init_block_state(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ u32 *seq_number)
+{
+ block_no -= dev->block_offset;
+ return dev->tagger.query_block_fn(dev, block_no, state, seq_number);
+}
+
+int yaffs_erase_block(struct yaffs_dev *dev, int block_no)
+{
+ int result;
+
+ block_no -= dev->block_offset;
+ dev->n_erasures++;
+ result = dev->drv.drv_erase_fn(dev, block_no);
+ return result;
+}
+
+int yaffs_init_nand(struct yaffs_dev *dev)
+{
+ if (dev->drv.drv_initialise_fn)
+ return dev->drv.drv_initialise_fn(dev);
+ return YAFFS_OK;
+}
+
+int yaffs_deinit_nand(struct yaffs_dev *dev)
+{
+ if (dev->drv.drv_deinitialise_fn)
+ return dev->drv.drv_deinitialise_fn(dev);
+ return YAFFS_OK;
+}
Index: linux-3.18.21/fs/yaffs2/yaffs_nand.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_nand.h 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,39 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_NAND_H__
+#define __YAFFS_NAND_H__
+#include "yaffs_guts.h"
+
+int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk,
+ u8 *buffer, struct yaffs_ext_tags *tags);
+
+int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 *buffer, struct yaffs_ext_tags *tags);
+
+int yaffs_mark_bad(struct yaffs_dev *dev, int block_no);
+
+int yaffs_query_init_block_state(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ unsigned *seq_number);
+
+int yaffs_erase_block(struct yaffs_dev *dev, int flash_block);
+
+int yaffs_init_nand(struct yaffs_dev *dev);
+int yaffs_deinit_nand(struct yaffs_dev *dev);
+
+#endif
Index: linux-3.18.21/fs/yaffs2/yaffs_packedtags1.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_packedtags1.c 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,56 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_packedtags1.h"
+#include "yportenv.h"
+
+static const u8 all_ff[20] = {
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff
+};
+
+void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt,
+ const struct yaffs_ext_tags *t)
+{
+ pt->chunk_id = t->chunk_id;
+ pt->serial_number = t->serial_number;
+ pt->n_bytes = t->n_bytes;
+ pt->obj_id = t->obj_id;
+ pt->ecc = 0;
+ pt->deleted = (t->is_deleted) ? 0 : 1;
+ pt->unused_stuff = 0;
+ pt->should_be_ff = 0xffffffff;
+}
+
+void yaffs_unpack_tags1(struct yaffs_ext_tags *t,
+ const struct yaffs_packed_tags1 *pt)
+{
+
+ if (memcmp(all_ff, pt, sizeof(struct yaffs_packed_tags1))) {
+ t->block_bad = 0;
+ if (pt->should_be_ff != 0xffffffff)
+ t->block_bad = 1;
+ t->chunk_used = 1;
+ t->obj_id = pt->obj_id;
+ t->chunk_id = pt->chunk_id;
+ t->n_bytes = pt->n_bytes;
+ t->ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+ t->is_deleted = (pt->deleted) ? 0 : 1;
+ t->serial_number = pt->serial_number;
+ } else {
+ memset(t, 0, sizeof(struct yaffs_ext_tags));
+ }
+}
Index: linux-3.18.21/fs/yaffs2/yaffs_packedtags1.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_packedtags1.h 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,39 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/* This is used to pack YAFFS1 tags, not YAFFS2 tags. */
+
+#ifndef __YAFFS_PACKEDTAGS1_H__
+#define __YAFFS_PACKEDTAGS1_H__
+
+#include "yaffs_guts.h"
+
+struct yaffs_packed_tags1 {
+ u32 chunk_id:20;
+ u32 serial_number:2;
+ u32 n_bytes:10;
+ u32 obj_id:18;
+ u32 ecc:12;
+ u32 deleted:1;
+ u32 unused_stuff:1;
+ unsigned should_be_ff;
+
+};
+
+void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt,
+ const struct yaffs_ext_tags *t);
+void yaffs_unpack_tags1(struct yaffs_ext_tags *t,
+ const struct yaffs_packed_tags1 *pt);
+#endif
Index: linux-3.18.21/fs/yaffs2/yaffs_packedtags2.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_packedtags2.c 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,197 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_packedtags2.h"
+#include "yportenv.h"
+#include "yaffs_trace.h"
+
+/* This code packs a set of extended tags into a binary structure for
+ * NAND storage
+ */
+
+/* Some of the information is "extra" stuff which can be packed in to
+ * speed scanning
+ * This is defined by having the EXTRA_HEADER_INFO_FLAG set.
+ */
+
+/* Extra flags applied to chunk_id */
+
+#define EXTRA_HEADER_INFO_FLAG 0x80000000
+#define EXTRA_SHRINK_FLAG 0x40000000
+#define EXTRA_SHADOWS_FLAG 0x20000000
+#define EXTRA_SPARE_FLAGS 0x10000000
+
+#define ALL_EXTRA_FLAGS 0xf0000000
+
+/* Also, the top 4 bits of the object Id are set to the object type. */
+#define EXTRA_OBJECT_TYPE_SHIFT (28)
+#define EXTRA_OBJECT_TYPE_MASK ((0x0f) << EXTRA_OBJECT_TYPE_SHIFT)
+
+static void yaffs_dump_packed_tags2_tags_only(
+ const struct yaffs_packed_tags2_tags_only *ptt)
+{
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "packed tags obj %d chunk %d byte %d seq %d",
+ ptt->obj_id, ptt->chunk_id, ptt->n_bytes, ptt->seq_number);
+}
+
+static void yaffs_dump_packed_tags2(const struct yaffs_packed_tags2 *pt)
+{
+ yaffs_dump_packed_tags2_tags_only(&pt->t);
+}
+
+static void yaffs_dump_tags2(const struct yaffs_ext_tags *t)
+{
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "ext.tags eccres %d blkbad %d chused %d obj %d chunk%d byte %d del %d ser %d seq %d",
+ t->ecc_result, t->block_bad, t->chunk_used, t->obj_id,
+ t->chunk_id, t->n_bytes, t->is_deleted, t->serial_number,
+ t->seq_number);
+
+}
+
+static int yaffs_check_tags_extra_packable(const struct yaffs_ext_tags *t)
+{
+ if (t->chunk_id != 0 || !t->extra_available)
+ return 0;
+
+ /* Check if the file size is too long to store */
+ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_FILE &&
+ (t->extra_file_size >> 31) != 0)
+ return 0;
+ return 1;
+}
+
+void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *ptt,
+ const struct yaffs_ext_tags *t)
+{
+ ptt->chunk_id = t->chunk_id;
+ ptt->seq_number = t->seq_number;
+ ptt->n_bytes = t->n_bytes;
+ ptt->obj_id = t->obj_id;
+
+ /* Only store extra tags for object headers.
+ * If it is a file then only store if the file size is short\
+ * enough to fit.
+ */
+ if (yaffs_check_tags_extra_packable(t)) {
+ /* Store the extra header info instead */
+ /* We save the parent object in the chunk_id */
+ ptt->chunk_id = EXTRA_HEADER_INFO_FLAG | t->extra_parent_id;
+ if (t->extra_is_shrink)
+ ptt->chunk_id |= EXTRA_SHRINK_FLAG;
+ if (t->extra_shadows)
+ ptt->chunk_id |= EXTRA_SHADOWS_FLAG;
+
+ ptt->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
+ ptt->obj_id |= (t->extra_obj_type << EXTRA_OBJECT_TYPE_SHIFT);
+
+ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ ptt->n_bytes = t->extra_equiv_id;
+ else if (t->extra_obj_type == YAFFS_OBJECT_TYPE_FILE)
+ ptt->n_bytes = (unsigned) t->extra_file_size;
+ else
+ ptt->n_bytes = 0;
+ }
+
+ yaffs_dump_packed_tags2_tags_only(ptt);
+ yaffs_dump_tags2(t);
+}
+
+void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt,
+ const struct yaffs_ext_tags *t, int tags_ecc)
+{
+ yaffs_pack_tags2_tags_only(&pt->t, t);
+
+ if (tags_ecc)
+ yaffs_ecc_calc_other((unsigned char *)&pt->t,
+ sizeof(struct yaffs_packed_tags2_tags_only),
+ &pt->ecc);
+}
+
+void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t,
+ struct yaffs_packed_tags2_tags_only *ptt)
+{
+ memset(t, 0, sizeof(struct yaffs_ext_tags));
+
+ if (ptt->seq_number == 0xffffffff)
+ return;
+
+ t->block_bad = 0;
+ t->chunk_used = 1;
+ t->obj_id = ptt->obj_id;
+ t->chunk_id = ptt->chunk_id;
+ t->n_bytes = ptt->n_bytes;
+ t->is_deleted = 0;
+ t->serial_number = 0;
+ t->seq_number = ptt->seq_number;
+
+ /* Do extra header info stuff */
+ if (ptt->chunk_id & EXTRA_HEADER_INFO_FLAG) {
+ t->chunk_id = 0;
+ t->n_bytes = 0;
+
+ t->extra_available = 1;
+ t->extra_parent_id = ptt->chunk_id & (~(ALL_EXTRA_FLAGS));
+ t->extra_is_shrink = ptt->chunk_id & EXTRA_SHRINK_FLAG ? 1 : 0;
+ t->extra_shadows = ptt->chunk_id & EXTRA_SHADOWS_FLAG ? 1 : 0;
+ t->extra_obj_type = ptt->obj_id >> EXTRA_OBJECT_TYPE_SHIFT;
+ t->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
+
+ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ t->extra_equiv_id = ptt->n_bytes;
+ else
+ t->extra_file_size = ptt->n_bytes;
+ }
+ yaffs_dump_packed_tags2_tags_only(ptt);
+ yaffs_dump_tags2(t);
+}
+
+void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt,
+ int tags_ecc)
+{
+ enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+
+ if (pt->t.seq_number != 0xffffffff && tags_ecc) {
+ /* Chunk is in use and we need to do ECC */
+
+ struct yaffs_ecc_other ecc;
+ int result;
+ yaffs_ecc_calc_other((unsigned char *)&pt->t,
+ sizeof(struct yaffs_packed_tags2_tags_only),
+ &ecc);
+ result =
+ yaffs_ecc_correct_other((unsigned char *)&pt->t,
+ sizeof(struct yaffs_packed_tags2_tags_only),
+ &pt->ecc, &ecc);
+ switch (result) {
+ case 0:
+ ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+ break;
+ case 1:
+ ecc_result = YAFFS_ECC_RESULT_FIXED;
+ break;
+ case -1:
+ ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ break;
+ default:
+ ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
+ }
+ }
+ yaffs_unpack_tags2_tags_only(t, &pt->t);
+
+ t->ecc_result = ecc_result;
+
+ yaffs_dump_packed_tags2(pt);
+ yaffs_dump_tags2(t);
+}
Index: linux-3.18.21/fs/yaffs2/yaffs_packedtags2.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_packedtags2.h 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,47 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/* This is used to pack YAFFS2 tags, not YAFFS1tags. */
+
+#ifndef __YAFFS_PACKEDTAGS2_H__
+#define __YAFFS_PACKEDTAGS2_H__
+
+#include "yaffs_guts.h"
+#include "yaffs_ecc.h"
+
+struct yaffs_packed_tags2_tags_only {
+ unsigned seq_number;
+ unsigned obj_id;
+ unsigned chunk_id;
+ unsigned n_bytes;
+};
+
+struct yaffs_packed_tags2 {
+ struct yaffs_packed_tags2_tags_only t;
+ struct yaffs_ecc_other ecc;
+};
+
+/* Full packed tags with ECC, used for oob tags */
+void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt,
+ const struct yaffs_ext_tags *t, int tags_ecc);
+void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt,
+ int tags_ecc);
+
+/* Only the tags part (no ECC for use with inband tags */
+void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *pt,
+ const struct yaffs_ext_tags *t);
+void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t,
+ struct yaffs_packed_tags2_tags_only *pt);
+#endif
Index: linux-3.18.21/fs/yaffs2/yaffs_summary.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_summary.c 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,312 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Summaries write the useful part of the tags for the chunks in a block into
+ * an array which is written to the last n chunks of the block.
+ * Reading the summaries gives all the tags for the block in one read. Much
+ * faster.
+ *
+ * Chunks holding summaries are marked with tags making it look like
+ * they are part of a fake file.
+ *
+ * The summary could also be used during gc.
+ *
+ */
+
+#include "yaffs_summary.h"
+#include "yaffs_packedtags2.h"
+#include "yaffs_nand.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_bitmap.h"
+
+/*
+ * The summary is built up in an array of summary tags.
+ * This gets written to the last one or two (maybe more) chunks in a block.
+ * A summary header is written as the first part of each chunk of summary data.
+ * The summary header must match or the summary is rejected.
+ */
+
+/* Summary tags don't need the sequence number because that is redundant. */
+struct yaffs_summary_tags {
+ unsigned obj_id;
+ unsigned chunk_id;
+ unsigned n_bytes;
+};
+
+/* Summary header */
+struct yaffs_summary_header {
+ unsigned version; /* Must match current version */
+ unsigned block; /* Must be this block */
+ unsigned seq; /* Must be this sequence number */
+ unsigned sum; /* Just add up all the bytes in the tags */
+};
+
+
+static void yaffs_summary_clear(struct yaffs_dev *dev)
+{
+ if (!dev->sum_tags)
+ return;
+ memset(dev->sum_tags, 0, dev->chunks_per_summary *
+ sizeof(struct yaffs_summary_tags));
+}
+
+
+void yaffs_summary_deinit(struct yaffs_dev *dev)
+{
+ kfree(dev->sum_tags);
+ dev->sum_tags = NULL;
+ kfree(dev->gc_sum_tags);
+ dev->gc_sum_tags = NULL;
+ dev->chunks_per_summary = 0;
+}
+
+int yaffs_summary_init(struct yaffs_dev *dev)
+{
+ int sum_bytes;
+ int chunks_used; /* Number of chunks used by summary */
+ int sum_tags_bytes;
+
+ sum_bytes = dev->param.chunks_per_block *
+ sizeof(struct yaffs_summary_tags);
+
+ chunks_used = (sum_bytes + dev->data_bytes_per_chunk - 1)/
+ (dev->data_bytes_per_chunk -
+ sizeof(struct yaffs_summary_header));
+
+ dev->chunks_per_summary = dev->param.chunks_per_block - chunks_used;
+ sum_tags_bytes = sizeof(struct yaffs_summary_tags) *
+ dev->chunks_per_summary;
+ dev->sum_tags = kmalloc(sum_tags_bytes, GFP_NOFS);
+ dev->gc_sum_tags = kmalloc(sum_tags_bytes, GFP_NOFS);
+ if (!dev->sum_tags || !dev->gc_sum_tags) {
+ yaffs_summary_deinit(dev);
+ return YAFFS_FAIL;
+ }
+
+ yaffs_summary_clear(dev);
+
+ return YAFFS_OK;
+}
+
+static unsigned yaffs_summary_sum(struct yaffs_dev *dev)
+{
+ u8 *sum_buffer = (u8 *)dev->sum_tags;
+ int i;
+ unsigned sum = 0;
+
+ i = sizeof(struct yaffs_summary_tags) *
+ dev->chunks_per_summary;
+ while (i > 0) {
+ sum += *sum_buffer;
+ sum_buffer++;
+ i--;
+ }
+
+ return sum;
+}
+
+static int yaffs_summary_write(struct yaffs_dev *dev, int blk)
+{
+ struct yaffs_ext_tags tags;
+ u8 *buffer;
+ u8 *sum_buffer = (u8 *)dev->sum_tags;
+ int n_bytes;
+ int chunk_in_nand;
+ int chunk_in_block;
+ int result;
+ int this_tx;
+ struct yaffs_summary_header hdr;
+ int sum_bytes_per_chunk = dev->data_bytes_per_chunk - sizeof(hdr);
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk);
+
+ buffer = yaffs_get_temp_buffer(dev);
+ n_bytes = sizeof(struct yaffs_summary_tags) *
+ dev->chunks_per_summary;
+ memset(&tags, 0, sizeof(struct yaffs_ext_tags));
+ tags.obj_id = YAFFS_OBJECTID_SUMMARY;
+ tags.chunk_id = 1;
+ chunk_in_block = dev->chunks_per_summary;
+ chunk_in_nand = dev->alloc_block * dev->param.chunks_per_block +
+ dev->chunks_per_summary;
+ hdr.version = YAFFS_SUMMARY_VERSION;
+ hdr.block = blk;
+ hdr.seq = bi->seq_number;
+ hdr.sum = yaffs_summary_sum(dev);
+
+ do {
+ this_tx = n_bytes;
+ if (this_tx > sum_bytes_per_chunk)
+ this_tx = sum_bytes_per_chunk;
+ memcpy(buffer, &hdr, sizeof(hdr));
+ memcpy(buffer + sizeof(hdr), sum_buffer, this_tx);
+ tags.n_bytes = this_tx + sizeof(hdr);
+ result = yaffs_wr_chunk_tags_nand(dev, chunk_in_nand,
+ buffer, &tags);
+
+ if (result != YAFFS_OK)
+ break;
+ yaffs_set_chunk_bit(dev, blk, chunk_in_block);
+ bi->pages_in_use++;
+ dev->n_free_chunks--;
+
+ n_bytes -= this_tx;
+ sum_buffer += this_tx;
+ chunk_in_nand++;
+ chunk_in_block++;
+ tags.chunk_id++;
+ } while (result == YAFFS_OK && n_bytes > 0);
+ yaffs_release_temp_buffer(dev, buffer);
+
+
+ if (result == YAFFS_OK)
+ bi->has_summary = 1;
+
+
+ return result;
+}
+
+int yaffs_summary_read(struct yaffs_dev *dev,
+ struct yaffs_summary_tags *st,
+ int blk)
+{
+ struct yaffs_ext_tags tags;
+ u8 *buffer;
+ u8 *sum_buffer = (u8 *)st;
+ int n_bytes;
+ int chunk_id;
+ int chunk_in_nand;
+ int chunk_in_block;
+ int result;
+ int this_tx;
+ struct yaffs_summary_header hdr;
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk);
+ int sum_bytes_per_chunk = dev->data_bytes_per_chunk - sizeof(hdr);
+ int sum_tags_bytes;
+
+ sum_tags_bytes = sizeof(struct yaffs_summary_tags) *
+ dev->chunks_per_summary;
+ buffer = yaffs_get_temp_buffer(dev);
+ n_bytes = sizeof(struct yaffs_summary_tags) * dev->chunks_per_summary;
+ chunk_in_block = dev->chunks_per_summary;
+ chunk_in_nand = blk * dev->param.chunks_per_block +
+ dev->chunks_per_summary;
+ chunk_id = 1;
+ do {
+ this_tx = n_bytes;
+ if (this_tx > sum_bytes_per_chunk)
+ this_tx = sum_bytes_per_chunk;
+ result = yaffs_rd_chunk_tags_nand(dev, chunk_in_nand,
+ buffer, &tags);
+
+ if (tags.chunk_id != chunk_id ||
+ tags.obj_id != YAFFS_OBJECTID_SUMMARY ||
+ tags.chunk_used == 0 ||
+ tags.ecc_result > YAFFS_ECC_RESULT_FIXED ||
+ tags.n_bytes != (this_tx + sizeof(hdr)))
+ result = YAFFS_FAIL;
+ if (result != YAFFS_OK)
+ break;
+
+ if (st == dev->sum_tags) {
+ /* If we're scanning then update the block info */
+ yaffs_set_chunk_bit(dev, blk, chunk_in_block);
+ bi->pages_in_use++;
+ }
+ memcpy(&hdr, buffer, sizeof(hdr));
+ memcpy(sum_buffer, buffer + sizeof(hdr), this_tx);
+ n_bytes -= this_tx;
+ sum_buffer += this_tx;
+ chunk_in_nand++;
+ chunk_in_block++;
+ chunk_id++;
+ } while (result == YAFFS_OK && n_bytes > 0);
+ yaffs_release_temp_buffer(dev, buffer);
+
+ if (result == YAFFS_OK) {
+ /* Verify header */
+ if (hdr.version != YAFFS_SUMMARY_VERSION ||
+ hdr.seq != bi->seq_number ||
+ hdr.sum != yaffs_summary_sum(dev))
+ result = YAFFS_FAIL;
+ }
+
+ if (st == dev->sum_tags && result == YAFFS_OK)
+ bi->has_summary = 1;
+
+ return result;
+}
+
+int yaffs_summary_add(struct yaffs_dev *dev,
+ struct yaffs_ext_tags *tags,
+ int chunk_in_nand)
+{
+ struct yaffs_packed_tags2_tags_only tags_only;
+ struct yaffs_summary_tags *sum_tags;
+ int block_in_nand = chunk_in_nand / dev->param.chunks_per_block;
+ int chunk_in_block = chunk_in_nand % dev->param.chunks_per_block;
+
+ if (!dev->sum_tags)
+ return YAFFS_OK;
+
+ if (chunk_in_block >= 0 && chunk_in_block < dev->chunks_per_summary) {
+ yaffs_pack_tags2_tags_only(&tags_only, tags);
+ sum_tags = &dev->sum_tags[chunk_in_block];
+ sum_tags->chunk_id = tags_only.chunk_id;
+ sum_tags->n_bytes = tags_only.n_bytes;
+ sum_tags->obj_id = tags_only.obj_id;
+
+ if (chunk_in_block == dev->chunks_per_summary - 1) {
+ /* Time to write out the summary */
+ yaffs_summary_write(dev, block_in_nand);
+ yaffs_summary_clear(dev);
+ yaffs_skip_rest_of_block(dev);
+ }
+ }
+ return YAFFS_OK;
+}
+
+int yaffs_summary_fetch(struct yaffs_dev *dev,
+ struct yaffs_ext_tags *tags,
+ int chunk_in_block)
+{
+ struct yaffs_packed_tags2_tags_only tags_only;
+ struct yaffs_summary_tags *sum_tags;
+ if (chunk_in_block >= 0 && chunk_in_block < dev->chunks_per_summary) {
+ sum_tags = &dev->sum_tags[chunk_in_block];
+ tags_only.chunk_id = sum_tags->chunk_id;
+ tags_only.n_bytes = sum_tags->n_bytes;
+ tags_only.obj_id = sum_tags->obj_id;
+ yaffs_unpack_tags2_tags_only(tags, &tags_only);
+ return YAFFS_OK;
+ }
+ return YAFFS_FAIL;
+}
+
+void yaffs_summary_gc(struct yaffs_dev *dev, int blk)
+{
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk);
+ int i;
+
+ if (!bi->has_summary)
+ return;
+
+ for (i = dev->chunks_per_summary;
+ i < dev->param.chunks_per_block;
+ i++) {
+ if (yaffs_check_chunk_bit(dev, blk, i)) {
+ yaffs_clear_chunk_bit(dev, blk, i);
+ bi->pages_in_use--;
+ dev->n_free_chunks++;
+ }
+ }
+}
Index: linux-3.18.21/fs/yaffs2/yaffs_summary.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_summary.h 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,37 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_SUMMARY_H__
+#define __YAFFS_SUMMARY_H__
+
+#include "yaffs_packedtags2.h"
+
+
+int yaffs_summary_init(struct yaffs_dev *dev);
+void yaffs_summary_deinit(struct yaffs_dev *dev);
+
+int yaffs_summary_add(struct yaffs_dev *dev,
+ struct yaffs_ext_tags *tags,
+ int chunk_in_block);
+int yaffs_summary_fetch(struct yaffs_dev *dev,
+ struct yaffs_ext_tags *tags,
+ int chunk_in_block);
+int yaffs_summary_read(struct yaffs_dev *dev,
+ struct yaffs_summary_tags *st,
+ int blk);
+void yaffs_summary_gc(struct yaffs_dev *dev, int blk);
+
+
+#endif
Index: linux-3.18.21/fs/yaffs2/yaffs_tagscompat.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_tagscompat.c 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,595 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_tagscompat.h"
+#include "yaffs_ecc.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_trace.h"
+
+#if 0
+static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk);
+#endif
+
+
+/********** Tags ECC calculations *********/
+
+
+void yaffs_calc_tags_ecc(struct yaffs_tags *tags)
+{
+ /* Calculate an ecc */
+ unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
+ unsigned i, j;
+ unsigned ecc = 0;
+ unsigned bit = 0;
+
+ tags->ecc = 0;
+
+ for (i = 0; i < 8; i++) {
+ for (j = 1; j & 0xff; j <<= 1) {
+ bit++;
+ if (b[i] & j)
+ ecc ^= bit;
+ }
+ }
+ tags->ecc = ecc;
+}
+
+int yaffs_check_tags_ecc(struct yaffs_tags *tags)
+{
+ unsigned ecc = tags->ecc;
+
+ yaffs_calc_tags_ecc(tags);
+
+ ecc ^= tags->ecc;
+
+ if (ecc && ecc <= 64) {
+ /* TODO: Handle the failure better. Retire? */
+ unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
+
+ ecc--;
+
+ b[ecc / 8] ^= (1 << (ecc & 7));
+
+ /* Now recvalc the ecc */
+ yaffs_calc_tags_ecc(tags);
+
+ return 1; /* recovered error */
+ } else if (ecc) {
+ /* Wierd ecc failure value */
+ /* TODO Need to do somethiong here */
+ return -1; /* unrecovered error */
+ }
+ return 0;
+}
+
+#if 0
+/********** Tags **********/
+
+static void yaffs_load_tags_to_spare(struct yaffs_spare *spare_ptr,
+ struct yaffs_tags *tags_ptr)
+{
+ union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
+
+ yaffs_calc_tags_ecc(tags_ptr);
+
+ spare_ptr->tb0 = tu->as_bytes[0];
+ spare_ptr->tb1 = tu->as_bytes[1];
+ spare_ptr->tb2 = tu->as_bytes[2];
+ spare_ptr->tb3 = tu->as_bytes[3];
+ spare_ptr->tb4 = tu->as_bytes[4];
+ spare_ptr->tb5 = tu->as_bytes[5];
+ spare_ptr->tb6 = tu->as_bytes[6];
+ spare_ptr->tb7 = tu->as_bytes[7];
+}
+
+static void yaffs_get_tags_from_spare(struct yaffs_dev *dev,
+ struct yaffs_spare *spare_ptr,
+ struct yaffs_tags *tags_ptr)
+{
+ union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
+ int result;
+
+ tu->as_bytes[0] = spare_ptr->tb0;
+ tu->as_bytes[1] = spare_ptr->tb1;
+ tu->as_bytes[2] = spare_ptr->tb2;
+ tu->as_bytes[3] = spare_ptr->tb3;
+ tu->as_bytes[4] = spare_ptr->tb4;
+ tu->as_bytes[5] = spare_ptr->tb5;
+ tu->as_bytes[6] = spare_ptr->tb6;
+ tu->as_bytes[7] = spare_ptr->tb7;
+
+ result = yaffs_check_tags_ecc(tags_ptr);
+ if (result > 0)
+ dev->n_tags_ecc_fixed++;
+ else if (result < 0)
+ dev->n_tags_ecc_unfixed++;
+}
+
+static void yaffs_spare_init(struct yaffs_spare *spare)
+{
+ memset(spare, 0xff, sizeof(struct yaffs_spare));
+}
+
+static int yaffs_wr_nand(struct yaffs_dev *dev,
+ int nand_chunk, const u8 *data,
+ struct yaffs_spare *spare)
+{
+ int data_size = dev->data_bytes_per_chunk;
+
+ return dev->drv.drv_write_chunk_fn(dev, nand_chunk,
+ data, data_size,
+ (u8 *) spare, sizeof(*spare));
+}
+
+static int yaffs_rd_chunk_nand(struct yaffs_dev *dev,
+ int nand_chunk,
+ u8 *data,
+ struct yaffs_spare *spare,
+ enum yaffs_ecc_result *ecc_result,
+ int correct_errors)
+{
+ int ret_val;
+ struct yaffs_spare local_spare;
+ int data_size;
+ int spare_size;
+ int ecc_result1, ecc_result2;
+ u8 calc_ecc[3];
+
+ if (!spare) {
+ /* If we don't have a real spare, then we use a local one. */
+ /* Need this for the calculation of the ecc */
+ spare = &local_spare;
+ }
+ data_size = dev->data_bytes_per_chunk;
+ spare_size = sizeof(struct yaffs_spare);
+
+ if (dev->param.use_nand_ecc)
+ return dev->drv.drv_read_chunk_fn(dev, nand_chunk,
+ data, data_size,
+ (u8 *) spare, spare_size,
+ ecc_result);
+
+
+ /* Handle the ECC at this level. */
+
+ ret_val = dev->drv.drv_read_chunk_fn(dev, nand_chunk,
+ data, data_size,
+ (u8 *)spare, spare_size,
+ NULL);
+ if (!data || !correct_errors)
+ return ret_val;
+
+ /* Do ECC correction if needed. */
+ yaffs_ecc_calc(data, calc_ecc);
+ ecc_result1 = yaffs_ecc_correct(data, spare->ecc1, calc_ecc);
+ yaffs_ecc_calc(&data[256], calc_ecc);
+ ecc_result2 = yaffs_ecc_correct(&data[256], spare->ecc2, calc_ecc);
+
+ if (ecc_result1 > 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error fix performed on chunk %d:0",
+ nand_chunk);
+ dev->n_ecc_fixed++;
+ } else if (ecc_result1 < 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error unfixed on chunk %d:0",
+ nand_chunk);
+ dev->n_ecc_unfixed++;
+ }
+
+ if (ecc_result2 > 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error fix performed on chunk %d:1",
+ nand_chunk);
+ dev->n_ecc_fixed++;
+ } else if (ecc_result2 < 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error unfixed on chunk %d:1",
+ nand_chunk);
+ dev->n_ecc_unfixed++;
+ }
+
+ if (ecc_result1 || ecc_result2) {
+ /* We had a data problem on this page */
+ yaffs_handle_rd_data_error(dev, nand_chunk);
+ }
+
+ if (ecc_result1 < 0 || ecc_result2 < 0)
+ *ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ else if (ecc_result1 > 0 || ecc_result2 > 0)
+ *ecc_result = YAFFS_ECC_RESULT_FIXED;
+ else
+ *ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+
+ return ret_val;
+}
+
+/*
+ * Functions for robustisizing
+ */
+
+static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk)
+{
+ int flash_block = nand_chunk / dev->param.chunks_per_block;
+
+ /* Mark the block for retirement */
+ yaffs_get_block_info(dev, flash_block + dev->block_offset)->
+ needs_retiring = 1;
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>>Block %d marked for retirement",
+ flash_block);
+
+ /* TODO:
+ * Just do a garbage collection on the affected block
+ * then retire the block
+ * NB recursion
+ */
+}
+
+static int yaffs_tags_compat_wr(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 *data, const struct yaffs_ext_tags *ext_tags)
+{
+ struct yaffs_spare spare;
+ struct yaffs_tags tags;
+
+ yaffs_spare_init(&spare);
+
+ if (ext_tags->is_deleted)
+ spare.page_status = 0;
+ else {
+ tags.obj_id = ext_tags->obj_id;
+ tags.chunk_id = ext_tags->chunk_id;
+
+ tags.n_bytes_lsb = ext_tags->n_bytes & (1024 - 1);
+
+ if (dev->data_bytes_per_chunk >= 1024)
+ tags.n_bytes_msb = (ext_tags->n_bytes >> 10) & 3;
+ else
+ tags.n_bytes_msb = 3;
+
+ tags.serial_number = ext_tags->serial_number;
+
+ if (!dev->param.use_nand_ecc && data) {
+ yaffs_ecc_calc(data, spare.ecc1);
+ yaffs_ecc_calc(&data[256], spare.ecc2);
+ }
+
+ yaffs_load_tags_to_spare(&spare, &tags);
+ }
+ return yaffs_wr_nand(dev, nand_chunk, data, &spare);
+}
+
+static int yaffs_tags_compat_rd(struct yaffs_dev *dev,
+ int nand_chunk,
+ u8 *data, struct yaffs_ext_tags *ext_tags)
+{
+ struct yaffs_spare spare;
+ struct yaffs_tags tags;
+ enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
+ static struct yaffs_spare spare_ff;
+ static int init;
+ int deleted;
+
+ if (!init) {
+ memset(&spare_ff, 0xff, sizeof(spare_ff));
+ init = 1;
+ }
+
+ if (!yaffs_rd_chunk_nand(dev, nand_chunk,
+ data, &spare, &ecc_result, 1))
+ return YAFFS_FAIL;
+
+ /* ext_tags may be NULL */
+ if (!ext_tags)
+ return YAFFS_OK;
+
+ deleted = (hweight8(spare.page_status) < 7) ? 1 : 0;
+
+ ext_tags->is_deleted = deleted;
+ ext_tags->ecc_result = ecc_result;
+ ext_tags->block_bad = 0; /* We're reading it */
+ /* therefore it is not a bad block */
+ ext_tags->chunk_used =
+ memcmp(&spare_ff, &spare, sizeof(spare_ff)) ? 1 : 0;
+
+ if (ext_tags->chunk_used) {
+ yaffs_get_tags_from_spare(dev, &spare, &tags);
+ ext_tags->obj_id = tags.obj_id;
+ ext_tags->chunk_id = tags.chunk_id;
+ ext_tags->n_bytes = tags.n_bytes_lsb;
+
+ if (dev->data_bytes_per_chunk >= 1024)
+ ext_tags->n_bytes |=
+ (((unsigned)tags.n_bytes_msb) << 10);
+
+ ext_tags->serial_number = tags.serial_number;
+ }
+
+ return YAFFS_OK;
+}
+
+static int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int flash_block)
+{
+ struct yaffs_spare spare;
+
+ memset(&spare, 0xff, sizeof(struct yaffs_spare));
+
+ spare.block_status = 'Y';
+
+ yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block, NULL,
+ &spare);
+ yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block + 1,
+ NULL, &spare);
+
+ return YAFFS_OK;
+}
+
+static int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ u32 *seq_number)
+{
+ struct yaffs_spare spare0, spare1;
+ static struct yaffs_spare spare_ff;
+ static int init;
+ enum yaffs_ecc_result dummy;
+
+ if (!init) {
+ memset(&spare_ff, 0xff, sizeof(spare_ff));
+ init = 1;
+ }
+
+ *seq_number = 0;
+
+ /* Look for bad block markers in the first two chunks */
+ yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block,
+ NULL, &spare0, &dummy, 0);
+ yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block + 1,
+ NULL, &spare1, &dummy, 0);
+
+ if (hweight8(spare0.block_status & spare1.block_status) < 7)
+ *state = YAFFS_BLOCK_STATE_DEAD;
+ else if (memcmp(&spare_ff, &spare0, sizeof(spare_ff)) == 0)
+ *state = YAFFS_BLOCK_STATE_EMPTY;
+ else
+ *state = YAFFS_BLOCK_STATE_NEEDS_SCAN;
+
+ return YAFFS_OK;
+}
+
+void yaffs_tags_compat_install(struct yaffs_dev *dev)
+{
+ if(dev->param.is_yaffs2)
+ return;
+ if(!dev->tagger.write_chunk_tags_fn)
+ dev->tagger.write_chunk_tags_fn = yaffs_tags_compat_wr;
+ if(!dev->tagger.read_chunk_tags_fn)
+ dev->tagger.read_chunk_tags_fn = yaffs_tags_compat_rd;
+ if(!dev->tagger.query_block_fn)
+ dev->tagger.query_block_fn = yaffs_tags_compat_query_block;
+ if(!dev->tagger.mark_bad_fn)
+ dev->tagger.mark_bad_fn = yaffs_tags_compat_mark_bad;
+}
+#else
+
+#include "yaffs_packedtags1.h"
+
+static int yaffs_tags_compat_write(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 *data,
+ const struct yaffs_ext_tags *tags)
+{
+ struct yaffs_packed_tags1 pt1;
+ u8 tag_buf[9];
+ int retval;
+
+ /* we assume that yaffs_packed_tags1 and yaffs_tags are compatible */
+ compile_time_assertion(sizeof(struct yaffs_packed_tags1) == 12);
+ compile_time_assertion(sizeof(struct yaffs_tags) == 8);
+
+ yaffs_pack_tags1(&pt1, tags);
+ yaffs_calc_tags_ecc((struct yaffs_tags *)&pt1);
+
+ /* When deleting a chunk, the upper layer provides only skeletal
+ * tags, one with is_deleted set. However, we need to update the
+ * tags, not erase them completely. So we use the NAND write property
+ * that only zeroed-bits stick and set tag bytes to all-ones and
+ * zero just the (not) deleted bit.
+ */
+ if (!dev->param.tags_9bytes) {
+ if (tags->is_deleted) {
+ memset(&pt1, 0xff, 8);
+ /* clear delete status bit to indicate deleted */
+ pt1.deleted = 0;
+ }
+ memcpy(tag_buf, &pt1, 8);
+ } else {
+ if (tags->is_deleted) {
+ memset(tag_buf, 0xff, 8);
+ tag_buf[8] = 0;
+ } else {
+ memcpy(tag_buf, &pt1, 8);
+ tag_buf[8] = 0xff;
+ }
+ }
+
+ retval = dev->drv.drv_write_chunk_fn(dev, nand_chunk,
+ data,
+ (data) ? dev->data_bytes_per_chunk : 0,
+ tag_buf,
+ (dev->param.tags_9bytes) ? 9 : 8);
+
+ return retval;
+}
+
+/* Return with empty extended tags but add ecc_result.
+ */
+static int return_empty_tags(struct yaffs_ext_tags *tags,
+ enum yaffs_ecc_result ecc_result,
+ int retval)
+{
+ if (tags) {
+ memset(tags, 0, sizeof(*tags));
+ tags->ecc_result = ecc_result;
+ }
+
+ return retval;
+}
+
+static int yaffs_tags_compat_read(struct yaffs_dev *dev,
+ int nand_chunk,
+ u8 *data,
+ struct yaffs_ext_tags *tags)
+{
+ struct yaffs_packed_tags1 pt1;
+ enum yaffs_ecc_result ecc_result;
+ int retval;
+ int deleted;
+ u8 tag_buf[9];
+
+ retval = dev->drv.drv_read_chunk_fn(dev, nand_chunk,
+ data, dev->param.total_bytes_per_chunk,
+ tag_buf,
+ (dev->param.tags_9bytes) ? 9 : 8,
+ &ecc_result);
+
+ switch (ecc_result) {
+ case YAFFS_ECC_RESULT_NO_ERROR:
+ case YAFFS_ECC_RESULT_FIXED:
+ break;
+
+ case YAFFS_ECC_RESULT_UNFIXED:
+ default:
+ return_empty_tags(tags, YAFFS_ECC_RESULT_UNFIXED, 0);
+ tags->block_bad = dev->drv.drv_check_bad_fn(dev, nand_chunk);
+ return YAFFS_FAIL;
+ }
+
+ /* Check for a blank/erased chunk. */
+ if (yaffs_check_ff(tag_buf, 8)) {
+ /* when blank, upper layers want ecc_result to be <= NO_ERROR */
+ return return_empty_tags(tags, YAFFS_ECC_RESULT_NO_ERROR,
+ YAFFS_OK);
+ }
+
+ memcpy(&pt1, tag_buf, 8);
+
+ if (!dev->param.tags_9bytes) {
+ /* Read deleted status (bit) then return it to it's non-deleted
+ * state before performing tags mini-ECC check. pt1.deleted is
+ * inverted.
+ */
+ deleted = !pt1.deleted;
+ pt1.deleted = 1;
+ } else {
+ deleted = (hweight8(tag_buf[8]) < 7) ? 1 : 0;
+ }
+
+ /* Check the packed tags mini-ECC and correct if necessary/possible. */
+ retval = yaffs_check_tags_ecc((struct yaffs_tags *)&pt1);
+ switch (retval) {
+ case 0:
+ /* no tags error, use MTD result */
+ break;
+ case 1:
+ /* recovered tags-ECC error */
+ dev->n_tags_ecc_fixed++;
+ if (ecc_result == YAFFS_ECC_RESULT_NO_ERROR)
+ ecc_result = YAFFS_ECC_RESULT_FIXED;
+ break;
+ default:
+ /* unrecovered tags-ECC error */
+ dev->n_tags_ecc_unfixed++;
+ return return_empty_tags(tags, YAFFS_ECC_RESULT_UNFIXED,
+ YAFFS_FAIL);
+ }
+
+ /* Unpack the tags to extended form and set ECC result.
+ * [set should_be_ff just to keep yaffs_unpack_tags1 happy]
+ */
+ pt1.should_be_ff = 0xffffffff;
+ yaffs_unpack_tags1(tags, &pt1);
+ tags->ecc_result = ecc_result;
+
+ /* Set deleted state */
+ tags->is_deleted = deleted;
+ return YAFFS_OK;
+}
+
+static int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int block_no)
+{
+ return dev->drv.drv_mark_bad_fn(dev, block_no);
+}
+
+static int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ u32 *seq_number)
+{
+ struct yaffs_ext_tags tags;
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_MTD, "%s %d", __func__, block_no);
+
+ *seq_number = 0;
+
+ retval = dev->drv.drv_check_bad_fn(dev, block_no);
+ if (retval == YAFFS_FAIL) {
+ *state = YAFFS_BLOCK_STATE_DEAD;
+ goto out;
+ }
+
+ yaffs_tags_compat_read(dev, block_no * dev->param.chunks_per_block,
+ NULL, &tags);
+
+ if (tags.ecc_result != YAFFS_ECC_RESULT_NO_ERROR) {
+ yaffs_trace(YAFFS_TRACE_MTD, "block %d is marked bad",
+ block_no);
+ *state = YAFFS_BLOCK_STATE_NEEDS_SCAN;
+ } else if (tags.chunk_used) {
+ *seq_number = tags.seq_number;
+ *state = YAFFS_BLOCK_STATE_NEEDS_SCAN;
+ } else {
+ *state = YAFFS_BLOCK_STATE_EMPTY;
+ }
+
+ retval = YAFFS_OK;
+
+out:
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "block query returns seq %u state %d",
+ *seq_number, *state);
+
+ return retval;
+}
+
+void yaffs_tags_compat_install(struct yaffs_dev *dev)
+{
+ if (dev->param.is_yaffs2)
+ return;
+
+ if (!dev->tagger.write_chunk_tags_fn)
+ dev->tagger.write_chunk_tags_fn = yaffs_tags_compat_write;
+
+ if (!dev->tagger.read_chunk_tags_fn)
+ dev->tagger.read_chunk_tags_fn = yaffs_tags_compat_read;
+
+ if (!dev->tagger.query_block_fn)
+ dev->tagger.query_block_fn = yaffs_tags_compat_query_block;
+
+ if (!dev->tagger.mark_bad_fn)
+ dev->tagger.mark_bad_fn = yaffs_tags_compat_mark_bad;
+}
+#endif
Index: linux-3.18.21/fs/yaffs2/yaffs_tagscompat.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_tagscompat.h 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,44 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_TAGSCOMPAT_H__
+#define __YAFFS_TAGSCOMPAT_H__
+
+
+#include "yaffs_guts.h"
+
+#if 0
+
+
+int yaffs_tags_compat_wr(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 *data, const struct yaffs_ext_tags *tags);
+int yaffs_tags_compat_rd(struct yaffs_dev *dev,
+ int nand_chunk,
+ u8 *data, struct yaffs_ext_tags *tags);
+int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int block_no);
+int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ u32 *seq_number);
+
+#endif
+
+
+void yaffs_tags_compat_install(struct yaffs_dev *dev);
+void yaffs_calc_tags_ecc(struct yaffs_tags *tags);
+int yaffs_check_tags_ecc(struct yaffs_tags *tags);
+
+#endif
Index: linux-3.18.21/fs/yaffs2/yaffs_tagsmarshall.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_tagsmarshall.c 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,199 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+#include "yaffs_packedtags2.h"
+
+static int yaffs_tags_marshall_write(struct yaffs_dev *dev,
+ int nand_chunk, const u8 *data,
+ const struct yaffs_ext_tags *tags)
+{
+ struct yaffs_packed_tags2 pt;
+ int retval;
+
+ int packed_tags_size =
+ dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
+ void *packed_tags_ptr =
+ dev->param.no_tags_ecc ? (void *)&pt.t : (void *)&pt;
+
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "yaffs_tags_marshall_write chunk %d data %p tags %p",
+ nand_chunk, data, tags);
+
+ /* For yaffs2 writing there must be both data and tags.
+ * If we're using inband tags, then the tags are stuffed into
+ * the end of the data buffer.
+ */
+ if (!data || !tags)
+ BUG();
+ else if (dev->param.inband_tags) {
+ struct yaffs_packed_tags2_tags_only *pt2tp;
+ pt2tp =
+ (struct yaffs_packed_tags2_tags_only *)(data +
+ dev->
+ data_bytes_per_chunk);
+ yaffs_pack_tags2_tags_only(pt2tp, tags);
+ } else {
+ yaffs_pack_tags2(&pt, tags, !dev->param.no_tags_ecc);
+ }
+
+ retval = dev->drv.drv_write_chunk_fn(dev, nand_chunk,
+ data, dev->param.total_bytes_per_chunk,
+ (dev->param.inband_tags) ? NULL : packed_tags_ptr,
+ (dev->param.inband_tags) ? 0 : packed_tags_size);
+
+ return retval;
+}
+
+static int yaffs_tags_marshall_read(struct yaffs_dev *dev,
+ int nand_chunk, u8 *data,
+ struct yaffs_ext_tags *tags)
+{
+ int retval = 0;
+ int local_data = 0;
+ u8 spare_buffer[100];
+ enum yaffs_ecc_result ecc_result;
+
+ struct yaffs_packed_tags2 pt;
+
+ int packed_tags_size =
+ dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
+ void *packed_tags_ptr =
+ dev->param.no_tags_ecc ? (void *)&pt.t : (void *)&pt;
+
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "yaffs_tags_marshall_read chunk %d data %p tags %p",
+ nand_chunk, data, tags);
+
+ if (dev->param.inband_tags) {
+ if (!data) {
+ local_data = 1;
+ data = yaffs_get_temp_buffer(dev);
+ }
+ }
+
+ if (dev->param.inband_tags || (data && !tags))
+ retval = dev->drv.drv_read_chunk_fn(dev, nand_chunk,
+ data, dev->param.total_bytes_per_chunk,
+ NULL, 0,
+ &ecc_result);
+ else if (tags)
+ retval = dev->drv.drv_read_chunk_fn(dev, nand_chunk,
+ data, dev->param.total_bytes_per_chunk,
+ spare_buffer, packed_tags_size,
+ &ecc_result);
+ else
+ BUG();
+
+
+ if (dev->param.inband_tags) {
+ if (tags) {
+ struct yaffs_packed_tags2_tags_only *pt2tp;
+ pt2tp =
+ (struct yaffs_packed_tags2_tags_only *)
+ &data[dev->data_bytes_per_chunk];
+ yaffs_unpack_tags2_tags_only(tags, pt2tp);
+ }
+ } else if (tags) {
+ memcpy(packed_tags_ptr, spare_buffer, packed_tags_size);
+ yaffs_unpack_tags2(tags, &pt, !dev->param.no_tags_ecc);
+ }
+
+ if (local_data)
+ yaffs_release_temp_buffer(dev, data);
+
+ if (tags && ecc_result == YAFFS_ECC_RESULT_UNFIXED) {
+ tags->ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ dev->n_ecc_unfixed++;
+ }
+
+ if (tags && ecc_result == -YAFFS_ECC_RESULT_FIXED) {
+ if (tags->ecc_result <= YAFFS_ECC_RESULT_NO_ERROR)
+ tags->ecc_result = YAFFS_ECC_RESULT_FIXED;
+ dev->n_ecc_fixed++;
+ }
+
+ if (ecc_result < YAFFS_ECC_RESULT_UNFIXED)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
+static int yaffs_tags_marshall_query_block(struct yaffs_dev *dev, int block_no,
+ enum yaffs_block_state *state,
+ u32 *seq_number)
+{
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_MTD, "yaffs_tags_marshall_query_block %d",
+ block_no);
+
+ retval = dev->drv.drv_check_bad_fn(dev, block_no);
+
+ if (retval== YAFFS_FAIL) {
+ yaffs_trace(YAFFS_TRACE_MTD, "block is bad");
+
+ *state = YAFFS_BLOCK_STATE_DEAD;
+ *seq_number = 0;
+ } else {
+ struct yaffs_ext_tags t;
+
+ yaffs_tags_marshall_read(dev,
+ block_no * dev->param.chunks_per_block,
+ NULL, &t);
+
+ if (t.chunk_used) {
+ *seq_number = t.seq_number;
+ *state = YAFFS_BLOCK_STATE_NEEDS_SCAN;
+ } else {
+ *seq_number = 0;
+ *state = YAFFS_BLOCK_STATE_EMPTY;
+ }
+ }
+
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "block query returns seq %d state %d",
+ *seq_number, *state);
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
+static int yaffs_tags_marshall_mark_bad(struct yaffs_dev *dev, int block_no)
+{
+ return dev->drv.drv_mark_bad_fn(dev, block_no);
+
+}
+
+
+void yaffs_tags_marshall_install(struct yaffs_dev *dev)
+{
+ if (!dev->param.is_yaffs2)
+ return;
+
+ if (!dev->tagger.write_chunk_tags_fn)
+ dev->tagger.write_chunk_tags_fn = yaffs_tags_marshall_write;
+
+ if (!dev->tagger.read_chunk_tags_fn)
+ dev->tagger.read_chunk_tags_fn = yaffs_tags_marshall_read;
+
+ if (!dev->tagger.query_block_fn)
+ dev->tagger.query_block_fn = yaffs_tags_marshall_query_block;
+
+ if (!dev->tagger.mark_bad_fn)
+ dev->tagger.mark_bad_fn = yaffs_tags_marshall_mark_bad;
+
+}
Index: linux-3.18.21/fs/yaffs2/yaffs_tagsmarshall.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_tagsmarshall.h 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,22 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_TAGSMARSHALL_H__
+#define __YAFFS_TAGSMARSHALL_H__
+
+#include "yaffs_guts.h"
+void yaffs_tags_marshall_install(struct yaffs_dev *dev);
+
+#endif
Index: linux-3.18.21/fs/yaffs2/yaffs_trace.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_trace.h 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,57 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YTRACE_H__
+#define __YTRACE_H__
+
+extern unsigned int yaffs_trace_mask;
+extern unsigned int yaffs_wr_attempts;
+
+/*
+ * Tracing flags.
+ * The flags masked in YAFFS_TRACE_ALWAYS are always traced.
+ */
+
+#define YAFFS_TRACE_OS 0x00000002
+#define YAFFS_TRACE_ALLOCATE 0x00000004
+#define YAFFS_TRACE_SCAN 0x00000008
+#define YAFFS_TRACE_BAD_BLOCKS 0x00000010
+#define YAFFS_TRACE_ERASE 0x00000020
+#define YAFFS_TRACE_GC 0x00000040
+#define YAFFS_TRACE_WRITE 0x00000080
+#define YAFFS_TRACE_TRACING 0x00000100
+#define YAFFS_TRACE_DELETION 0x00000200
+#define YAFFS_TRACE_BUFFERS 0x00000400
+#define YAFFS_TRACE_NANDACCESS 0x00000800
+#define YAFFS_TRACE_GC_DETAIL 0x00001000
+#define YAFFS_TRACE_SCAN_DEBUG 0x00002000
+#define YAFFS_TRACE_MTD 0x00004000
+#define YAFFS_TRACE_CHECKPOINT 0x00008000
+
+#define YAFFS_TRACE_VERIFY 0x00010000
+#define YAFFS_TRACE_VERIFY_NAND 0x00020000
+#define YAFFS_TRACE_VERIFY_FULL 0x00040000
+#define YAFFS_TRACE_VERIFY_ALL 0x000f0000
+
+#define YAFFS_TRACE_SYNC 0x00100000
+#define YAFFS_TRACE_BACKGROUND 0x00200000
+#define YAFFS_TRACE_LOCK 0x00400000
+#define YAFFS_TRACE_MOUNT 0x00800000
+
+#define YAFFS_TRACE_ERROR 0x40000000
+#define YAFFS_TRACE_BUG 0x80000000
+#define YAFFS_TRACE_ALWAYS 0xf0000000
+
+#endif
Index: linux-3.18.21/fs/yaffs2/yaffs_verify.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_verify.c 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,529 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_verify.h"
+#include "yaffs_trace.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_nand.h"
+
+int yaffs_skip_verification(struct yaffs_dev *dev)
+{
+ (void) dev;
+ return !(yaffs_trace_mask &
+ (YAFFS_TRACE_VERIFY | YAFFS_TRACE_VERIFY_FULL));
+}
+
+static int yaffs_skip_full_verification(struct yaffs_dev *dev)
+{
+ (void) dev;
+ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_FULL));
+}
+
+static int yaffs_skip_nand_verification(struct yaffs_dev *dev)
+{
+ (void) dev;
+ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_NAND));
+}
+
+static const char * const block_state_name[] = {
+ "Unknown",
+ "Needs scan",
+ "Scanning",
+ "Empty",
+ "Allocating",
+ "Full",
+ "Dirty",
+ "Checkpoint",
+ "Collecting",
+ "Dead"
+};
+
+void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi, int n)
+{
+ int actually_used;
+ int in_use;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ /* Report illegal runtime states */
+ if (bi->block_state >= YAFFS_NUMBER_OF_BLOCK_STATES)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has undefined state %d",
+ n, bi->block_state);
+
+ switch (bi->block_state) {
+ case YAFFS_BLOCK_STATE_UNKNOWN:
+ case YAFFS_BLOCK_STATE_SCANNING:
+ case YAFFS_BLOCK_STATE_NEEDS_SCAN:
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has bad run-state %s",
+ n, block_state_name[bi->block_state]);
+ }
+
+ /* Check pages in use and soft deletions are legal */
+
+ actually_used = bi->pages_in_use - bi->soft_del_pages;
+
+ if (bi->pages_in_use < 0 ||
+ bi->pages_in_use > dev->param.chunks_per_block ||
+ bi->soft_del_pages < 0 ||
+ bi->soft_del_pages > dev->param.chunks_per_block ||
+ actually_used < 0 || actually_used > dev->param.chunks_per_block)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Block %d has illegal values pages_in_use %d soft_del_pages %d",
+ n, bi->pages_in_use, bi->soft_del_pages);
+
+ /* Check chunk bitmap legal */
+ in_use = yaffs_count_chunk_bits(dev, n);
+ if (in_use != bi->pages_in_use)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has inconsistent values pages_in_use %d counted chunk bits %d",
+ n, bi->pages_in_use, in_use);
+}
+
+void yaffs_verify_collected_blk(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi, int n)
+{
+ yaffs_verify_blk(dev, bi, n);
+
+ /* After collection the block should be in the erased state */
+
+ if (bi->block_state != YAFFS_BLOCK_STATE_COLLECTING &&
+ bi->block_state != YAFFS_BLOCK_STATE_EMPTY) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Block %d is in state %d after gc, should be erased",
+ n, bi->block_state);
+ }
+}
+
+void yaffs_verify_blocks(struct yaffs_dev *dev)
+{
+ int i;
+ int state_count[YAFFS_NUMBER_OF_BLOCK_STATES];
+ int illegal_states = 0;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ memset(state_count, 0, sizeof(state_count));
+
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, i);
+ yaffs_verify_blk(dev, bi, i);
+
+ if (bi->block_state < YAFFS_NUMBER_OF_BLOCK_STATES)
+ state_count[bi->block_state]++;
+ else
+ illegal_states++;
+ }
+
+ yaffs_trace(YAFFS_TRACE_VERIFY, "Block summary");
+
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "%d blocks have illegal states",
+ illegal_states);
+ if (state_count[YAFFS_BLOCK_STATE_ALLOCATING] > 1)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Too many allocating blocks");
+
+ for (i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "%s %d blocks",
+ block_state_name[i], state_count[i]);
+
+ if (dev->blocks_in_checkpt != state_count[YAFFS_BLOCK_STATE_CHECKPOINT])
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Checkpoint block count wrong dev %d count %d",
+ dev->blocks_in_checkpt,
+ state_count[YAFFS_BLOCK_STATE_CHECKPOINT]);
+
+ if (dev->n_erased_blocks != state_count[YAFFS_BLOCK_STATE_EMPTY])
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Erased block count wrong dev %d count %d",
+ dev->n_erased_blocks,
+ state_count[YAFFS_BLOCK_STATE_EMPTY]);
+
+ if (state_count[YAFFS_BLOCK_STATE_COLLECTING] > 1)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Too many collecting blocks %d (max is 1)",
+ state_count[YAFFS_BLOCK_STATE_COLLECTING]);
+}
+
+/*
+ * Verify the object header. oh must be valid, but obj and tags may be NULL in
+ * which case those tests will not be performed.
+ */
+void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh,
+ struct yaffs_ext_tags *tags, int parent_check)
+{
+ if (obj && yaffs_skip_verification(obj->my_dev))
+ return;
+
+ if (!(tags && obj && oh)) {
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Verifying object header tags %p obj %p oh %p",
+ tags, obj, oh);
+ return;
+ }
+
+ if (oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN ||
+ oh->type > YAFFS_OBJECT_TYPE_MAX)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header type is illegal value 0x%x",
+ tags->obj_id, oh->type);
+
+ if (tags->obj_id != obj->obj_id)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header mismatch obj_id %d",
+ tags->obj_id, obj->obj_id);
+
+ /*
+ * Check that the object's parent ids match if parent_check requested.
+ *
+ * Tests do not apply to the root object.
+ */
+
+ if (parent_check && tags->obj_id > 1 && !obj->parent)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header mismatch parent_id %d obj->parent is NULL",
+ tags->obj_id, oh->parent_obj_id);
+
+ if (parent_check && obj->parent &&
+ oh->parent_obj_id != obj->parent->obj_id &&
+ (oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED ||
+ obj->parent->obj_id != YAFFS_OBJECTID_DELETED))
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header mismatch parent_id %d parent_obj_id %d",
+ tags->obj_id, oh->parent_obj_id,
+ obj->parent->obj_id);
+
+ if (tags->obj_id > 1 && oh->name[0] == 0) /* Null name */
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header name is NULL",
+ obj->obj_id);
+
+ if (tags->obj_id > 1 && ((u8) (oh->name[0])) == 0xff) /* Junk name */
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header name is 0xff",
+ obj->obj_id);
+}
+
+void yaffs_verify_file(struct yaffs_obj *obj)
+{
+ u32 x;
+ int required_depth;
+ int actual_depth;
+ int last_chunk;
+ u32 offset_in_chunk;
+ u32 the_chunk;
+
+ u32 i;
+ struct yaffs_dev *dev;
+ struct yaffs_ext_tags tags;
+ struct yaffs_tnode *tn;
+ u32 obj_id;
+
+ if (!obj)
+ return;
+
+ if (yaffs_skip_verification(obj->my_dev))
+ return;
+
+ dev = obj->my_dev;
+ obj_id = obj->obj_id;
+
+
+ /* Check file size is consistent with tnode depth */
+ yaffs_addr_to_chunk(dev, obj->variant.file_variant.file_size,
+ &last_chunk, &offset_in_chunk);
+ last_chunk++;
+ x = last_chunk >> YAFFS_TNODES_LEVEL0_BITS;
+ required_depth = 0;
+ while (x > 0) {
+ x >>= YAFFS_TNODES_INTERNAL_BITS;
+ required_depth++;
+ }
+
+ actual_depth = obj->variant.file_variant.top_level;
+
+ /* Check that the chunks in the tnode tree are all correct.
+ * We do this by scanning through the tnode tree and
+ * checking the tags for every chunk match.
+ */
+
+ if (yaffs_skip_nand_verification(dev))
+ return;
+
+ for (i = 1; i <= last_chunk; i++) {
+ tn = yaffs_find_tnode_0(dev, &obj->variant.file_variant, i);
+
+ if (!tn)
+ continue;
+
+ the_chunk = yaffs_get_group_base(dev, tn, i);
+ if (the_chunk > 0) {
+ yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
+ &tags);
+ if (tags.obj_id != obj_id || tags.chunk_id != i)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Object %d chunk_id %d NAND mismatch chunk %d tags (%d:%d)",
+ obj_id, i, the_chunk,
+ tags.obj_id, tags.chunk_id);
+ }
+ }
+}
+
+void yaffs_verify_link(struct yaffs_obj *obj)
+{
+ if (obj && yaffs_skip_verification(obj->my_dev))
+ return;
+
+ /* Verify sane equivalent object */
+}
+
+void yaffs_verify_symlink(struct yaffs_obj *obj)
+{
+ if (obj && yaffs_skip_verification(obj->my_dev))
+ return;
+
+ /* Verify symlink string */
+}
+
+void yaffs_verify_special(struct yaffs_obj *obj)
+{
+ if (obj && yaffs_skip_verification(obj->my_dev))
+ return;
+}
+
+void yaffs_verify_obj(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev;
+ u32 chunk_min;
+ u32 chunk_max;
+ u32 chunk_id_ok;
+ u32 chunk_in_range;
+ u32 chunk_wrongly_deleted;
+ u32 chunk_valid;
+
+ if (!obj)
+ return;
+
+ if (obj->being_created)
+ return;
+
+ dev = obj->my_dev;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ /* Check sane object header chunk */
+
+ chunk_min = dev->internal_start_block * dev->param.chunks_per_block;
+ chunk_max =
+ (dev->internal_end_block + 1) * dev->param.chunks_per_block - 1;
+
+ chunk_in_range = (((unsigned)(obj->hdr_chunk)) >= chunk_min &&
+ ((unsigned)(obj->hdr_chunk)) <= chunk_max);
+ chunk_id_ok = chunk_in_range || (obj->hdr_chunk == 0);
+ chunk_valid = chunk_in_range &&
+ yaffs_check_chunk_bit(dev,
+ obj->hdr_chunk / dev->param.chunks_per_block,
+ obj->hdr_chunk % dev->param.chunks_per_block);
+ chunk_wrongly_deleted = chunk_in_range && !chunk_valid;
+
+ if (!obj->fake && (!chunk_id_ok || chunk_wrongly_deleted))
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d has chunk_id %d %s %s",
+ obj->obj_id, obj->hdr_chunk,
+ chunk_id_ok ? "" : ",out of range",
+ chunk_wrongly_deleted ? ",marked as deleted" : "");
+
+ if (chunk_valid && !yaffs_skip_nand_verification(dev)) {
+ struct yaffs_ext_tags tags;
+ struct yaffs_obj_hdr *oh;
+ u8 *buffer = yaffs_get_temp_buffer(dev);
+
+ oh = (struct yaffs_obj_hdr *)buffer;
+
+ yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, buffer, &tags);
+
+ yaffs_verify_oh(obj, oh, &tags, 1);
+
+ yaffs_release_temp_buffer(dev, buffer);
+ }
+
+ /* Verify it has a parent */
+ if (obj && !obj->fake && (!obj->parent || obj->parent->my_dev != dev)) {
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d has parent pointer %p which does not look like an object",
+ obj->obj_id, obj->parent);
+ }
+
+ /* Verify parent is a directory */
+ if (obj->parent &&
+ obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d's parent is not a directory (type %d)",
+ obj->obj_id, obj->parent->variant_type);
+ }
+
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ yaffs_verify_file(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ yaffs_verify_symlink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ yaffs_verify_dir(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ yaffs_verify_link(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ yaffs_verify_special(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ default:
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Obj %d has illegal type %d",
+ obj->obj_id, obj->variant_type);
+ break;
+ }
+}
+
+void yaffs_verify_objects(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ int i;
+ struct list_head *lh;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ /* Iterate through the objects in each hash entry */
+
+ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+ list_for_each(lh, &dev->obj_bucket[i].list) {
+ obj = list_entry(lh, struct yaffs_obj, hash_link);
+ yaffs_verify_obj(obj);
+ }
+ }
+}
+
+void yaffs_verify_obj_in_dir(struct yaffs_obj *obj)
+{
+ struct list_head *lh;
+ struct yaffs_obj *list_obj;
+ int count = 0;
+
+ if (!obj) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "No object to verify");
+ BUG();
+ return;
+ }
+
+ if (yaffs_skip_verification(obj->my_dev))
+ return;
+
+ if (!obj->parent) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "Object does not have parent");
+ BUG();
+ return;
+ }
+
+ if (obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "Parent is not directory");
+ BUG();
+ }
+
+ /* Iterate through the objects in each hash entry */
+
+ list_for_each(lh, &obj->parent->variant.dir_variant.children) {
+ list_obj = list_entry(lh, struct yaffs_obj, siblings);
+ yaffs_verify_obj(list_obj);
+ if (obj == list_obj)
+ count++;
+ }
+
+ if (count != 1) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Object in directory %d times",
+ count);
+ BUG();
+ }
+}
+
+void yaffs_verify_dir(struct yaffs_obj *directory)
+{
+ struct list_head *lh;
+ struct yaffs_obj *list_obj;
+
+ if (!directory) {
+ BUG();
+ return;
+ }
+
+ if (yaffs_skip_full_verification(directory->my_dev))
+ return;
+
+ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Directory has wrong type: %d",
+ directory->variant_type);
+ BUG();
+ }
+
+ /* Iterate through the objects in each hash entry */
+
+ list_for_each(lh, &directory->variant.dir_variant.children) {
+ list_obj = list_entry(lh, struct yaffs_obj, siblings);
+ if (list_obj->parent != directory) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Object in directory list has wrong parent %p",
+ list_obj->parent);
+ BUG();
+ }
+ yaffs_verify_obj_in_dir(list_obj);
+ }
+}
+
+static int yaffs_free_verification_failures;
+
+void yaffs_verify_free_chunks(struct yaffs_dev *dev)
+{
+ int counted;
+ int difference;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ counted = yaffs_count_free_chunks(dev);
+
+ difference = dev->n_free_chunks - counted;
+
+ if (difference) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Freechunks verification failure %d %d %d",
+ dev->n_free_chunks, counted, difference);
+ yaffs_free_verification_failures++;
+ }
+}
+
+int yaffs_verify_file_sane(struct yaffs_obj *in)
+{
+ (void) in;
+ return YAFFS_OK;
+}
Index: linux-3.18.21/fs/yaffs2/yaffs_verify.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_verify.h 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,43 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_VERIFY_H__
+#define __YAFFS_VERIFY_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi,
+ int n);
+void yaffs_verify_collected_blk(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi, int n);
+void yaffs_verify_blocks(struct yaffs_dev *dev);
+
+void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh,
+ struct yaffs_ext_tags *tags, int parent_check);
+void yaffs_verify_file(struct yaffs_obj *obj);
+void yaffs_verify_link(struct yaffs_obj *obj);
+void yaffs_verify_symlink(struct yaffs_obj *obj);
+void yaffs_verify_special(struct yaffs_obj *obj);
+void yaffs_verify_obj(struct yaffs_obj *obj);
+void yaffs_verify_objects(struct yaffs_dev *dev);
+void yaffs_verify_obj_in_dir(struct yaffs_obj *obj);
+void yaffs_verify_dir(struct yaffs_obj *directory);
+void yaffs_verify_free_chunks(struct yaffs_dev *dev);
+
+int yaffs_verify_file_sane(struct yaffs_obj *obj);
+
+int yaffs_skip_verification(struct yaffs_dev *dev);
+
+#endif
Index: linux-3.18.21/fs/yaffs2/yaffs_vfs.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_vfs.c 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,3690 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ * Acknowledgements:
+ * Luc van OostenRyck for numerous patches.
+ * Nick Bane for numerous patches.
+ * Nick Bane for 2.5/2.6 integration.
+ * Andras Toth for mknod rdev issue.
+ * Michael Fischer for finding the problem with inode inconsistency.
+ * Some code bodily lifted from JFFS
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ *
+ * This is the file system front-end to YAFFS that hooks it up to
+ * the VFS.
+ *
+ * Special notes:
+ * >> 2.4: sb->u.generic_sbp points to the struct yaffs_dev associated with
+ * this superblock
+ * >> 2.6: sb->s_fs_info points to the struct yaffs_dev associated with this
+ * superblock
+ * >> inode->u.generic_ip points to the associated struct yaffs_obj.
+ */
+
+/*
+ * There are two variants of the VFS glue code. This variant should compile
+ * for any version of Linux.
+ */
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10))
+#define YAFFS_COMPILE_BACKGROUND
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23))
+#define YAFFS_COMPILE_FREEZER
+#endif
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
+#define YAFFS_COMPILE_EXPORTFS
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35))
+#define YAFFS_USE_SETATTR_COPY
+#define YAFFS_USE_TRUNCATE_SETSIZE
+#endif
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35))
+#define YAFFS_HAS_EVICT_INODE
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
+#define YAFFS_NEW_FOLLOW_LINK 1
+#else
+#define YAFFS_NEW_FOLLOW_LINK 0
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+#define YAFFS_HAS_WRITE_SUPER
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+#include <linux/config.h>
+#endif
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39))
+#include <linux/smp_lock.h>
+#endif
+#include <linux/pagemap.h>
+#include <linux/mtd/mtd.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+
+#if (YAFFS_NEW_FOLLOW_LINK == 1)
+#include <linux/namei.h>
+#endif
+
+#ifdef YAFFS_COMPILE_EXPORTFS
+#include <linux/exportfs.h>
+#endif
+
+#ifdef YAFFS_COMPILE_BACKGROUND
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#endif
+#ifdef YAFFS_COMPILE_FREEZER
+#include <linux/freezer.h>
+#endif
+
+#include <asm/div64.h>
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+
+#include <linux/statfs.h>
+
+#define UnlockPage(p) unlock_page(p)
+#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
+
+/* FIXME: use sb->s_id instead ? */
+#define yaffs_devname(sb, buf) bdevname(sb->s_bdev, buf)
+
+#else
+
+#include <linux/locks.h>
+#define BDEVNAME_SIZE 0
+#define yaffs_devname(sb, buf) kdevname(sb->s_dev)
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0))
+/* added NCB 26/5/2006 for 2.4.25-vrs2-tcl1 kernel */
+#define __user
+#endif
+
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
+#define YPROC_ROOT (&proc_root)
+#else
+#define YPROC_ROOT NULL
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
+#define Y_INIT_TIMER(a) init_timer(a)
+#else
+#define Y_INIT_TIMER(a) init_timer_on_stack(a)
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 27))
+#define YAFFS_USE_WRITE_BEGIN_END 1
+#else
+#define YAFFS_USE_WRITE_BEGIN_END 0
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+#define YAFFS_SUPER_HAS_DIRTY
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
+#define set_nlink(inode, count) do { (inode)->i_nlink = (count); } while(0)
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 28))
+static uint32_t YCALCBLOCKS(uint64_t partition_size, uint32_t block_size)
+{
+ uint64_t result = partition_size;
+ do_div(result, block_size);
+ return (uint32_t) result;
+}
+#else
+#define YCALCBLOCKS(s, b) ((s)/(b))
+#endif
+
+#include <linux/uaccess.h>
+#include <linux/mtd/mtd.h>
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+#include "yaffs_guts.h"
+#include "yaffs_attribs.h"
+
+#include "yaffs_linux.h"
+
+#include "yaffs_mtdif.h"
+#include "yaffs_packedtags2.h"
+#include "yaffs_getblockinfo.h"
+
+unsigned int yaffs_trace_mask =
+ YAFFS_TRACE_BAD_BLOCKS |
+ YAFFS_TRACE_ALWAYS |
+ 0;
+
+unsigned int yaffs_wr_attempts = YAFFS_WR_ATTEMPTS;
+unsigned int yaffs_auto_checkpoint = 1;
+unsigned int yaffs_gc_control = 1;
+unsigned int yaffs_bg_enable = 1;
+unsigned int yaffs_auto_select = 1;
+/* Module Parameters */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+module_param(yaffs_trace_mask, uint, 0644);
+module_param(yaffs_wr_attempts, uint, 0644);
+module_param(yaffs_auto_checkpoint, uint, 0644);
+module_param(yaffs_gc_control, uint, 0644);
+module_param(yaffs_bg_enable, uint, 0644);
+#else
+MODULE_PARM(yaffs_trace_mask, "i");
+MODULE_PARM(yaffs_wr_attempts, "i");
+MODULE_PARM(yaffs_auto_checkpoint, "i");
+MODULE_PARM(yaffs_gc_control, "i");
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25))
+/* use iget and read_inode */
+#define Y_IGET(sb, inum) iget((sb), (inum))
+
+#else
+/* Call local equivalent */
+#define YAFFS_USE_OWN_IGET
+#define Y_IGET(sb, inum) yaffs_iget((sb), (inum))
+
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
+#define yaffs_inode_to_obj_lv(iptr) ((iptr)->i_private)
+#else
+#define yaffs_inode_to_obj_lv(iptr) ((iptr)->u.generic_ip)
+#endif
+
+#define yaffs_inode_to_obj(iptr) \
+ ((struct yaffs_obj *)(yaffs_inode_to_obj_lv(iptr)))
+#define yaffs_dentry_to_obj(dptr) yaffs_inode_to_obj((dptr)->d_inode)
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+#define yaffs_super_to_dev(sb) ((struct yaffs_dev *)sb->s_fs_info)
+#else
+#define yaffs_super_to_dev(sb) ((struct yaffs_dev *)sb->u.generic_sbp)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+#define Y_CLEAR_INODE(i) clear_inode(i)
+#else
+#define Y_CLEAR_INODE(i) end_writeback(i)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+#define YAFFS_USE_DIR_ITERATE
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
+#define YAFFS_NEW_PROCFS
+#include <linux/seq_file.h>
+#endif
+
+
+#define update_dir_time(dir) do {\
+ (dir)->i_ctime = (dir)->i_mtime = CURRENT_TIME; \
+ } while (0)
+
+static void yaffs_fill_inode_from_obj(struct inode *inode,
+ struct yaffs_obj *obj);
+
+
+static void yaffs_gross_lock(struct yaffs_dev *dev)
+{
+ yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locking %p", current);
+ mutex_lock(&(yaffs_dev_to_lc(dev)->gross_lock));
+ yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locked %p", current);
+}
+
+static void yaffs_gross_unlock(struct yaffs_dev *dev)
+{
+ yaffs_trace(YAFFS_TRACE_LOCK, "yaffs unlocking %p", current);
+ mutex_unlock(&(yaffs_dev_to_lc(dev)->gross_lock));
+}
+
+
+static int yaffs_readpage_nolock(struct file *f, struct page *pg)
+{
+ /* Lifted from jffs2 */
+
+ struct yaffs_obj *obj;
+ unsigned char *pg_buf;
+ int ret;
+ loff_t pos = ((loff_t) pg->index) << PAGE_CACHE_SHIFT;
+ struct yaffs_dev *dev;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readpage_nolock at %lld, size %08x",
+ (long long)pos,
+ (unsigned)PAGE_CACHE_SIZE);
+
+ obj = yaffs_dentry_to_obj(f->f_dentry);
+
+ dev = obj->my_dev;
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ BUG_ON(!PageLocked(pg));
+#else
+ if (!PageLocked(pg))
+ PAGE_BUG(pg);
+#endif
+
+ pg_buf = kmap(pg);
+ /* FIXME: Can kmap fail? */
+
+ yaffs_gross_lock(dev);
+
+ ret = yaffs_file_rd(obj, pg_buf, pos, PAGE_CACHE_SIZE);
+
+ yaffs_gross_unlock(dev);
+
+ if (ret >= 0)
+ ret = 0;
+
+ if (ret) {
+ ClearPageUptodate(pg);
+ SetPageError(pg);
+ } else {
+ SetPageUptodate(pg);
+ ClearPageError(pg);
+ }
+
+ flush_dcache_page(pg);
+ kunmap(pg);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage_nolock done");
+ return ret;
+}
+
+static int yaffs_readpage_unlock(struct file *f, struct page *pg)
+{
+ int ret = yaffs_readpage_nolock(f, pg);
+ UnlockPage(pg);
+ return ret;
+}
+
+static int yaffs_readpage(struct file *f, struct page *pg)
+{
+ int ret;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage");
+ ret = yaffs_readpage_unlock(f, pg);
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage done");
+ return ret;
+}
+
+
+static void yaffs_set_super_dirty_val(struct yaffs_dev *dev, int val)
+{
+ struct yaffs_linux_context *lc = yaffs_dev_to_lc(dev);
+
+ if (lc)
+ lc->dirty = val;
+
+# ifdef YAFFS_SUPER_HAS_DIRTY
+ {
+ struct super_block *sb = lc->super;
+
+ if (sb)
+ sb->s_dirt = val;
+ }
+#endif
+
+}
+
+static void yaffs_set_super_dirty(struct yaffs_dev *dev)
+{
+ yaffs_set_super_dirty_val(dev, 1);
+}
+
+static void yaffs_clear_super_dirty(struct yaffs_dev *dev)
+{
+ yaffs_set_super_dirty_val(dev, 0);
+}
+
+static int yaffs_check_super_dirty(struct yaffs_dev *dev)
+{
+ struct yaffs_linux_context *lc = yaffs_dev_to_lc(dev);
+
+ if (lc && lc->dirty)
+ return 1;
+
+# ifdef YAFFS_SUPER_HAS_DIRTY
+ {
+ struct super_block *sb = lc->super;
+
+ if (sb && sb->s_dirt)
+ return 1;
+ }
+#endif
+ return 0;
+
+}
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static int yaffs_writepage(struct page *page, struct writeback_control *wbc)
+#else
+static int yaffs_writepage(struct page *page)
+#endif
+{
+ struct yaffs_dev *dev;
+ struct address_space *mapping = page->mapping;
+ struct inode *inode;
+ unsigned long end_index;
+ char *buffer;
+ struct yaffs_obj *obj;
+ int n_written = 0;
+ unsigned n_bytes;
+ loff_t i_size;
+
+ if (!mapping)
+ BUG();
+ inode = mapping->host;
+ if (!inode)
+ BUG();
+ i_size = i_size_read(inode);
+
+ end_index = i_size >> PAGE_CACHE_SHIFT;
+
+ if (page->index < end_index)
+ n_bytes = PAGE_CACHE_SIZE;
+ else {
+ n_bytes = i_size & (PAGE_CACHE_SIZE - 1);
+
+ if (page->index > end_index || !n_bytes) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_writepage at %lld, inode size = %lld!!",
+ ((loff_t)page->index) << PAGE_CACHE_SHIFT,
+ inode->i_size);
+ yaffs_trace(YAFFS_TRACE_OS,
+ " -> don't care!!");
+
+ zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ set_page_writeback(page);
+ unlock_page(page);
+ end_page_writeback(page);
+ return 0;
+ }
+ }
+
+ if (n_bytes != PAGE_CACHE_SIZE)
+ zero_user_segment(page, n_bytes, PAGE_CACHE_SIZE);
+
+ get_page(page);
+
+ buffer = kmap(page);
+
+ obj = yaffs_inode_to_obj(inode);
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_writepage at %lld, size %08x",
+ ((loff_t)page->index) << PAGE_CACHE_SHIFT, n_bytes);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "writepag0: obj = %lld, ino = %lld",
+ obj->variant.file_variant.file_size, inode->i_size);
+
+ n_written = yaffs_wr_file(obj, buffer,
+ ((loff_t)page->index) << PAGE_CACHE_SHIFT, n_bytes, 0);
+
+ yaffs_set_super_dirty(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "writepag1: obj = %lld, ino = %lld",
+ obj->variant.file_variant.file_size, inode->i_size);
+
+ yaffs_gross_unlock(dev);
+
+ kunmap(page);
+ set_page_writeback(page);
+ unlock_page(page);
+ end_page_writeback(page);
+ put_page(page);
+
+ return (n_written == n_bytes) ? 0 : -ENOSPC;
+}
+
+/* Space holding and freeing is done to ensure we have space available for write_begin/end */
+/* For now we just assume few parallel writes and check against a small number. */
+/* Todo: need to do this with a counter to handle parallel reads better */
+
+static ssize_t yaffs_hold_space(struct file *f)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+
+ int n_free_chunks;
+
+ obj = yaffs_dentry_to_obj(f->f_dentry);
+
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ n_free_chunks = yaffs_get_n_free_chunks(dev);
+
+ yaffs_gross_unlock(dev);
+
+ return (n_free_chunks > 20) ? 1 : 0;
+}
+
+static void yaffs_release_space(struct file *f)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+
+ obj = yaffs_dentry_to_obj(f->f_dentry);
+
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ yaffs_gross_unlock(dev);
+}
+
+#if (YAFFS_USE_WRITE_BEGIN_END > 0)
+static int yaffs_write_begin(struct file *filp, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata)
+{
+ struct page *pg = NULL;
+ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+
+ int ret = 0;
+ int space_held = 0;
+
+ /* Get a page */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
+ pg = grab_cache_page_write_begin(mapping, index, flags);
+#else
+ pg = __grab_cache_page(mapping, index);
+#endif
+
+ *pagep = pg;
+ if (!pg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ yaffs_trace(YAFFS_TRACE_OS,
+ "start yaffs_write_begin index %d(%x) uptodate %d",
+ (int)index, (int)index, Page_Uptodate(pg) ? 1 : 0);
+
+ /* Get fs space */
+ space_held = yaffs_hold_space(filp);
+
+ if (!space_held) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ /* Update page if required */
+
+ if (!Page_Uptodate(pg))
+ ret = yaffs_readpage_nolock(filp, pg);
+
+ if (ret)
+ goto out;
+
+ /* Happy path return */
+ yaffs_trace(YAFFS_TRACE_OS, "end yaffs_write_begin - ok");
+
+ return 0;
+
+out:
+ yaffs_trace(YAFFS_TRACE_OS,
+ "end yaffs_write_begin fail returning %d", ret);
+ if (space_held)
+ yaffs_release_space(filp);
+ if (pg) {
+ unlock_page(pg);
+ page_cache_release(pg);
+ }
+ return ret;
+}
+
+#else
+
+static int yaffs_prepare_write(struct file *f, struct page *pg,
+ unsigned offset, unsigned to)
+{
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_prepair_write");
+
+ if (!Page_Uptodate(pg))
+ return yaffs_readpage_nolock(f, pg);
+ return 0;
+}
+#endif
+
+
+static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
+ loff_t * pos)
+{
+ struct yaffs_obj *obj;
+ int n_written;
+ loff_t ipos;
+ struct inode *inode;
+ struct yaffs_dev *dev;
+
+ obj = yaffs_dentry_to_obj(f->f_dentry);
+
+ if (!obj) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_write: hey obj is null!");
+ return -EINVAL;
+ }
+
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ inode = f->f_dentry->d_inode;
+
+ if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND)
+ ipos = inode->i_size;
+ else
+ ipos = *pos;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_write about to write writing %u(%x) bytes to object %d at %lld",
+ (unsigned)n, (unsigned)n, obj->obj_id, ipos);
+
+ n_written = yaffs_wr_file(obj, buf, ipos, n, 0);
+
+ yaffs_set_super_dirty(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_write: %d(%x) bytes written",
+ (unsigned)n, (unsigned)n);
+
+ if (n_written > 0) {
+ ipos += n_written;
+ *pos = ipos;
+ if (ipos > inode->i_size) {
+ inode->i_size = ipos;
+ inode->i_blocks = (ipos + 511) >> 9;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_write size updated to %lld bytes, %d blocks",
+ ipos, (int)(inode->i_blocks));
+ }
+
+ }
+ yaffs_gross_unlock(dev);
+ return (n_written == 0) && (n > 0) ? -ENOSPC : n_written;
+}
+
+
+#if (YAFFS_USE_WRITE_BEGIN_END > 0)
+static int yaffs_write_end(struct file *filp, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *pg, void *fsdadata)
+{
+ int ret = 0;
+ void *addr, *kva;
+ uint32_t offset_into_page = pos & (PAGE_CACHE_SIZE - 1);
+
+ kva = kmap(pg);
+ addr = kva + offset_into_page;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_write_end addr %p pos %lld n_bytes %d",
+ addr, pos, copied);
+
+ ret = yaffs_file_write(filp, addr, copied, &pos);
+
+ if (ret != copied) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_write_end not same size ret %d copied %d",
+ ret, copied);
+ SetPageError(pg);
+ }
+
+ kunmap(pg);
+
+ yaffs_release_space(filp);
+ unlock_page(pg);
+ page_cache_release(pg);
+ return ret;
+}
+#else
+
+static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset,
+ unsigned to)
+{
+ void *addr, *kva;
+
+ loff_t pos = (((loff_t) pg->index) << PAGE_CACHE_SHIFT) + offset;
+ int n_bytes = to - offset;
+ int n_written;
+
+ kva = kmap(pg);
+ addr = kva + offset;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_commit_write addr %p pos %lld n_bytes %d",
+ addr, pos, n_bytes);
+
+ n_written = yaffs_file_write(f, addr, n_bytes, &pos);
+
+ if (n_written != n_bytes) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_commit_write not same size n_written %d n_bytes %d",
+ n_written, n_bytes);
+ SetPageError(pg);
+ }
+ kunmap(pg);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_commit_write returning %d",
+ n_written == n_bytes ? 0 : n_written);
+
+ return n_written == n_bytes ? 0 : n_written;
+}
+#endif
+
+static struct address_space_operations yaffs_file_address_operations = {
+ .readpage = yaffs_readpage,
+ .writepage = yaffs_writepage,
+#if (YAFFS_USE_WRITE_BEGIN_END > 0)
+ .write_begin = yaffs_write_begin,
+ .write_end = yaffs_write_end,
+#else
+ .prepare_write = yaffs_prepare_write,
+ .commit_write = yaffs_commit_write,
+#endif
+};
+
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+static int yaffs_file_flush(struct file *file, fl_owner_t id)
+#else
+static int yaffs_file_flush(struct file *file)
+#endif
+{
+ struct yaffs_obj *obj = yaffs_dentry_to_obj(file->f_dentry);
+
+ struct yaffs_dev *dev = obj->my_dev;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_flush object %d (%s)",
+ obj->obj_id,
+ obj->dirty ? "dirty" : "clean");
+
+ yaffs_gross_lock(dev);
+
+ yaffs_flush_file(obj, 1, 0, 1);
+
+ yaffs_gross_unlock(dev);
+
+ return 0;
+}
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+static int yaffs_sync_object(struct file *file, loff_t start, loff_t end, int datasync)
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
+static int yaffs_sync_object(struct file *file, int datasync)
+#else
+static int yaffs_sync_object(struct file *file, struct dentry *dentry,
+ int datasync)
+#endif
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
+ struct dentry *dentry = file->f_path.dentry;
+#endif
+
+ obj = yaffs_dentry_to_obj(dentry);
+
+ dev = obj->my_dev;
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC,
+ "yaffs_sync_object");
+ yaffs_gross_lock(dev);
+ yaffs_flush_file(obj, 1, datasync, 1);
+ yaffs_gross_unlock(dev);
+ return 0;
+}
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+static const struct file_operations yaffs_file_operations = {
+ .read = new_sync_read,
+ .read_iter = generic_file_read_iter,
+ .write = new_sync_write,
+ .write_iter = generic_file_write_iter,
+ .mmap = generic_file_mmap,
+ .flush = yaffs_file_flush,
+ .fsync = yaffs_sync_object,
+ .splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
+ .llseek = generic_file_llseek,
+};
+
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22))
+static const struct file_operations yaffs_file_operations = {
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = generic_file_aio_read,
+ .aio_write = generic_file_aio_write,
+ .mmap = generic_file_mmap,
+ .flush = yaffs_file_flush,
+ .fsync = yaffs_sync_object,
+ .splice_read = generic_file_splice_read,
+ .splice_write = generic_file_splice_write,
+ .llseek = generic_file_llseek,
+};
+
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
+
+static const struct file_operations yaffs_file_operations = {
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = generic_file_aio_read,
+ .aio_write = generic_file_aio_write,
+ .mmap = generic_file_mmap,
+ .flush = yaffs_file_flush,
+ .fsync = yaffs_sync_object,
+ .sendfile = generic_file_sendfile,
+};
+
+#else
+
+static const struct file_operations yaffs_file_operations = {
+ .read = generic_file_read,
+ .write = generic_file_write,
+ .mmap = generic_file_mmap,
+ .flush = yaffs_file_flush,
+ .fsync = yaffs_sync_object,
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ .sendfile = generic_file_sendfile,
+#endif
+};
+#endif
+
+
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25))
+static void zero_user_segment(struct page *page, unsigned start, unsigned end)
+{
+ void *kaddr = kmap_atomic(page, KM_USER0);
+ memset(kaddr + start, 0, end - start);
+ kunmap_atomic(kaddr, KM_USER0);
+ flush_dcache_page(page);
+}
+#endif
+
+
+static int yaffs_vfs_setsize(struct inode *inode, loff_t newsize)
+{
+#ifdef YAFFS_USE_TRUNCATE_SETSIZE
+ truncate_setsize(inode, newsize);
+ return 0;
+#else
+ truncate_inode_pages(&inode->i_data, newsize);
+ return 0;
+#endif
+
+}
+
+
+static int yaffs_vfs_setattr(struct inode *inode, struct iattr *attr)
+{
+#ifdef YAFFS_USE_SETATTR_COPY
+ setattr_copy(inode, attr);
+ return 0;
+#else
+ return inode_setattr(inode, attr);
+#endif
+
+}
+
+static int yaffs_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ struct inode *inode = dentry->d_inode;
+ int error = 0;
+ struct yaffs_dev *dev;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_setattr of object %d",
+ yaffs_inode_to_obj(inode)->obj_id);
+#if 0
+ /* Fail if a requested resize >= 2GB */
+ if (attr->ia_valid & ATTR_SIZE && (attr->ia_size >> 31))
+ error = -EINVAL;
+#endif
+
+ if (error == 0)
+ error = inode_change_ok(inode, attr);
+ if (error == 0) {
+ int result;
+ if (!error) {
+ error = yaffs_vfs_setattr(inode, attr);
+ yaffs_trace(YAFFS_TRACE_OS, "inode_setattr called");
+ if (attr->ia_valid & ATTR_SIZE) {
+ yaffs_vfs_setsize(inode, attr->ia_size);
+ inode->i_blocks = (inode->i_size + 511) >> 9;
+ }
+ }
+ dev = yaffs_inode_to_obj(inode)->my_dev;
+ if (attr->ia_valid & ATTR_SIZE) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "resize to %d(%x)",
+ (int)(attr->ia_size),
+ (int)(attr->ia_size));
+ }
+ yaffs_gross_lock(dev);
+ result = yaffs_set_attribs(yaffs_inode_to_obj(inode), attr);
+ if (result == YAFFS_OK) {
+ error = 0;
+ } else {
+ error = -EPERM;
+ }
+ yaffs_gross_unlock(dev);
+
+ }
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_setattr done returning %d", error);
+
+ return error;
+}
+
+static int yaffs_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags)
+{
+ struct inode *inode = dentry->d_inode;
+ int error = 0;
+ struct yaffs_dev *dev;
+ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr of object %d", obj->obj_id);
+
+ if (error == 0) {
+ int result;
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ result = yaffs_set_xattrib(obj, name, value, size, flags);
+ if (result == YAFFS_OK)
+ error = 0;
+ else if (result < 0)
+ error = result;
+ yaffs_gross_unlock(dev);
+
+ }
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr done returning %d", error);
+
+ return error;
+}
+
+static ssize_t yaffs_getxattr(struct dentry * dentry, const char *name,
+ void *buff, size_t size)
+{
+ struct inode *inode = dentry->d_inode;
+ int error = 0;
+ struct yaffs_dev *dev;
+ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_getxattr \"%s\" from object %d",
+ name, obj->obj_id);
+
+ if (error == 0) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ error = yaffs_get_xattrib(obj, name, buff, size);
+ yaffs_gross_unlock(dev);
+
+ }
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_getxattr done returning %d", error);
+
+ return error;
+}
+
+static int yaffs_removexattr(struct dentry *dentry, const char *name)
+{
+ struct inode *inode = dentry->d_inode;
+ int error = 0;
+ struct yaffs_dev *dev;
+ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_removexattr of object %d", obj->obj_id);
+
+ if (error == 0) {
+ int result;
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ result = yaffs_remove_xattrib(obj, name);
+ if (result == YAFFS_OK)
+ error = 0;
+ else if (result < 0)
+ error = result;
+ yaffs_gross_unlock(dev);
+
+ }
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_removexattr done returning %d", error);
+
+ return error;
+}
+
+static ssize_t yaffs_listxattr(struct dentry * dentry, char *buff, size_t size)
+{
+ struct inode *inode = dentry->d_inode;
+ int error = 0;
+ struct yaffs_dev *dev;
+ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_listxattr of object %d", obj->obj_id);
+
+ if (error == 0) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ error = yaffs_list_xattrib(obj, buff, size);
+ yaffs_gross_unlock(dev);
+
+ }
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_listxattr done returning %d", error);
+
+ return error;
+}
+
+
+static const struct inode_operations yaffs_file_inode_operations = {
+ .setattr = yaffs_setattr,
+ .setxattr = yaffs_setxattr,
+ .getxattr = yaffs_getxattr,
+ .listxattr = yaffs_listxattr,
+ .removexattr = yaffs_removexattr,
+};
+
+
+static int yaffs_readlink(struct dentry *dentry, char __user * buffer,
+ int buflen)
+{
+ unsigned char *alias;
+ int ret;
+
+ struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry));
+
+ yaffs_gross_unlock(dev);
+
+ if (!alias)
+ return -ENOMEM;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
+ ret = vfs_readlink(dentry, buffer, buflen, alias);
+#else
+ ret = readlink_copy(buffer, buflen, alias);
+#endif
+ kfree(alias);
+ return ret;
+}
+
+#if (YAFFS_NEW_FOLLOW_LINK == 1)
+static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+ void *ret;
+#else
+static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+ int ret
+#endif
+ unsigned char *alias;
+ int ret_int = 0;
+ struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry));
+ yaffs_gross_unlock(dev);
+
+ if (!alias) {
+ ret_int = -ENOMEM;
+ goto out;
+ }
+#if (YAFFS_NEW_FOLLOW_LINK == 1)
+ nd_set_link(nd, alias);
+ ret = alias;
+out:
+ if (ret_int)
+ ret = ERR_PTR(ret_int);
+ return ret;
+#else
+ ret = vfs_follow_link(nd, alias);
+ kfree(alias);
+out:
+ if (ret_int)
+ ret = ret_int;
+ return ret;
+#endif
+}
+
+
+#ifdef YAFFS_HAS_PUT_INODE
+
+/* For now put inode is just for debugging
+ * Put inode is called when the inode **structure** is put.
+ */
+static void yaffs_put_inode(struct inode *inode)
+{
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_put_inode: ino %d, count %d"),
+ (int)inode->i_ino, atomic_read(&inode->i_count);
+
+}
+#endif
+
+#if (YAFFS_NEW_FOLLOW_LINK == 1)
+void yaffs_put_link(struct dentry *dentry, struct nameidata *nd, void *alias)
+{
+ kfree(alias);
+}
+#endif
+
+static const struct inode_operations yaffs_symlink_inode_operations = {
+ .readlink = yaffs_readlink,
+ .follow_link = yaffs_follow_link,
+#if (YAFFS_NEW_FOLLOW_LINK == 1)
+ .put_link = yaffs_put_link,
+#endif
+ .setattr = yaffs_setattr,
+ .setxattr = yaffs_setxattr,
+ .getxattr = yaffs_getxattr,
+ .listxattr = yaffs_listxattr,
+ .removexattr = yaffs_removexattr,
+};
+
+#ifdef YAFFS_USE_OWN_IGET
+
+static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino)
+{
+ struct inode *inode;
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_iget for %lu", ino);
+
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+
+ /* NB This is called as a side effect of other functions, but
+ * we had to release the lock to prevent deadlocks, so
+ * need to lock again.
+ */
+
+ yaffs_gross_lock(dev);
+
+ obj = yaffs_find_by_number(dev, inode->i_ino);
+
+ yaffs_fill_inode_from_obj(inode, obj);
+
+ yaffs_gross_unlock(dev);
+
+ unlock_new_inode(inode);
+ return inode;
+}
+
+#else
+
+static void yaffs_read_inode(struct inode *inode)
+{
+ /* NB This is called as a side effect of other functions, but
+ * we had to release the lock to prevent deadlocks, so
+ * need to lock again.
+ */
+
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev = yaffs_super_to_dev(inode->i_sb);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_read_inode for %d", (int)inode->i_ino);
+
+ if (current != yaffs_dev_to_lc(dev)->readdir_process)
+ yaffs_gross_lock(dev);
+
+ obj = yaffs_find_by_number(dev, inode->i_ino);
+
+ yaffs_fill_inode_from_obj(inode, obj);
+
+ if (current != yaffs_dev_to_lc(dev)->readdir_process)
+ yaffs_gross_unlock(dev);
+}
+
+#endif
+
+
+
+struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
+ struct yaffs_obj *obj)
+{
+ struct inode *inode;
+
+ if (!sb) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_get_inode for NULL super_block!!");
+ return NULL;
+
+ }
+
+ if (!obj) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_get_inode for NULL object!!");
+ return NULL;
+
+ }
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_get_inode for object %d", obj->obj_id);
+
+ inode = Y_IGET(sb, obj->obj_id);
+ if (IS_ERR(inode))
+ return NULL;
+
+ /* NB Side effect: iget calls back to yaffs_read_inode(). */
+ /* iget also increments the inode's i_count */
+ /* NB You can't be holding gross_lock or deadlock will happen! */
+
+ return inode;
+}
+
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
+#define YCRED(x) x
+#else
+#define YCRED(x) (x->cred)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
+#define YPROC_uid(p) (YCRED(p)->fsuid)
+#define YPROC_gid(p) (YCRED(p)->fsgid)
+#define EXTRACT_gid(x) x
+#define EXTRACT_uid(x) x
+#define MAKE_gid(x) x
+#define MAKE_uid(x) x
+#else
+#define YPROC_uid(p) from_kuid(&init_user_ns, YCRED(p)->fsuid)
+#define YPROC_gid(p) from_kgid(&init_user_ns, YCRED(p)->fsgid)
+#define EXTRACT_gid(x) from_kgid(&init_user_ns, x)
+#define EXTRACT_uid(x) from_kuid(&init_user_ns, x)
+#define MAKE_gid(x) make_kgid(&init_user_ns, x)
+#define MAKE_uid(x) make_kuid(&init_user_ns, x)
+#endif
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+static int yaffs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
+ dev_t rdev)
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+ dev_t rdev)
+#else
+static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+ int rdev)
+#endif
+{
+ struct inode *inode;
+
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_dev *dev;
+
+ struct yaffs_obj *parent = yaffs_inode_to_obj(dir);
+
+ int error = -ENOSPC;
+ uid_t uid = YPROC_uid(current);
+ gid_t gid =
+ (dir->i_mode & S_ISGID) ? EXTRACT_gid(dir->i_gid) : YPROC_gid(current);
+
+ if ((dir->i_mode & S_ISGID) && S_ISDIR(mode))
+ mode |= S_ISGID;
+
+ if (parent) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_mknod: parent object %d type %d",
+ parent->obj_id, parent->variant_type);
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_mknod: could not get parent object");
+ return -EPERM;
+ }
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_mknod: making oject for %s, mode %x dev %x",
+ dentry->d_name.name, mode, rdev);
+
+ dev = parent->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ switch (mode & S_IFMT) {
+ default:
+ /* Special (socket, fifo, device...) */
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making special");
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ obj =
+ yaffs_create_special(parent, dentry->d_name.name, mode, uid,
+ gid, old_encode_dev(rdev));
+#else
+ obj =
+ yaffs_create_special(parent, dentry->d_name.name, mode, uid,
+ gid, rdev);
+#endif
+ break;
+ case S_IFREG: /* file */
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making file");
+ obj = yaffs_create_file(parent, dentry->d_name.name, mode, uid,
+ gid);
+ break;
+ case S_IFDIR: /* directory */
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making directory");
+ obj = yaffs_create_dir(parent, dentry->d_name.name, mode,
+ uid, gid);
+ break;
+ case S_IFLNK: /* symlink */
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making symlink");
+ obj = NULL; /* Do we ever get here? */
+ break;
+ }
+
+ /* Can not call yaffs_get_inode() with gross lock held */
+ yaffs_gross_unlock(dev);
+
+ if (obj) {
+ inode = yaffs_get_inode(dir->i_sb, mode, rdev, obj);
+ d_instantiate(dentry, inode);
+ update_dir_time(dir);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_mknod created object %d count = %d",
+ obj->obj_id, atomic_read(&inode->i_count));
+ error = 0;
+ yaffs_fill_inode_from_obj(dir, parent);
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod failed making object");
+ error = -ENOMEM;
+ }
+
+ return error;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+#else
+static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+#endif
+{
+ int ret_val;
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mkdir");
+ ret_val = yaffs_mknod(dir, dentry, mode | S_IFDIR, 0);
+ return ret_val;
+}
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ bool dummy)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ struct nameidata *n)
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
+ struct nameidata *n)
+#else
+static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode)
+#endif
+{
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_create");
+ return yaffs_mknod(dir, dentry, mode | S_IFREG, 0);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int dummy)
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
+ struct nameidata *n)
+#else
+static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry)
+#endif
+{
+ struct yaffs_obj *obj;
+ struct inode *inode = NULL; /* NCB 2.5/2.6 needs NULL here */
+
+ struct yaffs_dev *dev = yaffs_inode_to_obj(dir)->my_dev;
+
+ if (current != yaffs_dev_to_lc(dev)->readdir_process)
+ yaffs_gross_lock(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_lookup for %d:%s",
+ yaffs_inode_to_obj(dir)->obj_id, dentry->d_name.name);
+
+ obj = yaffs_find_by_name(yaffs_inode_to_obj(dir), dentry->d_name.name);
+
+ obj = yaffs_get_equivalent_obj(obj); /* in case it was a hardlink */
+
+ /* Can't hold gross lock when calling yaffs_get_inode() */
+ if (current != yaffs_dev_to_lc(dev)->readdir_process)
+ yaffs_gross_unlock(dev);
+
+ if (obj) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_lookup found %d", obj->obj_id);
+
+ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_lookup not found");
+
+ }
+
+/* added NCB for 2.5/6 compatibility - forces add even if inode is
+ * NULL which creates dentry hash */
+ d_add(dentry, inode);
+
+ return NULL;
+}
+
+/*
+ * Create a link...
+ */
+static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *dentry)
+{
+ struct inode *inode = old_dentry->d_inode;
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_obj *link = NULL;
+ struct yaffs_dev *dev;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_link");
+
+ obj = yaffs_inode_to_obj(inode);
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ if (!S_ISDIR(inode->i_mode)) /* Don't link directories */
+ link =
+ yaffs_link_obj(yaffs_inode_to_obj(dir), dentry->d_name.name,
+ obj);
+
+ if (link) {
+ set_nlink(old_dentry->d_inode, yaffs_get_obj_link_count(obj));
+ d_instantiate(dentry, old_dentry->d_inode);
+ atomic_inc(&old_dentry->d_inode->i_count);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_link link count %d i_count %d",
+ old_dentry->d_inode->i_nlink,
+ atomic_read(&old_dentry->d_inode->i_count));
+ }
+
+ yaffs_gross_unlock(dev);
+
+ if (link) {
+ update_dir_time(dir);
+ return 0;
+ }
+
+ return -EPERM;
+}
+
+static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
+ const char *symname)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+ uid_t uid = YPROC_uid(current);
+ gid_t gid =
+ (dir->i_mode & S_ISGID) ? EXTRACT_gid(dir->i_gid) : YPROC_gid(current);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_symlink");
+
+ if (strnlen(dentry->d_name.name, YAFFS_MAX_NAME_LENGTH + 1) >
+ YAFFS_MAX_NAME_LENGTH)
+ return -ENAMETOOLONG;
+
+ if (strnlen(symname, YAFFS_MAX_ALIAS_LENGTH + 1) >
+ YAFFS_MAX_ALIAS_LENGTH)
+ return -ENAMETOOLONG;
+
+ dev = yaffs_inode_to_obj(dir)->my_dev;
+ yaffs_gross_lock(dev);
+ obj = yaffs_create_symlink(yaffs_inode_to_obj(dir), dentry->d_name.name,
+ S_IFLNK | S_IRWXUGO, uid, gid, symname);
+ yaffs_gross_unlock(dev);
+
+ if (obj) {
+ struct inode *inode;
+
+ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
+ d_instantiate(dentry, inode);
+ update_dir_time(dir);
+ yaffs_trace(YAFFS_TRACE_OS, "symlink created OK");
+ return 0;
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS, "symlink not created");
+ }
+
+ return -ENOMEM;
+}
+
+/*
+ * The VFS layer already does all the dentry stuff for rename.
+ *
+ * NB: POSIX says you can rename an object over an old object of the same name
+ */
+static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+ struct yaffs_dev *dev;
+ int ret_val = YAFFS_FAIL;
+ struct yaffs_obj *target;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_rename");
+ dev = yaffs_inode_to_obj(old_dir)->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ /* Check if the target is an existing directory that is not empty. */
+ target = yaffs_find_by_name(yaffs_inode_to_obj(new_dir),
+ new_dentry->d_name.name);
+
+ if (target && target->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY &&
+ !list_empty(&target->variant.dir_variant.children)) {
+
+ yaffs_trace(YAFFS_TRACE_OS, "target is non-empty dir");
+
+ ret_val = YAFFS_FAIL;
+ } else {
+ /* Now does unlinking internally using shadowing mechanism */
+ yaffs_trace(YAFFS_TRACE_OS, "calling yaffs_rename_obj");
+
+ ret_val = yaffs_rename_obj(yaffs_inode_to_obj(old_dir),
+ old_dentry->d_name.name,
+ yaffs_inode_to_obj(new_dir),
+ new_dentry->d_name.name);
+ }
+ yaffs_gross_unlock(dev);
+
+ if (ret_val == YAFFS_OK) {
+ if (target)
+ inode_dec_link_count(new_dentry->d_inode);
+
+ update_dir_time(old_dir);
+ if (old_dir != new_dir)
+ update_dir_time(new_dir);
+ return 0;
+ } else {
+ return -ENOTEMPTY;
+ }
+}
+
+
+
+
+static int yaffs_unlink(struct inode *dir, struct dentry *dentry)
+{
+ int ret_val;
+
+ struct yaffs_dev *dev;
+ struct yaffs_obj *obj;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_unlink %d:%s",
+ (int)(dir->i_ino), dentry->d_name.name);
+ obj = yaffs_inode_to_obj(dir);
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ ret_val = yaffs_unlinker(obj, dentry->d_name.name);
+
+ if (ret_val == YAFFS_OK) {
+ inode_dec_link_count(dentry->d_inode);
+ dir->i_version++;
+ yaffs_gross_unlock(dev);
+ update_dir_time(dir);
+ return 0;
+ }
+ yaffs_gross_unlock(dev);
+ return -ENOTEMPTY;
+}
+
+
+
+static const struct inode_operations yaffs_dir_inode_operations = {
+ .create = yaffs_create,
+ .lookup = yaffs_lookup,
+ .link = yaffs_link,
+ .unlink = yaffs_unlink,
+ .symlink = yaffs_symlink,
+ .mkdir = yaffs_mkdir,
+ .rmdir = yaffs_unlink,
+ .mknod = yaffs_mknod,
+ .rename = yaffs_rename,
+ .setattr = yaffs_setattr,
+ .setxattr = yaffs_setxattr,
+ .getxattr = yaffs_getxattr,
+ .listxattr = yaffs_listxattr,
+ .removexattr = yaffs_removexattr,
+};
+
+/*-----------------------------------------------------------------*/
+/* Directory search context allows us to unlock access to yaffs during
+ * filldir without causing problems with the directory being modified.
+ * This is similar to the tried and tested mechanism used in yaffs direct.
+ *
+ * A search context iterates along a doubly linked list of siblings in the
+ * directory. If the iterating object is deleted then this would corrupt
+ * the list iteration, likely causing a crash. The search context avoids
+ * this by using the remove_obj_fn to move the search context to the
+ * next object before the object is deleted.
+ *
+ * Many readdirs (and thus search contexts) may be alive simultaneously so
+ * each struct yaffs_dev has a list of these.
+ *
+ * A search context lives for the duration of a readdir.
+ *
+ * All these functions must be called while yaffs is locked.
+ */
+
+struct yaffs_search_context {
+ struct yaffs_dev *dev;
+ struct yaffs_obj *dir_obj;
+ struct yaffs_obj *next_return;
+ struct list_head others;
+};
+
+/*
+ * yaffs_new_search() creates a new search context, initialises it and
+ * adds it to the device's search context list.
+ *
+ * Called at start of readdir.
+ */
+static struct yaffs_search_context *yaffs_new_search(struct yaffs_obj *dir)
+{
+ struct yaffs_dev *dev = dir->my_dev;
+ struct yaffs_search_context *sc =
+ kmalloc(sizeof(struct yaffs_search_context), GFP_NOFS);
+ if (sc) {
+ sc->dir_obj = dir;
+ sc->dev = dev;
+ if (list_empty(&sc->dir_obj->variant.dir_variant.children))
+ sc->next_return = NULL;
+ else
+ sc->next_return =
+ list_entry(dir->variant.dir_variant.children.next,
+ struct yaffs_obj, siblings);
+ INIT_LIST_HEAD(&sc->others);
+ list_add(&sc->others, &(yaffs_dev_to_lc(dev)->search_contexts));
+ }
+ return sc;
+}
+
+/*
+ * yaffs_search_end() disposes of a search context and cleans up.
+ */
+static void yaffs_search_end(struct yaffs_search_context *sc)
+{
+ if (sc) {
+ list_del(&sc->others);
+ kfree(sc);
+ }
+}
+
+/*
+ * yaffs_search_advance() moves a search context to the next object.
+ * Called when the search iterates or when an object removal causes
+ * the search context to be moved to the next object.
+ */
+static void yaffs_search_advance(struct yaffs_search_context *sc)
+{
+ if (!sc)
+ return;
+
+ if (sc->next_return == NULL ||
+ list_empty(&sc->dir_obj->variant.dir_variant.children))
+ sc->next_return = NULL;
+ else {
+ struct list_head *next = sc->next_return->siblings.next;
+
+ if (next == &sc->dir_obj->variant.dir_variant.children)
+ sc->next_return = NULL; /* end of list */
+ else
+ sc->next_return =
+ list_entry(next, struct yaffs_obj, siblings);
+ }
+}
+
+/*
+ * yaffs_remove_obj_callback() is called when an object is unlinked.
+ * We check open search contexts and advance any which are currently
+ * on the object being iterated.
+ */
+static void yaffs_remove_obj_callback(struct yaffs_obj *obj)
+{
+
+ struct list_head *i;
+ struct yaffs_search_context *sc;
+ struct list_head *search_contexts =
+ &(yaffs_dev_to_lc(obj->my_dev)->search_contexts);
+
+ /* Iterate through the directory search contexts.
+ * If any are currently on the object being removed, then advance
+ * the search context to the next object to prevent a hanging pointer.
+ */
+ list_for_each(i, search_contexts) {
+ sc = list_entry(i, struct yaffs_search_context, others);
+ if (sc->next_return == obj)
+ yaffs_search_advance(sc);
+ }
+
+}
+
+
+/*-----------------------------------------------------------------*/
+
+#ifdef YAFFS_USE_DIR_ITERATE
+static int yaffs_iterate(struct file *f, struct dir_context *dc)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+ struct yaffs_search_context *sc;
+ unsigned long curoffs;
+ struct yaffs_obj *l;
+ int ret_val = 0;
+
+ char name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ obj = yaffs_dentry_to_obj(f->f_dentry);
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ yaffs_dev_to_lc(dev)->readdir_process = current;
+
+ sc = yaffs_new_search(obj);
+ if (!sc) {
+ ret_val = -ENOMEM;
+ goto out;
+ }
+
+ if (!dir_emit_dots(f, dc))
+ return 0;
+
+ curoffs = 1;
+
+ while (sc->next_return) {
+ curoffs++;
+ l = sc->next_return;
+ if (curoffs >= dc->pos) {
+ int this_inode = yaffs_get_obj_inode(l);
+ int this_type = yaffs_get_obj_type(l);
+
+ yaffs_get_obj_name(l, name, YAFFS_MAX_NAME_LENGTH + 1);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: %s inode %d",
+ name, yaffs_get_obj_inode(l));
+
+ yaffs_gross_unlock(dev);
+
+ if (!dir_emit(dc,
+ name,
+ strlen(name),
+ this_inode,
+ this_type)) {
+ yaffs_gross_lock(dev);
+ goto out;
+ }
+
+ yaffs_gross_lock(dev);
+
+ dc->pos++;
+ f->f_pos++;
+ }
+ yaffs_search_advance(sc);
+ }
+
+out:
+ yaffs_search_end(sc);
+ yaffs_dev_to_lc(dev)->readdir_process = NULL;
+ yaffs_gross_unlock(dev);
+
+ return ret_val;
+}
+
+#else
+
+static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+ struct yaffs_search_context *sc;
+ struct inode *inode = f->f_dentry->d_inode;
+ unsigned long offset, curoffs;
+ struct yaffs_obj *l;
+ int ret_val = 0;
+
+ char name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ obj = yaffs_dentry_to_obj(f->f_dentry);
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ yaffs_dev_to_lc(dev)->readdir_process = current;
+
+ offset = f->f_pos;
+
+ sc = yaffs_new_search(obj);
+ if (!sc) {
+ ret_val = -ENOMEM;
+ goto out;
+ }
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: starting at %d", (int)offset);
+
+ if (offset == 0) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: entry . ino %d",
+ (int)inode->i_ino);
+ yaffs_gross_unlock(dev);
+ if (filldir(dirent, ".", 1, offset, inode->i_ino, DT_DIR) < 0) {
+ yaffs_gross_lock(dev);
+ goto out;
+ }
+ yaffs_gross_lock(dev);
+ offset++;
+ f->f_pos++;
+ }
+ if (offset == 1) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: entry .. ino %d",
+ (int)f->f_dentry->d_parent->d_inode->i_ino);
+ yaffs_gross_unlock(dev);
+ if (filldir(dirent, "..", 2, offset,
+ f->f_dentry->d_parent->d_inode->i_ino,
+ DT_DIR) < 0) {
+ yaffs_gross_lock(dev);
+ goto out;
+ }
+ yaffs_gross_lock(dev);
+ offset++;
+ f->f_pos++;
+ }
+
+ curoffs = 1;
+
+ /* If the directory has changed since the open or last call to
+ readdir, rewind to after the 2 canned entries. */
+ if (f->f_version != inode->i_version) {
+ offset = 2;
+ f->f_pos = offset;
+ f->f_version = inode->i_version;
+ }
+
+ while (sc->next_return) {
+ curoffs++;
+ l = sc->next_return;
+ if (curoffs >= offset) {
+ int this_inode = yaffs_get_obj_inode(l);
+ int this_type = yaffs_get_obj_type(l);
+
+ yaffs_get_obj_name(l, name, YAFFS_MAX_NAME_LENGTH + 1);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: %s inode %d",
+ name, yaffs_get_obj_inode(l));
+
+ yaffs_gross_unlock(dev);
+
+ if (filldir(dirent,
+ name,
+ strlen(name),
+ offset, this_inode, this_type) < 0) {
+ yaffs_gross_lock(dev);
+ goto out;
+ }
+
+ yaffs_gross_lock(dev);
+
+ offset++;
+ f->f_pos++;
+ }
+ yaffs_search_advance(sc);
+ }
+
+out:
+ yaffs_search_end(sc);
+ yaffs_dev_to_lc(dev)->readdir_process = NULL;
+ yaffs_gross_unlock(dev);
+
+ return ret_val;
+}
+
+#endif
+
+static const struct file_operations yaffs_dir_operations = {
+ .read = generic_read_dir,
+#ifdef YAFFS_USE_DIR_ITERATE
+ .iterate = yaffs_iterate,
+#else
+ .readdir = yaffs_readdir,
+#endif
+ .fsync = yaffs_sync_object,
+ .llseek = generic_file_llseek,
+};
+
+static void yaffs_fill_inode_from_obj(struct inode *inode,
+ struct yaffs_obj *obj)
+{
+ if (inode && obj) {
+
+ /* Check mode against the variant type and attempt to repair if broken. */
+ u32 mode = obj->yst_mode;
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ if (!S_ISREG(mode)) {
+ obj->yst_mode &= ~S_IFMT;
+ obj->yst_mode |= S_IFREG;
+ }
+
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ if (!S_ISLNK(mode)) {
+ obj->yst_mode &= ~S_IFMT;
+ obj->yst_mode |= S_IFLNK;
+ }
+
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ if (!S_ISDIR(mode)) {
+ obj->yst_mode &= ~S_IFMT;
+ obj->yst_mode |= S_IFDIR;
+ }
+
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ default:
+ /* TODO? */
+ break;
+ }
+
+ inode->i_flags |= S_NOATIME;
+
+ inode->i_ino = obj->obj_id;
+ inode->i_mode = obj->yst_mode;
+ inode->i_uid = MAKE_uid(obj->yst_uid);
+ inode->i_gid = MAKE_gid(obj->yst_gid);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+ inode->i_blksize = inode->i_sb->s_blocksize;
+#endif
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+
+ inode->i_rdev = old_decode_dev(obj->yst_rdev);
+ inode->i_atime.tv_sec = (time_t) (obj->yst_atime);
+ inode->i_atime.tv_nsec = 0;
+ inode->i_mtime.tv_sec = (time_t) obj->yst_mtime;
+ inode->i_mtime.tv_nsec = 0;
+ inode->i_ctime.tv_sec = (time_t) obj->yst_ctime;
+ inode->i_ctime.tv_nsec = 0;
+#else
+ inode->i_rdev = obj->yst_rdev;
+ inode->i_atime = obj->yst_atime;
+ inode->i_mtime = obj->yst_mtime;
+ inode->i_ctime = obj->yst_ctime;
+#endif
+ inode->i_size = yaffs_get_obj_length(obj);
+ inode->i_blocks = (inode->i_size + 511) >> 9;
+
+ set_nlink(inode, yaffs_get_obj_link_count(obj));
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_fill_inode mode %x uid %d gid %d size %lld count %d",
+ inode->i_mode, obj->yst_uid, obj->yst_gid,
+ inode->i_size, atomic_read(&inode->i_count));
+
+ switch (obj->yst_mode & S_IFMT) {
+ default: /* fifo, device or socket */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ init_special_inode(inode, obj->yst_mode,
+ old_decode_dev(obj->yst_rdev));
+#else
+ init_special_inode(inode, obj->yst_mode,
+ (dev_t) (obj->yst_rdev));
+#endif
+ break;
+ case S_IFREG: /* file */
+ inode->i_op = &yaffs_file_inode_operations;
+ inode->i_fop = &yaffs_file_operations;
+ inode->i_mapping->a_ops =
+ &yaffs_file_address_operations;
+ break;
+ case S_IFDIR: /* directory */
+ inode->i_op = &yaffs_dir_inode_operations;
+ inode->i_fop = &yaffs_dir_operations;
+ break;
+ case S_IFLNK: /* symlink */
+ inode->i_op = &yaffs_symlink_inode_operations;
+ break;
+ }
+
+ yaffs_inode_to_obj_lv(inode) = obj;
+
+ obj->my_inode = inode;
+
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_fill_inode invalid parameters");
+ }
+
+}
+
+
+
+/*
+ * yaffs background thread functions.
+ * yaffs_bg_thread_fn() the thread function
+ * yaffs_bg_start() launches the background thread.
+ * yaffs_bg_stop() cleans up the background thread.
+ *
+ * NB:
+ * The thread should only run after yaffs is initialised.
+ * The thread should be stopped before yaffs is unmounted.
+ * The thread should not do any writing while the fs is in read-only mode.
+ */
+
+static unsigned yaffs_bg_gc_urgency(struct yaffs_dev *dev)
+{
+ unsigned erased_chunks =
+ dev->n_erased_blocks * dev->param.chunks_per_block;
+ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
+ unsigned scattered = 0; /* Free chunks not in an erased block */
+
+ if (erased_chunks < dev->n_free_chunks)
+ scattered = (dev->n_free_chunks - erased_chunks);
+
+ if (!context->bg_running)
+ return 0;
+ else if (scattered < (dev->param.chunks_per_block * 2))
+ return 0;
+ else if (erased_chunks > dev->n_free_chunks / 2)
+ return 0;
+ else if (erased_chunks > dev->n_free_chunks / 4)
+ return 1;
+ else
+ return 2;
+}
+
+#ifdef YAFFS_COMPILE_BACKGROUND
+
+void yaffs_background_waker(unsigned long data)
+{
+ wake_up_process((struct task_struct *)data);
+}
+
+static int yaffs_bg_thread_fn(void *data)
+{
+ struct yaffs_dev *dev = (struct yaffs_dev *)data;
+ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
+ unsigned long now = jiffies;
+ unsigned long next_dir_update = now;
+ unsigned long next_gc = now;
+ unsigned long expires;
+ unsigned int urgency;
+
+ int gc_result;
+ struct timer_list timer;
+
+ yaffs_trace(YAFFS_TRACE_BACKGROUND,
+ "yaffs_background starting for dev %p", (void *)dev);
+
+#ifdef YAFFS_COMPILE_FREEZER
+ set_freezable();
+#endif
+ while (context->bg_running) {
+ yaffs_trace(YAFFS_TRACE_BACKGROUND, "yaffs_background");
+
+ if (kthread_should_stop())
+ break;
+
+#ifdef YAFFS_COMPILE_FREEZER
+ if (try_to_freeze())
+ continue;
+#endif
+ yaffs_gross_lock(dev);
+
+ now = jiffies;
+
+ if (time_after(now, next_dir_update) && yaffs_bg_enable) {
+ yaffs_update_dirty_dirs(dev);
+ next_dir_update = now + HZ;
+ }
+
+ if (time_after(now, next_gc) && yaffs_bg_enable) {
+ if (!dev->is_checkpointed) {
+ urgency = yaffs_bg_gc_urgency(dev);
+ gc_result = yaffs_bg_gc(dev, urgency);
+ if (urgency > 1)
+ next_gc = now + HZ / 20 + 1;
+ else if (urgency > 0)
+ next_gc = now + HZ / 10 + 1;
+ else
+ next_gc = now + HZ * 2;
+ } else {
+ /*
+ * gc not running so set to next_dir_update
+ * to cut down on wake ups
+ */
+ next_gc = next_dir_update;
+ }
+ }
+ yaffs_gross_unlock(dev);
+#if 1
+ expires = next_dir_update;
+ if (time_before(next_gc, expires))
+ expires = next_gc;
+ if (time_before(expires, now))
+ expires = now + HZ;
+
+ Y_INIT_TIMER(&timer);
+ timer.expires = expires + 1;
+ timer.data = (unsigned long)current;
+ timer.function = yaffs_background_waker;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_timer(&timer);
+ schedule();
+ del_timer_sync(&timer);
+#else
+ msleep(10);
+#endif
+ }
+
+ return 0;
+}
+
+static int yaffs_bg_start(struct yaffs_dev *dev)
+{
+ int retval = 0;
+ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
+
+ if (dev->read_only)
+ return -1;
+
+ context->bg_running = 1;
+
+ context->bg_thread = kthread_run(yaffs_bg_thread_fn,
+ (void *)dev, "yaffs-bg-%d",
+ context->mount_id);
+
+ if (IS_ERR(context->bg_thread)) {
+ retval = PTR_ERR(context->bg_thread);
+ context->bg_thread = NULL;
+ context->bg_running = 0;
+ }
+ return retval;
+}
+
+static void yaffs_bg_stop(struct yaffs_dev *dev)
+{
+ struct yaffs_linux_context *ctxt = yaffs_dev_to_lc(dev);
+
+ ctxt->bg_running = 0;
+
+ if (ctxt->bg_thread) {
+ kthread_stop(ctxt->bg_thread);
+ ctxt->bg_thread = NULL;
+ }
+}
+#else
+static int yaffs_bg_thread_fn(void *data)
+{
+ return 0;
+}
+
+static int yaffs_bg_start(struct yaffs_dev *dev)
+{
+ return 0;
+}
+
+static void yaffs_bg_stop(struct yaffs_dev *dev)
+{
+}
+#endif
+
+
+static void yaffs_flush_inodes(struct super_block *sb)
+{
+ struct inode *iptr;
+ struct yaffs_obj *obj;
+
+ list_for_each_entry(iptr, &sb->s_inodes, i_sb_list) {
+ obj = yaffs_inode_to_obj(iptr);
+ if (obj) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "flushing obj %d",
+ obj->obj_id);
+ yaffs_flush_file(obj, 1, 0, 1);
+ }
+ }
+}
+
+static void yaffs_flush_super(struct super_block *sb, int do_checkpoint)
+{
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+ if (!dev)
+ return;
+
+ yaffs_flush_inodes(sb);
+ yaffs_update_dirty_dirs(dev);
+ yaffs_flush_whole_cache(dev, 1);
+ if (do_checkpoint)
+ yaffs_checkpoint_save(dev);
+}
+
+static LIST_HEAD(yaffs_context_list);
+struct mutex yaffs_context_lock;
+
+static void yaffs_put_super(struct super_block *sb)
+{
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_ALWAYS,
+ "yaffs_put_super");
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
+ "Shutting down yaffs background thread");
+ yaffs_bg_stop(dev);
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
+ "yaffs background thread shut down");
+
+ yaffs_gross_lock(dev);
+
+ yaffs_flush_super(sb, 1);
+
+ yaffs_deinitialise(dev);
+
+ yaffs_gross_unlock(dev);
+
+ mutex_lock(&yaffs_context_lock);
+ list_del_init(&(yaffs_dev_to_lc(dev)->context_list));
+ mutex_unlock(&yaffs_context_lock);
+
+ if (yaffs_dev_to_lc(dev)->spare_buffer) {
+ kfree(yaffs_dev_to_lc(dev)->spare_buffer);
+ yaffs_dev_to_lc(dev)->spare_buffer = NULL;
+ }
+
+ kfree(dev);
+
+ yaffs_put_mtd_device(mtd);
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_ALWAYS,
+ "yaffs_put_super done");
+}
+
+
+static unsigned yaffs_gc_control_callback(struct yaffs_dev *dev)
+{
+ return yaffs_gc_control;
+}
+
+
+#ifdef YAFFS_COMPILE_EXPORTFS
+
+static struct inode *yaffs2_nfs_get_inode(struct super_block *sb, uint64_t ino,
+ uint32_t generation)
+{
+ return Y_IGET(sb, ino);
+}
+
+static struct dentry *yaffs2_fh_to_dentry(struct super_block *sb,
+ struct fid *fid, int fh_len,
+ int fh_type)
+{
+ return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
+ yaffs2_nfs_get_inode);
+}
+
+static struct dentry *yaffs2_fh_to_parent(struct super_block *sb,
+ struct fid *fid, int fh_len,
+ int fh_type)
+{
+ return generic_fh_to_parent(sb, fid, fh_len, fh_type,
+ yaffs2_nfs_get_inode);
+}
+
+struct dentry *yaffs2_get_parent(struct dentry *dentry)
+{
+
+ struct super_block *sb = dentry->d_inode->i_sb;
+ struct dentry *parent = ERR_PTR(-ENOENT);
+ struct inode *inode;
+ unsigned long parent_ino;
+ struct yaffs_obj *d_obj;
+ struct yaffs_obj *parent_obj;
+
+ d_obj = yaffs_inode_to_obj(dentry->d_inode);
+
+ if (d_obj) {
+ parent_obj = d_obj->parent;
+ if (parent_obj) {
+ parent_ino = yaffs_get_obj_inode(parent_obj);
+ inode = Y_IGET(sb, parent_ino);
+
+ if (IS_ERR(inode)) {
+ parent = ERR_CAST(inode);
+ } else {
+ parent = d_obtain_alias(inode);
+ if (!IS_ERR(parent)) {
+ parent = ERR_PTR(-ENOMEM);
+ iput(inode);
+ }
+ }
+ }
+ }
+
+ return parent;
+}
+
+/* Just declare a zeroed structure, since a NULL value implies
+ * using the default functions of exportfs.
+ */
+
+static struct export_operations yaffs_export_ops = {
+ .fh_to_dentry = yaffs2_fh_to_dentry,
+ .fh_to_parent = yaffs2_fh_to_parent,
+ .get_parent = yaffs2_get_parent,
+};
+
+#endif
+
+static void yaffs_unstitch_obj(struct inode *inode, struct yaffs_obj *obj)
+{
+ /* Clear the association between the inode and
+ * the struct yaffs_obj.
+ */
+ obj->my_inode = NULL;
+ yaffs_inode_to_obj_lv(inode) = NULL;
+
+ /* If the object freeing was deferred, then the real
+ * free happens now.
+ * This should fix the inode inconsistency problem.
+ */
+ yaffs_handle_defered_free(obj);
+}
+
+#ifdef YAFFS_HAS_EVICT_INODE
+/* yaffs_evict_inode combines into one operation what was previously done in
+ * yaffs_clear_inode() and yaffs_delete_inode()
+ *
+ */
+static void yaffs_evict_inode(struct inode *inode)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+ int deleteme = 0;
+
+ obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_evict_inode: ino %d, count %d %s",
+ (int)inode->i_ino, atomic_read(&inode->i_count),
+ obj ? "object exists" : "null object");
+
+ if (!inode->i_nlink && !is_bad_inode(inode))
+ deleteme = 1;
+ truncate_inode_pages(&inode->i_data, 0);
+ Y_CLEAR_INODE(inode);
+
+ if (deleteme && obj) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ yaffs_del_obj(obj);
+ yaffs_gross_unlock(dev);
+ }
+ if (obj) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ yaffs_unstitch_obj(inode, obj);
+ yaffs_gross_unlock(dev);
+ }
+}
+#else
+
+/* clear is called to tell the fs to release any per-inode data it holds.
+ * The object might still exist on disk and is just being thrown out of the cache
+ * or else the object has actually been deleted and we're being called via
+ * the chain
+ * yaffs_delete_inode() -> clear_inode()->yaffs_clear_inode()
+ */
+
+static void yaffs_clear_inode(struct inode *inode)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+
+ obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_clear_inode: ino %d, count %d %s",
+ (int)inode->i_ino, atomic_read(&inode->i_count),
+ obj ? "object exists" : "null object");
+
+ if (obj) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ yaffs_unstitch_obj(inode, obj);
+ yaffs_gross_unlock(dev);
+ }
+
+}
+
+/* delete is called when the link count is zero and the inode
+ * is put (ie. nobody wants to know about it anymore, time to
+ * delete the file).
+ * NB Must call clear_inode()
+ */
+static void yaffs_delete_inode(struct inode *inode)
+{
+ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+ struct yaffs_dev *dev;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_delete_inode: ino %d, count %d %s",
+ (int)inode->i_ino, atomic_read(&inode->i_count),
+ obj ? "object exists" : "null object");
+
+ if (obj) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ yaffs_del_obj(obj);
+ yaffs_gross_unlock(dev);
+ }
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
+ truncate_inode_pages(&inode->i_data, 0);
+#endif
+ clear_inode(inode);
+}
+#endif
+
+
+
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
+ struct super_block *sb = dentry->d_sb;
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+#else
+static int yaffs_statfs(struct super_block *sb, struct statfs *buf)
+{
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+#endif
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_statfs");
+
+ yaffs_gross_lock(dev);
+
+ buf->f_type = YAFFS_MAGIC;
+ buf->f_bsize = sb->s_blocksize;
+ buf->f_namelen = 255;
+
+ if (dev->data_bytes_per_chunk & (dev->data_bytes_per_chunk - 1)) {
+ /* Do this if chunk size is not a power of 2 */
+
+ uint64_t bytes_in_dev;
+ uint64_t bytes_free;
+
+ bytes_in_dev =
+ ((uint64_t)
+ ((dev->param.end_block - dev->param.start_block +
+ 1))) * ((uint64_t) (dev->param.chunks_per_block *
+ dev->data_bytes_per_chunk));
+
+ do_div(bytes_in_dev, sb->s_blocksize); /* bytes_in_dev becomes the number of blocks */
+ buf->f_blocks = bytes_in_dev;
+
+ bytes_free = ((uint64_t) (yaffs_get_n_free_chunks(dev))) *
+ ((uint64_t) (dev->data_bytes_per_chunk));
+
+ do_div(bytes_free, sb->s_blocksize);
+
+ buf->f_bfree = bytes_free;
+
+ } else if (sb->s_blocksize > dev->data_bytes_per_chunk) {
+
+ buf->f_blocks =
+ (dev->param.end_block - dev->param.start_block + 1) *
+ dev->param.chunks_per_block /
+ (sb->s_blocksize / dev->data_bytes_per_chunk);
+ buf->f_bfree =
+ yaffs_get_n_free_chunks(dev) /
+ (sb->s_blocksize / dev->data_bytes_per_chunk);
+ } else {
+ buf->f_blocks =
+ (dev->param.end_block - dev->param.start_block + 1) *
+ dev->param.chunks_per_block *
+ (dev->data_bytes_per_chunk / sb->s_blocksize);
+
+ buf->f_bfree =
+ yaffs_get_n_free_chunks(dev) *
+ (dev->data_bytes_per_chunk / sb->s_blocksize);
+ }
+
+ buf->f_files = 0;
+ buf->f_ffree = 0;
+ buf->f_bavail = buf->f_bfree;
+
+ yaffs_gross_unlock(dev);
+ return 0;
+}
+
+
+
+static int yaffs_do_sync_fs(struct super_block *sb, int request_checkpoint)
+{
+
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+ unsigned int oneshot_checkpoint = (yaffs_auto_checkpoint & 4);
+ unsigned gc_urgent = yaffs_bg_gc_urgency(dev);
+ int do_checkpoint;
+ int dirty = yaffs_check_super_dirty(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
+ "yaffs_do_sync_fs: gc-urgency %d %s %s%s",
+ gc_urgent,
+ dirty ? "dirty" : "clean",
+ request_checkpoint ? "checkpoint requested" : "no checkpoint",
+ oneshot_checkpoint ? " one-shot" : "");
+
+ yaffs_gross_lock(dev);
+ do_checkpoint = ((request_checkpoint && !gc_urgent) ||
+ oneshot_checkpoint) && !dev->is_checkpointed;
+
+ if (dirty || do_checkpoint) {
+ yaffs_flush_super(sb, !dev->is_checkpointed && do_checkpoint);
+ yaffs_clear_super_dirty(dev);
+ if (oneshot_checkpoint)
+ yaffs_auto_checkpoint &= ~4;
+ }
+ yaffs_gross_unlock(dev);
+
+ return 0;
+}
+
+
+#ifdef YAFFS_HAS_WRITE_SUPER
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+static void yaffs_write_super(struct super_block *sb)
+#else
+static int yaffs_write_super(struct super_block *sb)
+#endif
+{
+ unsigned request_checkpoint = (yaffs_auto_checkpoint >= 2);
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
+ "yaffs_write_super %s",
+ request_checkpoint ? " checkpt" : "");
+
+ yaffs_do_sync_fs(sb, request_checkpoint);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
+ return 0;
+#endif
+}
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+static int yaffs_sync_fs(struct super_block *sb, int wait)
+#else
+static int yaffs_sync_fs(struct super_block *sb)
+#endif
+{
+ unsigned request_checkpoint = (yaffs_auto_checkpoint >= 1);
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC,
+ "yaffs_sync_fs%s", request_checkpoint ? " checkpt" : "");
+
+ yaffs_do_sync_fs(sb, request_checkpoint);
+
+ return 0;
+}
+
+/* This function is used only to change dev->read_only when this file system
+ * is remounted.
+ */
+static int yaffs_remount_fs(struct super_block *sb, int *flags, char *data)
+{
+ int read_only = 0;
+ struct mtd_info *mtd;
+ struct yaffs_dev *dev = 0;
+
+ /* Get the device */
+ mtd = get_mtd_device(NULL, MINOR(sb->s_dev));
+ if (!mtd) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "MTD device #%u doesn't appear to exist",
+ MINOR(sb->s_dev));
+ return 1;
+ }
+
+ /* Check it's NAND */
+ if (mtd->type != MTD_NANDFLASH) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "MTD device is not NAND it's type %d",
+ mtd->type);
+ return 1;
+ }
+
+ read_only = ((*flags & MS_RDONLY) != 0);
+ if (!read_only && !(mtd->flags & MTD_WRITEABLE)) {
+ read_only = 1;
+ printk(KERN_INFO
+ "yaffs: mtd is read only, setting superblock read only");
+ *flags |= MS_RDONLY;
+ }
+
+ dev = sb->s_fs_info;
+ dev->read_only = read_only;
+
+ return 0;
+}
+
+static const struct super_operations yaffs_super_ops = {
+ .statfs = yaffs_statfs,
+
+#ifndef YAFFS_USE_OWN_IGET
+ .read_inode = yaffs_read_inode,
+#endif
+#ifdef YAFFS_HAS_PUT_INODE
+ .put_inode = yaffs_put_inode,
+#endif
+ .put_super = yaffs_put_super,
+#ifdef YAFFS_HAS_EVICT_INODE
+ .evict_inode = yaffs_evict_inode,
+#else
+ .delete_inode = yaffs_delete_inode,
+ .clear_inode = yaffs_clear_inode,
+#endif
+ .sync_fs = yaffs_sync_fs,
+#ifdef YAFFS_HAS_WRITE_SUPER
+ .write_super = yaffs_write_super,
+#endif
+ .remount_fs = yaffs_remount_fs,
+};
+
+struct yaffs_options {
+ int inband_tags;
+ int tags_9bytes;
+ int skip_checkpoint_read;
+ int skip_checkpoint_write;
+ int no_cache;
+ int tags_ecc_on;
+ int tags_ecc_overridden;
+ int lazy_loading_enabled;
+ int lazy_loading_overridden;
+ int empty_lost_and_found;
+ int empty_lost_and_found_overridden;
+ int disable_summary;
+};
+
+#define MAX_OPT_LEN 30
+static int yaffs_parse_options(struct yaffs_options *options,
+ const char *options_str)
+{
+ char cur_opt[MAX_OPT_LEN + 1];
+ int p;
+ int error = 0;
+
+	/* Parse through the options, which are a comma-separated list */
+
+ while (options_str && *options_str && !error) {
+ memset(cur_opt, 0, MAX_OPT_LEN + 1);
+ p = 0;
+
+ while (*options_str == ',')
+ options_str++;
+
+ while (*options_str && *options_str != ',') {
+ if (p < MAX_OPT_LEN) {
+ cur_opt[p] = *options_str;
+ p++;
+ }
+ options_str++;
+ }
+
+ if (!strcmp(cur_opt, "inband-tags")) {
+ options->inband_tags = 1;
+ } else if (!strcmp(cur_opt, "tags-9bytes")) {
+ options->tags_9bytes = 1;
+ } else if (!strcmp(cur_opt, "tags-ecc-off")) {
+ options->tags_ecc_on = 0;
+ options->tags_ecc_overridden = 1;
+ } else if (!strcmp(cur_opt, "tags-ecc-on")) {
+ options->tags_ecc_on = 1;
+ options->tags_ecc_overridden = 1;
+ } else if (!strcmp(cur_opt, "lazy-loading-off")) {
+ options->lazy_loading_enabled = 0;
+ options->lazy_loading_overridden = 1;
+ } else if (!strcmp(cur_opt, "lazy-loading-on")) {
+ options->lazy_loading_enabled = 1;
+ options->lazy_loading_overridden = 1;
+ } else if (!strcmp(cur_opt, "disable-summary")) {
+ options->disable_summary = 1;
+ } else if (!strcmp(cur_opt, "empty-lost-and-found-off")) {
+ options->empty_lost_and_found = 0;
+ options->empty_lost_and_found_overridden = 1;
+ } else if (!strcmp(cur_opt, "empty-lost-and-found-on")) {
+ options->empty_lost_and_found = 1;
+ options->empty_lost_and_found_overridden = 1;
+ } else if (!strcmp(cur_opt, "no-cache")) {
+ options->no_cache = 1;
+ } else if (!strcmp(cur_opt, "no-checkpoint-read")) {
+ options->skip_checkpoint_read = 1;
+ } else if (!strcmp(cur_opt, "no-checkpoint-write")) {
+ options->skip_checkpoint_write = 1;
+ } else if (!strcmp(cur_opt, "no-checkpoint")) {
+ options->skip_checkpoint_read = 1;
+ options->skip_checkpoint_write = 1;
+ } else {
+ printk(KERN_INFO "yaffs: Bad mount option \"%s\"\n",
+ cur_opt);
+ error = 1;
+ }
+ }
+
+ return error;
+}
+
+
+static struct dentry *yaffs_make_root(struct inode *inode)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
+ struct dentry *root = d_alloc_root(inode);
+
+ if (!root)
+ iput(inode);
+
+ return root;
+#else
+ return d_make_root(inode);
+#endif
+}
+
+
+
+
+static struct super_block *yaffs_internal_read_super(int yaffs_version,
+ struct super_block *sb,
+ void *data, int silent)
+{
+ int n_blocks;
+ struct inode *inode = NULL;
+ struct dentry *root;
+ struct yaffs_dev *dev = 0;
+ char devname_buf[BDEVNAME_SIZE + 1];
+ struct mtd_info *mtd;
+ int err;
+ char *data_str = (char *)data;
+ struct yaffs_linux_context *context = NULL;
+ struct yaffs_param *param;
+
+ int read_only = 0;
+
+ struct yaffs_options options;
+
+ unsigned mount_id;
+ int found;
+ struct yaffs_linux_context *context_iterator;
+ struct list_head *l;
+
+ if (!sb) {
+ printk(KERN_INFO "yaffs: sb is NULL\n");
+ return NULL;
+ }
+
+ sb->s_magic = YAFFS_MAGIC;
+ sb->s_op = &yaffs_super_ops;
+ sb->s_flags |= MS_NOATIME;
+
+ read_only = ((sb->s_flags & MS_RDONLY) != 0);
+
+#ifdef YAFFS_COMPILE_EXPORTFS
+ sb->s_export_op = &yaffs_export_ops;
+#endif
+
+ if (!sb->s_dev)
+ printk(KERN_INFO "yaffs: sb->s_dev is NULL\n");
+ else if (!yaffs_devname(sb, devname_buf))
+ printk(KERN_INFO "yaffs: devname is NULL\n");
+ else
+ printk(KERN_INFO "yaffs: dev is %d name is \"%s\" %s\n",
+ sb->s_dev,
+ yaffs_devname(sb, devname_buf), read_only ? "ro" : "rw");
+
+ if (!data_str)
+ data_str = "";
+
+ printk(KERN_INFO "yaffs: passed flags \"%s\"\n", data_str);
+
+ memset(&options, 0, sizeof(options));
+
+ if (IS_ENABLED(CONFIG_YAFFS_9BYTE_TAGS))
+ options.tags_9bytes = 1;
+
+ if (yaffs_parse_options(&options, data_str)) {
+ /* Option parsing failed */
+ return NULL;
+ }
+
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_read_super: Using yaffs%d", yaffs_version);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_read_super: block size %d", (int)(sb->s_blocksize));
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Attempting MTD mount of %u.%u,\"%s\"",
+ MAJOR(sb->s_dev), MINOR(sb->s_dev),
+ yaffs_devname(sb, devname_buf));
+
+ /* Get the device */
+ mtd = get_mtd_device(NULL, MINOR(sb->s_dev));
+ if (IS_ERR(mtd)) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: MTD device %u either not valid or unavailable",
+ MINOR(sb->s_dev));
+ return NULL;
+ }
+
+ if (yaffs_auto_select && yaffs_version == 1 && WRITE_SIZE(mtd) >= 2048) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs2");
+ yaffs_version = 2;
+ }
+
+ /* Added NCB 26/5/2006 for completeness */
+ if (yaffs_version == 2 &&
+ (!options.inband_tags || options.tags_9bytes) &&
+ WRITE_SIZE(mtd) == 512) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs1");
+ yaffs_version = 1;
+ }
+
+ if (yaffs_version == 2 &&
+ mtd->oobavail < sizeof(struct yaffs_packed_tags2)) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting inband tags");
+ options.inband_tags = 1;
+ }
+
+ err = yaffs_verify_mtd(mtd, yaffs_version, options.inband_tags,
+ options.tags_9bytes);
+ if (err < 0)
+ return NULL;
+
+ /* OK, so if we got here, we have an MTD that's NAND and looks
+ * like it has the right capabilities
+ * Set the struct yaffs_dev up for mtd
+ */
+
+ if (!read_only && !(mtd->flags & MTD_WRITEABLE)) {
+ read_only = 1;
+ printk(KERN_INFO
+ "yaffs: mtd is read only, setting superblock read only\n"
+ );
+ sb->s_flags |= MS_RDONLY;
+ }
+
+ dev = kmalloc(sizeof(struct yaffs_dev), GFP_KERNEL);
+ context = kmalloc(sizeof(struct yaffs_linux_context), GFP_KERNEL);
+
+ if (!dev || !context) {
+ kfree(dev);
+ kfree(context);
+ dev = NULL;
+ context = NULL;
+
+		/* Fatal: could not allocate device structure */
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs_read_super: Failed trying to allocate struct yaffs_dev."
+ );
+ return NULL;
+ }
+ memset(dev, 0, sizeof(struct yaffs_dev));
+ param = &(dev->param);
+
+ memset(context, 0, sizeof(struct yaffs_linux_context));
+ dev->os_context = context;
+ INIT_LIST_HEAD(&(context->context_list));
+ context->dev = dev;
+ context->super = sb;
+
+ dev->read_only = read_only;
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+ sb->s_fs_info = dev;
+#else
+ sb->u.generic_sbp = dev;
+#endif
+
+
+ dev->driver_context = mtd;
+ param->name = mtd->name;
+
+ /* Set up the memory size parameters.... */
+
+
+ param->n_reserved_blocks = 5;
+ param->n_caches = (options.no_cache) ? 0 : 10;
+ param->inband_tags = options.inband_tags;
+ param->tags_9bytes = options.tags_9bytes;
+
+ param->enable_xattr = 1;
+ if (options.lazy_loading_overridden)
+ param->disable_lazy_load = !options.lazy_loading_enabled;
+
+ param->defered_dir_update = 1;
+
+ if (options.tags_ecc_overridden)
+ param->no_tags_ecc = !options.tags_ecc_on;
+
+ param->empty_lost_n_found = 1;
+ param->refresh_period = 500;
+ param->disable_summary = options.disable_summary;
+
+
+#ifdef CONFIG_YAFFS_DISABLE_BAD_BLOCK_MARKING
+ param->disable_bad_block_marking = 1;
+#endif
+ if (options.empty_lost_and_found_overridden)
+ param->empty_lost_n_found = options.empty_lost_and_found;
+
+ /* ... and the functions. */
+ if (yaffs_version == 2) {
+ param->is_yaffs2 = 1;
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+ param->total_bytes_per_chunk = mtd->writesize;
+ param->chunks_per_block = mtd->erasesize / mtd->writesize;
+#else
+ param->total_bytes_per_chunk = mtd->oobblock;
+ param->chunks_per_block = mtd->erasesize / mtd->oobblock;
+#endif
+ n_blocks = YCALCBLOCKS(mtd->size, mtd->erasesize);
+
+ param->start_block = 0;
+ param->end_block = n_blocks - 1;
+ } else {
+ param->is_yaffs2 = 0;
+ n_blocks = YCALCBLOCKS(mtd->size,
+ YAFFS_CHUNKS_PER_BLOCK * YAFFS_BYTES_PER_CHUNK);
+
+ param->chunks_per_block = YAFFS_CHUNKS_PER_BLOCK;
+ param->total_bytes_per_chunk = YAFFS_BYTES_PER_CHUNK;
+ }
+
+ param->start_block = 0;
+ param->end_block = n_blocks - 1;
+
+ yaffs_mtd_drv_install(dev);
+
+ param->sb_dirty_fn = yaffs_set_super_dirty;
+ param->gc_control_fn = yaffs_gc_control_callback;
+
+ yaffs_dev_to_lc(dev)->super = sb;
+
+ param->use_nand_ecc = 1;
+
+ param->skip_checkpt_rd = options.skip_checkpoint_read;
+ param->skip_checkpt_wr = options.skip_checkpoint_write;
+
+ mutex_lock(&yaffs_context_lock);
+ /* Get a mount id */
+ found = 0;
+ for (mount_id = 0; !found; mount_id++) {
+ found = 1;
+ list_for_each(l, &yaffs_context_list) {
+ context_iterator =
+ list_entry(l, struct yaffs_linux_context,
+ context_list);
+ if (context_iterator->mount_id == mount_id)
+ found = 0;
+ }
+ }
+ context->mount_id = mount_id;
+
+ list_add_tail(&(yaffs_dev_to_lc(dev)->context_list),
+ &yaffs_context_list);
+ mutex_unlock(&yaffs_context_lock);
+
+ /* Directory search handling... */
+ INIT_LIST_HEAD(&(yaffs_dev_to_lc(dev)->search_contexts));
+ param->remove_obj_fn = yaffs_remove_obj_callback;
+
+ mutex_init(&(yaffs_dev_to_lc(dev)->gross_lock));
+
+ yaffs_gross_lock(dev);
+
+ err = yaffs_guts_initialise(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_read_super: guts initialised %s",
+ (err == YAFFS_OK) ? "OK" : "FAILED");
+
+ if (err == YAFFS_OK)
+ yaffs_bg_start(dev);
+
+ if (!context->bg_thread)
+ param->defered_dir_update = 0;
+
+ sb->s_maxbytes = yaffs_max_file_size(dev);
+
+ /* Release lock before yaffs_get_inode() */
+ yaffs_gross_unlock(dev);
+
+ /* Create root inode */
+ if (err == YAFFS_OK)
+ inode = yaffs_get_inode(sb, S_IFDIR | 0755, 0, yaffs_root(dev));
+
+ if (!inode)
+ return NULL;
+
+ inode->i_op = &yaffs_dir_inode_operations;
+ inode->i_fop = &yaffs_dir_operations;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: got root inode");
+
+ root = yaffs_make_root(inode);
+
+ if (!root)
+ return NULL;
+
+ sb->s_root = root;
+ if(!dev->is_checkpointed)
+ yaffs_set_super_dirty(dev);
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs_read_super: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: done");
+ return sb;
+}
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static int yaffs_internal_read_super_mtd(struct super_block *sb, void *data,
+ int silent)
+{
+ return yaffs_internal_read_super(1, sb, data, silent) ? 0 : -EINVAL;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+static struct dentry *yaffs_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
+{
+ return mount_bdev(fs_type, flags, dev_name, data, yaffs_internal_read_super_mtd);
+}
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+static int yaffs_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
+{
+
+ return get_sb_bdev(fs, flags, dev_name, data,
+ yaffs_internal_read_super_mtd, mnt);
+}
+#else
+static struct super_block *yaffs_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name,
+ void *data)
+{
+
+ return get_sb_bdev(fs, flags, dev_name, data,
+ yaffs_internal_read_super_mtd);
+}
+#endif
+
+static struct file_system_type yaffs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "yaffs",
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+ .mount = yaffs_mount,
+#else
+ .get_sb = yaffs_read_super,
+#endif
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+};
+#else
+static struct super_block *yaffs_read_super(struct super_block *sb, void *data,
+ int silent)
+{
+ return yaffs_internal_read_super(1, sb, data, silent);
+}
+
+static DECLARE_FSTYPE(yaffs_fs_type, "yaffs", yaffs_read_super,
+ FS_REQUIRES_DEV);
+#endif
+
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static int yaffs2_internal_read_super_mtd(struct super_block *sb, void *data,
+ int silent)
+{
+ return yaffs_internal_read_super(2, sb, data, silent) ? 0 : -EINVAL;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+static struct dentry *yaffs2_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
+{
+ return mount_bdev(fs_type, flags, dev_name, data, yaffs2_internal_read_super_mtd);
+}
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+static int yaffs2_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
+{
+ return get_sb_bdev(fs, flags, dev_name, data,
+ yaffs2_internal_read_super_mtd, mnt);
+}
+#else
+static struct super_block *yaffs2_read_super(struct file_system_type *fs,
+ int flags, const char *dev_name,
+ void *data)
+{
+
+ return get_sb_bdev(fs, flags, dev_name, data,
+ yaffs2_internal_read_super_mtd);
+}
+#endif
+
+static struct file_system_type yaffs2_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "yaffs2",
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+ .mount = yaffs2_mount,
+#else
+ .get_sb = yaffs2_read_super,
+#endif
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+};
+#else
+static struct super_block *yaffs2_read_super(struct super_block *sb,
+ void *data, int silent)
+{
+ return yaffs_internal_read_super(2, sb, data, silent);
+}
+
+static DECLARE_FSTYPE(yaffs2_fs_type, "yaffs2", yaffs2_read_super,
+ FS_REQUIRES_DEV);
+#endif
+
+
+static struct proc_dir_entry *my_proc_entry;
+
+static char *yaffs_dump_dev_part0(char *buf, struct yaffs_dev *dev)
+{
+ struct yaffs_param *param = &dev->param;
+ int bs[10];
+
+ yaffs_count_blocks_by_state(dev,bs);
+
+ buf += sprintf(buf, "start_block.......... %d\n", param->start_block);
+ buf += sprintf(buf, "end_block............ %d\n", param->end_block);
+ buf += sprintf(buf, "total_bytes_per_chunk %d\n",
+ param->total_bytes_per_chunk);
+ buf += sprintf(buf, "use_nand_ecc......... %d\n", param->use_nand_ecc);
+ buf += sprintf(buf, "no_tags_ecc.......... %d\n", param->no_tags_ecc);
+ buf += sprintf(buf, "is_yaffs2............ %d\n", param->is_yaffs2);
+ buf += sprintf(buf, "inband_tags.......... %d\n", param->inband_tags);
+ buf += sprintf(buf, "empty_lost_n_found... %d\n",
+ param->empty_lost_n_found);
+ buf += sprintf(buf, "disable_lazy_load.... %d\n",
+ param->disable_lazy_load);
+ buf += sprintf(buf, "disable_bad_block_mrk %d\n",
+ param->disable_bad_block_marking);
+ buf += sprintf(buf, "refresh_period....... %d\n",
+ param->refresh_period);
+ buf += sprintf(buf, "n_caches............. %d\n", param->n_caches);
+ buf += sprintf(buf, "n_reserved_blocks.... %d\n",
+ param->n_reserved_blocks);
+ buf += sprintf(buf, "always_check_erased.. %d\n",
+ param->always_check_erased);
+ buf += sprintf(buf, "\n");
+ buf += sprintf(buf, "block count by state\n");
+ buf += sprintf(buf, "0:%d 1:%d 2:%d 3:%d 4:%d\n",
+ bs[0], bs[1], bs[2], bs[3], bs[4]);
+ buf += sprintf(buf, "5:%d 6:%d 7:%d 8:%d 9:%d\n",
+ bs[5], bs[6], bs[7], bs[8], bs[9]);
+
+ return buf;
+}
+
+static char *yaffs_dump_dev_part1(char *buf, struct yaffs_dev *dev)
+{
+ buf += sprintf(buf, "max file size....... %lld\n",
+ (long long) yaffs_max_file_size(dev));
+ buf += sprintf(buf, "data_bytes_per_chunk. %d\n",
+ dev->data_bytes_per_chunk);
+ buf += sprintf(buf, "chunk_grp_bits....... %d\n", dev->chunk_grp_bits);
+ buf += sprintf(buf, "chunk_grp_size....... %d\n", dev->chunk_grp_size);
+ buf += sprintf(buf, "n_erased_blocks...... %d\n", dev->n_erased_blocks);
+ buf += sprintf(buf, "blocks_in_checkpt.... %d\n",
+ dev->blocks_in_checkpt);
+ buf += sprintf(buf, "\n");
+ buf += sprintf(buf, "n_tnodes............. %d\n", dev->n_tnodes);
+ buf += sprintf(buf, "n_obj................ %d\n", dev->n_obj);
+ buf += sprintf(buf, "n_free_chunks........ %d\n", dev->n_free_chunks);
+ buf += sprintf(buf, "\n");
+ buf += sprintf(buf, "n_page_writes........ %u\n", dev->n_page_writes);
+ buf += sprintf(buf, "n_page_reads......... %u\n", dev->n_page_reads);
+ buf += sprintf(buf, "n_erasures........... %u\n", dev->n_erasures);
+ buf += sprintf(buf, "n_gc_copies.......... %u\n", dev->n_gc_copies);
+ buf += sprintf(buf, "all_gcs.............. %u\n", dev->all_gcs);
+ buf += sprintf(buf, "passive_gc_count..... %u\n",
+ dev->passive_gc_count);
+ buf += sprintf(buf, "oldest_dirty_gc_count %u\n",
+ dev->oldest_dirty_gc_count);
+ buf += sprintf(buf, "n_gc_blocks.......... %u\n", dev->n_gc_blocks);
+ buf += sprintf(buf, "bg_gcs............... %u\n", dev->bg_gcs);
+ buf += sprintf(buf, "n_retried_writes..... %u\n",
+ dev->n_retried_writes);
+ buf += sprintf(buf, "n_retired_blocks..... %u\n",
+ dev->n_retired_blocks);
+ buf += sprintf(buf, "n_ecc_fixed.......... %u\n", dev->n_ecc_fixed);
+ buf += sprintf(buf, "n_ecc_unfixed........ %u\n", dev->n_ecc_unfixed);
+ buf += sprintf(buf, "n_tags_ecc_fixed..... %u\n",
+ dev->n_tags_ecc_fixed);
+ buf += sprintf(buf, "n_tags_ecc_unfixed... %u\n",
+ dev->n_tags_ecc_unfixed);
+ buf += sprintf(buf, "cache_hits........... %u\n", dev->cache_hits);
+ buf += sprintf(buf, "n_deleted_files...... %u\n", dev->n_deleted_files);
+ buf += sprintf(buf, "n_unlinked_files..... %u\n",
+ dev->n_unlinked_files);
+ buf += sprintf(buf, "refresh_count........ %u\n", dev->refresh_count);
+ buf += sprintf(buf, "n_bg_deletions....... %u\n", dev->n_bg_deletions);
+ buf += sprintf(buf, "tags_used............ %u\n", dev->tags_used);
+ buf += sprintf(buf, "summary_used......... %u\n", dev->summary_used);
+
+ return buf;
+}
+
+static int yaffs_proc_read(char *page,
+ char **start,
+ off_t offset, int count, int *eof, void *data)
+{
+ struct list_head *item;
+ char *buf = page;
+ int step = offset;
+ int n = 0;
+
+	/* Get proc_file_read() to step 'offset' by one on each successive call.
+ * We use 'offset' (*ppos) to indicate where we are in dev_list.
+ * This also assumes the user has posted a read buffer large
+ * enough to hold the complete output; but that's life in /proc.
+ */
+
+ *(int *)start = 1;
+
+ /* Print header first */
+ if (step == 0)
+ buf +=
+ sprintf(buf,
+ "Multi-version YAFFS built:" __DATE__ " " __TIME__
+ "\n");
+ else if (step == 1)
+ buf += sprintf(buf, "\n");
+ else {
+ step -= 2;
+
+ mutex_lock(&yaffs_context_lock);
+
+ /* Locate and print the Nth entry. Order N-squared but N is small. */
+ list_for_each(item, &yaffs_context_list) {
+ struct yaffs_linux_context *dc =
+ list_entry(item, struct yaffs_linux_context,
+ context_list);
+ struct yaffs_dev *dev = dc->dev;
+
+ if (n < (step & ~1)) {
+ n += 2;
+ continue;
+ }
+ if ((step & 1) == 0) {
+ buf +=
+ sprintf(buf, "\nDevice %d \"%s\"\n", n,
+ dev->param.name);
+ buf = yaffs_dump_dev_part0(buf, dev);
+ } else {
+ buf = yaffs_dump_dev_part1(buf, dev);
+ }
+
+ break;
+ }
+ mutex_unlock(&yaffs_context_lock);
+ }
+
+ return buf - page < count ? buf - page : count;
+}
+
+/**
+ * Set the verbosity of the warnings and error messages.
+ *
+ * Note that the names can only be a..z or _ with the current code.
+ */
+
+static struct {
+ char *mask_name;
+ unsigned mask_bitfield;
+} mask_flags[] = {
+ {"allocate", YAFFS_TRACE_ALLOCATE},
+ {"always", YAFFS_TRACE_ALWAYS},
+ {"background", YAFFS_TRACE_BACKGROUND},
+ {"bad_blocks", YAFFS_TRACE_BAD_BLOCKS},
+ {"buffers", YAFFS_TRACE_BUFFERS},
+ {"bug", YAFFS_TRACE_BUG},
+ {"checkpt", YAFFS_TRACE_CHECKPOINT},
+ {"deletion", YAFFS_TRACE_DELETION},
+ {"erase", YAFFS_TRACE_ERASE},
+ {"error", YAFFS_TRACE_ERROR},
+ {"gc_detail", YAFFS_TRACE_GC_DETAIL},
+ {"gc", YAFFS_TRACE_GC},
+ {"lock", YAFFS_TRACE_LOCK},
+ {"mtd", YAFFS_TRACE_MTD},
+ {"nandaccess", YAFFS_TRACE_NANDACCESS},
+ {"os", YAFFS_TRACE_OS},
+ {"scan_debug", YAFFS_TRACE_SCAN_DEBUG},
+ {"scan", YAFFS_TRACE_SCAN},
+ {"mount", YAFFS_TRACE_MOUNT},
+ {"tracing", YAFFS_TRACE_TRACING},
+ {"sync", YAFFS_TRACE_SYNC},
+ {"write", YAFFS_TRACE_WRITE},
+ {"verify", YAFFS_TRACE_VERIFY},
+ {"verify_nand", YAFFS_TRACE_VERIFY_NAND},
+ {"verify_full", YAFFS_TRACE_VERIFY_FULL},
+ {"verify_all", YAFFS_TRACE_VERIFY_ALL},
+ {"all", 0xffffffff},
+ {"none", 0},
+ {NULL, 0},
+};
+
+#define MAX_MASK_NAME_LENGTH 40
+static int yaffs_proc_write_trace_options(struct file *file, const char *buf,
+ unsigned long count)
+{
+ unsigned rg = 0, mask_bitfield;
+ char *end;
+ char *mask_name;
+ const char *x;
+ char substring[MAX_MASK_NAME_LENGTH + 1];
+ int i;
+ int done = 0;
+ int add, len = 0;
+ int pos = 0;
+
+ rg = yaffs_trace_mask;
+
+ while (!done && (pos < count)) {
+ done = 1;
+ while ((pos < count) && isspace(buf[pos]))
+ pos++;
+
+ switch (buf[pos]) {
+ case '+':
+ case '-':
+ case '=':
+ add = buf[pos];
+ pos++;
+ break;
+
+ default:
+ add = ' ';
+ break;
+ }
+ mask_name = NULL;
+
+ mask_bitfield = simple_strtoul(buf + pos, &end, 0);
+
+ if (end > buf + pos) {
+ mask_name = "numeral";
+ len = end - (buf + pos);
+ pos += len;
+ done = 0;
+ } else {
+ for (x = buf + pos, i = 0;
+ (*x == '_' || (*x >= 'a' && *x <= 'z')) &&
+ i < MAX_MASK_NAME_LENGTH; x++, i++, pos++)
+ substring[i] = *x;
+ substring[i] = '\0';
+
+ for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+ if (strcmp(substring, mask_flags[i].mask_name)
+ == 0) {
+ mask_name = mask_flags[i].mask_name;
+ mask_bitfield =
+ mask_flags[i].mask_bitfield;
+ done = 0;
+ break;
+ }
+ }
+ }
+
+ if (mask_name != NULL) {
+ done = 0;
+ switch (add) {
+ case '-':
+ rg &= ~mask_bitfield;
+ break;
+ case '+':
+ rg |= mask_bitfield;
+ break;
+ case '=':
+ rg = mask_bitfield;
+ break;
+ default:
+ rg |= mask_bitfield;
+ break;
+ }
+ }
+ }
+
+ yaffs_trace_mask = rg | YAFFS_TRACE_ALWAYS;
+
+ printk(KERN_DEBUG "new trace = 0x%08X\n", yaffs_trace_mask);
+
+ if (rg & YAFFS_TRACE_ALWAYS) {
+ for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+ char flag;
+ flag = ((rg & mask_flags[i].mask_bitfield) ==
+ mask_flags[i].mask_bitfield) ? '+' : '-';
+ printk(KERN_DEBUG "%c%s\n", flag,
+ mask_flags[i].mask_name);
+ }
+ }
+
+ return count;
+}
+
+/* Debug strings are of the form:
+ * .bnnn print info on block n
+ * .cobjn,chunkn print nand chunk id for objn:chunkn
+ */
+
+static int yaffs_proc_debug_write(struct file *file, const char *buf,
+ unsigned long count)
+{
+
+ char str[100];
+ char *p0;
+ char *p1;
+ long p1_val;
+ long p0_val;
+ char cmd;
+ struct list_head *item;
+
+ memset(str, 0, sizeof(str));
+ memcpy(str, buf, min((size_t)count, sizeof(str) -1));
+
+ cmd = str[1];
+
+ p0 = str + 2;
+
+ p1 = p0;
+
+ while (*p1 && *p1 != ',') {
+ p1++;
+ }
+ *p1 = '\0';
+ p1++;
+
+ p0_val = simple_strtol(p0, NULL, 0);
+ p1_val = simple_strtol(p1, NULL, 0);
+
+
+ mutex_lock(&yaffs_context_lock);
+
+ /* Locate and print the Nth entry. Order N-squared but N is small. */
+ list_for_each(item, &yaffs_context_list) {
+ struct yaffs_linux_context *dc =
+ list_entry(item, struct yaffs_linux_context,
+ context_list);
+ struct yaffs_dev *dev = dc->dev;
+
+ if (cmd == 'b') {
+ struct yaffs_block_info *bi;
+
+ bi = yaffs_get_block_info(dev,p0_val);
+
+ if(bi) {
+ printk("Block %d: state %d, retire %d, use %d, seq %d\n",
+ (int)p0_val, bi->block_state,
+ bi->needs_retiring, bi->pages_in_use,
+ bi->seq_number);
+ }
+ } else if (cmd == 'c') {
+ struct yaffs_obj *obj;
+ int nand_chunk;
+
+ obj = yaffs_find_by_number(dev, p0_val);
+ if (!obj)
+ printk("No obj %d\n", (int)p0_val);
+ else {
+ if(p1_val == 0)
+ nand_chunk = obj->hdr_chunk;
+ else
+ nand_chunk =
+ yaffs_find_chunk_in_file(obj,
+ p1_val, NULL);
+ printk("Nand chunk for %d:%d is %d\n",
+ (int)p0_val, (int)p1_val, nand_chunk);
+ }
+ }
+ }
+
+ mutex_unlock(&yaffs_context_lock);
+
+ return count;
+}
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+static int yaffs_proc_write(struct file *file, const char *buf,
+ unsigned long count, void *ppos)
+#else
+static ssize_t yaffs_proc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+#endif
+{
+ if (buf[0] == '.')
+ return yaffs_proc_debug_write(file, buf, count);
+ return yaffs_proc_write_trace_options(file, buf, count);
+}
+
+/* Stuff to handle installation of file systems */
+struct file_system_to_install {
+ struct file_system_type *fst;
+ int installed;
+};
+
+static struct file_system_to_install fs_to_install[] = {
+ {&yaffs_fs_type, 0},
+ {&yaffs2_fs_type, 0},
+ {NULL, 0}
+};
+
+
+#ifdef YAFFS_NEW_PROCFS
+static int yaffs_proc_show(struct seq_file *m, void *v)
+{
+ /* FIXME: Unify in a better way? */
+ char buffer[512];
+ char *start;
+ int len;
+
+ len = yaffs_proc_read(buffer, &start, 0, sizeof(buffer), NULL, NULL);
+ seq_puts(m, buffer);
+ return 0;
+}
+
+static int yaffs_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, yaffs_proc_show, NULL);
+}
+
+static struct file_operations procfs_ops = {
+ .owner = THIS_MODULE,
+ .open = yaffs_proc_open,
+ .read = seq_read,
+ .write = yaffs_proc_write,
+};
+
+static int yaffs_procfs_init(void)
+{
+ /* Install the proc_fs entries */
+ my_proc_entry = proc_create("yaffs",
+ S_IRUGO | S_IFREG,
+ YPROC_ROOT,
+ &procfs_ops);
+
+ if (my_proc_entry) {
+ return 0;
+ } else {
+ return -ENOMEM;
+ }
+}
+
+#else
+
+
+static int yaffs_procfs_init(void)
+{
+ /* Install the proc_fs entries */
+ my_proc_entry = create_proc_entry("yaffs",
+ S_IRUGO | S_IFREG, YPROC_ROOT);
+
+ if (my_proc_entry) {
+ my_proc_entry->write_proc = yaffs_proc_write;
+ my_proc_entry->read_proc = yaffs_proc_read;
+ my_proc_entry->data = NULL;
+ return 0;
+ } else {
+ return -ENOMEM;
+ }
+}
+
+#endif
+
+
+static int __init init_yaffs_fs(void)
+{
+ int error = 0;
+ struct file_system_to_install *fsinst;
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs built " __DATE__ " " __TIME__ " Installing.");
+
+ mutex_init(&yaffs_context_lock);
+
+ error = yaffs_procfs_init();
+ if (error)
+ return error;
+
+ /* Now add the file system entries */
+
+ fsinst = fs_to_install;
+
+ while (fsinst->fst && !error) {
+ error = register_filesystem(fsinst->fst);
+ if (!error)
+ fsinst->installed = 1;
+ fsinst++;
+ }
+
+ /* Any errors? uninstall */
+ if (error) {
+ fsinst = fs_to_install;
+
+ while (fsinst->fst) {
+ if (fsinst->installed) {
+ unregister_filesystem(fsinst->fst);
+ fsinst->installed = 0;
+ }
+ fsinst++;
+ }
+ }
+
+ return error;
+}
+
+static void __exit exit_yaffs_fs(void)
+{
+
+ struct file_system_to_install *fsinst;
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs built " __DATE__ " " __TIME__ " removing.");
+
+ remove_proc_entry("yaffs", YPROC_ROOT);
+
+ fsinst = fs_to_install;
+
+ while (fsinst->fst) {
+ if (fsinst->installed) {
+ unregister_filesystem(fsinst->fst);
+ fsinst->installed = 0;
+ }
+ fsinst++;
+ }
+}
+
+module_init(init_yaffs_fs)
+ module_exit(exit_yaffs_fs)
+
+ MODULE_DESCRIPTION("YAFFS2 - a NAND specific flash file system");
+MODULE_AUTHOR("Charles Manning, Aleph One Ltd., 2002-2011");
+MODULE_LICENSE("GPL");
Index: linux-3.18.21/fs/yaffs2/yaffs_yaffs1.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_yaffs1.c 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,422 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_yaffs1.h"
+#include "yportenv.h"
+#include "yaffs_trace.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_nand.h"
+#include "yaffs_attribs.h"
+
+int yaffs1_scan(struct yaffs_dev *dev)
+{
+ struct yaffs_ext_tags tags;
+ int blk;
+ int result;
+ int chunk;
+ int c;
+ int deleted;
+ enum yaffs_block_state state;
+ LIST_HEAD(hard_list);
+ struct yaffs_block_info *bi;
+ u32 seq_number;
+ struct yaffs_obj_hdr *oh;
+ struct yaffs_obj *in;
+ struct yaffs_obj *parent;
+ int alloc_failed = 0;
+ struct yaffs_shadow_fixer *shadow_fixers = NULL;
+ u8 *chunk_data;
+
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "yaffs1_scan starts intstartblk %d intendblk %d...",
+ dev->internal_start_block, dev->internal_end_block);
+
+ chunk_data = yaffs_get_temp_buffer(dev);
+
+ dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
+
+ /* Scan all the blocks to determine their state */
+ bi = dev->block_info;
+ for (blk = dev->internal_start_block; blk <= dev->internal_end_block;
+ blk++) {
+ yaffs_clear_chunk_bits(dev, blk);
+ bi->pages_in_use = 0;
+ bi->soft_del_pages = 0;
+
+ yaffs_query_init_block_state(dev, blk, &state, &seq_number);
+
+ bi->block_state = state;
+ bi->seq_number = seq_number;
+
+ if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
+ bi->block_state = state = YAFFS_BLOCK_STATE_DEAD;
+
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG,
+ "Block scanning block %d state %d seq %d",
+ blk, state, seq_number);
+
+ if (state == YAFFS_BLOCK_STATE_DEAD) {
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+ "block %d is bad", blk);
+ } else if (state == YAFFS_BLOCK_STATE_EMPTY) {
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty ");
+ dev->n_erased_blocks++;
+ dev->n_free_chunks += dev->param.chunks_per_block;
+ }
+ bi++;
+ }
+
+ /* For each block.... */
+ for (blk = dev->internal_start_block;
+ !alloc_failed && blk <= dev->internal_end_block; blk++) {
+
+ cond_resched();
+
+ bi = yaffs_get_block_info(dev, blk);
+ state = bi->block_state;
+
+ deleted = 0;
+
+ /* For each chunk in each block that needs scanning.... */
+ for (c = 0;
+ !alloc_failed && c < dev->param.chunks_per_block &&
+ state == YAFFS_BLOCK_STATE_NEEDS_SCAN; c++) {
+ /* Read the tags and decide what to do */
+ chunk = blk * dev->param.chunks_per_block + c;
+
+ result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL,
+ &tags);
+
+ /* Let's have a good look at this chunk... */
+
+ if (tags.ecc_result == YAFFS_ECC_RESULT_UNFIXED ||
+ tags.is_deleted) {
+ /* YAFFS1 only...
+ * A deleted chunk
+ */
+ deleted++;
+ dev->n_free_chunks++;
+ } else if (!tags.chunk_used) {
+ /* An unassigned chunk in the block
+ * This means that either the block is empty or
+ * this is the one being allocated from
+ */
+
+ if (c == 0) {
+ /* We're looking at the first chunk in
+ *the block so the block is unused */
+ state = YAFFS_BLOCK_STATE_EMPTY;
+ dev->n_erased_blocks++;
+ } else {
+ /* this is the block being allocated */
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Allocating from %d %d",
+ blk, c);
+ state = YAFFS_BLOCK_STATE_ALLOCATING;
+ dev->alloc_block = blk;
+ dev->alloc_page = c;
+ dev->alloc_block_finder = blk;
+
+ }
+
+ dev->n_free_chunks +=
+ (dev->param.chunks_per_block - c);
+ } else if (tags.chunk_id > 0) {
+ /* chunk_id > 0 so it is a data chunk... */
+ unsigned int endpos;
+
+ yaffs_set_chunk_bit(dev, blk, c);
+ bi->pages_in_use++;
+
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id,
+ YAFFS_OBJECT_TYPE_FILE);
+ /* PutChunkIntoFile checks for a clash
+ * (two data chunks with the same chunk_id).
+ */
+
+ if (!in)
+ alloc_failed = 1;
+
+ if (in) {
+ if (!yaffs_put_chunk_in_file
+ (in, tags.chunk_id, chunk, 1))
+ alloc_failed = 1;
+ }
+
+ endpos =
+ (tags.chunk_id - 1) *
+ dev->data_bytes_per_chunk +
+ tags.n_bytes;
+ if (in &&
+ in->variant_type ==
+ YAFFS_OBJECT_TYPE_FILE &&
+ in->variant.file_variant.scanned_size <
+ endpos) {
+ in->variant.file_variant.scanned_size =
+ endpos;
+ if (!dev->param.use_header_file_size) {
+ in->variant.
+ file_variant.file_size =
+ in->variant.
+ file_variant.scanned_size;
+ }
+
+ }
+ } else {
+ /* chunk_id == 0, so it is an ObjectHeader.
+ * Make the object
+ */
+ yaffs_set_chunk_bit(dev, blk, c);
+ bi->pages_in_use++;
+
+ result = yaffs_rd_chunk_tags_nand(dev, chunk,
+ chunk_data,
+ NULL);
+
+ oh = (struct yaffs_obj_hdr *)chunk_data;
+
+ in = yaffs_find_by_number(dev, tags.obj_id);
+ if (in && in->variant_type != oh->type) {
+ /* This should not happen, but somehow
+				 * We've ended up with an obj_id that
+ * has been reused but not yet deleted,
+ * and worse still it has changed type.
+ * Delete the old object.
+ */
+
+ yaffs_del_obj(in);
+ in = NULL;
+ }
+
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id,
+ oh->type);
+
+ if (!in)
+ alloc_failed = 1;
+
+ if (in && oh->shadows_obj > 0) {
+
+ struct yaffs_shadow_fixer *fixer;
+ fixer =
+ kmalloc(sizeof
+ (struct yaffs_shadow_fixer),
+ GFP_NOFS);
+ if (fixer) {
+ fixer->next = shadow_fixers;
+ shadow_fixers = fixer;
+ fixer->obj_id = tags.obj_id;
+ fixer->shadowed_id =
+ oh->shadows_obj;
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Shadow fixer: %d shadows %d",
+ fixer->obj_id,
+ fixer->shadowed_id);
+
+ }
+
+ }
+
+ if (in && in->valid) {
+ /* We have already filled this one.
+ * We have a duplicate and need to
+ * resolve it. */
+
+ unsigned existing_serial = in->serial;
+ unsigned new_serial =
+ tags.serial_number;
+
+ if (((existing_serial + 1) & 3) ==
+ new_serial) {
+ /* Use new one - destroy the
+						 * existing one */
+ yaffs_chunk_del(dev,
+ in->hdr_chunk,
+ 1, __LINE__);
+ in->valid = 0;
+ } else {
+ /* Use existing - destroy
+ * this one. */
+ yaffs_chunk_del(dev, chunk, 1,
+ __LINE__);
+ }
+ }
+
+ if (in && !in->valid &&
+ (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+ tags.obj_id ==
+ YAFFS_OBJECTID_LOSTNFOUND)) {
+ /* We only load some info, don't fiddle
+ * with directory structure */
+ in->valid = 1;
+ in->variant_type = oh->type;
+
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+ in->hdr_chunk = chunk;
+ in->serial = tags.serial_number;
+
+ } else if (in && !in->valid) {
+ /* we need to load this info */
+
+ in->valid = 1;
+ in->variant_type = oh->type;
+
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+ in->hdr_chunk = chunk;
+ in->serial = tags.serial_number;
+
+ yaffs_set_obj_name_from_oh(in, oh);
+ in->dirty = 0;
+
+ /* directory stuff...
+ * hook up to parent
+ */
+
+ parent =
+ yaffs_find_or_create_by_number
+ (dev, oh->parent_obj_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ if (!parent)
+ alloc_failed = 1;
+ if (parent && parent->variant_type ==
+ YAFFS_OBJECT_TYPE_UNKNOWN) {
+ /* Set up as a directory */
+ parent->variant_type =
+ YAFFS_OBJECT_TYPE_DIRECTORY;
+ INIT_LIST_HEAD(&parent->
+ variant.dir_variant.
+ children);
+ } else if (!parent ||
+ parent->variant_type !=
+ YAFFS_OBJECT_TYPE_DIRECTORY) {
+ /* Hoosterman, a problem....
+ * We're trying to use a
+ * non-directory as a directory
+ */
+
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+ );
+ parent = dev->lost_n_found;
+ }
+
+ yaffs_add_obj_to_dir(parent, in);
+
+ switch (in->variant_type) {
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* Todo got a problem */
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ if (dev->param.
+ use_header_file_size)
+ in->variant.
+ file_variant.file_size
+ = yaffs_oh_to_size(oh);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ in->variant.
+ hardlink_variant.equiv_id =
+ oh->equiv_id;
+ list_add(&in->hard_links,
+ &hard_list);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ in->variant.symlink_variant.
+ alias =
+ yaffs_clone_str(oh->alias);
+ if (!in->variant.
+ symlink_variant.alias)
+ alloc_failed = 1;
+ break;
+ }
+ }
+ }
+ }
+
+ if (state == YAFFS_BLOCK_STATE_NEEDS_SCAN) {
+ /* If we got this far while scanning,
+ * then the block is fully allocated. */
+ state = YAFFS_BLOCK_STATE_FULL;
+ }
+
+ if (state == YAFFS_BLOCK_STATE_ALLOCATING) {
+ /* If the block was partially allocated then
+ * treat it as fully allocated. */
+ state = YAFFS_BLOCK_STATE_FULL;
+ dev->alloc_block = -1;
+ }
+
+ bi->block_state = state;
+
+ /* Now let's see if it was dirty */
+ if (bi->pages_in_use == 0 &&
+ !bi->has_shrink_hdr &&
+ bi->block_state == YAFFS_BLOCK_STATE_FULL)
+ yaffs_block_became_dirty(dev, blk);
+ }
+
+ /* Ok, we've done all the scanning.
+ * Fix up the hard link chains.
+ * We should now have scanned all the objects, now it's time to add
+ * these hardlinks.
+ */
+
+ yaffs_link_fixup(dev, &hard_list);
+
+ /*
+ * Fix up any shadowed objects.
+ * There should not be more than one of these.
+ */
+ {
+ struct yaffs_shadow_fixer *fixer;
+ struct yaffs_obj *obj;
+
+ while (shadow_fixers) {
+ fixer = shadow_fixers;
+ shadow_fixers = fixer->next;
+ /* Complete the rename transaction by deleting the
+ * shadowed object then setting the object header
+			 * to unshadowed.
+ */
+ obj = yaffs_find_by_number(dev, fixer->shadowed_id);
+ if (obj)
+ yaffs_del_obj(obj);
+
+ obj = yaffs_find_by_number(dev, fixer->obj_id);
+
+ if (obj)
+ yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
+
+ kfree(fixer);
+ }
+ }
+
+ yaffs_release_temp_buffer(dev, chunk_data);
+
+ if (alloc_failed)
+ return YAFFS_FAIL;
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "yaffs1_scan ends");
+
+ return YAFFS_OK;
+}
Index: linux-3.18.21/fs/yaffs2/yaffs_yaffs1.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_yaffs1.h 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,22 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_YAFFS1_H__
+#define __YAFFS_YAFFS1_H__
+
+#include "yaffs_guts.h"
+int yaffs1_scan(struct yaffs_dev *dev);
+
+#endif
Index: linux-3.18.21/fs/yaffs2/yaffs_yaffs2.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_yaffs2.c 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,1534 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+#include "yaffs_yaffs2.h"
+#include "yaffs_checkptrw.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_nand.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_verify.h"
+#include "yaffs_attribs.h"
+#include "yaffs_summary.h"
+
+/*
+ * Checkpoints are really no benefit on very small partitions.
+ *
+ * To save space on small partitions don't bother with checkpoints unless
+ * the partition is at least this big.
+ */
+#define YAFFS_CHECKPOINT_MIN_BLOCKS 60
+#define YAFFS_SMALL_HOLE_THRESHOLD 4
+
+/*
+ * Oldest Dirty Sequence Number handling.
+ */
+
+/* yaffs_calc_oldest_dirty_seq()
+ * yaffs2_find_oldest_dirty_seq()
+ * Calculate the oldest dirty sequence number if we don't know it.
+ */
+void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev)
+{
+ int i;
+ unsigned seq;
+ unsigned block_no = 0;
+ struct yaffs_block_info *b;
+
+ if (!dev->param.is_yaffs2)
+ return;
+
+ /* Find the oldest dirty sequence number. */
+ seq = dev->seq_number + 1;
+ b = dev->block_info;
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ if (b->block_state == YAFFS_BLOCK_STATE_FULL &&
+ (b->pages_in_use - b->soft_del_pages) <
+ dev->param.chunks_per_block &&
+ b->seq_number < seq) {
+ seq = b->seq_number;
+ block_no = i;
+ }
+ b++;
+ }
+
+ if (block_no) {
+ dev->oldest_dirty_seq = seq;
+ dev->oldest_dirty_block = block_no;
+ }
+}
+
+void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev)
+{
+ if (!dev->param.is_yaffs2)
+ return;
+
+ if (!dev->oldest_dirty_seq)
+ yaffs_calc_oldest_dirty_seq(dev);
+}
+
+/*
+ * yaffs_clear_oldest_dirty_seq()
+ * Called when a block is erased or marked bad. (ie. when its seq_number
+ * becomes invalid). If the value matches the oldest then we clear
+ * dev->oldest_dirty_seq to force its recomputation.
+ */
+void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi)
+{
+
+ if (!dev->param.is_yaffs2)
+ return;
+
+ if (!bi || bi->seq_number == dev->oldest_dirty_seq) {
+ dev->oldest_dirty_seq = 0;
+ dev->oldest_dirty_block = 0;
+ }
+}
+
+/*
+ * yaffs2_update_oldest_dirty_seq()
+ * Update the oldest dirty sequence number whenever we dirty a block.
+ * Only do this if the oldest_dirty_seq is actually being tracked.
+ */
+void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no,
+ struct yaffs_block_info *bi)
+{
+ if (!dev->param.is_yaffs2)
+ return;
+
+ if (dev->oldest_dirty_seq) {
+ if (dev->oldest_dirty_seq > bi->seq_number) {
+ dev->oldest_dirty_seq = bi->seq_number;
+ dev->oldest_dirty_block = block_no;
+ }
+ }
+}
+
+int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi)
+{
+
+ if (!dev->param.is_yaffs2)
+ return 1; /* disqualification only applies to yaffs2. */
+
+ if (!bi->has_shrink_hdr)
+ return 1; /* can gc */
+
+ yaffs2_find_oldest_dirty_seq(dev);
+
+ /* Can't do gc of this block if there are any blocks older than this
+ * one that have discarded pages.
+ */
+ return (bi->seq_number <= dev->oldest_dirty_seq);
+}
+
+/*
+ * yaffs2_find_refresh_block()
+ * periodically finds the oldest full block by sequence number for refreshing.
+ * Only for yaffs2.
+ */
+u32 yaffs2_find_refresh_block(struct yaffs_dev *dev)
+{
+ u32 b;
+ u32 oldest = 0;
+ u32 oldest_seq = 0;
+ struct yaffs_block_info *bi;
+
+ if (!dev->param.is_yaffs2)
+ return oldest;
+
+ /*
+ * If refresh period < 10 then refreshing is disabled.
+ */
+ if (dev->param.refresh_period < 10)
+ return oldest;
+
+ /*
+ * Fix broken values.
+ */
+ if (dev->refresh_skip > dev->param.refresh_period)
+ dev->refresh_skip = dev->param.refresh_period;
+
+ if (dev->refresh_skip > 0)
+ return oldest;
+
+ /*
+ * Refresh skip is now zero.
+ * We'll do a refresh this time around....
+ * Update the refresh skip and find the oldest block.
+ */
+ dev->refresh_skip = dev->param.refresh_period;
+ dev->refresh_count++;
+ bi = dev->block_info;
+ for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL) {
+
+ if (oldest < 1 || bi->seq_number < oldest_seq) {
+ oldest = b;
+ oldest_seq = bi->seq_number;
+ }
+ }
+ bi++;
+ }
+
+ if (oldest > 0) {
+ yaffs_trace(YAFFS_TRACE_GC,
+ "GC refresh count %d selected block %d with seq_number %d",
+ dev->refresh_count, oldest, oldest_seq);
+ }
+
+ return oldest;
+}
+
+int yaffs2_checkpt_required(struct yaffs_dev *dev)
+{
+ int nblocks;
+
+ if (!dev->param.is_yaffs2)
+ return 0;
+
+ nblocks = dev->internal_end_block - dev->internal_start_block + 1;
+
+ return !dev->param.skip_checkpt_wr &&
+ !dev->read_only && (nblocks >= YAFFS_CHECKPOINT_MIN_BLOCKS);
+}
+
+int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev)
+{
+ int retval;
+ int n_bytes = 0;
+ int n_blocks;
+ int dev_blocks;
+
+ if (!dev->param.is_yaffs2)
+ return 0;
+
+ if (!dev->checkpoint_blocks_required && yaffs2_checkpt_required(dev)) {
+ /* Not a valid value so recalculate */
+ dev_blocks = dev->param.end_block - dev->param.start_block + 1;
+ n_bytes += sizeof(struct yaffs_checkpt_validity);
+ n_bytes += sizeof(struct yaffs_checkpt_dev);
+ n_bytes += dev_blocks * sizeof(struct yaffs_block_info);
+ n_bytes += dev_blocks * dev->chunk_bit_stride;
+ n_bytes +=
+ (sizeof(struct yaffs_checkpt_obj) + sizeof(u32)) *
+ dev->n_obj;
+ n_bytes += (dev->tnode_size + sizeof(u32)) * dev->n_tnodes;
+ n_bytes += sizeof(struct yaffs_checkpt_validity);
+ n_bytes += sizeof(u32); /* checksum */
+
+ /* Round up and add 2 blocks to allow for some bad blocks,
+ * so add 3 */
+
+ n_blocks =
+ (n_bytes /
+ (dev->data_bytes_per_chunk *
+ dev->param.chunks_per_block)) + 3;
+
+ dev->checkpoint_blocks_required = n_blocks;
+ }
+
+ retval = dev->checkpoint_blocks_required - dev->blocks_in_checkpt;
+ if (retval < 0)
+ retval = 0;
+ return retval;
+}
+
+/*--------------------- Checkpointing --------------------*/
+
+static int yaffs2_wr_checkpt_validity_marker(struct yaffs_dev *dev, int head)
+{
+ struct yaffs_checkpt_validity cp;
+
+ memset(&cp, 0, sizeof(cp));
+
+ cp.struct_type = sizeof(cp);
+ cp.magic = YAFFS_MAGIC;
+ cp.version = YAFFS_CHECKPOINT_VERSION;
+ cp.head = (head) ? 1 : 0;
+
+ return (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp)) ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_validity_marker(struct yaffs_dev *dev, int head)
+{
+ struct yaffs_checkpt_validity cp;
+ int ok;
+
+ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+ if (ok)
+ ok = (cp.struct_type == sizeof(cp)) &&
+ (cp.magic == YAFFS_MAGIC) &&
+ (cp.version == YAFFS_CHECKPOINT_VERSION) &&
+ (cp.head == ((head) ? 1 : 0));
+ return ok ? 1 : 0;
+}
+
+static void yaffs2_dev_to_checkpt_dev(struct yaffs_checkpt_dev *cp,
+ struct yaffs_dev *dev)
+{
+ cp->n_erased_blocks = dev->n_erased_blocks;
+ cp->alloc_block = dev->alloc_block;
+ cp->alloc_page = dev->alloc_page;
+ cp->n_free_chunks = dev->n_free_chunks;
+
+ cp->n_deleted_files = dev->n_deleted_files;
+ cp->n_unlinked_files = dev->n_unlinked_files;
+ cp->n_bg_deletions = dev->n_bg_deletions;
+ cp->seq_number = dev->seq_number;
+
+}
+
+static void yaffs_checkpt_dev_to_dev(struct yaffs_dev *dev,
+ struct yaffs_checkpt_dev *cp)
+{
+ dev->n_erased_blocks = cp->n_erased_blocks;
+ dev->alloc_block = cp->alloc_block;
+ dev->alloc_page = cp->alloc_page;
+ dev->n_free_chunks = cp->n_free_chunks;
+
+ dev->n_deleted_files = cp->n_deleted_files;
+ dev->n_unlinked_files = cp->n_unlinked_files;
+ dev->n_bg_deletions = cp->n_bg_deletions;
+ dev->seq_number = cp->seq_number;
+}
+
+static int yaffs2_wr_checkpt_dev(struct yaffs_dev *dev)
+{
+ struct yaffs_checkpt_dev cp;
+ u32 n_bytes;
+ u32 n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
+ int ok;
+
+ /* Write device runtime values */
+ yaffs2_dev_to_checkpt_dev(&cp, dev);
+ cp.struct_type = sizeof(cp);
+
+ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
+ if (!ok)
+ return 0;
+
+ /* Write block info */
+ n_bytes = n_blocks * sizeof(struct yaffs_block_info);
+ ok = (yaffs2_checkpt_wr(dev, dev->block_info, n_bytes) == n_bytes);
+ if (!ok)
+ return 0;
+
+ /* Write chunk bits */
+ n_bytes = n_blocks * dev->chunk_bit_stride;
+ ok = (yaffs2_checkpt_wr(dev, dev->chunk_bits, n_bytes) == n_bytes);
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_dev(struct yaffs_dev *dev)
+{
+ struct yaffs_checkpt_dev cp;
+ u32 n_bytes;
+ u32 n_blocks =
+ (dev->internal_end_block - dev->internal_start_block + 1);
+ int ok;
+
+ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+ if (!ok)
+ return 0;
+
+ if (cp.struct_type != sizeof(cp))
+ return 0;
+
+ yaffs_checkpt_dev_to_dev(dev, &cp);
+
+ n_bytes = n_blocks * sizeof(struct yaffs_block_info);
+
+ ok = (yaffs2_checkpt_rd(dev, dev->block_info, n_bytes) == n_bytes);
+
+ if (!ok)
+ return 0;
+
+ n_bytes = n_blocks * dev->chunk_bit_stride;
+
+ ok = (yaffs2_checkpt_rd(dev, dev->chunk_bits, n_bytes) == n_bytes);
+
+ return ok ? 1 : 0;
+}
+
+static void yaffs2_obj_checkpt_obj(struct yaffs_checkpt_obj *cp,
+ struct yaffs_obj *obj)
+{
+ cp->obj_id = obj->obj_id;
+ cp->parent_id = (obj->parent) ? obj->parent->obj_id : 0;
+ cp->hdr_chunk = obj->hdr_chunk;
+ cp->variant_type = obj->variant_type;
+ cp->deleted = obj->deleted;
+ cp->soft_del = obj->soft_del;
+ cp->unlinked = obj->unlinked;
+ cp->fake = obj->fake;
+ cp->rename_allowed = obj->rename_allowed;
+ cp->unlink_allowed = obj->unlink_allowed;
+ cp->serial = obj->serial;
+ cp->n_data_chunks = obj->n_data_chunks;
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+ cp->size_or_equiv_obj = obj->variant.file_variant.file_size;
+ else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ cp->size_or_equiv_obj = obj->variant.hardlink_variant.equiv_id;
+}
+
+static int yaffs2_checkpt_obj_to_obj(struct yaffs_obj *obj,
+ struct yaffs_checkpt_obj *cp)
+{
+ struct yaffs_obj *parent;
+
+ if (obj->variant_type != cp->variant_type) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Checkpoint read object %d type %d chunk %d does not match existing object type %d",
+ cp->obj_id, cp->variant_type, cp->hdr_chunk,
+ obj->variant_type);
+ return 0;
+ }
+
+ obj->obj_id = cp->obj_id;
+
+ if (cp->parent_id)
+ parent = yaffs_find_or_create_by_number(obj->my_dev,
+ cp->parent_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ else
+ parent = NULL;
+
+ if (parent) {
+ if (parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Checkpoint read object %d parent %d type %d chunk %d Parent type, %d, not directory",
+ cp->obj_id, cp->parent_id,
+ cp->variant_type, cp->hdr_chunk,
+ parent->variant_type);
+ return 0;
+ }
+ yaffs_add_obj_to_dir(parent, obj);
+ }
+
+ obj->hdr_chunk = cp->hdr_chunk;
+ obj->variant_type = cp->variant_type;
+ obj->deleted = cp->deleted;
+ obj->soft_del = cp->soft_del;
+ obj->unlinked = cp->unlinked;
+ obj->fake = cp->fake;
+ obj->rename_allowed = cp->rename_allowed;
+ obj->unlink_allowed = cp->unlink_allowed;
+ obj->serial = cp->serial;
+ obj->n_data_chunks = cp->n_data_chunks;
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+ obj->variant.file_variant.file_size = cp->size_or_equiv_obj;
+ else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ obj->variant.hardlink_variant.equiv_id = cp->size_or_equiv_obj;
+
+ if (obj->hdr_chunk > 0)
+ obj->lazy_loaded = 1;
+ return 1;
+}
+
+static int yaffs2_checkpt_tnode_worker(struct yaffs_obj *in,
+ struct yaffs_tnode *tn, u32 level,
+ int chunk_offset)
+{
+ int i;
+ struct yaffs_dev *dev = in->my_dev;
+ int ok = 1;
+ u32 base_offset;
+
+ if (!tn)
+ return 1;
+
+ if (level > 0) {
+ for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
+ if (!tn->internal[i])
+ continue;
+ ok = yaffs2_checkpt_tnode_worker(in,
+ tn->internal[i],
+ level - 1,
+ (chunk_offset <<
+ YAFFS_TNODES_INTERNAL_BITS) + i);
+ }
+ return ok;
+ }
+
+ /* Level 0 tnode */
+ base_offset = chunk_offset << YAFFS_TNODES_LEVEL0_BITS;
+ ok = (yaffs2_checkpt_wr(dev, &base_offset, sizeof(base_offset)) ==
+ sizeof(base_offset));
+ if (ok)
+ ok = (yaffs2_checkpt_wr(dev, tn, dev->tnode_size) ==
+ dev->tnode_size);
+
+ return ok;
+}
+
+static int yaffs2_wr_checkpt_tnodes(struct yaffs_obj *obj)
+{
+ u32 end_marker = ~0;
+ int ok = 1;
+
+ if (obj->variant_type != YAFFS_OBJECT_TYPE_FILE)
+ return ok;
+
+ ok = yaffs2_checkpt_tnode_worker(obj,
+ obj->variant.file_variant.top,
+ obj->variant.file_variant.
+ top_level, 0);
+ if (ok)
+ ok = (yaffs2_checkpt_wr(obj->my_dev, &end_marker,
+ sizeof(end_marker)) == sizeof(end_marker));
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_tnodes(struct yaffs_obj *obj)
+{
+ u32 base_chunk;
+ int ok = 1;
+ struct yaffs_dev *dev = obj->my_dev;
+ struct yaffs_file_var *file_stuct_ptr = &obj->variant.file_variant;
+ struct yaffs_tnode *tn;
+ int nread = 0;
+
+ ok = (yaffs2_checkpt_rd(dev, &base_chunk, sizeof(base_chunk)) ==
+ sizeof(base_chunk));
+
+ while (ok && (~base_chunk)) {
+ nread++;
+ /* Read level 0 tnode */
+
+ tn = yaffs_get_tnode(dev);
+ if (tn)
+ ok = (yaffs2_checkpt_rd(dev, tn, dev->tnode_size) ==
+ dev->tnode_size);
+ else
+ ok = 0;
+
+ if (tn && ok)
+ ok = yaffs_add_find_tnode_0(dev,
+ file_stuct_ptr,
+ base_chunk, tn) ? 1 : 0;
+
+ if (ok)
+ ok = (yaffs2_checkpt_rd
+ (dev, &base_chunk,
+ sizeof(base_chunk)) == sizeof(base_chunk));
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "Checkpoint read tnodes %d records, last %d. ok %d",
+ nread, base_chunk, ok);
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_wr_checkpt_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_checkpt_obj cp;
+ int i;
+ int ok = 1;
+ struct list_head *lh;
+
+ /* Iterate through the objects in each hash entry,
+ * dumping them to the checkpointing stream.
+ */
+
+ for (i = 0; ok && i < YAFFS_NOBJECT_BUCKETS; i++) {
+ list_for_each(lh, &dev->obj_bucket[i].list) {
+ obj = list_entry(lh, struct yaffs_obj, hash_link);
+ if (!obj->defered_free) {
+ yaffs2_obj_checkpt_obj(&cp, obj);
+ cp.struct_type = sizeof(cp);
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "Checkpoint write object %d parent %d type %d chunk %d obj addr %p",
+ cp.obj_id, cp.parent_id,
+ cp.variant_type, cp.hdr_chunk, obj);
+
+ ok = (yaffs2_checkpt_wr(dev, &cp,
+ sizeof(cp)) == sizeof(cp));
+
+ if (ok &&
+ obj->variant_type ==
+ YAFFS_OBJECT_TYPE_FILE)
+ ok = yaffs2_wr_checkpt_tnodes(obj);
+ }
+ }
+ }
+
+ /* Dump end of list */
+ memset(&cp, 0xff, sizeof(struct yaffs_checkpt_obj));
+ cp.struct_type = sizeof(cp);
+
+ if (ok)
+ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_checkpt_obj cp;
+ int ok = 1;
+ int done = 0;
+ LIST_HEAD(hard_list);
+
+
+ while (ok && !done) {
+ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+ if (cp.struct_type != sizeof(cp)) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "struct size %d instead of %d ok %d",
+ cp.struct_type, (int)sizeof(cp), ok);
+ ok = 0;
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "Checkpoint read object %d parent %d type %d chunk %d ",
+ cp.obj_id, cp.parent_id, cp.variant_type,
+ cp.hdr_chunk);
+
+ if (ok && cp.obj_id == ~0) {
+ done = 1;
+ } else if (ok) {
+ obj =
+ yaffs_find_or_create_by_number(dev, cp.obj_id,
+ cp.variant_type);
+ if (obj) {
+ ok = yaffs2_checkpt_obj_to_obj(obj, &cp);
+ if (!ok)
+ break;
+ if (obj->variant_type ==
+ YAFFS_OBJECT_TYPE_FILE) {
+ ok = yaffs2_rd_checkpt_tnodes(obj);
+ } else if (obj->variant_type ==
+ YAFFS_OBJECT_TYPE_HARDLINK) {
+ list_add(&obj->hard_links, &hard_list);
+ }
+ } else {
+ ok = 0;
+ }
+ }
+ }
+
+ if (ok)
+ yaffs_link_fixup(dev, &hard_list);
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_wr_checkpt_sum(struct yaffs_dev *dev)
+{
+ u32 checkpt_sum;
+ int ok;
+
+ yaffs2_get_checkpt_sum(dev, &checkpt_sum);
+
+ ok = (yaffs2_checkpt_wr(dev, &checkpt_sum, sizeof(checkpt_sum)) ==
+ sizeof(checkpt_sum));
+
+ if (!ok)
+ return 0;
+
+ return 1;
+}
+
+static int yaffs2_rd_checkpt_sum(struct yaffs_dev *dev)
+{
+ u32 checkpt_sum0;
+ u32 checkpt_sum1;
+ int ok;
+
+ yaffs2_get_checkpt_sum(dev, &checkpt_sum0);
+
+ ok = (yaffs2_checkpt_rd(dev, &checkpt_sum1, sizeof(checkpt_sum1)) ==
+ sizeof(checkpt_sum1));
+
+ if (!ok)
+ return 0;
+
+ if (checkpt_sum0 != checkpt_sum1)
+ return 0;
+
+ return 1;
+}
+
+static int yaffs2_wr_checkpt_data(struct yaffs_dev *dev)
+{
+ int ok = 1;
+
+ if (!yaffs2_checkpt_required(dev)) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "skipping checkpoint write");
+ ok = 0;
+ }
+
+ if (ok)
+ ok = yaffs2_checkpt_open(dev, 1);
+
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "write checkpoint validity");
+ ok = yaffs2_wr_checkpt_validity_marker(dev, 1);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "write checkpoint device");
+ ok = yaffs2_wr_checkpt_dev(dev);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "write checkpoint objects");
+ ok = yaffs2_wr_checkpt_objs(dev);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "write checkpoint validity");
+ ok = yaffs2_wr_checkpt_validity_marker(dev, 0);
+ }
+
+ if (ok)
+ ok = yaffs2_wr_checkpt_sum(dev);
+
+ if (!yaffs_checkpt_close(dev))
+ ok = 0;
+
+ if (ok)
+ dev->is_checkpointed = 1;
+ else
+ dev->is_checkpointed = 0;
+
+ return dev->is_checkpointed;
+}
+
+static int yaffs2_rd_checkpt_data(struct yaffs_dev *dev)
+{
+ int ok = 1;
+
+ if (!dev->param.is_yaffs2)
+ ok = 0;
+
+ if (ok && dev->param.skip_checkpt_rd) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "skipping checkpoint read");
+ ok = 0;
+ }
+
+ if (ok)
+ ok = yaffs2_checkpt_open(dev, 0); /* open for read */
+
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint validity");
+ ok = yaffs2_rd_checkpt_validity_marker(dev, 1);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint device");
+ ok = yaffs2_rd_checkpt_dev(dev);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint objects");
+ ok = yaffs2_rd_checkpt_objs(dev);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint validity");
+ ok = yaffs2_rd_checkpt_validity_marker(dev, 0);
+ }
+
+ if (ok) {
+ ok = yaffs2_rd_checkpt_sum(dev);
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint checksum %d", ok);
+ }
+
+ if (!yaffs_checkpt_close(dev))
+ ok = 0;
+
+ if (ok)
+ dev->is_checkpointed = 1;
+ else
+ dev->is_checkpointed = 0;
+
+ return ok ? 1 : 0;
+}
+
+void yaffs2_checkpt_invalidate(struct yaffs_dev *dev)
+{
+ if (dev->is_checkpointed || dev->blocks_in_checkpt > 0) {
+ dev->is_checkpointed = 0;
+ yaffs2_checkpt_invalidate_stream(dev);
+ }
+ if (dev->param.sb_dirty_fn)
+ dev->param.sb_dirty_fn(dev);
+}
+
+int yaffs_checkpoint_save(struct yaffs_dev *dev)
+{
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "save entry: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ yaffs_verify_objects(dev);
+ yaffs_verify_blocks(dev);
+ yaffs_verify_free_chunks(dev);
+
+ if (!dev->is_checkpointed) {
+ yaffs2_checkpt_invalidate(dev);
+ yaffs2_wr_checkpt_data(dev);
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT | YAFFS_TRACE_MOUNT,
+ "save exit: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ return dev->is_checkpointed;
+}
+
+int yaffs2_checkpt_restore(struct yaffs_dev *dev)
+{
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "restore entry: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ retval = yaffs2_rd_checkpt_data(dev);
+
+ if (dev->is_checkpointed) {
+ yaffs_verify_objects(dev);
+ yaffs_verify_blocks(dev);
+ yaffs_verify_free_chunks(dev);
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "restore exit: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ return retval;
+}
+
+int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size)
+{
+ /* if new_size > old_file_size.
+ * We're going to be writing a hole.
+ * If the hole is small then write zeros otherwise write a start
+ * of hole marker.
+ */
+ loff_t old_file_size;
+ loff_t increase;
+ int small_hole;
+ int result = YAFFS_OK;
+ struct yaffs_dev *dev = NULL;
+ u8 *local_buffer = NULL;
+ int small_increase_ok = 0;
+
+ if (!obj)
+ return YAFFS_FAIL;
+
+ if (obj->variant_type != YAFFS_OBJECT_TYPE_FILE)
+ return YAFFS_FAIL;
+
+ dev = obj->my_dev;
+
+ /* Bail out if not yaffs2 mode */
+ if (!dev->param.is_yaffs2)
+ return YAFFS_OK;
+
+ old_file_size = obj->variant.file_variant.file_size;
+
+ if (new_size <= old_file_size)
+ return YAFFS_OK;
+
+ increase = new_size - old_file_size;
+
+ if (increase < YAFFS_SMALL_HOLE_THRESHOLD * dev->data_bytes_per_chunk &&
+ yaffs_check_alloc_available(dev, YAFFS_SMALL_HOLE_THRESHOLD + 1))
+ small_hole = 1;
+ else
+ small_hole = 0;
+
+ if (small_hole)
+ local_buffer = yaffs_get_temp_buffer(dev);
+
+ if (local_buffer) {
+ /* fill hole with zero bytes */
+ loff_t pos = old_file_size;
+ int this_write;
+ int written;
+ memset(local_buffer, 0, dev->data_bytes_per_chunk);
+ small_increase_ok = 1;
+
+ while (increase > 0 && small_increase_ok) {
+ this_write = increase;
+ if (this_write > dev->data_bytes_per_chunk)
+ this_write = dev->data_bytes_per_chunk;
+ written =
+ yaffs_do_file_wr(obj, local_buffer, pos, this_write,
+ 0);
+ if (written == this_write) {
+ pos += this_write;
+ increase -= this_write;
+ } else {
+ small_increase_ok = 0;
+ }
+ }
+
+ yaffs_release_temp_buffer(dev, local_buffer);
+
+ /* If out of space then reverse any chunks we've added */
+ if (!small_increase_ok)
+ yaffs_resize_file_down(obj, old_file_size);
+ }
+
+ if (!small_increase_ok &&
+ obj->parent &&
+ obj->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
+ obj->parent->obj_id != YAFFS_OBJECTID_DELETED) {
+ /* Write a hole start header with the old file size */
+ yaffs_update_oh(obj, NULL, 0, 1, 0, NULL);
+ }
+
+ return result;
+}
+
+struct yaffs_block_index {
+ int seq;
+ int block;
+};
+
+static int yaffs2_ybicmp(const void *a, const void *b)
+{
+ int aseq = ((struct yaffs_block_index *)a)->seq;
+ int bseq = ((struct yaffs_block_index *)b)->seq;
+ int ablock = ((struct yaffs_block_index *)a)->block;
+ int bblock = ((struct yaffs_block_index *)b)->block;
+
+ if (aseq == bseq)
+ return ablock - bblock;
+
+ return aseq - bseq;
+}
+
+static inline int yaffs2_scan_chunk(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi,
+ int blk, int chunk_in_block,
+ int *found_chunks,
+ u8 *chunk_data,
+ struct list_head *hard_list,
+ int summary_available)
+{
+ struct yaffs_obj_hdr *oh;
+ struct yaffs_obj *in;
+ struct yaffs_obj *parent;
+ int equiv_id;
+ loff_t file_size;
+ int is_shrink;
+ int is_unlinked;
+ struct yaffs_ext_tags tags;
+ int result;
+ int alloc_failed = 0;
+ int chunk = blk * dev->param.chunks_per_block + chunk_in_block;
+ struct yaffs_file_var *file_var;
+ struct yaffs_hardlink_var *hl_var;
+ struct yaffs_symlink_var *sl_var;
+
+ if (summary_available) {
+ result = yaffs_summary_fetch(dev, &tags, chunk_in_block);
+ tags.seq_number = bi->seq_number;
+ }
+
+ if (!summary_available || tags.obj_id == 0) {
+ result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL, &tags);
+ dev->tags_used++;
+ } else {
+ dev->summary_used++;
+ }
+
+ /* Let's have a good look at this chunk... */
+
+ if (!tags.chunk_used) {
+ /* An unassigned chunk in the block.
+ * If there are used chunks after this one, then
+ * it is a chunk that was skipped due to failing
+ * the erased check. Just skip it so that it can
+ * be deleted.
+ * But, more typically, We get here when this is
+		 * an unallocated chunk and this means that
+ * either the block is empty or this is the one
+ * being allocated from
+ */
+
+ if (*found_chunks) {
+ /* This is a chunk that was skipped due
+ * to failing the erased check */
+ } else if (chunk_in_block == 0) {
+ /* We're looking at the first chunk in
+ * the block so the block is unused */
+ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
+ dev->n_erased_blocks++;
+ } else {
+ if (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
+ bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
+ if (dev->seq_number == bi->seq_number) {
+ /* Allocating from this block*/
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Allocating from %d %d",
+ blk, chunk_in_block);
+
+ bi->block_state =
+ YAFFS_BLOCK_STATE_ALLOCATING;
+ dev->alloc_block = blk;
+ dev->alloc_page = chunk_in_block;
+ dev->alloc_block_finder = blk;
+ } else {
+ /* This is a partially written block
+ * that is not the current
+ * allocation block.
+ */
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Partially written block %d detected. gc will fix this.",
+ blk);
+ }
+ }
+ }
+
+ dev->n_free_chunks++;
+
+ } else if (tags.ecc_result ==
+ YAFFS_ECC_RESULT_UNFIXED) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Unfixed ECC in chunk(%d:%d), chunk ignored",
+ blk, chunk_in_block);
+ dev->n_free_chunks++;
+ } else if (tags.obj_id > YAFFS_MAX_OBJECT_ID ||
+ tags.chunk_id > YAFFS_MAX_CHUNK_ID ||
+ tags.obj_id == YAFFS_OBJECTID_SUMMARY ||
+ (tags.chunk_id > 0 &&
+ tags.n_bytes > dev->data_bytes_per_chunk) ||
+ tags.seq_number != bi->seq_number) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Chunk (%d:%d) with bad tags:obj = %d, chunk_id = %d, n_bytes = %d, ignored",
+ blk, chunk_in_block, tags.obj_id,
+ tags.chunk_id, tags.n_bytes);
+ dev->n_free_chunks++;
+ } else if (tags.chunk_id > 0) {
+ /* chunk_id > 0 so it is a data chunk... */
+ loff_t endpos;
+ loff_t chunk_base = (tags.chunk_id - 1) *
+ dev->data_bytes_per_chunk;
+
+ *found_chunks = 1;
+
+ yaffs_set_chunk_bit(dev, blk, chunk_in_block);
+ bi->pages_in_use++;
+
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id,
+ YAFFS_OBJECT_TYPE_FILE);
+ if (!in)
+ /* Out of memory */
+ alloc_failed = 1;
+
+ if (in &&
+ in->variant_type == YAFFS_OBJECT_TYPE_FILE &&
+ chunk_base < in->variant.file_variant.shrink_size) {
+ /* This has not been invalidated by
+ * a resize */
+ if (!yaffs_put_chunk_in_file(in, tags.chunk_id,
+ chunk, -1))
+ alloc_failed = 1;
+
+ /* File size is calculated by looking at
+ * the data chunks if we have not
+ * seen an object header yet.
+ * Stop this practice once we find an
+ * object header.
+ */
+ endpos = chunk_base + tags.n_bytes;
+
+ if (!in->valid &&
+ in->variant.file_variant.scanned_size < endpos) {
+ in->variant.file_variant.
+ scanned_size = endpos;
+ in->variant.file_variant.
+ file_size = endpos;
+ }
+ } else if (in) {
+ /* This chunk has been invalidated by a
+ * resize, or a past file deletion
+ * so delete the chunk*/
+ yaffs_chunk_del(dev, chunk, 1, __LINE__);
+ }
+ } else {
+ /* chunk_id == 0, so it is an ObjectHeader.
+ * Thus, we read in the object header and make
+ * the object
+ */
+ *found_chunks = 1;
+
+ yaffs_set_chunk_bit(dev, blk, chunk_in_block);
+ bi->pages_in_use++;
+
+ oh = NULL;
+ in = NULL;
+
+ if (tags.extra_available) {
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id,
+ tags.extra_obj_type);
+ if (!in)
+ alloc_failed = 1;
+ }
+
+ if (!in ||
+ (!in->valid && dev->param.disable_lazy_load) ||
+ tags.extra_shadows ||
+ (!in->valid && (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+ tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND))) {
+
+ /* If we don't have valid info then we
+ * need to read the chunk
+ * TODO In future we can probably defer
+ * reading the chunk and living with
+ * invalid data until needed.
+ */
+
+ result = yaffs_rd_chunk_tags_nand(dev,
+ chunk,
+ chunk_data,
+ NULL);
+
+ oh = (struct yaffs_obj_hdr *)chunk_data;
+
+ if (dev->param.inband_tags) {
+ /* Fix up the header if they got
+ * corrupted by inband tags */
+ oh->shadows_obj =
+ oh->inband_shadowed_obj_id;
+ oh->is_shrink =
+ oh->inband_is_shrink;
+ }
+
+ if (!in) {
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id, oh->type);
+ if (!in)
+ alloc_failed = 1;
+ }
+ }
+
+ if (!in) {
+ /* TODO Hoosterman we have a problem! */
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: Could not make object for object %d at chunk %d during scan",
+ tags.obj_id, chunk);
+ return YAFFS_FAIL;
+ }
+
+ if (in->valid) {
+ /* We have already filled this one.
+ * We have a duplicate that will be
+ * discarded, but we first have to suck
+ * out resize info if it is a file.
+ */
+ if ((in->variant_type == YAFFS_OBJECT_TYPE_FILE) &&
+ ((oh && oh->type == YAFFS_OBJECT_TYPE_FILE) ||
+ (tags.extra_available &&
+ tags.extra_obj_type == YAFFS_OBJECT_TYPE_FILE)
+ )) {
+ loff_t this_size = (oh) ?
+ yaffs_oh_to_size(oh) :
+ tags.extra_file_size;
+ u32 parent_obj_id = (oh) ?
+ oh->parent_obj_id :
+ tags.extra_parent_id;
+
+ is_shrink = (oh) ?
+ oh->is_shrink :
+ tags.extra_is_shrink;
+
+ /* If it is deleted (unlinked
+ * at start also means deleted)
+ * we treat the file size as
+ * being zeroed at this point.
+ */
+ if (parent_obj_id == YAFFS_OBJECTID_DELETED ||
+ parent_obj_id == YAFFS_OBJECTID_UNLINKED) {
+ this_size = 0;
+ is_shrink = 1;
+ }
+
+ if (is_shrink &&
+ in->variant.file_variant.shrink_size >
+ this_size)
+ in->variant.file_variant.shrink_size =
+ this_size;
+
+ if (is_shrink)
+ bi->has_shrink_hdr = 1;
+ }
+ /* Use existing - destroy this one. */
+ yaffs_chunk_del(dev, chunk, 1, __LINE__);
+ }
+
+ if (!in->valid && in->variant_type !=
+ (oh ? oh->type : tags.extra_obj_type)) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: Bad type, %d != %d, for object %d at chunk %d during scan",
+ oh ? oh->type : tags.extra_obj_type,
+ in->variant_type, tags.obj_id,
+ chunk);
+ in = yaffs_retype_obj(in, oh ? oh->type : tags.extra_obj_type);
+ }
+
+ if (!in->valid &&
+ (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+ tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND)) {
+ /* We only load some info, don't fiddle
+ * with directory structure */
+ in->valid = 1;
+
+ if (oh) {
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+ in->lazy_loaded = 0;
+ } else {
+ in->lazy_loaded = 1;
+ }
+ in->hdr_chunk = chunk;
+
+ } else if (!in->valid) {
+ /* we need to load this info */
+ in->valid = 1;
+ in->hdr_chunk = chunk;
+ if (oh) {
+ in->variant_type = oh->type;
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+
+ if (oh->shadows_obj > 0)
+ yaffs_handle_shadowed_obj(dev,
+ oh->shadows_obj, 1);
+
+ yaffs_set_obj_name_from_oh(in, oh);
+ parent = yaffs_find_or_create_by_number(dev,
+ oh->parent_obj_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ file_size = yaffs_oh_to_size(oh);
+ is_shrink = oh->is_shrink;
+ equiv_id = oh->equiv_id;
+ } else {
+ in->variant_type = tags.extra_obj_type;
+ parent = yaffs_find_or_create_by_number(dev,
+ tags.extra_parent_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ file_size = tags.extra_file_size;
+ is_shrink = tags.extra_is_shrink;
+ equiv_id = tags.extra_equiv_id;
+ in->lazy_loaded = 1;
+ }
+ in->dirty = 0;
+
+ if (!parent)
+ alloc_failed = 1;
+
+ /* directory stuff...
+ * hook up to parent
+ */
+
+ if (parent &&
+ parent->variant_type == YAFFS_OBJECT_TYPE_UNKNOWN) {
+ /* Set up as a directory */
+ parent->variant_type =
+ YAFFS_OBJECT_TYPE_DIRECTORY;
+ INIT_LIST_HEAD(&parent->
+ variant.dir_variant.children);
+ } else if (!parent ||
+ parent->variant_type !=
+ YAFFS_OBJECT_TYPE_DIRECTORY) {
+ /* Hoosterman, another problem....
+ * Trying to use a non-directory as a directory
+ */
+
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+ );
+ parent = dev->lost_n_found;
+ }
+ yaffs_add_obj_to_dir(parent, in);
+
+ is_unlinked = (parent == dev->del_dir) ||
+ (parent == dev->unlinked_dir);
+
+ if (is_shrink)
+ /* Mark the block */
+ bi->has_shrink_hdr = 1;
+
+ /* Note re hardlinks.
+ * Since we might scan a hardlink before its equivalent
+ * object is scanned we put them all in a list.
+ * After scanning is complete, we should have all the
+ * objects, so we run through this list and fix up all
+ * the chains.
+ */
+
+ switch (in->variant_type) {
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* Todo got a problem */
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ file_var = &in->variant.file_variant;
+ if (file_var->scanned_size < file_size) {
+ /* This covers the case where the file
+ * size is greater than the data held.
+ * This will happen if the file is
+ * resized to be larger than its
+ * current data extents.
+ */
+ file_var->file_size = file_size;
+ file_var->scanned_size = file_size;
+ }
+
+ if (file_var->shrink_size > file_size)
+ file_var->shrink_size = file_size;
+
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ hl_var = &in->variant.hardlink_variant;
+ if (!is_unlinked) {
+ hl_var->equiv_id = equiv_id;
+ list_add(&in->hard_links, hard_list);
+ }
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ sl_var = &in->variant.symlink_variant;
+ if (oh) {
+ sl_var->alias =
+ yaffs_clone_str(oh->alias);
+ if (!sl_var->alias)
+ alloc_failed = 1;
+ }
+ break;
+ }
+ }
+ }
+ return alloc_failed ? YAFFS_FAIL : YAFFS_OK;
+}
+
+int yaffs2_scan_backwards(struct yaffs_dev *dev)
+{
+ int blk;
+ int block_iter;
+ int start_iter;
+ int end_iter;
+ int n_to_scan = 0;
+ enum yaffs_block_state state;
+ int c;
+ int deleted;
+ LIST_HEAD(hard_list);
+ struct yaffs_block_info *bi;
+ u32 seq_number;
+ int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
+ u8 *chunk_data;
+ int found_chunks;
+ int alloc_failed = 0;
+ struct yaffs_block_index *block_index = NULL;
+ int alt_block_index = 0;
+ int summary_available;
+
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "yaffs2_scan_backwards starts intstartblk %d intendblk %d...",
+ dev->internal_start_block, dev->internal_end_block);
+
+ dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
+
+ block_index =
+ kmalloc(n_blocks * sizeof(struct yaffs_block_index), GFP_NOFS);
+
+ if (!block_index) {
+ block_index =
+ vmalloc(n_blocks * sizeof(struct yaffs_block_index));
+ alt_block_index = 1;
+ }
+
+ if (!block_index) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "yaffs2_scan_backwards() could not allocate block index!"
+ );
+ return YAFFS_FAIL;
+ }
+
+ dev->blocks_in_checkpt = 0;
+
+ chunk_data = yaffs_get_temp_buffer(dev);
+
+ /* Scan all the blocks to determine their state */
+ bi = dev->block_info;
+ for (blk = dev->internal_start_block; blk <= dev->internal_end_block;
+ blk++) {
+ yaffs_clear_chunk_bits(dev, blk);
+ bi->pages_in_use = 0;
+ bi->soft_del_pages = 0;
+
+ yaffs_query_init_block_state(dev, blk, &state, &seq_number);
+
+ bi->block_state = state;
+ bi->seq_number = seq_number;
+
+ if (bi->seq_number == YAFFS_SEQUENCE_CHECKPOINT_DATA)
+ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
+ if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
+ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
+
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG,
+ "Block scanning block %d state %d seq %d",
+ blk, bi->block_state, seq_number);
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT) {
+ dev->blocks_in_checkpt++;
+
+ } else if (bi->block_state == YAFFS_BLOCK_STATE_DEAD) {
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+ "block %d is bad", blk);
+ } else if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty ");
+ dev->n_erased_blocks++;
+ dev->n_free_chunks += dev->param.chunks_per_block;
+ } else if (bi->block_state ==
+ YAFFS_BLOCK_STATE_NEEDS_SCAN) {
+ /* Determine the highest sequence number */
+ if (seq_number >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
+ seq_number < YAFFS_HIGHEST_SEQUENCE_NUMBER) {
+ block_index[n_to_scan].seq = seq_number;
+ block_index[n_to_scan].block = blk;
+ n_to_scan++;
+ if (seq_number >= dev->seq_number)
+ dev->seq_number = seq_number;
+ } else {
+ /* TODO: Nasty sequence number! */
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Block scanning block %d has bad sequence number %d",
+ blk, seq_number);
+ }
+ }
+ bi++;
+ }
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "%d blocks to be sorted...", n_to_scan);
+
+ cond_resched();
+
+ /* Sort the blocks by sequence number */
+ sort(block_index, n_to_scan, sizeof(struct yaffs_block_index),
+ yaffs2_ybicmp, NULL);
+
+ cond_resched();
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "...done");
+
+ /* Now scan the blocks looking at the data. */
+ start_iter = 0;
+ end_iter = n_to_scan - 1;
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "%d blocks to scan", n_to_scan);
+
+ /* For each block.... backwards */
+ for (block_iter = end_iter;
+ !alloc_failed && block_iter >= start_iter;
+ block_iter--) {
+ /* Cooperative multitasking! This loop can run for so
+ long that watchdog timers expire. */
+ cond_resched();
+
+ /* get the block to scan in the correct order */
+ blk = block_index[block_iter].block;
+ bi = yaffs_get_block_info(dev, blk);
+ deleted = 0;
+
+ summary_available = yaffs_summary_read(dev, dev->sum_tags, blk);
+
+ /* For each chunk in each block that needs scanning.... */
+ found_chunks = 0;
+ if (summary_available)
+ c = dev->chunks_per_summary - 1;
+ else
+ c = dev->param.chunks_per_block - 1;
+
+ for (/* c is already initialised */;
+ !alloc_failed && c >= 0 &&
+ (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
+ bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING);
+ c--) {
+ /* Scan backwards...
+ * Read the tags and decide what to do
+ */
+ if (yaffs2_scan_chunk(dev, bi, blk, c,
+ &found_chunks, chunk_data,
+ &hard_list, summary_available) ==
+ YAFFS_FAIL)
+ alloc_failed = 1;
+ }
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN) {
+ /* If we got this far while scanning, then the block
+ * is fully allocated. */
+ bi->block_state = YAFFS_BLOCK_STATE_FULL;
+ }
+
+ /* Now let's see if it was dirty */
+ if (bi->pages_in_use == 0 &&
+ !bi->has_shrink_hdr &&
+ bi->block_state == YAFFS_BLOCK_STATE_FULL) {
+ yaffs_block_became_dirty(dev, blk);
+ }
+ }
+
+ yaffs_skip_rest_of_block(dev);
+
+ if (alt_block_index)
+ vfree(block_index);
+ else
+ kfree(block_index);
+
+ /* Ok, we've done all the scanning.
+ * Fix up the hard link chains.
+ * We have scanned all the objects, now it's time to add these
+ * hardlinks.
+ */
+ yaffs_link_fixup(dev, &hard_list);
+
+ yaffs_release_temp_buffer(dev, chunk_data);
+
+ if (alloc_failed)
+ return YAFFS_FAIL;
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "yaffs2_scan_backwards ends");
+
+ return YAFFS_OK;
+}
Index: linux-3.18.21/fs/yaffs2/yaffs_yaffs2.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yaffs_yaffs2.h 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,39 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_YAFFS2_H__
+#define __YAFFS_YAFFS2_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev);
+void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev);
+void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi);
+void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no,
+ struct yaffs_block_info *bi);
+int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi);
+u32 yaffs2_find_refresh_block(struct yaffs_dev *dev);
+int yaffs2_checkpt_required(struct yaffs_dev *dev);
+int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev);
+
+void yaffs2_checkpt_invalidate(struct yaffs_dev *dev);
+int yaffs2_checkpt_save(struct yaffs_dev *dev);
+int yaffs2_checkpt_restore(struct yaffs_dev *dev);
+
+int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size);
+int yaffs2_scan_backwards(struct yaffs_dev *dev);
+
+#endif
Index: linux-3.18.21/fs/yaffs2/yportenv.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/fs/yaffs2/yportenv.h 2018-02-05 13:21:16.000000000 +0800
@@ -0,0 +1,85 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YPORTENV_H__
+#define __YPORTENV_H__
+
+/*
+ * Define the MTD version in terms of Linux Kernel versions
+ * This allows yaffs to be used independently of the kernel
+ * as well as with it.
+ */
+
+#define MTD_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+
+#ifdef YAFFS_OUT_OF_TREE
+#include "moduleconfig.h"
+#endif
+
+#include <linux/version.h>
+#define MTD_VERSION_CODE LINUX_VERSION_CODE
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/xattr.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/sort.h>
+#include <linux/bitops.h>
+
+/* These type wrappings are used to support Unicode names in WinCE. */
+#define YCHAR char
+#define YUCHAR unsigned char
+#define _Y(x) x
+
+#define YAFFS_LOSTNFOUND_NAME "lost+found"
+#define YAFFS_LOSTNFOUND_PREFIX "obj"
+
+
+#define YAFFS_ROOT_MODE 0755
+#define YAFFS_LOSTNFOUND_MODE 0700
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+#define Y_CURRENT_TIME CURRENT_TIME.tv_sec
+#define Y_TIME_CONVERT(x) (x).tv_sec
+#else
+#define Y_CURRENT_TIME CURRENT_TIME
+#define Y_TIME_CONVERT(x) (x)
+#endif
+
+#define compile_time_assertion(assertion) \
+ ({ int x = __builtin_choose_expr(assertion, 0, (void)0); (void) x; })
+
+
+#define yaffs_printf(msk, fmt, ...) \
+ printk(KERN_DEBUG "yaffs: " fmt "\n", ##__VA_ARGS__)
+
+#define yaffs_trace(msk, fmt, ...) do { \
+ if (yaffs_trace_mask & (msk)) \
+ printk(KERN_DEBUG "yaffs: " fmt "\n", ##__VA_ARGS__); \
+} while (0)
+
+
+#endif
Index: linux-3.18.21/include/asm-generic/preempt.h
===================================================================
--- linux-3.18.21.orig/include/asm-generic/preempt.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/asm-generic/preempt.h 2018-02-05 13:21:17.000000000 +0800
@@ -74,9 +74,9 @@
/*
* Returns true when we need to resched and can (barring IRQ state).
*/
-static __always_inline bool should_resched(void)
+static __always_inline bool should_resched(int preempt_offset)
{
- return unlikely(!preempt_count() && tif_need_resched());
+ return unlikely((preempt_count()==preempt_offset) && tif_need_resched());
}
#ifdef CONFIG_PREEMPT
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,188 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVERS SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVERS SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+
+#ifndef __LINUX_ENCT_HOOK_H
+#define __LINUX_ENCT_HOOK_H
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/net.h>
+#include <linux/if.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+/* Responses from hook functions. */
+#define ECNT_MAX_SUBTYPE 8
+
+#define ECNT_REGISTER_FAIL -1
+#define ECNT_REGISTER_SUCCESS 0
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+typedef enum {
+ ECNT_HOOK_ERROR = -1,
+ ECNT_RETURN_DROP= 0, /*free skb before return this type*/
+ ECNT_CONTINUE ,
+ ECNT_RETURN,
+}ecnt_ret_val;
+
+enum ecnt_maintype{
+ ECNT_NET_CORE_DEV,
+ ECNT_NET_VLAN_DEV,
+ ECNT_NET_BR_FDB,
+ ECNT_NET_BR_FORWARD,
+ ECNT_NET_BR_INPUT,
+ ECNT_NET_PPP_GENERIC,
+ ECNT_NET_UDP,
+ ECNT_NET_UDPV6,
+ ECNT_NET_AF,
+ ECNT_NET_SOCK,
+ ECNT_NET_IP_OUTPUT,
+ ECNT_NET_IGMP,
+ ECNT_NF_BR,
+ ECNT_NF_ARP,
+ ECNT_NF_IPV4,
+ ECNT_NF_IPV6,
+ ECNT_NF_TRACK_CORE,
+ ECNT_QDMA_WAN,
+ ECNT_QDMA_LAN,
+ ECNT_FE,
+ ECNT_PPE,
+ ECNT_ATM,
+ ECNT_PTM,
+ ECNT_ETHER_SWITCH,
+ ECNT_ETHER_PHY,
+ ECNT_XPON_MAC,
+ ECNT_XPON_PHY,
+ ECNT_QDMA_7510_20,
+ ECNT_PCIE,
+ ECNT_MULTICAST,
+ ECNT_L2TP,
+ ECNT_TRAFFIC_CLASSIFY,
+ ECNT_SMUX,
+ ECNT_VOIP,
+ ECNT_CRYPTO,
+ ECNT_HOOK_EVENT,
+ ECNT_MULTICAST_GENERAL,
+ ECNT_IRQ_NUM,
+ ECNT_SAR,
+ ECNT_TSO_WAN,
+ ECNT_TSO_LAN,
+ ECNT_SPI_NAND,
+ ECNT_AUTOBENCH_USB,
+ ECNT_AUTOBENCH_DMT,
+ ECNT_NUM_MAINTYPE
+};
+
+struct ecnt_data;
+
+typedef ecnt_ret_val ecnt_hookfn(struct ecnt_data *in_data);
+
+struct net_info_s{
+
+};
+
+struct nf_info_s{
+
+};
+
+struct ecnt_ops_info{
+ unsigned int drop_num;
+ union{
+ struct nf_info_s nf_info;
+ struct net_info_s net_info;
+ };
+};
+struct ecnt_hook_ops {
+ struct list_head list;
+ unsigned int hook_id;
+ struct ecnt_ops_info info;
+ /* User fills in from here down. */
+ const char *name;
+ unsigned int is_execute;
+ ecnt_hookfn *hookfn;
+ unsigned int maintype;
+ unsigned int subtype;
+ /* Hooks are ordered in ascending priority. */
+ int priority;
+};
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+extern struct list_head ecnt_hooks[ECNT_NUM_MAINTYPE][ECNT_MAX_SUBTYPE];
+
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+ I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+extern void ecnt_hook_init(void);
+extern int __ECNT_HOOK(unsigned int maintype, unsigned int subtype,struct ecnt_data *in_data);
+extern int ecnt_register_hook(struct ecnt_hook_ops *reg);
+extern void ecnt_unregister_hook(struct ecnt_hook_ops *reg);
+extern int show_all_ecnt_hookfn(void);
+extern int set_ecnt_hookfn_execute_or_not(unsigned int maintype, unsigned int subtype, unsigned int hook_id, unsigned int is_execute);
+extern int ecnt_ops_unregister(unsigned int maintype, unsigned int subtype, unsigned int hook_id);
+extern int get_ecnt_hookfn(unsigned int maintype, unsigned int subtype);
+extern int ecnt_register_hooks(struct ecnt_hook_ops *reg, unsigned int n);
+extern void ecnt_unregister_hooks(struct ecnt_hook_ops *reg, unsigned int n);
+
+#endif
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_autobench_dmt.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_autobench_dmt.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,107 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVERS SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVERS SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_AUTOBENCH_DMT_H_
+#define _ECNT_HOOK_AUTOBENCH_DMT_H_
+
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include "ecnt_hook.h"
+#include "ecnt_hook_autobench_dmt_type.h"
+
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+
+static inline int AUTOBENCH_DMT_API_LINKRATE_CHECK(void)
+{
+ struct ECNT_AUTOBENCH_DMT_DATA dmt_data;
+ int ret=0;
+
+ dmt_data.function_id = AUTOBENCH_DMT_FUNCTION_LINKRATE_CHECK;
+
+ ret = __ECNT_HOOK(ECNT_AUTOBENCH_DMT, ECNT_DRIVER_API, (struct ecnt_data *)&dmt_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return dmt_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int AUTOBENCH_DMT_API_INSMOD(void)
+{
+ struct ECNT_AUTOBENCH_DMT_DATA dmt_data;
+ int ret=0;
+
+ dmt_data.function_id = AUTOBENCH_DMT_FUNCTION_INSMOD;
+
+ ret = __ECNT_HOOK(ECNT_AUTOBENCH_DMT, ECNT_DRIVER_API, (struct ecnt_data *)&dmt_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return dmt_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+* I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+
+#endif /* _ECNT_HOOK_AUTOBENCH_DMT_H_ */
+
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_autobench_dmt_type.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_autobench_dmt_type.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,80 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVERS SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVERS SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_AUTOBENCH_DMT_TYPE_H_
+#define _ECNT_HOOK_AUTOBENCH_DMT_TYPE_H_
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include "ecnt_hook.h"
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+#define ECNT_DRIVER_API 0
+#define DMT_LINK_FAIL 1
+#define DMT_RATE_SNR_FAIL 2
+#define SNR_TIME 30
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+
+typedef enum {
+ AUTOBENCH_DMT_FUNCTION_INSMOD = 0,
+ AUTOBENCH_DMT_FUNCTION_LINKRATE_CHECK,
+ AUTOBENCH_DMT_FUNCTION_MAX_NUM
+} AUTOBENCH_DMT_HookFunctionID_t ;
+
+struct ECNT_AUTOBENCH_DMT_DATA
+{
+ AUTOBENCH_DMT_HookFunctionID_t function_id;
+ int retValue;
+};
+
+typedef int (*autobench_dmt_op_t)(struct ECNT_AUTOBENCH_DMT_DATA *dmt_data);
+
+#endif
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_autobench_usb.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_autobench_usb.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,92 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVERS SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVERS SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_AUTOBENCH_USB_H_
+#define _ECNT_HOOK_AUTOBENCH_USB_H_
+
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include "ecnt_hook.h"
+#include "ecnt_hook_autobench_usb_type.h"
+
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+
+static inline int AUTOBENCH_USB_API_FILE_CHECK(void)
+{
+ struct ECNT_AUTOBENCH_USB_DATA usb_data;
+ int ret=0;
+
+ usb_data.function_id = AUTOBENCH_USB_FUNCTION_FILE_CHECK;
+
+ ret = __ECNT_HOOK(ECNT_AUTOBENCH_USB, ECNT_DRIVER_API, (struct ecnt_data *)&usb_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return usb_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+* I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+
+#endif /* _ECNT_HOOK_AUTOBENCH_USB_H_ */
+
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_autobench_usb_type.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_autobench_usb_type.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,76 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVERS SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVERS SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_AUTOBENCH_USB_TYPE_H_
+#define _ECNT_HOOK_AUTOBENCH_USB_TYPE_H_
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include "ecnt_hook.h"
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+#define ECNT_DRIVER_API 0
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+
+typedef enum {
+ AUTOBENCH_USB_FUNCTION_FILE_CHECK = 0,
+ AUTOBENCH_USB_FUNCTION_MAX_NUM
+} AUTOBENCH_USB_HookFunctionID_t ;
+
+struct ECNT_AUTOBENCH_USB_DATA
+{
+ AUTOBENCH_USB_HookFunctionID_t function_id;
+ int retValue;
+};
+
+typedef int (*autobench_usb_op_t)(struct ECNT_AUTOBENCH_USB_DATA *usb_data);
+
+#endif
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_cpu_interrupt_type.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_cpu_interrupt_type.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,141 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVERS SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVERS SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_CPU_INTERRUPT_TYPE_H_
+#define _ECNT_HOOK_CPU_INTERRUPT_TYPE_H_
+
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+#define IRQ_STRING_LEN_MAX 16
+
+#define IRQ_NAME_UART "uart"
+#define IRQ_NAME_DRAM_PROTECT "dram_protect"
+#define IRQ_NAME_TIMER0 "timer0"
+#define IRQ_NAME_TIMER1 "timer1"
+#define IRQ_NAME_TIMER2 "timer2"
+#define IRQ_NAME_WATCHDOG "watchdog"
+#define IRQ_NAME_GPIO "gpio"
+#define IRQ_NAME_PCM1 "pcm1"
+#define IRQ_NAME_GDMA "gdma"
+#define IRQ_NAME_GIGA_SWITCH "giga_switch"
+#define IRQ_NAME_UART2 "uart2"
+#define IRQ_NAME_USB "usb"
+#define IRQ_NAME_DYING_GASP "dying_gasp"
+#define IRQ_NAME_DMT "dmt"
+#define IRQ_NAME_QDMA_LAN0 "qdma_lan0"
+#define IRQ_NAME_QDMA_LAN1 "qdma_lan1"
+#define IRQ_NAME_QDMA_LAN2 "qdma_lan2"
+#define IRQ_NAME_QDMA_LAN3 "qdma_lan3"
+#define IRQ_NAME_QDMA_WAN0 "qdma_wan0"
+#define IRQ_NAME_QDMA_WAN1 "qdma_wan1"
+#define IRQ_NAME_QDMA_WAN2 "qdma_wan2"
+#define IRQ_NAME_QDMA_WAN3 "qdma_wan3"
+#define IRQ_NAME_PCIE0 "pcie0"
+#define IRQ_NAME_PCIE1 "pcie1"
+#define IRQ_NAME_PCIE_ERR "pcie_err"
+#define IRQ_NAME_XPON_MAC "xpon_mac"
+#define IRQ_NAME_XPON_PHY "xpon_phy"
+#define IRQ_NAME_CRYPTO "crypto"
+#define IRQ_NAME_PBUS_TIMEOUT "pbus_timeout"
+#define IRQ_NAME_PCM2 "pcm2"
+#define IRQ_NAME_SPI "spi"
+#define IRQ_NAME_USB2 "usb2"
+#define IRQ_NAME_FE_ERR "fe_err"
+#define IRQ_NAME_UART3 "uart3"
+#define IRQ_NAME_UART4 "uart4"
+#define IRQ_NAME_UART5 "uart5"
+#define IRQ_NAME_HSDMA "hsdma"
+#define IRQ_NAME_XSI_MAC "xsi_mac"
+#define IRQ_NAME_XSI_PHY "xsi_phy"
+#define IRQ_NAME_WOE0 "woe0"
+#define IRQ_NAME_WOE1 "woe1"
+#define IRQ_NAME_WDMA0_P0 "wdma0_p0"
+#define IRQ_NAME_WDMA0_P1 "wdma0_p1"
+#define IRQ_NAME_WDMA0_WOE "wdma0_woe"
+#define IRQ_NAME_WDMA1_P0 "wdma1_p0"
+#define IRQ_NAME_WDMA1_P1 "wdma1_p1"
+#define IRQ_NAME_WDMA1_WOE "wdma1_woe"
+#define IRQ_NAME_EFUSE_ERR0 "efuse_err0"
+#define IRQ_NAME_EFUSE_ERR1 "efuse_err1"
+
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+ typedef enum {
+ CPU_INTERRUPT_GET_IRQNUM = 0,
+ CPU_INTERRUPT_SHOW_INTERRUPTS,
+ CPU_INTERRUPT_CHECK_INTRNAME,
+
+ CPU_INTERRUPT_FUNCTION_MAX_NUM
+ } CPU_Interrupt_HookFunctionID_t ;
+
+
+ struct ecnt_cpu_interrupt_data
+ {
+ CPU_Interrupt_HookFunctionID_t function_id;
+ int retValue;
+ int irqNum;
+ char irqString[IRQ_STRING_LEN_MAX];
+ };
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+* I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+
+#endif /* _ECNT_HOOK_CPU_INTERRUPT_TYPE_H_ */
+
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_crypto.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_crypto.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,279 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER'S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_CRYPTO_H_
+#define _ECNT_HOOK_CRYPTO_H_
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include "ecnt_hook.h"
+#include <net/xfrm.h>
+#include <net/mtk_esp.h>
+#include <linux/skbuff.h>
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+#define ECNT_DRIVER_API 0
+
+#define DP_CRYPTO_E_0 100
+#define DP_CRYPTO_E_MAX 116 //modify when ipsec max entry index changes
+#define DP_CRYPTO_D_0 200
+#define DP_CRYPTO_D_MAX 216 //modify when ipsec max entry index changes
+
+typedef enum {
+ CRYPTO_FUNCTION_FREE_ADAPTER = 0,
+ CRYPTO_FUNCTION_DO_IPSEC_ESP_OUTPUT,
+ CRYPTO_FUNCTION_DO_IPSEC_ESP_INPUT,
+ CRYPTO_FUNCTION_DO_IPSEC_ESP_INPUT_PT,
+ CRYPTO_FUNCTION_GET_XFRM_HEADER_LEN,
+ CRYPTO_FUNCTION_GET_XFRM_INPUT_RET,
+ CRYPTO_FUNCTION_ESP_OUTPUT_PT_LEARN,
+ CRYPTO_FUNCTION_ESP_PKTPUT_PT_SPEED,
+ CRYPTO_FUNCTION_ESP_PKTPUT_FINISH_PT_SPEED,
+ CRYPTO_FUNCTION_ESP_ADAPTER_PARA_SET,
+ CRYPTO_FUNCTION_MAX_NUM,
+} CRYPTO_HookFunction_t ;
+
+struct ECNT_CRYPTO_Data {
+ CRYPTO_HookFunction_t function_id; /* need put at first item */
+ int retValue;
+
+ struct xfrm_state *x;
+ struct sk_buff *skb;
+ unsigned int xferhr_len;
+ unsigned int adapter_spi;
+ struct{
+ int flag;
+ int entry;
+ } hwnat;
+};
+
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+static inline int CRYPTO_API_FREE_ADAPTER(unsigned int mainType, unsigned int spi){
+ struct ECNT_CRYPTO_Data in_data;
+ int ret=0;
+#ifdef MTK_CRYPTO_DRIVER
+ in_data.function_id = CRYPTO_FUNCTION_FREE_ADAPTER;
+ in_data.adapter_spi = spi ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+#endif
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int CRYPTO_API_DO_IPSEC_ESP_OUTPUT(
+ unsigned int mainType,
+ struct xfrm_state *x,
+ struct sk_buff *skb
+){
+ struct ECNT_CRYPTO_Data in_data;
+ int ret=0;
+#ifdef MTK_CRYPTO_DRIVER
+ in_data.function_id = CRYPTO_FUNCTION_DO_IPSEC_ESP_OUTPUT;
+ in_data.x = x;
+ in_data.skb = skb;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+
+ if(ret == ECNT_RETURN)
+ return in_data.retValue;
+ else
+ return ECNT_CONTINUE;
+#else
+ return ECNT_CONTINUE;
+#endif
+}
+
+static inline int CRYPTO_API_DO_IPSEC_ESP_INPUT(
+ unsigned int mainType,
+ struct xfrm_state *x,
+ struct sk_buff *skb
+){
+ struct ECNT_CRYPTO_Data in_data;
+ int ret=0;
+#ifdef MTK_CRYPTO_DRIVER
+ in_data.function_id = CRYPTO_FUNCTION_DO_IPSEC_ESP_INPUT;
+ in_data.x = x;
+ in_data.skb = skb;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret == ECNT_RETURN)
+ return in_data.retValue;
+ else
+ return ECNT_CONTINUE;
+#else
+ return ECNT_CONTINUE;
+#endif
+}
+
+static inline int CRYPTO_API_DO_IPSEC_ESP_INPUT_PT(
+ unsigned int mainType,
+ struct xfrm_state *x,
+ struct sk_buff *skb
+){
+ struct ECNT_CRYPTO_Data in_data;
+ int ret=0;
+
+#if defined(TCSUPPORT_IPSEC_PASSTHROUGH) || defined(MTK_CRYPTO_DRIVER)
+ in_data.function_id = CRYPTO_FUNCTION_DO_IPSEC_ESP_INPUT_PT;
+ in_data.skb = skb;
+ in_data.x = x;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_CONTINUE)
+ return in_data.retValue;
+ else
+#endif
+ return ECNT_CONTINUE;
+}
+
+static inline int CRYPTO_API_GET_XFRM_HEADER_LEN(
+ unsigned int mainType,
+ struct xfrm_state *x,
+ unsigned int *len_p
+){
+ struct ECNT_CRYPTO_Data in_data;
+ int ret=0;
+#ifdef MTK_CRYPTO_DRIVER
+ in_data.function_id = CRYPTO_FUNCTION_GET_XFRM_HEADER_LEN;
+ in_data.x = x;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ *len_p = in_data.xferhr_len;
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+#endif
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int CRYPTO_API_GET_XFRM_INPUT_RET(
+ unsigned int mainType,
+ int retVal
+){
+#ifdef MTK_CRYPTO_DRIVER
+ if (retVal != -EINPROGRESS)
+ return ECNT_RETURN_DROP;
+#endif
+ return ECNT_CONTINUE;
+}
+
+static inline int CRYPTO_API_ESP_OUTPUT_PT_LEARN(
+ unsigned int mainType,
+ struct xfrm_state *x,
+ struct sk_buff *skb
+){
+#if defined(TCSUPPORT_IPSEC_PASSTHROUGH)
+ struct ECNT_CRYPTO_Data in_data;
+ int ret=0;
+
+ in_data.function_id = CRYPTO_FUNCTION_ESP_OUTPUT_PT_LEARN;
+ in_data.skb = skb;
+ in_data.x = x;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_CONTINUE)
+ return in_data.retValue;
+ else
+#endif
+ return ECNT_CONTINUE;
+}
+
+
+static inline int CRYPTO_API_ESP_PKTPUT_PT_SPEED(
+ unsigned int mainType,
+ struct sk_buff *skb,
+ int index
+){
+ struct ECNT_CRYPTO_Data in_data;
+ int ret = ECNT_CONTINUE;
+
+#if defined(TCSUPPORT_IPSEC_PASSTHROUGH)
+ in_data.function_id = CRYPTO_FUNCTION_ESP_PKTPUT_PT_SPEED;
+ in_data.skb = skb;
+ in_data.hwnat.entry = index;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+#endif
+
+ return ret;
+}
+
+static inline int CRYPTO_API_ESP_PKTPUT_FINISH_PT(
+ unsigned int mainType,
+ struct sk_buff *skb,
+ int index
+){
+ struct ECNT_CRYPTO_Data in_data;
+ int ret = ECNT_CONTINUE;
+
+#if defined(TCSUPPORT_IPSEC_PASSTHROUGH)
+ in_data.function_id = CRYPTO_FUNCTION_ESP_PKTPUT_FINISH_PT_SPEED;
+ in_data.skb = skb;
+ in_data.hwnat.entry = index;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+#endif
+
+ return ret;
+}
+
+static inline int CRYPTO_API_ESP_ADATPDER_SET(
+ unsigned int mainType,
+ struct sk_buff *skb
+){
+ struct ECNT_CRYPTO_Data in_data;
+ int ret=0;
+
+#if defined(TCSUPPORT_IPSEC_PASSTHROUGH)
+ in_data.function_id = CRYPTO_FUNCTION_ESP_ADAPTER_PARA_SET;
+ in_data.skb = skb;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+#endif
+
+ return ECNT_RETURN;
+}
+
+#endif
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_ephy.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_ephy.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,288 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation ("EcoNet Software")
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited ("EcoNet") and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS ("ECONET SOFTWARE") RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN "AS IS"
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER'S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_EPHY_H
+#define _ECNT_HOOK_EPHY_H
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include <ecnt_hook/ecnt_hook.h>
+
+#include <asm/tc3162/tc3162.h>
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+#define ECNT_DRIVER_API 0
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+
+
+/* Warning: same sequence with function array 'qdma_operation' */
+typedef enum {
+ EPHY_FUNCTION_MONITOR,
+ EPHY_FUNCTION_POWER_DOWN,
+ EPHY_FUNCTION_PHY_MII_WRITE_TRDBG,
+ EPHY_FUNCTION_MT_MII_REG_WRITE,
+ EPHY_FUNCTION_MT_MII_REG_READ,
+ EPHY_FUNCTION_MT_EMII_REG_WRITE,
+ EPHY_FUNCTION_MT_EMII_REG_READ,
+#if defined(TCSUPPORT_CPU_EN7512) || defined(TCSUPPORT_CPU_EN7521)
+#if !defined(TCSUPPORT_CPU_EN7516) && !defined(TCSUPPORT_CPU_EN7527)
+ EPHY_FUNCTION_MT7512FE_READ_PROBE,
+ EPHY_FUNCTION_MT7512FE_SNR_SUM,
+ EPHY_FUNCTION_MT7512_FE_READ_ADC_SUM,
+#endif
+#endif
+ EPHY_FUNCTION_SLT_MODE,
+ EPHY_FUNCTION_MAX_NUM,
+} EPHY_HookFunction_t ;
+
+typedef struct ECNT_EPHY_Data {
+ EPHY_HookFunction_t function_id; /* need put at first item */
+ int retValue;
+ char phy_add;
+ union {
+
+ struct{
+ uint8 port_num;
+ uint8 mode;
+ }probe;
+ struct{
+ uint8 port_num;
+ uint16 cnt;
+ }snr;
+ struct{
+ uint8 phyaddr;
+ char *type;
+ uint32 data_addr;
+ uint32 value;
+ uint8 ch_num;
+ }trDbg;
+ struct{
+ uint32 port_num;
+ uint32 dev_num;
+ uint32 reg_num;
+ uint32 reg_data;
+ }mii;
+ struct{
+ uint8 port_num;
+ uint8 reg_num;
+ }miiRead;
+ struct{
+ uint8 port_num;
+ }adc;
+ struct{
+ uint8 port_num;
+ uint16 speed_mode;
+ }slt;
+ } ephy_private;
+}ECNT_EPHY_Data_s;
+
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+ I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+static inline int EPHY_API_GET_MONITOR(void){
+ ECNT_EPHY_Data_s in_data;
+ int ret=0;
+ in_data.function_id = EPHY_FUNCTION_MONITOR;
+ ret = __ECNT_HOOK(ECNT_ETHER_PHY, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return ECNT_CONTINUE;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int EPHY_API_POWER_DOWN(int phy_add){
+ ECNT_EPHY_Data_s in_data;
+ int ret=0;
+ in_data.function_id = EPHY_FUNCTION_POWER_DOWN;
+ in_data.phy_add = phy_add;
+ ret = __ECNT_HOOK(ECNT_ETHER_PHY, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return ECNT_CONTINUE;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+
+/* int mtPhyMiiWrite_TrDbg(uint8 phyaddr, char *type, uint32 data_addr ,uint32 value, uint8 ch_num) */
+static inline int EPHY_API_PHY_MII_WRITE_TRDBG(uint8 phyaddr, char *type, uint32 data_addr ,uint32 value, uint8 ch_num){
+ ECNT_EPHY_Data_s in_data;
+ int ret=0;
+ in_data.function_id = EPHY_FUNCTION_PHY_MII_WRITE_TRDBG;
+
+ in_data.ephy_private.trDbg.phyaddr = phyaddr;
+ in_data.ephy_private.trDbg.type = type;
+ in_data.ephy_private.trDbg.data_addr = data_addr;
+ in_data.ephy_private.trDbg.value = value;
+ in_data.ephy_private.trDbg.ch_num = ch_num;
+ ret = __ECNT_HOOK(ECNT_ETHER_PHY, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+
+ return in_data.retValue;
+}
+
+/* void mtMiiRegWrite(uint32 port_num, uint32 reg_num, uint32 reg_data) */
+static inline int EPHY_API_MT_MII_REG_WRITE(uint32 port_num, uint32 reg_num, uint32 reg_data){
+ ECNT_EPHY_Data_s in_data;
+ int ret=0;
+ in_data.function_id = EPHY_FUNCTION_MT_MII_REG_WRITE;
+ in_data.ephy_private.mii.port_num = port_num;
+ in_data.ephy_private.mii.reg_num = reg_num;
+ in_data.ephy_private.mii.reg_data = reg_data;
+
+ ret = __ECNT_HOOK(ECNT_ETHER_PHY, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+
+ return in_data.retValue;
+}
+
+/* uint32 mtMiiRegRead(uint8 port_num,uint8 reg_num) */
+static inline int EPHY_API_MT_MII_REG_READ(uint8 port_num,uint8 reg_num){
+ ECNT_EPHY_Data_s in_data;
+ int ret=0;
+ in_data.function_id = EPHY_FUNCTION_MT_MII_REG_READ;
+ in_data.ephy_private.miiRead.port_num = port_num;
+ in_data.ephy_private.miiRead.reg_num = reg_num;
+
+ ret = __ECNT_HOOK(ECNT_ETHER_PHY, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+
+ return in_data.retValue;
+}
+
+/* void mtEMiiRegWrite(uint32 port_num, uint32 dev_num, uint32 reg_num, uint32 reg_data) */
+static inline int EPHY_API_MT_EMII_REG_WRITE(uint32 port_num, uint32 dev_num, uint32 reg_num, uint32 reg_data){
+ ECNT_EPHY_Data_s in_data;
+ int ret=0;
+ in_data.function_id = EPHY_FUNCTION_MT_EMII_REG_WRITE;
+ in_data.ephy_private.mii.port_num = port_num;
+ in_data.ephy_private.mii.dev_num = dev_num;
+ in_data.ephy_private.mii.reg_num= reg_num;
+ in_data.ephy_private.mii.reg_data = reg_data;
+ ret = __ECNT_HOOK(ECNT_ETHER_PHY, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+
+ return in_data.retValue;
+}
+
+/* uint32 mtEMiiRegRead(uint32 port_num, uint32 dev_num, uint32 reg_num) */
+static inline int EPHY_API_MT_EMII_REG_READ(uint32 port_num, uint32 dev_num, uint32 reg_num){
+ ECNT_EPHY_Data_s in_data;
+ int ret=0;
+ in_data.function_id = EPHY_FUNCTION_MT_EMII_REG_READ;
+ in_data.ephy_private.mii.port_num = port_num;
+ in_data.ephy_private.mii.dev_num = dev_num;
+ in_data.ephy_private.mii.reg_num = reg_num;
+ ret = __ECNT_HOOK(ECNT_ETHER_PHY, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+
+ return in_data.retValue;
+}
+#if defined(TCSUPPORT_CPU_EN7512) || defined(TCSUPPORT_CPU_EN7521)
+#if !defined(TCSUPPORT_CPU_EN7516) && !defined(TCSUPPORT_CPU_EN7527)
+/* int32 mt7512FEReadProbe(uint8 port_num, uint8 mode) */
+static inline int EPHY_API_MT7512FE_READ_PROBE(uint8 port_num, uint8 mode){
+ ECNT_EPHY_Data_s in_data;
+ int ret=0;
+ in_data.function_id = EPHY_FUNCTION_MT7512FE_READ_PROBE;
+ in_data.ephy_private.probe.port_num = port_num;
+ in_data.ephy_private.probe.mode = mode;
+ ret = __ECNT_HOOK(ECNT_ETHER_PHY, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+
+ return in_data.retValue;
+}
+
+/* uint16 mt7512FEReadSnrSum(uint8 port_num, uint16 cnt) */
+static inline int EPHY_API_MT7512FE_SNR_SUM(uint8 port_num, uint16 cnt){
+ ECNT_EPHY_Data_s in_data;
+ int ret=0;
+ in_data.function_id = EPHY_FUNCTION_MT7512FE_SNR_SUM;
+ in_data.ephy_private.snr.port_num = port_num;
+ in_data.ephy_private.snr.cnt = cnt;
+ ret = __ECNT_HOOK(ECNT_ETHER_PHY, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+
+ return in_data.retValue;
+
+}
+
+/* int32 mt7512FEReadAdcSum(uint8 port_num); */
+static inline int EPHY_API_MT7512_FE_READ_ADC_SUM(uint8 port_num){
+ ECNT_EPHY_Data_s in_data;
+ int ret=0;
+ in_data.function_id = EPHY_FUNCTION_MT7512_FE_READ_ADC_SUM;
+ in_data.ephy_private.adc.port_num = port_num;
+
+ ret = __ECNT_HOOK(ECNT_ETHER_PHY, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+
+ return in_data.retValue;
+}
+
+#endif
+#endif
+/* void ephySLTMode(uint8 port_num, uint16 speed_mode); */
+static inline int EPHY_API_SLT_MODE(uint8 port_num, uint16 speed_mode){
+ ECNT_EPHY_Data_s in_data;
+ int ret=0;
+ in_data.function_id = EPHY_FUNCTION_SLT_MODE;
+ in_data.ephy_private.slt.port_num = port_num;
+ in_data.ephy_private.slt.speed_mode = speed_mode;
+
+ ret = __ECNT_HOOK(ECNT_ETHER_PHY, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+
+ if(ret != ECNT_HOOK_ERROR)
+ return ECNT_CONTINUE;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+
+#endif /*_TCETHERPHY_HOOK_H*/
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_ether.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_ether.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,429 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation ("EcoNet Software")
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited ("EcoNet") and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS ("ECONET SOFTWARE") RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN "AS IS"
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER'S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_ETHER_H
+#define _ECNT_HOOK_ETHER_H
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/net.h>
+#include <linux/if.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <ecnt_hook/ecnt_hook.h>
+#include <modules/eth_global_def.h>
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+#define ECNT_DRIVER_API 0
+
+#ifdef TCSUPPORT_MAX_PACKET_2000
+#define GDM1_LONG_LEN_VALUE 4004 //(RX_MAX_PKT_LEN -20),RX_MAX_PKT_LEN is defined in femac.c,20 is used for 4*VLAN,
+#else
+#define GDM1_LONG_LEN_VALUE 1700 //(RX_MAX_PKT_LEN-20),RX_MAX_PKT_LEN is defined in femac.c,20 is used for 4*VLAN
+#endif
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+
+
+/* Warning: same sequence with function array 'qdma_operation' */
+typedef enum {
+ ETHER_FUNCTION_MDIO_READ,
+ ETHER_FUNCTION_MDIO_WRITE,
+ ETHER_FUNCTION_MAC_SNED,
+ ETHER_FUNCTION_GSW_PBUS_READ,
+ ETHER_FUNCTION_GSW_PBUS_WRITE,
+ ETHER_FUNCTION_EXT_GSW_PBUS_READ,
+ ETHER_FUNCTION_EXT_GSW_PBUS_WRITE,
+ ETHER_FUNCTION_SET_RATELIMIT_SWITCH,
+ ETHER_FUNCTION_SET_MACTABLE_SYNC_EN,
+ ETHER_FUNCTION_RGMII_SETTING,
+ ETHER_FUNCTION_RGMII_MODE,
+ ETHER_FUNCTION_SET_PORT_MATRIX,
+ ETHER_FUNCTION_GET_PORT_MAP,
+ ETHER_FUNCTION_SET_PORT_LINKSTATE,
+ ETHER_FUNCTION_SET_PER_VLAN_ACTION,
+ ETHER_FUNCTION_SET_PER_PORT_VLAN_ACTION,
+ ETHER_FUNCTION_MAC_AUTOBENCH_LOOPBACK,
+ ETHER_FUNCTION_MAX_NUM,
+} ETHER_HookFunction_t ;
+
+typedef enum{
+ PORT_LINK_DOWN = 0,
+ PORT_LINK_UP,
+}ETHER_PORT_LINKSTATE;
+
+typedef struct ECNT_ETHER_Data {
+ ETHER_HookFunction_t function_id; /* need put at first item */
+ int retValue;
+
+ union {
+ struct {
+ uint add;
+ uint reg;
+ uint data;
+ } phy;
+ struct sk_buff *skb;
+ struct
+ {
+ u8 wan_type;
+ u8 interface;
+ u8 mode;
+ }traffic_setting;
+ unsigned char ratelimit_En ;
+ unsigned char enable ;
+ struct
+ {
+ u8 portMatrixGroup[4];
+ int type;
+ }matrix_setting;
+ struct
+ {
+ unsigned char lan_port;
+ unsigned char switch_port;
+ }port_map;
+ struct
+ {
+ unsigned char port_no;
+ unsigned char linkstate;
+ }port_state;
+ struct
+ {
+ unsigned char port_id;
+ unsigned int o_vid;
+ unsigned int n_vid;
+ ECNT_SWITCH_VLAN_MODE vlan_mode;
+ unsigned char enable;
+ }vlantable_setting;
+
+ } ether_private;
+}ECNT_ETHER_Data_s;
+
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+ I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+static inline int ETHER_MDIO_READ(unsigned int phy_add, unsigned int phy_reg){
+ ECNT_ETHER_Data_s in_data;
+ int ret=0;
+
+ in_data.function_id = ETHER_FUNCTION_MDIO_READ;
+ in_data.ether_private.phy.add = phy_add;
+ in_data.ether_private.phy.reg = phy_reg;
+ ret = __ECNT_HOOK(ECNT_ETHER_SWITCH, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.ether_private.phy.data;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int ETHER_MDIO_WRITE(unsigned int phy_add, unsigned int phy_reg, unsigned int phy_data){
+ ECNT_ETHER_Data_s in_data;
+ int ret=0;
+
+ in_data.function_id = ETHER_FUNCTION_MDIO_WRITE;
+ in_data.ether_private.phy.add = phy_add;
+ in_data.ether_private.phy.reg = phy_reg;
+ in_data.ether_private.phy.data = phy_data;
+ ret = __ECNT_HOOK(ECNT_ETHER_SWITCH, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int ETHER_MAC_SEND(struct sk_buff *skb){
+
+ ECNT_ETHER_Data_s in_data;
+ int ret=0;
+ in_data.function_id = ETHER_FUNCTION_MAC_SNED;
+ in_data.ether_private.skb = skb;
+
+ ret = __ECNT_HOOK(ECNT_ETHER_SWITCH, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int ETHER_API_GSW_PBUS_READ(unsigned int pbus_addr){
+ ECNT_ETHER_Data_s in_data;
+ int ret=0;
+
+ in_data.function_id = ETHER_FUNCTION_GSW_PBUS_READ;
+ in_data.ether_private.phy.add = pbus_addr;
+
+ ret = __ECNT_HOOK(ECNT_ETHER_SWITCH, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.ether_private.phy.data;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int ETHER_API_GSW_PBUS_WRITE(unsigned int pbus_addr, unsigned int pbus_data){
+ ECNT_ETHER_Data_s in_data;
+ int ret=0;
+
+ in_data.function_id = ETHER_FUNCTION_GSW_PBUS_WRITE;
+ in_data.ether_private.phy.add = pbus_addr;
+ in_data.ether_private.phy.data = pbus_data;
+
+ ret = __ECNT_HOOK(ECNT_ETHER_SWITCH, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int ETHER_API_EXT_GSW_PBUS_READ(unsigned int pbus_addr){
+ ECNT_ETHER_Data_s in_data;
+ int ret=0;
+
+ in_data.function_id = ETHER_FUNCTION_EXT_GSW_PBUS_READ;
+ in_data.ether_private.phy.add = pbus_addr;
+
+ ret = __ECNT_HOOK(ECNT_ETHER_SWITCH, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.ether_private.phy.data;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int ETHER_API_EXT_GSW_PBUS_WRITE(unsigned int pbus_addr, unsigned int pbus_data){
+ ECNT_ETHER_Data_s in_data;
+ int ret=0;
+
+ in_data.function_id = ETHER_FUNCTION_EXT_GSW_PBUS_WRITE;
+ in_data.ether_private.phy.add = pbus_addr;
+ in_data.ether_private.phy.data = pbus_data;
+
+ ret = __ECNT_HOOK(ECNT_ETHER_SWITCH, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int ETHER_SET_RATELIMIT_SWITCH(unsigned char enable){
+
+ ECNT_ETHER_Data_s in_data;
+ int ret=0;
+ in_data.function_id = ETHER_FUNCTION_SET_RATELIMIT_SWITCH ;
+ in_data.ether_private.ratelimit_En = enable ;
+
+ ret = __ECNT_HOOK(ECNT_ETHER_SWITCH, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int ETHER_SET_MACTABLE_SYNC_EN(unsigned char enable){
+
+ ECNT_ETHER_Data_s in_data;
+ int ret=0;
+ in_data.function_id = ETHER_FUNCTION_SET_MACTABLE_SYNC_EN ;
+ in_data.ether_private.ratelimit_En = enable ;
+
+ ret = __ECNT_HOOK(ECNT_ETHER_SWITCH, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int ETHER_API_TRGMII_SETTING(unsigned char wan_type, unsigned char interface)
+{
+ ECNT_ETHER_Data_s in_data;
+ int ret=0;
+
+ in_data.function_id = ETHER_FUNCTION_RGMII_SETTING;
+ in_data.ether_private.traffic_setting.wan_type = wan_type;
+ in_data.ether_private.traffic_setting.interface = interface;
+
+ ret = __ECNT_HOOK(ECNT_ETHER_SWITCH, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int ETHER_API_RGMII_MODE(unsigned char mode)
+{
+ ECNT_ETHER_Data_s in_data;
+ int ret=0;
+
+ in_data.function_id = ETHER_FUNCTION_RGMII_MODE;
+ in_data.ether_private.traffic_setting.interface = RGMII;
+ in_data.ether_private.traffic_setting.mode = mode;
+
+ ret = __ECNT_HOOK(ECNT_ETHER_SWITCH, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int ETHER_API_SET_PORTMATRIX(unsigned char group[4], int type)
+{
+ ECNT_ETHER_Data_s in_data;
+ int ret=0;
+ int i = 0;
+
+ in_data.function_id = ETHER_FUNCTION_SET_PORT_MATRIX;
+ for(i = 0;i < 4;i++){
+ in_data.ether_private.matrix_setting.portMatrixGroup[i] = group[i];
+ }
+ in_data.ether_private.matrix_setting.type = type;
+ ret = __ECNT_HOOK(ECNT_ETHER_SWITCH, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int ETHER_API_GET_PORTMAP(unsigned char lan_port,unsigned char* switch_port)
+{
+ ECNT_ETHER_Data_s in_data;
+ int ret=0;
+
+ in_data.function_id = ETHER_FUNCTION_GET_PORT_MAP;
+ in_data.ether_private.port_map.lan_port = lan_port;
+
+ ret = __ECNT_HOOK(ECNT_ETHER_SWITCH, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+
+ *switch_port = in_data.ether_private.port_map.switch_port;
+
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int ETHER_API_SET_PORT_LINKSTATE(unsigned char port_no,unsigned char linkstate)
+{
+ ECNT_ETHER_Data_s in_data;
+ int ret=0;
+
+ in_data.function_id = ETHER_FUNCTION_SET_PORT_LINKSTATE;
+ in_data.ether_private.port_state.port_no = port_no;
+ in_data.ether_private.port_state.linkstate = linkstate;
+
+ ret = __ECNT_HOOK(ECNT_ETHER_SWITCH, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int ETHER_API_PER_VLAN_ACTION(unsigned char port_id, unsigned int o_vid, unsigned int n_vid, ECNT_SWITCH_VLAN_MODE vlan_mode, unsigned char enable)
+{
+ ECNT_ETHER_Data_s in_data;
+ int ret=0;
+
+ in_data.function_id = ETHER_FUNCTION_SET_PER_VLAN_ACTION;
+ in_data.ether_private.vlantable_setting.port_id = port_id;
+ in_data.ether_private.vlantable_setting.o_vid = o_vid;
+ in_data.ether_private.vlantable_setting.n_vid = n_vid;
+ in_data.ether_private.vlantable_setting.vlan_mode = vlan_mode;
+ in_data.ether_private.vlantable_setting.enable = enable;
+
+ ret = __ECNT_HOOK(ECNT_ETHER_SWITCH, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int ETHER_API_PER_PORT_VLAN_ACTION(unsigned char port_id, unsigned int n_vid, ECNT_SWITCH_VLAN_MODE vlan_mode, unsigned char enable)
+{
+ ECNT_ETHER_Data_s in_data;
+ int ret=0;
+
+ in_data.function_id = ETHER_FUNCTION_SET_PER_PORT_VLAN_ACTION;
+ in_data.ether_private.vlantable_setting.port_id = port_id;
+ in_data.ether_private.vlantable_setting.n_vid = n_vid;
+ in_data.ether_private.vlantable_setting.vlan_mode = vlan_mode;
+ in_data.ether_private.vlantable_setting.enable = enable;
+
+ ret = __ECNT_HOOK(ECNT_ETHER_SWITCH, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int ETHER_MAC_AUTOBENCH_LOOPBACK(void)
+{
+ ECNT_ETHER_Data_s in_data;
+ int ret=0;
+
+ in_data.function_id = ETHER_FUNCTION_MAC_AUTOBENCH_LOOPBACK;
+
+ ret = __ECNT_HOOK(ECNT_ETHER_SWITCH, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+#endif /*_ETHER_HOOK_H*/
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_event.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_event.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,85 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation ("EcoNet Software")
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited ("EcoNet") and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS ("ECONET SOFTWARE") RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN "AS IS"
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER'S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_EVENT_H
+#define _ECNT_HOOK_EVENT_H
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+
+#include <ecnt_hook/ecnt_hook.h>
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+typedef enum {
+ ECNT_HOOK_EVENT_SUB,
+ ECNT_HOOK_EVENT_MAX,
+}Ecnt_Event_SubType_t;
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+ I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+static inline int ECNT_EVENT_BROADCAST(struct sk_buff *skb){
+ int ret;
+
+ ret = __ECNT_HOOK(ECNT_HOOK_EVENT, ECNT_HOOK_EVENT_SUB, (struct ecnt_data *)skb);
+ if(ret == ECNT_HOOK_ERROR)
+ return ECNT_HOOK_ERROR;
+ return ECNT_CONTINUE;
+}
+
+#endif /*_ECNT_HOOK_EVENT_H*/
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_fe.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_fe.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,1108 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER'S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_FE_H_
+#define _ECNT_HOOK_FE_H_
+
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include "ecnt_hook.h"
+#include "ecnt_hook_fe_type.h"
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+# I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+static inline int FE_API_SET_PACKET_LENGTH(FE_Gdma_Sel_t _gdm_sel, uint _length_long, uint _length_short) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_PACKET_LENGTH;
+ in_data.gdm_sel = _gdm_sel;
+ in_data.api_data.pkt_len.length_long = _length_long;
+ in_data.api_data.pkt_len.length_short = _length_short;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_CHANNEL_ENABLE(FE_Gdma_Sel_t _gdm_sel, FE_TXRX_Sel_t _txrx_sel, unchar _channel, FE_Enable_t _enable) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_CHANNEL_ENABLE;
+ in_data.gdm_sel = _gdm_sel;
+ in_data.txrx_sel = _txrx_sel;
+ in_data.channel = _channel;
+ in_data.api_data.enable = _enable;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_MAC_ADDR(FE_Gdma_Sel_t _gdm_sel, unchar *_mac, ushort _mask) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_MAC_ADDR;
+ in_data.gdm_sel = _gdm_sel;
+ in_data.api_data.mac_addr.mac = _mac;
+ in_data.api_data.mac_addr.mask = _mask;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_MAC_ADDR_7516(FE_MacSet_Sel_t _macSet_sel, unsigned int _mac_h, unsigned int _mac_lmin,unsigned int _mac_lmax) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_MAC_ADDR_7516;
+ in_data.macSet_sel = _macSet_sel;
+ in_data.api_data.mac_addr_7516.mac_h = _mac_h;
+ in_data.api_data.mac_addr_7516.mac_lmin = _mac_lmin;
+ in_data.api_data.mac_addr_7516.mac_lmax = _mac_lmax;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_WAN_PORT_7516(FE_Enable_t wan1_en, FE_WanPort_Sel_t wan1_port, FE_WanPort_Sel_t wan0_port) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_WAN_PORT_7516;
+ in_data.api_data.wan_port_7516.wan1_en = wan1_en;
+ in_data.api_data.wan_port_7516.wan1_port = wan1_port;
+ in_data.api_data.wan_port_7516.wan0_port = wan0_port;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_HWFWD_CHANNEL(FE_Cdma_Sel_t _cdm_sel, unchar _channel, FE_Enable_t _enable) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_HWFWD_CHANNEL;
+ in_data.cdm_sel = _cdm_sel;
+ in_data.channel = _channel;
+ in_data.api_data.enable = _enable;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_CHANNEL_RETIRE(FE_Gdma_Sel_t _gdm_sel, unchar _channel, FE_Linkup_t _mode) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_CHANNEL_RETIRE;
+ in_data.gdm_sel = _gdm_sel;
+ in_data.channel = _channel;
+ in_data.api_data.link_mode = _mode;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_CRC_STRIP(FE_Gdma_Sel_t _gdm_sel, FE_Enable_t _enable) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_CRC_STRIP;
+ in_data.gdm_sel = _gdm_sel;
+ in_data.api_data.enable = _enable;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_PADDING(FE_Enable_t _enable) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_PADDING;
+ in_data.api_data.enable = _enable;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_EXT_TPID(uint _tpid) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_EXT_TPID;
+ in_data.reg_val = _tpid;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_GET_EXT_TPID(uint *_tpid) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_GET_EXT_TPID;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ *_tpid = in_data.reg_val;
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_FW_CFG(FE_Gdma_Sel_t _gdm_sel, FE_Frame_type_t _dp_sel, FE_Frame_dp_t _dp_type) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_FW_CFG;
+ in_data.gdm_sel = _gdm_sel;
+ in_data.api_data.fw_cfg.dp_sel = _dp_sel;
+ in_data.api_data.fw_cfg.dp_val = _dp_type;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_GET_FW_CFG(FE_Gdma_Sel_t _gdm_sel, uint* pval) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_GET_FW_CFG;
+ in_data.gdm_sel = _gdm_sel;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ *pval = in_data.reg_val;
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_DROP_UDP_CHKSUM_ERR_ENABLE(FE_Gdma_Sel_t _gdm_sel, FE_Enable_t _enable) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_DROP_UDP_CHKSUM_ERR_ENABLE;
+ in_data.gdm_sel = _gdm_sel;
+ in_data.api_data.enable = _enable;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_DROP_TCP_CHKSUM_ERR_ENABLE(FE_Gdma_Sel_t _gdm_sel, FE_Enable_t _enable) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_DROP_TCP_CHKSUM_ERR_ENABLE;
+ in_data.gdm_sel = _gdm_sel;
+ in_data.api_data.enable = _enable;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+static inline int FE_API_SET_DROP_IP_CHKSUM_ERR_ENABLE(FE_Gdma_Sel_t _gdm_sel, FE_Enable_t _enable) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_DROP_IP_CHKSUM_ERR_ENABLE;
+ in_data.gdm_sel = _gdm_sel;
+ in_data.api_data.enable = _enable;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_DROP_CRC_ERR_ENABLE(FE_Gdma_Sel_t _gdm_sel, FE_Enable_t _enable) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_DROP_CRC_ERR_ENABLE;
+ in_data.gdm_sel = _gdm_sel;
+ in_data.api_data.enable = _enable;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_DROP_RUNT_ENABLE(FE_Gdma_Sel_t _gdm_sel, FE_Enable_t _enable) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_DROP_RUNT_ENABLE;
+ in_data.gdm_sel = _gdm_sel;
+ in_data.api_data.enable = _enable;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_DROP_LONG_ENABLE(FE_Gdma_Sel_t _gdm_sel, FE_Enable_t _enable) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_DROP_LONG_ENABLE;
+ in_data.gdm_sel = _gdm_sel;
+ in_data.api_data.enable = _enable;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_VLAN_CHECK(FE_Gdma_Sel_t _gdm_sel, FE_Enable_t _enable) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_VLAN_CHECK;
+ in_data.gdm_sel = _gdm_sel;
+ in_data.api_data.enable = _enable;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_GET_OK_CNT(FE_Gdma_Sel_t _gdm_sel, FE_TXRX_Sel_t _txrx_sel, uint *_cnt) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_GET_OK_CNT;
+ in_data.gdm_sel = _gdm_sel;
+ in_data.txrx_sel = _txrx_sel;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ *_cnt = in_data.cnt;
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_GET_RX_CRC_ERR_CNT(uint *_cnt) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_GET_RX_CRC_ERR_CNT;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ *_cnt = in_data.cnt;
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_GET_RX_DROP_FIFO_CNT(FE_Gdma_Sel_t _gdm_sel, uint *_cnt) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_GET_RX_DROP_FIFO_CNT;
+ in_data.gdm_sel = _gdm_sel;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ *_cnt = in_data.cnt;
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+static inline int FE_API_GET_RX_DROP_ERR_CNT(FE_Gdma_Sel_t _gdm_sel, uint *_cnt) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_GET_RX_DROP_ERR_CNT;
+ in_data.gdm_sel = _gdm_sel;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ *_cnt = in_data.cnt;
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+static inline int FE_API_GET_OK_BYTE_CNT(FE_Gdma_Sel_t _gdm_sel, FE_TXRX_Sel_t _txrx_sel, uint *_cnt) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_GET_OK_BYTE_CNT;
+ in_data.gdm_sel = _gdm_sel;
+ in_data.txrx_sel = _txrx_sel;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ *_cnt = in_data.cnt;
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_GET_TX_GET_CNT(FE_Gdma_Sel_t _gdm_sel, uint *_cnt) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_GET_TX_GET_CNT;
+ in_data.gdm_sel = _gdm_sel;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ *_cnt = in_data.cnt;
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_GET_TX_DROP_CNT(FE_Gdma_Sel_t _gdm_sel, uint *_cnt) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_GET_TX_DROP_CNT;
+ in_data.gdm_sel = _gdm_sel;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ *_cnt = in_data.cnt;
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_GET_TIEM_STAMP(uint *_cnt) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_GET_TIEM_STAMP;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ *_cnt = in_data.cnt;
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_TIME_STAMP(uint ts) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_TIEM_STAMP;
+ in_data.timeStamp = ts & 0xffff;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_INS_VLAN_TPID(FE_Gdma_Sel_t _gdm_sel, uint _tpid) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_INS_VLAN_TPID;
+ in_data.gdm_sel = _gdm_sel;
+ in_data.reg_val = _tpid;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+static inline int FE_API_SET_VLAN_ENABLE(FE_Gdma_Sel_t _gdm_sel, FE_Enable_t _enable) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_VLAN_ENABLE;
+ in_data.gdm_sel = _gdm_sel;
+ in_data.api_data.enable = _enable;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_BLACK_LIST(FE_Enable_t _enable) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_BLACK_LIST;
+ in_data.api_data.enable = _enable;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_ETHER_TYEP(uint _index, FE_Enable_t _enable, FE_PPPOE_t _is_pppoe, uint _value) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_ETHER_TYEP;
+ in_data.index = _index;
+ in_data.api_data.eth_cfg.enable = _enable;
+ in_data.api_data.eth_cfg.is_pppoe = _is_pppoe;
+ in_data.api_data.eth_cfg.value = _value;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_L2U_KEY(uint _index, FE_L2U_KEY_t _key_sel, uint _key0, uint _key1) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_L2U_KEY;
+ in_data.index = _index;
+ in_data.api_data.l2u_key.key_sel = _key_sel;
+ in_data.api_data.l2u_key.key0 = _key0;
+ in_data.api_data.l2u_key.key1 = _key1;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_GET_AC_GROUP_PKT_CNT(uint _index, uint *_cnt) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_GET_AC_GROUP_PKT_CNT;
+ in_data.index = _index;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ *_cnt = in_data.cnt;
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_GET_AC_GROUP_BYTE_CNT(uint _index, uint *_cnt, uint *_cnt_hi) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_GET_AC_GROUP_BYTE_CNT;
+ in_data.index = _index;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ *_cnt = in_data.cnt;
+ *_cnt_hi = in_data.cnt_hi;
+
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_METER_GROUP(uint _index, uint _value) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_METER_GROUP;
+ in_data.index = _index;
+ in_data.reg_val = _value;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_GDM_PCP_CODING(FE_Gdma_Sel_t _gdm_sel, FE_TXRX_Sel_t _txrx_sel, FE_PcpMode_t mode) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_GDM_PCP_CODING;
+ in_data.gdm_sel = _gdm_sel;
+ in_data.txrx_sel = _txrx_sel;
+ in_data.api_data.coding_mode = mode;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_CDM_PCP_CODING(FE_Gdma_Sel_t _gdm_sel, FE_TXRX_Sel_t _txrx_sel, FE_PcpMode_t mode) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_CDM_PCP_CODING;
+ in_data.gdm_sel = _gdm_sel;
+ in_data.txrx_sel = _txrx_sel;
+ in_data.api_data.coding_mode = mode;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_VIP_ENABLE(uint _index, FE_Enable_t _enable, FE_Patn_type _patten_type, uint _patten) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_VIP_ENABLE;
+ in_data.index = _index;
+ in_data.api_data.vip_cfg.enable = _enable;
+ in_data.api_data.vip_cfg.patten_type = _patten_type;
+ in_data.api_data.vip_cfg.patten = _patten;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_GET_ETH_RX_CNT(FE_RxCnt_t *_rxCnt) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_GET_ETH_RX_CNT;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ *_rxCnt = in_data.api_data.FE_RxCnt;
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_GET_ETH_TX_CNT(FE_TxCnt_t *_txCnt) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_GET_ETH_TX_CNT;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ *_txCnt = in_data.api_data.FE_TxCnt;
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_GET_ETH_FRAME_CNT(FE_TXRX_Sel_t _txrx_sel, uint *_cnt) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_GET_ETH_FRAME_CNT;
+ in_data.txrx_sel = _txrx_sel;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ *_cnt = in_data.cnt;
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_GET_ETH_ERR_CNT(FE_Err_type_t _type, uint *_cnt) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_GET_ETH_ERR_CNT;
+ in_data.err_type = _type;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ *_cnt = in_data.cnt;
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_CLEAR_MIB(FE_Gdma_Sel_t _gdm_sel, FE_TXRX_Sel_t _txrx_sel) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_CLEAR_MIB;
+ in_data.gdm_sel = _gdm_sel;
+ in_data.txrx_sel = _txrx_sel;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_CDM_RX_RED_DROP(FE_Cdma_Sel_t _cdm_sel, FE_RedDropQ_Sel_t _dropQ_sel, FE_RedDropMode_Sel_t _dropMode_sel) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_CDM_RX_RED_DROP;
+ in_data.cdm_sel = _cdm_sel;
+ in_data.dropQ_sel = _dropQ_sel;
+ in_data.dropMode_sel = _dropMode_sel;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_GET_CDM_RX_RED_DROP(FE_Cdma_Sel_t _cdm_sel, FE_RedDropQ_Sel_t _dropQ_sel, FE_RedDropMode_Sel_t *_dropMode_sel_p) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_GET_CDM_RX_RED_DROP;
+ in_data.cdm_sel = _cdm_sel;
+ in_data.dropQ_sel = _dropQ_sel;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ *_dropMode_sel_p = in_data.dropMode_sel;
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_CHANNEL_RETIRE_ALL(FE_Gdma_Sel_t gdm_sel, unchar channel) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_CHANNEL_RETIRE_ALL;
+ in_data.gdm_sel = gdm_sel;
+ in_data.channel = channel;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_CHANNEL_RETIRE_ONE(FE_Gdma_Sel_t gdm_sel, unchar channel) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_CHANNEL_RETIRE_ONE;
+ in_data.gdm_sel = gdm_sel;
+ in_data.channel = channel;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_TX_RATE(uint rate,uint mode,uint maxBkSzie,uint tick)
+{
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_TX_RATE;
+ in_data.api_data.rate_cfg.rate = rate;
+ in_data.api_data.rate_cfg.mode = mode;
+ in_data.api_data.rate_cfg.maxBkSize = maxBkSzie;
+ in_data.api_data.rate_cfg.tick = tick;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_RXUC_RATE(uint rate,uint mode)
+{
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_RXUC_RATE;
+ in_data.api_data.rate_cfg.rate = rate;
+ in_data.api_data.rate_cfg.mode = mode;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_RXBC_RATE(uint rate,uint mode)
+{
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_RXBC_RATE;
+ in_data.api_data.rate_cfg.rate = rate;
+ in_data.api_data.rate_cfg.mode = mode;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_RXMC_RATE(uint rate,uint mode)
+{
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_RXMC_RATE;
+ in_data.api_data.rate_cfg.rate = rate;
+ in_data.api_data.rate_cfg.mode = mode;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_RXOC_RATE(uint rate,uint mode)
+{
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_RXOC_RATE;
+ in_data.api_data.rate_cfg.rate = rate;
+ in_data.api_data.rate_cfg.mode = mode;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_ADD_VIP_ETHER(ushort type)
+{
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_ADD_VIP_ETHER;
+ in_data.api_data.vip.type = type;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_ADD_VIP_PPP(ushort type)
+{
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_ADD_VIP_PPP;
+ in_data.api_data.vip.type = type;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_ADD_VIP_IP(ushort type)
+{
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_ADD_VIP_IP;
+ in_data.api_data.vip.type = type;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_ADD_VIP_TCP(ushort src,ushort dst,unchar mode)
+{
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_ADD_VIP_TCP;
+ in_data.api_data.vip.src = src;
+ in_data.api_data.vip.dst = dst;
+ in_data.api_data.vip.mode = mode;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_ADD_VIP_UDP(ushort src,ushort dst)
+{
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_ADD_VIP_UDP;
+ in_data.api_data.vip.src = src;
+ in_data.api_data.vip.dst = dst;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+
+static inline int FE_API_DEL_VIP_ETHER(ushort type)
+{
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_DEL_VIP_ETHER;
+ in_data.api_data.vip.type = type;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_DEL_VIP_PPP(ushort type)
+{
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_DEL_VIP_PPP;
+ in_data.api_data.vip.type = type;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_DEL_VIP_IP(ushort type)
+{
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_DEL_VIP_IP;
+ in_data.api_data.vip.type = type;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_DEL_VIP_TCP(ushort src,ushort dst,unchar mode)
+{
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_DEL_VIP_TCP;
+ in_data.api_data.vip.src = src;
+ in_data.api_data.vip.dst = dst;
+ in_data.api_data.vip.mode = mode;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_DEL_VIP_UDP(ushort src,ushort dst)
+{
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_DEL_VIP_UDP;
+ in_data.api_data.vip.src = src;
+ in_data.api_data.vip.dst = dst;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+
+
+static inline int FE_API_ADD_L2LU_VLAN_DSCP(ushort svlan,ushort cvlan,unchar dscp,unchar mask)
+{
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_ADD_L2LU_VLAN_DSCP;
+ in_data.api_data.l2lu.svlan = svlan;
+ in_data.api_data.l2lu.cvlan = cvlan;
+ in_data.api_data.l2lu.dscp = dscp;
+ in_data.api_data.l2lu.mask = mask;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_ADD_L2LU_VLAN_TRFC(ushort svlan,ushort cvlan,unchar trfc,unchar mask)
+{
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_ADD_L2LU_VLAN_TRFC;
+ in_data.api_data.l2lu.svlan = svlan;
+ in_data.api_data.l2lu.cvlan = cvlan;
+ in_data.api_data.l2lu.dscp = trfc;
+ in_data.api_data.l2lu.mask = mask;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_DEL_L2LU_VLAN_DSCP(ushort svlan,ushort cvlan,unchar dscp,unchar mask)
+{
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_DEL_L2LU_VLAN_DSCP;
+ in_data.api_data.l2lu.svlan = svlan;
+ in_data.api_data.l2lu.cvlan = cvlan;
+ in_data.api_data.l2lu.dscp = dscp;
+ in_data.api_data.l2lu.mask = mask;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_DEL_L2LU_VLAN_TRFC(ushort svlan,ushort cvlan,unchar trfc,unchar mask)
+{
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_DEL_L2LU_VLAN_TRFC;
+ in_data.api_data.l2lu.svlan = svlan;
+ in_data.api_data.l2lu.cvlan = cvlan;
+ in_data.api_data.l2lu.dscp = trfc;
+ in_data.api_data.l2lu.mask = mask;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_SET_TX_FAVOR_OAM_ENABLE(FE_Enable_t _enable) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_SET_TX_FAVOR_OAM_ENABLE;
+ in_data.api_data.enable = _enable;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_TLS_FORWARD(void* skb,FE_Tls_forward_direction_t dir) {
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_TLS_FORWARD;
+ in_data.api_data.tls_forward.skb = skb;
+ in_data.api_data.tls_forward.dir = dir;
+
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int FE_API_DO_FE_RESET(FE_Reset_mode_t reset_mode)
+{
+ struct ecnt_fe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = FE_DO_RESET;
+ in_data.api_data.reset_mode = reset_mode;
+ ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+#endif /* _ECNT_HOOK_FE_H_ */
+
+
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_fe_type.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_fe_type.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,433 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVERS SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVERS SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_FE_TYPE_H_
+#define _ECNT_HOOK_FE_TYPE_H_
+
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+#define FE_WAN_RX_UC_RATE 0
+#define FE_WAN_RX_BC_RATE 1
+#define FE_WAN_RX_MC_RATE 2
+#define FE_WAN_RX_OC_RATE 3
+
+#define CDM_TX_ENCODING_SHIFT (0)
+#define CDM_TX_ENCODING_MASK (0xF<<CDM_TX_ENCODING_SHIFT)
+#define CDM_RX_DECODING_SHIFT (4)
+#define CDM_RX_DECODING_MASK (0xF<<CDM_RX_DECODING_SHIFT)
+#define GDM_TX_ENCODING_SHIFT (8)
+#define GDM_TX_ENCODING_MASK (0xF<<GDM_TX_ENCODING_SHIFT)
+#define GDM_RX_DECODING_SHIFT (12)
+#define GDM_RX_DECODING_MASK (0xF<<GDM_RX_DECODING_SHIFT)
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+
+typedef enum {
+ ECNT_FE_API,
+}FE_Api_SubType_t;
+
+typedef enum {
+ FE_GDM_SEL_GDMA1 = 0 ,
+ FE_GDM_SEL_GDMA2
+} FE_Gdma_Sel_t;
+
+typedef enum {
+ FE_MAC_SET_LAN = 0 ,
+ FE_MAC_SET_WAN
+} FE_MacSet_Sel_t;
+
+typedef enum {
+ FE_WANPORT_PSE_P2 = 2 ,
+ FE_WANPORT_GSW_P0 = 8,
+ FE_WANPORT_GSW_P1,
+ FE_WANPORT_GSW_P2,
+ FE_WANPORT_GSW_P3,
+ FE_WANPORT_GSW_P4,
+ FE_WANPORT_GSW_P5
+} FE_WanPort_Sel_t;
+
+typedef enum {
+ FE_CDM_SEL_CDMA1 = 0 ,
+ FE_CDM_SEL_CDMA2
+} FE_Cdma_Sel_t;
+
+typedef enum {
+ FE_RED_DROP_Q0 = 0 ,
+ FE_RED_DROP_Q1L,
+ FE_RED_DROP_Q1H
+} FE_RedDropQ_Sel_t;
+
+typedef enum {
+ FE_RATE_OR_FULL_DROP = 0 ,
+ FE_RATE_OR_THRL_DROP,
+ FE_RATE_AND_FULL_DROP,
+ FE_RATE_AND_THRL_DROP
+} FE_RedDropMode_Sel_t;
+
+typedef enum {
+ FE_GDM_SEL_TX = 0 ,
+ FE_GDM_SEL_RX
+} FE_TXRX_Sel_t;
+
+typedef enum {
+ FE_DISABLE = 0 ,
+ FE_ENABLE
+} FE_Enable_t;
+
+typedef enum {
+ FE_NOT_PPPOE = 0 ,
+ FE_IS_PPPOE
+} FE_PPPOE_t;
+
+typedef enum {
+ ETH_TYPE = 0 ,
+ PPPOE_TYPE,
+ IP_TYPE,
+ TCP_TYPE,
+ UDP_TYPE,
+} FE_Patn_type;
+
+typedef enum {
+ FE_KEY0 = 0 ,
+ FE_KEY1
+} FE_L2U_KEY_t;
+
+ typedef enum {
+ FE_LINKUP = 0,
+ FE_LINKDOWN
+ } FE_Linkup_t;
+
+typedef enum {
+ FE_PCP_MODE_DISABLE = 0,
+ FE_PCP_MODE_8B0D = 1,
+ FE_PCP_MODE_7B1D = 2,
+ FE_PCP_MODE_6B2D = 4,
+ FE_PCP_MODE_5B3D = 8
+} FE_PcpMode_t;
+
+typedef enum {
+ FE_DP_QDMA1_CPU = 0,
+ FE_DP_GDM1,
+ FE_DP_GDM2,
+ FE_DP_QDMA1_HWF,
+ FE_DP_PPE,
+ FE_DP_QDMA2_CPU,
+ FE_DP_QDMA2_HWF,
+ FE_DP_DISCARD
+} FE_Frame_dp_t;
+
+typedef enum {
+ FE_TYPE_OC = 0,
+ FE_TYPE_MC,
+ FE_TYPE_BC,
+ FE_TYPE_UC,
+} FE_Frame_type_t ;
+
+typedef enum {
+ FE_ERR_CRC = 0,
+ FE_ERR_LONG,
+ FE_ERR_RUNT,
+} FE_Err_type_t ;
+
+typedef enum{
+ FE_ADD_TLS_PORT=0,
+ FE_DEL_TLS_PORT,
+ FE_SET_TLS_VLAN,
+ FE_UNSET_TLS_VLAN,
+}FE_Tls_cmd_t;
+
+typedef enum{
+ FE_ADD_TLS_ENTRY=0,
+ FE_DEL_TLS_ENTRY,
+ FE_SHOW_TLS_ENTRY,
+}FE_Tls_Trunk_Mode_Cmd_t;
+
+typedef enum{
+ FE_TLS_FORWARD_UPSTREAM=0,
+ FE_TLS_FORWARD_DOWNSTREAM,
+}FE_Tls_forward_direction_t;
+
+typedef struct
+{
+ unsigned char uni_port_id;/*0~3*/
+ unsigned short uni_vlan;/*2~4094,4095 means not care*/
+ unsigned short gem_port;/*gemport id*/
+ unsigned short ani_vlan;/*2~4094,4095 means not care*/
+}FE_Tls_info_t;
+
+typedef enum{
+ FE_SCU_RESET = 0,
+ FE_CORE_RESET,
+}FE_Reset_mode_t;
+
+typedef enum {
+ FE_SET_PACKET_LENGTH = 0,
+ FE_SET_CHANNEL_ENABLE,
+ FE_SET_MAC_ADDR,
+ FE_SET_HWFWD_CHANNEL,
+ FE_SET_CHANNEL_RETIRE,
+ FE_SET_CRC_STRIP,
+ FE_SET_PADDING,
+ FE_GET_EXT_TPID,
+ FE_SET_EXT_TPID,
+ FE_GET_FW_CFG,
+ FE_SET_FW_CFG,
+ FE_SET_DROP_UDP_CHKSUM_ERR_ENABLE,
+ FE_SET_DROP_TCP_CHKSUM_ERR_ENABLE,
+ FE_SET_DROP_IP_CHKSUM_ERR_ENABLE,
+ FE_SET_DROP_CRC_ERR_ENABLE,
+ FE_SET_DROP_RUNT_ENABLE,
+ FE_SET_DROP_LONG_ENABLE,
+ FE_SET_VLAN_CHECK,
+ FE_GET_OK_CNT,
+ FE_GET_RX_CRC_ERR_CNT,
+ FE_GET_RX_DROP_FIFO_CNT,
+ FE_GET_RX_DROP_ERR_CNT,
+ FE_GET_OK_BYTE_CNT,
+ FE_GET_TX_GET_CNT,
+ FE_GET_TX_DROP_CNT,
+ FE_GET_TIEM_STAMP,
+ FE_SET_TIEM_STAMP,
+ FE_SET_INS_VLAN_TPID,
+ FE_SET_VLAN_ENABLE,
+ FE_SET_BLACK_LIST,
+ FE_SET_ETHER_TYEP,
+ FE_SET_L2U_KEY,
+ FE_GET_AC_GROUP_PKT_CNT,
+ FE_GET_AC_GROUP_BYTE_CNT,
+ FE_SET_METER_GROUP,
+ FE_GET_METER_GROUP,
+ FE_SET_GDM_PCP_CODING,
+ FE_SET_CDM_PCP_CODING,
+ FE_SET_VIP_ENABLE,
+ FE_GET_ETH_RX_CNT,
+ FE_GET_ETH_TX_CNT,
+ FE_GET_ETH_FRAME_CNT,
+ FE_GET_ETH_ERR_CNT,
+ FE_SET_CLEAR_MIB,
+ FE_SET_CDM_RX_RED_DROP,
+ FE_GET_CDM_RX_RED_DROP,
+ FE_SET_CHANNEL_RETIRE_ALL,
+ FE_SET_CHANNEL_RETIRE_ONE,
+ FE_SET_TX_RATE,
+ FE_SET_RXUC_RATE,
+ FE_SET_RXBC_RATE,
+ FE_SET_RXMC_RATE,
+ FE_SET_RXOC_RATE,
+ FE_ADD_VIP_ETHER,
+ FE_ADD_VIP_PPP,
+ FE_ADD_VIP_IP,
+ FE_ADD_VIP_TCP,
+ FE_ADD_VIP_UDP,
+ FE_DEL_VIP_ETHER,
+ FE_DEL_VIP_PPP,
+ FE_DEL_VIP_IP,
+ FE_DEL_VIP_TCP,
+ FE_DEL_VIP_UDP,
+ FE_ADD_L2LU_VLAN_DSCP,
+ FE_ADD_L2LU_VLAN_TRFC,
+ FE_DEL_L2LU_VLAN_DSCP,
+ FE_DEL_L2LU_VLAN_TRFC,
+ FE_SET_TX_FAVOR_OAM_ENABLE,
+ FE_SET_TLS_CFG,
+ FE_TLS_FORWARD,
+ FE_DO_RESET,
+ FE_SET_MAC_ADDR_7516,
+ FE_SET_WAN_PORT_7516,
+ FE_FUNCTION_MAX_NUM,
+} FE_HookFunctionID_t ;
+
+typedef struct FE_TxCnt_s{
+ uint txFrameCnt;
+ uint txFrameLen;
+ uint txDropCnt;
+ uint txBroadcastCnt;
+ uint txMulticastCnt;
+ uint txLess64Cnt;
+ uint txMore1518Cnt;
+ uint txEq64Cnt;
+ uint txFrom65To127Cnt;
+ uint txFrom128To255Cnt;
+ uint txFrom256To511Cnt;
+ uint txFrom512To1023Cnt;
+ uint txFrom1024To1518Cnt;
+}FE_TxCnt_t;
+
+typedef struct FE_RxCnt_s{
+ uint rxFrameCnt;
+ uint rxFrameLen;
+ uint rxDropCnt;
+ uint rxBroadcastCnt;
+ uint rxMulticastCnt;
+ uint rxCrcCnt;
+ uint rxFragFameCnt;
+ uint rxJabberFameCnt;
+ uint rxLess64Cnt;
+ uint rxMore1518Cnt;
+ uint rxEq64Cnt;
+ uint rxFrom65To127Cnt;
+ uint rxFrom128To255Cnt;
+ uint rxFrom256To511Cnt;
+ uint rxFrom512To1023Cnt;
+ uint rxFrom1024To1518Cnt;
+}FE_RxCnt_t;
+
+struct ecnt_fe_data {
+ FE_HookFunctionID_t function_id;
+ FE_Gdma_Sel_t gdm_sel;
+ FE_MacSet_Sel_t macSet_sel;
+ FE_Cdma_Sel_t cdm_sel;
+ FE_RedDropQ_Sel_t dropQ_sel;
+ FE_RedDropMode_Sel_t dropMode_sel;
+ FE_TXRX_Sel_t txrx_sel;
+ FE_Err_type_t err_type;
+ uint channel;
+ uint index;
+ uint reg_val;
+ uint cnt;
+ uint cnt_hi;
+ uint timeStamp;
+ union {
+ FE_Enable_t enable;
+ FE_Linkup_t link_mode;
+ FE_PcpMode_t coding_mode;
+ uint meter_rate;
+ FE_Reset_mode_t reset_mode;
+ struct {
+ uint byteCnt_L;
+ uint byteCnt_H;
+ }byteCnt;
+ struct {
+ uint length_long;
+ uint length_short;
+ } pkt_len;
+ struct {
+ unchar *mac;
+ ushort mask;
+ } mac_addr;
+ struct {
+ uint mac_h;
+ uint mac_lmin;
+ uint mac_lmax;
+ } mac_addr_7516;
+ struct {
+ FE_Enable_t wan1_en;
+ FE_WanPort_Sel_t wan1_port;
+ FE_WanPort_Sel_t wan0_port;
+ } wan_port_7516;
+ struct {
+ FE_Frame_type_t dp_sel;
+ FE_Frame_dp_t dp_val;
+ } fw_cfg;
+ struct {
+ FE_Enable_t enable;
+ FE_PPPOE_t is_pppoe;
+ uint value;
+ } eth_cfg;
+ struct {
+ FE_L2U_KEY_t key_sel;
+ uint key0;
+ uint key1;
+ } l2u_key;
+ struct {
+ FE_Enable_t enable;
+ FE_Patn_type patten_type;
+ uint patten;
+ } vip_cfg;
+ struct {
+ uint rate;
+ uint mode;
+ uint maxBkSize;
+ uint tick;
+ }rate_cfg;
+ struct {
+ ushort type;
+ ushort src;
+ ushort dst;
+ unchar mode;
+ }vip;
+ struct {
+ unchar mask;
+ unchar dscp;
+ ushort svlan;
+ ushort cvlan;
+ }l2lu;
+ struct{
+ unchar cmd;
+ unchar tls_port;
+ ushort tls_vlan;
+ FE_Tls_info_t tls_info;
+ }tls_cfg;
+ FE_RxCnt_t FE_RxCnt;
+ FE_TxCnt_t FE_TxCnt;
+ struct{
+ void* skb;
+ FE_Tls_forward_direction_t dir;
+ }tls_forward;
+ } api_data;
+ int retValue;
+};
+
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+# I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+
+#endif /* _ECNT_HOOK_QDMA_TYPE_H_ */
+
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_irq_num.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_irq_num.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,88 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVERS SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVERS SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_IRQ_NUM_H_
+#define _ECNT_HOOK_IRQ_NUM_H_
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include "ecnt_hook.h"
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+#define ECNT_DRIVER_API 0
+
+typedef enum {
+ IRQ_NUM_FUNCTION_GET_DMT = 0,
+ IRQ_NUM_FUNCTION_GET_PCM1,
+ IRQ_NUM_FUNCTION_GET_PCM2,
+
+ IRQ_NUM_FUNCTION_MAX_NUM ,
+} IRQ_NUM_HookFunction_t ;
+
+struct ECNT_IRQ_NUM_Data {
+ IRQ_NUM_HookFunction_t function_id; /* need put at first item */
+
+ int irqNum;
+};
+
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+static inline int IRQ_NUM_API_GET(unsigned int mainType, IRQ_NUM_HookFunction_t function_id, unsigned int *irqNum_p){
+ struct ECNT_IRQ_NUM_Data in_data;
+ int ret=0;
+
+ in_data.function_id = function_id;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ *irqNum_p = in_data.irqNum;
+
+ return ECNT_RETURN;
+}
+
+#endif
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_l2tp.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_l2tp.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,124 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (<28><>EcoNet Software<72><65>)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (<28><>EcoNet<65><74>) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (<28><>ECONET SOFTWARE<52><45>) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN <20><>AS IS<49><53>
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER<45><52>S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER<45><52>S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_L2TP_H
+#define _ECNT_HOOK_L2TP_H
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include <ecnt_hook/ecnt_hook.h>
+
+
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+#ifndef uint8
+#define uint8 unsigned char
+#endif
+
+#ifndef uint16
+#define uint16 unsigned short
+#endif
+
+#ifndef uint32
+#define uint32 unsigned long
+#endif
+
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+enum ECNT_L2TP_SUBTYPE
+{
+ ECNT_L2TP_RX_HOOK,
+};
+
+typedef struct
+{
+ struct sk_buff *skb;
+}l2tp_data_s;
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+ I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+
+
+/*___________________________________________________________________________
+** function name: ecnt_l2tp_api_rx_handle
+** descriptions:
+** handle l2tp pkt comes from half hwnat
+** input parameters:
+** skb: data flow
+** output parameters:
+** N/A
+**
+** return:
+** success: > 0
+** failure: -1
+**___________________________________________________________________________
+*/
+int ecnt_l2tp_api_rx_handle(struct sk_buff *skb)
+{
+ int ret = 0;
+ l2tp_data_s in_data;
+
+ in_data.skb = skb;
+
+ ret = __ECNT_HOOK(ECNT_L2TP,ECNT_L2TP_RX_HOOK,(struct ecnt_data *)&in_data);
+
+ return ret;
+}
+
+#endif /* _ECNT_HOOK_L2TP_H */
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_multicast.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_multicast.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,245 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (<28><>EcoNet Software<72><65>)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (<28><>EcoNet<65><74>) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (<28><>ECONET SOFTWARE<52><45>) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN <20><>AS IS<49><53>
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER<45><52>S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER<45><52>S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_MULTICAST_H
+#define _ECNT_HOOK_MULTICAST_H
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include <ecnt_hook/ecnt_hook.h>
+#include "../../modules/private/ecnt_igmp/ecnt_igmp.h"
+
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+#ifndef uint8
+#define uint8 unsigned char
+#endif
+
+#ifndef uint16
+#define uint16 unsigned short
+#endif
+
+#ifndef uint32
+#define uint32 unsigned long
+#endif
+
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+typedef int (*mc_ctrl_packet_rx)(uint8 port_id,unsigned char *data,unsigned int data_len);
+
+enum ECNT_MULTICAST_SUBTYPE
+{
+ ECNT_MC_XMIT_HOOK,
+ ECNT_MC_RECV_HOOK,
+ ECNT_MC_ADD_ENTRY_HOOK,
+ ECNT_MC_DEL_ENTRY_HOOK,
+ ECNT_MC_SET_RX_CB_HOOK,
+ ECNT_MC_VLAN_ONETON_HOOK,
+ ECNT_MC_VLAN_HANDLE_HOOK,
+};
+
+typedef struct
+{
+ uint8 dest_port;
+ uint8 src_port;
+ void *data;
+ unsigned int data_len;
+}ecnt_mc_tx_info;
+
+typedef struct
+{
+ mc_ctrl_packet_rx rx_cb_func;
+}ecnt_mc_rx_info;
+
+typedef struct
+{
+ struct sk_buff *skb;
+ int cpu_reason;
+}ecnt_mc_data_s;
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+ I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+int ecnt_mc_api_set_recv_callback(mc_ctrl_packet_rx rx_cb)
+{
+ int ret = 0;
+
+ if (rx_cb)
+ {
+ ret = __ECNT_HOOK(ECNT_MULTICAST,ECNT_MC_SET_RX_CB_HOOK,(struct ecnt_data *)&rx_cb);
+ }
+
+ return ret;
+}
+
+int ecnt_mc_api_xmit(uint8 dest_port, uint8 src_port, void *data,unsigned int data_len)
+{
+ int ret = 0;
+ ecnt_mc_tx_info in_data;
+
+ memset(&in_data,0,sizeof(ecnt_mc_tx_info));
+
+ in_data.dest_port = dest_port;
+ in_data.src_port = src_port;
+ in_data.data_len = data_len;
+ in_data.data = data;
+
+ ret = __ECNT_HOOK(ECNT_MULTICAST,ECNT_MC_XMIT_HOOK,(struct ecnt_data *)&in_data);
+
+ return ret;
+}
+
+int ecnt_mc_api_entry_add(mc_fwd_tbl_entry *pdata)
+{
+ int ret = 0;
+
+ ret = __ECNT_HOOK(ECNT_MULTICAST,ECNT_MC_ADD_ENTRY_HOOK,(struct ecnt_data *)pdata);
+
+ return ret;
+}
+
+int ecnt_mc_api_entry_del(mc_fwd_tbl_entry *pdata)
+{
+ int ret = 0;
+
+ ret = __ECNT_HOOK(ECNT_MULTICAST,ECNT_MC_DEL_ENTRY_HOOK,(struct ecnt_data *)pdata);
+
+ return ret;
+}
+
+/*___________________________________________________________________________
+** function name: ecnt_mc_api_vlan_IsOneToN
+** descriptions:
+** whether multicast vlan 1:N
+** input parameters:
+** skb: data flow
+** output parameters:
+** N/A
+**
+** return:
+** success: > 0
+** failure: -1
+**___________________________________________________________________________
+*/
+int ecnt_mc_api_vlan_IsOneToN(struct sk_buff *skb)
+{
+ int ret = 0;
+ ecnt_mc_data_s in_data;
+
+ in_data.skb = skb;
+
+ ret = __ECNT_HOOK(ECNT_MULTICAST,ECNT_MC_VLAN_ONETON_HOOK,(struct ecnt_data *)&in_data);
+
+ return ret;
+}
+
+/*___________________________________________________________________________
+** function name: ecnt_mc_api_vlan_handle
+** descriptions:
+** multicast data vlan handle
+** input parameters:
+** skb: data flow
+** output parameters:
+** N/A
+**
+** return:
+** success: > 0
+** failure: -1
+**___________________________________________________________________________
+*/
+int ecnt_mc_api_vlan_handle(struct sk_buff *skb)
+{
+ int ret = 0;
+ ecnt_mc_data_s in_data;
+
+ in_data.skb = skb;
+
+ ret = __ECNT_HOOK(ECNT_MULTICAST,ECNT_MC_VLAN_HANDLE_HOOK,(struct ecnt_data *)&in_data);
+
+ return ret;
+}
+
+
+/*___________________________________________________________________________
+** function name: ecnt_mc_api_recv
+** descriptions:
+** recv packet from ethernet rx
+** input parameters:
+** skb: data flow
+** cpu_reason: foe cpu reason
+** output parameters:
+** N/A
+**
+** return:
+** success: > 0
+** failure: -1
+**___________________________________________________________________________
+*/
+int ecnt_mc_api_recv(struct sk_buff *skb,int cpu_reason)
+{
+ int ret = 0;
+ ecnt_mc_data_s in_data;
+
+ in_data.skb = skb;
+ in_data.cpu_reason = cpu_reason;
+
+ ret = __ECNT_HOOK(ECNT_MULTICAST,ECNT_MC_RECV_HOOK,(struct ecnt_data *)&in_data);
+
+ return ret;
+}
+
+#endif /*_ECNT_HOOK_MULTICAST_H*/
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_multicast_general.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_multicast_general.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,303 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation ("EcoNet Software")
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited ("EcoNet") and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS ("ECONET SOFTWARE") RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN "AS IS"
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER'S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_MULTICAST_GENERAL_H
+#define _ECNT_HOOK_MULTICAST_GENERAL_H
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include <ecnt_hook/ecnt_hook.h>
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+enum ECNT_MULTICAST_GENERAL_SUBTYPE
+{
+ ECNT_MC_XPON_MULTICAST_INTERNAL_HOOK,
+ ECNT_MC_XPON_MULTICAST_EXTERNAL_HOOK
+};
+
+typedef struct
+{
+ unsigned char mac_address_byte [6];
+}ECNT_MAC_ADDRESS ;
+
+typedef struct
+{
+ unsigned char ip_address_array [16] ;
+} ECNT_IPV6_ADDRESS ;
+
+typedef struct
+{
+ union{
+ /* IPv4*/
+ unsigned int ipv4_address ;
+ /* IPv6*/
+ ECNT_IPV6_ADDRESS ipv6_address ;
+ }IP ;
+
+ /*True = ipv6, False = ipv4*/
+ bool is_ipv6 ;
+}ECNT_IP_ADDRESS ;
+
+typedef enum
+{
+ ECNT_VLAN_TRANSPARENT = 0,
+ ECNT_VLAN_REPLACE,
+ ECNT_VLAN_REPLACE_AND_REMARK,
+ ECNT_VLAN_STRIP,
+ ECNT_VLAN_ADD,
+ ECNT_VLAN_ADD_AND_REMARK
+}ECNT_VLAN_MODE;
+
+typedef struct
+{
+ ECNT_VLAN_MODE vlanMode;
+ unsigned short outerVid;
+ unsigned short innerVid;
+ unsigned char outerTci;
+ unsigned char innerTci;
+}ECNT_VLAN_ACTION;
+
+typedef enum
+{
+ ECNT_MC_INTERVAL_FUNCTION_DATA_HANDLE_HOOK = 0,
+ ECNT_MC_INTERVAL_FUNCTION_ADD_TABLE_HOOK,
+ ECNT_MC_INTERVAL_FUNCTION_UPDATE_TABLE_HOOK,
+ ECNT_MC_INTERVAL_FUNCTION_DEL_TABLE_HOOK,
+ ECNT_MC_INTERVAL_FUNCTION_UPDATE_VLAN_ACTION_HOOK,
+ ECNT_MC_INTERNAL_FUNCTION_GET_WIFI_NUM_HOOK,
+ ECNT_MC_INTERVAL_FUNCTION_MAX_NUM ,
+}ECNT_MULTICAST_INTERNAL_FUNC_ID;
+
+typedef enum
+{
+ ECNT_MC_EXTERNAL_FUNCTION_GET_PORTMASK_HOOK = 0,
+ ECNT_MC_EXTERNAL_FUNCTION_GET_VLAN_ACTION_HOOK,
+ ECNT_MC_EXTERVAL_FUNCTION_MAX_NUM ,
+}ECNT_MULTICAST_EXTERNAL_FUNC_ID;
+
+
+typedef struct
+{
+ ECNT_MULTICAST_INTERNAL_FUNC_ID in_function_id;
+ ECNT_MULTICAST_EXTERNAL_FUNC_ID ex_function_id;
+ struct sk_buff *skb;
+ union
+ {
+ struct
+ {
+ unsigned short vid;
+ ECNT_MAC_ADDRESS mac_addr;
+ ECNT_IP_ADDRESS src_ip;
+ ECNT_IP_ADDRESS group_ip;
+ unsigned int *fwd_port_mask;
+ }port_mask;
+ struct
+ {
+ unsigned int port_id;
+ ECNT_IP_ADDRESS group_ip;
+ ECNT_VLAN_ACTION *action;
+ }vlan_action;
+ struct
+ {
+ unsigned int foe_index;
+ unsigned int* wifi_num;
+ unsigned int* port_mask;
+ }wifi_cfg;
+ }mc_private;
+ int retValue;
+}ecnt_mc_general_data_s;
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+ I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+int ECNT_HOOK_MULTICAST_DATA_HANLDE(struct sk_buff *skb)
+{
+ int ret = 0;
+ ecnt_mc_general_data_s in_data;
+
+ in_data.in_function_id = ECNT_MC_INTERVAL_FUNCTION_DATA_HANDLE_HOOK;
+ in_data.skb = skb;
+
+ ret = __ECNT_HOOK(ECNT_MULTICAST_GENERAL,ECNT_MC_XPON_MULTICAST_INTERNAL_HOOK,(struct ecnt_data *)&in_data);
+
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+int ECNT_HOOK_ADD_IPTV_TABLE_ENTRY(void)
+{
+ int ret = 0;
+ ecnt_mc_general_data_s in_data;
+
+ in_data.in_function_id = ECNT_MC_INTERVAL_FUNCTION_ADD_TABLE_HOOK;
+
+ ret = __ECNT_HOOK(ECNT_MULTICAST_GENERAL,ECNT_MC_XPON_MULTICAST_INTERNAL_HOOK,(struct ecnt_data *)&in_data);
+
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+int ECNT_HOOK_UPDATE_IPTV_TABLE_ENTRY(void)
+{
+ int ret = 0;
+ ecnt_mc_general_data_s in_data;
+
+ in_data.in_function_id = ECNT_MC_INTERVAL_FUNCTION_UPDATE_TABLE_HOOK;
+
+ ret = __ECNT_HOOK(ECNT_MULTICAST_GENERAL,ECNT_MC_XPON_MULTICAST_INTERNAL_HOOK,(struct ecnt_data *)&in_data);
+
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+int ECNT_HOOK_DEL_IPTV_TABLE_ENTRY(void)
+{
+ int ret = 0;
+ ecnt_mc_general_data_s in_data;
+
+ in_data.in_function_id = ECNT_MC_INTERVAL_FUNCTION_DEL_TABLE_HOOK;
+
+ ret = __ECNT_HOOK(ECNT_MULTICAST_GENERAL,ECNT_MC_XPON_MULTICAST_INTERNAL_HOOK,(struct ecnt_data *)&in_data);
+
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+int ECNT_HOOK_UPDATE_VLAN_ACTION_ENTRY(void)
+{
+ int ret = 0;
+ ecnt_mc_general_data_s in_data;
+
+ in_data.in_function_id = ECNT_MC_INTERVAL_FUNCTION_UPDATE_VLAN_ACTION_HOOK;
+
+ ret = __ECNT_HOOK(ECNT_MULTICAST_GENERAL,ECNT_MC_XPON_MULTICAST_INTERNAL_HOOK,(struct ecnt_data *)&in_data);
+
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+int ECNT_HOOK_MULTICAST_GET_WIFI_NUM_AND_MASK(unsigned int foe_index, unsigned int* wifi_num,unsigned int* port_mask)
+{
+ int ret = 0;
+ ecnt_mc_general_data_s in_data;
+
+ in_data.in_function_id = ECNT_MC_INTERNAL_FUNCTION_GET_WIFI_NUM_HOOK;
+ in_data.mc_private.wifi_cfg.foe_index = foe_index;
+ in_data.mc_private.wifi_cfg.wifi_num = wifi_num;
+ in_data.mc_private.wifi_cfg.port_mask = port_mask;
+
+ ret = __ECNT_HOOK(ECNT_MULTICAST_GENERAL,ECNT_MC_XPON_MULTICAST_INTERNAL_HOOK,(struct ecnt_data *)&in_data);
+
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+
+int ECNT_HOOK_GET_FWD_PORTMAK(unsigned short vid,ECNT_MAC_ADDRESS mac_addr, ECNT_IP_ADDRESS src_ip,ECNT_IP_ADDRESS group_ip,unsigned int *fwd_port_mask)
+{
+ int ret = 0;
+ ecnt_mc_general_data_s in_data;
+
+ in_data.ex_function_id = ECNT_MC_EXTERNAL_FUNCTION_GET_PORTMASK_HOOK;
+ in_data.mc_private.port_mask.vid = vid;
+ memmove(&in_data.mc_private.port_mask.mac_addr,&mac_addr,sizeof(ECNT_MAC_ADDRESS));
+ memmove(&in_data.mc_private.port_mask.src_ip,&src_ip,sizeof(ECNT_IP_ADDRESS));
+ memmove(&in_data.mc_private.port_mask.group_ip,&group_ip,sizeof(ECNT_IP_ADDRESS));
+ in_data.mc_private.port_mask.fwd_port_mask = fwd_port_mask;
+
+ ret = __ECNT_HOOK(ECNT_MULTICAST_GENERAL,ECNT_MC_XPON_MULTICAST_EXTERNAL_HOOK,(struct ecnt_data *)&in_data);
+
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+int ECNT_HOOK_MULTICAST_GET_VLAN_ACTION(unsigned int port_id, ECNT_IP_ADDRESS group_ip,ECNT_VLAN_ACTION* aciton)
+{
+ int ret = 0;
+ ecnt_mc_general_data_s in_data;
+
+ in_data.ex_function_id = ECNT_MC_EXTERNAL_FUNCTION_GET_VLAN_ACTION_HOOK;
+ in_data.mc_private.vlan_action.port_id = port_id;
+ memmove(&in_data.mc_private.vlan_action.group_ip,&group_ip,sizeof(ECNT_IP_ADDRESS));
+ in_data.mc_private.vlan_action.action = aciton;
+
+ ret = __ECNT_HOOK(ECNT_MULTICAST_GENERAL,ECNT_MC_XPON_MULTICAST_EXTERNAL_HOOK,(struct ecnt_data *)&in_data);
+
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+#endif /*_ECNT_HOOK_MULTICAST_GENERAL_H*/
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_net.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_net.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,378 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation ("EcoNet Software")
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited ("EcoNet") and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS ("ECONET SOFTWARE") RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN "AS IS"
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER'S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+
+#ifndef __LINUX_ENCT_HOOK_NET_H
+#define __LINUX_ENCT_HOOK_NET_H
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/net.h>
+#include <linux/if.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <ecnt_hook/ecnt_hook.h>
+#include "../../net/bridge/br_private.h"
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+enum ecnt_net_core_dev_subtype{
+ ECNT_DEV_QUE_XMIT,
+ ECNT_NETIF_RCV_SKB
+};
+
+enum ecnt_net_vlan_dev_subtype{
+ ECNT_VLAN_SKB_RECV
+};
+
+enum ecnt_net_br_fdb_subtype{
+ ECNT_BR_FDB_CREATE,
+ ECNT_BR_FDB_DELETE,
+ ECNT_BR_FDB_UPDATE,
+ ECNT_BR_FDB_GET,
+};
+
+enum ecnt_net_br_forward_subtype{
+ ECNT_BR_FORWARD_BR_FORWARD,
+ ECNT_BR_FORWARD_BR_FLOOD,
+ ECNT_BR_FORWARD_BR_PB_FLOOD,
+ ECNT_BR_FORWARD_BR_FORWARD_FINISH,
+};
+
+
+enum ecnt_net_af_subtype{
+ ECNT_NET_AF_BIND,
+ ECNT_NET_AF_PACKET_RCV,
+};
+
+enum ecnt_net_sock_subtype{
+ ECNT_NET_SOCK_ALLOC,
+ ECNT_NET_SOCK_RELEASE
+};
+
+enum ecnt_net_br_input_subtype
+{
+ ECNT_BR_HANDLE_FRAME = 0,
+};
+
+enum ecnt_net_udp_subtype
+{
+ ECNT_NET_UDP_RCV = 0,
+ ECNT_NET_UDPV6_RCV,
+};
+
+enum ecnt_net_ppp_generic_subtype
+{
+ ECNT_NET_PPP_XMIT = 0,
+};
+
+enum ecnt_net_ip_output_subtype
+{
+ ECNT_NET_IP_LOCAL_OUT = 0,
+};
+enum ecnt_net_igmp_subtype
+{
+ ECNT_NET_IGMP_GROUP_ADDED = 0,
+ ECNT_NET_IGMP_GROUP_DROPPED = 1,
+};
+
+struct ecnt_vlan_skb_rcv_s{
+ struct net_device *vlan_dev;
+ struct net_device *orig_dev;
+ struct packet_type *ptype;
+};
+
+struct ecnt_br_fdb_s{
+ const unsigned char *addr;
+ struct net_bridge_fdb_entry *fdb;
+};
+
+struct ecnt_foe_copy_s{
+ struct sk_buff **fromskb;
+};
+
+struct net_data_s{
+ struct sk_buff **pskb;
+ union{
+ struct ecnt_vlan_skb_rcv_s vlan_skb_rcv_data;
+ struct ecnt_br_fdb_s br_fdb_data;
+ struct ecnt_foe_copy_s foe_copy_data;
+ };
+ struct net_bridge_port *source;
+};
+
+struct ecnt_net_af_bind_s{
+ unsigned short sin_port;
+ struct task_struct *task;
+};
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+#define CALL_ECNT_HOOK(maintype,subtype,in_data) {int ret; \
+ ret = __ECNT_HOOK(maintype, subtype, (struct ecnt_data *)&in_data); \
+ if(ret == ECNT_RETURN_DROP){ \
+ kfree_skb(skb); \
+ return ret;} \
+ if(ret == ECNT_RETURN) \
+ return ret; \
+}
+
+#define ECNT_NET_HOOK(maintype, subtype, skb) { \
+ struct net_data_s net_data; \
+ net_data.pskb = &skb; \
+ CALL_ECNT_HOOK(maintype, subtype, net_data); \
+}
+
+#define ECNT_VLAN_SKB_RCV_HOOK(skb, orig_dev, vlan_dev, ptype){ \
+ struct net_data_s net_data; \
+ net_data.pskb = &skb; \
+ net_data.vlan_skb_rcv_data.orig_dev = orig_dev; \
+ net_data.vlan_skb_rcv_data.vlan_dev = vlan_dev; \
+ net_data.vlan_skb_rcv_data.ptype = ptype; \
+ CALL_ECNT_HOOK(ECNT_NET_VLAN_DEV, ECNT_VLAN_SKB_RECV, net_data); \
+}
+
+#define CALL_ECNT_NET_AF_BIND_HOOK(maintype, subtype, in_data) {int ret; \
+ ret = __ECNT_HOOK(maintype, subtype, (struct ecnt_data *)&in_data); \
+ if(ret == ECNT_RETURN_DROP){ \
+ goto out;} \
+}
+
+#define ECNT_NET_AF_BIND_HOOK(snum, tsk){ \
+ if (!list_empty(&ecnt_hooks[ECNT_NET_AF][ECNT_NET_AF_BIND])){ \
+ struct ecnt_net_af_bind_s net_data; \
+ net_data.sin_port = snum; \
+ net_data.task = tsk; \
+ CALL_ECNT_NET_AF_BIND_HOOK(ECNT_NET_AF, ECNT_NET_AF_BIND, net_data); \
+ } \
+}
+
+#define CALL_ECNT_NET_SOCK_ALLOC_HOOK(maintype, subtype, in_data) {int ret; \
+ ret = __ECNT_HOOK(maintype, subtype, (struct ecnt_data *)&in_data); \
+ if(ret == ECNT_RETURN_DROP){ \
+ destroy_inode(inode); \
+ return NULL;} \
+}
+
+#define ECNT_NET_SOCK_ALLOC_HOOK(tsk){ \
+ if (!list_empty(&ecnt_hooks[ECNT_NET_SOCK][ECNT_NET_SOCK_ALLOC])){ \
+ struct ecnt_net_af_bind_s net_data; \
+ net_data.sin_port = 0; \
+ net_data.task = tsk; \
+ CALL_ECNT_NET_SOCK_ALLOC_HOOK(ECNT_NET_SOCK, ECNT_NET_SOCK_ALLOC \
+ , net_data); \
+ } \
+}
+
+#define CALL_ECNT_NET_SOCK_RELEASE_HOOK(maintype, subtype, in_data) {int ret; \
+ ret = __ECNT_HOOK(maintype, subtype, (struct ecnt_data *)&in_data); \
+}
+
+#define ECNT_NET_SOCK_RELEASE_HOOK(tsk){ \
+ if (!list_empty(&ecnt_hooks[ECNT_NET_SOCK][ECNT_NET_SOCK_RELEASE])){ \
+ struct ecnt_net_af_bind_s net_data; \
+ net_data.sin_port = 0; \
+ net_data.task = tsk; \
+ CALL_ECNT_NET_SOCK_RELEASE_HOOK(ECNT_NET_SOCK, ECNT_NET_SOCK_RELEASE \
+ , net_data); \
+ } \
+}
+
+#define ECNT_CORE_DEV_HOOK(subtype, skb){ \
+ if (!list_empty(&ecnt_hooks[ECNT_NET_CORE_DEV][subtype])){ \
+ struct net_data_s net_data; \
+ if(skb != NULL) \
+ net_data.pskb = &skb; \
+ else \
+ net_data.pskb = NULL; \
+ CALL_ECNT_HOOK(ECNT_NET_CORE_DEV, subtype, net_data); \
+ } \
+}
+#define ECNT_BR_FRWARD_FINISH_HOOK(subtype, skb){ \
+ if (!list_empty(&ecnt_hooks[ECNT_NET_BR_FORWARD][subtype])){ \
+ struct net_data_s br_data; \
+ br_data.pskb = &skb; \
+ __ECNT_HOOK(ECNT_NET_BR_FORWARD, subtype, (struct ecnt_data *)&br_data); \
+ } \
+}
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+ I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+static inline int ECNT_BR_FDB_HOOK(int subtype, struct net_data_s *net_data){
+ int ret;
+ if (!list_empty(&ecnt_hooks[ECNT_NET_BR_FDB][subtype])){
+ ret = __ECNT_HOOK(ECNT_NET_BR_FDB, subtype, (struct ecnt_data *)net_data);
+ return ret;
+ }
+ return ECNT_CONTINUE;
+}
+
+static inline int ECNT_BR_FORWARD_HOOK(int subtype, struct net_data_s *net_data){
+ int ret = ECNT_CONTINUE ;
+
+ if (!list_empty(&ecnt_hooks[ECNT_NET_BR_FORWARD][subtype]))
+ {
+ ret = __ECNT_HOOK(ECNT_NET_BR_FORWARD, subtype, (struct ecnt_data *)net_data);
+ }
+ return ret;
+}
+
+static inline int ECNT_BR_INPUT_HOOK(int subtype, struct net_data_s *data)
+{
+ int ret = ECNT_CONTINUE;
+
+ if (!list_empty(&ecnt_hooks[ECNT_NET_BR_INPUT][subtype]))
+ {
+
+ ret = __ECNT_HOOK(ECNT_NET_BR_INPUT, subtype, (struct ecnt_data *)data);
+ }
+
+ return ret;
+}
+
+static inline int ECNT_UDP_RCV_HOOK(int subtype, struct net_data_s *data)
+{
+ int ret = ECNT_CONTINUE;
+
+ if (!list_empty(&ecnt_hooks[ECNT_NET_UDP][subtype]))
+ {
+
+ ret = __ECNT_HOOK(ECNT_NET_UDP, subtype, (struct ecnt_data *)data);
+ }
+
+ return ret;
+}
+
+static inline int ECNT_UDPV6_RCV_HOOK(int subtype, struct net_data_s *data)
+{
+ int ret = ECNT_CONTINUE;
+
+ if (!list_empty(&ecnt_hooks[ECNT_NET_UDPV6][subtype]))
+ {
+
+ ret = __ECNT_HOOK(ECNT_NET_UDPV6, subtype, (struct ecnt_data *)data);
+ }
+
+ return ret;
+}
+
+static inline int ECNT_PPP_GENERIC_HOOK(int subtype, struct net_data_s *data)
+{
+ int ret = ECNT_CONTINUE;
+
+ if (!list_empty(&ecnt_hooks[ECNT_NET_PPP_GENERIC][subtype]))
+ {
+
+ ret = __ECNT_HOOK(ECNT_NET_PPP_GENERIC, subtype, (struct ecnt_data *)data);
+ }
+
+ return ret;
+}
+
+static inline int ECNT_IP_OUTPUT_HOOK(int subtype, struct net_data_s *data)
+{
+ int ret = ECNT_CONTINUE;
+
+ if (!list_empty(&ecnt_hooks[ECNT_NET_IP_OUTPUT][subtype]))
+ {
+
+ ret = __ECNT_HOOK(ECNT_NET_IP_OUTPUT, subtype, (struct ecnt_data *)data);
+ }
+
+ return ret;
+}
+
+static inline int ECNT_IGMP_HOOK(int subtype, struct net_data_s *data)
+{
+ int ret = ECNT_CONTINUE;
+
+ if (!list_empty(&ecnt_hooks[ECNT_NET_IGMP][subtype]))
+ {
+
+ ret = __ECNT_HOOK(ECNT_NET_IGMP, subtype, (struct ecnt_data *)data);
+ }
+
+ return ret;
+}
+
+static inline int ECNT_PACKET_RCV_HOOK(int subtype, struct sk_buff *to, struct sk_buff *from)
+{
+ struct net_data_s net_data;
+ int ret = ECNT_CONTINUE;
+
+ if (!list_empty(&ecnt_hooks[ECNT_NET_AF][subtype])){
+ if(from != NULL)
+ net_data.foe_copy_data.fromskb = &from;
+ else
+ net_data.foe_copy_data.fromskb = NULL;
+
+ if(to != NULL)
+ net_data.pskb = &to;
+ else
+ net_data.pskb = NULL;
+
+ ret = __ECNT_HOOK(ECNT_NET_AF, subtype, (struct ecnt_data *)&net_data);
+ return ret;
+ }
+ return ECNT_CONTINUE;
+}
+
+#endif
+
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_nf.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_nf.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,146 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation ("EcoNet Software")
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited ("EcoNet") and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS ("ECONET SOFTWARE") RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN "AS IS"
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER'S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+
+#ifndef __LINUX_ENCT_HOOK_NF_H
+#define __LINUX_ENCT_HOOK_NF_H
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/net.h>
+#include <linux/if.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <ecnt_hook/ecnt_hook.h>
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+enum ecnt_nf_ip4_subtype{
+ ECNT_NF_IP4_PRE_ROUTING,
+ ECNT_NF_IP4_LOCAL_IN,
+ ECNT_NF_IP4_FORWARD,
+ ECNT_NF_IP4_LOCAL_OUT,
+ ECNT_NF_IP4_POST_ROUTING
+};
+
+enum ecnt_nf_ip6_subtype{
+ ECNT_NF_IP6_PRE_ROUTING,
+ ECNT_NF_IP6_LOCAL_IN,
+ ECNT_NF_IP6_FORWARD,
+ ECNT_NF_IP6_LOCAL_OUT,
+ ECNT_NF_IP6_POST_ROUTING
+};
+
+enum ecnt_nf_br_subtype{
+ ECNT_NF_BR_PRE_ROUTING,
+ ECNT_NF_BR_LOCAL_IN,
+ ECNT_NF_BR_FORWARD,
+ ECNT_NF_BR_LOCAL_OUT,
+ ECNT_NF_BR_POST_ROUTING
+};
+
+enum ecnt_nf_arp_subtype{
+ ECNT_NF_ARP_IN,
+ ECNT_NF_ARP_FORWARD,
+ ECNT_NF_ARP_OUT
+};
+
+enum ecnt_nf_track_subtype{
+ ECNT_NF_TRACK_INIT_FREE
+};
+
+struct nf_data_s{
+ struct sk_buff *skb;
+ const struct net_device *in;
+ const struct net_device *out;
+};
+
+struct ecnt_nf_track_s{
+ enum ip_conntrack_events event;
+ struct nf_conn *ct;
+ struct sk_buff *skb;
+};
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+#define CALL_ECNT_NF_TRACK_INIT_HOOK(maintype, subtype, in_data) {int ret; \
+ ret = __ECNT_HOOK(maintype, subtype, (struct ecnt_data *)&in_data); \
+}
+
+#define ECNT_NF_TRACK_CORE_INIT_HOOK(_evt, _ct, _skb){ \
+ if (!list_empty(&ecnt_hooks[ECNT_NF_TRACK_CORE][ECNT_NF_TRACK_INIT_FREE])){\
+ struct ecnt_nf_track_s net_data; \
+ net_data.event = _evt; \
+ net_data.ct = _ct; \
+ net_data.skb = _skb; \
+ CALL_ECNT_NF_TRACK_INIT_HOOK(ECNT_NF_TRACK_CORE \
+ , ECNT_NF_TRACK_INIT_FREE \
+ , net_data); \
+ } \
+}
+
+#define ECNT_NF_TRACK_CORE_FREE_HOOK ECNT_NF_TRACK_CORE_INIT_HOOK
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+ I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+extern int ecnt_net_filter_hook_init(void);
+
+#endif
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_pcie.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_pcie.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,222 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVERS SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVERS SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_PCIE_H_
+#define _ECNT_HOOK_PCIE_H_
+
+
+#include "ecnt_hook.h"
+#include "ecnt_hook_pcie_type.h"
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+************************************************************************/
+
+/************************************************************************
+* M A C R O S
+************************************************************************/
+
+/************************************************************************
+* D A T A T Y P E S
+************************************************************************/
+
+/************************************************************************
+* E X T E R N A L D A T A D E C L A R A T I O N S
+************************************************************************/
+
+/************************************************************************
+* F U N C T I O N D E F I N I T I O N S
+************************************************************************/
+
+/************************************************************************
+* P U B L I C D A T A
+************************************************************************/
+
+/************************************************************************
+* P R I V A T E D A T A
+************************************************************************/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+************************************************************************/
+
+static inline int PCIE_API_GET_CONFREG(int idx, unsigned int offset, unsigned int* value)
+{
+ struct ecnt_pcie_data in_data;
+ int ret = 0;
+
+ in_data.function_id = PCIE_GET_CONFREG;
+ in_data.idx = idx;
+ in_data.conf.off = offset;
+ ret = __ECNT_HOOK(ECNT_PCIE, ECNT_PCIE_API, (struct ecnt_data *)&in_data);
+ *value = in_data.retValue;
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+
+}
+
+
+static inline int PCIE_API_SET_CONFREG(int idx, unsigned int offset,unsigned int value)
+{
+ struct ecnt_pcie_data in_data;
+ int ret = 0;
+
+ in_data.function_id = PCIE_SET_CONFREG;
+ in_data.idx = idx;
+ in_data.conf.off = offset;
+ in_data.conf.val = value;
+ ret = __ECNT_HOOK(ECNT_PCIE, ECNT_PCIE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+
+}
+
+static inline int PCIE_API_GET_ASPM(int idx, unsigned int* value)
+{
+ struct ecnt_pcie_data in_data;
+ int ret = 0;
+
+ in_data.function_id = PCIE_GET_ASPM;
+ in_data.idx = idx;
+ ret = __ECNT_HOOK(ECNT_PCIE, ECNT_PCIE_API, (struct ecnt_data *)&in_data);
+ *value = in_data.retValue;
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+
+}
+
+
+static inline int PCIE_API_SET_ASPM(int idx, unsigned int value)
+{
+ struct ecnt_pcie_data in_data;
+ int ret = 0;
+
+ in_data.function_id = PCIE_SET_ASPM;
+ in_data.idx = idx;
+ in_data.conf.val = value;
+ ret = __ECNT_HOOK(ECNT_PCIE, ECNT_PCIE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+
+}
+
+static inline int PCIE_API_GET_SPEED(int idx,unsigned int* value)
+{
+ struct ecnt_pcie_data in_data;
+ int ret = 0;
+
+ in_data.function_id = PCIE_GET_SPEED;
+ in_data.idx = idx;
+ ret = __ECNT_HOOK(ECNT_PCIE, ECNT_PCIE_API, (struct ecnt_data *)&in_data);
+ *value = in_data.retValue;
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int PCIE_API_SET_SPEED(int idx,unsigned int value)
+{
+ struct ecnt_pcie_data in_data;
+ int ret = 0;
+
+ in_data.function_id = PCIE_SET_SPEED;
+ in_data.idx = idx;
+ in_data.conf.val = value;
+ ret = __ECNT_HOOK(ECNT_PCIE, ECNT_PCIE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int PCIE_API_GET_COUNT(int idx, struct ecnt_pcie_count_data* pcnt)
+{
+ struct ecnt_pcie_data in_data;
+ int ret = 0;
+
+ in_data.function_id = PCIE_GET_COUNT;
+ in_data.idx = idx;
+ ret = __ECNT_HOOK(ECNT_PCIE, ECNT_PCIE_API, (struct ecnt_data *)&in_data);
+ memcpy(pcnt,&in_data.cnt,sizeof (struct ecnt_pcie_count_data));
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+
+}
+
+static inline int PCIE_API_GET_LINKSTATE(int idx,unsigned int* value)
+{
+ struct ecnt_pcie_data in_data;
+ int ret = 0;
+
+ in_data.function_id = PCIE_GET_LINKSTATE;
+ in_data.idx = idx;
+ ret = __ECNT_HOOK(ECNT_PCIE, ECNT_PCIE_API, (struct ecnt_data *)&in_data);
+ *value = in_data.retValue;
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int PCIE_AUTOBENCH_LOOPBACK(void)
+{
+
+ struct ecnt_pcie_data in_data;
+ int ret=0;
+
+ in_data.function_id = PCIE_FUNCTION_AUTOBENCH_LOOPBACK;
+
+ ret = __ECNT_HOOK(ECNT_PCIE, ECNT_PCIE_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+#endif /* _ECNT_HOOK_PCIE_H_ */
+
+
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_pcie_type.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_pcie_type.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,113 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER'S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_PCIE_TYPE_H_
+#define _ECNT_HOOK_PCIE_TYPE_H_
+
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+ enum {
+ ECNT_PCIE_API = 0,
+ };
+
+
+ typedef enum {
+ PCIE_GET_CONFREG = 0,
+ PCIE_SET_CONFREG,
+ PCIE_GET_ASPM,
+ PCIE_SET_ASPM,
+ PCIE_GET_SPEED,
+ PCIE_SET_SPEED,
+ PCIE_GET_COUNT,
+ PCIE_GET_LINKSTATE,
+ PCIE_FUNCTION_AUTOBENCH_LOOPBACK,
+ PCIE_FUNCTION_MAX_NUM
+ } PCIE_HookFunctionID_t ;
+
+ struct ecnt_pcie_conf_data
+ {
+ unsigned int off;
+ unsigned int val;
+ };
+
+ struct ecnt_pcie_count_data
+ {
+ unsigned int err[6];
+ };
+
+ struct ecnt_pcie_data
+ {
+ PCIE_HookFunctionID_t function_id;
+ int retValue;
+ int idx;
+ union
+ {
+ struct ecnt_pcie_conf_data conf;
+ struct ecnt_pcie_count_data cnt;
+ };
+ };
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+# I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+
+#endif /* _ECNT_HOOK_PCIE_TYPE_H_ */
+
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_pon_mac.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_pon_mac.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,249 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation ("EcoNet Software")
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited ("EcoNet") and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS ("ECONET SOFTWARE") RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN "AS IS"
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER'S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+
+#ifndef __LINUX_ENCT_HOOK_PON_MAC_H
+#define __LINUX_ENCT_HOOK_PON_MAC_H
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+
+#include <ecnt_hook/ecnt_hook.h>
+
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+
+typedef enum Event_Src_Module_e{
+ XPON_PHY_MODULE,
+ XPON_PUB_MODULE,
+}Event_Src_Module_t;
+
+typedef enum Pub_Sub_Type_e{
+ XPON_SN_SET,
+ XPON_MAC_MODE_GET,
+ XPON_MAC_RX_DIS_SET,
+ XPON_ONU_TYPE_GET,
+ XPON_TCONT_GET,
+ XPON_SLT,
+}Pub_Sub_Type_t;
+
+typedef enum PHY_Event_Source_e{
+ PON_PHY_EVENT_SOURCE_HW_IRQ , /* event comes from hard irq*/
+ PON_PHY_EVENT_SOURCE_SW_POLL , /* event comes from sw irq polling */
+}PHY_Event_Source_t;
+
+typedef enum PHY_Event_Type_e {
+ /*
+ phy interrupt event
+ */
+ PHY_EVENT_TRANS_LOS_INT = 0x00 ,
+ PHY_EVENT_PHY_LOF_INT ,
+ PHY_EVENT_TF_INT ,
+ PHY_EVENT_TRANS_INT ,
+ PHY_EVENT_TRANS_SD_FAIL_INT ,
+ PHY_EVENT_PHYRDY_INT ,
+ PHY_EVENT_PHY_ILLG_INT ,
+ PHY_EVENT_I2CM_INT ,
+ PHY_EVENT_TRANS_LOS_ILLG_INT , /* LOS and Illegal INT happen simultaneously */
+
+ /* all phy interrupt event id should be less than this */
+ PHY_EVENT_MAX_INT = 0x100 ,
+
+ /*
+ phy non-interrupt event
+ */
+ PHY_EVENT_START_ROGUE_MODE ,
+ PHY_EVENT_STOP_ROGUE_MODE ,
+
+ PHY_EVENT_CALIBRATION_START ,
+ PHY_EVENT_CALIBRATION_STOP ,
+ PHY_EVENT_TX_POWER_ON ,
+ PHY_EVENT_TX_POWER_OFF ,
+
+} PHY_Event_Type_t ;
+
+typedef struct PON_PHY_Event_data_s{
+ PHY_Event_Source_t src;
+ PHY_Event_Type_t id;
+} PON_PHY_Event_data_t;
+
+typedef enum ECNT_XPON_MAC_Mode_e {
+ ECNT_XPON_MAC_MODE_OFF = 0x0,
+ ECNT_XPON_MAC_MODE_GPON ,
+ ECNT_XPON_MAC_MODE_EPON ,
+}ECNT_XPON_MAC_Mode_t;
+
+
+typedef enum ECNT_XPON_MAC_Rx_Dis_e {
+ ECNT_XPON_MAC_RX_DISABLE = 0x0,
+ ECNT_XPON_MAC_RX_ENABLE ,
+}ECNT_XPON_MAC_Rx_Dis_t;
+
+
+enum ECNT_XPON_MAC_SUBTYPE {
+ ECNT_XPON_MAC_HOOK,
+};
+
+typedef struct gemportid_to_tcont_s{
+ uint gemportid;
+ int *tcont;
+}gemportid_to_tcont_t;
+
+
+typedef struct xpon_mac_Pub_info_s {
+ Pub_Sub_Type_t type;
+ union {
+ unsigned char sn[8];
+ unsigned char * mode;
+ unsigned char rx_dis;
+ unsigned int onu_type;
+ gemportid_to_tcont_t gemtotcont;
+ };
+}xpon_mac_Pub_info_t;
+
+typedef struct xpon_mac_hook_data_s {
+ Event_Src_Module_t src_module;
+ union {
+ PON_PHY_Event_data_t * pEvent;
+ xpon_mac_Pub_info_t pub_info;
+ };
+}xpon_mac_hook_data_t;
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+ I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+static inline void ECNT_API_XPON_SN_SET(unsigned char sn[8])
+{
+ xpon_mac_hook_data_t data = {0} ;
+ data.src_module = XPON_PUB_MODULE;
+ data.pub_info.type = XPON_SN_SET;
+ memcpy(data.pub_info.sn, sn, 8);
+
+ if(ECNT_HOOK_ERROR == __ECNT_HOOK(ECNT_XPON_MAC, ECNT_XPON_MAC_HOOK, (struct ecnt_data * )&data) ){
+ printk("ECNT_HOOK_ERROR occur. %s:%d\n", __FUNCTION__, __LINE__);
+ }
+}
+
+static inline void ECNT_API_XPON_MODE_GET(unsigned char * mode)
+{
+ struct xpon_mac_hook_data_s data = {0} ;
+ data.src_module = XPON_PUB_MODULE;
+ data.pub_info.type = XPON_MAC_MODE_GET;
+ data.pub_info.mode = mode;
+
+ if(ECNT_HOOK_ERROR == __ECNT_HOOK(ECNT_XPON_MAC, ECNT_XPON_MAC_HOOK, (struct ecnt_data * )&data) ){
+ printk("ECNT_HOOK_ERROR occur. %s:%d\n", __FUNCTION__, __LINE__);
+ }
+}
+
+static inline void ECNT_API_XPON_MAC_RX_DIS_SET(unsigned char rx_dis)
+{
+ struct xpon_mac_hook_data_s data = {0} ;
+ data.src_module = XPON_PUB_MODULE;
+ data.pub_info.type = XPON_MAC_RX_DIS_SET;
+ data.pub_info.rx_dis = rx_dis;
+
+ if(ECNT_HOOK_ERROR == __ECNT_HOOK(ECNT_XPON_MAC, ECNT_XPON_MAC_HOOK, (struct ecnt_data * )&data) ){
+ printk("ECNT_HOOK_ERROR occur. %s:%d\n", __FUNCTION__, __LINE__);
+ }
+}
+
+static inline void ECNT_API_XPON_ONU_TYPE_GET(unsigned int *onu_type)
+{
+ struct xpon_mac_hook_data_s data = {0} ;
+ data.src_module = XPON_PUB_MODULE;
+ data.pub_info.type = XPON_ONU_TYPE_GET;
+
+ if(ECNT_HOOK_ERROR == __ECNT_HOOK(ECNT_XPON_MAC, ECNT_XPON_MAC_HOOK, (struct ecnt_data * )&data) ){
+ printk("ECNT_HOOK_ERROR occur. %s:%d\n", __FUNCTION__, __LINE__);
+ }
+
+ *onu_type = data.pub_info.onu_type;
+}
+
+static inline void ECNT_API_XPON_TCONT_GET(uint gemportid, int *tcont)
+{
+ struct xpon_mac_hook_data_s data = {0} ;
+ data.src_module = XPON_PUB_MODULE;
+ data.pub_info.type = XPON_TCONT_GET;
+ data.pub_info.gemtotcont.gemportid = gemportid;
+ data.pub_info.gemtotcont.tcont = tcont;
+
+ if(ECNT_HOOK_ERROR == __ECNT_HOOK(ECNT_XPON_MAC, ECNT_XPON_MAC_HOOK, (struct ecnt_data * )&data) ){
+ printk("ECNT_HOOK_ERROR occur. %s:%d\n", __FUNCTION__, __LINE__);
+ }
+}
+
+static inline int ECNT_API_XPON_SLT(void)
+{
+ struct xpon_mac_hook_data_s data = {0} ;
+ data.src_module = XPON_PUB_MODULE;
+ data.pub_info.type = XPON_SLT;
+ int ret = 0;
+ ret = __ECNT_HOOK(ECNT_XPON_MAC, ECNT_XPON_MAC_HOOK, (struct ecnt_data * )&data);
+
+ if(ECNT_HOOK_ERROR == ret){
+ printk("ECNT_HOOK_ERROR occur. %s:%d\n", __FUNCTION__, __LINE__);
+ }
+
+ return ret;
+}
+
+#endif // __LINUX_ENCT_HOOK_PON_MAC_H
+
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_pon_phy.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_pon_phy.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,324 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation ("EcoNet Software")
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited ("EcoNet") and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS ("ECONET SOFTWARE") RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN "AS IS"
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER'S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+
+#ifndef __LINUX_ENCT_HOOK_PON_PHY_H
+#define __LINUX_ENCT_HOOK_PON_PHY_H
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+
+#include <ecnt_hook/ecnt_hook.h>
+#include <linux/jiffies.h>
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+/* SET function */
+
+#define PON_SET_PHY_STOP 0x0002 /* phy_stop() */
+#define PON_SET_PHY_START 0x0001 /* phy_start() */
+#define PON_SET_PHY_RESET_INIT 0x0003 /* pon_phy_reset_init() */
+#define PON_SET_PHY_DEV_INIT 0x0004 /* pon_dev_init() */
+#define PON_SET_PHY_FW_READY 0x0005 /* phy_fw_ready() */
+#define PON_SET_EPONMODE_PHY_RESET 0x0006 /* eponMode_phy_reset() */
+#define PON_SET_PHY_MODE_CONFIG 0x0007 /* phy_mode_config() */
+#define PON_SET_PHY_TRANS_MODEL_SETTING 0x0008 /* phy_trans_model_setting() */
+#define PON_SET_PHY_COUNTER_CLEAR 0x0009 /* phy_counter_clear() */
+#define PON_SET_PHY_BIT_DELAY 0x000a /* phy_bit_delay() */
+#define PON_SET_PHY_TX_POWER_CONFIG 0x000b /* phy_tx_power_config() */
+#define PON_SET_PHY_TRANS_POWER_SWITCH 0x000c /* phy_trans_power_switch() */
+#define PON_SET_PHY_TX_BURST_CONFIG 0x000d /* phy_tx_burst_config() */
+#define PON_SET_PHY_GPON_DELIMITER_GUARD 0x000e /* phy_gpon_delimiter_guard()*/
+
+#define PON_SET_PHY_TRANS_TX_SETTINGS 0x0010 /* phy_trans_tx_setting() */
+#define PON_SET_PHY_GPON_PREAMBLE 0x0011 /* phy_gpon_preamble() */
+#define PON_SET_PHY_GPON_EXTEND_PREAMBLE 0x0012 /* phy_gpon_extend_preamble()*/
+#define PON_SET_PHY_ROGUE_PRBS_CONFIG 0x0013 /* phy_rogue_prbs_config() */
+#define PON_SET_PHY_RX_FEC_SETTING 0x0014 /* phy_rx_fec_setting() */
+#define PON_SET_PHY_TRANS_RX_SETTING 0x0015 /* phy_trans_rx_setting() */
+#define PON_SET_PHY_RESET_COUNTER 0x0016 /* phy_reset_counter() */
+#ifdef TCSUPPORT_CPU_EN7521
+#define PON_SET_PHY_EPON_TS_CONTINUE_MODE 0x0017 /* phy_set_epon_ts_continue_mode() */
+#endif
+#define PON_SET_PHY_RESTORE_PREAMBLE_AND_GUARD_BIT 0x0019 /* resotre_preamble_and_guard_bit */
+#define PON_SET_PHY_NOTIFY_EVENT 0x001a
+#define PON_SET_PHY_TRAFFIC_STATUS 0x001b /* phy_set_traffic_status()*/
+
+
+/* Get Function */
+
+#define PON_GET_PHY_LOS_STATUS 0x8001 /* phy_los_status() */
+#define PON_GET_PHY_READY_STATUS 0x8002 /* phy_ready_status() */
+#define PON_GET_PHY_TRANS_PARAM_TEMPRATRUE 0x8003 /* phy_trans_param_status() */
+#define PON_GET_PHY_TRANS_PARAM_VOLTAGE 0x8004 /* phy_trans_param_status() */
+#define PON_GET_PHY_TRANS_PARAM_TX_CURRENT 0x8005 /* phy_trans_param_status() */
+#define PON_GET_PHY_TRANS_PARAM_TX_POWER 0x8006 /* phy_trans_param_status() */
+#define PON_GET_PHY_TRANS_PARAM_RX_POWER 0x8007 /* phy_trans_param_status() */
+#define PON_GET_PHY_TX_FEC_STATUS 0x8008 /* phy_tx_fec_status */
+#define PON_GET_PHY_TX_BURST_GETTING 0x8009 /* phy_tx_burst_getting */
+
+#define PON_GET_PHY_TRANS_TX 0x800a /* phy_trans_tx_getting */
+#define PON_GET_PHY_RX_FEC_COUNTER 0x800d /* phy_rx_fec_counter */
+
+#define PON_GET_PHY_BIP_COUNTER 0x8012 /* phy_bip_counter */
+#define PON_GET_PHY_RX_FRAME_COUNTER 0x8013 /* phy_rx_frame_counter */
+
+#define PON_GET_PHY_RX_FEC_STATUS 0x8016 /* phy_rx_fec_status */
+#define PON_GET_PHY_RX_FEC_GETTING 0x8017 /* phy_rx_fec_getting */
+#define PON_GET_PHY_TRANS_RX_GETTING 0x8018 /* phy_trans_rx_getting */
+
+
+#define PON_GET_PHY_IS_SYNC 0x8019 /* is_phy_sync */
+#define PON_GET_PHY_MODE 0x801a /* get_phy_mode */
+#define PON_GET_PHY_TX_LONG_FLAG 0x801b /* get_tx_log_flag */
+
+/* mt7570_param_status_real, phy_trans_param_status_real, phy_tx_alarm, phy_rx_power_alarm */
+#define PON_GET_PHY_TRANS_STATUS 0x801c
+#define PON_GET_PHY_GET_TX_POWER_EN_FLAG 0x801d
+#ifdef TCSUPPORT_CPU_EN7521
+#define PON_GET_PHY_ROUND_TRIP_DELAY 0x801e
+#endif
+#define PON_GET_PHY_EN7571_VER 0x801f
+
+#define PHY_LOS_HAPPEN (0x01)
+#define PHY_NO_LOS_HAPPEN (0x00)
+
+#define PHY_ENABLE (1)
+#define PHY_DISABLE (0)
+
+#define PHY_TRUE (1)
+#define PHY_FALSE (0)
+
+/*Tx Rogue send PRBS */
+#define PHY_TX_ROGUE_MODE (1)
+#define PHY_TX_NORMAL_MODE (0)
+
+#define PHY_TX_BURST_MODE (0)
+#define PHY_TX_CONT_MODE (1)
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+
+enum {
+ XPON_PHY_API_TYPE_GET = 0,
+ XPON_PHY_API_TYPE_SET,
+};
+
+#define PHY_NO_API (-1)
+#define PHY_SUCCESS (0)
+#define PHY_FAILURE (1)
+
+typedef enum XponPhy_Mode_e{
+ PHY_EPON_CONFIG,
+ PHY_GPON_CONFIG,
+ PHY_UNKNOWN_CONFIG,
+} Xpon_Phy_Mode_t ;
+
+typedef enum PHY_Los_Status_e{
+ PHY_LINK_STATUS_LOS,
+ PHY_LINK_STATUS_READY,
+ PHY_LINK_STATUS_UNKNOWN,
+}PHY_Los_Status_t;
+
+typedef enum {
+ PHY_CALLBACK_XPON_STATE_NOTIFY =8,
+} PHY_CbType_t ;
+
+
+/*transceiver alarm type*/
+typedef enum {
+ PHY_TRANS_NO_ALARM = 0x0,
+ PHY_TRANS_TX_HIGH_POWER_ALARM = 0x01,
+ PHY_TRANS_TX_LOW_POWER_ALARM = 0x02,
+ PHY_TRANS_TX_HIGH_CUR_ALARM = 0x04,
+ PHY_TRANS_TX_LOW_CUR_ALARM = 0x08,
+ PHY_TRANS_RX_HIGH_POWER_ALARM = 0x10,
+ PHY_TRANS_RX_LOW_POWER_ALARM = 0x20
+} ENUM_PhyTransAlarm ;
+
+
+/*phy counter clear*/
+typedef enum {
+ PHY_ERR_CNT_CLR = 0x01,
+ PHY_BIP_CNT_CLR = 0x02,
+ PHY_RXFRAME_CNT_CLR = 0x04,
+#if defined(CONFIG_USE_MT7520_ASIC) || defined(CONFIG_USE_A60928)
+ PHY_TXFRAME_CNT_CLR = 0x08,
+ PHY_EPON_ERR_CNT_CLR = 0x10
+#endif /* TCSUPPORT_WAN_EPON */
+} ENUM_PhyCounterClr;
+
+/*GPON Preamble*/
+typedef enum {
+ PHY_GUARD_BIT_NUM_EN = 0x01, /* set by HY_GponPreb_T->mask to modify guard_bit_num */
+ PHY_PRE_T1_NUM_EN = 0x02, /* set by HY_GponPreb_T->mask to modify preamble_t1_num */
+ PHY_PRE_T2_NUM_EN = 0x04, /* set by HY_GponPreb_T->mask to modify preamble_t2_num */
+ PHY_PRE_T3_PAT_EN = 0x08, /* set by HY_GponPreb_T->mask to modify preamble_t3_pat */
+ PHY_T3_O4_PRE_EN = 0x10, /* set by HY_GponPreb_T->mask to modify t3_O4_preamble */
+ PHY_T3_O5_PRE_EN = 0x20, /* set by HY_GponPreb_T->mask to modify t3_O5_preamble */
+ PHY_EXT_BUR_MODE_EN = 0x40, /* set by HY_GponPreb_T->mask to modify extend_burst_mode */
+ PHY_OPER_RANG_EN = 0x80, /* set by HY_GponPreb_T->mask to modify oper_ranged_st */
+ PHY_DIS_SCRAM_EN = 0x200,/* set by HY_GponPreb_T->mask to modify dis_scramble */
+#if defined(CONFIG_USE_MT7520_ASIC) || defined(CONFIG_USE_A60928)
+ PHY_EXTB_LENG_SEL_EN = 0x100//set by HY_GponPreb_T->mask to modify extb_length_sel
+#endif
+} ENUM_GponPreb_T ;
+
+/* traffic status */
+typedef enum {
+ TRAFFIC_STATUS_DOWN = 0x0,
+ TRAFFIC_STATUS_UP = 0x1,
+} ENUM_PhyTrafficStatus;
+
+enum ECNT_XPON_PHY_SUBTYPE {
+ ECNT_XPON_PHY_API,
+};
+
+typedef struct
+{
+ ushort temprature;
+ ushort supply_voltage;
+ ushort tx_current;
+ ushort tx_power;
+ ushort rx_power;
+}PHY_TransParam_T, *PPHY_TransParam_T;
+
+typedef struct{
+ PHY_TransParam_T params;
+ uint alarms;
+} PHY_Trans_Status_t;
+
+typedef struct{
+ Xpon_Phy_Mode_t mode ;
+ int txEnable;
+} PHY_Mode_Cfg_t;
+
+/* FEC error counter*/
+typedef struct
+{
+ uint correct_bytes; /* the number of corrected bytes */
+ uint correct_codewords; /* the number of corrected codewords */
+ uint uncorrect_codewords;/* the number of uncorrected codewords */
+ uint total_rx_codewords; /* the number of total received codewords */
+ uint fec_seconds; /* the count of second in previous 15min when there was an FEC anomaly */
+}PHY_FecCount_T, *PPHY_FecCount_T;
+/*received frame counter*/
+typedef struct
+{
+ uint frame_count_low; /* the number of received frame */
+ uint frame_count_high;
+ uint lof_counter; /* the number of LOS */
+}PHY_FrameCount_T, *PPHY_FrameCount_T;
+
+typedef struct{
+ uint delimiter ;
+ unchar guard_time ;
+} PHY_GPON_Delimiter_Guard_t;
+
+typedef struct
+{
+ unchar trans_tx_sd_inv_status;
+ unchar trans_burst_en_inv_status;
+ unchar trans_tx_fault_inv_status;
+}PHY_TransConfig_T, *PPHY_TransConfig_T;
+
+typedef struct
+{
+ unchar guard_bit_num; /**< [8 bits ]number of "guard time" bits */
+ unchar preamble_t1_num; /**< [8 bits ]number of "type 1 preamble" bits */
+ unchar preamble_t2_num; /**< [8 bits ]number of "type 2 preamble" bits */
+ unchar preamble_t3_pat; /**< [8 bits ]pattern of "type 3 preamble" */
+ unchar t3_O4_preamble; /**< [8 bits ]number of "type3 preamble bytes"(used in O3 ,O4) */
+ unchar t3_O5_preamble; /**< [8 bits ]number of "type3 preamble bytes"(used in O5) */
+ unchar extend_burst_mode; /**< [1 bit ]switch to extend burst mode */
+ unchar oper_ranged_st; /**< [2 bits]ranged state: 2'b10: O3,O4 state. 2'b11: O5 sate */
+ unchar dis_scramble; /**< [1 bit ]set to disable scrambler */
+#if defined(CONFIG_USE_MT7520_ASIC) || defined(CONFIG_USE_A60928)
+ /** [1 bit ] set to release the limit of extend burst mode overhead, used
+ * for ZTE C300
+ */
+ unchar extb_length_sel;
+#endif /* CONFIG_USE_MT7520_ASIC */
+ uint mask; /**< use ENUM_GponPreb_T to define it */
+}PHY_GponPreb_T, *PPHY_GponPreb_T;
+
+typedef struct xpon_phy_api_data_s {
+ int api_type ; /* [in ] get or set API */
+ unsigned int cmd_id ; /* [in ] API command ID */
+ int ret ; /* [out] API return value */
+
+ union{ /* [in|out] */
+ int * data ;
+ PHY_Mode_Cfg_t * phy_mode_cfg ;
+ PHY_Trans_Status_t * trasn_status ;
+ PHY_FecCount_T * rx_fec_cnt ;
+ PHY_FrameCount_T * rx_frame_cnt ;
+ PHY_GPON_Delimiter_Guard_t * delimiter_guard ;
+ PHY_TransConfig_T * tx_trans_cfg ;
+ PHY_GponPreb_T * gpon_preamble ;
+ PHY_GponPreb_T * gpon_ex_preamble;
+ ENUM_PhyTrafficStatus * traffic_status ;
+ void * raw ;
+ };
+}xpon_phy_api_data_t;
+
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+ I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+
+#endif // __LINUX_ENCT_HOOK_PON_PHY_H
+
+
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_ppe.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_ppe.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,413 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER'S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_PPE_H_
+#define _ECNT_HOOK_PPE_H_
+
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include "ecnt_hook.h"
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+typedef enum {
+ PPE_METER_BYTE_MODE = 0 ,
+ PPE_METER_PKT_MODE
+} PPE_Meter_Mode_t;
+
+typedef enum {
+ PPE_METER_TRTCM_DISABLE = 0 ,
+ PPE_METER_TRTCM_ENABLE
+} PPE_Meter_Trtcm_t;
+
+typedef enum {
+ PPE_METER_TICKSEL_0 = 0 ,
+ PPE_METER_TICKSEL_1,
+ PPE_METER_TICKSEL_2,
+ PPE_METER_TICKSEL_3
+} PPE_Meter_TickSel_t;
+
+typedef enum {
+ PPE_METER_MAXBKSIZE_0 = 0 ,
+ PPE_METER_MAXBKSIZE_1,
+ PPE_METER_MAXBKSIZE_2,
+ PPE_METER_MAXBKSIZE_3
+} PPE_Meter_MaxBkSize_t;
+
+enum {
+ ECNT_DRIVER_PPE_API = 0,
+};
+
+typedef enum {
+ PPE_API_ID_CLEAN_FOE_TABLE = 0,
+ PPE_API_ID_DUMP_FOE_PKT,
+ PPE_API_ID_ADD_FOE_ENTRY,
+ PPE_API_ID_DLF_CLEAR_INVALID,
+ PPE_API_ID_DLF_IF_ENABLE,
+ PPE_API_ID_SET_METER_GROUP,
+ PPE_API_ID_GET_METER_GROUP,
+ PPE_API_ID_GET_SOFT_CNT,
+ PPE_API_ID_TRAFFIC_CLASSIFY,
+ PPE_API_ID_SET_TLS_CFG,
+ PPE_API_ID_SET_TLS_VID,
+ PPE_API_ID_SET_KA_CFG,
+ PPE_API_ID_GET_AGING_CFG,
+ PPE_API_ID_SET_AGING_CFG,
+ PPE_API_ID_MAX_NUM
+} PPE_HookFunctionID_t ;
+
+struct ecnt_ppe_tls_cfg
+{
+ unchar vid_idx[5];/*VID_IDX0~4*/
+ unchar sp;
+ unchar mode;
+ unchar range;
+ unchar vid_en;
+ unchar ma;
+ unchar cm;
+ unchar en;
+};
+
+typedef enum
+{
+ KA_DISABLE = 0,
+ KA_UNICAST = 1,
+ KA_DUPLICATE = 3,
+}KA_Cfg_Value_t;
+
+struct ecnt_ppe_data {
+ PPE_HookFunctionID_t function_id;
+ int retValue;
+ uint index;
+ union {
+ void *skb;
+ void *hwnat_tuple;
+ struct
+ {
+ unchar mode;
+ unchar trtcmEn;
+ unchar tickSel;
+ uint tokenRate;
+ unchar maxBkSize;
+ }meter_cfg;
+ uint cntByte;
+ struct ecnt_ppe_tls_cfg *ppe_tls_cfg;
+ struct
+ {
+ ushort vid_lo;
+ ushort vid_hi;
+ }tls_vid;
+ KA_Cfg_Value_t ka_cfg;
+ uint aging_cfg;
+ };
+};
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+# I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+static inline int PPE_API_CLEAN_FOE_TABLE(void)
+{
+ struct ecnt_ppe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = PPE_API_ID_CLEAN_FOE_TABLE;
+
+ ret = __ECNT_HOOK(ECNT_PPE, ECNT_DRIVER_PPE_API, (struct ecnt_data *)&in_data);
+
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int PPE_API_DUMP_FOE_PKT(void *skb)
+{
+ struct ecnt_ppe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = PPE_API_ID_DUMP_FOE_PKT;
+
+ in_data.skb = skb;
+
+ ret = __ECNT_HOOK(ECNT_PPE, ECNT_DRIVER_PPE_API, (struct ecnt_data *)&in_data);
+
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int PPE_API_ADD_FOE_ENTRY(void *hwnat_tuple)
+{
+ struct ecnt_ppe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = PPE_API_ID_ADD_FOE_ENTRY;
+
+ in_data.hwnat_tuple = hwnat_tuple;
+
+ ret = __ECNT_HOOK(ECNT_PPE, ECNT_DRIVER_PPE_API, (struct ecnt_data *)&in_data);
+
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int PPE_API_DLF_CLEAR_INVALID_INDEX(void)
+{
+ struct ecnt_ppe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = PPE_API_ID_DLF_CLEAR_INVALID ;
+
+ ret = __ECNT_HOOK(ECNT_PPE, ECNT_DRIVER_PPE_API, (struct ecnt_data *)&in_data);
+
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int PPE_API_DLF_IF_ENABLE(void)
+{
+ struct ecnt_ppe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = PPE_API_ID_DLF_IF_ENABLE ;
+
+ ret = __ECNT_HOOK(ECNT_PPE, ECNT_DRIVER_PPE_API, (struct ecnt_data *)&in_data);
+
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+
+static inline int PPE_API_SET_METER_GROUP(uint index, PPE_Meter_MaxBkSize_t maxBkSize,uint tokenRate,PPE_Meter_TickSel_t tickSel,PPE_Meter_Trtcm_t trtcmEn,PPE_Meter_Mode_t mode)
+{
+ struct ecnt_ppe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = PPE_API_ID_SET_METER_GROUP ;
+ in_data.index = index;
+ in_data.meter_cfg.mode = mode;
+ in_data.meter_cfg.trtcmEn = trtcmEn;
+ in_data.meter_cfg.tickSel = tickSel;
+ in_data.meter_cfg.tokenRate= tokenRate;
+ in_data.meter_cfg.maxBkSize = maxBkSize;
+
+ ret = __ECNT_HOOK(ECNT_PPE, ECNT_DRIVER_PPE_API, (struct ecnt_data *)&in_data);
+
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int PPE_API_GET_METER_GROUP(uint index, PPE_Meter_MaxBkSize_t* maxBkSize,uint* tokenRate,PPE_Meter_TickSel_t* tickSel,PPE_Meter_Trtcm_t* trtcmEn,PPE_Meter_Mode_t* mode)
+{
+ struct ecnt_ppe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = PPE_API_ID_GET_METER_GROUP ;
+ in_data.index = index;
+
+ ret = __ECNT_HOOK(ECNT_PPE, ECNT_DRIVER_PPE_API, (struct ecnt_data *)&in_data);
+
+ *maxBkSize = in_data.meter_cfg.maxBkSize;
+ *tokenRate = in_data.meter_cfg.tokenRate;
+ *tickSel = in_data.meter_cfg.tickSel;
+ *trtcmEn = in_data.meter_cfg.trtcmEn;
+ *mode = in_data.meter_cfg.mode;
+
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int PPE_API_GET_SOFT_CNT(uint index, uint* cntByte)
+{
+ struct ecnt_ppe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = PPE_API_ID_GET_SOFT_CNT ;
+ in_data.index = index;
+
+ ret = __ECNT_HOOK(ECNT_PPE, ECNT_DRIVER_PPE_API, (struct ecnt_data *)&in_data);
+
+ *cntByte = in_data.cntByte;
+
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int PPE_API_TRAFFIC_CLASSIFY(void *skb)
+{
+ struct ecnt_ppe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = PPE_API_ID_TRAFFIC_CLASSIFY ;
+
+ in_data.skb = skb;
+
+ ret = __ECNT_HOOK(ECNT_PPE, ECNT_DRIVER_PPE_API, (struct ecnt_data *)&in_data);
+
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+
+static inline int PPE_API_SET_TLS_CFG(uint index,struct ecnt_ppe_tls_cfg *ppe_tls_cfg)
+{
+ struct ecnt_ppe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = PPE_API_ID_SET_TLS_CFG ;
+
+ in_data.index = index;
+ in_data.ppe_tls_cfg = ppe_tls_cfg;
+
+ ret = __ECNT_HOOK(ECNT_PPE, ECNT_DRIVER_PPE_API, (struct ecnt_data *)&in_data);
+
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int PPE_API_SET_TLS_VID(uint index,ushort vid_lo,ushort vid_hi)
+{
+ struct ecnt_ppe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = PPE_API_ID_SET_TLS_VID ;
+
+ in_data.index = index;
+ in_data.tls_vid.vid_lo = vid_lo;
+ in_data.tls_vid.vid_hi = vid_hi;
+
+ ret = __ECNT_HOOK(ECNT_PPE, ECNT_DRIVER_PPE_API, (struct ecnt_data *)&in_data);
+
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int PPE_API_SET_KA_CFG(KA_Cfg_Value_t ka_cfg)
+{
+ struct ecnt_ppe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = PPE_API_ID_SET_KA_CFG;
+
+ in_data.ka_cfg = ka_cfg;
+
+ ret = __ECNT_HOOK(ECNT_PPE, ECNT_DRIVER_PPE_API, (struct ecnt_data *)&in_data);
+
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int PPE_API_GET_AGING_CFG(uint* aging_cfg)
+{
+ struct ecnt_ppe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = PPE_API_ID_GET_AGING_CFG;
+
+ ret = __ECNT_HOOK(ECNT_PPE, ECNT_DRIVER_PPE_API, (struct ecnt_data *)&in_data);
+
+ *aging_cfg = in_data.aging_cfg;
+
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int PPE_API_SET_AGING_CFG(uint aging_cfg)
+{
+ struct ecnt_ppe_data in_data;
+ int ret = 0;
+
+ in_data.function_id = PPE_API_ID_SET_AGING_CFG;
+
+ in_data.aging_cfg = aging_cfg;
+
+ ret = __ECNT_HOOK(ECNT_PPE, ECNT_DRIVER_PPE_API, (struct ecnt_data *)&in_data);
+
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+
+#endif /* _ECNT_HOOK_PPE_H_ */
+
+
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_ptm.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_ptm.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,92 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN "AS IS"
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER'S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_PTM_H_
+#define _ECNT_HOOK_PTM_H_
+
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include "ecnt_hook.h"
+#include "ecnt_hook_ptm_type.h"
+
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+
+static inline int PTM_AUTOBENCH_LOOPBACK(void)
+{
+ struct ECNT_PTM_DATA ptm_data;
+ int ret=0;
+
+ ptm_data.function_id = PTM_FUNCTION_AUTOBENCH_LOOPBACK;
+
+ ret = __ECNT_HOOK(ECNT_PTM, ECNT_DRIVER_API, (struct ecnt_data *)&ptm_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return ptm_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+# I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+
+#endif /* _ECNT_HOOK_PTM_H_ */
+
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_ptm_type.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_ptm_type.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,76 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN "AS IS"
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER'S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_PTM_TYPE_H_
+#define _ECNT_HOOK_PTM_TYPE_H_
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include "ecnt_hook.h"
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+#define ECNT_DRIVER_API 0
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+
+typedef enum {
+ PTM_FUNCTION_AUTOBENCH_LOOPBACK = 0,
+ PTM_FUNCTION_MAX_NUM
+} PTM_HookFunctionID_t ;
+
+struct ECNT_PTM_DATA
+{
+ PTM_HookFunctionID_t function_id;
+ int retValue;
+};
+
+typedef int (*ptm_op_t)(struct ECNT_PTM_DATA *ptm_data);
+
+#endif
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_qdma.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_qdma.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,1239 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN "AS IS"
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER'S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_QDMA_H_
+#define _ECNT_HOOK_QDMA_H_
+
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include "ecnt_hook.h"
+#include "ecnt_hook_qdma_type.h"
+
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+
+static inline int QDMA_API_INIT(unsigned int mainType, QDMA_InitCfg_t *pInitCfg) {
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_INIT;
+ in_data.qdma_private.pInitCfg = pInitCfg;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_DEINIT(unsigned int mainType){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_DEINIT;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_TX_DMA_MODE(unsigned int mainType, QDMA_Mode_t txMode){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_TX_DMA_MODE;
+ in_data.qdma_private.mode = txMode ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_RX_DMA_MODE(unsigned int mainType, QDMA_Mode_t rxMode){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_RX_DMA_MODE;
+ in_data.qdma_private.mode = rxMode ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_LOOPBACK_MODE(unsigned int mainType, QDMA_LoopbackMode_t lbMode){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_LOOPBACK_MODE;
+ in_data.qdma_private.lbMode = lbMode ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_REGISTER_HOOKFUNC(unsigned int mainType, QDMA_CbType_t type, void *pCbFun){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_REGISTER;
+ in_data.qdma_private.qdma_reg_cb.type = type ;
+ in_data.qdma_private.qdma_reg_cb.pCbFun = pCbFun ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_UNREGISTER_HOOKFUNC(unsigned int mainType, QDMA_CbType_t type){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_UNREGISTER;
+ in_data.qdma_private.qdma_reg_cb.type = type ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_ENABLE_RXPKT_INT(unsigned int mainType){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_ENABLE_RXPKT_INT;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_DISABLE_RXPKT_INT(unsigned int mainType){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_DISABLE_RXPKT_INT;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_RECEIVE_PACKETS(unsigned int mainType, int maxPkts){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_RECEIVE_PACKETS;
+ in_data.qdma_private.cnt = maxPkts ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_TRANSMIT_PACKETS(unsigned int mainType, struct sk_buff *skb, uint msg0, uint msg1, struct port_info *pMacInfo){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_TRANSMIT_PACKETS;
+ in_data.qdma_private.qdma_transmit.skb = skb ;
+ in_data.qdma_private.qdma_transmit.msg0 = msg0 ;
+ in_data.qdma_private.qdma_transmit.msg1 = msg1 ;
+ in_data.qdma_private.qdma_transmit.pMacInfo = pMacInfo ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_TX_QOS_WEIGHT(unsigned int mainType, QDMA_TxQosWeightType_t weightBase, QDMA_TxQosWeightScale_t weightScale){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_SET_TX_QOS_WEIGHT;
+ in_data.qdma_private.qdma_tx_qos.weightBase = weightBase ;
+ in_data.qdma_private.qdma_tx_qos.weightScale = weightScale ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_TX_QOS_WEIGHT(unsigned int mainType, QDMA_TxQosWeightType_t *pWeightBase, QDMA_TxQosWeightScale_t *pWeightScale){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_GET_TX_QOS_WEIGHT;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ *pWeightBase = in_data.qdma_private.qdma_tx_qos.weightBase ;
+ *pWeightScale = in_data.qdma_private.qdma_tx_qos.weightScale ;
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_TX_QOS(unsigned int mainType, QDMA_TxQosScheduler_T *pTxQos){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_SET_TX_QOS;
+ in_data.qdma_private.qdma_tx_qos.pTxQos = pTxQos ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_TX_QOS(unsigned int mainType, QDMA_TxQosScheduler_T *pTxQos){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_GET_TX_QOS;
+ in_data.qdma_private.qdma_tx_qos.pTxQos = pTxQos ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_MAC_LIMIT_THRESHOLD(unsigned int mainType, uint chnlThrh, uint queueThrh){
+ struct ECNT_QDMA_Data in_data;
+ QDMA_MacLimitThrh_T mac_limit_thrh;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_SET_MAC_LIMIT_THRESHOLD;
+ mac_limit_thrh.chnlThrh = chnlThrh ;
+ mac_limit_thrh.queueThrh = queueThrh ;
+ in_data.qdma_private.qdma_mac_limit_thrh = &mac_limit_thrh ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_TRANSMIT_PACKETS_WIFI_FAST(unsigned int mainType, struct sk_buff *skb, int ringIdx){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_TRANSMIT_PACKET_WIFI_FAST;
+ in_data.qdma_private.qdma_transmit_wifi_fast.skb = skb ;
+ in_data.qdma_private.qdma_transmit_wifi_fast.ringIdx = ringIdx;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_TXBUF_THRESHOLD(unsigned int mainType, QDMA_TxBufCtrl_T *pTxBufCtrl){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_SET_TXBUF_THRESHOLD;
+ in_data.qdma_private.pQdmaTxBufCtrl = pTxBufCtrl ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_TXBUF_THRESHOLD(unsigned int mainType, QDMA_TxBufCtrl_T *pTxBufCtrl){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_GET_TXBUF_THRESHOLD;
+ in_data.qdma_private.pQdmaTxBufCtrl = pTxBufCtrl ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_PREFETCH_MODE(unsigned int mainType, QDMA_Mode_t prefecthMode, QDMA_Mode_t overDragMode, uint overDragCnt){
+ struct ECNT_QDMA_Data in_data;
+ QDMA_PrefetchMode_T qdma_prefetch;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_SET_PREFETCH_MODE;
+ qdma_prefetch.prefecthMode = prefecthMode ;
+ qdma_prefetch.overDragMode = overDragMode ;
+ qdma_prefetch.overDragCnt = overDragCnt ;
+ in_data.qdma_private.qdma_prefetch = &qdma_prefetch ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_PKTSIZE_OVERHEAD_EN(unsigned int mainType, QDMA_Mode_t mode){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_SET_PKTSIZE_OVERHEAD_EN;
+ in_data.qdma_private.mode = mode ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline QDMA_Mode_t QDMA_API_GET_PKTSIZE_OVERHEAD_EN(unsigned int mainType){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_GET_PKTSIZE_OVERHEAD_EN;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_PKTSIZE_OVERHEAD_VALUE(unsigned int mainType, int cnt){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_SET_PKTSIZE_OVERHEAD_VALUE;
+ in_data.qdma_private.cnt = cnt ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline uint QDMA_API_GET_PKTSIZE_OVERHEAD_VALUE(unsigned int mainType){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_GET_PKTSIZE_OVERHEAD_VALUE;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_LMGR_LOW_THRESHOLD(unsigned int mainType, uint lowThrh){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_SET_LMGR_LOW_THRESHOLD;
+ in_data.qdma_private.threshold = lowThrh ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline uint QDMA_API_GET_LMGR_LOW_THRESHOLD(unsigned int mainType){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_GET_LMGR_LOW_THRESHOLD;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+#if defined(TCSUPPORT_CPU_EN7580)
+static inline int QDMA_API_SET_GENERAL_TRTCM_INIT(unsigned int mainType, GENERAL_TrtcmCfg_T *generalTrtcmCfgPtr){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_GENERAL_SET_TRTCM_CFG;
+ in_data.qdma_private.generalTrtcmCfgPtr = generalTrtcmCfgPtr ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_GENERAL_TRTCM_INIT(unsigned int mainType, GENERAL_TrtcmCfg_T *generalTrtcmCfgPtr){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_GENERAL_GET_TRTCM_CFG;
+ in_data.qdma_private.generalTrtcmCfgPtr = generalTrtcmCfgPtr ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_GENERAL_RATELIMIT_CFG(unsigned int mainType, GENERAL_TrtcmRatelimitCfg_T *generalTrtcmRatelimitCfgPtr){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_GENERAL_SET_RATELIMIT_MODE_CFG;
+ in_data.qdma_private.generalTrtcmRatelimitCfgPtr = generalTrtcmRatelimitCfgPtr ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_GENERAL_RATELIMIT_CFG(unsigned int mainType, GENERAL_TrtcmRatelimitCfg_T *generalTrtcmRatelimitCfgPtr){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_GENERAL_GET_RATELIMIT_MODE_CFG;
+ in_data.qdma_private.generalTrtcmRatelimitCfgPtr = generalTrtcmRatelimitCfgPtr ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_GENERAL_RATELIMIT_VALUE(unsigned int mainType, GENERAL_TrtcmRatelimitSet_T *generalTrtcmRatelimitSetPtr){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_GENERAL_SET_RATELIMIT_MODE_VALUE;
+ in_data.qdma_private.generalTrtcmRatelimitSetPtr = generalTrtcmRatelimitSetPtr ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_GENERAL_RATELIMIT_VALUE(unsigned int mainType, GENERAL_TrtcmRatelimitSet_T *generalTrtcmRatelimitSetPtr){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_GENERAL_GET_RATELIMIT_MODE_VALUE;
+ in_data.qdma_private.generalTrtcmRatelimitSetPtr = generalTrtcmRatelimitSetPtr ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_GENERAL_TRTCM_CFG(unsigned int mainType, GENERAL_TrtcmCbsPbsCfg_T *generalTrtcmCbsPbsCfgPtr){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_GENERAL_SET_TRTCM_MODE_CFG;
+ in_data.qdma_private.generalTrtcmCbsPbsCfgPtr = generalTrtcmCbsPbsCfgPtr ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_GENERAL_TRTCM_CFG(unsigned int mainType, GENERAL_TrtcmCbsPbsCfg_T *generalTrtcmCbsPbsCfgPtr){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_GENERAL_GET_TRTCM_MODE_CFG;
+ in_data.qdma_private.generalTrtcmCbsPbsCfgPtr = generalTrtcmCbsPbsCfgPtr ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_GENERAL_TRTCM_VALUE(unsigned int mainType, GENERAL_TrtcmCbsPbsSet_T *generalTrtcmCbsPbsSetPtr){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_GENERAL_SET_TRTCM_MODE_VALUE;
+ in_data.qdma_private.generalTrtcmCbsPbsSetPtr = generalTrtcmCbsPbsSetPtr ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_GENERAL_TRTCM_VALUE(unsigned int mainType, GENERAL_TrtcmCbsPbsSet_T *generalTrtcmCbsPbsSetPtr){
+ struct ECNT_QDMA_Data in_data;
+ int ret=0;
+
+ in_data.function_id = QDMA_FUNCTION_GENERAL_GET_TRTCM_MODE_VALUE;
+ in_data.qdma_private.generalTrtcmCbsPbsSetPtr = generalTrtcmCbsPbsSetPtr ;
+ ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return in_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+#endif
+
+static inline int QDMA_API_SET_TX_DROP_EN(unsigned int mainType, QDMA_Mode_t mode){ /* Enable/disable QDMA TX drop; returns driver retValue or ECNT_HOOK_ERROR. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_SET_TX_DROP_EN;
+	in_data.qdma_private.mode = mode ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_TX_DROP_EN(unsigned int mainType){ /* Query the TX-drop enable state via the driver hook. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_GET_TX_DROP_EN;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_TX_RATEMETER(unsigned int mainType, QDMA_TxRateMeter_T *txRateMeterPtr){ /* Program the TX rate meter from *txRateMeterPtr. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_SET_TX_RATEMETER;
+	in_data.qdma_private.txRateMeterPtr = txRateMeterPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_TX_RATEMETER(unsigned int mainType, QDMA_TxRateMeter_T *txRateMeterPtr){ /* Read the TX rate-meter configuration into *txRateMeterPtr. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_GET_TX_RATEMETER;
+	in_data.qdma_private.txRateMeterPtr = txRateMeterPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_ENABLE_TX_RATELIMIT(unsigned int mainType){ /* Globally enable TX rate limiting (no arguments beyond the main type). */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_ENABLE_TX_RATELIMIT;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+#if 0 /* disabled: presumably superseded by QDMA_API_SET/GET_TX_RATELIMIT below — confirm before re-enabling */
+static inline int QDMA_API_SET_TX_RATELIMIT_CFG(unsigned int mainType, QDMA_TxRateLimitCfg_T *txRateLimitCfgPtr){ /* (dead code) set TX rate-limit configuration */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_SET_TX_RATELIMIT_CFG;
+	in_data.qdma_private.txRateLimitCfgPtr = txRateLimitCfgPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_TX_RATELIMIT_CFG(unsigned int mainType, QDMA_TxRateLimitCfg_T *txRateLimitCfgPtr){ /* (dead code) get TX rate-limit configuration */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_GET_TX_RATELIMIT_CFG;
+	in_data.qdma_private.txRateLimitCfgPtr = txRateLimitCfgPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+#endif
+
+static inline int QDMA_API_SET_TX_RATELIMIT(unsigned int mainType, QDMA_TxRateLimitSet_T *txRateLimitSetPtr){ /* Apply a TX rate-limit setting; returns driver retValue or ECNT_HOOK_ERROR. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_SET_TX_RATELIMIT;
+	in_data.qdma_private.txRateLimitSetPtr = txRateLimitSetPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_TX_RATELIMIT(unsigned int mainType, QDMA_TxRateLimitGet_T *txRateLimitGetPtr){ /* Read the TX rate-limit state into *txRateLimitGetPtr. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_GET_TX_RATELIMIT;
+	in_data.qdma_private.txRateLimitGetPtr = txRateLimitGetPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_TX_DBA_REPORT(unsigned int mainType, QDMA_TxDbaReport_T *txDbaReportPtr){ /* Configure TX DBA (dynamic bandwidth allocation) reporting. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_SET_TX_DBAREPORT;
+	in_data.qdma_private.txDbaReportPtr = txDbaReportPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_TX_DBA_REPORT(unsigned int mainType, QDMA_TxDbaReport_T *txDbaReportPtr){ /* Read the TX DBA reporting configuration into *txDbaReportPtr. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_GET_TX_DBAREPORT;
+	in_data.qdma_private.txDbaReportPtr = txDbaReportPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_RX_PROTECT_EN(unsigned int mainType, QDMA_Mode_t mode){ /* Enable/disable RX protection. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_SET_RX_PROTECT_EN;
+	in_data.qdma_private.mode = mode ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_RX_PROTECT_EN(unsigned int mainType){ /* Query the RX protection enable state. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_GET_RX_PROTECT_EN;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_RX_LOW_THRESHOLD(unsigned int mainType, QDMA_RxLowThreshold_T *rxLowThrhPtr){ /* Set the RX low-watermark threshold; returns driver retValue or ECNT_HOOK_ERROR. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_SET_RX_LOW_THRESHOLD;
+	in_data.qdma_private.rxLowThresholdPtr = rxLowThrhPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_RX_LOW_THRESHOLD(unsigned int mainType, QDMA_RxLowThreshold_T *rxLowThrhPtr){ /* Read the RX low-watermark threshold into *rxLowThrhPtr. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_GET_RX_LOW_THRESHOLD;
+	in_data.qdma_private.rxLowThresholdPtr = rxLowThrhPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_RX_RATELIMIT_EN(unsigned int mainType, QDMA_Mode_t mode){ /* Enable/disable RX rate limiting. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_SET_RX_RATELIMIT_EN;
+	in_data.qdma_private.mode = mode ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_RX_RATELIMIT_PKT_MODE(unsigned int mainType, QDMA_RxPktMode_t pktMode ) /* Select the RX rate-limit packet-accounting mode. */
+{
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_SET_RX_RATELIMIT_PKT_MODE;
+	in_data.qdma_private.pktMode = pktMode ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_RX_RATELIMIT_CFG(unsigned int mainType, QDMA_RxRateLimitCfg_T *rxRateLimitCfgPtr){ /* Read the RX rate-limit configuration into *rxRateLimitCfgPtr. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_GET_RX_RATELIMIT_CFG;
+	in_data.qdma_private.rxRateLimitCfgPtr = rxRateLimitCfgPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_RX_RATELIMIT(unsigned int mainType, QDMA_RxRateLimitSet_T *rxRateLimitSetPtr){ /* Apply an RX rate-limit setting. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_SET_RX_RATELIMIT;
+	in_data.qdma_private.rxRateLimitSetPtr = rxRateLimitSetPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_RX_RATELIMIT(unsigned int mainType, QDMA_RxRateLimitGet_T *rxRateLimitGetPtr){ /* Read the RX rate-limit state into *rxRateLimitGetPtr. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_GET_RX_RATELIMIT;
+	in_data.qdma_private.rxRateLimitGetPtr = rxRateLimitGetPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_TXQ_DEI_DROP_MODE(unsigned int mainType, QDMA_Mode_t deiDropMode){ /* Enable/disable DEI-based drop on TX queues. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_SET_TXQ_DEI_DROP_MODE;
+	in_data.qdma_private.mode = deiDropMode ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline QDMA_Mode_t QDMA_API_GET_TXQ_DEI_DROP_MODE(unsigned int mainType){ /* Query the DEI drop mode. NOTE(review): ECNT_HOOK_ERROR (an int) is funneled through the enum return type on failure — callers must check for it. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_GET_TXQ_DEI_DROP_MODE;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_TXQ_CNGST_MODE(unsigned int mainType, QDMA_Mode_t dynCngstEn){ /* Enable/disable dynamic TX-queue congestion handling. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_SET_TXQ_CNGST_MODE;
+	in_data.qdma_private.mode = dynCngstEn ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline QDMA_Mode_t QDMA_API_GET_TXQ_CNGST_MODE(unsigned int mainType){ /* Query the congestion mode (same ECNT_HOOK_ERROR-through-enum caveat as above). */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_GET_TXQ_CNGST_MODE;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_TXQ_DEI_THRESHOLD_SCALE(unsigned int mainType, QDMA_TxDynCngstDeiThrhScale_t deiThrhScale){ /* Set the DEI threshold scale (carried in the generic 'threshold' union field). */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_SET_TXQ_DEI_THRH_SCALE;
+	in_data.qdma_private.threshold = deiThrhScale ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline QDMA_TxDynCngstDeiThrhScale_t QDMA_API_GET_TXQ_DEI_THRESHOLD_SCALE(unsigned int mainType){ /* Query the DEI threshold scale (same ECNT_HOOK_ERROR-through-enum caveat). */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_GET_TXQ_DEI_THRH_SCALE;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_TXQ_CNGST_AUTO_CONFIG(unsigned int mainType, QDMA_txCngstCfg_t *pTxCngstCfg){ /* Apply automatic TX-queue congestion configuration; returns driver retValue or ECNT_HOOK_ERROR. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_SET_TXQ_CNGST_AUTO_CONFIG;
+	in_data.qdma_private.pTxCngstCfg = pTxCngstCfg ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_TXQ_CNGST_AUTO_CONFIG(unsigned int mainType, QDMA_txCngstCfg_t *pTxCngstCfg){ /* Read the automatic congestion configuration into *pTxCngstCfg. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_GET_TXQ_CNGST_AUTO_CONFIG;
+	in_data.qdma_private.pTxCngstCfg = pTxCngstCfg ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_TXQ_CNGST_DYNAMIC_THRESHOLD(unsigned int mainType, QDMA_TxQDynCngstThrh_T *txqCngstThrhPtr){ /* Set per-queue dynamic congestion thresholds. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_SET_TXQ_CNGST_DYNAMIC_THRESHOLD;
+	in_data.qdma_private.dynCngstThrhPtr = txqCngstThrhPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_TXQ_CNGST_DYNAMIC_THRESHOLD(unsigned int mainType, QDMA_TxQDynCngstThrh_T *txqCngstThrhPtr){ /* Read per-queue dynamic congestion thresholds. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_GET_TXQ_CNGST_DYNAMIC_THRESHOLD;
+	in_data.qdma_private.dynCngstThrhPtr = txqCngstThrhPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_TXQ_CNGST_TOTAL_THRESHOLD(unsigned int mainType, QDMA_TxQDynCngstTotalThrh_T *totalThrhPtr){ /* Set the total (aggregate) congestion threshold. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_SET_TXQ_CNGST_TOTAL_THRESHOLD;
+	in_data.qdma_private.totalThrhPtr = totalThrhPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_TXQ_CNGST_TOTAL_THRESHOLD(unsigned int mainType, QDMA_TxQDynCngstTotalThrh_T *totalThrhPtr){ /* Read the total congestion threshold. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_GET_TXQ_CNGST_TOTAL_THRESHOLD;
+	in_data.qdma_private.totalThrhPtr = totalThrhPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_TXQ_CNGST_CHANNEL_THRESHOLD(unsigned int mainType, QDMA_TxQDynCngstChnlThrh_T *chnlThrhPtr){ /* Set the per-channel congestion threshold. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_SET_TXQ_CNGST_CHANNEL_THRESHOLD;
+	in_data.qdma_private.chnlThrhPtr = chnlThrhPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_TXQ_CNGST_CHANNEL_THRESHOLD(unsigned int mainType, QDMA_TxQDynCngstChnlThrh_T *chnlThrhPtr){ /* Read the per-channel congestion threshold. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_GET_TXQ_CNGST_CHANNEL_THRESHOLD;
+	in_data.qdma_private.chnlThrhPtr = chnlThrhPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_TXQ_CNGST_QUEUE_THRESHOLD(unsigned int mainType, QDMA_TxQDynCngstQueueThrh_T *queueThrhPtr){ /* Set the per-queue congestion threshold; returns driver retValue or ECNT_HOOK_ERROR. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_SET_TXQ_CNGST_QUEUE_THRESHOLD;
+	in_data.qdma_private.queueThrhPtr = queueThrhPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_TXQ_CNGST_QUEUE_THRESHOLD(unsigned int mainType, QDMA_TxQDynCngstQueueThrh_T *queueThrhPtr){ /* Read the per-queue congestion threshold. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_GET_TXQ_CNGST_QUEUE_THRESHOLD;
+	in_data.qdma_private.queueThrhPtr = queueThrhPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_TXQ_CNGST_QUEUE_NONBLOCKING(unsigned int mainType, QDMA_TxQCngstQueueCfg_T *txqCngstQueueCfgPtr){ /* Configure per-queue non-blocking congestion behavior. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_SET_TXQ_CNGST_QUEUE_NONBLOCKING;
+	in_data.qdma_private.txqCngstQueueCfgPtr = txqCngstQueueCfgPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_TXQ_CNGST_QUEUE_NONBLOCKING(unsigned int mainType, QDMA_TxQCngstQueueCfg_T *txqCngstQueueCfgPtr){ /* Read per-queue non-blocking congestion configuration. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_GET_TXQ_CNGST_QUEUE_NONBLOCKING;
+	in_data.qdma_private.txqCngstQueueCfgPtr = txqCngstQueueCfgPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_TXQ_CNGST_CHANNEL_NONBLOCKING(unsigned int mainType, QDMA_TxQCngstChannelCfg_T *txqCngstChannelCfgPtr){ /* Configure per-channel non-blocking congestion behavior. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_SET_TXQ_CNGST_CHANNEL_NONBLOCKING;
+	in_data.qdma_private.txqCngstChannelCfgPtr = txqCngstChannelCfgPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_TXQ_CNGST_CHANNEL_NONBLOCKING(unsigned int mainType, QDMA_TxQCngstChannelCfg_T *txqCngstChannelCfgPtr){ /* Read per-channel non-blocking congestion configuration. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_GET_TXQ_CNGST_CHANNEL_NONBLOCKING;
+	in_data.qdma_private.txqCngstChannelCfgPtr = txqCngstChannelCfgPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+
+static inline int QDMA_API_SET_TXQ_PEEKRATE_PARAMS(unsigned int mainType, QDMA_PeekRateCfg_t *peekrateCfgPtr){ /* Set TX-queue peek-rate parameters; returns driver retValue or ECNT_HOOK_ERROR. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_SET_PEEKRATE_PARAMS;
+	in_data.qdma_private.peekrateCfgPtr = peekrateCfgPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_TXQ_PEEKRATE_PARAMS(unsigned int mainType, QDMA_PeekRateCfg_t *peekrateCfgPtr){ /* Read TX-queue peek-rate parameters into *peekrateCfgPtr. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_GET_PEEKRATE_PARAMS;
+	in_data.qdma_private.peekrateCfgPtr = peekrateCfgPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_TXQ_STATIC_QUEUE_NORMAL_THRESHOLD(unsigned int mainType, QDMA_TxQStaticNormalCfg_T *normThrhPtr){ /* Set the static per-queue normal-traffic threshold. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_SET_TXQ_STATIC_QUEUE_NORMAL_THRESHOLD;
+	in_data.qdma_private.normThrhPtr = normThrhPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_TXQ_STATIC_QUEUE_DEI_THRESHOLD(unsigned int mainType, QDMA_TxQStaticDeiCfg_T *deiThrhPtr){ /* Set the static per-queue DEI threshold. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_SET_TXQ_STATIC_QUEUE_DEI_THRESHOLD;
+	in_data.qdma_private.deiThrhPtr = deiThrhPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_TXQ_CNGST_DYNAMIC_INFO(unsigned int mainType, QDMA_TxQDynamicCngstInfo_T *allThrhPtr){ /* Read the full dynamic congestion state into *allThrhPtr (union field dynCfgPtr). */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_GET_TXQ_CNGST_DYNAMIC_INFO;
+	in_data.qdma_private.dynCfgPtr = allThrhPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_TXQ_CNGST_STATIC_INFO(unsigned int mainType, QDMA_TxQStaticCngstInfo_T *staticInfoPtr){ /* Read the static congestion state into *staticInfoPtr (union field staticCfgPtr). */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_GET_TXQ_CNGST_STATIC_INFO;
+	in_data.qdma_private.staticCfgPtr = staticInfoPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_VIRTUAL_CHANNEL_MODE(unsigned int mainType, QDMA_VirtualChannelMode_T *virChnlModePtr){ /* Set the virtual-channel mode; returns driver retValue or ECNT_HOOK_ERROR. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_SET_VIRTUAL_CHANNEL_MODE;
+	in_data.qdma_private.virChnlModePtr = virChnlModePtr;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_VIRTUAL_CHANNEL_MODE(unsigned int mainType, QDMA_VirtualChannelMode_T *virChnlModePtr){ /* Read the virtual-channel mode into *virChnlModePtr. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_GET_VIRTUAL_CHANNEL_MODE;
+	in_data.qdma_private.virChnlModePtr = virChnlModePtr;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_VIRTUAL_CHANNEL_QOS(unsigned int mainType, QDMA_VirtualChannelQoS_T *virChnlQoSPtr){ /* Set virtual-channel QoS parameters. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_SET_VIRTUAL_CHANNEL_QOS;
+	in_data.qdma_private.virChnlQoSPtr = virChnlQoSPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_GET_VIRTUAL_CHANNEL_QOS(unsigned int mainType, QDMA_VirtualChannelQoS_T *virChnlQoSPtr){ /* Read virtual-channel QoS parameters into *virChnlQoSPtr. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+
+	in_data.function_id = QDMA_FUNCTION_GET_VIRTUAL_CHANNEL_QOS;
+	in_data.qdma_private.virChnlQoSPtr = virChnlQoSPtr ;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_READ_VIP_INFO(unsigned int mainType){ /* Ask the driver to read/dump VIP info; no argument payload. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+	in_data.function_id = QDMA_FUNCTION_READ_VIP_INFO;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_SET_CNTR_CHANNEL(unsigned int mainType,unsigned channel_id){ /* Select the counter channel identified by channel_id. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+	in_data.function_id = QDMA_FUNCTION_SET_CHANNEL;
+	in_data.qdma_private.channel = channel_id;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+static inline int QDMA_API_DUMP_CNTR_CHANNEL(unsigned int mainType,unsigned channel_id){ /* Ask the driver to dump counters for channel_id. */
+	struct ECNT_QDMA_Data in_data;
+	int ret=0;
+	in_data.function_id = QDMA_FUNCTION_DUMP_CHANNEL;
+	in_data.qdma_private.channel = channel_id;
+	ret = __ECNT_HOOK(mainType, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+	if(ret != ECNT_HOOK_ERROR)
+		return in_data.retValue;
+	else
+		return ECNT_HOOK_ERROR;
+}
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+# I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+
+#endif /* _ECNT_HOOK_QDMA_H_ */
+
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_qdma_7510_20.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_qdma_7510_20.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,98 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited ("EcoNet"). ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER'S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_QDMA_7510_20_H_
+#define _ECNT_HOOK_QDMA_7510_20_H_
+
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include "ecnt_hook.h"
+
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+struct qdma_green_drop_ctrl_s{ /* payload carried to ECNT_QDMA_7510_20 hook listeners */
+	int hw_qos_flag; /* hardware-QoS flag value forwarded to the hook */
+};
+
+enum ecnt_qdma_7510_20_subtype{ /* hook subtypes under the ECNT_QDMA_7510_20 main type */
+	ECNT_QDMA_SET_QOS_FLAG,
+};
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+# I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+static inline int ECNT_QDMA_GREEN_DROP_CTRL_HOOK(int subtype, int pHwQosFlag){ /* Fire the QDMA-7510/20 hook for 'subtype'. NOTE(review): pHwQosFlag is passed by value despite the 'p' prefix. */
+	struct qdma_green_drop_ctrl_s data;
+	int ret;
+
+	if (!list_empty(&ecnt_hooks[ECNT_QDMA_7510_20][subtype])){ /* dispatch only when a listener is registered */
+
+		data.hw_qos_flag = pHwQosFlag;
+
+		ret = __ECNT_HOOK(ECNT_QDMA_7510_20, subtype, (struct ecnt_data *)&data);
+
+		return ret; /* listener verdict */
+	}
+	return ECNT_CONTINUE; /* no hook installed: caller proceeds normally */
+}
+
+#endif /* _ECNT_HOOK_QDMA_7510_20_H_ */
+
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_qdma_type.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_qdma_type.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,1080 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER'S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_QDMA_TYPE_H_
+#define _ECNT_HOOK_QDMA_TYPE_H_
+
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+#ifdef QDMA_LAN
+#define ECNT_QDMA ECNT_QDMA_LAN
+#define storm_ctrl_shrehold storm_ctrl_shrehold_lan
+#define qdma_fwd_timer qdma_lan_fwd_timer
+#else
+#define ECNT_QDMA ECNT_QDMA_WAN
+#define storm_ctrl_shrehold storm_ctrl_shrehold_wan
+#define qdma_fwd_timer qdma_wan_fwd_timer
+#endif
+
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+#define CONFIG_QDMA_QUEUE 8
+#define ECNT_DRIVER_API 0
+
+#ifdef TCSUPPORT_CPU_EN7580
+#define TX_RING_NUM (8)
+#else /*TCSUPPORT_CPU_EN7527 || TCSUPPORT_CPU_EN7516 and before */
+#define TX_RING_NUM (2)
+#endif
+
+#if defined(TCSUPPORT_CPU_EN7580) || defined(TCSUPPORT_CPU_EN7527) || defined(TCSUPPORT_CPU_EN7516)
+#define RX_RING_NUM (16)
+#else
+#define RX_RING_NUM (2)
+#endif
+
+#define RING_IDX_0 (0)
+#define RING_IDX_1 (1)
+#define RING_IDX_2 (2)
+#define RING_IDX_3 (3)
+#define RING_IDX_4 (4)
+#define RING_IDX_5 (5)
+#define RING_IDX_6 (6)
+#define RING_IDX_7 (7)
+#define RING_IDX_8 (8)
+#define RING_IDX_9 (9)
+#define RING_IDX_10 (10)
+#define RING_IDX_11 (11)
+#define RING_IDX_12 (12)
+#define RING_IDX_13 (13)
+#define RING_IDX_14 (14)
+#define RING_IDX_15 (15)
+
+#if defined(TCSUPPORT_CPU_EN7580) || defined(TCSUPPORT_CPU_EN7527) || defined(TCSUPPORT_CPU_EN7516)
+#define QDMA_INT_IDX_1 (1)
+#define QDMA_INT_IDX_2 (2)
+#define QDMA_INT_IDX_3 (3)
+#define QDMA_INT_IDX_4 (4)
+#endif
+
+#define QDMA_BUSY_TIMER_MAX 60 /* 60s */
+#define QDMA_REG_POLLING_MAX 60 /* 60s */
+
+#define PER_CHNL_TICKSEL_NUM (2)
+
+#define QDMA_MAGIC (0xAB)
+#define IO_QDMA_CMD_OPT _IOW(QDMA_MAGIC, 0, struct ECNT_QDMA_Data *)
+#if 0
+/* Interface IO Command */
+#define IO_QDMA_SET_QOS_CONFIG _IOW(QDMA_MAGIC, 0x01, struct ECNT_QDMA_Data *)
+#define IO_QDMA_SET_QOS_VALUE _IOW(QDMA_MAGIC, 0x02, struct ECNT_QDMA_Data *)
+#define IO_QDMA_GET_QOS _IOR(QDMA_MAGIC, 0x02, struct ECNT_QDMA_Data *)
+#define IO_QDMA_SET_TX_RATELIMIT_VALUE _IOW(QDMA_MAGIC, 0x10, struct ECNT_QDMA_Data *)
+#define IO_QDMA_GET_TX_RATELIMIT_VALUE _IOR(QDMA_MAGIC, 0x10, struct ECNT_QDMA_Data *)
+#define IO_QDMA_SET_RX_RATELIMIT_VALUE _IOW(QDMA_MAGIC, 0x18, struct ECNT_QDMA_Data *)
+#define IO_QDMA_GET_RX_RATELIMIT_VALUE _IOR(QDMA_MAGIC, 0x18, struct ECNT_QDMA_Data *)
+#define IO_QDMA_SET_RX_RATELIMIT_EN _IOW(QDMA_MAGIC, 0x19, struct ECNT_QDMA_Data *)
+#define IO_QDMA_SET_RX_RATELIMIT_PKT_MODE _IOW(QDMA_MAGIC, 0x1A, struct ECNT_QDMA_Data *)
+#define IO_QDMA_SET_VIRTUAL_CHANNEL_CONFIG _IOW(QDMA_MAGIC, 0x20, struct ECNT_QDMA_Data *)
+#define IO_QDMA_SET_VIRTUAL_CHANNEL_VALUE _IOW(QDMA_MAGIC, 0x21, struct ECNT_QDMA_Data *)
+#define IO_QDMA_GET_VIRTUAL_CHANNEL _IOR(QDMA_MAGIC, 0x21, struct ECNT_QDMA_Data *)
+#define IO_QDMA_SET_RX_LOW_THRESHOLD _IOW(QDMA_MAGIC, 0x24, struct ECNT_QDMA_Data *)
+#define IO_QDMA_GET_RX_LOW_THRESHOLD _IOR(QDMA_MAGIC, 0x24, struct ECNT_QDMA_Data *)
+#define IO_QDMA_SET_DEI_DROP_EN _IOW(QDMA_MAGIC, 0x25, struct ECNT_QDMA_Data *)
+#define IO_QDMA_GET_DEI_DROP_EN _IOR(QDMA_MAGIC, 0x25, struct ECNT_QDMA_Data *)
+
+#define IO_QDMA_CLEAR_CPU_COUNTER _IOW(QDMA_MAGIC, 0xC1, struct ECNT_QDMA_Data *)
+#define IO_QDMA_DUMP_CPU_COUNTER _IOW(QDMA_MAGIC, 0xC2, struct ECNT_QDMA_Data *)
+#define IO_QDMA_DUMP_REGISTER _IOW(QDMA_MAGIC, 0xC3, struct ECNT_QDMA_Data *)
+#define IO_QDMA_DUMP_DESCRIPTOR _IOW(QDMA_MAGIC, 0xC4, struct ECNT_QDMA_Data *)
+#define IO_QDMA_DUMP_IRQ _IOW(QDMA_MAGIC, 0xC5, struct ECNT_QDMA_Data *)
+#define IO_QDMA_DUMP_HWFWD _IOW(QDMA_MAGIC, 0xC6, struct ECNT_QDMA_Data *)
+#endif
+
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+
+struct port_info;
+
+/***************************************
+ enum definition
+***************************************/
+typedef enum {
+ DBG_ERR ,
+ DBG_ST ,
+ DBG_WARN ,
+ DBG_MSG ,
+ DBG_LEVEL_MAX
+} QDMA_DebugLevel_t ;
+
+typedef enum {
+ QDMA_CALLBACK_RX_PACKET,
+#if defined(TCSUPPORT_FTTDP_V2) && defined(QDMA_LAN)
+ QDMA_CALLBACK_RX_PACKET_PTM_LAN,
+#endif
+ QDMA_CALLBACK_EVENT_HANDLER,
+ QDMA_CALLBACK_RECYCLE_PACKET,
+ QDMA_CALLBACK_GPON_MAC_HANDLER,
+ QDMA_CALLBACK_EPON_MAC_HANDLER,
+ QDMA_CALLBACK_XPON_PHY_HANDLER,
+ /*multi-INTs extend*/
+ QDMA_CALLBACK_INT2_RX_PACKET,
+ QDMA_CALLBACK_INT2_EVENT_HANDLER,
+ QDMA_CALLBACK_INT3_RX_PACKET,
+ QDMA_CALLBACK_INT3_EVENT_HANDLER,
+ QDMA_CALLBACK_INT4_RX_PACKET,
+ QDMA_CALLBACK_INT4_EVENT_HANDLER,
+} QDMA_CbType_t ;
+
+typedef enum {
+ QDMA_EVENT_RECV_PKTS = 0 ,
+ QDMA_EVENT_NO_RX_BUFFER ,
+ QDMA_EVENT_TX_CROWDED
+} QDMA_EventType_t ;
+
+typedef enum {
+ QDMA_LOOPBACK_DISABLE = 0 ,
+ QDMA_LOOPBACK_QDMA ,
+ QDMA_LOOPBACK_UMAC
+} QDMA_LoopbackMode_t ;
+
+typedef enum {
+ QDMA_TX_POLLING = 0 ,
+ QDMA_TX_INTERRUPT ,
+} QDMA_RecycleMode_t ;
+
+typedef enum {
+ QDMA_RX_POLLING = 0 ,
+ QDMA_RX_INTERRUPT ,
+ QDMA_RX_NAPI
+} QDMA_RecvMode_t ;
+
+typedef enum {
+ QDMA_DISABLE = 0 ,
+ QDMA_ENABLE
+} QDMA_Mode_t ;
+
+typedef enum {
+ QDMA_WAN_TYPE_GPON = 0,
+ QDMA_WAN_TYPE_EPON,
+ QDMA_WAN_TYPE_PTM,
+ QDMA_WAN_TYPE_SAR
+} QDMA_WanType_t ;
+
+typedef enum {
+ QDMA_TXQOS_WEIGHT_BY_PACKET = 0,
+ QDMA_TXQOS_WEIGHT_BY_BYTE,
+ QDMA_TXQOS_WEIGHT_MAX,
+} QDMA_TxQosWeightType_t ;
+
+typedef enum {
+ QDMA_TXQOS_WEIGHT_SCALE_64B = 0,
+ QDMA_TXQOS_WEIGHT_SCALE_16B,
+ QDMA_TXQOS_WEIGHT_SCALE_MAX,
+} QDMA_TxQosWeightScale_t ;
+
+typedef enum {
+ QDMA_TXQOS_TYPE_WRR = 0,
+ QDMA_TXQOS_TYPE_SP,
+ QDMA_TXQOS_TYPE_SPWRR7,
+ QDMA_TXQOS_TYPE_SPWRR6,
+ QDMA_TXQOS_TYPE_SPWRR5,
+ QDMA_TXQOS_TYPE_SPWRR4,
+ QDMA_TXQOS_TYPE_SPWRR3,
+ QDMA_TXQOS_TYPE_SPWRR2,
+ QDMA_TXQOS_TYPE_NUMS
+} QDMA_TxQosType_t ;
+
+typedef enum {
+ QDMA_VCHNL_TXQOS_TYPE_WRR = 0,
+ QDMA_VCHNL_TXQOS_TYPE_SP,
+ QDMA_VCHNL_TXQOS_TYPE_SPWRR3,
+ QDMA_VCHNL_TXQOS_TYPE_SPWRR2,
+ QDMA_VCHNL_TXQOS_TYPE_NUMS
+} QDMA_VirChnlQosType_t ;
+
+typedef enum {
+ QDMA_TRTCM_SCALE_1BYTE = 0,
+ QDMA_TRTCM_SCALE_2BYTE,
+ QDMA_TRTCM_SCALE_4BYTE,
+ QDMA_TRTCM_SCALE_8BYTE,
+ QDMA_TRTCM_SCALE_16BYTE,
+ QDMA_TRTCM_SCALE_32BYTE,
+ QDMA_TRTCM_SCALE_64BYTE,
+ QDMA_TRTCM_SCALE_128BYTE,
+ QDMA_TRTCM_SCALE_256BYTE,
+ QDMA_TRTCM_SCALE_512BYTE,
+ QDMA_TRTCM_SCALE_1KBYTE,
+ QDMA_TRTCM_SCALE_2KBYTE,
+ QDMA_TRTCM_SCALE_4KBYTE,
+ QDMA_TRTCM_SCALE_8KBYTE,
+ QDMA_TRTCM_SCALE_16KBYTE,
+ QDMA_TRTCM_SCALE_32KBYTE,
+ QDMA_TRTCM_SCALE_MAX_ITEMS
+} QDMA_TrtcmScale_t ;
+
+typedef enum {
+ QDMA_TRTCM_PARAM_CIR = 0,
+ QDMA_TRTCM_PARAM_CBS,
+ QDMA_TRTCM_PARAM_PIR,
+ QDMA_TRTCM_PARAM_PBS
+} QDMA_TrtcmParamType_t ;
+
+typedef enum {
+ QDMA_EPON_REPORT_WO_THRESHOLD = 0,
+ QDMA_EPON_REPORT_ONE_THRESHOLD,
+ QDMA_EPON_REPORT_TWO_THRESHOLD,
+ QDMA_EPON_REPORT_THREE_THRESHOLD
+} QDMA_EponReportMode_t ;
+
+typedef enum {
+ QDMA_TXQUEUE_SCALE_2_DSCP = 0,
+ QDMA_TXQUEUE_SCALE_4_DSCP,
+ QDMA_TXQUEUE_SCALE_8_DSCP,
+ QDMA_TXQUEUE_SCALE_16_DSCP,
+ QDMA_TXQUEUE_SCALE_ITEMS
+} QDMA_TxQueueThresholdScale_t ;
+
+typedef enum {
+ PSE_PCP_TYPE_CDM_TX = 0,
+ PSE_PCP_TYPE_CDM_RX,
+ PSE_PCP_TYPE_GDM_RX
+} PSE_PcpType_t ;
+
+typedef enum {
+ PSE_PCP_MODE_DISABLE = 0,
+ PSE_PCP_MODE_8B0D = 1,
+ PSE_PCP_MODE_7B1D = 2,
+ PSE_PCP_MODE_6B2D = 4,
+ PSE_PCP_MODE_5B3D = 8
+} PSE_PcpMode_t ;
+
+typedef enum {
+ QDMA_RX_RATE_LIMIT_BY_BYTE = 0,
+ QDMA_RX_RATE_LIMIT_BY_PACKET,
+} QDMA_RxPktMode_t ;
+
+#ifndef TCSUPPORT_CPU_EN7580
+#if defined(TCSUPPORT_CPU_EN7527) || defined(TCSUPPORT_CPU_EN7516)
+typedef enum {
+ QDMA_Rx_Rate_Limit_PIR = 2,
+ QDMA_Rx_Rate_Limit_PBS
+} QDMA_RxRateLimitType_t ;
+#else
+typedef enum {
+ QDMA_Rx_Rate_Limit_PIR = 0,
+ QDMA_Rx_Rate_Limit_PBS
+} QDMA_RxRateLimitType_t ;
+#endif
+#endif
+
+typedef enum {
+ QDMA_Tx_Rate_Limit_CIR = 0,
+ QDMA_Tx_Rate_Limit_CBS,
+ QDMA_Tx_Rate_Limit_PIR,
+ QDMA_Tx_Rate_Limit_PBS
+} QDMA_TxRateLimitType_t ;
+
+#if defined(TCSUPPORT_CPU_EN7580) || defined(TCSUPPORT_CPU_EN7527) || defined(TCSUPPORT_CPU_EN7516)
+typedef enum {
+ QDMA_INT1 = 1,
+ QDMA_INT2 ,
+ QDMA_INT3 ,
+ QDMA_INT4 ,
+ QDMA_INT_NUM,
+} QDMA_InterruptNo_t ;
+
+typedef enum {
+#if defined(TCSUPPORT_CPU_EN7527) || defined(TCSUPPORT_CPU_EN7516)
+ QDMA_INT_ENABLE0 = 0 ,
+#endif
+ QDMA_INT_ENABLE1 = 1 ,
+ QDMA_INT_ENABLE2 = 2 ,
+#if defined(TCSUPPORT_CPU_EN7580)
+ QDMA_INT_ENABLE_NUM = 2 ,
+#else
+ QDMA_INT_ENABLE_NUM = 3 ,
+#endif
+} QDMA_IntEnable_t ;
+#endif
+
+/*-----TCSUPPORT_CPU_EN7580 start------*/
+#if defined(TCSUPPORT_CPU_EN7580)
+typedef enum{
+ QDMA_DRAM_MODE=0,
+ QDMA_SRAM_MODE=1,
+}QDMA_RamMode_T;
+
+/*Ingress Ratelimit or Egress Ratelimit Module*/
+typedef enum{
+ INGRESS_TRTCM=0,
+ EGRESS_TRTCM
+} GENERAL_TrtcmModuleType_T;
+
+/*ratelimit module is enable nor not*/
+typedef enum{
+ TRTCM_DISABLE=0,
+ TRTCM_ENABLE
+} GENERAL_Trtcm_T;
+
+/*ratelimit module works as trtcm mode or ratelimit mode*/
+typedef enum{
+ TRTCM_RATELIMIT_MODE=0,
+ TRTCM_MODE
+} GENERAL_TrtcmMode_T;
+
+/*each meter enable or not*/
+typedef enum{
+ GENERAL_METER_DISABLE=0,
+ GENERAL_METER_ENABLE
+} GENERAL_TrtcmMeter_T;
+
+typedef enum{
+ TRTCM_FAST_TICK = 0,
+ TRTCM_SLOW_TICK
+} GENERAL_TrtcmTickSel_T;
+
+typedef enum{
+ TRTCM_MISC = 0, //include meter_en, ppsmode, ticksel
+ TRTCM_TOKEN_RATE,
+ TRTCM_BUCKETSIZE_SHIFT,
+ TRTCM_BUCKET_COUNTER
+} GENERAL_TrtcmParaType_T;
+
+typedef enum{
+ TRTCM_COMMIT_MODE = 0,
+ TRTCM_PEAK_MODE,
+ TRTCM_RATE_TYPE_MAX
+} GENERAL_TrtcmRateType_T ;
+
+typedef enum{
+	TRTCM_BYTE_MODE = 0, //bps
+	TRTCM_PACKET_MODE //pps
+}GENERAL_TrtcmPktMode_T;
+/*-----TCSUPPORT_CPU_EN7580 end------*/
+#endif
+
+typedef enum {
+ QDMA_TX_RATE_METER_TIME_DIVISOR_1 = 0,
+ QDMA_TX_RATE_METER_TIME_DIVISOR_2,
+ QDMA_TX_RATE_METER_TIME_DIVISOR_4,
+ QDMA_TX_RATE_METER_TIME_DIVISOR_8,
+ QDMA_TX_RATE_METER_TIME_DIVISOR_ITEMS
+} QDMA_TxRateMeterTimeDivisor_t ;
+
+typedef enum {
+ QDMA_DYNCNGST_DEI_THRH_SCALE_1_2 = 0,
+ QDMA_DYNCNGST_DEI_THRH_SCALE_1_4,
+ QDMA_DYNCNGST_DEI_THRH_SCALE_1_8,
+ QDMA_DYNCNGST_DEI_THRH_SCALE_1_16,
+ QDMA_DYNCNGST_DEI_THRH_SCALE_ITEMS
+} QDMA_TxDynCngstDeiThrhScale_t ;
+
+typedef enum {
+ QDMA_QUEUE_NONBLOCKING = 0 ,
+ QDMA_QUEUE_NORMAL
+} QDMA_TxQCngstQueueMode_t ;
+
+typedef enum {
+ QDMA_CHANNEL_NONBLOCKING = 0 ,
+ QDMA_CHANNEL_NORMAL
+} QDMA_TxQCngstChannelMode_t ;
+
+typedef enum {
+ QDMA_DBG_CNTR_SRC_CPU_TX = 0,
+ QDMA_DBG_CNTR_SRC_FWD_TX,
+ QDMA_DBG_CNTR_SRC_CPU_RX,
+ QDMA_DBG_CNTR_SRC_FWD_RX,
+ QDMA_DBG_CNTR_SRC_ITEMS
+} QDMA_DbgCntrSrc_t ;
+
+typedef enum {
+ QDMA_TXCNGST_DYNAMIC_NORMAL = 0,
+ QDMA_TXCNGST_DYNAMIC_PEAKRATE_MARGIN,
+ QDMA_TXCNGST_STATIC,
+} QDMA_TxCngstMode_t ;
+
+typedef enum {
+ QDMA_TXCNGST_PEEKRATE_MARGIN_0 = 0,
+ QDMA_TXCNGST_PEEKRATE_MARGIN_25,
+ QDMA_TXCNGST_PEEKRATE_MARGIN_50,
+ QDMA_TXCNGST_PEEKRATE_MARGIN_100,
+} QDMA_PeekRateMargin_t ;
+
+
+typedef enum {
+ QDMA_DBG_MEM_XS_MEMSEL_LOCAL_DSCP_INFO = 0,
+ QDMA_DBG_MEM_XS_MEMSEL_LOCAL_QUEUE_INFO,
+ QDMA_DBG_MEM_XS_MEMSEL_QOS_WEIGHT_COUNTER,
+ QDMA_DBG_MEM_XS_MEMSEL_LOCAL_DMA_INDEX,
+ QDMA_DBG_MEM_XS_MEMSEL_BUFFER_MONITOR,
+ QDMA_DBG_MEM_XS_MEMSEL_RATELIMIT_PARAM,
+ QDMA_DBG_MEM_XS_MEMSEL_VCHNL_QOS_WEIGHT,
+ QDMA_DBG_MEM_XS_MEMSEL_MAX,
+} QDMA_DbgMemXsMemSel_t ;
+
+typedef enum {
+ MAC_TYPE_ETHER = 0,
+ MAC_TYPE_XPON,
+ MAC_TYPE_PTM,
+ MAC_TYPE_ATM,
+} MAC_Type_t ;
+
+typedef enum {
+ QDMA_VIRTUAL_CHANNEL_16Queue = 0,
+ QDMA_VIRTUAL_CHANNEL_32Queue,
+} QDMA_VirChnlMapMode_t ;
+
+typedef enum {
+ QDMA_DBG_CNTR_CHNL_TXCPU = 0,
+ QDMA_DBG_CNTR_CHNL_TXFWD1,
+ QDMA_DBG_CNTR_CHNL_TXFWD2,
+ QDMA_DBG_CNTR_QUEUE,
+#if defined(TCSUPPORT_CPU_EN7527) || defined(TCSUPPORT_CPU_EN7516)
+ QDMA_DBG_CNTR_RING,
+#endif
+ QDMA_DBG_CNTR_MAX,
+} QDMA_DbgCntrChnlGroup_t ;
+/* Warning: same sequence with function array 'qdma_operation' */
+typedef enum {
+ /* init */
+ QDMA_FUNCTION_INIT = 0,
+ QDMA_FUNCTION_DEINIT ,
+ QDMA_FUNCTION_TX_DMA_MODE ,
+ QDMA_FUNCTION_RX_DMA_MODE ,
+ QDMA_FUNCTION_LOOPBACK_MODE ,
+ QDMA_FUNCTION_REGISTER ,
+ QDMA_FUNCTION_UNREGISTER ,
+ QDMA_FUNCTION_ENABLE_RXPKT_INT ,
+ QDMA_FUNCTION_DISABLE_RXPKT_INT ,
+ QDMA_FUNCTION_RECEIVE_PACKETS ,
+ QDMA_FUNCTION_TRANSMIT_PACKETS ,
+ QDMA_FUNCTION_SET_TX_QOS_WEIGHT ,
+ QDMA_FUNCTION_GET_TX_QOS_WEIGHT ,
+ QDMA_FUNCTION_SET_TX_QOS ,
+ QDMA_FUNCTION_GET_TX_QOS ,
+ QDMA_FUNCTION_SET_MAC_LIMIT_THRESHOLD ,
+ QDMA_FUNCTION_GET_MAC_LIMIT_THRESHOLD ,
+ QDMA_FUNCTION_TRANSMIT_PACKET_WIFI_FAST,
+
+ /* other */
+ QDMA_FUNCTION_SET_TXBUF_THRESHOLD ,
+ QDMA_FUNCTION_GET_TXBUF_THRESHOLD ,
+ QDMA_FUNCTION_SET_PREFETCH_MODE ,
+ QDMA_FUNCTION_SET_PKTSIZE_OVERHEAD_EN ,
+ QDMA_FUNCTION_GET_PKTSIZE_OVERHEAD_EN ,
+ QDMA_FUNCTION_SET_PKTSIZE_OVERHEAD_VALUE ,
+ QDMA_FUNCTION_GET_PKTSIZE_OVERHEAD_VALUE ,
+ QDMA_FUNCTION_SET_LMGR_LOW_THRESHOLD ,
+ QDMA_FUNCTION_GET_LMGR_LOW_THRESHOLD ,
+ QDMA_FUNCTION_GET_LMGR_STATUS ,
+
+ /* test */
+ QDMA_FUNCTION_SET_DEBUG_LEVEL,
+ QDMA_FUNCTION_DUMP_DMA_BUSY,
+ QDMA_FUNCTION_DUMP_REG_POLLING,
+ QDMA_FUNCTION_SET_FORCE_RX_RING1,
+
+#if defined(TCSUPPORT_CPU_EN7580)
+ QDMA_FUNCTION_GENERAL_SET_TRTCM_CFG,
+ QDMA_FUNCTION_GENERAL_GET_TRTCM_CFG,
+
+ QDMA_FUNCTION_GENERAL_SET_RATELIMIT_MODE_CFG,
+ QDMA_FUNCTION_GENERAL_GET_RATELIMIT_MODE_CFG,
+ QDMA_FUNCTION_GENERAL_SET_RATELIMIT_MODE_VALUE,
+ QDMA_FUNCTION_GENERAL_GET_RATELIMIT_MODE_VALUE,
+
+ QDMA_FUNCTION_GENERAL_SET_TRTCM_MODE_CFG,
+ QDMA_FUNCTION_GENERAL_GET_TRTCM_MODE_CFG,
+ QDMA_FUNCTION_GENERAL_SET_TRTCM_MODE_VALUE,
+ QDMA_FUNCTION_GENERAL_GET_TRTCM_MODE_VALUE,
+#endif
+
+ /* tx rate limit */
+ QDMA_FUNCTION_SET_TX_DROP_EN, //default enable
+ QDMA_FUNCTION_GET_TX_DROP_EN, //default enable
+ QDMA_FUNCTION_SET_TX_RATEMETER,
+ QDMA_FUNCTION_GET_TX_RATEMETER,
+ QDMA_FUNCTION_ENABLE_TX_RATELIMIT,
+ QDMA_FUNCTION_SET_TX_RATELIMIT_CFG,
+ QDMA_FUNCTION_GET_TX_RATELIMIT_CFG,
+ QDMA_FUNCTION_SET_TX_RATELIMIT,
+ QDMA_FUNCTION_GET_TX_RATELIMIT,
+ QDMA_FUNCTION_SET_TX_DBAREPORT,
+ QDMA_FUNCTION_GET_TX_DBAREPORT,
+
+ /* rx rate limit */
+ QDMA_FUNCTION_SET_RX_PROTECT_EN, //default enable
+ QDMA_FUNCTION_GET_RX_PROTECT_EN, //default enable
+ QDMA_FUNCTION_SET_RX_LOW_THRESHOLD,
+ QDMA_FUNCTION_GET_RX_LOW_THRESHOLD,
+ QDMA_FUNCTION_SET_RX_RATELIMIT_EN,
+ QDMA_FUNCTION_SET_RX_RATELIMIT_PKT_MODE,
+ QDMA_FUNCTION_GET_RX_RATELIMIT_CFG,
+ QDMA_FUNCTION_SET_RX_RATELIMIT,
+ QDMA_FUNCTION_GET_RX_RATELIMIT,
+
+ /* txq cngst */
+ QDMA_FUNCTION_SET_TXQ_DEI_DROP_MODE,
+ QDMA_FUNCTION_GET_TXQ_DEI_DROP_MODE,
+ QDMA_FUNCTION_SET_TXQ_CNGST_MODE,
+ QDMA_FUNCTION_GET_TXQ_CNGST_MODE,
+ QDMA_FUNCTION_SET_TXQ_DEI_THRH_SCALE,
+ QDMA_FUNCTION_GET_TXQ_DEI_THRH_SCALE,
+ QDMA_FUNCTION_SET_TXQ_CNGST_AUTO_CONFIG,
+ QDMA_FUNCTION_GET_TXQ_CNGST_AUTO_CONFIG,
+ QDMA_FUNCTION_SET_TXQ_CNGST_DYNAMIC_THRESHOLD,
+ QDMA_FUNCTION_GET_TXQ_CNGST_DYNAMIC_THRESHOLD,
+ QDMA_FUNCTION_SET_TXQ_CNGST_TOTAL_THRESHOLD,
+ QDMA_FUNCTION_GET_TXQ_CNGST_TOTAL_THRESHOLD,
+ QDMA_FUNCTION_SET_TXQ_CNGST_CHANNEL_THRESHOLD,
+ QDMA_FUNCTION_GET_TXQ_CNGST_CHANNEL_THRESHOLD,
+ QDMA_FUNCTION_SET_TXQ_CNGST_QUEUE_THRESHOLD,
+ QDMA_FUNCTION_GET_TXQ_CNGST_QUEUE_THRESHOLD,
+ QDMA_FUNCTION_SET_PEEKRATE_PARAMS,
+ QDMA_FUNCTION_GET_PEEKRATE_PARAMS,
+ QDMA_FUNCTION_SET_TXQ_STATIC_QUEUE_NORMAL_THRESHOLD,
+ QDMA_FUNCTION_SET_TXQ_STATIC_QUEUE_DEI_THRESHOLD,
+ QDMA_FUNCTION_GET_TXQ_CNGST_DYNAMIC_INFO,
+ QDMA_FUNCTION_GET_TXQ_CNGST_STATIC_INFO,
+ QDMA_FUNCTION_SET_TXQ_CNGST_QUEUE_NONBLOCKING,
+ QDMA_FUNCTION_GET_TXQ_CNGST_QUEUE_NONBLOCKING,
+ QDMA_FUNCTION_SET_TXQ_CNGST_CHANNEL_NONBLOCKING,
+ QDMA_FUNCTION_GET_TXQ_CNGST_CHANNEL_NONBLOCKING,
+
+ /* virtual channel */
+ QDMA_FUNCTION_SET_VIRTUAL_CHANNEL_MODE,
+ QDMA_FUNCTION_GET_VIRTUAL_CHANNEL_MODE,
+ QDMA_FUNCTION_SET_VIRTUAL_CHANNEL_QOS,
+ QDMA_FUNCTION_GET_VIRTUAL_CHANNEL_QOS,
+
+ /* dbg cntr */
+ QDMA_FUNCTION_SET_DBGCNTR_CHANNEL,
+ QDMA_FUNCTION_SET_DBGCNTR_QUEUE,
+#if defined(TCSUPPORT_CPU_EN7527) || defined(TCSUPPORT_CPU_EN7516)
+ QDMA_FUNCTION_SET_DBGCNTR_RING,
+#endif
+ QDMA_FUNCTION_CLEAR_DBGCNTR,
+ QDMA_FUNCTION_DUMP_DBGCNTR,
+ QDMA_FUNCTION_SET_CHANNEL,
+ QDMA_FUNCTION_DUMP_CHANNEL,
+
+ /* dump */
+ QDMA_FUNCTION_DUMP_TX_QOS,
+ QDMA_FUNCTION_DUMP_VIRTUAL_CHANNEL_QOS,
+ QDMA_FUNCTION_DUMP_TX_RATELIMIT,
+ QDMA_FUNCTION_DUMP_RX_RATELIMIT,
+ QDMA_FUNCTION_DUMP_TX_DBA_REPORT,
+ QDMA_FUNCTION_DUMP_TXQ_CNGST,
+
+ QDMA_FUNCTION_CLEAR_CPU_COUNTER,
+ QDMA_FUNCTION_DUMP_CPU_COUNTER,
+ QDMA_FUNCTION_DUMP_REGISTER_INFO,
+ QDMA_FUNCTION_DUMP_DESCRIPTOR_INFO,
+ QDMA_FUNCTION_DUMP_IRQ_INFO,
+ QDMA_FUNCTION_DUMP_HWFWD_INFO,
+ QDMA_FUNCTION_DUMP_INFO_ALL,
+
+ /*other function*/
+ QDMA_FUNCTION_READ_VIP_INFO,
+
+ QDMA_FUNCTION_MAX_NUM ,
+} QDMA_HookFunction_t ;
+
+typedef struct {
+ int ring_idx;
+} QDMA_RxInfo_T ;
+
+typedef int (*qdma_callback_recv_packet_with_info_t)(void *, uint, struct sk_buff *, uint, QDMA_RxInfo_T *) ;
+typedef int (*qdma_callback_recv_packet_t)(void *, uint, struct sk_buff *, uint) ;
+typedef int (*qdma_callback_event_handler_t)(QDMA_EventType_t) ;
+typedef void (*qdma_callback_int_handler_t)(void) ;
+typedef int (*qdma_callback_recycle_packet_t)(struct sk_buff *, uint) ;
+
+typedef struct {
+ MAC_Type_t macType ;
+ unchar txRecycleThrh ;
+ qdma_callback_recv_packet_t cbRecvPkts ;
+#if defined(TCSUPPORT_FTTDP_V2) && defined(QDMA_LAN)
+ qdma_callback_recv_packet_t cbRecvPktsPtmLan ;
+#endif
+ qdma_callback_event_handler_t cbEventHandler ;
+ qdma_callback_recycle_packet_t cbRecyclePkts ;
+ qdma_callback_int_handler_t cbGponMacHandler ;
+ qdma_callback_int_handler_t cbEponMacHandler ;
+ qdma_callback_int_handler_t cbXponPhyHandler ;
+
+ qdma_callback_recv_packet_with_info_t cbInt2RecvPkts ;
+ qdma_callback_event_handler_t cbInt2EventHandler ;
+ qdma_callback_recv_packet_with_info_t cbInt3RecvPkts ;
+ qdma_callback_event_handler_t cbInt3EventHandler ;
+ qdma_callback_recv_packet_with_info_t cbInt4RecvPkts ;
+ qdma_callback_event_handler_t cbInt4EventHandler ;
+} QDMA_InitCfg_t ;
+
+typedef struct {
+ QDMA_Mode_t mode ;
+ unchar chnThreshold ;
+ unchar totalThreshold ;
+} QDMA_TxBufCtrl_T ;
+
+typedef struct {
+ unchar channel ;
+ QDMA_TxQosType_t qosType ;
+ struct {
+ unchar weight ; //0 for don't care
+ } queue[CONFIG_QDMA_QUEUE] ;
+} QDMA_TxQosScheduler_T ;
+
+typedef struct {
+ unchar channel ;
+ ushort cir ;
+ ushort cbs ;
+ ushort pir ;
+ ushort pbs ;
+} QDMA_TrtcmParam_T ;
+
+typedef struct {
+ QDMA_TxQueueThresholdScale_t maxScale ;
+ QDMA_TxQueueThresholdScale_t minScale ;
+} QDMA_TxQueueCongestScale_T ;
+
+typedef struct {
+ unchar deiScale;
+ struct {
+ unchar staticDeiThreshold ;
+ unchar staticNormalThreshold ;
+ } queue[CONFIG_QDMA_QUEUE] ;
+} QDMA_TxQueueCongestCfg_T ;
+
+typedef struct {
+ uint normalThrh[CONFIG_QDMA_QUEUE] ; // unit is kByte.
+} QDMA_TxQStaticNormalCfg_T ;
+
+typedef struct {
+ uint deiThrh[CONFIG_QDMA_QUEUE] ; // unit is kByte.
+} QDMA_TxQStaticDeiCfg_T ;
+
+typedef struct {
+ unchar cntrIdx ;
+ unchar cntrEn ;
+ QDMA_DbgCntrSrc_t cntrSrc ;
+ unchar isChnlAll ;
+ unchar isQueueAll ;
+ unchar isDscpRingAll ;
+ unchar chnlIdx ;
+ unchar queueIdx ;
+ unchar dscpRingIdx ;
+ uint cntrVal ;
+} QDMA_DBG_CNTR_T ;
+
+#if defined(TCSUPPORT_CPU_EN7580)
+/*-----TCSUPPORT_CPU_EN7580 start------*/
+typedef struct {
+ GENERAL_TrtcmModuleType_T trtcmModule; /*set Ingress or Egress*/
+ GENERAL_Trtcm_T trtcmEn; /*enable trtcm function or not*/
+ GENERAL_TrtcmMode_T trtcmMode; /*work as trtcm mode or ratelimit mode*/
+ uint trtcmSlowTickRatio; /*slow_tick = slow_tickratio X fast_tick*/
+ uint trtcmFastTick; /*unit: us*/
+}GENERAL_TrtcmCfg_T ;
+
+/*for set ratelimit mode cfg*/
+typedef struct {
+ GENERAL_TrtcmModuleType_T trtcmModule;
+ unchar Index; /*Egress: means channel ; Ingress: means ring or meter*/
+ GENERAL_TrtcmMeter_T MeterEn;
+ GENERAL_TrtcmPktMode_T PktMode; /*Packet mode or Byte mode*/
+ GENERAL_TrtcmTickSel_T TickSel; /*slow tick or fast tick*/
+}GENERAL_TrtcmRatelimitCfg_T ;
+
+/*for set ratelimit mode value*/
+typedef struct {
+ GENERAL_TrtcmModuleType_T trtcmModule;
+ unchar Index;
+ uint RateLimitValue; /* for ratelimit mode */
+ uint BucketSize;
+}GENERAL_TrtcmRatelimitSet_T ;
+
+/*for set trtcm modet cfg*/
+typedef struct {
+ GENERAL_TrtcmModuleType_T trtcmModule;
+ unchar Index; /*Egress: means channel ; Ingress: means ring or meter*/
+ GENERAL_TrtcmMeter_T CbsMeterEn;
+ GENERAL_TrtcmPktMode_T CbsPktMode; /*Packet mode or Byte mode*/
+ GENERAL_TrtcmTickSel_T CbsTickSel; /*slow tick or fast tick*/
+ GENERAL_TrtcmMeter_T PbsMeterEn;
+ GENERAL_TrtcmPktMode_T PbsPktMode; /*Packet mode or Byte mode*/
+ GENERAL_TrtcmTickSel_T PbsTickSel; /*slow tick or fast tick*/
+}GENERAL_TrtcmCbsPbsCfg_T ;
+
+/*for set trtcm modet value*/
+typedef struct {
+ GENERAL_TrtcmModuleType_T trtcmModule;
+ unchar Index;
+ uint CbsParamValue; /* for trtcm mode, CIR value */
+ uint PbsParamValue; /* for trtcm mode, PIR value */
+ uint CbsBucketSize;
+ uint PbsBucketSize;
+}GENERAL_TrtcmCbsPbsSet_T ;
+/*-----TCSUPPORT_CPU_EN7580 end------*/
+#endif
+typedef struct {
+ ushort txRateMeterTimeSlice ;
+ QDMA_TxRateMeterTimeDivisor_t txRateMeterTimeDivisor ;
+} QDMA_TxRateMeter_T ;
+
+typedef struct {
+ ushort txRateLimitUnit[PER_CHNL_TICKSEL_NUM] ; //unit is kbps, scope is (1~65)
+ QDMA_TrtcmScale_t txRateLimitBucketScale ;
+} QDMA_TxRateLimitCfg_T ;
+
+typedef struct {
+ unchar chnlIdx ;
+ unchar chnlRateLimitEn ;
+ uint rateLimitValue ; //unit is kbps
+} QDMA_TxRateLimitSet_T ;
+
+typedef struct {
+ unchar chnlIdx ;
+ unchar chnlRateLimitEn ;
+ uint rateLimitValue ; //unit is kbps
+ uint pbsParamValue ;
+} QDMA_TxRateLimitGet_T ;
+
+typedef struct {
+ unchar tsid ;
+ ushort cirParamValue ; //The unit of CIR, PIR is 64Kbps
+ ushort cbsParamValue ; //The default trtcm scale of CBS,PBS is 128 Byte
+ ushort pirParamValue ;
+ ushort pbsParamValue ;
+} QDMA_TxQueueTrtcm_T ;
+
+typedef struct {
+ unchar channel ;
+ unchar queue ;
+ unchar thrIdx ;
+ ushort value ;
+} QDMA_EponQueueThreshold_T ;
+
+typedef struct {
+ unchar channel ;
+ uint cirParamValue ; // unit is Kbps
+ uint cbsParamValue ; // unit is Byte
+ uint pirParamValue ; // unit is Kbps
+ uint pbsParamValue ; // unit is Byte
+} QDMA_TxDbaReport_T ;
+
+#if defined(TCSUPPORT_CPU_EN7580) || defined(TCSUPPORT_CPU_EN7527) || defined(TCSUPPORT_CPU_EN7516)
+typedef struct {
+ uint rxRingLowThrh[RX_RING_NUM] ;
+
+} QDMA_RxLowThreshold_T ;
+#else
+
+typedef struct {
+ uint rxRing0LowThrh ;
+ uint rxRing1LowThrh ;
+} QDMA_RxLowThreshold_T ;
+#endif
+typedef struct {
+ unchar rxRateLimitEn ;
+ QDMA_RxPktMode_t rxRateLimitPktMode ;
+ ushort rxRateLimitUnit ; //unit is kbps, scope is (1~65), or pps, scope is (16~1000)
+ QDMA_TrtcmScale_t rxRateLimitBucketScale ;
+#if defined(TCSUPPORT_CPU_EN7527) || defined(TCSUPPORT_CPU_EN7516)
+ ushort rxRateLimitUnit0;
+#endif
+} QDMA_RxRateLimitCfg_T ;
+
+typedef struct {
+ unchar ringIdx ;
+ uint rateLimitValue ; //unit is kbps or pps
+} QDMA_RxRateLimitSet_T ;
+
+typedef struct {
+ unchar ringIdx ;
+ uint rateLimitValue ; //unit is kbps or pps
+ uint pbsParamValue ;
+#if defined(TCSUPPORT_CPU_EN7527) || defined(TCSUPPORT_CPU_EN7516)
+ unchar rxBindTicker;
+#endif
+} QDMA_RxRateLimitGet_T ;
+
+typedef struct {
+ unchar cngstDropEn ;
+ unchar cngstDeiDropEn ;
+ unchar dynCngstEn ;
+#if defined(TCSUPPORT_CPU_EN7580) || defined(TCSUPPORT_CPU_EN7527) || defined(TCSUPPORT_CPU_EN7516)
+#if defined(TCSUPPORT_CPU_EN7580)
+ unchar cngstWredEn ;
+#endif
+ unchar dynCngstMaxThrhTxEn[TX_RING_NUM] ;
+ unchar dynCngstMinThrhTxEn[TX_RING_NUM];
+#else
+ unchar dynCngstMaxThrhTx1En ;
+ unchar dynCngstMinThrhTx1En ;
+ unchar dynCngstMaxThrhTx0En ;
+ unchar dynCngstMinThrhTx0En ;
+#endif
+ unchar dynCngstModeConfigTrigEn ;
+ unchar dynCngstModePacketTrigEn ;
+ unchar dynCngstModeTimeTrigEn ;
+ ushort dynCngstTicksel ;
+ QDMA_TxDynCngstDeiThrhScale_t dynCngstDeiThrhScale ;
+} QDMA_TxQCngstCfg_T ;
+
+typedef struct {
+ uint dynCngstTotalMaxThrh ; // unit is kByte.
+ uint dynCngstTotalMinThrh ; // unit is kByte.
+} QDMA_TxQDynCngstTotalThrh_T ;
+
+typedef struct {
+ uint dynCngstChnlMaxThrh ; // unit is kByte.
+ uint dynCngstChnlMinThrh ; // unit is kByte.
+} QDMA_TxQDynCngstChnlThrh_T ;
+
+typedef struct {
+ uint dynCngstQueueMaxThrh ; // unit is kByte.
+ uint dynCngstQueueMinThrh ; // unit is kByte.
+} QDMA_TxQDynCngstQueueThrh_T ;
+
+typedef struct {
+ ushort dynCngstTotalMaxThrh ;
+ ushort dynCngstTotalMinThrh ;
+ ushort dynCngstChnlMaxThrh ;
+ ushort dynCngstChnlMinThrh ;
+ ushort dynCngstQueueMaxThrh ;
+ ushort dynCngstQueueMinThrh ;
+#if defined(TCSUPPORT_CPU_EN7580)
+ ushort dynCngstMinDscpThrh ;
+#endif
+} QDMA_TxQDynCngstThrh_T ;
+
+typedef struct {
+ unchar queueIdx ;
+ ushort staticDeiThrh ;
+ ushort staticNormalThrh ;
+} QDMA_TxQStaticCngstThrh_T ;
+
+typedef struct {
+ QDMA_TxCngstMode_t txCngstMode ;
+ QDMA_PeekRateMargin_t peekRateMargin ; // only used in peek rate mode
+ uint peekRateDuration ; // only used in peek rate mode, unit is ms
+} QDMA_txCngstCfg_t ;
+
+typedef struct {
+ QDMA_Mode_t peekRateEn ;
+ QDMA_PeekRateMargin_t peekRateMargin ; // only used in peek rate mode
+ uint peekRateDuration ; //only used in peek rate mode, unit is ms
+} QDMA_PeekRateCfg_t ;
+
+typedef struct {
+ QDMA_TxQDynCngstTotalThrh_T *totalThrhPtr;
+ QDMA_TxQDynCngstChnlThrh_T *chnlThrhPtr;
+ QDMA_TxQDynCngstQueueThrh_T *queueThrhPtr;
+ QDMA_PeekRateCfg_t *peekrateCfgPtr;
+} QDMA_TxQDynamicCngstInfo_T ;
+
+typedef struct {
+ QDMA_TxQDynCngstTotalThrh_T *totalThrhPtr;
+ QDMA_TxQStaticNormalCfg_T *normThrhPtr;
+ QDMA_TxQStaticDeiCfg_T *deiThrhPtr;
+} QDMA_TxQStaticCngstInfo_T ;
+
+typedef struct {
+ QDMA_TxQCngstQueueMode_t queueMode;
+ uint queue;
+} QDMA_TxQCngstQueueCfg_T ;
+
+typedef struct {
+ QDMA_TxQCngstChannelMode_t channelMode;
+ uint channel;
+} QDMA_TxQCngstChannelCfg_T ;
+
+typedef struct {
+ QDMA_Mode_t virChnlEn;
+ QDMA_VirChnlMapMode_t virChnlMode;
+} QDMA_VirtualChannelMode_T ;
+
+typedef struct {
+ unchar phyChnl ;
+ QDMA_VirChnlQosType_t qosType ;
+ struct {
+ unchar weight ; //0 for don't care
+ } queue[4] ;
+} QDMA_VirtualChannelQoS_T ;
+
+typedef struct {
+ QDMA_CbType_t type;
+ void *pCbFun;
+} QDMA_RegisterCallBack_T ;
+
+typedef struct {
+ struct sk_buff *skb;
+ uint msg0;
+ uint msg1;
+ struct port_info *pMacInfo;
+} QDMA_Transmit_T ;
+
+typedef struct {
+ struct sk_buff *skb;
+ int ringIdx;
+} QDMA_Transmit_Wifi_Fast_T ;
+
+typedef struct {
+ QDMA_TxQosWeightType_t weightBase;
+ QDMA_TxQosWeightScale_t weightScale;
+ QDMA_TxQosScheduler_T *pTxQos;
+} QDMA_TxQos_T;
+
+typedef struct {
+ QDMA_Mode_t prefecthMode;
+ QDMA_Mode_t overDragMode;
+ uint overDragCnt;
+} QDMA_PrefetchMode_T;
+
+typedef struct {
+ uint freeLmgr;
+ uint usedLmgr;
+ uint usedBuf;
+} QDMA_LmgrStatus_T;
+
+typedef struct {
+ uint dbgLevel;
+ uint busyDuration; /* unit is second */
+ uint regOffset;
+ uint pollingDuration; /* unit is second */
+ uint forceEn;
+} QDMA_OldProc_T;
+
+typedef struct {
+ uint chnlThrh;
+ uint queueThrh;
+} QDMA_MacLimitThrh_T;
+
+struct ECNT_QDMA_Data {
+ QDMA_HookFunction_t function_id; /* need put at first item */
+ int retValue;
+
+ union {
+ QDMA_InitCfg_t *pInitCfg;
+ QDMA_Mode_t mode;
+ QDMA_LoopbackMode_t lbMode;
+ QDMA_DbgCntrChnlGroup_t cntrSrc;
+ int cnt;
+ int channel;
+ uint threshold;
+
+ QDMA_RegisterCallBack_T qdma_reg_cb;
+ QDMA_Transmit_T qdma_transmit;
+ QDMA_Transmit_Wifi_Fast_T qdma_transmit_wifi_fast;
+ QDMA_TxQos_T qdma_tx_qos;
+ QDMA_TxBufCtrl_T *pQdmaTxBufCtrl;
+ QDMA_PrefetchMode_T *qdma_prefetch;
+ QDMA_LmgrStatus_T *qdma_lmgr_status;
+ QDMA_OldProc_T qdma_old_proc;
+ QDMA_MacLimitThrh_T *qdma_mac_limit_thrh;
+
+#if defined(TCSUPPORT_CPU_EN7580)
+ /*-----TCSUPPORT_CPU_EN7580 start------*/
+ GENERAL_TrtcmCfg_T *generalTrtcmCfgPtr;
+ GENERAL_TrtcmRatelimitCfg_T *generalTrtcmRatelimitCfgPtr;
+ GENERAL_TrtcmRatelimitSet_T *generalTrtcmRatelimitSetPtr;
+ GENERAL_TrtcmCbsPbsCfg_T *generalTrtcmCbsPbsCfgPtr;
+ GENERAL_TrtcmCbsPbsSet_T *generalTrtcmCbsPbsSetPtr;
+	/*-----TCSUPPORT_CPU_EN7580 end------*/
+#endif
+ QDMA_TxRateMeter_T *txRateMeterPtr;
+ QDMA_TxRateLimitCfg_T *txRateLimitCfgPtr;
+ QDMA_TxRateLimitSet_T *txRateLimitSetPtr;
+ QDMA_TxRateLimitGet_T *txRateLimitGetPtr;
+ QDMA_TxDbaReport_T *txDbaReportPtr;
+
+ QDMA_RxLowThreshold_T *rxLowThresholdPtr;
+ QDMA_RxPktMode_t pktMode;
+ QDMA_RxRateLimitCfg_T *rxRateLimitCfgPtr;
+ QDMA_RxRateLimitSet_T *rxRateLimitSetPtr;
+ QDMA_RxRateLimitGet_T *rxRateLimitGetPtr;
+
+ QDMA_txCngstCfg_t *pTxCngstCfg;
+ QDMA_TxQDynCngstThrh_T *dynCngstThrhPtr;
+ QDMA_TxQDynCngstTotalThrh_T *totalThrhPtr;
+ QDMA_TxQDynCngstChnlThrh_T *chnlThrhPtr;
+ QDMA_TxQDynCngstQueueThrh_T *queueThrhPtr;
+ QDMA_PeekRateCfg_t *peekrateCfgPtr;
+
+ QDMA_TxQStaticDeiCfg_T *deiThrhPtr;
+ QDMA_TxQStaticNormalCfg_T *normThrhPtr;
+
+ QDMA_TxQDynamicCngstInfo_T *dynCfgPtr;
+ QDMA_TxQStaticCngstInfo_T *staticCfgPtr;
+
+ QDMA_VirtualChannelMode_T *virChnlModePtr;
+ QDMA_VirtualChannelQoS_T *virChnlQoSPtr;
+ QDMA_TxQCngstQueueCfg_T *txqCngstQueueCfgPtr;
+ QDMA_TxQCngstChannelCfg_T *txqCngstChannelCfgPtr;
+ } qdma_private;
+};
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+# I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+
+#endif /* _ECNT_HOOK_QDMA_TYPE_H_ */
+
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_sar.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_sar.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,92 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVERS SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVERS SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_SAR_H_
+#define _ECNT_HOOK_SAR_H_
+
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include "ecnt_hook.h"
+#include "ecnt_hook_sar_type.h"
+
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+
+static inline int SAR_AUTOBENCH_LOOPBACK(void)
+{
+ struct ECNT_SAR_DATA sar_data;
+ int ret=0;
+
+ sar_data.function_id = SAR_FUNCTION_AUTOBENCH_LOOPBACK;
+
+ ret = __ECNT_HOOK(ECNT_SAR, ECNT_DRIVER_API, (struct ecnt_data *)&sar_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return sar_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+# I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+
+#endif /* _ECNT_HOOK_SAR_H_ */
+
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_sar_type.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_sar_type.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,76 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVERS SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVERS SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_SAR_TYPE_H_
+#define _ECNT_HOOK_SAR_TYPE_H_
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include "ecnt_hook.h"
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+#define ECNT_DRIVER_API 0
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+
+typedef enum {
+ SAR_FUNCTION_AUTOBENCH_LOOPBACK = 0,
+ SAR_FUNCTION_MAX_NUM
+} SAR_HookFunctionID_t ;
+
+struct ECNT_SAR_DATA
+{
+ SAR_HookFunctionID_t function_id;
+ int retValue;
+};
+
+typedef int (*sar_op_t)(struct ECNT_SAR_DATA *sar_data);
+
+#endif
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_smux.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_smux.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,97 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVERS SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVERS SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+
+#ifndef __LINUX_ENCT_HOOK_SMUX_H
+#define __LINUX_ENCT_HOOK_SMUX_H
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+
+#include <ecnt_hook/ecnt_hook.h>
+#include <linux/jiffies.h>
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+
+#define SMUX_NO_API (-1)
+#define SMUX_ECNT_HOOK_PROCESS_FAIL (1)
+#define SMUX_ECNT_HOOK_PROCESS_SUCESS (0)
+
+
+/* SET function */
+
+
+/* Get Function */
+
+#define SMUX_GET_DEV_BY_SRC_MAC 0x8001 /* */
+
+
+
+enum {
+ SMUX_API_TYPE_GET = 0,
+ SMUX_API_TYPE_SET,
+};
+
+enum ECNT_SMUX_SUBTYPE {
+ ECNT_SMUX_API,
+};
+
+
+typedef struct{
+ struct sk_buff *skb;
+ struct net_device **dev;
+} SMUX_Dev_By_Mac;
+
+typedef struct smux_api_data_s {
+ int api_type ; /* [in ] get or set API */
+ unsigned int cmd_id ; /* [in ] API command ID */
+ int ret ; /* [out] API return value */
+
+ union{ /* [in|out] */
+ SMUX_Dev_By_Mac * smux_dev_by_mac ;
+ void * raw ;
+ };
+}smux_api_data_t;
+
+
+
+#endif
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_spi_nand.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_spi_nand.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,92 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVERS SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVERS SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_SPI_NAND_H_
+#define _ECNT_HOOK_SPI_NAND_H_
+
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include "ecnt_hook.h"
+#include "ecnt_hook_spi_nand_type.h"
+
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+
+static inline int AUTOBENCH_FLASH_API_CHECK(void)
+{
+ struct ECNT_SPI_NAND_DATA spi_nand_data;
+ int ret=0;
+
+ spi_nand_data.function_id = AUTOBENCH_FLASH_FUNCTION_CHECK;
+
+ ret = __ECNT_HOOK(ECNT_SPI_NAND, ECNT_DRIVER_API, (struct ecnt_data *)&spi_nand_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return spi_nand_data.retValue;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+# I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+
+#endif /* _ECNT_HOOK_SPI_NAND_H_ */
+
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_spi_nand_type.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_spi_nand_type.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,76 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVERS SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVERS SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_SPI_NAND_TYPE_H_
+#define _ECNT_HOOK_SPI_NAND_TYPE_H_
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include "ecnt_hook.h"
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+#define ECNT_DRIVER_API 0
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+
+typedef enum {
+ AUTOBENCH_FLASH_FUNCTION_CHECK = 0,
+ SPI_NAND_FUNCTION_MAX_NUM
+} SPI_NAND_HookFunctionID_t ;
+
+struct ECNT_SPI_NAND_DATA
+{
+ SPI_NAND_HookFunctionID_t function_id;
+ int retValue;
+};
+
+typedef int (*spi_nand_op_t)(struct ECNT_SPI_NAND_DATA *spi_nand_data);
+
+#endif
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_traffic_classify.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_traffic_classify.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,156 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVERS SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVERS SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+
+#ifndef __LINUX_ENCT_TRAFFIC_CLASSIFY_H
+#define __LINUX_ENCT_TRAFFIC_CLASSIFY_H
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <ecnt_hook/ecnt_hook.h>
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+#define PORT_MASK (0xF)
+#define PORT_MASK_OFFSET (0x1)
+
+#define QUQUE_REMARK_MASK (0x7)
+#define QUEUE_REMARK_OFFSET (0xB)
+
+#define LIMIT_GROUP_REMARK_MASK (0x3F)
+#define LIMIT_GROUP_REMARK_OFFSET (0x5)
+
+#define PRIO_REMARK_MASK (0x7)
+
+#define QUQUE_REMARK_EXIST_OFFSET (0x10)
+#define PRIO_REMARK_EXIST_OFFSET (0xF)
+
+#define QUEUE_MARK_MASK (0x0F)
+#define QUEUE_FROM_LAN (0x0E)
+#define QUEUE_FROM_WAN (0x0F)
+#define QUEUE_MAX_NUM (0x8)
+#define QUEUE_DSCP_MASK (0x7F0)
+#define QUEUE_DSCP_MASK_OFFSET (0x4)
+
+#define SKB_FROM_LAN_MASK (0x1)
+#define SKB_FROM_LAN_OFFSET (0x11)
+
+#define BRIDGE_WAN_NAME "nas0"
+
+typedef enum _port_list_
+{
+ E_ETH_PORT_0 = 0,
+ E_ETH_PORT_1 = 1,
+ E_ETH_PORT_2 = 2,
+ E_ETH_PORT_3 = 3,
+
+ E_WIFI_PORT_0 = 4,
+ E_WIFI_PORT_1 = 5,
+ E_WIFI_PORT_2 = 6,
+ E_WIFI_PORT_3 = 7,
+
+ E_WAN_PORT = 15,
+ E_MAX_PORT = 16,
+}e_port_list;
+
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+enum ecnt_traffic_classify_subtype
+{
+ /*Traffic classify control based on Filed*/
+ ECNT_TCCBF_SUBTYPE = 0,
+};
+
+enum _ecn_forward_drop_
+{
+ E_ECNT_FORWARD = 0,
+ E_ECNT_DROP = 1 ,
+};
+
+
+typedef struct _traffic_classify_data_s{
+ struct sk_buff *skb;
+ unsigned char *forward;
+
+}traffic_classify_data_t;
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+static inline int ECNT_TRAFFIC_CLASSIFY_HOOK (struct sk_buff *_skb, unsigned char *_f)
+{
+ int ret;
+ traffic_classify_data_t data;
+ memset(&data, 0, sizeof(data));
+
+ if(NULL != _skb && NULL != _f)
+ {
+ data.skb = _skb;
+ data.forward = _f;
+ }
+ else
+ {
+ return 0;
+ }
+
+ ret = __ECNT_HOOK(ECNT_TRAFFIC_CLASSIFY, ECNT_TCCBF_SUBTYPE, (struct ecnt_data *)&data);
+
+ return ret;
+}
+
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+ I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+
+#endif
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_tso.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_tso.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,136 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVERS SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVERS SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+
+#ifndef __LINUX_ENCT_HOOK_TSO_H
+#define __LINUX_ENCT_HOOK_TSO_H
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+
+#include <ecnt_hook/ecnt_hook.h>
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+
+#define ECNT_DRIVER_API 0
+
+/* Warning: same sequence with function array 'qdma_operation' */
+typedef enum {
+ TSO_FUNCTION_MATCH_V4,
+ TSO_FUNCTION_MATCH_V6,
+ TSO_FUNCTION_MAX_NUM,
+} TSO_HookFunction_t ;
+
+/*
+ tsoRuleEntry_t is used for white list entry.
+*/
+typedef struct{
+ __be16 sport;
+ __be16 dport;
+ __be32 saddr_v4;
+ __be32 daddr_v4;
+}tsoRuleEntry_v4_t;
+
+typedef struct{
+ __be16 sport;
+ __be16 dport;
+ struct in6_addr saddr_v6;
+ struct in6_addr daddr_v6;
+}tsoRuleEntry_v6_t;
+
+typedef struct ECNT_TSO_Data {
+ TSO_HookFunction_t function_id; /* need put at first item */
+ int retValue;
+
+ union {
+ tsoRuleEntry_v4_t *entry_v4;
+ tsoRuleEntry_v6_t *entry_v6;
+ } tso_private;
+}ECNT_TSO_Data_s;
+
+
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+ I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+static inline int TSO_SET_SKB_MARK_V4(tsoRuleEntry_v4_t *entry){
+ ECNT_TSO_Data_s in_data = {0};
+ int ret=0;
+
+ in_data.function_id = TSO_FUNCTION_MATCH_V4;
+ in_data.tso_private.entry_v4 = entry;
+ in_data.retValue = -1;
+
+ //printk("before function_id:%d, in_data.retValue:%d\n", in_data.retValue);
+ ret = __ECNT_HOOK(ECNT_TSO_LAN, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret == ECNT_CONTINUE){
+ //printk("function_id:%d, in_data.retValue:%d\n", in_data.retValue);
+ return in_data.retValue;
+ }
+ else{
+ return ECNT_HOOK_ERROR;
+ }
+}
+
+static inline int TSO_SET_SKB_MARK_V6(tsoRuleEntry_v6_t *entry){
+ ECNT_TSO_Data_s in_data = {0};
+ int ret=0;
+
+ in_data.function_id = TSO_FUNCTION_MATCH_V6;
+ in_data.tso_private.entry_v6 = entry;
+ ret = __ECNT_HOOK(ECNT_TSO_LAN, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+
+ return ret;
+}
+
+
+
+#endif
Index: linux-3.18.21/include/ecnt_hook/ecnt_hook_voip.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/ecnt_hook/ecnt_hook_voip.h 2018-02-05 13:21:17.000000000 +0800
@@ -0,0 +1,122 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVERS SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVERS SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef _ECNT_HOOK_VOIP_H
+#define _ECNT_HOOK_VOIP_H
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+
+#include <ecnt_hook/ecnt_hook.h>
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+#define ECNT_DRIVER_API 0
+#define ECNT_DRIVER_SLT 1
+
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+typedef enum {
+ VOIP_FUNCTION_SLIC_SHUTDOWN,
+ VOIP_FUNCTION_POWER_SAVE_MODE,
+ VOIP_FUNCTION_MAX_NUM,
+} VOIP_HookFunction_t ;
+
+typedef struct ECNT_VOIP_Data {
+ VOIP_HookFunction_t function_id; /* need put at first item */
+ int retValue;
+
+ union {
+ int enable;
+ };
+}ECNT_VOIP_Data_s;
+
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+ I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+static inline int VOIP_API_SLIC_SHUTDOWN(void){
+ ECNT_VOIP_Data_s in_data;
+ int ret=0;
+ in_data.function_id = VOIP_FUNCTION_SLIC_SHUTDOWN;
+ ret = __ECNT_HOOK(ECNT_VOIP, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return ECNT_CONTINUE;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int VOIP_API_POWER_SAVE_MODE(int enable){
+ ECNT_VOIP_Data_s in_data;
+ int ret=0;
+ in_data.function_id = VOIP_FUNCTION_POWER_SAVE_MODE;
+ in_data.enable = enable;
+
+ ret = __ECNT_HOOK(ECNT_VOIP, ECNT_DRIVER_API, (struct ecnt_data *)&in_data);
+ if(ret != ECNT_HOOK_ERROR)
+ return ECNT_CONTINUE;
+ else
+ return ECNT_HOOK_ERROR;
+}
+
+static inline int VOIP_API_PCM_SLT(void){
+ ECNT_VOIP_Data_s in_data;
+ int ret=0;
+ ret = __ECNT_HOOK(ECNT_VOIP, ECNT_DRIVER_SLT, (struct ecnt_data *)&in_data);
+ return ret;
+}
+
+
+#endif /*_TCETHERVOIP_HOOK_H*/
Index: linux-3.18.21/include/linux/decompress/unlzma_mm.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/linux/decompress/unlzma_mm.h 2018-02-05 13:21:18.000000000 +0800
@@ -0,0 +1,16 @@
+#ifndef UNLZMA_MM_H
+#define UNLZMA_MM_H
+
+#ifdef STATIC
+/* Code active when included from pre-boot environment: */
+#define INIT
+#elif defined(CONFIG_DECOMPRESS_LZMA_NEEDED)
+/* Make it available to non initramfs/initrd code */
+#define INIT
+#include <linux/module.h>
+#else
+/* Compile for initramfs/initrd code only */
+#define INIT __init
+#endif
+
+#endif
Index: linux-3.18.21/include/linux/ecnt_skbuff.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/linux/ecnt_skbuff.h 2018-02-05 14:20:40.000000000 +0800
@@ -0,0 +1,282 @@
+#ifndef _LINUX_ECNT_SKBUFF_H
+#define _LINUX_ECNT_SKBUFF_H
+
+#include <linux/kernel.h>
+#include <linux/kmemcheck.h>
+#include <linux/compiler.h>
+#include <linux/time.h>
+#include <linux/bug.h>
+#include <linux/cache.h>
+
+#include <linux/atomic.h>
+#include <asm/types.h>
+#include <linux/spinlock.h>
+#include <linux/net.h>
+#include <linux/textsearch.h>
+#include <net/checksum.h>
+#include <linux/rcupdate.h>
+#include <linux/hrtimer.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdev_features.h>
+#include <linux/sched.h>
+#include <net/flow_keys.h>
+#if defined(TCSUPPORT_RA_HWNAT)
+#include <linux/foe_hook.h>
+#endif
+
+
+extern void skbmgr_4k_pool_init(void);
+extern void skbmgr_pool_init(void);
+
+extern atomic_t g_used_skb_num;
+extern int g_max_skb_num;
+extern int peak_skb_num;
+
+#ifdef TCSUPPORT_DOWNSTREAM_QOS
+#define VOIP_RX_PORT_NUM 4
+#endif
+
+#define MODE_HGU 0
+#define MODE_SFU 1
+
+#if defined(CONFIG_CPU_TC3162) || defined(CONFIG_MIPS_TC3262)
+//only one skbmgr pool for every CPU. shnwind 20101215.
+#define SKBMGR_SINGLE_QUEUE
+
+#ifdef SKBMGR_SINGLE_QUEUE
+#define SKBMGR_QUEUE_ID 0
+#define SKBMGR_MAX_QUEUE 1
+#else
+#define SKBMGR_QUEUE_ID smp_processor_id()
+#define SKBMGR_MAX_QUEUE NR_CPUS
+#endif
+
+extern atomic_t skbmgr_alloc_no;
+extern atomic_t skbmgr_4k_alloc_no;
+
+#define SKBMGR_INDICATION 1
+#define SKBMGR_4K_INDICATION 2
+#endif
+
+
+#define MULTICAST_MASK 0xf0000
+#define SKBUF_COPYTOLAN (1 << 26)
+
+#if 1//def CONFIG_QOS
+#define QOS_DEFAULT_MARK 0x00000008
+#define QOS_FILTER_MARK 0x000000f0
+#define QOS_HH_PRIORITY 0x00000010
+#define QOS_NODROP_MARK 0x00000001
+/* no queue marked packets to default queue */
+#define QOS_PRIORITY_DEFAULT 0x00000080
+#define QOS_DOT1P_MARK 0x00000f00
+#define QOS_RULE_INDEX_MARK 0x0000f002
+#define QOS_RTP_MARK 0x00000004
+#define LANIF_MASK 0xf0000000
+
+
+#define MULTICAST_MASK		0xf0000		/* duplicate of the identical definition above; benign identical redefinition */
+#define SKBUF_COPYTOLAN 	(1 << 26)	/* duplicate of the identical definition above; benign identical redefinition */
+#define SKBUF_TCCONSOLE (1 << 27)
+ /* the last bit is used as route policy mask */
+#define ROUTE_POLICY_MASK (1 << 24)
+ //#define QOS_WANIF_MARK 0xff000
+ //#define QOS_DSCP_MARK 0x3f00000
+#endif
+
+
+/******************************************************************************************************************
+*pon_mark(32bi)
+*31...............| 14..12 | 11 | 10 | 9..8 | 7 | 6 | 5 | 4...0 |
+* | | | | | | | |
+* | | | | | | | |----------0x1f QOS_TSID_MARK
+* | | | | | | |-----------0x20 QOS_TSE_MARK
+* | | | | | |------------0x40 DS_TRTCM_ENABLE_MARK
+* | | | | |-------------0x80 DS_QUEUE_ENABLE_MARK
+* | | | |--------------0x100 DS_PKT_MAPPING_TO_ONE |
+* | | |---------------0x200 DS_PKT_MAPPING_TO_MULTI |0x300 DS_PKT_MAPPING_MARK
+* | | --------------0x400 DS_PKT_FORM_WAN
+* |
+* |-----------------------0x7000 DS_QUEUE_ID_MARK
+*
+*
+*
+*******************************************************************************************************************/
+
+
+
+#define QOS_TSID_MARK 0x0000001f
+#define QOS_TSE_MARK 0x00000020
+#define DS_TRTCM_ENABLE_MARK 0x00000040
+#define DS_QUEUE_ENABLE_MARK 0x00000080
+#define DS_PKT_MAPPING_TO_ONE 0x00000100
+#define DS_PKT_MAPPING_TO_MULTI 0x00000200
+#define DS_PKT_MAPPING_MARK 0x00000300
+#define DS_PKT_FORM_WAN 0x00000400
+#define DS_QUEUE_ID_MARK 0x00007000
+#define DS_TRTCM_ID_MARK QOS_TSID_MARK
+#define setDownQueueID(x,y) do{(x) &= (~DS_QUEUE_ID_MARK); (x) |= ((y)&DS_QUEUE_ID_MARK);}while(0)
+#define getDownQueueID(x) ( ( (x) & DS_QUEUE_ID_MARK ) >> 12 )
+#define setDownQueueEnable(x,y) (x = (y ? (x|DS_QUEUE_ENABLE_MARK) : (x & ~DS_QUEUE_ENABLE_MARK)))
+#define getDownQueueEnable(x) ((x & DS_QUEUE_ENABLE_MARK) ? 1 : 0)
+
+#define PON_PKT_FROM_CPE (1<<0)
+#define PON_PKT_FROM_LAN (1<<1)
+#define PON_PKT_FROM_WLAN (1<<2)
+#define PON_PKT_FROM_WAN (1<<3)
+#define PON_PKT_FROM_USB (1<<4)
+#define PON_PKT_FROM_IGMP (1<<5)
+#define PON_PKT_INSERT_FLAG (1<<6)
+#define PON_PKT_ROUTING_FLAG (1<<7)
+#define PON_PKT_SEND_TO_WAN (1<<8)
+#define PON_VLAN_RX_CALL_HOOK (1<<9)
+#define PON_VLAN_TX_CALL_HOOK (1<<10)
+#define PON_USER_GROUP_FLAG (1<<11)
+#define PON_PKT_VOIP_RX (1<<12)
+#define PON_PKT_VOIP_TX (1<<13)
+#define PON_MULTICAST_ANI_FILTER_FLAG (1<<14)
+#define PON_LEAVE_PKT_DEAL (1<<15)
+
+#define PON_PKT_TR69_RX (1<<17)
+#define PON_PKT_TR69_TX (1<<18)
+#define PON_PKT_DROP_FLAG (1<<19)
+
+#if defined(TCSUPPORT_PON_MAC_FILTER)
+#define PKT_FROM_LAN (1<<0)
+#define PKT_FROM_WAN (1<<1)
+#define PKT_SEND_TO_WAN (1<<2)
+#define PKT_FILTER_FLAG (1<<3)
+#define PON_MAC_FILTER_RX_CALL_HOOK (1<<4)
+#define PON_MAC_FILTER_TX_CALL_HOOK (1<<5)
+#define PKT_SEND_TO_LAN (1<<6)
+#endif
+
+#if defined(TCSUPPORT_VLAN_TAG) || defined(TCSUPPORT_CT_VLAN_TAG)
+#define VLAN_PACKET (1<<0)
+#define VLAN_2TAGS_PACKET (1<<1)
+#define ROUTING_MODE_PACKET (1<<2)
+#define VLAN_TAG_FROM_INDEV (1<<3)
+#define VLAN_TAG_INSERT_FLAG (1<<4)
+#define VLAN_TAG_CHECK_FLAG (1<<5)
+#define VLAN_TAG_FROM_WAN (1<<6)
+#ifdef TR143
+#define VLAN_TAG_PBIT_RESERVE0 (1<<8)
+#define VLAN_TAG_PBIT_RESERVE1 (1<<9)
+#define VLAN_TAG_PBIT_RESERVE2 (1<<10)
+#define VLAN_TAG_PBIT_RESERVE3 (1<<11)
+#endif
+#ifdef TCSUPPORT_LAN_VLAN
+#define VLAN_TAG_FOR_BOARDCAST (1<<12)
+#endif
+#endif
+#define VLAN_TAG_FOR_CFI (1<<14)
+
+#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */
+
+#define XPON_IGMP_IS_MULTICAST (1<<0)
+#define XPON_IGMP_UPSTREAM_RESTORE (1<<1)
+#define XPON_IGMP_UPSTREAM_RECOVERY (1<<2)
+#define XPON_IGMP_DOWNSTREAM_VLAN_HANDLE (1<<3)
+
+#if !defined(TCSUPPORT_CT)
+#ifdef TCSUPPORT_PORTBIND /*CONFIG_PORT_BINDING*/
+#define MASK_ORIGIN_DEV 0x1 /* flag for port bind set origin dev name */
+#define MASK_OUT_DEV 0x2 /* flag for port bind set origin dev name */
+#define IFNAMSIZ 16
+#endif
+#endif
+
+/*simulation unmatch flag in skb->mark,downstream only*/
+#define DOWNSTREAM_SIMULATION_MASK (1<<31)
+
+//#ifdef CONFIG_SMUX /*include/linux/if.h*/
+/* smux calls */
+#define SIOCSIFSMUX 0x89c0 /* add or rem smux interface */
+//#endif
+
+#ifdef TCSUPPORT_XPON_HAL_API_EXT
+#define PKTQOS_QUEUE_LENGTH (256)
+#else
+#define PKTQOS_QUEUE_LENGTH (64) //(128) /* 16*8 */
+#endif
+
+
+
+#if defined(TCSUPPORT_WAN_GPON)
+#define IS_PKT_FROM_DS_UNICAST_GEM(skb) \
+ ({ \
+ typecheck(struct sk_buff *, skb); \
+ ((skb->pon_mark & DS_PKT_FORM_WAN) && (0 == skb->gem_type) ); \
+ })
+#define IS_PKT_FROM_DS_MULTICAST_GEM(skb) \
+ ({ \
+ typecheck(struct sk_buff *, skb); \
+ ((skb->pon_mark & DS_PKT_FORM_WAN) && (1 == skb->gem_type) ); \
+ })
+#endif
+
+#if (defined(TCSUPPORT_WAN_GPON) || defined (TCSUPPORT_WAN_EPON))
+#define SKB_GET_PON_MARK(skb) \
+ ({ \
+ typecheck(struct sk_buff *, skb); \
+ (skb->pon_mark); \
+ })
+#else
+#define SKB_GET_PON_MARK(skb) (0)
+#endif
+
+struct ecnt_sk_buff {
+};
+typedef struct ecnt_sk_buff ecnt_sk_buff_t;
+
+#ifdef TCSUPPORT_XPON_HAL_API_MCST
+typedef struct IGMP_HWNATEntry_s
+{
+ struct list_head list;
+ #ifdef TCSUPPORT_MULTICAST_SPEED
+ struct rcu_head rcu;
+ #endif
+ int proto; //ipv6 or ipv4
+ int index; //get from skb buff
+ int vlan_tag_num;
+ int outer_vlan;
+ int inner_vlan;
+ short int hwnat_vid;
+ short int wifinum; //wifi ra counts
+ //short int lannum; //lan counts
+ // bit0-->bit3 for eth0.1-->eth0.4 bit8-->bit11 for ra0-->ra3 bit12~bit15 for rai1~rai4
+ unsigned long mask; //port mask
+ short int bindvid;
+ unsigned char grp_addr[16];
+ unsigned char src_addr[16];
+ struct timer_list age_timer;
+}IGMP_HWNATEntry_t;
+#else
+/*move from br_private.h*/
+/*this struct is also for hw nat using, not add compile option*/
+typedef struct IGMP_HWNATEntry_s
+{
+ struct list_head list;
+ #ifdef TCSUPPORT_MULTICAST_SPEED
+ struct rcu_head rcu;
+ #endif
+ int proto;
+ int index;
+
+// bit0-->bit3 for eth0.1-->eth0.4 bit8-->bit11 for ra0-->ra3 bit12~bit15 for rai1~rai4
+ unsigned long mask;
+ unsigned char wifinum;
+ unsigned char grp_addr[16];
+ unsigned char src_addr[16];
+ struct timer_list age_timer;
+}IGMP_HWNATEntry_t;
+#endif
+
+typedef struct
+{
+ struct list_head list;
+ int index;
+ unsigned long port_mask;
+}multicast_flood_hwentry_t;
+
+#endif
Index: linux-3.18.21/include/linux/ecnt_vlan_bind.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/linux/ecnt_vlan_bind.h 2018-02-05 14:20:40.000000000 +0800
@@ -0,0 +1,11 @@
+#ifndef __VLAN_BIND_H
+#define __VLAN_BIND_H
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <uapi/linux/in.h>
+#include <linux/if_vlan.h>
+
+#endif
Index: linux-3.18.21/include/linux/ecnt_voip_proc.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/linux/ecnt_voip_proc.h 2018-02-05 14:20:40.000000000 +0800
@@ -0,0 +1,4 @@
+#ifndef __VOIP_PROC_H
+#define __VOIP_PROC_H
+
+#endif
Index: linux-3.18.21/include/linux/foe_hook.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/linux/foe_hook.h 2018-02-05 13:21:18.000000000 +0800
@@ -0,0 +1,243 @@
+#ifndef __FOE_HOOK_H
+#define __FOE_HOOK_H
+#include <uapi/linux/foe_hook.h>
+
+#define FOE_MAGIC_PCI 0x7273
+#define FOE_MAGIC_WLAN 0x7274
+#define FOE_MAGIC_GE 0x7275
+#define FOE_MAGIC_PPE 0x7276
+#define FOE_MAGIC_ATM 0x7277
+
+#ifdef TCSUPPORT_MT7510_FE
+#define FOE_MAGIC_PTM 0x7278
+#define FOE_MAGIC_EPON 0x7279
+#define FOE_MAGIC_GPON 0x727a
+//#define FOE_MAGIC_CRYPTO 0x727b
+
+#define FOE_MAGIC_CRYPTO_E_1 0x727b
+#define FOE_MAGIC_CRYPTO_D_1 0x727c
+#define FOE_MAGIC_CRYPTO_E_2 0x727d
+#define FOE_MAGIC_CRYPTO_D_2 0x727e
+#define FOE_MAGIC_OFFLOAD 0x727f
+#define FOE_MAGIC_PTM_LAN 0x7280
+#endif
+
+#define FOE_MAGIC_L2TP_VPN_UPSTREAM 0x7281
+#define FOE_MAGIC_L2TP_VPN_DOWNSTREAM 0x7282
+
+#define FOE_MAGIC_LOCAL 0x7283
+
+#define L2TP_VPN_PPP_NAME "ppp100"
+#define HWNAT_PKT_ERROR 0
+#define HWNAT_PKT_UPSTREAM 1
+#define HWNAT_PKT_DOWNSTREAM 2
+
+#define HWNAT_IPSEC_LEARING 0
+#define HWNAT_IPSEC_SPEED 1
+#define HWNAT_IPSEC_ROLLBACK 2
+
+#define IPSEC_SKB_CB 47
+
+#define FOE_OPE_GETENTRYNUM 0
+#define FOE_OPE_CLEARENTRY 1
+
+
+#define HWNAT_WLAN_IF_MAXNUM 16 //16 is max wifi interface
+#define HWNAT_WLAN_IF_BASE 8 //8 is base
+#define HWNAT_WLAN_IF_NUM 		4 //currently 4 in use; 8 is the maximum supported
+#define HWNAT_WLAN_IF_I_NUM 		4 //currently 4 in use; 8 is the maximum supported (foe rai)
+
+#define MULTICAST_SPEED_STATE_I 3 //with lan and with wlan
+#define MULTICAST_SPEED_STATE_II 1 //with lan and without wlan
+#define MULTICAST_SPEED_STATE_III 2 //without lan and with wlan
+#define MULTICAST_SPEED_STATE_IV 0 //without lan and without wlan
+
+#define LAN_STATE_I 1 //soc lan + external lan port
+#define LAN_STATE_II 2 //only soc lan or only external lan port or both not
+
+struct port_info {
+ unsigned long int txq:4;
+ unsigned long int channel:8;
+ unsigned long int tsid:8;
+ unsigned long int atm_pppoa:1;
+ unsigned long int atm_ipoa:1;
+ unsigned long int atm_vc_mux:1;
+ unsigned long int eth_macSTagEn:1;
+ unsigned long int eth_is_wan:1;
+ unsigned long int ds_to_qdma:1;
+ unsigned long int ds_need_offload:1;
+ unsigned long int force_high_priority_ring:1;
+ unsigned long int resv0:3;
+ unsigned long int txq_is_valid:1;
+ unsigned long int stag:16;
+ unsigned long int magic:16;
+};
+
+typedef union {
+ struct {
+#ifdef __BIG_ENDIAN
+ unsigned int resv : 1 ;
+ unsigned int tsid : 5 ;
+ unsigned int tse : 1 ;
+ unsigned int dei : 1 ;
+ unsigned int gem : 12 ;
+ unsigned int oam : 1 ;
+ unsigned int channel : 8 ;
+ unsigned int queue : 3 ;
+#else
+ unsigned int queue : 3 ;
+ unsigned int channel : 8 ;
+ unsigned int oam : 1 ;
+ unsigned int gem : 12 ;
+ unsigned int dei : 1 ;
+ unsigned int tse : 1 ;
+ unsigned int tsid : 5 ;
+ unsigned int resv : 1 ;
+#endif /* __BIG_ENDIAN */
+
+#ifdef __BIG_ENDIAN
+ unsigned int ico : 1 ;
+ unsigned int uco : 1 ;
+ unsigned int tco : 1 ;
+ unsigned int tso : 1 ;
+ unsigned int pmap : 6 ;
+ unsigned int fport : 3 ;
+ unsigned int insv : 1 ;
+ unsigned int tpid : 2 ;
+ unsigned int vid : 16 ;
+#else
+ unsigned int vid : 16 ;
+ unsigned int tpid : 2 ;
+ unsigned int insv : 1 ;
+ unsigned int fport : 3 ;
+ unsigned int pmap : 6 ;
+ unsigned int tso : 1 ;
+ unsigned int tco : 1 ;
+ unsigned int uco : 1 ;
+ unsigned int ico : 1 ;
+#endif /* __BIG_ENDIAN */
+ } raw ;
+
+ struct {
+#ifdef __BIG_ENDIAN
+ unsigned int mtr : 1 ;
+ unsigned int fport_ppe : 3 ;
+ unsigned int gem : 16 ;
+ unsigned int oam : 1 ;
+ unsigned int channel_ppe : 5 ;
+ unsigned int channel :3 ;
+ unsigned int queue : 3 ;
+#else
+ unsigned int queue : 3 ;
+ unsigned int channel :3 ;
+		unsigned int channel_ppe : 5 ;	/* was 8: must mirror the big-endian layout so the word totals 32 bits (3+3+5+1+16+3+1) */
+ unsigned int oam : 1 ;
+ unsigned int gem : 16 ;
+ unsigned int fport_ppe : 3 ;
+ unsigned int mtr : 1 ;
+#endif /* __BIG_ENDIAN */
+
+#ifdef __BIG_ENDIAN
+ unsigned int ico : 1 ;
+ unsigned int uco : 1 ;
+ unsigned int tco : 1 ;
+ unsigned int tso : 1 ;
+ unsigned int mtr_index : 6 ;
+ unsigned int fport : 3 ;
+ unsigned int insv : 1 ;
+ unsigned int tpid : 2 ;
+ unsigned int vid : 16 ;
+#else
+ unsigned int vid : 16 ;
+ unsigned int tpid : 2 ;
+ unsigned int insv : 1 ;
+ unsigned int fport : 3 ;
+ unsigned int mtr_index : 6 ;
+ unsigned int tso : 1 ;
+ unsigned int tco : 1 ;
+ unsigned int uco : 1 ;
+ unsigned int ico : 1 ;
+#endif /* __BIG_ENDIAN */
+ } raw1 ;/*tx msg format1 for cpu path ratelimit by ppe meter in EN7526C*/
+
+ unsigned int word[2] ;
+}FETxMsg_T ;
+
+
+/*****************************
+ * FRAME ENGINE REGISTERS OFFSET *
+ *****************************/
+#define FE_GLO_CFG_OFF (0x0000)
+#define CDMP_VLAN_CT_OFF (0x0400)
+#define CDM_VLAN_GE_OFF (0x1400)
+#define GDM2_FWD_CFG_OFF (0x1500)
+#define GDM2_MIB_CLR_OFF (0x1520)
+#define GDM2_LEN_CFG_OFF (0x1524)
+#define GDM2_CHN_EN_OFF (0x152c)
+#define GDM2_TX_GET_CNT_OFF (0x1600)
+#define GDM2_TX_OK_CNT_OFF (0x1604)
+#define GDM2_TX_DROP_CNT_OFF (0x1608)
+#define GDM2_TX_OK_BYTE_CNT_OFF (0x160c)
+#define GDM2_RX_OK_CNT_OFF (0x1650)
+#define GDM2_RX_OVER_DROP_CNT_OFF (0x1654)
+#define GDM2_RX_ERROR_DROP_CNT_OFF (0x1658)
+#define GDM2_RX_OK_BYTE_CNT_OFF (0x165c)
+#define GDM2_RX_ETH_CRCE_CNT_OFF (0x1674)
+#define GDM2_RX_ETH_RUNT_CNT_OFF (0x1680)
+#define GDM2_RX_ETH_LONG_CNT_OFF (0x1684)
+
+
+struct sk_buff;
+struct net_device;
+
+#ifdef TCSUPPORT_HWNAT_L2VID
+extern int (*ra_sw_nat_hook_rx_set_l2lu) (struct sk_buff * skb);
+#endif
+extern int (*ra_sw_nat_hook_rx) (struct sk_buff * skb);
+extern int (*ra_sw_nat_ds_offload)(struct sk_buff *skb, int *dp);
+extern int (*ra_sw_nat_hook_update_dp)(int index, int dp);
+extern int (*ra_sw_nat_hook_update_vlan)(int index,int outer_vlan,int inner_vlan);
+extern int (*ra_sw_nat_local_in_tx) (struct sk_buff * skb,unsigned short port);
+
+extern int (*ra_sw_nat_hook_save_rxinfo)(struct sk_buff *skb);
+extern int (*ra_sw_nat_hook_restore_rxinfo)(struct sk_buff *skb);
+extern int (*ra_sw_nat_hook_save_txinfo)(struct sk_buff *skb);
+extern int (*ra_sw_nat_hook_restore_txinfo)(struct sk_buff *skb);
+extern int (*ra_sw_nat_hook_is_hwnat_pkt)(struct sk_buff *skb);
+extern int (*ra_sw_nat_hook_sendto_ppe)(struct sk_buff *skb);
+extern int (*ra_sw_nat_hook_set_l2tp_dev)(struct net_device *dev);
+extern struct net_device* (*ra_sw_nat_hook_read_l2tp_dev)(void);
+extern int (*ra_sw_nat_rtsp_offload_restore) (struct sk_buff * skb, int calc_sum);
+extern int (*ra_sw_nat_rtsp_data_handle) (struct sk_buff * skb, char *rb_ptr, unsigned int datalen);
+extern void (*ra_sw_nat_set_wan_acntid_hook) (struct sk_buff *skb, unsigned char wan_index, unsigned char dir);
+extern void (*ra_sw_nat_clear_wan_acntid_hook)(unsigned char wan_index);
+
+#ifdef TCSUPPORT_MT7510_FE
+extern int (*ra_sw_nat_hook_tx) (struct sk_buff * skb, struct port_info * pinfo, int magic);
+#else
+extern int (*ra_sw_nat_hook_tx) (struct sk_buff * skb, int gmac_no);
+#endif
+extern int (*ra_sw_nat_hook_free) (struct sk_buff * skb);
+extern int (*ra_sw_nat_hook_rxinfo) (struct sk_buff * skb, int magic, char *data, int data_length);
+extern int (*ra_sw_nat_hook_txq) (struct sk_buff * skb, int txq);
+extern int (*ra_sw_nat_hook_magic) (struct sk_buff * skb, int magic);
+extern int (*ra_sw_nat_hook_set_magic) (struct sk_buff * skb, int magic);
+extern int (*ra_sw_nat_hook_xfer) (struct sk_buff *skb, const struct sk_buff *prev_p);
+extern int (*ra_sw_nat_hook_foeentry) (void * inputvalue,int operation);
+extern int (*ra_sw_nat_hook_is_alive_pkt)(unsigned int crsn);
+#if defined(TCSUPPORT_CPU_MT7520) || defined(TCSUPPORT_CPU_EN7512)
+extern int (*MT7530LanPortMap2Switch_hook)(int port);
+#endif
+
+#ifdef TCSUPPORT_RA_HWNAT_ENHANCE_HOOK
+extern int (*ra_sw_nat_hook_drop_packet) (struct sk_buff * skb);
+extern int (*ra_sw_nat_hook_clean_table) (void);
+extern int (*ra_sw_nat_hook_clean_multicast_entry) (void);
+#endif
+
+#ifdef TCSUPPORT_MT7510_FE
+extern void (*restore_offload_info_hook)(struct sk_buff *skb, struct port_info *pinfo, int magic);
+#endif
+
+extern int (*ra_sw_nat_hook_cpu_meter)(struct sk_buff* skb,FETxMsg_T* txMsg,struct port_info* pinfo,unsigned char dir,unsigned short mtrIndex);
+#endif
Index: linux-3.18.21/include/linux/if_vlan.h
===================================================================
--- linux-3.18.21.orig/include/linux/if_vlan.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/linux/if_vlan.h 2018-02-05 13:21:19.000000000 +0800
@@ -21,6 +21,7 @@
#define VLAN_HLEN 4 /* The additional bytes required by VLAN
* (in addition to the Ethernet header)
*/
+#define VLAN_ETH_ALEN 6 /* Octets in one ethernet addr */
#define VLAN_ETH_HLEN 18 /* Total octets in header. */
#define VLAN_ETH_ZLEN 64 /* Min. octets in frame sans FCS */
Index: linux-3.18.21/include/linux/ipv6.h
===================================================================
--- linux-3.18.21.orig/include/linux/ipv6.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/linux/ipv6.h 2018-02-05 14:20:40.000000000 +0800
@@ -51,6 +51,9 @@
__s32 force_tllao;
__s32 ndisc_notify;
__s32 suppress_frag_ndisc;
+#ifdef TCSUPPORT_IPV6_ENHANCEMENT
+ char slaac_addr[64];
+#endif
void *sysctl;
};
Index: linux-3.18.21/include/linux/kernel.h
===================================================================
--- linux-3.18.21.orig/include/linux/kernel.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/linux/kernel.h 2018-02-05 13:21:19.000000000 +0800
@@ -29,6 +29,40 @@
#define ULLONG_MAX (~0ULL)
#define SIZE_MAX (~(size_t)0)
+#ifdef CONFIG_TC3162_IMEM
+#define __IMEM __attribute__ ((__section__(".imem_text")))
+#else
+#define __IMEM
+#endif
+
+#if defined(CONFIG_TC3162_DMEM) && !defined(CONFIG_MIPS_TC3262)
+#define __DMEM __attribute__ ((__section__(".dmem_data")))
+#else
+#define __DMEM
+#endif
+/*
+ * Display an IP address in readable format.
+ */
+
+#define NIPQUAD(addr) \
+ ((unsigned char *)&addr)[0], \
+ ((unsigned char *)&addr)[1], \
+ ((unsigned char *)&addr)[2], \
+ ((unsigned char *)&addr)[3]
+#define NIPQUAD_FMT "%u.%u.%u.%u"
+
+#define NIP6(addr) \
+ ntohs((addr).s6_addr16[0]), \
+ ntohs((addr).s6_addr16[1]), \
+ ntohs((addr).s6_addr16[2]), \
+ ntohs((addr).s6_addr16[3]), \
+ ntohs((addr).s6_addr16[4]), \
+ ntohs((addr).s6_addr16[5]), \
+ ntohs((addr).s6_addr16[6]), \
+ ntohs((addr).s6_addr16[7])
+#define NIP6_FMT "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x"
+#define NIP6_SEQFMT "%04x%04x%04x%04x%04x%04x%04x%04x"
+
#define U8_MAX ((u8)~0U)
#define S8_MAX ((s8)(U8_MAX>>1))
#define S8_MIN ((s8)(-S8_MAX - 1))
Index: linux-3.18.21/include/linux/libcompileoption.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/linux/libcompileoption.h 2018-02-05 13:21:19.000000000 +0800
@@ -0,0 +1,521 @@
+/******************************************************************************/
+/*
+ * Copyright (C) 1994-2014 Econet Technologies, Corp.
+ * All Rights Reserved.
+ *
+ * TrendChip Confidential; Need to Know only.
+ * Protected as an unpublished work.
+ *
+ * The computer program listings, specifications and documentation
+ * herein are the property of TrendChip Technologies, Corp. and
+ * shall not be reproduced, copied, disclosed, or used in whole or
+ * in part for any reason without the prior express written permission of
+ * TrendChip Technologies, Corp.
+ */
+/******************************************************************************/
+#ifndef _COMPILEOPTION_LIB_H
+#define _COMPILEOPTION_LIB_H
+
+typedef int CompileOption_Val;
+
+extern CompileOption_Val TCSUPPORT_MANUAL_ETHERNET_PORTMAP_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_VLAN_TAG_VAL;
+
+extern CompileOption_Val TCSUPPORT_VLAN_TAG_VAL;
+
+extern CompileOption_Val TCSUPPORT_TCAPI_ENHANCE_VAL;
+
+extern CompileOption_Val TCSUPPORT_CUC_C5_2P_VAL;
+#if 0
+extern CompileOption_Val TCSUPPORT_CDS_VTAG_TRANSPARENT_VAL;
+#endif
+extern CompileOption_Val TCSUPPORT_CT_PON_GD_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PON_GDV20_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PON_CZ_GDCS_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PON_CY_VAL;
+
+extern CompileOption_Val TCSUPPORT_ZARLINK_LE89156A_VAL;
+
+extern CompileOption_Val TCSUPPORT_ZARLINK_LE89156B_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PON_CY_JX_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PON_BIND2_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PON_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PON_JS_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_FJ_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PON_GX_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PON_CQ_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_UPG_PINGPONG_VAL;
+
+extern CompileOption_Val TCSUPPORT_BACKUPROMFILE_VAL;
+
+extern CompileOption_Val TCSUPPORT_PRODUCTIONLINE_VAL;
+
+extern CompileOption_Val TCSUPPORT_CUC_CWMP_PARAMETER_VAL;
+
+extern CompileOption_Val TCSUPPORT_NOTDEFAULTROMFILEAREA_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_MIDWARE_VAL;
+
+extern CompileOption_Val TCSUPPORT_CDS_VAL;
+
+extern CompileOption_Val TCSUPPORT_VLAN_DOT1P_VAL;
+
+extern CompileOption_Val TCSUPPORT_VLAN_PASSTHROUGH_VAL;
+
+extern CompileOption_Val TCSUPPORT_CUC_FJ_SFU_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_BACKUPROMFILEENCHANCEMENT_VAL;
+
+extern CompileOption_Val TCSUPPORT_CUC_VAL;
+
+extern CompileOption_Val TCSUPPORT_CY_PON_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PHONEAPP_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PON_C9_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PON_JS_IP_VAL;
+
+extern CompileOption_Val TCSUPPORT_VOIP_LED_APPCTRL_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_VOIP_CRYPT_VAL;
+
+extern CompileOption_Val TCSUPPORT_PON_FH_VAL;
+
+extern CompileOption_Val TCSUPPORT_VOIP_SIP_DNS_VAL;
+
+extern CompileOption_Val TCSUPPORT_AUTOBENCH_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PON_YN_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_DEV_ACCESS_TYPE_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_ECN_GZ_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_WANNINDEX_INCREASE_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_MONITORCOLLECTOR_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PPPOE_EMULATOR_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PON_NMG_VAL;
+
+extern CompileOption_Val TCSUPPORT_CY_E8_SFU_VAL;
+
+extern CompileOption_Val CT_COM_DEVICEREG_VAL;
+
+extern CompileOption_Val TCSUPPORT_ITMS_CONFIG_AS_DEFAULT_VAL;
+extern CompileOption_Val TCSUPPORT_DMS_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_UPNP_DM_VAL;
+
+extern CompileOption_Val TCSUPPORT_TR69_IP_HOST_VAL;
+
+extern CompileOption_Val TCSUPPORT_PON_IP_HOST_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PPPINFORM_VAL;
+
+extern CompileOption_Val TCSUPPORT_C1_MS_VAL;
+
+extern CompileOption_Val TCSUPPORT_C1_ZY_VAL;
+
+extern CompileOption_Val TCSUPPORT_PONMGR_VAL;
+
+extern CompileOption_Val TCSUPPORT_PMMGR_VAL;
+
+extern CompileOption_Val TCSUPPORT_WAN_ATM_VAL;
+
+extern CompileOption_Val TCSUPPORT_CPU_MT7505_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_SWQOS_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_SIMCARD_SEPARATION_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_2PORTS_VAL;
+
+extern CompileOption_Val TCSUPPORT_LAN_VLAN_VAL;
+
+extern CompileOption_Val TCSUPPORT_LAN_VLAN_RANGE_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PON_SN_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_WAN_PTM_VAL;
+
+extern CompileOption_Val TCSUPPORT_C1_CUC_VAL;
+
+extern CompileOption_Val TCSUPPORT_SYSLOG_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PORT_BIND_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_GUIACCESSLIMIT_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PPPOEPROXY_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_ZIPROMFILE_VAL;
+
+extern CompileOption_Val TCSUPPORT_RESERVEAREA_EXTEND_VAL;
+
+extern CompileOption_Val TCSUPPORT_C7_VAL;
+
+extern CompileOption_Val TCSUPPORT_CY_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_E8B_ADSL_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_WANNODE_MAPPING_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_BLOCK_PROCESS_VAL;
+
+extern CompileOption_Val TCSUPPORT_ROUTEPOLICY_PRIOR_PORTBIND_VAL;
+
+extern CompileOption_Val TCSUPPORT_CPU_MT7510_VAL;
+
+extern CompileOption_Val TCSUPPORT_CPU_MT7520_VAL;
+
+extern CompileOption_Val TCSUPPORT_TRUE_LANDING_PAGE_VAL;
+
+extern CompileOption_Val TCSUPPORT_CWMP_VAL;
+
+extern CompileOption_Val TCSUPPORT_CWMP_NO_HOST_VAL;
+
+extern CompileOption_Val TCSUPPORT_WLAN_VAL;
+
+extern CompileOption_Val TCSUPPORT_INIC_CLIENT_VAL;
+
+extern CompileOption_Val TCSUPPORT_CZ_GENERAL_VAL;
+
+extern CompileOption_Val TCSUPPORT_KEYPARA_STORE_VAL;
+
+extern CompileOption_Val TCSUPPORT_VPN_VAL;
+
+extern CompileOption_Val TCSUPPORT_MULTI_BOOT_VAL;
+
+extern CompileOption_Val TCSUPPORT_START_TRAP_VAL;
+
+extern CompileOption_Val TCSUPPORT_SYSLOG_ENHANCE_VAL;
+
+extern CompileOption_Val TCSUPPORT_WLAN_AC_VAL;
+
+extern CompileOption_Val TCSUPPORT_CWMP_FAST_GET_VAL;
+
+extern CompileOption_Val TCSUPPORT_CD_DDNS_VAL;
+
+extern CompileOption_Val TCSUPPORT_WAN_GPON_VAL;
+
+extern CompileOption_Val TCSUPPORT_WAN_EPON_VAL;
+
+extern CompileOption_Val TCSUPPORT_WAN_ETHER_VAL;
+
+extern CompileOption_Val TCSUPPORT_OMCI_VAL;
+
+extern CompileOption_Val TCSUPPORT_EPON_OAM_CTC_VAL;
+
+extern CompileOption_Val TCSUPPORT_CMDPROMPT_VAL;
+
+extern CompileOption_Val TCSUPPORT_RANDOM_INFORM_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_SERVICELIST_E8C_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_ALARMMONITOR_VAL;
+
+extern CompileOption_Val TCSUPPORT_CZ_TM_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_INFORM_NODE_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_LONG_RESETBTN_VAL;
+
+extern CompileOption_Val CWMP_REDIRECT_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PPP_ONDEMAND_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_CWMP_ZIPROMFILE_VAL;
+
+extern CompileOption_Val TCSUPPORT_TR69_BIND_PVC_VAL;
+
+extern CompileOption_Val TCSUPPORT_NO_BOOT_VALUECHANGE_VAL;
+
+extern CompileOption_Val TCSUPPORT_CROSS_REBOOT_VAL;
+
+extern CompileOption_Val TCSUPPORT_BHARTI_VAL;
+
+extern CompileOption_Val TCSUPPORT_TTNET_VAL;
+
+extern CompileOption_Val TCSUPPORT_CWMP_PRECISE_TIME_VAL;
+
+extern CompileOption_Val TCSUPPORT_C1_NEW_GUI_VAL;
+
+extern CompileOption_Val TCSUPPORT_PROLINE_CMD_ACTION_VAL;
+
+extern CompileOption_Val TCSUPPORT_MULTISERVICE_ON_WAN_VAL;
+
+extern CompileOption_Val TCSUPPORT_UPNP_ENHANCE_VAL;
+
+extern CompileOption_Val TCSUPPORT_C1_OBM_VAL;
+
+extern CompileOption_Val TCSUPPORT_WAN_PTM_VAL;
+
+extern CompileOption_Val TCSUPPORT_CD_NEW_GUI_VAL;
+
+extern CompileOption_Val TCSUPPORT_CCT_VAL;
+
+extern CompileOption_Val TCSUPPORT_CLMP_VAL;
+
+extern CompileOption_Val TCSUPPORT_SAGECOM_CWMP_VAL;
+
+extern CompileOption_Val TCSUPPORT_FW_UPGRADE_DELAY_VAL;
+
+extern CompileOption_Val AZTECH_CWMP_REORDER_VAL;
+
+extern CompileOption_Val TCSUPPORT_CWMP_IOT_STRINGTYPE_VAL;
+
+extern CompileOption_Val TCSUPPORT_CWMP_FAULT_RESPONSE_VAL;
+
+extern CompileOption_Val TCSUPPORT_CWMP_CRPORTCHANGE_VAL;
+
+extern CompileOption_Val TCSUPPORT_WEB_INTERCEPTION_VAL;
+
+extern CompileOption_Val TCSUPPORT_MNT_CONF_VAL;
+
+extern CompileOption_Val TCSUPPORT_FTP_USB_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_FTP_DOWNLOADCLIENT_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_NEWGUI_VAL;
+
+extern CompileOption_Val TCSUPPORT_IPV6_FIREWALL_VAL;
+
+extern CompileOption_Val TCSUPPORT_IPV6_FIREWALL_RFC2827_VAL;
+
+extern CompileOption_Val TCSUPPORT_PRODUCTIONLINE_CONTENT_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_HWQOS_VAL;
+
+extern CompileOption_Val TCSUPPORT_CHT_RAMENU_VAL;
+
+extern CompileOption_Val TCSUPPORT_EPON_OAM_LAN_DBG_VAL;
+
+extern CompileOption_Val TCSUPPORT_E8B_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_VLAN_BIND_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_DSLITE_VAL;
+
+extern CompileOption_Val TCSUPPORT_TC2031_VAL;
+
+extern CompileOption_Val TCSUPPORT_HWNAT_VAL;
+
+extern CompileOption_Val TCSUPPORT_HWNAT_WAN_ACCOUNT_VAL;
+
+extern CompileOption_Val TCSUPPORT_CUC_QOS_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_1FXS_VAL;
+
+extern CompileOption_Val TCSUPPORT_2PORTS_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PON_C7_VAL;
+
+extern CompileOption_Val TCSUPPORT_USBHOST_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_STANDARD_E8C_VAL;
+
+extern CompileOption_Val TCSUPPORT_IPV6_VAL;
+
+extern CompileOption_Val TCSUPPORT_WLAN_RT3390_VAL;
+
+extern CompileOption_Val TCSUPPORT_WLAN_RT3090_VAL;
+
+extern CompileOption_Val TCSUPPORT_WLAN_RT5392_VAL;
+
+extern CompileOption_Val TCSUPPORT_WLAN_MULTIDRIVER_VAL;
+
+extern CompileOption_Val TCSUPPORT_WLAN_MT7592_VAL;
+
+extern CompileOption_Val TCSUPPORT_OMCI_ALCATEL_VAL;
+
+extern CompileOption_Val TCSUPPORT_GPON_MAPPING_VAL;
+
+extern CompileOption_Val TCSUPPORT_PON_USER_ISOLATION_VAL;
+
+extern CompileOption_Val TCSUPPORT_PON_VLAN_FILTER_VAL;
+
+extern CompileOption_Val TCSUPPORT_PON_MAC_FILTER_VAL;
+
+extern CompileOption_Val TCSUPPORT_XPON_IGMP_VAL;
+
+extern CompileOption_Val TCSUPPORT_XPON_HAL_API_QOS_VAL;
+
+extern CompileOption_Val TCSUPPORT_XPON_HAL_API_VAL;
+
+extern CompileOption_Val TCSUPPORT_XPON_HAL_API_EXT_VAL;
+
+extern CompileOption_Val TCSUPPORT_XPON_PON_QOS_VAL;
+
+extern CompileOption_Val TCSUPPORT_PON_VLAN_VAL;
+
+extern CompileOption_Val TCSUPPORT_OMCI_LAN_DEBUG_VAL;
+
+extern CompileOption_Val TCSUPPORT_XPON_LED_VAL;
+
+extern CompileOption_Val TCSUPPORT_UPSTREAM_VLAN_POLICER_VAL;
+
+extern CompileOption_Val TCSUPPORT_GPON_DOWNSTREAM_MAPPING_VAL;
+
+extern CompileOption_Val TCSUPPORT_IGMP_SET_GROUP_VAL;
+
+extern CompileOption_Val TCSUPPORT_CD_WIND_VAL;
+
+extern CompileOption_Val TCSUPPORT_ARC_CWMP_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_BUTTONDETECT_VAL;
+
+extern CompileOption_Val TCSUPPORT_ECN_SIP_VAL;
+
+extern CompileOption_Val TCSUPPORT_ECN_MEGACO_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_VOIP_SIP_VAL;
+
+extern CompileOption_Val TCSUPPORT_CHS_VAL;
+
+extern CompileOption_Val TCSUPPORT_VOIP_VAL;
+
+extern CompileOption_Val TCSUPPORT_SLIC_ZL88801_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PON_JX_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_STBMAC_REPORT_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PMINFORM_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PON_HUB_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_ADSL_HN_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PON_SC_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PON_C9_HUN_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_TR143_WGET_DOWNLOAD_VAL;
+
+extern CompileOption_Val TCSUPPORT_TR143_CURL_UPLOAD_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_ADSL_TJ_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_ADSL_BIND1_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_NAMECHGNOTIFY_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_C5_HEN_SFU_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_SFU_SX_VAL;
+
+extern CompileOption_Val TCSUPPORT_EPON_DUMMY_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_HWNAT_OFFLOAD_VAL;
+
+extern CompileOption_Val TCSUPPORT_DS_HWNAT_OFFLOAD_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PON_SK_VAL;
+
+extern CompileOption_Val TCSUPPORT_CMCC_VAL;
+
+extern CompileOption_Val TCSUPPORT_CMCCV2_VAL;
+
+extern CompileOption_Val TCSUPPORT_CMCC_GANSU_VAL;
+
+extern CompileOption_Val TCSUPPORT_CSC_EEUROPE_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_ITMS_TMOUT_VAL;
+
+extern CompileOption_Val TCSUPPORT_CPU_PERFORMANCE_TEST_VAL;
+
+extern CompileOption_Val TCSUPPORT_XPON_IGMP_CHT_VAL;
+
+extern CompileOption_Val TCSUPPORT_WAN_UPSTREAM_REMARK_VAL;
+
+extern CompileOption_Val TCSUPPORT_XPON_HAL_API_MCST_VAL;
+
+extern CompileOption_Val TCSUPPORT_CPU_EN75XX_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_JOYME_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_JOYME_BANDWIDTH_VAL;
+
+extern CompileOption_Val TCSUPPORT_FH_JOYMEV2_PON_VAL;
+
+extern CompileOption_Val TCSUPPORT_C9_ROST_LED_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_CWMP_WORK_COMPATIBLE_VAL;
+
+extern CompileOption_Val TCSUPPORT_UPGRADE_NO_REBOOT_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PON_CN_JS_VAL;
+
+extern CompileOption_Val TCSUPPORT_CUC_PON_SD_VAL;
+
+extern CompileOption_Val TCSUPPORT_PON_ROSTELECOM_VAL;
+
+extern CompileOption_Val TCSUPPORT_XPON_LED_UPGRADE_VAL;
+
+extern CompileOption_Val TCSUPPORT_FWC_ENV_VAL;
+
+extern CompileOption_Val RA_NAT_EXE_VAL;
+
+extern CompileOption_Val TCSUPPORT_DLF_CTRL_VAL;
+
+extern CompileOption_Val TCSUPPORT_FWC_QOS_VAL;
+
+
+extern CompileOption_Val TCSUPPORT_FWC_FDB_VLAN_VAL;
+
+extern CompileOption_Val TCSUPPORT_FWC_VLAN_TAG_VAL;
+
+extern CompileOption_Val TCSUPPORT_FWC_MCST_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PON_CZ_GD_VAL;
+
+extern CompileOption_Val TCSUPPORT_CT_PON_CZ_NX_VAL;
+
+extern CompileOption_Val TCSUPPORT_VNPTT_VAL;
+
+extern CompileOption_Val TCSUPPORT_CCA_VAL;
+
+extern CompileOption_Val TCSUPPORT_SWNAT_VAL;
+
+extern CompileOption_Val TCSUPPORT_TR64_DIGEST_AUTH_VAL;
+
+extern CompileOption_Val TCSUPPORT_PORT_ISOLATION_VAL;
+
+extern CompileOption_Val TCSUPPORT_TLS_VAL;
+
+extern CompileOption_Val TCSUPPORT_SDN_OVS_VAL;
+
+extern CompileOption_Val TCSUPPORT_FH_SDN_PON_VAL;
+
+extern CompileOption_Val TCSUPPORT_CRJO_VAL;
+
+extern CompileOption_Val TCSUPPORT_XPON_HAL_API_NG_VAL;
+
+extern CompileOption_Val TCSUPPORT_WLAN_SW_RPS_VAL;
+
+extern CompileOption_Val TCSUPPORT_IPSEC_PASSTHROUGH_VAL;
+
+extern CompileOption_Val TCSUPPORT_MIPS_1004K_VAL;
+
+void
+init_compileoption_val(void);
+
+
+#endif
Index: linux-3.18.21/include/linux/mtd/rt_flash.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/linux/mtd/rt_flash.h 2018-02-05 13:21:20.000000000 +0800
@@ -0,0 +1,48 @@
+#ifndef __MTD_RT_FLASH_H__
+#define __MTD_RT_FLASH_H__
+
+#include <asm/tc3162/tc3162.h>
+#ifndef VPint
+#define VPint *(volatile unsigned long int *)
+#endif
+
+#ifdef TCSUPPORT_NEW_SPIFLASH
+extern unsigned char ReadSPIByte(unsigned long index);
+extern unsigned long ReadSPIDWord(unsigned long index);
+#else
+#ifdef TCSUPPORT_MT7510_E1
+#define ReadSPIByte(i) (((*((unsigned char*)i))==0) ? (*((unsigned char*)i)): (*((unsigned char*)i)))
+#define ReadSPIDWord(i) (((*((unsigned int*)i))==0) ? (*((unsigned int*)i)): (*((unsigned int*)i)))
+#else
+#define ReadSPIByte(i) (*((unsigned char*)i))
+#define ReadSPIDWord(i) (*((unsigned int*)i))
+#endif
+#endif
+#ifdef TCSUPPORT_MT7510_E1
+#define READ_FLASH_BYTE(i) ( (IS_NANDFLASH) ? \
+ ((ranand_read_byte != NULL) ? ranand_read_byte((i)) : -1) \
+ : (ReadSPIByte(i)) )
+
+
+/* for read flash, frankliao added 20101216 */
+#define READ_FLASH_DWORD(i) ( (IS_NANDFLASH) ? \
+ ((ranand_read_dword != NULL) ? ranand_read_dword((i)) : -1) \
+ : (ReadSPIDWord(i)) )
+#else
+#define READ_FLASH_BYTE(i) ( (IS_NANDFLASH) ? \
+ ((ranand_read_byte != NULL) ? ranand_read_byte((i)) : -1) \
+ : (ReadSPIByte(i)) )
+
+
+/* for read flash, frankliao added 20101216 */
+#define READ_FLASH_DWORD(i) ( (IS_NANDFLASH) ? \
+ ((ranand_read_dword != NULL) ? ranand_read_dword((i)) : -1) \
+ : (ReadSPIDWord(i)) )
+
+#endif
+/* frankliao added 20101215 */
+extern unsigned long flash_base;
+extern unsigned int (*ranand_read_byte)(unsigned long long);
+extern unsigned int (*ranand_read_dword)(unsigned long long);
+
+#endif /* __MTD_MTD_RT_FLASH_H__ */
Index: linux-3.18.21/include/linux/netdevice.h
===================================================================
--- linux-3.18.21.orig/include/linux/netdevice.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/linux/netdevice.h 2018-02-05 14:20:40.000000000 +0800
@@ -1220,6 +1220,8 @@
IFF_LIVE_ADDR_CHANGE = 1<<20,
IFF_MACVLAN = 1<<21,
IFF_XMIT_DST_RELEASE_PERM = 1<<22,
+ IFF_RSMUX = 1<<23,
+ IFF_OSMUX = 1<<24,
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1245,6 +1247,8 @@
#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
#define IFF_MACVLAN IFF_MACVLAN
#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
+#define IFF_RSMUX IFF_RSMUX /* pvc interface */
+#define IFF_OSMUX IFF_OSMUX /* smux interface */
/**
* struct net_device - The DEVICE structure.
@@ -1701,6 +1705,12 @@
struct lock_class_key *qdisc_tx_busylock;
int group;
struct pm_qos_request pm_qos_req;
+ u8 vlan_mode;
+ u16 tci;
+ u16 multicast_tci;
+ u8 ipversion;
+ struct list_head ext_mvlan_list;
+
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
Index: linux-3.18.21/include/linux/netfilter/nf_conntrack_rtsp.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/linux/netfilter/nf_conntrack_rtsp.h 2018-02-05 13:21:20.000000000 +0800
@@ -0,0 +1,78 @@
+/*
+ * RTSP extension for IP connection tracking.
+ * (C) 2003 by Tom Marshall <tmarshall at real.com>
+ * based on ip_conntrack_irc.h
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * 2013-03-04: Il'inykh Sergey <sergeyi at inango-sw.com>. Inango Systems Ltd
+ * - conditional compilation for kernel 3.7
+ * - port mapping improvements
+ */
+#ifndef _IP_CONNTRACK_RTSP_H
+#define _IP_CONNTRACK_RTSP_H
+
+#include <linux/version.h>
+
+//#define IP_NF_RTSP_DEBUG 1
+#define IP_NF_RTSP_VERSION "0.7"
+
+#ifdef __KERNEL__
+/* port block types */
+typedef enum {
+ pb_single, /* client_port=x */
+ pb_range, /* client_port=x-y */
+ pb_discon /* client_port=x/y (rtspbis) */
+} portblock_t;
+
+/* We record seq number and length of rtsp headers here, all in host order. */
+
+/*
+ * This structure is per expected connection. It is a member of struct
+ * ip_conntrack_expect. The TCP SEQ for the conntrack expect is stored
+ * there and we are expected to only store the length of the data which
+ * needs replaced. If a packet contains multiple RTSP messages, we create
+ * one expected connection per message.
+ *
+ * We use these variables to mark the entire header block. This may seem
+ * like overkill, but the nature of RTSP requires it. A header may appear
+ * multiple times in a message. We must treat two Transport headers the
+ * same as one Transport header with two entries.
+ */
+struct ip_ct_rtsp_expect
+{
+ u_int32_t len; /* length of header block */
+ portblock_t pbtype; /* Type of port block that was requested */
+ u_int16_t loport; /* Port that was requested, low or first */
+ u_int16_t hiport; /* Port that was requested, high or second */
+#if 0
+ uint method; /* RTSP method */
+ uint cseq; /* CSeq from request */
+#endif
+};
+
+extern unsigned int (*nf_nat_rtsp_hook)(struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ unsigned int protoff,
+#endif
+ unsigned int matchoff,
+ unsigned int matchlen,
+ struct ip_ct_rtsp_expect *prtspexp,
+ struct nf_conntrack_expect *rtp_exp,
+ struct nf_conntrack_expect *rtcp_exp);
+
+extern unsigned int (*nf_nat_rtp_hook)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ __be32 rtpip,
+ u_int16_t rtp_srcport);
+
+#define RTSP_PORT 554
+
+#endif /* __KERNEL__ */
+
+#endif /* _IP_CONNTRACK_RTSP_H */
Index: linux-3.18.21/include/linux/netfilter/xt_layer7.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/linux/netfilter/xt_layer7.h 2018-02-05 13:21:20.000000000 +0800
@@ -0,0 +1,13 @@
+#ifndef _XT_LAYER7_H
+#define _XT_LAYER7_H
+
+#define MAX_PATTERN_LEN 8192
+#define MAX_PROTOCOL_LEN 256
+
+struct xt_layer7_info {
+ char protocol[MAX_PROTOCOL_LEN];
+ char pattern[MAX_PATTERN_LEN];
+ u_int8_t invert;
+ u_int8_t pkt;
+};
+#endif
Index: linux-3.18.21/include/linux/netfilter_helpers.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/linux/netfilter_helpers.h 2018-02-05 13:21:20.000000000 +0800
@@ -0,0 +1,133 @@
+/*
+ * Helpers for netfiler modules. This file provides implementations for basic
+ * functions such as strncasecmp(), etc.
+ *
+ * gcc will warn for defined but unused functions, so we only include the
+ * functions requested. The following macros are used:
+ * NF_NEED_STRNCASECMP nf_strncasecmp()
+ * NF_NEED_STRTOU16 nf_strtou16()
+ * NF_NEED_STRTOU32 nf_strtou32()
+ */
+#ifndef _NETFILTER_HELPERS_H
+#define _NETFILTER_HELPERS_H
+
+/* Only include these functions for kernel code. */
+#ifdef __KERNEL__
+
+#include <linux/ctype.h>
+#define iseol(c) ( (c) == '\r' || (c) == '\n' )
+
+/*
+ * The standard strncasecmp()
+ */
+#ifdef NF_NEED_STRNCASECMP
+static int
+nf_strncasecmp(const char* s1, const char* s2, u_int32_t len)
+{
+ if (s1 == NULL || s2 == NULL)
+ {
+ if (s1 == NULL && s2 == NULL)
+ {
+ return 0;
+ }
+ return (s1 == NULL) ? -1 : 1;
+ }
+ while (len > 0 && tolower(*s1) == tolower(*s2))
+ {
+ len--;
+ s1++;
+ s2++;
+ }
+ return ( (len == 0) ? 0 : (tolower(*s1) - tolower(*s2)) );
+}
+#endif /* NF_NEED_STRNCASECMP */
+
+/*
+ * Parse a string containing a 16-bit unsigned integer.
+ * Returns the number of chars used, or zero if no number is found.
+ */
+#ifdef NF_NEED_STRTOU16
+static int
+nf_strtou16(const char* pbuf, u_int16_t* pval)
+{
+ int n = 0;
+
+ *pval = 0;
+ while (isdigit(pbuf[n]))
+ {
+ *pval = (*pval * 10) + (pbuf[n] - '0');
+ n++;
+ }
+
+ return n;
+}
+#endif /* NF_NEED_STRTOU16 */
+
+/*
+ * Parse a string containing a 32-bit unsigned integer.
+ * Returns the number of chars used, or zero if no number is found.
+ */
+#ifdef NF_NEED_STRTOU32
+static int
+nf_strtou32(const char* pbuf, u_int32_t* pval)
+{
+ int n = 0;
+
+ *pval = 0;
+ while (pbuf[n] >= '0' && pbuf[n] <= '9')
+ {
+ *pval = (*pval * 10) + (pbuf[n] - '0');
+ n++;
+ }
+
+ return n;
+}
+#endif /* NF_NEED_STRTOU32 */
+
+/*
+ * Given a buffer and length, advance to the next line and mark the current
+ * line.
+ */
+#ifdef NF_NEED_NEXTLINE
+static int
+nf_nextline(char* p, uint len, uint* poff, uint* plineoff, uint* plinelen)
+{
+ uint off = *poff;
+ uint physlen = 0;
+
+ if (off >= len)
+ {
+ return 0;
+ }
+
+ while (p[off] != '\n')
+ {
+ if (len-off <= 1)
+ {
+ return 0;
+ }
+
+ physlen++;
+ off++;
+ }
+
+ /* if we saw a crlf, physlen needs adjusted */
+ if (physlen > 0 && p[off] == '\n' && p[off-1] == '\r')
+ {
+ physlen--;
+ }
+
+ /* advance past the newline */
+ off++;
+
+ *plineoff = *poff;
+ *plinelen = physlen;
+ *poff = off;
+
+ return 1;
+}
+#endif /* NF_NEED_NEXTLINE */
+
+#endif /* __KERNEL__ */
+
+#endif /* _NETFILTER_HELPERS_H */
Index: linux-3.18.21/include/linux/netfilter_mime.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/linux/netfilter_mime.h 2018-02-05 13:21:20.000000000 +0800
@@ -0,0 +1,89 @@
+/*
+ * MIME functions for netfilter modules. This file provides implementations
+ * for basic MIME parsing. MIME headers are used in many protocols, such as
+ * HTTP, RTSP, SIP, etc.
+ *
+ * gcc will warn for defined but unused functions, so we only include the
+ * functions requested. The following macros are used:
+ * NF_NEED_MIME_NEXTLINE nf_mime_nextline()
+ */
+#ifndef _NETFILTER_MIME_H
+#define _NETFILTER_MIME_H
+
+/* Only include these functions for kernel code. */
+#ifdef __KERNEL__
+
+#include <linux/ctype.h>
+
+/*
+ * Given a buffer and length, advance to the next line and mark the current
+ * line. If the current line is empty, *plinelen will be set to zero. If
+ * not, it will be set to the actual line length (including CRLF).
+ *
+ * 'line' in this context means logical line (includes LWS continuations).
+ * Returns 1 on success, 0 on failure.
+ */
+#ifdef NF_NEED_MIME_NEXTLINE
+static int
+nf_mime_nextline(char* p, uint len, uint* poff, uint* plineoff, uint* plinelen)
+{
+ uint off = *poff;
+ uint physlen = 0;
+ int is_first_line = 1;
+
+ if (off >= len)
+ {
+ return 0;
+ }
+
+ do
+ {
+ while (p[off] != '\n')
+ {
+ if (len-off <= 1)
+ {
+ return 0;
+ }
+
+ physlen++;
+ off++;
+ }
+
+ /* if we saw a crlf, physlen needs adjusted */
+ if (physlen > 0 && p[off] == '\n' && p[off-1] == '\r')
+ {
+ physlen--;
+ }
+
+ /* advance past the newline */
+ off++;
+
+ /* check for an empty line */
+ if (physlen == 0)
+ {
+ break;
+ }
+
+ /* check for colon on the first physical line */
+ if (is_first_line)
+ {
+ is_first_line = 0;
+ if (memchr(p+(*poff), ':', physlen) == NULL)
+ {
+ return 0;
+ }
+ }
+ }
+ while (p[off] == ' ' || p[off] == '\t');
+
+ *plineoff = *poff;
+ *plinelen = (physlen == 0) ? 0 : (off - *poff);
+ *poff = off;
+
+ return 1;
+}
+#endif /* NF_NEED_MIME_NEXTLINE */
+
+#endif /* __KERNEL__ */
+
+#endif /* _NETFILTER_MIME_H */
Index: linux-3.18.21/include/linux/pci_ids.h
===================================================================
--- linux-3.18.21.orig/include/linux/pci_ids.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/linux/pci_ids.h 2018-02-05 13:21:21.000000000 +0800
@@ -2989,6 +2989,17 @@
#define PCI_VENDOR_ID_XEN 0x5853
#define PCI_DEVICE_ID_XEN_PLATFORM 0x0001
+#define PCI_VENDOR_ID_TRENDCHIP 0x9005
+#define PCI_DEVICE_ID_TC3162 0x3162
+
+#define PCI_VENDOR_ID_SIS 0x1039
+#define PCI_DEVICE_ID_SIS 0x000a
+#define PCI_VENDOR_ID_RT 0x1814
+#define PCI_DEVICE_ID_RT 0x0801
+#define PCI_VENDOR_ID_MTK 0x14c3
+#define PCI_DEVICE_ID_MTK 0x0801
+
+
#define PCI_VENDOR_ID_OCZ 0x1b85
#endif /* _LINUX_PCI_IDS_H */
Index: linux-3.18.21/include/linux/preempt.h
===================================================================
--- linux-3.18.21.orig/include/linux/preempt.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/linux/preempt.h 2018-02-05 13:21:21.000000000 +0800
@@ -20,7 +20,7 @@
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
-#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
+#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val) __preempt_count_add(val)
#define preempt_count_sub(val) __preempt_count_sub(val)
@@ -59,7 +59,7 @@
#define preempt_check_resched() \
do { \
- if (should_resched()) \
+ if (should_resched(0)) \
__preempt_schedule(); \
} while (0)
Index: linux-3.18.21/include/linux/proc_fs.h
===================================================================
--- linux-3.18.21.orig/include/linux/proc_fs.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/linux/proc_fs.h 2018-02-05 13:21:21.000000000 +0800
@@ -7,6 +7,56 @@
#include <linux/types.h>
#include <linux/fs.h>
+#include <linux/proc_ns.h>
+
+#if 1
+typedef int (read_proc_t)(char *page, char **start, off_t off,
+ int count, int *eof, void *data);
+typedef int (write_proc_t)(struct file *file, const char __user *buffer,
+ unsigned long count, void *data);
+struct proc_dir_entry {
+ unsigned int low_ino;
+ umode_t mode;
+ nlink_t nlink;
+ kuid_t uid;
+ kgid_t gid;
+ loff_t size;
+ const struct inode_operations *proc_iops;
+ const struct file_operations *proc_fops;
+ struct proc_dir_entry *next, *parent, *subdir;
+ void *data;
+ read_proc_t *read_proc;
+ write_proc_t *write_proc;
+ int pde_users;
+ atomic_t count; /* use count */
+ atomic_t in_use; /* number of callers into module in progress; */
+ /* negative -> it's going away RSN */
+ struct completion *pde_unload_completion;
+ struct list_head pde_openers; /* who did ->open, but not ->release */
+ spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
+ u8 namelen;
+ char name[];
+};
+
+union proc_op {
+ int (*proc_get_link)(struct dentry *, struct path *);
+ int (*proc_read)(struct task_struct *task, char *page);
+ int (*proc_show)(struct seq_file *m,
+ struct pid_namespace *ns, struct pid *pid,
+ struct task_struct *task);
+};
+
+struct proc_inode {
+ struct pid *pid;
+ int fd;
+ union proc_op op;
+ struct proc_dir_entry *pde;
+ struct ctl_table_header *sysctl;
+ struct ctl_table *sysctl_entry;
+ struct proc_ns ns;
+ struct inode vfs_inode;
+};
+#endif
struct proc_dir_entry;
#ifdef CONFIG_PROC_FS
@@ -27,6 +77,8 @@
const struct file_operations *,
void *);
+extern struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
+ struct proc_dir_entry *parent);
static inline struct proc_dir_entry *proc_create(
const char *name, umode_t mode, struct proc_dir_entry *parent,
const struct file_operations *proc_fops)
@@ -34,6 +86,18 @@
return proc_create_data(name, mode, parent, proc_fops, NULL);
}
+static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
+ mode_t mode, struct proc_dir_entry *base,
+ read_proc_t *read_proc, void * data)
+{
+ struct proc_dir_entry *res=create_proc_entry(name,mode,base);
+ if (res) {
+ res->read_proc=read_proc;
+ res->data=data;
+ }
+ return res;
+}
+
extern void proc_set_size(struct proc_dir_entry *, loff_t);
extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
extern void *PDE_DATA(const struct inode *);
Index: linux-3.18.21/include/linux/semaphore.h
===================================================================
--- linux-3.18.21.orig/include/linux/semaphore.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/linux/semaphore.h 2018-02-05 13:21:22.000000000 +0800
@@ -29,6 +29,9 @@
#define DEFINE_SEMAPHORE(name) \
struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
+#define DECLARE_MUTEX(name) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
+
static inline void sema_init(struct semaphore *sem, int val)
{
static struct lock_class_key __key;
Index: linux-3.18.21/include/linux/skbuff.h
===================================================================
--- linux-3.18.21.orig/include/linux/skbuff.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/linux/skbuff.h 2018-02-05 14:20:40.000000000 +0800
@@ -33,7 +33,7 @@
#include <linux/netdev_features.h>
#include <linux/sched.h>
#include <net/flow_keys.h>
-
+#include <linux/ecnt_skbuff.h>
/* A. Checksumming of received packets by device.
*
* CHECKSUM_NONE:
@@ -152,6 +152,7 @@
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
atomic_t use;
+ __u16 lVlanId;
};
#endif
@@ -624,11 +625,15 @@
#ifdef CONFIG_NETWORK_SECMARK
__u32 secmark;
#endif
- union {
+ /* union { */
+ /* fix bug
+ * when skb->dropcount be valued, eg: net/core/sock.c sock_queue_rcv_skb()
+ * it will cause skb->mark miss its original value
+ */
__u32 mark;
__u32 dropcount;
__u32 reserved_tailroom;
- };
+ /* }; */
union {
__be16 inner_protocol;
@@ -647,6 +652,101 @@
/* private: */
__u32 headers_end[0];
/* public: */
+#if defined(CONFIG_CPU_TC3162) || defined(CONFIG_MIPS_TC3262)
+ int (*skb_recycling_callback)(struct sk_buff *skb);
+ int skb_recycling_ind;
+#endif
+#if defined(TCSUPPORT_RA_HWNAT)
+ char foe[8];
+#endif
+#define BRIDGE_INTERFACE 1
+#define ROUTE_INTERFACE 2
+ __u8 bridge_flag;
+ u32 adapterAddr; /* for crypto's private adapter address */
+ __u8 ipsec_pt_flag;
+#if (defined(TCSUPPORT_WAN_GPON) || defined (TCSUPPORT_WAN_EPON))
+ union{
+ struct{
+ __u16 gem_type : 1 ; // 0:unicast, 1:multicast
+ __u16 gem_port :12 ; // gpon mapping gem port id
+ __u16 : 3 ;
+ };
+
+ struct{
+ __u8 epon_queue ;
+ __u8 epon_pbit ;
+ };
+
+ __u16 xpon_raw_info ;
+ };
+ __u32 pon_mark ; //upstream use for tse/tsid, downstream use for gpon ds mapping pkt_from_wan/pkt_down_mapping_flag/down queue id.
+ __u8 v_if ; //virtual interface. (GPON MAC bridge or EPON LLID)
+#endif /* (defined(TCSUPPORT_WAN_GPON) || defined (TCSUPPORT_WAN_EPON)) */
+ __u32 pon_vlan_flag;
+ struct net_device * original_dev;
+#if defined(TCSUPPORT_PON_MAC_FILTER)
+ __u32 pon_mac_filter_flag;
+#endif
+#if defined(TCSUPPORT_VLAN_TAG) || defined(TCSUPPORT_CT_VLAN_TAG)
+ __u16 vlan_tags[2];
+#endif
+ __u32 vlan_tag_flag;
+ __u16 pon_vlan_tpid[4];//index = 0 mean inner tag,4 mean outer tag
+ __u16 pon_vlan_tci[4];
+ __u8 pon_tag_num;
+#if !defined(TCSUPPORT_FH_ENV)
+ __u32 xpon_igmp_flag;
+#endif
+
+#ifdef TCSUPPORT_PORTBIND
+#if !defined(TCSUPPORT_CT)
+ #define MASK_ORIGIN_DEV 0x1 /* flag for port bind set origin dev name */
+ #define MASK_OUT_DEV 0x2 /* flag for port bind set origin dev name */
+ #define IFNAMSIZ 16
+ __u32 portbind_mark;
+ char orig_dev_name[IFNAMSIZ];
+#endif
+#endif
+#define PON_CLASSIFICATION_REMARK (1<<16)
+/*#if defined(TCSUPPORT_PON_VLAN) || defined(TCSUPPORT_PON_MAC_FILTER) || (defined(TCSUPPORT_GPON_MAPPING) && defined(TCSUPPORT_GPON_DOWNSTREAM_MAPPING))*/
+ __u8 ppe_info_flag;
+ __u16 ppe_magic;
+ __u8 ppe_ai;
+ __u16 ppe_foe_entry;
+/*#endif*/
+
+#if !defined(TCSUPPORT_FH_ENV)
+ __u16 lan_vlan_tci; // store the tci of pkt from lan port
+ __u8 lan_vlan_tci_valid; // indicate whether the above "lan_vlan_tci"field is valid
+#endif
+
+ /*TCSUPPORT_DLF_CTRL start*/
+ __u16 dlf_resv0; /*stroe special tag*/
+ __u32 dlf_resv1; /*store vlan1,vlan2*/
+ __u8 dlf_resv2; /*store vpm and vlan layer*/
+ /*TCSUPPORT_DLF_CTRL end*/
+
+/* TCSUPPORT_SWNAT */
+ int hash_index;
+ __u8 sw_nat_flag;
+ __u8 foe_tbl_index;
+/* TCSUPPORT_SWNAT end */
+
+#if defined(TCSUPPORT_CPU_EN7516) || defined(TCSUPPORT_CPU_EN7527)
+#define TSO_ENABLE_MARK 0x1
+ __u8 tso_mark;
+#endif
+ /*
+ bit0-bit3 : queue mapping index
+ bit4-bit10: dscp remark
+ */
+ __u32 mark2;
+ ecnt_sk_buff_t ecnt_sk_buff;
+
+#ifdef TCSUPPORT_WLAN_SW_RPS
+ void * pAd;
+ __u8 rxBlk[150];//size 150 should larger than sizeof(RX_BLK) in wifi driver
+#endif
/* These elements must be at the end, see alloc_skb() for details. */
sk_buff_data_t tail;
@@ -3113,6 +3213,81 @@
#endif
}
+static inline void ecnt_nf_copy(struct sk_buff *dst, const struct sk_buff *src,
+ bool copy)
+{
+#if defined(TCSUPPORT_RA_HWNAT)
+ if (ra_sw_nat_hook_xfer)
+ ra_sw_nat_hook_xfer(dst, src);
+#endif
+
+#if !defined(TCSUPPORT_CT)
+#ifdef TCSUPPORT_PORTBIND /*CONFIG_PORT_BINDING*/
+ dst->portbind_mark = src->portbind_mark;
+ memcpy(dst->orig_dev_name, src->orig_dev_name, IFNAMSIZ);
+#endif
+#endif
+#if defined(TCSUPPORT_VLAN_TAG) || defined(TCSUPPORT_CT_VLAN_TAG)
+ dst->vlan_tags[0] = src->vlan_tags[0];
+ dst->vlan_tags[1] = src->vlan_tags[1];
+ dst->vlan_tag_flag = src->vlan_tag_flag;
+#endif
+#ifdef TCSUPPORT_PON_VLAN
+ dst->pon_vlan_tpid[0] = src->pon_vlan_tpid[0];
+ dst->pon_vlan_tpid[1] = src->pon_vlan_tpid[1];
+ dst->pon_vlan_tpid[2] = src->pon_vlan_tpid[2];
+ dst->pon_vlan_tpid[3] = src->pon_vlan_tpid[3];
+
+ dst->pon_vlan_tci[0] = src->pon_vlan_tci[0];
+ dst->pon_vlan_tci[1] = src->pon_vlan_tci[1];
+ dst->pon_vlan_tci[2] = src->pon_vlan_tci[2];
+ dst->pon_vlan_tci[3] = src->pon_vlan_tci[3];
+
+ dst->pon_tag_num = src->pon_tag_num;
+ dst->pon_vlan_flag = src->pon_vlan_flag;
+#endif
+ dst->original_dev = src->original_dev;
+
+#ifdef TCSUPPORT_XPON_IGMP_CHT
+ dst->xpon_igmp_flag = src->xpon_igmp_flag;
+#endif
+
+#ifdef TCSUPPORT_PON_MAC_FILTER
+ dst->pon_mac_filter_flag = src->pon_mac_filter_flag;
+#endif
+#if defined(TCSUPPORT_PON_VLAN) || defined(TCSUPPORT_PON_MAC_FILTER) || (defined(TCSUPPORT_GPON_MAPPING) && defined(TCSUPPORT_GPON_DOWNSTREAM_MAPPING))
+ dst->ppe_info_flag = src->ppe_info_flag;
+ dst->ppe_magic = src->ppe_magic;
+ dst->ppe_ai = src->ppe_ai;
+ dst->ppe_foe_entry = src->ppe_foe_entry;
+#endif
+#if (defined(TCSUPPORT_WAN_GPON) || defined (TCSUPPORT_WAN_EPON))
+ dst->xpon_raw_info = src->xpon_raw_info;
+ dst->pon_mark = src->pon_mark;
+ dst->v_if = src->v_if;
+#endif
+
+#if defined(TCSUPPORT_UPSTREAM_VLAN_POLICER)
+ dst->up_strm_policer_flag = src->up_strm_policer_flag;
+ dst->up_strm_policer_trtcm_id = src->up_strm_policer_trtcm_id;
+#endif // #if defined(TCSUPPORT_UPSTREAM_VLAN_POLICER)
+
+#if defined(TCSUPPORT_RA_HWNAT)
+ dst->bridge_flag = src->bridge_flag;
+#endif
+#if defined(TCSUPPORT_TSO_ENABLE)
+ dst->tso_mark = src->tso_mark;
+#endif
+ dst->mark2 = src->mark2;
+
+ dst->adapterAddr = src->adapterAddr;
+ dst->ipsec_pt_flag = src->ipsec_pt_flag;
+ dst->hash_index = src->hash_index;
+ dst->sw_nat_flag = src->sw_nat_flag;
+ dst->foe_tbl_index = src->foe_tbl_index;
+ memcpy(&dst->ecnt_sk_buff, &src->ecnt_sk_buff, sizeof(ecnt_sk_buff_t));
+}
+
/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
bool copy)
@@ -3131,6 +3306,7 @@
if (copy)
dst->nf_trace = src->nf_trace;
#endif
+ ecnt_nf_copy(dst,src,copy);
}
static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
@@ -3360,5 +3536,42 @@
skb_network_header(skb);
return hdr_len + skb_gso_transport_seglen(skb);
}
+
+#if defined(TCSUPPORT_MAX_PACKET_2000)
+#define SKBMGR_RX_BUF_LEN 2048
+#else
+#define SKBMGR_RX_BUF_LEN SKB_WITH_OVERHEAD(2048)
+#endif
+//use for wifi driver, size 3840 is accord to wifi driver RX_BUFFER_AGGRESIZE.
+#if defined(TCSUPPORT_MBUF_ENHANCE)
+#define SKBMGR_4K_RX_BUF_LEN SKB_WITH_OVERHEAD(4096)
+#else
+#define SKBMGR_4K_RX_BUF_LEN SKB_WITH_OVERHEAD(4352)
+#endif
+
+#if defined(CONFIG_CPU_TC3162) || defined(CONFIG_MIPS_TC3262)
+struct sk_buff *skbmgr_alloc_skb2k(void);
+int skbmgr_recycling_callback(struct sk_buff *skb);
+
+static inline struct sk_buff *skbmgr_dev_alloc_skb2k(void)
+{
+ struct sk_buff *skb = skbmgr_alloc_skb2k();
+ if (likely(skb))
+ skb_reserve(skb, NET_SKB_PAD);
+ return skb;
+}
+struct sk_buff *skbmgr_alloc_skb4k(void);
+
+int skbmgr_4k_recycling_callback(struct sk_buff *skb);
+
+static inline struct sk_buff *skbmgr_dev_alloc_skb4k(void)
+{
+ struct sk_buff *skb = skbmgr_alloc_skb4k();
+ if (likely(skb))
+ skb_reserve(skb, NET_SKB_PAD);
+ return skb;
+}
+#endif
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
Index: linux-3.18.21/include/linux/spinlock_types.h
===================================================================
--- linux-3.18.21.orig/include/linux/spinlock_types.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/linux/spinlock_types.h 2018-02-05 13:21:22.000000000 +0800
@@ -81,6 +81,14 @@
#define __SPIN_LOCK_UNLOCKED(lockname) \
(spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
+/*
+ * SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence
+ * deprecated.
+ * Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as
+ * appropriate.
+ */
+#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init)
+
#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
#include <linux/rwlock_types.h>
Index: linux-3.18.21/include/linux/usb.h
===================================================================
--- linux-3.18.21.orig/include/linux/usb.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/linux/usb.h 2018-02-05 13:21:22.000000000 +0800
@@ -589,13 +589,19 @@
struct list_head filelist;
+#ifdef TCSUPPORT_MUH_TEST
+#define USB_MAXCHILDREN (31)
+ struct usb_device *children[USB_MAXCHILDREN];
+#endif
int maxchild;
u32 quirks;
atomic_t urbnum;
unsigned long active_duration;
-
+#ifdef TCSUPPORT_USB_HOST_LED
+ u8 phyportnum;
+#endif
#ifdef CONFIG_PM
unsigned long connect_time;
@@ -1883,6 +1889,9 @@
/* debugfs stuff */
extern struct dentry *usb_debug_root;
+#define err(format, arg...) \
+ printk(KERN_ERR KBUILD_MODNAME ": " format "\n", ##arg)
+
/* LED triggers */
enum usb_led_event {
USB_LED_EVENT_HOST = 0,
Index: linux-3.18.21/include/net/ip.h
===================================================================
--- linux-3.18.21.orig/include/net/ip.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/net/ip.h 2018-02-05 13:21:23.000000000 +0800
@@ -473,6 +473,9 @@
IP_DEFRAG_VS_FWD,
IP_DEFRAG_AF_PACKET,
IP_DEFRAG_MACVLAN,
+#ifdef MTK_CRYPTO_DRIVER
+ IP_DEFRAG_VPN_SPEED,
+#endif
};
int ip_defrag(struct sk_buff *skb, u32 user);
Index: linux-3.18.21/include/net/mtk_esp.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/net/mtk_esp.h 2018-02-05 13:21:23.000000000 +0800
@@ -0,0 +1,262 @@
+#ifndef MTK_ESP_H
+#define MTK_ESP_H
+
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <asm/tc3162/tc3162.h>
+
+#define HASH_DIGEST_OUT 0
+#define HASH_DIGEST_IN 1
+#define CRYPTO_ENCRYPTION 1
+#define CRYPTO_DECRYPTION 2
+
+#ifdef TCSUPPORT_IPSEC_PASSTHROUGH
+#define CONFIG_VPN_PASSTHROUGH_LOG
+#define CONFIG_VPN_PASSTHROUGH_INFO
+#endif
+
+#define VPN_DEFAULT_MAX_MTU 1500
+#define DECRIPTION_DIRECTION 0
+#define ENCRIPTION_DIRECTION 1
+
+#define VPN_PASSTHROUGH_SWITCH_ON 1
+#define VPN_PASSTHROUGH_SWITCH_OFF 0
+
+#define IPESC_EIP93_ADAPTERS 16
+#define SKB_QUEUE_MAX_SIZE 100
+
+/************************************************************************
+* E X T E R N E L S T R U C T U R E D E F I N I T I O N
+*************************************************************************
+*/
+#if 1 /*def TCSUPPORT_IPSEC_PASSTHROUGH*/
+enum vpn_passthrough_para{
+ VPN_PASSTHROUGH_DEBUG_INFO = 0,
+ VPN_PASSTHROUGH_DEBUG_LEVER,
+ VPN_PASSTHROUGH_SWITCH,
+ VPN_PASSTHROUGH_SKB_QUEUE_MAX_NUM,
+};
+
+#ifdef CONFIG_VPN_PASSTHROUGH_LOG
+#ifndef VPN_LOG
+typedef enum
+{
+ VPN_PASSTHROUGH_LOG_OFF = 0,
+ VPN_PASSTHROUGH_LOG_ERROR,
+ VPN_PASSTHROUGH_LOG_WARNING ,
+ VPN_PASSTHROUGH_LOG_DBG,
+ VPN_PASSTHROUGH_LOG_TRACE,
+ VPN_PASSTHROUGH_LOG_DUMP_DATA,
+ VPN_PASSTHROUGH_LOG_MAX_LEVER,
+} SIF_LOG_LEVER;
+#endif
+#endif
+
+typedef struct entrynum_s
+{
+ uint32 firstperiod;
+ uint32 secondperiod;
+}entrynum_t;
+
+typedef struct d_info_s
+{
+ uint32 d_phase1_normal_counts;
+ uint32 d_phase1_speed_counts;
+ uint32 d_phase1_drop_counts;
+ uint32 d_phase2_normal_counts;
+ uint32 d_phase2_speed_counts;
+ uint32 d_phase2_drop_counts;
+} d_info_t;
+
+typedef struct e_info_s
+{
+ uint32 e_phase1_normal_counts;
+ uint32 e_phase1_speed_counts;
+ uint32 e_phase1_drop_counts;
+ uint32 e_phase1_drop_counts_1;
+ uint32 e_phase1_drop_counts_2;
+ uint32 e_phase2_normal_counts;
+ uint32 e_phase2_speed_counts;
+ uint32 e_phase2_drop_counts;
+} e_info_t;
+
+typedef struct vpnPassThrough_Stats_s
+{
+ struct d_info_s dinfo;
+ struct e_info_s einfo;
+}vpnPassThrough_Stats_t;
+
+typedef struct vpnPassThrough_Global_Para
+{
+ vpnPassThrough_Stats_t statistics;
+ uint32 vpndebuglevel;
+ uint32 vpnpassthroughswitch;
+ uint32 vpnskbqueuemaxnum;
+}vpnPassThrough_Global_Para_t;
+
+#endif
+
+typedef union
+{
+ struct
+ {
+ unsigned int padCrtlStat : 8;
+ unsigned int errStatus : 8;
+ unsigned int padValue : 8;
+ unsigned int prngMode : 2;
+ unsigned int haltMode : 1;
+ unsigned int hashFinal : 1;
+ unsigned int initArc4 : 1;
+ unsigned int reserved : 1;
+ unsigned int peReady : 1;
+ unsigned int hostReady : 1;
+ } bits;
+ unsigned int word;
+
+} peCrtlStat_t;
+
+typedef union
+{
+ struct
+ {
+ unsigned int byPass : 8;
+ unsigned int peReady : 1;
+ unsigned int hostReady : 1;
+ unsigned int reserved : 2;
+ unsigned int length : 20;
+ } bits;
+ unsigned int word;
+
+} peLength_t;
+
+typedef struct addrHandler_s
+{
+ unsigned int addr;
+ dma_addr_t phyAddr;
+
+} addrHandler_t;
+
+typedef struct eip93DescpHandler_s
+{
+ peCrtlStat_t peCrtlStat;
+ addrHandler_t srcAddr;
+ addrHandler_t dstAddr;
+ addrHandler_t saAddr;
+ addrHandler_t stateAddr;
+ addrHandler_t arc4Addr;
+ unsigned int userId;
+ peLength_t peLength;
+
+} eip93DescpHandler_t;
+
+typedef struct addrsDigestPreCompute_s
+{
+ unsigned int *hashKeyTank;
+ addrHandler_t ipadHandler;
+ addrHandler_t opadHandler;
+ unsigned int blkSize;
+ eip93DescpHandler_t *cmdHandler;
+ addrHandler_t saHandler;
+ addrHandler_t stateHandler;
+ addrHandler_t stateHandler2;
+ unsigned int digestWord;
+ unsigned int *pIDigest;
+ unsigned int *pODigest;
+
+} addrsDigestPreCompute_t;
+
+typedef struct ipsecEip93Adapter_s
+{
+ unsigned int spi; //every ipsec flow has a unique spi
+ struct xfrm_state *x; //the SA
+ unsigned int isHashPreCompute; //0:pre-compute init, 1:inner digest done, 2:inner digest done, 3:pre-compute done
+ unsigned int isEncryptOrDecrypt; //1:encrypt, 2:decrypt
+ struct sk_buff_head skbQueue;
+ addrsDigestPreCompute_t *addrsPreCompute; //for hash pre-compute
+ eip93DescpHandler_t *cmdHandler; //for encrypt/decrypt
+ spinlock_t lock;
+ unsigned int addedLen; //refer to ssh_hwaccel_alloc_combined() in safenet_la.c
+#if 1 /* def TCSUPPORT_IPSEC_PASSTHROUGH */
+ struct entrynum_s foeentryindex;
+ //below two parameters for input
+ struct net_device *dev;
+ struct sec_path *sp;
+ //below one parameter for output
+ struct dst_entry *xdst;
+ //below one parameter for both
+ struct dst_entry *dst;
+#endif
+
+} ipsecEip93Adapter_t;
+
+#if 1 /* def TCSUPPORT_IPSEC_PASSTHROUGH */
+//default open for hw_nat
+typedef struct adapterlistpara_s
+{
+ int para;//0:dst
+ struct sk_buff *skb;
+}adapterlistpara_t;
+
+/*below define is for ipsec passthrough period II
+ because input finish handle is the same as the output finish handle,so use the same strcuture
+*/
+typedef struct pushinfo_s
+ {
+ eip93DescpHandler_t *resHandler;
+ ipsecEip93Adapter_t *currentAdapter;
+ }pushinfo_t;
+
+ typedef struct pullinfo_s
+ {
+ struct sk_buff *skb;
+ unsigned int entry_index;
+ }pullinfo_t;
+
+ typedef struct ipsec_finishpara_s
+ {
+ union
+ {
+ struct pushinfo_s learn_push;
+ struct pullinfo_s learn_pull;
+ }data;
+
+ int flag;
+ }ipsec_finishpara_t;
+
+
+/*below define is for ipsec passthrough period I
+ because input handle is the same as the output handle,so use the same strcuture
+*/
+ typedef struct learninfo_s
+ {
+ struct sk_buff *skb;
+ struct xfrm_state *x;
+ }learninfo_t;
+
+ typedef struct speedinfo_s
+ {
+ struct sk_buff *skb;
+ unsigned int entry_idx;
+ }speedinfo_t;
+
+ typedef struct ipsec_para_s
+ {
+ union
+ {
+ struct learninfo_s learn;
+ struct speedinfo_s speed;
+ }data;
+
+ int flag;
+ }ipsec_para_t;
+#endif
+
+/************************************************************************
+* E X T E R N E L F U N C T I O N D E C L A R A T I O N
+*************************************************************************
+*/
+
+
+#endif
+
Index: linux-3.18.21/include/net/neighbour.h
===================================================================
--- linux-3.18.21.orig/include/net/neighbour.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/net/neighbour.h 2018-02-05 13:21:23.000000000 +0800
@@ -56,6 +56,9 @@
NEIGH_VAR_QUEUE_LEN, /* same data as NEIGH_VAR_QUEUE_LEN_BYTES */
NEIGH_VAR_RETRANS_TIME_MS, /* same data as NEIGH_VAR_RETRANS_TIME */
NEIGH_VAR_BASE_REACHABLE_TIME_MS, /* same data as NEIGH_VAR_BASE_REACHABLE_TIME */
+#ifdef TCSUPPORT_IPV6_ENHANCEMENT
+ NEIGH_VAR_DEFAULT_ROUTE,
+#endif
/* Following are used by "default" only */
NEIGH_VAR_GC_INTERVAL,
NEIGH_VAR_GC_THRESH1,
@@ -82,6 +85,9 @@
int reachable_time;
int data[NEIGH_VAR_DATA_MAX];
+#ifdef TCSUPPORT_IPV6_ENHANCEMENT
+ char dlf_route[64];
+#endif
DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
};
Index: linux-3.18.21/include/net/netfilter/nf_conntrack.h
===================================================================
--- linux-3.18.21.orig/include/net/netfilter/nf_conntrack.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/net/netfilter/nf_conntrack.h 2018-02-05 14:20:41.000000000 +0800
@@ -111,9 +111,27 @@
#ifdef CONFIG_NET_NS
struct net *ct_net;
#endif
+#if defined(CONFIG_NETFILTER_XT_MATCH_LAYER7) || \
+ defined(CONFIG_NETFILTER_XT_MATCH_LAYER7_MODULE)
+ struct {
+ /*
+ * e.g. "http". NULL before decision. "unknown" after decision
+ * if no match.
+ */
+ char *app_proto;
+ /*
+ * application layer data so far. NULL after match decision.
+ */
+ char *app_data;
+ unsigned int app_data_len;
+ } layer7;
+#endif
/* Storage reserved for other modules, must be the last member */
union nf_conntrack_proto proto;
+
+ u_int16_t loport; /* Port that was requested, low or first */
+ u_int16_t hiport; /* Port that was requested, high or second */
};
static inline struct nf_conn *
Index: linux-3.18.21/include/net/sock.h
===================================================================
--- linux-3.18.21.orig/include/net/sock.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/net/sock.h 2018-02-05 13:21:24.000000000 +0800
@@ -69,6 +69,7 @@
#include <net/checksum.h>
#include <net/tcp_states.h>
#include <linux/net_tstamp.h>
+#include <linux/foe_hook.h>
struct cgroup;
struct cgroup_subsys;
@@ -423,6 +424,8 @@
#endif
__u32 sk_mark;
u32 sk_classid;
+ __u16 lVlanId;
+
struct cg_proto *sk_cgrp;
void (*sk_state_change)(struct sock *sk);
void (*sk_data_ready)(struct sock *sk);
@@ -431,6 +434,7 @@
int (*sk_backlog_rcv)(struct sock *sk,
struct sk_buff *skb);
void (*sk_destruct)(struct sock *sk);
+ struct SkbFoeInfo sk_foe_info;
};
#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
Index: linux-3.18.21/include/uapi/linux/Kbuild
===================================================================
--- linux-3.18.21.orig/include/uapi/linux/Kbuild 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/uapi/linux/Kbuild 2018-02-05 13:21:25.000000000 +0800
@@ -125,6 +125,7 @@
header-y += firewire-cdev.h
header-y += firewire-constants.h
header-y += flat.h
+header-y += foe_hook.h
header-y += fou.h
header-y += fs.h
header-y += fsl_hypervisor.h
@@ -183,6 +184,7 @@
header-y += if_x25.h
header-y += igmp.h
header-y += in.h
+header-y += ecnt_in.h
header-y += in6.h
header-y += in_route.h
header-y += inet_diag.h
@@ -328,6 +330,7 @@
header-y += ptrace.h
header-y += qnx4_fs.h
header-y += qnxtypes.h
+header-y += qos_type.h
header-y += quota.h
header-y += radeonfb.h
header-y += random.h
Index: linux-3.18.21/include/uapi/linux/atm.h
===================================================================
--- linux-3.18.21.orig/include/uapi/linux/atm.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/uapi/linux/atm.h 2018-02-05 13:21:25.000000000 +0800
@@ -129,6 +129,7 @@
#define ATM_VBR 3
#define ATM_ABR 4
#define ATM_ANYCLASS 5 /* compatible with everything */
+#define ATM_nrtVBR 6
#define ATM_MAX_PCR -1 /* maximum available PCR */
@@ -154,6 +155,8 @@
unsigned int adtf :10; /* ACR Decrease Time Factor (10-bit) */
unsigned int cdf :3; /* Cutoff Decrease Factor (3-bit) */
unsigned int spare :9; /* spare bits */
+ int scr; /* desired SCR in cells per second */
+ int mbs; /* maximum burst size (MBS) in cells */
};
struct atm_qos {
Index: linux-3.18.21/include/uapi/linux/ecnt_in.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/uapi/linux/ecnt_in.h 2018-02-05 14:20:41.000000000 +0800
@@ -0,0 +1,35 @@
+#ifndef _LINUX_ECNT_IN_H
+#define _LINUX_ECNT_IN_H
+
+/*
+#define IP_MINTTL 21
+#define IP_NODEFRAG 22
+*/
+#define IP_SKB_MARK_FLAG 23
+#define IP_SKB_VLAN_ID_FLAG 24
+
+
+/*
+#define IP_MULTICAST_ALL 49
+#define IP_UNICAST_IF 50
+*/
+#define IP_SKB_MARK 51
+#define IP_SKB_VLAN_ID 52
+
+
+
+/* interface type */
+#define IF_TYPE_WAN_ROUTE (1<<0)
+#define IF_TYPE_WAN_BRIDE (1<<1)
+#define IF_TYPE_LAN (1<<2)
+#define IF_TYPE_INTERNET (1<<3)
+#define IF_TYPE_OTHER (1<<4)
+#define IF_TYPE_HAS_REGISTER (1<<5)
+#define IF_TYPE_LAN_BIND_INTERNET (1<<6)
+#define IF_TYPE_OTHER_WAN_BRIDE (IF_TYPE_WAN_BRIDE | IF_TYPE_OTHER)
+
+#define MAX_PVC_NUM 8
+#define MAX_SMUX_NUM 8
+
+
+#endif /* _LINUX_ECNT_IN_H */
Index: linux-3.18.21/include/uapi/linux/foe_hook.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/uapi/linux/foe_hook.h 2018-02-05 13:21:25.000000000 +0800
@@ -0,0 +1,13 @@
+#ifndef __FOE_USE_HOOK_H
+#define __FOE_USE_HOOK_H
+
+struct SkbFoeInfo{
+ unsigned short ppe_magic;
+ unsigned short ppe_foe_entry;
+ unsigned char ppe_ai;
+ unsigned char wan_type;
+ unsigned short wan_index;
+};
+
+
+#endif
Index: linux-3.18.21/include/uapi/linux/if_bridge.h
===================================================================
--- linux-3.18.21.orig/include/uapi/linux/if_bridge.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/uapi/linux/if_bridge.h 2018-02-05 14:20:41.000000000 +0800
@@ -51,6 +51,19 @@
#define BR_STATE_FORWARDING 3
#define BR_STATE_BLOCKING 4
+#if defined(CONFIG_BRIDGE_IGMP_SNOOPING) && defined(TCSUPPORT_IGMP_SNOOPING)
+/*IGMP Snooping*/
+#define BRCTL_SET_IGMPSNOOPING_STATE 19
+#define BRCTL_SET_IGMPSNOOPING_AGEING_TIME 20
+#define BRCTL_GET_MC_FDB_ENTRIES 21
+#define BRCTL_SET_IGMPSNOOPING_QUICKLEAVE 22
+#define BRCTL_SET_IGMPSNOOPING_DBG 23
+#endif
+
+#if defined(TCSUPPORT_XPON_IGMP)
+#define BRCTL_XPON_IGMP_CMD 1000
+#endif
+
struct __bridge_info {
__u64 designated_root;
__u64 bridge_id;
@@ -71,6 +84,12 @@
__u32 tcn_timer_value;
__u32 topology_change_timer_value;
__u32 gc_timer_value;
+#if defined(CONFIG_BRIDGE_IGMP_SNOOPING) && defined(TCSUPPORT_IGMP_SNOOPING)
+ __u8 igmpsnoop_enabled;
+ __u8 igmpsnoop_quickleave;
+ __u8 igmpsnoop_dbg;
+ __u32 igmpsnoop_ageing_time;
+#endif
};
struct __port_info {
@@ -87,7 +106,26 @@
__u32 message_age_timer_value;
__u32 forward_delay_timer_value;
__u32 hold_timer_value;
+#if defined(CONFIG_BRIDGE_IGMP_SNOOPING) && defined(TCSUPPORT_IGMP_SNOOPING)
+ __u8 is_router;
+#endif
+
+};
+
+#if defined(CONFIG_BRIDGE_IGMP_SNOOPING) && defined(TCSUPPORT_IGMP_SNOOPING)
+struct __mc_fdb_entry
+{
+ __u8 group_addr[40];
+ __u8 host_addr[6];
+ __u8 group_mac[6];
+ __u16 port_no;
+ __u32 ageing_timer_value;
+ __u8 src_addr[40];
+ __u8 filter_mode;
+ __u8 version;
+ __u32 unused;
};
+#endif
struct __fdb_entry {
__u8 mac_addr[ETH_ALEN];
Index: linux-3.18.21/include/uapi/linux/if_ether.h
===================================================================
--- linux-3.18.21.orig/include/uapi/linux/if_ether.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/uapi/linux/if_ether.h 2018-02-05 13:21:25.000000000 +0800
@@ -62,6 +62,8 @@
#define ETH_P_ATALK 0x809B /* Appletalk DDP */
#define ETH_P_AARP 0x80F3 /* Appletalk AARP */
#define ETH_P_8021Q 0x8100 /* 802.1Q VLAN Extended Header */
+#define ETH_P_QinQ_88a8 0x88a8 /* VLAN Extended Header */
+#define ETH_P_QinQ_9100 0x9100 /* VLAN Extended Header */
#define ETH_P_IPX 0x8137 /* IPX over DIX */
#define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */
#define ETH_P_PAUSE 0x8808 /* IEEE Pause frames. See 802.3 31B */
Index: linux-3.18.21/include/uapi/linux/if_packet.h
===================================================================
--- linux-3.18.21.orig/include/uapi/linux/if_packet.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/uapi/linux/if_packet.h 2018-02-05 13:21:25.000000000 +0800
@@ -54,6 +54,7 @@
#define PACKET_FANOUT 18
#define PACKET_TX_HAS_OFF 19
#define PACKET_QDISC_BYPASS 20
+#define PACKET_SKB_FOE_INFO 21
#define PACKET_FANOUT_HASH 0
#define PACKET_FANOUT_LB 1
@@ -191,7 +192,11 @@
* you can see which blk[s] is[are] outstanding etc.
* 3. Validate kernel code.
*/
+ #if 0
__aligned_u64 seq_num;
+ #else
+ __u64 seq_num;
+ #endif
/*
* ts_last_pkt:
Index: linux-3.18.21/include/uapi/linux/in.h
===================================================================
--- linux-3.18.21.orig/include/uapi/linux/in.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/uapi/linux/in.h 2018-02-05 13:21:25.000000000 +0800
@@ -20,6 +20,7 @@
#include <linux/types.h>
#include <linux/socket.h>
+#include <linux/ecnt_in.h>
/* Standard well-defined IP protocols. */
enum {
@@ -109,6 +110,9 @@
#define IP_MINTTL 21
#define IP_NODEFRAG 22
+/*
+! important !: Add new MACRO to ecnt_in.h
+*/
/* IP_MTU_DISCOVER values */
#define IP_PMTUDISC_DONT 0 /* Never send DF frames */
@@ -144,6 +148,9 @@
#define MCAST_MSFILTER 48
#define IP_MULTICAST_ALL 49
#define IP_UNICAST_IF 50
+/*
+! important !: Add new MACRO to ecnt_in.h
+*/
#define MCAST_EXCLUDE 0
#define MCAST_INCLUDE 1
Index: linux-3.18.21/include/uapi/linux/in6.h
===================================================================
--- linux-3.18.21.orig/include/uapi/linux/in6.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/uapi/linux/in6.h 2018-02-05 13:21:25.000000000 +0800
@@ -282,6 +282,9 @@
#define IPV6_TRANSPARENT 75
#define IPV6_UNICAST_IF 76
+#define IPV6_SKB_MARK 77
+#define IPV6_SKB_VLAN_ID 78
+
/*
* Multicast Routing:
* see include/uapi/linux/mroute6.h.
Index: linux-3.18.21/include/uapi/linux/netfilter/Kbuild
===================================================================
--- linux-3.18.21.orig/include/uapi/linux/netfilter/Kbuild 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/uapi/linux/netfilter/Kbuild 2018-02-05 13:21:25.000000000 +0800
@@ -59,6 +59,7 @@
header-y += xt_iprange.h
header-y += xt_ipvs.h
header-y += xt_l2tp.h
+header-y += xt_layer7.h
header-y += xt_length.h
header-y += xt_limit.h
header-y += xt_mac.h
@@ -85,3 +86,4 @@
header-y += xt_tcpudp.h
header-y += xt_time.h
header-y += xt_u32.h
+header-y += xt_layer7.h
Index: linux-3.18.21/include/uapi/linux/netfilter/xt_layer7.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/uapi/linux/netfilter/xt_layer7.h 2018-02-05 13:21:26.000000000 +0800
@@ -0,0 +1,12 @@
+#ifndef _XT_LAYER7_H
+#define _XT_LAYER7_H
+
+#define MAX_PATTERN_LEN 8192
+#define MAX_PROTOCOL_LEN 256
+
+struct xt_layer7_info {
+ char protocol[MAX_PROTOCOL_LEN];
+ char pattern[MAX_PATTERN_LEN];
+ u_int8_t invert;
+};
+#endif
Index: linux-3.18.21/include/uapi/linux/netfilter_bridge/Kbuild
===================================================================
--- linux-3.18.21.orig/include/uapi/linux/netfilter_bridge/Kbuild 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/uapi/linux/netfilter_bridge/Kbuild 2018-02-05 13:21:26.000000000 +0800
@@ -7,6 +7,7 @@
header-y += ebt_ip6.h
header-y += ebt_limit.h
header-y += ebt_log.h
+header-y += ebt_ulog.h
header-y += ebt_mark_m.h
header-y += ebt_mark_t.h
header-y += ebt_nat.h
@@ -16,3 +17,5 @@
header-y += ebt_stp.h
header-y += ebt_vlan.h
header-y += ebtables.h
+header-y += ebt_ftos_t.h
+header-y += ebt_tc.h
\ No newline at end of file
Index: linux-3.18.21/include/uapi/linux/netfilter_bridge/ebt_ftos_t.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/uapi/linux/netfilter_bridge/ebt_ftos_t.h 2018-02-05 13:21:26.000000000 +0800
@@ -0,0 +1,12 @@
+#ifndef __LINUX_BRIDGE_EBT_FTOS_T_H
+#define __LINUX_BRIDGE_EBT_FTOS_T_H
+
+struct ebt_ftos_info
+{
+ unsigned char ftos;
+ unsigned char mask;
+ int target;
+};
+#define EBT_FTOS_TARGET "ftos"
+
+#endif
Index: linux-3.18.21/include/uapi/linux/netfilter_bridge/ebt_ip.h
===================================================================
--- linux-3.18.21.orig/include/uapi/linux/netfilter_bridge/ebt_ip.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/uapi/linux/netfilter_bridge/ebt_ip.h 2018-02-05 13:21:26.000000000 +0800
@@ -23,22 +23,44 @@
#define EBT_IP_PROTO 0x08
#define EBT_IP_SPORT 0x10
#define EBT_IP_DPORT 0x20
+#if !defined(TCSUPPORT_ORN_EBTABLES)
+#define EBT_IP_IPP 0x40
+#define EBT_IP_DSCP 0x80
+#define EBT_IP_MASK (EBT_IP_SOURCE | EBT_IP_DEST | EBT_IP_TOS | EBT_IP_PROTO |\
+ EBT_IP_SPORT | EBT_IP_DPORT | EBT_IP_IPP | EBT_IP_DSCP)
+#else
#define EBT_IP_MASK (EBT_IP_SOURCE | EBT_IP_DEST | EBT_IP_TOS | EBT_IP_PROTO |\
EBT_IP_SPORT | EBT_IP_DPORT )
+#endif
#define EBT_IP_MATCH "ip"
/* the same values are used for the invflags */
struct ebt_ip_info {
+#if defined(TCSUPPORT_ORN_EBTABLES)
__be32 saddr;
__be32 daddr;
__be32 smsk;
__be32 dmsk;
+#else
+ __be32 saddr[2];
+ __be32 daddr[2];
+ __be32 smsk[2];
+ __be32 dmsk[2];
+#endif
__u8 tos;
+#if !defined(TCSUPPORT_ORN_EBTABLES)
+ __u8 protocol[2];
+#else
__u8 protocol;
+#endif
__u8 bitmask;
__u8 invflags;
__u16 sport[2];
__u16 dport[2];
+#if !defined(TCSUPPORT_ORN_EBTABLES)
+ __u8 ipp[2];
+ __u8 dscp[2];
+#endif
};
#endif
Index: linux-3.18.21/include/uapi/linux/netfilter_bridge/ebt_ip6.h
===================================================================
--- linux-3.18.21.orig/include/uapi/linux/netfilter_bridge/ebt_ip6.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/uapi/linux/netfilter_bridge/ebt_ip6.h 2018-02-05 14:20:41.000000000 +0800
@@ -22,9 +22,12 @@
#define EBT_IP6_DPORT 0x20
#define EBT_IP6_ICMP6 0x40
+#if !defined(TCSUPPORT_CT_PORTSLIMIT)
#define EBT_IP6_MASK (EBT_IP6_SOURCE | EBT_IP6_DEST | EBT_IP6_TCLASS |\
EBT_IP6_PROTO | EBT_IP6_SPORT | EBT_IP6_DPORT | \
EBT_IP6_ICMP6)
+#endif
+
#define EBT_IP6_MATCH "ip6"
/* the same values are used for the invflags */
@@ -33,8 +36,16 @@
struct in6_addr daddr;
struct in6_addr smsk;
struct in6_addr dmsk;
+#if defined(TCSUPPORT_ORN_EBTABLES)
__u8 tclass;
+#else
+ __u8 tclass[2];
+#endif
+#if defined(TCSUPPORT_ORN_EBTABLES)
__u8 protocol;
+#else
+ __u8 protocol[2];
+#endif
__u8 bitmask;
__u8 invflags;
union {
Index: linux-3.18.21/include/uapi/linux/netfilter_bridge/ebt_tc.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/uapi/linux/netfilter_bridge/ebt_tc.h 2018-02-05 13:21:26.000000000 +0800
@@ -0,0 +1,12 @@
+#ifndef __LINUX_BRIDGE_EBT_TC_H_
+#define __LINUX_BRIDGE_EBT_TC_H_
+
+struct ebt_tc_info
+{
+ unsigned char tc;
+ int target;
+};
+#define EBT_TC_TARGET "tc"
+
+#endif
+
Index: linux-3.18.21/include/uapi/linux/netfilter_bridge/ebt_ulog.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/uapi/linux/netfilter_bridge/ebt_ulog.h 2018-02-05 13:21:26.000000000 +0800
@@ -0,0 +1,36 @@
+#ifndef _EBT_ULOG_H
+#define _EBT_ULOG_H
+
+#define EBT_ULOG_DEFAULT_NLGROUP 0
+#define EBT_ULOG_DEFAULT_QTHRESHOLD 1
+#define EBT_ULOG_MAXNLGROUPS 32 /* hardcoded netlink max */
+#define EBT_ULOG_PREFIX_LEN 32
+#define EBT_ULOG_MAX_QLEN 50
+#define EBT_ULOG_WATCHER "ulog"
+#define EBT_ULOG_VERSION 1
+
+struct ebt_ulog_info {
+ uint32_t nlgroup;
+ unsigned int cprange;
+ unsigned int qthreshold;
+ char prefix[EBT_ULOG_PREFIX_LEN];
+};
+
+typedef struct ebt_ulog_packet_msg {
+ int version;
+ char indev[IFNAMSIZ];
+ char outdev[IFNAMSIZ];
+ char physindev[IFNAMSIZ];
+ char physoutdev[IFNAMSIZ];
+ char prefix[EBT_ULOG_PREFIX_LEN];
+ struct timeval stamp;
+ unsigned long mark;
+ unsigned int hook;
+ size_t data_len;
+ /* The complete packet, including Ethernet header and perhaps
+ * the VLAN header is appended */
+ unsigned char data[0] __attribute__
+ ((aligned (__alignof__(struct ebt_ulog_info))));
+} ebt_ulog_packet_msg_t;
+
+#endif /* _EBT_ULOG_H */
Index: linux-3.18.21/include/uapi/linux/netfilter_bridge/ebt_vlan.h
===================================================================
--- linux-3.18.21.orig/include/uapi/linux/netfilter_bridge/ebt_vlan.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/uapi/linux/netfilter_bridge/ebt_vlan.h 2018-02-05 13:21:26.000000000 +0800
@@ -10,8 +10,13 @@
#define EBT_VLAN_MATCH "vlan"
struct ebt_vlan_info {
+#if !defined(TCSUPPORT_ORN_EBTABLES)
+ __u16 id[2]; /* VLAN ID {1-4095} */
+ __u8 prio[2]; /* VLAN User Priority {0-7} */
+#else
__u16 id; /* VLAN ID {1-4095} */
__u8 prio; /* VLAN User Priority {0-7} */
+#endif
__be16 encap; /* VLAN Encapsulated frame code {0-65535} */
__u8 bitmask; /* Args bitmask bit 1=1 - ID arg,
bit 2=1 User-Priority arg, bit 3=1 encap*/
Index: linux-3.18.21/include/uapi/linux/netfilter_bridge/ebtables.h
===================================================================
--- linux-3.18.21.orig/include/uapi/linux/netfilter_bridge/ebtables.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/uapi/linux/netfilter_bridge/ebtables.h 2018-02-05 13:21:26.000000000 +0800
@@ -30,6 +30,8 @@
* reclaim a part of this int for backwards compatible extensions.
* The 4 lsb are more than enough to store the verdict. */
#define EBT_VERDICT_BITS 0x0000000F
+#define EBT_VLAN_MARK 0x00000008 /*Rodney_20090724*/
+#define EBT_VLAN_REMARKING 0x00000010 /*Rodney_20090724*/
struct xt_match;
struct xt_target;
@@ -104,8 +106,20 @@
#define EBT_802_3 0x04
#define EBT_SOURCEMAC 0x08
#define EBT_DESTMAC 0x10
+#if defined(TCSUPPORT_PON_MAC_FILTER)
+#define EBT_SOURCEMACSTART 0x100
+#define EBT_SOURCEMACEND 0x200
+#define EBT_DESTMACSTART 0x400
+#define EBT_DESTMACEND 0x800
+#endif
+#if defined(TCSUPPORT_PON_MAC_FILTER)
#define EBT_F_MASK (EBT_NOPROTO | EBT_802_3 | EBT_SOURCEMAC | EBT_DESTMAC \
+ | EBT_SOURCEMACSTART |EBT_SOURCEMACEND |EBT_DESTMACSTART |EBT_DESTMACEND \
| EBT_ENTRY_OR_ENTRIES)
+#else
+#define EBT_F_MASK (EBT_NOPROTO | EBT_802_3 | EBT_SOURCEMAC | EBT_DESTMAC \
+ | EBT_ENTRY_OR_ENTRIES)
+#endif
#define EBT_IPROTO 0x01
#define EBT_IIN 0x02
@@ -114,8 +128,20 @@
#define EBT_IDEST 0x10
#define EBT_ILOGICALIN 0x20
#define EBT_ILOGICALOUT 0x40
+#if defined(TCSUPPORT_PON_MAC_FILTER)
+#define EBT_ISOURCESTART 0x100
+#define EBT_ISOURCEEND 0x200
+#define EBT_IDESTSTART 0x400
+#define EBT_IDESTEND 0x800
+#endif
+#if defined(TCSUPPORT_PON_MAC_FILTER)
+#define EBT_INV_MASK (EBT_IPROTO | EBT_IIN | EBT_IOUT | EBT_ILOGICALIN \
+ | EBT_ISOURCESTART |EBT_ISOURCEEND |EBT_IDESTSTART |EBT_IDESTEND \
+ | EBT_ILOGICALOUT | EBT_ISOURCE | EBT_IDEST)
+#else
#define EBT_INV_MASK (EBT_IPROTO | EBT_IIN | EBT_IOUT | EBT_ILOGICALIN \
| EBT_ILOGICALOUT | EBT_ISOURCE | EBT_IDEST)
+#endif
struct ebt_entry_match {
union {
@@ -171,6 +197,16 @@
unsigned char sourcemsk[ETH_ALEN];
unsigned char destmac[ETH_ALEN];
unsigned char destmsk[ETH_ALEN];
+#if defined(TCSUPPORT_PON_MAC_FILTER)
+ unsigned char sourcemacstart[ETH_ALEN];
+ unsigned char sourcestartmsk[ETH_ALEN];
+ unsigned char sourcemacend[ETH_ALEN];
+ unsigned char sourceendmsk[ETH_ALEN];
+ unsigned char destmacstart[ETH_ALEN];
+ unsigned char deststartmsk[ETH_ALEN];
+ unsigned char destmacend[ETH_ALEN];
+ unsigned char destendmsk[ETH_ALEN];
+#endif
/* sizeof ebt_entry + matches */
unsigned int watchers_offset;
/* sizeof ebt_entry + matches + watchers */
Index: linux-3.18.21/include/uapi/linux/netfilter_ipv4/Kbuild
===================================================================
--- linux-3.18.21.orig/include/uapi/linux/netfilter_ipv4/Kbuild 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/uapi/linux/netfilter_ipv4/Kbuild 2018-02-05 13:21:26.000000000 +0800
@@ -8,3 +8,4 @@
header-y += ipt_ah.h
header-y += ipt_ecn.h
header-y += ipt_ttl.h
+header-y += ipt_TRIGGER.h
Index: linux-3.18.21/include/uapi/linux/netfilter_ipv4/ipt_TRIGGER.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/uapi/linux/netfilter_ipv4/ipt_TRIGGER.h 2018-02-05 13:21:26.000000000 +0800
@@ -0,0 +1,26 @@
+#ifndef _IPT_TRIGGER_H_target
+#define _IPT_TRIGGER_H_target
+
+
+#define TRIGGER_TIMEOUT 600 /* 600 secs */
+
+enum ipt_trigger_type
+{
+ IPT_TRIGGER_DNAT = 1,
+ IPT_TRIGGER_IN = 2,
+ IPT_TRIGGER_OUT = 3
+};
+
+struct ipt_trigger_ports {
+ u_int16_t mport[2]; /* Related destination port range */
+ u_int16_t rport[2]; /* Port range to map related destination port range to */
+};
+
+struct ipt_trigger_info {
+ enum ipt_trigger_type type;
+ u_int16_t proto; /* Related protocol */
+ struct ipt_trigger_ports ports;
+};
+
+#endif /*_IPT_TRIGGER_H_target*/
+
Index: linux-3.18.21/include/uapi/linux/netlink.h
===================================================================
--- linux-3.18.21.orig/include/uapi/linux/netlink.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/uapi/linux/netlink.h 2018-02-05 13:21:26.000000000 +0800
@@ -22,6 +22,7 @@
#define NETLINK_DNRTMSG 14 /* DECnet routing messages */
#define NETLINK_KOBJECT_UEVENT 15 /* Kernel messages to userspace */
#define NETLINK_GENERIC 16
+#define NETLINK_ECNT_EVENT 17
/* leave room for NETLINK_DM (DM Events) */
#define NETLINK_SCSITRANSPORT 18 /* SCSI Transports */
#define NETLINK_ECRYPTFS 19
Index: linux-3.18.21/include/uapi/linux/qos_type.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/include/uapi/linux/qos_type.h 2018-02-05 14:20:41.000000000 +0800
@@ -0,0 +1,51 @@
+#ifndef _QOS_TYPE_H_
+#define _QOS_TYPE_H_
+
+#define MAX_BUF_LEN 64
+#define MAX_RULE_NUM 16
+
+#define EBT_CHK_TYPE 1
+#define DEV_XMIT_CHK_TYPE 2
+
+
+#define QOSTYPE_IOC_MAGIC 'g'
+#define QOSTYPE_MAJOR 111
+#define QOSTYPE_IOC_SET_TYPE _IOR(QOSTYPE_IOC_MAGIC,10,unsigned long)
+#define QOSTYPE_PBIT_MARK_OFFSET 8
+#define QOSTYPE_DSCP_SW_MARK_OFFSET 6
+#define QOSTYPE_QUEUE_INDEX_OFFSET 12
+#define QOSTYPE_8021P_MARK 0x0F00 /* 8~11 bits used for 802.1p */
+#define QOSTYPE_DSCP_MARK 0x003F /* 0~5 bits used for DSCP */
+#define QOSTYPE_DSCP_SW_MARK 0x0040 /* 6~6 bits used for DSCP switch flag */
+#define QOSTYPE_QUEUE_INDEX_MARK 0xF000 /* 12~15 bits used for queue index */
+
+
+/* type_name: wan_if, rtp_proto */
+struct qos_match
+{
+ char type_name[MAX_BUF_LEN];
+ char type_value[MAX_BUF_LEN];
+};
+
+/* qm[0]: wan_if, qm[1]: rtp_proto */
+struct qos_action
+{
+ struct qos_match qm[2];
+ /* if match, set dscp here */
+ int dscp_flag;
+ unsigned int dscp;
+};
+
+struct qos_type
+{
+ int rule_no;
+ struct qos_match qm[2];
+};
+
+int qostype_chk(int chkType, int rule_no, char *wan_if, int rtp_match);
+int set_tos(int rule_no, unsigned int tos);
+int get_tos(int rule_no, unsigned int *tos);
+int unset_tos(int rule_no);
+
+#endif
+
Index: linux-3.18.21/include/uapi/linux/sched.h
===================================================================
--- linux-3.18.21.orig/include/uapi/linux/sched.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/uapi/linux/sched.h 2018-02-05 13:21:26.000000000 +0800
@@ -30,6 +30,8 @@
#define CLONE_NEWNET 0x40000000 /* New network namespace */
#define CLONE_IO 0x80000000 /* Clone io context */
+#define CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
+
/*
* Scheduling policies
*/
Index: linux-3.18.21/include/uapi/linux/sysctl.h
===================================================================
--- linux-3.18.21.orig/include/uapi/linux/sysctl.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/include/uapi/linux/sysctl.h 2018-02-05 13:21:26.000000000 +0800
@@ -325,6 +325,17 @@
NET_NF_CONNTRACK_FRAG6_LOW_THRESH=30,
NET_NF_CONNTRACK_FRAG6_HIGH_THRESH=31,
NET_NF_CONNTRACK_CHECKSUM=32,
+ /* Add new MACROS for ECNT HOOK */
+ NET_NF_CONNTRACK_FTP_ENABLE=33,
+ NET_NF_CONNTRACK_SIP_ENABLE=34,
+ NET_NF_CONNTRACK_H323_ENABLE=35,
+ NET_NF_CONNTRACK_RTSP_ENABLE=36,
+ NET_NF_CONNTRACK_L2TP_ENABLE=37,
+ NET_NF_CONNTRACK_IPSEC_ENABLE=38,
+ NET_NF_CONNTRACK_PPTP_ENABLE=39,
+ NET_NF_CONNTRACK_PORTSCAN_ENABLE=40,
+ NET_NF_CONNTRACK_FTP_PORT=41,
+ NET_NF_CONNTRACK_ESP_TIMEOUT=42,
};
/* /proc/sys/net/ipv4 */
Index: linux-3.18.21/init/main.c
===================================================================
--- linux-3.18.21.orig/init/main.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/init/main.c 2018-02-05 13:21:27.000000000 +0800
@@ -89,6 +89,9 @@
#include <asm/smp.h>
#endif
+#include <linux/libcompileoption.h>
+#include <ecnt_hook/ecnt_hook.h>
+
static int kernel_init(void *);
extern void init_IRQ(void);
@@ -502,6 +505,7 @@
char *command_line;
char *after_dashes;
+ init_compileoption_val();
/*
* Need to run as early as possible, to initialize the
* lockdep hash:
@@ -599,6 +603,8 @@
kmem_cache_init_late();
+ ecnt_hook_init();
+
/*
* HACK ALERT! This is early. We're enabling the console before
* we've done PCI setups etc, and console_init() must be aware of
Index: linux-3.18.21/kernel/Makefile
===================================================================
--- linux-3.18.21.orig/kernel/Makefile 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/kernel/Makefile 2018-02-05 13:21:27.000000000 +0800
@@ -8,7 +8,7 @@
signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
extable.o params.o \
kthread.o sys_ni.o nsproxy.o \
- notifier.o ksysfs.o cred.o reboot.o \
+ ecnt_event.o notifier.o ksysfs.o cred.o reboot.o \
async.o range.o groups.o smpboot.o
ifdef CONFIG_FUNCTION_TRACER
Index: linux-3.18.21/kernel/ecnt_event.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/kernel/ecnt_event.c 2018-02-05 13:21:27.000000000 +0800
@@ -0,0 +1,445 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVERS SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVERS SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+
+/************************************************************************
+* I N C L U D E S
+************************************************************************/
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/netlink.h>
+#include <net/sock.h>
+#include <linux/version.h>
+#include <linux/proc_fs.h>
+#include <linux/workqueue.h>
+
+#include <ecnt_hook/ecnt_hook_event.h>
+
+#include "ecnt_event.h"
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+************************************************************************/
+
+/************************************************************************
+* M A C R O S
+************************************************************************/
+
+/************************************************************************
+* D A T A T Y P E S
+************************************************************************/
+MODULE_LICENSE("GPL");
+
+static struct sock *nl_sk = NULL;
+
+/************************************************************************
+* E X T E R N A L D A T A D E C L A R A T I O N S
+************************************************************************/
+
+
+/************************************************************************
+* F U N C T I O N D E F I N I T I O N S
+************************************************************************/
+
+
+
+/************************************************************************
+* P U B L I C D A T A
+************************************************************************/
+
+/************************************************************************
+* P R I V A T E D A T A
+************************************************************************/
+
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+************************************************************************/
+__IMEM struct event_work *work_dequeue(struct event_work_head *list)
+{
+ unsigned long flags;
+ struct event_work *result;
+
+ spin_lock_irqsave(&list->lock, flags);
+ result = __work_dequeue(list);
+ spin_unlock_irqrestore(&list->lock, flags);
+ return result;
+}
+
+__IMEM void work_queue_head(struct event_work_head *list, struct event_work *newsk)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&list->lock, flags);
+ __work_queue_head(list, newsk);
+ spin_unlock_irqrestore(&list->lock, flags);
+}
+
+__IMEM int workmgr_free_callback(struct event_work *work)
+{
+ struct event_work_head *list;
+
+ list = &workmgr_pool[WORKMGR_QUEUE_ID].list;
+ if (work_queue_len(list) > workmgr_max_list_len){
+ workmgr_max_list_len = work_queue_len(list) + 1;
+ }
+ work_queue_head(list, work);
+ return 1;
+}
+
+__IMEM struct event_work *workmgr_alloc(void)
+{
+	/* Pop a preallocated work item; fall back to kzalloc up to workmgr_limit_max. */
+	struct event_work_head *list;
+	struct event_work *work;
+	int alloc_no;
+	struct ecnt_event_data *event_data;
+	list = &workmgr_pool[WORKMGR_QUEUE_ID].list;
+
+	if (work_queue_len(list)) {
+		work = work_dequeue(list);
+
+		if (unlikely(work == NULL))
+			goto try_normal;
+
+		atomic_inc(&workmgr_alloc_no);
+		alloc_no = atomic_read(&workmgr_alloc_no);
+		if (alloc_no > workmgr_max_alloc_no)
+			workmgr_max_alloc_no = alloc_no;
+
+		return work;
+	}
+try_normal:
+	printk(KERN_ERR "event work memory need alloc More!\n");
+	if ((atomic_read(&workmgr_alloc_no) < workmgr_limit_max)) {
+		work = kzalloc(sizeof(struct event_work), GFP_ATOMIC|__GFP_NOWARN);
+		event_data = kzalloc(MAX_MSGSIZE, GFP_ATOMIC|__GFP_NOWARN);
+		if (likely(work && event_data)) {
+			work->work_free_callback = workmgr_free_callback;
+			work->event_data = event_data;
+			atomic_inc(&workmgr_alloc_no);
+			alloc_no = atomic_read(&workmgr_alloc_no);
+			if (alloc_no > workmgr_max_alloc_no)
+				workmgr_max_alloc_no = alloc_no;
+		} else {
+			kfree(work); work = NULL; /* either alloc failed; kfree(NULL) is a no-op */
+			workmgr_alloc_fail++;
+		}
+	} else {
+		work = NULL;
+		printk(KERN_ERR "event work memory alloc fail try normal!\n");
+		workmgr_alloc_fail++;
+	}
+	return work;
+}
+
+/* Workqueue handler: wrap the queued event in a netlink message and broadcast it. */
+static void send_event_msg(struct work_struct *workp)
+{
+	struct sk_buff *skb_1;
+	struct nlmsghdr *nlh;
+	int len = NLMSG_SPACE(MAX_MSGSIZE);
+	struct event_work *p_event_work = container_of(workp, struct event_work, mywork);
+
+	skb_1 = alloc_skb(len, GFP_KERNEL);
+	if(!skb_1){
+		event_send_err++;
+		printk(KERN_ERR "send_event_msg:alloc_skb error\n");
+		goto finish;
+	}
+	nlh = nlmsg_put(skb_1, 0, 0, p_event_work->type, MAX_MSGSIZE, 0);
+	if(!nlh){ /* no tailroom for the netlink header: drop the skb, do not deref NULL */
+		event_send_err++;
+		printk(KERN_ERR "send_event_msg:nlmsg_put error\n");
+		kfree_skb(skb_1);
+		goto finish;
+	}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,19)
+	NETLINK_CB(skb_1).pid = 0;
+#else
+	NETLINK_CB(skb_1).portid = 0;
+#endif
+	NETLINK_CB(skb_1).dst_group = 1;
+	memcpy(NLMSG_DATA(nlh), p_event_work->event_data, p_event_work->event_data_len);
+	ECNT_EVENT_BROADCAST((struct ecnt_data *)skb_1);
+	if(nl_sk){
+		netlink_broadcast(nl_sk, skb_1, 0, 1, GFP_KERNEL);
+	}else{
+		printk(KERN_ERR "Should first init netlink sock!\n");
+		consume_skb(skb_1);
+	}
+finish:
+	if (p_event_work->work_free_callback) {
+		(*p_event_work->work_free_callback)(p_event_work);
+	}
+}
+
+static int work_limit_read(char *page, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ char *out = page;
+ int len;
+ int i;
+ struct event_work_head *list;
+
+ out += sprintf(out, "workmgr_limit = %d\n", workmgr_limit);
+ out += sprintf(out, "workmgr_limit_max = %d\n", workmgr_limit_max);
+ out += sprintf(out, "workmgr_alloc_no = %d\n", atomic_read(&workmgr_alloc_no));
+ out += sprintf(out, "workmgr_alloc_fail = %d\n", workmgr_alloc_fail);
+ out += sprintf(out, "workmgr_max_alloc_no = %d\n", workmgr_max_alloc_no);
+ out += sprintf(out, "workmgr_max_list_len = %d\n", workmgr_max_list_len);
+ out += sprintf(out, "event_send_total = %d\n", event_send_total);
+ out += sprintf(out, "event_send_err = %d\n", event_send_err);
+
+ for (i=0; i<WORKMGR_MAX_QUEUE; i++) {
+ list = &workmgr_pool[i].list;
+ out += sprintf(out, "event_work_queue_len CPU%d = %d\n", i, work_queue_len(list));
+ }
+
+ len = out - page;
+ len -= offset;
+ if (len < count) {
+ *eof = 1;
+ if (len <= 0)
+ return 0;
+ } else
+ len = count;
+
+ *start = page + offset;
+ return len;
+}
+
+static int work_limit_write(struct file *file, const char __user * buffer,
+		unsigned long count, void *data)
+{
+	char buf[64];
+	int val;
+
+	if (count >= sizeof(buf)) /* leave room for the NUL terminator */
+		return -EINVAL;
+
+	if (copy_from_user(buf, buffer, count))
+		return -EFAULT;
+	buf[count] = '\0'; /* stack buffer is uninitialized; terminate before parsing */
+	val = simple_strtoul(buf, NULL, 10);
+
+	workmgr_limit_max = val;
+
+	return count;
+}
+
+/* Preallocate workmgr_limit event_work items (with payload buffers) on the free list. */
+static void workmgr_mem_init(void)
+{
+	struct event_work_head *list;
+	struct event_work *work;
+	struct ecnt_event_data *event_data;
+	int alloc_no;
+	int i;
+
+	list = &workmgr_pool[WORKMGR_QUEUE_ID].list;
+	for(i=0; i<workmgr_limit; i++) {
+		work = kzalloc(sizeof(struct event_work), GFP_ATOMIC|__GFP_NOWARN);
+		event_data = kzalloc(MAX_MSGSIZE, GFP_ATOMIC|__GFP_NOWARN);
+		if (likely(work && event_data)) {
+			work->work_free_callback = workmgr_free_callback;
+			work->event_data = event_data;
+			work_queue_head(list, work);
+		} else {
+			kfree(work); /* never queue an item without a payload buffer */
+			workmgr_alloc_fail++;
+			printk(KERN_ERR "event work memory init fail Once!\n");
+		}
+	}
+}
+
+static void user_to_kernel (struct sk_buff *skb)
+{
+	/* TODO userspace -> kernel event */
+}
+
+static void netlink_sock_init(void)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,19)
+ struct netlink_kernel_cfg cfg = {
+ .input = user_to_kernel,
+ .flags = NL_CFG_F_NONROOT_RECV,
+ .groups = 1,
+ };
+ nl_sk = netlink_kernel_create(&init_net, NETLINK_ECNT_EVENT, &cfg);
+#else
+ nl_sk = netlink_kernel_create(&init_net, NETLINK_ECNT_EVENT, 0, user_to_kernel, NULL, THIS_MODULE);
+#endif
+ printk(KERN_INFO "Initializing Econet Event Netlink Socket nl_sk = %p\r\n", nl_sk);
+}
+
+int ecnt_send_event(unsigned short type,
+	struct ecnt_event_data *event_data, int event_data_len)
+{
+	struct event_work *event_workp = workmgr_alloc();
+
+	event_send_total++;
+
+	if(event_workp == NULL){
+		event_send_err++;
+		printk(KERN_ERR "workmgr alloc fail, maybe too many events(0x%x)!!\n", type);
+		return 0;
+	}
+
+	event_workp->type = type;
+
+	if(event_data_len < MAX_MSGSIZE)
+		event_workp->event_data_len = event_data_len;
+	else
+		event_workp->event_data_len = MAX_MSGSIZE;
+
+	memcpy(event_workp->event_data, event_data, event_workp->event_data_len);
+
+	INIT_WORK(&(event_workp->mywork), send_event_msg);
+
+	if(schedule_work(&(event_workp->mywork)) == 0)
+	{
+		event_send_err++;
+		printk(KERN_ERR "event work schedule fail(0x%x)!!\n", type); /* was missing the %x specifier for 'type' */
+		if (event_workp->work_free_callback) {
+			(*event_workp->work_free_callback)(event_workp);
+		}
+		return 0;
+	}
+
+	return 1;
+}
+
+EXPORT_SYMBOL(ecnt_send_event);
+
+int ecnt_event_execute(struct ecnt_data *in_data, struct ecnt_event_handle *event_handle)
+{
+ int i = 0;
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb = (struct sk_buff *)in_data;
+ struct ecnt_event_data *event_data;
+ unsigned short event_type;
+
+ nlh = (struct nlmsghdr *)skb->data;
+
+ event_data = (struct ecnt_event_data *)(NLMSG_DATA(nlh));
+
+ while(event_handle[i].handle){
+ event_type = (((event_handle[i].maintype)<<8) | (event_handle[i].subtype));
+ if(nlh->nlmsg_type == event_type){
+ event_handle[i].handle(event_data);
+ }
+ i++;
+ }
+
+ return 1;
+}
+
+EXPORT_SYMBOL(ecnt_event_execute);
+
+struct ecnt_event_source* ecnt_event_register(char *name,
+ int (*hook)(struct ecnt_data *in_data))
+{
+ struct ecnt_hook_ops *ecnt_event_opt;
+
+ ecnt_event_opt = kzalloc(sizeof(struct ecnt_hook_ops), GFP_KERNEL);
+ if (ecnt_event_opt == NULL)
+ return NULL;
+
+ ecnt_event_opt->is_execute = 1;
+ ecnt_event_opt->hookfn = hook;
+ ecnt_event_opt->maintype = ECNT_HOOK_EVENT;
+ ecnt_event_opt->subtype = ECNT_HOOK_EVENT_SUB;
+ ecnt_event_opt->priority = 1;
+ ecnt_event_opt->name = name;
+
+ if(ecnt_register_hook(ecnt_event_opt)) {
+ printk("ecnt_event_opt register fail\n");
+ kfree(ecnt_event_opt);
+ return NULL;
+ }
+
+ printk("ecnt_event_opt register......%s\n", ecnt_event_opt->name);
+ return ecnt_event_opt;
+}
+
+EXPORT_SYMBOL(ecnt_event_register);
+
+int ecnt_event_unregister(struct ecnt_event_source *source){
+
+ /* Free Source */
+ ecnt_unregister_hook(source);
+ kfree(source);
+ return 0;
+}
+
+EXPORT_SYMBOL(ecnt_event_unregister);
+
+static int __init event_notify_init(void)
+{
+ struct proc_dir_entry *test_proc;
+ int i;
+
+ for (i=0; i<WORKMGR_MAX_QUEUE; i++) {
+ work_queue_head_init(&workmgr_pool[i].list);
+ }
+ workmgr_mem_init();
+
+ test_proc = create_proc_entry("tc3162/workmgr_limit", 0, NULL);
+ if (!test_proc)
+ return 0;
+
+ test_proc->read_proc = work_limit_read;
+ test_proc->write_proc = work_limit_write;
+
+ printk(KERN_INFO "Initializing Ecnt Event Netlink Socket\r\n");
+ netlink_sock_init();
+
+ return 0;
+}
+
+static void __exit event_notify_exit(void)
+{
+	if (nl_sk) /* netlink_kernel_create() may have failed in init */
+		sock_release(nl_sk->sk_socket);
+	remove_proc_entry("tc3162/workmgr_limit", 0);
+	printk(KERN_INFO "Goodbye\r\n");
+}
+
+subsys_initcall(event_notify_init);
+module_exit(event_notify_exit);
+
+MODULE_DESCRIPTION("Ecnt Event Notify Driver");
+
Index: linux-3.18.21/kernel/ecnt_event.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/kernel/ecnt_event.h 2018-02-05 13:21:27.000000000 +0800
@@ -0,0 +1,178 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVERS SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVERS SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+#ifndef ECNT_EVENT_H_
+#define ECNT_EVENT_H_
+
+/************************************************************************
+* I N C L U D E S
+************************************************************************/
+#include "ecnt_event_global/ecnt_event_global.h"
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+************************************************************************/
+
+/************************************************************************
+* M A C R O S
+************************************************************************/
+#define WORKMGR_QUEUE_ID 0
+#define WORKMGR_MAX_QUEUE 1
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+struct event_work{
+ /* These two members must be first. */
+ struct event_work *next;
+ struct event_work *prev;
+
+ int type;
+ int event_data_len;
+ int (*work_free_callback)(struct event_work *work);
+
+ struct ecnt_event_data *event_data;
+
+ struct work_struct mywork;
+};
+
+struct event_work_head {
+ /* These two members must be first. */
+ struct event_work *next;
+ struct event_work *prev;
+
+ __u32 qlen;
+ spinlock_t lock;
+};
+
+union {
+ struct event_work_head list;
+ char pad[SMP_CACHE_BYTES];
+} workmgr_pool[WORKMGR_MAX_QUEUE];
+
+
+/************************************************************************
+* D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+atomic_t workmgr_alloc_no;
+
+int workmgr_limit = 12;
+int workmgr_limit_max = 30;
+int workmgr_max_alloc_no = 0;
+int workmgr_alloc_fail = 0;
+int workmgr_max_list_len = 0;
+
+int event_send_total = 0;
+int event_send_err = 0;
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+ I N L I N E F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+static inline __u32 work_queue_len(const struct event_work_head *list_)
+{
+ return list_->qlen;
+}
+
+static inline struct event_work *work_peek(const struct event_work_head *list_)
+{
+	struct event_work *work = list_->next; /* was wrongly typed as sk_buff_head */
+
+	if (work == (struct event_work *)list_)
+		work = NULL;
+	return work;
+}
+
+static inline void __work_unlink(struct event_work *work, struct event_work_head *list)
+{
+ struct event_work *next, *prev;
+
+ list->qlen--;
+ next = work->next;
+ prev = work->prev;
+ work->next = work->prev = NULL;
+ next->prev = prev;
+ prev->next = next;
+}
+
+static inline struct event_work *__work_dequeue(struct event_work_head *list)
+{
+ struct event_work *work = work_peek(list);
+ if (work)
+ __work_unlink(work, list);
+ return work;
+}
+
+static inline void __work_insert(struct event_work *newsk,
+ struct event_work *prev, struct event_work *next,
+ struct event_work_head *list)
+{
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = prev->next = newsk;
+ list->qlen++;
+}
+
+static inline void __work_queue_after(struct event_work_head *list,
+ struct event_work *prev,
+ struct event_work *newsk)
+{
+ __work_insert(newsk, prev, prev->next, list);
+}
+
+static inline void __work_queue_head(struct event_work_head *list,
+ struct event_work *newsk)
+{
+ __work_queue_after(list, (struct event_work *)list, newsk);
+}
+
+static inline void __work_queue_head_init(struct event_work_head *list)
+{
+ list->prev = list->next = (struct event_work *)list;
+ list->qlen = 0;
+}
+
+static inline void work_queue_head_init(struct event_work_head *list)
+{
+ spin_lock_init(&list->lock);
+ __work_queue_head_init(list);
+}
+
+#endif/* ECNT_EVENT_H_ */
+
Index: linux-3.18.21/kernel/ecnt_kernel.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/kernel/ecnt_kernel.h 2018-02-05 13:21:27.000000000 +0800
@@ -0,0 +1,25 @@
+#ifndef _LINUX_ECNT_KENREL_H
+#define _LINUX_ECNT_KENREL_H
+static inline void ecnt_reset_kthread_cpus_allowed(struct task_struct *task)
+{
+ /* Binding kernel thread to CPU0 if support VOIP */
+ #ifdef TCSUPPORT_VOIP
+ cpumask_var_t new_mask;
+ *(unsigned long *)new_mask = 0x1;
+ set_cpus_allowed_ptr(task, new_mask);
+ #endif
+}
+
+#define ECNT_BIN_NET_NETFILTER_TABLE_HOOK \
+{ CTL_INT, NET_NF_CONNTRACK_FTP_ENABLE, "nf_conntrack_ftp_enable" },\
+{ CTL_INT, NET_NF_CONNTRACK_SIP_ENABLE, "nf_conntrack_sip_enable" },\
+{ CTL_INT, NET_NF_CONNTRACK_H323_ENABLE, "nf_conntrack_h323_enable" },\
+{ CTL_INT, NET_NF_CONNTRACK_RTSP_ENABLE, "nf_conntrack_rtsp_enable" },\
+{ CTL_INT, NET_NF_CONNTRACK_L2TP_ENABLE, "nf_conntrack_l2tp_enable" },\
+{ CTL_INT, NET_NF_CONNTRACK_IPSEC_ENABLE, "nf_conntrack_ipsec_enable" },\
+{ CTL_INT, NET_NF_CONNTRACK_PPTP_ENABLE, "nf_conntrack_pptp_enable" },\
+{ CTL_INT, NET_NF_CONNTRACK_PORTSCAN_ENABLE, "nf_conntrack_portscan_enable" },\
+{ CTL_INT, NET_NF_CONNTRACK_FTP_PORT, "nf_conntrack_ftp_port" },\
+{ CTL_INT, NET_NF_CONNTRACK_ESP_TIMEOUT, "nf_conntrack_esp_timeout" },\
+
+#endif
\ No newline at end of file
Index: linux-3.18.21/kernel/gcov/Makefile
===================================================================
--- linux-3.18.21.orig/kernel/gcov/Makefile 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/kernel/gcov/Makefile 2018-02-05 13:21:27.000000000 +0800
@@ -7,7 +7,8 @@
ifeq ($(CONFIG_GCOV_FORMAT_3_4),y)
cc-ver := 0304
-else ifeq ($(CONFIG_GCOV_FORMAT_4_7),y)
+else
+ifeq ($(CONFIG_GCOV_FORMAT_4_7),y)
cc-ver := 0407
else
# Use cc-version if available, otherwise set 0
@@ -23,7 +24,7 @@
# scripts/Makefile.clean. But the following workaround seems least invasive.
cc-ver := $(if $(call cc-version),$(call cc-version),0)
endif
-
+endif
obj-$(CONFIG_GCOV_KERNEL) := base.o fs.o
ifeq ($(call if-lt, $(cc-ver), 0407),1)
Index: linux-3.18.21/kernel/irq/chip.c
===================================================================
--- linux-3.18.21.orig/kernel/irq/chip.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/kernel/irq/chip.c 2018-02-05 13:21:27.000000000 +0800
@@ -430,7 +430,7 @@
* it after the associated handler has acknowledged the device, so the
* interrupt line is back to inactive.
*/
-void
+__IMEM void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
raw_spin_lock(&desc->lock);
Index: linux-3.18.21/kernel/irq/handle.c
===================================================================
--- linux-3.18.21.orig/kernel/irq/handle.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/kernel/irq/handle.c 2018-02-05 13:21:27.000000000 +0800
@@ -130,7 +130,7 @@
wake_up_process(action->thread);
}
-irqreturn_t
+__IMEM irqreturn_t
handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
{
irqreturn_t retval = IRQ_NONE;
@@ -180,7 +180,7 @@
return retval;
}
-irqreturn_t handle_irq_event(struct irq_desc *desc)
+__IMEM irqreturn_t handle_irq_event(struct irq_desc *desc)
{
struct irqaction *action = desc->action;
irqreturn_t ret;
Index: linux-3.18.21/kernel/irq/irqdesc.c
===================================================================
--- linux-3.18.21.orig/kernel/irq/irqdesc.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/kernel/irq/irqdesc.c 2018-02-05 13:21:27.000000000 +0800
@@ -287,7 +287,7 @@
return arch_early_irq_init();
}
-struct irq_desc *irq_to_desc(unsigned int irq)
+__IMEM struct irq_desc *irq_to_desc(unsigned int irq)
{
return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
@@ -342,7 +342,7 @@
* @irq: The irq number to handle
*
*/
-int generic_handle_irq(unsigned int irq)
+__IMEM int generic_handle_irq(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
Index: linux-3.18.21/kernel/irq/proc.c
===================================================================
--- linux-3.18.21.orig/kernel/irq/proc.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/kernel/irq/proc.c 2018-02-05 13:21:27.000000000 +0800
@@ -103,13 +103,40 @@
if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
return -ENOMEM;
+#ifndef CONFIG_MIPS_MT_SMP
if (type)
err = cpumask_parselist_user(buffer, count, new_value);
else
err = cpumask_parse_user(buffer, count, new_value);
if (err)
goto free_cpumask;
+#else
+ if (type)
+ err = bitmap_parselist_user(buffer, count, cpumask_bits(new_value), 4);
+ else
+ err = bitmap_parse_user(buffer, count, cpumask_bits(new_value), 4);
+ if (err)
+ goto free_cpumask;
+
+#ifndef TCSUPPORT_MIPS_1004K /* for 34K CPU only */
+ printk("\r\n\r\n%s new_value origin value is %08x\r\n\r\n", __FUNCTION__, *new_value);
+ if(*(unsigned long *)new_value == 0x8)
+ {
+ *(unsigned long *)new_value = 0x2;
+ }
+ else if(*(unsigned long *)new_value == 0xf)
+ {
+ *(unsigned long *)new_value = 0x3;
+ }
+ else
+ {
+ *(unsigned long *)new_value = 0x1;
+ }
+
+ printk("\r\n\r\n%s new_value changed value is %08x\r\n\r\n", __FUNCTION__, *new_value);
+#endif
+#endif
if (!is_affinity_mask_valid(new_value)) {
err = -EINVAL;
goto free_cpumask;
Index: linux-3.18.21/kernel/kthread.c
===================================================================
--- linux-3.18.21.orig/kernel/kthread.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/kernel/kthread.c 2018-02-05 13:21:27.000000000 +0800
@@ -19,6 +19,7 @@
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <trace/events/sched.h>
+#include "ecnt_kernel.h"
static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
@@ -319,6 +320,7 @@
*/
sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
set_cpus_allowed_ptr(task, cpu_all_mask);
+ ecnt_reset_kthread_cpus_allowed(task);
}
kfree(create);
return task;
@@ -488,6 +490,7 @@
set_task_comm(tsk, "kthreadd");
ignore_signals(tsk);
set_cpus_allowed_ptr(tsk, cpu_all_mask);
+ ecnt_reset_kthread_cpus_allowed(tsk);
set_mems_allowed(node_states[N_MEMORY]);
current->flags |= PF_NOFREEZE;
Index: linux-3.18.21/kernel/locking/semaphore.c
===================================================================
--- linux-3.18.21.orig/kernel/locking/semaphore.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/kernel/locking/semaphore.c 2018-02-05 13:21:27.000000000 +0800
@@ -188,6 +188,183 @@
}
EXPORT_SYMBOL(up);
+#if defined(TCSUPPORT_NEW_SPIFLASH)
+#define SF_MUX_LOCK_MAX (10)
+
+int down_Manual_interruptible(struct semaphore *sem)
+{
+ unsigned long flags = 0;
+ int result = 0;
+
+ //while(1) {
+ spin_lock_irqsave(&sem->lock, flags);
+ if (likely(sem->count == 0)) {
+ sem->count += (2 * SF_MUX_LOCK_MAX);
+ // printk("down_Manual_0:sem->count is %d\n", sem->count);
+ }
+ else{
+ // printk("down_Manual_1:sem->count is %d\n", sem->count);
+ result = __down_interruptible(sem);
+ if(result == 0)
+ sem->count += (2 * SF_MUX_LOCK_MAX);
+ // printk("down_Manual_2:sem->count is %d, result is %d\n", sem->count, result);
+ }
+ spin_unlock_irqrestore(&sem->lock, flags);
+ //}
+
+ return result;
+}
+EXPORT_SYMBOL(down_Manual_interruptible);
+
+int down_Auto_interruptible(struct semaphore *sem)
+{
+ unsigned long flags = 0;
+ int result = 0;
+
+ //while(1) {
+ spin_lock_irqsave(&sem->lock, flags);
+ if (likely(sem->count < (2 * SF_MUX_LOCK_MAX))) {
+ sem->count += 1;
+ //printk("down_Auto_0:sem->count is %d\n", sem->count);
+ }
+ else{
+ //printk("down_Auto_1:sem->count is %d\n", sem->count);
+ result = __down_interruptible(sem);
+ if(result == 0)
+ sem->count += 1;
+ //printk("down_Auto_2:sem->count is %d, result is %d\n", sem->count, result);
+ }
+ spin_unlock_irqrestore(&sem->lock, flags);
+ //}
+
+ return result;
+}
+EXPORT_SYMBOL(down_Auto_interruptible);
+
+int down_Unzip_interruptible(struct semaphore *sem)
+{
+ unsigned long flags = 0;
+ int result = 0;
+
+ //while(1) {
+ spin_lock_irqsave(&sem->lock, flags);
+ if (likely(sem->count <= SF_MUX_LOCK_MAX)) {
+ sem->count += SF_MUX_LOCK_MAX;
+ //printk("down_Unzip_0:sem->count is %d\n", sem->count);
+ }
+ else{
+ //printk("down_Unzip_1:sem->count is %d\n", sem->count);
+ result = __down_interruptible(sem);
+ if(result == 0)
+ sem->count += (SF_MUX_LOCK_MAX);
+ //printk("down_Unzip_2:sem->count is %d, result is %d\n", sem->count, result);
+ }
+ spin_unlock_irqrestore(&sem->lock, flags);
+ //}
+
+ return result;
+}
+EXPORT_SYMBOL(down_Unzip_interruptible);
+
+void up_Manual(struct semaphore *sem)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&sem->lock, flags);
+#if 0
+ if (likely(list_empty(&sem->wait_list)))
+ sem->count -= (2 * SF_MUX_LOCK_MAX);
+ else
+ __up(sem);
+#endif
+ sem->count -= (2 * SF_MUX_LOCK_MAX);
+ if (!(list_empty(&sem->wait_list)))
+ __up(sem);
+// printk("up_Manual:sem->count is %d\n", sem->count);
+ spin_unlock_irqrestore(&sem->lock, flags);
+}
+EXPORT_SYMBOL(up_Manual);
+
+void up_Auto(struct semaphore *sem)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&sem->lock, flags);
+#if 0
+ if (likely(list_empty(&sem->wait_list)))
+ sem->count -= 1;
+ else
+ __up(sem);
+#endif
+ sem->count -= 1;
+ if (!(list_empty(&sem->wait_list)))
+ __up(sem);
+ //printk("up_Auto:sem->count is %d\n", sem->count);
+ spin_unlock_irqrestore(&sem->lock, flags);
+}
+EXPORT_SYMBOL(up_Auto);
+
+void up_Unzip(struct semaphore *sem)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&sem->lock, flags);
+#if 0
+ if (likely(list_empty(&sem->wait_list)))
+ sem->count -= SF_MUX_LOCK_MAX;
+ else
+ __up(sem);
+#endif
+ sem->count -= (SF_MUX_LOCK_MAX);
+ if (!(list_empty(&sem->wait_list)))
+ __up(sem);
+ //printk("up_Unzip:sem->count is %d\n", sem->count);
+ spin_unlock_irqrestore(&sem->lock, flags);
+}
+EXPORT_SYMBOL(up_Unzip);
+
+int down_Normal_interruptible(struct semaphore *sem)
+{
+#if 0
+ unsigned long flags = 0;
+ spin_lock_irqsave(&sem->lock, flags);
+ return 0;
+//#else
+
+ unsigned long flags;
+ int result = 0;
+
+ spin_lock_irqsave(&sem->lock, flags);
+ if (likely(sem->count > 0))
+ sem->count--;
+ else
+ result = __down_interruptible(sem);
+ spin_unlock_irqrestore(&sem->lock, flags);
+
+
+ return result;
+#endif
+}
+EXPORT_SYMBOL(down_Normal_interruptible);
+
+void up_Normal(struct semaphore *sem)
+{
+#if 0
+ unsigned long flags = 0;
+ spin_unlock_irqrestore(&sem->lock, flags);
+//#else
+ unsigned long flags;
+
+ spin_lock_irqsave(&sem->lock, flags);
+ if (likely(list_empty(&sem->wait_list)))
+ sem->count++;
+ else
+ __up(sem);
+ spin_unlock_irqrestore(&sem->lock, flags);
+#endif
+}
+EXPORT_SYMBOL(up_Normal);
+#endif
/* Functions for the contended case */
struct semaphore_waiter {
Index: linux-3.18.21/kernel/module.c
===================================================================
--- linux-3.18.21.orig/kernel/module.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/kernel/module.c 2018-02-05 13:21:27.000000000 +0800
@@ -184,6 +184,68 @@
} index;
};
+/****************************************************************************
+ Ecnt Support IMEM for Modules
+ ****************************************************************************/
+#ifdef CONFIG_MIPS_TC3262
+#include <asm/tc3162/tc3162.h>
+extern int __imem, __dmem;
+
+#ifdef CONFIG_TC3162_IMEM
+unsigned char refill = 0;
+unsigned int imem_remains;
+#if defined(TCSUPPORT_CPU_EN7512) || defined(TCSUPPORT_CPU_EN7521)
+#undef CONFIG_IMEM_SIZE
+#define CONFIG_IMEM_SIZE 65536
+#endif
+#endif
+#endif
+
+#ifdef CONFIG_TC3162_IMEM
+void ecnt_move_module(struct load_info *info, void **pdest, Elf_Shdr *shdr)
+{
+ extern int __imem, _imem_end;
+ static unsigned int next_dest = &_imem_end;
+
+ if(!strcmp(info->secstrings + shdr->sh_name, ".imem_text")){
+ refill = 0;
+ imem_remains = (unsigned int)(&__imem) + CONFIG_IMEM_SIZE - next_dest;
+ unsigned char mod;
+
+ if((mod = next_dest & 3) != 0)
+ next_dest += (4 - mod);
+ if(next_dest + shdr->sh_size > (unsigned int)(&__imem) + CONFIG_IMEM_SIZE){
+ printk("The remaining IMEM space cannot accommodate section %s !!\r\n", info->secstrings + shdr->sh_name);
+ //printk("Remaining IMEM space: %d bytes\tSection Size: %d bytes\r\n", imem_remains, shdr->sh_size);
+ }
+ else{
+ *pdest = next_dest;
+ next_dest += shdr->sh_size;
+ imem_remains = (unsigned int)(&__imem) + CONFIG_IMEM_SIZE - next_dest;
+ refill = 1;
+ //printk("Remaining IMEM space: %d bytes\tSection Size: %d bytes\r\n", imem_remains, shdr->sh_size);
+ //printk("==>move_module: set refill to 1!!!\n");
+ }
+ }
+}
+
+void ecnt_init_module(void)
+{
+ unsigned int controlReg;
+ unsigned long flags;
+ /* refill internal imem */
+ if(refill){
+ //printk("SYSCALL_DEFINE3: will call ispram_fill......\n");
+ ispram_refill();
+ //printk("SYSCALL_DEFINE3: ispram_fill done!!\n");
+ //VPint(CR_DMC_ISPCFGR) = (CPHYSADDR(&__imem) & 0xfffff000) | (1<<8) | (0x7);
+ refill = 0;
+ }
+}
+#endif
+
+/****************************************************************************/
+
/* We require a truly strong try_module_get(): 0 means failure due to
ongoing or failed initialization etc. */
static inline int strong_try_module_get(struct module *mod)
@@ -2676,10 +2738,12 @@
err = try_to_force_load(mod, "bad vermagic");
if (err)
return err;
+#if !defined(CONFIG_MIPS_TC3262)
} else if (!same_magic(modmagic, vermagic, info->index.vers)) {
pr_err("%s: version magic '%s' should be '%s'\n",
mod->name, modmagic, vermagic);
return -ENOEXEC;
+#endif
}
if (!get_modinfo(info, "intree"))
@@ -2831,7 +2895,9 @@
+ (shdr->sh_entsize & ~INIT_OFFSET_MASK);
else
dest = mod->module_core + shdr->sh_entsize;
-
+#ifdef CONFIG_TC3162_IMEM
+ ecnt_move_module(info, &dest, shdr);
+#endif
if (shdr->sh_type != SHT_NOBITS)
memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
/* Update sh_addr to point to copy in image. */
@@ -3012,6 +3078,9 @@
{
int ret = 0;
+#ifdef CONFIG_TC3162_IMEM
+ ecnt_init_module();
+#endif
/*
* We want to find out whether @mod uses async during init. Clear
* PF_USED_ASYNC. async_schedule*() will set it.
Index: linux-3.18.21/kernel/printk/printk.c
===================================================================
--- linux-3.18.21.orig/kernel/printk/printk.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/kernel/printk/printk.c 2018-02-05 13:21:27.000000000 +0800
@@ -55,6 +55,11 @@
#include "console_cmdline.h"
#include "braille.h"
+#ifdef TCSUPPORT_UART1_ENHANCE
+extern void disable_ring_buffer_mode(void);
+#define CRITICAL_CONDITION_LOGLEVEL 2
+#endif
+
int console_printk[4] = {
CONSOLE_LOGLEVEL_DEFAULT, /* console_loglevel */
MESSAGE_LOGLEVEL_DEFAULT, /* default_message_loglevel */
@@ -1398,6 +1403,16 @@
struct console *con;
trace_console(text, len);
+
+#ifdef TCSUPPORT_UART1_ENHANCE
+ /* if system crash, that kernel will printk with ALARM log level,
+ * in this situation, we disable ring buffer mode and flush
+ * data in ring buffer for immediately print crash message.
+ */
+ if(level >= 0 && level <= CRITICAL_CONDITION_LOGLEVEL) {
+ disable_ring_buffer_mode();
+ }
+#endif
if (level >= console_loglevel && !ignore_loglevel)
return;
Index: linux-3.18.21/kernel/sched/core.c
===================================================================
--- linux-3.18.21.orig/kernel/sched/core.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/kernel/sched/core.c 2018-02-05 13:21:27.000000000 +0800
@@ -4033,7 +4033,24 @@
rcu_read_unlock();
return -ESRCH;
}
-
+#ifndef TCSUPPORT_MIPS_1004K /* for 34K CPU only */
+#ifdef CONFIG_MIPS_MT_SMP
+ printk("\r\n\r\n%s in_mask origin value is %08x\r\n\r\n", __FUNCTION__, *in_mask);
+ if(*(unsigned long *)in_mask == 0x8)
+ {
+ *(unsigned long *)in_mask = 0x2;
+ }
+ else if(*(unsigned long *)in_mask == 0xf)
+ {
+ *(unsigned long *)in_mask = 0x3;
+ }
+ else
+ {
+ *(unsigned long *)in_mask = 0x1;
+ }
+ printk("\r\n\r\n%s new_mask changed value is %08x\r\n\r\n", __FUNCTION__, *in_mask);
+#endif
+#endif
/* Prevent p going away */
get_task_struct(p);
rcu_read_unlock();
@@ -4245,7 +4262,7 @@
int __sched _cond_resched(void)
{
- if (should_resched()) {
+ if (should_resched(0)){
__cond_resched();
return 1;
}
@@ -4263,7 +4280,7 @@
*/
int __cond_resched_lock(spinlock_t *lock)
{
- int resched = should_resched();
+ int resched = should_resched(PREEMPT_LOCK_OFFSET);
int ret = 0;
lockdep_assert_held(lock);
@@ -4285,7 +4302,7 @@
{
BUG_ON(!in_softirq());
- if (should_resched()) {
+ if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
local_bh_enable();
__cond_resched();
local_bh_disable();
Index: linux-3.18.21/kernel/softirq.c
===================================================================
--- linux-3.18.21.orig/kernel/softirq.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/kernel/softirq.c 2018-02-05 13:21:27.000000000 +0800
@@ -223,7 +223,7 @@
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif
-asmlinkage __visible void __do_softirq(void)
+__IMEM asmlinkage __visible void __do_softirq(void)
{
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
unsigned long old_flags = current->flags;
@@ -290,14 +290,15 @@
wakeup_softirqd();
}
- lockdep_softirq_end(in_hardirq);
+ lockdep_softirq_end(in_hardirq);
+ tc3162wdog_kick();
account_irq_exit_time(current);
__local_bh_enable(SOFTIRQ_OFFSET);
WARN_ON_ONCE(in_interrupt());
tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}
-asmlinkage __visible void do_softirq(void)
+__IMEM asmlinkage __visible void do_softirq(void)
{
__u32 pending;
unsigned long flags;
@@ -318,7 +319,7 @@
/*
* Enter an interrupt context.
*/
-void irq_enter(void)
+__IMEM void irq_enter(void)
{
rcu_irq_enter();
if (is_idle_task(current) && !in_interrupt()) {
@@ -373,7 +374,7 @@
/*
* Exit an interrupt context. Process softirqs if needed and possible:
*/
-void irq_exit(void)
+__IMEM void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
local_irq_disable();
@@ -411,7 +412,7 @@
wakeup_softirqd();
}
-void raise_softirq(unsigned int nr)
+__IMEM void raise_softirq(unsigned int nr)
{
unsigned long flags;
Index: linux-3.18.21/kernel/sysctl_binary.c
===================================================================
--- linux-3.18.21.orig/kernel/sysctl_binary.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/kernel/sysctl_binary.c 2018-02-05 13:21:27.000000000 +0800
@@ -697,7 +697,7 @@
{ CTL_INT, NET_NF_CONNTRACK_FRAG6_LOW_THRESH, "nf_conntrack_frag6_low_thresh" },
{ CTL_INT, NET_NF_CONNTRACK_FRAG6_HIGH_THRESH, "nf_conntrack_frag6_high_thresh" },
{ CTL_INT, NET_NF_CONNTRACK_CHECKSUM, "nf_conntrack_checksum" },
-
+ ECNT_BIN_NET_NETFILTER_TABLE_HOOK
{}
};
Index: linux-3.18.21/lib/Kconfig
===================================================================
--- linux-3.18.21.orig/lib/Kconfig 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/lib/Kconfig 2018-02-05 13:21:28.000000000 +0800
@@ -225,6 +225,14 @@
config LZ4_DECOMPRESS
tristate
+config LZMA_COMPRESS
+ bool
+ default y
+
+config LZMA_DECOMPRESS
+ bool
+ default y
+
source "lib/xz/Kconfig"
#
Index: linux-3.18.21/lib/Makefile
===================================================================
--- linux-3.18.21.orig/lib/Makefile 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/lib/Makefile 2018-02-05 13:21:28.000000000 +0800
@@ -129,6 +129,8 @@
obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
+obj-y += ../../kernel_ext/lib/ecnt_utility.o
+
obj-$(CONFIG_AVERAGE) += average.o
obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
Index: linux-3.18.21/lib/decompress_unlzma.c
===================================================================
--- linux-3.18.21.orig/lib/decompress_unlzma.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/lib/decompress_unlzma.c 2018-02-05 13:21:28.000000000 +0800
@@ -36,6 +36,7 @@
#endif /* STATIC */
#include <linux/decompress/mm.h>
+#include <linux/decompress/unlzma_mm.h>
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
Index: linux-3.18.21/mm/slab_common.c
===================================================================
--- linux-3.18.21.orig/mm/slab_common.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/mm/slab_common.c 2018-02-05 13:21:29.000000000 +0800
@@ -649,7 +649,7 @@
* Find the kmem_cache structure that serves a given size of
* allocation
*/
-struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
+__IMEM struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
int index;
Index: linux-3.18.21/mm/slub.c
===================================================================
--- linux-3.18.21.orig/mm/slub.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/mm/slub.c 2018-02-05 13:21:29.000000000 +0800
@@ -2456,7 +2456,7 @@
return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
}
-void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
+__IMEM void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
void *ret = slab_alloc(s, gfpflags, _RET_IP_);
@@ -2675,7 +2675,7 @@
}
-void kmem_cache_free(struct kmem_cache *s, void *x)
+__IMEM void kmem_cache_free(struct kmem_cache *s, void *x)
{
s = cache_from_obj(s, x);
if (!s)
@@ -3708,7 +3708,7 @@
#endif
-void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
+__IMEM void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
{
struct kmem_cache *s;
void *ret;
Index: linux-3.18.21/net/8021q/ecnt_vlan.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/8021q/ecnt_vlan.h 2018-02-05 14:20:41.000000000 +0800
@@ -0,0 +1,93 @@
+#ifndef _LINUX_ECNT_VLAN_H
+#define _LINUX_ECNT_VLAN_H
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/netpoll.h>
+#include <linux/export.h>
+#include <ecnt_hook/ecnt_hook.h>
+
+#if defined(TCSUPPORT_PON_VLAN)
+extern int (*pon_vlan_get_mode_hook)(void);
+#endif
+
+
+#if defined(TCSUPPORT_VLAN_TAG)
+extern int (*remove_vtag_hook)(struct sk_buff *skb, struct net_device *dev);
+//extern int (*insert_vtag_hook)(struct sk_buff **pskb);
+extern int (*check_vtag_hook)(void);
+#endif
+
+
+extern struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb);
+
+static inline int isBridgeWan(struct net_device *dev)
+{
+ if (dev == NULL)
+ return 0;
+
+ if (rcu_dereference(dev->rx_handler_data) == NULL)
+ return 0;
+
+ return 1;
+}
+
+static inline int ecnt_vlan_receive_inline_hook(struct sk_buff **skbp, __be16 vlan_proto)
+{
+ struct sk_buff *skb = *skbp;
+ unsigned int offset = skb->data - skb_mac_header(skb);
+ __u16 vlan_tci;
+
+#if defined(TCSUPPORT_PON_VLAN)
+ int vlan_mode = MODE_HGU;
+ if(pon_vlan_get_mode_hook)
+ vlan_mode = pon_vlan_get_mode_hook();
+#endif
+
+
+ /*
+ * vlan_insert_tag expect skb->data pointing to mac header.
+ * So change skb->data before calling it and change back to
+ * original position later
+ */
+ skb_push(skb, offset);
+ vlan_tci = vlan_tx_tag_get(skb);
+ if(skb->vlan_tag_flag&VLAN_TAG_FOR_CFI)
+ {
+ vlan_tci |= VLAN_CFI_MASK;
+ }
+ skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto,
+ vlan_tci);
+
+ if (!skb)
+ return ECNT_CONTINUE;
+ skb->vlan_tci = 0;
+ skb->protocol = vlan_proto;
+ skb_pull(skb, offset);
+
+
+#ifdef TCSUPPORT_PON_VLAN
+ if(vlan_mode == MODE_HGU)
+#endif
+ {
+#if defined(TCSUPPORT_VLAN_TAG)
+ struct net_device * orig_dev = skb->dev;
+ if (check_vtag_hook && (check_vtag_hook() == 1)) {
+ if (remove_vtag_hook) {
+ if (remove_vtag_hook(skb, orig_dev) == -1) {
+ /* must free skb !! */
+ return ECNT_CONTINUE;
+ }
+ else {
+ return ECNT_RETURN;/*return true*/
+ }
+ }
+ }
+#endif
+ }
+ return ECNT_CONTINUE;
+}
+
+
+#endif
+
Index: linux-3.18.21/net/8021q/vlan_core.c
===================================================================
--- linux-3.18.21.orig/net/8021q/vlan_core.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/8021q/vlan_core.c 2018-02-05 13:21:29.000000000 +0800
@@ -3,6 +3,7 @@
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
+#include "ecnt_vlan.h"
#include "vlan.h"
bool vlan_do_receive(struct sk_buff **skbp)
@@ -14,8 +15,13 @@
struct vlan_pcpu_stats *rx_stats;
vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
- if (!vlan_dev)
+ if (!vlan_dev){
+ int ret;
+ ret = ecnt_vlan_receive_inline_hook(skbp, vlan_proto);
+ if(ret != ECNT_CONTINUE)
+ return true;
return false;
+ }
skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
Index: linux-3.18.21/net/Kconfig
===================================================================
--- linux-3.18.21.orig/net/Kconfig 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/Kconfig 2018-02-05 13:21:29.000000000 +0800
@@ -102,6 +102,10 @@
overhead in the transmit and receive paths.
If you are unsure how to answer this question, answer N.
+config SMUX
+ bool "smux"
+ help
+ This enables multi-service on one interface/PVC.
menuconfig NETFILTER
bool "Network packet filtering framework (Netfilter)"
Index: linux-3.18.21/net/Makefile
===================================================================
--- linux-3.18.21.orig/net/Makefile 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/Makefile 2018-02-05 13:21:29.000000000 +0800
@@ -73,3 +73,6 @@
obj-$(CONFIG_VSOCKETS) += vmw_vsock/
obj-$(CONFIG_NET_MPLS_GSO) += mpls/
obj-$(CONFIG_HSR) += hsr/
+ifeq ($(TCSUPPORT_RA_HWNAT),1)
+obj-y += nat/foe_hook/
+endif
Index: linux-3.18.21/net/atm/br2684.c
===================================================================
--- linux-3.18.21.orig/net/atm/br2684.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/atm/br2684.c 2018-02-05 13:21:29.000000000 +0800
@@ -28,6 +28,8 @@
#include <linux/atmbr2684.h>
#include "common.h"
+#include "ecnt_net_atm.h"
+#include <ecnt_hook/ecnt_hook.h>
static void skb_debug(const struct sk_buff *skb)
{
@@ -207,6 +209,7 @@
((brdev->payload == p_bridged) ?
sizeof(llc_oui_pid_pad) : sizeof(llc_oui_ipv4)) :
((brdev->payload == p_bridged) ? BR2684_PAD_LEN : 0);
+ int ret = 0;
if (skb_headroom(skb) < minheadroom) {
struct sk_buff *skb2 = skb_realloc_headroom(skb, minheadroom);
@@ -218,7 +221,13 @@
}
skb = skb2;
}
-
+ ret = ecnt_br2684_xmit_vcc_if_inline_hook(skb, dev, brvcc->encaps, brvcc->atmvcc);
+ if(ret){
+ if(ret != ECNT_CONTINUE)
+ return 0;
+ }
+ else
+ {
if (brvcc->encaps == e_llc) {
if (brdev->payload == p_bridged) {
skb_push(skb, sizeof(llc_oui_pid_pad));
@@ -248,6 +257,7 @@
memset(skb->data, 0, 2);
}
}
+ }
skb_debug(skb);
ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
@@ -256,7 +266,8 @@
ATM_SKB(skb)->atm_options = atmvcc->atm_options;
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
-
+
+#if !defined(CONFIG_CPU_TC3162) && !defined(CONFIG_MIPS_TC3262)
if (atomic_dec_return(&brvcc->qspace) < 1) {
/* No more please! */
netif_stop_queue(brvcc->device);
@@ -264,7 +275,7 @@
if (unlikely(atomic_read(&brvcc->qspace) > 0))
netif_wake_queue(brvcc->device);
}
-
+#endif
/* If this fails immediately, the skb will be freed and br2684_pop()
will wake the queue if appropriate. Just return an error so that
the stats are updated correctly */
@@ -288,7 +299,7 @@
return list_empty(&brdev->brvccs) ? NULL : list_entry_brvcc(brdev->brvccs.next); /* 1 vcc/dev right now */
}
-static netdev_tx_t br2684_start_xmit(struct sk_buff *skb,
+netdev_tx_t br2684_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct br2684_dev *brdev = BRPRIV(dev);
@@ -307,6 +318,10 @@
dev_kfree_skb(skb);
goto out_devs;
}
+
+ if(ecnt_br2684_start_xmit_padpkt_inline_hook(&skb, &brvcc->copies_failed, &devs_lock, dev)!=ECNT_CONTINUE)
+ return NETDEV_TX_OK;
+
atmvcc = brvcc->atmvcc;
bh_lock_sock(sk_atm(atmvcc));
@@ -425,10 +440,12 @@
struct br2684_vcc *brvcc = BR2684_VCC(atmvcc);
struct net_device *net_dev = brvcc->device;
struct br2684_dev *brdev = BRPRIV(net_dev);
+ int err = 0;
pr_debug("\n");
if (unlikely(skb == NULL)) {
+ ecnt_br2684_push_destroy_inline_hook(atmvcc, net_dev);
/* skb==NULL means VCC is being destroyed */
br2684_close_vcc(brvcc);
if (list_empty(&brdev->brvccs)) {
@@ -444,6 +461,14 @@
skb_debug(skb);
atm_return(atmvcc, skb->truesize);
pr_debug("skb from brdev %p\n", brdev);
+
+ err = ecnt_br2684_push_hook_inline_hook(atmvcc, skb);
+ if(err != ECNT_RETURN_DROP){
+ if(err == ECNT_RETURN)
+ goto error;
+ }
+ else
+ {
if (brvcc->encaps == e_llc) {
if (skb->len > 7 && skb->data[7] == 0x01)
@@ -497,6 +522,7 @@
skb->protocol = eth_type_trans(skb, net_dev);
}
}
+ }
#ifdef CONFIG_ATM_BR2684_IPFILTER
if (unlikely(packet_fails_filter(skb->protocol, brvcc, skb)))
@@ -512,6 +538,9 @@
net_dev->stats.rx_packets++;
net_dev->stats.rx_bytes += skb->len;
memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
+ err = ecnt_br2684_push_hwnat_inline_hook(skb);
+ if(err)
+ return;
netif_rx(skb);
return;
@@ -538,6 +567,10 @@
if (copy_from_user(&be, arg, sizeof be))
return -EFAULT;
+
+ if(ecnt_br2684_regvcc_init_inline_hook(atmvcc, be.encaps, &err) == ECNT_RETURN)
+ return err;
+
brvcc = kzalloc(sizeof(struct br2684_vcc), GFP_KERNEL);
if (!brvcc)
return -ENOMEM;
@@ -653,7 +686,8 @@
netdev->addr_len = 0;
netdev->mtu = 1500;
netdev->type = ARPHRD_PPP;
- netdev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ /*netdev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;*/
+ ecnt_br2684_setup_routed_flags_inline_hook(netdev);
netdev->tx_queue_len = 100;
INIT_LIST_HEAD(&brdev->brvccs);
}
@@ -702,6 +736,8 @@
brdev->payload = payload;
+ ecnt_br2684_create_config_inline_hook(payload);
+
if (list_empty(&br2684_devs)) {
/* 1st br2684 device */
brdev->number = 1;
@@ -837,7 +873,7 @@
extern struct proc_dir_entry *atm_proc_root; /* from proc.c */
#endif /* CONFIG_PROC_FS */
-static int __init br2684_init(void)
+int br2684_init(void)
{
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *p;
@@ -850,7 +886,7 @@
return 0;
}
-static void __exit br2684_exit(void)
+void br2684_exit(void)
{
struct net_device *net_dev;
struct br2684_dev *brdev;
@@ -878,8 +914,11 @@
}
}
+
+#if 0//!defined(TCSUPPORT_CPU_MT7510) && !defined(TCSUPPORT_CPU_MT7505) && !defined(TCSUPPORT_CPU_EN7512)
module_init(br2684_init);
module_exit(br2684_exit);
+#endif
MODULE_AUTHOR("Marcell GAL");
MODULE_DESCRIPTION("RFC2684 bridged protocols over ATM/AAL5");
Index: linux-3.18.21/net/atm/common.c
===================================================================
--- linux-3.18.21.orig/net/atm/common.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/atm/common.c 2018-02-05 13:21:29.000000000 +0800
@@ -717,9 +717,11 @@
/* @@@ Should be merged with adjust_tp */
if (!tp->traffic_class || tp->traffic_class == ATM_ANYCLASS)
return 0;
+#if 0 // !defined(CONFIG_CPU_TC3162) && !defined(CONFIG_MIPS_TC3262)
if (tp->traffic_class != ATM_UBR && !tp->min_pcr && !tp->pcr &&
!tp->max_pcr)
return -EINVAL;
+#endif
if (tp->min_pcr == ATM_MAX_PCR)
return -EINVAL;
if (tp->min_pcr && tp->max_pcr && tp->max_pcr != ATM_MAX_PCR &&
Index: linux-3.18.21/net/atm/ecnt_net_atm.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/atm/ecnt_net_atm.h 2018-02-05 14:20:41.000000000 +0800
@@ -0,0 +1,257 @@
+#ifndef _LINUX_ECNT_NET_CORE_H
+#define _LINUX_ECNT_NET_CORE_H
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/ip.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <net/arp.h>
+#include <linux/atm.h>
+#include <linux/atmdev.h>
+#include <linux/capability.h>
+#include <linux/seq_file.h>
+
+#include <linux/atmbr2684.h>
+
+#include <ecnt_hook/ecnt_hook.h>
+#include "common.h"
+
+
+#if !defined(TCSUPPORT_CPU_MT7510) && !defined(TCSUPPORT_CPU_MT7505) && !defined(TCSUPPORT_CPU_EN7512)
+#ifdef TCSUPPORT_RA_HWNAT
+#include <linux/foe_hook.h>
+#endif
+#endif
+
+#define MIN_PKT_SIZE 60
+extern int napi_en;
+extern void (*br2684_config_hook)(int linkMode, int linkType);
+extern int (*br2684_init_hook)(struct atm_vcc *atmvcc, int encaps);
+extern int (*br2684_push_hook)(struct atm_vcc *atmvcc, struct sk_buff *skb);
+extern int (*br2684_xmit_hook)(struct sk_buff *skb, struct net_device *dev, int encaps);
+extern void (*pppoatm_config_hook)(int linkMode, int linkType);
+extern int (*pppoatm_init_hook)(struct atm_vcc *atmvcc, int encaps);
+extern int (*pppoatm_push_hook)(struct atm_vcc *atmvcc, struct sk_buff *skb);
+
+
+#if !defined(TCSUPPORT_CT)
+extern int (*check_smuxIf_exist_hook)(struct net_device *dev);
+#endif
+
+
+static inline int ecnt_br2684_xmit_vcc_if_inline_hook(struct sk_buff *skb, struct net_device *dev, int encaps, struct atm_vcc *atmvcc)
+{
+ int err = 0;
+#if defined(TCSUPPORT_CPU_MT7510) || defined(TCSUPPORT_CPU_MT7505) || defined(TCSUPPORT_CPU_EN7512)
+ if (br2684_xmit_hook){
+ err = br2684_xmit_hook(skb, dev, encaps);
+ if (err){
+ return ECNT_RETURN;
+ }
+ else
+ return ECNT_CONTINUE;
+ }
+ else
+ return ECNT_RETURN_DROP;
+#endif
+ if(atmvcc->send == NULL)
+ {
+ printk("\r\n[br2684_xmit_vcc]++++atmvcc->send == NULL++++");
+ dev_kfree_skb(skb);
+ return ECNT_RETURN;
+ }
+
+ return ECNT_RETURN_DROP;
+}
+
+static inline int ecnt_br2684_start_xmit_padpkt_inline_hook(struct sk_buff **skb, int *copies_failed, rwlock_t *devs_lock, struct net_device *dev){
+ struct sk_buff *skb2;
+ /*if the packet length < 60, pad upto 60 bytes. shnwind 2008.4.17*/
+
+
+ if ((*skb)->len < MIN_PKT_SIZE)
+ {
+ skb2=skb_copy_expand(*skb, 0, MIN_PKT_SIZE - (*skb)->len, GFP_ATOMIC);
+ dev_kfree_skb(*skb);
+ if (skb2 == NULL) {
+ *copies_failed=*copies_failed+1;
+ read_unlock(devs_lock);
+ return ECNT_RETURN_DROP;
+
+ }
+ *skb = skb2;
+ memset((*skb)->tail, 0, MIN_PKT_SIZE - (*skb)->len);
+ skb_put((*skb), MIN_PKT_SIZE - (*skb)->len);
+ }
+ return ECNT_CONTINUE;
+}
+
+static inline void ecnt_br2684_push_destroy_inline_hook(struct atm_vcc *atmvcc, struct net_device *net_dev)
+{
+#if defined(CONFIG_CPU_TC3162) || defined(CONFIG_MIPS_TC3262)
+#ifdef CONFIG_SMUX
+#if !defined(TCSUPPORT_CT)
+ unsigned char ifNum = 0;
+#endif
+#endif
+
+#ifdef CONFIG_SMUX
+#if !defined(TCSUPPORT_CT)
+ if(check_smuxIf_exist_hook != NULL) {
+ if((ifNum = check_smuxIf_exist_hook(net_dev)) > 0) {
+ printk("\n==> Exist %d smux interfaces, just return and do not close PVC\n", ifNum);
+ return;//If smux interface exist, just return and do not close PVC
+ }
+ }
+#endif
+#endif
+#endif
+}
+
+static inline int ecnt_br2684_push_skb_trim_inline_hook(struct sk_buff *skb){
+ if (skb->data[7] == 0x01)
+ __skb_trim(skb, skb->len - 4);
+ return ECNT_CONTINUE;
+}
+
+
+static inline int ecnt_br2684_setup_routed_flags_inline_hook(struct net_device *netdev)
+{
+ /*release note TLM7.3.24.0-9
+ Symptom: When Wan PVC is IPoA mode, the RIP function is failed.
+ Reason: IPoA mode PVC is created with Point-to-Point Flag.
+ Solution:Remove Point-to-Point Flag in br2684_setup_routed().
+ */
+ netdev->flags = IFF_NOARP | IFF_MULTICAST;
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_br2684_create_config_inline_hook(int payload){
+#if defined(TCSUPPORT_CPU_MT7510) || defined(TCSUPPORT_CPU_MT7505) || defined(TCSUPPORT_CPU_EN7512)
+ if (br2684_config_hook){
+ br2684_config_hook(payload, 0);
+ } else {
+ printk("br2684_config_hook function: (NULL)\n");
+ }
+#endif
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_pppoatm_devppp_ioctl_config_inline_hook(void)
+{
+#if defined(TCSUPPORT_CPU_MT7510) || defined(TCSUPPORT_CPU_MT7505)
+ if (pppoatm_config_hook){
+ // choose router mode & pppoa type
+ pppoatm_config_hook(0, 1);
+ }
+#endif
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_br2684_push_hwnat_inline_hook(struct sk_buff *skb){
+#if defined(TCSUPPORT_CPU_MT7510) || defined(TCSUPPORT_CPU_MT7505) || defined(TCSUPPORT_CPU_EN7512)
+ if (napi_en)
+ {
+ netif_receive_skb(skb);
+ return ECNT_CONTINUE;
+ }
+#endif
+ return ECNT_RETURN_DROP;
+
+}
+
+static inline int ecnt_br2684_regvcc_init_inline_hook(struct atm_vcc *atmvcc, int encaps, int *err)
+{
+#if defined(TCSUPPORT_CPU_MT7510) || defined(TCSUPPORT_CPU_MT7505) || defined(TCSUPPORT_CPU_EN7512)
+ if (br2684_init_hook){
+ *err = br2684_init_hook(atmvcc, encaps);
+ if (*err){
+ printk("br2684_init_hook: error detected\n");
+ return ECNT_RETURN;
+ //return err;
+ } else {
+ printk("br2684_init_hook: success\n");
+ }
+ } else {
+ printk("br2684_init_hook function: (NULL)\n");
+ }
+#endif
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_br2684_push_hook_inline_hook(struct atm_vcc *atmvcc, struct sk_buff *skb){
+ int err;
+#if defined(TCSUPPORT_CPU_MT7510) || defined(TCSUPPORT_CPU_MT7505) || defined(TCSUPPORT_CPU_EN7512)
+ // hardware handle mpoa header
+ if (br2684_push_hook){
+ err = br2684_push_hook(atmvcc, skb);
+ if (err){
+ //goto error;
+ return ECNT_RETURN;
+ }
+ else
+ return ECNT_CONTINUE;
+ }
+ // soft handle mpoa header
+
+#endif
+ return ECNT_RETURN_DROP;
+}
+
+static inline int ecnt_pppoatm_push_hook_inline_hook(struct atm_vcc *atmvcc, struct sk_buff *skb)
+{
+ int ret = 0;
+#if defined(TCSUPPORT_CPU_MT7510) || defined(TCSUPPORT_CPU_MT7505)
+ if (pppoatm_push_hook){
+ ret = pppoatm_push_hook(atmvcc, skb);
+ if (ret == -1){
+ return ECNT_HOOK_ERROR;
+ //goto error;
+ } else if (ret == -2){
+ return ECNT_RETURN;
+ //return;
+ }
+ return ECNT_CONTINUE;
+ }
+ else
+ return ECNT_RETURN_DROP;
+#endif
+ return ECNT_RETURN_DROP;
+}
+
+static inline int ecnt_pppoatm_assign_vcc_init_inline_hook(struct atm_vcc *atmvcc, int encaps, int *err){
+#if defined(TCSUPPORT_CPU_MT7510) || defined(TCSUPPORT_CPU_MT7505)
+ if (pppoatm_init_hook){
+ *err = pppoatm_init_hook(atmvcc, (encaps-1));
+ if (*err){
+ printk("pppoatm_init_hook: error detected\n");
+ return ECNT_RETURN;
+ //return err;
+ } else {
+ printk("pppoatm_init_hook: success\n");
+ }
+ } else {
+ printk("pppoatm_init_hook function: (NULL)\n");
+ }
+#endif
+ return ECNT_CONTINUE;
+}
+
+
+#endif
+
+static inline int ecnt_pppoatm_may_send_pppoa_inline_hook(void){
+#if !defined(TCSUPPORT_PPPOA_ENHANCE)
+ return ECNT_CONTINUE;
+#else
+ return ECNT_RETURN_DROP;
+#endif
+}
+
Index: linux-3.18.21/net/atm/pppoatm.c
===================================================================
--- linux-3.18.21.orig/net/atm/pppoatm.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/atm/pppoatm.c 2018-02-05 13:21:29.000000000 +0800
@@ -49,6 +49,8 @@
#include <linux/atmppp.h>
#include "common.h"
+#include <ecnt_hook/ecnt_hook.h>
+#include "ecnt_net_atm.h"
enum pppoatm_encaps {
e_autodetect = PPPOATM_ENCAPS_AUTODETECT,
@@ -181,6 +183,7 @@
/* Called when an AAL5 PDU comes in */
static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
{
+ int ret;
struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
pr_debug("\n");
if (skb == NULL) { /* VCC was closed */
@@ -194,6 +197,17 @@
return;
}
atm_return(atmvcc, skb->truesize);
+
+ ret = ecnt_pppoatm_push_hook_inline_hook(atmvcc, skb);
+
+ if(ret){
+ if(ret == ECNT_HOOK_ERROR)
+ goto error;
+ else if(ret == ECNT_RETURN)
+ return;
+ }
+ else
+ {
switch (pvcc->encaps) {
case e_llc:
if (skb->len < LLC_LEN ||
@@ -226,6 +240,8 @@
case e_vc:
break;
}
+ }
+
ppp_input(&pvcc->chan, skb);
return;
@@ -236,6 +252,8 @@
static int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
{
+ if(ecnt_pppoatm_may_send_pppoa_inline_hook() == ECNT_RETURN_DROP)
+ return 1;
/*
* It's not clear that we need to bother with using atm_may_send()
* to check we don't exceed sk->sk_sndbuf. If userspace sets a
@@ -337,7 +355,9 @@
}
} else if (!pppoatm_may_send(pvcc, skb->truesize))
goto nospace;
+ #if 0// !defined(TCSUPPORT_CPU_MT7510) && !defined(TCSUPPORT_CPU_MT7505)
memcpy(skb_push(skb, LLC_LEN), pppllc, LLC_LEN);
+ #endif
break;
case e_vc:
if (!pppoatm_may_send(pvcc, skb->truesize))
@@ -376,6 +396,7 @@
{
switch (cmd) {
case PPPIOCGFLAGS:
+ ecnt_pppoatm_devppp_ioctl_config_inline_hook();
return put_user(chan_to_pvcc(chan)->flags, (int __user *) arg)
? -EFAULT : 0;
case PPPIOCSFLAGS:
@@ -405,6 +426,10 @@
if (be.encaps != PPPOATM_ENCAPS_AUTODETECT &&
be.encaps != PPPOATM_ENCAPS_VC && be.encaps != PPPOATM_ENCAPS_LLC)
return -EINVAL;
+
+ if(ecnt_pppoatm_assign_vcc_init_inline_hook(atmvcc, be.encaps, &err) == ECNT_RETURN)
+ return err;
+
pvcc = kzalloc(sizeof(*pvcc), GFP_KERNEL);
if (pvcc == NULL)
return -ENOMEM;
@@ -481,20 +506,22 @@
.ioctl = pppoatm_ioctl,
};
-static int __init pppoatm_init(void)
+int pppoatm_init(void)
{
register_atm_ioctl(&pppoatm_ioctl_ops);
return 0;
}
-static void __exit pppoatm_exit(void)
+
+void pppoatm_exit(void)
{
deregister_atm_ioctl(&pppoatm_ioctl_ops);
}
+#if 0 //!defined(TCSUPPORT_CPU_MT7510) && !defined(TCSUPPORT_CPU_MT7505)
module_init(pppoatm_init);
module_exit(pppoatm_exit);
-
+#endif
MODULE_AUTHOR("Mitchell Blank Jr <mitch@sfgoth.com>");
MODULE_DESCRIPTION("RFC2364 PPP over ATM/AAL5");
MODULE_LICENSE("GPL");
Index: linux-3.18.21/net/bridge/Makefile
===================================================================
--- linux-3.18.21.orig/net/bridge/Makefile 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/bridge/Makefile 2018-02-05 13:21:29.000000000 +0800
@@ -6,7 +6,8 @@
bridge-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o \
br_ioctl.o br_stp.o br_stp_bpdu.o \
- br_stp_if.o br_stp_timer.o br_netlink.o
+ br_stp_if.o br_stp_timer.o br_netlink.o \
+ ecnt_br.o
bridge-$(CONFIG_SYSFS) += br_sysfs_if.o br_sysfs_br.o
Index: linux-3.18.21/net/bridge/br_device.c
===================================================================
--- linux-3.18.21.orig/net/bridge/br_device.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/bridge/br_device.c 2018-02-05 13:21:29.000000000 +0800
@@ -26,7 +26,7 @@
NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)
/* net device transmit always called with BH disabled */
-netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+__IMEM netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
const unsigned char *dest = skb->data;
Index: linux-3.18.21/net/bridge/br_fdb.c
===================================================================
--- linux-3.18.21.orig/net/bridge/br_fdb.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/bridge/br_fdb.c 2018-02-05 13:21:29.000000000 +0800
@@ -21,10 +21,12 @@
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
+#include <ecnt_hook/ecnt_hook_net.h>
#include <linux/atomic.h>
#include <asm/unaligned.h>
#include <linux/if_vlan.h>
#include "br_private.h"
+#include "ecnt_br.h"
static struct kmem_cache *br_fdb_cache __read_mostly;
static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
@@ -47,6 +49,9 @@
return -ENOMEM;
get_random_bytes(&fdb_salt, sizeof(fdb_salt));
+
+ ecnt_br_fdb_init_inline_hook();
+
return 0;
}
@@ -71,7 +76,7 @@
time_before_eq(fdb->updated + hold_time(br), jiffies);
}
-static inline int br_mac_hash(const unsigned char *mac, __u16 vid)
+inline int br_mac_hash(const unsigned char *mac, __u16 vid)
{
/* use 1 byte of OUI and 3 bytes of NIC */
u32 key = get_unaligned((u32 *)(mac + 2));
@@ -134,7 +139,12 @@
{
if (f->is_static)
fdb_del_hw(br, f->addr.addr);
-
+ struct net_data_s net_data;
+ net_data.br_fdb_data.fdb = f;
+ net_data.br_fdb_data.addr = f->addr.addr;
+ net_data.source = NULL;
+ net_data.pskb = NULL;
+ ECNT_BR_FDB_HOOK(ECNT_BR_FDB_DELETE, &net_data);
hlist_del_rcu(&f->hlist);
fdb_notify(br, f, RTM_DELNEIGH);
call_rcu(&f->rcu, fdb_rcu_free);
@@ -343,11 +353,12 @@
}
/* No locking or refcounting, assumes caller has rcu_read_lock */
-struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
+__IMEM struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
const unsigned char *addr,
__u16 vid)
{
struct net_bridge_fdb_entry *fdb;
+ struct net_data_s net_data;
hlist_for_each_entry_rcu(fdb,
&br->hash[br_mac_hash(addr, vid)], hlist) {
@@ -358,9 +369,15 @@
return fdb;
}
}
-
+ net_data.br_fdb_data.fdb = NULL;
+ net_data.br_fdb_data.addr = addr;
+ net_data.source = NULL;
+ net_data.pskb = NULL;
+ ECNT_BR_FDB_HOOK(ECNT_BR_FDB_GET, &net_data);
+
return NULL;
}
+EXPORT_SYMBOL(__br_fdb_get);
#if IS_ENABLED(CONFIG_ATM_LANE)
/* Interface used by ATM LANE hook to test
@@ -472,6 +489,7 @@
__u16 vid)
{
struct net_bridge_fdb_entry *fdb;
+ struct net_data_s net_data;
fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
if (fdb) {
@@ -483,6 +501,11 @@
fdb->added_by_user = 0;
fdb->updated = fdb->used = jiffies;
hlist_add_head_rcu(&fdb->hlist, head);
+ net_data.br_fdb_data.fdb = fdb;
+ net_data.br_fdb_data.addr = addr;
+ net_data.source = NULL;
+ net_data.pskb = NULL;
+ ECNT_BR_FDB_HOOK(ECNT_BR_FDB_CREATE, &net_data);
}
return fdb;
}
@@ -531,12 +554,13 @@
return ret;
}
-void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
+__IMEM void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid, bool added_by_user)
{
struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
struct net_bridge_fdb_entry *fdb;
bool fdb_modified = false;
+ struct net_data_s net_data;
/* some users want to always flood. */
if (hold_time(br) == 0)
@@ -557,6 +581,12 @@
source->dev->name);
} else {
/* fastpath: update of existing entry */
+ net_data.br_fdb_data.fdb = fdb;
+ net_data.br_fdb_data.addr = addr;
+ net_data.source = source;
+ net_data.pskb = NULL;
+ ECNT_BR_FDB_HOOK(ECNT_BR_FDB_UPDATE, &net_data);
+
if (unlikely(source != fdb->dst)) {
fdb->dst = source;
fdb_modified = true;
@@ -583,6 +613,7 @@
spin_unlock(&br->hash_lock);
}
}
+EXPORT_SYMBOL(br_fdb_update);
static int fdb_to_nud(const struct net_bridge_fdb_entry *fdb)
{
Index: linux-3.18.21/net/bridge/br_forward.c
===================================================================
--- linux-3.18.21.orig/net/bridge/br_forward.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/bridge/br_forward.c 2018-02-05 14:20:19.000000000 +0800
@@ -19,7 +19,10 @@
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
+#include "ecnt_br.h"
#include "br_private.h"
+#include <ecnt_hook/ecnt_hook_net.h>
+
static int deliver_clone(const struct net_bridge_port *prev,
struct sk_buff *skb,
@@ -35,7 +38,7 @@
p->state == BR_STATE_FORWARDING;
}
-int br_dev_queue_push_xmit(struct sk_buff *skb)
+__IMEM int br_dev_queue_push_xmit(struct sk_buff *skb)
{
/* ip_fragment doesn't copy the MAC header */
if (nf_bridge_maybe_copy_header(skb) ||
@@ -51,15 +54,17 @@
}
EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
-int br_forward_finish(struct sk_buff *skb)
+__IMEM int br_forward_finish(struct sk_buff *skb)
{
+ ECNT_BR_FRWARD_FINISH_HOOK(ECNT_BR_FORWARD_BR_FORWARD_FINISH, skb);
+
return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
br_dev_queue_push_xmit);
-
+
}
EXPORT_SYMBOL_GPL(br_forward_finish);
-static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
+__IMEM static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
if (!skb)
@@ -81,10 +86,11 @@
br_forward_finish);
}
-static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
+__IMEM static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
struct net_device *indev;
+ ecnt_br_forward_inline_hook(skb);
if (skb_warn_if_lro(skb)) {
kfree_skb(skb);
return;
@@ -103,7 +109,7 @@
}
/* called with rcu_read_lock */
-void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
+__IMEM void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
if (to && should_deliver(to, skb)) {
__br_deliver(to, skb);
@@ -115,7 +121,7 @@
EXPORT_SYMBOL_GPL(br_deliver);
/* called with rcu_read_lock */
-void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
+__IMEM void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
{
if (should_deliver(to, skb)) {
if (skb0)
@@ -128,6 +134,9 @@
if (!skb0)
kfree_skb(skb);
}
+#if defined(TCSUPPORT_XPON_IGMP)
+EXPORT_SYMBOL(br_forward);
+#endif
static int deliver_clone(const struct net_bridge_port *prev,
struct sk_buff *skb,
@@ -160,6 +169,9 @@
if (!prev)
goto out;
+ if(-1 == ecnt_maybe_deliver_inline_hook(prev,skb))
+ goto out;
+
err = deliver_clone(prev, skb, __packet_hook);
if (err)
return ERR_PTR(err);
@@ -180,9 +192,12 @@
prev = NULL;
+ ecnt_br_flood_inline_hook( br , skb );
+
list_for_each_entry_rcu(p, &br->port_list, list) {
/* Do not flood unicast traffic to ports that turn it off */
- if (unicast && !(p->flags & BR_FLOOD))
+ if (unicast && !(p->flags & BR_FLOOD)
+ || !ecnt_should_deliver(p, skb, 1) )
continue;
prev = maybe_deliver(prev, p, skb, __packet_hook);
if (IS_ERR(prev))
Index: linux-3.18.21/net/bridge/br_input.c
===================================================================
--- linux-3.18.21.orig/net/bridge/br_input.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/bridge/br_input.c 2018-02-05 13:21:29.000000000 +0800
@@ -19,12 +19,13 @@
#include <linux/export.h>
#include <linux/rculist.h>
#include "br_private.h"
+#include "ecnt_br.h"
/* Hook for brouter */
br_should_route_hook_t __rcu *br_should_route_hook __read_mostly;
EXPORT_SYMBOL(br_should_route_hook);
-static int br_pass_frame_up(struct sk_buff *skb)
+__IMEM static int br_pass_frame_up(struct sk_buff *skb)
{
struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
struct net_bridge *br = netdev_priv(brdev);
@@ -58,7 +59,7 @@
}
/* note: already called with rcu_read_lock */
-int br_handle_frame_finish(struct sk_buff *skb)
+__IMEM int br_handle_frame_finish(struct sk_buff *skb)
{
const unsigned char *dest = eth_hdr(skb)->h_dest;
struct net_bridge_port *p = br_port_get_rcu(skb->dev);
@@ -68,13 +69,24 @@
struct sk_buff *skb2;
bool unicast = true;
u16 vid = 0;
+ int ret = 0;
+#if defined(TCSUPPORT_RA_HWNAT)
+ skb->bridge_flag = 0;
+#endif
if (!p || p->state == BR_STATE_DISABLED)
goto drop;
if (!br_allowed_ingress(p->br, nbp_get_vlan_info(p), skb, &vid))
goto out;
+ ret = ecnt_br_handle_frame_finish_inline_hook(skb, vid);
+
+ if (ECNT_RETURN_DROP == ret)
+ {
+ goto drop;
+ }
+
/* insert into forwarding database after filtering to avoid spoofing */
br = p->br;
if (p->flags & BR_LEARNING)
@@ -158,7 +170,7 @@
* Return NULL if skb is handled
* note: already called with rcu_read_lock
*/
-rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
+__IMEM rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
{
struct net_bridge_port *p;
struct sk_buff *skb = *pskb;
@@ -225,6 +237,7 @@
forward:
switch (p->state) {
case BR_STATE_FORWARDING:
+ ecnt_br_input_state_forward_inline_hook(skb);
rhook = rcu_dereference(br_should_route_hook);
if (rhook) {
if ((*rhook)(skb)) {
Index: linux-3.18.21/net/bridge/br_ioctl.c
===================================================================
--- linux-3.18.21.orig/net/bridge/br_ioctl.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/bridge/br_ioctl.c 2018-02-05 13:21:29.000000000 +0800
@@ -20,6 +20,7 @@
#include <net/net_namespace.h>
#include <asm/uaccess.h>
#include "br_private.h"
+#include "ecnt_br.h"
/* called with RTNL */
static int get_bridge_ifindices(struct net *net, int *indices, int num)
@@ -116,7 +117,21 @@
if (copy_from_user(args, rq->ifr_data, sizeof(args)))
return -EFAULT;
-
+
+ /*ecnt_old_dev_ioctl_inline_hook*/
+ /*case BRCTL_GET_BRIDGE_INFO , and case BRCTL_GET_PORT_INFO, have been moved to hook*/
+ /*any changes , be careful*/
+ {
+ int retval = 0 ;
+ int ret = 0 ;
+ ret = ecnt_old_dev_ioctl_inline_hook(br, rq , &retval) ;
+ if( ECNT_HOOK_ERROR == ret )
+ return -EFAULT;
+ else if( ECNT_RETURN_DROP == ret )
+ return 0 ;
+ else if( ECNT_RETURN == ret )
+ return retval ;
+ }
switch (args[0]) {
case BRCTL_ADD_IF:
case BRCTL_DEL_IF:
Index: linux-3.18.21/net/bridge/br_multicast.c
===================================================================
--- linux-3.18.21.orig/net/bridge/br_multicast.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/bridge/br_multicast.c 2018-02-05 13:21:29.000000000 +0800
@@ -34,6 +34,7 @@
#endif
#include "br_private.h"
+#include "ecnt_br.h"
static void br_multicast_start_querier(struct net_bridge *br,
struct bridge_mcast_own_query *query);
@@ -86,6 +87,13 @@
return 0;
}
+/*------export the br_ip_hash for use outside------*/
+int br_ip_hash_for_export(struct net_bridge_mdb_htable *mdb, struct br_ip *ip)
+{
+ return br_ip_hash(mdb, ip);
+}
+/*-------------------------------------------*/
+
static struct net_bridge_mdb_entry *__br_mdb_ip_get(
struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
{
@@ -147,6 +155,9 @@
if (BR_INPUT_SKB_CB(skb)->igmp)
return NULL;
+ if ( ECNT_RETURN == ecnt_br_mdb_get_inline_hook(skb) )
+ return NULL;
+
ip.proto = skb->protocol;
ip.vid = vid;
@@ -211,6 +222,9 @@
struct net_bridge_port_group *p =
container_of(head, struct net_bridge_port_group, rcu);
+ if ( p->port && p->port->br )
+ ecnt_br_multicast_del_pg_inline_hook(p->port->br , p);
+
kfree(p);
}
@@ -277,6 +291,8 @@
netif_running(br->dev))
mod_timer(&mp->timer, jiffies);
+ ecnt_br_multicast_del_pg_inline_hook( br , pg );
+
return;
}
@@ -509,7 +525,7 @@
return NULL;
}
-static struct net_bridge_mdb_entry *br_multicast_get_group(
+struct net_bridge_mdb_entry *br_multicast_get_group(
struct net_bridge *br, struct net_bridge_port *port,
struct br_ip *group, int hash)
{
@@ -643,6 +659,8 @@
if (unlikely(!p))
return NULL;
+ ecnt_br_multicast_new_port_group_inline_hook(port,group,p);
+
p->addr = *group;
p->port = port;
p->state = state;
@@ -678,6 +696,11 @@
mod_timer(&mp->timer, now + br->multicast_membership_interval);
goto out;
}
+ /*here walk around kenel process, if any change ,be careful*/
+ if(ECNT_CONTINUE != ecnt_br_multicast_add_group_inline_hook(mp,port,group,br,now))
+ goto err;
+ else
+ goto out;
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
@@ -1417,7 +1440,11 @@
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) ||
(port && port->state == BR_STATE_DISABLED) ||
- timer_pending(&other_query->timer))
+ (timer_pending(&other_query->timer)
+#ifdef TCSUPPORT_IGMPSNOOPING_ENHANCE
+ && !br->quick_leave
+#endif
+ ))
goto out;
mdb = mlock_dereference(br->mdb, br);
@@ -1450,13 +1477,21 @@
}
}
- if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
+ if (port && (port->flags & BR_MULTICAST_FAST_LEAVE
+#ifdef TCSUPPORT_IGMPSNOOPING_ENHANCE
+ || br->quick_leave
+#endif
+ )) {
struct net_bridge_port_group __rcu **pp;
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
+#ifdef TCSUPPORT_IGMPSNOOPING_ENHANCE
+ if(!br_multicast_equal_port_group(p, port, group))
+#else
if (p->port != port)
+#endif
continue;
rcu_assign_pointer(*pp, p->next);
@@ -1579,6 +1614,9 @@
if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
return -EINVAL;
+ if ( ECNT_RETURN == ecnt_br_mdb_get_inline_hook(skb) )
+ return 0;
+
if (iph->protocol != IPPROTO_IGMP) {
if (!ipv4_is_local_multicast(iph->daddr))
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
@@ -1624,6 +1662,15 @@
BR_INPUT_SKB_CB(skb)->igmp = 1;
ih = igmp_hdr(skb2);
+ /*ecnt_br_multicast_ipv4_rcv_inline_hook*/
+ {
+ int ret ;
+ ret = ecnt_br_multicast_ipv4_rcv_inline_hook(port,skb,skb2,iph,ih,&err) ;
+ if(ret != ECNT_CONTINUE){
+ goto out;
+ }
+ }
+
switch (ih->type) {
case IGMP_HOST_MEMBERSHIP_REPORT:
case IGMPV2_HOST_MEMBERSHIP_REPORT:
@@ -1754,6 +1801,13 @@
BR_INPUT_SKB_CB(skb)->igmp = 1;
+ {
+ int ret ;
+ ret = ecnt_br_multicast_ipv6_rcv_inline_hook(port,skb,skb2,icmp6_type,&err) ;
+ if(ECNT_RETURN_DROP == ret)
+ goto out ;
+ }
+
switch (icmp6_type) {
case ICMPV6_MGM_REPORT:
{
@@ -1794,9 +1848,15 @@
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
struct sk_buff *skb, u16 vid)
{
+ int ret = -EINVAL;
+
BR_INPUT_SKB_CB(skb)->igmp = 0;
BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
+ ret = ecnt_br_multicast_rcv_inline_hook(br, skb);
+ if ( ECNT_RETURN_DROP == ret )
+ return -EINVAL;
+
if (br->multicast_disabled)
return 0;
@@ -1879,6 +1939,9 @@
setup_timer(&br->ip6_own_query.timer, br_ip6_multicast_query_expired,
(unsigned long)br);
#endif
+
+ ecnt_br_multicast_init_inline_hook(br);
+
}
static void __br_multicast_open(struct net_bridge *br,
@@ -2042,6 +2105,8 @@
int err = 0;
struct net_bridge_mdb_htable *mdb;
+ ecnt_br_multicast_toggle_inline_hook(val);
+
spin_lock_bh(&br->multicast_lock);
if (br->multicast_disabled == !val)
goto unlock;
Index: linux-3.18.21/net/bridge/br_private.h
===================================================================
--- linux-3.18.21.orig/net/bridge/br_private.h 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/bridge/br_private.h 2018-02-05 13:21:29.000000000 +0800
@@ -40,6 +40,8 @@
/* Path to usermode spanning tree program */
#define BR_STP_PROG "/sbin/bridge-stp"
+/*CONFIG_BRIDGE_IGMP_SNOOPING && TCSUPPORT_IGMPSNOOPING_ENHANCE*/
+#define UPNP_MCAST htonl(0xEFFFFFFA)
typedef struct bridge_id bridge_id;
typedef struct mac_addr mac_addr;
@@ -102,8 +104,18 @@
unsigned char is_static;
unsigned char added_by_user;
__u16 vlan_id;
+ unsigned char vlan_layer;
};
+/*TCSUPPORT_IGMP_SNOOPING start*/
+struct net_bridge_mc_src_entry
+{
+ struct in_addr src;
+ struct in6_addr src6;
+ int filt_mode;
+};
+/*TCSUPPORT_IGMP_SNOOPING end*/
+
struct net_bridge_port_group {
struct net_bridge_port *port;
struct net_bridge_port_group __rcu *next;
@@ -112,6 +124,16 @@
struct timer_list timer;
struct br_ip addr;
unsigned char state;
+
+/*TCSUPPORT_IGMP_SNOOPING start*/
+ unsigned long ageing_time;
+ int leave_count;
+ struct net_bridge_mc_src_entry src_entry; //for IGMPv3
+ unsigned char group_mac[6]; /*Multicast address*/
+ unsigned char host_mac[6]; /*host mac address*/
+ u8 version;//version = 4 or 6
+/*TCSUPPORT_IGMP_SNOOPING end*/
+
};
struct net_bridge_mdb_entry
@@ -182,6 +204,14 @@
struct timer_list multicast_router_timer;
struct hlist_head mglist;
struct hlist_node rlist;
+
+/*TCSUPPORT_IGMP_SNOOPING start*/
+ struct net_bridge_mc_src_entry src_entry; //for IGMPv3 temp
+ mac_addr macAddr;
+ mac_addr groupMacAddr;
+ u8 version;//version = 4 or 6;
+/*TCSUPPORT_IGMP_SNOOPING end*/
+
#endif
#ifdef CONFIG_SYSFS
@@ -261,6 +291,9 @@
u8 multicast_disabled:1;
u8 multicast_querier:1;
u8 multicast_query_use_ifaddr:1;
+/*TCSUPPORT_IGMP_SNOOPING start*/
+ u8 quick_leave:1;
+/*TCSUPPORT_IGMP_SNOOPING end*/
u32 hash_elasticity;
u32 hash_max;
Index: linux-3.18.21/net/bridge/ecnt_br.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/bridge/ecnt_br.c 2018-02-05 14:20:19.000000000 +0800
@@ -0,0 +1,2133 @@
+#include <linux/foe_hook.h>
+#include <linux/skbuff.h>
+#include "br_private.h"
+#include <asm/tc3162/tc3162.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ipv6.h>
+#endif
+#include <ecnt_hook/ecnt_hook.h>
+#include <ecnt_hook/ecnt_hook_net.h>
+#ifdef TCSUPPORT_BRIDGE_MAC_LIMIT
+unsigned int macLimit = 0;
+
+static DEFINE_SPINLOCK(mac_limit_set_lock);
+
+typedef struct
+{
+ char* devName;
+ int devPortNo;
+ int macNumByPort;
+ int maxNumByPort;
+}dev_mac_num;
+
+#if !defined(TCSUPPORT_FWC_FDB_VLAN)
+static dev_mac_num devMacNum[] =
+{
+ {"ra0",1,0,0},
+ {"eth0.1",3,0,0},
+ {"eth0.2",4,0,0},
+ {"eth0.3",5,0,0},
+ {"eth0.4",6,0,0},
+};
+#endif
+#define CNT_DEVMACNUM (sizeof(devMacNum)/sizeof(dev_mac_num))
+
+#endif
+/*---------------------------------------------------------------------*/
+
+/*TCSUPPORT_XPON_IGMP && TCSUPPORT_MULTICAST_SPEED start*/
+int g_last_snoop_state = 0;
+unsigned int hw_igmp_flood_enable = 1;
+unsigned int g_snooping_enable = 1;
+/*TCSUPPORT_XPON_IGMP && TCSUPPORT_MULTICAST_SPEED end*/
+
+/*TCSUPPORT_IGMP_SNOOPING start*/
+int snoopingdebug = 0;
+/*TCSUPPORT_IGMP_SNOOPING end*/
+
+/*---------------------------------------------------------------------*/
+
+#define DEBUGP_SNOOP(x, args...) if(snoopingdebug) printk(x, ## args)
+
+/*---------------------------------------------------------------------*/
+
+int (*check_vtag_match_dev_hook)(unsigned char *devName, unsigned short vid, unsigned char tagFlag);
+EXPORT_SYMBOL(check_vtag_match_dev_hook);
+
+/* merge form TC2 main trunck
+ * Convert IP6 address to printable (loggable) representation.
+ */
+static char digits[] = "0123456789abcdef";
+static int ip6round = 0;
+char* ip6_sprintf(const struct in6_addr *addr)
+{
+ static char ip6buf[8][48];
+ int i = 0;
+ char *cp = NULL;
+ const u_int16_t *a = (const u_int16_t *)addr;
+ const u_int8_t *d = NULL;
+ int dcolon = 0;
+
+ ip6round = (ip6round + 1) & 7;
+ cp = ip6buf[ip6round];
+
+ for (i = 0; i < 8; i++) {
+ if (dcolon == 1) {
+ if (*a == 0) {
+ if (i == 7)
+ *cp++ = ':';
+ a++;
+ continue;
+ } else
+ dcolon = 2;
+ }
+ if (*a == 0) {
+ if (dcolon == 0 && *(a + 1) == 0) {
+ if (i == 0)
+ *cp++ = ':';
+ *cp++ = ':';
+ dcolon = 1;
+ } else {
+ *cp++ = '0';
+ *cp++ = ':';
+ }
+ a++;
+ continue;
+ }
+ d = (const u_char *)a;
+ {
+ char ch[4] = {0};
+ char i, j;
+ ch[0] = digits[*d >> 4];
+ ch[1] = digits[*d++ & 0xf];
+ ch[2] = digits[*d >> 4];
+ ch[3] = digits[*d & 0xf];
+ for(i=0; i<4; i++)
+ {
+ if(ch[i] != '0')
+ break;
+ }
+ if(i==4)
+ *cp++ = digits[0];
+ else
+ for(j=i; j<4; j++) *cp++ = ch[j];
+ }
+ *cp++ = ':';
+ a++;
+ }
+ *--cp = 0;
+ return (ip6buf[ip6round]);
+}
+
+static inline unsigned long hold_time(const struct net_bridge *br)
+{
+ return br->topology_change ? br->forward_delay : br->ageing_time;
+}
+
+static inline int has_expired(const struct net_bridge *br,
+ const struct net_bridge_fdb_entry *fdb)
+{
+ return !fdb->is_static &&
+ time_before_eq(fdb->updated + hold_time(br), jiffies);
+}
+
+static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
+ const unsigned char *addr,
+ __u16 vid)
+{
+ struct net_bridge_fdb_entry *fdb;
+
+ hlist_for_each_entry_rcu(fdb, head, hlist) {
+ if (ether_addr_equal(fdb->addr.addr, addr) &&
+ fdb->vlan_id == vid)
+ return fdb;
+ }
+ return NULL;
+}
+
+static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
+ const unsigned char *addr,
+ __u16 vid)
+{
+ struct net_bridge_fdb_entry *fdb;
+
+ hlist_for_each_entry(fdb, head, hlist) {
+ if (ether_addr_equal(fdb->addr.addr, addr) &&
+ fdb->vlan_id == vid)
+ return fdb;
+ }
+ return NULL;
+}
+
+
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+static inline int br_mdb_fillbuf(struct net_bridge *br, void *buf,
+ unsigned long maxnum, unsigned long skip)
+{
+ struct __mc_fdb_entry *fe = buf;
+ struct net_bridge_mdb_htable *mdb = NULL;
+ struct net_bridge_port_group *bpg = NULL;
+ int i = 0, num = 0;
+ long result = 0;
+ struct net_bridge_mdb_entry *f = NULL;
+
+ mdb = br->mdb;
+ if(!mdb)
+ return 0;
+ memset(buf, 0, maxnum*sizeof(struct __mc_fdb_entry));
+
+ rcu_read_lock();
+ spin_lock(&br->multicast_lock);
+ for (i = 0; i < mdb->max; i++) {
+ hlist_for_each_entry_rcu(f, &mdb->mhash[i], hlist[mdb->ver]) {
+ if (num >= maxnum)
+ goto out;
+ if (skip) {
+ --skip;
+ continue;
+ }
+ bpg = f->ports;
+ while(bpg){
+ if (has_expired(br, bpg)){
+ bpg = bpg->next;
+ continue;
+ }
+ if(bpg->version ==4){
+ sprintf(fe->group_addr,NIPQUAD_FMT ,NIPQUAD(bpg->addr.u.ip4));
+ sprintf(fe->src_addr, NIPQUAD_FMT, NIPQUAD(bpg->src_entry.src.s_addr));
+ }
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ else if(bpg->version == 6){
+ strncpy(fe->group_addr,ip6_sprintf(&bpg->addr.u.ip6),sizeof(fe->group_addr));
+ strncpy(fe->src_addr,ip6_sprintf(&bpg->src_entry.src6),sizeof(fe->src_addr));
+ }
+#endif
+ fe->port_no = bpg->port->port_no;
+ fe->version = bpg->version;
+ memcpy(fe->group_mac, bpg->group_mac, ETH_ALEN);
+ memcpy(fe->host_addr, bpg->port->macAddr.addr, ETH_ALEN);
+
+ fe->filter_mode = bpg->src_entry.filt_mode;
+ result = jiffies - bpg->ageing_time;
+ fe->ageing_timer_value = jiffies_to_clock_t((result>0) ? result : 0);
+ bpg = bpg->next;
+ ++fe;
+ ++num;
+ }
+
+ }
+ }
+
+ out:
+ spin_unlock(&br->multicast_lock);
+ rcu_read_unlock();
+ return num;
+}
+
+int get_mc_fdb_entries(struct net_bridge *br, void __user *userbuf,
+ unsigned long maxnum, unsigned long offset)
+{
+ int num = 0;
+ void *buf = NULL;
+ size_t size = 0;
+
+ /* Clamp size to PAGE_SIZE, test maxnum to avoid overflow */
+ if (maxnum > PAGE_SIZE/sizeof(struct __mc_fdb_entry))
+ maxnum = PAGE_SIZE/sizeof(struct __mc_fdb_entry);
+
+ size = maxnum * sizeof(struct __mc_fdb_entry);
+
+ buf = kmalloc(size, GFP_USER);
+ if (!buf)
+ return -ENOMEM;
+
+ num = br_mdb_fillbuf(br, buf, maxnum, offset);
+ if (num > 0) {
+ if (copy_to_user(userbuf, buf, num*sizeof(struct __mc_fdb_entry)))
+ num = -EFAULT;
+ }
+ kfree(buf);
+
+ return num;
+}
+
+int br_multicast_equal_port_group(struct net_bridge_port_group *pg,
+ struct net_bridge_port *port, struct br_ip *group)
+{
+ if(!pg || !port || !group)
+ return 0;
+ if((pg->version != port->version) ||(pg->port != port))
+ return 0;
+
+ if(port->version == 4)
+ {
+ if(pg->src_entry.src.s_addr == port->src_entry.src.s_addr)
+ return 1;
+ }
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ else if(port->version == 6)
+ {
+ if(ipv6_addr_equal(&pg->src_entry.src6, &port->src_entry.src6))//group ip
+ return 1;
+ }
+#endif
+ return 0;
+}
+#else
+static inline int br_mdb_fillbuf(struct net_bridge *br, void *buf,
+ unsigned long maxnum, unsigned long skip)
+{
+ return 0 ;
+}
+int get_mc_fdb_entries(struct net_bridge *br, void __user *userbuf,
+ unsigned long maxnum, unsigned long offset)
+{
+ return 0 ;
+}
+int br_multicast_equal_port_group(struct net_bridge_port_group *pg,
+ struct net_bridge_port *port, struct br_ip *group)
+{
+ return 0 ;
+}
+#endif
+
+////////////////////////////////////////////////////////////////////////////////////////////
+/*TCSUPPORT_XPON_IGMP || TCSUPPORT_MULTICAST_SPEED start*/
+ #define MUL_PROTO_IGMP 1
+ #define MUL_PROTO_MLD 2
+
+ static struct list_head hwnat_igmp_entry;
+ static unsigned int hwnat_igmp_flag = 1;
+ static unsigned int hwnat_age_time = 3000;
+ static spinlock_t hwnat_lock;
+ static struct net_bridge * hwnat_br = NULL;
+
+/*TCSUPPORT_XPON_IGMP && TCSUPPORT_MULTICAST_SPEED start*/
+ static spinlock_t hwnat_disable_snooping_lock;
+ static LIST_HEAD(multicast_flood_hw_list);
+/*TCSUPPORT_XPON_IGMP && TCSUPPORT_MULTICAST_SPEED end*/
+
+/*TCSUPPORT_XPON_IGMP || TCSUPPORT_MULTICAST_SPEED end*/
+
+/*TCSUPPORT_XPON_IGMP || TCSUPPORT_MULTICAST_SPEED start*/
+#ifdef TCSUPPORT_RA_HWNAT
+ extern int (*hwnat_is_alive_pkt_hook)(struct sk_buff* skb);
+ extern int (*hwnat_skb_to_foe_hook)(struct sk_buff* skb);
+ extern int (*hwnat_set_special_tag_hook)(int index, int tag);
+ extern int (*hwnat_delete_foe_entry_hook)(int index);
+ extern int (*hwnat_is_multicast_entry_hook)(int index ,unsigned char* grp_addr,unsigned char* src_addr,int type);
+ /*TCSUPPORT_MULTICAST_SPEED start*/
+ extern int (*multicast_speed_find_entry_hook)(int index);
+ extern int (*multicast_speed_learn_flow_hook)(struct sk_buff* skb);
+ extern int (*hwnat_set_rule_according_to_state_hook)(int index, int state,int mask);
+ /*TCSUPPORT_MULTICAST_SPEED end*/
+ extern int (*xpon_igmp_learn_flow_hook)(struct sk_buff* skb);
+ extern int (*hwnat_set_wlan_multicast_hook)(int index,int flag);
+ extern int (*wan_multicast_undrop_hook)(void);
+ /*TCSUPPORT_XPON_IGMP && TCSUPPORT_MULTICAST_SPEED start*/
+ extern int (*multicast_flood_find_entry_hook)(int index);
+ extern int (*hwnat_set_multicast_speed_enable_hook)(int enable);
+ extern int (*multicast_flood_is_bind_hook)(int index);
+ /*TCSUPPORT_XPON_IGMP && TCSUPPORT_MULTICAST_SPEED end*/
+#else
+ static int (*hwnat_is_alive_pkt_hook)(struct sk_buff* skb) = NULL;
+ static int (*hwnat_skb_to_foe_hook)(struct sk_buff* skb) = NULL;
+ static int (*hwnat_set_special_tag_hook)(int index, int tag) = NULL;
+ static int (*hwnat_delete_foe_entry_hook)(int index) = NULL;
+ static int (*hwnat_is_multicast_entry_hook)(int index ,unsigned char* grp_addr,unsigned char* src_addr,int type) = NULL;
+ /*TCSUPPORT_MULTICAST_SPEED start*/
+ extern int (*multicast_speed_find_entry_hook)(int index);
+ static int (*multicast_speed_learn_flow_hook)(struct sk_buff* skb) = NULL;
+ static int (*hwnat_set_rule_according_to_state_hook)(int index, int state,int mask) = NULL;
+ /*TCSUPPORT_MULTICAST_SPEED end*/
+ static int (*hwnat_set_wlan_multicast_hook)(int index,int flag) = NULL;
+ static int (*xpon_igmp_learn_flow_hook)(struct sk_buff* skb) = NULL;
+ static int (*wan_multicast_undrop_hook)(void) = NULL;
+ /*TCSUPPORT_XPON_IGMP && TCSUPPORT_MULTICAST_SPEED start*/
+ static int (*multicast_flood_find_entry_hook)(int index) = NULL;
+ static int (*hwnat_set_multicast_speed_enable_hook)(int enable) = NULL;
+ static int (*multicast_flood_is_bind_hook)(int index) = NULL;
+ /*TCSUPPORT_XPON_IGMP && TCSUPPORT_MULTICAST_SPEED end*/
+#endif
+
+/*------------------------------------------------------------------------------*/
+struct list_head* igmp_hwnat_get_list(void)
+{
+ return &hwnat_igmp_entry;
+}
+
+int igmp_hwnat_debug_on(void)
+{
+ return hwnat_igmp_flag&0x02;
+}
+
+int igmp_hwnat_enable(void)
+{
+ return hwnat_igmp_flag&0x01;
+}
+
+#define IGMP_HWNAT_DEBUG(fmt,args...) \
+ do{ \
+ if(igmp_hwnat_debug_on()) \
+ { \
+ printk("\r\n%s:"fmt,__FUNCTION__,##args);\
+ } \
+ }while(0)
+
+void* igmp_hwnat_alloc(int size)
+{
+ void* ptr = NULL;
+ if (size>0)
+ {
+ ptr = kzalloc(size, GFP_ATOMIC);
+ }
+ return ptr;
+
+}
+
+#ifdef TCSUPPORT_MULTICAST_SPEED
+void igmp_hwnat_free(struct rcu_head *head)
+{
+ struct IGMP_HWNATEntry_s *entry
+ = container_of(head, struct IGMP_HWNATEntry_s,rcu);
+ if(entry)
+ kfree(entry);
+
+ return;
+}
+#else
+void igmp_hwnat_free(void* ptr)
+{
+ if (ptr)
+ kfree(ptr);
+
+ ptr = NULL;
+ return;
+}
+#endif
+
+/**************************************************
+Function: Get Port Mask Bit
+Input: pointer to struct net_bridge_port
+Return:
+ 0-3: ethernet port mask
+ 8-11: wifi port mask
+ -1: error
+**************************************************/
+extern int g_port_reverse_kernel;
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+int igmp_hwnat_get_port(struct net_bridge_port* p)
+{
+ if (! p || !( p->dev))
+ {
+ if (! p)
+ IGMP_HWNAT_DEBUG("p == NULL");
+ else
+ IGMP_HWNAT_DEBUG("p->dev == NULL");
+ return -1;
+ }
+
+ IGMP_HWNAT_DEBUG("port name = %s ",p->dev->name);
+
+ if ((p->dev->name[0] == 'e') && (p->dev->name[4] == '.')) {
+#if !defined(TCSUPPORT_CUC_C5_2P)
+ if (g_port_reverse_kernel == 1 && (p->dev->name[5] - '1') <= 3)
+ return 3 - (p->dev->name[5] - '1');
+ else
+ return (p->dev->name[5] - '1');
+#endif
+ }
+
+#ifdef TCSUPPORT_MULTICAST_SPEED
+ if ((strlen(p->dev->name) == 3) && (strncmp(p->dev->name,"ra", 2) == 0) &&
+ ((p->dev->name[2] >= '0') && (p->dev->name[2] <= '7')))
+ {
+ return (p->dev->name[2] - '0')+HWNAT_WLAN_IF_BASE;
+ }
+
+ if ((strlen(p->dev->name) == 4) && (strncmp(p->dev->name,"rai", 3) == 0) &&
+ ((p->dev->name[3] >= '0') && (p->dev->name[3] <= '7')))
+ {
+ return (p->dev->name[3] - '0')+HWNAT_WLAN_IF_BASE+HWNAT_WLAN_IF_NUM;
+ }
+#else
+ if ((p->dev->name[0] == 'r') && (p->dev->name[1] == 'a'))
+ return (p->dev->name[5] - '0')+8;
+#endif
+
+ return -1;
+}
+
+/**************************************************
+Function: Check if the port can receive the
+ multicast flow
+Input:
+ br: pointer to struct net_bridge
+ port: pointer to struct net_bridge_port
+ entry: pointer to struct IGMP_HWNATEntry_t
+Return:
+ 0: fail; 1: ok
+**************************************************/
+
+int igmp_hwnat_should_deliver(struct net_bridge *br,struct net_bridge_port *port,IGMP_HWNATEntry_t* entry)
+{
+ struct net_bridge_mdb_htable *mdb = br->mdb;
+ struct net_bridge_mdb_entry *mp = NULL;
+ struct net_bridge_port_group *pg = NULL;
+ struct br_ip group;
+ int hash,flag = 0;
+ char src[16];
+
+ if (!mdb)
+ {
+ IGMP_HWNAT_DEBUG("mdb == NULL");
+ return 0;
+ }
+
+ IGMP_HWNAT_DEBUG("entry->proto=%d",entry->proto);
+
+ memset(&group, 0, sizeof(group));
+ if (entry->proto == MUL_PROTO_IGMP)
+ group.proto = htons(ETH_P_IP);
+ else if (entry->proto == MUL_PROTO_MLD)
+ group.proto = htons(ETH_P_IPV6);
+ else
+ return 0;
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ memcpy(group.u.ip6.s6_addr,entry->grp_addr,16);
+#endif
+
+ hash = br_ip_hash_for_export(mdb, &group);
+ mp = br_multicast_get_group(br, port, &group, hash);
+
+ if (mp==NULL)
+ {
+ IGMP_HWNAT_DEBUG("mp == NULL");
+ return 0;
+ }
+
+ memset(src,0,16);
+ memcpy(src,entry->src_addr,16);
+ pg = mp->ports;
+
+ while(pg)
+ {
+ if (pg->port != port)
+ {
+ pg = pg->next;
+ continue;
+ }
+
+ if (entry->proto==MUL_PROTO_IGMP)
+ {
+ if(pg->src_entry.filt_mode == MCAST_INCLUDE)
+ {
+ if (memcmp(&pg->src_entry.src.s_addr,src,4)==0)
+ return 1;
+ }
+ else if(pg->src_entry.filt_mode == MCAST_EXCLUDE)
+ {
+ if(0 == pg->src_entry.src.s_addr)
+ return 1;
+ else if(memcmp(&pg->src_entry.src.s_addr,src,4))
+ flag = 2;
+ else if(memcmp(&pg->src_entry.src.s_addr,src,4)==0)
+ return 0;
+ }
+ }
+ if (entry->proto==MUL_PROTO_MLD)
+ return 1;
+
+ pg = pg->next;
+ }
+
+ if (flag == 2)
+ return 1;
+ return 0;
+
+}
+
+/**************************************************
+Function: Get forwarded ports given a multicast
+ group
+Input:
+ br: pointer to struct net_bridge
+ entry: pointer to struct IGMP_HWNATEntry_t
+Return:
+ 0x0-0x0f0f: port mask
+***************************************************/
+int igmp_hwnat_port_mask(struct net_bridge *br,IGMP_HWNATEntry_t* entry)
+{
+ struct net_bridge_port *p = NULL;
+ int port = 0,mask = 0;
+ int switch_port = 0;
+
+ IGMP_HWNAT_DEBUG("enter");
+ list_for_each_entry(p, &br->port_list, list)
+ {
+ if (igmp_hwnat_should_deliver(br,p,entry)==0)
+ continue;
+
+ port = igmp_hwnat_get_port(p);
+
+ if (port < 0 )
+ continue;
+ /*lan port*/
+ if(port >= 0 && port <= 5)
+ {
+#if defined(TCSUPPORT_CPU_MT7520) || defined(TCSUPPORT_CPU_EN7512)
+ if(MT7530LanPortMap2Switch_hook)
+ {
+ switch_port = MT7530LanPortMap2Switch_hook(port);
+ }
+ port = switch_port;
+#endif
+ mask |= 1 << port;
+ }
+ /*wifi port*/
+ else if(port >= HWNAT_WLAN_IF_BASE)
+ {
+ mask |= 1 << port;
+ }
+ }
+
+ IGMP_HWNAT_DEBUG("mask = %d",mask);
+
+ return mask;
+}
+
+/**************************************************
+Function: Delete a entry maintained by SW
+Input:
+ entry: pointer to struct IGMP_HWNATEntry_t
+Return:
+ 0: ok
+**************************************************/
+int igmp_hwnat_delete_entry(IGMP_HWNATEntry_t* entry)
+{
+ if (entry == NULL)
+ {
+ IGMP_HWNAT_DEBUG("entry == NULL");
+ return 0;
+ }
+ IGMP_HWNAT_DEBUG("enter");
+
+ del_timer(&entry->age_timer);
+#ifdef TCSUPPORT_MULTICAST_SPEED
+ list_del_rcu(&(entry->list));
+ call_rcu_bh(&(entry->rcu), igmp_hwnat_free);
+#else
+ list_del(&entry->list);
+ igmp_hwnat_free(entry);
+#endif
+ return 0;
+}
+
+/**************************************************
+Function: Check if the multicast group is still
+ accelerated by HW
+Input:
+ entry: pointer to struct IGMP_HWNATEntry_t
+Return:
+ 0: fail; 1: ok
+**************************************************/
+int igmp_hwnat_check_entry_valid(IGMP_HWNATEntry_t* entry)
+{
+ int valid = 0,type = 0;
+
+ if (entry->proto == MUL_PROTO_MLD)
+ type = 1;
+
+ if (hwnat_is_multicast_entry_hook)
+ valid = hwnat_is_multicast_entry_hook(entry->index,entry->grp_addr,entry->src_addr,type);
+
+ return valid;
+}
+
+/**************************************************
+Function: Get the foe index from skb
+Input:
+ entry: pointer to struct sk_buff
+Return:
+ foe entry index in hwnat
+**************************************************/
+int igmp_hwnat_flow_index(struct sk_buff* skb)
+{
+ int index = -1;
+
+ if (hwnat_skb_to_foe_hook && skb)
+ index = hwnat_skb_to_foe_hook(skb);
+
+ return index;
+}
+
+/**************************************************
+Function: Clean the HW accelebration given
+ the multicast group
+Input:
+ entry: pointer to struct IGMP_HWNATEntry_t
+Return:
+ 0: ok
+*************************************************/
+int igmp_hwnat_delete_foe(IGMP_HWNATEntry_t* entry)
+{
+ if (igmp_hwnat_check_entry_valid(entry)==0)
+ {
+ IGMP_HWNAT_DEBUG("entry is not valid");
+ return 0;
+ }
+
+ IGMP_HWNAT_DEBUG("index = %d ,mask = %x",entry->index,entry->mask);
+
+ if (hwnat_delete_foe_entry_hook)
+ hwnat_delete_foe_entry_hook(entry->index);
+
+ return 0;
+}
+
+/**************************************************
+Function: Delete the SW and HW maintained
+ multicast flow
+Input:
+ entry: pointer to struct IGMP_HWNATEntry_t
+Return:
+ 0: ok
+**************************************************/
+int igmp_hwnat_delete_flow(IGMP_HWNATEntry_t* entry)
+{
+ if (entry == NULL)
+ {
+ IGMP_HWNAT_DEBUG("entry == NUL");
+ return 0;
+ }
+
+ IGMP_HWNAT_DEBUG("entry index = %d",entry->index);
+
+ igmp_hwnat_delete_foe(entry);
+ igmp_hwnat_delete_entry(entry);
+ return 0;
+}
+
+/**************************************************
+Function: Clear all the SW and HW maintained
+ multicast flow
+Input:
+ N/A
+Return:
+ 0: ok
+**************************************************/
+int igmp_hwnat_clear_flows(void)
+{
+ IGMP_HWNATEntry_t* entry = NULL,*tmp = NULL;
+ struct list_head* hwnat_flow = igmp_hwnat_get_list();
+
+ IGMP_HWNAT_DEBUG("enter");
+
+ spin_lock_bh(&hwnat_lock);
+#ifdef TCSUPPORT_MULTICAST_SPEED
+ list_for_each_entry_rcu(entry,hwnat_flow,list)
+#else
+ list_for_each_entry_safe(entry,tmp,hwnat_flow,list)
+#endif
+ {
+ igmp_hwnat_delete_flow(entry);
+ }
+ spin_unlock_bh(&hwnat_lock);
+ return 0;
+}
+EXPORT_SYMBOL(igmp_hwnat_clear_flows);
+
+/**************************************************
+Function: Find the SW maintained entry given
+ foe entry index
+Input:
+ index: foe entry index
+Return:
+ 0: ok
+**************************************************/
+IGMP_HWNATEntry_t* igmp_hwnat_find_entry_rcu(int index)
+{
+ IGMP_HWNATEntry_t* entry = NULL;
+ struct list_head* hwnat_flow = igmp_hwnat_get_list();
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(entry,hwnat_flow,list)
+ {
+ if (entry->index == index)
+ {
+ rcu_read_unlock();
+ return entry;
+ }
+ }
+
+ rcu_read_unlock();
+ return NULL;
+}
+
+/**************************************************
+Function: Find the SW maintained entry given
+ foe entry index
+Input:
+ index: foe entry index
+Return:
+ pointer to the entry, or NULL if not found
+**************************************************/
+IGMP_HWNATEntry_t* igmp_hwnat_find_entry(int index)
+{
+ IGMP_HWNATEntry_t* entry = NULL;
+
+#if defined(TCSUPPORT_XPON_IGMP) && defined(TCSUPPORT_MULTICAST_SPEED)
+ if(0 == g_snooping_enable)
+ {
+ return NULL;
+ }
+#endif
+
+ struct list_head* hwnat_flow = igmp_hwnat_get_list();
+
+ list_for_each_entry_rcu(entry,hwnat_flow,list)
+ {
+ if (entry->index == index)
+ {
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+/**************************************************
+Function: Timer callback to check if the HW
+ multicast flow is still valid
+Input:
+ arg: pointer to struct IGMP_HWNATEntry_t
+Return:
+ N/A
+**************************************************/
+void igmp_hwnat_timer_timeout(unsigned long arg)
+{
+ IGMP_HWNATEntry_t* entry = (IGMP_HWNATEntry_t* )arg;
+ IGMP_HWNAT_DEBUG("enter");
+
+ if (entry)
+ {
+ if (igmp_hwnat_check_entry_valid(entry)==0)
+ {
+ spin_lock(&hwnat_lock);
+ igmp_hwnat_delete_entry(entry);
+ spin_unlock(&hwnat_lock);
+ }
+ else
+ {
+ mod_timer(&entry->age_timer,round_jiffies(jiffies) + hwnat_age_time);
+ }
+ }
+ return;
+}
+
+/**************************************************
+Function: Add a HW accelerated multicast flow into
+ a SW maintained entry
+Input:
+ skb: packet the flow is learned from
+Return:
+ pointer to the new entry, or NULL on failure
+***************************************************/
+IGMP_HWNATEntry_t* igmp_hwnat_add_flow(struct sk_buff* skb ,int proto,unsigned char* grp_addr, unsigned char* src_addr)
+{
+ IGMP_HWNATEntry_t* entry = NULL;
+ struct list_head* hwnat_flow = igmp_hwnat_get_list();
+ int index = igmp_hwnat_flow_index(skb);
+
+ IGMP_HWNAT_DEBUG("index = %d",index);
+
+ if (index < 0)
+ return NULL;
+
+ entry = (IGMP_HWNATEntry_t* )igmp_hwnat_alloc(sizeof(IGMP_HWNATEntry_t));
+
+ if (entry==NULL)
+ {
+ IGMP_HWNAT_DEBUG("alloc entry fail");
+ return NULL;
+ }
+ entry->index = index;
+ entry->mask = 0;
+ entry->proto = proto;
+ memcpy(entry->grp_addr,grp_addr,16);
+ memcpy(entry->src_addr,src_addr,16);
+
+#ifdef TCSUPPORT_MULTICAST_SPEED
+ list_add_tail_rcu(&entry->list, hwnat_flow);
+#else
+ list_add_tail(&entry->list,hwnat_flow);
+#endif
+
+ setup_timer(&entry->age_timer, igmp_hwnat_timer_timeout, (unsigned long)entry);
+ mod_timer(&entry->age_timer,round_jiffies(jiffies) + hwnat_age_time);
+
+ return entry;
+}
+
+/**************************************************
+Function: Update the port mask of a SW and HW
+ maintained multicast flow
+Input:
+ entry: pointer to struct IGMP_HWNATEntry_t
+Return:
+ 0: ok
+***************************************************/
+int igmp_hwnat_update_flow(IGMP_HWNATEntry_t* entry ,int mask)
+{
+ IGMP_HWNAT_DEBUG("index = %d ,mask = %d ",entry->index,mask);
+
+ if (hwnat_set_special_tag_hook)
+ hwnat_set_special_tag_hook(entry->index,mask);
+
+ entry->mask = mask;
+
+ return 0;
+}
+
+/**************************************************
+Function: Enable hwnat wlan multicast
+ acceleration given a multicast group
+Input:
+ entry: pointer to struct IGMP_HWNATEntry_t
+Return:
+ 0: ok
+**************************************************/
+int igmp_hwnat_open_wlan(IGMP_HWNATEntry_t* entry)
+{
+
+ IGMP_HWNAT_DEBUG("enter");
+
+ if (hwnat_set_wlan_multicast_hook)/*always NULL*/
+ hwnat_set_wlan_multicast_hook(entry->index,1);
+
+ return 0;
+}
+
+/**************************************************
+Function: Disable hwnat wlan multicast
+ acceleration given a multicast group
+Input:
+ entry: pointer to struct IGMP_HWNATEntry_t
+Return:
+ 0: ok
+**************************************************/
+int igmp_hwnat_close_wlan(IGMP_HWNATEntry_t* entry)
+{
+ IGMP_HWNAT_DEBUG("enter");
+
+ if (hwnat_set_wlan_multicast_hook)/*always NULL*/
+ hwnat_set_wlan_multicast_hook(entry->index,0);
+
+ return 0;
+}
+
+/**************************************************
+Function: update hw_nat mask to hw&sw entry, update sw wifinum
+Input:
+ entry: pointer to struct IGMP_HWNATEntry_t
+ mask: port mask
+ state: port state, lan? wlan?
+Return:
+ 0: ok
+**************************************************/
+int igmp_hwnat_update_hw_nat_info(IGMP_HWNATEntry_t* entry,unsigned long mask,int state)
+{
+ int i;
+ unsigned long wlanmask;
+ unsigned long masktemp = mask;
+ entry->wifinum = 0;
+ IGMP_HWNAT_DEBUG("state=%d,mask=%d",state,mask);
+ switch(state)
+ {
+ //only state i and state iii need to know how much wifi interfaces
+ case MULTICAST_SPEED_STATE_I:
+ case MULTICAST_SPEED_STATE_III:
+ //for multi ssid speed
+ wlanmask = (masktemp>>HWNAT_WLAN_IF_BASE)&0x0ff;
+ for(i = 0;i < HWNAT_WLAN_IF_MAXNUM; i++)
+ {
+ if(wlanmask&(1 << i))
+ entry->wifinum++;
+ }
+ break;
+
+ //fall through and do nothing
+ case MULTICAST_SPEED_STATE_II:
+ default:
+ break;
+ }
+
+ if(hwnat_set_rule_according_to_state_hook)
+ hwnat_set_rule_according_to_state_hook(entry->index,state,mask);
+
+ entry->mask = mask;
+
+ return 0;
+}
+
+
+/**************************************************
+Function: Sync the SW maintained multicast
+ flow with HW maintained flow
+Input:
+ br: pointer to struct net_bridge
+Return:
+ 0: ok
+***************************************************/
+int igmp_hwnat_update_all(struct net_bridge *br)
+{
+ IGMP_HWNATEntry_t* entry = NULL,*temp = NULL;
+ struct list_head* hwnat_flow = igmp_hwnat_get_list();
+ unsigned long mask,old_mask;
+#ifdef TCSUPPORT_MULTICAST_SPEED
+ int interfaceflag;
+ int masktemp;
+#endif
+ if (igmp_hwnat_enable()==0)
+ return 0;
+
+ IGMP_HWNAT_DEBUG("enter");
+
+ spin_lock(&hwnat_lock);
+
+#ifdef TCSUPPORT_MULTICAST_SPEED
+ list_for_each_entry_rcu(entry,hwnat_flow,list)
+#else
+ list_for_each_entry_safe(entry,temp,hwnat_flow,list)
+#endif
+ {
+ if (igmp_hwnat_check_entry_valid(entry)==0)/*check if is a valid hw_nat entry*/
+ {
+ igmp_hwnat_delete_entry(entry);
+ continue;
+ }
+
+ mask = igmp_hwnat_port_mask(br,entry);
+ old_mask = entry->mask;
+ if (mask != old_mask)
+ {
+#ifdef TCSUPPORT_MULTICAST_SPEED
+ masktemp = mask;
+
+ interfaceflag = (masktemp&0xff) >0?1:0;//compute lan interface
+ interfaceflag |= (((masktemp>>HWNAT_WLAN_IF_BASE)&0x0ff) >0?1:0)<<1;//compute wlan interface
+ switch (interfaceflag)
+ {
+ case MULTICAST_SPEED_STATE_I:
+ igmp_hwnat_update_hw_nat_info(entry,mask,MULTICAST_SPEED_STATE_I); /*reset special tag*/
+ break;
+
+ case MULTICAST_SPEED_STATE_II:
+ igmp_hwnat_update_hw_nat_info(entry,mask,MULTICAST_SPEED_STATE_II);
+ break;
+
+ case MULTICAST_SPEED_STATE_III:
+ igmp_hwnat_update_hw_nat_info(entry,mask,MULTICAST_SPEED_STATE_III);
+ break;
+
+ case MULTICAST_SPEED_STATE_IV:
+ igmp_hwnat_delete_flow(entry);
+ break;
+
+ default://do nothing
+ break;
+ }
+#else
+ if ((mask&0x0f)==0)
+ igmp_hwnat_delete_flow(entry);
+
+ if ((mask&0x0f) != (old_mask&0x0f) && (mask&0x0f)>0)
+ igmp_hwnat_update_flow(entry,mask);
+
+ if (((mask>>8)&0x0f) > 0 && ((old_mask>>8)&0x0f) == 0)
+ igmp_hwnat_open_wlan(entry);
+
+ if (((mask>>8)&0x0f) == 0 && ((old_mask>>8)&0x0f) > 0)
+ igmp_hwnat_close_wlan(entry);
+#endif
+ }
+ }
+
+ spin_unlock(&hwnat_lock);
+
+ return 0;
+}
+
+/**************************************************
+Function: Check if the addr is a multicast addr
+Input:
+ dst: pointer to mac address
+Return:
+ 0: not multicast; 1: multicast
+***************************************************/
+int igmp_hwnat_is_flow_pkt(char* dst)
+{
+ char mac[3] = {0x01,0x00,0x5e};
+
+ if (memcmp(dst,mac,3)==0)
+ return 1;
+
+ if (dst[0]==0x33 && dst[1] == 0x33)
+ return 1;
+
+ return 0;
+}
+
+
+/**************************************************
+Function: SW learns the multicast flow from the
+ skb passed by the hw_nat module
+Input:
+ skb: pointer to struct sk_buff
+Return:
+ 0: ok
+***************************************************/
+int igmp_hwnat_learn_flow(struct sk_buff* skb)
+{
+ unsigned char dest_addr[16],src_addr[16];
+ short int proto = 0;
+ struct iphdr* ih;
+ struct ipv6hdr* i6h;
+ unsigned short eth_type;
+ unsigned char* buff = skb_mac_header(skb)+18;
+ IGMP_HWNATEntry_t* entry = NULL;
+ int index;
+#ifdef TCSUPPORT_MULTICAST_SPEED
+ struct ethhdr *eth;
+ struct vlan_ethhdr *vlan_eth;
+ char *dst;
+#endif
+
+ if (igmp_hwnat_enable()==0)
+ return 0;
+
+#if defined(TCSUPPORT_XPON_IGMP) && defined(TCSUPPORT_MULTICAST_SPEED)
+ if(0 == g_snooping_enable)
+ {
+ return 0;
+ }
+#endif
+
+ IGMP_HWNAT_DEBUG("name=%s",skb->dev->name);
+
+#ifdef TCSUPPORT_MULTICAST_SPEED
+ if((strncmp(skb->dev->name,"eth", 3) == 0))
+ {
+ vlan_eth = vlan_eth_hdr(skb);
+ buff = skb_mac_header(skb)+ ETH_HLEN + VLAN_HLEN;
+ if(vlan_eth)
+ {
+ dst = vlan_eth->h_dest;
+ eth_type = vlan_eth->h_vlan_encapsulated_proto;
+ }
+ else
+ {
+ printk("\r\n%s:vlan_eth == NULL,return",__FUNCTION__);
+ return 0;
+ }
+ }
+ else
+ {
+ eth =(struct ethhdr *)skb->data;
+ buff = skb_mac_header(skb)+ ETH_HLEN;
+ if(eth)
+ {
+ dst = eth->h_dest;
+ eth_type = eth->h_proto;
+ }
+ else
+ {
+ printk("\r\n%s:eth == NULL,return",__FUNCTION__);
+ return 0;
+ }
+ }
+
+ if (igmp_hwnat_is_flow_pkt(dst)==0)
+ {
+ IGMP_HWNAT_DEBUG("hw nat rule not match pkt");
+ return 0;
+ }
+#else
+ if (igmp_hwnat_is_flow_pkt(vlan_eth_hdr(skb)->h_dest)==0)
+ return 0;
+
+ eth_type = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+#endif
+
+#if !defined(TCSUPPORT_CT_SIMCARD_SEPARATION) || !defined(TCSUPPORT_CT_2PORTS)
+ IGMP_HWNAT_DEBUG("eth_type=%x",eth_type);
+#endif
+ memset(dest_addr,0,16);
+ memset(src_addr,0,16);
+
+ if (eth_type==htons(ETH_P_IP))
+ {
+ proto = MUL_PROTO_IGMP;
+ ih = (struct iphdr*)buff;
+ memcpy(dest_addr,&ih->daddr,4);
+ memcpy(src_addr,&ih->saddr,4);
+ }
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ else if(eth_type==htons(ETH_P_IPV6))
+ {
+ proto = MUL_PROTO_MLD;
+ i6h = (struct ipv6hdr*)buff;
+ memcpy(dest_addr, i6h->daddr.s6_addr,16);
+ memcpy(src_addr, i6h->saddr.s6_addr,16);
+ }
+#endif
+ else
+ {
+ return 0;
+ }
+
+ index = igmp_hwnat_flow_index(skb);
+
+ spin_lock(&hwnat_lock);
+#ifdef TCSUPPORT_MULTICAST_SPEED
+ entry = igmp_hwnat_find_entry_rcu(index);
+#else
+ entry = igmp_hwnat_find_entry(index);
+ #endif
+
+ if (entry != NULL)
+ igmp_hwnat_delete_entry(entry);
+
+ igmp_hwnat_add_flow(skb,proto,dest_addr,src_addr);
+
+ spin_unlock(&hwnat_lock);
+
+ if (hwnat_br)
+ igmp_hwnat_update_all(hwnat_br);
+
+ return 0;
+}
+
+
+/**************************************************
+Function: Clean all dropped multicast flow by HW
+Input:
+ N/A
+Return:
+ 0: ok
+***************************************************/
+int igmp_hwnat_multicast_undrop(void)
+{
+ if (wan_multicast_undrop_hook)
+ wan_multicast_undrop_hook();
+
+ return 0;
+}
+
+int igmp_hwnat_read_proc(char *buf, char **start, off_t off, int count,int *eof, void *data)
+{
+ int len = 0;
+ IGMP_HWNATEntry_t* entry = NULL;
+ struct list_head* hwnat_flow = igmp_hwnat_get_list();
+#if defined(TCSUPPORT_XPON_IGMP) && defined(TCSUPPORT_MULTICAST_SPEED)
+ multicast_flood_hwentry_t* flood_entry = NULL;
+ multicast_flood_hwentry_t* ptr = NULL;
+#endif
+
+ len = sprintf(buf,"flag = %d time = %d \n",hwnat_igmp_flag,hwnat_age_time);
+#if defined(TCSUPPORT_XPON_IGMP) && defined(TCSUPPORT_MULTICAST_SPEED)
+ len += sprintf(buf+len,"hw_igmp_flood_enable = %d.\n",hw_igmp_flood_enable);
+ spin_lock(&hwnat_disable_snooping_lock);
+ list_for_each_entry_safe(flood_entry, ptr, &multicast_flood_hw_list, list)
+ {
+ len += sprintf(buf+len,"flood index = %d port_mask = %d.\n",flood_entry->index, flood_entry->port_mask);
+ }
+ spin_unlock(&hwnat_disable_snooping_lock);
+#endif
+
+ len += sprintf(buf+len,"index type mask wlannum grp_addr src_addr \n");
+#ifdef TCSUPPORT_MULTICAST_SPEED
+ rcu_read_lock();
+#else
+ spin_lock(&hwnat_lock);
+#endif
+
+#ifdef TCSUPPORT_MULTICAST_SPEED
+ list_for_each_entry_rcu(entry,hwnat_flow,list)
+#else
+ list_for_each_entry(entry,hwnat_flow,list)
+#endif
+ {
+#ifdef TCSUPPORT_MULTICAST_SPEED
+ if(MUL_PROTO_IGMP == entry->proto)
+ {
+ len += sprintf(buf+len,"%d %d %d %d %u.%u.%u.%u %u.%u.%u.%u \n",entry->index,entry->proto,entry->mask,entry->wifinum
+ ,entry->grp_addr[0],entry->grp_addr[1],entry->grp_addr[2],entry->grp_addr[3],
+ entry->src_addr[0],entry->src_addr[1],entry->src_addr[2],entry->src_addr[3]);
+ }
+ else
+ {
+ len += sprintf(buf+len,"%d %d %d %d %s %s \n",entry->index,entry->proto,entry->mask,entry->wifinum
+ ,ip6_sprintf((struct in6_addr*)(entry->grp_addr)),ip6_sprintf((struct in6_addr*)(entry->src_addr)));
+ }
+#else
+ len += sprintf(buf+len,"%d %d %d %d %u.%u.%u.%u %u.%u.%u.%u \n",entry->index,entry->proto,entry->mask,entry->wifinum
+ ,entry->grp_addr[0],entry->grp_addr[1],entry->grp_addr[2],entry->grp_addr[3],
+ entry->src_addr[0],entry->src_addr[1],entry->src_addr[2],entry->src_addr[3]);
+#endif
+ }
+#ifndef TCSUPPORT_MULTICAST_SPEED
+ spin_unlock(&hwnat_lock);
+#else
+ rcu_read_unlock();
+#endif
+
+ *start = buf + off;
+ if (len < off + count)
+ *eof = 1;
+ len -= off;
+ if (len > count)
+ len = count ;
+ if (len <0)
+ len = 0;
+
+ return len;
+
+}
+static int igmp_hwnat_write_proc(struct file *file, const char *buffer, unsigned long count, void *data)
+{
+ char buff[32],cmd[8];
+ int len,flag;
+ int flood_flag = 0;
+
+ if (count > 32)
+ len = 32;
+ else
+ len = count;
+
+ memset(buff,0,32);
+ memset(cmd,0,8);
+ if (copy_from_user(buff, buffer, len - 1))
+ return -EFAULT;
+
+ sscanf(buff,"%s %d",cmd,&flag);
+#ifdef TCSUPPORT_MULTICAST_SPEED
+ if (memcmp(cmd,"switch",4)==0)
+ {
+ hwnat_igmp_flag &= 0xfffffffe;
+ hwnat_igmp_flag |= ((flag > 0? 1:0));
+ }
+
+ if (memcmp(cmd,"debug",4)==0)
+ {
+ hwnat_igmp_flag &= 0xfffffffd;
+ hwnat_igmp_flag |= (((flag > 0? 1:0)) << 1);
+ }
+
+ if (memcmp(cmd,"default",4)==0)
+ {
+ hwnat_igmp_flag = 1;
+ hwnat_age_time =3000;
+ }
+#endif
+ if (memcmp(cmd,"flag",4)==0)
+ hwnat_igmp_flag = flag;
+
+
+ if (memcmp(cmd,"time",4)==0)
+ hwnat_age_time = flag;
+
+#if defined(TCSUPPORT_XPON_IGMP) && defined(TCSUPPORT_MULTICAST_SPEED)
+ if (memcmp(cmd,"m_flood_hw",4)==0)
+ {
+ if(flag)
+ {
+ flood_flag = 1;
+ }
+ else
+ {
+ flood_flag = 0;
+ }
+
+ hw_igmp_flood_enable = flood_flag;
+ if(hwnat_set_multicast_speed_enable_hook)
+ {
+ printk("flood_flag = %d, func = %s, line = %d.\n", flood_flag, __FUNCTION__,__LINE__);
+ hwnat_set_multicast_speed_enable_hook(flood_flag);
+ }
+ }
+#endif
+
+ return len;
+}
+
+void add_multicast_flood_hwentry(struct sk_buff* skb)
+{
+ int index = -1;
+ multicast_flood_hwentry_t* entry = NULL;
+ multicast_flood_hwentry_t* del_entry = NULL;
+ multicast_flood_hwentry_t* ptr = NULL;
+ int flood_flag = false;
+ unsigned long flags;
+
+ if(0 == hw_igmp_flood_enable)
+ {
+ return ;
+ }
+
+ flood_flag = g_snooping_enable ;
+ if(flood_flag)
+ {
+ return ;
+ }
+
+ index = igmp_hwnat_flow_index(skb);
+ if(0 > index )
+ {
+ return;
+ }
+
+ entry = (multicast_flood_hwentry_t* )igmp_hwnat_alloc(sizeof(multicast_flood_hwentry_t));
+ if (NULL == entry)
+ {
+ return ;
+ }
+ spin_lock_irqsave(&hwnat_disable_snooping_lock, flags);
+ list_for_each_entry_safe(del_entry, ptr, &multicast_flood_hw_list, list)
+ {
+ if(del_entry->index == index)
+ {
+ list_del(&del_entry->list);
+ kfree(del_entry);
+ del_entry = NULL;
+ }
+ }
+
+ entry->index = index;
+ entry->port_mask = 0;
+
+ list_add_tail(&entry->list, &multicast_flood_hw_list);
+
+ spin_unlock_irqrestore(&hwnat_disable_snooping_lock, flags);
+
+ return ;
+}
+
+void update_multicast_flood_hwentry(int index, unsigned long mask)
+{
+ multicast_flood_hwentry_t* entry = NULL;
+ multicast_flood_hwentry_t* ptr = NULL;
+ int flood_flag = false;
+ unsigned long flags;
+
+ if(0 == hw_igmp_flood_enable)
+ {
+ return ;
+ }
+
+ flood_flag = g_snooping_enable;
+ if(flood_flag)
+ {
+ return ;
+ }
+
+ if(0 >= index || 0 > mask)
+ {
+ return ;
+ }
+
+ spin_lock_irqsave(&hwnat_disable_snooping_lock, flags);
+
+ list_for_each_entry_safe(entry, ptr, &multicast_flood_hw_list, list)
+ {
+ if(entry->index == index)
+ {
+ entry->port_mask |= mask;
+ }
+ }
+
+ spin_unlock_irqrestore(&hwnat_disable_snooping_lock, flags);
+
+ return ;
+}
+
+multicast_flood_hwentry_t* find_multicast_flood_hwentry(int index)
+{
+ multicast_flood_hwentry_t* entry = NULL;
+ multicast_flood_hwentry_t* ptr = NULL;
+ int find_flag = 0;
+ int flood_flag = false;
+ unsigned long flags;
+
+ if(0 == hw_igmp_flood_enable)
+ {
+ return NULL;
+ }
+
+ flood_flag = g_snooping_enable ;
+ if(flood_flag)
+ {
+ return NULL;
+ }
+
+ if(0 >= index)
+ {
+ return NULL ;
+ }
+
+ spin_lock_irqsave(&hwnat_disable_snooping_lock, flags);
+
+ list_for_each_entry_safe(entry, ptr, &multicast_flood_hw_list, list)
+ {
+ if(entry->index == index)
+ {
+ find_flag = 1;
+ goto out;
+ }
+ }
+out:
+ spin_unlock_irqrestore(&hwnat_disable_snooping_lock, flags);
+ if(find_flag)
+ {
+ return entry;
+ }
+
+ return NULL;
+}
+
+/*update mask */
+void update_multicast_flood_mask(int index)
+{
+ int wifi_flag = false;
+ multicast_flood_hwentry_t* entry = NULL;
+ multicast_flood_hwentry_t* ptr = NULL;
+ int lan_flag = false;
+ int flood_flag = 0;
+ unsigned long flags;
+
+ if(0 >= index)
+ {
+ return ;
+ }
+
+ if(0 == hw_igmp_flood_enable)
+ {
+ return ;
+ }
+
+ flood_flag = g_snooping_enable ;
+ if(flood_flag)
+ {
+ return ;
+ }
+
+ spin_lock_irqsave(&hwnat_disable_snooping_lock, flags);
+ list_for_each_entry_safe(entry, ptr, &multicast_flood_hw_list, list)
+ {
+ if(entry->index == index)
+ {
+ lan_flag |= ((entry->port_mask & 0x0ff) > 0 ? 1 : 0);
+ wifi_flag |= (((entry->port_mask >> HWNAT_WLAN_IF_BASE)&0x0ff) > 0 ? 1 : 0);
+ if(wifi_flag)
+ {
+ if(lan_flag)
+ {
+ if(hwnat_set_rule_according_to_state_hook)
+ hwnat_set_rule_according_to_state_hook(index, MULTICAST_SPEED_STATE_I, entry->port_mask);
+ }
+ else
+ {
+ if(hwnat_set_rule_according_to_state_hook)
+ hwnat_set_rule_according_to_state_hook(index, MULTICAST_SPEED_STATE_III, entry->port_mask);
+ }
+ }
+ else
+ {
+ if(hwnat_set_rule_according_to_state_hook)
+ hwnat_set_rule_according_to_state_hook(index, MULTICAST_SPEED_STATE_II, entry->port_mask);
+ }
+
+ }
+ else
+ {
+ if(multicast_flood_is_bind_hook && (0 == multicast_flood_is_bind_hook(entry->index)))
+ {
+ list_del(&entry->list);
+ kfree(entry);
+ entry = NULL;
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&hwnat_disable_snooping_lock, flags);
+
+ return ;
+}
+
+int clear_multicast_flood_hwentry(void)
+{
+ multicast_flood_hwentry_t* entry = NULL;
+ multicast_flood_hwentry_t* ptr = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwnat_disable_snooping_lock, flags);
+ list_for_each_entry_safe(entry, ptr, &multicast_flood_hw_list, list)
+ {
+ if (hwnat_delete_foe_entry_hook)
+ {
+ hwnat_delete_foe_entry_hook(entry->index);
+ }
+ list_del(&entry->list);
+ kfree(entry);
+ entry = NULL;
+ }
+
+ spin_unlock_irqrestore(&hwnat_disable_snooping_lock, flags);
+ return 0;
+}
+
+#if defined(TCSUPPORT_XPON_IGMP) && defined(TCSUPPORT_MULTICAST_SPEED)
+struct ecnt_hook_ops ecnt_hwnat_mc_undrop_ops = {
+ .name = "igmp_hwnat_multicast_undrop",
+ .hookfn = igmp_hwnat_multicast_undrop,
+ .is_execute = 1,
+ .maintype = ECNT_NET_IGMP,
+ .subtype = ECNT_NET_IGMP_GROUP_ADDED,
+};
+
+struct ecnt_hook_ops ecnt_clear_flood_mc_ops = {
+ .name = "clear_multicast_flood_hwentry",
+ .hookfn = clear_multicast_flood_hwentry,
+ .is_execute = 1,
+ .maintype = ECNT_NET_IGMP,
+ .subtype = ECNT_NET_IGMP_GROUP_DROPPED,
+};
+#endif
+
+void igmp_hwnat_init(struct net_bridge *br)
+{
+ struct proc_dir_entry *hwnat_proc = NULL;
+ INIT_LIST_HEAD(&hwnat_igmp_entry);
+ spin_lock_init(&hwnat_lock);
+#ifdef TCSUPPORT_MULTICAST_SPEED
+ multicast_speed_learn_flow_hook = igmp_hwnat_learn_flow;
+ multicast_speed_find_entry_hook = igmp_hwnat_find_entry;
+
+#endif
+
+#if defined(TCSUPPORT_XPON_IGMP) && defined(TCSUPPORT_MULTICAST_SPEED)
+ spin_lock_init(&hwnat_disable_snooping_lock);
+ multicast_flood_find_entry_hook = find_multicast_flood_hwentry;
+ ecnt_register_hook(&ecnt_hwnat_mc_undrop_ops);
+ ecnt_register_hook(&ecnt_clear_flood_mc_ops);
+#endif
+
+ xpon_igmp_learn_flow_hook = igmp_hwnat_learn_flow;/*for what? likely no use*/
+
+ hwnat_proc = create_proc_entry("tc3162/igmp_hwnat", 0, NULL);
+ hwnat_proc->read_proc = igmp_hwnat_read_proc;
+ hwnat_proc->write_proc = igmp_hwnat_write_proc;
+ hwnat_br = br;
+ return;
+}
+
+void igmp_hwnat_fini(void)
+{
+ igmp_hwnat_clear_flows();
+ remove_proc_entry("tc3162/igmp_hwnat",0);
+ return;
+}
+#else
+int igmp_hwnat_get_port(struct net_bridge_port* p)
+{
+ return 0 ;
+}
+int igmp_hwnat_should_deliver(struct net_bridge *br,struct net_bridge_port *port,IGMP_HWNATEntry_t* entry)
+{
+ return 0 ;
+}
+int igmp_hwnat_port_mask(struct net_bridge *br,IGMP_HWNATEntry_t* entry)
+{
+ return 0 ;
+}
+int igmp_hwnat_delete_entry(IGMP_HWNATEntry_t* entry)
+{
+ return 0 ;
+}
+int igmp_hwnat_check_entry_valid(IGMP_HWNATEntry_t* entry)
+{
+ return 0 ;
+}
+int igmp_hwnat_flow_index(struct sk_buff* skb)
+{
+ return 0 ;
+}
+int igmp_hwnat_delete_foe(IGMP_HWNATEntry_t* entry)
+{
+ return 0 ;
+}
+int igmp_hwnat_delete_flow(IGMP_HWNATEntry_t* entry)
+{
+ return 0 ;
+}
+int igmp_hwnat_clear_flows(void)
+{
+ return 0 ;
+}
+IGMP_HWNATEntry_t* igmp_hwnat_find_entry_rcu(int index)
+{
+ return NULL ;
+}
+IGMP_HWNATEntry_t* igmp_hwnat_find_entry(int index)
+{
+ return NULL ;
+}
+void igmp_hwnat_timer_timeout(unsigned long arg)
+{
+ return ;
+}
+IGMP_HWNATEntry_t* igmp_hwnat_add_flow(struct sk_buff* skb ,int proto,unsigned char* grp_addr, unsigned char* src_addr)
+{
+ return NULL ;
+}
+int igmp_hwnat_update_flow(IGMP_HWNATEntry_t* entry ,int mask)
+{
+ return 0 ;
+}
+int igmp_hwnat_open_wlan(IGMP_HWNATEntry_t* entry)
+{
+ return 0 ;
+}
+int igmp_hwnat_close_wlan(IGMP_HWNATEntry_t* entry)
+{
+ return 0 ;
+}
+int igmp_hwnat_update_hw_nat_info(IGMP_HWNATEntry_t* entry,unsigned long mask,int state)
+{
+ return 0 ;
+}
+int igmp_hwnat_update_all(struct net_bridge *br)
+{
+ return 0 ;
+}
+int igmp_hwnat_is_flow_pkt(char* dst)
+{
+ return 0 ;
+}
+int igmp_hwnat_learn_flow(struct sk_buff* skb)
+{
+ return 0 ;
+}
+int igmp_hwnat_multicast_undrop(void)
+{
+ return 0 ;
+}
+int igmp_hwnat_read_proc(char *buf, char **start, off_t off, int count,int *eof, void *data)
+{
+ return 0 ;
+}
+static int igmp_hwnat_write_proc(struct file *file, const char *buffer, unsigned long count, void *data)
+{
+ return 0 ;
+}
+void add_multicast_flood_hwentry(struct sk_buff* skb)
+{
+ return ;
+}
+void update_multicast_flood_hwentry(int index, unsigned long mask)
+{
+ return ;
+}
+multicast_flood_hwentry_t* find_multicast_flood_hwentry(int index)
+{
+ return NULL ;
+}
+void update_multicast_flood_mask(int index)
+{
+ return ;
+}
+int clear_multicast_flood_hwentry(void)
+{
+ return 0 ;
+}
+void igmp_hwnat_init(struct net_bridge *br)
+{
+ return ;
+}
+void igmp_hwnat_fini(void)
+{
+ return ;
+}
+#endif
+
+#ifdef TCSUPPORT_PORT_ISOLATION
+int checkPacketsDeliver(struct net_bridge_port *prev, struct sk_buff *skb, int portBindMatch)
+{
+ int deliverFlag = 0;//0 means not deliver, 1 menas deliver
+ int routePolicyMatch = 0;
+ unsigned char vlanNum = 0;
+ unsigned short vlanId= {0};
+ unsigned char *pdata=NULL;
+
+ if(skb && (skb->dev) && (skb->dev->name[0] == 'n'))
+ {
+ if((prev->dev) && (prev->dev->name[0] == 'n'))
+ {
+ goto out;
+ }
+ else if((prev->dev) && ((prev->dev->name[0] == 'e') || (prev->dev->name[0] == 'u') || (prev->dev->name[0] == 'r')))
+ {
+ if(portBindMatch==0)
+ goto out;
+ }
+ }
+ else if(skb && (skb->dev) && ((skb->dev->name[0] == 'e') || (skb->dev->name[0] == 'u') || (skb->dev->name[0] == 'r')))
+ {
+ if((prev->dev) && ((prev->dev->name[0] == 'e') || (prev->dev->name[0] == 'u') || (prev->dev->name[0] == 'r')))
+ {
+ if(portBindMatch==0)
+ goto out;
+ }
+ else if((prev->dev) && ((prev->dev->name[0] == 'n')))
+ {
+ if((skb->mark & 0x1E00000) != 0)
+ {
+ routePolicyMatch = 1;
+ }
+
+ pdata = (unsigned char *)eth_hdr(skb);
+
+ if(pdata){ //check vlan_tag
+ pdata += 12;
+ if(*((unsigned short int *)pdata) == 0x8100){
+ vlanNum++;
+ vlanId = ((*(pdata+2) << 8) + *(pdata + 3)) & VLAN_VID_MASK;
+ pdata += 4;
+ if(*((unsigned short int *)pdata) == 0x8100){
+ vlanNum++;
+ vlanId = ((*(pdata+2) << 8) + *(pdata + 3)) & VLAN_VID_MASK;
+ pdata += 4;
+ }
+ }
+ }
+
+ if(check_vtag_match_dev_hook)
+ {
+ if(vlanNum != 0)
+ {
+ if(check_vtag_match_dev_hook(prev->dev->name, vlanId, 1) == -1)
+ goto out;
+ }
+ else
+ {
+ // no vlan packets, if in group, flood to group member, or just sent to untag device.
+ if((portBindMatch == 0) && (routePolicyMatch == 0))
+ {
+ if(check_vtag_match_dev_hook(prev->dev->name, vlanId, 0) == -1)
+ goto out;
+ }
+ }
+ }
+ }
+ }
+ deliverFlag = 1;
+
+out:
+ return deliverFlag;
+}
+
+#endif
+
+
+#ifdef TCSUPPORT_BRIDGE_MAC_LIMIT
+bool br_fdb_total_mac_num_exceed(struct net_bridge *br)
+{
+ int i = 0;
+ struct net_bridge_fdb_entry *f;
+ struct hlist_node *n;
+ int macNumTotal = 0;
+ if (br == NULL)
+ {
+ return false;
+ }
+
+ for (i = 0; i < BR_HASH_SIZE; i++) {
+ struct net_bridge_fdb_entry *f;
+ struct hlist_node *n;
+ hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) {
+ if (f && !f->is_local)
+ {
+ macNumTotal++;
+ }
+ }
+ }
+
+ spin_lock_bh(&mac_limit_set_lock);
+ if((0 != macLimit)&&(macNumTotal >= macLimit))
+ {
+ spin_unlock_bh(&mac_limit_set_lock);
+ return true;
+ }
+
+ spin_unlock_bh(&mac_limit_set_lock);
+ return false;
+}
+
+bool br_fdb_port_mac_num_exceed(struct net_bridge *br, unsigned char* devName)
+{
+ struct net_bridge_fdb_entry *f;
+ struct hlist_node *n;
+ int i = 0;
+ int portMacNum = 0;
+
+ for (i = 0; i < BR_HASH_SIZE; i++)
+ {
+ hlist_for_each_entry_safe(f, n, &br->hash[i], hlist);
+ if (f && !f->is_local)
+ {
+ if(f->dst->dev->name && 0 == strcmp(f->dst->dev->name,devName))
+ {
+ portMacNum++;
+ }
+ }
+ }
+
+ spin_lock_bh(&mac_limit_set_lock);
+ for(i = 0;i < CNT_DEVMACNUM;i++)
+ {
+ if(0 == strcmp(devName,devMacNum[i].devName))
+ {
+ if((0 != devMacNum[i].maxNumByPort)&&(portMacNum >= devMacNum[i].maxNumByPort))
+ {
+ printk("devMacNum[i].maxNumByPor = %d\n",devMacNum[i].maxNumByPort);
+ spin_unlock_bh(&mac_limit_set_lock);
+ return true;
+ }
+ }
+ }
+ spin_unlock_bh(&mac_limit_set_lock);
+
+ return false;
+}
+
+
+static int ecnt_br_fdb_update_inline_hook
+(struct net_bridge *br, struct net_bridge_port *p)
+{
+ if(br_fdb_total_mac_num_exceed(br))
+ return -1;
+
+ if(br_fdb_port_mac_num_exceed(br,p->dev->name))
+ return -1;
+
+ return 0;
+}
+
+int checkMacLimitEnable(void){
+ int i = 0;
+
+ if(0 != macLimit)
+ return 1;
+
+ for(i = 0; i < CNT_DEVMACNUM; i++)
+ {
+ if(0 != devMacNum[i].maxNumByPort)
+ {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+int ecnt_br_maclimit_hook
+(struct net_bridge *br, struct net_bridge_port *source,
+ const unsigned char *addr, u16 vid, bool added_by_user)
+{
+ struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
+ struct net_bridge_fdb_entry *fdb;
+ bool fdb_modified = false;
+
+ if( !checkMacLimitEnable()){
+ return ECNT_CONTINUE;
+ }
+
+ if ( !br || !source || !addr )
+ return ECNT_CONTINUE;
+
+ if ( !(source->flags & BR_LEARNING) )
+ return ECNT_CONTINUE;
+
+ /* some users want to always flood. */
+ if (hold_time(br) == 0)
+ return ECNT_CONTINUE;
+
+ /* ignore packets unless we are using this port */
+ if (!(source->state == BR_STATE_LEARNING ||
+ source->state == BR_STATE_FORWARDING))
+ return ECNT_CONTINUE;
+
+ fdb = fdb_find_rcu(head, addr, vid);
+ if (likely(fdb))
+ return ECNT_CONTINUE;
+
+ spin_lock_bh(&br->hash_lock);
+ if (likely(!fdb_find(head, addr, vid)))
+ {
+ if ( -1 == ecnt_br_fdb_update_inline_hook(br, source) )
+ {
+ spin_unlock_bh(&br->hash_lock);
+ return ECNT_RETURN_DROP;
+ }
+ }
+ spin_unlock_bh(&br->hash_lock);
+
+ return ECNT_CONTINUE;
+}
+
+static void flush_br0_mac(void)
+{
+ struct net_device* dev = dev_get_by_name(&init_net, "br0");
+ if(dev != NULL){
+ struct net_bridge *br = netdev_priv(dev);
+ if( br != NULL )
+ br_fdb_flush(br);
+ dev_put(dev);
+ }
+}
+
+static int mac_limit_total_write_proc(struct file *file, const char *buffer, unsigned long count, void *data)
+{
+ char val_string[64];
+ char* endpo;
+
+ if (count > sizeof(val_string) - 1)
+ return -EINVAL ;
+
+ memset(val_string,0,64);
+ if (copy_from_user(val_string, buffer, count))
+ return -EFAULT ;
+
+ spin_lock_bh(&mac_limit_set_lock);
+ macLimit = simple_strtol(val_string,&endpo,10);
+ spin_unlock_bh(&mac_limit_set_lock);
+
+ flush_br0_mac();
+
+ #ifdef TCSUPPORT_RA_HWNAT_ENHANCE_HOOK
+ if(ra_sw_nat_hook_clean_table)
+ ra_sw_nat_hook_clean_table();
+ #endif
+
+ return count;
+}
+
+static int mac_limit_total_read_proc(char *buf, char **start, off_t off, int count, int *eof, void *data)
+{
+ printk("%d\n",macLimit);
+
+ return 0;
+}
+
+
+static int mac_limit_by_port_write_proc(struct file *file, const char *buffer, unsigned long count, void *data)
+{
+ char val_string[64];
+ char* endpo;
+ char port[64];
+ int portNo;
+ char portMacLimit[64];
+ int portMacLimitNum;
+ char* p = NULL;
+ int i;
+
+ if (count > sizeof(val_string) - 1)
+ return -EINVAL ;
+
+ memset(val_string,0,64);
+ if (copy_from_user(val_string, buffer, count))
+ return -EFAULT ;
+
+ p = strchr(val_string,'-');
+
+ if(NULL == p)
+ {
+ printk("p = NULL\n");
+ return -EINVAL;
+ }
+
+ memset(port,0,64);
+ memmove(port,val_string,p-val_string);
+ portNo = simple_strtol(port,&endpo,10);
+
+ memset(portMacLimit,0,64);
+ strcpy(portMacLimit,p+1);
+ portMacLimitNum = simple_strtol(portMacLimit,&endpo,10);
+
+ spin_lock_bh(&mac_limit_set_lock);
+ for(i = 0;i < CNT_DEVMACNUM;i++)
+ {
+ if(devMacNum[i].devPortNo == portNo)
+ {
+ devMacNum[i].maxNumByPort = portMacLimitNum;
+ break;
+ }
+ }
+ spin_unlock_bh(&mac_limit_set_lock);
+
+ flush_br0_mac();
+
+ #ifdef TCSUPPORT_RA_HWNAT_ENHANCE_HOOK
+ if(ra_sw_nat_hook_clean_table)
+ ra_sw_nat_hook_clean_table();
+ #endif
+
+ return count;
+}
+
+static int mac_limit_by_port_read_proc(char *buf, char **start, off_t off, int count, int *eof, void *data)
+{
+ int i = 0;
+
+ for(i = 1;i < CNT_DEVMACNUM;i++)
+ {
+ printk("eth0.%d:%d\n",i,devMacNum[i].maxNumByPort);
+ }
+
+ return 0;
+}
+
+void bridgeMacLimitProcInit(void)
+{
+ struct proc_dir_entry *br_fdb_proc = NULL;
+
+ proc_mkdir("br_fdb", NULL);
+ br_fdb_proc = create_proc_entry("br_fdb/mac_limit_total", 0, NULL);
+ if(NULL == br_fdb_proc)
+ {
+ printk("ERROR!Create proc entry mac_limit_total fail!");
+ return;
+ }
+ br_fdb_proc->read_proc = mac_limit_total_read_proc;
+ br_fdb_proc->write_proc = mac_limit_total_write_proc;
+
+ br_fdb_proc = create_proc_entry("br_fdb/mac_limit_by_port",0,NULL);
+ if(NULL == br_fdb_proc)
+ {
+ printk("ERROR!Create proc entry mac_limit_by_port fail!");
+ return;
+ }
+ br_fdb_proc->read_proc = mac_limit_by_port_read_proc;
+ br_fdb_proc->write_proc = mac_limit_by_port_write_proc;
+}
+
+void bridgeMacLimitProcFini(void)
+{
+ remove_proc_entry("br_fdb/mac_limit_total",NULL);
+ remove_proc_entry("br_fdb/mac_limit_by_port",NULL);
+}
+
+#endif
+
Index: linux-3.18.21/net/bridge/ecnt_br.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/bridge/ecnt_br.h 2018-02-05 14:20:41.000000000 +0800
@@ -0,0 +1,863 @@
+#ifndef _LINUX_ECNT_BR_H
+#define _LINUX_ECNT_BR_H
+#include <linux/kernel.h>
+#include <linux/kmemcheck.h>
+#include <linux/compiler.h>
+#include <linux/time.h>
+#include <linux/bug.h>
+#include <linux/cache.h>
+#include <linux/if_bridge.h>
+#include <linux/atomic.h>
+#include <asm/types.h>
+#include <linux/spinlock.h>
+#include <linux/net.h>
+#include <linux/textsearch.h>
+#include <net/checksum.h>
+#include <linux/rcupdate.h>
+#include <linux/hrtimer.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdev_features.h>
+#include <linux/sched.h>
+#include <ecnt_hook/ecnt_hook.h>
+#include <linux/igmp.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ipv6.h>
+#include <net/mld.h>
+#endif
+#include <linux/ecnt_in.h>
+#include <linux/spinlock.h>
+#include "br_private.h"
+
+#define WLAN_DEV_OFFSET 5
+#ifdef TCSUPPORT_WLAN_AC
+#define WLAN_AC_DEV_OFFSET 11
+#endif
+#define USB_DEV_OFFSET 9
+#define WDS_DEV_OFFSET 10
+#define DEV_OFFSET 28
+
+#ifdef TCSUPPORT_BRIDGE_MAC_LIMIT
+extern bool br_fdb_total_mac_num_exceed(struct net_bridge *br);
+extern bool br_fdb_port_mac_num_exceed(struct net_bridge *br, unsigned char* devName);
+extern void bridgeMacLimitProcInit(void);
+extern void bridgeMacLimitProcFini(void);
+#endif
+
+#ifdef TCSUPPORT_BRIDGE_MAC_LIMIT
+int ecnt_br_maclimit_hook
+(struct net_bridge *br, struct net_bridge_port *source,
+const unsigned char *addr, u16 vid, bool added_by_user);
+#endif
+
+/*-----------------------------------------------------------------*/
+
+/*TCSUPPORT_XPON_IGMP && TCSUPPORT_MULTICAST_SPEED start*/
+extern int g_last_snoop_state;
+extern unsigned int hw_igmp_flood_enable;
+extern unsigned int g_snooping_enable;
+/*TCSUPPORT_XPON_IGMP && TCSUPPORT_MULTICAST_SPEED end*/
+
+/*TCSUPPORT_IGMP_SNOOPING start*/
+extern int snoopingdebug;
+/*TCSUPPORT_IGMP_SNOOPING end*/
+
+/*-----------------------------------------------------------------*/
+
+#define DEBUGP_SNOOP(x, args...) if(snoopingdebug) printk(x, ## args)
+
+extern int br_mdb_fillbuf(struct net_bridge *br, void *buf,
+ unsigned long maxnum, unsigned long skip);
+extern int br_multicast_equal_port_group(struct net_bridge_port_group *pg,
+ struct net_bridge_port *port, struct br_ip *group);
+extern char* ip6_sprintf(const struct in6_addr *addr);
+extern int igmp_hwnat_get_port(struct net_bridge_port* p);
+extern int igmp_hwnat_update_all(struct net_bridge *br);
+extern int igmp_hwnat_multicast_undrop(void);
+extern void igmp_hwnat_init(struct net_bridge *br);
+extern void igmp_hwnat_fini(void);
+extern void add_multicast_flood_hwentry(struct sk_buff* skb);
+extern int clear_multicast_flood_hwentry(void);
+
+extern int (*xpon_hgu_multicast_data_hook)(struct sk_buff *skb);
+
+/*-----------------------------------------------------------------*/
+
+#if defined(TCSUPPORT_XPON_IGMP)
+extern int (*xpon_igmp_ioctl_hook)(unsigned long subcmd,unsigned long argv1,unsigned long argv2);
+extern int (*xpon_sfu_up_send_multicast_frame_hook)(struct sk_buff *skb, int clone);
+extern int (*xpon_sfu_down_multicast_incoming_hook)(struct sk_buff *skb, int clone);
+#endif
+
+#ifdef TCSUPPORT_PORT_ISOLATION
+extern int checkPacketsDeliver(struct net_bridge_port *prev, struct sk_buff *skb, int portBindMatch);
+extern int (*portbind_check_hook)(char *inIf, char *outIf);
+extern int (*portbind_sw_hook)(void);
+extern int (*portbind_sw_prior_hook)(struct sk_buff *skb);
+#endif
+
+static inline int ecnt_br_forward_inline_hook(struct sk_buff *skb)
+{
+#if defined(TCSUPPORT_RA_HWNAT)
+ skb->bridge_flag = 1;
+#endif
+ return ECNT_CONTINUE;
+}
+
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+static inline int ecnt_br_multicast_ipv4_rcv_inline_hook(struct net_bridge_port *port,struct sk_buff *skb,struct sk_buff *skb2,
+ struct iphdr *iph,struct igmphdr *ih,int* err)
+{
+#if defined(TCSUPPORT_IGMPSNOOPING_ENHANCE)
+ if(iph->daddr == UPNP_MCAST)/*flooding UPNP packets,239.255.255.250*/
+ return ECNT_RETURN ;
+#endif
+#if defined(TCSUPPORT_IGMP_SNOOPING)
+ if(port){
+ memcpy(port->macAddr.addr, eth_hdr(skb)->h_source,ETH_ALEN);
+ memset(&port->src_entry, 0, sizeof(port->src_entry));
+ port->version = 4;
+ }
+#endif
+ switch(ih->type)
+ {
+ case IGMP_HOST_MEMBERSHIP_REPORT:
+ case IGMPV2_HOST_MEMBERSHIP_REPORT:
+ //ignore wan site control packet.
+ if((skb2->dev != NULL) && (skb2->dev->name[0] == 'n')){
+ //printk("\r\nignore wan site report packet(v1/v2)!");
+ *err = 0;
+ return ECNT_RETURN_DROP;
+ }
+ break;
+ case IGMPV3_HOST_MEMBERSHIP_REPORT:
+ //ignore wan site control packet.
+ if((skb2->dev != NULL) && (skb2->dev->name[0] == 'n')){
+ //printk("\r\nignore wan site report packet(v3)!");
+ *err = 0;
+ return ECNT_RETURN_DROP;
+ }
+ break;
+ case IGMP_HOST_MEMBERSHIP_QUERY:
+ break;
+ case IGMP_HOST_LEAVE_MESSAGE:
+ //ignore wan site control packet.
+ if((skb2->dev != NULL) && (skb2->dev->name[0] == 'n')){
+ //printk("\r\nignore wan site leave packet!");
+ *err = 0;
+ return ECNT_RETURN_DROP;
+ }
+ break;
+ }
+
+ return ECNT_CONTINUE ;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void MultiIP2MAC(struct in6_addr *pIpaddr, unsigned char *mac)
+{
+ if(pIpaddr == NULL || mac == NULL)
+ return;
+
+ *mac = 0x33;
+ *(mac + 1) = 0x33;
+ *(mac + 2) = pIpaddr->s6_addr[12];
+ *(mac + 3) = pIpaddr->s6_addr[13];
+ *(mac + 4) = pIpaddr->s6_addr[14];
+ *(mac + 5) = pIpaddr->s6_addr[15];
+
+ return;
+}
+
+static inline int ecnt_br_multicast_ipv6_rcv_inline_hook(struct net_bridge_port *port,struct sk_buff *skb,struct sk_buff *skb2,
+ u8 type, int* err)
+{
+ if(port){
+ struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb2);
+ if ( mld && port )
+ {
+ MultiIP2MAC(&mld->mld_mca, port->groupMacAddr.addr);
+ memcpy(port->macAddr.addr, eth_hdr(skb)->h_source, ETH_ALEN);
+ memset(&port->src_entry, 0, sizeof(port->src_entry));
+ port->version = 6;
+ }
+ }
+ switch(type)
+ {
+ case ICMPV6_MGM_REPORT:
+ case ICMPV6_MLD2_REPORT:
+ if((skb2->dev != NULL) && (skb2->dev->name[0] == 'n')){
+ /*printk("\r\n[v6]ignore wan site report packet(v1/v2)!");*/
+ *err = 0;
+ return ECNT_RETURN_DROP;
+ }
+ break;
+ case ICMPV6_MGM_QUERY:
+ break;
+ case ICMPV6_MGM_REDUCTION:
+ if((skb2->dev != NULL) && (skb2->dev->name[0] == 'n')){
+ /*printk("\r\n[v6]ignore wan site leave packet!");*/
+ *err = 0;
+ return ECNT_RETURN_DROP;
+ }
+ break;
+ default:
+ break;
+ }
+
+
+ return ECNT_CONTINUE;
+}
+#endif
+#else
+static inline int ecnt_br_multicast_ipv4_rcv_inline_hook(struct net_bridge_port *port,struct sk_buff *skb,struct sk_buff *skb2,
+ struct iphdr *iph,struct igmphdr *ih,int* err)
+{
+ return 0 ;
+}
+#if IS_ENABLED(CONFIG_IPV6)
+static inline int ecnt_br_multicast_ipv6_rcv_inline_hook(struct net_bridge_port *port,struct sk_buff *skb,u8 type)
+{
+ return 0 ;
+}
+#endif
+#endif
+
+static inline int ecnt_br_input_state_forward_inline_hook(struct sk_buff *skb)
+{
+#if 1 //defined(CONFIG_PORT_BINDING) || defined(TCSUPPORT_REDIRECT_WITH_PORTMASK)
+/*_____________________________________________
+** remark packets from different LAN interfaces,
+** use the highest 4 bits.
+**
+** eth0 0x10000000
+** eth0.1 0x10000000
+** eth0.2 0x20000000
+** eth0.3 0x30000000
+** eth0.4 0x40000000
+** ra0 0x50000000
+** ra1 0x60000000
+** ra2 0x70000000
+** ra3 0x80000000
+** usb0 0x90000000
+** wds0~3 0xA0000000
+** rai0 0xB0000000
+** rai1 0xC0000000
+** rai2 0xD0000000
+** rai3 0xE0000000
+**_________________________________________
+*/
+
+ switch (skb->dev->name[0]) {
+ case 'e':
+#ifdef TCSUPPORT_TC2031
+ /* device name format must be eth0 */
+ skb->mark |= 0x10000000;
+#else
+ //single lan port
+ if(!strcmp(skb->dev->name, "eth0"))
+ {
+ skb->mark |= 0x10000000;
+ }
+
+ /* device name format must be eth0.x */
+ if (skb->dev->name[4] == '.')
+ skb->mark |= (skb->dev->name[5] - '0') << DEV_OFFSET;
+#endif
+ break;
+ case 'r':
+#ifdef TCSUPPORT_WLAN_AC
+ if (skb->dev->name[2] == 'i')
+ /* device name must be raix */
+ skb->mark |= ((skb->dev->name[3] - '0') + WLAN_AC_DEV_OFFSET) << DEV_OFFSET;
+ else
+#endif
+ /* device name must be rax */
+ skb->mark |= ((skb->dev->name[2] - '0') + WLAN_DEV_OFFSET) << DEV_OFFSET;
+ break;
+ case 'u':
+ /* device name must be usbx */
+ skb->mark |= ((skb->dev->name[3] - '0') + USB_DEV_OFFSET) << DEV_OFFSET;
+ break;
+ case 'w':
+ /* device name must be wdsx */
+ skb->mark |= (WDS_DEV_OFFSET) << DEV_OFFSET;
+ break;
+ default:
+ break;
+ }
+#endif
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_br_handle_frame_finish_inline_hook(struct sk_buff *skb, u16 vid)
+{
+ int ret = 0;
+ struct net_bridge *br;
+ struct net_bridge_port *source = br_port_get_rcu(skb->dev);
+ const unsigned char *dest = NULL;
+ const unsigned char *addr = NULL;
+
+ br = source->br;
+ addr =eth_hdr(skb)->h_source;
+
+
+#if defined(TCSUPPORT_XPON_IGMP)
+ dest = eth_hdr(skb)->h_dest;
+
+ /*downstream multicast operation */
+ if(is_multicast_ether_addr(dest) && xpon_sfu_down_multicast_incoming_hook)
+ {
+ ret = xpon_sfu_down_multicast_incoming_hook(skb, 1);
+ if (ret > 0 )
+ {
+ return ECNT_RETURN_DROP;
+ }
+ }
+
+ /*send upstream multicast to ANI, jump kernel multicast */
+ if(is_multicast_ether_addr(dest) && xpon_sfu_up_send_multicast_frame_hook)
+ {
+ ret = xpon_sfu_up_send_multicast_frame_hook(skb, 1);
+ if (ret > 0 )
+ {
+ return ECNT_RETURN_DROP;
+ }
+ }
+#endif
+
+#if defined(TCSUPPORT_BRIDGE_MAC_LIMIT)
+ if ( ECNT_RETURN_DROP == ecnt_br_maclimit_hook(br, source, addr
+ , vid, false) )
+ return ECNT_RETURN_DROP;
+#endif
+
+ return ECNT_CONTINUE;
+}
+
+
+static inline int ecnt_old_dev_ioctl_inline_hook(struct net_bridge *br,struct ifreq *rq , int* ret)
+{
+ unsigned long args[4];
+
+#if defined(TCSUPPORT_XPON_IGMP)
+ typeof(xpon_igmp_ioctl_hook) xpon_igmp_ioctl;
+#endif
+
+ if (copy_from_user(args, rq->ifr_data, sizeof(args)))
+ return ECNT_HOOK_ERROR;
+
+ switch(args[0])
+ {
+ case BRCTL_GET_BRIDGE_INFO:
+ {
+ struct __bridge_info b;
+
+ memset(&b, 0, sizeof(struct __bridge_info));
+ rcu_read_lock();
+ memcpy(&b.designated_root, &br->designated_root, 8);
+ memcpy(&b.bridge_id, &br->bridge_id, 8);
+ b.root_path_cost = br->root_path_cost;
+ b.max_age = jiffies_to_clock_t(br->max_age);
+ b.hello_time = jiffies_to_clock_t(br->hello_time);
+ b.forward_delay = br->forward_delay;
+ b.bridge_max_age = br->bridge_max_age;
+ b.bridge_hello_time = br->bridge_hello_time;
+ b.bridge_forward_delay = jiffies_to_clock_t(br->bridge_forward_delay);
+ b.topology_change = br->topology_change;
+ b.topology_change_detected = br->topology_change_detected;
+ b.root_port = br->root_port;
+
+ b.stp_enabled = (br->stp_enabled != BR_NO_STP);
+ b.ageing_time = jiffies_to_clock_t(br->ageing_time);
+ b.hello_timer_value = br_timer_value(&br->hello_timer);
+ b.tcn_timer_value = br_timer_value(&br->tcn_timer);
+ b.topology_change_timer_value = br_timer_value(&br->topology_change_timer);
+ b.gc_timer_value = br_timer_value(&br->gc_timer);
+#if defined(CONFIG_BRIDGE_IGMP_SNOOPING) && defined(TCSUPPORT_IGMPSNOOPING_ENHANCE)
+ b.igmpsnoop_ageing_time = br->multicast_membership_interval;
+ b.igmpsnoop_enabled = !br->multicast_disabled;
+ b.igmpsnoop_quickleave = br->quick_leave;
+ b.igmpsnoop_dbg = (__u8)snoopingdebug;
+#endif
+ rcu_read_unlock();
+
+ if (copy_to_user((void __user *)args[1], &b, sizeof(b)))
+ return -EFAULT;
+
+ return ECNT_RETURN_DROP;
+ }
+ case BRCTL_GET_PORT_INFO:
+ {
+ struct __port_info p;
+ struct net_bridge_port *pt;
+
+ rcu_read_lock();
+ if ((pt = br_get_port(br, args[2])) == NULL) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ memset(&p, 0, sizeof(struct __port_info));
+ memcpy(&p.designated_root, &pt->designated_root, 8);
+ memcpy(&p.designated_bridge, &pt->designated_bridge, 8);
+ p.port_id = pt->port_id;
+ p.designated_port = pt->designated_port;
+ p.path_cost = pt->path_cost;
+ p.designated_cost = pt->designated_cost;
+ p.state = pt->state;
+ p.top_change_ack = pt->topology_change_ack;
+ p.config_pending = pt->config_pending;
+ p.message_age_timer_value = br_timer_value(&pt->message_age_timer);
+ p.forward_delay_timer_value = br_timer_value(&pt->forward_delay_timer);
+ p.hold_timer_value = br_timer_value(&pt->hold_timer);
+
+#if defined(CONFIG_BRIDGE_IGMP_SNOOPING) && defined(TCSUPPORT_IGMPSNOOPING_ENHANCE)
+ p.is_router = pt->multicast_router;
+#endif
+
+ rcu_read_unlock();
+
+ if (copy_to_user((void __user *)args[1], &p, sizeof(p)))
+ return -EFAULT;
+
+ return ECNT_RETURN_DROP;
+ }
+#if defined(CONFIG_BRIDGE_IGMP_SNOOPING) && defined(TCSUPPORT_IGMPSNOOPING_ENHANCE)
+ case BRCTL_SET_IGMPSNOOPING_STATE:
+ if (!capable(CAP_NET_ADMIN))
+ return ECNT_HOOK_ERROR;
+ br_multicast_toggle(br, args[1]);
+ return ECNT_RETURN_DROP;
+
+ case BRCTL_SET_IGMPSNOOPING_AGEING_TIME:
+ if (!capable(CAP_NET_ADMIN))
+ return ECNT_HOOK_ERROR;
+ spin_lock_bh(&br->lock);
+ br->multicast_membership_interval = clock_t_to_jiffies(args[1]);
+ spin_unlock_bh(&br->lock);
+ return ECNT_RETURN_DROP;
+
+ case BRCTL_GET_MC_FDB_ENTRIES:
+ if (!capable(CAP_NET_ADMIN))
+ return ECNT_HOOK_ERROR;
+ *ret = get_mc_fdb_entries(br, (void __user *)args[1],
+ args[2], args[3]);
+ return ECNT_RETURN;
+
+ case BRCTL_SET_IGMPSNOOPING_QUICKLEAVE:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ br->quick_leave = args[1];
+ return ECNT_RETURN_DROP;
+
+ case BRCTL_SET_IGMPSNOOPING_DBG:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ snoopingdebug = (int)args[1] ;
+ return ECNT_RETURN_DROP;
+#endif
+
+#if defined(TCSUPPORT_XPON_IGMP)
+ case BRCTL_XPON_IGMP_CMD:
+ xpon_igmp_ioctl = rcu_dereference(xpon_igmp_ioctl_hook);
+ if (xpon_igmp_ioctl)
+ return xpon_igmp_ioctl(args[1],args[2],args[3]);
+ return ECNT_RETURN;
+#endif
+
+ default:
+ return ECNT_CONTINUE;
+ }
+
+ return ECNT_CONTINUE;
+
+}
+
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+static inline void ecnt_br_multicast_del_pg_inline_hook(struct net_bridge *br,
+ struct net_bridge_port_group *pg)
+{
+#ifdef TCSUPPORT_IGMPSNOOPING_ENHANCE
+ if(pg->version == 4){
+ DEBUGP_SNOOP("mc_fdb_delete delete dev=%s group=" NIPQUAD_FMT " src ip=" NIPQUAD_FMT "\n",
+ pg->port->dev->name, NIPQUAD(pg->addr.u.ip4),NIPQUAD(pg->src_entry.src.s_addr));
+ }
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ else if(pg->version == 6)
+ {
+ DEBUGP_SNOOP("mc_fdb_delete deleteV6 dev=%s group=[%s] src ip=[%s]\n",
+ pg->port->dev->name, ip6_sprintf(&pg->addr.u.ip6),ip6_sprintf(&pg->src_entry.src6));
+ }
+#endif
+#endif
+
+#if defined(TCSUPPORT_XPON_IGMP) || defined(TCSUPPORT_MULTICAST_SPEED)
+ igmp_hwnat_update_all(br);
+#endif
+
+ return ;
+}
+
+static inline void ecnt_br_multicast_new_port_group_inline_hook(struct net_bridge_port *port,struct br_ip *group,struct net_bridge_port_group *p)
+{
+#ifdef TCSUPPORT_IGMPSNOOPING_ENHANCE
+ if(port->version == 4){
+ DEBUGP_SNOOP("br_multicast_add_group new portgroup dev=%s group=" NIPQUAD_FMT " src ip=" NIPQUAD_FMT "\n",
+ port->dev->name, NIPQUAD(group->u.ip4),NIPQUAD(port->src_entry.src.s_addr));
+ }
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ else{
+ DEBUGP_SNOOP("br_multicast_add_group newV6 portgroup dev=%s group=[%s] src ip=[%s]\n",
+ port->dev->name, ip6_sprintf(&group->u.ip6), ip6_sprintf(&port->src_entry.src6));
+ }
+#endif
+ memcpy(&p->src_entry, &port->src_entry, sizeof(port->src_entry));
+ memcpy(p->group_mac, port->groupMacAddr.addr, sizeof(port->groupMacAddr.addr));
+ memcpy(p->host_mac, port->macAddr.addr, sizeof(port->macAddr.addr));
+ p->version = port->version;
+#endif
+}
+
+static inline int ecnt_br_multicast_add_group_inline_hook(struct net_bridge_mdb_entry *mp,struct net_bridge_port *port,
+ struct br_ip *group,struct net_bridge *br,unsigned long now)
+{
+ struct net_bridge_port_group __rcu **pp;
+ struct net_bridge_port_group *p;
+
+ for (pp = &mp->ports ; (p = mlock_dereference(*pp, br)) != NULL ; pp = &p->next)
+ {
+#ifdef TCSUPPORT_IGMPSNOOPING_ENHANCE
+ if(br_multicast_equal_port_group(p, port, group)){
+ if(port->version == 4){
+ DEBUGP_SNOOP("br_multicast_add_group update portgroup dev=%s group=" NIPQUAD_FMT " src ip=" NIPQUAD_FMT "\n",
+ port->dev->name, NIPQUAD(group->u.ip4),NIPQUAD(port->src_entry.src.s_addr));
+ }
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ else{
+ DEBUGP_SNOOP("br_multicast_add_group updateV6 portgroup dev=%s group=[%s] src ip=[%s]\n",
+ port->dev->name, ip6_sprintf(&group->u.ip6),ip6_sprintf(&port->src_entry.src6));
+ }
+#endif
+ memcpy(&p->src_entry, &port->src_entry, sizeof(port->src_entry));
+ goto found;
+ }
+#else
+ if (p->port == port)
+ goto found;
+#endif
+ if ((unsigned long)p->port < (unsigned long)port)
+ break;
+ }
+
+ p = br_multicast_new_port_group(port, group, *pp, MDB_TEMPORARY);
+ if (unlikely(!p))
+ return ECNT_HOOK_ERROR;
+ rcu_assign_pointer(*pp, p);
+ br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
+
+#if defined(TCSUPPORT_XPON_IGMP) || defined(TCSUPPORT_MULTICAST_SPEED)
+ igmp_hwnat_update_all(br);
+#ifdef TCSUPPORT_XPON_IGMP
+ igmp_hwnat_multicast_undrop();/*delete all drop entry*/
+#endif
+#endif
+
+found:
+#if defined(TCSUPPORT_IGMP_SNOOPING)
+ p->ageing_time = now;
+ p->leave_count = 3;
+#endif
+ mod_timer(&p->timer, now + br->multicast_membership_interval);
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_br_multicast_rcv_inline_hook(struct net_bridge *br, struct sk_buff *skb)
+{
+ const struct iphdr *iph = NULL;
+ unsigned int len = 0;
+ u8 nexthdr = 0;
+ int offset = 0;
+ __be16 frag_off = 0;
+#if IS_ENABLED(CONFIG_IPV6)
+ const struct ipv6hdr *ip6h = NULL;
+#endif
+
+#if defined(TCSUPPORT_XPON_IGMP) && defined(TCSUPPORT_MULTICAST_SPEED)
+ if(g_last_snoop_state != br->multicast_disabled)
+ {
+ if(br->multicast_disabled)
+ {
+ igmp_hwnat_clear_flows();
+ igmp_hwnat_multicast_undrop();
+ }
+ else
+ {
+ /*snoop disable to enable*/
+ clear_multicast_flood_hwentry();
+ }
+ g_last_snoop_state = br->multicast_disabled;
+ }
+#endif
+
+ return ECNT_CONTINUE;
+}
+
+static inline void ecnt_br_multicast_toggle_inline_hook(unsigned long val)
+{
+#if defined(TCSUPPORT_XPON_IGMP) && defined(TCSUPPORT_MULTICAST_SPEED)
+ if(val)
+ {
+ g_snooping_enable = 1;
+ }
+ else
+ {
+ g_snooping_enable = 0;
+ }
+
+ igmp_hwnat_clear_flows();
+ clear_multicast_flood_hwentry();
+ igmp_hwnat_multicast_undrop();
+#endif
+
+}
+
+static inline void ecnt_br_multicast_init_inline_hook(struct net_bridge *br)
+{
+ br_multicast_set_querier(br,1);
+#if defined(TCSUPPORT_XPON_IGMP)|| defined(TCSUPPORT_MULTICAST_SPEED)
+ igmp_hwnat_init(br);
+#endif
+
+ return ;
+}
+#else
+static inline void ecnt_br_multicast_del_pg_inline_hook(struct net_bridge *br,
+ struct net_bridge_port_group *pg)
+{
+ return ;
+}
+static inline void ecnt_br_multicast_new_port_group_inline_hook(struct net_bridge_port *port,struct br_ip *group,struct net_bridge_port_group *p)
+{
+ return ;
+}
+static inline int ecnt_br_multicast_add_group_inline_hook(struct net_bridge_mdb_entry *mp,struct net_bridge_port *port,
+ struct br_ip *group,struct net_bridge *br,unsigned long now)
+{
+ return 0;
+}
+static inline int ecnt_br_multicast_rcv_inline_hook(struct net_bridge *br, struct sk_buff *skb)
+{
+ return ECNT_CONTINUE;
+}
+static inline void ecnt_br_multicast_toggle_inline_hook(unsigned long val)
+{
+ return ;
+}
+static inline void ecnt_br_multicast_init_inline_hook(struct net_bridge *br)
+{
+ return ;
+}
+#endif
+
+
+/*-------------------------------------------------------------------------*/
+/* Don't forward packets to the originating port, or when forwarding is disabled */
+static inline int ecnt_should_deliver(const struct net_bridge_port *p,
+ const struct sk_buff *skb, int bind_check)
+{
+ /*
+ * bind_check==1 --> only check port bind
+ * bind_check==2 --> check port bind & check port state
+ **/
+
+ if ( bind_check )
+ {
+ if ( 1 == bind_check )
+ return 1;
+ }
+
+ return (((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
+ p->state == BR_STATE_FORWARDING);
+}
+
+static inline void ecnt_br_flood_inline_hook(struct net_bridge *br, struct sk_buff *skb )
+{
+ struct net_bridge_port *p;
+
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+#if defined(TCSUPPORT_XPON_IGMP) && defined(TCSUPPORT_MULTICAST_SPEED)
+ int igmp_snoop_flag = false;
+ int hw_flood_flag = false;
+ int switch_port = 0;
+ int port = 0;
+ int mask = 0;
+ int index = -1;
+ int proc_hw_flag = 0;
+#endif
+
+/* step 1: add this flow to the multicast flood list */
+#ifdef TCSUPPORT_RA_HWNAT
+#if defined(TCSUPPORT_XPON_IGMP) && defined(TCSUPPORT_MULTICAST_SPEED)
+ proc_hw_flag = hw_igmp_flood_enable ;
+ igmp_snoop_flag = g_snooping_enable ;
+ /*disable snooping*/
+ if(!igmp_snoop_flag && proc_hw_flag)
+ {
+ /*hgu multicast data flow*/
+ if(xpon_hgu_multicast_data_hook && xpon_hgu_multicast_data_hook(skb))
+ {
+ /*hw accelerating*/
+ index = igmp_hwnat_flow_index(skb);
+ if(index > 0)
+ {
+ hw_flood_flag = true;
+ }
+ //printk("line = %d, function= %s,index = %d.\n",__LINE__,__FUNCTION__, index);
+ }
+ }
+
+ if(hw_flood_flag)
+ {
+ add_multicast_flood_hwentry(skb);
+ }
+ else
+ {
+ if (ra_sw_nat_hook_free)
+ ra_sw_nat_hook_free(skb);
+ }
+#else
+ {
+ if (ra_sw_nat_hook_free)
+ ra_sw_nat_hook_free(skb);
+ }
+#endif
+#endif
+
+ tc3162wdog_kick();
+
+/* step 2: check which ports may not be forwarded to, and calculate the port mask */
+#if defined(TCSUPPORT_XPON_IGMP) && defined(TCSUPPORT_MULTICAST_SPEED)
+ if(hw_flood_flag)
+ {
+ list_for_each_entry_rcu(p, &br->port_list, list)
+ {
+ if( ecnt_should_deliver(p, skb, 2) )
+ {
+ /*calc mask*/
+ port = igmp_hwnat_get_port(p);
+ /*lan port*/
+ if(port >= 0 && port <= 5)
+ {
+#if defined(TCSUPPORT_CPU_MT7520) || defined(TCSUPPORT_CPU_EN7512)
+ if(MT7530LanPortMap2Switch_hook)
+ {
+ switch_port = MT7530LanPortMap2Switch_hook(port);
+ }
+ port = switch_port;
+#endif
+ mask |= 1 << port;
+ }
+ /*wifi port*/
+ else if(port >= HWNAT_WLAN_IF_BASE)
+ {
+ mask |= 1 << port;
+ }
+ }
+ }
+ }
+#endif
+
+#if defined(TCSUPPORT_XPON_IGMP) && defined(TCSUPPORT_MULTICAST_SPEED)
+ /*step3: update mask*/
+ if(hw_flood_flag)
+ {
+ update_multicast_flood_hwentry(index, mask);
+ update_multicast_flood_mask(index);
+ }
+#endif
+#endif
+
+}
+
+static inline int ecnt_br_fdb_init_inline_hook(void)
+{
+#ifdef TCSUPPORT_BRIDGE_MAC_LIMIT
+ bridgeMacLimitProcInit();
+#endif
+ return ECNT_CONTINUE;
+}
+
+
+static inline int ecnt_br_mdb_get_inline_hook
+(struct sk_buff *skb)
+{
+#ifdef TCSUPPORT_IGMPSNOOPING_ENHANCE
+ struct iphdr *iph = NULL;
+ struct udphdr *uh = NULL;
+
+ if ( !skb || htons(ETH_P_IP) != skb->protocol )
+ return ECNT_CONTINUE;
+
+ iph = ip_hdr(skb);
+ /* ignore for SSDP udp packets. */
+ if ( iph
+ && IPPROTO_UDP == iph->protocol
+ && UPNP_MCAST == iph->daddr )
+ {
+ uh = (struct udphdr *)(skb_network_header(skb) + (iph->ihl * 4));
+
+ if ( uh
+ && htons(1900) == uh->dest )
+ {
+ return ECNT_RETURN;
+ }
+ }
+#endif
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_maybe_deliver_inline_hook(struct net_bridge_port *prev, struct sk_buff *skb)
+{
+ struct net_device *indev = NULL;
+ struct net_device *outdev = NULL;
+
+ #ifdef TCSUPPORT_PORT_ISOLATION
+ #if defined(TCSUPPORT_ROUTEPOLICY_PRIOR_PORTBIND)
+ if (portbind_sw_prior_hook && (portbind_sw_prior_hook(skb) == 1))
+ #else
+ if (portbind_sw_hook && (portbind_sw_hook() == 1))
+ #endif
+ {
+ if (skb->dev)
+ indev = skb->dev;
+ if (prev)
+ outdev = prev->dev;
+
+ if ( (indev == NULL) ||
+ (outdev == NULL) ||
+ (portbind_check_hook == NULL) ||
+ ((portbind_check_hook) &&
+ portbind_check_hook(indev->name, outdev->name)) )
+ {
+ if(!checkPacketsDeliver(prev, skb, 1))
+ return -1;
+ }
+ else
+ {
+ if(!checkPacketsDeliver(prev, skb, 0))
+ return -1;
+ }
+ }
+ else
+ {
+ if(!checkPacketsDeliver(prev,skb,1))
+ return -1;
+ }
+ #endif
+
+ return 0;
+}
+
+#endif
+
Index: linux-3.18.21/net/bridge/netfilter/Kconfig
===================================================================
--- linux-3.18.21.orig/net/bridge/netfilter/Kconfig 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/bridge/netfilter/Kconfig 2018-02-05 13:21:29.000000000 +0800
@@ -166,6 +166,22 @@
To compile it as a module, choose M here. If unsure, say N.
+config BRIDGE_EBT_FTOS
+ tristate "ebt: ftos target support"
+ depends on BRIDGE_NF_EBTABLES
+ help
+ This option adds the FTOS target, which allows altering the TOS, IPP
+ or DSCP.
+ To compile it as a module, choose M here. If unsure, say N.
+
+config BRIDGE_EBT_TC
+ tristate "ebt: tc target support"
+ depends on BRIDGE_NF_EBTABLES && IPV6
+ help
+ This option adds the TC target, which allows altering the TC
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config BRIDGE_EBT_DNAT
tristate "ebt: dnat target support"
help
@@ -211,6 +227,22 @@
To compile it as a module, choose M here. If unsure, say N.
+config BRIDGE_EBT_ULOG
+ tristate "ebt: ulog support (OBSOLETE)"
+ help
+ This option enables the old bridge-specific "ebt_ulog" implementation
+ which has been obsoleted by the new "nfnetlink_log" code (see
+ CONFIG_NETFILTER_NETLINK_LOG).
+
+ This option adds the ulog watcher, that you can use in any rule
+ in any ebtables table. The packet is passed to a userspace
+ logging daemon using netlink multicast sockets. This differs
+ from the log watcher in the sense that the complete packet is
+ sent to userspace instead of a descriptive text and that
+ netlink multicast sockets are used instead of the syslog.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config BRIDGE_EBT_NFLOG
tristate "ebt: nflog support"
help
Index: linux-3.18.21/net/bridge/netfilter/Makefile
===================================================================
--- linux-3.18.21.orig/net/bridge/netfilter/Makefile 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/bridge/netfilter/Makefile 2018-02-05 13:21:29.000000000 +0800
@@ -34,7 +34,10 @@
obj-$(CONFIG_BRIDGE_EBT_DNAT) += ebt_dnat.o
obj-$(CONFIG_BRIDGE_EBT_REDIRECT) += ebt_redirect.o
obj-$(CONFIG_BRIDGE_EBT_SNAT) += ebt_snat.o
+obj-$(CONFIG_BRIDGE_EBT_FTOS) += ebt_ftos.o
+obj-$(CONFIG_BRIDGE_EBT_TC) += ebt_tc.o
# watchers
obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_log.o
+obj-$(CONFIG_BRIDGE_EBT_ULOG) += ebt_ulog.o
obj-$(CONFIG_BRIDGE_EBT_NFLOG) += ebt_nflog.o
Index: linux-3.18.21/net/bridge/netfilter/ebt_ftos.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/bridge/netfilter/ebt_ftos.c 2018-02-05 13:21:29.000000000 +0800
@@ -0,0 +1,131 @@
+#if !defined(TCSUPPORT_ORN_EBTABLES)
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <net/checksum.h>
+#include <linux/if_vlan.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter_bridge/ebtables.h>
+#include <linux/netfilter_bridge/ebt_ftos_t.h>
+#ifdef CONFIG_QOS
+#include <linux/qos_type.h>
+#endif
+//#define QOS_WANIF_MARK 0xff000
+
+#if 1
+static inline __be16 vlan_proto(const struct sk_buff *skb)
+{
+ return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+}
+static inline __be16 pppoe_proto(const struct sk_buff *skb)
+{
+ return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
+ sizeof(struct pppoe_hdr)));
+}
+#endif
+
+/*
+(struct sk_buff **pskb, unsigned int hooknr,
+ const struct net_device *in, const struct net_device *out,
+ const void *data, unsigned int datalen)
+ */
+
+static int ebt_ftos_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct ebt_ftos_info *ftosinfo = par->matchinfo;
+ struct iphdr *ih = NULL;
+ struct iphdr _iph;
+ __u8 oldtos = 0;
+#ifdef CONFIG_QOS
+ __u8 tos = 0;
+ int rule_no = 0;
+#endif
+
+#if 1 /*Rodney_20090724*/
+ if((skb)->protocol == htons(ETH_P_IP))
+ ih = (struct iphdr *)skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
+ else if(((skb)->protocol == htons(ETH_P_8021Q)) && (vlan_proto(skb) == htons(ETH_P_IP)))
+ ih = (struct iphdr *)(skb_mac_header(skb) + VLAN_ETH_HLEN);
+ else if(((skb)->protocol == htons(ETH_P_PPP_SES)) && (pppoe_proto(skb) == htons(0x0021)))
+ ih = (struct iphdr *)(skb_mac_header(skb) + ETH_HLEN +PPPOE_SES_HLEN);
+ else
+ ih = (struct iphdr *)skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
+#else
+ ih = (struct iphdr *)skb_header_pointer(*pskb, 0, sizeof(_iph), &_iph);
+#endif
+
+
+ if (!skb_make_writable(skb, sizeof(struct iphdr)))
+ return NF_DROP;
+
+ oldtos = ih->tos;
+#if 0
+ if ( (*pskb)->mark & QOS_WANIF_MARK ) {
+ tos = (ih->tos & ~ftosinfo->mask) | (ftosinfo->ftos & ftosinfo->mask);
+ (*pskb)->mark |= (tos << 18);
+ return ftosinfo->target | ~EBT_VERDICT_BITS;
+ }
+#endif
+#ifdef CONFIG_QOS
+ rule_no = (skb->mark & QOS_RULE_INDEX_MARK) >> 12;
+ if (0 == qostype_chk(EBT_CHK_TYPE, rule_no, NULL, 0)) {
+ tos = (ih->tos & ~ftosinfo->mask) | (ftosinfo->ftos & ftosinfo->mask);
+ set_tos(rule_no, tos);
+ return ftosinfo->target | ~EBT_VERDICT_BITS;
+ }
+ else {
+ unset_tos(rule_no);
+ }
+#endif
+
+ ih->tos = (ih->tos & ~ftosinfo->mask) | (ftosinfo->ftos & ftosinfo->mask);
+ csum_replace2(&ih->check, htons(oldtos), htons(ih->tos));
+
+ return ftosinfo->target | ~EBT_VERDICT_BITS;
+}
+/*
+static int ebt_target_ftos_check(const char *tablename, unsigned int hookmask,
+ const struct ebt_entry *e, void *data, unsigned int datalen)
+*/
+
+static int ebt_ftos_mt_check(const struct xt_mtchk_param *par)
+{
+ struct ebt_ftos_info *info = (struct ebt_ftos_info *)par->matchinfo;
+/*
+ if (datalen != sizeof(struct ebt_ftos_info))
+ return -EINVAL;
+*/
+ if (BASE_CHAIN && info->target == EBT_RETURN)
+ return -EINVAL;
+// CLEAR_BASE_CHAIN_BIT;
+ if (INVALID_TARGET)
+ return -EINVAL;
+ return 0;
+}
+
+static struct xt_target ftos_target __read_mostly=
+{
+ .name = "ftos",
+ .revision = 0,
+ .family = NFPROTO_BRIDGE,
+ .target = ebt_ftos_mt,
+ .checkentry = ebt_ftos_mt_check,
+ .targetsize = sizeof(struct ebt_ftos_info),
+ .me = THIS_MODULE,
+};
+
+static int __init ebt_ftos_init(void)
+{
+ return xt_register_target(&ftos_target);
+}
+
+static void __exit ebt_ftos_fini(void)
+{
+ xt_unregister_target(&ftos_target);
+}
+
+module_init(ebt_ftos_init);
+module_exit(ebt_ftos_fini);
+MODULE_LICENSE("GPL");
+#endif
Index: linux-3.18.21/net/bridge/netfilter/ebt_ip.c
===================================================================
--- linux-3.18.21.orig/net/bridge/netfilter/ebt_ip.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/bridge/netfilter/ebt_ip.c 2018-02-05 13:21:29.000000000 +0800
@@ -23,7 +23,17 @@
__be16 src;
__be16 dst;
};
-
+#if !defined(TCSUPPORT_ORN_EBTABLES)
+static inline __be16 vlan_proto(const struct sk_buff *skb)
+{
+ return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+}
+static inline __be16 pppoe_proto(const struct sk_buff *skb)
+{
+ return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
+ sizeof(struct pppoe_hdr)));
+}
+#endif
static bool
ebt_ip_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
@@ -32,13 +42,37 @@
struct iphdr _iph;
const struct tcpudphdr *pptr;
struct tcpudphdr _ports;
-
+#if !defined(TCSUPPORT_ORN_EBTABLES)
+ if(skb->protocol == htons(ETH_P_IP))
+ ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
+ else if((skb->protocol == htons(ETH_P_8021Q)) && (vlan_proto(skb) == htons(ETH_P_IP)))
+ ih = (struct iphdr *)(skb_mac_header(skb) + VLAN_ETH_HLEN);
+ else if((skb->protocol == htons(ETH_P_PPP_SES)) && (pppoe_proto(skb) == htons(0x0021)))
+ ih = (struct iphdr *)(skb_mac_header(skb) + ETH_HLEN +PPPOE_SES_HLEN);
+ else
ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
+#else
+ ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
+#endif
if (ih == NULL)
return false;
+#if !defined(TCSUPPORT_ORN_EBTABLES)
+ if (info->bitmask & EBT_IP_IPP){
+ if (FWINV((ih->tos & 0xe0) < info->ipp[0] || (ih->tos & 0xe0) > info->ipp[1], EBT_IP_IPP))
+ return false;
+ }
+ if (info->bitmask & EBT_IP_DSCP){
+ if (FWINV((ih->tos & 0xfc) < info->dscp[0] || (ih->tos & 0xfc) > info->dscp[1], EBT_IP_DSCP))
+ return false;
+ }
+ if (info->bitmask & EBT_IP_TOS &&
+ FWINV((info->tos & 0x1e) != (ih->tos & 0x1e), EBT_IP_TOS))
+#else
if (info->bitmask & EBT_IP_TOS &&
FWINV(info->tos != ih->tos, EBT_IP_TOS))
+#endif
return false;
+#if defined(TCSUPPORT_ORN_EBTABLES)
if (info->bitmask & EBT_IP_SOURCE &&
FWINV((ih->saddr & info->smsk) !=
info->saddr, EBT_IP_SOURCE))
@@ -47,16 +81,45 @@
FWINV((ih->daddr & info->dmsk) !=
info->daddr, EBT_IP_DEST))
return false;
+#else
+ /*xyzhu_20100413*/
+ if ( info->bitmask & EBT_IP_SOURCE &&
+ FWINV( ((ih->saddr & info->smsk[0]) < info->saddr[0]) ||
+ ((ih->saddr & info->smsk[0]) > info->saddr[1]), EBT_IP_SOURCE ) ) {
+ return false;
+ }
+
+ if ( info->bitmask & EBT_IP_DEST &&
+ FWINV( ((ih->daddr & info->dmsk[0]) < info->daddr[0]) ||
+ ((ih->daddr & info->dmsk[0]) > info->daddr[1]), EBT_IP_DEST ) ) {
+ return false;
+ }
+#endif
if (info->bitmask & EBT_IP_PROTO) {
+#if !defined(TCSUPPORT_ORN_EBTABLES)
+ if (FWINV((info->protocol[0] != ih->protocol) && (info->protocol[1] != ih->protocol), EBT_IP_PROTO))
+#else
if (FWINV(info->protocol != ih->protocol, EBT_IP_PROTO))
+#endif
return false;
if (!(info->bitmask & EBT_IP_DPORT) &&
!(info->bitmask & EBT_IP_SPORT))
return true;
if (ntohs(ih->frag_off) & IP_OFFSET)
return false;
+#if !defined(TCSUPPORT_ORN_EBTABLES)
+ if(skb->protocol == htons(ETH_P_IP))
+ pptr = (struct tcpudphdr *)skb_header_pointer(skb, ih->ihl*4, sizeof(_ports), &_ports);
+ else if((skb->protocol == htons(ETH_P_8021Q)) && (vlan_proto(skb) == htons(ETH_P_IP)))
+ pptr = (struct tcpudphdr *)(skb_mac_header(skb) + VLAN_ETH_HLEN + ih->ihl*4);
+ else if((skb->protocol == htons(ETH_P_PPP_SES)) && (pppoe_proto(skb) == htons(0x0021)))
+ pptr = (struct tcpudphdr *)(skb_mac_header(skb) + ETH_HLEN + PPPOE_SES_HLEN + ih->ihl*4);
+ else
+ pptr = (struct tcpudphdr *)skb_header_pointer(skb, ih->ihl*4, sizeof(_ports), &_ports);
+#else
pptr = skb_header_pointer(skb, ih->ihl*4,
sizeof(_ports), &_ports);
+#endif
if (pptr == NULL)
return false;
if (info->bitmask & EBT_IP_DPORT) {
@@ -90,11 +153,19 @@
if (info->bitmask & (EBT_IP_DPORT | EBT_IP_SPORT)) {
if (info->invflags & EBT_IP_PROTO)
return -EINVAL;
+#if !defined(TCSUPPORT_ORN_EBTABLES)
+ if (info->protocol[0] != IPPROTO_TCP && info->protocol[1] != IPPROTO_TCP &&
+ info->protocol[0] != IPPROTO_UDP && info->protocol[1] != IPPROTO_UDP &&
+ info->protocol[0] != IPPROTO_UDPLITE && info->protocol[1] != IPPROTO_UDPLITE &&
+ info->protocol[0] != IPPROTO_SCTP && info->protocol[1] != IPPROTO_SCTP &&
+ info->protocol[0] != IPPROTO_DCCP && info->protocol[1] != IPPROTO_DCCP)
+#else
if (info->protocol != IPPROTO_TCP &&
info->protocol != IPPROTO_UDP &&
info->protocol != IPPROTO_UDPLITE &&
info->protocol != IPPROTO_SCTP &&
info->protocol != IPPROTO_DCCP)
+#endif
return -EINVAL;
}
if (info->bitmask & EBT_IP_DPORT && info->dport[0] > info->dport[1])
Index: linux-3.18.21/net/bridge/netfilter/ebt_ip6.c
===================================================================
--- linux-3.18.21.orig/net/bridge/netfilter/ebt_ip6.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/bridge/netfilter/ebt_ip6.c 2018-02-05 14:20:19.000000000 +0800
@@ -21,7 +21,6 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_ip6.h>
-
union pkthdr {
struct {
__be16 src;
@@ -32,6 +31,15 @@
u8 code;
} icmphdr;
};
+static inline __be16 vlan_proto(const struct sk_buff *skb)
+{
+ return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+}
+static inline __be16 pppoe_proto(const struct sk_buff *skb)
+{
+ return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
+ sizeof(struct pppoe_hdr)));
+}
static bool
ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
@@ -41,13 +49,51 @@
struct ipv6hdr _ip6h;
const union pkthdr *pptr;
union pkthdr _pkthdr;
+ struct in6_addr tmp_addr;
+ int i;
+#if !defined(TCSUPPORT_ORN_EBTABLES)
+ if(skb->protocol == htons(ETH_P_IPV6))
+ ih6 = (struct ipv6hdr *)skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h);
+ else if((skb->protocol == htons(ETH_P_8021Q)) && (vlan_proto(skb) == htons(ETH_P_IPV6)))
+ ih6 = (struct ipv6hdr *)(skb_mac_header(skb) + VLAN_ETH_HLEN);
+	else if((skb->protocol == htons(ETH_P_PPP_SES)) && (pppoe_proto(skb) == htons(0x0057)))
+ ih6 = (struct ipv6hdr *)(skb_mac_header(skb) + ETH_HLEN +PPPOE_SES_HLEN);
+ else
+ ih6 = (struct ipv6hdr *)skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h);
+#else
ih6 = skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h);
+#endif
if (ih6 == NULL)
return false;
+#if !defined(TCSUPPORT_ORN_EBTABLES)
+ if (info->bitmask & EBT_IP6_TCLASS) {
+ __u8 tc = ipv6_get_dsfield((struct ipv6hdr *)ih6);
+ if (FWINV(tc < info->tclass[0] ||
+ tc > info->tclass[1], EBT_IP6_TCLASS))
+ return false;
+ }
+#else
if (info->bitmask & EBT_IP6_TCLASS &&
FWINV(info->tclass != ipv6_get_dsfield(ih6), EBT_IP6_TCLASS))
return false;
+#endif
+#if !defined(TCSUPPORT_ORN_EBTABLES)
+ for (i = 0; i < 4; i++)
+ tmp_addr.in6_u.u6_addr32[i] = ih6->saddr.in6_u.u6_addr32[i] &
+ info->smsk.in6_u.u6_addr32[i];
+ if (info->bitmask & EBT_IP6_SOURCE &&
+ FWINV((ipv6_addr_cmp(&tmp_addr, &info->saddr) != 0),
+ EBT_IP6_SOURCE))
+ return false;
+
+ for (i = 0; i < 4; i++)
+ tmp_addr.in6_u.u6_addr32[i] = ih6->daddr.in6_u.u6_addr32[i] &
+ info->dmsk.in6_u.u6_addr32[i];
+ if (info->bitmask & EBT_IP6_DEST &&
+ FWINV((ipv6_addr_cmp(&tmp_addr, &info->daddr) != 0), EBT_IP6_DEST))
+ return false;
+#else
if ((info->bitmask & EBT_IP6_SOURCE &&
FWINV(ipv6_masked_addr_cmp(&ih6->saddr, &info->smsk,
&info->saddr), EBT_IP6_SOURCE)) ||
@@ -55,6 +101,7 @@
FWINV(ipv6_masked_addr_cmp(&ih6->daddr, &info->dmsk,
&info->daddr), EBT_IP6_DEST)))
return false;
+#endif
if (info->bitmask & EBT_IP6_PROTO) {
uint8_t nexthdr = ih6->nexthdr;
__be16 frag_off;
@@ -63,15 +110,31 @@
offset_ph = ipv6_skip_exthdr(skb, sizeof(_ip6h), &nexthdr, &frag_off);
if (offset_ph == -1)
return false;
+#if !defined(TCSUPPORT_ORN_EBTABLES)
+ if ( FWINV((info->protocol[0] != nexthdr) && (info->protocol[1] != nexthdr), EBT_IP6_PROTO) )
+ return false;
+#else
if (FWINV(info->protocol != nexthdr, EBT_IP6_PROTO))
return false;
+#endif
if (!(info->bitmask & ( EBT_IP6_DPORT |
- EBT_IP6_SPORT | EBT_IP6_ICMP6)))
+ EBT_IP6_SPORT | EBT_IP6_ICMP6))
+ ){
return true;
-
- /* min icmpv6 headersize is 4, so sizeof(_pkthdr) is ok. */
+ }
+#if !defined(TCSUPPORT_ORN_EBTABLES)
+ if(skb->protocol == htons(ETH_P_IPV6))
+ pptr = skb_header_pointer(skb, offset_ph, sizeof(_pkthdr), &_pkthdr);
+ else if((skb->protocol == htons(ETH_P_8021Q)) && (vlan_proto(skb) == htons(ETH_P_IPV6)))
+ pptr = (union pkthdr *)(skb_mac_header(skb) + VLAN_ETH_HLEN + offset_ph);
+	else if((skb->protocol == htons(ETH_P_PPP_SES)) && (pppoe_proto(skb) == htons(0x0057)))
+ pptr = (union pkthdr *)(skb_mac_header(skb) + ETH_HLEN + PPPOE_SES_HLEN + offset_ph);
+ else
+ pptr = skb_header_pointer(skb, offset_ph, sizeof(_pkthdr), &_pkthdr);
+#else
pptr = skb_header_pointer(skb, offset_ph, sizeof(_pkthdr),
&_pkthdr);
+#endif
if (pptr == NULL)
return false;
if (info->bitmask & EBT_IP6_DPORT) {
@@ -109,11 +172,19 @@
if (info->bitmask & (EBT_IP6_DPORT | EBT_IP6_SPORT)) {
if (info->invflags & EBT_IP6_PROTO)
return -EINVAL;
+#if !defined(TCSUPPORT_ORN_EBTABLES)
+ if (info->protocol[0] != IPPROTO_TCP && info->protocol[1] != IPPROTO_TCP &&
+ info->protocol[0] != IPPROTO_UDP && info->protocol[1] != IPPROTO_UDP &&
+ info->protocol[0] != IPPROTO_UDPLITE && info->protocol[1] != IPPROTO_UDPLITE &&
+ info->protocol[0] != IPPROTO_SCTP && info->protocol[1] != IPPROTO_SCTP &&
+ info->protocol[0] != IPPROTO_DCCP && info->protocol[1] != IPPROTO_DCCP )
+#else
if (info->protocol != IPPROTO_TCP &&
info->protocol != IPPROTO_UDP &&
info->protocol != IPPROTO_UDPLITE &&
info->protocol != IPPROTO_SCTP &&
info->protocol != IPPROTO_DCCP)
+#endif
return -EINVAL;
}
if (info->bitmask & EBT_IP6_DPORT && info->dport[0] > info->dport[1])
Index: linux-3.18.21/net/bridge/netfilter/ebt_tc.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/bridge/netfilter/ebt_tc.c 2018-02-05 13:21:29.000000000 +0800
@@ -0,0 +1,134 @@
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <linux/in.h>
+#include <net/dsfield.h>
+#include <linux/if_vlan.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter_bridge/ebtables.h>
+#include <linux/netfilter_bridge/ebt_tc.h>
+#ifdef CONFIG_QOS
+#include <linux/qos_type.h>
+#endif
+//#define QOS_WANIF_MARK 0xff000
+
+#if 1
+static inline __be16 vlan_proto(const struct sk_buff *skb)
+{
+ return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+}
+static inline __be16 pppoe_proto(const struct sk_buff *skb)
+{
+ return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
+ sizeof(struct pppoe_hdr)));
+}
+#endif
+/*
+static int ebt_target_tc(struct sk_buff **pskb, unsigned int hooknr,
+ const struct net_device *in, const struct net_device *out,
+ const void *data, unsigned int datalen)
+ */
+
+static int
+ebt_tc_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ struct ebt_tc_info *tcinfo = par->matchinfo;
+ const struct ipv6hdr *ih6 = NULL;
+ struct ipv6hdr _ip6h;
+
+#ifdef CONFIG_QOS
+ __u8 tos = 0;
+ int rule_no = 0;
+#endif
+
+#if 1
+ if(skb->protocol == htons(ETH_P_IPV6))
+ ih6 = (struct ipv6hdr *)skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h);
+ else if((skb->protocol == htons(ETH_P_8021Q)) && (vlan_proto(skb) == htons(ETH_P_IPV6)))
+ ih6 = (struct ipv6hdr *)(skb_mac_header(skb) + VLAN_ETH_HLEN);
+	else if((skb->protocol == htons(ETH_P_PPP_SES)) && (pppoe_proto(skb) == htons(0x0057)))
+ ih6 = (struct ipv6hdr *)(skb_mac_header(skb) + ETH_HLEN +PPPOE_SES_HLEN);
+ else
+ ih6 = (struct ipv6hdr *)skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h);
+#else
+ ih6 = skb_header_pointer(*pskb, 0, sizeof(_ip6h), &_ip6h);
+
+#endif
+
+ if ( ih6 == NULL )
+ return NF_DROP;
+
+ if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
+ return NF_DROP;
+
+#if 0
+ if ( (*pskb)->mark & QOS_WANIF_MARK ) {
+ tos = tcinfo->tc;
+ //printk("tos is %d\n", tos);
+ (*pskb)->mark |= (tos << 20);
+ //printk("mark is %x\n", (tos<<20));
+ return tcinfo->target | ~EBT_VERDICT_BITS;
+ }
+#endif
+#ifdef CONFIG_QOS
+ rule_no = ((skb)->mark & QOS_RULE_INDEX_MARK) >> 12;
+ if (0 == qostype_chk(EBT_CHK_TYPE, rule_no, NULL, 0)) {
+ tos = tcinfo->tc;
+ set_tos(rule_no, tos);
+ return tcinfo->target | ~EBT_VERDICT_BITS;
+ }
+ else {
+ unset_tos(rule_no);
+ }
+#endif
+ ipv6_change_dsfield((struct ipv6hdr *)ih6, /*0xFF*/0x0, tcinfo->tc);
+
+ return tcinfo->target | ~EBT_VERDICT_BITS;
+}
+
+/*
+static int ebt_target_tc_check(const char *tablename, unsigned int hookmask,
+ const struct ebt_entry *e, void *data, unsigned int datalen)
+*/
+static int ebt_tc_mt_check(const struct xt_mtchk_param *par)
+{
+ struct ebt_tc_info *info = par->matchinfo;
+
+ //printk("==>ebt_target_tc_check(): info->tc is 0x%x\n", info->tc);
+/*
+ if (datalen != sizeof(struct ebt_tc_info))
+ return -EINVAL;
+*/
+ if (BASE_CHAIN && info->target == EBT_RETURN)
+ return -EINVAL;
+// CLEAR_BASE_CHAIN_BIT;
+ if (INVALID_TARGET)
+ return -EINVAL;
+ return 0;
+}
+
+static struct xt_target tc_target __read_mostly=
+{
+ .name = "tc",
+ .revision = 0,
+ .family = NFPROTO_BRIDGE,
+ .target = ebt_tc_mt,
+ .checkentry = ebt_tc_mt_check,
+ .targetsize = sizeof(struct ebt_tc_info),
+ .me = THIS_MODULE,
+};
+
+static int __init ebt_tc_init(void)
+{
+ return xt_register_target(&tc_target);
+}
+
+static void __exit ebt_tc_fini(void)
+{
+ xt_unregister_target(&tc_target);
+}
+
+module_init(ebt_tc_init);
+module_exit(ebt_tc_fini);
+MODULE_LICENSE("GPL");
Index: linux-3.18.21/net/bridge/netfilter/ebt_ulog.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/bridge/netfilter/ebt_ulog.c 2018-02-05 13:21:29.000000000 +0800
@@ -0,0 +1,342 @@
+/*
+ * netfilter module for userspace bridged Ethernet frames logging daemons
+ *
+ * Authors:
+ * Bart De Schuymer <bdschuym@pandora.be>
+ * Harald Welte <laforge@netfilter.org>
+ *
+ * November, 2004
+ *
+ * Based on ipt_ULOG.c, which is
+ * (C) 2000-2002 by Harald Welte <laforge@netfilter.org>
+ *
+ * This module accepts two parameters:
+ *
+ * nlbufsiz:
+ * The parameter specifies how big the buffer for each netlink multicast
+ * group is. e.g. If you say nlbufsiz=8192, up to eight kb of packets will
+ * get accumulated in the kernel until they are sent to userspace. It is
+ * NOT possible to allocate more than 128kB, and it is strongly discouraged,
+ * because atomically allocating 128kB inside the network rx softirq is not
+ * reliable. Please also keep in mind that this buffer size is allocated for
+ * each nlgroup you are using, so the total kernel memory usage increases
+ * by that factor.
+ *
+ * flushtimeout:
+ * Specify, after how many hundredths of a second the queue should be
+ * flushed even if it is not full yet.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/socket.h>
+#include <linux/skbuff.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/netlink.h>
+#include <linux/netdevice.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter_bridge/ebtables.h>
+#include <linux/netfilter_bridge/ebt_ulog.h>
+#include <net/netfilter/nf_log.h>
+#include <net/sock.h>
+#include "../br_private.h"
+
+static unsigned int nlbufsiz = NLMSG_GOODSIZE;
+module_param(nlbufsiz, uint, 0600);
+MODULE_PARM_DESC(nlbufsiz, "netlink buffer size (number of bytes) "
+ "(defaults to 4096)");
+
+static unsigned int flushtimeout = 10;
+module_param(flushtimeout, uint, 0600);
+MODULE_PARM_DESC(flushtimeout, "buffer flush timeout (hundredths ofa second) "
+ "(defaults to 10)");
+
+typedef struct {
+ unsigned int qlen; /* number of nlmsgs' in the skb */
+ struct nlmsghdr *lastnlh; /* netlink header of last msg in skb */
+ struct sk_buff *skb; /* the pre-allocated skb */
+ struct timer_list timer; /* the timer function */
+ spinlock_t lock; /* the per-queue lock */
+} ebt_ulog_buff_t;
+
+static ebt_ulog_buff_t ulog_buffers[EBT_ULOG_MAXNLGROUPS];
+static struct sock *ebtulognl;
+
+/* send one ulog_buff_t to userspace */
+static void ulog_send(unsigned int nlgroup)
+{
+ ebt_ulog_buff_t *ub = &ulog_buffers[nlgroup];
+
+ if (timer_pending(&ub->timer))
+ del_timer(&ub->timer);
+
+ if (!ub->skb)
+ return;
+
+ /* last nlmsg needs NLMSG_DONE */
+ if (ub->qlen > 1)
+ ub->lastnlh->nlmsg_type = NLMSG_DONE;
+
+ NETLINK_CB(ub->skb).dst_group = nlgroup + 1;
+ netlink_broadcast(ebtulognl, ub->skb, 0, nlgroup + 1, GFP_ATOMIC);
+
+ ub->qlen = 0;
+ ub->skb = NULL;
+}
+
+/* timer function to flush queue in flushtimeout time */
+static void ulog_timer(unsigned long data)
+{
+ spin_lock_bh(&ulog_buffers[data].lock);
+ if (ulog_buffers[data].skb)
+ ulog_send(data);
+ spin_unlock_bh(&ulog_buffers[data].lock);
+}
+
+static struct sk_buff *ulog_alloc_skb(unsigned int size)
+{
+ struct sk_buff *skb;
+ unsigned int n;
+
+ n = max(size, nlbufsiz);
+ skb = alloc_skb(n, GFP_ATOMIC);
+ if (!skb) {
+ pr_debug("cannot alloc whole buffer of size %ub!\n", n);
+ if (n > size) {
+ /* try to allocate only as much as we need for
+ * current packet */
+ skb = alloc_skb(size, GFP_ATOMIC);
+ if (!skb)
+ pr_debug("cannot even allocate "
+ "buffer of size %ub\n", size);
+ }
+ }
+
+ return skb;
+}
+
+static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
+ const struct net_device *in, const struct net_device *out,
+ const struct ebt_ulog_info *uloginfo, const char *prefix)
+{
+ ebt_ulog_packet_msg_t *pm;
+ size_t size, copy_len;
+ struct nlmsghdr *nlh;
+ unsigned int group = uloginfo->nlgroup;
+ ebt_ulog_buff_t *ub = &ulog_buffers[group];
+ spinlock_t *lock = &ub->lock;
+ ktime_t kt;
+
+ if ((uloginfo->cprange == 0) ||
+ (uloginfo->cprange > skb->len + ETH_HLEN))
+ copy_len = skb->len + ETH_HLEN;
+ else
+ copy_len = uloginfo->cprange;
+
+ size = NLMSG_SPACE(sizeof(*pm) + copy_len);
+ if (size > nlbufsiz) {
+ pr_debug("Size %Zd needed, but nlbufsiz=%d\n", size, nlbufsiz);
+ return;
+ }
+
+ spin_lock_bh(lock);
+
+ if (!ub->skb) {
+ if (!(ub->skb = ulog_alloc_skb(size)))
+ goto alloc_failure;
+ } else if (size > skb_tailroom(ub->skb)) {
+ ulog_send(group);
+
+ if (!(ub->skb = ulog_alloc_skb(size)))
+ goto alloc_failure;
+ }
+
+ nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, 0,
+ size - NLMSG_ALIGN(sizeof(*nlh)));
+ ub->qlen++;
+
+ pm = NLMSG_DATA(nlh);
+
+ /* Fill in the ulog data */
+ pm->version = EBT_ULOG_VERSION;
+ kt = ktime_get_real();
+ pm->stamp = ktime_to_timeval(kt);
+ if (ub->qlen == 1)
+ ub->skb->tstamp = kt;
+ pm->data_len = copy_len;
+ pm->mark = skb->mark;
+ pm->hook = hooknr;
+ if (uloginfo->prefix != NULL)
+ strcpy(pm->prefix, uloginfo->prefix);
+ else
+ *(pm->prefix) = '\0';
+
+ if (in) {
+ strcpy(pm->physindev, in->name);
+ /* If in isn't a bridge, then physindev==indev */
+ if (br_port_exists(in))
+ /* rcu_read_lock()ed by nf_hook_slow */
+ strcpy(pm->indev, br_port_get_rcu(in)->br->dev->name);
+ else
+ strcpy(pm->indev, in->name);
+ } else
+ pm->indev[0] = pm->physindev[0] = '\0';
+
+ if (out) {
+ /* If out exists, then out is a bridge port */
+ strcpy(pm->physoutdev, out->name);
+ /* rcu_read_lock()ed by nf_hook_slow */
+ strcpy(pm->outdev, br_port_get_rcu(out)->br->dev->name);
+ } else
+ pm->outdev[0] = pm->physoutdev[0] = '\0';
+
+ if (skb_copy_bits(skb, -ETH_HLEN, pm->data, copy_len) < 0)
+ BUG();
+
+ if (ub->qlen > 1)
+ ub->lastnlh->nlmsg_flags |= NLM_F_MULTI;
+
+ ub->lastnlh = nlh;
+
+ if (ub->qlen >= uloginfo->qthreshold)
+ ulog_send(group);
+ else if (!timer_pending(&ub->timer)) {
+ ub->timer.expires = jiffies + flushtimeout * HZ / 100;
+ add_timer(&ub->timer);
+ }
+
+unlock:
+ spin_unlock_bh(lock);
+
+ return;
+
+nlmsg_failure:
+ pr_debug("error during NLMSG_PUT. This should "
+ "not happen, please report to author.\n");
+ goto unlock;
+alloc_failure:
+ goto unlock;
+}
+
+/* this function is registered with the netfilter core */
+static void ebt_log_packet(u_int8_t pf, unsigned int hooknum,
+ const struct sk_buff *skb, const struct net_device *in,
+ const struct net_device *out, const struct nf_loginfo *li,
+ const char *prefix)
+{
+ struct ebt_ulog_info loginfo;
+
+ if (!li || li->type != NF_LOG_TYPE_ULOG) {
+ loginfo.nlgroup = EBT_ULOG_DEFAULT_NLGROUP;
+ loginfo.cprange = 0;
+ loginfo.qthreshold = EBT_ULOG_DEFAULT_QTHRESHOLD;
+ loginfo.prefix[0] = '\0';
+ } else {
+ loginfo.nlgroup = li->u.ulog.group;
+ loginfo.cprange = li->u.ulog.copy_len;
+ loginfo.qthreshold = li->u.ulog.qthreshold;
+ strlcpy(loginfo.prefix, prefix, sizeof(loginfo.prefix));
+ }
+
+ ebt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix);
+}
+
+static unsigned int
+ebt_ulog_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ ebt_ulog_packet(par->hooknum, skb, par->in, par->out,
+ par->targinfo, NULL);
+ return EBT_CONTINUE;
+}
+
+static int ebt_ulog_tg_check(const struct xt_tgchk_param *par)
+{
+ struct ebt_ulog_info *uloginfo = par->targinfo;
+
+ if (uloginfo->nlgroup > 31)
+ return -EINVAL;
+
+ uloginfo->prefix[EBT_ULOG_PREFIX_LEN - 1] = '\0';
+
+ if (uloginfo->qthreshold > EBT_ULOG_MAX_QLEN)
+ uloginfo->qthreshold = EBT_ULOG_MAX_QLEN;
+
+ return 0;
+}
+
+static struct xt_target ebt_ulog_tg_reg __read_mostly = {
+ .name = "ulog",
+ .revision = 0,
+ .family = NFPROTO_BRIDGE,
+ .target = ebt_ulog_tg,
+ .checkentry = ebt_ulog_tg_check,
+ .targetsize = sizeof(struct ebt_ulog_info),
+ .me = THIS_MODULE,
+};
+
+static struct nf_logger ebt_ulog_logger __read_mostly = {
+ .name = "ebt_ulog",
+ .logfn = &ebt_log_packet,
+ .me = THIS_MODULE,
+};
+
+static int __init ebt_ulog_init(void)
+{
+ int ret;
+ int i;
+
+ if (nlbufsiz >= 128*1024) {
+ pr_warning("Netlink buffer has to be <= 128kB,"
+ " please try a smaller nlbufsiz parameter.\n");
+ return -EINVAL;
+ }
+
+ /* initialize ulog_buffers */
+ for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) {
+ setup_timer(&ulog_buffers[i].timer, ulog_timer, i);
+ spin_lock_init(&ulog_buffers[i].lock);
+ }
+
+ ebtulognl = netlink_kernel_create(&init_net, NETLINK_NFLOG,
+ EBT_ULOG_MAXNLGROUPS, NULL, NULL,
+ THIS_MODULE);
+ if (!ebtulognl)
+ ret = -ENOMEM;
+ else if ((ret = xt_register_target(&ebt_ulog_tg_reg)) != 0)
+ netlink_kernel_release(ebtulognl);
+
+ if (ret == 0)
+ nf_log_register(NFPROTO_BRIDGE, &ebt_ulog_logger);
+
+ return ret;
+}
+
+static void __exit ebt_ulog_fini(void)
+{
+ ebt_ulog_buff_t *ub;
+ int i;
+
+ nf_log_unregister(&ebt_ulog_logger);
+ xt_unregister_target(&ebt_ulog_tg_reg);
+ for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) {
+ ub = &ulog_buffers[i];
+ if (timer_pending(&ub->timer))
+ del_timer(&ub->timer);
+ spin_lock_bh(&ub->lock);
+ if (ub->skb) {
+ kfree_skb(ub->skb);
+ ub->skb = NULL;
+ }
+ spin_unlock_bh(&ub->lock);
+ }
+ netlink_kernel_release(ebtulognl);
+}
+
+module_init(ebt_ulog_init);
+module_exit(ebt_ulog_fini);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
+MODULE_DESCRIPTION("Ebtables: Packet logging to netlink using ULOG");
Index: linux-3.18.21/net/bridge/netfilter/ebt_vlan.c
===================================================================
--- linux-3.18.21.orig/net/bridge/netfilter/ebt_vlan.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/bridge/netfilter/ebt_vlan.c 2018-02-05 13:21:29.000000000 +0800
@@ -72,11 +72,25 @@
/* Checking VLAN Identifier (VID) */
if (GET_BITMASK(EBT_VLAN_ID))
+#if !defined(TCSUPPORT_ORN_EBTABLES)
+ {
+ if (FWINV(id < info->id[0] || id > info->id[1], EBT_VLAN_ID))
+ return false;
+ }
+#else
EXIT_ON_MISMATCH(id, EBT_VLAN_ID);
+#endif
/* Checking user_priority */
if (GET_BITMASK(EBT_VLAN_PRIO))
+#if !defined(TCSUPPORT_ORN_EBTABLES)
+ {
+ if (FWINV(prio < info->prio[0] || prio > info->prio[1], EBT_VLAN_PRIO))
+ return false;
+ }
+#else
EXIT_ON_MISMATCH(prio, EBT_VLAN_PRIO);
+#endif
/* Checking Encapsulated Proto (Length/Type) field */
if (GET_BITMASK(EBT_VLAN_ENCAP))
@@ -119,6 +133,15 @@
* 0x0FFF - Reserved for implementation use.
* if_vlan.h: VLAN_N_VID 4096. */
if (GET_BITMASK(EBT_VLAN_ID)) {
+#if !defined(TCSUPPORT_ORN_EBTABLES)
+ if((info->id[0] !=0) || (info->id[1] != 0)){ /* if id!=0 => check vid range */
+ if((info->id[0] > VLAN_N_VID) || (info->id[1] > VLAN_N_VID)
+ || (info->id[0] > info->id[1])){
+ pr_debug("Vlan id is out of range (1-4096)\n");
+ return -EINVAL;
+ }
+ }
+#else
if (!!info->id) { /* if id!=0 => check vid range */
if (info->id > VLAN_N_VID) {
pr_debug("id %d is out of range (1-4096)\n",
@@ -132,14 +155,23 @@
info->bitmask &= ~EBT_VLAN_PRIO;
}
/* Else, id=0 (null VLAN ID) => user_priority range (any?) */
+#endif
}
if (GET_BITMASK(EBT_VLAN_PRIO)) {
+#if !defined(TCSUPPORT_ORN_EBTABLES)
+ if ((info->prio[0] > 7) || (info->prio[1] > 7)
+ || (info->prio[0] > info->prio[1])){
+ pr_debug("prio is out of range!!\n");
+ return -EINVAL;
+ }
+#else
if ((unsigned char) info->prio > 7) {
pr_debug("prio %d is out of range (0-7)\n",
info->prio);
return -EINVAL;
}
+#endif
}
/* Check for encapsulated proto range - it is possible to be
* any value for u_short range.
Index: linux-3.18.21/net/bridge/netfilter/ebtables.c
===================================================================
--- linux-3.18.21.orig/net/bridge/netfilter/ebtables.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/bridge/netfilter/ebtables.c 2018-02-05 13:21:29.000000000 +0800
@@ -124,6 +124,20 @@
#define FWINV2(bool, invflg) ((bool) ^ !!(e->invflags & invflg))
/* process standard matches */
+#if !defined(TCSUPPORT_ORN_EBTABLES)
+inline __be16 vlan_proto(const struct sk_buff *skb)
+{
+ return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+}
+inline __be16 pppoe_proto(const struct sk_buff *skb)
+{
+ return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
+ sizeof(struct pppoe_hdr)));
+}
+#endif
+
+
+
static inline int
ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
const struct net_device *in, const struct net_device *out)
@@ -132,6 +146,9 @@
const struct net_bridge_port *p;
__be16 ethproto;
int verdict, i;
+#if !defined(TCSUPPORT_ORN_EBTABLES)
+ int ipv4_packet = 0, ipv6_packet = 0;
+#endif
if (vlan_tx_tag_present(skb))
ethproto = htons(ETH_P_8021Q);
@@ -141,10 +158,32 @@
if (e->bitmask & EBT_802_3) {
if (FWINV2(ntohs(ethproto) >= ETH_P_802_3_MIN, EBT_IPROTO))
return 1;
+#if !defined(TCSUPPORT_ORN_EBTABLES)
+ } else if (!(e->bitmask & EBT_NOPROTO) &&
+ FWINV2(e->ethproto != h->h_proto, EBT_IPROTO)){
+ if((skb->protocol == htons(ETH_P_IP))||
+ ((skb->protocol == htons(ETH_P_8021Q))&&(vlan_proto(skb) == htons(ETH_P_IP)))||
+ ((skb->protocol == htons(ETH_P_PPP_SES))&&(pppoe_proto(skb) == htons(0x0021)))) /*0x0021: refer to include/linux/ppp_defs.h #define PPP_IP*/
+ ipv4_packet = 1; /*ipv4 packet*/
+ else
+ ipv4_packet = 0;
+
+ if((skb->protocol == htons(ETH_P_IPV6))||
+ ((skb->protocol == htons(ETH_P_8021Q))&&(vlan_proto(skb) == htons(ETH_P_IPV6)))||
+	((skb->protocol == htons(ETH_P_PPP_SES))&&(pppoe_proto(skb) == htons(0x0057)))) /*0x0057: refer to include/linux/ppp_defs.h #define PPP_IPV6*/
+ ipv6_packet = 1; /*ipv6 packet*/
+ else
+ ipv6_packet = 0;
+
+ if(!(((e->ethproto==ETH_P_8021Q)&&(skb->mark & EBT_VLAN_MARK))||
+ ((e->ethproto==ETH_P_IP)&&(ipv4_packet)) || ((e->ethproto == ETH_P_IPV6) && (ipv6_packet))))
+ return 1;
+ }
+#else
} else if (!(e->bitmask & EBT_NOPROTO) &&
FWINV2(e->ethproto != ethproto, EBT_IPROTO))
return 1;
-
+#endif
if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
return 1;
if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
@@ -157,22 +196,60 @@
FWINV2(ebt_dev_check(e->logical_out, p->br->dev), EBT_ILOGICALOUT))
return 1;
- if (e->bitmask & EBT_SOURCEMAC) {
+#if defined(TCSUPPORT_PON_MAC_FILTER)
+ if (e->bitmask & EBT_SOURCEMACSTART)
+ {
verdict = 0;
for (i = 0; i < 6; i++)
- verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
- e->sourcemsk[i];
- if (FWINV2(verdict != 0, EBT_ISOURCE) )
+ {
+ if(h->h_source[i] < e->sourcemacstart[i] || h->h_source[i] > e->sourcemacend[i])
+ {
+ verdict = 1;
+ break;
+ }
+ }
+ if (FWINV2(verdict != 0, EBT_ISOURCESTART) )
return 1;
+ }else{
+#endif
+ if (e->bitmask & EBT_SOURCEMAC) {
+ verdict = 0;
+ for (i = 0; i < 6; i++)
+ verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
+ e->sourcemsk[i];
+ if (FWINV2(verdict != 0, EBT_ISOURCE) )
+ return 1;
+ }
+#if defined(TCSUPPORT_PON_MAC_FILTER)
}
- if (e->bitmask & EBT_DESTMAC) {
+#endif
+#if defined(TCSUPPORT_PON_MAC_FILTER)
+ if (e->bitmask & EBT_DESTMACSTART)
+ {
verdict = 0;
for (i = 0; i < 6; i++)
- verdict |= (h->h_dest[i] ^ e->destmac[i]) &
- e->destmsk[i];
- if (FWINV2(verdict != 0, EBT_IDEST) )
+ {
+ if((h->h_dest[i] < e->destmacstart[i]) || (h->h_dest[i] > e->destmacend[i]))
+ {
+ verdict = 1;
+ break;
+ }
+ }
+ if (FWINV2(verdict != 0, EBT_IDESTSTART) )
return 1;
+ }else{
+#endif
+ if (e->bitmask & EBT_DESTMAC) {
+ verdict = 0;
+ for (i = 0; i < 6; i++)
+ verdict |= (h->h_dest[i] ^ e->destmac[i]) &
+ e->destmsk[i];
+ if (FWINV2(verdict != 0, EBT_IDEST) )
+ return 1;
+ }
+#if defined(TCSUPPORT_PON_MAC_FILTER)
}
+#endif
return 0;
}
@@ -220,7 +297,9 @@
base = private->entries;
i = 0;
while (i < nentries) {
+
if (ebt_basic_match(point, skb, in, out))
+
goto letscontinue;
if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
Index: linux-3.18.21/net/core/Makefile
===================================================================
--- linux-3.18.21.orig/net/core/Makefile 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/core/Makefile 2018-02-05 13:21:30.000000000 +0800
@@ -16,6 +16,7 @@
obj-$(CONFIG_PROC_FS) += net-procfs.o
obj-$(CONFIG_NET_PKTGEN) += pktgen.o
obj-$(CONFIG_NETPOLL) += netpoll.o
+obj-$(CONFIG_QOS) += qos_type.o
obj-$(CONFIG_FIB_RULES) += fib_rules.o
obj-$(CONFIG_TRACEPOINTS) += net-traces.o
obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o
@@ -23,3 +24,8 @@
obj-$(CONFIG_NET_PTP_CLASSIFY) += ptp_classifier.o
obj-$(CONFIG_CGROUP_NET_PRIO) += netprio_cgroup.o
obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o
+obj-y += ecnt_net_utility.o
+obj-y += ecnt_skb_alloc.o
+ifneq ($(strip $(TCSUPPORT_CT_VOIP_QOS)),)
+obj-$(TCSUPPORT_VOIP) += ecnt_voip_proc.o
+endif
\ No newline at end of file
Index: linux-3.18.21/net/core/dev.c
===================================================================
--- linux-3.18.21.orig/net/core/dev.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/core/dev.c 2018-02-05 13:21:30.000000000 +0800
@@ -133,6 +133,7 @@
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
+#include "ecnt_net_core.h"
#include "net-sysfs.h"
@@ -153,6 +154,18 @@
struct net_device *dev,
struct netdev_notifier_info *info);
+#if defined(TCSUPPORT_WAN2LAN_EXT)
+u32 (*gswPbusRead_hook)(u32 pbus_addr);
+int (*gswPbusWrite_hook)(u32 pbus_addr, u32 pbus_data);
+int (*lanPortmap_hook)(int lanPort);
+int (*lanPortmap_rev_hook)(int switchPort);
+EXPORT_SYMBOL(gswPbusRead_hook);
+EXPORT_SYMBOL(gswPbusWrite_hook);
+EXPORT_SYMBOL(lanPortmap_hook);
+EXPORT_SYMBOL(lanPortmap_rev_hook);
+#endif
+
+extern void tc3162wdog_kick(void);
/*
* The @dev_base_head list is protected by @dev_base_lock and the rtnl
* semaphore.
@@ -2225,7 +2238,7 @@
}
EXPORT_SYMBOL(netif_tx_wake_queue);
-void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
+__IMEM void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
unsigned long flags;
@@ -2244,7 +2257,7 @@
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);
-void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
+__IMEM void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
{
if (in_irq() || irqs_disabled())
__dev_kfree_skb_irq(skb, reason);
@@ -2631,7 +2644,7 @@
return rc;
}
-struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
+__IMEM struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
struct netdev_queue *txq, int *ret)
{
struct sk_buff *skb = first;
@@ -2680,7 +2693,11 @@
if (unlikely(!skb))
goto out_null;
+#if defined(TCSUPPORT_TSO_ENABLE)
+ if (netif_needs_gso(dev, skb, features)&& (!(skb->tso_mark & TSO_ENABLE_MARK))) {
+#else
if (netif_needs_gso(dev, skb, features)) {
+#endif
struct sk_buff *segs;
segs = skb_gso_segment(skb, features);
@@ -2706,8 +2723,13 @@
else
skb_set_transport_header(skb,
skb_checksum_start_offset(skb));
- if (!(features & NETIF_F_ALL_CSUM) &&
- skb_checksum_help(skb))
+#if defined(TCSUPPORT_TSO_ENABLE)
+ if (!(features & NETIF_F_ALL_CSUM)&& (!(skb->tso_mark & TSO_ENABLE_MARK)) &&
+ skb_checksum_help(skb))
+#else
+ if (!(features & NETIF_F_ALL_CSUM)&&
+ skb_checksum_help(skb))
+#endif
goto out_kfree_skb;
}
}
@@ -2901,12 +2923,17 @@
* the BH enable code must have IRQs enabled so that it will not deadlock.
* --BLG
*/
-static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
+__IMEM static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
{
struct net_device *dev = skb->dev;
struct netdev_queue *txq;
struct Qdisc *q;
int rc = -ENOMEM;
+ int ret;
+
+ ret = ecnt_dev_queue_xmit_inline_hook(&skb);
+ if(ret != ECNT_CONTINUE)
+ return NET_XMIT_DROP;
skb_reset_mac_header(skb);
@@ -3000,7 +3027,7 @@
return rc;
}
-int dev_queue_xmit(struct sk_buff *skb)
+__IMEM int dev_queue_xmit(struct sk_buff *skb)
{
return __dev_queue_xmit(skb, NULL);
}
@@ -3388,7 +3415,7 @@
*
*/
-int netif_rx(struct sk_buff *skb)
+__IMEM int netif_rx(struct sk_buff *skb)
{
trace_netif_rx_entry(skb);
@@ -3615,7 +3642,7 @@
}
}
-static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
+__IMEM static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
{
struct packet_type *ptype, *pt_prev;
rx_handler_func_t *rx_handler;
@@ -3631,6 +3658,9 @@
orig_dev = skb->dev;
+ ret = ecnt_netif_recv_inline_hook(&skb);
+ if(ret != ECNT_CONTINUE)
+ return NET_RX_DROP;
skb_reset_network_header(skb);
if (!skb_transport_header_was_set(skb))
skb_reset_transport_header(skb);
@@ -3639,7 +3669,6 @@
pt_prev = NULL;
rcu_read_lock();
-
another_round:
skb->skb_iif = skb->dev->ifindex;
@@ -3758,7 +3787,7 @@
return ret;
}
-static int __netif_receive_skb(struct sk_buff *skb)
+__IMEM static int __netif_receive_skb(struct sk_buff *skb)
{
int ret;
@@ -3783,7 +3812,7 @@
return ret;
}
-static int netif_receive_skb_internal(struct sk_buff *skb)
+__IMEM static int netif_receive_skb_internal(struct sk_buff *skb)
{
net_timestamp_check(netdev_tstamp_prequeue, skb);
@@ -3796,7 +3825,9 @@
int cpu, ret;
rcu_read_lock();
-
+ if (ra_sw_nat_hook_magic && ra_sw_nat_hook_magic(skb,0xbeaf))
+ cpu = 1;
+ else
cpu = get_rps_cpu(skb->dev, skb, &rflow);
if (cpu >= 0) {
@@ -3825,7 +3856,7 @@
* NET_RX_SUCCESS: no congestion
* NET_RX_DROP: packet was dropped
*/
-int netif_receive_skb(struct sk_buff *skb)
+__IMEM int netif_receive_skb(struct sk_buff *skb)
{
trace_netif_receive_skb_entry(skb);
@@ -4323,7 +4354,7 @@
local_irq_enable();
}
-static int process_backlog(struct napi_struct *napi, int quota)
+__IMEM static int process_backlog(struct napi_struct *napi, int quota)
{
int work = 0;
struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
@@ -4344,7 +4375,14 @@
while ((skb = __skb_dequeue(&sd->process_queue))) {
local_irq_enable();
+ if (ra_sw_nat_hook_magic && ra_sw_nat_hook_magic(skb,0xbeaf))
+ {
+ ra_sw_nat_hook_set_magic(skb,0x7275);
+ skb->dev->netdev_ops->ndo_start_xmit(skb,skb->dev);
+ }
+ else
__netif_receive_skb(skb);
+
local_irq_disable();
input_queue_head_incr(sd);
if (++work >= quota) {
@@ -4385,7 +4423,7 @@
*
* The entry's receive function will be scheduled to run
*/
-void __napi_schedule(struct napi_struct *n)
+__IMEM void __napi_schedule(struct napi_struct *n)
{
unsigned long flags;
@@ -4395,7 +4433,7 @@
}
EXPORT_SYMBOL(__napi_schedule);
-void __napi_complete(struct napi_struct *n)
+__IMEM void __napi_complete(struct napi_struct *n)
{
BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
BUG_ON(n->gro_list);
@@ -4509,7 +4547,7 @@
}
EXPORT_SYMBOL(netif_napi_del);
-static void net_rx_action(struct softirq_action *h)
+__IMEM static void net_rx_action(struct softirq_action *h)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies + 2;
@@ -4526,8 +4564,12 @@
* Allow this to run for 2 jiffies since which will allow
* an average latency of 1.5/HZ.
*/
- if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
+ if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit))){
+			/* Kick (clear) the watchdog counter here: under sustained
+			 * heavy traffic the system would otherwise reboot. shnwind */
+ tc3162wdog_kick();
goto softnet_break;
+ }
local_irq_enable();
@@ -6268,6 +6310,8 @@
if (dev->addr_assign_type == NET_ADDR_PERM)
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+ ecnt_register_netdevice_inline_hook(dev);
+
/* Notify protocols, that a new device appeared. */
ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
ret = notifier_to_errno(ret);
@@ -6784,6 +6828,7 @@
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
ASSERT_RTNL();
+ ecnt_unregister_netdevice_queue_inline_hook(dev);
if (head) {
list_move_tail(&dev->unreg_list, head);
@@ -7062,6 +7107,8 @@
return hash;
}
+
+
/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
@@ -7075,6 +7122,7 @@
net->dev_index_head = netdev_create_hash();
if (net->dev_index_head == NULL)
goto err_idx;
+
return 0;
@@ -7171,6 +7219,7 @@
{
kfree(net->dev_name_head);
kfree(net->dev_index_head);
+ ecnt_netdev_dest_hook();
}
static struct pernet_operations __net_initdata netdev_net_ops = {
@@ -7354,6 +7403,7 @@
hotcpu_notifier(dev_cpu_callback, 0);
dst_init();
rc = 0;
+ ecnt_netdev_init_hook();
out:
return rc;
}
Index: linux-3.18.21/net/core/ecnt_net_core.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/core/ecnt_net_core.h 2018-02-05 14:20:41.000000000 +0800
@@ -0,0 +1,505 @@
+#ifndef _LINUX_ECNT_NET_CORE_H
+#define _LINUX_ECNT_NET_CORE_H
+#include <linux/kernel.h>
+#include <linux/kmemcheck.h>
+#include <linux/compiler.h>
+#include <linux/time.h>
+#include <linux/bug.h>
+#include <linux/cache.h>
+
+#include <linux/atomic.h>
+#include <asm/types.h>
+#include <linux/spinlock.h>
+#include <linux/net.h>
+#include <linux/textsearch.h>
+#include <net/checksum.h>
+#include <linux/rcupdate.h>
+#include <linux/hrtimer.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdev_features.h>
+#include <linux/sched.h>
+#include <net/flow_keys.h>
+#include <ecnt_hook/ecnt_hook.h>
+#include <ecnt_hook/ecnt_hook_net.h>
+#include <uapi/linux/ecnt_in.h>
+#include <linux/ecnt_vlan_bind.h>
+
+#ifdef TCSUPPORT_IPV6_ENHANCEMENT
+#include <net/neighbour.h>
+#include <linux/sysctl.h>
+#endif
+
+extern int (*smux_pkt_recv_hook)(struct sk_buff *skb,
+ struct net_device *dev,
+ struct net_device *rdev);
+
+#ifdef TCSUPPORT_PORTBIND
+#if !defined(TCSUPPORT_CT)
+extern int (*portbind_sw_hook)(void);
+/*TCSUPPORT_ROUTEPOLICY_PRIOR_PORTBIND*/
+extern int (*portbind_sw_prior_hook)(struct sk_buff *skb);
+extern struct net_device* (*portbind_get_outdev_by_indev_hook)(unsigned char* indev_name);
+/*END TCSUPPORT_ROUTEPOLICY_PRIOR_PORTBIND*/
+extern int (*portbind_check_hook)(char *inIf, char *outIf);
+#endif
+#endif
+
+
+#if defined(TCSUPPORT_VLAN_TAG)
+//extern int (*remove_vtag_hook)(struct sk_buff *skb, struct net_device *dev);
+extern int (*insert_vtag_hook)(struct sk_buff **pskb);
+extern int (*check_vtag_hook)(void);
+extern int (*get_vtag_hook)(struct net_device *dev, struct sk_buff *skb);
+#endif
+
+static inline int ecnt_vlan_untag_inline_hook(struct sk_buff *skb, __be16 vlan_tci)
+{
+ if(vlan_tci&VLAN_CFI_MASK)
+ skb->vlan_tag_flag |= VLAN_TAG_FOR_CFI;
+ return 0;
+}
+
+#if defined(TCSUPPORT_PON_VLAN)
+extern int (*pon_insert_tag_hook)(struct sk_buff **pskb);
+extern int (*pon_check_vlan_hook)(struct net_device *dev, struct sk_buff *skb);
+extern int (*pon_vlan_get_mode_hook)(void);
+
+static inline int ecnt_ponvlan_rx_inline_hook(struct sk_buff **skbp,struct net_device *orig_dev)
+{
+ int vlan_mode = MODE_HGU;
+ struct sk_buff *skb = *skbp;
+ if(pon_vlan_get_mode_hook)
+ vlan_mode = pon_vlan_get_mode_hook();
+
+ int retval = ECNT_CONTINUE;
+ if((orig_dev->name[0] == 'r' || orig_dev->name[0] == 'u' || orig_dev->name[0] == 'e') && vlan_mode == MODE_SFU)
+ {
+ skb->pon_vlan_flag |= PON_PKT_FROM_LAN;
+ }
+ else if(strncmp(orig_dev->name,"pon", 3) == 0 && vlan_mode == MODE_SFU)
+ {
+ skb->pon_vlan_flag |= PON_PKT_FROM_WAN;
+ }
+
+ skb->original_dev = skb->dev;
+ if(pon_insert_tag_hook && vlan_mode == MODE_HGU)
+ {
+ if(strcmp(orig_dev->name,"pon") == 0)
+ skb->pon_vlan_flag |= PON_VLAN_RX_CALL_HOOK;
+
+ if(pon_insert_tag_hook)
+ {
+ retval = pon_insert_tag_hook(skbp);
+ if(retval == -1)
+ {
+ kfree_skb(skb);
+ return ECNT_RETURN_DROP;
+ }
+ if(retval == -2)
+ {
+ return ECNT_RETURN;
+ }
+ }
+
+ }
+#ifdef TCSUPPORT_PON_IP_HOST
+ else if(pon_insert_tag_hook && vlan_mode == MODE_SFU){
+ if(isVoipWan(orig_dev)){
+ skb->pon_vlan_flag |= PON_PKT_VOIP_RX;
+ if(pon_insert_tag_hook(&skb) == -1)
+ {
+ kfree_skb(skb);
+ return ECNT_RETURN_DROP;
+ }
+ }
+ }
+#endif
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_ponvlan_xmit_inline_hook(struct sk_buff **skbp, struct net_device *dev)
+{
+ struct sk_buff *skb = *skbp;
+ int vlan_mode = MODE_HGU;
+ int retval = ECNT_CONTINUE;
+ if(pon_vlan_get_mode_hook)
+ vlan_mode = pon_vlan_get_mode_hook();
+
+ if(vlan_mode == MODE_SFU)
+ {
+ /* packet from cpe */
+ if (skb->original_dev == NULL)
+ {
+ skb->pon_vlan_flag |= PON_PKT_FROM_CPE;
+ }
+ }
+ else
+ {
+ if(strncmp(skb->dev->name,"pon", 3) == 0)
+ skb->pon_vlan_flag |= PON_VLAN_TX_CALL_HOOK;
+ }
+ if(pon_insert_tag_hook)
+ {
+ retval = pon_insert_tag_hook(skbp);
+ if(retval == -1)
+ {
+ kfree_skb(*skbp);
+ return ECNT_RETURN_DROP;
+ }
+ if(retval == -2)
+ return ECNT_RETURN;
+ }
+ if(pon_check_vlan_hook)
+ {
+ retval = pon_check_vlan_hook(dev,skb);
+ if(retval != 1)
+ return ECNT_RETURN;
+ }
+ return ECNT_CONTINUE;
+}
+#endif
+
+
+
+#ifdef TCSUPPORT_PORTBIND
+#if !defined(TCSUPPORT_CT)
+static inline void ecnt_portbind_rx(struct sk_buff *skb,struct net_device *orig_dev){
+#if defined(TCSUPPORT_FTP_THROUGHPUT)
+ if (portbind_sw_hook) {
+#endif
+ /*
+#if defined(TCSUPPORT_ROUTEPOLICY_PRIOR_PORTBIND)
+ if ((portbind_sw_prior_hook) && (portbind_sw_prior_hook(skb) == 1)) {
+#else
+ if (portbind_sw_hook && (portbind_sw_hook() == 1)) {
+#endif
+ */
+
+
+#ifdef CONFIG_SMUX
+ /*we only check OSMUX interface and other interface*/
+ if( (orig_dev->priv_flags & IFF_OSMUX) || (skb->portbind_mark & MASK_ORIGIN_DEV) == 0)
+ {
+ skb->portbind_mark |= MASK_ORIGIN_DEV;
+ memcpy(skb->orig_dev_name, orig_dev->name, IFNAMSIZ);
+ }
+#else
+ if( (skb->portbind_mark & MASK_ORIGIN_DEV) == 0)
+ {
+ skb->portbind_mark |= MASK_ORIGIN_DEV;
+ memcpy(skb->orig_dev_name, orig_dev->name, IFNAMSIZ);
+ }
+#endif
+#if defined(TCSUPPORT_FTP_THROUGHPUT)
+ }
+#endif
+}
+#endif
+#endif
+
+#if defined(TCSUPPORT_VLAN_TAG)
+static inline int ecnt_insert_vlan_tag_inline_hook(struct sk_buff **skbp, struct net_device *dev)
+{
+ struct sk_buff *skb = *skbp;
+#if defined(TCSUPPORT_PON_VLAN)
+ int vlan_mode = MODE_HGU;
+#endif
+
+#ifdef TCSUPPORT_PON_VLAN
+ if(pon_vlan_get_mode_hook)
+ vlan_mode = pon_vlan_get_mode_hook();
+
+ if(vlan_mode == MODE_HGU)
+#endif
+ {
+ if (check_vtag_hook && (check_vtag_hook()) == 1)
+ {
+
+ if (insert_vtag_hook && (-1 == insert_vtag_hook(skbp)))
+ {
+ return ECNT_RETURN_DROP;
+ }
+ }
+ }
+ return ECNT_CONTINUE;
+
+}
+#endif
+
+#if defined(TCSUPPORT_PON_MAC_FILTER)
+extern int (*pon_check_mac_hook)(struct sk_buff *skb);
+extern int (*pon_mac_filter_get_mode_hook)(void);
+
+static inline int ecnt_pon_macfilter_rx_inline_hook(struct sk_buff **skbp,struct net_device *orig_dev)
+{
+ int mac_filter_mode = MODE_HGU;
+ struct sk_buff *skb = *skbp;
+ int retval = ECNT_CONTINUE;
+
+ if(pon_mac_filter_get_mode_hook)
+ mac_filter_mode = pon_mac_filter_get_mode_hook();
+
+ if(pon_check_mac_hook){
+ if(mac_filter_mode == MODE_HGU)
+ {
+ if(strcmp(orig_dev->name,"pon") == 0)
+ skb->pon_mac_filter_flag |= PON_MAC_FILTER_RX_CALL_HOOK;
+
+ retval = pon_check_mac_hook(*skbp);
+ if(retval == -1)
+ {
+ kfree_skb(*skbp);
+ return ECNT_RETURN_DROP;
+ }
+ }
+#ifdef TCSUPPORT_CHS
+ else if(mac_filter_mode == MODE_SFU)
+ {
+ if (orig_dev->name[0] == 'e'|| orig_dev->name[0] == 'r'|| orig_dev->name[0] == 'u')
+ skb->pon_mac_filter_flag |= PON_MAC_FILTER_RX_CALL_HOOK;
+
+ retval = pon_check_mac_hook(*skbp);
+ if(retval == -1)
+ {
+ kfree_skb(*skbp);
+ return ECNT_RETURN_DROP;
+ }
+ }
+#endif
+ }
+
+ return ECNT_CONTINUE;
+}
+
+
+static inline int ecnt_pon_macfilter_xmit_inline_hook(struct sk_buff **skbp)
+{
+ struct sk_buff *skb = *skbp;
+ int mac_filter_mode = MODE_HGU;
+ int retval = ECNT_CONTINUE;
+
+ if(pon_mac_filter_get_mode_hook)
+ mac_filter_mode = pon_mac_filter_get_mode_hook();
+
+
+ if(pon_check_mac_hook)
+ {
+ if(mac_filter_mode == MODE_SFU
+ && (skb->dev->name[0] == 'e' || skb->dev->name[0] == 'r' || skb->dev->name[0] == 'u'))
+ skb->pon_mac_filter_flag |= PON_MAC_FILTER_TX_CALL_HOOK;
+
+ retval = pon_check_mac_hook(*skbp);
+ if(retval == -1)
+ {
+ //printk("[%s:%d]pon_check_mac_hook fail:kfree,orig_dev->name=%s\n",__FUNCTION__,__LINE__, skb->dev->name);
+ kfree_skb(*skbp);
+ return ECNT_RETURN_DROP;
+ }
+ }
+
+ return ECNT_CONTINUE;
+}
+#endif
+
+
+static inline int ecnt_netif_recv_inline_hook(struct sk_buff **pskb)
+{
+ struct sk_buff *skb = *pskb;
+ int ret;
+ struct net_device *orig_dev;
+ orig_dev = skb->dev;
+#if defined(TCSUPPORT_PON_VLAN)
+ int vlan_mode = MODE_HGU;
+#endif
+
+ ECNT_CORE_DEV_HOOK(ECNT_NETIF_RCV_SKB, skb);
+
+#if defined(TCSUPPORT_PON_VLAN)
+ ret = ecnt_ponvlan_rx_inline_hook(pskb, orig_dev);
+ if(ret != ECNT_CONTINUE)
+ return ECNT_RETURN;
+#endif
+
+#ifdef TCSUPPORT_PON_VLAN
+ if(pon_vlan_get_mode_hook)
+ vlan_mode = pon_vlan_get_mode_hook();
+
+ if(vlan_mode == MODE_HGU)
+#endif
+ {
+#ifdef TCSUPPORT_VLAN_TAG
+ if (check_vtag_hook && (check_vtag_hook() == 1))
+ {
+ if (get_vtag_hook)
+ if (-1 == get_vtag_hook(orig_dev, skb)) {
+ kfree_skb(skb);
+ return ECNT_RETURN_DROP;
+ }
+ }
+#endif
+ }
+
+#if defined(TCSUPPORT_PON_MAC_FILTER)
+ ret = ecnt_pon_macfilter_rx_inline_hook(pskb, orig_dev);
+ if(ret != ECNT_CONTINUE){
+ return ECNT_RETURN;
+ }
+#endif
+
+
+#ifdef TCSUPPORT_PORTBIND
+#if !defined(TCSUPPORT_CT)
+ ecnt_portbind_rx(skb, orig_dev);
+#endif
+#endif
+
+#ifdef CONFIG_SMUX
+ if((orig_dev->priv_flags & IFF_RSMUX) && smux_pkt_recv_hook
+#ifdef TCSUPPORT_XPON_HAL_API_EXT
+ &&!(skb->mark&DOWNSTREAM_SIMULATION_MASK)
+#endif
+ ){
+ ret = smux_pkt_recv_hook(skb, skb->dev, orig_dev);
+#if ((defined(TCSUPPORT_WAN_ETHER) || defined(TCSUPPORT_WAN_PTM)) && defined(TCSUPPORT_MULTISERVICE_ON_WAN) ) || defined(TCSUPPORT_WAN_GPON) || defined(TCSUPPORT_WAN_EPON)
+#ifdef TCSUPPORT_VLAN_TAG
+ if (skb) {
+ skb->vlan_tag_flag |= VLAN_TAG_FROM_INDEV;
+ }
+#endif
+#endif
+ return ECNT_RETURN;
+ }
+#endif
+
+ return ECNT_CONTINUE;
+}
+
+
+#ifdef TCSUPPORT_PORTBIND
+#if !defined(TCSUPPORT_CT)
+static inline int ecnt_portbind_xmit_inline_hook(struct sk_buff *skb)
+{
+ #if defined(TCSUPPORT_FTP_THROUGHPUT)
+ if ((portbind_sw_hook) && ((skb->portbind_mark & MASK_OUT_DEV) == 0)) {
+ #else
+#if defined(TCSUPPORT_ROUTEPOLICY_PRIOR_PORTBIND)
+ if ((portbind_sw_prior_hook) && (portbind_sw_prior_hook(skb) == 1) && ((skb->portbind_mark & MASK_OUT_DEV) == 0)) {
+#else
+ if ((portbind_sw_hook) && (portbind_sw_hook() == 1) && ((skb->portbind_mark & MASK_OUT_DEV) == 0)) {
+ #endif
+#endif
+ if (portbind_check_hook) {
+ /* only need check once. shnwind 20110407 */
+ int portbind_ret = 0;
+ portbind_ret = portbind_check_hook(skb->orig_dev_name, skb->dev->name);
+ if(portbind_ret == 0){
+ kfree_skb(skb);
+ return ECNT_RETURN_DROP;
+ }else if(portbind_ret == 1){
+ skb->portbind_mark |= MASK_OUT_DEV;
+ }
+#if 0
+ if (portbind_check_hook(skb->orig_dev_name, skb->dev->name) == 0) {
+ kfree_skb(skb);
+ return ECNT_RETURN_DROP;
+ }else if (portbind_check_hook(skb->orig_dev_name, skb->dev->name) == 1)
+ skb->portbind_mark |= MASK_OUT_DEV;
+#endif
+ /* else check again */
+ }
+ }
+
+
+ return ECNT_CONTINUE;
+
+
+}
+#endif
+#endif
+
+static inline int ecnt_dev_queue_xmit_inline_hook(struct sk_buff **pskb)
+{
+ struct sk_buff *skb = *pskb;
+ int ret;
+
+ ECNT_CORE_DEV_HOOK(ECNT_DEV_QUE_XMIT, skb);
+
+
+#if defined(TCSUPPORT_PORTBIND)
+#if !defined(TCSUPPORT_CT)
+ ret = ecnt_portbind_xmit_inline_hook(skb);
+ if(ret != ECNT_CONTINUE)
+ {
+ return ret;
+ }
+#endif
+#endif
+
+#if defined(TCSUPPORT_PON_VLAN)
+ ret = ecnt_ponvlan_xmit_inline_hook(pskb, skb->dev);
+ if(ret != ECNT_CONTINUE)
+ {
+ return ret;
+ }
+#endif
+
+#if defined(TCSUPPORT_VLAN_TAG)
+ ret = ecnt_insert_vlan_tag_inline_hook(pskb, skb->dev);
+ if(ret != ECNT_CONTINUE)
+ {
+ return ret;
+ }
+#endif
+
+#if defined(TCSUPPORT_PON_MAC_FILTER)
+ ret = ecnt_pon_macfilter_xmit_inline_hook(pskb);
+ if(ret != ECNT_CONTINUE)
+ {
+ return ret;
+ }
+#endif
+
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_register_netdevice_inline_hook(struct net_device *dev)
+{
+
+#if defined(TCSUPPORT_TSO_ENABLE)
+ if(!strncmp(dev->name, "eth", 3) ){
+ dev->features |= NETIF_F_SG;
+ dev->features |= NETIF_F_GSO;
+ dev->hw_features |= NETIF_F_SG;
+ dev->hw_features |= NETIF_F_GSO;
+ dev->wanted_features |= NETIF_F_SG;
+ dev->wanted_features |= NETIF_F_GSO;
+ }
+ if(!strncmp(dev->name, "nas", 3) || !strncmp(dev->name, "pon", 3)){
+ dev->hw_features |= NETIF_F_SG;
+ dev->hw_features |= NETIF_F_GSO;
+ }
+#endif
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_unregister_netdevice_queue_inline_hook(struct net_device *dev)
+{
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_neighbour_sysctl_register_inline_hook(struct ctl_table *neigh_var, struct neigh_parms *p, struct net_device *dev)
+{
+#ifdef TCSUPPORT_IPV6_ENHANCEMENT
+ neigh_var = neigh_var+NEIGH_VAR_DEFAULT_ROUTE;
+ neigh_var->data = &p->dlf_route[0];
+ neigh_var->extra1 = dev;
+ neigh_var->extra2 = p;
+#endif
+ return ECNT_CONTINUE;
+}
+
+#endif
+
Index: linux-3.18.21/net/core/ecnt_net_utility.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/core/ecnt_net_utility.c 2018-02-05 14:20:19.000000000 +0800
@@ -0,0 +1,491 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation ("EcoNet Software")
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited ("EcoNet") and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS ("ECONET SOFTWARE") RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN "AS IS"
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVER'S SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/if_pppox.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#ifdef TCSUPPORT_IPV6
+#include <linux/ipv6.h>
+#endif
+#include <linux/ecnt_vlan_bind.h>
+#include <net/addrconf.h>
+#include "ecnt_net_core.h"
+#include "../ipv6/ecnt_net_ipv6.h"
+
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+#define PPP_IP 0x21 /* Internet Protocol */
+#define PPP_IPV6 0x57 /* Internet Protocol Version 6 */
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+#define MAX_DEV_BANDWIDTH_NUM 30
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+#ifdef TCSUPPORT_DOWNSTREAM_QOS
+/*use for set voip rx port in application, shnwind add 20110215.*/
+unsigned short int voip_rx_port[VOIP_RX_PORT_NUM] = {0};
+EXPORT_SYMBOL(voip_rx_port);
+char downstream_qos_enable = 0;
+EXPORT_SYMBOL(downstream_qos_enable);
+#endif
+
+typedef struct devBandwidth_s{
+ unsigned char mac[6];
+ unsigned long upBytes;
+ unsigned long downBytes;
+ int valid;
+}devBandwidth_t;
+typedef struct devBandwidthList_s{
+ int enable;
+ struct devBandwidth_s bandwidthList[MAX_DEV_BANDWIDTH_NUM];
+}devBandwidthList_t;
+
+
+/************************************************************************
+* E X T E R N A L D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+*************************************************************************
+*/
+
+
+/************************************************************************
+* P U B L I C D A T A
+*************************************************************************
+*/
+struct devBandwidthList_s *gHwBandwidthList = NULL;
+EXPORT_SYMBOL(gHwBandwidthList);
+struct devBandwidthList_s *gDevBandwidthList = NULL;
+EXPORT_SYMBOL(gDevBandwidthList);
+
+int (*match_multicast_vtag_check)
+(struct sk_buff *skb, struct net_device *vdev);
+EXPORT_SYMBOL(match_multicast_vtag_check);
+#if !defined(TCSUPPORT_CT_VLAN_TAG)
+int (*match_multicast_vtag)(struct sk_buff *skb, struct net_device *vdev);
+EXPORT_SYMBOL(match_multicast_vtag);
+#endif
+int (*vlanbind_check_group_hook)(struct sk_buff *skb);
+EXPORT_SYMBOL(vlanbind_check_group_hook);
+int (*wifi_eth_fast_tx_hook)(struct sk_buff *skb);
+EXPORT_SYMBOL(wifi_eth_fast_tx_hook);
+
+
+
+/*-------------------sw_rps_for_wifi---------------------------*/
+//#ifdef TCSUPPORT_WLAN_SW_RPS
+int (*fromWlan5GPktRpsHandle_hook)(void* pRxPacket) = NULL;
+EXPORT_SYMBOL(fromWlan5GPktRpsHandle_hook);
+
+int (*toWlan5GPktRpsHandle_hook)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(toWlan5GPktRpsHandle_hook);
+
+int (*toWlan2GPktRpsHandle_hook)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(toWlan2GPktRpsHandle_hook);
+
+unsigned int (*ecnt_7603_get_ampdu_pkt_hook)(void) = NULL;
+EXPORT_SYMBOL(ecnt_7603_get_ampdu_pkt_hook);
+unsigned int (*ecnt_7603_get_rx_pkt_hook)(void) = NULL;
+EXPORT_SYMBOL(ecnt_7603_get_rx_pkt_hook);
+
+int (*ecnt_wifi_rx_rps_hook)(struct sk_buff * skb) = NULL;
+EXPORT_SYMBOL(ecnt_wifi_rx_rps_hook);
+
+int (*get_WifitolanRps_hook)(void) = NULL;
+EXPORT_SYMBOL(get_WifitolanRps_hook);
+
+int (*ecnt_set_2Gwifi_rps_hook)(int RxOn, int WLanCPU, int TxOn_2G, int LanCPU) = NULL;
+EXPORT_SYMBOL(ecnt_set_2Gwifi_rps_hook);
+
+int (*ecnt_set_wifi_rps_hook)(int RxOn, int WLanCPU, int TxOn, int LanCPU) = NULL;
+EXPORT_SYMBOL(ecnt_set_wifi_rps_hook);
+
+//#endif
+/*-------------------sw_rps_for_wifi---------------------------*/
+
+/*for ALG switch*/
+/*0 means switch off; 1 means switch on; 2 means switch not set*/
+int nf_conntrack_ftp_enable __read_mostly = 1;
+EXPORT_SYMBOL_GPL(nf_conntrack_ftp_enable);
+int nf_conntrack_sip_enable __read_mostly = 1;
+EXPORT_SYMBOL_GPL(nf_conntrack_sip_enable);
+int nf_conntrack_h323_enable __read_mostly = 1;
+EXPORT_SYMBOL_GPL(nf_conntrack_h323_enable);
+int nf_conntrack_rtsp_enable __read_mostly = 1;
+EXPORT_SYMBOL_GPL(nf_conntrack_rtsp_enable);
+int nf_conntrack_l2tp_enable __read_mostly = 2;
+EXPORT_SYMBOL_GPL(nf_conntrack_l2tp_enable);
+int nf_conntrack_ipsec_enable __read_mostly = 2;
+EXPORT_SYMBOL_GPL(nf_conntrack_ipsec_enable);
+int nf_conntrack_pptp_enable __read_mostly = 1;
+EXPORT_SYMBOL_GPL(nf_conntrack_pptp_enable);
+int nf_conntrack_portscan_enable __read_mostly = 0;
+EXPORT_SYMBOL_GPL(nf_conntrack_portscan_enable);
+int nf_conntrack_ftp_port __read_mostly = 21;
+EXPORT_SYMBOL_GPL(nf_conntrack_ftp_port);
+int nf_conntrack_esp_timeout __read_mostly = 30;
+EXPORT_SYMBOL_GPL(nf_conntrack_esp_timeout);
+struct net_device* (*portbind_get_outdev_by_indev_ct_hook)(unsigned char* indev_name);
+EXPORT_SYMBOL(portbind_get_outdev_by_indev_ct_hook);
+
+int (*wlan_ratelimit_enqueue_hook) (struct sk_buff * skb,unsigned char direction) = NULL;
+EXPORT_SYMBOL(wlan_ratelimit_enqueue_hook);
+
+/************************************************************************
+* F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+
+/****************************************************************************
+**function name
+ __vlan_proto
+**description:
+ get protocol via skb
+**return
+ eth_type
+**parameter:
+ skb: the packet information
+****************************************************************************/
+static inline __be16 __vlan_proto(const struct sk_buff *skb)
+{
+ return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+}
+
+/****************************************************************************
+**function name
+ check_ppp_udp_multicast
+**description:
+ check multicast packet in downstream
+**return
+ 0: check ok or ignore
+ -1: fail
+**parameter:
+ skb: the packet information
+ vdev: virtual net device
+****************************************************************************/
+int check_ppp_udp_multicast
+(struct sk_buff *skb, struct net_device *vdev)
+{
+
+ return 0; /* check ok or ignore. */
+}
+EXPORT_SYMBOL(check_ppp_udp_multicast);
+
+/****************************************************************************
+**function name
+ __is_ip_udp
+**description:
+ check whether packet is IP udp packets.
+**return
+ 0: check ok or ignore
+ -1: fail
+**parameter:
+ skb: the packet information
+ vdev: virtual net device
+****************************************************************************/
+int __is_ip_udp(struct sk_buff *skb)
+{
+#if !defined(TCSUPPORT_CT_PON_SC)
+ return 0;
+#endif
+}
+EXPORT_SYMBOL(__is_ip_udp);
+
+/****************************************************************************
+**function name
+ tr143RxShortCut
+**description:
+ tr143 test shortcut data path
+**return
+ 0: receive succeed.
+ -1: fail
+**parameter:
+ skb: the packet information
+ vlanLayer: vlan layer counts
+ ifaceidx: interface index
+ iptype: ip type, 1: IP, 2:PPP, 3:dslite+IP, 4:dslite+PPP
+****************************************************************************/
+int tr143RxShortCut(int enable
+, struct sk_buff *skb
+, int vlanLayer
+, int ifaceidx
+, int iptype
+)
+{
+
+ return 0;
+}
+EXPORT_SYMBOL(tr143RxShortCut);
+
+#if (defined(TCSUPPORT_XPON_MAPPING) || defined(TCSUPPORT_PON_VLAN) || defined(TCSUPPORT_XPON_IGMP)) && defined(TCSUPPORT_PON_IP_HOST)
+#define br_port_get_rcu(dev) \
+ ((struct net_bridge_port *) rcu_dereference(dev->rx_handler_data))
+#define PON_IP_HOST_WANIF_PATH "tc3162/pon_wanIf"
+
+char voip_wanIf[8] = "NULL";
+char tr069_wanIf[8] = "nas0_0";
+
+static int pon_wanIf_read_proc(char *buf, char **start, off_t off, int count, int *eof, void *data)
+{
+ int len = 0;
+
+ len = sprintf(buf,"voip %s\n tr069 %s", voip_wanIf, tr069_wanIf );
+
+ if (len < off + count)
+ *eof = 1;
+
+ len -= off;
+ *start = buf + off;
+ if(len > count)
+ len = count;
+ if(len < 0)
+ len = 0;
+
+ return len;
+}
+
+static int pon_wanIf_write_proc(struct file *file, const char *buffer, unsigned long count, void *data)
+{
+ char wan_type[8] = {0};
+ char wan_name[8] = {0};
+ char temp[16] = {0};
+
+ if (count > 15)
+ return -EFAULT;
+
+ if (copy_from_user(temp, buffer, count))
+ return -EFAULT;
+
+ sscanf(temp, "%s %s", wan_type, wan_name);
+
+ if(memcmp(wan_name, "nas", 3) != 0)
+ return -EFAULT;
+
+ if(strcmp(wan_type, "voip") == 0)
+ strcpy(voip_wanIf, wan_name);
+ else if(strcmp(wan_type, "tr069") == 0)
+ strcpy(tr069_wanIf, wan_name);
+ else
+ return -EFAULT;
+
+ return count;
+}
+
+int isBridgeWan(struct net_device *dev)
+{
+ struct net_bridge_port *port;
+
+ if (dev == NULL)
+ return 0;
+
+ if ((port = br_port_get_rcu(dev)) == NULL)
+ return 0;
+
+ return 1;
+}
+
+
+/****************************************************************************
+**function name
+ ecnt_wanIf_proc_init
+**description:
+ wanIf proc init
+**return
+**parameter:
+****************************************************************************/
+void ecnt_wanIf_proc_init(void)
+{
+ struct proc_dir_entry *voip_wanIf_proc = NULL;
+
+ voip_wanIf_proc = create_proc_entry(PON_IP_HOST_WANIF_PATH, 0, NULL);
+ voip_wanIf_proc->read_proc = pon_wanIf_read_proc;
+ voip_wanIf_proc->write_proc = pon_wanIf_write_proc;
+
+ return;
+}
+/****************************************************************************
+**function name
+ ecnt_wanIf_proc_deinit
+**description:
+ wanIf proc destroy
+**return
+**parameter:
+****************************************************************************/
+void ecnt_wanIf_proc_deinit(void)
+{
+ remove_proc_entry(PON_IP_HOST_WANIF_PATH, NULL);
+}
+#endif
+
+int isVoipWan(struct net_device *dev){
+#if (defined(TCSUPPORT_XPON_MAPPING) || defined(TCSUPPORT_PON_VLAN) || defined(TCSUPPORT_XPON_IGMP)) && defined(TCSUPPORT_PON_IP_HOST)
+ if(dev == NULL || isBridgeWan(dev))
+ return 0;
+
+#if !defined(TCSUPPORT_CT)
+ if(dev->name[0] == 'n' && dev->name[3] == voip_wanIf[3])
+#endif
+ return 1;
+#endif
+ return 0;
+}
+
+EXPORT_SYMBOL(isVoipWan);
+
+
+
+
+void ecnt_netdev_init_hook(void)
+{
+
+
+#if (defined(TCSUPPORT_XPON_MAPPING) || defined(TCSUPPORT_PON_VLAN) || defined(TCSUPPORT_XPON_IGMP)) && defined(TCSUPPORT_PON_IP_HOST)
+ ecnt_wanIf_proc_init();
+#endif
+
+}
+
+
+void ecnt_netdev_dest_hook(void)
+{
+
+
+#if (defined(TCSUPPORT_XPON_MAPPING) || defined(TCSUPPORT_PON_VLAN) || defined(TCSUPPORT_XPON_IGMP)) && defined(TCSUPPORT_PON_IP_HOST)
+ ecnt_wanIf_proc_deinit();
+#endif
+
+}
+
+
+/****************************************************************************
+**function name
+ __is_igmp
+**description:
+ check whether packet is igmp packets.
+**return
+ 0: match fail
+ 1: match ok
+**parameter:
+ skb: the packet information
+ vdev: virtual net device
+****************************************************************************/
+int __is_igmp(struct sk_buff *skb)
+{
+ return 0;
+
+}
+EXPORT_SYMBOL(__is_igmp);
+
+/****************************************************************************
+**function name
+ __is_udp_multicast
+**description:
+ check multicast packet in downstream
+**return
+ 0: match fail
+ 1: match ok
+**parameter:
+ skb: the packet information
+ vdev: virtual net device
+****************************************************************************/
+int __is_udp_multicast(struct sk_buff *skb)
+{
+
+ return 0;
+}
+EXPORT_SYMBOL(__is_udp_multicast);
+
+#if defined(TCSUPPORT_FH_JOYMEV2_PON) || defined(TCSUPPORT_CT_JOYME_BANDWIDTH)
+int (*wifi_bandwidth_hook_tx) (struct sk_buff * skb) = NULL;
+int (*wifi_bandwidth_hook_rx)(unsigned char* srcMac, int length) = NULL;
+
+EXPORT_SYMBOL(wifi_bandwidth_hook_tx);
+EXPORT_SYMBOL(wifi_bandwidth_hook_rx);
+#endif
+
+
+
+int localInShortCut(struct sk_buff *skb)
+{
+ struct iphdr *iph = NULL;
+
+ skb->pkt_type = PACKET_HOST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY ;
+
+ skb_reset_network_header(skb);
+ skb->mac_len = skb->network_header - skb->mac_header;
+ iph = ip_hdr(skb);
+ pskb_trim_rcsum(skb, ntohs(iph->tot_len));
+
+ if ( !skb->skb_iif )
+ skb->skb_iif = skb->dev->ifindex;
+
+ /* remove ip header */
+ skb_pull(skb, ip_hdrlen(skb));
+ skb_reset_transport_header(skb);
+
+ if ( NULL == skb_dst(skb) )
+ {
+ ip_route_input_noref(skb, iph->daddr, iph->saddr,
+ iph->tos, skb->dev);
+ }
+
+ tcp_v4_rcv(skb);
+
+ return 0;
+}
+EXPORT_SYMBOL(localInShortCut);
+
Index: linux-3.18.21/net/core/ecnt_skb_alloc.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/core/ecnt_skb_alloc.c 2018-02-05 14:20:19.000000000 +0800
@@ -0,0 +1,945 @@
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/kmemcheck.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/splice.h>
+#include <linux/cache.h>
+#include <linux/rtnetlink.h>
+#include <linux/init.h>
+#include <linux/scatterlist.h>
+#include <linux/errqueue.h>
+#include <linux/prefetch.h>
+#include <linux/if_vlan.h>
+
+#if defined(TCSUPPORT_RA_HWNAT)
+#include <linux/foe_hook.h>
+#endif
+
+atomic_t g_used_skb_num;
+#if defined(MT7612E)
+int g_max_skb_num = 4096;
+#else
+int g_max_skb_num = 1280;
+#endif
+EXPORT_SYMBOL(g_used_skb_num);
+
+int peak_skb_num = 0;
+
+#if defined(CONFIG_CPU_TC3162) || defined(CONFIG_MIPS_TC3262)
+atomic_t skbmgr_alloc_no;
+atomic_t skbmgr_4k_alloc_no;
+#endif
+
+#if defined(CONFIG_CPU_TC3162) || defined(CONFIG_MIPS_TC3262)
+
+#define SKBMGR_DEF_HOT_LIST_LEN 512
+
+int skbmgr_limit = 1280;
+int skbmgr_max_alloc_no = 0;
+int skbmgr_alloc_fail = 0;
+
+
+int skbmgr_alloc_normal = 0;
+
+int skbmgr_hot_list_len = SKBMGR_DEF_HOT_LIST_LEN;
+int skbmgr_max_list_len = 0;
+
+union {
+ struct sk_buff_head list;
+ char pad[SMP_CACHE_BYTES];
+} skbmgr_pool[SKBMGR_MAX_QUEUE];
+
+__IMEM struct sk_buff *skbmgr_alloc_skb2k(void)
+{
+ struct sk_buff_head *list;
+ struct sk_buff *skb;
+ int alloc_no;
+
+ list = &skbmgr_pool[SKBMGR_QUEUE_ID].list;
+
+ if (skb_queue_len(list)) {
+ unsigned int size;
+ struct skb_shared_info *shinfo;
+ u8 *data;
+
+ skb = skb_dequeue(list);
+
+ if (unlikely(skb == NULL))
+ goto try_normal;
+
+#if 0
+ size = skb->truesize-SKB_DATA_ALIGN(sizeof(struct sk_buff))-SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ data = skb->head;
+
+ /*
+ * Only clear those fields we need to clear, not those that we will
+ * actually initialise below. Hence, don't put any more fields after
+ * the tail pointer in struct sk_buff!
+ */
+ memset(skb, 0, offsetof(struct sk_buff, tail));
+ atomic_set(&skb->users, 1);
+ skb->head = data;
+ skb->data = data;
+ skb_reset_tail_pointer(skb);
+ skb->end = skb->tail + size;
+ skb->mac_header = (typeof(skb->mac_header))~0U;
+ skb->transport_header = (typeof(skb->transport_header))~0U;
+
+ /* make sure we initialize shinfo sequentially */
+ shinfo = skb_shinfo(skb);
+ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+ atomic_set(&shinfo->dataref, 1);
+
+ skb->skb_recycling_callback = skbmgr_recycling_callback;
+ skb->skb_recycling_ind = SKBMGR_INDICATION;
+#endif
+
+ atomic_inc(&skbmgr_alloc_no);
+ alloc_no = atomic_read(&skbmgr_alloc_no);
+ if (alloc_no > skbmgr_max_alloc_no)
+ skbmgr_max_alloc_no = alloc_no;
+
+ return skb;
+ }
+
+try_normal:
+ if ((skbmgr_limit == 0) || (atomic_read(&skbmgr_alloc_no) < skbmgr_limit)) {
+ skbmgr_alloc_normal++;
+ skb = alloc_skb(SKBMGR_RX_BUF_LEN, GFP_ATOMIC|__GFP_NOWARN);
+ if (likely(skb)) {
+ skb->skb_recycling_callback = skbmgr_recycling_callback;
+ skb->skb_recycling_ind = SKBMGR_INDICATION;
+
+ atomic_inc(&skbmgr_alloc_no);
+ alloc_no = atomic_read(&skbmgr_alloc_no);
+ if (alloc_no > skbmgr_max_alloc_no)
+ skbmgr_max_alloc_no = alloc_no;
+ } else {
+ skbmgr_alloc_fail++;
+ }
+ } else {
+ skb = NULL;
+ skbmgr_alloc_fail++;
+ }
+ return skb;
+}
+
+EXPORT_SYMBOL(skbmgr_alloc_skb2k);
+
+__IMEM int skbmgr_recycling_callback(struct sk_buff *skb)
+{
+ struct sk_buff_head *list;
+
+ list = &skbmgr_pool[SKBMGR_QUEUE_ID].list;
+
+ if (skb_queue_len(list) < skbmgr_hot_list_len) {
+ if (/*(skb->truesize - sizeof(struct sk_buff) != SKBMGR_RX_BUF_LEN) ||*/
+ skb_is_nonlinear(skb) ||
+ (skb->fclone != SKB_FCLONE_UNAVAILABLE) ||
+ skb_cloned(skb) ||
+ (skb_shinfo(skb)->nr_frags) ||
+ skb_has_frag_list(skb)) {
+ return 0;
+ }
+#if defined(TCSUPPORT_HWNAT)
+ pktflow_free(skb);
+#endif
+#if defined(TCSUPPORT_RA_HWNAT)
+ if (ra_sw_nat_hook_free)
+ ra_sw_nat_hook_free(skb);
+#endif
+ if (skb_queue_len(list) > skbmgr_max_list_len)
+ skbmgr_max_list_len = skb_queue_len(list) + 1;
+
+#if 1
+ unsigned int size;
+ struct skb_shared_info *shinfo;
+ u8 *data;
+
+ size = skb->truesize-SKB_DATA_ALIGN(sizeof(struct sk_buff))-SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ data = skb->head;
+
+ /*
+ * Only clear those fields we need to clear, not those that we will
+ * actually initialise below. Hence, don't put any more fields after
+ * the tail pointer in struct sk_buff!
+ */
+ #ifdef TCSUPPORT_WLAN_SW_RPS
+ memset(skb, 0, offsetof(struct sk_buff, pAd));
+ #else
+ memset(skb, 0, offsetof(struct sk_buff, tail));
+ #endif
+ atomic_set(&skb->users, 1);
+ skb->head = data;
+ skb->data = data;
+ skb_reset_tail_pointer(skb);
+ skb->end = skb->tail + size;
+ skb->mac_header = (typeof(skb->mac_header))~0U;
+ skb->transport_header = (typeof(skb->transport_header))~0U;
+
+ /* make sure we initialize shinfo sequentially */
+ shinfo = skb_shinfo(skb);
+ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+ atomic_set(&shinfo->dataref, 1);
+
+ skb->skb_recycling_callback = skbmgr_recycling_callback;
+ skb->skb_recycling_ind = SKBMGR_INDICATION;
+#endif
+
+ skb_queue_head(list, skb);
+
+ return 1;
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(skbmgr_recycling_callback);
+
+#if defined(TCSUPPORT_MAX_PACKET_2000) || defined(MT7612E)
+#ifdef TCSUPPORT_CPU_EN7521
+#define SKBMGR_4K_LIMIT 3072
+#else
+#define SKBMGR_4K_LIMIT 2048
+#endif
+#else
+#if defined(MT7592)
+#define SKBMGR_4K_LIMIT 1024
+#else
+#define SKBMGR_4K_LIMIT 512
+#endif
+#endif
+
+#if defined(TCSUPPORT_MAX_PACKET_2000) || defined(MT7612E) || defined(MT7615E)
+#define SKBMGR_4K_DEF_HOT_LIST_LEN 512
+#else
+#define SKBMGR_4K_DEF_HOT_LIST_LEN 128
+#endif
+
+int skbmgr_4k_max_alloc_no = 0;
+int skbmgr_4k_alloc_fail = 0;
+int skbmgr_4k_alloc_normal = 0;
+
+//int skbmgr_limit = SKBMGR_4K_LIMIT;
+int skbmgr_4k_hot_list_len = SKBMGR_4K_DEF_HOT_LIST_LEN;
+int skbmgr_4k_max_list_len = 0;
+__DMEM int skbmgr_4k_limit = SKBMGR_4K_LIMIT;
+
+
+union {
+ struct sk_buff_head list;
+ char pad[SMP_CACHE_BYTES];
+} skbmgr_4k_pool[SKBMGR_MAX_QUEUE];
+
+
+__IMEM struct sk_buff *skbmgr_alloc_skb4k(void)
+{
+ struct sk_buff_head *list;
+ struct sk_buff *skb;
+ int alloc_no;
+
+ list = &skbmgr_4k_pool[SKBMGR_QUEUE_ID].list;
+
+ if (skb_queue_len(list)) {
+ unsigned int size;
+ struct skb_shared_info *shinfo;
+ u8 *data;
+
+ skb = skb_dequeue(list);
+
+ if (unlikely(skb == NULL))
+ goto try_normal;
+
+ size = skb->truesize-SKB_DATA_ALIGN(sizeof(struct sk_buff))-SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ data = skb->head;
+
+ /*
+ * Only clear those fields we need to clear, not those that we will
+ * actually initialise below. Hence, don't put any more fields after
+ * the tail pointer in struct sk_buff!
+ */
+ #ifdef TCSUPPORT_WLAN_SW_RPS
+ memset(skb, 0, offsetof(struct sk_buff, pAd));
+ #else
+ memset(skb, 0, offsetof(struct sk_buff, tail));
+ #endif
+ atomic_set(&skb->users, 1);
+ skb->head = data;
+ skb->data = data;
+ skb_reset_tail_pointer(skb);
+ skb->end = skb->tail + size;
+ skb->mac_header = (typeof(skb->mac_header))~0U;
+ skb->transport_header = (typeof(skb->transport_header))~0U;
+
+ /* make sure we initialize shinfo sequentially */
+ shinfo = skb_shinfo(skb);
+ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+ atomic_set(&shinfo->dataref, 1);
+
+ skb->skb_recycling_callback = skbmgr_4k_recycling_callback;
+ skb->skb_recycling_ind = SKBMGR_4K_INDICATION;
+
+ atomic_inc(&skbmgr_4k_alloc_no);
+ alloc_no = atomic_read(&skbmgr_4k_alloc_no);
+ if (alloc_no > skbmgr_4k_max_alloc_no)
+ skbmgr_4k_max_alloc_no = alloc_no;
+
+ return skb;
+ }
+
+try_normal:
+ if ((atomic_read(&skbmgr_4k_alloc_no) < skbmgr_4k_limit)) {
+ skbmgr_4k_alloc_normal++;
+ skb = alloc_skb(SKBMGR_4K_RX_BUF_LEN , GFP_ATOMIC|__GFP_NOWARN);
+ if (likely(skb)) {
+ skb->skb_recycling_callback = skbmgr_4k_recycling_callback;
+ skb->skb_recycling_ind = SKBMGR_4K_INDICATION;
+ atomic_inc(&skbmgr_4k_alloc_no);
+ alloc_no = atomic_read(&skbmgr_4k_alloc_no);
+ if (alloc_no > skbmgr_4k_max_alloc_no)
+ skbmgr_4k_max_alloc_no = alloc_no;
+ } else {
+ skbmgr_4k_alloc_fail++;
+ }
+ } else {
+ skb = NULL;
+ skbmgr_4k_alloc_fail++;
+ }
+ return skb;
+}
+
+EXPORT_SYMBOL(skbmgr_alloc_skb4k);
+
+__IMEM int skbmgr_4k_recycling_callback(struct sk_buff *skb)
+{
+ struct sk_buff_head *list;
+
+ list = &skbmgr_4k_pool[SKBMGR_QUEUE_ID].list;
+
+ if (skb_queue_len(list) < skbmgr_4k_hot_list_len) {
+ if (/*(skb->truesize - sizeof(struct sk_buff) != SKBMGR_4K_RX_BUF_LEN) ||*/
+ (skb->fclone != SKB_FCLONE_UNAVAILABLE) ||
+ skb_cloned(skb) ||
+ (skb_shinfo(skb)->nr_frags) ||
+ skb_has_frag_list(skb)) {
+ return 0;
+ }
+
+#if defined(TCSUPPORT_HWNAT)
+ pktflow_free(skb);
+#endif
+#if defined(TCSUPPORT_RA_HWNAT)
+ if (ra_sw_nat_hook_free)
+ ra_sw_nat_hook_free(skb);
+#endif
+ if (skb_queue_len(list) > skbmgr_4k_max_list_len)
+ skbmgr_4k_max_list_len = skb_queue_len(list) + 1;
+
+ skb_queue_head(list, skb);
+
+ return 1;
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(skbmgr_4k_recycling_callback);
+#endif
+
+#if defined(TC3262_GMAC_SG_MODE) || defined(TC3262_PTM_SG_MODE)
+
+#define SKBMGR_SG_RX_BUF_LEN SKB_DATA_ALIGN(128+NET_SKB_PAD)
+
+__DMEM int skbmgr_sg_max_list_len = 0;
+
+__DMEM union {
+ struct sk_buff_head list;
+ char pad[SMP_CACHE_BYTES];
+} skbmgr_sg_pool[NR_CPUS];
+
+struct sk_buff *skbmgr_alloc_skb128(void)
+{
+ struct sk_buff_head *list = &skbmgr_sg_pool[smp_processor_id()].list;
+ struct sk_buff *skb;
+
+ if (skb_queue_len(list)) {
+ unsigned long flags;
+ unsigned int size;
+ struct skb_shared_info *shinfo;
+ u8 *data;
+
+ local_irq_save(flags);
+ skb = __skb_dequeue(list);
+ local_irq_restore(flags);
+
+ if (unlikely(skb == NULL))
+ goto try_normal;
+
+ size = skb->truesize-SKB_DATA_ALIGN(sizeof(struct sk_buff))-SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ data = skb->head;
+
+ /*
+ * Only clear those fields we need to clear, not those that we will
+ * actually initialise below. Hence, don't put any more fields after
+ * the tail pointer in struct sk_buff!
+ */
+ memset(skb, 0, offsetof(struct sk_buff, tail));
+ atomic_set(&skb->users, 1);
+ skb->head = data;
+ skb->data = data;
+ skb_reset_tail_pointer(skb);
+ skb->end = skb->tail + size;
+ skb->mac_header = (typeof(skb->mac_header))~0U;
+ skb->transport_header = (typeof(skb->transport_header))~0U;
+
+ /* make sure we initialize shinfo sequentially */
+ shinfo = skb_shinfo(skb);
+ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+ atomic_set(&shinfo->dataref, 1);
+
+ skb->skb_recycling_callback = skbmgr_sg_recycling_callback;
+
+ return skb;
+ }
+
+try_normal:
+ skb = alloc_skb(SKBMGR_SG_RX_BUF_LEN, GFP_ATOMIC|__GFP_NOWARN);
+ if (likely(skb))
+ skb->skb_recycling_callback = skbmgr_sg_recycling_callback;
+ return skb;
+}
+
+EXPORT_SYMBOL(skbmgr_alloc_skb128);
+int skbmgr_sg_recycling_callback(struct sk_buff *skb)
+{
+ struct sk_buff_head *list = &skbmgr_sg_pool[smp_processor_id()].list;
+
+ if (skb_queue_len(list) < skbmgr_hot_list_len) {
+ unsigned long flags;
+
+ if (/*(skb->truesize - sizeof(struct sk_buff) != SKBMGR_SG_RX_BUF_LEN) ||*/
+ (skb_shinfo(skb)->nr_frags) ||
+ (skb_shinfo(skb)->frag_list)) {
+ return 0;
+ }
+
+#if defined(TCSUPPORT_HWNAT)
+ pktflow_free(skb);
+#endif
+#ifdef TCSUPPORT_RA_HWNAT
+ if (ra_sw_nat_hook_free)
+ ra_sw_nat_hook_free(skb);
+#endif
+
+ if (skb_queue_len(list) > skbmgr_sg_max_list_len)
+ skbmgr_sg_max_list_len = skb_queue_len(list) + 1;
+
+ local_irq_save(flags);
+ __skb_queue_head(list, skb);
+ local_irq_restore(flags);
+
+ return 1;
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(skbmgr_sg_recycling_callback);
+#endif //xflu
+
+void skbmgr_free_all_skbs(void)
+{
+ struct sk_buff_head *list;
+ struct sk_buff *skb;
+ int i;
+
+ for (i=0; i<SKBMGR_MAX_QUEUE; i++) {
+ list = &skbmgr_pool[i].list;
+ while ((skb = skb_dequeue(list)) != NULL) {
+ skb->skb_recycling_callback = NULL;
+ if (skb->skb_recycling_ind == SKBMGR_INDICATION)
+ atomic_dec(&skbmgr_alloc_no);
+ skb->skb_recycling_ind = 0;
+ skb_release_data(skb);
+ kfree_skbmem(skb);
+ }
+ }
+#if 0
+ //do not need free 4k poll here. shnwind.
+ for (i=0; i<SKBMGR_MAX_QUEUE; i++) {
+ list = &skbmgr_4k_pool[i].list;
+ while ((skb = skb_dequeue(list)) != NULL) {
+ skb->skb_recycling_callback = NULL;
+ if (skb->skb_recycling_ind == SKBMGR_4K_INDICATION)
+ atomic_dec(&skbmgr_4k_alloc_no);
+ skb->skb_recycling_ind = 0;
+ skb_release_data(skb);
+ kfree_skbmem(skb);
+ }
+ }
+#endif
+
+#ifdef SKBMGR_SINGLE_QUEUE
+ list = &skbmgr_4k_pool[0].list;
+ while ((skb = skb_dequeue(list)) != NULL) {
+ skb->skb_recycling_callback = NULL;
+ if (skb->skb_recycling_ind == SKBMGR_4K_INDICATION)
+ atomic_dec(&skbmgr_4k_alloc_no);
+ skb->skb_recycling_ind = 0;
+ kfree_skbmem(skb);
+ }
+#else
+ for (i=0; i<NR_CPUS; i++) {
+ list = &skbmgr_4k_pool[i].list;
+ while ((skb = skb_dequeue(list)) != NULL) {
+ skb->skb_recycling_callback = NULL;
+ if (skb->skb_recycling_ind == SKBMGR_4K_INDICATION)
+ atomic_dec(&skbmgr_4k_alloc_no);
+ skb->skb_recycling_ind = 0;
+ kfree_skbmem(skb);
+ }
+ }
+#endif
+
+#if defined(TC3262_GMAC_SG_MODE) || defined(TC3262_PTM_SG_MODE)
+ for (i=0; i<NR_CPUS; i++) {
+ list = &skbmgr_sg_pool[i].list;
+ while ((skb = skb_dequeue(list)) != NULL) {
+ skb->skb_recycling_callback = NULL;
+ kfree_skbmem(skb);
+ }
+ }
+#endif
+}
+
+static int hot_list_len_read(char *page, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ char *out = page;
+ int len;
+
+ out += sprintf(out, "skbmgr_hot_list_len %d skbmgr_4k_hot_list_len %d\n", skbmgr_hot_list_len,skbmgr_4k_hot_list_len);
+
+ len = out - page;
+ len -= offset;
+ if (len < count) {
+ *eof = 1;
+ if (len <= 0)
+ return 0;
+ } else
+ len = count;
+
+ *start = page + offset;
+ return len;
+}
+
+static int hot_list_len_write(struct file *file, const char __user * buffer,
+ unsigned long count, void *data)
+{
+	char buf[65] = {0};
+ int val;
+
+ if (count > 64)
+ return -EINVAL;
+
+ if (copy_from_user(buf, buffer, count))
+ return -EFAULT;
+
+ val = simple_strtoul(buf, NULL, 10);
+
+ skbmgr_hot_list_len = val;
+ if (skbmgr_hot_list_len == 0) {
+ skbmgr_free_all_skbs();
+ skbmgr_max_list_len = 0;
+#if defined(TC3262_GMAC_SG_MODE) || defined(TC3262_PTM_SG_MODE)
+ skbmgr_sg_max_list_len = 0;
+#endif
+ }
+
+ return count;
+}
+
+/*add proc function,user can change max_skb_num value */
+static int driver_max_skb_read(char *page, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ char *out = page;
+ int len;
+
+ out += sprintf(out, "%d (%d,%d)\n", g_max_skb_num, atomic_read(&g_used_skb_num), peak_skb_num);
+
+ len = out - page;
+ len -= offset;
+ if (len < count) {
+ *eof = 1;
+ if (len <= 0)
+ return 0;
+ } else
+ len = count;
+
+ *start = page + offset;
+ return len;
+}
+
+static int driver_max_skb_write(struct file *file, const char __user * buffer,
+ unsigned long count, void *data)
+{
+	char buf[65] = {0};
+ int val;
+
+ if (count > 64)
+ return -EINVAL;
+
+ if (copy_from_user(buf, buffer, count))
+ return -EFAULT;
+
+ val = simple_strtoul(buf, NULL, 10);
+
+ g_max_skb_num = val;
+
+ return count;
+}
+
+static int limit_read(char *page, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ char *out = page;
+ int len;
+
+ out += sprintf(out, "%d\n", skbmgr_limit);
+
+ len = out - page;
+ len -= offset;
+ if (len < count) {
+ *eof = 1;
+ if (len <= 0)
+ return 0;
+ } else
+ len = count;
+
+ *start = page + offset;
+ return len;
+}
+
+static int limit_write(struct file *file, const char __user * buffer,
+ unsigned long count, void *data)
+{
+	char buf[65] = {0};
+ int val;
+
+ if (count > 64)
+ return -EINVAL;
+
+ if (copy_from_user(buf, buffer, count))
+ return -EFAULT;
+
+ val = simple_strtoul(buf, NULL, 10);
+
+ skbmgr_limit = val;
+ if (skbmgr_hot_list_len == 0) {
+ skbmgr_free_all_skbs();
+ skbmgr_max_list_len = 0;
+#if defined(TC3262_GMAC_SG_MODE) || defined(TC3262_PTM_SG_MODE)
+ skbmgr_sg_max_list_len = 0;
+#endif
+ }
+
+ return count;
+}
+
+
+
+static int limit_4k_read(char *page, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ char *out = page;
+ int len;
+
+ out += sprintf(out, "%d\n", skbmgr_4k_limit);
+
+ len = out - page;
+ len -= offset;
+ if (len < count) {
+ *eof = 1;
+ if (len <= 0)
+ return 0;
+ } else
+ len = count;
+
+ *start = page + offset;
+ return len;
+}
+
+static int limit_4k_write(struct file *file, const char __user * buffer,
+ unsigned long count, void *data)
+{
+	char buf[65] = {0};
+ int val;
+ struct sk_buff_head *list;
+ int i;
+ struct sk_buff *skb;
+
+ if (count > 64)
+ return -EINVAL;
+
+ if (copy_from_user(buf, buffer, count))
+ return -EFAULT;
+
+ val = simple_strtoul(buf, NULL, 10);
+
+ skbmgr_4k_limit = val;
+ if (skbmgr_hot_list_len == 0) {
+ skbmgr_free_all_skbs();
+ skbmgr_max_list_len = 0;
+#if defined(TC3262_GMAC_SG_MODE) || defined(TC3262_PTM_SG_MODE)
+ skbmgr_sg_max_list_len = 0;
+#endif
+ }
+ return count;
+}
+
+
+static int skbmgr_info_read(char *page, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ char *out = page;
+ int len;
+ struct sk_buff_head *list;
+ int i;
+
+ out += sprintf(out, "skbmgr_limit = %d\n", skbmgr_limit);
+ out += sprintf(out, "skbmgr_max_alloc_no = %d\n", skbmgr_max_alloc_no);
+ out += sprintf(out, "skbmgr_alloc_fail = %d\n", skbmgr_alloc_fail);
+ out += sprintf(out, "skbmgr_alloc_no = %d\n", atomic_read(&skbmgr_alloc_no));
+ out += sprintf(out, "skbmgr_max_list_len = %d\n", skbmgr_max_list_len);
+ out += sprintf(out, "skbmgr_alloc_normal = %d\n", skbmgr_alloc_normal);
+ out += sprintf(out, "skbmgr_4k_limit = %d\n", skbmgr_4k_limit);
+ out += sprintf(out, "skbmgr_4k_max_alloc_no = %d\n", skbmgr_4k_max_alloc_no);
+ out += sprintf(out, "skbmgr_4k_alloc_fail = %d\n", skbmgr_4k_alloc_fail);
+ out += sprintf(out, "skbmgr_4k_alloc_no = %d\n", atomic_read(&skbmgr_4k_alloc_no));
+ out += sprintf(out, "skbmgr_4k_max_list_len = %d\n", skbmgr_4k_max_list_len);
+ out += sprintf(out, "skbmgr_4k_alloc_normal = %d\n", skbmgr_4k_alloc_normal);
+#if defined(TC3262_GMAC_SG_MODE) || defined(TC3262_PTM_SG_MODE)
+ out += sprintf(out, "skbmgr_sg_max_list_len = %d\n", skbmgr_sg_max_list_len);
+#endif
+
+ for (i=0; i<SKBMGR_MAX_QUEUE; i++) {
+ list = &skbmgr_pool[i].list;
+ out += sprintf(out, "skbmgr_queue_len CPU%d = %d\n", i, skb_queue_len(list));
+ }
+
+ for (i=0; i<SKBMGR_MAX_QUEUE; i++) {
+ list = &skbmgr_4k_pool[i].list;
+ out += sprintf(out, "skbmgr_4k_queue_len CPU%d = %d\n", i, skb_queue_len(list));
+ }
+#if defined(TC3262_GMAC_SG_MODE) || defined(TC3262_PTM_SG_MODE)
+ for (i=0; i<SKBMGR_MAX_QUEUE; i++) {
+ list = &skbmgr_sg_pool[i].list;
+ out += sprintf(out, "skbmgr_sg_queue_len CPU%d = %d\n", i, skb_queue_len(list));
+ }
+#endif
+
+ len = out - page;
+ len -= offset;
+ if (len < count) {
+ *eof = 1;
+ if (len <= 0)
+ return 0;
+ } else
+ len = count;
+
+ *start = page + offset;
+ return len;
+}
+
+#if defined(TCSUPPORT_MEMORY_CONTROL) || defined(TCSUPPORT_CT)
+int auto_clear_cache_flag = 0;
+int auto_kill_process_flag = 0;
+static int auto_clear_cache_read(char *page, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ char *out = page;
+ int len;
+
+ out += sprintf(out, "%d\n", auto_clear_cache_flag);
+
+ len = out - page;
+ len -= offset;
+ if (len < count) {
+ *eof = 1;
+ if (len <= 0)
+ return 0;
+ } else
+ len = count;
+
+ *start = page + offset;
+ return len;
+}
+
+static int auto_clear_cache_write(struct file *file, const char __user * buffer,
+ unsigned long count, void *data)
+{
+	char buf[65] = {0};
+ int val;
+
+ if (count > 64)
+ return -EINVAL;
+
+ if (copy_from_user(buf, buffer, count))
+ return -EFAULT;
+
+ val = simple_strtoul(buf, NULL, 10);
+
+ auto_clear_cache_flag = val;
+
+ return count;
+}
+
+static int auto_kill_process_read(char *page, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ char *out = page;
+ int len;
+
+ out += sprintf(out, "%d\n", auto_kill_process_flag);
+
+ len = out - page;
+ len -= offset;
+ if (len < count) {
+ *eof = 1;
+ if (len <= 0)
+ return 0;
+ } else
+ len = count;
+
+ *start = page + offset;
+ return len;
+}
+
+static int auto_kill_process_write(struct file *file, const char __user * buffer,
+ unsigned long count, void *data)
+{
+	char buf[65] = {0};
+ int val;
+
+ if (count > 64)
+ return -EINVAL;
+
+ if (copy_from_user(buf, buffer, count))
+ return -EFAULT;
+
+ val = simple_strtoul(buf, NULL, 10);
+
+ auto_kill_process_flag = val;
+
+ return count;
+}
+#endif
+int register_proc_skbmgr(void)
+{
+ struct proc_dir_entry *p;
+
+ p = create_proc_entry("skbmgr_hot_list_len", 0644, init_net.proc_net);
+ if (!p)
+ return 0;
+
+ p->read_proc = hot_list_len_read;
+ p->write_proc = hot_list_len_write;
+
+ p = create_proc_read_entry("skbmgr_info", 0, init_net.proc_net, skbmgr_info_read, NULL);
+ if (!p)
+ return 0;
+
+ p = create_proc_entry("skbmgr_limit", 0644, init_net.proc_net);
+ if (!p)
+ return 0;
+
+ p->read_proc = limit_read;
+ p->write_proc = limit_write;
+
+ p = create_proc_entry("skbmgr_4k_limit", 0644, init_net.proc_net);
+ if (!p)
+ return 0;
+ p->read_proc = limit_4k_read;
+ p->write_proc = limit_4k_write;
+
+
+ p = create_proc_entry("skbmgr_driver_max_skb", 0644, init_net.proc_net);
+ if (!p)
+ return 0;
+
+ p->read_proc = driver_max_skb_read;
+ p->write_proc = driver_max_skb_write;
+
+#if defined(TCSUPPORT_MEMORY_CONTROL) || defined(TCSUPPORT_CT)
+ p = create_proc_entry("auto_clear_cache", 0644, init_net.proc_net);
+ if (!p)
+ return 0;
+
+ p->read_proc = auto_clear_cache_read;
+ p->write_proc = auto_clear_cache_write;
+
+ p = create_proc_entry("auto_kill_process", 0644, init_net.proc_net);
+ if (!p)
+ return 0;
+
+ p->read_proc = auto_kill_process_read;
+ p->write_proc = auto_kill_process_write;
+#endif
+ return 1;
+}
+
+static void unregister_proc_skbmgr(void)
+{
+ remove_proc_entry("skbmgr_hot_list_len", init_net.proc_net);
+ remove_proc_entry("skbmgr_info", init_net.proc_net);
+	remove_proc_entry("skbmgr_limit", init_net.proc_net); remove_proc_entry("skbmgr_4k_limit", init_net.proc_net);
+ remove_proc_entry("skbmgr_driver_max_skb", init_net.proc_net);
+#if defined(TCSUPPORT_MEMORY_CONTROL) || defined(TCSUPPORT_CT)
+ remove_proc_entry("auto_clear_cache", init_net.proc_net);
+ remove_proc_entry("auto_kill_process", init_net.proc_net);
+#endif
+}
+
+void skbmgr_4k_pool_init(void){
+ int i;
+
+ for (i=0; i<SKBMGR_MAX_QUEUE; i++) {
+ skb_queue_head_init(&skbmgr_4k_pool[i].list);
+ }
+}
+
+void skbmgr_pool_init(void){
+ int i;
+ for (i=0; i<SKBMGR_MAX_QUEUE; i++) {
+ skb_queue_head_init(&skbmgr_pool[i].list);
+ }
+}
+
+void ecnt_skb_init(void){
+ int i;
+
+ atomic_set(&skbmgr_alloc_no, 0);
+ atomic_set(&g_used_skb_num, 0);
+
+ skbmgr_pool_init();
+ skbmgr_4k_pool_init();
+
+#if defined(TC3262_GMAC_SG_MODE) || defined(TC3262_PTM_SG_MODE)
+ for (i=0; i<NR_CPUS; i++) {
+ skb_queue_head_init(&skbmgr_sg_pool[i].list);
+ }
+#endif
+
+ register_proc_skbmgr();
+}
Index: linux-3.18.21/net/core/ecnt_voip_proc.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/core/ecnt_voip_proc.c 2018-02-05 14:20:20.000000000 +0800
@@ -0,0 +1,4 @@
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <asm/uaccess.h>
+
Index: linux-3.18.21/net/core/ethtool.c
===================================================================
--- linux-3.18.21.orig/net/core/ethtool.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/core/ethtool.c 2018-02-05 13:21:30.000000000 +0800
@@ -1953,3 +1953,6 @@
return rc;
}
+
+EXPORT_SYMBOL(dev_ethtool);
+
Index: linux-3.18.21/net/core/neighbour.c
===================================================================
--- linux-3.18.21.orig/net/core/neighbour.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/core/neighbour.c 2018-02-05 13:21:30.000000000 +0800
@@ -40,7 +40,7 @@
#include <linux/log2.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
-
+#include "ecnt_net_core.h"
#define DEBUG
#define NEIGH_DEBUG 1
#define neigh_dbg(level, fmt, ...) \
@@ -1321,7 +1321,7 @@
/* Slow and careful. */
-int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
+__IMEM int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
int rc = 0;
@@ -1362,7 +1362,7 @@
/* As fast as possible without hh cache */
-int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
+__IMEM int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
{
struct net_device *dev = neigh->dev;
unsigned int seq;
@@ -3013,6 +3013,14 @@
NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
+#ifdef TCSUPPORT_IPV6_ENHANCEMENT
+ [NEIGH_VAR_DEFAULT_ROUTE] = {
+ .procname = "default_route",
+ .maxlen = 64,
+ .mode = 0644,
+ .proc_handler = proc_dostring,
+ },
+#endif
[NEIGH_VAR_GC_INTERVAL] = {
.procname = "gc_interval",
.maxlen = sizeof(int),
@@ -3069,6 +3077,8 @@
if (dev) {
dev_name_source = dev->name;
/* Terminate the table early */
+
+ /*add proc before "NEIGH_VAR_GC_INTERVAL", such as "NEIGH_VAR_DEFAULT_ROUTE"*/
memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
} else {
@@ -3080,6 +3090,8 @@
t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
}
+ ecnt_neighbour_sysctl_register_inline_hook(t->neigh_vars, p, dev);
+
if (handler) {
/* RetransTime */
t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
Index: linux-3.18.21/net/core/qos_type.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/core/qos_type.c 2018-02-05 13:21:30.000000000 +0800
@@ -0,0 +1,187 @@
+#include <net/sock.h>
+#include <linux/qos_type.h>
+
+static struct qos_action qa[MAX_RULE_NUM];
+
+int qostype_chk(int chk_type, int rule_no, char *wan_if, int rtp_match)
+{
+ struct qos_action *pqa = NULL;
+ char value[MAX_BUF_LEN];
+ char *pval = NULL;
+ char *p = NULL;
+
+ if (rule_no < 0 || rule_no > MAX_RULE_NUM - 1) {
+ printk("%s:rule no should be between 0 and %d.\n", __FUNCTION__, MAX_RULE_NUM - 1);
+ return -1;
+ }
+
+ pqa = qa + rule_no;
+
+ if (chk_type == EBT_CHK_TYPE) {
+ if (!strcmp(pqa->qm[0].type_name, "wan_if") || !strcmp(pqa->qm[1].type_name, "rtp_proto")) {
+ return 0;
+ }
+ else {
+ return -1;
+ }
+ }
+
+ if (wan_if == NULL) {
+ return -1;
+ }
+
+ if (!strcmp(pqa->qm[0].type_name, "wan_if")) {
+ strcpy(value, pqa->qm[0].type_value);
+ /* check if wan interface is in group */
+ pval = value;
+ while ((p = strsep(&pval, ",")) != NULL) {
+ if (!strcmp(p, wan_if)) {
+ goto rtp_proto_handle;
+ }
+ }
+ return -1;
+ }
+
+rtp_proto_handle:
+ if ( (strcmp(pqa->qm[1].type_name, "rtp_proto") != 0) ||
+ (!strcmp(pqa->qm[1].type_name, "rtp_proto") && (1 == rtp_match)) ) {
+ return 0;
+ }
+
+ return -1;
+}
+
+int set_tos(int rule_no, unsigned int tos)
+{
+ struct qos_action *pqa = NULL;
+	if (rule_no < 0 || rule_no > MAX_RULE_NUM - 1) {
+ printk("%s:rule no should be between 0 and %d.\n", __FUNCTION__, MAX_RULE_NUM - 1);
+ return -1;
+ }
+
+ pqa = &qa[rule_no];
+
+ pqa->dscp_flag = 1;
+ pqa->dscp = tos;
+ return 0;
+}
+
+int unset_tos(int rule_no)
+{
+ struct qos_action *pqa = NULL;
+	if (rule_no < 0 || rule_no > MAX_RULE_NUM - 1) {
+ printk("%s:rule no should be between 0 and %d.\n", __FUNCTION__, MAX_RULE_NUM - 1);
+ return -1;
+ }
+
+ pqa = &qa[rule_no];
+
+ pqa->dscp_flag = 0;
+
+ return 0;
+}
+
+int get_tos(int rule_no, unsigned int *tos)
+{
+ struct qos_action *pqa = NULL;
+	if (rule_no < 0 || rule_no > MAX_RULE_NUM - 1) {
+ printk("%s:rule no should be between 0 and %d.\n", __FUNCTION__, MAX_RULE_NUM - 1);
+ return -1;
+ }
+
+ pqa = &qa[rule_no];
+
+ if (0 == pqa->dscp_flag) {
+ return -1;
+ }
+
+ *tos = pqa->dscp;
+
+ return 0;
+}
+
+static int qostype_settype(struct qos_type *pqt)
+{
+ int rule_no = pqt->rule_no;
+ if (rule_no < 0 || rule_no > MAX_RULE_NUM - 1) {
+ printk("%s:rule no should be between 0 and %d.\n", __FUNCTION__, MAX_RULE_NUM - 1);
+ return -1;
+ }
+
+ memcpy(qa[pqt->rule_no].qm, pqt->qm, sizeof(struct qos_match)*2);
+
+ return 0;
+}
+
+static long
+qostype_ioctl(struct file *filp,
+	unsigned int cmd, unsigned long arg)
+{
+ struct qos_type qt;
+
+ switch (cmd) {
+ case QOSTYPE_IOC_SET_TYPE:
+ if (copy_from_user(&qt, (struct qos_type*)arg, sizeof(qt))) {
+ return -EFAULT;
+ }
+ qostype_settype(&qt);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int qostype_open(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static struct file_operations qostype_fops = {
+ .owner = THIS_MODULE,
+ .write = NULL,
+ .read = NULL,
+ .unlocked_ioctl = qostype_ioctl,
+ .open = qostype_open,
+ .release = NULL,
+};
+
+/***************************************************************************
+ * Function Name: qostype_drv_init
+ * Description : Initialization of qostype driver
+ * Returns : 0
+ ***************************************************************************/
+static int __init qostype_drv_init(void)
+{
+ int status = 0;
+
+ /* qostype ioctl */
+ status = register_chrdev(QOSTYPE_MAJOR, "qostype", &qostype_fops);
+ if (status < 0)
+ return status;
+
+ memset(qa, 0, sizeof(qa));
+
+ return 0;
+}
+
+
+/*_______________________________________________________________________
+** Function Name: qostype_drv_exit
+** Description : qostype module clean routine
+** Returns : None
+**_______________________________________________________________________
+*/
+static void __exit qostype_drv_exit(void)
+{
+ unregister_chrdev(QOSTYPE_MAJOR, "qostype");
+}
+
+EXPORT_SYMBOL(qostype_chk);
+EXPORT_SYMBOL(set_tos);
+EXPORT_SYMBOL(unset_tos);
+
+module_init(qostype_drv_init);
+module_exit(qostype_drv_exit);
+
Index: linux-3.18.21/net/core/skbuff.c
===================================================================
--- linux-3.18.21.orig/net/core/skbuff.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/core/skbuff.c 2018-02-05 13:21:30.000000000 +0800
@@ -74,6 +74,8 @@
#include <asm/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
+#include <linux/ecnt_skbuff.h>
+#include "ecnt_net_core.h"
struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;
@@ -120,7 +122,7 @@
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
__kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)
-static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
+__IMEM static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
unsigned long ip, bool *pfmemalloc)
{
void *obj;
@@ -194,8 +196,8 @@
*
* Buffers may only be allocated from interrupts using a @gfp_mask of
* %GFP_ATOMIC.
- */
-struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
+ */
+__IMEM struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
int flags, int node)
{
struct kmem_cache *cache;
@@ -214,6 +216,13 @@
skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
if (!skb)
goto out;
+#if defined(CONFIG_CPU_TC3162) || defined(CONFIG_MIPS_TC3262)
+ /*if number of skb reach the max number,go to nodata*/
+ if(atomic_read(&g_used_skb_num) > g_max_skb_num){
+ goto nodata;
+ }
+#endif
+
prefetchw(skb);
/* We do our best to align skb_shared_info on a separate cache
@@ -232,13 +241,25 @@
*/
size = SKB_WITH_OVERHEAD(ksize(data));
prefetchw(data + size);
+#if defined(CONFIG_CPU_TC3162) || defined(CONFIG_MIPS_TC3262)
+ /*alloc successfully*/
+ atomic_inc(&g_used_skb_num);
+ if(peak_skb_num < atomic_read(&g_used_skb_num))
+ {
+ peak_skb_num = atomic_read(&g_used_skb_num);
+ }
+#endif
/*
* Only clear those fields we need to clear, not those that we will
* actually initialise below. Hence, don't put any more fields after
* the tail pointer in struct sk_buff!
*/
+#ifdef TCSUPPORT_WLAN_SW_RPS
+ memset(skb, 0, offsetof(struct sk_buff, pAd));
+#else
memset(skb, 0, offsetof(struct sk_buff, tail));
+#endif
/* Account for allocated memory : skb + skb->head */
skb->truesize = SKB_TRUESIZE(size);
skb->pfmemalloc = pfmemalloc;
@@ -302,6 +323,13 @@
struct sk_buff *skb;
unsigned int size = frag_size ? : ksize(data);
+
+#if defined(CONFIG_CPU_TC3162) || defined(CONFIG_MIPS_TC3262)
+	/* if the number of in-use skbs exceeds g_max_skb_num, fail the allocation (return NULL) */
+ if(atomic_read(&g_used_skb_num) > g_max_skb_num){
+ return NULL;
+ }
+#endif
skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
if (!skb)
return NULL;
@@ -323,7 +351,14 @@
memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
atomic_set(&shinfo->dataref, 1);
kmemcheck_annotate_variable(shinfo->destructor_arg);
-
+#if defined(CONFIG_CPU_TC3162) || defined(CONFIG_MIPS_TC3262)
+ /*alloc successfully*/
+ atomic_inc(&g_used_skb_num);
+ if(peak_skb_num < atomic_read(&g_used_skb_num))
+ {
+ peak_skb_num = atomic_read(&g_used_skb_num);
+ }
+#endif
return skb;
}
@@ -516,9 +551,13 @@
put_page(virt_to_head_page(skb->head));
else
kfree(skb->head);
+#if defined(CONFIG_CPU_TC3162) || defined(CONFIG_MIPS_TC3262)
+	/* decrement the global count of in-use skbs */
+ atomic_dec(&g_used_skb_num);
+#endif
}
-static void skb_release_data(struct sk_buff *skb)
+__IMEM void skb_release_data(struct sk_buff *skb)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
int i;
@@ -552,10 +591,15 @@
/*
* Free an skbuff by memory without cleaning the state.
*/
-static void kfree_skbmem(struct sk_buff *skb)
+__IMEM void kfree_skbmem(struct sk_buff *skb)
{
struct sk_buff_fclones *fclones;
+#if defined(TCSUPPORT_RA_HWNAT)
+ if (ra_sw_nat_hook_free)
+ ra_sw_nat_hook_free(skb);
+#endif
+
switch (skb->fclone) {
case SKB_FCLONE_UNAVAILABLE:
kmem_cache_free(skbuff_head_cache, skb);
@@ -581,7 +625,7 @@
}
}
-static void skb_release_head_state(struct sk_buff *skb)
+__IMEM static void skb_release_head_state(struct sk_buff *skb)
{
skb_dst_drop(skb);
#ifdef CONFIG_XFRM
@@ -604,6 +648,16 @@
skb->tc_verd = 0;
#endif
#endif
+#if defined(CONFIG_CPU_TC3162) || defined(CONFIG_MIPS_TC3262)
+ if (skb->skb_recycling_ind == SKBMGR_INDICATION) {
+ atomic_dec(&skbmgr_alloc_no);
+ skb->skb_recycling_ind = 0;
+ }else if(skb->skb_recycling_ind == SKBMGR_4K_INDICATION){
+ atomic_dec(&skbmgr_4k_alloc_no);
+ skb->skb_recycling_ind = 0;
+ }
+#endif
+
}
/* Free everything but the sk_buff shell. */
@@ -623,9 +677,22 @@
* always call kfree_skb
*/
-void __kfree_skb(struct sk_buff *skb)
+__IMEM void __kfree_skb(struct sk_buff *skb)
{
+#if defined(CONFIG_CPU_TC3162) || defined(CONFIG_MIPS_TC3262)
+ skb_release_head_state(skb);
+
+ if (skb->skb_recycling_callback) {
+ if ((*skb->skb_recycling_callback)(skb)) {
+ return;
+ }
+ }
+ skb->skb_recycling_callback = NULL;
+
+ skb_release_data(skb);
+#else
skb_release_all(skb);
+#endif
kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);
@@ -637,7 +704,7 @@
* Drop a reference to the buffer and free it if the usage count has
* hit zero.
*/
-void kfree_skb(struct sk_buff *skb)
+__IMEM void kfree_skb(struct sk_buff *skb)
{
if (unlikely(!skb))
return;
@@ -689,7 +756,7 @@
* Functions identically to kfree_skb, but kfree_skb assumes that the frame
* is being dropped after a failure and notes that
*/
-void consume_skb(struct sk_buff *skb)
+__IMEM void consume_skb(struct sk_buff *skb)
{
if (unlikely(!skb))
return;
@@ -725,7 +792,6 @@
* It is not yet because we do not want to have a 16 bit hole
*/
new->queue_mapping = old->queue_mapping;
-
memcpy(&new->headers_start, &old->headers_start,
offsetof(struct sk_buff, headers_end) -
offsetof(struct sk_buff, headers_start));
@@ -757,6 +823,15 @@
#endif
#endif
+#if defined(CONFIG_CPU_TC3162) || defined(CONFIG_MIPS_TC3262)
+ if (new->skb_recycling_ind == SKBMGR_INDICATION) {
+ atomic_dec(&skbmgr_alloc_no);
+ }else if(new->skb_recycling_ind == SKBMGR_4K_INDICATION){
+ atomic_dec(&skbmgr_4k_alloc_no);
+ }
+ new->skb_recycling_ind = 0;
+ new->skb_recycling_callback = NULL;
+#endif
}
/*
@@ -778,6 +853,11 @@
n->cloned = 1;
n->nohdr = 0;
n->destructor = NULL;
+#if defined(CONFIG_CPU_TC3162) || defined(CONFIG_MIPS_TC3262)
+ n->skb_recycling_callback = NULL;
+ n->skb_recycling_ind = 0;
+ skb->skb_recycling_callback = NULL;
+#endif
C(tail);
C(end);
C(head);
@@ -931,7 +1011,8 @@
}
static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
-{
+{
+
__copy_skb_header(new, old);
skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
@@ -1078,6 +1159,11 @@
if (skb_shared(skb))
BUG();
+#if defined(CONFIG_CPU_TC3162) || defined(CONFIG_MIPS_TC3262)
+	/* if the number of in-use skbs exceeds g_max_skb_num, abort the expansion (goto nodata) */
+ if(atomic_read(&g_used_skb_num) > g_max_skb_num)
+ goto nodata;
+#endif
size = SKB_DATA_ALIGN(size);
@@ -1087,6 +1173,10 @@
gfp_mask, NUMA_NO_NODE, NULL);
if (!data)
goto nodata;
+#if defined(CONFIG_CPU_TC3162) || defined(CONFIG_MIPS_TC3262)
+ /*add skb num*/
+ atomic_inc(&g_used_skb_num);
+#endif
size = SKB_WITH_OVERHEAD(ksize(data));
/* Copy only real data... and, alas, header. This should be
@@ -1112,6 +1202,9 @@
if (skb_has_frag_list(skb))
skb_clone_fraglist(skb);
+#if defined(CONFIG_CPU_TC3162) || defined(CONFIG_MIPS_TC3262)
+ skb->skb_recycling_callback = NULL;
+#endif
skb_release_data(skb);
} else {
@@ -1182,7 +1275,7 @@
* You must pass %GFP_ATOMIC as the allocation priority if this function
* is called from an interrupt.
*/
-struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
+__IMEM struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
int newheadroom, int newtailroom,
gfp_t gfp_mask)
{
@@ -1301,7 +1394,7 @@
* exceed the total buffer size the kernel will panic. A pointer to the
* first byte of the extra data is returned.
*/
-unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
+__IMEM unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
unsigned char *tmp = skb_tail_pointer(skb);
SKB_LINEAR_ASSERT(skb);
@@ -1322,7 +1415,7 @@
* start. If this would exceed the total buffer headroom the kernel will
* panic. A pointer to the first byte of the extra data is returned.
*/
-unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
+__IMEM unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
skb->data -= len;
skb->len += len;
@@ -1342,7 +1435,7 @@
* is returned. Once the data has been pulled future pushes will overwrite
* the old data.
*/
-unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
+__IMEM unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
return skb_pull_inline(skb, len);
}
@@ -1357,7 +1450,7 @@
* the buffer is already under the length specified it is not modified.
* The skb must be linear.
*/
-void skb_trim(struct sk_buff *skb, unsigned int len)
+__IMEM void skb_trim(struct sk_buff *skb, unsigned int len)
{
if (skb->len > len)
__skb_trim(skb, len);
@@ -1367,7 +1460,7 @@
/* Trims skb to length len. It can change skb pointers.
*/
-int ___pskb_trim(struct sk_buff *skb, unsigned int len)
+__IMEM int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
struct sk_buff **fragp;
struct sk_buff *frag;
@@ -1475,7 +1568,7 @@
*
* It is pretty complicated. Luckily, it is called only in exceptional cases.
*/
-unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
+__IMEM unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
/* If skb has not enough free space at tail, get new one
* plus 128 bytes for future expansions. If we have enough
@@ -1607,7 +1700,7 @@
* check arch/{*}/net/{*}.S files,
* since it is called from BPF assembly code.
*/
-int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
+__IMEM int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
int start = skb_headlen(skb);
struct sk_buff *frag_iter;
@@ -2265,7 +2358,7 @@
* returned or %NULL if the list is empty.
*/
-struct sk_buff *skb_dequeue(struct sk_buff_head *list)
+__IMEM struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
unsigned long flags;
struct sk_buff *result;
@@ -2324,7 +2417,7 @@
*
* A buffer cannot be placed on two lists at the same time.
*/
-void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+__IMEM void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
unsigned long flags;
@@ -3270,6 +3363,10 @@
void __init skb_init(void)
{
+#if defined(CONFIG_CPU_TC3162) || defined(CONFIG_MIPS_TC3262)
+ ecnt_skb_init();
+#endif
+
skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
sizeof(struct sk_buff),
0,
@@ -4100,7 +4197,7 @@
}
EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
-static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
+struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
{
if (skb_cow(skb, skb_headroom(skb)) < 0) {
kfree_skb(skb);
@@ -4131,6 +4228,7 @@
vhdr = (struct vlan_hdr *)skb->data;
vlan_tci = ntohs(vhdr->h_vlan_TCI);
+ ecnt_vlan_untag_inline_hook(skb,vlan_tci);
__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
skb_pull_rcsum(skb, VLAN_HLEN);
Index: linux-3.18.21/net/ecnt_net.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/ecnt_net.h 2018-02-05 14:20:41.000000000 +0800
@@ -0,0 +1,40 @@
+#ifndef _LINUX_ECNT_NET_H
+#define _LINUX_ECNT_NET_H
+#include <uapi/linux/in.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <net/sock.h>
+#include <net/inet_sock.h>
+#include <ecnt_hook/ecnt_hook.h>
+
+
+/*for alg switch*/
+extern int nf_conntrack_ftp_enable;
+extern int nf_conntrack_sip_enable;
+extern int nf_conntrack_h323_enable;
+extern int nf_conntrack_rtsp_enable;
+extern int nf_conntrack_l2tp_enable;
+extern int nf_conntrack_ipsec_enable;
+extern int nf_conntrack_pptp_enable;
+extern int nf_conntrack_portscan_enable;
+extern int nf_conntrack_ftp_port;
+extern int nf_conntrack_esp_timeout;
+
+static inline int ecnt_skbmark_to_sockmark_copy
+(struct sock *sk, struct sk_buff *skb)
+{
+ if ( !sk || !skb )
+ return ECNT_CONTINUE;
+
+ sk->sk_mark &= 0x0fffffff;
+	/* do not clobber the QoS info already stored in sk->sk_mark */
+ sk->sk_mark |= (skb->mark & (~QOS_FILTER_MARK));
+
+ return ECNT_CONTINUE;
+}
+
+
+
+
+#endif
+
Index: linux-3.18.21/net/ethernet/eth.c
===================================================================
--- linux-3.18.21.orig/net/ethernet/eth.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ethernet/eth.c 2018-02-05 13:21:30.000000000 +0800
@@ -75,7 +75,7 @@
* Set the protocol type. For a packet of type ETH_P_802_3/2 we put the length
* in here instead.
*/
-int eth_header(struct sk_buff *skb, struct net_device *dev,
+__IMEM int eth_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr, unsigned int len)
{
@@ -181,7 +181,7 @@
* assume 802.3 if the type field is short enough to be a length.
* This is normal practice and works for any 'now in use' protocol.
*/
-__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
+__IMEM __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
{
unsigned short _service_access_point;
const unsigned short *sap;
Index: linux-3.18.21/net/ipv4/Makefile
===================================================================
--- linux-3.18.21.orig/net/ipv4/Makefile 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv4/Makefile 2018-02-05 13:21:30.000000000 +0800
@@ -28,6 +28,10 @@
obj-$(CONFIG_SYN_COOKIES) += syncookies.o
obj-$(CONFIG_INET_AH) += ah4.o
obj-$(CONFIG_INET_ESP) += esp4.o
+#ifneq ($(TCSUPPORT_IPSEC_PASSTHROUGH),)
+#obj-$(CONFIG_INET_ESP) += mtk_esp4.o
+#move to modules crypto for kernel 3.18
+#endif
obj-$(CONFIG_INET_IPCOMP) += ipcomp.o
obj-$(CONFIG_INET_XFRM_TUNNEL) += xfrm4_tunnel.o
obj-$(CONFIG_INET_XFRM_MODE_BEET) += xfrm4_mode_beet.o
Index: linux-3.18.21/net/ipv4/arp.c
===================================================================
--- linux-3.18.21.orig/net/ipv4/arp.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv4/arp.c 2018-02-05 13:21:30.000000000 +0800
@@ -117,6 +117,7 @@
#include <linux/uaccess.h>
#include <linux/netfilter_arp.h>
+#include "ecnt_net_ipv4.h"
/*
* Interface to generic neighbour cache.
@@ -684,6 +685,7 @@
*/
void arp_xmit(struct sk_buff *skb)
{
+ ecnt_arp_xmit_inline_hook(skb);
/* Send it off, maybe filter it using firewalling first. */
NF_HOOK(NFPROTO_ARP, NF_ARP_OUT, skb, NULL, skb->dev, dev_queue_xmit);
}
Index: linux-3.18.21/net/ipv4/ecnt_net_ipv4.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/ipv4/ecnt_net_ipv4.h 2018-02-05 14:20:41.000000000 +0800
@@ -0,0 +1,256 @@
+#ifndef _LINUX_ECNT_NET_IPV4_H
+#define _LINUX_ECNT_NET_IPV4_H
+#include <uapi/linux/in.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <ecnt_hook/ecnt_hook.h>
+#include <linux/ecnt_vlan_bind.h>
+#include "../ecnt_net.h"
+#include <linux/foe_hook.h>
+
+#if defined(TCSUPPORT_TSO_ENABLE)
+#include <ecnt_hook/ecnt_hook_tso.h>
+#endif
+
+#define IP_CMSG_PKTINFO 1
+#define IP_CMSG_TTL 2
+#define IP_CMSG_TOS 4
+#define IP_CMSG_RECVOPTS 8
+#define IP_CMSG_RETOPTS 16
+#define IP_CMSG_PASSSEC 32
+#define IP_CMSG_ORIGDSTADDR 64
+#define IP_CMSG_SKB_MARK 128
+#define IP_CMSG_VLAN_ID 256
+
+static inline int ecnt_do_ip_setsockopt_inline_hook
+(struct sock *sk, int level, struct inet_sock *inet,
+int optname, char __user *optval, unsigned int optlen, int *err)
+{
+ int val = 0;
+
+ switch ( optname )
+ {
+ case IP_SKB_MARK_FLAG:
+ case IP_SKB_VLAN_ID_FLAG:
+ case IP_SKB_MARK:
+ case IP_SKB_VLAN_ID:
+ if (optlen >= sizeof(int))
+ {
+ if (get_user(val, (int __user *) optval))
+ return ECNT_HOOK_ERROR;
+ }
+ else if (optlen >= sizeof(__u16))
+ {
+ __u16 u16val;
+ if (get_user(u16val, (__u16 __user *) optval))
+ return ECNT_HOOK_ERROR;
+ val = (int) u16val;
+ }
+ else if (optlen >= sizeof(char))
+ {
+ unsigned char ucval;
+ if (get_user(ucval, (unsigned char __user *) optval))
+ return ECNT_HOOK_ERROR;
+ val = (int) ucval;
+ }
+ break;
+ default:
+ break;
+ }
+
+ switch ( optname )
+ {
+ case IP_SKB_MARK_FLAG:
+ if ( val )
+ inet->cmsg_flags |= IP_CMSG_SKB_MARK;
+ else
+ inet->cmsg_flags &= ~IP_CMSG_SKB_MARK;
+ *err = 0;
+ break;
+ case IP_SKB_VLAN_ID_FLAG:
+ if ( val )
+ inet->cmsg_flags |= IP_CMSG_VLAN_ID;
+ else
+ inet->cmsg_flags &= ~IP_CMSG_VLAN_ID;
+ *err = 0;
+ break;
+ case IP_SKB_MARK:
+ sk->sk_mark = val;
+ *err = 0;
+ break;
+ case IP_SKB_VLAN_ID:
+ sk->lVlanId = val;
+ *err = 0;
+ break;
+ default:
+ break;
+ }
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_do_ip_getsockopt_inline_hook
+(struct sock *sk, int level, struct inet_sock *inet,
+int optname, char __user *optval, int __user *optlen, unsigned int flags)
+{
+ int val = 0, len = 0;
+
+ if (get_user(len, optlen))
+ return ECNT_HOOK_ERROR;
+ if (len < 0)
+ return ECNT_HOOK_ERROR;
+
+ switch ( optname )
+ {
+ case IP_SKB_MARK_FLAG:
+ val = (inet->cmsg_flags & IP_CMSG_SKB_MARK) != 0;
+ break;
+ case IP_SKB_VLAN_ID_FLAG:
+ val = (inet->cmsg_flags & IP_CMSG_VLAN_ID) != 0;
+ break;
+ case IP_SKB_MARK:
+ val = sk->sk_mark;
+ break;
+ case IP_SKB_VLAN_ID:
+ val = sk->lVlanId;
+ break;
+ default:
+ return ECNT_CONTINUE;
+ }
+
+ if (len < sizeof(u16) && len > 0 && val >= 0 && val <= 255) {
+ unsigned char ucval = (unsigned char)val;
+ len = 1;
+ if (put_user(len, optlen))
+ return ECNT_HOOK_ERROR;
+ if (copy_to_user(optval, &ucval, 1))
+ return ECNT_HOOK_ERROR;
+ } else {
+ len = min_t(unsigned int, sizeof(int), len);
+ if (put_user(len, optlen))
+ return ECNT_HOOK_ERROR;
+ if (copy_to_user(optval, &val, len))
+ return ECNT_HOOK_ERROR;
+ }
+
+
+ return ECNT_RETURN;
+}
+
+static inline void ip_cmsg_recv_skbmark
+(struct msghdr *msg, struct sk_buff *skb)
+{
+ __u32 skb_mark = skb->mark;
+ put_cmsg(msg, SOL_IP, IP_SKB_MARK_FLAG, sizeof(__u32), &skb_mark);
+}
+static inline void ip_cmsg_recv_vlanid
+(struct msghdr *msg, struct sk_buff *skb)
+{
+}
+
+static inline void ecnt_ip_cmsg_recv_inline_hook
+(struct msghdr *msg, struct sk_buff *skb, int flags)
+{
+ if ( (flags>>=1) == 0 )
+ return;
+ if ( flags & 1 )
+ ip_cmsg_recv_skbmark(msg, skb);
+
+ if ( (flags>>=1) == 0 )
+ return;
+ if ( flags & 1 )
+ ip_cmsg_recv_vlanid(msg, skb);
+
+ return;
+}
+
+static inline int ecnt_udp_send_skb_inline_hook
+(struct inet_sock *inet, struct sock *sk, struct sk_buff *skb)
+{
+ if ( !inet || !sk || !skb )
+ return ECNT_CONTINUE;
+
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_tcp_recvmsg_inline_hook
+(struct sock *sk, struct sk_buff *skb)
+{
+ if ( !sk || !skb )
+ return ECNT_CONTINUE;
+
+ ecnt_skbmark_to_sockmark_copy(sk, skb);
+
+ return ECNT_CONTINUE;
+}
+
+
+static inline int ecnt_udp_recvmsg_inline_hook
+(struct sock *sk, struct sk_buff *skb)
+{
+ if ( !sk || !skb )
+ return ECNT_CONTINUE;
+
+ ecnt_skbmark_to_sockmark_copy(sk, skb);
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_tcp_v4_rcv_inline_hook
+(struct sock *sk, struct sk_buff *skb, struct net *net,
+struct iphdr *iph, struct tcphdr *th)
+{
+
+ if (!sk)
+ {
+ }
+
+ if(ra_sw_nat_local_in_tx)
+ ra_sw_nat_local_in_tx(skb,th->dest);
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_icmp_send_inline_hook
+(struct sk_buff *skb_in, int type, int code, __be32 info)
+{
+
+
+ return ECNT_CONTINUE;
+}
+
+
+static inline int ecnt_arp_xmit_inline_hook
+(struct sk_buff *skb)
+{
+
+ return ECNT_CONTINUE;
+}
+
+#if defined(TCSUPPORT_TSO_ENABLE)
+/* if match the rule, then set the tso mark. */
+static inline int ecnt_tcp_v4_send_check_inline_hook(struct sk_buff *skb,
+ __be32 saddr, __be32 daddr)
+{
+ tsoRuleEntry_v4_t entry;
+ struct tcphdr *th;
+
+ th = tcp_hdr(skb);
+ entry.dport = th->dest;
+ entry.sport = th->source;
+ entry.daddr_v4 = daddr;
+ entry.saddr_v4 = saddr;
+
+ if(TSO_SET_SKB_MARK_V4(&entry) == 0){
+ skb->tso_mark |= TSO_ENABLE_MARK;
+ }
+ else{
+ skb->tso_mark &= (~TSO_ENABLE_MARK);
+ }
+ return ECNT_CONTINUE;
+}
+#endif
+
+#endif
+
Index: linux-3.18.21/net/ipv4/esp4.c
===================================================================
--- linux-3.18.21.orig/net/ipv4/esp4.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv4/esp4.c 2018-02-05 13:21:30.000000000 +0800
@@ -17,6 +17,8 @@
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>
+#include <ecnt_hook/ecnt_hook_crypto.h>
+
struct esp_skb_cb {
struct xfrm_skb_cb xfrm;
@@ -136,6 +138,10 @@
int seqhilen;
__be32 *seqhi;
+ err = CRYPTO_API_DO_IPSEC_ESP_OUTPUT(ECNT_CRYPTO, x, skb);
+ if (err != ECNT_CONTINUE)
+ return err;
+
/* skb is pure payload to encrypt */
aead = x->data;
@@ -386,6 +392,12 @@
struct scatterlist *asg;
int err = -EINVAL;
+ err = CRYPTO_API_DO_IPSEC_ESP_INPUT_PT(ECNT_CRYPTO, x, skb);
+ if (err == ECNT_CONTINUE) /* no HW Crypto */
+ err = -EINVAL;
+ else
+ return err;
+
if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
goto out;
Index: linux-3.18.21/net/ipv4/icmp.c
===================================================================
--- linux-3.18.21.orig/net/ipv4/icmp.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv4/icmp.c 2018-02-05 13:21:30.000000000 +0800
@@ -96,6 +96,7 @@
#include <net/xfrm.h>
#include <net/inet_common.h>
#include <net/ip_fib.h>
+#include "ecnt_net_ipv4.h"
/*
* Build xmit assembly blocks
@@ -557,6 +558,10 @@
if (!rt)
goto out;
+
+ if ( ECNT_RETURN ==
+ ecnt_icmp_send_inline_hook(skb_in, type, code, info) )
+ goto out;
net = dev_net(rt->dst.dev);
/*
Index: linux-3.18.21/net/ipv4/igmp.c
===================================================================
--- linux-3.18.21.orig/net/ipv4/igmp.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv4/igmp.c 2018-02-05 13:21:30.000000000 +0800
@@ -105,6 +105,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#endif
+#include <ecnt_hook/ecnt_hook_net.h>
#define IP_MAX_MEMBERSHIPS 20
#define IP_MAX_MSF 10
@@ -1168,6 +1169,7 @@
static void igmp_group_dropped(struct ip_mc_list *im)
{
struct in_device *in_dev = im->interface;
+ struct net_data_s net_data;
#ifdef CONFIG_IP_MULTICAST
int reporter;
#endif
@@ -1181,6 +1183,8 @@
if (im->multiaddr == IGMP_ALL_HOSTS)
return;
+ ECNT_IGMP_HOOK(ECNT_NET_IGMP_GROUP_DROPPED,&net_data);
+
reporter = im->reporter;
igmp_stop_timer(im);
@@ -1203,6 +1207,7 @@
static void igmp_group_added(struct ip_mc_list *im)
{
struct in_device *in_dev = im->interface;
+ struct net_data_s net_data;
if (im->loaded == 0) {
im->loaded = 1;
@@ -1212,6 +1217,8 @@
#ifdef CONFIG_IP_MULTICAST
if (im->multiaddr == IGMP_ALL_HOSTS)
return;
+
+ ECNT_IGMP_HOOK(ECNT_NET_IGMP_GROUP_ADDED,&net_data);
if (in_dev->dead)
return;
@@ -2431,6 +2438,8 @@
return rv;
}
+EXPORT_SYMBOL(ip_check_mc_rcu);
+
#if defined(CONFIG_PROC_FS)
struct igmp_mc_iter_state {
struct seq_net_private p;
Index: linux-3.18.21/net/ipv4/ip_forward.c
===================================================================
--- linux-3.18.21.orig/net/ipv4/ip_forward.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv4/ip_forward.c 2018-02-05 13:21:30.000000000 +0800
@@ -57,7 +57,7 @@
}
-static int ip_forward_finish(struct sk_buff *skb)
+__IMEM static int ip_forward_finish(struct sk_buff *skb)
{
struct ip_options *opt = &(IPCB(skb)->opt);
@@ -70,7 +70,7 @@
return dst_output(skb);
}
-int ip_forward(struct sk_buff *skb)
+__IMEM int ip_forward(struct sk_buff *skb)
{
u32 mtu;
struct iphdr *iph; /* Our header */
Index: linux-3.18.21/net/ipv4/ip_input.c
===================================================================
--- linux-3.18.21.orig/net/ipv4/ip_input.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv4/ip_input.c 2018-02-05 13:21:30.000000000 +0800
@@ -252,7 +252,10 @@
if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER))
return 0;
}
-
+#if defined(TCSUPPORT_RA_HWNAT) && !defined(TCSUPPORT_MT7510_FE)
+ if (ra_sw_nat_hook_free)
+ ra_sw_nat_hook_free(skb);
+#endif
return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
ip_local_deliver_finish);
}
@@ -309,7 +312,7 @@
int sysctl_ip_early_demux __read_mostly = 1;
EXPORT_SYMBOL(sysctl_ip_early_demux);
-static int ip_rcv_finish(struct sk_buff *skb)
+__IMEM static int ip_rcv_finish(struct sk_buff *skb)
{
const struct iphdr *iph = ip_hdr(skb);
struct rtable *rt;
@@ -373,7 +376,7 @@
/*
* Main IP Receive routine.
*/
-int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
+__IMEM int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
const struct iphdr *iph;
u32 len;
Index: linux-3.18.21/net/ipv4/ip_output.c
===================================================================
--- linux-3.18.21.orig/net/ipv4/ip_output.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv4/ip_output.c 2018-02-05 13:21:30.000000000 +0800
@@ -80,6 +80,10 @@
#include <linux/netlink.h>
#include <linux/tcp.h>
+#include <ecnt_hook/ecnt_hook_net.h>
+#include <ecnt_hook/ecnt_hook_crypto.h>
+
+
int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
EXPORT_SYMBOL(sysctl_ip_default_ttl);
@@ -104,6 +108,15 @@
int ip_local_out_sk(struct sock *sk, struct sk_buff *skb)
{
int err;
+ struct net_data_s net_data;
+ int ret = -1;
+
+ net_data.pskb = &skb;
+ ret = ECNT_IP_OUTPUT_HOOK(ECNT_NET_IP_LOCAL_OUT,&net_data);
+ if ( ECNT_RETURN == ret )
+ {
+ return ret;
+ }
err = __ip_local_out(skb);
if (likely(err == 1))
@@ -163,7 +176,7 @@
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
-static inline int ip_finish_output2(struct sk_buff *skb)
+__IMEM static inline int ip_finish_output2(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct rtable *rt = (struct rtable *)dst;
@@ -253,7 +266,7 @@
return ret;
}
-static int ip_finish_output(struct sk_buff *skb)
+__IMEM static int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
/* Policy lookup after SNAT yielded a new policy */
@@ -262,6 +275,21 @@
return dst_output(skb);
}
#endif
+
+/* kernel 2.6.36
+#ifdef TCSUPPORT_IPSEC_PASSTHROUGH
+ if(1 == skb->ipsec_pt_flag)
+ {
+ struct adapterlistpara_s ptr;
+ ptr.para = 0;
+ ptr.skb = skb;
+ ipsec_set_adatpterlist_para(&ptr);
+ skb->ipsec_pt_flag = 0;
+ }
+#endif
+*/
+ CRYPTO_API_ESP_ADATPDER_SET(ECNT_CRYPTO, skb);
+
if (skb_is_gso(skb))
return ip_finish_output_gso(skb);
@@ -331,7 +359,7 @@
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
-int ip_output(struct sock *sk, struct sk_buff *skb)
+__IMEM int ip_output(struct sock *sk, struct sk_buff *skb)
{
struct net_device *dev = skb_dst(skb)->dev;
Index: linux-3.18.21/net/ipv4/ip_sockglue.c
===================================================================
--- linux-3.18.21.orig/net/ipv4/ip_sockglue.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv4/ip_sockglue.c 2018-02-05 13:21:30.000000000 +0800
@@ -44,14 +44,7 @@
#include <linux/errqueue.h>
#include <asm/uaccess.h>
-
-#define IP_CMSG_PKTINFO 1
-#define IP_CMSG_TTL 2
-#define IP_CMSG_TOS 4
-#define IP_CMSG_RECVOPTS 8
-#define IP_CMSG_RETOPTS 16
-#define IP_CMSG_PASSSEC 32
-#define IP_CMSG_ORIGDSTADDR 64
+#include "ecnt_net_ipv4.h"
/*
* SOL_IP control messages.
@@ -183,6 +176,7 @@
if (flags & 1)
ip_cmsg_recv_dstaddr(msg, skb);
+ ecnt_ip_cmsg_recv_inline_hook(msg, skb, flags);
}
EXPORT_SYMBOL(ip_cmsg_recv);
@@ -1036,6 +1030,8 @@
err = -ENOPROTOOPT;
break;
}
+ ecnt_do_ip_setsockopt_inline_hook(sk, level, inet,
+ optname, optval, optlen, &err);
release_sock(sk);
return err;
@@ -1135,6 +1131,7 @@
struct inet_sock *inet = inet_sk(sk);
int val;
int len;
+ int ret = 0;
if (level != SOL_IP)
return -EOPNOTSUPP;
@@ -1148,6 +1145,13 @@
return -EINVAL;
lock_sock(sk);
+ ret = ecnt_do_ip_getsockopt_inline_hook(sk, level, inet,
+ optname, optval, optlen, flags);
+ if ( ECNT_CONTINUE != ret )
+ {
+ release_sock(sk);
+ return (ECNT_RETURN == ret ? 0 : -EINVAL);
+ }
switch (optname) {
case IP_OPTIONS:
@@ -1340,6 +1344,7 @@
release_sock(sk);
return -ENOPROTOOPT;
}
+
release_sock(sk);
if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
Index: linux-3.18.21/net/ipv4/netfilter.c
===================================================================
--- linux-3.18.21.orig/net/ipv4/netfilter.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv4/netfilter.c 2018-02-05 13:21:30.000000000 +0800
@@ -121,7 +121,7 @@
return 0;
}
-__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
+__IMEM __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol)
{
const struct iphdr *iph = ip_hdr(skb);
Index: linux-3.18.21/net/ipv4/netfilter/Makefile
===================================================================
--- linux-3.18.21.orig/net/ipv4/netfilter/Makefile 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv4/netfilter/Makefile 2018-02-05 13:21:30.000000000 +0800
@@ -3,7 +3,7 @@
#
# objects for l3 independent conntrack
-nf_conntrack_ipv4-y := nf_conntrack_l3proto_ipv4.o nf_conntrack_proto_icmp.o
+nf_conntrack_ipv4-y := nf_conntrack_l3proto_ipv4.o nf_conntrack_proto_icmp.o ecnt_nf_conntrack_l3proto_ipv4.o
ifeq ($(CONFIG_NF_CONNTRACK_PROC_COMPAT),y)
ifeq ($(CONFIG_PROC_FS),y)
nf_conntrack_ipv4-objs += nf_conntrack_l3proto_ipv4_compat.o
@@ -69,3 +69,8 @@
# just filtering instance of ARP tables for now
obj-$(CONFIG_IP_NF_ARPFILTER) += arptable_filter.o
+
+ifeq ($(TCSUPPORT_PORT_TRIGGER),1)
+obj-y += ipt_TRIGGER.o
+endif
+
Index: linux-3.18.21/net/ipv4/netfilter/ecnt_nf_conntrack_l3proto_ipv4.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/ipv4/netfilter/ecnt_nf_conntrack_l3proto_ipv4.c 2018-02-05 13:21:30.000000000 +0800
@@ -0,0 +1,141 @@
+/***************************************************************
+Copyright Statement:
+
+This software/firmware and related documentation (EcoNet Software)
+are protected under relevant copyright laws. The information contained herein
+is confidential and proprietary to EcoNet (HK) Limited (EcoNet) and/or
+its licensors. Without the prior written permission of EcoNet and/or its licensors,
+any reproduction, modification, use or disclosure of EcoNet Software, and
+information contained herein, in whole or in part, shall be strictly prohibited.
+
+EcoNet (HK) Limited EcoNet. ALL RIGHTS RESERVED.
+
+BY OPENING OR USING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY
+ACKNOWLEDGES AND AGREES THAT THE SOFTWARE/FIRMWARE AND ITS
+DOCUMENTATIONS (ECONET SOFTWARE) RECEIVED FROM ECONET
+AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON AN AS IS
+BASIS ONLY. ECONET EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+WHETHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+OR NON-INFRINGEMENT. NOR DOES ECONET PROVIDE ANY WARRANTY
+WHATSOEVER WITH RESPECT TO THE SOFTWARE OF ANY THIRD PARTIES WHICH
+MAY BE USED BY, INCORPORATED IN, OR SUPPLIED WITH THE ECONET SOFTWARE.
+RECEIVER AGREES TO LOOK ONLY TO SUCH THIRD PARTIES FOR ANY AND ALL
+WARRANTY CLAIMS RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+THAT IT IS RECEIVERS SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD
+PARTY ALL PROPER LICENSES CONTAINED IN ECONET SOFTWARE.
+
+ECONET SHALL NOT BE RESPONSIBLE FOR ANY ECONET SOFTWARE RELEASES
+MADE TO RECEIVERS SPECIFICATION OR CONFORMING TO A PARTICULAR
+STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND
+ECONET'S ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE ECONET
+SOFTWARE RELEASED HEREUNDER SHALL BE, AT ECONET'S SOLE OPTION, TO
+REVISE OR REPLACE THE ECONET SOFTWARE AT ISSUE OR REFUND ANY SOFTWARE
+LICENSE FEES OR SERVICE CHARGES PAID BY RECEIVER TO ECONET FOR SUCH
+ECONET SOFTWARE.
+***************************************************************/
+
+/************************************************************************
+* I N C L U D E S
+*************************************************************************
+*/
+#include <linux/types.h>
+#include <linux/ip.h>
+#include <linux/netfilter.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/icmp.h>
+#include <linux/sysctl.h>
+#include <net/route.h>
+#include <net/ip.h>
+
+#include <linux/netfilter_ipv4.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_l3proto.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
+#include <net/netfilter/nf_log.h>
+
+#include <linux/foe_hook.h>
+#include <ecnt_hook/ecnt_hook.h>
+#include <ecnt_hook/ecnt_hook_net.h>
+
+/************************************************************************
+* D E F I N E S & C O N S T A N T S
+*************************************************************************
+*/
+
+/************************************************************************
+* M A C R O S
+*************************************************************************
+*/
+
+/************************************************************************
+* D A T A T Y P E S
+*************************************************************************
+*/
+
+/************************************************************************
+* E X T E R N A L D A T A D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E C L A R A T I O N S
+*************************************************************************
+*/
+
+/************************************************************************
+* P U B L I C D A T A
+*************************************************************************
+*/
+
+/************************************************************************
+* P R I V A T E D A T A
+*************************************************************************
+*/
+
+/************************************************************************
+* F U N C T I O N D E F I N I T I O N S
+*************************************************************************
+*/
+#ifdef TCSUPPORT_DS_HWNAT_OFFLOAD
+int ecnt_nf_conntrack_seq_adjust(struct sk_buff* skb)
+{
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if(!ct)
+ {
+ return -1;
+ }
+
+ if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status))
+ {
+ typeof(nf_nat_seq_adjust_hook) seq_adjust;
+
+ seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
+ if (!seq_adjust || !seq_adjust(skb, ct, ctinfo))
+ {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ecnt_nf_conntrack_seq_adjust);
+
+int ecnt_nf_conntrack_in(struct sk_buff* skb,struct net_device* in_dev)
+{
+ return nf_conntrack_in(dev_net(in_dev), PF_INET, NF_INET_PRE_ROUTING, skb);
+}
+EXPORT_SYMBOL(ecnt_nf_conntrack_in);
+
+#endif /* TCSUPPORT_DS_HWNAT_OFFLOAD */
+
Index: linux-3.18.21/net/ipv4/netfilter/ipt_TRIGGER.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/ipv4/netfilter/ipt_TRIGGER.c 2018-02-05 13:21:30.000000000 +0800
@@ -0,0 +1,424 @@
+/* Kernel module to match the port-ranges, trigger related port-ranges,
+ * and alters the destination to a local IP address.
+ *
+ * Copyright (C) 2003, CyberTAN Corporation
+ * All Rights Reserved.
+ *
+ * Description:
+ * This is kernel module for port-triggering.
+ *
+ * The module follows the Netfilter framework, called extended packet
+ * matching modules.
+ */
+
+#include <linux/types.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/netdevice.h>
+#include <linux/if.h>
+#include <linux/inetdevice.h>
+#include <linux/list.h>
+#include <net/protocol.h>
+#include <net/checksum.h>
+#include <linux/spinlock.h>
+
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+#include <net/netfilter/nf_nat.h>
+#include <uapi/linux/netfilter_ipv4/ipt_TRIGGER.h>
+
+/* This rwlock protects the main hash table, protocol/helper/expected
+ * registrations, conntrack timers*/
+
+
+static DEFINE_SPINLOCK(nf_trigger_lock);
+
+
+
+#define NF_IP_PRE_ROUTING 0
+#define NF_IP_FORWARD 2
+#define IPT_CONTINUE XT_CONTINUE
+
+
+
+/***********************lock help**********************/
+#define MUST_BE_READ_LOCKED(l)
+#define MUST_BE_WRITE_LOCKED(l)
+
+
+#define LOCK_BH(l) spin_lock_bh(l)
+#define UNLOCK_BH(l) spin_unlock_bh(l)
+
+#define ASSERT_READ_LOCK(x) MUST_BE_READ_LOCKED(&nf_trigger_lock)
+#define ASSERT_WRITE_LOCK(x) MUST_BE_WRITE_LOCKED(&nf_trigger_lock)
+
+
+
+
+/***********************list help**********************/
+#define LIST_FIND(head, cmpfn, type, args...) \
+({ \
+ const struct list_head *__i, *__j = NULL; \
+ \
+ ASSERT_READ_LOCK(head); \
+ list_for_each(__i, (head)) \
+ if (cmpfn((const type)__i , ## args)) { \
+ __j = __i; \
+ break; \
+ } \
+ (type)__j; \
+})
+
+static inline int
+__list_cmp_same(const void *p1, const void *p2) { return p1 == p2; }
+
+static inline void
+list_prepend(struct list_head *head, void *new)
+{
+ ASSERT_WRITE_LOCK(head);
+ list_add(new, head);
+}
+
+#define list_named_find(head, name) \
+LIST_FIND(head, __list_cmp_name, void *, name)
+
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+struct ipt_trigger {
+ struct list_head list; /* Trigger list */
+ struct timer_list timeout; /* Timer for list destroying */
+ u_int32_t srcip; /* Outgoing source address */
+ u_int32_t dstip; /* Outgoing destination address */
+ u_int16_t mproto; /* Trigger protocol */
+ u_int16_t rproto; /* Related protocol */
+ struct ipt_trigger_ports ports; /* Trigger and related ports */
+ u_int8_t reply; /* Confirm a reply connection */
+};
+
+LIST_HEAD(trigger_list);
+//DECLARE_LOCK(ip_trigger_lock);
+
+static void trigger_refresh(struct ipt_trigger *trig, unsigned long extra_jiffies)
+{
+ DEBUGP("%s: \n", __FUNCTION__);
+ NF_CT_ASSERT(trig);
+ /*King(20120808): replace WRITE_LOCK with LOCK_BH, because nf_conntrack_lock is defined as spin lock in Linux
+ kernel-2.6.36 but rw lock in 2.6.22.15*/
+ LOCK_BH(&nf_trigger_lock);
+ /* Need del_timer for race avoidance (may already be dying). */
+ if (del_timer(&trig->timeout)) {
+ trig->timeout.expires = jiffies + extra_jiffies;
+ add_timer(&trig->timeout);
+ }
+
+ UNLOCK_BH(&nf_trigger_lock);
+}
+
+static void __del_trigger(struct ipt_trigger *trig)
+{
+ DEBUGP("%s: \n", __FUNCTION__);
+ NF_CT_ASSERT(trig);
+ MUST_BE_WRITE_LOCKED(&nf_trigger_lock);
+
+ /* delete from 'trigger_list' */
+ list_del(&trig->list);
+ kfree(trig);
+}
+
+static void trigger_timeout(unsigned long ul_trig)
+{
+ struct ipt_trigger *trig= (void *) ul_trig;
+
+ DEBUGP("trigger list %p timed out\n", trig);
+ /*King(20120808): replace WRITE_LOCK with LOCK_BH, because nf_conntrack_lock is defined as spin lock in Linux
+ kernel-2.6.36 but rw lock in 2.6.22.15*/
+ LOCK_BH(&nf_trigger_lock);
+ __del_trigger(trig);
+ UNLOCK_BH(&nf_trigger_lock);
+}
+
+static unsigned int
+add_new_trigger(struct ipt_trigger *trig)
+{
+ struct ipt_trigger *new = NULL;
+
+ DEBUGP("!!!!!!!!!!!! %s !!!!!!!!!!!\n", __FUNCTION__);
+ /*King(20120808): replace WRITE_LOCK with LOCK_BH, because nf_conntrack_lock is defined as spin lock in Linux
+ kernel-2.6.36 but rw lock in 2.6.22.15*/
+
+ LOCK_BH(&nf_trigger_lock);
+ new = (struct ipt_trigger *)
+ kmalloc(sizeof(struct ipt_trigger), GFP_ATOMIC);
+
+ if (!new) {
+ UNLOCK_BH(&nf_trigger_lock);
+ DEBUGP("%s: OOM allocating trigger list\n", __FUNCTION__);
+ return -ENOMEM;
+ }
+
+ memset(new, 0, sizeof(*trig));
+ INIT_LIST_HEAD(&new->list);
+ memcpy(new, trig, sizeof(*trig));
+
+ /* add to global table of trigger */
+ list_prepend(&trigger_list, &new->list);
+
+ /* add and start timer if required */
+ init_timer(&new->timeout);
+
+ new->timeout.data = (unsigned long)new;
+ new->timeout.function = trigger_timeout;
+ new->timeout.expires = jiffies + (TRIGGER_TIMEOUT * HZ);
+ add_timer(&new->timeout);
+
+ UNLOCK_BH(&nf_trigger_lock);
+
+ return 0;
+}
+
+/*
+ * Service-Name OutBound InBound
+ * 1. TMD UDP:1000 TCP/UDP:2000..2010
+ * 2. WOKAO UDP:1000 TCP/UDP:3000..3010
+ * 3. net2phone-1 UDP:6801 TCP:30000..30000
+ * 4. net2phone-2 UDP:6801 UDP:30000..30000
+ *
+ * For supporting to use the same outgoing port to trigger different port rules,
+ * it should check the inbound protocol and port range value. If all conditions
+ * are matched, it is a same trigger item, else it needs to create a new one.
+ */
+static inline int trigger_out_matched(const struct ipt_trigger *i,
+ const u_int16_t proto, const u_int16_t dport, const struct ipt_trigger_info *info)
+{
+ DEBUGP("%s: i=%p, proto= %d, dport=%d.\n", __FUNCTION__, i, proto, dport);
+ DEBUGP("%s: Got one, mproto= %d, mport[0..1]=%d, %d, ", __FUNCTION__,
+ i->mproto, i->ports.mport[0], i->ports.mport[1]);
+ DEBUGP("rproto= %d, rport[0..1]=%d, %d.\n",
+ i->rproto, i->ports.rport[0], i->ports.rport[1]);
+
+ return ((i->mproto == proto) &&
+ (i->ports.mport[0] <= dport) &&
+ (i->ports.mport[1] >= dport) &&
+ (i->rproto == info->proto) &&
+ (i->ports.rport[0] == info->ports.rport[0]) &&
+ (i->ports.rport[1] == info->ports.rport[1]));
+}
+
+static unsigned int
+trigger_out(struct sk_buff *skb,
+ unsigned int hooknum,
+ const void *targinfo)
+{
+ const struct ipt_trigger_info *info = targinfo;
+ struct ipt_trigger trig, *found = NULL;
+ const struct iphdr *iph = ip_hdr(skb);
+ struct tcphdr *tcph = (void *)iph + iph->ihl*4; /* Might be TCP, UDP */
+
+ DEBUGP("############# %s ############\n", __FUNCTION__);
+ /* Check if the trigger range has already existed in 'trigger_list'. */
+ found = LIST_FIND(&trigger_list, trigger_out_matched,
+ struct ipt_trigger *, iph->protocol, ntohs(tcph->dest), info);
+
+
+ if (found) {
+ /* Yeah, it exists. We need to update(delay) the destroying timer. */
+ trigger_refresh(found, TRIGGER_TIMEOUT * HZ);
+ /* In order to allow multiple hosts use the same port range, we update
+ the 'saddr' after previous trigger has a reply connection. */
+ if (found->reply)
+ found->srcip = iph->saddr;
+ }
+ else {
+ /* Create new trigger */
+ memset(&trig, 0, sizeof(trig));
+ trig.srcip = iph->saddr;
+ trig.mproto = iph->protocol;
+ trig.rproto = info->proto;
+ memcpy(&trig.ports, &info->ports, sizeof(struct ipt_trigger_ports));
+ add_new_trigger(&trig); /* Add the new 'trig' to list 'trigger_list'. */
+ }
+
+ return IPT_CONTINUE; /* We don't block any packet. */
+}
+
+static inline int trigger_in_matched(const struct ipt_trigger *i,
+ const u_int16_t proto, const u_int16_t dport)
+{
+ DEBUGP("%s: i=%p, proto= %d, dport=%d.\n", __FUNCTION__, i, proto, dport);
+ DEBUGP("%s: Got one, rproto= %d, rport[0..1]=%d, %d.\n", __FUNCTION__,
+ i->rproto, i->ports.rport[0], i->ports.rport[1]);
+ u_int16_t rproto = i->rproto;
+
+ if (!rproto)
+ rproto = proto;
+
+ return ((rproto == proto) && (i->ports.rport[0] <= dport)
+ && (i->ports.rport[1] >= dport));
+}
+
+static unsigned int
+trigger_in(struct sk_buff *skb,
+ unsigned int hooknum,
+ const void *targinfo)
+{
+ struct ipt_trigger *found = NULL;
+ const struct iphdr *iph = ip_hdr(skb);
+ struct tcphdr *tcph = (void *)iph + iph->ihl*4; /* Might be TCP, UDP */
+ /* Check if the trigger-ed range has already existed in 'trigger_list'. */
+ found = LIST_FIND(&trigger_list, trigger_in_matched,
+ struct ipt_trigger *, iph->protocol, ntohs(tcph->dest));
+ if (found) {
+ DEBUGP("############# %s ############\n", __FUNCTION__);
+ /* Yeah, it exists. We need to update(delay) the destroying timer. */
+ trigger_refresh(found, TRIGGER_TIMEOUT * HZ);
+ return NF_ACCEPT; /* Accept it, or the imcoming packet could be
+ dropped in the FORWARD chain */
+ }
+
+ return IPT_CONTINUE; /* Our job is the interception. */
+}
+
+static unsigned int
+trigger_dnat(struct sk_buff *skb,
+ unsigned int hooknum,
+ const void *targinfo)
+{
+ struct ipt_trigger *found = NULL;
+ const struct iphdr *iph = ip_hdr(skb);
+ struct tcphdr *tcph = (void *)iph + iph->ihl*4; /* Might be TCP, UDP */
+ struct nf_conn *ct = NULL;
+ enum ip_conntrack_info ctinfo;
+ //struct nf_nat_multi_range_compat newrange;
+
+ NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING);
+ /* Check if the trigger-ed range has already existed in 'trigger_list'. */
+ found = LIST_FIND(&trigger_list, trigger_in_matched,
+ struct ipt_trigger *, iph->protocol, ntohs(tcph->dest));
+
+ if (!found || !found->srcip)
+ return IPT_CONTINUE; /* We don't block any packet. */
+
+ DEBUGP("############# %s ############\n", __FUNCTION__);
+ found->reply = 1; /* Confirm there has been a reply connection. */
+ ct = nf_ct_get(skb, &ctinfo);
+ NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW));
+
+ DEBUGP("%s: got ", __FUNCTION__);
+
+
+ /* Alter the destination of imcoming packet. */
+ struct nf_nat_range newrange;
+
+ /* Transfer from original range. */
+ memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
+ memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
+ memset(&newrange.min_proto, 0, sizeof(newrange.min_proto));
+ memset(&newrange.max_proto, 0, sizeof(newrange.max_proto));
+ newrange.flags = NF_NAT_RANGE_MAP_IPS;
+ newrange.min_addr.ip = found->srcip;
+ newrange.max_addr.ip = found->srcip;
+ DEBUGP("%s: found->srcip = %x\n", __FUNCTION__, found->srcip);
+
+ /* Hand modified range to generic setup. */
+ return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
+}
+
+static unsigned int
+trigger_target(struct sk_buff *skb,
+ const struct xt_action_param *par)
+{
+ const struct ipt_trigger_info *info = par->targinfo;
+ const struct iphdr *iph = ip_hdr(skb);
+ unsigned int hooknum = par->hooknum;
+
+ DEBUGP("%s: type = %s\n", __FUNCTION__,
+ (info->type == IPT_TRIGGER_DNAT) ? "dnat" :
+ (info->type == IPT_TRIGGER_IN) ? "in" : "out");
+
+ /* The Port-trigger only supports TCP and UDP. */
+ if ((iph->protocol != IPPROTO_TCP) && (iph->protocol != IPPROTO_UDP))
+ return IPT_CONTINUE;
+
+ if (info->type == IPT_TRIGGER_OUT)
+ return trigger_out(skb, hooknum, info);
+ else if (info->type == IPT_TRIGGER_IN)
+ return trigger_in(skb, hooknum, info);
+ else if (info->type == IPT_TRIGGER_DNAT)
+ return trigger_dnat(skb, hooknum, info);
+
+ return IPT_CONTINUE;
+}
+static bool
+trigger_check(const struct xt_mtchk_param *par)
+{
+ const struct ipt_trigger_info *info = par->matchinfo;
+ struct list_head *cur_item = NULL, *tmp_item = NULL;
+ unsigned int hook_mask = par->hook_mask;
+ char *tablename = par->table;
+
+ if ((strcmp(tablename, "mangle") == 0)) {
+ DEBUGP("trigger_check: bad table `%s'.\n", tablename);
+ return -EINVAL;
+ }
+ if (hook_mask & ~((1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_FORWARD))) {
+ DEBUGP("trigger_check: bad hooks %x.\n", hook_mask);
+ return -EINVAL;
+ }
+
+ if (info->proto) {
+ if (info->proto != IPPROTO_TCP && info->proto != IPPROTO_UDP) {
+ DEBUGP("trigger_check: bad proto %d.\n", info->proto);
+ return -EINVAL;
+ }
+ }
+ if (info->type == IPT_TRIGGER_OUT) {
+ if (!info->ports.mport[0] || !info->ports.rport[0]) {
+ DEBUGP("trigger_check: Try 'iptbles -j TRIGGER -h' for help.\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Empty the 'trigger_list' */
+ list_for_each_safe(cur_item, tmp_item, &trigger_list) {
+ struct ipt_trigger *trig = (void *)cur_item;
+
+ DEBUGP("%s: list_for_each_safe(): %p.\n", __FUNCTION__, trig);
+ del_timer(&trig->timeout);
+ __del_trigger(trig);
+ }
+ return 0;
+}
+
+
+static struct xt_target redirect_reg = {
+ .name = "TRIGGER",
+ .family = NFPROTO_IPV4,
+ .target = trigger_target,
+ .targetsize = sizeof(struct ipt_trigger_info),
+ .checkentry = trigger_check,
+ .me = THIS_MODULE,
+};
+
+static int __init init(void)
+{
+ return xt_register_target(&redirect_reg);
+}
+
+static void __exit fini(void)
+{
+ xt_unregister_target(&redirect_reg);
+}
+
+module_init(init);
+module_exit(fini);
Index: linux-3.18.21/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
===================================================================
--- linux-3.18.21.orig/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c 2018-02-05 13:21:30.000000000 +0800
@@ -147,7 +147,7 @@
return nf_conntrack_confirm(skb);
}
-static unsigned int ipv4_conntrack_in(const struct nf_hook_ops *ops,
+__IMEM static unsigned int ipv4_conntrack_in(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
Index: linux-3.18.21/net/ipv4/netfilter/nf_defrag_ipv4.c
===================================================================
--- linux-3.18.21.orig/net/ipv4/netfilter/nf_defrag_ipv4.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv4/netfilter/nf_defrag_ipv4.c 2018-02-05 13:21:30.000000000 +0800
@@ -61,7 +61,7 @@
return IP_DEFRAG_CONNTRACK_OUT + zone;
}
-static unsigned int ipv4_conntrack_defrag(const struct nf_hook_ops *ops,
+__IMEM static unsigned int ipv4_conntrack_defrag(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
Index: linux-3.18.21/net/ipv4/tcp.c
===================================================================
--- linux-3.18.21.orig/net/ipv4/tcp.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv4/tcp.c 2018-02-05 13:21:30.000000000 +0800
@@ -279,6 +279,7 @@
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>
+#include "ecnt_net_ipv4.h"
int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
@@ -1809,6 +1810,7 @@
continue;
found_ok_skb:
+ ecnt_tcp_recvmsg_inline_hook(sk, skb);
/* Ok so how much can we use? */
used = skb->len - offset;
if (len < used)
Index: linux-3.18.21/net/ipv4/tcp_ipv4.c
===================================================================
--- linux-3.18.21.orig/net/ipv4/tcp_ipv4.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv4/tcp_ipv4.c 2018-02-05 13:21:30.000000000 +0800
@@ -84,6 +84,7 @@
#include <linux/crypto.h>
#include <linux/scatterlist.h>
+#include "ecnt_net_ipv4.h"
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
@@ -557,6 +558,10 @@
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
const struct inet_sock *inet = inet_sk(sk);
+
+#if defined(TCSUPPORT_TSO_ENABLE)
+ ecnt_tcp_v4_send_check_inline_hook(skb, inet->inet_saddr, inet->inet_daddr);
+#endif
__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
@@ -1635,6 +1640,8 @@
TCP_SKB_CB(skb)->sacked = 0;
sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
+ if ( ECNT_RETURN_DROP == ecnt_tcp_v4_rcv_inline_hook(sk, skb, net, iph, th) )
+ goto discard_it;
if (!sk)
goto no_tcp_socket;
Index: linux-3.18.21/net/ipv4/udp.c
===================================================================
--- linux-3.18.21.orig/net/ipv4/udp.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv4/udp.c 2018-02-05 13:21:30.000000000 +0800
@@ -114,6 +114,9 @@
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
+#include "ecnt_net_ipv4.h"
+
+#include <ecnt_hook/ecnt_hook_net.h>
struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);
@@ -824,6 +827,7 @@
uh->check = CSUM_MANGLED_0;
send:
+ ecnt_udp_send_skb_inline_hook(inet, sk, skb);
err = ip_send_skb(sock_net(sk), skb);
if (err) {
if (err == -ENOBUFS && !inet->recverr) {
@@ -1262,6 +1266,7 @@
&peeked, &off, &err);
if (!skb)
goto out;
+ ecnt_udp_recvmsg_inline_hook(sk, skb);
ulen = skb->len - sizeof(struct udphdr);
copied = len;
@@ -1993,8 +1998,14 @@
int udp_rcv(struct sk_buff *skb)
{
+ struct net_data_s net_data;
+
+ net_data.pskb = &skb;
+ ECNT_UDP_RCV_HOOK(ECNT_NET_UDP_RCV,&net_data);
+
return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}
+EXPORT_SYMBOL(udp_rcv);
void udp_destroy_sock(struct sock *sk)
{
Index: linux-3.18.21/net/ipv4/xfrm4_mode_transport.c
===================================================================
--- linux-3.18.21.orig/net/ipv4/xfrm4_mode_transport.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv4/xfrm4_mode_transport.c 2018-02-05 13:21:30.000000000 +0800
@@ -5,6 +5,8 @@
*/
#include <linux/init.h>
+#include <linux/crypto.h>
+#include <ecnt_hook/ecnt_hook_crypto.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
@@ -22,8 +24,11 @@
{
struct iphdr *iph = ip_hdr(skb);
int ihl = iph->ihl * 4;
+ int header_len = x->props.header_len;
+
+ CRYPTO_API_GET_XFRM_HEADER_LEN(ECNT_CRYPTO, x, &header_len);
+ skb_set_network_header(skb, -header_len);
- skb_set_network_header(skb, -x->props.header_len);
skb->mac_header = skb->network_header +
offsetof(struct iphdr, protocol);
skb->transport_header = skb->network_header + ihl;
Index: linux-3.18.21/net/ipv4/xfrm4_mode_tunnel.c
===================================================================
--- linux-3.18.21.orig/net/ipv4/xfrm4_mode_tunnel.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv4/xfrm4_mode_tunnel.c 2018-02-05 13:21:30.000000000 +0800
@@ -6,6 +6,8 @@
#include <linux/gfp.h>
#include <linux/init.h>
+#include <linux/crypto.h>
+#include <ecnt_hook/ecnt_hook_crypto.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
@@ -32,8 +34,11 @@
struct dst_entry *dst = skb_dst(skb);
struct iphdr *top_iph;
int flags;
+ int header_len = x->props.header_len;
+
+ CRYPTO_API_GET_XFRM_HEADER_LEN(ECNT_CRYPTO, x, &header_len);
+ skb_set_network_header(skb, -header_len);
- skb_set_network_header(skb, -x->props.header_len);
skb->mac_header = skb->network_header +
offsetof(struct iphdr, protocol);
skb->transport_header = skb->network_header + sizeof(*top_iph);
Index: linux-3.18.21/net/ipv6/addrconf.c
===================================================================
--- linux-3.18.21.orig/net/ipv6/addrconf.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv6/addrconf.c 2018-02-05 14:20:20.000000000 +0800
@@ -90,6 +90,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/export.h>
+#include "ecnt_net_ipv6.h"
/* Set to 3 to get tracing... */
#define ACONF_DEBUG 2
@@ -172,7 +173,11 @@
.mtu6 = IPV6_MIN_MTU,
.accept_ra = 1,
.accept_redirects = 1,
+#ifdef TCSUPPORT_IPV6_ENHANCEMENT
+ .autoconf = 0,
+#else
.autoconf = 1,
+#endif
.force_mld_version = 0,
.mldv1_unsolicited_report_interval = 10 * HZ,
.mldv2_unsolicited_report_interval = HZ,
@@ -186,7 +191,11 @@
.regen_max_retry = REGEN_MAX_RETRY,
.max_desync_factor = MAX_DESYNC_FACTOR,
.max_addresses = IPV6_MAX_ADDRESSES,
+#ifdef TCSUPPORT_IPV6_ENHANCEMENT
+ .accept_ra_defrtr = 0,
+#else
.accept_ra_defrtr = 1,
+#endif
.accept_ra_from_local = 0,
.accept_ra_pinfo = 1,
#ifdef CONFIG_IPV6_ROUTER_PREF
@@ -201,6 +210,9 @@
.disable_ipv6 = 0,
.accept_dad = 1,
.suppress_frag_ndisc = 1,
+#ifdef TCSUPPORT_IPV6_ENHANCEMENT
+ .slaac_addr ="",
+#endif
};
static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -209,7 +221,11 @@
.mtu6 = IPV6_MIN_MTU,
.accept_ra = 1,
.accept_redirects = 1,
+#ifdef TCSUPPORT_IPV6_ENHANCEMENT
+ .autoconf = 0,
+#else
.autoconf = 1,
+#endif
.force_mld_version = 0,
.mldv1_unsolicited_report_interval = 10 * HZ,
.mldv2_unsolicited_report_interval = HZ,
@@ -223,7 +239,11 @@
.regen_max_retry = REGEN_MAX_RETRY,
.max_desync_factor = MAX_DESYNC_FACTOR,
.max_addresses = IPV6_MAX_ADDRESSES,
+#ifdef TCSUPPORT_IPV6_ENHANCEMENT
+ .accept_ra_defrtr = 0,
+#else
.accept_ra_defrtr = 1,
+#endif
.accept_ra_from_local = 0,
.accept_ra_pinfo = 1,
#ifdef CONFIG_IPV6_ROUTER_PREF
@@ -238,6 +258,9 @@
.disable_ipv6 = 0,
.accept_dad = 1,
.suppress_frag_ndisc = 1,
+#ifdef TCSUPPORT_IPV6_ENHANCEMENT
+ .slaac_addr ="",
+#endif
};
/* Check if a valid qdisc is available */
@@ -2351,6 +2374,8 @@
in6_ifa_put(ifp);
addrconf_verify();
}
+
+ ecnt_addrconf_prefix_rcv_inline_hook(in6_dev,pinfo,addr);
}
inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
in6_dev_put(in6_dev);
@@ -2773,6 +2798,8 @@
if (IS_ERR(idev))
return;
+ if (ecnt_addrconf_dev_config_if_inline_hook(dev,idev) == 0)
+ return;
addrconf_addr_gen(idev, false);
}
@@ -3127,7 +3154,7 @@
if (idev->dead || !(idev->if_flags & IF_READY))
goto out;
- if (!ipv6_accept_ra(idev))
+ if (ecnt_addrconf_rs_timer_if_inline_hook(idev))
goto out;
/* Announcement received after solicitation was sent */
@@ -3362,8 +3389,8 @@
read_lock_bh(&ifp->idev->lock);
send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
- send_rs = send_mld &&
- ipv6_accept_ra(ifp->idev) &&
+
+ send_rs = send_mld && (ecnt_addrconf_dad_complete_if_inline_hook(ifp)) &&
ifp->idev->cnf.rtr_solicits > 0 &&
(dev->flags&IFF_LOOPBACK) == 0;
read_unlock_bh(&ifp->idev->lock);
@@ -5224,6 +5251,15 @@
.mode = 0644,
.proc_handler = proc_dointvec,
},
+#ifdef TCSUPPORT_IPV6_ENHANCEMENT
+ {
+ .procname = "slaac_addr",
+ .data = &ipv6_devconf.slaac_addr,
+ .maxlen = 64,
+ .mode = 0644,
+ .proc_handler = &proc_dostring,
+ },
+#endif
{
/* sentinel */
}
Index: linux-3.18.21/net/ipv6/af_inet6.c
===================================================================
--- linux-3.18.21.orig/net/ipv6/af_inet6.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv6/af_inet6.c 2018-02-05 13:21:30.000000000 +0800
@@ -76,7 +76,11 @@
struct ipv6_params ipv6_defaults = {
.disable_ipv6 = 0,
+#ifdef TCSUPPORT_IPV6_ENHANCEMENT
+ .autoconf = 0,
+#else
.autoconf = 1,
+#endif
};
static int disable_ipv6_mod;
Index: linux-3.18.21/net/ipv6/ecnt_net_ipv6.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/ipv6/ecnt_net_ipv6.h 2018-02-05 14:20:41.000000000 +0800
@@ -0,0 +1,284 @@
+#ifndef _LINUX_ECNT_NET_IPV4_H
+#define _LINUX_ECNT_NET_IPV4_H
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <uapi/linux/in6.h>
+#include <ecnt_hook/ecnt_hook.h>
+#include "../ecnt_net.h"
+#include <linux/ecnt_vlan_bind.h>
+#include <net/tcp.h>
+#include <net/addrconf.h>
+
+#ifdef TCSUPPORT_IPV6_ENHANCEMENT
+static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr);
+static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
+#endif
+
+
+#define IP_CMSG_SKB_MARK 128
+
+static inline int ecnt_udp6_lib_rcv_inline_hook
+( struct sk_buff *skb,struct udphdr *uh, int proto)
+{
+
+ return ECNT_CONTINUE;
+}
+
+static inline void ecnt_udpv6_recvmsg_inline_hook
+(struct msghdr *msg, struct sk_buff *skb, struct inet_sock *inet, int is_udp4)
+{
+ if ( !is_udp4 && skb->protocol == htons(ETH_P_IPV6) )
+ {
+ if ( inet->cmsg_flags & IP_CMSG_SKB_MARK )
+ {
+ ip_cmsg_recv(msg, skb);
+ }
+ }
+
+ return;
+}
+
+static inline void ecnt_udpv6_sendmsg_inline_hook
+(struct flowi6 *p_fl6, struct sock *sk)
+{
+
+ if ( !p_fl6 )
+ return;
+
+
+ return;
+}
+
+static inline int ecnt_do_ipv6_setsockopt_inline_hook
+(struct sock *sk, int level, struct net *net, int usrval,
+int optname, char __user *optval, unsigned int optlen, int *err)
+{
+ int val = usrval;
+
+ if ( optlen < sizeof(int) )
+ {
+ if (optlen >= sizeof(__u16))
+ {
+ __u16 u16val;
+ if (get_user(u16val, (__u16 __user *) optval))
+ return ECNT_HOOK_ERROR;
+ val = (int) u16val;
+ }
+ else if ( optlen >= sizeof(char) )
+ {
+ unsigned char ucval;
+
+ if ( get_user(ucval, (unsigned char __user *) optval) )
+ {
+ *err = -EFAULT;
+ return ECNT_HOOK_ERROR;
+ }
+ val = (int) ucval;
+ }
+ }
+
+ switch ( optname )
+ {
+ case IPV6_SKB_MARK:
+ *err = -EINVAL;
+ if ( optlen < sizeof(int) )
+ break;
+ sk->sk_mark = val;
+ *err = 0;
+ break;
+ case IPV6_SKB_VLAN_ID:
+ *err = -EINVAL;
+ if ( optlen < sizeof(__be16) )
+ break;
+ sk->lVlanId = val;
+ *err = 0;
+ break;
+ default:
+ break;
+ }
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_do_ipv6_getsockopt_inline_hook
+(struct sock *sk, int level, int optname, int *val,
+char __user *optval, int __user *optlen, unsigned int flags)
+{
+ int len = 0;
+
+ switch ( optname )
+ {
+ case IPV6_SKB_VLAN_ID:
+ *val = sk->lVlanId;;
+ break;
+ default:
+ return ECNT_CONTINUE;
+ }
+
+ return ECNT_RETURN;
+}
+
+static inline int ecnt_udp_v6_push_pending_frames_inline_hook
+(struct inet_sock *inet, struct sock *sk, struct sk_buff *skb)
+{
+ if ( !inet || !sk || !skb )
+ return ECNT_CONTINUE;
+
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_tcp_v6_rcv_inline_hook
+(struct sock *sk, struct sk_buff *skb,
+struct net *net, struct ipv6hdr *hdr, struct tcphdr *th)
+{
+ if ( !sk )
+ {
+ }
+ else
+ {
+ }
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_icmpv6_send_inline_hook
+(struct sk_buff *skb_in, int type, int code, __be32 info)
+{
+
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_ndisc_send_skb_inline_hook
+(struct sk_buff *skb)
+{
+
+ return ECNT_CONTINUE;
+}
+
+#endif
+
+#ifdef TCSUPPORT_IPV6_ENHANCEMENT
+static inline int is_wan_dev(struct net_device *dev)
+{
+ return (strstr(dev->name, "nas") != NULL
+#if defined(TCSUPPORT_FH_ENV) || defined(TCSUPPORT_IS_FH_PON)
+ || strstr(dev->name, "pon0.") != NULL
+#endif
+ || strstr(dev->name, "ppp") != NULL);
+}
+#endif
+
+
+static inline int ecnt_ndisc_router_discovery_if_inline_hook(struct inet6_dev *in6_dev)
+{
+#ifdef TCSUPPORT_IPV6_ENHANCEMENT
+ /*Enable WAN interface to receive RA for SLAAC mode*/
+ if (!is_wan_dev(in6_dev->dev) || !in6_dev->cnf.accept_ra)
+ return ECNT_CONTINUE;
+ else
+ return ECNT_RETURN_DROP;
+#else
+ if (!ipv6_accept_ra(in6_dev))
+ return ECNT_CONTINUE;
+ else
+ return ECNT_RETURN_DROP;
+#endif
+
+}
+
+
+static inline int ecnt_ndisc_router_discovery_inline_hook
+(struct sk_buff *skb,struct inet6_dev *in6_dev,struct ndisc_options ndopts, struct neighbour *neigh)
+{
+
+
+#ifdef TCSUPPORT_IPV6_ENHANCEMENT
+ if(neigh){
+ /*Add for outputing default gateway by RA*/
+ sprintf(neigh->parms->dlf_route, NIP6_FMT, NIP6(ipv6_hdr(skb)->saddr));
+ }
+
+#endif
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_addrconf_prefix_rcv_inline_hook
+(struct inet6_dev *in6_dev,struct prefix_info *pinfo,struct in6_addr addr)
+{
+
+#ifdef TCSUPPORT_IPV6_ENHANCEMENT
+ /*Add for outputing slaac address by RA*/
+ sprintf(in6_dev->cnf.slaac_addr, NIP6_FMT" %d", NIP6(addr), pinfo->prefix_len);
+#endif
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_addrconf_dad_complete_if_inline_hook(struct inet6_ifaddr *ifp)
+{
+#ifdef TCSUPPORT_IPV6_ENHANCEMENT
+ /*Enable WAN interface to send RS for SLAAC mode*/
+ if(is_wan_dev(ifp->idev->dev) )
+ return ECNT_CONTINUE;
+ else
+ return ECNT_RETURN_DROP;
+#else
+ if(ipv6_accept_ra(ifp->idev))
+ return ECNT_CONTINUE;
+ else
+ return ECNT_RETURN_DROP;
+#endif
+
+}
+static inline int ecnt_addrconf_rs_timer_if_inline_hook(struct inet6_dev *idev)
+{
+#ifdef TCSUPPORT_IPV6_ENHANCEMENT
+ /*Enable WAN interface to send RS for SLAAC mode*/
+ if(!is_wan_dev(idev->dev))
+ return ECNT_CONTINUE;
+ else
+ return ECNT_RETURN_DROP;
+#else
+ if (!ipv6_accept_ra(idev))
+ return ECNT_CONTINUE;
+ else
+ return ECNT_RETURN_DROP;
+#endif
+
+}
+
+static inline int ecnt_addrconf_dev_config_if_inline_hook(struct net_device *dev, struct inet6_dev *idev)
+{
+#ifdef TCSUPPORT_IPV6_ENHANCEMENT
+ struct in6_addr addr;
+ //Disable lan device add linklocal address,except br0
+ if(is_wan_dev(dev) || (strstr(dev->name, "br") != NULL)){
+ memset(&addr, 0, sizeof(struct in6_addr));
+ addr.s6_addr32[0] = htonl(0xFE800000);
+ //set br0 local link address as fe80::1
+ if(strstr(dev->name, "br") != NULL){
+ addr.s6_addr[8] = 0;
+ addr.s6_addr[9] = 0;
+ addr.s6_addr[10] = 0;
+ addr.s6_addr[11] = 0;
+ addr.s6_addr[12] = 0;
+ addr.s6_addr[13] = 0;
+ addr.s6_addr[14] = 0;
+ addr.s6_addr[15] = 1;
+ addrconf_add_linklocal(idev, &addr);
+ }
+ else{
+ if (ipv6_generate_eui64(addr.s6_addr + 8, dev) == 0)
+ addrconf_add_linklocal(idev, &addr);
+ }
+ }
+ return ECNT_RETURN_DROP;
+#else
+ return ECNT_CONTINUE;
+#endif
+
+}
+
+
+
Index: linux-3.18.21/net/ipv6/ip6_icmp.c
===================================================================
--- linux-3.18.21.orig/net/ipv6/ip6_icmp.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv6/ip6_icmp.c 2018-02-05 13:21:31.000000000 +0800
@@ -5,6 +5,7 @@
#include <linux/spinlock.h>
#include <net/ipv6.h>
+#include "ecnt_net_ipv6.h"
#if IS_ENABLED(CONFIG_IPV6)
@@ -35,6 +36,9 @@
ip6_icmp_send_t *send;
rcu_read_lock();
+ if ( ECNT_RETURN ==
+ ecnt_icmpv6_send_inline_hook(skb, type, code, info) )
+ goto out;
send = rcu_dereference(ip6_icmp_send);
if (!send)
Index: linux-3.18.21/net/ipv6/ip6_input.c
===================================================================
--- linux-3.18.21.orig/net/ipv6/ip6_input.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv6/ip6_input.c 2018-02-05 13:21:31.000000000 +0800
@@ -276,6 +276,10 @@
int ip6_input(struct sk_buff *skb)
{
+#if defined(TCSUPPORT_RA_HWNAT) && !defined(TCSUPPORT_MT7510_FE)
+ if (ra_sw_nat_hook_free)
+ ra_sw_nat_hook_free(skb);
+#endif
return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
ip6_input_finish);
}
Index: linux-3.18.21/net/ipv6/ipv6_sockglue.c
===================================================================
--- linux-3.18.21.orig/net/ipv6/ipv6_sockglue.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv6/ipv6_sockglue.c 2018-02-05 13:21:31.000000000 +0800
@@ -54,6 +54,7 @@
#include <net/compat.h>
#include <asm/uaccess.h>
+#include "ecnt_net_ipv6.h"
struct ip6_ra_chain *ip6_ra_chain;
DEFINE_RWLOCK(ip6_ra_lock);
@@ -839,6 +840,8 @@
retv = 0;
break;
}
+ ecnt_do_ipv6_setsockopt_inline_hook(sk, level, net, val,
+ optname, optval, optlen, &retv);
release_sock(sk);
@@ -957,6 +960,13 @@
if (get_user(len, optlen))
return -EFAULT;
+
+ if ( ECNT_RETURN == ecnt_do_ipv6_getsockopt_inline_hook(sk, level, &val,
+ optname, optval, optlen, flags) )
+ {
+ goto GETSOCKOPT_END;
+ }
+
switch (optname) {
case IPV6_ADDRFORM:
if (sk->sk_protocol != IPPROTO_UDP &&
@@ -1281,6 +1291,8 @@
default:
return -ENOPROTOOPT;
}
+
+GETSOCKOPT_END:
len = min_t(unsigned int, sizeof(int), len);
if (put_user(len, optlen))
return -EFAULT;
Index: linux-3.18.21/net/ipv6/mcast.c
===================================================================
--- linux-3.18.21.orig/net/ipv6/mcast.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv6/mcast.c 2018-02-05 14:20:20.000000000 +0800
@@ -994,6 +994,10 @@
return rv;
}
+#if defined(TCSUPPORT_XPON_IGMP)
+EXPORT_SYMBOL(ipv6_chk_mcast_addr);
+#endif
+
static void mld_gq_start_timer(struct inet6_dev *idev)
{
unsigned long tv = prandom_u32() % idev->mc_maxdelay;
Index: linux-3.18.21/net/ipv6/ndisc.c
===================================================================
--- linux-3.18.21.orig/net/ipv6/ndisc.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv6/ndisc.c 2018-02-05 13:21:31.000000000 +0800
@@ -71,6 +71,7 @@
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
+#include "ecnt_net_ipv6.h"
/* Set to 3 to get tracing... */
#define ND_DEBUG 1
@@ -138,6 +139,9 @@
[NEIGH_VAR_ANYCAST_DELAY] = 1 * HZ,
[NEIGH_VAR_PROXY_DELAY] = (8 * HZ) / 10,
},
+#ifdef TCSUPPORT_IPV6_ENHANCEMENT
+ .dlf_route = "",
+#endif
},
.gc_interval = 30 * HZ,
.gc_thresh1 = 128,
@@ -453,7 +457,7 @@
rcu_read_lock();
idev = __in6_dev_get(dst->dev);
IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
-
+ ecnt_ndisc_send_skb_inline_hook(skb);
err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
dst_output);
if (!err) {
@@ -1104,7 +1108,8 @@
return;
}
- if (!ipv6_accept_ra(in6_dev)) {
+ if (ecnt_ndisc_router_discovery_if_inline_hook(in6_dev))
+ {
ND_PRINTK(2, info,
"RA: %s, did not accept ra for dev: %s\n",
__func__, skb->dev->name);
@@ -1292,7 +1297,7 @@
NEIGH_UPDATE_F_ISROUTER);
}
- if (!ipv6_accept_ra(in6_dev)) {
+ if (ecnt_ndisc_router_discovery_if_inline_hook(in6_dev)){
ND_PRINTK(2, info,
"RA: %s, accept_ra is false for dev: %s\n",
__func__, skb->dev->name);
@@ -1385,6 +1390,8 @@
if (ndopts.nd_opts_tgt_lladdr || ndopts.nd_opts_rh) {
ND_PRINTK(2, warn, "RA: invalid RA options\n");
}
+
+ ecnt_ndisc_router_discovery_inline_hook(skb,in6_dev,ndopts,neigh);
out:
ip6_rt_put(rt);
if (neigh)
Index: linux-3.18.21/net/ipv6/netfilter/ecnt_netfilter_ipv6.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/ipv6/netfilter/ecnt_netfilter_ipv6.h 2018-02-05 14:20:41.000000000 +0800
@@ -0,0 +1,21 @@
+#ifndef _LINUX_ECNT_NETFILTER_IPV6_H
+#define _LINUX_ECNT_NETFILTER_IPV6_H
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <ecnt_hook/ecnt_hook.h>
+#include "../ecnt_net_ipv6.h"
+
+static inline int ecnt_ip6t_mangle_out_inline_hook
+(struct sk_buff *skb, const struct net_device *out)
+{
+
+ if ( !skb || !out )
+ return ECNT_CONTINUE;
+
+
+ return ECNT_CONTINUE;
+}
+
+
+#endif
+
Index: linux-3.18.21/net/ipv6/netfilter/ip6table_mangle.c
===================================================================
--- linux-3.18.21.orig/net/ipv6/netfilter/ip6table_mangle.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv6/netfilter/ip6table_mangle.c 2018-02-05 13:21:31.000000000 +0800
@@ -12,6 +12,7 @@
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/slab.h>
#include <net/ipv6.h>
+#include "ecnt_netfilter_ipv6.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -48,6 +49,9 @@
}
#endif
+ if ( ECNT_RETURN_DROP == ecnt_ip6t_mangle_out_inline_hook(skb, out) )
+ return NF_DROP;
+
/* save source/dest address, mark, hoplimit, flowlabel, priority, */
memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr));
memcpy(&daddr, &ipv6_hdr(skb)->daddr, sizeof(daddr));
Index: linux-3.18.21/net/ipv6/tcp_ipv6.c
===================================================================
--- linux-3.18.21.orig/net/ipv6/tcp_ipv6.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv6/tcp_ipv6.c 2018-02-05 13:21:31.000000000 +0800
@@ -69,6 +69,7 @@
#include <linux/crypto.h>
#include <linux/scatterlist.h>
+#include "ecnt_net_ipv6.h"
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
@@ -1385,7 +1386,7 @@
return 0;
}
-static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
+void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
const struct tcphdr *th)
{
/* This is tricky: we move IP6CB at its correct location into
@@ -1450,6 +1451,8 @@
sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
inet6_iif(skb));
+ if ( ECNT_RETURN_DROP == ecnt_tcp_v6_rcv_inline_hook(sk, skb, net, hdr, th) )
+ goto discard_it;
if (!sk)
goto no_tcp_socket;
Index: linux-3.18.21/net/ipv6/udp.c
===================================================================
--- linux-3.18.21.orig/net/ipv6/udp.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/ipv6/udp.c 2018-02-05 13:21:31.000000000 +0800
@@ -52,6 +52,9 @@
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"
+#include "ecnt_net_ipv6.h"
+
+#include <ecnt_hook/ecnt_hook_net.h>
static unsigned int udp6_ehashfn(struct net *net,
const struct in6_addr *laddr,
@@ -480,6 +483,8 @@
if (np->rxopt.all)
ip6_datagram_recv_common_ctl(sk, msg, skb);
+ ecnt_udpv6_recvmsg_inline_hook(msg, skb, inet, is_udp4);
+
if (is_udp4) {
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
@@ -868,6 +873,9 @@
if (udp6_csum_init(skb, uh, proto))
goto csum_error;
+ if(ecnt_udp6_lib_rcv_inline_hook(skb, uh, proto) == ECNT_RETURN_DROP)
+ goto discard;
+
/*
* Multicast receive code
*/
@@ -944,8 +952,14 @@
static __inline__ int udpv6_rcv(struct sk_buff *skb)
{
+ struct net_data_s net_data;
+
+ net_data.pskb = &skb;
+ ECNT_UDPV6_RCV_HOOK(ECNT_NET_UDPV6_RCV,&net_data);
+
return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}
+EXPORT_SYMBOL(udpv6_rcv);
/*
* Throw away all pending data and cancel the corking. Socket is locked.
@@ -1056,6 +1070,7 @@
uh->check = CSUM_MANGLED_0;
send:
+ ecnt_udp_v6_push_pending_frames_inline_hook(inet, sk, skb);
err = ip6_push_pending_frames(sk);
if (err) {
if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
@@ -1206,6 +1221,8 @@
connected = 1;
}
+ ecnt_udpv6_sendmsg_inline_hook(&fl6, sk);
+
if (!fl6.flowi6_oif)
fl6.flowi6_oif = sk->sk_bound_dev_if;
Index: linux-3.18.21/net/nat/foe_hook/Makefile
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/nat/foe_hook/Makefile 2018-02-05 13:21:31.000000000 +0800
@@ -0,0 +1,2 @@
+obj-y += foe_hook.o
+foe_hook-objs := hook.o
Index: linux-3.18.21/net/nat/foe_hook/hook.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/nat/foe_hook/hook.c 2018-02-05 13:21:31.000000000 +0800
@@ -0,0 +1,256 @@
+/*
+ ***************************************************************************
+ * Ralink Tech Inc.
+ * 4F, No. 2 Technology 5th Rd.
+ * Science-based Industrial Park
+ * Hsin-chu, Taiwan, R.O.C.
+ *
+ * (c) Copyright 2002-2006, Ralink Technology, Inc.
+ *
+ * All rights reserved. Ralink's source code is an unpublished work and the
+ * use of a copyright notice does not imply otherwise. This source code
+ * contains confidential trade secret material of Ralink Tech. Any attempt
+ * or participation in deciphering, decoding, reverse engineering or in any
+ * way altering the source code is stricitly prohibited, unless the prior
+ * written consent of Ralink Technology, Inc. is obtained.
+ ***************************************************************************
+
+ Module Name:
+
+ hook.c
+
+ Abstract:
+
+ Revision History:
+ Who When What
+ -------- ---------- ----------------------------------------------
+ Name Date Modification logs
+ Steven Liu 2006-10-06 Initial version
+*/
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/foe_hook.h>
+#include <net/sock.h>
+
+#ifdef TCSUPPORT_HWNAT_L2VID
+int (*ra_sw_nat_hook_rx_set_l2lu) (struct sk_buff * skb) = NULL;
+EXPORT_SYMBOL(ra_sw_nat_hook_rx_set_l2lu);
+#endif
+int (*ra_sw_nat_hook_rx) (struct sk_buff * skb) = NULL;
+int (*ra_sw_nat_ds_offload) (struct sk_buff * skb, int *dp) = NULL;
+int (*ra_sw_nat_hook_update_dp)(int index, int dp) = NULL;
+int (*ra_sw_nat_hook_update_vlan)(int index,int outer_vlan,int inner_vlan) = NULL;
+int (*ra_sw_nat_local_in_tx) (struct sk_buff * skb,unsigned short port) = NULL;
+
+int (*ra_sw_nat_hook_save_rxinfo)(struct sk_buff *skb) = NULL;
+int (*ra_sw_nat_hook_restore_rxinfo)(struct sk_buff *skb) = NULL;
+int (*ra_sw_nat_hook_save_txinfo)(struct sk_buff *skb) = NULL;
+int (*ra_sw_nat_hook_restore_txinfo)(struct sk_buff *skb) = NULL;
+int (*ra_sw_nat_hook_is_hwnat_pkt)(struct sk_buff *skb) = NULL;
+int (*ra_sw_nat_hook_sendto_ppe)(struct sk_buff *skb) = NULL;
+int (*ra_sw_nat_hook_set_l2tp_dev)(struct net_device *dev) = NULL;
+struct net_device* (*ra_sw_nat_hook_read_l2tp_dev)(void) = NULL;
+int (*ra_sw_nat_rtsp_offload_restore) (struct sk_buff * skb, int calc_sum) = NULL;
+int (*ra_sw_nat_rtsp_data_handle) (struct sk_buff * skb, char *rb_ptr, unsigned int datalen) = NULL;
+void (*ra_sw_nat_set_wan_acntid_hook) (struct sk_buff *skb, unsigned char wan_index, unsigned char dir) = NULL;
+void (*ra_sw_nat_clear_wan_acntid_hook) (unsigned char wan_index) = NULL;
+
+#ifdef TCSUPPORT_MT7510_FE
+int (*ra_sw_nat_hook_tx) (struct sk_buff * skb, struct port_info * pinfo, int magic);
+#else
+int (*ra_sw_nat_hook_tx) (struct sk_buff * skb, int gmac_no) = NULL;
+#endif
+int (*ra_sw_nat_hook_free) (struct sk_buff * skb) = NULL;
+int (*ra_sw_nat_hook_rxinfo) (struct sk_buff * skb, int magic, char *data, int data_length) = NULL;
+int (*ra_sw_nat_hook_txq) (struct sk_buff * skb, int txq) = NULL;
+int (*ra_sw_nat_hook_magic) (struct sk_buff * skb, int magic) = NULL;
+int (*ra_sw_nat_hook_set_magic) (struct sk_buff * skb, int magic) = NULL;
+int (*ra_sw_nat_hook_xfer) (struct sk_buff *skb, const struct sk_buff *prev_p) = NULL;
+int (*ra_sw_nat_hook_is_alive_pkt)(unsigned int crsn) = NULL;
+#ifdef TCSUPPORT_RA_HWNAT_ENHANCE_HOOK
+int (*ra_sw_nat_hook_drop_packet) (struct sk_buff * skb) = NULL;
+int (*ra_sw_nat_hook_clean_table) (void) = NULL;
+int (*ra_sw_nat_hook_clean_multicast_entry) (void) = NULL;
+#endif
+int (*ra_sw_nat_hook_cpu_meter)(struct sk_buff* skb,FETxMsg_T* txMsg,struct port_info* pinfo,unsigned char dir,unsigned short mtrIndex) = NULL;
+
+int (*ra_sw_nat_hook_tls_vtag_handle_hook)(struct sk_buff** pskb) = NULL;
+EXPORT_SYMBOL(ra_sw_nat_hook_tls_vtag_handle_hook);
+
+int is_hwnat_dont_clean = 0;
+EXPORT_SYMBOL(is_hwnat_dont_clean);
+
+int (*ra_sw_nat_hook_foeentry) (void * inputvalue,int operation) = NULL;
+#ifdef TCSUPPORT_MT7510_FE
+#ifdef TCSUPPORT_HWNAT_LED
+int is_hwnat_led_enable = 1;
+#else
+int is_hwnat_led_enable = 0;
+#endif // end of TCSUPPORT_HWNAT_LED
+EXPORT_SYMBOL(is_hwnat_led_enable);
+#endif // end of TCSUPPORT_MT7510_FE
+
+//#if defined(TCSUPPORT_XPON_IGMP) || defined(TCSUPPORT_MULTICAST_SPEED)
+int (*hwnat_is_alive_pkt_hook)(struct sk_buff* skb) = NULL;
+int (*hwnat_skb_to_foe_hook)(struct sk_buff* skb) = NULL;
+int (*hwnat_set_special_tag_hook)(int index, int tag) = NULL;
+int (*hwnat_multicast_set_hwnat_info_hook)(int index, int mask, short int bindvid) = NULL;
+int (*hwnat_delete_foe_entry_hook)(int index) = NULL;
+int (*hwnat_is_multicast_entry_hook)(int index ,unsigned char* grp_addr,unsigned char* src_addr,int type) = NULL;
+int (*hwnat_is_drop_entry_hook)(int index ,unsigned char* grp_addr,unsigned char* src_addr,int type) = NULL;
+int (*multicast_speed_find_entry_hook)(int index) = NULL;
+int (*multicast_speed_learn_flow_hook)(struct sk_buff* skb) = NULL;
+int (*hwnat_set_rule_according_to_state_hook)(int index, int state,int mask) = NULL;
+int (*hwnat_set_recover_info_hook)(struct sk_buff* skb,struct sock *sk,int flag) = NULL;
+int (*xpon_igmp_learn_flow_hook)(struct sk_buff* skb) = NULL;
+int (*xpon_igmp_exist)(void) = NULL;
+int (*xpon_igmp_clear_flows)(void) = NULL;
+int (*hwnat_set_wlan_multicast_hook)(int index ,int flag) = NULL;
+int (*wan_multicast_drop_hook)(struct sk_buff* skb) = NULL;
+int (*wan_multicast_undrop_hook)(void) = NULL;
+int (*wan_mvlan_change_hook)(void) = NULL;
+int (*multicast_flood_find_entry_hook)(int index) = NULL;
+int (*hwnat_set_multicast_speed_enable_hook)(int enable) = NULL;
+int (*multicast_flood_is_bind_hook)(int index) = NULL;
+
+int (*MT7530LanPortMap2Switch_hook) (int port) = NULL;
+EXPORT_SYMBOL(MT7530LanPortMap2Switch_hook);
+
+#ifdef TCSUPPORT_MT7510_FE
+void (*restore_offload_info_hook)(struct sk_buff *skb, struct port_info *pinfo, int magic) = NULL;
+EXPORT_SYMBOL(restore_offload_info_hook);
+#endif
+
+EXPORT_SYMBOL(multicast_flood_is_bind_hook);
+EXPORT_SYMBOL(multicast_flood_find_entry_hook);
+EXPORT_SYMBOL(hwnat_set_multicast_speed_enable_hook);
+EXPORT_SYMBOL(hwnat_is_alive_pkt_hook);
+EXPORT_SYMBOL(hwnat_skb_to_foe_hook);
+EXPORT_SYMBOL(hwnat_set_special_tag_hook);
+EXPORT_SYMBOL(hwnat_multicast_set_hwnat_info_hook);
+EXPORT_SYMBOL(hwnat_delete_foe_entry_hook);
+EXPORT_SYMBOL(hwnat_is_multicast_entry_hook);
+EXPORT_SYMBOL(hwnat_is_drop_entry_hook);
+EXPORT_SYMBOL(multicast_speed_find_entry_hook);
+EXPORT_SYMBOL(multicast_speed_learn_flow_hook);
+EXPORT_SYMBOL(hwnat_set_rule_according_to_state_hook);
+EXPORT_SYMBOL(hwnat_set_recover_info_hook);
+EXPORT_SYMBOL(xpon_igmp_learn_flow_hook);
+EXPORT_SYMBOL(xpon_igmp_exist);
+EXPORT_SYMBOL(xpon_igmp_clear_flows);
+EXPORT_SYMBOL(hwnat_set_wlan_multicast_hook);
+EXPORT_SYMBOL(wan_multicast_drop_hook);
+EXPORT_SYMBOL(wan_multicast_undrop_hook);
+EXPORT_SYMBOL(wan_mvlan_change_hook);
+//#endif
+
+EXPORT_SYMBOL(ra_sw_nat_hook_rx);
+EXPORT_SYMBOL(ra_sw_nat_ds_offload);
+EXPORT_SYMBOL(ra_sw_nat_hook_update_dp);
+EXPORT_SYMBOL(ra_sw_nat_hook_update_vlan);
+EXPORT_SYMBOL(ra_sw_nat_local_in_tx);
+
+EXPORT_SYMBOL(ra_sw_nat_hook_save_rxinfo);
+EXPORT_SYMBOL(ra_sw_nat_hook_restore_rxinfo);
+EXPORT_SYMBOL(ra_sw_nat_hook_save_txinfo);
+EXPORT_SYMBOL(ra_sw_nat_hook_restore_txinfo);
+EXPORT_SYMBOL(ra_sw_nat_hook_is_hwnat_pkt);
+EXPORT_SYMBOL(ra_sw_nat_hook_sendto_ppe);
+EXPORT_SYMBOL(ra_sw_nat_hook_set_l2tp_dev);
+EXPORT_SYMBOL(ra_sw_nat_hook_read_l2tp_dev);
+EXPORT_SYMBOL(ra_sw_nat_rtsp_offload_restore);
+EXPORT_SYMBOL(ra_sw_nat_rtsp_data_handle);
+EXPORT_SYMBOL(ra_sw_nat_set_wan_acntid_hook);
+EXPORT_SYMBOL(ra_sw_nat_clear_wan_acntid_hook);
+
+EXPORT_SYMBOL(ra_sw_nat_hook_tx);
+EXPORT_SYMBOL(ra_sw_nat_hook_free);
+EXPORT_SYMBOL(ra_sw_nat_hook_rxinfo);
+EXPORT_SYMBOL(ra_sw_nat_hook_txq);
+EXPORT_SYMBOL(ra_sw_nat_hook_magic);
+EXPORT_SYMBOL(ra_sw_nat_hook_set_magic);
+EXPORT_SYMBOL(ra_sw_nat_hook_xfer);
+EXPORT_SYMBOL(ra_sw_nat_hook_is_alive_pkt);
+#ifdef TCSUPPORT_RA_HWNAT_ENHANCE_HOOK
+EXPORT_SYMBOL(ra_sw_nat_hook_drop_packet);
+EXPORT_SYMBOL(ra_sw_nat_hook_clean_table);
+EXPORT_SYMBOL(ra_sw_nat_hook_clean_multicast_entry);
+#endif
+EXPORT_SYMBOL(ra_sw_nat_hook_foeentry);
+EXPORT_SYMBOL(ra_sw_nat_hook_cpu_meter);
+
+int (*hwnat_clean_lan_hook)(unsigned int) = NULL;
+int (*hwnat_clean_wan_hook)(unsigned int) = NULL;
+EXPORT_SYMBOL(hwnat_clean_lan_hook);
+EXPORT_SYMBOL(hwnat_clean_wan_hook);
+
+int hwnat_clean_lan(unsigned int port)
+{
+ if (hwnat_clean_lan_hook)
+ return hwnat_clean_lan_hook(port);
+ return 0;
+}
+
+int hwnat_clean_wan(unsigned int vid)
+{
+ if (hwnat_clean_wan_hook)
+ return hwnat_clean_wan_hook(vid);
+ return 0;
+}
+EXPORT_SYMBOL(hwnat_clean_lan);
+EXPORT_SYMBOL(hwnat_clean_wan);
+
+#if defined(TCSUPPORT_SWNAT)
+int (*sw_upstream_nat_rx_hook) (struct sk_buff * skb, int foe_tbl_index) = NULL;
+int (*sw_upstream_nat_tx_hook)(struct sk_buff * skb)= NULL;
+int (*sw_downstream_nat_rx_hook) (struct sk_buff * skb) = NULL;
+int (*sw_downstream_nat_tx_hook)(struct sk_buff * skb)= NULL;
+EXPORT_SYMBOL(sw_upstream_nat_rx_hook);
+EXPORT_SYMBOL(sw_upstream_nat_tx_hook);
+EXPORT_SYMBOL(sw_downstream_nat_rx_hook);
+EXPORT_SYMBOL(sw_downstream_nat_tx_hook);
+
+unsigned char (*get_swnat_clean_flag_hook)(void) = NULL;
+void (*set_swnat_clean_flag_hook)(unsigned char val) = NULL;
+EXPORT_SYMBOL(get_swnat_clean_flag_hook);
+EXPORT_SYMBOL(set_swnat_clean_flag_hook);
+int (*sw_threshold_get_hook)(int *min, int *max)= NULL;
+EXPORT_SYMBOL(sw_threshold_get_hook);
+#endif
+
+#ifndef TCSUPPORT_IPSEC_PASSTHROUGH
+#include <net/mtk_esp.h>
+//when not open vpn passthrough function,below function do nothing
+void
+ipsec_esp_ouput_finish_pt(ipsec_finishpara_t* inputParams)
+{
+ return;
+}
+
+void ipsec_esp_input_finish_pt(ipsec_finishpara_t* inputParams)
+{
+ return;
+}
+int ipsec_esp_output_pt(ipsec_para_t* ipsecparams)
+{
+ return -1;
+}
+
+int ipsec_esp_input_pt(ipsec_para_t* ipsecparams)
+{
+ return -1;
+}
+
+EXPORT_SYMBOL(ipsec_esp_input_pt);
+EXPORT_SYMBOL(ipsec_esp_input_finish_pt);
+EXPORT_SYMBOL(ipsec_esp_output_pt);
+EXPORT_SYMBOL(ipsec_esp_ouput_finish_pt);
+#endif
+int (*wan_speed_test_hook)(struct sk_buff*) = NULL;
+EXPORT_SYMBOL(wan_speed_test_hook);
+
+
Index: linux-3.18.21/net/netfilter/Kconfig
===================================================================
--- linux-3.18.21.orig/net/netfilter/Kconfig 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/netfilter/Kconfig 2018-02-05 13:21:31.000000000 +0800
@@ -323,6 +323,16 @@
To compile it as a module, choose M here. If unsure, say N.
+config NF_CONNTRACK_RTSP
+ tristate "RTSP protocol support"
+ depends on NETFILTER_ADVANCED
+ help
+ Support the RTSP protocol. This allows UDP transports to be setup
+ properly, including RTP and RDT.
+
+ If you want to compile it as a module, say 'M' here and read
+ Documentation/modules.txt. If unsure, say 'Y'.
+
config NF_CT_NETLINK
tristate 'Connection tracking netlink interface'
select NETFILTER_NETLINK
@@ -411,6 +421,11 @@
depends on NF_CONNTRACK && NF_NAT
default NF_NAT && NF_CONNTRACK_TFTP
+config NF_NAT_RTSP
+ tristate
+ depends on NF_CONNTRACK && NF_NAT
+ default NF_NAT && NF_CONNTRACK_RTSP
+
config NETFILTER_SYNPROXY
tristate
@@ -1177,6 +1192,26 @@
To compile it as a module, choose M here. If unsure, say N.
+config NETFILTER_XT_MATCH_LAYER7
+ tristate '"layer7" match support'
+ depends on NETFILTER_XTABLES
+ depends on NETFILTER_ADVANCED
+ depends on NF_CONNTRACK
+ help
+ Say Y if you want to be able to classify connections (and their
+ packets) based on regular expression matching of their application
+ layer data. This is one way to classify applications such as
+ peer-to-peer filesharing systems that do not always use the same
+ port.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config NETFILTER_XT_MATCH_LAYER7_DEBUG
+ bool 'Layer 7 debugging output'
+ depends on NETFILTER_XT_MATCH_LAYER7
+ help
+ Say Y to get lots of debugging output.
+
config NETFILTER_XT_MATCH_LENGTH
tristate '"length" match support'
depends on NETFILTER_ADVANCED
Index: linux-3.18.21/net/netfilter/Makefile
===================================================================
--- linux-3.18.21.orig/net/netfilter/Makefile 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/netfilter/Makefile 2018-02-05 13:21:31.000000000 +0800
@@ -43,6 +43,7 @@
obj-$(CONFIG_NF_CONNTRACK_SANE) += nf_conntrack_sane.o
obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o
obj-$(CONFIG_NF_CONNTRACK_TFTP) += nf_conntrack_tftp.o
+obj-$(CONFIG_NF_CONNTRACK_RTSP) += nf_conntrack_rtsp.o
nf_nat-y := nf_nat_core.o nf_nat_proto_unknown.o nf_nat_proto_common.o \
nf_nat_proto_udp.o nf_nat_proto_tcp.o nf_nat_helper.o
@@ -63,6 +64,7 @@
obj-$(CONFIG_NF_NAT_IRC) += nf_nat_irc.o
obj-$(CONFIG_NF_NAT_SIP) += nf_nat_sip.o
obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o
+obj-$(CONFIG_NF_NAT_RTSP) += nf_nat_rtsp.o
# SYNPROXY
obj-$(CONFIG_NETFILTER_SYNPROXY) += nf_synproxy_core.o
@@ -144,6 +146,7 @@
obj-$(CONFIG_NETFILTER_XT_MATCH_IPRANGE) += xt_iprange.o
obj-$(CONFIG_NETFILTER_XT_MATCH_IPVS) += xt_ipvs.o
obj-$(CONFIG_NETFILTER_XT_MATCH_L2TP) += xt_l2tp.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_LAYER7) += xt_layer7.o
obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o
obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o
obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o
Index: linux-3.18.21/net/netfilter/ecnt_netfilter.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/netfilter/ecnt_netfilter.h 2018-02-05 14:20:41.000000000 +0800
@@ -0,0 +1,309 @@
+#ifndef _LINUX_ECNT_NETFILTER_H
+#define _LINUX_ECNT_NETFILTER_H
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <ecnt_hook/ecnt_hook.h>
+#include <linux/ecnt_vlan_bind.h>
+#include "../ecnt_net.h"
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
+
+
+#define ECNT_NF_CT_SYSCTL_TABLE_HOOK \
+{\
+ .procname = "nf_conntrack_ftp_enable",\
+ .data = &nf_conntrack_ftp_enable,\
+ .maxlen = sizeof(int),\
+ .mode = 0644,\
+ .proc_handler = &proc_dointvec,\
+}, /* for ftp alg switch */ \
+{\
+ .procname = "nf_conntrack_sip_enable",\
+ .data = &nf_conntrack_sip_enable,\
+ .maxlen = sizeof(int),\
+ .mode = 0644,\
+ .proc_handler = &proc_dointvec,\
+}, /* for sip alg switch */ \
+{\
+ .procname = "nf_conntrack_h323_enable",\
+ .data = &nf_conntrack_h323_enable,\
+ .maxlen = sizeof(int),\
+ .mode = 0644,\
+ .proc_handler = &proc_dointvec,\
+}, /* for h323 alg switch */ \
+{\
+ .procname = "nf_conntrack_rtsp_enable",\
+ .data = &nf_conntrack_rtsp_enable,\
+ .maxlen = sizeof(int),\
+ .mode = 0644,\
+ .proc_handler = &proc_dointvec,\
+}, /* for rtsp alg switch */ \
+{\
+ .procname = "nf_conntrack_l2tp_enable",\
+ .data = &nf_conntrack_l2tp_enable,\
+ .maxlen = sizeof(int),\
+ .mode = 0644,\
+ .proc_handler = &proc_dointvec,\
+}, /* for l2tp alg switch */ \
+{\
+ .procname = "nf_conntrack_ipsec_enable",\
+ .data = &nf_conntrack_ipsec_enable,\
+ .maxlen = sizeof(int),\
+ .mode = 0644,\
+ .proc_handler = &proc_dointvec,\
+}, /* for ipsec alg switch */ \
+{\
+ .procname = "nf_conntrack_pptp_enable",\
+ .data = &nf_conntrack_pptp_enable,\
+ .maxlen = sizeof(int),\
+ .mode = 0644,\
+ .proc_handler = &proc_dointvec,\
+}, /* for pptp alg switch */ \
+{\
+ .procname = "nf_conntrack_portscan_enable",\
+ .data = &nf_conntrack_portscan_enable,\
+ .maxlen = sizeof(int),\
+ .mode = 0644,\
+ .proc_handler = &proc_dointvec,\
+}, /* for ftp port */ \
+{\
+ .procname = "nf_conntrack_ftp_port",\
+ .data = &nf_conntrack_ftp_port,\
+ .maxlen = sizeof(int),\
+ .mode = 0644,\
+ .proc_handler = &proc_dointvec,\
+}, /* for portscan switch */ \
+{\
+ .procname = "nf_conntrack_esp_timeout",\
+ .data = &nf_conntrack_esp_timeout,\
+ .maxlen = sizeof(int),\
+ .mode = 0644,\
+ .proc_handler = &proc_dointvec,\
+}, /* for esp unreply ct timeout */
+
+
+
+static inline void ecnt_init_conntrack_inline_hook
+(struct net *net, struct sk_buff *skb, struct nf_conn *ct)
+{
+ ct->loport = 0;
+ ct->hiport = 0;
+
+ return;
+}
+
+static inline void ecnt_nf_conntrack_init_start_inline_hook(int cpu)
+{
+
+ return;
+}
+
+static inline int ecnt_nf_conntrack_ftp_help_inline_hook
+(struct sk_buff *skb, struct nf_conn *ct)
+{
+ /*for FTP ALG switch*/
+ if ( !nf_conntrack_ftp_enable )
+ return ECNT_RETURN; /*ftp switch is off, just accept packet and do not do ALG */
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_nf_conntrack_sip_help_inline_hook
+(struct sk_buff *skb, struct nf_conn *ct)
+{
+ /*for SIP ALG switch*/
+ if ( !nf_conntrack_sip_enable )
+ return ECNT_RETURN; /* sip switch is off, just accept packet and do not do ALG */
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_nf_conntrack_sip_help_tcp_inline_hook
+(struct sk_buff *skb, struct nf_conn *ct)
+{
+ int ret = 0;
+
+ ret = ecnt_nf_conntrack_sip_help_inline_hook(skb, ct);
+ if ( ECNT_CONTINUE != ret)
+ return ret;
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_nf_conntrack_sip_help_udp_inline_hook
+(struct sk_buff *skb, struct nf_conn *ct)
+{
+ int ret = 0;
+
+ ret = ecnt_nf_conntrack_sip_help_inline_hook(skb, ct);
+ if ( ECNT_CONTINUE != ret)
+ return ret;
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_nf_conntrack_h323_help_inline_hook
+(struct sk_buff *skb, struct nf_conn *ct)
+{
+ /* for H.323 ALG switch */
+ if ( !nf_conntrack_h323_enable )
+ return ECNT_RETURN; /*h323 switch is off, just accept packet and do not do ALG */
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_nf_conntrack_h245_help_inline_hook
+(struct sk_buff *skb, struct nf_conn *ct)
+{
+ int ret = 0;
+
+ ret = ecnt_nf_conntrack_h323_help_inline_hook(skb, ct);
+ if ( ECNT_CONTINUE != ret)
+ return ret;
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_nf_conntrack_q31_help_inline_hook
+(struct sk_buff *skb, struct nf_conn *ct)
+{
+ int ret = 0;
+
+ ret = ecnt_nf_conntrack_h323_help_inline_hook(skb, ct);
+ if ( ECNT_CONTINUE != ret)
+ return ret;
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_nf_conntrack_ras_help_inline_hook
+(struct sk_buff *skb, struct nf_conn *ct)
+{
+ int ret = 0;
+
+ ret = ecnt_nf_conntrack_h323_help_inline_hook(skb, ct);
+ if ( ECNT_CONTINUE != ret)
+ return ret;
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_nf_conntrack_rtsp_help_inline_hook
+(struct sk_buff *skb, struct nf_conn *ct)
+{
+ /* for RTSP ALG switch */
+ if ( !nf_conntrack_rtsp_enable )
+ return ECNT_RETURN; /* rtsp switch is off, just accept packet and do not do ALG */
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_nf_conntrack_pptp_help_inline_hook
+(struct sk_buff *skb, struct nf_conn *ct)
+{
+ /* for PPTP ALG switch */
+ if ( !nf_conntrack_pptp_enable )
+ return ECNT_RETURN; /* pptp switch is off, just accept packet and do not do ALG */
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_generic_packet_inline_hook
+(struct nf_conn *ct,
+const struct sk_buff *skb,
+unsigned int dataoff,
+enum ip_conntrack_info ctinfo,
+u_int8_t pf,
+unsigned int hooknum,
+unsigned int *timeout)
+{
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_nf_conntrack_alloc_inline_hook
+(struct net *net, u16 zone,
+ const struct nf_conntrack_tuple *orig,
+ const struct nf_conntrack_tuple *repl,
+ gfp_t gfp, u32 hash, struct nf_conn *ct)
+{
+
+ return ECNT_CONTINUE;
+}
+
+static inline int ecnt_nf_conntrack_free_inline_hook
+(struct nf_conn *ct)
+{
+
+ return ECNT_CONTINUE;
+}
+
+static inline void ecnt_resolve_normal_ct_inline_hook
+(struct nf_conn_help *help, struct sk_buff *skb, struct nf_conntrack_tuple_hash *h)
+{
+#ifdef TCSUPPORT_DS_HWNAT_OFFLOAD
+ if(!(rtsp_hwnat_offload
+ &&(strcmp(help->helper->name, "rtsp") == 0)
+ && (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY))){
+#endif
+ if (ra_sw_nat_hook_free)
+ ra_sw_nat_hook_free(skb);
+#ifdef TCSUPPORT_DS_HWNAT_OFFLOAD
+ }
+#endif
+
+ return;
+}
+
+static inline void ecnt_nf_conntrack_rtsp_help_reply_inline_hook
+(struct sk_buff * skb, char *rb_ptr, unsigned int datalen)
+{
+#ifdef TCSUPPORT_DS_HWNAT_OFFLOAD
+ if(ra_sw_nat_rtsp_data_handle)
+ ra_sw_nat_rtsp_data_handle(skb, rb_ptr, datalen);
+#endif
+
+ return;
+}
+
+/*
+* function nf_nat_mangle_tcp_packet() can only adjust sequence once for one
+* skb data, add ecnt_nf_ct_seqadj_set_inline_hook for change offset after
+nf_nat_mangle_tcp_packet
+*/
+static inline int
+ecnt_nf_ct_seqadj_set_inline_hook
+(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+__be32 seq, s32 off)
+{
+ struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+ struct nf_ct_seqadj *this_way = NULL;
+
+ if (unlikely(!seqadj)) {
+ WARN_ONCE(1, "Missing nfct_seqadj_ext_add() setup call\n");
+ return 0;
+ }
+
+ if ( 0 == off )
+ return 0;
+
+ set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
+
+ spin_lock_bh(&ct->lock);
+
+ /* only HOOK TX, so use thisway direction. */
+ this_way = &seqadj->seq[dir];
+ /* increase mode */
+ if ( this_way->correction_pos == ntohl(seq) )
+ {
+ this_way->offset_after += off;
+ }
+
+ spin_unlock_bh(&ct->lock);
+ return 0;
+}
+
+#endif
+
Index: linux-3.18.21/net/netfilter/nf_conntrack_core.c
===================================================================
--- linux-3.18.21.orig/net/netfilter/nf_conntrack_core.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/netfilter/nf_conntrack_core.c 2018-02-05 13:21:32.000000000 +0800
@@ -52,6 +52,7 @@
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
+#include "ecnt_netfilter.h"
#define NF_CONNTRACK_VERSION "0.5.0"
@@ -312,6 +313,13 @@
* too.
*/
nf_ct_remove_expectations(ct);
+
+#if defined(CONFIG_NETFILTER_XT_MATCH_LAYER7) || defined(CONFIG_NETFILTER_XT_MATCH_LAYER7_MODULE)
+ if(ct->layer7.app_proto)
+ kfree(ct->layer7.app_proto);
+ if(ct->layer7.app_data)
+ kfree(ct->layer7.app_data);
+#endif
nf_ct_del_from_dying_or_unconfirmed_list(ct);
@@ -835,6 +843,9 @@
ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
/* save hash for reusing when confirming */
*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
+
+ ecnt_nf_conntrack_alloc_inline_hook(net, zone, orig, repl, gfp, hash, ct);
+
/* Don't set timer yet: wait for confirmation */
setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
write_pnet(&ct->ct_net, net);
@@ -880,6 +891,8 @@
*/
NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 0);
+ ecnt_nf_conntrack_free_inline_hook(ct);
+
nf_ct_ext_destroy(ct);
nf_ct_ext_free(ct);
kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
@@ -982,6 +995,7 @@
/* Now it is inserted into the unconfirmed list, bump refcount */
nf_conntrack_get(&ct->ct_general);
+ ecnt_init_conntrack_inline_hook(net, skb, ct);
nf_ct_add_to_unconfirmed_list(ct);
local_bh_enable();
@@ -1055,6 +1069,18 @@
}
skb->nfct = &ct->ct_general;
skb->nfctinfo = *ctinfo;
+ #ifdef TCSUPPORT_RA_HWNAT
+ {
+ struct nf_conn_help *help = nfct_help(ct);
+
+ if (help && help->helper) {
+ if (strcmp(help->helper->name, "FULLCONE-NAT") != 0) {
+ ecnt_resolve_normal_ct_inline_hook(help, skb, h);
+ }
+ }
+ }
+#endif
+
return ct;
}
@@ -1689,6 +1715,7 @@
if (ret < 0)
goto err_proto;
+ ecnt_nf_conntrack_init_start_inline_hook(cpu);
/* Set up fake conntrack: to never be deleted, not in any hashes */
for_each_possible_cpu(cpu) {
struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
Index: linux-3.18.21/net/netfilter/nf_conntrack_expect.c
===================================================================
--- linux-3.18.21.orig/net/netfilter/nf_conntrack_expect.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/netfilter/nf_conntrack_expect.c 2018-02-05 13:21:32.000000000 +0800
@@ -211,7 +211,21 @@
otherwise they clash */
struct nf_conntrack_tuple_mask intersect_mask;
int count;
-
+ /*bugfix if sip packet contain with audio and image media attribute with the same port, then it will not match rule in hash contain "sip/audio"
+ * and nf_ct_tuple_mask_cmp() only cmp src/dst addr, port, proto, these are all match with rule in hash
+ * then sip packet with image is dropped
+ */
+ if(a->class != b->class)
+ {
+ struct nf_conn_help *master_help_b = nfct_help(b->master);/*b is from app alg*/
+ if( master_help_b && master_help_b->helper )
+ {
+ if( strcmp(master_help_b->helper->name,"sip")==0)
+ {
+ return false;
+ }
+ }
+ }
intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;
for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
Index: linux-3.18.21/net/netfilter/nf_conntrack_ftp.c
===================================================================
--- linux-3.18.21.orig/net/netfilter/nf_conntrack_ftp.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/netfilter/nf_conntrack_ftp.c 2018-02-05 13:21:32.000000000 +0800
@@ -26,6 +26,7 @@
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <linux/netfilter/nf_conntrack_ftp.h>
+#include "ecnt_netfilter.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
@@ -410,6 +411,9 @@
return NF_ACCEPT;
}
+ if ( ECNT_RETURN == ecnt_nf_conntrack_ftp_help_inline_hook(skb, ct) )
+ return NF_ACCEPT;
+
th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
if (th == NULL)
return NF_ACCEPT;
Index: linux-3.18.21/net/netfilter/nf_conntrack_h323_main.c
===================================================================
--- linux-3.18.21.orig/net/netfilter/nf_conntrack_h323_main.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/netfilter/nf_conntrack_h323_main.c 2018-02-05 13:21:32.000000000 +0800
@@ -33,6 +33,7 @@
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_conntrack_h323.h>
+#include "ecnt_netfilter.h"
/* Parameters */
static unsigned int default_rrq_ttl __read_mostly = 300;
@@ -592,6 +593,9 @@
if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
return NF_ACCEPT;
+ if ( ECNT_RETURN == ecnt_nf_conntrack_h245_help_inline_hook(skb, ct) )
+ return NF_ACCEPT;
+
pr_debug("nf_ct_h245: skblen = %u\n", skb->len);
spin_lock_bh(&nf_h323_lock);
@@ -1167,6 +1171,9 @@
if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
return NF_ACCEPT;
+ if ( ECNT_RETURN == ecnt_nf_conntrack_q31_help_inline_hook(skb, ct) )
+ return NF_ACCEPT;
+
pr_debug("nf_ct_q931: skblen = %u\n", skb->len);
spin_lock_bh(&nf_h323_lock);
@@ -1766,6 +1773,9 @@
int datalen = 0;
int ret;
+ if ( ECNT_RETURN == ecnt_nf_conntrack_ras_help_inline_hook(skb, ct) )
+ return NF_ACCEPT;
+
pr_debug("nf_ct_ras: skblen = %u\n", skb->len);
spin_lock_bh(&nf_h323_lock);
Index: linux-3.18.21/net/netfilter/nf_conntrack_pptp.c
===================================================================
--- linux-3.18.21.orig/net/netfilter/nf_conntrack_pptp.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/netfilter/nf_conntrack_pptp.c 2018-02-05 13:21:32.000000000 +0800
@@ -33,6 +33,7 @@
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_conntrack_proto_gre.h>
#include <linux/netfilter/nf_conntrack_pptp.h>
+#include "ecnt_netfilter.h"
#define NF_CT_PPTP_VERSION "3.1"
@@ -527,6 +528,9 @@
if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
return NF_ACCEPT;
+ if ( ECNT_RETURN == ecnt_nf_conntrack_pptp_help_inline_hook(skb, ct) )
+ return NF_ACCEPT;
+
nexthdr_off = protoff;
tcph = skb_header_pointer(skb, nexthdr_off, sizeof(_tcph), &_tcph);
BUG_ON(!tcph);
Index: linux-3.18.21/net/netfilter/nf_conntrack_proto_generic.c
===================================================================
--- linux-3.18.21.orig/net/netfilter/nf_conntrack_proto_generic.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/netfilter/nf_conntrack_proto_generic.c 2018-02-05 13:21:32.000000000 +0800
@@ -11,6 +11,7 @@
#include <linux/timer.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
+#include "ecnt_netfilter.h"
static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ;
@@ -83,6 +84,10 @@
unsigned int hooknum,
unsigned int *timeout)
{
+ if ( ECNT_CONTINUE != ecnt_generic_packet_inline_hook(ct, skb, dataoff
+ , ctinfo, pf, hooknum, timeout) )
+ return NF_ACCEPT;
+
nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
return NF_ACCEPT;
}
Index: linux-3.18.21/net/netfilter/nf_conntrack_proto_tcp.c
===================================================================
--- linux-3.18.21.orig/net/netfilter/nf_conntrack_proto_tcp.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/netfilter/nf_conntrack_proto_tcp.c 2018-02-05 13:21:32.000000000 +0800
@@ -645,7 +645,9 @@
(in_recv_win ? 1 : 0),
before(sack, receiver->td_end + 1),
after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));
-
+#if defined(TCSUPPORT_RA_HWNAT)
+ res = 1;
+#else
if (before(seq, sender->td_maxend + 1) &&
in_recv_win &&
before(sack, receiver->td_end + 1) &&
@@ -724,7 +726,7 @@
: "SEQ is under the lower bound (already ACKed data retransmitted)"
: "SEQ is over the upper bound (over the window of the receiver)");
}
-
+#endif
pr_debug("tcp_in_window: res=%u sender end=%u maxend=%u maxwin=%u "
"receiver end=%u maxend=%u maxwin=%u\n",
res, sender->td_end, sender->td_maxend, sender->td_maxwin,
Index: linux-3.18.21/net/netfilter/nf_conntrack_rtsp.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/netfilter/nf_conntrack_rtsp.c 2018-02-05 13:21:32.000000000 +0800
@@ -0,0 +1,715 @@
+/*
+ * RTSP extension for IP connection tracking
+ * (C) 2003 by Tom Marshall <tmarshall at real.com>
+ *
+ * 2005-02-13: Harald Welte <laforge at netfilter.org>
+ * - port to 2.6
+ * - update to recent post-2.6.11 api changes
+ * 2006-09-14: Steven Van Acker <deepstar at singularity.be>
+ * - removed calls to NAT code from conntrack helper: NAT no longer needed to use rtsp-conntrack
+ * 2007-04-18: Michael Guntsche <mike at it-loops.com>
+ * - Port to new NF API
+ * 2013-03-04: Il'inykh Sergey <sergeyi at inango-sw.com>. Inango Systems Ltd
+ * - fixed rtcp nat mapping and other port mapping fixes
+ * - simple TEARDOWN request handling
+ * - codestyle fixes and other less significant bug fixes
+ *
+ * based on ip_conntrack_irc.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Module load syntax:
+ * insmod nf_conntrack_rtsp.o ports=port1,port2,...port<MAX_PORTS>
+ * max_outstanding=n setup_timeout=secs
+ *
+ * If no ports are specified, the default will be port 554.
+ *
+ * With max_outstanding you can define the maximum number of not yet
+ * answered SETUP requests per RTSP session (default 8).
+ * With setup_timeout you can specify how long the system waits for
+ * an expected data channel (default 300 seconds).
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/ip.h>
+#include <linux/inet.h>
+#include <net/tcp.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/netfilter/nf_conntrack_rtsp.h>
+
+#define NF_NEED_STRNCASECMP
+#define NF_NEED_STRTOU16
+#define NF_NEED_STRTOU32
+#define NF_NEED_NEXTLINE
+#include <linux/netfilter_helpers.h>
+#define NF_NEED_MIME_NEXTLINE
+#include <linux/netfilter_mime.h>
+
+#include <linux/ctype.h>
+#include "ecnt_netfilter.h"
+
+#define MAX_PORTS 8
+static int ports[MAX_PORTS];
+static int num_ports = 0;
+static int max_outstanding = 8;
+static unsigned int setup_timeout = 300;
+
+#define pr_debug(fmt, ...) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+
+
+MODULE_AUTHOR("Tom Marshall <tmarshall at real.com>");
+MODULE_DESCRIPTION("RTSP connection tracking module");
+MODULE_LICENSE("GPL");
+module_param_array(ports, int, &num_ports, 0400);
+MODULE_PARM_DESC(ports, "port numbers of RTSP servers");
+module_param(max_outstanding, int, 0400);
+MODULE_PARM_DESC(max_outstanding, "max number of outstanding SETUP requests per RTSP session");
+module_param(setup_timeout, int, 0400);
+MODULE_PARM_DESC(setup_timeout, "timeout on for unestablished data channels");
+
+static char *rtsp_buffer;
+static DEFINE_SPINLOCK(rtsp_buffer_lock);
+
+static struct nf_conntrack_expect_policy rtsp_exp_policy;
+
+unsigned int (*nf_nat_rtsp_hook)(struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ unsigned int protoff,
+#endif
+ unsigned int matchoff, unsigned int matchlen,
+ struct ip_ct_rtsp_expect* prtspexp,
+ struct nf_conntrack_expect *rtp_exp,
+ struct nf_conntrack_expect *rtcp_exp);
+
+unsigned int (*nf_nat_rtp_hook)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ __be32 rtpip,
+ u_int16_t rtp_srcport) = NULL;
+
+
+EXPORT_SYMBOL_GPL(nf_nat_rtsp_hook);
+EXPORT_SYMBOL_GPL(nf_nat_rtp_hook);
+
+/*
+ * Max mappings we will allow for one RTSP connection (for RTP, the number
+ * of allocated ports is twice this value). Note that SMIL burns a lot of
+ * ports so keep this reasonably high. If this is too low, you will see a
+ * lot of "no free client map entries" messages.
+ */
+#define MAX_PORT_MAPS 16
+
+/*** default port list was here in the masq code: 554, 3030, 4040 ***/
+
+#define SKIP_WSPACE(ptr,len,off) while(off < len && isspace(*(ptr+off))) { off++; }
+
+/*
+ * Parse an RTSP packet.
+ *
+ * Returns zero if parsing failed.
+ *
+ * Parameters:
+ * IN ptcp tcp data pointer
+ * IN tcplen tcp data len
+ * IN/OUT ptcpoff points to current tcp offset
+ * OUT phdrsoff set to offset of rtsp headers
+ * OUT phdrslen set to length of rtsp headers
+ * OUT pcseqoff set to offset of CSeq header
+ * OUT pcseqlen set to length of CSeq header
+ */
+static int
+rtsp_parse_message(char* ptcp, uint tcplen, uint* ptcpoff,
+ uint* phdrsoff, uint* phdrslen,
+ uint* pcseqoff, uint* pcseqlen,
+ uint* transoff, uint* translen,
+ uint* rtpinfoff, uint* rtpinflen)
+{
+ uint entitylen = 0; /* Content-Length value; body bytes to skip after headers */
+ uint lineoff;
+ uint linelen;
+
+ if (!nf_nextline(ptcp, tcplen, ptcpoff, &lineoff, &linelen)) /* skip request/status line */
+ return 0;
+
+ *phdrsoff = *ptcpoff;
+ while (nf_mime_nextline(ptcp, tcplen, ptcpoff, &lineoff, &linelen)) {
+ if (linelen == 0) { /* blank line: end of headers */
+ if (entitylen > 0)
+ *ptcpoff += min(entitylen, tcplen - *ptcpoff); /* skip entity body, clamped to segment */
+ break;
+ }
+ if (lineoff+linelen > tcplen) {
+ pr_info("!! overrun !!\n");
+ break;
+ }
+
+ if (nf_strncasecmp(ptcp+lineoff, "CSeq:", 5) == 0) {
+ *pcseqoff = lineoff;
+ *pcseqlen = linelen;
+ }
+
+ if (nf_strncasecmp(ptcp+lineoff, "Transport:", 10) == 0) {
+ *transoff = lineoff;
+ *translen = linelen;
+ }
+
+ if (nf_strncasecmp(ptcp+lineoff, "RTP-Info:", 9) == 0) {
+ *rtpinfoff = lineoff;
+ *rtpinflen = linelen;
+ }
+
+ if (nf_strncasecmp(ptcp+lineoff, "Content-Length:", 15) == 0) {
+ uint off = lineoff+15; /* absolute offset just past the header name */
+ SKIP_WSPACE(ptcp, lineoff+linelen, off); /* was (ptcp+lineoff, linelen, off): absolute off vs. relative base/len never skipped the space */
+ nf_strtou32(ptcp+off, &entitylen);
+ }
+ }
+ *phdrslen = (*ptcpoff) - (*phdrsoff);
+
+ return 1;
+}
+
+/*
+ * Find lo/hi client ports (if any) in transport header
+ * In:
+ * ptcp, tcplen = packet
+ * tranoff, tranlen = buffer to search
+ *
+ * Out:
+ * pport_lo, pport_hi = lo/hi ports (host endian)
+ *
+ * Returns nonzero if any client ports found
+ *
+ * Note: it is valid (and expected) for the client to request multiple
+ * transports, so we need to parse the entire line.
+ */
+static int
+rtsp_parse_transport(char* ptran, uint tranlen,
+ struct ip_ct_rtsp_expect* prtspexp)
+{
+ int rc = 0; /* nonzero once a client_port has been recorded */
+ uint off = 0;
+
+ if (tranlen < 10 || !iseol(ptran[tranlen-1]) ||
+ nf_strncasecmp(ptran, "Transport:", 10) != 0) {
+ pr_info("sanity check failed\n");
+ return 0;
+ }
+
+ pr_debug("tran='%.*s'\n", (int)tranlen, ptran);
+ off += 10; /* strlen("Transport:") */
+ SKIP_WSPACE(ptran, tranlen, off);
+
+ /* Transport: tran;field;field=val,tran;field;field=val,... */
+ while (off < tranlen) { /* outer loop: comma-separated transport specs */
+ const char* pparamend;
+ uint nextparamoff;
+
+ pparamend = memchr(ptran+off, ',', tranlen-off);
+ pparamend = (pparamend == NULL) ? ptran+tranlen : pparamend+1;
+ nextparamoff = pparamend-ptran;
+
+ while (off < nextparamoff) { /* inner loop: semicolon-separated fields */
+ const char* pfieldend;
+ uint nextfieldoff;
+
+ pfieldend = memchr(ptran+off, ';', nextparamoff-off);
+ nextfieldoff = (pfieldend == NULL) ? nextparamoff : pfieldend-ptran+1;
+
+ if (strncmp(ptran+off, "client_port=", 12) == 0) {
+ u_int16_t port;
+ uint numlen;
+
+ off += 12;
+ numlen = nf_strtou16(ptran+off, &port);
+ off += numlen;
+ if (prtspexp->loport != 0 && prtspexp->loport != port) /* only first port pair is honoured */
+ pr_debug("multiple ports found, port %hu ignored\n", port);
+ else {
+ pr_debug("lo port found : %hu\n", port);
+ prtspexp->loport = prtspexp->hiport = port;
+ if (ptran[off] == '-') { /* "lo-hi" range form */
+ off++;
+ numlen = nf_strtou16(ptran+off, &port);
+ off += numlen;
+ prtspexp->pbtype = pb_range;
+ prtspexp->hiport = port;
+
+ // If we have a range, assume rtp:
+ // loport must be even, hiport must be loport+1
+ if ((prtspexp->loport & 0x0001) != 0 ||
+ prtspexp->hiport != prtspexp->loport+1) {
+ pr_debug("incorrect range: %hu-%hu, correcting\n",
+ prtspexp->loport, prtspexp->hiport);
+ prtspexp->loport &= 0xfffe; /* force even RTP port */
+ prtspexp->hiport = prtspexp->loport+1;
+ }
+ } else if (ptran[off] == '/') { /* "lo/hi" disconnected pair form */
+ off++;
+ numlen = nf_strtou16(ptran+off, &port);
+ off += numlen;
+ prtspexp->pbtype = pb_discon;
+ prtspexp->hiport = port;
+ }
+ rc = 1;
+ }
+ }
+
+ /*
+ * Note we don't look for the destination parameter here.
+ * If we are using NAT, the NAT module will handle it. If not,
+ * and the client is sending packets elsewhere, the expectation
+ * will quietly time out.
+ */
+
+ off = nextfieldoff;
+ }
+
+ off = nextparamoff;
+ }
+
+ return rc;
+}
+
+static int
+rtsp_reply_rtpinfo(struct sk_buff *skb,
+ char* prtpinf, uint rtpinflen,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+ int rc = 0;
+ uint off = 0;
+ char ipbuf[40]; /* dotted-quad server address from the RTP-Info url */
+ int ip_buf_len = 0;
+ __be32 newip = 0;
+ u_int16_t src_port = 0;
+ typeof(nf_nat_rtp_hook) nf_nat_rtp;
+ /* only called for the reply direction; the unused 'dir' local was dropped */
+
+ if (rtpinflen < 9 || !iseol(prtpinf[rtpinflen-1]) ||
+ nf_strncasecmp(prtpinf, "RTP-Info:", 9) != 0) {
+ pr_info("rtsp_process_rtpinfo check failed\n");
+ return 0;
+ }
+
+ pr_debug("rtpinfo='%.*s'\n", (int)rtpinflen, prtpinf);
+ off += 9; /* strlen("RTP-Info:"); was 10, which could eat the first value byte when no space follows the colon */
+ SKIP_WSPACE(prtpinf, rtpinflen, off);
+
+ /* RTP-Info: tran;field;field=val,tran;field;field=val,... */
+ while (off < rtpinflen) {
+ const char* pparamend;
+ uint nextparamoff;
+
+ pparamend = memchr(prtpinf+off, ',', rtpinflen-off);
+ pparamend = (pparamend == NULL) ? prtpinf+rtpinflen : pparamend+1;
+ nextparamoff = pparamend-prtpinf;
+
+ while (off < nextparamoff) {
+ const char* pfieldend;
+ uint nextfieldoff;
+
+ pfieldend = memchr(prtpinf+off, ';', nextparamoff-off);
+ nextfieldoff = (pfieldend == NULL) ? nextparamoff : pfieldend-prtpinf+1;
+
+ if (strncmp(prtpinf+off, "url=", 4) == 0) {
+ off += 4;
+
+ /* rtsp:// */
+ off += 7; /* assumes the plain "rtsp://" scheme — TODO confirm no rtspu/rtsps here */
+ const char* pipend;
+ pipend = memchr(prtpinf+off, ':', nextparamoff-off);
+ if ( pipend )
+ {
+ nf_strtou16(pipend+1, &src_port);
+ ip_buf_len = pipend - (prtpinf+off);
+ if ( ip_buf_len < sizeof(ipbuf) )
+ {
+ memset(ipbuf, 0, sizeof(ipbuf));
+ memcpy(ipbuf, prtpinf+off, ip_buf_len);
+ newip = in_aton(ipbuf); /* literal IPv4 only; hostnames parse as 0 */
+
+ /*
+ for vlc debug
+ newip = in_aton("192.168.40.250");
+ ct->loport = ct->hiport = htons(5566);
+ printk("\n==>org src_port=[%d]<===\n", src_port);
+ src_port = htons(1234);*/
+
+ nf_nat_rtp = rcu_dereference(nf_nat_rtp_hook);
+ if (nf_nat_rtp && ct->status & IPS_NAT_MASK)
+ {
+ pr_debug("\n=>nf_nat_rtp ip=%pI4,"
+ " loport=[%d], hiport=[%d], src_port=[%d]\n"
+ , &newip, ct->loport, ct->hiport, src_port);
+ /* pass the request off to the rtp helper */
+ rc = nf_nat_rtp(skb, ct, ctinfo, newip, src_port);
+ }
+ }
+ }
+
+ /* now only process one ip*/
+ break;
+ }
+
+ off = nextfieldoff;
+ }
+
+ off = nextparamoff;
+ }
+
+ return rc;
+}
+
+/*** conntrack functions ***/
+
+/* outbound packet: client->server */
+
+static inline int /* client->server direction: create RTP/RTCP expectations for SETUP, drop them on TEARDOWN */
+help_out(struct sk_buff *skb, unsigned char *rb_ptr, unsigned int datalen,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ unsigned int protoff)
+#else
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+#endif
+{
+ struct ip_ct_rtsp_expect expinfo;
+
+ int dir = CTINFO2DIR(ctinfo); /* = IP_CT_DIR_ORIGINAL */
+ //struct tcphdr* tcph = (void*)iph + iph->ihl * 4;
+ //uint tcplen = pktlen - iph->ihl * 4;
+ char* pdata = rb_ptr;
+ //uint datalen = tcplen - tcph->doff * 4;
+ uint dataoff = 0;
+ int ret = NF_ACCEPT;
+
+ struct nf_conntrack_expect *rtp_exp;
+ struct nf_conntrack_expect *rtcp_exp = NULL;
+
+ __be16 be_loport;
+ __be16 be_hiport;
+
+ typeof(nf_nat_rtsp_hook) nf_nat_rtsp;
+
+ memset(&expinfo, 0, sizeof(expinfo));
+
+ while (dataoff < datalen) { /* iterate over RTSP messages in this TCP segment */
+ uint cmdoff = dataoff;
+ uint hdrsoff = 0;
+ uint hdrslen = 0;
+ uint cseqoff = 0;
+ uint cseqlen = 0;
+ uint transoff = 0;
+ uint translen = 0;
+ uint rtpinfoff = 0;
+ uint rtpinflen = 0;
+ uint off; /* NOTE(review): set below but never read */
+
+ if (!rtsp_parse_message(pdata, datalen, &dataoff,
+ &hdrsoff, &hdrslen,
+ &cseqoff, &cseqlen,
+ &transoff, &translen,
+ &rtpinfoff, &rtpinflen))
+ break; /* not a valid message */
+
+ if (strncmp(pdata+cmdoff, "TEARDOWN ", 9) == 0) {
+ pr_debug("teardown handled\n");
+ nf_ct_remove_expectations(ct); /* FIXME must be session id aware */
+ break;
+ }
+
+ if (strncmp(pdata+cmdoff, "SETUP ", 6) != 0)
+ continue; /* not a SETUP message */
+
+ pr_debug("found a setup message\n");
+
+ off = 0;
+ if(translen)
+ rtsp_parse_transport(pdata+transoff, translen, &expinfo); /* fills loport/hiport/pbtype */
+
+ if (expinfo.loport == 0) {
+ pr_debug("no udp transports found\n");
+ continue; /* no udp transports found */
+ }
+
+ pr_debug("udp transport found, ports=(%d,%hu,%hu)\n",
+ (int)expinfo.pbtype, expinfo.loport, expinfo.hiport);
+
+
+ be_loport = htons(expinfo.loport);
+ ct->loport = htons(expinfo.loport); /* remembered on the conntrack for the reply-side RTP-Info handling */
+ ct->hiport = htons(expinfo.hiport);
+
+ rtp_exp = nf_ct_expect_alloc(ct);
+ if (rtp_exp == NULL) {
+ ret = NF_DROP;
+ goto out;
+ }
+
+ nf_ct_expect_init(rtp_exp, NF_CT_EXPECT_CLASS_DEFAULT, /* expect server->client UDP to the client_port */
+ nf_ct_l3num(ct),
+ &ct->tuplehash[!dir].tuple.src.u3,
+ &ct->tuplehash[!dir].tuple.dst.u3,
+ IPPROTO_UDP, NULL, &be_loport);
+
+ rtp_exp->flags = 0;
+
+ if (expinfo.pbtype == pb_range) { /* port range given: second expectation for RTCP */
+ pr_debug("setup expectation for rtcp\n");
+
+ be_hiport = htons(expinfo.hiport);
+ rtcp_exp = nf_ct_expect_alloc(ct);
+ if (rtcp_exp == NULL) {
+ ret = NF_DROP;
+ goto out1;
+ }
+
+ nf_ct_expect_init(rtcp_exp, NF_CT_EXPECT_CLASS_DEFAULT,
+ nf_ct_l3num(ct),
+ &ct->tuplehash[!dir].tuple.src.u3,
+ &ct->tuplehash[!dir].tuple.dst.u3,
+ IPPROTO_UDP, NULL, &be_hiport);
+
+ rtcp_exp->flags = 0;
+
+ pr_debug("expect_related %pI4:%u-%u-%pI4:%u-%u\n",
+ &rtp_exp->tuple.src.u3.ip,
+ ntohs(rtp_exp->tuple.src.u.udp.port),
+ ntohs(rtcp_exp->tuple.src.u.udp.port),
+ &rtp_exp->tuple.dst.u3.ip,
+ ntohs(rtp_exp->tuple.dst.u.udp.port),
+ ntohs(rtcp_exp->tuple.dst.u.udp.port));
+ } else {
+ pr_debug("expect_related %pI4:%u-%pI4:%u\n",
+ &rtp_exp->tuple.src.u3.ip,
+ ntohs(rtp_exp->tuple.src.u.udp.port),
+ &rtp_exp->tuple.dst.u3.ip,
+ ntohs(rtp_exp->tuple.dst.u.udp.port));
+ }
+
+ nf_nat_rtsp = rcu_dereference(nf_nat_rtsp_hook); /* NAT helper registers expectations itself when active */
+ if (nf_nat_rtsp && ct->status & IPS_NAT_MASK)
+ /* pass the request off to the nat helper */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ ret = nf_nat_rtsp(skb, ctinfo, protoff, hdrsoff, hdrslen,
+ &expinfo, rtp_exp, rtcp_exp);
+#else
+ ret = nf_nat_rtsp(skb, ctinfo, hdrsoff, hdrslen,
+ &expinfo, rtp_exp, rtcp_exp);
+#endif
+ else {
+ if (nf_ct_expect_related(rtp_exp) == 0) {
+ if (rtcp_exp && nf_ct_expect_related(rtcp_exp) != 0) {
+ nf_ct_unexpect_related(rtp_exp); /* roll back rtp expectation if rtcp fails */
+ pr_info("nf_conntrack_expect_related failed for rtcp\n");
+ ret = NF_DROP;
+ }
+ } else {
+ pr_info("nf_conntrack_expect_related failed for rtp\n");
+ ret = NF_DROP;
+ }
+ }
+ if (rtcp_exp) {
+ nf_ct_expect_put(rtcp_exp); /* drop our alloc references; conntrack core holds its own */
+ }
+out1:
+ nf_ct_expect_put(rtp_exp);
+ goto out; /* only the first SETUP per segment is processed */
+ }
+out:
+
+ return ret;
+}
+
+
+static inline int /* server->client direction: parse the reply and hand RTP-Info to the NAT rtp hook */
+help_in(struct sk_buff *skb, unsigned char *rb_ptr, unsigned int datalen,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ unsigned int protoff)
+{
+ char* pdata = rb_ptr;
+ uint dataoff = 0;
+ uint hdrsoff = 0;
+ uint hdrslen = 0;
+ uint cseqoff = 0;
+ uint cseqlen = 0;
+ uint transoff = 0;
+ uint translen = 0;
+ uint rtpinfoff = 0;
+ uint rtpinflen = 0;
+ uint off; /* NOTE(review): never used */
+
+ if ( !rtsp_parse_message(pdata, datalen, &dataoff,
+ &hdrsoff, &hdrslen,
+ &cseqoff, &cseqlen,
+ &transoff, &translen,
+ &rtpinfoff, &rtpinflen) )
+ return NF_ACCEPT; /* not a valid message */
+
+ if ( 0 != rtpinflen ) /* RTP-Info header present: result of the hook call is intentionally ignored */
+ rtsp_reply_rtpinfo(skb, pdata+rtpinfoff, rtpinflen, ct, ctinfo);
+
+ return NF_ACCEPT;
+}
+
+static int help(struct sk_buff *skb, unsigned int protoff, /* conntrack helper entry point: dispatch by packet direction */
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+ struct tcphdr _tcph, *th;
+ unsigned int dataoff, datalen;
+ char *rb_ptr;
+ int ret = NF_DROP;
+
+ /* Until there's been traffic both ways, don't look in packets. */
+ if (ctinfo != IP_CT_ESTABLISHED &&
+ ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) { /* == IP_CT_ESTABLISHED_REPLY */
+ pr_debug("conntrackinfo = %u\n", ctinfo);
+ return NF_ACCEPT;
+ }
+
+ if ( ECNT_RETURN == ecnt_nf_conntrack_rtsp_help_inline_hook(skb, ct) ) /* vendor hook may short-circuit processing */
+ return NF_ACCEPT;
+
+ /* Not whole TCP header? */
+ th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
+
+ if (!th)
+ return NF_ACCEPT;
+
+ /* No data ? */
+ dataoff = protoff + th->doff*4;
+ datalen = skb->len - dataoff; /* safe: the early return below fires before datalen is used if this underflows */
+ if (dataoff >= skb->len)
+ return NF_ACCEPT;
+
+ spin_lock_bh(&rtsp_buffer_lock); /* rtsp_buffer is a single shared linearization buffer */
+ rb_ptr = skb_header_pointer(skb, dataoff,
+ skb->len - dataoff, rtsp_buffer);
+ BUG_ON(rb_ptr == NULL);
+
+#if 0
+ /* Checksum invalid? Ignore. */
+ /* FIXME: Source route IP option packets --RR */
+ if (tcp_v4_check(tcph, tcplen, iph->saddr, iph->daddr,
+ csum_partial((char*)tcph, tcplen, 0)))
+ {
+ DEBUGP("bad csum: %p %u %u.%u.%u.%u %u.%u.%u.%u\n",
+ tcph, tcplen, NIPQUAD(iph->saddr), NIPQUAD(iph->daddr));
+ return NF_ACCEPT;
+ }
+#endif
+
+ switch (CTINFO2DIR(ctinfo)) {
+ case IP_CT_DIR_ORIGINAL: /* client -> server */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ ret = help_out(skb, rb_ptr, datalen, ct, ctinfo, protoff);
+#else
+ ret = help_out(skb, rb_ptr, datalen, ct, ctinfo);
+#endif
+ break;
+ case IP_CT_DIR_REPLY: /* server -> client */
+ pr_debug("IP_CT_DIR_REPLY\n");
+ ecnt_nf_conntrack_rtsp_help_reply_inline_hook(skb, rb_ptr, datalen);
+ /* inbound packet: server->client */
+ /*ret = NF_ACCEPT;*/
+ ret = help_in(skb, rb_ptr, datalen, ct, ctinfo, protoff);
+ break;
+ }
+
+ spin_unlock_bh(&rtsp_buffer_lock);
+
+ return ret;
+}
+
+static struct nf_conntrack_helper rtsp_helpers[MAX_PORTS];
+static char rtsp_names[MAX_PORTS][10];
+
+/* This function is intentionally _NOT_ defined as __exit */
+static void /* also called from init() on registration failure, hence not __exit */
+fini(void)
+{
+ int i;
+ for (i = 0; i < num_ports; i++) {
+ pr_debug("unregistering port %d\n", ports[i]);
+ nf_conntrack_helper_unregister(&rtsp_helpers[i]);
+ }
+ kfree(rtsp_buffer); /* shared parse buffer allocated in init() */
+}
+
+static int __init
+init(void) /* module init: validate params, allocate the parse buffer, register one TCP helper per port */
+{
+ int i, ret;
+ struct nf_conntrack_helper *hlpr;
+ char *tmpname;
+
+ printk("nf_conntrack_rtsp v" IP_NF_RTSP_VERSION " loading\n");
+
+ if (max_outstanding < 1) {
+ printk("nf_conntrack_rtsp: max_outstanding must be a positive integer\n");
+ return -EBUSY; /* NOTE(review): -EINVAL would be the conventional errno for a bad parameter */
+ }
+ if ((int)setup_timeout < 0) { /* setup_timeout is unsigned: the original "< 0" test was always false */
+ printk("nf_conntrack_rtsp: setup_timeout must be a positive integer\n");
+ return -EBUSY;
+ }
+
+ rtsp_exp_policy.max_expected = max_outstanding;
+ rtsp_exp_policy.timeout = setup_timeout;
+
+ rtsp_buffer = kmalloc(65536, GFP_KERNEL); /* covers the largest possible TCP payload */
+ if (!rtsp_buffer)
+ return -ENOMEM;
+
+ /* If no port given, default to standard rtsp port */
+ if (ports[0] == 0) {
+ ports[0] = RTSP_PORT;
+ num_ports = 1;
+ }
+
+ for (i = 0; (i < MAX_PORTS) && ports[i]; i++) {
+ hlpr = &rtsp_helpers[i];
+ memset(hlpr, 0, sizeof(struct nf_conntrack_helper));
+ hlpr->tuple.src.l3num = AF_INET;
+ hlpr->tuple.src.u.tcp.port = htons(ports[i]);
+ hlpr->tuple.dst.protonum = IPPROTO_TCP;
+ hlpr->expect_policy = &rtsp_exp_policy;
+ hlpr->me = THIS_MODULE;
+ hlpr->help = help;
+
+ tmpname = &rtsp_names[i][0];
+ if (ports[i] == RTSP_PORT) {
+ sprintf(tmpname, "rtsp");
+ } else {
+ sprintf(tmpname, "rtsp-%d", i); /* i < MAX_PORTS (8), so this fits rtsp_names[i][10] */
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
+ strlcpy(hlpr->name, tmpname, sizeof(hlpr->name));
+#else
+ hlpr->name = tmpname;
+#endif
+ pr_debug("port #%d: %d\n", i, ports[i]);
+
+ ret = nf_conntrack_helper_register(hlpr);
+
+ if (ret) {
+ printk("nf_conntrack_rtsp: ERROR registering port %d\n", ports[i]);
+ fini(); /* unregisters helpers registered so far; num_ports may exceed i but extra entries were zeroed */
+ return -EBUSY;
+ }
+ }
+ return 0;
+}
+
+module_init(init);
+module_exit(fini);
Index: linux-3.18.21/net/netfilter/nf_conntrack_sip.c
===================================================================
--- linux-3.18.21.orig/net/netfilter/nf_conntrack_sip.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/netfilter/nf_conntrack_sip.c 2018-02-05 13:21:32.000000000 +0800
@@ -25,6 +25,7 @@
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_conntrack_sip.h>
+#include "ecnt_netfilter.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hentschel <chentschel@arnet.com.ar>");
@@ -47,7 +48,7 @@
MODULE_PARM_DESC(sip_direct_signalling, "expect incoming calls from registrar "
"only (default 1)");
-static int sip_direct_media __read_mostly = 1;
+static int sip_direct_media __read_mostly = 0;/*Do not check incoming rtp pkts' src ip*/
module_param(sip_direct_media, int, 0600);
MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling "
"endpoints only (default 1)");
@@ -1495,6 +1496,9 @@
ctinfo != IP_CT_ESTABLISHED_REPLY)
return NF_ACCEPT;
+ if ( ECNT_RETURN == ecnt_nf_conntrack_sip_help_tcp_inline_hook(skb, ct) )
+ return NF_ACCEPT;
+
/* No Data ? */
th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
if (th == NULL)
@@ -1574,6 +1578,9 @@
if (dataoff >= skb->len)
return NF_ACCEPT;
+ if ( ECNT_RETURN == ecnt_nf_conntrack_sip_help_udp_inline_hook(skb, ct) )
+ return NF_ACCEPT;
+
nf_ct_refresh(ct, skb, sip_timeout * HZ);
if (unlikely(skb_linearize(skb)))
Index: linux-3.18.21/net/netfilter/nf_conntrack_standalone.c
===================================================================
--- linux-3.18.21.orig/net/netfilter/nf_conntrack_standalone.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/netfilter/nf_conntrack_standalone.c 2018-02-05 13:21:32.000000000 +0800
@@ -32,6 +32,7 @@
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <linux/rculist_nulls.h>
+#include "ecnt_netfilter.h"
MODULE_LICENSE("GPL");
@@ -240,6 +241,12 @@
if (ct_show_delta_time(s, ct))
goto release;
+#if defined(CONFIG_NETFILTER_XT_MATCH_LAYER7) || defined(CONFIG_NETFILTER_XT_MATCH_LAYER7_MODULE)
+ if(ct->layer7.app_proto &&
+ seq_printf(s, "l7proto=%s ", ct->layer7.app_proto))
+ goto release;
+#endif
+
if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
goto release;
@@ -453,6 +460,7 @@
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ ECNT_NF_CT_SYSCTL_TABLE_HOOK
{ }
};
Index: linux-3.18.21/net/netfilter/nf_nat_rtsp.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/netfilter/nf_nat_rtsp.c 2018-02-05 13:21:32.000000000 +0800
@@ -0,0 +1,844 @@
+/*
+ * RTSP extension for TCP NAT alteration
+ * (C) 2003 by Tom Marshall <tmarshall at real.com>
+ *
+ * 2013-03-04: Il'inykh Sergey <sergeyi at inango-sw.com>. Inango Systems Ltd
+ * - fixed rtcp nat mapping and other port mapping fixes
+ * - fixed system hard lock because of bug in the parser
+ * - codestyle fixes and less significant fixes
+ *
+ * based on ip_nat_irc.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Module load syntax:
+ * insmod nf_nat_rtsp.o ports=port1,port2,...port<MAX_PORTS>
+ * stunaddr=<address>
+ * destaction=[auto|strip|none]
+ *
+ * If no ports are specified, the default will be port 554 only.
+ *
+ * stunaddr specifies the address used to detect that a client is using STUN.
+ * If this address is seen in the destination parameter, it is assumed that
+ * the client has already punched a UDP hole in the firewall, so we don't
+ * mangle the client_port. If none is specified, it is autodetected. It
+ * only needs to be set if you have multiple levels of NAT. It should be
+ * set to the external address that the STUN clients detect. Note that in
+ * this case, it will not be possible for clients to use UDP with servers
+ * between the NATs.
+ *
+ * If no destaction is specified, auto is used.
+ * destaction=auto: strip destination parameter if it is not stunaddr.
+ * destaction=strip: always strip destination parameter (not recommended).
+ * destaction=none: do not touch destination parameter (not recommended).
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <net/tcp.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+# include <net/netfilter/nf_nat.h>
+#else
+# include <net/netfilter/nf_nat_rule.h>
+#endif
+#include <net/netfilter/nf_nat_helper.h>
+#include <linux/netfilter/nf_conntrack_rtsp.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+
+#include <linux/inet.h>
+#include <linux/ctype.h>
+#define NF_NEED_STRNCASECMP
+#define NF_NEED_STRTOU16
+#include <linux/netfilter_helpers.h>
+#define NF_NEED_MIME_NEXTLINE
+#include <linux/netfilter_mime.h>
+#include "ecnt_netfilter.h"
+
+#define MAX_PORTS 8
+#define DSTACT_AUTO 0
+#define DSTACT_STRIP 1
+#define DSTACT_NONE 2
+
+static char* stunaddr = NULL;
+static char* destaction = NULL;
+
+static u_int32_t extip = 0;
+static int dstact = 0;
+#define pr_debug(fmt, ...) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+
+static void nf_nat_rtsp_expected(struct nf_conn* ct, struct nf_conntrack_expect *exp);
+
+MODULE_AUTHOR("Tom Marshall <tmarshall at real.com>");
+MODULE_DESCRIPTION("RTSP network address translation module");
+MODULE_LICENSE("GPL");
+module_param(stunaddr, charp, 0644);
+MODULE_PARM_DESC(stunaddr, "Address for detecting STUN");
+module_param(destaction, charp, 0644);
+MODULE_PARM_DESC(destaction, "Action for destination parameter (auto/strip/none)");
+
+#define SKIP_WSPACE(ptr,len,off) while(off < len && isspace(*(ptr+off))) { off++; }
+
+/*** helper functions ***/
+
+static void
+get_skb_tcpdata(struct sk_buff* skb, char** pptcpdata, uint* ptcpdatalen)
+{
+ struct iphdr* iph = ip_hdr(skb);
+ struct tcphdr* tcph = (void *)iph + ip_hdrlen(skb);
+
+ *pptcpdata = (char*)tcph + tcph->doff*4;
+ *ptcpdatalen = ((char*)skb_transport_header(skb) + skb->len) - *pptcpdata;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+/* copy of sip_sprintf_addr */
+static int rtsp_sprintf_addr(const struct nf_conn *ct, char *buffer,
+ const union nf_inet_addr *addr, bool delim)
+{
+ if (nf_ct_l3num(ct) == NFPROTO_IPV4) {
+ return sprintf(buffer, "%pI4", &addr->ip);
+ } else {
+ if (delim)
+ return sprintf(buffer, "[%pI6c]", &addr->ip6);
+ else
+ return sprintf(buffer, "%pI6c", &addr->ip6);
+ }
+}
+#endif
+
+/*** nat functions ***/
+
+/*
+ * Mangle the "Transport:" header:
+ * - Replace all occurrences of "client_port=<spec>"
+ * - Handle destination parameter
+ *
+ * In:
+ * ct, ctinfo = conntrack context
+ * skb = packet
+ * tranoff = Transport header offset from TCP data
+ * tranlen = Transport header length (incl. CRLF)
+ * rport_lo = replacement low port (host endian)
+ * rport_hi = replacement high port (host endian)
+ *
+ * Returns packet size difference.
+ *
+ * Assumes that a complete transport header is present, ending with CR or LF
+ */
+static int
+rtsp_mangle_tran(enum ip_conntrack_info ctinfo,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ unsigned int protoff,
+#endif
+ struct nf_conntrack_expect* rtp_exp,
+ struct nf_conntrack_expect* rtcp_exp,
+ struct ip_ct_rtsp_expect* prtspexp,
+ struct sk_buff* skb, uint tranoff, uint tranlen)
+{
+ char* ptcp;
+ uint tcplen;
+ char* ptran;
+ char rbuf1[16]; /* Replacement buffer (one port) */
+ uint rbuf1len; /* Replacement len (one port) */
+ char rbufa[16]; /* Replacement buffer (all ports) */
+ uint rbufalen; /* Replacement len (all ports) */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ union nf_inet_addr newip;
+#else
+ u_int32_t newip;
+#endif
+ u_int16_t loport, hiport;
+ uint off = 0;
+ uint diff; /* Number of bytes we removed */
+ struct tcphdr *tp = tcp_hdr(skb);
+
+ struct nf_conn *ct = rtp_exp->master;
+ /* struct nf_conn *ct = nf_ct_get(skb, &ctinfo); */
+ struct nf_conntrack_tuple *rtp_t;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ char szextaddr[INET6_ADDRSTRLEN];
+#else
+ char szextaddr[INET_ADDRSTRLEN];
+#endif
+ uint extaddrlen;
+ int is_stun;
+
+ get_skb_tcpdata(skb, &ptcp, &tcplen);
+ ptran = ptcp+tranoff;
+
+ if (tranoff+tranlen > tcplen || tcplen-tranoff < tranlen ||
+ tranlen < 10 || !iseol(ptran[tranlen-1]) ||
+ nf_strncasecmp(ptran, "Transport:", 10) != 0) {
+ pr_info("sanity check failed\n");
+ return 0;
+ }
+ off += 10;
+ SKIP_WSPACE(ptcp+tranoff, tranlen, off);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ newip = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3;
+ rtp_t = &rtp_exp->tuple;
+ rtp_t->dst.u3 = newip;
+ if (rtcp_exp) {
+ rtcp_exp->tuple.dst.u3 = newip;
+ }
+ extaddrlen = rtsp_sprintf_addr(ct, szextaddr, &newip, true); // FIXME handle extip
+ pr_debug("stunaddr=%s (auto)\n", szextaddr);
+#else
+ newip = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip;
+ rtp_t = &rtp_exp->tuple;
+ rtp_t->dst.u3.ip = newip;
+ if (rtcp_exp) {
+ rtcp_exp->tuple.dst.u3.ip = newip;
+ }
+ extaddrlen = extip ? sprintf(szextaddr, "%pI4", &extip)
+ : sprintf(szextaddr, "%pI4", &newip);
+ pr_debug("stunaddr=%s (%s)\n", szextaddr, (extip?"forced":"auto"));
+#endif
+ hiport = 0;
+ rbuf1len = rbufalen = 0;
+ switch (prtspexp->pbtype) {
+ case pb_single:
+ for (loport = prtspexp->loport; loport != 0; loport++) { /* XXX: improper wrap? */
+ rtp_t->dst.u.udp.port = htons(loport);
+ if (nf_ct_expect_related(rtp_exp) == 0) {
+ pr_debug("using port %hu\n", loport);
+ break;
+ }
+ }
+ if (loport != 0) {
+ rbuf1len = sprintf(rbuf1, "%hu", loport);
+ rbufalen = sprintf(rbufa, "%hu", loport);
+ }
+ break;
+ case pb_range:
+ for (loport = prtspexp->loport; loport != 0; loport += 2) { /* XXX: improper wrap? */
+ rtp_t->dst.u.udp.port = htons(loport);
+ if (nf_ct_expect_related(rtp_exp) != 0) {
+ continue;
+ }
+ hiport = loport + 1;
+ rtcp_exp->tuple.dst.u.udp.port = htons(hiport);
+ if (nf_ct_expect_related(rtcp_exp) != 0) {
+ nf_ct_unexpect_related(rtp_exp);
+ continue;
+ }
+
+ /* FIXME: invalid print in case of ipv6 */
+ pr_debug("nat expect_related %pI4:%u-%u-%pI4:%u-%u\n",
+ &rtp_exp->tuple.src.u3.ip,
+ ntohs(rtp_exp->tuple.src.u.udp.port),
+ ntohs(rtcp_exp->tuple.src.u.udp.port),
+ &rtp_exp->tuple.dst.u3.ip,
+ ntohs(rtp_exp->tuple.dst.u.udp.port),
+ ntohs(rtcp_exp->tuple.dst.u.udp.port));
+ break;
+ }
+ if (loport != 0) {
+ rbuf1len = sprintf(rbuf1, "%hu", loport);
+ rbufalen = sprintf(rbufa, "%hu-%hu", loport, hiport);
+ }
+ break;
+ case pb_discon:
+ for (loport = prtspexp->loport; loport != 0; loport++) { /* XXX: improper wrap? */
+ rtp_t->dst.u.udp.port = htons(loport);
+ if (nf_ct_expect_related(rtp_exp) == 0) {
+ pr_debug("using port %hu (1 of 2)\n", loport);
+ break;
+ }
+ }
+ for (hiport = prtspexp->hiport; hiport != 0; hiport++) { /* XXX: improper wrap? */
+ rtp_t->dst.u.udp.port = htons(hiport);
+ if (nf_ct_expect_related(rtp_exp) == 0) {
+ pr_debug("using port %hu (2 of 2)\n", hiport);
+ break;
+ }
+ }
+ if (loport != 0 && hiport != 0) {
+ rbuf1len = sprintf(rbuf1, "%hu", loport);
+ rbufalen = sprintf(rbufa, hiport == loport+1 ?
+ "%hu-%hu":"%hu/%hu", loport, hiport);
+ }
+ break;
+ }
+
+ if (rbuf1len == 0)
+ return 0; /* cannot get replacement port(s) */
+
+ /* Transport: tran;field;field=val,tran;field;field=val,...
+ `off` is set to the start of Transport value from start of line
+ */
+ while (off < tranlen) {
+ uint saveoff;
+ const char* pparamend;
+ uint nextparamoff;
+
+ pparamend = memchr(ptran+off, ',', tranlen-off);
+ pparamend = (pparamend == NULL) ? ptran+tranlen : pparamend+1;
+ nextparamoff = pparamend-ptran;
+
+ /*
+ * We pass over each param twice. On the first pass, we look for a
+ * destination= field. It is handled by the security policy. If it
+ * is present, allowed, and equal to our external address, we assume
+ * that STUN is being used and we leave the client_port= field alone.
+ */
+ is_stun = 0;
+ saveoff = off;
+ while (off < nextparamoff) {
+ const char* pfieldend;
+ uint nextfieldoff;
+
+ pfieldend = memchr(ptran+off, ';', nextparamoff-off);
+ nextfieldoff = (pfieldend == NULL) ? nextparamoff : pfieldend-ptran+1;
+
+ if (dstact != DSTACT_NONE && strncmp(ptran+off, "destination=", 12) == 0) {
+ if (strncmp(ptran+off+12, szextaddr, extaddrlen) == 0)
+ is_stun = 1;
+
+ if (dstact == DSTACT_STRIP || (dstact == DSTACT_AUTO && !is_stun)) {
+ uint dstoff = (ptran-ptcp)+off;
+ uint dstlen = nextfieldoff-off;
+ char* pdstrep = NULL;
+ uint dstreplen = 0;
+ diff = dstlen;
+ if (dstact == DSTACT_AUTO && !is_stun) {
+ pr_debug("RTSP: replace dst addr\n");
+ dstoff += 12;
+ dstlen -= 13;
+ pdstrep = szextaddr;
+ dstreplen = extaddrlen;
+ diff = nextfieldoff-off-13-extaddrlen;
+ }
+
+ /* force to do sequence adjust */
+ if ( dstreplen != dstlen && tp )
+ ecnt_nf_ct_seqadj_set_inline_hook(ct, ctinfo,
+ tp->seq, dstreplen - dstlen);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff,
+ dstoff, dstlen, pdstrep, dstreplen)) {
+#else
+ if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
+ dstoff, dstlen, pdstrep, dstreplen)) {
+#endif
+ /* mangle failed, all we can do is bail */
+ nf_ct_unexpect_related(rtp_exp);
+ if (rtcp_exp)
+ nf_ct_unexpect_related(rtcp_exp);
+ return 0;
+ }
+ get_skb_tcpdata(skb, &ptcp, &tcplen);
+ ptran = ptcp+tranoff;
+ tranlen -= diff;
+ nextparamoff -= diff;
+ nextfieldoff -= diff;
+ }
+ }
+
+ off = nextfieldoff;
+ }
+
+ if (is_stun)
+ continue;
+
+ off = saveoff;
+ while (off < nextparamoff) {
+ const char* pfieldend;
+ uint nextfieldoff;
+
+ pfieldend = memchr(ptran+off, ';', nextparamoff-off);
+ nextfieldoff = (pfieldend == NULL) ? nextparamoff : pfieldend-ptran+1;
+
+ if (strncmp(ptran+off, "client_port=", 12) == 0) {
+ u_int16_t port;
+ uint numlen;
+ uint origoff;
+ uint origlen;
+ char* rbuf = rbuf1;
+ uint rbuflen = rbuf1len;
+
+ off += 12;
+ origoff = (ptran-ptcp)+off;
+ origlen = 0;
+ numlen = nf_strtou16(ptran+off, &port);
+ off += numlen;
+ origlen += numlen;
+ if (port != prtspexp->loport) {
+ pr_debug("multiple ports found, port %hu ignored\n", port);
+ } else {
+ if (ptran[off] == '-' || ptran[off] == '/') {
+ off++;
+ origlen++;
+ numlen = nf_strtou16(ptran+off, &port);
+ off += numlen;
+ origlen += numlen;
+ rbuf = rbufa;
+ rbuflen = rbufalen;
+ }
+
+ /*
+ * note we cannot just memcpy() if the sizes are the same.
+ * the mangle function does skb resizing, checks for a
+ * cloned skb, and updates the checksums.
+ *
+ * parameter 4 below is offset from start of tcp data.
+ */
+ diff = origlen-rbuflen;
+
+ /* force to do sequence adjust */
+ if ( 0 != diff && tp )
+ ecnt_nf_ct_seqadj_set_inline_hook(ct, ctinfo
+ ,tp->seq, diff);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff,
+ origoff, origlen, rbuf, rbuflen)) {
+#else
+ if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
+ origoff, origlen, rbuf, rbuflen)) {
+#endif
+ /* mangle failed, all we can do is bail */
+ nf_ct_unexpect_related(rtp_exp);
+ if (rtcp_exp)
+ nf_ct_unexpect_related(rtcp_exp);
+ return 0;
+ }
+ get_skb_tcpdata(skb, &ptcp, &tcplen);
+ ptran = ptcp+tranoff;
+ tranlen -= diff;
+ nextparamoff -= diff;
+ nextfieldoff -= diff;
+ }
+ }
+
+ off = nextfieldoff;
+ }
+
+ off = nextparamoff;
+ }
+
+ return 1;
+}
+
+
+static int
+rtsp_mangle_xnat(enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ struct nf_conntrack_expect* rtp_exp,
+ struct nf_conntrack_expect* rtcp_exp,
+ struct ip_ct_rtsp_expect* prtspexp,
+ struct sk_buff* skb, uint xnatoff, uint xnatlen)
+{
+ char* ptcp;
+ uint tcplen;
+ char* pxnat;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ union nf_inet_addr newip;
+#else
+ u_int32_t newip;
+#endif
+ uint off = 0;
+ struct tcphdr *tp = tcp_hdr(skb);
+ struct nf_conn *ct = rtp_exp->master;
+ struct nf_conntrack_tuple *rtp_t;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ char szextaddr[INET6_ADDRSTRLEN];
+#else
+ char szextaddr[INET_ADDRSTRLEN];
+#endif
+ uint extaddrlen;
+
+ get_skb_tcpdata(skb, &ptcp, &tcplen);
+ pxnat = ptcp+xnatoff;
+
+ if (xnatoff+xnatlen > tcplen || tcplen-xnatoff < xnatlen ||
+ xnatlen < 14 || !iseol(pxnat[xnatlen-1]) ||
+ nf_strncasecmp(pxnat, "x-NAT_Address:", 14) != 0) {
+ pr_info("x-nat sanity check failed\n");
+ return 0;
+ }
+ off += 14;
+ SKIP_WSPACE(ptcp+xnatoff, xnatlen, off);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ newip = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3;
+ extaddrlen = rtsp_sprintf_addr(ct, szextaddr, &newip, true);
+ pr_debug("stunaddr=%s (auto)\n", szextaddr);
+#else
+ newip = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip;
+ extaddrlen = extip ? sprintf(szextaddr, "%pI4", &extip)
+ : sprintf(szextaddr, "%pI4", &newip);
+ pr_debug("stunaddr=%s (%s)\n", szextaddr, (extip?"forced":"auto"));
+#endif
+
+ const char* pparamend;
+ pparamend = memchr(pxnat+off, ':', xnatlen-off);
+
+ if ( !pparamend )
+ return 0;
+
+ uint nextparamoff;
+ nextparamoff = pparamend-pxnat;
+
+ char* pdstrep = NULL;
+ uint dstreplen = 0;
+ pdstrep = szextaddr;
+ dstreplen = extaddrlen;
+
+ uint dstoff = (pxnat-ptcp)+off;
+ uint dstlen = nextparamoff-off;
+
+ /* no need replace ip when ip is same with wan ip(stun). */
+ if ( strncmp(pxnat+off, szextaddr, extaddrlen) == 0)
+ return 1;
+
+ if (dstact == DSTACT_AUTO) {
+ pr_debug("x-NAT_Address: replace dst addr\n");
+ pdstrep = szextaddr;
+ dstreplen = extaddrlen;
+ }
+
+ /* force to do sequence adjust */
+ if ( dstreplen != dstlen && tp )
+ ecnt_nf_ct_seqadj_set_inline_hook(ct, ctinfo,
+ tp->seq, dstreplen - dstlen);
+ if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff,
+ dstoff, dstlen, pdstrep, dstreplen)) {
+ return 0;
+ }
+
+ return 1;
+}
+
+
+static uint
+help_out(struct sk_buff *skb, enum ip_conntrack_info ctinfo,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ unsigned int protoff,
+#endif
+ unsigned int matchoff, unsigned int matchlen,
+ struct ip_ct_rtsp_expect* prtspexp,
+ struct nf_conntrack_expect* rtp_exp,
+ struct nf_conntrack_expect* rtcp_exp)
+{
+ char* ptcp;
+ uint tcplen;
+ uint hdrsoff;
+ uint hdrslen;
+ uint lineoff;
+ uint linelen;
+ uint off;
+ int dir = CTINFO2DIR(ctinfo);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ union nf_inet_addr saddr = rtp_exp->master->tuplehash[dir].tuple.src.u3;
+#else
+ __be32 saddr = rtp_exp->master->tuplehash[dir].tuple.src.u3.ip;
+#endif
+
+ //struct iphdr* iph = (struct iphdr*)(*pskb)->nh.iph;
+ //struct tcphdr* tcph = (struct tcphdr*)((void*)iph + iph->ihl*4);
+
+ get_skb_tcpdata(skb, &ptcp, &tcplen);
+ hdrsoff = matchoff;//exp->seq - ntohl(tcph->seq);
+ hdrslen = matchlen;
+ off = hdrsoff;
+ pr_debug("NAT rtsp help_out\n");
+
+ while (nf_mime_nextline(ptcp, hdrsoff+hdrslen, &off, &lineoff, &linelen)) {
+ if (linelen == 0)
+ break;
+
+ if (off > hdrsoff+hdrslen) {
+ pr_info("!! overrun !!");
+ break;
+ }
+ pr_debug("hdr: len=%u, %.*s", linelen, (int)linelen, ptcp+lineoff);
+
+ if (nf_strncasecmp(ptcp+lineoff, "Transport:", 10) == 0) {
+ uint oldtcplen = tcplen;
+ pr_debug("hdr: Transport\n");
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ if (!rtsp_mangle_tran(ctinfo, protoff, rtp_exp, rtcp_exp,
+ prtspexp, skb, lineoff, linelen)) {
+#else
+ if (!rtsp_mangle_tran(ctinfo, rtp_exp, rtcp_exp, prtspexp,
+ skb, lineoff, linelen)) {
+#endif
+ pr_debug("hdr: Transport mangle failed");
+ break;
+ }
+ rtp_exp->expectfn = nf_nat_rtsp_expected;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ rtp_exp->saved_addr = saddr;
+#else
+ rtp_exp->saved_ip = saddr;
+#endif
+ rtp_exp->saved_proto.udp.port = htons(prtspexp->loport);
+ rtp_exp->dir = !dir;
+ if (rtcp_exp) {
+ rtcp_exp->expectfn = nf_nat_rtsp_expected;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ rtcp_exp->saved_addr = saddr;
+#else
+ rtcp_exp->saved_ip = saddr;
+#endif
+ rtcp_exp->saved_proto.udp.port = htons(prtspexp->hiport);
+ rtcp_exp->dir = !dir;
+ }
+ get_skb_tcpdata(skb, &ptcp, &tcplen);
+ hdrslen -= (oldtcplen-tcplen);
+ off -= (oldtcplen-tcplen);
+ lineoff -= (oldtcplen-tcplen);
+ linelen -= (oldtcplen-tcplen);
+ pr_debug("rep: len=%u, %.*s", linelen, (int)linelen, ptcp+lineoff);
+ }
+ else if (nf_strncasecmp(ptcp+lineoff, "x-NAT_Address:", 14) == 0) {
+ uint oldtcplen = tcplen;
+ pr_debug("hdr: x-NAT_Address\n");
+
+ if (!rtsp_mangle_xnat(ctinfo, protoff, rtp_exp, rtcp_exp,
+ prtspexp, skb, lineoff, linelen)) {
+ pr_debug("hdr: rtsp_mangle_xnat mangle failed");
+ break;
+ }
+
+ get_skb_tcpdata(skb, &ptcp, &tcplen);
+ hdrslen -= (oldtcplen-tcplen);
+ off -= (oldtcplen-tcplen);
+ lineoff -= (oldtcplen-tcplen);
+ linelen -= (oldtcplen-tcplen);
+ pr_debug("x-NAT rep: len=%u, %.*s", linelen, (int)linelen, ptcp+lineoff);
+ }
+ }
+
+ return NF_ACCEPT;
+}
+
+static unsigned int
+nf_nat_rtsp(struct sk_buff *skb, enum ip_conntrack_info ctinfo,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ unsigned int protoff,
+#endif
+ unsigned int matchoff, unsigned int matchlen,
+ struct ip_ct_rtsp_expect* prtspexp,
+ struct nf_conntrack_expect* rtp_exp,
+ struct nf_conntrack_expect* rtcp_exp)
+{
+ int dir = CTINFO2DIR(ctinfo);
+ int rc = NF_ACCEPT;
+
+ switch (dir) {
+ case IP_CT_DIR_ORIGINAL:
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ rc = help_out(skb, ctinfo, protoff, matchoff, matchlen, prtspexp,
+ rtp_exp, rtcp_exp);
+#else
+ rc = help_out(skb, ctinfo, matchoff, matchlen, prtspexp,
+ rtp_exp, rtcp_exp);
+#endif
+ break;
+ case IP_CT_DIR_REPLY:
+ pr_debug("unmangle ! %u\n", ctinfo);
+ /* XXX: unmangle */
+ rc = NF_ACCEPT;
+ break;
+ }
+ //UNLOCK_BH(&ip_rtsp_lock);
+
+ return rc;
+}
+
+static unsigned int
+init_rtp_expect(struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ __be32 rtpip, u_int16_t port,
+ u_int16_t rtp_srcport)
+{
+ int dir = CTINFO2DIR(ctinfo);
+ struct nf_conntrack_expect *rtp_new = NULL;
+ struct nf_conntrack_tuple tuple;
+ struct nf_conntrack_tuple_hash *h = NULL;
+ struct nf_conn *ct_f = NULL;
+ struct net *net = nf_ct_net(ct);
+ u16 zone;
+
+ rtp_new = nf_ct_expect_alloc(ct);
+ if (rtp_new == NULL) {
+ printk("\n nf_ct_expect_alloc failed \n");
+ return -1;
+ }
+
+ /* RTP server --> wan ip */
+ nf_ct_expect_init(rtp_new, NF_CT_EXPECT_CLASS_DEFAULT,
+ nf_ct_l3num(ct),
+ &rtpip,
+ &ct->tuplehash[dir].tuple.dst.u3,
+ IPPROTO_UDP, NULL, &port);
+
+ /*pr_debug("\n=>init_rtp_expect rtpip=%pI4, dstport=[%d], srcport=[%d]\n"
+ , &rtpip, port, rtp_srcport);*/
+
+ rtp_new->flags = 0;
+ rtp_new->expectfn = nf_nat_rtsp_expected;
+ rtp_new->saved_addr = ct->tuplehash[!dir].tuple.src.u3;
+ rtp_new->saved_proto.udp.port = port;
+ rtp_new->dir = dir;
+
+ if ( 0 != nf_ct_expect_related(rtp_new) )
+ pr_debug("nf_ct_expect_related failed for new rtp\n");
+
+ nf_ct_expect_put(rtp_new);
+
+ /* check dup ct */
+ rcu_read_lock();
+ memset(&tuple, 0, sizeof(tuple));
+ tuple.src.u3.ip = rtpip;
+ tuple.src.u.all = rtp_srcport;
+ tuple.src.l3num = PF_INET;
+ tuple.dst.u3.ip = ct->tuplehash[dir].tuple.dst.u3.ip;
+ tuple.dst.u.all = port;
+ tuple.dst.protonum = IPPROTO_UDP;
+
+ zone = nf_ct_zone(ct);
+ h = nf_conntrack_find_get(net, zone, &tuple);
+ if ( h )
+ {
+
+ ct_f = nf_ct_tuplehash_to_ctrack(h);
+ if ( ct_f )
+ {
+ /* if souce ip is not LAN ip, delete it. */
+ if ( ct_f->tuplehash[dir].tuple.src.u3.ip !=
+ ct->tuplehash[!dir].tuple.src.u3.ip )
+ {
+ pr_debug("\ndelete dup tuple, reverse info:src-ip=%pI4, dst-ip=%pI4\n"
+ , &ct_f->tuplehash[dir].tuple.src.u3
+ , &ct_f->tuplehash[dir].tuple.dst.u3
+ );
+
+ if ( del_timer(&ct_f->timeout) )
+ nf_ct_delete(ct_f, 0, 0);
+ }
+ nf_ct_put(ct_f);
+ }
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static unsigned int
+nf_nat_rtp(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ __be32 rtpip,
+ u_int16_t rtp_srcport)
+{
+ int rc = NF_ACCEPT;
+ int uport =0;
+
+ if ( ct->loport < ct->hiport )
+ {
+ /* init port range*/
+ for ( uport = ct->loport; uport <= ct->hiport; uport ++ )
+ init_rtp_expect(ct, ctinfo, rtpip, uport, rtp_srcport);
+ }
+ else
+ init_rtp_expect(ct, ctinfo, rtpip, ct->loport, rtp_srcport);
+
+ return rc;
+}
+
+static void nf_nat_rtsp_expected(struct nf_conn* ct, struct nf_conntrack_expect *exp)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ struct nf_nat_range range;
+#else
+ struct nf_nat_ipv4_range range;
+#endif
+
+ /* This must be a fresh one. */
+ BUG_ON(ct->status & IPS_NAT_DONE_MASK);
+
+ /* For DST manip, map port here to where it's expected. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ range.min_proto = range.max_proto = exp->saved_proto;
+ range.min_addr = range.max_addr = exp->saved_addr;
+#else
+ range.min = range.max = exp->saved_proto;
+ range.min_ip = range.max_ip = exp->saved_ip;
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)
+ range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
+ nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
+#else
+ range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
+ nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST);
+#endif
+
+ /* Change src to where master sends to, but only if the connection
+ * actually came from the same source. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ if (nf_inet_addr_cmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3,
+ &ct->master->tuplehash[exp->dir].tuple.src.u3)) {
+ range.min_addr = range.max_addr
+ = ct->master->tuplehash[!exp->dir].tuple.dst.u3;
+#else
+ if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip ==
+ ct->master->tuplehash[exp->dir].tuple.src.u3.ip) {
+ range.min_ip = range.max_ip
+ = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)
+ range.flags = NF_NAT_RANGE_MAP_IPS;
+ nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
+#else
+ range.flags = IP_NAT_RANGE_MAP_IPS;
+ nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);
+#endif
+ }
+}
+
+
+static void __exit fini(void)
+{
+ rcu_assign_pointer(nf_nat_rtsp_hook, NULL);
+ rcu_assign_pointer(nf_nat_rtp_hook, NULL);
+
+ synchronize_net();
+}
+
+static int __init init(void)
+{
+ printk("nf_nat_rtsp v" IP_NF_RTSP_VERSION " loading\n");
+
+ BUG_ON(nf_nat_rtsp_hook);
+ rcu_assign_pointer(nf_nat_rtsp_hook, nf_nat_rtsp);
+ BUG_ON(nf_nat_rtp_hook);
+ rcu_assign_pointer(nf_nat_rtp_hook, nf_nat_rtp);
+
+ if (stunaddr != NULL)
+ extip = in_aton(stunaddr);
+
+ if (destaction != NULL) {
+ if (strcmp(destaction, "auto") == 0)
+ dstact = DSTACT_AUTO;
+
+ if (strcmp(destaction, "strip") == 0)
+ dstact = DSTACT_STRIP;
+
+ if (strcmp(destaction, "none") == 0)
+ dstact = DSTACT_NONE;
+ }
+
+ return 0;
+}
+
+module_init(init);
+module_exit(fini);
Index: linux-3.18.21/net/netfilter/regexp/regexp.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/netfilter/regexp/regexp.c 2018-02-05 13:21:32.000000000 +0800
@@ -0,0 +1,1194 @@
+/*
+ * regcomp and regexec -- regsub and regerror are elsewhere
+ * @(#)regexp.c 1.3 of 18 April 87
+ *
+ * Copyright (c) 1986 by University of Toronto.
+ * Written by Henry Spencer. Not derived from licensed software.
+ *
+ * Permission is granted to anyone to use this software for any
+ * purpose on any computer system, and to redistribute it freely,
+ * subject to the following restrictions:
+ *
+ * 1. The author is not responsible for the consequences of use of
+ * this software, no matter how awful, even if they arise
+ * from defects in it.
+ *
+ * 2. The origin of this software must not be misrepresented, either
+ * by explicit claim or by omission.
+ *
+ * 3. Altered versions must be plainly marked as such, and must not
+ * be misrepresented as being the original software.
+ *
+ * Beware that some of this code is subtly aware of the way operator
+ * precedence is structured in regular expressions. Serious changes in
+ * regular-expression syntax might require a total rethink.
+ *
+ * This code was modified by Ethan Sommer to work within the kernel
+ * (it now uses kmalloc etc..)
+ *
+ * Modified slightly by Matthew Strait to use more modern C.
+ */
+
+#include "regexp.h"
+#include "regmagic.h"
+
+/* added by ethan and matt. Lets it work in both kernel and user space.
+(So iptables can use it, for instance.) Yea, it goes both ways... */
+#if __KERNEL__
+ #define malloc(foo) kmalloc(foo,GFP_ATOMIC)
+#else
+ #define printk(format,args...) printf(format,##args)
+#endif
+
+void regerror(char * s)
+{
+ printk("<3>Regexp: %s\n", s);
+ /* NOTREACHED */
+}
+
+/*
+ * The "internal use only" fields in regexp.h are present to pass info from
+ * compile to execute that permits the execute phase to run lots faster on
+ * simple cases. They are:
+ *
+ * regstart char that must begin a match; '\0' if none obvious
+ * reganch is the match anchored (at beginning-of-line only)?
+ * regmust string (pointer into program) that match must include, or NULL
+ * regmlen length of regmust string
+ *
+ * Regstart and reganch permit very fast decisions on suitable starting points
+ * for a match, cutting down the work a lot. Regmust permits fast rejection
+ * of lines that cannot possibly match. The regmust tests are costly enough
+ * that regcomp() supplies a regmust only if the r.e. contains something
+ * potentially expensive (at present, the only such thing detected is * or +
+ * at the start of the r.e., which can involve a lot of backup). Regmlen is
+ * supplied because the test in regexec() needs it and regcomp() is computing
+ * it anyway.
+ */
+
+/*
+ * Structure for regexp "program". This is essentially a linear encoding
+ * of a nondeterministic finite-state machine (aka syntax charts or
+ * "railroad normal form" in parsing technology). Each node is an opcode
+ * plus a "next" pointer, possibly plus an operand. "Next" pointers of
+ * all nodes except BRANCH implement concatenation; a "next" pointer with
+ * a BRANCH on both ends of it is connecting two alternatives. (Here we
+ * have one of the subtle syntax dependencies: an individual BRANCH (as
+ * opposed to a collection of them) is never concatenated with anything
+ * because of operator precedence.) The operand of some types of node is
+ * a literal string; for others, it is a node leading into a sub-FSM. In
+ * particular, the operand of a BRANCH node is the first node of the branch.
+ * (NB this is *not* a tree structure: the tail of the branch connects
+ * to the thing following the set of BRANCHes.) The opcodes are:
+ */
+
+/* definition number opnd? meaning */
+#define END 0 /* no End of program. */
+#define BOL 1 /* no Match "" at beginning of line. */
+#define EOL 2 /* no Match "" at end of line. */
+#define ANY 3 /* no Match any one character. */
+#define ANYOF 4 /* str Match any character in this string. */
+#define ANYBUT 5 /* str Match any character not in this string. */
+#define BRANCH 6 /* node Match this alternative, or the next... */
+#define BACK 7 /* no Match "", "next" ptr points backward. */
+#define EXACTLY 8 /* str Match this string. */
+#define NOTHING 9 /* no Match empty string. */
+#define STAR 10 /* node Match this (simple) thing 0 or more times. */
+#define PLUS 11 /* node Match this (simple) thing 1 or more times. */
+#define OPEN 20 /* no Mark this point in input as start of #n. */
+ /* OPEN+1 is number 1, etc. */
+#define CLOSE 30 /* no Analogous to OPEN. */
+
+/*
+ * Opcode notes:
+ *
+ * BRANCH The set of branches constituting a single choice are hooked
+ * together with their "next" pointers, since precedence prevents
+ * anything being concatenated to any individual branch. The
+ * "next" pointer of the last BRANCH in a choice points to the
+ * thing following the whole choice. This is also where the
+ * final "next" pointer of each individual branch points; each
+ * branch starts with the operand node of a BRANCH node.
+ *
+ * BACK Normal "next" pointers all implicitly point forward; BACK
+ * exists to make loop structures possible.
+ *
+ * STAR,PLUS '?', and complex '*' and '+', are implemented as circular
+ * BRANCH structures using BACK. Simple cases (one character
+ * per match) are implemented with STAR and PLUS for speed
+ * and to minimize recursive plunges.
+ *
+ * OPEN,CLOSE ...are numbered at compile time.
+ */
+
+/*
+ * A node is one char of opcode followed by two chars of "next" pointer.
+ * "Next" pointers are stored as two 8-bit pieces, high order first. The
+ * value is a positive offset from the opcode of the node containing it.
+ * An operand, if any, simply follows the node. (Note that much of the
+ * code generation knows about this implicit relationship.)
+ *
+ * Using two bytes for the "next" pointer is vast overkill for most things,
+ * but allows patterns to get big without disasters.
+ */
+#define OP(p) (*(p))
+#define NEXT(p) (((*((p)+1)&0377)<<8) + (*((p)+2)&0377))
+#define OPERAND(p) ((p) + 3)
+
+/*
+ * See regmagic.h for one further detail of program structure.
+ */
+
+
+/*
+ * Utility definitions.
+ */
+#ifndef CHARBITS
+#define UCHARAT(p) ((int)*(unsigned char *)(p))
+#else
+#define UCHARAT(p) ((int)*(p)&CHARBITS)
+#endif
+
+#define FAIL(m) { regerror(m); return(NULL); }
+#define ISMULT(c) ((c) == '*' || (c) == '+' || (c) == '?')
+#define META "^$.[()|?+*\\"
+
+/*
+ * Flags to be passed up and down.
+ */
+#define HASWIDTH 01 /* Known never to match null string. */
+#define SIMPLE 02 /* Simple enough to be STAR/PLUS operand. */
+#define SPSTART 04 /* Starts with * or +. */
+#define WORST 0 /* Worst case. */
+
+/*
+ * Global work variables for regcomp().
+ */
+struct match_globals {
+char *reginput; /* String-input pointer. */
+char *regbol; /* Beginning of input, for ^ check. */
+char **regstartp; /* Pointer to startp array. */
+char **regendp; /* Ditto for endp. */
+char *regparse; /* Input-scan pointer. */
+int regnpar; /* () count. */
+char regdummy;
+char *regcode; /* Code-emit pointer; &regdummy = don't. */
+long regsize; /* Code size. */
+};
+
+/*
+ * Forward declarations for regcomp()'s friends.
+ */
+#ifndef STATIC
+#define STATIC static
+#endif
+static char *reg(struct match_globals *g, int paren,int *flagp);
+static char *regbranch(struct match_globals *g, int *flagp);
+static char *regpiece(struct match_globals *g, int *flagp);
+static char *regatom(struct match_globals *g, int *flagp);
+static char *regnode(struct match_globals *g, char op);
+static char *regnext(struct match_globals *g, char *p);
+static void regc(struct match_globals *g, char b);
+static void reginsert(struct match_globals *g, char op, char *opnd);
+static void regtail(struct match_globals *g, char *p, char *val);
+static void regoptail(struct match_globals *g, char *p, char *val);
+
+
+__kernel_size_t my_strcspn(const char *s1,const char *s2)
+{
+ char *scan1;
+ char *scan2;
+ int count;
+
+ count = 0;
+ for (scan1 = (char *)s1; *scan1 != '\0'; scan1++) {
+ for (scan2 = (char *)s2; *scan2 != '\0';) /* ++ moved down. */
+ if (*scan1 == *scan2++)
+ return(count);
+ count++;
+ }
+ return(count);
+}
+
+/*
+ - regcomp - compile a regular expression into internal code
+ *
+ * We can't allocate space until we know how big the compiled form will be,
+ * but we can't compile it (and thus know how big it is) until we've got a
+ * place to put the code. So we cheat: we compile it twice, once with code
+ * generation turned off and size counting turned on, and once "for real".
+ * This also means that we don't allocate space until we are sure that the
+ * thing really will compile successfully, and we never have to move the
+ * code and thus invalidate pointers into it. (Note that it has to be in
+ * one piece because free() must be able to free it all.)
+ *
+ * Beware that the optimization-preparation code in here knows about some
+ * of the structure of the compiled regexp.
+*/
+regexp *
+regcomp(char *exp,int *patternsize)
+{
+ register regexp *r;
+ register char *scan;
+ register char *longest;
+ register int len;
+ int flags;
+ struct match_globals g;
+
+ /* commented out by ethan
+ extern char *malloc();
+ */
+
+ if (exp == NULL)
+ FAIL("NULL argument");
+
+ /* First pass: determine size, legality. */
+ g.regparse = exp;
+ g.regnpar = 1;
+ g.regsize = 0L;
+ g.regcode = &g.regdummy;
+ regc(&g, MAGIC);
+ if (reg(&g, 0, &flags) == NULL)
+ return(NULL);
+
+ /* Small enough for pointer-storage convention? */
+ if (g.regsize >= 32767L) /* Probably could be 65535L. */
+ FAIL("regexp too big");
+
+ /* Allocate space. */
+ *patternsize=sizeof(regexp) + (unsigned)g.regsize;
+ r = (regexp *)malloc(sizeof(regexp) + (unsigned)g.regsize);
+ if (r == NULL)
+ FAIL("out of space");
+
+ /* Second pass: emit code. */
+ g.regparse = exp;
+ g.regnpar = 1;
+ g.regcode = r->program;
+ regc(&g, MAGIC);
+ if (reg(&g, 0, &flags) == NULL)
+ return(NULL);
+
+ /* Dig out information for optimizations. */
+ r->regstart = '\0'; /* Worst-case defaults. */
+ r->reganch = 0;
+ r->regmust = NULL;
+ r->regmlen = 0;
+ scan = r->program+1; /* First BRANCH. */
+ if (OP(regnext(&g, scan)) == END) { /* Only one top-level choice. */
+ scan = OPERAND(scan);
+
+ /* Starting-point info. */
+ if (OP(scan) == EXACTLY)
+ r->regstart = *OPERAND(scan);
+ else if (OP(scan) == BOL)
+ r->reganch++;
+
+ /*
+ * If there's something expensive in the r.e., find the
+ * longest literal string that must appear and make it the
+ * regmust. Resolve ties in favor of later strings, since
+ * the regstart check works with the beginning of the r.e.
+ * and avoiding duplication strengthens checking. Not a
+ * strong reason, but sufficient in the absence of others.
+ */
+ if (flags&SPSTART) {
+ longest = NULL;
+ len = 0;
+ for (; scan != NULL; scan = regnext(&g, scan))
+ if (OP(scan) == EXACTLY && strlen(OPERAND(scan)) >= len) {
+ longest = OPERAND(scan);
+ len = strlen(OPERAND(scan));
+ }
+ r->regmust = longest;
+ r->regmlen = len;
+ }
+ }
+
+ return(r);
+}
+
+/*
+ - reg - regular expression, i.e. main body or parenthesized thing
+ *
+ * Caller must absorb opening parenthesis.
+ *
+ * Combining parenthesis handling with the base level of regular expression
+ * is a trifle forced, but the need to tie the tails of the branches to what
+ * follows makes it hard to avoid.
+ */
+static char *
+reg(struct match_globals *g, int paren, int *flagp /* Parenthesized? */ )
+{
+ register char *ret;
+ register char *br;
+ register char *ender;
+ register int parno = 0; /* 0 makes gcc happy */
+ int flags;
+
+ *flagp = HASWIDTH; /* Tentatively. */
+
+ /* Make an OPEN node, if parenthesized. */
+ if (paren) {
+ if (g->regnpar >= NSUBEXP)
+ FAIL("too many ()");
+ parno = g->regnpar;
+ g->regnpar++;
+ ret = regnode(g, OPEN+parno);
+ } else
+ ret = NULL;
+
+ /* Pick up the branches, linking them together. */
+ br = regbranch(g, &flags);
+ if (br == NULL)
+ return(NULL);
+ if (ret != NULL)
+ regtail(g, ret, br); /* OPEN -> first. */
+ else
+ ret = br;
+ if (!(flags&HASWIDTH))
+ *flagp &= ~HASWIDTH;
+ *flagp |= flags&SPSTART;
+ while (*g->regparse == '|') {
+ g->regparse++;
+ br = regbranch(g, &flags);
+ if (br == NULL)
+ return(NULL);
+ regtail(g, ret, br); /* BRANCH -> BRANCH. */
+ if (!(flags&HASWIDTH))
+ *flagp &= ~HASWIDTH;
+ *flagp |= flags&SPSTART;
+ }
+
+ /* Make a closing node, and hook it on the end. */
+ ender = regnode(g, (paren) ? CLOSE+parno : END);
+ regtail(g, ret, ender);
+
+ /* Hook the tails of the branches to the closing node. */
+ for (br = ret; br != NULL; br = regnext(g, br))
+ regoptail(g, br, ender);
+
+ /* Check for proper termination. */
+ if (paren && *g->regparse++ != ')') {
+ FAIL("unmatched ()");
+ } else if (!paren && *g->regparse != '\0') {
+ if (*g->regparse == ')') {
+ FAIL("unmatched ()");
+ } else
+ FAIL("junk on end"); /* "Can't happen". */
+ /* NOTREACHED */
+ }
+
+ return(ret);
+}
+
+/*
+ - regbranch - one alternative of an | operator
+ *
+ * Implements the concatenation operator.
+ */
+static char *
+regbranch(struct match_globals *g, int *flagp)
+{
+ register char *ret;
+ register char *chain;
+ register char *latest;
+ int flags;
+
+ *flagp = WORST; /* Tentatively. */
+
+ ret = regnode(g, BRANCH);
+ chain = NULL;
+ while (*g->regparse != '\0' && *g->regparse != '|' && *g->regparse != ')') {
+ latest = regpiece(g, &flags);
+ if (latest == NULL)
+ return(NULL);
+ *flagp |= flags&HASWIDTH;
+ if (chain == NULL) /* First piece. */
+ *flagp |= flags&SPSTART;
+ else
+ regtail(g, chain, latest);
+ chain = latest;
+ }
+ if (chain == NULL) /* Loop ran zero times. */
+ (void) regnode(g, NOTHING);
+
+ return(ret);
+}
+
+/*
+ - regpiece - something followed by possible [*+?]
+ *
+ * Note that the branching code sequences used for ? and the general cases
+ * of * and + are somewhat optimized: they use the same NOTHING node as
+ * both the endmarker for their branch list and the body of the last branch.
+ * It might seem that this node could be dispensed with entirely, but the
+ * endmarker role is not redundant.
+ */
+static char *
+regpiece(struct match_globals *g, int *flagp)
+{
+ register char *ret;
+ register char op;
+ register char *next;
+ int flags;
+
+ ret = regatom(g, &flags);
+if (ret == NULL)
+ return(NULL);
+
+ op = *g->regparse;
+if (!ISMULT(op)) {
+ *flagp = flags;
+ return(ret);
+ }
+
+ if (!(flags&HASWIDTH) && op != '?')
+ FAIL("*+ operand could be empty");
+ *flagp = (op != '+') ? (WORST|SPSTART) : (WORST|HASWIDTH);
+
+ if (op == '*' && (flags&SIMPLE))
+ reginsert(g, STAR, ret);
+ else if (op == '*') {
+ /* Emit x* as (x&|), where & means "self". */
+ reginsert(g, BRANCH, ret); /* Either x */
+ regoptail(g, ret, regnode(g, BACK)); /* and loop */
+ regoptail(g, ret, ret); /* back */
+ regtail(g, ret, regnode(g, BRANCH)); /* or */
+ regtail(g, ret, regnode(g, NOTHING)); /* null. */
+ } else if (op == '+' && (flags&SIMPLE))
+ reginsert(g, PLUS, ret);
+ else if (op == '+') {
+ /* Emit x+ as x(&|), where & means "self". */
+ next = regnode(g, BRANCH); /* Either */
+ regtail(g, ret, next);
+ regtail(g, regnode(g, BACK), ret); /* loop back */
+ regtail(g, next, regnode(g, BRANCH)); /* or */
+ regtail(g, ret, regnode(g, NOTHING)); /* null. */
+ } else if (op == '?') {
+ /* Emit x? as (x|) */
+ reginsert(g, BRANCH, ret); /* Either x */
+ regtail(g, ret, regnode(g, BRANCH)); /* or */
+ next = regnode(g, NOTHING); /* null. */
+ regtail(g, ret, next);
+ regoptail(g, ret, next);
+ }
+ g->regparse++;
+ if (ISMULT(*g->regparse))
+ FAIL("nested *?+");
+
+ return(ret);
+}
+
+/*
+ - regatom - the lowest level
+ *
+ * Optimization: gobbles an entire sequence of ordinary characters so that
+ * it can turn them into a single node, which is smaller to store and
+ * faster to run. Backslashed characters are exceptions, each becoming a
+ * separate node; the code is simpler that way and it's not worth fixing.
+ */
+static char *
+regatom(struct match_globals *g, int *flagp)
+{
+ register char *ret;
+ int flags;
+
+ *flagp = WORST; /* Tentatively. */
+
+ switch (*g->regparse++) {
+ case '^':
+ ret = regnode(g, BOL);
+ break;
+ case '$':
+ ret = regnode(g, EOL);
+ break;
+ case '.':
+ ret = regnode(g, ANY);
+ *flagp |= HASWIDTH|SIMPLE;
+ break;
+ case '[': {
+ register int class;
+ register int classend;
+
+ if (*g->regparse == '^') { /* Complement of range. */
+ ret = regnode(g, ANYBUT);
+ g->regparse++;
+ } else
+ ret = regnode(g, ANYOF);
+ if (*g->regparse == ']' || *g->regparse == '-')
+ regc(g, *g->regparse++);
+ while (*g->regparse != '\0' && *g->regparse != ']') {
+ if (*g->regparse == '-') {
+ g->regparse++;
+ if (*g->regparse == ']' || *g->regparse == '\0')
+ regc(g, '-');
+ else {
+ class = UCHARAT(g->regparse-2)+1;
+ classend = UCHARAT(g->regparse);
+ if (class > classend+1)
+ FAIL("invalid [] range");
+ for (; class <= classend; class++)
+ regc(g, class);
+ g->regparse++;
+ }
+ } else
+ regc(g, *g->regparse++);
+ }
+ regc(g, '\0');
+ if (*g->regparse != ']')
+ FAIL("unmatched []");
+ g->regparse++;
+ *flagp |= HASWIDTH|SIMPLE;
+ }
+ break;
+ case '(':
+ ret = reg(g, 1, &flags);
+ if (ret == NULL)
+ return(NULL);
+ *flagp |= flags&(HASWIDTH|SPSTART);
+ break;
+ case '\0':
+ case '|':
+ case ')':
+ FAIL("internal urp"); /* Supposed to be caught earlier. */
+ break;
+ case '?':
+ case '+':
+ case '*':
+ FAIL("?+* follows nothing");
+ break;
+ case '\\':
+ if (*g->regparse == '\0')
+ FAIL("trailing \\");
+ ret = regnode(g, EXACTLY);
+ regc(g, *g->regparse++);
+ regc(g, '\0');
+ *flagp |= HASWIDTH|SIMPLE;
+ break;
+ default: {
+ register int len;
+ register char ender;
+
+ g->regparse--;
+ len = my_strcspn((const char *)g->regparse, (const char *)META);
+ if (len <= 0)
+ FAIL("internal disaster");
+ ender = *(g->regparse+len);
+ if (len > 1 && ISMULT(ender))
+ len--; /* Back off clear of ?+* operand. */
+ *flagp |= HASWIDTH;
+ if (len == 1)
+ *flagp |= SIMPLE;
+ ret = regnode(g, EXACTLY);
+ while (len > 0) {
+ regc(g, *g->regparse++);
+ len--;
+ }
+ regc(g, '\0');
+ }
+ break;
+ }
+
+ return(ret);
+}
+
+/*
+ - regnode - emit a node
+ */
+static char * /* Location. */
+regnode(struct match_globals *g, char op)
+{
+ register char *ret;
+ register char *ptr;
+
+ ret = g->regcode;
+ if (ret == &g->regdummy) {
+ g->regsize += 3;
+ return(ret);
+ }
+
+ ptr = ret;
+ *ptr++ = op;
+ *ptr++ = '\0'; /* Null "next" pointer. */
+ *ptr++ = '\0';
+ g->regcode = ptr;
+
+ return(ret);
+}
+
+/*
+ - regc - emit (if appropriate) a byte of code
+ */
+static void
+regc(struct match_globals *g, char b)
+{
+ if (g->regcode != &g->regdummy)
+ *g->regcode++ = b;
+ else
+ g->regsize++;
+}
+
+/*
+ - reginsert - insert an operator in front of already-emitted operand
+ *
+ * Means relocating the operand.
+ */
+static void
+reginsert(struct match_globals *g, char op, char* opnd)
+{
+ register char *src;
+ register char *dst;
+ register char *place;
+
+ if (g->regcode == &g->regdummy) {
+ g->regsize += 3;
+ return;
+ }
+
+ src = g->regcode;
+ g->regcode += 3;
+ dst = g->regcode;
+ while (src > opnd)
+ *--dst = *--src;
+
+ place = opnd; /* Op node, where operand used to be. */
+ *place++ = op;
+ *place++ = '\0';
+ *place++ = '\0';
+}
+
+/*
+ - regtail - set the next-pointer at the end of a node chain
+ */
+static void
+regtail(struct match_globals *g, char *p, char *val)
+{
+ register char *scan;
+ register char *temp;
+ register int offset;
+
+ if (p == &g->regdummy)
+ return;
+
+ /* Find last node. */
+ scan = p;
+ for (;;) {
+ temp = regnext(g, scan);
+ if (temp == NULL)
+ break;
+ scan = temp;
+ }
+
+ if (OP(scan) == BACK)
+ offset = scan - val;
+ else
+ offset = val - scan;
+ *(scan+1) = (offset>>8)&0377;
+ *(scan+2) = offset&0377;
+}
+
+/*
+ - regoptail - regtail on operand of first argument; nop if operandless
+ */
+static void
+regoptail(struct match_globals *g, char *p, char *val)
+{
+/* "Operandless" and "op != BRANCH" are synonymous in practice. */
+ if (p == NULL || p == &g->regdummy || OP(p) != BRANCH)
+ return;
+ regtail(g, OPERAND(p), val);
+}
+
+/*
+ * regexec and friends
+ */
+
+
+/*
+ * Forwards.
+ */
+static int regtry(struct match_globals *g, regexp *prog, char *string);
+static int regmatch(struct match_globals *g, char *prog);
+static int regrepeat(struct match_globals *g, char *p);
+
+#ifdef DEBUG
+int regnarrate = 0;
+void regdump();
+static char *regprop(char *op);
+#endif
+
+/*
+ - regexec - match a regexp against a string
+ */
+int
+regexec(regexp *prog, char *string)
+{
+ register char *s;
+ struct match_globals g;
+
+ /* Be paranoid... */
+ if (prog == NULL || string == NULL) {
+ printk("<3>Regexp: NULL parameter\n");
+ return(0);
+ }
+
+ /* Check validity of program. */
+if (UCHARAT(prog->program) != MAGIC) {
+ printk("<3>Regexp: corrupted program\n");
+ return(0);
+ }
+
+ /* If there is a "must appear" string, look for it. */
+ if (prog->regmust != NULL) {
+ s = string;
+ while ((s = strchr(s, prog->regmust[0])) != NULL) {
+ if (strncmp(s, prog->regmust, prog->regmlen) == 0)
+ break; /* Found it. */
+ s++;
+ }
+ if (s == NULL) /* Not present. */
+ return(0);
+ }
+
+ /* Mark beginning of line for ^ . */
+ g.regbol = string;
+ /* Simplest case: anchored match need be tried only once. */
+ if (prog->reganch)
+ return(regtry(&g, prog, string));
+
+ /* Messy cases: unanchored match. */
+ s = string;
+ if (prog->regstart != '\0')
+ /* We know what char it must start with. */
+ while ((s = strchr(s, prog->regstart)) != NULL) {
+ if (regtry(&g, prog, s))
+ return(1);
+ s++;
+ }
+ else
+ /* We don't -- general case. */
+ do {
+ if (regtry(&g, prog, s))
+ return(1);
+ } while (*s++ != '\0');
+
+ /* Failure. */
+ return(0);
+}
+
+/*
+ - regtry - try match at specific point
+ */
+static int /* 0 failure, 1 success */
+regtry(struct match_globals *g, regexp *prog, char *string)
+{
+ register int i;
+ register char **sp;
+ register char **ep;
+ g->reginput = string;
+ g->regstartp = prog->startp;
+ g->regendp = prog->endp;
+
+ sp = prog->startp;
+ ep = prog->endp;
+ for (i = NSUBEXP; i > 0; i--) {
+ *sp++ = NULL;
+ *ep++ = NULL;
+ }
+ if (regmatch(g, prog->program + 1)) {
+ prog->startp[0] = string;
+ prog->endp[0] = g->reginput;
+ return(1);
+ } else
+ return(0);
+}
+
+/*
+ - regmatch - main matching routine
+ *
+ * Conceptually the strategy is simple: check to see whether the current
+ * node matches, call self recursively to see whether the rest matches,
+ * and then act accordingly. In practice we make some effort to avoid
+ * recursion, in particular by going through "ordinary" nodes (that don't
+ * need to know whether the rest of the match failed) by a loop instead of
+ * by recursion.
+ */
+static int /* 0 failure, 1 success */
+regmatch(struct match_globals *g, char *prog)
+{
+ register char *scan = prog; /* Current node. */
+ char *next; /* Next node. */
+
+#ifdef DEBUG
+ if (scan != NULL && regnarrate)
+ fprintf(stderr, "%s(\n", regprop(scan));
+#endif
+ while (scan != NULL) {
+#ifdef DEBUG
+ if (regnarrate)
+ fprintf(stderr, "%s...\n", regprop(scan));
+#endif
+ next = regnext(g, scan);
+
+ switch (OP(scan)) {
+ case BOL:
+ if (g->reginput != g->regbol)
+ return(0);
+ break;
+ case EOL:
+ if (*g->reginput != '\0')
+ return(0);
+ break;
+ case ANY:
+ if (*g->reginput == '\0')
+ return(0);
+ g->reginput++;
+ break;
+ case EXACTLY: {
+ register int len;
+ register char *opnd;
+
+ opnd = OPERAND(scan);
+ /* Inline the first character, for speed. */
+ if (*opnd != *g->reginput)
+ return(0);
+ len = strlen(opnd);
+ if (len > 1 && strncmp(opnd, g->reginput, len) != 0)
+ return(0);
+ g->reginput += len;
+ }
+ break;
+ case ANYOF:
+ if (*g->reginput == '\0' || strchr(OPERAND(scan), *g->reginput) == NULL)
+ return(0);
+ g->reginput++;
+ break;
+ case ANYBUT:
+ if (*g->reginput == '\0' || strchr(OPERAND(scan), *g->reginput) != NULL)
+ return(0);
+ g->reginput++;
+ break;
+ case NOTHING:
+ case BACK:
+ break;
+ case OPEN+1:
+ case OPEN+2:
+ case OPEN+3:
+ case OPEN+4:
+ case OPEN+5:
+ case OPEN+6:
+ case OPEN+7:
+ case OPEN+8:
+ case OPEN+9: {
+ register int no;
+ register char *save;
+
+ no = OP(scan) - OPEN;
+ save = g->reginput;
+
+ if (regmatch(g, next)) {
+ /*
+ * Don't set startp if some later
+ * invocation of the same parentheses
+ * already has.
+ */
+ if (g->regstartp[no] == NULL)
+ g->regstartp[no] = save;
+ return(1);
+ } else
+ return(0);
+ }
+ break;
+ case CLOSE+1:
+ case CLOSE+2:
+ case CLOSE+3:
+ case CLOSE+4:
+ case CLOSE+5:
+ case CLOSE+6:
+ case CLOSE+7:
+ case CLOSE+8:
+ case CLOSE+9:
+ {
+ register int no;
+ register char *save;
+
+ no = OP(scan) - CLOSE;
+ save = g->reginput;
+
+ if (regmatch(g, next)) {
+ /*
+ * Don't set endp if some later
+ * invocation of the same parentheses
+ * already has.
+ */
+ if (g->regendp[no] == NULL)
+ g->regendp[no] = save;
+ return(1);
+ } else
+ return(0);
+ }
+ break;
+ case BRANCH: {
+ register char *save;
+
+ if (OP(next) != BRANCH) /* No choice. */
+ next = OPERAND(scan); /* Avoid recursion. */
+ else {
+ do {
+ save = g->reginput;
+ if (regmatch(g, OPERAND(scan)))
+ return(1);
+ g->reginput = save;
+ scan = regnext(g, scan);
+ } while (scan != NULL && OP(scan) == BRANCH);
+ return(0);
+ /* NOTREACHED */
+ }
+ }
+ break;
+ case STAR:
+ case PLUS: {
+ register char nextch;
+ register int no;
+ register char *save;
+ register int min;
+
+ /*
+ * Lookahead to avoid useless match attempts
+ * when we know what character comes next.
+ */
+ nextch = '\0';
+ if (OP(next) == EXACTLY)
+ nextch = *OPERAND(next);
+ min = (OP(scan) == STAR) ? 0 : 1;
+ save = g->reginput;
+ no = regrepeat(g, OPERAND(scan));
+ while (no >= min) {
+ /* If it could work, try it. */
+ if (nextch == '\0' || *g->reginput == nextch)
+ if (regmatch(g, next))
+ return(1);
+ /* Couldn't or didn't -- back up. */
+ no--;
+ g->reginput = save + no;
+ }
+ return(0);
+ }
+ break;
+ case END:
+ return(1); /* Success! */
+ break;
+ default:
+ printk("<3>Regexp: memory corruption\n");
+ return(0);
+ break;
+ }
+
+ scan = next;
+ }
+
+/*
+ * We get here only if there's trouble -- normally "case END" is
+ * the terminating point.
+ */
+ printk("<3>Regexp: corrupted pointers\n");
+ return(0);
+}
+
+/*
+ - regrepeat - repeatedly match something simple, report how many
+ */
+static int
+regrepeat(struct match_globals *g, char *p)
+{
+ register int count = 0;
+ register char *scan;
+ register char *opnd;
+
+ scan = g->reginput;
+ opnd = OPERAND(p);
+ switch (OP(p)) {
+ case ANY:
+ count = strlen(scan);
+ scan += count;
+ break;
+ case EXACTLY:
+ while (*opnd == *scan) {
+ count++;
+ scan++;
+ }
+ break;
+ case ANYOF:
+ while (*scan != '\0' && strchr(opnd, *scan) != NULL) {
+ count++;
+ scan++;
+ }
+ break;
+ case ANYBUT:
+ while (*scan != '\0' && strchr(opnd, *scan) == NULL) {
+ count++;
+ scan++;
+ }
+ break;
+ default: /* Oh dear. Called inappropriately. */
+ printk("<3>Regexp: internal foulup\n");
+ count = 0; /* Best compromise. */
+ break;
+ }
+ g->reginput = scan;
+
+ return(count);
+}
+
+/*
+ - regnext - dig the "next" pointer out of a node
+ */
+static char*
+regnext(struct match_globals *g, char *p)
+{
+ register int offset;
+
+ if (p == &g->regdummy)
+ return(NULL);
+
+ offset = NEXT(p);
+ if (offset == 0)
+ return(NULL);
+
+ if (OP(p) == BACK)
+ return(p-offset);
+ else
+ return(p+offset);
+}
+
+#ifdef DEBUG
+
+static char *regprop();
+
+/*
+ - regdump - dump a regexp onto stdout in vaguely comprehensible form
+ */
+void
+regdump(regexp *r)
+{
+ register char *s;
+ register char op = EXACTLY; /* Arbitrary non-END op. */
+ register char *next;
+ /* extern char *strchr(); */
+
+
+ s = r->program + 1;
+ while (op != END) { /* While that wasn't END last time... */
+ op = OP(s);
+ printf("%2d%s", s-r->program, regprop(s)); /* Where, what. */
+ next = regnext(s);
+ if (next == NULL) /* Next ptr. */
+ printf("(0)");
+ else
+ printf("(%d)", (s-r->program)+(next-s));
+ s += 3;
+ if (op == ANYOF || op == ANYBUT || op == EXACTLY) {
+ /* Literal string, where present. */
+ while (*s != '\0') {
+ putchar(*s);
+ s++;
+ }
+ s++;
+ }
+ putchar('\n');
+ }
+
+ /* Header fields of interest. */
+ if (r->regstart != '\0')
+ printf("start `%c' ", r->regstart);
+ if (r->reganch)
+ printf("anchored ");
+ if (r->regmust != NULL)
+ printf("must have \"%s\"", r->regmust);
+ printf("\n");
+}
+
+/*
+ - regprop - printable representation of opcode
+ */
+static char *
+regprop(char *op)
+{
+#define BUFLEN 50
+ register char *p;
+ static char buf[BUFLEN];
+
+ strcpy(buf, ":");
+
+switch (OP(op)) {
+ case BOL:
+ p = "BOL";
+ break;
+ case EOL:
+ p = "EOL";
+ break;
+ case ANY:
+ p = "ANY";
+ break;
+ case ANYOF:
+ p = "ANYOF";
+ break;
+ case ANYBUT:
+ p = "ANYBUT";
+ break;
+ case BRANCH:
+ p = "BRANCH";
+ break;
+ case EXACTLY:
+ p = "EXACTLY";
+ break;
+ case NOTHING:
+ p = "NOTHING";
+ break;
+ case BACK:
+ p = "BACK";
+ break;
+ case END:
+ p = "END";
+ break;
+ case OPEN+1:
+ case OPEN+2:
+ case OPEN+3:
+ case OPEN+4:
+ case OPEN+5:
+ case OPEN+6:
+ case OPEN+7:
+ case OPEN+8:
+ case OPEN+9:
+ snprintf(buf+strlen(buf),BUFLEN-strlen(buf), "OPEN%d", OP(op)-OPEN);
+ p = NULL;
+ break;
+ case CLOSE+1:
+ case CLOSE+2:
+ case CLOSE+3:
+ case CLOSE+4:
+ case CLOSE+5:
+ case CLOSE+6:
+ case CLOSE+7:
+ case CLOSE+8:
+ case CLOSE+9:
+ snprintf(buf+strlen(buf),BUFLEN-strlen(buf), "CLOSE%d", OP(op)-CLOSE);
+ p = NULL;
+ break;
+ case STAR:
+ p = "STAR";
+ break;
+ case PLUS:
+ p = "PLUS";
+ break;
+ default:
+ printk("<3>Regexp: corrupted opcode\n");
+ break;
+ }
+ if (p != NULL)
+ strncat(buf, p, BUFLEN-strlen(buf));
+ return(buf);
+}
+#endif
+
Index: linux-3.18.21/net/netfilter/regexp/regexp.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/netfilter/regexp/regexp.h 2018-02-05 13:21:32.000000000 +0800
@@ -0,0 +1,40 @@
+/*
+ * Definitions etc. for regexp(3) routines.
+ *
+ * Caveat: this is V8 regexp(3) [actually, a reimplementation thereof],
+ * not the System V one.
+ */
+
+#ifndef REGEXP_H
+#define REGEXP_H
+
+
+/*
+http://www.opensource.apple.com/darwinsource/10.3/expect-1/expect/expect.h ,
+which contains a version of this library, says:
+
+ *
+ * NSUBEXP must be at least 10, and no greater than 117 or the parser
+ * will not work properly.
+ *
+However, it looks rather like this library is limited to 10. If you think
+otherwise, let us know.
+*/
+
+#define NSUBEXP 10
+typedef struct regexp {
+ char *startp[NSUBEXP];
+ char *endp[NSUBEXP];
+ char regstart; /* Internal use only. */
+ char reganch; /* Internal use only. */
+ char *regmust; /* Internal use only. */
+ int regmlen; /* Internal use only. */
+ char program[1]; /* Unwarranted chumminess with compiler. */
+} regexp;
+
+regexp * regcomp(char *exp, int *patternsize);
+int regexec(regexp *prog, char *string);
+void regsub(regexp *prog, char *source, char *dest);
+void regerror(char *s);
+
+#endif
\ No newline at end of file
Index: linux-3.18.21/net/netfilter/regexp/regmagic.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/netfilter/regexp/regmagic.h 2018-02-05 13:21:32.000000000 +0800
@@ -0,0 +1,5 @@
+/*
+ * The first byte of the regexp internal "program" is actually this magic
+ * number; the start node begins in the second byte.
+ */
+#define MAGIC 0234
\ No newline at end of file
Index: linux-3.18.21/net/netfilter/regexp/regsub.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/netfilter/regexp/regsub.c 2018-02-05 13:21:32.000000000 +0800
@@ -0,0 +1,95 @@
+/*
+ * regsub
+ * @(#)regsub.c 1.3 of 2 April 86
+ *
+ * Copyright (c) 1986 by University of Toronto.
+ * Written by Henry Spencer. Not derived from licensed software.
+ *
+ * Permission is granted to anyone to use this software for any
+ * purpose on any computer system, and to redistribute it freely,
+ * subject to the following restrictions:
+ *
+ * 1. The author is not responsible for the consequences of use of
+ * this software, no matter how awful, even if they arise
+ * from defects in it.
+ *
+ * 2. The origin of this software must not be misrepresented, either
+ * by explicit claim or by omission.
+ *
+ * 3. Altered versions must be plainly marked as such, and must not
+ * be misrepresented as being the original software.
+ *
+ *
+ * This code was modified by Ethan Sommer to work within the kernel
+ * (it now uses kmalloc etc..)
+ *
+ */
+#include "regexp.h"
+#include "regmagic.h"
+#include <linux/string.h>
+
+
+#ifndef CHARBITS
+#define UCHARAT(p) ((int)*(unsigned char *)(p))
+#else
+#define UCHARAT(p) ((int)*(p)&CHARBITS)
+#endif
+
+#if 0
+//void regerror(char * s)
+//{
+// printk("regexp(3): %s", s);
+// /* NOTREACHED */
+//}
+#endif
+
+/*
+ - regsub - perform substitutions after a regexp match
+ */
+void
+regsub(regexp * prog, char * source, char * dest)
+{
+ register char *src;
+ register char *dst;
+ register char c;
+ register int no;
+ register int len;
+
+ /* Not necessary and gcc doesn't like it -MLS */
+ /*extern char *strncpy();*/
+
+ if (prog == NULL || source == NULL || dest == NULL) {
+ regerror("NULL parm to regsub");
+ return;
+ }
+ if (UCHARAT(prog->program) != MAGIC) {
+ regerror("damaged regexp fed to regsub");
+ return;
+ }
+
+ src = source;
+ dst = dest;
+ while ((c = *src++) != '\0') {
+ if (c == '&')
+ no = 0;
+ else if (c == '\\' && '0' <= *src && *src <= '9')
+ no = *src++ - '0';
+ else
+ no = -1;
+
+ if (no < 0) { /* Ordinary character. */
+ if (c == '\\' && (*src == '\\' || *src == '&'))
+ c = *src++;
+ *dst++ = c;
+ } else if (prog->startp[no] != NULL && prog->endp[no] != NULL) {
+ len = prog->endp[no] - prog->startp[no];
+ (void) strncpy(dst, prog->startp[no], len);
+ dst += len;
+ if (len != 0 && *(dst-1) == '\0') { /* strncpy hit NUL. */
+ regerror("damaged match string");
+ return;
+ }
+ }
+ }
+ *dst++ = '\0';
+}
\ No newline at end of file
Index: linux-3.18.21/net/netfilter/xt_layer7.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/netfilter/xt_layer7.c 2018-02-05 13:21:32.000000000 +0800
@@ -0,0 +1,684 @@
+/*
+ Kernel module to match application layer (OSI layer 7) data in connections.
+
+ http://l7-filter.sf.net
+
+ (C) 2003-2009 Matthew Strait and Ethan Sommer.
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version
+ 2 of the License, or (at your option) any later version.
+ http://www.gnu.org/licenses/gpl.txt
+
+ Based on ipt_string.c (C) 2000 Emmanuel Roger <winfield@freegates.be>,
+ xt_helper.c (C) 2002 Harald Welte and cls_layer7.c (C) 2003 Matthew Strait,
+ Ethan Sommer, Justin Levandoski.
+*/
+
+#include <linux/spinlock.h>
+#include <linux/version.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_layer7.h>
+#include <linux/ctype.h>
+#include <linux/proc_fs.h>
+
+#include "regexp/regexp.c"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Matthew Strait <quadong@users.sf.net>, Ethan Sommer <sommere@users.sf.net>");
+MODULE_DESCRIPTION("iptables application layer match module");
+MODULE_ALIAS("ipt_layer7");
+MODULE_VERSION("2.21");
+
+static int maxdatalen = 2048; // this is the default
+module_param(maxdatalen, int, 0444);
+MODULE_PARM_DESC(maxdatalen, "maximum bytes of data looked at by l7-filter");
+#ifdef CONFIG_NETFILTER_XT_MATCH_LAYER7_DEBUG
+ #define DPRINTK(format,args...) printk(format,##args)
+#else
+ #define DPRINTK(format,args...)
+#endif
+
+/* Number of packets whose data we look at.
+This can be modified through /proc/net/layer7_numpackets */
+static int num_packets = 10;
+
+static struct pattern_cache {
+ char * regex_string;
+ regexp * pattern;
+ struct pattern_cache * next;
+} * first_pattern_cache = NULL;
+
+DEFINE_SPINLOCK(l7_lock);
+
+static int total_acct_packets(struct nf_conn *ct)
+{
+ struct nf_conn_counter *acct;
+
+ BUG_ON(ct == NULL);
+ acct = nf_conn_acct_find(ct);
+ if (!acct)
+ return 0;
+ return (atomic64_read(&acct[IP_CT_DIR_ORIGINAL].packets) + atomic64_read(&acct[IP_CT_DIR_REPLY].packets));
+ //return (acct[IP_CT_DIR_ORIGINAL].packets + acct[IP_CT_DIR_REPLY].packets);
+}
+
+#ifdef CONFIG_IP_NF_MATCH_LAYER7_DEBUG
+/* Converts an unfriendly string into a friendly one by
+replacing unprintables with periods and all whitespace with " ". */
+static char * friendly_print(unsigned char * s)
+{
+ char * f = kmalloc(strlen(s) + 1, GFP_ATOMIC);
+ int i;
+
+ if(!f) {
+ if (net_ratelimit())
+ printk(KERN_ERR "layer7: out of memory in "
+ "friendly_print, bailing.\n");
+ return NULL;
+ }
+
+ for(i = 0; i < strlen(s); i++){
+ if(isprint(s[i]) && s[i] < 128) f[i] = s[i];
+ else if(isspace(s[i])) f[i] = ' ';
+ else f[i] = '.';
+ }
+ f[i] = '\0';
+ return f;
+}
+
+static char dec2hex(int i)
+{
+ switch (i) {
+ case 0 ... 9:
+ return (i + '0');
+ break;
+ case 10 ... 15:
+ return (i - 10 + 'a');
+ break;
+ default:
+ if (net_ratelimit())
+ printk("layer7: Problem in dec2hex\n");
+ return '\0';
+ }
+}
+
+static char * hex_print(unsigned char * s)
+{
+ char * g = kmalloc(strlen(s)*3 + 1, GFP_ATOMIC);
+ int i;
+
+ if(!g) {
+ if (net_ratelimit())
+ printk(KERN_ERR "layer7: out of memory in hex_print, "
+ "bailing.\n");
+ return NULL;
+ }
+
+ for(i = 0; i < strlen(s); i++) {
+ g[i*3 ] = dec2hex(s[i]/16);
+ g[i*3 + 1] = dec2hex(s[i]%16);
+ g[i*3 + 2] = ' ';
+ }
+ g[i*3] = '\0';
+
+ return g;
+}
+#endif // DEBUG
+
+/* Use instead of regcomp. As we expect to be seeing the same regexps over and
+over again, it makes sense to cache the results. */
+static regexp * compile_and_cache(const char * regex_string,
+ const char * protocol)
+{
+ struct pattern_cache * node = first_pattern_cache;
+ struct pattern_cache * last_pattern_cache = first_pattern_cache;
+ struct pattern_cache * tmp;
+ unsigned int len;
+
+ while (node != NULL) {
+ if (!strcmp(node->regex_string, regex_string))
+ return node->pattern;
+
+ last_pattern_cache = node;/* points at the last non-NULL node */
+ node = node->next;
+ }
+
+ /* If we reach the end of the list, then we have not yet cached
+ the pattern for this regex. Let's do that now.
+ Be paranoid about running out of memory to avoid list corruption. */
+ tmp = kmalloc(sizeof(struct pattern_cache), GFP_ATOMIC);
+
+ if(!tmp) {
+ if (net_ratelimit())
+ printk(KERN_ERR "layer7: out of memory in "
+ "compile_and_cache, bailing.\n");
+ return NULL;
+ }
+
+ tmp->regex_string = kmalloc(strlen(regex_string) + 1, GFP_ATOMIC);
+ tmp->pattern = kmalloc(sizeof(struct regexp), GFP_ATOMIC);
+ tmp->next = NULL;
+
+ if(!tmp->regex_string || !tmp->pattern) {
+ if (net_ratelimit())
+ printk(KERN_ERR "layer7: out of memory in "
+ "compile_and_cache, bailing.\n");
+ kfree(tmp->regex_string);
+ kfree(tmp->pattern);
+ kfree(tmp);
+ return NULL;
+ }
+
+ /* Ok. The new node is all ready now. */
+ node = tmp;
+
+ if(first_pattern_cache == NULL) /* list is empty */
+ first_pattern_cache = node; /* make node the beginning */
+ else
+ last_pattern_cache->next = node; /* attach node to the end */
+
+ /* copy the string and compile the regex */
+ len = strlen(regex_string);
+ DPRINTK("About to compile this: \"%s\"\n", regex_string);
+ node->pattern = regcomp((char *)regex_string, &len);
+ if ( !node->pattern ) {
+ if (net_ratelimit())
+ printk(KERN_ERR "layer7: Error compiling regexp "
+ "\"%s\" (%s)\n",
+ regex_string, protocol);
+ /* pattern is now cached as NULL, so we won't try again. */
+ }
+
+ strcpy(node->regex_string, regex_string);
+ return node->pattern;
+}
+
+static int can_handle(const struct sk_buff *skb)
+{
+ struct iphdr iphdr_tmp;
+ struct iphdr *iphdr;
+ int offset;
+
+ if (!ip_hdr(skb))
+ return 0;
+
+ offset = ((uintptr_t)ip_hdr(skb)) - ((uintptr_t)skb->data);
+
+ iphdr = skb_header_pointer(skb, offset, sizeof(*iphdr), &iphdr_tmp);
+ if (!iphdr)
+ return 0;
+
+ if (iphdr->protocol == IPPROTO_TCP ||
+ iphdr->protocol == IPPROTO_UDP ||
+ iphdr->protocol == IPPROTO_ICMP)
+ return 1;
+
+ return 0;
+}
+
+/* Returns the offset into skb->data at which the application data starts */
+static int app_data_offset(const struct sk_buff *skb)
+{
+ /* In case we are ported somewhere (ebtables?) where ip_hdr(skb)
+ isn't set, this can be gotten from 4*(skb->data[0] & 0x0f) as well. */
+ int ip_hl = 4*ip_hdr(skb)->ihl;
+
+ if( ip_hdr(skb)->protocol == IPPROTO_TCP ) {
+ /* 12 == offset into TCP header for the header length field.
+ Can't get this with skb->h.th->doff because the tcphdr
+ struct doesn't get set when routing (this is confirmed to be
+ true in Netfilter as well as QoS.) */
+ int tcp_hl = 4*(skb->data[ip_hl + 12] >> 4);
+
+ return ip_hl + tcp_hl;
+ } else if( ip_hdr(skb)->protocol == IPPROTO_UDP ) {
+ return ip_hl + 8; /* UDP header is always 8 bytes */
+ } else if( ip_hdr(skb)->protocol == IPPROTO_ICMP ) {
+ return ip_hl + 8; /* ICMP header is 8 bytes */
+ } else {
+ if (net_ratelimit())
+ printk(KERN_ERR "layer7: tried to handle unknown "
+ "protocol!\n");
+ return ip_hl + 8; /* something reasonable */
+ }
+}
+
+/* handles whether there's a match when we aren't appending data anymore */
+static int match_no_append(struct nf_conn * conntrack,
+ struct nf_conn * master_conntrack,
+ enum ip_conntrack_info ctinfo,
+ enum ip_conntrack_info master_ctinfo,
+ const struct xt_layer7_info * info)
+{
+ /* If we're in here, throw the app data away */
+ if(master_conntrack->layer7.app_data != NULL) {
+
+ #ifdef CONFIG_IP_NF_MATCH_LAYER7_DEBUG
+ if(!master_conntrack->layer7.app_proto) {
+ char * f =
+ friendly_print(master_conntrack->layer7.app_data);
+ char * g =
+ hex_print(master_conntrack->layer7.app_data);
+ DPRINTK("\nl7-filter gave up after %d bytes "
+ "(%d packets):\n%s\n",
+ strlen(f), total_acct_packets(master_conntrack), f);
+ kfree(f);
+ DPRINTK("In hex: %s\n", g);
+ kfree(g);
+ }
+ #endif
+
+ kfree(master_conntrack->layer7.app_data);
+ master_conntrack->layer7.app_data = NULL; /* don't free again */
+ }
+
+ if(master_conntrack->layer7.app_proto){
+ /* Here child connections set their .app_proto (for /proc) */
+ if(!conntrack->layer7.app_proto) {
+ conntrack->layer7.app_proto =
+ kmalloc(strlen(master_conntrack->layer7.app_proto)+1,
+ GFP_ATOMIC);
+ if(!conntrack->layer7.app_proto){
+ if (net_ratelimit())
+ printk(KERN_ERR "layer7: out of memory "
+ "in match_no_append, "
+ "bailing.\n");
+ return 1;
+ }
+ strcpy(conntrack->layer7.app_proto,
+ master_conntrack->layer7.app_proto);
+ }
+
+ return (!strcmp(master_conntrack->layer7.app_proto,
+ info->protocol));
+ }
+ else {
+ /* If not classified, set to "unknown" to distinguish from
+ connections that are still being tested. */
+ master_conntrack->layer7.app_proto =
+ kmalloc(strlen("unknown")+1, GFP_ATOMIC);
+ if(!master_conntrack->layer7.app_proto){
+ if (net_ratelimit())
+ printk(KERN_ERR "layer7: out of memory in "
+ "match_no_append, bailing.\n");
+ return 1;
+ }
+ strcpy(master_conntrack->layer7.app_proto, "unknown");
+ return 0;
+ }
+}
+
+/* add the new app data to the conntrack. Return number of bytes added. */
+static int add_data(struct nf_conn * master_conntrack,
+ char * app_data, int appdatalen)
+{
+ int length = 0, i;
+ int oldlength = master_conntrack->layer7.app_data_len;
+
+ /* This is a fix for a race condition by Deti Fliegl. However, I'm not
+ clear on whether the race condition exists or whether this really
+ fixes it. I might just be being dense... Anyway, if it's not really
+ a fix, all it does is waste a very small amount of time. */
+ if(!master_conntrack->layer7.app_data) return 0;
+
+ /* Strip nulls. Make everything lower case (our regex lib doesn't
+ do case insensitivity). Add it to the end of the current data. */
+ for(i = 0; i < maxdatalen-oldlength-1 &&
+ i < appdatalen; i++) {
+ if(app_data[i] != '\0') {
+ /* the kernel version of tolower mungs 'upper ascii' */
+ master_conntrack->layer7.app_data[length+oldlength] =
+ isascii(app_data[i])?
+ tolower(app_data[i]) : app_data[i];
+ length++;
+ }
+ }
+
+ master_conntrack->layer7.app_data[length+oldlength] = '\0';
+ master_conntrack->layer7.app_data_len = length + oldlength;
+
+ return length;
+}
+
+/* taken from drivers/video/modedb.c */
+static int my_atoi(const char *s)
+{
+ int val = 0;
+
+ for (;; s++) {
+ switch (*s) {
+ case '0'...'9':
+ val = 10*val+(*s-'0');
+ break;
+ default:
+ return val;
+ }
+ }
+}
+
+/* write out num_packets to userland. */
+static int layer7_read_proc(char* page, char ** start, off_t off, int count,
+ int* eof, void * data)
+{
+ if(num_packets > 99 && net_ratelimit())
+ printk(KERN_ERR "layer7: NOT REACHED. num_packets too big\n");
+
+ page[0] = num_packets/10 + '0';
+ page[1] = num_packets%10 + '0';
+ page[2] = '\n';
+ page[3] = '\0';
+
+ *eof=1;
+
+ return 3;
+}
+
+/* Read in num_packets from userland */
+static int layer7_write_proc(struct file* file, const char* buffer,
+ unsigned long count, void *data)
+{
+ char * foo = kmalloc(count, GFP_ATOMIC);
+
+ if(!foo){
+ if (net_ratelimit())
+ printk(KERN_ERR "layer7: out of memory, bailing. "
+ "num_packets unchanged.\n");
+ return count;
+ }
+
+ if(copy_from_user(foo, buffer, count)) {
+ return -EFAULT;
+ }
+
+
+ num_packets = my_atoi(foo);
+ kfree (foo);
+
+ /* This has an arbitrary limit to make the math easier. I'm lazy.
+ But anyway, 99 is a LOT! If you want more, you're doing it wrong! */
+ if(num_packets > 99) {
+ printk(KERN_WARNING "layer7: num_packets can't be > 99.\n");
+ num_packets = 99;
+ } else if(num_packets < 1) {
+ printk(KERN_WARNING "layer7: num_packets can't be < 1.\n");
+ num_packets = 1;
+ }
+
+ return count;
+}
+
+static bool
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+match(const struct sk_buff *skbin, struct xt_action_param *par)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
+match(const struct sk_buff *skbin, const struct xt_match_param *par)
+#else
+match(const struct sk_buff *skbin,
+ const struct net_device *in,
+ const struct net_device *out,
+ const struct xt_match *match,
+ const void *matchinfo,
+ int offset,
+ unsigned int protoff,
+ bool *hotdrop)
+#endif
+{
+ /* sidestep const without getting a compiler warning... */
+ struct sk_buff *skb = (struct sk_buff *)skbin;
+
+ const struct xt_layer7_info * info =
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
+ par->matchinfo;
+ #else
+ matchinfo;
+ #endif
+
+ enum ip_conntrack_info master_ctinfo, ctinfo;
+ struct nf_conn *master_conntrack, *conntrack;
+ unsigned char * app_data;
+ unsigned int pattern_result, appdatalen;
+ regexp * comppattern;
+
+ /* Be paranoid/incompetent - lock the entire match function. */
+ spin_lock_bh(&l7_lock);
+
+ if(!can_handle(skb)){
+ DPRINTK("layer7: This is some protocol I can't handle.\n");
+ spin_unlock_bh(&l7_lock);
+ return info->invert;
+ }
+
+ /* Treat parent & all its children together as one connection, except
+ for the purpose of setting conntrack->layer7.app_proto in the actual
+ connection. This makes /proc/net/ip_conntrack more satisfying. */
+ if(!(conntrack = nf_ct_get(skb, &ctinfo)) ||
+ !(master_conntrack=nf_ct_get(skb,&master_ctinfo))){
+ DPRINTK("layer7: couldn't get conntrack.\n");
+ spin_unlock_bh(&l7_lock);
+ return info->invert;
+ }
+
+ /* Try to get a master conntrack (and its master etc) for FTP, etc. */
+ while (master_ct(master_conntrack) != NULL)
+ master_conntrack = master_ct(master_conntrack);
+
+ /* if we've classified it or seen too many packets */
+ if(total_acct_packets(master_conntrack) > num_packets ||
+ master_conntrack->layer7.app_proto) {
+
+ pattern_result = match_no_append(conntrack, master_conntrack,
+ ctinfo, master_ctinfo, info);
+
+ /* skb->cb[0] == seen. Don't do things twice if there are
+ multiple l7 rules. I'm not sure that using cb for this purpose
+ is correct, even though it says "put your private variables
+ there". But it doesn't look like it is being used for anything
+ else in the skbs that make it here. */
+ skb->cb[0] = 1; /* marking it seen here's probably irrelevant */
+
+ spin_unlock_bh(&l7_lock);
+ return (pattern_result ^ info->invert);
+ }
+
+ if(skb_is_nonlinear(skb)){
+ if(skb_linearize(skb) != 0){
+ if (net_ratelimit())
+ printk(KERN_ERR "layer7: failed to linearize "
+ "packet, bailing.\n");
+ spin_unlock_bh(&l7_lock);
+ return info->invert;
+ }
+ }
+
+ /* now that the skb is linearized, it's safe to set these. */
+ app_data = skb->data + app_data_offset(skb);
+ appdatalen = skb_tail_pointer(skb) - app_data;
+
+ /* the return value gets checked later, when we're ready to use it */
+ comppattern = compile_and_cache(info->pattern, info->protocol);
+
+ /* On the first packet of a connection, allocate space for app data */
+ if(total_acct_packets(master_conntrack) == 1 && !skb->cb[0] &&
+ !master_conntrack->layer7.app_data){
+ master_conntrack->layer7.app_data =
+ kmalloc(maxdatalen, GFP_ATOMIC);
+ if(!master_conntrack->layer7.app_data){
+ if (net_ratelimit())
+ printk(KERN_ERR "layer7: out of memory in "
+ "match, bailing.\n");
+ spin_unlock_bh(&l7_lock);
+ return info->invert;
+ }
+
+ master_conntrack->layer7.app_data[0] = '\0';
+ }
+
+ /* Can be here, but unallocated, if numpackets is increased near
+ the beginning of a connection */
+ if(master_conntrack->layer7.app_data == NULL){
+ spin_unlock_bh(&l7_lock);
+ return info->invert; /* unmatched */
+ }
+
+ if(!skb->cb[0]){
+ int newbytes;
+ newbytes = add_data(master_conntrack, app_data, appdatalen);
+
+ if(newbytes == 0) { /* didn't add any data */
+ skb->cb[0] = 1;
+ /* Didn't match before, not going to match now */
+ spin_unlock_bh(&l7_lock);
+ return info->invert;
+ }
+ }
+
+ /* If looking for "unknown", then never match. "Unknown" means that
+ we've given up; we're still trying with these packets. */
+ if(!strcmp(info->protocol, "unknown")) {
+ pattern_result = 0;
+ /* If looking for "unset", then always match. "Unset" means that we
+ haven't yet classified the connection. */
+ } else if(!strcmp(info->protocol, "unset")) {
+ pattern_result = 2;
+ DPRINTK("layer7: matched unset: not yet classified "
+ "(%d/%d packets)\n",
+ total_acct_packets(master_conntrack), num_packets);
+ /* If the regexp failed to compile, don't bother running it */
+ } else if(comppattern &&
+ regexec(comppattern, master_conntrack->layer7.app_data)){
+ DPRINTK("layer7: matched %s\n", info->protocol);
+ pattern_result = 1;
+ } else pattern_result = 0;
+
+ if(pattern_result == 1) {
+ master_conntrack->layer7.app_proto =
+ kmalloc(strlen(info->protocol)+1, GFP_ATOMIC);
+ if(!master_conntrack->layer7.app_proto){
+ if (net_ratelimit())
+ printk(KERN_ERR "layer7: out of memory in "
+ "match, bailing.\n");
+ spin_unlock_bh(&l7_lock);
+ return (pattern_result ^ info->invert);
+ }
+ strcpy(master_conntrack->layer7.app_proto, info->protocol);
+ } else if(pattern_result > 1) { /* cleanup from "unset" */
+ pattern_result = 1;
+ }
+
+ /* mark the packet seen */
+ skb->cb[0] = 1;
+
+ spin_unlock_bh(&l7_lock);
+ return (pattern_result ^ info->invert);
+}
+
+// load nf_conntrack_ipv4
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+static int
+#else
+static bool
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
+check(const struct xt_mtchk_param *par)
+{
+ if (nf_ct_l3proto_try_module_get(par->match->family) < 0) {
+ printk(KERN_WARNING "can't load conntrack support for "
+ "proto=%d\n", par->match->family);
+#else
+check(const char *tablename, const void *inf,
+ const struct xt_match *match, void *matchinfo,
+ unsigned int hook_mask)
+{
+ if (nf_ct_l3proto_try_module_get(match->family) < 0) {
+ printk(KERN_WARNING "can't load conntrack support for "
+ "proto=%d\n", match->family);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+ return -EINVAL;
+ }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+ if (!nf_ct_acct_enabled(par->net)) {
+ pr_warning("Forcing CT accounting to be enabled\n");
+ nf_ct_set_acct(par->net, true);
+ }
+#endif
+ return 0;
+#else
+ return 0;
+}
+ return 1;
+#endif
+}
+
+static void destroy(const struct xt_mtdtor_param *par)
+{
+ nf_ct_l3proto_module_put(par->match->family);
+}
+
+static struct xt_match xt_layer7_match[] __read_mostly = {
+{
+ .name = "layer7",
+ .family = AF_INET,
+ .checkentry = check,
+ .match = match,
+ .destroy = destroy,
+ .matchsize = sizeof(struct xt_layer7_info),
+ .me = THIS_MODULE
+}
+};
+
+static void layer7_cleanup_proc(void)
+{
+ remove_proc_entry("layer7_numpackets", init_net.proc_net);
+}
+
+/* register the proc file */
+static void layer7_init_proc(void)
+{
+ struct proc_dir_entry* entry;
+ entry = create_proc_entry("layer7_numpackets", 0644, init_net.proc_net);
+ entry->read_proc = layer7_read_proc;
+ entry->write_proc = layer7_write_proc;
+}
+
+static int __init xt_layer7_init(void)
+{
+ need_conntrack();
+
+ layer7_init_proc();
+ if(maxdatalen < 1) {
+ printk(KERN_WARNING "layer7: maxdatalen can't be < 1, "
+ "using 1\n");
+ maxdatalen = 1;
+ }
+ /* This is not a hard limit. It's just here to prevent people from
+ bringing their slow machines to a grinding halt. */
+ else if(maxdatalen > 65536) {
+ printk(KERN_WARNING "layer7: maxdatalen can't be > 65536, "
+ "using 65536\n");
+ maxdatalen = 65536;
+ }
+ return xt_register_matches(xt_layer7_match,
+ ARRAY_SIZE(xt_layer7_match));
+}
+
+static void __exit xt_layer7_fini(void)
+{
+ layer7_cleanup_proc();
+ xt_unregister_matches(xt_layer7_match, ARRAY_SIZE(xt_layer7_match));
+}
+
+module_init(xt_layer7_init);
+module_exit(xt_layer7_fini);
Index: linux-3.18.21/net/packet/af_packet.c
===================================================================
--- linux-3.18.21.orig/net/packet/af_packet.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/packet/af_packet.c 2018-02-05 13:21:32.000000000 +0800
@@ -94,6 +94,8 @@
#endif
#include "internal.h"
+#include "ecnt_packet.h"
+#include <ecnt_hook/ecnt_hook_net.h>
/*
Assumptions:
@@ -1761,8 +1763,10 @@
* structure, so that corresponding packet head is
* never delivered to user.
*/
- if (sk->sk_type != SOCK_DGRAM)
+ if (sk->sk_type != SOCK_DGRAM){
skb_push(skb, skb->data - skb_mac_header(skb));
+ ecnt_packet_rcv_inline_hook(skb, orig_dev);
+ }
else if (skb->pkt_type == PACKET_OUTGOING) {
/* Special case: outgoing packets have ll header at head */
skb_pull(skb, skb_network_offset(skb));
@@ -1782,6 +1786,7 @@
if (skb_shared(skb)) {
struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+ ECNT_PACKET_RCV_HOOK(ECNT_NET_AF_PACKET_RCV, skb, nskb);
if (nskb == NULL)
goto drop_n_acct;
@@ -2516,6 +2521,8 @@
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
+ ecnt_packet_snd_inline_hook(sk,skb,RECEIVE_OR_SEND,FOEINFORECOVER);
+
packet_pick_tx_queue(dev, skb);
if (po->has_vnet_hdr) {
@@ -2884,6 +2891,8 @@
if (skb == NULL)
goto out;
+ ecnt_packet_recvmsg_inline_hook(sk,skb,RECEIVE_OR_SEND,FOEINFOSTORE);
+
if (pkt_sk(sk)->has_vnet_hdr) {
struct virtio_net_hdr vnet_hdr = { 0 };
@@ -3390,8 +3399,18 @@
return 0;
}
default:
+ {
+ int ret;
+ ret = ecnt_packet_setsockopt_inline_hook(sk, optname, optval, optlen) ;
+ if( ret == ECNT_RETURN )
+ return 0 ;
+ else if ( ret == ECNT_HOOK_ERROR )
+ return -EFAULT;
+
return -ENOPROTOOPT;
}
+
+ }
}
static int packet_getsockopt(struct socket *sock, int level, int optname,
@@ -3485,7 +3504,18 @@
val = packet_use_direct_xmit(po);
break;
default:
+ {
+ int ret ;
+ ret = ecnt_packet_getsockopt_inline_hook(sk, optname, optval, optlen, len, lv, data);
+ if(ECNT_RETURN == ret)
+ return 0 ;
+ else if(ECNT_HOOK_ERROR == ret )
+ return -EFAULT ;
+
return -ENOPROTOOPT;
+
+ }
+
}
if (len > lv)
Index: linux-3.18.21/net/packet/ecnt_packet.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/net/packet/ecnt_packet.h 2018-02-05 14:20:41.000000000 +0800
@@ -0,0 +1,166 @@
+#ifndef _LINUX_ECNT_BR_H
+#define _LINUX_ECNT_BR_H
+
+#include <linux/foe_hook.h>
+#include <ecnt_hook/ecnt_hook.h>
+#include <linux/ecnt_in.h>
+
+/*TCSUPPORT_MULTICAST_SPEED start*/
+#ifdef TCSUPPORT_RA_HWNAT
+ extern int (*hwnat_set_recover_info_hook)(struct sk_buff* skb, struct sock* sk,int flag);
+#else
+ static int (*hwnat_set_recover_info_hook)(struct sk_buff* skb, struct sock* sk,int flag) = NULL;
+#endif
+/*TCSUPPORT_MULTICAST_SPEED end*/
+
+
+#ifdef TCSUPPORT_PON_VLAN
+extern int (*pon_check_tpid_hook)(__u16 * buf);
+#endif
+
+#define FOEINFOSTORE 0
+#define FOEINFORECOVER 1
+#define RECEIVE_OR_SEND 0
+#define SET_OR_GET_SOCKOPT 1
+
+static void foe_info_op(struct sock *sk,void *ptr,int opmode,int direction)
+{
+ struct sk_buff * skb;
+ struct SkbFoeInfo *skbfoeinfo;
+
+ switch(opmode)
+ {
+ case RECEIVE_OR_SEND:
+ skb = (struct sk_buff *)ptr;
+ if(hwnat_set_recover_info_hook)
+ hwnat_set_recover_info_hook(skb,sk,direction);
+ break;
+
+ case SET_OR_GET_SOCKOPT:
+ skbfoeinfo = (struct SkbFoeInfo *)ptr;
+ if(FOEINFORECOVER == direction)
+ {
+ skbfoeinfo->ppe_ai = sk->sk_foe_info.ppe_ai;
+ skbfoeinfo->ppe_foe_entry = sk->sk_foe_info.ppe_foe_entry;
+ skbfoeinfo->ppe_magic = sk->sk_foe_info.ppe_magic;
+ skbfoeinfo->wan_type= sk->sk_foe_info.wan_type;
+ skbfoeinfo->wan_index = sk->sk_foe_info.wan_index;
+ }
+ else
+ {
+ sk->sk_foe_info.ppe_ai = skbfoeinfo->ppe_ai;
+ sk->sk_foe_info.ppe_foe_entry = skbfoeinfo->ppe_foe_entry;
+ sk->sk_foe_info.ppe_magic = skbfoeinfo->ppe_magic;
+ sk->sk_foe_info.wan_type = skbfoeinfo->wan_type;
+ sk->sk_foe_info.wan_index = skbfoeinfo->wan_index;
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+static inline void ecnt_packet_snd_inline_hook(struct sock *sk,void *ptr,int opmode,int direction)
+{
+#ifdef TCSUPPORT_MULTICAST_SPEED
+
+
+
+ foe_info_op(sk , ptr , opmode , direction);
+#endif
+
+ return ;
+}
+
+static inline void ecnt_packet_recvmsg_inline_hook(struct sock *sk,void *ptr,int opmode,int direction)
+{
+#ifdef TCSUPPORT_MULTICAST_SPEED
+ foe_info_op(sk , ptr , opmode , direction);
+#endif
+
+ return ;
+}
+
+void ecnt_packet_rcv_inline_hook(struct sk_buff *skb, struct net_device *orig_dev)
+{
+
+#if defined(TCSUPPORT_PON_VLAN)
+ u16 *proto = NULL;
+ if(orig_dev->name[0] == 'n')
+ {
+ proto = (u16*)(skb->data + 12);
+ while(pon_check_tpid_hook && (pon_check_tpid_hook(proto) == 1))
+ {
+ memmove(skb->data + VLAN_HLEN, skb->data, 12);
+ skb_pull(skb, VLAN_HLEN);
+ proto = (u16*)(skb->data + 12);
+ }
+ }
+#endif
+
+ return;
+}
+
+
+static inline int ecnt_packet_setsockopt_inline_hook(struct sock *sk,int optname,
+ char __user *optval,unsigned int optlen )
+{
+ switch (optname)
+ {
+#ifdef TCSUPPORT_MULTICAST_SPEED
+ case PACKET_SKB_FOE_INFO:
+ {
+ struct SkbFoeInfo skbfoeinfo;
+
+ if (optlen < sizeof(struct SkbFoeInfo))
+ return ECNT_HOOK_ERROR;
+
+ if (copy_from_user(&skbfoeinfo, optval, sizeof(struct SkbFoeInfo)))
+ return ECNT_HOOK_ERROR;
+
+ foe_info_op(sk,&skbfoeinfo,SET_OR_GET_SOCKOPT,FOEINFOSTORE);
+ return ECNT_RETURN;
+ }
+#endif
+ default:
+ return ECNT_CONTINUE ;
+ }
+
+ return ECNT_CONTINUE ;
+
+}
+
+static inline int ecnt_packet_getsockopt_inline_hook(struct sock *sk,int optname,
+ char __user *optval,int __user *optlen,int len, int lv,void *data )
+{
+ struct SkbFoeInfo skbfoeinfo;
+
+ switch (optname)
+ {
+#ifdef TCSUPPORT_MULTICAST_SPEED
+ case PACKET_SKB_FOE_INFO:
+ {
+ memset(&skbfoeinfo, 0, sizeof(struct SkbFoeInfo));
+ if (len > sizeof(struct SkbFoeInfo))
+ len = sizeof(struct SkbFoeInfo);
+
+ foe_info_op(sk,&skbfoeinfo,SET_OR_GET_SOCKOPT,FOEINFORECOVER);
+ data = &skbfoeinfo;
+ if (put_user(len, optlen))
+ return ECNT_HOOK_ERROR;
+ if (copy_to_user(optval, data, len))
+ return ECNT_HOOK_ERROR;
+ return ECNT_RETURN;
+ }
+#endif
+ default:
+ return ECNT_CONTINUE ;
+ }
+
+ return ECNT_CONTINUE ;
+
+}
+
+#endif
+
Index: linux-3.18.21/net/socket.c
===================================================================
--- linux-3.18.21.orig/net/socket.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/socket.c 2018-02-05 13:21:33.000000000 +0800
@@ -107,6 +107,7 @@
#include <linux/atalk.h>
#include <net/busy_poll.h>
#include <linux/errqueue.h>
+#include <linux/ecnt_skbuff.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sysctl_net_busy_read __read_mostly;
@@ -392,7 +393,7 @@
}
EXPORT_SYMBOL(sock_alloc_file);
-static int sock_map_fd(struct socket *sock, int flags)
+int sock_map_fd(struct socket *sock, int flags)
{
struct file *newfile;
int fd = get_unused_fd_flags(flags);
@@ -408,6 +409,7 @@
put_unused_fd(fd);
return PTR_ERR(newfile);
}
+EXPORT_SYMBOL(sock_map_fd);
struct socket *sock_from_file(struct file *file, int *err)
{
@@ -1036,7 +1038,19 @@
* With an ioctl, arg may well be a user mode pointer, but we don't know
* what to do with it - that's up to the protocol still.
*/
-
+//#ifdef CONFIG_SMUX
+ static DEFINE_MUTEX(smux_ioctl_mutex);
+ static int (*smux_ioctl_hook) (void __user *arg);
+
+ void smux_ioctl_set(int (*hook) (void __user *))
+ {
+ mutex_lock(&smux_ioctl_mutex);
+ smux_ioctl_hook = hook;
+ mutex_unlock(&smux_ioctl_mutex);
+ }
+
+ EXPORT_SYMBOL(smux_ioctl_set);
+//#endif /* CONFIG_SMUX */
static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
struct socket *sock;
@@ -1105,6 +1119,18 @@
err = dlci_ioctl_hook(cmd, argp);
mutex_unlock(&dlci_ioctl_mutex);
break;
+
+//#ifdef CONFIG_SMUX
+ case SIOCSIFSMUX:
+ err = -ENOPKG;
+ if(!smux_ioctl_hook)
+ request_module("smux");
+ mutex_lock(&smux_ioctl_mutex);
+ if(smux_ioctl_hook)
+ err = smux_ioctl_hook(argp);
+ mutex_unlock(&smux_ioctl_mutex);
+ break;
+//#endif /* CONFIG_SMUX */
default:
err = sock_do_ioctl(net, sock, cmd, arg);
break;
@@ -2690,6 +2716,8 @@
ptp_classifier_init();
+ ecnt_net_filter_hook_init();
+
out:
return err;
Index: linux-3.18.21/net/xfrm/xfrm_input.c
===================================================================
--- linux-3.18.21.orig/net/xfrm/xfrm_input.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/xfrm/xfrm_input.c 2018-02-05 13:21:33.000000000 +0800
@@ -15,6 +15,8 @@
#include <net/xfrm.h>
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>
+#include <ecnt_hook/ecnt_hook_crypto.h>
+
static struct kmem_cache *secpath_cachep __read_mostly;
@@ -293,10 +295,14 @@
skb_dst_force(skb);
- nexthdr = x->type->input(x, skb);
+ nexthdr = x->type->input(x, skb);
+
+ if (nexthdr == -EINPROGRESS)
+ return 0;
- if (nexthdr == -EINPROGRESS)
- return 0;
+ if (CRYPTO_API_GET_XFRM_INPUT_RET(ECNT_CRYPTO, nexthdr) == ECNT_RETURN_DROP)
+ goto drop;
+
resume:
spin_lock(&x->lock);
if (nexthdr <= 0) {
Index: linux-3.18.21/net/xfrm/xfrm_output.c
===================================================================
--- linux-3.18.21.orig/net/xfrm/xfrm_output.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/xfrm/xfrm_output.c 2018-02-05 13:21:33.000000000 +0800
@@ -19,6 +19,8 @@
#include <net/dst.h>
#include <net/xfrm.h>
+#include <ecnt_hook/ecnt_hook_crypto.h>
+
static int xfrm_output2(struct sk_buff *skb);
static int xfrm_skb_check_space(struct sk_buff *skb)
@@ -46,6 +48,22 @@
if (err <= 0)
goto resume;
+/*
+ if(VPN_PASSTHROUGH_SWITCH_ON == gpVpnPTPara->vpnpassthroughswitch)
+ {
+ struct ipsec_para_s ipsec_data;
+ ipsec_data.flag = HWNAT_IPSEC_LEARING;
+ ipsec_data.data.learn.skb = skb;
+ ipsec_data.data.learn.x = x;
+ err = ipsec_esp_output_pt(&ipsec_data);
+ if (err)
+ goto error_nolock;
+ }
+kernel 2.6.36 add code, 3.18 add hook as below!!
+*/
+ err = CRYPTO_API_ESP_OUTPUT_PT_LEARN(ECNT_CRYPTO, x, skb);
+ if (err != ECNT_CONTINUE)
+ return err;
do {
err = xfrm_skb_check_space(skb);
Index: linux-3.18.21/net/xfrm/xfrm_state.c
===================================================================
--- linux-3.18.21.orig/net/xfrm/xfrm_state.c 2015-09-01 06:19:23.000000000 +0800
+++ linux-3.18.21/net/xfrm/xfrm_state.c 2018-02-05 13:21:34.000000000 +0800
@@ -27,6 +27,7 @@
#include <linux/kernel.h>
#include "xfrm_hash.h"
+#include <ecnt_hook/ecnt_hook_crypto.h>
/* Each xfrm_state may be linked to two tables:
@@ -533,6 +534,8 @@
*/
xfrm_state_put(x);
err = 0;
+
+ CRYPTO_API_FREE_ADAPTER(ECNT_CRYPTO, x->id.spi);
}
return err;
Index: linux-3.18.21/scripts/kconfig/zconf.hash.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/scripts/kconfig/zconf.hash.c 2018-02-05 14:29:17.000000000 +0800
@@ -0,0 +1,289 @@
+/* ANSI-C code produced by gperf version 3.0.4 */
+/* Command-line: gperf -t --output-file scripts/kconfig/zconf.hash.c_shipped -a -C -E -g -k '1,3,$' -p -t scripts/kconfig/zconf.gperf */
+
+#if !((' ' == 32) && ('!' == 33) && ('"' == 34) && ('#' == 35) \
+ && ('%' == 37) && ('&' == 38) && ('\'' == 39) && ('(' == 40) \
+ && (')' == 41) && ('*' == 42) && ('+' == 43) && (',' == 44) \
+ && ('-' == 45) && ('.' == 46) && ('/' == 47) && ('0' == 48) \
+ && ('1' == 49) && ('2' == 50) && ('3' == 51) && ('4' == 52) \
+ && ('5' == 53) && ('6' == 54) && ('7' == 55) && ('8' == 56) \
+ && ('9' == 57) && (':' == 58) && (';' == 59) && ('<' == 60) \
+ && ('=' == 61) && ('>' == 62) && ('?' == 63) && ('A' == 65) \
+ && ('B' == 66) && ('C' == 67) && ('D' == 68) && ('E' == 69) \
+ && ('F' == 70) && ('G' == 71) && ('H' == 72) && ('I' == 73) \
+ && ('J' == 74) && ('K' == 75) && ('L' == 76) && ('M' == 77) \
+ && ('N' == 78) && ('O' == 79) && ('P' == 80) && ('Q' == 81) \
+ && ('R' == 82) && ('S' == 83) && ('T' == 84) && ('U' == 85) \
+ && ('V' == 86) && ('W' == 87) && ('X' == 88) && ('Y' == 89) \
+ && ('Z' == 90) && ('[' == 91) && ('\\' == 92) && (']' == 93) \
+ && ('^' == 94) && ('_' == 95) && ('a' == 97) && ('b' == 98) \
+ && ('c' == 99) && ('d' == 100) && ('e' == 101) && ('f' == 102) \
+ && ('g' == 103) && ('h' == 104) && ('i' == 105) && ('j' == 106) \
+ && ('k' == 107) && ('l' == 108) && ('m' == 109) && ('n' == 110) \
+ && ('o' == 111) && ('p' == 112) && ('q' == 113) && ('r' == 114) \
+ && ('s' == 115) && ('t' == 116) && ('u' == 117) && ('v' == 118) \
+ && ('w' == 119) && ('x' == 120) && ('y' == 121) && ('z' == 122) \
+ && ('{' == 123) && ('|' == 124) && ('}' == 125) && ('~' == 126))
+/* The character set is not based on ISO-646. */
+#error "gperf generated tables don't work with this execution character set. Please report a bug to <bug-gnu-gperf@gnu.org>."
+#endif
+
+#line 10 "scripts/kconfig/zconf.gperf"
+struct kconf_id;
+
+static const struct kconf_id *kconf_id_lookup(register const char *str, register unsigned int len);
+/* maximum key range = 71, duplicates = 0 */
+
+#ifdef __GNUC__
+__inline
+#else
+#ifdef __cplusplus
+inline
+#endif
+#endif
+static unsigned int
+kconf_id_hash (register const char *str, register unsigned int len)
+{
+ static const unsigned char asso_values[] =
+ {
+ 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
+ 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
+ 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
+ 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
+ 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
+ 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
+ 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
+ 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
+ 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
+ 73, 73, 73, 73, 73, 73, 73, 5, 25, 25,
+ 0, 0, 0, 5, 0, 0, 73, 73, 5, 0,
+ 10, 5, 45, 73, 20, 20, 0, 15, 15, 73,
+ 20, 5, 73, 73, 73, 73, 73, 73, 73, 73,
+ 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
+ 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
+ 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
+ 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
+ 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
+ 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
+ 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
+ 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
+ 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
+ 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
+ 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
+ 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
+ 73, 73, 73, 73, 73, 73
+ };
+ register int hval = len;
+
+ switch (hval)
+ {
+ default:
+ hval += asso_values[(unsigned char)str[2]];
+ /*FALLTHROUGH*/
+ case 2:
+ case 1:
+ hval += asso_values[(unsigned char)str[0]];
+ break;
+ }
+ return hval + asso_values[(unsigned char)str[len - 1]];
+}
+
+struct kconf_id_strings_t
+ {
+ char kconf_id_strings_str2[sizeof("if")];
+ char kconf_id_strings_str3[sizeof("int")];
+ char kconf_id_strings_str5[sizeof("endif")];
+ char kconf_id_strings_str7[sizeof("default")];
+ char kconf_id_strings_str8[sizeof("tristate")];
+ char kconf_id_strings_str9[sizeof("endchoice")];
+ char kconf_id_strings_str12[sizeof("def_tristate")];
+ char kconf_id_strings_str13[sizeof("def_bool")];
+ char kconf_id_strings_str14[sizeof("defconfig_list")];
+ char kconf_id_strings_str17[sizeof("on")];
+ char kconf_id_strings_str18[sizeof("optional")];
+ char kconf_id_strings_str21[sizeof("option")];
+ char kconf_id_strings_str22[sizeof("endmenu")];
+ char kconf_id_strings_str23[sizeof("mainmenu")];
+ char kconf_id_strings_str25[sizeof("menuconfig")];
+ char kconf_id_strings_str27[sizeof("modules")];
+ char kconf_id_strings_str28[sizeof("allnoconfig_y")];
+ char kconf_id_strings_str29[sizeof("menu")];
+ char kconf_id_strings_str31[sizeof("select")];
+ char kconf_id_strings_str32[sizeof("comment")];
+ char kconf_id_strings_str33[sizeof("env")];
+ char kconf_id_strings_str35[sizeof("range")];
+ char kconf_id_strings_str36[sizeof("choice")];
+ char kconf_id_strings_str39[sizeof("bool")];
+ char kconf_id_strings_str41[sizeof("source")];
+ char kconf_id_strings_str42[sizeof("visible")];
+ char kconf_id_strings_str43[sizeof("hex")];
+ char kconf_id_strings_str46[sizeof("config")];
+ char kconf_id_strings_str47[sizeof("boolean")];
+ char kconf_id_strings_str51[sizeof("string")];
+ char kconf_id_strings_str54[sizeof("help")];
+ char kconf_id_strings_str56[sizeof("prompt")];
+ char kconf_id_strings_str72[sizeof("depends")];
+ };
+static const struct kconf_id_strings_t kconf_id_strings_contents =
+ {
+ "if",
+ "int",
+ "endif",
+ "default",
+ "tristate",
+ "endchoice",
+ "def_tristate",
+ "def_bool",
+ "defconfig_list",
+ "on",
+ "optional",
+ "option",
+ "endmenu",
+ "mainmenu",
+ "menuconfig",
+ "modules",
+ "allnoconfig_y",
+ "menu",
+ "select",
+ "comment",
+ "env",
+ "range",
+ "choice",
+ "bool",
+ "source",
+ "visible",
+ "hex",
+ "config",
+ "boolean",
+ "string",
+ "help",
+ "prompt",
+ "depends"
+ };
+#define kconf_id_strings ((const char *) &kconf_id_strings_contents)
+#ifdef __GNUC__
+__inline
+#if defined __GNUC_STDC_INLINE__ || defined __GNUC_GNU_INLINE__
+__attribute__ ((__gnu_inline__))
+#endif
+#endif
+const struct kconf_id *
+kconf_id_lookup (register const char *str, register unsigned int len)
+{
+ enum
+ {
+ TOTAL_KEYWORDS = 33,
+ MIN_WORD_LENGTH = 2,
+ MAX_WORD_LENGTH = 14,
+ MIN_HASH_VALUE = 2,
+ MAX_HASH_VALUE = 72
+ };
+
+ static const struct kconf_id wordlist[] =
+ {
+ {-1}, {-1},
+#line 25 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str2, T_IF, TF_COMMAND|TF_PARAM},
+#line 36 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str3, T_TYPE, TF_COMMAND, S_INT},
+ {-1},
+#line 26 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str5, T_ENDIF, TF_COMMAND},
+ {-1},
+#line 29 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str7, T_DEFAULT, TF_COMMAND, S_UNKNOWN},
+#line 31 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str8, T_TYPE, TF_COMMAND, S_TRISTATE},
+#line 20 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str9, T_ENDCHOICE, TF_COMMAND},
+ {-1}, {-1},
+#line 32 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str12, T_DEFAULT, TF_COMMAND, S_TRISTATE},
+#line 35 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str13, T_DEFAULT, TF_COMMAND, S_BOOLEAN},
+#line 45 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str14, T_OPT_DEFCONFIG_LIST,TF_OPTION},
+ {-1}, {-1},
+#line 43 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str17, T_ON, TF_PARAM},
+#line 28 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str18, T_OPTIONAL, TF_COMMAND},
+ {-1}, {-1},
+#line 42 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str21, T_OPTION, TF_COMMAND},
+#line 17 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str22, T_ENDMENU, TF_COMMAND},
+#line 15 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str23, T_MAINMENU, TF_COMMAND},
+ {-1},
+#line 23 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str25, T_MENUCONFIG, TF_COMMAND},
+ {-1},
+#line 44 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str27, T_OPT_MODULES, TF_OPTION},
+#line 47 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str28, T_OPT_ALLNOCONFIG_Y,TF_OPTION},
+#line 16 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str29, T_MENU, TF_COMMAND},
+ {-1},
+#line 39 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str31, T_SELECT, TF_COMMAND},
+#line 21 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str32, T_COMMENT, TF_COMMAND},
+#line 46 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str33, T_OPT_ENV, TF_OPTION},
+ {-1},
+#line 40 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str35, T_RANGE, TF_COMMAND},
+#line 19 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str36, T_CHOICE, TF_COMMAND},
+ {-1}, {-1},
+#line 33 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str39, T_TYPE, TF_COMMAND, S_BOOLEAN},
+ {-1},
+#line 18 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str41, T_SOURCE, TF_COMMAND},
+#line 41 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str42, T_VISIBLE, TF_COMMAND},
+#line 37 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str43, T_TYPE, TF_COMMAND, S_HEX},
+ {-1}, {-1},
+#line 22 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str46, T_CONFIG, TF_COMMAND},
+#line 34 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str47, T_TYPE, TF_COMMAND, S_BOOLEAN},
+ {-1}, {-1}, {-1},
+#line 38 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str51, T_TYPE, TF_COMMAND, S_STRING},
+ {-1}, {-1},
+#line 24 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str54, T_HELP, TF_COMMAND},
+ {-1},
+#line 30 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str56, T_PROMPT, TF_COMMAND},
+ {-1}, {-1}, {-1}, {-1}, {-1}, {-1}, {-1}, {-1}, {-1},
+ {-1}, {-1}, {-1}, {-1}, {-1}, {-1},
+#line 27 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str72, T_DEPENDS, TF_COMMAND}
+ };
+
+ if (len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH)
+ {
+ register int key = kconf_id_hash (str, len);
+
+ if (key <= MAX_HASH_VALUE && key >= 0)
+ {
+ register int o = wordlist[key].name;
+ if (o >= 0)
+ {
+ register const char *s = o + kconf_id_strings;
+
+ if (*str == *s && !strncmp (str + 1, s + 1, len - 1) && s[len] == '\0')
+ return &wordlist[key];
+ }
+ }
+ }
+ return 0;
+}
+#line 48 "scripts/kconfig/zconf.gperf"
+
Index: linux-3.18.21/scripts/kconfig/zconf.lex.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/scripts/kconfig/zconf.lex.c 2018-02-05 14:29:17.000000000 +0800
@@ -0,0 +1,2420 @@
+
+#line 3 "scripts/kconfig/zconf.lex.c_shipped"
+
+#define YY_INT_ALIGNED short int
+
+/* A lexical scanner generated by flex */
+
+#define yy_create_buffer zconf_create_buffer
+#define yy_delete_buffer zconf_delete_buffer
+#define yy_flex_debug zconf_flex_debug
+#define yy_init_buffer zconf_init_buffer
+#define yy_flush_buffer zconf_flush_buffer
+#define yy_load_buffer_state zconf_load_buffer_state
+#define yy_switch_to_buffer zconf_switch_to_buffer
+#define yyin zconfin
+#define yyleng zconfleng
+#define yylex zconflex
+#define yylineno zconflineno
+#define yyout zconfout
+#define yyrestart zconfrestart
+#define yytext zconftext
+#define yywrap zconfwrap
+#define yyalloc zconfalloc
+#define yyrealloc zconfrealloc
+#define yyfree zconffree
+
+#define FLEX_SCANNER
+#define YY_FLEX_MAJOR_VERSION 2
+#define YY_FLEX_MINOR_VERSION 5
+#define YY_FLEX_SUBMINOR_VERSION 35
+#if YY_FLEX_SUBMINOR_VERSION > 0
+#define FLEX_BETA
+#endif
+
+/* First, we deal with platform-specific or compiler-specific issues. */
+
+/* begin standard C headers. */
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <stdlib.h>
+
+/* end standard C headers. */
+
+/* flex integer type definitions */
+
+#ifndef FLEXINT_H
+#define FLEXINT_H
+
+/* C99 systems have <inttypes.h>. Non-C99 systems may or may not. */
+
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+
+/* C99 says to define __STDC_LIMIT_MACROS before including stdint.h,
+ * if you want the limit (max/min) macros for int types.
+ */
+#ifndef __STDC_LIMIT_MACROS
+#define __STDC_LIMIT_MACROS 1
+#endif
+
+#include <inttypes.h>
+typedef int8_t flex_int8_t;
+typedef uint8_t flex_uint8_t;
+typedef int16_t flex_int16_t;
+typedef uint16_t flex_uint16_t;
+typedef int32_t flex_int32_t;
+typedef uint32_t flex_uint32_t;
+#else
+typedef signed char flex_int8_t;
+typedef short int flex_int16_t;
+typedef int flex_int32_t;
+typedef unsigned char flex_uint8_t;
+typedef unsigned short int flex_uint16_t;
+typedef unsigned int flex_uint32_t;
+#endif /* ! C99 */
+
+/* Limits of integral types. */
+#ifndef INT8_MIN
+#define INT8_MIN (-128)
+#endif
+#ifndef INT16_MIN
+#define INT16_MIN (-32767-1)
+#endif
+#ifndef INT32_MIN
+#define INT32_MIN (-2147483647-1)
+#endif
+#ifndef INT8_MAX
+#define INT8_MAX (127)
+#endif
+#ifndef INT16_MAX
+#define INT16_MAX (32767)
+#endif
+#ifndef INT32_MAX
+#define INT32_MAX (2147483647)
+#endif
+#ifndef UINT8_MAX
+#define UINT8_MAX (255U)
+#endif
+#ifndef UINT16_MAX
+#define UINT16_MAX (65535U)
+#endif
+#ifndef UINT32_MAX
+#define UINT32_MAX (4294967295U)
+#endif
+
+#endif /* ! FLEXINT_H */
+
+#ifdef __cplusplus
+
+/* The "const" storage-class-modifier is valid. */
+#define YY_USE_CONST
+
+#else /* ! __cplusplus */
+
+/* C99 requires __STDC__ to be defined as 1. */
+#if defined (__STDC__)
+
+#define YY_USE_CONST
+
+#endif /* defined (__STDC__) */
+#endif /* ! __cplusplus */
+
+#ifdef YY_USE_CONST
+#define yyconst const
+#else
+#define yyconst
+#endif
+
+/* Returned upon end-of-file. */
+#define YY_NULL 0
+
+/* Promotes a possibly negative, possibly signed char to an unsigned
+ * integer for use as an array index. If the signed char is negative,
+ * we want to instead treat it as an 8-bit unsigned char, hence the
+ * double cast.
+ */
+#define YY_SC_TO_UI(c) ((unsigned int) (unsigned char) c)
+
+/* Enter a start condition. This macro really ought to take a parameter,
+ * but we do it the disgusting crufty way forced on us by the ()-less
+ * definition of BEGIN.
+ */
+#define BEGIN (yy_start) = 1 + 2 *
+
+/* Translate the current start state into a value that can be later handed
+ * to BEGIN to return to the state. The YYSTATE alias is for lex
+ * compatibility.
+ */
+#define YY_START (((yy_start) - 1) / 2)
+#define YYSTATE YY_START
+
+/* Action number for EOF rule of a given start state. */
+#define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1)
+
+/* Special action meaning "start processing a new file". */
+#define YY_NEW_FILE zconfrestart(zconfin )
+
+#define YY_END_OF_BUFFER_CHAR 0
+
+/* Size of default input buffer. */
+#ifndef YY_BUF_SIZE
+#define YY_BUF_SIZE 16384
+#endif
+
+/* The state buf must be large enough to hold one state per character in the main buffer.
+ */
+#define YY_STATE_BUF_SIZE ((YY_BUF_SIZE + 2) * sizeof(yy_state_type))
+
+#ifndef YY_TYPEDEF_YY_BUFFER_STATE
+#define YY_TYPEDEF_YY_BUFFER_STATE
+typedef struct yy_buffer_state *YY_BUFFER_STATE;
+#endif
+
+extern int zconfleng;
+
+extern FILE *zconfin, *zconfout;
+
+#define EOB_ACT_CONTINUE_SCAN 0
+#define EOB_ACT_END_OF_FILE 1
+#define EOB_ACT_LAST_MATCH 2
+
+ #define YY_LESS_LINENO(n)
+
+/* Return all but the first "n" matched characters back to the input stream. */
+#define yyless(n) \
+ do \
+ { \
+ /* Undo effects of setting up zconftext. */ \
+ int yyless_macro_arg = (n); \
+ YY_LESS_LINENO(yyless_macro_arg);\
+ *yy_cp = (yy_hold_char); \
+ YY_RESTORE_YY_MORE_OFFSET \
+ (yy_c_buf_p) = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \
+ YY_DO_BEFORE_ACTION; /* set up zconftext again */ \
+ } \
+ while ( 0 )
+
+#define unput(c) yyunput( c, (yytext_ptr) )
+
+#ifndef YY_TYPEDEF_YY_SIZE_T
+#define YY_TYPEDEF_YY_SIZE_T
+typedef size_t yy_size_t;
+#endif
+
+#ifndef YY_STRUCT_YY_BUFFER_STATE
+#define YY_STRUCT_YY_BUFFER_STATE
+struct yy_buffer_state
+ {
+ FILE *yy_input_file;
+
+ char *yy_ch_buf; /* input buffer */
+ char *yy_buf_pos; /* current position in input buffer */
+
+ /* Size of input buffer in bytes, not including room for EOB
+ * characters.
+ */
+ yy_size_t yy_buf_size;
+
+ /* Number of characters read into yy_ch_buf, not including EOB
+ * characters.
+ */
+ int yy_n_chars;
+
+ /* Whether we "own" the buffer - i.e., we know we created it,
+ * and can realloc() it to grow it, and should free() it to
+ * delete it.
+ */
+ int yy_is_our_buffer;
+
+ /* Whether this is an "interactive" input source; if so, and
+ * if we're using stdio for input, then we want to use getc()
+ * instead of fread(), to make sure we stop fetching input after
+ * each newline.
+ */
+ int yy_is_interactive;
+
+ /* Whether we're considered to be at the beginning of a line.
+ * If so, '^' rules will be active on the next match, otherwise
+ * not.
+ */
+ int yy_at_bol;
+
+ int yy_bs_lineno; /**< The line count. */
+ int yy_bs_column; /**< The column count. */
+
+ /* Whether to try to fill the input buffer when we reach the
+ * end of it.
+ */
+ int yy_fill_buffer;
+
+ int yy_buffer_status;
+
+#define YY_BUFFER_NEW 0
+#define YY_BUFFER_NORMAL 1
+ /* When an EOF's been seen but there's still some text to process
+ * then we mark the buffer as YY_EOF_PENDING, to indicate that we
+ * shouldn't try reading from the input source any more. We might
+ * still have a bunch of tokens to match, though, because of
+ * possible backing-up.
+ *
+ * When we actually see the EOF, we change the status to "new"
+ * (via zconfrestart()), so that the user can continue scanning by
+ * just pointing zconfin at a new input file.
+ */
+#define YY_BUFFER_EOF_PENDING 2
+
+ };
+#endif /* !YY_STRUCT_YY_BUFFER_STATE */
+
+/* Stack of input buffers. */
+static size_t yy_buffer_stack_top = 0; /**< index of top of stack. */
+static size_t yy_buffer_stack_max = 0; /**< capacity of stack. */
+static YY_BUFFER_STATE * yy_buffer_stack = 0; /**< Stack as an array. */
+
+/* We provide macros for accessing buffer states in case in the
+ * future we want to put the buffer states in a more general
+ * "scanner state".
+ *
+ * Returns the top of the stack, or NULL.
+ */
+#define YY_CURRENT_BUFFER ( (yy_buffer_stack) \
+ ? (yy_buffer_stack)[(yy_buffer_stack_top)] \
+ : NULL)
+
+/* Same as previous macro, but useful when we know that the buffer stack is not
+ * NULL or when we need an lvalue. For internal use only.
+ */
+#define YY_CURRENT_BUFFER_LVALUE (yy_buffer_stack)[(yy_buffer_stack_top)]
+
+/* yy_hold_char holds the character lost when zconftext is formed. */
+static char yy_hold_char;
+static int yy_n_chars; /* number of characters read into yy_ch_buf */
+int zconfleng;
+
+/* Points to current character in buffer. */
+static char *yy_c_buf_p = (char *) 0;
+static int yy_init = 0; /* whether we need to initialize */
+static int yy_start = 0; /* start state number */
+
+/* Flag which is used to allow zconfwrap()'s to do buffer switches
+ * instead of setting up a fresh zconfin. A bit of a hack ...
+ */
+static int yy_did_buffer_switch_on_eof;
+
+void zconfrestart (FILE *input_file );
+void zconf_switch_to_buffer (YY_BUFFER_STATE new_buffer );
+YY_BUFFER_STATE zconf_create_buffer (FILE *file,int size );
+void zconf_delete_buffer (YY_BUFFER_STATE b );
+void zconf_flush_buffer (YY_BUFFER_STATE b );
+void zconfpush_buffer_state (YY_BUFFER_STATE new_buffer );
+void zconfpop_buffer_state (void );
+
+static void zconfensure_buffer_stack (void );
+static void zconf_load_buffer_state (void );
+static void zconf_init_buffer (YY_BUFFER_STATE b,FILE *file );
+
+#define YY_FLUSH_BUFFER zconf_flush_buffer(YY_CURRENT_BUFFER )
+
+YY_BUFFER_STATE zconf_scan_buffer (char *base,yy_size_t size );
+YY_BUFFER_STATE zconf_scan_string (yyconst char *yy_str );
+YY_BUFFER_STATE zconf_scan_bytes (yyconst char *bytes,int len );
+
+void *zconfalloc (yy_size_t );
+void *zconfrealloc (void *,yy_size_t );
+void zconffree (void * );
+
+#define yy_new_buffer zconf_create_buffer
+
+#define yy_set_interactive(is_interactive) \
+ { \
+ if ( ! YY_CURRENT_BUFFER ){ \
+ zconfensure_buffer_stack (); \
+ YY_CURRENT_BUFFER_LVALUE = \
+ zconf_create_buffer(zconfin,YY_BUF_SIZE ); \
+ } \
+ YY_CURRENT_BUFFER_LVALUE->yy_is_interactive = is_interactive; \
+ }
+
+#define yy_set_bol(at_bol) \
+ { \
+ if ( ! YY_CURRENT_BUFFER ){\
+ zconfensure_buffer_stack (); \
+ YY_CURRENT_BUFFER_LVALUE = \
+ zconf_create_buffer(zconfin,YY_BUF_SIZE ); \
+ } \
+ YY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \
+ }
+
+#define YY_AT_BOL() (YY_CURRENT_BUFFER_LVALUE->yy_at_bol)
+
+/* Begin user sect3 */
+
+#define zconfwrap(n) 1
+#define YY_SKIP_YYWRAP
+
+typedef unsigned char YY_CHAR;
+
+FILE *zconfin = (FILE *) 0, *zconfout = (FILE *) 0;
+
+typedef int yy_state_type;
+
+extern int zconflineno;
+
+int zconflineno = 1;
+
+extern char *zconftext;
+#define yytext_ptr zconftext
+static yyconst flex_int16_t yy_nxt[][17] =
+ {
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0
+ },
+
+ {
+ 11, 12, 13, 14, 12, 12, 15, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12
+ },
+
+ {
+ 11, 12, 13, 14, 12, 12, 15, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12
+ },
+
+ {
+ 11, 16, 16, 17, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 18, 16, 16, 16
+ },
+
+ {
+ 11, 16, 16, 17, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 18, 16, 16, 16
+
+ },
+
+ {
+ 11, 19, 20, 21, 19, 19, 19, 19, 19, 19,
+ 19, 19, 19, 19, 19, 19, 19
+ },
+
+ {
+ 11, 19, 20, 21, 19, 19, 19, 19, 19, 19,
+ 19, 19, 19, 19, 19, 19, 19
+ },
+
+ {
+ 11, 22, 22, 23, 22, 24, 22, 22, 24, 22,
+ 22, 22, 22, 22, 22, 25, 22
+ },
+
+ {
+ 11, 22, 22, 23, 22, 24, 22, 22, 24, 22,
+ 22, 22, 22, 22, 22, 25, 22
+ },
+
+ {
+ 11, 26, 26, 27, 28, 29, 30, 31, 29, 32,
+ 33, 34, 35, 35, 36, 37, 38
+
+ },
+
+ {
+ 11, 26, 26, 27, 28, 29, 30, 31, 29, 32,
+ 33, 34, 35, 35, 36, 37, 38
+ },
+
+ {
+ -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+ -11, -11, -11, -11, -11, -11, -11
+ },
+
+ {
+ 11, -12, -12, -12, -12, -12, -12, -12, -12, -12,
+ -12, -12, -12, -12, -12, -12, -12
+ },
+
+ {
+ 11, -13, 39, 40, -13, -13, 41, -13, -13, -13,
+ -13, -13, -13, -13, -13, -13, -13
+ },
+
+ {
+ 11, -14, -14, -14, -14, -14, -14, -14, -14, -14,
+ -14, -14, -14, -14, -14, -14, -14
+
+ },
+
+ {
+ 11, 42, 42, 43, 42, 42, 42, 42, 42, 42,
+ 42, 42, 42, 42, 42, 42, 42
+ },
+
+ {
+ 11, -16, -16, -16, -16, -16, -16, -16, -16, -16,
+ -16, -16, -16, -16, -16, -16, -16
+ },
+
+ {
+ 11, -17, -17, -17, -17, -17, -17, -17, -17, -17,
+ -17, -17, -17, -17, -17, -17, -17
+ },
+
+ {
+ 11, -18, -18, -18, -18, -18, -18, -18, -18, -18,
+ -18, -18, -18, 44, -18, -18, -18
+ },
+
+ {
+ 11, 45, 45, -19, 45, 45, 45, 45, 45, 45,
+ 45, 45, 45, 45, 45, 45, 45
+
+ },
+
+ {
+ 11, -20, 46, 47, -20, -20, -20, -20, -20, -20,
+ -20, -20, -20, -20, -20, -20, -20
+ },
+
+ {
+ 11, 48, -21, -21, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48
+ },
+
+ {
+ 11, 49, 49, 50, 49, -22, 49, 49, -22, 49,
+ 49, 49, 49, 49, 49, -22, 49
+ },
+
+ {
+ 11, -23, -23, -23, -23, -23, -23, -23, -23, -23,
+ -23, -23, -23, -23, -23, -23, -23
+ },
+
+ {
+ 11, -24, -24, -24, -24, -24, -24, -24, -24, -24,
+ -24, -24, -24, -24, -24, -24, -24
+
+ },
+
+ {
+ 11, 51, 51, 52, 51, 51, 51, 51, 51, 51,
+ 51, 51, 51, 51, 51, 51, 51
+ },
+
+ {
+ 11, -26, -26, -26, -26, -26, -26, -26, -26, -26,
+ -26, -26, -26, -26, -26, -26, -26
+ },
+
+ {
+ 11, -27, -27, -27, -27, -27, -27, -27, -27, -27,
+ -27, -27, -27, -27, -27, -27, -27
+ },
+
+ {
+ 11, -28, -28, -28, -28, -28, -28, -28, -28, -28,
+ -28, -28, -28, -28, 53, -28, -28
+ },
+
+ {
+ 11, -29, -29, -29, -29, -29, -29, -29, -29, -29,
+ -29, -29, -29, -29, -29, -29, -29
+
+ },
+
+ {
+ 11, 54, 54, -30, 54, 54, 54, 54, 54, 54,
+ 54, 54, 54, 54, 54, 54, 54
+ },
+
+ {
+ 11, -31, -31, -31, -31, -31, -31, 55, -31, -31,
+ -31, -31, -31, -31, -31, -31, -31
+ },
+
+ {
+ 11, -32, -32, -32, -32, -32, -32, -32, -32, -32,
+ -32, -32, -32, -32, -32, -32, -32
+ },
+
+ {
+ 11, -33, -33, -33, -33, -33, -33, -33, -33, -33,
+ -33, -33, -33, -33, -33, -33, -33
+ },
+
+ {
+ 11, -34, -34, -34, -34, -34, -34, -34, -34, -34,
+ -34, 56, 57, 57, -34, -34, -34
+
+ },
+
+ {
+ 11, -35, -35, -35, -35, -35, -35, -35, -35, -35,
+ -35, 57, 57, 57, -35, -35, -35
+ },
+
+ {
+ 11, -36, -36, -36, -36, -36, -36, -36, -36, -36,
+ -36, -36, -36, -36, -36, -36, -36
+ },
+
+ {
+ 11, -37, -37, 58, -37, -37, -37, -37, -37, -37,
+ -37, -37, -37, -37, -37, -37, -37
+ },
+
+ {
+ 11, -38, -38, -38, -38, -38, -38, -38, -38, -38,
+ -38, -38, -38, -38, -38, -38, 59
+ },
+
+ {
+ 11, -39, 39, 40, -39, -39, 41, -39, -39, -39,
+ -39, -39, -39, -39, -39, -39, -39
+
+ },
+
+ {
+ 11, -40, -40, -40, -40, -40, -40, -40, -40, -40,
+ -40, -40, -40, -40, -40, -40, -40
+ },
+
+ {
+ 11, 42, 42, 43, 42, 42, 42, 42, 42, 42,
+ 42, 42, 42, 42, 42, 42, 42
+ },
+
+ {
+ 11, 42, 42, 43, 42, 42, 42, 42, 42, 42,
+ 42, 42, 42, 42, 42, 42, 42
+ },
+
+ {
+ 11, -43, -43, -43, -43, -43, -43, -43, -43, -43,
+ -43, -43, -43, -43, -43, -43, -43
+ },
+
+ {
+ 11, -44, -44, -44, -44, -44, -44, -44, -44, -44,
+ -44, -44, -44, 44, -44, -44, -44
+
+ },
+
+ {
+ 11, 45, 45, -45, 45, 45, 45, 45, 45, 45,
+ 45, 45, 45, 45, 45, 45, 45
+ },
+
+ {
+ 11, -46, 46, 47, -46, -46, -46, -46, -46, -46,
+ -46, -46, -46, -46, -46, -46, -46
+ },
+
+ {
+ 11, 48, -47, -47, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48
+ },
+
+ {
+ 11, -48, -48, -48, -48, -48, -48, -48, -48, -48,
+ -48, -48, -48, -48, -48, -48, -48
+ },
+
+ {
+ 11, 49, 49, 50, 49, -49, 49, 49, -49, 49,
+ 49, 49, 49, 49, 49, -49, 49
+
+ },
+
+ {
+ 11, -50, -50, -50, -50, -50, -50, -50, -50, -50,
+ -50, -50, -50, -50, -50, -50, -50
+ },
+
+ {
+ 11, -51, -51, 52, -51, -51, -51, -51, -51, -51,
+ -51, -51, -51, -51, -51, -51, -51
+ },
+
+ {
+ 11, -52, -52, -52, -52, -52, -52, -52, -52, -52,
+ -52, -52, -52, -52, -52, -52, -52
+ },
+
+ {
+ 11, -53, -53, -53, -53, -53, -53, -53, -53, -53,
+ -53, -53, -53, -53, -53, -53, -53
+ },
+
+ {
+ 11, 54, 54, -54, 54, 54, 54, 54, 54, 54,
+ 54, 54, 54, 54, 54, 54, 54
+
+ },
+
+ {
+ 11, -55, -55, -55, -55, -55, -55, -55, -55, -55,
+ -55, -55, -55, -55, -55, -55, -55
+ },
+
+ {
+ 11, -56, -56, -56, -56, -56, -56, -56, -56, -56,
+ -56, 60, 57, 57, -56, -56, -56
+ },
+
+ {
+ 11, -57, -57, -57, -57, -57, -57, -57, -57, -57,
+ -57, 57, 57, 57, -57, -57, -57
+ },
+
+ {
+ 11, -58, -58, -58, -58, -58, -58, -58, -58, -58,
+ -58, -58, -58, -58, -58, -58, -58
+ },
+
+ {
+ 11, -59, -59, -59, -59, -59, -59, -59, -59, -59,
+ -59, -59, -59, -59, -59, -59, -59
+
+ },
+
+ {
+ 11, -60, -60, -60, -60, -60, -60, -60, -60, -60,
+ -60, 57, 57, 57, -60, -60, -60
+ },
+
+ } ;
+
+static yy_state_type yy_get_previous_state (void );
+static yy_state_type yy_try_NUL_trans (yy_state_type current_state );
+static int yy_get_next_buffer (void );
+static void yy_fatal_error (yyconst char msg[] );
+
+/* Done after the current pattern has been matched and before the
+ * corresponding action - sets up zconftext.
+ */
+#define YY_DO_BEFORE_ACTION \
+ (yytext_ptr) = yy_bp; \
+ zconfleng = (size_t) (yy_cp - yy_bp); \
+ (yy_hold_char) = *yy_cp; \
+ *yy_cp = '\0'; \
+ (yy_c_buf_p) = yy_cp;
+
+#define YY_NUM_RULES 33
+#define YY_END_OF_BUFFER 34
+/* This struct is not used in this scanner,
+ but its presence is necessary. */
+struct yy_trans_info
+ {
+ flex_int32_t yy_verify;
+ flex_int32_t yy_nxt;
+ };
+static yyconst flex_int16_t yy_accept[61] =
+ { 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 34, 5, 4, 2, 3, 7, 8, 6, 32, 29,
+ 31, 24, 28, 27, 26, 22, 17, 13, 16, 20,
+ 22, 11, 12, 19, 19, 14, 22, 22, 4, 2,
+ 3, 3, 1, 6, 32, 29, 31, 30, 24, 23,
+ 26, 25, 15, 20, 9, 19, 19, 21, 10, 18
+ } ;
+
+static yyconst flex_int32_t yy_ec[256] =
+ { 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 2, 3,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 4, 5, 6, 1, 1, 7, 8, 9,
+ 10, 1, 1, 1, 11, 12, 12, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 1, 1, 1,
+ 14, 1, 1, 1, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 1, 15, 1, 1, 13, 1, 13, 13, 13, 13,
+
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 1, 16, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1
+ } ;
+
+extern int zconf_flex_debug;
+int zconf_flex_debug = 0;
+
+/* The intent behind this definition is that it'll catch
+ * any uses of REJECT which flex missed.
+ */
+#define REJECT reject_used_but_not_detected
+#define yymore() yymore_used_but_not_detected
+#define YY_MORE_ADJ 0
+#define YY_RESTORE_YY_MORE_OFFSET
+char *zconftext;
+#define YY_NO_INPUT 1
+
+/*
+ * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org>
+ * Released under the terms of the GNU GPL v2.0.
+ */
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "lkc.h"
+
+#define START_STRSIZE 16
+
+static struct {
+ struct file *file;
+ int lineno;
+} current_pos;
+
+static char *text;
+static int text_size, text_asize;
+
+struct buffer {
+ struct buffer *parent;
+ YY_BUFFER_STATE state;
+};
+
+struct buffer *current_buf;
+
+static int last_ts, first_ts;
+
+static void zconf_endhelp(void);
+static void zconf_endfile(void);
+
+static void new_string(void)
+{
+ text = xmalloc(START_STRSIZE);
+ text_asize = START_STRSIZE;
+ text_size = 0;
+ *text = 0;
+}
+
+static void append_string(const char *str, int size)
+{
+ int new_size = text_size + size + 1;
+ if (new_size > text_asize) {
+ new_size += START_STRSIZE - 1;
+ new_size &= -START_STRSIZE;
+ text = realloc(text, new_size);
+ text_asize = new_size;
+ }
+ memcpy(text + text_size, str, size);
+ text_size += size;
+ text[text_size] = 0;
+}
+
+static void alloc_string(const char *str, int size)
+{
+ text = xmalloc(size + 1);
+ memcpy(text, str, size);
+ text[size] = 0;
+}
+
+#define INITIAL 0
+#define COMMAND 1
+#define HELP 2
+#define STRING 3
+#define PARAM 4
+
+#ifndef YY_NO_UNISTD_H
+/* Special case for "unistd.h", since it is non-ANSI. We include it way
+ * down here because we want the user's section 1 to have been scanned first.
+ * The user has a chance to override it with an option.
+ */
+#include <unistd.h>
+#endif
+
+#ifndef YY_EXTRA_TYPE
+#define YY_EXTRA_TYPE void *
+#endif
+
+static int yy_init_globals (void );
+
+/* Accessor methods to globals.
+ These are made visible to non-reentrant scanners for convenience. */
+
+int zconflex_destroy (void );
+
+int zconfget_debug (void );
+
+void zconfset_debug (int debug_flag );
+
+YY_EXTRA_TYPE zconfget_extra (void );
+
+void zconfset_extra (YY_EXTRA_TYPE user_defined );
+
+FILE *zconfget_in (void );
+
+void zconfset_in (FILE * in_str );
+
+FILE *zconfget_out (void );
+
+void zconfset_out (FILE * out_str );
+
+int zconfget_leng (void );
+
+char *zconfget_text (void );
+
+int zconfget_lineno (void );
+
+void zconfset_lineno (int line_number );
+
+/* Macros after this point can all be overridden by user definitions in
+ * section 1.
+ */
+
+#ifndef YY_SKIP_YYWRAP
+#ifdef __cplusplus
+extern "C" int zconfwrap (void );
+#else
+extern int zconfwrap (void );
+#endif
+#endif
+
+ static void yyunput (int c,char *buf_ptr );
+
+#ifndef yytext_ptr
+static void yy_flex_strncpy (char *,yyconst char *,int );
+#endif
+
+#ifdef YY_NEED_STRLEN
+static int yy_flex_strlen (yyconst char * );
+#endif
+
+#ifndef YY_NO_INPUT
+
+#ifdef __cplusplus
+static int yyinput (void );
+#else
+static int input (void );
+#endif
+
+#endif
+
+/* Amount of stuff to slurp up with each read. */
+#ifndef YY_READ_BUF_SIZE
+#define YY_READ_BUF_SIZE 8192
+#endif
+
+/* Copy whatever the last rule matched to the standard output. */
+#ifndef ECHO
+/* This used to be an fputs(), but since the string might contain NUL's,
+ * we now use fwrite().
+ */
+#define ECHO do { if (fwrite( zconftext, zconfleng, 1, zconfout )) {} } while (0)
+#endif
+
+/* Gets input and stuffs it into "buf". number of characters read, or YY_NULL,
+ * is returned in "result".
+ */
+#ifndef YY_INPUT
+#define YY_INPUT(buf,result,max_size) \
+ errno=0; \
+ while ( (result = read( fileno(zconfin), (char *) buf, max_size )) < 0 ) \
+ { \
+ if( errno != EINTR) \
+ { \
+ YY_FATAL_ERROR( "input in flex scanner failed" ); \
+ break; \
+ } \
+ errno=0; \
+ clearerr(zconfin); \
+ }\
+\
+
+#endif
+
+/* No semi-colon after return; correct usage is to write "yyterminate();" -
+ * we don't want an extra ';' after the "return" because that will cause
+ * some compilers to complain about unreachable statements.
+ */
+#ifndef yyterminate
+#define yyterminate() return YY_NULL
+#endif
+
+/* Number of entries by which start-condition stack grows. */
+#ifndef YY_START_STACK_INCR
+#define YY_START_STACK_INCR 25
+#endif
+
+/* Report a fatal error. */
+#ifndef YY_FATAL_ERROR
+#define YY_FATAL_ERROR(msg) yy_fatal_error( msg )
+#endif
+
+/* end tables serialization structures and prototypes */
+
+/* Default declaration of generated scanner - a define so the user can
+ * easily add parameters.
+ */
+#ifndef YY_DECL
+#define YY_DECL_IS_OURS 1
+
+extern int zconflex (void);
+
+#define YY_DECL int zconflex (void)
+#endif /* !YY_DECL */
+
+/* Code executed at the beginning of each rule, after zconftext and zconfleng
+ * have been set up.
+ */
+#ifndef YY_USER_ACTION
+#define YY_USER_ACTION
+#endif
+
+/* Code executed at the end of each rule. */
+#ifndef YY_BREAK
+#define YY_BREAK break;
+#endif
+
+#define YY_RULE_SETUP \
+ YY_USER_ACTION
+
+/** The main scanner function which does all the work.
+ */
+YY_DECL
+{
+ register yy_state_type yy_current_state;
+ register char *yy_cp, *yy_bp;
+ register int yy_act;
+
+ int str = 0;
+ int ts, i;
+
+ if ( !(yy_init) )
+ {
+ (yy_init) = 1;
+
+#ifdef YY_USER_INIT
+ YY_USER_INIT;
+#endif
+
+ if ( ! (yy_start) )
+ (yy_start) = 1; /* first start state */
+
+ if ( ! zconfin )
+ zconfin = stdin;
+
+ if ( ! zconfout )
+ zconfout = stdout;
+
+ if ( ! YY_CURRENT_BUFFER ) {
+ zconfensure_buffer_stack ();
+ YY_CURRENT_BUFFER_LVALUE =
+ zconf_create_buffer(zconfin,YY_BUF_SIZE );
+ }
+
+ zconf_load_buffer_state( );
+ }
+
+ while ( 1 ) /* loops until end-of-file is reached */
+ {
+ yy_cp = (yy_c_buf_p);
+
+ /* Support of zconftext. */
+ *yy_cp = (yy_hold_char);
+
+ /* yy_bp points to the position in yy_ch_buf of the start of
+ * the current run.
+ */
+ yy_bp = yy_cp;
+
+ yy_current_state = (yy_start);
+yy_match:
+ while ( (yy_current_state = yy_nxt[yy_current_state][ yy_ec[YY_SC_TO_UI(*yy_cp)] ]) > 0 )
+ ++yy_cp;
+
+ yy_current_state = -yy_current_state;
+
+yy_find_action:
+ yy_act = yy_accept[yy_current_state];
+
+ YY_DO_BEFORE_ACTION;
+
+do_action: /* This label is used only to access EOF actions. */
+
+ switch ( yy_act )
+ { /* beginning of action switch */
+case 1:
+/* rule 1 can match eol */
+case 2:
+/* rule 2 can match eol */
+YY_RULE_SETUP
+{
+ current_file->lineno++;
+ return T_EOL;
+}
+ YY_BREAK
+case 3:
+YY_RULE_SETUP
+
+ YY_BREAK
+case 4:
+YY_RULE_SETUP
+{
+ BEGIN(COMMAND);
+}
+ YY_BREAK
+case 5:
+YY_RULE_SETUP
+{
+ unput(zconftext[0]);
+ BEGIN(COMMAND);
+}
+ YY_BREAK
+
+case 6:
+YY_RULE_SETUP
+{
+ const struct kconf_id *id = kconf_id_lookup(zconftext, zconfleng);
+ BEGIN(PARAM);
+ current_pos.file = current_file;
+ current_pos.lineno = current_file->lineno;
+ if (id && id->flags & TF_COMMAND) {
+ zconflval.id = id;
+ return id->token;
+ }
+ alloc_string(zconftext, zconfleng);
+ zconflval.string = text;
+ return T_WORD;
+ }
+ YY_BREAK
+case 7:
+YY_RULE_SETUP
+
+ YY_BREAK
+case 8:
+/* rule 8 can match eol */
+YY_RULE_SETUP
+{
+ BEGIN(INITIAL);
+ current_file->lineno++;
+ return T_EOL;
+ }
+ YY_BREAK
+
+case 9:
+YY_RULE_SETUP
+return T_AND;
+ YY_BREAK
+case 10:
+YY_RULE_SETUP
+return T_OR;
+ YY_BREAK
+case 11:
+YY_RULE_SETUP
+return T_OPEN_PAREN;
+ YY_BREAK
+case 12:
+YY_RULE_SETUP
+return T_CLOSE_PAREN;
+ YY_BREAK
+case 13:
+YY_RULE_SETUP
+return T_NOT;
+ YY_BREAK
+case 14:
+YY_RULE_SETUP
+return T_EQUAL;
+ YY_BREAK
+case 15:
+YY_RULE_SETUP
+return T_UNEQUAL;
+ YY_BREAK
+case 16:
+YY_RULE_SETUP
+{
+ str = zconftext[0];
+ new_string();
+ BEGIN(STRING);
+ }
+ YY_BREAK
+case 17:
+/* rule 17 can match eol */
+YY_RULE_SETUP
+BEGIN(INITIAL); current_file->lineno++; return T_EOL;
+ YY_BREAK
+case 18:
+YY_RULE_SETUP
+/* ignore */
+ YY_BREAK
+case 19:
+YY_RULE_SETUP
+{
+ const struct kconf_id *id = kconf_id_lookup(zconftext, zconfleng);
+ if (id && id->flags & TF_PARAM) {
+ zconflval.id = id;
+ return id->token;
+ }
+ alloc_string(zconftext, zconfleng);
+ zconflval.string = text;
+ return T_WORD;
+ }
+ YY_BREAK
+case 20:
+YY_RULE_SETUP
+/* comment */
+ YY_BREAK
+case 21:
+/* rule 21 can match eol */
+YY_RULE_SETUP
+current_file->lineno++;
+ YY_BREAK
+case 22:
+YY_RULE_SETUP
+
+ YY_BREAK
+case YY_STATE_EOF(PARAM):
+{
+ BEGIN(INITIAL);
+ }
+ YY_BREAK
+
+case 23:
+/* rule 23 can match eol */
+*yy_cp = (yy_hold_char); /* undo effects of setting up zconftext */
+(yy_c_buf_p) = yy_cp -= 1;
+YY_DO_BEFORE_ACTION; /* set up zconftext again */
+YY_RULE_SETUP
+{
+ append_string(zconftext, zconfleng);
+ zconflval.string = text;
+ return T_WORD_QUOTE;
+ }
+ YY_BREAK
+case 24:
+YY_RULE_SETUP
+{
+ append_string(zconftext, zconfleng);
+ }
+ YY_BREAK
+case 25:
+/* rule 25 can match eol */
+*yy_cp = (yy_hold_char); /* undo effects of setting up zconftext */
+(yy_c_buf_p) = yy_cp -= 1;
+YY_DO_BEFORE_ACTION; /* set up zconftext again */
+YY_RULE_SETUP
+{
+ append_string(zconftext + 1, zconfleng - 1);
+ zconflval.string = text;
+ return T_WORD_QUOTE;
+ }
+ YY_BREAK
+case 26:
+YY_RULE_SETUP
+{
+ append_string(zconftext + 1, zconfleng - 1);
+ }
+ YY_BREAK
+case 27:
+YY_RULE_SETUP
+{
+ if (str == zconftext[0]) {
+ BEGIN(PARAM);
+ zconflval.string = text;
+ return T_WORD_QUOTE;
+ } else
+ append_string(zconftext, 1);
+ }
+ YY_BREAK
+case 28:
+/* rule 28 can match eol */
+YY_RULE_SETUP
+{
+ printf("%s:%d:warning: multi-line strings not supported\n", zconf_curname(), zconf_lineno());
+ current_file->lineno++;
+ BEGIN(INITIAL);
+ return T_EOL;
+ }
+ YY_BREAK
+case YY_STATE_EOF(STRING):
+{
+ BEGIN(INITIAL);
+ }
+ YY_BREAK
+
+case 29:
+YY_RULE_SETUP
+{
+ ts = 0;
+ for (i = 0; i < zconfleng; i++) {
+ if (zconftext[i] == '\t')
+ ts = (ts & ~7) + 8;
+ else
+ ts++;
+ }
+ last_ts = ts;
+ if (first_ts) {
+ if (ts < first_ts) {
+ zconf_endhelp();
+ return T_HELPTEXT;
+ }
+ ts -= first_ts;
+ while (ts > 8) {
+ append_string(" ", 8);
+ ts -= 8;
+ }
+ append_string(" ", ts);
+ }
+ }
+ YY_BREAK
+case 30:
+/* rule 30 can match eol */
+*yy_cp = (yy_hold_char); /* undo effects of setting up zconftext */
+(yy_c_buf_p) = yy_cp -= 1;
+YY_DO_BEFORE_ACTION; /* set up zconftext again */
+YY_RULE_SETUP
+{
+ current_file->lineno++;
+ zconf_endhelp();
+ return T_HELPTEXT;
+ }
+ YY_BREAK
+case 31:
+/* rule 31 can match eol */
+YY_RULE_SETUP
+{
+ current_file->lineno++;
+ append_string("\n", 1);
+ }
+ YY_BREAK
+case 32:
+YY_RULE_SETUP
+{
+ while (zconfleng) {
+ if ((zconftext[zconfleng-1] != ' ') && (zconftext[zconfleng-1] != '\t'))
+ break;
+ zconfleng--;
+ }
+ append_string(zconftext, zconfleng);
+ if (!first_ts)
+ first_ts = last_ts;
+ }
+ YY_BREAK
+case YY_STATE_EOF(HELP):
+{
+ zconf_endhelp();
+ return T_HELPTEXT;
+ }
+ YY_BREAK
+
+case YY_STATE_EOF(INITIAL):
+case YY_STATE_EOF(COMMAND):
+{
+ if (current_file) {
+ zconf_endfile();
+ return T_EOL;
+ }
+ fclose(zconfin);
+ yyterminate();
+}
+ YY_BREAK
+case 33:
+YY_RULE_SETUP
+YY_FATAL_ERROR( "flex scanner jammed" );
+ YY_BREAK
+
+ case YY_END_OF_BUFFER:
+ {
+ /* Amount of text matched not including the EOB char. */
+ int yy_amount_of_matched_text = (int) (yy_cp - (yytext_ptr)) - 1;
+
+ /* Undo the effects of YY_DO_BEFORE_ACTION. */
+ *yy_cp = (yy_hold_char);
+ YY_RESTORE_YY_MORE_OFFSET
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_NEW )
+ {
+ /* We're scanning a new file or input source. It's
+ * possible that this happened because the user
+ * just pointed zconfin at a new source and called
+ * zconflex(). If so, then we have to assure
+ * consistency between YY_CURRENT_BUFFER and our
+ * globals. Here is the right place to do so, because
+ * this is the first action (other than possibly a
+ * back-up) that will match for the new input source.
+ */
+ (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
+ YY_CURRENT_BUFFER_LVALUE->yy_input_file = zconfin;
+ YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_NORMAL;
+ }
+
+ /* Note that here we test for yy_c_buf_p "<=" to the position
+ * of the first EOB in the buffer, since yy_c_buf_p will
+ * already have been incremented past the NUL character
+ * (since all states make transitions on EOB to the
+ * end-of-buffer state). Contrast this with the test
+ * in input().
+ */
+ if ( (yy_c_buf_p) <= &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )
+ { /* This was really a NUL. */
+ yy_state_type yy_next_state;
+
+ (yy_c_buf_p) = (yytext_ptr) + yy_amount_of_matched_text;
+
+ yy_current_state = yy_get_previous_state( );
+
+ /* Okay, we're now positioned to make the NUL
+ * transition. We couldn't have
+ * yy_get_previous_state() go ahead and do it
+ * for us because it doesn't know how to deal
+ * with the possibility of jamming (and we don't
+ * want to build jamming into it because then it
+ * will run more slowly).
+ */
+
+ yy_next_state = yy_try_NUL_trans( yy_current_state );
+
+ yy_bp = (yytext_ptr) + YY_MORE_ADJ;
+
+ if ( yy_next_state )
+ {
+ /* Consume the NUL. */
+ yy_cp = ++(yy_c_buf_p);
+ yy_current_state = yy_next_state;
+ goto yy_match;
+ }
+
+ else
+ {
+ yy_cp = (yy_c_buf_p);
+ goto yy_find_action;
+ }
+ }
+
+ else switch ( yy_get_next_buffer( ) )
+ {
+ case EOB_ACT_END_OF_FILE:
+ {
+ (yy_did_buffer_switch_on_eof) = 0;
+
+ if ( zconfwrap( ) )
+ {
+ /* Note: because we've taken care in
+ * yy_get_next_buffer() to have set up
+ * zconftext, we can now set up
+ * yy_c_buf_p so that if some total
+ * hoser (like flex itself) wants to
+ * call the scanner after we return the
+ * YY_NULL, it'll still work - another
+ * YY_NULL will get returned.
+ */
+ (yy_c_buf_p) = (yytext_ptr) + YY_MORE_ADJ;
+
+ yy_act = YY_STATE_EOF(YY_START);
+ goto do_action;
+ }
+
+ else
+ {
+ if ( ! (yy_did_buffer_switch_on_eof) )
+ YY_NEW_FILE;
+ }
+ break;
+ }
+
+ case EOB_ACT_CONTINUE_SCAN:
+ (yy_c_buf_p) =
+ (yytext_ptr) + yy_amount_of_matched_text;
+
+ yy_current_state = yy_get_previous_state( );
+
+ yy_cp = (yy_c_buf_p);
+ yy_bp = (yytext_ptr) + YY_MORE_ADJ;
+ goto yy_match;
+
+ case EOB_ACT_LAST_MATCH:
+ (yy_c_buf_p) =
+ &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)];
+
+ yy_current_state = yy_get_previous_state( );
+
+ yy_cp = (yy_c_buf_p);
+ yy_bp = (yytext_ptr) + YY_MORE_ADJ;
+ goto yy_find_action;
+ }
+ break;
+ }
+
+ default:
+ YY_FATAL_ERROR(
+ "fatal flex scanner internal error--no action found" );
+ } /* end of action switch */
+ } /* end of scanning one token */
+} /* end of zconflex */
+
+/* yy_get_next_buffer - try to read in a new buffer
+ *
+ * Returns a code representing an action:
+ * EOB_ACT_LAST_MATCH -
+ * EOB_ACT_CONTINUE_SCAN - continue scanning from current position
+ * EOB_ACT_END_OF_FILE - end of file
+ */
+static int yy_get_next_buffer (void)
+{
+ register char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf;
+ register char *source = (yytext_ptr);
+ register int number_to_move, i;
+ int ret_val;
+
+ if ( (yy_c_buf_p) > &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] )
+ YY_FATAL_ERROR(
+ "fatal flex scanner internal error--end of buffer missed" );
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_fill_buffer == 0 )
+ { /* Don't try to fill the buffer, so this is an EOF. */
+ if ( (yy_c_buf_p) - (yytext_ptr) - YY_MORE_ADJ == 1 )
+ {
+ /* We matched a single character, the EOB, so
+ * treat this as a final EOF.
+ */
+ return EOB_ACT_END_OF_FILE;
+ }
+
+ else
+ {
+ /* We matched some text prior to the EOB, first
+ * process it.
+ */
+ return EOB_ACT_LAST_MATCH;
+ }
+ }
+
+ /* Try to read more data. */
+
+ /* First move last chars to start of buffer. */
+ number_to_move = (int) ((yy_c_buf_p) - (yytext_ptr)) - 1;
+
+ for ( i = 0; i < number_to_move; ++i )
+ *(dest++) = *(source++);
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_EOF_PENDING )
+ /* don't do the read, it's not guaranteed to return an EOF,
+ * just force an EOF
+ */
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars) = 0;
+
+ else
+ {
+ int num_to_read =
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1;
+
+ while ( num_to_read <= 0 )
+ { /* Not enough room in the buffer - grow it. */
+
+ /* just a shorter name for the current buffer */
+ YY_BUFFER_STATE b = YY_CURRENT_BUFFER;
+
+ int yy_c_buf_p_offset =
+ (int) ((yy_c_buf_p) - b->yy_ch_buf);
+
+ if ( b->yy_is_our_buffer )
+ {
+ int new_size = b->yy_buf_size * 2;
+
+ if ( new_size <= 0 )
+ b->yy_buf_size += b->yy_buf_size / 8;
+ else
+ b->yy_buf_size *= 2;
+
+ b->yy_ch_buf = (char *)
+ /* Include room in for 2 EOB chars. */
+ zconfrealloc((void *) b->yy_ch_buf,b->yy_buf_size + 2 );
+ }
+ else
+ /* Can't grow it, we don't own it. */
+ b->yy_ch_buf = 0;
+
+ if ( ! b->yy_ch_buf )
+ YY_FATAL_ERROR(
+ "fatal error - scanner input buffer overflow" );
+
+ (yy_c_buf_p) = &b->yy_ch_buf[yy_c_buf_p_offset];
+
+ num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size -
+ number_to_move - 1;
+
+ }
+
+ if ( num_to_read > YY_READ_BUF_SIZE )
+ num_to_read = YY_READ_BUF_SIZE;
+
+ /* Read in more data. */
+ YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]),
+ (yy_n_chars), (size_t) num_to_read );
+
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
+ }
+
+ if ( (yy_n_chars) == 0 )
+ {
+ if ( number_to_move == YY_MORE_ADJ )
+ {
+ ret_val = EOB_ACT_END_OF_FILE;
+ zconfrestart(zconfin );
+ }
+
+ else
+ {
+ ret_val = EOB_ACT_LAST_MATCH;
+ YY_CURRENT_BUFFER_LVALUE->yy_buffer_status =
+ YY_BUFFER_EOF_PENDING;
+ }
+ }
+
+ else
+ ret_val = EOB_ACT_CONTINUE_SCAN;
+
+ if ((yy_size_t) ((yy_n_chars) + number_to_move) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size) {
+ /* Extend the array by 50%, plus the number we really need. */
+ yy_size_t new_size = (yy_n_chars) + number_to_move + ((yy_n_chars) >> 1);
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf = (char *) zconfrealloc((void *) YY_CURRENT_BUFFER_LVALUE->yy_ch_buf,new_size );
+ if ( ! YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_get_next_buffer()" );
+ }
+
+ (yy_n_chars) += number_to_move;
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] = YY_END_OF_BUFFER_CHAR;
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] = YY_END_OF_BUFFER_CHAR;
+
+ (yytext_ptr) = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[0];
+
+ return ret_val;
+}
+
+/* yy_get_previous_state - get the state just before the EOB char was reached */
+
+ static yy_state_type yy_get_previous_state (void)
+{
+ register yy_state_type yy_current_state;
+ register char *yy_cp;
+
+ yy_current_state = (yy_start);
+
+ for ( yy_cp = (yytext_ptr) + YY_MORE_ADJ; yy_cp < (yy_c_buf_p); ++yy_cp )
+ {
+ yy_current_state = yy_nxt[yy_current_state][(*yy_cp ? yy_ec[YY_SC_TO_UI(*yy_cp)] : 1)];
+ }
+
+ return yy_current_state;
+}
+
+/* yy_try_NUL_trans - try to make a transition on the NUL character
+ *
+ * synopsis
+ * next_state = yy_try_NUL_trans( current_state );
+ */
+ static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state )
+{
+ register int yy_is_jam;
+
+ yy_current_state = yy_nxt[yy_current_state][1];
+ yy_is_jam = (yy_current_state <= 0);
+
+ return yy_is_jam ? 0 : yy_current_state;
+}
+
+ static void yyunput (int c, register char * yy_bp )
+{
+ register char *yy_cp;
+
+ yy_cp = (yy_c_buf_p);
+
+ /* undo effects of setting up zconftext */
+ *yy_cp = (yy_hold_char);
+
+ if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 )
+ { /* need to shift things up to make room */
+ /* +2 for EOB chars. */
+ register int number_to_move = (yy_n_chars) + 2;
+ register char *dest = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_size + 2];
+ register char *source =
+ &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move];
+
+ while ( source > YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )
+ *--dest = *--source;
+
+ yy_cp += (int) (dest - source);
+ yy_bp += (int) (dest - source);
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars =
+ (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_buf_size;
+
+ if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 )
+ YY_FATAL_ERROR( "flex scanner push-back overflow" );
+ }
+
+ *--yy_cp = (char) c;
+
+ (yytext_ptr) = yy_bp;
+ (yy_hold_char) = *yy_cp;
+ (yy_c_buf_p) = yy_cp;
+}
+
+#ifndef YY_NO_INPUT
+#ifdef __cplusplus
+ static int yyinput (void)
+#else
+ static int input (void)
+#endif
+
+{
+ int c;
+
+ *(yy_c_buf_p) = (yy_hold_char);
+
+ if ( *(yy_c_buf_p) == YY_END_OF_BUFFER_CHAR )
+ {
+ /* yy_c_buf_p now points to the character we want to return.
+ * If this occurs *before* the EOB characters, then it's a
+ * valid NUL; if not, then we've hit the end of the buffer.
+ */
+ if ( (yy_c_buf_p) < &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )
+ /* This was really a NUL. */
+ *(yy_c_buf_p) = '\0';
+
+ else
+ { /* need more input */
+ int offset = (yy_c_buf_p) - (yytext_ptr);
+ ++(yy_c_buf_p);
+
+ switch ( yy_get_next_buffer( ) )
+ {
+ case EOB_ACT_LAST_MATCH:
+ /* This happens because yy_g_n_b()
+ * sees that we've accumulated a
+ * token and flags that we need to
+ * try matching the token before
+ * proceeding. But for input(),
+ * there's no matching to consider.
+ * So convert the EOB_ACT_LAST_MATCH
+ * to EOB_ACT_END_OF_FILE.
+ */
+
+ /* Reset buffer status. */
+ zconfrestart(zconfin );
+
+ /*FALLTHROUGH*/
+
+ case EOB_ACT_END_OF_FILE:
+ {
+ if ( zconfwrap( ) )
+ return EOF;
+
+ if ( ! (yy_did_buffer_switch_on_eof) )
+ YY_NEW_FILE;
+#ifdef __cplusplus
+ return yyinput();
+#else
+ return input();
+#endif
+ }
+
+ case EOB_ACT_CONTINUE_SCAN:
+ (yy_c_buf_p) = (yytext_ptr) + offset;
+ break;
+ }
+ }
+ }
+
+ c = *(unsigned char *) (yy_c_buf_p); /* cast for 8-bit char's */
+ *(yy_c_buf_p) = '\0'; /* preserve zconftext */
+ (yy_hold_char) = *++(yy_c_buf_p);
+
+ return c;
+}
+#endif /* ifndef YY_NO_INPUT */
+
+/** Immediately switch to a different input stream.
+ * @param input_file A readable stream.
+ *
+ * @note This function does not reset the start condition to @c INITIAL .
+ */
+ void zconfrestart (FILE * input_file )
+{
+
+ if ( ! YY_CURRENT_BUFFER ){
+ zconfensure_buffer_stack ();
+ YY_CURRENT_BUFFER_LVALUE =
+ zconf_create_buffer(zconfin,YY_BUF_SIZE );
+ }
+
+ zconf_init_buffer(YY_CURRENT_BUFFER,input_file );
+ zconf_load_buffer_state( );
+}
+
+/** Switch to a different input buffer.
+ * @param new_buffer The new input buffer.
+ *
+ */
+ void zconf_switch_to_buffer (YY_BUFFER_STATE new_buffer )
+{
+
+ /* TODO. We should be able to replace this entire function body
+ * with
+ * zconfpop_buffer_state();
+ * zconfpush_buffer_state(new_buffer);
+ */
+ zconfensure_buffer_stack ();
+ if ( YY_CURRENT_BUFFER == new_buffer )
+ return;
+
+ if ( YY_CURRENT_BUFFER )
+ {
+ /* Flush out information for old buffer. */
+ *(yy_c_buf_p) = (yy_hold_char);
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
+ }
+
+ YY_CURRENT_BUFFER_LVALUE = new_buffer;
+ zconf_load_buffer_state( );
+
+ /* We don't actually know whether we did this switch during
+ * EOF (zconfwrap()) processing, but the only time this flag
+ * is looked at is after zconfwrap() is called, so it's safe
+ * to go ahead and always set it.
+ */
+ (yy_did_buffer_switch_on_eof) = 1;
+}
+
+static void zconf_load_buffer_state (void)
+{
+ (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
+ (yytext_ptr) = (yy_c_buf_p) = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos;
+ zconfin = YY_CURRENT_BUFFER_LVALUE->yy_input_file;
+ (yy_hold_char) = *(yy_c_buf_p);
+}
+
+/** Allocate and initialize an input buffer state.
+ * @param file A readable stream.
+ * @param size The character buffer size in bytes. When in doubt, use @c YY_BUF_SIZE.
+ *
+ * @return the allocated buffer state.
+ */
+ YY_BUFFER_STATE zconf_create_buffer (FILE * file, int size )
+{
+ YY_BUFFER_STATE b;
+
+ b = (YY_BUFFER_STATE) zconfalloc(sizeof( struct yy_buffer_state ) );
+ if ( ! b )
+ YY_FATAL_ERROR( "out of dynamic memory in zconf_create_buffer()" );
+
+ b->yy_buf_size = size;
+
+ /* yy_ch_buf has to be 2 characters longer than the size given because
+ * we need to put in 2 end-of-buffer characters.
+ */
+ b->yy_ch_buf = (char *) zconfalloc(b->yy_buf_size + 2 );
+ if ( ! b->yy_ch_buf )
+ YY_FATAL_ERROR( "out of dynamic memory in zconf_create_buffer()" );
+
+ b->yy_is_our_buffer = 1;
+
+ zconf_init_buffer(b,file );
+
+ return b;
+}
+
+/** Destroy the buffer.
+ * @param b a buffer created with zconf_create_buffer()
+ *
+ */
+ void zconf_delete_buffer (YY_BUFFER_STATE b )
+{
+
+ if ( ! b )
+ return;
+
+ if ( b == YY_CURRENT_BUFFER ) /* Not sure if we should pop here. */
+ YY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0;
+
+ if ( b->yy_is_our_buffer )
+ zconffree((void *) b->yy_ch_buf );
+
+ zconffree((void *) b );
+}
+
+/* Initializes or reinitializes a buffer.
+ * This function is sometimes called more than once on the same buffer,
+ * such as during a zconfrestart() or at EOF.
+ */
+ static void zconf_init_buffer (YY_BUFFER_STATE b, FILE * file )
+
+{
+ int oerrno = errno;
+
+ zconf_flush_buffer(b );
+
+ b->yy_input_file = file;
+ b->yy_fill_buffer = 1;
+
+ /* If b is the current buffer, then zconf_init_buffer was _probably_
+ * called from zconfrestart() or through yy_get_next_buffer.
+ * In that case, we don't want to reset the lineno or column.
+ */
+ if (b != YY_CURRENT_BUFFER){
+ b->yy_bs_lineno = 1;
+ b->yy_bs_column = 0;
+ }
+
+ b->yy_is_interactive = 0;
+
+ errno = oerrno;
+}
+
+/** Discard all buffered characters. On the next scan, YY_INPUT will be called.
+ * @param b the buffer state to be flushed, usually @c YY_CURRENT_BUFFER.
+ *
+ */
+ void zconf_flush_buffer (YY_BUFFER_STATE b )
+{
+ if ( ! b )
+ return;
+
+ b->yy_n_chars = 0;
+
+ /* We always need two end-of-buffer characters. The first causes
+ * a transition to the end-of-buffer state. The second causes
+ * a jam in that state.
+ */
+ b->yy_ch_buf[0] = YY_END_OF_BUFFER_CHAR;
+ b->yy_ch_buf[1] = YY_END_OF_BUFFER_CHAR;
+
+ b->yy_buf_pos = &b->yy_ch_buf[0];
+
+ b->yy_at_bol = 1;
+ b->yy_buffer_status = YY_BUFFER_NEW;
+
+ if ( b == YY_CURRENT_BUFFER )
+ zconf_load_buffer_state( );
+}
+
+/** Pushes the new state onto the stack. The new state becomes
+ * the current state. This function will allocate the stack
+ * if necessary.
+ * @param new_buffer The new state.
+ *
+ */
+void zconfpush_buffer_state (YY_BUFFER_STATE new_buffer )
+{
+ if (new_buffer == NULL)
+ return;
+
+ zconfensure_buffer_stack();
+
+ /* This block is copied from zconf_switch_to_buffer. */
+ if ( YY_CURRENT_BUFFER )
+ {
+ /* Flush out information for old buffer. */
+ *(yy_c_buf_p) = (yy_hold_char);
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
+ }
+
+ /* Only push if top exists. Otherwise, replace top. */
+ if (YY_CURRENT_BUFFER)
+ (yy_buffer_stack_top)++;
+ YY_CURRENT_BUFFER_LVALUE = new_buffer;
+
+ /* copied from zconf_switch_to_buffer. */
+ zconf_load_buffer_state( );
+ (yy_did_buffer_switch_on_eof) = 1;
+}
+
+/** Removes and deletes the top of the stack, if present.
+ * The next element becomes the new top.
+ *
+ */
+void zconfpop_buffer_state (void)
+{
+ if (!YY_CURRENT_BUFFER)
+ return;
+
+ zconf_delete_buffer(YY_CURRENT_BUFFER );
+ YY_CURRENT_BUFFER_LVALUE = NULL;
+ if ((yy_buffer_stack_top) > 0)
+ --(yy_buffer_stack_top);
+
+ if (YY_CURRENT_BUFFER) {
+ zconf_load_buffer_state( );
+ (yy_did_buffer_switch_on_eof) = 1;
+ }
+}
+
+/* Allocates the stack if it does not exist.
+ * Guarantees space for at least one push.
+ */
+static void zconfensure_buffer_stack (void)
+{
+ int num_to_alloc;
+
+ if (!(yy_buffer_stack)) {
+
+ /* First allocation is just for 2 elements, since we don't know if this
+ * scanner will even need a stack. We use 2 instead of 1 to avoid an
+ * immediate realloc on the next call.
+ */
+ num_to_alloc = 1;
+ (yy_buffer_stack) = (struct yy_buffer_state**)zconfalloc
+ (num_to_alloc * sizeof(struct yy_buffer_state*)
+ );
+ if ( ! (yy_buffer_stack) )
+ YY_FATAL_ERROR( "out of dynamic memory in zconfensure_buffer_stack()" );
+
+ memset((yy_buffer_stack), 0, num_to_alloc * sizeof(struct yy_buffer_state*));
+
+ (yy_buffer_stack_max) = num_to_alloc;
+ (yy_buffer_stack_top) = 0;
+ return;
+ }
+
+ if ((yy_buffer_stack_top) >= ((yy_buffer_stack_max)) - 1){
+
+ /* Increase the buffer to prepare for a possible push. */
+ int grow_size = 8 /* arbitrary grow size */;
+
+ num_to_alloc = (yy_buffer_stack_max) + grow_size;
+ (yy_buffer_stack) = (struct yy_buffer_state**)zconfrealloc
+ ((yy_buffer_stack),
+ num_to_alloc * sizeof(struct yy_buffer_state*)
+ );
+ if ( ! (yy_buffer_stack) )
+ YY_FATAL_ERROR( "out of dynamic memory in zconfensure_buffer_stack()" );
+
+ /* zero only the new slots.*/
+ memset((yy_buffer_stack) + (yy_buffer_stack_max), 0, grow_size * sizeof(struct yy_buffer_state*));
+ (yy_buffer_stack_max) = num_to_alloc;
+ }
+}
+
+/** Setup the input buffer state to scan directly from a user-specified character buffer.
+ * @param base the character buffer
+ * @param size the size in bytes of the character buffer
+ *
+ * @return the newly allocated buffer state object.
+ */
+YY_BUFFER_STATE zconf_scan_buffer (char * base, yy_size_t size )
+{
+ YY_BUFFER_STATE b;
+
+ if ( size < 2 ||
+ base[size-2] != YY_END_OF_BUFFER_CHAR ||
+ base[size-1] != YY_END_OF_BUFFER_CHAR )
+ /* They forgot to leave room for the EOB's. */
+ return 0;
+
+ b = (YY_BUFFER_STATE) zconfalloc(sizeof( struct yy_buffer_state ) );
+ if ( ! b )
+ YY_FATAL_ERROR( "out of dynamic memory in zconf_scan_buffer()" );
+
+ b->yy_buf_size = size - 2; /* "- 2" to take care of EOB's */
+ b->yy_buf_pos = b->yy_ch_buf = base;
+ b->yy_is_our_buffer = 0;
+ b->yy_input_file = 0;
+ b->yy_n_chars = b->yy_buf_size;
+ b->yy_is_interactive = 0;
+ b->yy_at_bol = 1;
+ b->yy_fill_buffer = 0;
+ b->yy_buffer_status = YY_BUFFER_NEW;
+
+ zconf_switch_to_buffer(b );
+
+ return b;
+}
+
+/** Setup the input buffer state to scan a string. The next call to zconflex() will
+ * scan from a @e copy of @a str.
+ * @param yystr a NUL-terminated string to scan
+ *
+ * @return the newly allocated buffer state object.
+ * @note If you want to scan bytes that may contain NUL values, then use
+ * zconf_scan_bytes() instead.
+ */
+YY_BUFFER_STATE zconf_scan_string (yyconst char * yystr )
+{
+
+ return zconf_scan_bytes(yystr,strlen(yystr) );
+}
+
+/** Setup the input buffer state to scan the given bytes. The next call to zconflex() will
+ * scan from a @e copy of @a bytes.
+ * @param bytes the byte buffer to scan
+ * @param len the number of bytes in the buffer pointed to by @a bytes.
+ *
+ * @return the newly allocated buffer state object.
+ */
+YY_BUFFER_STATE zconf_scan_bytes (yyconst char * yybytes, int _yybytes_len )
+{
+ YY_BUFFER_STATE b;
+ char *buf;
+ yy_size_t n;
+ int i;
+
+ /* Get memory for full buffer, including space for trailing EOB's. */
+ n = _yybytes_len + 2;
+ buf = (char *) zconfalloc(n );
+ if ( ! buf )
+ YY_FATAL_ERROR( "out of dynamic memory in zconf_scan_bytes()" );
+
+ for ( i = 0; i < _yybytes_len; ++i )
+ buf[i] = yybytes[i];
+
+ buf[_yybytes_len] = buf[_yybytes_len+1] = YY_END_OF_BUFFER_CHAR;
+
+ b = zconf_scan_buffer(buf,n );
+ if ( ! b )
+ YY_FATAL_ERROR( "bad buffer in zconf_scan_bytes()" );
+
+ /* It's okay to grow etc. this buffer, and we should throw it
+ * away when we're done.
+ */
+ b->yy_is_our_buffer = 1;
+
+ return b;
+}
+
+#ifndef YY_EXIT_FAILURE
+#define YY_EXIT_FAILURE 2
+#endif
+
+static void yy_fatal_error (yyconst char* msg )
+{
+ (void) fprintf( stderr, "%s\n", msg );
+ exit( YY_EXIT_FAILURE );
+}
+
+/* Redefine yyless() so it works in section 3 code. */
+
+#undef yyless
+#define yyless(n) \
+ do \
+ { \
+ /* Undo effects of setting up zconftext. */ \
+ int yyless_macro_arg = (n); \
+ YY_LESS_LINENO(yyless_macro_arg);\
+ zconftext[zconfleng] = (yy_hold_char); \
+ (yy_c_buf_p) = zconftext + yyless_macro_arg; \
+ (yy_hold_char) = *(yy_c_buf_p); \
+ *(yy_c_buf_p) = '\0'; \
+ zconfleng = yyless_macro_arg; \
+ } \
+ while ( 0 )
+
+/* Accessor methods (get/set functions) to struct members. */
+
+/** Get the current line number.
+ *
+ */
+int zconfget_lineno (void)
+{
+
+ return zconflineno;
+}
+
+/** Get the input stream.
+ *
+ */
+FILE *zconfget_in (void)
+{
+ return zconfin;
+}
+
+/** Get the output stream.
+ *
+ */
+FILE *zconfget_out (void)
+{
+ return zconfout;
+}
+
+/** Get the length of the current token.
+ *
+ */
+int zconfget_leng (void)
+{
+ return zconfleng;
+}
+
+/** Get the current token.
+ *
+ */
+
+char *zconfget_text (void)
+{
+ return zconftext;
+}
+
+/** Set the current line number.
+ * @param line_number
+ *
+ */
+void zconfset_lineno (int line_number )
+{
+
+ zconflineno = line_number;
+}
+
+/** Set the input stream. This does not discard the current
+ * input buffer.
+ * @param in_str A readable stream.
+ *
+ * @see zconf_switch_to_buffer
+ */
+void zconfset_in (FILE * in_str )
+{
+ zconfin = in_str ;
+}
+
+void zconfset_out (FILE * out_str )
+{
+ zconfout = out_str ;
+}
+
+int zconfget_debug (void)
+{
+ return zconf_flex_debug;
+}
+
+void zconfset_debug (int bdebug )
+{
+ zconf_flex_debug = bdebug ;
+}
+
+static int yy_init_globals (void)
+{
+ /* Initialization is the same as for the non-reentrant scanner.
+ * This function is called from zconflex_destroy(), so don't allocate here.
+ */
+
+ (yy_buffer_stack) = 0;
+ (yy_buffer_stack_top) = 0;
+ (yy_buffer_stack_max) = 0;
+ (yy_c_buf_p) = (char *) 0;
+ (yy_init) = 0;
+ (yy_start) = 0;
+
+/* Defined in main.c */
+#ifdef YY_STDINIT
+ zconfin = stdin;
+ zconfout = stdout;
+#else
+ zconfin = (FILE *) 0;
+ zconfout = (FILE *) 0;
+#endif
+
+ /* For future reference: Set errno on error, since we are called by
+ * zconflex_init()
+ */
+ return 0;
+}
+
+/* zconflex_destroy is for both reentrant and non-reentrant scanners. */
+int zconflex_destroy (void)
+{
+
+ /* Pop the buffer stack, destroying each element. */
+ while(YY_CURRENT_BUFFER){
+ zconf_delete_buffer(YY_CURRENT_BUFFER );
+ YY_CURRENT_BUFFER_LVALUE = NULL;
+ zconfpop_buffer_state();
+ }
+
+ /* Destroy the stack itself. */
+ zconffree((yy_buffer_stack) );
+ (yy_buffer_stack) = NULL;
+
+ /* Reset the globals. This is important in a non-reentrant scanner so the next time
+ * zconflex() is called, initialization will occur. */
+ yy_init_globals( );
+
+ return 0;
+}
+
+/*
+ * Internal utility routines.
+ */
+
+#ifndef yytext_ptr
+static void yy_flex_strncpy (char* s1, yyconst char * s2, int n )
+{
+ register int i;
+ for ( i = 0; i < n; ++i )
+ s1[i] = s2[i];
+}
+#endif
+
+#ifdef YY_NEED_STRLEN
+static int yy_flex_strlen (yyconst char * s )
+{
+ register int n;
+ for ( n = 0; s[n]; ++n )
+ ;
+
+ return n;
+}
+#endif
+
+void *zconfalloc (yy_size_t size )
+{
+ return (void *) malloc( size );
+}
+
+void *zconfrealloc (void * ptr, yy_size_t size )
+{
+ /* The cast to (char *) in the following accommodates both
+ * implementations that use char* generic pointers, and those
+ * that use void* generic pointers. It works with the latter
+ * because both ANSI C and C++ allow castless assignment from
+ * any pointer type to void*, and deal with argument conversions
+ * as though doing an assignment.
+ */
+ return (void *) realloc( (char *) ptr, size );
+}
+
+void zconffree (void * ptr )
+{
+ free( (char *) ptr ); /* see zconfrealloc() for (char *) cast */
+}
+
+#define YYTABLES_NAME "yytables"
+
+void zconf_starthelp(void)
+{
+ new_string();
+ last_ts = first_ts = 0;
+ BEGIN(HELP);
+}
+
+static void zconf_endhelp(void)
+{
+ zconflval.string = text;
+ BEGIN(INITIAL);
+}
+
+/*
+ * Try to open specified file with following names:
+ * ./name
+ * $(srctree)/name
+ * The latter is used when srctree is separate from objtree
+ * when compiling the kernel.
+ * Return NULL if file is not found.
+ */
+FILE *zconf_fopen(const char *name)
+{
+ char *env, fullname[PATH_MAX+1];
+ FILE *f;
+
+ f = fopen(name, "r");
+ if (!f && name != NULL && name[0] != '/') {
+ env = getenv(SRCTREE);
+ if (env) {
+ sprintf(fullname, "%s/%s", env, name);
+ f = fopen(fullname, "r");
+ }
+ }
+ return f;
+}
+
+void zconf_initscan(const char *name)
+{
+ zconfin = zconf_fopen(name);
+ if (!zconfin) {
+ printf("can't find file %s\n", name);
+ exit(1);
+ }
+
+ current_buf = xmalloc(sizeof(*current_buf));
+ memset(current_buf, 0, sizeof(*current_buf));
+
+ current_file = file_lookup(name);
+ current_file->lineno = 1;
+}
+
+void zconf_nextfile(const char *name)
+{
+ struct file *iter;
+ struct file *file = file_lookup(name);
+ struct buffer *buf = xmalloc(sizeof(*buf));
+ memset(buf, 0, sizeof(*buf));
+
+ current_buf->state = YY_CURRENT_BUFFER;
+ zconfin = zconf_fopen(file->name);
+ if (!zconfin) {
+ printf("%s:%d: can't open file \"%s\"\n",
+ zconf_curname(), zconf_lineno(), file->name);
+ exit(1);
+ }
+ zconf_switch_to_buffer(zconf_create_buffer(zconfin,YY_BUF_SIZE));
+ buf->parent = current_buf;
+ current_buf = buf;
+
+ for (iter = current_file->parent; iter; iter = iter->parent ) {
+ if (!strcmp(current_file->name,iter->name) ) {
+ printf("%s:%d: recursive inclusion detected. "
+ "Inclusion path:\n current file : '%s'\n",
+ zconf_curname(), zconf_lineno(),
+ zconf_curname());
+ iter = current_file->parent;
+ while (iter && \
+ strcmp(iter->name,current_file->name)) {
+ printf(" included from: '%s:%d'\n",
+ iter->name, iter->lineno-1);
+ iter = iter->parent;
+ }
+ if (iter)
+ printf(" included from: '%s:%d'\n",
+ iter->name, iter->lineno+1);
+ exit(1);
+ }
+ }
+ file->lineno = 1;
+ file->parent = current_file;
+ current_file = file;
+}
+
+static void zconf_endfile(void)
+{
+ struct buffer *parent;
+
+ current_file = current_file->parent;
+
+ parent = current_buf->parent;
+ if (parent) {
+ fclose(zconfin);
+ zconf_delete_buffer(YY_CURRENT_BUFFER);
+ zconf_switch_to_buffer(parent->state);
+ }
+ free(current_buf);
+ current_buf = parent;
+}
+
+int zconf_lineno(void)
+{
+ return current_pos.lineno;
+}
+
+const char *zconf_curname(void)
+{
+ return current_pos.file ? current_pos.file->name : "<none>";
+}
+
Index: linux-3.18.21/scripts/kconfig/zconf.tab.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/scripts/kconfig/zconf.tab.c 2018-02-05 14:29:17.000000000 +0800
@@ -0,0 +1,2538 @@
+/* A Bison parser, made by GNU Bison 2.5. */
+
+/* Bison implementation for Yacc-like parsers in C
+
+ Copyright (C) 1984, 1989-1990, 2000-2011 Free Software Foundation, Inc.
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+/* As a special exception, you may create a larger work that contains
+ part or all of the Bison parser skeleton and distribute that work
+ under terms of your choice, so long as that work isn't itself a
+ parser generator using the skeleton or a modified version thereof
+ as a parser skeleton. Alternatively, if you modify or redistribute
+ the parser skeleton itself, you may (at your option) remove this
+ special exception, which will cause the skeleton and the resulting
+ Bison output files to be licensed under the GNU General Public
+ License without this special exception.
+
+ This special exception was added by the Free Software Foundation in
+ version 2.2 of Bison. */
+
+/* C LALR(1) parser skeleton written by Richard Stallman, by
+ simplifying the original so-called "semantic" parser. */
+
+/* All symbols defined below should begin with yy or YY, to avoid
+ infringing on user name space. This should be done even for local
+ variables, as they might otherwise be expanded by user macros.
+ There are some unavoidable exceptions within include files to
+ define necessary library symbols; they are noted "INFRINGES ON
+ USER NAME SPACE" below. */
+
+/* Identify Bison output. */
+#define YYBISON 1
+
+/* Bison version. */
+#define YYBISON_VERSION "2.5"
+
+/* Skeleton name. */
+#define YYSKELETON_NAME "yacc.c"
+
+/* Pure parsers. */
+#define YYPURE 0
+
+/* Push parsers. */
+#define YYPUSH 0
+
+/* Pull parsers. */
+#define YYPULL 1
+
+/* Using locations. */
+#define YYLSP_NEEDED 0
+
+/* Substitute the variable and function names. */
+#define yyparse zconfparse
+#define yylex zconflex
+#define yyerror zconferror
+#define yylval zconflval
+#define yychar zconfchar
+#define yydebug zconfdebug
+#define yynerrs zconfnerrs
+
+
+/* Copy the first part of user declarations. */
+
+
+/*
+ * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org>
+ * Released under the terms of the GNU GPL v2.0.
+ */
+
+#include <ctype.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdbool.h>
+
+#include "lkc.h"
+
+#define printd(mask, fmt...) if (cdebug & (mask)) printf(fmt)
+
+#define PRINTD 0x0001
+#define DEBUG_PARSE 0x0002
+
+int cdebug = PRINTD;
+
+extern int zconflex(void);
+static void zconfprint(const char *err, ...);
+static void zconf_error(const char *err, ...);
+static void zconferror(const char *err);
+static bool zconf_endtoken(const struct kconf_id *id, int starttoken, int endtoken);
+
+struct symbol *symbol_hash[SYMBOL_HASHSIZE];
+
+static struct menu *current_menu, *current_entry;
+
+
+
+
+/* Enabling traces. */
+#ifndef YYDEBUG
+# define YYDEBUG 1
+#endif
+
+/* Enabling verbose error messages. */
+#ifdef YYERROR_VERBOSE
+# undef YYERROR_VERBOSE
+# define YYERROR_VERBOSE 1
+#else
+# define YYERROR_VERBOSE 0
+#endif
+
+/* Enabling the token table. */
+#ifndef YYTOKEN_TABLE
+# define YYTOKEN_TABLE 0
+#endif
+
+
+/* Tokens. */
+#ifndef YYTOKENTYPE
+# define YYTOKENTYPE
+ /* Put the tokens into the symbol table, so that GDB and other debuggers
+ know about them. */
+ enum yytokentype {
+ T_MAINMENU = 258,
+ T_MENU = 259,
+ T_ENDMENU = 260,
+ T_SOURCE = 261,
+ T_CHOICE = 262,
+ T_ENDCHOICE = 263,
+ T_COMMENT = 264,
+ T_CONFIG = 265,
+ T_MENUCONFIG = 266,
+ T_HELP = 267,
+ T_HELPTEXT = 268,
+ T_IF = 269,
+ T_ENDIF = 270,
+ T_DEPENDS = 271,
+ T_OPTIONAL = 272,
+ T_PROMPT = 273,
+ T_TYPE = 274,
+ T_DEFAULT = 275,
+ T_SELECT = 276,
+ T_RANGE = 277,
+ T_VISIBLE = 278,
+ T_OPTION = 279,
+ T_ON = 280,
+ T_WORD = 281,
+ T_WORD_QUOTE = 282,
+ T_UNEQUAL = 283,
+ T_CLOSE_PAREN = 284,
+ T_OPEN_PAREN = 285,
+ T_EOL = 286,
+ T_OR = 287,
+ T_AND = 288,
+ T_EQUAL = 289,
+ T_NOT = 290
+ };
+#endif
+
+
+
+#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
+typedef union YYSTYPE
+{
+
+
+ char *string;
+ struct file *file;
+ struct symbol *symbol;
+ struct expr *expr;
+ struct menu *menu;
+ const struct kconf_id *id;
+
+
+
+} YYSTYPE;
+# define YYSTYPE_IS_TRIVIAL 1
+# define yystype YYSTYPE /* obsolescent; will be withdrawn */
+# define YYSTYPE_IS_DECLARED 1
+#endif
+
+
+/* Copy the second part of user declarations. */
+
+
+/* Include zconf.hash.c here so it can see the token constants. */
+#include "zconf.hash.c"
+
+
+
+#ifdef short
+# undef short
+#endif
+
+#ifdef YYTYPE_UINT8
+typedef YYTYPE_UINT8 yytype_uint8;
+#else
+typedef unsigned char yytype_uint8;
+#endif
+
+#ifdef YYTYPE_INT8
+typedef YYTYPE_INT8 yytype_int8;
+#elif (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+typedef signed char yytype_int8;
+#else
+typedef short int yytype_int8;
+#endif
+
+#ifdef YYTYPE_UINT16
+typedef YYTYPE_UINT16 yytype_uint16;
+#else
+typedef unsigned short int yytype_uint16;
+#endif
+
+#ifdef YYTYPE_INT16
+typedef YYTYPE_INT16 yytype_int16;
+#else
+typedef short int yytype_int16;
+#endif
+
+#ifndef YYSIZE_T
+# ifdef __SIZE_TYPE__
+# define YYSIZE_T __SIZE_TYPE__
+# elif defined size_t
+# define YYSIZE_T size_t
+# elif ! defined YYSIZE_T && (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
+# define YYSIZE_T size_t
+# else
+# define YYSIZE_T unsigned int
+# endif
+#endif
+
+#define YYSIZE_MAXIMUM ((YYSIZE_T) -1)
+
+#ifndef YY_
+# if defined YYENABLE_NLS && YYENABLE_NLS
+# if ENABLE_NLS
+# include <libintl.h> /* INFRINGES ON USER NAME SPACE */
+# define YY_(msgid) dgettext ("bison-runtime", msgid)
+# endif
+# endif
+# ifndef YY_
+# define YY_(msgid) msgid
+# endif
+#endif
+
+/* Suppress unused-variable warnings by "using" E. */
+#if ! defined lint || defined __GNUC__
+# define YYUSE(e) ((void) (e))
+#else
+# define YYUSE(e) /* empty */
+#endif
+
+/* Identity function, used to suppress warnings about constant conditions. */
+#ifndef lint
+# define YYID(n) (n)
+#else
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+static int
+YYID (int yyi)
+#else
+static int
+YYID (yyi)
+ int yyi;
+#endif
+{
+ return yyi;
+}
+#endif
+
+#if ! defined yyoverflow || YYERROR_VERBOSE
+
+/* The parser invokes alloca or malloc; define the necessary symbols. */
+
+# ifdef YYSTACK_USE_ALLOCA
+# if YYSTACK_USE_ALLOCA
+# ifdef __GNUC__
+# define YYSTACK_ALLOC __builtin_alloca
+# elif defined __BUILTIN_VA_ARG_INCR
+# include <alloca.h> /* INFRINGES ON USER NAME SPACE */
+# elif defined _AIX
+# define YYSTACK_ALLOC __alloca
+# elif defined _MSC_VER
+# include <malloc.h> /* INFRINGES ON USER NAME SPACE */
+# define alloca _alloca
+# else
+# define YYSTACK_ALLOC alloca
+# if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
+# ifndef EXIT_SUCCESS
+# define EXIT_SUCCESS 0
+# endif
+# endif
+# endif
+# endif
+# endif
+
+# ifdef YYSTACK_ALLOC
+ /* Pacify GCC's `empty if-body' warning. */
+# define YYSTACK_FREE(Ptr) do { /* empty */; } while (YYID (0))
+# ifndef YYSTACK_ALLOC_MAXIMUM
+ /* The OS might guarantee only one guard page at the bottom of the stack,
+ and a page size can be as small as 4096 bytes. So we cannot safely
+ invoke alloca (N) if N exceeds 4096. Use a slightly smaller number
+ to allow for a few compiler-allocated temporary stack slots. */
+# define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */
+# endif
+# else
+# define YYSTACK_ALLOC YYMALLOC
+# define YYSTACK_FREE YYFREE
+# ifndef YYSTACK_ALLOC_MAXIMUM
+# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
+# endif
+# if (defined __cplusplus && ! defined EXIT_SUCCESS \
+ && ! ((defined YYMALLOC || defined malloc) \
+ && (defined YYFREE || defined free)))
+# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
+# ifndef EXIT_SUCCESS
+# define EXIT_SUCCESS 0
+# endif
+# endif
+# ifndef YYMALLOC
+# define YYMALLOC malloc
+# if ! defined malloc && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
+# endif
+# endif
+# ifndef YYFREE
+# define YYFREE free
+# if ! defined free && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+void free (void *); /* INFRINGES ON USER NAME SPACE */
+# endif
+# endif
+# endif
+#endif /* ! defined yyoverflow || YYERROR_VERBOSE */
+
+
+#if (! defined yyoverflow \
+ && (! defined __cplusplus \
+ || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
+
+/* A type that is properly aligned for any stack member. */
+union yyalloc
+{
+ yytype_int16 yyss_alloc;
+ YYSTYPE yyvs_alloc;
+};
+
+/* The size of the maximum gap between one aligned stack and the next. */
+# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)
+
+/* The size of an array large enough to hold all stacks, each with
+ N elements. */
+# define YYSTACK_BYTES(N) \
+ ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \
+ + YYSTACK_GAP_MAXIMUM)
+
+# define YYCOPY_NEEDED 1
+
+/* Relocate STACK from its old location to the new one. The
+ local variables YYSIZE and YYSTACKSIZE give the old and new number of
+ elements in the stack, and YYPTR gives the new location of the
+ stack. Advance YYPTR to a properly aligned location for the next
+ stack. */
+# define YYSTACK_RELOCATE(Stack_alloc, Stack) \
+ do \
+ { \
+ YYSIZE_T yynewbytes; \
+ YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \
+ Stack = &yyptr->Stack_alloc; \
+ yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
+ yyptr += yynewbytes / sizeof (*yyptr); \
+ } \
+ while (YYID (0))
+
+#endif
+
+#if defined YYCOPY_NEEDED && YYCOPY_NEEDED
+/* Copy COUNT objects from FROM to TO. The source and destination do
+ not overlap. */
+# ifndef YYCOPY
+# if defined __GNUC__ && 1 < __GNUC__
+# define YYCOPY(To, From, Count) \
+ __builtin_memcpy (To, From, (Count) * sizeof (*(From)))
+# else
+# define YYCOPY(To, From, Count) \
+ do \
+ { \
+ YYSIZE_T yyi; \
+ for (yyi = 0; yyi < (Count); yyi++) \
+ (To)[yyi] = (From)[yyi]; \
+ } \
+ while (YYID (0))
+# endif
+# endif
+#endif /* !YYCOPY_NEEDED */
+
+/* YYFINAL -- State number of the termination state. */
+#define YYFINAL 11
+/* YYLAST -- Last index in YYTABLE. */
+#define YYLAST 290
+
+/* YYNTOKENS -- Number of terminals. */
+#define YYNTOKENS 36
+/* YYNNTS -- Number of nonterminals. */
+#define YYNNTS 50
+/* YYNRULES -- Number of rules. */
+#define YYNRULES 118
+/* YYNRULES -- Number of states. */
+#define YYNSTATES 191
+
+/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */
+#define YYUNDEFTOK 2
+#define YYMAXUTOK 290
+
+#define YYTRANSLATE(YYX) \
+ ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
+
+/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. */
+static const yytype_uint8 yytranslate[] =
+{
+ 0, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 1, 2, 3, 4,
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35
+};
+
+#if YYDEBUG
+/* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in
+ YYRHS. */
+static const yytype_uint16 yyprhs[] =
+{
+ 0, 0, 3, 6, 8, 11, 13, 14, 17, 20,
+ 23, 26, 31, 36, 40, 42, 44, 46, 48, 50,
+ 52, 54, 56, 58, 60, 62, 64, 66, 68, 72,
+ 75, 79, 82, 86, 89, 90, 93, 96, 99, 102,
+ 105, 108, 112, 117, 122, 127, 133, 137, 138, 142,
+ 143, 146, 150, 153, 155, 159, 160, 163, 166, 169,
+ 172, 175, 180, 184, 187, 192, 193, 196, 200, 202,
+ 206, 207, 210, 213, 216, 220, 224, 228, 230, 234,
+ 235, 238, 241, 244, 248, 252, 255, 258, 261, 262,
+ 265, 268, 271, 276, 277, 280, 283, 286, 287, 290,
+ 292, 294, 297, 300, 303, 305, 308, 309, 312, 314,
+ 318, 322, 326, 329, 333, 337, 339, 341, 342
+};
+
+/* YYRHS -- A `-1'-separated list of the rules' RHS. */
+static const yytype_int8 yyrhs[] =
+{
+ 37, 0, -1, 81, 38, -1, 38, -1, 63, 39,
+ -1, 39, -1, -1, 39, 41, -1, 39, 55, -1,
+ 39, 67, -1, 39, 80, -1, 39, 26, 1, 31,
+ -1, 39, 40, 1, 31, -1, 39, 1, 31, -1,
+ 16, -1, 18, -1, 19, -1, 21, -1, 17, -1,
+ 22, -1, 20, -1, 23, -1, 31, -1, 61, -1,
+ 71, -1, 44, -1, 46, -1, 69, -1, 26, 1,
+ 31, -1, 1, 31, -1, 10, 26, 31, -1, 43,
+ 47, -1, 11, 26, 31, -1, 45, 47, -1, -1,
+ 47, 48, -1, 47, 49, -1, 47, 75, -1, 47,
+ 73, -1, 47, 42, -1, 47, 31, -1, 19, 78,
+ 31, -1, 18, 79, 82, 31, -1, 20, 83, 82,
+ 31, -1, 21, 26, 82, 31, -1, 22, 84, 84,
+ 82, 31, -1, 24, 50, 31, -1, -1, 50, 26,
+ 51, -1, -1, 34, 79, -1, 7, 85, 31, -1,
+ 52, 56, -1, 80, -1, 53, 58, 54, -1, -1,
+ 56, 57, -1, 56, 75, -1, 56, 73, -1, 56,
+ 31, -1, 56, 42, -1, 18, 79, 82, 31, -1,
+ 19, 78, 31, -1, 17, 31, -1, 20, 26, 82,
+ 31, -1, -1, 58, 41, -1, 14, 83, 81, -1,
+ 80, -1, 59, 62, 60, -1, -1, 62, 41, -1,
+ 62, 67, -1, 62, 55, -1, 3, 79, 81, -1,
+ 4, 79, 31, -1, 64, 76, 74, -1, 80, -1,
+ 65, 68, 66, -1, -1, 68, 41, -1, 68, 67,
+ -1, 68, 55, -1, 6, 79, 31, -1, 9, 79,
+ 31, -1, 70, 74, -1, 12, 31, -1, 72, 13,
+ -1, -1, 74, 75, -1, 74, 31, -1, 74, 42,
+ -1, 16, 25, 83, 31, -1, -1, 76, 77, -1,
+ 76, 31, -1, 23, 82, -1, -1, 79, 82, -1,
+ 26, -1, 27, -1, 5, 31, -1, 8, 31, -1,
+ 15, 31, -1, 31, -1, 81, 31, -1, -1, 14,
+ 83, -1, 84, -1, 84, 34, 84, -1, 84, 28,
+ 84, -1, 30, 83, 29, -1, 35, 83, -1, 83,
+ 32, 83, -1, 83, 33, 83, -1, 26, -1, 27,
+ -1, -1, 26, -1
+};
+
+/* YYRLINE[YYN] -- source line where rule number YYN was defined. */
+static const yytype_uint16 yyrline[] =
+{
+ 0, 103, 103, 103, 105, 105, 107, 109, 110, 111,
+ 112, 113, 114, 118, 122, 122, 122, 122, 122, 122,
+ 122, 122, 126, 127, 128, 129, 130, 131, 135, 136,
+ 142, 150, 156, 164, 174, 176, 177, 178, 179, 180,
+ 181, 184, 192, 198, 208, 214, 220, 223, 225, 236,
+ 237, 242, 251, 256, 264, 267, 269, 270, 271, 272,
+ 273, 276, 282, 293, 299, 309, 311, 316, 324, 332,
+ 335, 337, 338, 339, 344, 351, 358, 363, 371, 374,
+ 376, 377, 378, 381, 389, 396, 403, 409, 416, 418,
+ 419, 420, 423, 431, 433, 434, 437, 444, 446, 451,
+ 452, 455, 456, 457, 461, 462, 465, 466, 469, 470,
+ 471, 472, 473, 474, 475, 478, 479, 482, 483
+};
+#endif
+
+#if YYDEBUG || YYERROR_VERBOSE || YYTOKEN_TABLE
+/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
+ First, the terminals, then, starting at YYNTOKENS, nonterminals. */
+static const char *const yytname[] =
+{
+ "$end", "error", "$undefined", "T_MAINMENU", "T_MENU", "T_ENDMENU",
+ "T_SOURCE", "T_CHOICE", "T_ENDCHOICE", "T_COMMENT", "T_CONFIG",
+ "T_MENUCONFIG", "T_HELP", "T_HELPTEXT", "T_IF", "T_ENDIF", "T_DEPENDS",
+ "T_OPTIONAL", "T_PROMPT", "T_TYPE", "T_DEFAULT", "T_SELECT", "T_RANGE",
+ "T_VISIBLE", "T_OPTION", "T_ON", "T_WORD", "T_WORD_QUOTE", "T_UNEQUAL",
+ "T_CLOSE_PAREN", "T_OPEN_PAREN", "T_EOL", "T_OR", "T_AND", "T_EQUAL",
+ "T_NOT", "$accept", "input", "start", "stmt_list", "option_name",
+ "common_stmt", "option_error", "config_entry_start", "config_stmt",
+ "menuconfig_entry_start", "menuconfig_stmt", "config_option_list",
+ "config_option", "symbol_option", "symbol_option_list",
+ "symbol_option_arg", "choice", "choice_entry", "choice_end",
+ "choice_stmt", "choice_option_list", "choice_option", "choice_block",
+ "if_entry", "if_end", "if_stmt", "if_block", "mainmenu_stmt", "menu",
+ "menu_entry", "menu_end", "menu_stmt", "menu_block", "source_stmt",
+ "comment", "comment_stmt", "help_start", "help", "depends_list",
+ "depends", "visibility_list", "visible", "prompt_stmt_opt", "prompt",
+ "end", "nl", "if_expr", "expr", "symbol", "word_opt", 0
+};
+#endif
+
+# ifdef YYPRINT
+/* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to
+ token YYLEX-NUM. */
+static const yytype_uint16 yytoknum[] =
+{
+ 0, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289, 290
+};
+# endif
+
+/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
+static const yytype_uint8 yyr1[] =
+{
+ 0, 36, 37, 37, 38, 38, 39, 39, 39, 39,
+ 39, 39, 39, 39, 40, 40, 40, 40, 40, 40,
+ 40, 40, 41, 41, 41, 41, 41, 41, 42, 42,
+ 43, 44, 45, 46, 47, 47, 47, 47, 47, 47,
+ 47, 48, 48, 48, 48, 48, 49, 50, 50, 51,
+ 51, 52, 53, 54, 55, 56, 56, 56, 56, 56,
+ 56, 57, 57, 57, 57, 58, 58, 59, 60, 61,
+ 62, 62, 62, 62, 63, 64, 65, 66, 67, 68,
+ 68, 68, 68, 69, 70, 71, 72, 73, 74, 74,
+ 74, 74, 75, 76, 76, 76, 77, 78, 78, 79,
+ 79, 80, 80, 80, 81, 81, 82, 82, 83, 83,
+ 83, 83, 83, 83, 83, 84, 84, 85, 85
+};
+
+/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */
+static const yytype_uint8 yyr2[] =
+{
+ 0, 2, 2, 1, 2, 1, 0, 2, 2, 2,
+ 2, 4, 4, 3, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 3, 2,
+ 3, 2, 3, 2, 0, 2, 2, 2, 2, 2,
+ 2, 3, 4, 4, 4, 5, 3, 0, 3, 0,
+ 2, 3, 2, 1, 3, 0, 2, 2, 2, 2,
+ 2, 4, 3, 2, 4, 0, 2, 3, 1, 3,
+ 0, 2, 2, 2, 3, 3, 3, 1, 3, 0,
+ 2, 2, 2, 3, 3, 2, 2, 2, 0, 2,
+ 2, 2, 4, 0, 2, 2, 2, 0, 2, 1,
+ 1, 2, 2, 2, 1, 2, 0, 2, 1, 3,
+ 3, 3, 2, 3, 3, 1, 1, 0, 1
+};
+
+/* YYDEFACT[STATE-NAME] -- Default reduction number in state STATE-NUM.
+ Performed when YYTABLE doesn't specify something else to do. Zero
+ means the default is an error. */
+static const yytype_uint8 yydefact[] =
+{
+ 6, 0, 104, 0, 3, 0, 6, 6, 99, 100,
+ 0, 1, 0, 0, 0, 0, 117, 0, 0, 0,
+ 0, 0, 0, 14, 18, 15, 16, 20, 17, 19,
+ 21, 0, 22, 0, 7, 34, 25, 34, 26, 55,
+ 65, 8, 70, 23, 93, 79, 9, 27, 88, 24,
+ 10, 0, 105, 2, 74, 13, 0, 101, 0, 118,
+ 0, 102, 0, 0, 0, 115, 116, 0, 0, 0,
+ 108, 103, 0, 0, 0, 0, 0, 0, 0, 88,
+ 0, 0, 75, 83, 51, 84, 30, 32, 0, 112,
+ 0, 0, 67, 0, 0, 11, 12, 0, 0, 0,
+ 0, 97, 0, 0, 0, 47, 0, 40, 39, 35,
+ 36, 0, 38, 37, 0, 0, 97, 0, 59, 60,
+ 56, 58, 57, 66, 54, 53, 71, 73, 69, 72,
+ 68, 106, 95, 0, 94, 80, 82, 78, 81, 77,
+ 90, 91, 89, 111, 113, 114, 110, 109, 29, 86,
+ 0, 106, 0, 106, 106, 106, 0, 0, 0, 87,
+ 63, 106, 0, 106, 0, 96, 0, 0, 41, 98,
+ 0, 0, 106, 49, 46, 28, 0, 62, 0, 107,
+ 92, 42, 43, 44, 0, 0, 48, 61, 64, 45,
+ 50
+};
+
+/* YYDEFGOTO[NTERM-NUM]. */
+static const yytype_int16 yydefgoto[] =
+{
+ -1, 3, 4, 5, 33, 34, 108, 35, 36, 37,
+ 38, 74, 109, 110, 157, 186, 39, 40, 124, 41,
+ 76, 120, 77, 42, 128, 43, 78, 6, 44, 45,
+ 137, 46, 80, 47, 48, 49, 111, 112, 81, 113,
+ 79, 134, 152, 153, 50, 7, 165, 69, 70, 60
+};
+
+/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
+ STATE-NUM. */
+#define YYPACT_NINF -90
+static const yytype_int16 yypact[] =
+{
+ 4, 42, -90, 96, -90, 111, -90, 15, -90, -90,
+ 75, -90, 82, 42, 104, 42, 110, 107, 42, 115,
+ 125, -4, 121, -90, -90, -90, -90, -90, -90, -90,
+ -90, 162, -90, 163, -90, -90, -90, -90, -90, -90,
+ -90, -90, -90, -90, -90, -90, -90, -90, -90, -90,
+ -90, 139, -90, -90, 138, -90, 142, -90, 143, -90,
+ 152, -90, 164, 167, 168, -90, -90, -4, -4, 77,
+ -18, -90, 177, 185, 33, 71, 195, 247, 236, -2,
+ 236, 171, -90, -90, -90, -90, -90, -90, 41, -90,
+ -4, -4, 138, 97, 97, -90, -90, 186, 187, 194,
+ 42, 42, -4, 196, 97, -90, 219, -90, -90, -90,
+ -90, 210, -90, -90, 204, 42, 42, 199, -90, -90,
+ -90, -90, -90, -90, -90, -90, -90, -90, -90, -90,
+ -90, 222, -90, 223, -90, -90, -90, -90, -90, -90,
+ -90, -90, -90, -90, 215, -90, -90, -90, -90, -90,
+ -4, 222, 228, 222, -5, 222, 97, 35, 229, -90,
+ -90, 222, 232, 222, -4, -90, 135, 233, -90, -90,
+ 234, 235, 222, 240, -90, -90, 237, -90, 239, -13,
+ -90, -90, -90, -90, 244, 42, -90, -90, -90, -90,
+ -90
+};
+
+/* YYPGOTO[NTERM-NUM]. */
+static const yytype_int16 yypgoto[] =
+{
+ -90, -90, 269, 271, -90, 23, -70, -90, -90, -90,
+ -90, 243, -90, -90, -90, -90, -90, -90, -90, -48,
+ -90, -90, -90, -90, -90, -90, -90, -90, -90, -90,
+ -90, -20, -90, -90, -90, -90, -90, 206, 205, -68,
+ -90, -90, 169, -1, 27, -7, 118, -66, -89, -90
+};
+
+/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If
+ positive, shift that token. If negative, reduce the rule which
+ number is the opposite. If YYTABLE_NINF, syntax error. */
+#define YYTABLE_NINF -86
+static const yytype_int16 yytable[] =
+{
+ 10, 88, 89, 54, 146, 147, 119, 1, 122, 164,
+ 93, 141, 56, 142, 58, 156, 94, 62, 1, 90,
+ 91, 131, 65, 66, 144, 145, 67, 90, 91, 132,
+ 127, 68, 136, -31, 97, 2, 154, -31, -31, -31,
+ -31, -31, -31, -31, -31, 98, 52, -31, -31, 99,
+ -31, 100, 101, 102, 103, 104, -31, 105, 129, 106,
+ 138, 173, 92, 141, 107, 142, 174, 172, 8, 9,
+ 143, -33, 97, 90, 91, -33, -33, -33, -33, -33,
+ -33, -33, -33, 98, 166, -33, -33, 99, -33, 100,
+ 101, 102, 103, 104, -33, 105, 11, 106, 179, 151,
+ 123, 126, 107, 135, 125, 130, 2, 139, 2, 90,
+ 91, -5, 12, 55, 161, 13, 14, 15, 16, 17,
+ 18, 19, 20, 65, 66, 21, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30, 57, 59, 31, 61, -4,
+ 12, 63, 32, 13, 14, 15, 16, 17, 18, 19,
+ 20, 64, 71, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 72, 73, 31, 180, 90, 91, 52,
+ 32, -85, 97, 82, 83, -85, -85, -85, -85, -85,
+ -85, -85, -85, 84, 190, -85, -85, 99, -85, -85,
+ -85, -85, -85, -85, -85, 85, 97, 106, 86, 87,
+ -52, -52, 140, -52, -52, -52, -52, 98, 95, -52,
+ -52, 99, 114, 115, 116, 117, 96, 148, 149, 150,
+ 158, 106, 155, 159, 97, 163, 118, -76, -76, -76,
+ -76, -76, -76, -76, -76, 160, 164, -76, -76, 99,
+ 13, 14, 15, 16, 17, 18, 19, 20, 91, 106,
+ 21, 22, 14, 15, 140, 17, 18, 19, 20, 168,
+ 175, 21, 22, 177, 181, 182, 183, 32, 187, 167,
+ 188, 169, 170, 171, 185, 189, 53, 51, 32, 176,
+ 75, 178, 121, 0, 133, 162, 0, 0, 0, 0,
+ 184
+};
+
+#define yypact_value_is_default(yystate) \
+ ((yystate) == (-90))
+
+#define yytable_value_is_error(yytable_value) \
+ YYID (0)
+
+static const yytype_int16 yycheck[] =
+{
+ 1, 67, 68, 10, 93, 94, 76, 3, 76, 14,
+ 28, 81, 13, 81, 15, 104, 34, 18, 3, 32,
+ 33, 23, 26, 27, 90, 91, 30, 32, 33, 31,
+ 78, 35, 80, 0, 1, 31, 102, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 31, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 78, 26,
+ 80, 26, 69, 133, 31, 133, 31, 156, 26, 27,
+ 29, 0, 1, 32, 33, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 150, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 0, 26, 164, 100,
+ 77, 78, 31, 80, 77, 78, 31, 80, 31, 32,
+ 33, 0, 1, 31, 115, 4, 5, 6, 7, 8,
+ 9, 10, 11, 26, 27, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 31, 26, 26, 31, 0,
+ 1, 26, 31, 4, 5, 6, 7, 8, 9, 10,
+ 11, 26, 31, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 1, 1, 26, 31, 32, 33, 31,
+ 31, 0, 1, 31, 31, 4, 5, 6, 7, 8,
+ 9, 10, 11, 31, 185, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 31, 1, 26, 31, 31,
+ 5, 6, 31, 8, 9, 10, 11, 12, 31, 14,
+ 15, 16, 17, 18, 19, 20, 31, 31, 31, 25,
+ 1, 26, 26, 13, 1, 26, 31, 4, 5, 6,
+ 7, 8, 9, 10, 11, 31, 14, 14, 15, 16,
+ 4, 5, 6, 7, 8, 9, 10, 11, 33, 26,
+ 14, 15, 5, 6, 31, 8, 9, 10, 11, 31,
+ 31, 14, 15, 31, 31, 31, 31, 31, 31, 151,
+ 31, 153, 154, 155, 34, 31, 7, 6, 31, 161,
+ 37, 163, 76, -1, 79, 116, -1, -1, -1, -1,
+ 172
+};
+
+/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
+ symbol of state STATE-NUM. */
+static const yytype_uint8 yystos[] =
+{
+ 0, 3, 31, 37, 38, 39, 63, 81, 26, 27,
+ 79, 0, 1, 4, 5, 6, 7, 8, 9, 10,
+ 11, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+ 23, 26, 31, 40, 41, 43, 44, 45, 46, 52,
+ 53, 55, 59, 61, 64, 65, 67, 69, 70, 71,
+ 80, 39, 31, 38, 81, 31, 79, 31, 79, 26,
+ 85, 31, 79, 26, 26, 26, 27, 30, 35, 83,
+ 84, 31, 1, 1, 47, 47, 56, 58, 62, 76,
+ 68, 74, 31, 31, 31, 31, 31, 31, 83, 83,
+ 32, 33, 81, 28, 34, 31, 31, 1, 12, 16,
+ 18, 19, 20, 21, 22, 24, 26, 31, 42, 48,
+ 49, 72, 73, 75, 17, 18, 19, 20, 31, 42,
+ 57, 73, 75, 41, 54, 80, 41, 55, 60, 67,
+ 80, 23, 31, 74, 77, 41, 55, 66, 67, 80,
+ 31, 42, 75, 29, 83, 83, 84, 84, 31, 31,
+ 25, 79, 78, 79, 83, 26, 84, 50, 1, 13,
+ 31, 79, 78, 26, 14, 82, 83, 82, 31, 82,
+ 82, 82, 84, 26, 31, 31, 82, 31, 82, 83,
+ 31, 31, 31, 31, 82, 34, 51, 31, 31, 31,
+ 79
+};
+
+#define yyerrok (yyerrstatus = 0)
+#define yyclearin (yychar = YYEMPTY)
+#define YYEMPTY (-2)
+#define YYEOF 0
+
+#define YYACCEPT goto yyacceptlab
+#define YYABORT goto yyabortlab
+#define YYERROR goto yyerrorlab
+
+
+/* Like YYERROR except do call yyerror. This remains here temporarily
+ to ease the transition to the new meaning of YYERROR, for GCC.
+ Once GCC version 2 has supplanted version 1, this can go. However,
+ YYFAIL appears to be in use. Nevertheless, it is formally deprecated
+ in Bison 2.4.2's NEWS entry, where a plan to phase it out is
+ discussed. */
+
+#define YYFAIL goto yyerrlab
+#if defined YYFAIL
+ /* This is here to suppress warnings from the GCC cpp's
+ -Wunused-macros. Normally we don't worry about that warning, but
+ some users do, and we want to make it easy for users to remove
+ YYFAIL uses, which will produce warnings from Bison 2.5. */
+#endif
+
+#define YYRECOVERING() (!!yyerrstatus)
+
+#define YYBACKUP(Token, Value) \
+do \
+ if (yychar == YYEMPTY && yylen == 1) \
+ { \
+ yychar = (Token); \
+ yylval = (Value); \
+ YYPOPSTACK (1); \
+ goto yybackup; \
+ } \
+ else \
+ { \
+ yyerror (YY_("syntax error: cannot back up")); \
+ YYERROR; \
+ } \
+while (YYID (0))
+
+
+#define YYTERROR 1
+#define YYERRCODE 256
+
+
+/* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N].
+ If N is 0, then set CURRENT to the empty location which ends
+ the previous symbol: RHS[0] (always defined). */
+
+#define YYRHSLOC(Rhs, K) ((Rhs)[K])
+#ifndef YYLLOC_DEFAULT
+# define YYLLOC_DEFAULT(Current, Rhs, N) \
+ do \
+ if (YYID (N)) \
+ { \
+ (Current).first_line = YYRHSLOC (Rhs, 1).first_line; \
+ (Current).first_column = YYRHSLOC (Rhs, 1).first_column; \
+ (Current).last_line = YYRHSLOC (Rhs, N).last_line; \
+ (Current).last_column = YYRHSLOC (Rhs, N).last_column; \
+ } \
+ else \
+ { \
+ (Current).first_line = (Current).last_line = \
+ YYRHSLOC (Rhs, 0).last_line; \
+ (Current).first_column = (Current).last_column = \
+ YYRHSLOC (Rhs, 0).last_column; \
+ } \
+ while (YYID (0))
+#endif
+
+
+/* This macro is provided for backward compatibility. */
+
+#ifndef YY_LOCATION_PRINT
+# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
+#endif
+
+
+/* YYLEX -- calling `yylex' with the right arguments. */
+
+#ifdef YYLEX_PARAM
+# define YYLEX yylex (YYLEX_PARAM)
+#else
+# define YYLEX yylex ()
+#endif
+
+/* Enable debugging if requested. */
+#if YYDEBUG
+
+# ifndef YYFPRINTF
+# include <stdio.h> /* INFRINGES ON USER NAME SPACE */
+# define YYFPRINTF fprintf
+# endif
+
+# define YYDPRINTF(Args) \
+do { \
+ if (yydebug) \
+ YYFPRINTF Args; \
+} while (YYID (0))
+
+# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \
+do { \
+ if (yydebug) \
+ { \
+ YYFPRINTF (stderr, "%s ", Title); \
+ yy_symbol_print (stderr, \
+ Type, Value); \
+ YYFPRINTF (stderr, "\n"); \
+ } \
+} while (YYID (0))
+
+
+/*--------------------------------.
+| Print this symbol on YYOUTPUT. |
+`--------------------------------*/
+
+/*ARGSUSED*/
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+static void
+yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
+#else
+static void
+yy_symbol_value_print (yyoutput, yytype, yyvaluep)
+ FILE *yyoutput;
+ int yytype;
+ YYSTYPE const * const yyvaluep;
+#endif
+{
+ if (!yyvaluep)
+ return;
+# ifdef YYPRINT
+ if (yytype < YYNTOKENS)
+ YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);
+# else
+ YYUSE (yyoutput);
+# endif
+ switch (yytype)
+ {
+ default:
+ break;
+ }
+}
+
+
+/*--------------------------------.
+| Print this symbol on YYOUTPUT. |
+`--------------------------------*/
+
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+static void
+yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
+#else
+static void
+yy_symbol_print (yyoutput, yytype, yyvaluep)
+ FILE *yyoutput;
+ int yytype;
+ YYSTYPE const * const yyvaluep;
+#endif
+{
+ if (yytype < YYNTOKENS)
+ YYFPRINTF (yyoutput, "token %s (", yytname[yytype]);
+ else
+ YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]);
+
+ yy_symbol_value_print (yyoutput, yytype, yyvaluep);
+ YYFPRINTF (yyoutput, ")");
+}
+
+/*------------------------------------------------------------------.
+| yy_stack_print -- Print the state stack from its BOTTOM up to its |
+| TOP (included). |
+`------------------------------------------------------------------*/
+
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+static void
+yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop)
+#else
+static void
+yy_stack_print (yybottom, yytop)
+ yytype_int16 *yybottom;
+ yytype_int16 *yytop;
+#endif
+{
+ YYFPRINTF (stderr, "Stack now");
+ for (; yybottom <= yytop; yybottom++)
+ {
+ int yybot = *yybottom;
+ YYFPRINTF (stderr, " %d", yybot);
+ }
+ YYFPRINTF (stderr, "\n");
+}
+
+# define YY_STACK_PRINT(Bottom, Top) \
+do { \
+ if (yydebug) \
+ yy_stack_print ((Bottom), (Top)); \
+} while (YYID (0))
+
+
+/*------------------------------------------------.
+| Report that the YYRULE is going to be reduced. |
+`------------------------------------------------*/
+
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+static void
+yy_reduce_print (YYSTYPE *yyvsp, int yyrule)
+#else
+static void
+yy_reduce_print (yyvsp, yyrule)
+ YYSTYPE *yyvsp;
+ int yyrule;
+#endif
+{
+ int yynrhs = yyr2[yyrule];
+ int yyi;
+ unsigned long int yylno = yyrline[yyrule];
+ YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
+ yyrule - 1, yylno);
+ /* The symbols being reduced. */
+ for (yyi = 0; yyi < yynrhs; yyi++)
+ {
+ YYFPRINTF (stderr, " $%d = ", yyi + 1);
+ yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi],
+ &(yyvsp[(yyi + 1) - (yynrhs)])
+ );
+ YYFPRINTF (stderr, "\n");
+ }
+}
+
+# define YY_REDUCE_PRINT(Rule) \
+do { \
+ if (yydebug) \
+ yy_reduce_print (yyvsp, Rule); \
+} while (YYID (0))
+
+/* Nonzero means print parse trace. It is left uninitialized so that
+ multiple parsers can coexist. */
+int yydebug;
+#else /* !YYDEBUG */
+# define YYDPRINTF(Args)
+# define YY_SYMBOL_PRINT(Title, Type, Value, Location)
+# define YY_STACK_PRINT(Bottom, Top)
+# define YY_REDUCE_PRINT(Rule)
+#endif /* !YYDEBUG */
+
+
+/* YYINITDEPTH -- initial size of the parser's stacks. */
+#ifndef YYINITDEPTH
+# define YYINITDEPTH 200
+#endif
+
+/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
+ if the built-in stack extension method is used).
+
+ Do not make this value too large; the results are undefined if
+ YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)
+ evaluated with infinite-precision integer arithmetic. */
+
+#ifndef YYMAXDEPTH
+# define YYMAXDEPTH 10000
+#endif
+
+
+#if YYERROR_VERBOSE
+
+# ifndef yystrlen
+# if defined __GLIBC__ && defined _STRING_H
+# define yystrlen strlen
+# else
+/* Return the length of YYSTR. */
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+static YYSIZE_T
+yystrlen (const char *yystr)
+#else
+static YYSIZE_T
+yystrlen (yystr)
+ const char *yystr;
+#endif
+{
+ YYSIZE_T yylen;
+ for (yylen = 0; yystr[yylen]; yylen++)
+ continue;
+ return yylen;
+}
+# endif
+# endif
+
+# ifndef yystpcpy
+# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE
+# define yystpcpy stpcpy
+# else
+/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
+ YYDEST. */
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+static char *
+yystpcpy (char *yydest, const char *yysrc)
+#else
+static char *
+yystpcpy (yydest, yysrc)
+ char *yydest;
+ const char *yysrc;
+#endif
+{
+ char *yyd = yydest;
+ const char *yys = yysrc;
+
+ while ((*yyd++ = *yys++) != '\0')
+ continue;
+
+ return yyd - 1;
+}
+# endif
+# endif
+
+# ifndef yytnamerr
+/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
+ quotes and backslashes, so that it's suitable for yyerror. The
+ heuristic is that double-quoting is unnecessary unless the string
+ contains an apostrophe, a comma, or backslash (other than
+ backslash-backslash). YYSTR is taken from yytname. If YYRES is
+ null, do not copy; instead, return the length of what the result
+ would have been. */
+static YYSIZE_T
+yytnamerr (char *yyres, const char *yystr)
+{
+ if (*yystr == '"')
+ {
+ YYSIZE_T yyn = 0;
+ char const *yyp = yystr;
+
+ for (;;)
+ switch (*++yyp)
+ {
+ case '\'':
+ case ',':
+ goto do_not_strip_quotes;
+
+ case '\\':
+ if (*++yyp != '\\')
+ goto do_not_strip_quotes;
+ /* Fall through. */
+ default:
+ if (yyres)
+ yyres[yyn] = *yyp;
+ yyn++;
+ break;
+
+ case '"':
+ if (yyres)
+ yyres[yyn] = '\0';
+ return yyn;
+ }
+ do_not_strip_quotes: ;
+ }
+
+ if (! yyres)
+ return yystrlen (yystr);
+
+ return yystpcpy (yyres, yystr) - yyres;
+}
+# endif
+
+/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message
+ about the unexpected token YYTOKEN for the state stack whose top is
+ YYSSP.
+
+ Return 0 if *YYMSG was successfully written. Return 1 if *YYMSG is
+ not large enough to hold the message. In that case, also set
+ *YYMSG_ALLOC to the required number of bytes. Return 2 if the
+ required number of bytes is too large to store. */
+static int
+yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
+ yytype_int16 *yyssp, int yytoken)
+{
+ YYSIZE_T yysize0 = yytnamerr (0, yytname[yytoken]);
+ YYSIZE_T yysize = yysize0;
+ YYSIZE_T yysize1;
+ enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
+ /* Internationalized format string. */
+ const char *yyformat = 0;
+ /* Arguments of yyformat. */
+ char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
+ /* Number of reported tokens (one for the "unexpected", one per
+ "expected"). */
+ int yycount = 0;
+
+ /* There are many possibilities here to consider:
+ - Assume YYFAIL is not used. It's too flawed to consider. See
+ <http://lists.gnu.org/archive/html/bison-patches/2009-12/msg00024.html>
+ for details. YYERROR is fine as it does not invoke this
+ function.
+ - If this state is a consistent state with a default action, then
+ the only way this function was invoked is if the default action
+ is an error action. In that case, don't check for expected
+ tokens because there are none.
+ - The only way there can be no lookahead present (in yychar) is if
+ this state is a consistent state with a default action. Thus,
+ detecting the absence of a lookahead is sufficient to determine
+ that there is no unexpected or expected token to report. In that
+ case, just report a simple "syntax error".
+ - Don't assume there isn't a lookahead just because this state is a
+ consistent state with a default action. There might have been a
+ previous inconsistent state, consistent state with a non-default
+ action, or user semantic action that manipulated yychar.
+ - Of course, the expected token list depends on states to have
+ correct lookahead information, and it depends on the parser not
+ to perform extra reductions after fetching a lookahead from the
+ scanner and before detecting a syntax error. Thus, state merging
+ (from LALR or IELR) and default reductions corrupt the expected
+ token list. However, the list is correct for canonical LR with
+ one exception: it will still contain any token that will not be
+ accepted due to an error action in a later state.
+ */
+ if (yytoken != YYEMPTY)
+ {
+ int yyn = yypact[*yyssp];
+ yyarg[yycount++] = yytname[yytoken];
+ if (!yypact_value_is_default (yyn))
+ {
+ /* Start YYX at -YYN if negative to avoid negative indexes in
+ YYCHECK. In other words, skip the first -YYN actions for
+ this state because they are default actions. */
+ int yyxbegin = yyn < 0 ? -yyn : 0;
+ /* Stay within bounds of both yycheck and yytname. */
+ int yychecklim = YYLAST - yyn + 1;
+ int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
+ int yyx;
+
+ for (yyx = yyxbegin; yyx < yyxend; ++yyx)
+ if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR
+ && !yytable_value_is_error (yytable[yyx + yyn]))
+ {
+ if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
+ {
+ yycount = 1;
+ yysize = yysize0;
+ break;
+ }
+ yyarg[yycount++] = yytname[yyx];
+ yysize1 = yysize + yytnamerr (0, yytname[yyx]);
+ if (! (yysize <= yysize1
+ && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
+ return 2;
+ yysize = yysize1;
+ }
+ }
+ }
+
+ switch (yycount)
+ {
+# define YYCASE_(N, S) \
+ case N: \
+ yyformat = S; \
+ break
+ YYCASE_(0, YY_("syntax error"));
+ YYCASE_(1, YY_("syntax error, unexpected %s"));
+ YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s"));
+ YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s"));
+ YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s"));
+ YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s"));
+# undef YYCASE_
+ }
+
+ yysize1 = yysize + yystrlen (yyformat);
+ if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
+ return 2;
+ yysize = yysize1;
+
+ if (*yymsg_alloc < yysize)
+ {
+ *yymsg_alloc = 2 * yysize;
+ if (! (yysize <= *yymsg_alloc
+ && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM))
+ *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM;
+ return 1;
+ }
+
+ /* Avoid sprintf, as that infringes on the user's name space.
+ Don't have undefined behavior even if the translation
+ produced a string with the wrong number of "%s"s. */
+ {
+ char *yyp = *yymsg;
+ int yyi = 0;
+ while ((*yyp = *yyformat) != '\0')
+ if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount)
+ {
+ yyp += yytnamerr (yyp, yyarg[yyi++]);
+ yyformat += 2;
+ }
+ else
+ {
+ yyp++;
+ yyformat++;
+ }
+ }
+ return 0;
+}
+#endif /* YYERROR_VERBOSE */
+
+/*-----------------------------------------------.
+| Release the memory associated to this symbol. |
+`-----------------------------------------------*/
+
+/*ARGSUSED*/
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+static void
+yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep)
+#else
+static void
+yydestruct (yymsg, yytype, yyvaluep)
+ const char *yymsg;
+ int yytype;
+ YYSTYPE *yyvaluep;
+#endif
+{
+ YYUSE (yyvaluep);
+
+ if (!yymsg)
+ yymsg = "Deleting";
+ YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
+
+ switch (yytype)
+ {
+ case 53: /* "choice_entry" */
+
+ {
+ fprintf(stderr, "%s:%d: missing end statement for this entry\n",
+ (yyvaluep->menu)->file->name, (yyvaluep->menu)->lineno);
+ if (current_menu == (yyvaluep->menu))
+ menu_end_menu();
+};
+
+ break;
+ case 59: /* "if_entry" */
+
+ {
+ fprintf(stderr, "%s:%d: missing end statement for this entry\n",
+ (yyvaluep->menu)->file->name, (yyvaluep->menu)->lineno);
+ if (current_menu == (yyvaluep->menu))
+ menu_end_menu();
+};
+
+ break;
+ case 65: /* "menu_entry" */
+
+ {
+ fprintf(stderr, "%s:%d: missing end statement for this entry\n",
+ (yyvaluep->menu)->file->name, (yyvaluep->menu)->lineno);
+ if (current_menu == (yyvaluep->menu))
+ menu_end_menu();
+};
+
+ break;
+
+ default:
+ break;
+ }
+}
+
+
+/* Prevent warnings from -Wmissing-prototypes. */
+#ifdef YYPARSE_PARAM
+#if defined __STDC__ || defined __cplusplus
+int yyparse (void *YYPARSE_PARAM);
+#else
+int yyparse ();
+#endif
+#else /* ! YYPARSE_PARAM */
+#if defined __STDC__ || defined __cplusplus
+int yyparse (void);
+#else
+int yyparse ();
+#endif
+#endif /* ! YYPARSE_PARAM */
+
+
+/* The lookahead symbol. */
+int yychar;
+
+/* The semantic value of the lookahead symbol. */
+YYSTYPE yylval;
+
+/* Number of syntax errors so far. */
+int yynerrs;
+
+
+/*----------.
+| yyparse. |
+`----------*/
+
+#ifdef YYPARSE_PARAM
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+int
+yyparse (void *YYPARSE_PARAM)
+#else
+int
+yyparse (YYPARSE_PARAM)
+ void *YYPARSE_PARAM;
+#endif
+#else /* ! YYPARSE_PARAM */
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+int
+yyparse (void)
+#else
+int
+yyparse ()
+
+#endif
+#endif
+{
+ int yystate;
+ /* Number of tokens to shift before error messages enabled. */
+ int yyerrstatus;
+
+ /* The stacks and their tools:
+ `yyss': related to states.
+ `yyvs': related to semantic values.
+
+ Refer to the stacks thru separate pointers, to allow yyoverflow
+ to reallocate them elsewhere. */
+
+ /* The state stack. */
+ yytype_int16 yyssa[YYINITDEPTH];
+ yytype_int16 *yyss;
+ yytype_int16 *yyssp;
+
+ /* The semantic value stack. */
+ YYSTYPE yyvsa[YYINITDEPTH];
+ YYSTYPE *yyvs;
+ YYSTYPE *yyvsp;
+
+ YYSIZE_T yystacksize;
+
+ int yyn;
+ int yyresult;
+ /* Lookahead token as an internal (translated) token number. */
+ int yytoken;
+ /* The variables used to return semantic value and location from the
+ action routines. */
+ YYSTYPE yyval;
+
+#if YYERROR_VERBOSE
+ /* Buffer for error messages, and its allocated size. */
+ char yymsgbuf[128];
+ char *yymsg = yymsgbuf;
+ YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
+#endif
+
+#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N))
+
+ /* The number of symbols on the RHS of the reduced rule.
+ Keep to zero when no symbol should be popped. */
+ int yylen = 0;
+
+ yytoken = 0;
+ yyss = yyssa;
+ yyvs = yyvsa;
+ yystacksize = YYINITDEPTH;
+
+ YYDPRINTF ((stderr, "Starting parse\n"));
+
+ yystate = 0;
+ yyerrstatus = 0;
+ yynerrs = 0;
+ yychar = YYEMPTY; /* Cause a token to be read. */
+
+ /* Initialize stack pointers.
+ Waste one element of value and location stack
+ so that they stay on the same level as the state stack.
+ The wasted elements are never initialized. */
+ yyssp = yyss;
+ yyvsp = yyvs;
+
+ goto yysetstate;
+
+/*------------------------------------------------------------.
+| yynewstate -- Push a new state, which is found in yystate. |
+`------------------------------------------------------------*/
+ yynewstate:
+ /* In all cases, when you get here, the value and location stacks
+ have just been pushed. So pushing a state here evens the stacks. */
+ yyssp++;
+
+ yysetstate:
+ *yyssp = yystate;
+
+ if (yyss + yystacksize - 1 <= yyssp)
+ {
+ /* Get the current used size of the three stacks, in elements. */
+ YYSIZE_T yysize = yyssp - yyss + 1;
+
+#ifdef yyoverflow
+ {
+ /* Give user a chance to reallocate the stack. Use copies of
+ these so that the &'s don't force the real ones into
+ memory. */
+ YYSTYPE *yyvs1 = yyvs;
+ yytype_int16 *yyss1 = yyss;
+
+ /* Each stack pointer address is followed by the size of the
+ data in use in that stack, in bytes. This used to be a
+ conditional around just the two extra args, but that might
+ be undefined if yyoverflow is a macro. */
+ yyoverflow (YY_("memory exhausted"),
+ &yyss1, yysize * sizeof (*yyssp),
+ &yyvs1, yysize * sizeof (*yyvsp),
+ &yystacksize);
+
+ yyss = yyss1;
+ yyvs = yyvs1;
+ }
+#else /* no yyoverflow */
+# ifndef YYSTACK_RELOCATE
+ goto yyexhaustedlab;
+# else
+ /* Extend the stack our own way. */
+ if (YYMAXDEPTH <= yystacksize)
+ goto yyexhaustedlab;
+ yystacksize *= 2;
+ if (YYMAXDEPTH < yystacksize)
+ yystacksize = YYMAXDEPTH;
+
+ {
+ yytype_int16 *yyss1 = yyss;
+ union yyalloc *yyptr =
+ (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
+ if (! yyptr)
+ goto yyexhaustedlab;
+ YYSTACK_RELOCATE (yyss_alloc, yyss);
+ YYSTACK_RELOCATE (yyvs_alloc, yyvs);
+# undef YYSTACK_RELOCATE
+ if (yyss1 != yyssa)
+ YYSTACK_FREE (yyss1);
+ }
+# endif
+#endif /* no yyoverflow */
+
+ yyssp = yyss + yysize - 1;
+ yyvsp = yyvs + yysize - 1;
+
+ YYDPRINTF ((stderr, "Stack size increased to %lu\n",
+ (unsigned long int) yystacksize));
+
+ if (yyss + yystacksize - 1 <= yyssp)
+ YYABORT;
+ }
+
+ YYDPRINTF ((stderr, "Entering state %d\n", yystate));
+
+ if (yystate == YYFINAL)
+ YYACCEPT;
+
+ goto yybackup;
+
+/*-----------.
+| yybackup. |
+`-----------*/
+yybackup:
+
+ /* Do appropriate processing given the current state. Read a
+ lookahead token if we need one and don't already have one. */
+
+ /* First try to decide what to do without reference to lookahead token. */
+ yyn = yypact[yystate];
+ if (yypact_value_is_default (yyn))
+ goto yydefault;
+
+ /* Not known => get a lookahead token if don't already have one. */
+
+ /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */
+ if (yychar == YYEMPTY)
+ {
+ YYDPRINTF ((stderr, "Reading a token: "));
+ yychar = YYLEX;
+ }
+
+ if (yychar <= YYEOF)
+ {
+ yychar = yytoken = YYEOF;
+ YYDPRINTF ((stderr, "Now at end of input.\n"));
+ }
+ else
+ {
+ yytoken = YYTRANSLATE (yychar);
+ YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
+ }
+
+ /* If the proper action on seeing token YYTOKEN is to reduce or to
+ detect an error, take that action. */
+ yyn += yytoken;
+ if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
+ goto yydefault;
+ yyn = yytable[yyn];
+ if (yyn <= 0)
+ {
+ if (yytable_value_is_error (yyn))
+ goto yyerrlab;
+ yyn = -yyn;
+ goto yyreduce;
+ }
+
+ /* Count tokens shifted since error; after three, turn off error
+ status. */
+ if (yyerrstatus)
+ yyerrstatus--;
+
+ /* Shift the lookahead token. */
+ YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
+
+ /* Discard the shifted token. */
+ yychar = YYEMPTY;
+
+ yystate = yyn;
+ *++yyvsp = yylval;
+
+ goto yynewstate;
+
+
+/*-----------------------------------------------------------.
+| yydefault -- do the default action for the current state. |
+`-----------------------------------------------------------*/
+yydefault:
+ yyn = yydefact[yystate];
+ if (yyn == 0)
+ goto yyerrlab;
+ goto yyreduce;
+
+
+/*-----------------------------.
+| yyreduce -- Do a reduction. |
+`-----------------------------*/
+yyreduce:
+ /* yyn is the number of a rule to reduce with. */
+ yylen = yyr2[yyn];
+
+ /* If YYLEN is nonzero, implement the default value of the action:
+ `$$ = $1'.
+
+ Otherwise, the following line sets YYVAL to garbage.
+ This behavior is undocumented and Bison
+ users should not rely upon it. Assigning to YYVAL
+ unconditionally makes the parser a bit smaller, and it avoids a
+ GCC warning that YYVAL may be used uninitialized. */
+ yyval = yyvsp[1-yylen];
+
+
+ YY_REDUCE_PRINT (yyn);
+ switch (yyn)
+ {
+ case 10:
+
+ { zconf_error("unexpected end statement"); }
+ break;
+
+ case 11:
+
+ { zconf_error("unknown statement \"%s\"", (yyvsp[(2) - (4)].string)); }
+ break;
+
+ case 12:
+
+ {
+ zconf_error("unexpected option \"%s\"", kconf_id_strings + (yyvsp[(2) - (4)].id)->name);
+}
+ break;
+
+ case 13:
+
+ { zconf_error("invalid statement"); }
+ break;
+
+ case 28:
+
+ { zconf_error("unknown option \"%s\"", (yyvsp[(1) - (3)].string)); }
+ break;
+
+ case 29:
+
+ { zconf_error("invalid option"); }
+ break;
+
+ case 30:
+
+ {
+ struct symbol *sym = sym_lookup((yyvsp[(2) - (3)].string), 0);
+ sym->flags |= SYMBOL_OPTIONAL;
+ menu_add_entry(sym);
+ printd(DEBUG_PARSE, "%s:%d:config %s\n", zconf_curname(), zconf_lineno(), (yyvsp[(2) - (3)].string));
+}
+ break;
+
+ case 31:
+
+ {
+ menu_end_entry();
+ printd(DEBUG_PARSE, "%s:%d:endconfig\n", zconf_curname(), zconf_lineno());
+}
+ break;
+
+ case 32:
+
+ {
+ struct symbol *sym = sym_lookup((yyvsp[(2) - (3)].string), 0);
+ sym->flags |= SYMBOL_OPTIONAL;
+ menu_add_entry(sym);
+ printd(DEBUG_PARSE, "%s:%d:menuconfig %s\n", zconf_curname(), zconf_lineno(), (yyvsp[(2) - (3)].string));
+}
+ break;
+
+ case 33:
+
+ {
+ if (current_entry->prompt)
+ current_entry->prompt->type = P_MENU;
+ else
+ zconfprint("warning: menuconfig statement without prompt");
+ menu_end_entry();
+ printd(DEBUG_PARSE, "%s:%d:endconfig\n", zconf_curname(), zconf_lineno());
+}
+ break;
+
+ case 41:
+
+ {
+ menu_set_type((yyvsp[(1) - (3)].id)->stype);
+ printd(DEBUG_PARSE, "%s:%d:type(%u)\n",
+ zconf_curname(), zconf_lineno(),
+ (yyvsp[(1) - (3)].id)->stype);
+}
+ break;
+
+ case 42:
+
+ {
+ menu_add_prompt(P_PROMPT, (yyvsp[(2) - (4)].string), (yyvsp[(3) - (4)].expr));
+ printd(DEBUG_PARSE, "%s:%d:prompt\n", zconf_curname(), zconf_lineno());
+}
+ break;
+
+ case 43:
+
+ {
+ menu_add_expr(P_DEFAULT, (yyvsp[(2) - (4)].expr), (yyvsp[(3) - (4)].expr));
+ if ((yyvsp[(1) - (4)].id)->stype != S_UNKNOWN)
+ menu_set_type((yyvsp[(1) - (4)].id)->stype);
+ printd(DEBUG_PARSE, "%s:%d:default(%u)\n",
+ zconf_curname(), zconf_lineno(),
+ (yyvsp[(1) - (4)].id)->stype);
+}
+ break;
+
+ case 44:
+
+ {
+ menu_add_symbol(P_SELECT, sym_lookup((yyvsp[(2) - (4)].string), 0), (yyvsp[(3) - (4)].expr));
+ printd(DEBUG_PARSE, "%s:%d:select\n", zconf_curname(), zconf_lineno());
+}
+ break;
+
+ case 45:
+
+ {
+ menu_add_expr(P_RANGE, expr_alloc_comp(E_RANGE,(yyvsp[(2) - (5)].symbol), (yyvsp[(3) - (5)].symbol)), (yyvsp[(4) - (5)].expr));
+ printd(DEBUG_PARSE, "%s:%d:range\n", zconf_curname(), zconf_lineno());
+}
+ break;
+
+ case 48:
+
+ {
+ const struct kconf_id *id = kconf_id_lookup((yyvsp[(2) - (3)].string), strlen((yyvsp[(2) - (3)].string)));
+ if (id && id->flags & TF_OPTION)
+ menu_add_option(id->token, (yyvsp[(3) - (3)].string));
+ else
+ zconfprint("warning: ignoring unknown option %s", (yyvsp[(2) - (3)].string));
+ free((yyvsp[(2) - (3)].string));
+}
+ break;
+
+ case 49:
+
+ { (yyval.string) = NULL; }
+ break;
+
+ case 50:
+
+ { (yyval.string) = (yyvsp[(2) - (2)].string); }
+ break;
+
+ case 51:
+
+ {
+ struct symbol *sym = sym_lookup((yyvsp[(2) - (3)].string), SYMBOL_CHOICE);
+ sym->flags |= SYMBOL_AUTO;
+ menu_add_entry(sym);
+ menu_add_expr(P_CHOICE, NULL, NULL);
+ printd(DEBUG_PARSE, "%s:%d:choice\n", zconf_curname(), zconf_lineno());
+}
+ break;
+
+ case 52:
+
+ {
+ (yyval.menu) = menu_add_menu();
+}
+ break;
+
+ case 53:
+
+ {
+ if (zconf_endtoken((yyvsp[(1) - (1)].id), T_CHOICE, T_ENDCHOICE)) {
+ menu_end_menu();
+ printd(DEBUG_PARSE, "%s:%d:endchoice\n", zconf_curname(), zconf_lineno());
+ }
+}
+ break;
+
+ case 61:
+
+ {
+ menu_add_prompt(P_PROMPT, (yyvsp[(2) - (4)].string), (yyvsp[(3) - (4)].expr));
+ printd(DEBUG_PARSE, "%s:%d:prompt\n", zconf_curname(), zconf_lineno());
+}
+ break;
+
+ case 62:
+
+ {
+ if ((yyvsp[(1) - (3)].id)->stype == S_BOOLEAN || (yyvsp[(1) - (3)].id)->stype == S_TRISTATE) {
+ menu_set_type((yyvsp[(1) - (3)].id)->stype);
+ printd(DEBUG_PARSE, "%s:%d:type(%u)\n",
+ zconf_curname(), zconf_lineno(),
+ (yyvsp[(1) - (3)].id)->stype);
+ } else
+ YYERROR;
+}
+ break;
+
+ case 63:
+
+ {
+ current_entry->sym->flags |= SYMBOL_OPTIONAL;
+ printd(DEBUG_PARSE, "%s:%d:optional\n", zconf_curname(), zconf_lineno());
+}
+ break;
+
+ case 64:
+
+ {
+ if ((yyvsp[(1) - (4)].id)->stype == S_UNKNOWN) {
+ menu_add_symbol(P_DEFAULT, sym_lookup((yyvsp[(2) - (4)].string), 0), (yyvsp[(3) - (4)].expr));
+ printd(DEBUG_PARSE, "%s:%d:default\n",
+ zconf_curname(), zconf_lineno());
+ } else
+ YYERROR;
+}
+ break;
+
+ case 67:
+
+ {
+ printd(DEBUG_PARSE, "%s:%d:if\n", zconf_curname(), zconf_lineno());
+ menu_add_entry(NULL);
+ menu_add_dep((yyvsp[(2) - (3)].expr));
+ (yyval.menu) = menu_add_menu();
+}
+ break;
+
+ case 68:
+
+ {
+ if (zconf_endtoken((yyvsp[(1) - (1)].id), T_IF, T_ENDIF)) {
+ menu_end_menu();
+ printd(DEBUG_PARSE, "%s:%d:endif\n", zconf_curname(), zconf_lineno());
+ }
+}
+ break;
+
+ case 74:
+
+ {
+ menu_add_prompt(P_MENU, (yyvsp[(2) - (3)].string), NULL);
+}
+ break;
+
+ case 75:
+
+ {
+ menu_add_entry(NULL);
+ menu_add_prompt(P_MENU, (yyvsp[(2) - (3)].string), NULL);
+ printd(DEBUG_PARSE, "%s:%d:menu\n", zconf_curname(), zconf_lineno());
+}
+ break;
+
+ case 76:
+
+ {
+ (yyval.menu) = menu_add_menu();
+}
+ break;
+
+ case 77:
+
+ {
+ if (zconf_endtoken((yyvsp[(1) - (1)].id), T_MENU, T_ENDMENU)) {
+ menu_end_menu();
+ printd(DEBUG_PARSE, "%s:%d:endmenu\n", zconf_curname(), zconf_lineno());
+ }
+}
+ break;
+
+ case 83:
+
+ {
+ printd(DEBUG_PARSE, "%s:%d:source %s\n", zconf_curname(), zconf_lineno(), (yyvsp[(2) - (3)].string));
+ zconf_nextfile((yyvsp[(2) - (3)].string));
+}
+ break;
+
+ case 84:
+
+ {
+ menu_add_entry(NULL);
+ menu_add_prompt(P_COMMENT, (yyvsp[(2) - (3)].string), NULL);
+ printd(DEBUG_PARSE, "%s:%d:comment\n", zconf_curname(), zconf_lineno());
+}
+ break;
+
+ case 85:
+
+ {
+ menu_end_entry();
+}
+ break;
+
+ case 86:
+
+ {
+ printd(DEBUG_PARSE, "%s:%d:help\n", zconf_curname(), zconf_lineno());
+ zconf_starthelp();
+}
+ break;
+
+ case 87:
+
+ {
+ current_entry->help = (yyvsp[(2) - (2)].string);
+}
+ break;
+
+ case 92:
+
+ {
+ menu_add_dep((yyvsp[(3) - (4)].expr));
+ printd(DEBUG_PARSE, "%s:%d:depends on\n", zconf_curname(), zconf_lineno());
+}
+ break;
+
+ case 96:
+
+ {
+ menu_add_visibility((yyvsp[(2) - (2)].expr));
+}
+ break;
+
+ case 98:
+
+ {
+ menu_add_prompt(P_PROMPT, (yyvsp[(1) - (2)].string), (yyvsp[(2) - (2)].expr));
+}
+ break;
+
+ case 101:
+
+ { (yyval.id) = (yyvsp[(1) - (2)].id); }
+ break;
+
+ case 102:
+
+ { (yyval.id) = (yyvsp[(1) - (2)].id); }
+ break;
+
+ case 103:
+
+ { (yyval.id) = (yyvsp[(1) - (2)].id); }
+ break;
+
+ case 106:
+
+ { (yyval.expr) = NULL; }
+ break;
+
+ case 107:
+
+ { (yyval.expr) = (yyvsp[(2) - (2)].expr); }
+ break;
+
+ case 108:
+
+ { (yyval.expr) = expr_alloc_symbol((yyvsp[(1) - (1)].symbol)); }
+ break;
+
+ case 109:
+
+ { (yyval.expr) = expr_alloc_comp(E_EQUAL, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); }
+ break;
+
+ case 110:
+
+ { (yyval.expr) = expr_alloc_comp(E_UNEQUAL, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); }
+ break;
+
+ case 111:
+
+ { (yyval.expr) = (yyvsp[(2) - (3)].expr); }
+ break;
+
+ case 112:
+
+ { (yyval.expr) = expr_alloc_one(E_NOT, (yyvsp[(2) - (2)].expr)); }
+ break;
+
+ case 113:
+
+ { (yyval.expr) = expr_alloc_two(E_OR, (yyvsp[(1) - (3)].expr), (yyvsp[(3) - (3)].expr)); }
+ break;
+
+ case 114:
+
+ { (yyval.expr) = expr_alloc_two(E_AND, (yyvsp[(1) - (3)].expr), (yyvsp[(3) - (3)].expr)); }
+ break;
+
+ case 115:
+
+ { (yyval.symbol) = sym_lookup((yyvsp[(1) - (1)].string), 0); free((yyvsp[(1) - (1)].string)); }
+ break;
+
+ case 116:
+
+ { (yyval.symbol) = sym_lookup((yyvsp[(1) - (1)].string), SYMBOL_CONST); free((yyvsp[(1) - (1)].string)); }
+ break;
+
+ case 117:
+
+ { (yyval.string) = NULL; }
+ break;
+
+
+
+ default: break;
+ }
+ /* User semantic actions sometimes alter yychar, and that requires
+ that yytoken be updated with the new translation. We take the
+ approach of translating immediately before every use of yytoken.
+ One alternative is translating here after every semantic action,
+ but that translation would be missed if the semantic action invokes
+ YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or
+ if it invokes YYBACKUP. In the case of YYABORT or YYACCEPT, an
+ incorrect destructor might then be invoked immediately. In the
+ case of YYERROR or YYBACKUP, subsequent parser actions might lead
+ to an incorrect destructor call or verbose syntax error message
+ before the lookahead is translated. */
+ YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
+
+ YYPOPSTACK (yylen);
+ yylen = 0;
+ YY_STACK_PRINT (yyss, yyssp);
+
+ *++yyvsp = yyval;
+
+ /* Now `shift' the result of the reduction. Determine what state
+ that goes to, based on the state we popped back to and the rule
+ number reduced by. */
+
+ yyn = yyr1[yyn];
+
+ yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;
+ if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)
+ yystate = yytable[yystate];
+ else
+ yystate = yydefgoto[yyn - YYNTOKENS];
+
+ goto yynewstate;
+
+
+/*------------------------------------.
+| yyerrlab -- here on detecting error |
+`------------------------------------*/
+yyerrlab:
+ /* Make sure we have latest lookahead translation. See comments at
+ user semantic actions for why this is necessary. */
+ yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar);
+
+ /* If not already recovering from an error, report this error. */
+ if (!yyerrstatus)
+ {
+ ++yynerrs;
+#if ! YYERROR_VERBOSE
+ yyerror (YY_("syntax error"));
+#else
+# define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \
+ yyssp, yytoken)
+ {
+ char const *yymsgp = YY_("syntax error");
+ int yysyntax_error_status;
+ yysyntax_error_status = YYSYNTAX_ERROR;
+ if (yysyntax_error_status == 0)
+ yymsgp = yymsg;
+ else if (yysyntax_error_status == 1)
+ {
+ if (yymsg != yymsgbuf)
+ YYSTACK_FREE (yymsg);
+ yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc);
+ if (!yymsg)
+ {
+ yymsg = yymsgbuf;
+ yymsg_alloc = sizeof yymsgbuf;
+ yysyntax_error_status = 2;
+ }
+ else
+ {
+ yysyntax_error_status = YYSYNTAX_ERROR;
+ yymsgp = yymsg;
+ }
+ }
+ yyerror (yymsgp);
+ if (yysyntax_error_status == 2)
+ goto yyexhaustedlab;
+ }
+# undef YYSYNTAX_ERROR
+#endif
+ }
+
+
+
+ if (yyerrstatus == 3)
+ {
+ /* If just tried and failed to reuse lookahead token after an
+ error, discard it. */
+
+ if (yychar <= YYEOF)
+ {
+ /* Return failure if at end of input. */
+ if (yychar == YYEOF)
+ YYABORT;
+ }
+ else
+ {
+ yydestruct ("Error: discarding",
+ yytoken, &yylval);
+ yychar = YYEMPTY;
+ }
+ }
+
+ /* Else will try to reuse lookahead token after shifting the error
+ token. */
+ goto yyerrlab1;
+
+
+/*---------------------------------------------------.
+| yyerrorlab -- error raised explicitly by YYERROR. |
+`---------------------------------------------------*/
+yyerrorlab:
+
+ /* Pacify compilers like GCC when the user code never invokes
+ YYERROR and the label yyerrorlab therefore never appears in user
+ code. */
+ if (/*CONSTCOND*/ 0)
+ goto yyerrorlab;
+
+ /* Do not reclaim the symbols of the rule which action triggered
+ this YYERROR. */
+ YYPOPSTACK (yylen);
+ yylen = 0;
+ YY_STACK_PRINT (yyss, yyssp);
+ yystate = *yyssp;
+ goto yyerrlab1;
+
+
+/*-------------------------------------------------------------.
+| yyerrlab1 -- common code for both syntax error and YYERROR. |
+`-------------------------------------------------------------*/
+yyerrlab1:
+ yyerrstatus = 3; /* Each real token shifted decrements this. */
+
+ for (;;)
+ {
+ yyn = yypact[yystate];
+ if (!yypact_value_is_default (yyn))
+ {
+ yyn += YYTERROR;
+ if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
+ {
+ yyn = yytable[yyn];
+ if (0 < yyn)
+ break;
+ }
+ }
+
+ /* Pop the current state because it cannot handle the error token. */
+ if (yyssp == yyss)
+ YYABORT;
+
+
+ yydestruct ("Error: popping",
+ yystos[yystate], yyvsp);
+ YYPOPSTACK (1);
+ yystate = *yyssp;
+ YY_STACK_PRINT (yyss, yyssp);
+ }
+
+ *++yyvsp = yylval;
+
+
+ /* Shift the error token. */
+ YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
+
+ yystate = yyn;
+ goto yynewstate;
+
+
+/*-------------------------------------.
+| yyacceptlab -- YYACCEPT comes here. |
+`-------------------------------------*/
+yyacceptlab:
+ yyresult = 0;
+ goto yyreturn;
+
+/*-----------------------------------.
+| yyabortlab -- YYABORT comes here. |
+`-----------------------------------*/
+yyabortlab:
+ yyresult = 1;
+ goto yyreturn;
+
+#if !defined(yyoverflow) || YYERROR_VERBOSE
+/*-------------------------------------------------.
+| yyexhaustedlab -- memory exhaustion comes here. |
+`-------------------------------------------------*/
+yyexhaustedlab:
+ yyerror (YY_("memory exhausted"));
+ yyresult = 2;
+ /* Fall through. */
+#endif
+
+yyreturn:
+ if (yychar != YYEMPTY)
+ {
+ /* Make sure we have latest lookahead translation. See comments at
+ user semantic actions for why this is necessary. */
+ yytoken = YYTRANSLATE (yychar);
+ yydestruct ("Cleanup: discarding lookahead",
+ yytoken, &yylval);
+ }
+ /* Do not reclaim the symbols of the rule which action triggered
+ this YYABORT or YYACCEPT. */
+ YYPOPSTACK (yylen);
+ YY_STACK_PRINT (yyss, yyssp);
+ while (yyssp != yyss)
+ {
+ yydestruct ("Cleanup: popping",
+ yystos[*yyssp], yyvsp);
+ YYPOPSTACK (1);
+ }
+#ifndef yyoverflow
+ if (yyss != yyssa)
+ YYSTACK_FREE (yyss);
+#endif
+#if YYERROR_VERBOSE
+ if (yymsg != yymsgbuf)
+ YYSTACK_FREE (yymsg);
+#endif
+ /* Make sure YYID is used. */
+ return YYID (yyresult);
+}
+
+
+
+
+
+void conf_parse(const char *name)
+{
+ struct symbol *sym;
+ int i;
+
+ zconf_initscan(name);
+
+ sym_init();
+ _menu_init();
+ rootmenu.prompt = menu_add_prompt(P_MENU, "Linux Kernel Configuration", NULL);
+
+ if (getenv("ZCONF_DEBUG"))
+ zconfdebug = 1;
+ zconfparse();
+ if (zconfnerrs)
+ exit(1);
+ if (!modules_sym)
+ modules_sym = sym_find( "n" );
+
+ rootmenu.prompt->text = _(rootmenu.prompt->text);
+ rootmenu.prompt->text = sym_expand_string_value(rootmenu.prompt->text);
+
+ menu_finalize(&rootmenu);
+ for_all_symbols(i, sym) {
+ if (sym_check_deps(sym))
+ zconfnerrs++;
+ }
+ if (zconfnerrs)
+ exit(1);
+ sym_set_change_count(1);
+}
+
+static const char *zconf_tokenname(int token)
+{
+ switch (token) {
+ case T_MENU: return "menu";
+ case T_ENDMENU: return "endmenu";
+ case T_CHOICE: return "choice";
+ case T_ENDCHOICE: return "endchoice";
+ case T_IF: return "if";
+ case T_ENDIF: return "endif";
+ case T_DEPENDS: return "depends";
+ case T_VISIBLE: return "visible";
+ }
+ return "<token>";
+}
+
+static bool zconf_endtoken(const struct kconf_id *id, int starttoken, int endtoken)
+{
+ if (id->token != endtoken) {
+ zconf_error("unexpected '%s' within %s block",
+ kconf_id_strings + id->name, zconf_tokenname(starttoken));
+ zconfnerrs++;
+ return false;
+ }
+ if (current_menu->file != current_file) {
+ zconf_error("'%s' in different file than '%s'",
+ kconf_id_strings + id->name, zconf_tokenname(starttoken));
+ fprintf(stderr, "%s:%d: location of the '%s'\n",
+ current_menu->file->name, current_menu->lineno,
+ zconf_tokenname(starttoken));
+ zconfnerrs++;
+ return false;
+ }
+ return true;
+}
+
+static void zconfprint(const char *err, ...)
+{
+ va_list ap;
+
+ fprintf(stderr, "%s:%d: ", zconf_curname(), zconf_lineno());
+ va_start(ap, err);
+ vfprintf(stderr, err, ap);
+ va_end(ap);
+ fprintf(stderr, "\n");
+}
+
+static void zconf_error(const char *err, ...)
+{
+ va_list ap;
+
+ zconfnerrs++;
+ fprintf(stderr, "%s:%d: ", zconf_curname(), zconf_lineno());
+ va_start(ap, err);
+ vfprintf(stderr, err, ap);
+ va_end(ap);
+ fprintf(stderr, "\n");
+}
+
+static void zconferror(const char *err)
+{
+ fprintf(stderr, "%s:%d: %s\n", zconf_curname(), zconf_lineno() + 1, err);
+}
+
+static void print_quoted_string(FILE *out, const char *str)
+{
+ const char *p;
+ int len;
+
+ putc('"', out);
+ while ((p = strchr(str, '"'))) {
+ len = p - str;
+ if (len)
+ fprintf(out, "%.*s", len, str);
+ fputs("\\\"", out);
+ str = p + 1;
+ }
+ fputs(str, out);
+ putc('"', out);
+}
+
+static void print_symbol(FILE *out, struct menu *menu)
+{
+ struct symbol *sym = menu->sym;
+ struct property *prop;
+
+ if (sym_is_choice(sym))
+ fprintf(out, "\nchoice\n");
+ else
+ fprintf(out, "\nconfig %s\n", sym->name);
+ switch (sym->type) {
+ case S_BOOLEAN:
+ fputs(" boolean\n", out);
+ break;
+ case S_TRISTATE:
+ fputs(" tristate\n", out);
+ break;
+ case S_STRING:
+ fputs(" string\n", out);
+ break;
+ case S_INT:
+ fputs(" integer\n", out);
+ break;
+ case S_HEX:
+ fputs(" hex\n", out);
+ break;
+ default:
+ fputs(" ???\n", out);
+ break;
+ }
+ for (prop = sym->prop; prop; prop = prop->next) {
+ if (prop->menu != menu)
+ continue;
+ switch (prop->type) {
+ case P_PROMPT:
+ fputs(" prompt ", out);
+ print_quoted_string(out, prop->text);
+ if (!expr_is_yes(prop->visible.expr)) {
+ fputs(" if ", out);
+ expr_fprint(prop->visible.expr, out);
+ }
+ fputc('\n', out);
+ break;
+ case P_DEFAULT:
+ fputs( " default ", out);
+ expr_fprint(prop->expr, out);
+ if (!expr_is_yes(prop->visible.expr)) {
+ fputs(" if ", out);
+ expr_fprint(prop->visible.expr, out);
+ }
+ fputc('\n', out);
+ break;
+ case P_CHOICE:
+ fputs(" #choice value\n", out);
+ break;
+ case P_SELECT:
+ fputs( " select ", out);
+ expr_fprint(prop->expr, out);
+ fputc('\n', out);
+ break;
+ case P_RANGE:
+ fputs( " range ", out);
+ expr_fprint(prop->expr, out);
+ fputc('\n', out);
+ break;
+ case P_MENU:
+ fputs( " menu ", out);
+ print_quoted_string(out, prop->text);
+ fputc('\n', out);
+ break;
+ default:
+ fprintf(out, " unknown prop %d!\n", prop->type);
+ break;
+ }
+ }
+ if (menu->help) {
+ int len = strlen(menu->help);
+ while (menu->help[--len] == '\n')
+ menu->help[len] = 0;
+ fprintf(out, " help\n%s\n", menu->help);
+ }
+}
+
+void zconfdump(FILE *out)
+{
+ struct property *prop;
+ struct symbol *sym;
+ struct menu *menu;
+
+ menu = rootmenu.list;
+ while (menu) {
+ if ((sym = menu->sym))
+ print_symbol(out, menu);
+ else if ((prop = menu->prompt)) {
+ switch (prop->type) {
+ case P_COMMENT:
+ fputs("\ncomment ", out);
+ print_quoted_string(out, prop->text);
+ fputs("\n", out);
+ break;
+ case P_MENU:
+ fputs("\nmenu ", out);
+ print_quoted_string(out, prop->text);
+ fputs("\n", out);
+ break;
+ default:
+ ;
+ }
+ if (!expr_is_yes(prop->visible.expr)) {
+ fputs(" depends ", out);
+ expr_fprint(prop->visible.expr, out);
+ fputc('\n', out);
+ }
+ }
+
+ if (menu->list)
+ menu = menu->list;
+ else if (menu->next)
+ menu = menu->next;
+ else while ((menu = menu->parent)) {
+ if (menu->prompt && menu->prompt->type == P_MENU)
+ fputs("\nendmenu\n", out);
+ if (menu->next) {
+ menu = menu->next;
+ break;
+ }
+ }
+ }
+}
+
+#include "zconf.lex.c"
+#include "util.c"
+#include "confdata.c"
+#include "expr.c"
+#include "symbol.c"
+#include "menu.c"
+
Index: linux-3.18.21/scripts/mod/devicetable-offsets.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/scripts/mod/devicetable-offsets.h 2018-02-05 16:43:27.000000000 +0800
@@ -0,0 +1,155 @@
+#ifndef __DEVICETABLE_OFFSETS_H__
+#define __DEVICETABLE_OFFSETS_H__
+/*
+ * DO NOT MODIFY.
+ *
+ * This file was generated by Kbuild
+ *
+ */
+
+#define SIZE_usb_device_id 24 /* sizeof(struct usb_device_id) # */
+#define OFF_usb_device_id_match_flags 0 /* offsetof(struct usb_device_id, match_flags) # */
+#define OFF_usb_device_id_idVendor 2 /* offsetof(struct usb_device_id, idVendor) # */
+#define OFF_usb_device_id_idProduct 4 /* offsetof(struct usb_device_id, idProduct) # */
+#define OFF_usb_device_id_bcdDevice_lo 6 /* offsetof(struct usb_device_id, bcdDevice_lo) # */
+#define OFF_usb_device_id_bcdDevice_hi 8 /* offsetof(struct usb_device_id, bcdDevice_hi) # */
+#define OFF_usb_device_id_bDeviceClass 10 /* offsetof(struct usb_device_id, bDeviceClass) # */
+#define OFF_usb_device_id_bDeviceSubClass 11 /* offsetof(struct usb_device_id, bDeviceSubClass) # */
+#define OFF_usb_device_id_bDeviceProtocol 12 /* offsetof(struct usb_device_id, bDeviceProtocol) # */
+#define OFF_usb_device_id_bInterfaceClass 13 /* offsetof(struct usb_device_id, bInterfaceClass) # */
+#define OFF_usb_device_id_bInterfaceSubClass 14 /* offsetof(struct usb_device_id, bInterfaceSubClass) # */
+#define OFF_usb_device_id_bInterfaceProtocol 15 /* offsetof(struct usb_device_id, bInterfaceProtocol) # */
+#define OFF_usb_device_id_bInterfaceNumber 16 /* offsetof(struct usb_device_id, bInterfaceNumber) # */
+#define SIZE_hid_device_id 16 /* sizeof(struct hid_device_id) # */
+#define OFF_hid_device_id_bus 0 /* offsetof(struct hid_device_id, bus) # */
+#define OFF_hid_device_id_group 2 /* offsetof(struct hid_device_id, group) # */
+#define OFF_hid_device_id_vendor 4 /* offsetof(struct hid_device_id, vendor) # */
+#define OFF_hid_device_id_product 8 /* offsetof(struct hid_device_id, product) # */
+#define SIZE_ieee1394_device_id 24 /* sizeof(struct ieee1394_device_id) # */
+#define OFF_ieee1394_device_id_match_flags 0 /* offsetof(struct ieee1394_device_id, match_flags) # */
+#define OFF_ieee1394_device_id_vendor_id 4 /* offsetof(struct ieee1394_device_id, vendor_id) # */
+#define OFF_ieee1394_device_id_model_id 8 /* offsetof(struct ieee1394_device_id, model_id) # */
+#define OFF_ieee1394_device_id_specifier_id 12 /* offsetof(struct ieee1394_device_id, specifier_id) # */
+#define OFF_ieee1394_device_id_version 16 /* offsetof(struct ieee1394_device_id, version) # */
+#define SIZE_pci_device_id 28 /* sizeof(struct pci_device_id) # */
+#define OFF_pci_device_id_vendor 0 /* offsetof(struct pci_device_id, vendor) # */
+#define OFF_pci_device_id_device 4 /* offsetof(struct pci_device_id, device) # */
+#define OFF_pci_device_id_subvendor 8 /* offsetof(struct pci_device_id, subvendor) # */
+#define OFF_pci_device_id_subdevice 12 /* offsetof(struct pci_device_id, subdevice) # */
+#define OFF_pci_device_id_class 16 /* offsetof(struct pci_device_id, class) # */
+#define OFF_pci_device_id_class_mask 20 /* offsetof(struct pci_device_id, class_mask) # */
+#define SIZE_ccw_device_id 12 /* sizeof(struct ccw_device_id) # */
+#define OFF_ccw_device_id_match_flags 0 /* offsetof(struct ccw_device_id, match_flags) # */
+#define OFF_ccw_device_id_cu_type 2 /* offsetof(struct ccw_device_id, cu_type) # */
+#define OFF_ccw_device_id_cu_model 6 /* offsetof(struct ccw_device_id, cu_model) # */
+#define OFF_ccw_device_id_dev_type 4 /* offsetof(struct ccw_device_id, dev_type) # */
+#define OFF_ccw_device_id_dev_model 7 /* offsetof(struct ccw_device_id, dev_model) # */
+#define SIZE_ap_device_id 8 /* sizeof(struct ap_device_id) # */
+#define OFF_ap_device_id_dev_type 2 /* offsetof(struct ap_device_id, dev_type) # */
+#define SIZE_css_device_id 8 /* sizeof(struct css_device_id) # */
+#define OFF_css_device_id_type 1 /* offsetof(struct css_device_id, type) # */
+#define SIZE_serio_device_id 4 /* sizeof(struct serio_device_id) # */
+#define OFF_serio_device_id_type 0 /* offsetof(struct serio_device_id, type) # */
+#define OFF_serio_device_id_proto 3 /* offsetof(struct serio_device_id, proto) # */
+#define OFF_serio_device_id_id 2 /* offsetof(struct serio_device_id, id) # */
+#define OFF_serio_device_id_extra 1 /* offsetof(struct serio_device_id, extra) # */
+#define SIZE_acpi_device_id 16 /* sizeof(struct acpi_device_id) # */
+#define OFF_acpi_device_id_id 0 /* offsetof(struct acpi_device_id, id) # */
+#define SIZE_pnp_device_id 12 /* sizeof(struct pnp_device_id) # */
+#define OFF_pnp_device_id_id 0 /* offsetof(struct pnp_device_id, id) # */
+#define SIZE_pnp_card_device_id 76 /* sizeof(struct pnp_card_device_id) # */
+#define OFF_pnp_card_device_id_devs 12 /* offsetof(struct pnp_card_device_id, devs) # */
+#define SIZE_pcmcia_device_id 52 /* sizeof(struct pcmcia_device_id) # */
+#define OFF_pcmcia_device_id_match_flags 0 /* offsetof(struct pcmcia_device_id, match_flags) # */
+#define OFF_pcmcia_device_id_manf_id 2 /* offsetof(struct pcmcia_device_id, manf_id) # */
+#define OFF_pcmcia_device_id_card_id 4 /* offsetof(struct pcmcia_device_id, card_id) # */
+#define OFF_pcmcia_device_id_func_id 6 /* offsetof(struct pcmcia_device_id, func_id) # */
+#define OFF_pcmcia_device_id_function 7 /* offsetof(struct pcmcia_device_id, function) # */
+#define OFF_pcmcia_device_id_device_no 8 /* offsetof(struct pcmcia_device_id, device_no) # */
+#define OFF_pcmcia_device_id_prod_id_hash 12 /* offsetof(struct pcmcia_device_id, prod_id_hash) # */
+#define SIZE_of_device_id 196 /* sizeof(struct of_device_id) # */
+#define OFF_of_device_id_name 0 /* offsetof(struct of_device_id, name) # */
+#define OFF_of_device_id_type 32 /* offsetof(struct of_device_id, type) # */
+#define OFF_of_device_id_compatible 64 /* offsetof(struct of_device_id, compatible) # */
+#define SIZE_vio_device_id 64 /* sizeof(struct vio_device_id) # */
+#define OFF_vio_device_id_type 0 /* offsetof(struct vio_device_id, type) # */
+#define OFF_vio_device_id_compat 32 /* offsetof(struct vio_device_id, compat) # */
+#define SIZE_input_device_id 160 /* sizeof(struct input_device_id) # */
+#define OFF_input_device_id_flags 0 /* offsetof(struct input_device_id, flags) # */
+#define OFF_input_device_id_bustype 4 /* offsetof(struct input_device_id, bustype) # */
+#define OFF_input_device_id_vendor 6 /* offsetof(struct input_device_id, vendor) # */
+#define OFF_input_device_id_product 8 /* offsetof(struct input_device_id, product) # */
+#define OFF_input_device_id_version 10 /* offsetof(struct input_device_id, version) # */
+#define OFF_input_device_id_evbit 12 /* offsetof(struct input_device_id, evbit) # */
+#define OFF_input_device_id_keybit 16 /* offsetof(struct input_device_id, keybit) # */
+#define OFF_input_device_id_relbit 112 /* offsetof(struct input_device_id, relbit) # */
+#define OFF_input_device_id_absbit 116 /* offsetof(struct input_device_id, absbit) # */
+#define OFF_input_device_id_mscbit 124 /* offsetof(struct input_device_id, mscbit) # */
+#define OFF_input_device_id_ledbit 128 /* offsetof(struct input_device_id, ledbit) # */
+#define OFF_input_device_id_sndbit 132 /* offsetof(struct input_device_id, sndbit) # */
+#define OFF_input_device_id_ffbit 136 /* offsetof(struct input_device_id, ffbit) # */
+#define OFF_input_device_id_swbit 152 /* offsetof(struct input_device_id, swbit) # */
+#define SIZE_eisa_device_id 12 /* sizeof(struct eisa_device_id) # */
+#define OFF_eisa_device_id_sig 0 /* offsetof(struct eisa_device_id, sig) # */
+#define SIZE_parisc_device_id 8 /* sizeof(struct parisc_device_id) # */
+#define OFF_parisc_device_id_hw_type 0 /* offsetof(struct parisc_device_id, hw_type) # */
+#define OFF_parisc_device_id_hversion 2 /* offsetof(struct parisc_device_id, hversion) # */
+#define OFF_parisc_device_id_hversion_rev 1 /* offsetof(struct parisc_device_id, hversion_rev) # */
+#define OFF_parisc_device_id_sversion 4 /* offsetof(struct parisc_device_id, sversion) # */
+#define SIZE_sdio_device_id 12 /* sizeof(struct sdio_device_id) # */
+#define OFF_sdio_device_id_class 0 /* offsetof(struct sdio_device_id, class) # */
+#define OFF_sdio_device_id_vendor 2 /* offsetof(struct sdio_device_id, vendor) # */
+#define OFF_sdio_device_id_device 4 /* offsetof(struct sdio_device_id, device) # */
+#define SIZE_ssb_device_id 6 /* sizeof(struct ssb_device_id) # */
+#define OFF_ssb_device_id_vendor 0 /* offsetof(struct ssb_device_id, vendor) # */
+#define OFF_ssb_device_id_coreid 2 /* offsetof(struct ssb_device_id, coreid) # */
+#define OFF_ssb_device_id_revision 4 /* offsetof(struct ssb_device_id, revision) # */
+#define SIZE_bcma_device_id 6 /* sizeof(struct bcma_device_id) # */
+#define OFF_bcma_device_id_manuf 0 /* offsetof(struct bcma_device_id, manuf) # */
+#define OFF_bcma_device_id_id 2 /* offsetof(struct bcma_device_id, id) # */
+#define OFF_bcma_device_id_rev 4 /* offsetof(struct bcma_device_id, rev) # */
+#define OFF_bcma_device_id_class 5 /* offsetof(struct bcma_device_id, class) # */
+#define SIZE_virtio_device_id 8 /* sizeof(struct virtio_device_id) # */
+#define OFF_virtio_device_id_device 0 /* offsetof(struct virtio_device_id, device) # */
+#define OFF_virtio_device_id_vendor 4 /* offsetof(struct virtio_device_id, vendor) # */
+#define SIZE_hv_vmbus_device_id 20 /* sizeof(struct hv_vmbus_device_id) # */
+#define OFF_hv_vmbus_device_id_guid 0 /* offsetof(struct hv_vmbus_device_id, guid) # */
+#define SIZE_i2c_device_id 24 /* sizeof(struct i2c_device_id) # */
+#define OFF_i2c_device_id_name 0 /* offsetof(struct i2c_device_id, name) # */
+#define SIZE_spi_device_id 36 /* sizeof(struct spi_device_id) # */
+#define OFF_spi_device_id_name 0 /* offsetof(struct spi_device_id, name) # */
+#define SIZE_dmi_system_id 332 /* sizeof(struct dmi_system_id) # */
+#define OFF_dmi_system_id_matches 8 /* offsetof(struct dmi_system_id, matches) # */
+#define SIZE_platform_device_id 24 /* sizeof(struct platform_device_id) # */
+#define OFF_platform_device_id_name 0 /* offsetof(struct platform_device_id, name) # */
+#define SIZE_mdio_device_id 8 /* sizeof(struct mdio_device_id) # */
+#define OFF_mdio_device_id_phy_id 0 /* offsetof(struct mdio_device_id, phy_id) # */
+#define OFF_mdio_device_id_phy_id_mask 4 /* offsetof(struct mdio_device_id, phy_id_mask) # */
+#define SIZE_zorro_device_id 8 /* sizeof(struct zorro_device_id) # */
+#define OFF_zorro_device_id_id 0 /* offsetof(struct zorro_device_id, id) # */
+#define SIZE_isapnp_device_id 12 /* sizeof(struct isapnp_device_id) # */
+#define OFF_isapnp_device_id_vendor 4 /* offsetof(struct isapnp_device_id, vendor) # */
+#define OFF_isapnp_device_id_function 6 /* offsetof(struct isapnp_device_id, function) # */
+#define SIZE_ipack_device_id 12 /* sizeof(struct ipack_device_id) # */
+#define OFF_ipack_device_id_format 0 /* offsetof(struct ipack_device_id, format) # */
+#define OFF_ipack_device_id_vendor 4 /* offsetof(struct ipack_device_id, vendor) # */
+#define OFF_ipack_device_id_device 8 /* offsetof(struct ipack_device_id, device) # */
+#define SIZE_amba_id 12 /* sizeof(struct amba_id) # */
+#define OFF_amba_id_id 0 /* offsetof(struct amba_id, id) # */
+#define OFF_amba_id_mask 4 /* offsetof(struct amba_id, mask) # */
+#define SIZE_x86_cpu_id 12 /* sizeof(struct x86_cpu_id) # */
+#define OFF_x86_cpu_id_feature 6 /* offsetof(struct x86_cpu_id, feature) # */
+#define OFF_x86_cpu_id_family 2 /* offsetof(struct x86_cpu_id, family) # */
+#define OFF_x86_cpu_id_model 4 /* offsetof(struct x86_cpu_id, model) # */
+#define OFF_x86_cpu_id_vendor 0 /* offsetof(struct x86_cpu_id, vendor) # */
+#define SIZE_cpu_feature 2 /* sizeof(struct cpu_feature) # */
+#define OFF_cpu_feature_feature 0 /* offsetof(struct cpu_feature, feature) # */
+#define SIZE_mei_cl_device_id 36 /* sizeof(struct mei_cl_device_id) # */
+#define OFF_mei_cl_device_id_name 0 /* offsetof(struct mei_cl_device_id, name) # */
+#define SIZE_rio_device_id 8 /* sizeof(struct rio_device_id) # */
+#define OFF_rio_device_id_did 0 /* offsetof(struct rio_device_id, did) # */
+#define OFF_rio_device_id_vid 2 /* offsetof(struct rio_device_id, vid) # */
+#define OFF_rio_device_id_asm_did 4 /* offsetof(struct rio_device_id, asm_did) # */
+#define OFF_rio_device_id_asm_vid 6 /* offsetof(struct rio_device_id, asm_vid) # */
+
+#endif
Index: linux-3.18.21/scripts/mod/elfconfig.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.18.21/scripts/mod/elfconfig.h 2018-02-05 16:43:27.000000000 +0800
@@ -0,0 +1,4 @@
+#define KERNEL_ELFCLASS ELFCLASS32
+#define KERNEL_ELFDATA ELFDATA2MSB
+#define HOST_ELFCLASS ELFCLASS32
+#define HOST_ELFDATA ELFDATA2LSB