diff -urNp linux-2.6.25.orig/drivers/scsi/Kconfig linux-2.6.25/drivers/scsi/Kconfig
--- linux-2.6.25.orig/drivers/scsi/Kconfig	2008-04-17 04:49:44.000000000 +0200
+++ linux-2.6.25/drivers/scsi/Kconfig	2008-07-28 18:42:43.316188735 +0200
@@ -1013,6 +1013,12 @@ config SCSI_IZIP_SLOW_CTR
 
 	  Generally, saying N is fine.
 
+config SCSI_MV_THOR
+	tristate "Marvell Storage Controller 6121/6122/6141/6145"
+	depends on SCSI && BLK_DEV_SD
+	help
+	  Provides support for the Marvell Thor Storage Controller series.
+
 config SCSI_MVSAS
 	tristate "Marvell 88SE6440 SAS/SATA support"
 	depends on PCI && SCSI
diff -urNp linux-2.6.25.orig/drivers/scsi/Makefile linux-2.6.25/drivers/scsi/Makefile
--- linux-2.6.25.orig/drivers/scsi/Makefile	2008-04-17 04:49:44.000000000 +0200
+++ linux-2.6.25/drivers/scsi/Makefile	2008-07-28 18:42:43.316188735 +0200
@@ -124,6 +124,7 @@ obj-$(CONFIG_SCSI_HPTIOP)	+= hptiop.o
 obj-$(CONFIG_SCSI_STEX)		+= stex.o
 obj-$(CONFIG_SCSI_MVSAS)	+= mvsas.o
 obj-$(CONFIG_PS3_ROM)		+= ps3rom.o
+obj-$(CONFIG_SCSI_MV_THOR)	+= mv/
 
 obj-$(CONFIG_ARM)		+= arm/
 
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/common/com_api.h linux-2.6.25/drivers/scsi/mv/common/com_api.h
--- linux-2.6.25.orig/drivers/scsi/mv/common/com_api.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/common/com_api.h	2008-07-28 18:42:43.317189068 +0200
@@ -0,0 +1,83 @@
+#if !defined(COM_API_H)
+#define COM_API_H
+#define MAX_CDB_SIZE						16	//TBD
+
+/* CDB definitions */
+#define APICDB0_ADAPTER						0xF0
+#define APICDB0_LD							0xF1
+#define APICDB0_BLOCK						0xF2
+#define APICDB0_PD							0xF3
+#define APICDB0_EVENT						0xF4
+#define APICDB0_DBG							0xF5
+#define APICDB0_FLASH						0xF6
+
+/*for Adapter*/
+#define APICDB1_ADAPTER_GETCOUNT			0
+#define APICDB1_ADAPTER_GETINFO				(APICDB1_ADAPTER_GETCOUNT+1)
+#define APICDB1_ADAPTER_GETCONFIG			(APICDB1_ADAPTER_GETCOUNT+2)
+#define APICDB1_ADAPTER_SETCONFIG			(APICDB1_ADAPTER_GETCOUNT+3)
+#define APICDB1_ADAPTER_MAX					(APICDB1_ADAPTER_GETCOUNT+4)
+
+/*for LD*/
+#define APICDB1_LD_CREATE					0
+#define APICDB1_LD_GETMAXSIZE				(APICDB1_LD_CREATE+1)
+#define APICDB1_LD_GETINFO					(APICDB1_LD_CREATE+2)
+#define APICDB1_LD_GETTARGETLDINFO			(APICDB1_LD_CREATE+3)
+#define APICDB1_LD_DELETE					(APICDB1_LD_CREATE+4)
+#define APICDB1_LD_GETSTATUS				(APICDB1_LD_CREATE+5)
+#define APICDB1_LD_GETCONFIG				(APICDB1_LD_CREATE+6)
+#define APICDB1_LD_SETCONFIG				(APICDB1_LD_CREATE+7)
+#define APICDB1_LD_STARTREBUILD				(APICDB1_LD_CREATE+8)
+#define APICDB1_LD_STARTCONSISTENCYCHECK	(APICDB1_LD_CREATE+9)
+#define APICDB1_LD_STARTINIT				(APICDB1_LD_CREATE+10)
+#define APICDB1_LD_STARTMIGRATION			(APICDB1_LD_CREATE+11)
+#define APICDB1_LD_BGACONTROL				(APICDB1_LD_CREATE+12)
+#define APICDB1_LD_WIPEMDD					(APICDB1_LD_CREATE+13)
+#define APICDB1_LD_GETSPARESTATUS           (APICDB1_LD_CREATE+14)
+#define APICDB1_LD_SETGLOBALSPARE			(APICDB1_LD_CREATE+15)
+#define APICDB1_LD_SETLDSPARE				(APICDB1_LD_CREATE+16)
+#define APICDB1_LD_REMOVESPARE              (APICDB1_LD_CREATE+17)
+#define APICDB1_LD_HD_SETSTATUS				(APICDB1_LD_CREATE+18)
+#define APICDB1_LD_SHUTDOWN					(APICDB1_LD_CREATE+19)
+#define APICDB1_LD_HD_FREE_SPACE_INFO		(APICDB1_LD_CREATE+20)
+#define APICDB1_LD_HD_GETMBRINFO			(APICDB1_LD_CREATE+21)
+#define APICDB1_LD_MAX						(APICDB1_LD_CREATE+22)
+
+/*for PD*/
+#define APICDB1_PD_GETHD_INFO				0
+#define APICDB1_PD_GETEXPANDER_INFO			(APICDB1_PD_GETHD_INFO+1)
+#define APICDB1_PD_GETPM_INFO				(APICDB1_PD_GETHD_INFO+2)
+#define APICDB1_PD_GETSETTING				(APICDB1_PD_GETHD_INFO+3)
+#define APICDB1_PD_SETSETTING				(APICDB1_PD_GETHD_INFO+4)
+#define APICDB1_PD_BSL_DUMP					(APICDB1_PD_GETHD_INFO+5)
+#define APICDB1_PD_HD_MPCHECK				(APICDB1_PD_GETHD_INFO+6)
+#define APICDB1_PD_HD_GETMPSTATUS			(APICDB1_PD_GETHD_INFO+7)
+#define APICDB1_PD_GETSTATUS				(APICDB1_PD_GETHD_INFO+8)
+#define APICDB1_PD_MAX						(APICDB1_PD_GETHD_INFO+9)
+
+// Sub command for APICDB1_PD_SETSETTING
+#define APICDB4_PD_SET_WRITE_CACHE_OFF		0
+#define APICDB4_PD_SET_WRITE_CACHE_ON		1
+#define APICDB4_PD_SET_SMART_OFF			2
+#define APICDB4_PD_SET_SMART_ON				3
+#define APICDB4_PD_SMART_RETURN_STATUS		4
+
+/*for Block*/
+#define APICDB1_BLOCK_GETINFO				0
+#define APICDB1_BLOCK_HD_BLOCKIDS			(APICDB1_BLOCK_GETINFO+1)
+#define APICDB1_BLOCK_MAX					(APICDB1_BLOCK_GETINFO+2)
+
+/*for event*/
+#define APICDB1_EVENT_GETEVENT				0
+#define APICDB1_EVENT_MAX					(APICDB1_EVENT_GETEVENT + 1)
+
+/*for DBG*/
+#define APICDB1_DBG_PDWR					0
+#define APICDB1_DBG_MAP						(APICDB1_DBG_PDWR+1)
+#define APICDB1_DBG_MAX						(APICDB1_DBG_PDWR+2)
+
+/*for FLASH*/
+#define APICDB1_FLASH_BIN					0
+
+#endif
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/common/com_dbg.h linux-2.6.25/drivers/scsi/mv/common/com_dbg.h
--- linux-2.6.25.orig/drivers/scsi/mv/common/com_dbg.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/common/com_dbg.h	2008-07-28 18:42:43.317189068 +0200
@@ -0,0 +1,132 @@
+#if !defined(COMMON_DEBUG_H)
+#define COMMON_DEBUG_H
+
+/* 
+ *	Marvell Debug Interface
+ * 
+ *	MACRO
+ *		MV_DEBUG is defined in debug version not in release version.
+ *	
+ *	Debug functions:
+ *		MV_PRINT:	print string in release and debug build.
+ *		MV_DPRINT:	print string in debug build.
+ *		MV_TRACE:	print string including file name, line number in release and debug build.
+ *		MV_DTRACE:	print string including file name, line number in debug build.
+ *		MV_ASSERT:	assert in release and debug build.
+ *		MV_DASSERT: assert in debug build.
+ */
+
+/*
+ *
+ * Debug functions
+ *
+ */
+/* For both debug and release version */
+#if defined(_OS_WINDOWS)
+	ULONG
+	_cdecl
+	DbgPrint(
+			PCHAR Format,
+			...
+			);
+	#define MV_PRINT				DbgPrint
+#if ( defined(_CPU_IA_64B) || defined(_CPU_AMD_64B) )	
+	
+	#if (_MSC_VER >= 800) || defined(_STDCALL_SUPPORTED)
+		#define NTAPI __stdcall
+	#else
+		#define NTAPI
+	#endif
+	
+	void NTAPI DbgBreakPoint(void);
+	#define MV_ASSERT(_condition_) 	do { if (!(_condition_)) DbgBreakPoint(); } while(0)
+
+#else
+	#define MV_ASSERT(_condition_)	do { if (!(_condition_)) {__asm int 3}; } while(0)
+#endif
+	#define MV_TRACE(_x_)	do {	\
+				MV_PRINT("%s(%d) ", __FILE__, __LINE__);	\
+				MV_PRINT _x_;								\
+			} while( 0 )
+
+#elif defined(_OS_LINUX)
+#define MV_PRINT  printk
+
+#define MV_ASSERT(x)    do { \
+				if ( !(x) ) \
+					MV_PRINT("Assert at File %s: Line %d.\n", __FILE__, __LINE__); \
+	                } while (0)
+
+#define MV_TRACE(_x_)   do {\
+				MV_PRINT("%s(%d) ", __FILE__, __LINE__); \
+				MV_PRINT _x_;\
+			} while(0)
+
+#else /* OTHER OSes */
+	#define MV_PRINT(_x_)
+	#define MV_ASSERT(_condition_)
+	#define MV_TRACE(_x_)
+#endif /* _OS_WINDOWS */
+
+
+/* 
+ * Used with MV_DBG macro, see below .
+ * Should be useful for Win driver too, so it is placed here.
+ *
+ */
+
+#define DMSG_CORE    0x0001  /* CORE dbg msg */
+#define DMSG_KERN    0x0002  /* kernel driver dbg msg */
+#define DMSG_SCSI    0x0004  /* SCSI Subsystem dbg msg */
+#define DMSG_ACDB    0x0008  /* A.C.'s debug msg */
+#define DMSG_HBA     0x0010  /* HBA dbg msg */
+#define DMSG_ERRH    0x0020  /* Error Handling dbg msg */
+#define DMSG_FREQ    0x0040  /* indicates it's _VERR_ frequent dbg msg, will jam your console and severely impact your performance */
+#define DMSG_IOCTL   0x0080  /* err in ioctl */
+#define DMSG_MSG     0x0100  /* plain message, should be enabled all time */
+#define DMSG_SCSI_FREQ 0x0200/* freq scsi dbg msg */
+#define DMSG_RAID    0x0400  /* raid dbg msg */
+#define DMSG_PROF    0x0800  /* profiling msg */
+#define DMSG_PROF_FREQ 0x1000 /* freq profiling msg */
+#define DMSG_TRACE   0x2000  /* trace msg */
+
+/* For debug version only */
+#if defined(MV_DEBUG)
+        #ifdef _OS_LINUX
+                extern unsigned int mv_dbg_opts;
+                #define MV_DBG(x,...) do {\
+			                      if (x&mv_dbg_opts) \
+				                      MV_PRINT(__VA_ARGS__); \
+    		                      } while (0)
+                /* MV_DPRINT to be treated as CORE related debug msg  */
+                #define MV_DPRINT(x)  do {\
+			                      if (DMSG_CORE&mv_dbg_opts) \
+				                      MV_PRINT x; \
+    		                      } while (0)
+        #else
+                #define MV_DPRINT(x)	MV_PRINT x
+                /* in case drivers for non-linux os go crazy */
+                /* MS compiler doesn't support variadic parameter ...
+                 * that's pre VS2005
+                 */
+                /* #define MV_DBG(x, ...)       do{}while(0) */
+        #endif /* _OS_LINUX */
+
+	#define MV_DASSERT	MV_ASSERT
+	#define MV_DTRACE	MV_TRACE
+#else
+        #ifdef _OS_LINUX
+                #define MV_DBG(x,...)       do{}while(0)
+        #endif
+	#define MV_DPRINT(x)
+	#define MV_DASSERT(x)
+	#define MV_DTRACE(x)
+#endif
+
+MV_BOOLEAN mvLogRegisterModule(MV_U8 moduleId, MV_U32 filterMask, char* name);
+MV_BOOLEAN mvLogSetModuleFilter(MV_U8 moduleId, MV_U32 filterMask);
+MV_U32 mvLogGetModuleFilter(MV_U8 moduleId);
+void mvLogMsg(MV_U8 moduleId, MV_U32 type, char* format, ...);
+
+#endif /* COMMON_DEBUG_H */
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/common/com_define.h linux-2.6.25/drivers/scsi/mv/common/com_define.h
--- linux-2.6.25.orig/drivers/scsi/mv/common/com_define.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/common/com_define.h	2008-07-28 18:42:43.317189068 +0200
@@ -0,0 +1,200 @@
+#ifndef COM_DEFINE_H
+#define COM_DEFINE_H
+
+
+/*
+ *	This file defines Marvell OS independent primary data type for all OS.
+ *
+ *	We have macros to differentiate different CPU and OS.
+ *
+ *	CPU definitions:
+ *	_CPU_X86_16B	
+ *	Specify 16bit x86 platform, this is used for BIOS and DOS utility.
+ *	_CPU_X86_32B
+ *	Specify 32bit x86 platform, this is used for most OS drivers.
+ *	_CPU_IA_64B
+ *	Specify 64bit IA64 platform, this is used for IA64 OS drivers.
+ *	_CPU_AMD_64B
+ *	Specify 64bit AMD64 platform, this is used for AMD64 OS drivers.
+ *
+ *	OS definitions:
+ *	_OS_WINDOWS
+ *	_OS_LINUX
+ *	_OS_FREEBSD
+ *	_OS_BIOS
+ */
+ 
+#if !defined(IN)
+	#define IN
+#endif
+
+#if !defined(OUT)
+	#define OUT
+#endif
+
+#if defined(_OS_LINUX)
+	#define BUFFER_PACKED		__attribute__((packed))
+#elif defined(_OS_WINDOWS)
+	#define BUFFER_PACKED
+#elif defined(_OS_BIOS)
+	#define BUFFER_PACKED
+#endif
+
+#define MV_BIT(x)			(1L << (x))
+
+#if !defined(NULL)
+#define NULL 0
+#endif 
+
+#define MV_TRUE							1
+#define MV_FALSE						0
+
+typedef unsigned char	MV_BOOLEAN, *MV_PBOOLEAN;
+typedef unsigned char	MV_U8, *MV_PU8;
+typedef signed char	MV_I8, *MV_PI8;
+
+typedef unsigned short	MV_U16, *MV_PU16;
+typedef signed short	MV_I16, *MV_PI16;
+
+typedef void		MV_VOID, *MV_PVOID;
+
+#ifdef _OS_BIOS
+typedef MV_U8 GEN_FAR* MV_LPU8;
+typedef MV_I8 GEN_FAR* MV_LPI8;
+typedef MV_U16 GEN_FAR* MV_LPU16;
+typedef MV_I16 GEN_FAR* MV_LPI16;
+
+typedef MV_U32 GEN_FAR* MV_LPU32;
+typedef MV_I32 GEN_FAR* MV_LPI32;
+typedef void GEN_FAR* MV_LPVOID;
+#else
+typedef void		*MV_LPVOID;
+#endif
+
+/* Pre-define segment in C code*/
+#if defined(_OS_BIOS)
+#define BASEATTR __based(__segname("_CODE")) 
+#define BASEATTRData __based(__segname("_CODE")) 
+#else
+#define BASEATTR 
+#endif
+
+/* For debug version only */
+#ifdef DEBUG_BIOS
+	#define MV_DUMP32(_x_) mvDebugDumpU32(_x_)
+	#define MV_DUMP16(_x_)  mvDebugDumpU16(_x_)
+	#define MV_DUMP8(_x_)  mvDebugDumpU8(_x_)
+	#define MV_DUMPC32(_x_)  mvDebugDumpU32(_x_)
+	#define MV_DUMPC16(_x_)  mvDebugDumpU16(_x_)
+	#define MV_DUMPC8(_x_)  mvDebugDumpU8(_x_)
+	#define MV_DUMPE32(_x_) //mvDebugDumpU32(_x_)
+	#define MV_DUMPE16(_x_)  mvDebugDumpU16(_x_)
+	#define MV_DUMPE8(_x_)  mvDebugDumpU8(_x_)
+	#define MV_DUMPRUN(_x_)  mvDebugDumpU16(_x_)
+	#define MV_HALTKEY		waitForKeystroke()
+	#define MV_ENTERLINE	mvChangLine()
+	//#define MV_DUMPRUN(_x_)  //mvDebugDumpRun(_x_)
+
+#else
+	#define MV_DUMPC32(_x_)
+	#define MV_DUMPC16(_x_)
+	#define MV_DUMPC8(_x_)	
+	#define MV_DUMPE32(_x_) 
+	#define MV_DUMPE16(_x_) 
+	#define MV_DUMPE8(_x_) 
+	#define MV_DUMP32(_x_) 
+	#define MV_DUMP16(_x_)
+	#define MV_DUMP8(_x_)
+	#define MV_DUMPRUN(_x_) 
+	#define MV_HALTKEY
+	#define MV_ENTERLINE
+#endif
+
+#if defined(_OS_LINUX)
+	/*#include <linux/types.h>*/
+	/** unsigned/signed long is 64bit for AMD64, so use unsigned int instead */
+typedef unsigned int MV_U32, *MV_PU32;
+typedef   signed int MV_I32, *MV_PI32;
+typedef unsigned long MV_ULONG, *MV_PULONG;
+typedef   signed long MV_ILONG, *MV_PILONG;
+
+#else
+	/** unsigned/signed long is 32bit for x86, IA64 and AMD64 */
+	typedef unsigned long MV_U32, *MV_PU32;
+	typedef   signed long MV_I32, *MV_PI32;
+#endif
+
+#if defined(_OS_WINDOWS)
+
+	typedef unsigned __int64 _MV_U64;
+	typedef   signed __int64 _MV_I64;
+#elif defined(_OS_LINUX)
+	typedef unsigned long long _MV_U64;
+	typedef   signed long long _MV_I64;
+#elif defined(_OS_FREEBSD)
+
+#else
+
+#endif
+
+#ifdef _OS_LINUX
+
+	#ifdef _64_SYS_
+		#define _SUPPORT_64_BIT
+	#else
+		#ifdef _SUPPORT_64_BIT
+			#error Error 64_BIT CPU Macro
+		#endif
+	#endif
+
+#elif defined(_OS_BIOS)
+	#undef	_SUPPORT_64_BIT
+#else
+	#define _SUPPORT_64_BIT
+#endif
+
+/*
+ * Primary Data Type
+ */
+#if defined(_OS_LINUX) || defined(_OS_WINDOWS)
+	/* Windows and Linux compiler supports 64 bit data structure. */
+	typedef union {
+		struct {
+			MV_U32 low;
+			MV_U32 high;
+		};
+		_MV_U64 value;
+	} MV_U64, *PMV_U64;
+#else
+	/* BIOS compiler doesn't support 64 bit data structure. */
+	typedef union {
+		struct {
+			MV_U32 low;
+			MV_U32 high;
+		};
+
+		struct {
+			MV_U32 value;
+			MV_U32 value1;
+		};
+	} _MV_U64,MV_U64, *MV_PU64, *PMV_U64;
+#endif
+
+/* PTR_INTEGER is necessary to convert between pointer and integer. */
+#if defined(_SUPPORT_64_BIT)
+	typedef _MV_U64 MV_PTR_INTEGER;
+#else
+	typedef MV_U32 MV_PTR_INTEGER;
+#endif
+
+/* LBA is the logical block access */
+typedef MV_U64 MV_LBA;
+
+#if defined(_CPU_16B)
+	typedef MV_U32 MV_PHYSICAL_ADDR;
+#else
+	typedef MV_U64 MV_PHYSICAL_ADDR;
+#endif
+
+#endif /* COM_DEFINE_H */
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/common/com_error.h linux-2.6.25/drivers/scsi/mv/common/com_error.h
--- linux-2.6.25.orig/drivers/scsi/mv/common/com_error.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/common/com_error.h	2008-07-28 18:42:43.317189068 +0200
@@ -0,0 +1,55 @@
+#ifndef __COM_ERROR_H__
+#define __COM_ERROR_H__
+#define ERR_GENERIC							2
+#define ERR_RAID							50
+#define ERR_CORE							100
+#define ERR_API								150
+
+#define ERR_NONE							0
+#define ERR_FAIL							1
+// Generic error
+#define	ERR_UNKNOWN					(ERR_GENERIC + 1)
+#define ERR_NO_RESOURCE				(ERR_GENERIC + 2)
+#define ERR_REQ_OUT_OF_RANGE		(ERR_GENERIC + 3)
+#define ERR_INVALID_REQUEST			(ERR_GENERIC + 4)
+#define ERR_INVALID_PARAMETER		(ERR_GENERIC + 5)
+#define ERR_INVALID_LD_ID			(ERR_GENERIC + 6)
+#define ERR_INVALID_HD_ID			(ERR_GENERIC + 7)
+#define ERR_INVALID_EXP_ID			(ERR_GENERIC + 8)
+#define ERR_INVALID_PM_ID			(ERR_GENERIC + 9)
+#define ERR_INVALID_BLOCK_ID		(ERR_GENERIC + 10)
+#define ERR_INVALID_ADAPTER_ID		(ERR_GENERIC + 11)
+#define ERR_INVALID_RAID_MODE		(ERR_GENERIC + 12)
+
+// RAID errors
+#define ERR_TARGET_IN_LD_FUNCTIONAL	(ERR_RAID + 1)
+#define ERR_TARGET_NO_ENOUGH_SPACE	(ERR_RAID + 2)
+#define ERR_HD_IS_NOT_SPARE			(ERR_RAID + 3)
+#define ERR_HD_IS_SPARE				(ERR_RAID + 4)
+#define ERR_HD_NOT_EXIST			(ERR_RAID + 5)
+#define ERR_HD_IS_ASSIGNED_ALREADY	(ERR_RAID + 6)
+#define ERR_INVALID_HD_COUNT		(ERR_RAID + 7)
+#define ERR_LD_NOT_READY			(ERR_RAID + 8)
+#define ERR_LD_NOT_EXIST			(ERR_RAID + 9)
+#define ERR_LD_IS_FUNCTIONAL		(ERR_RAID + 10)
+#define ERR_HAS_BGA_ACTIVITY		(ERR_RAID + 11)
+#define ERR_NO_BGA_ACTIVITY			(ERR_RAID + 12)
+#define ERR_BGA_RUNNING				(ERR_RAID + 13)
+#define ERR_RAID_NO_AVAILABLE_ID	(ERR_RAID + 14)
+#define ERR_LD_NO_ATAPI				(ERR_RAID + 15)
+#define ERR_INVALID_RAID6_PARITY_DISK_COUNT	(ERR_RAID + 16)
+#define ERR_INVALID_BLOCK_SIZE				(ERR_RAID + 17)
+#define ERR_MIGRATION_NOT_NEED				(ERR_RAID + 18)
+#define ERR_STRIPE_BLOCK_SIZE_MISMATCH		(ERR_RAID + 19)
+#define ERR_MIGRATION_NOT_SUPPORT			(ERR_RAID + 20)
+
+// API errors
+#define ERR_INVALID_MATCH_ID		(ERR_API + 1)	
+#define ERR_INVALID_HDCOUNT			(ERR_API + 2)
+#define ERR_INVALID_BGA_ACTION		(ERR_API + 3)
+#define ERR_HD_IN_DIFF_CARD			(ERR_API + 4)
+#define ERR_INVALID_FLASH_TYPE		(ERR_API + 5)
+#define ERR_INVALID_FLASH_ACTION	(ERR_API + 6)
+#define	ERR_TOO_FEW_EVENT			(ERR_API + 7)
+
+#endif /*  __COM_ERROR_H__ */
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/common/com_event_define.h linux-2.6.25/drivers/scsi/mv/common/com_event_define.h
--- linux-2.6.25.orig/drivers/scsi/mv/common/com_event_define.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/common/com_event_define.h	2008-07-28 18:42:43.318189000 +0200
@@ -0,0 +1,252 @@
+#ifndef COM_EVENT_DEFINE_H
+#define COM_EVENT_DEFINE_H
+//=======================================
+//			Perceived Severity
+//=======================================
+
+#define SEVERITY_UNKNOWN	0
+#define SEVERITY_OTHER		1
+#define SEVERITY_INFO		2	
+#define SEVERITY_WARNING	3	// used when its appropriate to let the user decide if action is needed
+#define SEVERITY_MINOR		4	// indicate action is needed, but the situation is not serious at this time
+#define SEVERITY_MAJOR		5	// indicate action is needed NOW
+#define SEVERITY_CRITICAL	6	// indicate action is needed NOW and the scope is broad
+#define SEVERITY_FATAL		7	// indicate an error occurred, but it's too late to take remedial action
+
+//=======================================
+//				Event Classes
+//=======================================
+
+#define	EVT_CLASS_ADAPTER			0
+#define	EVT_CLASS_LD				1		// Logical Drive
+#define	EVT_CLASS_HD				2		// Hard Drive
+#define	EVT_CLASS_PM				3		// Port Multiplier
+#define	EVT_CLASS_EXPANDER			4
+#define	EVT_CLASS_MDD				5 
+#define	EVT_CLASS_BSL				6		// Bad Sector Lock
+
+//=============================================================
+//					Event Codes 
+//
+//	!!!  When adding an EVT_CODE, Please put its severity level
+//  !!!  and suggested message string as comments.  This is the 
+//  !!!  only place to document how 'Params' in 'DriverEvent' 
+//  !!!  structure is to be used.
+//=============================================================
+
+//
+// Event code for EVT_CLASS_LD (Logical Drive)
+//
+
+#define	EVT_CODE_LD_OFFLINE					0	// ("LD %d is offline", DeviceID)
+#define	EVT_CODE_LD_ONLINE					1	// ("LD %d is online", DeviceID) 
+#define	EVT_CODE_LD_CREATE					2	// ("LD %d is created", DeviceID)
+#define	EVT_CODE_LD_DELETE					3	// ("LD %d has been deleted", DeviceID)
+#define	EVT_CODE_LD_DEGRADE					4	// ("LD %d is degrading", DeviceID)
+#define	EVT_CODE_LD_PARTIALLYOPTIMAL		5	// ("LD %d is in loose condition", DeviceID)
+#define	EVT_CODE_LD_CACHE_MODE_CHANGE		6	// 
+#define	EVT_CODE_LD_FIXED					7	// 
+#define	EVT_CODE_LD_FOUND_ERROR				8	// 
+// Note: Don't change the following 8 event code order! See raid_get_bga_event_id() for detail.
+#define	EVT_CODE_LD_CHECK_START				9	// 
+#define	EVT_CODE_LD_CHECK_RESTART			10	// 
+#define	EVT_CODE_LD_CHECK_PAUSE				11	// 
+#define	EVT_CODE_LD_CHECK_RESUME			12	// 
+#define	EVT_CODE_LD_CHECK_ABORT				13	// 
+#define	EVT_CODE_LD_CHECK_COMPLETE			14	// 
+#define	EVT_CODE_LD_CHECK_PROGRESS			15	//  
+#define	EVT_CODE_LD_CHECK_ERROR				16	// 
+// Note: Don't change the following 8 event code order! See raid_get_bga_event_id() for detail. 
+#define	EVT_CODE_LD_FIX_START				17	// 
+#define	EVT_CODE_LD_FIX_RESTART				18	// 
+#define	EVT_CODE_LD_FIX_PAUSE				19	// 
+#define	EVT_CODE_LD_FIX_RESUME				20	// 
+#define	EVT_CODE_LD_FIX_ABORT				21	// 
+#define	EVT_CODE_LD_FIX_COMPLETE			22	// 
+#define	EVT_CODE_LD_FIX_PROGRESS			23	// 
+#define	EVT_CODE_LD_FIX_ERROR				24	//
+// Note: Don't change the following 8 event code order!  See raid_get_bga_event_id() for detail.
+#define	EVT_CODE_LD_INIT_QUICK_START		25	// ("Quick initialization of LD started", DeviceID)
+#define	EVT_CODE_LD_INIT_QUICK_RESTART		26	// ("Quick initialization of LD restarted", DeviceID)
+#define	EVT_CODE_LD_INIT_QUICK_PAUSE		27	// ("Quick initialization of LD paused", DeviceID)
+#define	EVT_CODE_LD_INIT_QUICK_RESUME		28	// ("Quick initialization of LD resumed", DeviceID)
+#define	EVT_CODE_LD_INIT_QUICK_ABORT		29	// ("Quick initialization of LD aborted", DeviceID)
+#define	EVT_CODE_LD_INIT_QUICK_COMPLETE		30	// ("Quick initialization of LD completed", DeviceID)
+#define	EVT_CODE_LD_INIT_QUICK_PROGRESS		31	// ("Quick initialization of LD %d is %d%% done", DeviceID, Params[0])
+#define	EVT_CODE_LD_INIT_QUICK_ERROR		32	// ("Quick initialization of LD failed", DeviceID)
+// Note: Don't change the following 8 event code order!  See raid_get_bga_event_id() for detail.
+#define	EVT_CODE_LD_INIT_BACK_START			33	// ("Background initialization of LD started", DeviceID)
+#define	EVT_CODE_LD_INIT_BACK_RESTART		34	// ("Background initialization of LD restarted", DeviceID)
+#define	EVT_CODE_LD_INIT_BACK_PAUSE			35	// ("Background initialization of LD paused", DeviceID)
+#define	EVT_CODE_LD_INIT_BACK_RESUME		36	// ("Background initialization of LD resumed", DeviceID)
+#define	EVT_CODE_LD_INIT_BACK_ABORT			37	// ("Background initialization of LD aborted", DeviceID)
+#define	EVT_CODE_LD_INIT_BACK_COMPLETE		38	// ("Background initialization of LD completed", DeviceID)
+#define	EVT_CODE_LD_INIT_BACK_PROGRESS		39	// ("Background initialization of LD %d is %d%% done", DeviceID, Params[0])
+#define	EVT_CODE_LD_INIT_BACK_ERROR			40	// ("Background initialization of LD failed", DeviceID)
+// Note: Don't change the following 8 event code order!  See raid_get_bga_event_id() for detail.
+#define	EVT_CODE_LD_INIT_FORE_START			41	// ("Foreground initialization of LD started", DeviceID)
+#define	EVT_CODE_LD_INIT_FORE_RESTART		42	// ("Foreground initialization of LD restarted", DeviceID)
+#define	EVT_CODE_LD_INIT_FORE_PAUSE			43	// ("Foreground initialization of LD paused", DeviceID)
+#define	EVT_CODE_LD_INIT_FORE_RESUME		44	// ("Foreground initialization of LD resumed", DeviceID)
+#define	EVT_CODE_LD_INIT_FORE_ABORT			45	// ("Foreground initialization of LD aborted", DeviceID)
+#define	EVT_CODE_LD_INIT_FORE_COMPLETE		46	// ("Foreground initialization of LD completed", DeviceID)
+#define	EVT_CODE_LD_INIT_FORE_PROGRESS		47	// ("Foreground initialization of LD %d is %d%% done", DeviceID, Params[0])
+#define	EVT_CODE_LD_INIT_FORE_ERROR			48	// ("Foreground initialization of LD failed", DeviceID)
+// Note: Don't change the following 8 event code order!  See raid_get_bga_event_id() for detail.
+#define	EVT_CODE_LD_REBUILD_START			49	// 
+#define	EVT_CODE_LD_REBUILD_RESTART			50	// 
+#define	EVT_CODE_LD_REBUILD_PAUSE			51	// 
+#define	EVT_CODE_LD_REBUILD_RESUME			52	// 
+#define	EVT_CODE_LD_REBUILD_ABORT			53	// 
+#define	EVT_CODE_LD_REBUILD_COMPLETE		54	// 
+#define	EVT_CODE_LD_REBUILD_PROGRESS		55	// ("Rebuilding of LD %d is %d%% done", DeviceID, Params[0])
+#define	EVT_CODE_LD_REBUILD_ERROR			56	// 
+// Note: Don't change the following 8 event code order!  See raid_get_bga_event_id() for detail.
+#define	EVT_CODE_LD_MIGRATION_START			57	// 
+#define	EVT_CODE_LD_MIGRATION_RESTART		58	// 
+#define	EVT_CODE_LD_MIGRATION_PAUSE			59	// 
+#define	EVT_CODE_LD_MIGRATION_RESUME		60	// 
+#define	EVT_CODE_LD_MIGRATION_ABORT			61	// 
+#define	EVT_CODE_LD_MIGRATION_COMPLETE		62	// 
+#define	EVT_CODE_LD_MIGRATION_PROGRESS		63	// ("Migration of LD %d is %d%% done", DeviceID, Params[0])
+#define	EVT_CODE_LD_MIGRATION_ERROR			64	// 
+//only used in application
+#define	EVT_CODE_EVT_ERR					0xffff// 
+
+//
+// Event code for EVT_CLASS_HD (Hard Disk)
+//
+
+#define	EVT_CODE_HD_OFFLINE					0   // ("Disk %d is unplugged", DeviceID)
+#define	EVT_CODE_HD_ONLINE					1   // ("Disk %d is plugged in", DeviceID)
+#define	EVT_CODE_HD_SETDOWN					2   //disk setdown
+#define	EVT_CODE_HD_TIMEOUT					3
+#define	EVT_CODE_HD_RW_ERROR				4
+#define	EVT_CODE_HD_SMART					5
+#define	EVT_CODE_HD_ERROR_FIXED				6
+#define	EVT_CODE_HD_PLUG_IN					7
+#define	EVT_CODE_HD_PLUG_OUT				8
+#define	EVT_CODE_HD_ASSIGN_SPARE			9
+#define	EVT_CODE_HD_REMOVE_SPARE			10
+#define	EVT_CODE_HD_SMART_THRESHOLD_OVER	11
+
+//
+// Event code for EVT_CLASS_MDD
+//
+
+#define	EVT_CODE_MDD_ERROR					0
+
+
+//=======================================
+//				Event IDs
+//=======================================
+
+//
+// Event Id for EVT_CLASS_LD
+//
+
+#define	EVT_ID_LD_OFFLINE					( EVT_CLASS_LD << 16 | EVT_CODE_LD_OFFLINE )
+#define	EVT_ID_LD_ONLINE					( EVT_CLASS_LD << 16 | EVT_CODE_LD_ONLINE ) 
+#define	EVT_ID_LD_CREATE					( EVT_CLASS_LD << 16 | EVT_CODE_LD_CREATE )
+#define	EVT_ID_LD_DELETE					( EVT_CLASS_LD << 16 | EVT_CODE_LD_DELETE )
+#define	EVT_ID_LD_DEGRADE					( EVT_CLASS_LD << 16 | EVT_CODE_LD_DEGRADE )
+#define	EVT_ID_LD_PARTIALLYOPTIMAL 			( EVT_CLASS_LD << 16 | EVT_CODE_LD_PARTIALLYOPTIMAL )
+#define	EVT_ID_LD_CACHE_MODE_CHANGE			( EVT_CLASS_LD << 16 | EVT_CODE_LD_CACHE_MODE_CHANGE )
+#define	EVT_ID_LD_FIXED						( EVT_CLASS_LD << 16 | EVT_CODE_LD_FIXED )
+#define	EVT_ID_LD_FOUND_ERROR				( EVT_CLASS_LD << 16 | EVT_CODE_LD_FOUND_ERROR )
+
+#define	EVT_ID_LD_CHECK_START				( EVT_CLASS_LD << 16 | EVT_CODE_LD_CHECK_START )
+#define	EVT_ID_LD_CHECK_RESTART				( EVT_CLASS_LD << 16 | EVT_CODE_LD_CHECK_RESTART )
+#define	EVT_ID_LD_CHECK_PAUSE				( EVT_CLASS_LD << 16 | EVT_CODE_LD_CHECK_PAUSE )
+#define	EVT_ID_LD_CHECK_RESUME				( EVT_CLASS_LD << 16 | EVT_CODE_LD_CHECK_RESUME )
+#define	EVT_ID_LD_CHECK_ABORT				( EVT_CLASS_LD << 16 | EVT_CODE_LD_CHECK_ABORT )
+#define	EVT_ID_LD_CHECK_COMPLETE			( EVT_CLASS_LD << 16 | EVT_CODE_LD_CHECK_COMPLETE )
+#define	EVT_ID_LD_CHECK_PROGRESS			( EVT_CLASS_LD << 16 | EVT_CODE_LD_CHECK_PROGRESS )
+#define	EVT_ID_LD_CHECK_ERROR				( EVT_CLASS_LD << 16 | EVT_CODE_LD_CHECK_ERROR )
+
+#define	EVT_ID_LD_FIXED_START				( EVT_CLASS_LD << 16 | EVT_CODE_LD_FIX_START )
+#define	EVT_ID_LD_FIXED_RESTART				( EVT_CLASS_LD << 16 | EVT_CODE_LD_FIX_RESTART )
+#define	EVT_ID_LD_FIXED_PAUSE				( EVT_CLASS_LD << 16 | EVT_CODE_LD_FIX_PAUSE )
+#define	EVT_ID_LD_FIXED_RESUME				( EVT_CLASS_LD << 16 | EVT_CODE_LD_FIX_RESUME )
+#define	EVT_ID_LD_FIXED_ABORT				( EVT_CLASS_LD << 16 | EVT_CODE_LD_FIX_ABORT )
+#define	EVT_ID_LD_FIXED_COMPLETE			( EVT_CLASS_LD << 16 | EVT_CODE_LD_FIX_COMPLETE )
+#define	EVT_ID_LD_FIXED_PROGRESS			( EVT_CLASS_LD << 16 | EVT_CODE_LD_FIX_PROGRESS )
+#define	EVT_ID_LD_FIXED_ERROR				( EVT_CLASS_LD << 16 | EVT_CODE_LD_FIX_ERROR )
+
+#define	EVT_ID_LD_INIT_QUICK_START			( EVT_CLASS_LD << 16 | EVT_CODE_LD_INIT_QUICK_START )
+#define	EVT_ID_LD_INIT_QUICK_RESTART		( EVT_CLASS_LD << 16 | EVT_CODE_LD_INIT_QUICK_RESTART )
+#define	EVT_ID_LD_INIT_QUICK_PAUSE			( EVT_CLASS_LD << 16 | EVT_CODE_LD_INIT_QUICK_PAUSE )
+#define	EVT_ID_LD_INIT_QUICK_RESUME			( EVT_CLASS_LD << 16 | EVT_CODE_LD_INIT_QUICK_RESUME )
+#define	EVT_ID_LD_INIT_QUICK_ABORT			( EVT_CLASS_LD << 16 | EVT_CODE_LD_INIT_QUICK_ABORT )
+#define	EVT_ID_LD_INIT_QUICK_COMPLETE		( EVT_CLASS_LD << 16 | EVT_CODE_LD_INIT_QUICK_COMPLETE )
+#define	EVT_ID_LD_INIT_QUICK_PROGRESS		( EVT_CLASS_LD << 16 | EVT_CODE_LD_INIT_QUICK_PROGRESS )
+#define	EVT_ID_LD_INIT_QUICK_ERROR			( EVT_CLASS_LD << 16 | EVT_CODE_LD_INIT_QUICK_ERROR )
+
+#define	EVT_ID_LD_INIT_BACK_START			( EVT_CLASS_LD << 16 | EVT_CODE_LD_INIT_BACK_START )
+#define	EVT_ID_LD_INIT_BACK_RESTART			( EVT_CLASS_LD << 16 | EVT_CODE_LD_INIT_BACK_RESTART )
+#define	EVT_ID_LD_INIT_BACK_PAUSE			( EVT_CLASS_LD << 16 | EVT_CODE_LD_INIT_BACK_PAUSE )
+#define	EVT_ID_LD_INIT_BACK_RESUME			( EVT_CLASS_LD << 16 | EVT_CODE_LD_INIT_BACK_RESUME )
+#define	EVT_ID_LD_INIT_BACK_ABORT			( EVT_CLASS_LD << 16 | EVT_CODE_LD_INIT_BACK_ABORT )
+#define	EVT_ID_LD_INIT_BACK_COMPLETE		( EVT_CLASS_LD << 16 | EVT_CODE_LD_INIT_BACK_COMPLETE )
+#define	EVT_ID_LD_INIT_BACK_PROGRESS		( EVT_CLASS_LD << 16 | EVT_CODE_LD_INIT_BACK_PROGRESS )
+#define	EVT_ID_LD_INIT_BACK_ERROR			( EVT_CLASS_LD << 16 | EVT_CODE_LD_INIT_BACK_ERROR )
+
+#define	EVT_ID_LD_INIT_FORE_START			( EVT_CLASS_LD << 16 | EVT_CODE_LD_INIT_FORE_START )
+#define	EVT_ID_LD_INIT_FORE_RESTART			( EVT_CLASS_LD << 16 | EVT_CODE_LD_INIT_FORE_RESTART )
+#define	EVT_ID_LD_INIT_FORE_PAUSE			( EVT_CLASS_LD << 16 | EVT_CODE_LD_INIT_FORE_PAUSE )
+#define	EVT_ID_LD_INIT_FORE_RESUME			( EVT_CLASS_LD << 16 | EVT_CODE_LD_INIT_FORE_RESUME )
+#define	EVT_ID_LD_INIT_FORE_ABORT			( EVT_CLASS_LD << 16 | EVT_CODE_LD_INIT_FORE_ABORT )
+#define	EVT_ID_LD_INIT_FORE_COMPLETE		( EVT_CLASS_LD << 16 | EVT_CODE_LD_INIT_FORE_COMPLETE )
+#define	EVT_ID_LD_INIT_FORE_PROGRESS		( EVT_CLASS_LD << 16 | EVT_CODE_LD_INIT_FORE_PROGRESS )
+#define	EVT_ID_LD_INIT_FORE_ERROR			( EVT_CLASS_LD << 16 | EVT_CODE_LD_INIT_FORE_ERROR )
+
+#define	EVT_ID_LD_REBUILD_START				( EVT_CLASS_LD << 16 | EVT_CODE_LD_REBUILD_START )
+#define	EVT_ID_LD_REBUILD_RESTART			( EVT_CLASS_LD << 16 | EVT_CODE_LD_REBUILD_RESTART )
+#define	EVT_ID_LD_REBUILD_PAUSE				( EVT_CLASS_LD << 16 | EVT_CODE_LD_REBUILD_PAUSE )
+#define	EVT_ID_LD_REBUILD_RESUME			( EVT_CLASS_LD << 16 | EVT_CODE_LD_REBUILD_RESUME )
+#define	EVT_ID_LD_REBUILD_ABORT				( EVT_CLASS_LD << 16 | EVT_CODE_LD_REBUILD_ABORT )
+#define	EVT_ID_LD_REBUILD_COMPLETE			( EVT_CLASS_LD << 16 | EVT_CODE_LD_REBUILD_COMPLETE )
+#define	EVT_ID_LD_REBUILD_PROGRESS			( EVT_CLASS_LD << 16 | EVT_CODE_LD_REBUILD_PROGRESS )
+#define	EVT_ID_LD_REBUILD_ERROR				( EVT_CLASS_LD << 16 | EVT_CODE_LD_REBUILD_ERROR )
+
+#define	EVT_ID_LD_MIGRATION_START			( EVT_CLASS_LD << 16 | EVT_CODE_LD_MIGRATION_START )
+#define	EVT_ID_LD_MIGRATION_RESTART			( EVT_CLASS_LD << 16 | EVT_CODE_LD_MIGRATION_RESTART )
+#define	EVT_ID_LD_MIGRATION_PAUSE			( EVT_CLASS_LD << 16 | EVT_CODE_LD_MIGRATION_PAUSE )
+#define	EVT_ID_LD_MIGRATION_RESUME			( EVT_CLASS_LD << 16 | EVT_CODE_LD_MIGRATION_RESUME )
+#define	EVT_ID_LD_MIGRATION_ABORT			( EVT_CLASS_LD << 16 | EVT_CODE_LD_MIGRATION_ABORT )
+#define	EVT_ID_LD_MIGRATION_COMPLETE		( EVT_CLASS_LD << 16 | EVT_CODE_LD_MIGRATION_COMPLETE )
+#define	EVT_ID_LD_MIGRATION_PROGRESS		( EVT_CLASS_LD << 16 | EVT_CODE_LD_MIGRATION_PROGRESS )
+#define	EVT_ID_LD_MIGRATION_ERROR			( EVT_CLASS_LD << 16 | EVT_CODE_LD_MIGRATION_ERROR )
+
+//
+// Event Id for EVT_CLASS_HD
+//
+
+#define	EVT_ID_HD_OFFLINE					( EVT_CLASS_HD << 16 | EVT_CODE_HD_OFFLINE )
+#define	EVT_ID_HD_ONLINE					( EVT_CLASS_HD << 16 | EVT_CODE_HD_ONLINE )
+#define	EVT_ID_HD_SETDOWN					( EVT_CLASS_HD << 16 | EVT_CODE_HD_SETDOWN )
+#define	EVT_ID_HD_TIMEOUT					( EVT_CLASS_HD << 16 | EVT_CODE_HD_TIMEOUT )
+#define	EVT_ID_HD_RW_ERROR					( EVT_CLASS_HD << 16 | EVT_CODE_HD_RW_ERROR )
+#define	EVT_ID_HD_SMART						( EVT_CLASS_HD << 16 | EVT_CODE_HD_SMART )
+#define	EVT_ID_HD_ERROR_FIXED				( EVT_CLASS_HD << 16 | EVT_CODE_HD_ERROR_FIXED )
+#define	EVT_ID_HD_PLUG_IN					( EVT_CLASS_HD << 16 | EVT_CODE_HD_PLUG_IN )
+#define	EVT_ID_HD_PLUG_OUT					( EVT_CLASS_HD << 16 | EVT_CODE_HD_PLUG_OUT )
+#define	EVT_ID_HD_ASSIGN_SPARE				( EVT_CLASS_HD << 16 | EVT_CODE_HD_ASSIGN_SPARE )
+#define	EVT_ID_HD_REMOVE_SPARE				( EVT_CLASS_HD << 16 | EVT_CODE_HD_REMOVE_SPARE )
+#define	EVT_ID_HD_SMART_THRESHOLD_OVER		( EVT_CLASS_HD << 16 | EVT_CODE_HD_SMART_THRESHOLD_OVER )
+
+//
+// Event Id for EVT_CLASS_MDD
+//
+
+#define	EVT_ID_MDD_ERROR					( EVT_CLASS_MDD << 16 | EVT_CODE_MDD_ERROR )
+
+
+//
+// Event Id for EVT_CLASS_ADAPTER
+//
+
+#define	EVT_ID_EVT_LOST					( EVT_CLASS_ADAPTER << 16 | EVT_CODE_EVT_ERR )
+
+#endif
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/common/com_event_struct.h linux-2.6.25/drivers/scsi/mv/common/com_event_struct.h
--- linux-2.6.25.orig/drivers/scsi/mv/common/com_event_struct.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/common/com_event_struct.h	2008-07-28 18:42:43.318189000 +0200
@@ -0,0 +1,38 @@
+#ifndef COM_EVENT_DRIVER_H
+#define COM_EVENT_DRIVER_H
+
+#include "com_define.h"
+
+#define MAX_EVENTS				20
+#define MAX_EVENT_PARAMS		4
+#define MAX_EVENTS_RETURNED		6
+
+#ifndef _OS_BIOS
+#pragma pack(8)	//TBD
+#endif
+
+typedef struct _DriverEvent
+{
+	MV_U32		TimeStamp;
+	MV_U32		SequenceNo;	// Event sequence number (contiguous in a single adapter)
+	MV_U32		EventID;	// 1st 16 bits - Event class
+							// last 16 bits - Event code of this particular Event class
+	MV_U8		Severity;
+	MV_U8		AdapterID;
+	MV_U16		DeviceID;	// Device ID relate to the event class (HD ID, LD ID etc) 
+	MV_U32		Params[MAX_EVENT_PARAMS];	// Additional information if ABSOLUTELY necessary.
+} DriverEvent, * PDriverEvent;
+
+typedef struct _EventRequest
+{
+	MV_U8		Count;		// [OUT] # of actual events returned
+	MV_U8		Reserved[3];
+	DriverEvent	Events[MAX_EVENTS_RETURNED]; 
+} EventRequest, * PEventRequest;
+
+#ifndef _OS_BIOS
+#pragma pack()
+#endif
+
+#endif
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/common/com_flash.h linux-2.6.25/drivers/scsi/mv/common/com_flash.h
--- linux-2.6.25.orig/drivers/scsi/mv/common/com_flash.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/common/com_flash.h	2008-07-28 18:42:43.318189000 +0200
@@ -0,0 +1,29 @@
+#ifndef COM_FLASH_H
+#define COM_FLASH_H
+
+#include "com_define.h"
+
+#define DRIVER_LENGTH					1024*16
+
+typedef struct BUFFER_ALIGN_8 _Flash_DriverData
+{
+	MV_U16			Size;
+	MV_U8			PageNumber;
+	MV_BOOLEAN		isLastPage;
+	MV_U16			Reserved[2];
+	MV_U8			Data[DRIVER_LENGTH];
+}
+Flash_DriveData, *PFlash_DriveData;
+
+#define MAX_FLASH_LENGTH				(128*1024)	/* 128 KB; parenthesized for safe macro expansion */
+
+typedef struct BUFFER_ALIGN_8 _Flash_Data
+{
+	MV_U16			Size;
+	MV_U16			Reserved[3];
+	MV_U8			Data[MAX_FLASH_LENGTH];
+} 
+Flash_Data, *PFlash_Data;
+
+
+#endif
\ No newline at end of file
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/common/com_ioctl.h linux-2.6.25/drivers/scsi/mv/common/com_ioctl.h
--- linux-2.6.25.orig/drivers/scsi/mv/common/com_ioctl.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/common/com_ioctl.h	2008-07-28 18:42:43.318189000 +0200
@@ -0,0 +1,39 @@
+#if !defined(COM_IOCTL_H)
+#define COM_IOCTL_H
+
+#if defined (_OS_WINDOWS)
+#include <ntddscsi.h>
+#elif defined(_OS_LINUX)
+
+#endif
+
+/* private IOCTL commands */
+#define MV_IOCTL_CHECK_DRIVER \
+	    CTL_CODE( FILE_DEVICE_CONTROLLER, \
+				  0x900, METHOD_BUFFERED, FILE_READ_ACCESS | FILE_WRITE_ACCESS)	
+
+/* IOCTL signature */
+#define MV_IOCTL_DRIVER_SIGNATURE			"mv61xxsg"
+#define MV_IOCTL_DRIVER_SIGNATURE_LENGTH	8
+
+/* IOCTL command status */
+#define IOCTL_STATUS_SUCCESS				0
+#define IOCTL_STATUS_INVALID_REQUEST		1
+#define IOCTL_STATUS_ERROR					2
+
+#ifndef _OS_BIOS
+#pragma pack(8)
+#endif
+
+typedef struct _MV_IOCTL_BUFFER
+{
+	SRB_IO_CONTROL Srb_Ctrl;
+	MV_U8 Data_Buffer[32];
+} MV_IOCTL_BUFFER, *PMV_IOCTL_BUFFER;
+
+#ifndef _OS_BIOS
+#pragma pack()
+#endif
+
+#endif
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/common/com_list.c linux-2.6.25/drivers/scsi/mv/common/com_list.c
--- linux-2.6.25.orig/drivers/scsi/mv/common/com_list.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/common/com_list.c	2008-07-28 18:42:43.319189191 +0200
@@ -0,0 +1,183 @@
+#include "mv_include.h"
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static MV_INLINE void __List_Add(List_Head *new_one,
+			      List_Head *prev,
+			      List_Head *next)
+{
+	next->prev = new_one;	/* link new_one in both directions */
+	new_one->next = next;
+	new_one->prev = prev;
+	prev->next = new_one;
+}
+
+/**
+ * List_Add - add a new entry
+ * @new_one: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ */
+static MV_INLINE void List_Add(List_Head *new_one, List_Head *head)
+{
+	__List_Add(new_one, head, head->next);
+}
+
+/**
+ * List_AddTail - add a new entry
+ * @new_one: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues.
+ */
+static MV_INLINE void List_AddTail(List_Head *new_one, List_Head *head)
+{
+	__List_Add(new_one, head->prev, head);
+}
+
+/*
+ * Delete a list entry by making the prev/next entries
+ * point to each other.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static MV_INLINE void __List_Del(List_Head * prev, List_Head * next)
+{
+	next->prev = prev;
+	prev->next = next;
+}
+
+/**
+ * List_Del - deletes entry from list.
+ * @entry: the element to delete from the list.
+ * Note: List_Empty on entry does not return true after this, the entry is
+ * in an undefined state.
+ */
+static MV_INLINE void List_Del(List_Head *entry)
+{
+	__List_Del(entry->prev, entry->next);
+	entry->next = NULL;	/* clear both links so a stale use faults early */
+	entry->prev = NULL;
+}
+
+/**
+ * List_DelInit - deletes entry from list and reinitialize it.
+ * @entry: the element to delete from the list.
+ */
+static MV_INLINE void List_DelInit(List_Head *entry)
+{
+	__List_Del(entry->prev, entry->next);
+	MV_LIST_HEAD_INIT(entry);	/* entry becomes a valid empty list head again */
+}
+
+/**
+ * List_Move - delete from one list and add as another's head
+ * @list: the entry to move
+ * @head: the head that will precede our entry
+ */
+static MV_INLINE void List_Move(List_Head *list, List_Head *head)
+{
+        __List_Del(list->prev, list->next);
+        List_Add(list, head);
+}
+
+/**
+ * List_MoveTail - delete from one list and add as another's tail
+ * @list: the entry to move
+ * @head: the head that will follow our entry
+ */
+static MV_INLINE void List_MoveTail(List_Head *list,
+				  List_Head *head)
+{
+        __List_Del(list->prev, list->next);
+        List_AddTail(list, head);
+}
+
+/**
+ * List_Empty - tests whether a list is empty
+ * @head: the list to test.
+ */
+static MV_INLINE int List_Empty(const List_Head *head)
+{
+	return head->next == head;	/* a self-linked head has no entries */
+}
+
+static MV_INLINE int List_GetCount(const List_Head *head)
+{
+	int i=0;	/* number of entries, the head itself excluded */
+	List_Head *pos;
+	LIST_FOR_EACH(pos, head) {	/* O(n): walks the whole list */
+		i++;
+	}
+	return i;
+}
+
+static MV_INLINE List_Head* List_GetFirst(List_Head *head)
+{
+	List_Head * one = NULL;
+	if ( List_Empty(head) ) return NULL;	/* nothing to pop */
+
+	one = head->next;
+	List_Del(one);	/* unlink before handing it to the caller */
+	return one;
+}
+
+static MV_INLINE List_Head* List_GetLast(List_Head *head)
+{
+	List_Head * one = NULL;
+	if ( List_Empty(head) ) return NULL;	/* nothing to pop */
+
+	one = head->prev;
+	List_Del(one);	/* unlink before handing it to the caller */
+	return one;
+}
+
+static MV_INLINE void __List_Splice(List_Head *list,
+				 List_Head *head)
+{
+	List_Head *first = list->next;	/* first real entry of the donor list */
+	List_Head *last = list->prev;	/* last real entry of the donor list */
+	List_Head *at = head->next;	/* old successor of head; donor entries go before it */
+
+	first->prev = head;
+	head->next = first;
+
+	last->next = at;
+	at->prev = last;
+}
+
+/**
+ * List_Splice - join two lists
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ */
+static MV_INLINE void List_Splice(List_Head *list, List_Head *head)
+{
+	if (!List_Empty(list))	/* splicing an empty list would corrupt @head */
+		__List_Splice(list, head);
+}
+
+/**
+ * List_Splice_Init - join two lists and reinitialise the emptied list.
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ *
+ * The list at @list is reinitialised
+ */
+static MV_INLINE void List_Splice_Init(List_Head *list,
+				    List_Head *head)
+{
+	if (!List_Empty(list)) {
+		__List_Splice(list, head);
+		MV_LIST_HEAD_INIT(list);	/* donor head is a valid empty list again */
+	}
+}
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/common/com_list.h linux-2.6.25/drivers/scsi/mv/common/com_list.h
--- linux-2.6.25.orig/drivers/scsi/mv/common/com_list.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/common/com_list.h	2008-07-28 18:42:43.319189191 +0200
@@ -0,0 +1,124 @@
+#if !defined(COMMON_LIST_H)
+#define COMMON_LIST_H
+
+/*
+ * Simple doubly linked list implementation.
+ *
+ * Some of the internal functions ("__xxx") are useful when
+ * manipulating whole lists rather than single entries, as
+ * sometimes we already know the next/prev entries and we can
+ * generate better code by using them directly rather than
+ * using the generic single-entry routines.
+ */
+
+
+/*
+ *
+ *
+ * Data Structure
+ *
+ *
+ */
+typedef struct _List_Head {
+	struct _List_Head *prev, *next;
+} List_Head, * PList_Head;
+
+
+/*
+ *
+ *
+ * Exposed Functions
+ *
+ *
+ */
+ 
+#define MV_LIST_HEAD(name) \
+	List_Head name = { &(name), &(name) }
+
+#define MV_LIST_HEAD_INIT(ptr) do { \
+	(ptr)->next = (ptr); (ptr)->prev = (ptr); \
+} while (0)
+
+static MV_INLINE void List_Add(List_Head *new_one, List_Head *head);
+
+static MV_INLINE void List_AddTail(List_Head *new_one, List_Head *head);
+
+static MV_INLINE void List_Del(List_Head *entry);
+
+static MV_INLINE void List_DelInit(List_Head *entry);
+
+static MV_INLINE void List_Move(List_Head *list, List_Head *head);
+
+static MV_INLINE void List_MoveTail(List_Head *list,
+				  List_Head *head);
+
+static MV_INLINE int List_Empty(const List_Head *head);
+
+/**
+ * LIST_ENTRY - get the struct for this entry
+ * @ptr:	the &List_Head pointer.
+ * @type:	the type of the struct this is embedded in.
+ * @member:	the name of the list_struct within the struct.
+ */
+//TBD
+/*#define CONTAINER_OF(ptr, type, member) ({			\
+*        const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
+*        (type *)( (char *)__mptr - OFFSET_OF(type,member) );})
+*/
+
+#define CONTAINER_OF(ptr, type, member) 			\
+        ( (type *)( (char *)(ptr) - OFFSET_OF(type,member) ) )
+
+#define LIST_ENTRY(ptr, type, member) \
+	CONTAINER_OF(ptr, type, member)
+
+/**
+ * LIST_FOR_EACH	-	iterate over a list
+ * @pos:	the &List_Head to use as a loop counter.
+ * @head:	the head for your list.
+ */
+#define LIST_FOR_EACH(pos, head) \
+	for (pos = (head)->next; pos != (head); pos = pos->next)
+
+/**
+ * LIST_FOR_EACH_PREV	-	iterate over a list backwards
+ * @pos:	the &List_Head to use as a loop counter.
+ * @head:	the head for your list.
+ */
+#define LIST_FOR_EACH_PREV(pos, head) \
+	for (pos = (head)->prev; pos != (head); pos = pos->prev)
+
+/**
+ * LIST_FOR_EACH_ENTRY	-	iterate over list of given type
+ * @pos:	the type * to use as a loop counter.
+ * @head:	the head for your list.
+ * @member:	the name of the list_struct within the struct.
+ */
+#define LIST_FOR_EACH_ENTRY(pos, head, member)				\
+	for (pos = LIST_ENTRY((head)->next, typeof(*pos), member);	\
+	     &pos->member != (head); 	\
+	     pos = LIST_ENTRY(pos->member.next, typeof(*pos), member))
+
+/**
+ * LIST_FOR_EACH_ENTRY_PREV - iterate backwards over list of given type.
+ * @pos:	the type * to use as a loop counter.
+ * @head:	the head for your list.
+ * @member:	the name of the list_struct within the struct.
+ */
+#define LIST_FOR_EACH_ENTRY_PREV(pos, head, member)			\
+	for (pos = LIST_ENTRY((head)->prev, typeof(*pos), member);	\
+	     &pos->member != (head); 	\
+	     pos = LIST_ENTRY(pos->member.prev, typeof(*pos), member))
+
+#ifndef _OS_BIOS
+#include "com_list.c"
+#endif
+
+#define List_GetFirstEntry(head, type, member)	\
+	LIST_ENTRY(List_GetFirst(head), type, member)
+
+#define List_GetLastEntry(head, type, member)	\
+	LIST_ENTRY(List_GetLast(head), type, member)
+
+#endif /* COMMON_LIST_H */
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/common/com_scsi.c linux-2.6.25/drivers/scsi/mv/common/com_scsi.c
--- linux-2.6.25.orig/drivers/scsi/mv/common/com_scsi.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/common/com_scsi.c	2008-07-28 18:42:43.319189191 +0200
@@ -0,0 +1,22 @@
+#include "mv_include.h"
+
+MV_VOID MV_SetSenseData(
+	IN PMV_Sense_Data pSense,
+	IN MV_U8 SenseKey,
+    IN MV_U8 AdditionalSenseCode,
+	IN MV_U8 ASCQ
+	)
+{
+	/* The caller should make sure it's a valid sense buffer. */
+	MV_DASSERT( pSense!=NULL );
+
+    MV_ZeroMemory(pSense, sizeof(MV_Sense_Data));
+
+    pSense->Valid = 0;	//TBD: Why? (Valid=0 marks the Information bytes as not meaningful — confirm against SPC)
+	pSense->ErrorCode = MV_SCSI_RESPONSE_CODE;	/* 0x70: fixed-format sense data */
+	pSense->SenseKey = SenseKey;
+	pSense->AdditionalSenseCode = AdditionalSenseCode;
+	pSense->AdditionalSenseCodeQualifier = ASCQ;
+	pSense->AdditionalSenseLength = sizeof(MV_Sense_Data) - 8;	/* bytes following the 8-byte fixed header */
+}
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/common/com_scsi.h linux-2.6.25/drivers/scsi/mv/common/com_scsi.h
--- linux-2.6.25.orig/drivers/scsi/mv/common/com_scsi.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/common/com_scsi.h	2008-07-28 18:42:43.319189191 +0200
@@ -0,0 +1,190 @@
+#if !defined(COM_SCSI_H)
+#define COM_SCSI_H
+
+/*
+ * SCSI command
+ */
+#define SCSI_CMD_INQUIRY				0x12
+#define SCSI_CMD_START_STOP_UNIT		0x1B
+#define SCSI_CMD_TEST_UNIT_READY		0x00
+#define SCSI_CMD_RESERVE_6				0x16
+#define SCSI_CMD_RELEASE_6				0x17
+
+#define SCSI_CMD_READ_6					0x08
+#define SCSI_CMD_READ_10				0x28
+#define SCSI_CMD_READ_12				0xA8
+#define SCSI_CMD_READ_16				0x88
+
+#define SCSI_CMD_WRITE_6				0x0A
+#define SCSI_CMD_WRITE_10				0x2A
+#define SCSI_CMD_WRITE_12				0xAA
+#define SCSI_CMD_WRITE_16				0x8A
+
+#define SCSI_CMD_READ_CAPACITY_10		0x25
+#define SCSI_CMD_READ_CAPACITY_16		0x9E	/* 9Eh/10h */
+
+#define SCSI_CMD_VERIFY_10				0x2F
+#define SCSI_CMD_VERIFY_16				0x8F
+
+#define SCSI_CMD_REQUEST_SENSE			0x03
+#define SCSI_CMD_MODE_SENSE_6			0x1A
+#define SCSI_CMD_MODE_SENSE_10			0x5A
+#define SCSI_CMD_MODE_SELECT_6			0x15
+#define SCSI_CMD_MODE_SELECT_10			0x55
+
+#define SCSI_CMD_WRITE_VERIFY_10		0x2E
+#define SCSI_CMD_SYNCHRONIZE_CACHE_10	0x35
+
+/* ATAPI CDB */
+#define SCSI_CMD_REPORT_LUN						0xA0
+#define SCSI_CMD_PREVENT_MEDIUM_REMOVAL			0x1E
+#define SCSI_CMD_READ_SUB_CHANNEL				0x42
+#define SCSI_CMD_READ_TOC						0x43
+#define SCSI_CMD_READ_DISC_STRUCTURE			0xAD
+#define SCSI_CMD_READ_CD						0xBE
+#define SCSI_CMD_GET_EVENT_STATUS_NOTIFICATION	0x4A
+
+/* MMC */
+#define SCSI_CMD_READ_DISC_INFO				0x51
+
+#define SCSI_IS_READ(cmd)			\
+	( ((cmd)==SCSI_CMD_READ_6)		\
+	|| ((cmd)==SCSI_CMD_READ_10)	\
+	|| ((cmd)==SCSI_CMD_READ_12)	\
+	|| ((cmd)==SCSI_CMD_READ_16) )
+
+#define SCSI_IS_WRITE(cmd)			\
+	( ((cmd)==SCSI_CMD_WRITE_6)		\
+	|| ((cmd)==SCSI_CMD_WRITE_10)	\
+	|| ((cmd)==SCSI_CMD_WRITE_12)	\
+	|| ((cmd)==SCSI_CMD_WRITE_16) )
+
+#define SCSI_IS_MODE_SENSE(cmd)				\
+	( ((cmd)==SCSI_CMD_MODE_SENSE_6)		\
+	|| ((cmd)==SCSI_CMD_MODE_SENSE_10) )
+
+#define SCSI_IS_REQUEST_SENSE(cmd)			\
+	( ((cmd)==SCSI_CMD_REQUEST_SENSE) )
+
+#define SCSI_IS_VERIFY(cmd)			( ((cmd)==SCSI_CMD_VERIFY_10) || ((cmd)==SCSI_CMD_VERIFY_16) )
+
+//TBD: Define for other modules
+#define SCSI_CMD_MARVELL_SPECIFIC	0xE1
+	#define CDB_CORE_MODULE						0x1
+		#define CDB_CORE_IDENTIFY				0x1
+		#define CDB_CORE_SET_UDMA_MODE			0x2
+		#define CDB_CORE_SET_PIO_MODE			0x3
+		#define CDB_CORE_ENABLE_WRITE_CACHE		0x4
+		#define CDB_CORE_DISABLE_WRITE_CACHE	0x5
+		#define CDB_CORE_ENABLE_SMART			0x6
+		#define CDB_CORE_DISABLE_SMART			0x7
+		#define CDB_CORE_SMART_RETURN_STATUS	0x8
+		#define CDB_CORE_SHUTDOWN				0x9
+		#define CDB_CORE_ENABLE_READ_AHEAD		0xA
+		#define CDB_CORE_DISABLE_READ_AHEAD		0xB
+		#define CDB_CORE_READ_LOG_EXT			0xC
+#ifdef CACHE_MODULE_SUPPORT
+	#define CDB_CACHE_MODULE						0x2
+		#define CBD_CACHE_FLUSH_DEV                     0xD
+#endif
+
+#define SCSI_IS_INTERNAL(cmd)		((cmd)==SCSI_CMD_MARVELL_SPECIFIC)
+
+/*
+ * SCSI status
+ */
+#define SCSI_STATUS_GOOD					0x00			/* Good */
+#define SCSI_STATUS_CHECK_CONDITION			0x02			/* Sense data is valid */
+#define SCSI_STATUS_CONDITION_MET			0x04			/* We don't use it */
+#define SCSI_STATUS_BUSY					0x08			/* Busy */
+#define SCSI_STATUS_INTERMEDIATE			0x10			/* We don't use it */
+#define SCSI_STATUS_INTERMEDIATE_MET		0x14			/* We don't use it */
+#define SCSI_STATUS_RESERVATION_CONFLICT	0x18			/* We don't use it */
+#define SCSI_STATUS_FULL					0x28			/* Task set full */
+#define SCSI_STATUS_ACA_ACTIVE				0x30			/* We don't use it */
+#define SCSI_STATUS_ABORTED					0x40			/* We don't use it */
+
+/*
+ * SCSI sense key
+ */
+#define SCSI_SK_NO_SENSE					0x00	/* No sense key */
+#define SCSI_SK_RECOVERED_ERROR				0x01	/* Command completed with recovery action */
+#define SCSI_SK_NOT_READY					0x02	/* Not ready */
+#define SCSI_SK_MEDIUM_ERROR				0x03	/* Finally failed even with recovery */
+#define SCSI_SK_HARDWARE_ERROR				0x04	/* Hardware error */
+#define SCSI_SK_ILLEGAL_REQUEST				0x05	/* Invalid CDB */
+#define SCSI_SK_UNIT_ATTENTION				0x06	/* Unit needs attention */
+#define SCSI_SK_DATA_PROTECT				0x07	/* We don't use it */
+#define SCSI_SK_BLANK_CHECK					0x08	/* We don't use it */
+#define SCSI_SK_VENDOR_SPECIFIC				0x09	/* We don't use it */
+#define SCSI_SK_COPY_ABORTED				0x0A	/* We don't use it */
+#define SCSI_SK_ABORTED_COMMAND				0x0B	/* We don't use it */
+#define SCSI_SK_VOLUME_OVERFLOW				0x0D	/* We don't use it */
+#define SCSI_SK_MISCOMPARE					0x0E	/* We don't use it */
+
+/*
+ * SCSI additional sense code
+ */
+#define SCSI_ASC_NO_ASC						0x00
+#define SCSI_ASC_LUN_NOT_READY				0x04
+#define SCSI_ASC_ECC_ERROR					0x10
+#define SCSI_ASC_ID_ADDR_MARK_NOT_FOUND		0x12
+#define SCSI_ASC_INVALID_OPCODE				0x20
+#define SCSI_ASC_LBA_OUT_OF_RANGE			0x21
+#define SCSI_ASC_INVALID_FEILD_IN_CDB		0x24
+#define SCSI_ASC_LOGICAL_UNIT_NOT_SUPPORTED	0x25
+#define SCSI_ASC_INVALID_FIELD_IN_PARAMETER	0x26
+#define SCSI_ASC_INTERNAL_TARGET_FAILURE	0x44
+
+/*
+ * SCSI additional sense code qualifier
+ */
+#define SCSI_ASCQ_NO_ASCQ					0x00
+#define SCSI_ASCQ_INTERVENTION_REQUIRED		0x03
+#define SCSI_ASCQ_MAINTENANCE_IN_PROGRESS	0x80
+
+/* SCSI command CDB helper functions. */
+#define SCSI_CDB10_GET_LBA(cdb)	((MV_U32)(((MV_U32)(cdb)[2]<<24) | ((MV_U32)(cdb)[3]<<16) | ((MV_U32)(cdb)[4]<<8) | (MV_U32)(cdb)[5]))
+#define SCSI_CDB10_SET_LBA(cdb, lba)	do {\
+	(cdb)[2] = (MV_U8)((lba) >> 24);	\
+	(cdb)[3] = (MV_U8)((lba) >> 16);	\
+	(cdb)[4] = (MV_U8)((lba) >> 8);	\
+	(cdb)[5] = (MV_U8)(lba);	\
+} while (0)	/* do-while: safe as a single statement in unbraced if/else */
+#define SCSI_CDB10_GET_SECTOR(cdb)	(((cdb)[7]<<8) | (cdb)[8])
+#define SCSI_CDB10_SET_SECTOR(cdb, sector) do {\
+	(cdb)[7] = (MV_U8)((sector) >> 8);	\
+	(cdb)[8] = (MV_U8)(sector);	\
+} while (0)	/* arguments parenthesized: callers may pass expressions */
+
+#define MV_SCSI_RESPONSE_CODE			0x70
+#define MV_SCSI_DIRECT_ACCESS_DEVICE    0x00
+
+typedef struct _MV_Sense_Data
+{
+	MV_U8 ErrorCode:7;
+	MV_U8 Valid:1;
+	MV_U8 SegmentNumber;
+	MV_U8 SenseKey:4;
+	MV_U8 Reserved:1;
+	MV_U8 IncorrectLength:1;
+	MV_U8 EndOfMedia:1;
+	MV_U8 FileMark:1;
+	MV_U8 Information[4];
+	MV_U8 AdditionalSenseLength;
+	MV_U8 CommandSpecificInformation[4];
+	MV_U8 AdditionalSenseCode;
+	MV_U8 AdditionalSenseCodeQualifier;
+	MV_U8 FieldReplaceableUnitCode;
+	MV_U8 SenseKeySpecific[3];
+}MV_Sense_Data, *PMV_Sense_Data;
+
+MV_VOID MV_SetSenseData(
+	IN PMV_Sense_Data pSense,
+	IN MV_U8 SenseKey,
+    IN MV_U8 AdditionalSenseCode,
+	IN MV_U8 ASCQ
+	);
+
+#endif
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/common/com_struct.h linux-2.6.25/drivers/scsi/mv/common/com_struct.h
--- linux-2.6.25.orig/drivers/scsi/mv/common/com_struct.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/common/com_struct.h	2008-07-28 18:42:43.320189076 +0200
@@ -0,0 +1,560 @@
+#ifndef COM_STRUCT_H
+#define COM_STRUCT_H
+
+#include "com_define.h"
+
+#define GET_ALL							0xFF
+#define ID_UNKNOWN						0x7F
+
+#define MAX_NUM_ADAPTERS				2
+#define MAX_LD_SUPPORTED				8
+#ifdef SUPPORT_RAID6
+#define MAX_HD_SUPPORTED				8
+#else
+#define MAX_HD_SUPPORTED				24
+#endif
+#ifndef _OS_BIOS
+#define MAX_HD_SUPPORTED_API			32	// API is already reserved 32 HDs
+#endif
+
+#define MAX_BLOCK_PER_HD_SUPPORTED		8	//Can also be got from Adapter structure.
+#define MAX_EXPANDER_SUPPORTED			0	//Can also be got from Adapter structure.
+#define MAX_PM_SUPPORTED				4	//Can also be got from Adapter structure.
+#define MAX_HD_BSL						32
+#define MAX_BLOCK_SUPPORTED 			32
+
+#define MAX_BGA_RATE					0xFA
+
+#ifndef MV_GUID_SIZE
+#define MV_GUID_SIZE					8
+#endif
+
+#define LD_MAX_NAME_LENGTH 				16
+
+#define CACHE_WRITEBACK_ENABLE                   0
+#define CACHE_WRITETHRU_ENABLE                   1
+#define CACHE_ADAPTIVE_ENABLE                      2
+#define CACHE_WRITE_POLICY                      (CACHE_WRITEBACK_ENABLE | CACHE_WRITETHRU_ENABLE | CACHE_ADAPTIVE_ENABLE)
+#define CACHE_LOOKAHEAD_ENABLE                  MV_BIT(2)
+
+#define CONSISTENCYCHECK_ONLY			0
+#define CONSISTENCYCHECK_FIX			1
+
+#define INIT_QUICK						0	//Just initialize first part size of LD
+#define INIT_FULLFOREGROUND				1	//Initialize full LD size
+#define INIT_FULLBACKGROUND				2	//Initialize full LD size background
+#define INIT_NONE						3
+
+#define BGA_CONTROL_START				0
+#define BGA_CONTROL_RESTART				1
+#define BGA_CONTROL_PAUSE				2
+#define BGA_CONTROL_RESUME				3
+#define BGA_CONTROL_ABORT				4
+#define BGA_CONTROL_COMPLETE			5
+#define BGA_CONTROL_IN_PROCESS			6
+#define BGA_CONTROL_TERMINATE_IMMEDIATE	7
+#define BGA_CONTROL_AUTO_PAUSE			8
+
+#define LD_STATUS_FUNCTIONAL			0
+#define LD_STATUS_DEGRADE				1 
+#define LD_STATUS_DELETED				2
+#define LD_STATUS_MISSING				3//LD missing in system.
+#define LD_STATUS_OFFLINE				4
+#define LD_STATUS_PARTIALLYOPTIMAL		5 //Specially for RAID 6 lost one HD
+#define LD_STATUS_MIGRATION             MV_BIT(3)
+#define LD_STATUS_INVALID				0xFF
+
+#define LD_BGA_NONE						0
+#define LD_BGA_REBUILD					MV_BIT(0)
+#define LD_BGA_CONSISTENCY_FIX			MV_BIT(1)
+#define LD_BGA_CONSISTENCY_CHECK		MV_BIT(2)
+#define LD_BGA_INIT_QUICK				MV_BIT(3)
+#define LD_BGA_INIT_BACK				MV_BIT(4)
+#define LD_BGA_MIGRATION				MV_BIT(5)
+
+#define LD_BGA_STATE_NONE				0
+#define LD_BGA_STATE_RUNNING			1
+#define LD_BGA_STATE_ABORTED			2
+#define LD_BGA_STATE_PAUSED				3
+#define LD_BGA_STATE_AUTOPAUSED			4
+#define LD_BGA_STATE_DDF_PENDING		MV_BIT(7)
+
+#define	LD_MODE_RAID0					0x0
+#define	LD_MODE_RAID1					0x1
+#define	LD_MODE_RAID1E					0x11
+#define	LD_MODE_RAID5					0x5
+#define	LD_MODE_RAID6					0x6
+#define	LD_MODE_JBOD					0x0f
+#define	LD_MODE_RAID10					0x10	//TBD
+#define	LD_MODE_RAID50					0x50	//TBD
+#define	LD_MODE_RAID60					0x60	//TBD
+
+#define	LD_RAID0_SUPPORTED				MV_BIT(0)
+#define	LD_RAID1_SUPPORTED				MV_BIT(1)
+#define	LD_RAID1E_SUPPORTED				MV_BIT(2)
+#define	LD_RAID5_SUPPORTED				MV_BIT(3)
+#define	LD_RAID6_SUPPORTED				MV_BIT(4)
+#define	LD_JBOD_SUPPORTED				MV_BIT(5)
+#define	LD_RAID10_SUPPORTED				MV_BIT(6)
+#define	LD_RAID50_SUPPORTED				MV_BIT(7)
+
+#define HD_WIPE_MDD						0
+#define HD_WIPE_FORCE					1
+
+#define ROUNDING_SCHEME_NONE			0          // no rounding
+#define ROUNDING_SCHEME_1GB				1          // 1 GB rounding
+#define ROUNDING_SCHEME_10GB			2          // 10 GB rounding
+
+#define DEVICE_TYPE_NONE				0
+#define DEVICE_TYPE_HD					1
+#define DEVICE_TYPE_PM					2
+#define DEVICE_TYPE_EXPANDER			3
+#define DEVICE_TYPE_PORT				0xFF
+
+#define HD_STATUS_FREE					MV_BIT(0)
+#define HD_STATUS_ASSIGNED				MV_BIT(1)
+#define HD_STATUS_SPARE					MV_BIT(2)
+#define HD_STATUS_OFFLINE				MV_BIT(3)
+#define HD_STATUS_SMARTCHECKING			MV_BIT(4)
+#define HD_STATUS_MP					MV_BIT(5)
+
+#define HD_BGA_STATE_NONE				LD_BGA_STATE_NONE
+#define HD_BGA_STATE_RUNNING			LD_BGA_STATE_RUNNING
+#define HD_BGA_STATE_ABORTED			LD_BGA_STATE_ABORTED
+#define HD_BGA_STATE_PAUSED				LD_BGA_STATE_PAUSED
+#define HD_BGA_STATE_AUTOPAUSED			LD_BGA_STATE_AUTOPAUSED
+
+#define GLOBAL_SPARE_DISK				MV_BIT(2)
+
+#define PD_DDF_VALID					MV_BIT(0)
+#define PD_DISK_VALID					MV_BIT(1)
+#define PD_DDF_CLEAN					MV_BIT(2)
+#define PD_NEED_UPDATE					MV_BIT(3)
+#define PD_MBR_VALID					MV_BIT(4)
+
+#define PD_STATE_ONLINE					MV_BIT(0)
+#define PD_STATE_FAILED					MV_BIT(1)
+#define PD_STATE_REBUILDING				MV_BIT(2)
+#define PD_STATE_TRANSITION				MV_BIT(3)
+#define PD_STATE_SMART_ERROR			MV_BIT(4)
+#define PD_STATE_READ_ERROR				MV_BIT(5)
+#define PD_STATE_MISSING				MV_BIT(6)
+
+#define HD_STATUS_SETONLINE				0
+#define HD_STATUS_SETOFFLINE			1
+
+#define HD_TYPE_SATA					MV_BIT(0)
+#define HD_TYPE_PATA					MV_BIT(1)
+#define HD_TYPE_SAS						MV_BIT(2)
+#define HD_TYPE_ATAPI					MV_BIT(3)	//SATA, PATA, can co-exist with ATAPI
+
+#define HD_FEATURE_NCQ					MV_BIT(0)
+#define HD_FEATURE_TCQ					MV_BIT(1)
+#define HD_FEATURE_1_5G					MV_BIT(2)
+#define HD_FEATURE_3G					MV_BIT(3)
+#define HD_FEATURE_WRITE_CACHE			MV_BIT(4)
+#define HD_FEATURE_48BITS				MV_BIT(5)
+#define HD_FEATURE_SMART				MV_BIT(6)
+
+#define EXP_SSP							MV_BIT(0)
+#define EXP_STP							MV_BIT(1)
+#define EXP_SMP							MV_BIT(2)
+
+#define HD_DMA_NONE						0
+#define HD_DMA_1						1
+#define HD_DMA_2						2
+#define HD_DMA_3						3
+#define HD_DMA_4						4
+#define HD_DMA_5						5
+#define HD_DMA_6						6
+#define HD_DMA_7						7
+#define HD_DMA_8						8
+#define HD_DMA_9						9
+
+#define HD_PIO_NONE						0
+#define HD_PIO_1						1
+#define HD_PIO_2						2
+#define HD_PIO_3						3
+#define HD_PIO_4						4
+#define HD_PIO_5						5
+
+#define HD_XCQ_OFF						0
+#define HD_NCQ_ON						1
+#define HD_TCQ_ON						2
+
+#define SECTOR_LENGTH					512
+#define SECTOR_WRITE					0
+#define SECTOR_READ						1
+
+#define DBG_LD2HD						0
+#define DBG_HD2LD						1
+
+#define DRIVER_LENGTH					1024*16
+#define FLASH_DOWNLOAD					0xf0
+#define FLASH_UPLOAD					0xf
+#define	FLASH_TYPE_CONFIG				0
+#define	FLASH_TYPE_BIN					1
+#define	FLASH_TYPE_BIOS					2
+#define	FLASH_TYPE_FIRMWARE				3
+
+#define BLOCK_INVALID					0
+#define BLOCK_VALID						MV_BIT(0)	//Free block can be used to create LD.
+#define BLOCK_ASSIGNED					MV_BIT(1)	//Block used for LD
+#ifdef _OS_BIOS
+#define FREE_BLOCK(Flags)	(((Flags)&(BLOCK_VALID))==(Flags))	/* fix: == binds tighter than &, old form compared against the mask */
+#define ASSIGN_BLOCK(Flags)	(((Flags)&(BLOCK_VALID|BLOCK_ASSIGNED))==(Flags))
+#define INVALID_BLOCK(Flags)	(((Flags)&(BLOCK_VALID|BLOCK_ASSIGNED))==0)	/* old form was `Flags & 0`, i.e. always false */
+#endif
+//#define BLOCK_STATUS_NORMAL				0
+//#define BLOCK_STATUS_REBUILDING			MV_BIT(0)
+//#define BLOCK_STATUS_CONSISTENTCHECKING	MV_BIT(1)
+//#define BLOCK_STATUS_INITIALIZING		MV_BIT(2)
+//#define BLOCK_STATUS_MIGRATING			MV_BIT(3)
+//#define BLOCK_STATUS_OFFLINE			MV_BIT(4)
+
+#ifndef _OS_BIOS
+#pragma pack(8)
+#endif
+
+typedef struct _Version_Info
+{
+	MV_U32		VerMajor;
+	MV_U32		VerMinor;
+	MV_U32		VerOEM;
+	MV_U32		VerBuild;
+}Version_Info, *PVersion_Info;
+
+#define BASE_ADDRESS_MAX_NUM 6
+
+#define SUPPORT_LD_MODE_RAID0		MV_BIT(0)
+#define SUPPORT_LD_MODE_RAID1		MV_BIT(1)
+#define SUPPORT_LD_MODE_RAID10		MV_BIT(2)
+#define SUPPORT_LD_MODE_RAID1E		MV_BIT(3)
+#define SUPPORT_LD_MODE_RAID5		MV_BIT(4)
+#define SUPPORT_LD_MODE_RAID6		MV_BIT(5)
+#define SUPPORT_LD_MODE_RAID50		MV_BIT(6)
+#define SUPPORT_LD_MODE_JBOD		MV_BIT(7)
+
+typedef struct _Adapter_Info {
+
+	Version_Info	DriverVersion;
+	Version_Info	BIOSVersion;
+	MV_U64			Reserved1[2];//Reserve for firmware
+
+	MV_U32			SystemIOBusNumber;
+	MV_U32			SlotNumber;
+	MV_U32			InterruptLevel;
+	MV_U32			InterruptVector;
+	
+	MV_U32			VenDevID;
+	MV_U32			SubVenDevID;
+	
+	MV_U8			PortCount;		//How many ports, like 4 ports,  or 4S1P.
+	MV_U8			PortSupportType;		//Like SATA port, SAS port, PATA port, use MV_BIT
+	MV_BOOLEAN		RAMSupport;
+	MV_U8			Reserved2[13];
+	
+	MV_BOOLEAN		AlarmSupport;
+	MV_U8			MaxBlockPerPD;	//E.g one HD support 8 block at maximum.
+	MV_U8			MaxHD;		//E.g 16 HD support
+	MV_U8			MaxExpander;	//E.g 4 Expander support
+	MV_U8			MaxPM;		//E.g 4 PM support
+	MV_U8			MaxLogicalDrive;
+	MV_U16			LogicalDriverMode;	// check SUPPORT_LD_MODE definition
+	MV_U8			WWN[8];			//For future VDS use.
+
+} Adapter_Info, *PAdapter_Info;
+
+typedef struct _Adapter_Config {
+	MV_BOOLEAN		AlarmOn;
+	MV_BOOLEAN		AutoRebuildOn;
+	MV_U8			BGARate;
+	MV_BOOLEAN		PollSMARTStatus;
+	MV_U8			Reserved[4];
+} Adapter_Config, *PAdapter_Config;
+
+typedef struct _HD_Info
+{
+	MV_U8			Type;			/*Refer to DEVICE_TYPE_xxx*/
+	MV_U8			ParentType;		//Refer to DEVICE_TYPE_xxx
+	MV_U16			ID;			/* ID should be unique*/
+	MV_U16			ParentID;		//Point to Port, PM or Expander ID
+	MV_U8			AdapterID;
+	MV_U8			PhyID;	/* Means HD attached to which Phy of the Expander or PM or Adapter.*/
+
+	MV_U8			Status;		/*Refer to HD_STATUS_XXX*/
+	MV_U8			HDType;	/*For HD type, refer to HD_Type_xxx*/
+	MV_U8			PIOMode;
+	MV_U8			MDMAMode;
+	MV_U8			UDMAMode;	
+	MV_U8			Reserved1[3];
+
+	MV_U32			FeatureSupport;	/*Support 1.5G, 3G, TCQ, NCQ, and etc, MV_BIT related*/
+
+	MV_U8			Model[40];
+	MV_U8			SerialNo[20];
+	MV_U8			FWVersion[8];
+
+	MV_U8			WWN[64];	/*ATA/ATAPI-8 has such definitions for the identify buffer*/
+	MV_U8			Reserved3[64];
+
+	MV_U64		   	Size;		//unit: 1KB
+}
+HD_Info, *PHD_Info;
+
+typedef struct _HD_MBR_Info
+{
+	MV_U8 			HDCount;
+	MV_U8			Reserved[7];
+	MV_U16	 		HDIDs[MAX_HD_SUPPORTED_API];	
+	MV_BOOLEAN	 	hasMBR[MAX_HD_SUPPORTED_API];
+} HD_MBR_Info, *PHD_MBR_Info;
+
+
+typedef struct _HD_FreeSpaceInfo
+{
+	MV_U16			ID;			/* ID should be unique*/
+	MV_U8			AdapterID;
+	MV_U8			Reserved[4];
+	MV_BOOLEAN		isFixed;
+
+	MV_U64		   	Size;		//unit: 1KB
+}
+HD_FreeSpaceInfo, *PHD_FreeSpaceInfo;
+
+
+typedef struct _HD_Block_Info
+{
+	MV_U16			ID;			/* ID in the HD_Info*/
+	MV_U8			Type;			/*Refer to DEVICE_TYPE_xxx*/
+	MV_U8			Reserved1[5];
+
+	MV_U16			BlockIDs[MAX_BLOCK_PER_HD_SUPPORTED];  // Free is 0xff
+}
+HD_Block_Info, *PHD_Block_Info;
+
+typedef struct _Exp_Info
+{
+	MV_U8			Type;			/*Refer to DEVICE_TYPE_xxx */
+	MV_U8			ParentType;		//Refer to DEVICE_TYPE_xxx
+	MV_U16 			ID;			//ID should be unique
+	MV_U16			ParentID;		//Point to Port or Expander ID
+	MV_U8			AdapterID;
+	MV_U8			PhyID;	/* Means this Expander attached to which Phy of the Expander or Adapter.*/
+
+	MV_U8			SAS_Address[8];
+
+	MV_BOOLEAN		Configuring;	
+	MV_BOOLEAN		RouteTableConfigurable;
+	MV_U16			Reserved1[2];
+	MV_U8 			PhyCount;
+	MV_U8			Reserved2[1];
+
+	MV_U16			ExpChangeCount;
+	MV_U16			MaxRouteIndexes;
+	MV_U32			Reserved3[1];
+
+	char			VendorID[8+1];
+	char			ProductID[16+1];
+	char			ProductRev[4+1];
+	char			ComponentVendorID[8+1];
+	MV_U16			ComponentID;
+	MV_U8			ComponentRevisionID;
+
+	MV_U8			Reserved4[8];
+}Exp_Info, * PExp_Info;
+
+typedef  struct _PM_Info{
+	MV_U8     		Type;			// Refer to DEVICE_TYPE_xxx 
+	MV_U8			ParentType;		// Refer to DEVICE_TYPE_xxx
+	MV_U16			ID;				// the same as port ID it attached
+	MV_U16			ParentID;
+	MV_U16			VendorId;
+
+	MV_U16			DeviceId;
+	MV_U8			AdapterID;
+	MV_U8			ProductRevision;
+   	MV_U8			PMSpecRevision;	// 10 means 1.0, 11 means 1.1 etc
+	MV_U8			NumberOfPorts;
+	MV_U8			PhyID;	/* Means this PM attached to which Phy of the Expander or Adapter.*/
+	MV_U8			Reserved[1];
+}PM_Info, *PPM_Info;
+
+typedef struct _HD_CONFIG
+{
+	MV_BOOLEAN		WriteCacheOn;		/* 1: enable write cache */
+	MV_BOOLEAN		SMARTOn;			/* 1: enable S.M.A.R.T */
+	MV_BOOLEAN		Online;				/* 1: to set HD online */
+	MV_U8			Reserved[3];
+	MV_U16			HDID;
+}
+HD_Config, *PHD_Config;
+
+typedef struct  _HD_STATUS
+{
+	MV_BOOLEAN		SmartThresholdExceeded;		
+	MV_U8      		Reserved[1];
+	MV_U16			HDID;
+}
+HD_Status, *PHD_Status;
+
+typedef struct  _SPARE_STATUS
+{
+	MV_U16			HDID;
+	MV_U16			LDID;
+	MV_U8			Status;		// HD_STATUS_SPARE
+	MV_U8      		Reserved[3];
+}
+Spare_Status, *PSpare_Status;
+
+typedef struct  _BSL{
+	MV_U64     	LBA;		//Bad sector LBA for the HD.
+
+	MV_U32			Count;		//How many serial bad sectors 
+	MV_BOOLEAN		Flag;		//Fake bad sector or not.
+	MV_U8      	Reserved[3];
+}
+BSL,*PBSL;
+
+typedef struct _BLOCK_INFO
+{
+	MV_U16      	ID;
+	MV_U16			HDID;		//ID in the HD_Info
+	MV_U16 		Flags;		/*Refer to BLOCK_XXX definition*/
+	MV_U16			LDID;		/*Belong to which LD*/
+
+	MV_U8			Status;		/* Refer to BLOCK_STATUS_XXX*/
+	MV_U8 			Reserved[7];
+
+	MV_U64 		StartLBA;	//unit: 1KB
+	MV_U64 		Size;		//unit: 1KB
+}
+Block_Info, *PBlock_Info;
+
+typedef struct _LD_Info
+{
+	MV_U16			ID;
+	MV_U8 			Status;	/* Refer to LD_STATUS_xxx */
+	MV_U8			BGAStatus; /* Refer to LD_BGA_STATE_xxx */
+	MV_U16			StripeBlockSize;	//unit: 1KB
+	MV_U8			RaidMode;			
+	MV_U8			HDCount;
+
+	MV_U8			CacheMode;/*Default is CacheMode_Default, see above*/	
+	MV_U8 			LD_GUID[MV_GUID_SIZE];
+	MV_U8 			Reserved[7];
+
+	MV_U64			Size;			/* LD size, unit: 1KB */
+
+	MV_U8			Name[LD_MAX_NAME_LENGTH];
+
+	MV_U16			BlockIDs[MAX_HD_SUPPORTED_API];		/* 32 */
+//According to BLOCK ID, to get the related HD ID, then WMRU can draw the related graph like above.
+	MV_U8			SubLDCount;   //for raid 10, 50,60
+	MV_U8			NumParityDisk; //For RAID 6.
+	MV_U8			Reserved1[6];
+}
+LD_Info, *PLD_Info;
+
+typedef struct _Create_LD_Param
+{
+	MV_U8 			RaidMode;
+	MV_U8 			HDCount;
+	MV_U8			RoundingScheme;//please refer to the definitions of  ROUNDING_SCHEME_XXX.
+	MV_U8			SubLDCount;   	//for raid 10,50,60
+	MV_U16			StripeBlockSize; /*In sectors unit: 1KB */
+	MV_U8			NumParityDisk;  //For RAID 6.
+	MV_U8			CachePolicy;//please refer to the definitions of CACHEMODE_XXXX.
+
+	MV_U8			InitializationOption;// please refer to the definitions of INIT_XXXX.
+	MV_U8  			Reserved1;
+	MV_U16			LDID;			// ID of the LD to be migrated or expanded
+	MV_U8  			Reserved2[4];
+
+	MV_U16	 		HDIDs[MAX_HD_SUPPORTED_API];	/* 32 */
+	MV_U8	 		Name[LD_MAX_NAME_LENGTH];
+
+	MV_U64			Size;		/* size of LD in sectors */
+} Create_LD_Param, *PCreate_LD_Param;
+
+typedef struct _LD_STATUS
+{
+	MV_U8			Status;		/* Refer to LD_STATUS_xxx */
+	MV_U8			Bga;		/* Refer to LD_BGA_xxx */
+	MV_U16			BgaPercentage;	/* xx% */
+	MV_U8			BgaState;	/* Refer to LD_BGA_STATE_xxx */
+	MV_U8			Reserved[1];
+	MV_U16			LDID;
+} 
+LD_Status, *PLD_Status;
+
+typedef struct	_LD_Config
+{
+	MV_U8			CacheMode;		/* See definition 4.4.1 CacheMode_xxx */
+	MV_U8			Reserved1;		
+	MV_BOOLEAN		AutoRebuildOn;	/*1- AutoRebuild On*/
+	MV_U8			Status;
+	MV_U16			LDID;
+	MV_U8			Reserved2[2];
+
+	MV_U8 			Name[LD_MAX_NAME_LENGTH];
+}
+LD_Config, * PLD_Config;
+
+typedef struct _HD_MPSTATUS
+{
+	MV_U64			Watermark;
+	MV_U16			LoopCount;			/* loop count */
+	MV_U16			ErrorCount;	/* error detected during media patrol */
+	MV_U16			Percentage;	/* xx% */
+	MV_U8			Status;		/* Refer to HD_BGA_STATE_xxx */
+	MV_U8			Type;
+	MV_U16			HDID;
+	MV_U16			Reserved [3];
+} 
+HD_MPStatus, *PHD_MPStatus;
+
+typedef struct _DBG_DATA
+{
+	MV_U64			LBA;
+	MV_U64			Size;
+	MV_U8			Data[SECTOR_LENGTH];
+} 
+DBG_Data, *PDBG_Data;
+
+typedef struct _DBG_HD
+{
+	MV_U64			LBA;
+	MV_U16			HDID;
+	MV_BOOLEAN		isUsed;
+	MV_U8			Reserved[5];
+} 
+DBG_HD;
+
+typedef struct _DBG_MAP
+{
+	MV_U64			LBA;
+	MV_U16			LDID;
+	MV_BOOLEAN		isUsed;
+	MV_U8			Reserved[5];
+	DBG_HD		HDs[MAX_HD_SUPPORTED_API];
+} 
+DBG_Map, *PDBG_Map;
+
+#ifdef CACHE_MODULE_SUPPORT
+typedef struct _LD_CACHE_STATUS
+{
+	MV_BOOLEAN	IsOnline;
+	MV_U8		CachePolicy;
+	MV_U16		StripeUnitSize;
+	MV_U32		StripeSize;
+}
+LD_CACHE_STATUS, *PLD_CACHE_STATUS;
+#endif
+
+#ifndef _OS_BIOS
+#pragma pack()
+#endif
+
+#endif /* COM_STRUCT_H */
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/common/com_tag.c linux-2.6.25/drivers/scsi/mv/common/com_tag.c
--- linux-2.6.25.orig/drivers/scsi/mv/common/com_tag.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/common/com_tag.c	2008-07-28 18:42:43.320189076 +0200
@@ -0,0 +1,43 @@
+#include "mv_include.h"
+
+#include "com_tag.h"
+
+MV_VOID Tag_Init( PTag_Stack pTagStack, MV_U8 size )	/* fill the free-tag stack with tags size-1..0 so Tag_GetOne pops 0 first */
+{
+	MV_U8 i;
+	
+	MV_DASSERT( size <= MAX_TAG_NUMBER );	/* stack storage holds at most MAX_TAG_NUMBER tags */
+#ifdef _OS_LINUX
+	if ( pTagStack->Top && pTagStack->Top < size )
+		MV_DBG(DMSG_CORE, "__MV__ Init an in-use tag pool "
+		       "curr_size:init_size - %d:%d.\n", pTagStack->Top, size);
+#endif /* _OS_LINUX */
+
+	pTagStack->Top = size;	/* all 'size' tags start out free */
+	for ( i=0; i<size; i++ )
+	{
+		pTagStack->Stack[i] = size-1-i;	/* Stack[size-1] == 0, popped first */
+	}
+}
+
+MV_U8 Tag_GetOne(PTag_Stack pTagStack)	/* pop one free tag; caller must check Tag_IsEmpty first */
+{
+	MV_DASSERT( pTagStack->Top>0 );	/* popping an empty pool is a caller bug */
+	return pTagStack->Stack[--pTagStack->Top];
+}
+
+MV_VOID Tag_ReleaseOne(PTag_Stack pTagStack, MV_U8 tag)	/* push a tag back onto the free stack */
+{
+	MV_DASSERT( pTagStack->Top<MAX_TAG_NUMBER );	/* overflow here would indicate a double release */
+	pTagStack->Stack[pTagStack->Top++] = tag;
+}
+
+MV_BOOLEAN Tag_IsEmpty(PTag_Stack pTagStack)	/* MV_TRUE when no free tags remain */
+{
+	if ( pTagStack->Top==0 )
+	{
+	    return MV_TRUE;
+	}
+	return MV_FALSE;
+}
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/common/com_tag.h linux-2.6.25/drivers/scsi/mv/common/com_tag.h
--- linux-2.6.25.orig/drivers/scsi/mv/common/com_tag.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/common/com_tag.h	2008-07-28 18:42:43.320189076 +0200
@@ -0,0 +1,25 @@
+#if !defined(COM_TAG_H)
+#define COM_TAG_H
+
+typedef struct _Tag_Stack Tag_Stack, *PTag_Stack;	/* forward decl; defined below */
+
+#ifdef _OS_BIOS
+#define MAX_TAG_NUMBER	1
+#else
+#define MAX_TAG_NUMBER	32
+#endif
+
+//TBD: I suppose the stack size is always the same. If not, improve.
+struct _Tag_Stack	/* LIFO pool of free command tags */
+{
+    MV_U8   Stack[MAX_TAG_NUMBER];	/* free tags; valid entries are Stack[0..Top-1] */
+    MV_U8   Top;	/* number of free tags currently in the pool */
+};
+
+MV_U8 Tag_GetOne(PTag_Stack pTagStack);	/* pop a free tag (pool must not be empty) */
+MV_VOID Tag_ReleaseOne(PTag_Stack pTagStack, MV_U8 tag);	/* return a tag to the pool */
+MV_VOID Tag_Init(PTag_Stack pTagStack, MV_U8 size);	/* reset pool to hold tags 0..size-1 */
+MV_BOOLEAN Tag_IsEmpty(PTag_Stack pTagStack);	/* MV_TRUE when nothing left to pop */
+
+#endif
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/common/com_type.h linux-2.6.25/drivers/scsi/mv/common/com_type.h
--- linux-2.6.25.orig/drivers/scsi/mv/common/com_type.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/common/com_type.h	2008-07-28 18:42:43.321189328 +0200
@@ -0,0 +1,284 @@
+#if !defined(COM_TYPE_H)
+#define COM_TYPE_H
+
+#include "mv_os.h"
+#include "com_list.h"
+#include "com_define.h"
+
+/*
+ * Data Structure
+ */
+#define MAX_CDB_SIZE		16	/* also defined in com_api.h — keep in sync */
+
+struct _MV_SG_Table;
+typedef struct _MV_SG_Table MV_SG_Table, *PMV_SG_Table;
+
+struct _MV_SG_Entry;
+typedef struct _MV_SG_Entry MV_SG_Entry, *PMV_SG_Entry;
+
+struct _MV_Request;
+typedef struct _MV_Request MV_Request, *PMV_Request;
+
+#ifdef RAID_DRIVER
+typedef struct _MV_XOR_Request MV_XOR_Request, *PMV_XOR_Request;
+#endif
+
+/* Completion status codes for MV_Request (Scsi_Status field consumers) */
+#define REQ_STATUS_SUCCESS				0x0
+#define REQ_STATUS_NOT_READY			0x1
+#define REQ_STATUS_MEDIA_ERROR			0x2
+#define REQ_STATUS_BUSY					0x3
+#define REQ_STATUS_INVALID_REQUEST		0x4
+#define REQ_STATUS_INVALID_PARAMETER	0x5
+#define REQ_STATUS_NO_DEVICE			0x6
+#define REQ_STATUS_HAS_SENSE			0x7		/* Sense data structure is the SCSI "Fixed format sense data" format. */
+#define REQ_STATUS_ERROR				0x8		/* Generic error. No more to say. */
+
+#define REQ_STATUS_ERROR_WITH_SENSE		0x10	/* Error with sense. Used to report error status to application */
+
+#define REQ_STATUS_PENDING				0x80	/* Request initiator must set the status to REQ_STATUS_PENDING. */
+#define REQ_STATUS_RETRY				0x81
+#define REQ_STATUS_REQUEST_SENSE		0x82
+
+/* SG Table and SG Entry */
+struct _MV_SG_Entry	/* one scatter/gather element: 64-bit bus address + byte count */
+{
+	MV_U32 Base_Address;	/* low 32 bits of the address */
+	MV_U32 Base_Address_High;	/* high 32 bits of the address */
+	MV_U32 Reserved0;
+	MV_U32 Size;	/* length of this segment in bytes */
+};
+
+struct _MV_SG_Table	/* header over an externally allocated MV_SG_Entry array */
+{
+	MV_U8 Max_Entry_Count;	/* capacity of the array at Entry_Ptr */
+	MV_U8 Valid_Entry_Count;	/* entries currently filled */
+	MV_U8 Flag;
+	MV_U8 Reserved0;
+	MV_U32 Byte_Count;	/* total bytes described by all valid entries */
+//	MV_SG_Entry Entry[MAX_SG_ENTRY];
+	PMV_SG_Entry Entry_Ptr;	/* entry storage; owned by whoever built the table */
+};
+
+/* 
+ * MV_Request is the general request type passed through different modules. 
+ * Must be 64 bit aligned.
+ */
+struct _MV_Request {	/* general request passed between modules; must be 64-bit aligned */
+	List_Head Queue_Pointer;	/* linkage for whichever queue currently owns the request */
+
+#if 0
+	union {
+		struct {
+			MV_U8	Target_Id; 
+			MV_U8	Lun;
+		};
+		MV_U16 Device_Id;
+	};
+#else
+	MV_U16 Device_Id;	/* destination device */
+#endif
+
+	MV_U16 Req_Flag;         /* Check the REQ_FLAG definition */
+	MV_U8 Scsi_Status;	/* REQ_STATUS_xxx completion code */
+	MV_U8 Tag;               /* Request tag */
+	MV_U8 Req_Type;		 /* Check the REQ_TYPE definition */
+#ifdef _OS_WINDOWS
+	MV_U8 Splited_Count;                            
+#elif defined(SUPPORT_ERROR_HANDLING) && defined(_OS_LINUX)/* _OS_WINDOWS */ 
+	MV_U8 eh_flag;	/* mark a req after it is re-inserted into
+			 * waiting_list due to error handling.
+			 */
+	MV_U8 Splited_Count;
+	MV_U8 Reserved0[7]; 
+#else
+	MV_U8 Reserved0[1]; 
+#endif /* _OS_WINDOWS */
+
+	MV_PVOID Cmd_Initiator;		/* Which module(extension pointer) creates this request. */
+
+	MV_U8 Sense_Info_Buffer_Length;	/* size in bytes of Sense_Info_Buffer */
+	MV_U8 Reserved1[3];
+	MV_U32 Data_Transfer_Length; 	/* size in bytes of the data phase */
+
+	MV_U8 Cdb[MAX_CDB_SIZE]; 	/* SCSI command descriptor block */
+	MV_PVOID Data_Buffer; 
+	MV_PVOID Sense_Info_Buffer;
+
+	//TBD:PMV_SG_Table SG_Table;
+	MV_SG_Table SG_Table;	/* embedded by value; Entry_Ptr points at external storage */
+
+	MV_PVOID Org_Req;	/* The original request. */ 
+	MV_PVOID Context;
+	MV_PVOID pRaid_Request;
+	MV_PVOID Reserved2;	/* fill in the blank */
+
+	MV_LBA LBA;	/* valid only when REQ_FLAG_LBA_VALID is set */
+	MV_U32 Sector_Count;	/* valid only when REQ_FLAG_LBA_VALID is set */
+	MV_U32 Cmd_Flag;	/* valid only when REQ_FLAG_CMD_FLAG_VALID is set */
+#if defined(_OS_LINUX) && defined(SUPPORT_ERROR_HANDLING)
+	MV_DECLARE_TIMER(eh_timeout);	/* per-request error-handling timeout */
+#endif /* _OS_LINUX && SUPPORT_ERROR_HANDLING */
+#ifdef __AC_PROF__
+	MV_ULONG stamp;	/* profiling timestamp */
+#endif /* __AC_PROF__ */
+	void (*Completion)(MV_PVOID,PMV_Request);/* call back function */
+
+};
+
+#define MV_REQUEST_SIZE		sizeof(MV_Request)
+/*
+ * Request flag is the flag for the MV_Request data structure.
+ */
+#define REQ_FLAG_LBA_VALID		MV_BIT(0)		/* LBA and Sector_Count variable are valid */
+#define REQ_FLAG_CMD_FLAG_VALID	MV_BIT(1)		/* Cmd_Flag is valid */
+#define REQ_FLAG_RETRY			MV_BIT(2)
+#define REQ_FLAG_INTERNAL_SG	MV_BIT(3)		/* SG Table is constructed by Scratch Buffer */
+/* will be removed */
+
+#define REQ_FLAG_USE_PHYSICAL_SG MV_BIT(4)
+#define REQ_FLAG_USE_LOGICAL_SG MV_BIT(5)
+
+/*
+ * Request Type is the type of MV_Request.
+ */
+#define REQ_TYPE_OS				0
+#define REQ_TYPE_SUBLD			1
+#define REQ_TYPE_SUBBGA			2
+
+/*
+ * Command flag is the flag for the CDB command itself 
+ */
+/* The first 16 bit can be determined by the initiator. */
+#define CMD_FLAG_NON_DATA		MV_BIT(0)		/* 1-non data; 0-data command */
+#define CMD_FLAG_DMA			MV_BIT(1)		/* 1-DMA; 0-PIO */
+#define CMD_FLAG_DATA_IN		MV_BIT(2)		/* 1-host read data; 0-host write data*/
+
+/* The last 16 bit only can be set by the target. Only core driver knows the device characteristic. */
+#define CMD_FLAG_NCQ			MV_BIT(16)
+#define CMD_FLAG_TCQ			MV_BIT(17)
+#define CMD_FLAG_48BIT			MV_BIT(18)
+#define CMD_FLAG_PACKET			MV_BIT(19)		/* ATAPI packet command */
+
+#ifdef RAID_DRIVER
+/* XOR request types */
+#define	XOR_REQUEST_WRITE		0
+#define	XOR_REQUEST_COMPARE		1
+#define	XOR_REQUEST_DMA			2
+
+/* XOR request status */
+#define XOR_STATUS_SUCCESS				0
+#define XOR_STATUS_INVALID_REQUEST		1
+#define XOR_STATUS_ERROR				2
+#define XOR_STATUS_INVALID_PARAMETER	3
+
+#ifdef SUPPORT_RAID6
+#define XOR_SOURCE_SG_COUNT				11	//TBD	support 8 disks RAID5
+#define XOR_TARGET_SG_COUNT				1	//TBD
+typedef MV_U8	XOR_COEF, *PXOR_COEF;		/* XOR Coefficient */
+#endif
+
+struct _MV_XOR_Request {	/* request for the hardware XOR engine (RAID parity work) */
+	List_Head Queue_Pointer;	/* linkage for whichever queue currently owns the request */
+
+#if 0
+	union {
+		struct {
+			MV_U8	Target_Id; 
+			MV_U8	Lun;
+		};
+		MV_U16 Device_Id;
+	};
+#else
+	MV_U16 Device_Id;
+#endif
+	MV_U8 Request_Type;		/* XOR_REQUEST_xxx */
+	MV_U8 Request_Status;	/* XOR_STATUS_xxx */
+
+	MV_U8 Source_SG_Table_Count;		/* how many items in the SG_Table_List */
+	MV_U8 Target_SG_Table_Count;
+	MV_U8 Reserved[2];
+	
+#ifndef SUPPORT_RAID6
+	PMV_SG_Table Source_SG_Table_List[2];
+	List_Head Target_SG_Table_List;
+#else
+	MV_SG_Table Source_SG_Table_List[XOR_SOURCE_SG_COUNT];
+	MV_SG_Table Target_SG_Table_List[XOR_TARGET_SG_COUNT];
+	XOR_COEF	Coef[XOR_TARGET_SG_COUNT][XOR_SOURCE_SG_COUNT];	//TBD: Use one task or several tasks.
+#endif
+
+	MV_U32 Error_Offset;	/* byte, not sector */
+	MV_PVOID Cmd_Initiator;				/* Which module(extension pointer) creates this request. */
+	MV_PVOID Context;
+	void (*Completion)(MV_PVOID, PMV_XOR_Request);	/* call back function */
+};
+#endif
+
+/* Resource type */
+enum Resource_Type
+{
+	RESOURCE_CACHED_MEMORY = 0,	/* ordinary cacheable memory */
+	RESOURCE_UNCACHED_MEMORY	/* DMA-visible, uncached memory */
+};
+
+/* Module event type */
+enum Module_Event
+{
+	EVENT_MODULE_ALL_STARTED = 0,
+#ifdef CACHE_MODULE_SUPPORT
+	EVENT_DEVICE_CACHE_MODE_CHANGED,
+#endif
+	EVENT_DEVICE_ARRIVAL,
+	EVENT_DEVICE_REMOVAL
+};
+
+/* 
+ * Don't change the order here. 
+ * Module_StartAll will start from big id to small id. 
+ * Make sure module_set setting matches the Module_Id 
+ * MODULE_HBA must be the first one. Refer to Module_AssignModuleExtension.
+ * And HBA_GetNextModuleSendFunction has an assumption that the next level has larger ID.
+ */
+enum Module_Id
+{
+	MODULE_HBA = 0,	/* must stay first — see comment above */
+#ifdef CACHE_MODULE_SUPPORT
+	MODULE_CACHE,
+#endif
+
+#ifdef RAID_DRIVER
+	MODULE_RAID,
+#endif
+	MODULE_CORE,
+	MAX_MODULE_NUMBER	/* count sentinel, not a real module */
+};
+
+/*
+ * Exposed Functions
+ */
+
+/*
+ *
+ * Miscellaneous Definitions
+ *
+ */
+/* Rounding */
+
+/* Packed */
+
+#define MV_MAX(x,y)		(((x) > (y)) ? (x) : (y))	/* NOTE: evaluates each argument twice */
+#define MV_MIN(x,y)		(((x) < (y)) ? (x) : (y))	/* NOTE: evaluates each argument twice */
+
+#define MV_MAX_U64(x, y)	(((x.value) > (y.value)) ? (x) : (y))	/* added by xxp; arguments must be MV_U64 lvalues */
+#define MV_MIN_U64(x, y)	(((x.value) < (y.value)) ? (x) : (y))	/* added by xxp; arguments must be MV_U64 lvalues */
+
+#define MV_MAX_U8			0xFF
+#define MV_MAX_U16			0xFFFF
+#define MV_MAX_U32			0xFFFFFFFFL	/* L postfix is necessary for 16 bit compiler */
+
+#define OFFSET_OF(type,member)	((MV_U32)(MV_PTR_INTEGER)&(((type *)0)->member))	/* hand-rolled offsetof() */
+
+#define ROUNDING(value, align)	( ((value)+(align)-1)/(align)*(align) )	/* round value up to a multiple of align */
+
+#endif
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/common/com_u64.c linux-2.6.25/drivers/scsi/mv/common/com_u64.c
--- linux-2.6.25.orig/drivers/scsi/mv/common/com_u64.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/common/com_u64.c	2008-07-28 18:42:43.321189328 +0200
@@ -0,0 +1,141 @@
+#include "mv_include.h"
+
+MV_U64 U64_ADD_U32(MV_U64 v64, MV_U32 v32)	/* returns v64 + v32 (by value; argument unchanged) */
+{
+#ifdef _64_BIT_COMPILER
+	v64.value += v32;
+#else
+	v64.low += v32;
+	v64.high = 0;	//TBD	/* high word discarded and carry ignored on 32-bit builds */
+#endif
+	return v64;
+}
+
+MV_U64 U64_SUBTRACT_U32(MV_U64 v64, MV_U32 v32)	/* returns v64 - v32 (by value) */
+{
+#ifdef _64_BIT_COMPILER
+	v64.value -= v32;
+#else
+	v64.low -= v32;
+	v64.high = 0;	//TBD	/* high word discarded and borrow ignored on 32-bit builds */
+#endif
+	return v64;
+}
+
+MV_U64 U64_MULTIPLY_U32(MV_U64 v64, MV_U32 v32)	/* returns v64 * v32 (by value) */
+{
+#ifdef _64_BIT_COMPILER
+	v64.value *= v32;
+#else
+	v64.low *= v32;
+	v64.high = 0;	//TBD	/* 32x32 product truncated to 32 bits on 32-bit builds */
+#endif
+	return v64;
+}
+
+MV_U32 U64_MOD_U32(MV_U64 v64, MV_U32 v32)	/* returns v64 % v32 */
+{
+#ifdef _OS_LINUX
+	return do_div(v64.value, v32);	/* do_div divides v64.value in place and yields the remainder */
+#else
+	return v64.value % v32;
+#endif /* _OS_LINUX */
+}
+
+MV_U64 U64_DIVIDE_U32(MV_U64 v64, MV_U32 v32)	/* returns v64 / v32 (by value) */
+{
+#ifdef _OS_LINUX
+	do_div(v64.value, v32);	/* quotient is left in v64.value; remainder discarded */
+#else
+#ifdef _64_BIT_COMPILER
+	v64.value /= v32;
+#else
+	v64.high = 0;	//TBD	/* high word discarded on 32-bit non-Linux builds */
+	v64.low /= v32;
+#endif /* _64_BIT_COMPILER */
+
+#endif /* _OS_LINUX */
+	return v64;
+}
+
+MV_I32 U64_COMPARE_U32(MV_U64 v64, MV_U32 v32)	/* three-way compare: 1 if v64>v32, 0 if equal, -1 if less */
+{
+	if (v64.high > 0)	/* any high bits set means v64 exceeds any 32-bit value */
+		return 1;
+	if (v64.low > v32)
+		return 1;
+#ifdef _64_BIT_COMPILER
+	else if (v64.value == v32)
+#else
+	else if (v64.low == v32)
+#endif
+		return 0;
+	else
+		return -1;
+}
+
+MV_U64 U64_ADD_U64(MV_U64 v1, MV_U64 v2)	/* returns v1 + v2 (by value) */
+{
+#ifdef _64_BIT_COMPILER
+	v1.value += v2.value;
+#else
+	v1.low += v2.low;
+	v1.high = 0;	//TBD	/* high words and carry ignored on 32-bit builds */
+	//v1.high += v2.high;
+#endif
+	return v1;
+}
+
+MV_U64 U64_SUBTRACT_U64(MV_U64 v1, MV_U64 v2)	/* returns v1 - v2 (by value) */
+{
+#ifdef _64_BIT_COMPILER
+	v1.value -= v2.value;
+#else
+	v1.low -= v2.low;
+	v1.high = 0;	//TBD	/* high words and borrow ignored on 32-bit builds */
+	//v1.high -= v2.high;
+#endif
+	return v1;
+}
+
+MV_I32 U64_COMPARE_U64(MV_U64 v1, MV_U64 v2)	/* three-way compare: 1/0/-1 for greater/equal/less */
+{
+#ifdef _64_BIT_COMPILER
+	if (v1.value > v2.value)
+		return 1;
+	else if (v1.value == v2.value)
+		return 0;
+	else
+		return -1;
+#else
+#if 0
+	if (v1.high > v2.high)
+		return 1;
+	else if((v1.low > v2.low) && (v1.high == v2.high))
+		return 1;
+		
+	else if ((v1.low == v2.low) && (v1.high == v2.high))
+		return 0;
+	else
+		return -1;
+#endif
+	//TBD	/* relies on .value being usable even on non-64-bit compilers — confirm */
+	if (v1.value > v2.value)
+		return 1;
+	else if (v1.value == v2.value)
+		return 0;
+	else
+		return -1;
+
+#endif
+
+}
+
+#ifdef _OS_BIOS
+MV_U64 ZeroU64(MV_U64 v1)	/* returns an all-zero MV_U64 (argument passed by value, not modified) */
+{
+	v1.low=0;v1.high=0;
+	return	v1;
+}
+#endif
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/common/com_u64.h linux-2.6.25/drivers/scsi/mv/common/com_u64.h
--- linux-2.6.25.orig/drivers/scsi/mv/common/com_u64.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/common/com_u64.h	2008-07-28 18:42:43.321189328 +0200
@@ -0,0 +1,22 @@
+#ifndef _U64_H__
+#define _U64_H__
+
+/* 64-bit helpers; all take/return MV_U64 by value (see com_u64.c) */
+MV_U64 U64_ADD_U32(MV_U64 v64, MV_U32 v32);
+MV_U64 U64_SUBTRACT_U32(MV_U64 v64, MV_U32 v32);
+MV_U64 U64_MULTIPLY_U32(MV_U64 v64, MV_U32 v32);
+MV_U64 U64_DIVIDE_U32(MV_U64 v64, MV_U32 v32);
+MV_I32 U64_COMPARE_U32(MV_U64 v64, MV_U32 v32);	/* 1/0/-1 three-way compare */
+MV_U32 U64_MOD_U32(MV_U64 v64, MV_U32 v32);
+
+MV_U64 U64_ADD_U64(MV_U64 v1, MV_U64 v2);
+MV_U64 U64_SUBTRACT_U64(MV_U64 v1, MV_U64 v2);
+MV_I32 U64_COMPARE_U64(MV_U64 v1, MV_U64 v2);	/* 1/0/-1 three-way compare */
+
+#define U64_SET_VALUE(v64, v32)	do { v64.value = v32; } while(0)
+#define U64_SET_MAX_VALUE(v64)	do { v64.low = v64.high = 0xFFFFFFFF; } while(0)	/* stray ';' removed: it defeated the do-while(0) idiom in if/else */
+#ifdef _OS_BIOS
+MV_U64 ZeroU64(MV_U64 v1);
+#endif
+
+#endif
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/common/com_util.c linux-2.6.25/drivers/scsi/mv/common/com_util.c
--- linux-2.6.25.orig/drivers/scsi/mv/common/com_util.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/common/com_util.c	2008-07-28 18:42:43.322189111 +0200
@@ -0,0 +1,277 @@
+#include "mv_include.h"
+
+void MV_ZeroMvRequest(PMV_Request pReq)	/* zero a request but preserve its SG entry storage pointer and capacity */
+{
+	PMV_SG_Entry pSGEntry;
+	MV_U8 maxEntryCount;
+
+	MV_DASSERT(pReq);
+	pSGEntry = pReq->SG_Table.Entry_Ptr;	/* save fields that survive the wipe */
+	maxEntryCount = pReq->SG_Table.Max_Entry_Count;
+	MV_ZeroMemory(pReq, MV_REQUEST_SIZE);
+	pReq->SG_Table.Entry_Ptr = pSGEntry;	/* restore external SG storage */
+	pReq->SG_Table.Max_Entry_Count = maxEntryCount;
+}
+
+void MV_CopySGTable(PMV_SG_Table pTargetSGTable, PMV_SG_Table pSourceSGTable)	/* copy header fields and valid entries; target keeps its own Entry_Ptr */
+{
+	pTargetSGTable->Valid_Entry_Count = pSourceSGTable->Valid_Entry_Count;
+	pTargetSGTable->Flag = pSourceSGTable->Flag;
+	pTargetSGTable->Byte_Count = pSourceSGTable->Byte_Count;
+	MV_CopyMemory(pTargetSGTable->Entry_Ptr, pSourceSGTable->Entry_Ptr, 
+					sizeof(MV_SG_Entry)*pTargetSGTable->Valid_Entry_Count);	/* assumes target capacity >= source valid count — not checked here */
+
+}
+
+#ifdef _OS_BIOS
+MV_U64 CPU_TO_BIG_ENDIAN_64(MV_U64 x)	/* byte-swap a 64-bit value: swap halves, then swap bytes within each half */
+
+{
+	MV_U64 x1;
+	ZeroU64(x1);	/* NOTE(review): ZeroU64 returns by value, so this call has no effect; harmless since both fields are assigned below */
+	x1.low=CPU_TO_BIG_ENDIAN_32(x.high);
+	x1.high=CPU_TO_BIG_ENDIAN_32(x.low);
+	return x1;
+}
+#endif	/* #ifdef _OS_BIOS */
+
+MV_BOOLEAN MV_Equals(	/* byte-wise equality of des[0..len-1] and src[0..len-1]; MV_TRUE when len==0 */
+	IN MV_PU8		des,
+	IN MV_PU8		src,
+	IN MV_U8		len							 
+)
+{
+	MV_U8 i;
+
+	for (i=0; i<len; i++){
+		if (*des != *src){
+			return MV_FALSE;	/* first mismatch ends the scan */
+		}
+		des++;
+		src++;
+	}
+	return MV_TRUE;
+}
+/*
+ * SG Table operation
+ */
+void SGTable_Init(	/* empty the table and set its flag; capacity/Entry_Ptr untouched */
+	OUT PMV_SG_Table pSGTable,
+	IN MV_U8 flag
+	)
+{
+/*	pSGTable->Max_Entry_Count = MAX_SG_ENTRY; */ /* it is set already when module initialized */
+	pSGTable->Valid_Entry_Count = 0;
+	pSGTable->Flag = flag;
+	pSGTable->Byte_Count = 0;
+}
+
+void SGTable_Append(	/* append one SG entry; caller must ensure capacity (see SGTable_Available) */
+	OUT PMV_SG_Table pSGTable,
+	MV_U32 address,
+	MV_U32 addressHigh,
+	MV_U32 size
+	)
+{
+//	PMV_SG_Entry pSGEntry = &pSGTable->Entry[pSGTable->Valid_Entry_Count];
+	PMV_SG_Entry pSGEntry = &pSGTable->Entry_Ptr[pSGTable->Valid_Entry_Count];
+
+	MV_ASSERT( pSGTable->Valid_Entry_Count+1<=pSGTable->Max_Entry_Count );	/* table overflow is a caller bug */
+
+	/* 
+	 * Workaround hardware issue:
+	 * If the transfer size is odd, some request cannot be finished.
+	 * Hopefully the workaround won't damage the system.
+	 */
+#ifdef PRD_SIZE_WORD_ALIGN
+	if ( size%2 ) size++;	/* round odd sizes up to the next even byte count */
+#endif
+
+	pSGTable->Valid_Entry_Count += 1;
+	pSGTable->Byte_Count += size;	/* running total includes any alignment padding added above */
+
+	pSGEntry->Base_Address = address;
+	pSGEntry->Base_Address_High = addressHigh;
+	pSGEntry->Size = size;
+}
+
+MV_BOOLEAN SGTable_Available(	/* nonzero when at least one more entry can be appended */
+	IN PMV_SG_Table pSGTable
+	)
+{
+	return (pSGTable->Valid_Entry_Count < pSGTable->Max_Entry_Count);
+}
+
+#ifndef _OS_BIOS
+void MV_DumpRequest(PMV_Request pReq, MV_BOOLEAN detail)	/* debug print; dumps only the first 12 of MAX_CDB_SIZE(16) CDB bytes */
+{
+	MV_PRINT("MV_Request: Cdb[%2x,%2x,%2x,%2x, %2x,%2x,%2x,%2x, %2x,%2x,%2x,%2x].\n",
+		pReq->Cdb[0],
+		pReq->Cdb[1],
+		pReq->Cdb[2],
+		pReq->Cdb[3],
+		pReq->Cdb[4],
+		pReq->Cdb[5],
+		pReq->Cdb[6],
+		pReq->Cdb[7],
+		pReq->Cdb[8],
+		pReq->Cdb[9],
+		pReq->Cdb[10],
+		pReq->Cdb[11]
+		);
+
+	if ( detail )
+	{
+		MV_PRINT("Device_Id=%d\n", pReq->Device_Id);
+		MV_PRINT("Data_Transfer_Length=%d\n", pReq->Data_Transfer_Length);
+		MV_PRINT("Sense_Info_Buffer_Length=%d\n", pReq->Sense_Info_Buffer_Length);
+	}
+}
+
+#ifdef SUPPORT_RAID6
+void MV_DumpXORRequest(PMV_XOR_Request pXORReq, MV_BOOLEAN detail)	/* debug print of an XOR request; detail also dumps every SG table */
+{
+	MV_U32 i;
+
+	MV_PRINT("MV_XOR_Request: Type=0x%x, Source count=%d, Target count=%d\n",
+		pXORReq->Request_Type,
+		pXORReq->Source_SG_Table_Count,
+		pXORReq->Target_SG_Table_Count
+		);
+
+	if ( detail )
+	{
+		MV_PRINT("Source SG table...\n");
+		for ( i=0; i<pXORReq->Source_SG_Table_Count; i++ )
+			MV_DumpSGTable(&pXORReq->Source_SG_Table_List[i]);
+
+		MV_PRINT("Target SG table...\n");
+		for ( i=0; i<pXORReq->Target_SG_Table_Count; i++ )
+			MV_DumpSGTable(&pXORReq->Target_SG_Table_List[i]);
+	}
+}
+#endif /* SUPPORT_RAID6 */
+
+void MV_DumpSGTable(PMV_SG_Table pSGTable)	/* debug print: total byte count then every valid entry */
+{
+	PMV_SG_Entry pSGEntry;
+	MV_U32 i;
+	MV_PRINT("SG Table: size(0x%x)\n", pSGTable->Byte_Count);
+	for ( i=0; i<pSGTable->Valid_Entry_Count; i++ )
+	{
+//		pSGEntry = &pSGTable->Entry[i];
+		pSGEntry = &pSGTable->Entry_Ptr[i];
+		MV_PRINT("%d: addr(0x%x-0x%x), size(0x%x).\n", 
+			i, pSGEntry->Base_Address_High, pSGEntry->Base_Address, pSGEntry->Size);	/* printed high:low */
+	}
+}
+
+const char* MV_DumpSenseKey(MV_U8 sense)	/* map a SCSI sense key to its symbolic name (static string) */
+{
+	switch ( sense )
+	{
+		case SCSI_SK_NO_SENSE:
+			return "SCSI_SK_NO_SENSE";
+		case SCSI_SK_RECOVERED_ERROR:
+			return "SCSI_SK_RECOVERED_ERROR";
+		case SCSI_SK_NOT_READY:
+			return "SCSI_SK_NOT_READY";
+		case SCSI_SK_MEDIUM_ERROR:
+			return "SCSI_SK_MEDIUM_ERROR";
+		case SCSI_SK_HARDWARE_ERROR:
+			return "SCSI_SK_HARDWARE_ERROR";
+		case SCSI_SK_ILLEGAL_REQUEST:
+			return "SCSI_SK_ILLEGAL_REQUEST";
+		case SCSI_SK_UNIT_ATTENTION:
+			return "SCSI_SK_UNIT_ATTENTION";
+		case SCSI_SK_DATA_PROTECT:
+			return "SCSI_SK_DATA_PROTECT";
+		case SCSI_SK_BLANK_CHECK:
+			return "SCSI_SK_BLANK_CHECK";
+		case SCSI_SK_VENDOR_SPECIFIC:
+			return "SCSI_SK_VENDOR_SPECIFIC";
+		case SCSI_SK_COPY_ABORTED:
+			return "SCSI_SK_COPY_ABORTED";
+		case SCSI_SK_ABORTED_COMMAND:
+			return "SCSI_SK_ABORTED_COMMAND";
+		case SCSI_SK_VOLUME_OVERFLOW:
+			return "SCSI_SK_VOLUME_OVERFLOW";
+		case SCSI_SK_MISCOMPARE:
+			return "SCSI_SK_MISCOMPARE";
+		default:
+			MV_DPRINT(("Unknown sense key 0x%x.\n", sense));
+			return "Unknown sense key";
+	}
+}
+#endif	/* #ifndef _OS_BIOS */
+
+static MV_U32  BASEATTR crc_tab[] = {	/* standard CRC-32 lookup table (reflected polynomial 0xEDB88320) */
+        0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L,
+        0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L,
+        0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
+        0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
+        0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
+        0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
+        0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
+        0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
+        0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
+        0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
+        0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
+        0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
+        0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
+        0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL,
+        0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL,
+        0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L,
+        0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
+        0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
+        0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
+        0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
+        0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
+        0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
+        0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
+        0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
+        0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
+        0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
+        0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L,
+        0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L,
+        0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L,
+        0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
+        0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
+        0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
+        0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
+        0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
+        0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
+        0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
+        0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
+        0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
+        0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
+        0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L,
+        0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL,
+        0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L,
+        0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
+        0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
+        0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
+        0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
+        0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
+        0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
+        0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
+        0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
+        0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
+        0x2d02ef8dL
+};
+
+/* Calculate CRC and generate PD_Reference number */
+MV_U32 MV_CRC(	/* CRC-32 over pData[0..len-1]: seed all-ones, no final XOR, result byte-swapped to big endian */
+	IN	MV_PU8		pData, 
+	IN	MV_U16		len
+)
+{
+    MV_U16 i;
+    MV_U32 crc = MV_MAX_U32;	/* initial value 0xFFFFFFFF */
+
+    for (i = 0;  i < len;  i ++) {
+		crc = crc_tab[(crc ^ pData[i]) & 0xff] ^ (crc >> 8);	/* table-driven, one byte per step */
+    }
+    return CPU_TO_BIG_ENDIAN_32(crc);
+}
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/common/com_util.h linux-2.6.25/drivers/scsi/mv/common/com_util.h
--- linux-2.6.25.orig/drivers/scsi/mv/common/com_util.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/common/com_util.h	2008-07-28 18:42:43.322189111 +0200
@@ -0,0 +1,69 @@
+#if !defined(COM_UTIL_H)
+#define COM_UTIL_H
+
+#include "com_type.h"
+
+#define MV_ZeroMemory(buffer, byte_count)				memset(buffer, 0, byte_count)
+#define MV_FillMemory(buffer, byte_count, pattern)		memset(buffer, pattern, byte_count)
+#define MV_CopyMemory(dest, source, byte_count)			memcpy(dest, source, byte_count)
+
+void MV_ZeroMvRequest(PMV_Request pReq);	/* zero a request, keeping its SG entry storage */
+void MV_CopySGTable(PMV_SG_Table pTargetSGTable, PMV_SG_Table pSourceSGTable);
+
+MV_BOOLEAN MV_Equals(MV_PU8	des, MV_PU8	src, MV_U8 len);	/* byte-wise comparison */
+/* Byte-swap helpers: convert native (little-endian) CPU values to big endian. */
+#define CPU_TO_BIG_ENDIAN_32(x)		\
+	( ((MV_U32)((MV_U8)(x)))<<24 | ((MV_U32)((MV_U8)((x)>>8)))<<16 | ((MV_U32)((MV_U8)((x)>>16)))<<8 | ((MV_U32)((MV_U8)((x)>>24))) )
+
+//#define CPU_TO_BIG_ENDIAN_64(x)
+//#define CPU_TO_BIG_ENDIAN_16(x)
+
+/* -------- Added by xxp --------*/
+#ifndef _OS_BIOS
+#define CPU_TO_BIG_ENDIAN_64(x)		\
+	( ((_MV_U64)(CPU_TO_BIG_ENDIAN_32(x.low))) << 32 |	/* NOTE(review): confirm _MV_U64 is the scalar 64-bit typedef */ \
+	  CPU_TO_BIG_ENDIAN_32(x.high) )
+#else
+MV_U64 CPU_TO_BIG_ENDIAN_64(MV_U64 x);	/* function form for BIOS builds; see com_util.c */
+
+#endif
+
+#define CPU_TO_BIG_ENDIAN_16(x)		\
+	( ((MV_U16)((MV_U8)(x))) << 8 | (MV_U16)((MV_U8)((x)>>8)) )
+/* -------- End -------- */
+
+void SGTable_Init(
+	OUT PMV_SG_Table pSGTable,
+	IN MV_U8 flag
+	);
+
+void SGTable_Append(
+	OUT PMV_SG_Table pSGTable,
+	MV_U32 address,
+	MV_U32 addressHigh,
+	MV_U32 size
+	);
+
+MV_BOOLEAN SGTable_Available(
+	IN PMV_SG_Table pSGTable
+	);
+
+void MV_DumpRequest(PMV_Request pReq, MV_BOOLEAN detail);
+#ifdef SUPPORT_RAID6
+void MV_DumpXORRequest(PMV_XOR_Request pXORReq, MV_BOOLEAN detail);
+#endif /* SUPPORT_RAID6 */
+void MV_DumpSGTable(PMV_SG_Table pSGTable);
+const char* MV_DumpSenseKey(MV_U8 sense);
+
+MV_U32 MV_CRC(
+	IN	MV_PU8		pData, 
+	IN	MV_U16		len
+);
+
+/* Increment value modulo mod. Stray ';' after while(0) removed: it broke if/else usage. */
+#define MV_MOD_ADD(value, mod)	do {	\
+	(value)++;							\
+	if ((value)>=(mod)) (value)=0;		\
+	} while (0)
+
+#endif
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/core/consolid.c linux-2.6.25/drivers/scsi/mv/core/consolid.c
--- linux-2.6.25.orig/drivers/scsi/mv/core/consolid.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/core/consolid.c	2008-07-28 18:42:43.322189111 +0200
@@ -0,0 +1,443 @@
+#include "mv_include.h"
+
+#ifdef SUPPORT_CONSOLIDATE
+
+#include "consolid.h"
+
+#ifdef MV_DEBUG
+#define TUNE_CONSOLIDATE
+#endif
+
+#ifdef TUNE_CONSOLIDATE
+#define CONSOLIDATE_STATISTICS_COUNT	8
+static MV_U32 gConsolidateStatistics[CONSOLIDATE_STATISTICS_COUNT];
+typedef enum{
+	CONSOLIDATE_NOT_READ_WRITE,
+	CONSOLIDATE_REQUEST_TOO_BIG,
+	CONSOLIDATE_READ_WRITE_DIFFERENT,
+	CONSOLIDATE_NO_RUNNING_REQUEST,
+	CONSOLIDATE_LESS_THAN_SEQUENTIAL_THRESHOLD,
+	CONSOLIDATE_NO_RESOURCE,
+	CONSOLIDATE_GOT_PUSHED,
+	CONSOLIDATA_RESERVED0
+}Consolidate_Statistics_Enum;
+
+void UpdateConsolidateStatistics(Consolidate_Statistics_Enum catogory)
+{
+	MV_U8 i;
+	if ( gConsolidateStatistics[catogory]==0xFFFFFFFF )
+	{
+		for ( i=0; i<CONSOLIDATE_STATISTICS_COUNT; i++ )
+			MV_DPRINT(("Consolidate statistics[%d]=0x%x.\n", i, 
+			gConsolidateStatistics[i]));
+		MV_ZeroMemory(gConsolidateStatistics, sizeof(MV_U32)*CONSOLIDATE_STATISTICS_COUNT);
+	}
+
+	gConsolidateStatistics[catogory]++;
+}
+#else
+#define UpdateConsolidateStatistics(x)
+#endif
+
+/* 
+ * Instruction: How to plug-in this command consolidate sub module to your own module.
+ * 1. Include one .h file which supplies some helper functions like CONS_GET_EXTENSION
+ * 2. Allocate memory resources for Consolidate_Extension and Consolidate_Device
+ * 3. Initialize command consolidate module. 
+ * 	Call Consolid_InitializeExtension to initialize Consolidate_Extension
+ *	Call Consolid_InitializeDevice for each Consolidate_Device
+ * 4. When your request comes, call Consolid_ModuleSendRequest
+ * 5. At proper time, please push command consolidate module.
+ *	Sometimes command consolidate is accumulating requests and hasn't fired this internal request,
+ *	if there is nothing running now, just push this internal request out.
+ */
+#include "core_cons.h"
+
+PMV_Request Consolid_GetInternalRequest(MV_PVOID This);
+void Consolid_InitialInternalRequest(MV_PVOID This, PMV_Request, MV_BOOLEAN);
+
+void Consolid_ConsolidateRequest(PConsolidate_Extension, PMV_Request, PMV_Request);
+void Consolid_CloseRequest(PConsolidate_Extension, PConsolidate_Device, PMV_Request);
+
+void Consolid_RequestCallBack(MV_PVOID This, PMV_Request pReq);
+
+/*
+ * Consolidate sub-module has got a request.
+ * Two parameters:
+ * This: the command initiator extension pointer.
+ * pReq: request
+ * Will fire: 
+ *		a. one internal request
+ *		b. this external request and maybe one holding internal request if exists.
+ *		c. NULL if consolidate module holds this request.
+ */
+void Consolid_ModuleSendRequest(MV_PVOID This, PMV_Request pReq)
+{
+	PConsolidate_Extension pCons = CONS_GET_EXTENSION(This);
+	MV_U16 deviceId = pReq->Device_Id;
+	PConsolidate_Device pConsDevice = NULL;
+	PMV_Request pInternal = NULL;
+	MV_LBA startLBA;
+	MV_U32 sectorCount;
+#ifdef _OS_BIOS
+	ZeroU64(startLBA);
+#endif
+
+	if ( deviceId>=MAX_DEVICE_NUMBER )
+	{
+		goto return_original_req;
+	}
+	pConsDevice = CONS_GET_DEVICE(This, deviceId);
+
+	/* 
+	 * We only handle CDB 10 read/write. 
+	 * Otherwise, change the following code which gets the LBA and Sector Count from the CDB. 
+	 */
+	if ( (pReq->Cdb[0]!=SCSI_CMD_READ_10)&&(pReq->Cdb[0]!=SCSI_CMD_WRITE_10) )
+	{
+		UpdateConsolidateStatistics(CONSOLIDATE_NOT_READ_WRITE);
+		goto return_original_req;
+	}
+
+	/* It's read/write request. But is it too big for command consolidate */
+	if ( pReq->Data_Transfer_Length>CONS_MAX_EXTERNAL_REQUEST_SIZE )
+	{
+		UpdateConsolidateStatistics(CONSOLIDATE_REQUEST_TOO_BIG);
+		goto return_original_req;
+	}
+
+	/* Check whether they are all read requests or write requests. */
+	if ( 
+		( (pReq->Cdb[0]==SCSI_CMD_READ_10)&&(!pConsDevice->Is_Read) )
+		||
+		( (pReq->Cdb[0]==SCSI_CMD_WRITE_10)&&(pConsDevice->Is_Read) )
+		)
+	{
+		UpdateConsolidateStatistics(CONSOLIDATE_READ_WRITE_DIFFERENT);
+		pConsDevice->Is_Read = (pReq->Cdb[0]==SCSI_CMD_READ_10)?1:0;
+		goto return_original_req;
+	}
+
+	/* Update the consolidate device statistic including last LBA and sequential counter. */
+	U64_SET_VALUE(startLBA, SCSI_CDB10_GET_LBA(pReq->Cdb));
+	sectorCount = SCSI_CDB10_GET_SECTOR(pReq->Cdb);
+	/* Check whether it's a sequential request. */
+	if ( U64_COMPARE_U64(startLBA, pConsDevice->Last_LBA) )
+		pConsDevice->Sequential = 0; 
+	else
+		pConsDevice->Sequential++;	/* When equals, return 0. */
+
+	/* Last_LBA is actually the next expect sequential LBA. */
+	pConsDevice->Last_LBA = U64_ADD_U32(startLBA, sectorCount);
+	if ( pConsDevice->Sequential>CONS_SEQUENTIAL_MAX )	/* To avoid overflow */
+		pConsDevice->Sequential=CONS_SEQUENTIAL_THRESHOLD;
+
+	/* Is there any requests running on this device? If no, by pass. */
+	if ( !CONS_DEVICE_IS_BUSY(This, deviceId) )
+	{
+		UpdateConsolidateStatistics(CONSOLIDATE_NO_RUNNING_REQUEST);
+		goto return_original_req;
+	}
+
+	/* Do we reach the sequential counter threshold? */
+	if ( pConsDevice->Sequential<CONS_SEQUENTIAL_THRESHOLD )
+	{
+		UpdateConsolidateStatistics(CONSOLIDATE_LESS_THAN_SEQUENTIAL_THRESHOLD);
+		goto return_original_req;
+	}
+
+	pInternal = pConsDevice->Holding_Request;
+
+	/* Don't accumulate this request too big. */
+	if ( pInternal && 
+		( (pInternal->Data_Transfer_Length+pReq->Data_Transfer_Length>CONS_MAX_INTERNAL_REQUEST_SIZE)
+		  ||
+		  (pInternal->SG_Table.Valid_Entry_Count+pReq->SG_Table.Valid_Entry_Count>pInternal->SG_Table.Max_Entry_Count)
+		)
+	   )
+	{
+		Consolid_CloseRequest(pCons, pConsDevice, pInternal);
+		CONS_SEND_REQUEST(This, pInternal);
+		pInternal = NULL;	/* After Consolid_CloseRequest, pConsDevice->Holding_Request==NULL */
+	}
+
+	/* Get one internal request if we don't have. */
+	if ( pConsDevice->Holding_Request==NULL )
+	{
+		pConsDevice->Holding_Request = Consolid_GetInternalRequest(This);
+	}
+	pInternal = pConsDevice->Holding_Request;
+
+	/* We are out of resource. */
+	if ( pInternal==NULL )
+	{
+		UpdateConsolidateStatistics(CONSOLIDATE_NO_RESOURCE);
+		goto return_original_req;
+	}
+
+	/* Now we should be able to do consolidate requests now. */
+	Consolid_ConsolidateRequest(pCons, pInternal, pReq);
+
+	/* Is this internal request big enough to fire? */
+	if ( pInternal->Data_Transfer_Length>=CONS_MIN_INTERNAL_REQUEST_SIZE )
+	{
+		Consolid_CloseRequest(pCons, pConsDevice, pInternal);
+		CONS_SEND_REQUEST(This, pInternal);
+		return;	/* Send this internal request. */
+	}
+	else
+	{
+		return;	/* Hold this request. */
+	}
+
+return_original_req:
+	/* 
+	 * To keep the command order, 
+	 * if we cannot do the consolidate for pReq but we hold some internal request,
+	 * run the internal request and then run the new pReq.
+	 */
+	if ( pConsDevice && (pConsDevice->Holding_Request) )
+	{
+		pInternal = pConsDevice->Holding_Request;
+		Consolid_CloseRequest(pCons, pConsDevice, pInternal);
+		/* After Consolid_CloseRequest, pConsDevice->Holding_Request is NULL. */
+		CONS_SEND_REQUEST(This, pInternal);
+	}
+	CONS_SEND_REQUEST(This, pReq);
+	return;
+}
+
+PMV_Request Consolid_GetInternalRequest(MV_PVOID This)
+{
+	PConsolidate_Extension pCons = CONS_GET_EXTENSION(This);
+	PMV_Request pReq = NULL;
+	if ( !List_Empty(&pCons->Free_Queue) )
+		pReq = List_GetFirstEntry(&pCons->Free_Queue, MV_Request, Queue_Pointer);
+
+	/* Let's initialize this request */
+	if ( pReq )
+		Consolid_InitialInternalRequest(This, pReq, MV_FALSE);
+
+	return pReq;
+}
+
+void Consolid_ReleaseInternalRequest(PConsolidate_Extension pCons, PMV_Request pReq)
+{
+	List_AddTail(&pReq->Queue_Pointer, &pCons->Free_Queue);
+}
+
+void Consolid_ConsolidateRequest(
+	IN PConsolidate_Extension pCons,
+	IN OUT PMV_Request pInternal, 
+	IN PMV_Request pExternal
+	)
+{
+	MV_U8 i;
+	PMV_Request pAttachedReq=NULL;
+
+	/* So far we only handle SCSI Read 10 and SCSI Write 10 */
+	MV_DASSERT( (pExternal->Cdb[0]==SCSI_CMD_READ_10) || (pExternal->Cdb[0]==SCSI_CMD_WRITE_10) );
+	pAttachedReq = pInternal->Org_Req;
+
+	if ( pInternal->Data_Transfer_Length==0 )
+	{
+		/* No external request is attached to it yet. */
+		pInternal->Device_Id = pExternal->Device_Id;
+		MV_DASSERT( pAttachedReq==NULL );
+
+		pInternal->Org_Req = pExternal;
+		MV_LIST_HEAD_INIT( &pExternal->Queue_Pointer );
+
+		pInternal->Cdb[0] = pExternal->Cdb[0];	/* Command type */
+		pInternal->Cdb[2] = pExternal->Cdb[2];	/* Start LBA */
+		pInternal->Cdb[3] = pExternal->Cdb[3];
+		pInternal->Cdb[4] = pExternal->Cdb[4];
+		pInternal->Cdb[5] = pExternal->Cdb[5];
+
+		if ( pExternal->Cdb[0]==SCSI_CMD_READ_10 )
+		{
+			pInternal->Cmd_Flag = CMD_FLAG_DMA | CMD_FLAG_DATA_IN;
+		}
+		else
+		{
+			pInternal->Cmd_Flag = CMD_FLAG_DMA;
+		}
+	}
+	else
+	{
+		MV_DASSERT( pInternal->Device_Id==pExternal->Device_Id );
+		MV_DASSERT( pAttachedReq!=NULL );
+		List_AddTail(&pExternal->Queue_Pointer, &pAttachedReq->Queue_Pointer);
+	}
+
+	/* Don't set the sector count every time. Just before send, set the count. */
+	pInternal->Data_Transfer_Length += pExternal->Data_Transfer_Length;
+
+	for ( i=0; i<pExternal->SG_Table.Valid_Entry_Count; i++ )
+	{
+		SGTable_Append(&pInternal->SG_Table, 
+			pExternal->SG_Table.Entry_Ptr[i].Base_Address,
+			pExternal->SG_Table.Entry_Ptr[i].Base_Address_High,
+			pExternal->SG_Table.Entry_Ptr[i].Size);
+	}
+}
+
+void Consolid_CloseRequest(
+	IN PConsolidate_Extension pCons,
+	IN PConsolidate_Device pConsDevice,
+	IN OUT PMV_Request pInternal
+	)
+{
+	/* 
+	 * This internal request is ready for handling now. 
+	 * Do whatever we need do before we send this request.
+	 */
+	MV_U32 sectorCount = pInternal->Data_Transfer_Length/512;	//TBD
+	MV_DASSERT(  pInternal->Data_Transfer_Length%512==0 );
+
+	SCSI_CDB10_SET_SECTOR(pInternal->Cdb, sectorCount);
+	pConsDevice->Holding_Request = NULL;
+}
+
+void Consolid_RequestCallBack(MV_PVOID This, PMV_Request pReq)
+{
+	PConsolidate_Extension pCons = CONS_GET_EXTENSION(This);
+	PConsolidate_Device pConsDevice = CONS_GET_DEVICE(This, pReq->Device_Id);
+	PMV_Request pExternal;
+	PMV_Request pAttachedReq = pReq->Org_Req;
+
+	if ( pReq->Scsi_Status==REQ_STATUS_SUCCESS )
+	{
+		/* Extract all the external requests. Update status and return. */
+		while ( !List_Empty(&pAttachedReq->Queue_Pointer) )
+		{
+			pExternal = List_GetFirstEntry(&pAttachedReq->Queue_Pointer, MV_Request, Queue_Pointer);
+			pExternal->Scsi_Status = REQ_STATUS_SUCCESS;
+			pExternal->Completion(pExternal->Cmd_Initiator, pExternal);
+		}
+		pAttachedReq->Scsi_Status = REQ_STATUS_SUCCESS;
+		pAttachedReq->Completion(pAttachedReq->Cmd_Initiator, pAttachedReq);
+	}
+	else
+	{
+		/* Make sure we won't do consolidate again for these requests. */		
+		pConsDevice->Sequential = 0;
+		MV_DPRINT(("Request error in consolidate.\n"));
+
+		/* If consolidate request has error, Re-send these original requests.
+		 * They go to the hardware directly. Bypass the consolidate module. */
+		while ( !List_Empty(&pAttachedReq->Queue_Pointer) )
+		{
+			pExternal = List_GetFirstEntry(&pAttachedReq->Queue_Pointer, MV_Request, Queue_Pointer);
+			CONS_SEND_REQUEST(This, pExternal);
+		}
+		CONS_SEND_REQUEST(This, pAttachedReq);
+	}
+
+	/* Release this request back to the pool. */
+	Consolid_ReleaseInternalRequest(pCons, pReq);
+}
+
+/* Initialize the command consolidate internal request. */
+void Consolid_InitialInternalRequest(
+	IN MV_PVOID This,
+	IN OUT PMV_Request pInternal,
+	IN MV_BOOLEAN firstTime
+	)
+{
+	/*
+	 * Link pointer: 
+	 * When request is free, Queue_Pointer is linked together in the request pool queue.
+	 * When request is in use, Org_Req is pointer to the first external request.
+	 * This first external request uses Queue_Pointer to link other external requests.
+	 * We cannot use internal request's Queue_Pointer to link external requests.
+	 * Because after sending to the core driver, this pointer will be destroyed.
+	 */
+	pInternal->Org_Req = NULL;				/* Use Queue_Pointer as the linker */
+	pInternal->Req_Flag = 0;
+	pInternal->Scsi_Status = REQ_STATUS_PENDING;
+	pInternal->Data_Transfer_Length = 0;
+	pInternal->Cmd_Flag = 0;
+	SGTable_Init(&pInternal->SG_Table, 0);
+
+	/* 
+	 * Some variables only need initialization once. 
+	 * It won't change no matter during the life time. 
+	 */
+	if ( firstTime )
+	{
+		pInternal->Device_Id = 0;
+		pInternal->Tag = 0;						/* Haven't used. */
+		pInternal->Cmd_Initiator = This;
+		pInternal->Sense_Info_Buffer_Length = 0;
+		pInternal->Sense_Info_Buffer = NULL;
+		pInternal->Data_Buffer = NULL;			/* After consolidate, virtual address is not valid. */
+		pInternal->Context = NULL;
+		pInternal->Completion = Consolid_RequestCallBack;
+		MV_LIST_HEAD_INIT(&pInternal->Queue_Pointer);
+		MV_ZeroMemory(pInternal->Cdb, MAX_CDB_SIZE);
+		U64_SET_VALUE(pInternal->LBA, 0);
+		pInternal->Sector_Count = 0;
+	}
+}
+
+/* Initialize the Consolidate_Extension */
+void Consolid_InitializeExtension(MV_PVOID This)
+{
+	PConsolidate_Extension pCons = CONS_GET_EXTENSION(This);
+	//PConsolidate_Device pConsDevice;
+	PMV_Request pReq;
+	MV_U32 i;
+
+	MV_LIST_HEAD_INIT(&pCons->Free_Queue);
+	for ( i=0; i<CONS_MAX_INTERNAL_REQUEST_COUNT; i++ )
+	{
+		pReq = &pCons->Requests[i];
+
+		Consolid_InitialInternalRequest(This, pReq, MV_TRUE);
+		List_AddTail(&pReq->Queue_Pointer, &pCons->Free_Queue);
+	}
+
+	//MV_ASSERT( CONS_SEQUENTIAL_THRESHOLD>MAX_REQUEST_NUMBER );//TBD
+}
+
+/* 
+ * Initialize the Consolidate_Device.
+ * I don't initialize all the devices at once.
+ * Caller should call device one by one.
+ * So in this way, the consolidate module doesn't require that all the Consolidate_Device structures are together
+ * or they are embedded in some caller data structure.
+ * One more advantage is that caller itself can map the Device_Id to related Consolidate_Device buffer.
+ * We don't need contiguous Device_Id.
+ */
+void Consolid_InitializeDevice(MV_PVOID This, MV_U16 Device_Id)
+{
+	PConsolidate_Device pConsDevice = CONS_GET_DEVICE(This, Device_Id);
+
+	MV_ZeroMemory(pConsDevice, sizeof(Consolidate_Device));
+}
+
+
+/*
+ * Caller pushes us to fire the holding request if any.
+ */
+void
+Consolid_PushFireRequest(
+	MV_PVOID This,
+	MV_U16 Device_Id
+	)
+{
+	PConsolidate_Extension pCons = CONS_GET_EXTENSION(This);
+	PConsolidate_Device pConsDevice = CONS_GET_DEVICE(This, Device_Id);
+	PMV_Request pInternal = pConsDevice->Holding_Request;
+
+	if ( pInternal==NULL ) return;
+
+	UpdateConsolidateStatistics(CONSOLIDATE_GOT_PUSHED);
+
+	Consolid_CloseRequest(pCons, pConsDevice, pInternal);
+	/* After Consolid_CloseRequest pConsDevice->Holding_Request is NULL. */
+	CONS_SEND_REQUEST(This, pInternal);
+}
+#endif
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/core/consolid.h linux-2.6.25/drivers/scsi/mv/core/consolid.h
--- linux-2.6.25.orig/drivers/scsi/mv/core/consolid.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/core/consolid.h	2008-07-28 18:42:43.323188803 +0200
@@ -0,0 +1,58 @@
+#ifndef _CONSOLIDATE_H
+#define _CONSOLIDATE_H
+
+/*
+ * Here is the definition for the command consolidate sub module
+ * This is only changed when we modify the consolidate algorithm.
+ */
+#define CONS_MAX_INTERNAL_REQUEST_COUNT	32
+
+#define CONS_MAX_EXTERNAL_REQUEST_SIZE	(1024*64)
+#define CONS_SEQUENTIAL_MAX				0x7FFF		/* Avoid overflow. It's determined by Sequential variable size */
+#define CONS_SEQUENTIAL_THRESHOLD		64			/* Must be bigger than the OS outstanding request count. Refer to Consolid_RequestCallBack */
+
+#define CONS_MAX_INTERNAL_REQUEST_SIZE	(1024*128)	/* The maximum request size hardware can handle. */
+#define CONS_MIN_INTERNAL_REQUEST_SIZE	(1024*128)	/* We'll accumulate the request to this size and then fire. */
+
+
+typedef struct _Consolidate_Extension
+{
+	MV_Request	Requests[CONS_MAX_INTERNAL_REQUEST_COUNT];
+	List_Head	Free_Queue;
+}Consolidate_Extension, *PConsolidate_Extension;
+
+typedef struct _Consolidate_Device
+{
+	MV_LBA		Last_LBA;				/* last LBA*/
+	PMV_Request Holding_Request;		/* Internal request which already consolidate some external requests. */
+	MV_U16		Sequential;				/* sequential counter */
+	MV_BOOLEAN	Is_Read;				/* The last request is read or write. */
+	MV_U8		Reserved0;
+	MV_U16		Reserved1[2];			
+}Consolidate_Device, *PConsolidate_Device;
+
+void
+Consolid_ModuleSendRequest(
+	MV_PVOID This,
+	PMV_Request pReq
+	);
+
+void 
+Consolid_InitializeExtension(
+	MV_PVOID This
+	);
+
+void
+Consolid_InitializeDevice(
+	MV_PVOID This,
+	MV_U16 Device_Id
+	);
+
+void
+Consolid_PushFireRequest(
+	MV_PVOID This,
+	MV_U16 Device_Id
+	);
+
+#endif
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/core/core_api.c linux-2.6.25/drivers/scsi/mv/core/core_api.c
--- linux-2.6.25.orig/drivers/scsi/mv/core/core_api.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/core/core_api.c	2008-07-28 18:42:43.323188803 +0200
@@ -0,0 +1,723 @@
+#include "mv_include.h"
+
+#ifdef CORE_SUPPORT_API
+
+#include "core_api.h"
+#include "core_exp.h"
+#include "core_inter.h"
+#include "core_init.h"
+#include "com_error.h"
+
+typedef MV_BOOLEAN (*CORE_Management_Command_Handler)(PCore_Driver_Extension, PMV_Request);
+#ifndef _OS_BIOS
+CORE_Management_Command_Handler core_pd_cmd_handler[];
+#endif
+
+MV_BOOLEAN
+Core_MapHDId(
+	IN PCore_Driver_Extension pCore,
+	IN MV_U16 HDId,
+	OUT MV_PU8 portId,
+	OUT MV_PU8 deviceId
+	);
+
+MV_VOID
+Core_GetHDInformation(
+	PCore_Driver_Extension pCore,
+	IN PDomain_Port pPort,
+	IN PDomain_Device pDevice,
+    OUT PHD_Info pHD 
+	);
+
+MV_VOID
+Core_GetExpInformation(
+	PCore_Driver_Extension pCore,
+	IN PDomain_Port pPort,
+	IN PDomain_Device pDevice,
+    OUT PExp_Info pExp 
+	);
+
+MV_VOID
+Core_GetPMInformation(
+	PCore_Driver_Extension pCore,
+	IN PDomain_Port pPort,
+    OUT PPM_Info pPM 
+	);
+
+#ifndef BIOS_NOT_SUPPORT
+MV_VOID
+Core_GetHDConfiguration(
+	PCore_Driver_Extension pCore,
+	IN PDomain_Port pPort,
+	IN PDomain_Device pDevice,
+    OUT PHD_Config pHD 
+	);
+
+MV_VOID
+Core_SetHDConfiguration(
+	PCore_Driver_Extension pCore,
+	IN PDomain_Port pPort,
+	IN PDomain_Device pDevice,
+    IN PHD_Config pHD 
+	);
+#endif /* #ifndef BIOS_NOT_SUPPORT */
+
+/*
+ * Exposed Functions
+ */
+MV_VOID
+Core_GetHDInfo(
+	IN MV_PVOID extension,
+	IN MV_U16 HDId,
+    OUT PHD_Info pHD 
+	)
+{
+	PCore_Driver_Extension pCore = (PCore_Driver_Extension)extension;
+	MV_U16 startId, endId;
+	MV_U8 portId, deviceId;
+	PDomain_Port pPort = NULL;
+	PDomain_Device pDevice = NULL;
+	MV_U16 i;
+
+	if ( HDId==0xFF )	/* Get all the HD information */
+	{
+		/* First set invalid flag in buffer */
+		for (i=0; i<MAX_HD_SUPPORTED_API; i++) {
+			pHD[i].ID = i;
+			pHD[i].Type = DEVICE_TYPE_NONE;
+		}
+		startId = 0; 
+		endId = MV_MAX_HD_DEVICE_ID-1;
+	}
+	else
+	{
+		startId = HDId;
+		endId = HDId;
+	}
+
+	for ( i=startId; i<=endId; i++ )
+	{
+		if ( Core_MapHDId(pCore, i, &portId, &deviceId) )
+		{
+			pPort = &pCore->Ports[portId];
+			pDevice = &pPort->Device[deviceId];
+			Core_GetHDInformation( pCore, pPort, pDevice, pHD );
+		} 
+/*
+		else
+		{
+			pHD->ID = i;
+			pHD->Type = DEVICE_TYPE_NONE;
+		}
+*/
+		//MV_DASSERT( pHD->Id==i );
+		pHD++;
+	}
+}
+
+#ifdef SUPPORT_PM
+MV_VOID
+Core_GetExpInfo(
+	IN MV_PVOID extension,
+	IN MV_U16 ExpId,
+    OUT PExp_Info pExp 
+	)
+{
+	PCore_Driver_Extension pCore = (PCore_Driver_Extension)extension;
+	MV_U16 startId, endId;
+	MV_U8 portId, deviceId;
+	PDomain_Port pPort = NULL;
+	PDomain_Device pDevice = NULL;
+	MV_U16 i;
+
+	if ( ExpId==0xFF )	/* Get all the HD information */
+	{
+		startId = 0; 
+		endId = MAX_EXPANDER_SUPPORTED-1;
+	}
+	else
+	{
+		startId = ExpId;
+		endId = ExpId;
+	}
+
+	for ( i=startId; i<=endId; i++ )
+	{
+		// TBD: dunno how to map IDs for expander yet, just used HD map for now
+		if ( Core_MapHDId(pCore, i, &portId, &deviceId) )
+		{
+			pPort = &pCore->Ports[portId];
+			pDevice = &pPort->Device[deviceId];
+			Core_GetExpInformation( pCore, pPort, pDevice, pExp );
+		} 
+		else
+		{
+			pExp->ID = i;
+			pExp->Type = DEVICE_TYPE_NONE;
+		}
+		pExp++;
+	}
+}
+
+MV_VOID
+Core_GetPMInfo(
+	IN MV_PVOID extension,
+	IN MV_U16 PMId,
+    OUT PPM_Info pPM 
+	)
+{
+	PCore_Driver_Extension pCore = (PCore_Driver_Extension)extension;
+	MV_U16 startId, endId;
+	//MV_U8 portId, deviceId;
+	PDomain_Port pPort = NULL;
+	//PDomain_Device pDevice = NULL;
+	MV_U16 i;
+
+	if ( PMId==0xFF )	/* Get all the HD information */
+	{
+		startId = 0; 
+		endId = pCore->Port_Num - 1;
+	}
+	else
+	{
+		startId = PMId;
+		endId = PMId;
+	}
+
+	for ( i=startId; i<=endId; i++ )
+	{
+		pPort = &pCore->Ports[i];
+		if ( pPort->Type != PORT_TYPE_PM )
+		{
+			if ( PMId != 0xFF )
+			{
+				// TBD: not a PM, return error
+			}
+		}
+		else 
+		{
+			Core_GetPMInformation( pCore, pPort, pPM );
+			pPM++;
+		}
+	}
+}
+#endif	/* #ifdef SUPPORT_PM */
+
+#ifndef BIOS_NOT_SUPPORT
+MV_VOID
+Core_GetHDConfig(
+	IN MV_PVOID extension,
+	IN MV_U16 HDId,
+    OUT PHD_Config pHD 
+	)
+{
+	PCore_Driver_Extension pCore = (PCore_Driver_Extension)extension;
+	MV_U16 startId, endId;
+	MV_U8 portId, deviceId;
+	PDomain_Port pPort = NULL;
+	PDomain_Device pDevice = NULL;
+	MV_U16 i;
+
+	if ( HDId==0xFF )	/* Get all the HD configuration */
+	{
+		/* First set invalid flag in buffer */
+		for (i=0; i<MAX_HD_SUPPORTED_API; i++)
+			pHD[i].HDID = 0xFF;
+		startId = 0; 
+		endId = MV_MAX_HD_DEVICE_ID-1;
+	}
+	else
+	{
+		startId = HDId;
+		endId = HDId;
+	}
+
+
+	for ( i=startId; i<=endId; i++ )
+	{
+		if ( Core_MapHDId(pCore, i, &portId, &deviceId) )
+		{
+			pPort = &pCore->Ports[portId];
+			pDevice = &pPort->Device[deviceId];
+			Core_GetHDConfiguration( pCore, pPort, pDevice, pHD );
+		} 
+//		else
+//			pHD->HDID = 0xFF;
+		pHD++;
+	}
+}
+#endif	/* #ifndef BIOS_NOT_SUPPORT */
+/*
+ * Internal Functions
+ */
+MV_BOOLEAN
+Core_MapHDId(
+	IN PCore_Driver_Extension pCore,
+	IN MV_U16 HDId,
+	OUT MV_PU8 portId,
+	OUT MV_PU8 deviceId
+	)
+{
+	if ( portId ) *portId = PATA_MapPortId(HDId);
+	if ( deviceId ) *deviceId = PATA_MapDeviceId(HDId);
+
+	if ( ((portId)&&(*portId>=MAX_PORT_NUMBER))
+		|| ((deviceId)&&(*deviceId>=MAX_DEVICE_PER_PORT))
+		)
+		return MV_FALSE;
+	else
+		return MV_TRUE;
+}
+
+MV_VOID
+Core_GetHDInformation(
+	PCore_Driver_Extension pCore,
+	IN PDomain_Port pPort,
+	IN PDomain_Device pDevice,
+    OUT PHD_Info pHD 
+	)
+{
+	pHD->ID = pDevice->Id ;	
+	if ( !(pDevice->Status&DEVICE_STATUS_FUNCTIONAL) )
+	{
+		pHD->Type = DEVICE_TYPE_NONE;
+		return;
+	}
+
+	// TBD: check if device type is correct; if not, generate sense
+	pHD->Type = DEVICE_TYPE_HD;
+	pHD->ParentID = pPort->Id;
+#if 0
+	pHD->ParentType = DEVICE_TYPE_PORT;
+	pHD->PhyID = 0;
+#else
+	if ( pPort->Type==PORT_TYPE_PM )
+	{
+		pHD->ParentType = DEVICE_TYPE_PM;
+		pHD->PhyID = pDevice->PM_Number;
+	}
+	else
+	{
+		pHD->ParentType = DEVICE_TYPE_PORT;
+		pHD->PhyID = pPort->Id;
+	}
+#endif
+
+	pHD->Status = 0;
+	if ( pPort->Type==PORT_TYPE_PATA )
+        pHD->HDType = HD_TYPE_PATA;
+	else
+		pHD->HDType = HD_TYPE_SATA;
+	if ( pDevice->Device_Type&DEVICE_TYPE_ATAPI )
+		pHD->HDType |= HD_TYPE_ATAPI;
+
+	pHD->PIOMode = pDevice->PIO_Mode;
+	pHD->MDMAMode = pDevice->MDMA_Mode;
+	pHD->UDMAMode = pDevice->UDMA_Mode;
+
+	if ( pDevice->Capacity & DEVICE_CAPACITY_NCQ_SUPPORTED )
+		pHD->FeatureSupport |= HD_FEATURE_NCQ;		
+	if ( pDevice->Capacity & DEVICE_CAPACITY_WRITECACHE_SUPPORTED )
+		pHD->FeatureSupport |= HD_FEATURE_WRITE_CACHE;		
+	if ( pDevice->Capacity & DEVICE_CAPACITY_48BIT_SUPPORTED )
+		pHD->FeatureSupport |= HD_FEATURE_48BITS;		
+	if ( pDevice->Capacity & DEVICE_CAPACITY_SMART_SUPPORTED )
+		pHD->FeatureSupport |= HD_FEATURE_SMART;	
+
+	if ( pDevice->Capacity & DEVICE_CAPACITY_RATE_1_5G )
+		pHD->FeatureSupport |= HD_FEATURE_1_5G;
+	else if ( pDevice->Capacity & DEVICE_CAPACITY_RATE_3G )
+		pHD->FeatureSupport |= HD_FEATURE_3G;
+
+	MV_CopyMemory(pHD->Model, pDevice->Model_Number, 40);
+#ifndef BIOS_NOT_SUPPORT
+	MV_CopyMemory(pHD->SerialNo, pDevice->Serial_Number, 20);
+	MV_CopyMemory(pHD->FWVersion, pDevice->Firmware_Revision, 8);
+#endif
+
+	*(MV_PU32)pHD->WWN = pDevice->WWN;
+	pHD->Size = pDevice->Max_LBA;
+}
+
+#ifdef SUPPORT_PM
+MV_VOID
+Core_GetExpInformation(
+	PCore_Driver_Extension pCore,
+	IN PDomain_Port pPort,
+	IN PDomain_Device pDevice,
+    OUT PExp_Info pExp 
+	)
+{
+	// TBD
+}
+
+MV_VOID
+Core_GetPMInformation(
+	PCore_Driver_Extension pCore,
+	IN PDomain_Port pPort,
+    OUT PPM_Info pPM 
+	)
+{
+#ifndef RAID_SIMULATE_CONFIGURATION
+	//MV_U32 temp;
+	//MV_LPVOID portMmio = pPort->Mmio_Base;
+
+	pPM->Type = DEVICE_TYPE_PM;
+	pPM->ParentType = DEVICE_TYPE_PORT;
+	pPM->ID = pPort->Id;
+	pPM->ParentID = pPort->Id;
+
+	pPM->VendorId = pPort->PM_Vendor_Id;
+	pPM->DeviceId = pPort->PM_Device_Id;
+	pPM->ProductRevision = pPort->PM_Product_Revision;
+	pPM->PMSpecRevision = pPort->PM_Spec_Revision;
+	pPM->NumberOfPorts = pPort->PM_Num_Ports;
+#endif
+}
+#endif	/* #ifdef SUPPORT_PM */
+
+#ifndef BIOS_NOT_SUPPORT
+MV_VOID
+Core_GetHDConfiguration(
+	PCore_Driver_Extension pCore,
+	IN PDomain_Port pPort,
+	IN PDomain_Device pDevice,
+    OUT PHD_Config pHD 
+	)
+{
+	if ( !(pDevice->Status & DEVICE_STATUS_FUNCTIONAL) )
+	{
+		pHD->HDID = 0xFF;
+		return;
+	}
+
+	pHD->HDID = pDevice->Id ;	
+
+	if (pDevice->Setting & DEVICE_SETTING_WRITECACHE_ENABLED)
+		pHD->WriteCacheOn = MV_TRUE;
+	else
+		pHD->WriteCacheOn = MV_FALSE;
+
+	if ( pDevice->Setting & DEVICE_SETTING_SMART_ENABLED )
+		pHD->SMARTOn = MV_TRUE;
+	else
+		pHD->SMARTOn = MV_FALSE;
+}
+#endif /* #ifndef BIOS_NOT_SUPPORT */
+
+MV_BOOLEAN core_pd_request_get_HD_info(PCore_Driver_Extension pCore, PMV_Request pMvReq)
+{
+	PHD_Info pHDInfo = (PHD_Info)pMvReq->Data_Buffer;
+	MV_U16 HDID; 
+
+#ifdef DEBUG_BIOS
+	//MV_DUMP32(0xCCCCEEE1);
+#endif
+	MV_CopyMemory(&HDID, &pMvReq->Cdb[2], 2);
+	Core_GetHDInfo( pCore, HDID, pHDInfo );
+	if (HDID != 0xFF && pHDInfo->Type == DEVICE_TYPE_NONE)
+	{
+		if (pMvReq->Sense_Info_Buffer != NULL)
+			((MV_PU8)pMvReq->Sense_Info_Buffer)[0] = ERR_INVALID_HD_ID;
+		pMvReq->Scsi_Status = REQ_STATUS_ERROR_WITH_SENSE;
+	}
+	else
+		pMvReq->Scsi_Status = REQ_STATUS_SUCCESS;
+	return MV_TRUE;
+}
+
+#ifdef SUPPORT_PM
+MV_BOOLEAN core_pd_request_get_expander_info( PCore_Driver_Extension pCore, PMV_Request pMvReq )
+{
+	PExp_Info pExpInfo = (PExp_Info)pMvReq->Data_Buffer;
+	MV_U16 ExpID; 
+	MV_U8	status = REQ_STATUS_SUCCESS;
+
+	MV_CopyMemory(&ExpID, &pMvReq->Cdb[2], 2);
+	if (ExpID != 0xFF && ExpID > MAX_EXPANDER_SUPPORTED)
+	{
+		status = ERR_INVALID_EXP_ID;
+	}
+	else
+	{
+		Core_GetExpInfo( pCore, ExpID, pExpInfo );
+
+		if (ExpID != 0xFF && pExpInfo->Type == DEVICE_TYPE_NONE)
+		{
+			status = ERR_INVALID_EXP_ID;
+		}
+	}
+
+	if (status != REQ_STATUS_SUCCESS)
+	{
+		if (pMvReq->Sense_Info_Buffer != NULL)
+			((MV_PU8)pMvReq->Sense_Info_Buffer)[0] = status;
+		pMvReq->Scsi_Status = REQ_STATUS_ERROR_WITH_SENSE;
+	}
+	else
+		pMvReq->Scsi_Status = REQ_STATUS_SUCCESS;
+
+	return MV_TRUE;
+}
+
+MV_BOOLEAN core_pd_request_get_PM_info( PCore_Driver_Extension pCore, PMV_Request pMvReq )
+{
+	PPM_Info pPMInfo = (PPM_Info)pMvReq->Data_Buffer;
+	MV_U16 PMID; 
+
+	MV_CopyMemory(&PMID, &pMvReq->Cdb[2], 2);
+	if (PMID != 0xFF && PMID > MAX_PM_SUPPORTED)
+	{
+		if (pMvReq->Sense_Info_Buffer != NULL)
+			((MV_PU8)pMvReq->Sense_Info_Buffer)[0] = ERR_INVALID_PM_ID;
+		pMvReq->Scsi_Status = REQ_STATUS_ERROR_WITH_SENSE;
+		return MV_TRUE;
+	}
+
+	Core_GetPMInfo( pCore, PMID, pPMInfo );
+	
+	pMvReq->Scsi_Status = REQ_STATUS_SUCCESS;
+	return MV_TRUE;
+}
+#endif	/*#ifdef SUPPORT_PM */
+
+#ifndef BIOS_NOT_SUPPORT
+MV_BOOLEAN core_pd_request_get_HD_config( PCore_Driver_Extension pCore, PMV_Request pMvReq )
+{
+	PHD_Config pHDConfig = (PHD_Config)pMvReq->Data_Buffer;
+	MV_U16 ConfigID; 
+
+	MV_CopyMemory(&ConfigID, &pMvReq->Cdb[2], 2);
+	Core_GetHDConfig( pCore, ConfigID, pHDConfig );
+
+	if (ConfigID != 0xFF && pHDConfig->HDID == 0xFF)
+	{
+		if (pMvReq->Sense_Info_Buffer != NULL)
+			((MV_PU8)pMvReq->Sense_Info_Buffer)[0] = ERR_INVALID_HD_ID;
+		pMvReq->Scsi_Status = REQ_STATUS_ERROR_WITH_SENSE;
+	}
+	else
+		pMvReq->Scsi_Status = REQ_STATUS_SUCCESS;
+
+	return MV_TRUE;
+}
+
+MV_BOOLEAN core_pd_request_get_HD_status( PCore_Driver_Extension pCore, PMV_Request pMvReq )
+{
+	PHD_Status pHDStatus = (PHD_Status)pMvReq->Data_Buffer;
+	PDomain_Port pPort = NULL;
+	PDomain_Device pDevice = NULL;
+	MV_U8 portId, deviceId;
+	MV_U16 HDId; 
+	MV_U8	cacheMode = 0;
+	MV_U8 status = REQ_STATUS_SUCCESS;
+
+	MV_CopyMemory(&HDId, &pMvReq->Cdb[2], 2);
+
+	if ( Core_MapHDId(pCore, HDId, &portId, &deviceId) )
+	{
+		pPort = &pCore->Ports[portId];
+		pDevice = &pPort->Device[deviceId];
+
+		if (pDevice->Setting & DEVICE_SETTING_SMART_ENABLED)
+		{
+			if ( !(pDevice->Status & DEVICE_STATUS_FUNCTIONAL) )
+			{
+				status = ERR_INVALID_HD_ID;
+			}
+			else 
+			{
+				if (pMvReq->Cdb[4] == APICDB4_PD_SMART_RETURN_STATUS)
+				{
+					cacheMode = CDB_CORE_SMART_RETURN_STATUS;
+				}
+				else
+					status = ERR_INVALID_REQUEST;
+			}
+
+			if (status == REQ_STATUS_SUCCESS)
+			{
+				// Convert it into SCSI_CMD_MARVELL_SPECIFIC request.
+				pMvReq->Cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
+				pMvReq->Cdb[1] = CDB_CORE_MODULE;
+				pMvReq->Cdb[2] = cacheMode;
+				pMvReq->Device_Id = pDevice->Id;
+				if (pHDStatus)
+					pHDStatus->HDID = pDevice->Id;
+			}
+		}
+		else
+			status = ERR_INVALID_REQUEST;
+	} 
+	else
+	{
+		status = ERR_INVALID_HD_ID;
+	}
+
+	if (status != REQ_STATUS_SUCCESS)
+	{
+		if (pMvReq->Sense_Info_Buffer != NULL)
+			((MV_PU8)pMvReq->Sense_Info_Buffer)[0] = status;
+		pMvReq->Scsi_Status = REQ_STATUS_ERROR_WITH_SENSE;
+		return MV_TRUE;	
+	}
+	else
+	{
+		pMvReq->Scsi_Status = REQ_STATUS_SUCCESS;
+		return MV_FALSE;	// Need to access hardware.
+	}
+}
+
+MV_BOOLEAN core_pd_request_set_HD_config( PCore_Driver_Extension pCore, PMV_Request pMvReq )
+{
+	//PHD_Config pHDConfig = (PHD_Config)pMvReq->Data_Buffer;
+	PDomain_Port pPort = NULL;
+	PDomain_Device pDevice = NULL;
+	MV_U8 portId, deviceId;
+	MV_U16 HDId; 
+	MV_U8	cacheMode = 0;
+	MV_U8 status = REQ_STATUS_PENDING;
+
+	MV_CopyMemory(&HDId, &pMvReq->Cdb[2], 2);
+
+	if ( Core_MapHDId(pCore, HDId, &portId, &deviceId) )
+	{
+		pPort = &pCore->Ports[portId];
+		pDevice = &pPort->Device[deviceId];
+
+		if ( !(pDevice->Status & DEVICE_STATUS_FUNCTIONAL) )
+		{
+			status = ERR_INVALID_HD_ID;
+		}
+		else if ( pDevice->Device_Type & DEVICE_TYPE_ATAPI )
+		{
+			status = ERR_INVALID_REQUEST;
+		}
+		else 
+		{
+			if (pMvReq->Cdb[4] == APICDB4_PD_SET_WRITE_CACHE_OFF)
+			{
+				cacheMode = CDB_CORE_DISABLE_WRITE_CACHE;
+			}
+			else if (pMvReq->Cdb[4] == APICDB4_PD_SET_WRITE_CACHE_ON)
+			{
+				cacheMode = CDB_CORE_ENABLE_WRITE_CACHE;
+			}
+			else if (pMvReq->Cdb[4] == APICDB4_PD_SET_SMART_OFF)
+			{
+				if ( !(pDevice->Setting&DEVICE_SETTING_SMART_ENABLED) )
+					status = REQ_STATUS_SUCCESS;
+				cacheMode = CDB_CORE_DISABLE_SMART;
+			}
+			else if (pMvReq->Cdb[4] == APICDB4_PD_SET_SMART_ON)
+			{
+				if ( pDevice->Setting&DEVICE_SETTING_SMART_ENABLED )
+					status = REQ_STATUS_SUCCESS;
+				cacheMode = CDB_CORE_ENABLE_SMART;
+			}
+			else
+			{
+				status = ERR_INVALID_REQUEST;
+			}
+		}
+
+		if (status == REQ_STATUS_PENDING)
+		{
+			// Convert it into SCSI_CMD_MARVELL_SPECIFIC request.
+			pMvReq->Cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
+			pMvReq->Cdb[1] = CDB_CORE_MODULE;
+			pMvReq->Cdb[2] = cacheMode;
+			pMvReq->Device_Id = pDevice->Id;
+		}
+	} 
+	else
+	{
+		status = ERR_INVALID_HD_ID;
+	}
+
+	if (status == REQ_STATUS_SUCCESS)
+	{
+		pMvReq->Scsi_Status = status;
+		return MV_TRUE;
+	}
+	else if (status == REQ_STATUS_PENDING)
+	{
+		//pMvReq->Scsi_Status = REQ_STATUS_SUCCESS;
+		return MV_FALSE;	// Need to access hardware.
+	}
+	else
+	{
+		if (pMvReq->Sense_Info_Buffer != NULL)
+			((MV_PU8)pMvReq->Sense_Info_Buffer)[0] = status;
+		pMvReq->Scsi_Status = REQ_STATUS_ERROR_WITH_SENSE;
+		return MV_TRUE;	
+	}
+}
+
+MV_BOOLEAN core_pd_request_BSL_dump( PCore_Driver_Extension pCore, PMV_Request pMvReq )
+{
+	// TBD
+	pMvReq->Scsi_Status = REQ_STATUS_ERROR;
+	return MV_TRUE;
+}
+
+MV_BOOLEAN core_pd_request_HD_MP_check( PCore_Driver_Extension pCore, PMV_Request pMvReq )
+{
+	// TBD
+	pMvReq->Scsi_Status = REQ_STATUS_ERROR;
+	return MV_TRUE;
+}
+
+MV_BOOLEAN core_pd_request_HD_get_MP_status( PCore_Driver_Extension pCore, PMV_Request pMvReq )
+{
+	// TBD
+	pMvReq->Scsi_Status = REQ_STATUS_ERROR;
+	return MV_TRUE;
+}
+#endif	/* #ifndef BIOS_NOT_SUPPORT */
+
+CORE_Management_Command_Handler BASEATTR core_pd_cmd_handler[APICDB1_PD_MAX] = 
+{
+	core_pd_request_get_HD_info,
+#ifdef SUPPORT_PM
+	core_pd_request_get_expander_info,
+	core_pd_request_get_PM_info,
+#else
+	NULL,
+	NULL,
+#endif
+#ifndef BIOS_NOT_SUPPORT
+	core_pd_request_get_HD_config,
+	core_pd_request_set_HD_config,
+	core_pd_request_BSL_dump,
+	core_pd_request_HD_MP_check,
+	core_pd_request_HD_get_MP_status,
+	core_pd_request_get_HD_status
+#else
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	NULL
+#endif
+};
+
+MV_BOOLEAN 
+Core_pd_command(
+	IN MV_PVOID extension,
+	IN PMV_Request pReq
+	)
+{
+	PCore_Driver_Extension pCore = (PCore_Driver_Extension)extension;
+
+	if ( pReq->Cdb[1] >= APICDB1_PD_MAX ) 
+	{
+		pReq->Scsi_Status = REQ_STATUS_INVALID_PARAMETER;
+		return MV_TRUE;
+	}
+	return core_pd_cmd_handler[pReq->Cdb[1]](pCore, pReq);
+}
+
+#endif
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/core/core_api.h linux-2.6.25/drivers/scsi/mv/core/core_api.h
--- linux-2.6.25.orig/drivers/scsi/mv/core/core_api.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/core/core_api.h	2008-07-28 18:42:43.323188803 +0200
@@ -0,0 +1,45 @@
+#if !defined(CORE_API_H)
+#define CORE_API_H
+
+#ifdef CORE_SUPPORT_API
+
+#define HD_WRITECACHE_OFF		0
+#define HD_WRITECACHE_ON		1
+
+MV_VOID
+Core_GetHDInfo(
+	IN MV_PVOID extension,
+	IN MV_U16 HDId,
+    OUT PHD_Info pHD );
+
+MV_VOID
+Core_GetExpInfo(
+	IN MV_PVOID extension,
+	IN MV_U16 ExpId,
+    OUT PExp_Info pExp 
+	);
+
+MV_VOID
+Core_GetPMInfo(
+	IN MV_PVOID extension,
+	IN MV_U16 PMId,
+    OUT PPM_Info pPM 
+	);
+
+MV_VOID
+Core_GetHDConfig(
+	IN MV_PVOID extension,
+	IN MV_U16 HDId,
+    OUT PHD_Config pHD 
+	);
+
+MV_BOOLEAN 
+Core_pd_command(
+	IN MV_PVOID extension, 
+	IN PMV_Request pReq
+	);
+
+#endif
+
+#endif
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/core/core_ata.h linux-2.6.25/drivers/scsi/mv/core/core_ata.h
--- linux-2.6.25.orig/drivers/scsi/mv/core/core_ata.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/core/core_ata.h	2008-07-28 18:42:43.324188843 +0200
@@ -0,0 +1,170 @@
+#if !defined(CORE_ATA_H)
+#define CORE_ATA_H
+
+
+/*
+ * ATA IDE Command definition 
+ */
+/* PIO command */
+#define ATA_CMD_READ_PIO				0x20
+#define ATA_CMD_READ_PIO_EXT			0x24
+#define ATA_CMD_READ_PIO_MULTIPLE_EXT 	0x29
+#define ATA_CMD_WRITE_PIO				0x30
+#define ATA_CMD_WRITE_PIO_EXT			0x34
+#define ATA_CMD_WRITE_PIO_MULTIPLE_EXT	0x39
+
+/* DMA read write command */
+#define ATA_CMD_READ_DMA				0xC8	/* 28 bit DMA read */
+#define ATA_CMD_READ_DMA_QUEUED			0xC7	/* 28 bit TCQ DMA read */
+#define ATA_CMD_READ_DMA_EXT			0x25	/* 48 bit DMA read */
+#define ATA_CMD_READ_DMA_QUEUED_EXT		0x26	/* 48 bit TCQ DMA read */
+#define ATA_CMD_READ_FPDMA_QUEUED		0x60	/* NCQ DMA read: SATA only. Always 48 bit */
+
+#define ATA_CMD_WRITE_DMA				0xCA	
+#define ATA_CMD_WRITE_DMA_QUEUED		0xCC
+#define ATA_CMD_WRITE_DMA_EXT  			0x35
+#define ATA_CMD_WRITE_DMA_QUEUED_EXT	0x36
+#define ATA_CMD_WRITE_FPDMA_QUEUED		0x61
+
+/* Identify command */
+#define ATA_CMD_IDENTIFY_ATA			0xEC
+#define ATA_CMD_IDENTIY_ATAPI			0xA1
+
+#define ATA_CMD_VERIFY					0x40	/* 28 bit read verify */
+#define ATA_CMD_VERIFY_EXT				0x42	/* 48 bit read verify */
+
+#define ATA_CMD_FLUSH					0xE7	/* 28 bit flush */
+#define ATA_CMD_FLUSH_EXT				0xEA	/* 48 bit flush */
+
+#define ATA_CMD_PACKET					0xA0
+#define ATA_CMD_SMART					0xB0
+	#define ATA_CMD_ENABLE_SMART				0xD8
+	#define ATA_CMD_DISABLE_SMART				0xD9
+	#define ATA_CMD_SMART_RETURN_STATUS			0xDA
+
+#define ATA_CMD_SET_FEATURES			0xEF
+	#define ATA_CMD_ENABLE_WRITE_CACHE			0x02
+	#define ATA_CMD_SET_TRANSFER_MODE			0x03
+	#define ATA_CMD_DISABLE_READ_LOOK_AHEAD		0x55
+	#define ATA_CMD_DISABLE_WRITE_CACHE			0x82
+	#define ATA_CMD_ENABLE_READ_LOOK_AHEAD		0xAA
+
+#define ATA_CMD_STANDBY_IMMEDIATE		0xE0
+#define ATA_CMD_SEEK					0x70
+#define ATA_CMD_READ_LOG_EXT			0x2F
+
+struct _ATA_TaskFile;
+typedef struct _ATA_TaskFile ATA_TaskFile, *PATA_TaskFile;
+
+struct _ATA_TaskFile {
+	MV_U8	Features;
+	MV_U8	Sector_Count;
+	MV_U8	LBA_Low;
+	MV_U8	LBA_Mid;
+	MV_U8	LBA_High;
+	MV_U8	Device;
+	MV_U8	Command;
+
+	MV_U8	Control;
+
+	/* extension */
+	MV_U8	Feature_Exp;
+	MV_U8	Sector_Count_Exp;
+	MV_U8	LBA_Low_Exp;
+	MV_U8	LBA_Mid_Exp;
+	MV_U8	LBA_High_Exp;
+};
+
+/* ATA device identify frame */
+typedef struct _ATA_Identify_Data {
+	MV_U16 General_Config;							/*	0	*/
+	MV_U16 Obsolete0;								/*	1	*/
+	MV_U16 Specific_Config;							/*	2	*/
+	MV_U16 Obsolete1;								/*	3	*/
+	MV_U16 Retired0[2];								/*	4-5	*/
+	MV_U16 Obsolete2;								/*	6	*/
+	MV_U16 Reserved0[2];							/*	7-8	*/
+	MV_U16 Retired1;								/*	9	*/
+	MV_U8 Serial_Number[20];				        /*	10-19	*/
+	MV_U16 Retired2[2];								/*	20-21	*/
+	MV_U16 Obsolete3;								/*	22	*/
+	MV_U8 Firmware_Revision[8];						/*	23-26	*/
+	MV_U8 Model_Number[40];							/*	27-46	*/
+	MV_U16 Maximum_Block_Transfer;					/*	47	*/
+	MV_U16 Reserved1;								/*	48	*/
+	MV_U16 Capabilities[2];							/*	49-50	*/
+	MV_U16 Obsolete4[2];							/*	51-52	*/
+	MV_U16 Fields_Valid;							/*	53	*/
+	MV_U16 Obsolete5[5];							/*	54-58	*/
+	MV_U16 Current_Multiple_Sector_Setting;			/*	59	*/
+	MV_U16 User_Addressable_Sectors[2];				/*	60-61	*/
+	MV_U16 ATAPI_DMADIR;							/*	62	*/
+	MV_U16 Multiword_DMA_Modes;						/*	63	*/
+	MV_U16 PIO_Modes;								/*	64	*/
+	MV_U16 Minimum_Multiword_DMA_Cycle_Time;		/*	65	*/
+	MV_U16 Recommended_Multiword_DMA_Cycle_Time;	/*	66	*/
+	MV_U16 Minimum_PIO_Cycle_Time;					/*	67	*/
+	MV_U16 Minimum_PIO_Cycle_Time_IORDY;			/*	68	*/
+	MV_U16 Reserved2[2];							/*	69-70	*/
+	MV_U16 ATAPI_Reserved[4];						/*	71-74	*/
+	MV_U16 Queue_Depth;								/*	75	*/
+	MV_U16 SATA_Capabilities;						/*	76	*/
+	MV_U16 SATA_Reserved;							/*	77	*/
+	MV_U16 SATA_Feature_Supported;					/*	78	*/
+	MV_U16 SATA_Feature_Enabled;					/*	79	*/
+ 	MV_U16 Major_Version;							/*	80	*/
+	MV_U16 Minor_Version;							/*	81	*/
+	MV_U16 Command_Set_Supported[2];				/*	82-83	*/
+	MV_U16 Command_Set_Supported_Extension;			/*	84	*/
+	MV_U16 Command_Set_Enabled[2];					/*	85-86	*/
+	MV_U16 Command_Set_Default;						/*	87	*/
+	MV_U16 UDMA_Modes;								/*	88	*/
+	MV_U16 Time_For_Security_Erase;					/*	89	*/
+	MV_U16 Time_For_Enhanced_Security_Erase;		/*	90	*/
+	MV_U16 Current_Advanced_Power_Manage_Value;		/*	91	*/
+	MV_U16 Master_Password_Revision;				/*	92	*/
+	MV_U16 Hardware_Reset_Result;					/*	93	*/
+	MV_U16 Acoustic_Manage_Value;					/*	94	*/
+	MV_U16 Stream_Minimum_Request_Size;				/*	95	*/
+	MV_U16 Stream_Transfer_Time_DMA;				/*	96	*/
+	MV_U16 Stream_Access_Latency;					/*	97	*/
+	MV_U16 Stream_Performance_Granularity[2];		/*	98-99	*/
+	MV_U16 Max_LBA[4];								/*	100-103	*/
+	MV_U16 Stream_Transfer_Time_PIO;				/*	104	*/	
+	MV_U16 Reserved3;								/*	105	*/
+	MV_U16 Physical_Logical_Sector_Size;			/*	106	*/
+	MV_U16 Delay_Acoustic_Testing;					/*	107	*/
+	MV_U16 NAA;										/*	108	*/
+	MV_U16 Unique_ID1;								/*	109	*/
+	MV_U16 Unique_ID2;								/*	110	*/
+	MV_U16 Unique_ID3;								/*	111	*/
+	MV_U16 Reserved4[4];							/*	112-115	*/
+	MV_U16 Reserved5;								/*	116	*/
+	MV_U16 Words_Per_Logical_Sector[2];				/*	117-118	*/
+	MV_U16 Reserved6[8];							/*	119-126	*/
+	MV_U16 Removable_Media_Status_Notification;		/*	127	*/
+	MV_U16 Security_Status;							/*	128	*/
+	MV_U16 Vendor_Specific[31];						/*	129-159	*/
+	MV_U16 CFA_Power_Mode;							/*	160	*/
+	MV_U16 Reserved7[15];							/*	161-175	*/
+	MV_U16 Current_Media_Serial_Number[30];			/*	176-205	*/
+	MV_U16 Reserved8[49];							/*	206-254	*/
+	MV_U16 Integrity_Word;							/*	255	*/
+} ATA_Identify_Data, *PATA_Identify_Data;
+
+#define ATA_REGISTER_DATA			0x08
+#define ATA_REGISTER_ERROR			0x09
+#define ATA_REGISTER_FEATURES		0x09
+#define ATA_REGISTER_SECTOR_COUNT	0x0A
+#define ATA_REGISTER_LBA_LOW		0x0B
+#define ATA_REGISTER_LBA_MID		0x0C
+#define ATA_REGISTER_LBA_HIGH		0x0D
+#define ATA_REGISTER_DEVICE			0x0E
+#define ATA_REGISTER_STATUS			0x0F
+#define ATA_REGISTER_COMMAND		0x0F
+
+#define ATA_REGISTER_ALT_STATUS		0x16
+#define ATA_REGISTER_DEVICE_CONTROL	0x16
+
+#endif
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/core/core_cons.h linux-2.6.25/drivers/scsi/mv/core/core_cons.h
--- linux-2.6.25.orig/drivers/scsi/mv/core/core_cons.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/core/core_cons.h	2008-07-28 18:42:43.324188843 +0200
@@ -0,0 +1,29 @@
+#ifndef _CORE_DRIVER_CONSOLIDATE_H
+#define _CORE_DRIVER_CONSOLIDATE_H
+
+#include "core_inter.h"
+#include "core_exp.h"
+
+/*
+ * When you plug-in this command consolidate sub-module to some module
+ * please define the following definitions.
+ * This is maintained by caller.
+ */
+/* Get the consolidate sub module extension */
+#define CONS_GET_EXTENSION(This)					\
+	(((PCore_Driver_Extension)(This))->pConsolid_Extent)
+
+/* Get the device related information consolidate module needs */
+#define CONS_GET_DEVICE(This, Device_Id)	\
+	&(((PCore_Driver_Extension)(This))->pConsolid_Device[(Device_Id)])
+
+/* For this device or port, is there any request running? If yes, busy. */
+#define CONS_DEVICE_IS_BUSY(This, deviceId)	\
+	(((PCore_Driver_Extension)(This))->Ports[PATA_MapPortId(deviceId)].Running_Slot!=0)
+
+extern void Core_InternalSendRequest(MV_PVOID This, PMV_Request pReq);
+/* In case there is something wrong. We need resend these requests and by pass them. */
+#define CONS_SEND_REQUEST	Core_InternalSendRequest
+
+#endif
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/core/core_exp.c linux-2.6.25/drivers/scsi/mv/core/core_exp.c
--- linux-2.6.25.orig/drivers/scsi/mv/core/core_exp.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/core/core_exp.c	2008-07-28 18:42:43.327188786 +0200
@@ -0,0 +1,3990 @@
+#include "mv_include.h"
+#ifdef _OS_BIOS
+#include "biosmain.h"
+extern PMV_DATA_STRUCT BASEATTR MyDriverDataBaseOff;
+#endif
+
+#include "com_event_define.h"
+
+#include "core_exp.h"
+#include "core_inter.h"
+#include "com_tag.h"
+
+#include "core_sata.h"
+#include "core_ata.h"
+
+#include "core_init.h"
+
+#if defined(_OS_LINUX) 
+#include "hba_header.h" /* to be removed */
+#include "hba_exp.h"
+#endif /* _OS_LINUX */
+
+#ifdef __AC_DBG__
+#include "linux_helper.h"
+#endif /* __AC_DBG__ */
+
+#ifdef CORE_SUPPORT_API
+#include "core_api.h"
+#endif
+
+#ifdef SOFTWARE_XOR
+#include "core_xor.h"
+#endif
+
+#define FIS_REG_H2D_SIZE_IN_DWORD	5
+#ifndef _OS_BIOS
+/* For debug purpose only. */
+PCore_Driver_Extension gCore = NULL;
+#endif
+
+extern MV_VOID SCSI_To_FIS(MV_PVOID pCore, PMV_Request pReq, MV_U8 tag, PATA_TaskFile pTaskFile);
+
+extern MV_BOOLEAN Category_CDB_Type(
+	IN PDomain_Device pDevice,
+	IN PMV_Request pReq
+	);
+
+extern MV_BOOLEAN ATAPI_CDB2TaskFile(
+	IN PDomain_Device pDevice,
+	IN PMV_Request pReq, 
+	OUT PATA_TaskFile pTaskFile
+	);
+
+extern MV_BOOLEAN ATA_CDB2TaskFile(
+	IN PDomain_Device pDevice,
+	IN PMV_Request pReq, 
+	IN MV_U8 tag,	//TBD: Do we really need it?
+	OUT PATA_TaskFile pTaskFile
+	);
+
+extern void Device_IssueReadLogExt(
+	IN PDomain_Port pPort,
+	IN PDomain_Device pDevice
+	);
+
+extern MV_BOOLEAN mvDeviceStateMachine(
+	PCore_Driver_Extension pCore,
+	PDomain_Device pDevice
+	);
+
+void CompleteRequest(
+	IN PCore_Driver_Extension pCore,
+	IN PMV_Request pReq,
+	IN PATA_TaskFile taskFiles
+	);
+
+void CompleteRequestAndSlot(
+	IN PCore_Driver_Extension pCore,
+	IN PDomain_Port pPort,
+	IN PMV_Request pReq,
+	IN PATA_TaskFile taskFiles,
+	IN MV_U8 slotId
+	);
+
+#if defined(SUPPORT_ERROR_HANDLING) && defined(_OS_LINUX)
+void Core_ResetChannel(MV_PVOID Device);
+
+static MV_VOID __core_req_timeout_handler(MV_PVOID data)
+{
+	PMV_Request req = (PMV_Request) data;
+	PCore_Driver_Extension pcore;
+	PDomain_Device dev;
+	PHBA_Extension phba;
+
+	if ( NULL == req )
+		return;
+
+	pcore = HBA_GetModuleExtension(req->Cmd_Initiator, MODULE_CORE);
+	dev   = &pcore->Ports[PATA_MapPortId(req->Device_Id)].Device[PATA_MapDeviceId(req->Device_Id)];
+	phba = HBA_GetModuleExtension(req->Cmd_Initiator, MODULE_HBA);
+	
+	hba_spin_lock_irq(&phba->lock);
+	Core_ResetChannel((MV_PVOID) dev);
+	hba_spin_unlock_irq(&phba->lock);
+}
+#endif /* SUPPORT_ERROR_HANDLING && _OS_LINUX */
+
+#ifdef SUPPORT_SCSI_PASSTHROUGH
+// Read TaskFile
+void readTaskFiles(IN PDomain_Port pPort, PDomain_Device pDevice, PATA_TaskFile pTaskFiles)
+{
+	MV_U32 taskFile[3];
+
+	if (pPort->Type==PORT_TYPE_PATA)
+	{
+		if ( pDevice->Is_Slave )
+		{
+			taskFile[1] = MV_REG_READ_DWORD(pPort->Mmio_Base, PORT_SLAVE_TF1);
+			taskFile[2] = MV_REG_READ_DWORD(pPort->Mmio_Base, PORT_SLAVE_TF2);
+		}
+		else
+		{
+			taskFile[1] = MV_REG_READ_DWORD(pPort->Mmio_Base, PORT_MASTER_TF1);
+			taskFile[2] = MV_REG_READ_DWORD(pPort->Mmio_Base, PORT_MASTER_TF2);
+		}
+
+		pTaskFiles->Sector_Count = (MV_U8)((taskFile[1] >> 24) & 0xFF);
+		pTaskFiles->Sector_Count_Exp = (MV_U8)((taskFile[1] >> 16) & 0xFF);
+		pTaskFiles->LBA_Low = (MV_U8)((taskFile[1] >> 8) & 0xFF);
+		pTaskFiles->LBA_Low_Exp = (MV_U8)(taskFile[1] & 0xFF);
+
+		pTaskFiles->LBA_Mid = (MV_U8)((taskFile[2] >> 24) & 0xFF);
+		pTaskFiles->LBA_Mid_Exp = (MV_U8)((taskFile[2] >> 16) & 0xFF);
+		pTaskFiles->LBA_High = (MV_U8)((taskFile[2] >> 8) & 0xFF);
+		pTaskFiles->LBA_High_Exp = (MV_U8)(taskFile[2] & 0xFF);
+	}
+	else
+	{
+//		taskFile[0] = MV_REG_READ_DWORD(pPort->Mmio_Base, PORT_TFDATA);
+		taskFile[1] = MV_REG_READ_DWORD(pPort->Mmio_Base, PORT_SIG);
+//		taskFile[2] = MV_REG_READ_DWORD(pPort->Mmio_Base, PORT_SCR);
+
+		pTaskFiles->Sector_Count = (MV_U8)((taskFile[1]) & 0xFF);
+		pTaskFiles->LBA_Low = (MV_U8)((taskFile[1] >> 8) & 0xFF);
+		pTaskFiles->LBA_Mid = (MV_U8)((taskFile[1] >> 16) & 0xFF);
+		pTaskFiles->LBA_High = (MV_U8)((taskFile[1] >> 24) & 0xFF);
+
+	}
+}
+#endif
+
+//TBD: In Dump. How many request can be? Not always 1.
+MV_U32 Core_ModuleGetResourceQuota(enum Resource_Type type, MV_U16 maxIo)
+{
+	MV_U32 size = 0;
+	MV_U8 sgEntryCount;
+	
+	/* Extension quota */
+	if ( type==RESOURCE_CACHED_MEMORY )		
+	{
+		size = ROUNDING(sizeof(Core_Driver_Extension), 8);
+	#ifdef SUPPORT_CONSOLIDATE
+		if ( maxIo>1 )
+		{
+			size += ROUNDING(sizeof(Consolidate_Extension), 8);
+			size += ROUNDING(sizeof(Consolidate_Device), 8)*MAX_DEVICE_NUMBER;
+		}
+	#endif
+
+		/* resource for SG Entry */
+		if (maxIo==1)
+			sgEntryCount = MAX_SG_ENTRY_REDUCED;
+		else
+			sgEntryCount = MAX_SG_ENTRY;
+		size += sizeof(MV_SG_Entry) * sgEntryCount * INTERNAL_REQ_COUNT;
+
+		size += sizeof(MV_Request) * INTERNAL_REQ_COUNT;
+
+#ifdef SUPPORT_CONSOLIDATE
+		/* resource for Consolidate_Extension->Requests[] SG Entry */
+		if ( maxIo>1 )
+			size += sizeof(MV_SG_Entry) * sgEntryCount * CONS_MAX_INTERNAL_REQUEST_COUNT;
+#endif
+
+		MV_DUMPC32(0xCCCC8801);
+		MV_DUMPC32(size);
+		//MV_HALTKEY;
+        return size;
+	}
+	
+	/* Uncached memory quota */
+	if ( type==RESOURCE_UNCACHED_MEMORY )
+	{
+		/* 
+		 * SATA port alignment quota:
+		 * Command list and received FIS is 64 byte aligned.
+		 * Command table is 128 byte aligned.
+		 * Data buffer is 8 byte aligned.
+		 * This is different with AHCI.
+		 */
+        /* 
+		 * PATA port alignment quota: Same with SATA.
+		 * The only difference is that PATA doesn't have the FIS.
+		 */
+	#ifndef _OS_BIOS
+		MV_DPRINT(("Command List Size = 0x%x.\n", (MV_U32)SATA_CMD_LIST_SIZE));
+		MV_DPRINT(("Received FIS Size = 0x%x.\n", (MV_U32)SATA_RX_FIS_SIZE));
+		MV_DPRINT(("Command Table Size = 0x%x.\n", (MV_U32)SATA_CMD_TABLE_SIZE));
+		MV_ASSERT(SATA_CMD_LIST_SIZE==ROUNDING(SATA_CMD_LIST_SIZE, 64));
+		MV_ASSERT(SATA_RX_FIS_SIZE==ROUNDING(SATA_RX_FIS_SIZE, 64));
+		MV_ASSERT(SATA_CMD_TABLE_SIZE==ROUNDING(SATA_CMD_TABLE_SIZE, 128));
+		MV_ASSERT(SATA_SCRATCH_BUFFER_SIZE==ROUNDING(SATA_SCRATCH_BUFFER_SIZE, 8));
+	#endif
+		if ( maxIo>1 )
+		{
+			size = 64 + SATA_CMD_LIST_SIZE*MAX_PORT_NUMBER;								/* Command List*/
+			size += 64 + SATA_RX_FIS_SIZE*MAX_SATA_PORT_NUMBER;							/* Received FIS */
+			size += 128 + SATA_CMD_TABLE_SIZE*MAX_SLOT_NUMBER*MAX_PORT_NUMBER;			/* Command Table */
+			size += 8 + SATA_SCRATCH_BUFFER_SIZE*MAX_DEVICE_NUMBER;						/* Buffer for initialization like identify */
+		}
+		else
+		{
+		#ifndef HIBERNATION_ROUNTINE
+			size = 64 + SATA_CMD_LIST_SIZE*MAX_PORT_NUMBER;
+			size += 64 + SATA_RX_FIS_SIZE*MAX_SATA_PORT_NUMBER;
+			size += 128 + SATA_CMD_TABLE_SIZE*MAX_PORT_NUMBER;
+			size += 8 + SATA_SCRATCH_BUFFER_SIZE*MAX_DEVICE_NUMBER;
+		#else
+			size = 64 + SATA_CMD_LIST_SIZE;			/* Command List*/
+			size += 64 + SATA_RX_FIS_SIZE;			/* Received FIS */
+			size += 128 + SATA_CMD_TABLE_SIZE; 		/* Command Table */	
+			size += 8 + SATA_SCRATCH_BUFFER_SIZE;	/* Buffer for initialization like identify */
+		#endif
+		}
+
+		//MV_DUMPC32(0xCC000002);
+		//MV_DUMPC32(size);
+		
+		MV_DUMPC32(0xCCCC8802);
+		MV_DUMPC32(size);
+		//MV_HALTKEY;
+		return size;
+	}
+
+	return 0;
+}
+
+//TBD: In Dump
+void Core_ModuleInitialize(MV_PVOID This, MV_U32 extensionSize, MV_U16 maxIo)
+{
+	PCore_Driver_Extension pCore = (PCore_Driver_Extension)This;
+	PMV_Request pReq;
+	Assigned_Uncached_Memory dmaResource;
+	PDomain_Port port;
+	MV_PVOID memVir;
+	MV_PHYSICAL_ADDR memDMA;
+	Controller_Infor controller;
+	MV_PTR_INTEGER temp, tmpSG;
+	MV_U32 offset, internalReqSize;
+	MV_U8 i,j, flagSaved, sgEntryCount;
+	MV_U32 vsr_c[MAX_SATA_PORT_NUMBER];
+	MV_U8 vsrSkipPATAPort = 0;
+	MV_PVOID pTopLayer = HBA_GetModuleExtension(pCore, MODULE_HBA);
+	
+#ifndef _OS_BIOS
+	gCore = pCore;
+#endif
+	MV_DUMPC32(0xCCCCBB12);
+
+	flagSaved=pCore->VS_Reg_Saved;
+
+	if(flagSaved==VS_REG_SIG)
+	{
+		for ( j=0; j<MAX_SATA_PORT_NUMBER; j++ )
+		{
+			port = &pCore->Ports[j];
+			vsr_c[j]=port->VS_RegC;
+		}
+		/* Save the PATA Port detection skip flag */
+		vsrSkipPATAPort = pCore->Flag_Fastboot_Skip & FLAG_SKIP_PATA_PORT;
+	}
+
+	/* 
+	 * Zero core driver extension. After that, I'll ignore many variables initialization. 
+	 */
+	MV_ZeroMemory(This, extensionSize);
+
+	if(flagSaved==VS_REG_SIG)
+	{
+		pCore->VS_Reg_Saved=flagSaved;
+
+		for ( j=0; j<MAX_SATA_PORT_NUMBER; j++ )
+		{
+			port = &pCore->Ports[j];
+			port->VS_RegC=vsr_c[j];
+		}
+		/* Restore the PATA Port detection skip flag */
+		/* Only this flag should survive the S3 */
+		/* The others should be kept as default (0) */
+		pCore->Flag_Fastboot_Skip = vsrSkipPATAPort;
+	}
+
+	pCore->State = CORE_STATE_IDLE;
+
+	/* Set up controller information */
+	HBA_GetControllerInfor(pCore, &controller);
+	pCore->Vendor_Id = controller.Vendor_Id;
+	pCore->Device_Id = controller.Device_Id;
+	pCore->Revision_Id = controller.Revision_Id;
+	for ( i=0; i<MAX_BASE_ADDRESS; i++ )
+	{
+		pCore->Base_Address[i] = controller.Base_Address[i];
+	}
+	pCore->Mmio_Base = controller.Base_Address[MV_PCI_BAR];
+
+	pCore->Adapter_State = ADAPTER_INITIALIZING;
+	MV_LIST_HEAD_INIT(&pCore->Waiting_List);
+	MV_LIST_HEAD_INIT(&pCore->Internal_Req_List);
+
+	if ( maxIo==1 )
+		pCore->Is_Dump = MV_TRUE;
+	else
+		pCore->Is_Dump = MV_FALSE;
+
+	if (flagSaved!=VS_REG_SIG) {	/* Added for tuning boot up time */
+		/* This initialization is during boot up time, but not S3 */
+		/* Read registers modified by BIOS to set detection flag */
+		if ( (pCore->Device_Id==DEVICE_ID_THOR_4S1P_NEW) ||
+			 (pCore->Device_Id==DEVICE_ID_THORLITE_2S1P_WITH_FLASH) ||
+			 (pCore->Device_Id==DEVICE_ID_THORLITE_2S1P)) {
+			MV_U32 tmpReg = 0;
+			/* Read Bit[3] of PCI CNFG offset 60h to get flag for */
+			/* PATA port enable/disable (0 - default, need to detect) */
+#ifdef _OS_WINDOWS
+#undef MV_PCI_READ_CONFIG_DWORD
+#define MV_PCI_READ_CONFIG_DWORD(mod_ext, offset, reg) \
+                reg = MV_PCI_READ_DWORD(mod_ext, offset)
+#endif /* _OS_WINDOWS */
+			MV_PCI_READ_CONFIG_DWORD(pTopLayer, 0x60, tmpReg);
+			tmpReg &= MV_BIT(3);
+
+			pCore->Flag_Fastboot_Skip |= (tmpReg >> 3);		/* bit 0 */
+			/* Read Bit[10], Bit [11] of BAR5 offset A4h to get flag for */
+			/* PATA device detection (0 - default, need to detect) and */
+			/* PM detection (0 - default, need to detect) */
+			tmpReg = MV_REG_READ_DWORD(pCore->Mmio_Base, VENDOR_DETECT) & 
+						(VENDOR_DETECT_PATA | VENDOR_DETECT_PM);
+			pCore->Flag_Fastboot_Skip |= (tmpReg >> 9);		/* bit 1, 2 */
+		}
+	}
+
+	if ( (pCore->Device_Id==DEVICE_ID_THORLITE_2S1P)||(pCore->Device_Id==DEVICE_ID_THORLITE_2S1P_WITH_FLASH) )
+	{
+		pCore->SATA_Port_Num = 2;
+		pCore->PATA_Port_Num = 1;
+		pCore->Port_Num = 3;
+#ifndef _OS_BIOS
+		MV_DPRINT(("DEVICE_ID_THORLITE_2S1P is found.\n"));
+#endif
+
+	}
+	else if ( pCore->Device_Id==DEVICE_ID_THORLITE_0S1P )
+	{
+		pCore->SATA_Port_Num = 0;
+		pCore->PATA_Port_Num = 1;
+		pCore->Port_Num = 1;
+#ifndef _OS_BIOS
+		MV_DPRINT(("DEVICE_ID_THORLITE_0S1P is found.\n"));
+#endif
+
+	}
+	else
+	{
+		pCore->SATA_Port_Num = 4;
+		pCore->PATA_Port_Num = 1;
+		pCore->Port_Num = 5;
+#ifndef _OS_BIOS
+		MV_DPRINT(("DEVICE_ID_THOR is found.\n"));
+#endif
+
+	}
+
+#if /*(VER_OEM==VER_OEM_ASUS) ||*/(VER_OEM==VER_OEM_INTEL)
+	pCore->Port_Num -= pCore->PATA_Port_Num;
+	pCore->PATA_Port_Num = 0;
+#else
+	if (pCore->Flag_Fastboot_Skip & FLAG_SKIP_PATA_PORT) {
+		pCore->Port_Num -= pCore->PATA_Port_Num;
+		pCore->PATA_Port_Num = 0;
+	}
+#endif
+
+	if (pCore->Is_Dump)
+		sgEntryCount = MAX_SG_ENTRY_REDUCED;
+	else
+		sgEntryCount = MAX_SG_ENTRY;
+
+	tmpSG = (MV_PTR_INTEGER)This + ROUNDING(sizeof(Core_Driver_Extension),8);
+	temp = 	tmpSG + sizeof(MV_SG_Entry) * sgEntryCount * INTERNAL_REQ_COUNT;
+
+	internalReqSize = MV_REQUEST_SIZE * INTERNAL_REQ_COUNT;
+	MV_ASSERT( extensionSize >= ROUNDING(sizeof(Core_Driver_Extension),8) + internalReqSize );
+	for ( i=0; i<INTERNAL_REQ_COUNT; i++ )
+	{
+		pReq = (PMV_Request)temp;
+		pReq->SG_Table.Entry_Ptr = (PMV_SG_Entry)tmpSG;
+		pReq->SG_Table.Max_Entry_Count = sgEntryCount;
+		List_AddTail(&pReq->Queue_Pointer, &pCore->Internal_Req_List);
+		tmpSG += sizeof(MV_SG_Entry) * sgEntryCount;
+		temp += MV_REQUEST_SIZE;	/* MV_Request is 64bit aligned. */
+	}	
+//	temp = ROUNDING( (MV_PTR_INTEGER)temp, 8 );		/* Don't round the extension pointer */
+
+#ifdef SUPPORT_CONSOLIDATE	
+	// Allocate resource for Consolidate_Extension->Requests[].
+	tmpSG = temp;
+	temp = temp + sizeof(MV_SG_Entry) * sgEntryCount * CONS_MAX_INTERNAL_REQUEST_COUNT;
+
+	if ( pCore->Is_Dump )
+	{
+		pCore->pConsolid_Device = NULL;
+		pCore->pConsolid_Extent = NULL;
+	}
+	else
+	{
+		MV_ASSERT( extensionSize>=
+			( ROUNDING(sizeof(Core_Driver_Extension),8) + internalReqSize + ROUNDING(sizeof(Consolidate_Extension),8) + ROUNDING(sizeof(Consolidate_Device),8)*MAX_DEVICE_NUMBER )
+			); 
+		pCore->pConsolid_Extent = (PConsolidate_Extension)(temp);
+
+		//Initialize some fields for pCore->pConsolid_Extent->Requests[i]
+		for (i=0; i<CONS_MAX_INTERNAL_REQUEST_COUNT; i++)
+		{
+			pReq = &pCore->pConsolid_Extent->Requests[i];
+
+			pReq->SG_Table.Max_Entry_Count = sgEntryCount;
+			pReq->SG_Table.Entry_Ptr = (PMV_SG_Entry)tmpSG;
+			tmpSG += sizeof(MV_SG_Entry) * sgEntryCount;
+		}
+
+		pCore->pConsolid_Device = (PConsolidate_Device)((MV_PTR_INTEGER)pCore->pConsolid_Extent + ROUNDING(sizeof(Consolidate_Extension),8));
+	}
+#endif
+
+	/* Port_Map and Port_Num will be read from the register */
+
+	/* Init port data structure */
+	for ( i=0; i<pCore->Port_Num; i++ )
+	{
+		port = &pCore->Ports[i];
+		
+		port->Id = i;
+		port->Port_State = PORT_STATE_IDLE;
+
+		port->Core_Extension = pCore;
+#ifdef _OS_BIOS
+/* BIOS use far pointer to access MMIO_BASE */
+		port->Mmio_Base = (MV_U32)pCore->Mmio_Base + 0x100 + (i * 0x80);
+		//port->Mmio_SCR = (MV_U32)port->Mmio_Base + PORT_SCR;
+#else
+		port->Mmio_Base = (MV_PU8)pCore->Mmio_Base + 0x100 + (i * 0x80);
+		port->Mmio_SCR = (MV_PU8)port->Mmio_Base + PORT_SCR;
+#endif
+
+		Tag_Init(&port->Tag_Pool, MAX_TAG_NUMBER);
+
+		for (j=0; j<MAX_DEVICE_PER_PORT; j++) 
+		{
+			port->Device[j].Id = i*MAX_DEVICE_PER_PORT + j;
+			port->Device[j].PPort = port;
+			port->Device[j].Is_Slave = 0;	/* Which one is the slave will be determined during discovery. */
+#if defined(SUPPORT_TIMER) && defined(_OS_WINDOWS)
+			port->Device[j].Timer_ID = NO_CURRENT_TIMER;
+#endif
+			port->Device[j].Reset_Count = 0;
+		}
+
+		port->Device_Number = 0;
+
+		//TBD: Set function table for each port here.
+		if ( i>=pCore->SATA_Port_Num )
+			port->Type = PORT_TYPE_PATA;
+		else
+			port->Type = PORT_TYPE_SATA;
+	}
+
+	/* Get uncached memory */
+	HBA_GetResource(pCore, RESOURCE_UNCACHED_MEMORY, &dmaResource);
+	memVir = dmaResource.Virtual_Address;
+	memDMA = dmaResource.Physical_Address;
+	
+	/* Assign uncached memory for command list (64 byte align) */
+	offset = (MV_U32)(ROUNDING(memDMA.value,64)-memDMA.value);
+	memDMA.value += offset;
+	memVir = (MV_PU8)memVir + offset;
+	for ( i=0; i<pCore->Port_Num; i++ )
+	{
+		port = &pCore->Ports[i];
+		port->Cmd_List = memVir;
+		port->Cmd_List_DMA = memDMA;
+	#ifdef HIBERNATION_ROUNTINE
+		if((!pCore->Is_Dump)|| (i==(pCore->Port_Num-1)))
+	#endif
+		{
+			memVir = (MV_PU8)memVir + SATA_CMD_LIST_SIZE;
+			memDMA.value += SATA_CMD_LIST_SIZE;
+		}
+	}
+
+	/* Assign uncached memory for received FIS (64 byte align) */
+	offset = (MV_U32)(ROUNDING(memDMA.value,64)-memDMA.value);
+	memDMA.value += offset;
+	memVir = (MV_PU8)memVir + offset;
+	for ( i=0; i<pCore->SATA_Port_Num; i++ )
+	{
+		port = &pCore->Ports[i];	
+		port->RX_FIS = memVir;
+		port->RX_FIS_DMA = memDMA;
+	#ifdef HIBERNATION_ROUNTINE
+		if((!pCore->Is_Dump)|| (i==(pCore->SATA_Port_Num-1)))
+	#endif
+		{
+			memVir = (MV_PU8)memVir + SATA_RX_FIS_SIZE;
+			memDMA.value += SATA_RX_FIS_SIZE;
+		}
+	}
+
+	/* Assign the 32 command tables. (128 byte align) */
+	offset = (MV_U32)(ROUNDING(memDMA.value,128)-memDMA.value);
+	memDMA.value += offset;
+	memVir = (MV_PU8)memVir + offset;
+	for ( i=0; i<pCore->Port_Num; i++ )
+	{
+		port = &pCore->Ports[i];
+		port->Cmd_Table = memVir;
+		port->Cmd_Table_DMA = memDMA;
+
+		if ( !pCore->Is_Dump )
+		{
+			memVir = (MV_PU8)memVir + SATA_CMD_TABLE_SIZE * MAX_SLOT_NUMBER;
+			memDMA.value += SATA_CMD_TABLE_SIZE * MAX_SLOT_NUMBER;
+		}
+		else
+		{
+		#ifdef HIBERNATION_ROUNTINE
+			if(i==(pCore->Port_Num-1))
+		#endif
+			{
+				memVir = (MV_PU8)memVir + SATA_CMD_TABLE_SIZE;
+				memDMA.value += SATA_CMD_TABLE_SIZE;
+			}
+		}
+	}
+
+	/* Assign the scratch buffer (8 byte align) */
+	offset = (MV_U32)(ROUNDING(memDMA.value,8)-memDMA.value);
+	memDMA.value += offset;
+	memVir = (MV_PU8)memVir + offset;
+	for ( i=0; i<pCore->Port_Num; i++ )
+	{
+		port = &pCore->Ports[i];
+		for ( j=0; j<MAX_DEVICE_PER_PORT; j++ )
+		{
+			port->Device[j].Scratch_Buffer = memVir;
+			port->Device[j].Scratch_Buffer_DMA = memDMA;
+		
+		#ifdef HIBERNATION_ROUNTINE
+			if((!pCore->Is_Dump)|| (i==(pCore->Port_Num-1)))
+		#endif
+			{
+				memVir = (MV_PU8)memVir + SATA_SCRATCH_BUFFER_SIZE;
+				memDMA.value += SATA_SCRATCH_BUFFER_SIZE;
+			}
+		}
+	}
+
+	/* Let me confirm the following assumption */
+	MV_ASSERT( sizeof(SATA_FIS_REG_H2D)==sizeof(MV_U32)*FIS_REG_H2D_SIZE_IN_DWORD );
+	MV_ASSERT( sizeof(MV_Command_Table)==0x80+MAX_SG_ENTRY*sizeof(MV_SG_Entry) );
+	MV_ASSERT( sizeof(ATA_Identify_Data)==512 ); 
+	MV_ASSERT( MAX_TAG_NUMBER==MAX_SLOT_NUMBER );
+
+#ifdef SUPPORT_CONSOLIDATE
+	if ( !pCore->Is_Dump )
+	{
+		Consolid_InitializeExtension(This);
+		for ( i=0; i<MAX_DEVICE_NUMBER; i++ )
+			Consolid_InitializeDevice(This, i);
+	}
+#endif
+}
+
+void Core_ModuleStart(MV_PVOID This)
+{
+	PCore_Driver_Extension pCore = (PCore_Driver_Extension)This;
+
+	mvAdapterStateMachine(pCore);
+}
+
+#ifdef _OS_BIOS
+void Core_ReInitBaseAddress(MV_PVOID This)
+{
+	PCore_Driver_Extension pCore = (PCore_Driver_Extension)This;
+ 	PDomain_Port pPort;
+ 	MV_U8 i;
+  	PMV_DATA_STRUCT pDriverData = MyDriverDataBaseOff;
+	for ( i = 0; i<pCore->Port_Num; i++) {
+		pPort = &pCore->Ports[i];
+		pPort->Cmd_List_DMA.low = MVVirtual2PhyicalAddress((MV_LPVOID)pPort->Cmd_List);
+		pPort->RX_FIS_DMA.low = MVVirtual2PhyicalAddress((MV_LPVOID)pPort->RX_FIS);
+		pPort->Cmd_Table_DMA.low = MVVirtual2PhyicalAddress((MV_LPVOID)pPort->Cmd_Table);
+
+		/* Set the sata port register */
+		MV_REG_WRITE_DWORD(pPort->Mmio_Base, PORT_LST_ADDR_HI, pPort->Cmd_List_DMA.high);
+		MV_REG_WRITE_DWORD(pPort->Mmio_Base, PORT_LST_ADDR, pPort->Cmd_List_DMA.low);
+		MV_REG_READ_DWORD(pPort->Mmio_Base, PORT_LST_ADDR);
+
+		MV_REG_WRITE_DWORD(pPort->Mmio_Base, PORT_FIS_ADDR_HI, pPort->RX_FIS_DMA.high);
+		MV_REG_WRITE_DWORD(pPort->Mmio_Base, PORT_FIS_ADDR, pPort->RX_FIS_DMA.low);
+		MV_REG_READ_DWORD(pPort->Mmio_Base, PORT_FIS_ADDR);
+
+	}
+}
+#endif
+
+void Core_ModuleShutdown(MV_PVOID This)
+{
+#ifndef _OS_BIOS
+	/* 
+	 * This function is equivalent to ahci_port_stop 
+	 */
+	PCore_Driver_Extension pCore = (PCore_Driver_Extension)This;
+	MV_U32 tmp, i;
+	MV_LPVOID mmio;
+	for ( i=0; i<pCore->Port_Num; i++ )
+	{
+		mmio = pCore->Ports[i].Mmio_Base;
+
+		tmp = MV_REG_READ_DWORD(mmio, PORT_CMD);
+		if ( pCore->Ports[i].Type==PORT_TYPE_SATA )
+			tmp &= ~(PORT_CMD_START | PORT_CMD_FIS_RX);
+		else
+			tmp &= ~PORT_CMD_START;
+		MV_REG_WRITE_DWORD(mmio, PORT_CMD, tmp);
+		MV_REG_READ_DWORD(mmio, PORT_CMD); /* flush */
+
+		/* 
+		 * spec says 500 msecs for each PORT_CMD_{START,FIS_RX} bit, so
+		 * this is slightly incorrect.
+		 */
+		HBA_SleepMillisecond(pCore, 500);
+	}
+
+	/* Disable the controller interrupt */
+	tmp = MV_REG_READ_DWORD(pCore->Mmio_Base, HOST_CTL);
+	tmp &= ~(HOST_IRQ_EN);
+	MV_REG_WRITE_DWORD(pCore->Mmio_Base, HOST_CTL, tmp);
+#endif
+}
+
+void Core_ModuleNotification(MV_PVOID This, enum Module_Event event, MV_PVOID event_param)
+{
+}
+
+void Core_HandleWaitingList(PCore_Driver_Extension pCore);
+void Core_InternalSendRequest(MV_PVOID This, PMV_Request pReq);
+
+void Core_ModuleSendRequest(MV_PVOID This, PMV_Request pReq)
+{	
+#ifdef SUPPORT_CONSOLIDATE
+	{
+		PCore_Driver_Extension pCore = (PCore_Driver_Extension)This;
+		PDomain_Device pDevice;
+		MV_U8 portId = PATA_MapPortId(pReq->Device_Id);
+		MV_U8 deviceId = PATA_MapDeviceId(pReq->Device_Id);
+		
+		pDevice = &pCore->Ports[portId].Device[deviceId];
+		if ( (!(pDevice->Device_Type&DEVICE_TYPE_ATAPI)) && (!pCore->Is_Dump) )
+			Consolid_ModuleSendRequest(pCore, pReq);
+		else
+			Core_InternalSendRequest(pCore, pReq);
+	}
+#else
+	Core_InternalSendRequest(This, pReq);
+#endif
+}
+
+void Core_InternalSendRequest(MV_PVOID This, PMV_Request pReq)
+{
+	PCore_Driver_Extension pCore = (PCore_Driver_Extension)This;
+	//MV_DUMPRUN(0xCCF1);
+	/* Check whether we can handle this request */
+	switch (pReq->Cdb[0])
+	{
+		case SCSI_CMD_INQUIRY:
+		case SCSI_CMD_START_STOP_UNIT:
+		case SCSI_CMD_TEST_UNIT_READY:
+		case SCSI_CMD_READ_10:
+		case SCSI_CMD_WRITE_10:
+		case SCSI_CMD_VERIFY_10:
+		case SCSI_CMD_READ_CAPACITY_10:
+		case SCSI_CMD_REQUEST_SENSE:
+		case SCSI_CMD_MODE_SELECT_10:
+		case SCSI_CMD_MODE_SENSE_10:
+		case SCSI_CMD_MARVELL_SPECIFIC:
+		default:
+			if ( pReq->Cmd_Initiator==pCore )
+			{
+				if ( !SCSI_IS_READ(pReq->Cdb[0]) && !SCSI_IS_WRITE(pReq->Cdb[0]) )
+				{
+					/* Reset request or request sense command. */
+					List_Add(&pReq->Queue_Pointer, &pCore->Waiting_List);		/* Add to the header. */
+				}
+				else
+				{
+					#ifdef SUPPORT_CONSOLIDATE
+					/* Consolidate request */
+					MV_DASSERT( !pCore->Is_Dump );
+					List_AddTail(&pReq->Queue_Pointer, &pCore->Waiting_List);	/* Append to the tail. */
+					#else
+					MV_ASSERT(MV_FALSE);
+					#endif
+				}
+			}
+			else
+			{
+				List_AddTail(&pReq->Queue_Pointer, &pCore->Waiting_List);		/* Append to the tail. */
+			}
+			Core_HandleWaitingList(pCore);
+			break;
+	}
+}
+
+void SATA_PrepareCommandHeader(PDomain_Port pPort, PMV_Request pReq, MV_U8 tag)
+{
+	MV_PHYSICAL_ADDR table_addr;
+	PMV_Command_Header header = NULL;
+	PMV_SG_Table pSGTable = &pReq->SG_Table;
+	PDomain_Device pDevice = &pPort->Device[PATA_MapDeviceId(pReq->Device_Id)];
+#ifdef DEBUG_BIOS
+	MV_PU8	pHead;
+	MV_U16 tmp;
+#endif
+
+	header = SATA_GetCommandHeader(pPort, tag);
+	/* 
+	 * Set up the command header.
+	 * TBD: Table_Address and Table_Address_High are fixed. Needn't set every time.
+	 */
+	header->FIS_Length = FIS_REG_H2D_SIZE_IN_DWORD;
+	header->Packet_Command = (pReq->Cmd_Flag&CMD_FLAG_PACKET)?1:0;
+	header->Reset = 0;
+	header->NCQ = (pReq->Cmd_Flag&CMD_FLAG_NCQ)?1:0;
+
+#ifdef SUPPORT_PM
+	header->PM_Port = pDevice->PM_Number;
+#else
+	header->PM_Port = 0;
+#endif
+	*((MV_U16 *) header) = CPU_TO_LE_16( *((MV_U16 *) header) );
+	header->PRD_Entry_Count = CPU_TO_LE_16(pSGTable->Valid_Entry_Count);
+
+	table_addr.low = pPort->Cmd_Table_DMA.low + SATA_CMD_TABLE_SIZE*tag;
+	MV_ASSERT(table_addr.low>=pPort->Cmd_Table_DMA.low);	//TBD
+	table_addr.high = pPort->Cmd_Table_DMA.high;
+
+	header->Table_Address = CPU_TO_LE_32(table_addr.low);
+	header->Table_Address_High = CPU_TO_LE_32(table_addr.high);
+#ifdef DEBUG_BIOS
+	pHead=(MV_PU8)header;
+	MV_DUMPC32(pReq->Cmd_Flag);
+	MV_DUMPC32(0xCCCCDDD2);
+	MV_DUMPC32(pPort->Cmd_List_DMA.low);
+	MV_DUMPC32((MV_U32)(*(MV_PU32)&pHead[0]));
+	MV_DUMPC32((MV_U32)(*(MV_PU32)&pHead[1]));
+	MV_DUMPC32((MV_U32)(*(MV_PU32)&pHead[2]));
+	MV_DUMPC32((MV_U32)(*(MV_PU32)&pHead[3]));
+#endif
+}
+
+/*
+ * PATA_PrepareCommandHeader - fill the PATA command header for command
+ * slot 'tag' on port pPort.  Chooses the transfer protocol (DMA / PIO /
+ * non-data, ATAPI packet), selects master/slave, and records the PRD
+ * entry count and the per-slot command table DMA address.
+ * All multi-byte header fields are stored little-endian.
+ */
+void PATA_PrepareCommandHeader(PDomain_Port pPort, PMV_Request pReq, MV_U8 tag)
+{
+#ifdef DEBUG_BIOS
+	MV_PU8	pHead;
+	MV_U16 tmp;
+#endif
+	MV_PHYSICAL_ADDR table_addr;
+	PMV_PATA_Command_Header header = NULL;
+	PMV_SG_Table pSGTable = &pReq->SG_Table;
+
+	header = PATA_GetCommandHeader(pPort, tag);
+	/* 
+	 * Set up the command header.
+	 * TBD: TCQ, Diagnostic_Command, Reset
+	 * TBD: Table_Address and Table_Address_High are fixed. Needn't set every time.
+	 */
+	header->PIO_Sector_Count = 0;		/* Only for PIO multiple sector commands */
+	header->Controller_Command = 0;
+	header->TCQ = 0;
+	header->Packet_Command = (pReq->Cmd_Flag&CMD_FLAG_PACKET)?1:0;
+
+	/* Protocol selection: compile-time policy for ATAPI packet commands. */
+#ifdef USE_DMA_FOR_ALL_PACKET_COMMAND
+	if ( pReq->Cmd_Flag&CMD_FLAG_PACKET )
+	{
+		/* Packet commands use DMA whenever they move data. */
+		//if ( pReq->Cdb[0]!=SCSI_CMD_INQUIRY )	//ATAPI???
+			header->DMA = (pReq->Cmd_Flag&CMD_FLAG_NON_DATA)?0:1;
+		//else
+		//	header->DMA = 0;
+	}
+	else
+	{
+		header->DMA = (pReq->Cmd_Flag&CMD_FLAG_DMA)?1:0;
+	}
+#elif defined(USE_PIO_FOR_ALL_PACKET_COMMAND)
+	if ( pReq->Cmd_Flag&CMD_FLAG_PACKET )
+	{
+		/* Packet commands always use PIO. */
+		header->DMA = 0;
+	}
+	else
+	{
+		header->DMA = (pReq->Cmd_Flag&CMD_FLAG_DMA)?1:0;
+	}
+#else	
+	header->DMA = (pReq->Cmd_Flag&CMD_FLAG_DMA)?1:0;
+#endif
+
+	header->Data_In = (pReq->Cmd_Flag&CMD_FLAG_DATA_IN)?1:0;
+	header->Non_Data = (pReq->Cmd_Flag&CMD_FLAG_NON_DATA)?1:0;
+
+	header->PIO_Sector_Command = 0;
+	header->Is_48Bit = (pReq->Cmd_Flag&CMD_FLAG_48BIT)?1:0;
+	header->Diagnostic_Command = 0;
+	header->Reset = 0;
+
+	/* Master/slave select comes from the device this request maps to. */
+	header->Is_Slave = pPort->Device[PATA_MapDeviceId(pReq->Device_Id)].Is_Slave;
+
+	/* The flag bits above live in the first 16-bit word; swap it once
+	 * to little-endian after all of them have been set. */
+	*((MV_U16 *) header) = CPU_TO_LE_16( *((MV_U16 *) header) );
+	header->PRD_Entry_Count = CPU_TO_LE_16(pSGTable->Valid_Entry_Count);
+
+	/* Each slot owns a fixed-size command table; compute its DMA address. */
+	table_addr.low = pPort->Cmd_Table_DMA.low + SATA_CMD_TABLE_SIZE*tag;
+	MV_ASSERT( table_addr.low>=pPort->Cmd_Table_DMA.low);	//TBD
+	table_addr.high = pPort->Cmd_Table_DMA.high;
+
+	header->Table_Address = CPU_TO_LE_32(table_addr.low);
+	header->Table_Address_High = CPU_TO_LE_32(table_addr.high);
+
+#ifdef DEBUG_BIOS
+	/* Dump the raw header dwords to the BIOS debug console. */
+	pHead=(MV_PU8)header;
+	MV_DUMPC32(pReq->Cmd_Flag);
+	MV_DUMPC32(0xCCCC7701);
+	MV_DUMPC32((MV_U32)(*(MV_PU32)&pHead[0]));
+	MV_DUMPC32(0xCCCC7702);
+	MV_DUMPC32((MV_U32)(*(MV_PU32)&pHead[4]));
+	MV_DUMPC32(0xCCCC7703);
+	MV_DUMPC32((MV_U32)(*(MV_PU32)&pHead[8]));
+	MV_DUMPC32(0xCCCC7704);
+	MV_DUMPC32((MV_U32)(*(MV_PU32)&pHead[12]));
+#endif
+
+
+}
+
+/*
+ * Fill SATA command table
+ */
+/*
+ * SATA_PrepareCommandTable - build the command table for slot 'tag':
+ * (1) the H2D register FIS from the request/taskfile, (2) the ATAPI CDB
+ * when this is a packet command, and (3) the PRD (scatter/gather) table.
+ * PRD byte counts are stored zero-based (Size-1), little-endian.
+ */
+MV_VOID SATA_PrepareCommandTable(
+	PDomain_Port pPort, 
+	PMV_Request pReq, 
+	MV_U8 tag,
+	PATA_TaskFile pTaskFile
+	)
+{
+	PMV_Command_Table pCmdTable = Port_GetCommandTable(pPort, tag);
+
+	PMV_SG_Table pSGTable = &pReq->SG_Table;
+	PMV_SG_Entry pSGEntry = NULL;
+	MV_U8 i;
+#ifdef DEBUG_BIOS
+	MV_PU8 pTable;
+#endif
+
+	/* Step 1: fill the command FIS: MV_Command_Table */
+	SCSI_To_FIS(pPort->Core_Extension, pReq, tag, pTaskFile);
+
+	/* Step 2. fill the ATAPI CDB */
+	if ( pReq->Cmd_Flag&CMD_FLAG_PACKET )
+	{
+		MV_CopyMemory(pCmdTable->ATAPI_CDB, pReq->Cdb, MAX_CDB_SIZE);
+	}
+
+	/* Step 3: fill the PRD Table if necessary. */
+	if ( (pSGTable) && (pSGTable->Valid_Entry_Count) )
+	{
+		/* "Transfer Byte Count" in AHCI and 614x PRD table is zero based. */
+		for ( i=0; i<pSGTable->Valid_Entry_Count; i++ )
+		{
+			pSGEntry = &pCmdTable->PRD_Entry[i];
+			pSGEntry->Base_Address = CPU_TO_LE_32(pSGTable->Entry_Ptr[i].Base_Address);
+			pSGEntry->Base_Address_High = CPU_TO_LE_32(pSGTable->Entry_Ptr[i].Base_Address_High);
+			pSGEntry->Size = CPU_TO_LE_32(pSGTable->Entry_Ptr[i].Size-1);
+		}
+	}
+	else
+	{	
+		/* No SG entries is only legal for commands that move no data. */
+		MV_DASSERT( !SCSI_IS_READ(pReq->Cdb[0]) && !SCSI_IS_WRITE(pReq->Cdb[0]) );
+	}
+#ifdef DEBUG_BIOS	
+
+	/* Dump the FIS and the first PRD entry to the BIOS debug console. */
+	pTable=(MV_PU8)pPort->Cmd_Table;
+	//MV_DUMPC32(0xCCCCEEE2);
+	//MV_DUMPC16((MV_U16)pTable);
+	//MV_DUMPC32(pPort->Cmd_Table_DMA.low);
+	MV_DUMPC32(0xCCCCEE21);
+	MV_DUMPC32((MV_U32)(*(MV_PU32)&pCmdTable->FIS[0]));
+	MV_DUMPC32(0xCCCCEE22);
+	MV_DUMPC32((MV_U32)(*(MV_PU32)&pCmdTable->FIS[4]));
+	MV_DUMPC32(0xCCCCEE23);
+	MV_DUMPC32((MV_U32)(*(MV_PU32)&pCmdTable->FIS[8]));
+	MV_DUMPC32(0xCCCCEE24);
+	MV_DUMPC32((MV_U32)(*(MV_PU32)&pCmdTable->FIS[12]));
+#if 1
+	MV_DUMPC32(0xCCCCEEE3);
+	//MV_DUMPC32((MV_U32)(*(MV_PU32)&pCmdTable->ATAPI_CDB[0]));
+	//MV_DUMPC32((MV_U32)(*(MV_PU32)&pCmdTable->ATAPI_CDB[4]));
+	//MV_DUMPC32((MV_U32)(*(MV_PU32)&pCmdTable->ATAPI_CDB[8]));
+	//MV_DUMPC32((MV_U32)(*(MV_PU32)&pCmdTable->ATAPI_CDB[12]));
+	//MV_DUMPC32(0xCCCCEEE4);
+	//(MV_U32)(*(MV_PU32)&pTable[136]) =(MV_U32) 0;
+	MV_DUMPC32((MV_U32)(*(MV_PU32)&pTable[128]));
+	MV_DUMPC32((MV_U32)(*(MV_PU32)&pTable[132]));
+	MV_DUMPC32((MV_U32)(*(MV_PU32)&pTable[136]));
+	MV_DUMPC32(pCmdTable->PRD_Entry[0].Size);
+
+	MV_DUMPC32(0xCCCCEEE5);
+
+	/* Halt if the first PRD's high address is non-zero (unexpected
+	 * above-4GB buffer in the BIOS environment). */
+	#ifdef DEBUG_BIOS
+		if((MV_U32)(*(MV_PU32)&pTable[132]) != 0)
+		{
+			MV_DUMPC32(0xCCCC990D);
+			MV_HALTKEY;
+		}
+	#endif
+	
+	//pSGEntry = &pCmdTable->PRD_Entry[0];
+	//MV_DUMPC32(pSGEntry->Base_Address);
+	//MV_DUMPC32(pSGEntry->Base_Address_High);
+	//MV_DUMPC32(pSGTable->Entry[0].Base_Address);
+	//MV_DUMPC32(pSGTable->Entry[0].Base_Address_High);
+	//MV_DUMPC8(pSGTable->Valid_Entry_Count);
+	//MV_DPRINT("CmdTableAddr=0x%x,CmdTableAddrP=0x%x\n",(MV_U32)(MV_U16)pPort->Cmd_Table,(MV_U32)pPort->Cmd_Table_DMA.low);
+	//MV_DPRINT("CmdBlk0=0x%x,CmdBlk1=0x%x,CmdBlk2=0x%x,CmdBlk3=0x%x,"
+	//			,(MV_U32)(*(MV_PU32)&pCmdTable->FIS[0])
+	//			,(MV_U32)(*(MV_PU32)&pCmdTable->FIS[4])
+	//			,(MV_U32)(*(MV_PU32)&pCmdTable->FIS[8])
+	//			,(MV_U32)(*(MV_PU32)&pCmdTable->FIS[12]));
+
+
+	//MV_DPRINT("ATAPI0=0x%x,ATAPI1=0x%x,ATAPI2=0x%x,ATAPI3=0x%x\n"
+	//			,(MV_U32)(*(MV_PU32)&pCmdTable->ATAPI_CDB[0])
+	//			,(MV_U32)(*(MV_PU32)&pCmdTable->ATAPI_CDB[4])
+	//			,(MV_U32)(*(MV_PU32)&pCmdTable->ATAPI_CDB[8])
+	//			,(MV_U32)(*(MV_PU32)&pCmdTable->ATAPI_CDB[12]));
+	//MV_DPRINT("SGAddr=0x%x,SGAddrH=0x%x,REV=0x%x,SGCnt=0x%x\n",(MV_U32)(*(MV_PU32)&pTable[128]),(MV_U32)(*(MV_PU32)&pTable[132]),(MV_U32)(*(MV_PU32)&pTable[136]),pCmdTable->PRD_Entry[0].Size);
+#endif
+
+	MV_ENTERLINE;
+#endif
+
+}
+
+/*
+ * Fill the PATA command table
+*/
+/*
+ * PATA_PrepareCommandTable - build the command table for slot 'tag':
+ * (1) the 16-byte taskfile command block written byte-by-byte at the
+ * start of the table, (2) the ATAPI CDB for packet commands, and
+ * (3) the PRD (scatter/gather) table with zero-based byte counts.
+ */
+MV_VOID PATA_PrepareCommandTable(
+	PDomain_Port pPort, 
+	PMV_Request pReq, 
+	MV_U8 tag,
+	PATA_TaskFile pTaskFile
+	)
+{
+	PMV_Command_Table pCmdTable = Port_GetCommandTable(pPort, tag);
+#ifdef DEBUG_BIOS
+	MV_PU8 pTable;
+#endif
+
+	PMV_SG_Table pSGTable = &pReq->SG_Table;
+	PMV_SG_Entry pSGEntry = NULL;
+	MV_PU8 pU8 = (MV_PU8)pCmdTable;
+	MV_U8 i, device_index;
+
+	device_index = PATA_MapDeviceId(pReq->Device_Id);
+
+	/* Step 1: Fill the command block */
+	/* Byte order here is the hardware's expected taskfile layout;
+	 * do not reorder these stores. */
+	(*pU8)=pTaskFile->Features; pU8++;
+	(*pU8)=pTaskFile->Feature_Exp; pU8++;
+	(*pU8)=pTaskFile->Sector_Count; pU8++;
+	(*pU8)=pTaskFile->Sector_Count_Exp; pU8++;
+	(*pU8)=pTaskFile->LBA_Low; pU8++;
+	(*pU8)=pTaskFile->LBA_Low_Exp; pU8++;
+	(*pU8)=pTaskFile->LBA_Mid; pU8++;
+	(*pU8)=pTaskFile->LBA_Mid_Exp; pU8++;
+	(*pU8)=pTaskFile->Command; pU8++;
+	(*pU8)=pTaskFile->Device; pU8++;
+	(*pU8)=pTaskFile->LBA_High; pU8++;
+	(*pU8)=pTaskFile->LBA_High_Exp; pU8++;
+	/* Zero the remaining dword of the 16-byte command block. */
+	*((MV_PU32)pU8) = 0L;
+    
+	/* Step 2: Fill the ATAPI CDB */
+	if ( pReq->Cmd_Flag&CMD_FLAG_PACKET )
+	{
+		MV_CopyMemory(pCmdTable->ATAPI_CDB, pReq->Cdb, MAX_CDB_SIZE);
+	}
+
+	/* Step 3: Fill the PRD Table if necessary. */
+	if ( (pSGTable) && (pSGTable->Valid_Entry_Count) )
+	{
+		//TBD: C0 board AHCI is 0 based while Ax board AHCI PATA is 1 based.
+		#if 0
+		/* For thor, we can just copy the PRD table */
+		MV_CopyMemory( 
+			pCmdTable->PRD_Entry, 
+			pSGTable->Entry,
+			sizeof(MV_SG_Entry)*pSGTable->Valid_Entry_Count 
+			);
+		#else
+		/* "Transfer Byte Count" in AHCI and 614x PRD table is zero based. */
+		for ( i=0; i<pSGTable->Valid_Entry_Count; i++ )
+		{
+			pSGEntry = &pCmdTable->PRD_Entry[i];
+			pSGEntry->Base_Address = CPU_TO_LE_32(pSGTable->Entry_Ptr[i].Base_Address);
+			pSGEntry->Base_Address_High = CPU_TO_LE_32(pSGTable->Entry_Ptr[i].Base_Address_High);
+			pSGEntry->Size = CPU_TO_LE_32(pSGTable->Entry_Ptr[i].Size-1);
+		}
+		#endif
+	}
+	else
+	{	
+		/* No SG entries is only legal for commands that move no data. */
+		MV_DASSERT( !SCSI_IS_READ(pReq->Cdb[0]) && !SCSI_IS_WRITE(pReq->Cdb[0]) );
+	}
+
+#ifdef DEBUG_BIOS	
+
+	/* Dump the command block and the first PRD entry. */
+	pTable=(MV_PU8)pPort->Cmd_Table;
+	//MV_DUMPC32(0xCCCCEEE2);
+	//MV_DUMPC16((MV_U16)pTable);
+	//MV_DUMPC32(pPort->Cmd_Table_DMA.low);
+	MV_DUMPC32(0xCCCCEE21);
+	MV_DUMPC32((MV_U32)(*(MV_PU32)&pCmdTable->FIS[0]));
+	MV_DUMPC32(0xCCCCEE22);
+	MV_DUMPC32((MV_U32)(*(MV_PU32)&pCmdTable->FIS[4]));
+	MV_DUMPC32(0xCCCCEE23);
+	MV_DUMPC32((MV_U32)(*(MV_PU32)&pCmdTable->FIS[8]));
+	MV_DUMPC32(0xCCCCEE24);
+	MV_DUMPC32((MV_U32)(*(MV_PU32)&pCmdTable->FIS[12]));
+#if 1
+	MV_DUMPC32(0xCCCCEEE3);
+	//MV_DUMPC32((MV_U32)(*(MV_PU32)&pCmdTable->ATAPI_CDB[0]));
+	//MV_DUMPC32((MV_U32)(*(MV_PU32)&pCmdTable->ATAPI_CDB[4]));
+	//MV_DUMPC32((MV_U32)(*(MV_PU32)&pCmdTable->ATAPI_CDB[8]));
+	//MV_DUMPC32((MV_U32)(*(MV_PU32)&pCmdTable->ATAPI_CDB[12]));
+	//MV_DUMPC32(0xCCCCEEE4);
+	//(MV_U32)(*(MV_PU32)&pTable[136]) =(MV_U32) 0;
+	MV_DUMPC32((MV_U32)(*(MV_PU32)&pTable[128]));
+	MV_DUMPC32((MV_U32)(*(MV_PU32)&pTable[132]));
+	MV_DUMPC32((MV_U32)(*(MV_PU32)&pTable[136]));
+	MV_DUMPC32(pCmdTable->PRD_Entry[0].Size);
+
+	MV_DUMPC32(0xCCCCEEE5);
+
+	/* Halt if the first PRD's high address is non-zero (unexpected
+	 * above-4GB buffer in the BIOS environment). */
+	#ifdef DEBUG_BIOS
+		if((MV_U32)(*(MV_PU32)&pTable[132]) != 0)
+		{
+			MV_DUMPC32(0xCCCC990D);
+			MV_HALTKEY;
+		}
+	#endif
+	
+#endif
+
+	MV_ENTERLINE;
+#endif
+}
+
+/*
+ * SATA_SendFrame - issue the prepared command in slot 'tag' to the
+ * hardware.  Marks the slot as running, updates the port's NCQ/retry
+ * state, arms SActive for NCQ commands, then writes PORT_CMD_ISSUE.
+ * In the BIOS build the function additionally polls for completion and
+ * invokes the interrupt handlers synchronously before returning.
+ */
+void SATA_SendFrame(PDomain_Port pPort, PMV_Request pReq, MV_U8 tag)
+{
+	MV_LPVOID portMmio = pPort->Mmio_Base;
+#ifdef _OS_BIOS
+	MV_U32 	DelayTimer=100;
+	MV_U32 	irqStatus=0;
+	PCore_Driver_Extension pCore= pPort->Core_Extension;
+   	PMV_DATA_STRUCT pDriverData = MyDriverDataBaseOff;
+#endif
+
+	/* The slot must be completely free: not tracked by the driver and
+	 * not pending in the hardware's issue/SActive registers. */
+	MV_DASSERT( (pPort->Running_Slot&(1<<tag))==0 );
+	MV_DASSERT( pPort->Running_Req[tag]==0 );
+	MV_DASSERT( (MV_REG_READ_DWORD(portMmio, PORT_CMD_ISSUE)&(1<<tag))==0 );
+	MV_DASSERT( (MV_REG_READ_DWORD(portMmio, PORT_SCR_ACT)&(1<<tag))==0 );
+
+#ifdef MV_DEBUG
+	if ( pPort->Running_Slot!=0 )
+	{
+		//MV_DPRINT(("M: S=0x%x, T=0x%x.\n", pPort->Running_Slot, tag));
+	}
+#endif
+
+	/* Claim the slot before touching the hardware. */
+	pPort->Running_Slot |= 1<<tag;
+	pPort->Running_Req[tag] = pReq;
+
+	if ( pReq->Cmd_Flag&CMD_FLAG_NCQ )
+		pPort->Setting |= PORT_SETTING_NCQ_RUNNING;
+	else
+		pPort->Setting &= ~PORT_SETTING_NCQ_RUNNING;
+
+	if ( pReq->Scsi_Status==REQ_STATUS_RETRY )
+	{
+		MV_PRINT("Retry request...");
+		MV_DumpRequest(pReq, MV_FALSE);
+		pPort->Setting |= PORT_SETTING_DURING_RETRY;
+	}
+	else
+	{
+		pPort->Setting &= ~PORT_SETTING_DURING_RETRY;
+	}
+
+	/* NCQ requires the tag to be set in SActive before the command is
+	 * issued; the read-back flushes the posted write. */
+	if ( pPort->Setting&PORT_SETTING_NCQ_RUNNING )
+	{
+		MV_REG_WRITE_DWORD(portMmio, PORT_SCR_ACT, 1<<tag);
+		MV_REG_READ_DWORD(portMmio, PORT_SCR_ACT);	/* flush */
+	}
+
+#ifdef _OS_BIOS
+/* Cleare All interrupt status */
+	MV_REG_WRITE_DWORD(portMmio, PORT_IRQ_STAT,0xFFFFFFFF);
+	//HBA_SleepMillisecond(NULL, 10);
+#endif	
+
+#ifdef DEBUG_BIOS
+	MV_DUMPC32(0xCCCCCCFE);
+	MV_DUMPC32(MV_REG_READ_DWORD(portMmio, PORT_CMD_ISSUE));
+	MV_DUMPC32(MV_REG_READ_DWORD(portMmio, PORT_IRQ_STAT));
+	if(MV_REG_READ_DWORD(portMmio, PORT_CMD_ISSUE)||MV_REG_READ_DWORD(portMmio, PORT_IRQ_STAT))	
+	{	
+		MV_DUMPC32(0xCCCCCCFF);
+		MV_HALTKEY;
+	}
+	MV_ENTERLINE;
+#endif
+
+#ifdef MV_IRQ_MODE
+	/* Re-enable CPU interrupts before issuing (BIOS real-mode build). */
+	pDriverData->mvIRQStatus=MV_FALSE;
+	__asm	sti;
+#endif
+
+	/* Issue the command; the read-back flushes the posted write. */
+	MV_REG_WRITE_DWORD(portMmio, PORT_CMD_ISSUE, 1<<tag);
+	MV_REG_READ_DWORD(portMmio, PORT_CMD_ISSUE);	/* flush */
+	
+	/* The End of SATA_SendFrame() for OS code. */
+	
+#ifdef _OS_BIOS
+
+	/* BIOS build is synchronous: poll up to ~100ms for an interrupt. */
+#ifdef MV_IRQ_MODE
+	while(!pDriverData->mvIRQStatus && DelayTimer)
+	{
+		MV_DUMPC16(0xC990);
+		HBA_SleepMillisecond(NULL, 1);
+		DelayTimer--;
+	}	
+#else	/* #ifdef MV_IRQ_MODE */
+	while(DelayTimer && !irqStatus)
+	{
+		HBA_SleepMillisecond(NULL, 1);
+		irqStatus = MV_REG_READ_DWORD(pCore->Mmio_Base, HOST_IRQ_STAT);
+		DelayTimer--;
+		MV_DUMPC16(0xC990);
+	}
+#endif	/* #ifdef MV_IRQ_MODE */
+
+	/* Timed out waiting: run the ISR by hand and halt on failure. */
+	if((pDriverData->mvIRQStatus==MV_FALSE)||(DelayTimer == 0))
+	{
+		MV_DUMPC32(0xCCC4422);
+		if(!Core_InterruptServiceRoutine(pCore))
+		{
+			MV_DUMPC32(0xCCCC4423);
+			MV_HALTKEY;
+		}
+	}
+	
+	if ( pPort->Type==PORT_TYPE_PATA )
+		PATA_PortHandleInterrupt(pCore, pPort);
+	else
+		SATA_PortHandleInterrupt(pCore, pPort);
+#endif	/* #ifdef _OS_BIOS */
+
+	MV_DUMPC32(0xCCCC4421);
+}
+
+//Hardware Reset. Added by Lily
+
+/*
+ * Core_WaitingForIdle - report whether the whole adapter is quiescent.
+ * Returns MV_TRUE only when no port has any command slot in flight.
+ */
+MV_BOOLEAN Core_WaitingForIdle(MV_PVOID pExtension)
+{
+	PCore_Driver_Extension pCore = (PCore_Driver_Extension)pExtension;
+	MV_U8 portIndex;
+
+	/* Any outstanding slot on any port means we are still busy. */
+	for ( portIndex=0; portIndex<pCore->Port_Num; portIndex++ )
+	{
+		if ( pCore->Ports[portIndex].Running_Slot!=0 )
+			return MV_FALSE;
+	}
+
+	return MV_TRUE;
+}
+
+MV_BOOLEAN ResetController(PCore_Driver_Extension pCore);
+
+//TBD: Replace this function with existing functions.
+/*
+ * Core_ResetHardware - force the adapter through a full re-initialization.
+ * Rolls the adapter, every port and every device back to the idle state,
+ * then restarts the adapter state machine.  Intended to be called at
+ * most once; a second call trips a debug assert.
+ */
+void Core_ResetHardware(MV_PVOID pExtension)
+{
+	PCore_Driver_Extension pCore = (PCore_Driver_Extension)pExtension;
+	MV_U32 portIndex, devIndex;
+	PDomain_Port pPort = NULL;
+	PDomain_Device pDevice = NULL;
+
+	/* Re-initialize some variables to make the reset go. */
+	//TBD: Any more variables?
+	pCore->Adapter_State = ADAPTER_INITIALIZING;
+	for ( portIndex=0; portIndex<MAX_PORT_NUMBER; portIndex++ )
+	{
+		pPort = &pCore->Ports[portIndex];
+		pPort->Port_State = PORT_STATE_IDLE;
+		for ( devIndex=0; devIndex<MAX_DEVICE_PER_PORT; devIndex++ )
+		{
+			pDevice = &pPort->Device[devIndex];
+			pDevice->State = DEVICE_STATE_IDLE;
+		}
+	}
+
+	/* Kick the adapter state machine, guarding against re-entry:
+	 * we only get one chance to call Core_ResetHardware. */
+	if ( pCore->Resetting!=0 )
+	{
+		MV_DASSERT(MV_FALSE);
+		return;
+	}
+
+	pCore->Resetting = 1;
+	if ( !mvAdapterStateMachine(pCore) )
+	{
+		MV_ASSERT(MV_FALSE);
+	}
+}
+
+/*
+ * PATA_LegacyPollSenseData - supply sense data for a failed legacy
+ * PATA command.  Currently returns a canned fixed-format sense block
+ * captured from a bus trace rather than polling the device.
+ */
+void PATA_LegacyPollSenseData(PCore_Driver_Extension pCore, PMV_Request pReq)
+{
+#ifndef _OS_BIOS
+	/* Use legacy mode to poll the sense data. */
+	//TBD: Let me fake the sense data first to see what's gonna happen.
+	/*
+	 * Canned sense data taken from a trace.  It decodes as:
+	 *   Format:   fixed format sense data
+	 *   Key:      Hardware Error
+	 *   ASC/ASCQ: 08h/03h (LOGICAL UNIT COMMUNICATION CRC ERROR)
+	 */
+	MV_U8 cannedSense[]={0xF0, 0x00, 0x04, 0x00, 0x00, 0x01,
+		0xEA, 0x0A, 0x74, 0x00, 0x00, 0x00, 0x08, 0x03, 0x00, 0x00, 0x00, 0x00};
+	MV_U32 copyLen = MV_MIN(sizeof(cannedSense)/sizeof(MV_U8), pReq->Sense_Info_Buffer_Length);
+
+	/* Never write past the caller's sense buffer. */
+	MV_CopyMemory(pReq->Sense_Info_Buffer, cannedSense, copyLen);
+#endif
+
+}
+
+/*
+ * Core_FillSenseData - build fixed-format (response code 0x70) sense
+ * data in the request's sense buffer.
+ *
+ * pReq        - request whose Sense_Info_Buffer receives the data
+ *               (no-op when the buffer pointer is NULL)
+ * senseKey    - SCSI sense key (byte 2)
+ * adSenseCode - additional sense code (byte 12)
+ *
+ * NOTE(review): like the original, this writes byte 12 without checking
+ * Sense_Info_Buffer_Length -- assumes callers provide >= 13 bytes; confirm.
+ */
+void Core_FillSenseData(PMV_Request pReq, MV_U8 senseKey, MV_U8 adSenseCode)
+{
+	if (pReq->Sense_Info_Buffer != NULL) {
+		((MV_PU8)pReq->Sense_Info_Buffer)[0] = 0x70;	/* Current, fixed format */
+		((MV_PU8)pReq->Sense_Info_Buffer)[2] = senseKey;
+		/*
+		 * Additional sense length counts the valid bytes after byte 7.
+		 * It must cover the ASC at byte 12, so use the conventional
+		 * 0x0A (bytes 8..17).  The previous value of 0 declared the
+		 * ASC written below to be outside the valid sense data.
+		 */
+		((MV_PU8)pReq->Sense_Info_Buffer)[7] = 0x0A;	/* additional sense length */
+		((MV_PU8)pReq->Sense_Info_Buffer)[12] = adSenseCode;	/* additional sense code */
+	}
+}
+
+/*
+ * mvScsiInquiry - emulate the SCSI INQUIRY command for an ATA/ATAPI
+ * device (and for the driver's management console).
+ * Handles the EVPD pages 0x00 (supported pages), 0x80 (unit serial
+ * number) and 0x83 (device identification), and builds a 42-byte
+ * standard inquiry response from the device's ATA identify strings.
+ * Sets pReq->Scsi_Status and trims pReq->Data_Transfer_Length.
+ */
+void mvScsiInquiry(PCore_Driver_Extension pCore, PMV_Request pReq)
+{
+#ifndef _OS_BIOS
+	PDomain_Device pDevice = NULL;
+	MV_U8 portId, deviceId;
+
+	portId = PATA_MapPortId(pReq->Device_Id);
+	deviceId = PATA_MapDeviceId(pReq->Device_Id);
+	pDevice = &pCore->Ports[portId].Device[deviceId];
+	MV_ZeroMemory(pReq->Data_Buffer, pReq->Data_Transfer_Length);
+
+	if ( pReq->Cdb[1] & CDB_INQUIRY_EVPD )
+	{
+		/* Page 0 payload: device type 0, 2 supported pages (0x00, 0x80). */
+		MV_U8 MV_INQUIRY_VPD_PAGE0_DATA[6] = {0x00, 0x00, 0x00, 0x02, 0x00, 0x80};
+		MV_U32 tmpLen = 0;
+		pReq->Scsi_Status = REQ_STATUS_SUCCESS;
+
+		/* Shall return the specific page of Vital Production Data */
+		switch (pReq->Cdb[2]) {
+		case 0x00:	/* Supported VPD pages */
+			tmpLen = MV_MIN(pReq->Data_Transfer_Length, 6);
+			MV_CopyMemory(pReq->Data_Buffer, MV_INQUIRY_VPD_PAGE0_DATA, tmpLen);
+			break;
+		case 0x80:	/* Unit Serial Number VPD Page */
+			if (pReq->Data_Transfer_Length > 1)
+				*(((MV_PU8)(pReq->Data_Buffer)) + 1) = 0x80;
+			tmpLen = MV_MIN(pReq->Data_Transfer_Length, 4);
+			if (tmpLen >= 4) {
+				/* Serial number from ATA identify, max 20 bytes. */
+				tmpLen = MV_MIN((pReq->Data_Transfer_Length-4), 20);
+				MV_CopyMemory(((MV_PU8)(pReq->Data_Buffer)+4), pDevice->Serial_Number, tmpLen);
+				*(((MV_PU8)(pReq->Data_Buffer)) + 3) = (MV_U8)tmpLen;
+				tmpLen += 4;
+			}
+			break;
+		case 0x83:	/* Device Identification VPD Page */
+			/* Here is using Vendor Specific Identifier Format */
+			if (pReq->Data_Transfer_Length > 8) {
+				*(((MV_PU8)(pReq->Data_Buffer)) + 1) = 0x83;
+				*(((MV_PU8)(pReq->Data_Buffer)) + 4) = 0x02;	/* Code Set */
+				tmpLen = MV_MIN((pReq->Data_Transfer_Length-8), 40);
+				MV_CopyMemory(((MV_PU8)(pReq->Data_Buffer)+8), pDevice->Model_Number, tmpLen);
+				*(((MV_PU8)(pReq->Data_Buffer)) + 7) = (MV_U8)tmpLen;	/* Identifier Length */
+				*(((MV_PU8)(pReq->Data_Buffer)) + 3) = (MV_U8)(tmpLen + 4);	/* Page Length */
+			}
+			tmpLen += 8;
+			break;
+		default:
+			/* Unsupported VPD page -> ILLEGAL REQUEST / INVALID FIELD IN CDB. */
+			pReq->Scsi_Status = REQ_STATUS_HAS_SENSE;
+			Core_FillSenseData(pReq, SCSI_SK_ILLEGAL_REQUEST, SCSI_ASC_INVALID_FEILD_IN_CDB);
+			break;
+		}
+		pReq->Data_Transfer_Length = tmpLen;
+	} 
+	else
+	{
+		/* Standard inquiry */
+		if (pReq->Cdb[2]!=0) {
+			/* PAGE CODE field must be zero when EVPD is zero for a valid request */
+			/* sense key as ILLEGAL REQUEST and additional sense code as INVALID FIELD IN CDB */
+			pReq->Scsi_Status = REQ_STATUS_HAS_SENSE;
+			Core_FillSenseData(pReq, SCSI_SK_ILLEGAL_REQUEST, SCSI_ASC_INVALID_FEILD_IN_CDB);
+			return;
+		}
+		
+#ifdef SUPPORT_SCSI_PASSTHROUGH
+
+		/* console */
+		if ( pReq->Device_Id == CONSOLE_ID )	
+		{
+			/* Fixed inquiry page identifying the virtual RAID console. */
+			MV_U8 bConsoleInquiryPage[64] = {
+				0x03,0x00,0x03,0x03,0xFA,0x00,0x00,0x30,
+				'M', 'a', 'r', 'v', 'e', 'l', 'l', ' ',
+				0x52,0x41,0x49,0x44,0x20,0x43,0x6F,0x6E,  /* "Raid Con" */
+				0x73,0x6F,0x6C,0x65,0x20,0x20,0x20,0x20,  /* "sole    " */
+				0x31,0x2E,0x30,0x30,0x20,0x20,0x20,0x20,  /* "1.00    " */
+				0x53,0x58,0x2F,0x52,0x53,0x41,0x46,0x2D,  /* "SX/RSAF-" */
+				0x54,0x45,0x31,0x2E,0x30,0x30,0x20,0x20,  /* "TE1.00  " */
+				0x0C,0x20,0x20,0x20,0x20,0x20,0x20,0x20
+			};
+
+			MV_CopyMemory( pReq->Data_Buffer, 
+						   bConsoleInquiryPage, 
+						   MV_MIN(pReq->Data_Transfer_Length, 64)
+						 );
+
+			pReq->Scsi_Status = REQ_STATUS_SUCCESS;
+			return;
+		}
+
+#endif 
+
+		if ( (portId>=pCore->Port_Num)||(deviceId>=MAX_DEVICE_PER_PORT) )
+		{
+			pReq->Scsi_Status = REQ_STATUS_NO_DEVICE;
+		}
+		else
+		{
+//			pDevice = &pCore->Ports[portId].Device[deviceId];
+			if ( pDevice->Status&DEVICE_STATUS_FUNCTIONAL )
+			{
+#if 0
+				#define STANDARD_INQUIRY_DATA_SIZE	36
+				MV_U8 MV_INQUIRY_DATA[STANDARD_INQUIRY_DATA_SIZE] = {
+					0, 0, 0x02, 0x02, STANDARD_INQUIRY_DATA_SIZE - 5, 0, 0, 0x13,
+					'M', 'a', 'r', 'v', 'e', 'l', 'l', ' ',
+					'P', 'r', 'o', 'd ', 'u', 'c', 't', ' ', 'I', 'd', 'e', 'n', 't', 'i', 'f', 'c',
+					'1', '.', '0', '1'};
+
+				MV_CopyMemory( pReq->Data_Buffer, 
+								MV_INQUIRY_DATA, 
+								MV_MIN(pReq->Data_Transfer_Length, STANDARD_INQUIRY_DATA_SIZE)
+								);
+#else
+				{
+					/* Build a 42-byte standard inquiry response.
+					 * Vendor/product are derived from the ATA model string. */
+					MV_U8 Vendor[9],Product[17], temp[24];
+				    MV_U8 buff[42];
+					MV_U32 inquiryLen;
+					MV_ZeroMemory(buff, 42);
+					
+					if (pDevice->Device_Type & \
+					    DEVICE_TYPE_ATAPI) {
+						/* CD/DVD: peripheral type 5, removable medium bit. */
+						buff[0] = 0x05;
+						buff[1] = 0x00 | 1U<<7; 
+					} else {
+						buff[0] = 0;
+						buff[1] = 0;
+					}
+
+					buff[2] = 0x05;   //TBD 3  /*claim conformance to SCSI-3*/
+					buff[3] = 0x12;    /* set RESPONSE DATA FORMAT to 2*/
+					buff[4] = 42 - 5;
+					buff[6] = 0x0;     /* tagged queuing*/
+					buff[7] = 0X13;	//TBD 2;
+
+					MV_CopyMemory(temp, pDevice->Model_Number, 24);
+					{
+						/* Split the model string into vendor + product.
+						 * If the first 9 chars contain no space, guess
+						 * the vendor from well-known model prefixes. */
+						MV_U32 i;
+						for (i = 0; i < 9; i++)
+						{
+							if (temp[i] == ' ')
+							{
+								break;
+							}
+						}
+						if (i == 9)
+						{
+							if (((temp[0] == 'I') && (temp[1] == 'C')) ||
+								((temp[0] == 'H') && (temp[1] == 'T')) ||
+								((temp[0] == 'H') && (temp[1] == 'D')) ||
+								((temp[0] == 'D') && (temp[1] == 'K')))
+							{ /*Hitachi*/
+								Vendor[0] = 'H';
+								Vendor[1] = 'i';
+								Vendor[2] = 't';
+								Vendor[3] = 'a';
+								Vendor[4] = 'c';
+								Vendor[5] = 'h';
+								Vendor[6] = 'i';
+								Vendor[7] = ' ';
+								Vendor[8] = '\0';
+							}
+							else if ((temp[0] == 'S') && (temp[1] == 'T'))
+							{
+								/*Seagate*/
+								Vendor[0] = 'S';
+								Vendor[1] = 'e';
+								Vendor[2] = 'a';
+								Vendor[3] = 'g';
+								Vendor[4] = 'a';
+								Vendor[5] = 't';
+								Vendor[6] = 'e';
+								Vendor[7] = ' ';
+								Vendor[8] = '\0';
+							}
+							else
+							{
+								/*Unkown*/
+								Vendor[0] = 'A';
+								Vendor[1] = 'T';
+								Vendor[2] = 'A';
+								Vendor[3] = ' ';
+								Vendor[4] = ' ';
+								Vendor[5] = ' ';
+								Vendor[6] = ' ';
+								Vendor[7] = ' ';
+								Vendor[8] = '\0';
+							}
+							MV_CopyMemory(Product, temp, 16);
+							Product[16] = '\0';
+						}
+						else
+						{
+							/* Vendor is the part before the first space,
+							 * padded; product is the remainder after
+							 * skipping the spaces. */
+							MV_U32 j = i;
+							MV_CopyMemory(Vendor, temp, j);
+							for (; j < 9; j++)
+							{
+								Vendor[j] = ' ';
+							}
+							Vendor[8] = '\0';
+							for (; i < 24; i++)
+							{
+								if (temp[i] != ' ')
+								{
+									break;
+								}
+							}
+							MV_CopyMemory(Product, &temp[i], 24 - i);
+							Product[16] = '\0';
+						}
+						MV_CopyMemory(&buff[8], Vendor, 8);
+						MV_CopyMemory(&buff[16], Product, 16);
+						MV_CopyMemory(&buff[32], pDevice->Firmware_Revision, 4);
+					}
+					/* Vendor-specific tail identifying this driver. */
+					MV_CopyMemory(&buff[36], "MVSATA", 6);
+
+					/*buff[32] = '3';*/
+
+					inquiryLen = 42;
+					MV_CopyMemory( pReq->Data_Buffer, 
+								buff, 
+								MV_MIN(pReq->Data_Transfer_Length, inquiryLen)
+								);
+					pReq->Data_Transfer_Length = MV_MIN(pReq->Data_Transfer_Length, inquiryLen);
+				}
+#endif
+				pReq->Scsi_Status = REQ_STATUS_SUCCESS;
+			}
+			else
+			{
+				pReq->Scsi_Status = REQ_STATUS_NO_DEVICE;
+			}
+		}
+	}
+#endif	/* #ifndef _OS_BIOS */
+}
+
+#ifndef _OS_BIOS
+#define MAX_MODE_PAGE_LENGTH	28
+/*
+ * Core_get_mode_page_caching - fill pBuf with the Caching mode page
+ * (page code 0x08) for pDevice.  Returns the page length in bytes (0x14).
+ * The WCE bit reflects the device's write-cache setting; the caller is
+ * expected to have zeroed pBuf beforehand (pBuf[2] is only OR-ed).
+ * NOTE(review): the comment mentions RCD but only WCE is actually set
+ * from device state -- RCD stays 0; confirm that is intended.
+ */
+MV_U32 Core_get_mode_page_caching(MV_PU8 pBuf, PDomain_Device pDevice)
+{
+	pBuf[0] = 0x08;		/* Page Code, PS = 0; */
+	pBuf[1] = 0x12;		/* Page Length */
+	/* set the WCE and RCD bit based on device identification data */
+	if (pDevice->Setting & DEVICE_SETTING_WRITECACHE_ENABLED)
+		pBuf[2] |= MV_BIT(2);
+	pBuf[3] = 0;	/* Demand read/write retention priority */
+	pBuf[4] = 0xff;	/* Disable pre-fetch trnasfer length (4,5) */
+	pBuf[5] = 0xff;	/* all anticipatory pre-fetching is disabled */
+	pBuf[6] = 0;	/* Minimum pre-fetch (6,7) */
+	pBuf[7] = 0;
+	pBuf[8] = 0;	/* Maximum pre-fetch (8,9) */
+	pBuf[9] = 0x01;
+	pBuf[10] = 0;	/* Maximum pre-fetch ceiling (10,11) */
+	pBuf[11] = 0x01;
+//	pBuf[12] |= MV_BIT(5);	/* How do I know if Read Ahead is enabled or disabled???  */
+	pBuf[12] = 0x00;
+	pBuf[13] = 0x01;	/* Number of cache segments */
+	pBuf[14] = 0xff;	/* Cache segment size (14, 15) */
+	pBuf[15] = 0xff;
+	return 0x14;	/* Total page length in byte */
+}
+
+#endif
+
+/*
+ * mvScsiModeSense - emulate MODE SENSE (6) and MODE SENSE (10).
+ * Only the Caching page (0x08) is implemented; 0x3F ("all pages")
+ * therefore returns just that page.  No block descriptors are
+ * returned.  Any other page code fails with ILLEGAL REQUEST /
+ * INVALID FIELD IN CDB.
+ */
+void mvScsiModeSense(PCore_Driver_Extension pCore, PMV_Request pReq)
+{
+#ifndef _OS_BIOS
+	MV_U8 pageCode = pReq->Cdb[2] & 0x3F;		/* Same for mode sense 6 and 10 */
+	MV_U8 ptmpBuf[MAX_MODE_PAGE_LENGTH];
+	MV_U32 pageLen = 0, tmpLen = 0;
+	PDomain_Device pDevice = NULL;
+	MV_U8 portId, deviceId;
+	MV_U8 *buf = pReq->Data_Buffer;
+
+	portId = PATA_MapPortId(pReq->Device_Id);
+	deviceId = PATA_MapDeviceId(pReq->Device_Id);
+	pDevice = &pCore->Ports[portId].Device[deviceId];
+
+	MV_ZeroMemory(buf, pReq->Data_Transfer_Length);
+	MV_ZeroMemory(ptmpBuf, MAX_MODE_PAGE_LENGTH);
+	/* Block Descriptor Length set to 0 - No Block Descriptor */
+
+	switch (pageCode) {
+	case 0x3F:		/* Return all pages */
+	case 0x08:		/* Caching mode page */
+		if (pReq->Cdb[0]==SCSI_CMD_MODE_SENSE_6) {
+			/* 4-byte mode parameter header, then the page. */
+			pageLen = Core_get_mode_page_caching((ptmpBuf+4), pDevice);
+			ptmpBuf[0] = (MV_U8)(4 + pageLen - 1);	/* Mode data length */
+			ptmpBuf[2] = 0x10;
+			tmpLen = MV_MIN(pReq->Data_Transfer_Length, (pageLen+4));
+		}
+		else {	/* Mode Sense 10 */
+			/* 8-byte mode parameter header, then the page. */
+			pageLen = Core_get_mode_page_caching((ptmpBuf+8), pDevice);
+			/* Mode Data Length, it does not include the number of bytes in */
+			/* Mode Data Length field */
+			tmpLen = 8 + pageLen - 2;
+			ptmpBuf[0] = (MV_U8)(((MV_U16)tmpLen) >> 8);
+			ptmpBuf[1] = (MV_U8)tmpLen;
+			ptmpBuf[2] = 0x00;
+			ptmpBuf[3] = 0x10;
+			tmpLen = MV_MIN(pReq->Data_Transfer_Length, (pageLen+8));
+		}
+		MV_CopyMemory(buf, ptmpBuf, tmpLen);
+		pReq->Data_Transfer_Length = tmpLen;
+		pReq->Scsi_Status = REQ_STATUS_SUCCESS;
+		break;
+	default:
+		pReq->Scsi_Status = REQ_STATUS_HAS_SENSE;
+		Core_FillSenseData(pReq, SCSI_SK_ILLEGAL_REQUEST, SCSI_ASC_INVALID_FEILD_IN_CDB);
+		break;
+	}
+#endif
+}
+
+/*
+ * mvScsiReportLun - emulate REPORT LUNS.  Reports a single LUN (0):
+ * the response is an 8-byte LUN list length header followed by the
+ * (all-zero) LUN 0 entry, which MV_ZeroMemory already provides.
+ * Fails with ILLEGAL REQUEST when ALLOCATION LENGTH < 16 as required
+ * by SPC.
+ */
+void mvScsiReportLun(PCore_Driver_Extension pCore, PMV_Request pReq)
+{
+#ifndef _OS_BIOS
+	MV_U32 allocLen, lunListLen;
+	MV_PU8 pBuf = pReq->Data_Buffer;
+
+	/*
+	 * ALLOCATION LENGTH is CDB bytes 6..9, big-endian.  Cast each byte
+	 * to MV_U32 *before* shifting: an MV_U8 promotes to signed int, so
+	 * the original (Cdb[6] << 24) shifted into the sign bit for values
+	 * >= 0x80, which is undefined behavior in C.
+	 */
+	allocLen = (((MV_U32)pReq->Cdb[6]) << 24) |
+			   (((MV_U32)pReq->Cdb[7]) << 16) |
+			   (((MV_U32)pReq->Cdb[8]) << 8) |
+			   ((MV_U32)pReq->Cdb[9]);
+
+	/* allocation length should not less than 16 bytes */
+	if (allocLen < 16) {
+		pReq->Scsi_Status = REQ_STATUS_HAS_SENSE;
+		Core_FillSenseData(pReq, SCSI_SK_ILLEGAL_REQUEST, SCSI_ASC_INVALID_FEILD_IN_CDB);
+		return;
+	}
+
+	MV_ZeroMemory(pBuf, pReq->Data_Transfer_Length);
+	/* Only LUN 0 has device */
+	lunListLen = 8;
+	pBuf[0] = (MV_U8)((lunListLen & 0xFF000000) >> 24);
+	pBuf[1] = (MV_U8)((lunListLen & 0x00FF0000) >> 16);
+	pBuf[2] = (MV_U8)((lunListLen & 0x0000FF00) >> 8);
+	pBuf[3] = (MV_U8)(lunListLen & 0x000000FF);
+	pReq->Scsi_Status = REQ_STATUS_SUCCESS;
+#endif
+}
+
+/*
+ * mvScsiReadCapacity - emulate READ CAPACITY (10).  Returns the last
+ * addressable LBA (Max_LBA - 1) and the block length, both big-endian,
+ * in the first 8 bytes of the data buffer.  Disks larger than 32 bits
+ * of LBA are not handled here (asserted; needs READ CAPACITY 16).
+ */
+void mvScsiReadCapacity(PCore_Driver_Extension pCore, PMV_Request pReq)
+{
+#ifndef _OS_BIOS
+	PDomain_Device pDevice = NULL;
+	MV_LBA maxLBA;
+	MV_U32 blockLength;
+	MV_PU32 pU32Buffer;
+	MV_U8 portId, deviceId;
+
+	portId = PATA_MapPortId(pReq->Device_Id);
+	deviceId = PATA_MapDeviceId(pReq->Device_Id);
+#ifndef SECTOR_SIZE
+	#define SECTOR_SIZE	512	//TBD
+#endif
+
+	MV_DASSERT( portId < MAX_PORT_NUMBER );
+
+	/* When PMI (Cdb[8] bit 1) is 0, the LBA field (Cdb[2..5]) must be 0. */
+	if ((pReq->Cdb[8] & MV_BIT(1)) == 0)
+	{
+		if ( pReq->Cdb[2] || pReq->Cdb[3] || pReq->Cdb[4] || pReq->Cdb[5] )
+		{
+			pReq->Scsi_Status = REQ_STATUS_INVALID_REQUEST;
+			return;
+		}
+	}
+
+    /* 
+	 * The disk size as indicated by the ATA spec is the total addressable
+     * sectors on the drive ; while the SCSI translation of the command
+     * should be the last addressable sector.
+     */
+	pDevice = &pCore->Ports[portId].Device[deviceId];
+	maxLBA.value = pDevice->Max_LBA.value-1;
+	blockLength = SECTOR_SIZE;			//TBD
+	pU32Buffer = (MV_PU32)pReq->Data_Buffer;
+	MV_ASSERT(maxLBA.high==0);	//TBD: Support Read Capactiy 16 
+
+	pU32Buffer[0] = CPU_TO_BIG_ENDIAN_32(maxLBA.low);
+	pU32Buffer[1] = CPU_TO_BIG_ENDIAN_32(blockLength);
+
+	pReq->Scsi_Status = REQ_STATUS_SUCCESS;
+#endif /* #ifndef _OS_BIOS */
+
+}
+
+void Port_Monitor(PDomain_Port pPort);
+#if defined(SUPPORT_ERROR_HANDLING)
+
+/*
+ * Core_IsInternalRequest - tell whether pReq is the private internal
+ * request owned by the device it is addressed to.  Returns MV_FALSE
+ * for requests whose Device_Id maps outside the known ports/devices.
+ */
+MV_BOOLEAN Core_IsInternalRequest(PCore_Driver_Extension pCore, 
+				  PMV_Request pReq)
+{
+	PDomain_Device pDevice;
+	MV_U8 portId = PATA_MapPortId(pReq->Device_Id);
+	MV_U8 deviceId = PATA_MapDeviceId(pReq->Device_Id);
+
+	/* Ids outside the topology cannot belong to any device we own. */
+	if ( (portId>=MAX_PORT_NUMBER) || (deviceId>=MAX_DEVICE_PER_PORT) )
+		return MV_FALSE;
+
+	pDevice = &pCore->Ports[portId].Device[deviceId];
+	/* Internal means: it is exactly that device's private request. */
+	return ( pReq==pDevice->Internal_Req ) ? MV_TRUE : MV_FALSE;
+}
+
+/*
+ * Core_ResetChannel - recover a port after a request timeout.
+ * Toggles the port's start bit, requeues all running (non-internal)
+ * requests onto the core waiting list, clears per-device state and
+ * timers, re-initializes the tag pool, releases internal requests,
+ * and finally resets the port (PATA or SATA as appropriate).
+ */
+void Core_ResetChannel(MV_PVOID Device)
+{
+	PDomain_Device pDevice = (PDomain_Device)Device;
+	PDomain_Port pPort = pDevice->PPort;
+	PCore_Driver_Extension pCore = pPort->Core_Extension;
+	PMV_Request pReq;
+	MV_LPVOID portMmio = pPort->Mmio_Base;
+	MV_U32 tmp;
+	MV_U8 i;
+	
+	MV_DPRINT(("Request time out. Resetting channel %d.\n", pPort->Id));
+#ifdef SUPPORT_EVENT
+	HBA_AddEvent( pCore, EVT_ID_HD_TIMEOUT, pDevice->Id, 3, 0, NULL );
+#endif /* SUPPORT_EVENT */
+
+	/* toggle the start bit in cmd register */
+	tmp = MV_REG_READ_DWORD( portMmio, PORT_CMD );
+	MV_REG_WRITE_DWORD( portMmio, PORT_CMD, tmp & ~MV_BIT(0));
+	MV_REG_WRITE_DWORD( portMmio, PORT_CMD, tmp | MV_BIT(0));
+	HBA_SleepMillisecond( pCore, 100 );
+
+	Port_Monitor( pPort );
+	pDevice->Reset_Count++;
+
+	/* Whether it's during reset, we got reset again. */
+	if ( pPort->Port_State!=PORT_STATE_INIT_DONE )
+	{
+		MV_PRINT("Timeout during reset.\n");
+	}
+
+	/* put all the running requests back into waiting list */
+	for ( i=0; i<MAX_SLOT_NUMBER; i++ )
+	{
+		pReq = pPort->Running_Req[i];
+
+		if (pReq) {
+			/*
+			 * If this channel has multiple devices, pReq is 
+			 * not the internal request of pDevice
+			 */
+			if ( !Core_IsInternalRequest(pCore, pReq) )
+			{
+				/* Requeue at the head so it is retried first. */
+				List_Add(&pReq->Queue_Pointer, &pCore->Waiting_List);
+			}
+			else 
+			{
+				/* Can be reset command or request sense command */
+				if ( SCSI_IS_REQUEST_SENSE(pReq->Cdb[0]) )
+				{
+					/* Requeue the original request the internal
+					 * request-sense was issued on behalf of. */
+					MV_ASSERT( pReq->Org_Req!=NULL );
+					if ( pReq->Org_Req )
+						List_Add( &((PMV_Request)pReq->Org_Req)->Queue_Pointer, &pCore->Waiting_List);
+				}
+			}
+			
+#if defined(SUPPORT_ERROR_HANDLING) && defined(_OS_LINUX)
+			hba_remove_timer(pReq);
+			pReq->eh_flag = 1;
+#endif /* defined(SUPPORT_ERROR_HANDLING) && defined(_OS_LINUX) */
+			/* Free the slot and its tag. */
+			pPort->Running_Req[i] = NULL;
+			pPort->Running_Slot &= ~(1L<<i);
+			Tag_ReleaseOne(&pPort->Tag_Pool, i);
+		}
+	}
+	
+	/* reset device related variables */
+	for ( i=0; i<MAX_DEVICE_PER_PORT; i++ )
+	{
+		pDevice = &pPort->Device[i];
+		
+		pDevice->Device_Type = 0;
+		pDevice->Need_Notify = MV_FALSE;
+#ifdef SUPPORT_TIMER 
+		if( pDevice->Timer_ID != NO_CURRENT_TIMER )
+		{
+			Timer_CancelRequest( pCore, pDevice->Timer_ID );
+			pDevice->Timer_ID = NO_CURRENT_TIMER;
+		}
+#endif /* SUPPORT_TIMER */
+		pDevice->Outstanding_Req = 0;
+		
+		/*
+		 * Go through the waiting list. If there is some reset 
+		 * request, remove that request. 
+		 */
+		mvRemoveDeviceWaitingList(pCore, pDevice->Id, MV_FALSE);
+	}
+
+	// reset the tag stack - to guarantee soft reset is issued at slot 0
+	Tag_Init( &pPort->Tag_Pool, MAX_TAG_NUMBER );
+
+	/* Drop the internal request of every functional device; it will be
+	 * re-allocated when the device is re-initialized after the reset. */
+	for( i=0; i<MAX_DEVICE_PER_PORT; i++ )
+	{
+		if( (pPort->Device[i].Status & DEVICE_STATUS_FUNCTIONAL) && 
+			(pPort->Device[i].Internal_Req != NULL) )
+		{
+			pCore->Total_Device_Count--;
+			ReleaseInternalReqToPool( pCore, pPort->Device[i].Internal_Req );
+			pPort->Device[i].Internal_Req = NULL;
+		}
+	}
+	pPort->Port_State = PORT_STATE_IDLE;
+	if ( pPort->Type == PORT_TYPE_PATA )
+		PATA_PortReset( pPort, MV_TRUE );
+	else
+		SATA_PortReset( pPort, MV_FALSE );
+}
+#endif /* SUPPORT_ERROR_HANDLING || _OS_LINUX */
+
+/*
+ * HandleInstantRequest - complete requests that need no hardware access.
+ * Emulates INQUIRY, MODE SENSE, REPORT LUNS, READ CAPACITY and a few
+ * trivially-successful commands for (non-ATAPI) disks.  ATAPI devices
+ * pass these commands through to the device instead.
+ * Returns MV_TRUE when the request was completed here and can be
+ * returned to the OS; MV_FALSE when it must be sent to the hardware.
+ */
+MV_BOOLEAN HandleInstantRequest(PCore_Driver_Extension pCore, PMV_Request pReq)
+{
+	PDomain_Device pDevice = NULL;
+	MV_U8 portId, deviceId;
+
+	portId = PATA_MapPortId(pReq->Device_Id);
+	deviceId = PATA_MapDeviceId(pReq->Device_Id);
+
+	if ( portId < MAX_PORT_NUMBER )				
+		pDevice = &pCore->Ports[portId].Device[deviceId];
+
+	/* Functional ATAPI devices answer these commands themselves. */
+	if (pDevice && 
+	    (pDevice->Device_Type & DEVICE_TYPE_ATAPI) &&
+	    (pDevice->Status & DEVICE_STATUS_FUNCTIONAL))
+	{
+#ifdef _OS_LINUX
+		switch (pReq->Cdb[0]) 
+		{
+		case SCSI_CMD_MODE_SENSE_6:
+			/* convert to atapi cdb12 */
+			pReq->Cdb[8] = pReq->Cdb[4];
+			pReq->Cdb[4] = 0;
+			pReq->Cdb[0] = SCSI_CMD_MODE_SENSE_10;
+			/* fall through - unmap like the other emulated commands */
+		case SCSI_CMD_INQUIRY:
+		case SCSI_CMD_READ_CAPACITY_10:
+		case SCSI_CMD_READ_CAPACITY_16:
+		case SCSI_CMD_REPORT_LUN:
+		case SCSI_CMD_MODE_SENSE_10:
+			if (pReq->Cmd_Initiator == 
+			    HBA_GetModuleExtension(pReq->Cmd_Initiator, MODULE_HBA))
+				HBA_kunmap_sg(pReq);
+			break;
+		default:
+			break;
+		}
+#endif /* _OS_LINUX */
+		return MV_FALSE;
+	}
+
+	switch ( pReq->Cdb[0] )
+	{
+	case SCSI_CMD_INQUIRY:
+		mvScsiInquiry(pCore, pReq);
+		return MV_TRUE;
+	case SCSI_CMD_MODE_SENSE_6:
+	case SCSI_CMD_MODE_SENSE_10:
+		mvScsiModeSense(pCore, pReq);
+		return MV_TRUE;
+	case SCSI_CMD_REPORT_LUN:
+		mvScsiReportLun(pCore, pReq);
+		return MV_TRUE;
+	case SCSI_CMD_READ_CAPACITY_10:
+		mvScsiReadCapacity(pCore, pReq);
+		return MV_TRUE;
+#ifdef _OS_LINUX
+	case SCSI_CMD_READ_CAPACITY_16: /* 0x9e SERVICE_ACTION_IN */
+		if ((pReq->Cdb[1] & 0x1f) == 0x10 /* SAI_READ_CAPACITY_16 */) {
+			MV_PU32 pU32Buffer = (MV_PU32)pReq->Data_Buffer;
+			MV_LBA maxLBA;
+			MV_U32 blockLength = SECTOR_SIZE;
+
+			/* pDevice is NULL when portId >= MAX_PORT_NUMBER; the
+			 * original code dereferenced it unconditionally here. */
+			if (pDevice == NULL) {
+				pReq->Scsi_Status = REQ_STATUS_NO_DEVICE;
+				return MV_TRUE;
+			}
+			/* Last addressable LBA, big-endian, then block length. */
+			maxLBA.value = pDevice->Max_LBA.value-1;
+
+			pU32Buffer[0] = CPU_TO_BIG_ENDIAN_32(maxLBA.low);
+			pU32Buffer[1] = CPU_TO_BIG_ENDIAN_32(maxLBA.high);
+			pU32Buffer[2] = CPU_TO_BIG_ENDIAN_32(blockLength);
+			pReq->Scsi_Status = REQ_STATUS_SUCCESS;
+		}
+		else
+			pReq->Scsi_Status = REQ_STATUS_INVALID_REQUEST;
+		return MV_TRUE;
+#endif /* _OS_LINUX */
+	case SCSI_CMD_REQUEST_SENSE:	/* This is only for Thor hard disk */
+	case SCSI_CMD_TEST_UNIT_READY:
+	case SCSI_CMD_RESERVE_6:	/* For Thor, just return good status */
+	case SCSI_CMD_RELEASE_6:
+#ifdef CORE_IGNORE_START_STOP_UNIT
+	case SCSI_CMD_START_STOP_UNIT:
+#endif
+		pReq->Scsi_Status = REQ_STATUS_SUCCESS;
+		return MV_TRUE;
+#ifdef CORE_SUPPORT_API
+	case APICDB0_PD:
+		return Core_pd_command(pCore, pReq);
+#endif /* CORE_SUPPORT_API */
+	}
+
+	return MV_FALSE;
+}
+
+/*
+ * Build the task file / command header for one request and issue it to
+ * the controller.
+ *
+ * Returns:
+ *   MV_QUEUE_COMMAND_RESULT_FINISHED - request failed validation; the
+ *	caller should complete it back to the OS immediately.
+ *   MV_QUEUE_COMMAND_RESULT_FULL     - queueing constraints prevent
+ *	sending now; the caller should requeue the request.
+ *   MV_QUEUE_COMMAND_RESULT_SENDTED  - request was handed to the
+ *	hardware ("SENDTED" is a historical identifier typo kept for
+ *	compatibility with its other users).
+ */
+MV_QUEUE_COMMAND_RESULT
+PrepareAndSendCommand(
+	IN PCore_Driver_Extension pCore,
+	IN PMV_Request pReq
+	)
+{
+#ifdef MV_IRQ_MODE
+   	PMV_DATA_STRUCT pDriverData = MyDriverDataBaseOff;
+#endif
+
+	PDomain_Device pDevice = NULL;
+	PDomain_Port pPort = NULL;
+	MV_BOOLEAN isPATA = MV_FALSE;
+	MV_U8 tag, i, count=0;
+	ATA_TaskFile taskFile;
+	MV_BOOLEAN ret;
+
+	/* Associate this request to the corresponding device and port */
+	pDevice = &pCore->Ports[PATA_MapPortId(pReq->Device_Id)].Device[PATA_MapDeviceId(pReq->Device_Id)];
+	pPort = pDevice->PPort;
+
+	if ( !(pDevice->Status&DEVICE_STATUS_FUNCTIONAL) )
+	{
+		pReq->Scsi_Status = REQ_STATUS_NO_DEVICE;
+		return MV_QUEUE_COMMAND_RESULT_FINISHED;
+	}
+
+	/* Set the Cmd_Flag to indicate which type of command it is. */
+	if ( !Category_CDB_Type(pDevice, pReq) )
+	{
+		pReq->Scsi_Status = REQ_STATUS_INVALID_REQUEST;
+		/* Invalid request and can be returned to OS now. */
+		return MV_QUEUE_COMMAND_RESULT_FINISHED;
+	}
+	//MV_DUMPC32(0xCCCC5502);
+
+	MV_DASSERT( pPort!=NULL );
+	if ( pPort->Running_Slot!=0 )	/* Some requests are running. */
+	{
+		/* NCQ and non-NCQ commands must never be outstanding on the
+		 * port at the same time; hold the request until the port
+		 * drains the other kind. */
+		if (	( (pReq->Cmd_Flag&CMD_FLAG_NCQ) && !(pPort->Setting&PORT_SETTING_NCQ_RUNNING) )
+			||  ( !(pReq->Cmd_Flag&CMD_FLAG_NCQ) && (pPort->Setting&PORT_SETTING_NCQ_RUNNING) )
+			)
+		{
+			return MV_QUEUE_COMMAND_RESULT_FULL;			
+		}
+	
+		/* In order for request sense to immediately follow the error request. */
+		if ( pDevice->Device_Type&DEVICE_TYPE_ATAPI )
+			return MV_QUEUE_COMMAND_RESULT_FULL;
+
+		/* One request at a time */
+		if ( (pReq->Scsi_Status==REQ_STATUS_RETRY)
+			|| (pPort->Setting&PORT_SETTING_DURING_RETRY) 
+			)
+			return MV_QUEUE_COMMAND_RESULT_FULL;
+	}
+	//MV_DUMPC32(0xCCCC5503);
+
+	/* we always reserve one slot in case of PM hot plug */
+	for (i=0; i<MAX_SLOT_NUMBER; i++)
+	{
+		if (pPort->Running_Slot & MV_BIT(i))
+			count++;
+	}
+	if (count >= (MAX_SLOT_NUMBER - 1))
+	{
+		return MV_QUEUE_COMMAND_RESULT_FULL;
+	}
+
+	isPATA = (pPort->Type==PORT_TYPE_PATA)?1:0;
+	//MV_DUMPC32(0xCCCC5504);
+
+	/* Get one slot for this request. */
+	tag = Tag_GetOne(&pPort->Tag_Pool);
+
+	if ( pDevice->Device_Type&DEVICE_TYPE_ATAPI )
+		ret = ATAPI_CDB2TaskFile(pDevice, pReq, &taskFile);
+	else
+		ret = ATA_CDB2TaskFile(pDevice, pReq, tag, &taskFile);
+	if ( !ret )
+	{
+		pReq->Scsi_Status = REQ_STATUS_INVALID_REQUEST;
+		/* Give the slot back before bailing out. */
+		Tag_ReleaseOne(&pPort->Tag_Pool, tag);
+		/* Invalid request and can be returned to OS now. */
+		return MV_QUEUE_COMMAND_RESULT_FINISHED;	
+	}
+
+	//MV_DUMPC32(0xCCCC5505);
+	
+#ifdef _OS_BIOS
+	/* All ports share memory, so need Cmd_List and Cmd_Table before send command */
+	if(pPort->Cmd_List)
+		MV_ZeroMemory((MV_PU8)pPort->Cmd_List + tag,SATA_CMD_LIST_SIZE);
+	if(pPort->Cmd_Table)
+		MV_ZeroMemory((MV_PU8)pPort->Cmd_Table+ tag,SATA_CMD_TABLE_SIZE);
+	if(pPort->RX_FIS)
+		MV_ZeroMemory((MV_PU8)pPort->RX_FIS+ tag,SATA_RX_FIS_SIZE);
+#endif	/* #ifdef _OS_BIOS */
+
+	MV_DUMPC32(0xCCCC5506);
+
+	if ( !isPATA )
+		SATA_PrepareCommandHeader(pPort, pReq, tag);
+	else
+		PATA_PrepareCommandHeader(pPort, pReq, tag);
+
+
+	if ( !isPATA )
+		SATA_PrepareCommandTable(pPort, pReq, tag, &taskFile);
+	else
+		PATA_PrepareCommandTable(pPort, pReq, tag, &taskFile);
+	MV_DUMPC32(0xCCCC5507);
+
+
+
+	/* NOTE(review): SATA_SendFrame is used for both port types here --
+	 * presumably it only rings the slot doorbell; confirm for PATA. */
+	SATA_SendFrame(pPort, pReq, tag);
+	/* Request is send to the hardware and not finished yet. */
+	return MV_QUEUE_COMMAND_RESULT_SENDTED;
+}
+
+/*
+ * Drain the core waiting list: complete requests that can be answered
+ * instantly, push the rest to the hardware, and requeue whatever the
+ * hardware cannot take yet.  Returns early (leaving the remainder
+ * queued) whenever a request has to be pushed back.
+ */
+void Core_HandleWaitingList(PCore_Driver_Extension pCore)
+{
+	PMV_Request pReq = NULL;
+	MV_QUEUE_COMMAND_RESULT result;
+	/*
+	 * These were previously declared only under SUPPORT_HOT_PLUG, but
+	 * the MV_QUEUE_COMMAND_RESULT_SENDTED case below uses them
+	 * unconditionally - declare them unconditionally so the function
+	 * compiles when SUPPORT_HOT_PLUG is not defined.
+	 */
+	PDomain_Device pDevice;
+	MV_U8 portId, deviceId;
+#if defined(SUPPORT_ERROR_HANDLING) && defined(_OS_LINUX)
+	MV_U32 timeout;
+#endif /* defined(SUPPORT_ERROR_HANDLING) && defined(_OS_LINUX) */
+	//MV_DUMPRUN(0xCCF2);
+
+	/* Get the request header */
+	while ( !List_Empty(&pCore->Waiting_List) )
+	{
+		pReq = (PMV_Request) List_GetFirstEntry(&pCore->Waiting_List, 
+							MV_Request, 
+							Queue_Pointer);
+		if ( NULL == pReq ) {
+			MV_ASSERT(0);
+			break;
+		}
+
+#if defined(SUPPORT_ERROR_HANDLING) && defined(_OS_LINUX)
+		pReq->eh_flag = 0;
+		hba_init_timer(pReq);
+#endif /* defined(SUPPORT_ERROR_HANDLING) && defined(_OS_LINUX) */
+
+		/* During reset, we still have internal requests need to 
+		 *be handled. */
+
+		//TBD: Internal request is always at the beginning.
+		if ( (pCore->Need_Reset)&&(pReq->Cmd_Initiator!=pCore) ) 
+		{
+			/* Return the request back. */
+			List_Add(&pReq->Queue_Pointer, &pCore->Waiting_List);
+			return;
+		}
+	
+#ifdef SUPPORT_HOT_PLUG
+		/* hot plug - device is gone, reject this request */
+		if ( pReq->Device_Id != CONSOLE_ID )
+		{
+			portId = PATA_MapPortId(pReq->Device_Id);
+			deviceId = PATA_MapDeviceId(pReq->Device_Id);
+			pDevice = &pCore->Ports[portId].Device[deviceId];
+
+			if ( !(pDevice->Status & DEVICE_STATUS_FUNCTIONAL) )
+			{
+				pReq->Scsi_Status = REQ_STATUS_NO_DEVICE;
+				CompleteRequest(pCore, pReq, NULL);
+				/* NOTE(review): this returns rather than
+				 * continuing; remaining queued requests wait
+				 * for the next invocation - confirm intended. */
+				return;
+			}
+
+			/* Reset is not done yet. */
+			if ( pDevice->State!=DEVICE_STATE_INIT_DONE )
+			{
+				/* check if it is the reset commands */
+				if ( !Core_IsInternalRequest(pCore, pReq) )
+				{
+					List_Add(&pReq->Queue_Pointer, &pCore->Waiting_List); /* Return the request back. */
+					return;
+				} 
+				else 
+				{
+					/* 
+					 * Cannot be the request sense. 
+					 * It's not pushed back. 
+					 */
+					MV_ASSERT( !SCSI_IS_REQUEST_SENSE(pReq->Cdb[0]) );
+				}
+			}
+		}
+#endif /* SUPPORT_HOT_PLUG */
+
+		/* Whether we can handle this request without hardware access? */
+		if ( HandleInstantRequest(pCore, pReq) ) 
+		{
+			CompleteRequest(pCore, pReq, NULL);
+			continue;
+		}
+
+#if !defined(_OS_BIOS) && defined(_OS_WINDOWS)
+		/* handle the cmd which data length is > 128k 
+		 * We suppose the data length was multiples of 128k first. 
+		 * If not, we will still verify multiples of 128k since 
+		 * no data transfer.
+		 */
+		if(pReq->Cdb[0] == SCSI_CMD_VERIFY_10)
+		{
+			/* Local name differs from the outer pDevice to avoid shadowing. */
+			PDomain_Device pVerifyDev = &pCore->Ports[PATA_MapPortId(pReq->Device_Id)].Device[PATA_MapDeviceId(pReq->Device_Id)];
+			MV_U32 sectors = SCSI_CDB10_GET_SECTOR(pReq->Cdb);
+			
+			if((!(pVerifyDev->Capacity&DEVICE_CAPACITY_48BIT_SUPPORTED)) && (sectors > MV_MAX_TRANSFER_SECTOR)){
+				MV_ASSERT(!pReq->Splited_Count );
+				pReq->Splited_Count = (MV_U8)((sectors + MV_MAX_TRANSFER_SECTOR -1)/MV_MAX_TRANSFER_SECTOR) - 1;
+				sectors = MV_MAX_TRANSFER_SECTOR; 
+				SCSI_CDB10_SET_SECTOR(pReq->Cdb, sectors);
+			}
+		}
+#endif /* !defined(_OS_BIOS) && defined(_OS_WINDOWS) */
+
+		result = PrepareAndSendCommand(pCore, pReq);	
+		//MV_PRINT("Send request.\n");
+		//MV_DumpRequest(pReq, MV_FALSE);
+
+		switch ( result )
+		{
+			case MV_QUEUE_COMMAND_RESULT_FINISHED:
+				CompleteRequest(pCore, pReq, NULL);
+				break;
+
+			case MV_QUEUE_COMMAND_RESULT_FULL:
+				List_Add(&pReq->Queue_Pointer, &pCore->Waiting_List);
+				return;
+
+			case MV_QUEUE_COMMAND_RESULT_SENDTED:
+			{
+				portId = PATA_MapPortId(pReq->Device_Id);
+				deviceId = PATA_MapDeviceId(pReq->Device_Id);
+				pDevice = &pCore->Ports[portId].Device[deviceId];
+				pDevice->Outstanding_Req++;
+#if defined(SUPPORT_ERROR_HANDLING) && defined(_OS_LINUX)
+				/*
+				 * timeout to 15 secs if the port has just
+				 * been reset.
+				 */
+				if (pReq->eh_flag) 
+				{
+					timeout = HBA_REQ_TIMER_AFTER_RESET;
+					pReq->eh_flag = 0; 
+				}
+				else
+				{
+					timeout = HBA_REQ_TIMER;
+				}
+
+				/* double timeout value for atapi (writers) */
+				if (pDevice->Device_Type & DEVICE_TYPE_ATAPI)
+					timeout = timeout * 2 + 5;
+
+				hba_add_timer(pReq,
+					      timeout,
+					      __core_req_timeout_handler);
+
+#elif defined(SUPPORT_ERROR_HANDLING)
+#ifdef SUPPORT_TIMER
+				/* start timer for error handling */
+				if( pDevice->Timer_ID == NO_CURRENT_TIMER )
+				{
+					// if no timer is running right now
+					pDevice->Timer_ID = Timer_AddRequest( pCore, REQUEST_TIME_OUT, Core_ResetChannel, pDevice );
+				}
+#endif /* SUPPORT_TIMER */
+#endif /* defined(SUPPORT_ERROR_HANDLING) && defined(_OS_LINUX) */
+#if 0
+				{
+					MV_U8 i;
+					PMV_Request pTmpRequest = NULL;
+					PDomain_Port pPort = pDevice->PPort;
+					/* When there is reset command, other commands won't come here. */
+					if ( SCSI_IS_READ(pReq->Cdb[0]) || SCSI_IS_WRITE(pReq->Cdb[0]) )
+					{
+						for ( i=0; i<MAX_SLOT_NUMBER; i++ )
+						{
+							pTmpRequest = pPort->Running_Req[i];
+							if ( pTmpRequest && (pTmpRequest->Device_Id==pReq->Device_Id) ) 
+							{
+								MV_DASSERT( !SCSI_IS_INTERNAL(pTmpRequest->Cdb[0]) );
+							}
+						}
+
+					}
+				}
+#endif /* 0 */
+				break;
+			}
+			default:
+				MV_ASSERT(MV_FALSE);
+		}
+	}
+	
+#ifdef SUPPORT_CONSOLIDATE
+	{
+		MV_U8 i,j;
+		PDomain_Port pPort;
+
+		if ( pCore->Is_Dump ) return;
+
+		/* 
+		* If there is no more request we can do, 
+		* force command consolidate to run the holding request. 
+		*/
+		for ( i=0; i<MAX_PORT_NUMBER; i++ )
+		{
+			pPort = &pCore->Ports[i];
+			for ( j=0; j<MAX_DEVICE_PER_PORT; j++ )
+			{
+				if ( (pPort->Device[j].Status&DEVICE_STATUS_FUNCTIONAL)
+					&& (pPort->Device[j].Outstanding_Req==0) )
+				{
+					Consolid_PushFireRequest(pCore, i*MAX_DEVICE_PER_PORT+j);
+				}
+			}
+		}
+	}
+#endif /* SUPPORT_CONSOLIDATE */
+}
+
+/*
+ * Interrupt service routine and related funtion
+ * We can split this function to two functions. 
+ * One is used to check and clear interrupt, called in ISR. 
+ * The other is used in DPC.
+ */
+void SATA_PortHandleInterrupt(
+	IN PCore_Driver_Extension pCore,
+	IN PDomain_Port pPort
+	);
+void PATA_PortHandleInterrupt(
+	IN PCore_Driver_Extension pCore,
+	IN PDomain_Port pPort
+	);
+void SATA_HandleSerialError(
+	IN PDomain_Port pPort,
+	IN MV_U32 serialError
+	);
+void SATA_HandleHotplugInterrupt(
+	IN PDomain_Port pPort,
+	IN MV_U32 serialError
+	);
+
+/*
+ * Top-level interrupt service routine.  Reads the host interrupt status,
+ * dispatches to the per-port handlers (OS builds) or records the status
+ * for polling (BIOS builds), and then kicks the waiting list.
+ * Returns MV_TRUE if the interrupt belonged to this controller.
+ */
+MV_BOOLEAN Core_InterruptServiceRoutine(MV_PVOID This)
+{
+	PCore_Driver_Extension pCore = (PCore_Driver_Extension)This;
+	MV_U32	irqStatus;
+	MV_U8 i;
+	PDomain_Port pPort = NULL;
+#ifdef _OS_BIOS	
+   	PMV_DATA_STRUCT pDriverData = MyDriverDataBaseOff;
+	MV_U32                   tmp;
+#endif
+
+	/* Get interrupt status */
+	irqStatus = MV_REG_READ_DWORD(pCore->Mmio_Base, HOST_IRQ_STAT);
+
+	MV_DUMPC32(0xCCCC7781);
+	MV_DUMPC32(irqStatus);
+
+#ifdef _OS_BIOS
+	/* BIOS build: while a host reset is pending, just mask/ack and bail. */
+	if(pCore->host_reseting)
+	{
+		MV_REG_WRITE_DWORD(pCore->Mmio_Base, 8, 1);
+		tmp = MV_IO_READ_DWORD(pCore->Base_Address[4], 0);
+		MV_IO_WRITE_DWORD(pCore->Base_Address[4], 0, (tmp | MV_BIT(18)));
+		tmp = MV_IO_READ_DWORD(pCore->Base_Address[4], 8);
+		MV_IO_WRITE_DWORD(pCore->Base_Address[4], 8, (tmp | MV_BIT(18)));
+
+		pDriverData->PortIRQStatus = 0;
+		return MV_TRUE;
+	}
+#endif
+	
+	/* Only look at ports this instance actually owns. */
+	irqStatus &= pCore->Port_Map;
+
+	if (!irqStatus ) 
+	{
+#ifdef _OS_BIOS	
+		pDriverData->PortIRQStatus = 0;
+#endif
+		MV_DUMPC32(0xCCCC7782);
+		MV_HALTKEY;
+		/* Not our interrupt - let the OS try other handlers. */
+		return MV_FALSE;
+	}
+
+	
+#ifndef _OS_BIOS
+	for ( i=0; i<pCore->Port_Num; i++ ) 
+	{
+		MV_DUMPC16(0xCCFA);
+		MV_DUMPC16(irqStatus);
+		
+		if ( !(irqStatus&(1<<i)) )	/* no interrupt for this port. */
+			continue;
+
+		pPort = &pCore->Ports[i];
+		if ( pPort->Type==PORT_TYPE_PATA )
+			PATA_PortHandleInterrupt(pCore, pPort);
+		else
+			SATA_PortHandleInterrupt(pCore, pPort);
+	}
+	/* If we need to do hard reset. And the controller is idle now. */
+	if ( (pCore->Need_Reset) && (!pCore->Resetting) )
+	{
+		//MV_DUMPRUN(0xCCFE);
+		if( Core_WaitingForIdle(pCore) )
+			Core_ResetHardware(pCore);
+	}
+
+	/* Port handlers may have freed slots - try to send queued requests. */
+	Core_HandleWaitingList(pCore);
+#else
+/* Cleare All interrupt status */
+		MV_REG_WRITE_DWORD(pCore->Mmio_Base, HOST_IRQ_STAT,irqStatus);
+		MV_REG_READ_DWORD(pCore->Mmio_Base, HOST_IRQ_STAT);
+		HBA_SleepMillisecond(NULL, 1);
+		pDriverData->PortIRQStatus = irqStatus;
+		pDriverData->mvIRQStatus=MV_TRUE;
+
+#endif
+
+	return MV_TRUE;
+}
+/*
+ * Handle a SATA serial (SError) condition.  Currently diagnostic only:
+ * the error is logged and otherwise ignored (see TBD below).
+ */
+void SATA_HandleSerialError(
+	IN PDomain_Port pPort,
+	IN MV_U32 serialError
+	)
+{
+	//TBD
+#ifndef _OS_BIOS
+	MV_PRINT("Error: Port_HandleSerialError port=%d error=0x%x.\n", pPort->Id, serialError);
+#endif
+
+}
+
+/*added hot-plug by Lily */
+void SATA_ResetPort(PCore_Driver_Extension pCore, MV_U8 portId);
+
+#ifdef SUPPORT_HOT_PLUG
+void Device_SoftReset(PDomain_Port pPort, PDomain_Device pDevice);
+
+/*
+ * Remove requests targeting one device from the core waiting list.
+ *
+ * The list is rotated exactly once: each entry is popped from the head
+ * and either re-appended at the tail (kept) or moved to a local
+ * remove_List (dropped).  When returnOSRequest is MV_TRUE, OS-originated
+ * requests for the device are completed with REQ_STATUS_NO_DEVICE; when
+ * MV_FALSE only the internal reset command is discarded.
+ */
+void mvRemoveDeviceWaitingList(MV_PVOID This, MV_U16 deviceId, 
+			       MV_BOOLEAN returnOSRequest)
+{
+	PCore_Driver_Extension pCore = (PCore_Driver_Extension)This;
+	PMV_Request pReq = NULL;
+	List_Head *pPos;
+	List_Head remove_List;
+	MV_U8 count = 0, myCount=0, i;
+	/* NOTE(review): pDevice is computed below but never used here. */
+	PDomain_Device pDevice;
+	MV_U8 portNum = PATA_MapPortId(deviceId);
+	MV_U8 deviceNum = PATA_MapDeviceId(deviceId);
+	pDevice = &pCore->Ports[portNum].Device[deviceNum];
+
+	/* Count entries first so the rotation below visits each exactly once. */
+	LIST_FOR_EACH(pPos, &pCore->Waiting_List) {
+		count++;
+	}
+
+	if (count!=0){
+		MV_LIST_HEAD_INIT(&remove_List);
+	}
+
+	/* 
+	 * If returnOSRequest is MV_FALSE, actually we just remove the 
+	 * internal reset command. 
+	 */
+	while ( count>0 )
+	{
+		pReq = (PMV_Request)List_GetFirstEntry(&pCore->Waiting_List, MV_Request, Queue_Pointer);
+
+		if ( pReq->Device_Id==deviceId )
+		{
+			/* 
+			 * TBD: should make change to the 
+			 * mvRemovePortWaitingList too.
+			 */
+			if ( !Core_IsInternalRequest(pCore, pReq) )
+			{
+				if ( returnOSRequest ) {
+					pReq->Scsi_Status = REQ_STATUS_NO_DEVICE;
+					List_AddTail(&pReq->Queue_Pointer, &remove_List);
+					myCount++;
+				} else {
+					List_AddTail(&pReq->Queue_Pointer, &pCore->Waiting_List);
+				}
+			}
+			else 
+			{
+				/* Reset command or request sense */
+				if ( SCSI_IS_REQUEST_SENSE(pReq->Cdb[0]) )
+				{
+					MV_ASSERT( pReq->Org_Req!=NULL );
+					/* Drop the internal sense request; deal
+					 * with the original OS request instead. */
+					pReq = (PMV_Request)pReq->Org_Req;
+					if ( pReq ) {
+						if ( returnOSRequest ) {
+							pReq->Scsi_Status = REQ_STATUS_NO_DEVICE;
+							List_AddTail(&pReq->Queue_Pointer, &remove_List);
+							myCount++;
+						} else {
+							List_AddTail(&pReq->Queue_Pointer, &pCore->Waiting_List);
+						}
+					}
+				} else {
+					/* Reset command is removed. */
+				}
+			}
+		}
+		else
+		{
+			List_AddTail(&pReq->Queue_Pointer, &pCore->Waiting_List);
+		}
+		count--;
+	}//end of while
+
+	/* Complete the dropped requests outside the rotation loop. */
+	for (i=0; i<myCount; i++){
+		pReq = (PMV_Request)List_GetFirstEntry(&remove_List, MV_Request, Queue_Pointer);
+		MV_DASSERT(pReq && (pReq->Scsi_Status==REQ_STATUS_NO_DEVICE));
+		CompleteRequest(pCore, pReq, NULL);
+	}//end of for
+}
+
+/*
+ * Remove all requests targeting any device on the given port from the
+ * waiting list, completing them with REQ_STATUS_NO_DEVICE.  Internal
+ * reset commands are silently discarded; internal request-sense
+ * commands are unwrapped back to their originating OS request first.
+ * Uses the same rotate-once pattern as mvRemoveDeviceWaitingList.
+ */
+void mvRemovePortWaitingList( MV_PVOID This, MV_U8 portId )
+{
+	PCore_Driver_Extension pCore = (PCore_Driver_Extension)This;
+	PMV_Request pReq;
+	List_Head *pPos;
+	List_Head remove_List;
+	MV_U8 count = 0, myCount=0, i;
+
+	/* Count first so each entry is visited exactly once below. */
+	LIST_FOR_EACH(pPos, &pCore->Waiting_List) {
+		count++;
+	}
+
+	if (count!=0){
+		MV_LIST_HEAD_INIT(&remove_List);
+	}
+
+	while ( count>0 )
+	{
+		pReq = (PMV_Request)List_GetFirstEntry(&pCore->Waiting_List, MV_Request, Queue_Pointer);
+		if ( PATA_MapPortId(pReq->Device_Id) == portId )
+		{
+			if ( pReq->Cmd_Initiator==pCore ) {
+				if ( SCSI_IS_READ(pReq->Cdb[0]) || SCSI_IS_WRITE(pReq->Cdb[0]) ) {
+					/* Command consolidate, should return */
+					pReq->Scsi_Status = REQ_STATUS_NO_DEVICE;
+					List_AddTail(&pReq->Queue_Pointer, &remove_List);
+					myCount++;
+				} else if ( SCSI_IS_REQUEST_SENSE(pReq->Cdb[0]) ) {
+					/* Request sense */
+					MV_ASSERT( pReq->Org_Req!=NULL );
+					pReq = (PMV_Request)pReq->Org_Req;
+					if ( pReq ) {
+						pReq->Scsi_Status = REQ_STATUS_NO_DEVICE;
+						List_AddTail(&pReq->Queue_Pointer, &remove_List);
+						myCount++;
+					} 
+				} else {
+					/* Reset command. Ignore. */
+				}
+			} else {
+				/* OS-originated request - fail it back. */
+				pReq->Scsi_Status = REQ_STATUS_NO_DEVICE;
+				List_AddTail(&pReq->Queue_Pointer, &remove_List);
+				myCount++;
+			}
+		}
+		else
+		{
+			List_AddTail(&pReq->Queue_Pointer, &pCore->Waiting_List);
+		}
+		count--;
+	}//end of while
+
+	/* Complete the dropped requests outside the rotation loop. */
+	for (i=0; i<myCount; i++){
+		pReq = (PMV_Request)List_GetFirstEntry(&remove_List, MV_Request, Queue_Pointer);
+		MV_DASSERT(pReq && (pReq->Scsi_Status==REQ_STATUS_NO_DEVICE));
+		CompleteRequest(pCore, pReq, NULL);
+	}//end of for
+
+}
+
+/*
+ * Handle a device (or port multiplier) unplug on a port: stop command
+ * processing, report the device gone, and fail back every request that
+ * was still running in a slot.
+ */
+void mvHandleDeviceUnplug (PCore_Driver_Extension pCore, PDomain_Port pPort)
+{
+	PMV_Request pReq;
+	//PDomain_Device pDevice;
+	MV_LPVOID portMmio = pPort->Mmio_Base;
+	MV_U8 i; //j = 0;
+	MV_U32 temp;
+	#ifdef RAID_DRIVER
+	//MV_PVOID pUpperLayer = HBA_GetModuleExtension(pCore, MODULE_RAID);	//TBD;
+	#else
+	//MV_PVOID pUpperLayer = HBA_GetModuleExtension(pCore, MODULE_HBA);
+	#endif
+
+	/*
+	 * wait for PORT_SCR_STAT to become non-zero(for a faked plug-out irq)
+	 * 
+	 */
+/*	HBA_SleepMillisecond(pCore, 50);
+
+	while (j < 95) {
+		HBA_SleepMillisecond(pCore, 10);
+		if ((MV_REG_READ_DWORD(portMmio, PORT_SCR_STAT) & 0xf))
+			break;
+		j++;
+	} */
+
+	/* Only act if the PHY really reports no device any more. */
+	if( !SATA_PortDeviceDetected(pPort) )
+	{		
+		/* clear the start bit in cmd register, 
+		   stop the controller from handling anymore requests */
+		temp = MV_REG_READ_DWORD( portMmio, PORT_CMD );
+		MV_REG_WRITE_DWORD( portMmio, PORT_CMD, temp & ~MV_BIT(0));
+
+		SATA_PortReportNoDevice( pCore, pPort );
+		
+		/* Device is gone. Return the Running_Req */
+		for ( i=0; i<MAX_SLOT_NUMBER; i++ )
+		{
+			pReq =  pPort->Running_Req[i];
+			if ( pReq !=NULL )
+			{
+				pReq->Scsi_Status = REQ_STATUS_NO_DEVICE;
+				CompleteRequestAndSlot(pCore, pPort, pReq, NULL, i);
+			}
+		}
+		
+		/* A port multiplier port loses both its flags on unplug. */
+		if( pPort->Type == PORT_TYPE_PM )
+		{
+			pPort->Setting &= ~PORT_SETTING_PM_FUNCTIONAL;
+			pPort->Setting &= ~PORT_SETTING_PM_EXISTING;
+		}
+	}
+}
+
+/*
+ * Hardware workaround: issue a dummy H2D register FIS (PM_Port 0xE in
+ * the command header) to flush the controller FIFO after a plug-in.
+ * Interrupts are masked around the issue so the dummy completion is
+ * never seen by the normal interrupt path.  Must run when slot 0 is
+ * free - asserted below.
+ */
+void sendDummyFIS( PDomain_Port pPort )
+{
+	MV_U8 tag = Tag_GetOne(&pPort->Tag_Pool);
+	PMV_Command_Header header = SATA_GetCommandHeader(pPort, tag);
+	PMV_Command_Table pCmdTable = Port_GetCommandTable(pPort, tag);
+	PSATA_FIS_REG_H2D pFIS = (PSATA_FIS_REG_H2D)pCmdTable->FIS;
+	PCore_Driver_Extension pCore = pPort->Core_Extension;
+	MV_LPVOID portMmio = pPort->Mmio_Base;
+	MV_U32 old_stat;
+
+	MV_DASSERT( tag == 0 );
+
+	mvDisableIntr(portMmio, old_stat);
+
+	MV_ZeroMemory(header, sizeof(MV_Command_Header));
+	MV_ZeroMemory(pCmdTable, sizeof(MV_Command_Table));
+	
+	header->FIS_Length = 0;
+	header->Reset = 0;
+	header->PM_Port = 0xE;
+	
+	header->Table_Address = pPort->Cmd_Table_DMA.low + SATA_CMD_TABLE_SIZE*tag;
+	header->Table_Address_High = pPort->Cmd_Table_DMA.high;//TBD
+	
+	pFIS->FIS_Type = SATA_FIS_TYPE_REG_H2D;
+	pFIS->PM_Port = 0;
+	pFIS->Control = 0;
+
+	MV_REG_WRITE_DWORD(portMmio, PORT_CMD_ISSUE, 1<<tag);
+	MV_REG_READ_DWORD(portMmio, PORT_CMD_ISSUE);	/* flush */
+
+	/* Give the controller a moment to consume the FIS. */
+	HBA_SleepMicrosecond(pCore, 10);
+	
+	Tag_ReleaseOne(&pPort->Tag_Pool, tag);
+	mvEnableIntr(portMmio, old_stat);
+}
+
+/*
+ * Handle a device plug-in on a port: flush the controller FIFO with a
+ * dummy FIS, soft-reset the port, then either initialize an attached
+ * port multiplier or bring up the single attached device via the
+ * device state machine.
+ */
+void mvHandleDevicePlugin (PCore_Driver_Extension pCore, PDomain_Port pPort)
+{
+	PDomain_Device pDevice = &pPort->Device[0];
+	MV_LPVOID portMmio = pPort->Mmio_Base;
+	MV_U8 i;
+	MV_U32 temp;
+
+	/* Ignore the plug-in if we are already at the supported maximum. */
+	if( pCore->Total_Device_Count >= MAX_DEVICE_SUPPORTED )
+		return;
+	
+	/* hardware workaround - send dummy FIS first to clear FIFO */
+	temp = MV_REG_READ_DWORD( portMmio, PORT_CMD );
+	MV_REG_WRITE_DWORD( portMmio, PORT_CMD, temp & ~MV_BIT(0));
+	MV_REG_WRITE_DWORD( portMmio, PORT_CMD, temp | MV_BIT(0));
+	Tag_Init( &pPort->Tag_Pool, MAX_TAG_NUMBER );
+	sendDummyFIS( pPort );
+
+	// start command handling on this port
+	temp = MV_REG_READ_DWORD( portMmio, PORT_CMD );
+	MV_REG_WRITE_DWORD( portMmio, PORT_CMD, temp & ~MV_BIT(0));
+	MV_REG_WRITE_DWORD( portMmio, PORT_CMD, temp | MV_BIT(0));
+
+	/* Let the link and device settle before the soft reset. */
+	HBA_SleepMillisecond(pCore, 2000);
+
+	// reset the tag stack - to guarantee soft reset is issued at slot 0
+	Tag_Init( &pPort->Tag_Pool, MAX_TAG_NUMBER );
+
+	// do software reset
+	MV_DPRINT(("Detected device plug-in, doing soft reset\n"));
+
+	/* Always turn the PM bit on - otherwise won't work! */
+	temp = MV_REG_READ_DWORD(portMmio, PORT_CMD);					
+	MV_REG_WRITE_DWORD(portMmio, PORT_CMD, temp | MV_BIT(17));
+	temp=MV_REG_READ_DWORD(portMmio, PORT_CMD);	/* flush */
+
+	if (! (SATA_PortSoftReset( pCore, pPort )) )
+		return;
+
+	if( pPort->Type == PORT_TYPE_PM ) 
+	{
+		/* need to send notifications for all of these devices */
+		for (i=0; i<MAX_DEVICE_PER_PORT; i++)
+		{
+			pDevice = &pPort->Device[i];
+			pDevice->Id = (pPort->Id)*MAX_DEVICE_PER_PORT + i;
+			pDevice->Need_Notify = MV_TRUE;
+			pDevice->State = DEVICE_STATE_IDLE;
+			pDevice->Device_Type = 0;
+		}
+
+		SATA_InitPM( pPort );
+	} 
+	else
+	{
+		/* not a PM - turn off the PM bit in command register */
+		temp = MV_REG_READ_DWORD(portMmio, PORT_CMD);					
+		MV_REG_WRITE_DWORD(portMmio, PORT_CMD, temp & (~MV_BIT(17)));
+		temp=MV_REG_READ_DWORD(portMmio, PORT_CMD);	/* flush */
+
+		if( SATA_PortDeviceDetected(pPort) )
+		{
+			if ( SATA_PortDeviceReady(pPort) )
+			{					
+				pDevice->Internal_Req = GetInternalReqFromPool(pCore);
+				if( pDevice->Internal_Req == NULL )
+				{
+					MV_DPRINT(("ERROR: Unable to get an internal request buffer\n"));
+					// can't initialize without internal buffer - just set this disk down
+					pDevice->Status = DEVICE_STATUS_NO_DEVICE;
+					pDevice->State = DEVICE_STATE_INIT_DONE;
+				}
+				else 
+				{
+					pDevice->Status = DEVICE_STATUS_EXISTING|DEVICE_STATUS_FUNCTIONAL;
+					pDevice->State = DEVICE_STATE_RESET_DONE;
+					pPort->Device_Number++;
+					pDevice->Id = (pPort->Id)*MAX_DEVICE_PER_PORT;
+					pDevice->Need_Notify = MV_TRUE;
+				}
+				
+				#ifndef _OS_BIOS
+				mvDeviceStateMachine (pCore, pDevice);
+				#endif
+			}
+		}
+	}
+}
+
+#ifdef SUPPORT_PM
+/*
+ * Handle unplug of one device behind a port multiplier: mark the device
+ * gone, fail back its outstanding and queued requests, resend requests
+ * for other PM devices that were interrupted by the port restart, and
+ * clear the PM port's SError X-bit before notifying the upper layer.
+ */
+void mvHandlePMUnplug (PCore_Driver_Extension pCore, PDomain_Device pDevice)
+{
+	PMV_Request pReq;
+	PDomain_Port pPort = pDevice->PPort;
+	MV_LPVOID portMmio = pPort->Mmio_Base;
+	List_Head *pPos;
+	MV_U8 i, count;
+	MV_U32 temp, cmdIssue;
+	#ifdef RAID_DRIVER
+	MV_PVOID pUpperLayer = HBA_GetModuleExtension(pCore, MODULE_RAID);	//TBD;
+	#else
+	MV_PVOID pUpperLayer = HBA_GetModuleExtension(pCore, MODULE_HBA);
+	#endif
+
+	pDevice->Status = DEVICE_STATUS_NO_DEVICE;
+	pPort->Device_Number--;
+	if( pDevice->Internal_Req != NULL )
+	{
+		pCore->Total_Device_Count--;
+		ReleaseInternalReqToPool( pCore, pDevice->Internal_Req );
+		pDevice->Internal_Req = NULL;
+	}
+
+	/* Snapshot which slots were still issued before restarting the port. */
+	cmdIssue = MV_REG_READ_DWORD( portMmio, PORT_CMD_ISSUE );
+
+	/* toggle the start bit in cmd register */
+	temp = MV_REG_READ_DWORD( portMmio, PORT_CMD );
+	MV_REG_WRITE_DWORD( portMmio, PORT_CMD, temp & ~MV_BIT(0));
+	MV_REG_WRITE_DWORD( portMmio, PORT_CMD, temp | MV_BIT(0));
+	HBA_SleepMillisecond( pCore, 100 );
+
+	/* check for requests that are not finished, clear the port,
+	 * then resend again */
+	for ( i=0; i<MAX_SLOT_NUMBER; i++ )
+	{
+		pReq = pPort->Running_Req[i];
+
+		if( pReq != NULL )
+		{
+			if( pReq->Device_Id == pDevice->Id )
+			{
+				/* Belongs to the unplugged device - fail it back. */
+				pReq->Scsi_Status = REQ_STATUS_NO_DEVICE;
+				CompleteRequestAndSlot(pCore, pPort, pReq, NULL, i);
+			}
+			else if ( cmdIssue & (1<<i) )
+			{
+				/* Another PM device's request was aborted by the
+				 * port restart - reissue it. */
+				if( PrepareAndSendCommand( pCore, pReq ) == MV_QUEUE_COMMAND_RESULT_SENDTED )
+				{
+#ifdef SUPPORT_ERROR_HANDLING
+#ifdef SUPPORT_TIMER
+					/* start timer for error handling */
+					if( pDevice->Timer_ID == NO_CURRENT_TIMER )
+					{
+						// if no timer is running right now
+						pDevice->Timer_ID = Timer_AddRequest( pCore, REQUEST_TIME_OUT, Core_ResetChannel, pDevice );
+					}
+#endif /* SUPPORT_TIMER */
+#endif /* SUPPORT_ERROR_HANDLING */
+					pDevice->Outstanding_Req++;
+				}
+				else
+					MV_DASSERT(MV_FALSE);		// shouldn't happens
+			}
+		}
+	}
+
+	/* Rotate the waiting list once, dropping this device's requests. */
+	count = 0;
+	LIST_FOR_EACH(pPos, &pCore->Waiting_List) {
+		count++;
+	}
+	while ( count>0 )
+	{
+		pReq = (PMV_Request)List_GetFirstEntry(&pCore->Waiting_List, MV_Request, Queue_Pointer);
+
+		if ( pReq->Device_Id == pDevice->Id )
+		{
+			pReq->Scsi_Status = REQ_STATUS_NO_DEVICE;
+			CompleteRequest(pCore, pReq, NULL);
+		}
+		else
+		{
+			List_AddTail(&pReq->Queue_Pointer, &pCore->Waiting_List);
+		}
+		count--;
+	}
+
+	/* clear x bit */
+	mvPMDevReWrReg( pPort, MV_Read_Reg, MV_SATA_PSCR_SERROR_REG_NUM, 0, pDevice->PM_Number, MV_TRUE );
+	temp = MV_REG_READ_DWORD( portMmio, PORT_PM_FIS_0 );
+
+	mvPMDevReWrReg( pPort, MV_Write_Reg, MV_SATA_PSCR_SERROR_REG_NUM, temp, pDevice->PM_Number, MV_TRUE);
+
+#ifdef RAID_DRIVER
+	RAID_ModuleNotification(pUpperLayer, EVENT_DEVICE_REMOVAL, 
+				(MV_PVOID)(&pDevice->Id));
+#else
+#ifdef _OS_LINUX
+	HBA_ModuleNotification(pUpperLayer, 
+			       EVENT_DEVICE_REMOVAL, 
+			       pDevice->Id);
+#else /* _OS_LINUX */
+	HBA_ModuleNotification(pUpperLayer, 
+			       EVENT_DEVICE_REMOVAL, 
+			       (MV_PVOID) (&pDevice->Id));
+#endif /* _OS_LINUX */
+#endif /* RAID_DRIVER */
+}
+
+extern MV_BOOLEAN mvDeviceStateMachine(
+	PCore_Driver_Extension pCore,
+	PDomain_Device pDevice
+	);
+
+/*
+ * Handle plug-in of one device behind a port multiplier: initialize the
+ * PM port and run the device state machine to bring the device up.
+ * Silently ignored when the controller is already at its device limit.
+ */
+void mvHandlePMPlugin (PCore_Driver_Extension pCore, PDomain_Device pDevice)
+{
+	PDomain_Port pPort = pDevice->PPort;
+
+	if( pCore->Total_Device_Count >= MAX_DEVICE_SUPPORTED )
+		return;
+
+	pDevice->Need_Notify = MV_TRUE;
+	pDevice->Device_Type = 0;
+	/* Let the newly attached device spin up / settle before init. */
+	HBA_SleepMillisecond(pCore, 1000);
+	SATA_InitPMPort( pPort, pDevice->PM_Number );
+	mvDeviceStateMachine(pCore, pDevice);
+}
+#endif	/* #ifdef SUPPORT_PM */
+#endif	/* #ifdef SUPPORT_HOT_PLUG */
+
+/*
+ * Decode a hot-plug related port interrupt and dispatch to the
+ * plug/unplug handlers.  PHY-ready changes indicate a device or PM
+ * (un)plug on the port itself; async-notification / SDB FIS interrupts
+ * indicate a drive (un)plug behind a port multiplier.  Any interrupt
+ * bits left over after decoding are reported at the bottom.
+ */
+void SATA_HandleHotplugInterrupt(
+	IN PDomain_Port pPort,
+	IN MV_U32 intStatus
+	)
+{
+#ifdef SUPPORT_HOT_PLUG
+	PDomain_Device pDevice = &pPort->Device[0];
+	PCore_Driver_Extension pCore = pPort->Core_Extension;
+	MV_U8 i, plugout=0, plugin=0;
+	//MV_U32 count = 0;
+	MV_U32 temp;
+	MV_LPVOID portMmio = pPort->Mmio_Base;
+	//List_Head *pPos;
+	#ifdef RAID_DRIVER
+	//MV_PVOID pUpperLayer = HBA_GetModuleExtension(pCore, MODULE_RAID);	//TBD;
+	#else
+	//MV_PVOID pUpperLayer = HBA_GetModuleExtension(pCore, MODULE_HBA);
+	#endif
+
+	MV_U32 hotPlugDevice = intStatus & PORT_IRQ_PHYRDY;
+	MV_U32 hotPlugPM = (intStatus & PORT_IRQ_ASYNC_NOTIF) || (intStatus & PORT_IRQ_SDB_FIS);	
+		
+#ifdef _OS_LINUX	
+	MV_DBG(DMSG_ACDB, "__MV__ Hotplug int status : %x.\n", intStatus);
+#endif /* _OS_LINUX */
+	/* Bits handled elsewhere or uninteresting for hot plug. */
+	intStatus &= ~(PORT_IRQ_D2H_REG_FIS|PORT_IRQ_SDB_FIS|PORT_IRQ_PIO_DONE);
+
+	/* if a hard drive or a PM is plugged in/out of the controller */
+	if( hotPlugDevice )
+	{
+		intStatus &= ~PORT_IRQ_PHYRDY;
+
+		/* use Phy status to determine if this is a plug in/plug out */
+		HBA_SleepMillisecond(pCore, 500);
+		if ((MV_REG_READ_DWORD(portMmio, PORT_SCR_STAT) & 0xf) == 0)
+			plugout = MV_TRUE;
+		else
+			plugin = MV_TRUE;
+
+		/* following are special cases, so we take care of these first */
+		if( plugout )
+		{
+			if ( (pPort->Type != PORT_TYPE_PM ) && (pDevice->Status & DEVICE_STATUS_EXISTING) &&
+			     !(pDevice->Status & DEVICE_STATUS_FUNCTIONAL) )
+			{
+				// a bad drive was unplugged
+				pDevice->Status = DEVICE_STATUS_NO_DEVICE;
+				MV_DPRINT(("bad drive was unplugged\n"));
+				return;
+			}
+
+			if ( (pPort->Setting & PORT_SETTING_PM_EXISTING) && 
+			     !(pPort->Setting & PORT_SETTING_PM_FUNCTIONAL) )
+			{
+				// a bad PM was unplugged
+				pPort->Setting &= ~PORT_SETTING_PM_EXISTING;
+				MV_DPRINT(("bad PM was unplugged\n"));
+				return;
+			}
+		}
+		
+		/* Functional device/PM + link down => unplug; otherwise a
+		 * link up on an empty port => plug-in. */
+		if ( ((pPort->Type == PORT_TYPE_PM) && (pPort->Setting & PORT_SETTING_PM_FUNCTIONAL)) ||
+		     ((pPort->Type != PORT_TYPE_PM) && (pDevice->Status & DEVICE_STATUS_FUNCTIONAL)) 
+			)
+		{
+			if( plugout )
+				mvHandleDeviceUnplug( pCore, pPort );
+		}
+		else
+		{
+			if( plugin )
+				mvHandleDevicePlugin( pCore, pPort );
+		}
+	}
+					
+	/* if a drive was plugged in/out of a PM */
+	if ( hotPlugPM ) 
+	{
+		intStatus &= ~PORT_IRQ_ASYNC_NOTIF;
+		intStatus &= ~PORT_IRQ_SDB_FIS;
+
+		/* GSCR error register: one bit per PM port with a change. */
+		mvPMDevReWrReg( pPort, MV_Read_Reg, MV_SATA_GSCR_ERROR_REG_NUM, 0, 0xF, MV_TRUE );
+		temp = MV_REG_READ_DWORD( portMmio, PORT_PM_FIS_0 );
+
+		if (temp == 0)
+			return;
+
+		// better solution???
+		/* NOTE(review): only the first flagged PM port is handled;
+		 * if no bit is set pDevice stays Device[0]. */
+		for (i=0; i<MAX_DEVICE_PER_PORT; i++)	
+		{
+			if( temp & MV_BIT(i) )
+			{
+				pDevice = &pPort->Device[i];
+				pDevice->PM_Number = i;
+				break;
+			}
+		}
+		
+		/* make sure it's a hot plug SDB */
+		mvPMDevReWrReg( pPort, MV_Read_Reg, MV_SATA_PSCR_SERROR_REG_NUM, 0, pDevice->PM_Number, MV_TRUE );
+		temp = MV_REG_READ_DWORD( portMmio, PORT_PM_FIS_0 );
+
+		if( !( (temp & MV_BIT(16)) || (temp & MV_BIT(26)) ) )
+			return;
+
+		/* check phy status to determine plug in/plug out */
+		HBA_SleepMillisecond(pCore, 500);
+		mvPMDevReWrReg(pPort, MV_Read_Reg, MV_SATA_PSCR_SSTATUS_REG_NUM, 0, pDevice->PM_Number, MV_TRUE);
+		temp = MV_REG_READ_DWORD( portMmio, PORT_PM_FIS_0 );
+
+		if( (temp & 0xF) == 0 )
+			plugout = MV_TRUE;
+		else
+			plugin = MV_TRUE;
+
+		if ( plugout && (pDevice->Status & DEVICE_STATUS_EXISTING) &&
+			 !(pDevice->Status & DEVICE_STATUS_FUNCTIONAL) )
+		{
+			// a bad drive was unplugged
+			pDevice->Status = DEVICE_STATUS_NO_DEVICE;
+
+			/* clear x bit */
+			mvPMDevReWrReg( pPort, MV_Read_Reg, MV_SATA_PSCR_SERROR_REG_NUM, 0, pDevice->PM_Number, MV_TRUE );
+			temp = MV_REG_READ_DWORD( portMmio, PORT_PM_FIS_0 );
+
+			mvPMDevReWrReg( pPort, MV_Write_Reg, MV_SATA_PSCR_SERROR_REG_NUM, temp, pDevice->PM_Number, MV_TRUE);
+
+			MV_DPRINT(("bad drive was unplugged\n"));
+			return;
+		}
+
+		if( pDevice->Status & DEVICE_STATUS_FUNCTIONAL )
+		{
+			if (plugout)
+				mvHandlePMUnplug(pCore, pDevice);
+		}
+		else
+		{
+			if (plugin)
+			mvHandlePMPlugin( pCore, pDevice );
+		}
+	}
+
+#endif
+	/* Anything not decoded above is unexpected - log it. */
+	if ( intStatus )
+	{
+#ifndef _OS_BIOS
+		MV_PRINT("Error: SATA_HandleHotplugInterrupt port=%d intStatus=0x%x.\n", pPort->Id, intStatus);
+#endif
+	}
+}
+
+/*
+ * Complete every slot whose bit is set in completeSlot.  All of these
+ * requests finished successfully at the hardware level; internal
+ * request-sense commands are unwrapped back to the originating OS
+ * request with its sense data copied over.
+ */
+void mvCompleteSlots( PDomain_Port pPort, MV_U32 completeSlot, PATA_TaskFile taskFiles )
+{
+	PCore_Driver_Extension pCore = pPort->Core_Extension;
+#ifdef MV_DEBUG
+	MV_LPVOID port_mmio = pPort->Mmio_Base;
+#endif
+	PDomain_Device pDevice;
+	PMV_Request pReq = NULL, pOrgReq = NULL;
+	MV_U8 slotId;
+
+	/* Complete finished commands. All of them are finished successfully.
+	 * There are three situations code will come here.
+	 * 1. No error for both NCQ and Non-NCQ.
+	 * 2. Under NCQ, some requests are completed successfully. At lease one is not.
+	 *	For the error command, by specification, SActive isn't cleared.
+	 * 3. Under non-NCQ, since no interrupt coalescing, no succesful request. 
+	 *  Hardware will return one request is completed. But software clears it above. */
+
+	for ( slotId=0; slotId<MAX_SLOT_NUMBER; slotId++ )
+	{
+		if ( !(completeSlot&(1L<<slotId)) )
+			continue;
+
+		/* Debug builds: hardware must agree the slot is no longer active. */
+		MV_DASSERT( (MV_REG_READ_DWORD(port_mmio, PORT_CMD_ISSUE)&(1<<slotId))==0 );
+		MV_DASSERT( (MV_REG_READ_DWORD(port_mmio, PORT_SCR_ACT)&(1<<slotId))==0 );
+
+		completeSlot &= ~(1L<<slotId);
+				
+		/* This slot is finished. */
+		pReq = pPort->Running_Req[slotId];
+		MV_DASSERT( pReq );
+		pDevice = &pPort->Device[PATA_MapDeviceId(pReq->Device_Id)];
+
+		if ( pReq->Scsi_Status==REQ_STATUS_RETRY )
+		{
+			MV_PRINT("Retried request is finished...");
+			MV_DumpRequest(pReq, MV_FALSE);
+		}
+	
+#ifndef _OS_BIOS
+		if ( Core_IsInternalRequest(pCore, pReq)&&(pReq->Org_Req) )
+		{
+			/* This internal request is used to request sense. */
+			MV_ASSERT( pDevice->Device_Type&DEVICE_TYPE_ATAPI );
+			pOrgReq = pReq->Org_Req;
+			/* Copy sense from the scratch buffer to the request sense buffer. */
+			MV_CopyMemory(
+					pOrgReq->Sense_Info_Buffer,
+					pReq->Data_Buffer,
+					MV_MIN(pOrgReq->Sense_Info_Buffer_Length, pReq->Data_Transfer_Length)
+					);
+			pOrgReq->Scsi_Status = REQ_STATUS_HAS_SENSE;
+			/* remove internal req's timer */
+			/* NOTE(review): hba_remove_timer looks Linux-specific but is
+			 * only guarded by !_OS_BIOS here - confirm for Windows builds. */
+			hba_remove_timer(pReq);
+			pReq = pOrgReq;
+		}
+		else
+#endif
+		{
+			pReq->Scsi_Status = REQ_STATUS_SUCCESS;
+		}
+
+		CompleteRequestAndSlot(pCore, pPort, pReq, taskFiles, slotId);
+		MV_DUMPC32(0xCCCC4402);
+		MV_ENTERLINE;
+		//MV_HALTKEY
+
+		/* Early out once every flagged slot has been handled. */
+		if ( completeSlot==0 )
+			break;
+	}
+}
+
+/*
+ * SATA_PortHandleInterrupt - per-port interrupt service for SATA ports.
+ *
+ * Flow:
+ *  1. Snapshot PORT_IRQ_STAT and compute 'completeSlot' (finished commands)
+ *     from SActive (NCQ) or CI (non-NCQ) versus pPort->Running_Slot.
+ *  2. On task-file / link errors: record serial error, locate the failing
+ *     slot, and decide whether this is a final (non-retryable) error.
+ *  3. finalError: fail the one outstanding request and return.
+ *  4. First error: toggle PORT_CMD start bit, push unfinished requests back
+ *     onto the waiting list for retry, issue READ LOG EXT to clear the
+ *     device's outstanding-command state, then complete the good slots.
+ *  5. No error: ack interrupts, complete slots, handle hot-plug bits.
+ */
+void SATA_PortHandleInterrupt(
+	IN PCore_Driver_Extension pCore,
+	IN PDomain_Port pPort
+	)
+{
+	PDomain_Device pDevice = &pPort->Device[0];
+	MV_LPVOID mmio = pCore->Mmio_Base;
+	MV_LPVOID port_mmio = pPort->Mmio_Base;
+	MV_U32 orgIntStatus, intStatus, serialError, commandIssue, serialActive, temp;
+	PMV_Request pReq = NULL, pOrgReq = NULL;
+	MV_U32 completeSlot = 0;
+	MV_U8 slotId = 0, i;
+	MV_BOOLEAN hasError = MV_FALSE, finalError = MV_FALSE;
+	MV_U32 errorSlot = 0;
+	ATA_TaskFile	taskFiles;
+#ifdef MV_DEBUG
+	MV_U32 orgSerialError, orgCommandIssue, orgSerialActive, orgCompleteSlot, orgRunningSlot;
+#endif
+
+#ifdef _OS_BIOS	
+	/* NOTE(review): this redeclares 'temp' (already declared above), which
+	 * should be a compile error in _OS_BIOS builds -- TODO confirm/remove.
+	 * BIOS build: poll CI for up to ~2s until at least one slot finishes. */
+	MV_U32 temp=2000;
+	while ( (completeSlot==0) && (temp>0) )
+	{
+		HBA_SleepMillisecond(pCore, 1);
+		commandIssue = MV_REG_READ_DWORD(port_mmio, PORT_CMD_ISSUE);
+		completeSlot = (~commandIssue) & pPort->Running_Slot;
+		temp--;
+	}
+#endif
+
+#ifdef SUPPORT_SCSI_PASSTHROUGH
+	/* Snapshot task files up front so completion paths can report them. */
+	readTaskFiles(pPort, pDevice, &taskFiles);
+#endif
+
+	/* Read port interrupt status register */
+	orgIntStatus = MV_REG_READ_DWORD(port_mmio, PORT_IRQ_STAT);
+	intStatus = orgIntStatus;
+
+	/* NOTE(review): in non-BIOS builds 'commandIssue' is not yet assigned
+	 * here; MV_DUMPC32 is presumably a BIOS-only debug macro -- TODO confirm. */
+	MV_DUMPC32(0xCCCC4405);
+	MV_DUMPC32(commandIssue);
+	MV_DUMPC32(pPort->Running_Slot);
+	MV_DUMPC32(0xCCCC4406);
+	MV_DUMPC32(intStatus);
+
+#ifndef _OS_BIOS
+	if ( pPort->Setting&PORT_SETTING_NCQ_RUNNING )
+	{
+		/* NCQ: a slot is done when its SActive bit clears. */
+		serialActive = MV_REG_READ_DWORD(port_mmio, PORT_SCR_ACT);
+		completeSlot =  (~serialActive) & pPort->Running_Slot;
+	}
+	else
+	{
+		/* Non-NCQ: a slot is done when its CI bit clears. */
+		commandIssue = MV_REG_READ_DWORD(port_mmio, PORT_CMD_ISSUE);
+		completeSlot = (~commandIssue) & pPort->Running_Slot;
+	}
+#else
+	if (completeSlot == 0)
+	{
+		/* BIOS build: polling timed out; force-complete the slot 0 request. */
+		MV_DUMPC32(0xCCCC44FF);
+		MV_HALTKEY;
+		pReq = pPort->Running_Req[slotId];
+		CompleteRequestAndSlot(pCore, pPort, pReq, &taskFiles, slotId);
+		MV_DUMPC32(0xCCCC4401);
+		MV_HALTKEY;
+		return;
+	}
+#endif
+
+#ifdef MV_DEBUG
+	/* NOTE(review): 'serialActive' is only assigned in the NCQ branch above,
+	 * so this copy reads an uninitialized value in the non-NCQ path -- debug
+	 * only, but worth fixing. */
+	orgCommandIssue = commandIssue;
+	orgSerialActive = serialActive;
+	orgCompleteSlot = completeSlot;
+	orgRunningSlot = pPort->Running_Slot;
+#endif
+
+	intStatus &= ~(PORT_IRQ_D2H_REG_FIS|PORT_IRQ_SDB_FIS|PORT_IRQ_PIO_DONE);	/* Used to check request is done. */
+	intStatus &= ~(PORT_IRQ_DMAS_FIS|PORT_IRQ_PIOS_FIS);						/* Needn't care. */
+
+	/* Error handling */
+	if ( 
+			(intStatus&PORT_IRQ_TF_ERR)
+		||	(intStatus&PORT_IRQ_LINK_RECEIVE_ERROR)
+		||	(intStatus&PORT_IRQ_LINK_TRANSMIT_ERROR)
+		)
+	{
+		MV_DUMPC32(0xCCCC4411);
+		MV_HALTKEY;
+
+		MV_PRINT("Interrupt Error: 0x%x orgIntStatus: 0x%x completeSlot=0x%x.\n", 
+			intStatus, orgIntStatus, completeSlot);
+		if (intStatus&PORT_IRQ_TF_ERR)
+		{
+			/* Don't do error handling when receive link error. 
+			 * Wait until we got the Task File Error */
+
+			/* read serial error only when there is error */
+			serialError = MV_REG_READ_DWORD(port_mmio, PORT_SCR_ERR);
+			/* Write back to clear (SError bits are RW1C). */
+			MV_REG_WRITE_DWORD(port_mmio, PORT_SCR_ERR, serialError);
+
+			/* Handle serial error interrupt */
+			if ( serialError )
+			{
+				MV_DUMPC32(0xCCCC4405);
+				MV_HALTKEY;
+				SATA_HandleSerialError(pPort, serialError); 
+			}
+
+			MV_DUMPC32(serialError);
+#ifdef MV_DEBUG
+			orgSerialError = serialError;
+#endif
+
+			/* read errorSlot only when there is error */
+			errorSlot = MV_REG_READ_DWORD(port_mmio, PORT_CMD);
+			MV_DUMPC32(errorSlot);
+
+			hasError = MV_TRUE;
+			/* PORT_CMD bits 12:8 hold the slot of the failing command. */
+			errorSlot = (errorSlot>>8)&0x1F;
+
+			/* Already retrying: a second failure is final. */
+			if ( pPort->Setting&PORT_SETTING_DURING_RETRY )
+				finalError = MV_TRUE;
+			else
+			{
+				/* if the error request is any internal requests, we don't retry 
+				 *     1) read log ext - don't retry
+				 *	   2) any initialization requests such as identify - buffer
+				 *		  will conflict when we try to send read log ext to retry
+				 *	   3) request sense - included in the ATAPI condition below
+				 */
+				pReq = pPort->Running_Req[errorSlot];
+				if ( pReq != NULL && Core_IsInternalRequest(pCore, pReq) )
+					finalError = MV_TRUE;
+
+				/* For ATAPI device, we don't do retry. OS already has done a lot.
+				* ATAPI device: one request at a time. */
+				if ( completeSlot==((MV_U32)1L<<errorSlot) )
+				{
+					pReq = pPort->Running_Req[errorSlot];
+					MV_ASSERT( pReq!=NULL );
+					pDevice = &pPort->Device[PATA_MapDeviceId(pReq->Device_Id)];
+					if ( pDevice->Device_Type&DEVICE_TYPE_ATAPI )
+						finalError = MV_TRUE;
+				}
+			}
+		}
+		intStatus &= ~(PORT_IRQ_TF_ERR|PORT_IRQ_LINK_RECEIVE_ERROR|PORT_IRQ_LINK_TRANSMIT_ERROR);		
+	}
+
+
+	/* Final Error: we give up this error request. Only one request is running. 
+	 * And during retry we won't use NCQ command. */
+	if ( finalError )
+	{
+		MV_ASSERT( !(pPort->Setting&PORT_SETTING_NCQ_RUNNING) );
+		MV_ASSERT( completeSlot==((MV_U32)1L<<errorSlot) );
+		MV_ASSERT( pPort->Running_Slot==completeSlot );
+
+	#ifndef _OS_BIOS
+		/* clear global before channel */
+		MV_REG_WRITE_DWORD(mmio, HOST_IRQ_STAT, (1L<<pPort->Id));
+		MV_REG_WRITE_DWORD(port_mmio, PORT_IRQ_STAT, orgIntStatus);
+	#endif /* _OS_BIOS */
+
+		/* This is the failed request. */
+		pReq = pPort->Running_Req[errorSlot];
+		MV_ASSERT( pReq!=NULL );
+		pDevice = &pPort->Device[PATA_MapDeviceId(pReq->Device_Id)];
+
+	#ifndef _OS_BIOS
+		if( Core_IsInternalRequest(pCore, pReq) )
+		{
+			if( pReq->Org_Req )
+			{
+				/* This internal request is used to request sense. */
+				MV_ASSERT( pDevice->Device_Type&DEVICE_TYPE_ATAPI );
+				pOrgReq = pReq->Org_Req;
+				pOrgReq->Scsi_Status = REQ_STATUS_ERROR;
+
+				/* remove internal req's timer */
+				hba_remove_timer(pReq);
+				pReq = pOrgReq;
+			}
+			else if( pReq->Cdb[2] == CDB_CORE_READ_LOG_EXT )
+			{
+				pReq->Scsi_Status = REQ_STATUS_ERROR;
+			}
+			else
+			{
+				/* This internal request is initialization request like identify */
+				MV_DASSERT( pDevice->State != DEVICE_STATE_INIT_DONE );
+				pReq->Scsi_Status = REQ_STATUS_ERROR;
+			}
+		}
+		else
+	#endif /* _OS_BIOS */
+		{
+			if ( pReq->Cmd_Flag&CMD_FLAG_PACKET )
+			{
+				/* ATAPI: fetch sense data before reporting the failure. */
+				pReq->Scsi_Status = REQ_STATUS_REQUEST_SENSE;
+			}
+			else
+			{
+			#ifndef _OS_BIOS	
+				MV_DPRINT(("Finally SATA error for Req 0x%x.\n", pReq->Cdb[0]));
+			#endif
+				pReq->Scsi_Status = REQ_STATUS_ERROR;
+			}
+		}
+
+		CompleteRequestAndSlot(pCore, pPort, pReq, &taskFiles, (MV_U8)errorSlot);
+
+		/* Handle interrupt status register */
+		if ( intStatus )
+		{
+			MV_DUMPC32(0xCCCC4403);
+			MV_DUMPC32(intStatus);
+			SATA_HandleHotplugInterrupt(pPort, intStatus);
+		}
+		return;
+	}
+
+	/* The first time to hit the error. Under error condition, figure out all the successful requests. */
+	if ( hasError )
+	{
+		MV_ASSERT( !finalError );
+		if ( pPort->Setting&PORT_SETTING_NCQ_RUNNING )
+		{
+			/* For NCQ command, if error happens on one slot.
+			 * This slot is not completed. SActive is not cleared. */
+		}
+		else
+		{
+			/* For Non-NCQ command, last command is the error command. 
+			 * ASIC will stop whenever there is an error.
+			 * And we only have one request if there is no interrupt coalescing or NCQ. */
+			MV_ASSERT( completeSlot==((MV_U32)1L<<errorSlot) );
+
+			/* The error command is finished but we clear it to make it to be retried. */
+			completeSlot=0;
+		}
+		/* Now all the completed commands are completed successfully. */
+
+		/* Reset this port to prepare for the retry. At least one request will be retried. */
+
+		MV_ASSERT( finalError==MV_FALSE );
+
+		/* Toggle the port start bit to clear up the hardware to prepare for the retry. */
+		temp = MV_REG_READ_DWORD(pPort->Mmio_Base, PORT_CMD);
+		temp &= ~PORT_CMD_START;
+		MV_REG_WRITE_DWORD(pPort->Mmio_Base, PORT_CMD, temp );
+		HBA_SleepMillisecond(pCore, 1);
+		temp |= PORT_CMD_START;
+		MV_REG_WRITE_DWORD(pPort->Mmio_Base, PORT_CMD, temp );
+		HBA_SleepMillisecond(pCore, 1);
+		MV_PRINT("Toggle CMD register start stop bit at port 0x%x.\n", pPort->Id);
+
+		/* Toggle should before we clear the channel interrupt status but not the global interrupt. */
+		MV_REG_WRITE_DWORD(mmio, HOST_IRQ_STAT, (1L<<pPort->Id));
+		/* Abort all the others requests and retry. */
+		for ( slotId=0; slotId<MAX_SLOT_NUMBER; slotId++ )
+		{
+			pReq = pPort->Running_Req[slotId];
+			if ( !(completeSlot&(1L<<slotId)) && pReq )
+			{
+				pReq->Cmd_Flag &= 0xFF;	/* Remove NCQ setting. */
+				pReq->Scsi_Status = REQ_STATUS_RETRY;
+
+				/* Put requests to the queue head but don't run them. Should run ReadLogExt first. */
+				pPort->Running_Req[slotId] = NULL;
+				pPort->Running_Slot &= ~(1L<<slotId);
+				Tag_ReleaseOne(&pPort->Tag_Pool, slotId);
+				hba_remove_timer(pReq);
+				List_Add(&pReq->Queue_Pointer, &pCore->Waiting_List);		/* Add to the header. */
+				MV_PRINT("Abort error requests....\n");
+				MV_DumpRequest(pReq, MV_FALSE);
+			}
+		}
+		MV_ASSERT( MV_REG_READ_DWORD(port_mmio, PORT_CMD_ISSUE)==0 );
+		MV_ASSERT( MV_REG_READ_DWORD(port_mmio, PORT_IRQ_STAT)==0 );
+		MV_ASSERT( (MV_REG_READ_DWORD(mmio, HOST_IRQ_STAT)&(1L<<pPort->Id))==0 );
+
+		/* Send ReadLogExt command to clear the outstanding commands on the device. 
+		 * This request will be put to the queue head because it's Cmd_Initiator is Core Driver. 
+		 * Consider the port multiplier. */
+		for ( i=0; i<MAX_DEVICE_PER_PORT; i++ )
+		{
+			pDevice = &pPort->Device[i];
+			if ( 
+				!(pDevice->Device_Type&DEVICE_TYPE_ATAPI)
+				&& (pDevice->Capacity&DEVICE_CAPACITY_READLOGEXT_SUPPORTED)
+				//&& (pPort->Setting&PORT_SETTING_NCQ_RUNNING)
+				)
+			{
+				Device_IssueReadLogExt(pPort, pDevice);
+			}
+			else
+			{
+				#ifndef _OS_BIOS
+				Core_HandleWaitingList(pCore);	//TBD: Should port based.
+				#endif
+			}
+		}
+
+		/* Needn't run interrupt_handle_bottom_half except the hot plug.
+		 * Toggle start bit will clear all the interrupt. So don't clear interrupt again. 
+		 * Otherwise it'll clear Read Log Ext interrupt. 
+		 * If Device_IssueReadLogExt is called, needn't run Core_HandleWaitingList. */
+		//TBD: How about the hot plug.
+		
+		/* handle completed slots */
+		if( completeSlot )
+			mvCompleteSlots( pPort, completeSlot, &taskFiles );
+		return;
+	}
+
+	/* if no error */
+
+#ifndef _OS_BIOS
+	/* clear global before channel */
+	MV_REG_WRITE_DWORD(mmio, HOST_IRQ_STAT, (1L<<pPort->Id));
+	MV_REG_WRITE_DWORD(port_mmio, PORT_IRQ_STAT, orgIntStatus);
+#endif
+
+	/* handle completed slots */
+	if( completeSlot )
+		mvCompleteSlots( pPort, completeSlot, &taskFiles );
+
+	/* Handle interrupt status register */
+	if ( intStatus )
+	{
+		MV_DUMPC32(0xCCCC4403);
+		MV_DUMPC32(intStatus);
+		SATA_HandleHotplugInterrupt(pPort, intStatus);
+	}
+}
+
+/*
+ * PATA_PortHandleInterrupt - per-port interrupt service for the PATA channel.
+ *
+ * Contains several hardware workarounds specific to Thor revisions:
+ *  - polls CI because it may not be updated yet when the IRQ fires,
+ *  - detects an ATAPI read-abort via the internal state machine register
+ *    (0x60007013) and schedules a channel reset,
+ *  - re-checks the task-file status because the error interrupt bit can be
+ *    set spuriously while the status is still 0x50 (no error).
+ */
+void PATA_PortHandleInterrupt(
+	IN PCore_Driver_Extension pCore,
+	IN PDomain_Port pPort
+	)
+{
+	MV_LPVOID mmio = pCore->Mmio_Base;
+	MV_LPVOID port_mmio = pPort->Mmio_Base;
+	MV_U32 intStatus, orgIntStatus, commandIssue, taskFile=0, stateMachine, portCommand;
+	MV_U32 temp;
+	PMV_Request pReq = NULL, pOrgReq = NULL;
+	MV_U32 completeSlot = 0;
+	MV_U8 slotId = 0;
+	MV_BOOLEAN hasOneAlready = MV_FALSE;
+	MV_BOOLEAN hasError = MV_FALSE, needReset = MV_FALSE;
+	PDomain_Device pDevice=NULL;
+	ATA_TaskFile	taskFiles;
+    	
+	/* Read port interrupt status register */
+	intStatus = MV_REG_READ_DWORD(port_mmio, PORT_IRQ_STAT);
+	orgIntStatus = intStatus;
+#ifdef _OS_BIOS
+	/* BIOS build acks the interrupt immediately. */
+	MV_REG_WRITE_DWORD(port_mmio, PORT_IRQ_STAT,intStatus);
+	HBA_SleepMillisecond(pCore, 10);
+#endif
+
+	MV_DUMPC32(0xCCCCDD01);
+	MV_DUMPC32(intStatus);
+	/* 
+	 * Workaround for PATA non-data command.
+	 * PATA non-data command, CI is not ready yet when interrupt is triggered.
+	 */
+	commandIssue = MV_REG_READ_DWORD(port_mmio, PORT_CMD_ISSUE);
+	completeSlot = (~commandIssue) & pPort->Running_Slot;
+
+/* Thor Lite D0 and Thor B0 */
+/* NOTE(review): block below is deliberately unindented in the original. */
+if ( (pCore->Device_Id!=DEVICE_ID_THOR_4S1P_NEW) && (pCore->Revision_Id!=0xB0) && (pCore->Revision_Id!=0xB1) )
+{
+	/* Poll CI for up to ~1000 iterations until a slot completes. */
+	temp=1000;
+	while ( (completeSlot==0) && (temp>0) )
+	{
+	#ifndef _OS_BIOS
+		HBA_SleepMillisecond(pCore, 2);
+	#else
+		HBA_SleepMillisecond(pCore, 1);
+	#endif
+		commandIssue = MV_REG_READ_DWORD(port_mmio, PORT_CMD_ISSUE);
+		completeSlot = (~commandIssue) & pPort->Running_Slot;
+		temp--;
+	}
+
+
+#ifdef DEBUG_BIOS
+	MV_DUMPC32(0xCCCC5501);
+	MV_DUMPC32(MV_REG_READ_DWORD(port_mmio, PORT_CMD_ISSUE));
+	MV_DUMPC32(pPort->Running_Slot);
+	MV_DUMPC32(commandIssue);
+#endif
+
+
+	
+	if ( (completeSlot==0)&&(pPort->Running_Slot!=0) )
+	{
+		MV_DPRINT(("INT but no request completed: 0x%x CI: 0x%x Running: 0x%x\n", 
+			intStatus, commandIssue, pPort->Running_Slot));
+		/*
+		 * Workaround:
+		 * If ATAPI read abort happens, got one interrupt but CI is not cleared.
+		 */
+		stateMachine = MV_REG_READ_DWORD(port_mmio, PORT_INTERNAL_STATE_MACHINE);
+		if ( stateMachine==0x60007013 )
+		{
+            pCore->Need_Reset = 1;
+			needReset = MV_TRUE;
+
+			/* Actually one request is finished. We need figure out which one it is. */
+			portCommand = MV_REG_READ_DWORD(port_mmio, PORT_CMD);
+			MV_ASSERT( portCommand&MV_BIT(15) );	/* Command is still running */
+			/* Bits 12:8 hold the slot of the in-flight command. */
+			portCommand = (portCommand>>8)&0x1F;
+			MV_ASSERT( portCommand<MAX_SLOT_NUMBER );
+			MV_DPRINT(("Read abort happens on slot %d.\n", portCommand));
+			completeSlot |= (1<<portCommand);
+		}
+	}
+}
+#ifdef _OS_BIOS
+	if (completeSlot == 0)
+	{
+		/* BIOS build: nothing completed; force-complete the slot 0 request.
+		 * NOTE(review): 'taskFiles' has not been filled by readTaskFiles on
+		 * this path (that happens later in the loop) -- TODO confirm intent. */
+		MV_DUMPC32(0xCCCC55FF);
+		MV_HALTKEY;
+		pReq = pPort->Running_Req[slotId];
+		pPort->Running_Req[slotId] = NULL;
+		pPort->Running_Slot &= ~(1L<<slotId);
+		Tag_ReleaseOne(&pPort->Tag_Pool, slotId);
+		
+#ifdef SUPPORT_ERROR_HANDLING
+		pDevice = &pPort->Device[PATA_MapDeviceId(pReq->Device_Id)];
+		pDevice->Outstanding_Req--;
+
+#ifdef SUPPORT_TIMER
+		/* request for this device came back, so we cancel the timer */
+		Timer_CancelRequest( pCore, pDevice->Timer_ID );
+		pDevice->Timer_ID = NO_CURRENT_TIMER;
+
+		/* if there are more outstanding requests, we send a new timer */
+		if ( pDevice->Outstanding_Req > 0 )
+		{
+			pDevice->Timer_ID = Timer_AddRequest( pCore, REQUEST_TIME_OUT, Core_ResetChannel, pDevice );
+		}
+#endif /* SUPPORT_TIMER */
+#endif /* SUPPORT_ERROR_HANDLING */
+
+		CompleteRequest(pCore, pReq, &taskFiles);
+		MV_DUMPC32(0xCCCC550E);
+		MV_HALTKEY;
+		return;
+	}
+#endif
+
+	/* Handle interrupt status register */
+	/* Mask the "command done" bits; anything left indicates an error. */
+	intStatus &= ~MV_BIT(0); intStatus &= ~MV_BIT(2);
+#ifdef ENABLE_PATA_ERROR_INTERRUPT
+	hasError = (intStatus!=0) ? MV_TRUE : MV_FALSE;
+
+	/*
+	 * Workaround:
+	 * If error interrupt bit is set. We cannot clear it.
+	 * Try to use PORT_CMD PORT_CMD_PATA_START bit to clear the error interrupt but didn't work.
+	 * So we have to disable PATA error interrupt.
+	 */
+#endif
+
+	/* Complete finished commands */
+	for ( slotId=0; slotId<MAX_SLOT_NUMBER; slotId++ )
+	{
+		MV_DUMPC32(0xCCCCDD03);
+		if ( !(completeSlot&(1L<<slotId)) )
+			continue;
+
+		completeSlot &= ~(1L<<slotId);
+		MV_DASSERT( completeSlot==0 );	//TBD: If no interrupt coleascing.
+
+		/* This slot is finished. */
+		pReq = pPort->Running_Req[slotId];
+		MV_DASSERT(pReq);
+
+#if defined(SUPPORT_ERROR_HANDLING) && defined(_OS_LINUX)
+		hba_remove_timer(pReq);
+#endif /* defined(SUPPORT_ERROR_HANDLING) && defined(_OS_LINUX) */
+		/* Release the slot bookkeeping before completing the request. */
+		pPort->Running_Req[slotId] = NULL;
+		pPort->Running_Slot &= ~(1L<<slotId);
+		Tag_ReleaseOne(&pPort->Tag_Pool, slotId);
+		MV_DASSERT( (MV_REG_READ_DWORD(pPort->Mmio_Base, PORT_CMD_ISSUE)&(1<<slotId))==0 );
+
+		pDevice = &pPort->Device[PATA_MapDeviceId(pReq->Device_Id)];
+
+	#ifndef ENABLE_PATA_ERROR_INTERRUPT
+		/* 
+ 		 * Workaround:
+ 		 * Sometimes we got error interrupt bit but the status is still 0x50.
+		 * In this case, the command is completed without error.
+		 * So we have to check the task status to make sure it's really an error or not.
+		 */
+		#ifndef _OS_BIOS
+		HBA_SleepMicrosecond(pCore, 2);
+		#else
+		HBA_SleepMillisecond(pCore, 2);
+		#endif
+		/* Master and slave devices expose separate task-file registers. */
+		if ( !pDevice->Is_Slave )
+			taskFile = MV_REG_READ_DWORD(port_mmio, PORT_MASTER_TF0);
+		else
+			taskFile = MV_REG_READ_DWORD(port_mmio, PORT_SLAVE_TF0);
+			MV_DUMPC32(0xCCCCDD04);
+			MV_DUMPC32(taskFile);
+		/* Bit 0 of the status byte is the ATA ERR bit. */
+		if ( taskFile&MV_BIT(0) )
+		{
+			
+			MV_DUMPC32(0xCCCCDD05);
+			//MV_HALTKEY;
+			hasError = MV_TRUE;
+			MV_DPRINT(("PATA request returns with error 0x%x.\n", taskFile));
+		}
+
+		#ifdef MV_DEBUG
+		if ( !(taskFile&MV_BIT(0)) && ( intStatus ) )
+		{
+			MV_DPRINT(("Error interrupt is set but status is 0x50.\n"));
+		
+		}
+		#endif
+	#endif
+
+		if ( (hasError)&&(pCore->Device_Id!=DEVICE_ID_THOR_4S1P_NEW)&&(pCore->Revision_Id!=0xB0)&&(pCore->Revision_Id!=0xB1) )
+		{
+			MV_DUMPC32(0xCCCCDD06);
+			if ( pDevice->Device_Type==DEVICE_TYPE_ATAPI )
+			{
+				/*
+				 * Workaround: 
+				 * Write request if device abort, hardware state machine got wrong.
+				 * Need do reset to recover.
+				 * If the error register is 0x40, we think the error happens.
+			 	 * Suppose this problem only happens on ODD. HDD won't write abort.
+ 				 */
+				MV_DUMPC32(0xCCCCDD07);
+				taskFile = taskFile>>24;  /* Get the error register */
+				if ( taskFile==0x40 )	//TBD: How come I see many read request got this kind of error?
+				{
+					pCore->Need_Reset = 1;
+					needReset = MV_TRUE;
+				}
+			}
+		}
+	#ifndef _OS_BIOS	
+		if ( Core_IsInternalRequest(pCore, pReq)&&(pReq->Org_Req) )
+		{
+			/* This internal request is used to request sense. */
+			pOrgReq = pReq->Org_Req;
+			if ( hasError )
+			{
+				MV_ASSERT( hasOneAlready==MV_FALSE );
+				hasOneAlready = MV_TRUE;
+				pOrgReq->Scsi_Status = REQ_STATUS_ERROR;
+			}
+			else
+			{
+				/* Copy sense from the scratch buffer to the request sense buffer. */
+				MV_CopyMemory(
+						pOrgReq->Sense_Info_Buffer,
+						pReq->Data_Buffer,
+						MV_MIN(pOrgReq->Sense_Info_Buffer_Length, pReq->Data_Transfer_Length)
+						);
+				pOrgReq->Scsi_Status = REQ_STATUS_HAS_SENSE;
+			}
+			/* Complete the original request, not the internal sense request. */
+			pReq = pOrgReq;
+		}
+		else
+	#endif
+	
+		{
+			if ( hasError )
+			{
+				MV_DUMPC32(0xCCCCDD08);
+#ifndef _OS_BIOS
+				MV_ASSERT( hasOneAlready==MV_FALSE );
+				hasOneAlready = MV_TRUE;
+
+				if ( needReset )
+				{
+					/* Get sense data using legacy mode or fake a sense data here. */
+					PATA_LegacyPollSenseData(pCore, pReq);
+					pReq->Scsi_Status = REQ_STATUS_HAS_SENSE;
+				}
+				else
+#endif
+				{
+					if ( pReq->Cmd_Flag&CMD_FLAG_PACKET )
+						pReq->Scsi_Status = REQ_STATUS_REQUEST_SENSE;
+					else
+						pReq->Scsi_Status = REQ_STATUS_ERROR;
+				}
+			}
+			else
+			{
+				pReq->Scsi_Status = REQ_STATUS_SUCCESS;
+				MV_DUMPC32(0xCCCCDD09);
+			}
+		}
+
+#ifdef SUPPORT_SCSI_PASSTHROUGH
+		readTaskFiles(pPort, pDevice, &taskFiles);
+#endif
+
+#ifdef SUPPORT_ERROR_HANDLING
+		pDevice->Outstanding_Req--;
+#ifdef SUPPORT_TIMER
+		/* request for this device came back, so we cancel the timer */
+		Timer_CancelRequest( pCore, pDevice->Timer_ID );
+		pDevice->Timer_ID = NO_CURRENT_TIMER;
+
+		/* if there are more outstanding requests, we send a new timer */
+		if ( pDevice->Outstanding_Req > 0 )
+		{
+			pDevice->Timer_ID = Timer_AddRequest( pCore, REQUEST_TIME_OUT, Core_ResetChannel, pDevice );
+		}
+#endif /* SUPPORT_TIMER */
+#endif /* SUPPORT_ERROR_HANDLING */
+
+		CompleteRequest(pCore, pReq, &taskFiles);  
+		MV_DUMPC32(0xCCCC5502);
+		MV_ENTERLINE;
+		//MV_HALTKEY
+
+
+		if ( completeSlot==0 )
+			break;
+	}
+
+	/* 
+	 * Clear the interrupt. It'll re-start the hardware to handle the next slot. 
+	 * I clear the interrupt after I've checked the CI register.
+	 * Currently we handle one request everytime in case if there is an error I don't know which one it is.
+	 */
+#ifndef _OS_BIOS
+	MV_REG_WRITE_DWORD(mmio, HOST_IRQ_STAT, (1L<<pPort->Id));
+#endif
+	MV_REG_WRITE_DWORD(port_mmio, PORT_IRQ_STAT, orgIntStatus);
+
+#ifndef _OS_BIOS
+	/* If there is more requests on the slot, we have to push back there request. */
+	//TBD: Request order can be wrong now.
+	if ( needReset )
+	{
+		/* Channel reset pending: requeue everything still outstanding. */
+		for ( slotId=0; slotId<MAX_SLOT_NUMBER; slotId++ )
+		{
+			pReq = pPort->Running_Req[slotId];
+			if ( pReq )
+			{
+				List_Add(&pReq->Queue_Pointer, &pCore->Waiting_List);
+				pPort->Running_Req[slotId] = NULL;
+				Tag_ReleaseOne(&pPort->Tag_Pool, slotId);
+			}
+		}
+		pPort->Running_Slot = 0;
+	}
+
+#endif
+	MV_DUMPC32(0xCCCC5503);
+	MV_DUMPC32(MV_REG_READ_DWORD(port_mmio, PORT_IRQ_STAT));
+}
+
+#ifndef _OS_BIOS
+/*
+ * Device_MakeRequestSenseRequest - build an internal SCSI REQUEST SENSE
+ * request (18-byte fixed sense format) into 'pNewReq', targeting the device's
+ * scratch buffer and linked to the failed original request 'pOrgReq'.
+ *
+ * The completed sense data is later copied from the scratch buffer into
+ * pOrgReq->Sense_Info_Buffer by the interrupt completion path.
+ */
+void Device_MakeRequestSenseRequest(
+	IN PCore_Driver_Extension pCore,
+	IN PDomain_Device pDevice,
+	IN PMV_Request pNewReq,
+	IN PMV_Request pOrgReq
+	)
+{
+	PMV_SG_Table pSGTable = &pNewReq->SG_Table;
+	//MV_U8 senseSize = SATA_SCRATCH_BUFFER_SIZE;
+	/* Fixed sense data format is 18 bytes. */
+	MV_U8 senseSize = 18;
+
+/*	MV_ZeroMemory(pNewReq, sizeof(MV_Request)); */
+	MV_ZeroMvRequest(pNewReq);
+
+	pNewReq->Device_Id = pDevice->Id;
+
+	pNewReq->Scsi_Status = REQ_STATUS_PENDING;
+	/* Marks the request as internal (core-driver initiated). */
+	pNewReq->Cmd_Initiator = pCore;
+
+	pNewReq->Data_Transfer_Length = senseSize;
+	pNewReq->Data_Buffer = pDevice->Scratch_Buffer;
+
+	/* Link back to the failed request so its sense buffer can be filled. */
+	pNewReq->Org_Req = pOrgReq;
+
+	pNewReq->Cmd_Flag = CMD_FLAG_DATA_IN;
+#ifdef USE_DMA_FOR_ALL_PACKET_COMMAND	
+	pNewReq->Cmd_Flag |=CMD_FLAG_DMA;
+#endif
+
+	pNewReq->Completion = NULL;
+
+	/* Make the SG table. */
+	SGTable_Init(pSGTable, 0);
+	SGTable_Append(
+		pSGTable, 
+		pDevice->Scratch_Buffer_DMA.low,
+		pDevice->Scratch_Buffer_DMA.high,
+		senseSize
+		);
+	/* Transfer length must be word-aligned. */
+	MV_DASSERT( senseSize%2==0 );
+
+	/* Request Sense request */
+	pNewReq->Cdb[0]=SCSI_CMD_REQUEST_SENSE;
+	pNewReq->Cdb[4]=senseSize;
+
+	/* Fixed sense data format is 18 bytes. */
+	MV_ZeroMemory(pNewReq->Data_Buffer, senseSize);
+}
+#endif
+
+/*
+ * CompleteRequest - final completion path for a request.
+ *
+ * Handles post-processing before invoking the request's completion callback:
+ *  - _OS_BIOS: suppresses the callback for failed ATAPI commands;
+ *  - _OS_WINDOWS: re-issues the next chunk of a split (oversized) transfer;
+ *  - REQ_STATUS_REQUEST_SENSE: builds and sends an internal REQUEST SENSE
+ *    instead of completing (the request completes after sense is fetched);
+ *  - SCSI pass-through: records write-cache/SMART setting changes and SMART
+ *    threshold status from the returned task file; on failure stuffs
+ *    REQ_STATUS_ERROR into the sense buffer;
+ *  - CDB_CORE_SHUTDOWN: walks the request across all device IDs before
+ *    finally completing it.
+ *
+ * Fix: the SUPPORT_ERROR_HANDLING guard used
+ * '#if (SUPPORT_ERROR_HANDLING) && defined(_OS_LINUX)', which is a
+ * preprocessor error when SUPPORT_ERROR_HANDLING is defined empty and
+ * inconsistent with the identical guard in PATA_PortHandleInterrupt.
+ * Changed to '#if defined(SUPPORT_ERROR_HANDLING) && defined(_OS_LINUX)'
+ * to match the closing comment and the sibling usage.
+ */
+void CompleteRequest(
+	IN PCore_Driver_Extension pCore,
+	IN PMV_Request pReq,
+	IN PATA_TaskFile pTaskFile
+	)
+{
+#ifdef SUPPORT_SCSI_PASSTHROUGH
+	PHD_Status pHDStatus;
+#endif
+	PDomain_Port pPort = &pCore->Ports[PATA_MapPortId(pReq->Device_Id)];
+	PDomain_Device pDevice = &pPort->Device[PATA_MapDeviceId(pReq->Device_Id)];
+	MV_DUMPC32(0xCCCC6601);
+	MV_DUMPC32(pReq->Scsi_Status);
+
+	//TBD: Some of the command, we need read the received FIS like smart command.
+#ifdef _OS_BIOS
+ #ifdef MV_SUPPORT_ATAPI
+	if	((pReq->Scsi_Status != REQ_STATUS_SUCCESS) && (pReq->Cmd_Flag & CMD_FLAG_PACKET))	
+		return;		/* ATAPI Command fail, do not call callback for BIOS.*/
+ #endif
+#else 	/* #ifdef _OS_BIOS */
+#ifdef _OS_WINDOWS
+	if(pReq->Splited_Count)
+	{
+		if(pReq->Scsi_Status == REQ_STATUS_SUCCESS)
+		{
+			MV_U32 sectors;
+			MV_LBA lba;
+			
+			pReq->Splited_Count--;
+
+			/* Advance the CDB to the next MV_MAX_TRANSFER_SECTOR chunk
+			 * and re-issue instead of completing. */
+			lba.value = SCSI_CDB10_GET_LBA(pReq->Cdb) + MV_MAX_TRANSFER_SECTOR;
+			sectors = MV_MAX_TRANSFER_SECTOR;
+			SCSI_CDB10_SET_LBA(pReq->Cdb, lba.value);
+			SCSI_CDB10_SET_SECTOR(pReq->Cdb, sectors);
+
+			pReq->Scsi_Status = REQ_STATUS_PENDING;
+
+			Core_ModuleSendRequest(pCore, pReq);
+
+			return;
+		}
+		else
+			pReq->Splited_Count = 0;
+	}
+#endif	/* _OS_WINDOWS */
+
+#if defined(SUPPORT_ERROR_HANDLING) && defined(_OS_LINUX)
+	hba_remove_timer(pReq);
+#endif /* defined(SUPPORT_ERROR_HANDLING) && defined(_OS_LINUX) */	
+
+	if (pReq->Scsi_Status == REQ_STATUS_REQUEST_SENSE)
+	{
+		/* Use the internal request to request sense. */
+		Device_MakeRequestSenseRequest(pCore, pDevice, pDevice->Internal_Req, pReq);
+		/* pReq is linked to the internal request via Org_Req; it will be
+		 * completed when the sense request finishes. */
+		Core_ModuleSendRequest(pCore, pDevice->Internal_Req);
+
+		return;
+	}
+
+
+#ifdef SUPPORT_SCSI_PASSTHROUGH
+	if (pTaskFile != NULL)
+	{
+		if (pReq->Scsi_Status == REQ_STATUS_SUCCESS)
+		{
+			if (pReq->Cdb[0] == SCSI_CMD_MARVELL_SPECIFIC && pReq->Cdb[1] == CDB_CORE_MODULE)
+			{
+				/* Track device-setting changes made via vendor CDBs. */
+				if (pReq->Cdb[2] == CDB_CORE_DISABLE_WRITE_CACHE)
+					pDevice->Setting &= ~DEVICE_SETTING_WRITECACHE_ENABLED;
+				else if (pReq->Cdb[2] == CDB_CORE_ENABLE_WRITE_CACHE)
+					pDevice->Setting |= DEVICE_SETTING_WRITECACHE_ENABLED;
+				else if (pReq->Cdb[2] == CDB_CORE_DISABLE_SMART)
+					pDevice->Setting &= ~DEVICE_SETTING_SMART_ENABLED;
+				else if (pReq->Cdb[2] == CDB_CORE_ENABLE_SMART)
+					pDevice->Setting |= DEVICE_SETTING_SMART_ENABLED;
+				else if (pReq->Cdb[2] == CDB_CORE_SMART_RETURN_STATUS)
+				{
+					/* LBA Mid/High == F4h/2Ch signals "threshold exceeded"
+					 * per the ATA SMART RETURN STATUS convention. */
+					pHDStatus = (PHD_Status)pReq->Data_Buffer;
+					if (pHDStatus == NULL)
+					{
+#ifdef SUPPORT_EVENT
+					if (pTaskFile->LBA_Mid == 0xF4 && pTaskFile->LBA_High == 0x2C)
+							HBA_AddEvent( pCore, EVT_ID_HD_SMART_THRESHOLD_OVER, pDevice->Id, SEVERITY_WARNING, 0, NULL );
+#endif
+					}
+					else
+					{
+						if (pTaskFile->LBA_Mid == 0xF4 && pTaskFile->LBA_High == 0x2C)
+							pHDStatus->SmartThresholdExceeded = MV_TRUE;
+						else
+							pHDStatus->SmartThresholdExceeded = MV_FALSE;
+					}
+				}
+			}
+		}
+		else
+		{
+			if (pReq->Sense_Info_Buffer != NULL)
+				((MV_PU8)pReq->Sense_Info_Buffer)[0] = REQ_STATUS_ERROR;
+			pReq->Scsi_Status = REQ_STATUS_ERROR_WITH_SENSE;
+		}
+	}
+#endif
+
+	/* Do something if necessary to return back the request. */
+	if ( (pReq->Cdb[0]==SCSI_CMD_MARVELL_SPECIFIC) && (pReq->Cdb[1]==CDB_CORE_MODULE) ) 
+	{
+		if ( pReq->Cdb[2]==CDB_CORE_SHUTDOWN )
+		{
+			/* Shutdown walks one request across every device ID in turn;
+			 * only the last device actually completes it. */
+			if ( pReq->Device_Id<MAX_DEVICE_NUMBER-1 )
+			{
+				pReq->Device_Id++;
+				pReq->Scsi_Status = REQ_STATUS_PENDING;
+				Core_ModuleSendRequest(pCore, pReq);
+				return;
+			}
+			else
+			{
+				pReq->Scsi_Status = REQ_STATUS_SUCCESS;
+			}
+		}
+	}
+#endif /* #ifdef _OS_BIOS */
+
+	/* Hand the finished request back to its initiator. */
+	pReq->Completion(pReq->Cmd_Initiator, pReq);
+}
+
+/*
+ * CompleteRequestAndSlot - release the hardware slot/tag bookkeeping for
+ * 'slotId', maintain the per-device timeout timer, then hand the request to
+ * CompleteRequest for final completion.
+ */
+void CompleteRequestAndSlot(
+	IN PCore_Driver_Extension pCore,
+	IN PDomain_Port pPort,
+	IN PMV_Request pReq,
+	IN PATA_TaskFile pTaskFile,
+	IN MV_U8 slotId
+	)
+{
+#ifdef SUPPORT_ERROR_HANDLING
+	PDomain_Device pDevice = &pPort->Device[PATA_MapDeviceId(pReq->Device_Id)];
+#endif		
+	pPort->Running_Req[slotId] = NULL;
+	pPort->Running_Slot &= ~(1L<<slotId);
+	Tag_ReleaseOne(&pPort->Tag_Pool, slotId);
+	/* Hardware must have cleared this slot's CI bit by now. */
+	MV_DASSERT( (MV_REG_READ_DWORD(pPort->Mmio_Base, PORT_CMD_ISSUE)&(1<<slotId))==0 );
+
+	if ( pPort->Type!=PORT_TYPE_PATA )
+	{
+		/* SATA only: SActive must also be clear (PATA has no SActive). */
+		MV_DASSERT( (MV_REG_READ_DWORD(pPort->Mmio_Base, PORT_SCR_ACT)&(1<<slotId))==0 );
+	}
+
+#ifdef SUPPORT_ERROR_HANDLING
+	pDevice->Outstanding_Req--;
+#ifdef SUPPORT_TIMER
+	/* request for this device came back, so we cancel the timer */
+	Timer_CancelRequest( pCore, pDevice->Timer_ID );
+	pDevice->Timer_ID = NO_CURRENT_TIMER;
+
+	/* if there are more outstanding requests, we send a new timer */
+	if ( pDevice->Outstanding_Req > 0 )
+	{
+		pDevice->Timer_ID = Timer_AddRequest( pCore, REQUEST_TIME_OUT, Core_ResetChannel, pDevice );
+	}
+#endif /* SUPPORT_TIMER */
+#endif /* SUPPORT_ERROR_HANDLING */
+	CompleteRequest(pCore, pReq, pTaskFile);
+}
+
+/*
+ * Port_Monitor - debug aid: print the port's running-slot bitmap and dump
+ * every request still outstanding on the port.  No-op in BIOS builds.
+ */
+void Port_Monitor(PDomain_Port pPort)
+{
+#ifndef _OS_BIOS
+	MV_U8 slot;
+
+	MV_PRINT("Port_Monitor: Running_Slot=0x%x.\n", pPort->Running_Slot);
+
+	for ( slot=0; slot<MAX_SLOT_NUMBER; slot++ )
+	{
+		PMV_Request pOutstanding = pPort->Running_Req[slot];
+
+		if ( pOutstanding!=NULL )
+			MV_DumpRequest(pOutstanding, MV_FALSE);
+	}
+#endif
+}
+
+/*
+ * Core_ModuleMonitor - debug aid: dump every request on the core driver's
+ * waiting list, then invoke Port_Monitor for each port to dump in-flight
+ * requests.  No-op in BIOS builds.
+ *
+ * This - core driver extension (PCore_Driver_Extension), passed as MV_PVOID
+ *        to match the generic module interface.
+ */
+void Core_ModuleMonitor(MV_PVOID This)
+{
+#ifndef _OS_BIOS
+	PCore_Driver_Extension pCore = (PCore_Driver_Extension)This;
+	PMV_Request pReq = NULL;
+	PList_Head head = &pCore->Waiting_List;
+	PDomain_Port pPort = NULL;
+	MV_U8 i;
+
+	//TBD: typeof
+	MV_PRINT("Core_ModuleMonitor Waiting_List:\n");
+	/* Open-coded list_for_each_entry over the intrusive waiting list. */
+	for (pReq = LIST_ENTRY((head)->next, MV_Request, Queue_Pointer);	\
+	     &pReq->Queue_Pointer != (head); 	\
+	     pReq = LIST_ENTRY(pReq->Queue_Pointer.next, MV_Request, Queue_Pointer))
+	{
+		MV_DumpRequest(pReq, MV_FALSE);
+	}
+
+	for ( i=0; i<pCore->Port_Num; i++ )
+	{
+		MV_PRINT("Port[%d]:\n", i);
+		pPort = &pCore->Ports[i];
+		Port_Monitor(pPort);
+	}
+
+#endif	/* #ifndef _OS_BIOS */
+
+}
+
+/*
+ * Core_ModuleReset - re-initialize the core driver extension from scratch,
+ * discarding all outstanding requests, by re-running Core_ModuleInitialize
+ * with the same extension size used at load time.  No-op in BIOS builds.
+ */
+void Core_ModuleReset(MV_PVOID This)
+{
+#ifndef _OS_BIOS	
+	MV_U32 extensionSize = 0; 
+
+	/* Must mirror the size calculation used when the extension was first
+	 * allocated (core + optional consolidate extension/devices). */
+	extensionSize = ( ROUNDING(sizeof(Core_Driver_Extension),8)
+#ifdef SUPPORT_CONSOLIDATE
+					+ ROUNDING(sizeof(Consolidate_Extension),8) + ROUNDING(sizeof(Consolidate_Device),8)*MAX_DEVICE_NUMBER
+#endif
+					);
+			
+	/* Re-initialize all the variables even discard all the requests. */
+	Core_ModuleInitialize(This, extensionSize, 32);
+
+	//TBD: Merge with Core_ResetHardware
+#if 0
+	{
+		PCore_Driver_Extension pCore = (PCore_Driver_Extension)This;
+
+		pCore->Adapter_State = ADAPTER_INITIALIZING;
+		for ( i=0; i<MAX_PORT_NUMBER; i++ )
+		{
+			pPort = &pCore->Ports[i];
+			pPort->Port_State = PORT_STATE_IDLE;
+			for ( j=0; j<MAX_DEVICE_PER_PORT; j++ )
+			{
+				pDevice = &pPort->Device[j];
+				pDevice->State = DEVICE_STATE_IDLE;
+			}
+		}
+	}
+#endif 
+#endif
+}
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/core/core_exp.h linux-2.6.25/drivers/scsi/mv/core/core_exp.h
--- linux-2.6.25.orig/drivers/scsi/mv/core/core_exp.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/core/core_exp.h	2008-07-28 18:42:43.327188786 +0200
@@ -0,0 +1,38 @@
+#if !defined(CORE_EXPOSE_H)
+#define CORE_EXPOSE_H
+
+/* Product device id */
+#define VENDOR_ID                           0x11AB
+
+#define DEVICE_ID_THORLITE_2S1P             0x6121
+#define DEVICE_ID_THORLITE_0S1P             0x6101
+#define DEVICE_ID_THORLITE_1S1P             0x6111
+#define DEVICE_ID_THOR_4S1P                 0x6141
+#define DEVICE_ID_THOR_4S1P_NEW             0x6145
+/* Revision ID starts from B1 */
+#define DEVICE_ID_THORLITE_2S1P_WITH_FLASH  0x6122
+
+MV_U32 Core_ModuleGetResourceQuota(enum Resource_Type type, MV_U16 maxIo);
+void Core_ModuleInitialize(MV_PVOID, MV_U32, MV_U16);
+void Core_ModuleStart(MV_PVOID);
+void Core_ModuleShutdown(MV_PVOID);
+void Core_ModuleNotification(MV_PVOID, enum Module_Event, MV_PVOID);
+void Core_ModuleSendRequest(MV_PVOID, PMV_Request);
+void Core_ModuleMonitor(MV_PVOID);
+void Core_ModuleReset(MV_PVOID pExtension);
+#ifdef _OS_BIOS
+void Core_ReInitBaseAddress(MV_PVOID This);
+#endif
+
+MV_BOOLEAN Core_InterruptServiceRoutine(MV_PVOID This);
+
+#ifdef SUPPORT_ERROR_HANDLING
+#define REQUEST_TIME_OUT			5		// in multiples of TIMER_INTERVAL, see hba_timer.h
+#endif
+
+void mvRemoveDeviceWaitingList(MV_PVOID This, MV_U16 deviceId,
+			       MV_BOOLEAN returnOSRequest);
+void mvRemovePortWaitingList( MV_PVOID This, MV_U8 portId );
+
+#endif /* CORE_EXPOSE_H */
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/core/core_init.c linux-2.6.25/drivers/scsi/mv/core/core_init.c
--- linux-2.6.25.orig/drivers/scsi/mv/core/core_init.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/core/core_init.c	2008-07-28 18:42:43.329188696 +0200
@@ -0,0 +1,2808 @@
+#include "mv_include.h"
+
+#include "core_exp.h"
+
+#include "core_init.h"
+#include "core_inter.h"
+
+#include "core_thor.h"
+
+#include "core_sata.h"
+
+#include "hba_header.h"
+
+static void Device_IssueIdentify(PDomain_Port pPort, PDomain_Device pDevice);
+static void Device_IssueSetUDMAMode(PDomain_Port pPort, PDomain_Device pDevice);
+static void Device_IssueSetPIOMode(PDomain_Port pPort, PDomain_Device pDevice);
+static void Device_EnableWriteCache(PDomain_Port pPort, PDomain_Device pDevice);
+static void Device_EnableReadAhead(PDomain_Port pPort, PDomain_Device pDevice);
+
+extern void Core_HandleWaitingList(PCore_Driver_Extension pCore);
+
+static MV_BOOLEAN mvChannelStateMachine(
+	PCore_Driver_Extension pCore,
+	PDomain_Port pPort
+	);
+
+MV_BOOLEAN mvDeviceStateMachine(
+	PCore_Driver_Extension pCore,
+	PDomain_Device pDevice
+	);
+
+#ifdef SUPPORT_HOT_PLUG
+/* Mark a device as having finished its reset phase and advance its
+ * state machine (non-BIOS builds only; the BIOS drives it elsewhere). */
+void Device_SoftReset(PDomain_Port pPort, PDomain_Device pDevice)
+{
+	PCore_Driver_Extension pCoreExt = pPort->Core_Extension;
+
+	pDevice->State = DEVICE_STATE_RESET_DONE;
+#ifndef _OS_BIOS
+	mvDeviceStateMachine(pCoreExt, pDevice);
+#endif
+}
+#endif	/* #ifdef SUPPORT_HOT_PLUG */ 
+
+/* Pop one pre-allocated internal request from the free pool; returns
+ * NULL when the pool is exhausted. */
+PMV_Request GetInternalReqFromPool( PCore_Driver_Extension pCore )
+{
+	if( List_Empty(&pCore->Internal_Req_List) )
+		return NULL;
+
+	return (PMV_Request)List_GetFirstEntry(&pCore->Internal_Req_List,
+					       MV_Request, Queue_Pointer);
+}
+
+/* Scrub a finished internal request and hand it back to the free pool. */
+void ReleaseInternalReqToPool( PCore_Driver_Extension pCore, PMV_Request pReq)
+{
+	MV_ZeroMvRequest(pReq);
+	List_AddTail(&pReq->Queue_Pointer, &pCore->Internal_Req_List);
+}
+
+//TBD: Refer to CamAtaDevResetStart
+/*
+ * Initialize this port including possible device hard or soft reset.
+ */
+ //Lily test
+
+#ifdef SUPPORT_PM
+/*
+ * Read or write one Port Multiplier register by issuing a READ/WRITE
+ * PORT MULTIPLIER command FIS directly through a free command slot.
+ *
+ *  read    - nonzero for a register read, 0 for a write
+ *  PMreg   - PM register number (goes into the FIS Features field)
+ *  regVal  - value to write (ignored for reads)
+ *  PMport  - PM device port the register belongs to
+ *  control - MV_TRUE to address the PM's own control port (0xF)
+ *
+ * Runs synchronously with the port interrupt masked; completion is
+ * detected by polling PORT_CMD_ISSUE.  A read's result is afterwards
+ * available to the caller in the received-FIS area (PORT_PM_FIS_0).
+ */
+void mvPMDevReWrReg(
+	PDomain_Port pPort, 
+	MV_U8 read, 
+	MV_U8 PMreg, 
+	MV_U32 regVal, 
+	MV_U8 PMport, 
+	MV_BOOLEAN control)
+{
+	MV_U8 tag = Tag_GetOne(&pPort->Tag_Pool);
+	PMV_Command_Header header = SATA_GetCommandHeader(pPort, tag);
+	PMV_Command_Table pCmdTable = Port_GetCommandTable(pPort, tag);
+	PSATA_FIS_REG_H2D pFIS = (PSATA_FIS_REG_H2D)pCmdTable->FIS;
+	PCore_Driver_Extension pCore = pPort->Core_Extension;
+	MV_LPVOID portMmio = pPort->Mmio_Base;
+	MV_U32 old_stat, loop=5000;
+
+	mvDisableIntr(portMmio, old_stat);
+
+	MV_ZeroMemory(header, sizeof(MV_Command_Header));
+	MV_ZeroMemory(pCmdTable, sizeof(MV_Command_Table));
+	
+	header->FIS_Length = FIS_REG_H2D_SIZE_IN_DWORD;
+	/* PM control registers live behind the PM's own port 0xF. */
+	header->PM_Port = control? 0xF : PMport;
+	
+	*((MV_U16 *) header) = CPU_TO_LE_16( *((MV_U16 *) header) );
+	header->Table_Address = CPU_TO_LE_32(pPort->Cmd_Table_DMA.low + SATA_CMD_TABLE_SIZE*tag);
+	header->Table_Address_High = CPU_TO_LE_32(pPort->Cmd_Table_DMA.high);//TBD
+	
+	pFIS->FIS_Type = SATA_FIS_TYPE_REG_H2D;
+	pFIS->PM_Port = control? 0xF : PMport;
+	pFIS->Command =  (read)?MV_ATA_COMMAND_PM_READ_REG : MV_ATA_COMMAND_PM_WRITE_REG;
+	pFIS->Features = PMreg;
+	pFIS->Device = PMport;
+	pFIS->C = 1;
+	
+	if (!read)
+	{
+		/* WRITE PORT MULTIPLIER scatters the 32-bit value across the
+		 * sector-count and LBA fields of the FIS. */
+		pFIS->LBA_Low =  (MV_U8)((regVal & 0xff00) >> 8);
+		pFIS->LBA_Mid = (MV_U8)((regVal & 0xff0000) >> 16);
+		pFIS->LBA_High = (MV_U8)((regVal & 0xff000000) >> 24) ;
+		pFIS->Sector_Count = (MV_U8)(regVal & 0xff);
+	}
+
+	/* Our slot must be free before we issue into it. */
+	MV_DASSERT( (MV_REG_READ_DWORD(portMmio, PORT_CMD_ISSUE)&(1<<tag))==0 );
+	
+	MV_REG_WRITE_DWORD(portMmio, PORT_CMD_ISSUE, 1<<tag);
+	MV_REG_READ_DWORD(portMmio, PORT_CMD_ISSUE);	/* flush */
+	
+	//temp = MV_REG_READ_DWORD(portMmio, PORT_SCR_ERR);
+	//MV_REG_WRITE_DWORD(portMmio, PORT_SCR_ERR, temp);
+	//temp = MV_REG_READ_DWORD(portMmio, PORT_IRQ_STAT);
+	//MV_REG_WRITE_DWORD(portMmio, PORT_IRQ_STAT, temp);
+	//MV_REG_WRITE_DWORD(mmio, HOST_IRQ_STAT, (1L<<pPort->Id));
+
+	// make sure CI is cleared before moving on
+	loop = 5000;
+	while(loop > 0) {
+		if( (MV_REG_READ_DWORD(portMmio, PORT_CMD_ISSUE)&(1<<tag)) == 0 )
+			break;
+		HBA_SleepMillisecond(pCore, 10);
+		loop--;
+	}
+
+	/* Timed out: log it, but still release the tag and re-enable the
+	 * interrupt so the port is not left wedged. */
+	if ( (MV_REG_READ_DWORD(portMmio, PORT_CMD_ISSUE)&(1<<tag)) != 0 )
+		MV_DPRINT(("read/write PM register: CI not cleared!\n"));
+	
+	Tag_ReleaseOne(&pPort->Tag_Pool, tag);
+	mvEnableIntr(portMmio, old_stat);
+	
+}
+
+/* Classify the attached SATA device from the signature latched in
+ * PORT_SIG.  Only the Port Multiplier signature (0x96690101) is
+ * recognised here; anything else is reported as 0 (plain device). */
+static MV_U8 mvGetSataDeviceType(PDomain_Port pPort)
+{
+	MV_U32 signature = MV_REG_READ_DWORD(pPort->Mmio_Base, PORT_SIG);
+
+	if (signature == 0x96690101)
+	{
+		MV_DPRINT(("Port Multiplier detected.\n"));
+		return PORT_TYPE_PM;
+	}
+
+	return 0;
+}
+#endif	// support PM
+
+/*
+ * Issue an ATA software reset to the device behind PMPort (0xF targets
+ * the device/PM control port directly).  Per the SATA protocol this is
+ * two register FISes: one with SRST set, one with SRST cleared.  Both
+ * are issued through one command slot with interrupts masked, polling
+ * PORT_CMD_ISSUE for completion of each.
+ *
+ * Returns MV_FALSE if the second FIS never completed (CI stuck).
+ */
+MV_BOOLEAN SATA_DoSoftReset(PDomain_Port pPort, MV_U8 PMPort)
+{
+	MV_U8 tag = Tag_GetOne(&pPort->Tag_Pool);
+	PMV_Command_Header header = SATA_GetCommandHeader(pPort, tag);
+	PMV_Command_Table pCmdTable = Port_GetCommandTable(pPort, tag);
+	PSATA_FIS_REG_H2D pFIS = (PSATA_FIS_REG_H2D)pCmdTable->FIS;
+	PCore_Driver_Extension pCore = pPort->Core_Extension;
+	MV_LPVOID portMmio = pPort->Mmio_Base;
+	MV_U32 old_stat;
+	MV_U32 temp = 0, count = 0;
+	MV_U8 reset = 1;	/* first pass sets SRST, second clears it */
+
+	if( PMPort == 0xF )
+		MV_DASSERT( tag == 0 );
+
+	mvDisableIntr(portMmio, old_stat);
+
+	do
+	{
+		MV_ZeroMemory(header, sizeof(MV_Command_Header));
+		MV_ZeroMemory(pCmdTable, sizeof(MV_Command_Table));
+	
+		header->FIS_Length = FIS_REG_H2D_SIZE_IN_DWORD;
+		/* The Reset header bit tells the HBA this is a reset FIS. */
+		header->Reset = (reset)?1:0;
+		header->PM_Port = PMPort;
+		
+		*((MV_U16 *) header) = CPU_TO_LE_16( *((MV_U16 *) header) );
+		header->Table_Address = CPU_TO_LE_32(pPort->Cmd_Table_DMA.low + SATA_CMD_TABLE_SIZE*tag);
+		header->Table_Address_High = CPU_TO_LE_32(pPort->Cmd_Table_DMA.high);//TBD
+	
+		pFIS->FIS_Type = SATA_FIS_TYPE_REG_H2D;
+		pFIS->PM_Port = PMPort;
+//		pFIS->Device = 0x40;
+		/* SRST is bit 2 of the Device Control register. */
+		pFIS->Control = (reset)?MV_BIT(2):0;
+
+		MV_REG_WRITE_DWORD(portMmio, PORT_CMD_ISSUE, 1<<tag);
+		MV_REG_READ_DWORD(portMmio, PORT_CMD_ISSUE);	/* flush */
+
+		HBA_SleepMillisecond(pCore, 2);
+
+		//temp = MV_REG_READ_DWORD(portMmio, PORT_SCR_ERR);
+		//MV_REG_WRITE_DWORD(portMmio, PORT_SCR_ERR, temp);
+		//temp = MV_REG_READ_DWORD(portMmio, PORT_IRQ_STAT);
+		//MV_REG_WRITE_DWORD(portMmio, PORT_IRQ_STAT, temp);
+		//MV_REG_WRITE_DWORD(mmio, HOST_IRQ_STAT, (1L<<pPort->Id));
+
+		reset = reset ^ 1; /*SRST CLEAR*/
+
+		count = 0;
+		// make sure CI is cleared before moving on
+		do {
+			temp = MV_REG_READ_DWORD(portMmio, PORT_CMD_ISSUE) & (1<<tag);
+			count++;
+			HBA_SleepMillisecond(pCore, 10);
+		} while (temp != 0 && count < 1000);
+
+	}while(reset==0);	/* loop runs exactly twice: SRST set, then clear */
+	
+	Tag_ReleaseOne(&pPort->Tag_Pool, tag);
+	mvEnableIntr(portMmio, old_stat);
+
+	if (temp != 0)
+	{
+		MV_DPRINT(("\nsoft reset: CI is not cleared!\n"));
+		return MV_FALSE;
+	}
+
+	return MV_TRUE;
+}
+
+#ifdef SUPPORT_PM
+/*
+ * Soft-reset the device on this port, then wait (up to ~5 s, ~1 s in
+ * BIOS builds) for the link to report an established PHY (DET==3 in
+ * SStatus) and the device to drop BSY in the task-file register.  On
+ * success the port type is reclassified (PM vs. plain device).
+ */
+MV_BOOLEAN SATA_SoftResetDevice(PDomain_Port pPort, MV_U8 portNum)
+{
+	PCore_Driver_Extension pCore = pPort->Core_Extension;
+	MV_LPVOID portMmio = pPort->Mmio_Base;
+#ifndef _OS_BIOS
+	MV_U32 status1, status2, loop = 5000;
+#else
+	MV_U32 status1, status2, loop = 1000;
+#endif
+
+	if (! (SATA_DoSoftReset(pPort, portNum)) )
+		return MV_FALSE;
+
+	while(loop>0)
+	{
+		/* status1: SStatus DET field; status2: shadow status (BSY=bit 7). */
+		status1 = MV_REG_READ_DWORD(portMmio, PORT_SCR_STAT) & 0xf;
+		status2 = MV_REG_READ_DWORD(portMmio, PORT_TFDATA) & 0xff;
+		if (((status1 & 0xf) == 0x3) && ((status2 & 0x80) == 0))
+		{
+			MV_DPRINT(("loop = %x\n", loop));
+			pPort->Type = mvGetSataDeviceType( pPort );
+			return MV_TRUE;
+		}
+		#ifndef _OS_BIOS	
+		HBA_SleepMicrosecond(pCore, 1000);
+		#else
+		HBA_SleepMillisecond(pCore, 1);
+		#endif
+		loop--;
+	}
+	MV_DPRINT(("Did not detect device after soft reset\n"));
+	return MV_FALSE;
+}
+
+/*
+ * Soft-reset the device attached to PM port portNum, then wait (up to
+ * ~5 s, ~1 s in BIOS builds) until BSY clears and the task-file data
+ * reports a completion coming from that same PM port.
+ */
+MV_BOOLEAN SATA_SoftResetPMDevice(PDomain_Port pPort, MV_U8 portNum) 
+{
+	PCore_Driver_Extension pCore = pPort->Core_Extension;
+	MV_LPVOID portMmio = pPort->Mmio_Base;
+#ifndef _OS_BIOS
+	MV_U32 status, PMPort, loop = 5000;
+#else
+	MV_U32 status, PMPort, loop = 1000;
+#endif
+
+	MV_DUMPC32(0xCCCC5563);
+	
+	if (! (SATA_DoSoftReset(pPort, portNum)) )
+		return MV_FALSE;
+	MV_DUMPC32(0xCCCC5564);
+	
+	while(loop>0)
+	{
+		/* Low byte: shadow status (BSY=bit 7).  Bits 23:20 carry the
+		 * PM port number the latest FIS came from. */
+		status = MV_REG_READ_DWORD(portMmio, PORT_TFDATA) & 0xff;
+		PMPort = (MV_REG_READ_DWORD(portMmio, PORT_TFDATA) & 0xf00000) >> 20;
+
+		if ( ((status & 0x80) == 0) && (PMPort == portNum) )
+		{
+			MV_DUMPC32(0xCCCC5565);
+			MV_DPRINT(("loop = %x\n", loop));
+			return MV_TRUE;
+		}
+		#ifndef _OS_BIOS	
+		HBA_SleepMicrosecond(pCore, 1000);
+		#else
+		HBA_SleepMillisecond(pCore, 1);
+		#endif
+		
+		loop--;
+	}
+	MV_DUMPC32(0xCCCC5566);
+	MV_DPRINT(("Did not detect device after soft reset\n"));
+	return MV_FALSE;
+}
+
+/*
+ * Poll the SStatus register of PM port portNum (via the PM's PSCR
+ * registers, up to 100 ms) to see whether a device is attached with an
+ * established link.  0x123/0x113 are the accepted SStatus low-12-bit
+ * patterns (DET=3 with IPM/SPD set -- presumably Gen1 vs Gen2 link;
+ * TODO confirm against the PM spec).  On detection the port's SError
+ * is cleared so a stale X bit does not mask later hot-plug events.
+ */
+MV_BOOLEAN PMPortDeviceDetected(PDomain_Port pPort, MV_U8 portNum)
+{
+	PCore_Driver_Extension pCore = pPort->Core_Extension;
+	MV_LPVOID portMmio = pPort->Mmio_Base;
+	MV_U32 read_result;
+	MV_U32 loop = 100;
+
+	while(loop>0)
+	{
+		/*Detect the sata device*/
+		mvPMDevReWrReg(pPort, MV_Read_Reg, MV_SATA_PSCR_SSTATUS_REG_NUM, 0, portNum, MV_TRUE);
+		read_result = MV_REG_READ_DWORD( portMmio, PORT_PM_FIS_0 );
+
+		if( (read_result & 0xFFF) == 0x123 || (read_result & 0xFFF) == 0x113 )
+		{
+			MV_DPRINT(("the device detected on PM port %d, Port %d ", portNum, pPort->Id));
+
+			/* clears X bit in SError */
+			mvPMDevReWrReg( pPort, MV_Write_Reg, MV_SATA_PSCR_SERROR_REG_NUM, 0xFFFFFFFF, portNum, MV_TRUE);
+			return MV_TRUE;
+		}
+		HBA_SleepMillisecond(pCore, 1);
+		loop--;
+	}
+	return MV_FALSE;
+}
+#endif	// support PM
+
+/*
+ * Probe and initialize one device behind a Port Multiplier: detect it,
+ * soft-reset it, classify it by signature (ATA vs ATAPI), and attach an
+ * internal request buffer.  On soft-reset failure the device (if it was
+ * previously functional) is reported removed to the upper layer and the
+ * port engine is restarted by toggling PORT_CMD start.
+ */
+void SATA_InitPMPort (PDomain_Port pPort, MV_U8 portNum)
+{
+	PDomain_Device pDevice = &pPort->Device[portNum];
+	PCore_Driver_Extension pCore = pPort->Core_Extension;
+	MV_LPVOID portMmio = pPort->Mmio_Base;
+	MV_U32 signature, tmp;
+#ifdef SUPPORT_ERROR_HANDLING
+	#ifdef RAID_DRIVER
+	MV_PVOID pUpperLayer = HBA_GetModuleExtension(pCore, MODULE_RAID);	//TBD;
+	#else
+	MV_PVOID pUpperLayer = HBA_GetModuleExtension(pCore, MODULE_HBA);
+	#endif
+#endif
+	if( PMPortDeviceDetected(pPort, portNum) ) 
+	{
+		if ( SATA_SoftResetPMDevice(pPort, portNum) )
+		{
+			/* Device signature was latched in the received-FIS area. */
+			signature = MV_REG_READ_DWORD(pPort->Mmio_Base, PORT_PM_FIS_0);
+
+			if ( signature==0xEB140101 )				/* ATAPI signature */	
+				pDevice->Device_Type |= DEVICE_TYPE_ATAPI;			
+			else
+				MV_DASSERT( signature==0x00000101 );	/* ATA signature */
+				
+			pDevice->Internal_Req = GetInternalReqFromPool(pCore);
+			if( pDevice->Internal_Req == NULL )
+			{
+				MV_DPRINT(("ERROR: Unable to get an internal request buffer\n"));
+				// can't initialize without internal buffer - just set this disk down
+				pDevice->Status = DEVICE_STATUS_NO_DEVICE;
+				pDevice->State = DEVICE_STATE_INIT_DONE;
+			}
+			else 
+			{
+				pDevice->Status = DEVICE_STATUS_EXISTING|DEVICE_STATUS_FUNCTIONAL;
+				pDevice->State = DEVICE_STATE_RESET_DONE;
+				pDevice->PM_Number = portNum;
+				pPort->Device_Number++;
+			}
+		}
+		else 
+		{
+			MV_DPRINT(("soft reset failed on PM port %d\n", portNum));
+
+#ifdef SUPPORT_ERROR_HANDLING
+			/* The device was working before this reset: free its
+			 * internal request and tell the upper layer it is gone. */
+			if( pDevice->Status & DEVICE_STATUS_FUNCTIONAL )
+			{
+				pCore->Total_Device_Count--;
+				ReleaseInternalReqToPool( pCore, pDevice->Internal_Req );
+				pDevice->Internal_Req = NULL;
+
+				#ifdef RAID_DRIVER
+				RAID_ModuleNotification(pUpperLayer, EVENT_DEVICE_REMOVAL, (MV_PVOID)(&pDevice->Id));
+				#else
+#ifdef _OS_LINUX 
+				HBA_ModuleNotification(pUpperLayer, 
+						       EVENT_DEVICE_REMOVAL, 
+						       pDevice->Id);
+#else /* _OS_LINUX */
+				HBA_ModuleNotification(pUpperLayer, 
+						       EVENT_DEVICE_REMOVAL, 
+						       (MV_PVOID)&pDevice->Id);
+#endif /* _OS_LINUX */
+				#endif
+			}
+#endif
+			pDevice->Status = DEVICE_STATUS_EXISTING;
+			pDevice->State = DEVICE_STATE_INIT_DONE;
+			pDevice->Need_Notify = MV_FALSE;
+			
+			/* toggle the start bit in cmd register */
+			tmp = MV_REG_READ_DWORD( portMmio, PORT_CMD );
+			MV_REG_WRITE_DWORD( portMmio, PORT_CMD, tmp & ~MV_BIT(0));
+			MV_REG_WRITE_DWORD( portMmio, PORT_CMD, tmp | MV_BIT(0));
+			HBA_SleepMillisecond( pCore, 100 );
+		}
+	}
+	else
+		pDevice->Need_Notify = MV_FALSE;
+}
+
+/*
+ * Initialize a detected Port Multiplier: read its identity/capability
+ * registers, enable hot-plug notification, then probe every downstream
+ * PM port and start the device state machines for whatever was found.
+ */
+void SATA_InitPM (PDomain_Port pPort)
+{
+	PDomain_Device pDevice;
+	PCore_Driver_Extension pCore = pPort->Core_Extension;
+	MV_LPVOID portMmio = pPort->Mmio_Base;
+	MV_U32 numPMPorts, temp;
+	MV_U8 i;
+
+	pPort->Setting |= PORT_SETTING_PM_EXISTING;
+	pPort->Setting |= PORT_SETTING_PM_FUNCTIONAL;
+	
+	/* fill in various information about the PM */
+
+	/* check how many ports the PM has; any beyond our per-port device
+	 * limit are marked absent up front */
+	mvPMDevReWrReg(pPort, MV_Read_Reg, MV_SATA_GSCR_INFO_REG_NUM, 0, 0xF, MV_TRUE);
+	numPMPorts = MV_REG_READ_DWORD( portMmio, PORT_PM_FIS_0 ) & 0xF;		
+	if ( numPMPorts > MAX_DEVICE_PER_PORT )
+		numPMPorts = MAX_DEVICE_PER_PORT;
+	else if ( numPMPorts < MAX_DEVICE_PER_PORT )
+	{
+		for( i=(MV_U8)numPMPorts; i<MAX_DEVICE_PER_PORT; i++ )
+		{
+			pPort->Device[i].Status = DEVICE_STATUS_NO_DEVICE;
+			pPort->Device[i].State = DEVICE_STATE_INIT_DONE;
+		}
+	}
+	pPort->PM_Num_Ports = (MV_U8)numPMPorts;
+
+	/* vendor ID & device ID */
+	mvPMDevReWrReg( pPort, MV_Read_Reg, MV_SATA_GSCR_ID_REG_NUM, 0, 0xF, MV_TRUE );
+	temp = MV_REG_READ_DWORD( portMmio, PORT_PM_FIS_0 );
+	pPort->PM_Vendor_Id = (MV_U16)temp;
+	pPort->PM_Device_Id = (MV_U16)(temp >> 16);
+
+	/* product & spec revisions */
+	mvPMDevReWrReg( pPort, MV_Read_Reg, MV_SATA_GSCR_REVISION_REG_NUM, 0, 0xF, MV_TRUE );
+	temp = MV_REG_READ_DWORD( portMmio, PORT_PM_FIS_0 );
+	pPort->PM_Product_Revision = (MV_U8)((temp & 0xFF00) >> 8);
+	if ( temp & MV_BIT(3) )
+		pPort->PM_Spec_Revision = 12;
+	else if ( temp & MV_BIT(2) )
+		pPort->PM_Spec_Revision = 11;
+	else if ( temp & MV_BIT(1) )
+		pPort->PM_Spec_Revision = 10;
+	else
+		pPort->PM_Spec_Revision = 0;
+
+	/* enable asynchronous notification bit for hot plug */
+	mvPMDevReWrReg( pPort, MV_Read_Reg, MV_SATA_GSCR_FEATURES_ENABLE_REG_NUM, 0, 0xF, MV_TRUE );
+	temp = MV_REG_READ_DWORD( portMmio, PORT_PM_FIS_0 );
+	mvPMDevReWrReg( pPort, MV_Write_Reg, MV_SATA_GSCR_FEATURES_ENABLE_REG_NUM, 
+					temp | MV_BIT(3), 0xF, MV_TRUE );
+
+	/* enable N & X bit in SError for hot plug */
+	/* NOTE(review): the value read into temp here is never used - the
+	 * write below stores fixed bits instead of OR-ing them in.  Verify
+	 * whether the read-modify-write was intended. */
+	mvPMDevReWrReg( pPort, MV_Read_Reg, MV_SATA_GSCR_ERROR_ENABLE_REG_NUM, 0, 0xF, MV_TRUE );
+	temp = MV_REG_READ_DWORD( portMmio, PORT_PM_FIS_0 );
+	mvPMDevReWrReg( pPort, MV_Write_Reg, MV_SATA_GSCR_ERROR_ENABLE_REG_NUM, 
+					MV_BIT(16) | MV_BIT(26), 0xF, MV_TRUE );
+
+	for( i=0; i<numPMPorts; i++ )
+	{
+		pDevice = &pPort->Device[i];
+		pDevice->Status = DEVICE_STATUS_NO_DEVICE;
+		pDevice->State = DEVICE_STATE_INIT_DONE;
+
+		/*enable the device port: pulse SControl DET, then release*/
+		mvPMDevReWrReg(pPort, MV_Write_Reg, MV_SATA_PSCR_SCONTROL_REG_NUM, 0x01, i, MV_TRUE);	
+		HBA_SleepMillisecond(pCore, 1);
+		mvPMDevReWrReg(pPort, MV_Write_Reg, MV_SATA_PSCR_SCONTROL_REG_NUM, 0x00, i, MV_TRUE);
+		HBA_SleepMillisecond(pCore, 1);
+	
+		SATA_InitPMPort( pPort, i );
+	}
+
+	/* Wait for each port to finish setting flags before starting state machine*/
+	for( i=0; i<numPMPorts; i++ )
+	{
+		pDevice = &pPort->Device[i];
+		if( pDevice->Status & DEVICE_STATUS_FUNCTIONAL )
+			mvDeviceStateMachine( pCore, pDevice );
+	}
+
+	/* Nothing usable behind the PM: still kick the state machine so the
+	 * port's discovery sequence terminates cleanly. */
+	if( pPort->Device_Number == 0 )
+		mvDeviceStateMachine( pCore, &pPort->Device[0] );
+}
+
+/*
+ * Soft-reset whatever sits directly on this port (device or PM, hence
+ * PM port 0xF).  In both outcomes the PORT_CMD start bit is toggled to
+ * restart the command engine; on failure all devices of the port are
+ * marked down and the state machine is advanced so discovery completes.
+ *
+ * Returns MV_TRUE on successful reset, MV_FALSE otherwise.
+ */
+MV_BOOLEAN SATA_PortSoftReset( PCore_Driver_Extension pCore, PDomain_Port pPort )
+{
+	PDomain_Device pDevice = &pPort->Device[0];
+	MV_LPVOID portMmio = pPort->Mmio_Base;
+	MV_U32 tmp;
+	MV_U8 i;
+
+	if (! (SATA_SoftResetDevice(pPort, 0xF)) )
+	{
+		/* toggle the start bit in cmd register */
+		tmp = MV_REG_READ_DWORD( portMmio, PORT_CMD );
+		MV_REG_WRITE_DWORD( portMmio, PORT_CMD, tmp & ~MV_BIT(0));
+		MV_REG_WRITE_DWORD( portMmio, PORT_CMD, tmp | MV_BIT(0));
+		HBA_SleepMillisecond( pCore, 100 );
+
+		if( (pPort->Type != PORT_TYPE_PM) && (pDevice->Status & DEVICE_STATUS_FUNCTIONAL) )
+			SATA_PortReportNoDevice( pCore, pPort );		
+
+		/* had trouble detecting device on this port, so we report existing
+		   but not functional */
+		pDevice->Status = DEVICE_STATUS_EXISTING;
+		pDevice->State = DEVICE_STATE_INIT_DONE;
+
+		/* set the rest of the device on this port */
+		for (i=1; i<MAX_DEVICE_PER_PORT; i++)
+		{	
+			pDevice = &pPort->Device[i];
+			pDevice->Status = DEVICE_STATUS_NO_DEVICE;
+			pDevice->State = DEVICE_STATE_INIT_DONE;
+		}
+				
+		#ifndef _OS_BIOS
+		mvDeviceStateMachine(pCore, pDevice);
+		#endif
+		return MV_FALSE;
+	}
+
+	/* toggle the start bit in cmd register to make sure hardware
+	   is clean after soft reset */
+	tmp = MV_REG_READ_DWORD( portMmio, PORT_CMD );
+	MV_REG_WRITE_DWORD( portMmio, PORT_CMD, tmp & ~MV_BIT(0));
+	MV_REG_WRITE_DWORD( portMmio, PORT_CMD, tmp | MV_BIT(0));
+	HBA_SleepMillisecond( pCore, 100 );
+	
+	return MV_TRUE;
+}
+
+/*
+ * Report every device on this port as removed: flush the port's
+ * waiting list, free each functional device's internal request,
+ * notify the upper layer (RAID or HBA module) of the removal, and
+ * mark all device slots as absent.
+ */
+void SATA_PortReportNoDevice (PCore_Driver_Extension pCore, PDomain_Port pPort)
+{
+	PDomain_Device pDevice;
+	MV_U8 temp, i;
+	#ifdef RAID_DRIVER
+	MV_PVOID pUpperLayer = HBA_GetModuleExtension(pCore, MODULE_RAID);	//TBD;
+	#else
+	MV_PVOID pUpperLayer = HBA_GetModuleExtension(pCore, MODULE_HBA);
+	#endif
+
+	mvRemovePortWaitingList( pCore, pPort->Id );
+
+	/* if PM - clear all the device attached to the port */
+	if( pPort->Type == PORT_TYPE_PM )
+		temp = MAX_DEVICE_PER_PORT-1;
+	else
+		temp = 0;
+		
+	/* temp is the last device index to clear (inclusive). */
+	for( i=0; i<=temp; i++ )
+	{
+		pDevice = &pPort->Device[i];
+
+		if( pDevice->Status & DEVICE_STATUS_FUNCTIONAL )
+		{
+			if( pDevice->Internal_Req != NULL )
+			{
+				pCore->Total_Device_Count--;
+				ReleaseInternalReqToPool( pCore, pDevice->Internal_Req );
+				pDevice->Internal_Req = NULL;
+			}
+
+#ifdef RAID_DRIVER
+			RAID_ModuleNotification(pUpperLayer, EVENT_DEVICE_REMOVAL, (MV_PVOID)(&pDevice->Id));
+#else
+#ifdef _OS_LINUX
+			HBA_ModuleNotification(pUpperLayer, 
+					       EVENT_DEVICE_REMOVAL, 
+					       pDevice->Id);
+#else /* _OS_LINUX */
+			HBA_ModuleNotification(pUpperLayer, 
+					       EVENT_DEVICE_REMOVAL, 
+					       (MV_PVOID) &pDevice->Id);
+#endif /* _OS_LINUX */
+#endif /* RAID_DRIVER */
+			pPort->Device_Number--;
+		}
+		
+		pDevice->Status = DEVICE_STATUS_NO_DEVICE;
+		pDevice->State = DEVICE_STATE_INIT_DONE;
+	}
+}
+
+/*
+ * SATA_PortReset - reset one SATA port and (re)discover what is
+ * attached to it: nothing, a plain ATA/ATAPI device, or a Port
+ * Multiplier with several devices behind it.
+ *
+ * pPort     - port to reset; the per-device state machines are advanced
+ *             once discovery has classified each device slot.
+ * hardReset - hard reset is not implemented for SATA ports; passing
+ *             MV_TRUE trips an assertion.
+ *
+ * Must only be called while the port is idle (no outstanding slots).
+ */
+void SATA_PortReset(
+	PDomain_Port pPort,
+	MV_BOOLEAN hardReset
+	)
+{
+	PCore_Driver_Extension pCore = pPort->Core_Extension;
+
+	PDomain_Device pDevice = &pPort->Device[0];
+	MV_U32 signature;
+	MV_LPVOID portMmio = pPort->Mmio_Base;
+#ifdef SUPPORT_PM
+	/* Only the PM path uses these; scoping them here avoids
+	 * unused-variable warnings in non-PM builds. */
+	MV_U32 tmp, old_stat;
+	MV_U8 skip = MV_FALSE;
+#endif
+	MV_U8 i;
+
+	/* No running commands at this moment */
+	MV_ASSERT( pPort->Running_Slot==0 );
+	MV_ASSERT( pPort->Port_State==PORT_STATE_IDLE );
+
+	pPort->Device_Number = 0;
+
+	MV_DPRINT(("Enter SATA_PortReset.\n"));
+	/* If we already reached the max number of devices supported,
+	   disregard the rest */
+	if( pCore->Total_Device_Count >= MAX_DEVICE_SUPPORTED )
+	{
+		for( i=0; i<MAX_DEVICE_PER_PORT; i++ )
+		{
+			pPort->Device[i].State = DEVICE_STATE_INIT_DONE;
+			pPort->Device[i].Status = DEVICE_STATUS_NO_DEVICE;
+		}
+		MV_DPRINT(("We have too many devices %d.", pCore->Total_Device_Count));
+		return;
+	}
+
+	if ( hardReset )
+	{
+		/* TBD: hard reset is not implemented for SATA ports. */
+		MV_ASSERT(MV_FALSE);
+	}
+
+#ifdef FORCE_1_5_G
+	/* It'll trigger OOB. Looks like PATA hardware reset. 
+	 * Downgrade 3G to 1.5G
+	 * If Port Multiplier is attached, only the PM is downgraded. */
+	{
+		/* FIX: SControl/SStatus previously relied on a function-scope
+		 * declaration that was commented out, breaking the build
+		 * whenever FORCE_1_5_G was enabled.  Declare them in the only
+		 * scope that needs them. */
+		MV_U32 SControl, SStatus;
+
+		//TBD: Not tested
+		SStatus = MV_REG_READ_DWORD(portMmio, PORT_SCR_STAT);
+		if ( (SStatus&0xF0)==0x20 )
+		{
+			/* 3G: limit SControl SPD to Gen1, pulse DET to re-OOB */
+			SControl = MV_REG_READ_DWORD(portMmio, PORT_SCR_CTL);
+			SControl &= ~0x000000FF;
+			SControl |= 0x11;
+			MV_REG_WRITE_DWORD(portMmio, PORT_SCR_CTL, SControl);
+			HBA_SleepMillisecond(pCore, 2);
+			SControl &= ~0x000000FF;
+			SControl |= 0x10;
+			MV_REG_WRITE_DWORD(portMmio, PORT_SCR_CTL, SControl);
+			HBA_SleepMillisecond(pCore, 2);
+		}
+	}
+#endif
+
+	/* Nothing on the wire at all? */
+	if( !SATA_PortDeviceDetected(pPort) )
+	{
+#if defined(SUPPORT_ERROR_HANDLING)
+		/* Whatever used to be here (PM or plain device) is gone;
+		 * let the upper layer drop its devices. */
+		if( pPort->Setting & PORT_SETTING_PM_FUNCTIONAL )
+		{
+			pPort->Setting &= ~PORT_SETTING_PM_FUNCTIONAL;
+			pPort->Setting &= ~PORT_SETTING_PM_EXISTING;
+			MV_DPRINT(("PM on port %d is gone\n", pPort->Id));
+			SATA_PortReportNoDevice( pCore, pPort );
+		}
+		else if( pDevice->Status & DEVICE_STATUS_FUNCTIONAL ) 
+		{
+			MV_DPRINT(("device on port %d is gone\n", pPort->Id));
+			SATA_PortReportNoDevice( pCore, pPort );
+		}
+#endif
+
+		// fixed: have to set each device individually - or hot plug will have problem
+		for (i=0; i<MAX_DEVICE_PER_PORT; i++)
+		{	
+			pDevice = &pPort->Device[i];
+			pDevice->State = DEVICE_STATE_INIT_DONE;
+		}
+				
+		#ifndef _OS_BIOS
+		mvDeviceStateMachine(pCore, pDevice);
+		#endif
+		return;
+	}
+
+	/* Present but never became ready: mark non-functional. */
+	if( !SATA_PortDeviceReady(pPort) )
+	{	
+		MV_DUMPC32(0xCCCCBB83);
+#if defined(SUPPORT_ERROR_HANDLING)
+		if( pPort->Setting & PORT_SETTING_PM_FUNCTIONAL ) 
+		{
+			pPort->Setting &= ~PORT_SETTING_PM_FUNCTIONAL;
+			MV_DPRINT(("PM on port %d is non-functional\n", pPort->Id));
+			SATA_PortReportNoDevice( pCore, pPort );
+		}
+		else if( pDevice->Status & DEVICE_STATUS_FUNCTIONAL )
+		{
+			MV_DPRINT(("device on port %d is non-functional\n", pPort->Id));
+			SATA_PortReportNoDevice( pCore, pPort );
+			pDevice->Status = DEVICE_STATUS_EXISTING;		
+		}
+#endif
+		for (i=0; i<MAX_DEVICE_PER_PORT; i++)
+		{	
+			pDevice = &pPort->Device[i];
+			pDevice->State = DEVICE_STATE_INIT_DONE;
+		}
+				
+		#ifndef _OS_BIOS
+		mvDeviceStateMachine(pCore, pDevice);
+		#endif
+		return;
+	}
+
+
+#ifdef SUPPORT_PM
+	/* link error work around */
+	mvDisableIntr( portMmio, old_stat );
+	MV_REG_WRITE_DWORD( pPort->Mmio_Base, PORT_VSR_ADDR, 0x5 );
+	tmp = MV_REG_READ_DWORD( pPort->Mmio_Base, PORT_VSR_DATA );
+	MV_REG_WRITE_DWORD( pPort->Mmio_Base, PORT_VSR_DATA, tmp | MV_BIT(26));
+	HBA_SleepMillisecond( pCore, 1 );
+	mvEnableIntr( portMmio, old_stat );
+
+	/* Fast boot may be configured to skip PM probing. */
+	if ( (pCore->State!=CORE_STATE_STARTED) &&
+		 (pCore->Flag_Fastboot_Skip & FLAG_SKIP_PM) )
+		 skip = MV_TRUE;
+
+	if(!skip) /*Not skip in running time*/
+	{
+		MV_DPRINT(("SATA_PortReset\n"));
+
+		/* Always turn the PM bit on - otherwise won't work! */
+		tmp = MV_REG_READ_DWORD(portMmio, PORT_CMD);
+		MV_REG_WRITE_DWORD(portMmio, PORT_CMD, tmp | MV_BIT(17));
+		tmp=MV_REG_READ_DWORD(portMmio, PORT_CMD);	/* flush */
+
+		HBA_SleepMillisecond(pCore, 200);
+
+		if (! (SATA_PortSoftReset( pCore, pPort )) )
+		{
+#if defined(SUPPORT_ERROR_HANDLING) || defined(_OS_LINUX)
+			if( pPort->Setting & PORT_SETTING_PM_FUNCTIONAL ) 
+			{
+				pPort->Setting &= ~PORT_SETTING_PM_FUNCTIONAL;
+				SATA_PortReportNoDevice( pCore, pPort );
+			}
+#endif
+			return;
+		}
+	} else {
+		MV_DPRINT(("SATA_PortReset is skipped.\n"));
+	}
+
+	if( pPort->Type == PORT_TYPE_PM ) 
+	{
+		SATA_InitPM( pPort );
+	} 
+	else
+#endif	
+	{
+
+#ifdef SUPPORT_PM
+	/* not a PM - turn off the PM bit in command register */
+	tmp = MV_REG_READ_DWORD(portMmio, PORT_CMD);
+	MV_REG_WRITE_DWORD(portMmio, PORT_CMD, tmp & (~MV_BIT(17)));
+	tmp=MV_REG_READ_DWORD(portMmio, PORT_CMD);	/* flush */
+#endif
+
+	signature = MV_REG_READ_DWORD(pPort->Mmio_Base, PORT_SIG);
+	MV_DUMPC32(0xCCCCBB84);
+	MV_DUMPC32(signature);
+
+	if ( signature==0xEB140101 )				/* ATAPI signature */
+	{		
+		pDevice->Device_Type |= DEVICE_TYPE_ATAPI;
+	}
+	else
+	{
+		MV_DASSERT( signature==0x00000101 );	/* ATA signature */
+	}
+
+	/* Device is ready */
+	pDevice->Internal_Req = GetInternalReqFromPool(pCore);
+	if( pDevice->Internal_Req == NULL )
+	{
+		MV_DPRINT(("ERROR: Unable to get an internal request buffer\n"));
+		// can't initialize without internal buffer - just set this disk down
+		pDevice->Status = DEVICE_STATUS_NO_DEVICE;
+		pDevice->State = DEVICE_STATE_INIT_DONE;
+	}
+	else 
+	{
+		pDevice->Status = DEVICE_STATUS_EXISTING|DEVICE_STATUS_FUNCTIONAL;
+		pDevice->State = DEVICE_STATE_RESET_DONE;
+		pPort->Device_Number = 1;	/* We have one device here. */
+	}
+
+	/* set the rest of the devices on this port */
+	for ( i=1; i<MAX_DEVICE_PER_PORT; i++ )
+	{
+		pDevice = &pPort->Device[i];
+		pDevice->Status = DEVICE_STATUS_NO_DEVICE;
+		pDevice->State = DEVICE_STATE_INIT_DONE;
+	}
+	
+	mvDeviceStateMachine(pCore, &pPort->Device[0]);
+
+	}
+}
+
+/* Build one 16-bit PATA controller-command word in place:
+ *   bits 15:8 = taskfile register address, bit 14 = read, bit 13 =
+ *   slave select, low byte = data (writes only, stored byte-wise so the
+ *   in-memory layout matches what the controller fetches). */
+MV_VOID PATA_MakeControllerCommandBlock(
+	MV_PU16 pControllerCmd,
+	MV_U8 address, 
+	MV_U8 data, 
+	MV_BOOLEAN master, 
+	MV_BOOLEAN write
+	)
+{
+	MV_U16 flags = 0;
+
+	if ( !master )
+		flags |= MV_BIT(13);	/* target the slave device */
+	if ( !write )
+		flags |= MV_BIT(14);	/* register read request */
+
+	*pControllerCmd = 0;
+	if ( write )
+		*((MV_PU8)pControllerCmd) = data;
+	*pControllerCmd |= (MV_U16)(address << 8) | flags;
+}
+
+/* Poll the ATA register using enhanced mode. Exp register and Data is not included. */
+MV_BOOLEAN PATA_PollControllerCommand(
+	PDomain_Port pPort, 
+	MV_U8 slot,
+	MV_U8 registerAddress,
+	MV_U8 registerData,
+	MV_BOOLEAN master,
+	MV_BOOLEAN write,
+	PATA_TaskFile pTaskFile
+	)
+{
+	PCore_Driver_Extension pCore = pPort->Core_Extension;
+	MV_LPVOID mmio = pCore->Mmio_Base;
+	MV_LPVOID port_mmio = pPort->Mmio_Base;
+	/* Cannot use Is_Slave to judge whether it's a master device. The flag may be not ready yet. */
+
+	PMV_PATA_Command_Header header = PATA_GetCommandHeader(pPort, slot);
+	PMV_Command_Table pCmdTable = Port_GetCommandTable(pPort, slot);
+	MV_PU16 pCmdTableU16 = (MV_PU16)pCmdTable;
+	MV_U32 loop = 1000, i;
+	MV_U32 temp = 0;
+
+	/* Always use the first slot */
+	MV_ASSERT( (pPort->Running_Slot&(1L<<slot))==0 );
+	MV_ZeroMemory( pTaskFile, sizeof(ATA_TaskFile) );
+	MV_ZeroMemory(header, sizeof(MV_PATA_Command_Header));
+
+	/* Command list */
+	header->Controller_Command = 1;
+	header->PIO_Sector_Count = 1;	/* How many command */
+
+	if ( !master )
+		header->Is_Slave = 1;
+
+	header->Table_Address = pPort->Cmd_Table_DMA.low;
+	header->Table_Address_High = pPort->Cmd_Table_DMA.high;
+
+	PATA_MakeControllerCommandBlock(pCmdTableU16++, registerAddress, registerData, master, write);
+	
+	MV_REG_WRITE_DWORD(port_mmio, PORT_CMD_ISSUE, MV_BIT(0));
+	MV_REG_READ_DWORD(port_mmio, PORT_CMD_ISSUE);	/* flush */
+
+	/* Loop command issue to check whether it's finished. Hardware won't trigger interrupt. */
+	while ( loop>0 )
+	{
+		/* check interrupt */
+		temp = MV_REG_READ_DWORD(port_mmio, PORT_CMD_ISSUE);
+
+		if ( temp ==0 )	/* It's done. */
+		{
+			/* Anyway it's still better to clear the interrupt. */
+			temp = MV_REG_READ_DWORD(port_mmio, PORT_IRQ_STAT);
+			MV_REG_WRITE_DWORD(port_mmio, PORT_IRQ_STAT, temp);
+			MV_REG_WRITE_DWORD(mmio, HOST_IRQ_STAT, (1L<<pPort->Id));
+
+			if ( master )
+			{
+				for (i=0; i<1000; i++)
+				{
+					temp = MV_REG_READ_DWORD(port_mmio, PORT_MASTER_TF0);
+					if(temp == 0x7F) break;
+					if((temp & 0x80) == 0) break;
+					HBA_SleepMillisecond(pCore, 1);
+				}
+
+				pTaskFile->Command = (MV_U8)temp;
+				pTaskFile->Device = (MV_U8)(temp>>8);
+				pTaskFile->Features = (MV_U8)(temp>>24);
+				temp = MV_REG_READ_DWORD(port_mmio, PORT_MASTER_TF1);
+				pTaskFile->LBA_Low = (MV_U8)(temp>>8);
+				pTaskFile->Sector_Count = (MV_U8)(temp>>24);
+				temp = MV_REG_READ_DWORD(port_mmio, PORT_MASTER_TF2);
+				pTaskFile->LBA_High = (MV_U8)(temp>>8);
+				pTaskFile->LBA_Mid = (MV_U8)(temp>>24);
+			}
+			else
+			{
+				for (i=0; i<1000; i++)
+				{
+					temp = MV_REG_READ_DWORD(port_mmio, PORT_SLAVE_TF0);
+					if(temp == 0x7F) break;
+					if((temp & 0x80) == 0) break;
+					HBA_SleepMillisecond(pCore, 1);
+				}
+
+				pTaskFile->Command = (MV_U8)temp;
+				pTaskFile->Device = (MV_U8)(temp>>8);
+				pTaskFile->Features = (MV_U8)(temp>>24);
+				temp = MV_REG_READ_DWORD(port_mmio, PORT_SLAVE_TF1);
+				pTaskFile->LBA_Low = (MV_U8)(temp>>8);
+				pTaskFile->Sector_Count = (MV_U8)(temp>>24);
+				temp = MV_REG_READ_DWORD(port_mmio, PORT_SLAVE_TF2);
+				pTaskFile->LBA_High = (MV_U8)(temp>>8);
+				pTaskFile->LBA_Mid = (MV_U8)(temp>>24);
+			}
+			return MV_TRUE;
+		}
+		loop--;
+		HBA_SleepMillisecond(pCore, 1);
+	}
+
+	/* If this command is not completed and hardware is not cleared, we'll have trouble. */
+	MV_DASSERT( MV_REG_READ_DWORD(port_mmio, PORT_CMD_ISSUE)==0 );
+
+	return MV_FALSE;
+}
+
+/* Repeatedly select the master/slave device and re-read its status
+ * until BSY (bit 7) clears, for up to ~5 seconds.  Returns MV_TRUE
+ * when the device came out of busy, MV_FALSE on timeout. */
+MV_BOOLEAN PATA_PortDeviceWaitForBusy(
+	PDomain_Port pPort, 
+	MV_BOOLEAN master
+	)
+{
+	PCore_Driver_Extension pCore = pPort->Core_Extension;
+	ATA_TaskFile taskFile;
+	MV_U32 attempts = 5000;	/* ~1 ms per attempt => ~5 seconds total */
+	MV_U8 deviceSelect = master ? 0xA0 : 0xB0;
+
+	do {
+		PATA_PollControllerCommand(pPort, 0, ATA_REGISTER_DEVICE,
+					   deviceSelect, master, MV_TRUE, &taskFile);
+		HBA_SleepMillisecond(pCore, 1);
+		attempts--;
+	} while ( (taskFile.Command & MV_BIT(7)) && (attempts > 0) );
+
+	if ( taskFile.Command & MV_BIT(7) )
+	{
+		MV_DPRINT(("Port %d %s is busy retry=%d.\n", 
+			pPort->Id, master?"master":"slave", (5000-attempts)));
+	}
+	else
+	{
+		MV_DPRINT(("Port %d %s is not busy retry=%d.\n", 
+			pPort->Id, master?"master":"slave", (5000-attempts)));
+	}
+
+	return ( !(taskFile.Command & MV_BIT(7)) );
+}
+
+/* Probe for a master/slave device on a PATA channel by selecting it
+ * and inspecting the signature left in the shadow taskfile.  Sets
+ * *isATAPI accordingly and returns MV_TRUE when a device is present. */
+MV_BOOLEAN PATA_PortDeviceDetected(PDomain_Port pPort, MV_BOOLEAN master, MV_BOOLEAN * isATAPI)
+{
+	ATA_TaskFile taskFile;
+	MV_U8 deviceSelect = master ? 0xA0 : 0xB0;
+
+	PATA_PollControllerCommand(pPort, 0, ATA_REGISTER_DEVICE,
+				   deviceSelect, master, MV_TRUE, &taskFile);
+
+	/* ATAPI signature: LBA low/mid/high = 01h/14h/EBh. */
+	if ( (taskFile.LBA_Low == 0x01) &&
+	     (taskFile.LBA_Mid == 0x14) &&
+	     (taskFile.LBA_High == 0xEB) )
+	{
+		*isATAPI = MV_TRUE;
+		return MV_TRUE;
+	}
+
+	/* ATA signature: count = 01h, LBA = 01h/00h/00h. */
+	if ( (taskFile.Sector_Count == 0x01) &&
+	     (taskFile.LBA_Low == 0x01) &&
+	     (taskFile.LBA_Mid == 0x00) &&
+	     (taskFile.LBA_High == 0x00) )
+	{
+		/* An all-zero status register means nothing is driving the
+		 * bus - conclude no drive is present. */
+		if ( taskFile.Command == 0 )
+			return MV_FALSE;
+
+		*isATAPI = MV_FALSE;
+		return MV_TRUE;
+	}
+	
+	return MV_FALSE;
+}
+
+/* Check whether the master/slave device on a PATA channel is ready to
+ * accept commands.  ATAPI devices are considered always ready; ATA
+ * devices must report DRDY and DSC set in the status register.  Sets
+ * *isATAPI when a recognised signature is found. */
+MV_BOOLEAN PATA_PortDeviceReady(PDomain_Port pPort, MV_BOOLEAN master, MV_BOOLEAN * isATAPI)
+{
+	ATA_TaskFile taskFile;
+	MV_U8 deviceSelect = master ? 0xA0 : 0xB0;
+
+	PATA_PollControllerCommand(pPort, 0, ATA_REGISTER_DEVICE,
+				   deviceSelect, master, MV_TRUE, &taskFile);
+
+	/* ATAPI signature: LBA low/mid/high = 01h/14h/EBh. */
+	if ( (taskFile.LBA_Low == 0x01) &&
+	     (taskFile.LBA_Mid == 0x14) &&
+	     (taskFile.LBA_High == 0xEB) )
+	{
+		*isATAPI = MV_TRUE;
+		return MV_TRUE;	/* ATAPI is always ready. */
+	}
+
+	/* ATA signature: count = 01h, LBA = 01h/00h/00h. */
+	if ( (taskFile.Sector_Count == 0x01) &&
+	     (taskFile.LBA_Low == 0x01) &&
+	     (taskFile.LBA_Mid == 0x00) &&
+	     (taskFile.LBA_High == 0x00) )
+	{
+		/* Ready when DRDY (bit 6) and DSC (bit 4) are both set. */
+		*isATAPI = MV_FALSE;
+		return ( (taskFile.Command & 0x50) == 0x50 ) ? MV_TRUE : MV_FALSE;
+	}
+
+	return MV_FALSE;
+}
+
+void PATA_PortReset(
+	PDomain_Port pPort,
+	MV_BOOLEAN hardReset
+	)
+{	/* Reset a PATA port (optional hard reset, then a shared soft reset), probe master/slave presence, and start per-device initialization. */
+	PCore_Driver_Extension pCore = pPort->Core_Extension;
+	PDomain_Device pDevice = NULL;
+	MV_LPVOID portMmio = pPort->Mmio_Base;
+	MV_BOOLEAN temp, isMaster;
+	ATA_TaskFile taskFile;
+	MV_U8 i;
+	MV_U32 registerValue;
+	MV_U32 retry;
+	MV_U8  skip = MV_FALSE;
+	MV_BOOLEAN working[2];	/* Check whether the master/slave device is functional. */
+	MV_BOOLEAN isATAPI[2];	/* Check whether it's ATAPI device. */
+	MV_BOOLEAN unplug[2];	/* Set when a formerly functional device is found gone after this reset. */
+#ifdef SUPPORT_ERROR_HANDLING	
+#ifdef RAID_DRIVER
+	MV_PVOID pUpperLayer = HBA_GetModuleExtension(pCore, MODULE_RAID);	//TBD;
+#else
+	MV_PVOID pUpperLayer = HBA_GetModuleExtension(pCore, MODULE_HBA);
+#endif /* RAID_DRIVER */
+#endif /* SUPPORT_ERROR_HANDLING */
+	MV_DUMPC32(0xCCCCBB91);	
+	/* No running commands at this moment */
+	MV_ASSERT( pPort->Running_Slot==0 );
+	MV_ASSERT( pPort->Port_State==PORT_STATE_IDLE );
+
+#ifdef MV_DEBUG
+	{
+		MV_U8 i;
+		for ( i=0; i<MAX_SLOT_NUMBER; i++ )
+		{
+			MV_DASSERT(pPort->Running_Req[i]==NULL);
+		}
+	}
+#endif
+		
+	/* If we already reached the max number of devices supported,
+	   disregard the rest */
+	if( pCore->Total_Device_Count >= MAX_DEVICE_SUPPORTED )
+	{
+		for( i=0; i<MAX_DEVICE_PER_PORT; i++ )
+		{
+			pPort->Device[i].State = DEVICE_STATE_INIT_DONE;
+			pPort->Device[i].Status = DEVICE_STATUS_NO_DEVICE;
+		}
+		return;
+	}
+
+	unplug[0]=MV_FALSE;
+	unplug[1]=MV_FALSE;
+	pPort->Device_Number = 0;	/* re-counted below as devices are detected */
+	/*
+	 * For PATA device, reset signal is shared between master and slave.
+	 * So both hard reset and soft reset are port based, not device based.
+	 */
+	if ( hardReset )
+	{
+#if 1
+		registerValue = MV_REG_READ_DWORD(portMmio, PORT_CMD);
+		MV_ASSERT( !(registerValue&PORT_CMD_PATA_HARD_RESET) );
+
+		registerValue |= PORT_CMD_PATA_HARD_RESET;
+		MV_REG_WRITE_DWORD(portMmio, PORT_CMD, registerValue);
+
+		do {
+			registerValue = MV_REG_READ_DWORD(portMmio, PORT_CMD);
+		} while ( registerValue&PORT_CMD_PATA_HARD_RESET );	/* hardware clears the bit when the reset pulse completes */
+
+		HBA_SleepMillisecond(pCore, 2);
+		MV_DASSERT( MV_REG_READ_DWORD(portMmio, PORT_CMD_ISSUE)==0 );
+#endif
+	}
+	MV_DUMPC32(0xCCCCBB92);	
+
+	if ( (pCore->State!=CORE_STATE_STARTED) &&
+		 (pCore->Flag_Fastboot_Skip & FLAG_SKIP_PATA_DEVICE) )
+		 skip = MV_TRUE;	/* fastboot: skip PATA detection until the core has started */
+
+	if (skip)
+	{
+		for (i=0; i<MAX_DEVICE_PER_PORT; i++)
+		{
+			pDevice = &pPort->Device[i];
+			pDevice->Status = DEVICE_STATUS_NO_DEVICE;
+			pDevice->State = DEVICE_STATE_INIT_DONE;
+		}
+	}
+	else
+	{
+#if 1
+		/* Do soft reset. Soft reset is port based, not device based. */
+		pDevice = &pPort->Device[0];
+		PATA_PollControllerCommand(pPort, 0, ATA_REGISTER_DEVICE_CONTROL, MV_BIT(2), MV_TRUE, MV_TRUE, &taskFile);
+		HBA_SleepMicrosecond(pCore, 10);	/* At least 5 microseconds. */
+
+		pDevice = &pPort->Device[0];
+		PATA_PollControllerCommand(pPort, 0, ATA_REGISTER_DEVICE_CONTROL, 0, MV_TRUE, MV_TRUE, &taskFile);
+		HBA_SleepMillisecond(pCore, 5);		/* At least 2 millisecond. */
+#endif
+
+		isMaster = MV_TRUE;	/* probe the master (device 0) first, then the slave */
+
+		for ( i=2; i<MAX_DEVICE_PER_PORT; i++ )	/* only slots 0/1 can hold PATA devices; mark the rest absent */
+		{
+			pDevice = &pPort->Device[i];
+			pDevice->Status = DEVICE_STATUS_NO_DEVICE;
+			pDevice->State = DEVICE_STATE_INIT_DONE;
+		}
+
+		/* 
+		 * Check master and slave devices. Master is at device[0], Slave is at device [1].
+		 */
+		/* Slave/Master device. Detect first. After it's totally done, we can send request to the devices. */
+		for ( i=0; i<2; i++ )
+		{
+			pDevice = NULL;/* Shouldn't use pDevice here. */
+
+			/* Wait for busy after the reset */
+			temp = PATA_PortDeviceWaitForBusy(pPort, isMaster);
+
+			/* 
+			* Suppose after waiting for 5 seconds for the BSY signal, we only need check the signature once.
+			* But I found one ATAPI device BSY is clear right away.
+			* But the first time we read the signature, it's all 0x7F. 
+			* Only after a while, it will return the correct value.
+			*/
+			if ( temp )
+			{
+		#if 0	
+				retry = 2000;	//TBD: Kind of too long. 10 millisecond is not enough.
+		#else
+				retry = 20;
+		#endif
+		
+				do {
+					temp = PATA_PortDeviceDetected(pPort, isMaster, &isATAPI[i]);
+					temp &= PATA_PortDeviceReady(pPort, isMaster, &isATAPI[i]);
+					retry--;
+					HBA_SleepMillisecond(pCore, 1);
+				} while ( (retry>0)&&(!temp) );
+
+				if ( !temp )
+				{
+					if ( isMaster ) 
+						PATA_PollControllerCommand(pPort, 0, ATA_REGISTER_DEVICE, 0xA0, isMaster, MV_TRUE, &taskFile);
+					else
+						PATA_PollControllerCommand(pPort, 0, ATA_REGISTER_DEVICE, 0xB0, isMaster, MV_TRUE, &taskFile);
+
+					MV_DPRINT(("PATA task file: Sector_Count=0x%x, LBA_Low=0x%x, LBA_Mid=0x%x, LBA_High=0x%x retry=%d.\n",
+						taskFile.Sector_Count, taskFile.LBA_Low, taskFile.LBA_Mid, taskFile.LBA_High, (20-retry)));
+				}
+				else
+				{
+					MV_DPRINT(("PATA is detected and ready after retry %d.\n", (20-retry)));
+				}
+			}
+
+			working[i] = temp;
+			isMaster = MV_FALSE;	/* second pass probes the slave */
+
+			pDevice = &pPort->Device[i];
+			if ( isATAPI[i] ) 
+				pDevice->Device_Type |= DEVICE_TYPE_ATAPI;
+			else
+				pDevice->Device_Type &= ~DEVICE_TYPE_ATAPI;
+			pDevice->Is_Slave = (i==0)?MV_FALSE:MV_TRUE;
+
+			/* 
+			 * If the device has been reset for too many times, 
+			 * just set down this disk. It's better to set 
+			 * MEDIA ERROR to the timeout request. 
+			 */
+			if ( pDevice->Reset_Count>CORE_MAX_RESET_COUNT )
+				working[i] = MV_FALSE;
+
+			if ( !working[i] ) 
+			{
+				if ( pDevice->Status&DEVICE_STATUS_FUNCTIONAL )
+				{
+					pDevice->Status = DEVICE_STATUS_NO_DEVICE;
+#ifndef _OS_BIOS
+					MV_DPRINT(("Port %d %s is gone.\n", pPort->Id, pDevice->Is_Slave?"slave":"master"));
+#endif
+					unplug[i] = MV_TRUE;
+				}
+				else
+				{
+					pDevice->Status = DEVICE_STATUS_NO_DEVICE;
+			#ifndef _OS_BIOS
+					MV_DPRINT(("Port %d %s not ready.\n", pPort->Id, pDevice->Is_Slave?"slave":"master"));
+			#endif
+				}
+				pDevice->State = DEVICE_STATE_INIT_DONE;
+			}
+			else
+			{
+				//MV_DUMPC32(0xCCCCDDFF);
+			#ifndef _OS_BIOS
+				MV_DPRINT(("Port %d %s ready.\n", pPort->Id, pDevice->Is_Slave?"slave":"master"));
+			#endif
+				
+				pDevice->Internal_Req = GetInternalReqFromPool(pCore);
+				if( pDevice->Internal_Req == NULL )
+				{
+					MV_DPRINT(("ERROR: Unable to get an internal request buffer\n"));
+					// can't initialize without internal buffer - just set this disk down
+					pDevice->Status = DEVICE_STATUS_NO_DEVICE;
+					pDevice->State = DEVICE_STATE_INIT_DONE;
+				}
+				else 
+				{
+					pDevice->Status = DEVICE_STATUS_EXISTING|DEVICE_STATUS_FUNCTIONAL;
+					pPort->Device_Number++;
+				}
+			}
+		}
+		//MV_DUMPC32(0xCCCCBB93);	
+
+		/* Set Device State for all devices first */
+		for ( i=0; i<MAX_DEVICE_PER_PORT; i++ )
+		{
+			pDevice = &pPort->Device[i];
+			if ( pDevice->Status & DEVICE_STATUS_FUNCTIONAL )
+			{
+				pDevice->State = DEVICE_STATE_RESET_DONE;
+				/* Don't start mvDeviceStateMachine now. 
+				 * It may trigger other devices to send DMA request before resetting is done. */
+			}
+		}
+
+		/* After all the flags are set, we can do some related to the state machine and waiting list. */
+		for ( i=0; i<2; i++ )
+		{
+			pDevice = &pPort->Device[i];
+			if ( unplug[i] ) 
+			{
+				pCore->Total_Device_Count--;
+				ReleaseInternalReqToPool( pCore, pDevice->Internal_Req );
+				pDevice->Internal_Req = NULL;
+
+				mvRemoveDeviceWaitingList( pCore, pDevice->Id, MV_TRUE );
+
+			#ifdef SUPPORT_ERROR_HANDLING	
+				#ifdef RAID_DRIVER
+					RAID_ModuleNotification(pUpperLayer, EVENT_DEVICE_REMOVAL, (MV_PVOID)(&pDevice->Id));
+				#else
+#ifdef _OS_LINUX
+					HBA_ModuleNotification(pUpperLayer, EVENT_DEVICE_REMOVAL, pDevice->Id);
+#endif /* _OS_LINUX */
+#ifdef _OS_WINDOWS
+					HBA_ModuleNotification(pUpperLayer, EVENT_DEVICE_REMOVAL,(MV_PVOID)(&pDevice->Id));
+#endif /* _OS_WINDOWS */
+				#endif
+			#endif
+			} 
+		}
+
+		/* Then run the status machine.*/
+		for ( i=0; i<MAX_DEVICE_PER_PORT; i++ )
+		{
+			pDevice = &pPort->Device[i];
+			if ( pDevice->Status & DEVICE_STATUS_FUNCTIONAL ) 
+			{
+				mvDeviceStateMachine(pCore, pDevice);
+			}
+		}
+	}
+
+	if ( pPort->Device_Number==0 )
+	{
+		/* Just use the first device to make the ball roll. */
+		#ifndef _OS_BIOS
+		mvDeviceStateMachine(pCore, &pPort->Device[0]);	/* device[0] is INIT_DONE here; this drives the port to INIT_DONE */
+		#endif
+	}
+
+}
+
+static MV_BOOLEAN mvChannelStateMachine(
+	PCore_Driver_Extension pCore,
+	PDomain_Port pPort
+	)
+{	/* Port-level state machine: resets all ports when idle; once every port reaches INIT_DONE, completes discovery (first-time module start, or hot-plug arrival notification) or resumes the waiting list after a reset. */
+	MV_U8 i;
+	MV_U8 portState;
+	PDomain_Device pDevice;
+	PDomain_Port pOrgPort = pPort;
+	#ifdef RAID_DRIVER
+	MV_PVOID pUpperLayer = HBA_GetModuleExtension(pCore, MODULE_RAID);	//TBD;
+	//MV_U16 plugInDeviceId;	//may change it later
+	#else
+	MV_PVOID pUpperLayer = HBA_GetModuleExtension(pCore, MODULE_HBA);
+	#endif
+
+	if ( pPort==NULL )
+		portState = PORT_STATE_IDLE;	/* NULL port: treat as the initial discovery pass */
+	else {
+		portState = pPort->Port_State;
+	}
+
+#ifdef _OS_BIOS
+	/* Each step: if fail like no device, should go to the end. */
+	/* Channel state machine */
+	/* To do reset */
+	for( i=0; i<pCore->Port_Num; i++ )
+	{
+		MV_DUMPC32(0xCCCCBB80);
+		pPort = &pCore->Ports[i];
+		MV_DASSERT( pPort->Port_State==PORT_STATE_IDLE );
+		if ( pPort->Type==PORT_TYPE_PATA )
+			PATA_PortReset( pPort, MV_TRUE );
+		else
+			SATA_PortReset( pPort, MV_FALSE );
+	}
+		MV_DUMPC32(0xCCCCBBB7);
+
+	/* 
+	 * Each port will call mvDeviceStateMachine for its devices. 
+	 * When all the devices for that port are done, will call mvChannelStateMachine.
+	 */
+
+	/* Check whether all the ports are done. */
+	for ( i=0; i<pCore->Port_Num; i++ )
+	{
+		pPort = &pCore->Ports[i];
+		if ( pPort->Port_State!=PORT_STATE_INIT_DONE )
+			return MV_TRUE;	/* some port still initializing; try again on a later call */
+	}
+		//MV_DUMPC32(0xCCCCBBB8);
+
+	/* Discovery procedure is finished. */
+	if(pCore->Need_Reset == 0)
+	{
+		if ( pCore->State==CORE_STATE_IDLE )
+		{
+			pCore->State = CORE_STATE_STARTED;
+			//MV_DUMPC32(0xCCCCBBB9);
+			HBA_ModuleStarted(pCore);	/* The first time initialization */
+		}
+#ifndef _OS_BIOS
+		else
+		{
+			//MV_DUMPC32(0xCCCCBBBA);
+
+			/* check which device on this port needs to be reported */
+			for (i=0; i<MAX_DEVICE_PER_PORT; i++)
+			{
+				pDevice = &pOrgPort->Device[i];
+				if ( pDevice->Need_Notify )
+				{
+#ifdef RAID_DRIVER
+					RAID_ModuleNotification(pUpperLayer, EVENT_DEVICE_ARRIVAL, (MV_PVOID)(&pDevice->Id));
+#else
+#ifdef _OS_LINUX
+					HBA_ModuleNotification(pUpperLayer, EVENT_DEVICE_ARRIVAL, pDevice->Id);
+#else /* _OS_LINUX */
+					HBA_ModuleNotification(pUpperLayer, EVENT_DEVICE_ARRIVAL, (MV_PVOID) &pDevice->Id);
+#endif /* _OS_LINUX */
+#endif /* RAID_DRIVER */
+					pDevice->Need_Notify = MV_FALSE;	
+				}
+			}
+		} 
+#endif /* _OS_BIOS (ifndef) */
+
+	}
+	else
+	{
+			//MV_DUMPC32(0xCCCCBBBC);
+		pCore->Need_Reset = 0;
+		pCore->Resetting = 0;
+
+		/* Begin to handle request again. */
+		Core_HandleWaitingList(pCore);
+	}
+	//MV_DUMPC32(0xCCCCBBBF);
+	return MV_TRUE;
+
+#else
+
+	//Each step: if fail like no device, should go to the end.
+	/* Channel state machine */
+	switch ( portState )
+	{
+		case PORT_STATE_IDLE:
+			/* To do reset */
+			for( i=0; i<pCore->Port_Num; i++ )
+			{
+				pPort = &pCore->Ports[i];
+				MV_DASSERT( pPort->Port_State==PORT_STATE_IDLE );
+				if ( pPort->Type==PORT_TYPE_PATA )
+					PATA_PortReset( pPort, MV_TRUE );
+				else
+					SATA_PortReset( pPort, MV_FALSE );
+			}
+			break;
+
+		/* 
+		 * Each port will call mvDeviceStateMachine for its devices. 
+		 * When all the devices for that port are done, will call mvChannelStateMachine.
+		 */
+
+		case PORT_STATE_INIT_DONE:
+
+			/* Check whether all the ports are done. */
+			for ( i=0; i<pCore->Port_Num; i++ )
+			{
+				pPort = &pCore->Ports[i];
+				if ( pPort->Port_State!=PORT_STATE_INIT_DONE )
+					return MV_TRUE;	/* some port still initializing; try again on a later call */
+			}
+
+			/* Discovery procedure is finished. */
+			if(pCore->Need_Reset == 0)
+			{
+				if ( pCore->State==CORE_STATE_IDLE )
+				{
+					pCore->State = CORE_STATE_STARTED;
+					HBA_ModuleStarted(pCore);	/* The first time initialization */
+				}
+				else
+				{
+					/* check which device on this port needs to be reported */
+					for (i=0; i<MAX_DEVICE_PER_PORT; i++)
+					{
+						pDevice = &pOrgPort->Device[i];
+						if ( pDevice->Need_Notify )
+						{
+		#ifdef RAID_DRIVER
+							RAID_ModuleNotification(pUpperLayer, EVENT_DEVICE_ARRIVAL, (MV_PVOID)(&pDevice->Id));
+		#else
+#ifdef _OS_LINUX
+							HBA_ModuleNotification(pUpperLayer, EVENT_DEVICE_ARRIVAL, pDevice->Id);
+#else /* _OS_LINUX */
+							HBA_ModuleNotification(pUpperLayer, EVENT_DEVICE_ARRIVAL, (MV_PVOID) &pDevice->Id);
+#endif /* _OS_LINUX */
+		#endif
+							pDevice->Need_Notify = MV_FALSE;	
+						}
+					}
+				}
+			}
+			else
+			{
+				pCore->Need_Reset = 0;
+				pCore->Resetting = 0;
+
+				/* Begin to handle request again. */
+				Core_HandleWaitingList(pCore);
+			}
+			break;
+	}
+
+	return MV_TRUE;
+
+#endif
+
+}
+
+MV_BOOLEAN mvDeviceStateMachine(
+	PCore_Driver_Extension pCore,
+	PDomain_Device pDevice
+	)
+{	/* Per-device init state machine: identify -> set transfer modes -> cache/read-ahead -> INIT_DONE; chains into mvChannelStateMachine once every device on the port is done. */
+	MV_U8 i;
+	PDomain_Port pPort = pDevice->PPort;
+#ifdef _OS_BIOS
+	if( pDevice->State==DEVICE_STATE_INIT_DONE)
+	{
+		MV_DUMPC32(0xCCCCBBB3);
+		//MV_HALTKEY;
+		/* Initialization procedure is done. */
+		return MV_TRUE;
+	}
+	
+	if( pDevice->State==DEVICE_STATE_RESET_DONE)
+	{
+		MV_DUMPC32(0xCCCCBBB4);
+		//MV_HALTKEY;
+		/* To do identify */
+		Device_IssueIdentify( pPort, pDevice);
+	}
+#if 0
+	if( pDevice->State==DEVICE_STATE_IDENTIFY_DONE )
+	{
+#if 0//DEBUG_BIOS
+		MV_DUMPC32(0xCCCCBBB5);
+		MV_HALTKEY
+#endif
+		/* To do set PIO mode */
+		Device_IssueSetPIOMode(pPort, pDevice);
+	}
+
+
+	if( pDevice->State==DEVICE_STATE_SET_PIO_DONE)
+	{
+#if 0//DEBUG_BIOS
+		MV_DUMPC32(0xCCCCBBB6);
+		MV_HALTKEY
+#endif
+
+		/* To do set UDMA mode */
+		Device_IssueSetUDMAMode(pPort, pDevice);
+	}
+
+#else
+	if( pDevice->State==DEVICE_STATE_IDENTIFY_DONE)
+	{
+		MV_DUMPC32(0xCCCCBBB6);
+		//MV_HALTKEY;
+		/* To do set UDMA mode */
+		Device_IssueSetUDMAMode(pPort, pDevice);
+	}
+
+#endif
+
+	if ( pDevice->State==DEVICE_STATE_SET_UDMA_DONE )
+	{
+		/* Initialization procedure is done. */
+		pDevice->State = DEVICE_STATE_INIT_DONE;	/* BUGFIX: was PORT_STATE_INIT_DONE (port-state constant); the INIT_DONE checks above and at the callers compare against DEVICE_STATE_INIT_DONE */
+	}
+
+	return MV_TRUE;
+
+#else
+
+	switch ( pDevice->State )
+	{
+		case DEVICE_STATE_RESET_DONE:
+			MV_DPRINT(("Device %d DEVICE_STATE_RESET_DONE.\n", pDevice->Id));
+
+			/* To do identify */
+			Device_IssueIdentify(pDevice->PPort, pDevice); 
+			break;
+
+		case DEVICE_STATE_IDENTIFY_DONE:
+			MV_DPRINT(("Device %d DEVICE_STATE_IDENTIFY_DONE.\n", pDevice->Id));
+
+			/* To do set PIO mode */
+			Device_IssueSetPIOMode(pDevice->PPort, pDevice);
+			break;
+
+		case DEVICE_STATE_SET_UDMA_DONE:
+			MV_DPRINT(("Device %d DEVICE_STATE_SET_UDMA_DONE.\n", pDevice->Id));
+
+			/* To do enable write cache */
+			Device_EnableWriteCache(pDevice->PPort, pDevice);
+			break;
+
+		case DEVICE_STATE_SET_PIO_DONE:
+			MV_DPRINT(("Device %d DEVICE_STATE_SET_PIO_DONE.\n", pDevice->Id));
+
+			/* To do set UDMA mode */
+			Device_IssueSetUDMAMode(pDevice->PPort, pDevice);
+			break;
+
+		case DEVICE_STATE_ENABLE_WRITE_CACHE_DONE:
+			MV_DPRINT(("Device %d DEVICE_STATE_ENABLE_WRITE_CACHE_DONE.\n", pDevice->Id));
+
+            /* To do enable read ahead */
+			Device_EnableReadAhead( pDevice->PPort, pDevice );
+			break;
+
+		case DEVICE_STATE_ENABLE_READ_AHEAD_DONE:
+			MV_DPRINT(("Device %d DEVICE_STATE_ENABLE_READ_AHEAD_DONE.\n", pDevice->Id));
+
+			/* Initialization procedure is done. */
+			pDevice->State = DEVICE_STATE_INIT_DONE;
+			pCore->Total_Device_Count++;
+
+        	/* No break here. */
+
+		case DEVICE_STATE_INIT_DONE:
+			MV_DPRINT(("Device %d DEVICE_STATE_INIT_DONE.\n", pDevice->Id));
+
+			/* Check whether all devices attached to this port are done. */
+			for ( i=0; i<MAX_DEVICE_PER_PORT; i++ )
+			{
+				if ( pPort->Device[i].State!=DEVICE_STATE_INIT_DONE )
+					return MV_TRUE;
+			}
+			pPort->Port_State = PORT_STATE_INIT_DONE;
+			mvChannelStateMachine(pCore, pDevice->PPort);
+			break;
+
+		default:
+			break;
+	}
+
+	return MV_TRUE;
+#endif
+}
+
+/* 
+ * Global controller reset: toggles HOST_RESET and waits for completion (up to 1 second); saves/restores the vendor-specific port register 0xC across the reset and applies a link-error workaround afterwards.
+ */
+MV_BOOLEAN
+ResetController(PCore_Driver_Extension pCore)
+{
+	MV_LPVOID mmio = pCore->Mmio_Base;
+	MV_U32 tmp;
+	MV_BOOLEAN ret = MV_TRUE;
+
+/* #if (VER_OEM==VER_OEM_ASUS) */
+	MV_U8 i=0;
+/* #endif */
+
+	/* Reset controller */
+	tmp = MV_REG_READ_DWORD(mmio, HOST_CTL);
+	if ((tmp & HOST_RESET) == 0) {
+#ifdef _OS_BIOS
+		pCore->host_reseting = 1;
+#endif
+/* #if (VER_OEM==VER_OEM_ASUS) */
+		if(pCore->VS_Reg_Saved!=VS_REG_SIG)	/* save each port's vendor reg 0xC once, before the reset clobbers it */
+		{
+			for ( i=0; i<pCore->SATA_Port_Num; i++ )
+			{
+				Domain_Port *port;
+				port = &pCore->Ports[i];
+				MV_REG_WRITE_DWORD(port->Mmio_Base, PORT_VSR_ADDR, 0xc);
+				port->VS_RegC= MV_REG_READ_DWORD(port->Mmio_Base, PORT_VSR_DATA);
+				pCore->VS_Reg_Saved=VS_REG_SIG;
+			}
+		}
+/* #endif */
+		MV_REG_WRITE_DWORD(mmio, HOST_CTL, tmp|HOST_RESET);
+		MV_REG_READ_DWORD(mmio, HOST_CTL); /* flush */
+	}
+
+	/* Reset must complete within 1 second, or the hardware should be considered fried. */
+	HBA_SleepMillisecond(pCore, 1000);
+
+	tmp = MV_REG_READ_DWORD(mmio, HOST_CTL);
+	if (tmp & HOST_RESET) {
+		MV_ASSERT(MV_FALSE);	//TBD;
+		ret = MV_FALSE;
+	}
+
+#ifdef _OS_BIOS
+	pCore->host_reseting = 0;
+#endif
+/* #if (VER_OEM==VER_OEM_ASUS) */
+	if(pCore->VS_Reg_Saved==VS_REG_SIG)	/* restore the vendor reg 0xC values saved above */
+	{
+		for ( i=0; i<pCore->SATA_Port_Num; i++ )
+		{
+			Domain_Port *port;
+			port = &pCore->Ports[i];
+			MV_REG_WRITE_DWORD(port->Mmio_Base, PORT_VSR_ADDR, 0xc);
+			MV_REG_WRITE_DWORD(port->Mmio_Base, PORT_VSR_DATA, port->VS_RegC);
+		}
+	}
+	/* Link error workaround: set bit 26 of vendor reg 0x5 on every SATA port with interrupts masked. */
+	for ( i=0; i<pCore->SATA_Port_Num; i++ )
+	{
+		MV_U32 tmp, old_stat;
+		Domain_Port *port;
+		port = &pCore->Ports[i];
+
+		mvDisableIntr( port->Mmio_Base, old_stat );
+		MV_REG_WRITE_DWORD( port->Mmio_Base, PORT_VSR_ADDR, 0x5 );
+		tmp = MV_REG_READ_DWORD( port->Mmio_Base, PORT_VSR_DATA );
+		MV_REG_WRITE_DWORD( port->Mmio_Base, PORT_VSR_DATA, tmp | MV_BIT(26));
+		HBA_SleepMillisecond( pCore, 1 );
+		mvEnableIntr( port->Mmio_Base, old_stat );
+	}
+
+/* #endif */
+	return ret;
+}
+
+void PATA_ResetPort(PCore_Driver_Extension pCore, MV_U8 portId)
+{	/* Quiesce a PATA port: stop command processing, ack pending port/host IRQs, then re-enable the port interrupt mask. */
+	PDomain_Port pPort = &pCore->Ports[portId];
+	MV_LPVOID mmio = pCore->Mmio_Base;
+	MV_LPVOID portMmio = pPort->Mmio_Base;
+	MV_U32 tmp;
+
+	/* Make sure port is not active. If yes, stop the port. */
+	tmp = MV_REG_READ_DWORD(portMmio, PORT_CMD);
+	/* For ACHI, four bits are avaiable. For 614x, PORT_CMD_FIS_ON is reserved. */
+	if (tmp & (PORT_CMD_PATA_LIST_ON | PORT_CMD_PATA_START)) {
+		tmp &= ~(PORT_CMD_PATA_LIST_ON | PORT_CMD_PATA_START);
+		MV_REG_WRITE_DWORD(portMmio, PORT_CMD, tmp);
+		MV_REG_READ_DWORD(portMmio, PORT_CMD); /* flush */
+
+		/* spec says 500 msecs for each bit, so
+			* this is slightly incorrect.
+			*/
+		HBA_SleepMillisecond(pCore, 500);
+	}
+
+	/* Clear error register if any */
+
+	/* Ack any pending irq events for this port */
+	tmp = MV_REG_READ_DWORD(portMmio, PORT_IRQ_STAT)&0xF;
+	if (tmp)
+		MV_REG_WRITE_DWORD(portMmio, PORT_IRQ_STAT, tmp);
+	/* Ack pending irq in the host interrupt status register */
+	MV_REG_WRITE_DWORD(mmio, HOST_IRQ_STAT, 1 << portId);
+
+	/* set irq mask (enables interrupts) */
+#ifdef ENABLE_PATA_ERROR_INTERRUPT
+	MV_REG_WRITE_DWORD(portMmio, PORT_IRQ_MASK, DEF_PORT_PATA_IRQ);
+#else
+	/* 
+	 * Workaround
+	 * If PATA device has a error, even the error bit in the interrupt register is cleared.
+	 * Internal hardware will trigger one more(OS has no idea).
+	 * So because there is interrupt bit not cleared, the next command won't be issued.
+	 */
+	MV_REG_WRITE_DWORD(portMmio, PORT_IRQ_MASK, MV_BIT(2)|MV_BIT(0));
+#endif
+}
+
+void SATA_ResetPort(PCore_Driver_Extension pCore, MV_U8 portId)
+{	/* Quiesce a SATA port: stop its DMA engines, wait for device detection, clear SATA errors and pending IRQs, unmask interrupts, and apply the FIFO workaround for affected chip revisions. */
+	PDomain_Port pPort = &pCore->Ports[portId];
+	MV_LPVOID mmio = pCore->Mmio_Base;
+	MV_LPVOID portMmio = pPort->Mmio_Base;
+	MV_U32 tmp, j;
+
+	/* Make sure port is not active. If yes, stop the port. */
+	tmp = MV_REG_READ_DWORD(portMmio, PORT_CMD);
+	/* For ACHI, four bits are avaiable. For 614x, PORT_CMD_FIS_ON is reserved. */
+	if (tmp & (PORT_CMD_LIST_ON | PORT_CMD_FIS_ON |
+			PORT_CMD_FIS_RX | PORT_CMD_START)) {
+		tmp &= ~(PORT_CMD_LIST_ON | PORT_CMD_FIS_ON |
+				PORT_CMD_FIS_RX | PORT_CMD_START);
+		MV_REG_WRITE_DWORD(portMmio, PORT_CMD, tmp);
+		MV_REG_READ_DWORD(portMmio, PORT_CMD); /* flush */
+
+		/* spec says 500 msecs for each bit, so
+			* this is slightly incorrect.
+			*/
+		HBA_SleepMillisecond(pCore, 500);
+	}
+
+	//TBD: PORT_CMD enable bit(5): PIO command will issue PIO setup interrupt bit. 
+	// Only after clear the PIO setup interrupt bit, the hardware will issue the PIO done interrupt bit.
+	//TBD: Maybe in this case, we needn't enable PIO setup interrupt bit but for some others we should.
+
+	#ifdef AHCI
+	/* For 614x, it's reserved. */
+	MV_REG_WRITE_DWORD(portMmio, PORT_CMD, PORT_CMD_SPIN_UP);
+	#endif
+
+	/* Wait for SATA DET(Device Detection): poll SCR_STAT up to 100 x 10ms for DET==3 (device present and phy established) */
+	j = 0;
+	while (j < 100) {
+		HBA_SleepMillisecond(pCore, 10);
+		tmp = MV_REG_READ_DWORD(portMmio, PORT_SCR_STAT);
+		if ((tmp & 0xf) == 0x3)
+			break;
+		j++;
+	}
+
+	
+	/* Clear SATA error */
+	tmp = MV_REG_READ_DWORD(portMmio, PORT_SCR_ERR);
+	MV_REG_WRITE_DWORD(portMmio, PORT_SCR_ERR, tmp);	/* write-1-to-clear */
+
+	/* Ack any pending irq events for this port */
+	tmp = MV_REG_READ_DWORD(portMmio, PORT_IRQ_STAT);
+	if (tmp)
+		MV_REG_WRITE_DWORD(portMmio, PORT_IRQ_STAT, tmp);
+	/* Ack pending irq in the host interrupt status register */
+	MV_REG_WRITE_DWORD(mmio, HOST_IRQ_STAT, 1 << portId);
+
+	/* set irq mask (enables interrupts) */
+	MV_REG_WRITE_DWORD(portMmio, PORT_IRQ_MASK, DEF_PORT_IRQ);
+
+
+	/* FIFO controller workaround for 6121-B0B1, 6111-B0B1, and 6145-A0 */
+	if ( 
+		( (pCore->Device_Id==DEVICE_ID_THORLITE_2S1P)&&(pCore->Revision_Id==0xB0||pCore->Revision_Id==0xB1) )
+		||
+		( (pCore->Device_Id==DEVICE_ID_THORLITE_2S1P_WITH_FLASH)&&(pCore->Revision_Id==0xB0||pCore->Revision_Id==0xB1) )
+		||
+		( (pCore->Device_Id==DEVICE_ID_THORLITE_1S1P)&&(pCore->Revision_Id==0xB0||pCore->Revision_Id==0xB1) )	//TBD: Don't know this device ID.
+		||
+		( (pCore->Device_Id==DEVICE_ID_THOR_4S1P_NEW)&&(pCore->Revision_Id==0xA0) )
+	)
+	{
+		tmp = (MV_REG_READ_DWORD( portMmio, PORT_FIFO_CTL ) & 0xFFFFF0FF ) | 0x500;	/* force FIFO control field (bits 8-11) to 0x5 */
+		MV_REG_WRITE_DWORD( portMmio, PORT_FIFO_CTL, tmp);
+		MV_REG_READ_DWORD( portMmio, PORT_FIFO_CTL);		/* flush */
+	}
+}
+/*
+ * Chip-level init, equivalent to ahci_host_init plus ahci_port_start: enable the controller, reset every port, then program each port's command-list/FIS DMA addresses and start its command engine.
+ */
+void InitChip(PCore_Driver_Extension pCore)
+{
+	MV_LPVOID mmio = pCore->Mmio_Base;
+	MV_U8 i;
+	PDomain_Port pPort;
+	MV_U32 tmp;
+	
+	pCore->Capacity = MV_REG_READ_DWORD(mmio, HOST_CAP);	/* cache HBA capability register */
+	
+	/* 
+	 * For 614x, enable enhanced mode for PATA and interrupt. 
+	 * For AHCI, enable AHCI.
+	 */
+	tmp = MV_REG_READ_DWORD(mmio, HOST_CTL);
+	MV_REG_WRITE_DWORD(mmio, HOST_CTL, (MV_U32)(tmp | HOST_IRQ_EN | HOST_MVL_EN));
+	tmp = MV_REG_READ_DWORD(mmio, HOST_CTL);
+
+	/* Ports implemented: enable ports */
+	pCore->Port_Map = MV_REG_READ_DWORD(mmio, HOST_PORTS_IMPL);
+	tmp = MV_REG_READ_DWORD(mmio, HOST_CAP);
+	//MV_DASSERT( pCore->Port_Num == ((tmp & 0x1f) + 1) );
+
+	/* Initialize ports */
+	for ( i = 0; i<pCore->Port_Num; i++) {
+		pPort = &pCore->Ports[i];
+		/* make sure port is not active */
+		if ( pPort->Type==PORT_TYPE_PATA )
+			PATA_ResetPort(pCore, i);
+		else
+			SATA_ResetPort(pCore, i);
+	}
+
+
+	/* Initialize port, set uncached memory pointer. */
+	for ( i = 0; i<pCore->Port_Num; i++) {
+		pPort = &pCore->Ports[i];
+
+		/* Set the sata port register */
+		MV_REG_WRITE_DWORD(pPort->Mmio_Base, PORT_LST_ADDR_HI, pPort->Cmd_List_DMA.high);
+		MV_REG_WRITE_DWORD(pPort->Mmio_Base, PORT_LST_ADDR, pPort->Cmd_List_DMA.low);
+		MV_REG_READ_DWORD(pPort->Mmio_Base, PORT_LST_ADDR);
+
+		MV_REG_WRITE_DWORD(pPort->Mmio_Base, PORT_FIS_ADDR_HI, pPort->RX_FIS_DMA.high);
+		MV_REG_WRITE_DWORD(pPort->Mmio_Base, PORT_FIS_ADDR, pPort->RX_FIS_DMA.low);
+		MV_REG_READ_DWORD(pPort->Mmio_Base, PORT_FIS_ADDR);
+
+		/* AHCI is different with Thor */
+		#ifdef AHCI
+		MV_REG_WRITE_DWORD(pPort->Mmio_Base, PORT_CMD, 
+			PORT_CMD_ICC_ACTIVE | PORT_CMD_FIS_RX |	PORT_CMD_POWER_ON | PORT_CMD_SPIN_UP | PORT_CMD_START );
+		#else
+		if ( pPort->Type==PORT_TYPE_PATA )
+		{	/* 12<<24: Bit 24-28: Indicates ATAPI command CDB length in bytes */
+			MV_REG_WRITE_DWORD(pPort->Mmio_Base, PORT_CMD, (12L<<24) | PORT_CMD_PATA_INTERRUPT | PORT_CMD_PATA_START );
+		}
+		else
+		{
+			/* 
+			 * Workaround: Don't enable PORT_CMD_FIS_RX otherwise system will hang.
+			 */
+			MV_REG_WRITE_DWORD(pPort->Mmio_Base, PORT_CMD, PORT_CMD_START );
+		}
+		#endif
+	}
+
+	MV_DUMPC32(0xCCCCFF01);
+	//MV_DUMPC32(MV_REG_READ_DWORD(mmio, HOST_CTL));
+	//MV_DUMPC32(MV_REG_READ_DWORD(mmio, HOST_IRQ_STAT));
+	//MV_HALTKEY
+	//MV_DPRINT("HostCtrl=0x%x,HostIntStatus=0x%x\n",MV_REG_READ_DWORD(mmio, HOST_CTL),MV_REG_READ_DWORD(mmio, HOST_IRQ_STAT));
+
+}
+
+MV_BOOLEAN mvAdapterStateMachine(
+	IN OUT MV_PVOID This
+	)
+{	/* Top-level adapter state machine: INITIALIZING -> reset controller + init chip -> READY -> drive the channel state machine. */
+#ifdef _OS_BIOS
+	PCore_Driver_Extension pCore = (PCore_Driver_Extension)This;
+	MV_U8 i=0;
+	PDomain_Port pPort=NULL;
+	MV_DUMPC32(0xCCCCBB11);
+
+
+	//MV_DPRINT("mvAdapterStateMachine Start\n");
+	switch (pCore->Adapter_State)
+	{
+		case ADAPTER_INITIALIZING:
+			MV_DUMPC32(0xCCCCBB01);
+			for(i=0;i<100;i++)	/* BIOS only: retry the controller reset up to 100 times */
+			{
+				MV_DUMPC32(0xCCCCBBFF);
+				if(ResetController(pCore))
+					break;
+			}
+
+			if(i==100)
+				return MV_FALSE;	/* reset never succeeded */
+
+			InitChip(pCore);
+			pCore->Adapter_State = ADAPTER_READY;
+			break;
+
+		case ADAPTER_READY:
+			{
+				MV_DUMPC32(0xCCCCBB02);
+				pPort=&pCore->Ports[0];
+				pPort->Port_State= PORT_STATE_IDLE;
+				mvChannelStateMachine(pCore, pPort);
+				//MV_DUMPC32(0xCCCCBB03);
+			}
+			break;
+
+		default:
+			break;
+	}
+
+	return MV_TRUE;
+#else
+	PCore_Driver_Extension pCore = (PCore_Driver_Extension)This;
+
+	switch (pCore->Adapter_State)
+	{
+		case ADAPTER_INITIALIZING:
+			if(ResetController(pCore) == MV_FALSE) 
+				return MV_FALSE;
+
+			InitChip(pCore);
+			pCore->Adapter_State = ADAPTER_READY;
+			//TBD: How about Linux? Does Linux need timer?
+			//if( !HBA_IsLdrDump(NULL) )	//TBD
+
+#ifdef SUPPORT_TIMER
+			Timer_AddRequest( pCore, 1, mvAdapterStateMachine, pCore );
+#else
+			HBA_RequestTimer(pCore, 1000, (MV_VOID(*)(MV_PVOID))mvAdapterStateMachine);	//TBD: 1 second
+#endif
+
+			break;
+
+		case ADAPTER_READY:
+			mvChannelStateMachine(pCore, NULL);	/* NULL port: run the initial discovery pass */
+			break;
+
+		default:
+			break;
+	}
+	return MV_TRUE;
+#endif
+
+}
+
+void Device_ParseIdentifyData(
+	IN PDomain_Device pDevice,
+	IN PATA_Identify_Data pATAIdentify
+	);
+
+static void Core_InternalReqCallback(
+	 IN PCore_Driver_Extension pCore,
+	 IN PMV_Request pReq
+	 )
+{	/* Completion handler for internal init requests (identify, set-mode, cache, read-log); on success advances the device state and re-enters the device state machine. */
+	PDomain_Port pPort; 
+	PDomain_Device pDevice; 
+	PATA_Identify_Data pATAIdentify;
+	MV_U8 portId, deviceId;
+
+	portId = PATA_MapPortId(pReq->Device_Id);
+	deviceId = PATA_MapDeviceId(pReq->Device_Id);
+
+	pPort = &pCore->Ports[portId];
+	pDevice = &pPort->Device[deviceId];
+
+	//It's possible that CDB_CORE_READ_LOG_EXT returns error and come here
+	//because we send CDB_CORE_READ_LOG_EXT no matter NCQ is running or not.
+	if ( pReq->Cdb[2]!=CDB_CORE_READ_LOG_EXT ) 
+	{
+		if( pReq->Scsi_Status != REQ_STATUS_SUCCESS )
+		{
+			/* request didn't finish correctly - we set device to existing
+			   and finish state machine */
+			pDevice->Status = DEVICE_STATUS_EXISTING;
+			pDevice->State = DEVICE_STATE_INIT_DONE;
+			mvDeviceStateMachine(pCore, pDevice);
+			return;
+		}
+	}
+
+	pATAIdentify = (PATA_Identify_Data)pPort->Device[deviceId].Scratch_Buffer;
+
+	/* Handle internal request like identify */
+	MV_DASSERT( pReq->Cdb[0]==SCSI_CMD_MARVELL_SPECIFIC );
+	MV_DASSERT( pReq->Cdb[1]==CDB_CORE_MODULE );
+	MV_ASSERT( portId < MAX_PORT_NUMBER );
+
+	if ( pReq->Cdb[2]==CDB_CORE_IDENTIFY )
+	{
+#ifdef _OS_LINUX
+		hba_swap_buf_le16((MV_PU16) pATAIdentify, 
+				  sizeof(ATA_Identify_Data)/sizeof(MV_U16));
+#endif /* _OS_LINUX  */
+		Device_ParseIdentifyData(pDevice, pATAIdentify);
+
+		MV_ASSERT( pDevice->State == DEVICE_STATE_RESET_DONE );
+		pDevice->State = DEVICE_STATE_IDENTIFY_DONE;
+		#ifndef _OS_BIOS		
+		mvDeviceStateMachine(pCore, pDevice);	/* BIOS drives the state machine itself */
+		#endif
+		return;
+	}
+	else if ( pReq->Cdb[2]==CDB_CORE_SET_UDMA_MODE )
+	{
+		pDevice->State = DEVICE_STATE_SET_UDMA_DONE;
+		#ifndef _OS_BIOS		
+		mvDeviceStateMachine(pCore, pDevice);
+		#endif
+	}
+	else if ( pReq->Cdb[2]==CDB_CORE_SET_PIO_MODE )
+	{
+		pDevice->State = DEVICE_STATE_SET_PIO_DONE;
+		#ifndef _OS_BIOS		
+		mvDeviceStateMachine(pCore, pDevice);
+		#endif
+	}
+	else if ( pReq->Cdb[2]==CDB_CORE_ENABLE_WRITE_CACHE )
+	{
+		pDevice->State = DEVICE_STATE_ENABLE_WRITE_CACHE_DONE;
+		#ifndef _OS_BIOS		
+		mvDeviceStateMachine(pCore, pDevice);
+		#endif
+	}
+	else if ( pReq->Cdb[2]==CDB_CORE_ENABLE_READ_AHEAD )
+	{
+		pDevice->State = DEVICE_STATE_ENABLE_READ_AHEAD_DONE;
+		#ifndef _OS_BIOS		
+		mvDeviceStateMachine(pCore, pDevice);
+		#endif
+	}
+	else if ( pReq->Cdb[2]==CDB_CORE_READ_LOG_EXT )
+	{
+		/* Do nothing. Just use this command to clear outstanding IO during error handling. */
+		MV_PRINT("Read Log Ext is finished on device 0x%x.\n", pDevice->Id);
+	}
+}
+
+static void Device_IssueIdentify(
+	IN PDomain_Port pPort,
+	IN PDomain_Device pDevice
+	)
+{	/* Build and send an internal ATA IDENTIFY request, targeting the device's scratch buffer; completion runs Core_InternalReqCallback. */
+	PMV_Request pReq = pDevice->Internal_Req;
+	PMV_SG_Table pSGTable = &pReq->SG_Table;
+
+/*	MV_ZeroMemory(pReq, MV_REQUEST_SIZE);*/
+	MV_ZeroMvRequest(pReq);
+
+	/* Prepare identify ATA task */
+	pReq->Cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
+	pReq->Cdb[1] = CDB_CORE_MODULE;
+	pReq->Cdb[2] = CDB_CORE_IDENTIFY;
+	pReq->Device_Id = pDevice->Id;
+
+	//pReq->Req_Flag;
+	pReq->Cmd_Initiator = pPort->Core_Extension;
+	pReq->Data_Transfer_Length = sizeof(ATA_Identify_Data);
+	pReq->Data_Buffer = pDevice->Scratch_Buffer;
+	pReq->Completion = (void(*)(MV_PVOID,PMV_Request))Core_InternalReqCallback;
+	MV_DASSERT( SATA_SCRATCH_BUFFER_SIZE>=sizeof(ATA_Identify_Data) );
+
+	/* Make SG table: single entry covering the scratch buffer DMA region */
+	SGTable_Init(pSGTable, 0);
+	SGTable_Append(pSGTable, 
+				pDevice->Scratch_Buffer_DMA.low, 
+				pDevice->Scratch_Buffer_DMA.high,
+				pReq->Data_Transfer_Length
+				); 
+	MV_DASSERT( pReq->Data_Transfer_Length%2==0 );
+	//MV_DUMPC32(0xCCCCBB40);
+
+	/* Send this internal request */
+	Core_ModuleSendRequest(pPort->Core_Extension, pReq);
+}
+
+void Device_IssueReadLogExt(
+	IN PDomain_Port pPort,
+	IN PDomain_Device pDevice
+	)
+{	/* Build and send an internal READ LOG EXT request (used after an NCQ error to clear outstanding IO); also clears the device's NCQ capability flag. */
+	PMV_Request pReq = pDevice->Internal_Req;
+	PMV_SG_Table pSGTable = &pReq->SG_Table;
+
+/*	MV_ZeroMemory(pReq, MV_REQUEST_SIZE);*/
+	MV_ZeroMvRequest(pReq);
+	MV_PRINT("Device_IssueReadLogExt on device 0x%x.\n", pDevice->Id);
+
+	//TBD: Disable NCQ after we found NCQ error.
+	pDevice->Capacity &= ~(DEVICE_CAPACITY_NCQ_SUPPORTED);
+
+	/* We support READ LOG EXT command with log page of 10h. */
+	pReq->Cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
+	pReq->Cdb[1] = CDB_CORE_MODULE;
+	pReq->Cdb[2] = CDB_CORE_READ_LOG_EXT;
+	pReq->Device_Id = pDevice->Id;
+
+	//pReq->Req_Flag;
+	pReq->Cmd_Initiator = pPort->Core_Extension;
+	pReq->Data_Transfer_Length = SATA_SCRATCH_BUFFER_SIZE;
+	pReq->Data_Buffer = pDevice->Scratch_Buffer;
+	pReq->Completion = (void(*)(MV_PVOID,PMV_Request))Core_InternalReqCallback;
+	MV_DASSERT( SATA_SCRATCH_BUFFER_SIZE>=sizeof(ATA_Identify_Data) );
+
+	/* Make SG table: single entry covering the scratch buffer DMA region */
+	SGTable_Init(pSGTable, 0);
+	SGTable_Append(pSGTable, 
+				pDevice->Scratch_Buffer_DMA.low, 
+				pDevice->Scratch_Buffer_DMA.high,
+				pReq->Data_Transfer_Length
+				); 
+	MV_DASSERT( pReq->Data_Transfer_Length%2==0 );
+	//MV_DUMPC32(0xCCCCBB40);
+
+	/* Send this internal request */
+	Core_ModuleSendRequest(pPort->Core_Extension, pReq);
+}
+
+static MV_VOID mvAta2HostString(IN MV_U16 *source,
+                                OUT MV_U16 *target,
+                                IN MV_U32 wordsCount
+                               )
+{	/* Convert an ATA identify string: swap the two bytes of each 16-bit word, then convert little-endian to CPU order. Safe for in-place use (source may equal target). */
+    MV_U32 i;
+    for (i=0 ; i < wordsCount; i++)
+    {
+        target[i] = (source[i] >> 8) | ((source[i] & 0xff) << 8);
+        target[i] = MV_LE16_TO_CPU(target[i]);
+    }
+}
+
+void Device_ParseIdentifyData(
+	IN PDomain_Device pDevice,
+	IN PATA_Identify_Data pATAIdentify
+	)
+{
+	PDomain_Port pPort = pDevice->PPort;
+	MV_U8 i;
+	MV_U32 temp;
+
+	/* Get serial number, firmware revision and model number. */
+#ifndef BIOS_NOT_SUPPORT
+	MV_CopyMemory(pDevice->Serial_Number, pATAIdentify->Serial_Number, 20);
+	MV_CopyMemory(pDevice->Firmware_Revision, pATAIdentify->Firmware_Revision, 8);
+#endif
+
+	MV_CopyMemory(pDevice->Model_Number, pATAIdentify->Model_Number, 40);
+#ifndef BIOS_NOT_SUPPORT
+	mvAta2HostString((MV_U16 *)pDevice->Serial_Number, (MV_U16 *)pDevice->Serial_Number, 10);
+	mvAta2HostString((MV_U16 *)pDevice->Firmware_Revision, (MV_U16 *)pDevice->Firmware_Revision, 4);
+#endif
+
+	mvAta2HostString((MV_U16 *)pDevice->Model_Number, (MV_U16 *)pDevice->Model_Number, 20);
+
+	/* Capacity: 48 bit LBA, smart, write cache and NCQ */
+	pDevice->Capacity = 0;
+	pDevice->Setting = 0;
+	if ( pATAIdentify->Command_Set_Supported[1] & MV_BIT(10) )
+	{
+#ifndef _OS_BIOS
+		MV_DPRINT(("Device: %d 48 bit supported.\n", pDevice->Id));
+#endif
+
+		pDevice->Capacity |= DEVICE_CAPACITY_48BIT_SUPPORTED;
+	}
+	else
+	{
+#ifndef _OS_BIOS
+		MV_DPRINT(("Device: %d 48 bit not supported.\n", pDevice->Id));
+
+#endif
+
+	}
+
+	if ( pATAIdentify->Command_Set_Supported[0] & MV_BIT(0) ) 
+	{
+		pDevice->Capacity |= DEVICE_CAPACITY_SMART_SUPPORTED;
+		if ( pATAIdentify->Command_Set_Enabled[0] & MV_BIT(0) )
+		{
+			pDevice->Setting |= DEVICE_SETTING_SMART_ENABLED;
+		}
+	}	
+	if ( pATAIdentify->Command_Set_Supported[0] & MV_BIT(5) ) 
+	{
+		pDevice->Capacity |= DEVICE_CAPACITY_WRITECACHE_SUPPORTED;
+		if ( pATAIdentify->Command_Set_Enabled[0] & MV_BIT(5) )
+		{
+			pDevice->Setting |= DEVICE_SETTING_WRITECACHE_ENABLED;
+		}
+	}
+	if ( pATAIdentify->SATA_Capabilities & MV_BIT(8) )
+	{
+	#ifndef _OS_BIOS
+		if (pDevice->Capacity & DEVICE_CAPACITY_48BIT_SUPPORTED)
+			pDevice->Capacity |= DEVICE_CAPACITY_NCQ_SUPPORTED;
+	#endif
+	}
+	if ( pATAIdentify->Command_Set_Supported_Extension & MV_BIT(5) )
+	{
+		if ( pATAIdentify->Command_Set_Default & MV_BIT(5) )
+			pDevice->Capacity |= DEVICE_CAPACITY_READLOGEXT_SUPPORTED;
+	}
+
+	temp = MV_REG_READ_DWORD( pPort->Mmio_Base, PORT_SCR_STAT );
+	if ( ((temp >> 4) & 0xF) == 1 )
+		pDevice->Capacity |= DEVICE_CAPACITY_RATE_1_5G;
+	else if ( ((temp >> 4) & 0xF) == 2 )
+		pDevice->Capacity |= DEVICE_CAPACITY_RATE_3G;
+
+	/* Disk size: 48-bit Max_LBA if supported, else 28-bit User_Addressable_Sectors. */
+	if ( pDevice->Capacity&DEVICE_CAPACITY_48BIT_SUPPORTED )
+	{
+		pDevice->Max_LBA.low = *((MV_PU32)&pATAIdentify->Max_LBA[0]);
+		pDevice->Max_LBA.high = *((MV_PU32)&pATAIdentify->Max_LBA[2]);
+	}else 
+	{
+		pDevice->Max_LBA.low = *((MV_PU32)&pATAIdentify->User_Addressable_Sectors[0]);
+		pDevice->Max_LBA.high = 0;
+	}
+	
+	/* PIO, MDMA and UDMA mode */	
+   	if ( ( pATAIdentify->Fields_Valid&MV_BIT(1) )
+		&& ( pATAIdentify->PIO_Modes&0x0F ) )	
+	{
+       	if ( (MV_U8)pATAIdentify->PIO_Modes>=0x2 )
+		  	pDevice->PIO_Mode = 0x04; 
+		else
+	  		pDevice->PIO_Mode = 0x03; 
+	}
+    else
+	{
+       	pDevice->PIO_Mode = 0x02;
+	}
+
+	pDevice->MDMA_Mode = 0xFF;
+	if ( pATAIdentify->Multiword_DMA_Modes & MV_BIT(2) )
+		pDevice->MDMA_Mode = 2;
+	else if ( pATAIdentify->Multiword_DMA_Modes & MV_BIT(1) )
+		pDevice->MDMA_Mode = 1;
+	else if ( pATAIdentify->Multiword_DMA_Modes & MV_BIT(0) )
+		pDevice->MDMA_Mode = 0;
+
+	pDevice->UDMA_Mode = 0xFF;
+    if ( pATAIdentify->Fields_Valid&MV_BIT(2) )
+	{
+		for ( i=0; i<7; i++ )
+		{
+			if ( pATAIdentify->UDMA_Modes & MV_BIT(i) )
+				pDevice->UDMA_Mode = i;	
+		}
+	}	
+	MV_DUMPC32(0xCCCCFFF2);
+	MV_DUMPC32(pDevice->Max_LBA.low);
+	//MV_HALTKEY;
+//#ifndef BIOS_NOT_SUPPORT
+	/* CRC identify buffer to get the U32 GUID. */
+	pDevice->WWN = MV_CRC((MV_PU8)pATAIdentify, sizeof(ATA_Identify_Data));
+//#endif
+
+	//TBD: MV_U16 Status;
+	//TBD: MV_U8 Queue_Depth;	
+}
+
+static void Device_IssueSetMDMAMode(
+	IN PDomain_Port pPort,
+	IN PDomain_Device pDevice
+	)
+{
+	PCore_Driver_Extension pCore = pPort->Core_Extension;
+	PMV_Request pReq = pDevice->Internal_Req;
+    MV_U32 temp;
+	MV_U32 offset;
+	MV_LPVOID base;
+	MV_BOOLEAN memoryIO=MV_FALSE;
+	MV_U8 mode = pDevice->MDMA_Mode;
+
+	/* Only if the Device doesn't support UDMA, we'll use MDMA mode. */
+	MV_ASSERT( pDevice->UDMA_Mode==0xFF );
+	/* Is it possible that one device supports neither UDMA nor MDMA? */
+	MV_ASSERT( (pDevice->MDMA_Mode<=2) );
+/*	MV_ZeroMemory(pReq, MV_REQUEST_SIZE);*/
+	MV_ZeroMvRequest(pReq);
+
+	/* Set controller timing register for PATA port before set the device MDMA mode. */
+	if ( pPort->Type==PORT_TYPE_PATA )
+	{
+		if ( pCore->Device_Id==DEVICE_ID_THORLITE_2S1P 
+			|| pCore->Device_Id==DEVICE_ID_THORLITE_2S1P_WITH_FLASH
+			|| pCore->Device_Id==DEVICE_ID_THORLITE_0S1P )
+		{
+			if ( pCore->Revision_Id==0xA0 )
+			{
+				/* Thorlite A0 */
+				temp = MV_IO_READ_DWORD(pCore->Base_Address[4], 0);
+				temp &= 0xFFFF00FF;
+				temp |= 0x0000A800;
+				MV_IO_WRITE_DWORD(pCore->Base_Address[4], 0, temp);
+
+				if ( !pDevice->Is_Slave ) 
+					offset = 0x10;
+				else
+					offset = 0x14;
+				base = pCore->Base_Address[4];
+				memoryIO = MV_FALSE;
+			}
+			else
+			{
+				/* Thorlite B0 */
+				MV_DASSERT( (pCore->Revision_Id==0xB0)||(pCore->Revision_Id==0xB1) );
+				if ( !pDevice->Is_Slave )
+					offset = 0xA0;
+				else
+					offset = 0xA4;
+				base = pCore->Base_Address[5];
+				memoryIO = MV_TRUE;
+			}
+		} 
+		else
+		{
+			MV_DASSERT( (pCore->Device_Id==DEVICE_ID_THOR_4S1P)||(pCore->Device_Id==DEVICE_ID_THOR_4S1P_NEW) );
+			if ( pCore->Revision_Id==0x00	/* A0 */
+				|| pCore->Revision_Id==0x01	/* A1 */
+				|| pCore->Revision_Id==0x10	/* B0 and C0 */	)
+			{
+				/* Thor A0-C0 */
+				if ( !pDevice->Is_Slave )
+					offset = 0x08;
+				else
+					offset = 0x0c;
+				base = pCore->Base_Address[4];
+				memoryIO = MV_FALSE;
+			}
+			else
+			{
+				/* Thor D0 = Thor New A0 */
+				MV_DASSERT( (pCore->Revision_Id==0xA0) || (pCore->Revision_Id==0xA1) ||
+							(pCore->Revision_Id==0xA2) );
+				if ( !pDevice->Is_Slave )
+					offset = 0xA0;
+				else
+					offset = 0xA4;
+				base = pCore->Base_Address[5];
+				memoryIO = MV_TRUE;
+			}
+		}
+
+		if ( !memoryIO )
+		{
+			temp = MV_IO_READ_DWORD(base, offset);
+			temp &= 0xFFFFFF3F;
+			temp |= ((MV_U32)mode)<<6;
+			temp |= 0x100;		/* Enable MDMA */
+			MV_IO_WRITE_DWORD(base, offset, temp);
+		}
+		else
+		{
+			temp = MV_REG_READ_DWORD(base, offset);
+			temp &= 0xFFFFFF3F;
+			temp |= ((MV_U32)mode)<<6;
+			temp |= 0x100;		/* Enable MDMA */
+			MV_REG_WRITE_DWORD(base, offset, temp);
+		}
+	}
+
+	/* Prepare set MDMA mode task */
+	pReq->Cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
+	pReq->Cdb[1] = CDB_CORE_MODULE;
+	pReq->Cdb[2] = CDB_CORE_SET_UDMA_MODE;
+	pReq->Cdb[3] = mode;
+	/* Means we are setting MDMA mode. I still use CDB_CORE_SET_UDMA_MODE because I don't want to change the state machine. */
+	pReq->Cdb[4] = MV_TRUE;
+	pReq->Device_Id = pDevice->Id;
+	//pReq->Req_Flag;
+	pReq->Cmd_Initiator = pPort->Core_Extension;
+	pReq->Data_Transfer_Length = 0;
+	pReq->Data_Buffer = NULL;
+	pReq->Completion = (void(*)(MV_PVOID,PMV_Request))Core_InternalReqCallback;
+
+	/* Send this internal request */
+	Core_ModuleSendRequest(pPort->Core_Extension, pReq);
+}
+
+static void Device_IssueSetUDMAMode(
+	IN PDomain_Port pPort,
+	IN PDomain_Device pDevice
+	)
+{
+	PCore_Driver_Extension pCore = pPort->Core_Extension;
+	PMV_Request pReq = pDevice->Internal_Req;
+    MV_U32 temp;
+	MV_U32 offset;
+	MV_LPVOID base;
+	MV_BOOLEAN memoryIO=MV_FALSE;
+	MV_U8 mode = pDevice->UDMA_Mode;
+	
+	if ( pDevice->UDMA_Mode==0xFF )
+	{
+		Device_IssueSetMDMAMode(pPort, pDevice);
+		return;
+	}
+
+	//GT 10/19/2006 11:07AM
+	/* Check whether the PATA cable is 40-pin or 80-pin; a 40-pin cable caps UDMA at mode 2. */
+	if ( pPort->Type==PORT_TYPE_PATA )
+	{
+		temp = MV_IO_READ_DWORD(pCore->Base_Address[4], 0);
+		if( temp & MV_BIT(8) )	//40_pin cable
+		{
+			if ( mode>2 ) 
+				mode = 2;
+			pDevice->UDMA_Mode = mode;
+		}
+	}	
+	
+	/* Hardware team required us to downgrade UDMA mode to zero. */
+	//if ( pDevice->Device_Type&DEVICE_TYPE_ATAPI )
+	//	mode = 0;
+	//if ( mode>=5 ) mode = 4; //???
+
+/*	MV_ZeroMemory(pReq, MV_REQUEST_SIZE);*/
+	MV_ZeroMvRequest(pReq);
+
+	if ( (pCore->Device_Id!=DEVICE_ID_THOR_4S1P_NEW) && (pCore->Revision_Id!=0xB0) && (pCore->Revision_Id!=0xB1) )
+	{
+		/* Degrade ATAPI device UDMA mode always to 2. */
+		if ( pDevice->Device_Type&DEVICE_TYPE_ATAPI )
+		{
+			if ( mode>2 ) mode = 2;
+		}
+		else
+		{
+			/*
+			* Workaround:
+			* Thor lite A0 has problem with Hitesh(IBM) HDD under UDMA 5
+			* And it has problem with any HDD under UDMA 6
+			* So we degrade the HDD mode to 5 and ignore Hitesh HDD for now.
+			*/
+			if ( mode>5 ) mode = 5;
+		}
+	}
+
+	/* 
+	 * Set controller timing register for PATA port before set the device UDMA mode.
+	 * Thorlite A0:	To enable timing programming, BAR 4 offset x0, write a800
+	 *				To set values, BAR 4 offset x10, x14
+	 * Thorlite B0:	BAR 5 offset xa0, xa4
+	 * Thor A0~C0:	BAR 4 offset x8, xc
+	 * Thor D0(=Thor New A0):BAR 5 offset xa0, xa4 ( Same as Thorlite B0 )
+	 */
+	if ( pPort->Type==PORT_TYPE_PATA )
+	{
+		if ( pCore->Device_Id==DEVICE_ID_THORLITE_2S1P 
+			|| pCore->Device_Id==DEVICE_ID_THORLITE_2S1P_WITH_FLASH
+			|| pCore->Device_Id==DEVICE_ID_THORLITE_0S1P )
+		{
+			if ( pCore->Revision_Id==0xA0 )
+			{
+				/* Thorlite A0 */
+				temp = MV_IO_READ_DWORD(pCore->Base_Address[4], 0);
+				temp &= 0xFFFF00FF;
+				temp |= 0x0000A800;
+				MV_IO_WRITE_DWORD(pCore->Base_Address[4], 0, temp);
+
+				if ( !pDevice->Is_Slave ) 
+					offset = 0x10;
+				else
+					offset = 0x14;
+				base = pCore->Base_Address[4];
+				memoryIO = MV_FALSE;
+			}
+			else
+			{
+				/* Thorlite B0 */
+				MV_DASSERT( (pCore->Revision_Id==0xB0)||(pCore->Revision_Id==0xB1) );
+				if ( !pDevice->Is_Slave )
+					offset = 0xA0;
+				else
+					offset = 0xA4;
+				base = pCore->Base_Address[5];
+				memoryIO = MV_TRUE;
+			}
+		} 
+		else
+		{
+			MV_DASSERT( (pCore->Device_Id==DEVICE_ID_THOR_4S1P)||(pCore->Device_Id==DEVICE_ID_THOR_4S1P_NEW) );
+			if ( pCore->Revision_Id==0x00	/* A0 */
+				|| pCore->Revision_Id==0x01	/* A1 */
+				|| pCore->Revision_Id==0x10	/* B0 and C0 */	)
+			{
+				/* Thor A0-C0 */
+				if ( !pDevice->Is_Slave )
+					offset = 0x08;
+				else
+					offset = 0x0c;
+				base = pCore->Base_Address[4];
+				memoryIO = MV_FALSE;
+			}
+			else
+			{
+				/* Thor D0 = Thor New A0 */
+				MV_DASSERT( (pCore->Revision_Id==0xA0) || (pCore->Revision_Id==0xA1) ||
+							(pCore->Revision_Id==0xA2) );
+				if ( !pDevice->Is_Slave )
+					offset = 0xA0;
+				else
+					offset = 0xA4;
+				base = pCore->Base_Address[5];
+				memoryIO = MV_TRUE;
+			}
+		}
+
+		if ( !memoryIO )
+		{
+			temp = MV_IO_READ_DWORD(base, offset);
+			temp &= 0xFFFFFFF8;
+			temp |= (MV_U32)mode;
+			MV_IO_WRITE_DWORD(base, offset, temp);
+		}
+		else
+		{
+			temp = MV_REG_READ_DWORD(base, offset);
+			temp &= 0xFFFFFFF8;
+			temp |= (MV_U32)mode;
+			MV_REG_WRITE_DWORD(base, offset, temp);
+		}
+	}
+
+	/* Prepare set UDMA mode task */
+	pReq->Cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
+	pReq->Cdb[1] = CDB_CORE_MODULE;
+	pReq->Cdb[2] = CDB_CORE_SET_UDMA_MODE;
+	pReq->Cdb[3] = mode;
+	/* Not setting MDMA but UDMA mode. */
+	pReq->Cdb[4] = MV_FALSE;
+	pReq->Device_Id = pDevice->Id;
+	//pReq->Req_Flag;
+	pReq->Cmd_Initiator = pPort->Core_Extension;
+	pReq->Data_Transfer_Length = 0;
+	pReq->Data_Buffer = NULL;
+	pReq->Completion = (void(*)(MV_PVOID,PMV_Request))Core_InternalReqCallback;
+
+	/* Send this internal request */
+	Core_ModuleSendRequest(pPort->Core_Extension, pReq);
+}
+
+static void Device_IssueSetPIOMode(
+	IN PDomain_Port pPort,
+	IN PDomain_Device pDevice
+	)
+{
+	PCore_Driver_Extension pCore = pPort->Core_Extension;
+	PMV_Request pReq = pDevice->Internal_Req;
+    MV_U32 temp;
+	MV_U32 offset;
+	MV_LPVOID base;
+	MV_BOOLEAN memoryIO=MV_FALSE;
+	MV_U8 mode = pDevice->PIO_Mode;
+	
+/*	MV_ZeroMemory(pReq, MV_REQUEST_SIZE);*/
+	MV_ZeroMvRequest(pReq);
+
+	/* Hardware team required us to downgrade PIO mode to zero for ATAPI devices. */
+	if ( pDevice->Device_Type&DEVICE_TYPE_ATAPI )
+		mode = 0;
+
+	//MV_DUMPC32(0xCCCCBB70);
+	/* 
+	 * Set controller timing register for PATA port before set the device PIO mode.
+	 * Thorlite A0:	To enable timing programming, BAR 4 offset x0, write a800
+	 *				To set values, BAR 4 offset x10, x14
+	 * Thorlite B0:	BAR 5 offset xa0, xa4
+	 * Thor A0~C0:	BAR 4 offset x8, xc
+	 * Thor D0:		BAR 5 offset xa0, xa4 ( Same as Thorlite B0 )
+	 */
+	if ( pPort->Type==PORT_TYPE_PATA )
+	{
+		//MV_DUMPC32(0xCCCCBB71);
+		if ( pCore->Device_Id==DEVICE_ID_THORLITE_2S1P 
+			|| pCore->Device_Id==DEVICE_ID_THORLITE_2S1P_WITH_FLASH
+			|| pCore->Device_Id==DEVICE_ID_THORLITE_0S1P )
+		{
+			if ( pCore->Revision_Id==0xA0 )
+			{
+				/* Thorlite A0 */
+				temp = MV_IO_READ_DWORD(pCore->Base_Address[4], 0);
+				temp &= 0xFFFF00FF;
+				temp |= 0x0000A800;
+				MV_IO_WRITE_DWORD(pCore->Base_Address[4], 0, temp);
+
+				if ( !pDevice->Is_Slave ) 
+					offset = 0x10;
+				else
+					offset = 0x14;
+				base = pCore->Base_Address[4];
+				memoryIO = MV_FALSE;
+			}
+			else
+			{
+				/* Thorlite B0 */
+				MV_DASSERT( (pCore->Revision_Id==0xB0)||(pCore->Revision_Id==0xB1) );
+				if ( !pDevice->Is_Slave )
+					offset = 0xA0;
+				else
+					offset = 0xA4;
+				base = pCore->Base_Address[5];
+				memoryIO = MV_TRUE;
+			}
+		} 
+		else
+		{
+			//MV_DUMPC32(0xCCCCBB72);
+			MV_DASSERT( (pCore->Device_Id==DEVICE_ID_THOR_4S1P)||(pCore->Device_Id==DEVICE_ID_THOR_4S1P_NEW) );
+			if ( pCore->Revision_Id==0x00	/* A0 */
+				|| pCore->Revision_Id==0x01	/* A1 */
+				|| pCore->Revision_Id==0x10	/* B0 and C0 */	)
+			{
+				/* Thor A0-C0 */
+				if ( !pDevice->Is_Slave )
+					offset = 0x08;
+				else
+					offset = 0x0c;
+				base = pCore->Base_Address[4];
+				memoryIO = MV_FALSE;
+			}
+			else
+			{
+				/* Thor D0 = Thor New A0 */
+				MV_DASSERT( (pCore->Revision_Id==0xA0) || (pCore->Revision_Id==0xA1) ||
+							(pCore->Revision_Id==0xA2) );
+				if ( !pDevice->Is_Slave )
+					offset = 0xA0;
+				else
+					offset = 0xA4;
+				base = pCore->Base_Address[5];
+				memoryIO = MV_TRUE;
+			}
+		}
+
+		if ( !memoryIO )
+		{
+			temp = MV_IO_READ_DWORD(base, offset);
+			temp &= 0xFFFFFFC7;
+			temp |= ((MV_U32)mode<<3);
+			MV_IO_WRITE_DWORD(base, offset, temp);
+		}
+		else
+		{
+			temp = MV_REG_READ_DWORD(base, offset);
+			temp &= 0xFFFFFFC7;
+			temp |= ((MV_U32)mode<<3);
+			MV_REG_WRITE_DWORD(base, offset, temp);
+		}
+	}
+	MV_DUMPC32(0xCCCCBB73);
+	//MV_HALTKEY;
+
+	/* Prepare set PIO mode task */
+	pReq->Cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
+	pReq->Cdb[1] = CDB_CORE_MODULE;
+	pReq->Cdb[2] = CDB_CORE_SET_PIO_MODE;
+	pReq->Cdb[3] = mode;
+	pReq->Device_Id = pDevice->Id;
+	//pReq->Req_Flag;
+	pReq->Cmd_Initiator = pPort->Core_Extension;
+	pReq->Data_Transfer_Length = 0;
+	pReq->Data_Buffer = NULL;
+	pReq->Completion = (void(*)(MV_PVOID,PMV_Request))Core_InternalReqCallback;
+
+	/* Send this internal request */
+	Core_ModuleSendRequest(pPort->Core_Extension, pReq);
+	//MV_DUMPC32(0xCCCCBB74);
+}
+
+#ifndef BIOS_NOT_SUPPORT
+static void Device_EnableWriteCache(
+	IN PDomain_Port pPort,
+	IN PDomain_Device pDevice
+	)
+{
+	PCore_Driver_Extension pCore = pPort->Core_Extension;
+	PMV_Request pReq = pDevice->Internal_Req;
+
+/*	MV_ZeroMemory(pReq, MV_REQUEST_SIZE);*/
+	MV_ZeroMvRequest(pReq);
+
+	/* Prepare enable write cache command */
+	pReq->Cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
+	pReq->Cdb[1] = CDB_CORE_MODULE;
+	pReq->Cdb[2] = CDB_CORE_ENABLE_WRITE_CACHE;
+	pReq->Device_Id = pDevice->Id;
+	//pReq->Req_Flag;
+	pReq->Cmd_Initiator = pPort->Core_Extension;
+	pReq->Data_Transfer_Length = 0;
+	pReq->Data_Buffer = NULL;
+	pReq->Completion = (void(*)(MV_PVOID,PMV_Request))Core_InternalReqCallback;
+
+	/* Skip ATAPI devices: complete the request as success right away. */
+	if( pDevice->Device_Type & DEVICE_TYPE_ATAPI )
+	{
+		pReq->Scsi_Status = REQ_STATUS_SUCCESS;
+		pReq->Completion(pCore, pReq);
+	}
+	else
+	{
+		/* Send this internal request */
+		Core_ModuleSendRequest(pPort->Core_Extension, pReq);
+	}
+}
+
+static void Device_EnableReadAhead(
+	IN PDomain_Port pPort,
+	IN PDomain_Device pDevice
+	)
+{
+	PCore_Driver_Extension pCore = pPort->Core_Extension;
+	PMV_Request pReq = pDevice->Internal_Req;
+
+/*	MV_ZeroMemory(pReq, MV_REQUEST_SIZE);*/
+	MV_ZeroMvRequest(pReq);
+
+	/* Prepare enable read ahead command */
+	pReq->Cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
+	pReq->Cdb[1] = CDB_CORE_MODULE;
+	pReq->Cdb[2] = CDB_CORE_ENABLE_READ_AHEAD;
+	pReq->Device_Id = pDevice->Id;
+	//pReq->Req_Flag;
+	pReq->Cmd_Initiator = pPort->Core_Extension;
+	pReq->Data_Transfer_Length = 0;
+	pReq->Data_Buffer = NULL;
+	pReq->Completion = (void(*)(MV_PVOID,PMV_Request))Core_InternalReqCallback;
+
+	/* Skip ATAPI devices: complete the request as success right away. */
+	if( pDevice->Device_Type & DEVICE_TYPE_ATAPI )
+	{
+		pReq->Scsi_Status = REQ_STATUS_SUCCESS;
+		pReq->Completion(pCore, pReq);
+	}
+	else
+	{
+		/* Send this internal request */
+		Core_ModuleSendRequest(pPort->Core_Extension, pReq);
+	}
+}
+#endif	/* #ifndef BIOS_NOT_SUPPORT */
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/core/core_init.h linux-2.6.25/drivers/scsi/mv/core/core_init.h
--- linux-2.6.25.orig/drivers/scsi/mv/core/core_init.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/core/core_init.h	2008-07-28 18:42:43.329188696 +0200
@@ -0,0 +1,122 @@
+#if !defined(CORE_INIT_H)
+#define CORE_INIT_H
+
+#include "core_inter.h"
+
+typedef enum mvAdapterState
+{
+    ADAPTER_INITIALIZING,
+    ADAPTER_READY,
+    ADAPTER_FATAL_ERROR
+} MV_ADAPTER_STATE;
+
+typedef enum mvChannelState	//TBD
+{
+    CHANNEL_NOT_CONNECTED,
+    CHANNEL_CONNECTED,
+    CHANNEL_IN_SRST,
+    CHANNEL_PM_STAGGERED_SPIN_UP,
+    CHANNEL_PM_SRST_DEVICE,
+    CHANNEL_READY,
+    CHANNEL_PM_HOT_PLUG,
+} MV_CHANNEL_STATE;
+
+MV_BOOLEAN mvAdapterStateMachine(
+	MV_PVOID This
+	);
+
+void SATA_PortReset(
+	PDomain_Port pPort,
+	MV_BOOLEAN hardReset
+	);
+
+void PATA_PortReset(
+	PDomain_Port pPort,
+	MV_BOOLEAN hardReset
+	);
+
+MV_BOOLEAN SATA_DoSoftReset(PDomain_Port pPort, MV_U8 PMPort);
+
+#define SATA_PortDeviceDetected(port)	\
+	 ( MV_REG_READ_DWORD(port->Mmio_Base, PORT_SCR_STAT) & 0x01 )
+
+#define SATA_PortDeviceReady(port)		\
+	(								\
+		( ( (MV_REG_READ_DWORD(port->Mmio_Base, PORT_SCR_STAT) & 0x0F00 ) >> 8) != PORT_SSTATUS_IPM_NO_DEVICE )		\
+	)
+
+#define FIS_REG_H2D_SIZE_IN_DWORD	5
+
+/* PM related - move elsewhere? */
+#define MV_ATA_COMMAND_PM_READ_REG              0xe4
+#define MV_ATA_COMMAND_PM_WRITE_REG             0xe8
+
+#define MV_SATA_GSCR_ID_REG_NUM                 0
+#define MV_SATA_GSCR_REVISION_REG_NUM           1
+#define MV_SATA_GSCR_INFO_REG_NUM               2
+#define MV_SATA_GSCR_ERROR_REG_NUM              32
+#define MV_SATA_GSCR_ERROR_ENABLE_REG_NUM       33
+#define MV_SATA_GSCR_FEATURES_REG_NUM           64
+#define MV_SATA_GSCR_FEATURES_ENABLE_REG_NUM    96
+
+#define MV_SATA_PSCR_SSTATUS_REG_NUM            0
+#define MV_SATA_PSCR_SERROR_REG_NUM             1
+#define MV_SATA_PSCR_SCONTROL_REG_NUM           2
+#define MV_SATA_PSCR_SACTIVE_REG_NUM            3
+
+#define MV_Read_Reg  1
+#define MV_Write_Reg 0
+
+void mvPMDevReWrReg(
+	PDomain_Port pPort, 
+	MV_U8 read, 
+	MV_U8 PMreg, 
+	MV_U32 regVal, 
+	MV_U8 PMport, 
+	MV_BOOLEAN control
+	);
+
+void SATA_InitPM (
+    PDomain_Port pPort
+	);
+
+void SATA_InitPMPort (
+	PDomain_Port pPort,
+	MV_U8 portNum
+	);
+
+MV_BOOLEAN SATA_SoftResetDevice(
+	PDomain_Port pPort, 
+	MV_U8 portNum
+	);
+
+MV_BOOLEAN SATA_PortSoftReset( 
+	PCore_Driver_Extension pCore, 
+	PDomain_Port pPort 
+	);
+
+void SATA_PortReportNoDevice (
+    PCore_Driver_Extension pCore, 
+	PDomain_Port pPort
+	);
+
+PMV_Request GetInternalReqFromPool( 
+	PCore_Driver_Extension pCore
+	);
+
+void ReleaseInternalReqToPool( 
+	PCore_Driver_Extension pCore, 
+	PMV_Request pReq
+	);
+
+#define mvDisableIntr(portMmio, old_stat) do{ \
+		old_stat = MV_REG_READ_DWORD(portMmio, PORT_IRQ_MASK); \
+		MV_REG_WRITE_DWORD(portMmio, PORT_IRQ_MASK, 0);\
+		}while(0)
+
+#define mvEnableIntr(portMmio, old_stat)	MV_REG_WRITE_DWORD(portMmio, PORT_IRQ_MASK, old_stat)
+
+#define CORE_MAX_RESET_COUNT	10
+
+#endif
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/core/core_inter.h linux-2.6.25/drivers/scsi/mv/core/core_inter.h
--- linux-2.6.25.orig/drivers/scsi/mv/core/core_inter.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/core/core_inter.h	2008-07-28 18:42:43.329188696 +0200
@@ -0,0 +1,70 @@
+#if !defined(CORE_MAIN_H)
+#define CORE_MAIN_H
+
+#include "core_thor.h"
+
+#ifdef SUPPORT_CONSOLIDATE
+#include "consolid.h"
+#endif
+
+#ifdef _OS_WINDOWS
+#define CPU_TO_LE_16(x) x
+#define CPU_TO_LE_32(x) x
+#endif /* _OS_WINDOWS  */
+
+struct _Domain_Port;
+typedef struct _Domain_Port Domain_Port, *PDomain_Port;
+
+struct _Domain_Device;
+typedef struct _Domain_Device Domain_Device, *PDomain_Device;
+
+#define CORE_STATE_IDLE			0
+#define CORE_STATE_STARTED		1
+
+/* Flag definition for Fast Boot Skip */
+#define FLAG_SKIP_PATA_PORT		MV_BIT(0)
+#define FLAG_SKIP_PATA_DEVICE	MV_BIT(1)
+#define FLAG_SKIP_PM			MV_BIT(2)
+
+typedef struct _Core_Driver_Extension
+{
+	MV_LPVOID	Mmio_Base;						/* Memory IO base address */
+	MV_U16		Vendor_Id;
+	MV_U16		Device_Id;
+	MV_U8		State;
+	MV_U8		Revision_Id;
+	MV_U8		VS_Reg_Saved;
+	MV_U8		Flag_Fastboot_Skip;
+
+	MV_U32		Capacity;						
+	MV_U32		Port_Map;
+	MV_U8		Port_Num;						/* How many ports do we have? */
+	MV_U8		SATA_Port_Num;
+	MV_U8		PATA_Port_Num;
+	MV_U8		Adapter_State;					/* Adapter state */
+	MV_U8		Is_Dump;						/* Is during dump */
+	MV_U8		Need_Reset;						/* Need_Reset == 1 means the controller needs a reset. Lily 3/7/2006*/
+	MV_U8		Resetting;
+#ifdef _OS_BIOS
+	MV_U8		host_reseting;
+#else
+	MV_U8		Reserved1;
+#endif
+
+	MV_U8		Total_Device_Count;
+	MV_U8		Reserved2[3];
+
+	MV_LPVOID	Base_Address[MAX_BASE_ADDRESS];	/* Base Address */
+	Domain_Port Ports[MAX_PORT_NUMBER];			/* Domain Ports */
+
+	List_Head	Waiting_List; 					/* Waiting Request Queue */
+	List_Head	Internal_Req_List;				/* Internal Request Queue */
+
+#ifdef SUPPORT_CONSOLIDATE
+	PConsolidate_Extension	pConsolid_Extent;	
+	PConsolidate_Device		pConsolid_Device;
+#endif
+}Core_Driver_Extension, *PCore_Driver_Extension;
+
+#endif /* CORE_MAIN_H */
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/core/core_sata.h linux-2.6.25/drivers/scsi/mv/core/core_sata.h
--- linux-2.6.25.orig/drivers/scsi/mv/core/core_sata.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/core/core_sata.h	2008-07-28 18:42:43.329188696 +0200
@@ -0,0 +1,51 @@
+#if !defined(FIS_H)
+#define FIS_H
+
+/* SATA FIS: Register-Host to Device*/
+typedef struct _SATA_FIS_REG_H2D
+{
+	MV_U8	FIS_Type;
+#ifdef __MV_BIG_ENDIAN_BITFIELD__
+	MV_U8	C : 1;
+	MV_U8	Reserved0 : 3;
+	MV_U8	PM_Port : 4;
+#else
+	MV_U8	PM_Port : 4;
+	MV_U8	Reserved0 : 3;
+	MV_U8	C : 1;
+#endif /* __MV_BIG_ENDIAN_BITFIELD__ */
+	MV_U8	Command;
+	MV_U8	Features;
+
+	MV_U8	LBA_Low;
+	MV_U8	LBA_Mid;
+	MV_U8	LBA_High;
+	MV_U8	Device;
+
+	MV_U8	LBA_Low_Exp;
+	MV_U8	LBA_Mid_Exp;
+	MV_U8	LBA_High_Exp;
+	MV_U8	Features_Exp;
+
+	MV_U8	Sector_Count;
+	MV_U8	Sector_Count_Exp;
+	MV_U8	Reserved1;
+	MV_U8	Control;
+
+	MV_U8	Reserved2[4];
+} SATA_FIS_REG_H2D, *PSATA_FIS_REG_H2D;
+
+/* FIS type definition */
+#define SATA_FIS_TYPE_REG_H2D			0x27	/* Register FIS - Host to Device */
+#define SATA_FIS_TYPE_REG_D2H			0x34	/* Register FIS - Device to Host */
+
+#define SATA_FIS_TYPE_DMA_ACTIVATE		0x39	/* DMA Activate FIS - Device to Host */
+#define SATA_FIS_TYPE_DMA_SETUP			0x41	/* DMA Setup FIS - Bi-directional */
+
+#define SATA_FIS_TYPE_DATA				0x46	/* Data FIS - Bi-directional */
+#define SATA_FIS_TYPE_BIST_ACTIVATE		0x58	/* BIST Activate FIS - Bi-directional */
+#define SATA_FIS_TYPE_PIO_SETUP			0x5F	/* PIO Setup FIS - Device to Host */
+#define SATA_FIS_TYPE_SET_DEVICE_BITS	0xA1	/* Set Device Bits FIS - Device to Host */
+
+#endif /* FIS_H */
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/core/core_thor.h linux-2.6.25/drivers/scsi/mv/core/core_thor.h
--- linux-2.6.25.orig/drivers/scsi/mv/core/core_thor.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/core/core_thor.h	2008-07-28 18:42:43.330188775 +0200
@@ -0,0 +1,480 @@
+#if !defined(CORE_SATA_H)
+#define CORE_SATA_H
+
+#include "core_ata.h"
+#include "com_tag.h"
+
+#define MAX_SATA_PORT_NUMBER   4
+#if /*(VER_OEM==VER_OEM_ASUS) || */(VER_OEM == VER_OEM_INTEL)
+#define MAX_PATA_PORT_NUMBER   0
+#else
+#define MAX_PATA_PORT_NUMBER   1
+#endif
+#define MAX_PORT_NUMBER        (MAX_SATA_PORT_NUMBER + MAX_PATA_PORT_NUMBER)
+
+#ifdef SUPPORT_PM
+#define MAX_DEVICE_PER_PORT    5
+#else
+#ifdef _OS_BIOS
+#define MAX_DEVICE_PER_PORT    5 /* following WinDriver, but BIOS does not support PM currently */
+#else
+#define MAX_DEVICE_PER_PORT    2
+#endif
+#endif
+
+#define MAX_DEVICE_NUMBER			(MAX_PORT_NUMBER*MAX_DEVICE_PER_PORT)
+
+#ifdef _OS_BIOS
+#define MAX_SLOT_NUMBER				1
+#else
+#define MAX_SLOT_NUMBER				32
+#endif
+
+#define INTERNAL_REQ_COUNT			MAX_DEVICE_SUPPORTED
+
+//TBD: Checking the following naming
+#define SATA_CMD_LIST_SIZE			(32 * MAX_SLOT_NUMBER)
+#define SATA_RX_FIS_SIZE			256
+
+#define SATA_CMD_TABLE_HEADER_SIZE	0x80
+#define SATA_CMD_TABLE_SG_SIZE		(MAX_SG_ENTRY * 16)
+#define SATA_CMD_TABLE_SIZE			(SATA_CMD_TABLE_HEADER_SIZE + SATA_CMD_TABLE_SG_SIZE)
+
+#define SATA_SCRATCH_BUFFER_SIZE	sizeof(ATA_Identify_Data)
+
+/*
+*struct _MV_Command_Header;
+*typedef struct _MV_Command_Header MV_Command_Header, *PMV_Command_Header;
+*/
+
+/* various functions for master/slave support */
+#define PATA_MapDeviceId(ID)		( ID % MAX_DEVICE_PER_PORT )
+#define PATA_MapPortId(ID)			( ID / MAX_DEVICE_PER_PORT )	//TBD: Change the name
+
+#define DEVICE_TYPE_ATAPI						MV_BIT(0)
+
+/* Device initialization state */
+#define DEVICE_STATE_IDLE						0x0
+#define DEVICE_STATE_RESET_DONE					0x1
+#define DEVICE_STATE_IDENTIFY_DONE				0x2
+#define DEVICE_STATE_SET_UDMA_DONE				0x3
+#define DEVICE_STATE_SET_PIO_DONE				0x4
+#define DEVICE_STATE_ENABLE_WRITE_CACHE_DONE	0x5
+#define DEVICE_STATE_ENABLE_READ_AHEAD_DONE		0x6
+#define DEVICE_STATE_INIT_DONE					0xFF
+
+/* Device status */
+#define DEVICE_STATUS_NO_DEVICE					MV_BIT(0)
+#define DEVICE_STATUS_EXISTING					MV_BIT(1)
+#define DEVICE_STATUS_FUNCTIONAL				MV_BIT(2)
+
+/* 3G and TCQ */
+#define DEVICE_CAPACITY_48BIT_SUPPORTED			MV_BIT(0)
+#define	DEVICE_CAPACITY_SMART_SUPPORTED			MV_BIT(1)
+#define	DEVICE_CAPACITY_WRITECACHE_SUPPORTED	MV_BIT(2)
+#define DEVICE_CAPACITY_NCQ_SUPPORTED			MV_BIT(3)
+#define DEVICE_CAPACITY_RATE_1_5G			MV_BIT(4)
+#define DEVICE_CAPACITY_RATE_3G				MV_BIT(5)	
+#define DEVICE_CAPACITY_READLOGEXT_SUPPORTED		MV_BIT(6)
+
+
+#define DEVICE_SETTING_SMART_ENABLED			MV_BIT(0)
+#define DEVICE_SETTING_WRITECACHE_ENABLED		MV_BIT(1)
+
+struct _Domain_Device {
+	MV_U16 Id;
+	MV_U8 Device_Type;				/* ATA or ATAPI */
+	MV_U8 State;					/* DEVICE_STATE_XXX */
+	
+	MV_U8 Status;					/* DEVICE_STATUS_XXX */
+	MV_BOOLEAN Is_Slave;
+#ifndef BIOS_NOT_SUPPORT
+	MV_BOOLEAN Need_Notify;			/* added for PM hot plug */
+	MV_U8 Reserved0;
+#endif
+
+	struct _Domain_Port * PPort;	/* Shortcut to the port. */
+
+	/* 
+	 * Different device should have a different struct here. 
+	 * Now it's SATA device only.
+	 */
+	MV_U16 Capacity;				/* Be able to support NCQ, 48 bit LBA. */
+	MV_U16 Setting;					/* The supported features are enabled or not. */
+
+	MV_U8 PM_Number;
+	MV_U8 PIO_Mode;
+	MV_U8 MDMA_Mode;
+	MV_U8 UDMA_Mode;
+
+	MV_U64 Max_LBA;
+#ifndef BIOS_NOT_SUPPORT
+	MV_U8 Queue_Depth;
+#endif
+	MV_U8 Timer_ID;					/* for error handling */
+	MV_U8 Outstanding_Req;			/* for error handling */
+	MV_U8 Reset_Count;
+	MV_U8 Reserved1[4];
+
+
+#ifndef BIOS_NOT_SUPPORT
+	MV_U8 Serial_Number[20];
+#endif
+	MV_U8 Model_Number[40];
+#ifndef BIOS_NOT_SUPPORT
+	MV_U8 Firmware_Revision[8];
+#endif
+	MV_U32 WWN;
+
+	/* The scratch buffer used for initialization like identify */
+	MV_PVOID Scratch_Buffer;
+	MV_PHYSICAL_ADDR Scratch_Buffer_DMA;
+
+	/* Internal request used in device initialization */
+	PMV_Request Internal_Req;
+
+};
+
+/* Port initialization state */
+
+#define PORT_STATE_IDLE					0x00
+#define PORT_STATE_INIT_DONE			0xFF
+
+#define PORT_TYPE_SATA					0
+#define PORT_TYPE_PATA					1
+#define PORT_TYPE_PM					4 /*PM Support, lily tested*/
+
+#define PORT_CAPACITY_NCQ_SUPPORTED		MV_BIT(0)
+
+#define PORT_SETTING_NCQ_RUNNING		MV_BIT(0)
+#define PORT_SETTING_PM_EXISTING		MV_BIT(1)
+#define PORT_SETTING_PM_FUNCTIONAL		MV_BIT(2)	// added by Harriet for PM hot plug 
+#define PORT_SETTING_DURING_RETRY		MV_BIT(3)
+
+struct _Domain_Port {
+	MV_PVOID Core_Extension;
+
+	MV_U8 Id;
+	MV_U8 Port_State;
+	MV_U8 Type;					/* PORT_TYPE_XXX */
+	MV_U8 Capacity;				/* PORT_CAPACITY_XXX */
+	MV_U8 Setting;				/* PORT_SETTING_XXX */
+	MV_U8 Device_Number;		/* How many devices this port has now? */	
+	MV_U16 PM_Vendor_Id;
+	MV_U16 PM_Device_Id;
+	MV_U8 PM_Product_Revision;
+	MV_U8 PM_Spec_Revision;
+	MV_U8 PM_Num_Ports;
+#ifndef BIOS_NOT_SUPPORT
+	MV_U8 Reserved0[3];
+#endif
+	MV_LPVOID Mmio_Base;		/* Base address for SATA Port Registers */
+#ifndef _OS_BIOS
+	MV_LPVOID Mmio_SCR;			/* Base address for sata register(SCR) */
+#endif
+	MV_PVOID Cmd_List;			/* Can be PMV_PATA_Command_Header or PMV_Command_Header */
+	MV_PHYSICAL_ADDR Cmd_List_DMA;
+
+	/* Received FIS */
+	MV_PVOID RX_FIS;
+	MV_PHYSICAL_ADDR RX_FIS_DMA;
+
+	/* The 32 command tables. */
+	MV_PVOID Cmd_Table;
+	MV_PHYSICAL_ADDR Cmd_Table_DMA;
+
+	/* Running MV_Requests are linked together. */	//TBD: Too much memory?
+	PMV_Request Running_Req[MAX_SLOT_NUMBER];	
+
+	/* Which slot has requests running. */
+	MV_U32	Running_Slot;
+#ifndef BIOS_NOT_SUPPORT
+//	MV_U32	Reserved1;
+	MV_U32	VS_RegC;
+#endif
+
+	struct _Domain_Device Device[MAX_DEVICE_PER_PORT];	
+
+	//Timer: for time out checking.
+
+	Tag_Stack Tag_Pool;
+};
+
+/*
+ * Hardware related format. Never change their size. Must follow hardware specification.
+ */
+/* AHCI a little difference */
+typedef struct _MV_Command_Header
+{
+#ifdef __MV_BIG_ENDIAN_BITFIELD__
+	MV_U8	Reserved0 : 2;
+	MV_U8	Packet_Command : 1;
+	MV_U8	FIS_Length : 5;
+			
+	MV_U8	PM_Port : 4;
+	MV_U8	NCQ : 1;
+	MV_U8	Reserved1: 2;
+	MV_U8	Reset : 1;
+#else /* default to __MV_LITTLE_ENDIAN_BITFIELD__ */
+	MV_U8	FIS_Length : 5;		/* Command FIS Length in DWORD */
+	MV_U8	Packet_Command : 1;	/* ATAPI packet command */
+	MV_U8	Reserved0 : 2;
+
+	MV_U8	Reset : 1;
+	MV_U8	Reserved1: 2;
+	MV_U8	NCQ : 1;
+	MV_U8	PM_Port : 4;
+#endif /* __MV_BIG_ENDIAN_BITFIELD__ */
+	MV_U16	PRD_Entry_Count : 16;
+
+	MV_U32	Reserved2;
+	MV_U32	Table_Address;
+	MV_U32	Table_Address_High;
+
+	MV_U32	Reserved3[4];
+} MV_Command_Header, *PMV_Command_Header;
+
+typedef struct _MV_PATA_Command_Header
+{
+#ifdef __MV_BIG_ENDIAN_BITFIELD__
+	MV_U8	Packet_Command : 1;	
+	MV_U8	TCQ : 1;		
+	MV_U8	Controller_Command : 1;	
+	MV_U8	PIO_Sector_Count : 5;   
+
+	MV_U8	Is_Slave : 1;		
+	MV_U8	Reset : 1;		
+	MV_U8	Diagnostic_Command : 1;	
+	MV_U8	Is_48Bit : 1;		
+	MV_U8	PIO_Sector_Command : 1;	
+	MV_U8	Non_Data : 1;		
+	MV_U8	Data_In : 1;		
+	MV_U8	DMA : 1;		
+#else /* __MV_BIG_ENDIAN_BITFIELD__ */
+	MV_U8	PIO_Sector_Count : 5;   /* PIO command data block size in sector */
+	MV_U8	Controller_Command : 1;	/* If 1, command is for the controller instead of the device */
+	MV_U8	TCQ : 1;		/* TCQ command */
+	MV_U8	Packet_Command : 1;	/* ATAPI packet command */
+
+	MV_U8	DMA : 1;		/* DMA command */
+	MV_U8	Data_In : 1;		/* Data is from device to host. */
+	MV_U8	Non_Data : 1;		/* Non data command */
+	MV_U8	PIO_Sector_Command : 1;	/* PIO multiple sectors commands including read/write sector, read/write multiple. */
+	MV_U8	Is_48Bit : 1;		/* 48 bit command */
+	MV_U8	Diagnostic_Command : 1;	/* Execute device diagnostic command */
+	MV_U8	Reset : 1;		/* Device reset command */
+	MV_U8	Is_Slave : 1;		/* 0 for master and 1 for slave */
+#endif /* __MV_BIG_ENDIAN_BITFIELD__ */
+	MV_U16	PRD_Entry_Count;
+
+
+	MV_U32	Reserved0;
+	MV_U32	Table_Address;
+	MV_U32	Table_Address_High;
+
+	MV_U32	Reserved3[4];
+} MV_PATA_Command_Header, *PMV_PATA_Command_Header;
+
+/* SATA Command Table: same with AHCI */
+typedef struct _MV_Command_Table
+{
+	MV_U8	FIS[64];								/* Command FIS */
+	MV_U8	ATAPI_CDB[32];							/* ATAPI CDB */
+	MV_U8	Reserve0[32];
+	MV_SG_Entry PRD_Entry[MAX_SG_ENTRY];		/* 32 */
+} MV_Command_Table, *PMV_Command_Table;
+
+#define DIMMSGTABLE_SIZE sizeof(AHCI_DIMM_SG_TABLE)
+
+#define	MV_PCI_BAR			 5
+#define	MV_CMD_ATAPI		 (1L << 5)
+#define	MV_CMD_WRITE		 (1L << 6)
+
+#define	RX_FIS_D2H_REG		 0x40	/* offset of D2H Register FIS data */
+
+	/* global controller registers */
+#define	HOST_CAP			 0x00 	/* host capabilities */
+#define	HOST_CTL			 0x04	/* global host control */
+#define	HOST_IRQ_STAT		 0x08 	/* interrupt status */
+#define	HOST_PORTS_IMPL		 0x0c 	/* bitmap of implemented ports */
+#define	HOST_VERSION		 0x10 	/* AHCI spec. version compliancy */
+
+	/* HOST_CTL bits */
+#define	HOST_RESET			(1L << 0)  /* reset controller; self-clear */
+#define	HOST_IRQ_EN		 	(1L << 1)  /* global IRQ enable */
+#define	HOST_MVL_EN		 	(1L << 31) /* AHCI enabled */
+
+	/* HOST_CAP bits */
+#define	HOST_CAP_64			(1L << 31) /* PCI DAC (64-bit DMA) support */
+
+	/* Vendor specific register */
+#define VENDOR_DETECT		0xA4	/* PATA device/PM detection */
+	/* VENDOR_DETECT bits */
+#define VENDOR_DETECT_PATA	(1L << 10)	/* PATA device detection (bit10) (0 - default) */
+#define VENDOR_DETECT_PM	(1L << 11)	/* PM device detection (bit11) (0 - default) */
+
+	/* registers for each SATA port */
+#define	PORT_LST_ADDR		0x00 /* command list DMA addr */
+#define	PORT_LST_ADDR_HI	0x04 /* command list DMA addr hi */
+#define	PORT_FIS_ADDR		0x08 /* FIS rx buf addr */
+#define	PORT_FIS_ADDR_HI	0x0c /* FIS rx buf addr hi */
+#define	PORT_IRQ_STAT		0x10 /* interrupt status */
+#define	PORT_IRQ_MASK		0x14 /* interrupt enable/disable mask */
+#define	PORT_CMD			0x18 /* port command */
+
+	/* For SATA port */
+#define	PORT_TFDATA			0x20	/* taskfile data */
+#define	PORT_SIG			0x24	/* device TF signature */
+#define	PORT_CMD_ISSUE		0x38 	/* command issue */
+#define	PORT_FIFO_CTL		0x44	/* vendor unique FIFO control */
+#define	PORT_SCR			0x28 	/* SATA phy register block */
+#define	PORT_SCR_STAT		0x28 	/* SATA phy register: SStatus */
+#define	PORT_SCR_CTL		0x2c 	/* SATA phy register: SControl */
+#define	PORT_SCR_ERR		0x30 	/* SATA phy register: SError */
+#define	PORT_SCR_ACT		0x34 	/* SATA phy register: SActive */
+#define	PORT_PM_FIS_0		0x3c	/* port multiplier FIS content 0 */
+#define	PORT_PM_FIS_1		0x40	/* port multiplier FIS content 1 */
+
+/* #if (VER_OEM==VER_OEM_ASUS) */
+#define	PORT_VSR_ADDR		0x78	/* port Vendor Specific Register Address */
+#define	PORT_VSR_DATA		0x7c	/* port Vendor Specific Register Data */
+/* #endif */
+#define VS_REG_SIG			0xab
+
+	/* For PATA port */
+#define	PORT_MASTER_TF0		0x20
+#define	PORT_MASTER_TF1		0x24
+#define	PORT_MASTER_TF2		0x28
+#define	PORT_SLAVE_TF0		0x30
+#define	PORT_SLAVE_TF1		0x3c
+#define	PORT_SLAVE_TF2		0x40
+#define	PORT_INTERNAL_STATE_MACHINE	0x48
+
+
+#ifdef AHCI
+	/* PORT_IRQ_{STAT,MASK} bits */
+#define	PORT_IRQ_COLD_PRES		(1L << 31)	/* cold presence detect */
+#define	PORT_IRQ_TF_ERR			(1L << 30)	/* task file error */
+#define	PORT_IRQ_HBUS_ERR		(1L << 29)	/* host bus fatal error */
+#define	PORT_IRQ_HBUS_DATA_ERR	(1L << 28)	/* host bus data error */
+#define	PORT_IRQ_IF_ERR			(1L << 27)	/* interface fatal error */
+#define	PORT_IRQ_IF_NONFATAL	(1L << 26)	/* interface non-fatal error */
+#define	PORT_IRQ_OVERFLOW		(1L << 24)	/* xfer exhausted available S/G */
+#define	PORT_IRQ_BAD_PMP		(1L << 23)	/* incorrect port multiplier */
+
+#define	PORT_IRQ_PHYRDY			(1L << 22)	 /* PhyRdy changed */
+#define PORT_IRQ_ASYNC_NOTIF	(1L << 20)	 /* Asynchronous Notification, SDB FIS */
+#define	PORT_IRQ_DEV_ILCK		(1L << 7)		/* device interlock */
+#define	PORT_IRQ_CONNECT		(1L << 6)		/* port connect change status */
+#define	PORT_IRQ_SG_DONE		(1L << 5)		/* descriptor processed */
+#define	PORT_IRQ_UNK_FIS		(1L << 4)		/* unknown FIS rx'd */
+#define	PORT_IRQ_SDB_FIS		(1L << 3)		/* Set Device Bits FIS rx'd */
+#define	PORT_IRQ_DMAS_FIS		(1L << 2)		/* DMA Setup FIS rx'd */
+#define	PORT_IRQ_PIOS_FIS		(1L << 1)		/* PIO Setup FIS rx'd */
+#define	PORT_IRQ_D2H_REG_FIS	(1L << 0)		/* D2H Register FIS rx'd */
+
+#define	PORT_IRQ_FATAL	(PORT_IRQ_TF_ERR |\
+				  		PORT_IRQ_HBUS_ERR |\
+				  		PORT_IRQ_HBUS_DATA_ERR |\
+				  		PORT_IRQ_IF_ERR)
+#define	DEF_PORT_IRQ	(PORT_IRQ_FATAL | PORT_IRQ_PHYRDY |\
+				  		PORT_IRQ_CONNECT | PORT_IRQ_SG_DONE |\
+				  		PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_FIS |\
+				  		PORT_IRQ_DMAS_FIS | PORT_IRQ_PIOS_FIS |\
+				  		PORT_IRQ_D2H_REG_FIS)
+#else
+	/* PORT_IRQ_{STAT,MASK} bits for SATA port */
+#define	PORT_IRQ_SIGNATURE_FIS	(1L << 31)	/* Signature FIS received */
+#define	PORT_IRQ_TF_ERR			(1L << 30)	/* task file error */
+#define	PORT_IRQ_PHYRDY			(1L << 22)	/* PhyRdy changed */
+
+#define	PORT_IRQ_BIST			(1L << 21)	/* BIST activate FIS received */
+#define	PORT_IRQ_ASYNC_NOTIF	(1L << 20)	/* Asynchronous notification received */
+#define	PORT_IRQ_LINK_RECEIVE_ERROR	(1L << 7)
+#define	PORT_IRQ_LINK_TRANSMIT_ERROR (1L << 6)
+#define	PORT_IRQ_PIO_DONE		(1L << 5)		/* PIO Data-in Done */
+#define	PORT_IRQ_UNK_FIS		(1L << 4)		/* unknown FIS rx'd */
+#define	PORT_IRQ_SDB_FIS		(1L << 3)		/* Set Device Bits FIS rx'd */
+#define	PORT_IRQ_DMAS_FIS		(1L << 2)		/* DMA Setup FIS rx'd */
+#define	PORT_IRQ_PIOS_FIS		(1L << 1)		/* PIO Setup FIS rx'd */
+#define	PORT_IRQ_D2H_REG_FIS	(1L << 0)		/* D2H Register FIS rx'd */
+
+#if 0
+#define	DEF_PORT_IRQ		 (MV_U32)(\
+				PORT_IRQ_SIGNATURE_FIS | PORT_IRQ_TF_ERR |\
+				PORT_IRQ_PHYRDY | \
+				PORT_IRQ_BIST |	PORT_IRQ_ASYNC_NOTIF | \
+				PORT_IRQ_LINK_RECEIVE_ERROR | PORT_IRQ_LINK_TRANSMIT_ERROR |\
+				PORT_IRQ_PIO_DONE | PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_FIS | PORT_IRQ_D2H_REG_FIS)
+#else
+#define	DEF_PORT_IRQ		 (MV_U32)(\
+				PORT_IRQ_SIGNATURE_FIS | PORT_IRQ_TF_ERR |\
+				PORT_IRQ_PHYRDY | \
+				PORT_IRQ_BIST |	PORT_IRQ_ASYNC_NOTIF | \
+				PORT_IRQ_PIO_DONE | PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_FIS | PORT_IRQ_D2H_REG_FIS)
+#endif
+
+	/* PORT_IRQ_{STAT,MASK} bits for PATA port */
+#define	PORT_IRQ_PATA_DEVICE0_DONE	MV_BIT(0)
+#define	PORT_IRQ_PATA_DEVICE0_ERROR	MV_BIT(1)
+#define	PORT_IRQ_PATA_DEVICE1_DONE	MV_BIT(2)
+#define	PORT_IRQ_PATA_DEVICE1_ERROR	MV_BIT(3)
+#define	DEF_PORT_PATA_IRQ	(PORT_IRQ_PATA_DEVICE0_DONE | PORT_IRQ_PATA_DEVICE0_ERROR\
+								| PORT_IRQ_PATA_DEVICE1_DONE | PORT_IRQ_PATA_DEVICE1_ERROR)
+#endif
+
+#ifdef AHCI
+	/* PORT_CMD bits */
+#define	PORT_CMD_LIST_ON		(1L << 15)	/* cmd list DMA engine running */
+#define	PORT_CMD_FIS_ON			(1L << 14)	/* FIS DMA engine running */
+#define	PORT_CMD_FIS_RX			(1L << 4)		/* Enable FIS receive DMA engine */
+#define	PORT_CMD_POWER_ON		(1L << 2)		/* Power up device */
+#define	PORT_CMD_SPIN_UP		(1L << 1)		/* Spin up device */
+#define	PORT_CMD_START			(1L << 0)		/* Enable port DMA engine */
+
+#define	PORT_CMD_ICC_ACTIVE		(0x1L << 28)	/* Put i/f in active state */
+#define	PORT_CMD_ICC_PARTIAL	(0x2L << 28)	/* Put i/f in partial state */
+#define	PORT_CMD_ICC_SLUMBER	(0x6L << 28)	/* Put i/f in slumber state */
+#else
+	/* PORT_CMD bits for SATA port */
+#define	PORT_CMD_LIST_ON		(1L << 15)	/* cmd list DMA engine running */
+#define	PORT_CMD_FIS_ON			(1L << 14)	/* FIS DMA engine running */
+
+#define	PORT_CMD_FIS_RX			(1L << 4)		/* Enable FIS receive DMA engine */
+#define	PORT_CMD_START			(1L << 0)		/* Enable port DMA engine */
+
+	/* PORT_CMD bits for PATA port */
+#define	PORT_CMD_PATA_LIST_ON	MV_BIT(15)
+#define	PORT_CMD_PATA_HARD_RESET	MV_BIT(3)
+#define	PORT_CMD_PATA_INTERRUPT MV_BIT(1)
+#define	PORT_CMD_PATA_START		MV_BIT(0)
+#endif
+
+#define	PORT_SSTATUS_IPM_NO_DEVICE	0x0L	/* IPM: device not present or communication not established */
+#define	PORT_SSTATUS_IPM_ACTIVE		0x1L	/* IPM: Interface in active state */
+#define	PORT_SSTATUS_IPM_PARTIAL	0x2L	/* IPM: Interface in partial power management state */
+#define	PORT_SSTATUS_IPRM_SLUMBER	0x6L	/* IPM: Interface in slumber power management state ("IPRM" spelling kept for source compatibility) */
+
+#define	PORT_TF_STATUS_BSY	(1L<<7)	/* Task file status: BSY (device busy) */
+#define	PORT_TF_STATUS_DRQ 	(1L<<3)	/* Task file status: DRQ (data transfer requested) */
+#define	PORT_TF_STATUS_ERR 	(1L<<0)	/* Task file status: ERR (error occurred) */
+
+
+typedef enum _MV_QUEUE_COMMAND_RESULT
+{
+    MV_QUEUE_COMMAND_RESULT_FINISHED = 0,	/* command completed without being queued -- inferred from name; confirm against callers */
+    MV_QUEUE_COMMAND_RESULT_FULL,	/* queue full; caller should retry later */
+    MV_QUEUE_COMMAND_RESULT_SENDTED,	/* command sent to hardware ("SENDTED" is a historic misspelling of "SENT", kept for compatibility) */
+} MV_QUEUE_COMMAND_RESULT;
+
+#define SATA_GetCommandHeader(pPort, slot)	\
+	((PMV_Command_Header)pPort->Cmd_List + slot)
+
+#define PATA_GetCommandHeader(pPort, slot)	\
+	((PMV_PATA_Command_Header)pPort->Cmd_List + slot)
+
+#define Port_GetCommandTable(pPort, slot)	\
+	((PMV_Command_Table)((MV_PU8)pPort->Cmd_Table + slot * SATA_CMD_TABLE_SIZE))
+
+#endif /* CORE_SATA_H */
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/core/core_xor.c linux-2.6.25/drivers/scsi/mv/core/core_xor.c
--- linux-2.6.25.orig/drivers/scsi/mv/core/core_xor.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/core/core_xor.c	2008-07-28 18:42:43.330188775 +0200
@@ -0,0 +1,320 @@
+#include "mv_include.h"
+#include "core_inter.h"
+
+#ifdef RAID_DRIVER
+#ifdef SOFTWARE_XOR
+
+/*
+ * Software XOR operations
+ */
+void mvXORWrite (MV_PVOID This, PMV_XOR_Request pXORReq);
+void mvXORCompare (MV_PVOID This, PMV_XOR_Request pXORReq);
+void mvXORDMA (MV_PVOID This, PMV_XOR_Request pXORReq);
+
+void Core_ModuleSendXORRequest(MV_PVOID This, PMV_XOR_Request pXORReq)	/* dispatch one software-XOR request by type, then invoke its completion callback */
+{
+	PCore_Driver_Extension pCore = (PCore_Driver_Extension)This;
+	/* Route by request type; unknown types are failed with XOR_STATUS_INVALID_REQUEST. */
+	switch (pXORReq->Request_Type) 
+	{
+		case XOR_REQUEST_WRITE:
+			mvXORWrite (pCore, pXORReq);
+			break;
+		case XOR_REQUEST_COMPARE:
+			mvXORCompare (pCore, pXORReq);
+			break;
+		case XOR_REQUEST_DMA:
+			mvXORDMA (pCore, pXORReq);
+			break;
+		default:
+			pXORReq->Request_Status = XOR_STATUS_INVALID_REQUEST;
+			break;
+	}
+	pXORReq->Completion( pXORReq->Cmd_Initiator, pXORReq );	/* the initiator is always notified, even for invalid types */
+}
+
+void mvXORInit(	/* prime per-SG-table cursors: first entry pointer, its mapped virtual address, and its size */
+	PMV_SG_Entry            *pSGPtr,
+	MV_PU32	                SGSizePtr,
+	MV_PVOID                *pVirPtr,
+	PMV_SG_Table            SGListPtr,
+	MV_U8                   tableCount,
+	MV_PU32                 minSizePtr)
+{
+	MV_U8 id;
+	for ( id=0; id<tableCount; id++ ) {
+		pSGPtr[id] = SGListPtr[id].Entry_Ptr;
+		pVirPtr[id] = (MV_PVOID)
+			( (MV_PTR_INTEGER)pSGPtr[id]->Base_Address 
+			| (MV_PTR_INTEGER)pSGPtr[id]->Base_Address_High<<32 );	/* rebuild the address from its low/high halves */
+		SGSizePtr[id] = pSGPtr[id]->Size;
+		if ( *minSizePtr > SGSizePtr[id] ) *minSizePtr=SGSizePtr[id];	/* *minSizePtr tracks the smallest entry size across all tables */
+	}
+}
+
+void mvXORUpdateEntry(	/* advance SG cursors after 'finishSize' bytes were consumed from every table */
+	PMV_SG_Entry	*pSGPtr,
+	MV_PU32			SGSizePtr,
+	MV_PVOID		*pVirPtr,
+	MV_U32			finishSize,
+	MV_U8			tableCount,
+	MV_PU32			minSizePtr)
+{
+	MV_U8 id;
+	for ( id=0; id<tableCount; id++ ) {
+		if ( SGSizePtr[id] > finishSize )
+			SGSizePtr[id] -= finishSize;	/* current entry still has bytes left */
+		else {
+			pSGPtr[id]++;	/* entry exhausted: step to the next SG entry */
+			pVirPtr[id] = (MV_PVOID)
+					( (MV_PTR_INTEGER)pSGPtr[id]->Base_Address 
+					| (MV_PTR_INTEGER)pSGPtr[id]->Base_Address_High<<32 );
+			SGSizePtr[id] = pSGPtr[id]->Size;
+		}
+		if ( *minSizePtr > SGSizePtr[id] ) *minSizePtr=SGSizePtr[id];	/* keep *minSizePtr = smallest remaining run across tables */
+	}
+}
+
+MV_U8 mvXORByte(	/* one byte of the Galois-field dot-product for target 'tId' across all source buffers */
+	MV_PU8			*pSourceVirPtr,
+	PMV_XOR_Request	pXORReq,
+	MV_U8			tId
+)
+{
+	MV_U8 xorResult, sId;
+	/* result = GF_Add over all sources of GF_Multiply(source byte, Coef[tId][sId]) */
+	xorResult = GF_Multiply(*pSourceVirPtr[0], pXORReq->Coef[tId][0]);
+	for ( sId=1; sId<pXORReq->Source_SG_Table_Count; sId++ ) {
+		xorResult = GF_Add(xorResult,
+						   GF_Multiply(*pSourceVirPtr[sId], pXORReq->Coef[tId][sId]));
+	}
+	return xorResult;
+}
+
+#ifdef SUPPORT_XOR_DWORD
+MV_U32 mvXORDWord(	/* 32-bit-wide variant of mvXORByte: processes four bytes per call */
+	MV_PU32			*pSourceVirPtr,
+	PMV_XOR_Request	pXORReq,
+	MV_U8			tId
+)
+{
+	MV_U8	sId;
+	MV_U32 xorResult;
+	/* NOTE(review): GF_Multiply/GF_Add are applied to whole 32-bit words here -- confirm they operate per byte lane */
+	xorResult = GF_Multiply(*pSourceVirPtr[0], pXORReq->Coef[tId][0]);
+	for ( sId=1; sId<pXORReq->Source_SG_Table_Count; sId++ ) {
+		xorResult = GF_Add(xorResult,
+						   GF_Multiply(*pSourceVirPtr[sId], pXORReq->Coef[tId][sId]));
+	}
+	return xorResult;
+}
+#endif
+
+/* Software XOR write: fill each target buffer with the coefficient-weighted XOR of all sources. The SG tables must hold virtual (mapped) addresses, not physical ones. */
+void mvXORWrite(MV_PVOID This, PMV_XOR_Request pXORReq)
+{
+	PMV_SG_Entry	pSourceSG[XOR_SOURCE_SG_COUNT];
+	PMV_SG_Entry	pTargetSG[XOR_TARGET_SG_COUNT];
+	MV_U32			sourceSize[XOR_SOURCE_SG_COUNT];
+	MV_U32			targetSize[XOR_TARGET_SG_COUNT];
+	MV_U32 i;
+	MV_U8 sId,tId;									/* source index and target index. */
+	MV_U32 size, remainSize, minSize;
+#ifdef SUPPORT_XOR_DWORD
+	MV_PU32			pSourceVir[XOR_SOURCE_SG_COUNT];
+	MV_PU32			pTargetVir[XOR_TARGET_SG_COUNT];
+	MV_U32			xorResult, Dword_size;
+#else
+	MV_PU8			pSourceVir[XOR_SOURCE_SG_COUNT];
+	MV_PU8			pTargetVir[XOR_TARGET_SG_COUNT];
+	MV_U8			xorResult;
+#endif
+
+	/* remainSize = total bytes left overall; minSize = largest run usable before any SG entry boundary. */
+	remainSize = pXORReq->Source_SG_Table_List[0].Byte_Count;	/* All the SG tables should have the same Byte_Count */
+	minSize = remainSize;
+	/* Initialize XOR source cursors */
+	mvXORInit(pSourceSG, sourceSize, (MV_PVOID)pSourceVir,
+			  pXORReq->Source_SG_Table_List,
+			  pXORReq->Source_SG_Table_Count,
+			  &minSize);
+
+	/* Initialize XOR target cursors */
+	mvXORInit(pTargetSG, targetSize, (MV_PVOID)pTargetVir,
+			  pXORReq->Target_SG_Table_List,
+			  pXORReq->Target_SG_Table_Count,
+			  &minSize);
+
+/*
+	for ( sId=0; sId<pXORReq->Source_SG_Table_Count; sId++ ) 
+	{
+		pSourceSG[sId] = pXORReq->Source_SG_Table_List[sId].Entry;
+		sourceSize[sId] = pSourceSG[sId]->Size;
+		pSourceVir[sId] = (MV_PVOID)
+			( (MV_PTR_INTEGER)pSourceSG[sId]->Base_Address 
+			| (MV_PTR_INTEGER)pSourceSG[sId]->Base_Address_High<<32 );
+		MV_DASSERT( remainSize==pXORReq->Source_SG_Table_List[sId].Byte_Count );
+		if ( minSize>sourceSize[sId] ) minSize=sourceSize[sId];
+	}
+
+	for ( tId=0; tId<pXORReq->Target_SG_Table_Count; tId++ ) 
+	{
+		pTargetSG[tId] = pXORReq->Target_SG_Table_List[tId].Entry;
+		targetSize[tId] = pTargetSG[tId]->Size;
+		pTargetVir[tId] = (MV_PVOID)
+			( (MV_PTR_INTEGER)pTargetSG[tId]->Base_Address 
+			| (MV_PTR_INTEGER)pTargetSG[tId]->Base_Address_High<<32 );
+		MV_DASSERT( remainSize==pXORReq->Target_SG_Table_List[tId].Byte_Count );
+		if ( minSize>targetSize[tId] ) minSize=targetSize[tId];
+	}
+*/
+
+	/* Walk all SG tables in lock-step; each pass consumes 'minSize' bytes and computes every target's XOR value. */
+	while ( remainSize>0 ) 
+	{
+		size = minSize;
+#ifdef SUPPORT_XOR_DWORD
+		MV_DASSERT( !(size%4) );
+		Dword_size = size/4;
+		for ( i=0; i<Dword_size; i++ ) 
+#else
+		for ( i=0; i<size; i++ ) 
+#endif
+		{
+			for ( tId=0; tId<pXORReq->Target_SG_Table_Count; tId++ )
+			{
+#ifdef SUPPORT_XOR_DWORD
+				xorResult = mvXORDWord(pSourceVir, pXORReq, tId);
+#else
+				xorResult = mvXORByte(pSourceVir, pXORReq, tId);
+#endif
+
+/*
+				tmp = GF_Multiply(*pSourceVir[0], pXORReq->Coef[tId][0]);
+
+				for ( sId=1; sId<pXORReq->Source_SG_Table_Count; sId++ )
+				{
+					tmp = GF_Add(tmp,
+								GF_Multiply(*pSourceVir[sId], pXORReq->Coef[tId][sId]));
+				}
+				*pTargetVir[tId] = tmp;
+*/
+				*pTargetVir[tId] = xorResult;
+				pTargetVir[tId]++;
+			}
+
+			for ( sId=0; sId<pXORReq->Source_SG_Table_Count; sId++ )
+				pSourceVir[sId]++;
+		}
+
+		/* Update entry pointer, size */
+		MV_DASSERT( remainSize>=size );
+		remainSize -= size;
+		minSize = remainSize;
+		/* Update XOR source cursors */
+		mvXORUpdateEntry(pSourceSG, sourceSize, (MV_PVOID)pSourceVir,
+						 size, pXORReq->Source_SG_Table_Count, &minSize);
+		/* Update XOR target cursors */
+		mvXORUpdateEntry(pTargetSG, targetSize, (MV_PVOID)pTargetVir,
+						 size, pXORReq->Target_SG_Table_Count, &minSize);
+/*
+
+		for ( sId=0; sId<pXORReq->Source_SG_Table_Count; sId++ )
+		{
+			if ( sourceSize[sId]>size )
+			{
+				sourceSize[sId]-=size;
+			}
+			else
+			{
+				pSourceSG[sId]++;
+				pSourceVir[sId] = (MV_PVOID)
+					( (MV_PTR_INTEGER)pSourceSG[sId]->Base_Address | (MV_PTR_INTEGER)pSourceSG[sId]->Base_Address_High<<32 );
+				sourceSize[sId] = pSourceSG[sId]->Size;
+			}
+			if ( minSize>sourceSize[sId] ) minSize=sourceSize[sId];
+		}
+
+		for ( tId=0; tId<pXORReq->Target_SG_Table_Count; tId++ )
+		{
+			if ( targetSize[tId]>size )
+			{
+				targetSize[tId]-=size;
+			}
+			else
+			{
+				pTargetSG[tId]++;
+				pTargetVir[tId] = (MV_PVOID)
+					( (MV_PTR_INTEGER)pTargetSG[tId]->Base_Address | (MV_PTR_INTEGER)pTargetSG[tId]->Base_Address_High<<32 );
+				targetSize[tId] = pTargetSG[tId]->Size;
+			}
+			if ( minSize>targetSize[tId] ) minSize=targetSize[tId];
+		}
+*/
+	}
+
+	pXORReq->Request_Status = XOR_STATUS_SUCCESS;	/* whole request processed */
+}
+
+//TBD: consolidate compare and write -- the SG walk below duplicates mvXORWrite
+void mvXORCompare (MV_PVOID This, PMV_XOR_Request pXORReq)
+{
+	PMV_SG_Entry	pSourceSG[XOR_SOURCE_SG_COUNT];
+	MV_U32			sourceSize[XOR_SOURCE_SG_COUNT];
+	MV_U32			totalSize, remainSize, minSize, size, i;
+	MV_U8			sId;
+#ifdef SUPPORT_XOR_DWORD
+	MV_PU32			pSourceVir[XOR_SOURCE_SG_COUNT];
+	MV_U32			xorResult, Dword_size;
+#else
+	MV_PU8			pSourceVir[XOR_SOURCE_SG_COUNT];
+	MV_U8			xorResult;
+#endif
+
+	/* All the SG tables should have the same Byte_Count */
+	totalSize = remainSize = minSize = pXORReq->Source_SG_Table_List[0].Byte_Count;
+	mvXORInit(pSourceSG, sourceSize, (MV_PVOID)pSourceVir,
+			  pXORReq->Source_SG_Table_List,
+			  pXORReq->Source_SG_Table_Count,
+			  &minSize);
+	while ( remainSize>0 ) {
+		size = minSize;
+#ifdef SUPPORT_XOR_DWORD
+		MV_DASSERT( !(size%4) );
+		Dword_size = size/4;
+		for ( i=0; i<Dword_size; i++ ) {
+			xorResult = mvXORDWord(pSourceVir, pXORReq, 0);
+#else
+		for ( i=0; i<size; i++ ) {
+			xorResult = mvXORByte(pSourceVir, pXORReq, 0);
+#endif
+			if (xorResult != 0)	{
+				pXORReq->Request_Status = XOR_STATUS_ERROR;	/* parity mismatch at this offset */
+#ifdef SUPPORT_XOR_DWORD
+				pXORReq->Error_Offset = totalSize - remainSize + i*4;
+#else
+				pXORReq->Error_Offset = totalSize - remainSize + i;
+#endif
+				return;
+			}
+			for ( sId=0; sId<pXORReq->Source_SG_Table_Count; sId++ )
+				pSourceVir[sId]++;
+		}
+
+		/* Update entry pointer, size */
+		MV_DASSERT( remainSize>=size );
+		remainSize -= size;
+		minSize = remainSize;
+		mvXORUpdateEntry(pSourceSG, sourceSize, (MV_PVOID)pSourceVir,
+						 size, pXORReq->Source_SG_Table_Count, &minSize);
+	}
+}	/* NOTE(review): unlike mvXORWrite, the success path never sets Request_Status = XOR_STATUS_SUCCESS -- confirm callers pre-initialize it */
+
+void mvXORDMA (MV_PVOID This, PMV_XOR_Request pXORReq)	/* XOR DMA request: not implemented in the software-XOR build */
+{
+	MV_ASSERT( MV_FALSE );	/* reaching here is a programming error */
+}
+
+#endif	/* SOFTWARE_XOR */
+#endif	/* RAID_DRIVER */
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/core/core_xor.h linux-2.6.25/drivers/scsi/mv/core/core_xor.h
--- linux-2.6.25.orig/drivers/scsi/mv/core/core_xor.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/core/core_xor.h	2008-07-28 18:42:43.331188775 +0200
@@ -0,0 +1,8 @@
+#ifndef CORE_XOR_H
+#define CORE_XOR_H
+/* Entry point of the software XOR engine (implemented in core_xor.c). */
+void Core_ModuleSendXORRequest(MV_PVOID This, PMV_XOR_Request pXORReq);
+
+#endif /* CORE_XOR_H */
+
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/core/scsi2sata.c linux-2.6.25/drivers/scsi/mv/core/scsi2sata.c
--- linux-2.6.25.orig/drivers/scsi/mv/core/scsi2sata.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/core/scsi2sata.c	2008-07-28 18:42:43.331188775 +0200
@@ -0,0 +1,591 @@
+#include "mv_include.h"
+
+#include "core_inter.h"
+
+#include "core_sata.h"
+#include "core_ata.h"
+
+/*
+ * Translate SCSI command to SATA FIS
+ * The following SCSI command set is the minimum required.
+ *		Standard Inquiry
+ *		Read Capacity
+ *		Test Unit Ready
+ *		Start/Stop Unit
+ *		Read 10
+ *		Write 10
+ *		Request Sense
+ *		Mode Sense/Select
+ */
+MV_VOID SCSI_To_FIS(MV_PVOID This, PMV_Request pReq, MV_U8 tag, PATA_TaskFile pTaskFile)	/* copy a prepared ATA task file into the command slot's H2D register FIS */
+{
+	PCore_Driver_Extension pCore = (PCore_Driver_Extension)This;
+	PDomain_Port pPort = &pCore->Ports[PATA_MapPortId(pReq->Device_Id)];
+	PDomain_Device pDevice = &pPort->Device[PATA_MapDeviceId(pReq->Device_Id)];
+	PMV_Command_Table pCmdTable = Port_GetCommandTable(pPort, tag);
+
+	PSATA_FIS_REG_H2D pFIS = (PSATA_FIS_REG_H2D)pCmdTable->FIS;
+
+	/* 
+	 * TBD
+	 * 1. SoftReset is not supported yet.
+	 * 2. PM_Port
+	 */
+	pFIS->FIS_Type = SATA_FIS_TYPE_REG_H2D;
+
+#ifdef SUPPORT_PM
+	pFIS->PM_Port = pDevice->PM_Number;	/* route the FIS to the device's port-multiplier port */
+#else
+	pFIS->PM_Port = 0;
+#endif
+
+	pFIS->C = 1;	/* Update command register rather than device control register */
+	pFIS->Command = pTaskFile->Command;
+	pFIS->Features = pTaskFile->Features;
+	pFIS->Device = pTaskFile->Device;
+	pFIS->Control = pTaskFile->Control;
+
+	pFIS->LBA_Low = pTaskFile->LBA_Low;
+	pFIS->LBA_Mid = pTaskFile->LBA_Mid;
+	pFIS->LBA_High = pTaskFile->LBA_High;
+	pFIS->Sector_Count = pTaskFile->Sector_Count;
+
+	/* Extended (48-bit) fields are copied unconditionally; harmless for 28-bit commands. */
+	pFIS->LBA_Low_Exp = pTaskFile->LBA_Low_Exp;
+	pFIS->LBA_Mid_Exp = pTaskFile->LBA_Mid_Exp;
+	pFIS->LBA_High_Exp = pTaskFile->LBA_High_Exp;
+	pFIS->Features_Exp = pTaskFile->Feature_Exp;
+	pFIS->Sector_Count_Exp = pTaskFile->Sector_Count_Exp;
+
+#if 0//DEBUG_BIOS
+	MV_DUMPC32(0xCCCCEEA1);
+	MV_DUMP8(pFIS->FIS_Type);
+	MV_DUMP8(pFIS->Command);
+#endif
+
+}
+
+/* Classify the SCSI CDB and set MV_Request.Cmd_Flag bits (packet/48-bit/NCQ/data-in/non-data); returns MV_FALSE for unsupported requests. */
+MV_BOOLEAN Category_CDB_Type(
+	IN PDomain_Device pDevice,
+	IN PMV_Request pReq
+	)
+{
+	//pReq->Cmd_Flag = 0;//TBD: Don't set. HBA has set some bits.
+	PDomain_Port pPort = pDevice->PPort;
+
+	switch ( pReq->Cdb[0] )
+	{
+		case SCSI_CMD_READ_10:
+		case SCSI_CMD_WRITE_10:
+		case SCSI_CMD_VERIFY_10:
+			/* 
+			 * Flag bits that may apply to read/write/verify:
+			 * CMD_FLAG_DATA_IN
+			 * CMD_FLAG_NON_DATA
+			 * CMD_FLAG_DMA
+			 */
+			if ( pDevice->Device_Type&DEVICE_TYPE_ATAPI )
+				pReq->Cmd_Flag |= CMD_FLAG_PACKET;
+
+			if ( pDevice->Capacity&DEVICE_CAPACITY_48BIT_SUPPORTED )
+				pReq->Cmd_Flag |= CMD_FLAG_48BIT;
+
+			if ( pDevice->Capacity&DEVICE_CAPACITY_NCQ_SUPPORTED )
+			{
+				// might be a PM - assert is no longer true
+				//MV_DASSERT( pPort->Type==PORT_TYPE_SATA );		
+				if ( (pReq->Cdb[0]==SCSI_CMD_READ_10)
+					|| (pReq->Cdb[0]==SCSI_CMD_WRITE_10) )
+				{
+					if ( (pPort->Running_Slot==0)
+						|| (pPort->Setting&PORT_SETTING_NCQ_RUNNING) )
+					{
+						/* hardware workaround:
+						 * don't do NCQ on a Silicon Image port multiplier */
+						if( !((pPort->Setting & PORT_SETTING_PM_EXISTING) && 
+							(pPort->PM_Vendor_Id == 0x1095 )) )
+						{
+							if ( pReq->Scsi_Status!=REQ_STATUS_RETRY )
+								pReq->Cmd_Flag |= CMD_FLAG_NCQ;
+						}
+					}
+				}
+			}
+
+			break;
+
+		case SCSI_CMD_MARVELL_SPECIFIC:
+			{
+				/* This request should be for the core module */
+				if ( pReq->Cdb[1]!=CDB_CORE_MODULE )
+					return MV_FALSE;
+				switch ( pReq->Cdb[2] )
+				{
+					case CDB_CORE_IDENTIFY:
+					case CDB_CORE_READ_LOG_EXT:
+						pReq->Cmd_Flag |= CMD_FLAG_DATA_IN;
+						break;
+					
+					case CDB_CORE_SET_UDMA_MODE:
+					case CDB_CORE_SET_PIO_MODE:
+					case CDB_CORE_ENABLE_WRITE_CACHE:
+					case CDB_CORE_DISABLE_WRITE_CACHE:
+					case CDB_CORE_ENABLE_SMART:
+					case CDB_CORE_DISABLE_SMART:
+					case CDB_CORE_SMART_RETURN_STATUS:
+					case CDB_CORE_ENABLE_READ_AHEAD:
+					case CDB_CORE_DISABLE_READ_AHEAD:
+						pReq->Cmd_Flag |= CMD_FLAG_NON_DATA;
+						break;
+
+					case CDB_CORE_SHUTDOWN:
+						if ( pDevice->Device_Type&DEVICE_TYPE_ATAPI )
+							return MV_FALSE;
+						MV_DPRINT(("Shutdown on device %d.\n", pReq->Device_Id));
+						pReq->Cmd_Flag |= CMD_FLAG_NON_DATA;
+						break;
+
+					default:
+						return MV_FALSE;
+				}
+				break;
+			}
+		case SCSI_CMD_START_STOP_UNIT:	
+		case SCSI_CMD_SYNCHRONIZE_CACHE_10:
+			if ( !(pDevice->Device_Type & DEVICE_TYPE_ATAPI )){
+				if ( pDevice->Capacity&DEVICE_CAPACITY_48BIT_SUPPORTED )
+					pReq->Cmd_Flag |= CMD_FLAG_48BIT;
+				pReq->Cmd_Flag |= CMD_FLAG_NON_DATA;
+				break;
+			}	//ATAPI device: intentional fallthrough -- the command is sent to the drive directly as a packet command below.
+		case SCSI_CMD_INQUIRY:
+		case SCSI_CMD_READ_CAPACITY_10:
+		case SCSI_CMD_TEST_UNIT_READY:
+		case SCSI_CMD_MODE_SENSE_10:
+		case SCSI_CMD_MODE_SELECT_10:
+		case SCSI_CMD_PREVENT_MEDIUM_REMOVAL:
+		case SCSI_CMD_READ_TOC:
+		case SCSI_CMD_REQUEST_SENSE:
+		default:
+			if ( pDevice->Device_Type&DEVICE_TYPE_ATAPI )
+			{
+				//MV_DPRINT(("Other requests: 0x%x.\n", pReq->Cdb[0]));
+				pReq->Cmd_Flag |= CMD_FLAG_PACKET;
+
+				#if 0 //TBD: Refer to TranslateSCSIRequest
+				if ( pReq->Data_Transfer_Length==0 )
+				{
+					pReq->Cmd_Flag |= CMD_FLAG_NON_DATA;
+				}
+				else
+				{
+					if ( pReq->Cdb[0]==SCSI_CMD_INQUIRY
+						|| pReq->Cdb[0]==SCSI_CMD_READ_CAPACITY_10 
+						|| pReq->Cdb[0]==SCSI_CMD_REQUEST_SENSE 
+						|| pReq->Cdb[0]==SCSI_CMD_REPORT_LUN
+						|| pReq->Cdb[0]==SCSI_CMD_READ_DISC_STRUCTURE
+						|| pReq->Cdb[0]==SCSI_CMD_READ_TOC
+						|| pReq->Cdb[0]==SCSI_CMD_READ_SUB_CHANNEL
+						|| pReq->Cdb[0]==SCSI_CMD_READ_CD
+						|| pReq->Cdb[0]==SCSI_CMD_GET_EVENT_STATUS_NOTIFICATION
+						)
+					{
+						pReq->Cmd_Flag |= CMD_FLAG_DATA_IN;
+					}
+					else if ( pReq->Cdb[0]==SCSI_CMD_MODE_SENSE_10 )
+					{
+						/* Data out */
+					}
+					else
+					{
+						MV_DPRINT(("Should be data-in or data-out: 0x%x.\n", pReq->Cdb[0]));
+						//MV_ASSERT(MV_FALSE);
+					}
+				}
+				#endif
+
+				break;
+			}
+			else
+			{
+#ifndef _OS_BIOS			
+				MV_DPRINT(("Error: Unknown request: 0x%x.\n", pReq->Cdb[0]));
+#endif
+				return MV_FALSE;
+			}
+	}
+
+	return MV_TRUE;
+}
+
+
+MV_BOOLEAN ATA_CDB2TaskFile(	/* build an ATA task file from a SCSI CDB for a non-packet (hard-disk) device; MV_FALSE = unsupported CDB */
+	IN PDomain_Device pDevice,
+	IN PMV_Request pReq, 
+	IN MV_U8 tag,
+	OUT PATA_TaskFile pTaskFile
+	)
+{
+	MV_ZeroMemory(pTaskFile, sizeof(ATA_TaskFile));
+
+	switch ( pReq->Cdb[0] )
+	{
+		case SCSI_CMD_READ_10:
+		case SCSI_CMD_WRITE_10:
+			{
+				
+				/* 
+				 * The OS maximum transfer length is set to 128K.
+				 * For ATA_CMD_READ_DMA and ATA_CMD_WRITE_DMA,
+				 * the max size they can handle is 256 sectors.
+				 * And Sector_Count==0 means 256 sectors.
+				 * If the OS requests max length>128K, for a 28 bit device, we have to split requests.
+				 */
+				MV_DASSERT( ( (((MV_U16)pReq->Cdb[7])<<8) | (pReq->Cdb[8]) ) <= 256 );
+
+				/*
+				 * 24 bit LBA can express 128GB.
+				 * 4 bytes LBA like SCSI_CMD_READ_10 can express 2TB.
+				 */
+			
+				/* Make sure Cmd_Flag has been set already (Category_CDB_Type). */
+				if ( pReq->Cmd_Flag&CMD_FLAG_NCQ )
+				{
+					//MV_DASSERT( pReq->Cmd_Flag&CMD_FLAG_48BIT );	//TBD: Do we need set 48bit for NCQ
+					
+					pTaskFile->Features = pReq->Cdb[8];	/* for FPDMA the sector count travels in Features/Feature_Exp */
+					pTaskFile->Feature_Exp = pReq->Cdb[7];
+
+					pTaskFile->Sector_Count = tag<<3;	/* NCQ tag is carried in Sector_Count bits 7:3 */
+					
+					pTaskFile->LBA_Low = pReq->Cdb[5];
+					pTaskFile->LBA_Mid = pReq->Cdb[4];
+					pTaskFile->LBA_High = pReq->Cdb[3];
+					pTaskFile->LBA_Low_Exp = pReq->Cdb[2];
+		
+					pTaskFile->Device = MV_BIT(6);
+
+					if ( pReq->Cdb[0]==SCSI_CMD_READ_10 )
+						pTaskFile->Command = ATA_CMD_READ_FPDMA_QUEUED;
+					else if ( pReq->Cdb[0]==SCSI_CMD_WRITE_10 )
+						pTaskFile->Command = ATA_CMD_WRITE_FPDMA_QUEUED;
+				}
+				else if ( pReq->Cmd_Flag&CMD_FLAG_48BIT )
+				{
+					MV_DASSERT( !(pReq->Cmd_Flag&CMD_FLAG_NCQ) );
+
+					pTaskFile->Sector_Count = pReq->Cdb[8];
+					pTaskFile->Sector_Count_Exp = pReq->Cdb[7];
+
+					pTaskFile->LBA_Low = pReq->Cdb[5];
+					pTaskFile->LBA_Mid = pReq->Cdb[4];
+					pTaskFile->LBA_High = pReq->Cdb[3];
+					pTaskFile->LBA_Low_Exp = pReq->Cdb[2];
+
+					pTaskFile->Device = MV_BIT(6);
+
+					if ( pReq->Cdb[0]==SCSI_CMD_READ_10 )
+						pTaskFile->Command = ATA_CMD_READ_DMA_EXT;
+					else if ( pReq->Cdb[0]==SCSI_CMD_WRITE_10 )
+						pTaskFile->Command = ATA_CMD_WRITE_DMA_EXT;
+				}
+				else
+				{
+					/* 28 bit DMA */
+					pTaskFile->Sector_Count = pReq->Cdb[8];		/* Could be zero */
+	
+					pTaskFile->LBA_Low = pReq->Cdb[5];
+					pTaskFile->LBA_Mid = pReq->Cdb[4];
+					pTaskFile->LBA_High = pReq->Cdb[3];
+			
+					pTaskFile->Device = MV_BIT(6) | (pReq->Cdb[2]&0xF);
+					
+					MV_DASSERT( (pReq->Cdb[2]&0xF0)==0 );
+
+					if ( pReq->Cdb[0]==SCSI_CMD_READ_10 )
+						pTaskFile->Command = ATA_CMD_READ_DMA;
+					else if ( pReq->Cdb[0]==SCSI_CMD_WRITE_10 )
+						pTaskFile->Command = ATA_CMD_WRITE_DMA;
+				}
+
+				break;
+			}
+
+		case SCSI_CMD_VERIFY_10:
+			/* 
+			 * For the verify command, the size may need two bytes, especially on Windows.
+			 * For a 28 bit device, we have to split the request.
+			 * For a 48 bit device, we use ATA_CMD_VERIFY_EXT.
+			 */
+			if ( pDevice->Capacity&DEVICE_CAPACITY_48BIT_SUPPORTED )
+			{
+				pTaskFile->Sector_Count = pReq->Cdb[8];
+				pTaskFile->Sector_Count_Exp = pReq->Cdb[7];
+
+				pTaskFile->LBA_Low = pReq->Cdb[5];
+				pTaskFile->LBA_Mid = pReq->Cdb[4];
+				pTaskFile->LBA_High = pReq->Cdb[3];
+				pTaskFile->LBA_Low_Exp = pReq->Cdb[2];
+
+				pTaskFile->Device = MV_BIT(6);
+
+				pTaskFile->Command = ATA_CMD_VERIFY_EXT;
+			}
+			else
+			{
+				//TBD: If the device doesn't support 48 bit LBA we have to split this request.
+				//ATA_CMD_VERIFY
+				//MV_ASSERT(MV_FALSE);
+				//NOTE(review): the verify is not done exactly as the OS required;
+				//splitting the request takes effort, so we currently pretend it was fulfilled.
+				pTaskFile->Sector_Count = pReq->Cdb[8];
+				
+				pTaskFile->LBA_Low = pReq->Cdb[5];
+				pTaskFile->LBA_Mid = pReq->Cdb[4];
+				pTaskFile->LBA_High = pReq->Cdb[3];
+
+				pTaskFile->Device = MV_BIT(6) | (pReq->Cdb[2]&0xF);
+				
+				MV_DASSERT( (pReq->Cdb[2]&0xF0)==0 );
+
+				pTaskFile->Command = ATA_CMD_VERIFY;				
+			}
+
+			break;
+
+		case SCSI_CMD_MARVELL_SPECIFIC:
+			{
+				/* This request should be for the core module */
+				if ( pReq->Cdb[1]!=CDB_CORE_MODULE )
+					return MV_FALSE;
+				switch ( pReq->Cdb[2] )
+				{
+					case CDB_CORE_IDENTIFY:
+						pTaskFile->Command = ATA_CMD_IDENTIFY_ATA;
+						break;
+					
+					case CDB_CORE_SET_UDMA_MODE:
+						pTaskFile->Command = ATA_CMD_SET_FEATURES;
+						pTaskFile->Features = ATA_CMD_SET_TRANSFER_MODE;
+						pTaskFile->Sector_Count = 0x40 | pReq->Cdb[3];	/* 0x40 selects UDMA, low bits select the mode number */
+						MV_DASSERT( pReq->Cdb[4]==MV_FALSE );	/* Use UDMA mode */
+						//TBD: Check the 80-conductor cable in order to enable UDMA greater than 2.
+						break;
+
+					case CDB_CORE_SET_PIO_MODE:
+						pTaskFile->Command = ATA_CMD_SET_FEATURES;
+						pTaskFile->Features = ATA_CMD_SET_TRANSFER_MODE;
+						pTaskFile->Sector_Count = 0x08 | pReq->Cdb[3];	/* 0x08 selects PIO flow control mode */
+						break;
+
+					case CDB_CORE_ENABLE_WRITE_CACHE:
+						pTaskFile->Command = ATA_CMD_SET_FEATURES;
+						pTaskFile->Features = ATA_CMD_ENABLE_WRITE_CACHE;
+						break;
+			
+					case CDB_CORE_DISABLE_WRITE_CACHE:
+						pTaskFile->Command = ATA_CMD_SET_FEATURES;
+						pTaskFile->Features = ATA_CMD_DISABLE_WRITE_CACHE;
+						break;
+
+					case CDB_CORE_ENABLE_SMART:
+						pTaskFile->Command = ATA_CMD_SMART;
+						pTaskFile->Features = ATA_CMD_ENABLE_SMART;
+						pTaskFile->LBA_Mid = 0x4F;	/* SMART signature values required in LBA_Mid/LBA_High */
+						pTaskFile->LBA_High = 0xC2;
+						break;
+
+					case CDB_CORE_DISABLE_SMART:
+						pTaskFile->Command = ATA_CMD_SMART;
+						pTaskFile->Features = ATA_CMD_DISABLE_SMART;
+						pTaskFile->LBA_Mid = 0x4F;
+						pTaskFile->LBA_High = 0xC2;
+						break;
+
+					case CDB_CORE_SMART_RETURN_STATUS:
+						pTaskFile->Command = ATA_CMD_SMART;
+						pTaskFile->Features = ATA_CMD_SMART_RETURN_STATUS;
+						pTaskFile->LBA_Mid = 0x4F;
+						pTaskFile->LBA_High = 0xC2;
+						break;
+
+					case CDB_CORE_SHUTDOWN:
+						if ( pDevice->Capacity&DEVICE_CAPACITY_48BIT_SUPPORTED )
+							pTaskFile->Command = ATA_CMD_FLUSH_EXT;
+						else
+							pTaskFile->Command = ATA_CMD_FLUSH;
+						break;
+
+					case CDB_CORE_ENABLE_READ_AHEAD:	
+						pTaskFile->Command = ATA_CMD_SET_FEATURES;
+						pTaskFile->Features = ATA_CMD_ENABLE_READ_LOOK_AHEAD;
+						break;
+
+					case CDB_CORE_DISABLE_READ_AHEAD:
+						pTaskFile->Command = ATA_CMD_SET_FEATURES;
+						pTaskFile->Features = ATA_CMD_DISABLE_READ_LOOK_AHEAD;
+						break;
+					
+					case CDB_CORE_READ_LOG_EXT:
+						pTaskFile->Command = ATA_CMD_READ_LOG_EXT;
+						pTaskFile->Sector_Count = 1;	/* Read one sector */
+						pTaskFile->LBA_Low = 0x10;		/* Page 10h */
+						break;
+
+					default:
+						return MV_FALSE;
+				}
+				break;
+			}
+		case SCSI_CMD_SYNCHRONIZE_CACHE_10:
+			if ( pDevice->Capacity&DEVICE_CAPACITY_48BIT_SUPPORTED )
+				pTaskFile->Command = ATA_CMD_FLUSH_EXT;
+			else
+				pTaskFile->Command = ATA_CMD_FLUSH;
+			pTaskFile->Device = MV_BIT(6);
+			break;
+		case SCSI_CMD_START_STOP_UNIT:
+			if (pReq->Cdb[4] & MV_BIT(0))
+			{
+				pTaskFile->Command = ATA_CMD_SEEK;	/* start: issue a seek to spin the device up */
+				pTaskFile->Device = MV_BIT(6);
+			}
+			else
+			{
+				pTaskFile->Command = ATA_CMD_STANDBY_IMMEDIATE;	/* stop: put the device into standby */
+			}
+			break;
+		case SCSI_CMD_REQUEST_SENSE:
+		case SCSI_CMD_MODE_SELECT_10:
+		case SCSI_CMD_MODE_SENSE_10:
+		#ifndef _OS_BIOS	
+			MV_DPRINT(("Error: Unknown request: 0x%x.\n", pReq->Cdb[0]));
+		#endif
+
+		default:
+			return MV_FALSE;
+	}
+
+	/* 
+	 * Attention: Never return before this line if your return is MV_TRUE.
+	 * We need set the slave DEV bit here. 
+	 */
+	if ( pDevice->Is_Slave )		
+		pTaskFile->Device |= MV_BIT(4);
+
+	return MV_TRUE;
+}
+
+MV_BOOLEAN ATAPI_CDB2TaskFile(	/* build an ATA task file from a SCSI CDB for an ATAPI (packet) device; MV_FALSE = unsupported CDB */
+	IN PDomain_Device pDevice,
+	IN PMV_Request pReq, 
+	OUT PATA_TaskFile pTaskFile
+	)
+{
+	MV_ZeroMemory(pTaskFile, sizeof(ATA_TaskFile));
+
+	/* At the same time, set the command category as well. */
+	switch ( pReq->Cdb[0] )
+	{
+	case SCSI_CMD_MARVELL_SPECIFIC:
+		/* This request should be for the core module */
+		if ( pReq->Cdb[1]!=CDB_CORE_MODULE )
+			return MV_FALSE;
+
+		switch ( pReq->Cdb[2] )
+		{
+		case CDB_CORE_IDENTIFY:
+			pTaskFile->Command = ATA_CMD_IDENTIY_ATAPI;	/* macro name "IDENTIY" is a historic typo for IDENTIFY, kept for compatibility */
+			break;
+					
+		case CDB_CORE_SET_UDMA_MODE:
+			pTaskFile->Command = ATA_CMD_SET_FEATURES;
+			pTaskFile->Features = ATA_CMD_SET_TRANSFER_MODE;
+			if ( pReq->Cdb[4]==MV_TRUE )
+				pTaskFile->Sector_Count = 0x20 | pReq->Cdb[3];	/* MDMA mode */
+			else
+				pTaskFile->Sector_Count = 0x40 | pReq->Cdb[3];	/* UDMA mode*/
+
+			//TBD: Check the 80-conductor cable in order to enable UDMA greater than 2.
+			break;
+					
+		case CDB_CORE_SET_PIO_MODE:
+			pTaskFile->Command = ATA_CMD_SET_FEATURES;
+			pTaskFile->Features = ATA_CMD_SET_TRANSFER_MODE;
+			pTaskFile->Sector_Count = 0x08 | pReq->Cdb[3];	/* 0x08 selects PIO flow control mode */
+			break;
+
+		default:
+			return MV_FALSE;
+		}
+		break;
+#ifdef _OS_LINUX
+	case SCSI_CMD_READ_DISC_INFO:
+	/* unimplemented SCSI cmds */
+	/*	return MV_FALSE;   */
+#endif /* _OS_LINUX */
+	case SCSI_CMD_READ_10:
+	case SCSI_CMD_WRITE_10:
+	case SCSI_CMD_VERIFY_10:
+	case SCSI_CMD_INQUIRY:
+	case SCSI_CMD_READ_CAPACITY_10:
+	case SCSI_CMD_TEST_UNIT_READY:
+	case SCSI_CMD_MODE_SENSE_10:
+	case SCSI_CMD_MODE_SELECT_10:
+	case SCSI_CMD_PREVENT_MEDIUM_REMOVAL:
+	case SCSI_CMD_READ_TOC:
+	case SCSI_CMD_START_STOP_UNIT:
+	case SCSI_CMD_SYNCHRONIZE_CACHE_10:
+	case SCSI_CMD_REQUEST_SENSE:
+	default:
+		/* 
+		 * Use the ATA PACKET command; the SCSI CDB travels inside it. 
+		 */
+		/* Features: DMA, OVL, DMADIR */
+#if defined(USE_DMA_FOR_ALL_PACKET_COMMAND)
+		if ( !(pReq->Cmd_Flag&CMD_FLAG_NON_DATA) )
+		{
+			//if ( pReq->Cdb[0]!=SCSI_CMD_INQUIRY ) //ATAPI???
+			pTaskFile->Features |= MV_BIT(0);	/* bit 0 of Features requests DMA for the data phase */
+		}
+#elif defined(USE_PIO_FOR_ALL_PACKET_COMMAND)
+		/* do nothing */
+#else
+		if ( pReq->Cmd_Flag&CMD_FLAG_DMA )
+			//if ( SCSI_IS_READ(pReq->Cdb[0]) || 
+			//   SCSI_IS_WRITE(pReq->Cdb[0]) )
+			pTaskFile->Features |= MV_BIT(0);
+#endif
+		//TBD: OVL: overlapped.
+		//TBD: DMADIR in IDENTIFY PACKET DEVICE word 62
+
+		//TBD: Sector Count: Tag
+
+		/* ATAPI byte-count limit is carried in LBA_Mid (low byte) and LBA_High (high byte) */
+		if ( pReq->Data_Transfer_Length>0xFFFF )
+		{
+			pTaskFile->LBA_Mid = 0xFF;
+			pTaskFile->LBA_High = 0xFF;
+		}
+		else
+		{
+			pTaskFile->LBA_Mid = (MV_U8)pReq->Data_Transfer_Length;
+			pTaskFile->LBA_High = (MV_U8)(pReq->Data_Transfer_Length>>8);
+		}
+
+		pTaskFile->Command = ATA_CMD_PACKET;
+
+		break;
+	}
+
+	/* 
+	 * Attention: Never return before this line if your return is MV_TRUE.
+	 * We need set the slave DEV bit here. 
+	 */
+	if ( pDevice->Is_Slave )		
+		pTaskFile->Device |= MV_BIT(4);
+
+	return MV_TRUE;
+}
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/linux/color_print.h linux-2.6.25/drivers/scsi/mv/linux/color_print.h
--- linux-2.6.25.orig/drivers/scsi/mv/linux/color_print.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/linux/color_print.h	2008-07-28 18:42:43.331188775 +0200
@@ -0,0 +1,53 @@
+#ifndef __COLOR_PRINT_H__
+#define __COLOR_PRINT_H__
+/*
+ *  add fun to your (debug) output
+ *  A.C.
+ *  Sep 26th, 2006
+ *
+ *  color number verified in rxvt
+ */
+
+
+#define	   FG_BLACK     30
+#define	   FG_RED       31
+#define	   FG_GREEN     32
+#define	   FG_YELLOW    33
+#define	   FG_BLUE      34
+#define	   FG_MAGENTA   35
+#define	   FG_CYAN      36
+#define	   FG_WHITE     37
+
+#define	   BG_BLACK     40
+#define	   BG_RED       41
+#define	   BG_GREEN     42
+#define	   BG_YELLOW    43
+#define	   BG_BLUE      44
+#define	   BG_MAGENTA   45
+#define	   BG_CYAN      46
+#define	   BG_WHITE     47
+
+#define __TERM_COLOR(fg, bg) "\e["#fg";"#bg"m"
+
+#define TERM_COLOR(f,b) __TERM_COLOR(f,b)
+ 
+/* this varies , how do I get to know the default setting? */
+/* OK, default to set all attr off */
+#define DEFAULT_THEME "\e[0m"
+
+#ifdef __COLOR_DEBUG__
+#define __SET_THEME(fg, bg, x) TERM_COLOR(FG_##fg,BG_##bg) x DEFAULT_THEME
+#else  /*  __COLOR_DEBUG__ */
+#define __SET_THEME(fg, bg, x) x
+#endif /*  __COLOR_DEBUG__ */
+
+/* short for X_ON_BLACK */
+#define RED(x)      __SET_THEME(RED, BLACK, x)
+#define GREEN(x)    __SET_THEME(GREEN, BLACK, x)
+#define BLUE(x)     __SET_THEME(BLUE, BLACK, x)
+#define MAGENTA(x)  __SET_THEME(MAGENTA, BLACK, x)
+#define CYAN(x)     __SET_THEME(CYAN, BLACK, x)
+#define WHITE(x)    __SET_THEME(WHITE, BLACK, x)
+#define YELLOW(x)   __SET_THEME(YELLOW, BLACK, x)
+
+#endif /* __COLOR_PRINT_H__ */
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/linux/hba_exp.c linux-2.6.25/drivers/scsi/mv/linux/hba_exp.c
--- linux-2.6.25.orig/drivers/scsi/mv/linux/hba_exp.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/linux/hba_exp.c	2008-07-28 18:42:43.332188768 +0200
@@ -0,0 +1,582 @@
+#include "mv_include.h"
+#include "mv_os.h"
+
+#include "hba_header.h"
+#include "linux_helper.h"
+#ifdef CACHE_MODULE_SUPPORT
+#include "cache_mod.h"
+#endif
+/* For debug purpose only. */
+PHBA_Extension gHBA = NULL;
+
+extern Module_Interface module_set[];
+
+/*
+ * 
+ * Module interface function table
+ *
+ */
+MV_U32 HBA_ModuleGetResourceQuota(enum Resource_Type type, MV_U16 maxIo) /* Module_Interface.get_mem_size hook: bytes of cached memory the HBA module needs for maxIo outstanding requests; 0 for any other resource type. */
+{
+	MV_U32 size = 0;
+
+	/* HBA Extension quota */
+	if (type == RESOURCE_CACHED_MEMORY) {
+		/* Fixed part: HBA_Extension up to the variable Memory_Pool[] tail */
+		size = OFFSET_OF(HBA_Extension, Memory_Pool);
+		size = ROUNDING(size, 8);
+
+		/* MV_Request pool */
+		/* MV_Request is 64bit aligned. */
+		size += maxIo * MV_REQUEST_SIZE;
+		
+		if (maxIo > 1) /* maxIo==1 is the dump/hibernate path: reduced SG table */
+			size += sizeof(MV_SG_Entry) * MAX_SG_ENTRY * maxIo; 
+		else 
+			size += sizeof(MV_SG_Entry) * MAX_SG_ENTRY_REDUCED * maxIo;
+
+		/* Timer pool */
+		size += Timer_GetResourceQuota(maxIo);
+
+		MV_ASSERT(size == ROUNDING(size, 8));
+
+#ifdef SUPPORT_EVENT
+		size += sizeof(Driver_Event_Entry) * MAX_EVENTS;
+#endif
+		MV_ASSERT(size == ROUNDING(size, 8)); /* keep quota 64-bit rounded; HBA_ModuleInitialize carves this pool sequentially */
+
+		return size;
+	}
+
+	/* HBA doesn't need other kind of memory resource. */
+	return 0;
+}
+
+void HBA_ModuleInitialize(MV_PVOID This, MV_U32 extension_size, MV_U16 max_io) /* Module_Interface.module_initialize hook: carve Memory_Pool into request/SG/timer/event pools; callers set identity fields beforehand. */
+{
+	PHBA_Extension pHBA = (PHBA_Extension)This;
+	MV_PTR_INTEGER temp = (MV_PTR_INTEGER)pHBA->Memory_Pool;
+	MV_U16 i; /* fix: was MV_U8 -- max_io is MV_U16, a count > 255 would never terminate the loop below */
+	PMV_Request pReq = NULL;
+ 	MV_U32 sgt_size, sg_num;
+
+#ifdef SUPPORT_EVENT
+	PDriver_Event_Entry pEvent = NULL;
+#endif
+    
+	gHBA = pHBA;
+
+	MV_ASSERT( sizeof(MV_Request)==ROUNDING(sizeof(MV_Request),8) );
+	/* 
+	 * Initialize data structure however following variables have been set already.
+	 *	Device_Extension
+	 *	Is_Dump
+	 *	Base_Address
+	 *	Adapter_Bus_Number and Adapter_Device_Number
+	 *	Vendor_Id, Device_Id and Revision_Id
+	 */
+	pHBA->State = DRIVER_STATUS_IDLE;
+	pHBA->Io_Count = 0;
+	pHBA->Max_Io = max_io;
+
+	pHBA->Module_Manage.status = 0;
+
+	/* Initialize the free request queue. */
+	MV_LIST_HEAD_INIT(&pHBA->Free_Request);
+	MV_LIST_HEAD_INIT(&pHBA->Waiting_Request);
+	temp = ROUNDING( ((MV_PTR_INTEGER)temp), 8 );
+
+	if (max_io > 1)
+                sg_num = MAX_SG_ENTRY;
+        else
+                sg_num = MAX_SG_ENTRY_REDUCED;
+        sgt_size = sizeof(MV_SG_Entry) * sg_num; 
+
+	for ( i=0; i<max_io; i++ )
+	{
+		pReq = (PMV_Request)temp;
+		temp += MV_REQUEST_SIZE;
+
+		/* sg table follows each request header in the pool */
+                pReq->SG_Table.Entry_Ptr = (PMV_SG_Entry) temp; 
+                pReq->SG_Table.Max_Entry_Count = sg_num; 
+                temp += sgt_size;
+		List_AddTail(&pReq->Queue_Pointer, &pHBA->Free_Request);
+	}	
+
+#ifdef SUPPORT_EVENT
+	
+	MV_LIST_HEAD_INIT(&pHBA->Stored_Events);
+	MV_LIST_HEAD_INIT(&pHBA->Free_Events);
+	pHBA->Num_Stored_Events = 0;
+	pHBA->SequenceNumber = 0;	// Event sequence number
+
+	MV_ASSERT( sizeof(Driver_Event_Entry)==ROUNDING(sizeof(Driver_Event_Entry),8) );
+	temp = ROUNDING( ((MV_PTR_INTEGER)temp), 8 );
+
+	for ( i=0; i<MAX_EVENTS; i++ )
+	{
+		pEvent = (PDriver_Event_Entry)temp;
+		List_AddTail( &pEvent->Queue_Pointer, &pHBA->Free_Events );
+		temp += sizeof( Driver_Event_Entry );
+	}
+
+#endif
+
+	/* Timer pool sits at the end of Memory_Pool */
+	Timer_Initialize(&pHBA->Timer_Module, (MV_PU8)temp);
+
+#ifdef SUPPORT_TIMER
+	/* kick off the timer */
+	Timer_CheckRequest(pHBA->Device_Extension);
+#endif
+}
+
+void HBA_ModuleStart(MV_PVOID This) /* Module_Interface.module_start hook: HBA has no asynchronous start work, so report started immediately. */
+{
+	/* There is nothing we need do here. Just finish this function. */
+	HBA_ModuleStarted(This);
+}
+
+void HBA_ModuleShutdown(MV_PVOID This) /* Module_Interface.module_stop hook. */
+{
+	PHBA_Extension pHBA = (PHBA_Extension)This;
+	/* Clear the HBA data structure. Reset some variables if necessary. */
+
+	/* At this moment, the outstanding request count should be zero. */
+	MV_ASSERT(pHBA->Io_Count == 0);
+	
+	/* Stop the Timer */
+	Timer_Stop(&pHBA->Timer_Module);
+}
+
+void HBA_ModuleNotification(MV_PVOID This, 
+			    enum Module_Event event, 
+			    MV_U32 event_param) /* Forwards module events to the hot-plug message queue; a no-op when SUPPORT_HOT_PLUG is off. */
+{
+#ifdef SUPPORT_HOT_PLUG
+	hba_msg_insert(This, event, event_param);
+#endif /* SUPPORT_HOT_PLUG */
+}
+
+void HBA_ModuleSendRequest(MV_PVOID This, PMV_Request pReq) /* Handles management-API CDBs (APICDB0_*) addressed to the HBA itself; completes the request synchronously. */
+{
+	PHBA_Extension pHBA = (PHBA_Extension)This;
+	switch ( pReq->Cdb[0] )
+	{
+	case APICDB0_ADAPTER:
+		if (pReq->Cdb[1] == APICDB1_ADAPTER_GETINFO)
+			mvGetAdapterInfo( pHBA, pReq );
+		else
+			pReq->Scsi_Status = REQ_STATUS_INVALID_REQUEST;
+		break;
+
+#ifdef SUPPORT_EVENT
+	case APICDB0_EVENT:
+		if (pReq->Cdb[1] == APICDB1_EVENT_GETEVENT)
+			mvGetEvent( pHBA, pReq );
+		else
+			pReq->Scsi_Status = REQ_STATUS_INVALID_REQUEST;
+		break;
+#endif  /* SUPPORT_EVENT */
+
+	default:
+		pReq->Scsi_Status = REQ_STATUS_INVALID_REQUEST;
+	}
+	pReq->Completion(pReq->Cmd_Initiator, pReq); /* always completed inline: no HBA-level request is queued */
+}
+
+/* helper functions related to HBA_ModuleSendRequest */
+void mvGetAdapterInfo( MV_PVOID This, PMV_Request pReq ) /* Fills pReq->Data_Buffer with an Adapter_Info snapshot (driver version, PCI location, port count). */
+{
+	PHBA_Extension pHBA = (PHBA_Extension)This;
+	PAdapter_Info pAdInfo;
+
+	/* initialize */
+	pAdInfo = (PAdapter_Info)pReq->Data_Buffer;
+	MV_ZeroMemory(pAdInfo, sizeof(Adapter_Info));
+
+	/* TBD: some info are missing, will fill in later */
+
+	pAdInfo->DriverVersion.VerMajor = VER_MAJOR;
+	pAdInfo->DriverVersion.VerMinor = VER_MINOR;
+	pAdInfo->DriverVersion.VerOEM = VER_OEM;
+	pAdInfo->DriverVersion.VerBuild = VER_BUILD;
+
+	pAdInfo->SystemIOBusNumber = pHBA->Adapter_Bus_Number;
+	pAdInfo->SlotNumber = pHBA->Adapter_Device_Number;
+	pAdInfo->VenDevID = pHBA->Vendor_Id; /* NOTE(review): looks like VenDevID/SubVenDevID should each combine vendor+device, yet only one half is stored -- confirm against the API consumer */
+	pAdInfo->SubVenDevID = pHBA->Device_Id;
+
+	if ( pHBA->Device_Id == DEVICE_ID_THORLITE_2S1P ||
+	     pHBA->Device_Id == DEVICE_ID_THORLITE_2S1P_WITH_FLASH )
+		pAdInfo->PortCount = 3;
+	else if ( pHBA->Device_Id == DEVICE_ID_THORLITE_0S1P )
+		pAdInfo->PortCount = 1;
+	else
+		pAdInfo->PortCount = 5;
+
+	pAdInfo->AlarmSupport = MV_FALSE;
+	pAdInfo->MaxBlockPerPD = 8;		/* hardcoded to 8 for now */
+
+	pReq->Scsi_Status = REQ_STATUS_SUCCESS;
+}
+
+#ifdef SUPPORT_EVENT
+
+void mvGetEvent( MV_PVOID This, PMV_Request pReq ) /* Drains up to MAX_EVENTS_RETURNED stored events into the caller's EventRequest buffer, recycling entries to Free_Events. */
+{
+	PHBA_Extension pHBA = (PHBA_Extension)This;
+	PEventRequest pEventReq = (PEventRequest)pReq->Data_Buffer;
+	PDriver_Event_Entry pfirst_event;
+	MV_U8 count = 0;
+
+	pEventReq->Count = 0;
+	
+	if ( pHBA->Num_Stored_Events > 0 )
+	{	
+		MV_DASSERT( !List_Empty(&pHBA->Stored_Events) );
+		while (!List_Empty(&pHBA->Stored_Events) && ( count < MAX_EVENTS_RETURNED))
+		{
+			pfirst_event = List_GetFirstEntry((&pHBA->Stored_Events), Driver_Event_Entry, Queue_Pointer); /* oldest first: Stored_Events is FIFO (HBA_AddEvent appends at tail) */
+			MV_CopyMemory( &pEventReq->Events[count], &pfirst_event->Event, sizeof (DriverEvent));
+			pHBA->Num_Stored_Events--;
+			List_AddTail( &pfirst_event->Queue_Pointer, &pHBA->Free_Events );
+			count++;
+		}
+		pEventReq->Count = count;
+	}
+
+	pReq->Scsi_Status = REQ_STATUS_SUCCESS;
+	return;
+}
+
+#endif
+
+void HBA_ModuleReset(MV_PVOID extension) /* Module_Interface.module_reset hook: re-runs initialization over the existing pools. */
+{
+	HBA_ModuleInitialize(extension, sizeof(HBA_Extension), 32);//TBD: hardcoded max_io=32 may disagree with the value used at probe time (MAX_REQUEST_NUMBER) -- confirm
+}
+
+/*
+ * 
+ * Other exposed functions
+ *
+ */
+extern void HBA_HandleWaitingList(PHBA_Extension pHBA);
+/* The extension is the calling module extension. It can be any module extension. */
+void HBA_ModuleStarted(MV_PVOID extension) /* Called by each module when its start completes; chains start of the next-lower module, module 0 (HBA) finishing last. */
+{
+	PHBA_Extension pHBA = (PHBA_Extension)Module_GetHBAExtension(extension);
+	PModule_Manage p_module_manage = &pHBA->Module_Manage;
+	MV_U16 module_id = Module_GetModuleId(extension);
+
+	/*MV_ASSERT( (module_id>=0) && (module_id<MAX_MODULE_NUMBER) );*/
+	MV_ASSERT( module_id<MAX_MODULE_NUMBER );
+
+	p_module_manage->status |= (1<<module_id); /* one "started" bit per module */
+
+	/* Whether all the modules are started. */
+	if ( module_id==0 )
+	{
+		MV_PRINT("success to init chip\n");
+		
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
+		atomic_set(&pHBA->hba_sync, 0);
+#else
+		complete(&pHBA->cmpl); /* wake the probe path waiting for start-up to finish */
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11) */
+
+		/* We are totally ready for requests handling. */
+		pHBA->State = DRIVER_STATUS_STARTED;
+
+		/* Trigger request handling */
+		HBA_HandleWaitingList(pHBA);
+
+		/* Module 0 is the last module */
+		HBA_ModuleNotification(pHBA, EVENT_MODULE_ALL_STARTED, 0);
+	}
+	else
+	{
+		/* Start the next module. From the lowerer to the higher. */
+		Module_StartAll(p_module_manage, module_id-1);
+	}
+}
+
+void HBA_GetResource(
+	MV_PVOID extension,
+	enum Resource_Type type,
+	MV_PVOID resource
+	) /* Hands a module its pre-assigned uncached (DMA) memory region; asserts on any other resource type. */
+{
+	PHBA_Extension pHBA = (PHBA_Extension)Module_GetHBAExtension(extension);
+	PModule_Manage pModuleManage = &pHBA->Module_Manage;
+	MV_U16 moduleId = Module_GetModuleId(extension);
+
+	PAssigned_Uncached_Memory pResource = (PAssigned_Uncached_Memory)resource;
+
+	if ( type==RESOURCE_UNCACHED_MEMORY )
+	{
+		pResource->Physical_Address.value = pModuleManage->resource[moduleId].uncached_physical_address.value;
+		pResource->Virtual_Address = pModuleManage->resource[moduleId].uncached_address;
+		pResource->Byte_Size = pModuleManage->resource[moduleId].uncached_size;
+		return;
+	}
+
+	MV_ASSERT(MV_FALSE); /* cached memory is assigned at probe, never requested through here */
+}
+
+void HBA_GetControllerInfor(
+	IN MV_PVOID extension,
+	OUT PController_Infor pController
+	) /* Copies the PCI identity (BARs, vendor/device/revision IDs) into the caller's Controller_Infor. */
+{
+	PHBA_Extension pHBA = (PHBA_Extension)Module_GetHBAExtension(extension);
+	MV_U8 i;
+	for ( i=0; i<MAX_BASE_ADDRESS; i++ )
+	{
+		pController->Base_Address[i] = pHBA->Base_Address[i];
+	}
+	pController->Vendor_Id = pHBA->Vendor_Id;
+	pController->Device_Id = pHBA->Device_Id;
+	pController->Revision_Id = pHBA->Revision_Id;
+}
+
+void HBA_SleepMillisecond(
+	IN MV_PVOID extension,
+	IN MV_U32 millisecond
+	) /* Busy-wait delay (mdelay); safe in atomic context but burns CPU -- long delays will be visible to watchdogs, hence the touch_* calls. */
+{
+	mdelay(millisecond);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) && defined(__x86_64__)
+	touch_nmi_watchdog();
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
+	/* can't remember exactly in what version this was introduced. */
+	touch_softlockup_watchdog();
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) */
+
+#endif
+
+}
+void HBA_SleepMicrosecond(
+	IN MV_PVOID extension,
+	IN MV_U32 microseconds
+	) /* Busy-wait delay in microseconds; chunked into <=1000us udelay() calls because large single udelay arguments are unreliable. */
+{
+	while (microseconds > 1000) {
+		udelay(1000);
+		microseconds -= 1000;
+	}
+
+	udelay(microseconds);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) && defined(__x86_64__)
+	touch_nmi_watchdog();
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
+	/* can't remember exactly in what version this was introduced. */
+	touch_softlockup_watchdog();
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) */
+
+#endif
+								
+}
+
+void HBA_TimerRoutine(unsigned long DeviceExtension) /* kernel timer callback: runs the registered module timer routine under the HBA lock */
+{
+#ifndef SUPPORT_TIMER
+	PHBA_Extension pHBA   = (PHBA_Extension)Module_GetHBAExtension(DeviceExtension);
+	PTimer_Module  pTimer = &pHBA->Timer_Module;
+	unsigned long  flags;
+
+#ifdef __AC_DBG__
+	unsigned long now;
+	
+	MV_DASSERT( pTimer->routine!=NULL );
+	
+	now = jiffies;
+	spin_lock_irqsave(&pHBA->lock, flags);
+	pTimer->routine(pTimer->context);
+	spin_unlock_irqrestore(&pHBA->lock, flags);
+	MV_DBG(DMSG_ACDB, "Timer routine %p used %lu jiffies.\n", 
+	       pTimer->routine, jiffies-now);
+	/*dump_stack()*/
+#else /* __AC_DBG__ */
+	MV_DASSERT( pTimer->routine!=NULL );
+	spin_lock_irqsave(&pHBA->lock, flags);	/* fix: was pHBA->lock (missing &) -- must pass the spinlock's address as in the debug branch */
+	pTimer->routine(pTimer->context);
+	spin_unlock_irqrestore(&pHBA->lock, flags);
+#endif /* __AC_DBG__ */
+#endif /* SUPPORT_TIMER */
+
+}
+
+void HBA_RequestTimer(
+	IN MV_PVOID extension,
+	IN MV_U32 millisecond,
+	MV_VOID (*routine) (MV_PVOID)
+	) /* (Re)arms the single HBA kernel timer to call `routine(extension)` after `millisecond` ms; a pending timer is cancelled first, so only one request is ever outstanding. */
+{
+	PHBA_Extension pHBA = (PHBA_Extension)Module_GetHBAExtension(extension);
+	PTimer_Module pTimer = &pHBA->Timer_Module;
+	u64 jif_offset;
+	
+	pTimer->routine = routine;
+	pTimer->context = extension;
+
+	del_timer(&pHBA->timer);
+	pHBA->timer.function = HBA_TimerRoutine;
+	pHBA->timer.data = (unsigned long)extension;
+	jif_offset = (u64) (millisecond * HZ); /* ms -> jiffies via 64-bit do_div to avoid 32-bit overflow */
+	do_div(jif_offset, 1000);
+	pHBA->timer.expires = jiffies + 1 + jif_offset; /* +1 guarantees at least one tick of delay */
+	add_timer(&pHBA->timer);
+}
+
+MV_VOID HBA_ModuleMonitor(MV_PVOID extension) /* Module_Interface.module_monitor hook: debug dump of the outstanding I/O count. */
+{
+	PHBA_Extension pHBA = (PHBA_Extension)extension;
+	MV_PRINT("HBA: Io_Count=0x%x.\n", pHBA->Io_Count);
+}
+
+MV_VOID 
+HBA_GetNextModuleSendFunction(
+	IN MV_PVOID self_extension,
+	OUT MV_PVOID *next_extension,
+	OUT MV_VOID (**next_function)(MV_PVOID , PMV_Request)
+	) /* Returns the extension and sendrequest entry of the next-lower module in the stack. */
+{
+	PHBA_Extension pHBA = (PHBA_Extension)Module_GetHBAExtension(self_extension);
+	MV_U16 module_id = Module_GetModuleId(self_extension);
+
+	module_id++; /* NOTE(review): no bound check -- caller must not be the last module (MAX_MODULE_NUMBER-1); confirm callers guarantee this */
+	*next_extension = pHBA->Module_Manage.resource[module_id].module_extension;
+	*next_function = module_set[module_id].module_sendrequest;
+}
+
+MV_VOID 
+HBA_GetNextModuleExtension(
+	IN MV_PVOID self_extension,
+	OUT MV_PVOID *next_extension
+	) /* As HBA_GetNextModuleSendFunction but returns only the next module's extension. */
+{
+	PHBA_Extension pHBA = (PHBA_Extension)Module_GetHBAExtension(self_extension);
+	MV_U16 module_id = Module_GetModuleId(self_extension);
+
+	module_id++; /* NOTE(review): unbounded as above -- caller must not be the last module */
+	*next_extension = pHBA->Module_Manage.resource[module_id].module_extension;
+}
+
+MV_PVOID
+HBA_GetModuleExtension(
+	IN MV_PVOID self_extension,
+	IN MV_U8 module_id
+	) /* Looks up an arbitrary module's extension by id (debug-asserted in range). */
+{
+	PHBA_Extension pHBA = (PHBA_Extension)Module_GetHBAExtension(self_extension);
+
+	MV_DASSERT( module_id<MAX_MODULE_NUMBER );
+	return pHBA->Module_Manage.resource[module_id].module_extension;
+}
+
+/* Seconds since midnight, Jan 1, 1970 */
+MV_U32 HBA_GetTimeInSecond(void) /* Unix epoch seconds, truncated to 32 bits. */
+{
+	/*
+	 * Seconds since January 1, 1970, 00:00:00 GMT. 
+	 * A negative number is the number of milliseconds before January 1, 1970, 00:00:00 GMT.
+	 */
+	struct timeval tv;
+	do_gettimeofday(&tv);
+	return (MV_U32)tv.tv_sec;
+}
+
+/* Milliseconds elapsed in the current (UTC) day.
+ * Fix: the original set tv_nsec = usec * NSEC_PER_SEC (off by 10^9/10^3) and
+ * returned nanoseconds-since-epoch truncated to 32 bits, not ms-of-day. */
+MV_U32 HBA_GetMillisecondInDay(void)
+{
+	MV_U32 ret = 0;
+	struct timeval x;
+	do_gettimeofday(&x);
+	/* seconds-of-day scaled to ms plus the sub-second part in ms */
+	ret = (MV_U32)((x.tv_sec % (24 * 60 * 60)) * 1000 + x.tv_usec / 1000);
+	return ret;
+}
+
+void hba_spin_lock_irq(spinlock_t* plock) /* spin_lock_irq wrapper; WARN_ON guards against calling with interrupts already disabled (spin_unlock_irq would then re-enable them). */
+{
+	WARN_ON(irqs_disabled());
+	spin_lock_irq(plock);                             	
+}
+
+void hba_spin_unlock_irq(spinlock_t* plock) /* counterpart of hba_spin_lock_irq. */
+{
+	spin_unlock_irq(plock);                             	
+}
+
+void hba_swap_buf_le16(u16 *buf, unsigned int words) /* Converts a little-endian 16-bit word buffer (e.g. ATA IDENTIFY data) to CPU order in place; no-op on little-endian hosts. */
+{
+#ifdef __BIG_ENDIAN
+	unsigned int i;
+
+	for (i=0; i < words; i++)
+                buf[i] = le16_to_cpu(buf[i]);
+#endif /* __BIG_ENDIAN */
+}
+
+#ifdef __AC_PROF__
+unsigned long __hba_current_time(void)
+{
+	return jiffies;
+}
+#endif /* __AC_PROF__ */
+
+#ifdef SUPPORT_EVENT
+MV_BOOLEAN HBA_AddEvent( 
+	IN MV_PVOID extension,
+	IN MV_U32 eventID,
+	IN MV_U16 deviceID,        
+	IN MV_U8 severityLevel,        
+	IN MV_U8 param_cnt, 
+	IN MV_PU32 params
+	) /* Records a driver event; when the pool is exhausted the oldest stored event is overwritten. Returns MV_FALSE only when param_cnt exceeds MAX_EVENT_PARAMS. */
+{
+	PHBA_Extension pHBA = (PHBA_Extension)Module_GetHBAExtension(extension);
+	PDriver_Event_Entry pEvent;
+	static MV_U32 sequenceNo = 1; /* NOTE(review): static counter shared by all adapters and incremented without a lock -- confirm callers serialize (e.g. hold pHBA->lock) */
+	if (param_cnt > MAX_EVENT_PARAMS)
+		return MV_FALSE;
+
+	if ( List_Empty(&pHBA->Free_Events) )
+	{
+		// No free entry, we need to reuse the oldest entry from Stored_Events.
+		MV_ASSERT(!List_Empty(&pHBA->Stored_Events));
+		MV_ASSERT(pHBA->Num_Stored_Events == MAX_EVENTS);
+		pEvent = List_GetFirstEntry((&pHBA->Stored_Events), Driver_Event_Entry, Queue_Pointer);
+	}
+	else
+	{
+		pEvent = List_GetFirstEntry((&pHBA->Free_Events), Driver_Event_Entry, Queue_Pointer);
+		pHBA->Num_Stored_Events++;
+		MV_ASSERT(pHBA->Num_Stored_Events <= MAX_EVENTS);
+	}
+
+	pEvent->Event.AdapterID = pHBA->Adapter_Device_Number;  
+	pEvent->Event.EventID = eventID; 
+	pEvent->Event.SequenceNo = sequenceNo++;
+	pEvent->Event.Severity = severityLevel;
+	pEvent->Event.DeviceID = deviceID;
+//	pEvent->Event.Param_Cnt = param_cnt;
+	pEvent->Event.TimeStamp = HBA_GetTimeInSecond();
+
+	if (param_cnt > 0 && params != NULL)
+		MV_CopyMemory( (MV_PVOID)pEvent->Event.Params, (MV_PVOID)params, param_cnt * 4 );
+
+	List_AddTail( &pEvent->Queue_Pointer, &pHBA->Stored_Events ); /* tail-append keeps Stored_Events ordered oldest-first for mvGetEvent */
+
+	return MV_TRUE;
+}
+#endif /* SUPPORT_EVENT */
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/linux/hba_exp.h linux-2.6.25/drivers/scsi/mv/linux/hba_exp.h
--- linux-2.6.25.orig/drivers/scsi/mv/linux/hba_exp.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/linux/hba_exp.h	2008-07-28 18:42:43.332188768 +0200
@@ -0,0 +1,130 @@
+#if !defined(HBA_EXPOSE_H)
+#define HBA_EXPOSE_H
+
+#ifdef SUPPORT_EVENT
+#include "com_event_struct.h"
+#include "com_event_define.h"
+#endif
+
+/*
+ * Module_Interface function table
+ */
+MV_U32 HBA_ModuleGetResourceQuota(enum Resource_Type type, MV_U16 maxIo);
+void HBA_ModuleInitialize(MV_PVOID, MV_U32, MV_U16);
+void HBA_ModuleStart(MV_PVOID);
+void HBA_ModuleShutdown(MV_PVOID);
+void HBA_ModuleNotification(MV_PVOID, enum Module_Event, MV_U32);
+void HBA_ModuleSendRequest(MV_PVOID, PMV_Request);
+void HBA_ModuleMonitor(MV_PVOID);
+void HBA_ModuleReset(MV_PVOID extension);
+
+/*
+ * Other exposed functions
+ */
+void HBA_ModuleStarted(MV_PVOID extension);
+
+void HBA_GetResource(
+	MV_PVOID extension,
+	enum Resource_Type type,
+	MV_PVOID resource
+	);
+/* 
+ * For HBA_GetResource. If the type is RESOURCE_UNCACHED_MEMORY, 
+ * resource data type is PAssigned_Uncached_Memory.
+ */
+typedef struct _Assigned_Uncached_Memory
+{
+	MV_PVOID			Virtual_Address;
+	MV_PHYSICAL_ADDR	Physical_Address;
+	MV_U32				Byte_Size;
+	MV_U32				Reserved0;
+} Assigned_Uncached_Memory, *PAssigned_Uncached_Memory;
+
+typedef struct _Controller_Infor
+{
+	MV_LPVOID Base_Address[MAX_BASE_ADDRESS];
+	MV_U16 Vendor_Id;
+	MV_U16 Device_Id;
+	MV_U8 Revision_Id;
+	MV_U8 Reserved[3];
+} Controller_Infor, *PController_Infor;
+
+#ifdef SUPPORT_EVENT
+// wrapper for DriverEvent, needed to implement queue
+typedef struct _Driver_Event_Entry
+{
+	List_Head Queue_Pointer;
+	DriverEvent Event;
+} Driver_Event_Entry, *PDriver_Event_Entry;
+#endif
+
+void HBA_GetControllerInfor(
+	IN MV_PVOID extension,
+	OUT PController_Infor pController
+	);
+
+void HBA_SleepMillisecond(
+	IN MV_PVOID extension,
+	IN MV_U32 millisecond
+	);
+
+void HBA_SleepMicrosecond(
+	IN MV_PVOID extension,
+	IN MV_U32 microsecond
+	);
+
+void HBA_RequestTimer(
+	IN MV_PVOID extension,
+	IN MV_U32 millisecond,
+	MV_VOID (*TimerService) (MV_PVOID)
+	);
+
+void HBA_GetNextModuleSendFunction(
+	IN MV_PVOID self_extension,
+	OUT MV_PVOID *next_extension,
+	OUT MV_VOID (**next_function)(MV_PVOID , PMV_Request)
+	);
+
+MV_PVOID
+HBA_GetModuleExtension(
+	IN MV_PVOID self_extension,
+	IN MV_U8 module_id
+	);
+
+void HBA_GetNextModuleExtension(
+	IN MV_PVOID self_extension,
+	OUT MV_PVOID *next_extension
+	);
+
+/* It returns the seconds since midnight, Jan 1, 1970*/
+MV_U32 HBA_GetTimeInSecond(void);
+
+/* It returns the millisecond since midnight. It's in one day only. */
+MV_U32 HBA_GetMillisecondInDay(void);
+
+#ifdef SUPPORT_EVENT
+MV_BOOLEAN HBA_AddEvent( 
+	IN MV_PVOID extension,
+	IN MV_U32 eventId,
+	IN MV_U16 deviceId,
+	IN MV_U8 severityLevel,
+	IN MV_U8 param_cnt,
+	IN MV_PU32 params
+	);
+#endif
+
+void mvGetAdapterInfo( MV_PVOID This, PMV_Request pReq );
+
+void hba_spin_lock_irq(spinlock_t* plock);
+void hba_spin_unlock_irq(spinlock_t* plock);
+void hba_swap_buf_le16(u16 *buf, unsigned int words);
+
+#ifdef __AC_PROF__
+unsigned long __hba_current_time(void);
+#endif /* __AC_PROF__ */
+
+#ifdef SUPPORT_EVENT
+void mvGetEvent( MV_PVOID This, PMV_Request pReq );
+#endif /* SUPPORT_EVENT */
+
+#endif /* HBA_EXPOSE_H */
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/linux/hba_header.h linux-2.6.25/drivers/scsi/mv/linux/hba_header.h
--- linux-2.6.25.orig/drivers/scsi/mv/linux/hba_header.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/linux/hba_header.h	2008-07-28 18:42:43.332188768 +0200
@@ -0,0 +1,20 @@
+/*
+ *
+ * a work around for the header hell of Odin driver
+ * July 6th, 2006
+ * A.C. <ake at marvell dot com>
+ */
+
+#ifndef __MV_HBA_HEADER_LINUX__
+#define  __MV_HBA_HEADER_LINUX__
+
+struct _HBA_Extension;
+typedef struct _HBA_Extension HBA_Extension, *PHBA_Extension;
+
+#include "hba_mod.h"
+#ifndef SUPPORT_TIMER
+#include "hba_timer.h"
+#endif
+#include "hba_inter.h"
+
+#endif /* __MV_HBA_HEADER_LINUX__ */
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/linux/hba_inter.h linux-2.6.25/drivers/scsi/mv/linux/hba_inter.h
--- linux-2.6.25.orig/drivers/scsi/mv/linux/hba_inter.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/linux/hba_inter.h	2008-07-28 18:42:43.333188740 +0200
@@ -0,0 +1,80 @@
+#if !defined(HBA_INTERNAL_H)
+
+#define HBA_INTERNAL_H
+
+#include "hba_header.h"
+
+struct _HBA_Extension
+{
+	/* Device extention */
+	MV_PVOID host_data;
+
+	struct list_head        next;
+	struct pci_dev 		*pcidev;
+	spinlock_t              lock;
+	struct semaphore	sem;
+	struct timer_list	timer;
+	struct Scsi_Host	*host;
+
+	MV_PVOID 	uncached_virtual_address[MAX_MODULE_NUMBER];
+	MV_U32          uncached_size[MAX_MODULE_NUMBER];
+	dma_addr_t      uncached_physical[MAX_MODULE_NUMBER];
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
+	atomic_t                hba_sync;
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11) */
+	struct completion       cmpl;
+
+	/* System resource */
+	MV_PVOID                Base_Address[MAX_BASE_ADDRESS];
+
+	MV_U32                  State;	
+	/* Is OS during hibernation or crash dump? */
+	MV_BOOLEAN              Is_Dump;
+	/* Outstanding requests count */
+	MV_U8                   Io_Count;
+	/* Maximum requests number we can handle */
+	MV_U16                  Max_Io;
+
+	/* Adapter information */
+	MV_U8                   Adapter_Bus_Number;
+	MV_U8                   Adapter_Device_Number;
+	MV_U16                  Vendor_Id;
+	MV_U16                  Device_Id;
+	MV_U8                   Revision_Id;
+	MV_U8                   Reserved0;
+
+	/* Module management related variables */
+	struct _Module_Manage   Module_Manage;
+
+	/* Timer module */
+	struct _Timer_Module    Timer_Module;
+
+	/* Free MV_Request queue */
+	List_Head               Free_Request;
+	/* MV_Request waiting queue */
+	List_Head               Waiting_Request;
+
+#ifdef SUPPORT_EVENT
+	List_Head               Stored_Events;
+	List_Head               Free_Events;
+	MV_U32	                SequenceNumber;
+	MV_U8                   Num_Stored_Events;
+#endif /* SUPPORT_EVENT */
+
+#ifdef CACHE_MODULE_SUPPORT
+	MV_PVOID                cache_res;
+#endif
+	/* 
+	 * Memory pool can be used as variable data structures like timer 
+	 * This item must always be put at the end of this data structure.
+	 */
+	MV_U8                   Memory_Pool[1];
+};
+
+#define DRIVER_STATUS_IDLE      1    /* The first status */
+#define DRIVER_STATUS_STARTING  2    /* Begin to start all modules */
+#define DRIVER_STATUS_STARTED   3    /* All modules are all settled. */
+
+#endif /* HBA_INTERNAL_H */
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/linux/hba_mod.c linux-2.6.25/drivers/scsi/mv/linux/hba_mod.c
--- linux-2.6.25.orig/drivers/scsi/mv/linux/hba_mod.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/linux/hba_mod.c	2008-07-28 18:42:43.333188740 +0200
@@ -0,0 +1,391 @@
+#include "mv_include.h"
+#include "linux_main.h"
+#include "hba_mod.h"
+
+#include "hba_inter.h"
+
+/*
+ * Pre-defined module function table
+ * Please match this predefined module set with the enum Module_Id
+ * Pay attention: If you want to change the following data structure,
+ * please change Module_Id as well.
+ */
+Module_Interface module_set[MAX_MODULE_NUMBER] = 
+{
+	{
+		MODULE_HBA,
+		HBA_ModuleGetResourceQuota,
+		HBA_ModuleInitialize,
+		HBA_ModuleStart,
+		HBA_ModuleShutdown,
+		NULL, /* - USE ME AND DIE!! HBA_ModuleNotification, */
+		HBA_ModuleSendRequest,
+		HBA_ModuleReset,
+		HBA_ModuleMonitor,
+	#ifdef SUPPORT_VIRTUAL_AND_PHYSICAL_SG
+		HBA_ModuleGetVirtualSG
+	#endif
+	},
+#ifdef CACHE_MODULE_SUPPORT
+	{
+		MODULE_CACHE,
+		Cache_ModuleGetResourceQuota,
+		Cache_ModuleInitialize,
+		Cache_ModuleStart,
+		Cache_ModuleShutdown,
+		Cache_ModuleNotification,
+		Cache_ModuleSendRequest,
+		Cache_ModuleReset,
+		Cache_ModuleMonitor,
+	#ifdef SUPPORT_VIRTUAL_AND_PHYSICAL_SG
+		Cache_ModuleGetVirtualSG
+	#endif
+	},
+#endif
+#ifdef RAID_DRIVER
+	{
+		MODULE_RAID,
+		RAID_ModuleGetResourceQuota,
+		RAID_ModuleInitialize,
+		RAID_ModuleStart,
+		RAID_ModuleShutdown,
+		RAID_ModuleNotification,
+		RAID_ModuleSendRequest,
+		RAID_ModuleReset,
+		RAID_ModuleMonitor,
+	#ifdef SUPPORT_VIRTUAL_AND_PHYSICAL_SG
+		RAID_ModuleGetVirtualSG
+	#endif
+	},
+#endif
+	{
+		MODULE_CORE,
+		Core_ModuleGetResourceQuota,
+		Core_ModuleInitialize,
+		Core_ModuleStart,
+		Core_ModuleShutdown,
+		Core_ModuleNotification,
+		Core_ModuleSendRequest,
+		Core_ModuleReset,
+		Core_ModuleMonitor,
+	#ifdef SUPPORT_VIRTUAL_AND_PHYSICAL_SG
+		NULL
+	#endif
+	}
+};
+
+void Module_InitializeAll(PModule_Manage p_module_manage, MV_U16 max_io) /* Synchronously initializes all modules, highest id (core) first; asserts module_set order matches Module_Id. */
+{
+	MV_I8 i = 0;
+	MV_PVOID module_extension = NULL;
+	MV_U32 extension_size = 0;
+
+	/* Module initialization is one synchronized function. */
+	for ( i=MAX_MODULE_NUMBER-1; i>=0; i-- )
+	{
+		/* I use this chance to check whether the module_set matches with Module_Id */
+		MV_ASSERT( module_set[i].module_id==i );
+
+		if ( module_set[i].module_initialize )
+		{
+			module_extension = p_module_manage->resource[i].module_extension;
+			extension_size = p_module_manage->resource[i].extension_size;
+			module_set[i].module_initialize(module_extension, extension_size, max_io);
+		}
+	}
+}
+
+void Module_StartAll(PModule_Manage p_module_manage, MV_U8 begin_module) /* Starts exactly ONE module (the first from begin_module downward that has a start hook); that module's HBA_ModuleStarted callback re-enters here for the next one. */
+{
+	MV_I8 i = 0;
+
+	/* 
+	 * Start module from the lower level, the first one is the core driver.
+	 * Every time we only start one module.
+	 */
+	for ( i=begin_module; i>=0; i-- )
+	{
+		MV_ASSERT(begin_module<MAX_MODULE_NUMBER);
+		if ( module_set[i].module_start )
+		{
+			module_set[i].module_start(
+				p_module_manage->resource[i].module_extension);
+			return; /* asynchronous: wait for the module to call HBA_ModuleStarted */
+		}
+
+		/* If the module_start function is NULL, continue to the next. */
+		p_module_manage->status |= (1<<i);
+	}
+}
+
+MV_U32 mod_get_mem_size(PHBA_Extension pHBA, enum Resource_Type type,
+			MV_U16 max_io) /* Sums every module's quota for `type`. NOTE(review): despite its name, for RESOURCE_UNCACHED_MEMORY this also ALLOCATES each module's DMA region as a side effect. */
+{
+        int i = 0;
+        unsigned long quota = 0;
+        unsigned long  oneQuota = 0;
+
+        for (i=0; i<MAX_MODULE_NUMBER; i++) {
+                if (module_set[i].get_mem_size != NULL) {
+                        oneQuota = module_set[i].get_mem_size(type, max_io);
+                        quota += ROUNDING(oneQuota, 8);
+                        MV_DBG(DMSG_KERN,
+                               "%s quota for module %d is 0x%lx.\n", 
+                               type == RESOURCE_CACHED_MEMORY? "Cached memory" : "Uncached memory",
+                               i, 
+                               oneQuota);
+
+                        if (oneQuota) {
+                                if (type == RESOURCE_UNCACHED_MEMORY) {
+                                        MV_PVOID uncached_virtual = NULL;
+                                        uncached_virtual = pci_alloc_consistent(pHBA->pcidev,
+                                                                                                  oneQuota,
+                                                                                                  &pHBA->uncached_physical[i]);
+                                        pHBA->uncached_size[i] = oneQuota;
+                                        if (uncached_virtual != NULL)
+                                                pHBA->uncached_virtual_address[i] = uncached_virtual;
+#ifdef CACHE_MODULE_SUPPORT
+                                        else if (i == MODULE_CACHE) 
+                                                MV_DPRINT(("Module %d asks for uncached memory failed.\n", i));
+#endif
+                                        else
+                                                return -1; /* NOTE(review): return type is MV_U32 -- -1 wraps to 0xFFFFFFFF; callers must test against (MV_U32)-1, not < 0 */
+                                }
+                        }
+                }
+        }
+
+        /* Each extension needs one extension header which is hidden from module. */
+        if ( type==RESOURCE_CACHED_MEMORY )
+                quota += MODULE_HEADER_SIZE * MAX_MODULE_NUMBER;
+
+        MV_DBG(DMSG_KERN, "%s quota totally is 0x%lx.\n",
+                type==RESOURCE_CACHED_MEMORY? "Cached memory" : "Uncached memory",
+                quota);
+
+        return quota;
+}
+
+void Module_AssignModuleExtension(MV_PVOID device_extension, 
+				  MV_U16 max_io) /* Lays out [Module_Header | extension] pairs for every module inside the single cached allocation and records them in the HBA's Module_Manage table. */
+{
+	MV_PTR_INTEGER ptemp = (MV_PTR_INTEGER)device_extension;
+	PHBA_Extension pHBA = NULL;
+	PModule_Manage module_manage = NULL;
+	PModule_Header header = NULL;
+	MV_U8 module_id;
+	MV_U32 require;
+
+	MV_ASSERT(MODULE_HBA==0); /* layout depends on the HBA extension being the first carved region */
+	pHBA = (PHBA_Extension)( (MV_PTR_INTEGER)device_extension+MODULE_HEADER_SIZE );
+	module_manage = &pHBA->Module_Manage;
+
+	for (module_id=0; module_id<MAX_MODULE_NUMBER; module_id++) {
+		if (module_set[module_id].get_mem_size==NULL )
+			continue;
+
+		require = module_set[module_id].get_mem_size(RESOURCE_CACHED_MEMORY, max_io);
+		require = ROUNDING(require, 8);
+		
+		header = (PModule_Header)ptemp;
+		header->extension_size = require;
+		header->header_size = MODULE_HEADER_SIZE;
+		header->module_id = module_id;
+		header->hba_extension = pHBA; /* lets Module_GetHBAExtension() work from any module extension */
+
+		module_manage->resource[module_id].module_extension = (MV_PVOID)(ptemp+MODULE_HEADER_SIZE);
+		module_manage->resource[module_id].extension_size = require;
+
+		ptemp += MODULE_HEADER_SIZE+require;
+	}
+}
+
+void
+Module_AssignUncachedMemory(
+	IN PModule_Manage module_manage,
+	IN MV_PVOID virtual_addr,
+	IN MV_PHYSICAL_ADDR physical_addr,
+	IN MV_U32 memory_size,
+	IN MV_U16 max_io,
+	MV_U8 module_id
+	) /* Records one module's uncached (DMA) region in the Module_Manage table; asserts the module's quota fits in memory_size. */
+{
+	MV_PTR_INTEGER temp_virtual = (MV_PTR_INTEGER)virtual_addr;
+	//MV_PHYSICAL_ADDR temp_physical = physical_addr;
+
+	MV_U32 require;
+
+	/* Assign Uncached Memory */
+	if ( module_set[module_id].get_mem_size == NULL )
+		return;
+
+	require = module_set[module_id].get_mem_size(RESOURCE_UNCACHED_MEMORY,
+						     max_io);
+	require = ROUNDING(require, 8);	
+
+	module_manage->resource[module_id].uncached_size = require;
+	module_manage->resource[module_id].uncached_address = (MV_PVOID)virtual_addr;
+	module_manage->resource[module_id].uncached_physical_address = physical_addr;
+
+	temp_virtual += require;
+	/* Do we have enough uncached memory? */
+	MV_ASSERT( (temp_virtual-(MV_PTR_INTEGER)virtual_addr)<=memory_size );
+}
+
+void Module_ShutdownAll(PModule_Manage p_module_manage) /* Synchronously stops all modules in reverse id order (HBA module last, mirroring init order). */
+{
+	MV_I8 i = 0;
+	MV_PVOID module_extension = NULL;
+
+	/* Module stop is one synchronized function. */
+	for ( i=MAX_MODULE_NUMBER-1; i>=0; i-- )
+	{
+		if ( module_set[i].module_stop )
+		{
+			module_extension = p_module_manage->resource[i].module_extension;
+			module_set[i].module_stop(module_extension);
+		}
+	}
+}
+
+
+void *mv_hba_init_ext(struct pci_dev *dev) /* Probe-time setup: vmalloc the cached extension area, map BARs, pci_alloc_consistent each module's DMA region. Returns the HBA extension or NULL (all resources released on failure). */
+{
+	int i;
+
+	PModule_Header pheader;
+	PHBA_Extension phba;
+	PModule_Manage pmod;
+
+	unsigned long total_size = 0;
+	unsigned long size = 0;
+
+	unsigned long addr;
+	unsigned long range;
+
+	dma_addr_t    dma_addr;
+	BUS_ADDRESS   bus_addr;
+	MV_PHYSICAL_ADDR phy_addr;
+	
+
+	/* allocate normal (CACHED) mem */
+	for (i=0; i<MAX_MODULE_NUMBER; i++) {
+		size = module_set[i].get_mem_size(RESOURCE_CACHED_MEMORY,
+						  MAX_REQUEST_NUMBER); /* NOTE(review): get_mem_size is called unchecked here, unlike elsewhere -- assumes every entry is non-NULL; confirm */
+
+		if ( 0 != size )
+			total_size += ROUNDING(size, 8);
+		
+		WARN_ON(size != ROUNDING(size, 8));
+		
+	}
+
+	/* init hba ext structure */
+	total_size += ROUNDING(MODULE_HEADER_SIZE * MAX_MODULE_NUMBER, 8);
+
+	MV_DBG(DMSG_HBA, "THOR : Memory quota is 0x%lx bytes.\n",
+	       total_size);
+
+	pheader = (PModule_Header) vmalloc(total_size);
+	if ( NULL == pheader )
+		return NULL;
+
+	memset(pheader, 0, total_size);
+	Module_AssignModuleExtension(pheader, MAX_REQUEST_NUMBER);
+	
+	phba = (PHBA_Extension) head_to_hba(pheader);
+	phba->host_data = pheader; /* keep the vmalloc base so mv_hba_release_ext() can vfree it */
+	phba->pcidev    = dev;
+	phba->Vendor_Id = dev->vendor;
+	phba->Device_Id = dev->device;
+
+	/* map pci resource */
+	if (pci_read_config_byte(dev, PCI_REVISION_ID, &phba->Revision_Id)) {
+		printk("THOR : Failed to get hba's revision id.\n");
+		goto ext_err_mem;
+	}
+	
+	for (i=0; i<MAX_BASE_ADDRESS; i++) {
+		addr  = pci_resource_start(dev, i);
+		range = pci_resource_len(dev, i);
+
+		if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
+			phba->Base_Address[i] =(MV_PVOID) ioremap(addr, range);
+		else
+			phba->Base_Address[i] =(MV_PVOID) addr; /* I/O port BARs are stored as raw port numbers, not mapped */
+
+		MV_DBG(DMSG_HBA, "THOR : BAR %d : %p.\n", i, 
+		       phba->Base_Address[i]);
+	}
+	
+	/* allocate consistent dma memory (uncached) */
+	size = 0;
+	total_size = 0;
+	pmod = &phba->Module_Manage;
+	
+	for (i=0; i<MAX_MODULE_NUMBER; i++) {
+		size = module_set[i].get_mem_size(RESOURCE_UNCACHED_MEMORY,
+						  MAX_REQUEST_NUMBER);
+		if (0 == size) 
+			continue;
+		
+		WARN_ON(size != ROUNDING(size, 8));
+
+		size = ROUNDING(size, 8);
+		pmod->resource[i].uncached_address = (MV_PVOID) \
+			pci_alloc_consistent(dev, size, &dma_addr);
+
+		if ( NULL == pmod->resource[i].uncached_address )
+			goto ext_err_dma;
+		
+		pmod->resource[i].uncached_size = size;
+		bus_addr = (BUS_ADDRESS) dma_addr;
+		phy_addr.low  = LO_BUSADDR(bus_addr); /* split the bus address into the driver's 32+32 representation */
+		phy_addr.high = HI_BUSADDR(bus_addr);
+		pmod->resource[i].uncached_physical_address = phy_addr;
+		
+	}
+
+	MV_DBG(DMSG_HBA, "THOR : HBA ext struct init'ed at %p.\n", phba);
+	return phba;
+
+ext_err_dma:
+	/* free every region allocated so far; uncached_size!=0 marks the successful ones */
+	for (i=0; i<MAX_MODULE_NUMBER; i++) {
+		if ( pmod->resource[i].uncached_size ) {
+			phy_addr = pmod->resource[i].uncached_physical_address;
+			dma_addr = (dma_addr_t) ( phy_addr.low | \
+						  ((u64) phy_addr.high)<<32 );
+			pci_free_consistent(dev, 
+					    pmod->resource[i].uncached_size,
+					    pmod->resource[i].uncached_address,
+					    dma_addr);
+		}
+	}
+ext_err_mem:
+	vfree(pheader);
+	return NULL;
+}
+
+void mv_hba_release_ext(PHBA_Extension phba) /* Teardown counterpart of mv_hba_init_ext: free each module's DMA region, then the vmalloc'd extension area. */
+{
+	int i;
+	
+	dma_addr_t dma_addr;
+	MV_PHYSICAL_ADDR phy_addr;
+	
+	PModule_Manage pmod  = &phba->Module_Manage;
+	
+	for (i=0; i<MAX_MODULE_NUMBER; i++) {
+		if ( pmod->resource[i].uncached_size ) {
+			phy_addr = pmod->resource[i].uncached_physical_address;
+			dma_addr = (dma_addr_t) ( phy_addr.low | \
+						  ((u64) phy_addr.high)<<32 ); /* rebuild the dma handle from the stored 32+32 halves */
+			pci_free_consistent(phba->pcidev, 
+					    pmod->resource[i].uncached_size,
+					    pmod->resource[i].uncached_address,
+					    dma_addr);
+		}
+	}
+	
+	vfree(phba->host_data); /* host_data is the vmalloc base recorded in mv_hba_init_ext */
+}
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/linux/hba_mod.h linux-2.6.25/drivers/scsi/mv/linux/hba_mod.h
--- linux-2.6.25.orig/drivers/scsi/mv/linux/hba_mod.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/linux/hba_mod.h	2008-07-28 18:42:43.333188740 +0200
@@ -0,0 +1,121 @@
+#if !defined(MODULE_MANAGE_H)
+
+#define MODULE_MANAGE_H
+
+#include "hba_exp.h"
+#include "core_exp.h"
+#ifdef RAID_DRIVER
+#include "raid_exp.h"
+#endif
+
+/*
+ * Module definition
+ *
+ * Per-module operations table (vtable).  Each driver layer (core, RAID,
+ * ...) exports one of these; the HBA layer drives all modules through
+ * the same entry points.
+ */
+typedef struct _Module_Interface
+{
+	MV_U8 module_id;
+	/* Bytes of uncached (DMA-coherent) or cached memory this module
+	 * needs for 'max_io' outstanding requests of the given type. */
+	MV_U32 (*get_mem_size)(enum Resource_Type type, MV_U16 max_io);
+	void (*module_initialize)(MV_PVOID extension, MV_U32 extension_size, MV_U16 max_io);
+	void (*module_start)(MV_PVOID extension);
+	void (*module_stop)(MV_PVOID extension);
+	void (*module_notification)(MV_PVOID extension, enum Module_Event event, MV_PVOID param);
+	void (*module_sendrequest)(MV_PVOID extension, PMV_Request pReq);
+	void (*module_reset)(MV_PVOID extension);
+	void (*module_monitor)(MV_PVOID extension);
+} Module_Interface, *PModule_Interface;
+
+/*
+ * Module Management
+ */
+typedef struct _Module_Header
+{
+	/* 
+	 * Here is the hidden module header. 
+	 * Module is not aware of this except for HBA module management.
+	 */
+	MV_U8		header_size;		/* Module header size */
+	MV_U8		module_id;			/* It's also the module is in enum Module_Id */
+	MV_U8		reserved0[2];
+	MV_U32		extension_size;		/* size of the extension, header is not included. */
+	MV_PVOID	hba_extension;		/* point to the pHBA extension, not the header. */
+
+} Module_Header, * PModule_Header;
+
+/* Size must be 64 bit rounded. */
+#define MODULE_HEADER_SIZE	ROUNDING(sizeof(Module_Header), 8)
+
+/* Recover the hidden Module_Header that sits MODULE_HEADER_SIZE bytes
+ * in front of every module extension. */
+#define Module_GetModuleHeader(extension)	\
+	((PModule_Header)((MV_PTR_INTEGER)extension-MODULE_HEADER_SIZE))
+
+#define Module_GetModuleId(extension)		\
+	(Module_GetModuleHeader(extension)->module_id)
+
+#define Module_GetHBAExtension(extension)	\
+	(Module_GetModuleHeader(extension)->hba_extension)
+
+/* Test (do NOT modify) the module's "started" bit in the status bitmap.
+ * The previous definition used "&=", which cleared every OTHER module's
+ * status bit as a side effect of a mere query. */
+#define Module_IsStarted(p_module_manage, module_id)	\
+	((p_module_manage)->status&(1<<(module_id)))
+
+/* The HBA extension proper starts right after the hidden header. */
+#define head_to_hba(head)	\
+	((MV_PTR_INTEGER)head+MODULE_HEADER_SIZE)
+
+/* Per-module slice of the memory carved out by the HBA layer. */
+typedef struct _Module_Resource
+{
+	/* Extension assigned to this module */
+	MV_PVOID	    module_extension;
+	MV_U32		    extension_size;	/* Extension size */
+	MV_U32		    uncached_size;	/* Uncached memory size */
+        /* Uncached memory virtual address */
+	MV_PVOID	    uncached_address;	
+	/* Uncached memory physical address */
+	MV_PHYSICAL_ADDR    uncached_physical_address;	
+} Module_Resource, *PModule_Resource;
+
+typedef struct _Module_Manage
+{
+	Module_Resource resource[MAX_MODULE_NUMBER];
+        /* One bit for one module. If started, the bit is set. */
+	MV_U8			status;	
+	MV_U8			reserved0[7];
+} Module_Manage, *PModule_Manage;
+
+/* Called by a module when its start phase completes. */
+void HBA_ModuleStarted(MV_PVOID This);
+
+/* Total memory of 'type' required across all modules for 'max_io'. */
+MV_U32 mod_get_mem_size(PHBA_Extension pHBA, enum Resource_Type type, 
+			MV_U16 max_io);
+
+void 
+Module_AssignModuleExtension(
+	IN MV_PVOID device_extension, 
+	IN MV_U16 max_io
+	);
+
+void
+Module_AssignUncachedMemory(
+	IN PModule_Manage module_manage,
+	IN MV_PVOID virtual_addr,
+	IN MV_PHYSICAL_ADDR physical_addr,
+	IN MV_U32 memory_size,
+	IN MV_U16 max_io,
+	IN MV_U8 module_id
+	);
+
+void 
+Module_InitializeAll(
+	IN PModule_Manage p_module_manage,
+	IN MV_U16 max_io
+	);
+
+void 
+Module_StartAll(
+	IN PModule_Manage p_module_manage, 
+	IN MV_U8 begin_module
+	);
+
+void Module_ShutdownAll(IN PModule_Manage p_module_manage);
+
+/* Linux-side allocate/free of the whole extension area (hba_mod.c). */
+void *mv_hba_init_ext(struct pci_dev *dev);
+void mv_hba_release_ext(PHBA_Extension phba);
+
+#endif /* MODULE_MANAGE_H */
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/linux/hba_timer.c linux-2.6.25/drivers/scsi/mv/linux/hba_timer.c
--- linux-2.6.25.orig/drivers/scsi/mv/linux/hba_timer.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/linux/hba_timer.c	2008-07-28 18:42:43.334188718 +0200
@@ -0,0 +1,367 @@
+#include "mv_include.h"
+
+#include "hba_header.h"
+
+/* Interval (in jiffies) between house-keeper timer runs: half a second.
+ * Parenthesized so the macro is safe in any expression context. */
+#define KEEPER_SHIFT (HZ/2)
+
+/* Hot-plug message queue and the single-threaded worker that drains it. */
+static hba_msg_queue mv_msg_queue;
+static struct workqueue_struct *mv_msg_workqueue;
+static struct work_struct mv_msg_work_t;
+static struct work_struct *mv_msg_work = &mv_msg_work_t;
+static struct timer_list mv_keeper_timer;
+
+/* MSG_QUEUE_IDLE / MSG_QUEUE_PROC: whether mv_proc_queue is running. */
+static int __msg_queue_state;
+
+/* non-zero value will make keeper_timer stop re-registrating itself */
+static unsigned int mv_keeper_exit_flag;
+
+
+static inline int queue_state_get(void)
+{
+	return __msg_queue_state;
+}
+
+static inline void queue_state_set(int state)
+{
+	__msg_queue_state = state;
+}
+
+/*
+ * hba_proc_msg - handle one queued hot-plug message: add or remove the
+ * scsi_device at (channel 0, target pmsg->param, lun 0) on the owning
+ * HBA's host.  Runs from the mv_msg_workqueue worker (process context).
+ */
+static void hba_proc_msg(hba_msg *pmsg)
+{
+	PHBA_Extension phba;
+	struct scsi_device *psdev;
+
+	/* Assert before the first dereference -- the original asserted
+	 * only after pmsg->data had already been read. */
+	MV_ASSERT(pmsg);
+
+	/* we don't do things without pmsg->data */
+	if ( NULL == pmsg->data )
+		return;
+
+	phba = (PHBA_Extension) Module_GetHBAExtension(pmsg->data);
+
+	MV_DBG(DMSG_HBA, "__MV__ In hba_proc_msg.\n");
+
+	switch (pmsg->msg) {
+	case EVENT_DEVICE_ARRIVAL:
+		if ( scsi_add_device(phba->host, 0, pmsg->param, 0) )
+			MV_DBG(DMSG_SCSI, 
+			       "__MV__ add scsi disk %d-%d-%d failed.\n",
+			       0, pmsg->param, 0);
+		else
+			MV_DBG(DMSG_SCSI,
+			       "__MV__ add scsi disk %d-%d-%d.\n",
+			       0, pmsg->param, 0);
+		break;
+	case EVENT_DEVICE_REMOVAL:
+		psdev = scsi_device_lookup( phba->host, 0, pmsg->param, 0);
+
+		if ( NULL != psdev ) {
+			MV_DBG(DMSG_SCSI, 
+			       "__MV__ remove scsi disk %d-%d-%d.\n",
+			       0, pmsg->param, 0);
+			scsi_remove_device(psdev);
+			/* drop the reference taken by scsi_device_lookup */
+			scsi_device_put(psdev);
+		} else {
+			MV_DBG(DMSG_SCSI,
+			       "__MV__ no disk to remove %d-%d-%d\n",
+			       0, pmsg->param, 0);
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+/* a work queue func */
+/*
+ * mv_proc_queue - worker routine: drain every pending message from
+ * mv_msg_queue.tasks, processing each outside the lock, then recycle it
+ * onto the free list.  The queue state flips to IDLE only while the
+ * lock is held and the task list is seen empty, so hba_house_keeper
+ * cannot observe IDLE with work still queued.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
+static void mv_proc_queue(void *data)
+#else
+static void mv_proc_queue(struct work_struct *data)
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) */
+{
+	hba_msg *pmsg;
+	
+	/* work on queue non-stop, pre-empty me! */
+	queue_state_set(MSG_QUEUE_PROC);
+
+	while (1) {
+		MV_DBG(DMSG_HBA, "__MV__ process queue starts.\n");
+		MV_LOCK_IRQ(&mv_msg_queue.lock);
+		if ( list_empty(&mv_msg_queue.tasks) ) {
+			/* it's important we put queue_state_set here. */
+			queue_state_set(MSG_QUEUE_IDLE);
+			MV_UNLOCK_IRQ(&mv_msg_queue.lock);
+			MV_DBG(DMSG_HBA, "__MV__ process queue ends.\n");
+			break;
+		}
+		/* peek the head; it stays on .tasks while we process it */
+		pmsg = list_entry(mv_msg_queue.tasks.next, hba_msg, msg_list);
+		MV_UNLOCK_IRQ(&mv_msg_queue.lock);
+
+		/* may call scsi_add_device/scsi_remove_device -- must not
+		 * hold the spinlock here */
+		hba_proc_msg(pmsg);
+		
+		/* clean the pmsg before returning it to free?*/
+		pmsg->data = NULL;
+		MV_LOCK_IRQ(&mv_msg_queue.lock);
+		list_move_tail(&pmsg->msg_list, &(mv_msg_queue.free));
+		MV_UNLOCK_IRQ(&mv_msg_queue.lock);
+		MV_DBG(DMSG_HBA, "__MV__ process queue ends.\n");
+	}
+
+}
+
+/* Nonzero when no hot-plug messages are pending.  Lockless snapshot:
+ * callers (the keeper timer) tolerate a stale answer. */
+static inline MV_U32 hba_msg_queue_empty(void)
+{
+	return list_empty(&mv_msg_queue.tasks);
+}
+
+
+/*
+ * hba_house_keeper - periodic timer callback (every KEEPER_SHIFT
+ * jiffies).  If messages are pending and the worker is idle, kick the
+ * workqueue; then re-arm itself unless mv_keeper_exit_flag was raised
+ * by hba_house_keeper_exit.
+ */
+static void hba_house_keeper(unsigned long data)
+{
+	/* test to see if it's neccessary, when you have time - A.C. */
+	if ( mv_keeper_exit_flag ) {
+		MV_DBG(DMSG_HBA, "__MV__ Mom calls me home.\n");
+		return;
+	}
+
+	/* queue_work returns 0 when the work is already queued, which the
+	 * IDLE check should normally prevent */
+	if (!hba_msg_queue_empty() && MSG_QUEUE_IDLE == queue_state_get()) {
+		if ( !queue_work(mv_msg_workqueue, mv_msg_work))
+			MV_DBG(DMSG_HBA, "__MV__ work queue insert error.\n");
+	}
+
+	/* re-register for the next tick */
+	mv_keeper_timer.expires = jiffies + KEEPER_SHIFT;
+	add_timer(&mv_keeper_timer);
+}
+
+
+/*
+ * hba_msg_queue_init - put all MSG_QUEUE_DEPTH preallocated messages on
+ * the free list and create the single-threaded worker that drains the
+ * task list.
+ */
+static void hba_msg_queue_init(void)
+{
+	int i;
+	
+	spin_lock_init(&mv_msg_queue.lock);
+
+/* as we're in init, there should be no need to hold the spinlock*/
+	INIT_LIST_HEAD(&(mv_msg_queue.free));
+	INIT_LIST_HEAD(&(mv_msg_queue.tasks));
+
+
+	for (i=0; i<MSG_QUEUE_DEPTH; i++) {
+		list_add_tail(&mv_msg_queue.msgs[i].msg_list, 
+			      &mv_msg_queue.free);
+	}
+	
+	/* NOTE(review): return value unchecked -- a NULL workqueue here
+	 * would oops later in queue_work/flush_workqueue.  Confirm the
+	 * caller can tolerate or propagate this failure. */
+	mv_msg_workqueue = create_singlethread_workqueue("MV_RAID");
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
+	INIT_WORK(mv_msg_work, mv_proc_queue, NULL);
+#else
+	INIT_WORK(mv_msg_work, mv_proc_queue);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) */
+}
+
+
+/* Set up the message queue/workqueue and prepare (but do not arm) the
+ * keeper timer; hba_house_keeper_run starts it. */
+void hba_house_keeper_init(void)
+{
+	hba_msg_queue_init();
+
+	queue_state_set(MSG_QUEUE_IDLE);
+
+	init_timer(&mv_keeper_timer);
+}
+
+/* Arm the keeper timer; it re-registers itself every KEEPER_SHIFT
+ * jiffies from hba_house_keeper until hba_house_keeper_exit. */
+void hba_house_keeper_run(void)
+{
+	mv_keeper_timer.function = hba_house_keeper;
+	mv_keeper_timer.data     = 0;
+	mv_keeper_timer.expires  = jiffies+KEEPER_SHIFT;
+
+	add_timer(&mv_keeper_timer);
+}
+
+/*
+ * hba_house_keeper_exit - tear down in dependency order: first stop the
+ * timer from re-arming, then wait for any queued work, then destroy the
+ * workqueue.
+ */
+void hba_house_keeper_exit(void)
+{
+	
+	mv_keeper_exit_flag = 1; /* stop its re-registration */
+
+	/* stop timer before workqueue as work is scheduled by timer */
+	del_timer_sync(&mv_keeper_timer);
+
+	flush_workqueue(mv_msg_workqueue);
+
+	/* in fact, destroy_workqueue does flush_workqueue ...  */
+	destroy_workqueue(mv_msg_workqueue);
+}
+
+/*
+ * hba_msg_insert - queue a hot-plug message for the worker thread.
+ * Takes a free slot, fills it and moves it onto the task list, all
+ * under the queue lock.  If no slot is free the message is dropped
+ * (with a debug print).  Safe from any context (irqsave lock).
+ */
+void hba_msg_insert(void *data, unsigned int msg, unsigned int param)
+{
+	unsigned long flags;
+	hba_msg *slot;
+
+	MV_DBG(DMSG_HBA, "__MV__ msg insert  %d.\n", msg);
+
+	spin_lock_irqsave(&mv_msg_queue.lock, flags);
+
+	if (list_empty(&mv_msg_queue.free)) {
+		/* should wreck some havoc ...*/
+		MV_DBG(DMSG_HBA, "-- MV -- Message queue is full.\n");
+		spin_unlock_irqrestore(&mv_msg_queue.lock, flags);
+		return;
+	}
+
+	slot = list_entry(mv_msg_queue.free.next, hba_msg, msg_list);
+	slot->data  = data;
+	slot->msg   = msg;
+	/* every message kind stores param verbatim (the original switch
+	 * had identical branches) */
+	slot->param = param;
+
+	list_move_tail(&slot->msg_list, &mv_msg_queue.tasks);
+	spin_unlock_irqrestore(&mv_msg_queue.lock, flags);
+}
+
+/* The timer module reserves no uncached memory, whatever the queue
+ * depth ('max_io' is intentionally unused). */
+MV_U32 Timer_GetResourceQuota(MV_U16 max_io)
+{
+	return 0;
+}
+
+/*
+ * Timer_Initialize - reset the timer module: all request tags free and
+ * the running time stamp at zero.  'pool' is unused here (the module's
+ * resource quota is 0, see Timer_GetResourceQuota).
+ */
+void Timer_Initialize(
+	IN OUT PTimer_Module This,
+	IN MV_PU8 pool)
+{
+#ifdef SUPPORT_TIMER
+	/* unused locals (temp/pTimerReq/i) removed -- they only produced
+	 * compiler warnings */
+	Tag_Init( &This->Tag_Pool, MAX_TIMER_REQUEST );
+	This->Time_Stamp.value = 0;
+#endif
+}
+
+/* Intentionally empty: nothing to tear down (no OS timer is owned by
+ * this module; ticking is driven externally via Timer_CheckRequest). */
+void Timer_Stop(PTimer_Module This)
+{
+}
+
+#ifdef SUPPORT_TIMER 
+/*
+ * Timer_AddRequest - schedule 'routine(context)' to run after
+ * 'time_unit' ticks (each tick is TIMER_INTERVAL, advanced by
+ * Timer_CheckRequest).  Returns the slot index, usable with
+ * Timer_CancelRequest, or NO_CURRENT_TIMER if no slot is free.
+ */
+MV_U8 Timer_AddRequest(	
+	IN MV_PVOID extension,
+	IN MV_U32 time_unit,
+	IN VOID (*routine) (MV_PVOID),
+	IN MV_PVOID context
+	)
+{
+	PHBA_Extension pHBA = (PHBA_Extension)Module_GetHBAExtension(extension);
+	PTimer_Module pTimer = &pHBA->Timer_Module;
+	PTimer_Request pTimerReq;
+	MV_U8 index;
+
+	if ( !Tag_IsEmpty( &pTimer->Tag_Pool ) )
+	{
+		index = Tag_GetOne( &pTimer->Tag_Pool );
+		pTimerReq = &pTimer->Running_Requests[index];
+
+		pTimerReq->Valid = MV_TRUE;
+		pTimerReq->Context = context;
+		pTimerReq->Routine = routine;
+		/* absolute deadline in the module's virtual time.
+		 * NOTE(review): Time_Stamp is declared MV_U64 in
+		 * hba_timer.h yet accessed via .value -- presumably MV_U64
+		 * is a struct/union; confirm against mv_os.h. */
+		pTimerReq->Time_Stamp.value = pTimer->Time_Stamp.value + time_unit * TIMER_INTERVAL;	
+
+		return index;
+	}
+
+	// shouldn't happen - we should always allocate enough timer slots for all devices
+	MV_DASSERT( MV_FALSE );
+	return NO_CURRENT_TIMER;
+}
+	
+/*
+ * Timer_CheckRequest - advance the module's virtual clock by one tick
+ * (TIMER_INTERVAL) and fire every valid request whose deadline has
+ * passed, releasing its slot afterwards unless the callback itself
+ * already cancelled/re-armed it.
+ */
+void Timer_CheckRequest(	
+	IN MV_PVOID DeviceExtension
+	)
+{
+	PHBA_Extension pHBA = (PHBA_Extension)head_to_hba(DeviceExtension);
+	PTimer_Module pTimer = &pHBA->Timer_Module;
+	PTimer_Request pTimerReq;
+	MV_U8 i;
+	/* unused 'List_Head *pPos' removed (compiler warning) */
+
+	pTimer->Time_Stamp.value += TIMER_INTERVAL;
+
+	for (i=0; i<MAX_TIMER_REQUEST; i++)
+	{
+		pTimerReq = &pTimer->Running_Requests[i];
+		
+		if( pTimerReq->Valid && (pTimerReq->Time_Stamp.value <= pTimer->Time_Stamp.value) )
+		{
+			// time to call the function
+			MV_DPRINT(("Timer checking requests: found request @ time %d\n", pTimerReq->Time_Stamp.value));
+			MV_DASSERT( pTimerReq->Routine != NULL );
+			pTimerReq->Routine( pTimerReq->Context );
+
+			/* the routine may have cancelled its own slot; only
+			 * release the tag if it is still marked valid */
+			if( pTimerReq->Valid )
+			{
+				pTimerReq->Valid = MV_FALSE;
+				Tag_ReleaseOne( &pTimer->Tag_Pool, i );
+			}
+		}
+	}
+#if 0
+	ScsiPortNotification( RequestTimerCall,
+						  pHBA->Device_Extension,
+						  Timer_CheckRequest,
+						  TIMER_INTERVAL * 1000 );
+#endif						 
+}
+
+/*
+ * Timer_CancelRequest - invalidate a pending timer slot and return its
+ * tag to the pool.  'request_index' must be a value previously handed
+ * out by Timer_AddRequest.
+ */
+void Timer_CancelRequest(
+	IN MV_PVOID extension,
+	IN MV_U8 request_index
+	)
+{
+	PHBA_Extension pHBA = (PHBA_Extension)Module_GetHBAExtension(extension);
+	PTimer_Module pTimer = &pHBA->Timer_Module;
+
+	pTimer->Running_Requests[request_index].Valid = MV_FALSE;
+	Tag_ReleaseOne( &pTimer->Tag_Pool, request_index );
+}
+#endif
+
+/* for req */
+/*
+ * hba_add_timer - arm the per-request error-handling timer.  'timeout'
+ * is in seconds; 'function' is invoked with the request pointer (cast
+ * to unsigned long) when the timer fires.
+ */
+void hba_add_timer(PMV_Request req, int timeout,
+		   MV_VOID (*function)(MV_PVOID data))
+{
+	struct timer_list *t = &req->eh_timeout;
+
+	t->data     = (unsigned long) req;
+	t->expires  = jiffies + timeout * HZ;	/* seconds -> jiffies */
+	t->function = (void (*)(unsigned long)) function;
+
+	add_timer(t);
+}
+	
+/*
+ * hba_remove_timer - disarm a request's eh timer if armed; .function
+ * doubles as the "armed" flag (see hba_init_timer / hba_add_timer).
+ */
+void hba_remove_timer(PMV_Request req)
+{
+	/* should be using del_timer_sync, but ... HBA->lock ... */
+	if ( req->eh_timeout.function ) {
+		del_timer(&req->eh_timeout);
+		req->eh_timeout.function = NULL;
+	}
+}
+
+/* Prepare a request's eh timer for use; .function == NULL means
+ * "not armed" to hba_remove_timer. */
+void hba_init_timer(PMV_Request req)
+{
+	/*
+	 * as we have no init routine for req, we'll do init_timer every 
+	 * time it is used until we could uniformly init. all reqs
+	 */
+	req->eh_timeout.function = NULL;
+	init_timer(&req->eh_timeout);
+}
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/linux/hba_timer.h linux-2.6.25/drivers/scsi/mv/linux/hba_timer.h
--- linux-2.6.25.orig/drivers/scsi/mv/linux/hba_timer.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/linux/hba_timer.h	2008-07-28 18:42:43.334188718 +0200
@@ -0,0 +1,104 @@
+#if !defined(TIMER_H)
+
+#define TIMER_H
+
+#include "mv_include.h"
+
+/* Per-request timeouts, in seconds (used with hba_add_timer). */
+#define HBA_REQ_TIMER_AFTER_RESET 15
+#define HBA_REQ_TIMER 10
+#define HBA_REQ_TIMER_IOCTL (HBA_REQ_TIMER_AFTER_RESET+3)
+
+/* State of the hot-plug message worker (see hba_timer.c). */
+enum _tag_hba_msg_state{
+	MSG_QUEUE_IDLE=0,
+	MSG_QUEUE_PROC
+};
+
+/* One queued hot-plug event: 'data' is a module extension, 'param' the
+ * target id (see hba_proc_msg). */
+typedef struct _tag_hba_msg {
+	MV_PVOID data;
+	MV_U32   msg;
+	MV_U32   param;
+	struct  list_head msg_list;
+}hba_msg;
+
+#define MSG_QUEUE_DEPTH 32
+
+/* Fixed pool of messages shuttled between 'free' and 'tasks' lists,
+ * both protected by 'lock'. */
+typedef struct _tag_hba_msg_queue {
+	spinlock_t lock;
+	struct list_head free;
+	struct list_head tasks;
+	hba_msg msgs[MSG_QUEUE_DEPTH];
+}hba_msg_queue;
+
+void hba_house_keeper_init(void);
+void hba_house_keeper_run(void);
+void hba_house_keeper_exit(void);
+void hba_msg_insert(void *data, unsigned int msg, unsigned int param);
+
+void hba_init_timer(PMV_Request req);
+void hba_remove_timer(PMV_Request req);
+void hba_add_timer(PMV_Request req, int timeout,
+		   MV_VOID (*function)(MV_PVOID data));
+
+#define TIMER_INTERVAL			1000		/* milliseconds per virtual tick */
+#define MAX_TIMER_REQUEST		20		/* same as the total number of devices */
+/* Sentinel "no timer armed"; parenthesized so the expansion survives
+ * any expression context (the bare MAX_TAG_NUMBER + 1 did not). */
+#define NO_CURRENT_TIMER		(MAX_TAG_NUMBER + 1)
+
+/* One scheduled callback slot, owned by Timer_Module.Running_Requests.
+ * NOTE(review): Time_Stamp is MV_U64 here but accessed as .value in
+ * hba_timer.c -- presumably a struct/union type; verify in mv_os.h. */
+typedef struct _Timer_Request
+{
+	List_Head Queue_Pointer;
+	MV_PVOID Context;
+	MV_VOID (*Routine) (MV_PVOID);
+	MV_BOOLEAN Valid;
+	MV_U8 Reserved0[3];
+
+	MV_U64 Time_Stamp;		// when this requested function wants to be called
+} Timer_Request, *PTimer_Request;
+
+#ifdef SUPPORT_TIMER
+
+typedef struct _Timer_Module
+{
+	Timer_Request Running_Requests[MAX_TIMER_REQUEST];
+	Tag_Stack Tag_Pool;
+
+	MV_U64 Time_Stamp;		// current time
+} Timer_Module, *PTimer_Module;
+
+#else
+
+/* Stub layout when SUPPORT_TIMER is off; keeps the struct non-empty. */
+typedef struct _Timer_Module
+{
+	MV_PVOID context;
+	MV_VOID (*routine) (MV_PVOID);
+} Timer_Module, *PTimer_Module;
+
+#endif
+
+/* 
+ * Exposed functions 
+ */
+MV_U32 Timer_GetResourceQuota(MV_U16 max_io);
+
+void Timer_Stop(PTimer_Module This);
+
+void Timer_Initialize(
+	IN OUT PTimer_Module This,
+	IN MV_PU8 pool);	
+
+MV_U8 Timer_AddRequest(	
+	IN MV_PVOID extension,
+	IN MV_U32 time_unit,
+	IN MV_VOID (*routine) (MV_PVOID),
+	IN MV_PVOID context
+	);
+
+void Timer_CheckRequest(	
+	IN MV_PVOID extension
+	);
+
+void Timer_CancelRequest(
+	IN MV_PVOID extension,
+	IN MV_U8 request_index
+	);
+
+#endif /* TIMER_H */
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/linux/linux_helper.c linux-2.6.25/drivers/scsi/mv/linux/linux_helper.c
--- linux-2.6.25.orig/drivers/scsi/mv/linux/linux_helper.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/linux/linux_helper.c	2008-07-28 19:25:47.461194498 +0200
@@ -0,0 +1,481 @@
+#include "mv_include.h"
+#include "mv_os.h"
+
+#include "hba_header.h"
+
+#include "linux_main.h"
+#include "linux_sense.h"
+#include "linux_helper.h"
+
+void GenerateSGTable(
+	IN PHBA_Extension pHBA,
+	IN struct scsi_cmnd *SCpnt,
+	OUT PMV_SG_Table pSGTable
+	);
+void HBARequestCallback(
+	MV_PVOID This,
+	PMV_Request pReq
+	);
+
+/*
+ * __hba_dump_req_info - debug helper: log device id, opcode, LBA range
+ * and transfer length of a request.  Only READ(10)/WRITE(10) carry an
+ * LBA decoded here; all other opcodes log lba 0.
+ */
+void __hba_dump_req_info(PMV_Request preq)
+{
+	unsigned long lba = 0;
+
+	switch (preq->Cdb[0]) {
+	case SCSI_CMD_READ_10:
+	case SCSI_CMD_WRITE_10:
+		/* Cast each byte before shifting: Cdb bytes promote to
+		 * (signed) int, so "Cdb[2] << 24" shifts into the sign bit
+		 * for LBAs >= 0x80000000 -- undefined behavior. */
+		lba = ((unsigned long) preq->Cdb[2] << 24) |
+		      ((unsigned long) preq->Cdb[3] << 16) |
+		      ((unsigned long) preq->Cdb[4] << 8)  |
+		       (unsigned long) preq->Cdb[5];
+		break;
+	default:
+		lba = 0;
+		break;
+	}
+
+	MV_DBG(DMSG_PROF_FREQ, 
+	       "_MV_ req "RED("%p")
+	       " dev %d : cmd %2X : lba %lu - %lu : length %d.\n",
+	       preq, preq->Device_Id, preq->Cdb[0], lba,
+	       lba + preq->Data_Transfer_Length/512,
+	       preq->Data_Transfer_Length);
+}
+
+/*
+ * TranslateSCSIRequest - convert a Linux scsi_cmnd into the driver's
+ * MV_Request: direction flags, CDB (rewriting 6-byte and some legacy
+ * opcodes into 10-byte/ATAPI forms), data buffer / SG table, and the
+ * completion callback.  Returns MV_FALSE only when the bounce buffer
+ * for an "instant" command cannot be allocated.
+ */
+MV_BOOLEAN TranslateSCSIRequest(PHBA_Extension pHBA, struct scsi_cmnd *pSCmd, PMV_Request pReq)
+{
+	
+	pReq->Device_Id = mv_scmd_target(pSCmd);	
+
+	/* Cmd_Flag */	//TBD: For Linux: Is that possible to set these flags or need read the Cdb
+	pReq->Cmd_Flag = 0;
+
+	/*
+	 * Set three flags: CMD_FLAG_NON_DATA, CMD_FLAG_DATA_IN and CMD_FLAG_DMA
+	 */
+	if ( pSCmd->sdb.length==0 ) //TBD lily
+	{
+		pReq->Cmd_Flag |= CMD_FLAG_NON_DATA;
+	}
+	else
+	{
+		//if ( Srb->SrbFlags&SRB_FLAGS_DATA_IN )
+		//	pReq->Cmd_Flag |= CMD_FLAG_DATA_IN; TBD ?? Lily
+		/*We need to optimize the flags setting. Lily*/
+		if(SCSI_IS_READ(pSCmd->cmnd[0]))
+			pReq->Cmd_Flag |= CMD_FLAG_DATA_IN; /*NOTE!possible to result in ERROR */
+		if ( SCSI_IS_READ(pSCmd->cmnd[0]) || SCSI_IS_WRITE(pSCmd->cmnd[0]) )
+			pReq->Cmd_Flag |= CMD_FLAG_DMA;
+	}
+
+	pReq->Sense_Info_Buffer_Length = SCSI_SENSE_BUFFERSIZE;  //TBD
+	pReq->Data_Transfer_Length = pSCmd->sdb.length;
+
+	//To handle some special CMDs,lily
+	memset(pReq->Cdb, 0, MAX_CDB_SIZE);
+	
+	/* NOTE(review): plain "=" (not "|=") discards any flags set above,
+	 * e.g. CMD_FLAG_NON_DATA for a zero-length FROM_DEVICE command --
+	 * confirm that combination cannot occur. */
+	switch (pSCmd->sc_data_direction) {
+	case DMA_FROM_DEVICE:
+		pReq->Cmd_Flag = CMD_FLAG_DATA_IN | CMD_FLAG_DMA;
+		break;
+	default:
+		break;
+	}
+
+	/* Rebuild the CDB: translate 6-byte and legacy opcodes into the
+	 * forms the lower layers expect; pass everything else through. */
+	switch(pSCmd->cmnd[0]){
+	case READ_TOC:
+		pReq->Cdb[0] = READ_TOC;
+		pReq->Cdb[1] = pSCmd->cmnd[1];
+		pReq->Cdb[2] = pSCmd->cmnd[2];
+		pReq->Cdb[6] = pSCmd->cmnd[6];
+		pReq->Cdb[7] = pSCmd->cmnd[7];
+		pReq->Cdb[8] = pSCmd->cmnd[8];
+		break;
+	case REQUEST_SENSE:
+		break;
+	case MODE_SELECT:
+		/* MODE SELECT(6) -> MODE SELECT(10): length moves to byte 8 */
+		pReq->Cdb[0] = MODE_SELECT_10;
+		pReq->Cdb[1] = pSCmd->cmnd[1];
+		pReq->Cdb[8] = pSCmd->cmnd[4];
+		break;
+		
+	case FORMAT_UNIT:
+		pReq->Cdb[0] = 0x24; //ATAPI opcodes
+		break;
+		
+	case READ_CAPACITY: //TBD
+		pReq->Cdb[0] = pSCmd->cmnd[0];
+		break;
+
+	case TEST_UNIT_READY:                       //TBD
+		pReq->Cdb[0] = pSCmd->cmnd[0];
+		break;
+
+	case READ_6:
+		/* READ(6) -> READ(10): 21-bit LBA into bytes 3..5 */
+		pReq->Cdb[0] = READ_10;
+		pReq->Cdb[3] = pSCmd->cmnd[1]&0x1f;
+		pReq->Cdb[4] = pSCmd->cmnd[2];
+		pReq->Cdb[5] = pSCmd->cmnd[3];
+		pReq->Cdb[8] = pSCmd->cmnd[4];
+		pReq->Cdb[9] = pSCmd->cmnd[5];
+		break;
+
+	case WRITE_6:
+		pReq->Cdb[0] = WRITE_10;
+		pReq->Cdb[3] = pSCmd->cmnd[1]&0x1f;
+		pReq->Cdb[4] = pSCmd->cmnd[2];
+		pReq->Cdb[5] = pSCmd->cmnd[3];
+		pReq->Cdb[8] = pSCmd->cmnd[4];
+		pReq->Cdb[9] = pSCmd->cmnd[5];
+		break;
+#if 0
+	case READ_12:
+		pReq->Cdb[0] = READ_10;
+		pReq->Cdb[1] = pSCmd->cmnd[1];
+		pReq->Cdb[2] = pSCmd->cmnd[2];
+		pReq->Cdb[3] = pSCmd->cmnd[3];
+		pReq->Cdb[4] = pSCmd->cmnd[4];
+		pReq->Cdb[5] = pSCmd->cmnd[5];
+		pReq->Cdb[7] = pSCmd->cmnd[8];
+		pReq->Cdb[8] = pSCmd->cmnd[9];
+		pReq->Cdb[9] = pSCmd->cmnd[11];
+		break;
+
+	case WRITE_12:
+		pReq->Cdb[0] = WRITE_10;
+		pReq->Cdb[1] = pSCmd->cmnd[1];
+		pReq->Cdb[2] = pSCmd->cmnd[2];
+		pReq->Cdb[3] = pSCmd->cmnd[3];
+		pReq->Cdb[4] = pSCmd->cmnd[4];
+		pReq->Cdb[5] = pSCmd->cmnd[5];
+		pReq->Cdb[7] = pSCmd->cmnd[8];
+		pReq->Cdb[8] = pSCmd->cmnd[9];
+		pReq->Cdb[9] = pSCmd->cmnd[11];
+		break;
+#endif
+	default:
+		memcpy(pReq->Cdb, pSCmd->cmnd, MAX_CDB_SIZE);
+		break;
+	}
+	
+	/* "Instant" commands are answered from a driver-allocated bounce
+	 * buffer (first SG segment's size) instead of the caller's SG
+	 * list; it is copied back and freed in HBA_kunmap_sg. */
+	if (SCSI_IS_INSTANT(pSCmd->cmnd[0]) && pSCmd->sdb.table.nents) {
+		struct scatterlist *sg = (struct scatterlist *) pSCmd->sdb.table.sgl;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
+		if ( pSCmd->use_sg > 1 )
+			MV_DBG(DMSG_SCSI, 
+			       "_MV_ more than 1 sg list in instant cmd.\n");
+		pReq->Data_Buffer = kmalloc(sg->length, GFP_ATOMIC);
+		if (pReq->Data_Buffer) {
+			memset(pReq->Data_Buffer, 0, sg->length);
+		}
+#else
+		pReq->Data_Buffer = kzalloc(sg->length, GFP_ATOMIC);
+#endif /* 2.6.14 */
+		if ( NULL == pReq->Data_Buffer )
+			return MV_FALSE;
+
+		pReq->Data_Transfer_Length = sg->length;
+		MV_SCp(pSCmd)->map_atomic = 1;
+//#elif //TBD
+#endif /* 2.5.0  */
+	} else {
+		pReq->Data_Buffer = pSCmd->sdb.table.sgl;
+	}
+
+	pReq->Sense_Info_Buffer = pSCmd->sense_buffer;
+
+	/* Init the SG table first no matter it's data command or non-data command. */
+	SGTable_Init(&pReq->SG_Table, 0);
+	if ( pSCmd->sdb.length )
+	{
+		GenerateSGTable(pHBA, pSCmd, &pReq->SG_Table);
+	}
+
+	pReq->Org_Req = pSCmd;
+	pReq->Context = NULL;
+
+	pReq->LBA.value = 0;
+	pReq->Sector_Count = 0;
+
+	pReq->Tag = pSCmd->tag; 
+	pReq->Scsi_Status = REQ_STATUS_PENDING;
+
+	pReq->Completion = HBARequestCallback;
+
+#ifdef __AC_REQ_TRACE__
+	MV_DBG(DMSG_PROF_FREQ, "_MV_ OS REQ : ");
+	__hba_dump_req_info(pReq);
+#endif /* __AC_REQ_TRACE__ */
+	return MV_TRUE;
+}
+
+/*
+ * TranslateOSRequest - fill an MV_Request from a Linux scsi_cmnd.
+ * Returns MV_TRUE on success, MV_FALSE when TranslateSCSIRequest could
+ * not allocate a bounce buffer.
+ */
+MV_BOOLEAN TranslateOSRequest(
+	IN PHBA_Extension pHBA,
+	IN struct scsi_cmnd * pSCmd,
+	OUT PMV_Request pReq
+	)
+{
+	/* Seed the request with its originator and a status that marks it
+	 * invalid until translation succeeds. */
+	pReq->Cmd_Initiator = pHBA; //TODO
+	pReq->Org_Req       = pSCmd;
+	pReq->Scsi_Status   = REQ_STATUS_INVALID_REQUEST; //TBD??
+
+	return TranslateSCSIRequest(pHBA, pSCmd, pReq);
+}
+
+/* This is the only function that OS request can be returned. */
+/*
+ * Completion path: translate driver status back onto the scsi_cmnd,
+ * then recycle the MV_Request onto the HBA free list.
+ * NOTE(review): Free_Request/Io_Count are touched without taking a lock
+ * here -- presumably the caller holds pHBA->lock; confirm.
+ */
+void HBARequestCallback(
+	MV_PVOID This,
+	PMV_Request pReq
+	)
+{
+	PHBA_Extension pHBA = (PHBA_Extension)This;
+	struct scsi_cmnd *pSCmd = (struct scsi_cmnd *)pReq->Org_Req;
+
+	/* Return this request to OS. */
+	HBA_Translate_Req_Status_To_OS_Status(pHBA, pSCmd, pReq);
+	
+	List_Add(&pReq->Queue_Pointer, &pHBA->Free_Request);
+	pHBA->Io_Count--;
+}
+
+/*
+ * GenerateSGTable - translate the command's scatterlist into the
+ * driver's MV_SG_Table, DMA-mapping the buffers on first use
+ * (MV_SCp(SCpnt)->mapped guards against double mapping).
+ */
+void GenerateSGTable(
+	IN PHBA_Extension pHBA,
+	IN struct scsi_cmnd *SCpnt,
+	OUT PMV_SG_Table pSGTable
+	)
+{
+	struct scatterlist *sg = (struct scatterlist *)SCpnt->sdb.table.sgl;
+	unsigned int sg_count = 0;
+	BUS_ADDRESS busaddr = 0;
+	int i;
+
+	MV_DBG(DMSG_FREQ,
+	       "In GenerateSGTable.\n");
+
+	/* Oversized request: log only.  (The message prints the device
+	 * and revision ids, not the offending length.) */
+	if (SCpnt->sdb.length > (mv_scmd_host(SCpnt)->max_sectors << 9)) {
+		MV_DBG(DMSG_SCSI, "ERROR: request length exceeds "
+		       "the maximum alowed value, %x %x\n",
+		       pHBA->Device_Id, pHBA->Revision_Id);
+	}
+
+	if (SCpnt->sdb.table.nents) {
+		unsigned int length;
+		sg = (struct scatterlist *) SCpnt->sdb.table.sgl;
+		if (MV_SCp(SCpnt)->mapped == 0) {
+			MV_DBG(DMSG_FREQ,"__MV__ call pci_map_sg.\n");
+			sg_count = pci_map_sg(pHBA->pcidev, 
+					      sg,
+					      SCpnt->sdb.table.nents,
+					      scsi_to_pci_dma_dir(SCpnt->sc_data_direction));
+			if (sg_count != SCpnt->sdb.table.nents) {
+				MV_PRINT("WARNING sg_count(%d) != "
+					 "SCpnt->use_sg(%d)\n",
+					 (unsigned int) sg_count, 
+					 SCpnt->sdb.table.nents);
+			}
+			MV_SCp(SCpnt)->mapped = 1;
+		}
+
+		/* NOTE(review): if 'mapped' was already 1 on entry, sg_count
+		 * stays 0 and no entries are appended -- verify callers
+		 * never reach here with a pre-mapped command. */
+		for (i = 0; i < sg_count; i++) {
+			busaddr = sg_dma_address(&sg[i]);
+			length = sg_dma_len(&sg[i]);
+			
+			SGTable_Append( pSGTable, 
+					LO_BUSADDR(busaddr), 
+					HI_BUSADDR(busaddr),
+					length );
+		}
+	} else {
+		/* no scatterlist: map the single linear buffer */
+		if (MV_SCp(SCpnt)->mapped == 0) {
+			MV_DBG(DMSG_SCSI_FREQ, 
+			       "_MV_ pci_map_single for scmd.\n");
+
+			busaddr = pci_map_single(pHBA->pcidev,
+						 SCpnt->sdb.table.sgl,
+						 SCpnt->sdb.length,
+						 scsi_to_pci_dma_dir(SCpnt->sc_data_direction));
+			MV_SCp(SCpnt)->bus_address = busaddr;
+
+			MV_SCp(SCpnt)->mapped = 1;
+		}
+
+		SGTable_Append( pSGTable, 
+				LO_BUSADDR(busaddr), 
+				HI_BUSADDR(busaddr),
+				SCpnt->sdb.length);
+	}
+}
+
+/*need to be optimized lily*/
+/*
+ * HBA_kunmap_sg - finish an "instant" command: copy the driver's bounce
+ * buffer back into the caller's first SG page (atomic kmap, so it also
+ * works from irq context), free the bounce buffer and restore
+ * Data_Buffer to the original scatterlist.  No-op unless map_atomic was
+ * set by TranslateSCSIRequest.
+ */
+void HBA_kunmap_sg(void* pReq)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+	void *buf;
+	struct scsi_cmnd *scmd = NULL;
+	struct scatterlist *sg = NULL;
+	PMV_Request        req = NULL;
+
+	req  = (PMV_Request) pReq;
+	scmd = (struct scsi_cmnd *) req->Org_Req;
+
+	if (scmd)
+		sg = (struct scatterlist *) scmd->sdb.table.sgl;
+
+	if (NULL == sg) {
+		MV_DBG(DMSG_HBA, "no org_req found in the req.\n");
+		return;
+	}
+		
+	if (MV_SCp(scmd)->map_atomic) {
+		/* KM_IRQ0 requires interrupts disabled */
+		WARN_ON(!irqs_disabled());
+		buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
+		memcpy(buf, req->Data_Buffer, sg->length);
+		kunmap_atomic(buf, KM_IRQ0);
+		kfree(req->Data_Buffer);
+		/* other process might want access to it ... */
+		req->Data_Buffer = scmd->sdb.table.sgl;
+		MV_SCp(scmd)->map_atomic = 0;
+	}
+#endif	
+}
+
+/*
+ * Completion callback for the request issued by hba_send_shutdown_req:
+ * recycle the request and wake the waiting thread.
+ * NOTE(review): the request was kmalloc'ed by the sender yet is parked
+ * on Free_Request here -- confirm the free-list lifetime assumptions.
+ */
+static void hba_shutdown_req_cb(MV_PVOID this, PMV_Request req)
+{
+	PHBA_Extension phba = (PHBA_Extension) this;
+
+	List_Add(&req->Queue_Pointer, &phba->Free_Request);
+	phba->Io_Count--;
+	/* signal hba_send_shutdown_req, which is blocked waiting */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
+	atomic_set(&phba->hba_sync, 0);
+#else
+	complete(&phba->cmpl);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11) */
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
+/* will wait for atomic value atomic to become zero until timed out */
+/* return how much 'timeout' is left or 0 if already timed out */
+/* Pre-completion-API substitute: polls in HZ/20 slices.
+ * NOTE(review): with TASK_INTERRUPTIBLE a pending signal makes
+ * schedule_timeout return early every iteration, degrading this into a
+ * near-busy loop until the timeout drains -- confirm acceptable. */
+int __hba_wait_for_atomic_timeout(atomic_t *atomic, unsigned long timeout)
+{
+	unsigned intv = HZ/20; 
+
+	while (timeout) {
+		if ( 0 == atomic_read(atomic) )
+			break;
+
+		if ( timeout < intv )
+			intv = timeout;
+		set_current_state(TASK_INTERRUPTIBLE);
+		/* subtract only the time actually slept */
+		timeout -= (intv - schedule_timeout(intv));
+	}
+	return timeout;
+}
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11) */
+
+/*
+ * hba_send_shutdown_req - build and queue a driver-internal shutdown
+ * request, then wait (up to 10s) for its completion callback
+ * (hba_shutdown_req_cb).
+ */
+void hba_send_shutdown_req(PHBA_Extension phba)
+{
+	unsigned long flags;
+	PMV_Request pReq;
+
+	/*Send MV_REQUEST to do something.*/	
+	pReq = kmalloc(sizeof(MV_Request), GFP_ATOMIC);
+
+	/* should we reserve a req for this ? */
+	if ( NULL == pReq ) {
+		printk("THOR : cannot allocate memory for req.\n");
+		return;
+	}
+
+	/* Zero the whole request: the RAID_DRIVER build below fills in
+	 * only two CDB bytes, so every other field would otherwise be
+	 * uninitialized heap garbage when the request is dispatched. */
+	memset(pReq, 0, sizeof(MV_Request));
+
+	pReq->Cmd_Initiator = phba;
+	pReq->Org_Req = pReq; /*no ideas.*/
+	pReq->Scsi_Status = REQ_STATUS_INVALID_REQUEST;
+	pReq->Completion = hba_shutdown_req_cb;
+	
+#ifdef RAID_DRIVER
+	pReq->Cdb[0] = APICDB0_LD;
+	pReq->Cdb[1] = APICDB1_LD_SHUTDOWN;
+#else
+	pReq->Device_Id = 0;
+	pReq->Cmd_Flag = 0;
+	pReq->Cmd_Flag |= CMD_FLAG_NON_DATA;
+	pReq->Sense_Info_Buffer_Length = 0;  
+	pReq->Data_Transfer_Length = 0;
+	pReq->Data_Buffer = NULL;
+	pReq->Sense_Info_Buffer = NULL;
+	SGTable_Init(&pReq->SG_Table, 0);
+	pReq->Cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
+	pReq->Cdb[1] = CDB_CORE_MODULE;
+	pReq->Cdb[2] = CDB_CORE_SHUTDOWN;
+	pReq->Context = NULL;
+	pReq->LBA.value = 0;
+	pReq->Sector_Count = 0;
+	pReq->Scsi_Status = REQ_STATUS_PENDING;
+#endif
+
+	spin_lock_irqsave(&phba->lock, flags);
+	List_AddTail(&pReq->Queue_Pointer, &phba->Waiting_Request);
+	phba->Io_Count++;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
+	atomic_set(&phba->hba_sync, 1);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11) */
+	HBA_HandleWaitingList(phba);
+	spin_unlock_irqrestore(&phba->lock, flags);
+
+	/* NOTE(review): the callback parks this kmalloc'ed request on
+	 * phba->Free_Request; it is never kfree'd. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
+	__hba_wait_for_atomic_timeout(&phba->hba_sync, 10*HZ);
+#else
+	wait_for_completion_timeout(&phba->cmpl, 10*HZ);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11) */
+}
+
+/*
+ * HBA_CanHandleRequest - MV_TRUE when the HBA layer answers this API
+ * CDB itself (adapter-info query; event CDBs when SUPPORT_EVENT is
+ * compiled in), MV_FALSE when it must go down the module chain.
+ */
+MV_BOOLEAN HBA_CanHandleRequest (PMV_Request pReq)
+{
+	if ( pReq->Cdb[0] == APICDB0_ADAPTER )
+		return ( pReq->Cdb[1] == APICDB1_ADAPTER_GETINFO ) ?
+			MV_TRUE : MV_FALSE;
+
+#ifdef SUPPORT_EVENT
+	if ( pReq->Cdb[0] == APICDB0_EVENT )
+		return MV_TRUE;
+#endif  /* SUPPORT_EVENT */
+
+	return MV_FALSE;
+}
+
+/*
+ * HBA_HandleWaitingList - drain phba->Waiting_Request, dispatching each
+ * request either to the HBA layer itself (HBA_CanHandleRequest) or to
+ * the next module in the chain.
+ * NOTE(review): the list is walked unlocked here -- presumably callers
+ * (e.g. hba_send_shutdown_req) hold pHBA->lock; confirm.
+ */
+void HBA_HandleWaitingList(PHBA_Extension pHBA)
+{
+	PMV_Request pReq = NULL;
+	MV_PVOID pNextExtension = NULL;
+	MV_VOID (*pNextFunction)(MV_PVOID , PMV_Request) = NULL;
+
+	/* Get the request header */
+	while ( !List_Empty(&pHBA->Waiting_Request) ) {
+		pReq = (PMV_Request)List_GetFirstEntry(&pHBA->Waiting_Request,
+						       MV_Request, 
+						       Queue_Pointer);
+		MV_DASSERT( pReq != NULL );
+
+		if ( NULL == pReq )
+			break;
+#if 0
+		pCore = pHBA->Module_Manage.resource[MODULE_CORE].module_extension;
+		//TBD: To the lower module
+		module_set[MODULE_CORE].module_sendrequest(pCore, pReq);
+#else
+		if ( HBA_CanHandleRequest(pReq) ) {
+			HBA_ModuleSendRequest( pHBA, pReq );
+		} else {
+			//TBD: performance
+			HBA_GetNextModuleSendFunction(pHBA, 
+							&pNextExtension, 
+							&pNextFunction);
+			MV_DASSERT( pNextExtension!=NULL );
+			MV_DASSERT( pNextFunction!=NULL );
+			pNextFunction(pNextExtension, pReq);
+		}
+#endif
+	}
+}
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/linux/linux_helper.h linux-2.6.25/drivers/scsi/mv/linux/linux_helper.h
--- linux-2.6.25.orig/drivers/scsi/mv/linux/linux_helper.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/linux/linux_helper.h	2008-07-28 18:42:43.335188743 +0200
@@ -0,0 +1,41 @@
+/*
+ *  Yet another header added to solve the dependence hell
+ *
+ *
+ *  July 6th, 2006 A.C. <ake at marvell dot com>
+ *
+ */
+#ifndef __LINUX_HELPER_H__
+#define __LINUX_HELPER_H__
+
+#include "mv_os.h"
+#include "hba_header.h"
+
+MV_BOOLEAN TranslateOSRequest( IN PHBA_Extension pHBA,
+			       IN struct scsi_cmnd * pSCmd,
+			       OUT PMV_Request pReq );
+
+void HBARequestCallback( MV_PVOID This,
+			 PMV_Request pReq );
+
+
+
+void GenerateSGTable( IN PHBA_Extension pHBA,
+		      IN struct scsi_cmnd *SCpnt,
+		      OUT PMV_SG_Table pSGTable );
+
+
+MV_BOOLEAN TranslateSCSIRequest(PHBA_Extension pHBA, 
+				struct scsi_cmnd *pSCmd, 
+				PMV_Request pReq );
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
+int __hba_wait_for_atomic_timeout(atomic_t *atomic, unsigned long timeout);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11) */
+
+void __hba_dump_req_info(PMV_Request preq);
+
+void hba_send_shutdown_req(PHBA_Extension phba);
+void HBA_HandleWaitingList(PHBA_Extension pHBA);
+#endif /*__LINUX_HELPER_H__*/
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/linux/linux_iface.c linux-2.6.25/drivers/scsi/mv/linux/linux_iface.c
--- linux-2.6.25.orig/drivers/scsi/mv/linux/linux_iface.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/linux/linux_iface.c	2008-07-28 18:42:43.335188743 +0200
@@ -0,0 +1,589 @@
+/*
+ * Linux Driver for 61xx
+ * linux_procinfo.c
+ * lily initialized on Feb. 15 2006
+ *
+ * restructured & expanded to be linux_iface.c - Kernel/CLI interface
+ * August 2006
+ * Albert Ke < ake at marvell dot com >
+ *
+ */
+
+#include "mv_os.h"
+#include "mv_include.h"
+#include "linux_iface.h"
+#include "linux_main.h"
+#include "linux_helper.h"
+
+
+extern PHBA_Extension mv_device_extension_list[MV_DEVICE_MAX_SLOT];
+extern unsigned int mv_device_count;
+extern void HBA_HandleWaitingList(PHBA_Extension pHBA);
+
+static int mv_open(struct inode *inode, struct file *file);
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mv_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+#else
+static int mv_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+		    unsigned long arg);
+#endif /* HAVE_UNLOCKED_IOCTL */
+
+#define IOCTL_BUF_LEN 8192
+/* Single shared bounce buffer for ioctl data transfers.
+ * NOTE(review): no lock guards this buffer; two concurrent management
+ * ioctls would trample each other's data - confirm user space (or the
+ * BKL on the non-unlocked path) serializes these calls. */
+static unsigned char ioctl_buf[IOCTL_BUF_LEN];
+
+/* Character-device entry points for the CLI/management interface. */
+struct file_operations mv_fops = {
+	.owner   =    THIS_MODULE,
+	.open    =    mv_open,
+#ifdef HAVE_UNLOCKED_IOCTL
+	.unlocked_ioctl = mv_ioctl,
+#else
+	.ioctl   =    mv_ioctl,
+#endif
+	.release =    NULL
+};
+
+/*
+ * Completion callback for ioctl-originated requests: recycle the request
+ * onto the HBA free list and drop the outstanding-I/O counter.
+ * NOTE(review): touches Free_Request/Io_Count without taking pHBA->lock -
+ * presumably the core module invokes completions with the lock held;
+ * verify against Core_InterruptServiceRoutine.
+ */
+void IOHBARequestCallback(MV_PVOID This, PMV_Request pReq)
+{
+	PHBA_Extension pHBA = (PHBA_Extension)This;
+
+	List_Add(&pReq->Queue_Pointer, &pHBA->Free_Request);
+	pHBA->Io_Count--;
+}
+
+/*
+ * Completion callback for synchronous internal requests (ioctl and /proc
+ * paths): free the request, then wake the sleeping issuer - via the
+ * hba_sync atomic on pre-2.6.11 kernels, via the completion otherwise.
+ */
+void ioctlcallback(MV_PVOID This, PMV_Request pReq)
+{
+	PHBA_Extension pHBA = (PHBA_Extension)This;
+
+	List_Add(&pReq->Queue_Pointer, &pHBA->Free_Request);
+	pHBA->Io_Count--;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
+	atomic_set(&pHBA->hba_sync, 0);
+#else
+	complete(&pHBA->cmpl);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11) */
+}
+
+#ifdef RAID_DRIVER
+/*
+ * Map a management-API target ID to the driver's internal ID by keeping
+ * only the low 12 bits (the high nibble carries API-side flags).
+ */
+static MV_U16 API2Driver_ID(MV_U16 API_ID)
+{
+	MV_U16	returnID = API_ID;
+	returnID &= 0xfff;
+	return returnID;
+}
+
+/* Result buffer for APICDB1_LD_GETINFO, shared by every adapter.
+ * NOTE(review): a single static buffer is unsafe if two /proc reads run
+ * concurrently on different hosts - confirm procfs serializes readers. */
+static LD_Info ldinfo[LDINFO_NUM] = {{0}};
+/*
+ * Issue an internal APICDB0_LD/GETINFO request to the firmware console
+ * target and wait up to HBA_REQ_TIMER_IOCTL seconds for it to complete.
+ * On success the logical-disk table has been written into ldinfo[].
+ * Returns 0 on success, -1 when no free request is available or the
+ * request timed out.
+ * NOTE(review): on timeout the request is neither aborted nor reclaimed;
+ * a late completion will still write into ldinfo[] and only then return
+ * the request to the free list.
+ */
+static int mv_proc_ld_info(struct Scsi_Host *host)
+{
+	PModule_Header pheader = get_hba_ext_header(host);
+	PHBA_Extension pHBA = (PHBA_Extension)head_to_hba(pheader);
+	PMV_Request pReq;
+	MV_U8 Cdb[MAX_CDB_SIZE]; 
+	MV_U16 LD_ID = 0XFF;
+	unsigned long flags;
+
+	/* 0xFF == "all logical disks" for the GETINFO sub-command. */
+	Cdb[0] = APICDB0_LD;
+	Cdb[1] = APICDB1_LD_GETINFO;
+	Cdb[2] = LD_ID & 0xff;
+	Cdb[3] = API2Driver_ID(LD_ID)>>8;
+	
+	spin_lock_irqsave(&pHBA->lock, flags);
+	pReq = List_GetFirstEntry((&pHBA->Free_Request), MV_Request, Queue_Pointer);
+	if (pReq == NULL) {
+		spin_unlock_irqrestore(&pHBA->lock, flags);
+		return -1;/*FAIL.*/
+	}
+
+	pReq->Cmd_Initiator = pHBA;
+	pReq->Org_Req = pReq;/*No ideas.*/
+	pReq->Device_Id = CONSOLE_ID;
+	pReq->Cmd_Flag = 0;
+
+	/* APICDB0_LD is not a READ/WRITE opcode, so these stay clear. */
+	if (SCSI_IS_READ(Cdb[0]))
+		pReq->Cmd_Flag |= CMD_FLAG_DATA_IN;
+	if ( SCSI_IS_READ(Cdb[0]) || SCSI_IS_WRITE(Cdb[0]) )
+		pReq->Cmd_Flag |= CMD_FLAG_DMA;
+	
+	pReq->Data_Transfer_Length = LDINFO_NUM*sizeof(LD_Info);
+	memset(ldinfo, 0, LDINFO_NUM*sizeof(LD_Info));
+	pReq->Data_Buffer = ldinfo;
+	SGTable_Init(&pReq->SG_Table, 0);
+	memcpy(pReq->Cdb, Cdb, MAX_CDB_SIZE);
+	pReq->Context = NULL;
+	pReq->LBA.value = 0;    
+	pReq->Sector_Count = 0; 
+	pReq->Completion = ioctlcallback;
+	List_Add(&pReq->Queue_Pointer, &pHBA->Waiting_Request);
+	pHBA->Io_Count++;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
+	atomic_set(&pHBA->hba_sync, 1);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11) */
+	HBA_HandleWaitingList(pHBA);
+	spin_unlock_irqrestore(&pHBA->lock, flags);
+
+	/* Sleep until ioctlcallback() fires or the timeout expires. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
+	if ( !__hba_wait_for_atomic_timeout(&pHBA->hba_sync, 
+					    HBA_REQ_TIMER_IOCTL*HZ) ) {
+#else
+	if (wait_for_completion_timeout(&pHBA->cmpl, 
+					HBA_REQ_TIMER_IOCTL*HZ) == 0) {
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11) */
+		MV_DBG(DMSG_HBA, "__MV__ ioctl req timed out.\n");
+	        return -1; /*FAIL.*/
+	}
+
+	return 0;/*SUCCESS.*/
+}
+
+
+/* Map an LD_STATUS_* code to a human-readable string for /proc output. */
+static char* mv_ld_status(int status)
+{
+	switch (status) {
+	case LD_STATUS_FUNCTIONAL:
+		return "online";
+	case LD_STATUS_DEGRADE:
+		return "degraded";		
+	case LD_STATUS_DELETED:
+		return "deleted";
+	case LD_STATUS_PARTIALLYOPTIMAL:
+		return "partially optimal";
+	case LD_STATUS_OFFLINE:
+		return "offline";
+	case LD_STATUS_MIGRATION:
+		return "migration";
+	}
+	return "Unknown Status";
+}
+
+/* Map an LD_MODE_* RAID level code to its display name. */
+static char* mv_ld_raid_mode(int status)
+{
+	switch (status) {
+	case LD_MODE_RAID0:
+		return "RAID0";
+	case LD_MODE_RAID1:
+		return "RAID1";
+	case LD_MODE_RAID10:
+		return "RAID10";
+	case LD_MODE_RAID1E:
+		return "RAID1E";
+	case LD_MODE_RAID5:
+		return "RAID5";
+	case LD_MODE_RAID50:
+		return "RAID50";
+	case LD_MODE_RAID6:
+		return "RAID6";
+	case LD_MODE_JBOD:
+		return "JBOD";
+	}
+	return "Unknown RAID Mode";
+}
+
+/* Map an LD_BGA_STATE_* background-activity state to its display name. */
+static char* mv_ld_bga_status(int status)
+{
+	switch (status) {
+	case LD_BGA_STATE_RUNNING:
+		return "running";
+	case LD_BGA_STATE_ABORTED:
+		return "aborted";
+	case LD_BGA_STATE_PAUSED:
+		return "paused";
+	case LD_BGA_STATE_AUTOPAUSED:
+		return "auto paused";
+	case LD_BGA_STATE_DDF_PENDING:
+		return "DDF pending";
+	}
+	return "N/A";
+}
+
+/*
+ * Fetch the detailed LD_Status for one logical disk (ldid) via an
+ * internal APICDB0_LD/GETSTATUS request, waiting synchronously for the
+ * firmware.  Returns 0 on success (*ldstatus filled in), -1 on failure.
+ * NOTE(review): the caller passes a stack-resident *ldstatus; on the
+ * timeout path the request is not aborted, so a late completion can
+ * write into memory whose lifetime has ended - confirm the core layer
+ * cannot complete after the timeout.
+ */
+static int mv_ld_get_status(struct Scsi_Host *host, MV_U16 ldid, LD_Status *ldstatus)
+{
+	PModule_Header pheader = get_hba_ext_header(host);
+	PHBA_Extension pHBA = (PHBA_Extension)head_to_hba(pheader);
+	PMV_Request pReq;
+	MV_U8 Cdb[MAX_CDB_SIZE]; 
+	MV_U16 LD_ID = ldid;/*0XFF;*/
+	unsigned long flags;
+
+	Cdb[0] = APICDB0_LD;
+	Cdb[1] = APICDB1_LD_GETSTATUS;
+	Cdb[2] = LD_ID & 0xff;
+	Cdb[3] = API2Driver_ID(LD_ID)>>8;
+	
+	spin_lock_irqsave(&pHBA->lock, flags);
+	pReq = List_GetFirstEntry((&pHBA->Free_Request), MV_Request, Queue_Pointer);
+	if (pReq == NULL) {
+		spin_unlock_irqrestore(&pHBA->lock, flags);
+		return -1;/*FAIL.*/
+	}
+
+	pReq->Cmd_Initiator = pHBA;
+	pReq->Org_Req = pReq;/*No ideas.*/
+	pReq->Device_Id = CONSOLE_ID;
+	pReq->Cmd_Flag = 0;
+
+	/* APICDB0_LD is not a READ/WRITE opcode, so these stay clear. */
+	if (SCSI_IS_READ(Cdb[0]))
+		pReq->Cmd_Flag |= CMD_FLAG_DATA_IN;
+	if ( SCSI_IS_READ(Cdb[0]) || SCSI_IS_WRITE(Cdb[0]) )
+		pReq->Cmd_Flag |= CMD_FLAG_DMA;
+
+	/* Data Buffer */
+	pReq->Data_Transfer_Length = sizeof(LD_Status);
+	memset(ldstatus,0,sizeof(LD_Status));
+	pReq->Data_Buffer = ldstatus;
+	
+	SGTable_Init(&pReq->SG_Table, 0);
+	memcpy(pReq->Cdb, Cdb, MAX_CDB_SIZE);
+	pReq->Context = NULL;
+	pReq->LBA.value = 0;    
+	pReq->Sector_Count = 0; 
+	pReq->Completion = ioctlcallback;
+	List_Add(&pReq->Queue_Pointer, &pHBA->Waiting_Request);
+	pHBA->Io_Count++;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
+	atomic_set(&pHBA->hba_sync, 1);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11) */
+	HBA_HandleWaitingList(pHBA);
+	spin_unlock_irqrestore(&pHBA->lock, flags);
+
+	/* Sleep until ioctlcallback() fires or the timeout expires. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
+	if ( !__hba_wait_for_atomic_timeout(&pHBA->hba_sync, 
+					    HBA_REQ_TIMER_IOCTL*HZ) ) {
+#else
+	if ( !wait_for_completion_timeout(&pHBA->cmpl, 
+					  HBA_REQ_TIMER_IOCTL*HZ) ) {
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11) */
+		MV_DBG(DMSG_HBA, "__MV__ ioctl req timed out.\n");
+	        return -1; /*FAIL.*/
+	}
+
+	return 0;/*SUCCESS.*/
+}
+
+/*
+ * Append a human-readable description of a running or interrupted
+ * background activity (rebuild/init/sync/migration) to buf.  Returns the
+ * number of characters written (0 when the BGA state needs no text).
+ */
+static int mv_ld_show_status(char *buf, PLD_Status pld_status)
+{
+	char *str, *str1;
+	int ret = 0;
+
+	if ( LD_BGA_STATE_RUNNING == pld_status->BgaState)
+	{
+		if (LD_BGA_REBUILD == pld_status->Bga)
+			str = "rebuilding";
+		else if (LD_BGA_INIT_QUICK == pld_status->Bga || LD_BGA_INIT_BACK == pld_status->Bga)
+			str = "initializing";
+		else if (LD_BGA_CONSISTENCY_CHECK == pld_status->Bga || LD_BGA_CONSISTENCY_FIX == pld_status->Bga)
+			str = "synchronizing";
+		else if (LD_BGA_MIGRATION == pld_status->Bga)
+			str = "extending";
+		else
+			str = "unknown bga action";
+		ret = sprintf(buf, "  %s is %d%% done", str, pld_status->BgaPercentage);
+	}
+	else if ( (LD_BGA_STATE_ABORTED == pld_status->BgaState) || (LD_BGA_STATE_PAUSED == pld_status->BgaState) || (LD_BGA_STATE_AUTOPAUSED == pld_status->BgaState))
+	{
+		/* Same action names as above, but report how it stopped. */
+		if (LD_BGA_REBUILD == pld_status->Bga)
+			str = "rebuilding";
+		else if (LD_BGA_INIT_QUICK == pld_status->Bga || LD_BGA_INIT_BACK == pld_status->Bga)
+			str = "initializing";
+		else if (LD_BGA_CONSISTENCY_CHECK == pld_status->Bga || LD_BGA_CONSISTENCY_FIX == pld_status->Bga)
+			str = "synchronizing";
+		else if (LD_BGA_MIGRATION == pld_status->Bga)
+			str = "extending";
+		else
+			str = "unknown bga action";
+
+		if (LD_BGA_STATE_ABORTED == pld_status->BgaState)
+			str1 = "aborted";
+		else if (LD_BGA_STATE_PAUSED == pld_status->BgaState)
+			str1 = "paused";
+		else if (LD_BGA_STATE_AUTOPAUSED == pld_status->BgaState)
+			str1 = "auto paused";
+		else
+			str1 = "aborted";
+		ret = sprintf(buf, "  %s is %s", str, str1);
+	}
+	return ret;
+}
+#endif /*RAID_DRIVER*/
+
+/*
+ * /proc/scsi/<driver>/<host> handler: prints the driver version and,
+ * when built with RAID support, a table of logical disks with their
+ * RAID level, status and background-activity progress.
+ * Returns the number of bytes available from 'offset', or -ENOSYS for
+ * writes / bad arguments.
+ * NOTE(review): 'length' is never honoured, so with many LDs the
+ * sprintf()s could run past the single proc page - confirm the LD count
+ * keeps the output well under PAGE_SIZE.
+ */
+int mv_linux_proc_info(struct Scsi_Host *pSHost, char *pBuffer, 
+		       char **ppStart,off_t offset, int length, int inout)
+{
+	int len = 0;
+	int datalen = 0;/*use as a temp flag.*/
+#ifdef RAID_DRIVER
+	int i = 0;
+	int j = 0;
+	int ret = -1;
+	LD_Status ld_status;
+	char *tmp = NULL;
+	int tmplen = 0;
+#endif	
+	if (!pSHost || !pBuffer)
+	        return (-ENOSYS);
+	if (inout == 1) {
+	/* User write is not supported. */
+		return (-ENOSYS);
+	}
+
+	len = sprintf(pBuffer,"Marvell Thor Driver , Version %s\n", mv_version_linux);
+	
+#ifdef RAID_DRIVER
+	if ( mv_proc_ld_info(pSHost) == -1 ) {
+		len = sprintf(pBuffer,"Marvell Thor Driver is busy NOW, please try later.\n");
+		goto out;
+	} else {
+		/* Normalize firmware states for display: an offline LD with a
+		 * "running" BGA is shown as auto-paused, and MISSING maps to
+		 * offline.  The table is packed, so stop at the first
+		 * INVALID entry. */
+		for (i = 0; i < MAX_LD_SUPPORTED; i++) {
+			if (ldinfo[i].Status != LD_STATUS_INVALID) {
+				if (ldinfo[i].Status == LD_STATUS_OFFLINE
+				        && ldinfo[i].BGAStatus == LD_BGA_STATE_RUNNING) {
+					ldinfo[i].BGAStatus = LD_BGA_STATE_AUTOPAUSED;
+				}
+				if (ldinfo[i].Status == LD_STATUS_MISSING) {
+					ldinfo[i].Status = LD_STATUS_OFFLINE;
+				}
+			} else {
+				break;
+			}
+		}
+	}
+	
+	len += sprintf(pBuffer+len,"Index RAID\tStatus  \t\tBGA Status\n");
+	for ( i = 0 ; i < LDINFO_NUM ; i++) {
+		/* Size 0 marks the end of the populated entries. */
+		if ( ldinfo[i].Size.value == 0 ) {
+			if ( i == 0 ) {
+				len += sprintf(pBuffer+len,"NO Logical Disk\n");
+			}
+			break;
+		}
+
+		len += sprintf(pBuffer+len,
+			"%-5d %s\t%s",
+			ldinfo[i].ID,
+			mv_ld_raid_mode(ldinfo[i].RaidMode),
+			mv_ld_status(ldinfo[i].Status)
+			);
+
+		/* Pad the status column to 24 characters by hand. */
+		tmplen = 24 -strlen(mv_ld_status(ldinfo[i].Status));
+		while (j < tmplen) {
+			len += sprintf(pBuffer+len, "%s", " ");
+			j++;
+		}
+		j = 0;
+
+		len += sprintf(pBuffer+len, "%s", mv_ld_bga_status(ldinfo[i].BGAStatus));
+
+		if (ldinfo[i].BGAStatus != LD_BGA_STATE_NONE) {
+			ret = mv_ld_get_status(pSHost,ldinfo[i].ID,&ld_status);
+			if (ret == 0) {
+				if (ld_status.Status != LD_STATUS_INVALID) {
+					if (ld_status.Status == LD_STATUS_MISSING)
+						ld_status.Status = LD_STATUS_OFFLINE;
+					ld_status.BgaState = ldinfo[i].BGAStatus;
+				}
+				len += mv_ld_show_status(pBuffer+len,&ld_status);
+				ret = -1;
+			}
+		}
+
+		tmp = NULL;
+		tmplen = 0;
+		len += sprintf(pBuffer+len,"\n");
+	}
+
+out:
+#endif
+		
+	/* Standard proc_info windowing: report what lies past 'offset'. */
+	datalen = len - offset;
+	if (datalen < 0 ) {
+		datalen = 0;
+		*ppStart = pBuffer + len;
+	} else {
+		*ppStart = pBuffer + offset;
+	}
+	return datalen;
+} 
+
+/*
+ *Character Device Interface.
+ */
+
+/*
+ * Character-device open: accept only minors that map to a probed
+ * adapter; everything else gets -ENODEV.
+ * NOTE(review): i_lock is taken just to read the fixed i_rdev value -
+ * presumably defensive; the lock looks unnecessary here.
+ */
+static int mv_open(struct inode *inode, struct file *file)
+{
+	unsigned int minor_number;
+	int retval = -ENODEV;
+	unsigned long flags = 0;
+	
+	MV_LOCK_IRQSAVE(&inode->i_lock, flags);
+	minor_number = MINOR(inode->i_rdev);
+	if (minor_number >= mv_device_count) {
+		printk("MV : No such device.\n");
+		goto out;
+	}
+	retval = 0;
+out:
+	MV_UNLOCK_IRQRESTORE(&inode->i_lock, flags);
+	return retval;
+}
+
+/*
+ * Character-device ioctl: management/CLI entry point.
+ *
+ * cmd 1 returns CONSOLE_ID, cmd 2 returns the adapter count; any other
+ * cmd is treated as a SCSI_PASS_THROUGH_DIRECT_WITH_BUFFER header at
+ * 'arg' that is translated into an internal MV_Request and executed
+ * synchronously, bouncing data through the static ioctl_buf.
+ * Returns 0 on success or a negative errno.
+ *
+ * Fix: the caller's data-buffer pointer is now taken from the header
+ * already copied in with copy_from_user() and remembered in 'user_data'.
+ * The previous code re-read it by dereferencing the raw user struct
+ * (((PSCSI_PASS_THROUGH_DIRECT_WITH_BUFFER)arg)->sptd.DataBuffer),
+ * i.e. it followed a user-space pointer directly from kernel context
+ * instead of going through copy_from_user()/copy_to_user().  It also
+ * copied the header back with DataBuffer still pointing at ioctl_buf,
+ * leaking a kernel address to user space; the pointer is restored first.
+ *
+ * NOTE(review): ioctl_buf is a single shared buffer with no lock around
+ * its use, so concurrent ioctls can corrupt each other - confirm user
+ * space serializes management requests.
+ * NOTE(review): on timeout the MV_Request is neither aborted nor freed;
+ * a late completion will still write into ioctl_buf.
+ */
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mv_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+#else
+static int mv_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 
+		    unsigned long arg)
+#endif /* HAVE_UNLOCKED_IOCTL */ 
+{
+	PHBA_Extension	pHBA;
+	PMV_Request    pReq = NULL;
+	int error = 0;
+	int ret   = 0;
+	int sptdwb_size = sizeof(SCSI_PASS_THROUGH_DIRECT_WITH_BUFFER);
+	int console_id  = CONSOLE_ID;
+	unsigned long flags;
+	void *user_data = NULL;	/* caller's data buffer (user-space ptr) */
+	PSCSI_PASS_THROUGH_DIRECT_WITH_BUFFER psptdwb = NULL;
+
+#ifdef HAVE_UNLOCKED_IOCTL
+	pHBA = mv_device_extension_list[MINOR(file->f_dentry->d_inode->i_rdev)];
+#else
+	pHBA = mv_device_extension_list[MINOR(inode->i_rdev)];
+#endif /* HAVE_UNLOCKED_IOCTL */ 
+
+	if (cmd == 1) {
+		if ( copy_to_user((void *)arg,
+				(void *)&console_id,
+				sizeof(int)) != 0 ) {
+			MV_DBG( DMSG_IOCTL, 
+				"Marvell : Get CONSOLE_ID Error.\n" );
+			return -EIO;
+		}
+		return 0;
+	}
+
+	if (cmd == 2) {
+		if ( copy_to_user((void *)arg,
+				(void *)&mv_device_count,
+				sizeof(unsigned int)) != 0 ) {
+			MV_DBG( DMSG_IOCTL, 
+				"Marvell : Get Device Number Error.\n" );
+			return -EIO;
+		}
+		return 0;
+	}
+
+	psptdwb = kmalloc(sptdwb_size, GFP_ATOMIC);
+	
+	if ( NULL == psptdwb ) 
+		return -ENOMEM;
+
+	error = copy_from_user(psptdwb, (void *)arg, sptdwb_size);
+
+	if (error) {
+		ret = -EIO;
+		goto clean_psp;
+	}
+
+	if (psptdwb->sptd.DataTransferLength) {
+		if ( psptdwb->sptd.DataTransferLength > IOCTL_BUF_LEN ) {
+			MV_DBG(DMSG_HBA, "__MV__ not enough buf space.\n");
+			ret = -ENOMEM;
+			goto clean_psp;
+		}
+
+		/* Remember the caller's buffer before substituting ours. */
+		user_data = psptdwb->sptd.DataBuffer;
+		psptdwb->sptd.DataBuffer = ioctl_buf;
+		memset(ioctl_buf, 0, psptdwb->sptd.DataTransferLength);
+		
+		error = copy_from_user(psptdwb->sptd.DataBuffer,
+				       user_data,
+				       psptdwb->sptd.DataTransferLength);
+		if (error) {
+			ret = -EIO;
+			goto clean_pspbuf;
+		}
+	} else {
+		psptdwb->sptd.DataBuffer = NULL;
+	}
+
+	spin_lock_irqsave(&pHBA->lock, flags);
+	
+	/* Translate the pass-through header into an internal MV_Request. */
+	pReq = List_GetFirstEntry((&pHBA->Free_Request), MV_Request, Queue_Pointer);	
+	if ( NULL == pReq ) {
+		ret = -ENOMEM;
+		spin_unlock_irqrestore(&pHBA->lock, flags);
+		goto clean_pspbuf;
+	}
+
+	pReq->Cmd_Initiator = pHBA;
+	pReq->Org_Req = pReq;/*No ideas.*/
+	pReq->Scsi_Status = psptdwb->sptd.ScsiStatus;
+	pReq->Device_Id = psptdwb->sptd.TargetId;
+	pReq->Cmd_Flag = 0;
+
+	if (psptdwb->sptd.DataTransferLength == 0) {
+		pReq->Cmd_Flag |= CMD_FLAG_NON_DATA;
+	} else {
+		if (SCSI_IS_READ(psptdwb->sptd.Cdb[0]))
+			pReq->Cmd_Flag |= CMD_FLAG_DATA_IN;
+		if ( SCSI_IS_READ(psptdwb->sptd.Cdb[0]) || SCSI_IS_WRITE(psptdwb->sptd.Cdb[0]) )
+			pReq->Cmd_Flag |= CMD_FLAG_DMA;
+	}
+
+	pReq->Data_Transfer_Length = psptdwb->sptd.DataTransferLength;
+	pReq->Data_Buffer = psptdwb->sptd.DataBuffer;
+	pReq->Sense_Info_Buffer = psptdwb->Sense_Buffer;
+
+	SGTable_Init(&pReq->SG_Table, 0);
+	if ( psptdwb->sptd.DataTransferLength ) {
+		/*GenerateSGTable(pHBA, pSCmd, &pReq->SG_Table);*/
+	}
+	
+	memcpy(pReq->Cdb, psptdwb->sptd.Cdb, MAX_CDB_SIZE);
+	pReq->Context = NULL;
+	pReq->LBA.value = 0;    
+	pReq->Sector_Count = 0; 
+	pReq->Completion = ioctlcallback;
+
+	List_Add(&pReq->Queue_Pointer, &pHBA->Waiting_Request);
+	pHBA->Io_Count++;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
+	atomic_set(&pHBA->hba_sync, 1);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11) */
+	/* Kick the waiting list, then sleep until completion or timeout. */
+	HBA_HandleWaitingList(pHBA);
+	spin_unlock_irqrestore(&pHBA->lock, flags);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
+	if ( !__hba_wait_for_atomic_timeout(&pHBA->hba_sync, 
+					    HBA_REQ_TIMER_IOCTL*HZ) ) {
+#else
+	if ( !wait_for_completion_timeout(&pHBA->cmpl, 
+					  HBA_REQ_TIMER_IOCTL*HZ) ) {
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11) */
+		MV_DBG(DMSG_HBA, "__MV__ ioctl req timed out.\n");
+	        ret = -EIO;
+	        goto clean_pspbuf;
+	}
+
+	if (psptdwb->sptd.DataTransferLength) {
+		/* Copy the bounce buffer back to the caller's buffer. */
+		error = copy_to_user(user_data,
+				     psptdwb->sptd.DataBuffer,
+				     psptdwb->sptd.DataTransferLength);
+		if (error) {
+			ret = -EIO;
+			goto clean_pspbuf;
+		}
+	}
+
+	/* Restore the caller's pointer so the header we copy back does
+	 * not expose the kernel bounce-buffer address. */
+	if (user_data)
+		psptdwb->sptd.DataBuffer = user_data;
+		
+	error = copy_to_user((void*)arg, psptdwb, sptdwb_size);
+	
+	if (error)
+		ret = -EIO;
+
+clean_pspbuf:
+	/* data lives in the static ioctl_buf - nothing to free here */
+clean_psp:
+	kfree(psptdwb);
+	return ret;
+}
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/linux/linux_iface.h linux-2.6.25/drivers/scsi/mv/linux/linux_iface.h
--- linux-2.6.25.orig/drivers/scsi/mv/linux/linux_iface.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/linux/linux_iface.h	2008-07-28 18:42:43.335188743 +0200
@@ -0,0 +1,50 @@
+/*
+ *
+ *  Kernel/CLI interface
+ *
+ *  July 2006
+ *  A.C. <ake at marvell dot com>
+ *
+ */
+
+#ifndef __MV_HBA_LINUX_INTERFACE__
+#define __MV_HBA_LINUX_INTERFACE__
+
+/*Request Structure.*/
+#define SENSE_INFO_BUFFER_SIZE		32
+#define MAX_COMMAND_SIZE		16
+
+/*For Character Device Interface*/
+#define MV_DEVICE_MAX_SLOT 4
+
+/* Parenthesized so the product stays atomic inside larger expressions
+ * (e.g. a future "x / LDINFO_NUM" would otherwise mis-associate). */
+#define LDINFO_NUM (MAX_LD_SUPPORTED * MAX_NUM_ADAPTERS)
+#define HDINFO_NUM (MAX_HD_SUPPORTED * MAX_NUM_ADAPTERS)
+
+/* Windows-style SCSI pass-through header exchanged with the CLI tool.
+ * NOTE(review): 'unsigned long' and the pointer change size between
+ * 32-bit and 64-bit builds, so a 32-bit management tool on a 64-bit
+ * kernel would need a compat_ioctl handler - confirm the tool is always
+ * built with the same word size as the kernel. */
+typedef struct _SCSI_PASS_THROUGH_DIRECT {
+	unsigned short Length;
+	unsigned char  ScsiStatus;
+	unsigned char  PathId;
+	unsigned char  TargetId;
+	unsigned char  Lun;
+	unsigned char  CdbLength;
+	unsigned char  SenseInfoLength;
+	unsigned char  DataIn;
+	unsigned long  DataTransferLength;
+	unsigned long  TimeOutValue;
+	void           *DataBuffer;
+	unsigned long  SenseInfoOffset;
+	unsigned char  Cdb[16];
+}SCSI_PASS_THROUGH_DIRECT, *PSCSI_PASS_THROUGH_DIRECT;
+
+/* Pass-through header plus an inline sense buffer, as used by mv_ioctl(). */
+typedef struct _SCSI_PASS_THROUGH_DIRECT_WITH_BUFFER{
+	SCSI_PASS_THROUGH_DIRECT        sptd;
+	unsigned long                   Filler;
+	unsigned char                   Sense_Buffer[SENSE_INFO_BUFFER_SIZE];
+}SCSI_PASS_THROUGH_DIRECT_WITH_BUFFER, *PSCSI_PASS_THROUGH_DIRECT_WITH_BUFFER;
+
+int mv_linux_proc_info(struct Scsi_Host *pSHost, char *pBuffer, 
+		       char **ppStart,  off_t offset, int length, int inout);
+
+void IOHBARequestCallback(MV_PVOID This, PMV_Request pReq);
+
+#endif /* ifndef __MV_HBA_LINUX_INTERFACE__ */
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/linux/linux_main.c linux-2.6.25/drivers/scsi/mv/linux/linux_main.c
--- linux-2.6.25.orig/drivers/scsi/mv/linux/linux_main.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/linux/linux_main.c	2008-07-28 18:42:43.336188708 +0200
@@ -0,0 +1,525 @@
+/*
+ * Linux Driver for 61xx
+ * Copyright (C) 2006 Marvell Technology Group Ltd.. All Rights Reserved.
+ * linux_main.c
+ * lily initialized on Feb. 15 2006
+ *
+ *  ioctl handler has been implemented.
+ *  June 2006, Zax Liu < zaxl at marvell dot com >
+ *
+ *  implement ioctl the 2.6.11 plus way ( not rely on BKL )
+ *  July 2006, Albert Ke < ake at marvell dot com >
+ *
+ */
+
+#include "mv_os.h"
+#include "mv_include.h"
+
+#include "hba_header.h"
+
+#include "linux_main.h"
+#include "linux_iface.h"
+#include "linux_helper.h"
+
+#include "com_define.h" 
+#include "com_type.h"
+#include "mv_config.h"
+#include "mv_include.h"
+
+/* 
+ * module parameter 
+ *
+ * refer to ../common/com_dbg.h 
+ */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 7)
+unsigned int mv_dbg_opts = 0;
+module_param(mv_dbg_opts, uint, S_IRWXU | S_IRWXG);
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 7) */
+
+/* PCI IDs of the supported Thor-family controllers; {0} terminates. */
+static const struct pci_device_id mv_pci_ids[] = {
+	{PCI_DEVICE(VENDOR_ID, DEVICE_ID_THORLITE_0S1P)},
+	{PCI_DEVICE(VENDOR_ID, DEVICE_ID_THORLITE_2S1P)},
+	{PCI_DEVICE(VENDOR_ID, DEVICE_ID_THOR_4S1P)},
+	{PCI_DEVICE(VENDOR_ID, DEVICE_ID_THOR_4S1P_NEW)},
+	{PCI_DEVICE(VENDOR_ID, DEVICE_ID_THORLITE_2S1P_WITH_FLASH)},
+	{0}
+};
+
+/* All probed adapter extensions, linked via HBA_Extension.next. */
+static struct list_head mv_hba_ext_list;
+
+unsigned int mv_device_count;
+/* TODO : try to get rid of this - A.C. */
+/* NOTE(review): mv_probe() indexes this with mv_device_count++ without
+ * checking against MV_DEVICE_MAX_SLOT (4) - a fifth adapter would
+ * overflow the array. */
+PHBA_Extension mv_device_extension_list[MV_DEVICE_MAX_SLOT];
+
+/* Character-device major; -1 until the first successful probe. */
+int mv_major = -1;
+extern struct file_operations mv_fops;
+
+/*
+ * Tear down one adapter: detach the SCSI host, send a final shutdown
+ * request to the firmware, stop the housekeeping timer, release the IRQ,
+ * unmap the BARs and free the extension.
+ *
+ * Fix: del_timer_sync() is now called without phba->lock held.  It must
+ * wait for a concurrently-running timer handler to finish, so calling it
+ * under a lock the handler may itself take is a deadlock (the timer API
+ * requires that the caller not hold locks the handler needs).
+ */
+static void release_host(PHBA_Extension phba)
+{
+	int i;
+	struct pci_dev *pcidev = phba->pcidev;
+
+	scsi_remove_host(phba->host);
+	scsi_host_put(phba->host);
+
+	phba->host = NULL;
+
+	hba_send_shutdown_req(phba);
+
+	/* Must run unlocked - see function comment. */
+	del_timer_sync(&phba->timer);
+
+	free_irq(phba->pcidev->irq, phba);
+
+	for ( i=0; i<MAX_BASE_ADDRESS; i++ )
+		if (pci_resource_flags(pcidev, i) & IORESOURCE_MEM)
+			iounmap(phba->Base_Address[i]);
+
+	mv_hba_release_ext(phba);
+	mv_device_count--;
+}
+
+
+/* notifier block to get notified on system shutdown/halt/reboot/down */
+/*
+ * Reboot-notifier callback: on shutdown/halt/reboot, flush a shutdown
+ * request to every adapter and stop its housekeeping timer so the
+ * firmware quiesces before power goes away.
+ *
+ * Fix: del_timer_sync() is now called without phba->lock held; it waits
+ * for a running timer handler, so holding a lock the handler may take
+ * would deadlock (same issue as in release_host()).
+ */
+static int mv_linux_halt(struct notifier_block *nb, unsigned long event,
+			 void *buf)
+{
+	PHBA_Extension phba = NULL;
+
+	switch (event) {
+	case SYS_RESTART:
+	case SYS_HALT:
+	case SYS_POWER_OFF:
+		list_for_each_entry(phba, &mv_hba_ext_list, next) {
+			hba_send_shutdown_req(phba);
+
+			/* Must run unlocked - see function comment. */
+			del_timer_sync(&phba->timer);
+		}
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+/* Reboot notifier hooking mv_linux_halt(); remaining fields default to
+ * NULL/0, same as the old positional {mv_linux_halt, NULL, 0}. */
+static struct notifier_block mv_linux_notifier = {
+	.notifier_call = mv_linux_halt,
+};
+
+
+/*
+ * Shared-IRQ handler: delegate to the core module's ISR under the HBA
+ * lock and report whether the interrupt was ours.  The pt_regs argument
+ * disappeared from the IRQ handler signature in 2.6.19.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
+static irqreturn_t mv_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
+#else
+static irqreturn_t mv_intr_handler(int irq, void *dev_id)
+#endif
+{
+	/* MV_FALSE should be equal to IRQ_NONE (0) */
+	irqreturn_t retval = MV_FALSE;
+	unsigned long flags;
+	MV_PVOID pcore;
+
+	PHBA_Extension pHBA = (PHBA_Extension)dev_id;
+	PModule_Manage module_manage = &pHBA->Module_Manage;
+	
+	spin_lock_irqsave(&pHBA->lock, flags);
+	pcore = module_manage->resource[MODULE_CORE].module_extension;
+	retval = Core_InterruptServiceRoutine(pcore);
+	spin_unlock_irqrestore(&pHBA->lock, flags);
+
+	return IRQ_RETVAL(retval);
+}
+
+
+/*
+ * SCSI EH timeout hook: log the timed-out command and give it a few more
+ * timer periods before letting normal error handling take over.
+ * NOTE(review): 'i' is a single static counter shared by ALL commands
+ * and never reset, so after six timeouts anywhere in the system every
+ * later timeout immediately returns EH_NOT_HANDLED.  A per-command
+ * retry count (e.g. in scsi_cmnd) is presumably what was intended -
+ * confirm and fix.
+ */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 7)
+static enum scsi_eh_timer_return mv_linux_timed_out(struct scsi_cmnd *cmd)
+{
+	static int i;
+
+	MV_DBG(DMSG_SCSI, "__MV__ scmd timed out : ");
+	MV_DBG( DMSG_SCSI,
+		"%p (%d/%d/%d cdb=(%x-%x-%x)).\n", 
+		cmd, mv_scmd_channel(cmd), 
+		mv_scmd_target(cmd), mv_scmd_lun(cmd),
+		*(cmd->cmnd), *(cmd->cmnd+1), 
+		*(cmd->cmnd+2) );
+
+	if ( i++ > 5 )
+		return EH_NOT_HANDLED;
+	else
+		return EH_RESET_TIMER;
+}
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 7) */
+
+
+/*
+ * SCSI mid-layer queuecommand entry: translate the scsi_cmnd into an
+ * internal MV_Request and queue it to the HBA, all under pHBA->lock.
+ * Returns 0 (command accepted/completed) or SCSI_MLQUEUE_HOST_BUSY when
+ * no request slot is free.
+ * NOTE(review): when done == NULL the command is dropped with return 0
+ * and never completed - presumably unreachable from the mid-layer, but
+ * an error return would be safer.
+ */
+static int mv_linux_queue_command(struct scsi_cmnd *pSCmd, 
+				  void (*done) (struct scsi_cmnd *))
+{
+	struct Scsi_Host *phost = mv_scmd_host(pSCmd);
+	PModule_Header pheader = get_hba_ext_header(phost);
+	PHBA_Extension pHBA = (PHBA_Extension)head_to_hba(pheader);
+	PMV_Request pReq;
+	unsigned long flags;
+
+	if ( done == NULL ) {
+		MV_PRINT( ": in queuecommand, done function can't be NULL\n");
+		return 0;
+    	}
+
+	spin_lock_irqsave(&pHBA->lock, flags);
+
+	MV_DBG(DMSG_SCSI_FREQ,
+	       "mv_linux_queue_command %p (%d/%d/%d/%d cdb=(%x-%x-%x))\n", 
+	       pSCmd, phost->host_no, mv_scmd_channel(pSCmd), 
+	       mv_scmd_target(pSCmd), mv_scmd_lun(pSCmd),
+	       *(pSCmd->cmnd), *(pSCmd->cmnd+1), 
+	       *(pSCmd->cmnd+2));
+
+	pSCmd->result = 0;
+ 	pSCmd->scsi_done = done;
+	MV_SCp(pSCmd)->bus_address = 0;
+	MV_SCp(pSCmd)->mapped = 0;
+	MV_SCp(pSCmd)->map_atomic = 0;
+	
+	/* Only channel 0 exists; anything else is a bad target. */
+	if ( mv_scmd_channel(pSCmd) ) {
+		pSCmd->result = DID_BAD_TARGET << 16;
+		goto done;
+	}
+
+	/* 
+	 * Get mv_request resource and translate the scsi_cmnd request to mv_request.
+	 */
+	/* NOTE(review): the assert contradicts the NULL check right below -
+	 * running out of requests is clearly expected in production. */
+	MV_DASSERT( !List_Empty(&pHBA->Free_Request) );
+	pReq = List_GetFirstEntry((&pHBA->Free_Request), MV_Request, Queue_Pointer);
+	if ( pReq == NULL ) {
+		spin_unlock_irqrestore(&pHBA->lock, flags);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+	
+	if ( !TranslateOSRequest(pHBA,pSCmd, pReq) ) {
+		/* 
+		 * Even TranslateOSRequest failed, 
+		 * it still should set some of the variables to the MV_Request
+		 * especially MV_Request.Org_Req and MV_Request.Scsi_Status;
+		 */
+		MV_DBG( DMSG_HBA,
+			"ERROR - Translation from OS Request failed.\n" );
+		/* balance the decrement done inside the callback */
+		pHBA->Io_Count++;
+
+		HBARequestCallback(pHBA, pReq);
+		spin_unlock_irqrestore(&pHBA->lock, flags);
+		return 0;
+	}
+
+	/* 
+	 * Queue this request. 
+	 * Cannot return with BUSY when core driver is not ready. It'll fail hibernation. 
+	 */
+	List_AddTail(&pReq->Queue_Pointer, &pHBA->Waiting_Request);
+	pHBA->Io_Count++;
+
+	if ( pHBA->State != DRIVER_STATUS_STARTED ) {
+		MV_ASSERT(0);
+		/*if ( pHBA->State==DRIVER_STATUS_IDLE )
+		  {
+		  pHBA->State = DRIVER_STATUS_STARTING;
+		  Module_StartAll(module_manage, MODULE_CORE);
+		  }*/
+	} else {
+		HBA_HandleWaitingList(pHBA);
+	}
+	spin_unlock_irqrestore(&pHBA->lock, flags);
+
+	return 0;
+done:
+	/* Complete immediately (bad target) while still holding the lock. */
+	pSCmd->scsi_done(pSCmd);
+	spin_unlock_irqrestore(&pHBA->lock, flags);
+	return 0;
+}
+
+/* Dead code: abort handling was never wired up (see the matching #if 0
+ * entries in mv_driver_template).  Kept for reference only. */
+#if 0
+static void ac_dump_info(struct scsi_cmnd *cmd)
+{
+	return;
+}
+
+static int mv_linux_abort(struct scsi_cmnd *cmd)
+{
+	struct Scsi_Host *host;
+	PHBA_Extension phba;
+	int  ret = FAILED;
+
+	MV_PRINT("__MV__ abort command %p.\n", cmd);
+
+	return ret;
+}
+#endif /* 0 */
+
+/* Host-reset EH handler: no reset support is implemented, so just log
+ * and report FAILED to the mid-layer. */
+static int mv_linux_reset (struct scsi_cmnd *cmd)
+{
+	MV_PRINT("__MV__ reset handler %p.\n", cmd);
+	return FAILED;
+}
+
+/*
+ * SCSI host template for the Thor adapters.  Rewritten with C99
+ * designated initializers (".field =") per kernel CodingStyle; the old
+ * "field:" form is an obsolete GNU extension flagged by checkpatch.
+ * eh_timed_out only lives here for 2.6.8..2.6.15; later kernels hook it
+ * via the transport template below.
+ */
+struct scsi_host_template mv_driver_template = {
+	.module                  = THIS_MODULE,
+	.name                    = "Marvell 88SE61xx Storage Controller",
+	.proc_name               = mv_driver_name,
+	.proc_info               = mv_linux_proc_info,
+	.queuecommand            = mv_linux_queue_command,
+#if 0
+	.eh_abort_handler        = mv_linux_abort,
+	.eh_device_reset_handler = mv_linux_reset,
+	.eh_bus_reset_handler    = mv_linux_reset,
+#endif /* 0 */
+	.eh_host_reset_handler   = mv_linux_reset,
+#if  LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 7) && \
+	LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16)
+	.eh_timed_out            = mv_linux_timed_out,
+#endif
+	/* save 2 request slots for the ioctl path */
+	.can_queue               = MAX_REQUEST_NUMBER-2,
+	.this_id                 = -1,
+	.max_sectors             = MV_MAX_TRANSFER_SECTOR,
+	.sg_tablesize            = MAX_SG_ENTRY,
+	.cmd_per_lun             = MAX_REQUEST_NUMBER-2,
+	.use_clustering          = DISABLE_CLUSTERING,
+	.emulated                = 0
+};
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 15) /* should be .16 */
+/* From 2.6.16 on, the eh_timed_out hook moved from scsi_host_template to
+ * the transport template. */
+static struct scsi_transport_template mv_transport_template = {
+        .eh_timed_out   =  mv_linux_timed_out,
+};
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 15) */
+
+/*
+ * PCI probe: enable the device, claim its regions, pick 64- or 32-bit
+ * DMA masks, allocate and start the HBA extension, hook the IRQ, wait
+ * for module init, register the shared character device on first probe,
+ * then attach and scan the SCSI host.  Returns 0 or a negative errno.
+ *
+ * NOTE(review): 'ret' is declared unsigned int yet carries negative
+ * errnos; it works only via conversion on return - should be int.
+ * NOTE(review): err_add_host unregisters the chrdev even though it may
+ * be shared with an earlier successfully-probed adapter, and the
+ * mv_device_count++ just above is never rolled back on that path.
+ * NOTE(review): mv_device_extension_list is indexed without checking
+ * mv_device_count against MV_DEVICE_MAX_SLOT.
+ */
+static int mv_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	unsigned int ret = PCIBIOS_SUCCESSFUL;
+	struct Scsi_Host *shost = NULL;
+	PHBA_Extension   phba  = NULL;
+	
+	ret = pci_enable_device(dev);
+	if (ret) {
+		printk("THOR : enable device failed.\n");
+		return ret;
+	}
+	
+	ret = pci_request_regions(dev, mv_driver_name);
+	if (ret)
+		goto err_req_region;
+	
+	
+	/* Prefer 64-bit DMA; fall back to 32-bit when unsupported.
+	 * NOTE(review): in the 64-bit branch a failed 64-bit consistent
+	 * mask silently drops only the consistent mask to 32-bit while
+	 * streaming stays 64-bit - presumably intended, but confirm. */
+	if ( !pci_set_dma_mask(dev, DMA_64BIT_MASK) ) {
+		ret = pci_set_consistent_dma_mask(dev, DMA_64BIT_MASK);
+		if (ret) {
+			ret = pci_set_consistent_dma_mask(dev, 
+							  DMA_32BIT_MASK);
+			if (ret)
+				goto err_dma_mask;
+		}
+	} else {
+		ret = pci_set_dma_mask(dev, DMA_32BIT_MASK);
+		if (ret)
+			goto err_dma_mask;
+		
+		ret = pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK);
+		if (ret) 
+			goto err_dma_mask;
+		
+	}
+		
+	pci_set_master(dev);
+
+	printk("Marvell (S)ATA Controller is found, using IRQ %d.\n",
+	       dev->irq);
+	
+	phba = (PHBA_Extension) mv_hba_init_ext(dev);
+
+	if ( NULL == phba ) {
+		ret = -ENOMEM;
+		goto err_dma_mask;
+	}
+	
+	list_add_tail(&phba->next, &mv_hba_ext_list);
+		
+	/* increase hba counter? */
+
+	spin_lock_init(&phba->lock);
+
+	Module_InitializeAll(&phba->Module_Manage, MAX_REQUEST_NUMBER);
+
+	init_timer(&phba->timer);
+	sema_init(&phba->sem, 0);
+	init_completion(&phba->cmpl);
+	
+	spin_lock_irq(&phba->lock);
+	Module_StartAll(&phba->Module_Manage, MODULE_CORE);
+	spin_unlock_irq(&phba->lock);
+
+	shost = scsi_host_alloc(&mv_driver_template, sizeof(void *));
+	
+	if ( NULL == shost ) {
+		printk("THOR : Unable to allocate a scsi host.\n");
+		goto err_host_alloc;
+	}
+	
+	/* TODO : a saner way is needed - A.C. */
+	/* hostdata holds a single pointer to our module header. */
+	*((PModule_Header *)shost->hostdata) = \
+		Module_GetModuleHeader(phba);
+
+	phba->host = shost;
+
+	shost->irq          = dev->irq;
+	shost->max_id       = MV_MAX_TARGET_NUMBER;
+	shost->max_lun      = MV_MAX_LUN_NUMBER;
+	shost->max_channel  = 0;
+	shost->max_cmd_len  = 16;
+	
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 15) /* should be .16 */
+        shost->transportt   = &mv_transport_template;
+#endif
+
+	/* SA_SHIRQ was renamed IRQF_SHARED in 2.6.18/19. */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 19)
+	if ((ret = request_irq(dev->irq, mv_intr_handler, IRQF_SHARED,
+				mv_driver_name, phba)) < 0) {
+#else
+	if ((ret = request_irq(dev->irq, mv_intr_handler, SA_SHIRQ,
+				mv_driver_name, phba)) < 0) {
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 19) */
+		printk("THOR : Error upon requesting IRQ %d.\n", dev->irq);
+		goto  err_request_irq;
+	}
+
+	/* wait for MODULE(CORE,RAID,HBA) init */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
+	atomic_set(&phba->hba_sync, 1);
+	if (0 == __hba_wait_for_atomic_timeout(&phba->hba_sync, 30 * HZ)) {
+#else
+	if (0 == wait_for_completion_timeout(&phba->cmpl, 30 * HZ)) {
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11) */
+		ret = -ENODEV;
+		goto err_wait_irq;
+
+	}
+
+	/* First adapter: hook the reboot notifier and start the
+	 * housekeeping thread. */
+	if (mv_device_count == 0) {
+		register_reboot_notifier(&mv_linux_notifier); 
+		hba_house_keeper_run();
+	}
+	
+	/* TODO : I'm sure there's a better/saner way - A.C. */
+	if (-1 == mv_major) {
+		if ((mv_major = register_chrdev(0, 
+						mv_driver_name, 
+						&mv_fops)) < 0) {
+			printk("THOR : Failed to register character device");
+			ret = -ENODEV;
+			goto err_register_chrdev;
+		}
+	}
+	
+	mv_device_extension_list[mv_device_count++] = phba;
+
+	if (0 != (ret = scsi_add_host(shost, &dev->dev)))
+		goto err_add_host;
+
+	scsi_scan_host(shost);
+
+	return 0;
+
+err_add_host:
+	if (mv_major >= 0)
+		unregister_chrdev(mv_major, mv_driver_name);
+
+err_register_chrdev:
+	if (mv_device_count == 0) {
+		unregister_reboot_notifier(&mv_linux_notifier); 
+	}
+	
+err_wait_irq:
+	free_irq(dev->irq, phba);	
+
+err_request_irq:
+	scsi_host_put(shost);
+
+err_host_alloc:
+	list_del(&phba->next);
+	Module_ShutdownAll(&phba->Module_Manage);
+	mv_hba_release_ext(phba);
+
+err_dma_mask:
+	pci_release_regions(dev);
+
+err_req_region:
+	pci_disable_device(dev);
+
+	return ret;
+}
+
+/*
+ * PCI remove: find the extension owning this pci_dev, tear it down and
+ * release the PCI resources; drop the reboot notifier with the last one.
+ * NOTE(review): the matching mv_device_extension_list[] slot is never
+ * cleared, so a later mv_open/mv_ioctl on that minor would follow a
+ * stale pointer - confirm hot-unplug is actually possible here.
+ */
+static void mv_remove(struct pci_dev *dev)
+{
+	PHBA_Extension phba;
+	
+	list_for_each_entry(phba, &mv_hba_ext_list, next) {
+		if ( phba->pcidev == dev ) {
+			list_del(&phba->next);
+			release_host(phba);
+
+			pci_release_regions(dev);
+			pci_disable_device(dev);
+			break; /* one hba for one pci device */
+		}
+	}
+
+	/* release_host() decremented mv_device_count. */
+	if (mv_device_count == 0)
+		unregister_reboot_notifier(&mv_linux_notifier); 
+}
+
+/* PCI driver glue binding the Thor device IDs to probe/remove. */
+static struct pci_driver mv_pci_driver = {
+	.name     = "mv_thor",
+	.id_table = mv_pci_ids,
+	.probe    = mv_probe,
+	.remove   = mv_remove,
+};
+
+/*
+ * Module entry point: set up the adapter list and the housekeeping
+ * thread, then register with the PCI core (which invokes mv_probe() for
+ * each matching device).
+ *
+ * Fix: the old "mv_dbg_opts = 0;" is gone.  mv_dbg_opts is a module
+ * parameter that the module loader sets BEFORE init runs, so zeroing it
+ * here silently discarded any mv_dbg_opts=... given on the command
+ * line; it also referenced a variable that is only declared on kernels
+ * newer than 2.6.7.  The static initializer already defaults it to 0.
+ */
+static int __init mv_linux_driver_init(void)
+{
+	INIT_LIST_HEAD(&mv_hba_ext_list);
+	/* bg thread init - refer to hba_timer.[ch] */
+	hba_house_keeper_init();
+	
+	return pci_register_driver(&mv_pci_driver);
+}
+
+/*
+ * Module exit: drop the shared character device (registered lazily by
+ * the first probe), stop the housekeeping thread, and unregister the
+ * PCI driver (which runs mv_remove() for every bound device).
+ */
+static void __exit mv_linux_driver_exit(void)
+{
+	if (mv_major >= 0) {
+		unregister_chrdev(mv_major, mv_driver_name);
+	}
+
+	hba_house_keeper_exit();
+	
+	pci_unregister_driver(&mv_pci_driver);
+}
+
+MODULE_AUTHOR ("Marvell Semiconductor Inc.,");
+MODULE_DESCRIPTION ("thor SATA hba driver");
+
+//MODULE_LICENSE("Proprietary");
+MODULE_LICENSE("GPL");
+
+MODULE_DEVICE_TABLE(pci, mv_pci_ids);
+
+module_init(mv_linux_driver_init);
+module_exit(mv_linux_driver_exit);
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/linux/linux_main.h linux-2.6.25/drivers/scsi/mv/linux/linux_main.h
--- linux-2.6.25.orig/drivers/scsi/mv/linux/linux_main.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/linux/linux_main.h	2008-07-28 18:42:43.336188708 +0200
@@ -0,0 +1,44 @@
+#ifndef _LINUX_MAIN_H
+#define _LINUX_MAIN_H
+
+#include "mv_os.h"
+
+#include "hba_header.h"
+
+extern PHBA_Extension g_Linux_HBA;
+
+struct _MV_SCP {	/* driver-private per-command state, overlaid on scsi_cmnd::SCp via MV_SCp() */
+	MV_U16           mapped;	/* non-zero once the data buffer was DMA-mapped; checked before unmapping on completion */
+	MV_U16           map_atomic;	/* non-zero while the buffer is kmap_atomic'ed (see linux_sense.c ATAPI fixup) */
+	BUS_ADDRESS bus_address;	/* DMA address for the single-buffer (non-scatterlist) case */
+};
+
+#define MV_SCp(cmd) ((struct _MV_SCP *) &((struct scsi_cmnd *)cmd)->SCp)	/* driver-private area of a scsi_cmnd */
+
+#define get_hba_ext_header(phost) (*(PModule_Header *)phost->hostdata)	/* module header stashed in Scsi_Host::hostdata */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+#define mv_scmd_host(cmd)    cmd->device->host
+#define mv_scmd_channel(cmd) cmd->device->channel
+#define mv_scmd_target(cmd)  cmd->device->id
+#define mv_scmd_lun(cmd)     cmd->device->lun
+#else
+#define mv_scmd_host(cmd)    cmd->host
+#define mv_scmd_channel(cmd) cmd->channel
+#define mv_scmd_target(cmd)  cmd->target
+#define mv_scmd_lun(cmd)     cmd->lun
+#endif
+
+#define SCSI_IS_INSTANT(cmd) \
+             ((cmd == SCSI_CMD_INQUIRY) || \
+	      (cmd == SCSI_CMD_READ_CAPACITY_10) || \
+	      (cmd == SCSI_CMD_READ_CAPACITY_16) || \
+	      (cmd == SCSI_CMD_REPORT_LUN) || \
+	      (cmd == SCSI_CMD_MODE_SENSE_6) || \
+	      (cmd == SCSI_CMD_MODE_SENSE_10))
+
+#define LO_BUSADDR(x) ((MV_U32)(x))	/* low 32 bits of a bus address */
+#define HI_BUSADDR(x) (sizeof(BUS_ADDRESS)>4? (MV_U32)(((unsigned long long)(x))>>32) : 0)	/* high 32 bits; widen first -- (x)>>32 on a 32-bit x is UB even in the untaken branch */
+
+#endif /*_LINUX_MAIN_H*/
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/linux/linux_sense.c linux-2.6.25/drivers/scsi/mv/linux/linux_sense.c
--- linux-2.6.25.orig/drivers/scsi/mv/linux/linux_sense.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/linux/linux_sense.c	2008-07-28 19:15:57.969188999 +0200
@@ -0,0 +1,103 @@
+#include "mv_include.h"
+#include "mv_os.h"
+
+#include "hba_header.h"
+#include "linux_main.h"
+#include "linux_sense.h"
+#include "linux_helper.h"
+
+void HBA_Translate_Req_Status_To_OS_Status(	/* complete a request: fix up/unmap data buffers, map status to scmd->result, call scsi_done */
+	IN PHBA_Extension pHBA,
+	IN struct scsi_cmnd *scmd,
+	IN PMV_Request pReq
+	)
+{
+	PSENSE_DATA  senseBuffer = (PSENSE_DATA) scmd->sense_buffer;
+	int i;
+	unsigned char *buf;
+	struct scatterlist *sg;
+
+	/* ATAPI MODE_SENSE fixup; NOTE(review): scmd is dereferenced above, so the NULL test here is moot -- confirm intent */
+	if (scmd &&
+	    (SCSI_CMD_MODE_SENSE_6 == scmd->cmnd[0]) &&
+	    scmd->sdb.table.nents && 
+	    !(MV_SCp(scmd)->map_atomic)) {
+		/* 
+		 * MODE_SENSE is an instant cmd for SATA devices, thus 
+		 * map_atomic should be 1 before we call HBA_kunmap_sg.
+		 * while for ATAPI it'll call HBA_kunmap_sg before handling
+		 * the command, so its map_atomic should be 0, and we'll need
+		 * to copy its buffer from sg list.
+		 * - this is how we tell ATAPI from ATA/SATA mode sense -
+		 */
+		sg = (struct scatterlist *) pReq->Data_Buffer;
+		buf =(unsigned char *) kmap_atomic(sg_page(sg), KM_IRQ0)\
+			+ sg->offset;
+		/* 
+		 * ATAPI's Mode parameter header is always 8 bytes 
+		 * while MODE_SENSE_6's is 4 bytes.
+		 */
+		for (i=4;i<pReq->Data_Transfer_Length-4;i++){	/* NOTE(review): only the first sg entry is patched -- assumes the data fits one segment */
+			*(buf+i) = *(buf+i+4);	/* shift payload down 4 bytes to drop the extra header */
+		}
+			
+		kunmap_atomic(buf, KM_IRQ0);
+	}
+
+	HBA_kunmap_sg(pReq);
+
+	if (MV_SCp(scmd)->mapped) {	/* undo the DMA mapping established at submit time */
+		if (scmd->sdb.table.nents) {
+			MV_DBG(DMSG_FREQ, "__MV__ call pci_unmap_sg.\n");
+			pci_unmap_sg(pHBA->pcidev,
+				     (struct scatterlist *)scmd->sdb.table.sgl,
+				     scmd->sdb.table.nents,
+				     scsi_to_pci_dma_dir(scmd->sc_data_direction));
+		} else {
+			MV_DBG(DMSG_FREQ,"__MV__ call pci_unmap_single.\n");
+			pci_unmap_single(pHBA->pcidev,
+					 MV_SCp(scmd)->bus_address,
+					 scmd->sdb.length,
+					 scsi_to_pci_dma_dir(scmd->sc_data_direction));
+		}
+	}
+
+	MV_DBG(DMSG_SCSI_FREQ,
+	       "HBA_Translate_Req_Status_To_OS_Status:"
+	       " pReq->Scsi_Status = %x pcmd = %p.\n", 
+	       pReq->Scsi_Status, scmd);
+	
+	switch(pReq->Scsi_Status) {	/* map internal request status onto midlayer result codes */
+	case REQ_STATUS_SUCCESS:
+		scmd->result = (DID_OK<<16);
+		break;
+	case REQ_STATUS_MEDIA_ERROR: //TBD: DID_BAD_TARGET is a questionable mapping for a media error
+		scmd->result = (DID_BAD_TARGET<<16);
+		break;
+	case REQ_STATUS_BUSY:
+		scmd->result = (DID_BUS_BUSY<<16);
+		break;
+	case REQ_STATUS_NO_DEVICE:
+		scmd->result = (DID_NO_CONNECT<<16);
+		break;
+	case REQ_STATUS_HAS_SENSE:
+		/* Sense buffer data is valid already. */
+		scmd->result  = (DRIVER_SENSE << 24) | (DID_OK << 16);
+		senseBuffer->Valid = 1;	/* byte 0 bit 7: information field valid (see SENSE_DATA) */
+
+		MV_DBG(DMSG_SCSI, "MV Sense: response %x SK %s length %x ASC %x "
+		       "ASCQ %x.\n", ((MV_PU8)senseBuffer)[0],
+		       MV_DumpSenseKey(((MV_PU8)senseBuffer)[2]),
+		       ((MV_PU8)senseBuffer)[7],
+		       ((MV_PU8)senseBuffer)[12],
+		       ((MV_PU8)senseBuffer)[13]);
+		break;
+	default:
+		scmd->result = (DRIVER_INVALID|SUGGEST_ABORT)<<24;
+		scmd->result |= DID_ABORT<<16;
+		break;
+	}
+
+	scmd->scsi_done(scmd);	/* hand the command back to the SCSI midlayer */
+}
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/linux/linux_sense.h linux-2.6.25/drivers/scsi/mv/linux/linux_sense.h
--- linux-2.6.25.orig/drivers/scsi/mv/linux/linux_sense.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/linux/linux_sense.h	2008-07-28 18:42:43.336188708 +0200
@@ -0,0 +1,28 @@
+#if !defined(_LINUX_SENSE_H)
+#define _LINUX_SENSE_H
+
+typedef struct _SENSE_DATA {	/* fixed-format SCSI sense data; NOTE(review): bitfield order assumes little-endian bitfield layout -- confirm for BE builds */
+    MV_U8 ErrorCode:7;	/* response code (0x70/0x71 per SPC) */
+    MV_U8 Valid:1;	/* information field valid; set by the completion path */
+    MV_U8 SegmentNumber;
+    MV_U8 SenseKey:4;
+    MV_U8 Reserved:1;
+    MV_U8 IncorrectLength:1;	/* ILI */
+    MV_U8 EndOfMedia:1;	/* EOM */
+    MV_U8 FileMark:1;
+    MV_U8 Information[4];
+    MV_U8 AdditionalSenseLength;
+    MV_U8 CommandSpecificInformation[4];
+    MV_U8 AdditionalSenseCode;	/* ASC */
+    MV_U8 AdditionalSenseCodeQualifier;	/* ASCQ */
+    MV_U8 FieldReplaceableUnitCode;
+    MV_U8 SenseKeySpecific[3];
+} SENSE_DATA, *PSENSE_DATA;
+
+void HBA_Translate_Req_Status_To_OS_Status(
+	PHBA_Extension pHBA,
+	struct scsi_cmnd *pSCmd,
+	PMV_Request pReq
+	);
+	
+#endif
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/linux/mv_os.h linux-2.6.25/drivers/scsi/mv/linux/mv_os.h
--- linux-2.6.25.orig/drivers/scsi/mv/linux/mv_os.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/linux/mv_os.h	2008-07-28 18:42:43.337188803 +0200
@@ -0,0 +1,254 @@
+#if !defined(LINUX_OS_H)
+#define LINUX_OS_H
+
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#endif
+
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+#include <linux/reboot.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/kdev_t.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/random.h>
+#include <linux/nmi.h>
+#include <linux/completion.h>
+#include <linux/blkdev.h>
+#include <linux/vmalloc.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/div64.h>
+
+#if defined(__AC_DBG__) && defined(CONFIG_X86)
+#include <linux/timex.h>
+#include <asm/msr.h>
+#endif /* __AC_DBG__ */
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+
+/* OS specific flags */
+#define _OS_LINUX
+#define _64_BIT_COMPILER
+
+#ifdef CONFIG_64BIT
+#ifndef _64_SYS_
+#define _64_SYS_
+#endif
+#endif /* CONFIG_64BIT */
+
+#if defined(__LITTLE_ENDIAN)
+#define __MV_LITTLE_ENDIAN__
+#elif defined(__BIG_ENDIAN)
+#define __MV_BIG_ENDIAN__
+#else
+#error "screwed by endianness"
+#endif 
+
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+#define __MV_LITTLE_ENDIAN_BITFIELD__
+#elif defined(__BIG_ENDIAN_BITFIELD)
+#define __MV_BIG_ENDIAN_BITFIELD__
+#else
+#error "screwed by endianness"
+#endif 
+
+#define CPU_TO_LE_16 cpu_to_le16
+#define CPU_TO_LE_32 cpu_to_le32
+#define LE_TO_CPU_16 le16_to_cpu
+#define LE_TO_CPU_32 le32_to_cpu
+
+#ifndef scsi_to_pci_dma_dir
+	#define scsi_to_pci_dma_dir(scsi_dir) ((int)(scsi_dir))
+#endif
+
+/*
+ *
+ * Primary Data Type Definition 
+ *
+ */
+#include "com_define.h" 
+
+/* the use of it should be controlled ... carefully */
+#ifdef __COLOR_DEBUG__
+#include "color_print.h"
+#endif /* __COLOR_DEBUG__ */
+#ifndef __COLOR_PRINT_H__
+#define RED(x)		x
+#define GREEN(x)	x
+#define BLUE(x)		x
+#define MAGENTA(x)	x
+#define CYAN(x)		x
+#define WHITE(x)	x
+#define YELLOW(x)	x
+#endif /* __COLOR_PRINT_H__ */
+
+
+typedef _MV_U64 BUS_ADDRESS;
+
+#define MV_INLINE __inline
+#define CDB_INQUIRY_EVPD 1 //TBD
+
+/*
+ *
+ *	Exposed function and macro
+ *
+ */
+
+/* System dependent macro for flushing CPU write cache */
+#define MV_CPU_WRITE_BUFFER_FLUSH()
+
+/* System dependent little endian from / to CPU conversions */
+/*
+MV_U64 MV_CPU_TO_LE64(MV_U64 x) { return cpu_to_le64(x); }
+MV_U32 MV_CPU_TO_LE32(MV_U32 x) { return cpu_to_le32(x); }
+MV_U16 MV_CPU_TO_LE16(MV_U16 x) { return cpu_to_le16(x); }
+MV_U64 MV_LE64_TO_CPU(MV_U64 x) { return le64_to_cpu(x); }
+MV_U32 MV_LE32_TO_CPU(MV_U32 x) { return le32_to_cpu(x); }
+MV_U16 MV_LE16_TO_CPU(MV_U16 x) { return le16_to_cpu(x); }
+*/
+
+#define MV_CPU_TO_LE16(x)   cpu_to_le16(x)	
+#define MV_CPU_TO_LE32(x)   cpu_to_le32(x)
+#define MV_CPU_TO_LE64(x)   cpu_to_le64(x)
+
+#define MV_LE16_TO_CPU(x)   le16_to_cpu(x)
+#define MV_LE32_TO_CPU(x)   le32_to_cpu(x)
+#define MV_LE64_TO_CPU(x)   le64_to_cpu(x)
+
+#ifdef __MV_DEBUG__
+#define MV_DEBUG
+#else
+#ifdef MV_DEBUG
+#undef MV_DEBUG
+#endif /* MV_DEBUG */
+#endif /* __MV_DEBUG__ */
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+/* OS macro definition */
+#define MV_MAX_TRANSFER_SECTOR  (MV_MAX_TRANSFER_SIZE/512)
+
+/* register read write: memory io */
+#define MV_REG_WRITE_BYTE(base, offset, val)    \
+    writeb(val, base + offset)
+#define MV_REG_WRITE_WORD(base, offset, val)    \
+    writew(val, base + offset)
+#define MV_REG_WRITE_DWORD(base, offset, val)    \
+    writel(val, base + offset)
+
+#define MV_REG_READ_BYTE(base, offset)			\
+	readb(base + offset)
+#define MV_REG_READ_WORD(base, offset)			\
+	readw(base + offset)
+#define MV_REG_READ_DWORD(base, offset)			\
+	readl(base + offset)
+
+/* register read write: port io */
+#define MV_IO_WRITE_BYTE(base, offset, val)    \
+    outb(val, (unsigned)(MV_PTR_INTEGER)(base + offset))
+#define MV_IO_WRITE_WORD(base, offset, val)    \
+    outw(val, (unsigned)(MV_PTR_INTEGER)(base + offset))
+#define MV_IO_WRITE_DWORD(base, offset, val)    \
+    outl(val, (unsigned)(MV_PTR_INTEGER)(base + offset))
+
+#define MV_IO_READ_BYTE(base, offset)			\
+	inb((unsigned)(MV_PTR_INTEGER)(base + offset))
+#define MV_IO_READ_WORD(base, offset)			\
+	inw((unsigned)(MV_PTR_INTEGER)(base + offset))
+#define MV_IO_READ_DWORD(base, offset)			\
+	inl((unsigned)(MV_PTR_INTEGER)(base + offset))
+
+#define MV_PCI_READ_CONFIG_DWORD(hba, offset, reg) \
+	pci_read_config_dword(((PHBA_Extension)hba)->pcidev, offset, &reg)
+
+#define MV_PCI_WRITE_CONFIG_DWORD(hba, offset, reg) \
+	pci_write_config_dword(((PHBA_Extension)hba)->pcidev, offset, reg)
+
+#define MV_PCI_READ_CONFIG_WORD(hba, offset, reg) \
+	pci_read_config_word(((PHBA_Extension)hba)->pcidev, offset, &reg)
+
+#define MV_PCI_WRITE_CONFIG_WORD(hba, offset, reg) \
+	pci_write_config_word(((PHBA_Extension)hba)->pcidev, offset, reg)
+
+#define MV_LOCK(plock)       spin_lock(plock)
+#define MV_UNLOCK(plock)     spin_unlock(plock)
+
+#define MV_LOCK_IRQ(plock)   do { WARN_ON(irqs_disabled()); \
+                                    spin_lock_irq(plock); \
+                            } while(0)
+
+#define MV_UNLOCK_IRQ(plock) do { \
+                                    spin_unlock_irq(plock); \
+                            } while(0)
+
+#define MV_LOCK_IRQSAVE(plock, flag)   spin_lock_irqsave(plock, flag)
+#define MV_UNLOCK_IRQRESTORE(plock, flag) spin_unlock_irqrestore(plock, flag)
+
+#define MV_DECLARE_TIMER(x) struct timer_list x
+
+
+/*Driver Version for Command Line Interface Query.*/
+#define VER_MAJOR	1
+
+ /* VER_MINOR 1 for RAID, 0 for non-RAID */
+#ifdef RAID_DRIVER
+#define VER_MINOR        1       
+#define VER_BUILD        3
+#else  /* RAID_DRIVER */
+#define VER_MINOR        0
+#define VER_BUILD        9
+#endif /* RAID_DRIVER */
+
+/* OEM Account definition */
+#define VER_OEM_GENERIC  0
+#define VER_OEM_INTEL    1
+#define VER_OEM_ASUS     2
+
+#ifdef __OEM_INTEL__
+#define VER_OEM          VER_OEM_INTEL
+#elif defined(__OEM__ASUS__)
+#define VER_OEM          VER_OEM_ASUS
+#else
+#define VER_OEM          VER_OEM_GENERIC
+#endif /* __OEM_INTEL__ */
+
+#define VER_TEST
+
+#define mv_driver_name   "mv61xx"
+
+/* call VER_VAR_TO_STRING */
+#define NUM_TO_STRING(num1, num2, num3, num4) #num1"."#num2"."#num3"."#num4
+#define VER_VAR_TO_STRING(major, minor, oem, build) NUM_TO_STRING(major, \
+								  minor, \
+								  oem,   \
+								  build)
+
+#define mv_version_linux   VER_VAR_TO_STRING(VER_MAJOR, VER_MINOR,       \
+					     VER_OEM, VER_BUILD) VER_TEST
+
+void HBA_kunmap_sg(void* pReq);
+
+#endif /* LINUX_OS_H */
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/Makefile linux-2.6.25/drivers/scsi/mv/Makefile
--- linux-2.6.25.orig/drivers/scsi/mv/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/Makefile	2008-07-28 18:42:43.337188803 +0200
@@ -0,0 +1,46 @@
+#
+#
+# Note: This Makefile is for 2.6 kernel, non-raid only, at present.
+#
+# V0.0.0.1 Ver.Make for kbuild
+# A.C. <ake at marvell dot com>
+
+CORE_OBJS       := core/core_exp.o core/core_xor.o\
+		   core/core_api.o core/core_init.o core/scsi2sata.o
+
+COMM_OBJS       := common/com_util.o \
+		   common/com_u64.o common/com_scsi.o common/com_tag.o
+
+# OS-Dependent Objs
+OSDEP_OBJS      := linux/linux_sense.o  linux/linux_helper.o   \
+		   linux/hba_mod.o      linux/hba_timer.o \
+		   linux/hba_exp.o      linux/linux_iface.o \
+		   linux/linux_main.o   
+
+LIBMV_OBJS	:= $(CORE_OBJS) $(COMM_OBJS) $(raid_objs_y)
+
+OSD_OBJS        := $(OSDEP_OBJS) $(LIBMV_OBJS)
+
+KERNEL_SRC    :=   $(srctree)/$(src)
+
+obj-$(CONFIG_SCSI_MV_THOR) :=   mv61xx.o
+
+mv61xx-objs   :=   $(OSD_OBJS) 
+
+EXTRA_CFLAGS  :=   -I$(KERNEL_SRC)/common -I$(KERNEL_SRC)/core \
+		   -I$(KERNEL_SRC)/linux  -I$(KERNEL_SRC)/. 
+
+EXTRA_CFLAGS  +=   -D__MV_LINUX__ $(INCLUDE_DIR)
+
+ifneq ($(CONFIG_64BIT), y)
+EXTRA_CFLAGS  +=   -D_32_LEGACY_
+else
+EXTRA_CFLAGS  +=   -D_64_SYS_
+endif
+
+EXTRA_CFLAGS  +=   -include $(KERNEL_SRC)/mv_config.h
+
+EXTRA_CFLAGS  +=   -D__MV_DEBUG__
+
+EXTRA_CFLAGS  += -D__AC_DBG__
+
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/mv_config.h linux-2.6.25/drivers/scsi/mv/mv_config.h
--- linux-2.6.25.orig/drivers/scsi/mv/mv_config.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/mv_config.h	2008-07-28 18:42:43.337188803 +0200
@@ -0,0 +1,67 @@
+#ifndef __MV_CONFIGURATION_H__
+#define __MV_CONFIGURATION_H__
+
+/* HBA macro definition */
+#define MV_MAX_TRANSFER_SIZE    (128*1024)
+#define MAX_REQUEST_NUMBER      32
+#define MAX_BASE_ADDRESS        6
+
+/* Core driver macro definition */
+#define MAX_DEVICE_SUPPORTED    32
+#define MAX_HD_NUMBER           32
+#define MAX_SG_ENTRY            32
+#define MAX_SG_ENTRY_REDUCED    16
+
+//#define ENABLE_PATA_ERROR_INTERRUPT
+
+#ifndef ENABLE_PATA_ERROR_INTERRUPT
+	#define USE_DMA_FOR_ALL_PACKET_COMMAND
+	/* It's dangerous. Never enable it unless we have to. */
+	#define PRD_SIZE_WORD_ALIGN	
+#endif
+
+//#define HIBERNATION_ROUNTINE
+
+//#define CORE_SUPPORT_API
+
+//#define SUPPORT_SCSI_PASSTHROUGH
+
+//#define SUPPORT_CONSOLIDATE
+
+/* hot plug & PM */
+#define SUPPORT_HOT_PLUG        1
+#define SUPPORT_PM              1
+
+//#define SUPPORT_TIMER 1
+#define SUPPORT_ERROR_HANDLING  1
+
+//#define SUPPORT_CONSOLIDATE	1
+
+#ifdef SUPPORT_SCSI_PASSTHROUGH
+#define MV_MAX_TARGET_NUMBER    21 // console
+#else
+#define MV_MAX_TARGET_NUMBER    22 // max 5 ports, 4 device each
+#endif /* SUPPORT_SCSI_PASSTHROUGH */
+#define MV_MAX_LUN_NUMBER       1
+
+#define CONSOLE_ID ((MV_MAX_TARGET_NUMBER - 1) * MV_MAX_LUN_NUMBER)	/* id of the console pseudo-target; fully parenthesized so it expands safely inside larger expressions */
+
+/* RAID */
+#ifdef RAID_DRIVER
+#define BGA_SUPPORT             1
+#define SOFTWARE_XOR            1
+#define SUPPORT_FREE_POLICY     1
+#define SUPPORT_RAID1E          1
+//#define SUPPORT_SRL           1
+
+//#define SUPPORT_RAID6
+#ifdef SUPPORT_RAID6
+#define USE_MATH_LIBARY
+#define SUPPORT_READ_MODIFY_WRITE
+#endif /* SUPPORT_RAID6 */
+
+#endif /* RAID_DRIVER */
+
+//#define SUPPORT_CACHE         1
+
+#endif /* __MV_CONFIGURATION_H__ */
diff -urNp linux-2.6.25.orig/drivers/scsi/mv/mv_include.h linux-2.6.25/drivers/scsi/mv/mv_include.h
--- linux-2.6.25.orig/drivers/scsi/mv/mv_include.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.25/drivers/scsi/mv/mv_include.h	2008-07-28 18:42:43.338197174 +0200
@@ -0,0 +1,38 @@
+#ifndef MV_INCLUDE_H
+#define MV_INCLUDE_H
+
+#include "mv_config.h"
+
+#include "mv_os.h"
+#include "com_type.h"
+#include "com_u64.h"
+
+#include "com_util.h"
+#include "com_list.h"
+
+#include "com_dbg.h"
+
+#include "hba_exp.h"
+
+#ifdef SUPPORT_TIMER
+#include "hba_timer.h"
+#endif
+
+#include "com_scsi.h"
+
+#include "com_api.h"
+#include "com_struct.h"
+#ifdef SUPPORT_SCSI_PASSTHROUGH
+#include "com_ioctl.h"
+#endif
+
+#ifdef RAID_DRIVER
+#include "raid_exp.h"
+#endif
+
+#ifdef SUPPORT_RAID6
+#include "math_gf.h"
+#endif
+
+#endif /* MV_INCLUDE_H */
+
