path: root/target/linux/coldfire/patches/001-Coldfire-architecture-support-in-Linux-2.6.38.patch
author    kaloz <kaloz@3c298f89-4303-0410-b956-a3cf2f4a3e73>    2012-05-01 07:00:17 +0000
committer kaloz <kaloz@3c298f89-4303-0410-b956-a3cf2f4a3e73>    2012-05-01 07:00:17 +0000
commit    7e7649baa9de9d592a76e150970e3079d7610138 (patch)
tree      7e3c3b52269e3c7564c6a88d67dc4ea6219b041c /target/linux/coldfire/patches/001-Coldfire-architecture-support-in-Linux-2.6.38.patch
parent    7877ca154f130fe0c83a5f151320c3ce902195d1 (diff)
[coldfire]: switch to 2.6.38
git-svn-id: svn://svn.openwrt.org/openwrt/trunk@31546 3c298f89-4303-0410-b956-a3cf2f4a3e73
Diffstat (limited to 'target/linux/coldfire/patches/001-Coldfire-architecture-support-in-Linux-2.6.38.patch')
-rw-r--r--  target/linux/coldfire/patches/001-Coldfire-architecture-support-in-Linux-2.6.38.patch  11199
1 file changed, 11199 insertions, 0 deletions
diff --git a/target/linux/coldfire/patches/001-Coldfire-architecture-support-in-Linux-2.6.38.patch b/target/linux/coldfire/patches/001-Coldfire-architecture-support-in-Linux-2.6.38.patch
new file mode 100644
index 0000000000..6b889914a8
--- /dev/null
+++ b/target/linux/coldfire/patches/001-Coldfire-architecture-support-in-Linux-2.6.38.patch
@@ -0,0 +1,11199 @@
+From c462e1a613c8f84bb503189b0796d036dd1e5376 Mon Sep 17 00:00:00 2001
+From: Jason Jin <jason.jin@freescale.com>
+Date: Thu, 4 Aug 2011 09:59:35 +0800
+Subject: [PATCH 01/52] Coldfire architecture support in Linux 2.6.38
+
+Coldfire architecture support in Linux 2.6.38.
+
+Signed-off-by: Alison Wang <b18965@freescale.com>
+Signed-off-by: Jason Jin <jason.jin@freescale.com>
+---
+ arch/m68k/Kconfig | 279 +++++++++-
+ arch/m68k/Kconfig.debug | 9 +
+ arch/m68k/Makefile | 48 ++-
+ arch/m68k/boot/Makefile | 68 +++
+ arch/m68k/coldfire/Makefile | 10 +
+ arch/m68k/coldfire/common/Makefile | 7 +
+ arch/m68k/coldfire/common/cache.c | 45 ++
+ arch/m68k/coldfire/common/clk.c | 51 ++
+ arch/m68k/coldfire/common/entry.S | 745 ++++++++++++++++++++++++
+ arch/m68k/coldfire/common/head.S | 466 +++++++++++++++
+ arch/m68k/coldfire/common/ints.c | 544 ++++++++++++++++++
+ arch/m68k/coldfire/common/muldi3.S | 73 +++
+ arch/m68k/coldfire/common/signal.c | 991 ++++++++++++++++++++++++++++++++
+ arch/m68k/coldfire/common/traps.c | 457 +++++++++++++++
+ arch/m68k/include/asm/atomic.h | 33 +-
+ arch/m68k/include/asm/bitops_mm.h | 12 +-
+ arch/m68k/include/asm/bootinfo.h | 42 ++
+ arch/m68k/include/asm/cacheflush_mm.h | 14 +-
+ arch/m68k/include/asm/cf-sram.h | 21 +
+ arch/m68k/include/asm/cf_bitops.h | 443 ++++++++++++++
+ arch/m68k/include/asm/cf_cacheflush.h | 20 +
+ arch/m68k/include/asm/cf_entry.h | 153 +++++
+ arch/m68k/include/asm/cf_io.h | 185 ++++++
+ arch/m68k/include/asm/cf_pgalloc.h | 112 ++++
+ arch/m68k/include/asm/cf_pgtable.h | 364 ++++++++++++
+ arch/m68k/include/asm/cf_raw_io.h | 188 ++++++
+ arch/m68k/include/asm/cf_tlbflush.h | 66 +++
+ arch/m68k/include/asm/cf_uaccess.h | 262 +++++++++
+ arch/m68k/include/asm/cf_virtconvert.h | 63 ++
+ arch/m68k/include/asm/cfcache.h | 146 +++++
+ arch/m68k/include/asm/cfmmu.h | 112 ++++
+ arch/m68k/include/asm/coldfire.h | 70 ++-
+ arch/m68k/include/asm/delay_mm.h | 42 ++-
+ arch/m68k/include/asm/div64.h | 11 +-
+ arch/m68k/include/asm/dma.h | 109 ++++-
+ arch/m68k/include/asm/elf.h | 70 +++-
+ arch/m68k/include/asm/io_mm.h | 234 ++++++--
+ arch/m68k/include/asm/irq.h | 14 +-
+ arch/m68k/include/asm/machdep.h | 15 +-
+ arch/m68k/include/asm/mcfdspi.h | 59 ++
+ arch/m68k/include/asm/mcfsim.h | 121 ++++
+ arch/m68k/include/asm/mcfuart.h | 64 ++
+ arch/m68k/include/asm/mmu.h | 15 +-
+ arch/m68k/include/asm/mmu_context.h | 184 ++++++-
+ arch/m68k/include/asm/page.h | 10 +-
+ arch/m68k/include/asm/page_mm.h | 61 ++
+ arch/m68k/include/asm/page_offset.h | 21 +-
+ arch/m68k/include/asm/pgalloc.h | 12 +-
+ arch/m68k/include/asm/pgtable_mm.h | 36 ++-
+ arch/m68k/include/asm/processor.h | 45 ++-
+ arch/m68k/include/asm/ptrace.h | 48 ++-
+ arch/m68k/include/asm/raw_io.h | 15 +-
+ arch/m68k/include/asm/segment.h | 17 +
+ arch/m68k/include/asm/setup.h | 36 ++
+ arch/m68k/include/asm/signal.h | 10 +-
+ arch/m68k/include/asm/string.h | 20 +-
+ arch/m68k/include/asm/swab.h | 15 +-
+ arch/m68k/include/asm/system_mm.h | 23 +-
+ arch/m68k/include/asm/tlbflush.h | 24 +-
+ arch/m68k/include/asm/uaccess_mm.h | 80 ++--
+ arch/m68k/include/asm/unistd.h | 11 +-
+ arch/m68k/include/asm/virtconvert.h | 11 +
+ arch/m68k/kernel/Makefile | 23 +-
+ arch/m68k/kernel/asm-offsets.c | 28 +
+ arch/m68k/kernel/dma.c | 47 ++-
+ arch/m68k/kernel/process.c | 66 +++
+ arch/m68k/kernel/setup.c | 72 ++-
+ arch/m68k/kernel/sys_m68k.c | 80 +++
+ arch/m68k/kernel/time.c | 142 +++++-
+ arch/m68k/kernel/vmlinux-cf.lds | 142 +++++
+ arch/m68k/kernel/vmlinux.lds.S | 12 +-
+ arch/m68k/lib/checksum.c | 129 +++++
+ arch/m68k/lib/muldi3.c | 13 +
+ arch/m68k/lib/string.c | 68 +++
+ arch/m68k/lib/uaccess.c | 247 ++++++++
+ arch/m68k/mm/Makefile | 2 +
+ arch/m68k/mm/cache.c | 19 +
+ arch/m68k/mm/cf-mmu.c | 311 ++++++++++
+ arch/m68k/mm/cf-sram.c | 80 +++
+ arch/m68k/mm/hwtest.c | 10 +
+ arch/m68k/mm/init.c | 15 +-
+ arch/m68k/mm/kmap.c | 82 +++-
+ arch/m68k/mm/memory.c | 17 +
+ fs/namespace.c | 8 +
+ include/linux/fsl_devices.h | 14 +-
+ 85 files changed, 8962 insertions(+), 197 deletions(-)
+ create mode 100644 arch/m68k/boot/Makefile
+ create mode 100644 arch/m68k/coldfire/Makefile
+ create mode 100644 arch/m68k/coldfire/common/Makefile
+ create mode 100644 arch/m68k/coldfire/common/cache.c
+ create mode 100644 arch/m68k/coldfire/common/clk.c
+ create mode 100644 arch/m68k/coldfire/common/entry.S
+ create mode 100644 arch/m68k/coldfire/common/head.S
+ create mode 100644 arch/m68k/coldfire/common/ints.c
+ create mode 100644 arch/m68k/coldfire/common/muldi3.S
+ create mode 100644 arch/m68k/coldfire/common/signal.c
+ create mode 100644 arch/m68k/coldfire/common/traps.c
+ create mode 100644 arch/m68k/include/asm/cf-sram.h
+ create mode 100644 arch/m68k/include/asm/cf_bitops.h
+ create mode 100644 arch/m68k/include/asm/cf_cacheflush.h
+ create mode 100644 arch/m68k/include/asm/cf_entry.h
+ create mode 100644 arch/m68k/include/asm/cf_io.h
+ create mode 100644 arch/m68k/include/asm/cf_pgalloc.h
+ create mode 100644 arch/m68k/include/asm/cf_pgtable.h
+ create mode 100644 arch/m68k/include/asm/cf_raw_io.h
+ create mode 100644 arch/m68k/include/asm/cf_tlbflush.h
+ create mode 100644 arch/m68k/include/asm/cf_uaccess.h
+ create mode 100644 arch/m68k/include/asm/cf_virtconvert.h
+ create mode 100644 arch/m68k/include/asm/cfcache.h
+ create mode 100644 arch/m68k/include/asm/cfmmu.h
+ create mode 100644 arch/m68k/include/asm/mcfdspi.h
+ create mode 100644 arch/m68k/kernel/vmlinux-cf.lds
+ create mode 100644 arch/m68k/mm/cf-mmu.c
+ create mode 100644 arch/m68k/mm/cf-sram.c
+
+--- a/arch/m68k/Kconfig
++++ b/arch/m68k/Kconfig
+@@ -9,6 +9,14 @@ config MMU
+ bool
+ default y
+
++config GENERIC_TIME
++ bool "Enable generic timer"
++ default n
++
++config GENERIC_CLOCKEVENTS
++ bool "Enable generic clockevents"
++ default n
++
+ config RWSEM_GENERIC_SPINLOCK
+ bool
+ default y
+@@ -34,7 +42,7 @@ config GENERIC_CALIBRATE_DELAY
+
+ config TIME_LOW_RES
+ bool
+- default y
++ default n
+
+ config GENERIC_IOMAP
+ bool
+@@ -46,7 +54,7 @@ config ARCH_MAY_HAVE_PC_FDC
+ default y
+
+ config NO_IOPORT
+- def_bool y
++ def_bool !(M5445X || M547X_8X || M5441X)
+
+ config NO_DMA
+ def_bool SUN3
+@@ -105,6 +113,35 @@ config PCMCIA
+ To compile this driver as modules, choose M here: the
+ modules will be called pcmcia_core and ds.
+
++config COLDFIRE
++ bool "ColdFire V4e support"
++ default y
++ select CFV4E
++ help
++ Say Y if you want to build a kernel to run on one of the ColdFire
++ V4e boards.
++
++config CFV4E
++ bool
++ depends on COLDFIRE
++ select MMU_CFV4E if MMU
++ default y
++
++config FPU
++ bool "ColdFire V4e FPU support"
++ default n
++ help
++ This enables support for the CFV4E FPU feature.
++
++config MCD_DMA
++ bool "ColdFire MCD DMA support"
++ depends on M547X_8X
++ default y
++ help
++ This enables support for the ColdFire 547x/548x family's
++ multichannel DMA controller. Many drivers need it.
++ If you want it, say Y.
++
+ config AMIGA
+ bool "Amiga support"
+ select MMU_MOTOROLA if MMU
+@@ -122,6 +159,16 @@ config ATARI
+ this kernel on an Atari, say Y here and browse the material
+ available in <file:Documentation/m68k>; otherwise say N.
+
++config PCI
++ bool "PCI bus support"
++ depends on M54455 || M547X_8X
++ default n
++ help
++ Find out whether you have a PCI motherboard. PCI is the name of a
++ bus system, i.e. the way the CPU talks to the other stuff inside
++ your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
++ VESA. If you have PCI, say Y, otherwise N.
++
+ config MAC
+ bool "Macintosh support"
+ select MMU_MOTOROLA if MMU
+@@ -276,6 +323,147 @@ config M68060
+ If you anticipate running this kernel on a computer with a MC68060
+ processor, say Y. Otherwise, say N.
+
++config M5445X
++ bool "MCF5445x support"
++ depends on COLDFIRE
++ select GENERIC_TIME
++ select USB_EHCI_FSL
++ select HAVE_FSL_USB_DR
++ help
++ This option will add support for the MCF5445x processor with MMU.
++
++config M54451
++ bool
++ depends on M5445X
++ default n
++
++config M54455
++ bool
++ depends on M5445X
++ default n
++
++choice
++ prompt "Model"
++ depends on M5445X
++ default M54451EVB
++ config M54451EVB
++ bool "M54451EVB"
++ select M54451
++ config M54455EVB
++ bool "M54455EVB"
++ select M54455
++endchoice
++
++config HAVE_FSL_USB_DR
++ bool
++ default n
++
++config M547X_8X
++ bool "MCF547x/MCF548x support"
++ depends on COLDFIRE
++ help
++ This option will add support for the MCF547x/MCF548x processor with MMU.
++
++config M547X
++ bool
++ depends on M547X_8X
++ default n
++
++config M548X
++ bool
++ depends on M547X_8X
++ default n
++
++choice
++ prompt "Model"
++ depends on M547X_8X
++ default M5485CFE
++
++config M5474LITE
++ bool "MCF5474LITE"
++ select M547X
++config M5475AFE
++ bool "MCF5475AFE"
++ select M547X
++config M5475BFE
++ bool "MCF5475BFE"
++ select M547X
++config M5475CFE
++ bool "MCF5475CFE"
++ select M547X
++config M5475DFE
++ bool "MCF5475DFE"
++ select M547X
++config M5475EFE
++ bool "MCF5475EFE"
++ select M547X
++config M5475FFE
++ bool "MCF5475FFE"
++ select M547X
++config M5484LITE
++ bool "MCF5484LITE"
++ select M548X
++config M5485AFE
++ bool "MCF5485AFE"
++ select M548X
++config M5485BFE
++ bool "MCF5485BFE"
++ select M548X
++config M5485CFE
++ bool "MCF5485CFE"
++ select M548X
++config M5485DFE
++ bool "MCF5485DFE"
++ select M548X
++config M5485EFE
++ bool "MCF5485EFE"
++ select M548X
++config M5485FFE
++ bool "MCF5485FFE"
++ select M548X
++
++endchoice
++
++config M5441X
++ bool "MCF5441x support"
++ depends on COLDFIRE
++ select GENERIC_TIME
++ select USB_EHCI_FSL
++ select HAVE_FSL_USB_DR
++ help
++ This option will add support for the MCF5441x processor with MMU.
++
++config M54418
++ bool
++ depends on M5441X
++ default n
++choice
++ prompt "Model"
++ depends on M5441X
++ default M54418EVB
++ config M54418EVB
++ bool "M54418EVB"
++ select M54418
++endchoice
++
++config MCFCLK
++ int
++ default 240000000 if M54451EVB
++ default 266666666 if M54455EVB
++ default 266000000 if M547X
++ default 200000000 if M548X
++ default 250000000 if M54418EVB && !USB_M5441X_PLLCLK
++ default 150000000 if M54418EVB && USB_M5441X_PLLCLK
++ help
++ ColdFire system clock frequency, in Hz.
++
++config MCF_USER_HALT
++ bool "Coldfire User Halt Enable"
++ depends on M5445X || M547X_8X || M5441X
++ default n
++ help
++ Enables the HALT instruction in User Mode.
++
+ config MMU_MOTOROLA
+ bool
+
+@@ -283,6 +471,81 @@ config MMU_SUN3
+ bool
+ depends on MMU && !MMU_MOTOROLA
+
++config MMU_CFV4E
++ bool
++
++config SDRAM_BASE
++ hex
++ depends on COLDFIRE
++ default 0x40000000 if M5445X
++ default 0x00000000 if M547X_8X
++ default 0x40000000 if M5441X
++
++config SDRAM_SIZE
++ hex
++ depends on COLDFIRE
++ default 0x08000000 if M54451EVB
++ default 0x10000000 if M54455EVB
++ default 0x04000000 if M547X_8X
++ default 0x08000000 if M54418EVB
++
++config NOR_FLASH_BASE
++ hex "NOR Flash Base Address"
++ depends on COLDFIRE
++ default 0x00000000 if M54451EVB
++ default 0x00000000 if M54455EVB
++ default 0xE0000000 if M5475CFE
++ default 0xE0000000 if M5485CFE
++ default 0xFF800000 if M5484LITE
++ default 0xFF800000 if M5474LITE
++ default 0x00000000 if M54418EVB
++
++config DMA_BASE
++ hex
++ depends on COLDFIRE
++ default 0xef000000 if M5445X
++ default 0xef000000 if M547X_8X
++ default 0xdf000000 if M5441X
++
++config DMA_SIZE
++ hex
++ depends on COLDFIRE
++ default 0x1000000 if M5445X
++ default 0x800000 if M547X_8X
++ default 0x1000000 if M5441X
++
++config SRAM
++ bool "SRAM allocation APIs support on mcfv4 platform"
++ depends on COLDFIRE && (M5445X || M5441X)
++ default y
++ select GENERIC_ALLOCATOR
++
++config SRAM_BASE
++ hex
++ depends on COLDFIRE && SRAM
++ default 0x8ff00000 if M5445X
++ default 0x8ff00000 if M5441X
++
++config SRAM_SIZE
++ hex
++ depends on COLDFIRE && SRAM
++ default 0x8000 if M5445X
++ default 0x10000 if M5441X
++
++config SRAM_ALLOC_GRANULARITY
++ hex
++ depends on SRAM
++ default 0x200 if (M5445X || M5441X)
++
++config VDSO
++ bool "Support VDSO page"
++ depends on MMU
++ default n
++ help
++ This will enable support for the kernel mapping a vDSO page
++ in process space, and subsequently handing down the entry point
++ to the libc through the ELF auxiliary vector.
++
+ config M68KFPU_EMU
+ bool "Math emulation support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+@@ -449,6 +712,14 @@ config ZONE_DMA
+ source "drivers/pci/Kconfig"
+
+ source "drivers/zorro/Kconfig"
++endmenu
++
++menu "Power management options"
++
++config PM
++ bool "Power Management support"
++ help
++ Support processor power management modes
+
+ endmenu
+
+@@ -583,7 +854,7 @@ config DN_SERIAL
+
+ config SERIAL_CONSOLE
+ bool "Support for serial port console"
+- depends on (AMIGA || ATARI || SUN3 || SUN3X || VME || APOLLO) && (ATARI_MFPSER=y || ATARI_MIDI=y || AMIGA_BUILTIN_SERIAL=y || GVPIOEXT=y || MULTIFACE_III_TTY=y || SERIAL=y || MVME147_SCC || SERIAL167 || MVME162_SCC || BVME6000_SCC || DN_SERIAL)
++ depends on (AMIGA || ATARI || SUN3 || SUN3X || VME || APOLLO || COLDFIRE) && (ATARI_MFPSER=y || ATARI_MIDI=y || AMIGA_BUILTIN_SERIAL=y || GVPIOEXT=y || MULTIFACE_III_TTY=y || SERIAL=y || MVME147_SCC || SERIAL167 || MVME162_SCC || BVME6000_SCC || DN_SERIAL || SERIAL_COLDFIRE)
+ ---help---
+ If you say Y here, it will be possible to use a serial port as the
+ system console (the system console is the device which receives all
+@@ -606,6 +877,8 @@ config SERIAL_CONSOLE
+
+ endmenu
+
++source "kernel/time/Kconfig"
++
+ source "fs/Kconfig"
+
+ source "arch/m68k/Kconfig.debug"
+--- a/arch/m68k/Kconfig.debug
++++ b/arch/m68k/Kconfig.debug
+@@ -2,4 +2,13 @@ menu "Kernel hacking"
+
+ source "lib/Kconfig.debug"
+
++config BOOTPARAM
++ bool 'Compiled-in Kernel Boot Parameter'
++ depends on COLDFIRE
++
++config BOOTPARAM_STRING
++ string 'Kernel Boot Parameter'
++ default 'console=ttyS0,115200'
++ depends on BOOTPARAM
++
+ endmenu
+--- a/arch/m68k/Makefile
++++ b/arch/m68k/Makefile
+@@ -1,6 +1,8 @@
+ #
+ # m68k/Makefile
+ #
++# Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++#
+ # This file is included by the global makefile so that you can add your own
+ # architecture-specific flags and dependencies. Remember to do have actions
+ # for "archclean" and "archdep" for cleaning up and making dependencies for
+@@ -10,13 +12,13 @@
+ # License. See the file "COPYING" in the main directory of this archive
+ # for more details.
+ #
+-# Copyright (C) 1994 by Hamish Macdonald
+-#
+
+-KBUILD_DEFCONFIG := multi_defconfig
++KBUILD_DEFCONFIG := amiga_defconfig#multi_defconfig
+
+ # override top level makefile
++ifndef CONFIG_COLDFIRE
+ AS += -m68020
++endif
+ LDFLAGS := -m m68kelf
+ KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/m68k/kernel/module.lds
+ ifneq ($(SUBARCH),$(ARCH))
+@@ -30,12 +32,18 @@ ifdef CONFIG_SUN3
+ LDFLAGS_vmlinux = -N
+ endif
+
++ifdef CONFIG_COLDFIRE
++OBJCOPYFLAGS := -O binary -R .note -R .note.gnu.build-id -R .comment -S
++# LDFLAGS_vmlinux = --verbose
++endif
++
+ CHECKFLAGS += -D__mc68000__
+
+ # without -fno-strength-reduce the 53c7xx.c driver fails ;-(
+ KBUILD_CFLAGS += -pipe -fno-strength-reduce -ffixed-a2
+
+ # enable processor switch if compiled only for a single cpu
++ifndef CONFIG_COLDFIRE
+ ifndef CONFIG_M68020
+ ifndef CONFIG_M68030
+
+@@ -49,6 +57,22 @@ endif
+
+ endif
+ endif
++endif
++
++ifdef CONFIG_M5445X
++KBUILD_CFLAGS += -march=isac -mcpu=54455 -msoft-float -g
++KBUILD_AFLAGS += -march=isac -mcpu=54455 -msoft-float
++endif
++
++ifdef CONFIG_M547X_8X
++KBUILD_CFLAGS += -mcfv4e -g
++KBUILD_AFLAGS += -mcfv4e
++endif
++
++ifdef CONFIG_M5441X
++KBUILD_CFLAGS += -march=isac -mcpu=54418 -msoft-float -g
++KBUILD_AFLAGS += -march=isac -mcpu=54418 -msoft-float
++endif
+
+ ifdef CONFIG_KGDB
+ # If configured for kgdb support, include debugging infos and keep the
+@@ -57,8 +81,12 @@ KBUILD_CFLAGS := $(subst -fomit-frame-po
+ endif
+
+ ifndef CONFIG_SUN3
++ifndef CONFIG_COLDFIRE
+ head-y := arch/m68k/kernel/head.o
+ else
++head-y := arch/m68k/coldfire/common/head.o
++endif
++else
+ head-y := arch/m68k/kernel/sun3-head.o
+ endif
+
+@@ -79,7 +107,20 @@ core-$(CONFIG_SUN3) += arch/m68k/sun3/
+ core-$(CONFIG_M68040) += arch/m68k/fpsp040/
+ core-$(CONFIG_M68060) += arch/m68k/ifpsp060/
+ core-$(CONFIG_M68KFPU_EMU) += arch/m68k/math-emu/
++core-$(CONFIG_COLDFIRE) += arch/m68k/coldfire/
++
++ifdef CONFIG_COLDFIRE
++boot := arch/m68k/boot
++
++all: uImage
+
++zImage zImage.srec uImage uImage.srec vmlinux.srec: vmlinux
++ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
++
++archclean:
++ $(Q)$(MAKE) $(clean)=$(boot)
++
++else
+ all: zImage
+
+ lilo: vmlinux
+@@ -117,6 +158,7 @@ endif
+
+ archclean:
+ rm -f vmlinux.gz vmlinux.bz2
++endif
+
+ install:
+ sh $(srctree)/arch/m68k/install.sh $(KERNELRELEASE) vmlinux.gz System.map "$(INSTALL_PATH)"
+--- /dev/null
++++ b/arch/m68k/boot/Makefile
+@@ -0,0 +1,68 @@
++#
++# arch/m68k/boot/Makefile
++#
++# Based on arch/sh/boot/Makefile by Stuart Menefy
++#
++# Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++# by Kurt Mahan <kmahan@freescale.com>
++#
++# This file is subject to the terms and conditions of the GNU General Public
++# License. See the file "COPYING" in the main directory of this archive
++# for more details.
++#
++
++MKIMAGE := $(srctree)/scripts/mkuboot.sh
++
++#
++# Assign safe dummy values if these variables are not defined,
++# in order to suppress error message.
++#
++CONFIG_SDRAM_BASE ?= 0x40000000
++CONFIG_IMG_START ?= 0x00020000
++
++export CONFIG_SDRAM_BASE CONFIG_IMG_START
++
++targets := zImage zImage.srec vmlinux.srec uImage uImage.srec
++
++$(obj)/zImage: $(obj)/vmlinux.bin FORCE
++ $(call if_changed,gzip)
++ @echo ' Image $@ is ready'
++
++OBJCOPYFLAGS_zImage.srec := -I binary -O srec
++$(obj)/zImage.srec: $(obj)/zImage
++ $(call if_changed,objcopy)
++
++KERNEL_LOAD := $(shell /bin/bash -c 'printf "0x%08x" \
++ $$[$(CONFIG_SDRAM_BASE) + \
++ $(CONFIG_IMG_START)]')
++
++KERNEL_ENTRY := $(shell /bin/bash -c 'printf "0x%08x" \
++ $$[$(CONFIG_SDRAM_BASE) + \
++ $(CONFIG_IMG_START)]')
++
++quiet_cmd_uimage = UIMAGE $@
++ cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A m68k -O linux -T kernel \
++ -C gzip -a $(KERNEL_LOAD) -e $(KERNEL_ENTRY) \
++ -n 'Linux-$(KERNELRELEASE)' -d $< $@
++
++$(obj)/uImage: $(obj)/vmlinux.bin.gz FORCE
++ $(call if_changed,uimage)
++ @echo ' Image $@ is ready'
++
++$(obj)/vmlinux.bin: vmlinux FORCE
++ $(call if_changed,objcopy)
++
++$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
++ $(call if_changed,gzip)
++
++OBJCOPYFLAGS_vmlinux.srec := -I binary -O srec
++$(obj)/vmlinux.srec: $(obj)/vmlinux.bin
++ $(call if_changed,objcopy)
++
++OBJCOPYFLAGS_uImage.srec := -I binary -O srec
++$(obj)/uImage.srec: $(obj)/uImage
++ $(call if_changed,objcopy)
++
++clean-files += uImage uImage.srec \
++ zImage zImage.srec \
++ vmlinux.srec vmlinux.bin vmlinux.bin.gz
+--- /dev/null
++++ b/arch/m68k/coldfire/Makefile
+@@ -0,0 +1,10 @@
++#
++# Makefile for Linux arch/m68k/coldfire source directory
++#
++
++obj-y += common/
++obj-$(CONFIG_VDSO) += vdso/
++
++obj-$(CONFIG_M5445X) += m5445x/
++obj-$(CONFIG_M547X_8X) += m547x/
++obj-$(CONFIG_M5441X) += m5441x/
+--- /dev/null
++++ b/arch/m68k/coldfire/common/Makefile
+@@ -0,0 +1,7 @@
++#
++# Makefile for Linux arch/m68k/coldfire/common source directory
++#
++
++obj-y:= entry.o cache.o signal.o muldi3.o traps.o ints.o clk.o
++extra-y:= head.o
++
+--- /dev/null
++++ b/arch/m68k/coldfire/common/cache.c
+@@ -0,0 +1,45 @@
++/*
++ * linux/arch/m68k/coldfire/cache.c
++ *
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Matt Waddel Matt.Waddel@freescale.com
++ * Kurt Mahan kmahan@freescale.com
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++#include <linux/interrupt.h>
++#include <asm/cfcache.h>
++#include <asm/coldfire.h>
++#include <asm/system.h>
++
++/* Cache Control Reg shadow reg */
++unsigned long shadow_cacr;
++
++/**
++ * cacr_set - Set the Cache Control Register
++ * @x Value to set
++ */
++void cacr_set(unsigned long x)
++{
++ shadow_cacr = x;
++
++ __asm__ __volatile__ ("movec %0, %%cacr"
++ : /* no outputs */
++ : "r" (shadow_cacr));
++}
++
++/**
++ * cacr_get - Get the current value of the Cache Control Register
++ *
++ * @return CACR value
++ */
++unsigned long cacr_get(void)
++{
++ return shadow_cacr;
++}
+--- /dev/null
++++ b/arch/m68k/coldfire/common/clk.c
+@@ -0,0 +1,51 @@
++/***************************************************************************/
++
++/*
++ * clk.c -- general ColdFire CPU kernel clk handling
++ *
++ * Copyright (C) 2009, Greg Ungerer (gerg@snapgear.com)
++ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++*/
++
++/***************************************************************************/
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/clk.h>
++#include <asm/coldfire.h>
++
++/***************************************************************************/
++
++struct clk *clk_get(struct device *dev, const char *id)
++{
++ return NULL;
++}
++EXPORT_SYMBOL(clk_get);
++
++int clk_enable(struct clk *clk)
++{
++ return 0;
++}
++EXPORT_SYMBOL(clk_enable);
++
++void clk_disable(struct clk *clk)
++{
++}
++EXPORT_SYMBOL(clk_disable);
++
++void clk_put(struct clk *clk)
++{
++}
++EXPORT_SYMBOL(clk_put);
++
++unsigned long clk_get_rate(struct clk *clk)
++{
++ return MCF_CLK;
++}
++EXPORT_SYMBOL(clk_get_rate);
++/***************************************************************************/
+--- /dev/null
++++ b/arch/m68k/coldfire/common/entry.S
+@@ -0,0 +1,745 @@
++/*
++ * arch/m68k/coldfire/entry.S
++ *
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Matt Waddel Matt.Waddel@freescale.com
++ * Kurt Mahan kmahan@freescale.com
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
++ *
++ * Based on:
++ *
++ * arch/m68knommu/platform/5307/entry.S &
++ * arch/m68k/kernel/entry.S
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file README.legal in the main directory of this archive
++ * for more details.
++ */
++
++#include <linux/sys.h>
++#include <linux/linkage.h>
++#include <asm/cf_entry.h>
++#include <asm/errno.h>
++#include <asm/setup.h>
++#include <asm/segment.h>
++#include <asm/traps.h>
++#include <asm/unistd.h>
++#include <asm/asm-offsets.h>
++
++/*
++ * TASK_INFO:
++ *
++ * - TINFO_PREEMPT (struct thread_info / preempt_count)
++ * Used to keep track of preemptability
++ * - TINFO_FLAGS (struct thread_info / flags - include/asm-m68k/thread_info.h)
++ * Various bit flags that are checked for scheduling/tracing
++ * Bits 0-7 are checked every exception exit
++ * 8-15 are checked every syscall exit
++ *
++ * TIF_SIGPENDING 6
++ * TIF_NEED_RESCHED 7
++ * TIF_DELAYED_TRACE 14
++ * TIF_SYSCALL_TRACE 15
++ * TIF_MEMDIE 16 (never checked here)
++ */
++
++.bss
++
++sw_ksp:
++.long 0
++
++sw_usp:
++.long 0
++
++.text
++
++.globl system_call
++.globl buserr
++.globl trap
++.globl resume
++.globl ret_from_exception
++.globl ret_from_signal
++.globl sys_call_table
++.globl ret_from_interrupt
++.globl inthandler
++
++ENTRY(buserr)
++#ifdef CONFIG_COLDFIRE_FOO
++ movew #0x2700,%sr /* lock interrupts */
++#endif
++ SAVE_ALL_INT
++#ifdef CONFIG_COLDFIRE_FOO
++ movew PT_OFF_SR(%sp),%d3 /* get original %sr */
++ oril #0x2000,%d3 /* set supervisor mode in it */
++ movew %d3,%sr /* recover irq state */
++#endif
++ GET_CURRENT(%d0)
++ movel %sp,%sp@- /* stack frame pointer argument */
++ jsr buserr_c
++ addql #4,%sp
++ jra .Lret_from_exception
++
++ENTRY(trap)
++ SAVE_ALL_INT
++ GET_CURRENT(%d0)
++ movel %sp,%sp@- /* stack frame pointer argument */
++ jsr trap_c
++ addql #4,%sp
++ jra .Lret_from_exception
++
++ /* After a fork we jump here directly from resume,
++ %d1 contains the previous task, which is passed to schedule_tail */
++ENTRY(ret_from_fork)
++ movel %d1,%sp@-
++ jsr schedule_tail
++ addql #4,%sp
++ jra .Lret_from_exception
++
++do_trace_entry:
++ movel #-ENOSYS,%d1 /* needed for strace */
++ movel %d1,%sp@(PT_OFF_D0)
++ subql #4,%sp
++ SAVE_SWITCH_STACK
++ jbsr syscall_trace
++ RESTORE_SWITCH_STACK
++ addql #4,%sp
++ movel %sp@(PT_OFF_ORIG_D0),%d0
++ cmpl #NR_syscalls,%d0
++ jcs syscall
++badsys:
++ movel #-ENOSYS,%d1
++ movel %d1,%sp@(PT_OFF_D0)
++ jra ret_from_exception
++
++do_trace_exit:
++ subql #4,%sp
++ SAVE_SWITCH_STACK
++ jbsr syscall_trace
++ RESTORE_SWITCH_STACK
++ addql #4,%sp
++ jra .Lret_from_exception
++
++ENTRY(ret_from_signal)
++ RESTORE_SWITCH_STACK
++ addql #4,%sp
++ jra .Lret_from_exception
++
++ENTRY(system_call)
++ SAVE_ALL_SYS
++
++ GET_CURRENT(%d1)
++ /* save top of frame */
++ movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
++
++ /* syscall trace */
++ tstb %curptr@(TASK_INFO+TINFO_FLAGS+2)
++ jmi do_trace_entry /* SYSCALL_TRACE is set */
++ cmpl #NR_syscalls,%d0
++ jcc badsys
++syscall:
++ movel #sys_call_table,%a0
++ asll #2,%d0
++ addl %d0,%a0
++ movel %a0@,%a0
++ jsr %a0@
++ movel %d0,%sp@(PT_OFF_D0) /* save the return value */
++ret_from_syscall:
++ movew %curptr@(TASK_INFO+TINFO_FLAGS+2),%d0
++ jne syscall_exit_work /* flags set so process */
++1: RESTORE_ALL
++
++syscall_exit_work:
++ btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel */
++ bnes 1b /* if so, skip resched, signals */
++
++ btstl #15,%d0 /* check if SYSCALL_TRACE */
++ jne do_trace_exit
++ btstl #14,%d0 /* check if DELAYED_TRACE */
++ jne do_delayed_trace
++ btstl #6,%d0 /* check if SIGPENDING */
++ jne do_signal_return
++ pea resume_userspace
++ jra schedule
++
++ENTRY(ret_from_exception)
++.Lret_from_exception:
++ btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel */
++ bnes 1f /* if so, skip resched, signals */
++ movel %d0,%sp@- /* Only allow interrupts when we are */
++ move %sr,%d0 /* last one on the kernel stack, */
++ andl #ALLOWINT,%d0 /* otherwise stack overflow can occur */
++ move %d0,%sr /* during heavy interrupt load. */
++ movel %sp@+,%d0
++
++resume_userspace:
++ moveb %curptr@(TASK_INFO+TINFO_FLAGS+3),%d0
++ jne exit_work /* SIGPENDING and/or NEED_RESCHED set */
++1: RESTORE_ALL
++
++exit_work:
++ /* save top of frame */
++ movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
++ btstl #6,%d0 /* check for SIGPENDING in flags */
++ jne do_signal_return
++ pea resume_userspace
++ jra schedule
++
++do_signal_return:
++ subql #4,%sp /* dummy return address */
++ SAVE_SWITCH_STACK
++ pea %sp@(SWITCH_STACK_SIZE)
++ clrl %sp@-
++ bsrl do_signal
++ addql #8,%sp
++ RESTORE_SWITCH_STACK
++ addql #4,%sp
++ jbra resume_userspace
++
++do_delayed_trace:
++ bclr #7,%sp@(PT_OFF_SR) /* clear trace bit in SR */
++ pea 1 /* send SIGTRAP */
++ movel %curptr,%sp@-
++ pea LSIGTRAP
++ jbsr send_sig
++ addql #8,%sp
++ addql #4,%sp
++ jbra resume_userspace
++
++/*
++ * This is the interrupt handler (for all hardware interrupt
++ * sources). It figures out the vector number and calls the appropriate
++ * interrupt service routine directly.
++ */
++ENTRY(inthandler)
++ SAVE_ALL_INT
++ GET_CURRENT(%d0)
++ movel %curptr@(TASK_INFO+TINFO_PREEMPT),%d0
++ addil #0x10000,%d0
++ movel %d0,%curptr@(TASK_INFO+TINFO_PREEMPT)
++ /* put exception # in d0 */
++ movel %sp@(PT_VECTOR),%d0
++ swap %d0 /* extract bits 25:18 */
++ lsrl #2,%d0
++ andl #0x0ff,%d0
++
++ movel %sp,%sp@-
++ movel %d0,%sp@- /* put vector # on stack */
++auto_irqhandler_fixup = . + 2
++ jbsr process_int /* process the IRQ */
++ addql #8,%sp /* pop parameters off stack */
++
++ret_from_interrupt:
++
++ movel %curptr@(TASK_INFO+TINFO_PREEMPT),%d0
++ subil #0x10000,%d0
++ movel %d0,%curptr@(TASK_INFO+TINFO_PREEMPT)
++ jeq ret_from_last_interrupt
++2: RESTORE_ALL
++
++ ALIGN
++ret_from_last_interrupt:
++ moveb %sp@(PT_OFF_SR),%d0
++ andl #(~ALLOWINT>>8)&0xff,%d0
++ jne 2b
++
++ /* check if we need to do software interrupts */
++ tstl irq_stat+CPUSTAT_SOFTIRQ_PENDING
++ jeq .Lret_from_exception
++ pea ret_from_exception
++ jra do_softirq
++
++ENTRY(user_inthandler)
++ SAVE_ALL_INT
++ GET_CURRENT(%d0)
++ movel %curptr@(TASK_INFO+TINFO_PREEMPT),%d0
++ addil #0x10000,%d0
++ movel %d0,%curptr@(TASK_INFO+TINFO_PREEMPT)
++ /* put exception # in d0 */
++ movel %sp@(PT_VECTOR),%d0
++user_irqvec_fixup = . + 2
++ swap %d0 /* extract bits 25:18 */
++ lsrl #2,%d0
++ andl #0x0ff,%d0
++
++ movel %sp,%sp@-
++ movel %d0,%sp@- /* put vector # on stack */
++user_irqhandler_fixup = . + 2
++ jbsr process_int /* process the IRQ */
++ addql #8,%sp /* pop parameters off stack */
++
++ movel %curptr@(TASK_INFO+TINFO_PREEMPT),%d0
++ subil #0x10000,%d0
++ movel %d0,%curptr@(TASK_INFO+TINFO_PREEMPT)
++ jeq ret_from_last_interrupt
++ RESTORE_ALL
++
++/* Handler for uninitialized and spurious interrupts */
++
++ENTRY(bad_inthandler)
++ SAVE_ALL_INT
++ GET_CURRENT(%d0)
++ movel %curptr@(TASK_INFO+TINFO_PREEMPT),%d0
++ addil #0x10000,%d0
++ movel %d0,%curptr@(TASK_INFO+TINFO_PREEMPT)
++
++ movel %sp,%sp@-
++ jsr handle_badint
++ addql #4,%sp
++
++ movel %curptr@(TASK_INFO+TINFO_PREEMPT),%d0
++ subil #0x10000,%d0
++ movel %d0,%curptr@(TASK_INFO+TINFO_PREEMPT)
++ jeq ret_from_last_interrupt
++ RESTORE_ALL
++
++ENTRY(sys_fork)
++ SAVE_SWITCH_STACK
++ pea %sp@(SWITCH_STACK_SIZE)
++ jbsr m68k_fork
++ addql #4,%sp
++ RESTORE_SWITCH_STACK
++ rts
++
++ENTRY(sys_clone)
++ SAVE_SWITCH_STACK
++ pea %sp@(SWITCH_STACK_SIZE)
++ jbsr m68k_clone
++ addql #4,%sp
++ RESTORE_SWITCH_STACK
++ rts
++
++ENTRY(sys_vfork)
++ SAVE_SWITCH_STACK
++ pea %sp@(SWITCH_STACK_SIZE)
++ jbsr m68k_vfork
++ addql #4,%sp
++ RESTORE_SWITCH_STACK
++ rts
++
++ENTRY(sys_sigsuspend)
++ SAVE_SWITCH_STACK
++ pea %sp@(SWITCH_STACK_SIZE)
++ jbsr do_sigsuspend
++ addql #4,%sp
++ RESTORE_SWITCH_STACK
++ rts
++
++ENTRY(sys_sigreturn)
++ SAVE_SWITCH_STACK
++ jbsr do_sigreturn
++ RESTORE_SWITCH_STACK
++ rts
++
++ENTRY(sys_rt_sigreturn)
++ SAVE_SWITCH_STACK
++ jbsr do_rt_sigreturn
++ RESTORE_SWITCH_STACK
++ rts
++
++resume:
++ /*
++ * Beware - when entering resume, prev (the current task) is
++ * in a0, next (the new task) is in a1, so don't change these
++ * registers until their contents are no longer needed.
++ */
++
++ /* save sr */
++ movew %sr,%d0
++ movew %d0,%a0@(TASK_THREAD+THREAD_SR)
++
++ /* save usp */
++ /* Save USP via %a1 (which is saved/restored from %d0) */
++ movel %a1,%d0
++ movel %usp,%a1
++ movel %a1,%a0@(TASK_THREAD+THREAD_USP)
++ movel %d0,%a1
++
++ /* save non-scratch registers on stack */
++ SAVE_SWITCH_STACK
++
++ /* save current kernel stack pointer */
++ movel %sp,%a0@(TASK_THREAD+THREAD_KSP)
++#ifdef CONFIG_FPU
++ /* save floating point context */
++ fsave %a0@(TASK_THREAD+THREAD_FPSTATE)
++
++1: tstb %a0@(TASK_THREAD+THREAD_FPSTATE)
++ jeq 3f
++2:
++ fmovemd %fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
++3:
++#endif
++ /* Return previous task in %d1 */
++ movel %curptr,%d1
++
++ /* switch to new task (a1 contains new task) */
++ movel %a1,%curptr
++#ifdef CONFIG_FPU
++ /* restore floating point context */
++1: tstb %a1@(TASK_THREAD+THREAD_FPSTATE)
++ jeq 3f
++2:
++ fmovemd %a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
++/* frestore %a1@(TASK_THREAD+THREAD_FPCNTL)*/
++3: frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
++#endif
++ /* restore the kernel stack pointer */
++ movel %a1@(TASK_THREAD+THREAD_KSP),%sp
++
++ /* restore non-scratch registers */
++ RESTORE_SWITCH_STACK
++
++ /* restore user stack pointer */
++ movel %a1@(TASK_THREAD+THREAD_USP),%a0
++ movel %a0,%usp
++
++ /* restore status register */
++ movew %a1@(TASK_THREAD+THREAD_SR),%d0
++ movew %d0,%sr
++
++ rts
++
++.data
++ALIGN
++sys_call_table:
++ .long sys_ni_syscall /* 0 - old "setup()" system call*/
++ .long sys_exit
++ .long sys_fork
++ .long sys_read
++ .long sys_write
++ .long sys_open /* 5 */
++ .long sys_close
++ .long sys_waitpid
++ .long sys_creat
++ .long sys_link
++ .long sys_unlink /* 10 */
++ .long sys_execve
++ .long sys_chdir
++ .long sys_time
++ .long sys_mknod
++ .long sys_chmod /* 15 */
++ .long sys_chown16
++ .long sys_ni_syscall /* old break syscall holder */
++ .long sys_stat
++ .long sys_lseek
++ .long sys_getpid /* 20 */
++ .long sys_mount
++ .long sys_oldumount
++ .long sys_setuid16
++ .long sys_getuid16
++ .long sys_stime /* 25 */
++ .long sys_ptrace
++ .long sys_alarm
++ .long sys_fstat
++ .long sys_pause
++ .long sys_utime /* 30 */
++ .long sys_ni_syscall /* old stty syscall holder */
++ .long sys_ni_syscall /* old gtty syscall holder */
++ .long sys_access
++ .long sys_nice
++ .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
++ .long sys_sync
++ .long sys_kill
++ .long sys_rename
++ .long sys_mkdir
++ .long sys_rmdir /* 40 */
++ .long sys_dup
++ .long sys_pipe
++ .long sys_times
++ .long sys_ni_syscall /* old prof syscall holder */
++ .long sys_brk /* 45 */
++ .long sys_setgid16
++ .long sys_getgid16
++ .long sys_signal
++ .long sys_geteuid16
++ .long sys_getegid16 /* 50 */
++ .long sys_acct
++ .long sys_umount /* recycled never used phys() */
++ .long sys_ni_syscall /* old lock syscall holder */
++ .long sys_ioctl
++ .long sys_fcntl /* 55 */
++ .long sys_ni_syscall /* old mpx syscall holder */
++ .long sys_setpgid
++ .long sys_ni_syscall /* old ulimit syscall holder */
++ .long sys_ni_syscall
++ .long sys_umask /* 60 */
++ .long sys_chroot
++ .long sys_ustat
++ .long sys_dup2
++ .long sys_getppid
++ .long sys_getpgrp /* 65 */
++ .long sys_setsid
++ .long sys_sigaction
++ .long sys_sgetmask
++ .long sys_ssetmask
++ .long sys_setreuid16 /* 70 */
++ .long sys_setregid16
++ .long sys_sigsuspend
++ .long sys_sigpending
++ .long sys_sethostname
++ .long sys_setrlimit /* 75 */
++ .long sys_old_getrlimit
++ .long sys_getrusage
++ .long sys_gettimeofday
++ .long sys_settimeofday
++ .long sys_getgroups16 /* 80 */
++ .long sys_setgroups16
++ .long old_select
++ .long sys_symlink
++ .long sys_lstat
++ .long sys_readlink /* 85 */
++ .long sys_uselib
++ .long sys_swapon
++ .long sys_reboot
++ .long sys_old_readdir
++ .long old_mmap /* 90 */
++ .long sys_munmap
++ .long sys_truncate
++ .long sys_ftruncate
++ .long sys_fchmod
++ .long sys_fchown16 /* 95 */
++ .long sys_getpriority
++ .long sys_setpriority
++ .long sys_ni_syscall /* old profil syscall holder */
++ .long sys_statfs
++ .long sys_fstatfs /* 100 */
++ .long sys_ni_syscall /* ioperm for i386 */
++ .long sys_socketcall
++ .long sys_syslog
++ .long sys_setitimer
++ .long sys_getitimer /* 105 */
++ .long sys_newstat
++ .long sys_newlstat
++ .long sys_newfstat
++ .long sys_ni_syscall
++ .long sys_ni_syscall /* 110 */ /* iopl for i386 */
++ .long sys_vhangup
++ .long sys_ni_syscall /* obsolete idle() syscall */
++ .long sys_ni_syscall /* vm86old for i386 */
++ .long sys_wait4
++ .long sys_swapoff /* 115 */
++ .long sys_sysinfo
++ .long sys_ipc
++ .long sys_fsync
++ .long sys_sigreturn
++ .long sys_clone /* 120 */
++ .long sys_setdomainname
++ .long sys_newuname
++ .long sys_cacheflush /* modify_ldt for i386 */
++ .long sys_adjtimex
++ .long sys_mprotect /* 125 */
++ .long sys_sigprocmask
++ .long sys_ni_syscall /* old "create_module" */
++ .long sys_init_module
++ .long sys_delete_module
++ .long sys_ni_syscall /* 130 - old "get_kernel_syms" */
++ .long sys_quotactl
++ .long sys_getpgid
++ .long sys_fchdir
++ .long sys_bdflush
++ .long sys_sysfs /* 135 */
++ .long sys_personality
++ .long sys_ni_syscall /* for afs_syscall */
++ .long sys_setfsuid16
++ .long sys_setfsgid16
++ .long sys_llseek /* 140 */
++ .long sys_getdents
++ .long sys_select
++ .long sys_flock
++ .long sys_msync
++ .long sys_readv /* 145 */
++ .long sys_writev
++ .long sys_getsid
++ .long sys_fdatasync
++ .long sys_sysctl
++ .long sys_mlock /* 150 */
++ .long sys_munlock
++ .long sys_mlockall
++ .long sys_munlockall
++ .long sys_sched_setparam
++ .long sys_sched_getparam /* 155 */
++ .long sys_sched_setscheduler
++ .long sys_sched_getscheduler
++ .long sys_sched_yield
++ .long sys_sched_get_priority_max
++ .long sys_sched_get_priority_min /* 160 */
++ .long sys_sched_rr_get_interval
++ .long sys_nanosleep
++ .long sys_mremap
++ .long sys_setresuid16
++ .long sys_getresuid16 /* 165 */
++ .long sys_getpagesize
++ .long sys_ni_syscall /* old sys_query_module */
++ .long sys_poll
++ .long sys_nfsservctl
++ .long sys_setresgid16 /* 170 */
++ .long sys_getresgid16
++ .long sys_prctl
++ .long sys_rt_sigreturn
++ .long sys_rt_sigaction
++ .long sys_rt_sigprocmask /* 175 */
++ .long sys_rt_sigpending
++ .long sys_rt_sigtimedwait
++ .long sys_rt_sigqueueinfo
++ .long sys_rt_sigsuspend
++ .long sys_pread64 /* 180 */
++ .long sys_pwrite64
++ .long sys_lchown16
++ .long sys_getcwd
++ .long sys_capget
++ .long sys_capset /* 185 */
++ .long sys_sigaltstack
++ .long sys_sendfile
++ .long sys_ni_syscall /* streams1 */
++ .long sys_ni_syscall /* streams2 */
++ .long sys_vfork /* 190 */
++ .long sys_getrlimit
++ .long sys_mmap2
++ .long sys_truncate64
++ .long sys_ftruncate64
++ .long sys_stat64 /* 195 */
++ .long sys_lstat64
++ .long sys_fstat64
++ .long sys_chown
++ .long sys_getuid
++ .long sys_getgid /* 200 */
++ .long sys_geteuid
++ .long sys_getegid
++ .long sys_setreuid
++ .long sys_setregid
++ .long sys_getgroups /* 205 */
++ .long sys_setgroups
++ .long sys_fchown
++ .long sys_setresuid
++ .long sys_getresuid
++ .long sys_setresgid /* 210 */
++ .long sys_getresgid
++ .long sys_lchown
++ .long sys_setuid
++ .long sys_setgid
++ .long sys_setfsuid /* 215 */
++ .long sys_setfsgid
++ .long sys_pivot_root
++ .long sys_ni_syscall
++ .long sys_ni_syscall
++ .long sys_getdents64 /* 220 */
++ .long sys_gettid
++ .long sys_tkill
++ .long sys_setxattr
++ .long sys_lsetxattr
++ .long sys_fsetxattr /* 225 */
++ .long sys_getxattr
++ .long sys_lgetxattr
++ .long sys_fgetxattr
++ .long sys_listxattr
++ .long sys_llistxattr /* 230 */
++ .long sys_flistxattr
++ .long sys_removexattr
++ .long sys_lremovexattr
++ .long sys_fremovexattr
++ .long sys_futex /* 235 */
++ .long sys_sendfile64
++ .long sys_mincore
++ .long sys_madvise
++ .long sys_fcntl64
++ .long sys_readahead /* 240 */
++ .long sys_io_setup
++ .long sys_io_destroy
++ .long sys_io_getevents
++ .long sys_io_submit
++ .long sys_io_cancel /* 245 */
++ .long sys_fadvise64
++ .long sys_exit_group
++ .long sys_lookup_dcookie
++ .long sys_epoll_create
++ .long sys_epoll_ctl /* 250 */
++ .long sys_epoll_wait
++ .long sys_remap_file_pages
++ .long sys_set_tid_address
++ .long sys_timer_create
++ .long sys_timer_settime /* 255 */
++ .long sys_timer_gettime
++ .long sys_timer_getoverrun
++ .long sys_timer_delete
++ .long sys_clock_settime
++ .long sys_clock_gettime /* 260 */
++ .long sys_clock_getres
++ .long sys_clock_nanosleep
++ .long sys_statfs64
++ .long sys_fstatfs64
++ .long sys_tgkill /* 265 */
++ .long sys_utimes
++ .long sys_fadvise64_64
++ .long sys_mbind
++ .long sys_get_mempolicy
++ .long sys_set_mempolicy /* 270 */
++ .long sys_mq_open
++ .long sys_mq_unlink
++ .long sys_mq_timedsend
++ .long sys_mq_timedreceive
++ .long sys_mq_notify /* 275 */
++ .long sys_mq_getsetattr
++ .long sys_waitid
++ .long sys_ni_syscall /* for sys_vserver */
++ .long sys_add_key
++ .long sys_request_key /* 280 */
++ .long sys_keyctl
++ .long sys_ioprio_set
++ .long sys_ioprio_get
++ .long sys_inotify_init
++ .long sys_inotify_add_watch /* 285 */
++ .long sys_inotify_rm_watch
++ .long sys_migrate_pages
++ .long sys_openat
++ .long sys_mkdirat
++ .long sys_mknodat /* 290 */
++ .long sys_fchownat
++ .long sys_futimesat
++ .long sys_fstatat64
++ .long sys_unlinkat
++ .long sys_renameat /* 295 */
++ .long sys_linkat
++ .long sys_symlinkat
++ .long sys_readlinkat
++ .long sys_fchmodat
++ .long sys_faccessat /* 300 */
++ .long sys_ni_syscall /* Reserved for pselect6 */
++ .long sys_ni_syscall /* Reserved for ppoll */
++ .long sys_unshare
++ .long sys_set_robust_list
++ .long sys_get_robust_list /* 305 */
++ .long sys_splice
++ .long sys_sync_file_range
++ .long sys_tee
++ .long sys_vmsplice
++ .long sys_move_pages /* 310 */
++ .long sys_sched_setaffinity
++ .long sys_sched_getaffinity
++ .long sys_kexec_load
++ .long sys_getcpu
++ .long sys_epoll_pwait /* 315 */
++ .long sys_utimensat
++ .long sys_signalfd
++ .long sys_timerfd_create
++ .long sys_eventfd
++ .long sys_fallocate /* 320 */
++ .long sys_timerfd_settime
++ .long sys_timerfd_gettime
++ .long sys_signalfd4
++ .long sys_eventfd2
++ .long sys_epoll_create1 /* 325 */
++ .long sys_dup3
++ .long sys_pipe2
++ .long sys_inotify_init1
++ .long sys_preadv
++ .long sys_pwritev /* 330 */
++ .long sys_rt_tgsigqueueinfo
++ .long sys_perf_event_open
++ .long sys_get_thread_area
++ .long sys_set_thread_area
++ .long sys_atomic_cmpxchg_32 /* 335 */
++ .long sys_atomic_barrier
++ .long sys_fanotify_init
++ .long sys_fanotify_mark
++ .long sys_prlimit64
+--- /dev/null
++++ b/arch/m68k/coldfire/common/head.S
+@@ -0,0 +1,466 @@
++/*
++ * head.S is the MMU-enabled, ColdFire-specific initial boot code
++ *
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Matt Waddel Matt.Waddel@freescale.com
++ * Kurt Mahan kmahan@freescale.com
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * Parts of this code came from arch/m68k/kernel/head.S
++ */
++#include <linux/linkage.h>
++#include <linux/init.h>
++#include <asm/bootinfo.h>
++#include <asm/setup.h>
++#include <asm/entry.h>
++#include <asm/pgtable.h>
++#include <asm/page.h>
++#include <asm/coldfire.h>
++#include <asm/cfcache.h>
++
++#define DEBUG
++
++.globl kernel_pg_dir
++.globl availmem
++.globl set_context
++.globl set_fpga
++
++#ifdef DEBUG
++/* When debugging use readable names for labels */
++#ifdef __STDC__
++#define L(name) .head.S.##name
++#else
++#define L(name) .head.S./**/name
++#endif
++#else
++#ifdef __STDC__
++#define L(name) .L##name
++#else
++#define L(name) .L/**/name
++#endif
++#endif
++
++/* The __INITDATA stuff is a no-op when ftrace or kgdb are turned on */
++#ifndef __INITDATA
++#define __INITDATA .data
++#define __FINIT .previous
++#endif
++
++/*
++ * Kernel mapped to physical ram address.
++ *
++ * M5445x:
++ * Data[0]: 0xF0000000 -> 0xFFFFFFFF System regs
++ * Data[1]: 0x40000000 -> 0x4FFFFFFF SDRAM - cached
++ * Code[0]: Not Mapped
++ * Code[1]: 0x40000000 -> 0x4FFFFFFF SDRAM - cached
++ *
++ * M547x/M548x
++ * Data[0]: 0xF0000000 -> 0xFFFFFFFF System regs
++ * Data[1]: 0x00000000 -> 0x0FFFFFFF SDRAM - uncached
++ * Code[0]: Not Mapped
++ * Code[1]: 0x00000000 -> 0x0FFFFFFF SDRAM - cached
++ *
++ * M5441X:
++ * Data[0]: 0xE0000000 -> 0xFFFFFFFF System regs
++ * Data[1]: 0x40000000 -> 0x4FFFFFFF SDRAM - cached
++ * Code[0]: Not Mapped
++ * Code[1]: 0x40000000 -> 0x4FFFFFFF SDRAM - cached
++ */
++#if defined(CONFIG_M5445X)
++#define ACR0_DEFAULT #0xF00FA048 /* System Regs uncached/precise */
++#define ACR1_DEFAULT #0x400FA028 /* SDRAM cached/copyback */
++#define ACR2_DEFAULT #0x00000000 /* Not mapped */
++#define ACR3_DEFAULT #0x400FA028 /* SDRAM cached/copyback */
++#elif defined(CONFIG_M547X_8X)
++#define ACR0_DEFAULT #0xF00FA048 /* System Regs */
++#define ACR1_DEFAULT #0x000FA028 /* SDRAM cached/copy-back */
++#define ACR2_DEFAULT #0x00000000 /* Not mapped */
++#define ACR3_DEFAULT #0x000FA028 /* Instruction cached/copy-back */
++#elif defined(CONFIG_M5441X)
++#define ACR0_DEFAULT #0xE01FA048 /* System Regs */
++#define ACR1_DEFAULT #0x400FA028 /* SDRAM cached/copyback */
++#define ACR4_DEFAULT #0x00000000 /* Not mapped */
++#define ACR5_DEFAULT #0x00000000 /* Not mapped */
++#define ACR2_DEFAULT #0x00000000 /* Not mapped */
++#define ACR3_DEFAULT #0x400FA028 /* Instruction cached/copy-back */
++#define ACR6_DEFAULT #0x00000000 /* Not mapped */
++#define ACR7_DEFAULT #0x00000000 /* Not mapped */
++#endif
++
++/* ACR mapping for FPGA (maps 0) */
++#define ACR0_FPGA #0x000FA048 /* ACR0 enable FPGA */
++
++/* Several macros to make the writing of subroutines easier:
++ * - func_start marks the beginning of the routine which sets up the frame
++ * register and saves the registers, it also defines another macro
++ * to automatically restore the registers again.
++ * - func_return marks the end of the routine and simply calls the prepared
++ * macro to restore registers and jump back to the caller.
++ * - func_define generates another macro to automatically put arguments
++ * onto the stack, call the subroutine, and clean up the stack again.
++ */
++
++.macro load_symbol_address symbol,register
++ movel #\symbol,\register
++.endm
++
++.macro func_start name,saveregs,savesize,stack=0
++L(\name):
++ linkw %a6,#-\stack
++ subal #(\savesize),%sp
++ moveml \saveregs,%sp@
++.set stackstart,-\stack
++
++.macro func_return_\name
++ moveml %sp@,\saveregs
++ addal #(\savesize),%sp
++ unlk %a6
++ rts
++.endm
++.endm
++
++.macro func_return name
++ func_return_\name
++.endm
++
++.macro func_call name
++ jbsr L(\name)
++.endm
++
++.macro move_stack nr,arg1,arg2,arg3,arg4
++.if \nr
++ move_stack "(\nr-1)",\arg2,\arg3,\arg4
++ movel \arg1,%sp@-
++.endif
++.endm
++
++.macro func_define name,nr=0
++.macro \name arg1,arg2,arg3,arg4
++ move_stack \nr,\arg1,\arg2,\arg3,\arg4
++ func_call \name
++.if \nr
++ lea %sp@(\nr*4),%sp
++.endif
++.endm
++.endm
++
++func_define serial_putc,1
++
++.macro putc ch
++ pea \ch
++ func_call serial_putc
++ addql #4,%sp
++.endm
++
++.macro dputc ch
++#ifdef DEBUG
++ putc \ch
++#endif
++.endm
++
++func_define putn,1
++
++.macro dputn nr
++#ifdef DEBUG
++ putn \nr
++#endif
++.endm
++
++/*
++ mmu_map - creates a new TLB entry
++
++ virt_addr Must be on proper boundary
++ phys_addr Must be on proper boundary
++ itlb MMUOR_ITLB if instruction TLB or 0
++ asid address space ID
++ shared_global MMUTR_SG if shared between different ASIDs or 0
++ size_code MMUDR_SZ1M 1 MB
++ MMUDR_SZ4K 4 KB
++ MMUDR_SZ8K 8 KB
++ MMUDR_SZ16M 16 MB
++ cache_mode MMUDR_INC instruction non-cacheable
++ MMUDR_IC instruction cacheable
++ MMUDR_DWT data writethrough
++ MMUDR_DCB data copyback
++ MMUDR_DNCP data non-cacheable, precise
++ MMUDR_DNCIP data non-cacheable, imprecise
++ super_prot MMUDR_SP if user mode generates exception or 0
++ readable MMUDR_R if permits read access (data TLB) or 0
++ writable MMUDR_W if permits write access (data TLB) or 0
++ executable MMUDR_X if permits execute access (instruction TLB) or 0
++ locked MMUDR_LK prevents TLB entry from being replaced or 0
++ temp_data_reg a data register to use for temporary values
++*/
++.macro mmu_map virt_addr,phys_addr,itlb,asid,shared_global,size_code, \
++ cache_mode,super_prot,readable,writable,executable,locked,temp_data_reg
++ /* Set up search of TLB. */
++ movel #(\virt_addr+1), \temp_data_reg
++ movel \temp_data_reg, MMUAR
++ /* Search. */
++ movel #(MMUOR_STLB + MMUOR_ADR +\itlb), \temp_data_reg
++ movew \temp_data_reg, (MMUOR)
++ /* Set up tag value. */
++ movel #(\virt_addr + \asid + \shared_global + MMUTR_V), \temp_data_reg
++ movel \temp_data_reg, MMUTR
++ /* Set up data value. */
++ movel #(\phys_addr + \size_code + \cache_mode + \super_prot + \
++ \readable + \writable + \executable + \locked), \temp_data_reg
++ movel \temp_data_reg, MMUDR
++ /* Save it. */
++ movel #(MMUOR_ACC + MMUOR_UAA + \itlb), \temp_data_reg
++ movew \temp_data_reg, (MMUOR)
++.endm /* mmu_map */
++
++.macro mmu_unmap virt_addr,itlb,temp_data_reg
++ /* Set up search of TLB. */
++ movel #(\virt_addr+1), \temp_data_reg
++ movel \temp_data_reg, MMUAR
++ /* Search. */
++ movel #(MMUOR_STLB + MMUOR_ADR +\itlb), \temp_data_reg
++ movew \temp_data_reg, (MMUOR)
++ /* Test for hit. */
++ movel MMUSR,\temp_data_reg
++ btst #MMUSR_HITN,\temp_data_reg
++ beq 1f
++ /* Read the TLB. */
++ movel #(MMUOR_RW + MMUOR_ACC +\itlb), \temp_data_reg
++ movew \temp_data_reg, (MMUOR)
++ movel MMUSR,\temp_data_reg
++ /* Set up tag value. */
++ movel #0, \temp_data_reg
++ movel \temp_data_reg, MMUTR
++ /* Set up data value. */
++ movel #0, \temp_data_reg
++ movel \temp_data_reg, MMUDR
++ /* Save it. */
++ movel #(MMUOR_ACC + MMUOR_UAA + \itlb), \temp_data_reg
++ movew \temp_data_reg, (MMUOR)
++1:
++.endm /* mmu_unmap */
++
++/* .text */
++.section ".text.head","ax"
++ENTRY(_stext)
++/* Version numbers of the bootinfo interface -- if we later pass info
++ * from boot ROM we might want to put something real here.
++ *
++ * The area from _stext to _start will later be used as kernel pointer table
++ */
++ bras 1f /* Jump over bootinfo version numbers */
++
++ .long BOOTINFOV_MAGIC
++ .long 0
++1: jmp __start
++
++.equ kernel_pg_dir,_stext
++.equ .,_stext+0x1000
++
++ENTRY(_start)
++ jra __start
++__INIT
++ENTRY(__start)
++/* Save the location of u-boot info - cmd line, bd_info, etc. */
++ movel %a7,%a4 /* Don't use %a4 before cf_early_init */
++ addl #0x00000004,%a4 /* offset past top */
++ addl #(PAGE_OFFSET-CONFIG_SDRAM_BASE),%a4 /* high mem offset */
++
++/* Setup initial stack pointer */
++ movel #CONFIG_SDRAM_BASE+0x1000,%sp
++
++/* Setup usp */
++ subl %a0,%a0
++ movel %a0,%usp
++
++#if defined(CONFIG_M5445X) || defined(CONFIG_M5441X)
++#if defined(CONFIG_SRAM)
++ movel #(CONFIG_SRAM_BASE+0x221), %d0
++#else
++ movel #0x80000000, %d0
++#endif
++
++#ifdef CONFIG_M5441X
++ movec %d0, %rambar
++#else
++ movec %d0, %rambar1
++#endif
++#elif defined(CONFIG_M547X_8X)
++ movel #MCF_MBAR, %d0
++ movec %d0, %mbar
++ move.l #(MCF_RAMBAR0 + 0x21), %d0
++ movec %d0, %rambar0
++ move.l #(MCF_RAMBAR1 + 0x21), %d0
++ movec %d0, %rambar1
++#endif
++
++ movew #0x2700,%sr
++
++/* reset cache */
++ movel #(CF_CACR_ICINVA + CF_CACR_DCINVA),%d0
++ movecl %d0,%cacr
++
++ movel #(MMU_BASE+1),%d0
++ movecl %d0,%mmubar
++ movel #MMUOR_CA,%a0 /* Clear tlb entries */
++ movew %a0,(MMUOR)
++ movel #(MMUOR_CA + MMUOR_ITLB),%a0 /* Use ITLB for searches */
++ movew %a0,(MMUOR)
++ movel #0,%a0 /* Clear Addr Space User ID */
++ movecl %a0,%asid
++
++/* setup ACRs */
++#if defined(CONFIG_M5445X) || defined(CONFIG_M547X_8X) || defined(CONFIG_M5441X)
++ movel ACR0_DEFAULT, %d0 /* ACR0 (DATA) setup */
++ movec %d0, %acr0
++ nop
++ movel ACR1_DEFAULT, %d0 /* ACR1 (DATA) setup */
++ movec %d0, %acr1
++ nop
++ movel ACR2_DEFAULT, %d0 /* ACR2 (CODE) setup */
++ movec %d0, %acr2
++ nop
++ movel ACR3_DEFAULT, %d0 /* ACR3 (CODE) setup */
++ movec %d0, %acr3
++ nop
++#endif
++ /* Turn on MMU */
++ movel #(MMUCR_EN),%a0
++ movel %a0,MMUCR
++ nop /* This synchs the pipeline after a write to MMUCR */
++
++ movel #__running_high,%a0 /* Get around PC-relative addressing. */
++ jmp %a0@
++
++ENTRY(__running_high)
++ load_symbol_address _stext,%sp
++ movel L(memory_start),%a0
++ movel %a0,availmem
++ load_symbol_address L(phys_kernel_start),%a0
++ load_symbol_address _stext,%a1
++ subl #_stext,%a1
++ addl #PAGE_OFFSET,%a1
++ movel %a1,%a0@
++
++/* zero bss */
++ lea _sbss,%a0
++ lea _ebss,%a1
++ clrl %d0
++_loop_bss:
++ movel %d0,(%a0)+
++ cmpl %a0,%a1
++ bne _loop_bss
++
++/* create dma memory mirror TLB mapping */
++#if defined(CONFIG_M5445X) || defined(CONFIG_M5441X)
++ mmu_map CONFIG_DMA_BASE, \
++ CONFIG_SDRAM_BASE, 0, 0, \
++ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
++ 0, MMUDR_LK, %d0
++#elif defined(CONFIG_M547X_8X)
++ mmu_map (CONFIG_DMA_BASE + 0*1024*1024), \
++ (CONFIG_SDRAM_BASE + 0*1024*1024), 0, 0, \
++ MMUTR_SG, MMUDR_SZ1M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, \
++ MMUDR_W, 0, MMUDR_LK, %d0
++ mmu_map (CONFIG_DMA_BASE + 1*1024*1024), \
++ (CONFIG_SDRAM_BASE + 1*1024*1024), 0, 0, \
++ MMUTR_SG, MMUDR_SZ1M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, \
++ MMUDR_W, 0, MMUDR_LK, %d0
++ mmu_map (CONFIG_DMA_BASE + 2*1024*1024), \
++ (CONFIG_SDRAM_BASE + 2*1024*1024), 0, 0, \
++ MMUTR_SG, MMUDR_SZ1M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, \
++ MMUDR_W, 0, MMUDR_LK, %d0
++ mmu_map (CONFIG_DMA_BASE + 3*1024*1024), \
++ (CONFIG_SDRAM_BASE + 3*1024*1024), 0, 0, \
++ MMUTR_SG, MMUDR_SZ1M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, \
++ MMUDR_W, 0, MMUDR_LK, %d0
++ mmu_map (CONFIG_DMA_BASE + 4*1024*1024), \
++ (CONFIG_SDRAM_BASE + 4*1024*1024), 0, 0, \
++ MMUTR_SG, MMUDR_SZ1M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, \
++ MMUDR_W, 0, MMUDR_LK, %d0
++ mmu_map (CONFIG_DMA_BASE + 5*1024*1024), \
++ (CONFIG_SDRAM_BASE + 5*1024*1024), 0, 0, \
++ MMUTR_SG, MMUDR_SZ1M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, \
++ MMUDR_W, 0, MMUDR_LK, %d0
++ mmu_map (CONFIG_DMA_BASE + 6*1024*1024), \
++ (CONFIG_SDRAM_BASE + 6*1024*1024), 0, 0, \
++ MMUTR_SG, MMUDR_SZ1M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, \
++ MMUDR_W, 0, MMUDR_LK, %d0
++ mmu_map (CONFIG_DMA_BASE + 7*1024*1024), \
++ (CONFIG_SDRAM_BASE + 7*1024*1024), 0, 0, \
++ MMUTR_SG, MMUDR_SZ1M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, \
++ MMUDR_W, 0, MMUDR_LK, %d0
++#endif
++
++/* Setup initial stack pointer */
++ lea init_task,%a2
++ lea init_thread_union+THREAD_SIZE,%sp
++ subl %a6,%a6 /* clear a6 for gdb */
++
++#ifdef CONFIG_MCF_USER_HALT
++/* Setup debug control reg to allow halts from user space */
++ lea wdbg_uhe,%a0
++ wdebug (%a0)
++#endif
++
++ movel %a4,uboot_info_stk /* save uboot info to variable */
++ jsr cf_early_init
++ jmp start_kernel
++
++.section ".text.head","ax"
++set_context:
++func_start set_context,%d0,(1*4)
++ movel 12(%sp),%d0
++ movec %d0,%asid
++func_return set_context
++
++#ifdef CONFIG_M54455
++/*
++ * set_fpga(addr,val) on the M5445X
++ *
++ * Map in 0x00000000 -> 0x0fffffff and then do the write.
++ */
++set_fpga:
++ movew %sr,%d1
++ movew #0x2700,%sr
++ movel ACR0_FPGA, %d0
++ movec %d0, %acr0
++ nop
++ moveal 4(%sp),%a0
++ movel 8(%sp),%a0@
++ movel ACR0_DEFAULT, %d0
++ movec %d0, %acr0
++ nop
++ movew %d1,%sr
++ rts
++#endif
++
++ .data
++ .align 4
++
++availmem:
++ .long 0
++L(phys_kernel_start):
++ .long PAGE_OFFSET
++L(kernel_end):
++ .long 0
++L(memory_start):
++ .long PAGE_OFFSET_RAW
++
++#ifdef CONFIG_MCF_USER_HALT
++/*
++ * Enable User Halt Enable in the debug control register.
++ */
++wdbg_uhe:
++ .word 0x2c80 /* DR0 */
++ .word 0x00b0 /* 31:16 */
++ .word 0x0400 /* 15:0 -- enable UHE */
++ .word 0x0000 /* unused */
++#endif
++
++
+--- /dev/null
++++ b/arch/m68k/coldfire/common/ints.c
+@@ -0,0 +1,544 @@
++/*
++ * linux/arch/m68k/coldfire/ints.c -- General interrupt handling code
++ *
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Matt Waddel Matt.Waddel@freescale.com
++ * Kurt Mahan kmahan@freescale.com
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
++ *
++ * Based on:
++ * linux/arch/m68k/kernel/ints.c &
++ * linux/arch/m68knommu/5307/ints.c
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/sched.h>
++#include <linux/kernel_stat.h>
++#include <linux/errno.h>
++#include <linux/seq_file.h>
++#include <linux/interrupt.h>
++
++#include <asm/system.h>
++#include <asm/irq.h>
++#include <asm/traps.h>
++#include <asm/page.h>
++#include <asm/machdep.h>
++#include <asm/irq_regs.h>
++
++#include <asm/mcfsim.h>
++
++/*
++ * IRQ Handler lists.
++ */
++static struct irq_node *irq_list[SYS_IRQS];
++static struct irq_controller *irq_controller[SYS_IRQS];
++static int irq_depth[SYS_IRQS];
++
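++/*
++ * Handler descriptors come from a fixed pool rather than the allocator:
++ * an entry is free while its ->handler is NULL (see get_irq_node()),
++ * and request_irq() returns -ENOMEM once the pool is exhausted.
++ */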
++#define POOL_SIZE SYS_IRQS
++static struct irq_node pool[POOL_SIZE];
++static struct irq_node *get_irq_node(void);
++
++/* The number of spurious interrupts */
++unsigned int num_spurious;
++asmlinkage void handle_badint(struct pt_regs *regs);
++
++/*
++ * process_int(unsigned long vec, struct pt_regs *fp)
++ *
++ * Process an interrupt. Called from entry.S.
++ */
++asmlinkage void process_int(unsigned long vec, struct pt_regs *fp)
++{
++ struct pt_regs *old_regs;
++ struct irq_node *node;
++ old_regs = set_irq_regs(fp);
++ kstat_cpu(0).irqs[vec]++;
++
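++	/*
++	 * Run every handler chained on this vector; shared handlers are
++	 * linked through ->next by setup_irq().  Vectors nobody claimed
++	 * are counted as spurious in handle_badint().
++	 */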
++ node = irq_list[vec];
++ if (!node)
++ handle_badint(fp);
++ else {
++ do {
++ node->handler(vec, node->dev_id);
++ node = node->next;
++ } while (node);
++ }
++
++ set_irq_regs(old_regs);
++}
++
++/*
++ * show_interrupts( struct seq_file *p, void *v)
++ *
++ * Called to show all the current interrupt information.
++ */
++int show_interrupts(struct seq_file *p, void *v)
++{
++ struct irq_controller *contr;
++ struct irq_node *node;
++ int i = *(loff_t *) v;
++
++ if ((i < NR_IRQS) && (irq_list[i])) {
++ contr = irq_controller[i];
++ node = irq_list[i];
++ seq_printf(p, "%-8s %3u: %10u %s", contr->name, i,
++ kstat_cpu(0).irqs[i], node->devname);
++ while ((node = node->next))
++ seq_printf(p, ", %s", node->devname);
++
++ seq_printf(p, "\n");
++ }
++
++ return 0;
++}
++
++/*
++ * get_irq_node(void)
++ *
++ * Get an irq node from the pool.
++ */
++struct irq_node *get_irq_node(void)
++{
++ struct irq_node *p = pool;
++ int i;
++
++ for (i = 0; i < POOL_SIZE; i++, p++) {
++ if (!p->handler) {
++ memset(p, 0, sizeof(struct irq_node));
++ return p;
++ }
++ }
++	printk(KERN_INFO "%s(%s:%d): No more irq nodes, I suggest you "
++		"increase POOL_SIZE\n", __func__, __FILE__, __LINE__);
++ return NULL;
++}
++
++void init_irq_proc(void)
++{
++ /* Insert /proc/irq driver here */
++}
++
++int setup_irq(unsigned int irq, struct irq_node *node)
++{
++ struct irq_controller *contr;
++ struct irq_node **prev;
++ unsigned long flags;
++
++ if (irq >= NR_IRQS || !irq_controller[irq]) {
++		printk(KERN_INFO "%s: Incorrect IRQ %d from %s\n",
++			__func__, irq, node->devname);
++ return -ENXIO;
++ }
++
++ contr = irq_controller[irq];
++ spin_lock_irqsave(&contr->lock, flags);
++
++ prev = irq_list + irq;
++ if (*prev) {
++ /* Can't share interrupts unless both agree to */
++ if (!((*prev)->flags & node->flags & IRQF_SHARED)) {
++ spin_unlock_irqrestore(&contr->lock, flags);
++ printk(KERN_INFO "%s: -BUSY-Incorrect IRQ %d\n",
++ __func__, irq);
++ return -EBUSY;
++ }
++ while (*prev)
++ prev = &(*prev)->next;
++ }
++
++ if (!irq_list[irq]) {
++ if (contr->startup)
++ contr->startup(irq);
++ else
++ contr->enable(irq);
++ }
++ node->next = NULL;
++ *prev = node;
++
++ spin_unlock_irqrestore(&contr->lock, flags);
++
++ return 0;
++}
++
++int request_irq(unsigned int irq,
++ irq_handler_t handler,
++ unsigned long flags, const char *devname, void *dev_id)
++{
++ struct irq_node *node = get_irq_node();
++ int res;
++
++ if (!node) {
++ printk(KERN_INFO "%s:get_irq_node error %x\n",
++ __func__, (unsigned int) node);
++ return -ENOMEM;
++ }
++ node->handler = handler;
++ node->flags = flags;
++ node->dev_id = dev_id;
++ node->devname = devname;
++
++ res = setup_irq(irq, node);
++ if (res)
++ node->handler = NULL;
++
++ return res;
++}
++EXPORT_SYMBOL(request_irq);
++
++void free_irq(unsigned int irq, void *dev_id)
++{
++ struct irq_controller *contr;
++ struct irq_node **p, *node;
++ unsigned long flags;
++
++ if (irq >= NR_IRQS || !irq_controller[irq]) {
++ printk(KERN_DEBUG "%s: Incorrect IRQ %d\n", __func__, irq);
++ return;
++ }
++
++ contr = irq_controller[irq];
++ spin_lock_irqsave(&contr->lock, flags);
++
++ p = irq_list + irq;
++ while ((node = *p)) {
++ if (node->dev_id == dev_id)
++ break;
++ p = &node->next;
++ }
++
++ if (node) {
++ *p = node->next;
++ node->handler = NULL;
++ } else
++ printk(KERN_DEBUG "%s: Removing probably wrong IRQ %d\n",
++ __func__, irq);
++
++ if (!irq_list[irq]) {
++ if (contr->shutdown)
++ contr->shutdown(irq);
++ else
++ contr->disable(irq);
++ }
++
++ spin_unlock_irqrestore(&contr->lock, flags);
++}
++EXPORT_SYMBOL(free_irq);
++
++void enable_irq(unsigned int irq)
++{
++ struct irq_controller *contr;
++ unsigned long flags;
++
++ if (irq >= NR_IRQS || !irq_controller[irq]) {
++ printk(KERN_DEBUG "%s: Incorrect IRQ %d\n", __func__, irq);
++ return;
++ }
++
++ contr = irq_controller[irq];
++ spin_lock_irqsave(&contr->lock, flags);
++ if (irq_depth[irq]) {
++ if (!--irq_depth[irq]) {
++ if (contr->enable)
++ contr->enable(irq);
++ }
++ } else
++ WARN_ON(1);
++ spin_unlock_irqrestore(&contr->lock, flags);
++}
++EXPORT_SYMBOL(enable_irq);
++
++void disable_irq(unsigned int irq)
++{
++ struct irq_controller *contr;
++ unsigned long flags;
++
++ if (irq >= NR_IRQS || !irq_controller[irq]) {
++ printk(KERN_DEBUG "%s: Incorrect IRQ %d\n", __func__, irq);
++ return;
++ }
++
++ contr = irq_controller[irq];
++ spin_lock_irqsave(&contr->lock, flags);
++ if (!irq_depth[irq]++) {
++ if (contr->disable)
++ contr->disable(irq);
++ }
++ spin_unlock_irqrestore(&contr->lock, flags);
++}
++EXPORT_SYMBOL(disable_irq);
++
++void disable_irq_nosync(unsigned int irq) __attribute__((alias("disable_irq")));
++EXPORT_SYMBOL(disable_irq_nosync);
++
++
++unsigned long probe_irq_on(void)
++{
++ return 0;
++}
++EXPORT_SYMBOL(probe_irq_on);
++
++int probe_irq_off(unsigned long irqs)
++{
++ return 0;
++}
++EXPORT_SYMBOL(probe_irq_off);
++
++asmlinkage void handle_badint(struct pt_regs *regs)
++{
++ kstat_cpu(0).irqs[0]++;
++ num_spurious++;
++ printk(KERN_DEBUG "unexpected interrupt from %u\n", regs->vector);
++}
++EXPORT_SYMBOL(handle_badint);
++
++unsigned int irq_canonicalize(unsigned int irq)
++{
++#ifdef CONFIG_Q40
++ if (MACH_IS_Q40 && irq == 11)
++ irq = 10;
++#endif
++ return irq;
++}
++EXPORT_SYMBOL(irq_canonicalize);
++
++#ifdef CONFIG_M5445X
++/*
++ * M5445X Implementation
++ */
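++/*
++ * Linux IRQs below 64 are reserved for CPU exceptions and other
++ * non-hardware vectors, so peripheral sources start at 64: irqs 64-127
++ * belong to interrupt controller 0 and 128-191 to controller 1.
++ * Sources 1-7 of controller 0 are the edge-port (EPORT) pins and
++ * additionally get their pin configuration programmed here, e.g.
++ * Linux irq 65 is EPORT pin 1.
++ */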
++void m5445x_irq_enable(unsigned int irq)
++{
++ /* enable the interrupt hardware */
++ if (irq < 64)
++ return;
++
++ /* adjust past non-hardware ints */
++ irq -= 64;
++
++ /* check for eport */
++ if ((irq > 0) && (irq < 8)) {
++ /* enable eport */
++ MCF_EPORT_EPPAR &= ~(3 << (irq*2)); /* level */
++ MCF_EPORT_EPDDR &= ~(1 << irq); /* input */
++ MCF_EPORT_EPIER |= 1 << irq; /* irq enabled */
++ }
++
++ if (irq < 64) {
++ /* controller 0 */
++ MCF_INTC0_ICR(irq) = 0x02;
++ MCF_INTC0_CIMR = irq;
++ } else {
++ /* controller 1 */
++ irq -= 64;
++ MCF_INTC1_ICR(irq) = 0x02;
++ MCF_INTC1_CIMR = irq;
++ }
++}
++
++void m5445x_irq_disable(unsigned int irq)
++{
++ /* disable the interrupt hardware */
++ if (irq < 64)
++ return;
++
++ /* adjust past non-hardware ints */
++ irq -= 64;
++
++ /* check for eport */
++ if ((irq > 0) && (irq < 8)) {
++ /* disable eport */
++ MCF_EPORT_EPIER &= ~(1 << irq);
++ }
++
++ if (irq < 64) {
++ /* controller 0 */
++ MCF_INTC0_ICR(irq) = 0x00;
++ MCF_INTC0_SIMR = irq;
++ } else {
++ /* controller 1 */
++ irq -= 64;
++ MCF_INTC1_ICR(irq) = 0x00;
++ MCF_INTC1_SIMR = irq;
++ }
++}
++#elif defined(CONFIG_M547X_8X)
++/*
++ * M547X_8X Implementation
++ */
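++/*
++ * The MCF547x/548x parts use a single pair of mask registers
++ * (IMRL/IMRH) instead of per-source ICRs: bit n of IMRL masks source n
++ * (0-31) and bit n of IMRH masks source 32+n.  IMRL bit 0 is the
++ * global "mask all" bit, which is why the code below is careful never
++ * to set it.
++ */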
++void m547x_8x_irq_enable(unsigned int irq)
++{
++ /* enable the interrupt hardware */
++ if (irq < 64)
++ return;
++
++ /* adjust past non-hardware ints */
++ irq -= 64;
++
++ /* check for eport */
++ if ((irq > 0) && (irq < 8)) {
++ /* enable eport */
++ MCF_EPPAR &= ~(3 << (irq*2));
++ /* level */
++ MCF_EPDDR &= ~(1 << irq);
++ /* input */
++ MCF_EPIER |= 1 << irq;
++ /* irq enabled */
++ }
++
++ if (irq < 32) {
++ /* *grumble* don't set low bit of IMRL */
++ MCF_IMRL &= (~(1 << irq) & 0xfffffffe);
++ } else {
++ MCF_IMRH &= ~(1 << (irq - 32));
++ }
++}
++
++void m547x_8x_irq_disable(unsigned int irq)
++{
++ /* disable the interrupt hardware */
++ if (irq < 64)
++ return;
++
++ /* adjust past non-hardware ints */
++ irq -= 64;
++
++ /* check for eport */
++ if ((irq > 0) && (irq < 8)) {
++ /* disable eport */
++ MCF_EPIER &= ~(1 << irq);
++ }
++
++ if (irq < 32)
++ MCF_IMRL |= (1 << irq);
++ else
++ MCF_IMRH |= (1 << (irq - 32));
++}
++
++#elif defined(CONFIG_M5441X)
++/*
++ * M5441X Implementation
++ */
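++/*
++ * Same numbering scheme as the M5445X but with three interrupt
++ * controllers: after subtracting the 64 non-hardware vectors, sources
++ * 0-63 belong to INTC0, 64-127 to INTC1 and 128-191 to INTC2
++ * (Linux irqs 64-255).
++ */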
++void m5441x_irq_enable(unsigned int irq)
++{
++ /* enable the interrupt hardware */
++ if (irq < 64)
++ return;
++
++ /* adjust past non-hardware ints */
++ irq -= 64;
++
++ /* check for eport */
++ if ((irq > 0) && (irq < 8)) {
++ /* enable eport */
++ MCF_EPORT_EPPAR &= ~(3 << (irq*2)); /* level */
++ MCF_EPORT_EPIER |= 1 << irq; /* irq enabled */
++ }
++
++ if (irq < 64) {
++ /* controller 0 */
++ MCF_INTC0_ICR(irq) = 0x02;
++ MCF_INTC0_CIMR = irq;
++ } else if (irq >= 64 && irq < 128) {
++ /* controller 1 */
++ irq -= 64;
++ MCF_INTC1_ICR(irq) = 0x02;
++ MCF_INTC1_CIMR = irq;
++ } else if (irq >= 128 && irq < 192) {
++ /* controller 2 */
++ irq -= 128;
++ MCF_INTC2_ICR(irq) = 0x02;
++ MCF_INTC2_CIMR = irq;
++ } else {
++ /* invalid irq number */
++ return;
++ }
++}
++
++void m5441x_irq_disable(unsigned int irq)
++{
++ /* disable the interrupt hardware */
++ if (irq < 64)
++ return;
++
++ /* adjust past non-hardware ints */
++ irq -= 64;
++
++ /* check for eport */
++ if ((irq > 0) && (irq < 8)) {
++ /* disable eport */
++ MCF_EPORT_EPIER &= ~(1 << irq);
++ }
++
++ if (irq < 64) {
++ /* controller 0 */
++ MCF_INTC0_ICR(irq) = 0x00;
++ MCF_INTC0_SIMR = irq;
++ } else if (irq >= 64 && irq < 128) {
++ /* controller 1 */
++ irq -= 64;
++ MCF_INTC1_ICR(irq) = 0x00;
++ MCF_INTC1_SIMR = irq;
++ } else if (irq >= 128 && irq < 192) {
++ /* controller 2 */
++ irq -= 128;
++ MCF_INTC2_ICR(irq) = 0x00;
++ MCF_INTC2_SIMR = irq;
++ }
++}
++#endif
++
++/*
++ * IRQ Controller
++ */
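++/*
++ * Exactly one of the controllers below is built in and, in init_IRQ(),
++ * attached to every interrupt.  The optional startup/shutdown hooks
++ * fall back to enable/disable in setup_irq() and free_irq().
++ */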
++#if defined(CONFIG_M5445X)
++static struct irq_controller m5445x_irq_controller = {
++ .name = "M5445X",
++ .lock = __SPIN_LOCK_UNLOCKED(m5445x_irq_controller.lock),
++ .enable = m5445x_irq_enable,
++ .disable = m5445x_irq_disable,
++};
++#elif defined(CONFIG_M547X_8X)
++static struct irq_controller m547x_8x_irq_controller = {
++ .name = "M547X_8X",
++ .lock = __SPIN_LOCK_UNLOCKED(m547x_8x_irq_controller.lock),
++ .enable = m547x_8x_irq_enable,
++ .disable = m547x_8x_irq_disable,
++};
++#elif defined(CONFIG_M5441X)
++static struct irq_controller m5441x_irq_controller = {
++ .name = "M5441X",
++ .lock = __SPIN_LOCK_UNLOCKED(m5441x_irq_controller.lock),
++ .enable = m5441x_irq_enable,
++ .disable = m5441x_irq_disable,
++};
++#else
++# error No IRQ controller defined
++#endif
++
++/*
++ * void init_IRQ(void)
++ *
++ * This function should be called during kernel startup to initialize
++ * the IRQ handling routines.
++ */
++void __init init_IRQ(void)
++{
++ int i;
++
++#if defined(CONFIG_M5445X)
++ for (i = 0; i < SYS_IRQS; i++)
++ irq_controller[i] = &m5445x_irq_controller;
++#elif defined(CONFIG_M547X_8X)
++ for (i = 0; i < SYS_IRQS; i++)
++ irq_controller[i] = &m547x_8x_irq_controller;
++#elif defined(CONFIG_M5441X)
++ for (i = 0; i < SYS_IRQS; i++)
++ irq_controller[i] = &m5441x_irq_controller;
++#endif
++}
+--- /dev/null
++++ b/arch/m68k/coldfire/common/muldi3.S
+@@ -0,0 +1,73 @@
++/*
++ * Coldfire muldi3 assembly version
++ *
++ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++#include <linux/linkage.h>
++.globl __muldi3
++
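++/*
++ * 64x64->64 bit multiply built from 32x32->32 bit mulsl instructions:
++ * the two low words are split into 16-bit halves and combined as
++ * partial products to form the full 64-bit low product, then each
++ * operand's high word contributes its high*low cross term to the
++ * upper 32 bits of the result.
++ */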
++ENTRY(__muldi3)
++ linkw %fp,#0
++ lea %sp@(-32),%sp
++ moveml %d2-%d7/%a2-%a3,%sp@
++ moveal %fp@(8), %a2
++ moveal %fp@(12), %a3
++ moveal %fp@(16), %a0
++ moveal %fp@(20),%a1
++ movel %a3,%d2
++ andil #65535,%d2
++ movel %a3,%d3
++ clrw %d3
++ swap %d3
++ movel %a1,%d0
++ andil #65535,%d0
++ movel %a1,%d1
++ clrw %d1
++ swap %d1
++ movel %d2,%d7
++ mulsl %d0,%d7
++ movel %d2,%d4
++ mulsl %d1,%d4
++ movel %d3,%d2
++ mulsl %d0,%d2
++ mulsl %d1,%d3
++ movel %d7,%d0
++ clrw %d0
++ swap %d0
++ addl %d0,%d4
++ addl %d2,%d4
++ cmpl %d4,%d2
++ blss 1f
++ addil #65536,%d3
++1:
++ movel %d4,%d0
++ clrw %d0
++ swap %d0
++ movel %d3,%d5
++ addl %d0,%d5
++ movew %d4,%d6
++ swap %d6
++ movew %d7,%d6
++ movel %d5,%d0
++ movel %d6,%d1
++ movel %a3,%d2
++ movel %a0,%d3
++ mulsl %d3,%d2
++ movel %a2,%d3
++ movel %a1,%d4
++ mulsl %d4,%d3
++ addl %d3,%d2
++ movel %d2,%d0
++ addl %d5,%d0
++ moveml %sp@, %d2-%d7/%a2-%a3
++ lea %sp@(32),%sp
++ unlk %fp
++ rts
+--- /dev/null
++++ b/arch/m68k/coldfire/common/signal.c
+@@ -0,0 +1,991 @@
++/*
++ * linux/arch/m68k/kernel/signal.c
++ *
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Matt Waddel Matt.Waddel@freescale.com
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ *
++ * Derived from m68k/kernel/signal.c and the original authors are credited
++ * there.
++ */
++
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/kernel.h>
++#include <linux/signal.h>
++#include <linux/syscalls.h>
++#include <linux/errno.h>
++#include <linux/wait.h>
++#include <linux/ptrace.h>
++#include <linux/unistd.h>
++#include <linux/stddef.h>
++#include <linux/highuid.h>
++#include <linux/personality.h>
++#include <linux/tty.h>
++#include <linux/binfmts.h>
++
++#include <asm/setup.h>
++#include <asm/uaccess.h>
++#include <asm/cf_pgtable.h>
++#include <asm/traps.h>
++#include <asm/ucontext.h>
++#include <asm/cacheflush.h>
++
++#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
++
++asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs);
++
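++/*
++ * Amount of extra exception-frame state, indexed by frame format.
++ * Entries of -1 mark formats that cannot occur here; both the signal
++ * setup and the sigreturn paths treat them as bad frames and raise
++ * SIGSEGV.
++ */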
++const int frame_extra_sizes[16] = {
++ [1] = -1,
++ [2] = -1,
++ [3] = -1,
++ [4] = 0,
++ [5] = 1,
++ [6] = 1,
++ [7] = 2,
++ [8] = 3,
++ [9] = -1,
++ [10] = -1,
++ [11] = -1,
++ [12] = -1,
++ [13] = -1,
++ [14] = -1,
++ [15] = -1,
++};
++
++/*
++ * Atomically swap in the new signal mask, and wait for a signal.
++ */
++asmlinkage int do_sigsuspend(struct pt_regs *regs)
++{
++ old_sigset_t mask = regs->d3;
++ sigset_t saveset;
++
++ mask &= _BLOCKABLE;
++ spin_lock_irq(&current->sighand->siglock);
++ saveset = current->blocked;
++ siginitset(&current->blocked, mask);
++ recalc_sigpending();
++ spin_unlock_irq(&current->sighand->siglock);
++
++ regs->d0 = -EINTR;
++ while (1) {
++ current->state = TASK_INTERRUPTIBLE;
++ schedule();
++ if (do_signal(&saveset, regs))
++ return -EINTR;
++ }
++}
++
++asmlinkage int
++do_rt_sigsuspend(struct pt_regs *regs)
++{
++ sigset_t __user *unewset = (sigset_t __user *)regs->d1;
++ size_t sigsetsize = (size_t)regs->d2;
++ sigset_t saveset, newset;
++
++ /* XXX: Don't preclude handling different sized sigset_t's. */
++ if (sigsetsize != sizeof(sigset_t))
++ return -EINVAL;
++
++ if (copy_from_user(&newset, unewset, sizeof(newset)))
++ return -EFAULT;
++ sigdelsetmask(&newset, ~_BLOCKABLE);
++
++ spin_lock_irq(&current->sighand->siglock);
++ saveset = current->blocked;
++ current->blocked = newset;
++ recalc_sigpending();
++ spin_unlock_irq(&current->sighand->siglock);
++
++ regs->d0 = -EINTR;
++ while (1) {
++ current->state = TASK_INTERRUPTIBLE;
++ schedule();
++ if (do_signal(&saveset, regs))
++ return -EINTR;
++ }
++}
++
++asmlinkage int
++sys_sigaction(int sig, const struct old_sigaction __user *act,
++ struct old_sigaction __user *oact)
++{
++ struct k_sigaction new_ka, old_ka;
++ int ret;
++
++ if (act) {
++ old_sigset_t mask;
++ if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
++ __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
++ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
++ return -EFAULT;
++ __get_user(new_ka.sa.sa_flags, &act->sa_flags);
++ __get_user(mask, &act->sa_mask);
++ siginitset(&new_ka.sa.sa_mask, mask);
++ }
++
++ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
++
++ if (!ret && oact) {
++ if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
++ __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
++ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
++ return -EFAULT;
++ __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
++ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
++ }
++
++ return ret;
++}
++
++asmlinkage int
++sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
++{
++ return do_sigaltstack(uss, uoss, rdusp());
++}
++
++
++/*
++ * Do a signal return; undo the signal stack.
++ *
++ * Keep the return code on the stack quadword aligned!
++ * That makes the cache flush below easier.
++ */
++
++struct sigframe {
++ char __user *pretcode;
++ int sig;
++ int code;
++ struct sigcontext __user *psc;
++ char retcode[8];
++ unsigned long extramask[_NSIG_WORDS-1];
++ struct sigcontext sc;
++};
++
++struct rt_sigframe {
++ char __user *pretcode;
++ int sig;
++ struct siginfo __user *pinfo;
++ void __user *puc;
++ char retcode[8];
++ struct siginfo info;
++ struct ucontext uc;
++};
++
++#define FPCONTEXT_SIZE 216
++#define uc_fpstate uc_filler[0]
++#define uc_formatvec uc_filler[FPCONTEXT_SIZE/4]
++#define uc_extra uc_filler[FPCONTEXT_SIZE/4+1]
++
++#ifdef CONFIG_FPU
++static unsigned char fpu_version; /* version num of fpu, set by setup_frame */
++
++static inline int restore_fpu_state(struct sigcontext *sc)
++{
++ int err = 1;
++
++ if (FPU_IS_EMU) {
++ /* restore registers */
++ memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
++ memcpy(current->thread.fp, sc->sc_fpregs, 24);
++ return 0;
++ }
++
++ if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
++ /* Verify the frame format. */
++ if (!CPU_IS_060 && (sc->sc_fpstate[0] != fpu_version))
++ goto out;
++ if (CPU_IS_020_OR_030) {
++ if (m68k_fputype & FPU_68881 &&
++ !(sc->sc_fpstate[1] == 0x18 ||
++ sc->sc_fpstate[1] == 0xb4))
++ goto out;
++ if (m68k_fputype & FPU_68882 &&
++ !(sc->sc_fpstate[1] == 0x38 ||
++ sc->sc_fpstate[1] == 0xd4))
++ goto out;
++ } else if (CPU_IS_040) {
++ if (!(sc->sc_fpstate[1] == 0x00 ||
++ sc->sc_fpstate[1] == 0x28 ||
++ sc->sc_fpstate[1] == 0x60))
++ goto out;
++ } else if (CPU_IS_060) {
++ if (!(sc->sc_fpstate[3] == 0x00 ||
++ sc->sc_fpstate[3] == 0x60 ||
++ sc->sc_fpstate[3] == 0xe0))
++ goto out;
++ } else if (CPU_IS_CFV4E) {
++ pr_debug("restore v4e fpu state at %s\n", __func__);
++ } else
++ goto out;
++#ifdef CONFIG_CFV4E
++ __asm__ volatile ("fmovem %0,%/fp0-%/fp1\n\t"
++ QCHIP_RESTORE_DIRECTIVE
++ : /* no outputs */
++ : "m" (sc->sc_fpregs[0][0])
++ : "memory");
++ __asm__ volatile ("fmovel %0,%/fpcr"
++ : : "m" (sc->sc_fpcntl[0])
++ : "memory");
++ __asm__ volatile ("fmovel %0,%/fpsr"
++ : : "m" (sc->sc_fpcntl[1])
++ : "memory");
++ __asm__ volatile ("fmovel %0,%/fpiar"
++ : : "m" (sc->sc_fpcntl[2])
++ : "memory");
++
++#endif
++ }
++
++#ifdef CONFIG_CFV4E
++ __asm__ volatile ("frestore %0\n\t"
++ QCHIP_RESTORE_DIRECTIVE : : "m" (*sc->sc_fpstate));
++#endif
++ err = 0;
++
++out:
++ return err;
++}
++
++static inline int rt_restore_fpu_state(struct ucontext __user *uc)
++{
++ unsigned char fpstate[FPCONTEXT_SIZE];
++ int context_size = CPU_IS_060 ? 8 : 0;
++ fpregset_t fpregs;
++ int err = 1;
++
++ if (FPU_IS_EMU) {
++ /* restore fpu control register */
++ if (__copy_from_user(current->thread.fpcntl,
++ uc->uc_mcontext.fpregs.f_fpcntl, 12))
++ goto out;
++ /* restore all other fpu register */
++ if (__copy_from_user(current->thread.fp,
++ uc->uc_mcontext.fpregs.f_fpregs, 96))
++ goto out;
++ return 0;
++ }
++
++ if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
++ goto out;
++ if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
++ if (!CPU_IS_060)
++ context_size = fpstate[1];
++ /* Verify the frame format. */
++ if (!CPU_IS_060 && (fpstate[0] != fpu_version))
++ goto out;
++ if (CPU_IS_020_OR_030) {
++ if (m68k_fputype & FPU_68881 &&
++ !(context_size == 0x18 || context_size == 0xb4))
++ goto out;
++ if (m68k_fputype & FPU_68882 &&
++ !(context_size == 0x38 || context_size == 0xd4))
++ goto out;
++ } else if (CPU_IS_040) {
++ if (!(context_size == 0x00 ||
++ context_size == 0x28 ||
++ context_size == 0x60))
++ goto out;
++ } else if (CPU_IS_060) {
++ if (!(fpstate[3] == 0x00 ||
++ fpstate[3] == 0x60 ||
++ fpstate[3] == 0xe0))
++ goto out;
++ } else if (CPU_IS_CFV4E) {
++ pr_debug("restore coldfire rt v4e fpu"
++ " state at %s\n", __func__);
++ } else
++ goto out;
++ if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
++ sizeof(fpregs)))
++ goto out;
++#ifdef CONFIG_CFV4E
++ __asm__ volatile ("fmovem %0,%/fp0-%/fp7\n\t"
++ QCHIP_RESTORE_DIRECTIVE
++ : /* no outputs */
++ : "m" (fpregs.f_fpregs[0][0])
++ : "memory");
++ __asm__ volatile ("fmovel %0,%/fpcr"
++ : : "m" (fpregs.f_fpcntl[0])
++ : "memory");
++ __asm__ volatile ("fmovel %0,%/fpsr"
++ : : "m" (fpregs.f_fpcntl[1])
++ : "memory");
++ __asm__ volatile ("fmovel %0,%/fpiar"
++ : : "m" (fpregs.f_fpcntl[2])
++ : "memory");
++#endif
++ }
++ if (context_size &&
++ __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
++ context_size))
++ goto out;
++#ifdef CONFIG_CFV4E
++ __asm__ volatile ("frestore %0\n\t"
++ QCHIP_RESTORE_DIRECTIVE : : "m" (*fpstate));
++#endif
++ err = 0;
++
++out:
++ return err;
++}
++#endif
++
++static inline int
++restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc,
++ void __user *fp, int *pd0)
++{
++ int fsize, formatvec;
++ struct sigcontext context;
++ int err = 0;
++
++ /* get previous context */
++ if (copy_from_user(&context, usc, sizeof(context)))
++ goto badframe;
++
++ /* restore passed registers */
++ regs->d1 = context.sc_d1;
++ regs->a0 = context.sc_a0;
++ regs->a1 = context.sc_a1;
++ regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
++ regs->pc = context.sc_pc;
++ regs->orig_d0 = -1; /* disable syscall checks */
++ wrusp(context.sc_usp);
++ formatvec = context.sc_formatvec;
++ regs->format = formatvec >> 12;
++ regs->vector = formatvec & 0xfff;
++
++#ifdef CONFIG_FPU
++ err = restore_fpu_state(&context);
++#endif
++
++ fsize = frame_extra_sizes[regs->format];
++ if (fsize < 0) {
++ /*
++ * user process trying to return with weird frame format
++ */
++#ifdef DEBUG
++		printk(KERN_DEBUG "user process returning with weird "
++			"frame format\n");
++#endif
++ goto badframe;
++ }
++
++ /* OK. Make room on the supervisor stack for the extra junk,
++ * if necessary.
++ */
++
++ {
++ struct switch_stack *sw = (struct switch_stack *)regs - 1;
++ regs->d0 = context.sc_d0;
++#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
++ __asm__ __volatile__
++ (" movel %0,%/sp\n\t"
++ " bra ret_from_signal\n"
++ "4:\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 2b,4b\n"
++ ".previous"
++ : /* no outputs, it doesn't ever return */
++ : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
++ "n" (frame_offset), "a" (fp)
++ : "a0");
++#undef frame_offset
++ /*
++ * If we ever get here an exception occurred while
++ * building the above stack-frame.
++ */
++ goto badframe;
++ }
++
++ *pd0 = context.sc_d0;
++ return err;
++
++badframe:
++ return 1;
++}
++
++static inline int
++rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
++ struct ucontext __user *uc, int *pd0)
++{
++ int fsize, temp;
++ greg_t __user *gregs = uc->uc_mcontext.gregs;
++ unsigned long usp;
++ int err;
++
++ err = __get_user(temp, &uc->uc_mcontext.version);
++ if (temp != MCONTEXT_VERSION)
++ goto badframe;
++ /* restore passed registers */
++ err |= __get_user(regs->d0, &gregs[0]);
++ err |= __get_user(regs->d1, &gregs[1]);
++ err |= __get_user(regs->d2, &gregs[2]);
++ err |= __get_user(regs->d3, &gregs[3]);
++ err |= __get_user(regs->d4, &gregs[4]);
++ err |= __get_user(regs->d5, &gregs[5]);
++ err |= __get_user(sw->d6, &gregs[6]);
++ err |= __get_user(sw->d7, &gregs[7]);
++ err |= __get_user(regs->a0, &gregs[8]);
++ err |= __get_user(regs->a1, &gregs[9]);
++ err |= __get_user(regs->a2, &gregs[10]);
++ err |= __get_user(sw->a3, &gregs[11]);
++ err |= __get_user(sw->a4, &gregs[12]);
++ err |= __get_user(sw->a5, &gregs[13]);
++ err |= __get_user(sw->a6, &gregs[14]);
++ err |= __get_user(usp, &gregs[15]);
++ wrusp(usp);
++ err |= __get_user(regs->pc, &gregs[16]);
++ err |= __get_user(temp, &gregs[17]);
++ regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
++ regs->orig_d0 = -1; /* disable syscall checks */
++ err |= __get_user(temp, &uc->uc_formatvec);
++ regs->format = temp >> 12;
++ regs->vector = temp & 0xfff;
++
++#ifdef CONFIG_FPU
++ err |= rt_restore_fpu_state(uc);
++#endif
++
++ if (do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT)
++ goto badframe;
++
++ fsize = frame_extra_sizes[regs->format];
++ if (fsize < 0) {
++ /*
++ * user process trying to return with weird frame format
++ */
++#ifdef DEBUG
++		printk(KERN_DEBUG "user process returning with weird "
++			"frame format\n");
++#endif
++ goto badframe;
++ }
++
++ /* OK. Make room on the supervisor stack for the extra junk,
++ * if necessary.
++ */
++
++ {
++#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
++ __asm__ __volatile__
++ (" movel %0,%/sp\n\t"
++ " bra ret_from_signal\n"
++ "4:\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 2b,4b\n"
++ ".previous"
++ : /* no outputs, it doesn't ever return */
++ : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
++ "n" (frame_offset), "a" (&uc->uc_extra)
++ : "a0");
++#undef frame_offset
++ /*
++ * If we ever get here an exception occurred while
++ * building the above stack-frame.
++ */
++ goto badframe;
++ }
++
++ *pd0 = regs->d0;
++ return err;
++
++badframe:
++ return 1;
++}
++
++asmlinkage int do_sigreturn(unsigned long __unused)
++{
++ struct switch_stack *sw = (struct switch_stack *) &__unused;
++ struct pt_regs *regs = (struct pt_regs *) (sw + 1);
++ unsigned long usp = rdusp();
++ struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
++ sigset_t set;
++ int d0;
++
++ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
++ goto badframe;
++ if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
++ (_NSIG_WORDS > 1 &&
++ __copy_from_user(&set.sig[1], &frame->extramask,
++ sizeof(frame->extramask))))
++ goto badframe;
++
++ sigdelsetmask(&set, ~_BLOCKABLE);
++ spin_lock_irq(&current->sighand->siglock);
++ current->blocked = set;
++ recalc_sigpending();
++ spin_unlock_irq(&current->sighand->siglock);
++
++ if (restore_sigcontext(regs, &frame->sc, frame + 1, &d0))
++ goto badframe;
++ return d0;
++
++badframe:
++ force_sig(SIGSEGV, current);
++ return 0;
++}
++
++asmlinkage int do_rt_sigreturn(unsigned long __unused)
++{
++ struct switch_stack *sw = (struct switch_stack *) &__unused;
++ struct pt_regs *regs = (struct pt_regs *) (sw + 1);
++ unsigned long usp = rdusp();
++ struct rt_sigframe __user *frame =
++ (struct rt_sigframe __user *)(usp - 4);
++ sigset_t set;
++ int d0;
++
++ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
++ goto badframe;
++ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
++ goto badframe;
++
++ sigdelsetmask(&set, ~_BLOCKABLE);
++ spin_lock_irq(&current->sighand->siglock);
++ current->blocked = set;
++ recalc_sigpending();
++ spin_unlock_irq(&current->sighand->siglock);
++
++ if (rt_restore_ucontext(regs, sw, &frame->uc, &d0))
++ goto badframe;
++ return d0;
++
++badframe:
++ force_sig(SIGSEGV, current);
++ return 0;
++}
++
++#ifdef CONFIG_FPU
++/*
++ * Set up a signal frame.
++ */
++
++static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
++{
++ if (FPU_IS_EMU) {
++ /* save registers */
++ memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
++ memcpy(sc->sc_fpregs, current->thread.fp, 24);
++ return;
++ }
++
++#ifdef CONFIG_CFV4E
++ __asm__ volatile ("fsave %0\n\t"
++ QCHIP_RESTORE_DIRECTIVE
++ : : "m" (*sc->sc_fpstate) : "memory");
++#endif
++
++ if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
++ fpu_version = sc->sc_fpstate[0];
++ if (CPU_IS_020_OR_030 &&
++ regs->vector >= (VEC_FPBRUC * 4) &&
++ regs->vector <= (VEC_FPNAN * 4)) {
++ /* Clear pending exception in 68882 idle frame */
++ if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
++ sc->sc_fpstate[0x38] |= 1 << 3;
++ }
++#ifdef CONFIG_CFV4E
++ __asm__ volatile ("fmovemd %/fp0-%/fp1,%0"
++ : : "m" (sc->sc_fpregs[0][0])
++ : "memory");
++ __asm__ volatile ("fmovel %/fpcr,%0"
++ : : "m" (sc->sc_fpcntl[0])
++ : "memory");
++ __asm__ volatile ("fmovel %/fpsr,%0"
++ : : "m" (sc->sc_fpcntl[1])
++ : "memory");
++ __asm__ volatile ("fmovel %/fpiar,%0"
++ : : "m" (sc->sc_fpcntl[2])
++ : "memory");
++
++#endif
++ }
++}
++
++static inline int rt_save_fpu_state(struct ucontext __user *uc,
++ struct pt_regs *regs)
++{
++ unsigned char fpstate[FPCONTEXT_SIZE];
++ int context_size = CPU_IS_060 ? 8 : 0;
++ int err = 0;
++
++ if (FPU_IS_EMU) {
++ /* save fpu control register */
++ err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
++ current->thread.fpcntl, 12);
++ /* save all other fpu register */
++ err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
++ current->thread.fp, 96);
++ return err;
++ }
++
++#ifdef CONFIG_CFV4E
++ __asm__ volatile ("fsave %0\n\t"
++ QCHIP_RESTORE_DIRECTIVE
++ : : "m" (*fpstate) : "memory");
++#endif
++ err |= __put_user(*(long *)fpstate, (long *)&uc->uc_fpstate);
++ if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
++ fpregset_t fpregs;
++ if (!CPU_IS_060)
++ context_size = fpstate[1];
++ fpu_version = fpstate[0];
++#ifdef CONFIG_CFV4E
++ __asm__ volatile ("fmovemd %/fp0-%/fp7,%0"
++ : : "m" (fpregs.f_fpregs[0][0])
++ : "memory");
++ __asm__ volatile ("fmovel %/fpcr,%0"
++ : : "m" (fpregs.f_fpcntl[0])
++ : "memory");
++ __asm__ volatile ("fmovel %/fpsr,%0"
++ : : "m" (fpregs.f_fpcntl[1])
++ : "memory");
++ __asm__ volatile ("fmovel %/fpiar,%0"
++ : : "m" (fpregs.f_fpcntl[2])
++ : "memory");
++#endif
++ err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
++ sizeof(fpregs));
++ }
++ if (context_size)
++ err |= copy_to_user((long *)&uc->uc_fpstate + 1, fpstate + 4,
++ context_size);
++	return err;
++}
++#endif
++
++static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
++ unsigned long mask)
++{
++ sc->sc_mask = mask;
++ sc->sc_usp = rdusp();
++ sc->sc_d0 = regs->d0;
++ sc->sc_d1 = regs->d1;
++ sc->sc_a0 = regs->a0;
++ sc->sc_a1 = regs->a1;
++ sc->sc_sr = regs->sr;
++ sc->sc_pc = regs->pc;
++ sc->sc_formatvec = regs->format << 12 | regs->vector;
++#ifdef CONFIG_FPU
++ save_fpu_state(sc, regs);
++#endif
++}
++
++static inline int rt_setup_ucontext(struct ucontext __user *uc,
++ struct pt_regs *regs)
++{
++ struct switch_stack *sw = (struct switch_stack *)regs - 1;
++ greg_t __user *gregs = uc->uc_mcontext.gregs;
++ int err = 0;
++
++ err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
++ err |= __put_user(regs->d0, &gregs[0]);
++ err |= __put_user(regs->d1, &gregs[1]);
++ err |= __put_user(regs->d2, &gregs[2]);
++ err |= __put_user(regs->d3, &gregs[3]);
++ err |= __put_user(regs->d4, &gregs[4]);
++ err |= __put_user(regs->d5, &gregs[5]);
++ err |= __put_user(sw->d6, &gregs[6]);
++ err |= __put_user(sw->d7, &gregs[7]);
++ err |= __put_user(regs->a0, &gregs[8]);
++ err |= __put_user(regs->a1, &gregs[9]);
++ err |= __put_user(regs->a2, &gregs[10]);
++ err |= __put_user(sw->a3, &gregs[11]);
++ err |= __put_user(sw->a4, &gregs[12]);
++ err |= __put_user(sw->a5, &gregs[13]);
++ err |= __put_user(sw->a6, &gregs[14]);
++ err |= __put_user(rdusp(), &gregs[15]);
++ err |= __put_user(regs->pc, &gregs[16]);
++ err |= __put_user(regs->sr, &gregs[17]);
++ err |= __put_user((regs->format << 12) | regs->vector,
++ &uc->uc_formatvec);
++#ifdef CONFIG_FPU
++ err |= rt_save_fpu_state(uc, regs);
++#endif
++ return err;
++}
++
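++/*
++ * Called after the signal trampoline has been written to the user
++ * stack through the data cache: the matching instruction-cache lines
++ * must be flushed before the handler can return through it.  On the
++ * MMU-based parts the user virtual address is translated to a physical
++ * address for cf_icache_flush_range(); the m547x/8x variant flushes by
++ * virtual address.
++ */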
++static inline void push_cache(unsigned long vaddr)
++{
++#if defined(CONFIG_M5445X) || defined(CONFIG_M5441X)
++ pgd_t *pdir;
++ pmd_t *pmdp;
++ pte_t *ptep;
++ unsigned long paddr;
++
++ pdir = pgd_offset(current->mm, vaddr);
++ pmdp = pmd_offset(pdir, vaddr);
++ ptep = pte_offset_map(pmdp, vaddr);
++ paddr = ((pte_val(*ptep) & PAGE_MASK) | (vaddr & ~PAGE_MASK));
++ cf_icache_flush_range(paddr, paddr + 8);
++#elif defined(CONFIG_M547X_8X)
++ flush_icache_range(vaddr, vaddr + 8);
++#endif
++}
++
++static inline void __user *
++get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
++{
++ unsigned long usp;
++
++ /* Default to using normal stack. */
++ usp = rdusp();
++
++ /* This is the X/Open sanctioned signal stack switching. */
++ if (ka->sa.sa_flags & SA_ONSTACK) {
++ if (!sas_ss_flags(usp))
++ usp = current->sas_ss_sp + current->sas_ss_size;
++ }
++ return (void __user *)((usp - frame_size) & -8UL);
++}
++
++static void setup_frame(int sig, struct k_sigaction *ka,
++ sigset_t *set, struct pt_regs *regs)
++{
++ struct sigframe __user *frame;
++ int fsize = frame_extra_sizes[regs->format];
++ struct sigcontext context;
++ int err = 0;
++
++ if (fsize < 0) {
++#ifdef DEBUG
++ printk(KERN_DEBUG "setup_frame: Unknown frame format %#x\n",
++ regs->format);
++#endif
++ goto give_sigsegv;
++ }
++
++ frame = get_sigframe(ka, regs, sizeof(*frame));
++
++ err |= __put_user((current_thread_info()->exec_domain
++ && current_thread_info()->exec_domain->signal_invmap
++ && sig < 32
++ ? current_thread_info()->exec_domain->signal_invmap[sig]
++ : sig),
++ &frame->sig);
++
++ err |= __put_user(regs->vector, &frame->code);
++ err |= __put_user(&frame->sc, &frame->psc);
++
++ if (_NSIG_WORDS > 1)
++ err |= copy_to_user(frame->extramask, &set->sig[1],
++ sizeof(frame->extramask));
++
++ setup_sigcontext(&context, regs, set->sig[0]);
++ err |= copy_to_user(&frame->sc, &context, sizeof(context));
++
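++	/*
++	 * The 8-byte retcode buffer doubles as a return trampoline: the
++	 * word stored below encodes "moveq #__NR_sigreturn,%d0; trap #0",
++	 * so returning from the handler re-enters the kernel through
++	 * do_sigreturn().
++	 */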
++ /* Set up to return from userspace. */
++ err |= __put_user(frame->retcode, &frame->pretcode);
++ /* moveq #,d0; trap #0 */
++ err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
++ (long __user *)(frame->retcode));
++
++ if (err)
++ goto give_sigsegv;
++
++ push_cache((unsigned long) &frame->retcode);
++
++ /* Set up registers for signal handler */
++ wrusp((unsigned long) frame);
++ regs->pc = (unsigned long) ka->sa.sa_handler;
++
++adjust_stack:
++ /* Prepare to skip over the extra stuff in the exception frame. */
++ if (regs->stkadj) {
++ struct pt_regs *tregs =
++ (struct pt_regs *)((ulong)regs + regs->stkadj);
++#ifdef DEBUG
++ printk(KERN_DEBUG "Performing stackadjust=%04x\n",
++ regs->stkadj);
++#endif
++ /* This must be copied with decreasing addresses to
++ handle overlaps. */
++ tregs->vector = 0;
++ tregs->format = 0;
++ tregs->pc = regs->pc;
++ tregs->sr = regs->sr;
++ }
++ return;
++
++give_sigsegv:
++ force_sigsegv(sig, current);
++ goto adjust_stack;
++}
++
++static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
++ sigset_t *set, struct pt_regs *regs)
++{
++ struct rt_sigframe __user *frame;
++ int fsize = frame_extra_sizes[regs->format];
++ int err = 0;
++
++ if (fsize < 0) {
++#ifdef DEBUG
++		printk(KERN_DEBUG "setup_rt_frame: Unknown frame format %#x\n",
++ regs->format);
++#endif
++ goto give_sigsegv;
++ }
++
++ frame = get_sigframe(ka, regs, sizeof(*frame));
++
++ if (fsize) {
++ err |= copy_to_user(&frame->uc.uc_extra, regs + 1, fsize);
++ regs->stkadj = fsize;
++ }
++
++ err |= __put_user((current_thread_info()->exec_domain
++ && current_thread_info()->exec_domain->signal_invmap
++ && sig < 32
++ ? current_thread_info()->exec_domain->signal_invmap[sig]
++ : sig),
++ &frame->sig);
++ err |= __put_user(&frame->info, &frame->pinfo);
++ err |= __put_user(&frame->uc, &frame->puc);
++ err |= copy_siginfo_to_user(&frame->info, info);
++
++ /* Create the ucontext. */
++ err |= __put_user(0, &frame->uc.uc_flags);
++ err |= __put_user(NULL, &frame->uc.uc_link);
++ err |= __put_user((void __user *)current->sas_ss_sp,
++ &frame->uc.uc_stack.ss_sp);
++ err |= __put_user(sas_ss_flags(rdusp()),
++ &frame->uc.uc_stack.ss_flags);
++ err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
++ err |= rt_setup_ucontext(&frame->uc, regs);
++ err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
++
++ /* Set up to return from userspace. */
++ err |= __put_user(frame->retcode, &frame->pretcode);
++
++ /* movel #__NR_rt_sigreturn(0xAD),d0; trap #0 */
++ err |= __put_user(0x203c0000, (long *)(frame->retcode + 0));
++ err |= __put_user(0x00ad4e40, (long *)(frame->retcode + 4));
++
++ if (err)
++ goto give_sigsegv;
++
++ push_cache((unsigned long) &frame->retcode);
++
++ /* Set up registers for signal handler */
++ wrusp((unsigned long) frame);
++ regs->pc = (unsigned long) ka->sa.sa_handler;
++
++adjust_stack:
++ /* Prepare to skip over the extra stuff in the exception frame. */
++ if (regs->stkadj) {
++ struct pt_regs *tregs =
++ (struct pt_regs *)((ulong)regs + regs->stkadj);
++#ifdef DEBUG
++ printk(KERN_DEBUG "Performing stackadjust=%04x\n",
++ regs->stkadj);
++#endif
++ /* This must be copied with decreasing addresses to
++ handle overlaps. */
++ tregs->vector = 0;
++ tregs->format = 0;
++ tregs->pc = regs->pc;
++ tregs->sr = regs->sr;
++ }
++ return;
++
++give_sigsegv:
++ force_sigsegv(sig, current);
++ goto adjust_stack;
++}
++
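++/*
++ * Decide how an interrupted system call resumes: either fail with
++ * -EINTR, or reload the original syscall number into d0 and back the
++ * pc up over the 2-byte trap instruction so the call is re-issued when
++ * the task returns to user space.
++ */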
++static inline void
++handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
++{
++ switch (regs->d0) {
++ case -ERESTARTNOHAND:
++ if (!has_handler)
++ goto do_restart;
++ regs->d0 = -EINTR;
++ break;
++
++ case -ERESTARTSYS:
++ if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
++ regs->d0 = -EINTR;
++ break;
++ }
++ /* fallthrough */
++ case -ERESTARTNOINTR:
++do_restart:
++ regs->d0 = regs->orig_d0;
++ regs->pc -= 2;
++ break;
++ }
++}
++
++/*
++ * OK, we're invoking a handler
++ */
++static void
++handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
++ sigset_t *oldset, struct pt_regs *regs)
++{
++ /* are we from a system call? */
++ if (regs->orig_d0 >= 0)
++ /* If so, check system call restarting.. */
++ handle_restart(regs, ka, 1);
++
++ /* set up the stack frame */
++ if (ka->sa.sa_flags & SA_SIGINFO)
++ setup_rt_frame(sig, ka, info, oldset, regs);
++ else
++ setup_frame(sig, ka, oldset, regs);
++
++ if (ka->sa.sa_flags & SA_ONESHOT)
++ ka->sa.sa_handler = SIG_DFL;
++
++ spin_lock_irq(&current->sighand->siglock);
++ sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
++ if (!(ka->sa.sa_flags & SA_NODEFER))
++ sigaddset(&current->blocked, sig);
++ recalc_sigpending();
++ spin_unlock_irq(&current->sighand->siglock);
++}
++
++/*
++ * Note that 'init' is a special process: it doesn't get signals it doesn't
++ * want to handle. Thus you cannot kill init even with a SIGKILL even by
++ * mistake.
++ */
++asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs)
++{
++ siginfo_t info;
++ struct k_sigaction ka;
++ int signr;
++
++ current->thread.esp0 = (unsigned long) regs;
++
++ if (!oldset)
++ oldset = &current->blocked;
++
++ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
++ if (signr > 0) {
++ /* Whee! Actually deliver the signal. */
++ handle_signal(signr, &ka, &info, oldset, regs);
++ return 1;
++ }
++
++ /* Did we come from a system call? */
++ if (regs->orig_d0 >= 0)
++ /* Restart the system call - no handlers present */
++ handle_restart(regs, NULL, 0);
++
++ return 0;
++}
+--- /dev/null
++++ b/arch/m68k/coldfire/common/traps.c
+@@ -0,0 +1,457 @@
++/*
++ * linux/arch/m68knommu/kernel/traps.c
++ *
++ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
++
++/*
++ * Sets up all exception vectors
++ */
++#include <linux/sched.h>
++#include <linux/signal.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/a.out.h>
++#include <linux/user.h>
++#include <linux/string.h>
++#include <linux/linkage.h>
++#include <linux/init.h>
++#include <linux/ptrace.h>
++#include <linux/kallsyms.h>
++
++#include <asm/setup.h>
++#include <asm/fpu.h>
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/traps.h>
++#include <asm/pgtable.h>
++#include <asm/machdep.h>
++#include <asm/siginfo.h>
++
++static char const * const vec_names[] = {
++ "RESET SP", "RESET PC", "BUS ERROR", "ADDRESS ERROR",
++ "ILLEGAL INSTRUCTION", "ZERO DIVIDE", "CHK", "TRAPcc",
++ "PRIVILEGE VIOLATION", "TRACE", "LINE 1010", "LINE 1111",
++ "UNASSIGNED RESERVED 12", "COPROCESSOR PROTOCOL VIOLATION",
++ "FORMAT ERROR", "UNINITIALIZED INTERRUPT",
++ "UNASSIGNED RESERVED 16", "UNASSIGNED RESERVED 17",
++ "UNASSIGNED RESERVED 18", "UNASSIGNED RESERVED 19",
++ "UNASSIGNED RESERVED 20", "UNASSIGNED RESERVED 21",
++ "UNASSIGNED RESERVED 22", "UNASSIGNED RESERVED 23",
++ "SPURIOUS INTERRUPT", "LEVEL 1 INT", "LEVEL 2 INT", "LEVEL 3 INT",
++ "LEVEL 4 INT", "LEVEL 5 INT", "LEVEL 6 INT", "LEVEL 7 INT",
++ "SYSCALL", "TRAP #1", "TRAP #2", "TRAP #3",
++ "TRAP #4", "TRAP #5", "TRAP #6", "TRAP #7",
++ "TRAP #8", "TRAP #9", "TRAP #10", "TRAP #11",
++ "TRAP #12", "TRAP #13", "TRAP #14", "TRAP #15",
++ "FPCP BSUN", "FPCP INEXACT", "FPCP DIV BY 0", "FPCP UNDERFLOW",
++ "FPCP OPERAND ERROR", "FPCP OVERFLOW", "FPCP SNAN",
++ "FPCP UNSUPPORTED OPERATION",
++ "MMU CONFIGURATION ERROR"
++};
++
++asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
++ unsigned long error_code);
++asmlinkage void trap_c(struct frame *fp);
++extern void __init coldfire_trap_init(void);
++
++void __init trap_init(void)
++{
++ coldfire_trap_init();
++}
++
++/* The following table converts the FS encoding of a ColdFire
++ exception stack frame into the error_code value needed by
++ do_fault. */
++
++static const unsigned char fs_err_code[] = {
++ 0, /* 0000 */
++ 0, /* 0001 */
++ 0, /* 0010 */
++ 0, /* 0011 */
++ 1, /* 0100 */
++ 0, /* 0101 */
++ 0, /* 0110 */
++ 0, /* 0111 */
++ 2, /* 1000 */
++ 3, /* 1001 */
++ 2, /* 1010 */
++ 0, /* 1011 */
++ 1, /* 1100 */
++ 1, /* 1101 */
++ 0, /* 1110 */
++ 0 /* 1111 */
++};
++
++#ifdef DEBUG
++static const char *fs_err_msg[16] = {
++ "Normal",
++ "Reserved",
++ "Interrupt during debug service routine",
++ "Reserved",
++ "X Protection",
++ "TLB X miss (opword)",
++ "TLB X miss (ext. word)",
++ "IFP in emulator mode",
++ "W Protection",
++ "Write error",
++ "TLB W miss",
++ "Reserved",
++ "R Protection",
++ "R/RMW Protection",
++ "TLB R miss",
++ "OEP in emulator mode",
++};
++#endif
++
++static inline void access_errorCF(struct frame *fp)
++{
++ unsigned long int mmusr, complainingAddress;
++ unsigned int err_code, fs;
++ int need_page_fault;
++
++ mmusr = fp->ptregs.mmusr;
++ complainingAddress = fp->ptregs.mmuar;
++#ifdef DEBUG
++ printk(KERN_DEBUG "pc %#lx, mmusr %#lx, complainingAddress %#lx\n", \
++ fp->ptregs.pc, mmusr, complainingAddress);
++#endif
++
++ /*
++ * error_code:
++ * bit 0 == 0 means no page found, 1 means protection fault
++ * bit 1 == 0 means read, 1 means write
++ */
++
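++	/*
++	 * The 4-bit fault status is split across the exception frame as
++	 * FS[3:2] (fs2) and FS[1:0] (fs1).  TLB misses are handed to
++	 * cf_tlb_miss() first and only become a page fault if that fails;
++	 * the remaining fault types go straight to do_page_fault().
++	 */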
++ fs = (fp->ptregs.fs2 << 2) | fp->ptregs.fs1;
++ switch (fs) {
++ case 5: /* 0101 TLB opword X miss */
++ need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 0);
++ complainingAddress = fp->ptregs.pc;
++ break;
++ case 6: /* 0110 TLB extension word X miss */
++ need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 1);
++ complainingAddress = fp->ptregs.pc + sizeof(long);
++ break;
++ case 10: /* 1010 TLB W miss */
++ need_page_fault = cf_tlb_miss(&fp->ptregs, 1, 1, 0);
++ break;
++ case 14: /* 1110 TLB R miss */
++ need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 1, 0);
++ break;
++ default:
++ /* 0000 Normal */
++ /* 0001 Reserved */
++ /* 0010 Interrupt during debug service routine */
++ /* 0011 Reserved */
++ /* 0100 X Protection */
++ /* 0111 IFP in emulator mode */
++		/* 1000 W Protection */
++		/* 1001 Write error */
++		/* 1011 Reserved */
++		/* 1100 R Protection */
++		/* 1101 R/RMW Protection */
++		/* 1111 OEP in emulator mode */
++ need_page_fault = 1;
++ break;
++ }
++
++ if (need_page_fault) {
++ err_code = fs_err_code[fs];
++ if ((fs == 13) && (mmusr & MMUSR_WF)) /* rd-mod-wr access */
++ err_code |= 2; /* bit1 - write, bit0 - protection */
++ do_page_fault(&fp->ptregs, complainingAddress, err_code);
++ }
++}
++
++void die_if_kernel(char *str, struct pt_regs *fp, int nr)
++{
++ if (!(fp->sr & PS_S))
++ return;
++
++ console_verbose();
++ printk(KERN_EMERG "%s: %08x\n", str, nr);
++ printk(KERN_EMERG "PC: [<%08lx>]", fp->pc);
++ print_symbol(" %s", fp->pc);
++ printk(KERN_EMERG "\nSR: %04x SP: %p a2: %08lx\n",
++ fp->sr, fp, fp->a2);
++ printk(KERN_EMERG "d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n",
++ fp->d0, fp->d1, fp->d2, fp->d3);
++ printk(KERN_EMERG "d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
++ fp->d4, fp->d5, fp->a0, fp->a1);
++
++ printk(KERN_EMERG "Process %s (pid: %d, stackpage=%08lx)\n",
++ current->comm, current->pid, PAGE_SIZE+(unsigned long)current);
++ show_stack(NULL, (unsigned long *)fp);
++ do_exit(SIGSEGV);
++}
++
++asmlinkage void buserr_c(struct frame *fp)
++{
++ unsigned int fs;
++
++ /* Only set esp0 if coming from user mode */
++ if (user_mode(&fp->ptregs))
++ current->thread.esp0 = (unsigned long) fp;
++
++ fs = (fp->ptregs.fs2 << 2) | fp->ptregs.fs1;
++#if defined(DEBUG)
++ printk(KERN_DEBUG "*** Bus Error *** (%x)%s\n", fs,
++ fs_err_msg[fs & 0xf]);
++#endif
++ switch (fs) {
++ case 0x5:
++ case 0x6:
++ case 0x7:
++ case 0x9:
++ case 0xa:
++ case 0xd:
++ case 0xe:
++ case 0xf:
++ access_errorCF(fp);
++ break;
++ default:
++ die_if_kernel("bad frame format", &fp->ptregs, 0);
++#if defined(DEBUG)
++ printk(KERN_DEBUG "Unknown SIGSEGV - 4\n");
++#endif
++ force_sig(SIGSEGV, current);
++ }
++}
++
++void show_trace(unsigned long *stack)
++{
++ unsigned long *endstack;
++ unsigned long addr;
++ int i;
++
++ printk(KERN_INFO "Call Trace:");
++ addr = (unsigned long)stack + THREAD_SIZE - 1;
++ endstack = (unsigned long *)(addr & -THREAD_SIZE);
++ i = 0;
++ while (stack + 1 <= endstack) {
++ addr = *stack++;
++ /*
++ * If the address is either in the text segment of the
++ * kernel, or in the region which contains vmalloc'ed
++ * memory, it *may* be the address of a calling
++ * routine; if so, print it so that someone tracing
++ * down the cause of the crash will be able to figure
++ * out the call path that was taken.
++ */
++ if (__kernel_text_address(addr)) {
++#ifndef CONFIG_KALLSYMS
++ if (i % 5 == 0)
++ printk("\n ");
++#endif
++ printk(" [<%08lx>] %pS\n", addr, (void *)addr);
++ i++;
++ }
++ }
++ printk(KERN_INFO "\n");
++}
++
++int kstack_depth_to_print = 48;
++void show_stack(struct task_struct *task, unsigned long *stack)
++{
++ unsigned long *p;
++ unsigned long *endstack;
++ int i;
++
++ if (!stack) {
++ if (task)
++ stack = (unsigned long *)task->thread.esp0;
++ else
++ stack = (unsigned long *)&stack;
++ }
++ endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1)
++ & -THREAD_SIZE);
++
++ printk(KERN_INFO "Stack from %08lx:", (unsigned long)stack);
++ p = stack;
++ for (i = 0; i < kstack_depth_to_print; i++) {
++ if (p + 1 > endstack)
++ break;
++ if (i % 8 == 0)
++ printk("\n ");
++ printk(" %08lx", *p++);
++ }
++ printk("\n");
++ show_trace(stack);
++}
++
++void bad_super_trap(struct frame *fp)
++{
++ console_verbose();
++ if (fp->ptregs.vector < sizeof(vec_names)/sizeof(vec_names[0]))
++ printk(KERN_WARNING "*** %s *** FORMAT=%X\n",
++ vec_names[fp->ptregs.vector],
++ fp->ptregs.format);
++ else
++ printk(KERN_WARNING "*** Exception %d *** FORMAT=%X\n",
++ fp->ptregs.vector,
++ fp->ptregs.format);
++ printk(KERN_WARNING "Current process id is %d\n", current->pid);
++ die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0);
++}
++
++asmlinkage void trap_c(struct frame *fp)
++{
++ int sig;
++ siginfo_t info;
++
++ if (fp->ptregs.sr & PS_S) {
++ if (fp->ptregs.vector == VEC_TRACE) {
++ /* traced a trapping instruction */
++ current->ptrace |= PT_DTRACE;
++ } else
++ bad_super_trap(fp);
++ return;
++ }
++
++ /* send the appropriate signal to the user program */
++ switch (fp->ptregs.vector) {
++ case VEC_ADDRERR:
++ info.si_code = BUS_ADRALN;
++ sig = SIGBUS;
++ break;
++ case VEC_ILLEGAL:
++ case VEC_LINE10:
++ case VEC_LINE11:
++ info.si_code = ILL_ILLOPC;
++ sig = SIGILL;
++ break;
++ case VEC_PRIV:
++ info.si_code = ILL_PRVOPC;
++ sig = SIGILL;
++ break;
++ case VEC_COPROC:
++ info.si_code = ILL_COPROC;
++ sig = SIGILL;
++ break;
++ case VEC_TRAP1: /* gdbserver breakpoint */
++ fp->ptregs.pc -= 2;
++ info.si_code = TRAP_TRACE;
++ sig = SIGTRAP;
++ break;
++ case VEC_TRAP2:
++ case VEC_TRAP3:
++ case VEC_TRAP4:
++ case VEC_TRAP5:
++ case VEC_TRAP6:
++ case VEC_TRAP7:
++ case VEC_TRAP8:
++ case VEC_TRAP9:
++ case VEC_TRAP10:
++ case VEC_TRAP11:
++ case VEC_TRAP12:
++ case VEC_TRAP13:
++ case VEC_TRAP14:
++ info.si_code = ILL_ILLTRP;
++ sig = SIGILL;
++ break;
++ case VEC_FPBRUC:
++ case VEC_FPOE:
++ case VEC_FPNAN:
++ info.si_code = FPE_FLTINV;
++ sig = SIGFPE;
++ break;
++ case VEC_FPIR:
++ info.si_code = FPE_FLTRES;
++ sig = SIGFPE;
++ break;
++ case VEC_FPDIVZ:
++ info.si_code = FPE_FLTDIV;
++ sig = SIGFPE;
++ break;
++ case VEC_FPUNDER:
++ info.si_code = FPE_FLTUND;
++ sig = SIGFPE;
++ break;
++ case VEC_FPOVER:
++ info.si_code = FPE_FLTOVF;
++ sig = SIGFPE;
++ break;
++ case VEC_ZERODIV:
++ info.si_code = FPE_INTDIV;
++ sig = SIGFPE;
++ break;
++ case VEC_CHK:
++ case VEC_TRAP:
++ info.si_code = FPE_INTOVF;
++ sig = SIGFPE;
++ break;
++ case VEC_TRACE: /* ptrace single step */
++ info.si_code = TRAP_TRACE;
++ sig = SIGTRAP;
++ break;
++ case VEC_TRAP15: /* breakpoint */
++ info.si_code = TRAP_BRKPT;
++ sig = SIGTRAP;
++ break;
++ default:
++ info.si_code = ILL_ILLOPC;
++ sig = SIGILL;
++ break;
++ }
++ info.si_signo = sig;
++ info.si_errno = 0;
++ switch (fp->ptregs.format) {
++ default:
++ info.si_addr = (void *) fp->ptregs.pc;
++ break;
++ case 2:
++ info.si_addr = (void *) fp->un.fmt2.iaddr;
++ break;
++ case 7:
++ info.si_addr = (void *) fp->un.fmt7.effaddr;
++ break;
++ case 9:
++ info.si_addr = (void *) fp->un.fmt9.iaddr;
++ break;
++ case 10:
++ info.si_addr = (void *) fp->un.fmta.daddr;
++ break;
++ case 11:
++ info.si_addr = (void *) fp->un.fmtb.daddr;
++ break;
++ }
++ force_sig_info(sig, &info, current);
++}
++
++asmlinkage void set_esp0(unsigned long ssp)
++{
++ current->thread.esp0 = ssp;
++}
++
++/*
++ * The architecture-independent backtrace generator
++ */
++void dump_stack(void)
++{
++ unsigned long stack;
++
++ show_stack(current, &stack);
++}
++EXPORT_SYMBOL(dump_stack);
++
++#ifdef CONFIG_M68KFPU_EMU
++asmlinkage void fpemu_signal(int signal, int code, void *addr)
++{
++ siginfo_t info;
++
++ info.si_signo = signal;
++ info.si_errno = 0;
++ info.si_code = code;
++ info.si_addr = addr;
++ force_sig_info(signal, &info, current);
++}
++#endif
+--- a/arch/m68k/include/asm/atomic.h
++++ b/arch/m68k/include/asm/atomic.h
+@@ -1,3 +1,11 @@
++/*
++ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
++
+ #ifndef __ARCH_M68K_ATOMIC__
+ #define __ARCH_M68K_ATOMIC__
+
+@@ -30,12 +38,20 @@
+
+ static inline void atomic_add(int i, atomic_t *v)
+ {
+- __asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
++#ifndef CONFIG_COLDFIRE
++ __asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI(i));
++#else
++ __asm__ __volatile__("addl %1,%0" : "=m" (*v) : ASM_DI(i), "m" (*v));
++#endif
+ }
+
+ static inline void atomic_sub(int i, atomic_t *v)
+ {
+- __asm__ __volatile__("subl %1,%0" : "+m" (*v) : ASM_DI (i));
++#ifndef CONFIG_COLDFIRE
++ __asm__ __volatile__("subl %1,%0" : "+m" (*v) : ASM_DI(i));
++#else
++ __asm__ __volatile__("subl %1,%0" : "=m" (*v) : ASM_DI(i), "m" (*v));
++#endif
+ }
+
+ static inline void atomic_inc(atomic_t *v)
+@@ -55,6 +71,14 @@ static inline int atomic_dec_and_test(at
+ return c != 0;
+ }
+
++static inline int atomic_dec_and_test_lt(volatile atomic_t *v)
++{
++ char c;
++ __asm__ __volatile__("subql #1,%1; slt %0" : "=d" (c), "=m" (*v)
++ : "m" (*v));
++	return c != 0;
++}
++
+ static inline int atomic_inc_and_test(atomic_t *v)
+ {
+ char c;
+@@ -167,9 +191,14 @@ static inline int atomic_sub_and_test(in
+ static inline int atomic_add_negative(int i, atomic_t *v)
+ {
+ char c;
++#ifndef CONFIG_COLDFIRE
+ __asm__ __volatile__("addl %2,%1; smi %0"
+ : "=d" (c), "+m" (*v)
+ : "id" (i));
++#else
++ __asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "=m" (*v)
++ : "d" (i) , "m" (*v));
++#endif
+ return c != 0;
+ }
+
+--- a/arch/m68k/include/asm/bitops_mm.h
++++ b/arch/m68k/include/asm/bitops_mm.h
+@@ -1,12 +1,18 @@
+-#ifndef _M68K_BITOPS_H
+-#define _M68K_BITOPS_H
+ /*
+ * Copyright 1992, Linus Torvalds.
+ *
++ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
++#ifndef _M68K_BITOPS_H
++#define _M68K_BITOPS_H
++
++#ifdef CONFIG_COLDFIRE
++#include <asm/cf_bitops.h>
++#else
+
+ #ifndef _LINUX_BITOPS_H
+ #error only <linux/bitops.h> can be included directly
+@@ -463,4 +469,6 @@ static inline unsigned long generic_find
+
+ #endif /* __KERNEL__ */
+
++#endif /* CONFIG_COLDFIRE */
++
+ #endif /* _M68K_BITOPS_H */
+--- a/arch/m68k/include/asm/bootinfo.h
++++ b/arch/m68k/include/asm/bootinfo.h
+@@ -2,6 +2,7 @@
+ ** asm/bootinfo.h -- Definition of the Linux/m68k boot information structure
+ **
+ ** Copyright 1992 by Greg Harp
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ **
+ ** This file is subject to the terms and conditions of the GNU General Public
+ ** License. See the file COPYING in the main directory of this archive
+@@ -24,6 +25,47 @@
+ #ifndef _M68K_BOOTINFO_H
+ #define _M68K_BOOTINFO_H
+
++#ifndef __ASSEMBLY__
++/*
++ * UBoot Support
++ *
++ * bd_info structure from uboot1.3.2/arch/m68k/include/asm/u-boot.h
++ */
++struct bd_info {
++ unsigned long bi_memstart; /* start of DRAM memory */
++ unsigned long bi_memsize; /* size of DRAM memory in bytes */
++ unsigned long bi_flashstart; /* start of FLASH memory */
++ unsigned long bi_flashsize; /* size of FLASH memory */
++ unsigned long bi_flashoffset; /* reserved area for startup monitor */
++ unsigned long bi_sramstart; /* start of SRAM memory */
++ unsigned long bi_sramsize; /* size of SRAM memory */
++ unsigned long bi_mbar_base; /* base of internal registers */
++ unsigned long bi_bootflags; /* boot / reboot flag (for LynxOS) */
++ unsigned long bi_boot_params; /* where this board expects params */
++ unsigned long bi_ip_addr; /* IP Address */
++ unsigned char bi_enet0addr[6]; /* Ethernet 0 mac address */
++ unsigned short bi_ethspeed; /* Ethernet speed in Mbps */
++ unsigned long bi_intfreq; /* Internal Freq, in MHz */
++ unsigned long bi_busfreq; /* Bus Freq, in MHz */
++#ifdef UBOOT_EXTRA_CLOCK
++ unsigned long bi_inpfreq; /* input Freq in MHz */
++ unsigned long bi_vcofreq; /* vco Freq in MHz */
++ unsigned long bi_flbfreq; /* Flexbus Freq in MHz */
++#endif
++ unsigned long bi_baudrate; /* Console Baudrate */
++ unsigned char bi_enet1addr[6]; /* eth1 mac address */
++ unsigned char bi_enet2addr[6]; /* eth2 mac address */
++ unsigned char bi_enet3addr[6]; /* eth3 mac address */
++};
++
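++/*
++ * Boot hand-off record: head.S stores the pointer received from u-boot
++ * in uboot_info_stk before calling cf_early_init().  It carries the
++ * board info block, the initrd range and the kernel command line set
++ * up by the boot loader.
++ */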
++struct uboot_record {
++ struct bd_info *bdi;
++ unsigned long initrd_start;
++ unsigned long initrd_end;
++ unsigned long cmd_line_start;
++ unsigned long cmd_line_stop;
++};
++#endif /* __ASSEMBLY__ */
+
+ /*
+ * Bootinfo definitions
+--- a/arch/m68k/include/asm/cacheflush_mm.h
++++ b/arch/m68k/include/asm/cacheflush_mm.h
+@@ -1,3 +1,11 @@
++/*
++ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
++
+ #ifndef _M68K_CACHEFLUSH_H
+ #define _M68K_CACHEFLUSH_H
+
+@@ -6,6 +14,10 @@
+ /* cache code */
+ #define FLUSH_I_AND_D (0x00000808)
+ #define FLUSH_I (0x00000008)
++#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
++#ifdef CONFIG_COLDFIRE
++#include <asm/cf_cacheflush.h>
++#else /* !CONFIG_COLDFIRE */
+
+ /*
+ * Cache handling functions
+@@ -128,7 +140,6 @@ static inline void __flush_page_to_ram(v
+ }
+ }
+
+-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+ #define flush_dcache_page(page) __flush_page_to_ram(page_address(page))
+ #define flush_dcache_mmap_lock(mapping) do { } while (0)
+ #define flush_dcache_mmap_unlock(mapping) do { } while (0)
+@@ -154,4 +165,5 @@ static inline void copy_from_user_page(s
+ memcpy(dst, src, len);
+ }
+
++#endif /* !CONFIG_COLDFIRE */
+ #endif /* _M68K_CACHEFLUSH_H */
+--- /dev/null
++++ b/arch/m68k/include/asm/cf-sram.h
+@@ -0,0 +1,21 @@
++/*
++ * Copyright (C) 2009-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Author: Lanttor.Guo@freescale.com
++ *
++ * Providing on-chip SRAM allocation and free APIs to kernel
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++#ifndef CF_SRAM_H
++#define CF_SRAM_H
++
++extern int declare_sram_pool(void *start, size_t size);
++
++extern void *sram_alloc(size_t len);
++extern void sram_free(void *addr, size_t len);
++
++#endif
+--- /dev/null
++++ b/arch/m68k/include/asm/cf_bitops.h
+@@ -0,0 +1,443 @@
++/*
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
++#ifndef __CF_BITOPS__
++#define __CF_BITOPS__
++
++#ifndef _LINUX_BITOPS_H
++#error only <linux/bitops.h> can be included directly
++#endif
++
++#include <linux/compiler.h>
++
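++/*
++ * Bit numbering follows the m68k convention: bit nr is bit (nr & 31)
++ * of 32-bit word (nr / 32), counting from the least-significant bit.
++ * On this big-endian CPU that bit lives in byte ((nr ^ 31) >> 3) of
++ * the bitmap, at position (nr & 7) within the byte, which is what the
++ * constant-nr variants below address directly with bset/bclr/bchg.
++ * In the test_and_* forms the trailing "sne" yields a non-zero result
++ * when the bit was already set.
++ */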
++#define test_and_set_bit(nr, vaddr) \
++ (__builtin_constant_p(nr) ? \
++ __constant_coldfire_test_and_set_bit(nr, vaddr) : \
++ __generic_coldfire_test_and_set_bit(nr, vaddr))
++
++static inline int __constant_coldfire_test_and_set_bit(int nr,
++ volatile void *vaddr)
++{
++ char retval;
++ volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
++ __asm__ __volatile__ ("bset %2,(%4); sne %0"
++ : "=d" (retval), "=m" (*p)
++ : "di" (nr & 7), "m" (*p), "a" (p));
++ return retval;
++}
++
++static inline int __generic_coldfire_test_and_set_bit(int nr,
++ volatile void *vaddr)
++{
++ char retval;
++
++ __asm__ __volatile__ ("bset %2,%1; sne %0"
++ : "=d" (retval), "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
++ : "d" (nr)
++ : "memory");
++ return retval;
++}
++#define __test_and_set_bit(nr, vaddr) test_and_set_bit(nr, vaddr)
++
++#define set_bit(nr, vaddr) \
++ (__builtin_constant_p(nr) ? \
++ __constant_coldfire_set_bit(nr, vaddr) : \
++ __generic_coldfire_set_bit(nr, vaddr))
++
++static inline void __constant_coldfire_set_bit(int nr,
++ volatile void *vaddr)
++{
++ volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
++ __asm__ __volatile__ ("bset %1,(%3)"
++ : "=m" (*p) : "di" (nr & 7), "m" (*p), "a" (p));
++}
++
++static inline void __generic_coldfire_set_bit(int nr,
++ volatile void *vaddr)
++{
++ __asm__ __volatile__ ("bset %1,%0"
++ : "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
++ : "d" (nr)
++ : "memory");
++}
++#define __set_bit(nr, vaddr) set_bit(nr, vaddr)
++
++#define test_and_clear_bit(nr, vaddr) \
++ (__builtin_constant_p(nr) ? \
++ __constant_coldfire_test_and_clear_bit(nr, vaddr) : \
++ __generic_coldfire_test_and_clear_bit(nr, vaddr))
++
++static inline int __constant_coldfire_test_and_clear_bit(int nr,
++ volatile void *vaddr)
++{
++ char retval;
++ volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
++
++ __asm__ __volatile__ ("bclr %2,(%4); sne %0"
++ : "=d" (retval), "=m" (*p)
++ : "id" (nr & 7), "m" (*p), "a" (p));
++
++ return retval;
++}
++
++static inline int __generic_coldfire_test_and_clear_bit(int nr,
++ volatile void *vaddr)
++{
++ char retval;
++
++ __asm__ __volatile__ ("bclr %2,%1; sne %0"
++ : "=d" (retval), "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
++ : "d" (nr & 7)
++ : "memory");
++
++ return retval;
++}
++#define __test_and_clear_bit(nr, vaddr) test_and_clear_bit(nr, vaddr)
++
++/*
++ * clear_bit() doesn't provide any barrier for the compiler.
++ */
++#define smp_mb__before_clear_bit() barrier()
++#define smp_mb__after_clear_bit() barrier()
++
++#define clear_bit(nr, vaddr) \
++ (__builtin_constant_p(nr) ? \
++ __constant_coldfire_clear_bit(nr, vaddr) : \
++ __generic_coldfire_clear_bit(nr, vaddr))
++
++static inline void __constant_coldfire_clear_bit(int nr,
++ volatile void *vaddr)
++{
++ volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
++ __asm__ __volatile__ ("bclr %1,(%3)"
++ : "=m" (*p) : "id" (nr & 7), "m" (*p), "a" (p));
++}
++
++static inline void __generic_coldfire_clear_bit(int nr,
++ volatile void *vaddr)
++{
++ __asm__ __volatile__ ("bclr %1,%0"
++ : "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
++ : "d" (nr)
++ : "memory");
++}
++#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)
++
++#define test_and_change_bit(nr, vaddr) \
++ (__builtin_constant_p(nr) ? \
++ __constant_coldfire_test_and_change_bit(nr, vaddr) : \
++ __generic_coldfire_test_and_change_bit(nr, vaddr))
++
++static inline int __constant_coldfire_test_and_change_bit(int nr,
++ volatile void *vaddr)
++{
++ char retval;
++ volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
++
++ __asm__ __volatile__ ("bchg %2,(%4); sne %0"
++ : "=d" (retval), "=m" (*p)
++ : "id" (nr & 7), "m" (*p), "a" (p));
++
++ return retval;
++}
++
++static inline int __generic_coldfire_test_and_change_bit(int nr,
++ volatile void *vaddr)
++{
++ char retval;
++
++ __asm__ __volatile__ ("bchg %2,%1; sne %0"
++ : "=d" (retval), "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
++ : "id" (nr)
++ : "memory");
++
++ return retval;
++}
++#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)
++#define __change_bit(nr, vaddr) change_bit(nr, vaddr)
++
++#define change_bit(nr, vaddr) \
++ (__builtin_constant_p(nr) ? \
++ __constant_coldfire_change_bit(nr, vaddr) : \
++ __generic_coldfire_change_bit(nr, vaddr))
++
++static inline void __constant_coldfire_change_bit(int nr,
++ volatile void *vaddr)
++{
++ volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
++ __asm__ __volatile__ ("bchg %1,(%3)"
++ : "=m" (*p) : "id" (nr & 7), "m" (*p), "a" (p));
++}
++
++static inline void __generic_coldfire_change_bit(int nr,
++ volatile void *vaddr)
++{
++ __asm__ __volatile__ ("bchg %1,%0"
++ : "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
++ : "d" (nr)
++ : "memory");
++}
++
++static inline int test_bit(int nr, const unsigned long *vaddr)
++{
++ return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
++}
++
++static inline unsigned long ffz(unsigned long word)
++{
++ unsigned long result = 0;
++
++ while (word & 1) {
++ result++;
++ word >>= 1;
++ }
++ return result;
++}
++
++/* find_next_zero_bit() finds the first zero bit in a bit string of length
++ * 'size' bits, starting the search at bit 'offset'. This is largely based
++ * on Linus's ALPHA routines.
++ */
++static inline unsigned long find_next_zero_bit(void *addr,
++ unsigned long size, unsigned long offset)
++{
++ unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
++ unsigned long result = offset & ~31UL;
++ unsigned long tmp;
++
++ if (offset >= size)
++ return size;
++ size -= result;
++ offset &= 31UL;
++ if (offset) {
++ tmp = *(p++);
++ tmp |= ~0UL >> (32-offset);
++ if (size < 32)
++ goto found_first;
++ if (~tmp)
++ goto found_middle;
++ size -= 32;
++ result += 32;
++ }
++ while (size & ~31UL) {
++ tmp = *(p++);
++ if (~tmp)
++ goto found_middle;
++ result += 32;
++ size -= 32;
++ }
++ if (!size)
++ return result;
++ tmp = *p;
++
++found_first:
++ tmp |= ~0UL >> size;
++found_middle:
++ return result + ffz(tmp);
++}
++
++#define find_first_zero_bit(addr, size) find_next_zero_bit(((void *)addr), \
++ (size), 0)
++
++/* Ported from include/linux/bitops.h */
++static inline int ffs(int x)
++{
++ int r = 1;
++
++ if (!x)
++ return 0;
++ if (!(x & 0xffff)) {
++ x >>= 16;
++ r += 16;
++ }
++ if (!(x & 0xff)) {
++ x >>= 8;
++ r += 8;
++ }
++ if (!(x & 0xf)) {
++ x >>= 4;
++ r += 4;
++ }
++ if (!(x & 3)) {
++ x >>= 2;
++ r += 2;
++ }
++ if (!(x & 1)) {
++ x >>= 1;
++ r += 1;
++ }
++ return r;
++}
++#define __ffs(x) (ffs(x) - 1)
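++
++/*
++ * Two conventions meet here: ffs() is 1-based and returns 0 for
++ * ffs(0), matching the C library, while __ffs() is the kernel's
++ * 0-based variant and must not be called with a zero argument.
++ */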
++
++/* find_next_bit - find the next set bit in a memory region
++ * (from asm-ppc/bitops.h)
++ */
++static inline unsigned long find_next_bit(const unsigned long *addr,
++ unsigned long size, unsigned long offset)
++{
++ unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
++ unsigned int result = offset & ~31UL;
++ unsigned int tmp;
++
++ if (offset >= size)
++ return size;
++ size -= result;
++ offset &= 31UL;
++ if (offset) {
++ tmp = *p++;
++ tmp &= ~0UL << offset;
++ if (size < 32)
++ goto found_first;
++ if (tmp)
++ goto found_middle;
++ size -= 32;
++ result += 32;
++ }
++ while (size >= 32) {
++ tmp = *p++;
++ if (tmp != 0)
++ goto found_middle;
++ result += 32;
++ size -= 32;
++ }
++ if (!size)
++ return result;
++ tmp = *p;
++
++found_first:
++ tmp &= ~0UL >> (32 - size);
++ if (tmp == 0UL) /* Are any bits set? */
++ return result + size; /* Nope. */
++found_middle:
++ return result + __ffs(tmp);
++}
++
++#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
++
++#ifdef __KERNEL__
++
++/* Ported from include/linux/bitops.h */
++static inline int fls(int x)
++{
++ int r = 32;
++
++ if (!x)
++ return 0;
++ if (!(x & 0xffff0000u)) {
++ x <<= 16;
++ r -= 16;
++ }
++ if (!(x & 0xff000000u)) {
++ x <<= 8;
++ r -= 8;
++ }
++ if (!(x & 0xf0000000u)) {
++ x <<= 4;
++ r -= 4;
++ }
++ if (!(x & 0xc0000000u)) {
++ x <<= 2;
++ r -= 2;
++ }
++ if (!(x & 0x80000000u)) {
++ x <<= 1;
++ r -= 1;
++ }
++ return r;
++}
++
++static inline int __fls(int x)
++{
++ return fls(x) - 1;
++}
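++
++/*
++ * Same convention as ffs()/__ffs() above: fls() is 1-based with
++ * fls(0) == 0, and __fls() is the 0-based form.
++ */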
++
++#include <asm-generic/bitops/fls64.h>
++#include <asm-generic/bitops/sched.h>
++#include <asm-generic/bitops/hweight.h>
++#include <asm-generic/bitops/lock.h>
++
++#define minix_find_first_zero_bit(addr, size) find_next_zero_bit((addr), \
++ (size), 0)
++#define minix_test_and_set_bit(nr, addr) test_and_set_bit((nr), \
++ (unsigned long *)(addr))
++#define minix_set_bit(nr, addr) set_bit((nr), \
++ (unsigned long *)(addr))
++#define minix_test_and_clear_bit(nr, addr) test_and_clear_bit((nr), \
++ (unsigned long *)(addr))
++
++static inline int minix_test_bit(int nr, const volatile unsigned long *vaddr)
++{
++ int *a = (int *)vaddr;
++ int mask;
++
++ a += nr >> 5;
++ mask = 1 << (nr & 0x1f);
++ return ((mask & *a) != 0);
++}
++
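++/*
++ * ext2 bitmaps use little-endian bit order. The "^ 24" converts an
++ * ext2 bit number into the native numbering used above: it flips the
++ * byte index within each 32-bit word while leaving the bit position
++ * inside the byte unchanged.
++ */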
++#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 24, \
++ (unsigned long *)(addr))
++#define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 24, \
++ (unsigned long *)(addr))
++#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 24, \
++ (unsigned long *)(addr))
++#define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 24, \
++ (unsigned long *)(addr))
++
++static inline int ext2_test_bit(int nr, const void *vaddr)
++{
++ const unsigned char *p = vaddr;
++ return (p[nr >> 3] & (1U << (nr & 7))) != 0;
++}
++
++static inline int ext2_find_first_zero_bit(const void *vaddr, unsigned size)
++{
++ const unsigned long *p = vaddr, *addr = vaddr;
++ int res;
++
++ if (!size)
++ return 0;
++
++ size = (size >> 5) + ((size & 31) > 0);
++ while (*p++ == ~0UL) {
++ if (--size == 0)
++ return (p - addr) << 5;
++ }
++
++ --p;
++ for (res = 0; res < 32; res++)
++ if (!ext2_test_bit(res, p))
++ break;
++ return (p - addr) * 32 + res;
++}
++
++static inline int ext2_find_next_zero_bit(const void *vaddr, unsigned size,
++ unsigned offset)
++{
++ const unsigned long *addr = vaddr;
++ const unsigned long *p = addr + (offset >> 5);
++ int bit = offset & 31UL, res;
++
++ if (offset >= size)
++ return size;
++
++ if (bit) {
++ /* Look for zero in first longword */
++ for (res = bit; res < 32; res++)
++ if (!ext2_test_bit(res, p))
++ return (p - addr) * 32 + res;
++ p++;
++ }
++ /* No zero yet, search remaining full bytes for a zero */
++ res = ext2_find_first_zero_bit(p, size - 32 * (p - addr));
++ return (p - addr) * 32 + res;
++}
++
++#endif /* __KERNEL__ */
++
++#endif /* __CF_BITOPS__ */
+--- /dev/null
++++ b/arch/m68k/include/asm/cf_cacheflush.h
+@@ -0,0 +1,20 @@
++/*
++ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
++
++#ifndef M68K_CF_CACHEFLUSH_H
++#define M68K_CF_CACHEFLUSH_H
++
++#ifdef CONFIG_M5445X
++#include "cf_5445x_cacheflush.h"
++#elif defined(CONFIG_M5441X)
++#include "cf_5441x_cacheflush.h"
++#elif defined(CONFIG_M547X_8X)
++#include "cf_548x_cacheflush.h"
++#endif
++
++#endif /* M68K_CF_CACHEFLUSH_H */
+--- /dev/null
++++ b/arch/m68k/include/asm/cf_entry.h
+@@ -0,0 +1,153 @@
++/*
++ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
++
++#ifndef __CF_M68K_ENTRY_H
++#define __CF_M68K_ENTRY_H
++
++#include <asm/setup.h>
++#include <asm/page.h>
++#include <asm/coldfire.h>
++#include <asm/cfmmu.h>
++
++/*
++ * Stack layout in 'ret_from_exception':
++ *
++ * This allows access to the syscall arguments in registers d1-d5
++ *
++ * 0(sp) - d1
++ * 4(sp) - d2
++ * 8(sp) - d3
++ * C(sp) - d4
++ * 10(sp) - d5
++ * 14(sp) - a0
++ * 18(sp) - a1
++ * 1C(sp) - a2
++ * 20(sp) - d0
++ * 24(sp) - orig_d0
++ * 28(sp) - stack adjustment
++ * 2C(sp) - sr
++ * 2E(sp) - pc
++ * 32(sp) - format & vector
++ * 36(sp) - MMUSR
++ * 3A(sp) - MMUAR
++ */
++
++/*
++ * 97/05/14 Andreas: Register %a2 is now set to the current task throughout
++ * the whole kernel.
++ */
++
++/* the following macro is used when enabling interrupts */
++/* portable version */
++#define ALLOWINT (~0x700)
++#define MAX_NOINT_IPL 0
++
++#ifdef __ASSEMBLY__
++
++#define curptr a2
++
++LFLUSH_I_AND_D = 0x00000808
++LSIGTRAP = 5
++
++/* process bits for task_struct.ptrace */
++PT_TRACESYS_OFF = 3
++PT_TRACESYS_BIT = 1
++PT_PTRACED_OFF = 3
++PT_PTRACED_BIT = 0
++PT_DTRACE_OFF = 3
++PT_DTRACE_BIT = 2
++
++#define SAVE_ALL_INT save_all_int
++#define SAVE_ALL_SYS save_all_sys
++#define RESTORE_ALL restore_all
++/*
++ * This defines the normal kernel pt-regs layout.
++ *
++ * regs a3-a6 and d6-d7 are preserved by C code
++ * the kernel doesn't mess with usp unless it needs to
++ */
++
++/*
++ * a -1 in the orig_d0 field signifies
++ * that the stack frame is NOT for syscall
++ */
++.macro save_all_int
++ movel MMUSR,%sp@-
++ movel MMUAR,%sp@-
++ clrl %sp@- | stk_adj
++ pea -1:w | orig d0
++ movel %d0,%sp@- | d0
++ subal #(8*4),%sp
++ moveml %d1-%d5/%a0-%a1/%curptr,%sp@
++.endm
++
++.macro save_all_sys
++ movel MMUSR,%sp@-
++ movel MMUAR,%sp@-
++ clrl %sp@- | stk_adj
++ movel %d0,%sp@- | orig d0
++ movel %d0,%sp@- | d0
++ subal #(8*4),%sp
++ moveml %d1-%d5/%a0-%a1/%curptr,%sp@
++.endm
++
++.macro restore_all
++ moveml %sp@,%a0-%a1/%curptr/%d1-%d5
++ addal #(8*4),%sp
++ movel %sp@+,%d0 | d0
++ addql #4,%sp | orig d0
++ addl %sp@+,%sp | stk_adj
++ addql #8,%sp | MMUAR & MMUSR
++ rte
++.endm
++
++#define SWITCH_STACK_SIZE (6*4+4) /* includes return address */
++
++#define SAVE_SWITCH_STACK save_switch_stack
++#define RESTORE_SWITCH_STACK restore_switch_stack
++#define GET_CURRENT(tmp) get_current tmp
++
++.macro save_switch_stack
++ subal #(6*4),%sp
++ moveml %a3-%a6/%d6-%d7,%sp@
++.endm
++
++.macro restore_switch_stack
++ moveml %sp@,%a3-%a6/%d6-%d7
++ addal #(6*4),%sp
++.endm
++
++.macro get_current reg=%d0
++ movel %sp,\reg
++ andl #-THREAD_SIZE,\reg
++ movel \reg,%curptr
++ movel %curptr@,%curptr
++.endm
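++
++/*
++ * get_current masks the stack pointer down to the THREAD_SIZE-aligned
++ * thread_info block and loads its first word, the task pointer, into
++ * %curptr (%a2).
++ */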
++
++#else /* C source */
++
++#define STR(X) STR1(X)
++#define STR1(X) #X
++
++#define PT_OFF_ORIG_D0 0x24
++#define PT_OFF_FORMATVEC 0x32
++#define PT_OFF_SR 0x2C
++#define SAVE_ALL_INT \
++ "clrl %%sp@-;" /* stk_adj */ \
++ "pea -1:w;" /* orig d0 = -1 */ \
++ "movel %%d0,%%sp@-;" /* d0 */ \
++ "subal #(8*4),%sp" \
++ "moveml %%d1-%%d5/%%a0-%%a2,%%sp@"
++#define GET_CURRENT(tmp) \
++ "movel %%sp,"#tmp"\n\t" \
++ "andw #-"STR(THREAD_SIZE)","#tmp"\n\t" \
++ "movel "#tmp",%%a2\n\t"
++
++#endif
++
++#endif /* __CF_M68K_ENTRY_H */
+--- /dev/null
++++ b/arch/m68k/include/asm/cf_io.h
+@@ -0,0 +1,185 @@
++/*
++ * linux/include/asm-m68k/cf_io.h
++ *
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * 9/30/08 JKM - Separated Coldfire pieces out from m68k.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
++
++#ifndef __CF_IO__
++#define __CF_IO__
++
++#ifdef __KERNEL__
++
++#include <linux/compiler.h>
++#include <asm/raw_io.h>
++#include <asm/virtconvert.h>
++
++#include <asm-generic/iomap.h>
++
++#define readb_relaxed(addr) readb(addr)
++#define readw_relaxed(addr) readw(addr)
++#define readl_relaxed(addr) readl(addr)
++
++#ifdef CONFIG_PCI
++
++/*
++ * IO space in Coldfire
++ */
++#define inb_p inb
++#define inw_p inw
++#define inl_p inl
++#define outb_p outb
++#define outw_p outw
++#define outl_p outl
++
++#ifndef CONFIG_COLDFIRE
++#define inb(port) in_8(port)
++#define outb(val, port) out_8((port), (val))
++#define inw(port) in_le16(port)
++#define outw(val, port) out_le16((port), (val))
++#define inl(port) in_le32(port)
++#define outl(val, port) out_le32((port), (val))
++#define insb(port, buf, nr) \
++ raw_insb((u8 *)(port), (u8 *)(buf), (nr))
++#define outsb(port, buf, nr) \
++ raw_outsb((u8 *)(port), (u8 *)(buf), (nr))
++#define insw(port, buf, nr) \
++ raw_insw_swapw((u16 *)(port), (u16 *)(buf), (nr))
++#define outsw(port, buf, nr) \
++ raw_outsw_swapw((u16 *)(port), (u16 *)(buf), (nr))
++#define insl(port, buf, nr) \
++ raw_insw_swapw((u16 *)(port), (u16 *)(buf), (nr)<<1)
++#define outsl(port, buf, nr) \
++ raw_outsw_swapw((u16 *)(port), (u16 *)(buf), (nr)<<1)
++#else
++#define inb(port) pci_inb(port)
++#define outb(val, port) pci_outb((val), (port))
++#define inw(port) pci_inw(port)
++#define outw(val, port) pci_outw((val), (port))
++#define insb(a, b, c) \
++ pci_insb((volatile unsigned char *)a, (unsigned char *)b, c)
++#define insw(a, b, c) \
++ pci_insw((volatile unsigned short *)a, (unsigned short *)b, c)
++#define insl(a, b, c) \
++ pci_insl((volatile unsigned long *)a, (unsigned long *)b, c)
++#define outsb(a, b, c) \
++ pci_outsb((volatile unsigned char *)a, (const unsigned char *)b, c)
++#define outsw(a, b, c) \
++ pci_outsw((volatile unsigned short *)a, (const unsigned short *)b, c)
++#define outsl(a, b, c) \
++ pci_outsl((volatile unsigned long *)a, (const unsigned long *)b, c)
++#define inl(port) pci_inl(port)
++#define outl(val, port) pci_outl((val), (port))
++#endif
++
++#else
++/* no pci */
++
++#define inb(port) in_8(port)
++#define outb(val, port) out_8((port), (val))
++#define inw(port) in_le16(port)
++#define outw(val, port) out_le16((port), (val))
++#define inl(port) in_le32(port)
++#define outl(val, port) out_le32((port), (val))
++#define insb(port, buf, nr) \
++ raw_insb((u8 *)(port), (u8 *)(buf), (nr))
++#define outsb(port, buf, nr) \
++ raw_outsb((u8 *)(port), (u8 *)(buf), (nr))
++#define insw(port, buf, nr) \
++ raw_insw_swapw((u16 *)(port), (u16 *)(buf), (nr))
++#define outsw(port, buf, nr) \
++ raw_outsw_swapw((u16 *)(port), (u16 *)(buf), (nr))
++#define insl(port, buf, nr) \
++ raw_insw_swapw((u16 *)(port), (u16 *)(buf), (nr)<<1)
++#define outsl(port, buf, nr) \
++ raw_outsw_swapw((u16 *)(port), (u16 *)(buf), (nr)<<1)
++
++#endif /* CONFIG_PCI */
++
++#define mmiowb()
++
++static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
++{
++ return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
++}
++static inline void __iomem *ioremap_nocache(unsigned long physaddr,
++ unsigned long size)
++{
++ return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
++}
++static inline void __iomem *ioremap_writethrough(unsigned long physaddr,
++ unsigned long size)
++{
++ return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
++}
++static inline void __iomem *ioremap_fullcache(unsigned long physaddr,
++ unsigned long size)
++{
++ return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
++}
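++
++/*
++ * All ioremap variants funnel into __ioremap() (see cf_raw_io.h) and
++ * differ only in the IOMAP_* caching mode; plain ioremap() and
++ * ioremap_nocache() both give the serialized, non-cacheable mapping
++ * normally wanted for device registers.
++ */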
++
++static inline void memset_io(volatile void __iomem *addr,
++ unsigned char val, int count)
++{
++ __builtin_memset((void __force *) addr, val, count);
++}
++static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
++ int count)
++{
++ __builtin_memcpy(dst, (void __force *) src, count);
++}
++static inline void memcpy_toio(volatile void __iomem *dst,
++ const void *src, int count)
++{
++ __builtin_memcpy((void __force *) dst, src, count);
++}
++
++#define IO_SPACE_LIMIT 0xffffffff
++
++#endif /* __KERNEL__ */
++
++#define __ARCH_HAS_NO_PAGE_ZERO_MAPPED 1
++
++/*
++ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
++ * access
++ */
++#define xlate_dev_mem_ptr(p) __va(p)
++
++/*
++ * Convert a virtual cached pointer to an uncached pointer
++ */
++#define xlate_dev_kmem_ptr(p) p
++
++#define __raw_readb(addr) \
++ ({ unsigned char __v = (*(volatile unsigned char *) (addr)); __v; })
++#define __raw_readw(addr) \
++ ({ unsigned short __v = (*(volatile unsigned short *) (addr)); __v; })
++#define __raw_readl(addr) \
++ ({ unsigned long __v = (*(volatile unsigned long *) (addr)); __v; })
++#define __raw_writeb(b, addr) (void)((*(volatile unsigned char *) (addr)) = (b))
++#define __raw_writew(b, addr) \
++ (void)((*(volatile unsigned short *) (addr)) = (b))
++#define __raw_writel(b, addr) (void)((*(volatile unsigned int *) (addr)) = (b))
++
++#define memset_io(a, b, c) memset((void *)(a), (b), (c))
++#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
++#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
++
++#if !defined(readb)
++#define readb(addr) \
++ ({ unsigned char __v = (*(volatile unsigned char *) (addr)); __v; })
++#define readw(addr) \
++ ({ unsigned short __v = (*(volatile unsigned short *) (addr)); __v; })
++#define readl(addr) \
++ ({ unsigned int __v = (*(volatile unsigned int *) (addr)); __v; })
++#define writeb(b, addr) (void)((*(volatile unsigned char *) (addr)) = (b))
++#define writew(b, addr) (void)((*(volatile unsigned short *) (addr)) = (b))
++#define writel(b, addr) (void)((*(volatile unsigned int *) (addr)) = (b))
++#endif /* readb */
++
++#endif /* __CF_IO__ */
+--- /dev/null
++++ b/arch/m68k/include/asm/cf_pgalloc.h
+@@ -0,0 +1,112 @@
++/*
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
++
++#ifndef M68K_CF_PGALLOC_H
++#define M68K_CF_PGALLOC_H
++#include <linux/highmem.h>
++#include <asm/coldfire.h>
++#include <asm/page.h>
++#include <asm/cf_tlbflush.h>
++#include <asm/cf_cacheflush.h>
++
++extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
++{
++ free_page((unsigned long) pte);
++}
++
++extern const char bad_pmd_string[];
++
++extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
++ unsigned long address)
++{
++ unsigned long page = __get_free_page(GFP_KERNEL|__GFP_REPEAT);
++
++ if (!page)
++ return NULL;
++
++ memset((void *)page, 0, PAGE_SIZE);
++ return (pte_t *) (page);
++}
++
++extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
++{
++ return (pmd_t *) pgd;
++}
++
++#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
++#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
++
++#define pte_alloc_one_fast(mm, addr) pte_alloc_one(mm, addr)
++
++#define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
++ (unsigned long)(page_address(page)))
++
++#define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
++
++#define pmd_pgtable(pmd) pmd_page(pmd)
++
++static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *page,
++ unsigned long address)
++{
++ __free_page(page);
++}
++
++#define __pmd_free_tlb(tlb, pmd, address) do { } while (0)
++
++static inline struct page *pte_alloc_one(struct mm_struct *mm,
++ unsigned long address)
++{
++ struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
++ pte_t *pte;
++
++ if (!page)
++ return NULL;
++
++ pte = kmap(page);
++ if (pte) {
++ clear_page(pte);
++ __flush_page_to_ram(pte);
++ flush_tlb_kernel_page(pte);
++ nocache_page(pte);
++ }
++ kunmap(page);
++
++ return page;
++}
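++
++/*
++ * A fresh PTE page is cleared, flushed out of the data cache and then
++ * remapped non-cacheable (as on classic m68k), so page-table lookups
++ * never read a stale cached copy.
++ */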
++
++extern inline void pte_free(struct mm_struct *mm, struct page *page)
++{
++ __free_page(page);
++}
++
++/*
++ * In our implementation, each pgd entry contains 1 pmd that is never allocated
++ * or freed. pgd_present is always 1, so this should never be called. -NL
++ */
++#define pmd_free(mm, pmd) BUG()
++
++extern inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
++{
++ free_page((unsigned long) pgd);
++}
++
++extern inline pgd_t *pgd_alloc(struct mm_struct *mm)
++{
++ pgd_t *new_pgd;
++
++ new_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_NOWARN);
++ if (!new_pgd)
++ return NULL;
++ memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
++ memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT);
++ return new_pgd;
++}
++
++#define pgd_populate(mm, pmd, pte) BUG()
++
++#endif /* M68K_CF_PGALLOC_H */
+--- /dev/null
++++ b/arch/m68k/include/asm/cf_pgtable.h
+@@ -0,0 +1,364 @@
++/*
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
++
++#ifndef _CF_PGTABLE_H
++#define _CF_PGTABLE_H
++
++#include <asm/cfmmu.h>
++#include <asm/page.h>
++
++#ifndef __ASSEMBLY__
++#include <asm/virtconvert.h>
++#include <linux/linkage.h>
++
++/* For virtual address to physical address conversion */
++#define VTOP(addr) __pa(addr)
++#define PTOV(addr) __va(addr)
++
++
++#endif /* !__ASSEMBLY__ */
++
++/* Page protection values within PTE. */
++
++/* MMUDR bits, in proper place. */
++#define CF_PAGE_LOCKED (0x00000002)
++#define CF_PAGE_EXEC (0x00000004)
++#define CF_PAGE_WRITABLE (0x00000008)
++#define CF_PAGE_READABLE (0x00000010)
++#define CF_PAGE_SYSTEM (0x00000020)
++#define CF_PAGE_COPYBACK (0x00000040)
++#define CF_PAGE_NOCACHE (0x00000080)
++
++#define CF_CACHEMASK (~0x00000040)
++#define CF_PAGE_MMUDR_MASK (0x000000fe)
++
++#define _PAGE_NOCACHE030 (CF_PAGE_NOCACHE)
++
++/* MMUTR bits, need shifting down. */
++#define CF_PAGE_VALID (0x00000400)
++#define CF_PAGE_SHARED (0x00000800)
++
++#define CF_PAGE_MMUTR_MASK (0x00000c00)
++#define CF_PAGE_MMUTR_SHIFT (10)
++#define CF_ASID_MMU_SHIFT (2)
++
++/* Fake bits, not implemented in CF, will get masked out before
++ hitting hardware, and might go away altogether once this port is
++ complete. */
++#if PAGE_SHIFT < 13
++#error COLDFIRE Error: Pages must be at least 8k in size
++#endif
++#define CF_PAGE_ACCESSED (0x00001000)
++#define CF_PAGE_FILE (0x00000200)
++#define CF_PAGE_DIRTY (0x00000001)
++
++#define _PAGE_CACHE040 0x020 /* 68040 cache mode, cachable, copyback */
++#define _PAGE_NOCACHE_S 0x040 /* 68040 no-cache mode, serialized */
++#define _PAGE_NOCACHE 0x060 /* 68040 cache mode, non-serialized */
++#define _PAGE_CACHE040W 0x000 /* 68040 cache mode, cachable, write-through */
++#define _DESCTYPE_MASK 0x003
++#define _CACHEMASK040 (~0x060)
++#define _PAGE_GLOBAL040 0x400 /* 68040 global bit, used for kva descs */
++
++
++/* Externally used page protection values. */
++#define _PAGE_PRESENT (CF_PAGE_VALID)
++#define _PAGE_ACCESSED (CF_PAGE_ACCESSED)
++#define _PAGE_DIRTY (CF_PAGE_DIRTY)
++#define _PAGE_READWRITE (CF_PAGE_WRITABLE \
++ | CF_PAGE_READABLE \
++ | CF_PAGE_SHARED \
++ | CF_PAGE_SYSTEM)
++
++/* Compound page protection values. */
++#define PAGE_NONE __pgprot(CF_PAGE_VALID \
++ | CF_PAGE_ACCESSED)
++
++#define PAGE_SHARED __pgprot(CF_PAGE_VALID \
++ | CF_PAGE_READABLE \
++ | CF_PAGE_WRITABLE \
++ | CF_PAGE_ACCESSED)
++
++#define PAGE_INIT __pgprot(CF_PAGE_VALID \
++ | CF_PAGE_WRITABLE \
++ | CF_PAGE_READABLE \
++ | CF_PAGE_EXEC \
++ | CF_PAGE_SYSTEM \
++ | CF_PAGE_SHARED)
++
++#define PAGE_KERNEL __pgprot(CF_PAGE_VALID \
++ | CF_PAGE_WRITABLE \
++ | CF_PAGE_READABLE \
++ | CF_PAGE_EXEC \
++ | CF_PAGE_SYSTEM \
++ | CF_PAGE_SHARED \
++ | CF_PAGE_ACCESSED)
++
++#define PAGE_COPY __pgprot(CF_PAGE_VALID \
++ | CF_PAGE_ACCESSED \
++ | CF_PAGE_READABLE)
++
++/*
++ * Page protections for initialising protection_map. See mm/mmap.c
++ * for use. In general, the bit positions are xwr, and P-items are
++ * private, the S-items are shared.
++ */
++
++#define __P000 PAGE_NONE
++#define __P100 __pgprot(CF_PAGE_VALID \
++ | CF_PAGE_ACCESSED \
++ | CF_PAGE_EXEC)
++#define __P010 __pgprot(CF_PAGE_VALID \
++ | CF_PAGE_WRITABLE \
++ | CF_PAGE_ACCESSED)
++#define __P110 __pgprot(CF_PAGE_VALID \
++ | CF_PAGE_ACCESSED \
++ | CF_PAGE_WRITABLE \
++ | CF_PAGE_EXEC)
++#define __P001 __pgprot(CF_PAGE_VALID \
++ | CF_PAGE_ACCESSED \
++ | CF_PAGE_READABLE)
++#define __P101 __pgprot(CF_PAGE_VALID \
++ | CF_PAGE_ACCESSED \
++ | CF_PAGE_READABLE \
++ | CF_PAGE_EXEC)
++#define __P011 __pgprot(CF_PAGE_VALID \
++ | CF_PAGE_READABLE \
++ | CF_PAGE_WRITABLE \
++ | CF_PAGE_ACCESSED)
++#define __P111 __pgprot(CF_PAGE_VALID \
++ | CF_PAGE_ACCESSED \
++ | CF_PAGE_WRITABLE \
++ | CF_PAGE_READABLE \
++ | CF_PAGE_EXEC)
++
++#define __S000 PAGE_NONE
++#define __S100 __pgprot(CF_PAGE_VALID \
++ | CF_PAGE_ACCESSED \
++ | CF_PAGE_EXEC)
++#define __S010 PAGE_SHARED
++#define __S110 __pgprot(CF_PAGE_VALID \
++ | CF_PAGE_ACCESSED \
++ | CF_PAGE_WRITABLE \
++ | CF_PAGE_EXEC)
++#define __S001 __pgprot(CF_PAGE_VALID \
++ | CF_PAGE_ACCESSED \
++ | CF_PAGE_READABLE)
++#define __S101 __pgprot(CF_PAGE_VALID \
++ | CF_PAGE_ACCESSED \
++ | CF_PAGE_READABLE \
++ | CF_PAGE_EXEC)
++#define __S011 PAGE_SHARED
++#define __S111 __pgprot(CF_PAGE_VALID \
++ | CF_PAGE_ACCESSED \
++ | CF_PAGE_READABLE \
++ | CF_PAGE_WRITABLE \
++ | CF_PAGE_EXEC)
++
++#define PTE_MASK PAGE_MASK
++#define CF_PAGE_CHG_MASK (PTE_MASK | CF_PAGE_ACCESSED | CF_PAGE_DIRTY)
++
++#ifndef __ASSEMBLY__
++
++/*
++ * Conversion functions: convert a page and protection to a page entry,
++ * and a page entry and page directory to the page they refer to.
++ */
++#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
++
++extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
++{
++ pte_val(pte) = (pte_val(pte) & CF_PAGE_CHG_MASK) | pgprot_val(newprot);
++ return pte;
++}
++
++#define pmd_set(pmdp, ptep) do {} while (0)
++
++static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
++{
++ pgd_val(*pgdp) = virt_to_phys(pmdp);
++}
++
++#define __pte_page(pte) \
++ ((unsigned long) ((pte_val(pte) & CF_PAGE_PGNUM_MASK) + PAGE_OFFSET))
++#define __pmd_page(pmd) ((unsigned long) (pmd_val(pmd)))
++
++extern inline int pte_none(pte_t pte)
++{
++ return !pte_val(pte);
++}
++extern inline int pte_present(pte_t pte)
++{
++ return pte_val(pte) & CF_PAGE_VALID;
++}
++extern inline void pte_clear(struct mm_struct *mm, unsigned long addr,
++ pte_t *ptep)
++{
++ pte_val(*ptep) = 0;
++}
++
++#define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
++#define pte_page(pte) virt_to_page(__pte_page(pte))
++
++extern inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); }
++#define pmd_none(pmd) pmd_none2(&(pmd))
++extern inline int pmd_bad2(pmd_t *pmd) { return 0; }
++#define pmd_bad(pmd) pmd_bad2(&(pmd))
++#define pmd_present(pmd) (!pmd_none2(&(pmd)))
++extern inline void pmd_clear(pmd_t *pmdp) { pmd_val(*pmdp) = 0; }
++
++extern inline int pgd_none(pgd_t pgd) { return 0; }
++extern inline int pgd_bad(pgd_t pgd) { return 0; }
++extern inline int pgd_present(pgd_t pgd) { return 1; }
++extern inline void pgd_clear(pgd_t *pgdp) {}
++
++
++#define pte_ERROR(e) \
++ printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \
++ __FILE__, __LINE__, pte_val(e))
++#define pmd_ERROR(e) \
++ printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \
++ __FILE__, __LINE__, pmd_val(e))
++#define pgd_ERROR(e) \
++ printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
++ __FILE__, __LINE__, pgd_val(e))
++
++
++/*
++ * The following only work if pte_present() is true.
++ * Undefined behaviour if not...
++ * [we have the full set here even if they don't change from m68k]
++ */
++extern inline int pte_read(pte_t pte)
++ { return pte_val(pte) & CF_PAGE_READABLE; }
++extern inline int pte_write(pte_t pte)
++ { return pte_val(pte) & CF_PAGE_WRITABLE; }
++extern inline int pte_exec(pte_t pte)
++ { return pte_val(pte) & CF_PAGE_EXEC; }
++extern inline int pte_dirty(pte_t pte)
++ { return pte_val(pte) & CF_PAGE_DIRTY; }
++extern inline int pte_young(pte_t pte)
++ { return pte_val(pte) & CF_PAGE_ACCESSED; }
++extern inline int pte_file(pte_t pte)
++ { return pte_val(pte) & CF_PAGE_FILE; }
++static inline int pte_special(pte_t pte) { return 0; }
++
++
++extern inline pte_t pte_wrprotect(pte_t pte)
++ { pte_val(pte) &= ~CF_PAGE_WRITABLE; return pte; }
++extern inline pte_t pte_rdprotect(pte_t pte)
++ { pte_val(pte) &= ~CF_PAGE_READABLE; return pte; }
++extern inline pte_t pte_exprotect(pte_t pte)
++ { pte_val(pte) &= ~CF_PAGE_EXEC; return pte; }
++extern inline pte_t pte_mkclean(pte_t pte)
++ { pte_val(pte) &= ~CF_PAGE_DIRTY; return pte; }
++extern inline pte_t pte_mkold(pte_t pte)
++ { pte_val(pte) &= ~CF_PAGE_ACCESSED; return pte; }
++extern inline pte_t pte_mkwrite(pte_t pte)
++ { pte_val(pte) |= CF_PAGE_WRITABLE; return pte; }
++extern inline pte_t pte_mkread(pte_t pte)
++ { pte_val(pte) |= CF_PAGE_READABLE; return pte; }
++extern inline pte_t pte_mkexec(pte_t pte)
++ { pte_val(pte) |= CF_PAGE_EXEC; return pte; }
++extern inline pte_t pte_mkdirty(pte_t pte)
++ { pte_val(pte) |= CF_PAGE_DIRTY; return pte; }
++extern inline pte_t pte_mkyoung(pte_t pte)
++ { pte_val(pte) |= CF_PAGE_ACCESSED; return pte; }
++extern inline pte_t pte_mknocache(pte_t pte)
++ { pte_val(pte) |= 0x80 | (pte_val(pte) & ~0x40); return pte; }
++extern inline pte_t pte_mkcache(pte_t pte)
++ { pte_val(pte) &= ~CF_PAGE_NOCACHE; return pte; }
++static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
++
++
++#define swapper_pg_dir kernel_pg_dir
++extern pgd_t kernel_pg_dir[PTRS_PER_PGD];
++
++/* Find an entry in a pagetable directory. */
++#define pgd_index(address) ((address) >> PGDIR_SHIFT)
++
++#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
++
++/* Find an entry in a kernel pagetable directory. */
++#define pgd_offset_k(address) pgd_offset(&init_mm, address)
++
++/* Find an entry in the second-level pagetable. */
++extern inline pmd_t *pmd_offset(pgd_t *pgd, unsigned long address)
++{
++ return (pmd_t *) pgd;
++}
++
++/* Find an entry in the third-level pagetable. */
++#define __pte_offset(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
++#define pte_offset_kernel(dir, address) ((pte_t *) __pmd_page(*(dir)) + \
++ __pte_offset(address))
++
++/* Disable caching for page at given kernel virtual address. */
++static inline void nocache_page(void *vaddr)
++{
++ pgd_t *dir;
++ pmd_t *pmdp;
++ pte_t *ptep;
++ unsigned long addr = (unsigned long)vaddr;
++
++ dir = pgd_offset_k(addr);
++ pmdp = pmd_offset(dir, addr);
++ ptep = pte_offset_kernel(pmdp, addr);
++ *ptep = pte_mknocache(*ptep);
++}
++
++/* Enable caching for page at given kernel virtual address. */
++static inline void cache_page(void *vaddr)
++{
++ pgd_t *dir;
++ pmd_t *pmdp;
++ pte_t *ptep;
++ unsigned long addr = (unsigned long)vaddr;
++
++ dir = pgd_offset_k(addr);
++ pmdp = pmd_offset(dir, addr);
++ ptep = pte_offset_kernel(pmdp, addr);
++ *ptep = pte_mkcache(*ptep);
++}
++
++#define PTE_FILE_MAX_BITS 21
++#define PTE_FILE_SHIFT 11
++
++static inline unsigned long pte_to_pgoff(pte_t pte)
++{
++ return pte_val(pte) >> PTE_FILE_SHIFT;
++}
++
++static inline pte_t pgoff_to_pte(unsigned pgoff)
++{
++ pte_t pte = __pte((pgoff << PTE_FILE_SHIFT) + CF_PAGE_FILE);
++ return pte;
++}
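++
++/*
++ * File (non-linear) PTEs keep the page offset in bits 11..31 and tag
++ * themselves with CF_PAGE_FILE (bit 9); CF_PAGE_VALID is bit 10, so
++ * such an entry can never look like a present mapping.
++ */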
++
++/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
++#define __swp_entry(type, offset) ((swp_entry_t) { (type) | \
++ (offset << PTE_FILE_SHIFT) })
++#define __swp_type(x) ((x).val & 0xFF)
++#define __swp_offset(x) ((x).val >> PTE_FILE_SHIFT)
++#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
++#define __swp_entry_to_pte(x) (__pte((x).val))
++
++#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
++
++#define pte_offset_map(pmdp, address) ((pte_t *)__pmd_page(*pmdp) + \
++ __pte_offset(address))
++#define pte_offset_map_nested(pmdp, address) pte_offset_map(pmdp, address)
++#define pte_unmap(pte) do { } while (0)
++#define pte_unmap_nested(pte) kunmap(pte)
++
++#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
++#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
++
++
++#endif /* !__ASSEMBLY__ */
++#endif /* !_CF_PGTABLE_H */
+--- /dev/null
++++ b/arch/m68k/include/asm/cf_raw_io.h
+@@ -0,0 +1,188 @@
++/*
++ * linux/include/asm-m68k/cf_raw_io.h
++ *
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * 09/30/08 JKM: split Coldfire pieces into separate file
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
++#ifndef __CF_RAW_IO__
++#define __CF_RAW_IO__
++
++#ifdef __KERNEL__
++
++#include <asm/types.h>
++
++/* Values for nocacheflag and cmode */
++#define IOMAP_FULL_CACHING 0
++#define IOMAP_NOCACHE_SER 1
++#define IOMAP_NOCACHE_NONSER 2
++#define IOMAP_WRITETHROUGH 3
++
++extern void iounmap(void __iomem *addr);
++
++extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
++ int cacheflag);
++extern void __iounmap(void *addr, unsigned long size);
++
++
++/* ++roman: The assignments to temporary variables keep gcc from generating
++ * two separate accesses to memory, which may be undesirable for some devices.
++ */
++#define in_8(addr) \
++ ({ u8 __v = (*(__force volatile u8 *) (addr)); __v; })
++#define in_be16(addr) \
++ ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
++#define in_be32(addr) \
++ ({ u32 __v = (*(__force volatile u32 *) (addr)); __v; })
++#define in_le16(addr) \
++ ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (addr)); __v; })
++#define in_le32(addr) \
++ ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (addr)); __v; })
++
++#define out_8(addr, b) (void)((*(__force volatile u8 *) (addr)) = (b))
++#define out_be16(addr, w) (void)((*(__force volatile u16 *) (addr)) = (w))
++#define out_be32(addr, l) (void)((*(__force volatile u32 *) (addr)) = (l))
++#define out_le16(addr, w) \
++ (void)((*(__force volatile __le16 *) (addr)) = cpu_to_le16(w))
++#define out_le32(addr, l) \
++ (void)((*(__force volatile __le32 *) (addr)) = cpu_to_le32(l))
++
++
++#ifdef CONFIG_PCI
++/* pci */
++unsigned char pci_inb(long addr);
++unsigned short pci_inw(long addr);
++unsigned long pci_inl(long addr);
++
++void pci_outb(unsigned char val, long addr);
++void pci_outw(unsigned short val, long addr);
++void pci_outl(unsigned long val, long addr);
++
++void pci_insb(volatile unsigned char *addr,
++ unsigned char *buf, int len);
++void pci_insw(volatile unsigned short *addr,
++ unsigned short *buf, int len);
++void pci_insl(volatile unsigned long *addr,
++ unsigned long *buf, int len);
++
++void pci_outsb(volatile unsigned char *addr,
++ const unsigned char *buf, int len);
++void pci_outsw(volatile unsigned short *addr,
++ const unsigned short *buf, int len);
++void pci_outsl(volatile unsigned long *addr,
++ const unsigned long *buf, int len);
++
++unsigned short pci_raw_inw(long addr);
++unsigned long pci_raw_inl(long addr);
++void pci_raw_outw(unsigned short val, long addr);
++void pci_raw_outl(unsigned long val, long addr);
++
++#define raw_inb(port) pci_inb((long)((volatile unsigned char *)(port)))
++#define raw_inw(port) pci_raw_inw((long)((volatile unsigned short *)(port)))
++#define raw_inl(port) pci_raw_inl((long)((volatile unsigned long *)(port)))
++
++#define raw_outb(val, port) \
++ pci_outb((val), (long)((volatile unsigned char *)(port)))
++#define raw_outw(val, port) \
++ pci_raw_outw((val), (long)((volatile unsigned short *)(port)))
++#define raw_outl(val, port) \
++ pci_raw_outl((val), (long)((volatile unsigned long *)(port)))
++
++#define swap_inw(port) pci_inw((long)((volatile unsigned short *)(port)))
++#define swap_outw(val, port) \
++ pci_outw((val), (long)((volatile unsigned short *)(port)))
++
++#else
++/* non-pci */
++#define raw_inb in_8
++#define raw_inw in_be16
++#define raw_inl in_be32
++
++#define raw_outb(val, port) out_8((port), (val))
++#define raw_outw(val, port) out_be16((port), (val))
++#define raw_outl(val, port) out_be32((port), (val))
++
++#define swap_inw(port) in_le16((port))
++#define swap_outw(val, port) out_le16((port), (val))
++#endif
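++
++/*
++ * With CONFIG_PCI the raw_* accessors are routed through the pci_*
++ * helpers above, which handle the little-endian PCI bus; without PCI
++ * they collapse to plain big-endian loads and stores, and
++ * swap_inw/swap_outw provide the byte-swapped 16-bit variants.
++ */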
++
++static inline void raw_insb(volatile u8 __iomem *port,
++ u8 *buf, unsigned int len)
++{
++ unsigned int i;
++
++ for (i = 0; i < len; i++)
++ *buf++ = in_8(port);
++}
++
++static inline void raw_outsb(volatile u8 __iomem *port, const u8 *buf,
++ unsigned int len)
++{
++ unsigned int i;
++
++ for (i = 0; i < len; i++)
++ out_8(port, *buf++);
++}
++
++static inline void raw_insw(volatile u16 *port, u16 *buf, unsigned int nr)
++{
++ unsigned int i;
++
++ for (i = 0; i < nr; i++)
++ *buf++ = raw_inw(port);
++}
++
++static inline void raw_outsw(volatile u16 *port, const u16 *buf,
++ unsigned int nr)
++{
++ unsigned int i;
++
++ for (i = 0; i < nr; i++, buf++)
++ raw_outw(*buf, port);
++}
++
++static inline void raw_insl(volatile u32 *port, u32 *buf, unsigned int nr)
++{
++ unsigned int i;
++
++ for (i = 0; i < nr; i++)
++ *buf++ = raw_inl(port);
++}
++
++static inline void raw_outsl(volatile u32 *port, const u32 *buf,
++ unsigned int nr)
++{
++ unsigned int i;
++
++ for (i = 0; i < nr; i++, buf++)
++ raw_outl(*buf, port);
++}
++
++static inline void raw_insw_swapw(volatile u16 *port, u16 *buf,
++ unsigned int nr)
++{
++#ifdef UNDEF
++ unsigned int i;
++
++ for (i = 0; i < nr; i++)
++ *buf++ = in_le16(port);
++#endif
++}
++
++static inline void raw_outsw_swapw(volatile u16 __iomem *port, const u16 *buf,
++ unsigned int nr)
++{
++#ifdef UNDEF
++ unsigned int i;
++
++ for (i = 0; i < nr; i++, buf++)
++ out_le16(port, *buf);
++#endif
++}
++
++#endif /* __KERNEL__ */
++
++#endif /* __CF_RAW_IO__ */
+--- /dev/null
++++ b/arch/m68k/include/asm/cf_tlbflush.h
+@@ -0,0 +1,66 @@
++/*
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
++#ifndef M68K_CF_TLBFLUSH_H
++#define M68K_CF_TLBFLUSH_H
++
++#include <asm/coldfire.h>
++
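++/*
++ * Every flush variant below simply writes MMUOR_CNL ("clear all
++ * non-locked entries"), so per-mm, per-page and per-range flushes all
++ * degrade to a full TLB flush; only locked TLB entries survive.
++ */
++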
++/* Flush all userspace mappings. */
++static inline void flush_tlb_all(void)
++{
++ preempt_disable();
++ *MMUOR = MMUOR_CNL;
++ preempt_enable();
++}
++
++/* Clear user TLB entries within the context named in mm */
++static inline void flush_tlb_mm(struct mm_struct *mm)
++{
++ preempt_disable();
++ *MMUOR = MMUOR_CNL;
++ preempt_enable();
++}
++
++/* Flush a single TLB page. */
++static inline void flush_tlb_page(struct vm_area_struct *vma,
++ unsigned long addr)
++{
++ preempt_disable();
++ *MMUOR = MMUOR_CNL;
++ preempt_enable();
++}
++
++/* Flush a range of pages from TLB. */
++static inline void flush_tlb_range(struct vm_area_struct *mm,
++ unsigned long start, unsigned long end)
++{
++ preempt_disable();
++ *MMUOR = MMUOR_CNL;
++ preempt_enable();
++}
++
++/* Flush kernel page from TLB. */
++static inline void flush_tlb_kernel_page(void *addr)
++{
++ preempt_disable();
++ *MMUOR = MMUOR_CNL;
++ preempt_enable();
++}
++
++static inline void flush_tlb_kernel_range(unsigned long start,
++ unsigned long end)
++{
++ flush_tlb_all();
++}
++
++extern inline void flush_tlb_pgtables(struct mm_struct *mm,
++ unsigned long start, unsigned long end)
++{
++}
++
++#endif /* M68K_CF_TLBFLUSH_H */
+--- /dev/null
++++ b/arch/m68k/include/asm/cf_uaccess.h
+@@ -0,0 +1,262 @@
++/*
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
++#ifndef __M68K_CF_UACCESS_H
++#define __M68K_CF_UACCESS_H
++
++/*
++ * User space memory access functions
++ */
++
++/* The "moves" command is not available in the CF instruction set. */
++#include <linux/compiler.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/sched.h>
++#include <asm/segment.h>
++
++#define VERIFY_READ 0
++#define VERIFY_WRITE 1
++
++/* We let the MMU do all checking */
++#define access_ok(type, addr, size) 1
++
++/*
++ * The exception table consists of pairs of addresses: the first is the
++ * address of an instruction that is allowed to fault, and the second is
++ * the address at which the program should continue. No registers are
++ * modified, so it is entirely up to the continuation code to figure out
++ * what to do.
++ *
++ * All the routines below use bits of fixup code that are out of line
++ * with the main instruction path. This means when everything is well,
++ * we don't even have to jump over them. Further, they do not intrude
++ * on our cache or tlb entries.
++ */
++
++struct exception_table_entry {
++ unsigned long insn, fixup;
++};
++
++extern int __put_user_bad(void);
++extern int __get_user_bad(void);
++
++#define __put_user_asm(res, x, ptr, bwl, reg, err) \
++asm volatile ("\n" \
++ "1: move."#bwl" %2,%1\n" \
++ "2:\n" \
++ " .section .fixup,\"ax\"\n" \
++ " .even\n" \
++ "10: moveq.l %3,%0\n" \
++ " jra 2b\n" \
++ " .previous\n" \
++ "\n" \
++ " .section __ex_table,\"a\"\n" \
++ " .align 4\n" \
++ " .long 1b,10b\n" \
++ " .long 2b,10b\n" \
++ " .previous" \
++ : "+d" (res), "=m" (*(ptr)) \
++ : #reg(x), "i" (err))
++
++/*
++ * These are the main single-value transfer routines. They automatically
++ * use the right size if we just have the right pointer type.
++ */
++
++#define __put_user(x, ptr) \
++({ \
++ typeof(*(ptr)) __pu_val = (x); \
++ int __pu_err = 0; \
++ __chk_user_ptr(ptr); \
++ switch (sizeof(*(ptr))) { \
++ case 1: \
++ __put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT); \
++ break; \
++ case 2: \
++ __put_user_asm(__pu_err, __pu_val, ptr, w, d, -EFAULT); \
++ break; \
++ case 4: \
++ __put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \
++ break; \
++ case 8: \
++ { \
++ const void __user *__pu_ptr = (ptr); \
++ asm volatile ("\n" \
++ "1: move.l %2,(%1)+\n" \
++ "2: move.l %R2,(%1)\n" \
++ "3:\n" \
++ " .section .fixup,\"ax\"\n" \
++ " .even\n" \
++ "10: movel %3,%0\n" \
++ " jra 3b\n" \
++ " .previous\n" \
++ "\n" \
++ " .section __ex_table,\"a\"\n" \
++ " .align 4\n" \
++ " .long 1b,10b\n" \
++ " .long 2b,10b\n" \
++ " .long 3b,10b\n" \
++ " .previous" \
++ : "+d" (__pu_err), "+a" (__pu_ptr) \
++ : "r" (__pu_val), "i" (-EFAULT) \
++ : "memory"); \
++ break; \
++ } \
++ default: \
++ __pu_err = __put_user_bad(); \
++ break; \
++ } \
++ __pu_err; \
++})
++#define put_user(x, ptr) __put_user(x, ptr)
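++
++/*
++ * Like __put_user(), put_user() returns 0 on success or -EFAULT when
++ * the store faults (caught through the __ex_table fixup above). A
++ * typical, purely illustrative use:
++ *
++ *   if (put_user(status, (int __user *)arg))
++ *           return -EFAULT;
++ */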
++
++
++#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \
++ type __gu_val; \
++ asm volatile ("\n" \
++ "1: move."#bwl" %2,%1\n" \
++ "2:\n" \
++ " .section .fixup,\"ax\"\n" \
++ " .even\n" \
++ "10: move.l %3,%0\n" \
++ " subl %1,%1\n" \
++ " jra 2b\n" \
++ " .previous\n" \
++ "\n" \
++ " .section __ex_table,\"a\"\n" \
++ " .align 4\n" \
++ " .long 1b,10b\n" \
++ " .previous" \
++ : "+d" (res), "=&" #reg(__gu_val) \
++ : "m" (*(ptr)), "i" (err)); \
++ (x) = (typeof(*(ptr)))(unsigned long)__gu_val; \
++})
++
++#define __get_user(x, ptr) \
++({ \
++ int __gu_err = 0; \
++ __chk_user_ptr(ptr); \
++ switch (sizeof(*(ptr))) { \
++ case 1: \
++ __get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT); \
++ break; \
++ case 2: \
++ __get_user_asm(__gu_err, x, ptr, u16, w, d, -EFAULT); \
++ break; \
++ case 4: \
++ __get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT); \
++ break; \
++/* case 8: disabled because gcc-4.1 has a broken typeof \
++ { \
++ const void *__gu_ptr = (ptr); \
++ u64 __gu_val; \
++ asm volatile ("\n" \
++ "1: move.l (%2)+,%1\n" \
++ "2: move.l (%2),%R1\n" \
++ "3:\n" \
++ " .section .fixup,\"ax\"\n" \
++ " .even\n" \
++ "10: move.l %3,%0\n" \
++ " subl %1,%1\n" \
++ " subl %R1,%R1\n" \
++ " jra 3b\n" \
++ " .previous\n" \
++ "\n" \
++ " .section __ex_table,\"a\"\n" \
++ " .align 4\n" \
++ " .long 1b,10b\n" \
++ " .long 2b,10b\n" \
++ " .previous" \
++ : "+d" (__gu_err), "=&r" (__gu_val), \
++ "+a" (__gu_ptr) \
++ : "i" (-EFAULT) \
++ : "memory"); \
++ (x) = (typeof(*(ptr)))__gu_val; \
++ break; \
++ } */ \
++ default : \
++ __gu_err = __get_user_bad(); \
++ break; \
++ } \
++ __gu_err; \
++})
++#define get_user(x, ptr) __get_user(x, ptr)
++
++unsigned long __generic_copy_from_user(void *to, const void __user *from,
++ unsigned long n);
++unsigned long __generic_copy_to_user(void __user *to, const void *from,
++ unsigned long n);
++
++#define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\
++ asm volatile ("\n" \
++ "1: move."#s1" (%2)+,%3\n" \
++ " move."#s1" %3,(%1)+\n" \
++ "2: move."#s2" (%2)+,%3\n" \
++ " move."#s2" %3,(%1)+\n" \
++ " .ifnc \""#s3"\",\"\"\n" \
++ "3: move."#s3" (%2)+,%3\n" \
++ " move."#s3" %3,(%1)+\n" \
++ " .endif\n" \
++ "4:\n" \
++ " .section __ex_table,\"a\"\n" \
++ " .align 4\n" \
++ " .long 1b,10f\n" \
++ " .long 2b,20f\n" \
++ " .ifnc \""#s3"\",\"\"\n" \
++ " .long 3b,30f\n" \
++ " .endif\n" \
++ " .previous\n" \
++ "\n" \
++ " .section .fixup,\"ax\"\n" \
++ " .even\n" \
++ "10: clr."#s1" (%1)+\n" \
++ "20: clr."#s2" (%1)+\n" \
++ " .ifnc \""#s3"\",\"\"\n" \
++ "30: clr."#s3" (%1)+\n" \
++ " .endif\n" \
++ " moveq.l #"#n",%0\n" \
++ " jra 4b\n" \
++ " .previous\n" \
++ : "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp) \
++ : : "memory")
++
++#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \
++ asm volatile ("\n" \
++ " move."#s1" (%2)+,%3\n" \
++ "11: move."#s1" %3,(%1)+\n" \
++ "12: move."#s2" (%2)+,%3\n" \
++ "21: move."#s2" %3,(%1)+\n" \
++ "22:\n" \
++ " .ifnc \""#s3"\",\"\"\n" \
++ " move."#s3" (%2)+,%3\n" \
++ "31: move."#s3" %3,(%1)+\n" \
++ "32:\n" \
++ " .endif\n" \
++ "4:\n" \
++ "\n" \
++ " .section __ex_table,\"a\"\n" \
++ " .align 4\n" \
++ " .long 11b,5f\n" \
++ " .long 12b,5f\n" \
++ " .long 21b,5f\n" \
++ " .long 22b,5f\n" \
++ " .ifnc \""#s3"\",\"\"\n" \
++ " .long 31b,5f\n" \
++ " .long 32b,5f\n" \
++ " .endif\n" \
++ " .previous\n" \
++ "\n" \
++ " .section .fixup,\"ax\"\n" \
++ " .even\n" \
++ "5: moveq.l #"#n",%0\n" \
++ " jra 4b\n" \
++ " .previous\n" \
++ : "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp) \
++ : : "memory")
++
++#endif /* __M68K_CF_UACCESS_H */
+--- /dev/null
++++ b/arch/m68k/include/asm/cf_virtconvert.h
+@@ -0,0 +1,63 @@
++/*
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
++#ifndef __CF_VIRTCONVERT__
++#define __CF_VIRTCONVERT__
++
++/*
++ * Macros used for converting between virtual and physical mappings.
++ *
++ * Coldfire Specific
++ */
++
++#ifdef __KERNEL__
++
++#include <linux/compiler.h>
++#include <linux/mmzone.h>
++#include <asm/setup.h>
++#include <asm/page.h>
++
++/*
++ * Change virtual addresses to physical addresses and vv.
++ */
++static inline unsigned long virt_to_phys(void *address)
++{
++ return __pa(address);
++}
++
++static inline void *phys_to_virt(unsigned long address)
++{
++ return __va(address);
++}
++
++/* Permanent address of a page. */
++#ifdef CONFIG_SINGLE_MEMORY_CHUNK
++#define page_to_phys(page) \
++ __pa(PAGE_OFFSET + \
++ (((page) - pg_data_map[0].node_mem_map) << PAGE_SHIFT))
++#else
++#define page_to_phys(_page) ({ \
++ struct page *__page = _page; \
++ struct pglist_data *pgdat; \
++ pgdat = pg_data_table[page_to_nid(__page)]; \
++ page_to_pfn(__page) << PAGE_SHIFT; \
++})
++#endif
++
++/*
++ * IO bus memory addresses are 1:1 with the physical address,
++ */
++#ifdef CONFIG_PCI
++#define virt_to_bus(a) (a + PCI_DMA_BASE)
++#define bus_to_virt(a) (a - PCI_DMA_BASE)
++#else
++#define virt_to_bus(a) (a)
++#define bus_to_virt(a) (a)
++#endif
++
++#endif /* __KERNEL__ */
++#endif /* __CF_VIRTCONVERT__ */
+--- /dev/null
++++ b/arch/m68k/include/asm/cfcache.h
+@@ -0,0 +1,146 @@
++/*
++ * include/asm-m68k/cfcache.h - Coldfire Cache Controller
++ *
++ * Kurt Mahan kmahan@freescale.com
++ *
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ */
++#ifndef CF_CFCACHE_H
++#define CF_CFCACHE_H
++
++/*
++ * CACR Cache Control Register
++ */
++#define CF_CACR_DEC (0x80000000) /* Data Cache Enable */
++#define CF_CACR_DW (0x40000000) /* Data default Write-protect */
++#define CF_CACR_DESB (0x20000000) /* Data Enable Store Buffer */
++#define CF_CACR_DPI (0x10000000) /* Data Disable CPUSHL Invalidate */
++#define CF_CACR_DHLCK (0x08000000) /* 1/2 Data Cache Lock Mode */
++#define CF_CACR_DDCM_00 (0x00000000) /* Cacheable writethrough imprecise */
++#define CF_CACR_DDCM_01 (0x02000000) /* Cacheable copyback */
++#define CF_CACR_DDCM_10 (0x04000000) /* Noncacheable precise */
++#define CF_CACR_DDCM_11 (0x06000000) /* Noncacheable imprecise */
++#define CF_CACR_DCINVA (0x01000000) /* Data Cache Invalidate All */
++#define CF_CACR_DDSP (0x00800000) /* Data default supervisor-protect */
++#define CF_CACR_IVO (0x00100000) /* Invalidate only */
++#define CF_CACR_BEC (0x00080000) /* Branch Cache Enable */
++#define CF_CACR_BCINVA (0x00040000) /* Branch Cache Invalidate All */
++#define CF_CACR_IEC (0x00008000) /* Instruction Cache Enable */
++#define CF_CACR_SPA (0x00004000) /* Search by Physical Address */
++#define CF_CACR_DNFB (0x00002000) /* Default cache-inhibited fill buf */
++#define CF_CACR_IDPI (0x00001000) /* Instr Disable CPUSHL Invalidate */
++#define CF_CACR_IHLCK (0x00000800) /* 1/2 Instruction Cache Lock Mode */
++#define CF_CACR_IDCM (0x00000400) /* Noncacheable Instr default mode */
++#define CF_CACR_ICINVA (0x00000100) /* Instr Cache Invalidate All */
++#define CF_CACR_IDSP (0x00000080) /* Ins default supervisor-protect */
++#define CF_CACR_EUSP (0x00000020) /* Switch stacks in user mode */
++
++#ifdef CONFIG_M5445X
++/*
++ * M5445x Cache Configuration
++ * - cache line size is 16 bytes
++ * - cache is 4-way set associative
++ * - each cache has 256 sets (64k / 16bytes / 4way)
++ * - I-Cache size is 16KB
++ * - D-Cache size is 16KB
++ */
++#define ICACHE_SIZE 0x4000 /* instruction - 16k */
++#define DCACHE_SIZE 0x4000 /* data - 16k */
++
++#define CACHE_LINE_SIZE 0x0010 /* 16 bytes */
++#define CACHE_SETS 0x0100 /* 256 sets */
++#define CACHE_WAYS 0x0004 /* 4 way */
++
++#define CACHE_DISABLE_MODE (CF_CACR_DCINVA+ \
++ CF_CACR_BCINVA+ \
++ CF_CACR_ICINVA)
++
++#ifndef CONFIG_M5445X_DISABLE_CACHE
++#define CACHE_INITIAL_MODE (CF_CACR_DEC+ \
++ CF_CACR_BEC+ \
++ CF_CACR_IEC+ \
++ CF_CACR_DESB+ \
++ CF_CACR_EUSP)
++#else
++/* cache disabled for testing */
++#define CACHE_INITIAL_MODE (CF_CACR_EUSP)
++#endif /* CONFIG_M5445X_DISABLE_CACHE */
++
++#elif defined(CONFIG_M547X_8X)
++/*
++ * M547x/M548x Cache Configuration
++ * - cache line size is 16 bytes
++ * - cache is 4-way set associative
++ * - each cache has 512 sets (128k / 16bytes / 4way)
++ * - I-Cache size is 32KB
++ * - D-Cache size is 32KB
++ */
++#define ICACHE_SIZE 0x8000 /* instruction - 32k */
++#define DCACHE_SIZE 0x8000 /* data - 32k */
++
++#define CACHE_LINE_SIZE 0x0010 /* 16 bytes */
++#define CACHE_SETS 0x0200 /* 512 sets */
++#define CACHE_WAYS 0x0004 /* 4 way */
++
++/* in for the old cpushl caching code */
++#define _DCACHE_SET_MASK ((DCACHE_SIZE/64-1)<<CACHE_WAYS)
++#define _ICACHE_SET_MASK ((ICACHE_SIZE/64-1)<<CACHE_WAYS)
++#define LAST_DCACHE_ADDR _DCACHE_SET_MASK
++#define LAST_ICACHE_ADDR _ICACHE_SET_MASK
++
++#define CACHE_DISABLE_MODE (CF_CACR_DCINVA+ \
++ CF_CACR_BCINVA+ \
++ CF_CACR_ICINVA)
++
++#define CACHE_INITIAL_MODE (CF_CACR_DEC+ \
++ CF_CACR_BEC+ \
++ CF_CACR_IEC+ \
++ CF_CACR_DESB+ \
++ CF_CACR_EUSP)
++#elif defined(CONFIG_M5441X)
++/*
++ * M5441x Cache Configuration
++ * - cache line size is 16 bytes
++ * - cache is 4-way set associative
++ * - each cache has 128 sets (8k / 16bytes / 4way)
++ * - I-Cache size is 8KB
++ * - D-Cache size is 8KB
++ */
++#define ICACHE_SIZE 0x2000 /* instruction - 8k */
++#define DCACHE_SIZE 0x2000 /* data - 8k */
++
++#define CACHE_LINE_SIZE 0x0010 /* 16 bytes */
++#define CACHE_SETS 0x0080 /* 128 sets */
++#define CACHE_WAYS 0x0004 /* 4 way */
++
++#define CACHE_DISABLE_MODE (CF_CACR_DCINVA+ \
++ CF_CACR_BCINVA+ \
++ CF_CACR_ICINVA)
++
++#ifndef CONFIG_M5441X_DISABLE_CACHE
++#define CACHE_INITIAL_MODE (CF_CACR_DEC+ \
++ CF_CACR_BEC+ \
++ CF_CACR_IEC+ \
++ CF_CACR_DESB+ \
++ CF_CACR_DDCM_01+ \
++ CF_CACR_EUSP)
++#else
++/* cache disabled for testing */
++#define CACHE_INITIAL_MODE (CF_CACR_EUSP)
++#endif /* CONFIG_M5441X_DISABLE_CACHE */
++
++#endif
++
++#ifndef __ASSEMBLY__
++
++extern unsigned long shadow_cacr;
++extern void cacr_set(unsigned long x);
++
++#endif /* !__ASSEMBLY__ */
++
++#endif /* CF_CACHE_H */
+--- /dev/null
++++ b/arch/m68k/include/asm/cfmmu.h
+@@ -0,0 +1,112 @@
++/*
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Definitions for Coldfire V4e MMU
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
++#include <asm/movs.h>
++
++#ifndef __CF_MMU_H__
++#define __CF_MMU_H__
++
++
++#if defined(CONFIG_M5445X) || defined(CONFIG_M547X_8X)
++#define MMU_BASE 0xE8000000
++#elif defined(CONFIG_M5441X)
++#define MMU_BASE 0xD8000000
++#endif
++
++#define MMUCR (MMU_BASE+0x00)
++#define MMUCR_ASMN 1
++#define MMUCR_ASM (1<<MMUCR_ASMN)
++#define MMUCR_ENN 0
++#define MMUCR_EN (1<<MMUCR_ENN)
++
++#define MMUOR REG16(MMU_BASE+0x04+0x02)
++#define MMUOR_AAN 16
++#define MMUOR_AA (0xffff<<MMUOR_AAN)
++#define MMUOR_STLBN 8
++#define MMUOR_STLB (1<<MMUOR_STLBN)
++#define MMUOR_CAN 7
++#define MMUOR_CA (1<<MMUOR_CAN)
++#define MMUOR_CNLN 6
++#define MMUOR_CNL (1<<MMUOR_CNLN)
++#define MMUOR_CASN 5
++#define MMUOR_CAS (1<<MMUOR_CASN)
++#define MMUOR_ITLBN 4
++#define MMUOR_ITLB (1<<MMUOR_ITLBN)
++#define MMUOR_ADRN 3
++#define MMUOR_ADR (1<<MMUOR_ADRN)
++#define MMUOR_RWN 2
++#define MMUOR_RW (1<<MMUOR_RWN)
++#define MMUOR_ACCN 1
++#define MMUOR_ACC (1<<MMUOR_ACCN)
++#define MMUOR_UAAN 0
++#define MMUOR_UAA (1<<MMUOR_UAAN)
++
++#define MMUSR REG32(MMU_BASE+0x08)
++#define MMUSR_SPFN 5
++#define MMUSR_SPF (1<<MMUSR_SPFN)
++#define MMUSR_RFN 4
++#define MMUSR_RF (1<<MMUSR_RFN)
++#define MMUSR_WFN 3
++#define MMUSR_WF (1<<MMUSR_WFN)
++#define MMUSR_HITN 1
++#define MMUSR_HIT (1<<MMUSR_HITN)
++
++#define MMUAR REG32(MMU_BASE+0x10)
++#define MMUAR_VPN 1
++#define MMUAR_VP (0xfffffffe)
++#define MMUAR_SN 0
++#define MMUAR_S (1<<MMUAR_SN)
++
++#define MMUTR REG32(MMU_BASE+0x14)
++#define MMUTR_VAN 10
++#define MMUTR_VA (0xfffffc00)
++#define MMUTR_IDN 2
++#define MMUTR_ID (0xff<<MMUTR_IDN)
++#define MMUTR_SGN 1
++#define MMUTR_SG (1<<MMUTR_SGN)
++#define MMUTR_VN 0
++#define MMUTR_V (1<<MMUTR_VN)
++
++#define MMUDR REG32(MMU_BASE+0x18)
++#define MMUDR_PAN 10
++#define MMUDR_PA (0xfffffc00)
++#define MMUDR_SZN 8
++#define MMUDR_SZ_MASK (0x2<<MMUDR_SZN)
++#define MMUDR_SZ1M (0<<MMUDR_SZN)
++#define MMUDR_SZ4K (1<<MMUDR_SZN)
++#define MMUDR_SZ8K (2<<MMUDR_SZN)
++#define MMUDR_SZ16M (3<<MMUDR_SZN)
++#define MMUDR_CMN 6
++#define MMUDR_INC (2<<MMUDR_CMN)
++#define MMUDR_IC (0<<MMUDR_CMN)
++#define MMUDR_DWT (0<<MMUDR_CMN)
++#define MMUDR_DCB (1<<MMUDR_CMN)
++#define MMUDR_DNCP (2<<MMUDR_CMN)
++#define MMUDR_DNCIP (3<<MMUDR_CMN)
++#define MMUDR_SPN 5
++#define MMUDR_SP (1<<MMUDR_SPN)
++#define MMUDR_RN 4
++#define MMUDR_R (1<<MMUDR_RN)
++#define MMUDR_WN 3
++#define MMUDR_W (1<<MMUDR_WN)
++#define MMUDR_XN 2
++#define MMUDR_X (1<<MMUDR_XN)
++#define MMUDR_LKN 1
++#define MMUDR_LK (1<<MMUDR_LKN)
++
++
++#ifndef __ASSEMBLY__
++#define CF_PMEGS_NUM 256
++#define CF_INVALID_CONTEXT 255
++#define CF_PAGE_PGNUM_MASK (PAGE_MASK)
++
++extern int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb,
++ int extension_word);
++#endif /* __ASSEMBLY__*/
++
++#endif /* !__CF_MMU_H__ */
+--- a/arch/m68k/include/asm/coldfire.h
++++ b/arch/m68k/include/asm/coldfire.h
+@@ -5,6 +5,13 @@
+ *
+ * (C) Copyright 1999-2006, Greg Ungerer (gerg@snapgear.com)
+ * (C) Copyright 2000, Lineo (www.lineo.com)
++ *
++ * Copyright (C) 2009-2011 Freescale Semiconductor, Inc. All Right Reserved.
++ * Shrek Wu b16972@freescale.com
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
+ */
+
+ /****************************************************************************/
+@@ -19,27 +26,78 @@
+ * here. Also the peripheral clock (bus clock) divide ratio is set
+ * at config time too.
+ */
++/*FIXME Jason*/
++#if 0
+ #ifdef CONFIG_CLOCK_SET
+ #define MCF_CLK CONFIG_CLOCK_FREQ
+ #define MCF_BUSCLK (CONFIG_CLOCK_FREQ / CONFIG_CLOCK_DIV)
+ #else
+ #error "Don't know what your ColdFire CPU clock frequency is??"
+ #endif
++#endif
++
++
++#define MCF_CLK CONFIG_MCFCLK
++#define MCF_BUSCLK (CONFIG_MCFCLK/2)
++
++
++#if defined(CONFIG_M520x)
++#define MCF_IPSBAR 0xFC000000
++#else
++#define MCF_IPSBAR 0x40000000
++#endif
+
++#if defined(CONFIG_M5445X) || defined(CONFIG_M5441X)
++#define MCF_MBAR 0x0
++/*
++ * Although the RAMBAR1 macro would normally point into the
++ * 0x8xxxxxxx (SRAM) range, it is set to CONFIG_SDRAM_BASE here
++ * so that SDRAM memory is used rather than SRAM.
++ */
++#define MCF_RAMBAR1 (CONFIG_SDRAM_BASE)
++#elif defined(CONFIG_M547X_8X)
++#define MCF_MBAR 0xF0000000
++#define MCF_MMUBAR 0xF1000000
++#define MCF_RAMBAR0 0xF3000000
++#define MCF_RAMBAR1 0xF3001000
++#else
+ /*
+ * Define the processor support peripherals base address.
+ * This is generally setup by the boards start up code.
+ */
+ #define MCF_MBAR 0x10000000
+ #define MCF_MBAR2 0x80000000
+-#if defined(CONFIG_M54xx)
+-#define MCF_IPSBAR MCF_MBAR
+-#elif defined(CONFIG_M520x)
+-#define MCF_IPSBAR 0xFC000000
+-#else
+-#define MCF_IPSBAR 0x40000000
+ #endif
+
++#ifdef __ASSEMBLY__
++#define REG32
++#define REG16
++#define REG08
++#else /* __ASSEMBLY__ */
++#define REG32(x) ((volatile unsigned long *)(x))
++#define REG16(x) ((volatile unsigned short *)(x))
++#define REG08(x) ((volatile unsigned char *)(x))
++
++#define MCF_REG32(x) (*(volatile unsigned long *)(MCF_MBAR+(x)))
++#define MCF_REG16(x) (*(volatile unsigned short *)(MCF_MBAR+(x)))
++#define MCF_REG08(x) (*(volatile unsigned char *)(MCF_MBAR+(x)))
++
++void cacr_set(unsigned long);
++unsigned long cacr_get(void);
++
++#define coldfire_enable_irq0(irq) MCF_INTC0_CIMR = (irq);
++
++#define coldfire_enable_irq1(irq) MCF_INTC1_CIMR = (irq);
++
++#define coldfire_disable_irq0(irq) MCF_INTC0_SIMR = (irq);
++
++#define coldfire_disable_irq1(irq) MCF_INTC1_SIMR = (irq);
++
++#define getiprh() MCF_INTC0_IPRH
++
++#endif /* __ASSEMBLY__ */
++
++
+ #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M520x)
+ #undef MCF_MBAR
+--- a/arch/m68k/include/asm/delay_mm.h
++++ b/arch/m68k/include/asm/delay_mm.h
+@@ -1,18 +1,41 @@
+-#ifndef _M68K_DELAY_H
+-#define _M68K_DELAY_H
+-
+-#include <asm/param.h>
+-
+ /*
+ * Copyright (C) 1994 Hamish Macdonald
+ *
++ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ *
+ * Delay routines, using a pre-computed "loops_per_jiffy" value.
+ */
+
++#ifndef _M68K_DELAY_H
++#define _M68K_DELAY_H
++
++#include <asm/param.h>
++
+ static inline void __delay(unsigned long loops)
+ {
++#if defined(CONFIG_COLDFIRE)
++	/* The ColdFire runs this loop at significantly different speeds
++	 * depending upon whether it is long-word aligned. We pad it to
++	 * long-word alignment, which is the faster version.
++ * The 0x4a8e is of course a 'tstl %fp' instruction. This is better
++ * than using a NOP (0x4e71) instruction because it executes in one
++	 * cycle, not three, and doesn't allow for an arbitrary delay waiting
++ * for bus cycles to finish. Also fp/a6 isn't likely to cause a
++ * stall waiting for the register to become valid if such is added
++ * to the coldfire at some stage.
++ */
++ __asm__ __volatile__ (".balignw 4, 0x4a8e\n\t"
++ "1: subql #1, %0\n\t"
++ "jcc 1b"
++ : "=d" (loops) : "0" (loops));
++#else
+ __asm__ __volatile__ ("1: subql #1,%0; jcc 1b"
+ : "=d" (loops) : "0" (loops));
++#endif
+ }
+
+ extern void __bad_udelay(void);
+@@ -26,12 +49,17 @@ extern void __bad_udelay(void);
+ */
+ static inline void __const_udelay(unsigned long xloops)
+ {
++#if defined(CONFIG_COLDFIRE)
++
++ __delay(((((unsigned long long) xloops * loops_per_jiffy))>>32)*HZ);
++#else
+ unsigned long tmp;
+
+ __asm__ ("mulul %2,%0:%1"
+ : "=d" (xloops), "=d" (tmp)
+ : "d" (xloops), "1" (loops_per_jiffy));
+ __delay(xloops * HZ);
++#endif
+ }
+
+ static inline void __udelay(unsigned long usecs)
+@@ -46,12 +74,16 @@ static inline void __udelay(unsigned lon
+ static inline unsigned long muldiv(unsigned long a, unsigned long b,
+ unsigned long c)
+ {
++#if defined(CONFIG_COLDFIRE)
++ return (long)(((unsigned long long)a * b)/c);
++#else
+ unsigned long tmp;
+
+ __asm__ ("mulul %2,%0:%1; divul %3,%0:%1"
+ : "=d" (tmp), "=d" (a)
+ : "d" (b), "d" (c), "1" (a));
+ return a;
++#endif
+ }
+
+ #endif /* defined(_M68K_DELAY_H) */
+--- a/arch/m68k/include/asm/div64.h
++++ b/arch/m68k/include/asm/div64.h
+@@ -1,12 +1,17 @@
++/*
++ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
+ #ifndef _M68K_DIV64_H
+ #define _M68K_DIV64_H
+
+-#ifdef CONFIG_MMU
+-
++#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
+ #include <linux/types.h>
+
+ /* n = n / base; return rem; */
+-
+ #define do_div(n, base) ({ \
+ union { \
+ unsigned long n32[2]; \
+--- a/arch/m68k/include/asm/dma.h
++++ b/arch/m68k/include/asm/dma.h
+@@ -1,7 +1,10 @@
+-#ifndef _M68K_DMA_H
+-#define _M68K_DMA_H 1
+-
+-#ifdef CONFIG_COLDFIRE
++/*
++ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
+ /*
+ * ColdFire DMA Model:
+ * ColdFire DMA supports two forms of DMA: Single and Dual address. Single
+@@ -25,6 +28,11 @@
+ * Arthur Shipkowski (art@videon-central.com)
+ */
+
++#ifndef _M68K_DMA_H
++#define _M68K_DMA_H 1
++
++#ifdef CONFIG_COLDFIRE
++
+ #include <asm/coldfire.h>
+ #include <asm/mcfsim.h>
+ #include <asm/mcfdma.h>
+@@ -479,13 +487,106 @@ static __inline__ int get_dma_residue(un
+
+ /* it's useless on the m68k, but unfortunately needed by the new
+ bootmem allocator (but this should do it for this) */
++#if defined(CONFIG_M5445X) || defined(CONFIG_M547X_8X)
++#define MAX_DMA_ADDRESS 0xefffffff
++#elif defined(CONFIG_M5441X)
++#define MAX_DMA_ADDRESS 0xdfffffff
++#else
+ #define MAX_DMA_ADDRESS PAGE_OFFSET
++#endif
+
++#ifndef CONFIG_COLDFIRE
+ #define MAX_DMA_CHANNELS 8
+
+ extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
+ extern void free_dma(unsigned int dmanr); /* release it again */
+
++#else /* CONFIG_COLDFIRE */
++
++/************************************************
++ * Multichannel DMA definitions *
++ ************************************************/
++#ifdef CONFIG_MCD_DMA
++#include <asm/MCD_dma.h>
++#include <asm/m5485dma.h>
++
++struct scatterlist;
++
++#define MAX_DMA_CHANNELS NCHANNELS
++/*
++ * identifiers for each initiator/requestor
++ */
++#define DMA_ALWAYS (0)
++#define DMA_DSPI_RX (1)
++#define DMA_DSPI_TX (2)
++#define DMA_DREQ0 (3)
++#define DMA_PSC0_RX (4)
++#define DMA_PSC0_TX (5)
++#define DMA_USBEP0 (6)
++#define DMA_USBEP1 (7)
++#define DMA_USBEP2 (8)
++#define DMA_USBEP3 (9)
++#define DMA_PCI_TX (10)
++#define DMA_PCI_RX (11)
++#define DMA_PSC1_RX (12)
++#define DMA_PSC1_TX (13)
++#define DMA_I2C_RX (14)
++#define DMA_I2C_TX (15)
++#define DMA_FEC0_RX (16)
++#define DMA_FEC0_TX (17)
++#define DMA_FEC1_RX (18)
++#define DMA_FEC1_TX (19)
++#define DMA_DREQ1 (20)
++#define DMA_CTM0 (21)
++#define DMA_CTM1 (22)
++#define DMA_CTM2 (23)
++#define DMA_CTM3 (24)
++#define DMA_CTM4 (25)
++#define DMA_CTM5 (26)
++#define DMA_CTM6 (27)
++#define DMA_CTM7 (28)
++#define DMA_USBEP4 (29)
++#define DMA_USBEP5 (30)
++#define DMA_USBEP6 (31)
++#define DMA_PSC2_RX (32)
++#define DMA_PSC2_TX (33)
++#define DMA_PSC3_RX (34)
++#define DMA_PSC3_TX (35)
++#define DMA_FEC_RX(x) ((x == 0) ? DMA_FEC0_RX : DMA_FEC1_RX)
++#define DMA_FEC_TX(x) ((x == 0) ? DMA_FEC0_TX : DMA_FEC1_TX)
++
++int dma_set_initiator(int);
++unsigned int dma_get_initiator(int);
++void dma_remove_initiator(int);
++int dma_set_channel(int);
++int dma_get_channel(int);
++void dma_remove_channel(int);
++int dma_set_channel_fec(int requestor);
++int dma_connect(int channel, int address);
++int dma_disconnect(int channel);
++void dma_remove_channel_by_number(int channel);
++int dma_init(void);
++#endif
++
++extern spinlock_t dma_spin_lock;
++
++static inline unsigned long claim_dma_lock(void)
++{
++ unsigned long flags;
++ spin_lock_irqsave(&dma_spin_lock, flags);
++ return flags;
++}
++
++static inline void release_dma_lock(unsigned long flags)
++{
++ spin_unlock_irqrestore(&dma_spin_lock, flags);
++}
++#endif
++
++#ifdef CONFIG_PCI
++extern int isa_dma_bridge_buggy;
++#else
+ #define isa_dma_bridge_buggy (0)
++#endif
+
+ #endif /* _M68K_DMA_H */
+--- a/arch/m68k/include/asm/elf.h
++++ b/arch/m68k/include/asm/elf.h
+@@ -1,10 +1,17 @@
+-#ifndef __ASMm68k_ELF_H
+-#define __ASMm68k_ELF_H
+-
++/*
++ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
+ /*
+ * ELF register definitions..
+ */
+
++#ifndef __ASMm68k_ELF_H
++#define __ASMm68k_ELF_H
++
+ #include <asm/ptrace.h>
+ #include <asm/user.h>
+
+@@ -34,6 +41,26 @@
+ #define R_68K_GLOB_DAT 20
+ #define R_68K_JMP_SLOT 21
+ #define R_68K_RELATIVE 22
++/* TLS static relocations */
++#define R_68K_TLS_GD32 25
++#define R_68K_TLS_GD16 26
++#define R_68K_TLS_GD8 27
++#define R_68K_TLS_LDM32 28
++#define R_68K_TLS_LDM16 29
++#define R_68K_TLS_LDM8 30
++#define R_68K_TLS_LDO32 31
++#define R_68K_TLS_LDO16 32
++#define R_68K_TLS_LDO8 33
++#define R_68K_TLS_IE32 34
++#define R_68K_TLS_IE16 35
++#define R_68K_TLS_IE8 36
++#define R_68K_TLS_LE32 37
++#define R_68K_TLS_LE16 38
++#define R_68K_TLS_LE8 39
++/* TLS dynamic relocations */
++#define R_68K_TLS_DTPMOD32 40
++#define R_68K_TLS_DTPREL32 41
++#define R_68K_TLS_TPREL32 42
+
+ typedef unsigned long elf_greg_t;
+
+@@ -59,7 +86,7 @@ typedef struct user_m68kfp_struct elf_fp
+ is actually used on ASV. */
+ #define ELF_PLAT_INIT(_r, load_addr) _r->a1 = 0
+
+-#ifndef CONFIG_SUN3
++#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+ #define ELF_EXEC_PAGESIZE 4096
+ #else
+ #define ELF_EXEC_PAGESIZE 8192
+@@ -70,8 +97,10 @@ typedef struct user_m68kfp_struct elf_fp
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#ifndef CONFIG_SUN3
++#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+ #define ELF_ET_DYN_BASE 0xD0000000UL
++#elif defined(CONFIG_COLDFIRE)
++#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x10000000)
+ #else
+ #define ELF_ET_DYN_BASE 0x0D800000UL
+ #endif
+@@ -115,4 +144,35 @@ typedef struct user_m68kfp_struct elf_fp
+
+ #define SET_PERSONALITY(ex) set_personality(PER_LINUX)
+
++/*
++ * VDSO
++ */
++#ifdef CONFIG_VDSO
++extern unsigned int vdso_enabled;
++
++#define VDSO_BASE ((unsigned long)current->mm->context.vdso)
++#define VDSO_SYM(x) (VDSO_BASE + (unsigned long)(x))
++
++#define VDSO_AUX_ENT \
++ if (vdso_enabled) \
++ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);
++
++/* additional pages */
++#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
++
++struct linux_binprm;
++extern int arch_setup_additional_pages(struct linux_binprm *bprm,
++ int executable_stack);
++
++#else
++/* no VDSO_AUX_ENT */
++#define VDSO_AUX_ENT
++#endif
++
++#define ARCH_DLINFO \
++do { \
++ /* vdso entry */ \
++ VDSO_AUX_ENT; \
++} while (0);
++
+ #endif
+--- a/arch/m68k/include/asm/io_mm.h
++++ b/arch/m68k/include/asm/io_mm.h
+@@ -1,23 +1,36 @@
+ /*
+ * linux/include/asm-m68k/io.h
+ *
++ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ *
+ * 4/1/00 RZ: - rewritten to avoid clashes between ISA/PCI and other
+ * IO access
+ * - added Q40 support
+ * - added skeleton for GG-II and Amiga PCMCIA
+ * 2/3/01 RZ: - moved a few more defs into raw_io.h
+ *
+- * inX/outX should not be used by any driver unless it does
+- * ISA access. Other drivers should use function defined in raw_io.h
++ * inX/outX/readX/writeX should not be used by any driver unless it does
++ * ISA or PCI access. Other drivers should use function defined in raw_io.h
+ * or define its own macros on top of these.
+ *
+- * inX(),outX() are for ISA I/O
++ * inX(),outX() are for PCI and ISA I/O
++ * readX(),writeX() are for PCI memory
+ * isa_readX(),isa_writeX() are for ISA memory
++ *
++ * moved mem{cpy,set}_*io inside CONFIG_PCI
+ */
+
+ #ifndef _IO_H
+ #define _IO_H
+
++#ifdef CONFIG_COLDFIRE
++#include <asm/cf_io.h>
++#else
++
+ #ifdef __KERNEL__
+
+ #include <linux/compiler.h>
+@@ -49,6 +62,27 @@
+ #define MULTI_ISA 0
+ #endif /* Q40 */
+
++/* GG-II Zorro to ISA bridge */
++#ifdef CONFIG_GG2
++
++extern unsigned long gg2_isa_base;
++#define GG2_ISA_IO_B(ioaddr) \
++ (gg2_isa_base + 1 + ((unsigned long)(ioaddr) * 4))
++#define GG2_ISA_IO_W(ioaddr) \
++ (gg2_isa_base + ((unsigned long)(ioaddr) * 4))
++#define GG2_ISA_MEM_B(madr) \
++ (gg2_isa_base + 1 + (((unsigned long)(madr) * 4) & 0xfffff))
++#define GG2_ISA_MEM_W(madr) \
++ (gg2_isa_base + (((unsigned long)(madr) * 4) & 0xfffff))
++
++#ifndef MULTI_ISA
++#define MULTI_ISA 0
++#else
++#undef MULTI_ISA
++#define MULTI_ISA 1
++#endif
++#endif /* GG2 */
++
+ #ifdef CONFIG_AMIGA_PCMCIA
+ #include <asm/amigayle.h>
+
+@@ -71,17 +105,22 @@
+ #undef MULTI_ISA
+ #endif
+
+-#define ISA_TYPE_Q40 (1)
+-#define ISA_TYPE_AG (2)
++#define Q40_ISA (1)
++#define GG2_ISA (2)
++#define AG_ISA (3)
+
+ #if defined(CONFIG_Q40) && !defined(MULTI_ISA)
+-#define ISA_TYPE ISA_TYPE_Q40
++#define ISA_TYPE Q40_ISA
+ #define ISA_SEX 0
+ #endif
+ #if defined(CONFIG_AMIGA_PCMCIA) && !defined(MULTI_ISA)
+-#define ISA_TYPE ISA_TYPE_AG
++#define ISA_TYPE AG_ISA
+ #define ISA_SEX 1
+ #endif
++#if defined(CONFIG_GG2) && !defined(MULTI_ISA)
++#define ISA_TYPE GG2_ISA
++#define ISA_SEX 0
++#endif
+
+ #ifdef MULTI_ISA
+ extern int isa_type;
+@@ -98,65 +137,72 @@ extern int isa_sex;
+
+ static inline u8 __iomem *isa_itb(unsigned long addr)
+ {
+- switch(ISA_TYPE)
+- {
++ switch (ISA_TYPE) {
+ #ifdef CONFIG_Q40
+- case ISA_TYPE_Q40: return (u8 __iomem *)Q40_ISA_IO_B(addr);
++ case Q40_ISA: return (u8 __iomem *)Q40_ISA_IO_B(addr);
++#endif
++#ifdef CONFIG_GG2
++ case GG2_ISA: return (u8 __iomem *)GG2_ISA_IO_B(addr);
+ #endif
+ #ifdef CONFIG_AMIGA_PCMCIA
+- case ISA_TYPE_AG: return (u8 __iomem *)AG_ISA_IO_B(addr);
++ case AG_ISA: return (u8 __iomem *)AG_ISA_IO_B(addr);
+ #endif
+- default: return NULL; /* avoid warnings, just in case */
+- }
++ default: return NULL; /* avoid warnings, just in case */
++ }
+ }
+ static inline u16 __iomem *isa_itw(unsigned long addr)
+ {
+- switch(ISA_TYPE)
+- {
++ switch (ISA_TYPE) {
+ #ifdef CONFIG_Q40
+- case ISA_TYPE_Q40: return (u16 __iomem *)Q40_ISA_IO_W(addr);
++ case Q40_ISA: return (u16 __iomem *)Q40_ISA_IO_W(addr);
++#endif
++#ifdef CONFIG_GG2
++ case GG2_ISA: return (u16 __iomem *)GG2_ISA_IO_W(addr);
+ #endif
+ #ifdef CONFIG_AMIGA_PCMCIA
+- case ISA_TYPE_AG: return (u16 __iomem *)AG_ISA_IO_W(addr);
++ case AG_ISA: return (u16 __iomem *)AG_ISA_IO_W(addr);
+ #endif
+- default: return NULL; /* avoid warnings, just in case */
+- }
++ default: return NULL; /* avoid warnings, just in case */
++ }
+ }
+ static inline u32 __iomem *isa_itl(unsigned long addr)
+ {
+- switch(ISA_TYPE)
+- {
++ switch (ISA_TYPE) {
+ #ifdef CONFIG_AMIGA_PCMCIA
+- case ISA_TYPE_AG: return (u32 __iomem *)AG_ISA_IO_W(addr);
++ case AG_ISA: return (u32 __iomem *)AG_ISA_IO_W(addr);
+ #endif
+- default: return 0; /* avoid warnings, just in case */
+- }
++ default: return 0; /* avoid warnings, just in case */
++ }
+ }
+ static inline u8 __iomem *isa_mtb(unsigned long addr)
+ {
+- switch(ISA_TYPE)
+- {
++ switch (ISA_TYPE) {
+ #ifdef CONFIG_Q40
+- case ISA_TYPE_Q40: return (u8 __iomem *)Q40_ISA_MEM_B(addr);
++ case Q40_ISA: return (u8 __iomem *)Q40_ISA_MEM_B(addr);
++#endif
++#ifdef CONFIG_GG2
++ case GG2_ISA: return (u8 __iomem *)GG2_ISA_MEM_B(addr);
+ #endif
+ #ifdef CONFIG_AMIGA_PCMCIA
+- case ISA_TYPE_AG: return (u8 __iomem *)addr;
++ case AG_ISA: return (u8 __iomem *)addr;
+ #endif
+- default: return NULL; /* avoid warnings, just in case */
+- }
++ default: return NULL; /* avoid warnings, just in case */
++ }
+ }
+ static inline u16 __iomem *isa_mtw(unsigned long addr)
+ {
+- switch(ISA_TYPE)
+- {
++ switch (ISA_TYPE) {
+ #ifdef CONFIG_Q40
+- case ISA_TYPE_Q40: return (u16 __iomem *)Q40_ISA_MEM_W(addr);
++ case Q40_ISA: return (u16 __iomem *)Q40_ISA_MEM_W(addr);
++#endif
++#ifdef CONFIG_GG2
++ case GG2_ISA: return (u16 __iomem *)GG2_ISA_MEM_W(addr);
+ #endif
+ #ifdef CONFIG_AMIGA_PCMCIA
+- case ISA_TYPE_AG: return (u16 __iomem *)addr;
++ case AG_ISA: return (u16 __iomem *)addr;
+ #endif
+- default: return NULL; /* avoid warnings, just in case */
+- }
++ default: return NULL; /* avoid warnings, just in case */
++ }
+ }
+
+
+@@ -167,27 +213,30 @@ static inline u16 __iomem *isa_mtw(unsig
+ #define isa_outw(val,port) (ISA_SEX ? out_be16(isa_itw(port),(val)) : out_le16(isa_itw(port),(val)))
+ #define isa_outl(val,port) (ISA_SEX ? out_be32(isa_itl(port),(val)) : out_le32(isa_itl(port),(val)))
+
+-#define isa_readb(p) in_8(isa_mtb((unsigned long)(p)))
+-#define isa_readw(p) \
+- (ISA_SEX ? in_be16(isa_mtw((unsigned long)(p))) \
+- : in_le16(isa_mtw((unsigned long)(p))))
+-#define isa_writeb(val,p) out_8(isa_mtb((unsigned long)(p)),(val))
+-#define isa_writew(val,p) \
+- (ISA_SEX ? out_be16(isa_mtw((unsigned long)(p)),(val)) \
+- : out_le16(isa_mtw((unsigned long)(p)),(val)))
+-
++#define isa_readb(p) in_8(isa_mtb(p))
++#define isa_readw(p) (ISA_SEX ? in_be16(isa_mtw(p)) : in_le16(isa_mtw(p)))
++#define isa_writeb(val, p) out_8(isa_mtb(p), (val))
++#define isa_writew(val, p) \
++ (ISA_SEX ? out_be16(isa_mtw(p), (val)) : out_le16(isa_mtw(p), (val)))
+ static inline void isa_delay(void)
+ {
+- switch(ISA_TYPE)
+- {
++ switch (ISA_TYPE) {
+ #ifdef CONFIG_Q40
+- case ISA_TYPE_Q40: isa_outb(0,0x80); break;
++ case Q40_ISA:
++ isa_outb(0, 0x80);
++ break;
++#endif
++#ifdef CONFIG_GG2
++ case GG2_ISA:
++ break;
+ #endif
+ #ifdef CONFIG_AMIGA_PCMCIA
+- case ISA_TYPE_AG: break;
++ case AG_ISA:
++ break;
+ #endif
+- default: break; /* avoid warnings */
+- }
++ default:
++ break; /* avoid warnings */
++ }
+ }
+
+ #define isa_inb_p(p) ({u8 v=isa_inb(p);isa_delay();v;})
+@@ -216,7 +265,10 @@ static inline void isa_delay(void)
+ (ISA_SEX ? raw_outsl(isa_itl(port), (u32 *)(buf), (nr)) : \
+ raw_outsw_swapw(isa_itw(port), (u16 *)(buf), (nr)<<1))
+
++#endif /* CONFIG_ISA */
+
++
++#if defined(CONFIG_ISA) && !defined(CONFIG_PCI)
+ #define inb isa_inb
+ #define inb_p isa_inb_p
+ #define outb isa_outb
+@@ -239,9 +291,80 @@ static inline void isa_delay(void)
+ #define readw isa_readw
+ #define writeb isa_writeb
+ #define writew isa_writew
++#endif /* CONFIG_ISA */
++
++#if defined(CONFIG_PCI)
++
++#define readl(addr) in_le32(addr)
++#define writel(val, addr) out_le32((addr), (val))
++
++/* those can be defined for both ISA and PCI - it won't work though */
++#define readb(addr) in_8(addr)
++#define readw(addr) in_le16(addr)
++#define writeb(val, addr) out_8((addr), (val))
++#define writew(val, addr) out_le16((addr), (val))
++
++#define readb_relaxed(addr) readb(addr)
++#define readw_relaxed(addr) readw(addr)
++#define readl_relaxed(addr) readl(addr)
++
++#ifndef CONFIG_ISA
++#define inb(port) in_8(port)
++#define outb(val, port) out_8((port), (val))
++#define inw(port) in_le16(port)
++#define outw(val, port) out_le16((port), (val))
++#define inl(port) in_le32(port)
++#define outl(val, port) out_le32((port), (val))
++#define insb(port, buf, nr) \
++ raw_insb((u8 *)(port), (u8 *)(buf), (nr))
++#define outsb(port, buf, nr) \
++ raw_outsb((u8 *)(port), (u8 *)(buf), (nr))
++#define insw(port, buf, nr) \
++ raw_insw_swapw((u16 *)(port), (u16 *)(buf), (nr))
++#define outsw(port, buf, nr) \
++ raw_outsw_swapw((u16 *)(port), (u16 *)(buf), (nr))
++#define insl(port, buf, nr) \
++ raw_insw_swapw((u16 *)(port), (u16 *)(buf), (nr)<<1)
++#define outsl(port, buf, nr) \
++ raw_outsw_swapw((u16 *)(port), (u16 *)(buf), (nr)<<1)
++
++#define __raw_readb readb
++#define __raw_readw readw
++#define __raw_readl readl
++#define __raw_writeb writeb
++#define __raw_writew writew
++#define __raw_writel writel
+
+-#else /* CONFIG_ISA */
++#else
++/*
++ * With both ISA and PCI compiled into the kernel, the two have
++ * conflicting definitions for in/out. Simply treat ports below
++ * 1024 as ISA and everything else as PCI; readX/writeX are not
++ * defined in this case.
++ */
++#define inb(port) ((port) < 1024 ? isa_inb(port) : in_8(port))
++#define inb_p(port) ((port) < 1024 ? isa_inb_p(port) : in_8(port))
++#define inw(port) ((port) < 1024 ? isa_inw(port) : in_le16(port))
++#define inw_p(port) ((port) < 1024 ? isa_inw_p(port) : in_le16(port))
++#define inl(port) ((port) < 1024 ? isa_inl(port) : in_le32(port))
++#define inl_p(port) ((port) < 1024 ? isa_inl_p(port) : in_le32(port))
++
++#define outb(val, port) (((port) < 1024) ? isa_outb((val), (port)) \
++			: out_8((port), (val)))
++#define outb_p(val, port) (((port) < 1024) ? isa_outb_p((val), (port)) \
++			: out_8((port), (val)))
++#define outw(val, port) (((port) < 1024) ? isa_outw((val), (port)) \
++			: out_le16((port), (val)))
++#define outw_p(val, port) (((port) < 1024) ? isa_outw_p((val), (port)) \
++			: out_le16((port), (val)))
++#define outl(val, port) (((port) < 1024) ? isa_outl((val), (port)) \
++			: out_le32((port), (val)))
++#define outl_p(val, port) (((port) < 1024) ? isa_outl_p((val), (port)) \
++			: out_le32((port), (val)))
++#endif
++#endif /* CONFIG_PCI */
+
++#if !defined(CONFIG_ISA) && !defined(CONFIG_PCI)
+ /*
+ * We need to define dummy functions for GENERIC_IOMAP support.
+ */
+@@ -272,11 +395,11 @@ static inline void isa_delay(void)
+ #define writeb(val,addr) out_8((addr),(val))
+ #define readw(addr) in_le16(addr)
+ #define writew(val,addr) out_le16((addr),(val))
+-
+-#endif /* CONFIG_ISA */
+-
++#endif
++#if !defined(CONFIG_PCI)
+ #define readl(addr) in_le32(addr)
+ #define writel(val,addr) out_le32((addr),(val))
++#endif
+
+ #define mmiowb()
+
+@@ -333,4 +456,5 @@ static inline void memcpy_toio(volatile
+ */
+ #define xlate_dev_kmem_ptr(p) p
+
++#endif /* CONFIG_COLDFIRE */
+ #endif /* _IO_H */
+--- a/arch/m68k/include/asm/irq.h
++++ b/arch/m68k/include/asm/irq.h
+@@ -1,14 +1,20 @@
+-#ifndef _M68K_IRQ_H_
+-#define _M68K_IRQ_H_
+-
+ /*
++ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ *
+ * This should be the same as the max(NUM_X_SOURCES) for all the
+ * different m68k hosts compiled into the kernel.
+ * Currently the Atari has 72 and the Amiga 24, but if both are
+ * supported in the kernel it is better to make room for 72.
+ */
++#ifndef _M68K_IRQ_H_
++#define _M68K_IRQ_H_
+ #if defined(CONFIG_COLDFIRE)
+-#define NR_IRQS 256
++#define SYS_IRQS 256
++#define NR_IRQS SYS_IRQS
+ #elif defined(CONFIG_VME) || defined(CONFIG_SUN3) || defined(CONFIG_SUN3X)
+ #define NR_IRQS 200
+ #elif defined(CONFIG_ATARI) || defined(CONFIG_MAC)
+--- a/arch/m68k/include/asm/machdep.h
++++ b/arch/m68k/include/asm/machdep.h
+@@ -1,6 +1,12 @@
++/*
++ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
+ #ifndef _M68K_MACHDEP_H
+ #define _M68K_MACHDEP_H
+-
+ #include <linux/seq_file.h>
+ #include <linux/interrupt.h>
+
+@@ -42,4 +48,11 @@ extern irqreturn_t arch_timer_interrupt(
+ extern void config_BSP(char *command, int len);
+ extern void do_IRQ(int irq, struct pt_regs *fp);
+
++#ifdef CONFIG_COLDFIRE
++extern void __init config_coldfire(void);
++extern void __init mmu_context_init(void);
++extern irq_handler_t mach_default_handler;
++extern void (*mach_tick)(void);
++#endif
++
+ #endif /* _M68K_MACHDEP_H */
+--- /dev/null
++++ b/arch/m68k/include/asm/mcfdspi.h
+@@ -0,0 +1,59 @@
++/*
++ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Author: Andrey Butok
++ *
++ * This file is based on mcfqspi.h
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ ***************************************************************************
++ * Changes:
++ * v0.001 25 March 2008 Andrey Butok
++ * Initial Release - developed on uClinux with 2.6.23 kernel.
++ *
++ */
++
++#ifndef MCFDSPI_H_
++#define MCFDSPI_H_
++
++struct coldfire_dspi_chip {
++ u8 mode;
++ u8 bits_per_word;
++ u16 void_write_data;
++ /* Only used in master mode */
++ u8 dbr; /* Double baud rate */
++ u8 pbr; /* Baud rate prescaler */
++ u8 br; /* Baud rate scaler */
++ u8 pcssck; /* PCS to SCK delay prescaler */
++ u8 pasc; /* After SCK delay prescaler */
++ u8 pdt; /* Delay after transfer prescaler */
++ u8 cssck; /* PCS to SCK delay scaler */
++ u8 asc; /* After SCK delay scaler */
++ u8 dt; /* Delay after transfer scaler */
++};
++
++struct coldfire_spi_master {
++ u16 bus_num;
++ u16 num_chipselect;
++ u8 irq_source;
++ u32 irq_vector;
++ u32 irq_mask;
++ u8 irq_lp;
++ u8 par_val;
++ u16 par_val16;
++ u32 *irq_list;
++ void (*cs_control)(u8 cs, u8 command);
++};
++#endif /*MCFDSPI_H_*/
+--- a/arch/m68k/include/asm/mcfsim.h
++++ b/arch/m68k/include/asm/mcfsim.h
+@@ -5,6 +5,12 @@
+ *
+ * (C) Copyright 1999-2003, Greg Ungerer (gerg@snapgear.com)
+ * (C) Copyright 2000, Lineo Inc. (www.lineo.com)
++ *
++ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
+ */
+
+ /****************************************************************************/
+@@ -45,5 +51,120 @@
+ #include <asm/m54xxsim.h>
+ #endif
+
++#if defined(CONFIG_COLDFIRE)
++#include <asm/coldfire.h>
++#endif
++
++#if defined(CONFIG_M5445X)
++#include <asm/mcf5445x_intc.h>
++#include <asm/mcf5445x_gpio.h>
++#include <asm/mcf5445x_ccm.h>
++#include <asm/mcf5445x_eport.h>
++#include <asm/mcf5445x_fbcs.h>
++#include <asm/mcf5445x_xbs.h>
++#include <asm/mcf5445x_dtim.h>
++#include <asm/mcf5445x_rtc.h>
++#include <asm/mcf5445x_scm.h>
++#elif defined(CONFIG_M547X_8X)
++#include <asm/m5485sim.h>
++#include <asm/m5485gpio.h>
++#include <asm/m5485gpt.h>
++#elif defined(CONFIG_M5441X)
++#include <asm/mcf5441x_intc.h>
++#include <asm/mcf5441x_gpio.h>
++#include <asm/mcf5441x_ccm.h>
++#include <asm/mcf5441x_eport.h>
++#include <asm/mcf5441x_fbcs.h>
++#include <asm/mcf5441x_xbs.h>
++#include <asm/mcf5441x_dtim.h>
++#include <asm/mcf5441x_rtc.h>
++#include <asm/mcf5441x_scm.h>
++#include <asm/mcf5441x_pm.h>
++#include <asm/mcf5441x_flexcan.h>
++#include <asm/mcf5441x_clock.h>
++#endif
++
++/*
++ * Define the base address of the SIM within the MBAR address space.
++ */
++#define MCFSIM_BASE 0x0 /* Base address of SIM */
++
++
++/*
++ * Bit definitions for the ICR family of registers.
++ */
++#define MCFSIM_ICR_AUTOVEC 0x80 /* Auto-vectored intr */
++#define MCFSIM_ICR_LEVEL0 0x00 /* Level 0 intr */
++#define MCFSIM_ICR_LEVEL1 0x04 /* Level 1 intr */
++#define MCFSIM_ICR_LEVEL2 0x08 /* Level 2 intr */
++#define MCFSIM_ICR_LEVEL3 0x0c /* Level 3 intr */
++#define MCFSIM_ICR_LEVEL4 0x10 /* Level 4 intr */
++#define MCFSIM_ICR_LEVEL5 0x14 /* Level 5 intr */
++#define MCFSIM_ICR_LEVEL6 0x18 /* Level 6 intr */
++#define MCFSIM_ICR_LEVEL7 0x1c /* Level 7 intr */
++
++#define MCFSIM_ICR_PRI0 0x00 /* Priority 0 intr */
++#define MCFSIM_ICR_PRI1 0x01 /* Priority 1 intr */
++#define MCFSIM_ICR_PRI2 0x02 /* Priority 2 intr */
++#define MCFSIM_ICR_PRI3 0x03 /* Priority 3 intr */
++
++/*
++ * Bit definitions for the Interrupt Mask register (IMR).
++ */
++#define MCFSIM_IMR_EINT1 0x0002 /* External intr # 1 */
++#define MCFSIM_IMR_EINT2 0x0004 /* External intr # 2 */
++#define MCFSIM_IMR_EINT3 0x0008 /* External intr # 3 */
++#define MCFSIM_IMR_EINT4 0x0010 /* External intr # 4 */
++#define MCFSIM_IMR_EINT5 0x0020 /* External intr # 5 */
++#define MCFSIM_IMR_EINT6 0x0040 /* External intr # 6 */
++#define MCFSIM_IMR_EINT7 0x0080 /* External intr # 7 */
++
++#define MCFSIM_IMR_SWD 0x0100 /* Software Watchdog intr */
++#define MCFSIM_IMR_TIMER1 0x0200 /* TIMER 1 intr */
++#define MCFSIM_IMR_TIMER2 0x0400 /* TIMER 2 intr */
++#define MCFSIM_IMR_MBUS 0x0800 /* MBUS intr */
++#define MCFSIM_IMR_UART1 0x1000 /* UART 1 intr */
++#define MCFSIM_IMR_UART2 0x2000 /* UART 2 intr */
++
++#if defined(CONFIG_M5206e)
++#define MCFSIM_IMR_DMA1 0x4000 /* DMA 1 intr */
++#define MCFSIM_IMR_DMA2 0x8000 /* DMA 2 intr */
++#elif defined(CONFIG_M5249) || defined(CONFIG_M5307)
++#define MCFSIM_IMR_DMA0 0x4000 /* DMA 0 intr */
++#define MCFSIM_IMR_DMA1 0x8000 /* DMA 1 intr */
++#define MCFSIM_IMR_DMA2 0x10000 /* DMA 2 intr */
++#define MCFSIM_IMR_DMA3 0x20000 /* DMA 3 intr */
++#endif
++
++/*
++ * Mask for all of the SIM devices. Some parts have more or fewer
++ * SIM devices. This is a catch-all for the standard set.
++ */
++#ifndef MCFSIM_IMR_MASKALL
++#define MCFSIM_IMR_MASKALL 0x3ffe /* All intr sources */
++#endif
++
++
++/*
++ * PIT interrupt settings, if not found in mXXXXsim.h file.
++ */
++#ifndef ICR_INTRCONF
++#define ICR_INTRCONF 0x2b /* PIT1 level 5, priority 3 */
++#endif
++#ifndef MCFPIT_IMR
++#define MCFPIT_IMR MCFINTC_IMRH
++#endif
++#ifndef MCFPIT_IMR_IBIT
++#define MCFPIT_IMR_IBIT (1 << (MCFINT_PIT1 - 32))
++#endif
++
++
++#ifndef __ASSEMBLY__
++/*
++ * Definition for the interrupt auto-vectoring support.
++ */
++extern void mcf_autovector(unsigned int vec);
++#endif /* __ASSEMBLY__ */
++
+ /****************************************************************************/
+ #endif /* mcfsim_h */
+--- a/arch/m68k/include/asm/mcfuart.h
++++ b/arch/m68k/include/asm/mcfuart.h
+@@ -5,6 +5,11 @@
+ *
+ * (C) Copyright 1999-2003, Greg Ungerer (gerg@snapgear.com)
+ * (C) Copyright 2000, Lineo Inc. (www.lineo.com)
++ * Copyright (C) 2009-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
+ */
+
+ /****************************************************************************/
+@@ -12,6 +17,59 @@
+ #define mcfuart_h
+ /****************************************************************************/
+
++#if defined(CONFIG_M5445X)
++#include <asm/mcf5445x_intc.h>
++#define MCFUART_BASE1 0xfc060000 /* Base address of UART1 */
++#define MCFUART_BASE2 0xfc064000 /* Base address of UART2 */
++#define MCFUART_BASE3 0xfc068000 /* Base address of UART3 */
++#define MCFINT_VECBASE 64
++#define MCFINT_UART0 26
++#define MCFINT_UART1 27
++#elif defined(CONFIG_M547X_8X)
++#define MCFUART_BASE1 0x8600 /* Base address of UART1 */
++#define MCFUART_BASE2 0x8700 /* Base address of UART2 */
++#define MCFUART_BASE3 0x8800 /* Base address of UART3 */
++#define MCFUART_BASE4 0x8900 /* Base address of UART4 */
++#define MCFINT_VECBASE 64
++#define MCFINT_UART0 35
++#define MCFINT_UART1 34
++#define MCFINT_UART2 33
++#define MCFINT_UART3 32
++#elif defined(CONFIG_M5441X)
++#define MCFUART_BASE0 0xfc060000 /* Base address of UART1 */
++#define MCFUART_BASE1 0xfc064000 /* Base address of UART2 */
++#define MCFUART_BASE2 0xfc068000 /* Base address of UART3 */
++#define MCFUART_BASE3 0xfc06C000 /* Base address of UART4 */
++#define MCFUART_BASE4 0xec060000
++#define MCFUART_BASE5 0xec064000
++#define MCFUART_BASE6 0xec068000
++#define MCFUART_BASE7 0xec06C000
++#define MCFUART_BASE8 0xec070000
++#define MCFUART_BASE9 0xec074000
++
++#define MCFINT0_VECBASE 64
++#define MCFINT1_VECBASE (64 + 64)
++#define MCFINT_UART0 26
++#define MCFINT_UART1 27
++#define MCFINT_UART2 28
++#define MCFINT_UART3 29
++#define MCFINT_UART4 48
++#define MCFINT_UART5 49
++#define MCFINT_UART6 50
++#define MCFINT_UART7 51
++#define MCFINT_UART8 52
++#define MCFINT_UART9 53
++#endif
++
++#if defined(CONFIG_M5441X)
++#define MAX_PORT_NUM 10
++#elif defined(CONFIG_M547X_8X)
++#define MAX_PORT_NUM 4
++#else
++#define MAX_PORT_NUM 3
++#endif
++
++#ifndef __ASSEMBLY__
+ #include <linux/serial_core.h>
+ #include <linux/platform_device.h>
+
+@@ -21,6 +79,7 @@ struct mcf_platform_uart {
+ unsigned int irq; /* Interrupt vector */
+ unsigned int uartclk; /* UART clock rate */
+ };
++#endif
+
+ /*
+ * Define the ColdFire UART register set addresses.
+@@ -94,6 +153,11 @@ struct mcf_platform_uart {
+ #define MCFUART_USR_RXFULL 0x02 /* Receiver full */
+ #define MCFUART_USR_RXREADY 0x01 /* Receiver ready */
+
++#if defined(CONFIG_M547X_8X)
++#define MCFUART_USR_TXREADY_BN 0x0a
++#define MCFUART_USR_TXEMPTY_BN 0x0b
++#endif
++
+ #define MCFUART_USR_RXERR (MCFUART_USR_RXBREAK | MCFUART_USR_RXFRAMING | \
+ MCFUART_USR_RXPARITY | MCFUART_USR_RXOVERRUN)
+
+--- a/arch/m68k/include/asm/mmu.h
++++ b/arch/m68k/include/asm/mmu.h
+@@ -1,9 +1,22 @@
++/*
++ * Copyright (C) 2009-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
+ #ifndef __MMU_H
+ #define __MMU_H
+-
+ #ifdef CONFIG_MMU
++#ifdef CONFIG_VDSO
++typedef struct {
++ unsigned long id;
++ void *vdso;
++} mm_context_t;
++#else
+ /* Default "unsigned long" context */
+ typedef unsigned long mm_context_t;
++#endif
+ #else
+ typedef struct {
+ unsigned long end_brk;
+--- a/arch/m68k/include/asm/mmu_context.h
++++ b/arch/m68k/include/asm/mmu_context.h
+@@ -1,14 +1,21 @@
++/*
++ * Copyright (C) 2009-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
+ #ifndef __M68K_MMU_CONTEXT_H
+ #define __M68K_MMU_CONTEXT_H
+-
+ #include <asm-generic/mm_hooks.h>
++#include <asm-generic/pgtable.h>
+
+ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+ {
+ }
+
+ #ifdef CONFIG_MMU
+-#ifndef CONFIG_SUN3
++#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+
+ #include <asm/setup.h>
+ #include <asm/page.h>
+@@ -103,7 +110,7 @@ static inline void activate_mm(struct mm
+ switch_mm_0460(next_mm);
+ }
+
+-#else /* CONFIG_SUN3 */
++#elif defined(CONFIG_SUN3)
+ #include <asm/sun3mmu.h>
+ #include <linux/sched.h>
+
+@@ -151,7 +158,178 @@ static inline void activate_mm(struct mm
+ activate_context(next_mm);
+ }
+
++#else /* CONFIG_COLDFIRE */
++
++#include <asm/coldfire.h>
++#include <asm/atomic.h>
++#include <asm/bitops.h>
++#include <asm/mmu.h>
++
++#define NO_CONTEXT 256
++#define LAST_CONTEXT 255
++#define FIRST_CONTEXT 1
++
++#ifdef CONFIG_VDSO
++#define cpu_context(mm) ((mm)->context.id)
++#else
++#define cpu_context(mm) ((mm)->context)
++#endif
++
++#ifdef CONFIG_VDSO
++extern void set_context(unsigned long context, pgd_t *pgd);
++#else
++extern void set_context(mm_context_t context, pgd_t *pgd);
+ #endif
++extern unsigned long context_map[];
++#ifdef CONFIG_VDSO
++extern unsigned long next_mmu_context;
++#else
++extern mm_context_t next_mmu_context;
++#endif
++
++extern atomic_t nr_free_contexts;
++extern struct mm_struct *context_mm[LAST_CONTEXT+1];
++extern void steal_context(void);
++
++static inline void get_mmu_context(struct mm_struct *mm)
++{
++#ifdef CONFIG_VDSO
++ unsigned long ctx;
++#else
++ mm_context_t ctx;
++#endif
++
++ if (cpu_context(mm) != NO_CONTEXT)
++ return;
++ while (atomic_dec_and_test_lt(&nr_free_contexts)) {
++ atomic_inc(&nr_free_contexts);
++ steal_context();
++ }
++ ctx = next_mmu_context;
++ while (test_and_set_bit(ctx, context_map)) {
++ ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
++ if (ctx > LAST_CONTEXT)
++ ctx = 0;
++ }
++ next_mmu_context = (ctx + 1) & LAST_CONTEXT;
++ cpu_context(mm) = ctx;
++ context_mm[ctx] = mm;
++}
++
++/*
++ * Set up the context for a new address space.
++ */
++#define init_new_context(tsk, mm) ((cpu_context(mm) = NO_CONTEXT), 0)
++/* #define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0) */
++
++/*
++ * We're finished using the context for an address space.
++ */
++static inline void destroy_context(struct mm_struct *mm)
++{
++ if (cpu_context(mm) != NO_CONTEXT) {
++ clear_bit(cpu_context(mm), context_map);
++ cpu_context(mm) = NO_CONTEXT;
++ atomic_inc(&nr_free_contexts);
++ }
++}
++
++static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
++ struct task_struct *tsk)
++{
++ get_mmu_context(tsk->mm);
++ set_context(cpu_context(tsk->mm), next->pgd);
++}
++
++/*
++ * After we have set current->mm to a new value, this activates
++ * the context for the new mm so we see the new mappings.
++ */
++static inline void activate_mm(struct mm_struct *active_mm,
++ struct mm_struct *mm)
++{
++ get_mmu_context(mm);
++ set_context(cpu_context(mm), mm->pgd);
++}
++
++#define deactivate_mm(tsk, mm) do { } while (0)
++
++extern void mmu_context_init(void);
++#if defined(CONFIG_M547X_8X)
++#define prepare_arch_switch(next) load_ksp_mmu(next)
++
++static inline void load_ksp_mmu(struct task_struct *task)
++{
++ unsigned long flags;
++ struct mm_struct *mm;
++ int asid;
++ pgd_t *pgd;
++ pmd_t *pmd;
++ pte_t *pte;
++ unsigned long mmuar;
++
++ local_irq_save(flags);
++ mmuar = task->thread.ksp;
++
++	/* Search for a valid TLB entry; if one is found, don't remap */
++ *MMUAR = mmuar;
++ *MMUOR = MMUOR_STLB | MMUOR_ADR;
++ if ((*MMUSR) & MMUSR_HIT)
++ goto end;
++
++ if (mmuar >= PAGE_OFFSET) {
++ mm = &init_mm;
++ } else {
++ printk(KERN_INFO "load_ksp_mmu: non-kernel"
++ " mm found: 0x%08x\n",
++ (unsigned int) task->mm);
++ mm = task->mm;
++ }
++
++ if (!mm)
++ goto bug;
++
++ pgd = pgd_offset(mm, mmuar);
++ if (pgd_none(*pgd))
++ goto bug;
++
++ pmd = pmd_offset(pgd, mmuar);
++ if (pmd_none(*pmd))
++ goto bug;
++
++ pte = (mmuar >= PAGE_OFFSET) ? pte_offset_kernel(pmd, mmuar)
++ : pte_offset_map(pmd, mmuar);
++ if (pte_none(*pte) || !pte_present(*pte))
++ goto bug;
++
++ set_pte(pte, pte_mkyoung(*pte));
++ asid = cpu_context(mm) & 0xff;
++ if (!pte_dirty(*pte) && mmuar <= PAGE_OFFSET)
++ set_pte(pte, pte_wrprotect(*pte));
++
++ *MMUTR = (mmuar & PAGE_MASK) | (asid << CF_ASID_MMU_SHIFT)
++ | (((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK)
++ >> CF_PAGE_MMUTR_SHIFT)
++ | MMUTR_V;
++
++ *MMUDR = (pte_val(*pte) & PAGE_MASK)
++ | ((pte->pte) & CF_PAGE_MMUDR_MASK)
++ | MMUDR_SZ8K | MMUDR_X;
++
++ *MMUOR = MMUOR_ACC | MMUOR_UAA;
++ asm ("nop");
++
++ goto end;
++
++bug:
++ printk(KERN_ERR "ksp load failed: mm=0x%08x ksp=0x%08x\n",
++ (unsigned int) mm, (unsigned int) mmuar);
++end:
++ local_irq_restore(flags);
++}
++#endif /* CONFIG_M547X_8X */
++#endif /* CONFIG_COLDFIRE */
++
+ #else /* !CONFIG_MMU */
+
+ static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+--- a/arch/m68k/include/asm/page.h
++++ b/arch/m68k/include/asm/page.h
+@@ -1,12 +1,18 @@
++/*
++ * Copyright (C) 2009-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
+ #ifndef _M68K_PAGE_H
+ #define _M68K_PAGE_H
+-
+ #include <linux/const.h>
+ #include <asm/setup.h>
+ #include <asm/page_offset.h>
+
+ /* PAGE_SHIFT determines the page size */
+-#ifndef CONFIG_SUN3
++#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+ #define PAGE_SHIFT (12)
+ #else
+ #define PAGE_SHIFT (13)
+--- a/arch/m68k/include/asm/page_mm.h
++++ b/arch/m68k/include/asm/page_mm.h
+@@ -1,5 +1,17 @@
++/*
++ * Copyright (C) 2009-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
+ #ifndef _M68K_PAGE_MM_H
+ #define _M68K_PAGE_MM_H
++#if PAGE_SHIFT < 13
++#define THREAD_SIZE (8192)
++#else
++#define THREAD_SIZE PAGE_SIZE
++#endif
+
+ #ifndef __ASSEMBLY__
+
+@@ -70,6 +82,49 @@ extern unsigned long m68k_memoffset;
+
+ #define WANT_PAGE_VIRTUAL
+
++#if defined(CONFIG_COLDFIRE)
++extern unsigned long cf_dma_base;
++extern unsigned long cf_dma_end;
++
++static inline unsigned long ___pa(void *vaddr)
++{
++#if CONFIG_SDRAM_BASE != PAGE_OFFSET
++ return ((unsigned long)vaddr & 0x0fffffff) + CONFIG_SDRAM_BASE;
++#else
++ if ((unsigned long)vaddr >= CONFIG_DMA_BASE &&
++ (unsigned long)vaddr < (CONFIG_DMA_BASE + CONFIG_DMA_SIZE)) {
++ /* address is in carved out DMA range */
++ return ((unsigned long)vaddr - CONFIG_DMA_BASE)
++ + CONFIG_SDRAM_BASE;
++ } else if ((unsigned long)vaddr >= PAGE_OFFSET &&
++ (unsigned long)vaddr < (PAGE_OFFSET + CONFIG_SDRAM_SIZE)) {
++ /* normal mapping */
++ return ((unsigned long)vaddr - PAGE_OFFSET) + CONFIG_SDRAM_BASE;
++ }
++
++ return (unsigned long)vaddr;
++#endif
++}
++#define __pa(vaddr) ___pa((void *)(vaddr))
++
++static inline void *__va(unsigned long paddr)
++{
++#if CONFIG_SDRAM_BASE != PAGE_OFFSET
++ return (void *)((paddr & 0x0fffffff) + PAGE_OFFSET);
++#else
++ if (paddr >= cf_dma_base && paddr <= cf_dma_end) {
++ /* mapped address for DMA */
++ return (void *)((paddr - CONFIG_SDRAM_BASE) + CONFIG_DMA_BASE);
++ } else if (paddr >= cf_dma_end &&
++ paddr < (CONFIG_SDRAM_BASE + CONFIG_SDRAM_SIZE)) {
++ /* normal mapping */
++ return (void *)((paddr - CONFIG_SDRAM_BASE) + PAGE_OFFSET);
++ }
++ return (void *)paddr;
++#endif
++}
++
++#else
+ static inline unsigned long ___pa(void *vaddr)
+ {
+ unsigned long paddr;
+@@ -91,6 +146,7 @@ static inline void *__va(unsigned long p
+ : "0" (paddr), "i" (m68k_fixup_memoffset));
+ return vaddr;
+ }
++#endif
+
+ #else /* !CONFIG_SUN3 */
+ /* This #define is a horrible hack to suppress lots of warnings. --m */
+@@ -176,4 +232,9 @@ static inline __attribute_const__ int __
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifdef CONFIG_VDSO
++/* vDSO support */
++#define __HAVE_ARCH_GATE_AREA
++#endif
++
+ #endif /* _M68K_PAGE_MM_H */
+--- a/arch/m68k/include/asm/page_offset.h
++++ b/arch/m68k/include/asm/page_offset.h
+@@ -1,10 +1,21 @@
+-/* This handles the memory map.. */
+-
++/*
++ * Page and physical memory maps.
++ *
++ * Copyright (C) 2009-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
+ #ifdef CONFIG_MMU
+-#ifndef CONFIG_SUN3
+-#define PAGE_OFFSET_RAW 0x00000000
+-#else
++#if defined(CONFIG_SUN3)
+ #define PAGE_OFFSET_RAW 0x0E000000
++#elif defined(CONFIG_M5445X) || defined(CONFIG_M547X_8X) \
++ || defined(CONFIG_M5441X)
++#define PHYS_OFFSET CONFIG_SDRAM_BASE
++#define PAGE_OFFSET_RAW (PHYS_OFFSET)
++#else
++#define PAGE_OFFSET_RAW 0x00000000
+ #endif
+ #else
+ #define PAGE_OFFSET_RAW CONFIG_RAMBASE
+--- a/arch/m68k/include/asm/pgalloc.h
++++ b/arch/m68k/include/asm/pgalloc.h
+@@ -1,14 +1,22 @@
++/*
++ * Copyright (C) 2009-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
+ #ifndef M68K_PGALLOC_H
+ #define M68K_PGALLOC_H
+-
+ #include <linux/mm.h>
+ #include <linux/highmem.h>
+ #include <asm/setup.h>
+
+ #ifdef CONFIG_MMU
+ #include <asm/virtconvert.h>
+-#ifdef CONFIG_SUN3
++#if defined(CONFIG_SUN3)
+ #include <asm/sun3_pgalloc.h>
++#elif defined(CONFIG_COLDFIRE)
++#include <asm/cf_pgalloc.h>
+ #else
+ #include <asm/motorola_pgalloc.h>
+ #endif
+--- a/arch/m68k/include/asm/pgtable_mm.h
++++ b/arch/m68k/include/asm/pgtable_mm.h
+@@ -1,6 +1,12 @@
++/*
++ * Copyright (C) 2009-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
+ #ifndef _M68K_PGTABLE_H
+ #define _M68K_PGTABLE_H
+-
+ #include <asm-generic/4level-fixup.h>
+
+ #include <asm/setup.h>
+@@ -40,6 +46,8 @@
+ /* PGDIR_SHIFT determines what a third-level page table entry can map */
+ #ifdef CONFIG_SUN3
+ #define PGDIR_SHIFT 17
++#elif defined(CONFIG_COLDFIRE)
++#define PGDIR_SHIFT 22
+ #else
+ #define PGDIR_SHIFT 25
+ #endif
+@@ -54,6 +62,10 @@
+ #define PTRS_PER_PTE 16
+ #define PTRS_PER_PMD 1
+ #define PTRS_PER_PGD 2048
++#elif defined(CONFIG_COLDFIRE)
++#define PTRS_PER_PTE 512
++#define PTRS_PER_PMD 1
++#define PTRS_PER_PGD 1024
+ #else
+ #define PTRS_PER_PTE 1024
+ #define PTRS_PER_PMD 8
+@@ -66,6 +78,18 @@
+ #ifdef CONFIG_SUN3
+ #define KMAP_START 0x0DC00000
+ #define KMAP_END 0x0E000000
++#elif defined(CONFIG_COLDFIRE)
++#if defined(CONFIG_M5445X) || defined(CONFIG_M547X_8X)
++#define VMALLOC_START 0xc0000000
++#define VMALLOC_END 0xcfffffff
++#define KMAP_START (VMALLOC_END + 1)
++#define KMAP_END (0xe8000000 - 1)
++#elif defined(CONFIG_M5441X)
++#define VMALLOC_START 0xc0000000
++#define VMALLOC_END 0xcfffffff
++#define KMAP_START (VMALLOC_END + 1)
++#define KMAP_END (0xd8000000 - 1)
++#endif
+ #else
+ #define KMAP_START 0xd0000000
+ #define KMAP_END 0xf0000000
+@@ -79,9 +103,11 @@
+ * The vmalloc() routines leaves a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ */
++#if !defined(CONFIG_COLDFIRE)
+ #define VMALLOC_OFFSET (8*1024*1024)
+ #define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
+ #define VMALLOC_END KMAP_START
++#endif
+ #else
+ extern unsigned long m68k_vmalloc_end;
+ #define VMALLOC_START 0x0f800000
+@@ -130,6 +156,8 @@ static inline void update_mmu_cache(stru
+
+ #ifdef CONFIG_SUN3
+ #include <asm/sun3_pgtable.h>
++#elif defined(CONFIG_COLDFIRE)
++#include <asm/cf_pgtable.h>
+ #else
+ #include <asm/motorola_pgtable.h>
+ #endif
+@@ -143,6 +171,10 @@ static inline void update_mmu_cache(stru
+ #else
+ # define __SUN3_PAGE_NOCACHE 0
+ #endif
++
++#ifdef CONFIG_COLDFIRE
++# define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | CF_PAGE_NOCACHE))
++#else /* CONFIG_COLDFIRE */
+ #define pgprot_noncached(prot) \
+ (MMU_IS_SUN3 \
+ ? (__pgprot(pgprot_val(prot) | __SUN3_PAGE_NOCACHE)) \
+@@ -151,7 +183,7 @@ static inline void update_mmu_cache(stru
+ : (MMU_IS_040 || MMU_IS_060) \
+ ? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S)) \
+ : (prot)))
+-
++#endif /* CONFIG_COLDFIRE */
+ #include <asm-generic/pgtable.h>
+ #endif /* !__ASSEMBLY__ */
+
+--- a/arch/m68k/include/asm/processor.h
++++ b/arch/m68k/include/asm/processor.h
+@@ -2,6 +2,11 @@
+ * include/asm-m68k/processor.h
+ *
+ * Copyright (C) 1995 Hamish Macdonald
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
+ */
+
+ #ifndef __ASM_M68K_PROCESSOR_H
+@@ -23,6 +28,10 @@ static inline unsigned long rdusp(void)
+ #ifdef CONFIG_COLDFIRE_SW_A7
+ extern unsigned int sw_usp;
+ return sw_usp;
++#elif defined(CONFIG_COLDFIRE)
++ unsigned long usp;
++ __asm__ __volatile__("movel %/usp,%0" : "=a" (usp));
++ return usp;
+ #else
+ register unsigned long usp __asm__("a0");
+ /* move %usp,%a0 */
+@@ -36,6 +45,8 @@ static inline void wrusp(unsigned long u
+ #ifdef CONFIG_COLDFIRE_SW_A7
+ extern unsigned int sw_usp;
+ sw_usp = usp;
++#elif defined(CONFIG_COLDFIRE)
++ __asm__ __volatile__("movel %0,%/usp" : : "a" (usp));
+ #else
+ register unsigned long a0 __asm__("a0") = usp;
+ /* move %a0,%usp */
+@@ -48,11 +59,17 @@ static inline void wrusp(unsigned long u
+ * so don't change it unless you know what you are doing.
+ */
+ #ifdef CONFIG_MMU
+-#ifndef CONFIG_SUN3
++#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+ #define TASK_SIZE (0xF0000000UL)
++#elif defined(CONFIG_COLDFIRE)
++#define TASK_SIZE (0xC0000000UL)
++#else /* CONFIG_SUN3 */
++#ifdef __ASSEMBLY__
++#define TASK_SIZE (0x0E000000)
+ #else
+ #define TASK_SIZE (0x0E000000UL)
+ #endif
++#endif
+ #else
+ #define TASK_SIZE (0xFFFFFFFFUL)
+ #endif
+@@ -66,8 +83,10 @@ static inline void wrusp(unsigned long u
+ * space during mmap's.
+ */
+ #ifdef CONFIG_MMU
+-#ifndef CONFIG_SUN3
++#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+ #define TASK_UNMAPPED_BASE 0xC0000000UL
++#elif defined(CONFIG_COLDFIRE)
++#define TASK_UNMAPPED_BASE 0x60000000UL
+ #else
+ #define TASK_UNMAPPED_BASE 0x0A000000UL
+ #endif
+@@ -80,7 +99,11 @@ struct thread_struct {
+ unsigned long ksp; /* kernel stack pointer */
+ unsigned long usp; /* user stack pointer */
+ unsigned short sr; /* saved status register */
++#ifndef CONFIG_COLDFIRE
+ unsigned short fs; /* saved fs (sfc, dfc) */
++#else
++ mm_segment_t fs;
++#endif
+ unsigned long crp[2]; /* cpu root pointer */
+ unsigned long esp0; /* points to SR of stack frame */
+ unsigned long faddr; /* info about last fault */
+@@ -102,6 +125,7 @@ struct thread_struct {
+ /*
+ * Do necessary setup to start up a newly executed thread.
+ */
++#ifndef CONFIG_COLDFIRE
+ static inline void start_thread(struct pt_regs * regs, unsigned long pc,
+ unsigned long usp)
+ {
+@@ -112,7 +136,24 @@ static inline void start_thread(struct p
+ regs->sr &= ~0x2000;
+ wrusp(usp);
+ }
++#else
++/*
++ * Do necessary setup to start up a newly executed thread.
++ *
++ * Pass the data segment into user programs if it exists;
++ * it can't hurt anything as far as I can tell.
++ */
++#define start_thread(_regs, _pc, _usp) \
++do { \
++ set_fs(USER_DS); /* reads from user space */ \
++ (_regs)->pc = (_pc); \
++ if (current->mm) \
++ (_regs)->d5 = current->mm->start_data; \
++ (_regs)->sr &= ~0x2000; \
++ wrusp(_usp); \
++} while (0)
+
++#endif
+ #else
+
+ /*
+--- a/arch/m68k/include/asm/ptrace.h
++++ b/arch/m68k/include/asm/ptrace.h
+@@ -1,3 +1,10 @@
++/*
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
+ #ifndef _M68K_PTRACE_H
+ #define _M68K_PTRACE_H
+
+@@ -27,27 +34,38 @@
+ stack during a system call. */
+
+ struct pt_regs {
+- long d1;
+- long d2;
+- long d3;
+- long d4;
+- long d5;
+- long a0;
+- long a1;
+- long a2;
+- long d0;
+- long orig_d0;
+- long stkadj;
++ long d1;
++ long d2;
++ long d3;
++ long d4;
++ long d5;
++ long a0;
++ long a1;
++ long a2;
++ long d0;
++ long orig_d0;
++ long stkadj;
+ #ifdef CONFIG_COLDFIRE
++#if 0
+ unsigned format : 4; /* frame format specifier */
+ unsigned vector : 12; /* vector offset */
+ unsigned short sr;
+ unsigned long pc;
++#endif
++/*FROM BSP*/
++ unsigned long mmuar;
++ unsigned long mmusr;
++ unsigned format:4; /* frame format specifier */
++ unsigned fs2:2;
++ unsigned vector:8;
++ unsigned fs1:2;
++ unsigned short sr;
++ unsigned long pc;
+ #else
+- unsigned short sr;
+- unsigned long pc;
+- unsigned format : 4; /* frame format specifier */
+- unsigned vector : 12; /* vector offset */
++ unsigned short sr;
++ unsigned long pc;
++ unsigned format:4; /* frame format specifier */
++ unsigned vector:12; /* vector offset */
+ #endif
+ };
+
+--- a/arch/m68k/include/asm/raw_io.h
++++ b/arch/m68k/include/asm/raw_io.h
+@@ -3,11 +3,19 @@
+ *
+ * 10/20/00 RZ: - created from bits of io.h and ide.h to cleanup namespace
+ *
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
+ */
+-
+ #ifndef _RAW_IO_H
+ #define _RAW_IO_H
+
++#ifdef CONFIG_COLDFIRE
++#include <asm/cf_raw_io.h>
++#else
++
+ #ifdef __KERNEL__
+
+ #include <asm/types.h>
+@@ -60,6 +68,9 @@ extern void __iounmap(void *addr, unsign
+ #define __raw_writew(val,addr) out_be16((addr),(val))
+ #define __raw_writel(val,addr) out_be32((addr),(val))
+
++#define swap_inw(port) in_le16((port))
++#define swap_outw(val, port) out_le16((port), (val))
++
+ static inline void raw_insb(volatile u8 __iomem *port, u8 *buf, unsigned int len)
+ {
+ unsigned int i;
+@@ -344,4 +355,6 @@ static inline void raw_outsw_swapw(volat
+
+ #endif /* __KERNEL__ */
+
++#endif /* CONFIG_COLDFIRE */
++
+ #endif /* _RAW_IO_H */
+--- a/arch/m68k/include/asm/segment.h
++++ b/arch/m68k/include/asm/segment.h
+@@ -1,3 +1,10 @@
++/*
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
+ #ifndef _M68K_SEGMENT_H
+ #define _M68K_SEGMENT_H
+
+@@ -29,6 +36,7 @@ typedef struct {
+ * Get/set the SFC/DFC registers for MOVES instructions
+ */
+
++#ifndef CONFIG_COLDFIRE
+ static inline mm_segment_t get_fs(void)
+ {
+ #ifdef CONFIG_MMU
+@@ -56,6 +64,15 @@ static inline void set_fs(mm_segment_t v
+ #endif
+ }
+
++#else /* CONFIG_COLDFIRE */
++
++#include <asm/current.h>
++#define get_fs() (current->thread.fs)
++#define set_fs(val) (current->thread.fs = (val))
++#define get_ds() (KERNEL_DS)
++
++#endif /* CONFIG_COLDFIRE */
++
+ #define segment_eq(a,b) ((a).seg == (b).seg)
+
+ #endif /* __ASSEMBLY__ */
+--- a/arch/m68k/include/asm/setup.h
++++ b/arch/m68k/include/asm/setup.h
+@@ -2,6 +2,7 @@
+ ** asm/setup.h -- Definition of the Linux/m68k setup information
+ **
+ ** Copyright 1992 by Greg Harp
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ **
+ ** This file is subject to the terms and conditions of the GNU General Public
+ ** License. See the file COPYING in the main directory of this archive
+@@ -40,6 +41,7 @@
+ #define MACH_HP300 9
+ #define MACH_Q40 10
+ #define MACH_SUN3X 11
++#define MACH_CFMMU 12
+
+ #define COMMAND_LINE_SIZE 256
+
+@@ -189,6 +191,14 @@ extern unsigned long m68k_machtype;
+ # define MACH_TYPE (MACH_SUN3X)
+ #endif
+
++#if !defined(CONFIG_COLDFIRE)
++# define MACH_IS_COLDFIRE (0)
++#else
++# define CONFIG_COLDFIRE_ONLY
++# define MACH_IS_COLDFIRE (1)
++# define MACH_TYPE (MACH_CFMMU)
++#endif
++
+ #ifndef MACH_TYPE
+ # define MACH_TYPE (m68k_machtype)
+ #endif
+@@ -211,23 +221,31 @@ extern unsigned long m68k_machtype;
+ #define CPUB_68030 1
+ #define CPUB_68040 2
+ #define CPUB_68060 3
++#define CPUB_CFV4E 4
+
+ #define CPU_68020 (1<<CPUB_68020)
+ #define CPU_68030 (1<<CPUB_68030)
+ #define CPU_68040 (1<<CPUB_68040)
+ #define CPU_68060 (1<<CPUB_68060)
++#define CPU_CFV4E (1<<CPUB_CFV4E)
+
+ #define FPUB_68881 0
+ #define FPUB_68882 1
+ #define FPUB_68040 2 /* Internal FPU */
+ #define FPUB_68060 3 /* Internal FPU */
+ #define FPUB_SUNFPA 4 /* Sun-3 FPA */
++#define FPUB_CFV4E 5
+
+ #define FPU_68881 (1<<FPUB_68881)
+ #define FPU_68882 (1<<FPUB_68882)
+ #define FPU_68040 (1<<FPUB_68040)
+ #define FPU_68060 (1<<FPUB_68060)
+ #define FPU_SUNFPA (1<<FPUB_SUNFPA)
++#ifdef CONFIG_M547X_8X
++#define FPU_CFV4E (1<<FPUB_CFV4E)
++#else
++#define FPU_CFV4E 0
++#endif
+
+ #define MMUB_68851 0
+ #define MMUB_68030 1 /* Internal MMU */
+@@ -235,6 +253,7 @@ extern unsigned long m68k_machtype;
+ #define MMUB_68060 3 /* Internal MMU */
+ #define MMUB_APOLLO 4 /* Custom Apollo */
+ #define MMUB_SUN3 5 /* Custom Sun-3 */
++#define MMUB_CFV4E 6
+
+ #define MMU_68851 (1<<MMUB_68851)
+ #define MMU_68030 (1<<MMUB_68030)
+@@ -242,6 +261,7 @@ extern unsigned long m68k_machtype;
+ #define MMU_68060 (1<<MMUB_68060)
+ #define MMU_SUN3 (1<<MMUB_SUN3)
+ #define MMU_APOLLO (1<<MMUB_APOLLO)
++#define MMU_CFV4E (1<<MMUB_CFV4E)
+
+ #ifdef __KERNEL__
+
+@@ -341,6 +361,14 @@ extern int m68k_is040or060;
+ # endif
+ #endif
+
++#if !defined(CONFIG_CFV4E)
++# define CPU_IS_COLDFIRE (0)
++#else
++# define CPU_IS_COLDFIRE (m68k_cputype & CPU_CFV4E)
++# define CPU_IS_CFV4E (m68k_cputype & CPU_CFV4E)
++# define MMU_IS_CFV4E (m68k_mmutype & MMU_CFV4E)
++#endif
++
+ #define CPU_TYPE (m68k_cputype)
+
+ #ifdef CONFIG_M68KFPU_EMU
+@@ -371,6 +399,14 @@ extern int m68k_realnum_memory; /* real
+ extern struct mem_info m68k_memory[NUM_MEMINFO];/* memory description */
+ #endif
+
++#ifdef CONFIG_CFV4E
++#define QCHIP_RESTORE_DIRECTIVE ".chip 547x"
++#define CHIP_RESTORE_DIRECTIVE .chip 547x
++#else
++#define QCHIP_RESTORE_DIRECTIVE ".chip 68k"
++#define CHIP_RESTORE_DIRECTIVE .chip 68k
++#endif
++
+ #endif /* __KERNEL__ */
+
+ #endif /* _M68K_SETUP_H */
+--- a/arch/m68k/include/asm/signal.h
++++ b/arch/m68k/include/asm/signal.h
+@@ -1,6 +1,12 @@
++/*
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
+ #ifndef _M68K_SIGNAL_H
+ #define _M68K_SIGNAL_H
+-
+ #include <linux/types.h>
+
+ /* Avoid too many header ordering problems. */
+@@ -150,7 +156,7 @@ typedef struct sigaltstack {
+ #ifdef __KERNEL__
+ #include <asm/sigcontext.h>
+
+-#ifndef __uClinux__
++#ifndef CONFIG_COLDFIRE /*FIXME Jason*/
+ #define __HAVE_ARCH_SIG_BITOPS
+
+ static inline void sigaddset(sigset_t *set, int _sig)
+--- a/arch/m68k/include/asm/string.h
++++ b/arch/m68k/include/asm/string.h
+@@ -1,6 +1,12 @@
++/*
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
+ #ifndef _M68K_STRING_H_
+ #define _M68K_STRING_H_
+-
+ #include <linux/types.h>
+ #include <linux/compiler.h>
+
+@@ -81,6 +87,18 @@ static inline char *strncpy(char *dest,
+ strcpy(__d + strlen(__d), (s)); \
+ })
+
++#define __HAVE_ARCH_STRCHR
++static inline char *strchr(const char *s, int c)
++{
++ char sc, ch = c;
++
++ for (; (sc = *s++) != ch; ) {
++ if (!sc)
++ return NULL;
++ }
++ return (char *)s - 1;
++}
++
+ #ifndef CONFIG_COLDFIRE
+ #define __HAVE_ARCH_STRCMP
+ static inline int strcmp(const char *cs, const char *ct)
+--- a/arch/m68k/include/asm/swab.h
++++ b/arch/m68k/include/asm/swab.h
+@@ -1,11 +1,18 @@
++/*
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
+ #ifndef _M68K_SWAB_H
+ #define _M68K_SWAB_H
+-
+ #include <linux/types.h>
+ #include <linux/compiler.h>
+
+ #define __SWAB_64_THRU_32__
+
++#if defined(__GNUC__)
+ #if defined (__mcfisaaplus__) || defined (__mcfisac__)
+ static inline __attribute_const__ __u32 __arch_swab32(__u32 val)
+ {
+@@ -23,5 +30,11 @@ static inline __attribute_const__ __u32
+ }
+ #define __arch_swab32 __arch_swab32
+ #endif
++#endif
++
++#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
++# define __BYTEORDER_HAS_U64__
++# define __SWAB_64_THRU_32__
++#endif
+
+ #endif /* _M68K_SWAB_H */
+--- a/arch/m68k/include/asm/system_mm.h
++++ b/arch/m68k/include/asm/system_mm.h
+@@ -1,14 +1,35 @@
++/*
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
+ #ifndef _M68K_SYSTEM_H
+ #define _M68K_SYSTEM_H
+-
+ #include <linux/linkage.h>
+ #include <linux/kernel.h>
+ #include <linux/irqflags.h>
+ #include <asm/segment.h>
+ #include <asm/entry.h>
++#include <asm/cfcache.h>
+
+ #ifdef __KERNEL__
+
++#ifdef CONFIG_COLDFIRE
++#define FLUSH_BC (0x00040000)
++
++#define finish_arch_switch(prev) do { \
++ unsigned long tmpreg; \
++ asm volatile ("move.l %2,%0\n" \
++ "orl %1,%0\n" \
++ "movec %0,%%cacr" \
++ : "=&d" (tmpreg) \
++ : "id" (FLUSH_BC), "m" (shadow_cacr)); \
++ } while (0)
++
++#endif
++
+ /*
+ * switch_to(n) should switch tasks to task ptr, first checking that
+ * ptr isn't the current task, in which case it does nothing. This
+--- a/arch/m68k/include/asm/tlbflush.h
++++ b/arch/m68k/include/asm/tlbflush.h
+@@ -1,8 +1,14 @@
++/*
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
+ #ifndef _M68K_TLBFLUSH_H
+ #define _M68K_TLBFLUSH_H
+-
+ #ifdef CONFIG_MMU
+-#ifndef CONFIG_SUN3
++#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+
+ #include <asm/current.h>
+
+@@ -92,7 +98,12 @@ static inline void flush_tlb_kernel_rang
+ flush_tlb_all();
+ }
+
+-#else
++static inline void flush_tlb_pgtables(struct mm_struct *mm,
++ unsigned long start, unsigned long end)
++{
++}
++
++#elif defined(CONFIG_SUN3)
+
+
+ /* Reserved PMEGs. */
+@@ -214,6 +225,13 @@ static inline void flush_tlb_kernel_page
+ sun3_put_segmap (addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
+ }
+
++static inline void flush_tlb_pgtables(struct mm_struct *mm,
++ unsigned long start, unsigned long end)
++{
++}
++
++#else /* CONFIG_COLDFIRE */
++#include <asm/cf_tlbflush.h>
+ #endif
+
+ #else /* !CONFIG_MMU */
+--- a/arch/m68k/include/asm/uaccess_mm.h
++++ b/arch/m68k/include/asm/uaccess_mm.h
+@@ -1,6 +1,15 @@
++/*
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
+ #ifndef __M68K_UACCESS_H
+ #define __M68K_UACCESS_H
+-
++#ifdef CONFIG_COLDFIRE
++#include <asm/cf_uaccess.h>
++#else
+ /*
+ * User space memory access functions
+ */
+@@ -219,6 +228,41 @@ unsigned long __generic_copy_to_user(voi
+ : "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp) \
+ : : "memory")
+
++#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \
++ asm volatile ("\n" \
++ " move."#s1" (%2)+,%3\n" \
++ "11: moves."#s1" %3,(%1)+\n" \
++ "12: move."#s2" (%2)+,%3\n" \
++ "21: moves."#s2" %3,(%1)+\n" \
++ "22:\n" \
++ " .ifnc \""#s3"\",\"\"\n" \
++ " move."#s3" (%2)+,%3\n" \
++ "31: moves."#s3" %3,(%1)+\n" \
++ "32:\n" \
++ " .endif\n" \
++ "4:\n" \
++ "\n" \
++ " .section __ex_table,\"a\"\n" \
++ " .align 4\n" \
++ " .long 11b,5f\n" \
++ " .long 12b,5f\n" \
++ " .long 21b,5f\n" \
++ " .long 22b,5f\n" \
++ " .ifnc \""#s3"\",\"\"\n" \
++ " .long 31b,5f\n" \
++ " .long 32b,5f\n" \
++ " .endif\n" \
++ " .previous\n" \
++ "\n" \
++ " .section .fixup,\"ax\"\n" \
++ " .even\n" \
++ "5: moveq.l #"#n",%0\n" \
++ " jra 4b\n" \
++ " .previous\n" \
++ : "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp) \
++ : : "memory")
++
++#endif /* CONFIG_COLDFIRE */
+ static __always_inline unsigned long
+ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+@@ -266,40 +310,6 @@ __constant_copy_from_user(void *to, cons
+ return res;
+ }
+
+-#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \
+- asm volatile ("\n" \
+- " move."#s1" (%2)+,%3\n" \
+- "11: moves."#s1" %3,(%1)+\n" \
+- "12: move."#s2" (%2)+,%3\n" \
+- "21: moves."#s2" %3,(%1)+\n" \
+- "22:\n" \
+- " .ifnc \""#s3"\",\"\"\n" \
+- " move."#s3" (%2)+,%3\n" \
+- "31: moves."#s3" %3,(%1)+\n" \
+- "32:\n" \
+- " .endif\n" \
+- "4:\n" \
+- "\n" \
+- " .section __ex_table,\"a\"\n" \
+- " .align 4\n" \
+- " .long 11b,5f\n" \
+- " .long 12b,5f\n" \
+- " .long 21b,5f\n" \
+- " .long 22b,5f\n" \
+- " .ifnc \""#s3"\",\"\"\n" \
+- " .long 31b,5f\n" \
+- " .long 32b,5f\n" \
+- " .endif\n" \
+- " .previous\n" \
+- "\n" \
+- " .section .fixup,\"ax\"\n" \
+- " .even\n" \
+- "5: moveq.l #"#n",%0\n" \
+- " jra 4b\n" \
+- " .previous\n" \
+- : "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp) \
+- : : "memory")
+-
+ static __always_inline unsigned long
+ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+--- a/arch/m68k/include/asm/unistd.h
++++ b/arch/m68k/include/asm/unistd.h
+@@ -1,6 +1,12 @@
++/*
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
+ #ifndef _ASM_M68K_UNISTD_H_
+ #define _ASM_M68K_UNISTD_H_
+-
+ /*
+ * This file contains the system call numbers.
+ */
+@@ -343,10 +349,11 @@
+ #define __NR_fanotify_init 337
+ #define __NR_fanotify_mark 338
+ #define __NR_prlimit64 339
++#define __NR_recvmmsg 340
+
+ #ifdef __KERNEL__
+
+-#define NR_syscalls 340
++#define NR_syscalls 341
+
+ #define __ARCH_WANT_IPC_PARSE_VERSION
+ #define __ARCH_WANT_OLD_READDIR
+--- a/arch/m68k/include/asm/virtconvert.h
++++ b/arch/m68k/include/asm/virtconvert.h
+@@ -1,5 +1,15 @@
++/*
++ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
+ #ifndef __VIRT_CONVERT__
+ #define __VIRT_CONVERT__
++#ifdef CONFIG_COLDFIRE
++#include <asm/cf_virtconvert.h>
++#else
+
+ /*
+ * Macros used for converting between virtual and physical mappings.
+@@ -45,3 +55,4 @@ static inline void *phys_to_virt(unsigne
+
+ #endif
+ #endif
++#endif
+--- a/arch/m68k/kernel/Makefile
++++ b/arch/m68k/kernel/Makefile
+@@ -2,16 +2,27 @@
+ # Makefile for the linux kernel.
+ #
+
+-ifndef CONFIG_SUN3
+- extra-y := head.o
++ifdef CONFIG_SUN3
++ extra-y := sun3-head.o vmlinux.lds
++ obj-y := entry.o signal.o ints.o time.o
+ else
+- extra-y := sun3-head.o
++ifndef CONFIG_COLDFIRE
++ extra-y := head.o vmlinux.lds
++ obj-y := entry.o signal.o traps.o ints.o time.o
++else # CONFIG_COLDFIRE
++ extra-y := vmlinux.lds
++ obj-y := time.o
++ ifdef CONFIG_M547X_8X
++ obj-$(CONFIG_PCI) += bios32_mcf548x.o
++ endif
++endif
+ endif
+-extra-y += vmlinux.lds
+
+-obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o module.o \
+- sys_m68k.o time.o setup.o m68k_ksyms.o devres.o
++obj-y += process.o ptrace.o module.o \
++ sys_m68k.o setup.o m68k_ksyms.o devres.o# semaphore.o
+
+ devres-y = ../../../kernel/irq/devres.o
+
+ obj-y$(CONFIG_MMU_SUN3) += dma.o # no, it's not a typo
++
++EXTRA_AFLAGS := -traditional
+--- a/arch/m68k/kernel/asm-offsets.c
++++ b/arch/m68k/kernel/asm-offsets.c
+@@ -2,6 +2,15 @@
+ * This program is used to generate definitions needed by
+ * assembly language modules.
+ *
++ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
++ * Add Coldfire support
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ *
+ * We use the technique used in the OSF Mach kernel code:
+ * generate asm statements containing #defines,
+ * compile this file to assembler, and then extract the
+@@ -22,6 +31,9 @@
+ int main(void)
+ {
+ /* offsets into the task struct */
++ DEFINE(TASK_STATE, offsetof(struct task_struct, state));
++ DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
++ DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
+ DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
+ DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info));
+ DEFINE(TASK_MM, offsetof(struct task_struct, mm));
+@@ -43,6 +55,7 @@ int main(void)
+ /* offsets into the thread_info struct */
+ DEFINE(TINFO_PREEMPT, offsetof(struct thread_info, preempt_count));
+ DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags));
++ DEFINE(TINFO_TP_VALUE, offsetof(struct thread_info, tp_value));
+
+ /* offsets into the pt_regs */
+ DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0));
+@@ -57,8 +70,23 @@ int main(void)
+ DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2));
+ DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc));
+ DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr));
++#ifdef CONFIG_COLDFIRE
++ /* Need to get the context out of struct mm for ASID setting */
++ DEFINE(MM_CONTEXT, offsetof(struct mm_struct, context));
++ /* Coldfire exception frame has vector *before* pc */
++ DEFINE(PT_VECTOR, offsetof(struct pt_regs, pc) - 4);
++#else
+ /* bitfields are a bit difficult */
+ DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4);
++#endif
++
++ /* offsets into the irq_handler struct */
++ DEFINE(IRQ_HANDLER, offsetof(struct irq_node, handler));
++ DEFINE(IRQ_DEVID, offsetof(struct irq_node, dev_id));
++ DEFINE(IRQ_NEXT, offsetof(struct irq_node, next));
++
++ /* offsets into the kernel_stat struct */
++ DEFINE(STAT_IRQ, offsetof(struct kernel_stat, irqs));
+
+ /* offsets into the irq_cpustat_t struct */
+ DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
+--- a/arch/m68k/kernel/dma.c
++++ b/arch/m68k/kernel/dma.c
+@@ -1,4 +1,7 @@
+ /*
++ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+@@ -12,12 +15,25 @@
+ #include <linux/scatterlist.h>
+ #include <linux/slab.h>
+ #include <linux/vmalloc.h>
+-
++#include <linux/pci.h>
+ #include <asm/pgalloc.h>
+
+ void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t flag)
+ {
++#if defined(CONFIG_M5445X) || defined(CONFIG_M547X_8X) || \
++ defined(CONFIG_M5441X)
++ /*
++ * On the M5445x platform the memory allocated with GFP_DMA
++ * is guaranteed to be DMA'able.
++ */
++ void *addr;
++
++ size = PAGE_ALIGN(size);
++ addr = kmalloc(size, GFP_DMA);
++ *handle = virt_to_phys(addr);
++ return addr;
++#else
+ struct page *page, **map;
+ pgprot_t pgprot;
+ void *addr;
+@@ -56,6 +72,7 @@ void *dma_alloc_coherent(struct device *
+ kfree(map);
+
+ return addr;
++#endif
+ }
+ EXPORT_SYMBOL(dma_alloc_coherent);
+
+@@ -63,7 +80,12 @@ void dma_free_coherent(struct device *de
+ void *addr, dma_addr_t handle)
+ {
+ pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
++#if defined(CONFIG_M5445X) || defined(CONFIG_M547X_8X) || \
++ defined(CONFIG_M5441X)
++ kfree((void *)handle);
++#else
+ vfree(addr);
++#endif
+ }
+ EXPORT_SYMBOL(dma_free_coherent);
+
+@@ -77,6 +99,9 @@ void dma_sync_single_for_device(struct d
+ case DMA_FROM_DEVICE:
+ cache_clear(handle, size);
+ break;
++ case PCI_DMA_BIDIRECTIONAL:
++ flush_dcache();
++ break;
+ default:
+ if (printk_ratelimit())
+ printk("dma_sync_single_for_device: unsupported dir %u\n", dir);
+@@ -89,16 +114,23 @@ void dma_sync_sg_for_device(struct devic
+ enum dma_data_direction dir)
+ {
+ int i;
++#ifdef CONFIG_COLDFIRE
++ struct scatterlist *_sg;
+
++ for_each_sg(sg, _sg, nents, i)
++ dma_sync_single_for_device(dev, _sg->dma_address,
++ _sg->length, dir);
++#else
+ for (i = 0; i < nents; sg++, i++)
+ dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
++#endif
+ }
+ EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+ dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
+ enum dma_data_direction dir)
+ {
+- dma_addr_t handle = virt_to_bus(addr);
++ dma_addr_t handle = (dma_addr_t)virt_to_bus(addr);
+
+ dma_sync_single_for_device(dev, handle, size, dir);
+ return handle;
+@@ -120,10 +152,19 @@ int dma_map_sg(struct device *dev, struc
+ enum dma_data_direction dir)
+ {
+ int i;
+-
++#ifdef CONFIG_COLDFIRE
++ struct scatterlist *_sg;
++#endif
++#ifndef CONFIG_COLDFIRE
+ for (i = 0; i < nents; sg++, i++) {
+ sg->dma_address = sg_phys(sg);
+ dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
++#else
++ for_each_sg(sg, _sg, nents, i) {
++ _sg->dma_address = sg_phys(_sg);
++ dma_sync_single_for_device(dev, _sg->dma_address,
++ _sg->length, dir);
++#endif
+ }
+ return nents;
+ }
+--- a/arch/m68k/kernel/process.c
++++ b/arch/m68k/kernel/process.c
+@@ -4,6 +4,15 @@
+ * Copyright (C) 1995 Hamish Macdonald
+ *
+ * 68060 fixes by Jesper Skov
++ *
++ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Kurt.Mahan@freescale.com
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
+ */
+
+ /*
+@@ -185,12 +194,21 @@ EXPORT_SYMBOL(kernel_thread);
+ void flush_thread(void)
+ {
+ unsigned long zero = 0;
++#if !defined(CONFIG_COLDFIRE)
+ set_fs(USER_DS);
+ current->thread.fs = __USER_DS;
+ if (!FPU_IS_EMU)
+ asm volatile (".chip 68k/68881\n\t"
+ "frestore %0@\n\t"
+ ".chip 68k" : : "a" (&zero));
++#else
++ set_fs(USER_DS);
++ current->thread.fs = USER_DS;
++#if defined(CONFIG_FPU)
++ if (!FPU_IS_EMU)
++ asm volatile ("frestore %0@\n\t" : : "a" (&zero));
++#endif
++#endif
+ }
+
+ /*
+@@ -258,6 +276,7 @@ int copy_thread(unsigned long clone_flag
+ * Must save the current SFC/DFC value, NOT the value when
+ * the parent was last descheduled - RGH 10-08-96
+ */
++#if !defined(CONFIG_COLDFIRE)
+ p->thread.fs = get_fs().seg;
+
+ if (!FPU_IS_EMU) {
+@@ -269,9 +288,34 @@ int copy_thread(unsigned long clone_flag
+ "fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
+ : : "m" (p->thread.fp[0]), "m" (p->thread.fpcntl[0])
+ : "memory");
++#else
++ p->thread.fs = get_fs();
++
++#if defined(CONFIG_FPU)
++ if (!FPU_IS_EMU) {
++ /* Copy the current fpu state */
++ asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0])
++ : "memory");
++
++ if (p->thread.fpstate[0]) {
++ asm volatile ("fmovemd %/fp0-%/fp7,%0"
++ : : "m" (p->thread.fp[0])
++ : "memory");
++ asm volatile ("fmovel %/fpiar,%0"
++ : : "m" (p->thread.fpcntl[0])
++ : "memory");
++ asm volatile ("fmovel %/fpcr,%0"
++ : : "m" (p->thread.fpcntl[1])
++ : "memory");
++ asm volatile ("fmovel %/fpsr,%0"
++ : : "m" (p->thread.fpcntl[2])
++ : "memory");
++ }
+ /* Restore the state in case the fpu was busy */
+ asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0]));
+ }
++#endif
++#endif
+
+ return 0;
+ }
+@@ -280,7 +324,9 @@ int copy_thread(unsigned long clone_flag
+
+ int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu)
+ {
++#if !defined(CONFIG_COLDFIRE) || defined(CONFIG_FPU)
+ char fpustate[216];
++#endif
+
+ if (FPU_IS_EMU) {
+ int i;
+@@ -297,6 +343,7 @@ int dump_fpu (struct pt_regs *regs, stru
+ }
+
+ /* First dump the fpu context to avoid protocol violation. */
++#if !defined(CONFIG_COLDFIRE)
+ asm volatile ("fsave %0" :: "m" (fpustate[0]) : "memory");
+ if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2])
+ return 0;
+@@ -307,6 +354,25 @@ int dump_fpu (struct pt_regs *regs, stru
+ asm volatile ("fmovemx %/fp0-%/fp7,%0"
+ :: "m" (fpu->fpregs[0])
+ : "memory");
++#elif defined(CONFIG_FPU)
++ asm volatile ("fsave %0" : : "m" (fpustate[0]) : "memory");
++ if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2])
++ return 0;
++
++ asm volatile ("fmovel %/fpiar,%0"
++ : : "m" (fpu->fpcntl[0])
++ : "memory");
++ asm volatile ("fmovel %/fpcr,%0"
++ : : "m" (fpu->fpcntl[1])
++ : "memory");
++ asm volatile ("fmovel %/fpsr,%0"
++ : : "m" (fpu->fpcntl[2])
++ : "memory");
++ asm volatile ("fmovemd %/fp0-%/fp7,%0"
++ : : "m" (fpu->fpregs[0])
++ : "memory");
++#endif
++
+ return 1;
+ }
+ EXPORT_SYMBOL(dump_fpu);
+--- a/arch/m68k/kernel/setup.c
++++ b/arch/m68k/kernel/setup.c
+@@ -2,7 +2,14 @@
+ * linux/arch/m68k/kernel/setup.c
+ *
+ * Copyright (C) 1995 Hamish Macdonald
+- */
++ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++*/
+
+ /*
+ * This file handles the architecture-dependent parts of system setup
+@@ -74,14 +81,26 @@ struct mem_info m68k_memory[NUM_MEMINFO]
+ EXPORT_SYMBOL(m68k_memory);
+
+ struct mem_info m68k_ramdisk;
++EXPORT_SYMBOL(m68k_ramdisk);
+
++#if !defined(CONFIG_COLDFIRE)
+ static char m68k_command_line[CL_SIZE];
++#else
++char m68k_command_line[CL_SIZE];
++unsigned long uboot_info_stk;
++EXPORT_SYMBOL(uboot_info_stk);
++#endif
+
+ void (*mach_sched_init) (irq_handler_t handler) __initdata = NULL;
+ /* machine dependent irq functions */
+ void (*mach_init_IRQ) (void) __initdata = NULL;
+ void (*mach_get_model) (char *model);
+ void (*mach_get_hardware_list) (struct seq_file *m);
++
++#ifdef CONFIG_COLDFIRE
++void (*mach_tick)(void);
++#endif
++
+ /* machine dependent timer functions */
+ unsigned long (*mach_gettimeoffset) (void);
+ int (*mach_hwclk) (int, struct rtc_time*);
+@@ -137,13 +156,17 @@ extern void config_hp300(void);
+ extern void config_q40(void);
+ extern void config_sun3x(void);
+
++#ifdef CONFIG_COLDFIRE
++void coldfire_sort_memrec(void);
++#endif
++
+ #define MASK_256K 0xfffc0000
+
+ extern void paging_init(void);
+
+ static void __init m68k_parse_bootinfo(const struct bi_record *record)
+ {
+- while (record->tag != BI_LAST) {
++ while ((record->tag != BI_LAST) && !(CONFIG_COLDFIRE)) {
+ int unknown = 0;
+ const unsigned long *data = record->data;
+
+@@ -203,6 +226,10 @@ static void __init m68k_parse_bootinfo(c
+ record->size);
+ }
+
++#ifdef CONFIG_COLDFIRE
++ coldfire_sort_memrec();
++#endif
++
+ m68k_realnum_memory = m68k_num_memory;
+ #ifdef CONFIG_SINGLE_MEMORY_CHUNK
+ if (m68k_num_memory > 1) {
+@@ -215,7 +242,9 @@ static void __init m68k_parse_bootinfo(c
+
+ void __init setup_arch(char **cmdline_p)
+ {
++#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+ int i;
++#endif
+
+ /* The bootinfo is located right after the kernel bss */
+ m68k_parse_bootinfo((const struct bi_record *)_end);
+@@ -230,9 +259,10 @@ void __init setup_arch(char **cmdline_p)
+ * We should really do our own FPU check at startup.
+ * [what do we do with buggy 68LC040s? if we have problems
+ * with them, we should add a test to check_bugs() below] */
+-#ifndef CONFIG_M68KFPU_EMU_ONLY
++#if !defined(CONFIG_M68KFPU_EMU_ONLY) && defined(CONFIG_FPU)
+ /* clear the fpu if we have one */
+- if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060)) {
++ if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060|
++ FPU_CFV4E)) {
+ volatile int zero = 0;
+ asm volatile ("frestore %0" : : "m" (zero));
+ }
+@@ -320,16 +350,17 @@ void __init setup_arch(char **cmdline_p)
+ config_sun3x();
+ break;
+ #endif
++#ifdef CONFIG_COLDFIRE
++ case MACH_CFMMU:
++ config_coldfire();
++ break;
++#endif
+ default:
+ panic("No configuration setup");
+ }
+
+ paging_init();
+
+-#ifndef CONFIG_SUN3
+- for (i = 1; i < m68k_num_memory; i++)
+- free_bootmem_node(NODE_DATA(i), m68k_memory[i].addr,
+- m68k_memory[i].size);
+ #ifdef CONFIG_BLK_DEV_INITRD
+ if (m68k_ramdisk.size) {
+ reserve_bootmem_node(__virt_to_node(phys_to_virt(m68k_ramdisk.addr)),
+@@ -341,6 +372,10 @@ void __init setup_arch(char **cmdline_p)
+ }
+ #endif
+
++#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
++ for (i = 1; i < m68k_num_memory; i++)
++ free_bootmem_node(NODE_DATA(i), m68k_memory[i].addr,
++ m68k_memory[i].size);
+ #ifdef CONFIG_ATARI
+ if (MACH_IS_ATARI)
+ atari_stram_reserve_pages((void *)availmem);
+@@ -353,12 +388,22 @@ void __init setup_arch(char **cmdline_p)
+
+ #endif /* !CONFIG_SUN3 */
+
++#ifdef CONFIG_COLDFIRE
++ mmu_context_init();
++#endif
++
+ /* set ISA defs early as possible */
+ #if defined(CONFIG_ISA) && defined(MULTI_ISA)
+ if (MACH_IS_Q40) {
+ isa_type = ISA_TYPE_Q40;
+ isa_sex = 0;
+ }
++#ifdef CONFIG_GG2
++ if (MACH_IS_AMIGA && AMIGAHW_PRESENT(GG2_ISA)) {
++ isa_type = ISA_TYPE_GG2;
++ isa_sex = 0;
++ }
++#endif
+ #ifdef CONFIG_AMIGA_PCMCIA
+ if (MACH_IS_AMIGA && AMIGAHW_PRESENT(PCMCIA)) {
+ isa_type = ISA_TYPE_AG;
+@@ -377,6 +422,7 @@ static int show_cpuinfo(struct seq_file
+ #define LOOP_CYCLES_68030 (8)
+ #define LOOP_CYCLES_68040 (3)
+ #define LOOP_CYCLES_68060 (1)
++#define LOOP_CYCLES_COLDFIRE (2)
+
+ if (CPU_IS_020) {
+ cpu = "68020";
+@@ -390,6 +436,9 @@ static int show_cpuinfo(struct seq_file
+ } else if (CPU_IS_060) {
+ cpu = "68060";
+ clockfactor = LOOP_CYCLES_68060;
++ } else if (CPU_IS_CFV4E) {
++ cpu = "ColdFire V4e";
++ clockfactor = LOOP_CYCLES_COLDFIRE;
+ } else {
+ cpu = "680x0";
+ clockfactor = 0;
+@@ -408,6 +457,8 @@ static int show_cpuinfo(struct seq_file
+ fpu = "68060";
+ else if (m68k_fputype & FPU_SUNFPA)
+ fpu = "Sun FPA";
++ else if (m68k_fputype & FPU_CFV4E)
++ fpu = "ColdFire V4e";
+ else
+ fpu = "none";
+ #endif
+@@ -424,6 +475,8 @@ static int show_cpuinfo(struct seq_file
+ mmu = "Sun-3";
+ else if (m68k_mmutype & MMU_APOLLO)
+ mmu = "Apollo";
++ else if (m68k_mmutype & MMU_CFV4E)
++ mmu = "ColdFire";
+ else
+ mmu = "unknown";
+
+@@ -506,7 +559,8 @@ module_init(proc_hardware_init);
+
+ void check_bugs(void)
+ {
+-#ifndef CONFIG_M68KFPU_EMU
++#if !defined(CONFIG_M68KFPU_EMU) && !defined(CONFIG_M5445X) && \
++ !defined(CONFIG_M5441X)
+ if (m68k_fputype == 0) {
+ printk(KERN_EMERG "*** YOU DO NOT HAVE A FLOATING POINT UNIT, "
+ "WHICH IS REQUIRED BY LINUX/M68K ***\n");
+--- a/arch/m68k/kernel/sys_m68k.c
++++ b/arch/m68k/kernel/sys_m68k.c
+@@ -1,5 +1,12 @@
+ /*
+ * linux/arch/m68k/kernel/sys_m68k.c
++ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the Linux/m68k
+@@ -29,6 +36,9 @@
+ #include <asm/unistd.h>
+ #include <linux/elf.h>
+ #include <asm/tlb.h>
++#ifdef CONFIG_COLDFIRE
++#include <asm/cacheflush.h>
++#endif
+
+ asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
+ unsigned long error_code);
+@@ -45,6 +55,59 @@ asmlinkage long sys_mmap2(unsigned long
+ return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
+ }
+
++/*
++ * Perform the select(nd, in, out, ex, tv) and mmap() system
++ * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
++ * handle more than 4 system call parameters, so these system calls
++ * used a memory block for parameter passing..
++ */
++
++struct mmap_arg_struct {
++ unsigned long addr;
++ unsigned long len;
++ unsigned long prot;
++ unsigned long flags;
++ unsigned long fd;
++ unsigned long offset;
++};
++
++asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
++{
++ struct mmap_arg_struct a;
++ int error = -EFAULT;
++
++ if (copy_from_user(&a, arg, sizeof(a)))
++ goto out;
++
++ error = -EINVAL;
++ if (a.offset & ~PAGE_MASK)
++ goto out;
++
++ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
++
++ error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags,
++ a.fd, a.offset >> PAGE_SHIFT);
++out:
++ return error;
++}
++
++struct sel_arg_struct {
++ unsigned long n;
++ fd_set __user *inp, *outp, *exp;
++ struct timeval __user *tvp;
++};
++
++asmlinkage int old_select(struct sel_arg_struct __user *arg)
++{
++ struct sel_arg_struct a;
++
++ if (copy_from_user(&a, arg, sizeof(a)))
++ return -EFAULT;
++ /* sys_select() does the appropriate kernel locking */
++ return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
++}
++
++#ifndef CONFIG_COLDFIRE
+ /* Convert virtual (user) address VADDR to physical address PADDR */
+ #define virt_to_phys_040(vaddr) \
+ ({ \
+@@ -368,6 +431,7 @@ cache_flush_060 (unsigned long addr, int
+ }
+ return 0;
+ }
++#endif /* CONFIG_COLDFIRE */
+
+ /* sys_cacheflush -- flush (part of) the processor cache. */
+ asmlinkage int
+@@ -399,6 +463,7 @@ sys_cacheflush (unsigned long addr, int
+ goto out;
+ }
+
++#ifndef CONFIG_COLDFIRE
+ if (CPU_IS_020_OR_030) {
+ if (scope == FLUSH_SCOPE_LINE && len < 256) {
+ unsigned long cacr;
+@@ -443,6 +508,16 @@ sys_cacheflush (unsigned long addr, int
+ ret = cache_flush_060 (addr, scope, cache, len);
+ }
+ }
++#else /* CONFIG_COLDFIRE */
++ if ((cache & FLUSH_CACHE_INSN) && (cache & FLUSH_CACHE_DATA))
++ flush_bcache();
++ else if (cache & FLUSH_CACHE_INSN)
++ flush_icache();
++ else
++ flush_dcache();
++
++ ret = 0;
++#endif /* CONFIG_COLDFIRE */
+ out:
+ return ret;
+ }
+@@ -474,9 +549,14 @@ asmlinkage unsigned long sys_get_thread_
+ return current_thread_info()->tp_value;
+ }
+
++extern void *_vdso_tp;
++
+ asmlinkage int sys_set_thread_area(unsigned long tp)
+ {
+ current_thread_info()->tp_value = tp;
++#ifdef CONFIG_VDSO
++ *(unsigned long *)_vdso_tp = tp;
++#endif
+ return 0;
+ }
+
+--- a/arch/m68k/kernel/time.c
++++ b/arch/m68k/kernel/time.c
+@@ -2,6 +2,14 @@
+ * linux/arch/m68k/kernel/time.c
+ *
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
++ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
++ * Alison Wang b18965@freescale.com
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
+ *
+ * This file contains the m68k-specific time handling details.
+ * Most of the stuff is located in the machine specific files.
+@@ -9,9 +17,12 @@
+ * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
+ * "A Kernel Model for Precision Timekeeping" by Dave Mills
+ */
+-
++#include <linux/clk.h>
++#include <linux/clocksource.h>
++#include <linux/clockchips.h>
+ #include <linux/errno.h>
+ #include <linux/module.h>
++#include <linux/sysdev.h>
+ #include <linux/sched.h>
+ #include <linux/kernel.h>
+ #include <linux/param.h>
+@@ -27,6 +38,7 @@
+ #include <linux/time.h>
+ #include <linux/timex.h>
+ #include <linux/profile.h>
++#include <asm/mcfsim.h>
+
+ static inline int set_rtc_mmss(unsigned long nowtime)
+ {
+@@ -35,12 +47,18 @@ static inline int set_rtc_mmss(unsigned
+ return -1;
+ }
+
++#ifndef CONFIG_GENERIC_CLOCKEVENTS
+ /*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick
+ */
+ static irqreturn_t timer_interrupt(int irq, void *dummy)
+ {
++#ifdef CONFIG_COLDFIRE
++ /* kick hardware timer if necessary */
++ if (mach_tick)
++ mach_tick();
++#endif
+ do_timer(1);
+ update_process_times(user_mode(get_irq_regs()));
+ profile_tick(CPU_PROFILING);
+@@ -91,11 +109,133 @@ void __init time_init(void)
+ {
+ mach_sched_init(timer_interrupt);
+ }
++#endif
+
++#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
+ u32 arch_gettimeoffset(void)
+ {
+ return mach_gettimeoffset() * 1000;
+ }
++#endif
++
++#ifdef CONFIG_GENERIC_CLOCKEVENTS
++
++extern unsigned long long sys_dtim2_read(void);
++extern void sys_dtim2_init(void);
++static int cfv4_set_next_event(unsigned long evt,
++ struct clock_event_device *dev);
++static void cfv4_set_mode(enum clock_event_mode mode,
++ struct clock_event_device *dev);
++
++#if defined(CONFIG_M5445X) || defined(CONFIG_M5441X)
++#define FREQ (MCF_BUSCLK / 16)
++#else
++#define FREQ (MCF_BUSCLK)
++#endif
++
++/*
++ * Clock Event setup
++ */
++static struct clock_event_device clockevent_cfv4 = {
++ .name = "CFV4 timer2even",
++ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
++ .rating = 200,
++ .shift = 20,
++ .set_mode = cfv4_set_mode,
++ .set_next_event = cfv4_set_next_event,
++};
++
++static int cfv4_set_next_event(unsigned long evt,
++ struct clock_event_device *dev)
++{
++ return 0;
++}
++
++static void cfv4_set_mode(enum clock_event_mode mode,
++ struct clock_event_device *dev)
++{
++ if (mode != CLOCK_EVT_MODE_ONESHOT)
++ cfv4_set_next_event((FREQ / HZ), dev);
++}
++
++static int __init cfv4_clockevent_init(void)
++{
++ clockevent_cfv4.mult =
++ div_sc(FREQ, NSEC_PER_SEC,
++ clockevent_cfv4.shift);
++ clockevent_cfv4.max_delta_ns =
++ clockevent_delta2ns((FREQ / HZ),
++ &clockevent_cfv4);
++ clockevent_cfv4.min_delta_ns =
++ clockevent_delta2ns(1, &clockevent_cfv4);
++
++ clockevent_cfv4.cpumask = &cpumask_of_cpu(0);
++
++ printk(KERN_INFO "timer: register clockevent\n");
++ clockevents_register_device(&clockevent_cfv4);
++
++ return 0;
++}
++
++/*
++ * clocksource setup
++ */
++
++struct clocksource clocksource_cfv4 = {
++ .name = "ColdfireV4",
++ .rating = 250,
++ .mask = CLOCKSOURCE_MASK(32),
++ .read = sys_dtim2_read,
++ .shift = 20,
++ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
++};
++
++/*
++ * Initialize time subsystem. Called from linux/init/main.c
++ */
++void __init time_init(void)
++{
++ int ret;
++
++ printk(KERN_INFO "Initializing time\n");
++
++ cfv4_clockevent_init();
++ /* initialize the system timer */
++ sys_dtim2_init();
++
++ /* JKM */
++ clocksource_cfv4.mult = clocksource_hz2mult(FREQ,
++ clocksource_cfv4.shift);
++
++ /* register our clocksource */
++ ret = clocksource_register(&clocksource_cfv4);
++ if (ret)
++ printk(KERN_ERR "timer: unable to "
++ "register clocksource - %d\n", ret);
++}
++
++/*
++ * sysfs pieces
++ */
++
++static struct sysdev_class timer_class = {
++ .name = "timer",
++};
++
++static struct sys_device timer_device = {
++ .id = 0,
++ .cls = &timer_class,
++};
++
++static int __init timer_init_sysfs(void)
++{
++ int err = sysdev_class_register(&timer_class);
++ if (!err)
++ err = sysdev_register(&timer_device);
++ return err;
++}
++device_initcall(timer_init_sysfs);
++#endif /* CONFIG_GENERIC_CLOCKEVENTS */
+
+ static int __init rtc_init(void)
+ {
+--- /dev/null
++++ b/arch/m68k/kernel/vmlinux-cf.lds
+@@ -0,0 +1,142 @@
++/* ld script to make m68k Coldfire Linux kernel
++ *
++ * Derived from arch/m68k/kernel/vmlinux-std.lds
++ *
++ * Updated 11/26/2007 for new CodeSourcery toolset
++ * by Kurt Mahan <kmahan@freescale.com>
++ *
++ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
++
++#define LOAD_OFFSET 0x00000000
++
++#include <asm-generic/vmlinux.lds.h>
++#include <asm/page_offset.h>
++
++#define START_OFFSET 0x00020000
++#define IMAGE_START PAGE_OFFSET_RAW + START_OFFSET
++
++OUTPUT_FORMAT("elf32-m68k", "elf32-m68k", "elf32-m68k")
++OUTPUT_ARCH(m68k)
++ENTRY(_stext)
++jiffies = jiffies_64 + 4;
++
++SECTIONS
++{
++ . = IMAGE_START;
++ .text.head : AT(ADDR(.text.head) - LOAD_OFFSET) {
++ _text = .; /* Text and read-only data */
++ *(.text.head)
++ } :text = 0x4e75
++
++ .text : AT(ADDR(.text) - LOAD_OFFSET) {
++ TEXT_TEXT
++ SCHED_TEXT
++ LOCK_TEXT
++ *(.fixup)
++ *(.gnu.warning)
++ } :text = 0x4e75
++ _etext = .; /* End of text section */
++
++ . = ALIGN(16);
++ __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
++ __start___ex_table = .;
++ *(__ex_table)
++ __stop___ex_table = .;
++ }
++
++ RODATA
++
++ . = ALIGN(8192);
++ .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */
++ DATA_DATA
++ CONSTRUCTORS
++ } :data
++
++
++ . = ALIGN(16);
++ .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET ) {
++ *(.data.cacheline_aligned)
++ } :data
++
++ _edata = .; /* End of data section */
++
++ NOTES /* support ld --build-id */
++
++ . = ALIGN(8192); /* Initrd */
++ .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
++ __init_begin = .;
++ _sinittext = .;
++ *(.init.text)
++ _einittext = .;
++ }
++
++ .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
++ *(.init.data)
++ }
++
++ . = ALIGN(16);
++ .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
++ __setup_start = .;
++ *(.init.setup)
++ __setup_end = .;
++ }
++
++ .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
++ __initcall_start = .;
++ INITCALLS
++ __initcall_end = .;
++ }
++
++ .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
++ __con_initcall_start = .;
++ *(.con_initcall.init)
++ __con_initcall_end = .;
++ }
++
++ SECURITY_INIT
++
++#ifdef CONFIG_BLK_DEV_INITRD
++ . = ALIGN(8192);
++ .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
++ __initramfs_start = .;
++ *(.init.ramfs)
++ __initramfs_end = .;
++ }
++#endif
++
++ . = ALIGN(8192);
++ __init_end = .;
++
++ .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
++ *(.data.init_task) /* The initial task and kernel stack */
++ }
++
++ _sbss = .;
++ .bss : AT(ADDR(.bss) - LOAD_OFFSET) { /* BSS */
++ *(.bss)
++ }
++ _ebss = .;
++
++ _end = . ;
++
++ __ctors_start = . ;
++
++ __ctors_end = . ;
++
++ /* Sections to be discarded */
++ /DISCARD/ : {
++ *(.exit.text)
++ *(.exit.data)
++ *(.exitcall.exit)
++ }
++
++ /* Stabs debugging sections. */
++ STABS_DEBUG
++}
+--- a/arch/m68k/kernel/vmlinux.lds.S
++++ b/arch/m68k/kernel/vmlinux.lds.S
+@@ -1,10 +1,20 @@
++/*
++ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
+ PHDRS
+ {
+- text PT_LOAD FILEHDR PHDRS FLAGS (7);
++ headers PT_PHDR PHDRS ;
++ text PT_LOAD FILEHDR PHDRS FLAGS (5);
+ data PT_LOAD FLAGS (7);
+ }
+ #ifdef CONFIG_SUN3
+ #include "vmlinux-sun3.lds"
++#elif CONFIG_COLDFIRE
++#include "vmlinux-cf.lds"
+ #else
+ #include "vmlinux-std.lds"
+ #endif
+--- a/arch/m68k/lib/checksum.c
++++ b/arch/m68k/lib/checksum.c
+@@ -30,6 +30,10 @@
+ * 1998/8/31 Andreas Schwab:
+ * Zero out rest of buffer on exception in
+ * csum_partial_copy_from_user.
++ *
++ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
+ */
+
+ #include <linux/module.h>
+@@ -39,8 +43,132 @@
+ * computes a partial checksum, e.g. for TCP/UDP fragments
+ */
+
++#ifdef CONFIG_COLDFIRE
++
++static inline unsigned short from32to16(unsigned long x)
++{
++ /* add up 16-bit and 16-bit for 16+c bit */
++ x = (x & 0xffff) + (x >> 16);
++ /* add up carry.. */
++ x = (x & 0xffff) + (x >> 16);
++ return x;
++}
++
++static unsigned long do_csum(const unsigned char *buff, int len)
++{
++ int odd, count;
++ unsigned long result = 0;
++
++ if (len <= 0)
++ goto out;
++ odd = 1 & (unsigned long) buff;
++ if (odd) {
++ result = *buff;
++ len--;
++ buff++;
++ }
++ count = len >> 1; /* nr of 16-bit words.. */
++ if (count) {
++ if (2 & (unsigned long) buff) {
++ result += *(unsigned short *) buff;
++ count--;
++ len -= 2;
++ buff += 2;
++ }
++ count >>= 1; /* nr of 32-bit words.. */
++ if (count) {
++ unsigned long carry = 0;
++ do {
++ unsigned long w = *(unsigned long *) buff;
++ count--;
++ buff += 4;
++ result += carry;
++ result += w;
++ carry = (w > result);
++ } while (count);
++ result += carry;
++ result = (result & 0xffff) + (result >> 16);
++ }
++ if (len & 2) {
++ result += *(unsigned short *) buff;
++ buff += 2;
++ }
++ }
++ if (len & 1)
++ result += (*buff << 8);
++ result = from32to16(result);
++ if (odd)
++ result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
++out:
++ return result;
++}
++
++/*
++ * This is a version of ip_compute_csum() optimized for IP headers,
++ * which always checksum on 4 octet boundaries.
++ */
++__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
++{
++ return ~do_csum(iph, ihl*4);
++}
++EXPORT_SYMBOL(ip_fast_csum);
++
++/*
++ * computes the checksum of a memory block at buff, length len,
++ * and adds in "sum" (32-bit)
++ *
++ * returns a 32-bit number suitable for feeding into itself
++ * or csum_tcpudp_magic
++ *
++ * this function must be called with even lengths, except
++ * for the last fragment, which may be odd
++ *
++ * it's best to have buff aligned on a 32-bit boundary
++ */
+ __wsum csum_partial(const void *buff, int len, __wsum sum)
+ {
++ unsigned int result = do_csum(buff, len);
++
++ /* add in old sum, and carry.. */
++ result += sum;
++ if (sum > result)
++ result += 1;
++ return result;
++}
++EXPORT_SYMBOL(csum_partial);
++
++/*
++ * copy from fs while checksumming, otherwise like csum_partial
++ */
++
++__wsum
++csum_partial_copy_from_user(const void __user *src, void *dst, int len,
++ __wsum sum, int *csum_err)
++{
++ if (csum_err)
++ *csum_err = 0;
++ memcpy(dst, src, len);
++ return csum_partial(dst, len, sum);
++}
++EXPORT_SYMBOL(csum_partial_copy_from_user);
++
++/*
++ * copy from ds while checksumming, otherwise like csum_partial
++ */
++
++__wsum
++csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
++{
++ memcpy(dst, src, len);
++ return csum_partial(dst, len, sum);
++}
++EXPORT_SYMBOL(csum_partial_copy_nocheck);
++
++#else /* !CONFIG_COLDFIRE */
++
++unsigned int
++csum_partial(const unsigned char *buff, int len, unsigned int sum)
++{
+ unsigned long tmp1, tmp2;
+ /*
+ * Experiments with ethernet and slip connections show that buff
+@@ -423,3 +551,4 @@ csum_partial_copy_nocheck(const void *sr
+ return(sum);
+ }
+ EXPORT_SYMBOL(csum_partial_copy_nocheck);
++#endif /* CONFIG_COLDFIRE */
+--- a/arch/m68k/lib/muldi3.c
++++ b/arch/m68k/lib/muldi3.c
+@@ -1,6 +1,9 @@
+ /* muldi3.c extracted from gcc-2.7.2.3/libgcc2.c and
+ gcc-2.7.2.3/longlong.h which is: */
+ /* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
++ Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ Jason Jin Jason.Jin@freescale.com
++ Shrek Wu B16972@freescale.com
+
+ This file is part of GNU CC.
+
+@@ -21,12 +24,22 @@ Boston, MA 02111-1307, USA. */
+
+ #define BITS_PER_UNIT 8
+
++#ifdef CONFIG_COLDFIRE
++#define umul_ppmm(w1, w0, u, v) \
++ do { \
++ unsigned long long x; \
++ x = (unsigned long long)u * v; \
++ w0 = (unsigned long)(x & 0x00000000ffffffff); \
++ w1 = (unsigned long)(x & 0xffffffff00000000) >> 32; \
++ } while (0)
++#else /* CONFIG_COLDFIRE */
+ #define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("mulu%.l %3,%1:%0" \
+ : "=d" ((USItype)(w0)), \
+ "=d" ((USItype)(w1)) \
+ : "%0" ((USItype)(u)), \
+ "dmi" ((USItype)(v)))
++#endif /* CONFIG_COLDFIRE */
+
+ #define __umulsidi3(u, v) \
+ ({DIunion __w; \
+--- a/arch/m68k/lib/string.c
++++ b/arch/m68k/lib/string.c
+@@ -1,4 +1,8 @@
+ /*
++ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
++ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+@@ -21,6 +25,7 @@ char *strcat(char *dest, const char *src
+ }
+ EXPORT_SYMBOL(strcat);
+
++#ifndef CONFIG_COLDFIRE
+ void *memset(void *s, int c, size_t count)
+ {
+ void *xs = s;
+@@ -149,6 +154,69 @@ void *memcpy(void *to, const void *from,
+ }
+ EXPORT_SYMBOL(memcpy);
+
++#else /* CONFIG_COLDFIRE */
++
++void *memset(void *s, int c, size_t count)
++{
++ unsigned long x;
++ void *originalTo = s;
++
++ for (x = 0; x < count; x++)
++ *(unsigned char *)s++ = (unsigned char)c;
++
++ return originalTo;
++}
++EXPORT_SYMBOL(memset);
++
++void *memcpy(void *to, const void *from, size_t n)
++{
++ void *xto = to;
++ size_t temp;
++
++ if (!n)
++ return xto;
++ if ((long) to & 1) {
++ char *cto = to;
++ const char *cfrom = from;
++ *cto++ = *cfrom++;
++ to = cto;
++ from = cfrom;
++ n--;
++ }
++ if (n > 2 && (long) to & 2) {
++ short *sto = to;
++ const short *sfrom = from;
++ *sto++ = *sfrom++;
++ to = sto;
++ from = sfrom;
++ n -= 2;
++ }
++ temp = n >> 2;
++ if (temp) {
++ long *lto = to;
++ const long *lfrom = from;
++ for (; temp; temp--)
++ *lto++ = *lfrom++;
++ to = lto;
++ from = lfrom;
++ }
++ if (n & 2) {
++ short *sto = to;
++ const short *sfrom = from;
++ *sto++ = *sfrom++;
++ to = sto;
++ from = sfrom;
++ }
++ if (n & 1) {
++ char *cto = to;
++ const char *cfrom = from;
++ *cto = *cfrom;
++ }
++ return xto;
++}
++EXPORT_SYMBOL(memcpy);
++#endif /* CONFIG_COLDFIRE */
++
+ void *memmove(void *dest, const void *src, size_t n)
+ {
+ void *xdest = dest;
+--- a/arch/m68k/lib/uaccess.c
++++ b/arch/m68k/lib/uaccess.c
+@@ -1,10 +1,15 @@
+ /*
++ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
++ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+ #include <linux/module.h>
++#ifndef CONFIG_COLDFIRE
+ #include <asm/uaccess.h>
+
+ unsigned long __generic_copy_from_user(void *to, const void __user *from,
+@@ -220,3 +225,245 @@ unsigned long __clear_user(void __user *
+ return res;
+ }
+ EXPORT_SYMBOL(__clear_user);
++
++#else /* CONFIG_COLDFIRE */
++
++#include <asm/cf_uaccess.h>
++
++unsigned long __generic_copy_from_user(void *to, const void *from,
++ unsigned long n)
++{
++ unsigned long tmp;
++ __asm__ __volatile__
++ (" tstl %2\n"
++ " jeq 2f\n"
++ "1: movel (%1)+,%3\n"
++ " movel %3,(%0)+\n"
++ " subql #1,%2\n"
++ " jne 1b\n"
++ "2: movel %4,%2\n"
++ " bclr #1,%2\n"
++ " jeq 4f\n"
++ "3: movew (%1)+,%3\n"
++ " movew %3,(%0)+\n"
++ "4: bclr #0,%2\n"
++ " jeq 6f\n"
++ "5: moveb (%1)+,%3\n"
++ " moveb %3,(%0)+\n"
++ "6:\n"
++ ".section .fixup,\"ax\"\n"
++ " .even\n"
++ "7: movel %2,%%d0\n"
++ "71:clrl (%0)+\n"
++ " subql #1,%%d0\n"
++ " jne 71b\n"
++ " lsll #2,%2\n"
++ " addl %4,%2\n"
++ " btst #1,%4\n"
++ " jne 81f\n"
++ " btst #0,%4\n"
++ " jne 91f\n"
++ " jra 6b\n"
++ "8: addql #2,%2\n"
++ "81:clrw (%0)+\n"
++ " btst #0,%4\n"
++ " jne 91f\n"
++ " jra 6b\n"
++ "9: addql #1,%2\n"
++ "91:clrb (%0)+\n"
++ " jra 6b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 1b,7b\n"
++ " .long 3b,8b\n"
++ " .long 5b,9b\n"
++ ".previous"
++ : "=a"(to), "=a"(from), "=d"(n), "=&d"(tmp)
++ : "d"(n & 3), "0"(to), "1"(from), "2"(n/4)
++ : "d0", "memory");
++ return n;
++}
++EXPORT_SYMBOL(__generic_copy_from_user);
++
++
++unsigned long __generic_copy_to_user(void *to, const void *from,
++ unsigned long n)
++{
++ unsigned long tmp;
++ __asm__ __volatile__
++ (" tstl %2\n"
++ " jeq 3f\n"
++ "1: movel (%1)+,%3\n"
++ "22:movel %3,(%0)+\n"
++ "2: subql #1,%2\n"
++ " jne 1b\n"
++ "3: movel %4,%2\n"
++ " bclr #1,%2\n"
++ " jeq 4f\n"
++ " movew (%1)+,%3\n"
++ "24:movew %3,(%0)+\n"
++ "4: bclr #0,%2\n"
++ " jeq 5f\n"
++ " moveb (%1)+,%3\n"
++ "25:moveb %3,(%0)+\n"
++ "5:\n"
++ ".section .fixup,\"ax\"\n"
++ " .even\n"
++ "60:addql #1,%2\n"
++ "6: lsll #2,%2\n"
++ " addl %4,%2\n"
++ " jra 5b\n"
++ "7: addql #2,%2\n"
++ " jra 5b\n"
++ "8: addql #1,%2\n"
++ " jra 5b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 1b,60b\n"
++ " .long 22b,6b\n"
++ " .long 2b,6b\n"
++ " .long 24b,7b\n"
++ " .long 3b,60b\n"
++ " .long 4b,7b\n"
++ " .long 25b,8b\n"
++ " .long 5b,8b\n"
++ ".previous"
++ : "=a"(to), "=a"(from), "=d"(n), "=&d"(tmp)
++ : "r"(n & 3), "0"(to), "1"(from), "2"(n / 4)
++ : "memory");
++ return n;
++}
++EXPORT_SYMBOL(__generic_copy_to_user);
++
++/*
++ * Copy a null terminated string from userspace.
++ */
++
++long strncpy_from_user(char *dst, const char *src, long count)
++{
++ long res = -EFAULT;
++ if (!(access_ok(VERIFY_READ, src, 1))) /* --tym-- */
++ return res;
++ if (count == 0)
++ return count;
++ __asm__ __volatile__
++ ("1: moveb (%2)+,%%d0\n"
++ "12:moveb %%d0,(%1)+\n"
++ " jeq 2f\n"
++ " subql #1,%3\n"
++ " jne 1b\n"
++ "2: subl %3,%0\n"
++ "3:\n"
++ ".section .fixup,\"ax\"\n"
++ " .even\n"
++ "4: movel %4,%0\n"
++ " jra 3b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 1b,4b\n"
++ " .long 12b,4b\n"
++ ".previous"
++ : "=d"(res), "=a"(dst), "=a"(src), "=d"(count)
++ : "i"(-EFAULT), "0"(count), "1"(dst), "2"(src), "3"(count)
++ : "d0", "memory");
++ return res;
++}
++EXPORT_SYMBOL(strncpy_from_user);
++
++/*
++ * Return the size of a string (including the ending 0)
++ *
++ * Return 0 on exception, a value greater than N if too long
++ */
++long strnlen_user(const char *src, long n)
++{
++ long res = -EFAULT;
++ if (!(access_ok(VERIFY_READ, src, 1))) /* --tym-- */
++ return res;
++
++ res = -(long)src;
++ __asm__ __volatile__
++ ("1:\n"
++ " tstl %2\n"
++ " jeq 3f\n"
++ "2: moveb (%1)+,%%d0\n"
++ "22:\n"
++ " subql #1,%2\n"
++ " tstb %%d0\n"
++ " jne 1b\n"
++ " jra 4f\n"
++ "3:\n"
++ " addql #1,%0\n"
++ "4:\n"
++ " addl %1,%0\n"
++ "5:\n"
++ ".section .fixup,\"ax\"\n"
++ " .even\n"
++ "6: moveq %3,%0\n"
++ " jra 5b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 2b,6b\n"
++ " .long 22b,6b\n"
++ ".previous"
++ : "=d"(res), "=a"(src), "=d"(n)
++ : "i"(0), "0"(res), "1"(src), "2"(n)
++ : "d0");
++ return res;
++}
++EXPORT_SYMBOL(strnlen_user);
++
++
++/*
++ * Zero Userspace
++ */
++
++unsigned long __clear_user(void *to, unsigned long n)
++{
++ __asm__ __volatile__
++ (" tstl %1\n"
++ " jeq 3f\n"
++ "1: movel %3,(%0)+\n"
++ "2: subql #1,%1\n"
++ " jne 1b\n"
++ "3: movel %2,%1\n"
++ " bclr #1,%1\n"
++ " jeq 4f\n"
++ "24:movew %3,(%0)+\n"
++ "4: bclr #0,%1\n"
++ " jeq 5f\n"
++ "25:moveb %3,(%0)+\n"
++ "5:\n"
++ ".section .fixup,\"ax\"\n"
++ " .even\n"
++ "61:addql #1,%1\n"
++ "6: lsll #2,%1\n"
++ " addl %2,%1\n"
++ " jra 5b\n"
++ "7: addql #2,%1\n"
++ " jra 5b\n"
++ "8: addql #1,%1\n"
++ " jra 5b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 1b,61b\n"
++ " .long 2b,6b\n"
++ " .long 3b,61b\n"
++ " .long 24b,7b\n"
++ " .long 4b,7b\n"
++ " .long 25b,8b\n"
++ " .long 5b,8b\n"
++ ".previous"
++ : "=a"(to), "=d"(n)
++ : "r"(n & 3), "d"(0), "0"(to), "1"(n/4));
++ return n;
++}
++EXPORT_SYMBOL(__clear_user);
++
++#endif /* CONFIG_COLDFIRE */
++
+--- a/arch/m68k/mm/Makefile
++++ b/arch/m68k/mm/Makefile
+@@ -6,3 +6,5 @@ obj-y := cache.o init.o fault.o hwtest.
+
+ obj-$(CONFIG_MMU_MOTOROLA) += kmap.o memory.o motorola.o
+ obj-$(CONFIG_MMU_SUN3) += sun3kmap.o sun3mmu.o
++obj-$(CONFIG_MMU_CFV4E) += cf-mmu.o kmap.o memory.o
++obj-$(CONFIG_SRAM) += cf-sram.o
+--- a/arch/m68k/mm/cache.c
++++ b/arch/m68k/mm/cache.c
+@@ -4,13 +4,24 @@
+ * Instruction cache handling
+ *
+ * Copyright (C) 1995 Hamish Macdonald
++ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
+ */
+
+ #include <linux/module.h>
+ #include <asm/pgalloc.h>
+ #include <asm/traps.h>
+
++#ifdef CONFIG_COLDFIRE
++#include <asm/cfcache.h>
++#endif /* CONFIG_COLDFIRE */
+
++#ifndef CONFIG_COLDFIRE
+ static unsigned long virt_to_phys_slow(unsigned long vaddr)
+ {
+ if (CPU_IS_060) {
+@@ -69,11 +80,16 @@ static unsigned long virt_to_phys_slow(u
+ }
+ return 0;
+ }
++#endif /* CONFIG_COLDFIRE */
++
+
+ /* Push n pages at kernel virtual address and clear the icache */
+ /* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
+ void flush_icache_range(unsigned long address, unsigned long endaddr)
+ {
++#ifdef CONFIG_COLDFIRE
++ flush_icache();
++#else /* !CONFIG_COLDFIRE */
+
+ if (CPU_IS_040_OR_060) {
+ address &= PAGE_MASK;
+@@ -94,9 +110,11 @@ void flush_icache_range(unsigned long ad
+ : "=&d" (tmp)
+ : "di" (FLUSH_I));
+ }
++#endif /* CONFIG_COLDFIRE */
+ }
+ EXPORT_SYMBOL(flush_icache_range);
+
++#ifndef CONFIG_COLDFIRE
+ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+ unsigned long addr, int len)
+ {
+@@ -115,4 +133,5 @@ void flush_icache_user_range(struct vm_a
+ : "di" (FLUSH_I));
+ }
+ }
++#endif /* CONFIG_COLDFIRE */
+
+--- /dev/null
++++ b/arch/m68k/mm/cf-mmu.c
+@@ -0,0 +1,311 @@
++/*
++ * linux/arch/m68k/mm/cf-mmu.c
++ *
++ * Based upon linux/arch/m68k/mm/sun3mmu.c
++ * Based upon linux/arch/ppc/mm/mmu_context.c
++ *
++ * Implementations of mm routines specific to the Coldfire MMU.
++ *
++ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ */
++
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/swap.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/init.h>
++#ifdef CONFIG_BLK_DEV_RAM
++#include <linux/blkdev.h>
++#endif
++#include <linux/bootmem.h>
++
++#include <asm/setup.h>
++#include <asm/uaccess.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++#include <asm/system.h>
++#include <asm/machdep.h>
++#include <asm/io.h>
++#include <asm/mmu_context.h>
++#include <asm/cf_pgalloc.h>
++
++#include <asm/coldfire.h>
++#include <asm/tlbflush.h>
++
++#define KMAPAREA(x) (((x) >= VMALLOC_START) && ((x) < KMAP_END))
++
++#undef DEBUG
++
++#ifdef CONFIG_VDSO
++unsigned long next_mmu_context;
++#else
++mm_context_t next_mmu_context;
++#endif
++
++unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
++
++atomic_t nr_free_contexts;
++struct mm_struct *context_mm[LAST_CONTEXT+1];
++const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";
++
++extern unsigned long num_pages;
++EXPORT_SYMBOL(num_pages);
++/*
++ * Free memory used for system initialization.
++ */
++void free_initmem(void)
++{
++#if 0
++ unsigned long addr;
++ unsigned long start = (unsigned long)&__init_begin;
++ unsigned long end = (unsigned long)&__init_end;
++
++ printk(KERN_INFO "free_initmem: __init_begin = 0x%lx"
++ " __init_end = 0x%lx\n", start, end);
++
++ addr = (unsigned long)&__init_begin;
++ for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
++ /* not currently used */
++ virt_to_page(addr)->flags &= ~(1 << PG_reserved);
++ init_page_count(virt_to_page(addr));
++ free_page(addr);
++ totalram_pages++;
++ }
++#endif
++}
++
++/*
++ * Initialize the paging system.
++ */
++void __init paging_init(void)
++{
++ pgd_t *pg_dir;
++ pte_t *pg_table;
++ int i;
++ unsigned long address;
++ unsigned long next_pgtable;
++ unsigned long zones_size[MAX_NR_ZONES];
++ unsigned long size;
++ enum zone_type zone;
++
++ /* allocate zero page */
++ empty_zero_page = (void *)alloc_bootmem_pages(PAGE_SIZE);
++ memset((void *)empty_zero_page, 0, PAGE_SIZE);
++
++ /* zero kernel page directory */
++ pg_dir = swapper_pg_dir;
++ memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
++ /*
++ * setup page tables for PHYSRAM
++ */
++
++ /* starting loc in page directory */
++ pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
++
++ /* allocate page tables */
++ size = num_pages * sizeof(pte_t);
++ size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
++ next_pgtable = (unsigned long)alloc_bootmem_pages(size);
++ address = PAGE_OFFSET;
++ while (address < (unsigned long)high_memory) {
++ /* setup page table in page directory */
++ pg_table = (pte_t *)next_pgtable;
++ next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
++ pgd_val(*pg_dir) = (unsigned long)pg_table;
++ pg_dir++;
++
++ /* create PTEs in page table */
++ for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
++ pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
++ if (address >= (unsigned long)high_memory)
++ pte_val(pte) = 0;
++
++ set_pte(pg_table, pte);
++ address += PAGE_SIZE;
++ }
++ }
++
++ /*
++ * setup page tables for DMA area
++ */
++
++ /* starting loc in page directory */
++ pg_dir = swapper_pg_dir;
++ pg_dir += CONFIG_DMA_BASE >> PGDIR_SHIFT;
++
++ /* allocate page tables */
++ size = (CONFIG_DMA_SIZE >> PAGE_SHIFT) * sizeof(pte_t);
++ size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
++ next_pgtable = (unsigned long)alloc_bootmem_pages(size);
++ address = CONFIG_DMA_BASE;
++ while (address < (CONFIG_DMA_BASE + CONFIG_DMA_SIZE)) {
++ /* setup page table in page directory */
++ pg_table = (pte_t *)next_pgtable;
++ next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
++ pgd_val(*pg_dir) = (unsigned long)pg_table;
++ pg_dir++;
++
++ /* create PTEs in page table */
++ for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
++ pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
++ if (address >= (CONFIG_DMA_BASE + CONFIG_DMA_SIZE))
++ pte_val(pte) = 0;
++
++ set_pte(pg_table, pte);
++ address += PAGE_SIZE;
++ }
++ }
++
++ /*
++ * setup zones
++ */
++
++ current->mm = NULL;
++
++ /* clear zones */
++ for (zone = 0; zone < MAX_NR_ZONES; zone++)
++ zones_size[zone] = 0x0;
++
++ zones_size[ZONE_DMA] = CONFIG_DMA_SIZE >> PAGE_SHIFT;
++ zones_size[ZONE_NORMAL] = (((unsigned long)high_memory -
++ PAGE_OFFSET) >> PAGE_SHIFT) -
++ zones_size[ZONE_DMA];
++
++ free_area_init(zones_size);
++}
++/*
++ * Handle a TLB miss.
++ */
++int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
++{
++ struct mm_struct *mm;
++ pgd_t *pgd;
++ pmd_t *pmd;
++ pte_t *pte;
++ unsigned long mmuar;
++ int asid;
++ unsigned long flags;
++
++ local_save_flags(flags);
++ local_irq_disable();
++
++ mmuar = (dtlb) ? regs->mmuar
++ : regs->pc + (extension_word * sizeof(long));
++
++ mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
++
++ if (!mm) {
++ local_irq_restore(flags);
++ return -1;
++ }
++
++ pgd = pgd_offset(mm, mmuar);
++ if (pgd_none(*pgd)) {
++ local_irq_restore(flags);
++ return -1;
++ }
++
++ pmd = pmd_offset(pgd, mmuar);
++ if (pmd_none(*pmd)) {
++ local_irq_restore(flags);
++ return -1;
++ }
++
++ pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
++ : pte_offset_map(pmd, mmuar);
++ if (pte_none(*pte) || !pte_present(*pte)) {
++ local_irq_restore(flags);
++ return -1;
++ }
++
++ if (write) {
++ if (!pte_write(*pte)) {
++ local_irq_restore(flags);
++ return -1;
++ }
++ set_pte(pte, pte_mkdirty(*pte));
++ }
++
++ set_pte(pte, pte_mkyoung(*pte));
++ asid = cpu_context(mm) & 0xff;
++ if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
++ set_pte(pte, pte_wrprotect(*pte));
++
++ *MMUTR = (mmuar & PAGE_MASK) | (asid << CF_ASID_MMU_SHIFT)
++ | (((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK)
++ >> CF_PAGE_MMUTR_SHIFT) | MMUTR_V;
++
++ *MMUDR = (pte_val(*pte) & PAGE_MASK)
++ | ((pte->pte) & CF_PAGE_MMUDR_MASK)
++ | MMUDR_SZ8K | MMUDR_X;
++
++ if (dtlb)
++ *MMUOR = MMUOR_ACC | MMUOR_UAA;
++ else
++ *MMUOR = MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA;
++
++ asm("nop");
++
++#ifdef DEBUG
++ printk(KERN_INFO "cf_tlb_miss: va=%lx, pa=%lx\n", (mmuar & PAGE_MASK),
++ (pte_val(*pte) & PAGE_MASK));
++#endif
++ local_irq_restore(flags);
++ return 0;
++}
++
++
++/*
++ * Context Management
++ *
++ * Based on arch/ppc/mmu_context.c
++ */
++
++/*
++ * Initialize the context management system.
++ */
++void __init mmu_context_init(void)
++{
++ /*
++ * Some processors have too few contexts to reserve one for
++ * init_mm, and require using context 0 for a normal task.
++ * Other processors reserve the use of context zero for the kernel.
++ * This code assumes FIRST_CONTEXT < 32.
++ */
++ context_map[0] = (1 << FIRST_CONTEXT) - 1;
++ next_mmu_context = FIRST_CONTEXT;
++ atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
++}
++
++/*
++ * Steal a context from a task that has one at the moment.
++ * This is only used on 8xx and 4xx and we presently assume that
++ * they don't do SMP.  If they do then this will have to check
++ * whether the MM we steal is in use.
++ * We also assume that this is only used on systems that don't
++ * use an MMU hash table - this is true for 8xx and 4xx.
++ * This isn't an LRU system, it just frees up each context in
++ * turn (sort-of pseudo-random replacement :). This would be the
++ * place to implement an LRU scheme if anyone was motivated to do it.
++ * -- paulus
++ */
++void steal_context(void)
++{
++ struct mm_struct *mm;
++ /* free up context `next_mmu_context' */
++ /* if we shouldn't free context 0, don't... */
++ if (next_mmu_context < FIRST_CONTEXT)
++ next_mmu_context = FIRST_CONTEXT;
++ mm = context_mm[next_mmu_context];
++ flush_tlb_mm(mm);
++ destroy_context(mm);
++}
+--- /dev/null
++++ b/arch/m68k/mm/cf-sram.c
+@@ -0,0 +1,80 @@
++/*
++ * Copyright (C) 2009-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Author: Lanttor.Guo@freescale.com
++ *
++ * Provides on-chip SRAM allocation and free APIs to the kernel.
++ * The implementation uses the gen_pool_alloc/gen_pool_free interface.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/genalloc.h>
++
++/* minimum SRAM allocation size per request */
++static long blk_size = CONFIG_SRAM_ALLOC_GRANULARITY;
++static struct gen_pool *sram_pool;
++
++/*
++ * Set up a memory pool to manage the on-chip SRAM.
++ * @ start the start address of SRAM
++ * @ size the size of SRAM
++ * @ return 0 on success, a negative error code on failure
++ */
++int declare_sram_pool(void *start, size_t size)
++{
++ int status = 0;
++
++ pr_debug("%s %p %zu\n", __func__, start, size);
++
++ sram_pool = gen_pool_create(ilog2(blk_size), -1);
++ if (!sram_pool) {
++ printk(KERN_ERR "gen_pool_create failed at %s()\n", __func__);
++ return -ENOMEM;
++ }
++
++ status = gen_pool_add(sram_pool, (unsigned long)start, size, -1);
++ if (status < 0)
++ printk(KERN_ERR "gen_pool_add failed at %s()\n", __func__);
++
++ return status;
++
++}
++
++/*
++ * Allocate memory from the SRAM pool.
++ * @ len the size of the requested allocation
++ * @ return the start address of the allocated memory, or NULL on failure
++ */
++void *sram_alloc(size_t len)
++{
++ unsigned long vaddr;
++
++ if (!len) {
++ printk(KERN_ERR "sram_alloc() called with zero length\n");
++ return NULL;
++ }
++
++ vaddr = gen_pool_alloc(sram_pool, len);
++ if (!vaddr)
++ return NULL;
++
++ return (void *)vaddr;
++}
++EXPORT_SYMBOL(sram_alloc);
++
++/*
++ * Return memory to the SRAM pool.
++ * @ addr the start address of the allocation
++ * @ len the size of the allocation
++ */
++void sram_free(void *addr, size_t len)
++{
++ gen_pool_free(sram_pool, (unsigned long)addr, len);
++}
++EXPORT_SYMBOL(sram_free);
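++
++/*
++ * Usage sketch (hypothetical caller): platform setup code is expected to
++ * call declare_sram_pool() once with the SRAM base address and size, after
++ * which drivers can carve buffers out of the pool:
++ *
++ *     u8 *buf = sram_alloc(512);
++ *     if (!buf)
++ *             return -ENOMEM;
++ *     ...
++ *     sram_free(buf, 512);
++ *
++ * The pool hands out memory in multiples of CONFIG_SRAM_ALLOC_GRANULARITY.
++ */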
+--- a/arch/m68k/mm/hwtest.c
++++ b/arch/m68k/mm/hwtest.c
+@@ -12,6 +12,14 @@
+ * them here complete with the comments from the original atari
+ * config.c...
+ * -- PMM <pmaydell@chiark.greenend.org.uk>, 05/1998
++ *
++ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
+ */
+
+ /* This function tests for the presence of an address, specially a
+@@ -25,6 +33,7 @@
+
+ #include <linux/module.h>
+
++#ifndef CONFIG_COLDFIRE
+ int hwreg_present( volatile void *regp )
+ {
+ int ret = 0;
+@@ -82,4 +91,5 @@ int hwreg_write( volatile void *regp, un
+ return( ret );
+ }
+ EXPORT_SYMBOL(hwreg_write);
++#endif
+
+--- a/arch/m68k/mm/init.c
++++ b/arch/m68k/mm/init.c
+@@ -2,6 +2,13 @@
+ * linux/arch/m68k/mm/init.c
+ *
+ * Copyright (C) 1995 Hamish Macdonald
++ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
+ *
+ * Contains common initialization routines, specific init code moved
+ * to motorola.c and sun3mmu.c
+@@ -32,6 +39,7 @@
+ #include <asm/sections.h>
+ #include <asm/tlb.h>
+
++
+ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+ pg_data_t pg_data_map[MAX_NUMNODES];
+@@ -113,7 +121,7 @@ void __init mem_init(void)
+ }
+ }
+
+-#ifndef CONFIG_SUN3
++#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+ /* insert pointer tables allocated so far into the tablelist */
+ init_pointer_table((unsigned long)kernel_pg_dir);
+ for (i = 0; i < PTRS_PER_PGD; i++) {
+@@ -132,6 +140,11 @@ void __init mem_init(void)
+ codepages << (PAGE_SHIFT-10),
+ datapages << (PAGE_SHIFT-10),
+ initpages << (PAGE_SHIFT-10));
++
++#ifdef CONFIG_VDSO
++ /* init the vdso page */
++ vdso_init();
++#endif
+ }
+
+ #ifdef CONFIG_BLK_DEV_INITRD
+--- a/arch/m68k/mm/kmap.c
++++ b/arch/m68k/mm/kmap.c
+@@ -2,6 +2,13 @@
+ * linux/arch/m68k/mm/kmap.c
+ *
+ * Copyright (C) 1997 Roman Hodek
++ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
+ *
+ * 10/01/99 cleaned up the code and changing to the same interface
+ * used by other architectures /Roman Zippel
+@@ -24,7 +31,11 @@
+
+ #undef DEBUG
+
++#ifndef CONFIG_COLDFIRE
+ #define PTRTREESIZE (256*1024)
++#else
++#define PTRTREESIZE PAGE_SIZE
++#endif
+
+ /*
+ * For 040/060 we can use the virtual memory area like other architectures,
+@@ -50,7 +61,11 @@ static inline void free_io_area(void *ad
+
+ #else
+
++#ifdef CONFIG_COLDFIRE
++#define IO_SIZE PAGE_SIZE
++#else
+ #define IO_SIZE (256*1024)
++#endif
+
+ static struct vm_struct *iolist;
+
+@@ -126,8 +141,58 @@ void __iomem *__ioremap(unsigned long ph
+ }
+ #endif
+
++#ifdef CONFIG_M5445X
++ if (physaddr >= 0xf0000000) {
++ /*
++ * On the M5445x processors an ACR is setup to map
++ * the 0xF0000000 range into kernel memory as
++ * non-cacheable.
++ */
++ return (void __iomem *)physaddr;
++ }
++ if ((physaddr >= KMAP_START) && (physaddr <= KMAP_END)) {
++ /* if physaddr belongs to virtual address range for ioremap,
++ * then return physaddr because it has been ioremapped
++ */
++ return (void __iomem *)physaddr;
++ }
++#endif
++#ifdef CONFIG_M547X_8X
++ if (physaddr >= 0xf0000000) {
++ /*
++ * On the M547x/M548x processors an ACR is setup to map
++ * the 0xF0000000 range into kernel memory as
++ * non-cacheable.
++ */
++ return (void __iomem *)physaddr;
++ }
++
++ if ((physaddr >= 0xd0000000) && (physaddr + size < 0xd800ffff)) {
++ printk(KERN_ERR "ioremap:PCI 0x%lx,0x%lx(%d)"
++ " - PCI area hit\n", physaddr, size, cacheflag);
++ return (void *)physaddr;
++ }
++#endif
++#ifdef CONFIG_M5441X
++ if (physaddr >= 0xe0000000) {
++ /*
++ * On the M5441x processors an ACR is setup to map
++ * the 0xe0000000 range into kernel memory as
++ * non-cacheable.
++ */
++ return (void __iomem *)physaddr;
++ }
++ if ((physaddr >= KMAP_START) && (physaddr <= KMAP_END)) {
++ /* if physaddr belongs to virtual address range for ioremap,
++ * then return physaddr because it has been ioremapped
++ */
++ return (void __iomem *)physaddr;
++ }
++#endif
++
+ #ifdef DEBUG
+- printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
++ printk(KERN_ERR "ioremap: paddr=0x%lx,size=0x%lx(%d) - ",
++ physaddr, size, cacheflag);
+ #endif
+ /*
+ * Mappings have to be aligned
+@@ -146,7 +211,8 @@ void __iomem *__ioremap(unsigned long ph
+ virtaddr = (unsigned long)area->addr;
+ retaddr = virtaddr + offset;
+ #ifdef DEBUG
+- printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
++ printk(KERN_ERR " paddr=0x%lx,vaddr=0x%lx,retaddr=0x%lx",
++ physaddr, virtaddr, retaddr);
+ #endif
+
+ /*
+@@ -171,7 +237,12 @@ void __iomem *__ioremap(unsigned long ph
+ break;
+ }
+ } else {
++#ifndef CONFIG_COLDFIRE
+ physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
++#else
++ physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY | \
++ _PAGE_READWRITE);
++#endif
+ switch (cacheflag) {
+ case IOMAP_NOCACHE_SER:
+ case IOMAP_NOCACHE_NONSER:
+@@ -251,6 +322,13 @@ void __iounmap(void *addr, unsigned long
+ pmd_t *pmd_dir;
+ pte_t *pte_dir;
+
++#ifdef CONFIG_M547X_8X
++ if ((addr >= (void *)0xd0000000)
++ && (addr + size < (void *)0xd800ffff)) {
++ printk(KERN_ERR "%s: PCI address\n", __func__);
++ return;
++ }
++#endif
+ while ((long)size > 0) {
+ pgd_dir = pgd_offset_k(virtaddr);
+ if (pgd_bad(*pgd_dir)) {
+--- a/arch/m68k/mm/memory.c
++++ b/arch/m68k/mm/memory.c
+@@ -2,6 +2,13 @@
+ * linux/arch/m68k/mm/memory.c
+ *
+ * Copyright (C) 1995 Hamish Macdonald
++ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
+ */
+
+ #include <linux/module.h>
+@@ -127,6 +134,7 @@ int free_pointer_table (pmd_t *ptable)
+ return 0;
+ }
+
++#ifndef CONFIG_COLDFIRE
+ /* invalidate page in both caches */
+ static inline void clear040(unsigned long paddr)
+ {
+@@ -173,6 +181,7 @@ static inline void pushcl040(unsigned lo
+ clear040(paddr);
+ local_irq_restore(flags);
+ }
++#endif /* CONFIG_COLDFIRE */
+
+ /*
+ * 040: Hit every page containing an address in the range paddr..paddr+len-1.
+@@ -203,6 +212,9 @@ static inline void pushcl040(unsigned lo
+
+ void cache_clear (unsigned long paddr, int len)
+ {
++#ifdef CONFIG_COLDFIRE
++ flush_bcache();
++#else
+ if (CPU_IS_040_OR_060) {
+ int tmp;
+
+@@ -237,6 +249,7 @@ void cache_clear (unsigned long paddr, i
+ if(mach_l2_flush)
+ mach_l2_flush(0);
+ #endif
++#endif /* CONFIG_COLDFIRE */
+ }
+ EXPORT_SYMBOL(cache_clear);
+
+@@ -250,6 +263,9 @@ EXPORT_SYMBOL(cache_clear);
+
+ void cache_push (unsigned long paddr, int len)
+ {
++#ifdef CONFIG_COLDFIRE
++ flush_bcache();
++#else
+ if (CPU_IS_040_OR_060) {
+ int tmp = PAGE_SIZE;
+
+@@ -290,6 +306,7 @@ void cache_push (unsigned long paddr, in
+ if(mach_l2_flush)
+ mach_l2_flush(1);
+ #endif
++#endif /* CONFIG_COLDFIRE */
+ }
+ EXPORT_SYMBOL(cache_push);
+
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -3,6 +3,10 @@
+ *
+ * (C) Copyright Al Viro 2000, 2001
+ * Released under GPL v2.
++ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Change to align on page size for coldfire
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
+ *
+ * Based on code from fs/super.c, copyright Linus Torvalds and others.
+ * Heavily rewritten.
+@@ -2161,7 +2165,11 @@ int copy_mount_options(const void __user
+ /* copy_from_user cannot cross TASK_SIZE ! */
+ size = TASK_SIZE - (unsigned long)data;
+ if (size > PAGE_SIZE)
++#ifndef CONFIG_COLDFIRE
+ size = PAGE_SIZE;
++#else
++ size = PAGE_SIZE - ((unsigned long)data & ~PAGE_MASK);
++#endif
+
+ i = size - exact_copy_from_user((void *)page, data, size);
+ if (!i) {
+--- a/include/linux/fsl_devices.h
++++ b/include/linux/fsl_devices.h
+@@ -6,7 +6,7 @@
+ *
+ * Maintainer: Kumar Gala <galak@kernel.crashing.org>
+ *
+- * Copyright 2004 Freescale Semiconductor, Inc
++ * Copyright (C) 2004-2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+@@ -18,7 +18,7 @@
+ #define _FSL_DEVICE_H_
+
+ #include <linux/types.h>
+-
++#include <linux/interrupt.h>
+ /*
+ * Some conventions on how we handle peripherals on Freescale chips
+ *
+@@ -119,4 +119,14 @@ int fsl_deep_sleep(void);
+ static inline int fsl_deep_sleep(void) { return 0; }
+ #endif
+
++struct fsl_ata_platform_data {
++#ifdef CONFIG_FSL_PATA_USE_DMA
++ int udma_mask; /* UDMA modes h/w can handle */
++ int fifo_alarm; /* value for fifo_alarm reg */
++ int max_sg; /* longest sglist h/w can handle */
++#endif
++ int (*init)(struct platform_device *pdev);
++ void (*exit)(void);
++ int (*get_clk_rate)(void);
++};
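++
++/*
++ * Usage sketch (board code, names hypothetical): the structure is meant to
++ * be supplied as the platform_data of the Freescale ATA platform device:
++ *
++ *     static struct fsl_ata_platform_data pata_platform_data = {
++ *             .init         = board_ata_init,
++ *             .exit         = board_ata_exit,
++ *             .get_clk_rate = board_ata_get_clk_rate,
++ *     };
++ */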
+ #endif /* _FSL_DEVICE_H_ */