Diffstat (limited to 'target/linux/coldfire/patches/003-mcfv4e_coldfire_headers2.patch')
-rw-r--r-- | target/linux/coldfire/patches/003-mcfv4e_coldfire_headers2.patch | 1865 |
1 file changed, 1865 insertions, 0 deletions
diff --git a/target/linux/coldfire/patches/003-mcfv4e_coldfire_headers2.patch b/target/linux/coldfire/patches/003-mcfv4e_coldfire_headers2.patch new file mode 100644 index 0000000000..473cebcf7c --- /dev/null +++ b/target/linux/coldfire/patches/003-mcfv4e_coldfire_headers2.patch @@ -0,0 +1,1865 @@ +From 3da86cd2810e9ba4e4a9e7471a92025172c1c990 Mon Sep 17 00:00:00 2001 +From: Kurt Mahan <kmahan@freescale.com> +Date: Wed, 31 Oct 2007 16:41:41 -0600 +Subject: [PATCH] Add Coldfire specific header files. + +LTIBName: mcfv4e-coldfire-headers2 +Signed-off-by: Kurt Mahan <kmahan@freescale.com> +--- + include/asm-m68k/cf_cacheflush.h | 160 ++++++++++++++++ + include/asm-m68k/cf_entry.h | 146 +++++++++++++++ + include/asm-m68k/cf_pgalloc.h | 99 ++++++++++ + include/asm-m68k/cf_pgtable.h | 357 ++++++++++++++++++++++++++++++++++++ + include/asm-m68k/cf_tlbflush.h | 59 ++++++ + include/asm-m68k/cf_uaccess.h | 376 ++++++++++++++++++++++++++++++++++++++ + include/asm-m68k/cfcache.h | 86 +++++++++ + include/asm-m68k/cfmmu.h | 104 +++++++++++ + include/asm-m68k/coldfire.h | 38 ++++ + include/asm-m68k/coldfire_edma.h | 39 ++++ + include/asm-m68k/mcfqspi.h | 50 +++++ + include/asm-m68k/mcfsim.h | 96 ++++++++++ + include/asm-m68k/mcfuart.h | 180 ++++++++++++++++++ + 13 files changed, 1790 insertions(+), 0 deletions(-) + create mode 100644 include/asm-m68k/cf_cacheflush.h + create mode 100644 include/asm-m68k/cf_entry.h + create mode 100644 include/asm-m68k/cf_pgalloc.h + create mode 100644 include/asm-m68k/cf_pgtable.h + create mode 100644 include/asm-m68k/cf_tlbflush.h + create mode 100644 include/asm-m68k/cf_uaccess.h + create mode 100644 include/asm-m68k/cfcache.h + create mode 100644 include/asm-m68k/cfmmu.h + create mode 100644 include/asm-m68k/coldfire.h + create mode 100644 include/asm-m68k/coldfire_edma.h + create mode 100644 include/asm-m68k/mcfqspi.h + create mode 100644 include/asm-m68k/mcfsim.h + create mode 100644 include/asm-m68k/mcfuart.h + +--- /dev/null ++++ b/include/asm-m68k/cf_cacheflush.h +@@ -0,0 +1,160 @@ ++#ifndef M68K_CF_CACHEFLUSH_H ++#define M68K_CF_CACHEFLUSH_H ++ ++#include <asm/cfcache.h> ++ ++/* ++ * Cache handling functions ++ */ ++ ++#define flush_icache() \ ++({ \ ++ unsigned long set; \ ++ unsigned long start_set; \ ++ unsigned long end_set; \ ++ \ ++ start_set = 0; \ ++ end_set = (unsigned long)LAST_DCACHE_ADDR; \ ++ \ ++ for (set = start_set; set <= end_set; set += (0x10 - 3)) \ ++ asm volatile("cpushl %%ic,(%0)\n" \ ++ "\taddq%.l #1,%0\n" \ ++ "\tcpushl %%ic,(%0)\n" \ ++ "\taddq%.l #1,%0\n" \ ++ "\tcpushl %%ic,(%0)\n" \ ++ "\taddq%.l #1,%0\n" \ ++ "\tcpushl %%ic,(%0)" : : "a" (set)); \ ++}) ++ ++/* ++ * invalidate the cache for the specified memory range. ++ * It starts at the physical address specified for ++ * the given number of bytes. ++ */ ++extern void cache_clear(unsigned long paddr, int len); ++/* ++ * push any dirty cache in the specified memory range. ++ * It starts at the physical address specified for ++ * the given number of bytes. ++ */ ++extern void cache_push(unsigned long paddr, int len); ++ ++/* ++ * push and invalidate pages in the specified user virtual ++ * memory range. ++ */ ++extern void cache_push_v(unsigned long vaddr, int len); ++ ++/* This is needed whenever the virtual mapping of the current ++ process changes. 
 */
++
++
++#define flush_cache_all()			do { } while (0)
++#define flush_cache_mm(mm)			do { } while (0)
++#define flush_cache_range(mm, a, b)		do { } while (0)
++#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
++
++#define flush_dcache_range(paddr, len)	do { } while (0)
++
++/* Push the page at kernel virtual address and clear the icache */
++/* use cpush %bc instead of cpush %dc, cinv %ic */
++#define flush_page_to_ram(page) __flush_page_to_ram((void *) page_address(page))
++extern inline void __flush_page_to_ram(void *address)
++{
++	unsigned long set;
++	unsigned long start_set;
++	unsigned long end_set;
++	unsigned long addr = (unsigned long) address;
++
++	addr &= ~(PAGE_SIZE - 1);	/* round down to page start address */
++
++	start_set = addr & _ICACHE_SET_MASK;
++	end_set = (addr + PAGE_SIZE-1) & _ICACHE_SET_MASK;
++
++	if (start_set > end_set) {
++		/* from the beginning to the lowest address */
++		for (set = 0; set <= end_set; set += (0x10 - 3))
++			asm volatile("cpushl %%bc,(%0)\n"
++				     "\taddq%.l #1,%0\n"
++				     "\tcpushl %%bc,(%0)\n"
++				     "\taddq%.l #1,%0\n"
++				     "\tcpushl %%bc,(%0)\n"
++				     "\taddq%.l #1,%0\n"
++				     "\tcpushl %%bc,(%0)" : : "a" (set));
++
++		/* next loop will finish the cache, i.e. pass the hole */
++		end_set = LAST_ICACHE_ADDR;
++	}
++	for (set = start_set; set <= end_set; set += (0x10 - 3))
++		asm volatile("cpushl %%bc,(%0)\n"
++			     "\taddq%.l #1,%0\n"
++			     "\tcpushl %%bc,(%0)\n"
++			     "\taddq%.l #1,%0\n"
++			     "\tcpushl %%bc,(%0)\n"
++			     "\taddq%.l #1,%0\n"
++			     "\tcpushl %%bc,(%0)" : : "a" (set));
++}
++
++#define flush_dcache_page(page)			do { } while (0)
++#define flush_icache_page(vma, pg)		do { } while (0)
++#define flush_icache_user_range(adr, len)	do { } while (0)
++/* NL */
++#define flush_icache_user_page(vma, page, addr, len)	do { } while (0)
++
++/* Push n pages at kernel virtual address and clear the icache */
++/* use cpush %bc instead of cpush %dc, cinv %ic */
++extern inline void flush_icache_range(unsigned long address,
++				      unsigned long endaddr)
++{
++	unsigned long set;
++	unsigned long start_set;
++	unsigned long end_set;
++
++	start_set = address & _ICACHE_SET_MASK;
++	end_set = endaddr & _ICACHE_SET_MASK;
++
++	if (start_set > end_set) {
++		/* from the beginning to the lowest address */
++		for (set = 0; set <= end_set; set += (0x10 - 3))
++			asm volatile("cpushl %%ic,(%0)\n"
++				     "\taddq%.l #1,%0\n"
++				     "\tcpushl %%ic,(%0)\n"
++				     "\taddq%.l #1,%0\n"
++				     "\tcpushl %%ic,(%0)\n"
++				     "\taddq%.l #1,%0\n"
++				     "\tcpushl %%ic,(%0)" : : "a" (set));
++
++		/* next loop will finish the cache, i.e. pass the hole */
++		end_set = LAST_ICACHE_ADDR;
++	}
++	for (set = start_set; set <= end_set; set += (0x10 - 3))
++		asm volatile("cpushl %%ic,(%0)\n"
++			     "\taddq%.l #1,%0\n"
++			     "\tcpushl %%ic,(%0)\n"
++			     "\taddq%.l #1,%0\n"
++			     "\tcpushl %%ic,(%0)\n"
++			     "\taddq%.l #1,%0\n"
++			     "\tcpushl %%ic,(%0)" : : "a" (set));
++}
++
++static inline void copy_to_user_page(struct vm_area_struct *vma,
++				     struct page *page, unsigned long vaddr,
++				     void *dst, void *src, int len)
++{
++	memcpy(dst, src, len);
++	flush_icache_user_page(vma, page, vaddr, len);
++}
++static inline void copy_from_user_page(struct vm_area_struct *vma,
++				       struct page *page, unsigned long vaddr,
++				       void *dst, void *src, int len)
++{
++	memcpy(dst, src, len);
++}
++
++#define flush_cache_dup_mm(mm)			flush_cache_mm(mm)
++#define flush_cache_vmap(start, end)		flush_cache_all()
++#define flush_cache_vunmap(start, end)		flush_cache_all()
++#define flush_dcache_mmap_lock(mapping)		do { } while (0)
++#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
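
The cpushl loops above walk the cache by set and way: each iteration pushes four lines (the three addq%.l #1 steps move to ways 1 to 3 of the current set), and the loop step of 0x10 - 3 then advances the operand to the next 0x10-aligned set, up to LAST_ICACHE_ADDR from cfcache.h. A minimal C sketch of the same address walk, assuming, as the assembly implies, that the low two bits of the cpushl operand select the way; cf_cpushl_ic() is a hypothetical stand-in for one cpushl %ic instruction:

	static void icache_walk_sketch(void (*cf_cpushl_ic)(unsigned long))
	{
		unsigned long set;

		for (set = 0; set <= LAST_ICACHE_ADDR; set += (0x10 - 3)) {
			cf_cpushl_ic(set++);	/* way 0 */
			cf_cpushl_ic(set++);	/* way 1 */
			cf_cpushl_ic(set++);	/* way 2 */
			cf_cpushl_ic(set);	/* way 3; loop adds 0x10 - 3 */
		}
	}

Each pass therefore advances the operand by exactly 0x10, one cache set per iteration.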
++
++#endif /* M68K_CF_CACHEFLUSH_H */
+--- /dev/null
++++ b/include/asm-m68k/cf_entry.h
+@@ -0,0 +1,146 @@
++#ifndef __CF_M68K_ENTRY_H
++#define __CF_M68K_ENTRY_H
++
++#include <asm/setup.h>
++#include <asm/page.h>
++#include <asm/coldfire.h>
++#include <asm/cfmmu.h>
++#include <asm/asm-offsets.h>
++
++/*
++ * Stack layout in 'ret_from_exception':
++ *
++ *	This allows access to the syscall arguments in registers d1-d5
++ *
++ *	 0(sp) - d1
++ *	 4(sp) - d2
++ *	 8(sp) - d3
++ *	 C(sp) - d4
++ *	10(sp) - d5
++ *	14(sp) - a0
++ *	18(sp) - a1
++ *	1C(sp) - a2
++ *	20(sp) - d0
++ *	24(sp) - orig_d0
++ *	28(sp) - stack adjustment
++ *	2C(sp) - sr
++ *	2E(sp) - pc
++ *	32(sp) - format & vector
++ *	36(sp) - MMUSR
++ *	3A(sp) - MMUAR
++ */
++
++/*
++ * 97/05/14 Andreas: Register %a2 is now set to the current task throughout
++ * the whole kernel.
++ */
++
++/* the following macro is used when enabling interrupts */
++/* portable version */
++#define ALLOWINT	(~0x700)
++#define MAX_NOINT_IPL	0
++
++#ifdef __ASSEMBLY__
++
++#define curptr a2
++
++LFLUSH_I_AND_D = 0x00000808
++LSIGTRAP = 5
++
++/* process bits for task_struct.ptrace */
++PT_TRACESYS_OFF = 3
++PT_TRACESYS_BIT = 1
++PT_PTRACED_OFF = 3
++PT_PTRACED_BIT = 0
++PT_DTRACE_OFF = 3
++PT_DTRACE_BIT = 2
++
++#define SAVE_ALL_INT save_all_int
++#define SAVE_ALL_SYS save_all_sys
++#define RESTORE_ALL restore_all
++/*
++ * This defines the normal kernel pt-regs layout.
++ *
++ * regs a3-a6 and d6-d7 are preserved by C code
++ * the kernel doesn't mess with usp unless it needs to
++ */
++
++/*
++ * a -1 in the orig_d0 field signifies
++ * that the stack frame is NOT for syscall
++ */
++.macro save_all_int
++	movel	MMUSR,%sp@-
++	movel	MMUAR,%sp@-
++	clrl	%sp@-		| stk_adj
++	pea	-1:w		| orig d0
++	movel	%d0,%sp@-	| d0
++	subal	#(8*4), %sp
++	moveml	%d1-%d5/%a0-%a1/%curptr,%sp@
++.endm
++
++.macro save_all_sys
++	movel	MMUSR,%sp@-
++	movel	MMUAR,%sp@-
++	clrl	%sp@-		| stk_adj
++	movel	%d0,%sp@-	| orig d0
++	movel	%d0,%sp@-	| d0
++	subal	#(8*4), %sp
++	moveml	%d1-%d5/%a0-%a1/%curptr,%sp@
++.endm
++
++.macro restore_all
++	moveml	%sp@,%a0-%a1/%curptr/%d1-%d5
++	addal	#(8*4), %sp
++	movel	%sp@+,%d0	| d0
++	addql	#4,%sp		| orig d0
++	addl	%sp@+,%sp	| stk_adj
++	addql	#8,%sp		| MMUAR & MMUSR
++	rte
++.endm
++
++#define SWITCH_STACK_SIZE (6*4+4)	/* includes return address */
++
++#define SAVE_SWITCH_STACK save_switch_stack
++#define RESTORE_SWITCH_STACK restore_switch_stack
++#define GET_CURRENT(tmp) get_current tmp
++
++.macro save_switch_stack
++	subal	#(6*4), %sp
++	moveml	%a3-%a6/%d6-%d7,%sp@
++.endm
++
++.macro restore_switch_stack
++	moveml	%sp@,%a3-%a6/%d6-%d7
++	addal	#(6*4), %sp
++.endm
++
++.macro get_current reg=%d0
++	movel	%sp,\reg
++	andl	#-THREAD_SIZE,\reg
++	movel	\reg,%curptr
++	movel	%curptr@,%curptr
++.endm
++
++#else /* C source */
++
++#define STR(X) STR1(X)
++#define STR1(X) #X
++
++#define PT_OFF_ORIG_D0	 0x24
++#define PT_OFF_FORMATVEC 0x32
++#define PT_OFF_SR	 0x2C
++#define SAVE_ALL_INT				\
++	"clrl	%%sp@-;"	/* stk_adj */	\
++	"pea	-1:w;"		/* orig d0 = -1 */ \
++	"movel	%%d0,%%sp@-;"	/* d0 */	\
++	"subal	#(8*4),%%sp;"			\
++	"moveml	%%d1-%%d5/%%a0-%%a2,%%sp@"
++#define GET_CURRENT(tmp) \
++	"movel	%%sp,"#tmp"\n\t" \
++	"andw	#-"STR(THREAD_SIZE)","#tmp"\n\t" \
++	"movel	"#tmp",%%a2\n\t"
++
++#endif
++
++#endif /* __CF_M68K_ENTRY_H */
+--- /dev/null
++++ b/include/asm-m68k/cf_pgalloc.h
+@@ -0,0 +1,99 @@
++#ifndef M68K_CF_PGALLOC_H
++#define M68K_CF_PGALLOC_H
++
++#include <asm/coldfire.h>
++#include <asm/page.h>
++#include <asm/cf_tlbflush.h>
++
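
The stack layout documented at the top of cf_entry.h above is easiest to read as a structure. Here is a sketch with field offsets taken from that comment; it relies on the m68k ABI aligning 32-bit fields on 16-bit boundaries, which is what lets pc sit at 0x2e directly behind the 16-bit sr. This is an illustration only, not the kernel's actual struct pt_regs:

	struct cf_frame_sketch {
		unsigned long	d1, d2, d3, d4, d5;	/* 0x00..0x13 */
		unsigned long	a0, a1, a2;		/* 0x14..0x1f, a2 = curptr */
		unsigned long	d0;			/* 0x20 */
		unsigned long	orig_d0;		/* 0x24, -1 = not a syscall */
		unsigned long	stkadj;			/* 0x28, stack adjustment */
		unsigned short	sr;			/* 0x2c */
		unsigned long	pc;			/* 0x2e */
		unsigned long	fmtvec;			/* 0x32, format & vector */
		unsigned long	mmusr;			/* 0x36 */
		unsigned long	mmuar;			/* 0x3a */
	};

The PT_OFF_ORIG_D0 (0x24), PT_OFF_SR (0x2C) and PT_OFF_FORMATVEC (0x32) constants in the C branch of the header line up with these offsets.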
++extern inline void pte_free_kernel(pte_t *pte) ++{ ++ free_page((unsigned long) pte); ++} ++ ++extern const char bad_pmd_string[]; ++ ++extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, ++ unsigned long address) ++{ ++ unsigned long page = __get_free_page(GFP_DMA|__GFP_REPEAT); ++ ++ if (!page) ++ return NULL; ++ ++ memset((void *)page, 0, PAGE_SIZE); ++ return (pte_t *) (page); ++} ++ ++extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address) ++{ ++ return (pmd_t *) pgd; ++} ++ ++#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); }) ++#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); }) ++ ++#define pte_alloc_one_fast(mm, addr) pte_alloc_one(mm, addr) ++ ++#define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \ ++ (unsigned long)(page_address(page))) ++#define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte)) ++ ++static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *page) ++{ ++ __free_page(page); ++} ++ ++#define __pmd_free_tlb(tlb, pmd) do { } while (0) ++ ++static inline struct page *pte_alloc_one(struct mm_struct *mm, ++ unsigned long address) ++{ ++ struct page *page = alloc_pages(GFP_DMA|__GFP_REPEAT, 0); ++ pte_t *pte; ++ ++ if (!page) ++ return NULL; ++ ++ pte = kmap(page); ++ if (pte) { ++ clear_page(pte); ++ __flush_page_to_ram(pte); ++ flush_tlb_kernel_page(pte); ++ nocache_page(pte); ++ } ++ kunmap(pte); ++ ++ return page; ++} ++ ++extern inline void pte_free(struct page *page) ++{ ++ __free_page(page); ++} ++ ++/* ++ * In our implementation, each pgd entry contains 1 pmd that is never allocated ++ * or freed. pgd_present is always 1, so this should never be called. -NL ++ */ ++#define pmd_free(pmd) BUG() ++ ++extern inline void pgd_free(pgd_t *pgd) ++{ ++ free_page((unsigned long) pgd); ++} ++ ++extern inline pgd_t *pgd_alloc(struct mm_struct *mm) ++{ ++ pgd_t *new_pgd; ++ ++ new_pgd = (pgd_t *)__get_free_page(GFP_DMA | __GFP_NOWARN); ++ if (!new_pgd) ++ return NULL; ++ memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE); ++ memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT); ++ return new_pgd; ++} ++ ++#define pgd_populate(mm, pmd, pte) BUG() ++ ++#endif /* M68K_CF_PGALLOC_H */ +--- /dev/null ++++ b/include/asm-m68k/cf_pgtable.h +@@ -0,0 +1,357 @@ ++#ifndef _CF_PGTABLE_H ++#define _CF_PGTABLE_H ++ ++#include <asm/cfmmu.h> ++#include <asm/page.h> ++ ++#ifndef __ASSEMBLY__ ++#include <asm/virtconvert.h> ++#include <linux/linkage.h> ++ ++/* For virtual address to physical address conversion */ ++#define VTOP(addr) __pa(addr) ++#define PTOV(addr) __va(addr) ++ ++ ++#endif /* !__ASSEMBLY__ */ ++ ++/* Page protection values within PTE. */ ++ ++/* MMUDR bits, in proper place. */ ++#define CF_PAGE_LOCKED (0x00000002) ++#define CF_PAGE_EXEC (0x00000004) ++#define CF_PAGE_WRITABLE (0x00000008) ++#define CF_PAGE_READABLE (0x00000010) ++#define CF_PAGE_SYSTEM (0x00000020) ++#define CF_PAGE_COPYBACK (0x00000040) ++#define CF_PAGE_NOCACHE (0x00000080) ++ ++#define CF_CACHEMASK (~0x00000040) ++#define CF_PAGE_MMUDR_MASK (0x000000fe) ++ ++#define _PAGE_NOCACHE030 (CF_PAGE_NOCACHE) ++ ++/* MMUTR bits, need shifting down. */ ++#define CF_PAGE_VALID (0x00000400) ++#define CF_PAGE_SHARED (0x00000800) ++ ++#define CF_PAGE_MMUTR_MASK (0x00000c00) ++#define CF_PAGE_MMUTR_SHIFT (10) ++#define CF_ASID_MMU_SHIFT (2) ++ ++/* Fake bits, not implemented in CF, will get masked out before ++ hitting hardware, and might go away altogether once this port is ++ complete. 
*/ ++#if PAGE_SHIFT < 13 ++#error COLDFIRE Error: Pages must be at least 8k in size ++#endif ++#define CF_PAGE_ACCESSED (0x00001000) ++#define CF_PAGE_FILE (0x00000200) ++#define CF_PAGE_DIRTY (0x00000001) ++ ++#define _PAGE_CACHE040 0x020 /* 68040 cache mode, cachable, copyback */ ++#define _PAGE_NOCACHE_S 0x040 /* 68040 no-cache mode, serialized */ ++#define _PAGE_NOCACHE 0x060 /* 68040 cache mode, non-serialized */ ++#define _PAGE_CACHE040W 0x000 /* 68040 cache mode, cachable, write-through */ ++#define _DESCTYPE_MASK 0x003 ++#define _CACHEMASK040 (~0x060) ++#define _PAGE_GLOBAL040 0x400 /* 68040 global bit, used for kva descs */ ++ ++ ++/* Externally used page protection values. */ ++#define _PAGE_PRESENT (CF_PAGE_VALID) ++#define _PAGE_ACCESSED (CF_PAGE_ACCESSED) ++#define _PAGE_DIRTY (CF_PAGE_DIRTY) ++#define _PAGE_READWRITE (CF_PAGE_WRITABLE \ ++ | CF_PAGE_READABLE \ ++ | CF_PAGE_SYSTEM \ ++ | CF_PAGE_SHARED) ++ ++/* Compound page protection values. */ ++#define PAGE_NONE __pgprot(CF_PAGE_VALID \ ++ | CF_PAGE_ACCESSED) ++ ++#define PAGE_SHARED __pgprot(CF_PAGE_VALID \ ++ | CF_PAGE_ACCESSED \ ++ | CF_PAGE_SHARED) ++ ++#define PAGE_INIT __pgprot(CF_PAGE_VALID \ ++ | CF_PAGE_WRITABLE \ ++ | CF_PAGE_READABLE \ ++ | CF_PAGE_EXEC \ ++ | CF_PAGE_SYSTEM \ ++ | CF_PAGE_SHARED) ++ ++#define PAGE_KERNEL __pgprot(CF_PAGE_VALID \ ++ | CF_PAGE_WRITABLE \ ++ | CF_PAGE_READABLE \ ++ | CF_PAGE_EXEC \ ++ | CF_PAGE_SYSTEM \ ++ | CF_PAGE_SHARED \ ++ | CF_PAGE_ACCESSED) ++ ++#define PAGE_COPY __pgprot(CF_PAGE_VALID \ ++ | CF_PAGE_ACCESSED \ ++ | CF_PAGE_READABLE \ ++ | CF_PAGE_DIRTY) ++/* ++ * Page protections for initialising protection_map. See mm/mmap.c ++ * for use. In general, the bit positions are xwr, and P-items are ++ * private, the S-items are shared. ++ */ ++ ++#define __P000 PAGE_NONE ++#define __P100 __pgprot(CF_PAGE_VALID \ ++ | CF_PAGE_ACCESSED \ ++ | CF_PAGE_EXEC) ++#define __P010 __pgprot(CF_PAGE_VALID \ ++ | CF_PAGE_WRITABLE \ ++ | CF_PAGE_ACCESSED) ++#define __P110 __pgprot(CF_PAGE_VALID \ ++ | CF_PAGE_ACCESSED \ ++ | CF_PAGE_WRITABLE \ ++ | CF_PAGE_EXEC) ++#define __P001 __pgprot(CF_PAGE_VALID \ ++ | CF_PAGE_ACCESSED \ ++ | CF_PAGE_READABLE) ++#define __P101 __pgprot(CF_PAGE_VALID \ ++ | CF_PAGE_ACCESSED \ ++ | CF_PAGE_READABLE \ ++ | CF_PAGE_EXEC) ++#define __P011 __pgprot(CF_PAGE_VALID \ ++ | CF_PAGE_READABLE \ ++ | CF_PAGE_WRITABLE \ ++ | CF_PAGE_ACCESSED) ++#define __P111 __pgprot(CF_PAGE_VALID \ ++ | CF_PAGE_ACCESSED \ ++ | CF_PAGE_WRITABLE \ ++ | CF_PAGE_READABLE \ ++ | CF_PAGE_EXEC) ++ ++#define __S000 PAGE_NONE ++#define __S100 __pgprot(CF_PAGE_VALID \ ++ | CF_PAGE_ACCESSED \ ++ | CF_PAGE_SHARED \ ++ | CF_PAGE_EXEC) ++#define __S010 PAGE_SHARED ++#define __S110 __pgprot(CF_PAGE_VALID \ ++ | CF_PAGE_ACCESSED \ ++ | CF_PAGE_SHARED \ ++ | CF_PAGE_EXEC) ++#define __S001 __pgprot(CF_PAGE_VALID \ ++ | CF_PAGE_ACCESSED \ ++ | CF_PAGE_SHARED \ ++ | CF_PAGE_READABLE) ++#define __S101 __pgprot(CF_PAGE_VALID \ ++ | CF_PAGE_ACCESSED \ ++ | CF_PAGE_SHARED \ ++ | CF_PAGE_READABLE \ ++ | CF_PAGE_EXEC) ++#define __S011 __pgprot(CF_PAGE_VALID \ ++ | CF_PAGE_ACCESSED \ ++ | CF_PAGE_SHARED \ ++ | CF_PAGE_READABLE) ++#define __S111 __pgprot(CF_PAGE_VALID \ ++ | CF_PAGE_ACCESSED \ ++ | CF_PAGE_SHARED \ ++ | CF_PAGE_READABLE \ ++ | CF_PAGE_EXEC) ++ ++#define PTE_MASK PAGE_MASK ++#define CF_PAGE_CHG_MASK (PTE_MASK | CF_PAGE_ACCESSED | CF_PAGE_DIRTY) ++ ++#ifndef __ASSEMBLY__ ++ ++/* ++ * Conversion functions: convert a page and protection to a page entry, ++ * and a page entry and page directory to 
the page they refer to. ++ */ ++#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) ++ ++extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot) ++{ ++ pte_val(pte) = (pte_val(pte) & CF_PAGE_CHG_MASK) | pgprot_val(newprot); ++ return pte; ++} ++ ++#define pmd_set(pmdp, ptep) do {} while (0) ++ ++extern inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp) ++{ ++ pgd_val(*pgdp) = virt_to_phys(pmdp); ++} ++ ++#define __pte_page(pte) \ ++ ((unsigned long) ((pte_val(pte) & CF_PAGE_PGNUM_MASK) + PAGE_OFFSET)) ++#define __pmd_page(pmd) ((unsigned long) (pmd_val(pmd))) ++ ++extern inline int pte_none(pte_t pte) ++{ ++ return !pte_val(pte); ++} ++extern inline int pte_present(pte_t pte) ++{ ++ return pte_val(pte) & CF_PAGE_VALID; ++} ++extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, ++ pte_t *ptep) ++{ ++ pte_val(*ptep) = 0; ++} ++ ++#define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT) ++#define pte_page(pte) virt_to_page(__pte_page(pte)) ++ ++extern inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); } ++#define pmd_none(pmd) pmd_none2(&(pmd)) ++extern inline int pmd_bad2(pmd_t *pmd) { return 0; } ++#define pmd_bad(pmd) pmd_bad2(&(pmd)) ++#define pmd_present(pmd) (!pmd_none2(&(pmd))) ++extern inline void pmd_clear(pmd_t *pmdp) { pmd_val(*pmdp) = 0; } ++ ++extern inline int pgd_none(pgd_t pgd) { return 0; } ++extern inline int pgd_bad(pgd_t pgd) { return 0; } ++extern inline int pgd_present(pgd_t pgd) { return 1; } ++extern inline void pgd_clear(pgd_t *pgdp) {} ++ ++ ++#define pte_ERROR(e) \ ++ printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \ ++ __FILE__, __LINE__, pte_val(e)) ++#define pmd_ERROR(e) \ ++ printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \ ++ __FILE__, __LINE__, pmd_val(e)) ++#define pgd_ERROR(e) \ ++ printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \ ++ __FILE__, __LINE__, pgd_val(e)) ++ ++ ++/* ++ * The following only work if pte_present() is true. ++ * Undefined behaviour if not... 
++ * [we have the full set here even if they don't change from m68k] ++ */ ++extern inline int pte_read(pte_t pte) \ ++ { return pte_val(pte) & CF_PAGE_READABLE; } ++extern inline int pte_write(pte_t pte) \ ++ { return pte_val(pte) & CF_PAGE_WRITABLE; } ++extern inline int pte_exec(pte_t pte) \ ++ { return pte_val(pte) & CF_PAGE_EXEC; } ++extern inline int pte_dirty(pte_t pte) \ ++ { return pte_val(pte) & CF_PAGE_DIRTY; } ++extern inline int pte_young(pte_t pte) \ ++ { return pte_val(pte) & CF_PAGE_ACCESSED; } ++extern inline int pte_file(pte_t pte) \ ++ { return pte_val(pte) & CF_PAGE_FILE; } ++ ++extern inline pte_t pte_wrprotect(pte_t pte) \ ++ { pte_val(pte) &= ~CF_PAGE_WRITABLE; return pte; } ++extern inline pte_t pte_rdprotect(pte_t pte) \ ++ { pte_val(pte) &= ~CF_PAGE_READABLE; return pte; } ++extern inline pte_t pte_exprotect(pte_t pte) \ ++ { pte_val(pte) &= ~CF_PAGE_EXEC; return pte; } ++extern inline pte_t pte_mkclean(pte_t pte) \ ++ { pte_val(pte) &= ~CF_PAGE_DIRTY; return pte; } ++extern inline pte_t pte_mkold(pte_t pte) \ ++ { pte_val(pte) &= ~CF_PAGE_ACCESSED; return pte; } ++extern inline pte_t pte_mkwrite(pte_t pte) \ ++ { pte_val(pte) |= CF_PAGE_WRITABLE; return pte; } ++extern inline pte_t pte_mkread(pte_t pte) \ ++ { pte_val(pte) |= CF_PAGE_READABLE; return pte; } ++extern inline pte_t pte_mkexec(pte_t pte) \ ++ { pte_val(pte) |= CF_PAGE_EXEC; return pte; } ++extern inline pte_t pte_mkdirty(pte_t pte) \ ++ { pte_val(pte) |= CF_PAGE_DIRTY; return pte; } ++extern inline pte_t pte_mkyoung(pte_t pte) \ ++ { pte_val(pte) |= CF_PAGE_ACCESSED; return pte; } ++extern inline pte_t pte_mknocache(pte_t pte) \ ++ { pte_val(pte) |= 0x80 | (pte_val(pte) & ~0x40); return pte; } ++extern inline pte_t pte_mkcache(pte_t pte) \ ++ { pte_val(pte) &= ~CF_PAGE_NOCACHE; return pte; } ++ ++#define swapper_pg_dir kernel_pg_dir ++extern pgd_t kernel_pg_dir[PTRS_PER_PGD]; ++ ++/* Find an entry in a pagetable directory. */ ++#define pgd_index(address) ((address) >> PGDIR_SHIFT) ++ ++#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) ++ ++/* Find an entry in a kernel pagetable directory. */ ++#define pgd_offset_k(address) pgd_offset(&init_mm, address) ++ ++/* Find an entry in the second-level pagetable. */ ++extern inline pmd_t *pmd_offset(pgd_t *pgd, unsigned long address) ++{ ++ return (pmd_t *) pgd; ++} ++ ++/* Find an entry in the third-level pagetable. */ ++#define __pte_offset(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) ++#define pte_offset_kernel(dir, address) ((pte_t *) __pmd_page(*(dir)) + \ ++ __pte_offset(address)) ++ ++/* Disable caching for page at given kernel virtual address. */ ++static inline void nocache_page(void *vaddr) ++{ ++ pgd_t *dir; ++ pmd_t *pmdp; ++ pte_t *ptep; ++ unsigned long addr = (unsigned long)vaddr; ++ ++ dir = pgd_offset_k(addr); ++ pmdp = pmd_offset(dir, addr); ++ ptep = pte_offset_kernel(pmdp, addr); ++ *ptep = pte_mknocache(*ptep); ++} ++ ++/* Enable caching for page at given kernel virtual address. 
*/ ++static inline void cache_page(void *vaddr) ++{ ++ pgd_t *dir; ++ pmd_t *pmdp; ++ pte_t *ptep; ++ unsigned long addr = (unsigned long)vaddr; ++ ++ dir = pgd_offset_k(addr); ++ pmdp = pmd_offset(dir, addr); ++ ptep = pte_offset_kernel(pmdp, addr); ++ *ptep = pte_mkcache(*ptep); ++} ++ ++#define PTE_FILE_MAX_BITS 21 ++#define PTE_FILE_SHIFT 11 ++ ++static inline unsigned long pte_to_pgoff(pte_t pte) ++{ ++ return pte_val(pte) >> PTE_FILE_SHIFT; ++} ++ ++static inline pte_t pgoff_to_pte(unsigned pgoff) ++{ ++ pte_t pte = __pte((pgoff << PTE_FILE_SHIFT) + CF_PAGE_FILE); ++ return pte; ++} ++ ++/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */ ++#define __swp_entry(type, offset) ((swp_entry_t) { (type) | \ ++ (offset << PTE_FILE_SHIFT) }) ++#define __swp_type(x) ((x).val & 0xFF) ++#define __swp_offset(x) ((x).val >> PTE_FILE_SHIFT) ++#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) ++#define __swp_entry_to_pte(x) (__pte((x).val)) ++ ++#define pmd_page(pmd) virt_to_page(__pmd_page(pmd)) ++ ++#define pte_offset_map(pmdp, address) ((pte_t *)__pmd_page(*pmdp) + \ ++ __pte_offset(address)) ++#define pte_offset_map_nested(pmdp, address) pte_offset_map(pmdp, address) ++#define pte_unmap(pte) kunmap(pte) ++#define pte_unmap_nested(pte) kunmap(pte) ++ ++#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) ++#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) ++ ++ ++#endif /* !__ASSEMBLY__ */ ++#endif /* !_CF_PGTABLE_H */ +--- /dev/null ++++ b/include/asm-m68k/cf_tlbflush.h +@@ -0,0 +1,59 @@ ++#ifndef M68K_CF_TLBFLUSH_H ++#define M68K_CF_TLBFLUSH_H ++ ++#include <asm/coldfire.h> ++ ++/* Flush all userspace mappings. */ ++static inline void flush_tlb_all(void) ++{ ++ preempt_disable(); ++ *MMUOR = MMUOR_CNL; ++ preempt_enable(); ++} ++ ++/* Clear user TLB entries within the context named in mm */ ++static inline void flush_tlb_mm(struct mm_struct *mm) ++{ ++ preempt_disable(); ++ *MMUOR = MMUOR_CNL; ++ preempt_enable(); ++} ++ ++/* Flush a single TLB page. */ ++static inline void flush_tlb_page(struct vm_area_struct *vma, ++ unsigned long addr) ++{ ++ preempt_disable(); ++ *MMUOR = MMUOR_CNL; ++ preempt_enable(); ++} ++/* Flush a range of pages from TLB. */ ++ ++static inline void flush_tlb_range(struct mm_struct *mm, ++ unsigned long start, unsigned long end) ++{ ++ preempt_disable(); ++ *MMUOR = MMUOR_CNL; ++ preempt_enable(); ++} ++ ++/* Flush kernel page from TLB. */ ++static inline void flush_tlb_kernel_page(void *addr) ++{ ++ preempt_disable(); ++ *MMUOR = MMUOR_CNL; ++ preempt_enable(); ++} ++ ++static inline void flush_tlb_kernel_range(unsigned long start, ++ unsigned long end) ++{ ++ flush_tlb_all(); ++} ++ ++extern inline void flush_tlb_pgtables(struct mm_struct *mm, ++ unsigned long start, unsigned long end) ++{ ++} ++ ++#endif /* M68K_CF_TLBFLUSH_H */ +--- /dev/null ++++ b/include/asm-m68k/cf_uaccess.h +@@ -0,0 +1,376 @@ ++#ifndef __M68K_CF_UACCESS_H ++#define __M68K_CF_UACCESS_H ++ ++/* ++ * User space memory access functions ++ */ ++ ++/* The "moves" command is not available in the CF instruction set. 
*/ ++#include <linux/compiler.h> ++#include <linux/errno.h> ++#include <linux/types.h> ++#include <linux/sched.h> ++#include <asm/segment.h> ++ ++#define VERIFY_READ 0 ++#define VERIFY_WRITE 1 ++ ++/* We let the MMU do all checking */ ++#define access_ok(type, addr, size) 1 ++ ++/* ++ * The exception table consists of pairs of addresses: the first is the ++ * address of an instruction that is allowed to fault, and the second is ++ * the address at which the program should continue. No registers are ++ * modified, so it is entirely up to the continuation code to figure out ++ * what to do. ++ * ++ * All the routines below use bits of fixup code that are out of line ++ * with the main instruction path. This means when everything is well, ++ * we don't even have to jump over them. Further, they do not intrude ++ * on our cache or tlb entries. ++ */ ++ ++struct exception_table_entry ++{ ++ unsigned long insn, fixup; ++}; ++ ++extern int __put_user_bad(void); ++extern int __get_user_bad(void); ++ ++#define __put_user_asm(res, x, ptr, bwl, reg, err) \ ++asm volatile ("\n" \ ++ "1: move."#bwl" %2,%1\n" \ ++ "2:\n" \ ++ " .section .fixup,\"ax\"\n" \ ++ " .even\n" \ ++ "10: moveq.l %3,%0\n" \ ++ " jra 2b\n" \ ++ " .previous\n" \ ++ "\n" \ ++ " .section __ex_table,\"a\"\n" \ ++ " .align 4\n" \ ++ " .long 1b,10b\n" \ ++ " .long 2b,10b\n" \ ++ " .previous" \ ++ : "+d" (res), "=m" (*(ptr)) \ ++ : #reg (x), "i" (err)) ++ ++/* ++ * These are the main single-value transfer routines. They automatically ++ * use the right size if we just have the right pointer type. ++ */ ++ ++#define __put_user(x, ptr) \ ++({ \ ++ typeof(*(ptr)) __pu_val = (x); \ ++ int __pu_err = 0; \ ++ __chk_user_ptr(ptr); \ ++ switch (sizeof (*(ptr))) { \ ++ case 1: \ ++ __put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT); \ ++ break; \ ++ case 2: \ ++ __put_user_asm(__pu_err, __pu_val, ptr, w, d, -EFAULT); \ ++ break; \ ++ case 4: \ ++ __put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \ ++ break; \ ++ case 8: \ ++ { \ ++ const void __user *__pu_ptr = (ptr); \ ++ asm volatile ("\n" \ ++ "1: move.l %2,(%1)+\n" \ ++ "2: move.l %R2,(%1)\n" \ ++ "3:\n" \ ++ " .section .fixup,\"ax\"\n" \ ++ " .even\n" \ ++ "10: movel %3,%0\n" \ ++ " jra 3b\n" \ ++ " .previous\n" \ ++ "\n" \ ++ " .section __ex_table,\"a\"\n" \ ++ " .align 4\n" \ ++ " .long 1b,10b\n" \ ++ " .long 2b,10b\n" \ ++ " .long 3b,10b\n" \ ++ " .previous" \ ++ : "+d" (__pu_err), "+a" (__pu_ptr) \ ++ : "r" (__pu_val), "i" (-EFAULT) \ ++ : "memory"); \ ++ break; \ ++ } \ ++ default: \ ++ __pu_err = __put_user_bad(); \ ++ break; \ ++ } \ ++ __pu_err; \ ++}) ++#define put_user(x, ptr) __put_user(x, ptr) ++ ++ ++#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \ ++ type __gu_val; \ ++ asm volatile ("\n" \ ++ "1: move."#bwl" %2,%1\n" \ ++ "2:\n" \ ++ " .section .fixup,\"ax\"\n" \ ++ " .even\n" \ ++ "10: move.l %3,%0\n" \ ++ " subl %1,%1\n" \ ++ " jra 2b\n" \ ++ " .previous\n" \ ++ "\n" \ ++ " .section __ex_table,\"a\"\n" \ ++ " .align 4\n" \ ++ " .long 1b,10b\n" \ ++ " .previous" \ ++ : "+d" (res), "=&" #reg (__gu_val) \ ++ : "m" (*(ptr)), "i" (err)); \ ++ (x) = (typeof(*(ptr)))(unsigned long)__gu_val; \ ++}) ++ ++#define __get_user(x, ptr) \ ++({ \ ++ int __gu_err = 0; \ ++ __chk_user_ptr(ptr); \ ++ switch (sizeof(*(ptr))) { \ ++ case 1: \ ++ __get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT); \ ++ break; \ ++ case 2: \ ++ __get_user_asm(__gu_err, x, ptr, u16, w, d, -EFAULT); \ ++ break; \ ++ case 4: \ ++ __get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT); \ ++ 
break; \ ++/* case 8: disabled because gcc-4.1 has a broken typeof \ ++ { \ ++ const void *__gu_ptr = (ptr); \ ++ u64 __gu_val; \ ++ asm volatile ("\n" \ ++ "1: move.l (%2)+,%1\n" \ ++ "2: move.l (%2),%R1\n" \ ++ "3:\n" \ ++ " .section .fixup,\"ax\"\n" \ ++ " .even\n" \ ++ "10: move.l %3,%0\n" \ ++ " subl %1,%1\n" \ ++ " subl %R1,%R1\n" \ ++ " jra 3b\n" \ ++ " .previous\n" \ ++ "\n" \ ++ " .section __ex_table,\"a\"\n" \ ++ " .align 4\n" \ ++ " .long 1b,10b\n" \ ++ " .long 2b,10b\n" \ ++ " .previous" \ ++ : "+d" (__gu_err), "=&r" (__gu_val), \ ++ "+a" (__gu_ptr) \ ++ : "i" (-EFAULT) \ ++ : "memory"); \ ++ (x) = (typeof(*(ptr)))__gu_val; \ ++ break; \ ++ } */ \ ++ default: \ ++ __gu_err = __get_user_bad(); \ ++ break; \ ++ } \ ++ __gu_err; \ ++}) ++#define get_user(x, ptr) __get_user(x, ptr) ++ ++unsigned long __generic_copy_from_user(void *to, const void __user *from, ++ unsigned long n); ++unsigned long __generic_copy_to_user(void __user *to, const void *from, ++ unsigned long n); ++ ++#define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\ ++ asm volatile ("\n" \ ++ "1: move."#s1" (%2)+,%3\n" \ ++ " move."#s1" %3,(%1)+\n" \ ++ "2: move."#s2" (%2)+,%3\n" \ ++ " move."#s2" %3,(%1)+\n" \ ++ " .ifnc \""#s3"\",\"\"\n" \ ++ "3: move."#s3" (%2)+,%3\n" \ ++ " move."#s3" %3,(%1)+\n" \ ++ " .endif\n" \ ++ "4:\n" \ ++ " .section __ex_table,\"a\"\n" \ ++ " .align 4\n" \ ++ " .long 1b,10f\n" \ ++ " .long 2b,20f\n" \ ++ " .ifnc \""#s3"\",\"\"\n" \ ++ " .long 3b,30f\n" \ ++ " .endif\n" \ ++ " .previous\n" \ ++ "\n" \ ++ " .section .fixup,\"ax\"\n" \ ++ " .even\n" \ ++ "10: clr."#s1" (%1)+\n" \ ++ "20: clr."#s2" (%1)+\n" \ ++ " .ifnc \""#s3"\",\"\"\n" \ ++ "30: clr."#s3" (%1)+\n" \ ++ " .endif\n" \ ++ " moveq.l #"#n",%0\n" \ ++ " jra 4b\n" \ ++ " .previous\n" \ ++ : "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp) \ ++ : : "memory") ++ ++static __always_inline unsigned long ++__constant_copy_from_user(void *to, const void __user *from, unsigned long n) ++{ ++ unsigned long res = 0, tmp; ++ ++ switch (n) { ++ case 1: ++ __get_user_asm(res, *(u8 *)to, (u8 __user *)from, u8, b, d, 1); ++ break; ++ case 2: ++ __get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w, ++ d, 2); ++ break; ++ case 3: ++ __constant_copy_from_user_asm(res, to, from, tmp, 3, w, b,); ++ break; ++ case 4: ++ __get_user_asm(res, *(u32 *)to, (u32 __user *)from, u32, l, ++ r, 4); ++ break; ++ case 5: ++ __constant_copy_from_user_asm(res, to, from, tmp, 5, l, b,); ++ break; ++ case 6: ++ __constant_copy_from_user_asm(res, to, from, tmp, 6, l, w,); ++ break; ++ case 7: ++ __constant_copy_from_user_asm(res, to, from, tmp, 7, l, w, b); ++ break; ++ case 8: ++ __constant_copy_from_user_asm(res, to, from, tmp, 8, l, l,); ++ break; ++ case 9: ++ __constant_copy_from_user_asm(res, to, from, tmp, 9, l, l, b); ++ break; ++ case 10: ++ __constant_copy_from_user_asm(res, to, from, tmp, 10, l, l, w); ++ break; ++ case 12: ++ __constant_copy_from_user_asm(res, to, from, tmp, 12, l, l, l); ++ break; ++ default: ++ /* we limit the inlined version to 3 moves */ ++ return __generic_copy_from_user(to, from, n); ++ } ++ ++ return res; ++} ++ ++#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \ ++ asm volatile ("\n" \ ++ " move."#s1" (%2)+,%3\n" \ ++ "11: move."#s1" %3,(%1)+\n" \ ++ "12: move."#s2" (%2)+,%3\n" \ ++ "21: move."#s2" %3,(%1)+\n" \ ++ "22:\n" \ ++ " .ifnc \""#s3"\",\"\"\n" \ ++ " move."#s3" (%2)+,%3\n" \ ++ "31: move."#s3" %3,(%1)+\n" \ ++ "32:\n" \ ++ " .endif\n" \ ++ "4:\n" \ ++ "\n" \ ++ " .section 
__ex_table,\"a\"\n" \ ++ " .align 4\n" \ ++ " .long 11b,5f\n" \ ++ " .long 12b,5f\n" \ ++ " .long 21b,5f\n" \ ++ " .long 22b,5f\n" \ ++ " .ifnc \""#s3"\",\"\"\n" \ ++ " .long 31b,5f\n" \ ++ " .long 32b,5f\n" \ ++ " .endif\n" \ ++ " .previous\n" \ ++ "\n" \ ++ " .section .fixup,\"ax\"\n" \ ++ " .even\n" \ ++ "5: moveq.l #"#n",%0\n" \ ++ " jra 4b\n" \ ++ " .previous\n" \ ++ : "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp) \ ++ : : "memory") ++ ++static __always_inline unsigned long ++__constant_copy_to_user(void __user *to, const void *from, unsigned long n) ++{ ++ unsigned long res = 0, tmp; ++ ++ switch (n) { ++ case 1: ++ __put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1); ++ break; ++ case 2: ++ __put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, d, 2); ++ break; ++ case 3: ++ __constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,); ++ break; ++ case 4: ++ __put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4); ++ break; ++ case 5: ++ __constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,); ++ break; ++ case 6: ++ __constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,); ++ break; ++ case 7: ++ __constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b); ++ break; ++ case 8: ++ __constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,); ++ break; ++ case 9: ++ __constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b); ++ break; ++ case 10: ++ __constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w); ++ break; ++ case 12: ++ __constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l); ++ break; ++ default: ++ /* limit the inlined version to 3 moves */ ++ return __generic_copy_to_user(to, from, n); ++ } ++ ++ return res; ++} ++ ++#define __copy_from_user(to, from, n) \ ++(__builtin_constant_p(n) ? \ ++ __constant_copy_from_user(to, from, n) : \ ++ __generic_copy_from_user(to, from, n)) ++ ++#define __copy_to_user(to, from, n) \ ++(__builtin_constant_p(n) ? 
\ ++ __constant_copy_to_user(to, from, n) : \ ++ __generic_copy_to_user(to, from, n)) ++ ++#define __copy_to_user_inatomic __copy_to_user ++#define __copy_from_user_inatomic __copy_from_user ++ ++#define copy_from_user(to, from, n) __copy_from_user(to, from, n) ++#define copy_to_user(to, from, n) __copy_to_user(to, from, n) ++ ++long strncpy_from_user(char *dst, const char __user *src, long count); ++long strnlen_user(const char __user *src, long n); ++unsigned long __clear_user(void __user *to, unsigned long n); ++ ++#define clear_user __clear_user ++ ++#define strlen_user(str) strnlen_user(str, 32767) ++ ++#endif /* _M68K_CF_UACCESS_H */ +--- /dev/null ++++ b/include/asm-m68k/cfcache.h +@@ -0,0 +1,86 @@ ++/* ++ * include/asm-m68k/cfcache.h ++ */ ++#ifndef CF_CFCACHE_H ++#define CF_CFCACHE_H ++ ++#define CF_CACR_DEC (0x80000000) /* Data Cache Enable */ ++#define CF_CACR_DW (0x40000000) /* Data default Write-protect */ ++#define CF_CACR_DESB (0x20000000) /* Data Enable Store Buffer */ ++#define CF_CACR_DDPI (0x10000000) /* Data Disable CPUSHL Invalidate */ ++#define CF_CACR_DHLCK (0x08000000) /* 1/2 Data Cache Lock Mode */ ++#define CF_CACR_DDCM_00 (0x00000000) /* Cacheable writethrough imprecise */ ++#define CF_CACR_DDCM_01 (0x02000000) /* Cacheable copyback */ ++#define CF_CACR_DDCM_10 (0x04000000) /* Noncacheable precise */ ++#define CF_CACR_DDCM_11 (0x06000000) /* Noncacheable imprecise */ ++#define CF_CACR_DCINVA (0x01000000) /* Data Cache Invalidate All */ ++#define CF_CACR_IVO (0x00100000) /* Invalidate only */ ++#define CF_CACR_BEC (0x00080000) /* Branch Cache Enable */ ++#define CF_CACR_BCINVA (0x00040000) /* Branch Cache Invalidate All */ ++#define CF_CACR_IEC (0x00008000) /* Instruction Cache Enable */ ++#define CF_CACR_SPA (0x00004000) /* Search by Physical Address */ ++#define CF_CACR_DNFB (0x00002000) /* Default cache-inhibited fill buf */ ++#define CF_CACR_IDPI (0x00001000) /* Instr Disable CPUSHL Invalidate */ ++#define CF_CACR_IHLCK (0x00000800) /* 1/2 Instruction Cache Lock Mode */ ++#define CF_CACR_IDCM (0x00000400) /* Noncacheable Instr default mode */ ++#define CF_CACR_ICINVA (0x00000100) /* Instr Cache Invalidate All */ ++#define CF_CACR_EUSP (0x00000020) /* Switch stacks in user mode */ ++ ++#define DCACHE_LINE_SIZE 0x0010 /* bytes per line */ ++#define DCACHE_WAY_SIZE 0x2000 /* words per cache block */ ++#define CACHE_DISABLE_MODE (CF_CACR_DCINVA+CF_CACR_BCINVA+CF_CACR_ICINVA) ++#ifdef CONFIG_M5445X_DISABLE_CACHE ++/* disable cache for testing rev0 silicon */ ++#define CACHE_INITIAL_MODE (CF_CACR_EUSP) ++#else ++#define CACHE_INITIAL_MODE (CF_CACR_DEC+CF_CACR_BEC+CF_CACR_IEC+CF_CACR_EUSP) ++#endif ++ ++#define _DCACHE_SIZE (2*16384) ++#define _ICACHE_SIZE (2*16384) ++ ++#define _SET_SHIFT 4 ++ ++/* ++ * Masks for cache sizes. Programming note: because the set size is a ++ * power of two, the mask is also the last address in the set. ++ * This may need to be #ifdef for other Coldfire processors. 
++ */
++
++#define _DCACHE_SET_MASK ((_DCACHE_SIZE/64-1)<<_SET_SHIFT)
++#define _ICACHE_SET_MASK ((_ICACHE_SIZE/64-1)<<_SET_SHIFT)
++#define LAST_DCACHE_ADDR _DCACHE_SET_MASK
++#define LAST_ICACHE_ADDR _ICACHE_SET_MASK
++
++
++#ifndef __ASSEMBLY__
++
++extern void DcacheFlushInvalidate(void);
++
++extern void DcacheDisable(void);
++extern void DcacheEnable(void);
++
++/******************************************************************************/
++/*** Unimplemented Cache functionality ***/
++/******************************************************************************/
++#define preDcacheInvalidateBlockMark()
++#define postDcacheInvalidateBlockMark()
++#define DcacheZeroBlock(p, l)		fast_bzero((char *)(p), (long)(l))
++#define loadDcacheInvalidateBlock()	ASSERT(!"Not Implemented on V4e")
++#define IcacheInvalidateBlock()		ASSERT(!"Not Implemented on V4e")
++
++/******************************************************************************/
++/*** Redundant Cache functionality on ColdFire ***/
++/******************************************************************************/
++#define DcacheInvalidateBlock(p, l)	DcacheFlushInvalidateCacheBlock(p, l)
++#define DcacheFlushCacheBlock(p, l)	DcacheFlushInvalidateCacheBlock(p, l)
++#define DcacheFlushBlock(p, l)		DcacheFlushInvalidateCacheBlock(p, l)
++
++extern void DcacheFlushInvalidateCacheBlock(void *start, unsigned long size);
++extern void FLASHDcacheFlushInvalidate(void);
++
++extern void cacr_set(unsigned long x);
++
++#endif /* !__ASSEMBLY__ */
++
++#endif /* CF_CFCACHE_H */
+--- /dev/null
++++ b/include/asm-m68k/cfmmu.h
+@@ -0,0 +1,104 @@
++/*
++ * Definitions for Coldfire V4e MMU
++ */
++#include <asm/movs.h>
++
++#ifndef __CF_MMU_H__
++#define __CF_MMU_H__
++
++
++#define MMU_BASE 0xE1000000
++
++
++#define MMUCR (MMU_BASE+0x00)
++#define MMUCR_ASMN 1
++#define MMUCR_ASM (1<<MMUCR_ASMN)
++#define MMUCR_ENN 0
++#define MMUCR_EN (1<<MMUCR_ENN)
++
++#define MMUOR REG16(MMU_BASE+0x04+0x02)
++#define MMUOR_AAN 16
++#define MMUOR_AA (0xffff<<MMUOR_AAN)
++#define MMUOR_STLBN 8
++#define MMUOR_STLB (1<<MMUOR_STLBN)
++#define MMUOR_CAN 7
++#define MMUOR_CA (1<<MMUOR_CAN)
++#define MMUOR_CNLN 6
++#define MMUOR_CNL (1<<MMUOR_CNLN)
++#define MMUOR_CASN 5
++#define MMUOR_CAS (1<<MMUOR_CASN)
++#define MMUOR_ITLBN 4
++#define MMUOR_ITLB (1<<MMUOR_ITLBN)
++#define MMUOR_ADRN 3
++#define MMUOR_ADR (1<<MMUOR_ADRN)
++#define MMUOR_RWN 2
++#define MMUOR_RW (1<<MMUOR_RWN)
++#define MMUOR_ACCN 1
++#define MMUOR_ACC (1<<MMUOR_ACCN)
++#define MMUOR_UAAN 0
++#define MMUOR_UAA (1<<MMUOR_UAAN)
++
++#define MMUSR REG32(MMU_BASE+0x08)
++#define MMUSR_SPFN 5
++#define MMUSR_SPF (1<<MMUSR_SPFN)
++#define MMUSR_RFN 4
++#define MMUSR_RF (1<<MMUSR_RFN)
++#define MMUSR_WFN 3
++#define MMUSR_WF (1<<MMUSR_WFN)
++#define MMUSR_HITN 1
++#define MMUSR_HIT (1<<MMUSR_HITN)
++
++#define MMUAR REG32(MMU_BASE+0x10)
++#define MMUAR_VPN 1
++#define MMUAR_VP (0xfffffffe)
++#define MMUAR_SN 0
++#define MMUAR_S (1<<MMUAR_SN)
++
++#define MMUTR REG32(MMU_BASE+0x14)
++#define MMUTR_VAN 10
++#define MMUTR_VA (0xfffffc00)
++#define MMUTR_IDN 2
++#define MMUTR_ID (0xff<<MMUTR_IDN)
++#define MMUTR_SGN 1
++#define MMUTR_SG (1<<MMUTR_SGN)
++#define MMUTR_VN 0
++#define MMUTR_V (1<<MMUTR_VN)
++
++#define MMUDR REG32(MMU_BASE+0x18)
++#define MMUDR_PAN 10
++#define MMUDR_PA (0xfffffc00)
++#define MMUDR_SZN 8
++#define MMUDR_SZ_MASK (0x2<<MMUDR_SZN)
++#define MMUDR_SZ1M (0<<MMUDR_SZN)
++#define MMUDR_SZ4K (1<<MMUDR_SZN)
++#define MMUDR_SZ8K (2<<MMUDR_SZN)
++#define MMUDR_SZ16M (3<<MMUDR_SZN)
++#define MMUDR_CMN 6 ++#define MMUDR_INC (2<<MMUDR_CMN) ++#define MMUDR_IC (0<<MMUDR_CMN) ++#define MMUDR_DWT (0<<MMUDR_CMN) ++#define MMUDR_DCB (1<<MMUDR_CMN) ++#define MMUDR_DNCP (2<<MMUDR_CMN) ++#define MMUDR_DNCIP (3<<MMUDR_CMN) ++#define MMUDR_SPN 5 ++#define MMUDR_SP (1<<MMUDR_SPN) ++#define MMUDR_RN 4 ++#define MMUDR_R (1<<MMUDR_RN) ++#define MMUDR_WN 3 ++#define MMUDR_W (1<<MMUDR_WN) ++#define MMUDR_XN 2 ++#define MMUDR_X (1<<MMUDR_XN) ++#define MMUDR_LKN 1 ++#define MMUDR_LK (1<<MMUDR_LKN) ++ ++ ++#ifndef __ASSEMBLY__ ++#define CF_PMEGS_NUM 256 ++#define CF_INVALID_CONTEXT 255 ++#define CF_PAGE_PGNUM_MASK (PAGE_MASK) ++ ++extern int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, ++ int extension_word); ++#endif /* __ASSEMBLY__*/ ++ ++#endif /* !__CF_MMU_H__ */ +--- /dev/null ++++ b/include/asm-m68k/coldfire.h +@@ -0,0 +1,38 @@ ++#ifndef _COLDFIRE_H_ ++#define _COLDFIRE_H_ ++ ++#define MCF_MBAR 0x0 ++#define MCF_RAMBAR1 0x40000000 ++#define MCF_SRAM 0x80000000 ++#define MCF_CLK CONFIG_MCFCLK ++#define MCF_BUSCLK (CONFIG_MCFCLK/2) ++ ++#ifdef __ASSEMBLY__ ++#define REG32 ++#define REG16 ++#define REG08 ++#else /* __ASSEMBLY__ */ ++#define REG32(x) ((volatile unsigned long *)(x)) ++#define REG16(x) ((volatile unsigned short *)(x)) ++#define REG08(x) ((volatile unsigned char *)(x)) ++ ++#define MCF_REG32(x) *(volatile unsigned long *)(MCF_MBAR+(x)) ++#define MCF_REG16(x) *(volatile unsigned short *)(MCF_MBAR+(x)) ++#define MCF_REG08(x) *(volatile unsigned char *)(MCF_MBAR+(x)) ++ ++void cacr_set(unsigned long); ++unsigned long cacr_get(void); ++ ++#define coldfire_enable_irq0(irq) MCF_INTC0_CIMR = (irq); ++ ++#define coldfire_enable_irq1(irq) MCF_INTC1_CIMR = (irq); ++ ++#define coldfire_disable_irq0(irq) MCF_INTC0_SIMR = (irq); ++ ++#define coldfire_disable_irq1(irq) MCF_INTC1_SIMR = (irq); ++ ++#define getiprh() MCF_INTC0_IPRH ++ ++#endif /* __ASSEMBLY__ */ ++ ++#endif /* _COLDFIRE_H_ */ +--- /dev/null ++++ b/include/asm-m68k/coldfire_edma.h +@@ -0,0 +1,39 @@ ++#ifndef _LINUX_COLDFIRE_DMA_H ++#define _LINUX_COLDFIRE_DMA_H ++ ++#include <linux/interrupt.h> ++ ++#define EDMA_DRIVER_NAME "ColdFire-eDMA" ++#define DMA_DEV_MINOR 1 ++ ++#define EDMA_INT_CHANNEL_BASE 8 ++#define EDMA_INT_CONTROLLER_BASE 64 ++#define EDMA_CHANNELS 16 ++ ++#define EDMA_IRQ_LEVEL 5 ++ ++typedef irqreturn_t (*edma_irq_handler)(int, void *); ++typedef void (*edma_error_handler)(int, void *); ++ ++void set_edma_params(int channel, u32 source, u32 dest, ++ u32 attr, u32 soff, u32 nbytes, u32 slast, ++ u32 citer, u32 biter, u32 doff, u32 dlast_sga); ++ ++void start_edma_transfer(int channel, int major_int); ++ ++void stop_edma_transfer(int channel); ++ ++void confirm_edma_interrupt_handled(int channel); ++ ++void init_edma(void); ++ ++int request_edma_channel(int channel, ++ edma_irq_handler handler, ++ edma_error_handler error_handler, ++ void *dev, ++ spinlock_t *lock, ++ const char *device_id); ++ ++int free_edma_channel(int channel, void *dev); ++ ++#endif +--- /dev/null ++++ b/include/asm-m68k/mcfqspi.h +@@ -0,0 +1,50 @@ ++/****************************************************************************/ ++/* ++ * mcfqspi.h - Master QSPI controller for the ColdFire processors ++ * ++ * (C) Copyright 2005, Intec Automation, ++ * Mike Lavender (mike@steroidmicros) ++ * ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of the GNU General Public License as published by ++ the Free Software Foundation; either version 2 of the License, or ++ (at your option) any 
later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
++/* ------------------------------------------------------------------------- */
++
++#ifndef MCFQSPI_H_
++#define MCFQSPI_H_
++
++#define QSPI_CS_INIT	0x01
++#define QSPI_CS_ASSERT	0x02
++#define QSPI_CS_DROP	0x04
++
++struct coldfire_spi_master {
++	u16 bus_num;
++	u16 num_chipselect;
++	u8 irq_source;
++	u32 irq_vector;
++	u32 irq_mask;
++	u8 irq_lp;
++	u8 par_val;
++	void (*cs_control)(u8 cs, u8 command);
++};
++
++
++struct coldfire_spi_chip {
++	u8 mode;
++	u8 bits_per_word;
++	u8 del_cs_to_clk;
++	u8 del_after_trans;
++	u16 void_write_data;
++};
++#endif /*MCFQSPI_H_*/
+--- /dev/null
++++ b/include/asm-m68k/mcfsim.h
+@@ -0,0 +1,96 @@
++/*
++ * mcfsim.h -- ColdFire System Integration Module support.
++ *
++ * (C) Copyright 1999-2003, Greg Ungerer (gerg@snapgear.com)
++ * (C) Copyright 2000, Lineo Inc. (www.lineo.com)
++ */
++
++#ifndef mcfsim_h
++#define mcfsim_h
++
++#if defined(CONFIG_COLDFIRE)
++#include <asm/coldfire.h>
++#endif
++
++#if defined(CONFIG_M54455)
++#include <asm/mcf5445x_intc.h>
++#include <asm/mcf5445x_gpio.h>
++#include <asm/mcf5445x_i2c.h>
++#include <asm/mcf5445x_ccm.h>
++#include <asm/mcf5445x_pci.h>
++#include <asm/mcf5445x_pciarb.h>
++#include <asm/mcf5445x_eport.h>
++#endif
++
++/*
++ * Define the base address of the SIM within the MBAR address space.
++ */
++#define MCFSIM_BASE	0x0	/* Base address of SIM */
++
++/*
++ * Bit definitions for the ICR family of registers.
++ */
++#define MCFSIM_ICR_AUTOVEC	0x80	/* Auto-vectored intr */
++#define MCFSIM_ICR_LEVEL0	0x00	/* Level 0 intr */
++#define MCFSIM_ICR_LEVEL1	0x04	/* Level 1 intr */
++#define MCFSIM_ICR_LEVEL2	0x08	/* Level 2 intr */
++#define MCFSIM_ICR_LEVEL3	0x0c	/* Level 3 intr */
++#define MCFSIM_ICR_LEVEL4	0x10	/* Level 4 intr */
++#define MCFSIM_ICR_LEVEL5	0x14	/* Level 5 intr */
++#define MCFSIM_ICR_LEVEL6	0x18	/* Level 6 intr */
++#define MCFSIM_ICR_LEVEL7	0x1c	/* Level 7 intr */
++
++#define MCFSIM_ICR_PRI0		0x00	/* Priority 0 intr */
++#define MCFSIM_ICR_PRI1		0x01	/* Priority 1 intr */
++#define MCFSIM_ICR_PRI2		0x02	/* Priority 2 intr */
++#define MCFSIM_ICR_PRI3		0x03	/* Priority 3 intr */
++
++/*
++ * Bit definitions for the Interrupt Mask register (IMR).
++ */
++#define MCFSIM_IMR_EINT1	0x0002	/* External intr # 1 */
++#define MCFSIM_IMR_EINT2	0x0004	/* External intr # 2 */
++#define MCFSIM_IMR_EINT3	0x0008	/* External intr # 3 */
++#define MCFSIM_IMR_EINT4	0x0010	/* External intr # 4 */
++#define MCFSIM_IMR_EINT5	0x0020	/* External intr # 5 */
++#define MCFSIM_IMR_EINT6	0x0040	/* External intr # 6 */
++#define MCFSIM_IMR_EINT7	0x0080	/* External intr # 7 */
++
++#define MCFSIM_IMR_SWD		0x0100	/* Software Watchdog intr */
++#define MCFSIM_IMR_TIMER1	0x0200	/* TIMER 1 intr */
++#define MCFSIM_IMR_TIMER2	0x0400	/* TIMER 2 intr */
++#define MCFSIM_IMR_MBUS		0x0800	/* MBUS intr */
++#define MCFSIM_IMR_UART1	0x1000	/* UART 1 intr */
++#define MCFSIM_IMR_UART2	0x2000	/* UART 2 intr */
++
++/*
++ * Mask for all of the SIM devices. Some parts have more or less
++ * SIM devices. This is a catchall for the standard set.
++ */
++#ifndef MCFSIM_IMR_MASKALL
++#define MCFSIM_IMR_MASKALL	0x3ffe	/* All intr sources */
++#endif
++
++
++/*
++ * PIT interrupt settings, if not found in mXXXXsim.h file.
++ */
++#ifndef ICR_INTRCONF
++#define ICR_INTRCONF	0x2b	/* PIT1 level 5, priority 3 */
++#endif
++#ifndef MCFPIT_IMR
++#define MCFPIT_IMR	MCFINTC_IMRH
++#endif
++#ifndef MCFPIT_IMR_IBIT
++#define MCFPIT_IMR_IBIT	(1 << (MCFINT_PIT1 - 32))
++#endif
++
++
++#ifndef __ASSEMBLY__
++/*
++ * Definition for the interrupt auto-vectoring support.
++ */
++extern void mcf_autovector(unsigned int vec);
++#endif /* __ASSEMBLY__ */
++
++#endif /* mcfsim_h */
+--- /dev/null
++++ b/include/asm-m68k/mcfuart.h
+@@ -0,0 +1,180 @@
++/*
++ * mcfuart.h -- ColdFire internal UART support defines.
++ *
++ * Matt Waddel Matt.Waddel@freescale.com
++ * Copyright Freescale Semiconductor, Inc. 2007
++ *
++ * Derived from m68knommu version of this same file (Greg Ungerer & Lineo).
++ *
++ */
++
++#ifndef mcfuart_h
++#define mcfuart_h
++
++/*
++ * Define the base address of the UARTS within the MBAR address
++ * space.
++ */
++#if defined(CONFIG_M54455)
++#include <asm/mcf5445x_intc.h>
++#define MCFUART_BASE1	0xfc060000	/* Base address of UART1 */
++#define MCFUART_BASE2	0xfc064000	/* Base address of UART2 */
++#define MCFUART_BASE3	0xfc068000	/* Base address of UART3 */
++#define MCFINT_VECBASE	64
++#define MCFINT_UART0	26
++#endif
++
++
++/*
++ * Define the ColdFire UART register set addresses.
++ */
++#define MCFUART_UMR	0x00	/* Mode register (r/w) */
++#define MCFUART_USR	0x04	/* Status register (r) */
++#define MCFUART_UCSR	0x04	/* Clock Select (w) */
++#define MCFUART_UCR	0x08	/* Command register (w) */
++#define MCFUART_URB	0x0c	/* Receiver Buffer (r) */
++#define MCFUART_UTB	0x0c	/* Transmit Buffer (w) */
++#define MCFUART_UIPCR	0x10	/* Input Port Change (r) */
++#define MCFUART_UACR	0x10	/* Auxiliary Control (w) */
++#define MCFUART_UISR	0x14	/* Interrupt Status (r) */
++#define MCFUART_UIMR	0x14	/* Interrupt Mask (w) */
++#define MCFUART_UBG1	0x18	/* Baud Rate MSB (r/w) */
++#define MCFUART_UBG2	0x1c	/* Baud Rate LSB (r/w) */
++#ifdef CONFIG_M5272
++#define MCFUART_UTF	0x28	/* Transmitter FIFO (r/w) */
++#define MCFUART_URF	0x2c	/* Receiver FIFO (r/w) */
++#define MCFUART_UFPD	0x30	/* Frac Prec. Divider (r/w) */
++#else
++#define MCFUART_UIVR	0x30	/* Interrupt Vector (r/w) */
++#endif
++#define MCFUART_UIPR	0x34	/* Input Port (r) */
++#define MCFUART_UOP1	0x38	/* Output Port Bit Set (w) */
++#define MCFUART_UOP0	0x3c	/* Output Port Bit Reset (w) */
++
++
++/*
++ * Define bit flags in Mode Register 1 (MR1).
++ */
++#define MCFUART_MR1_RXRTS	0x80	/* Auto RTS flow control */
++#define MCFUART_MR1_RXIRQFULL	0x40	/* RX IRQ type FULL */
++#define MCFUART_MR1_RXIRQRDY	0x00	/* RX IRQ type RDY */
++#define MCFUART_MR1_RXERRBLOCK	0x20	/* RX block error mode */
++#define MCFUART_MR1_RXERRCHAR	0x00	/* RX char error mode */
++
++#define MCFUART_MR1_PARITYNONE	0x10	/* No parity */
++#define MCFUART_MR1_PARITYEVEN	0x00	/* Even parity */
++#define MCFUART_MR1_PARITYODD	0x04	/* Odd parity */
++#define MCFUART_MR1_PARITYSPACE	0x08	/* Space parity */
++#define MCFUART_MR1_PARITYMARK	0x0c	/* Mark parity */
++
++#define MCFUART_MR1_CS5	0x00	/* 5 bits per char */
++#define MCFUART_MR1_CS6	0x01	/* 6 bits per char */
++#define MCFUART_MR1_CS7	0x02	/* 7 bits per char */
++#define MCFUART_MR1_CS8	0x03	/* 8 bits per char */
++
++/*
++ * Define bit flags in Mode Register 2 (MR2).
++ */ ++#define MCFUART_MR2_LOOPBACK 0x80 /* Loopback mode */ ++#define MCFUART_MR2_REMOTELOOP 0xc0 /* Remote loopback mode */ ++#define MCFUART_MR2_AUTOECHO 0x40 /* Automatic echo */ ++#define MCFUART_MR2_TXRTS 0x20 /* Assert RTS on TX */ ++#define MCFUART_MR2_TXCTS 0x10 /* Auto CTS flow control */ ++ ++#define MCFUART_MR2_STOP1 0x07 /* 1 stop bit */ ++#define MCFUART_MR2_STOP15 0x08 /* 1.5 stop bits */ ++#define MCFUART_MR2_STOP2 0x0f /* 2 stop bits */ ++ ++/* ++ * Define bit flags in Status Register (USR). ++ */ ++#define MCFUART_USR_RXBREAK 0x80 /* Received BREAK */ ++#define MCFUART_USR_RXFRAMING 0x40 /* Received framing error */ ++#define MCFUART_USR_RXPARITY 0x20 /* Received parity error */ ++#define MCFUART_USR_RXOVERRUN 0x10 /* Received overrun error */ ++#define MCFUART_USR_TXEMPTY 0x08 /* Transmitter empty */ ++#define MCFUART_USR_TXREADY 0x04 /* Transmitter ready */ ++#define MCFUART_USR_RXFULL 0x02 /* Receiver full */ ++#define MCFUART_USR_RXREADY 0x01 /* Receiver ready */ ++ ++#define MCFUART_USR_RXERR (MCFUART_USR_RXBREAK | MCFUART_USR_RXFRAMING | \ ++ MCFUART_USR_RXPARITY | MCFUART_USR_RXOVERRUN) ++ ++/* ++ * Define bit flags in Clock Select Register (UCSR). ++ */ ++#define MCFUART_UCSR_RXCLKTIMER 0xd0 /* RX clock is timer */ ++#define MCFUART_UCSR_RXCLKEXT16 0xe0 /* RX clock is external x16 */ ++#define MCFUART_UCSR_RXCLKEXT1 0xf0 /* RX clock is external x1 */ ++ ++#define MCFUART_UCSR_TXCLKTIMER 0x0d /* TX clock is timer */ ++#define MCFUART_UCSR_TXCLKEXT16 0x0e /* TX clock is external x16 */ ++#define MCFUART_UCSR_TXCLKEXT1 0x0f /* TX clock is external x1 */ ++ ++/* ++ * Define bit flags in Command Register (UCR). ++ */ ++#define MCFUART_UCR_CMDNULL 0x00 /* No command */ ++#define MCFUART_UCR_CMDRESETMRPTR 0x10 /* Reset MR pointer */ ++#define MCFUART_UCR_CMDRESETRX 0x20 /* Reset receiver */ ++#define MCFUART_UCR_CMDRESETTX 0x30 /* Reset transmitter */ ++#define MCFUART_UCR_CMDRESETERR 0x40 /* Reset error status */ ++#define MCFUART_UCR_CMDRESETBREAK 0x50 /* Reset BREAK change */ ++#define MCFUART_UCR_CMDBREAKSTART 0x60 /* Start BREAK */ ++#define MCFUART_UCR_CMDBREAKSTOP 0x70 /* Stop BREAK */ ++ ++#define MCFUART_UCR_TXNULL 0x00 /* No TX command */ ++#define MCFUART_UCR_TXENABLE 0x04 /* Enable TX */ ++#define MCFUART_UCR_TXDISABLE 0x08 /* Disable TX */ ++#define MCFUART_UCR_RXNULL 0x00 /* No RX command */ ++#define MCFUART_UCR_RXENABLE 0x01 /* Enable RX */ ++#define MCFUART_UCR_RXDISABLE 0x02 /* Disable RX */ ++ ++/* ++ * Define bit flags in Input Port Change Register (UIPCR). ++ */ ++#define MCFUART_UIPCR_CTSCOS 0x10 /* CTS change of state */ ++#define MCFUART_UIPCR_CTS 0x01 /* CTS value */ ++ ++/* ++ * Define bit flags in Input Port Register (UIP). ++ */ ++#define MCFUART_UIPR_CTS 0x01 /* CTS value */ ++ ++/* ++ * Define bit flags in Output Port Registers (UOP). ++ * Clear bit by writing to UOP0, set by writing to UOP1. ++ */ ++#define MCFUART_UOP_RTS 0x01 /* RTS set or clear */ ++ ++/* ++ * Define bit flags in the Auxiliary Control Register (UACR). ++ */ ++#define MCFUART_UACR_IEC 0x01 /* Input enable control */ ++ ++/* ++ * Define bit flags in Interrupt Status Register (UISR). ++ * These same bits are used for the Interrupt Mask Register (UIMR). 
 */
++#define MCFUART_UIR_COS		0x80	/* Change of state (CTS) */
++#define MCFUART_UIR_DELTABREAK	0x04	/* Break start or stop */
++#define MCFUART_UIR_RXREADY	0x02	/* Receiver ready */
++#define MCFUART_UIR_TXREADY	0x01	/* Transmitter ready */
++
++#ifdef CONFIG_M5272
++/*
++ * Define bit flags in the Transmitter FIFO Register (UTF).
++ */
++#define MCFUART_UTF_TXB		0x1f	/* Transmitter data level */
++#define MCFUART_UTF_FULL	0x20	/* Transmitter fifo full */
++#define MCFUART_UTF_TXS		0xc0	/* Transmitter status */
++
++/*
++ * Define bit flags in the Receiver FIFO Register (URF).
++ */
++#define MCFUART_URF_RXB		0x1f	/* Receiver data level */
++#define MCFUART_URF_FULL	0x20	/* Receiver fifo full */
++#define MCFUART_URF_RXS		0xc0	/* Receiver status */
++#endif
++
++#endif /* mcfuart_h */
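
The exception-table scheme described in cf_uaccess.h pairs the address of each instruction allowed to fault with a fixup address; on a fault inside a user access, the handler redirects the pc to the fixup, whose code sets the error return. A self-contained sketch of that lookup, where search_fixup() is a hypothetical stand-in for the kernel's table search, not a function this patch defines:

	struct exception_table_entry {
		unsigned long insn, fixup;
	};

	/* Return the fixup address for a faulting pc, or 0 if the
	 * fault was not one the table anticipated. */
	static unsigned long search_fixup(const struct exception_table_entry *tab,
					  unsigned int n, unsigned long pc)
	{
		unsigned int i;

		for (i = 0; i < n; i++)
			if (tab[i].insn == pc)
				return tab[i].fixup;
		return 0;
	}

In the __put_user_asm expansion, the ".long 1b,10b" directives emit exactly such a pair into the __ex_table section: a fault at label 1 resumes at label 10, which loads -EFAULT into the error register before rejoining the normal path.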
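
cf_tlbflush.h only ever writes MMUOR_CNL, which asks the MMU to drop its non-locked TLB entries, but the cfmmu.h definitions also cover loading a translation. A hedged sketch of what a TLB load could look like when composed from those bit-field macros; the exact sequence the V4e hardware expects is an assumption here, not something this patch shows (the real work happens in the cf_tlb_miss() routine declared at the bottom of cfmmu.h):

	static void tlb_load_sketch(unsigned long va, unsigned long pa,
				    unsigned char asid, int supervisor)
	{
		*MMUAR = va & MMUAR_VP;			/* entry to replace */
		*MMUTR = (va & MMUTR_VA)		/* virtual tag */
		       | (asid << MMUTR_IDN)		/* address space ID */
		       | MMUTR_V;			/* valid */
		*MMUDR = (pa & MMUDR_PA)		/* physical address */
		       | MMUDR_SZ8K			/* matches PAGE_SHIFT >= 13 */
		       | MMUDR_R | MMUDR_W | MMUDR_X	/* read/write/execute */
		       | (supervisor ? MMUDR_SP : 0);	/* supervisor protect */
		*MMUOR = MMUOR_ACC | MMUOR_UAA;		/* write entry into TLB */
	}

Note that MMUTR, MMUDR and MMUAR expand through REG32() and MMUOR through REG16() from coldfire.h, so the plain pointer dereferences above are well formed volatile accesses.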
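
Finally, mcfuart.h is pure register layout, so a short usage sketch ties it together: polled transmit of one character on UART1. The CONFIG_M54455 base address is taken from the header; treating each register as a byte at its listed offset is an assumption, and UART1_REG() and uart1_putc_sketch() are hypothetical names:

	#include <asm/mcfuart.h>

	#define UART1_REG(off) \
		(*(volatile unsigned char *)(MCFUART_BASE1 + (off)))

	static void uart1_putc_sketch(char c)
	{
		/* Spin until the transmitter can accept a character. */
		while (!(UART1_REG(MCFUART_USR) & MCFUART_USR_TXREADY))
			;
		UART1_REG(MCFUART_UTB) = c;	/* load transmit buffer */
	}

Reads and writes at the same offset reach different registers (status vs. clock select at 0x04, receive vs. transmit buffer at 0x0c), which is why the register table lists two names for those offsets.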